From 5726093fcb92586db06e5aa9929895eb8fe97b9c Mon Sep 17 00:00:00 2001 From: "pierre.delaunay" Date: Mon, 22 Jul 2024 10:49:50 -0400 Subject: [PATCH] Pin --- .pin/constraints-cuda-torch.txt | 48 ++- .../accelerate_opt/requirements.cuda.txt | 10 +- benchmarks/brax/requirements.cuda.txt | 4 +- benchmarks/diffusion/requirements.cuda.txt | 383 ++++++++++++++++++ benchmarks/diffusion/requirements.in | 3 +- benchmarks/dlrm/requirements.cuda.txt | 4 +- benchmarks/flops/requirements.cuda.txt | 2 +- benchmarks/huggingface/requirements.cuda.txt | 6 +- benchmarks/llama/requirements.cuda.txt | 6 +- benchmarks/stargan/requirements.cuda.txt | 4 +- benchmarks/super-slomo/requirements.cuda.txt | 2 +- benchmarks/timm/requirements.cuda.txt | 6 +- benchmarks/torchvision/requirements.cuda.txt | 2 +- .../torchvision_ddp/requirements.cuda.txt | 2 +- milabench/_version.py | 6 +- scripts/article/run_cuda_dev.sh | 25 +- 16 files changed, 467 insertions(+), 46 deletions(-) create mode 100644 benchmarks/diffusion/requirements.cuda.txt diff --git a/.pin/constraints-cuda-torch.txt b/.pin/constraints-cuda-torch.txt index 22b7d0106..ca3f9e49c 100644 --- a/.pin/constraints-cuda-torch.txt +++ b/.pin/constraints-cuda-torch.txt @@ -2,7 +2,7 @@ # This file is autogenerated by pip-compile with Python 3.10 # by the following command: # -# pip-compile --output-file=.pin/constraints-cuda-torch.txt .pin/tmp-constraints.txt benchmarks/accelerate_opt/requirements.in benchmarks/brax/requirements.in benchmarks/dlrm/requirements.in benchmarks/flops/requirements.in benchmarks/huggingface/requirements.in benchmarks/llama/requirements.in benchmarks/stargan/requirements.in benchmarks/super-slomo/requirements.in benchmarks/timm/requirements.in benchmarks/torchvision/requirements.in benchmarks/torchvision_ddp/requirements.in +# pip-compile --output-file=.pin/constraints-cuda-torch.txt .pin/tmp-constraints.txt benchmarks/accelerate_opt/requirements.in benchmarks/brax/requirements.in 
benchmarks/diffusion/requirements.in benchmarks/dlrm/requirements.in benchmarks/flops/requirements.in benchmarks/huggingface/requirements.in benchmarks/llama/requirements.in benchmarks/stargan/requirements.in benchmarks/super-slomo/requirements.in benchmarks/timm/requirements.in benchmarks/torchvision/requirements.in benchmarks/torchvision_ddp/requirements.in # --extra-index-url https://pypi.ngc.nvidia.com --extra-index-url https://download.pytorch.org/whl/cu121 @@ -21,7 +21,10 @@ absl-py==2.1.0 # orbax-checkpoint # tensorboard accelerate==0.32.1 - # via -r benchmarks/accelerate_opt/requirements.in + # via + # -r benchmarks/accelerate_opt/requirements.in + # -r benchmarks/diffusion/requirements.in + # diffusers aiohttp==3.9.5 # via # datasets @@ -32,6 +35,8 @@ annotated-types==0.7.0 # via pydantic antlr4-python3-runtime==4.9.3 # via omegaconf +argklass==1.4.4 + # via -r benchmarks/diffusion/requirements.in asttokens==2.4.1 # via giving async-timeout==4.0.3 @@ -44,7 +49,7 @@ blinker==1.8.2 # via flask brax==0.10.5 # via -r benchmarks/brax/requirements.in -certifi==2024.6.2 +certifi==2024.7.4 # via requests charset-normalizer==3.3.2 # via requests @@ -61,10 +66,13 @@ contextlib2==21.6.0 datasets==2.20.0 # via # -r benchmarks/accelerate_opt/requirements.in + # -r benchmarks/diffusion/requirements.in # -r benchmarks/llama/requirements.in # evaluate deepspeed==0.14.4 # via -r benchmarks/accelerate_opt/requirements.in +diffusers[torch]==0.29.2 + # via -r benchmarks/diffusion/requirements.in dill==0.3.8 # via # datasets @@ -96,6 +104,7 @@ fbgemm-gpu==0.7.0+cu121 filelock==3.15.4 # via # datasets + # diffusers # gdown # huggingface-hub # torch @@ -145,12 +154,15 @@ gym==0.26.2 gym-notices==0.0.8 # via gym hjson==3.1.0 - # via deepspeed -huggingface-hub==0.23.5 + # via + # argklass + # deepspeed +huggingface-hub==0.24.0 # via # -r benchmarks/timm/requirements.in # accelerate # datasets + # diffusers # evaluate # tokenizers # transformers @@ -159,9 +171,12 @@ idna==3.7 # 
requests # yarl importlib-metadata==8.0.0 - # via torchx + # via + # diffusers + # torchx importlib-resources==6.4.0 # via + # argklass # etils # torchcompat itsdangerous==2.2.0 @@ -255,6 +270,7 @@ numpy==1.26.4 # chex # datasets # deepspeed + # diffusers # dm-env # evaluate # fairscale @@ -346,7 +362,7 @@ optax==0.2.3 # via # brax # flax -orbax-checkpoint==0.5.21 +orbax-checkpoint==0.5.22 # via # brax # flax @@ -370,6 +386,7 @@ pandas==2.2.2 pillow==10.4.0 # via # brax + # diffusers # torchvision protobuf==4.25.3 # via @@ -390,9 +407,9 @@ pyarrow==17.0.0 # via datasets pyarrow-hotfix==0.6 # via datasets -pydantic==2.7.4 +pydantic==2.8.2 # via deepspeed -pydantic-core==2.18.4 +pydantic-core==2.20.1 # via pydantic pydot==3.0.1 # via -r benchmarks/dlrm/requirements.in @@ -429,10 +446,13 @@ pyyaml==6.0.1 reactivex==4.0.4 # via giving regex==2024.5.15 - # via transformers + # via + # diffusers + # transformers requests[socks]==2.32.3 # via # datasets + # diffusers # docker # evaluate # gdown @@ -447,6 +467,7 @@ safetensors==0.4.3 # via # -r benchmarks/timm/requirements.in # accelerate + # diffusers # transformers scikit-learn==1.5.1 # via -r benchmarks/dlrm/requirements.in @@ -469,7 +490,7 @@ six==1.16.0 # tensorboard soupsieve==2.5 # via beautifulsoup4 -sympy==1.13.0 +sympy==1.13.1 # via torch tabulate==0.9.0 # via torchx @@ -506,6 +527,7 @@ torch==2.3.1+cu121 # -r benchmarks/torchvision_ddp/requirements.in # accelerate # deepspeed + # diffusers # fairscale # torchaudio # torchmetrics @@ -526,6 +548,7 @@ torchrec==0.7.0+cu121 torchvision==0.18.1+cu121 # via # -r benchmarks/accelerate_opt/requirements.in + # -r benchmarks/diffusion/requirements.in # -r benchmarks/flops/requirements.in # -r benchmarks/stargan/requirements.in # -r benchmarks/super-slomo/requirements.in @@ -538,6 +561,7 @@ torchx==0.7.0 # via -r benchmarks/dlrm/requirements.in tqdm==4.66.4 # via + # -r benchmarks/diffusion/requirements.in # -r benchmarks/dlrm/requirements.in # -r 
benchmarks/flops/requirements.in # -r benchmarks/super-slomo/requirements.in @@ -553,6 +577,7 @@ tqdm==4.66.4 transformers==4.42.4 # via # -r benchmarks/accelerate_opt/requirements.in + # -r benchmarks/diffusion/requirements.in # -r benchmarks/huggingface/requirements.in # -r benchmarks/llama/requirements.in trimesh==4.4.3 @@ -592,6 +617,7 @@ voir==0.2.16 # -c .pin/../constraints/cuda.txt # -r benchmarks/accelerate_opt/requirements.in # -r benchmarks/brax/requirements.in + # -r benchmarks/diffusion/requirements.in # -r benchmarks/dlrm/requirements.in # -r benchmarks/flops/requirements.in # -r benchmarks/huggingface/requirements.in diff --git a/benchmarks/accelerate_opt/requirements.cuda.txt b/benchmarks/accelerate_opt/requirements.cuda.txt index cc88ed752..7c629e53d 100644 --- a/benchmarks/accelerate_opt/requirements.cuda.txt +++ b/benchmarks/accelerate_opt/requirements.cuda.txt @@ -42,7 +42,7 @@ attrs==23.2.0 # via # -c .pin/../.pin/constraints-cuda-torch.txt # aiohttp -certifi==2024.6.2 +certifi==2024.7.4 # via # -c .pin/../.pin/constraints-cuda-torch.txt # requests @@ -106,7 +106,7 @@ hjson==3.1.0 # via # -c .pin/../.pin/constraints-cuda-torch.txt # deepspeed -huggingface-hub==0.23.5 +huggingface-hub==0.24.0 # via # -c .pin/../.pin/constraints-cuda-torch.txt # accelerate @@ -272,11 +272,11 @@ pyarrow-hotfix==0.6 # via # -c .pin/../.pin/constraints-cuda-torch.txt # datasets -pydantic==2.7.4 +pydantic==2.8.2 # via # -c .pin/../.pin/constraints-cuda-torch.txt # deepspeed -pydantic-core==2.18.4 +pydantic-core==2.20.1 # via # -c .pin/../.pin/constraints-cuda-torch.txt # pydantic @@ -334,7 +334,7 @@ six==1.16.0 # -c .pin/../.pin/constraints-cuda-torch.txt # asttokens # python-dateutil -sympy==1.13.0 +sympy==1.13.1 # via # -c .pin/../.pin/constraints-cuda-torch.txt # torch diff --git a/benchmarks/brax/requirements.cuda.txt b/benchmarks/brax/requirements.cuda.txt index 005bd477e..2a424416a 100644 --- a/benchmarks/brax/requirements.cuda.txt +++ 
b/benchmarks/brax/requirements.cuda.txt @@ -314,7 +314,7 @@ optax==0.2.3 # -c .pin/../.pin/constraints-cuda-torch.txt # brax # flax -orbax-checkpoint==0.5.21 +orbax-checkpoint==0.5.22 # via # -c .pin/../.pin/constraints-cuda-torch.txt # brax @@ -389,7 +389,7 @@ six==1.16.0 # -c .pin/../.pin/constraints-cuda-torch.txt # asttokens # ml-collections -sympy==1.13.0 +sympy==1.13.1 # via # -c .pin/../.pin/constraints-cuda-torch.txt # torch diff --git a/benchmarks/diffusion/requirements.cuda.txt b/benchmarks/diffusion/requirements.cuda.txt new file mode 100644 index 000000000..16d5d651a --- /dev/null +++ b/benchmarks/diffusion/requirements.cuda.txt @@ -0,0 +1,383 @@ +# +# This file is autogenerated by pip-compile with Python 3.10 +# by the following command: +# +# pip-compile --output-file=benchmarks/diffusion/requirements.cuda.txt .pin/tmp-constraints-cuda-diffusion-gpus.txt benchmarks/diffusion/requirements.in +# +--extra-index-url https://pypi.ngc.nvidia.com +--extra-index-url https://download.pytorch.org/whl/cu121 +--find-links https://storage.googleapis.com/jax-releases/jax_cuda_releases.html +--trusted-host pypi.ngc.nvidia.com + +accelerate==0.32.1 + # via + # -c .pin/../.pin/constraints-cuda-torch.txt + # -r benchmarks/diffusion/requirements.in + # diffusers +aiohttp==3.9.5 + # via + # -c .pin/../.pin/constraints-cuda-torch.txt + # datasets + # fsspec +aiosignal==1.3.1 + # via + # -c .pin/../.pin/constraints-cuda-torch.txt + # aiohttp +antlr4-python3-runtime==4.9.3 + # via + # -c .pin/../.pin/constraints-cuda-torch.txt + # omegaconf +argklass==1.4.4 + # via + # -c .pin/../.pin/constraints-cuda-torch.txt + # -r benchmarks/diffusion/requirements.in +asttokens==2.4.1 + # via + # -c .pin/../.pin/constraints-cuda-torch.txt + # giving +async-timeout==4.0.3 + # via + # -c .pin/../.pin/constraints-cuda-torch.txt + # aiohttp +attrs==23.2.0 + # via + # -c .pin/../.pin/constraints-cuda-torch.txt + # aiohttp +certifi==2024.7.4 + # via + # -c 
.pin/../.pin/constraints-cuda-torch.txt + # requests +charset-normalizer==3.3.2 + # via + # -c .pin/../.pin/constraints-cuda-torch.txt + # requests +codefind==0.1.6 + # via + # -c .pin/../.pin/constraints-cuda-torch.txt + # ptera +datasets==2.20.0 + # via + # -c .pin/../.pin/constraints-cuda-torch.txt + # -r benchmarks/diffusion/requirements.in +diffusers[torch]==0.29.2 + # via + # -c .pin/../.pin/constraints-cuda-torch.txt + # -r benchmarks/diffusion/requirements.in +dill==0.3.8 + # via + # -c .pin/../.pin/constraints-cuda-torch.txt + # datasets + # multiprocess +executing==1.2.0 + # via + # -c .pin/../.pin/constraints-cuda-torch.txt + # varname +filelock==3.15.4 + # via + # -c .pin/../.pin/constraints-cuda-torch.txt + # datasets + # diffusers + # huggingface-hub + # torch + # transformers + # triton +frozenlist==1.4.1 + # via + # -c .pin/../.pin/constraints-cuda-torch.txt + # aiohttp + # aiosignal +fsspec[http]==2024.5.0 + # via + # -c .pin/../.pin/constraints-cuda-torch.txt + # datasets + # huggingface-hub + # torch +giving==0.4.2 + # via + # -c .pin/../.pin/constraints-cuda-torch.txt + # ptera + # voir +hjson==3.1.0 + # via + # -c .pin/../.pin/constraints-cuda-torch.txt + # argklass +huggingface-hub==0.24.0 + # via + # -c .pin/../.pin/constraints-cuda-torch.txt + # accelerate + # datasets + # diffusers + # tokenizers + # transformers +idna==3.7 + # via + # -c .pin/../.pin/constraints-cuda-torch.txt + # requests + # yarl +importlib-metadata==8.0.0 + # via + # -c .pin/../.pin/constraints-cuda-torch.txt + # diffusers +importlib-resources==6.4.0 + # via + # -c .pin/../.pin/constraints-cuda-torch.txt + # argklass +jinja2==3.1.4 + # via + # -c .pin/../.pin/constraints-cuda-torch.txt + # torch +markdown-it-py==3.0.0 + # via + # -c .pin/../.pin/constraints-cuda-torch.txt + # rich +markupsafe==2.1.5 + # via + # -c .pin/../.pin/constraints-cuda-torch.txt + # jinja2 +mdurl==0.1.2 + # via + # -c .pin/../.pin/constraints-cuda-torch.txt + # markdown-it-py +mpmath==1.3.0 + # 
via + # -c .pin/../.pin/constraints-cuda-torch.txt + # sympy +multidict==6.0.5 + # via + # -c .pin/../.pin/constraints-cuda-torch.txt + # aiohttp + # yarl +multiprocess==0.70.16 + # via + # -c .pin/../.pin/constraints-cuda-torch.txt + # datasets +networkx==3.3 + # via + # -c .pin/../.pin/constraints-cuda-torch.txt + # torch +numpy==1.26.4 + # via + # -c .pin/../.pin/constraints-cuda-torch.txt + # accelerate + # datasets + # diffusers + # pandas + # pyarrow + # torchvision + # transformers +nvidia-cublas-cu12==12.1.3.1 + # via + # -c .pin/../.pin/constraints-cuda-torch.txt + # nvidia-cudnn-cu12 + # nvidia-cusolver-cu12 + # torch +nvidia-cuda-cupti-cu12==12.1.105 + # via + # -c .pin/../.pin/constraints-cuda-torch.txt + # torch +nvidia-cuda-nvrtc-cu12==12.1.105 + # via + # -c .pin/../.pin/constraints-cuda-torch.txt + # torch +nvidia-cuda-runtime-cu12==12.1.105 + # via + # -c .pin/../.pin/constraints-cuda-torch.txt + # torch +nvidia-cudnn-cu12==8.9.2.26 + # via + # -c .pin/../.pin/constraints-cuda-torch.txt + # torch +nvidia-cufft-cu12==11.0.2.54 + # via + # -c .pin/../.pin/constraints-cuda-torch.txt + # torch +nvidia-curand-cu12==10.3.2.106 + # via + # -c .pin/../.pin/constraints-cuda-torch.txt + # torch +nvidia-cusolver-cu12==11.4.5.107 + # via + # -c .pin/../.pin/constraints-cuda-torch.txt + # torch +nvidia-cusparse-cu12==12.1.0.106 + # via + # -c .pin/../.pin/constraints-cuda-torch.txt + # nvidia-cusolver-cu12 + # torch +nvidia-nccl-cu12==2.20.5 + # via + # -c .pin/../.pin/constraints-cuda-torch.txt + # torch +nvidia-nvjitlink-cu12==12.5.82 + # via + # -c .pin/../.pin/constraints-cuda-torch.txt + # nvidia-cusolver-cu12 + # nvidia-cusparse-cu12 +nvidia-nvtx-cu12==12.1.105 + # via + # -c .pin/../.pin/constraints-cuda-torch.txt + # torch +omegaconf==2.3.0 + # via + # -c .pin/../.pin/constraints-cuda-torch.txt + # voir +ovld==0.3.5 + # via + # -c .pin/../.pin/constraints-cuda-torch.txt + # voir +packaging==24.1 + # via + # -c .pin/../.pin/constraints-cuda-torch.txt + # 
accelerate + # datasets + # huggingface-hub + # transformers +pandas==2.2.2 + # via + # -c .pin/../.pin/constraints-cuda-torch.txt + # datasets +pillow==10.4.0 + # via + # -c .pin/../.pin/constraints-cuda-torch.txt + # diffusers + # torchvision +psutil==5.9.8 + # via + # -c .pin/../.pin/constraints-cuda-torch.txt + # accelerate + # voir +ptera==1.4.1 + # via + # -c .pin/../.pin/constraints-cuda-torch.txt + # voir +pyarrow==17.0.0 + # via + # -c .pin/../.pin/constraints-cuda-torch.txt + # datasets +pyarrow-hotfix==0.6 + # via + # -c .pin/../.pin/constraints-cuda-torch.txt + # datasets +pygments==2.18.0 + # via + # -c .pin/../.pin/constraints-cuda-torch.txt + # rich +pynvml==11.5.3 + # via + # -c .pin/../.pin/constraints-cuda-torch.txt + # voir +python-dateutil==2.9.0.post0 + # via + # -c .pin/../.pin/constraints-cuda-torch.txt + # pandas +pytz==2024.1 + # via + # -c .pin/../.pin/constraints-cuda-torch.txt + # pandas +pyyaml==6.0.1 + # via + # -c .pin/../.pin/constraints-cuda-torch.txt + # accelerate + # datasets + # huggingface-hub + # omegaconf + # transformers +reactivex==4.0.4 + # via + # -c .pin/../.pin/constraints-cuda-torch.txt + # giving +regex==2024.5.15 + # via + # -c .pin/../.pin/constraints-cuda-torch.txt + # diffusers + # transformers +requests==2.32.3 + # via + # -c .pin/../.pin/constraints-cuda-torch.txt + # datasets + # diffusers + # huggingface-hub + # transformers +rich==13.7.1 + # via + # -c .pin/../.pin/constraints-cuda-torch.txt + # voir +safetensors==0.4.3 + # via + # -c .pin/../.pin/constraints-cuda-torch.txt + # accelerate + # diffusers + # transformers +six==1.16.0 + # via + # -c .pin/../.pin/constraints-cuda-torch.txt + # asttokens + # python-dateutil +sympy==1.13.1 + # via + # -c .pin/../.pin/constraints-cuda-torch.txt + # torch +tokenizers==0.19.1 + # via + # -c .pin/../.pin/constraints-cuda-torch.txt + # transformers +torch==2.3.1+cu121 + # via + # -c .pin/../.pin/constraints-cuda-torch.txt + # accelerate + # diffusers + # torchvision 
+torchvision==0.18.1+cu121 + # via + # -c .pin/../.pin/constraints-cuda-torch.txt + # -r benchmarks/diffusion/requirements.in +tqdm==4.66.4 + # via + # -c .pin/../.pin/constraints-cuda-torch.txt + # -r benchmarks/diffusion/requirements.in + # datasets + # huggingface-hub + # transformers +transformers==4.42.4 + # via + # -c .pin/../.pin/constraints-cuda-torch.txt + # -r benchmarks/diffusion/requirements.in +triton==2.3.1 + # via + # -c .pin/../.pin/constraints-cuda-torch.txt + # torch +typing-extensions==4.12.2 + # via + # -c .pin/../.pin/constraints-cuda-torch.txt + # huggingface-hub + # reactivex + # torch +tzdata==2024.1 + # via + # -c .pin/../.pin/constraints-cuda-torch.txt + # pandas +urllib3==1.26.19 + # via + # -c .pin/../.pin/constraints-cuda-torch.txt + # requests +varname==0.10.0 + # via + # -c .pin/../.pin/constraints-cuda-torch.txt + # giving +voir==0.2.16 + # via + # -c .pin/../.pin/constraints-cuda-torch.txt + # -c .pin/../constraints/cuda.txt + # -r benchmarks/diffusion/requirements.in +xxhash==3.4.1 + # via + # -c .pin/../.pin/constraints-cuda-torch.txt + # datasets +yarl==1.9.4 + # via + # -c .pin/../.pin/constraints-cuda-torch.txt + # aiohttp +zipp==3.19.2 + # via + # -c .pin/../.pin/constraints-cuda-torch.txt + # importlib-metadata diff --git a/benchmarks/diffusion/requirements.in b/benchmarks/diffusion/requirements.in index 9f27eae35..4a7a2c824 100644 --- a/benchmarks/diffusion/requirements.in +++ b/benchmarks/diffusion/requirements.in @@ -1,8 +1,9 @@ voir>=0.2.9,<0.3 +diffusers diffusers[torch] accelerate datasets tqdm torchvision argklass -transformers \ No newline at end of file +transformers diff --git a/benchmarks/dlrm/requirements.cuda.txt b/benchmarks/dlrm/requirements.cuda.txt index d26916eb3..eb3bf343d 100644 --- a/benchmarks/dlrm/requirements.cuda.txt +++ b/benchmarks/dlrm/requirements.cuda.txt @@ -21,7 +21,7 @@ asttokens==2.4.1 # via # -c .pin/../.pin/constraints-cuda-torch.txt # giving -certifi==2024.6.2 +certifi==2024.7.4 # via # -c 
.pin/../.pin/constraints-cuda-torch.txt # requests @@ -268,7 +268,7 @@ six==1.16.0 # -c .pin/../.pin/constraints-cuda-torch.txt # asttokens # tensorboard -sympy==1.13.0 +sympy==1.13.1 # via # -c .pin/../.pin/constraints-cuda-torch.txt # torch diff --git a/benchmarks/flops/requirements.cuda.txt b/benchmarks/flops/requirements.cuda.txt index f737a09ec..91aee8272 100644 --- a/benchmarks/flops/requirements.cuda.txt +++ b/benchmarks/flops/requirements.cuda.txt @@ -167,7 +167,7 @@ six==1.16.0 # via # -c .pin/../.pin/constraints-cuda-torch.txt # asttokens -sympy==1.13.0 +sympy==1.13.1 # via # -c .pin/../.pin/constraints-cuda-torch.txt # torch diff --git a/benchmarks/huggingface/requirements.cuda.txt b/benchmarks/huggingface/requirements.cuda.txt index b9832fd4b..40ab23769 100644 --- a/benchmarks/huggingface/requirements.cuda.txt +++ b/benchmarks/huggingface/requirements.cuda.txt @@ -17,7 +17,7 @@ asttokens==2.4.1 # via # -c .pin/../.pin/constraints-cuda-torch.txt # giving -certifi==2024.6.2 +certifi==2024.7.4 # via # -c .pin/../.pin/constraints-cuda-torch.txt # requests @@ -50,7 +50,7 @@ giving==0.4.2 # -c .pin/../.pin/constraints-cuda-torch.txt # ptera # voir -huggingface-hub==0.23.5 +huggingface-hub==0.24.0 # via # -c .pin/../.pin/constraints-cuda-torch.txt # tokenizers @@ -199,7 +199,7 @@ six==1.16.0 # via # -c .pin/../.pin/constraints-cuda-torch.txt # asttokens -sympy==1.13.0 +sympy==1.13.1 # via # -c .pin/../.pin/constraints-cuda-torch.txt # torch diff --git a/benchmarks/llama/requirements.cuda.txt b/benchmarks/llama/requirements.cuda.txt index 8c0edf268..bd0e84db2 100644 --- a/benchmarks/llama/requirements.cuda.txt +++ b/benchmarks/llama/requirements.cuda.txt @@ -34,7 +34,7 @@ attrs==23.2.0 # via # -c .pin/../.pin/constraints-cuda-torch.txt # aiohttp -certifi==2024.6.2 +certifi==2024.7.4 # via # -c .pin/../.pin/constraints-cuda-torch.txt # requests @@ -91,7 +91,7 @@ giving==0.4.2 # -c .pin/../.pin/constraints-cuda-torch.txt # ptera # voir -huggingface-hub==0.23.5 
+huggingface-hub==0.24.0 # via # -c .pin/../.pin/constraints-cuda-torch.txt # datasets @@ -284,7 +284,7 @@ six==1.16.0 # asttokens # fire # python-dateutil -sympy==1.13.0 +sympy==1.13.1 # via # -c .pin/../.pin/constraints-cuda-torch.txt # torch diff --git a/benchmarks/stargan/requirements.cuda.txt b/benchmarks/stargan/requirements.cuda.txt index 7483e2b0b..9cb7eb695 100644 --- a/benchmarks/stargan/requirements.cuda.txt +++ b/benchmarks/stargan/requirements.cuda.txt @@ -21,7 +21,7 @@ beautifulsoup4==4.12.3 # via # -c .pin/../.pin/constraints-cuda-torch.txt # gdown -certifi==2024.6.2 +certifi==2024.7.4 # via # -c .pin/../.pin/constraints-cuda-torch.txt # requests @@ -197,7 +197,7 @@ soupsieve==2.5 # via # -c .pin/../.pin/constraints-cuda-torch.txt # beautifulsoup4 -sympy==1.13.0 +sympy==1.13.1 # via # -c .pin/../.pin/constraints-cuda-torch.txt # torch diff --git a/benchmarks/super-slomo/requirements.cuda.txt b/benchmarks/super-slomo/requirements.cuda.txt index c4ad9f952..0cc1b2000 100644 --- a/benchmarks/super-slomo/requirements.cuda.txt +++ b/benchmarks/super-slomo/requirements.cuda.txt @@ -169,7 +169,7 @@ six==1.16.0 # via # -c .pin/../.pin/constraints-cuda-torch.txt # asttokens -sympy==1.13.0 +sympy==1.13.1 # via # -c .pin/../.pin/constraints-cuda-torch.txt # torch diff --git a/benchmarks/timm/requirements.cuda.txt b/benchmarks/timm/requirements.cuda.txt index cff9514c1..6d10fab4e 100644 --- a/benchmarks/timm/requirements.cuda.txt +++ b/benchmarks/timm/requirements.cuda.txt @@ -17,7 +17,7 @@ asttokens==2.4.1 # via # -c .pin/../.pin/constraints-cuda-torch.txt # giving -certifi==2024.6.2 +certifi==2024.7.4 # via # -c .pin/../.pin/constraints-cuda-torch.txt # requests @@ -49,7 +49,7 @@ giving==0.4.2 # -c .pin/../.pin/constraints-cuda-torch.txt # ptera # voir -huggingface-hub==0.23.5 +huggingface-hub==0.24.0 # via # -c .pin/../.pin/constraints-cuda-torch.txt # -r benchmarks/timm/requirements.in @@ -195,7 +195,7 @@ six==1.16.0 # via # -c 
.pin/../.pin/constraints-cuda-torch.txt # asttokens -sympy==1.13.0 +sympy==1.13.1 # via # -c .pin/../.pin/constraints-cuda-torch.txt # torch diff --git a/benchmarks/torchvision/requirements.cuda.txt b/benchmarks/torchvision/requirements.cuda.txt index ed2fd79bc..ea33e86f2 100644 --- a/benchmarks/torchvision/requirements.cuda.txt +++ b/benchmarks/torchvision/requirements.cuda.txt @@ -167,7 +167,7 @@ six==1.16.0 # via # -c .pin/../.pin/constraints-cuda-torch.txt # asttokens -sympy==1.13.0 +sympy==1.13.1 # via # -c .pin/../.pin/constraints-cuda-torch.txt # torch diff --git a/benchmarks/torchvision_ddp/requirements.cuda.txt b/benchmarks/torchvision_ddp/requirements.cuda.txt index eb62b0d97..68bacfebb 100644 --- a/benchmarks/torchvision_ddp/requirements.cuda.txt +++ b/benchmarks/torchvision_ddp/requirements.cuda.txt @@ -167,7 +167,7 @@ six==1.16.0 # via # -c .pin/../.pin/constraints-cuda-torch.txt # asttokens -sympy==1.13.0 +sympy==1.13.1 # via # -c .pin/../.pin/constraints-cuda-torch.txt # torch diff --git a/milabench/_version.py b/milabench/_version.py index 37d5b140f..214ff6852 100644 --- a/milabench/_version.py +++ b/milabench/_version.py @@ -1,5 +1,5 @@ """This file is generated, do not modify""" -__tag__ = "v0.1.0-28-g4a1f5055" -__commit__ = "4a1f5055e66cb406b0700e8169ef5dd236f338f9" -__date__ = "2024-07-18 14:56:52 -0400" +__tag__ = "v0.1.0-31-gb8360e4e" +__commit__ = "b8360e4e2b93bdd30f7a4db1b14ff5f0006515f7" +__date__ = "2024-07-19 14:12:04 -0400" diff --git a/scripts/article/run_cuda_dev.sh b/scripts/article/run_cuda_dev.sh index 51450d132..1c3d478ea 100644 --- a/scripts/article/run_cuda_dev.sh +++ b/scripts/article/run_cuda_dev.sh @@ -16,6 +16,11 @@ export MILABENCH_CONFIG="$MILABENCH_WORDIR/milabench/config/standard.yaml" export MILABENCH_VENV="$MILABENCH_WORDIR/env" export BENCHMARK_VENV="$MILABENCH_WORDIR/results/venv/torch" + +if [ -z "${MILABENCH_PREPARE}" ]; then + export MILABENCH_PREPARE=0 +fi + if [ -z "${MILABENCH_SOURCE}" ]; then export 
MILABENCH_CONFIG="$MILABENCH_WORDIR/milabench/config/standard.yaml" else @@ -46,6 +51,8 @@ install_prepare() { . $MILABENCH_WORDIR/env/bin/activate pip install -e $MILABENCH_SOURCE + milabench pin --variant cuda "$@" + # # Install milabench's benchmarks in their venv # @@ -82,12 +89,16 @@ else . $MILABENCH_WORDIR/env/bin/activate fi -cd $MILABENCH_WORDIR -# -# Run the benchmakrs -milabench run "$@" -# -# Display report -milabench report --runs $MILABENCH_WORDIR/results/runs +if [ "$MILABENCH_PREPARE" -eq 0 ]; then +  cd $MILABENCH_WORDIR + + # + # Run the benchmarks + milabench run "$@" + + # + # Display report + milabench report --runs $MILABENCH_WORDIR/results/runs +fi \ No newline at end of file