From 77f56a8824d620b3cb09abb451d5afc5864ef4ac Mon Sep 17 00:00:00 2001
From: "pierre.delaunay"
Date: Thu, 12 Sep 2024 21:16:18 -0400
Subject: [PATCH] Update pins

---
 .github/workflows/tests_unit.yml              |  28 +-
 .pin/constraints-cuda-torch.txt               | 202 +++++++-
 benchmarks/brax/requirements.cuda.txt         |   6 +-
 benchmarks/diffusion/requirements.cuda.txt    |  10 +-
 benchmarks/dinov2/requirements.cuda.txt       |   6 +-
 benchmarks/flops/requirements.cuda.txt        |   6 +-
 benchmarks/geo_gnn/requirements-pre.cuda.txt  | 115 +++--
 benchmarks/geo_gnn/requirements.cuda.txt      | 234 +++++----
 benchmarks/huggingface/requirements.cuda.txt  |  10 +-
 benchmarks/lightning/requirements.cuda.txt    |   6 +-
 benchmarks/llama/requirements.cuda.txt        |  10 +-
 benchmarks/llava/requirements.cuda.txt        |  10 +-
 benchmarks/llm/requirements.cuda.txt          |  10 +-
 benchmarks/purejaxrl/requirements.cuda.txt    | 488 +++++++++++++-----
 benchmarks/recursiongfn/requirements.cuda.txt |  10 +-
 benchmarks/rlhf/requirements.cuda.txt         |  10 +-
 benchmarks/timm/requirements.cuda.txt         |  10 +-
 benchmarks/torchatari/requirements.cuda.txt   |   6 +-
 benchmarks/torchvision/requirements.cuda.txt  |   6 +-
 .../torchvision_ddp/requirements.cuda.txt     |   7 +-
 benchmarks/vjepa/requirements.cuda.txt        |  10 +-
 milabench/_version.py                         |   6 +-
 milabench/report.py                           |   8 +-
 scripts/article/run_cuda.sh                   |   4 +-
 tests/test_mock.py                            |   2 +
 25 files changed, 878 insertions(+), 342 deletions(-)

diff --git a/.github/workflows/tests_unit.yml b/.github/workflows/tests_unit.yml
index 8be9f5e87..4ddcc8471 100644
--- a/.github/workflows/tests_unit.yml
+++ b/.github/workflows/tests_unit.yml
@@ -16,6 +16,8 @@ on:
   # Allow manual triggers
   workflow_dispatch:
 
+env:
+  PIP_CACHE_DIR: /home/runner/work/milabench/milabench/cache
 
 jobs:
   tests:
@@ -27,6 +29,15 @@ jobs:
       cancel-in-progress: true
 
     steps:
+      - uses: easimon/maximize-build-space@master
+        with:
+          remove-dotnet: 'true'
+          remove-codeql: 'true'
+          remove-haskell: 'true'
+          remove-android: 'true'
+          build-mount-path: /home/runner/work/milabench/
+          temp-reserve-mb: 10000
+
       - uses: actions/checkout@v3
 
       - uses: actions/setup-python@v5
@@ -35,24 +46,23 @@ jobs:
       - name: dependencies
         run: |
+          cd /home/runner/work/milabench/milabench
+          pip install virtualenv
+          virtualenv ./env
+          source ./env/bin/activate
+          # pip install -U pip
           pip install poetry
-          poetry env use python3.10
-          source $(poetry env info -p)/bin/activate
           #
           # poetry doesnot work when installing those !?
           #
           pip install antlr4-python3-runtime==4.9.3
           pip install -e .
pip install -e benchmate - # - # - # - poetry install --with dev - name: Simple Template run: | - source $(poetry env info -p)/bin/activate + source ./env/bin/activate milabench new --name simplebench --template simple cd benchmarks/simplebench make tests @@ -61,7 +71,7 @@ jobs: - name: Voir Template run: | - source $(poetry env info -p)/bin/activate + source ./env/bin/activate milabench new --name voirbench --template voir cd benchmarks/voirbench make tests @@ -76,7 +86,7 @@ jobs: env: HUGGING_FACE_TOKEN: ${{ secrets.HUGGING_FACE_TOKEN}} run: | - source $(poetry env info -p)/bin/activate + source ./env/bin/activate coverage run --source=milabench -m pytest --ignore=tests/integration tests/ -vv -x coverage report -m coverage xml diff --git a/.pin/constraints-cuda-torch.txt b/.pin/constraints-cuda-torch.txt index 8ca40820b..71dfbab94 100644 --- a/.pin/constraints-cuda-torch.txt +++ b/.pin/constraints-cuda-torch.txt @@ -2,7 +2,7 @@ # This file is autogenerated by pip-compile with Python 3.10 # by the following command: # -# pip-compile --output-file=.pin/constraints-cuda-torch.txt .pin/tmp-constraints.txt benchmarks/brax/requirements.in benchmarks/diffusion/requirements.in benchmarks/dinov2/requirements.in benchmarks/flops/requirements.in benchmarks/huggingface/requirements.in benchmarks/lightning/requirements.in benchmarks/llama/requirements.in benchmarks/llava/requirements.in benchmarks/llm/requirements.in benchmarks/recursiongfn/requirements.in benchmarks/rlhf/requirements.in benchmarks/timm/requirements.in benchmarks/torchatari/requirements.in benchmarks/torchvision/requirements.in benchmarks/vjepa/requirements.in constraints/extra/torch.cuda.txt +# pip-compile --output-file=.pin/constraints-cuda-torch.txt .pin/tmp-constraints.txt benchmarks/brax/requirements.in benchmarks/diffusion/requirements.in benchmarks/dinov2/requirements.in benchmarks/flops/requirements.in benchmarks/geo_gnn/requirements-pre.in benchmarks/geo_gnn/requirements.in benchmarks/huggingface/requirements.in benchmarks/lightning/requirements.in benchmarks/llama/requirements.in benchmarks/llava/requirements.in benchmarks/llm/requirements.in benchmarks/purejaxrl/requirements.in benchmarks/recursiongfn/requirements.in benchmarks/rlhf/requirements.in benchmarks/timm/requirements.in benchmarks/torchatari/requirements.in benchmarks/torchvision/requirements.in benchmarks/torchvision_ddp/requirements.in benchmarks/vjepa/requirements.in constraints/extra/torch.cuda.txt # --extra-index-url https://pypi.ngc.nvidia.com --extra-index-url https://download.pytorch.org/whl/cu121 @@ -14,13 +14,16 @@ absl-py==2.1.0 # via # brax # chex + # distrax # dm-env # ml-collections # mujoco # mujoco-mjx # optax # orbax-checkpoint + # rlax # tensorboard + # tensorflow-probability accelerate==0.34.2 # via # -r benchmarks/diffusion/requirements.in @@ -46,6 +49,9 @@ argklass==1.4.4 # via # -r benchmarks/diffusion/requirements.in # -r benchmarks/llm/requirements.in + # -r benchmarks/purejaxrl/requirements.in +astroid==3.2.4 + # via pylint asttokens==2.4.1 # via giving async-timeout==4.0.3 @@ -54,6 +60,8 @@ attrs==24.2.0 # via aiohttp beartype==0.18.5 # via -r benchmarks/vjepa/requirements.in +black==24.8.0 + # via navix blinker==1.8.2 # via flask blobfile==3.0.0 @@ -69,7 +77,9 @@ braceexpand==0.1.7 # -r benchmarks/vjepa/requirements.in # webdataset brax==0.10.5 - # via -r benchmarks/brax/requirements.in + # via + # -r benchmarks/brax/requirements.in + # -r benchmarks/purejaxrl/requirements.in cantilever==0.1.0 # via -r 
benchmarks/torchatari/requirements.in certifi==2024.8.30 @@ -79,9 +89,16 @@ certifi==2024.8.30 charset-normalizer==3.3.2 # via requests chex==0.1.86 - # via optax + # via + # distrax + # evosax + # flashbax + # gymnax + # optax + # rlax click==8.1.7 # via + # black # flask # wandb cloudpickle==3.0.0 @@ -89,12 +106,17 @@ cloudpickle==3.0.0 # gym # gymnasium # submitit + # tensorflow-probability codefind==0.1.7 # via ptera contextlib2==21.6.0 # via ml-collections +contourpy==1.3.0 + # via matplotlib cvxopt==1.3.2 # via -r benchmarks/recursiongfn/requirements.in +cycler==0.12.1 + # via matplotlib datasets==3.0.0 # via # -r benchmarks/diffusion/requirements.in @@ -103,6 +125,8 @@ datasets==3.0.0 # -r benchmarks/rlhf/requirements.in # torchtune # trl +decorator==5.1.1 + # via tensorflow-probability decord==0.6.0 # via -r benchmarks/vjepa/requirements.in diffusers[torch]==0.30.2 @@ -111,16 +135,26 @@ dill==0.3.8 # via # datasets # multiprocess + # pylint +distrax==0.1.5 + # via + # -r benchmarks/purejaxrl/requirements.in + # rlax dm-env==1.6 # via # brax # envpool + # rlax dm-tree==0.1.8 - # via dm-env + # via + # dm-env + # tensorflow-probability docker-pycreds==0.4.0 # via wandb docstring-parser==0.16 # via tyro +dotmap==1.3.30 + # via evosax einops==0.8.0 # via -r benchmarks/vjepa/requirements.in envpool==0.8.4 @@ -132,7 +166,11 @@ etils[epath,epy]==1.9.4 # mujoco-mjx # optax # orbax-checkpoint -executing==1.2.0 +evosax==0.1.6 + # via -r benchmarks/purejaxrl/requirements.in +exceptiongroup==1.2.2 + # via pytest +executing==2.1.0 # via varname fairscale==0.4.13 # via @@ -154,6 +192,10 @@ fire==0.6.0 # via # -r benchmarks/llama/requirements.in # -r benchmarks/llm/requirements.txt +flake8==7.1.1 + # via navix +flashbax==0.1.2 + # via -r benchmarks/purejaxrl/requirements.in flask==3.0.3 # via # brax @@ -161,7 +203,15 @@ flask==3.0.3 flask-cors==5.0.0 # via brax flax==0.9.0 - # via brax + # via + # -r benchmarks/purejaxrl/requirements.in + # brax + # evosax + # flashbax + # gymnax + # navix +fonttools==4.53.1 + # via matplotlib frozenlist==1.4.1 # via # aiohttp @@ -177,13 +227,15 @@ fsspec[http]==2024.6.1 # torch-geometric fvcore==0.1.5.post20221221 # via -r benchmarks/dinov2/requirements.in +gast==0.6.0 + # via tensorflow-probability gitdb==4.0.11 # via gitpython gitpython==3.1.43 # via # -r benchmarks/recursiongfn/requirements.in # wandb -giving==0.4.2 +giving==0.4.3 # via # ptera # voir @@ -206,9 +258,11 @@ gym-notices==0.0.8 # via gym gymnasium==0.29.1 # via envpool +gymnax==0.0.5 + # via -r benchmarks/purejaxrl/requirements.in hjson==3.1.0 # via argklass -huggingface-hub==0.24.6 +huggingface-hub==0.24.7 # via # -r benchmarks/timm/requirements.in # accelerate @@ -232,23 +286,33 @@ importlib-resources==6.4.5 # cantilever # etils # torchcompat +iniconfig==2.0.0 + # via pytest iopath==0.1.10 # via # -r benchmarks/dinov2/requirements.in # fvcore +isort==5.13.2 + # via pylint itsdangerous==2.2.0 # via flask jax[cuda12]==0.4.31 # via # -r benchmarks/brax/requirements.in + # -r benchmarks/purejaxrl/requirements.in # -r constraints/extra/torch.cuda.txt # brax # chex + # distrax + # evosax + # flashbax # flax + # gymnax # jaxopt # mujoco-mjx # optax # orbax-checkpoint + # rlax jax-cuda12-pjrt==0.4.31 # via jax-cuda12-plugin jax-cuda12-plugin[with-cuda]==0.4.31 @@ -257,11 +321,16 @@ jaxlib==0.4.31 # via # brax # chex + # distrax + # evosax + # flashbax + # gymnax # jax # jaxopt # mujoco-mjx # optax # orbax-checkpoint + # rlax jaxopt==0.8.3 # via brax jaxtyping==0.2.34 @@ -274,6 +343,8 @@ jinja2==3.1.4 # 
torch-geometric joblib==1.4.2 # via scikit-learn +kiwisolver==1.4.7 + # via matplotlib lightning==2.4.0 # via -r benchmarks/lightning/requirements.in lightning-utilities==0.11.7 @@ -295,6 +366,12 @@ markupsafe==2.1.5 # via # jinja2 # werkzeug +matplotlib==3.9.2 + # via evosax +mccabe==0.7.0 + # via + # flake8 + # pylint mdurl==0.1.2 # via markdown-it-py ml-collections==0.1.1 @@ -328,6 +405,10 @@ multipledispatch==1.0.0 # via botorch multiprocess==0.70.16 # via datasets +mypy-extensions==1.0.0 + # via black +navix==0.7.0 + # via -r benchmarks/purejaxrl/requirements.in ndindex==1.8 # via blosc2 nest-asyncio==1.6.0 @@ -342,7 +423,9 @@ numexpr==2.10.1 # tables numpy==1.26.4 # via + # -r benchmarks/geo_gnn/requirements.in # -r benchmarks/llava/requirements.in + # -r benchmarks/purejaxrl/requirements.in # -r benchmarks/torchatari/requirements.in # -r benchmarks/vjepa/requirements.in # accelerate @@ -350,20 +433,26 @@ numpy==1.26.4 # botorch # brax # chex + # contourpy # datasets # decord # diffusers + # distrax # dm-env # envpool + # evosax # fairscale + # flashbax # fvcore # gym # gymnasium # jax # jaxlib # jaxopt + # matplotlib # ml-dtypes # mujoco + # navix # numexpr # opencv-python # opt-einsum @@ -373,11 +462,13 @@ numpy==1.26.4 # pyarrow # pyro-ppl # rdkit + # rlax # scikit-learn # scipy # tables # tensorboard # tensorboardx + # tensorflow-probability # tensorstore # torch-geometric # torchmetrics @@ -452,6 +543,7 @@ opt-einsum==3.3.0 # pyro-ppl optax==0.2.3 # via + # -r benchmarks/purejaxrl/requirements.in # brax # flax optree==0.12.1 @@ -465,12 +557,16 @@ ovld==0.3.9 packaging==24.1 # via # accelerate + # black # datasets # envpool # huggingface-hub # lightning # lightning-utilities + # matplotlib + # pytest # pytorch-lightning + # setuptools-scm # tables # tensorboard # tensorboardx @@ -478,9 +574,12 @@ packaging==24.1 # transformers pandas==2.2.2 # via + # -r benchmarks/geo_gnn/requirements.in # -r benchmarks/recursiongfn/requirements.in # -r benchmarks/vjepa/requirements.in # datasets +pathspec==0.12.1 + # via black pillow==10.4.0 # via # -r benchmarks/huggingface/requirements.in @@ -488,10 +587,17 @@ pillow==10.4.0 # brax # diffusers # fvcore + # matplotlib + # navix # rdkit # torchvision platformdirs==4.3.2 - # via wandb + # via + # black + # pylint + # wandb +pluggy==1.5.0 + # via pytest portalocker==2.10.1 # via iopath protobuf==5.28.1 @@ -516,22 +622,34 @@ pyarrow==17.0.0 # via # -r benchmarks/recursiongfn/requirements.in # datasets +pycodestyle==2.12.1 + # via flake8 pycryptodomex==3.20.0 # via blobfile +pyflakes==3.2.0 + # via flake8 pygments==2.18.0 # via rich +pylint==3.2.7 + # via navix pyopengl==3.1.7 # via mujoco pyparsing==3.1.4 - # via torch-geometric + # via + # matplotlib + # torch-geometric pyro-api==0.1.2 # via pyro-ppl pyro-ppl==1.9.1 # via # -r benchmarks/recursiongfn/requirements.in # botorch +pytest==8.3.3 + # via navix python-dateutil==2.9.0.post0 - # via pandas + # via + # matplotlib + # pandas pytinyrenderer==0.0.14 # via brax pytorch-lightning==2.4.0 @@ -545,8 +663,10 @@ pyyaml==6.0.2 # -r benchmarks/vjepa/requirements.in # accelerate # datasets + # evosax # flax # fvcore + # gymnax # huggingface-hub # lightning # ml-collections @@ -559,7 +679,9 @@ pyyaml==6.0.2 # webdataset # yacs rdkit==2024.3.5 - # via -r benchmarks/recursiongfn/requirements.in + # via + # -r benchmarks/geo_gnn/requirements.in + # -r benchmarks/recursiongfn/requirements.in reactivex==4.0.4 # via giving regex==2024.9.11 @@ -581,6 +703,8 @@ rich==13.8.1 # flax # tyro # voir +rlax==0.1.6 + # 
via navix safetensors==0.4.5 # via # -r benchmarks/timm/requirements.in @@ -617,6 +741,8 @@ sentry-sdk==2.14.0 # via wandb setproctitle==1.3.3 # via wandb +setuptools-scm==8.1.0 + # via navix shtab==1.7.1 # via tyro six==1.16.0 @@ -627,6 +753,7 @@ six==1.16.0 # ml-collections # python-dateutil # tensorboard + # tensorflow-probability smmap==5.0.1 # via gitdb submitit==1.5.1 @@ -647,8 +774,11 @@ tensorboard-data-server==0.7.2 # via tensorboard tensorboardx==2.6.2.2 # via brax +tensorflow-probability==0.24.0 + # via distrax tensorstore==0.1.65 # via + # flashbax # flax # orbax-checkpoint termcolor==2.4.0 @@ -663,6 +793,14 @@ timm==1.0.9 # via -r benchmarks/vjepa/requirements.in tokenizers==0.19.1 # via transformers +tomli==2.0.1 + # via + # black + # pylint + # pytest + # setuptools-scm +tomlkit==0.13.2 + # via pylint toolz==0.12.1 # via chex torch==2.4.0+cu121 @@ -670,17 +808,20 @@ torch==2.4.0+cu121 # -r benchmarks/brax/requirements.in # -r benchmarks/dinov2/requirements.in # -r benchmarks/flops/requirements.in + # -r benchmarks/geo_gnn/requirements-pre.in # -r benchmarks/huggingface/requirements.in # -r benchmarks/lightning/requirements.in # -r benchmarks/llama/requirements.in # -r benchmarks/llava/requirements.in # -r benchmarks/llm/requirements.in # -r benchmarks/llm/requirements.txt + # -r benchmarks/purejaxrl/requirements.in # -r benchmarks/recursiongfn/requirements.in # -r benchmarks/rlhf/requirements.in # -r benchmarks/timm/requirements.in # -r benchmarks/torchatari/requirements.in # -r benchmarks/torchvision/requirements.in + # -r benchmarks/torchvision_ddp/requirements.in # -r benchmarks/vjepa/requirements.in # accelerate # botorch @@ -696,13 +837,21 @@ torch==2.4.0+cu121 # trl # xformers torch-cluster==1.6.3+pt24cu121 - # via -r benchmarks/recursiongfn/requirements.in + # via + # -r benchmarks/geo_gnn/requirements.in + # -r benchmarks/recursiongfn/requirements.in torch-geometric==2.5.3 - # via -r benchmarks/recursiongfn/requirements.in + # via + # -r benchmarks/geo_gnn/requirements.in + # -r benchmarks/recursiongfn/requirements.in torch-scatter==2.1.2+pt24cu121 - # via -r benchmarks/recursiongfn/requirements.in + # via + # -r benchmarks/geo_gnn/requirements.in + # -r benchmarks/recursiongfn/requirements.in torch-sparse==0.6.18+pt24cu121 - # via -r benchmarks/recursiongfn/requirements.in + # via + # -r benchmarks/geo_gnn/requirements.in + # -r benchmarks/recursiongfn/requirements.in torchao==0.3.1+cu121 # via torchtune torchcompat==1.1.4 @@ -712,6 +861,7 @@ torchcompat==1.1.4 # -r benchmarks/lightning/requirements.in # -r benchmarks/torchatari/requirements.in # -r benchmarks/torchvision/requirements.in + # -r benchmarks/torchvision_ddp/requirements.in torchmetrics==1.4.1 # via # -r benchmarks/dinov2/requirements.in @@ -727,6 +877,7 @@ torchvision==0.19.0+cu121 # -r benchmarks/lightning/requirements.in # -r benchmarks/timm/requirements.in # -r benchmarks/torchvision/requirements.in + # -r benchmarks/torchvision_ddp/requirements.in # -r benchmarks/vjepa/requirements.in # timm tqdm==4.66.5 @@ -734,6 +885,7 @@ tqdm==4.66.5 # -r benchmarks/diffusion/requirements.in # -r benchmarks/flops/requirements.in # -r benchmarks/torchvision/requirements.in + # -r benchmarks/torchvision_ddp/requirements.in # datasets # fvcore # huggingface-hub @@ -769,10 +921,13 @@ types-protobuf==5.27.0.20240907 # via envpool typing-extensions==4.12.2 # via + # astroid + # black # brax # chex # envpool # etils + # flashbax # flax # gymnasium # huggingface-hub @@ -780,6 +935,7 @@ typing-extensions==4.12.2 # 
lightning # lightning-utilities # multidict + # navix # optree # orbax-checkpoint # pytorch-lightning @@ -791,15 +947,16 @@ typing-extensions==4.12.2 tyro==0.8.10 # via # -r benchmarks/torchatari/requirements.in + # navix # trl tzdata==2024.1 # via pandas -urllib3==2.2.2 +urllib3==2.2.3 # via # blobfile # requests # sentry-sdk -varname==0.10.0 +varname==0.13.3 # via giving voir==0.2.19 # via @@ -808,19 +965,24 @@ voir==0.2.19 # -r benchmarks/diffusion/requirements.in # -r benchmarks/dinov2/requirements.in # -r benchmarks/flops/requirements.in + # -r benchmarks/geo_gnn/requirements.in # -r benchmarks/huggingface/requirements.in # -r benchmarks/lightning/requirements.in # -r benchmarks/llama/requirements.in # -r benchmarks/llava/requirements.in # -r benchmarks/llm/requirements.in + # -r benchmarks/purejaxrl/requirements.in # -r benchmarks/recursiongfn/requirements.in # -r benchmarks/rlhf/requirements.in # -r benchmarks/timm/requirements.in # -r benchmarks/torchatari/requirements.in # -r benchmarks/torchvision/requirements.in + # -r benchmarks/torchvision_ddp/requirements.in # -r benchmarks/vjepa/requirements.in -wandb==0.17.9 - # via -r benchmarks/recursiongfn/requirements.in +wandb==0.18.0 + # via + # -r benchmarks/recursiongfn/requirements.in + # navix webdataset==0.2.100 # via -r benchmarks/vjepa/requirements.in werkzeug==3.0.4 diff --git a/benchmarks/brax/requirements.cuda.txt b/benchmarks/brax/requirements.cuda.txt index 4263af9fd..e6a1bf1a1 100644 --- a/benchmarks/brax/requirements.cuda.txt +++ b/benchmarks/brax/requirements.cuda.txt @@ -73,7 +73,7 @@ etils[epath,epy]==1.9.4 # mujoco-mjx # optax # orbax-checkpoint -executing==1.2.0 +executing==2.1.0 # via # -c .pin/../.pin/constraints-cuda-torch.txt # varname @@ -100,7 +100,7 @@ fsspec==2024.6.1 # -c .pin/../.pin/constraints-cuda-torch.txt # etils # torch -giving==0.4.2 +giving==0.4.3 # via # -c .pin/../.pin/constraints-cuda-torch.txt # ptera @@ -436,7 +436,7 @@ typing-extensions==4.12.2 # orbax-checkpoint # reactivex # torch -varname==0.10.0 +varname==0.13.3 # via # -c .pin/../.pin/constraints-cuda-torch.txt # giving diff --git a/benchmarks/diffusion/requirements.cuda.txt b/benchmarks/diffusion/requirements.cuda.txt index 20bcb54f7..2efbb6508 100644 --- a/benchmarks/diffusion/requirements.cuda.txt +++ b/benchmarks/diffusion/requirements.cuda.txt @@ -73,7 +73,7 @@ dill==0.3.8 # -c .pin/../.pin/constraints-cuda-torch.txt # datasets # multiprocess -executing==1.2.0 +executing==2.1.0 # via # -c .pin/../.pin/constraints-cuda-torch.txt # varname @@ -97,7 +97,7 @@ fsspec[http]==2024.6.1 # datasets # huggingface-hub # torch -giving==0.4.2 +giving==0.4.3 # via # -c .pin/../.pin/constraints-cuda-torch.txt # ptera @@ -106,7 +106,7 @@ hjson==3.1.0 # via # -c .pin/../.pin/constraints-cuda-torch.txt # argklass -huggingface-hub==0.24.6 +huggingface-hub==0.24.7 # via # -c .pin/../.pin/constraints-cuda-torch.txt # accelerate @@ -408,11 +408,11 @@ tzdata==2024.1 # via # -c .pin/../.pin/constraints-cuda-torch.txt # pandas -urllib3==2.2.2 +urllib3==2.2.3 # via # -c .pin/../.pin/constraints-cuda-torch.txt # requests -varname==0.10.0 +varname==0.13.3 # via # -c .pin/../.pin/constraints-cuda-torch.txt # giving diff --git a/benchmarks/dinov2/requirements.cuda.txt b/benchmarks/dinov2/requirements.cuda.txt index 308ad4fe3..e76148ee7 100644 --- a/benchmarks/dinov2/requirements.cuda.txt +++ b/benchmarks/dinov2/requirements.cuda.txt @@ -26,7 +26,7 @@ codefind==0.1.7 # via # -c .pin/../.pin/constraints-cuda-torch.txt # ptera -executing==1.2.0 +executing==2.1.0 # 
via # -c .pin/../.pin/constraints-cuda-torch.txt # varname @@ -43,7 +43,7 @@ fvcore==0.1.5.post20221221 # via # -c .pin/../.pin/constraints-cuda-torch.txt # -r benchmarks/dinov2/requirements.in -giving==0.4.2 +giving==0.4.3 # via # -c .pin/../.pin/constraints-cuda-torch.txt # ptera @@ -294,7 +294,7 @@ typing-extensions==4.12.2 # reactivex # submitit # torch -varname==0.10.0 +varname==0.13.3 # via # -c .pin/../.pin/constraints-cuda-torch.txt # giving diff --git a/benchmarks/flops/requirements.cuda.txt b/benchmarks/flops/requirements.cuda.txt index 9a890144e..0d16b53f9 100644 --- a/benchmarks/flops/requirements.cuda.txt +++ b/benchmarks/flops/requirements.cuda.txt @@ -22,7 +22,7 @@ codefind==0.1.7 # via # -c .pin/../.pin/constraints-cuda-torch.txt # ptera -executing==1.2.0 +executing==2.1.0 # via # -c .pin/../.pin/constraints-cuda-torch.txt # varname @@ -35,7 +35,7 @@ fsspec==2024.6.1 # via # -c .pin/../.pin/constraints-cuda-torch.txt # torch -giving==0.4.2 +giving==0.4.3 # via # -c .pin/../.pin/constraints-cuda-torch.txt # ptera @@ -249,7 +249,7 @@ typing-extensions==4.12.2 # -c .pin/../.pin/constraints-cuda-torch.txt # reactivex # torch -varname==0.10.0 +varname==0.13.3 # via # -c .pin/../.pin/constraints-cuda-torch.txt # giving diff --git a/benchmarks/geo_gnn/requirements-pre.cuda.txt b/benchmarks/geo_gnn/requirements-pre.cuda.txt index 396cdd441..2fbd359f6 100644 --- a/benchmarks/geo_gnn/requirements-pre.cuda.txt +++ b/benchmarks/geo_gnn/requirements-pre.cuda.txt @@ -2,104 +2,161 @@ # This file is autogenerated by pip-compile with Python 3.10 # by the following command: # -# pip-compile --output-file=benchmarks/geo_gnn/requirements-pre.cuda.txt .pin/tmp-constraints-cuda-geo_gnn.txt benchmarks/geo_gnn/requirements-pre.in +# pip-compile --output-file=benchmarks/geo_gnn/requirements-pre.cuda.txt .pin/tmp-constraints-cuda-dimenet.txt benchmarks/geo_gnn/requirements-pre.in # --extra-index-url https://pypi.ngc.nvidia.com --extra-index-url https://download.pytorch.org/whl/cu121 ---find-links https://data.pyg.org/whl/torch-2.3.0+cu121.html +--find-links https://storage.googleapis.com/jax-releases/jax_cuda_releases.html +--find-links https://data.pyg.org/whl/torch-2.4.0+cu121.html --trusted-host pypi.ngc.nvidia.com filelock==3.16.0 # via - # -c .pin/../.pin/constraints-cuda-gnn.txt + # -c .pin/../.pin/constraints-cuda-torch.txt # torch # triton -fsspec==2024.9.0 +fsspec==2024.6.1 # via - # -c .pin/../.pin/constraints-cuda-gnn.txt + # -c .pin/../.pin/constraints-cuda-torch.txt # torch +jax[cuda12]==0.4.31 + # via + # -c .pin/../.pin/constraints-cuda-torch.txt + # -r .pin/../constraints/extra/torch.cuda.txt +jax-cuda12-pjrt==0.4.31 + # via + # -c .pin/../.pin/constraints-cuda-torch.txt + # jax-cuda12-plugin +jax-cuda12-plugin[with-cuda]==0.4.31 + # via + # -c .pin/../.pin/constraints-cuda-torch.txt + # jax +jaxlib==0.4.31 + # via + # -c .pin/../.pin/constraints-cuda-torch.txt + # jax jinja2==3.1.4 # via - # -c .pin/../.pin/constraints-cuda-gnn.txt + # -c .pin/../.pin/constraints-cuda-torch.txt # torch markupsafe==2.1.5 # via - # -c .pin/../.pin/constraints-cuda-gnn.txt + # -c .pin/../.pin/constraints-cuda-torch.txt # jinja2 +ml-dtypes==0.4.0 + # via + # -c .pin/../.pin/constraints-cuda-torch.txt + # jax + # jaxlib mpmath==1.3.0 # via - # -c .pin/../.pin/constraints-cuda-gnn.txt + # -c .pin/../.pin/constraints-cuda-torch.txt # sympy networkx==3.3 # via - # -c .pin/../.pin/constraints-cuda-gnn.txt + # -c .pin/../.pin/constraints-cuda-torch.txt # torch +numpy==1.26.4 + # via + # -c 
.pin/../.pin/constraints-cuda-torch.txt + # jax + # jaxlib + # ml-dtypes + # opt-einsum + # scipy + # xformers nvidia-cublas-cu12==12.1.3.1 # via - # -c .pin/../.pin/constraints-cuda-gnn.txt + # -c .pin/../.pin/constraints-cuda-torch.txt + # jax-cuda12-plugin # nvidia-cudnn-cu12 # nvidia-cusolver-cu12 # torch nvidia-cuda-cupti-cu12==12.1.105 # via - # -c .pin/../.pin/constraints-cuda-gnn.txt + # -c .pin/../.pin/constraints-cuda-torch.txt + # jax-cuda12-plugin # torch +nvidia-cuda-nvcc-cu12==12.6.68 + # via + # -c .pin/../.pin/constraints-cuda-torch.txt + # jax-cuda12-plugin nvidia-cuda-nvrtc-cu12==12.1.105 # via - # -c .pin/../.pin/constraints-cuda-gnn.txt + # -c .pin/../.pin/constraints-cuda-torch.txt # torch nvidia-cuda-runtime-cu12==12.1.105 # via - # -c .pin/../.pin/constraints-cuda-gnn.txt + # -c .pin/../.pin/constraints-cuda-torch.txt + # jax-cuda12-plugin # torch -nvidia-cudnn-cu12==8.9.2.26 +nvidia-cudnn-cu12==9.1.0.70 # via - # -c .pin/../.pin/constraints-cuda-gnn.txt + # -c .pin/../.pin/constraints-cuda-torch.txt + # jax-cuda12-plugin # torch nvidia-cufft-cu12==11.0.2.54 # via - # -c .pin/../.pin/constraints-cuda-gnn.txt + # -c .pin/../.pin/constraints-cuda-torch.txt + # jax-cuda12-plugin # torch nvidia-curand-cu12==10.3.2.106 # via - # -c .pin/../.pin/constraints-cuda-gnn.txt + # -c .pin/../.pin/constraints-cuda-torch.txt # torch nvidia-cusolver-cu12==11.4.5.107 # via - # -c .pin/../.pin/constraints-cuda-gnn.txt + # -c .pin/../.pin/constraints-cuda-torch.txt + # jax-cuda12-plugin # torch nvidia-cusparse-cu12==12.1.0.106 # via - # -c .pin/../.pin/constraints-cuda-gnn.txt + # -c .pin/../.pin/constraints-cuda-torch.txt + # jax-cuda12-plugin # nvidia-cusolver-cu12 # torch nvidia-nccl-cu12==2.20.5 # via - # -c .pin/../.pin/constraints-cuda-gnn.txt + # -c .pin/../.pin/constraints-cuda-torch.txt + # jax-cuda12-plugin # torch nvidia-nvjitlink-cu12==12.6.68 # via - # -c .pin/../.pin/constraints-cuda-gnn.txt + # -c .pin/../.pin/constraints-cuda-torch.txt + # jax-cuda12-plugin # nvidia-cusolver-cu12 # nvidia-cusparse-cu12 nvidia-nvtx-cu12==12.1.105 # via - # -c .pin/../.pin/constraints-cuda-gnn.txt + # -c .pin/../.pin/constraints-cuda-torch.txt # torch +opt-einsum==3.3.0 + # via + # -c .pin/../.pin/constraints-cuda-torch.txt + # jax +scipy==1.14.1 + # via + # -c .pin/../.pin/constraints-cuda-torch.txt + # jax + # jaxlib sympy==1.13.2 # via - # -c .pin/../.pin/constraints-cuda-gnn.txt + # -c .pin/../.pin/constraints-cuda-torch.txt # torch -torch==2.3.1+cu121 +torch==2.4.0+cu121 # via - # -c .pin/../.pin/constraints-cuda-gnn.txt - # -r .pin/../constraints/extra/gnn.cuda.txt + # -c .pin/../.pin/constraints-cuda-torch.txt # -r benchmarks/geo_gnn/requirements-pre.in -triton==2.3.1 + # xformers +triton==3.0.0 # via - # -c .pin/../.pin/constraints-cuda-gnn.txt + # -c .pin/../.pin/constraints-cuda-torch.txt # torch typing-extensions==4.12.2 # via - # -c .pin/../.pin/constraints-cuda-gnn.txt + # -c .pin/../.pin/constraints-cuda-torch.txt # torch +xformers==0.0.27.post2 + # via + # -c .pin/../.pin/constraints-cuda-torch.txt + # -r .pin/../constraints/extra/torch.cuda.txt diff --git a/benchmarks/geo_gnn/requirements.cuda.txt b/benchmarks/geo_gnn/requirements.cuda.txt index ddad4a7b5..3fd1a2b0a 100644 --- a/benchmarks/geo_gnn/requirements.cuda.txt +++ b/benchmarks/geo_gnn/requirements.cuda.txt @@ -2,340 +2,400 @@ # This file is autogenerated by pip-compile with Python 3.10 # by the following command: # -# pip-compile --output-file=benchmarks/geo_gnn/requirements.cuda.txt 
.pin/tmp-constraints-cuda-geo_gnn.txt benchmarks/geo_gnn/requirements-pre.cuda.txt benchmarks/geo_gnn/requirements.in +# pip-compile --output-file=benchmarks/geo_gnn/requirements.cuda.txt .pin/tmp-constraints-cuda-dimenet.txt benchmarks/geo_gnn/requirements-pre.cuda.txt benchmarks/geo_gnn/requirements.in # --extra-index-url https://pypi.ngc.nvidia.com --extra-index-url https://download.pytorch.org/whl/cu121 ---find-links https://data.pyg.org/whl/torch-2.3.0+cu121.html +--find-links https://storage.googleapis.com/jax-releases/jax_cuda_releases.html +--find-links https://data.pyg.org/whl/torch-2.4.0+cu121.html --trusted-host pypi.ngc.nvidia.com aiohappyeyeballs==2.4.0 # via - # -c .pin/../.pin/constraints-cuda-gnn.txt + # -c .pin/../.pin/constraints-cuda-torch.txt # aiohttp aiohttp==3.10.5 # via - # -c .pin/../.pin/constraints-cuda-gnn.txt + # -c .pin/../.pin/constraints-cuda-torch.txt # torch-geometric aiosignal==1.3.1 # via - # -c .pin/../.pin/constraints-cuda-gnn.txt + # -c .pin/../.pin/constraints-cuda-torch.txt # aiohttp antlr4-python3-runtime==4.9.3 # via - # -c .pin/../.pin/constraints-cuda-gnn.txt + # -c .pin/../.pin/constraints-cuda-torch.txt # omegaconf asttokens==2.4.1 # via - # -c .pin/../.pin/constraints-cuda-gnn.txt + # -c .pin/../.pin/constraints-cuda-torch.txt # giving async-timeout==4.0.3 # via - # -c .pin/../.pin/constraints-cuda-gnn.txt + # -c .pin/../.pin/constraints-cuda-torch.txt # aiohttp attrs==24.2.0 # via - # -c .pin/../.pin/constraints-cuda-gnn.txt + # -c .pin/../.pin/constraints-cuda-torch.txt # aiohttp certifi==2024.8.30 # via - # -c .pin/../.pin/constraints-cuda-gnn.txt + # -c .pin/../.pin/constraints-cuda-torch.txt # requests charset-normalizer==3.3.2 # via - # -c .pin/../.pin/constraints-cuda-gnn.txt + # -c .pin/../.pin/constraints-cuda-torch.txt # requests codefind==0.1.7 # via - # -c .pin/../.pin/constraints-cuda-gnn.txt + # -c .pin/../.pin/constraints-cuda-torch.txt # ptera -executing==1.2.0 +executing==2.1.0 # via - # -c .pin/../.pin/constraints-cuda-gnn.txt + # -c .pin/../.pin/constraints-cuda-torch.txt # varname filelock==3.16.0 # via - # -c .pin/../.pin/constraints-cuda-gnn.txt + # -c .pin/../.pin/constraints-cuda-torch.txt # -r benchmarks/geo_gnn/requirements-pre.cuda.txt # torch # triton frozenlist==1.4.1 # via - # -c .pin/../.pin/constraints-cuda-gnn.txt + # -c .pin/../.pin/constraints-cuda-torch.txt # aiohttp # aiosignal -fsspec==2024.9.0 +fsspec==2024.6.1 # via - # -c .pin/../.pin/constraints-cuda-gnn.txt + # -c .pin/../.pin/constraints-cuda-torch.txt # -r benchmarks/geo_gnn/requirements-pre.cuda.txt # torch # torch-geometric -giving==0.4.2 +giving==0.4.3 # via - # -c .pin/../.pin/constraints-cuda-gnn.txt + # -c .pin/../.pin/constraints-cuda-torch.txt # ptera # voir idna==3.8 # via - # -c .pin/../.pin/constraints-cuda-gnn.txt + # -c .pin/../.pin/constraints-cuda-torch.txt # requests # yarl +jax[cuda12]==0.4.31 + # via + # -c .pin/../.pin/constraints-cuda-torch.txt + # -r .pin/../constraints/extra/torch.cuda.txt + # -r benchmarks/geo_gnn/requirements-pre.cuda.txt +jax-cuda12-pjrt==0.4.31 + # via + # -c .pin/../.pin/constraints-cuda-torch.txt + # -r benchmarks/geo_gnn/requirements-pre.cuda.txt + # jax-cuda12-plugin +jax-cuda12-plugin[with-cuda]==0.4.31 + # via + # -c .pin/../.pin/constraints-cuda-torch.txt + # -r benchmarks/geo_gnn/requirements-pre.cuda.txt + # jax +jaxlib==0.4.31 + # via + # -c .pin/../.pin/constraints-cuda-torch.txt + # -r benchmarks/geo_gnn/requirements-pre.cuda.txt + # jax jinja2==3.1.4 # via - # -c 
.pin/../.pin/constraints-cuda-gnn.txt + # -c .pin/../.pin/constraints-cuda-torch.txt # -r benchmarks/geo_gnn/requirements-pre.cuda.txt # torch # torch-geometric joblib==1.4.2 # via - # -c .pin/../.pin/constraints-cuda-gnn.txt + # -c .pin/../.pin/constraints-cuda-torch.txt # scikit-learn markdown-it-py==3.0.0 # via - # -c .pin/../.pin/constraints-cuda-gnn.txt + # -c .pin/../.pin/constraints-cuda-torch.txt # rich markupsafe==2.1.5 # via - # -c .pin/../.pin/constraints-cuda-gnn.txt + # -c .pin/../.pin/constraints-cuda-torch.txt # -r benchmarks/geo_gnn/requirements-pre.cuda.txt # jinja2 mdurl==0.1.2 # via - # -c .pin/../.pin/constraints-cuda-gnn.txt + # -c .pin/../.pin/constraints-cuda-torch.txt # markdown-it-py +ml-dtypes==0.4.0 + # via + # -c .pin/../.pin/constraints-cuda-torch.txt + # -r benchmarks/geo_gnn/requirements-pre.cuda.txt + # jax + # jaxlib mpmath==1.3.0 # via - # -c .pin/../.pin/constraints-cuda-gnn.txt + # -c .pin/../.pin/constraints-cuda-torch.txt # -r benchmarks/geo_gnn/requirements-pre.cuda.txt # sympy multidict==6.1.0 # via - # -c .pin/../.pin/constraints-cuda-gnn.txt + # -c .pin/../.pin/constraints-cuda-torch.txt # aiohttp # yarl networkx==3.3 # via - # -c .pin/../.pin/constraints-cuda-gnn.txt + # -c .pin/../.pin/constraints-cuda-torch.txt # -r benchmarks/geo_gnn/requirements-pre.cuda.txt # torch numpy==1.26.4 # via - # -c .pin/../.pin/constraints-cuda-gnn.txt + # -c .pin/../.pin/constraints-cuda-torch.txt + # -r benchmarks/geo_gnn/requirements-pre.cuda.txt # -r benchmarks/geo_gnn/requirements.in + # jax + # jaxlib + # ml-dtypes + # opt-einsum # pandas # rdkit # scikit-learn # scipy # torch-geometric + # xformers nvidia-cublas-cu12==12.1.3.1 # via - # -c .pin/../.pin/constraints-cuda-gnn.txt + # -c .pin/../.pin/constraints-cuda-torch.txt # -r benchmarks/geo_gnn/requirements-pre.cuda.txt + # jax-cuda12-plugin # nvidia-cudnn-cu12 # nvidia-cusolver-cu12 # torch nvidia-cuda-cupti-cu12==12.1.105 # via - # -c .pin/../.pin/constraints-cuda-gnn.txt + # -c .pin/../.pin/constraints-cuda-torch.txt # -r benchmarks/geo_gnn/requirements-pre.cuda.txt + # jax-cuda12-plugin # torch +nvidia-cuda-nvcc-cu12==12.6.68 + # via + # -c .pin/../.pin/constraints-cuda-torch.txt + # -r benchmarks/geo_gnn/requirements-pre.cuda.txt + # jax-cuda12-plugin nvidia-cuda-nvrtc-cu12==12.1.105 # via - # -c .pin/../.pin/constraints-cuda-gnn.txt + # -c .pin/../.pin/constraints-cuda-torch.txt # -r benchmarks/geo_gnn/requirements-pre.cuda.txt # torch nvidia-cuda-runtime-cu12==12.1.105 # via - # -c .pin/../.pin/constraints-cuda-gnn.txt + # -c .pin/../.pin/constraints-cuda-torch.txt # -r benchmarks/geo_gnn/requirements-pre.cuda.txt + # jax-cuda12-plugin # torch -nvidia-cudnn-cu12==8.9.2.26 +nvidia-cudnn-cu12==9.1.0.70 # via - # -c .pin/../.pin/constraints-cuda-gnn.txt + # -c .pin/../.pin/constraints-cuda-torch.txt # -r benchmarks/geo_gnn/requirements-pre.cuda.txt + # jax-cuda12-plugin # torch nvidia-cufft-cu12==11.0.2.54 # via - # -c .pin/../.pin/constraints-cuda-gnn.txt + # -c .pin/../.pin/constraints-cuda-torch.txt # -r benchmarks/geo_gnn/requirements-pre.cuda.txt + # jax-cuda12-plugin # torch nvidia-curand-cu12==10.3.2.106 # via - # -c .pin/../.pin/constraints-cuda-gnn.txt + # -c .pin/../.pin/constraints-cuda-torch.txt # -r benchmarks/geo_gnn/requirements-pre.cuda.txt # torch nvidia-cusolver-cu12==11.4.5.107 # via - # -c .pin/../.pin/constraints-cuda-gnn.txt + # -c .pin/../.pin/constraints-cuda-torch.txt # -r benchmarks/geo_gnn/requirements-pre.cuda.txt + # jax-cuda12-plugin # torch nvidia-cusparse-cu12==12.1.0.106 
# via - # -c .pin/../.pin/constraints-cuda-gnn.txt + # -c .pin/../.pin/constraints-cuda-torch.txt # -r benchmarks/geo_gnn/requirements-pre.cuda.txt + # jax-cuda12-plugin # nvidia-cusolver-cu12 # torch nvidia-ml-py==12.560.30 # via - # -c .pin/../.pin/constraints-cuda-gnn.txt + # -c .pin/../.pin/constraints-cuda-torch.txt # voir nvidia-nccl-cu12==2.20.5 # via - # -c .pin/../.pin/constraints-cuda-gnn.txt + # -c .pin/../.pin/constraints-cuda-torch.txt # -r benchmarks/geo_gnn/requirements-pre.cuda.txt + # jax-cuda12-plugin # torch nvidia-nvjitlink-cu12==12.6.68 # via - # -c .pin/../.pin/constraints-cuda-gnn.txt + # -c .pin/../.pin/constraints-cuda-torch.txt # -r benchmarks/geo_gnn/requirements-pre.cuda.txt + # jax-cuda12-plugin # nvidia-cusolver-cu12 # nvidia-cusparse-cu12 nvidia-nvtx-cu12==12.1.105 # via - # -c .pin/../.pin/constraints-cuda-gnn.txt + # -c .pin/../.pin/constraints-cuda-torch.txt # -r benchmarks/geo_gnn/requirements-pre.cuda.txt # torch omegaconf==2.3.0 # via - # -c .pin/../.pin/constraints-cuda-gnn.txt + # -c .pin/../.pin/constraints-cuda-torch.txt # voir +opt-einsum==3.3.0 + # via + # -c .pin/../.pin/constraints-cuda-torch.txt + # -r benchmarks/geo_gnn/requirements-pre.cuda.txt + # jax ovld==0.3.9 # via - # -c .pin/../.pin/constraints-cuda-gnn.txt + # -c .pin/../.pin/constraints-cuda-torch.txt # voir pandas==2.2.2 # via - # -c .pin/../.pin/constraints-cuda-gnn.txt + # -c .pin/../.pin/constraints-cuda-torch.txt # -r benchmarks/geo_gnn/requirements.in pillow==10.4.0 # via - # -c .pin/../.pin/constraints-cuda-gnn.txt + # -c .pin/../.pin/constraints-cuda-torch.txt # rdkit psutil==5.9.8 # via - # -c .pin/../.pin/constraints-cuda-gnn.txt + # -c .pin/../.pin/constraints-cuda-torch.txt # torch-geometric # voir ptera==1.4.1 # via - # -c .pin/../.pin/constraints-cuda-gnn.txt + # -c .pin/../.pin/constraints-cuda-torch.txt # voir pygments==2.18.0 # via - # -c .pin/../.pin/constraints-cuda-gnn.txt + # -c .pin/../.pin/constraints-cuda-torch.txt # rich pyparsing==3.1.4 # via - # -c .pin/../.pin/constraints-cuda-gnn.txt + # -c .pin/../.pin/constraints-cuda-torch.txt # torch-geometric python-dateutil==2.9.0.post0 # via - # -c .pin/../.pin/constraints-cuda-gnn.txt + # -c .pin/../.pin/constraints-cuda-torch.txt # pandas -pytz==2024.1 +pytz==2024.2 # via - # -c .pin/../.pin/constraints-cuda-gnn.txt + # -c .pin/../.pin/constraints-cuda-torch.txt # pandas pyyaml==6.0.2 # via - # -c .pin/../.pin/constraints-cuda-gnn.txt + # -c .pin/../.pin/constraints-cuda-torch.txt # omegaconf rdkit==2024.3.5 # via - # -c .pin/../.pin/constraints-cuda-gnn.txt + # -c .pin/../.pin/constraints-cuda-torch.txt # -r benchmarks/geo_gnn/requirements.in reactivex==4.0.4 # via - # -c .pin/../.pin/constraints-cuda-gnn.txt + # -c .pin/../.pin/constraints-cuda-torch.txt # giving requests==2.32.3 # via - # -c .pin/../.pin/constraints-cuda-gnn.txt + # -c .pin/../.pin/constraints-cuda-torch.txt # torch-geometric rich==13.8.1 # via - # -c .pin/../.pin/constraints-cuda-gnn.txt + # -c .pin/../.pin/constraints-cuda-torch.txt # voir -scikit-learn==1.5.1 +scikit-learn==1.5.2 # via - # -c .pin/../.pin/constraints-cuda-gnn.txt + # -c .pin/../.pin/constraints-cuda-torch.txt # torch-geometric scipy==1.14.1 # via - # -c .pin/../.pin/constraints-cuda-gnn.txt + # -c .pin/../.pin/constraints-cuda-torch.txt + # -r benchmarks/geo_gnn/requirements-pre.cuda.txt + # jax + # jaxlib # scikit-learn # torch-cluster # torch-geometric # torch-sparse six==1.16.0 # via - # -c .pin/../.pin/constraints-cuda-gnn.txt + # -c 
.pin/../.pin/constraints-cuda-torch.txt # asttokens # python-dateutil sympy==1.13.2 # via - # -c .pin/../.pin/constraints-cuda-gnn.txt + # -c .pin/../.pin/constraints-cuda-torch.txt # -r benchmarks/geo_gnn/requirements-pre.cuda.txt # torch threadpoolctl==3.5.0 # via - # -c .pin/../.pin/constraints-cuda-gnn.txt + # -c .pin/../.pin/constraints-cuda-torch.txt # scikit-learn -torch==2.3.1+cu121 +torch==2.4.0+cu121 # via - # -c .pin/../.pin/constraints-cuda-gnn.txt - # -r .pin/../constraints/extra/gnn.cuda.txt + # -c .pin/../.pin/constraints-cuda-torch.txt # -r benchmarks/geo_gnn/requirements-pre.cuda.txt -torch-cluster==1.6.3+pt23cu121 + # xformers +torch-cluster==1.6.3+pt24cu121 # via - # -c .pin/../.pin/constraints-cuda-gnn.txt + # -c .pin/../.pin/constraints-cuda-torch.txt # -r benchmarks/geo_gnn/requirements.in torch-geometric==2.5.3 # via - # -c .pin/../.pin/constraints-cuda-gnn.txt + # -c .pin/../.pin/constraints-cuda-torch.txt # -r benchmarks/geo_gnn/requirements.in -torch-scatter==2.1.2+pt23cu121 +torch-scatter==2.1.2+pt24cu121 # via - # -c .pin/../.pin/constraints-cuda-gnn.txt + # -c .pin/../.pin/constraints-cuda-torch.txt # -r benchmarks/geo_gnn/requirements.in -torch-sparse==0.6.18+pt23cu121 +torch-sparse==0.6.18+pt24cu121 # via - # -c .pin/../.pin/constraints-cuda-gnn.txt + # -c .pin/../.pin/constraints-cuda-torch.txt # -r benchmarks/geo_gnn/requirements.in tqdm==4.66.5 # via - # -c .pin/../.pin/constraints-cuda-gnn.txt + # -c .pin/../.pin/constraints-cuda-torch.txt # torch-geometric -triton==2.3.1 +triton==3.0.0 # via - # -c .pin/../.pin/constraints-cuda-gnn.txt + # -c .pin/../.pin/constraints-cuda-torch.txt # -r benchmarks/geo_gnn/requirements-pre.cuda.txt # torch typing-extensions==4.12.2 # via - # -c .pin/../.pin/constraints-cuda-gnn.txt + # -c .pin/../.pin/constraints-cuda-torch.txt # -r benchmarks/geo_gnn/requirements-pre.cuda.txt # multidict # reactivex # torch tzdata==2024.1 # via - # -c .pin/../.pin/constraints-cuda-gnn.txt + # -c .pin/../.pin/constraints-cuda-torch.txt # pandas -urllib3==2.2.2 +urllib3==2.2.3 # via - # -c .pin/../.pin/constraints-cuda-gnn.txt + # -c .pin/../.pin/constraints-cuda-torch.txt # requests -varname==0.10.0 +varname==0.13.3 # via - # -c .pin/../.pin/constraints-cuda-gnn.txt + # -c .pin/../.pin/constraints-cuda-torch.txt # giving voir==0.2.19 # via - # -c .pin/../.pin/constraints-cuda-gnn.txt + # -c .pin/../.pin/constraints-cuda-torch.txt # -c .pin/../constraints/cuda.txt # -r benchmarks/geo_gnn/requirements.in +xformers==0.0.27.post2 + # via + # -c .pin/../.pin/constraints-cuda-torch.txt + # -r .pin/../constraints/extra/torch.cuda.txt + # -r benchmarks/geo_gnn/requirements-pre.cuda.txt yarl==1.11.1 # via - # -c .pin/../.pin/constraints-cuda-gnn.txt + # -c .pin/../.pin/constraints-cuda-torch.txt # aiohttp diff --git a/benchmarks/huggingface/requirements.cuda.txt b/benchmarks/huggingface/requirements.cuda.txt index 33c9b4d5d..a7f31485e 100644 --- a/benchmarks/huggingface/requirements.cuda.txt +++ b/benchmarks/huggingface/requirements.cuda.txt @@ -30,7 +30,7 @@ codefind==0.1.7 # via # -c .pin/../.pin/constraints-cuda-torch.txt # ptera -executing==1.2.0 +executing==2.1.0 # via # -c .pin/../.pin/constraints-cuda-torch.txt # varname @@ -46,12 +46,12 @@ fsspec==2024.6.1 # -c .pin/../.pin/constraints-cuda-torch.txt # huggingface-hub # torch -giving==0.4.2 +giving==0.4.3 # via # -c .pin/../.pin/constraints-cuda-torch.txt # ptera # voir -huggingface-hub==0.24.6 +huggingface-hub==0.24.7 # via # -c .pin/../.pin/constraints-cuda-torch.txt # tokenizers @@ 
-285,11 +285,11 @@ typing-extensions==4.12.2 # huggingface-hub # reactivex # torch -urllib3==2.2.2 +urllib3==2.2.3 # via # -c .pin/../.pin/constraints-cuda-torch.txt # requests -varname==0.10.0 +varname==0.13.3 # via # -c .pin/../.pin/constraints-cuda-torch.txt # giving diff --git a/benchmarks/lightning/requirements.cuda.txt b/benchmarks/lightning/requirements.cuda.txt index 6b4e94fbd..5cf924abf 100644 --- a/benchmarks/lightning/requirements.cuda.txt +++ b/benchmarks/lightning/requirements.cuda.txt @@ -42,7 +42,7 @@ codefind==0.1.7 # via # -c .pin/../.pin/constraints-cuda-torch.txt # ptera -executing==1.2.0 +executing==2.1.0 # via # -c .pin/../.pin/constraints-cuda-torch.txt # varname @@ -62,7 +62,7 @@ fsspec[http]==2024.6.1 # lightning # pytorch-lightning # torch -giving==0.4.2 +giving==0.4.3 # via # -c .pin/../.pin/constraints-cuda-torch.txt # ptera @@ -322,7 +322,7 @@ typing-extensions==4.12.2 # pytorch-lightning # reactivex # torch -varname==0.10.0 +varname==0.13.3 # via # -c .pin/../.pin/constraints-cuda-torch.txt # giving diff --git a/benchmarks/llama/requirements.cuda.txt b/benchmarks/llama/requirements.cuda.txt index 7afa4670d..03618dfac 100644 --- a/benchmarks/llama/requirements.cuda.txt +++ b/benchmarks/llama/requirements.cuda.txt @@ -60,7 +60,7 @@ dill==0.3.8 # -c .pin/../.pin/constraints-cuda-torch.txt # datasets # multiprocess -executing==1.2.0 +executing==2.1.0 # via # -c .pin/../.pin/constraints-cuda-torch.txt # varname @@ -91,12 +91,12 @@ fsspec[http]==2024.6.1 # datasets # huggingface-hub # torch -giving==0.4.2 +giving==0.4.3 # via # -c .pin/../.pin/constraints-cuda-torch.txt # ptera # voir -huggingface-hub==0.24.6 +huggingface-hub==0.24.7 # via # -c .pin/../.pin/constraints-cuda-torch.txt # datasets @@ -377,11 +377,11 @@ tzdata==2024.1 # via # -c .pin/../.pin/constraints-cuda-torch.txt # pandas -urllib3==2.2.2 +urllib3==2.2.3 # via # -c .pin/../.pin/constraints-cuda-torch.txt # requests -varname==0.10.0 +varname==0.13.3 # via # -c .pin/../.pin/constraints-cuda-torch.txt # giving diff --git a/benchmarks/llava/requirements.cuda.txt b/benchmarks/llava/requirements.cuda.txt index f5326b1a2..51e099d2c 100644 --- a/benchmarks/llava/requirements.cuda.txt +++ b/benchmarks/llava/requirements.cuda.txt @@ -64,7 +64,7 @@ dill==0.3.8 # -c .pin/../.pin/constraints-cuda-torch.txt # datasets # multiprocess -executing==1.2.0 +executing==2.1.0 # via # -c .pin/../.pin/constraints-cuda-torch.txt # varname @@ -87,12 +87,12 @@ fsspec[http]==2024.6.1 # datasets # huggingface-hub # torch -giving==0.4.2 +giving==0.4.3 # via # -c .pin/../.pin/constraints-cuda-torch.txt # ptera # voir -huggingface-hub==0.24.6 +huggingface-hub==0.24.7 # via # -c .pin/../.pin/constraints-cuda-torch.txt # accelerate @@ -374,11 +374,11 @@ tzdata==2024.1 # via # -c .pin/../.pin/constraints-cuda-torch.txt # pandas -urllib3==2.2.2 +urllib3==2.2.3 # via # -c .pin/../.pin/constraints-cuda-torch.txt # requests -varname==0.10.0 +varname==0.13.3 # via # -c .pin/../.pin/constraints-cuda-torch.txt # giving diff --git a/benchmarks/llm/requirements.cuda.txt b/benchmarks/llm/requirements.cuda.txt index 7312d50f0..beb491d3d 100644 --- a/benchmarks/llm/requirements.cuda.txt +++ b/benchmarks/llm/requirements.cuda.txt @@ -73,7 +73,7 @@ dill==0.3.8 # -c .pin/../.pin/constraints-cuda-torch.txt # datasets # multiprocess -executing==1.2.0 +executing==2.1.0 # via # -c .pin/../.pin/constraints-cuda-torch.txt # varname @@ -106,7 +106,7 @@ fsspec[http]==2024.6.1 # datasets # huggingface-hub # torch -giving==0.4.2 +giving==0.4.3 # via # -c 
.pin/../.pin/constraints-cuda-torch.txt # ptera @@ -115,7 +115,7 @@ hjson==3.1.0 # via # -c .pin/../.pin/constraints-cuda-torch.txt # argklass -huggingface-hub==0.24.6 +huggingface-hub==0.24.7 # via # -c .pin/../.pin/constraints-cuda-torch.txt # accelerate @@ -436,12 +436,12 @@ tzdata==2024.1 # via # -c .pin/../.pin/constraints-cuda-torch.txt # pandas -urllib3==2.2.2 +urllib3==2.2.3 # via # -c .pin/../.pin/constraints-cuda-torch.txt # blobfile # requests -varname==0.10.0 +varname==0.13.3 # via # -c .pin/../.pin/constraints-cuda-torch.txt # giving diff --git a/benchmarks/purejaxrl/requirements.cuda.txt b/benchmarks/purejaxrl/requirements.cuda.txt index 74f54b90e..7125c4357 100644 --- a/benchmarks/purejaxrl/requirements.cuda.txt +++ b/benchmarks/purejaxrl/requirements.cuda.txt @@ -6,10 +6,13 @@ # --extra-index-url https://pypi.ngc.nvidia.com --extra-index-url https://download.pytorch.org/whl/cu121 +--find-links https://storage.googleapis.com/jax-releases/jax_cuda_releases.html +--find-links https://data.pyg.org/whl/torch-2.4.0+cu121.html --trusted-host pypi.ngc.nvidia.com absl-py==2.1.0 # via + # -c .pin/../.pin/constraints-cuda-torch.txt # brax # chex # distrax @@ -22,27 +25,45 @@ absl-py==2.1.0 # rlax # tensorflow-probability antlr4-python3-runtime==4.9.3 - # via omegaconf + # via + # -c .pin/../.pin/constraints-cuda-torch.txt + # omegaconf argklass==1.4.4 - # via -r benchmarks/purejaxrl/requirements.in + # via + # -c .pin/../.pin/constraints-cuda-torch.txt + # -r benchmarks/purejaxrl/requirements.in astroid==3.2.4 - # via pylint + # via + # -c .pin/../.pin/constraints-cuda-torch.txt + # pylint asttokens==2.4.1 - # via giving + # via + # -c .pin/../.pin/constraints-cuda-torch.txt + # giving black==24.8.0 - # via navix + # via + # -c .pin/../.pin/constraints-cuda-torch.txt + # navix blinker==1.8.2 - # via flask + # via + # -c .pin/../.pin/constraints-cuda-torch.txt + # flask brax==0.10.5 - # via -r benchmarks/purejaxrl/requirements.in + # via + # -c .pin/../.pin/constraints-cuda-torch.txt + # -r benchmarks/purejaxrl/requirements.in certifi==2024.8.30 # via + # -c .pin/../.pin/constraints-cuda-torch.txt # requests # sentry-sdk charset-normalizer==3.3.2 - # via requests + # via + # -c .pin/../.pin/constraints-cuda-torch.txt + # requests chex==0.1.86 # via + # -c .pin/../.pin/constraints-cuda-torch.txt # distrax # evosax # flashbax @@ -51,75 +72,111 @@ chex==0.1.86 # rlax click==8.1.7 # via + # -c .pin/../.pin/constraints-cuda-torch.txt # black # flask # wandb cloudpickle==3.0.0 # via + # -c .pin/../.pin/constraints-cuda-torch.txt # gym - # gymnasium # tensorflow-probability codefind==0.1.7 - # via ptera + # via + # -c .pin/../.pin/constraints-cuda-torch.txt + # ptera contextlib2==21.6.0 - # via ml-collections + # via + # -c .pin/../.pin/constraints-cuda-torch.txt + # ml-collections contourpy==1.3.0 - # via matplotlib + # via + # -c .pin/../.pin/constraints-cuda-torch.txt + # matplotlib cycler==0.12.1 - # via matplotlib + # via + # -c .pin/../.pin/constraints-cuda-torch.txt + # matplotlib decorator==5.1.1 - # via tensorflow-probability + # via + # -c .pin/../.pin/constraints-cuda-torch.txt + # tensorflow-probability dill==0.3.8 - # via pylint + # via + # -c .pin/../.pin/constraints-cuda-torch.txt + # pylint distrax==0.1.5 # via + # -c .pin/../.pin/constraints-cuda-torch.txt # -r benchmarks/purejaxrl/requirements.in # rlax dm-env==1.6 # via + # -c .pin/../.pin/constraints-cuda-torch.txt # brax # rlax dm-tree==0.1.8 # via + # -c .pin/../.pin/constraints-cuda-torch.txt # dm-env # 
tensorflow-probability docker-pycreds==0.4.0 - # via wandb + # via + # -c .pin/../.pin/constraints-cuda-torch.txt + # wandb docstring-parser==0.16 - # via tyro + # via + # -c .pin/../.pin/constraints-cuda-torch.txt + # tyro dotmap==1.3.30 - # via evosax + # via + # -c .pin/../.pin/constraints-cuda-torch.txt + # evosax etils[epath,epy]==1.9.4 # via + # -c .pin/../.pin/constraints-cuda-torch.txt # brax # mujoco # mujoco-mjx # optax # orbax-checkpoint evosax==0.1.6 - # via -r benchmarks/purejaxrl/requirements.in + # via + # -c .pin/../.pin/constraints-cuda-torch.txt + # -r benchmarks/purejaxrl/requirements.in exceptiongroup==1.2.2 - # via pytest -executing==1.2.0 - # via varname -farama-notifications==0.0.4 - # via gymnasium + # via + # -c .pin/../.pin/constraints-cuda-torch.txt + # pytest +executing==2.1.0 + # via + # -c .pin/../.pin/constraints-cuda-torch.txt + # varname filelock==3.16.0 # via + # -c .pin/../.pin/constraints-cuda-torch.txt # torch # triton flake8==7.1.1 - # via navix + # via + # -c .pin/../.pin/constraints-cuda-torch.txt + # navix flashbax==0.1.2 - # via -r benchmarks/purejaxrl/requirements.in + # via + # -c .pin/../.pin/constraints-cuda-torch.txt + # -r benchmarks/purejaxrl/requirements.in flask==3.0.3 # via + # -c .pin/../.pin/constraints-cuda-torch.txt # brax # flask-cors flask-cors==5.0.0 - # via brax + # via + # -c .pin/../.pin/constraints-cuda-torch.txt + # brax flax==0.9.0 # via + # -c .pin/../.pin/constraints-cuda-torch.txt # -r benchmarks/purejaxrl/requirements.in # brax # evosax @@ -127,53 +184,84 @@ flax==0.9.0 # gymnax # navix fonttools==4.53.1 - # via matplotlib -fsspec==2024.9.0 # via + # -c .pin/../.pin/constraints-cuda-torch.txt + # matplotlib +fsspec==2024.6.1 + # via + # -c .pin/../.pin/constraints-cuda-torch.txt # etils # torch gast==0.6.0 - # via tensorflow-probability + # via + # -c .pin/../.pin/constraints-cuda-torch.txt + # tensorflow-probability gitdb==4.0.11 - # via gitpython + # via + # -c .pin/../.pin/constraints-cuda-torch.txt + # gitpython gitpython==3.1.43 - # via wandb -giving==0.4.2 # via + # -c .pin/../.pin/constraints-cuda-torch.txt + # wandb +giving==0.4.3 + # via + # -c .pin/../.pin/constraints-cuda-torch.txt # ptera # voir glfw==2.7.0 - # via mujoco + # via + # -c .pin/../.pin/constraints-cuda-torch.txt + # mujoco grpcio==1.66.1 - # via brax -gym==0.26.2 # via + # -c .pin/../.pin/constraints-cuda-torch.txt + # brax +gym==0.23.1 + # via + # -c .pin/../.pin/constraints-cuda-torch.txt # brax - # gymnax gym-notices==0.0.8 - # via gym -gymnasium==0.29.1 - # via gymnax -gymnax==0.0.8 - # via -r benchmarks/purejaxrl/requirements.in + # via + # -c .pin/../.pin/constraints-cuda-torch.txt + # gym +gymnax==0.0.5 + # via + # -c .pin/../.pin/constraints-cuda-torch.txt + # -r benchmarks/purejaxrl/requirements.in hjson==3.1.0 - # via argklass + # via + # -c .pin/../.pin/constraints-cuda-torch.txt + # argklass humanize==4.10.0 - # via orbax-checkpoint + # via + # -c .pin/../.pin/constraints-cuda-torch.txt + # orbax-checkpoint idna==3.8 - # via requests + # via + # -c .pin/../.pin/constraints-cuda-torch.txt + # requests importlib-resources==6.4.5 # via + # -c .pin/../.pin/constraints-cuda-torch.txt # argklass # etils iniconfig==2.0.0 - # via pytest + # via + # -c .pin/../.pin/constraints-cuda-torch.txt + # pytest isort==5.13.2 - # via pylint + # via + # -c .pin/../.pin/constraints-cuda-torch.txt + # pylint itsdangerous==2.2.0 - # via flask -jax==0.4.31 # via + # -c .pin/../.pin/constraints-cuda-torch.txt + # flask +jax[cuda12]==0.4.31 + # via + # -c 
.pin/../.pin/constraints-cuda-torch.txt + # -r .pin/../constraints/extra/torch.cuda.txt # -r benchmarks/purejaxrl/requirements.in # brax # chex @@ -187,8 +275,17 @@ jax==0.4.31 # optax # orbax-checkpoint # rlax +jax-cuda12-pjrt==0.4.31 + # via + # -c .pin/../.pin/constraints-cuda-torch.txt + # jax-cuda12-plugin +jax-cuda12-plugin[with-cuda]==0.4.31 + # via + # -c .pin/../.pin/constraints-cuda-torch.txt + # jax jaxlib==0.4.31 # via + # -c .pin/../.pin/constraints-cuda-torch.txt # brax # chex # distrax @@ -202,60 +299,88 @@ jaxlib==0.4.31 # orbax-checkpoint # rlax jaxopt==0.8.3 - # via brax + # via + # -c .pin/../.pin/constraints-cuda-torch.txt + # brax jinja2==3.1.4 # via + # -c .pin/../.pin/constraints-cuda-torch.txt # brax # flask # torch kiwisolver==1.4.7 - # via matplotlib + # via + # -c .pin/../.pin/constraints-cuda-torch.txt + # matplotlib markdown-it-py==3.0.0 - # via rich + # via + # -c .pin/../.pin/constraints-cuda-torch.txt + # rich markupsafe==2.1.5 # via + # -c .pin/../.pin/constraints-cuda-torch.txt # jinja2 # werkzeug matplotlib==3.9.2 # via + # -c .pin/../.pin/constraints-cuda-torch.txt # evosax - # gymnax - # seaborn mccabe==0.7.0 # via + # -c .pin/../.pin/constraints-cuda-torch.txt # flake8 # pylint mdurl==0.1.2 - # via markdown-it-py + # via + # -c .pin/../.pin/constraints-cuda-torch.txt + # markdown-it-py ml-collections==0.1.1 - # via brax + # via + # -c .pin/../.pin/constraints-cuda-torch.txt + # brax ml-dtypes==0.4.0 # via + # -c .pin/../.pin/constraints-cuda-torch.txt # jax # jaxlib # tensorstore mpmath==1.3.0 - # via sympy + # via + # -c .pin/../.pin/constraints-cuda-torch.txt + # sympy msgpack==1.1.0 # via + # -c .pin/../.pin/constraints-cuda-torch.txt # flax # orbax-checkpoint mujoco==3.2.2 # via + # -c .pin/../.pin/constraints-cuda-torch.txt # brax # mujoco-mjx mujoco-mjx==3.2.2 - # via brax + # via + # -c .pin/../.pin/constraints-cuda-torch.txt + # brax mypy-extensions==1.0.0 - # via black + # via + # -c .pin/../.pin/constraints-cuda-torch.txt + # black navix==0.7.0 - # via -r benchmarks/purejaxrl/requirements.in + # via + # -c .pin/../.pin/constraints-cuda-torch.txt + # -r benchmarks/purejaxrl/requirements.in nest-asyncio==1.6.0 - # via orbax-checkpoint + # via + # -c .pin/../.pin/constraints-cuda-torch.txt + # orbax-checkpoint networkx==3.3 - # via torch -numpy==2.1.1 # via + # -c .pin/../.pin/constraints-cuda-torch.txt + # torch +numpy==1.26.4 + # via + # -c .pin/../.pin/constraints-cuda-torch.txt # -r benchmarks/purejaxrl/requirements.in # brax # chex @@ -265,7 +390,6 @@ numpy==2.1.1 # evosax # flashbax # gym - # gymnasium # jax # jaxlib # jaxopt @@ -276,120 +400,187 @@ numpy==2.1.1 # opt-einsum # optax # orbax-checkpoint - # pandas # rlax # scipy - # seaborn # tensorboardx # tensorflow-probability # tensorstore # trimesh + # xformers nvidia-cublas-cu12==12.1.3.1 # via + # -c .pin/../.pin/constraints-cuda-torch.txt + # jax-cuda12-plugin # nvidia-cudnn-cu12 # nvidia-cusolver-cu12 # torch nvidia-cuda-cupti-cu12==12.1.105 - # via torch + # via + # -c .pin/../.pin/constraints-cuda-torch.txt + # jax-cuda12-plugin + # torch +nvidia-cuda-nvcc-cu12==12.6.68 + # via + # -c .pin/../.pin/constraints-cuda-torch.txt + # jax-cuda12-plugin nvidia-cuda-nvrtc-cu12==12.1.105 - # via torch + # via + # -c .pin/../.pin/constraints-cuda-torch.txt + # torch nvidia-cuda-runtime-cu12==12.1.105 - # via torch + # via + # -c .pin/../.pin/constraints-cuda-torch.txt + # jax-cuda12-plugin + # torch nvidia-cudnn-cu12==9.1.0.70 - # via torch + # via + # -c 
.pin/../.pin/constraints-cuda-torch.txt + # jax-cuda12-plugin + # torch nvidia-cufft-cu12==11.0.2.54 - # via torch + # via + # -c .pin/../.pin/constraints-cuda-torch.txt + # jax-cuda12-plugin + # torch nvidia-curand-cu12==10.3.2.106 - # via torch + # via + # -c .pin/../.pin/constraints-cuda-torch.txt + # torch nvidia-cusolver-cu12==11.4.5.107 - # via torch + # via + # -c .pin/../.pin/constraints-cuda-torch.txt + # jax-cuda12-plugin + # torch nvidia-cusparse-cu12==12.1.0.106 # via + # -c .pin/../.pin/constraints-cuda-torch.txt + # jax-cuda12-plugin # nvidia-cusolver-cu12 # torch nvidia-ml-py==12.560.30 - # via voir + # via + # -c .pin/../.pin/constraints-cuda-torch.txt + # voir nvidia-nccl-cu12==2.20.5 - # via torch + # via + # -c .pin/../.pin/constraints-cuda-torch.txt + # jax-cuda12-plugin + # torch nvidia-nvjitlink-cu12==12.6.68 # via + # -c .pin/../.pin/constraints-cuda-torch.txt + # jax-cuda12-plugin # nvidia-cusolver-cu12 # nvidia-cusparse-cu12 nvidia-nvtx-cu12==12.1.105 - # via torch + # via + # -c .pin/../.pin/constraints-cuda-torch.txt + # torch omegaconf==2.3.0 - # via voir + # via + # -c .pin/../.pin/constraints-cuda-torch.txt + # voir opt-einsum==3.3.0 - # via jax + # via + # -c .pin/../.pin/constraints-cuda-torch.txt + # jax optax==0.2.3 # via + # -c .pin/../.pin/constraints-cuda-torch.txt # -r benchmarks/purejaxrl/requirements.in # brax # flax orbax-checkpoint==0.6.3 # via + # -c .pin/../.pin/constraints-cuda-torch.txt # brax # flax ovld==0.3.9 - # via voir + # via + # -c .pin/../.pin/constraints-cuda-torch.txt + # voir packaging==24.1 # via + # -c .pin/../.pin/constraints-cuda-torch.txt # black # matplotlib # pytest # setuptools-scm # tensorboardx -pandas==2.2.2 - # via seaborn pathspec==0.12.1 - # via black + # via + # -c .pin/../.pin/constraints-cuda-torch.txt + # black pillow==10.4.0 # via + # -c .pin/../.pin/constraints-cuda-torch.txt # brax # matplotlib # navix platformdirs==4.3.2 # via + # -c .pin/../.pin/constraints-cuda-torch.txt # black # pylint # wandb pluggy==1.5.0 - # via pytest + # via + # -c .pin/../.pin/constraints-cuda-torch.txt + # pytest protobuf==5.28.1 # via + # -c .pin/../.pin/constraints-cuda-torch.txt # orbax-checkpoint # tensorboardx # wandb psutil==5.9.8 # via + # -c .pin/../.pin/constraints-cuda-torch.txt # voir # wandb ptera==1.4.1 - # via voir + # via + # -c .pin/../.pin/constraints-cuda-torch.txt + # voir pycodestyle==2.12.1 - # via flake8 + # via + # -c .pin/../.pin/constraints-cuda-torch.txt + # flake8 pyflakes==3.2.0 - # via flake8 + # via + # -c .pin/../.pin/constraints-cuda-torch.txt + # flake8 pygments==2.18.0 - # via rich + # via + # -c .pin/../.pin/constraints-cuda-torch.txt + # rich pylint==3.2.7 - # via navix + # via + # -c .pin/../.pin/constraints-cuda-torch.txt + # navix pyopengl==3.1.7 - # via mujoco + # via + # -c .pin/../.pin/constraints-cuda-torch.txt + # mujoco pyparsing==3.1.4 - # via matplotlib + # via + # -c .pin/../.pin/constraints-cuda-torch.txt + # matplotlib pytest==8.3.3 - # via navix + # via + # -c .pin/../.pin/constraints-cuda-torch.txt + # navix python-dateutil==2.9.0.post0 # via + # -c .pin/../.pin/constraints-cuda-torch.txt # matplotlib - # pandas pytinyrenderer==0.0.14 - # via brax -pytz==2024.2 - # via pandas + # via + # -c .pin/../.pin/constraints-cuda-torch.txt + # brax pyyaml==6.0.2 # via + # -c .pin/../.pin/constraints-cuda-torch.txt # evosax # flax # gymnax @@ -398,73 +589,109 @@ pyyaml==6.0.2 # orbax-checkpoint # wandb reactivex==4.0.4 - # via giving + # via + # -c .pin/../.pin/constraints-cuda-torch.txt + # 
giving requests==2.32.3 - # via wandb + # via + # -c .pin/../.pin/constraints-cuda-torch.txt + # wandb rich==13.8.1 # via + # -c .pin/../.pin/constraints-cuda-torch.txt # flax # tyro # voir rlax==0.1.6 - # via navix + # via + # -c .pin/../.pin/constraints-cuda-torch.txt + # navix scipy==1.14.1 # via + # -c .pin/../.pin/constraints-cuda-torch.txt # brax # jax # jaxlib # jaxopt # mujoco-mjx -seaborn==0.13.2 - # via gymnax sentry-sdk==2.14.0 - # via wandb + # via + # -c .pin/../.pin/constraints-cuda-torch.txt + # wandb setproctitle==1.3.3 - # via wandb + # via + # -c .pin/../.pin/constraints-cuda-torch.txt + # wandb setuptools-scm==8.1.0 - # via navix + # via + # -c .pin/../.pin/constraints-cuda-torch.txt + # navix shtab==1.7.1 - # via tyro + # via + # -c .pin/../.pin/constraints-cuda-torch.txt + # tyro six==1.16.0 # via + # -c .pin/../.pin/constraints-cuda-torch.txt # asttokens # docker-pycreds # ml-collections # python-dateutil # tensorflow-probability smmap==5.0.1 - # via gitdb + # via + # -c .pin/../.pin/constraints-cuda-torch.txt + # gitdb sympy==1.13.2 - # via torch + # via + # -c .pin/../.pin/constraints-cuda-torch.txt + # torch tensorboardx==2.6.2.2 - # via brax + # via + # -c .pin/../.pin/constraints-cuda-torch.txt + # brax tensorflow-probability==0.24.0 - # via distrax + # via + # -c .pin/../.pin/constraints-cuda-torch.txt + # distrax tensorstore==0.1.65 # via + # -c .pin/../.pin/constraints-cuda-torch.txt # flashbax # flax # orbax-checkpoint tomli==2.0.1 # via + # -c .pin/../.pin/constraints-cuda-torch.txt # black # pylint # pytest # setuptools-scm tomlkit==0.13.2 - # via pylint + # via + # -c .pin/../.pin/constraints-cuda-torch.txt + # pylint toolz==0.12.1 - # via chex -torch==2.4.1+cu121 - # via -r benchmarks/purejaxrl/requirements.in + # via + # -c .pin/../.pin/constraints-cuda-torch.txt + # chex +torch==2.4.0+cu121 + # via + # -c .pin/../.pin/constraints-cuda-torch.txt + # -r benchmarks/purejaxrl/requirements.in + # xformers trimesh==4.4.9 # via + # -c .pin/../.pin/constraints-cuda-torch.txt # brax # mujoco-mjx triton==3.0.0 - # via torch + # via + # -c .pin/../.pin/constraints-cuda-torch.txt + # torch typing-extensions==4.12.2 # via + # -c .pin/../.pin/constraints-cuda-torch.txt # astroid # black # brax @@ -472,32 +699,45 @@ typing-extensions==4.12.2 # etils # flashbax # flax - # gymnasium # navix # orbax-checkpoint # reactivex # torch # tyro tyro==0.8.10 - # via navix -tzdata==2024.1 - # via pandas -urllib3==2.2.2 # via + # -c .pin/../.pin/constraints-cuda-torch.txt + # navix +urllib3==2.2.3 + # via + # -c .pin/../.pin/constraints-cuda-torch.txt # requests # sentry-sdk -varname==0.10.0 - # via giving +varname==0.13.3 + # via + # -c .pin/../.pin/constraints-cuda-torch.txt + # giving voir==0.2.19 # via + # -c .pin/../.pin/constraints-cuda-torch.txt # -c .pin/../constraints/cuda.txt # -r benchmarks/purejaxrl/requirements.in -wandb==0.17.9 - # via navix +wandb==0.18.0 + # via + # -c .pin/../.pin/constraints-cuda-torch.txt + # navix werkzeug==3.0.4 - # via flask + # via + # -c .pin/../.pin/constraints-cuda-torch.txt + # flask +xformers==0.0.27.post2 + # via + # -c .pin/../.pin/constraints-cuda-torch.txt + # -r .pin/../constraints/extra/torch.cuda.txt zipp==3.20.1 - # via etils + # via + # -c .pin/../.pin/constraints-cuda-torch.txt + # etils # The following packages are considered to be unsafe in a requirements file: # setuptools diff --git a/benchmarks/recursiongfn/requirements.cuda.txt b/benchmarks/recursiongfn/requirements.cuda.txt index f9a1c6c30..e8757e8e2 100644 --- 
a/benchmarks/recursiongfn/requirements.cuda.txt +++ b/benchmarks/recursiongfn/requirements.cuda.txt @@ -75,7 +75,7 @@ docker-pycreds==0.4.0 # via # -c .pin/../.pin/constraints-cuda-torch.txt # wandb -executing==1.2.0 +executing==2.1.0 # via # -c .pin/../.pin/constraints-cuda-torch.txt # varname @@ -103,7 +103,7 @@ gitpython==3.1.43 # -c .pin/../.pin/constraints-cuda-torch.txt # -r benchmarks/recursiongfn/requirements.in # wandb -giving==0.4.2 +giving==0.4.3 # via # -c .pin/../.pin/constraints-cuda-torch.txt # ptera @@ -508,12 +508,12 @@ tzdata==2024.1 # via # -c .pin/../.pin/constraints-cuda-torch.txt # pandas -urllib3==2.2.2 +urllib3==2.2.3 # via # -c .pin/../.pin/constraints-cuda-torch.txt # requests # sentry-sdk -varname==0.10.0 +varname==0.13.3 # via # -c .pin/../.pin/constraints-cuda-torch.txt # giving @@ -522,7 +522,7 @@ voir==0.2.19 # -c .pin/../.pin/constraints-cuda-torch.txt # -c .pin/../constraints/cuda.txt # -r benchmarks/recursiongfn/requirements.in -wandb==0.17.9 +wandb==0.18.0 # via # -c .pin/../.pin/constraints-cuda-torch.txt # -r benchmarks/recursiongfn/requirements.in diff --git a/benchmarks/rlhf/requirements.cuda.txt b/benchmarks/rlhf/requirements.cuda.txt index 1a5039475..881ebeb99 100644 --- a/benchmarks/rlhf/requirements.cuda.txt +++ b/benchmarks/rlhf/requirements.cuda.txt @@ -70,7 +70,7 @@ docstring-parser==0.16 # via # -c .pin/../.pin/constraints-cuda-torch.txt # tyro -executing==1.2.0 +executing==2.1.0 # via # -c .pin/../.pin/constraints-cuda-torch.txt # varname @@ -93,12 +93,12 @@ fsspec[http]==2024.6.1 # datasets # huggingface-hub # torch -giving==0.4.2 +giving==0.4.3 # via # -c .pin/../.pin/constraints-cuda-torch.txt # ptera # voir -huggingface-hub==0.24.6 +huggingface-hub==0.24.7 # via # -c .pin/../.pin/constraints-cuda-torch.txt # accelerate @@ -392,11 +392,11 @@ tzdata==2024.1 # via # -c .pin/../.pin/constraints-cuda-torch.txt # pandas -urllib3==2.2.2 +urllib3==2.2.3 # via # -c .pin/../.pin/constraints-cuda-torch.txt # requests -varname==0.10.0 +varname==0.13.3 # via # -c .pin/../.pin/constraints-cuda-torch.txt # giving diff --git a/benchmarks/timm/requirements.cuda.txt b/benchmarks/timm/requirements.cuda.txt index 57d5f77b6..a11b5ade0 100644 --- a/benchmarks/timm/requirements.cuda.txt +++ b/benchmarks/timm/requirements.cuda.txt @@ -30,7 +30,7 @@ codefind==0.1.7 # via # -c .pin/../.pin/constraints-cuda-torch.txt # ptera -executing==1.2.0 +executing==2.1.0 # via # -c .pin/../.pin/constraints-cuda-torch.txt # varname @@ -45,12 +45,12 @@ fsspec==2024.6.1 # -c .pin/../.pin/constraints-cuda-torch.txt # huggingface-hub # torch -giving==0.4.2 +giving==0.4.3 # via # -c .pin/../.pin/constraints-cuda-torch.txt # ptera # voir -huggingface-hub==0.24.6 +huggingface-hub==0.24.7 # via # -c .pin/../.pin/constraints-cuda-torch.txt # -r benchmarks/timm/requirements.in @@ -273,11 +273,11 @@ typing-extensions==4.12.2 # huggingface-hub # reactivex # torch -urllib3==2.2.2 +urllib3==2.2.3 # via # -c .pin/../.pin/constraints-cuda-torch.txt # requests -varname==0.10.0 +varname==0.13.3 # via # -c .pin/../.pin/constraints-cuda-torch.txt # giving diff --git a/benchmarks/torchatari/requirements.cuda.txt b/benchmarks/torchatari/requirements.cuda.txt index 760f20845..13df69c7a 100644 --- a/benchmarks/torchatari/requirements.cuda.txt +++ b/benchmarks/torchatari/requirements.cuda.txt @@ -56,7 +56,7 @@ envpool==0.8.4 # via # -c .pin/../.pin/constraints-cuda-torch.txt # -r benchmarks/torchatari/requirements.in -executing==1.2.0 +executing==2.1.0 # via # -c 
.pin/../.pin/constraints-cuda-torch.txt # varname @@ -73,7 +73,7 @@ fsspec==2024.6.1 # via # -c .pin/../.pin/constraints-cuda-torch.txt # torch -giving==0.4.2 +giving==0.4.3 # via # -c .pin/../.pin/constraints-cuda-torch.txt # ptera @@ -341,7 +341,7 @@ tyro==0.8.10 # via # -c .pin/../.pin/constraints-cuda-torch.txt # -r benchmarks/torchatari/requirements.in -varname==0.10.0 +varname==0.13.3 # via # -c .pin/../.pin/constraints-cuda-torch.txt # giving diff --git a/benchmarks/torchvision/requirements.cuda.txt b/benchmarks/torchvision/requirements.cuda.txt index 38c2e1b39..7a989b1be 100644 --- a/benchmarks/torchvision/requirements.cuda.txt +++ b/benchmarks/torchvision/requirements.cuda.txt @@ -22,7 +22,7 @@ codefind==0.1.7 # via # -c .pin/../.pin/constraints-cuda-torch.txt # ptera -executing==1.2.0 +executing==2.1.0 # via # -c .pin/../.pin/constraints-cuda-torch.txt # varname @@ -35,7 +35,7 @@ fsspec==2024.6.1 # via # -c .pin/../.pin/constraints-cuda-torch.txt # torch -giving==0.4.2 +giving==0.4.3 # via # -c .pin/../.pin/constraints-cuda-torch.txt # ptera @@ -249,7 +249,7 @@ typing-extensions==4.12.2 # -c .pin/../.pin/constraints-cuda-torch.txt # reactivex # torch -varname==0.10.0 +varname==0.13.3 # via # -c .pin/../.pin/constraints-cuda-torch.txt # giving diff --git a/benchmarks/torchvision_ddp/requirements.cuda.txt b/benchmarks/torchvision_ddp/requirements.cuda.txt index 2f112e811..771d1e3df 100644 --- a/benchmarks/torchvision_ddp/requirements.cuda.txt +++ b/benchmarks/torchvision_ddp/requirements.cuda.txt @@ -7,6 +7,7 @@ --extra-index-url https://pypi.ngc.nvidia.com --extra-index-url https://download.pytorch.org/whl/cu121 --find-links https://storage.googleapis.com/jax-releases/jax_cuda_releases.html +--find-links https://data.pyg.org/whl/torch-2.4.0+cu121.html --trusted-host pypi.ngc.nvidia.com antlr4-python3-runtime==4.9.3 @@ -21,7 +22,7 @@ codefind==0.1.7 # via # -c .pin/../.pin/constraints-cuda-torch.txt # ptera -executing==1.2.0 +executing==2.1.0 # via # -c .pin/../.pin/constraints-cuda-torch.txt # varname @@ -34,7 +35,7 @@ fsspec==2024.6.1 # via # -c .pin/../.pin/constraints-cuda-torch.txt # torch -giving==0.4.2 +giving==0.4.3 # via # -c .pin/../.pin/constraints-cuda-torch.txt # ptera @@ -248,7 +249,7 @@ typing-extensions==4.12.2 # -c .pin/../.pin/constraints-cuda-torch.txt # reactivex # torch -varname==0.10.0 +varname==0.13.3 # via # -c .pin/../.pin/constraints-cuda-torch.txt # giving diff --git a/benchmarks/vjepa/requirements.cuda.txt b/benchmarks/vjepa/requirements.cuda.txt index 1ae29f2b8..9eb10ceef 100644 --- a/benchmarks/vjepa/requirements.cuda.txt +++ b/benchmarks/vjepa/requirements.cuda.txt @@ -51,7 +51,7 @@ einops==0.8.0 # via # -c .pin/../.pin/constraints-cuda-torch.txt # -r benchmarks/vjepa/requirements.in -executing==1.2.0 +executing==2.1.0 # via # -c .pin/../.pin/constraints-cuda-torch.txt # varname @@ -66,12 +66,12 @@ fsspec==2024.6.1 # -c .pin/../.pin/constraints-cuda-torch.txt # huggingface-hub # torch -giving==0.4.2 +giving==0.4.3 # via # -c .pin/../.pin/constraints-cuda-torch.txt # ptera # voir -huggingface-hub==0.24.6 +huggingface-hub==0.24.7 # via # -c .pin/../.pin/constraints-cuda-torch.txt # timm @@ -333,11 +333,11 @@ tzdata==2024.1 # via # -c .pin/../.pin/constraints-cuda-torch.txt # pandas -urllib3==2.2.2 +urllib3==2.2.3 # via # -c .pin/../.pin/constraints-cuda-torch.txt # requests -varname==0.10.0 +varname==0.13.3 # via # -c .pin/../.pin/constraints-cuda-torch.txt # giving diff --git a/milabench/_version.py b/milabench/_version.py index 93558e8ce..361d76c9a 
100644
--- a/milabench/_version.py
+++ b/milabench/_version.py
@@ -1,5 +1,5 @@
 """This file is generated, do not modify"""
 
-__tag__ = "v0.1.0-104-g8dfefe05"
-__commit__ = "8dfefe05f6db22af595bf4c48f486685b6be1ed7"
-__date__ = "2024-09-11 15:12:33 -0400"
+__tag__ = "v0.1.0-110-g0548deb4"
+__commit__ = "0548deb40b61c705daca46dc68ca7590b2ee0dce"
+__date__ = "2024-09-12 20:36:26 -0400"
diff --git a/milabench/report.py b/milabench/report.py
index 506573dd0..aebcaf093 100644
--- a/milabench/report.py
+++ b/milabench/report.py
@@ -304,7 +304,9 @@ def print_meta(out, meta):
         if k == "accelerators":
             gpus = v["gpus"]
             n = len(gpus)
-            _, gpu = gpus.popitem()
+            gpu = {}
+            if n > 0:
+                _, gpu = gpus.popitem()
             stats = {
                 "n": n,
                 "product": gpu.get("product", "NA"),
@@ -325,7 +327,9 @@ def short_meta(out, meta):
         if k == "accelerators":
             gpus = v["gpus"]
             n = len(gpus)
-            _, gpu = gpus.popitem()
+            gpu = {}
+            if n > 0:
+                _, gpu = gpus.popitem()
             stats["product"] = gpu.get("product", "NA")
             stats["n_gpu"] = n
             stats["memory"] = str(gpu.get("memory", {}).get("total", 0))
diff --git a/scripts/article/run_cuda.sh b/scripts/article/run_cuda.sh
index 4c498eca3..68ed199c9 100644
--- a/scripts/article/run_cuda.sh
+++ b/scripts/article/run_cuda.sh
@@ -83,8 +83,8 @@ if [ "$MILABENCH_PREPARE" -eq 0 ]; then
 
     . $MILABENCH_WORDIR/env/bin/activate
 
-    # milabench pin --variant cuda --from-scratch $ARGS
-    # milabench install --system $MILABENCH_WORDIR/system.yaml $ARGS --force
+    milabench pin --variant cuda --from-scratch $ARGS
+    milabench install --system $MILABENCH_WORDIR/system.yaml $ARGS --force
 
     # milabench prepare --system $MILABENCH_WORDIR/system.yaml $ARGS
diff --git a/tests/test_mock.py b/tests/test_mock.py
index 1e2a26f6d..ad440d81b 100644
--- a/tests/test_mock.py
+++ b/tests/test_mock.py
@@ -26,6 +26,8 @@ OVERSIZED_INSTALL_BENCHMARKS = {
     "dqn",
     "ppo",
+    "dimenet",
+    "recursiongfn"
 }
 
 
 def run_cli(*args, expected_code=0, msg=None):