diff --git a/.pin/constraints-cuda-torch.txt b/.pin/constraints-cuda-torch.txt index 0d1ab6808..d6e5ae440 100644 --- a/.pin/constraints-cuda-torch.txt +++ b/.pin/constraints-cuda-torch.txt @@ -2,15 +2,27 @@ # This file is autogenerated by pip-compile with Python 3.11 # by the following command: # -# pip-compile --config=pyproject.toml --output-file=.pin/constraints-cuda-torch.txt --resolver=backtracking .pin/tmp-constraints.txt benchmarks/accelerate_opt/requirements.in benchmarks/dlrm/requirements.in benchmarks/flops/requirements.in benchmarks/huggingface/requirements.in benchmarks/rwkv/requirements.in benchmarks/stargan/requirements.in benchmarks/super-slomo/requirements.in benchmarks/timm/requirements.in benchmarks/torchvision/requirements.in +# pip-compile --output-file=.pin/constraints-cuda-torch.txt .pin/tmp-constraints.txt benchmarks/accelerate_opt/requirements.in benchmarks/brax/requirements.in benchmarks/dlrm/requirements.in benchmarks/flops/requirements.in benchmarks/huggingface/requirements.in benchmarks/llama/requirements.in benchmarks/rwkv/requirements.in benchmarks/stargan/requirements.in benchmarks/super-slomo/requirements.in benchmarks/timm/requirements.in benchmarks/torchvision/requirements.in benchmarks/torchvision_ddp/requirements.in # ---extra-index-url https://download.pytorch.org/whl/cu118 +--extra-index-url https://pypi.ngc.nvidia.com +--extra-index-url https://download.pytorch.org/whl/cu121 +--find-links https://storage.googleapis.com/jax-releases/jax_cuda_releases.html +--trusted-host pypi.ngc.nvidia.com -absl-py==2.0.0 - # via tensorboard -accelerate==0.24.1 +absl-py==2.1.0 + # via + # brax + # chex + # dm-env + # ml-collections + # mujoco + # mujoco-mjx + # optax + # orbax-checkpoint + # tensorboard +accelerate==0.31.0 # via -r benchmarks/accelerate_opt/requirements.in -aiohttp==3.8.6 +aiohttp==3.9.5 # via # datasets # fsspec @@ -20,81 +32,114 @@ antlr4-python3-runtime==4.9.3 # via omegaconf asttokens==2.4.1 # via giving 
-async-timeout==4.0.3 - # via aiohttp -attrs==23.1.0 +attrs==23.2.0 # via aiohttp -cachetools==5.3.2 - # via google-auth -certifi==2023.7.22 +blinker==1.8.2 + # via flask +brax==0.10.5 + # via -r benchmarks/brax/requirements.in +certifi==2024.6.2 # via requests charset-normalizer==3.3.2 - # via - # aiohttp - # requests -codefind==0.1.3 + # via requests +chex==0.1.86 + # via optax +click==8.1.7 + # via flask +cloudpickle==3.0.0 + # via gym +codefind==0.1.6 # via ptera -datasets==2.14.6 +contextlib2==21.6.0 + # via ml-collections +datasets==2.19.2 # via # -r benchmarks/accelerate_opt/requirements.in + # -r benchmarks/llama/requirements.in # evaluate -deepspeed==0.11.1 +deepspeed==0.14.2 # via # -r benchmarks/accelerate_opt/requirements.in # -r benchmarks/rwkv/requirements.in -dill==0.3.7 +dill==0.3.8 # via # datasets # evaluate # multiprocess -docker==6.1.3 +dm-env==1.6 + # via brax +dm-tree==0.1.8 + # via dm-env +docker==7.1.0 # via torchx -docstring-parser==0.15 +docstring-parser==0.8.1 # via torchx -evaluate==0.4.1 +etils[epath,epy]==1.9.1 + # via + # brax + # mujoco + # mujoco-mjx + # orbax-checkpoint +evaluate==0.4.2 # via -r benchmarks/accelerate_opt/requirements.in executing==1.2.0 # via varname -fbgemm-gpu==0.5.0+cu118 +fairscale==0.4.13 + # via -r benchmarks/llama/requirements.in +fbgemm-gpu==0.7.0+cu121 # via torchrec -filelock==3.13.1 +filelock==3.14.0 # via + # datasets # huggingface-hub # torch # torchx # transformers # triton -frozenlist==1.4.0 +fire==0.6.0 + # via -r benchmarks/llama/requirements.in +flask==3.0.3 + # via + # brax + # flask-cors +flask-cors==4.0.1 + # via brax +flax==0.8.4 + # via brax +frozenlist==1.4.1 # via # aiohttp # aiosignal -fsspec[http]==2023.1.0 +fsspec[http]==2024.3.1 # via # datasets + # etils # evaluate # huggingface-hub # pytorch-lightning # torch # torchx -future==0.18.3 +future==1.0.0 # via -r benchmarks/dlrm/requirements.in giving==0.4.2 # via # ptera # voir -google-auth==2.23.4 +glfw==2.7.0 + # via mujoco 
+graphviz==0.20.3 + # via torchviz +grpcio==1.64.1 # via - # google-auth-oauthlib + # brax # tensorboard -google-auth-oauthlib==1.1.0 - # via tensorboard -graphviz==0.20.1 - # via torchviz -grpcio==1.59.2 - # via tensorboard +gym==0.26.2 + # via brax +gym-notices==0.0.8 + # via gym hjson==3.1.0 # via deepspeed -huggingface-hub==0.17.3 +huggingface-hub==0.23.3 # via # -r benchmarks/timm/requirements.in # accelerate @@ -102,227 +147,403 @@ huggingface-hub==0.17.3 # evaluate # tokenizers # transformers -idna==3.4 +idna==3.7 # via # requests # yarl -importlib-metadata==6.8.0 +importlib-metadata==7.1.0 # via torchx -jinja2==3.1.2 - # via torch -joblib==1.3.2 +importlib-resources==6.4.0 + # via + # etils + # torchcompat +itsdangerous==2.2.0 + # via flask +jax[cuda12]==0.4.28 + # via + # -r benchmarks/brax/requirements.in + # brax + # chex + # flax + # jaxopt + # mujoco-mjx + # optax + # orbax-checkpoint +jax-cuda12-pjrt==0.4.28 + # via jax-cuda12-plugin +jax-cuda12-plugin==0.4.28 + # via jax +jaxlib==0.4.28+cuda12.cudnn89 + # via + # brax + # chex + # jax + # jaxopt + # mujoco-mjx + # optax + # orbax-checkpoint +jaxopt==0.8.3 + # via brax +jinja2==3.1.4 + # via + # brax + # flask + # torch +joblib==1.4.2 # via scikit-learn -lightning-utilities==0.9.0 +lightning-utilities==0.11.2 # via # pytorch-lightning # torchmetrics -markdown==3.5.1 +markdown==3.6 # via tensorboard markdown-it-py==3.0.0 # via rich -markupsafe==2.1.3 +markupsafe==2.1.5 # via # jinja2 # werkzeug mdurl==0.1.2 # via markdown-it-py +ml-collections==0.1.1 + # via brax +ml-dtypes==0.4.0 + # via + # jax + # jaxlib + # tensorstore mpmath==1.3.0 # via sympy -multidict==6.0.4 +msgpack==1.0.8 + # via + # flax + # orbax-checkpoint +mujoco==3.1.6 + # via + # brax + # mujoco-mjx +mujoco-mjx==3.1.6 + # via brax +multidict==6.0.5 # via # aiohttp # yarl -multiprocess==0.70.15 +multiprocess==0.70.16 # via # datasets # evaluate mypy-extensions==1.0.0 # via typing-inspect -networkx==3.2.1 +nest-asyncio==1.6.0 + # via 
orbax-checkpoint +networkx==3.3 # via torch ninja==1.11.1.1 # via # -r benchmarks/rwkv/requirements.in # deepspeed -numpy==1.26.1 +numpy==1.26.4 # via + # -r benchmarks/dlrm/requirements.in + # -r benchmarks/rwkv/requirements.in # -r benchmarks/stargan/requirements.in # -r benchmarks/super-slomo/requirements.in # accelerate + # brax + # chex # datasets # deepspeed + # dm-env # evaluate + # fairscale + # fbgemm-gpu + # flax + # gym + # jax + # jaxlib + # jaxopt + # ml-dtypes + # mujoco # onnx # opencv-python + # opt-einsum + # optax + # orbax-checkpoint # pandas # pyarrow # pytorch-lightning # scikit-learn # scipy # tensorboard + # tensorboardx + # tensorstore # torchmetrics # torchvision # transformers -oauthlib==3.2.2 - # via requests-oauthlib + # trimesh +nvidia-cublas-cu12==12.1.3.1 + # via + # jax + # nvidia-cudnn-cu12 + # nvidia-cusolver-cu12 + # torch +nvidia-cuda-cupti-cu12==12.1.105 + # via + # jax + # torch +nvidia-cuda-nvcc-cu12==12.5.40 + # via jax +nvidia-cuda-nvrtc-cu12==12.1.105 + # via torch +nvidia-cuda-runtime-cu12==12.1.105 + # via + # jax + # torch +nvidia-cudnn-cu12==8.9.2.26 + # via + # jax + # torch +nvidia-cufft-cu12==11.0.2.54 + # via + # jax + # torch +nvidia-curand-cu12==10.3.2.106 + # via torch +nvidia-cusolver-cu12==11.4.5.107 + # via + # jax + # torch +nvidia-cusparse-cu12==12.1.0.106 + # via + # jax + # nvidia-cusolver-cu12 + # torch +nvidia-nccl-cu12==2.20.5 + # via + # jax + # torch +nvidia-nvjitlink-cu12==12.5.40 + # via + # jax + # nvidia-cusolver-cu12 + # nvidia-cusparse-cu12 +nvidia-nvtx-cu12==12.1.105 + # via torch omegaconf==2.3.0 # via voir -onnx==1.15.0 +onnx==1.16.1 # via -r benchmarks/dlrm/requirements.in -opencv-python==4.8.1.78 +opencv-python==4.10.0.82 # via -r benchmarks/super-slomo/requirements.in -ovld==0.3.2 +opt-einsum==3.3.0 + # via jax +optax==0.2.2 + # via + # brax + # flax +orbax-checkpoint==0.5.15 + # via + # brax + # flax +ovld==0.3.5 # via voir -packaging==23.2 +packaging==24.1 # via # accelerate # datasets # 
deepspeed - # docker # evaluate # huggingface-hub # lightning-utilities # pytorch-lightning + # tensorboardx # torchmetrics # transformers -pandas==2.1.2 +pandas==2.2.2 # via # datasets # evaluate -pillow==10.1.0 - # via torchvision -protobuf==4.23.4 +pillow==10.3.0 + # via + # brax + # torchvision +protobuf==4.25.3 # via # onnx + # orbax-checkpoint # tensorboard -psutil==5.9.6 + # tensorboardx +psutil==5.9.8 # via # accelerate # deepspeed + # voir ptera==1.4.1 # via voir py-cpuinfo==9.0.0 # via deepspeed -pyarrow==14.0.0 +pyarrow==16.1.0 # via datasets -pyasn1==0.5.0 - # via - # pyasn1-modules - # rsa -pyasn1-modules==0.3.0 - # via google-auth -pydantic==1.10.13 +pyarrow-hotfix==0.6 + # via datasets +pydantic==1.10.15 # via # -r benchmarks/rwkv/requirements.in # deepspeed -pydot==1.4.2 +pydot==2.0.0 # via -r benchmarks/dlrm/requirements.in -pygments==2.16.1 +pygments==2.18.0 # via rich pynvml==11.5.0 - # via voir -pyparsing==3.1.1 + # via + # deepspeed + # voir +pyopengl==3.1.7 + # via mujoco +pyparsing==3.1.2 # via pydot pyre-extensions==0.0.30 # via torchx -python-dateutil==2.8.2 +python-dateutil==2.9.0.post0 # via pandas +pytinyrenderer==0.0.14 + # via brax pytorch-lightning==1.9.5 # via -r benchmarks/rwkv/requirements.in -pytz==2023.3.post1 +pytz==2024.1 # via pandas pyyaml==6.0.1 # via # -r benchmarks/timm/requirements.in # accelerate # datasets + # flax # huggingface-hub + # ml-collections # omegaconf + # orbax-checkpoint # pytorch-lightning # torchx # transformers reactivex==4.0.4 # via giving -regex==2023.10.3 +regex==2024.5.15 # via transformers -requests==2.31.0 +requests==2.32.3 # via # datasets # docker # evaluate - # fsspec # huggingface-hub - # requests-oauthlib - # responses - # tensorboard - # torchvision # transformers -requests-oauthlib==1.3.1 - # via google-auth-oauthlib -responses==0.18.0 - # via evaluate -rich==13.6.0 +rich==13.7.1 # via # -r benchmarks/accelerate_opt/requirements.in + # flax # voir -rsa==4.9 - # via google-auth 
-safetensors==0.4.0 +safetensors==0.4.3 # via # -r benchmarks/timm/requirements.in + # accelerate # transformers -scikit-learn==1.3.2 +scikit-learn==1.5.0 # via -r benchmarks/dlrm/requirements.in -scipy==1.11.3 - # via scikit-learn +scipy==1.13.1 + # via + # brax + # jax + # jaxlib + # jaxopt + # mujoco-mjx + # scikit-learn +sentencepiece==0.2.0 + # via -r benchmarks/llama/requirements.in six==1.16.0 # via # asttokens + # fire + # ml-collections # python-dateutil # tensorboard -sympy==1.12 +sympy==1.12.1 # via torch tabulate==0.9.0 # via torchx -tensorboard==2.15.0 +tensorboard==2.17.0 # via -r benchmarks/dlrm/requirements.in tensorboard-data-server==0.7.2 # via tensorboard -threadpoolctl==3.2.0 +tensorboardx==2.6.2.2 + # via brax +tensorstore==0.1.60 + # via + # flax + # orbax-checkpoint +termcolor==2.4.0 + # via fire +threadpoolctl==3.5.0 # via scikit-learn -tokenizers==0.14.1 +tokenizers==0.19.1 # via transformers -torch==2.1.0+cu118 +toolz==0.12.1 + # via chex +torch==2.3.1+cu121 # via + # -r benchmarks/accelerate_opt/requirements.in + # -r benchmarks/brax/requirements.in + # -r benchmarks/dlrm/requirements.in + # -r benchmarks/flops/requirements.in + # -r benchmarks/huggingface/requirements.in + # -r benchmarks/llama/requirements.in + # -r benchmarks/rwkv/requirements.in # -r benchmarks/stargan/requirements.in # -r benchmarks/super-slomo/requirements.in + # -r benchmarks/timm/requirements.in + # -r benchmarks/torchvision/requirements.in + # -r benchmarks/torchvision_ddp/requirements.in # accelerate # deepspeed + # fairscale # pytorch-lightning # torchaudio + # torchcompat # torchmetrics # torchvision # torchviz -torchaudio==2.1.0+cu118 +torchaudio==2.3.1+cu121 # via -r benchmarks/accelerate_opt/requirements.in +torchcompat==1.0.2 + # via + # -c .pin/../constraints/cuda.txt + # -r benchmarks/flops/requirements.in + # -r benchmarks/torchvision/requirements.in + # -r benchmarks/torchvision_ddp/requirements.in torchmetrics==1.0.3 # via # pytorch-lightning # 
torchrec -torchrec==0.5.0+cu118 +torchrec==0.7.0+cu121 # via -r benchmarks/dlrm/requirements.in -torchvision==0.16.0+cu118 +torchvision==0.18.1+cu121 # via + # -r benchmarks/accelerate_opt/requirements.in + # -r benchmarks/flops/requirements.in # -r benchmarks/stargan/requirements.in # -r benchmarks/super-slomo/requirements.in + # -r benchmarks/timm/requirements.in + # -r benchmarks/torchvision/requirements.in + # -r benchmarks/torchvision_ddp/requirements.in torchviz==0.0.2 # via -r benchmarks/dlrm/requirements.in -torchx==0.6.0 +torchx==0.5.0 # via -r benchmarks/dlrm/requirements.in -tqdm==4.66.1 +tqdm==4.66.4 # via + # -r benchmarks/dlrm/requirements.in + # -r benchmarks/flops/requirements.in # -r benchmarks/super-slomo/requirements.in # -r benchmarks/torchvision/requirements.in + # -r benchmarks/torchvision_ddp/requirements.in # datasets # deepspeed # evaluate @@ -330,16 +551,26 @@ tqdm==4.66.1 # pytorch-lightning # torchrec # transformers -transformers==4.34.1 +transformers==4.41.2 # via # -r benchmarks/accelerate_opt/requirements.in # -r benchmarks/huggingface/requirements.in -triton==2.1.0 + # -r benchmarks/llama/requirements.in +trimesh==4.4.1 + # via + # brax + # mujoco-mjx +triton==2.3.1 # via torch -typing-extensions==4.8.0 +typing-extensions==4.12.2 # via + # brax + # chex + # etils + # flax # huggingface-hub # lightning-utilities + # orbax-checkpoint # pydantic # pyre-extensions # pytorch-lightning @@ -348,32 +579,44 @@ typing-extensions==4.8.0 # typing-inspect typing-inspect==0.9.0 # via pyre-extensions -tzdata==2023.3 +tzdata==2024.1 # via pandas urllib3==1.26.18 # via # docker # requests - # responses # torchx varname==0.10.0 # via giving voir==0.2.15 # via + # -c .pin/../constraints/cuda.txt + # -r benchmarks/accelerate_opt/requirements.in + # -r benchmarks/brax/requirements.in + # -r benchmarks/dlrm/requirements.in + # -r benchmarks/flops/requirements.in + # -r benchmarks/huggingface/requirements.in + # -r benchmarks/llama/requirements.in + # -r 
benchmarks/rwkv/requirements.in # -r benchmarks/stargan/requirements.in # -r benchmarks/super-slomo/requirements.in -websocket-client==1.6.4 - # via docker -werkzeug==3.0.1 - # via tensorboard + # -r benchmarks/timm/requirements.in + # -r benchmarks/torchvision/requirements.in + # -r benchmarks/torchvision_ddp/requirements.in +werkzeug==3.0.3 + # via + # flask + # tensorboard xxhash==3.4.1 # via # datasets # evaluate -yarl==1.9.2 +yarl==1.9.4 # via aiohttp -zipp==3.17.0 - # via importlib-metadata +zipp==3.19.2 + # via + # etils + # importlib-metadata # The following packages are considered to be unsafe in a requirements file: # setuptools diff --git a/.pin/constraints-hpu-torch.txt b/.pin/constraints-hpu-torch.txt new file mode 100644 index 000000000..20f5f2672 --- /dev/null +++ b/.pin/constraints-hpu-torch.txt @@ -0,0 +1,621 @@ +# +# This file is autogenerated by pip-compile with Python 3.11 +# by the following command: +# +# pip-compile --output-file=.pin/constraints-hpu-torch.txt .pin/tmp-constraints.txt benchmarks/accelerate_opt/requirements.in benchmarks/brax/requirements.in benchmarks/dlrm/requirements.in benchmarks/flops/requirements.in benchmarks/huggingface/requirements.in benchmarks/llama/requirements.in benchmarks/rwkv/requirements.in benchmarks/stargan/requirements.in benchmarks/super-slomo/requirements.in benchmarks/timm/requirements.in benchmarks/torchvision/requirements.in benchmarks/torchvision_ddp/requirements.in +# +--extra-index-url https://pypi.ngc.nvidia.com +--find-links https://storage.googleapis.com/jax-releases/jax_cuda_releases.html +--trusted-host pypi.ngc.nvidia.com + +absl-py==2.1.0 + # via + # brax + # chex + # dm-env + # ml-collections + # mujoco + # mujoco-mjx + # optax + # orbax-checkpoint + # tensorboard +accelerate==0.31.0 + # via -r benchmarks/accelerate_opt/requirements.in +aiohttp==3.9.5 + # via + # datasets + # fsspec +aiosignal==1.3.1 + # via aiohttp +antlr4-python3-runtime==4.9.3 + # via omegaconf +asttokens==2.4.1 + # via 
giving +attrs==23.2.0 + # via aiohttp +blinker==1.8.2 + # via flask +brax==0.10.5 + # via -r benchmarks/brax/requirements.in +certifi==2024.6.2 + # via requests +charset-normalizer==3.3.2 + # via requests +chex==0.1.86 + # via optax +click==8.1.7 + # via flask +cloudpickle==3.0.0 + # via gym +codefind==0.1.6 + # via ptera +contextlib2==21.6.0 + # via ml-collections +datasets==2.19.2 + # via + # -r benchmarks/accelerate_opt/requirements.in + # -r benchmarks/llama/requirements.in + # evaluate +deepspeed==0.14.2 + # via + # -r benchmarks/accelerate_opt/requirements.in + # -r benchmarks/rwkv/requirements.in +dill==0.3.8 + # via + # datasets + # evaluate + # multiprocess +dm-env==1.6 + # via brax +dm-tree==0.1.8 + # via dm-env +docker==7.1.0 + # via torchx +docstring-parser==0.8.1 + # via torchx +etils[epath,epy]==1.9.1 + # via + # brax + # mujoco + # mujoco-mjx + # orbax-checkpoint +evaluate==0.4.2 + # via -r benchmarks/accelerate_opt/requirements.in +executing==1.2.0 + # via varname +fairscale==0.4.13 + # via -r benchmarks/llama/requirements.in +fbgemm-gpu==0.7.0 + # via torchrec +filelock==3.14.0 + # via + # datasets + # huggingface-hub + # torch + # torchx + # transformers + # triton +fire==0.6.0 + # via -r benchmarks/llama/requirements.in +flask==3.0.3 + # via + # brax + # flask-cors +flask-cors==4.0.1 + # via brax +flax==0.8.4 + # via brax +frozenlist==1.4.1 + # via + # aiohttp + # aiosignal +fsspec[http]==2024.3.1 + # via + # datasets + # etils + # evaluate + # huggingface-hub + # pytorch-lightning + # torch + # torchx +future==1.0.0 + # via -r benchmarks/dlrm/requirements.in +giving==0.4.2 + # via + # ptera + # voir +glfw==2.7.0 + # via mujoco +graphviz==0.20.3 + # via torchviz +grpcio==1.64.1 + # via + # brax + # tensorboard +gym==0.26.2 + # via brax +gym-notices==0.0.8 + # via gym +hjson==3.1.0 + # via deepspeed +huggingface-hub==0.23.3 + # via + # -r benchmarks/timm/requirements.in + # accelerate + # datasets + # evaluate + # tokenizers + # transformers 
+idna==3.7 + # via + # requests + # yarl +importlib-metadata==7.1.0 + # via torchx +importlib-resources==6.4.0 + # via + # etils + # torchcompat +itsdangerous==2.2.0 + # via flask +jax[cuda12]==0.4.28 + # via + # -r benchmarks/brax/requirements.in + # brax + # chex + # flax + # jaxopt + # mujoco-mjx + # optax + # orbax-checkpoint +jax-cuda12-pjrt==0.4.28 + # via jax-cuda12-plugin +jax-cuda12-plugin==0.4.28 + # via jax +jaxlib==0.4.28+cuda12.cudnn89 + # via + # brax + # chex + # jax + # jaxopt + # mujoco-mjx + # optax + # orbax-checkpoint +jaxopt==0.8.3 + # via brax +jinja2==3.1.4 + # via + # brax + # flask + # torch +joblib==1.4.2 + # via scikit-learn +lightning-utilities==0.11.2 + # via + # pytorch-lightning + # torchmetrics +markdown==3.6 + # via tensorboard +markdown-it-py==3.0.0 + # via rich +markupsafe==2.1.5 + # via + # jinja2 + # werkzeug +mdurl==0.1.2 + # via markdown-it-py +ml-collections==0.1.1 + # via brax +ml-dtypes==0.4.0 + # via + # jax + # jaxlib + # tensorstore +mpmath==1.3.0 + # via sympy +msgpack==1.0.8 + # via + # flax + # orbax-checkpoint +mujoco==3.1.6 + # via + # brax + # mujoco-mjx +mujoco-mjx==3.1.6 + # via brax +multidict==6.0.5 + # via + # aiohttp + # yarl +multiprocess==0.70.16 + # via + # datasets + # evaluate +mypy-extensions==1.0.0 + # via typing-inspect +nest-asyncio==1.6.0 + # via orbax-checkpoint +networkx==3.3 + # via torch +ninja==1.11.1.1 + # via + # -r benchmarks/rwkv/requirements.in + # deepspeed +numpy==1.26.4 + # via + # -r benchmarks/dlrm/requirements.in + # -r benchmarks/rwkv/requirements.in + # -r benchmarks/stargan/requirements.in + # -r benchmarks/super-slomo/requirements.in + # accelerate + # brax + # chex + # datasets + # deepspeed + # dm-env + # evaluate + # fairscale + # fbgemm-gpu + # flax + # gym + # jax + # jaxlib + # jaxopt + # ml-dtypes + # mujoco + # onnx + # opencv-python + # opt-einsum + # optax + # orbax-checkpoint + # pandas + # pyarrow + # pytorch-lightning + # scikit-learn + # scipy + # tensorboard + # 
tensorboardx + # tensorstore + # torchmetrics + # torchvision + # transformers + # trimesh +nvidia-cublas-cu12==12.1.3.1 + # via + # jax + # nvidia-cudnn-cu12 + # nvidia-cusolver-cu12 + # torch +nvidia-cuda-cupti-cu12==12.1.105 + # via + # jax + # torch +nvidia-cuda-nvcc-cu12==12.5.40 + # via jax +nvidia-cuda-nvrtc-cu12==12.1.105 + # via torch +nvidia-cuda-runtime-cu12==12.1.105 + # via + # jax + # torch +nvidia-cudnn-cu12==8.9.2.26 + # via + # jax + # torch +nvidia-cufft-cu12==11.0.2.54 + # via + # jax + # torch +nvidia-curand-cu12==10.3.2.106 + # via torch +nvidia-cusolver-cu12==11.4.5.107 + # via + # jax + # torch +nvidia-cusparse-cu12==12.1.0.106 + # via + # jax + # nvidia-cusolver-cu12 + # torch +nvidia-nccl-cu12==2.20.5 + # via + # jax + # torch +nvidia-nvjitlink-cu12==12.5.40 + # via + # jax + # nvidia-cusolver-cu12 + # nvidia-cusparse-cu12 +nvidia-nvtx-cu12==12.1.105 + # via torch +omegaconf==2.3.0 + # via voir +onnx==1.16.1 + # via -r benchmarks/dlrm/requirements.in +opencv-python==4.10.0.82 + # via -r benchmarks/super-slomo/requirements.in +opt-einsum==3.3.0 + # via jax +optax==0.2.2 + # via + # brax + # flax +orbax-checkpoint==0.5.15 + # via + # brax + # flax +ovld==0.3.5 + # via voir +packaging==24.1 + # via + # accelerate + # datasets + # deepspeed + # evaluate + # huggingface-hub + # lightning-utilities + # pytorch-lightning + # tensorboardx + # torchmetrics + # transformers +pandas==2.2.2 + # via + # datasets + # evaluate +pillow==10.3.0 + # via + # brax + # torchvision +protobuf==4.25.3 + # via + # onnx + # orbax-checkpoint + # tensorboard + # tensorboardx +psutil==5.9.8 + # via + # accelerate + # deepspeed + # voir +ptera==1.4.1 + # via voir +py-cpuinfo==9.0.0 + # via deepspeed +pyarrow==16.1.0 + # via datasets +pyarrow-hotfix==0.6 + # via datasets +pydantic==1.10.15 + # via + # -r benchmarks/rwkv/requirements.in + # deepspeed +pydot==2.0.0 + # via -r benchmarks/dlrm/requirements.in +pygments==2.18.0 + # via rich +pynvml==11.5.0 + # via + # 
deepspeed + # voir +pyopengl==3.1.7 + # via mujoco +pyparsing==3.1.2 + # via pydot +pyre-extensions==0.0.30 + # via torchx +python-dateutil==2.9.0.post0 + # via pandas +pytinyrenderer==0.0.14 + # via brax +pytorch-lightning==1.9.5 + # via -r benchmarks/rwkv/requirements.in +pytz==2024.1 + # via pandas +pyyaml==6.0.1 + # via + # -r benchmarks/timm/requirements.in + # accelerate + # datasets + # flax + # huggingface-hub + # ml-collections + # omegaconf + # orbax-checkpoint + # pytorch-lightning + # torchx + # transformers +reactivex==4.0.4 + # via giving +regex==2024.5.15 + # via transformers +requests==2.32.3 + # via + # datasets + # docker + # evaluate + # huggingface-hub + # transformers +rich==13.7.1 + # via + # -r benchmarks/accelerate_opt/requirements.in + # flax + # voir +safetensors==0.4.3 + # via + # -r benchmarks/timm/requirements.in + # accelerate + # transformers +scikit-learn==1.5.0 + # via -r benchmarks/dlrm/requirements.in +scipy==1.13.1 + # via + # brax + # jax + # jaxlib + # jaxopt + # mujoco-mjx + # scikit-learn +sentencepiece==0.2.0 + # via -r benchmarks/llama/requirements.in +six==1.16.0 + # via + # asttokens + # fire + # ml-collections + # python-dateutil + # tensorboard +sympy==1.12.1 + # via torch +tabulate==0.9.0 + # via torchx +tensorboard==2.17.0 + # via -r benchmarks/dlrm/requirements.in +tensorboard-data-server==0.7.2 + # via tensorboard +tensorboardx==2.6.2.2 + # via brax +tensorstore==0.1.60 + # via + # flax + # orbax-checkpoint +termcolor==2.4.0 + # via fire +threadpoolctl==3.5.0 + # via scikit-learn +tokenizers==0.19.1 + # via transformers +toolz==0.12.1 + # via chex +torch==2.3.1 + # via + # -r benchmarks/accelerate_opt/requirements.in + # -r benchmarks/brax/requirements.in + # -r benchmarks/dlrm/requirements.in + # -r benchmarks/flops/requirements.in + # -r benchmarks/huggingface/requirements.in + # -r benchmarks/llama/requirements.in + # -r benchmarks/rwkv/requirements.in + # -r benchmarks/stargan/requirements.in + # -r 
benchmarks/super-slomo/requirements.in + # -r benchmarks/timm/requirements.in + # -r benchmarks/torchvision/requirements.in + # -r benchmarks/torchvision_ddp/requirements.in + # accelerate + # deepspeed + # fairscale + # pytorch-lightning + # torchaudio + # torchcompat + # torchmetrics + # torchvision + # torchviz +torchaudio==2.3.1 + # via -r benchmarks/accelerate_opt/requirements.in +torchcompat==1.0.2 + # via + # -c .pin/../constraints/hpu.txt + # -r benchmarks/flops/requirements.in + # -r benchmarks/torchvision/requirements.in + # -r benchmarks/torchvision_ddp/requirements.in +torchmetrics==1.0.3 + # via + # pytorch-lightning + # torchrec +torchrec==0.7.0 + # via -r benchmarks/dlrm/requirements.in +torchvision==0.18.1 + # via + # -r benchmarks/accelerate_opt/requirements.in + # -r benchmarks/flops/requirements.in + # -r benchmarks/stargan/requirements.in + # -r benchmarks/super-slomo/requirements.in + # -r benchmarks/timm/requirements.in + # -r benchmarks/torchvision/requirements.in + # -r benchmarks/torchvision_ddp/requirements.in +torchviz==0.0.2 + # via -r benchmarks/dlrm/requirements.in +torchx==0.5.0 + # via -r benchmarks/dlrm/requirements.in +tqdm==4.66.4 + # via + # -r benchmarks/dlrm/requirements.in + # -r benchmarks/flops/requirements.in + # -r benchmarks/super-slomo/requirements.in + # -r benchmarks/torchvision/requirements.in + # -r benchmarks/torchvision_ddp/requirements.in + # datasets + # deepspeed + # evaluate + # huggingface-hub + # pytorch-lightning + # torchrec + # transformers +transformers==4.41.2 + # via + # -r benchmarks/accelerate_opt/requirements.in + # -r benchmarks/huggingface/requirements.in + # -r benchmarks/llama/requirements.in +trimesh==4.4.1 + # via + # brax + # mujoco-mjx +triton==2.3.1 + # via torch +typing-extensions==4.12.2 + # via + # brax + # chex + # etils + # flax + # huggingface-hub + # lightning-utilities + # orbax-checkpoint + # pydantic + # pyre-extensions + # pytorch-lightning + # reactivex + # torch + # 
typing-inspect +typing-inspect==0.9.0 + # via pyre-extensions +tzdata==2024.1 + # via pandas +urllib3==1.26.18 + # via + # docker + # requests + # torchx +varname==0.10.0 + # via giving +voir==0.2.14 + # via + # -c .pin/../constraints/hpu.txt + # -r benchmarks/accelerate_opt/requirements.in + # -r benchmarks/brax/requirements.in + # -r benchmarks/dlrm/requirements.in + # -r benchmarks/flops/requirements.in + # -r benchmarks/huggingface/requirements.in + # -r benchmarks/llama/requirements.in + # -r benchmarks/rwkv/requirements.in + # -r benchmarks/stargan/requirements.in + # -r benchmarks/super-slomo/requirements.in + # -r benchmarks/timm/requirements.in + # -r benchmarks/torchvision/requirements.in + # -r benchmarks/torchvision_ddp/requirements.in +werkzeug==3.0.3 + # via + # flask + # tensorboard +xxhash==3.4.1 + # via + # datasets + # evaluate +yarl==1.9.4 + # via aiohttp +zipp==3.19.2 + # via + # etils + # importlib-metadata + +# The following packages are considered to be unsafe in a requirements file: +# setuptools diff --git a/.pin/constraints-rocm-torch.txt b/.pin/constraints-rocm-torch.txt index e5b78ff85..156333c22 100644 --- a/.pin/constraints-rocm-torch.txt +++ b/.pin/constraints-rocm-torch.txt @@ -2,15 +2,27 @@ # This file is autogenerated by pip-compile with Python 3.11 # by the following command: # -# pip-compile --config=pyproject.toml --output-file=.pin/constraints-rocm-torch.txt --resolver=backtracking .pin/tmp-constraints.txt benchmarks/accelerate_opt/requirements.in benchmarks/dlrm/requirements.in benchmarks/flops/requirements.in benchmarks/huggingface/requirements.in benchmarks/rwkv/requirements.in benchmarks/stargan/requirements.in benchmarks/super-slomo/requirements.in benchmarks/timm/requirements.in benchmarks/torchvision/requirements.in +# pip-compile --output-file=.pin/constraints-rocm-torch.txt .pin/tmp-constraints.txt benchmarks/accelerate_opt/requirements.in benchmarks/brax/requirements.in benchmarks/dlrm/requirements.in 
benchmarks/flops/requirements.in benchmarks/huggingface/requirements.in benchmarks/llama/requirements.in benchmarks/rwkv/requirements.in benchmarks/stargan/requirements.in benchmarks/super-slomo/requirements.in benchmarks/timm/requirements.in benchmarks/torchvision/requirements.in benchmarks/torchvision_ddp/requirements.in # ---extra-index-url https://download.pytorch.org/whl/rocm5.6/ +--extra-index-url https://pypi.ngc.nvidia.com +--extra-index-url https://download.pytorch.org/whl/rocm6.0 +--find-links https://storage.googleapis.com/jax-releases/jax_cuda_releases.html +--trusted-host pypi.ngc.nvidia.com -absl-py==2.0.0 - # via tensorboard -accelerate==0.24.1 +absl-py==2.1.0 + # via + # brax + # chex + # dm-env + # ml-collections + # mujoco + # mujoco-mjx + # optax + # orbax-checkpoint + # tensorboard +accelerate==0.31.0 # via -r benchmarks/accelerate_opt/requirements.in -aiohttp==3.8.6 +aiohttp==3.9.5 # via # datasets # fsspec @@ -20,83 +32,114 @@ antlr4-python3-runtime==4.9.3 # via omegaconf asttokens==2.4.1 # via giving -async-timeout==4.0.3 +attrs==23.2.0 # via aiohttp -attrs==23.1.0 - # via aiohttp -cachetools==5.3.2 - # via google-auth -certifi==2023.7.22 +blinker==1.8.2 + # via flask +brax==0.10.5 + # via -r benchmarks/brax/requirements.in +certifi==2024.6.2 # via requests charset-normalizer==3.3.2 - # via - # aiohttp - # requests -cmake==3.27.7 - # via pytorch-triton-rocm -codefind==0.1.3 + # via requests +chex==0.1.86 + # via optax +click==8.1.7 + # via flask +cloudpickle==3.0.0 + # via gym +codefind==0.1.6 # via ptera -datasets==2.14.6 +contextlib2==21.6.0 + # via ml-collections +datasets==2.19.2 # via # -r benchmarks/accelerate_opt/requirements.in + # -r benchmarks/llama/requirements.in # evaluate -deepspeed==0.12.2 +deepspeed==0.14.2 # via # -r benchmarks/accelerate_opt/requirements.in # -r benchmarks/rwkv/requirements.in -dill==0.3.7 +dill==0.3.8 # via # datasets # evaluate # multiprocess -docker==6.1.3 +dm-env==1.6 + # via brax +dm-tree==0.1.8 + # via 
dm-env +docker==7.1.0 # via torchx docstring-parser==0.8.1 # via torchx -evaluate==0.4.1 +etils[epath,epy]==1.9.1 + # via + # brax + # mujoco + # mujoco-mjx + # orbax-checkpoint +evaluate==0.4.2 # via -r benchmarks/accelerate_opt/requirements.in executing==1.2.0 # via varname -fbgemm-gpu==0.5.0 +fairscale==0.4.13 + # via -r benchmarks/llama/requirements.in +fbgemm-gpu==0.7.0+rocm6.0 # via torchrec -filelock==3.13.1 +filelock==3.14.0 # via + # datasets # huggingface-hub # pytorch-triton-rocm # torch # torchx # transformers -frozenlist==1.4.0 +fire==0.6.0 + # via -r benchmarks/llama/requirements.in +flask==3.0.3 + # via + # brax + # flask-cors +flask-cors==4.0.1 + # via brax +flax==0.8.4 + # via brax +frozenlist==1.4.1 # via # aiohttp # aiosignal -fsspec[http]==2023.10.0 +fsspec[http]==2024.3.1 # via # datasets + # etils # evaluate # huggingface-hub # pytorch-lightning # torch # torchx -future==0.18.3 +future==1.0.0 # via -r benchmarks/dlrm/requirements.in giving==0.4.2 # via # ptera # voir -google-auth==2.23.4 +glfw==2.7.0 + # via mujoco +graphviz==0.20.3 + # via torchviz +grpcio==1.64.1 # via - # google-auth-oauthlib + # brax # tensorboard -google-auth-oauthlib==1.1.0 - # via tensorboard -graphviz==0.20.1 - # via torchviz -grpcio==1.59.2 - # via tensorboard +gym==0.26.2 + # via brax +gym-notices==0.0.8 + # via gym hjson==3.1.0 # via deepspeed -huggingface-hub==0.17.3 +huggingface-hub==0.23.3 # via # -r benchmarks/timm/requirements.in # accelerate @@ -104,235 +147,388 @@ huggingface-hub==0.17.3 # evaluate # tokenizers # transformers -idna==3.4 +idna==3.7 # via # requests # yarl -importlib-metadata==6.8.0 +importlib-metadata==7.1.0 # via torchx -jinja2==3.1.2 - # via torch -joblib==1.3.2 +importlib-resources==6.4.0 + # via + # etils + # torchcompat +itsdangerous==2.2.0 + # via flask +jax[cuda12]==0.4.28 + # via + # -r benchmarks/brax/requirements.in + # brax + # chex + # flax + # jaxopt + # mujoco-mjx + # optax + # orbax-checkpoint +jax-cuda12-pjrt==0.4.28 + # via 
jax-cuda12-plugin +jax-cuda12-plugin==0.4.28 + # via jax +jaxlib==0.4.28+cuda12.cudnn89 + # via + # brax + # chex + # jax + # jaxopt + # mujoco-mjx + # optax + # orbax-checkpoint +jaxopt==0.8.3 + # via brax +jinja2==3.1.4 + # via + # brax + # flask + # torch +joblib==1.4.2 # via scikit-learn -lightning-utilities==0.9.0 +lightning-utilities==0.11.2 # via # pytorch-lightning # torchmetrics -lit==17.0.4 - # via pytorch-triton-rocm -markdown==3.5.1 +markdown==3.6 # via tensorboard markdown-it-py==3.0.0 # via rich -markupsafe==2.1.3 +markupsafe==2.1.5 # via # jinja2 # werkzeug mdurl==0.1.2 # via markdown-it-py +ml-collections==0.1.1 + # via brax +ml-dtypes==0.4.0 + # via + # jax + # jaxlib + # tensorstore mpmath==1.3.0 # via sympy -multidict==6.0.4 +msgpack==1.0.8 + # via + # flax + # orbax-checkpoint +mujoco==3.1.6 + # via + # brax + # mujoco-mjx +mujoco-mjx==3.1.6 + # via brax +multidict==6.0.5 # via # aiohttp # yarl -multiprocess==0.70.15 +multiprocess==0.70.16 # via # datasets # evaluate mypy-extensions==1.0.0 # via typing-inspect -networkx==3.2.1 +nest-asyncio==1.6.0 + # via orbax-checkpoint +networkx==3.3 # via torch ninja==1.11.1.1 # via # -r benchmarks/rwkv/requirements.in # deepspeed -numpy==1.26.1 +numpy==1.26.4 # via # -r benchmarks/dlrm/requirements.in # -r benchmarks/rwkv/requirements.in + # -r benchmarks/stargan/requirements.in + # -r benchmarks/super-slomo/requirements.in # accelerate + # brax + # chex # datasets # deepspeed + # dm-env # evaluate + # fairscale # fbgemm-gpu + # flax + # gym + # jax + # jaxlib + # jaxopt + # ml-dtypes + # mujoco # onnx # opencv-python + # opt-einsum + # optax + # orbax-checkpoint # pandas # pyarrow # pytorch-lightning # scikit-learn # scipy # tensorboard + # tensorboardx + # tensorstore # torchmetrics # torchvision # transformers -oauthlib==3.2.2 - # via requests-oauthlib + # trimesh +nvidia-cublas-cu12==12.5.2.13 + # via + # jax + # nvidia-cudnn-cu12 + # nvidia-cusolver-cu12 +nvidia-cuda-cupti-cu12==12.5.39 + # via jax 
+nvidia-cuda-nvcc-cu12==12.5.40 + # via jax +nvidia-cuda-nvrtc-cu12==12.5.40 + # via nvidia-cudnn-cu12 +nvidia-cuda-runtime-cu12==12.5.39 + # via jax +nvidia-cudnn-cu12==8.9.7.29 + # via jax +nvidia-cufft-cu12==11.2.3.18 + # via jax +nvidia-cusolver-cu12==11.6.2.40 + # via jax +nvidia-cusparse-cu12==12.4.1.24 + # via + # jax + # nvidia-cusolver-cu12 +nvidia-nccl-cu12==2.21.5 + # via jax +nvidia-nvjitlink-cu12==12.5.40 + # via + # jax + # nvidia-cufft-cu12 + # nvidia-cusolver-cu12 + # nvidia-cusparse-cu12 omegaconf==2.3.0 # via voir -onnx==1.15.0 +onnx==1.16.1 # via -r benchmarks/dlrm/requirements.in -opencv-python==4.8.1.78 +opencv-python==4.10.0.82 # via -r benchmarks/super-slomo/requirements.in -ovld==0.3.2 +opt-einsum==3.3.0 + # via jax +optax==0.2.2 + # via + # brax + # flax +orbax-checkpoint==0.5.15 + # via + # brax + # flax +ovld==0.3.5 # via voir -packaging==23.2 +packaging==24.1 # via # accelerate # datasets # deepspeed - # docker # evaluate # huggingface-hub # lightning-utilities # pytorch-lightning + # tensorboardx # torchmetrics # transformers -pandas==2.1.2 +pandas==2.2.2 # via # datasets # evaluate -pillow==10.1.0 - # via torchvision -protobuf==4.23.4 +pillow==10.3.0 + # via + # brax + # torchvision +protobuf==4.25.3 # via # onnx + # orbax-checkpoint # tensorboard -psutil==5.9.6 + # tensorboardx +psutil==5.9.8 # via # accelerate # deepspeed + # voir ptera==1.4.1 # via voir py-cpuinfo==9.0.0 # via deepspeed -pyarrow==14.0.0 +pyarrow==16.1.0 # via datasets -pyasn1==0.5.0 - # via - # pyasn1-modules - # rsa -pyasn1-modules==0.3.0 - # via google-auth -pydantic==1.10.13 +pyarrow-hotfix==0.6 + # via datasets +pydantic==1.10.15 # via # -r benchmarks/rwkv/requirements.in # deepspeed -pydot==1.4.2 +pydot==2.0.0 # via -r benchmarks/dlrm/requirements.in -pygments==2.16.1 +pygments==2.18.0 # via rich pynvml==11.5.0 # via # deepspeed # voir -pyparsing==3.1.1 +pyopengl==3.1.7 + # via mujoco +pyparsing==3.1.2 # via pydot pyre-extensions==0.0.30 # via torchx 
-python-dateutil==2.8.2 +python-dateutil==2.9.0.post0 # via pandas +pytinyrenderer==0.0.14 + # via brax pytorch-lightning==1.9.5 # via -r benchmarks/rwkv/requirements.in -pytorch-triton-rocm==2.1.0 +pytorch-triton-rocm==2.3.1 # via torch -pytz==2023.3.post1 +pytz==2024.1 # via pandas pyyaml==6.0.1 # via # -r benchmarks/timm/requirements.in # accelerate # datasets + # flax # huggingface-hub + # ml-collections # omegaconf + # orbax-checkpoint # pytorch-lightning # torchx # transformers reactivex==4.0.4 # via giving -regex==2023.10.3 +regex==2024.5.15 # via transformers -requests==2.31.0 +requests==2.32.3 # via # datasets # docker # evaluate - # fsspec # huggingface-hub - # requests-oauthlib - # responses - # tensorboard - # torchvision # transformers -requests-oauthlib==1.3.1 - # via google-auth-oauthlib -responses==0.18.0 - # via evaluate -rich==13.6.0 +rich==13.7.1 # via # -r benchmarks/accelerate_opt/requirements.in + # flax # voir -rsa==4.9 - # via google-auth -safetensors==0.4.0 +safetensors==0.4.3 # via # -r benchmarks/timm/requirements.in + # accelerate # transformers -scikit-learn==1.3.2 +scikit-learn==1.5.0 # via -r benchmarks/dlrm/requirements.in -scipy==1.11.3 - # via scikit-learn +scipy==1.13.1 + # via + # brax + # jax + # jaxlib + # jaxopt + # mujoco-mjx + # scikit-learn +sentencepiece==0.2.0 + # via -r benchmarks/llama/requirements.in six==1.16.0 # via # asttokens + # fire + # ml-collections # python-dateutil # tensorboard -sympy==1.12 +sympy==1.12.1 # via torch tabulate==0.9.0 # via torchx -tensorboard==2.15.1 +tensorboard==2.17.0 # via -r benchmarks/dlrm/requirements.in tensorboard-data-server==0.7.2 # via tensorboard -threadpoolctl==3.2.0 +tensorboardx==2.6.2.2 + # via brax +tensorstore==0.1.60 + # via + # flax + # orbax-checkpoint +termcolor==2.4.0 + # via fire +threadpoolctl==3.5.0 # via scikit-learn -tokenizers==0.14.1 +tokenizers==0.19.1 # via transformers -torch==2.1.0+rocm5.6 +toolz==0.12.1 + # via chex +torch==2.3.1+rocm6.0 # via # -r 
benchmarks/accelerate_opt/requirements.in + # -r benchmarks/brax/requirements.in + # -r benchmarks/dlrm/requirements.in + # -r benchmarks/flops/requirements.in + # -r benchmarks/huggingface/requirements.in + # -r benchmarks/llama/requirements.in # -r benchmarks/rwkv/requirements.in + # -r benchmarks/stargan/requirements.in + # -r benchmarks/super-slomo/requirements.in + # -r benchmarks/timm/requirements.in + # -r benchmarks/torchvision/requirements.in + # -r benchmarks/torchvision_ddp/requirements.in # accelerate # deepspeed + # fairscale # pytorch-lightning - # pytorch-triton-rocm # torchaudio + # torchcompat # torchmetrics # torchvision # torchviz -torchaudio==2.1.0+rocm5.6 +torchaudio==2.3.1+rocm6.0 # via -r benchmarks/accelerate_opt/requirements.in +torchcompat==1.0.2 + # via + # -c .pin/../constraints/rocm.txt + # -r benchmarks/flops/requirements.in + # -r benchmarks/torchvision/requirements.in + # -r benchmarks/torchvision_ddp/requirements.in torchmetrics==1.0.3 # via # pytorch-lightning # torchrec -torchrec==0.5.0 +torchrec==0.7.0 # via -r benchmarks/dlrm/requirements.in -torchvision==0.16.0+rocm5.6 +torchvision==0.18.1+rocm6.0 # via # -r benchmarks/accelerate_opt/requirements.in + # -r benchmarks/flops/requirements.in # -r benchmarks/stargan/requirements.in + # -r benchmarks/super-slomo/requirements.in + # -r benchmarks/timm/requirements.in + # -r benchmarks/torchvision/requirements.in + # -r benchmarks/torchvision_ddp/requirements.in torchviz==0.0.2 # via -r benchmarks/dlrm/requirements.in torchx==0.5.0 # via -r benchmarks/dlrm/requirements.in -tqdm==4.66.1 +tqdm==4.66.4 # via + # -r benchmarks/dlrm/requirements.in # -r benchmarks/flops/requirements.in + # -r benchmarks/super-slomo/requirements.in # -r benchmarks/torchvision/requirements.in + # -r benchmarks/torchvision_ddp/requirements.in # datasets # deepspeed # evaluate @@ -340,14 +536,24 @@ tqdm==4.66.1 # pytorch-lightning # torchrec # transformers -transformers==4.35.0 +transformers==4.41.2 # via # -r 
benchmarks/accelerate_opt/requirements.in # -r benchmarks/huggingface/requirements.in -typing-extensions==4.8.0 + # -r benchmarks/llama/requirements.in +trimesh==4.4.1 + # via + # brax + # mujoco-mjx +typing-extensions==4.12.2 # via + # brax + # chex + # etils + # flax # huggingface-hub # lightning-utilities + # orbax-checkpoint # pydantic # pyre-extensions # pytorch-lightning @@ -356,32 +562,44 @@ typing-extensions==4.8.0 # typing-inspect typing-inspect==0.9.0 # via pyre-extensions -tzdata==2023.3 +tzdata==2024.1 # via pandas urllib3==1.26.18 # via # docker # requests - # responses # torchx varname==0.10.0 # via giving voir==0.2.15 # via + # -c .pin/../constraints/rocm.txt # -r benchmarks/accelerate_opt/requirements.in + # -r benchmarks/brax/requirements.in + # -r benchmarks/dlrm/requirements.in + # -r benchmarks/flops/requirements.in + # -r benchmarks/huggingface/requirements.in + # -r benchmarks/llama/requirements.in # -r benchmarks/rwkv/requirements.in -websocket-client==1.6.4 - # via docker -werkzeug==3.0.1 - # via tensorboard + # -r benchmarks/stargan/requirements.in + # -r benchmarks/super-slomo/requirements.in + # -r benchmarks/timm/requirements.in + # -r benchmarks/torchvision/requirements.in + # -r benchmarks/torchvision_ddp/requirements.in +werkzeug==3.0.3 + # via + # flask + # tensorboard xxhash==3.4.1 # via # datasets # evaluate -yarl==1.9.2 +yarl==1.9.4 # via aiohttp -zipp==3.17.0 - # via importlib-metadata +zipp==3.19.2 + # via + # etils + # importlib-metadata # The following packages are considered to be unsafe in a requirements file: # setuptools diff --git a/.pin/constraints-xpu-torch.txt b/.pin/constraints-xpu-torch.txt index ea806e191..1d12ca32a 100644 --- a/.pin/constraints-xpu-torch.txt +++ b/.pin/constraints-xpu-torch.txt @@ -2,13 +2,25 @@ # This file is autogenerated by pip-compile with Python 3.11 # by the following command: # -# pip-compile --output-file=.pin/constraints-xpu-torch.txt .pin/tmp-constraints.txt 
benchmarks/accelerate_opt/requirements.in benchmarks/dlrm/requirements.in benchmarks/flops/requirements.in benchmarks/huggingface/requirements.in benchmarks/llama/requirements.in benchmarks/rwkv/requirements.in benchmarks/stargan/requirements.in benchmarks/super-slomo/requirements.in benchmarks/timm/requirements.in benchmarks/torchvision/requirements.in +# pip-compile --output-file=.pin/constraints-xpu-torch.txt .pin/tmp-constraints.txt benchmarks/accelerate_opt/requirements.in benchmarks/brax/requirements.in benchmarks/dlrm/requirements.in benchmarks/flops/requirements.in benchmarks/huggingface/requirements.in benchmarks/llama/requirements.in benchmarks/rwkv/requirements.in benchmarks/stargan/requirements.in benchmarks/super-slomo/requirements.in benchmarks/timm/requirements.in benchmarks/torchvision/requirements.in benchmarks/torchvision_ddp/requirements.in # +--extra-index-url https://pypi.ngc.nvidia.com --extra-index-url https://pytorch-extension.intel.com/release-whl/stable/xpu/us/ +--find-links https://storage.googleapis.com/jax-releases/jax_cuda_releases.html +--trusted-host pypi.ngc.nvidia.com absl-py==2.1.0 - # via tensorboard -accelerate==0.29.3 + # via + # brax + # chex + # dm-env + # ml-collections + # mujoco + # mujoco-mjx + # optax + # orbax-checkpoint + # tensorboard +accelerate==0.31.0 # via -r benchmarks/accelerate_opt/requirements.in aiohttp==3.9.5 # via @@ -22,18 +34,30 @@ asttokens==2.4.1 # via giving attrs==23.2.0 # via aiohttp -certifi==2024.2.2 +blinker==1.8.2 + # via flask +brax==0.10.5 + # via -r benchmarks/brax/requirements.in +certifi==2024.6.2 # via requests charset-normalizer==3.3.2 # via requests -codefind==0.1.3 +chex==0.1.86 + # via optax +click==8.1.7 + # via flask +cloudpickle==3.0.0 + # via gym +codefind==0.1.6 # via ptera -datasets==2.18.0 +contextlib2==21.6.0 + # via ml-collections +datasets==2.19.2 # via # -r benchmarks/accelerate_opt/requirements.in # -r benchmarks/llama/requirements.in # evaluate -deepspeed==0.14.1 
+deepspeed==0.14.2 # via # -r benchmarks/accelerate_opt/requirements.in # -r benchmarks/rwkv/requirements.in @@ -42,34 +66,54 @@ dill==0.3.8 # datasets # evaluate # multiprocess -docker==7.0.0 +dm-env==1.6 + # via brax +dm-tree==0.1.8 + # via dm-env +docker==7.1.0 # via torchx docstring-parser==0.8.1 # via torchx -evaluate==0.4.1 +etils[epath,epy]==1.9.1 + # via + # brax + # mujoco + # mujoco-mjx + # orbax-checkpoint +evaluate==0.4.2 # via -r benchmarks/accelerate_opt/requirements.in executing==1.2.0 # via varname fairscale==0.4.13 # via -r benchmarks/llama/requirements.in -fbgemm-gpu==0.6.0 +fbgemm-gpu==0.7.0 # via torchrec -filelock==3.13.4 +filelock==3.14.0 # via # datasets # huggingface-hub # torch # torchx # transformers + # triton fire==0.6.0 # via -r benchmarks/llama/requirements.in +flask==3.0.3 + # via + # brax + # flask-cors +flask-cors==4.0.1 + # via brax +flax==0.8.4 + # via brax frozenlist==1.4.1 # via # aiohttp # aiosignal -fsspec[http]==2024.2.0 +fsspec[http]==2024.3.1 # via # datasets + # etils # evaluate # huggingface-hub # pytorch-lightning @@ -81,13 +125,21 @@ giving==0.4.2 # via # ptera # voir +glfw==2.7.0 + # via mujoco graphviz==0.20.3 # via torchviz -grpcio==1.62.2 - # via tensorboard +grpcio==1.64.1 + # via + # brax + # tensorboard +gym==0.26.2 + # via brax +gym-notices==0.0.8 + # via gym hjson==3.1.0 # via deepspeed -huggingface-hub==0.22.2 +huggingface-hub==0.23.3 # via # -r benchmarks/timm/requirements.in # accelerate @@ -101,9 +153,43 @@ idna==3.7 # yarl importlib-metadata==7.1.0 # via torchx -jinja2==3.1.3 - # via torch -joblib==1.4.0 +importlib-resources==6.4.0 + # via + # etils + # torchcompat +itsdangerous==2.2.0 + # via flask +jax[cuda12]==0.4.28 + # via + # -r benchmarks/brax/requirements.in + # brax + # chex + # flax + # jaxopt + # mujoco-mjx + # optax + # orbax-checkpoint +jax-cuda12-pjrt==0.4.28 + # via jax-cuda12-plugin +jax-cuda12-plugin==0.4.28 + # via jax +jaxlib==0.4.28+cuda12.cudnn89 + # via + # brax + # chex + # jax + # 
jaxopt + # mujoco-mjx + # optax + # orbax-checkpoint +jaxopt==0.8.3 + # via brax +jinja2==3.1.4 + # via + # brax + # flask + # torch +joblib==1.4.2 # via scikit-learn lightning-utilities==0.11.2 # via @@ -119,8 +205,25 @@ markupsafe==2.1.5 # werkzeug mdurl==0.1.2 # via markdown-it-py +ml-collections==0.1.1 + # via brax +ml-dtypes==0.4.0 + # via + # jax + # jaxlib + # tensorstore mpmath==1.3.0 # via sympy +msgpack==1.0.8 + # via + # flax + # orbax-checkpoint +mujoco==3.1.6 + # via + # brax + # mujoco-mjx +mujoco-mjx==3.1.6 + # via brax multidict==6.0.5 # via # aiohttp @@ -131,7 +234,9 @@ multiprocess==0.70.16 # evaluate mypy-extensions==1.0.0 # via typing-inspect -networkx +nest-asyncio==1.6.0 + # via orbax-checkpoint +networkx==3.3 # via torch ninja==1.11.1.1 # via @@ -144,40 +249,114 @@ numpy==1.26.4 # -r benchmarks/stargan/requirements.in # -r benchmarks/super-slomo/requirements.in # accelerate + # brax + # chex # datasets # deepspeed + # dm-env # evaluate # fairscale # fbgemm-gpu + # flax + # gym + # jax + # jaxlib + # jaxopt + # ml-dtypes + # mujoco # onnx # opencv-python + # opt-einsum + # optax + # orbax-checkpoint # pandas # pyarrow # pytorch-lightning # scikit-learn # scipy # tensorboard + # tensorboardx + # tensorstore # torchmetrics # torchvision # transformers + # trimesh +nvidia-cublas-cu12==12.1.3.1 + # via + # jax + # nvidia-cudnn-cu12 + # nvidia-cusolver-cu12 + # torch +nvidia-cuda-cupti-cu12==12.1.105 + # via + # jax + # torch +nvidia-cuda-nvcc-cu12==12.5.40 + # via jax +nvidia-cuda-nvrtc-cu12==12.1.105 + # via torch +nvidia-cuda-runtime-cu12==12.1.105 + # via + # jax + # torch +nvidia-cudnn-cu12==8.9.2.26 + # via + # jax + # torch +nvidia-cufft-cu12==11.0.2.54 + # via + # jax + # torch +nvidia-curand-cu12==10.3.2.106 + # via torch +nvidia-cusolver-cu12==11.4.5.107 + # via + # jax + # torch +nvidia-cusparse-cu12==12.1.0.106 + # via + # jax + # nvidia-cusolver-cu12 + # torch +nvidia-nccl-cu12==2.20.5 + # via + # jax + # torch 
+nvidia-nvjitlink-cu12==12.5.40 + # via + # jax + # nvidia-cusolver-cu12 + # nvidia-cusparse-cu12 +nvidia-nvtx-cu12==12.1.105 + # via torch omegaconf==2.3.0 # via voir -onnx==1.16.0 +onnx==1.16.1 # via -r benchmarks/dlrm/requirements.in -opencv-python==4.9.0.80 +opencv-python==4.10.0.82 # via -r benchmarks/super-slomo/requirements.in +opt-einsum==3.3.0 + # via jax +optax==0.2.2 + # via + # brax + # flax +orbax-checkpoint==0.5.15 + # via + # brax + # flax ovld==0.3.5 # via voir -packaging==24.0 +packaging==24.1 # via # accelerate # datasets # deepspeed - # docker # evaluate # huggingface-hub # lightning-utilities # pytorch-lightning + # tensorboardx # torchmetrics # transformers pandas==2.2.2 @@ -185,20 +364,25 @@ pandas==2.2.2 # datasets # evaluate pillow==10.3.0 - # via torchvision -protobuf==5.26.1 + # via + # brax + # torchvision +protobuf==4.25.3 # via # onnx + # orbax-checkpoint # tensorboard + # tensorboardx psutil==5.9.8 # via # accelerate # deepspeed + # voir ptera==1.4.1 # via voir py-cpuinfo==9.0.0 # via deepspeed -pyarrow==15.0.2 +pyarrow==16.1.0 # via datasets pyarrow-hotfix==0.6 # via datasets @@ -208,18 +392,22 @@ pydantic==1.10.15 # deepspeed pydot==2.0.0 # via -r benchmarks/dlrm/requirements.in -pygments==2.17.2 +pygments==2.18.0 # via rich pynvml==11.5.0 # via # deepspeed # voir +pyopengl==3.1.7 + # via mujoco pyparsing==3.1.2 # via pydot pyre-extensions==0.0.30 # via torchx python-dateutil==2.9.0.post0 # via pandas +pytinyrenderer==0.0.14 + # via brax pytorch-lightning==1.9.5 # via -r benchmarks/rwkv/requirements.in pytz==2024.1 @@ -229,65 +417,81 @@ pyyaml==6.0.1 # -r benchmarks/timm/requirements.in # accelerate # datasets + # flax # huggingface-hub + # ml-collections # omegaconf + # orbax-checkpoint # pytorch-lightning # torchx # transformers reactivex==4.0.4 # via giving -regex==2024.4.16 +regex==2024.5.15 # via transformers -requests==2.31.0 +requests==2.32.3 # via # datasets # docker # evaluate # huggingface-hub - # responses - # torchvision 
# transformers -responses==0.18.0 - # via evaluate rich==13.7.1 # via # -r benchmarks/accelerate_opt/requirements.in + # flax # voir safetensors==0.4.3 # via # -r benchmarks/timm/requirements.in # accelerate # transformers -scikit-learn==1.4.2 +scikit-learn==1.5.0 # via -r benchmarks/dlrm/requirements.in -scipy==1.13.0 - # via scikit-learn +scipy==1.13.1 + # via + # brax + # jax + # jaxlib + # jaxopt + # mujoco-mjx + # scikit-learn sentencepiece==0.2.0 # via -r benchmarks/llama/requirements.in six==1.16.0 # via # asttokens # fire + # ml-collections # python-dateutil # tensorboard -sympy==1.12 +sympy==1.12.1 # via torch tabulate==0.9.0 # via torchx -tensorboard==2.16.2 +tensorboard==2.17.0 # via -r benchmarks/dlrm/requirements.in tensorboard-data-server==0.7.2 # via tensorboard +tensorboardx==2.6.2.2 + # via brax +tensorstore==0.1.60 + # via + # flax + # orbax-checkpoint termcolor==2.4.0 # via fire -threadpoolctl==3.4.0 +threadpoolctl==3.5.0 # via scikit-learn tokenizers==0.19.1 # via transformers -torch==2.1.0a0+cxx11.abi +toolz==0.12.1 + # via chex +torch==2.3.1 # via # -c .pin/../constraints/xpu.txt # -r benchmarks/accelerate_opt/requirements.in + # -r benchmarks/brax/requirements.in # -r benchmarks/dlrm/requirements.in # -r benchmarks/flops/requirements.in # -r benchmarks/huggingface/requirements.in @@ -297,25 +501,33 @@ torch==2.1.0a0+cxx11.abi # -r benchmarks/super-slomo/requirements.in # -r benchmarks/timm/requirements.in # -r benchmarks/torchvision/requirements.in + # -r benchmarks/torchvision_ddp/requirements.in # accelerate # deepspeed # fairscale # pytorch-lightning # torchaudio + # torchcompat # torchmetrics # torchvision # torchviz -torchaudio==2.1.0a0+cxx11.abi +torchaudio==2.3.1 # via # -c .pin/../constraints/xpu.txt # -r benchmarks/accelerate_opt/requirements.in +torchcompat==1.0.2 + # via + # -c .pin/../constraints/xpu.txt + # -r benchmarks/flops/requirements.in + # -r benchmarks/torchvision/requirements.in + # -r 
benchmarks/torchvision_ddp/requirements.in torchmetrics==1.0.3 # via # pytorch-lightning # torchrec -torchrec==0.6.0 +torchrec==0.7.0 # via -r benchmarks/dlrm/requirements.in -torchvision==0.16.0a0+cxx11.abi +torchvision==0.18.1 # via # -c .pin/../constraints/xpu.txt # -r benchmarks/accelerate_opt/requirements.in @@ -324,16 +536,18 @@ torchvision==0.16.0a0+cxx11.abi # -r benchmarks/dlrm/requirements.in # -r benchmarks/super-slomo/requirements.in # -r benchmarks/timm/requirements.in # -r benchmarks/torchvision/requirements.in + # -r benchmarks/torchvision_ddp/requirements.in torchviz==0.0.2 # via -r benchmarks/dlrm/requirements.in torchx==0.5.0 # via -r benchmarks/dlrm/requirements.in -tqdm==4.66.2 +tqdm==4.66.4 # via # -r benchmarks/dlrm/requirements.in # -r benchmarks/flops/requirements.in # -r benchmarks/super-slomo/requirements.in # -r benchmarks/torchvision/requirements.in + # -r benchmarks/torchvision_ddp/requirements.in # datasets # deepspeed # evaluate @@ -341,15 +555,26 @@ tqdm==4.66.2 # pytorch-lightning # torchrec # transformers -transformers==4.40.0 +transformers==4.41.2 # via # -r benchmarks/accelerate_opt/requirements.in # -r benchmarks/huggingface/requirements.in # -r benchmarks/llama/requirements.in -typing-extensions==4.11.0 +trimesh==4.4.1 # via + # brax + # mujoco-mjx +triton==2.3.1 + # via torch +typing-extensions==4.12.2 + # via + # brax + # chex + # etils + # flax # huggingface-hub # lightning-utilities + # orbax-checkpoint # pydantic # pyre-extensions # pytorch-lightning @@ -364,14 +589,14 @@ typing-extensions==4.12.2 # typing-inspect typing-inspect==0.9.0 # via pyre-extensions -tzdata==2023.3 +tzdata==2024.1 # via pandas urllib3==1.26.18 # via # docker # requests - # responses # torchx varname==0.10.0 # via giving voir==0.2.15 # via # -c .pin/../constraints/xpu.txt # -r benchmarks/accelerate_opt/requirements.in + # -r benchmarks/brax/requirements.in + # -r benchmarks/dlrm/requirements.in + # -r benchmarks/flops/requirements.in + # -r benchmarks/huggingface/requirements.in + # -r benchmarks/llama/requirements.in # -r benchmarks/rwkv/requirements.in @@ -381,16 +606,21 @@ voir==0.2.15 # -r 
benchmarks/super-slomo/requirements.in # -r benchmarks/timm/requirements.in # -r benchmarks/torchvision/requirements.in -werkzeug==3.0.2 - # via tensorboard + # -r benchmarks/torchvision_ddp/requirements.in +werkzeug==3.0.3 + # via + # flask + # tensorboard xxhash==3.4.1 # via # datasets # evaluate yarl==1.9.4 # via aiohttp -zipp==3.18.1 - # via importlib-metadata +zipp==3.19.2 + # via + # etils + # importlib-metadata # The following packages are considered to be unsafe in a requirements file: # setuptools diff --git a/benchmarks/accelerate_opt/main.py b/benchmarks/accelerate_opt/main.py index 36b460844..9c003eda1 100644 --- a/benchmarks/accelerate_opt/main.py +++ b/benchmarks/accelerate_opt/main.py @@ -86,6 +86,7 @@ def arguments(): get_scheduler, ) from benchmate.observer import BenchObserver +from benchmate.monitor import milabench_sys_monitor logger = get_logger(__name__) @@ -124,7 +125,6 @@ class CustomInitProcessGroupKwargs(InitProcessGroupKwargs): rank=int(os.environ["RANK"]), world_size=int(os.environ["WORLD_SIZE"]), ) - print(init_process_group_kwargs.backend) # Accelerator SUCK, it is impossible to make it use hccl # We can bypass Accelerator logic by initializing the group ourselves @@ -143,8 +143,8 @@ class CustomInitProcessGroupKwargs(InitProcessGroupKwargs): # Set up logging for milabench (only in the run phase, for the main process) monitor = None if not is_prepare_phase and accelerator.is_main_process: - from benchmate.common import opt_voir - monitor = opt_voir() + # Set up logging for milabench (only in the run phase, for the main process) + milabench_sys_monitor() logging.basicConfig( level=logging.INFO, @@ -170,13 +170,13 @@ class CustomInitProcessGroupKwargs(InitProcessGroupKwargs): raw_datasets["validation"] = load_dataset( dataset_name, dataset_config_name, - split=f"train[:{validation_split_percentage}%]", + split=f"train[:{validation_split_percentage}%]", revision=config["dataset_rev"] ) raw_datasets["train"] = load_dataset( dataset_name, 
dataset_config_name, - split=f"train[{validation_split_percentage}%:]", + split=f"train[{validation_split_percentage}%:]", revision=config["dataset_rev"] ) @@ -360,9 +360,9 @@ def group_texts(examples): starting_epoch = 0 observer = BenchObserver( - event_fn=acc.Event, - earlystop=30, - rank=int(os.environ["RANK"]), + event_fn=acc.Event, + earlystop=30, + rank=int(os.environ["RANK"]), device=acc.fetch_device(int(os.environ["RANK"])), stdout=True, batch_size_fn=lambda batch: batch["labels"].shape[0] diff --git a/benchmarks/accelerate_opt/requirements.cuda.txt b/benchmarks/accelerate_opt/requirements.cuda.txt index 14fbc8701..76e10219d 100644 --- a/benchmarks/accelerate_opt/requirements.cuda.txt +++ b/benchmarks/accelerate_opt/requirements.cuda.txt @@ -2,13 +2,18 @@ # This file is autogenerated by pip-compile with Python 3.11 # by the following command: # -# pip-compile --config=pyproject.toml --output-file=benchmarks/accelerate_opt/requirements.cuda.txt --resolver=backtracking .pin/tmp-constraints-cuda-opt.txt benchmarks/accelerate_opt/requirements.in +# pip-compile --output-file=benchmarks/accelerate_opt/requirements.cuda.txt .pin/tmp-constraints-cuda-opt.txt benchmarks/accelerate_opt/requirements.in # ---extra-index-url https://download.pytorch.org/whl/cu118 +--extra-index-url https://pypi.ngc.nvidia.com +--extra-index-url https://download.pytorch.org/whl/cu121 +--find-links https://storage.googleapis.com/jax-releases/jax_cuda_releases.html +--trusted-host pypi.ngc.nvidia.com -accelerate==0.24.1 - # via -r benchmarks/accelerate_opt/requirements.in -aiohttp==3.8.6 +accelerate==0.31.0 + # via + # -c .pin/../.pin/constraints-cuda-torch.txt + # -r benchmarks/accelerate_opt/requirements.in +aiohttp==3.9.5 # via # -c .pin/../.pin/constraints-cuda-torch.txt # datasets @@ -25,58 +30,59 @@ asttokens==2.4.1 # via # -c .pin/../.pin/constraints-cuda-torch.txt # giving -async-timeout==4.0.3 - # via - # -c .pin/../.pin/constraints-cuda-torch.txt - # aiohttp -attrs==23.1.0 
+attrs==23.2.0 # via # -c .pin/../.pin/constraints-cuda-torch.txt # aiohttp -certifi==2023.7.22 +certifi==2024.6.2 # via # -c .pin/../.pin/constraints-cuda-torch.txt # requests charset-normalizer==3.3.2 # via # -c .pin/../.pin/constraints-cuda-torch.txt - # aiohttp # requests -codefind==0.1.3 +codefind==0.1.6 # via # -c .pin/../.pin/constraints-cuda-torch.txt # ptera -datasets==2.14.6 +datasets==2.19.2 # via + # -c .pin/../.pin/constraints-cuda-torch.txt # -r benchmarks/accelerate_opt/requirements.in # evaluate -deepspeed==0.12.2 - # via -r benchmarks/accelerate_opt/requirements.in -dill==0.3.7 +deepspeed==0.14.2 + # via + # -c .pin/../.pin/constraints-cuda-torch.txt + # -r benchmarks/accelerate_opt/requirements.in +dill==0.3.8 # via # -c .pin/../.pin/constraints-cuda-torch.txt # datasets # evaluate # multiprocess -evaluate==0.4.1 - # via -r benchmarks/accelerate_opt/requirements.in +evaluate==0.4.2 + # via + # -c .pin/../.pin/constraints-cuda-torch.txt + # -r benchmarks/accelerate_opt/requirements.in executing==1.2.0 # via # -c .pin/../.pin/constraints-cuda-torch.txt # varname -filelock==3.13.1 +filelock==3.14.0 # via # -c .pin/../.pin/constraints-cuda-torch.txt + # datasets # huggingface-hub # torch # transformers # triton -frozenlist==1.4.0 +frozenlist==1.4.1 # via # -c .pin/../.pin/constraints-cuda-torch.txt # aiohttp # aiosignal -fsspec[http]==2023.10.0 +fsspec[http]==2024.3.1 # via # -c .pin/../.pin/constraints-cuda-torch.txt # datasets @@ -92,7 +98,7 @@ hjson==3.1.0 # via # -c .pin/../.pin/constraints-cuda-torch.txt # deepspeed -huggingface-hub==0.17.3 +huggingface-hub==0.23.3 # via # -c .pin/../.pin/constraints-cuda-torch.txt # accelerate @@ -100,12 +106,12 @@ huggingface-hub==0.17.3 # evaluate # tokenizers # transformers -idna==3.4 +idna==3.7 # via # -c .pin/../.pin/constraints-cuda-torch.txt # requests # yarl -jinja2==3.1.2 +jinja2==3.1.4 # via # -c .pin/../.pin/constraints-cuda-torch.txt # torch @@ -113,7 +119,7 @@ markdown-it-py==3.0.0 # via # -c 
.pin/../.pin/constraints-cuda-torch.txt # rich -markupsafe==2.1.3 +markupsafe==2.1.5 # via # -c .pin/../.pin/constraints-cuda-torch.txt # jinja2 @@ -125,17 +131,17 @@ mpmath==1.3.0 # via # -c .pin/../.pin/constraints-cuda-torch.txt # sympy -multidict==6.0.4 +multidict==6.0.5 # via # -c .pin/../.pin/constraints-cuda-torch.txt # aiohttp # yarl -multiprocess==0.70.15 +multiprocess==0.70.16 # via # -c .pin/../.pin/constraints-cuda-torch.txt # datasets # evaluate -networkx==3.2.1 +networkx==3.3 # via # -c .pin/../.pin/constraints-cuda-torch.txt # torch @@ -143,7 +149,7 @@ ninja==1.11.1.1 # via # -c .pin/../.pin/constraints-cuda-torch.txt # deepspeed -numpy==1.26.1 +numpy==1.26.4 # via # -c .pin/../.pin/constraints-cuda-torch.txt # accelerate @@ -154,15 +160,67 @@ numpy==1.26.1 # pyarrow # torchvision # transformers +nvidia-cublas-cu12==12.1.3.1 + # via + # -c .pin/../.pin/constraints-cuda-torch.txt + # nvidia-cudnn-cu12 + # nvidia-cusolver-cu12 + # torch +nvidia-cuda-cupti-cu12==12.1.105 + # via + # -c .pin/../.pin/constraints-cuda-torch.txt + # torch +nvidia-cuda-nvrtc-cu12==12.1.105 + # via + # -c .pin/../.pin/constraints-cuda-torch.txt + # torch +nvidia-cuda-runtime-cu12==12.1.105 + # via + # -c .pin/../.pin/constraints-cuda-torch.txt + # torch +nvidia-cudnn-cu12==8.9.2.26 + # via + # -c .pin/../.pin/constraints-cuda-torch.txt + # torch +nvidia-cufft-cu12==11.0.2.54 + # via + # -c .pin/../.pin/constraints-cuda-torch.txt + # torch +nvidia-curand-cu12==10.3.2.106 + # via + # -c .pin/../.pin/constraints-cuda-torch.txt + # torch +nvidia-cusolver-cu12==11.4.5.107 + # via + # -c .pin/../.pin/constraints-cuda-torch.txt + # torch +nvidia-cusparse-cu12==12.1.0.106 + # via + # -c .pin/../.pin/constraints-cuda-torch.txt + # nvidia-cusolver-cu12 + # torch +nvidia-nccl-cu12==2.20.5 + # via + # -c .pin/../.pin/constraints-cuda-torch.txt + # torch +nvidia-nvjitlink-cu12==12.5.40 + # via + # -c .pin/../.pin/constraints-cuda-torch.txt + # nvidia-cusolver-cu12 + # nvidia-cusparse-cu12 
+nvidia-nvtx-cu12==12.1.105 + # via + # -c .pin/../.pin/constraints-cuda-torch.txt + # torch omegaconf==2.3.0 # via # -c .pin/../.pin/constraints-cuda-torch.txt # voir -ovld==0.3.2 +ovld==0.3.5 # via # -c .pin/../.pin/constraints-cuda-torch.txt # voir -packaging==23.2 +packaging==24.1 # via # -c .pin/../.pin/constraints-cuda-torch.txt # accelerate @@ -171,20 +229,21 @@ packaging==23.2 # evaluate # huggingface-hub # transformers -pandas==2.1.2 +pandas==2.2.2 # via # -c .pin/../.pin/constraints-cuda-torch.txt # datasets # evaluate -pillow==10.1.0 +pillow==10.3.0 # via # -c .pin/../.pin/constraints-cuda-torch.txt # torchvision -psutil==5.9.6 +psutil==5.9.8 # via # -c .pin/../.pin/constraints-cuda-torch.txt # accelerate # deepspeed + # voir ptera==1.4.1 # via # -c .pin/../.pin/constraints-cuda-torch.txt @@ -193,15 +252,19 @@ py-cpuinfo==9.0.0 # via # -c .pin/../.pin/constraints-cuda-torch.txt # deepspeed -pyarrow==14.0.0 +pyarrow==16.1.0 + # via + # -c .pin/../.pin/constraints-cuda-torch.txt + # datasets +pyarrow-hotfix==0.6 # via # -c .pin/../.pin/constraints-cuda-torch.txt # datasets -pydantic==1.10.13 +pydantic==1.10.15 # via # -c .pin/../.pin/constraints-cuda-torch.txt # deepspeed -pygments==2.16.1 +pygments==2.18.0 # via # -c .pin/../.pin/constraints-cuda-torch.txt # rich @@ -210,11 +273,11 @@ pynvml==11.5.0 # -c .pin/../.pin/constraints-cuda-torch.txt # deepspeed # voir -python-dateutil==2.8.2 +python-dateutil==2.9.0.post0 # via # -c .pin/../.pin/constraints-cuda-torch.txt # pandas -pytz==2023.3.post1 +pytz==2024.1 # via # -c .pin/../.pin/constraints-cuda-torch.txt # pandas @@ -230,57 +293,57 @@ reactivex==4.0.4 # via # -c .pin/../.pin/constraints-cuda-torch.txt # giving -regex==2023.10.3 +regex==2024.5.15 # via # -c .pin/../.pin/constraints-cuda-torch.txt # transformers -requests==2.31.0 +requests==2.32.3 # via # -c .pin/../.pin/constraints-cuda-torch.txt # datasets # evaluate - # fsspec # huggingface-hub - # responses - # torchvision # transformers 
-responses==0.18.0 +rich==13.7.1 # via # -c .pin/../.pin/constraints-cuda-torch.txt - # evaluate -rich==13.6.0 - # via # -r benchmarks/accelerate_opt/requirements.in # voir -safetensors==0.4.0 +safetensors==0.4.3 # via # -c .pin/../.pin/constraints-cuda-torch.txt + # accelerate # transformers six==1.16.0 # via # -c .pin/../.pin/constraints-cuda-torch.txt # asttokens # python-dateutil -sympy==1.12 +sympy==1.12.1 # via # -c .pin/../.pin/constraints-cuda-torch.txt # torch -tokenizers==0.14.1 +tokenizers==0.19.1 # via # -c .pin/../.pin/constraints-cuda-torch.txt # transformers -torch==2.1.0+cu118 +torch==2.3.1+cu121 # via + # -c .pin/../.pin/constraints-cuda-torch.txt # -r benchmarks/accelerate_opt/requirements.in # accelerate # deepspeed # torchaudio # torchvision -torchaudio==2.1.0+cu118 - # via -r benchmarks/accelerate_opt/requirements.in -torchvision==0.16.0+cu118 - # via -r benchmarks/accelerate_opt/requirements.in -tqdm==4.66.1 +torchaudio==2.3.1+cu121 + # via + # -c .pin/../.pin/constraints-cuda-torch.txt + # -r benchmarks/accelerate_opt/requirements.in +torchvision==0.18.1+cu121 + # via + # -c .pin/../.pin/constraints-cuda-torch.txt + # -r benchmarks/accelerate_opt/requirements.in +tqdm==4.66.4 # via # -c .pin/../.pin/constraints-cuda-torch.txt # datasets @@ -288,20 +351,22 @@ tqdm==4.66.1 # evaluate # huggingface-hub # transformers -transformers==4.35.0 - # via -r benchmarks/accelerate_opt/requirements.in -triton==2.1.0 +transformers==4.41.2 + # via + # -c .pin/../.pin/constraints-cuda-torch.txt + # -r benchmarks/accelerate_opt/requirements.in +triton==2.3.1 # via # -c .pin/../.pin/constraints-cuda-torch.txt # torch -typing-extensions==4.8.0 +typing-extensions==4.12.2 # via # -c .pin/../.pin/constraints-cuda-torch.txt # huggingface-hub # pydantic # reactivex # torch -tzdata==2023.3 +tzdata==2024.1 # via # -c .pin/../.pin/constraints-cuda-torch.txt # pandas @@ -309,7 +374,6 @@ urllib3==1.26.18 # via # -c .pin/../.pin/constraints-cuda-torch.txt # requests - # 
responses varname==0.10.0 # via # -c .pin/../.pin/constraints-cuda-torch.txt @@ -321,7 +385,7 @@ xxhash==3.4.1 # -c .pin/../.pin/constraints-cuda-torch.txt # datasets # evaluate -yarl==1.9.2 +yarl==1.9.4 # via # -c .pin/../.pin/constraints-cuda-torch.txt # aiohttp diff --git a/benchmarks/accelerate_opt/requirements.hpu.txt b/benchmarks/accelerate_opt/requirements.hpu.txt index a9037c133..fc464bcaf 100644 --- a/benchmarks/accelerate_opt/requirements.hpu.txt +++ b/benchmarks/accelerate_opt/requirements.hpu.txt @@ -1,11 +1,17 @@ # -# This file is autogenerated by pip-compile with Python 3.10 +# This file is autogenerated by pip-compile with Python 3.11 # by the following command: # -# pip-compile --output-file=milabench/benchmarks/accelerate_opt/requirements.hpu.txt --resolver=backtracking .pin/tmp-constraints-hpu-opt.txt milabench/benchmarks/accelerate_opt/requirements.in +# pip-compile --output-file=benchmarks/accelerate_opt/requirements.hpu.txt .pin/tmp-constraints-hpu-opt.txt benchmarks/accelerate_opt/requirements.in # -accelerate==0.30.0 - # via -r milabench/benchmarks/accelerate_opt/requirements.in +--extra-index-url https://pypi.ngc.nvidia.com +--find-links https://storage.googleapis.com/jax-releases/jax_cuda_releases.html +--trusted-host pypi.ngc.nvidia.com + +accelerate==0.31.0 + # via + # -c .pin/../.pin/constraints-hpu-torch.txt + # -r benchmarks/accelerate_opt/requirements.in aiohttp==3.9.5 # via # -c .pin/../.pin/constraints-hpu-torch.txt @@ -23,15 +29,11 @@ asttokens==2.4.1 # via # -c .pin/../.pin/constraints-hpu-torch.txt # giving -async-timeout==4.0.3 - # via - # -c .pin/../.pin/constraints-hpu-torch.txt - # aiohttp attrs==23.2.0 # via # -c .pin/../.pin/constraints-hpu-torch.txt # aiohttp -certifi==2024.2.2 +certifi==2024.6.2 # via # -c .pin/../.pin/constraints-hpu-torch.txt # requests @@ -39,16 +41,19 @@ charset-normalizer==3.3.2 # via # -c .pin/../.pin/constraints-hpu-torch.txt # requests -codefind==0.1.4 +codefind==0.1.6 # via # -c 
.pin/../.pin/constraints-hpu-torch.txt # ptera -datasets==2.19.1 +datasets==2.19.2 # via - # -r milabench/benchmarks/accelerate_opt/requirements.in + # -c .pin/../.pin/constraints-hpu-torch.txt + # -r benchmarks/accelerate_opt/requirements.in # evaluate deepspeed==0.14.2 - # via -r milabench/benchmarks/accelerate_opt/requirements.in + # via + # -c .pin/../.pin/constraints-hpu-torch.txt + # -r benchmarks/accelerate_opt/requirements.in dill==0.3.8 # via # -c .pin/../.pin/constraints-hpu-torch.txt @@ -56,7 +61,9 @@ dill==0.3.8 # evaluate # multiprocess evaluate==0.4.2 - # via -r milabench/benchmarks/accelerate_opt/requirements.in + # via + # -c .pin/../.pin/constraints-hpu-torch.txt + # -r benchmarks/accelerate_opt/requirements.in executing==1.2.0 # via # -c .pin/../.pin/constraints-hpu-torch.txt @@ -90,7 +97,7 @@ hjson==3.1.0 # via # -c .pin/../.pin/constraints-hpu-torch.txt # deepspeed -huggingface-hub==0.23.0 +huggingface-hub==0.23.3 # via # -c .pin/../.pin/constraints-hpu-torch.txt # accelerate @@ -195,7 +202,7 @@ nvidia-nccl-cu12==2.20.5 # via # -c .pin/../.pin/constraints-hpu-torch.txt # torch -nvidia-nvjitlink-cu12==12.4.127 +nvidia-nvjitlink-cu12==12.5.40 # via # -c .pin/../.pin/constraints-hpu-torch.txt # nvidia-cusolver-cu12 @@ -212,7 +219,7 @@ ovld==0.3.5 # via # -c .pin/../.pin/constraints-hpu-torch.txt # voir -packaging==24.0 +packaging==24.1 # via # -c .pin/../.pin/constraints-hpu-torch.txt # accelerate @@ -244,7 +251,7 @@ py-cpuinfo==9.0.0 # via # -c .pin/../.pin/constraints-hpu-torch.txt # deepspeed -pyarrow==16.0.0 +pyarrow==16.1.0 # via # -c .pin/../.pin/constraints-hpu-torch.txt # datasets @@ -285,11 +292,11 @@ reactivex==4.0.4 # via # -c .pin/../.pin/constraints-hpu-torch.txt # giving -regex==2024.4.28 +regex==2024.5.15 # via # -c .pin/../.pin/constraints-hpu-torch.txt # transformers -requests==2.31.0 +requests==2.32.3 # via # -c .pin/../.pin/constraints-hpu-torch.txt # datasets @@ -298,7 +305,8 @@ requests==2.31.0 # transformers rich==13.7.1 # via 
- # -r milabench/benchmarks/accelerate_opt/requirements.in + # -c .pin/../.pin/constraints-hpu-torch.txt + # -r benchmarks/accelerate_opt/requirements.in # voir safetensors==0.4.3 # via @@ -310,7 +318,7 @@ six==1.16.0 # -c .pin/../.pin/constraints-hpu-torch.txt # asttokens # python-dateutil -sympy==1.12 +sympy==1.12.1 # via # -c .pin/../.pin/constraints-hpu-torch.txt # torch @@ -318,17 +326,22 @@ tokenizers==0.19.1 # via # -c .pin/../.pin/constraints-hpu-torch.txt # transformers -torch==2.3.0 +torch==2.3.1 # via - # -r milabench/benchmarks/accelerate_opt/requirements.in + # -c .pin/../.pin/constraints-hpu-torch.txt + # -r benchmarks/accelerate_opt/requirements.in # accelerate # deepspeed # torchaudio # torchvision -torchaudio==2.3.0 - # via -r milabench/benchmarks/accelerate_opt/requirements.in -torchvision==0.18.0 - # via -r milabench/benchmarks/accelerate_opt/requirements.in +torchaudio==2.3.1 + # via + # -c .pin/../.pin/constraints-hpu-torch.txt + # -r benchmarks/accelerate_opt/requirements.in +torchvision==0.18.1 + # via + # -c .pin/../.pin/constraints-hpu-torch.txt + # -r benchmarks/accelerate_opt/requirements.in tqdm==4.66.4 # via # -c .pin/../.pin/constraints-hpu-torch.txt @@ -337,13 +350,15 @@ tqdm==4.66.4 # evaluate # huggingface-hub # transformers -transformers==4.40.2 - # via -r milabench/benchmarks/accelerate_opt/requirements.in -triton==2.3.0 +transformers==4.41.2 + # via + # -c .pin/../.pin/constraints-hpu-torch.txt + # -r benchmarks/accelerate_opt/requirements.in +triton==2.3.1 # via # -c .pin/../.pin/constraints-hpu-torch.txt # torch -typing-extensions==4.11.0 +typing-extensions==4.12.2 # via # -c .pin/../.pin/constraints-hpu-torch.txt # huggingface-hub diff --git a/benchmarks/accelerate_opt/requirements.rocm.txt b/benchmarks/accelerate_opt/requirements.rocm.txt index 5d97d604a..eb5902c86 100644 --- a/benchmarks/accelerate_opt/requirements.rocm.txt +++ b/benchmarks/accelerate_opt/requirements.rocm.txt @@ -2,13 +2,18 @@ # This file is autogenerated 
by pip-compile with Python 3.11 # by the following command: # -# pip-compile --config=pyproject.toml --output-file=benchmarks/accelerate_opt/requirements.rocm.txt --resolver=backtracking .pin/tmp-constraints-rocm-opt.txt benchmarks/accelerate_opt/requirements.in +# pip-compile --output-file=benchmarks/accelerate_opt/requirements.rocm.txt .pin/tmp-constraints-rocm-opt.txt benchmarks/accelerate_opt/requirements.in # ---extra-index-url https://download.pytorch.org/whl/rocm5.6/ +--extra-index-url https://pypi.ngc.nvidia.com +--extra-index-url https://download.pytorch.org/whl/rocm6.0 +--find-links https://storage.googleapis.com/jax-releases/jax_cuda_releases.html +--trusted-host pypi.ngc.nvidia.com -accelerate==0.24.1 - # via -r benchmarks/accelerate_opt/requirements.in -aiohttp==3.8.6 +accelerate==0.31.0 + # via + # -c .pin/../.pin/constraints-rocm-torch.txt + # -r benchmarks/accelerate_opt/requirements.in +aiohttp==3.9.5 # via # -c .pin/../.pin/constraints-rocm-torch.txt # datasets @@ -25,62 +30,59 @@ asttokens==2.4.1 # via # -c .pin/../.pin/constraints-rocm-torch.txt # giving -async-timeout==4.0.3 - # via - # -c .pin/../.pin/constraints-rocm-torch.txt - # aiohttp -attrs==23.1.0 +attrs==23.2.0 # via # -c .pin/../.pin/constraints-rocm-torch.txt # aiohttp -certifi==2023.7.22 +certifi==2024.6.2 # via # -c .pin/../.pin/constraints-rocm-torch.txt # requests charset-normalizer==3.3.2 # via # -c .pin/../.pin/constraints-rocm-torch.txt - # aiohttp # requests -cmake==3.27.7 - # via - # -c .pin/../.pin/constraints-rocm-torch.txt - # pytorch-triton-rocm -codefind==0.1.3 +codefind==0.1.6 # via # -c .pin/../.pin/constraints-rocm-torch.txt # ptera -datasets==2.14.6 +datasets==2.19.2 # via + # -c .pin/../.pin/constraints-rocm-torch.txt # -r benchmarks/accelerate_opt/requirements.in # evaluate -deepspeed==0.12.2 - # via -r benchmarks/accelerate_opt/requirements.in -dill==0.3.7 +deepspeed==0.14.2 + # via + # -c .pin/../.pin/constraints-rocm-torch.txt + # -r 
benchmarks/accelerate_opt/requirements.in +dill==0.3.8 # via # -c .pin/../.pin/constraints-rocm-torch.txt # datasets # evaluate # multiprocess -evaluate==0.4.1 - # via -r benchmarks/accelerate_opt/requirements.in +evaluate==0.4.2 + # via + # -c .pin/../.pin/constraints-rocm-torch.txt + # -r benchmarks/accelerate_opt/requirements.in executing==1.2.0 # via # -c .pin/../.pin/constraints-rocm-torch.txt # varname -filelock==3.13.1 +filelock==3.14.0 # via # -c .pin/../.pin/constraints-rocm-torch.txt + # datasets # huggingface-hub # pytorch-triton-rocm # torch # transformers -frozenlist==1.4.0 +frozenlist==1.4.1 # via # -c .pin/../.pin/constraints-rocm-torch.txt # aiohttp # aiosignal -fsspec[http]==2023.10.0 +fsspec[http]==2024.3.1 # via # -c .pin/../.pin/constraints-rocm-torch.txt # datasets @@ -96,7 +98,7 @@ hjson==3.1.0 # via # -c .pin/../.pin/constraints-rocm-torch.txt # deepspeed -huggingface-hub==0.17.3 +huggingface-hub==0.23.3 # via # -c .pin/../.pin/constraints-rocm-torch.txt # accelerate @@ -104,24 +106,20 @@ huggingface-hub==0.17.3 # evaluate # tokenizers # transformers -idna==3.4 +idna==3.7 # via # -c .pin/../.pin/constraints-rocm-torch.txt # requests # yarl -jinja2==3.1.2 +jinja2==3.1.4 # via # -c .pin/../.pin/constraints-rocm-torch.txt # torch -lit==17.0.4 - # via - # -c .pin/../.pin/constraints-rocm-torch.txt - # pytorch-triton-rocm markdown-it-py==3.0.0 # via # -c .pin/../.pin/constraints-rocm-torch.txt # rich -markupsafe==2.1.3 +markupsafe==2.1.5 # via # -c .pin/../.pin/constraints-rocm-torch.txt # jinja2 @@ -133,17 +131,17 @@ mpmath==1.3.0 # via # -c .pin/../.pin/constraints-rocm-torch.txt # sympy -multidict==6.0.4 +multidict==6.0.5 # via # -c .pin/../.pin/constraints-rocm-torch.txt # aiohttp # yarl -multiprocess==0.70.15 +multiprocess==0.70.16 # via # -c .pin/../.pin/constraints-rocm-torch.txt # datasets # evaluate -networkx==3.2.1 +networkx==3.3 # via # -c .pin/../.pin/constraints-rocm-torch.txt # torch @@ -151,7 +149,7 @@ ninja==1.11.1.1 # via # -c 
.pin/../.pin/constraints-rocm-torch.txt # deepspeed -numpy==1.26.1 +numpy==1.26.4 # via # -c .pin/../.pin/constraints-rocm-torch.txt # accelerate @@ -166,11 +164,11 @@ omegaconf==2.3.0 # via # -c .pin/../.pin/constraints-rocm-torch.txt # voir -ovld==0.3.2 +ovld==0.3.5 # via # -c .pin/../.pin/constraints-rocm-torch.txt # voir -packaging==23.2 +packaging==24.1 # via # -c .pin/../.pin/constraints-rocm-torch.txt # accelerate @@ -179,20 +177,21 @@ packaging==23.2 # evaluate # huggingface-hub # transformers -pandas==2.1.2 +pandas==2.2.2 # via # -c .pin/../.pin/constraints-rocm-torch.txt # datasets # evaluate -pillow==10.1.0 +pillow==10.3.0 # via # -c .pin/../.pin/constraints-rocm-torch.txt # torchvision -psutil==5.9.6 +psutil==5.9.8 # via # -c .pin/../.pin/constraints-rocm-torch.txt # accelerate # deepspeed + # voir ptera==1.4.1 # via # -c .pin/../.pin/constraints-rocm-torch.txt @@ -201,15 +200,19 @@ py-cpuinfo==9.0.0 # via # -c .pin/../.pin/constraints-rocm-torch.txt # deepspeed -pyarrow==14.0.0 +pyarrow==16.1.0 + # via + # -c .pin/../.pin/constraints-rocm-torch.txt + # datasets +pyarrow-hotfix==0.6 # via # -c .pin/../.pin/constraints-rocm-torch.txt # datasets -pydantic==1.10.13 +pydantic==1.10.15 # via # -c .pin/../.pin/constraints-rocm-torch.txt # deepspeed -pygments==2.16.1 +pygments==2.18.0 # via # -c .pin/../.pin/constraints-rocm-torch.txt # rich @@ -218,15 +221,15 @@ pynvml==11.5.0 # -c .pin/../.pin/constraints-rocm-torch.txt # deepspeed # voir -python-dateutil==2.8.2 +python-dateutil==2.9.0.post0 # via # -c .pin/../.pin/constraints-rocm-torch.txt # pandas -pytorch-triton-rocm==2.1.0 +pytorch-triton-rocm==2.3.1 # via # -c .pin/../.pin/constraints-rocm-torch.txt # torch -pytz==2023.3.post1 +pytz==2024.1 # via # -c .pin/../.pin/constraints-rocm-torch.txt # pandas @@ -242,58 +245,57 @@ reactivex==4.0.4 # via # -c .pin/../.pin/constraints-rocm-torch.txt # giving -regex==2023.10.3 +regex==2024.5.15 # via # -c .pin/../.pin/constraints-rocm-torch.txt # transformers 
-requests==2.31.0 +requests==2.32.3 # via # -c .pin/../.pin/constraints-rocm-torch.txt # datasets # evaluate - # fsspec # huggingface-hub - # responses - # torchvision # transformers -responses==0.18.0 +rich==13.7.1 # via # -c .pin/../.pin/constraints-rocm-torch.txt - # evaluate -rich==13.6.0 - # via # -r benchmarks/accelerate_opt/requirements.in # voir -safetensors==0.4.0 +safetensors==0.4.3 # via # -c .pin/../.pin/constraints-rocm-torch.txt + # accelerate # transformers six==1.16.0 # via # -c .pin/../.pin/constraints-rocm-torch.txt # asttokens # python-dateutil -sympy==1.12 +sympy==1.12.1 # via # -c .pin/../.pin/constraints-rocm-torch.txt # torch -tokenizers==0.14.1 +tokenizers==0.19.1 # via # -c .pin/../.pin/constraints-rocm-torch.txt # transformers -torch==2.1.0+rocm5.6 +torch==2.3.1+rocm6.0 # via + # -c .pin/../.pin/constraints-rocm-torch.txt # -r benchmarks/accelerate_opt/requirements.in # accelerate # deepspeed - # pytorch-triton-rocm # torchaudio # torchvision -torchaudio==2.1.0+rocm5.6 - # via -r benchmarks/accelerate_opt/requirements.in -torchvision==0.16.0+rocm5.6 - # via -r benchmarks/accelerate_opt/requirements.in -tqdm==4.66.1 +torchaudio==2.3.1+rocm6.0 + # via + # -c .pin/../.pin/constraints-rocm-torch.txt + # -r benchmarks/accelerate_opt/requirements.in +torchvision==0.18.1+rocm6.0 + # via + # -c .pin/../.pin/constraints-rocm-torch.txt + # -r benchmarks/accelerate_opt/requirements.in +tqdm==4.66.4 # via # -c .pin/../.pin/constraints-rocm-torch.txt # datasets @@ -301,16 +303,18 @@ tqdm==4.66.1 # evaluate # huggingface-hub # transformers -transformers==4.35.0 - # via -r benchmarks/accelerate_opt/requirements.in -typing-extensions==4.8.0 +transformers==4.41.2 + # via + # -c .pin/../.pin/constraints-rocm-torch.txt + # -r benchmarks/accelerate_opt/requirements.in +typing-extensions==4.12.2 # via # -c .pin/../.pin/constraints-rocm-torch.txt # huggingface-hub # pydantic # reactivex # torch -tzdata==2023.3 +tzdata==2024.1 # via # -c 
.pin/../.pin/constraints-rocm-torch.txt # pandas @@ -318,7 +322,6 @@ urllib3==1.26.18 # via # -c .pin/../.pin/constraints-rocm-torch.txt # requests - # responses varname==0.10.0 # via # -c .pin/../.pin/constraints-rocm-torch.txt @@ -330,7 +333,7 @@ xxhash==3.4.1 # -c .pin/../.pin/constraints-rocm-torch.txt # datasets # evaluate -yarl==1.9.2 +yarl==1.9.4 # via # -c .pin/../.pin/constraints-rocm-torch.txt # aiohttp diff --git a/benchmarks/accelerate_opt/requirements.xpu.txt b/benchmarks/accelerate_opt/requirements.xpu.txt index 2ee6573ff..0d34ed0f0 100644 --- a/benchmarks/accelerate_opt/requirements.xpu.txt +++ b/benchmarks/accelerate_opt/requirements.xpu.txt @@ -4,9 +4,12 @@ # # pip-compile --output-file=benchmarks/accelerate_opt/requirements.xpu.txt .pin/tmp-constraints-xpu-opt.txt benchmarks/accelerate_opt/requirements.in # +--extra-index-url https://pypi.ngc.nvidia.com --extra-index-url https://pytorch-extension.intel.com/release-whl/stable/xpu/us/ +--find-links https://storage.googleapis.com/jax-releases/jax_cuda_releases.html +--trusted-host pypi.ngc.nvidia.com -accelerate==0.29.3 +accelerate==0.31.0 # via # -c .pin/../.pin/constraints-xpu-torch.txt # -r benchmarks/accelerate_opt/requirements.in @@ -31,7 +34,7 @@ attrs==23.2.0 # via # -c .pin/../.pin/constraints-xpu-torch.txt # aiohttp -certifi==2024.2.2 +certifi==2024.6.2 # via # -c .pin/../.pin/constraints-xpu-torch.txt # requests @@ -39,16 +42,16 @@ charset-normalizer==3.3.2 # via # -c .pin/../.pin/constraints-xpu-torch.txt # requests -codefind==0.1.3 +codefind==0.1.6 # via # -c .pin/../.pin/constraints-xpu-torch.txt # ptera -datasets==2.18.0 +datasets==2.19.2 # via # -c .pin/../.pin/constraints-xpu-torch.txt # -r benchmarks/accelerate_opt/requirements.in # evaluate -deepspeed==0.14.1 +deepspeed==0.14.2 # via # -c .pin/../.pin/constraints-xpu-torch.txt # -r benchmarks/accelerate_opt/requirements.in @@ -58,7 +61,7 @@ dill==0.3.8 # datasets # evaluate # multiprocess -evaluate==0.4.1 +evaluate==0.4.2 # via # -c 
.pin/../.pin/constraints-xpu-torch.txt # -r benchmarks/accelerate_opt/requirements.in @@ -66,19 +69,20 @@ executing==1.2.0 # via # -c .pin/../.pin/constraints-xpu-torch.txt # varname -filelock==3.13.4 +filelock==3.14.0 # via # -c .pin/../.pin/constraints-xpu-torch.txt # datasets # huggingface-hub # torch # transformers + # triton frozenlist==1.4.1 # via # -c .pin/../.pin/constraints-xpu-torch.txt # aiohttp # aiosignal -fsspec[http]==2024.2.0 +fsspec[http]==2024.3.1 # via # -c .pin/../.pin/constraints-xpu-torch.txt # datasets @@ -94,7 +98,7 @@ hjson==3.1.0 # via # -c .pin/../.pin/constraints-xpu-torch.txt # deepspeed -huggingface-hub==0.22.2 +huggingface-hub==0.23.3 # via # -c .pin/../.pin/constraints-xpu-torch.txt # accelerate @@ -107,7 +111,7 @@ idna==3.7 # -c .pin/../.pin/constraints-xpu-torch.txt # requests # yarl -jinja2==3.1.3 +jinja2==3.1.4 # via # -c .pin/../.pin/constraints-xpu-torch.txt # torch @@ -137,7 +141,7 @@ multiprocess==0.70.16 # -c .pin/../.pin/constraints-xpu-torch.txt # datasets # evaluate -networkx +networkx==3.3 # via # -c .pin/../.pin/constraints-xpu-torch.txt # torch @@ -156,6 +160,58 @@ numpy==1.26.4 # pyarrow # torchvision # transformers +nvidia-cublas-cu12==12.1.3.1 + # via + # -c .pin/../.pin/constraints-xpu-torch.txt + # nvidia-cudnn-cu12 + # nvidia-cusolver-cu12 + # torch +nvidia-cuda-cupti-cu12==12.1.105 + # via + # -c .pin/../.pin/constraints-xpu-torch.txt + # torch +nvidia-cuda-nvrtc-cu12==12.1.105 + # via + # -c .pin/../.pin/constraints-xpu-torch.txt + # torch +nvidia-cuda-runtime-cu12==12.1.105 + # via + # -c .pin/../.pin/constraints-xpu-torch.txt + # torch +nvidia-cudnn-cu12==8.9.2.26 + # via + # -c .pin/../.pin/constraints-xpu-torch.txt + # torch +nvidia-cufft-cu12==11.0.2.54 + # via + # -c .pin/../.pin/constraints-xpu-torch.txt + # torch +nvidia-curand-cu12==10.3.2.106 + # via + # -c .pin/../.pin/constraints-xpu-torch.txt + # torch +nvidia-cusolver-cu12==11.4.5.107 + # via + # -c .pin/../.pin/constraints-xpu-torch.txt + # torch 
+nvidia-cusparse-cu12==12.1.0.106 + # via + # -c .pin/../.pin/constraints-xpu-torch.txt + # nvidia-cusolver-cu12 + # torch +nvidia-nccl-cu12==2.20.5 + # via + # -c .pin/../.pin/constraints-xpu-torch.txt + # torch +nvidia-nvjitlink-cu12==12.5.40 + # via + # -c .pin/../.pin/constraints-xpu-torch.txt + # nvidia-cusolver-cu12 + # nvidia-cusparse-cu12 +nvidia-nvtx-cu12==12.1.105 + # via + # -c .pin/../.pin/constraints-xpu-torch.txt + # torch omegaconf==2.3.0 # via # -c .pin/../.pin/constraints-xpu-torch.txt @@ -164,7 +220,7 @@ ovld==0.3.5 # via # -c .pin/../.pin/constraints-xpu-torch.txt # voir -packaging==24.0 +packaging==24.1 # via # -c .pin/../.pin/constraints-xpu-torch.txt # accelerate @@ -187,6 +243,7 @@ psutil==5.9.8 # -c .pin/../.pin/constraints-xpu-torch.txt # accelerate # deepspeed + # voir ptera==1.4.1 # via # -c .pin/../.pin/constraints-xpu-torch.txt @@ -195,7 +252,7 @@ py-cpuinfo==9.0.0 # via # -c .pin/../.pin/constraints-xpu-torch.txt # deepspeed -pyarrow==15.0.2 +pyarrow==16.1.0 # via # -c .pin/../.pin/constraints-xpu-torch.txt # datasets @@ -207,7 +264,7 @@ pydantic==1.10.15 # via # -c .pin/../.pin/constraints-xpu-torch.txt # deepspeed -pygments==2.17.2 +pygments==2.18.0 # via # -c .pin/../.pin/constraints-xpu-torch.txt # rich @@ -236,23 +293,17 @@ reactivex==4.0.4 # via # -c .pin/../.pin/constraints-xpu-torch.txt # giving -regex==2024.4.16 +regex==2024.5.15 # via # -c .pin/../.pin/constraints-xpu-torch.txt # transformers -requests==2.31.0 +requests==2.32.3 # via # -c .pin/../.pin/constraints-xpu-torch.txt # datasets # evaluate # huggingface-hub - # responses - # torchvision # transformers -responses==0.18.0 - # via - # -c .pin/../.pin/constraints-xpu-torch.txt - # evaluate rich==13.7.1 # via # -c .pin/../.pin/constraints-xpu-torch.txt @@ -268,7 +319,7 @@ six==1.16.0 # -c .pin/../.pin/constraints-xpu-torch.txt # asttokens # python-dateutil -sympy==1.12 +sympy==1.12.1 # via # -c .pin/../.pin/constraints-xpu-torch.txt # torch @@ -276,7 +327,7 @@ 
tokenizers==0.19.1 # via # -c .pin/../.pin/constraints-xpu-torch.txt # transformers -torch==2.1.0a0+cxx11.abi +torch==2.3.1 # via # -c .pin/../.pin/constraints-xpu-torch.txt # -c .pin/../constraints/xpu.txt @@ -285,17 +336,17 @@ torch==2.1.0a0+cxx11.abi # deepspeed # torchaudio # torchvision -torchaudio==2.1.0a0+cxx11.abi +torchaudio==2.3.1 # via # -c .pin/../.pin/constraints-xpu-torch.txt # -c .pin/../constraints/xpu.txt # -r benchmarks/accelerate_opt/requirements.in -torchvision==0.16.0a0+cxx11.abi +torchvision==0.18.1 # via # -c .pin/../.pin/constraints-xpu-torch.txt # -c .pin/../constraints/xpu.txt # -r benchmarks/accelerate_opt/requirements.in -tqdm==4.66.2 +tqdm==4.66.4 # via # -c .pin/../.pin/constraints-xpu-torch.txt # datasets @@ -303,11 +354,15 @@ tqdm==4.66.2 # evaluate # huggingface-hub # transformers -transformers==4.40.0 +transformers==4.41.2 # via # -c .pin/../.pin/constraints-xpu-torch.txt # -r benchmarks/accelerate_opt/requirements.in -typing-extensions==4.11.0 +triton==2.3.1 + # via + # -c .pin/../.pin/constraints-xpu-torch.txt + # torch +typing-extensions==4.12.2 # via # -c .pin/../.pin/constraints-xpu-torch.txt # huggingface-hub @@ -322,7 +377,6 @@ urllib3==1.26.18 # via # -c .pin/../.pin/constraints-xpu-torch.txt # requests - # responses varname==0.10.0 # via # -c .pin/../.pin/constraints-xpu-torch.txt @@ -341,5 +395,3 @@ yarl==1.9.4 # via # -c .pin/../.pin/constraints-xpu-torch.txt # aiohttp - -intel-extension-for-pytorch==2.1.10+xpu diff --git a/benchmarks/brax/README.md b/benchmarks/brax/README.md new file mode 100644 index 000000000..ebce72bf5 --- /dev/null +++ b/benchmarks/brax/README.md @@ -0,0 +1,4 @@ + +# Benchmark + +Rewrite this README to explain what the benchmark is! 
diff --git a/benchmarks/brax/benchfile.py b/benchmarks/brax/benchfile.py new file mode 100644 index 000000000..0388956d6 --- /dev/null +++ b/benchmarks/brax/benchfile.py @@ -0,0 +1,9 @@ +from milabench.pack import Package + + +class BraxBenchmark(Package): + base_requirements = "requirements.in" + main_script = "main.py" + + +__pack__ = BraxBenchmark diff --git a/benchmarks/brax/main.py b/benchmarks/brax/main.py new file mode 100644 index 000000000..572ce739c --- /dev/null +++ b/benchmarks/brax/main.py @@ -0,0 +1,120 @@ +# -*- coding: utf-8 -*- +"""Orion + Brax Training with PyTorch on GPU + +Automatically generated by Colaboratory. + +Original file is located at + https://colab.research.google.com/drive/1KlaTeScstmRg7AIWLgrXy9zGmayb5zMS +""" +import argparse +import os + +from giving import give, given + +os.environ["XLA_PYTHON_CLIENT_PREALLOCATE"] = "False" + +import torch # This is a bit of a trick to make jax use torch's packaged libs +from brax import envs +from brax.training.agents.ppo.train import train + + +def run(): + parser = argparse.ArgumentParser(description="Brax training") + parser.add_argument( + "--batch-size", + type=int, + default=1024, + help="Input batch size for training (default: 1024)", + ) + parser.add_argument( + "--env", + type=str, + default="ant", + help="Environment to simulate", + ) + parser.add_argument( + "--num-timesteps", + type=int, + default=100_000_000, + ) + parser.add_argument( + "--discounting", + type=float, + default=0.97, + ) + parser.add_argument( + "--learning-rate", + type=float, + default=0.0003, + ) + parser.add_argument( + "--entropy-cost", + type=float, + default=0.01, + ) + parser.add_argument( + "--num-evals", + type=int, + default=500, + ) + parser.add_argument( + "--reward-scaling", + type=float, + default=0.1, + ) + parser.add_argument( + "--episode-length", + type=int, + default=10, + ) + parser.add_argument( + "--unroll-length", + type=int, + default=5, + ) + parser.add_argument( + "--num-minibatches", + 
type=int, + default=32, + ) + parser.add_argument( + "--num-envs", + type=int, + default=8192, + ) + + args = parser.parse_args() + + train( + environment=envs.get_environment(env_name=args.env), + num_timesteps=args.num_timesteps, + discounting=args.discounting, + learning_rate=args.learning_rate, + entropy_cost=args.entropy_cost, + normalize_observations=True, + action_repeat=1, + progress_fn=lambda n, metrics: give(**metrics), + num_evals=args.num_evals, + reward_scaling=args.reward_scaling, + episode_length=args.episode_length, + unroll_length=args.unroll_length, + num_minibatches=args.num_minibatches, + num_envs=args.num_envs, + batch_size=args.batch_size, + ) + + +def main(): + try: + run() + except KeyboardInterrupt: + pass + + +if __name__ == "__main__": + with given() as gv: + gv["?training/sps"].map( + lambda sps: {"task": "train", "rate": sps, "units": "steps/s"} + ).give() + gv["?eval/episode_reward"].map(lambda reward: -reward.item()).as_("loss").give() + main() diff --git a/benchmarks/brax/prepare.py b/benchmarks/brax/prepare.py new file mode 100755 index 000000000..32bd5901d --- /dev/null +++ b/benchmarks/brax/prepare.py @@ -0,0 +1,16 @@ +#!/usr/bin/env python + +import os + +if __name__ == "__main__": + # If you need the whole configuration: + # config = json.loads(os.environ["MILABENCH_CONFIG"]) + + data_directory = os.environ["MILABENCH_DIR_DATA"] + + # Download (or generate) the needed dataset(s). You are responsible + # to check if it has already been properly downloaded or not, and to + # do nothing if it has been. + print("Hello I am doing some data stuff!") + + # If there is nothing to download or generate, just delete this file. 
diff --git a/benchmarks/brax/requirements.cuda.txt b/benchmarks/brax/requirements.cuda.txt new file mode 100644 index 000000000..5e7dc7c3d --- /dev/null +++ b/benchmarks/brax/requirements.cuda.txt @@ -0,0 +1,447 @@ +# +# This file is autogenerated by pip-compile with Python 3.11 +# by the following command: +# +# pip-compile --output-file=benchmarks/brax/requirements.cuda.txt .pin/tmp-constraints-cuda-brax.txt benchmarks/brax/requirements.in +# +--extra-index-url https://pypi.ngc.nvidia.com +--extra-index-url https://download.pytorch.org/whl/cu121 +--find-links https://storage.googleapis.com/jax-releases/jax_cuda_releases.html +--trusted-host pypi.ngc.nvidia.com + +absl-py==2.1.0 + # via + # -c .pin/../.pin/constraints-cuda-torch.txt + # brax + # chex + # dm-env + # ml-collections + # mujoco + # mujoco-mjx + # optax + # orbax-checkpoint +antlr4-python3-runtime==4.9.3 + # via + # -c .pin/../.pin/constraints-cuda-torch.txt + # omegaconf +asttokens==2.4.1 + # via + # -c .pin/../.pin/constraints-cuda-torch.txt + # giving +blinker==1.8.2 + # via + # -c .pin/../.pin/constraints-cuda-torch.txt + # flask +brax==0.10.5 + # via + # -c .pin/../.pin/constraints-cuda-torch.txt + # -r benchmarks/brax/requirements.in +chex==0.1.86 + # via + # -c .pin/../.pin/constraints-cuda-torch.txt + # optax +click==8.1.7 + # via + # -c .pin/../.pin/constraints-cuda-torch.txt + # flask +cloudpickle==3.0.0 + # via + # -c .pin/../.pin/constraints-cuda-torch.txt + # gym +codefind==0.1.6 + # via + # -c .pin/../.pin/constraints-cuda-torch.txt + # ptera +contextlib2==21.6.0 + # via + # -c .pin/../.pin/constraints-cuda-torch.txt + # ml-collections +dm-env==1.6 + # via + # -c .pin/../.pin/constraints-cuda-torch.txt + # brax +dm-tree==0.1.8 + # via + # -c .pin/../.pin/constraints-cuda-torch.txt + # dm-env +etils[epath,epy]==1.9.1 + # via + # -c .pin/../.pin/constraints-cuda-torch.txt + # brax + # mujoco + # mujoco-mjx + # orbax-checkpoint +executing==1.2.0 + # via + # -c 
.pin/../.pin/constraints-cuda-torch.txt + # varname +filelock==3.14.0 + # via + # -c .pin/../.pin/constraints-cuda-torch.txt + # torch + # triton +flask==3.0.3 + # via + # -c .pin/../.pin/constraints-cuda-torch.txt + # brax + # flask-cors +flask-cors==4.0.1 + # via + # -c .pin/../.pin/constraints-cuda-torch.txt + # brax +flax==0.8.4 + # via + # -c .pin/../.pin/constraints-cuda-torch.txt + # brax +fsspec==2024.3.1 + # via + # -c .pin/../.pin/constraints-cuda-torch.txt + # etils + # torch +giving==0.4.2 + # via + # -c .pin/../.pin/constraints-cuda-torch.txt + # ptera + # voir +glfw==2.7.0 + # via + # -c .pin/../.pin/constraints-cuda-torch.txt + # mujoco +grpcio==1.64.1 + # via + # -c .pin/../.pin/constraints-cuda-torch.txt + # brax +gym==0.26.2 + # via + # -c .pin/../.pin/constraints-cuda-torch.txt + # brax +gym-notices==0.0.8 + # via + # -c .pin/../.pin/constraints-cuda-torch.txt + # gym +importlib-resources==6.4.0 + # via + # -c .pin/../.pin/constraints-cuda-torch.txt + # etils +itsdangerous==2.2.0 + # via + # -c .pin/../.pin/constraints-cuda-torch.txt + # flask +jax[cuda12]==0.4.28 + # via + # -c .pin/../.pin/constraints-cuda-torch.txt + # -r benchmarks/brax/requirements.in + # brax + # chex + # flax + # jaxopt + # mujoco-mjx + # optax + # orbax-checkpoint +jax-cuda12-pjrt==0.4.28 + # via + # -c .pin/../.pin/constraints-cuda-torch.txt + # jax-cuda12-plugin +jax-cuda12-plugin==0.4.28 + # via + # -c .pin/../.pin/constraints-cuda-torch.txt + # jax +jaxlib==0.4.28+cuda12.cudnn89 + # via + # -c .pin/../.pin/constraints-cuda-torch.txt + # brax + # chex + # jax + # jaxopt + # mujoco-mjx + # optax + # orbax-checkpoint +jaxopt==0.8.3 + # via + # -c .pin/../.pin/constraints-cuda-torch.txt + # brax +jinja2==3.1.4 + # via + # -c .pin/../.pin/constraints-cuda-torch.txt + # brax + # flask + # torch +markdown-it-py==3.0.0 + # via + # -c .pin/../.pin/constraints-cuda-torch.txt + # rich +markupsafe==2.1.5 + # via + # -c .pin/../.pin/constraints-cuda-torch.txt + # jinja2 + # 
werkzeug +mdurl==0.1.2 + # via + # -c .pin/../.pin/constraints-cuda-torch.txt + # markdown-it-py +ml-collections==0.1.1 + # via + # -c .pin/../.pin/constraints-cuda-torch.txt + # brax +ml-dtypes==0.4.0 + # via + # -c .pin/../.pin/constraints-cuda-torch.txt + # jax + # jaxlib + # tensorstore +mpmath==1.3.0 + # via + # -c .pin/../.pin/constraints-cuda-torch.txt + # sympy +msgpack==1.0.8 + # via + # -c .pin/../.pin/constraints-cuda-torch.txt + # flax + # orbax-checkpoint +mujoco==3.1.6 + # via + # -c .pin/../.pin/constraints-cuda-torch.txt + # brax + # mujoco-mjx +mujoco-mjx==3.1.6 + # via + # -c .pin/../.pin/constraints-cuda-torch.txt + # brax +nest-asyncio==1.6.0 + # via + # -c .pin/../.pin/constraints-cuda-torch.txt + # orbax-checkpoint +networkx==3.3 + # via + # -c .pin/../.pin/constraints-cuda-torch.txt + # torch +numpy==1.26.4 + # via + # -c .pin/../.pin/constraints-cuda-torch.txt + # brax + # chex + # dm-env + # flax + # gym + # jax + # jaxlib + # jaxopt + # ml-dtypes + # mujoco + # opt-einsum + # optax + # orbax-checkpoint + # scipy + # tensorboardx + # tensorstore + # trimesh +nvidia-cublas-cu12==12.1.3.1 + # via + # -c .pin/../.pin/constraints-cuda-torch.txt + # jax + # nvidia-cudnn-cu12 + # nvidia-cusolver-cu12 + # torch +nvidia-cuda-cupti-cu12==12.1.105 + # via + # -c .pin/../.pin/constraints-cuda-torch.txt + # jax + # torch +nvidia-cuda-nvcc-cu12==12.5.40 + # via + # -c .pin/../.pin/constraints-cuda-torch.txt + # jax +nvidia-cuda-nvrtc-cu12==12.1.105 + # via + # -c .pin/../.pin/constraints-cuda-torch.txt + # torch +nvidia-cuda-runtime-cu12==12.1.105 + # via + # -c .pin/../.pin/constraints-cuda-torch.txt + # jax + # torch +nvidia-cudnn-cu12==8.9.2.26 + # via + # -c .pin/../.pin/constraints-cuda-torch.txt + # jax + # torch +nvidia-cufft-cu12==11.0.2.54 + # via + # -c .pin/../.pin/constraints-cuda-torch.txt + # jax + # torch +nvidia-curand-cu12==10.3.2.106 + # via + # -c .pin/../.pin/constraints-cuda-torch.txt + # torch +nvidia-cusolver-cu12==11.4.5.107 + # 
via + # -c .pin/../.pin/constraints-cuda-torch.txt + # jax + # torch +nvidia-cusparse-cu12==12.1.0.106 + # via + # -c .pin/../.pin/constraints-cuda-torch.txt + # jax + # nvidia-cusolver-cu12 + # torch +nvidia-nccl-cu12==2.20.5 + # via + # -c .pin/../.pin/constraints-cuda-torch.txt + # jax + # torch +nvidia-nvjitlink-cu12==12.5.40 + # via + # -c .pin/../.pin/constraints-cuda-torch.txt + # jax + # nvidia-cusolver-cu12 + # nvidia-cusparse-cu12 +nvidia-nvtx-cu12==12.1.105 + # via + # -c .pin/../.pin/constraints-cuda-torch.txt + # torch +omegaconf==2.3.0 + # via + # -c .pin/../.pin/constraints-cuda-torch.txt + # voir +opt-einsum==3.3.0 + # via + # -c .pin/../.pin/constraints-cuda-torch.txt + # jax +optax==0.2.2 + # via + # -c .pin/../.pin/constraints-cuda-torch.txt + # brax + # flax +orbax-checkpoint==0.5.15 + # via + # -c .pin/../.pin/constraints-cuda-torch.txt + # brax + # flax +ovld==0.3.5 + # via + # -c .pin/../.pin/constraints-cuda-torch.txt + # voir +packaging==24.1 + # via + # -c .pin/../.pin/constraints-cuda-torch.txt + # tensorboardx +pillow==10.3.0 + # via + # -c .pin/../.pin/constraints-cuda-torch.txt + # brax +protobuf==4.25.3 + # via + # -c .pin/../.pin/constraints-cuda-torch.txt + # orbax-checkpoint + # tensorboardx +psutil==5.9.8 + # via + # -c .pin/../.pin/constraints-cuda-torch.txt + # voir +ptera==1.4.1 + # via + # -c .pin/../.pin/constraints-cuda-torch.txt + # voir +pygments==2.18.0 + # via + # -c .pin/../.pin/constraints-cuda-torch.txt + # rich +pynvml==11.5.0 + # via + # -c .pin/../.pin/constraints-cuda-torch.txt + # voir +pyopengl==3.1.7 + # via + # -c .pin/../.pin/constraints-cuda-torch.txt + # mujoco +pytinyrenderer==0.0.14 + # via + # -c .pin/../.pin/constraints-cuda-torch.txt + # brax +pyyaml==6.0.1 + # via + # -c .pin/../.pin/constraints-cuda-torch.txt + # flax + # ml-collections + # omegaconf + # orbax-checkpoint +reactivex==4.0.4 + # via + # -c .pin/../.pin/constraints-cuda-torch.txt + # giving +rich==13.7.1 + # via + # -c 
.pin/../.pin/constraints-cuda-torch.txt + # flax + # voir +scipy==1.13.1 + # via + # -c .pin/../.pin/constraints-cuda-torch.txt + # brax + # jax + # jaxlib + # jaxopt + # mujoco-mjx +six==1.16.0 + # via + # -c .pin/../.pin/constraints-cuda-torch.txt + # asttokens + # ml-collections +sympy==1.12.1 + # via + # -c .pin/../.pin/constraints-cuda-torch.txt + # torch +tensorboardx==2.6.2.2 + # via + # -c .pin/../.pin/constraints-cuda-torch.txt + # brax +tensorstore==0.1.60 + # via + # -c .pin/../.pin/constraints-cuda-torch.txt + # flax + # orbax-checkpoint +toolz==0.12.1 + # via + # -c .pin/../.pin/constraints-cuda-torch.txt + # chex +torch==2.3.1+cu121 + # via + # -c .pin/../.pin/constraints-cuda-torch.txt + # -r benchmarks/brax/requirements.in +trimesh==4.4.1 + # via + # -c .pin/../.pin/constraints-cuda-torch.txt + # brax + # mujoco-mjx +triton==2.3.1 + # via + # -c .pin/../.pin/constraints-cuda-torch.txt + # torch +typing-extensions==4.12.2 + # via + # -c .pin/../.pin/constraints-cuda-torch.txt + # brax + # chex + # etils + # flax + # orbax-checkpoint + # reactivex + # torch +varname==0.10.0 + # via + # -c .pin/../.pin/constraints-cuda-torch.txt + # giving +voir==0.2.14 + # via + # -c .pin/../.pin/constraints-cuda-torch.txt + # -c .pin/../constraints/cuda.txt + # -r benchmarks/brax/requirements.in +werkzeug==3.0.3 + # via + # -c .pin/../.pin/constraints-cuda-torch.txt + # flask +zipp==3.19.2 + # via + # -c .pin/../.pin/constraints-cuda-torch.txt + # etils diff --git a/benchmarks/brax/requirements.hpu.txt b/benchmarks/brax/requirements.hpu.txt new file mode 100644 index 000000000..ed3084061 --- /dev/null +++ b/benchmarks/brax/requirements.hpu.txt @@ -0,0 +1,446 @@ +# +# This file is autogenerated by pip-compile with Python 3.11 +# by the following command: +# +# pip-compile --output-file=benchmarks/brax/requirements.hpu.txt .pin/tmp-constraints-hpu-brax.txt benchmarks/brax/requirements.in +# +--extra-index-url https://pypi.ngc.nvidia.com +--find-links 
https://storage.googleapis.com/jax-releases/jax_cuda_releases.html +--trusted-host pypi.ngc.nvidia.com + +absl-py==2.1.0 + # via + # -c .pin/../.pin/constraints-hpu-torch.txt + # brax + # chex + # dm-env + # ml-collections + # mujoco + # mujoco-mjx + # optax + # orbax-checkpoint +antlr4-python3-runtime==4.9.3 + # via + # -c .pin/../.pin/constraints-hpu-torch.txt + # omegaconf +asttokens==2.4.1 + # via + # -c .pin/../.pin/constraints-hpu-torch.txt + # giving +blinker==1.8.2 + # via + # -c .pin/../.pin/constraints-hpu-torch.txt + # flask +brax==0.10.5 + # via + # -c .pin/../.pin/constraints-hpu-torch.txt + # -r benchmarks/brax/requirements.in +chex==0.1.86 + # via + # -c .pin/../.pin/constraints-hpu-torch.txt + # optax +click==8.1.7 + # via + # -c .pin/../.pin/constraints-hpu-torch.txt + # flask +cloudpickle==3.0.0 + # via + # -c .pin/../.pin/constraints-hpu-torch.txt + # gym +codefind==0.1.6 + # via + # -c .pin/../.pin/constraints-hpu-torch.txt + # ptera +contextlib2==21.6.0 + # via + # -c .pin/../.pin/constraints-hpu-torch.txt + # ml-collections +dm-env==1.6 + # via + # -c .pin/../.pin/constraints-hpu-torch.txt + # brax +dm-tree==0.1.8 + # via + # -c .pin/../.pin/constraints-hpu-torch.txt + # dm-env +etils[epath,epy]==1.9.1 + # via + # -c .pin/../.pin/constraints-hpu-torch.txt + # brax + # mujoco + # mujoco-mjx + # orbax-checkpoint +executing==1.2.0 + # via + # -c .pin/../.pin/constraints-hpu-torch.txt + # varname +filelock==3.14.0 + # via + # -c .pin/../.pin/constraints-hpu-torch.txt + # torch + # triton +flask==3.0.3 + # via + # -c .pin/../.pin/constraints-hpu-torch.txt + # brax + # flask-cors +flask-cors==4.0.1 + # via + # -c .pin/../.pin/constraints-hpu-torch.txt + # brax +flax==0.8.4 + # via + # -c .pin/../.pin/constraints-hpu-torch.txt + # brax +fsspec==2024.3.1 + # via + # -c .pin/../.pin/constraints-hpu-torch.txt + # etils + # torch +giving==0.4.2 + # via + # -c .pin/../.pin/constraints-hpu-torch.txt + # ptera + # voir +glfw==2.7.0 + # via + # -c 
.pin/../.pin/constraints-hpu-torch.txt + # mujoco +grpcio==1.64.1 + # via + # -c .pin/../.pin/constraints-hpu-torch.txt + # brax +gym==0.26.2 + # via + # -c .pin/../.pin/constraints-hpu-torch.txt + # brax +gym-notices==0.0.8 + # via + # -c .pin/../.pin/constraints-hpu-torch.txt + # gym +importlib-resources==6.4.0 + # via + # -c .pin/../.pin/constraints-hpu-torch.txt + # etils +itsdangerous==2.2.0 + # via + # -c .pin/../.pin/constraints-hpu-torch.txt + # flask +jax[cuda12]==0.4.28 + # via + # -c .pin/../.pin/constraints-hpu-torch.txt + # -r benchmarks/brax/requirements.in + # brax + # chex + # flax + # jaxopt + # mujoco-mjx + # optax + # orbax-checkpoint +jax-cuda12-pjrt==0.4.28 + # via + # -c .pin/../.pin/constraints-hpu-torch.txt + # jax-cuda12-plugin +jax-cuda12-plugin==0.4.28 + # via + # -c .pin/../.pin/constraints-hpu-torch.txt + # jax +jaxlib==0.4.28+cuda12.cudnn89 + # via + # -c .pin/../.pin/constraints-hpu-torch.txt + # brax + # chex + # jax + # jaxopt + # mujoco-mjx + # optax + # orbax-checkpoint +jaxopt==0.8.3 + # via + # -c .pin/../.pin/constraints-hpu-torch.txt + # brax +jinja2==3.1.4 + # via + # -c .pin/../.pin/constraints-hpu-torch.txt + # brax + # flask + # torch +markdown-it-py==3.0.0 + # via + # -c .pin/../.pin/constraints-hpu-torch.txt + # rich +markupsafe==2.1.5 + # via + # -c .pin/../.pin/constraints-hpu-torch.txt + # jinja2 + # werkzeug +mdurl==0.1.2 + # via + # -c .pin/../.pin/constraints-hpu-torch.txt + # markdown-it-py +ml-collections==0.1.1 + # via + # -c .pin/../.pin/constraints-hpu-torch.txt + # brax +ml-dtypes==0.4.0 + # via + # -c .pin/../.pin/constraints-hpu-torch.txt + # jax + # jaxlib + # tensorstore +mpmath==1.3.0 + # via + # -c .pin/../.pin/constraints-hpu-torch.txt + # sympy +msgpack==1.0.8 + # via + # -c .pin/../.pin/constraints-hpu-torch.txt + # flax + # orbax-checkpoint +mujoco==3.1.6 + # via + # -c .pin/../.pin/constraints-hpu-torch.txt + # brax + # mujoco-mjx +mujoco-mjx==3.1.6 + # via + # -c 
.pin/../.pin/constraints-hpu-torch.txt + # brax +nest-asyncio==1.6.0 + # via + # -c .pin/../.pin/constraints-hpu-torch.txt + # orbax-checkpoint +networkx==3.3 + # via + # -c .pin/../.pin/constraints-hpu-torch.txt + # torch +numpy==1.26.4 + # via + # -c .pin/../.pin/constraints-hpu-torch.txt + # brax + # chex + # dm-env + # flax + # gym + # jax + # jaxlib + # jaxopt + # ml-dtypes + # mujoco + # opt-einsum + # optax + # orbax-checkpoint + # scipy + # tensorboardx + # tensorstore + # trimesh +nvidia-cublas-cu12==12.1.3.1 + # via + # -c .pin/../.pin/constraints-hpu-torch.txt + # jax + # nvidia-cudnn-cu12 + # nvidia-cusolver-cu12 + # torch +nvidia-cuda-cupti-cu12==12.1.105 + # via + # -c .pin/../.pin/constraints-hpu-torch.txt + # jax + # torch +nvidia-cuda-nvcc-cu12==12.5.40 + # via + # -c .pin/../.pin/constraints-hpu-torch.txt + # jax +nvidia-cuda-nvrtc-cu12==12.1.105 + # via + # -c .pin/../.pin/constraints-hpu-torch.txt + # torch +nvidia-cuda-runtime-cu12==12.1.105 + # via + # -c .pin/../.pin/constraints-hpu-torch.txt + # jax + # torch +nvidia-cudnn-cu12==8.9.2.26 + # via + # -c .pin/../.pin/constraints-hpu-torch.txt + # jax + # torch +nvidia-cufft-cu12==11.0.2.54 + # via + # -c .pin/../.pin/constraints-hpu-torch.txt + # jax + # torch +nvidia-curand-cu12==10.3.2.106 + # via + # -c .pin/../.pin/constraints-hpu-torch.txt + # torch +nvidia-cusolver-cu12==11.4.5.107 + # via + # -c .pin/../.pin/constraints-hpu-torch.txt + # jax + # torch +nvidia-cusparse-cu12==12.1.0.106 + # via + # -c .pin/../.pin/constraints-hpu-torch.txt + # jax + # nvidia-cusolver-cu12 + # torch +nvidia-nccl-cu12==2.20.5 + # via + # -c .pin/../.pin/constraints-hpu-torch.txt + # jax + # torch +nvidia-nvjitlink-cu12==12.5.40 + # via + # -c .pin/../.pin/constraints-hpu-torch.txt + # jax + # nvidia-cusolver-cu12 + # nvidia-cusparse-cu12 +nvidia-nvtx-cu12==12.1.105 + # via + # -c .pin/../.pin/constraints-hpu-torch.txt + # torch +omegaconf==2.3.0 + # via + # -c .pin/../.pin/constraints-hpu-torch.txt + # voir 
+opt-einsum==3.3.0 + # via + # -c .pin/../.pin/constraints-hpu-torch.txt + # jax +optax==0.2.2 + # via + # -c .pin/../.pin/constraints-hpu-torch.txt + # brax + # flax +orbax-checkpoint==0.5.15 + # via + # -c .pin/../.pin/constraints-hpu-torch.txt + # brax + # flax +ovld==0.3.5 + # via + # -c .pin/../.pin/constraints-hpu-torch.txt + # voir +packaging==24.1 + # via + # -c .pin/../.pin/constraints-hpu-torch.txt + # tensorboardx +pillow==10.3.0 + # via + # -c .pin/../.pin/constraints-hpu-torch.txt + # brax +protobuf==4.25.3 + # via + # -c .pin/../.pin/constraints-hpu-torch.txt + # orbax-checkpoint + # tensorboardx +psutil==5.9.8 + # via + # -c .pin/../.pin/constraints-hpu-torch.txt + # voir +ptera==1.4.1 + # via + # -c .pin/../.pin/constraints-hpu-torch.txt + # voir +pygments==2.18.0 + # via + # -c .pin/../.pin/constraints-hpu-torch.txt + # rich +pynvml==11.5.0 + # via + # -c .pin/../.pin/constraints-hpu-torch.txt + # voir +pyopengl==3.1.7 + # via + # -c .pin/../.pin/constraints-hpu-torch.txt + # mujoco +pytinyrenderer==0.0.14 + # via + # -c .pin/../.pin/constraints-hpu-torch.txt + # brax +pyyaml==6.0.1 + # via + # -c .pin/../.pin/constraints-hpu-torch.txt + # flax + # ml-collections + # omegaconf + # orbax-checkpoint +reactivex==4.0.4 + # via + # -c .pin/../.pin/constraints-hpu-torch.txt + # giving +rich==13.7.1 + # via + # -c .pin/../.pin/constraints-hpu-torch.txt + # flax + # voir +scipy==1.13.1 + # via + # -c .pin/../.pin/constraints-hpu-torch.txt + # brax + # jax + # jaxlib + # jaxopt + # mujoco-mjx +six==1.16.0 + # via + # -c .pin/../.pin/constraints-hpu-torch.txt + # asttokens + # ml-collections +sympy==1.12.1 + # via + # -c .pin/../.pin/constraints-hpu-torch.txt + # torch +tensorboardx==2.6.2.2 + # via + # -c .pin/../.pin/constraints-hpu-torch.txt + # brax +tensorstore==0.1.60 + # via + # -c .pin/../.pin/constraints-hpu-torch.txt + # flax + # orbax-checkpoint +toolz==0.12.1 + # via + # -c .pin/../.pin/constraints-hpu-torch.txt + # chex +torch==2.3.1 + # via + # 
-c .pin/../.pin/constraints-hpu-torch.txt + # -r benchmarks/brax/requirements.in +trimesh==4.4.1 + # via + # -c .pin/../.pin/constraints-hpu-torch.txt + # brax + # mujoco-mjx +triton==2.3.1 + # via + # -c .pin/../.pin/constraints-hpu-torch.txt + # torch +typing-extensions==4.12.2 + # via + # -c .pin/../.pin/constraints-hpu-torch.txt + # brax + # chex + # etils + # flax + # orbax-checkpoint + # reactivex + # torch +varname==0.10.0 + # via + # -c .pin/../.pin/constraints-hpu-torch.txt + # giving +voir==0.2.14 + # via + # -c .pin/../.pin/constraints-hpu-torch.txt + # -c .pin/../constraints/hpu.txt + # -r benchmarks/brax/requirements.in +werkzeug==3.0.3 + # via + # -c .pin/../.pin/constraints-hpu-torch.txt + # flask +zipp==3.19.2 + # via + # -c .pin/../.pin/constraints-hpu-torch.txt + # etils diff --git a/benchmarks/brax/requirements.in b/benchmarks/brax/requirements.in new file mode 100644 index 000000000..9db61b9e8 --- /dev/null +++ b/benchmarks/brax/requirements.in @@ -0,0 +1,5 @@ +jax[cuda12] +--find-links https://storage.googleapis.com/jax-releases/jax_cuda_releases.html +torch +brax +voir>=0.2.10,<0.3 diff --git a/benchmarks/brax/requirements.rocm.txt b/benchmarks/brax/requirements.rocm.txt new file mode 100644 index 000000000..6e1503248 --- /dev/null +++ b/benchmarks/brax/requirements.rocm.txt @@ -0,0 +1,432 @@ +# +# This file is autogenerated by pip-compile with Python 3.11 +# by the following command: +# +# pip-compile --output-file=benchmarks/brax/requirements.rocm.txt .pin/tmp-constraints-rocm-brax.txt benchmarks/brax/requirements.in +# +--extra-index-url https://pypi.ngc.nvidia.com +--extra-index-url https://download.pytorch.org/whl/rocm6.0 +--find-links https://storage.googleapis.com/jax-releases/jax_cuda_releases.html +--trusted-host pypi.ngc.nvidia.com + +absl-py==2.1.0 + # via + # -c .pin/../.pin/constraints-rocm-torch.txt + # brax + # chex + # dm-env + # ml-collections + # mujoco + # mujoco-mjx + # optax + # orbax-checkpoint 
+antlr4-python3-runtime==4.9.3 + # via + # -c .pin/../.pin/constraints-rocm-torch.txt + # omegaconf +asttokens==2.4.1 + # via + # -c .pin/../.pin/constraints-rocm-torch.txt + # giving +blinker==1.8.2 + # via + # -c .pin/../.pin/constraints-rocm-torch.txt + # flask +brax==0.10.5 + # via + # -c .pin/../.pin/constraints-rocm-torch.txt + # -r benchmarks/brax/requirements.in +chex==0.1.86 + # via + # -c .pin/../.pin/constraints-rocm-torch.txt + # optax +click==8.1.7 + # via + # -c .pin/../.pin/constraints-rocm-torch.txt + # flask +cloudpickle==3.0.0 + # via + # -c .pin/../.pin/constraints-rocm-torch.txt + # gym +codefind==0.1.6 + # via + # -c .pin/../.pin/constraints-rocm-torch.txt + # ptera +contextlib2==21.6.0 + # via + # -c .pin/../.pin/constraints-rocm-torch.txt + # ml-collections +dm-env==1.6 + # via + # -c .pin/../.pin/constraints-rocm-torch.txt + # brax +dm-tree==0.1.8 + # via + # -c .pin/../.pin/constraints-rocm-torch.txt + # dm-env +etils[epath,epy]==1.9.1 + # via + # -c .pin/../.pin/constraints-rocm-torch.txt + # brax + # mujoco + # mujoco-mjx + # orbax-checkpoint +executing==1.2.0 + # via + # -c .pin/../.pin/constraints-rocm-torch.txt + # varname +filelock==3.14.0 + # via + # -c .pin/../.pin/constraints-rocm-torch.txt + # pytorch-triton-rocm + # torch +flask==3.0.3 + # via + # -c .pin/../.pin/constraints-rocm-torch.txt + # brax + # flask-cors +flask-cors==4.0.1 + # via + # -c .pin/../.pin/constraints-rocm-torch.txt + # brax +flax==0.8.4 + # via + # -c .pin/../.pin/constraints-rocm-torch.txt + # brax +fsspec==2024.3.1 + # via + # -c .pin/../.pin/constraints-rocm-torch.txt + # etils + # torch +giving==0.4.2 + # via + # -c .pin/../.pin/constraints-rocm-torch.txt + # ptera + # voir +glfw==2.7.0 + # via + # -c .pin/../.pin/constraints-rocm-torch.txt + # mujoco +grpcio==1.64.1 + # via + # -c .pin/../.pin/constraints-rocm-torch.txt + # brax +gym==0.26.2 + # via + # -c .pin/../.pin/constraints-rocm-torch.txt + # brax +gym-notices==0.0.8 + # via + # -c 
.pin/../.pin/constraints-rocm-torch.txt + # gym +importlib-resources==6.4.0 + # via + # -c .pin/../.pin/constraints-rocm-torch.txt + # etils +itsdangerous==2.2.0 + # via + # -c .pin/../.pin/constraints-rocm-torch.txt + # flask +jax[cuda12]==0.4.28 + # via + # -c .pin/../.pin/constraints-rocm-torch.txt + # -r benchmarks/brax/requirements.in + # brax + # chex + # flax + # jaxopt + # mujoco-mjx + # optax + # orbax-checkpoint +jax-cuda12-pjrt==0.4.28 + # via + # -c .pin/../.pin/constraints-rocm-torch.txt + # jax-cuda12-plugin +jax-cuda12-plugin==0.4.28 + # via + # -c .pin/../.pin/constraints-rocm-torch.txt + # jax +jaxlib==0.4.28+cuda12.cudnn89 + # via + # -c .pin/../.pin/constraints-rocm-torch.txt + # brax + # chex + # jax + # jaxopt + # mujoco-mjx + # optax + # orbax-checkpoint +jaxopt==0.8.3 + # via + # -c .pin/../.pin/constraints-rocm-torch.txt + # brax +jinja2==3.1.4 + # via + # -c .pin/../.pin/constraints-rocm-torch.txt + # brax + # flask + # torch +markdown-it-py==3.0.0 + # via + # -c .pin/../.pin/constraints-rocm-torch.txt + # rich +markupsafe==2.1.5 + # via + # -c .pin/../.pin/constraints-rocm-torch.txt + # jinja2 + # werkzeug +mdurl==0.1.2 + # via + # -c .pin/../.pin/constraints-rocm-torch.txt + # markdown-it-py +ml-collections==0.1.1 + # via + # -c .pin/../.pin/constraints-rocm-torch.txt + # brax +ml-dtypes==0.4.0 + # via + # -c .pin/../.pin/constraints-rocm-torch.txt + # jax + # jaxlib + # tensorstore +mpmath==1.3.0 + # via + # -c .pin/../.pin/constraints-rocm-torch.txt + # sympy +msgpack==1.0.8 + # via + # -c .pin/../.pin/constraints-rocm-torch.txt + # flax + # orbax-checkpoint +mujoco==3.1.6 + # via + # -c .pin/../.pin/constraints-rocm-torch.txt + # brax + # mujoco-mjx +mujoco-mjx==3.1.6 + # via + # -c .pin/../.pin/constraints-rocm-torch.txt + # brax +nest-asyncio==1.6.0 + # via + # -c .pin/../.pin/constraints-rocm-torch.txt + # orbax-checkpoint +networkx==3.3 + # via + # -c .pin/../.pin/constraints-rocm-torch.txt + # torch +numpy==1.26.4 + # via + # -c 
.pin/../.pin/constraints-rocm-torch.txt + # brax + # chex + # dm-env + # flax + # gym + # jax + # jaxlib + # jaxopt + # ml-dtypes + # mujoco + # opt-einsum + # optax + # orbax-checkpoint + # scipy + # tensorboardx + # tensorstore + # trimesh +nvidia-cublas-cu12==12.5.2.13 + # via + # -c .pin/../.pin/constraints-rocm-torch.txt + # jax + # nvidia-cudnn-cu12 + # nvidia-cusolver-cu12 +nvidia-cuda-cupti-cu12==12.5.39 + # via + # -c .pin/../.pin/constraints-rocm-torch.txt + # jax +nvidia-cuda-nvcc-cu12==12.5.40 + # via + # -c .pin/../.pin/constraints-rocm-torch.txt + # jax +nvidia-cuda-nvrtc-cu12==12.5.40 + # via + # -c .pin/../.pin/constraints-rocm-torch.txt + # nvidia-cudnn-cu12 +nvidia-cuda-runtime-cu12==12.5.39 + # via + # -c .pin/../.pin/constraints-rocm-torch.txt + # jax +nvidia-cudnn-cu12==8.9.7.29 + # via + # -c .pin/../.pin/constraints-rocm-torch.txt + # jax +nvidia-cufft-cu12==11.2.3.18 + # via + # -c .pin/../.pin/constraints-rocm-torch.txt + # jax +nvidia-cusolver-cu12==11.6.2.40 + # via + # -c .pin/../.pin/constraints-rocm-torch.txt + # jax +nvidia-cusparse-cu12==12.4.1.24 + # via + # -c .pin/../.pin/constraints-rocm-torch.txt + # jax + # nvidia-cusolver-cu12 +nvidia-nccl-cu12==2.21.5 + # via + # -c .pin/../.pin/constraints-rocm-torch.txt + # jax +nvidia-nvjitlink-cu12==12.5.40 + # via + # -c .pin/../.pin/constraints-rocm-torch.txt + # jax + # nvidia-cufft-cu12 + # nvidia-cusolver-cu12 + # nvidia-cusparse-cu12 +omegaconf==2.3.0 + # via + # -c .pin/../.pin/constraints-rocm-torch.txt + # voir +opt-einsum==3.3.0 + # via + # -c .pin/../.pin/constraints-rocm-torch.txt + # jax +optax==0.2.2 + # via + # -c .pin/../.pin/constraints-rocm-torch.txt + # brax + # flax +orbax-checkpoint==0.5.15 + # via + # -c .pin/../.pin/constraints-rocm-torch.txt + # brax + # flax +ovld==0.3.5 + # via + # -c .pin/../.pin/constraints-rocm-torch.txt + # voir +packaging==24.1 + # via + # -c .pin/../.pin/constraints-rocm-torch.txt + # tensorboardx +pillow==10.3.0 + # via + # -c 
.pin/../.pin/constraints-rocm-torch.txt + # brax +protobuf==4.25.3 + # via + # -c .pin/../.pin/constraints-rocm-torch.txt + # orbax-checkpoint + # tensorboardx +psutil==5.9.8 + # via + # -c .pin/../.pin/constraints-rocm-torch.txt + # voir +ptera==1.4.1 + # via + # -c .pin/../.pin/constraints-rocm-torch.txt + # voir +pygments==2.18.0 + # via + # -c .pin/../.pin/constraints-rocm-torch.txt + # rich +pynvml==11.5.0 + # via + # -c .pin/../.pin/constraints-rocm-torch.txt + # voir +pyopengl==3.1.7 + # via + # -c .pin/../.pin/constraints-rocm-torch.txt + # mujoco +pytinyrenderer==0.0.14 + # via + # -c .pin/../.pin/constraints-rocm-torch.txt + # brax +pytorch-triton-rocm==2.3.1 + # via + # -c .pin/../.pin/constraints-rocm-torch.txt + # torch +pyyaml==6.0.1 + # via + # -c .pin/../.pin/constraints-rocm-torch.txt + # flax + # ml-collections + # omegaconf + # orbax-checkpoint +reactivex==4.0.4 + # via + # -c .pin/../.pin/constraints-rocm-torch.txt + # giving +rich==13.7.1 + # via + # -c .pin/../.pin/constraints-rocm-torch.txt + # flax + # voir +scipy==1.13.1 + # via + # -c .pin/../.pin/constraints-rocm-torch.txt + # brax + # jax + # jaxlib + # jaxopt + # mujoco-mjx +six==1.16.0 + # via + # -c .pin/../.pin/constraints-rocm-torch.txt + # asttokens + # ml-collections +sympy==1.12.1 + # via + # -c .pin/../.pin/constraints-rocm-torch.txt + # torch +tensorboardx==2.6.2.2 + # via + # -c .pin/../.pin/constraints-rocm-torch.txt + # brax +tensorstore==0.1.60 + # via + # -c .pin/../.pin/constraints-rocm-torch.txt + # flax + # orbax-checkpoint +toolz==0.12.1 + # via + # -c .pin/../.pin/constraints-rocm-torch.txt + # chex +torch==2.3.1+rocm6.0 + # via + # -c .pin/../.pin/constraints-rocm-torch.txt + # -r benchmarks/brax/requirements.in +trimesh==4.4.1 + # via + # -c .pin/../.pin/constraints-rocm-torch.txt + # brax + # mujoco-mjx +typing-extensions==4.12.2 + # via + # -c .pin/../.pin/constraints-rocm-torch.txt + # brax + # chex + # etils + # flax + # orbax-checkpoint + # reactivex + # torch 
+varname==0.10.0 + # via + # -c .pin/../.pin/constraints-rocm-torch.txt + # giving +voir==0.2.14 + # via + # -c .pin/../.pin/constraints-rocm-torch.txt + # -c .pin/../constraints/rocm.txt + # -r benchmarks/brax/requirements.in +werkzeug==3.0.3 + # via + # -c .pin/../.pin/constraints-rocm-torch.txt + # flask +zipp==3.19.2 + # via + # -c .pin/../.pin/constraints-rocm-torch.txt + # etils diff --git a/benchmarks/brax/requirements.xpu.txt b/benchmarks/brax/requirements.xpu.txt new file mode 100644 index 000000000..41b63f8a5 --- /dev/null +++ b/benchmarks/brax/requirements.xpu.txt @@ -0,0 +1,448 @@ +# +# This file is autogenerated by pip-compile with Python 3.11 +# by the following command: +# +# pip-compile --output-file=benchmarks/brax/requirements.xpu.txt .pin/tmp-constraints-xpu-brax.txt benchmarks/brax/requirements.in +# +--extra-index-url https://pypi.ngc.nvidia.com +--extra-index-url https://pytorch-extension.intel.com/release-whl/stable/xpu/us/ +--find-links https://storage.googleapis.com/jax-releases/jax_cuda_releases.html +--trusted-host pypi.ngc.nvidia.com + +absl-py==2.1.0 + # via + # -c .pin/../.pin/constraints-xpu-torch.txt + # brax + # chex + # dm-env + # ml-collections + # mujoco + # mujoco-mjx + # optax + # orbax-checkpoint +antlr4-python3-runtime==4.9.3 + # via + # -c .pin/../.pin/constraints-xpu-torch.txt + # omegaconf +asttokens==2.4.1 + # via + # -c .pin/../.pin/constraints-xpu-torch.txt + # giving +blinker==1.8.2 + # via + # -c .pin/../.pin/constraints-xpu-torch.txt + # flask +brax==0.10.5 + # via + # -c .pin/../.pin/constraints-xpu-torch.txt + # -r benchmarks/brax/requirements.in +chex==0.1.86 + # via + # -c .pin/../.pin/constraints-xpu-torch.txt + # optax +click==8.1.7 + # via + # -c .pin/../.pin/constraints-xpu-torch.txt + # flask +cloudpickle==3.0.0 + # via + # -c .pin/../.pin/constraints-xpu-torch.txt + # gym +codefind==0.1.6 + # via + # -c .pin/../.pin/constraints-xpu-torch.txt + # ptera +contextlib2==21.6.0 + # via + # -c 
.pin/../.pin/constraints-xpu-torch.txt + # ml-collections +dm-env==1.6 + # via + # -c .pin/../.pin/constraints-xpu-torch.txt + # brax +dm-tree==0.1.8 + # via + # -c .pin/../.pin/constraints-xpu-torch.txt + # dm-env +etils[epath,epy]==1.9.1 + # via + # -c .pin/../.pin/constraints-xpu-torch.txt + # brax + # mujoco + # mujoco-mjx + # orbax-checkpoint +executing==1.2.0 + # via + # -c .pin/../.pin/constraints-xpu-torch.txt + # varname +filelock==3.14.0 + # via + # -c .pin/../.pin/constraints-xpu-torch.txt + # torch + # triton +flask==3.0.3 + # via + # -c .pin/../.pin/constraints-xpu-torch.txt + # brax + # flask-cors +flask-cors==4.0.1 + # via + # -c .pin/../.pin/constraints-xpu-torch.txt + # brax +flax==0.8.4 + # via + # -c .pin/../.pin/constraints-xpu-torch.txt + # brax +fsspec==2024.3.1 + # via + # -c .pin/../.pin/constraints-xpu-torch.txt + # etils + # torch +giving==0.4.2 + # via + # -c .pin/../.pin/constraints-xpu-torch.txt + # ptera + # voir +glfw==2.7.0 + # via + # -c .pin/../.pin/constraints-xpu-torch.txt + # mujoco +grpcio==1.64.1 + # via + # -c .pin/../.pin/constraints-xpu-torch.txt + # brax +gym==0.26.2 + # via + # -c .pin/../.pin/constraints-xpu-torch.txt + # brax +gym-notices==0.0.8 + # via + # -c .pin/../.pin/constraints-xpu-torch.txt + # gym +importlib-resources==6.4.0 + # via + # -c .pin/../.pin/constraints-xpu-torch.txt + # etils +itsdangerous==2.2.0 + # via + # -c .pin/../.pin/constraints-xpu-torch.txt + # flask +jax[cuda12]==0.4.28 + # via + # -c .pin/../.pin/constraints-xpu-torch.txt + # -r benchmarks/brax/requirements.in + # brax + # chex + # flax + # jaxopt + # mujoco-mjx + # optax + # orbax-checkpoint +jax-cuda12-pjrt==0.4.28 + # via + # -c .pin/../.pin/constraints-xpu-torch.txt + # jax-cuda12-plugin +jax-cuda12-plugin==0.4.28 + # via + # -c .pin/../.pin/constraints-xpu-torch.txt + # jax +jaxlib==0.4.28+cuda12.cudnn89 + # via + # -c .pin/../.pin/constraints-xpu-torch.txt + # brax + # chex + # jax + # jaxopt + # mujoco-mjx + # optax + # 
orbax-checkpoint +jaxopt==0.8.3 + # via + # -c .pin/../.pin/constraints-xpu-torch.txt + # brax +jinja2==3.1.4 + # via + # -c .pin/../.pin/constraints-xpu-torch.txt + # brax + # flask + # torch +markdown-it-py==3.0.0 + # via + # -c .pin/../.pin/constraints-xpu-torch.txt + # rich +markupsafe==2.1.5 + # via + # -c .pin/../.pin/constraints-xpu-torch.txt + # jinja2 + # werkzeug +mdurl==0.1.2 + # via + # -c .pin/../.pin/constraints-xpu-torch.txt + # markdown-it-py +ml-collections==0.1.1 + # via + # -c .pin/../.pin/constraints-xpu-torch.txt + # brax +ml-dtypes==0.4.0 + # via + # -c .pin/../.pin/constraints-xpu-torch.txt + # jax + # jaxlib + # tensorstore +mpmath==1.3.0 + # via + # -c .pin/../.pin/constraints-xpu-torch.txt + # sympy +msgpack==1.0.8 + # via + # -c .pin/../.pin/constraints-xpu-torch.txt + # flax + # orbax-checkpoint +mujoco==3.1.6 + # via + # -c .pin/../.pin/constraints-xpu-torch.txt + # brax + # mujoco-mjx +mujoco-mjx==3.1.6 + # via + # -c .pin/../.pin/constraints-xpu-torch.txt + # brax +nest-asyncio==1.6.0 + # via + # -c .pin/../.pin/constraints-xpu-torch.txt + # orbax-checkpoint +networkx==3.3 + # via + # -c .pin/../.pin/constraints-xpu-torch.txt + # torch +numpy==1.26.4 + # via + # -c .pin/../.pin/constraints-xpu-torch.txt + # brax + # chex + # dm-env + # flax + # gym + # jax + # jaxlib + # jaxopt + # ml-dtypes + # mujoco + # opt-einsum + # optax + # orbax-checkpoint + # scipy + # tensorboardx + # tensorstore + # trimesh +nvidia-cublas-cu12==12.1.3.1 + # via + # -c .pin/../.pin/constraints-xpu-torch.txt + # jax + # nvidia-cudnn-cu12 + # nvidia-cusolver-cu12 + # torch +nvidia-cuda-cupti-cu12==12.1.105 + # via + # -c .pin/../.pin/constraints-xpu-torch.txt + # jax + # torch +nvidia-cuda-nvcc-cu12==12.5.40 + # via + # -c .pin/../.pin/constraints-xpu-torch.txt + # jax +nvidia-cuda-nvrtc-cu12==12.1.105 + # via + # -c .pin/../.pin/constraints-xpu-torch.txt + # torch +nvidia-cuda-runtime-cu12==12.1.105 + # via + # -c .pin/../.pin/constraints-xpu-torch.txt + # 
jax + # torch +nvidia-cudnn-cu12==8.9.2.26 + # via + # -c .pin/../.pin/constraints-xpu-torch.txt + # jax + # torch +nvidia-cufft-cu12==11.0.2.54 + # via + # -c .pin/../.pin/constraints-xpu-torch.txt + # jax + # torch +nvidia-curand-cu12==10.3.2.106 + # via + # -c .pin/../.pin/constraints-xpu-torch.txt + # torch +nvidia-cusolver-cu12==11.4.5.107 + # via + # -c .pin/../.pin/constraints-xpu-torch.txt + # jax + # torch +nvidia-cusparse-cu12==12.1.0.106 + # via + # -c .pin/../.pin/constraints-xpu-torch.txt + # jax + # nvidia-cusolver-cu12 + # torch +nvidia-nccl-cu12==2.20.5 + # via + # -c .pin/../.pin/constraints-xpu-torch.txt + # jax + # torch +nvidia-nvjitlink-cu12==12.5.40 + # via + # -c .pin/../.pin/constraints-xpu-torch.txt + # jax + # nvidia-cusolver-cu12 + # nvidia-cusparse-cu12 +nvidia-nvtx-cu12==12.1.105 + # via + # -c .pin/../.pin/constraints-xpu-torch.txt + # torch +omegaconf==2.3.0 + # via + # -c .pin/../.pin/constraints-xpu-torch.txt + # voir +opt-einsum==3.3.0 + # via + # -c .pin/../.pin/constraints-xpu-torch.txt + # jax +optax==0.2.2 + # via + # -c .pin/../.pin/constraints-xpu-torch.txt + # brax + # flax +orbax-checkpoint==0.5.15 + # via + # -c .pin/../.pin/constraints-xpu-torch.txt + # brax + # flax +ovld==0.3.5 + # via + # -c .pin/../.pin/constraints-xpu-torch.txt + # voir +packaging==24.1 + # via + # -c .pin/../.pin/constraints-xpu-torch.txt + # tensorboardx +pillow==10.3.0 + # via + # -c .pin/../.pin/constraints-xpu-torch.txt + # brax +protobuf==4.25.3 + # via + # -c .pin/../.pin/constraints-xpu-torch.txt + # orbax-checkpoint + # tensorboardx +psutil==5.9.8 + # via + # -c .pin/../.pin/constraints-xpu-torch.txt + # voir +ptera==1.4.1 + # via + # -c .pin/../.pin/constraints-xpu-torch.txt + # voir +pygments==2.18.0 + # via + # -c .pin/../.pin/constraints-xpu-torch.txt + # rich +pynvml==11.5.0 + # via + # -c .pin/../.pin/constraints-xpu-torch.txt + # voir +pyopengl==3.1.7 + # via + # -c .pin/../.pin/constraints-xpu-torch.txt + # mujoco 
+pytinyrenderer==0.0.14 + # via + # -c .pin/../.pin/constraints-xpu-torch.txt + # brax +pyyaml==6.0.1 + # via + # -c .pin/../.pin/constraints-xpu-torch.txt + # flax + # ml-collections + # omegaconf + # orbax-checkpoint +reactivex==4.0.4 + # via + # -c .pin/../.pin/constraints-xpu-torch.txt + # giving +rich==13.7.1 + # via + # -c .pin/../.pin/constraints-xpu-torch.txt + # flax + # voir +scipy==1.13.1 + # via + # -c .pin/../.pin/constraints-xpu-torch.txt + # brax + # jax + # jaxlib + # jaxopt + # mujoco-mjx +six==1.16.0 + # via + # -c .pin/../.pin/constraints-xpu-torch.txt + # asttokens + # ml-collections +sympy==1.12.1 + # via + # -c .pin/../.pin/constraints-xpu-torch.txt + # torch +tensorboardx==2.6.2.2 + # via + # -c .pin/../.pin/constraints-xpu-torch.txt + # brax +tensorstore==0.1.60 + # via + # -c .pin/../.pin/constraints-xpu-torch.txt + # flax + # orbax-checkpoint +toolz==0.12.1 + # via + # -c .pin/../.pin/constraints-xpu-torch.txt + # chex +torch==2.3.1 + # via + # -c .pin/../.pin/constraints-xpu-torch.txt + # -c .pin/../constraints/xpu.txt + # -r benchmarks/brax/requirements.in +trimesh==4.4.1 + # via + # -c .pin/../.pin/constraints-xpu-torch.txt + # brax + # mujoco-mjx +triton==2.3.1 + # via + # -c .pin/../.pin/constraints-xpu-torch.txt + # torch +typing-extensions==4.12.2 + # via + # -c .pin/../.pin/constraints-xpu-torch.txt + # brax + # chex + # etils + # flax + # orbax-checkpoint + # reactivex + # torch +varname==0.10.0 + # via + # -c .pin/../.pin/constraints-xpu-torch.txt + # giving +voir==0.2.14 + # via + # -c .pin/../.pin/constraints-xpu-torch.txt + # -c .pin/../constraints/xpu.txt + # -r benchmarks/brax/requirements.in +werkzeug==3.0.3 + # via + # -c .pin/../.pin/constraints-xpu-torch.txt + # flask +zipp==3.19.2 + # via + # -c .pin/../.pin/constraints-xpu-torch.txt + # etils diff --git a/benchmarks/brax/voirfile.py b/benchmarks/brax/voirfile.py new file mode 100644 index 000000000..771927e50 --- /dev/null +++ b/benchmarks/brax/voirfile.py @@ -0,0 
+1,42 @@ +from dataclasses import dataclass + +from voir import configurable +from voir.instruments import dash, early_stop, gpu_monitor, log, rate + + +@dataclass +class Config: + """voir configuration""" + + # Whether to display the dash or not + dash: bool = False + + # How often to log the rates + interval: str = "1s" + + # Number of rates to skip before logging + skip: int = 5 + + # Number of rates to log before stopping + stop: int = 20 + + # Number of seconds between each gpu poll + gpu_poll: int = 3 + + +@configurable +def instrument_main(ov, options: Config): + yield ov.phases.init + + if options.dash: + ov.require(dash) + + ov.require( + log("value", "progress", "rate", "units", "loss", "gpudata", context="task"), + rate( + interval=options.interval, + sync=None, + ), + early_stop(n=options.stop, key="rate", task="train"), + gpu_monitor(poll_interval=3), + ) diff --git a/benchmarks/dlrm/requirements.cuda.txt b/benchmarks/dlrm/requirements.cuda.txt index 8a264845b..b8e79dd60 100644 --- a/benchmarks/dlrm/requirements.cuda.txt +++ b/benchmarks/dlrm/requirements.cuda.txt @@ -2,11 +2,14 @@ # This file is autogenerated by pip-compile with Python 3.11 # by the following command: # -# pip-compile --config=pyproject.toml --output-file=benchmarks/dlrm/requirements.cuda.txt --resolver=backtracking .pin/tmp-constraints-cuda-dlrm.txt benchmarks/dlrm/requirements.in +# pip-compile --output-file=benchmarks/dlrm/requirements.cuda.txt .pin/tmp-constraints-cuda-dlrm.txt benchmarks/dlrm/requirements.in # ---extra-index-url https://download.pytorch.org/whl/cu118 +--extra-index-url https://pypi.ngc.nvidia.com +--extra-index-url https://download.pytorch.org/whl/cu121 +--find-links https://storage.googleapis.com/jax-releases/jax_cuda_releases.html +--trusted-host pypi.ngc.nvidia.com -absl-py==2.0.0 +absl-py==2.1.0 # via # -c .pin/../.pin/constraints-cuda-torch.txt # tensorboard @@ -18,11 +21,7 @@ asttokens==2.4.1 # via # -c .pin/../.pin/constraints-cuda-torch.txt # giving 
-cachetools==5.3.2 - # via - # -c .pin/../.pin/constraints-cuda-torch.txt - # google-auth -certifi==2023.7.22 +certifi==2024.6.2 # via # -c .pin/../.pin/constraints-cuda-torch.txt # requests @@ -30,11 +29,11 @@ charset-normalizer==3.3.2 # via # -c .pin/../.pin/constraints-cuda-torch.txt # requests -codefind==0.1.3 +codefind==0.1.6 # via # -c .pin/../.pin/constraints-cuda-torch.txt # ptera -docker==6.1.3 +docker==7.1.0 # via # -c .pin/../.pin/constraints-cuda-torch.txt # torchx @@ -46,66 +45,59 @@ executing==1.2.0 # via # -c .pin/../.pin/constraints-cuda-torch.txt # varname -fbgemm-gpu==0.5.0+cu118 +fbgemm-gpu==0.7.0+cu121 # via # -c .pin/../.pin/constraints-cuda-torch.txt # torchrec -filelock==3.13.1 +filelock==3.14.0 # via # -c .pin/../.pin/constraints-cuda-torch.txt # torch # torchx # triton -fsspec==2023.10.0 +fsspec==2024.3.1 # via # -c .pin/../.pin/constraints-cuda-torch.txt # torch # torchx -future==0.18.3 - # via -r benchmarks/dlrm/requirements.in +future==1.0.0 + # via + # -c .pin/../.pin/constraints-cuda-torch.txt + # -r benchmarks/dlrm/requirements.in giving==0.4.2 # via # -c .pin/../.pin/constraints-cuda-torch.txt # ptera # voir -google-auth==2.23.4 - # via - # -c .pin/../.pin/constraints-cuda-torch.txt - # google-auth-oauthlib - # tensorboard -google-auth-oauthlib==1.1.0 - # via - # -c .pin/../.pin/constraints-cuda-torch.txt - # tensorboard -graphviz==0.20.1 +graphviz==0.20.3 # via # -c .pin/../.pin/constraints-cuda-torch.txt # torchviz -grpcio==1.59.2 +grpcio==1.64.1 # via # -c .pin/../.pin/constraints-cuda-torch.txt # tensorboard -idna==3.4 +idna==3.7 # via # -c .pin/../.pin/constraints-cuda-torch.txt # requests -importlib-metadata==6.8.0 +importlib-metadata==7.1.0 # via # -c .pin/../.pin/constraints-cuda-torch.txt # torchx -jinja2==3.1.2 +jinja2==3.1.4 # via # -c .pin/../.pin/constraints-cuda-torch.txt # torch -joblib==1.3.2 +joblib==1.4.2 # via # -c .pin/../.pin/constraints-cuda-torch.txt # scikit-learn -lightning-utilities==0.9.0 
+lightning-utilities==0.11.2 # via # -c .pin/../.pin/constraints-cuda-torch.txt # torchmetrics -markdown==3.5.1 +markdown==3.6 # via # -c .pin/../.pin/constraints-cuda-torch.txt # tensorboard @@ -113,7 +105,7 @@ markdown-it-py==3.0.0 # via # -c .pin/../.pin/constraints-cuda-torch.txt # rich -markupsafe==2.1.3 +markupsafe==2.1.5 # via # -c .pin/../.pin/constraints-cuda-torch.txt # jinja2 @@ -130,59 +122,107 @@ mypy-extensions==1.0.0 # via # -c .pin/../.pin/constraints-cuda-torch.txt # typing-inspect -networkx==3.2.1 +networkx==3.3 # via # -c .pin/../.pin/constraints-cuda-torch.txt # torch -numpy==1.26.1 +numpy==1.26.4 # via + # -c .pin/../.pin/constraints-cuda-torch.txt # -r benchmarks/dlrm/requirements.in + # fbgemm-gpu # onnx # scikit-learn # scipy # tensorboard # torchmetrics -oauthlib==3.2.2 +nvidia-cublas-cu12==12.1.3.1 + # via + # -c .pin/../.pin/constraints-cuda-torch.txt + # nvidia-cudnn-cu12 + # nvidia-cusolver-cu12 + # torch +nvidia-cuda-cupti-cu12==12.1.105 # via # -c .pin/../.pin/constraints-cuda-torch.txt - # requests-oauthlib + # torch +nvidia-cuda-nvrtc-cu12==12.1.105 + # via + # -c .pin/../.pin/constraints-cuda-torch.txt + # torch +nvidia-cuda-runtime-cu12==12.1.105 + # via + # -c .pin/../.pin/constraints-cuda-torch.txt + # torch +nvidia-cudnn-cu12==8.9.2.26 + # via + # -c .pin/../.pin/constraints-cuda-torch.txt + # torch +nvidia-cufft-cu12==11.0.2.54 + # via + # -c .pin/../.pin/constraints-cuda-torch.txt + # torch +nvidia-curand-cu12==10.3.2.106 + # via + # -c .pin/../.pin/constraints-cuda-torch.txt + # torch +nvidia-cusolver-cu12==11.4.5.107 + # via + # -c .pin/../.pin/constraints-cuda-torch.txt + # torch +nvidia-cusparse-cu12==12.1.0.106 + # via + # -c .pin/../.pin/constraints-cuda-torch.txt + # nvidia-cusolver-cu12 + # torch +nvidia-nccl-cu12==2.20.5 + # via + # -c .pin/../.pin/constraints-cuda-torch.txt + # torch +nvidia-nvjitlink-cu12==12.5.40 + # via + # -c .pin/../.pin/constraints-cuda-torch.txt + # nvidia-cusolver-cu12 + # 
nvidia-cusparse-cu12 +nvidia-nvtx-cu12==12.1.105 + # via + # -c .pin/../.pin/constraints-cuda-torch.txt + # torch omegaconf==2.3.0 # via # -c .pin/../.pin/constraints-cuda-torch.txt # voir -onnx==1.15.0 - # via -r benchmarks/dlrm/requirements.in -ovld==0.3.2 +onnx==1.16.1 + # via + # -c .pin/../.pin/constraints-cuda-torch.txt + # -r benchmarks/dlrm/requirements.in +ovld==0.3.5 # via # -c .pin/../.pin/constraints-cuda-torch.txt # voir -packaging==23.2 +packaging==24.1 # via # -c .pin/../.pin/constraints-cuda-torch.txt - # docker # lightning-utilities # torchmetrics -protobuf==4.23.4 +protobuf==4.25.3 # via # -c .pin/../.pin/constraints-cuda-torch.txt # onnx # tensorboard -ptera==1.4.1 +psutil==5.9.8 # via # -c .pin/../.pin/constraints-cuda-torch.txt # voir -pyasn1==0.5.0 +ptera==1.4.1 # via # -c .pin/../.pin/constraints-cuda-torch.txt - # pyasn1-modules - # rsa -pyasn1-modules==0.3.0 + # voir +pydot==2.0.0 # via # -c .pin/../.pin/constraints-cuda-torch.txt - # google-auth -pydot==1.4.2 - # via -r benchmarks/dlrm/requirements.in -pygments==2.16.1 + # -r benchmarks/dlrm/requirements.in +pygments==2.18.0 # via # -c .pin/../.pin/constraints-cuda-torch.txt # rich @@ -190,7 +230,7 @@ pynvml==11.5.0 # via # -c .pin/../.pin/constraints-cuda-torch.txt # voir -pyparsing==3.1.1 +pyparsing==3.1.2 # via # -c .pin/../.pin/constraints-cuda-torch.txt # pydot @@ -207,27 +247,19 @@ reactivex==4.0.4 # via # -c .pin/../.pin/constraints-cuda-torch.txt # giving -requests==2.31.0 +requests==2.32.3 # via # -c .pin/../.pin/constraints-cuda-torch.txt # docker - # requests-oauthlib - # tensorboard -requests-oauthlib==1.3.1 - # via - # -c .pin/../.pin/constraints-cuda-torch.txt - # google-auth-oauthlib -rich==13.6.0 +rich==13.7.1 # via # -c .pin/../.pin/constraints-cuda-torch.txt # voir -rsa==4.9 +scikit-learn==1.5.0 # via # -c .pin/../.pin/constraints-cuda-torch.txt - # google-auth -scikit-learn==1.3.2 - # via -r benchmarks/dlrm/requirements.in -scipy==1.11.3 + # -r 
benchmarks/dlrm/requirements.in +scipy==1.13.1 # via # -c .pin/../.pin/constraints-cuda-torch.txt # scikit-learn @@ -236,7 +268,7 @@ six==1.16.0 # -c .pin/../.pin/constraints-cuda-torch.txt # asttokens # tensorboard -sympy==1.12 +sympy==1.12.1 # via # -c .pin/../.pin/constraints-cuda-torch.txt # torch @@ -244,18 +276,21 @@ tabulate==0.9.0 # via # -c .pin/../.pin/constraints-cuda-torch.txt # torchx -tensorboard==2.15.1 - # via -r benchmarks/dlrm/requirements.in +tensorboard==2.17.0 + # via + # -c .pin/../.pin/constraints-cuda-torch.txt + # -r benchmarks/dlrm/requirements.in tensorboard-data-server==0.7.2 # via # -c .pin/../.pin/constraints-cuda-torch.txt # tensorboard -threadpoolctl==3.2.0 +threadpoolctl==3.5.0 # via # -c .pin/../.pin/constraints-cuda-torch.txt # scikit-learn -torch==2.1.0+cu118 +torch==2.3.1+cu121 # via + # -c .pin/../.pin/constraints-cuda-torch.txt # -r benchmarks/dlrm/requirements.in # torchmetrics # torchviz @@ -263,21 +298,28 @@ torchmetrics==1.0.3 # via # -c .pin/../.pin/constraints-cuda-torch.txt # torchrec -torchrec==0.5.0+cu118 - # via -r benchmarks/dlrm/requirements.in +torchrec==0.7.0+cu121 + # via + # -c .pin/../.pin/constraints-cuda-torch.txt + # -r benchmarks/dlrm/requirements.in torchviz==0.0.2 - # via -r benchmarks/dlrm/requirements.in + # via + # -c .pin/../.pin/constraints-cuda-torch.txt + # -r benchmarks/dlrm/requirements.in torchx==0.5.0 - # via -r benchmarks/dlrm/requirements.in -tqdm==4.66.1 # via + # -c .pin/../.pin/constraints-cuda-torch.txt + # -r benchmarks/dlrm/requirements.in +tqdm==4.66.4 + # via + # -c .pin/../.pin/constraints-cuda-torch.txt # -r benchmarks/dlrm/requirements.in # torchrec -triton==2.1.0 +triton==2.3.1 # via # -c .pin/../.pin/constraints-cuda-torch.txt # torch -typing-extensions==4.8.0 +typing-extensions==4.12.2 # via # -c .pin/../.pin/constraints-cuda-torch.txt # lightning-utilities @@ -300,16 +342,15 @@ varname==0.10.0 # -c .pin/../.pin/constraints-cuda-torch.txt # giving voir==0.2.15 - # via -r 
benchmarks/dlrm/requirements.in -websocket-client==1.6.4 # via # -c .pin/../.pin/constraints-cuda-torch.txt - # docker -werkzeug==3.0.1 + # -c .pin/../constraints/cuda.txt + # -r benchmarks/dlrm/requirements.in +werkzeug==3.0.3 # via # -c .pin/../.pin/constraints-cuda-torch.txt # tensorboard -zipp==3.17.0 +zipp==3.19.2 # via # -c .pin/../.pin/constraints-cuda-torch.txt # importlib-metadata diff --git a/benchmarks/dlrm/requirements.hpu.txt b/benchmarks/dlrm/requirements.hpu.txt index 39df8f0f1..1c7902cf6 100644 --- a/benchmarks/dlrm/requirements.hpu.txt +++ b/benchmarks/dlrm/requirements.hpu.txt @@ -1,9 +1,13 @@ # -# This file is autogenerated by pip-compile with Python 3.10 +# This file is autogenerated by pip-compile with Python 3.11 # by the following command: # -# pip-compile --output-file=milabench/benchmarks/dlrm/requirements.hpu.txt --resolver=backtracking .pin/tmp-constraints-hpu-dlrm.txt milabench/benchmarks/dlrm/requirements.in +# pip-compile --output-file=benchmarks/dlrm/requirements.hpu.txt .pin/tmp-constraints-hpu-dlrm.txt benchmarks/dlrm/requirements.in # +--extra-index-url https://pypi.ngc.nvidia.com +--find-links https://storage.googleapis.com/jax-releases/jax_cuda_releases.html +--trusted-host pypi.ngc.nvidia.com + absl-py==2.1.0 # via # -c .pin/../.pin/constraints-hpu-torch.txt @@ -16,7 +20,7 @@ asttokens==2.4.1 # via # -c .pin/../.pin/constraints-hpu-torch.txt # giving -certifi==2024.2.2 +certifi==2024.6.2 # via # -c .pin/../.pin/constraints-hpu-torch.txt # requests @@ -24,11 +28,11 @@ charset-normalizer==3.3.2 # via # -c .pin/../.pin/constraints-hpu-torch.txt # requests -codefind==0.1.4 +codefind==0.1.6 # via # -c .pin/../.pin/constraints-hpu-torch.txt # ptera -docker==7.0.0 +docker==7.1.0 # via # -c .pin/../.pin/constraints-hpu-torch.txt # torchx @@ -56,7 +60,9 @@ fsspec==2024.3.1 # torch # torchx future==1.0.0 - # via -r milabench/benchmarks/dlrm/requirements.in + # via + # -c .pin/../.pin/constraints-hpu-torch.txt + # -r 
benchmarks/dlrm/requirements.in giving==0.4.2 # via # -c .pin/../.pin/constraints-hpu-torch.txt @@ -66,7 +72,7 @@ graphviz==0.20.3 # via # -c .pin/../.pin/constraints-hpu-torch.txt # torchviz -grpcio==1.63.0 +grpcio==1.64.1 # via # -c .pin/../.pin/constraints-hpu-torch.txt # tensorboard @@ -121,7 +127,8 @@ networkx==3.3 # torch numpy==1.26.4 # via - # -r milabench/benchmarks/dlrm/requirements.in + # -c .pin/../.pin/constraints-hpu-torch.txt + # -r benchmarks/dlrm/requirements.in # fbgemm-gpu # onnx # scikit-learn @@ -171,7 +178,7 @@ nvidia-nccl-cu12==2.20.5 # via # -c .pin/../.pin/constraints-hpu-torch.txt # torch -nvidia-nvjitlink-cu12==12.4.127 +nvidia-nvjitlink-cu12==12.5.40 # via # -c .pin/../.pin/constraints-hpu-torch.txt # nvidia-cusolver-cu12 @@ -184,19 +191,20 @@ omegaconf==2.3.0 # via # -c .pin/../.pin/constraints-hpu-torch.txt # voir -onnx==1.16.0 - # via -r milabench/benchmarks/dlrm/requirements.in +onnx==1.16.1 + # via + # -c .pin/../.pin/constraints-hpu-torch.txt + # -r benchmarks/dlrm/requirements.in ovld==0.3.5 # via # -c .pin/../.pin/constraints-hpu-torch.txt # voir -packaging==24.0 +packaging==24.1 # via # -c .pin/../.pin/constraints-hpu-torch.txt - # docker # lightning-utilities # torchmetrics -protobuf==5.26.1 +protobuf==4.25.3 # via # -c .pin/../.pin/constraints-hpu-torch.txt # onnx @@ -210,7 +218,9 @@ ptera==1.4.1 # -c .pin/../.pin/constraints-hpu-torch.txt # voir pydot==2.0.0 - # via -r milabench/benchmarks/dlrm/requirements.in + # via + # -c .pin/../.pin/constraints-hpu-torch.txt + # -r benchmarks/dlrm/requirements.in pygments==2.18.0 # via # -c .pin/../.pin/constraints-hpu-torch.txt @@ -236,7 +246,7 @@ reactivex==4.0.4 # via # -c .pin/../.pin/constraints-hpu-torch.txt # giving -requests==2.31.0 +requests==2.32.3 # via # -c .pin/../.pin/constraints-hpu-torch.txt # docker @@ -244,9 +254,11 @@ rich==13.7.1 # via # -c .pin/../.pin/constraints-hpu-torch.txt # voir -scikit-learn==1.4.2 - # via -r milabench/benchmarks/dlrm/requirements.in 
-scipy==1.13.0 +scikit-learn==1.5.0 + # via + # -c .pin/../.pin/constraints-hpu-torch.txt + # -r benchmarks/dlrm/requirements.in +scipy==1.13.1 # via # -c .pin/../.pin/constraints-hpu-torch.txt # scikit-learn @@ -255,7 +267,7 @@ six==1.16.0 # -c .pin/../.pin/constraints-hpu-torch.txt # asttokens # tensorboard -sympy==1.12 +sympy==1.12.1 # via # -c .pin/../.pin/constraints-hpu-torch.txt # torch @@ -263,8 +275,10 @@ tabulate==0.9.0 # via # -c .pin/../.pin/constraints-hpu-torch.txt # torchx -tensorboard==2.16.2 - # via -r milabench/benchmarks/dlrm/requirements.in +tensorboard==2.17.0 + # via + # -c .pin/../.pin/constraints-hpu-torch.txt + # -r benchmarks/dlrm/requirements.in tensorboard-data-server==0.7.2 # via # -c .pin/../.pin/constraints-hpu-torch.txt @@ -273,9 +287,10 @@ threadpoolctl==3.5.0 # via # -c .pin/../.pin/constraints-hpu-torch.txt # scikit-learn -torch==2.3.0 +torch==2.3.1 # via - # -r milabench/benchmarks/dlrm/requirements.in + # -c .pin/../.pin/constraints-hpu-torch.txt + # -r benchmarks/dlrm/requirements.in # torchmetrics # torchviz torchmetrics==1.0.3 @@ -283,20 +298,27 @@ torchmetrics==1.0.3 # -c .pin/../.pin/constraints-hpu-torch.txt # torchrec torchrec==0.7.0 - # via -r milabench/benchmarks/dlrm/requirements.in + # via + # -c .pin/../.pin/constraints-hpu-torch.txt + # -r benchmarks/dlrm/requirements.in torchviz==0.0.2 - # via -r milabench/benchmarks/dlrm/requirements.in + # via + # -c .pin/../.pin/constraints-hpu-torch.txt + # -r benchmarks/dlrm/requirements.in torchx==0.5.0 - # via -r milabench/benchmarks/dlrm/requirements.in + # via + # -c .pin/../.pin/constraints-hpu-torch.txt + # -r benchmarks/dlrm/requirements.in tqdm==4.66.4 # via - # -r milabench/benchmarks/dlrm/requirements.in + # -c .pin/../.pin/constraints-hpu-torch.txt + # -r benchmarks/dlrm/requirements.in # torchrec -triton==2.3.0 +triton==2.3.1 # via # -c .pin/../.pin/constraints-hpu-torch.txt # torch -typing-extensions==4.11.0 +typing-extensions==4.12.2 # via # -c 
.pin/../.pin/constraints-hpu-torch.txt # lightning-utilities @@ -324,7 +346,7 @@ werkzeug==3.0.3 # via # -c .pin/../.pin/constraints-hpu-torch.txt # tensorboard -zipp==3.18.1 +zipp==3.19.2 # via # -c .pin/../.pin/constraints-hpu-torch.txt # importlib-metadata diff --git a/benchmarks/dlrm/requirements.rocm.txt b/benchmarks/dlrm/requirements.rocm.txt index 32c5e3d0d..7b688ac29 100644 --- a/benchmarks/dlrm/requirements.rocm.txt +++ b/benchmarks/dlrm/requirements.rocm.txt @@ -2,11 +2,14 @@ # This file is autogenerated by pip-compile with Python 3.11 # by the following command: # -# pip-compile --config=pyproject.toml --output-file=benchmarks/dlrm/requirements.rocm.txt --resolver=backtracking .pin/tmp-constraints-rocm-dlrm.txt benchmarks/dlrm/requirements.in +# pip-compile --output-file=benchmarks/dlrm/requirements.rocm.txt .pin/tmp-constraints-rocm-dlrm.txt benchmarks/dlrm/requirements.in # ---extra-index-url https://download.pytorch.org/whl/rocm5.6/ +--extra-index-url https://pypi.ngc.nvidia.com +--extra-index-url https://download.pytorch.org/whl/rocm6.0 +--find-links https://storage.googleapis.com/jax-releases/jax_cuda_releases.html +--trusted-host pypi.ngc.nvidia.com -absl-py==2.0.0 +absl-py==2.1.0 # via # -c .pin/../.pin/constraints-rocm-torch.txt # tensorboard @@ -18,11 +21,7 @@ asttokens==2.4.1 # via # -c .pin/../.pin/constraints-rocm-torch.txt # giving -cachetools==5.3.2 - # via - # -c .pin/../.pin/constraints-rocm-torch.txt - # google-auth -certifi==2023.7.22 +certifi==2024.6.2 # via # -c .pin/../.pin/constraints-rocm-torch.txt # requests @@ -30,15 +29,11 @@ charset-normalizer==3.3.2 # via # -c .pin/../.pin/constraints-rocm-torch.txt # requests -cmake==3.27.7 - # via - # -c .pin/../.pin/constraints-rocm-torch.txt - # pytorch-triton-rocm -codefind==0.1.3 +codefind==0.1.6 # via # -c .pin/../.pin/constraints-rocm-torch.txt # ptera -docker==6.1.3 +docker==7.1.0 # via # -c .pin/../.pin/constraints-rocm-torch.txt # torchx @@ -50,70 +45,59 @@ executing==1.2.0 # via # 
-c .pin/../.pin/constraints-rocm-torch.txt # varname -fbgemm-gpu==0.5.0 +fbgemm-gpu==0.7.0+rocm6.0 # via # -c .pin/../.pin/constraints-rocm-torch.txt # torchrec -filelock==3.13.1 +filelock==3.14.0 # via # -c .pin/../.pin/constraints-rocm-torch.txt # pytorch-triton-rocm # torch # torchx -fsspec==2023.10.0 +fsspec==2024.3.1 # via # -c .pin/../.pin/constraints-rocm-torch.txt # torch # torchx -future==0.18.3 - # via -r benchmarks/dlrm/requirements.in +future==1.0.0 + # via + # -c .pin/../.pin/constraints-rocm-torch.txt + # -r benchmarks/dlrm/requirements.in giving==0.4.2 # via # -c .pin/../.pin/constraints-rocm-torch.txt # ptera # voir -google-auth==2.23.4 - # via - # -c .pin/../.pin/constraints-rocm-torch.txt - # google-auth-oauthlib - # tensorboard -google-auth-oauthlib==1.1.0 - # via - # -c .pin/../.pin/constraints-rocm-torch.txt - # tensorboard -graphviz==0.20.1 +graphviz==0.20.3 # via # -c .pin/../.pin/constraints-rocm-torch.txt # torchviz -grpcio==1.59.2 +grpcio==1.64.1 # via # -c .pin/../.pin/constraints-rocm-torch.txt # tensorboard -idna==3.4 +idna==3.7 # via # -c .pin/../.pin/constraints-rocm-torch.txt # requests -importlib-metadata==6.8.0 +importlib-metadata==7.1.0 # via # -c .pin/../.pin/constraints-rocm-torch.txt # torchx -jinja2==3.1.2 +jinja2==3.1.4 # via # -c .pin/../.pin/constraints-rocm-torch.txt # torch -joblib==1.3.2 +joblib==1.4.2 # via # -c .pin/../.pin/constraints-rocm-torch.txt # scikit-learn -lightning-utilities==0.9.0 +lightning-utilities==0.11.2 # via # -c .pin/../.pin/constraints-rocm-torch.txt # torchmetrics -lit==17.0.4 - # via - # -c .pin/../.pin/constraints-rocm-torch.txt - # pytorch-triton-rocm -markdown==3.5.1 +markdown==3.6 # via # -c .pin/../.pin/constraints-rocm-torch.txt # tensorboard @@ -121,7 +105,7 @@ markdown-it-py==3.0.0 # via # -c .pin/../.pin/constraints-rocm-torch.txt # rich -markupsafe==2.1.3 +markupsafe==2.1.5 # via # -c .pin/../.pin/constraints-rocm-torch.txt # jinja2 @@ -138,12 +122,13 @@ mypy-extensions==1.0.0 # via # 
-c .pin/../.pin/constraints-rocm-torch.txt # typing-inspect -networkx==3.2.1 +networkx==3.3 # via # -c .pin/../.pin/constraints-rocm-torch.txt # torch -numpy==1.26.1 +numpy==1.26.4 # via + # -c .pin/../.pin/constraints-rocm-torch.txt # -r benchmarks/dlrm/requirements.in # fbgemm-gpu # onnx @@ -151,47 +136,41 @@ numpy==1.26.1 # scipy # tensorboard # torchmetrics -oauthlib==3.2.2 - # via - # -c .pin/../.pin/constraints-rocm-torch.txt - # requests-oauthlib omegaconf==2.3.0 # via # -c .pin/../.pin/constraints-rocm-torch.txt # voir -onnx==1.15.0 - # via -r benchmarks/dlrm/requirements.in -ovld==0.3.2 +onnx==1.16.1 + # via + # -c .pin/../.pin/constraints-rocm-torch.txt + # -r benchmarks/dlrm/requirements.in +ovld==0.3.5 # via # -c .pin/../.pin/constraints-rocm-torch.txt # voir -packaging==23.2 +packaging==24.1 # via # -c .pin/../.pin/constraints-rocm-torch.txt - # docker # lightning-utilities # torchmetrics -protobuf==4.23.4 +protobuf==4.25.3 # via # -c .pin/../.pin/constraints-rocm-torch.txt # onnx # tensorboard -ptera==1.4.1 +psutil==5.9.8 # via # -c .pin/../.pin/constraints-rocm-torch.txt # voir -pyasn1==0.5.0 +ptera==1.4.1 # via # -c .pin/../.pin/constraints-rocm-torch.txt - # pyasn1-modules - # rsa -pyasn1-modules==0.3.0 + # voir +pydot==2.0.0 # via # -c .pin/../.pin/constraints-rocm-torch.txt - # google-auth -pydot==1.4.2 - # via -r benchmarks/dlrm/requirements.in -pygments==2.16.1 + # -r benchmarks/dlrm/requirements.in +pygments==2.18.0 # via # -c .pin/../.pin/constraints-rocm-torch.txt # rich @@ -199,7 +178,7 @@ pynvml==11.5.0 # via # -c .pin/../.pin/constraints-rocm-torch.txt # voir -pyparsing==3.1.1 +pyparsing==3.1.2 # via # -c .pin/../.pin/constraints-rocm-torch.txt # pydot @@ -207,7 +186,7 @@ pyre-extensions==0.0.30 # via # -c .pin/../.pin/constraints-rocm-torch.txt # torchx -pytorch-triton-rocm==2.1.0 +pytorch-triton-rocm==2.3.1 # via # -c .pin/../.pin/constraints-rocm-torch.txt # torch @@ -220,27 +199,19 @@ reactivex==4.0.4 # via # -c 
.pin/../.pin/constraints-rocm-torch.txt # giving -requests==2.31.0 +requests==2.32.3 # via # -c .pin/../.pin/constraints-rocm-torch.txt # docker - # requests-oauthlib - # tensorboard -requests-oauthlib==1.3.1 - # via - # -c .pin/../.pin/constraints-rocm-torch.txt - # google-auth-oauthlib -rich==13.6.0 +rich==13.7.1 # via # -c .pin/../.pin/constraints-rocm-torch.txt # voir -rsa==4.9 +scikit-learn==1.5.0 # via # -c .pin/../.pin/constraints-rocm-torch.txt - # google-auth -scikit-learn==1.3.2 - # via -r benchmarks/dlrm/requirements.in -scipy==1.11.3 + # -r benchmarks/dlrm/requirements.in +scipy==1.13.1 # via # -c .pin/../.pin/constraints-rocm-torch.txt # scikit-learn @@ -249,7 +220,7 @@ six==1.16.0 # -c .pin/../.pin/constraints-rocm-torch.txt # asttokens # tensorboard -sympy==1.12 +sympy==1.12.1 # via # -c .pin/../.pin/constraints-rocm-torch.txt # torch @@ -257,37 +228,46 @@ tabulate==0.9.0 # via # -c .pin/../.pin/constraints-rocm-torch.txt # torchx -tensorboard==2.15.1 - # via -r benchmarks/dlrm/requirements.in +tensorboard==2.17.0 + # via + # -c .pin/../.pin/constraints-rocm-torch.txt + # -r benchmarks/dlrm/requirements.in tensorboard-data-server==0.7.2 # via # -c .pin/../.pin/constraints-rocm-torch.txt # tensorboard -threadpoolctl==3.2.0 +threadpoolctl==3.5.0 # via # -c .pin/../.pin/constraints-rocm-torch.txt # scikit-learn -torch==2.1.0+rocm5.6 +torch==2.3.1+rocm6.0 # via + # -c .pin/../.pin/constraints-rocm-torch.txt # -r benchmarks/dlrm/requirements.in - # pytorch-triton-rocm # torchmetrics # torchviz torchmetrics==1.0.3 # via # -c .pin/../.pin/constraints-rocm-torch.txt # torchrec -torchrec==0.5.0 - # via -r benchmarks/dlrm/requirements.in +torchrec==0.7.0 + # via + # -c .pin/../.pin/constraints-rocm-torch.txt + # -r benchmarks/dlrm/requirements.in torchviz==0.0.2 - # via -r benchmarks/dlrm/requirements.in + # via + # -c .pin/../.pin/constraints-rocm-torch.txt + # -r benchmarks/dlrm/requirements.in torchx==0.5.0 - # via -r benchmarks/dlrm/requirements.in 
-tqdm==4.66.1 # via + # -c .pin/../.pin/constraints-rocm-torch.txt + # -r benchmarks/dlrm/requirements.in +tqdm==4.66.4 + # via + # -c .pin/../.pin/constraints-rocm-torch.txt # -r benchmarks/dlrm/requirements.in # torchrec -typing-extensions==4.8.0 +typing-extensions==4.12.2 # via # -c .pin/../.pin/constraints-rocm-torch.txt # lightning-utilities @@ -310,16 +290,15 @@ varname==0.10.0 # -c .pin/../.pin/constraints-rocm-torch.txt # giving voir==0.2.15 - # via -r benchmarks/dlrm/requirements.in -websocket-client==1.6.4 # via # -c .pin/../.pin/constraints-rocm-torch.txt - # docker -werkzeug==3.0.1 + # -c .pin/../constraints/rocm.txt + # -r benchmarks/dlrm/requirements.in +werkzeug==3.0.3 # via # -c .pin/../.pin/constraints-rocm-torch.txt # tensorboard -zipp==3.17.0 +zipp==3.19.2 # via # -c .pin/../.pin/constraints-rocm-torch.txt # importlib-metadata diff --git a/benchmarks/dlrm/requirements.xpu.txt b/benchmarks/dlrm/requirements.xpu.txt index be809964a..e26b4bbac 100644 --- a/benchmarks/dlrm/requirements.xpu.txt +++ b/benchmarks/dlrm/requirements.xpu.txt @@ -4,7 +4,10 @@ # # pip-compile --output-file=benchmarks/dlrm/requirements.xpu.txt .pin/tmp-constraints-xpu-dlrm.txt benchmarks/dlrm/requirements.in # +--extra-index-url https://pypi.ngc.nvidia.com --extra-index-url https://pytorch-extension.intel.com/release-whl/stable/xpu/us/ +--find-links https://storage.googleapis.com/jax-releases/jax_cuda_releases.html +--trusted-host pypi.ngc.nvidia.com absl-py==2.1.0 # via @@ -18,7 +21,7 @@ asttokens==2.4.1 # via # -c .pin/../.pin/constraints-xpu-torch.txt # giving -certifi==2024.2.2 +certifi==2024.6.2 # via # -c .pin/../.pin/constraints-xpu-torch.txt # requests @@ -26,11 +29,11 @@ charset-normalizer==3.3.2 # via # -c .pin/../.pin/constraints-xpu-torch.txt # requests -codefind==0.1.3 +codefind==0.1.6 # via # -c .pin/../.pin/constraints-xpu-torch.txt # ptera -docker==7.0.0 +docker==7.1.0 # via # -c .pin/../.pin/constraints-xpu-torch.txt # torchx @@ -42,16 +45,17 @@ 
executing==1.2.0 # via # -c .pin/../.pin/constraints-xpu-torch.txt # varname -fbgemm-gpu==0.6.0 +fbgemm-gpu==0.7.0 # via # -c .pin/../.pin/constraints-xpu-torch.txt # torchrec -filelock==3.13.4 +filelock==3.14.0 # via # -c .pin/../.pin/constraints-xpu-torch.txt # torch # torchx -fsspec==2024.2.0 + # triton +fsspec==2024.3.1 # via # -c .pin/../.pin/constraints-xpu-torch.txt # torch @@ -69,7 +73,7 @@ graphviz==0.20.3 # via # -c .pin/../.pin/constraints-xpu-torch.txt # torchviz -grpcio==1.62.2 +grpcio==1.64.1 # via # -c .pin/../.pin/constraints-xpu-torch.txt # tensorboard @@ -81,11 +85,11 @@ importlib-metadata==7.1.0 # via # -c .pin/../.pin/constraints-xpu-torch.txt # torchx -jinja2==3.1.3 +jinja2==3.1.4 # via # -c .pin/../.pin/constraints-xpu-torch.txt # torch -joblib==1.4.0 +joblib==1.4.2 # via # -c .pin/../.pin/constraints-xpu-torch.txt # scikit-learn @@ -118,7 +122,7 @@ mypy-extensions==1.0.0 # via # -c .pin/../.pin/constraints-xpu-torch.txt # typing-inspect -networkx +networkx==3.3 # via # -c .pin/../.pin/constraints-xpu-torch.txt # torch @@ -132,11 +136,63 @@ numpy==1.26.4 # scipy # tensorboard # torchmetrics +nvidia-cublas-cu12==12.1.3.1 + # via + # -c .pin/../.pin/constraints-xpu-torch.txt + # nvidia-cudnn-cu12 + # nvidia-cusolver-cu12 + # torch +nvidia-cuda-cupti-cu12==12.1.105 + # via + # -c .pin/../.pin/constraints-xpu-torch.txt + # torch +nvidia-cuda-nvrtc-cu12==12.1.105 + # via + # -c .pin/../.pin/constraints-xpu-torch.txt + # torch +nvidia-cuda-runtime-cu12==12.1.105 + # via + # -c .pin/../.pin/constraints-xpu-torch.txt + # torch +nvidia-cudnn-cu12==8.9.2.26 + # via + # -c .pin/../.pin/constraints-xpu-torch.txt + # torch +nvidia-cufft-cu12==11.0.2.54 + # via + # -c .pin/../.pin/constraints-xpu-torch.txt + # torch +nvidia-curand-cu12==10.3.2.106 + # via + # -c .pin/../.pin/constraints-xpu-torch.txt + # torch +nvidia-cusolver-cu12==11.4.5.107 + # via + # -c .pin/../.pin/constraints-xpu-torch.txt + # torch +nvidia-cusparse-cu12==12.1.0.106 + # via + # -c 
.pin/../.pin/constraints-xpu-torch.txt + # nvidia-cusolver-cu12 + # torch +nvidia-nccl-cu12==2.20.5 + # via + # -c .pin/../.pin/constraints-xpu-torch.txt + # torch +nvidia-nvjitlink-cu12==12.5.40 + # via + # -c .pin/../.pin/constraints-xpu-torch.txt + # nvidia-cusolver-cu12 + # nvidia-cusparse-cu12 +nvidia-nvtx-cu12==12.1.105 + # via + # -c .pin/../.pin/constraints-xpu-torch.txt + # torch omegaconf==2.3.0 # via # -c .pin/../.pin/constraints-xpu-torch.txt # voir -onnx==1.16.0 +onnx==1.16.1 # via # -c .pin/../.pin/constraints-xpu-torch.txt # -r benchmarks/dlrm/requirements.in @@ -144,17 +200,20 @@ ovld==0.3.5 # via # -c .pin/../.pin/constraints-xpu-torch.txt # voir -packaging==24.0 +packaging==24.1 # via # -c .pin/../.pin/constraints-xpu-torch.txt - # docker # lightning-utilities # torchmetrics -protobuf==5.26.1 +protobuf==4.25.3 # via # -c .pin/../.pin/constraints-xpu-torch.txt # onnx # tensorboard +psutil==5.9.8 + # via + # -c .pin/../.pin/constraints-xpu-torch.txt + # voir ptera==1.4.1 # via # -c .pin/../.pin/constraints-xpu-torch.txt @@ -163,7 +222,7 @@ pydot==2.0.0 # via # -c .pin/../.pin/constraints-xpu-torch.txt # -r benchmarks/dlrm/requirements.in -pygments==2.17.2 +pygments==2.18.0 # via # -c .pin/../.pin/constraints-xpu-torch.txt # rich @@ -188,7 +247,7 @@ reactivex==4.0.4 # via # -c .pin/../.pin/constraints-xpu-torch.txt # giving -requests==2.31.0 +requests==2.32.3 # via # -c .pin/../.pin/constraints-xpu-torch.txt # docker @@ -196,11 +255,11 @@ rich==13.7.1 # via # -c .pin/../.pin/constraints-xpu-torch.txt # voir -scikit-learn==1.4.2 +scikit-learn==1.5.0 # via # -c .pin/../.pin/constraints-xpu-torch.txt # -r benchmarks/dlrm/requirements.in -scipy==1.13.0 +scipy==1.13.1 # via # -c .pin/../.pin/constraints-xpu-torch.txt # scikit-learn @@ -209,7 +268,7 @@ six==1.16.0 # -c .pin/../.pin/constraints-xpu-torch.txt # asttokens # tensorboard -sympy==1.12 +sympy==1.12.1 # via # -c .pin/../.pin/constraints-xpu-torch.txt # torch @@ -217,7 +276,7 @@ tabulate==0.9.0 # 
via # -c .pin/../.pin/constraints-xpu-torch.txt # torchx -tensorboard==2.16.2 +tensorboard==2.17.0 # via # -c .pin/../.pin/constraints-xpu-torch.txt # -r benchmarks/dlrm/requirements.in @@ -225,11 +284,11 @@ tensorboard-data-server==0.7.2 # via # -c .pin/../.pin/constraints-xpu-torch.txt # tensorboard -threadpoolctl==3.4.0 +threadpoolctl==3.5.0 # via # -c .pin/../.pin/constraints-xpu-torch.txt # scikit-learn -torch==2.1.0a0+cxx11.abi +torch==2.3.1 # via # -c .pin/../.pin/constraints-xpu-torch.txt # -c .pin/../constraints/xpu.txt @@ -240,7 +299,7 @@ torchmetrics==1.0.3 # via # -c .pin/../.pin/constraints-xpu-torch.txt # torchrec -torchrec==0.6.0 +torchrec==0.7.0 # via # -c .pin/../.pin/constraints-xpu-torch.txt # -r benchmarks/dlrm/requirements.in @@ -252,12 +311,16 @@ torchx==0.5.0 # via # -c .pin/../.pin/constraints-xpu-torch.txt # -r benchmarks/dlrm/requirements.in -tqdm==4.66.2 +tqdm==4.66.4 # via # -c .pin/../.pin/constraints-xpu-torch.txt # -r benchmarks/dlrm/requirements.in # torchrec -typing-extensions==4.11.0 +triton==2.3.1 + # via + # -c .pin/../.pin/constraints-xpu-torch.txt + # torch +typing-extensions==4.12.2 # via # -c .pin/../.pin/constraints-xpu-torch.txt # lightning-utilities @@ -284,11 +347,11 @@ voir==0.2.15 # -c .pin/../.pin/constraints-xpu-torch.txt # -c .pin/../constraints/xpu.txt # -r benchmarks/dlrm/requirements.in -werkzeug==3.0.2 +werkzeug==3.0.3 # via # -c .pin/../.pin/constraints-xpu-torch.txt # tensorboard -zipp==3.18.1 +zipp==3.19.2 # via # -c .pin/../.pin/constraints-xpu-torch.txt # importlib-metadata diff --git a/benchmarks/flops/main.py b/benchmarks/flops/main.py index 9520b5c7e..e4f05c178 100755 --- a/benchmarks/flops/main.py +++ b/benchmarks/flops/main.py @@ -6,7 +6,7 @@ import torch import torchcompat.core as accelerator -from benchmate.common import setupvoir +from benchmate.monitor import setupvoir KILO = 1e3 MEGA = 1e6 @@ -14,10 +14,6 @@ TERA = 1e12 EXA = 1e18 - -print(f"Using, {accelerator.device_type}") - - def empty_cache(): 
accelerator.empty_cache() @@ -26,7 +22,6 @@ def synchronize(): accelerator.synchronize() - def modelflops( model: torch.nn.Module, shape, repeat=10, dtype=torch.float32, unit=TERA ): @@ -93,8 +88,6 @@ def f(N, R=30, m=5000000, n=256, unit=TERA, dtype=torch.float32, log=None): empty_cache() - - def main(): dtypes = { "bf16": torch.bfloat16, diff --git a/benchmarks/flops/requirements.cuda.txt b/benchmarks/flops/requirements.cuda.txt index b28ed4d44..21a41a149 100644 --- a/benchmarks/flops/requirements.cuda.txt +++ b/benchmarks/flops/requirements.cuda.txt @@ -2,9 +2,12 @@ # This file is autogenerated by pip-compile with Python 3.11 # by the following command: # -# pip-compile --config=pyproject.toml --output-file=benchmarks/flops/requirements.cuda.txt --resolver=backtracking .pin/tmp-constraints-cuda-flops.txt benchmarks/flops/requirements.in +# pip-compile --output-file=benchmarks/flops/requirements.cuda.txt .pin/tmp-constraints-cuda-flops.txt benchmarks/flops/requirements.in # ---extra-index-url https://download.pytorch.org/whl/cu118 +--extra-index-url https://pypi.ngc.nvidia.com +--extra-index-url https://download.pytorch.org/whl/cu121 +--find-links https://storage.googleapis.com/jax-releases/jax_cuda_releases.html +--trusted-host pypi.ngc.nvidia.com antlr4-python3-runtime==4.9.3 # via @@ -14,15 +17,7 @@ asttokens==2.4.1 # via # -c .pin/../.pin/constraints-cuda-torch.txt # giving -certifi==2023.7.22 - # via - # -c .pin/../.pin/constraints-cuda-torch.txt - # requests -charset-normalizer==3.3.2 - # via - # -c .pin/../.pin/constraints-cuda-torch.txt - # requests -codefind==0.1.3 +codefind==0.1.6 # via # -c .pin/../.pin/constraints-cuda-torch.txt # ptera @@ -30,12 +25,12 @@ executing==1.2.0 # via # -c .pin/../.pin/constraints-cuda-torch.txt # varname -filelock==3.13.1 +filelock==3.14.0 # via # -c .pin/../.pin/constraints-cuda-torch.txt # torch # triton -fsspec==2023.10.0 +fsspec==2024.3.1 # via # -c .pin/../.pin/constraints-cuda-torch.txt # torch @@ -44,11 +39,11 @@ 
giving==0.4.2 # -c .pin/../.pin/constraints-cuda-torch.txt # ptera # voir -idna==3.4 +importlib-resources==6.4.0 # via # -c .pin/../.pin/constraints-cuda-torch.txt - # requests -jinja2==3.1.2 + # torchcompat +jinja2==3.1.4 # via # -c .pin/../.pin/constraints-cuda-torch.txt # torch @@ -56,7 +51,7 @@ markdown-it-py==3.0.0 # via # -c .pin/../.pin/constraints-cuda-torch.txt # rich -markupsafe==2.1.3 +markupsafe==2.1.5 # via # -c .pin/../.pin/constraints-cuda-torch.txt # jinja2 @@ -68,31 +63,87 @@ mpmath==1.3.0 # via # -c .pin/../.pin/constraints-cuda-torch.txt # sympy -networkx==3.2.1 +networkx==3.3 # via # -c .pin/../.pin/constraints-cuda-torch.txt # torch -numpy==1.26.1 +numpy==1.26.4 # via # -c .pin/../.pin/constraints-cuda-torch.txt # torchvision +nvidia-cublas-cu12==12.1.3.1 + # via + # -c .pin/../.pin/constraints-cuda-torch.txt + # nvidia-cudnn-cu12 + # nvidia-cusolver-cu12 + # torch +nvidia-cuda-cupti-cu12==12.1.105 + # via + # -c .pin/../.pin/constraints-cuda-torch.txt + # torch +nvidia-cuda-nvrtc-cu12==12.1.105 + # via + # -c .pin/../.pin/constraints-cuda-torch.txt + # torch +nvidia-cuda-runtime-cu12==12.1.105 + # via + # -c .pin/../.pin/constraints-cuda-torch.txt + # torch +nvidia-cudnn-cu12==8.9.2.26 + # via + # -c .pin/../.pin/constraints-cuda-torch.txt + # torch +nvidia-cufft-cu12==11.0.2.54 + # via + # -c .pin/../.pin/constraints-cuda-torch.txt + # torch +nvidia-curand-cu12==10.3.2.106 + # via + # -c .pin/../.pin/constraints-cuda-torch.txt + # torch +nvidia-cusolver-cu12==11.4.5.107 + # via + # -c .pin/../.pin/constraints-cuda-torch.txt + # torch +nvidia-cusparse-cu12==12.1.0.106 + # via + # -c .pin/../.pin/constraints-cuda-torch.txt + # nvidia-cusolver-cu12 + # torch +nvidia-nccl-cu12==2.20.5 + # via + # -c .pin/../.pin/constraints-cuda-torch.txt + # torch +nvidia-nvjitlink-cu12==12.5.40 + # via + # -c .pin/../.pin/constraints-cuda-torch.txt + # nvidia-cusolver-cu12 + # nvidia-cusparse-cu12 +nvidia-nvtx-cu12==12.1.105 + # via + # -c 
.pin/../.pin/constraints-cuda-torch.txt + # torch omegaconf==2.3.0 # via # -c .pin/../.pin/constraints-cuda-torch.txt # voir -ovld==0.3.2 +ovld==0.3.5 # via # -c .pin/../.pin/constraints-cuda-torch.txt # voir -pillow==10.1.0 +pillow==10.3.0 # via # -c .pin/../.pin/constraints-cuda-torch.txt # torchvision +psutil==5.9.8 + # via + # -c .pin/../.pin/constraints-cuda-torch.txt + # voir ptera==1.4.1 # via # -c .pin/../.pin/constraints-cuda-torch.txt # voir -pygments==2.16.1 +pygments==2.18.0 # via # -c .pin/../.pin/constraints-cuda-torch.txt # rich @@ -108,11 +159,7 @@ reactivex==4.0.4 # via # -c .pin/../.pin/constraints-cuda-torch.txt # giving -requests==2.31.0 - # via - # -c .pin/../.pin/constraints-cuda-torch.txt - # torchvision -rich==13.6.0 +rich==13.7.1 # via # -c .pin/../.pin/constraints-cuda-torch.txt # voir @@ -120,31 +167,38 @@ six==1.16.0 # via # -c .pin/../.pin/constraints-cuda-torch.txt # asttokens -sympy==1.12 +sympy==1.12.1 # via # -c .pin/../.pin/constraints-cuda-torch.txt # torch -torch==2.1.0+cu118 +torch==2.3.1+cu121 # via + # -c .pin/../.pin/constraints-cuda-torch.txt # -r benchmarks/flops/requirements.in + # torchcompat # torchvision -torchvision==0.16.0+cu118 - # via -r benchmarks/flops/requirements.in -tqdm==4.66.1 - # via -r benchmarks/flops/requirements.in -triton==2.1.0 +torchcompat==1.0.2 # via # -c .pin/../.pin/constraints-cuda-torch.txt - # torch -typing-extensions==4.8.0 + # -c .pin/../constraints/cuda.txt + # -r benchmarks/flops/requirements.in +torchvision==0.18.1+cu121 + # via + # -c .pin/../.pin/constraints-cuda-torch.txt + # -r benchmarks/flops/requirements.in +tqdm==4.66.4 + # via + # -c .pin/../.pin/constraints-cuda-torch.txt + # -r benchmarks/flops/requirements.in +triton==2.3.1 # via # -c .pin/../.pin/constraints-cuda-torch.txt - # reactivex # torch -urllib3==1.26.18 +typing-extensions==4.12.2 # via # -c .pin/../.pin/constraints-cuda-torch.txt - # requests + # reactivex + # torch varname==0.10.0 # via # -c 
.pin/../.pin/constraints-cuda-torch.txt diff --git a/benchmarks/flops/requirements.hpu.txt b/benchmarks/flops/requirements.hpu.txt index a58e4f411..08eda7ab3 100644 --- a/benchmarks/flops/requirements.hpu.txt +++ b/benchmarks/flops/requirements.hpu.txt @@ -1,9 +1,13 @@ # -# This file is autogenerated by pip-compile with Python 3.10 +# This file is autogenerated by pip-compile with Python 3.11 # by the following command: # -# pip-compile --output-file=milabench/benchmarks/flops/requirements.hpu.txt --resolver=backtracking .pin/tmp-constraints-hpu-flops.txt milabench/benchmarks/flops/requirements.in +# pip-compile --output-file=benchmarks/flops/requirements.hpu.txt .pin/tmp-constraints-hpu-flops.txt benchmarks/flops/requirements.in # +--extra-index-url https://pypi.ngc.nvidia.com +--find-links https://storage.googleapis.com/jax-releases/jax_cuda_releases.html +--trusted-host pypi.ngc.nvidia.com + antlr4-python3-runtime==4.9.3 # via # -c .pin/../.pin/constraints-hpu-torch.txt @@ -12,7 +16,7 @@ asttokens==2.4.1 # via # -c .pin/../.pin/constraints-hpu-torch.txt # giving -codefind==0.1.4 +codefind==0.1.6 # via # -c .pin/../.pin/constraints-hpu-torch.txt # ptera @@ -109,7 +113,7 @@ nvidia-nccl-cu12==2.20.5 # via # -c .pin/../.pin/constraints-hpu-torch.txt # torch -nvidia-nvjitlink-cu12==12.4.127 +nvidia-nvjitlink-cu12==12.5.40 # via # -c .pin/../.pin/constraints-hpu-torch.txt # nvidia-cusolver-cu12 @@ -162,25 +166,34 @@ six==1.16.0 # via # -c .pin/../.pin/constraints-hpu-torch.txt # asttokens -sympy==1.12 +sympy==1.12.1 # via # -c .pin/../.pin/constraints-hpu-torch.txt # torch -torch==2.3.0 +torch==2.3.1 # via - # -r milabench/benchmarks/flops/requirements.in + # -c .pin/../.pin/constraints-hpu-torch.txt + # -r benchmarks/flops/requirements.in + # torchcompat # torchvision -torchcompat==0.0.1 - # via -r milabench/benchmarks/flops/requirements.in -torchvision==0.18.0 - # via -r milabench/benchmarks/flops/requirements.in +torchcompat==1.0.2 + # via + # -c 
.pin/../.pin/constraints-hpu-torch.txt + # -c .pin/../constraints/hpu.txt + # -r benchmarks/flops/requirements.in +torchvision==0.18.1 + # via + # -c .pin/../.pin/constraints-hpu-torch.txt + # -r benchmarks/flops/requirements.in tqdm==4.66.4 - # via -r milabench/benchmarks/flops/requirements.in -triton==2.3.0 + # via + # -c .pin/../.pin/constraints-hpu-torch.txt + # -r benchmarks/flops/requirements.in +triton==2.3.1 # via # -c .pin/../.pin/constraints-hpu-torch.txt # torch -typing-extensions==4.11.0 +typing-extensions==4.12.2 # via # -c .pin/../.pin/constraints-hpu-torch.txt # reactivex diff --git a/benchmarks/flops/requirements.rocm.txt b/benchmarks/flops/requirements.rocm.txt index 61fa99dd7..ff65456a0 100644 --- a/benchmarks/flops/requirements.rocm.txt +++ b/benchmarks/flops/requirements.rocm.txt @@ -2,9 +2,12 @@ # This file is autogenerated by pip-compile with Python 3.11 # by the following command: # -# pip-compile --config=pyproject.toml --output-file=benchmarks/flops/requirements.rocm.txt --resolver=backtracking .pin/tmp-constraints-rocm-flops.txt benchmarks/flops/requirements.in +# pip-compile --output-file=benchmarks/flops/requirements.rocm.txt .pin/tmp-constraints-rocm-flops.txt benchmarks/flops/requirements.in # ---extra-index-url https://download.pytorch.org/whl/rocm5.6/ +--extra-index-url https://pypi.ngc.nvidia.com +--extra-index-url https://download.pytorch.org/whl/rocm6.0 +--find-links https://storage.googleapis.com/jax-releases/jax_cuda_releases.html +--trusted-host pypi.ngc.nvidia.com antlr4-python3-runtime==4.9.3 # via @@ -14,19 +17,7 @@ asttokens==2.4.1 # via # -c .pin/../.pin/constraints-rocm-torch.txt # giving -certifi==2023.7.22 - # via - # -c .pin/../.pin/constraints-rocm-torch.txt - # requests -charset-normalizer==3.3.2 - # via - # -c .pin/../.pin/constraints-rocm-torch.txt - # requests -cmake==3.27.7 - # via - # -c .pin/../.pin/constraints-rocm-torch.txt - # pytorch-triton-rocm -codefind==0.1.3 +codefind==0.1.6 # via # -c 
.pin/../.pin/constraints-rocm-torch.txt # ptera @@ -34,12 +25,12 @@ executing==1.2.0 # via # -c .pin/../.pin/constraints-rocm-torch.txt # varname -filelock==3.13.1 +filelock==3.14.0 # via # -c .pin/../.pin/constraints-rocm-torch.txt # pytorch-triton-rocm # torch -fsspec==2023.10.0 +fsspec==2024.3.1 # via # -c .pin/../.pin/constraints-rocm-torch.txt # torch @@ -48,23 +39,19 @@ giving==0.4.2 # -c .pin/../.pin/constraints-rocm-torch.txt # ptera # voir -idna==3.4 +importlib-resources==6.4.0 # via # -c .pin/../.pin/constraints-rocm-torch.txt - # requests -jinja2==3.1.2 + # torchcompat +jinja2==3.1.4 # via # -c .pin/../.pin/constraints-rocm-torch.txt # torch -lit==17.0.4 - # via - # -c .pin/../.pin/constraints-rocm-torch.txt - # pytorch-triton-rocm markdown-it-py==3.0.0 # via # -c .pin/../.pin/constraints-rocm-torch.txt # rich -markupsafe==2.1.3 +markupsafe==2.1.5 # via # -c .pin/../.pin/constraints-rocm-torch.txt # jinja2 @@ -76,11 +63,11 @@ mpmath==1.3.0 # via # -c .pin/../.pin/constraints-rocm-torch.txt # sympy -networkx==3.2.1 +networkx==3.3 # via # -c .pin/../.pin/constraints-rocm-torch.txt # torch -numpy==1.26.1 +numpy==1.26.4 # via # -c .pin/../.pin/constraints-rocm-torch.txt # torchvision @@ -88,19 +75,23 @@ omegaconf==2.3.0 # via # -c .pin/../.pin/constraints-rocm-torch.txt # voir -ovld==0.3.2 +ovld==0.3.5 # via # -c .pin/../.pin/constraints-rocm-torch.txt # voir -pillow==10.1.0 +pillow==10.3.0 # via # -c .pin/../.pin/constraints-rocm-torch.txt # torchvision +psutil==5.9.8 + # via + # -c .pin/../.pin/constraints-rocm-torch.txt + # voir ptera==1.4.1 # via # -c .pin/../.pin/constraints-rocm-torch.txt # voir -pygments==2.16.1 +pygments==2.18.0 # via # -c .pin/../.pin/constraints-rocm-torch.txt # rich @@ -108,7 +99,7 @@ pynvml==11.5.0 # via # -c .pin/../.pin/constraints-rocm-torch.txt # voir -pytorch-triton-rocm==2.1.0 +pytorch-triton-rocm==2.3.1 # via # -c .pin/../.pin/constraints-rocm-torch.txt # torch @@ -120,11 +111,7 @@ reactivex==4.0.4 # via # -c 
.pin/../.pin/constraints-rocm-torch.txt # giving -requests==2.31.0 - # via - # -c .pin/../.pin/constraints-rocm-torch.txt - # torchvision -rich==13.6.0 +rich==13.7.1 # via # -c .pin/../.pin/constraints-rocm-torch.txt # voir @@ -132,28 +119,34 @@ six==1.16.0 # via # -c .pin/../.pin/constraints-rocm-torch.txt # asttokens -sympy==1.12 +sympy==1.12.1 # via # -c .pin/../.pin/constraints-rocm-torch.txt # torch -torch==2.1.0+rocm5.6 +torch==2.3.1+rocm6.0 # via + # -c .pin/../.pin/constraints-rocm-torch.txt # -r benchmarks/flops/requirements.in - # pytorch-triton-rocm + # torchcompat # torchvision -torchvision==0.16.0+rocm5.6 - # via -r benchmarks/flops/requirements.in -tqdm==4.66.1 - # via -r benchmarks/flops/requirements.in -typing-extensions==4.8.0 +torchcompat==1.0.2 # via # -c .pin/../.pin/constraints-rocm-torch.txt - # reactivex - # torch -urllib3==1.26.18 + # -c .pin/../constraints/rocm.txt + # -r benchmarks/flops/requirements.in +torchvision==0.18.1+rocm6.0 # via # -c .pin/../.pin/constraints-rocm-torch.txt - # requests + # -r benchmarks/flops/requirements.in +tqdm==4.66.4 + # via + # -c .pin/../.pin/constraints-rocm-torch.txt + # -r benchmarks/flops/requirements.in +typing-extensions==4.12.2 + # via + # -c .pin/../.pin/constraints-rocm-torch.txt + # reactivex + # torch varname==0.10.0 # via # -c .pin/../.pin/constraints-rocm-torch.txt diff --git a/benchmarks/flops/requirements.xpu.txt b/benchmarks/flops/requirements.xpu.txt index a4a2e503c..0fc5be0db 100644 --- a/benchmarks/flops/requirements.xpu.txt +++ b/benchmarks/flops/requirements.xpu.txt @@ -4,7 +4,10 @@ # # pip-compile --output-file=benchmarks/flops/requirements.xpu.txt .pin/tmp-constraints-xpu-flops.txt benchmarks/flops/requirements.in # +--extra-index-url https://pypi.ngc.nvidia.com --extra-index-url https://pytorch-extension.intel.com/release-whl/stable/xpu/us/ +--find-links https://storage.googleapis.com/jax-releases/jax_cuda_releases.html +--trusted-host pypi.ngc.nvidia.com 
antlr4-python3-runtime==4.9.3 # via @@ -14,15 +17,7 @@ asttokens==2.4.1 # via # -c .pin/../.pin/constraints-xpu-torch.txt # giving -certifi==2024.2.2 - # via - # -c .pin/../.pin/constraints-xpu-torch.txt - # requests -charset-normalizer==3.3.2 - # via - # -c .pin/../.pin/constraints-xpu-torch.txt - # requests -codefind==0.1.3 +codefind==0.1.6 # via # -c .pin/../.pin/constraints-xpu-torch.txt # ptera @@ -30,11 +25,12 @@ executing==1.2.0 # via # -c .pin/../.pin/constraints-xpu-torch.txt # varname -filelock==3.13.4 +filelock==3.14.0 # via # -c .pin/../.pin/constraints-xpu-torch.txt # torch -fsspec==2024.2.0 + # triton +fsspec==2024.3.1 # via # -c .pin/../.pin/constraints-xpu-torch.txt # torch @@ -43,11 +39,11 @@ giving==0.4.2 # -c .pin/../.pin/constraints-xpu-torch.txt # ptera # voir -idna==3.7 +importlib-resources==6.4.0 # via # -c .pin/../.pin/constraints-xpu-torch.txt - # requests -jinja2==3.1.3 + # torchcompat +jinja2==3.1.4 # via # -c .pin/../.pin/constraints-xpu-torch.txt # torch @@ -67,7 +63,7 @@ mpmath==1.3.0 # via # -c .pin/../.pin/constraints-xpu-torch.txt # sympy -networkx +networkx==3.3 # via # -c .pin/../.pin/constraints-xpu-torch.txt # torch @@ -75,6 +71,58 @@ numpy==1.26.4 # via # -c .pin/../.pin/constraints-xpu-torch.txt # torchvision +nvidia-cublas-cu12==12.1.3.1 + # via + # -c .pin/../.pin/constraints-xpu-torch.txt + # nvidia-cudnn-cu12 + # nvidia-cusolver-cu12 + # torch +nvidia-cuda-cupti-cu12==12.1.105 + # via + # -c .pin/../.pin/constraints-xpu-torch.txt + # torch +nvidia-cuda-nvrtc-cu12==12.1.105 + # via + # -c .pin/../.pin/constraints-xpu-torch.txt + # torch +nvidia-cuda-runtime-cu12==12.1.105 + # via + # -c .pin/../.pin/constraints-xpu-torch.txt + # torch +nvidia-cudnn-cu12==8.9.2.26 + # via + # -c .pin/../.pin/constraints-xpu-torch.txt + # torch +nvidia-cufft-cu12==11.0.2.54 + # via + # -c .pin/../.pin/constraints-xpu-torch.txt + # torch +nvidia-curand-cu12==10.3.2.106 + # via + # -c .pin/../.pin/constraints-xpu-torch.txt + # torch 
+nvidia-cusolver-cu12==11.4.5.107 + # via + # -c .pin/../.pin/constraints-xpu-torch.txt + # torch +nvidia-cusparse-cu12==12.1.0.106 + # via + # -c .pin/../.pin/constraints-xpu-torch.txt + # nvidia-cusolver-cu12 + # torch +nvidia-nccl-cu12==2.20.5 + # via + # -c .pin/../.pin/constraints-xpu-torch.txt + # torch +nvidia-nvjitlink-cu12==12.5.40 + # via + # -c .pin/../.pin/constraints-xpu-torch.txt + # nvidia-cusolver-cu12 + # nvidia-cusparse-cu12 +nvidia-nvtx-cu12==12.1.105 + # via + # -c .pin/../.pin/constraints-xpu-torch.txt + # torch omegaconf==2.3.0 # via # -c .pin/../.pin/constraints-xpu-torch.txt @@ -87,11 +135,15 @@ pillow==10.3.0 # via # -c .pin/../.pin/constraints-xpu-torch.txt # torchvision +psutil==5.9.8 + # via + # -c .pin/../.pin/constraints-xpu-torch.txt + # voir ptera==1.4.1 # via # -c .pin/../.pin/constraints-xpu-torch.txt # voir -pygments==2.17.2 +pygments==2.18.0 # via # -c .pin/../.pin/constraints-xpu-torch.txt # rich @@ -107,10 +159,6 @@ reactivex==4.0.4 # via # -c .pin/../.pin/constraints-xpu-torch.txt # giving -requests==2.31.0 - # via - # -c .pin/../.pin/constraints-xpu-torch.txt - # torchvision rich==13.7.1 # via # -c .pin/../.pin/constraints-xpu-torch.txt @@ -119,34 +167,40 @@ six==1.16.0 # via # -c .pin/../.pin/constraints-xpu-torch.txt # asttokens -sympy==1.12 +sympy==1.12.1 # via # -c .pin/../.pin/constraints-xpu-torch.txt # torch -torch==2.1.0a0+cxx11.abi +torch==2.3.1 # via # -c .pin/../.pin/constraints-xpu-torch.txt # -c .pin/../constraints/xpu.txt # -r benchmarks/flops/requirements.in + # torchcompat # torchvision -torchvision==0.16.0a0+cxx11.abi +torchcompat==1.0.2 # via # -c .pin/../.pin/constraints-xpu-torch.txt # -c .pin/../constraints/xpu.txt # -r benchmarks/flops/requirements.in -tqdm==4.66.2 +torchvision==0.18.1 # via # -c .pin/../.pin/constraints-xpu-torch.txt + # -c .pin/../constraints/xpu.txt # -r benchmarks/flops/requirements.in -typing-extensions==4.11.0 +tqdm==4.66.4 + # via + # -c .pin/../.pin/constraints-xpu-torch.txt + # 
-r benchmarks/flops/requirements.in +triton==2.3.1 # via # -c .pin/../.pin/constraints-xpu-torch.txt - # reactivex # torch -urllib3==1.26.18 +typing-extensions==4.12.2 # via # -c .pin/../.pin/constraints-xpu-torch.txt - # requests + # reactivex + # torch varname==0.10.0 # via # -c .pin/../.pin/constraints-xpu-torch.txt diff --git a/benchmarks/huggingface/prepare.py b/benchmarks/huggingface/prepare.py index fdcc73fcd..d1bdaf280 100755 --- a/benchmarks/huggingface/prepare.py +++ b/benchmarks/huggingface/prepare.py @@ -8,3 +8,8 @@ print(f"Preparing {args.model}") make_config = models[args.model] make_config() + + # bert dataset + # t5 dataset + # reformer dataset + # whisper dataset \ No newline at end of file diff --git a/benchmarks/huggingface/requirements.cuda.txt b/benchmarks/huggingface/requirements.cuda.txt index b426771b5..e095395ee 100644 --- a/benchmarks/huggingface/requirements.cuda.txt +++ b/benchmarks/huggingface/requirements.cuda.txt @@ -2,9 +2,12 @@ # This file is autogenerated by pip-compile with Python 3.11 # by the following command: # -# pip-compile --config=pyproject.toml --output-file=benchmarks/huggingface/requirements.cuda.txt --resolver=backtracking .pin/tmp-constraints-cuda-hf.txt benchmarks/huggingface/requirements.in +# pip-compile --output-file=benchmarks/huggingface/requirements.cuda.txt .pin/tmp-constraints-cuda-hf.txt benchmarks/huggingface/requirements.in # ---extra-index-url https://download.pytorch.org/whl/cu118 +--extra-index-url https://pypi.ngc.nvidia.com +--extra-index-url https://download.pytorch.org/whl/cu121 +--find-links https://storage.googleapis.com/jax-releases/jax_cuda_releases.html +--trusted-host pypi.ngc.nvidia.com antlr4-python3-runtime==4.9.3 # via @@ -14,7 +17,7 @@ asttokens==2.4.1 # via # -c .pin/../.pin/constraints-cuda-torch.txt # giving -certifi==2023.7.22 +certifi==2024.6.2 # via # -c .pin/../.pin/constraints-cuda-torch.txt # requests @@ -22,7 +25,7 @@ charset-normalizer==3.3.2 # via # -c 
.pin/../.pin/constraints-cuda-torch.txt # requests -codefind==0.1.3 +codefind==0.1.6 # via # -c .pin/../.pin/constraints-cuda-torch.txt # ptera @@ -30,14 +33,14 @@ executing==1.2.0 # via # -c .pin/../.pin/constraints-cuda-torch.txt # varname -filelock==3.13.1 +filelock==3.14.0 # via # -c .pin/../.pin/constraints-cuda-torch.txt # huggingface-hub # torch # transformers # triton -fsspec==2023.10.0 +fsspec==2024.3.1 # via # -c .pin/../.pin/constraints-cuda-torch.txt # huggingface-hub @@ -47,16 +50,16 @@ giving==0.4.2 # -c .pin/../.pin/constraints-cuda-torch.txt # ptera # voir -huggingface-hub==0.17.3 +huggingface-hub==0.23.3 # via # -c .pin/../.pin/constraints-cuda-torch.txt # tokenizers # transformers -idna==3.4 +idna==3.7 # via # -c .pin/../.pin/constraints-cuda-torch.txt # requests -jinja2==3.1.2 +jinja2==3.1.4 # via # -c .pin/../.pin/constraints-cuda-torch.txt # torch @@ -64,7 +67,7 @@ markdown-it-py==3.0.0 # via # -c .pin/../.pin/constraints-cuda-torch.txt # rich -markupsafe==2.1.3 +markupsafe==2.1.5 # via # -c .pin/../.pin/constraints-cuda-torch.txt # jinja2 @@ -76,32 +79,88 @@ mpmath==1.3.0 # via # -c .pin/../.pin/constraints-cuda-torch.txt # sympy -networkx==3.2.1 +networkx==3.3 # via # -c .pin/../.pin/constraints-cuda-torch.txt # torch -numpy==1.26.1 +numpy==1.26.4 # via # -c .pin/../.pin/constraints-cuda-torch.txt # transformers +nvidia-cublas-cu12==12.1.3.1 + # via + # -c .pin/../.pin/constraints-cuda-torch.txt + # nvidia-cudnn-cu12 + # nvidia-cusolver-cu12 + # torch +nvidia-cuda-cupti-cu12==12.1.105 + # via + # -c .pin/../.pin/constraints-cuda-torch.txt + # torch +nvidia-cuda-nvrtc-cu12==12.1.105 + # via + # -c .pin/../.pin/constraints-cuda-torch.txt + # torch +nvidia-cuda-runtime-cu12==12.1.105 + # via + # -c .pin/../.pin/constraints-cuda-torch.txt + # torch +nvidia-cudnn-cu12==8.9.2.26 + # via + # -c .pin/../.pin/constraints-cuda-torch.txt + # torch +nvidia-cufft-cu12==11.0.2.54 + # via + # -c .pin/../.pin/constraints-cuda-torch.txt + # torch 
+nvidia-curand-cu12==10.3.2.106 + # via + # -c .pin/../.pin/constraints-cuda-torch.txt + # torch +nvidia-cusolver-cu12==11.4.5.107 + # via + # -c .pin/../.pin/constraints-cuda-torch.txt + # torch +nvidia-cusparse-cu12==12.1.0.106 + # via + # -c .pin/../.pin/constraints-cuda-torch.txt + # nvidia-cusolver-cu12 + # torch +nvidia-nccl-cu12==2.20.5 + # via + # -c .pin/../.pin/constraints-cuda-torch.txt + # torch +nvidia-nvjitlink-cu12==12.5.40 + # via + # -c .pin/../.pin/constraints-cuda-torch.txt + # nvidia-cusolver-cu12 + # nvidia-cusparse-cu12 +nvidia-nvtx-cu12==12.1.105 + # via + # -c .pin/../.pin/constraints-cuda-torch.txt + # torch omegaconf==2.3.0 # via # -c .pin/../.pin/constraints-cuda-torch.txt # voir -ovld==0.3.2 +ovld==0.3.5 # via # -c .pin/../.pin/constraints-cuda-torch.txt # voir -packaging==23.2 +packaging==24.1 # via # -c .pin/../.pin/constraints-cuda-torch.txt # huggingface-hub # transformers +psutil==5.9.8 + # via + # -c .pin/../.pin/constraints-cuda-torch.txt + # voir ptera==1.4.1 # via # -c .pin/../.pin/constraints-cuda-torch.txt # voir -pygments==2.16.1 +pygments==2.18.0 # via # -c .pin/../.pin/constraints-cuda-torch.txt # rich @@ -119,20 +178,20 @@ reactivex==4.0.4 # via # -c .pin/../.pin/constraints-cuda-torch.txt # giving -regex==2023.10.3 +regex==2024.5.15 # via # -c .pin/../.pin/constraints-cuda-torch.txt # transformers -requests==2.31.0 +requests==2.32.3 # via # -c .pin/../.pin/constraints-cuda-torch.txt # huggingface-hub # transformers -rich==13.6.0 +rich==13.7.1 # via # -c .pin/../.pin/constraints-cuda-torch.txt # voir -safetensors==0.4.0 +safetensors==0.4.3 # via # -c .pin/../.pin/constraints-cuda-torch.txt # transformers @@ -140,28 +199,32 @@ six==1.16.0 # via # -c .pin/../.pin/constraints-cuda-torch.txt # asttokens -sympy==1.12 +sympy==1.12.1 # via # -c .pin/../.pin/constraints-cuda-torch.txt # torch -tokenizers==0.14.1 +tokenizers==0.19.1 # via # -c .pin/../.pin/constraints-cuda-torch.txt # transformers -torch==2.1.0+cu118 - # via -r 
benchmarks/huggingface/requirements.in -tqdm==4.66.1 +torch==2.3.1+cu121 + # via + # -c .pin/../.pin/constraints-cuda-torch.txt + # -r benchmarks/huggingface/requirements.in +tqdm==4.66.4 # via # -c .pin/../.pin/constraints-cuda-torch.txt # huggingface-hub # transformers -transformers==4.35.0 - # via -r benchmarks/huggingface/requirements.in -triton==2.1.0 +transformers==4.41.2 + # via + # -c .pin/../.pin/constraints-cuda-torch.txt + # -r benchmarks/huggingface/requirements.in +triton==2.3.1 # via # -c .pin/../.pin/constraints-cuda-torch.txt # torch -typing-extensions==4.8.0 +typing-extensions==4.12.2 # via # -c .pin/../.pin/constraints-cuda-torch.txt # huggingface-hub diff --git a/benchmarks/huggingface/requirements.hpu.txt b/benchmarks/huggingface/requirements.hpu.txt index ad06ded13..e0190abae 100644 --- a/benchmarks/huggingface/requirements.hpu.txt +++ b/benchmarks/huggingface/requirements.hpu.txt @@ -1,9 +1,13 @@ # -# This file is autogenerated by pip-compile with Python 3.10 +# This file is autogenerated by pip-compile with Python 3.11 # by the following command: # -# pip-compile --output-file=milabench/benchmarks/huggingface/requirements.hpu.txt --resolver=backtracking .pin/tmp-constraints-hpu-hf.txt milabench/benchmarks/huggingface/requirements.in +# pip-compile --output-file=benchmarks/huggingface/requirements.hpu.txt .pin/tmp-constraints-hpu-hf.txt benchmarks/huggingface/requirements.in # +--extra-index-url https://pypi.ngc.nvidia.com +--find-links https://storage.googleapis.com/jax-releases/jax_cuda_releases.html +--trusted-host pypi.ngc.nvidia.com + antlr4-python3-runtime==4.9.3 # via # -c .pin/../.pin/constraints-hpu-torch.txt @@ -12,7 +16,7 @@ asttokens==2.4.1 # via # -c .pin/../.pin/constraints-hpu-torch.txt # giving -certifi==2024.2.2 +certifi==2024.6.2 # via # -c .pin/../.pin/constraints-hpu-torch.txt # requests @@ -20,7 +24,7 @@ charset-normalizer==3.3.2 # via # -c .pin/../.pin/constraints-hpu-torch.txt # requests -codefind==0.1.4 +codefind==0.1.6 
# via # -c .pin/../.pin/constraints-hpu-torch.txt # ptera @@ -45,7 +49,7 @@ giving==0.4.2 # -c .pin/../.pin/constraints-hpu-torch.txt # ptera # voir -huggingface-hub==0.23.0 +huggingface-hub==0.23.3 # via # -c .pin/../.pin/constraints-hpu-torch.txt # tokenizers @@ -125,7 +129,7 @@ nvidia-nccl-cu12==2.20.5 # via # -c .pin/../.pin/constraints-hpu-torch.txt # torch -nvidia-nvjitlink-cu12==12.4.127 +nvidia-nvjitlink-cu12==12.5.40 # via # -c .pin/../.pin/constraints-hpu-torch.txt # nvidia-cusolver-cu12 @@ -142,7 +146,7 @@ ovld==0.3.5 # via # -c .pin/../.pin/constraints-hpu-torch.txt # voir -packaging==24.0 +packaging==24.1 # via # -c .pin/../.pin/constraints-hpu-torch.txt # huggingface-hub @@ -173,11 +177,11 @@ reactivex==4.0.4 # via # -c .pin/../.pin/constraints-hpu-torch.txt # giving -regex==2024.4.28 +regex==2024.5.15 # via # -c .pin/../.pin/constraints-hpu-torch.txt # transformers -requests==2.31.0 +requests==2.32.3 # via # -c .pin/../.pin/constraints-hpu-torch.txt # huggingface-hub @@ -194,7 +198,7 @@ six==1.16.0 # via # -c .pin/../.pin/constraints-hpu-torch.txt # asttokens -sympy==1.12 +sympy==1.12.1 # via # -c .pin/../.pin/constraints-hpu-torch.txt # torch @@ -202,20 +206,24 @@ tokenizers==0.19.1 # via # -c .pin/../.pin/constraints-hpu-torch.txt # transformers -torch==2.3.0 - # via -r milabench/benchmarks/huggingface/requirements.in +torch==2.3.1 + # via + # -c .pin/../.pin/constraints-hpu-torch.txt + # -r benchmarks/huggingface/requirements.in tqdm==4.66.4 # via # -c .pin/../.pin/constraints-hpu-torch.txt # huggingface-hub # transformers -transformers==4.40.2 - # via -r milabench/benchmarks/huggingface/requirements.in -triton==2.3.0 +transformers==4.41.2 + # via + # -c .pin/../.pin/constraints-hpu-torch.txt + # -r benchmarks/huggingface/requirements.in +triton==2.3.1 # via # -c .pin/../.pin/constraints-hpu-torch.txt # torch -typing-extensions==4.11.0 +typing-extensions==4.12.2 # via # -c .pin/../.pin/constraints-hpu-torch.txt # huggingface-hub diff --git 
a/benchmarks/huggingface/requirements.rocm.txt b/benchmarks/huggingface/requirements.rocm.txt index 55a7a09a6..a84b09b52 100644 --- a/benchmarks/huggingface/requirements.rocm.txt +++ b/benchmarks/huggingface/requirements.rocm.txt @@ -2,9 +2,12 @@ # This file is autogenerated by pip-compile with Python 3.11 # by the following command: # -# pip-compile --config=pyproject.toml --output-file=benchmarks/huggingface/requirements.rocm.txt --resolver=backtracking .pin/tmp-constraints-rocm-hf.txt benchmarks/huggingface/requirements.in +# pip-compile --output-file=benchmarks/huggingface/requirements.rocm.txt .pin/tmp-constraints-rocm-hf.txt benchmarks/huggingface/requirements.in # ---extra-index-url https://download.pytorch.org/whl/rocm5.6/ +--extra-index-url https://pypi.ngc.nvidia.com +--extra-index-url https://download.pytorch.org/whl/rocm6.0 +--find-links https://storage.googleapis.com/jax-releases/jax_cuda_releases.html +--trusted-host pypi.ngc.nvidia.com antlr4-python3-runtime==4.9.3 # via @@ -14,7 +17,7 @@ asttokens==2.4.1 # via # -c .pin/../.pin/constraints-rocm-torch.txt # giving -certifi==2023.7.22 +certifi==2024.6.2 # via # -c .pin/../.pin/constraints-rocm-torch.txt # requests @@ -22,11 +25,7 @@ charset-normalizer==3.3.2 # via # -c .pin/../.pin/constraints-rocm-torch.txt # requests -cmake==3.27.7 - # via - # -c .pin/../.pin/constraints-rocm-torch.txt - # pytorch-triton-rocm -codefind==0.1.3 +codefind==0.1.6 # via # -c .pin/../.pin/constraints-rocm-torch.txt # ptera @@ -34,14 +33,14 @@ executing==1.2.0 # via # -c .pin/../.pin/constraints-rocm-torch.txt # varname -filelock==3.13.1 +filelock==3.14.0 # via # -c .pin/../.pin/constraints-rocm-torch.txt # huggingface-hub # pytorch-triton-rocm # torch # transformers -fsspec==2023.10.0 +fsspec==2024.3.1 # via # -c .pin/../.pin/constraints-rocm-torch.txt # huggingface-hub @@ -51,28 +50,24 @@ giving==0.4.2 # -c .pin/../.pin/constraints-rocm-torch.txt # ptera # voir -huggingface-hub==0.17.3 +huggingface-hub==0.23.3 # via # -c 
.pin/../.pin/constraints-rocm-torch.txt # tokenizers # transformers -idna==3.4 +idna==3.7 # via # -c .pin/../.pin/constraints-rocm-torch.txt # requests -jinja2==3.1.2 +jinja2==3.1.4 # via # -c .pin/../.pin/constraints-rocm-torch.txt # torch -lit==17.0.4 - # via - # -c .pin/../.pin/constraints-rocm-torch.txt - # pytorch-triton-rocm markdown-it-py==3.0.0 # via # -c .pin/../.pin/constraints-rocm-torch.txt # rich -markupsafe==2.1.3 +markupsafe==2.1.5 # via # -c .pin/../.pin/constraints-rocm-torch.txt # jinja2 @@ -84,11 +79,11 @@ mpmath==1.3.0 # via # -c .pin/../.pin/constraints-rocm-torch.txt # sympy -networkx==3.2.1 +networkx==3.3 # via # -c .pin/../.pin/constraints-rocm-torch.txt # torch -numpy==1.26.1 +numpy==1.26.4 # via # -c .pin/../.pin/constraints-rocm-torch.txt # transformers @@ -96,20 +91,24 @@ omegaconf==2.3.0 # via # -c .pin/../.pin/constraints-rocm-torch.txt # voir -ovld==0.3.2 +ovld==0.3.5 # via # -c .pin/../.pin/constraints-rocm-torch.txt # voir -packaging==23.2 +packaging==24.1 # via # -c .pin/../.pin/constraints-rocm-torch.txt # huggingface-hub # transformers +psutil==5.9.8 + # via + # -c .pin/../.pin/constraints-rocm-torch.txt + # voir ptera==1.4.1 # via # -c .pin/../.pin/constraints-rocm-torch.txt # voir -pygments==2.16.1 +pygments==2.18.0 # via # -c .pin/../.pin/constraints-rocm-torch.txt # rich @@ -117,7 +116,7 @@ pynvml==11.5.0 # via # -c .pin/../.pin/constraints-rocm-torch.txt # voir -pytorch-triton-rocm==2.1.0 +pytorch-triton-rocm==2.3.1 # via # -c .pin/../.pin/constraints-rocm-torch.txt # torch @@ -131,20 +130,20 @@ reactivex==4.0.4 # via # -c .pin/../.pin/constraints-rocm-torch.txt # giving -regex==2023.10.3 +regex==2024.5.15 # via # -c .pin/../.pin/constraints-rocm-torch.txt # transformers -requests==2.31.0 +requests==2.32.3 # via # -c .pin/../.pin/constraints-rocm-torch.txt # huggingface-hub # transformers -rich==13.6.0 +rich==13.7.1 # via # -c .pin/../.pin/constraints-rocm-torch.txt # voir -safetensors==0.4.0 +safetensors==0.4.3 # via # -c 
.pin/../.pin/constraints-rocm-torch.txt # transformers @@ -152,26 +151,28 @@ six==1.16.0 # via # -c .pin/../.pin/constraints-rocm-torch.txt # asttokens -sympy==1.12 +sympy==1.12.1 # via # -c .pin/../.pin/constraints-rocm-torch.txt # torch -tokenizers==0.14.1 +tokenizers==0.19.1 # via # -c .pin/../.pin/constraints-rocm-torch.txt # transformers -torch==2.1.0+rocm5.6 +torch==2.3.1+rocm6.0 # via + # -c .pin/../.pin/constraints-rocm-torch.txt # -r benchmarks/huggingface/requirements.in - # pytorch-triton-rocm -tqdm==4.66.1 +tqdm==4.66.4 # via # -c .pin/../.pin/constraints-rocm-torch.txt # huggingface-hub # transformers -transformers==4.35.0 - # via -r benchmarks/huggingface/requirements.in -typing-extensions==4.8.0 +transformers==4.41.2 + # via + # -c .pin/../.pin/constraints-rocm-torch.txt + # -r benchmarks/huggingface/requirements.in +typing-extensions==4.12.2 # via # -c .pin/../.pin/constraints-rocm-torch.txt # huggingface-hub diff --git a/benchmarks/huggingface/requirements.xpu.txt b/benchmarks/huggingface/requirements.xpu.txt index 5fb5d8f0d..f46a61603 100644 --- a/benchmarks/huggingface/requirements.xpu.txt +++ b/benchmarks/huggingface/requirements.xpu.txt @@ -4,7 +4,10 @@ # # pip-compile --output-file=benchmarks/huggingface/requirements.xpu.txt .pin/tmp-constraints-xpu-hf.txt benchmarks/huggingface/requirements.in # +--extra-index-url https://pypi.ngc.nvidia.com --extra-index-url https://pytorch-extension.intel.com/release-whl/stable/xpu/us/ +--find-links https://storage.googleapis.com/jax-releases/jax_cuda_releases.html +--trusted-host pypi.ngc.nvidia.com antlr4-python3-runtime==4.9.3 # via @@ -14,7 +17,7 @@ asttokens==2.4.1 # via # -c .pin/../.pin/constraints-xpu-torch.txt # giving -certifi==2024.2.2 +certifi==2024.6.2 # via # -c .pin/../.pin/constraints-xpu-torch.txt # requests @@ -22,7 +25,7 @@ charset-normalizer==3.3.2 # via # -c .pin/../.pin/constraints-xpu-torch.txt # requests -codefind==0.1.3 +codefind==0.1.6 # via # -c 
.pin/../.pin/constraints-xpu-torch.txt # ptera @@ -30,13 +33,14 @@ executing==1.2.0 # via # -c .pin/../.pin/constraints-xpu-torch.txt # varname -filelock==3.13.4 +filelock==3.14.0 # via # -c .pin/../.pin/constraints-xpu-torch.txt # huggingface-hub # torch # transformers -fsspec==2024.2.0 + # triton +fsspec==2024.3.1 # via # -c .pin/../.pin/constraints-xpu-torch.txt # huggingface-hub @@ -46,7 +50,7 @@ giving==0.4.2 # -c .pin/../.pin/constraints-xpu-torch.txt # ptera # voir -huggingface-hub==0.22.2 +huggingface-hub==0.23.3 # via # -c .pin/../.pin/constraints-xpu-torch.txt # tokenizers @@ -55,7 +59,7 @@ idna==3.7 # via # -c .pin/../.pin/constraints-xpu-torch.txt # requests -jinja2==3.1.3 +jinja2==3.1.4 # via # -c .pin/../.pin/constraints-xpu-torch.txt # torch @@ -75,7 +79,7 @@ mpmath==1.3.0 # via # -c .pin/../.pin/constraints-xpu-torch.txt # sympy -networkx +networkx==3.3 # via # -c .pin/../.pin/constraints-xpu-torch.txt # torch @@ -83,6 +87,58 @@ numpy==1.26.4 # via # -c .pin/../.pin/constraints-xpu-torch.txt # transformers +nvidia-cublas-cu12==12.1.3.1 + # via + # -c .pin/../.pin/constraints-xpu-torch.txt + # nvidia-cudnn-cu12 + # nvidia-cusolver-cu12 + # torch +nvidia-cuda-cupti-cu12==12.1.105 + # via + # -c .pin/../.pin/constraints-xpu-torch.txt + # torch +nvidia-cuda-nvrtc-cu12==12.1.105 + # via + # -c .pin/../.pin/constraints-xpu-torch.txt + # torch +nvidia-cuda-runtime-cu12==12.1.105 + # via + # -c .pin/../.pin/constraints-xpu-torch.txt + # torch +nvidia-cudnn-cu12==8.9.2.26 + # via + # -c .pin/../.pin/constraints-xpu-torch.txt + # torch +nvidia-cufft-cu12==11.0.2.54 + # via + # -c .pin/../.pin/constraints-xpu-torch.txt + # torch +nvidia-curand-cu12==10.3.2.106 + # via + # -c .pin/../.pin/constraints-xpu-torch.txt + # torch +nvidia-cusolver-cu12==11.4.5.107 + # via + # -c .pin/../.pin/constraints-xpu-torch.txt + # torch +nvidia-cusparse-cu12==12.1.0.106 + # via + # -c .pin/../.pin/constraints-xpu-torch.txt + # nvidia-cusolver-cu12 + # torch 
+nvidia-nccl-cu12==2.20.5 + # via + # -c .pin/../.pin/constraints-xpu-torch.txt + # torch +nvidia-nvjitlink-cu12==12.5.40 + # via + # -c .pin/../.pin/constraints-xpu-torch.txt + # nvidia-cusolver-cu12 + # nvidia-cusparse-cu12 +nvidia-nvtx-cu12==12.1.105 + # via + # -c .pin/../.pin/constraints-xpu-torch.txt + # torch omegaconf==2.3.0 # via # -c .pin/../.pin/constraints-xpu-torch.txt @@ -91,16 +147,20 @@ ovld==0.3.5 # via # -c .pin/../.pin/constraints-xpu-torch.txt # voir -packaging==24.0 +packaging==24.1 # via # -c .pin/../.pin/constraints-xpu-torch.txt # huggingface-hub # transformers +psutil==5.9.8 + # via + # -c .pin/../.pin/constraints-xpu-torch.txt + # voir ptera==1.4.1 # via # -c .pin/../.pin/constraints-xpu-torch.txt # voir -pygments==2.17.2 +pygments==2.18.0 # via # -c .pin/../.pin/constraints-xpu-torch.txt # rich @@ -118,11 +178,11 @@ reactivex==4.0.4 # via # -c .pin/../.pin/constraints-xpu-torch.txt # giving -regex==2024.4.16 +regex==2024.5.15 # via # -c .pin/../.pin/constraints-xpu-torch.txt # transformers -requests==2.31.0 +requests==2.32.3 # via # -c .pin/../.pin/constraints-xpu-torch.txt # huggingface-hub @@ -139,7 +199,7 @@ six==1.16.0 # via # -c .pin/../.pin/constraints-xpu-torch.txt # asttokens -sympy==1.12 +sympy==1.12.1 # via # -c .pin/../.pin/constraints-xpu-torch.txt # torch @@ -147,21 +207,25 @@ tokenizers==0.19.1 # via # -c .pin/../.pin/constraints-xpu-torch.txt # transformers -torch==2.1.0a0+cxx11.abi +torch==2.3.1 # via # -c .pin/../.pin/constraints-xpu-torch.txt # -c .pin/../constraints/xpu.txt # -r benchmarks/huggingface/requirements.in -tqdm==4.66.2 +tqdm==4.66.4 # via # -c .pin/../.pin/constraints-xpu-torch.txt # huggingface-hub # transformers -transformers==4.40.0 +transformers==4.41.2 # via # -c .pin/../.pin/constraints-xpu-torch.txt # -r benchmarks/huggingface/requirements.in -typing-extensions==4.11.0 +triton==2.3.1 + # via + # -c .pin/../.pin/constraints-xpu-torch.txt + # torch +typing-extensions==4.12.2 # via # -c 
.pin/../.pin/constraints-xpu-torch.txt # huggingface-hub diff --git a/benchmarks/llama/main.py b/benchmarks/llama/main.py index bc85c056e..da00a9c91 100755 --- a/benchmarks/llama/main.py +++ b/benchmarks/llama/main.py @@ -8,7 +8,7 @@ import torch -from benchmate.common import setupvoir +from benchmate.monitor import setupvoir import torchcompat.core as accelerator root = os.path.dirname(__file__) @@ -71,16 +71,22 @@ def huggingface_main(args, model, config): LlamaTokenizerFast.from_pretrained("hf-internal-testing/llama-tokenizer") ) + if args.pretrained and args.prepare: + model = LlamaForCausalLM.from_pretrained(config["_name_or_path"]) + # Prepare is done if args.prepare: return 0 - + # We do not download LLAMA because it takes too long # we just instantiate an untrained one println("Model") device = accelerator.fetch_device(0) - model = LlamaForCausalLM(LlamaConfig.from_dict(config)).to(device=device) + if args.pretrained: + model = LlamaForCausalLM.from_pretrained(config["_name_or_path"]).to(device=device) + else: + model = LlamaForCausalLM(LlamaConfig.from_dict(config)).to(device=device) println("Pipeline") pipeline = transformers.pipeline( @@ -155,6 +161,7 @@ def main(): parser.add_argument("--model", default="llama2-7b", choices=models.keys()) parser.add_argument("--prepare", action="store_true") parser.add_argument("--cache", required=True, type=str) + parser.add_argument("--pretrained", action="store_true", default=False) # args = parser.parse_args() @@ -171,8 +178,11 @@ def main(): if __name__ == "__main__": + from voir.wrapper import StopProgram + import traceback try: main() - except Exception as err: - # Habana likes to eat exceptions - print(err) \ No newline at end of file + except StopProgram: + print("Early stopped") + except Exception: + traceback.print_exc() \ No newline at end of file diff --git a/benchmarks/llama/requirements.cuda.txt b/benchmarks/llama/requirements.cuda.txt index 3811e9577..52a3b8c66 100644 --- 
a/benchmarks/llama/requirements.cuda.txt +++ b/benchmarks/llama/requirements.cuda.txt @@ -2,180 +2,338 @@ # This file is autogenerated by pip-compile with Python 3.11 # by the following command: # -# pip-compile --config=pyproject.toml --output-file=benchmarks/llama/requirements.cuda.txt --resolver=backtracking .pin/tmp-constraints-cuda-llm.txt benchmarks/llama/requirements.in +# pip-compile --output-file=benchmarks/llama/requirements.cuda.txt .pin/tmp-constraints-cuda-llm.txt benchmarks/llama/requirements.in # ---extra-index-url https://download.pytorch.org/whl/cu118 +--extra-index-url https://pypi.ngc.nvidia.com +--extra-index-url https://download.pytorch.org/whl/cu121 +--find-links https://storage.googleapis.com/jax-releases/jax_cuda_releases.html +--trusted-host pypi.ngc.nvidia.com -aiohttp==3.8.6 +aiohttp==3.9.5 # via + # -c .pin/../.pin/constraints-cuda-torch.txt # datasets # fsspec aiosignal==1.3.1 - # via aiohttp + # via + # -c .pin/../.pin/constraints-cuda-torch.txt + # aiohttp antlr4-python3-runtime==4.9.3 - # via omegaconf + # via + # -c .pin/../.pin/constraints-cuda-torch.txt + # omegaconf asttokens==2.4.1 - # via giving -async-timeout==4.0.3 - # via aiohttp -attrs==23.1.0 - # via aiohttp -certifi==2023.7.22 - # via requests -charset-normalizer==3.3.2 # via + # -c .pin/../.pin/constraints-cuda-torch.txt + # giving +attrs==23.2.0 + # via + # -c .pin/../.pin/constraints-cuda-torch.txt # aiohttp +certifi==2024.6.2 + # via + # -c .pin/../.pin/constraints-cuda-torch.txt # requests -codefind==0.1.3 - # via ptera -datasets==2.14.6 - # via -r benchmarks/llama/requirements.in -dill==0.3.7 +charset-normalizer==3.3.2 + # via + # -c .pin/../.pin/constraints-cuda-torch.txt + # requests +codefind==0.1.6 + # via + # -c .pin/../.pin/constraints-cuda-torch.txt + # ptera +datasets==2.19.2 # via + # -c .pin/../.pin/constraints-cuda-torch.txt + # -r benchmarks/llama/requirements.in +dill==0.3.8 + # via + # -c .pin/../.pin/constraints-cuda-torch.txt # datasets # 
multiprocess executing==1.2.0 - # via varname + # via + # -c .pin/../.pin/constraints-cuda-torch.txt + # varname fairscale==0.4.13 - # via -r benchmarks/llama/requirements.in -filelock==3.13.1 # via + # -c .pin/../.pin/constraints-cuda-torch.txt + # -r benchmarks/llama/requirements.in +filelock==3.14.0 + # via + # -c .pin/../.pin/constraints-cuda-torch.txt + # datasets # huggingface-hub # torch # transformers # triton -fire==0.5.0 - # via -r benchmarks/llama/requirements.in -frozenlist==1.4.0 +fire==0.6.0 + # via + # -c .pin/../.pin/constraints-cuda-torch.txt + # -r benchmarks/llama/requirements.in +frozenlist==1.4.1 # via + # -c .pin/../.pin/constraints-cuda-torch.txt # aiohttp # aiosignal -fsspec[http]==2023.10.0 +fsspec[http]==2024.3.1 # via + # -c .pin/../.pin/constraints-cuda-torch.txt # datasets # huggingface-hub # torch giving==0.4.2 # via + # -c .pin/../.pin/constraints-cuda-torch.txt # ptera # voir -huggingface-hub==0.17.3 +huggingface-hub==0.23.3 # via + # -c .pin/../.pin/constraints-cuda-torch.txt # datasets # tokenizers # transformers -idna==3.4 +idna==3.7 # via + # -c .pin/../.pin/constraints-cuda-torch.txt # requests # yarl -jinja2==3.1.2 - # via torch +jinja2==3.1.4 + # via + # -c .pin/../.pin/constraints-cuda-torch.txt + # torch markdown-it-py==3.0.0 - # via rich -markupsafe==2.1.3 - # via jinja2 + # via + # -c .pin/../.pin/constraints-cuda-torch.txt + # rich +markupsafe==2.1.5 + # via + # -c .pin/../.pin/constraints-cuda-torch.txt + # jinja2 mdurl==0.1.2 - # via markdown-it-py + # via + # -c .pin/../.pin/constraints-cuda-torch.txt + # markdown-it-py mpmath==1.3.0 - # via sympy -multidict==6.0.4 # via + # -c .pin/../.pin/constraints-cuda-torch.txt + # sympy +multidict==6.0.5 + # via + # -c .pin/../.pin/constraints-cuda-torch.txt # aiohttp # yarl -multiprocess==0.70.15 - # via datasets -networkx==3.2.1 - # via torch -numpy==1.26.1 +multiprocess==0.70.16 # via + # -c .pin/../.pin/constraints-cuda-torch.txt + # datasets +networkx==3.3 + # via + # -c 
.pin/../.pin/constraints-cuda-torch.txt + # torch +numpy==1.26.4 + # via + # -c .pin/../.pin/constraints-cuda-torch.txt # datasets # fairscale # pandas # pyarrow # transformers +nvidia-cublas-cu12==12.1.3.1 + # via + # -c .pin/../.pin/constraints-cuda-torch.txt + # nvidia-cudnn-cu12 + # nvidia-cusolver-cu12 + # torch +nvidia-cuda-cupti-cu12==12.1.105 + # via + # -c .pin/../.pin/constraints-cuda-torch.txt + # torch +nvidia-cuda-nvrtc-cu12==12.1.105 + # via + # -c .pin/../.pin/constraints-cuda-torch.txt + # torch +nvidia-cuda-runtime-cu12==12.1.105 + # via + # -c .pin/../.pin/constraints-cuda-torch.txt + # torch +nvidia-cudnn-cu12==8.9.2.26 + # via + # -c .pin/../.pin/constraints-cuda-torch.txt + # torch +nvidia-cufft-cu12==11.0.2.54 + # via + # -c .pin/../.pin/constraints-cuda-torch.txt + # torch +nvidia-curand-cu12==10.3.2.106 + # via + # -c .pin/../.pin/constraints-cuda-torch.txt + # torch +nvidia-cusolver-cu12==11.4.5.107 + # via + # -c .pin/../.pin/constraints-cuda-torch.txt + # torch +nvidia-cusparse-cu12==12.1.0.106 + # via + # -c .pin/../.pin/constraints-cuda-torch.txt + # nvidia-cusolver-cu12 + # torch +nvidia-nccl-cu12==2.20.5 + # via + # -c .pin/../.pin/constraints-cuda-torch.txt + # torch +nvidia-nvjitlink-cu12==12.5.40 + # via + # -c .pin/../.pin/constraints-cuda-torch.txt + # nvidia-cusolver-cu12 + # nvidia-cusparse-cu12 +nvidia-nvtx-cu12==12.1.105 + # via + # -c .pin/../.pin/constraints-cuda-torch.txt + # torch omegaconf==2.3.0 - # via voir -ovld==0.3.2 - # via voir -packaging==23.2 # via + # -c .pin/../.pin/constraints-cuda-torch.txt + # voir +ovld==0.3.5 + # via + # -c .pin/../.pin/constraints-cuda-torch.txt + # voir +packaging==24.1 + # via + # -c .pin/../.pin/constraints-cuda-torch.txt # datasets # huggingface-hub # transformers -pandas==2.1.2 - # via datasets +pandas==2.2.2 + # via + # -c .pin/../.pin/constraints-cuda-torch.txt + # datasets +psutil==5.9.8 + # via + # -c .pin/../.pin/constraints-cuda-torch.txt + # voir ptera==1.4.1 - # via voir 
-pyarrow==14.0.0 - # via datasets -pygments==2.16.1 - # via rich + # via + # -c .pin/../.pin/constraints-cuda-torch.txt + # voir +pyarrow==16.1.0 + # via + # -c .pin/../.pin/constraints-cuda-torch.txt + # datasets +pyarrow-hotfix==0.6 + # via + # -c .pin/../.pin/constraints-cuda-torch.txt + # datasets +pygments==2.18.0 + # via + # -c .pin/../.pin/constraints-cuda-torch.txt + # rich pynvml==11.5.0 - # via voir -python-dateutil==2.8.2 - # via pandas -pytz==2023.3.post1 - # via pandas + # via + # -c .pin/../.pin/constraints-cuda-torch.txt + # voir +python-dateutil==2.9.0.post0 + # via + # -c .pin/../.pin/constraints-cuda-torch.txt + # pandas +pytz==2024.1 + # via + # -c .pin/../.pin/constraints-cuda-torch.txt + # pandas pyyaml==6.0.1 # via + # -c .pin/../.pin/constraints-cuda-torch.txt # datasets # huggingface-hub # omegaconf # transformers reactivex==4.0.4 - # via giving -regex==2023.10.3 - # via transformers -requests==2.31.0 # via + # -c .pin/../.pin/constraints-cuda-torch.txt + # giving +regex==2024.5.15 + # via + # -c .pin/../.pin/constraints-cuda-torch.txt + # transformers +requests==2.32.3 + # via + # -c .pin/../.pin/constraints-cuda-torch.txt # datasets - # fsspec # huggingface-hub # transformers -rich==13.6.0 - # via voir -safetensors==0.4.0 - # via transformers -sentencepiece==0.1.99 - # via -r benchmarks/llama/requirements.in +rich==13.7.1 + # via + # -c .pin/../.pin/constraints-cuda-torch.txt + # voir +safetensors==0.4.3 + # via + # -c .pin/../.pin/constraints-cuda-torch.txt + # transformers +sentencepiece==0.2.0 + # via + # -c .pin/../.pin/constraints-cuda-torch.txt + # -r benchmarks/llama/requirements.in six==1.16.0 # via + # -c .pin/../.pin/constraints-cuda-torch.txt # asttokens # fire # python-dateutil -sympy==1.12 - # via torch -termcolor==2.3.0 - # via fire -tokenizers==0.14.1 - # via transformers -torch==2.1.0+cu118 +sympy==1.12.1 + # via + # -c .pin/../.pin/constraints-cuda-torch.txt + # torch +termcolor==2.4.0 + # via + # -c 
.pin/../.pin/constraints-cuda-torch.txt + # fire +tokenizers==0.19.1 + # via + # -c .pin/../.pin/constraints-cuda-torch.txt + # transformers +torch==2.3.1+cu121 # via + # -c .pin/../.pin/constraints-cuda-torch.txt # -r benchmarks/llama/requirements.in # fairscale -tqdm==4.66.1 +tqdm==4.66.4 # via + # -c .pin/../.pin/constraints-cuda-torch.txt # datasets # huggingface-hub # transformers -transformers==4.35.0 - # via -r benchmarks/llama/requirements.in -triton==2.1.0 - # via torch -typing-extensions==4.8.0 +transformers==4.41.2 + # via + # -c .pin/../.pin/constraints-cuda-torch.txt + # -r benchmarks/llama/requirements.in +triton==2.3.1 # via + # -c .pin/../.pin/constraints-cuda-torch.txt + # torch +typing-extensions==4.12.2 + # via + # -c .pin/../.pin/constraints-cuda-torch.txt # huggingface-hub # reactivex # torch -tzdata==2023.3 - # via pandas -urllib3==2.0.7 - # via requests +tzdata==2024.1 + # via + # -c .pin/../.pin/constraints-cuda-torch.txt + # pandas +urllib3==1.26.18 + # via + # -c .pin/../.pin/constraints-cuda-torch.txt + # requests varname==0.10.0 # via giving voir==0.2.15 # via -r benchmarks/llama/requirements.in xxhash==3.4.1 - # via datasets -yarl==1.9.2 - # via aiohttp + # via + # -c .pin/../.pin/constraints-cuda-torch.txt + # datasets +yarl==1.9.4 + # via + # -c .pin/../.pin/constraints-cuda-torch.txt + # aiohttp diff --git a/benchmarks/llama/requirements.hpu.txt b/benchmarks/llama/requirements.hpu.txt index e7670b6f7..d4b7b2514 100644 --- a/benchmarks/llama/requirements.hpu.txt +++ b/benchmarks/llama/requirements.hpu.txt @@ -1,9 +1,13 @@ # -# This file is autogenerated by pip-compile with Python 3.10 +# This file is autogenerated by pip-compile with Python 3.11 # by the following command: # -# pip-compile --output-file=milabench/benchmarks/llama/requirements.hpu.txt --resolver=backtracking .pin/tmp-constraints-hpu-llm.txt milabench/benchmarks/llama/requirements.in +# pip-compile --output-file=benchmarks/llama/requirements.hpu.txt 
.pin/tmp-constraints-hpu-llm.txt benchmarks/llama/requirements.in # +--extra-index-url https://pypi.ngc.nvidia.com +--find-links https://storage.googleapis.com/jax-releases/jax_cuda_releases.html +--trusted-host pypi.ngc.nvidia.com + aiohttp==3.9.5 # via # -c .pin/../.pin/constraints-hpu-torch.txt @@ -21,15 +25,11 @@ asttokens==2.4.1 # via # -c .pin/../.pin/constraints-hpu-torch.txt # giving -async-timeout==4.0.3 - # via - # -c .pin/../.pin/constraints-hpu-torch.txt - # aiohttp attrs==23.2.0 # via # -c .pin/../.pin/constraints-hpu-torch.txt # aiohttp -certifi==2024.2.2 +certifi==2024.6.2 # via # -c .pin/../.pin/constraints-hpu-torch.txt # requests @@ -37,12 +37,14 @@ charset-normalizer==3.3.2 # via # -c .pin/../.pin/constraints-hpu-torch.txt # requests -codefind==0.1.4 +codefind==0.1.6 # via # -c .pin/../.pin/constraints-hpu-torch.txt # ptera -datasets==2.19.1 - # via -r milabench/benchmarks/llama/requirements.in +datasets==2.19.2 + # via + # -c .pin/../.pin/constraints-hpu-torch.txt + # -r benchmarks/llama/requirements.in dill==0.3.8 # via # -c .pin/../.pin/constraints-hpu-torch.txt @@ -53,7 +55,9 @@ executing==1.2.0 # -c .pin/../.pin/constraints-hpu-torch.txt # varname fairscale==0.4.13 - # via -r milabench/benchmarks/llama/requirements.in + # via + # -c .pin/../.pin/constraints-hpu-torch.txt + # -r benchmarks/llama/requirements.in filelock==3.14.0 # via # -c .pin/../.pin/constraints-hpu-torch.txt @@ -63,7 +67,9 @@ filelock==3.14.0 # transformers # triton fire==0.6.0 - # via -r milabench/benchmarks/llama/requirements.in + # via + # -c .pin/../.pin/constraints-hpu-torch.txt + # -r benchmarks/llama/requirements.in frozenlist==1.4.1 # via # -c .pin/../.pin/constraints-hpu-torch.txt @@ -80,7 +86,7 @@ giving==0.4.2 # -c .pin/../.pin/constraints-hpu-torch.txt # ptera # voir -huggingface-hub==0.23.0 +huggingface-hub==0.23.3 # via # -c .pin/../.pin/constraints-hpu-torch.txt # datasets @@ -175,7 +181,7 @@ nvidia-nccl-cu12==2.20.5 # via # -c 
.pin/../.pin/constraints-hpu-torch.txt # torch -nvidia-nvjitlink-cu12==12.4.127 +nvidia-nvjitlink-cu12==12.5.40 # via # -c .pin/../.pin/constraints-hpu-torch.txt # nvidia-cusolver-cu12 @@ -192,7 +198,7 @@ ovld==0.3.5 # via # -c .pin/../.pin/constraints-hpu-torch.txt # voir -packaging==24.0 +packaging==24.1 # via # -c .pin/../.pin/constraints-hpu-torch.txt # datasets @@ -210,7 +216,7 @@ ptera==1.4.1 # via # -c .pin/../.pin/constraints-hpu-torch.txt # voir -pyarrow==16.0.0 +pyarrow==16.1.0 # via # -c .pin/../.pin/constraints-hpu-torch.txt # datasets @@ -245,11 +251,11 @@ reactivex==4.0.4 # via # -c .pin/../.pin/constraints-hpu-torch.txt # giving -regex==2024.4.28 +regex==2024.5.15 # via # -c .pin/../.pin/constraints-hpu-torch.txt # transformers -requests==2.31.0 +requests==2.32.3 # via # -c .pin/../.pin/constraints-hpu-torch.txt # datasets @@ -264,14 +270,16 @@ safetensors==0.4.3 # -c .pin/../.pin/constraints-hpu-torch.txt # transformers sentencepiece==0.2.0 - # via -r milabench/benchmarks/llama/requirements.in + # via + # -c .pin/../.pin/constraints-hpu-torch.txt + # -r benchmarks/llama/requirements.in six==1.16.0 # via # -c .pin/../.pin/constraints-hpu-torch.txt # asttokens # fire # python-dateutil -sympy==1.12 +sympy==1.12.1 # via # -c .pin/../.pin/constraints-hpu-torch.txt # torch @@ -283,9 +291,10 @@ tokenizers==0.19.1 # via # -c .pin/../.pin/constraints-hpu-torch.txt # transformers -torch==2.3.0 +torch==2.3.1 # via - # -r milabench/benchmarks/llama/requirements.in + # -c .pin/../.pin/constraints-hpu-torch.txt + # -r benchmarks/llama/requirements.in # fairscale tqdm==4.66.4 # via @@ -293,13 +302,15 @@ tqdm==4.66.4 # datasets # huggingface-hub # transformers -transformers==4.40.2 - # via -r milabench/benchmarks/llama/requirements.in -triton==2.3.0 +transformers==4.41.2 + # via + # -c .pin/../.pin/constraints-hpu-torch.txt + # -r benchmarks/llama/requirements.in +triton==2.3.1 # via # -c .pin/../.pin/constraints-hpu-torch.txt # torch -typing-extensions==4.11.0 
+typing-extensions==4.12.2 # via # -c .pin/../.pin/constraints-hpu-torch.txt # huggingface-hub diff --git a/benchmarks/llama/requirements.rocm.txt b/benchmarks/llama/requirements.rocm.txt index bee33abac..d7d177dba 100644 --- a/benchmarks/llama/requirements.rocm.txt +++ b/benchmarks/llama/requirements.rocm.txt @@ -2,185 +2,286 @@ # This file is autogenerated by pip-compile with Python 3.11 # by the following command: # -# pip-compile --config=pyproject.toml --output-file=benchmarks/llama/requirements.rocm.txt --resolver=backtracking .pin/tmp-constraints-rocm-llm.txt benchmarks/llama/requirements.in +# pip-compile --output-file=benchmarks/llama/requirements.rocm.txt .pin/tmp-constraints-rocm-llm.txt benchmarks/llama/requirements.in # ---extra-index-url https://download.pytorch.org/whl/rocm5.6/ +--extra-index-url https://pypi.ngc.nvidia.com +--extra-index-url https://download.pytorch.org/whl/rocm6.0 +--find-links https://storage.googleapis.com/jax-releases/jax_cuda_releases.html +--trusted-host pypi.ngc.nvidia.com -aiohttp==3.8.6 +aiohttp==3.9.5 # via + # -c .pin/../.pin/constraints-rocm-torch.txt # datasets # fsspec aiosignal==1.3.1 - # via aiohttp + # via + # -c .pin/../.pin/constraints-rocm-torch.txt + # aiohttp antlr4-python3-runtime==4.9.3 - # via omegaconf + # via + # -c .pin/../.pin/constraints-rocm-torch.txt + # omegaconf asttokens==2.4.1 - # via giving -async-timeout==4.0.3 - # via aiohttp -attrs==23.1.0 - # via aiohttp -certifi==2023.7.22 - # via requests -charset-normalizer==3.3.2 # via + # -c .pin/../.pin/constraints-rocm-torch.txt + # giving +attrs==23.2.0 + # via + # -c .pin/../.pin/constraints-rocm-torch.txt # aiohttp +certifi==2024.6.2 + # via + # -c .pin/../.pin/constraints-rocm-torch.txt # requests -cmake==3.27.7 - # via pytorch-triton-rocm -codefind==0.1.3 - # via ptera -datasets==2.14.6 - # via -r benchmarks/llama/requirements.in -dill==0.3.7 +charset-normalizer==3.3.2 + # via + # -c .pin/../.pin/constraints-rocm-torch.txt + # requests 
+codefind==0.1.6 + # via + # -c .pin/../.pin/constraints-rocm-torch.txt + # ptera +datasets==2.19.2 # via + # -c .pin/../.pin/constraints-rocm-torch.txt + # -r benchmarks/llama/requirements.in +dill==0.3.8 + # via + # -c .pin/../.pin/constraints-rocm-torch.txt # datasets # multiprocess executing==1.2.0 - # via varname + # via + # -c .pin/../.pin/constraints-rocm-torch.txt + # varname fairscale==0.4.13 - # via -r benchmarks/llama/requirements.in -filelock==3.13.1 # via + # -c .pin/../.pin/constraints-rocm-torch.txt + # -r benchmarks/llama/requirements.in +filelock==3.14.0 + # via + # -c .pin/../.pin/constraints-rocm-torch.txt + # datasets # huggingface-hub # pytorch-triton-rocm # torch # transformers -fire==0.5.0 - # via -r benchmarks/llama/requirements.in -frozenlist==1.4.0 +fire==0.6.0 # via + # -c .pin/../.pin/constraints-rocm-torch.txt + # -r benchmarks/llama/requirements.in +frozenlist==1.4.1 + # via + # -c .pin/../.pin/constraints-rocm-torch.txt # aiohttp # aiosignal -fsspec[http]==2023.10.0 +fsspec[http]==2024.3.1 # via + # -c .pin/../.pin/constraints-rocm-torch.txt # datasets # huggingface-hub # torch giving==0.4.2 # via + # -c .pin/../.pin/constraints-rocm-torch.txt # ptera # voir -huggingface-hub==0.17.3 +huggingface-hub==0.23.3 # via + # -c .pin/../.pin/constraints-rocm-torch.txt # datasets # tokenizers # transformers -idna==3.4 +idna==3.7 # via + # -c .pin/../.pin/constraints-rocm-torch.txt # requests # yarl -jinja2==3.1.2 - # via torch -lit==17.0.4 - # via pytorch-triton-rocm +jinja2==3.1.4 + # via + # -c .pin/../.pin/constraints-rocm-torch.txt + # torch markdown-it-py==3.0.0 - # via rich -markupsafe==2.1.3 - # via jinja2 + # via + # -c .pin/../.pin/constraints-rocm-torch.txt + # rich +markupsafe==2.1.5 + # via + # -c .pin/../.pin/constraints-rocm-torch.txt + # jinja2 mdurl==0.1.2 - # via markdown-it-py + # via + # -c .pin/../.pin/constraints-rocm-torch.txt + # markdown-it-py mpmath==1.3.0 - # via sympy -multidict==6.0.4 # via + # -c 
.pin/../.pin/constraints-rocm-torch.txt + # sympy +multidict==6.0.5 + # via + # -c .pin/../.pin/constraints-rocm-torch.txt # aiohttp # yarl -multiprocess==0.70.15 - # via datasets -networkx==3.2.1 - # via torch -numpy==1.26.1 +multiprocess==0.70.16 + # via + # -c .pin/../.pin/constraints-rocm-torch.txt + # datasets +networkx==3.3 + # via + # -c .pin/../.pin/constraints-rocm-torch.txt + # torch +numpy==1.26.4 # via + # -c .pin/../.pin/constraints-rocm-torch.txt # datasets # fairscale # pandas # pyarrow # transformers omegaconf==2.3.0 - # via voir -ovld==0.3.2 - # via voir -packaging==23.2 # via + # -c .pin/../.pin/constraints-rocm-torch.txt + # voir +ovld==0.3.5 + # via + # -c .pin/../.pin/constraints-rocm-torch.txt + # voir +packaging==24.1 + # via + # -c .pin/../.pin/constraints-rocm-torch.txt # datasets # huggingface-hub # transformers -pandas==2.1.2 - # via datasets +pandas==2.2.2 + # via + # -c .pin/../.pin/constraints-rocm-torch.txt + # datasets +psutil==5.9.8 + # via + # -c .pin/../.pin/constraints-rocm-torch.txt + # voir ptera==1.4.1 - # via voir -pyarrow==14.0.0 - # via datasets -pygments==2.16.1 - # via rich + # via + # -c .pin/../.pin/constraints-rocm-torch.txt + # voir +pyarrow==16.1.0 + # via + # -c .pin/../.pin/constraints-rocm-torch.txt + # datasets +pyarrow-hotfix==0.6 + # via + # -c .pin/../.pin/constraints-rocm-torch.txt + # datasets +pygments==2.18.0 + # via + # -c .pin/../.pin/constraints-rocm-torch.txt + # rich pynvml==11.5.0 - # via voir -python-dateutil==2.8.2 - # via pandas -pytorch-triton-rocm==2.1.0 - # via torch -pytz==2023.3.post1 - # via pandas + # via + # -c .pin/../.pin/constraints-rocm-torch.txt + # voir +python-dateutil==2.9.0.post0 + # via + # -c .pin/../.pin/constraints-rocm-torch.txt + # pandas +pytorch-triton-rocm==2.3.1 + # via + # -c .pin/../.pin/constraints-rocm-torch.txt + # torch +pytz==2024.1 + # via + # -c .pin/../.pin/constraints-rocm-torch.txt + # pandas pyyaml==6.0.1 # via + # -c .pin/../.pin/constraints-rocm-torch.txt 
# datasets # huggingface-hub # omegaconf # transformers reactivex==4.0.4 - # via giving -regex==2023.10.3 - # via transformers -requests==2.31.0 # via + # -c .pin/../.pin/constraints-rocm-torch.txt + # giving +regex==2024.5.15 + # via + # -c .pin/../.pin/constraints-rocm-torch.txt + # transformers +requests==2.32.3 + # via + # -c .pin/../.pin/constraints-rocm-torch.txt # datasets - # fsspec # huggingface-hub # transformers -rich==13.6.0 - # via voir -safetensors==0.4.0 - # via transformers -sentencepiece==0.1.99 - # via -r benchmarks/llama/requirements.in +rich==13.7.1 + # via + # -c .pin/../.pin/constraints-rocm-torch.txt + # voir +safetensors==0.4.3 + # via + # -c .pin/../.pin/constraints-rocm-torch.txt + # transformers +sentencepiece==0.2.0 + # via + # -c .pin/../.pin/constraints-rocm-torch.txt + # -r benchmarks/llama/requirements.in six==1.16.0 # via + # -c .pin/../.pin/constraints-rocm-torch.txt # asttokens # fire # python-dateutil -sympy==1.12 - # via torch -termcolor==2.3.0 - # via fire -tokenizers==0.14.1 - # via transformers -torch==2.1.0+rocm5.6 +sympy==1.12.1 # via + # -c .pin/../.pin/constraints-rocm-torch.txt + # torch +termcolor==2.4.0 + # via + # -c .pin/../.pin/constraints-rocm-torch.txt + # fire +tokenizers==0.19.1 + # via + # -c .pin/../.pin/constraints-rocm-torch.txt + # transformers +torch==2.3.1+rocm6.0 + # via + # -c .pin/../.pin/constraints-rocm-torch.txt # -r benchmarks/llama/requirements.in # fairscale - # pytorch-triton-rocm -tqdm==4.66.1 +tqdm==4.66.4 # via + # -c .pin/../.pin/constraints-rocm-torch.txt # datasets # huggingface-hub # transformers -transformers==4.35.0 - # via -r benchmarks/llama/requirements.in -typing-extensions==4.8.0 +transformers==4.41.2 # via + # -c .pin/../.pin/constraints-rocm-torch.txt + # -r benchmarks/llama/requirements.in +typing-extensions==4.12.2 + # via + # -c .pin/../.pin/constraints-rocm-torch.txt # huggingface-hub # reactivex # torch -tzdata==2023.3 - # via pandas -urllib3==2.0.7 - # via requests 
+tzdata==2024.1 + # via + # -c .pin/../.pin/constraints-rocm-torch.txt + # pandas +urllib3==1.26.18 + # via + # -c .pin/../.pin/constraints-rocm-torch.txt + # requests varname==0.10.0 # via giving voir==0.2.15 # via -r benchmarks/llama/requirements.in xxhash==3.4.1 - # via datasets -yarl==1.9.2 - # via aiohttp + # via + # -c .pin/../.pin/constraints-rocm-torch.txt + # datasets +yarl==1.9.4 + # via + # -c .pin/../.pin/constraints-rocm-torch.txt + # aiohttp diff --git a/benchmarks/llama/requirements.xpu.txt b/benchmarks/llama/requirements.xpu.txt index 30ed1c6db..cb7355a26 100644 --- a/benchmarks/llama/requirements.xpu.txt +++ b/benchmarks/llama/requirements.xpu.txt @@ -4,7 +4,10 @@ # # pip-compile --output-file=benchmarks/llama/requirements.xpu.txt .pin/tmp-constraints-xpu-llm.txt benchmarks/llama/requirements.in # +--extra-index-url https://pypi.ngc.nvidia.com --extra-index-url https://pytorch-extension.intel.com/release-whl/stable/xpu/us/ +--find-links https://storage.googleapis.com/jax-releases/jax_cuda_releases.html +--trusted-host pypi.ngc.nvidia.com aiohttp==3.9.5 # via @@ -27,7 +30,7 @@ attrs==23.2.0 # via # -c .pin/../.pin/constraints-xpu-torch.txt # aiohttp -certifi==2024.2.2 +certifi==2024.6.2 # via # -c .pin/../.pin/constraints-xpu-torch.txt # requests @@ -35,11 +38,11 @@ charset-normalizer==3.3.2 # via # -c .pin/../.pin/constraints-xpu-torch.txt # requests -codefind==0.1.3 +codefind==0.1.6 # via # -c .pin/../.pin/constraints-xpu-torch.txt # ptera -datasets==2.18.0 +datasets==2.19.2 # via # -c .pin/../.pin/constraints-xpu-torch.txt # -r benchmarks/llama/requirements.in @@ -56,13 +59,14 @@ fairscale==0.4.13 # via # -c .pin/../.pin/constraints-xpu-torch.txt # -r benchmarks/llama/requirements.in -filelock==3.13.4 +filelock==3.14.0 # via # -c .pin/../.pin/constraints-xpu-torch.txt # datasets # huggingface-hub # torch # transformers + # triton fire==0.6.0 # via # -c .pin/../.pin/constraints-xpu-torch.txt @@ -72,7 +76,7 @@ frozenlist==1.4.1 # -c 
.pin/../.pin/constraints-xpu-torch.txt # aiohttp # aiosignal -fsspec[http]==2024.2.0 +fsspec[http]==2024.3.1 # via # -c .pin/../.pin/constraints-xpu-torch.txt # datasets @@ -83,7 +87,7 @@ giving==0.4.2 # -c .pin/../.pin/constraints-xpu-torch.txt # ptera # voir -huggingface-hub==0.22.2 +huggingface-hub==0.23.3 # via # -c .pin/../.pin/constraints-xpu-torch.txt # datasets @@ -94,7 +98,7 @@ idna==3.7 # -c .pin/../.pin/constraints-xpu-torch.txt # requests # yarl -jinja2==3.1.3 +jinja2==3.1.4 # via # -c .pin/../.pin/constraints-xpu-torch.txt # torch @@ -123,7 +127,7 @@ multiprocess==0.70.16 # via # -c .pin/../.pin/constraints-xpu-torch.txt # datasets -networkx +networkx==3.3 # via # -c .pin/../.pin/constraints-xpu-torch.txt # torch @@ -135,6 +139,58 @@ numpy==1.26.4 # pandas # pyarrow # transformers +nvidia-cublas-cu12==12.1.3.1 + # via + # -c .pin/../.pin/constraints-xpu-torch.txt + # nvidia-cudnn-cu12 + # nvidia-cusolver-cu12 + # torch +nvidia-cuda-cupti-cu12==12.1.105 + # via + # -c .pin/../.pin/constraints-xpu-torch.txt + # torch +nvidia-cuda-nvrtc-cu12==12.1.105 + # via + # -c .pin/../.pin/constraints-xpu-torch.txt + # torch +nvidia-cuda-runtime-cu12==12.1.105 + # via + # -c .pin/../.pin/constraints-xpu-torch.txt + # torch +nvidia-cudnn-cu12==8.9.2.26 + # via + # -c .pin/../.pin/constraints-xpu-torch.txt + # torch +nvidia-cufft-cu12==11.0.2.54 + # via + # -c .pin/../.pin/constraints-xpu-torch.txt + # torch +nvidia-curand-cu12==10.3.2.106 + # via + # -c .pin/../.pin/constraints-xpu-torch.txt + # torch +nvidia-cusolver-cu12==11.4.5.107 + # via + # -c .pin/../.pin/constraints-xpu-torch.txt + # torch +nvidia-cusparse-cu12==12.1.0.106 + # via + # -c .pin/../.pin/constraints-xpu-torch.txt + # nvidia-cusolver-cu12 + # torch +nvidia-nccl-cu12==2.20.5 + # via + # -c .pin/../.pin/constraints-xpu-torch.txt + # torch +nvidia-nvjitlink-cu12==12.5.40 + # via + # -c .pin/../.pin/constraints-xpu-torch.txt + # nvidia-cusolver-cu12 + # nvidia-cusparse-cu12 +nvidia-nvtx-cu12==12.1.105 
+ # via + # -c .pin/../.pin/constraints-xpu-torch.txt + # torch omegaconf==2.3.0 # via # -c .pin/../.pin/constraints-xpu-torch.txt @@ -143,7 +199,7 @@ ovld==0.3.5 # via # -c .pin/../.pin/constraints-xpu-torch.txt # voir -packaging==24.0 +packaging==24.1 # via # -c .pin/../.pin/constraints-xpu-torch.txt # datasets @@ -153,11 +209,15 @@ pandas==2.2.2 # via # -c .pin/../.pin/constraints-xpu-torch.txt # datasets +psutil==5.9.8 + # via + # -c .pin/../.pin/constraints-xpu-torch.txt + # voir ptera==1.4.1 # via # -c .pin/../.pin/constraints-xpu-torch.txt # voir -pyarrow==15.0.2 +pyarrow==16.1.0 # via # -c .pin/../.pin/constraints-xpu-torch.txt # datasets @@ -165,7 +225,7 @@ pyarrow-hotfix==0.6 # via # -c .pin/../.pin/constraints-xpu-torch.txt # datasets -pygments==2.17.2 +pygments==2.18.0 # via # -c .pin/../.pin/constraints-xpu-torch.txt # rich @@ -192,11 +252,11 @@ reactivex==4.0.4 # via # -c .pin/../.pin/constraints-xpu-torch.txt # giving -regex==2024.4.16 +regex==2024.5.15 # via # -c .pin/../.pin/constraints-xpu-torch.txt # transformers -requests==2.31.0 +requests==2.32.3 # via # -c .pin/../.pin/constraints-xpu-torch.txt # datasets @@ -220,7 +280,7 @@ six==1.16.0 # asttokens # fire # python-dateutil -sympy==1.12 +sympy==1.12.1 # via # -c .pin/../.pin/constraints-xpu-torch.txt # torch @@ -232,23 +292,27 @@ tokenizers==0.19.1 # via # -c .pin/../.pin/constraints-xpu-torch.txt # transformers -torch==2.1.0a0+cxx11.abi +torch==2.3.1 # via # -c .pin/../.pin/constraints-xpu-torch.txt # -c .pin/../constraints/xpu.txt # -r benchmarks/llama/requirements.in # fairscale -tqdm==4.66.2 +tqdm==4.66.4 # via # -c .pin/../.pin/constraints-xpu-torch.txt # datasets # huggingface-hub # transformers -transformers==4.40.0 +transformers==4.41.2 # via # -c .pin/../.pin/constraints-xpu-torch.txt # -r benchmarks/llama/requirements.in -typing-extensions==4.11.0 +triton==2.3.1 + # via + # -c .pin/../.pin/constraints-xpu-torch.txt + # torch +typing-extensions==4.12.2 # via # -c 
.pin/../.pin/constraints-xpu-torch.txt # huggingface-hub diff --git a/benchmarks/rwkv/requirements.cuda.txt b/benchmarks/rwkv/requirements.cuda.txt index f63d62f4c..b84be8dd8 100644 --- a/benchmarks/rwkv/requirements.cuda.txt +++ b/benchmarks/rwkv/requirements.cuda.txt @@ -2,11 +2,14 @@ # This file is autogenerated by pip-compile with Python 3.11 # by the following command: # -# pip-compile --config=pyproject.toml --output-file=benchmarks/rwkv/requirements.cuda.txt --resolver=backtracking .pin/tmp-constraints-cuda-rwkv.txt benchmarks/rwkv/requirements.in +# pip-compile --output-file=benchmarks/rwkv/requirements.cuda.txt .pin/tmp-constraints-cuda-rwkv.txt benchmarks/rwkv/requirements.in # ---extra-index-url https://download.pytorch.org/whl/cu118 +--extra-index-url https://pypi.ngc.nvidia.com +--extra-index-url https://download.pytorch.org/whl/cu121 +--find-links https://storage.googleapis.com/jax-releases/jax_cuda_releases.html +--trusted-host pypi.ngc.nvidia.com -aiohttp==3.8.6 +aiohttp==3.9.5 # via # -c .pin/../.pin/constraints-cuda-torch.txt # fsspec @@ -22,44 +25,33 @@ asttokens==2.4.1 # via # -c .pin/../.pin/constraints-cuda-torch.txt # giving -async-timeout==4.0.3 +attrs==23.2.0 # via # -c .pin/../.pin/constraints-cuda-torch.txt # aiohttp -attrs==23.1.0 +codefind==0.1.6 # via # -c .pin/../.pin/constraints-cuda-torch.txt - # aiohttp -certifi==2023.7.22 - # via - # -c .pin/../.pin/constraints-cuda-torch.txt - # requests -charset-normalizer==3.3.2 - # via - # -c .pin/../.pin/constraints-cuda-torch.txt - # aiohttp - # requests -codefind==0.1.3 + # ptera +deepspeed==0.14.2 # via # -c .pin/../.pin/constraints-cuda-torch.txt - # ptera -deepspeed==0.12.2 - # via -r benchmarks/rwkv/requirements.in + # -r benchmarks/rwkv/requirements.in executing==1.2.0 # via # -c .pin/../.pin/constraints-cuda-torch.txt # varname -filelock==3.13.1 +filelock==3.14.0 # via # -c .pin/../.pin/constraints-cuda-torch.txt # torch # triton -frozenlist==1.4.0 +frozenlist==1.4.1 # via # -c 
.pin/../.pin/constraints-cuda-torch.txt # aiohttp # aiosignal -fsspec[http]==2023.10.0 +fsspec[http]==2024.3.1 # via # -c .pin/../.pin/constraints-cuda-torch.txt # pytorch-lightning @@ -73,16 +65,15 @@ hjson==3.1.0 # via # -c .pin/../.pin/constraints-cuda-torch.txt # deepspeed -idna==3.4 +idna==3.7 # via # -c .pin/../.pin/constraints-cuda-torch.txt - # requests # yarl -jinja2==3.1.2 +jinja2==3.1.4 # via # -c .pin/../.pin/constraints-cuda-torch.txt # torch -lightning-utilities==0.9.0 +lightning-utilities==0.11.2 # via # -c .pin/../.pin/constraints-cuda-torch.txt # pytorch-lightning @@ -91,7 +82,7 @@ markdown-it-py==3.0.0 # via # -c .pin/../.pin/constraints-cuda-torch.txt # rich -markupsafe==2.1.3 +markupsafe==2.1.5 # via # -c .pin/../.pin/constraints-cuda-torch.txt # jinja2 @@ -103,44 +94,99 @@ mpmath==1.3.0 # via # -c .pin/../.pin/constraints-cuda-torch.txt # sympy -multidict==6.0.4 +multidict==6.0.5 # via # -c .pin/../.pin/constraints-cuda-torch.txt # aiohttp # yarl -networkx==3.2.1 +networkx==3.3 # via # -c .pin/../.pin/constraints-cuda-torch.txt # torch ninja==1.11.1.1 # via + # -c .pin/../.pin/constraints-cuda-torch.txt # -r benchmarks/rwkv/requirements.in # deepspeed -numpy==1.26.1 +numpy==1.26.4 # via + # -c .pin/../.pin/constraints-cuda-torch.txt # -r benchmarks/rwkv/requirements.in # deepspeed # pytorch-lightning # torchmetrics +nvidia-cublas-cu12==12.1.3.1 + # via + # -c .pin/../.pin/constraints-cuda-torch.txt + # nvidia-cudnn-cu12 + # nvidia-cusolver-cu12 + # torch +nvidia-cuda-cupti-cu12==12.1.105 + # via + # -c .pin/../.pin/constraints-cuda-torch.txt + # torch +nvidia-cuda-nvrtc-cu12==12.1.105 + # via + # -c .pin/../.pin/constraints-cuda-torch.txt + # torch +nvidia-cuda-runtime-cu12==12.1.105 + # via + # -c .pin/../.pin/constraints-cuda-torch.txt + # torch +nvidia-cudnn-cu12==8.9.2.26 + # via + # -c .pin/../.pin/constraints-cuda-torch.txt + # torch +nvidia-cufft-cu12==11.0.2.54 + # via + # -c .pin/../.pin/constraints-cuda-torch.txt + # torch 
+nvidia-curand-cu12==10.3.2.106 + # via + # -c .pin/../.pin/constraints-cuda-torch.txt + # torch +nvidia-cusolver-cu12==11.4.5.107 + # via + # -c .pin/../.pin/constraints-cuda-torch.txt + # torch +nvidia-cusparse-cu12==12.1.0.106 + # via + # -c .pin/../.pin/constraints-cuda-torch.txt + # nvidia-cusolver-cu12 + # torch +nvidia-nccl-cu12==2.20.5 + # via + # -c .pin/../.pin/constraints-cuda-torch.txt + # torch +nvidia-nvjitlink-cu12==12.5.40 + # via + # -c .pin/../.pin/constraints-cuda-torch.txt + # nvidia-cusolver-cu12 + # nvidia-cusparse-cu12 +nvidia-nvtx-cu12==12.1.105 + # via + # -c .pin/../.pin/constraints-cuda-torch.txt + # torch omegaconf==2.3.0 # via # -c .pin/../.pin/constraints-cuda-torch.txt # voir -ovld==0.3.2 +ovld==0.3.5 # via # -c .pin/../.pin/constraints-cuda-torch.txt # voir -packaging==23.2 +packaging==24.1 # via # -c .pin/../.pin/constraints-cuda-torch.txt # deepspeed # lightning-utilities # pytorch-lightning # torchmetrics -psutil==5.9.6 +psutil==5.9.8 # via # -c .pin/../.pin/constraints-cuda-torch.txt # deepspeed + # voir ptera==1.4.1 # via # -c .pin/../.pin/constraints-cuda-torch.txt @@ -149,11 +195,12 @@ py-cpuinfo==9.0.0 # via # -c .pin/../.pin/constraints-cuda-torch.txt # deepspeed -pydantic==1.10.13 +pydantic==1.10.15 # via + # -c .pin/../.pin/constraints-cuda-torch.txt # -r benchmarks/rwkv/requirements.in # deepspeed -pygments==2.16.1 +pygments==2.18.0 # via # -c .pin/../.pin/constraints-cuda-torch.txt # rich @@ -163,7 +210,9 @@ pynvml==11.5.0 # deepspeed # voir pytorch-lightning==1.9.5 - # via -r benchmarks/rwkv/requirements.in + # via + # -c .pin/../.pin/constraints-cuda-torch.txt + # -r benchmarks/rwkv/requirements.in pyyaml==6.0.1 # via # -c .pin/../.pin/constraints-cuda-torch.txt @@ -173,11 +222,7 @@ reactivex==4.0.4 # via # -c .pin/../.pin/constraints-cuda-torch.txt # giving -requests==2.31.0 - # via - # -c .pin/../.pin/constraints-cuda-torch.txt - # fsspec -rich==13.6.0 +rich==13.7.1 # via # -c .pin/../.pin/constraints-cuda-torch.txt 
# voir @@ -185,12 +230,13 @@ six==1.16.0 # via # -c .pin/../.pin/constraints-cuda-torch.txt # asttokens -sympy==1.12 +sympy==1.12.1 # via # -c .pin/../.pin/constraints-cuda-torch.txt # torch -torch==2.1.0+cu118 +torch==2.3.1+cu121 # via + # -c .pin/../.pin/constraints-cuda-torch.txt # -r benchmarks/rwkv/requirements.in # deepspeed # pytorch-lightning @@ -199,16 +245,16 @@ torchmetrics==1.0.3 # via # -c .pin/../.pin/constraints-cuda-torch.txt # pytorch-lightning -tqdm==4.66.1 +tqdm==4.66.4 # via # -c .pin/../.pin/constraints-cuda-torch.txt # deepspeed # pytorch-lightning -triton==2.1.0 +triton==2.3.1 # via # -c .pin/../.pin/constraints-cuda-torch.txt # torch -typing-extensions==4.8.0 +typing-extensions==4.12.2 # via # -c .pin/../.pin/constraints-cuda-torch.txt # lightning-utilities @@ -216,10 +262,6 @@ typing-extensions==4.8.0 # pytorch-lightning # reactivex # torch -urllib3==1.26.18 - # via - # -c .pin/../.pin/constraints-cuda-torch.txt - # requests varname==0.10.0 # via # -c .pin/../.pin/constraints-cuda-torch.txt @@ -230,3 +272,6 @@ yarl==1.9.2 # via # -c .pin/../.pin/constraints-cuda-torch.txt # aiohttp + +# The following packages are considered to be unsafe in a requirements file: +# setuptools diff --git a/benchmarks/rwkv/requirements.hpu.txt b/benchmarks/rwkv/requirements.hpu.txt index eec415a46..1221dca43 100644 --- a/benchmarks/rwkv/requirements.hpu.txt +++ b/benchmarks/rwkv/requirements.hpu.txt @@ -1,9 +1,13 @@ # -# This file is autogenerated by pip-compile with Python 3.10 +# This file is autogenerated by pip-compile with Python 3.11 # by the following command: # -# pip-compile --output-file=milabench/benchmarks/rwkv/requirements.hpu.txt --resolver=backtracking .pin/tmp-constraints-hpu-rwkv.txt milabench/benchmarks/rwkv/requirements.in +# pip-compile --output-file=benchmarks/rwkv/requirements.hpu.txt .pin/tmp-constraints-hpu-rwkv.txt benchmarks/rwkv/requirements.in # +--extra-index-url https://pypi.ngc.nvidia.com +--find-links 
https://storage.googleapis.com/jax-releases/jax_cuda_releases.html +--trusted-host pypi.ngc.nvidia.com + aiohttp==3.9.5 # via # -c .pin/../.pin/constraints-hpu-torch.txt @@ -20,20 +24,18 @@ asttokens==2.4.1 # via # -c .pin/../.pin/constraints-hpu-torch.txt # giving -async-timeout==4.0.3 - # via - # -c .pin/../.pin/constraints-hpu-torch.txt - # aiohttp attrs==23.2.0 # via # -c .pin/../.pin/constraints-hpu-torch.txt # aiohttp -codefind==0.1.4 +codefind==0.1.6 # via # -c .pin/../.pin/constraints-hpu-torch.txt # ptera deepspeed==0.14.2 - # via -r milabench/benchmarks/rwkv/requirements.in + # via + # -c .pin/../.pin/constraints-hpu-torch.txt + # -r benchmarks/rwkv/requirements.in executing==1.2.0 # via # -c .pin/../.pin/constraints-hpu-torch.txt @@ -102,11 +104,13 @@ networkx==3.3 # torch ninja==1.11.1.1 # via - # -r milabench/benchmarks/rwkv/requirements.in + # -c .pin/../.pin/constraints-hpu-torch.txt + # -r benchmarks/rwkv/requirements.in # deepspeed numpy==1.26.4 # via - # -r milabench/benchmarks/rwkv/requirements.in + # -c .pin/../.pin/constraints-hpu-torch.txt + # -r benchmarks/rwkv/requirements.in # deepspeed # pytorch-lightning # torchmetrics @@ -153,7 +157,7 @@ nvidia-nccl-cu12==2.20.5 # via # -c .pin/../.pin/constraints-hpu-torch.txt # torch -nvidia-nvjitlink-cu12==12.4.127 +nvidia-nvjitlink-cu12==12.5.40 # via # -c .pin/../.pin/constraints-hpu-torch.txt # nvidia-cusolver-cu12 @@ -170,7 +174,7 @@ ovld==0.3.5 # via # -c .pin/../.pin/constraints-hpu-torch.txt # voir -packaging==24.0 +packaging==24.1 # via # -c .pin/../.pin/constraints-hpu-torch.txt # deepspeed @@ -192,7 +196,8 @@ py-cpuinfo==9.0.0 # deepspeed pydantic==1.10.15 # via - # -r milabench/benchmarks/rwkv/requirements.in + # -c .pin/../.pin/constraints-hpu-torch.txt + # -r benchmarks/rwkv/requirements.in # deepspeed pygments==2.18.0 # via @@ -204,7 +209,9 @@ pynvml==11.5.0 # deepspeed # voir pytorch-lightning==1.9.5 - # via -r milabench/benchmarks/rwkv/requirements.in + # via + # -c 
.pin/../.pin/constraints-hpu-torch.txt + # -r benchmarks/rwkv/requirements.in pyyaml==6.0.1 # via # -c .pin/../.pin/constraints-hpu-torch.txt @@ -222,13 +229,14 @@ six==1.16.0 # via # -c .pin/../.pin/constraints-hpu-torch.txt # asttokens -sympy==1.12 +sympy==1.12.1 # via # -c .pin/../.pin/constraints-hpu-torch.txt # torch -torch==2.3.0 +torch==2.3.1 # via - # -r milabench/benchmarks/rwkv/requirements.in + # -c .pin/../.pin/constraints-hpu-torch.txt + # -r benchmarks/rwkv/requirements.in # deepspeed # pytorch-lightning # torchmetrics @@ -241,11 +249,11 @@ tqdm==4.66.4 # -c .pin/../.pin/constraints-hpu-torch.txt # deepspeed # pytorch-lightning -triton==2.3.0 +triton==2.3.1 # via # -c .pin/../.pin/constraints-hpu-torch.txt # torch -typing-extensions==4.11.0 +typing-extensions==4.12.2 # via # -c .pin/../.pin/constraints-hpu-torch.txt # lightning-utilities diff --git a/benchmarks/rwkv/requirements.rocm.txt b/benchmarks/rwkv/requirements.rocm.txt index 873cf87cb..966009096 100644 --- a/benchmarks/rwkv/requirements.rocm.txt +++ b/benchmarks/rwkv/requirements.rocm.txt @@ -2,11 +2,14 @@ # This file is autogenerated by pip-compile with Python 3.11 # by the following command: # -# pip-compile --config=pyproject.toml --output-file=benchmarks/rwkv/requirements.rocm.txt --resolver=backtracking .pin/tmp-constraints-rocm-rwkv.txt benchmarks/rwkv/requirements.in +# pip-compile --output-file=benchmarks/rwkv/requirements.rocm.txt .pin/tmp-constraints-rocm-rwkv.txt benchmarks/rwkv/requirements.in # ---extra-index-url https://download.pytorch.org/whl/rocm5.6/ +--extra-index-url https://pypi.ngc.nvidia.com +--extra-index-url https://download.pytorch.org/whl/rocm6.0 +--find-links https://storage.googleapis.com/jax-releases/jax_cuda_releases.html +--trusted-host pypi.ngc.nvidia.com -aiohttp==3.8.6 +aiohttp==3.9.5 # via # -c .pin/../.pin/constraints-rocm-torch.txt # fsspec @@ -22,48 +25,33 @@ asttokens==2.4.1 # via # -c .pin/../.pin/constraints-rocm-torch.txt # giving -async-timeout==4.0.3 
+attrs==23.2.0 # via # -c .pin/../.pin/constraints-rocm-torch.txt # aiohttp -attrs==23.1.0 +codefind==0.1.6 # via # -c .pin/../.pin/constraints-rocm-torch.txt - # aiohttp -certifi==2023.7.22 - # via - # -c .pin/../.pin/constraints-rocm-torch.txt - # requests -charset-normalizer==3.3.2 - # via - # -c .pin/../.pin/constraints-rocm-torch.txt - # aiohttp - # requests -cmake==3.27.7 - # via - # -c .pin/../.pin/constraints-rocm-torch.txt - # pytorch-triton-rocm -codefind==0.1.3 + # ptera +deepspeed==0.14.2 # via # -c .pin/../.pin/constraints-rocm-torch.txt - # ptera -deepspeed==0.12.2 - # via -r benchmarks/rwkv/requirements.in + # -r benchmarks/rwkv/requirements.in executing==1.2.0 # via # -c .pin/../.pin/constraints-rocm-torch.txt # varname -filelock==3.13.1 +filelock==3.14.0 # via # -c .pin/../.pin/constraints-rocm-torch.txt # pytorch-triton-rocm # torch -frozenlist==1.4.0 +frozenlist==1.4.1 # via # -c .pin/../.pin/constraints-rocm-torch.txt # aiohttp # aiosignal -fsspec[http]==2023.10.0 +fsspec[http]==2024.3.1 # via # -c .pin/../.pin/constraints-rocm-torch.txt # pytorch-lightning @@ -77,29 +65,24 @@ hjson==3.1.0 # via # -c .pin/../.pin/constraints-rocm-torch.txt # deepspeed -idna==3.4 +idna==3.7 # via # -c .pin/../.pin/constraints-rocm-torch.txt - # requests # yarl -jinja2==3.1.2 +jinja2==3.1.4 # via # -c .pin/../.pin/constraints-rocm-torch.txt # torch -lightning-utilities==0.9.0 +lightning-utilities==0.11.2 # via # -c .pin/../.pin/constraints-rocm-torch.txt # pytorch-lightning # torchmetrics -lit==17.0.4 - # via - # -c .pin/../.pin/constraints-rocm-torch.txt - # pytorch-triton-rocm markdown-it-py==3.0.0 # via # -c .pin/../.pin/constraints-rocm-torch.txt # rich -markupsafe==2.1.3 +markupsafe==2.1.5 # via # -c .pin/../.pin/constraints-rocm-torch.txt # jinja2 @@ -111,21 +94,23 @@ mpmath==1.3.0 # via # -c .pin/../.pin/constraints-rocm-torch.txt # sympy -multidict==6.0.4 +multidict==6.0.5 # via # -c .pin/../.pin/constraints-rocm-torch.txt # aiohttp # yarl -networkx==3.2.1 
+networkx==3.3 # via # -c .pin/../.pin/constraints-rocm-torch.txt # torch ninja==1.11.1.1 # via + # -c .pin/../.pin/constraints-rocm-torch.txt # -r benchmarks/rwkv/requirements.in # deepspeed -numpy==1.26.1 +numpy==1.26.4 # via + # -c .pin/../.pin/constraints-rocm-torch.txt # -r benchmarks/rwkv/requirements.in # deepspeed # pytorch-lightning @@ -134,21 +119,22 @@ omegaconf==2.3.0 # via # -c .pin/../.pin/constraints-rocm-torch.txt # voir -ovld==0.3.2 +ovld==0.3.5 # via # -c .pin/../.pin/constraints-rocm-torch.txt # voir -packaging==23.2 +packaging==24.1 # via # -c .pin/../.pin/constraints-rocm-torch.txt # deepspeed # lightning-utilities # pytorch-lightning # torchmetrics -psutil==5.9.6 +psutil==5.9.8 # via # -c .pin/../.pin/constraints-rocm-torch.txt # deepspeed + # voir ptera==1.4.1 # via # -c .pin/../.pin/constraints-rocm-torch.txt @@ -157,11 +143,12 @@ py-cpuinfo==9.0.0 # via # -c .pin/../.pin/constraints-rocm-torch.txt # deepspeed -pydantic==1.10.13 +pydantic==1.10.15 # via + # -c .pin/../.pin/constraints-rocm-torch.txt # -r benchmarks/rwkv/requirements.in # deepspeed -pygments==2.16.1 +pygments==2.18.0 # via # -c .pin/../.pin/constraints-rocm-torch.txt # rich @@ -171,8 +158,10 @@ pynvml==11.5.0 # deepspeed # voir pytorch-lightning==1.9.5 - # via -r benchmarks/rwkv/requirements.in -pytorch-triton-rocm==2.1.0 + # via + # -c .pin/../.pin/constraints-rocm-torch.txt + # -r benchmarks/rwkv/requirements.in +pytorch-triton-rocm==2.3.1 # via # -c .pin/../.pin/constraints-rocm-torch.txt # torch @@ -185,11 +174,7 @@ reactivex==4.0.4 # via # -c .pin/../.pin/constraints-rocm-torch.txt # giving -requests==2.31.0 - # via - # -c .pin/../.pin/constraints-rocm-torch.txt - # fsspec -rich==13.6.0 +rich==13.7.1 # via # -c .pin/../.pin/constraints-rocm-torch.txt # voir @@ -197,27 +182,27 @@ six==1.16.0 # via # -c .pin/../.pin/constraints-rocm-torch.txt # asttokens -sympy==1.12 +sympy==1.12.1 # via # -c .pin/../.pin/constraints-rocm-torch.txt # torch -torch==2.1.0+rocm5.6 
+torch==2.3.1+rocm6.0 # via + # -c .pin/../.pin/constraints-rocm-torch.txt # -r benchmarks/rwkv/requirements.in # deepspeed # pytorch-lightning - # pytorch-triton-rocm # torchmetrics torchmetrics==1.0.3 # via # -c .pin/../.pin/constraints-rocm-torch.txt # pytorch-lightning -tqdm==4.66.1 +tqdm==4.66.4 # via # -c .pin/../.pin/constraints-rocm-torch.txt # deepspeed # pytorch-lightning -typing-extensions==4.8.0 +typing-extensions==4.12.2 # via # -c .pin/../.pin/constraints-rocm-torch.txt # lightning-utilities @@ -225,10 +210,6 @@ typing-extensions==4.8.0 # pytorch-lightning # reactivex # torch -urllib3==1.26.18 - # via - # -c .pin/../.pin/constraints-rocm-torch.txt - # requests varname==0.10.0 # via # -c .pin/../.pin/constraints-rocm-torch.txt @@ -239,3 +220,6 @@ yarl==1.9.2 # via # -c .pin/../.pin/constraints-rocm-torch.txt # aiohttp + +# The following packages are considered to be unsafe in a requirements file: +# setuptools diff --git a/benchmarks/rwkv/requirements.xpu.txt b/benchmarks/rwkv/requirements.xpu.txt index d95c280d5..78116105c 100644 --- a/benchmarks/rwkv/requirements.xpu.txt +++ b/benchmarks/rwkv/requirements.xpu.txt @@ -4,7 +4,10 @@ # # pip-compile --output-file=benchmarks/rwkv/requirements.xpu.txt .pin/tmp-constraints-xpu-rwkv.txt benchmarks/rwkv/requirements.in # +--extra-index-url https://pypi.ngc.nvidia.com --extra-index-url https://pytorch-extension.intel.com/release-whl/stable/xpu/us/ +--find-links https://storage.googleapis.com/jax-releases/jax_cuda_releases.html +--trusted-host pypi.ngc.nvidia.com aiohttp==3.9.5 # via @@ -26,11 +29,11 @@ attrs==23.2.0 # via # -c .pin/../.pin/constraints-xpu-torch.txt # aiohttp -codefind==0.1.3 +codefind==0.1.6 # via # -c .pin/../.pin/constraints-xpu-torch.txt # ptera -deepspeed==0.14.1 +deepspeed==0.14.2 # via # -c .pin/../.pin/constraints-xpu-torch.txt # -r benchmarks/rwkv/requirements.in @@ -38,16 +41,17 @@ executing==1.2.0 # via # -c .pin/../.pin/constraints-xpu-torch.txt # varname -filelock==3.13.4 
+filelock==3.14.0 # via # -c .pin/../.pin/constraints-xpu-torch.txt # torch + # triton frozenlist==1.4.1 # via # -c .pin/../.pin/constraints-xpu-torch.txt # aiohttp # aiosignal -fsspec[http]==2024.2.0 +fsspec[http]==2024.3.1 # via # -c .pin/../.pin/constraints-xpu-torch.txt # pytorch-lightning @@ -65,7 +69,7 @@ idna==3.7 # via # -c .pin/../.pin/constraints-xpu-torch.txt # yarl -jinja2==3.1.3 +jinja2==3.1.4 # via # -c .pin/../.pin/constraints-xpu-torch.txt # torch @@ -95,7 +99,7 @@ multidict==6.0.5 # -c .pin/../.pin/constraints-xpu-torch.txt # aiohttp # yarl -networkx +networkx==3.3 # via # -c .pin/../.pin/constraints-xpu-torch.txt # torch @@ -111,6 +115,58 @@ numpy==1.26.4 # deepspeed # pytorch-lightning # torchmetrics +nvidia-cublas-cu12==12.1.3.1 + # via + # -c .pin/../.pin/constraints-xpu-torch.txt + # nvidia-cudnn-cu12 + # nvidia-cusolver-cu12 + # torch +nvidia-cuda-cupti-cu12==12.1.105 + # via + # -c .pin/../.pin/constraints-xpu-torch.txt + # torch +nvidia-cuda-nvrtc-cu12==12.1.105 + # via + # -c .pin/../.pin/constraints-xpu-torch.txt + # torch +nvidia-cuda-runtime-cu12==12.1.105 + # via + # -c .pin/../.pin/constraints-xpu-torch.txt + # torch +nvidia-cudnn-cu12==8.9.2.26 + # via + # -c .pin/../.pin/constraints-xpu-torch.txt + # torch +nvidia-cufft-cu12==11.0.2.54 + # via + # -c .pin/../.pin/constraints-xpu-torch.txt + # torch +nvidia-curand-cu12==10.3.2.106 + # via + # -c .pin/../.pin/constraints-xpu-torch.txt + # torch +nvidia-cusolver-cu12==11.4.5.107 + # via + # -c .pin/../.pin/constraints-xpu-torch.txt + # torch +nvidia-cusparse-cu12==12.1.0.106 + # via + # -c .pin/../.pin/constraints-xpu-torch.txt + # nvidia-cusolver-cu12 + # torch +nvidia-nccl-cu12==2.20.5 + # via + # -c .pin/../.pin/constraints-xpu-torch.txt + # torch +nvidia-nvjitlink-cu12==12.5.40 + # via + # -c .pin/../.pin/constraints-xpu-torch.txt + # nvidia-cusolver-cu12 + # nvidia-cusparse-cu12 +nvidia-nvtx-cu12==12.1.105 + # via + # -c .pin/../.pin/constraints-xpu-torch.txt + # torch 
omegaconf==2.3.0 # via # -c .pin/../.pin/constraints-xpu-torch.txt @@ -119,7 +175,7 @@ ovld==0.3.5 # via # -c .pin/../.pin/constraints-xpu-torch.txt # voir -packaging==24.0 +packaging==24.1 # via # -c .pin/../.pin/constraints-xpu-torch.txt # deepspeed @@ -130,6 +186,7 @@ psutil==5.9.8 # via # -c .pin/../.pin/constraints-xpu-torch.txt # deepspeed + # voir ptera==1.4.1 # via # -c .pin/../.pin/constraints-xpu-torch.txt @@ -143,7 +200,7 @@ pydantic==1.10.15 # -c .pin/../.pin/constraints-xpu-torch.txt # -r benchmarks/rwkv/requirements.in # deepspeed -pygments==2.17.2 +pygments==2.18.0 # via # -c .pin/../.pin/constraints-xpu-torch.txt # rich @@ -173,11 +230,11 @@ six==1.16.0 # via # -c .pin/../.pin/constraints-xpu-torch.txt # asttokens -sympy==1.12 +sympy==1.12.1 # via # -c .pin/../.pin/constraints-xpu-torch.txt # torch -torch==2.1.0a0+cxx11.abi +torch==2.3.1 # via # -c .pin/../.pin/constraints-xpu-torch.txt # -c .pin/../constraints/xpu.txt @@ -189,12 +246,16 @@ torchmetrics==1.0.3 # via # -c .pin/../.pin/constraints-xpu-torch.txt # pytorch-lightning -tqdm==4.66.2 +tqdm==4.66.4 # via # -c .pin/../.pin/constraints-xpu-torch.txt # deepspeed # pytorch-lightning -typing-extensions==4.11.0 +triton==2.3.1 + # via + # -c .pin/../.pin/constraints-xpu-torch.txt + # torch +typing-extensions==4.12.2 # via # -c .pin/../.pin/constraints-xpu-torch.txt # lightning-utilities diff --git a/benchmarks/stargan/prepare.py b/benchmarks/stargan/prepare.py new file mode 100755 index 000000000..c17e38033 --- /dev/null +++ b/benchmarks/stargan/prepare.py @@ -0,0 +1,14 @@ + + + +def download_celebA(): + from datasets import load_dataset + dataset = load_dataset( + "student/celebA", + revision="2d31e6555722815c74ea7c845b07c1063dd705e9", + cache_dir="/tmp/milabench/cuda/results/data" + ) + + +if __name__ == "__main__": + download_celebA() diff --git a/benchmarks/stargan/requirements.cuda.txt b/benchmarks/stargan/requirements.cuda.txt index 183ad0e85..d2541bf52 100644 --- 
a/benchmarks/stargan/requirements.cuda.txt +++ b/benchmarks/stargan/requirements.cuda.txt @@ -2,9 +2,12 @@ # This file is autogenerated by pip-compile with Python 3.11 # by the following command: # -# pip-compile --config=pyproject.toml --output-file=benchmarks/stargan/requirements.cuda.txt --resolver=backtracking .pin/tmp-constraints-cuda-stargan.txt benchmarks/stargan/requirements.in +# pip-compile --output-file=benchmarks/stargan/requirements.cuda.txt .pin/tmp-constraints-cuda-stargan.txt benchmarks/stargan/requirements.in # ---extra-index-url https://download.pytorch.org/whl/cu118 +--extra-index-url https://pypi.ngc.nvidia.com +--extra-index-url https://download.pytorch.org/whl/cu121 +--find-links https://storage.googleapis.com/jax-releases/jax_cuda_releases.html +--trusted-host pypi.ngc.nvidia.com antlr4-python3-runtime==4.9.3 # via @@ -14,15 +17,7 @@ asttokens==2.4.1 # via # -c .pin/../.pin/constraints-cuda-torch.txt # giving -certifi==2023.7.22 - # via - # -c .pin/../.pin/constraints-cuda-torch.txt - # requests -charset-normalizer==3.3.2 - # via - # -c .pin/../.pin/constraints-cuda-torch.txt - # requests -codefind==0.1.3 +codefind==0.1.6 # via # -c .pin/../.pin/constraints-cuda-torch.txt # ptera @@ -30,12 +25,12 @@ executing==1.2.0 # via # -c .pin/../.pin/constraints-cuda-torch.txt # varname -filelock==3.13.1 +filelock==3.14.0 # via # -c .pin/../.pin/constraints-cuda-torch.txt # torch # triton -fsspec==2023.10.0 +fsspec==2024.3.1 # via # -c .pin/../.pin/constraints-cuda-torch.txt # torch @@ -44,11 +39,7 @@ giving==0.4.2 # -c .pin/../.pin/constraints-cuda-torch.txt # ptera # voir -idna==3.4 - # via - # -c .pin/../.pin/constraints-cuda-torch.txt - # requests -jinja2==3.1.2 +jinja2==3.1.4 # via # -c .pin/../.pin/constraints-cuda-torch.txt # torch @@ -56,7 +47,7 @@ markdown-it-py==3.0.0 # via # -c .pin/../.pin/constraints-cuda-torch.txt # rich -markupsafe==2.1.3 +markupsafe==2.1.5 # via # -c .pin/../.pin/constraints-cuda-torch.txt # jinja2 @@ -68,31 +59,88 @@ 
mpmath==1.3.0 # via # -c .pin/../.pin/constraints-cuda-torch.txt # sympy -networkx==3.2.1 +networkx==3.3 # via # -c .pin/../.pin/constraints-cuda-torch.txt # torch -numpy==1.26.1 +numpy==1.26.4 # via + # -c .pin/../.pin/constraints-cuda-torch.txt # -r benchmarks/stargan/requirements.in # torchvision +nvidia-cublas-cu12==12.1.3.1 + # via + # -c .pin/../.pin/constraints-cuda-torch.txt + # nvidia-cudnn-cu12 + # nvidia-cusolver-cu12 + # torch +nvidia-cuda-cupti-cu12==12.1.105 + # via + # -c .pin/../.pin/constraints-cuda-torch.txt + # torch +nvidia-cuda-nvrtc-cu12==12.1.105 + # via + # -c .pin/../.pin/constraints-cuda-torch.txt + # torch +nvidia-cuda-runtime-cu12==12.1.105 + # via + # -c .pin/../.pin/constraints-cuda-torch.txt + # torch +nvidia-cudnn-cu12==8.9.2.26 + # via + # -c .pin/../.pin/constraints-cuda-torch.txt + # torch +nvidia-cufft-cu12==11.0.2.54 + # via + # -c .pin/../.pin/constraints-cuda-torch.txt + # torch +nvidia-curand-cu12==10.3.2.106 + # via + # -c .pin/../.pin/constraints-cuda-torch.txt + # torch +nvidia-cusolver-cu12==11.4.5.107 + # via + # -c .pin/../.pin/constraints-cuda-torch.txt + # torch +nvidia-cusparse-cu12==12.1.0.106 + # via + # -c .pin/../.pin/constraints-cuda-torch.txt + # nvidia-cusolver-cu12 + # torch +nvidia-nccl-cu12==2.20.5 + # via + # -c .pin/../.pin/constraints-cuda-torch.txt + # torch +nvidia-nvjitlink-cu12==12.5.40 + # via + # -c .pin/../.pin/constraints-cuda-torch.txt + # nvidia-cusolver-cu12 + # nvidia-cusparse-cu12 +nvidia-nvtx-cu12==12.1.105 + # via + # -c .pin/../.pin/constraints-cuda-torch.txt + # torch omegaconf==2.3.0 # via # -c .pin/../.pin/constraints-cuda-torch.txt # voir -ovld==0.3.2 +ovld==0.3.5 # via # -c .pin/../.pin/constraints-cuda-torch.txt # voir -pillow==10.1.0 +pillow==10.3.0 # via # -c .pin/../.pin/constraints-cuda-torch.txt # torchvision +psutil==5.9.8 + # via + # -c .pin/../.pin/constraints-cuda-torch.txt + # voir ptera==1.4.1 # via # -c .pin/../.pin/constraints-cuda-torch.txt # voir -pygments==2.16.1 
+pygments==2.18.0 # via # -c .pin/../.pin/constraints-cuda-torch.txt # rich @@ -108,11 +156,7 @@ reactivex==4.0.4 # via # -c .pin/../.pin/constraints-cuda-torch.txt # giving -requests==2.31.0 - # via - # -c .pin/../.pin/constraints-cuda-torch.txt - # torchvision -rich==13.6.0 +rich==13.7.1 # via # -c .pin/../.pin/constraints-cuda-torch.txt # voir @@ -120,29 +164,28 @@ six==1.16.0 # via # -c .pin/../.pin/constraints-cuda-torch.txt # asttokens -sympy==1.12 +sympy==1.12.1 # via # -c .pin/../.pin/constraints-cuda-torch.txt # torch -torch==2.1.0+cu118 +torch==2.3.1+cu121 # via + # -c .pin/../.pin/constraints-cuda-torch.txt # -r benchmarks/stargan/requirements.in # torchvision -torchvision==0.16.0+cu118 - # via -r benchmarks/stargan/requirements.in -triton==2.1.0 +torchvision==0.18.1+cu121 # via # -c .pin/../.pin/constraints-cuda-torch.txt - # torch -typing-extensions==4.8.0 + # -r benchmarks/stargan/requirements.in +triton==2.3.1 # via # -c .pin/../.pin/constraints-cuda-torch.txt - # reactivex # torch -urllib3==1.26.18 +typing-extensions==4.12.2 # via # -c .pin/../.pin/constraints-cuda-torch.txt - # requests + # reactivex + # torch varname==0.10.0 # via # -c .pin/../.pin/constraints-cuda-torch.txt diff --git a/benchmarks/stargan/requirements.hpu.txt b/benchmarks/stargan/requirements.hpu.txt index 69860ceb8..10ad3e3eb 100644 --- a/benchmarks/stargan/requirements.hpu.txt +++ b/benchmarks/stargan/requirements.hpu.txt @@ -1,9 +1,13 @@ # -# This file is autogenerated by pip-compile with Python 3.10 +# This file is autogenerated by pip-compile with Python 3.11 # by the following command: # -# pip-compile --output-file=milabench/benchmarks/stargan/requirements.hpu.txt --resolver=backtracking .pin/tmp-constraints-hpu-stargan.txt milabench/benchmarks/stargan/requirements.in +# pip-compile --output-file=benchmarks/stargan/requirements.hpu.txt .pin/tmp-constraints-hpu-stargan.txt benchmarks/stargan/requirements.in # +--extra-index-url https://pypi.ngc.nvidia.com +--find-links 
https://storage.googleapis.com/jax-releases/jax_cuda_releases.html +--trusted-host pypi.ngc.nvidia.com + antlr4-python3-runtime==4.9.3 # via # -c .pin/../.pin/constraints-hpu-torch.txt @@ -12,7 +16,7 @@ asttokens==2.4.1 # via # -c .pin/../.pin/constraints-hpu-torch.txt # giving -codefind==0.1.4 +codefind==0.1.6 # via # -c .pin/../.pin/constraints-hpu-torch.txt # ptera @@ -60,7 +64,8 @@ networkx==3.3 # torch numpy==1.26.4 # via - # -r milabench/benchmarks/stargan/requirements.in + # -c .pin/../.pin/constraints-hpu-torch.txt + # -r benchmarks/stargan/requirements.in # torchvision nvidia-cublas-cu12==12.1.3.1 # via @@ -105,7 +110,7 @@ nvidia-nccl-cu12==2.20.5 # via # -c .pin/../.pin/constraints-hpu-torch.txt # torch -nvidia-nvjitlink-cu12==12.4.127 +nvidia-nvjitlink-cu12==12.5.40 # via # -c .pin/../.pin/constraints-hpu-torch.txt # nvidia-cusolver-cu12 @@ -158,21 +163,24 @@ six==1.16.0 # via # -c .pin/../.pin/constraints-hpu-torch.txt # asttokens -sympy==1.12 +sympy==1.12.1 # via # -c .pin/../.pin/constraints-hpu-torch.txt # torch -torch==2.3.0 +torch==2.3.1 # via - # -r milabench/benchmarks/stargan/requirements.in + # -c .pin/../.pin/constraints-hpu-torch.txt + # -r benchmarks/stargan/requirements.in # torchvision -torchvision==0.18.0 - # via -r milabench/benchmarks/stargan/requirements.in -triton==2.3.0 +torchvision==0.18.1 + # via + # -c .pin/../.pin/constraints-hpu-torch.txt + # -r benchmarks/stargan/requirements.in +triton==2.3.1 # via # -c .pin/../.pin/constraints-hpu-torch.txt # torch -typing-extensions==4.11.0 +typing-extensions==4.12.2 # via # -c .pin/../.pin/constraints-hpu-torch.txt # reactivex diff --git a/benchmarks/stargan/requirements.rocm.txt b/benchmarks/stargan/requirements.rocm.txt index 97fb18a81..0ee10cc6f 100644 --- a/benchmarks/stargan/requirements.rocm.txt +++ b/benchmarks/stargan/requirements.rocm.txt @@ -2,9 +2,12 @@ # This file is autogenerated by pip-compile with Python 3.11 # by the following command: # -# pip-compile 
--config=pyproject.toml --output-file=benchmarks/stargan/requirements.rocm.txt --resolver=backtracking .pin/tmp-constraints-rocm-stargan.txt benchmarks/stargan/requirements.in +# pip-compile --output-file=benchmarks/stargan/requirements.rocm.txt .pin/tmp-constraints-rocm-stargan.txt benchmarks/stargan/requirements.in # ---extra-index-url https://download.pytorch.org/whl/rocm5.6/ +--extra-index-url https://pypi.ngc.nvidia.com +--extra-index-url https://download.pytorch.org/whl/rocm6.0 +--find-links https://storage.googleapis.com/jax-releases/jax_cuda_releases.html +--trusted-host pypi.ngc.nvidia.com antlr4-python3-runtime==4.9.3 # via @@ -14,19 +17,7 @@ asttokens==2.4.1 # via # -c .pin/../.pin/constraints-rocm-torch.txt # giving -certifi==2023.7.22 - # via - # -c .pin/../.pin/constraints-rocm-torch.txt - # requests -charset-normalizer==3.3.2 - # via - # -c .pin/../.pin/constraints-rocm-torch.txt - # requests -cmake==3.27.7 - # via - # -c .pin/../.pin/constraints-rocm-torch.txt - # pytorch-triton-rocm -codefind==0.1.3 +codefind==0.1.6 # via # -c .pin/../.pin/constraints-rocm-torch.txt # ptera @@ -34,12 +25,12 @@ executing==1.2.0 # via # -c .pin/../.pin/constraints-rocm-torch.txt # varname -filelock==3.13.1 +filelock==3.14.0 # via # -c .pin/../.pin/constraints-rocm-torch.txt # pytorch-triton-rocm # torch -fsspec==2023.10.0 +fsspec==2024.3.1 # via # -c .pin/../.pin/constraints-rocm-torch.txt # torch @@ -48,23 +39,15 @@ giving==0.4.2 # -c .pin/../.pin/constraints-rocm-torch.txt # ptera # voir -idna==3.4 - # via - # -c .pin/../.pin/constraints-rocm-torch.txt - # requests -jinja2==3.1.2 +jinja2==3.1.4 # via # -c .pin/../.pin/constraints-rocm-torch.txt # torch -lit==17.0.4 - # via - # -c .pin/../.pin/constraints-rocm-torch.txt - # pytorch-triton-rocm markdown-it-py==3.0.0 # via # -c .pin/../.pin/constraints-rocm-torch.txt # rich -markupsafe==2.1.3 +markupsafe==2.1.5 # via # -c .pin/../.pin/constraints-rocm-torch.txt # jinja2 @@ -76,31 +59,36 @@ mpmath==1.3.0 # via # -c 
.pin/../.pin/constraints-rocm-torch.txt # sympy -networkx==3.2.1 +networkx==3.3 # via # -c .pin/../.pin/constraints-rocm-torch.txt # torch -numpy==1.26.1 +numpy==1.26.4 # via + # -c .pin/../.pin/constraints-rocm-torch.txt # -r benchmarks/stargan/requirements.in # torchvision omegaconf==2.3.0 # via # -c .pin/../.pin/constraints-rocm-torch.txt # voir -ovld==0.3.2 +ovld==0.3.5 # via # -c .pin/../.pin/constraints-rocm-torch.txt # voir -pillow==10.1.0 +pillow==10.3.0 # via # -c .pin/../.pin/constraints-rocm-torch.txt # torchvision +psutil==5.9.8 + # via + # -c .pin/../.pin/constraints-rocm-torch.txt + # voir ptera==1.4.1 # via # -c .pin/../.pin/constraints-rocm-torch.txt # voir -pygments==2.16.1 +pygments==2.18.0 # via # -c .pin/../.pin/constraints-rocm-torch.txt # rich @@ -108,7 +96,7 @@ pynvml==11.5.0 # via # -c .pin/../.pin/constraints-rocm-torch.txt # voir -pytorch-triton-rocm==2.1.0 +pytorch-triton-rocm==2.3.1 # via # -c .pin/../.pin/constraints-rocm-torch.txt # torch @@ -120,11 +108,7 @@ reactivex==4.0.4 # via # -c .pin/../.pin/constraints-rocm-torch.txt # giving -requests==2.31.0 - # via - # -c .pin/../.pin/constraints-rocm-torch.txt - # torchvision -rich==13.6.0 +rich==13.7.1 # via # -c .pin/../.pin/constraints-rocm-torch.txt # voir @@ -132,26 +116,24 @@ six==1.16.0 # via # -c .pin/../.pin/constraints-rocm-torch.txt # asttokens -sympy==1.12 +sympy==1.12.1 # via # -c .pin/../.pin/constraints-rocm-torch.txt # torch -torch==2.1.0+rocm5.6 +torch==2.3.1+rocm6.0 # via + # -c .pin/../.pin/constraints-rocm-torch.txt # -r benchmarks/stargan/requirements.in - # pytorch-triton-rocm # torchvision -torchvision==0.16.0+rocm5.6 - # via -r benchmarks/stargan/requirements.in -typing-extensions==4.8.0 +torchvision==0.18.1+rocm6.0 # via # -c .pin/../.pin/constraints-rocm-torch.txt - # reactivex - # torch -urllib3==1.26.18 + # -r benchmarks/stargan/requirements.in +typing-extensions==4.12.2 # via # -c .pin/../.pin/constraints-rocm-torch.txt - # requests + # reactivex + # torch 
varname==0.10.0 # via # -c .pin/../.pin/constraints-rocm-torch.txt diff --git a/benchmarks/stargan/requirements.xpu.txt b/benchmarks/stargan/requirements.xpu.txt index b78e94550..0ec9b9954 100644 --- a/benchmarks/stargan/requirements.xpu.txt +++ b/benchmarks/stargan/requirements.xpu.txt @@ -4,7 +4,10 @@ # # pip-compile --output-file=benchmarks/stargan/requirements.xpu.txt .pin/tmp-constraints-xpu-stargan.txt benchmarks/stargan/requirements.in # +--extra-index-url https://pypi.ngc.nvidia.com --extra-index-url https://pytorch-extension.intel.com/release-whl/stable/xpu/us/ +--find-links https://storage.googleapis.com/jax-releases/jax_cuda_releases.html +--trusted-host pypi.ngc.nvidia.com antlr4-python3-runtime==4.9.3 # via @@ -14,15 +17,7 @@ asttokens==2.4.1 # via # -c .pin/../.pin/constraints-xpu-torch.txt # giving -certifi==2024.2.2 - # via - # -c .pin/../.pin/constraints-xpu-torch.txt - # requests -charset-normalizer==3.3.2 - # via - # -c .pin/../.pin/constraints-xpu-torch.txt - # requests -codefind==0.1.3 +codefind==0.1.6 # via # -c .pin/../.pin/constraints-xpu-torch.txt # ptera @@ -30,11 +25,12 @@ executing==1.2.0 # via # -c .pin/../.pin/constraints-xpu-torch.txt # varname -filelock==3.13.4 +filelock==3.14.0 # via # -c .pin/../.pin/constraints-xpu-torch.txt # torch -fsspec==2024.2.0 + # triton +fsspec==2024.3.1 # via # -c .pin/../.pin/constraints-xpu-torch.txt # torch @@ -43,11 +39,7 @@ giving==0.4.2 # -c .pin/../.pin/constraints-xpu-torch.txt # ptera # voir -idna==3.7 - # via - # -c .pin/../.pin/constraints-xpu-torch.txt - # requests -jinja2==3.1.3 +jinja2==3.1.4 # via # -c .pin/../.pin/constraints-xpu-torch.txt # torch @@ -67,7 +59,7 @@ mpmath==1.3.0 # via # -c .pin/../.pin/constraints-xpu-torch.txt # sympy -networkx +networkx==3.3 # via # -c .pin/../.pin/constraints-xpu-torch.txt # torch @@ -76,6 +68,58 @@ numpy==1.26.4 # -c .pin/../.pin/constraints-xpu-torch.txt # -r benchmarks/stargan/requirements.in # torchvision +nvidia-cublas-cu12==12.1.3.1 + # via + # -c 
.pin/../.pin/constraints-xpu-torch.txt + # nvidia-cudnn-cu12 + # nvidia-cusolver-cu12 + # torch +nvidia-cuda-cupti-cu12==12.1.105 + # via + # -c .pin/../.pin/constraints-xpu-torch.txt + # torch +nvidia-cuda-nvrtc-cu12==12.1.105 + # via + # -c .pin/../.pin/constraints-xpu-torch.txt + # torch +nvidia-cuda-runtime-cu12==12.1.105 + # via + # -c .pin/../.pin/constraints-xpu-torch.txt + # torch +nvidia-cudnn-cu12==8.9.2.26 + # via + # -c .pin/../.pin/constraints-xpu-torch.txt + # torch +nvidia-cufft-cu12==11.0.2.54 + # via + # -c .pin/../.pin/constraints-xpu-torch.txt + # torch +nvidia-curand-cu12==10.3.2.106 + # via + # -c .pin/../.pin/constraints-xpu-torch.txt + # torch +nvidia-cusolver-cu12==11.4.5.107 + # via + # -c .pin/../.pin/constraints-xpu-torch.txt + # torch +nvidia-cusparse-cu12==12.1.0.106 + # via + # -c .pin/../.pin/constraints-xpu-torch.txt + # nvidia-cusolver-cu12 + # torch +nvidia-nccl-cu12==2.20.5 + # via + # -c .pin/../.pin/constraints-xpu-torch.txt + # torch +nvidia-nvjitlink-cu12==12.5.40 + # via + # -c .pin/../.pin/constraints-xpu-torch.txt + # nvidia-cusolver-cu12 + # nvidia-cusparse-cu12 +nvidia-nvtx-cu12==12.1.105 + # via + # -c .pin/../.pin/constraints-xpu-torch.txt + # torch omegaconf==2.3.0 # via # -c .pin/../.pin/constraints-xpu-torch.txt @@ -88,11 +132,15 @@ pillow==10.3.0 # via # -c .pin/../.pin/constraints-xpu-torch.txt # torchvision +psutil==5.9.8 + # via + # -c .pin/../.pin/constraints-xpu-torch.txt + # voir ptera==1.4.1 # via # -c .pin/../.pin/constraints-xpu-torch.txt # voir -pygments==2.17.2 +pygments==2.18.0 # via # -c .pin/../.pin/constraints-xpu-torch.txt # rich @@ -108,10 +156,6 @@ reactivex==4.0.4 # via # -c .pin/../.pin/constraints-xpu-torch.txt # giving -requests==2.31.0 - # via - # -c .pin/../.pin/constraints-xpu-torch.txt - # torchvision rich==13.7.1 # via # -c .pin/../.pin/constraints-xpu-torch.txt @@ -120,30 +164,30 @@ six==1.16.0 # via # -c .pin/../.pin/constraints-xpu-torch.txt # asttokens -sympy==1.12 +sympy==1.12.1 # via 
# -c .pin/../.pin/constraints-xpu-torch.txt # torch -torch==2.1.0a0+cxx11.abi +torch==2.3.1 # via # -c .pin/../.pin/constraints-xpu-torch.txt # -c .pin/../constraints/xpu.txt # -r benchmarks/stargan/requirements.in # torchvision -torchvision==0.16.0a0+cxx11.abi +torchvision==0.18.1 # via # -c .pin/../.pin/constraints-xpu-torch.txt # -c .pin/../constraints/xpu.txt # -r benchmarks/stargan/requirements.in -typing-extensions==4.11.0 +triton==2.3.1 # via # -c .pin/../.pin/constraints-xpu-torch.txt - # reactivex # torch -urllib3==1.26.18 +typing-extensions==4.12.2 # via # -c .pin/../.pin/constraints-xpu-torch.txt - # requests + # reactivex + # torch varname==0.10.0 # via # -c .pin/../.pin/constraints-xpu-torch.txt diff --git a/benchmarks/stargan/stargan/data_loader.py b/benchmarks/stargan/stargan/data_loader.py index 2f79594c6..0f4f210d9 100644 --- a/benchmarks/stargan/stargan/data_loader.py +++ b/benchmarks/stargan/stargan/data_loader.py @@ -91,6 +91,7 @@ def get_loader( if dataset == "CelebA": dataset = CelebA(image_dir, attr_path, selected_attrs, transform, mode) + elif dataset == "RaFD": dataset = ImageFolder(image_dir, transform) diff --git a/benchmarks/stargan/stargan/main.py b/benchmarks/stargan/stargan/main.py index 683ae121c..39fdfca19 100644 --- a/benchmarks/stargan/stargan/main.py +++ b/benchmarks/stargan/stargan/main.py @@ -1,4 +1,3 @@ -import json import os import argparse from solver import Solver @@ -197,24 +196,21 @@ def main(config): parser.add_argument("--mode", type=str, default="train", choices=["train", "test"]) parser.add_argument("--use_tensorboard", type=str2bool, default=False) - mbconfig = json.loads(os.environ["MILABENCH_CONFIG"]) - datadir = mbconfig["dirs"]["extra"] - # Directories. 
parser.add_argument("--celeba_image_dir", type=str, default="data/celeba/images") parser.add_argument( "--attr_path", type=str, default="data/celeba/list_attr_celeba.txt" ) parser.add_argument("--rafd_image_dir", type=str, default="data/RaFD/train") - parser.add_argument("--log_dir", type=str, default=os.path.join(datadir, "logs")) + parser.add_argument("--log_dir", type=str, default="/data/logs") parser.add_argument( - "--model_save_dir", type=str, default=os.path.join(datadir, "models") + "--model_save_dir", type=str, default="data/models" ) parser.add_argument( - "--sample_dir", type=str, default=os.path.join(datadir, "samples") + "--sample_dir", type=str, default="data/samples" ) parser.add_argument( - "--result_dir", type=str, default=os.path.join(datadir, "results") + "--result_dir", type=str, default="data/results" ) # Step size. diff --git a/benchmarks/super-slomo/prepare.py b/benchmarks/super-slomo/prepare.py index 781e71d8e..a7e5a2f4c 100755 --- a/benchmarks/super-slomo/prepare.py +++ b/benchmarks/super-slomo/prepare.py @@ -1,16 +1,9 @@ #!/usr/bin/env python import torchvision - - - -def download_celebA(): - # celebA use Google drive, and google drive wants to tell us that - # they cant scan for virus so the download fails - # torchvision 0.17.1 might solve this issue though but we dont have it - pass - +from benchmate.datagen import generate_fakeimagenet if __name__ == "__main__": # This will download the weights for vgg16 + generate_fakeimagenet() torchvision.models.vgg16(pretrained=True) diff --git a/benchmarks/super-slomo/requirements.cuda.txt b/benchmarks/super-slomo/requirements.cuda.txt index b2fc3527c..df60c83e3 100644 --- a/benchmarks/super-slomo/requirements.cuda.txt +++ b/benchmarks/super-slomo/requirements.cuda.txt @@ -2,9 +2,12 @@ # This file is autogenerated by pip-compile with Python 3.11 # by the following command: # -# pip-compile --config=pyproject.toml --output-file=benchmarks/super-slomo/requirements.cuda.txt --resolver=backtracking 
.pin/tmp-constraints-cuda-super-slomo.txt benchmarks/super-slomo/requirements.in +# pip-compile --output-file=benchmarks/super-slomo/requirements.cuda.txt .pin/tmp-constraints-cuda-super-slomo.txt benchmarks/super-slomo/requirements.in # ---extra-index-url https://download.pytorch.org/whl/cu118 +--extra-index-url https://pypi.ngc.nvidia.com +--extra-index-url https://download.pytorch.org/whl/cu121 +--find-links https://storage.googleapis.com/jax-releases/jax_cuda_releases.html +--trusted-host pypi.ngc.nvidia.com antlr4-python3-runtime==4.9.3 # via @@ -14,15 +17,7 @@ asttokens==2.4.1 # via # -c .pin/../.pin/constraints-cuda-torch.txt # giving -certifi==2023.7.22 - # via - # -c .pin/../.pin/constraints-cuda-torch.txt - # requests -charset-normalizer==3.3.2 - # via - # -c .pin/../.pin/constraints-cuda-torch.txt - # requests -codefind==0.1.3 +codefind==0.1.6 # via # -c .pin/../.pin/constraints-cuda-torch.txt # ptera @@ -30,12 +25,12 @@ executing==1.2.0 # via # -c .pin/../.pin/constraints-cuda-torch.txt # varname -filelock==3.13.1 +filelock==3.14.0 # via # -c .pin/../.pin/constraints-cuda-torch.txt # torch # triton -fsspec==2023.10.0 +fsspec==2024.3.1 # via # -c .pin/../.pin/constraints-cuda-torch.txt # torch @@ -44,11 +39,7 @@ giving==0.4.2 # -c .pin/../.pin/constraints-cuda-torch.txt # ptera # voir -idna==3.4 - # via - # -c .pin/../.pin/constraints-cuda-torch.txt - # requests -jinja2==3.1.2 +jinja2==3.1.4 # via # -c .pin/../.pin/constraints-cuda-torch.txt # torch @@ -56,7 +47,7 @@ markdown-it-py==3.0.0 # via # -c .pin/../.pin/constraints-cuda-torch.txt # rich -markupsafe==2.1.3 +markupsafe==2.1.5 # via # -c .pin/../.pin/constraints-cuda-torch.txt # jinja2 @@ -68,34 +59,93 @@ mpmath==1.3.0 # via # -c .pin/../.pin/constraints-cuda-torch.txt # sympy -networkx==3.2.1 +networkx==3.3 # via # -c .pin/../.pin/constraints-cuda-torch.txt # torch -numpy==1.26.1 +numpy==1.26.4 # via + # -c .pin/../.pin/constraints-cuda-torch.txt # -r benchmarks/super-slomo/requirements.in # 
opencv-python # torchvision +nvidia-cublas-cu12==12.1.3.1 + # via + # -c .pin/../.pin/constraints-cuda-torch.txt + # nvidia-cudnn-cu12 + # nvidia-cusolver-cu12 + # torch +nvidia-cuda-cupti-cu12==12.1.105 + # via + # -c .pin/../.pin/constraints-cuda-torch.txt + # torch +nvidia-cuda-nvrtc-cu12==12.1.105 + # via + # -c .pin/../.pin/constraints-cuda-torch.txt + # torch +nvidia-cuda-runtime-cu12==12.1.105 + # via + # -c .pin/../.pin/constraints-cuda-torch.txt + # torch +nvidia-cudnn-cu12==8.9.2.26 + # via + # -c .pin/../.pin/constraints-cuda-torch.txt + # torch +nvidia-cufft-cu12==11.0.2.54 + # via + # -c .pin/../.pin/constraints-cuda-torch.txt + # torch +nvidia-curand-cu12==10.3.2.106 + # via + # -c .pin/../.pin/constraints-cuda-torch.txt + # torch +nvidia-cusolver-cu12==11.4.5.107 + # via + # -c .pin/../.pin/constraints-cuda-torch.txt + # torch +nvidia-cusparse-cu12==12.1.0.106 + # via + # -c .pin/../.pin/constraints-cuda-torch.txt + # nvidia-cusolver-cu12 + # torch +nvidia-nccl-cu12==2.20.5 + # via + # -c .pin/../.pin/constraints-cuda-torch.txt + # torch +nvidia-nvjitlink-cu12==12.5.40 + # via + # -c .pin/../.pin/constraints-cuda-torch.txt + # nvidia-cusolver-cu12 + # nvidia-cusparse-cu12 +nvidia-nvtx-cu12==12.1.105 + # via + # -c .pin/../.pin/constraints-cuda-torch.txt + # torch omegaconf==2.3.0 # via # -c .pin/../.pin/constraints-cuda-torch.txt # voir -opencv-python==4.8.1.78 - # via -r benchmarks/super-slomo/requirements.in -ovld==0.3.2 +opencv-python==4.10.0.82 + # via + # -c .pin/../.pin/constraints-cuda-torch.txt + # -r benchmarks/super-slomo/requirements.in +ovld==0.3.5 # via # -c .pin/../.pin/constraints-cuda-torch.txt # voir -pillow==10.1.0 +pillow==10.3.0 # via # -c .pin/../.pin/constraints-cuda-torch.txt # torchvision +psutil==5.9.8 + # via + # -c .pin/../.pin/constraints-cuda-torch.txt + # voir ptera==1.4.1 # via # -c .pin/../.pin/constraints-cuda-torch.txt # voir -pygments==2.16.1 +pygments==2.18.0 # via # -c .pin/../.pin/constraints-cuda-torch.txt # 
rich @@ -111,11 +161,7 @@ reactivex==4.0.4 # via # -c .pin/../.pin/constraints-cuda-torch.txt # giving -requests==2.31.0 - # via - # -c .pin/../.pin/constraints-cuda-torch.txt - # torchvision -rich==13.6.0 +rich==13.7.1 # via # -c .pin/../.pin/constraints-cuda-torch.txt # voir @@ -123,31 +169,32 @@ six==1.16.0 # via # -c .pin/../.pin/constraints-cuda-torch.txt # asttokens -sympy==1.12 +sympy==1.12.1 # via # -c .pin/../.pin/constraints-cuda-torch.txt # torch -torch==2.1.0+cu118 +torch==2.3.1+cu121 # via + # -c .pin/../.pin/constraints-cuda-torch.txt # -r benchmarks/super-slomo/requirements.in # torchvision -torchvision==0.16.0+cu118 - # via -r benchmarks/super-slomo/requirements.in -tqdm==4.66.1 - # via -r benchmarks/super-slomo/requirements.in -triton==2.1.0 +torchvision==0.18.1+cu121 # via # -c .pin/../.pin/constraints-cuda-torch.txt - # torch -typing-extensions==4.8.0 + # -r benchmarks/super-slomo/requirements.in +tqdm==4.66.4 + # via + # -c .pin/../.pin/constraints-cuda-torch.txt + # -r benchmarks/super-slomo/requirements.in +triton==2.3.1 # via # -c .pin/../.pin/constraints-cuda-torch.txt - # reactivex # torch -urllib3==1.26.18 +typing-extensions==4.12.2 # via # -c .pin/../.pin/constraints-cuda-torch.txt - # requests + # reactivex + # torch varname==0.10.0 # via # -c .pin/../.pin/constraints-cuda-torch.txt diff --git a/benchmarks/super-slomo/requirements.hpu.txt b/benchmarks/super-slomo/requirements.hpu.txt index 46166edb9..993c5aa57 100644 --- a/benchmarks/super-slomo/requirements.hpu.txt +++ b/benchmarks/super-slomo/requirements.hpu.txt @@ -1,9 +1,13 @@ # -# This file is autogenerated by pip-compile with Python 3.10 +# This file is autogenerated by pip-compile with Python 3.11 # by the following command: # -# pip-compile --output-file=milabench/benchmarks/super-slomo/requirements.hpu.txt --resolver=backtracking .pin/tmp-constraints-hpu-super-slomo.txt milabench/benchmarks/super-slomo/requirements.in +# pip-compile 
--output-file=benchmarks/super-slomo/requirements.hpu.txt .pin/tmp-constraints-hpu-super-slomo.txt benchmarks/super-slomo/requirements.in # +--extra-index-url https://pypi.ngc.nvidia.com +--find-links https://storage.googleapis.com/jax-releases/jax_cuda_releases.html +--trusted-host pypi.ngc.nvidia.com + antlr4-python3-runtime==4.9.3 # via # -c .pin/../.pin/constraints-hpu-torch.txt @@ -12,7 +16,7 @@ asttokens==2.4.1 # via # -c .pin/../.pin/constraints-hpu-torch.txt # giving -codefind==0.1.4 +codefind==0.1.6 # via # -c .pin/../.pin/constraints-hpu-torch.txt # ptera @@ -60,7 +64,8 @@ networkx==3.3 # torch numpy==1.26.4 # via - # -r milabench/benchmarks/super-slomo/requirements.in + # -c .pin/../.pin/constraints-hpu-torch.txt + # -r benchmarks/super-slomo/requirements.in # opencv-python # torchvision nvidia-cublas-cu12==12.1.3.1 @@ -106,7 +111,7 @@ nvidia-nccl-cu12==2.20.5 # via # -c .pin/../.pin/constraints-hpu-torch.txt # torch -nvidia-nvjitlink-cu12==12.4.127 +nvidia-nvjitlink-cu12==12.5.40 # via # -c .pin/../.pin/constraints-hpu-torch.txt # nvidia-cusolver-cu12 @@ -119,8 +124,10 @@ omegaconf==2.3.0 # via # -c .pin/../.pin/constraints-hpu-torch.txt # voir -opencv-python==4.9.0.80 - # via -r milabench/benchmarks/super-slomo/requirements.in +opencv-python==4.10.0.82 + # via + # -c .pin/../.pin/constraints-hpu-torch.txt + # -r benchmarks/super-slomo/requirements.in ovld==0.3.5 # via # -c .pin/../.pin/constraints-hpu-torch.txt @@ -161,23 +168,28 @@ six==1.16.0 # via # -c .pin/../.pin/constraints-hpu-torch.txt # asttokens -sympy==1.12 +sympy==1.12.1 # via # -c .pin/../.pin/constraints-hpu-torch.txt # torch -torch==2.3.0 +torch==2.3.1 # via - # -r milabench/benchmarks/super-slomo/requirements.in + # -c .pin/../.pin/constraints-hpu-torch.txt + # -r benchmarks/super-slomo/requirements.in # torchvision -torchvision==0.18.0 - # via -r milabench/benchmarks/super-slomo/requirements.in +torchvision==0.18.1 + # via + # -c .pin/../.pin/constraints-hpu-torch.txt + # -r 
benchmarks/super-slomo/requirements.in tqdm==4.66.4 - # via -r milabench/benchmarks/super-slomo/requirements.in -triton==2.3.0 + # via + # -c .pin/../.pin/constraints-hpu-torch.txt + # -r benchmarks/super-slomo/requirements.in +triton==2.3.1 # via # -c .pin/../.pin/constraints-hpu-torch.txt # torch -typing-extensions==4.11.0 +typing-extensions==4.12.2 # via # -c .pin/../.pin/constraints-hpu-torch.txt # reactivex diff --git a/benchmarks/super-slomo/requirements.rocm.txt b/benchmarks/super-slomo/requirements.rocm.txt index 6254fa5d3..507bc7012 100644 --- a/benchmarks/super-slomo/requirements.rocm.txt +++ b/benchmarks/super-slomo/requirements.rocm.txt @@ -2,9 +2,12 @@ # This file is autogenerated by pip-compile with Python 3.11 # by the following command: # -# pip-compile --config=pyproject.toml --output-file=benchmarks/super-slomo/requirements.rocm.txt --resolver=backtracking .pin/tmp-constraints-rocm-super-slomo.txt benchmarks/super-slomo/requirements.in +# pip-compile --output-file=benchmarks/super-slomo/requirements.rocm.txt .pin/tmp-constraints-rocm-super-slomo.txt benchmarks/super-slomo/requirements.in # ---extra-index-url https://download.pytorch.org/whl/rocm5.6/ +--extra-index-url https://pypi.ngc.nvidia.com +--extra-index-url https://download.pytorch.org/whl/rocm6.0 +--find-links https://storage.googleapis.com/jax-releases/jax_cuda_releases.html +--trusted-host pypi.ngc.nvidia.com antlr4-python3-runtime==4.9.3 # via @@ -14,19 +17,7 @@ asttokens==2.4.1 # via # -c .pin/../.pin/constraints-rocm-torch.txt # giving -certifi==2023.7.22 - # via - # -c .pin/../.pin/constraints-rocm-torch.txt - # requests -charset-normalizer==3.3.2 - # via - # -c .pin/../.pin/constraints-rocm-torch.txt - # requests -cmake==3.27.7 - # via - # -c .pin/../.pin/constraints-rocm-torch.txt - # pytorch-triton-rocm -codefind==0.1.3 +codefind==0.1.6 # via # -c .pin/../.pin/constraints-rocm-torch.txt # ptera @@ -34,12 +25,12 @@ executing==1.2.0 # via # -c .pin/../.pin/constraints-rocm-torch.txt 
# varname -filelock==3.13.1 +filelock==3.14.0 # via # -c .pin/../.pin/constraints-rocm-torch.txt # pytorch-triton-rocm # torch -fsspec==2023.10.0 +fsspec==2024.3.1 # via # -c .pin/../.pin/constraints-rocm-torch.txt # torch @@ -48,23 +39,15 @@ giving==0.4.2 # -c .pin/../.pin/constraints-rocm-torch.txt # ptera # voir -idna==3.4 - # via - # -c .pin/../.pin/constraints-rocm-torch.txt - # requests -jinja2==3.1.2 +jinja2==3.1.4 # via # -c .pin/../.pin/constraints-rocm-torch.txt # torch -lit==17.0.4 - # via - # -c .pin/../.pin/constraints-rocm-torch.txt - # pytorch-triton-rocm markdown-it-py==3.0.0 # via # -c .pin/../.pin/constraints-rocm-torch.txt # rich -markupsafe==2.1.3 +markupsafe==2.1.5 # via # -c .pin/../.pin/constraints-rocm-torch.txt # jinja2 @@ -76,12 +59,13 @@ mpmath==1.3.0 # via # -c .pin/../.pin/constraints-rocm-torch.txt # sympy -networkx==3.2.1 +networkx==3.3 # via # -c .pin/../.pin/constraints-rocm-torch.txt # torch -numpy==1.26.1 +numpy==1.26.4 # via + # -c .pin/../.pin/constraints-rocm-torch.txt # -r benchmarks/super-slomo/requirements.in # opencv-python # torchvision @@ -89,21 +73,27 @@ omegaconf==2.3.0 # via # -c .pin/../.pin/constraints-rocm-torch.txt # voir -opencv-python==4.8.1.78 - # via -r benchmarks/super-slomo/requirements.in -ovld==0.3.2 +opencv-python==4.10.0.82 + # via + # -c .pin/../.pin/constraints-rocm-torch.txt + # -r benchmarks/super-slomo/requirements.in +ovld==0.3.5 # via # -c .pin/../.pin/constraints-rocm-torch.txt # voir -pillow==10.1.0 +pillow==10.3.0 # via # -c .pin/../.pin/constraints-rocm-torch.txt # torchvision +psutil==5.9.8 + # via + # -c .pin/../.pin/constraints-rocm-torch.txt + # voir ptera==1.4.1 # via # -c .pin/../.pin/constraints-rocm-torch.txt # voir -pygments==2.16.1 +pygments==2.18.0 # via # -c .pin/../.pin/constraints-rocm-torch.txt # rich @@ -111,7 +101,7 @@ pynvml==11.5.0 # via # -c .pin/../.pin/constraints-rocm-torch.txt # voir -pytorch-triton-rocm==2.1.0 +pytorch-triton-rocm==2.3.1 # via # -c 
.pin/../.pin/constraints-rocm-torch.txt # torch @@ -123,11 +113,7 @@ reactivex==4.0.4 # via # -c .pin/../.pin/constraints-rocm-torch.txt # giving -requests==2.31.0 - # via - # -c .pin/../.pin/constraints-rocm-torch.txt - # torchvision -rich==13.6.0 +rich==13.7.1 # via # -c .pin/../.pin/constraints-rocm-torch.txt # voir @@ -135,28 +121,28 @@ six==1.16.0 # via # -c .pin/../.pin/constraints-rocm-torch.txt # asttokens -sympy==1.12 +sympy==1.12.1 # via # -c .pin/../.pin/constraints-rocm-torch.txt # torch -torch==2.1.0+rocm5.6 +torch==2.3.1+rocm6.0 # via + # -c .pin/../.pin/constraints-rocm-torch.txt # -r benchmarks/super-slomo/requirements.in - # pytorch-triton-rocm # torchvision -torchvision==0.16.0+rocm5.6 - # via -r benchmarks/super-slomo/requirements.in -tqdm==4.66.1 - # via -r benchmarks/super-slomo/requirements.in -typing-extensions==4.8.0 +torchvision==0.18.1+rocm6.0 # via # -c .pin/../.pin/constraints-rocm-torch.txt - # reactivex - # torch -urllib3==1.26.18 + # -r benchmarks/super-slomo/requirements.in +tqdm==4.66.4 + # via + # -c .pin/../.pin/constraints-rocm-torch.txt + # -r benchmarks/super-slomo/requirements.in +typing-extensions==4.12.2 # via # -c .pin/../.pin/constraints-rocm-torch.txt - # requests + # reactivex + # torch varname==0.10.0 # via # -c .pin/../.pin/constraints-rocm-torch.txt diff --git a/benchmarks/super-slomo/requirements.xpu.txt b/benchmarks/super-slomo/requirements.xpu.txt index 20397c03c..fccf5db12 100644 --- a/benchmarks/super-slomo/requirements.xpu.txt +++ b/benchmarks/super-slomo/requirements.xpu.txt @@ -4,7 +4,10 @@ # # pip-compile --output-file=benchmarks/super-slomo/requirements.xpu.txt .pin/tmp-constraints-xpu-super-slomo.txt benchmarks/super-slomo/requirements.in # +--extra-index-url https://pypi.ngc.nvidia.com --extra-index-url https://pytorch-extension.intel.com/release-whl/stable/xpu/us/ +--find-links https://storage.googleapis.com/jax-releases/jax_cuda_releases.html +--trusted-host pypi.ngc.nvidia.com 
antlr4-python3-runtime==4.9.3 # via @@ -14,15 +17,7 @@ asttokens==2.4.1 # via # -c .pin/../.pin/constraints-xpu-torch.txt # giving -certifi==2024.2.2 - # via - # -c .pin/../.pin/constraints-xpu-torch.txt - # requests -charset-normalizer==3.3.2 - # via - # -c .pin/../.pin/constraints-xpu-torch.txt - # requests -codefind==0.1.3 +codefind==0.1.6 # via # -c .pin/../.pin/constraints-xpu-torch.txt # ptera @@ -30,11 +25,12 @@ executing==1.2.0 # via # -c .pin/../.pin/constraints-xpu-torch.txt # varname -filelock==3.13.4 +filelock==3.14.0 # via # -c .pin/../.pin/constraints-xpu-torch.txt # torch -fsspec==2024.2.0 + # triton +fsspec==2024.3.1 # via # -c .pin/../.pin/constraints-xpu-torch.txt # torch @@ -43,11 +39,7 @@ giving==0.4.2 # -c .pin/../.pin/constraints-xpu-torch.txt # ptera # voir -idna==3.7 - # via - # -c .pin/../.pin/constraints-xpu-torch.txt - # requests -jinja2==3.1.3 +jinja2==3.1.4 # via # -c .pin/../.pin/constraints-xpu-torch.txt # torch @@ -67,7 +59,7 @@ mpmath==1.3.0 # via # -c .pin/../.pin/constraints-xpu-torch.txt # sympy -networkx +networkx==3.3 # via # -c .pin/../.pin/constraints-xpu-torch.txt # torch @@ -77,11 +69,63 @@ numpy==1.26.4 # -r benchmarks/super-slomo/requirements.in # opencv-python # torchvision +nvidia-cublas-cu12==12.1.3.1 + # via + # -c .pin/../.pin/constraints-xpu-torch.txt + # nvidia-cudnn-cu12 + # nvidia-cusolver-cu12 + # torch +nvidia-cuda-cupti-cu12==12.1.105 + # via + # -c .pin/../.pin/constraints-xpu-torch.txt + # torch +nvidia-cuda-nvrtc-cu12==12.1.105 + # via + # -c .pin/../.pin/constraints-xpu-torch.txt + # torch +nvidia-cuda-runtime-cu12==12.1.105 + # via + # -c .pin/../.pin/constraints-xpu-torch.txt + # torch +nvidia-cudnn-cu12==8.9.2.26 + # via + # -c .pin/../.pin/constraints-xpu-torch.txt + # torch +nvidia-cufft-cu12==11.0.2.54 + # via + # -c .pin/../.pin/constraints-xpu-torch.txt + # torch +nvidia-curand-cu12==10.3.2.106 + # via + # -c .pin/../.pin/constraints-xpu-torch.txt + # torch +nvidia-cusolver-cu12==11.4.5.107 + # via 
+ # -c .pin/../.pin/constraints-xpu-torch.txt + # torch +nvidia-cusparse-cu12==12.1.0.106 + # via + # -c .pin/../.pin/constraints-xpu-torch.txt + # nvidia-cusolver-cu12 + # torch +nvidia-nccl-cu12==2.20.5 + # via + # -c .pin/../.pin/constraints-xpu-torch.txt + # torch +nvidia-nvjitlink-cu12==12.5.40 + # via + # -c .pin/../.pin/constraints-xpu-torch.txt + # nvidia-cusolver-cu12 + # nvidia-cusparse-cu12 +nvidia-nvtx-cu12==12.1.105 + # via + # -c .pin/../.pin/constraints-xpu-torch.txt + # torch omegaconf==2.3.0 # via # -c .pin/../.pin/constraints-xpu-torch.txt # voir -opencv-python==4.9.0.80 +opencv-python==4.10.0.82 # via # -c .pin/../.pin/constraints-xpu-torch.txt # -r benchmarks/super-slomo/requirements.in @@ -93,11 +137,15 @@ pillow==10.3.0 # via # -c .pin/../.pin/constraints-xpu-torch.txt # torchvision +psutil==5.9.8 + # via + # -c .pin/../.pin/constraints-xpu-torch.txt + # voir ptera==1.4.1 # via # -c .pin/../.pin/constraints-xpu-torch.txt # voir -pygments==2.17.2 +pygments==2.18.0 # via # -c .pin/../.pin/constraints-xpu-torch.txt # rich @@ -113,10 +161,6 @@ reactivex==4.0.4 # via # -c .pin/../.pin/constraints-xpu-torch.txt # giving -requests==2.31.0 - # via - # -c .pin/../.pin/constraints-xpu-torch.txt - # torchvision rich==13.7.1 # via # -c .pin/../.pin/constraints-xpu-torch.txt @@ -125,34 +169,34 @@ six==1.16.0 # via # -c .pin/../.pin/constraints-xpu-torch.txt # asttokens -sympy==1.12 +sympy==1.12.1 # via # -c .pin/../.pin/constraints-xpu-torch.txt # torch -torch==2.1.0a0+cxx11.abi +torch==2.3.1 # via # -c .pin/../.pin/constraints-xpu-torch.txt # -c .pin/../constraints/xpu.txt # -r benchmarks/super-slomo/requirements.in # torchvision -torchvision==0.16.0a0+cxx11.abi +torchvision==0.18.1 # via # -c .pin/../.pin/constraints-xpu-torch.txt # -c .pin/../constraints/xpu.txt # -r benchmarks/super-slomo/requirements.in -tqdm==4.66.2 +tqdm==4.66.4 # via # -c .pin/../.pin/constraints-xpu-torch.txt # -r benchmarks/super-slomo/requirements.in -typing-extensions==4.11.0 
+triton==2.3.1 # via # -c .pin/../.pin/constraints-xpu-torch.txt - # reactivex # torch -urllib3==1.26.18 +typing-extensions==4.12.2 # via # -c .pin/../.pin/constraints-xpu-torch.txt - # requests + # reactivex + # torch varname==0.10.0 # via # -c .pin/../.pin/constraints-xpu-torch.txt diff --git a/benchmarks/super-slomo/slomo/train.py b/benchmarks/super-slomo/slomo/train.py index 8ec3bddac..531747369 100644 --- a/benchmarks/super-slomo/slomo/train.py +++ b/benchmarks/super-slomo/slomo/train.py @@ -8,11 +8,13 @@ import torch.nn as nn import torch.nn.functional as F import torchcompat.core as accelerator +import torchvision.transforms as transforms -import model -from giving import give from benchmate.observer import BenchObserver + +import model from synth import SyntheticData +import dataloader def main(): @@ -75,6 +77,19 @@ def main(): action="store_false", help="do not allow tf32", ) + parser.add_argument( + "--loader", + type=str, + default="synthetic", + help="Dataloader to use", + ) + parser.add_argument( + "--num_workers", + type=int, + default=8, + help="Dataloader to use", + ) + args = parser.parse_args() @@ -96,35 +111,57 @@ def main(): validationFlowBackWarp = validationFlowBackWarp.to(device) ###Load Datasets + def load_dataset(): + if args.loader == "synthetic": + def igen(): + sz = 352 + f0 = torch.rand((3, sz, sz)) * 2 - 1 + ft = torch.rand((3, sz, sz)) * 2 - 1 + f1 = torch.rand((3, sz, sz)) * 2 - 1 + return [f0, ft, f1] + + def ogen(): + return torch.randint(0, 7, ()) + + trainset = SyntheticData( + n=args.train_batch_size, + repeat=10000, + generators=[igen, ogen] + ) + + return torch.utils.data.DataLoader( + trainset, + batch_size=args.train_batch_size, + num_workers=8 + ) - # # Channel wise mean calculated on adobe240-fps training dataset - # mean = [0.429, 0.431, 0.397] - # std = [1, 1, 1] - # normalize = transforms.Normalize(mean=mean, - # std=std) - # transform = transforms.Compose([transforms.ToTensor(), normalize]) + # Channel wise mean 
calculated on adobe240-fps training dataset + transform = transforms.Compose([ + transforms.ToTensor(), + transforms.Normalize( + mean=[0.429, 0.431, 0.397], + std=[1, 1, 1] + ) + ]) - # trainset = dataloader.SuperSloMo(root=args.dataset_root + '/train', transform=transform, train=True) - # trainloader = torch.utils.data.DataLoader(trainset, batch_size=args.train_batch_size, shuffle=False) + trainset = dataloader.SuperSloMo(root=args.dataset_root + '/train', transform=transform, train=True) - def igen(): - sz = 352 - f0 = torch.rand((3, sz, sz)) * 2 - 1 - ft = torch.rand((3, sz, sz)) * 2 - 1 - f1 = torch.rand((3, sz, sz)) * 2 - 1 - return [f0, ft, f1] + too_small = [] + for i, p in enumerate(trainset.framesPath): + if len(p) < 12: + too_small.append(i) - def ogen(): - return torch.randint(0, 7, ()) + for i in reversed(too_small): + del trainset.framesPath[i] - trainset = SyntheticData( - n=args.train_batch_size, repeat=10000, generators=[igen, ogen] - ) - trainloader = torch.utils.data.DataLoader( - trainset, - batch_size=args.train_batch_size, - num_workers=8 - ) + return torch.utils.data.DataLoader( + trainset, + batch_size=args.train_batch_size, + shuffle=False, + num_workers=args.num_workers + ) + + trainloader = load_dataset() ###Utils diff --git a/benchmarks/timm/prepare.py b/benchmarks/timm/prepare.py index 9158ae0e0..b67ca2325 100755 --- a/benchmarks/timm/prepare.py +++ b/benchmarks/timm/prepare.py @@ -1,59 +1,6 @@ #!/usr/bin/env python -import multiprocessing -import os -from pathlib import Path - -from tqdm import tqdm - - -def write(args): - from torchvision.datasets import FakeData - - image_size, offset, count, outdir = args - dataset = FakeData( - size=count, image_size=image_size, num_classes=1000, random_offset=offset - ) - - image, y = next(iter(dataset)) - class_val = int(y) - image_name = f"{offset}.jpeg" - - path = os.path.join(outdir, str(class_val)) - os.makedirs(path, exist_ok=True) - - image_path = os.path.join(path, image_name) - 
image.save(image_path) - - -def generate(image_size, n, outdir): - p_count = min(multiprocessing.cpu_count(), 8) - pool = multiprocessing.Pool(p_count) - for _ in tqdm( - pool.imap_unordered(write, ((image_size, i, n, outdir) for i in range(n))), - total=n, - ): - pass - - -def generate_sets(root, sets, shape): - root = Path(root) - sentinel = root / "done" - if sentinel.exists(): - print(f"{root} was already generated") - return - if root.exists(): - print(f"{root} exists but is not marked complete; deleting") - root.rm() - for name, n in sets.items(): - print(f"Generating {name}") - generate(shape, n, os.path.join(root, name)) - sentinel.touch() - +from benchmate.datagen import generate_fakeimagenet if __name__ == "__main__": - data_directory = os.environ["MILABENCH_DIR_DATA"] - dest = os.path.join(data_directory, "FakeImageNet") - print(f"Generating fake data into {dest}...") - generate_sets(dest, {"train": 4096, "val": 16, "test": 16}, (3, 384, 384)) - print("Done!") + generate_fakeimagenet() \ No newline at end of file diff --git a/benchmarks/timm/requirements.cuda.txt b/benchmarks/timm/requirements.cuda.txt index b3012f73a..8b162dd50 100644 --- a/benchmarks/timm/requirements.cuda.txt +++ b/benchmarks/timm/requirements.cuda.txt @@ -2,9 +2,12 @@ # This file is autogenerated by pip-compile with Python 3.11 # by the following command: # -# pip-compile --config=pyproject.toml --output-file=benchmarks/timm/requirements.cuda.txt --resolver=backtracking .pin/tmp-constraints-cuda-timm.txt benchmarks/timm/requirements.in +# pip-compile --output-file=benchmarks/timm/requirements.cuda.txt .pin/tmp-constraints-cuda-timm.txt benchmarks/timm/requirements.in # ---extra-index-url https://download.pytorch.org/whl/cu118 +--extra-index-url https://pypi.ngc.nvidia.com +--extra-index-url https://download.pytorch.org/whl/cu121 +--find-links https://storage.googleapis.com/jax-releases/jax_cuda_releases.html +--trusted-host pypi.ngc.nvidia.com antlr4-python3-runtime==4.9.3 # via @@ 
-14,7 +17,7 @@ asttokens==2.4.1 # via # -c .pin/../.pin/constraints-cuda-torch.txt # giving -certifi==2023.7.22 +certifi==2024.6.2 # via # -c .pin/../.pin/constraints-cuda-torch.txt # requests @@ -22,7 +25,7 @@ charset-normalizer==3.3.2 # via # -c .pin/../.pin/constraints-cuda-torch.txt # requests -codefind==0.1.3 +codefind==0.1.6 # via # -c .pin/../.pin/constraints-cuda-torch.txt # ptera @@ -30,13 +33,13 @@ executing==1.2.0 # via # -c .pin/../.pin/constraints-cuda-torch.txt # varname -filelock==3.13.1 +filelock==3.14.0 # via # -c .pin/../.pin/constraints-cuda-torch.txt # huggingface-hub # torch # triton -fsspec==2023.10.0 +fsspec==2024.3.1 # via # -c .pin/../.pin/constraints-cuda-torch.txt # huggingface-hub @@ -46,13 +49,15 @@ giving==0.4.2 # -c .pin/../.pin/constraints-cuda-torch.txt # ptera # voir -huggingface-hub==0.17.3 - # via -r benchmarks/timm/requirements.in -idna==3.4 +huggingface-hub==0.23.3 + # via + # -c .pin/../.pin/constraints-cuda-torch.txt + # -r benchmarks/timm/requirements.in +idna==3.7 # via # -c .pin/../.pin/constraints-cuda-torch.txt # requests -jinja2==3.1.2 +jinja2==3.1.4 # via # -c .pin/../.pin/constraints-cuda-torch.txt # torch @@ -60,7 +65,7 @@ markdown-it-py==3.0.0 # via # -c .pin/../.pin/constraints-cuda-torch.txt # rich -markupsafe==2.1.3 +markupsafe==2.1.5 # via # -c .pin/../.pin/constraints-cuda-torch.txt # jinja2 @@ -72,35 +77,91 @@ mpmath==1.3.0 # via # -c .pin/../.pin/constraints-cuda-torch.txt # sympy -networkx==3.2.1 +networkx==3.3 # via # -c .pin/../.pin/constraints-cuda-torch.txt # torch -numpy==1.26.1 +numpy==1.26.4 # via # -c .pin/../.pin/constraints-cuda-torch.txt # torchvision +nvidia-cublas-cu12==12.1.3.1 + # via + # -c .pin/../.pin/constraints-cuda-torch.txt + # nvidia-cudnn-cu12 + # nvidia-cusolver-cu12 + # torch +nvidia-cuda-cupti-cu12==12.1.105 + # via + # -c .pin/../.pin/constraints-cuda-torch.txt + # torch +nvidia-cuda-nvrtc-cu12==12.1.105 + # via + # -c .pin/../.pin/constraints-cuda-torch.txt + # torch 
+nvidia-cuda-runtime-cu12==12.1.105 + # via + # -c .pin/../.pin/constraints-cuda-torch.txt + # torch +nvidia-cudnn-cu12==8.9.2.26 + # via + # -c .pin/../.pin/constraints-cuda-torch.txt + # torch +nvidia-cufft-cu12==11.0.2.54 + # via + # -c .pin/../.pin/constraints-cuda-torch.txt + # torch +nvidia-curand-cu12==10.3.2.106 + # via + # -c .pin/../.pin/constraints-cuda-torch.txt + # torch +nvidia-cusolver-cu12==11.4.5.107 + # via + # -c .pin/../.pin/constraints-cuda-torch.txt + # torch +nvidia-cusparse-cu12==12.1.0.106 + # via + # -c .pin/../.pin/constraints-cuda-torch.txt + # nvidia-cusolver-cu12 + # torch +nvidia-nccl-cu12==2.20.5 + # via + # -c .pin/../.pin/constraints-cuda-torch.txt + # torch +nvidia-nvjitlink-cu12==12.5.40 + # via + # -c .pin/../.pin/constraints-cuda-torch.txt + # nvidia-cusolver-cu12 + # nvidia-cusparse-cu12 +nvidia-nvtx-cu12==12.1.105 + # via + # -c .pin/../.pin/constraints-cuda-torch.txt + # torch omegaconf==2.3.0 # via # -c .pin/../.pin/constraints-cuda-torch.txt # voir -ovld==0.3.2 +ovld==0.3.5 # via # -c .pin/../.pin/constraints-cuda-torch.txt # voir -packaging==23.2 +packaging==24.1 # via # -c .pin/../.pin/constraints-cuda-torch.txt # huggingface-hub -pillow==10.1.0 +pillow==10.3.0 # via # -c .pin/../.pin/constraints-cuda-torch.txt # torchvision +psutil==5.9.8 + # via + # -c .pin/../.pin/constraints-cuda-torch.txt + # voir ptera==1.4.1 # via # -c .pin/../.pin/constraints-cuda-torch.txt # voir -pygments==2.16.1 +pygments==2.18.0 # via # -c .pin/../.pin/constraints-cuda-torch.txt # rich @@ -110,6 +171,7 @@ pynvml==11.5.0 # voir pyyaml==6.0.1 # via + # -c .pin/../.pin/constraints-cuda-torch.txt # -r benchmarks/timm/requirements.in # huggingface-hub # omegaconf @@ -117,40 +179,44 @@ reactivex==4.0.4 # via # -c .pin/../.pin/constraints-cuda-torch.txt # giving -requests==2.31.0 +requests==2.32.3 # via # -c .pin/../.pin/constraints-cuda-torch.txt # huggingface-hub - # torchvision -rich==13.6.0 +rich==13.7.1 # via # -c 
.pin/../.pin/constraints-cuda-torch.txt # voir -safetensors==0.4.0 - # via -r benchmarks/timm/requirements.in +safetensors==0.4.3 + # via + # -c .pin/../.pin/constraints-cuda-torch.txt + # -r benchmarks/timm/requirements.in six==1.16.0 # via # -c .pin/../.pin/constraints-cuda-torch.txt # asttokens -sympy==1.12 +sympy==1.12.1 # via # -c .pin/../.pin/constraints-cuda-torch.txt # torch -torch==2.1.0+cu118 +torch==2.3.1+cu121 # via + # -c .pin/../.pin/constraints-cuda-torch.txt # -r benchmarks/timm/requirements.in # torchvision -torchvision==0.16.0+cu118 - # via -r benchmarks/timm/requirements.in -tqdm==4.66.1 +torchvision==0.18.1+cu121 + # via + # -c .pin/../.pin/constraints-cuda-torch.txt + # -r benchmarks/timm/requirements.in +tqdm==4.66.4 # via # -c .pin/../.pin/constraints-cuda-torch.txt # huggingface-hub -triton==2.1.0 +triton==2.3.1 # via # -c .pin/../.pin/constraints-cuda-torch.txt # torch -typing-extensions==4.8.0 +typing-extensions==4.12.2 # via # -c .pin/../.pin/constraints-cuda-torch.txt # huggingface-hub diff --git a/benchmarks/timm/requirements.hpu.txt b/benchmarks/timm/requirements.hpu.txt index 1cfdc4058..ce479d3e4 100644 --- a/benchmarks/timm/requirements.hpu.txt +++ b/benchmarks/timm/requirements.hpu.txt @@ -1,9 +1,13 @@ # -# This file is autogenerated by pip-compile with Python 3.10 +# This file is autogenerated by pip-compile with Python 3.11 # by the following command: # -# pip-compile --output-file=milabench/benchmarks/timm/requirements.hpu.txt --resolver=backtracking .pin/tmp-constraints-hpu-timm.txt milabench/benchmarks/timm/requirements.in +# pip-compile --output-file=benchmarks/timm/requirements.hpu.txt .pin/tmp-constraints-hpu-timm.txt benchmarks/timm/requirements.in # +--extra-index-url https://pypi.ngc.nvidia.com +--find-links https://storage.googleapis.com/jax-releases/jax_cuda_releases.html +--trusted-host pypi.ngc.nvidia.com + antlr4-python3-runtime==4.9.3 # via # -c .pin/../.pin/constraints-hpu-torch.txt @@ -12,7 +16,7 @@ 
asttokens==2.4.1 # via # -c .pin/../.pin/constraints-hpu-torch.txt # giving -certifi==2024.2.2 +certifi==2024.6.2 # via # -c .pin/../.pin/constraints-hpu-torch.txt # requests @@ -20,7 +24,7 @@ charset-normalizer==3.3.2 # via # -c .pin/../.pin/constraints-hpu-torch.txt # requests -codefind==0.1.4 +codefind==0.1.6 # via # -c .pin/../.pin/constraints-hpu-torch.txt # ptera @@ -44,8 +48,10 @@ giving==0.4.2 # -c .pin/../.pin/constraints-hpu-torch.txt # ptera # voir -huggingface-hub==0.23.0 - # via -r milabench/benchmarks/timm/requirements.in +huggingface-hub==0.23.3 + # via + # -c .pin/../.pin/constraints-hpu-torch.txt + # -r benchmarks/timm/requirements.in idna==3.7 # via # -c .pin/../.pin/constraints-hpu-torch.txt @@ -121,7 +127,7 @@ nvidia-nccl-cu12==2.20.5 # via # -c .pin/../.pin/constraints-hpu-torch.txt # torch -nvidia-nvjitlink-cu12==12.4.127 +nvidia-nvjitlink-cu12==12.5.40 # via # -c .pin/../.pin/constraints-hpu-torch.txt # nvidia-cusolver-cu12 @@ -138,7 +144,7 @@ ovld==0.3.5 # via # -c .pin/../.pin/constraints-hpu-torch.txt # voir -packaging==24.0 +packaging==24.1 # via # -c .pin/../.pin/constraints-hpu-torch.txt # huggingface-hub @@ -164,14 +170,15 @@ pynvml==11.5.0 # voir pyyaml==6.0.1 # via - # -r milabench/benchmarks/timm/requirements.in + # -c .pin/../.pin/constraints-hpu-torch.txt + # -r benchmarks/timm/requirements.in # huggingface-hub # omegaconf reactivex==4.0.4 # via # -c .pin/../.pin/constraints-hpu-torch.txt # giving -requests==2.31.0 +requests==2.32.3 # via # -c .pin/../.pin/constraints-hpu-torch.txt # huggingface-hub @@ -180,30 +187,35 @@ rich==13.7.1 # -c .pin/../.pin/constraints-hpu-torch.txt # voir safetensors==0.4.3 - # via -r milabench/benchmarks/timm/requirements.in + # via + # -c .pin/../.pin/constraints-hpu-torch.txt + # -r benchmarks/timm/requirements.in six==1.16.0 # via # -c .pin/../.pin/constraints-hpu-torch.txt # asttokens -sympy==1.12 +sympy==1.12.1 # via # -c .pin/../.pin/constraints-hpu-torch.txt # torch -torch==2.3.0 +torch==2.3.1 
# via - # -r milabench/benchmarks/timm/requirements.in + # -c .pin/../.pin/constraints-hpu-torch.txt + # -r benchmarks/timm/requirements.in # torchvision -torchvision==0.18.0 - # via -r milabench/benchmarks/timm/requirements.in +torchvision==0.18.1 + # via + # -c .pin/../.pin/constraints-hpu-torch.txt + # -r benchmarks/timm/requirements.in tqdm==4.66.4 # via # -c .pin/../.pin/constraints-hpu-torch.txt # huggingface-hub -triton==2.3.0 +triton==2.3.1 # via # -c .pin/../.pin/constraints-hpu-torch.txt # torch -typing-extensions==4.11.0 +typing-extensions==4.12.2 # via # -c .pin/../.pin/constraints-hpu-torch.txt # huggingface-hub diff --git a/benchmarks/timm/requirements.rocm.txt b/benchmarks/timm/requirements.rocm.txt index e56b58921..762371c35 100644 --- a/benchmarks/timm/requirements.rocm.txt +++ b/benchmarks/timm/requirements.rocm.txt @@ -2,9 +2,12 @@ # This file is autogenerated by pip-compile with Python 3.11 # by the following command: # -# pip-compile --config=pyproject.toml --output-file=benchmarks/timm/requirements.rocm.txt --resolver=backtracking .pin/tmp-constraints-rocm-timm.txt benchmarks/timm/requirements.in +# pip-compile --output-file=benchmarks/timm/requirements.rocm.txt .pin/tmp-constraints-rocm-timm.txt benchmarks/timm/requirements.in # ---extra-index-url https://download.pytorch.org/whl/rocm5.6/ +--extra-index-url https://pypi.ngc.nvidia.com +--extra-index-url https://download.pytorch.org/whl/rocm6.0 +--find-links https://storage.googleapis.com/jax-releases/jax_cuda_releases.html +--trusted-host pypi.ngc.nvidia.com antlr4-python3-runtime==4.9.3 # via @@ -14,7 +17,7 @@ asttokens==2.4.1 # via # -c .pin/../.pin/constraints-rocm-torch.txt # giving -certifi==2023.7.22 +certifi==2024.6.2 # via # -c .pin/../.pin/constraints-rocm-torch.txt # requests @@ -22,11 +25,7 @@ charset-normalizer==3.3.2 # via # -c .pin/../.pin/constraints-rocm-torch.txt # requests -cmake==3.27.7 - # via - # -c .pin/../.pin/constraints-rocm-torch.txt - # pytorch-triton-rocm 
-codefind==0.1.3 +codefind==0.1.6 # via # -c .pin/../.pin/constraints-rocm-torch.txt # ptera @@ -34,13 +33,13 @@ executing==1.2.0 # via # -c .pin/../.pin/constraints-rocm-torch.txt # varname -filelock==3.13.1 +filelock==3.14.0 # via # -c .pin/../.pin/constraints-rocm-torch.txt # huggingface-hub # pytorch-triton-rocm # torch -fsspec==2023.10.0 +fsspec==2024.3.1 # via # -c .pin/../.pin/constraints-rocm-torch.txt # huggingface-hub @@ -50,25 +49,23 @@ giving==0.4.2 # -c .pin/../.pin/constraints-rocm-torch.txt # ptera # voir -huggingface-hub==0.17.3 - # via -r benchmarks/timm/requirements.in -idna==3.4 +huggingface-hub==0.23.3 # via # -c .pin/../.pin/constraints-rocm-torch.txt - # requests -jinja2==3.1.2 + # -r benchmarks/timm/requirements.in +idna==3.7 # via # -c .pin/../.pin/constraints-rocm-torch.txt - # torch -lit==17.0.4 + # requests +jinja2==3.1.4 # via # -c .pin/../.pin/constraints-rocm-torch.txt - # pytorch-triton-rocm + # torch markdown-it-py==3.0.0 # via # -c .pin/../.pin/constraints-rocm-torch.txt # rich -markupsafe==2.1.3 +markupsafe==2.1.5 # via # -c .pin/../.pin/constraints-rocm-torch.txt # jinja2 @@ -80,11 +77,11 @@ mpmath==1.3.0 # via # -c .pin/../.pin/constraints-rocm-torch.txt # sympy -networkx==3.2.1 +networkx==3.3 # via # -c .pin/../.pin/constraints-rocm-torch.txt # torch -numpy==1.26.1 +numpy==1.26.4 # via # -c .pin/../.pin/constraints-rocm-torch.txt # torchvision @@ -92,23 +89,27 @@ omegaconf==2.3.0 # via # -c .pin/../.pin/constraints-rocm-torch.txt # voir -ovld==0.3.2 +ovld==0.3.5 # via # -c .pin/../.pin/constraints-rocm-torch.txt # voir -packaging==23.2 +packaging==24.1 # via # -c .pin/../.pin/constraints-rocm-torch.txt # huggingface-hub -pillow==10.1.0 +pillow==10.3.0 # via # -c .pin/../.pin/constraints-rocm-torch.txt # torchvision +psutil==5.9.8 + # via + # -c .pin/../.pin/constraints-rocm-torch.txt + # voir ptera==1.4.1 # via # -c .pin/../.pin/constraints-rocm-torch.txt # voir -pygments==2.16.1 +pygments==2.18.0 # via # -c 
.pin/../.pin/constraints-rocm-torch.txt # rich @@ -116,12 +117,13 @@ pynvml==11.5.0 # via # -c .pin/../.pin/constraints-rocm-torch.txt # voir -pytorch-triton-rocm==2.1.0 +pytorch-triton-rocm==2.3.1 # via # -c .pin/../.pin/constraints-rocm-torch.txt # torch pyyaml==6.0.1 # via + # -c .pin/../.pin/constraints-rocm-torch.txt # -r benchmarks/timm/requirements.in # huggingface-hub # omegaconf @@ -129,37 +131,40 @@ reactivex==4.0.4 # via # -c .pin/../.pin/constraints-rocm-torch.txt # giving -requests==2.31.0 +requests==2.32.3 # via # -c .pin/../.pin/constraints-rocm-torch.txt # huggingface-hub - # torchvision -rich==13.6.0 +rich==13.7.1 # via # -c .pin/../.pin/constraints-rocm-torch.txt # voir -safetensors==0.4.0 - # via -r benchmarks/timm/requirements.in +safetensors==0.4.3 + # via + # -c .pin/../.pin/constraints-rocm-torch.txt + # -r benchmarks/timm/requirements.in six==1.16.0 # via # -c .pin/../.pin/constraints-rocm-torch.txt # asttokens -sympy==1.12 +sympy==1.12.1 # via # -c .pin/../.pin/constraints-rocm-torch.txt # torch -torch==2.1.0+rocm5.6 +torch==2.3.1+rocm6.0 # via + # -c .pin/../.pin/constraints-rocm-torch.txt # -r benchmarks/timm/requirements.in - # pytorch-triton-rocm # torchvision -torchvision==0.16.0+rocm5.6 - # via -r benchmarks/timm/requirements.in -tqdm==4.66.1 +torchvision==0.18.1+rocm6.0 + # via + # -c .pin/../.pin/constraints-rocm-torch.txt + # -r benchmarks/timm/requirements.in +tqdm==4.66.4 # via # -c .pin/../.pin/constraints-rocm-torch.txt # huggingface-hub -typing-extensions==4.8.0 +typing-extensions==4.12.2 # via # -c .pin/../.pin/constraints-rocm-torch.txt # huggingface-hub diff --git a/benchmarks/timm/requirements.xpu.txt b/benchmarks/timm/requirements.xpu.txt index 044d718cf..1e32e7820 100644 --- a/benchmarks/timm/requirements.xpu.txt +++ b/benchmarks/timm/requirements.xpu.txt @@ -4,7 +4,10 @@ # # pip-compile --output-file=benchmarks/timm/requirements.xpu.txt .pin/tmp-constraints-xpu-timm.txt benchmarks/timm/requirements.in # 
+--extra-index-url https://pypi.ngc.nvidia.com --extra-index-url https://pytorch-extension.intel.com/release-whl/stable/xpu/us/ +--find-links https://storage.googleapis.com/jax-releases/jax_cuda_releases.html +--trusted-host pypi.ngc.nvidia.com antlr4-python3-runtime==4.9.3 # via @@ -14,7 +17,7 @@ asttokens==2.4.1 # via # -c .pin/../.pin/constraints-xpu-torch.txt # giving -certifi==2024.2.2 +certifi==2024.6.2 # via # -c .pin/../.pin/constraints-xpu-torch.txt # requests @@ -22,7 +25,7 @@ charset-normalizer==3.3.2 # via # -c .pin/../.pin/constraints-xpu-torch.txt # requests -codefind==0.1.3 +codefind==0.1.6 # via # -c .pin/../.pin/constraints-xpu-torch.txt # ptera @@ -30,12 +33,13 @@ executing==1.2.0 # via # -c .pin/../.pin/constraints-xpu-torch.txt # varname -filelock==3.13.4 +filelock==3.14.0 # via # -c .pin/../.pin/constraints-xpu-torch.txt # huggingface-hub # torch -fsspec==2024.2.0 + # triton +fsspec==2024.3.1 # via # -c .pin/../.pin/constraints-xpu-torch.txt # huggingface-hub @@ -45,7 +49,7 @@ giving==0.4.2 # -c .pin/../.pin/constraints-xpu-torch.txt # ptera # voir -huggingface-hub==0.22.2 +huggingface-hub==0.23.3 # via # -c .pin/../.pin/constraints-xpu-torch.txt # -r benchmarks/timm/requirements.in @@ -53,7 +57,7 @@ idna==3.7 # via # -c .pin/../.pin/constraints-xpu-torch.txt # requests -jinja2==3.1.3 +jinja2==3.1.4 # via # -c .pin/../.pin/constraints-xpu-torch.txt # torch @@ -73,7 +77,7 @@ mpmath==1.3.0 # via # -c .pin/../.pin/constraints-xpu-torch.txt # sympy -networkx +networkx==3.3 # via # -c .pin/../.pin/constraints-xpu-torch.txt # torch @@ -81,6 +85,58 @@ numpy==1.26.4 # via # -c .pin/../.pin/constraints-xpu-torch.txt # torchvision +nvidia-cublas-cu12==12.1.3.1 + # via + # -c .pin/../.pin/constraints-xpu-torch.txt + # nvidia-cudnn-cu12 + # nvidia-cusolver-cu12 + # torch +nvidia-cuda-cupti-cu12==12.1.105 + # via + # -c .pin/../.pin/constraints-xpu-torch.txt + # torch +nvidia-cuda-nvrtc-cu12==12.1.105 + # via + # -c .pin/../.pin/constraints-xpu-torch.txt + 
# torch +nvidia-cuda-runtime-cu12==12.1.105 + # via + # -c .pin/../.pin/constraints-xpu-torch.txt + # torch +nvidia-cudnn-cu12==8.9.2.26 + # via + # -c .pin/../.pin/constraints-xpu-torch.txt + # torch +nvidia-cufft-cu12==11.0.2.54 + # via + # -c .pin/../.pin/constraints-xpu-torch.txt + # torch +nvidia-curand-cu12==10.3.2.106 + # via + # -c .pin/../.pin/constraints-xpu-torch.txt + # torch +nvidia-cusolver-cu12==11.4.5.107 + # via + # -c .pin/../.pin/constraints-xpu-torch.txt + # torch +nvidia-cusparse-cu12==12.1.0.106 + # via + # -c .pin/../.pin/constraints-xpu-torch.txt + # nvidia-cusolver-cu12 + # torch +nvidia-nccl-cu12==2.20.5 + # via + # -c .pin/../.pin/constraints-xpu-torch.txt + # torch +nvidia-nvjitlink-cu12==12.5.40 + # via + # -c .pin/../.pin/constraints-xpu-torch.txt + # nvidia-cusolver-cu12 + # nvidia-cusparse-cu12 +nvidia-nvtx-cu12==12.1.105 + # via + # -c .pin/../.pin/constraints-xpu-torch.txt + # torch omegaconf==2.3.0 # via # -c .pin/../.pin/constraints-xpu-torch.txt @@ -89,7 +145,7 @@ ovld==0.3.5 # via # -c .pin/../.pin/constraints-xpu-torch.txt # voir -packaging==24.0 +packaging==24.1 # via # -c .pin/../.pin/constraints-xpu-torch.txt # huggingface-hub @@ -97,11 +153,15 @@ pillow==10.3.0 # via # -c .pin/../.pin/constraints-xpu-torch.txt # torchvision +psutil==5.9.8 + # via + # -c .pin/../.pin/constraints-xpu-torch.txt + # voir ptera==1.4.1 # via # -c .pin/../.pin/constraints-xpu-torch.txt # voir -pygments==2.17.2 +pygments==2.18.0 # via # -c .pin/../.pin/constraints-xpu-torch.txt # rich @@ -119,11 +179,10 @@ reactivex==4.0.4 # via # -c .pin/../.pin/constraints-xpu-torch.txt # giving -requests==2.31.0 +requests==2.32.3 # via # -c .pin/../.pin/constraints-xpu-torch.txt # huggingface-hub - # torchvision rich==13.7.1 # via # -c .pin/../.pin/constraints-xpu-torch.txt @@ -136,26 +195,30 @@ six==1.16.0 # via # -c .pin/../.pin/constraints-xpu-torch.txt # asttokens -sympy==1.12 +sympy==1.12.1 # via # -c .pin/../.pin/constraints-xpu-torch.txt # torch 
-torch==2.1.0a0+cxx11.abi +torch==2.3.1 # via # -c .pin/../.pin/constraints-xpu-torch.txt # -c .pin/../constraints/xpu.txt # -r benchmarks/timm/requirements.in # torchvision -torchvision==0.16.0a0+cxx11.abi +torchvision==0.18.1 # via # -c .pin/../.pin/constraints-xpu-torch.txt # -c .pin/../constraints/xpu.txt # -r benchmarks/timm/requirements.in -tqdm==4.66.2 +tqdm==4.66.4 # via # -c .pin/../.pin/constraints-xpu-torch.txt # huggingface-hub -typing-extensions==4.11.0 +triton==2.3.1 + # via + # -c .pin/../.pin/constraints-xpu-torch.txt + # torch +typing-extensions==4.12.2 # via # -c .pin/../.pin/constraints-xpu-torch.txt # huggingface-hub diff --git a/benchmarks/timm/voirfile.py b/benchmarks/timm/voirfile.py index 1f25e1b48..5dbe351be 100644 --- a/benchmarks/timm/voirfile.py +++ b/benchmarks/timm/voirfile.py @@ -35,7 +35,6 @@ def instrument_main(ov, options: Config): from benchmate.observer import BenchObserver from timm.utils.distributed import is_global_primary - from timm.data import create_loader observer = BenchObserver( accelerator.Event, diff --git a/benchmate/benchmate/datagen.py b/benchmate/benchmate/datagen.py index 7299a07f8..d3eacecf3 100644 --- a/benchmate/benchmate/datagen.py +++ b/benchmate/benchmate/datagen.py @@ -22,7 +22,7 @@ def write(args): offset, outdir, size = args img = torch.randn(*size) - target = torch.randint(0, 1000, size=(1,), dtype=torch.long)[0] + target = offset % 1000 # torch.randint(0, 1000, size=(1,), dtype=torch.long)[0] img = transforms.ToPILImage()(img) class_val = int(target) diff --git a/benchmate/benchmate/dataloader.py b/benchmate/benchmate/dataloader.py index 28177f6d3..869834cc6 100644 --- a/benchmate/benchmate/dataloader.py +++ b/benchmate/benchmate/dataloader.py @@ -123,6 +123,11 @@ class Adapter: def __init__(self, iter): self.iter = iter + class sampler: + @staticmethod + def set_epoch(epoch): + pass + def __len__(self): return len(self.iter) @@ -160,7 +165,7 @@ def image_transforms(): def pytorch(folder, batch_size, 
num_workers, distributed=False): train = datasets.ImageFolder( - os.path.join(folder, "train"), + folder, image_transforms() ) diff --git a/benchmate/benchmate/dataset.py b/benchmate/benchmate/dataset.py new file mode 100644 index 000000000..89ac90e4c --- /dev/null +++ b/benchmate/benchmate/dataset.py @@ -0,0 +1,50 @@ + +import os +from collections import defaultdict + + +def no_transform(args): + return args + + +def transform_images(transform_x, transform_y=no_transform): + def _(args): + print(args) + return transform_x(args[0]), transform_y(args[1]) + return _ + + +def transform_celebA(transform_x): + def _(args): + print(args) + return transform_x(args["image"]) + return _ + + +class TransformedDataset: + def __init__(self, dataset, transforms=no_transform): + self.dataset = dataset + self.transforms = transforms + + def __len__(self): + return len(self.dataset) + + def __getitem__(self, item): + return self.transforms(self.dataset[item]) + + + +class ImageNetAsFrames: + def __init__(self, folder) -> None: + self.clip = defaultdict(list) + for root, _, files in os.walk(folder): + clip_id = root.split("/")[-1] + video = self.clip[clip_id] + for frame in files: + video.append(frame) + + def __getitem__(self, item): + return self.clip[item] + + def __len__(self): + return len(self.clip) diff --git a/benchmate/benchmate/monitor.py b/benchmate/benchmate/monitor.py new file mode 100644 index 000000000..bb8e0a437 --- /dev/null +++ b/benchmate/benchmate/monitor.py @@ -0,0 +1,90 @@ +import sys +import json +import time +import sys +import multiprocessing + + +from voir.smuggle import SmuggleWriter +from voir.instruments.gpu import get_gpu_info +from voir.instruments.utils import Monitor + + +def milabench_sys_monitor(): + data_file = SmuggleWriter(sys.stdout) + def mblog(data): + if data_file is not None: + print(json.dumps(data), file=data_file) + + def monitor_fn(): + data = { + gpu["device"]: { + "memory": [gpu["memory"]["used"], gpu["memory"]["total"]], + "load": 
gpu["utilization"]["compute"], + "temperature": gpu["temperature"], + } + for gpu in get_gpu_info()["gpus"].values() + } + mblog({"task": "main", "gpudata": data}) + + monitor_fn() + monitor = Monitor(3, monitor_fn) + monitor.start() + + + + +def _worker(state, queue, func, delay): + while state["running"]: + queue.put(func()) + time.sleep(delay) + + +class CustomMonitor: + def __init__(self, delay, func): + self.manager = multiprocessing.Manager() + self.state = self.manager.dict() + self.state["running"] = True + self.results = multiprocessing.Queue() + self.process = multiprocessing.Process( + target=_worker, args=(self.state, self.results, func, delay), + ) + + def start(self): + self.process.start() + + def stop(self): + self.state["running"] = False + self.process.join() + +def setupvoir(): + # wtf this do + data_file = SmuggleWriter(sys.stdout) + # data_file = sys.stdout + + def monitor_fn(): + data = { + gpu["device"]: { + "memory": [gpu["memory"]["used"], gpu["memory"]["total"],], + "load": gpu["utilization"]["compute"], + "temperature": gpu["temperature"], + "power": gpu["power"], + } + for gpu in get_gpu_info()["gpus"].values() + } + return {"task": "main", "gpudata": data, "t": time.time()} + + monitor = CustomMonitor(0.5, monitor_fn) + + def log(data): + nonlocal monitor + + if data_file is not None: + data["t"] = time.time() + print(json.dumps(data), file=data_file) + + while not monitor.results.empty(): + print(json.dumps(monitor.results.get()), file=data_file) + + monitor.start() + return log, monitor diff --git a/config/base.yaml b/config/base.yaml index 0fc7060bd..b4e01b5e1 100644 --- a/config/base.yaml +++ b/config/base.yaml @@ -23,6 +23,8 @@ _torchvision: --no-stdout: true --epochs: 50 --num-workers: 8 + --loader: pytorch + --data: "{milabench_data}/FakeImageNet" _torchvision_ddp: @@ -36,6 +38,8 @@ _torchvision_ddp: argv: --epochs: 10 --num-workers: 8 + --loader: pytorch + --data: "{milabench_data}/FakeImageNet" _flops: inherits: _defaults @@ 
-59,6 +63,10 @@ llama: group: llm install_group: torch max_duration: 800 + tags: + - nlp + - llm + - inference voir: options: @@ -67,10 +75,12 @@ llama: plan: method: per_gpu - - tags: - - nlp - - llm + + # Note: when NOT using pretrained model + # the benchmark becomes much harder as no end token is ever outputted by the model + # which makes inference much slower + argv: + --pretrained: true _hf: @@ -192,10 +202,9 @@ resnet50: argv: --model: resnet50 - --batch-size: 64 + --batch-size: 256 --num-workers: "{cpu_per_gpu}" --loader: pytorch - --data: "{milabench_data}/FakeImageNet" resnet50-noio: inherits: _torchvision @@ -208,8 +217,7 @@ resnet50-noio: argv: --model: resnet50 - --batch-size: 64 - --num-workers: 8 + --batch-size: 256 --loader: synthetic_fixed resnet152-ddp: @@ -225,7 +233,6 @@ resnet152-ddp: --batch-size: 256 --num-workers: 8 --loader: dali - --data: "{milabench_data}/FakeImageNet" efficientnet_b4: inherits: _torchvision @@ -495,6 +502,11 @@ stargan: --c_dim: 5 --batch_size: 16 --dataset: "synth" + --celeba_image_dir: "{milabench_data}" + --log_dir: "{milabench_extra}/logs" + --model_save_dir: "{milabench_extra}/models" + --sample_dir: "{milabench_extra}/samples" + --result_dir: "{milabench_extra}/results" super-slomo: inherits: _defaults @@ -503,14 +515,16 @@ super-slomo: - video-interpolation - unet - convnet - - noio definition: ../benchmarks/super-slomo group: super-slomo install_group: torch plan: method: per_gpu argv: - --train_batch_size: 32 + --train_batch_size: 64 + --dataset_root: "{milabench_data}/FakeImageNet" + --loader: pytorch + --num_workers: 8 ppo: inherits: _sb3 @@ -611,3 +625,19 @@ rwkv: --grad_cp: 0 --random_seed: 1234 --enable_progress_bar: "False" +brax: + inherits: _defaults + tags: + - rl + - jax + definition: ../benchmarks/brax + group: brax + install_group: torch + plan: + method: njobs + n: 1 + argv: + --episode-length: 20 + --batch-size: 1024 + --num-minibatches: 32 + --num-envs: 8192 diff --git a/config/standard.yaml 
b/config/standard.yaml index d24b1fee8..3f43a8055 100644 --- a/config/standard.yaml +++ b/config/standard.yaml @@ -126,7 +126,7 @@ dlrm: weight: 1.0 rwkv: - enabled: true + enabled: false weight: 1.0 fp16: @@ -145,6 +145,10 @@ fp32: enabled: true weight: 0.0 +brax: + enabled: true + weight: 1.0 + ################## # Disabled tests # ################## diff --git a/constraints/cuda.txt b/constraints/cuda.txt index b9132764b..56109d809 100644 --- a/constraints/cuda.txt +++ b/constraints/cuda.txt @@ -1,6 +1,6 @@ ---extra-index-url https://download.pytorch.org/whl/cu118 +--extra-index-url https://download.pytorch.org/whl/cu121 # # voir > 0.2.10 -torchcompat > 1.0.0 \ No newline at end of file +torchcompat >= 1.0.0 \ No newline at end of file diff --git a/constraints/hpu.txt b/constraints/hpu.txt index 029a897d7..1dba3a1ee 100644 --- a/constraints/hpu.txt +++ b/constraints/hpu.txt @@ -4,5 +4,5 @@ # # voir > 0.2.10 -torchcompat > 1.0.0 -git+https://github.com/huggingface/optimum-habana.git +torchcompat >= 1.0.0 + diff --git a/constraints/rocm.txt b/constraints/rocm.txt index e881a0e5f..1bf0919e8 100644 --- a/constraints/rocm.txt +++ b/constraints/rocm.txt @@ -1,6 +1,6 @@ ---extra-index-url https://download.pytorch.org/whl/rocm5.6/ +--extra-index-url https://download.pytorch.org/whl/rocm6.0 # # voir > 0.2.10 -torchcompat > 1.0.0 \ No newline at end of file +torchcompat >= 1.0.0 \ No newline at end of file diff --git a/constraints/xpu.txt b/constraints/xpu.txt index 83e818dc7..d0cf6bdac 100644 --- a/constraints/xpu.txt +++ b/constraints/xpu.txt @@ -1,12 +1,17 @@ +# --extra-index-url https://download.pytorch.org/whl/cpu --extra-index-url https://pytorch-extension.intel.com/release-whl/stable/xpu/us/ - -torch==2.1.0a0 -torchvision==0.16.0a0 -torchaudio==2.1.0a0 -intel-extension-for-pytorch==2.1.10+xpu +# +# Including a package in a constraints file does not trigger installation of the package. 
+# +torch>=2.1.0 +torchvision>=0.16.0a0 +torchaudio>=2.1.0a0 +intel-extension-for-pytorch>=2.1.10+xpu +oneccl_bind_pt==2.1.100+xpu +intel-extension-for-pytorch-deepspeed>=2.1.30 # # voir > 0.2.10 -torchcompat > 1.0.0 \ No newline at end of file +torchcompat >= 1.0.0 \ No newline at end of file diff --git a/docs/flow.rst b/docs/flow.rst index 406297386..45f212c46 100644 --- a/docs/flow.rst +++ b/docs/flow.rst @@ -54,6 +54,11 @@ Milabench is configured using a yaml file that specify where are the benchmark a .. code-block:: yaml + # you can include a previous configuration + # and override its values + include: + - base.yaml + _defaults: max_duration: 600 # Bench time out voir: @@ -210,7 +215,7 @@ Execution Flow * Modify: ``$MILABENCH_BASE/venv/{bench}`` * ``milabench prepare`` - * Call the prepare script for each benchmarks to download/generate dataset + * Call the prepare script for each benchmarks to download/generate dataset and download pretrained models * Modify: ``$MILABENCH_BASE/data/{dataset}`` * ``milabench run`` diff --git a/milabench/_version.py b/milabench/_version.py index dc555d9ae..57c79d91e 100644 --- a/milabench/_version.py +++ b/milabench/_version.py @@ -1,5 +1,5 @@ """This file is generated, do not modify""" -__tag__ = "v0.0.6-121-g67745383" -__commit__ = "677453835ac0a55e61137ce4437b44da3cf40b4b" -__date__ = "2024-06-06 11:52:59 -0400" +__tag__ = "v0.0.6-140-g57343f1" +__commit__ = "57343f10ef2b4ce598011ee308ebd06b4c654495" +__date__ = "2024-06-10 11:52:37 -0400" diff --git a/milabench/multi.py b/milabench/multi.py index 9946a3642..4ea76c701 100644 --- a/milabench/multi.py +++ b/milabench/multi.py @@ -2,6 +2,7 @@ import traceback from collections import defaultdict from copy import deepcopy +import os from voir.instruments.gpu import get_gpu_info @@ -225,10 +226,10 @@ async def do_pin( pack0 = packs[0] ivar = pack0.config["install_variant"] - pindir = XPath(".pin") + pindir = here.parent / XPath(".pin") constraint_path = pindir / 
"tmp-constraints.txt" - constraint_files = make_constraints_file(constraint_path, constraints) + constraint_files = make_constraints_file(constraint_path, constraints, str(here.parent)) ig_constraint_path = pindir / f"constraints-{ivar}-{ig}.txt" if ig_constraint_path.exists() and from_scratch: @@ -239,12 +240,17 @@ async def do_pin( requirements_file=ig_constraint_path.absolute(), input_files=(*constraint_files, *reqs), argv=pip_compile_args, + working_dir=here.parent ) + if not ig_constraint_path.exists(): + raise RuntimeError("Could not generate main requirements") + # Use master requirements to constrain the rest new_constraints = [ig_constraint_path, *constraints] for pack in packs: await pack.pin( pip_compile_args=pip_compile_args, constraints=new_constraints, + working_dir=here.parent ) diff --git a/milabench/pack.py b/milabench/pack.py index 0760f2208..df5971556 100644 --- a/milabench/pack.py +++ b/milabench/pack.py @@ -389,6 +389,7 @@ async def pin( pip_compile_args: Sequence = tuple(), input_files: Sequence = tuple(), constraints: Sequence = tuple(), + working_dir=None ): """Pin versions to requirements file. 
@@ -399,8 +400,10 @@ async def pin( constraint: The constraint file """ ivar = self.config.get("install_variant", None) + if ivar == "unpinned": raise Exception("Cannot pin the 'unpinned' variant.") + # assert self.phase == "pin" for base_reqs, reqs in self.requirements_map().items(): if not base_reqs.exists(): @@ -414,20 +417,20 @@ async def pin( grp = self.config["group"] constraint_path = XPath(".pin") / f"tmp-constraints-{ivar}-{grp}.txt" - constraint_files = make_constraints_file(constraint_path, constraints) + constraint_files = make_constraints_file(constraint_path, constraints, working_dir) current_input_files = constraint_files + (base_reqs, *input_files) await self.exec_pip_compile( - reqs, current_input_files, argv=pip_compile_args + reqs, current_input_files, argv=pip_compile_args, working_dir=working_dir ) # Add previous requirements as inputs input_files = (reqs, *input_files) async def exec_pip_compile( - self, requirements_file: XPath, input_files: XPath, argv=[] + self, requirements_file: XPath, input_files: XPath, argv=[], working_dir=None ): - input_files = [relativize(inp) for inp in input_files] + input_files = [relativize(inp, working_dir) for inp in input_files] from . 
import commands as cmd return await cmd.CmdCommand( @@ -437,10 +440,10 @@ async def exec_pip_compile( "piptools", "compile", "--resolver", "backtracking", - "--output-file", relativize(requirements_file), + "--output-file", relativize(requirements_file, working_dir), *argv, *input_files, - cwd=XPath(".").absolute(), + cwd=working_dir, external=True, ).execute() diff --git a/milabench/sizer.py b/milabench/sizer.py index 5e4c42f07..5c206b7a8 100644 --- a/milabench/sizer.py +++ b/milabench/sizer.py @@ -291,6 +291,7 @@ def resolve_argv(pack, argv): context["milabench_data"] = pack.config.get("dirs", {}).get("data", None) context["milabench_cache"] = pack.config.get("dirs", {}).get("cache", None) + context["milabench_extra"] = pack.config.get("dirs", {}).get("extra", None) max_worker = 16 context["n_worker"] = min(context["cpu_per_gpu"], max_worker) diff --git a/milabench/utils.py b/milabench/utils.py index 575795361..59f294744 100644 --- a/milabench/utils.py +++ b/milabench/utils.py @@ -106,22 +106,23 @@ def assemble_options(options: dict): return args -def relativize(pth): +def relativize(pth, working_dir): pth = XPath(pth) if pth.is_absolute(): - return pth.relative_to(XPath(".").absolute()) + return pth.relative_to(XPath(working_dir)) else: return pth -def make_constraints_file(pth, constraints): +def make_constraints_file(pth, constraints, working_dir): if constraints: - os.makedirs(XPath(pth).parent, exist_ok=True) - with open(pth, "w") as tfile: + constraint_file = XPath(working_dir) / XPath(pth) + os.makedirs(constraint_file.parent, exist_ok=True) + with open(constraint_file, "w") as tfile: # We prefix the constraint with ../ because we are creating a constraint # file in ./.pin/,but containing constraints with paths relative to ./ - tfile.write("\n".join([f"-c ../{relativize(c)}" for c in constraints])) - return (pth,) + tfile.write("\n".join([f"-c ../{relativize(c, working_dir)}" for c in constraints])) + return (constraint_file,) else: return () diff --git 
a/pyproject.toml b/pyproject.toml index 005328948..fc5a6d032 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -10,7 +10,7 @@ authors = [ license = "MIT" [tool.poetry.dependencies] -voir = {git = "https://github.com/breuleux/voir", branch = "master"} +voir = "0.2.14" benchmate = {path = "benchmate", develop = false} python = ">=3.8,<4.0" giving = "^0.4.0" @@ -29,7 +29,7 @@ pandas = "^1.4.2" numpy = ">=1.23.0" pynvml = "^11.4.1" tqdm = "^4.64.1" -pip-tools = "^6.12.3" +pip-tools = "^7.4.1" rich = "^13.3.2" omegaconf = "^2.3.0" sqlalchemy = "^2.0.15" diff --git a/scripts/article/run_cuda.sh b/scripts/article/run_cuda.sh index 870f67f76..5a8f73e96 100644 --- a/scripts/article/run_cuda.sh +++ b/scripts/article/run_cuda.sh @@ -15,7 +15,9 @@ install_prepare() { mkdir -p $MILABENCH_WORDIR cd $MILABENCH_WORDIR - virtualenv $MILABENCH_WORDIR/env + if [ ! -d "$MILABENCH_WORDIR/env" ]; then + virtualenv $MILABENCH_WORDIR/env + fi if [ ! -d "$MILABENCH_WORDIR/milabench" ]; then git clone https://github.com/mila-iqia/milabench.git -b intel @@ -47,7 +49,9 @@ install_prepare() { milabench prepare "$@" } -if [ ! -d "$MILABENCH_WORDIR" ]; then +module load cuda/12.3.2 + +if [ ! -d "$MILABENCH_WORDIR/results" ]; then install_prepare else echo "Reusing previous install" @@ -57,7 +61,6 @@ fi cd $MILABENCH_WORDIR (cd $MILABENCH_WORDIR/milabench && git pull origin intel) -pip install -e $MILABENCH_WORDIR/milabench # # Run the benchmakrs diff --git a/scripts/article/run_dev.sh b/scripts/article/run_cuda_dev.sh similarity index 78% rename from scripts/article/run_dev.sh rename to scripts/article/run_cuda_dev.sh index 43d86518f..35faeb51f 100644 --- a/scripts/article/run_dev.sh +++ b/scripts/article/run_cuda_dev.sh @@ -2,8 +2,6 @@ set -ex -export MILABENCH_BRANCH="intel" -export VOIR_BRANCH="master" export MILABENCH_GPU_ARCH=cuda export MILABENCH_WORDIR="$(pwd)/$MILABENCH_GPU_ARCH" @@ -20,8 +18,9 @@ install_prepare() { virtualenv $MILABENCH_WORDIR/env if [ ! 
-d "$MILABENCH_WORDIR/milabench" ]; then - git clone https://github.com/mila-iqia/milabench.git -b $MILABENCH_BRANCH - git clone https://github.com/Delaunay/voir.git -b $VOIR_BRANCH + git clone https://github.com/mila-iqia/milabench.git -b intel + git clone https://github.com/Delaunay/voir.git -b async_timer + git clone https://github.com/Delaunay/torchcompat.git fi . $MILABENCH_WORDIR/env/bin/activate @@ -33,10 +32,14 @@ install_prepare() { milabench install "$@" which pip + pip install -e $MILABENCH_WORDIR/voir + pip install -e $MILABENCH_WORDIR/torchcompat ( . $BENCHMARK_VENV/bin/activate which pip + pip install -e $MILABENCH_WORDIR/voir + pip install -e $MILABENCH_WORDIR/torchcompat pip install torch torchvision torchaudio # DALI stuff @@ -50,6 +53,8 @@ install_prepare() { milabench prepare "$@" } +module load cuda/12.3.2 + if [ ! -d "$MILABENCH_WORDIR" ]; then install_prepare else @@ -59,8 +64,7 @@ fi cd $MILABENCH_WORDIR -(cd $MILABENCH_WORDIR/milabench && git pull origin $MILABENCH_BRANCH) -pip install -e $MILABENCH_WORDIR/milabench +(cd $MILABENCH_WORDIR/milabench && git pull origin intel) # # Run the benchmakrs diff --git a/scripts/article/run_hpu.sh b/scripts/article/run_hpu.sh index 242d82328..c732b09a4 100644 --- a/scripts/article/run_hpu.sh +++ b/scripts/article/run_hpu.sh @@ -44,7 +44,7 @@ install_prepare() { pip install -e $MILABENCH_WORDIR/optimum-habana ( - cd $MILABENCH_WORDIR/milabench/benchmarks/dlrm/dlrm; + cd $MILABENCH_WORDIR/milabench/benchmarks/dlrm/dlrm; git remote add me https://github.com/Delaunay/dlrm.git git fetch me git checkout me/main @@ -52,7 +52,8 @@ install_prepare() { # Override dependencies for HPU # benchmarks need pytorch - export HABANALABS_VIRTUAL_DIR=$BENCHMARK_VENV + pip uninstall torch torchvision torchaudio + export HABANALABS_VIRTUAL_DIR=$BENCHMARK_VENV ./habanalabs-installer.sh install -t dependencies --venv -y ./habanalabs-installer.sh install -t pytorch --venv -y ) @@ -63,7 +64,7 @@ install_prepare() { } if [ ! 
-d "$MILABENCH_WORDIR" ]; then - install_prepare + install_prepare else echo "Reusing previous install" . $MILABENCH_WORDIR/env/bin/activate diff --git a/scripts/article/run_rocm.sh b/scripts/article/run_rocm.sh index 5018597a7..eaafd522f 100644 --- a/scripts/article/run_rocm.sh +++ b/scripts/article/run_rocm.sh @@ -41,7 +41,7 @@ install_prepare() { } if [ ! -d "$MILABENCH_WORDIR" ]; then - install_prepare + install_prepare else echo "Reusing previous install" . $MILABENCH_WORDIR/env/bin/activate diff --git a/scripts/article/run_xpu.sh b/scripts/article/run_xpu.sh index 4ef912ce0..9e51b0bc0 100644 --- a/scripts/article/run_xpu.sh +++ b/scripts/article/run_xpu.sh @@ -38,7 +38,8 @@ install_prepare() { which pip # Override dependencies for XPU - pip install torch, torchvision torchaudio intel-extension-for-pytorch --extra-index-url https://pytorch-extension.intel.com/release-whl/stable/xpu/us/ + pip uninstall torch torchvision torchaudio + pip install torch torchvision torchaudio intel-extension-for-pytorch oneccl_bind_pt intel-extension-for-pytorch-deepspeed --index-url https://pytorch-extension.intel.com/release-whl/stable/xpu/us/ ) # @@ -47,7 +48,7 @@ install_prepare() { } if [ ! -d "$MILABENCH_WORDIR" ]; then - install_prepare + install_prepare else echo "Reusing previous install" . 
$MILABENCH_WORDIR/env/bin/activate diff --git a/scripts/config.yaml b/scripts/config.yaml deleted file mode 100644 index e69de29bb..000000000 diff --git a/scripts/multinode.sh b/scripts/multinode.sh deleted file mode 100644 index e69de29bb..000000000 diff --git a/scripts/update_pins.sh b/scripts/update_pins.sh new file mode 100644 index 000000000..23b6ac841 --- /dev/null +++ b/scripts/update_pins.sh @@ -0,0 +1,7 @@ + +module load cuda/12.3.2 + +MILABENCH_GPU_ARCH=cuda milabench pin -c constraints/cuda.txt --config config/standard.yaml --from-scratch +MILABENCH_GPU_ARCH=rocm milabench pin -c constraints/rocm.txt --config config/standard.yaml --from-scratch +MILABENCH_GPU_ARCH=xpu milabench pin -c constraints/xpu.txt --config config/standard.yaml --from-scratch +MILABENCH_GPU_ARCH=hpu milabench pin -c constraints/hpu.txt --config config/standard.yaml --from-scratch