From 1e9bbedc45eba48faec6ced79cdfbe4caf25b71c Mon Sep 17 00:00:00 2001 From: Russell Bryant Date: Tue, 5 Nov 2024 09:23:59 -0500 Subject: [PATCH 01/22] [CI/Build] Drop Python 3.8 support Pytorch 3.5 dropped support for Python 3.8, so it's time we do the same. Signed-off-by: Russell Bryant --- .github/workflows/mypy.yaml | 2 +- .github/workflows/publish.yml | 2 +- .readthedocs.yaml | 2 +- CMakeLists.txt | 2 +- docs/source/getting_started/installation.rst | 4 ++-- docs/source/getting_started/neuron-installation.rst | 2 +- docs/source/getting_started/quickstart.rst | 2 +- pyproject.toml | 2 +- setup.py | 3 +-- vllm/distributed/parallel_state.py | 1 + 10 files changed, 11 insertions(+), 11 deletions(-) diff --git a/.github/workflows/mypy.yaml b/.github/workflows/mypy.yaml index 18b354948f0cc..28d2e5fb8dbd9 100644 --- a/.github/workflows/mypy.yaml +++ b/.github/workflows/mypy.yaml @@ -25,7 +25,7 @@ jobs: runs-on: ubuntu-latest strategy: matrix: - python-version: ["3.8", "3.9", "3.10", "3.11", "3.12"] + python-version: ["3.9", "3.10", "3.11", "3.12"] steps: - uses: actions/checkout@eef61447b9ff4aafe5dcd4e0bbf5d482be7e7871 # v4.2.1 - name: Set up Python ${{ matrix.python-version }} diff --git a/.github/workflows/publish.yml b/.github/workflows/publish.yml index f959a1cacf866..578c3fbd4e816 100644 --- a/.github/workflows/publish.yml +++ b/.github/workflows/publish.yml @@ -48,7 +48,7 @@ jobs: fail-fast: false matrix: os: ['ubuntu-20.04'] - python-version: ['3.8', '3.9', '3.10', '3.11', '3.12'] + python-version: ['3.9', '3.10', '3.11', '3.12'] pytorch-version: ['2.4.0'] # Must be the most recent version that meets requirements-cuda.txt. cuda-version: ['11.8', '12.1'] diff --git a/.readthedocs.yaml b/.readthedocs.yaml index 42cbf18a0f712..ebb78366ba48c 100644 --- a/.readthedocs.yaml +++ b/.readthedocs.yaml @@ -6,7 +6,7 @@ version: 2 build: os: ubuntu-22.04 tools: - python: "3.8" + python: "3.12" sphinx: configuration: docs/source/conf.py diff --git a/CMakeLists.txt b/CMakeLists.txt index 943424bc4edfa..c094f82fd59fb 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -31,7 +31,7 @@ install(CODE "set(CMAKE_INSTALL_LOCAL_ONLY TRUE)" ALL_COMPONENTS) # Supported python versions. These versions will be searched in order, the # first match will be selected. These should be kept in sync with setup.py. # -set(PYTHON_SUPPORTED_VERSIONS "3.8" "3.9" "3.10" "3.11" "3.12") +set(PYTHON_SUPPORTED_VERSIONS "3.9" "3.10" "3.11" "3.12") # Supported NVIDIA architectures. set(CUDA_SUPPORTED_ARCHS "7.0;7.5;8.0;8.6;8.9;9.0") diff --git a/docs/source/getting_started/installation.rst b/docs/source/getting_started/installation.rst index a706b285edede..2fd2f8c6495bc 100644 --- a/docs/source/getting_started/installation.rst +++ b/docs/source/getting_started/installation.rst @@ -10,7 +10,7 @@ Requirements ============ * OS: Linux -* Python: 3.8 - 3.12 +* Python: 3.9 - 3.12 * GPU: compute capability 7.0 or higher (e.g., V100, T4, RTX20xx, A100, L4, H100, etc.) Install released versions @@ -66,7 +66,7 @@ If you want to access the wheels for previous commits, you can specify the commi $ export VLLM_COMMIT=33f460b17a54acb3b6cc0b03f4a17876cff5eafd # use full commit hash from the main branch $ pip install https://vllm-wheels.s3.us-west-2.amazonaws.com/${VLLM_COMMIT}/vllm-1.0.0.dev-cp38-abi3-manylinux1_x86_64.whl -Note that the wheels are built with Python 3.8 ABI (see `PEP 425 `_ for more details about ABI), so **they are compatible with Python 3.8 and later**. 
The version string in the wheel file name (``1.0.0.dev``) is just a placeholder to have a unified URL for the wheels. The actual versions of wheels are contained in the wheel metadata. +Note that the wheels are built with Python 3.9 ABI (see `PEP 425 `_ for more details about ABI), so **they are compatible with Python 3.9 and later**. The version string in the wheel file name (``1.0.0.dev``) is just a placeholder to have a unified URL for the wheels. The actual versions of wheels are contained in the wheel metadata. Another way to access the latest code is to use the docker images: diff --git a/docs/source/getting_started/neuron-installation.rst b/docs/source/getting_started/neuron-installation.rst index ec99fc013057b..025ba6ef7ebd8 100644 --- a/docs/source/getting_started/neuron-installation.rst +++ b/docs/source/getting_started/neuron-installation.rst @@ -11,7 +11,7 @@ Requirements ------------ * OS: Linux -* Python: 3.8 -- 3.11 +* Python: 3.9 -- 3.11 * Accelerator: NeuronCore_v2 (in trn1/inf2 instances) * Pytorch 2.0.1/2.1.1 * AWS Neuron SDK 2.16/2.17 (Verified on python 3.8) diff --git a/docs/source/getting_started/quickstart.rst b/docs/source/getting_started/quickstart.rst index 00b762ccc2ccb..eeb46699fe81b 100644 --- a/docs/source/getting_started/quickstart.rst +++ b/docs/source/getting_started/quickstart.rst @@ -12,7 +12,7 @@ This guide will help you quickly get started with vLLM to: Prerequisites -------------- - OS: Linux -- Python: 3.8 - 3.12 +- Python: 3.9 - 3.12 - GPU: compute capability 7.0 or higher (e.g., V100, T4, RTX20xx, A100, L4, H100, etc.) Installation diff --git a/pyproject.toml b/pyproject.toml index 0bbab3cd3fbc3..bb49d046717f0 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -55,7 +55,7 @@ ignore = [ ] [tool.mypy] -python_version = "3.8" +python_version = "3.9" ignore_missing_imports = true check_untyped_defs = true diff --git a/setup.py b/setup.py index 8abeb0ba739db..84526016b11c7 100644 --- a/setup.py +++ b/setup.py @@ -498,7 +498,6 @@ def _read_requirements(filename: str) -> List[str]: "Documentation": "https://vllm.readthedocs.io/en/latest/", }, classifiers=[ - "Programming Language :: Python :: 3.8", "Programming Language :: Python :: 3.9", "Programming Language :: Python :: 3.10", "Programming Language :: Python :: 3.11", @@ -512,7 +511,7 @@ def _read_requirements(filename: str) -> List[str]: ], packages=find_packages(exclude=("benchmarks", "csrc", "docs", "examples", "tests*")), - python_requires=">=3.8", + python_requires=">=3.9", install_requires=get_requirements(), ext_modules=ext_modules, extras_require={ diff --git a/vllm/distributed/parallel_state.py b/vllm/distributed/parallel_state.py index 94ba41a016f6d..1912311180da6 100644 --- a/vllm/distributed/parallel_state.py +++ b/vllm/distributed/parallel_state.py @@ -93,6 +93,7 @@ def _get_unique_name(name: str) -> str: def _register_group(group: "GroupCoordinator") -> None: + # TODO - revisit this now that we dropped 3.8 support # looks like Python 3.8 does not understand `ReferenceType` _groups[group.unique_name] = weakref.ref(group) # type: ignore From 8c99ea0568d13905cf4db53e470c9d92ff368f15 Mon Sep 17 00:00:00 2001 From: Russell Bryant Date: Tue, 5 Nov 2024 09:29:37 -0500 Subject: [PATCH 02/22] [Core] Make use of str.removeprefix() Previously we had a custom implementation of this to support Python 3.8 since this method was added in Python 3.9. Now that we have dropped 3.8, we can use the built-in method for this. 
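
As a quick illustration (the strings below are only examples, not taken from
the change itself), the built-in behaves the same way as the helper it
replaces: the prefix is stripped only when it is actually present, otherwise
the string is returned unchanged.

    # Python >= 3.9
    assert "vllm.attention".removeprefix("vllm.") == "attention"
    assert "attention".removeprefix("vllm.") == "attention"  # no prefix, no-op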
Signed-off-by: Russell Bryant --- benchmarks/backend_request_func.py | 22 +++++++--------------- setup.py | 10 ++-------- 2 files changed, 9 insertions(+), 23 deletions(-) diff --git a/benchmarks/backend_request_func.py b/benchmarks/backend_request_func.py index 0a903877f000d..a42e70170ba28 100644 --- a/benchmarks/backend_request_func.py +++ b/benchmarks/backend_request_func.py @@ -79,7 +79,7 @@ async def async_request_tgi( # any data, we should skip it. if chunk_bytes.startswith(":"): continue - chunk = remove_prefix(chunk_bytes, "data:") + chunk = chunk_bytes.removeprefix("data:") data = json.loads(chunk) timestamp = time.perf_counter() @@ -144,8 +144,8 @@ async def async_request_trt_llm( if not chunk_bytes: continue - chunk = remove_prefix(chunk_bytes.decode("utf-8"), - "data:") + chunk = chunk_bytes.decode("utf-8").removeprefix( + "data:") data = json.loads(chunk) output.generated_text += data["text_output"] @@ -261,8 +261,8 @@ async def async_request_openai_completions( if not chunk_bytes: continue - chunk = remove_prefix(chunk_bytes.decode("utf-8"), - "data: ") + chunk = chunk_bytes.decode("utf-8").removeprefix( + "data: ") if chunk == "[DONE]": latency = time.perf_counter() - st else: @@ -349,8 +349,8 @@ async def async_request_openai_chat_completions( if not chunk_bytes: continue - chunk = remove_prefix(chunk_bytes.decode("utf-8"), - "data: ") + chunk = chunk_bytes.decode("utf-8").removeprefix( + "data: ") if chunk == "[DONE]": latency = time.perf_counter() - st else: @@ -389,14 +389,6 @@ async def async_request_openai_chat_completions( return output -# Since vllm must support Python 3.8, we can't use str.removeprefix(prefix) -# introduced in Python 3.9 -def remove_prefix(text: str, prefix: str) -> str: - if text.startswith(prefix): - return text[len(prefix):] - return text - - def get_model(pretrained_model_name_or_path: str) -> str: if os.getenv('VLLM_USE_MODELSCOPE', 'False').lower() == 'true': from modelscope import snapshot_download diff --git a/setup.py b/setup.py index 84526016b11c7..d2bce790c2427 100644 --- a/setup.py +++ b/setup.py @@ -56,12 +56,6 @@ def is_ninja_available() -> bool: return which("ninja") is not None -def remove_prefix(text, prefix): - if text.startswith(prefix): - return text[len(prefix):] - return text - - class CMakeExtension(Extension): def __init__(self, name: str, cmake_lists_dir: str = '.', **kwa) -> None: @@ -198,8 +192,8 @@ def build_extensions(self) -> None: os.makedirs(self.build_temp) targets = [] - target_name = lambda s: remove_prefix(remove_prefix(s, "vllm."), - "vllm_flash_attn.") + target_name = lambda s: s.removeprefix("vllm.").removeprefix( + "vllm_flash_attn.") # Build all the extensions for ext in self.extensions: self.configure(ext) From ca9844b340f45f23f8d30fdce23777d215ad987c Mon Sep 17 00:00:00 2001 From: youkaichao Date: Tue, 5 Nov 2024 14:49:20 -0800 Subject: [PATCH 03/22] [bugfix] fix weak ref in piecewise cudagraph and tractable test (#10048) Signed-off-by: youkaichao --- tests/compile/piecewise/test_toy_llama.py | 111 ++++++++++++++++++++-- vllm/compilation/backends.py | 82 +++++++++++++--- 2 files changed, 168 insertions(+), 25 deletions(-) diff --git a/tests/compile/piecewise/test_toy_llama.py b/tests/compile/piecewise/test_toy_llama.py index e3e5a7d0fc5a5..9c65059c6b348 100644 --- a/tests/compile/piecewise/test_toy_llama.py +++ b/tests/compile/piecewise/test_toy_llama.py @@ -1,6 +1,10 @@ """ Test the piecewise compilation with a simple model, comparing the output with and without the piecewise compilation. 
+ +This is a tractable model, the weights and computation are specially designed +if the config `tractable_init` is set to True. Otherwise, the weights are +initialized randomly with a fixed seed. """ import os from dataclasses import dataclass @@ -49,6 +53,12 @@ class LlamaConfig: mlp_size: int = 256 vocab_size: int = 128 num_layers: int = 2 + init_value: float = 1.0 + tractable_init: bool = False + random_seed: int = 0 + + def __post_init__(self): + assert self.mlp_size >= self.hidden_size class LlamaMLP(nn.Module): @@ -66,10 +76,23 @@ def __init__(self, config: LlamaConfig) -> None: bias=False, ) - self.gate_up_projection.weight.data.fill_(0.0) - self.down_projection.weight.data.fill_(0.0) + if config.tractable_init: + nn.init.eye_(self.gate_up_projection.weight.data[:config.mlp_size]) + nn.init.eye_(self.gate_up_projection.weight.data[config.mlp_size:]) + nn.init.eye_(self.down_projection.weight.data) + else: + nn.init.xavier_normal_(self.gate_up_projection.weight.data, + generator=torch.Generator().manual_seed( + config.random_seed), + gain=0.001) + nn.init.xavier_normal_(self.down_projection.weight.data, + generator=torch.Generator().manual_seed( + config.random_seed), + gain=0.001) def forward(self, x): + # for tractable_init and positive input, this is + # essentially an elementwise-square x = self.gate_up_projection(x) x = x[:, :x.size(1) // 2] * torch.nn.functional.relu( x[:, x.size(1) // 2:]) @@ -84,21 +107,39 @@ def __init__(self, config: LlamaConfig) -> None: self.qkv_projection = nn.Linear( in_features=config.hidden_size, out_features=config.hidden_size * 3, + bias=False, ) self.output_projection = nn.Linear( in_features=config.hidden_size, out_features=config.hidden_size, + bias=False, ) - self.qkv_projection.weight.data.fill_(0.0) - self.output_projection.weight.data.fill_(0.0) + if config.tractable_init: + nn.init.eye_(self.qkv_projection.weight.data[:config.hidden_size]) + nn.init.eye_(self.qkv_projection.weight.data[config.hidden_size:2 * + config.hidden_size]) + nn.init.eye_(self.qkv_projection.weight.data[2 * + config.hidden_size:]) + nn.init.eye_(self.output_projection.weight.data) + else: + nn.init.xavier_normal_(self.qkv_projection.weight.data, + generator=torch.Generator().manual_seed( + config.random_seed), + gain=0.001) + nn.init.xavier_normal_(self.output_projection.weight.data, + generator=torch.Generator().manual_seed( + config.random_seed), + gain=0.001) def forward( self, positions: torch.Tensor, hidden_states: torch.Tensor, ) -> torch.Tensor: + # for tractable_init, this is: + # output = (hidden_states * 3 + positions * 2) qkv = self.qkv_projection(hidden_states) hidden_size = qkv.size(-1) // 3 q, k, v = qkv.split([hidden_size, hidden_size, hidden_size], dim=-1) @@ -126,20 +167,29 @@ def forward( hidden_states: torch.Tensor, residual: Optional[torch.Tensor], ) -> Tuple[torch.Tensor, torch.Tensor]: + """ + For tractable computation: + - if residual is None, the outputs are: + - residual = (hidden_states + 1) * 3 + positions * 2 + hidden_states = hidden_states * 4 + positions * 2 + 3 + - hidden_states = (residual + 1) ** 2 + - if residual is not None, the outputs are: + - residual = (hidden_states + residual + 1) * 3 + positions * 2 + hidden_states + residual = (hidden_states + residual) * 4 + positions * 2 + 3 + - hidden_states = (residual + 1) ** 2 + """ # noqa if residual is None: residual = hidden_states - hidden_states = hidden_states / 2 + hidden_states = hidden_states + 1 else: hidden_states = hidden_states + residual residual = hidden_states - 
hidden_states = hidden_states / 2 + hidden_states = hidden_states + 1 hidden_states = self.self_attention(positions=positions, hidden_states=hidden_states) hidden_states = hidden_states + residual residual = hidden_states - hidden_states = hidden_states / 2 + hidden_states = hidden_states + 1 hidden_states = self.mlp(hidden_states) return hidden_states, residual @@ -156,7 +206,8 @@ def __init__(self, config: LlamaConfig) -> None: self.layers = nn.ModuleList( [LlamaDecoderLayer(config) for _ in range(config.num_layers)]) - self.embedding_tokens.weight.data.fill_(0.0) + # this is the initial value of the hidden states + self.embedding_tokens.weight.data.fill_(config.init_value) def forward( self, @@ -170,6 +221,28 @@ def forward( return hidden_states +def tractable_computation(input_ids: torch.Tensor, + positions: torch.Tensor, + config: LlamaConfig, + init_value: float = 1.0) -> torch.Tensor: + hidden_states = torch.ones(input_ids.size(0), + config.hidden_size, + device=input_ids.device, + dtype=input_ids.dtype) * init_value + + # first layer + residual = hidden_states * 4 + positions.unsqueeze(1) * 2 + 3 + hidden_states = (residual + 1)**2 + + # following layers + for _ in range(config.num_layers - 1): + hidden_states = hidden_states + residual + residual = hidden_states * 4 + positions.unsqueeze(1) * 2 + 3 + hidden_states = (residual + 1)**2 + + return hidden_states + + @torch.inference_mode def run_model(llama_config, use_compile: bool, @@ -213,7 +286,15 @@ def run_model(llama_config, del os.environ["VLLM_TORCH_COMPILE_LEVEL"] set_compilation_config(None) - return output.cpu() + output = output.cpu() + + if llama_config.tractable_init: + expected_output = tractable_computation(input_ids[:2], positions[:2], + llama_config).cpu() + + assert torch.allclose(output, expected_output) + else: + return output.cpu() def test_toy_llama(): @@ -222,7 +303,13 @@ def test_toy_llama(): llama_config = LlamaConfig(hidden_size=128, mlp_size=256, vocab_size=128, - num_layers=2) + num_layers=12) + + tractable_config = LlamaConfig(hidden_size=128, + mlp_size=256, + vocab_size=128, + num_layers=2, + tractable_init=True) outputs = [] with compilation_counter.expect( @@ -233,6 +320,8 @@ def test_toy_llama(): num_cudagraph_caputured=0, ): outputs.append(run_model(llama_config, use_compile=False)) + run_model(tractable_config, use_compile=False) + with compilation_counter.expect( num_graphs_seen=1, # one graph for the model num_piecewise_graphs_seen=1, @@ -242,6 +331,7 @@ def test_toy_llama(): 2, # num_cudagraph_sizes * num_piecewise_capturable_graphs_seen ): outputs.append(run_model(llama_config, use_compile=True)) + run_model(tractable_config, use_compile=True) with compilation_counter.expect( num_graphs_seen=1, # one graph for the model @@ -257,6 +347,7 @@ def test_toy_llama(): ): outputs.append( run_model(llama_config, use_compile=True, split_attn=True)) + run_model(tractable_config, use_compile=True, split_attn=True) for i in range(1, len(outputs)): assert torch.allclose(outputs[0], outputs[i]) diff --git a/vllm/compilation/backends.py b/vllm/compilation/backends.py index 96ddcba467c5b..de32cabbe6d07 100644 --- a/vllm/compilation/backends.py +++ b/vllm/compilation/backends.py @@ -6,6 +6,7 @@ import torch import torch.fx as fx +import vllm.envs as envs from vllm.logger import init_logger from vllm.utils import weak_ref_tensors @@ -193,6 +194,7 @@ def wrap_inductor(graph, @dataclasses.dataclass class SplitItem: submod_name: str + graph_id: int is_splitting_graph: bool graph: fx.GraphModule @@ -226,9 +228,7 @@ 
def split_graph(graph: fx.GraphModule, outputs = [] - # sort the names to make sure the order is deterministic names = [name for (name, module) in split_gm.named_modules()] - names.sort() for name in names: if "." in name or name == "": @@ -238,7 +238,11 @@ def split_graph(graph: fx.GraphModule, module = getattr(split_gm, name) graph_id = int(name.replace("submod_", "")) - outputs.append(SplitItem(name, graph_id in split_op_graphs, module)) + outputs.append( + SplitItem(name, graph_id, (graph_id in split_op_graphs), module)) + + # sort by intetger graph_id, rather than string name + outputs.sort(key=lambda x: x.graph_id) return split_gm, outputs @@ -252,6 +256,11 @@ class PiecewiseCompileInterpreter(torch.fx.Interpreter): It runs the given graph with fake inputs, and compile some submodules specified by `compile_submod_names` with the given compilation configs. + + NOTE: the order in `compile_submod_names` matters, because + it will be used to determine the order of the compiled piecewise + graphs. The first graph will handle logging, and the last graph + has some special cudagraph output handling. """ def __init__(self, module: torch.fx.GraphModule, @@ -263,7 +272,6 @@ def __init__(self, module: torch.fx.GraphModule, self.compile_submod_names = compile_submod_names self.compilation_configs = compilation_configs self.graph_pool = graph_pool - self.have_seen_first_graph = False def run(self, *args): fake_args = [ @@ -279,6 +287,7 @@ def call_module(self, target: torch.fx.node.Target, output = super().call_module(target, args, kwargs) if target in self.compile_submod_names: + index = self.compile_submod_names.index(target) submod = self.fetch_attr(target) sym_shape_indices = [ i for i, x in enumerate(args) if isinstance(x, torch.SymInt) @@ -288,15 +297,14 @@ def call_module(self, target: torch.fx.node.Target, args, self.compilation_configs.inductor_compile_config, runtime_shape=None, - do_logging=not self.have_seen_first_graph, + do_logging=index == 0, use_inductor=self.compilation_configs.use_inductor) self.module.__dict__[target] = PiecewiseBackend( - submod, self.compilation_configs, self.graph_pool, - not self.have_seen_first_graph, sym_shape_indices, + submod, self.compilation_configs, self.graph_pool, index, + len(self.compile_submod_names), sym_shape_indices, compiled_graph_for_general_shape) - self.have_seen_first_graph = True compilation_counter.num_piecewise_capturable_graphs_seen += 1 return output @@ -352,8 +360,9 @@ def __call__(self, graph: fx.GraphModule, example_inputs) -> Callable: graph, self.compilation_configs.non_cudagraph_ops) from torch._dynamo.utils import lazy_format_graph_code - logger.debug("%s", - lazy_format_graph_code("stiching module", self.split_gm)) + logger.debug("%s", lazy_format_graph_code("before split", self.graph)) + logger.debug("%s", lazy_format_graph_code("after split", + self.split_gm)) compilation_counter.num_piecewise_graphs_seen += len( self.piecewise_graphs) @@ -385,12 +394,17 @@ class ConcreteSizeEntry: cudagraph: Optional[torch.cuda.CUDAGraph] = None output: Optional[Any] = None + # for cudagraph debugging, track the input addresses + # during capture, and check if they are the same during replay + input_addresses: Optional[List[int]] = None + class PiecewiseBackend: def __init__(self, graph: fx.GraphModule, compilation_configs: CompilationConfig, graph_pool: Any, - is_first_graph: bool, sym_shape_indices: List[int], + piecewise_compile_index: int, total_piecewise_compiles: int, + sym_shape_indices: List[int], compiled_graph_for_general_shape: 
Callable): """ The backend for piecewise compilation. @@ -408,7 +422,12 @@ def __init__(self, graph: fx.GraphModule, self.graph = graph self.compilation_configs = compilation_configs self.graph_pool = graph_pool - self.is_first_graph = is_first_graph + self.piecewise_compile_index = piecewise_compile_index + self.total_piecewise_compiles = total_piecewise_compiles + + self.is_first_graph = piecewise_compile_index == 0 + self.is_last_graph = ( + piecewise_compile_index == total_piecewise_compiles - 1) self.compile_sizes: Set[int] = set( self.compilation_configs.compile_sizes) @@ -422,6 +441,8 @@ def __init__(self, graph: fx.GraphModule, self.sym_shape_indices = sym_shape_indices + self.is_debugging_mode = envs.VLLM_LOGGING_LEVEL == "DEBUG" + # the entries for different shapes that we need to either # compile or capture cudagraph self.concrete_size_entries: Dict[int, ConcreteSizeEntry] = {} @@ -476,14 +497,45 @@ def __call__(self, *args) -> Any: logger.info("Capturing a cudagraph for shape %s", runtime_shape) + input_addresses = [ + x.data_ptr() for x in args if isinstance(x, torch.Tensor) + ] + entry.input_addresses = input_addresses cudagraph = torch.cuda.CUDAGraph() + + # mind-exploding: carefully manage the reference and memory. with torch.cuda.graph(cudagraph, pool=self.graph_pool): - entry.output = weak_ref_tensors(entry.runnable(*args)) + # `output` is managed by pytorch's cudagraph pool + output = entry.runnable(*args) + if self.is_last_graph: + # by converting it to weak ref, + # the original `output` will immediately be released + # to save memory. It is only safe to do this for + # the last graph, because the output of the last graph + # will not be used by any other cuda graph. + output = weak_ref_tensors(output) + + # here we always use weak ref for the output + # to save memory + entry.output = weak_ref_tensors(output) + entry.cudagraph = cudagraph compilation_counter.num_cudagraph_caputured += 1 - entry.cudagraph = cudagraph - return entry.output + # important: we need to return the output, rather than + # the weak ref of the output, so that pytorch can correctly + # manage the memory during cuda graph capture + return output + + if self.is_debugging_mode: + # check if the input addresses are the same + new_input_addresses = [ + x.data_ptr() for x in args if isinstance(x, torch.Tensor) + ] + assert new_input_addresses == entry.input_addresses, ( + "Input addresses for cudagraphs are different during replay." 
+ f" Expected {entry.input_addresses}, got {new_input_addresses}" + ) entry.cudagraph.replay() return entry.output From 43300bd98a54d48e97d9fb78c9db88eda3a88c64 Mon Sep 17 00:00:00 2001 From: zifeitong Date: Tue, 5 Nov 2024 16:34:40 -0800 Subject: [PATCH 04/22] [Bugfix] Properly propagate trust_remote_code settings (#10047) Signed-off-by: Zifei Tong --- vllm/model_executor/models/chatglm.py | 7 ++++--- vllm/model_executor/models/molmo.py | 22 ++++++++++++---------- 2 files changed, 16 insertions(+), 13 deletions(-) diff --git a/vllm/model_executor/models/chatglm.py b/vllm/model_executor/models/chatglm.py index c3c9ec703c1e6..181f3c2b0fc35 100644 --- a/vllm/model_executor/models/chatglm.py +++ b/vllm/model_executor/models/chatglm.py @@ -54,8 +54,9 @@ def mm_input_mapper_for_glmv( data: MultiModalData[object], ) -> Dict: model_config = ctx.model_config - tokenizer = cached_get_tokenizer(model_config.tokenizer, - trust_remote_code=True) + tokenizer = cached_get_tokenizer( + model_config.tokenizer, + trust_remote_code=model_config.trust_remote_code) if tokenizer is None: raise RuntimeError("No HuggingFace processor is available " "to process the image object") @@ -525,7 +526,7 @@ def _parse_and_validate_image_input( elif isinstance(pixel_values, list): return torch.concat(pixel_values) else: - raise TypeError("""pixel_values must be a torch.Tensor + raise TypeError("""pixel_values must be a torch.Tensor or a list of torch.Tensor """) return GLMImagePixelInputs(pixel_values=pixel_values) diff --git a/vllm/model_executor/models/molmo.py b/vllm/model_executor/models/molmo.py index ba798833e26a9..07c06149f0206 100644 --- a/vllm/model_executor/models/molmo.py +++ b/vllm/model_executor/models/molmo.py @@ -844,9 +844,10 @@ def get_max_tokens(max_crops: int, crop_patches: int, left_margin: int, def get_max_molmo_image_tokens(ctx: InputContext) -> int: - processor = cached_get_processor(ctx.model_config.model, - trust_remote_code=True, - revision=ctx.model_config.code_revision) + processor = cached_get_processor( + ctx.model_config.model, + trust_remote_code=ctx.model_config.trust_remote_code, + revision=ctx.model_config.code_revision) image_processor = processor.image_processor max_llm_image_tokens = get_max_tokens( image_processor.max_crops, @@ -870,9 +871,10 @@ def image_input_mapper_for_molmo( def dummy_data_for_molmo(ctx: InputContext, seq_len: int, mm_counts: Mapping[str, int]): - processor = cached_get_processor(ctx.model_config.model, - trust_remote_code=True, - revision=ctx.model_config.code_revision) + processor = cached_get_processor( + ctx.model_config.model, + trust_remote_code=ctx.model_config.trust_remote_code, + revision=ctx.model_config.code_revision) image_processor = processor.image_processor base_image_input_d = image_processor.image_patch_size @@ -935,11 +937,11 @@ def input_processor_for_molmo(ctx: InputContext, inputs: DecoderOnlyInputs): multi_modal_data = inputs.get("multi_modal_data") image = None if multi_modal_data is None else multi_modal_data.get("image") - processor = cached_get_processor(ctx.model_config.model, - trust_remote_code=True, - revision=ctx.model_config.code_revision) - model_config = ctx.model_config + processor = cached_get_processor( + ctx.model_config.model, + trust_remote_code=model_config.trust_remote_code, + revision=ctx.model_config.code_revision) tokenizer = cached_get_tokenizer( model_config.tokenizer, trust_remote_code=model_config.trust_remote_code) From 966e31697bdeb47b33b3e26b4aab5999c85f3e90 Mon Sep 17 00:00:00 2001 From: Wallas Henrique Date: 
Tue, 5 Nov 2024 21:39:26 -0300 Subject: [PATCH 05/22] [Bugfix] Fix pickle of input when async output processing is on (#9931) Signed-off-by: Wallas Santos --- .../test_basic_correctness.py | 26 +++++++++++++++++++ vllm/worker/model_runner.py | 12 +++++++++ 2 files changed, 38 insertions(+) diff --git a/tests/basic_correctness/test_basic_correctness.py b/tests/basic_correctness/test_basic_correctness.py index 79647589d5204..7f16baa65a644 100644 --- a/tests/basic_correctness/test_basic_correctness.py +++ b/tests/basic_correctness/test_basic_correctness.py @@ -156,3 +156,29 @@ def test_model_with_failure(vllm_runner) -> None: ModelInputForGPUWithSamplingMetadata) finally: os.remove(filename) + + +def test_failure_with_async_out_proc(vllm_runner) -> None: + + filename = None + try: + with vllm_runner("facebook/opt-125m", + dtype="half", + enforce_eager=False, + gpu_memory_utilization=0.7) as vllm_model,\ + patch("vllm.model_executor.models.opt.OPTForCausalLM.forward", + side_effect=ValueError()): + model_config = vllm_model.model.llm_engine.model_config + assert model_config.use_async_output_proc + with pytest.raises(ValueError) as exc_info: + vllm_model.generate_greedy('how to make pizza?', 250) + matches = re.search(r"input dumped to (.+).pkl", + str(exc_info.value)) + assert matches is not None + + filename = f"{matches.group(1)}.pkl" + finally: + # Clean up + if filename is not None: + os.remove(filename) + pass diff --git a/vllm/worker/model_runner.py b/vllm/worker/model_runner.py index 2447eecf7957d..1e8ea4e8e79cf 100644 --- a/vllm/worker/model_runner.py +++ b/vllm/worker/model_runner.py @@ -136,6 +136,18 @@ def from_broadcasted_tensor_dict( attn_backend, tensor_dict) return cls(**tensor_dict) + # Exclude `async_callback` to be able to pickle this object + def __getstate__(self): + state = self.__dict__.copy() + del state["async_callback"] + return state + + # TODO: What happens when we depickle this object? + # How can we update this callback to properly pass it to the engine? 
+ def __setstate__(self, state): + self.__dict__.update(state) + self.__dict__.update({'async_callback': None}) + @dataclass(frozen=True) class ModelInputForGPUWithSamplingMetadata(ModelInputForGPU): From 0c63c34f725f0b519fa094fbeca6e3cf12c911c1 Mon Sep 17 00:00:00 2001 From: Sungjae Lee <33976427+llsj14@users.noreply.github.com> Date: Wed, 6 Nov 2024 10:45:45 +0900 Subject: [PATCH 06/22] [Bugfix][SpecDecode] kv corruption with bonus tokens in spec decode (#9730) Co-authored-by: LiuXiaoxuanPKU --- tests/spec_decode/test_multi_step_worker.py | 107 ++++++++++++++++++++ tests/spec_decode/utils.py | 4 +- vllm/spec_decode/draft_model_runner.py | 35 ++++++- vllm/spec_decode/multi_step_worker.py | 23 ++++- 4 files changed, 159 insertions(+), 10 deletions(-) diff --git a/tests/spec_decode/test_multi_step_worker.py b/tests/spec_decode/test_multi_step_worker.py index e6f7f480eebb2..0b5d82b6610ca 100644 --- a/tests/spec_decode/test_multi_step_worker.py +++ b/tests/spec_decode/test_multi_step_worker.py @@ -5,6 +5,8 @@ import pytest import torch +from vllm.attention.selector import (_Backend, + global_force_attn_backend_context_manager) from vllm.model_executor.layers.sampler import SamplerOutput from vllm.model_executor.utils import set_random_seed from vllm.sequence import (ExecuteModelRequest, HiddenStates, Logprob, @@ -303,6 +305,7 @@ def test_multi_step_with_batch_expansion_correct_output(): seed, model_runner_cls=TP1DraftModelRunner, ) + multi_step_worker.set_include_gpu_probs_tensor() worker = create_worker( Worker, model_name, @@ -397,6 +400,7 @@ def test_multi_step_with_batch_expansion_incorrect_output(): seed, model_runner_cls=TP1DraftModelRunner, ) + multi_step_worker.set_include_gpu_probs_tensor() worker = create_worker( Worker, model_name, @@ -477,6 +481,109 @@ def test_multi_step_with_batch_expansion_incorrect_output(): assert (num_mismatch > 0) +@torch.inference_mode() +@pytest.mark.parametrize('num_steps', [1, 2, 3, 4]) +# The choice of backends forces the multi_step_worker to choose between +# the vanilla model_runner and TP1DraftModelRunner and that we can test +# both code paths. +@pytest.mark.parametrize('attn_backend', + [_Backend.XFORMERS, _Backend.FLASH_ATTN]) +def test_multi_step_correct_kvcache(num_steps, attn_backend): + """Verify that the KV cache of the draft model + is correctly updated for sequences with bonus token. 
+ """ + seed = 100 + model_name = "JackFram/llama-68m" + + block_size = 16 + num_gpu_blocks = 2048 // block_size + batch_size = 1 + + with global_force_attn_backend_context_manager(attn_backend): + dtype = 'float16' if attn_backend == _Backend.FLASH_ATTN else 'float32' + multi_step_worker = create_worker(MultiStepWorker, + model_name, + block_size, + num_gpu_blocks, + seed, + model_runner_cls=TP1DraftModelRunner, + dtype=dtype) + multi_step_worker.set_include_gpu_probs_tensor() + worker = create_worker(Worker, + model_name, + block_size, + num_gpu_blocks, + seed, + dtype=dtype) + + prompts = [[0] for _ in range(batch_size)] + # Already generate two tokens for the sequence + # so that we can simulate the bonus token case + multi_step_continuations = [[ + random.randint(0, 1000), + random.randint(0, 1000) + ] for _ in prompts] + final_prompt_lens = [len(prompt) + 2 + num_steps for prompt in prompts] + + seq_ids_with_bonus_token_in_last_step = set(range(batch_size)) + seq_group_metadata_list = create_seq_group_metadata_from_prompts( + prompts, + num_gpu_blocks, + block_size, + continuations=multi_step_continuations, + final_prompt_lens=final_prompt_lens) + + # Run multi-step. + zero_kv_cache(multi_step_worker.cache_engine) + multi_step_worker.sampler_output(execute_model_req=ExecuteModelRequest( + seq_group_metadata_list=seq_group_metadata_list), + sample_len=num_steps, + seq_ids_with_bonus_token_in_last_step= + seq_ids_with_bonus_token_in_last_step) + + # Run single-step repeatedly. + zero_kv_cache(worker.cache_engine) + # Generate the kv cache for the bonus token first + single_step_continuations = [c[:1] for c in multi_step_continuations] + seq_group_metadata_list = create_seq_group_metadata_from_prompts( + prompts, + num_gpu_blocks, + block_size, + continuations=single_step_continuations, + final_prompt_lens=final_prompt_lens) + single_step_output = worker.execute_model( + execute_model_req=ExecuteModelRequest( + seq_group_metadata_list=seq_group_metadata_list)) + for _ in range(num_steps): + seq_group_metadata_list = create_seq_group_metadata_from_prompts( + prompts, + num_gpu_blocks, + block_size, + continuations=multi_step_continuations, + final_prompt_lens=final_prompt_lens) + + single_step_output = worker.execute_model( + execute_model_req=ExecuteModelRequest( + seq_group_metadata_list=seq_group_metadata_list)) + + for i, seq_group_output in enumerate(single_step_output[-1]): + multi_step_continuations[i].append( + seq_group_output.samples[0].output_token) + + # Verify that the KV cache of the single-step and + # multi-step workers are the same. 
+ single_step_gpu_cache = worker.cache_engine[0].gpu_cache + multi_step_gpu_cache = multi_step_worker.cache_engine[0].gpu_cache + num_layers = len(single_step_gpu_cache) + allclose = lambda a, b: torch.allclose( + a.cuda(), b.cuda(), rtol=1e-2, atol=1e-2) + for i in range(num_layers): + assert allclose(single_step_gpu_cache[i][0], + multi_step_gpu_cache[i][0]) + assert allclose(single_step_gpu_cache[i][1], + multi_step_gpu_cache[i][1]) + + @torch.inference_mode() def test_draft_proposals_full_speculation_len(): """Verify Top1Proposer correctly handles case where all sequences diff --git a/tests/spec_decode/utils.py b/tests/spec_decode/utils.py index 6cf0cfb09b8fa..e5cb0530f9961 100644 --- a/tests/spec_decode/utils.py +++ b/tests/spec_decode/utils.py @@ -68,12 +68,14 @@ def create_worker(cls: Callable[..., T], seed: int, is_driver_worker: bool = True, enforce_eager: bool = True, - model_runner_cls: Optional[ModelRunner] = None) -> T: + model_runner_cls: Optional[ModelRunner] = None, + dtype: Optional[str] = "auto") -> T: engine_args = EngineArgs( model=model_name, seed=seed, block_size=block_size, enforce_eager=enforce_eager, + dtype=dtype, ) engine_config = engine_args.create_engine_config() diff --git a/vllm/spec_decode/draft_model_runner.py b/vllm/spec_decode/draft_model_runner.py index 17cc0ad1a4a3a..6330ac027db74 100644 --- a/vllm/spec_decode/draft_model_runner.py +++ b/vllm/spec_decode/draft_model_runner.py @@ -54,6 +54,8 @@ def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) + self.indices_of_seq_with_bonus_tokens = None + def _update_sampling_metadata(self, sampling_metadata, num_seqs, num_queries): @@ -159,6 +161,10 @@ def supports_gpu_multi_step(self, execute_model_req: ExecuteModelRequest): # TODO: Add soft-tuning prompt adapter support return not self.prompt_adapter_config + def set_indices_of_seq_with_bonus_tokens(self, + indices_of_seq_with_bonus_tokens): + self.indices_of_seq_with_bonus_tokens = indices_of_seq_with_bonus_tokens + @torch.inference_mode() def execute_model( self, @@ -284,11 +290,30 @@ def execute_model( model_input.sampling_metadata) # Sample the next token. - outputs.append( - self.model.sample( - logits=logits, - sampling_metadata=model_input.sampling_metadata, - )) + output = self.model.sample( + logits=logits, + sampling_metadata=model_input.sampling_metadata, + ) + outputs.append(output) + + if model_input.attn_metadata.num_prefills == 0 \ + and self.indices_of_seq_with_bonus_tokens is not None: + assert output.sampled_token_ids is not None + # output.sampled_token_ids should be of shape (num_seqs, 1) + nums_seqs, num_tokens_per_seq = output.sampled_token_ids.shape + assert num_tokens_per_seq == 1 + count = 0 + for i in range(nums_seqs): + bonus_seq_idx = self.indices_of_seq_with_bonus_tokens[ + count] + if i != bonus_seq_idx: + # The following might cause a cpu->gpu sync + # However, the performance impact is negligible as we + # benchmarked on H100. 
+ output.sampled_token_ids[ + i, :] = model_input.input_tokens[bonus_seq_idx] + else: + count += 1 # Prepare inputs for the next step if step != num_steps - 1: diff --git a/vllm/spec_decode/multi_step_worker.py b/vllm/spec_decode/multi_step_worker.py index 4b53fbe056c47..f49b98f5c9528 100644 --- a/vllm/spec_decode/multi_step_worker.py +++ b/vllm/spec_decode/multi_step_worker.py @@ -81,6 +81,8 @@ def sampler_output( # Here we run the draft_model_runner with multi-step prepare # on the GPU directly expanded_request.num_steps = sample_len + self.model_runner.set_indices_of_seq_with_bonus_tokens( + indices_of_seq_with_bonus_tokens) model_outputs = self.execute_model( execute_model_req=expanded_request) else: @@ -97,7 +99,8 @@ def sampler_output( model_output = model_output[0] self._append_new_tokens( - model_output, expanded_request.seq_group_metadata_list) + model_output, expanded_request.seq_group_metadata_list, + indices_of_seq_with_bonus_tokens) model_outputs.append(model_output) filtered_model_outputs = self._filter_model_output( @@ -221,13 +224,15 @@ def get_spec_proposals( @staticmethod def _append_new_tokens( model_output: List[SamplerOutput], - seq_group_metadata_list: List[SequenceGroupMetadata]) -> None: + seq_group_metadata_list: List[SequenceGroupMetadata], + indices_of_seq_with_bonus_tokens: List[int]) -> None: """Given model output from a single run, append the tokens to the sequences. This is normally done outside of the worker, but it is required if the worker is to perform multiple forward passes. """ - for seq_group_metadata, sequence_group_outputs in zip( - seq_group_metadata_list, model_output): + count = 0 + for index, (seq_group_metadata, sequence_group_outputs) in enumerate( + zip(seq_group_metadata_list, model_output)): seq_group_metadata.is_prompt = False for seq_output in sequence_group_outputs.samples: @@ -237,6 +242,16 @@ def _append_new_tokens( token_id = seq_output.output_token token_logprob = seq_output.logprobs[token_id] + # Determine the actual token ID to be generated, + # considering bonus tokens + if index != indices_of_seq_with_bonus_tokens[count]: + bonus_seq_metadata = seq_group_metadata_list[ + indices_of_seq_with_bonus_tokens[count]] + _, bonus_token_seq_data = next( + iter(bonus_seq_metadata.seq_data.items())) + token_id = bonus_token_seq_data.output_token_ids[-1] + else: + count += 1 seq.append_token_id(token_id, token_logprob.logprob) seq.update_num_computed_tokens(1) From c4cacbaa7faf9d0d3b2aa26e5df496724e80cb05 Mon Sep 17 00:00:00 2001 From: youkaichao Date: Tue, 5 Nov 2024 18:19:50 -0800 Subject: [PATCH 07/22] [v1] reduce graph capture time for piecewise cudagraph (#10059) Signed-off-by: youkaichao --- vllm/compilation/backends.py | 36 +++++++++++++++++++++++++----------- 1 file changed, 25 insertions(+), 11 deletions(-) diff --git a/vllm/compilation/backends.py b/vllm/compilation/backends.py index de32cabbe6d07..05deee7bd5473 100644 --- a/vllm/compilation/backends.py +++ b/vllm/compilation/backends.py @@ -1,7 +1,9 @@ import copy import dataclasses import operator +from contextlib import ExitStack from typing import Any, Callable, Dict, List, Optional, Set, Tuple, Union +from unittest.mock import patch import torch import torch.fx as fx @@ -503,17 +505,29 @@ def __call__(self, *args) -> Any: entry.input_addresses = input_addresses cudagraph = torch.cuda.CUDAGraph() - # mind-exploding: carefully manage the reference and memory. 
- with torch.cuda.graph(cudagraph, pool=self.graph_pool): - # `output` is managed by pytorch's cudagraph pool - output = entry.runnable(*args) - if self.is_last_graph: - # by converting it to weak ref, - # the original `output` will immediately be released - # to save memory. It is only safe to do this for - # the last graph, because the output of the last graph - # will not be used by any other cuda graph. - output = weak_ref_tensors(output) + with ExitStack() as stack: + if not self.is_first_graph: + # during every model forward, we will capture + # many pieces of cudagraphs (roughly one per layer). + # running gc again and again across layers will + # make the cudagraph capture very slow. + # therefore, we only run gc for the first graph, + # and disable gc for the rest of the graphs. + stack.enter_context(patch("gc.collect", lambda: None)) + stack.enter_context( + patch("torch.cuda.empty_cache", lambda: None)) + + # mind-exploding: carefully manage the reference and memory. + with torch.cuda.graph(cudagraph, pool=self.graph_pool): + # `output` is managed by pytorch's cudagraph pool + output = entry.runnable(*args) + if self.is_last_graph: + # by converting it to weak ref, + # the original `output` will immediately be released + # to save memory. It is only safe to do this for + # the last graph, because the output of the last graph + # will not be used by any other cuda graph. + output = weak_ref_tensors(output) # here we always use weak ref for the output # to save memory From 82bfc38d079b1ef5f4b88ac7094a00029d2e99af Mon Sep 17 00:00:00 2001 From: Cyrus Leung Date: Wed, 6 Nov 2024 12:05:05 +0800 Subject: [PATCH 08/22] [Misc] Sort the list of embedding models (#10037) Signed-off-by: DarkLight1337 --- vllm/model_executor/models/registry.py | 26 ++++++++------------------ 1 file changed, 8 insertions(+), 18 deletions(-) diff --git a/vllm/model_executor/models/registry.py b/vllm/model_executor/models/registry.py index af52fbffba19e..792c6cec34ae0 100644 --- a/vllm/model_executor/models/registry.py +++ b/vllm/model_executor/models/registry.py @@ -94,33 +94,23 @@ _EMBEDDING_MODELS = { # [Text-only] "BertModel": ("bert", "BertEmbeddingModel"), + "DeciLMForCausalLM": ("decilm", "DeciLMForCausalLM"), "Gemma2Model": ("gemma2", "Gemma2EmbeddingModel"), "LlamaModel": ("llama", "LlamaEmbeddingModel"), + **{ + # Multiple models share the same architecture, so we include them all + k: (mod, arch) for k, (mod, arch) in _TEXT_GENERATION_MODELS.items() + if arch == "LlamaForCausalLM" + }, "MistralModel": ("llama", "LlamaEmbeddingModel"), - "Qwen2ForRewardModel": ("qwen2_rm", "Qwen2ForRewardModel"), - "Qwen2ForSequenceClassification": ( - "qwen2_cls", "Qwen2ForSequenceClassification"), - "LlamaForCausalLM": ("llama", "LlamaForCausalLM"), "Phi3ForCausalLM": ("phi3", "Phi3ForCausalLM"), - "DeciLMForCausalLM": ("decilm", "DeciLMForCausalLM"), + "Qwen2ForRewardModel": ("qwen2_rm", "Qwen2ForRewardModel"), + "Qwen2ForSequenceClassification": ("qwen2_cls", "Qwen2ForSequenceClassification"), # noqa: E501 # [Multimodal] "LlavaNextForConditionalGeneration": ("llava_next", "LlavaNextForConditionalGeneration"), # noqa: E501 "Phi3VForCausalLM": ("phi3v", "Phi3VForCausalLM"), } -def add_embedding_models(base_models, embedding_models): - with_pooler_method_models = {} - embedding_models_name = embedding_models.keys() - for name, (path, arch) in base_models.items(): - if arch in embedding_models_name: - with_pooler_method_models[name] = (path, arch) - return with_pooler_method_models - -_EMBEDDING_MODELS = { - 
**add_embedding_models(_TEXT_GENERATION_MODELS, _EMBEDDING_MODELS), - **_EMBEDDING_MODELS, -} - _MULTIMODAL_MODELS = { # [Decoder-only] "Blip2ForConditionalGeneration": ("blip2", "Blip2ForConditionalGeneration"), From ffc0f2b47add6e0f70e2b5d4b4aaac64ee97f8ad Mon Sep 17 00:00:00 2001 From: Peter Salas Date: Tue, 5 Nov 2024 20:19:15 -0800 Subject: [PATCH 09/22] [Model][OpenVINO] Fix regressions from #8346 (#10045) Signed-off-by: Peter Salas --- .buildkite/run-openvino-test.sh | 2 +- vllm/attention/backends/openvino.py | 12 +++++++++++- vllm/model_executor/models/molmo.py | 6 +++--- 3 files changed, 15 insertions(+), 5 deletions(-) diff --git a/.buildkite/run-openvino-test.sh b/.buildkite/run-openvino-test.sh index 70e56596c4a86..35ad5c0ddde77 100755 --- a/.buildkite/run-openvino-test.sh +++ b/.buildkite/run-openvino-test.sh @@ -11,4 +11,4 @@ trap remove_docker_container EXIT remove_docker_container # Run the image and launch offline inference -docker run --network host --env VLLM_OPENVINO_KVCACHE_SPACE=1 --name openvino-test openvino-test python3 /workspace/vllm/examples/offline_inference.py +docker run --network host --env VLLM_OPENVINO_KVCACHE_SPACE=1 --name openvino-test openvino-test python3 /workspace/examples/offline_inference.py diff --git a/vllm/attention/backends/openvino.py b/vllm/attention/backends/openvino.py index 6fddfc2002120..be06d16009988 100644 --- a/vllm/attention/backends/openvino.py +++ b/vllm/attention/backends/openvino.py @@ -1,5 +1,5 @@ from dataclasses import dataclass -from typing import List, Tuple, Type +from typing import Dict, List, Optional, Tuple, Type import openvino as ov import torch @@ -7,6 +7,7 @@ from vllm.attention.backends.abstract import (AttentionBackend, AttentionMetadata) from vllm.attention.backends.utils import CommonAttentionState +from vllm.multimodal import MultiModalPlaceholderMap def copy_cache_block(src_tensor: ov.Tensor, dst_tensor: ov.Tensor, @@ -128,3 +129,12 @@ class OpenVINOAttentionMetadata: # Shape: scalar # Type: i32 max_context_len: torch.Tensor + + # The index maps that relate multi-modal embeddings to the corresponding + # placeholders. + # + # N.B. These aren't really related to attention and don't belong on this + # type -- this is just a temporary solution to make them available to + # `model_executable`. 
+ multi_modal_placeholder_index_maps: Optional[Dict[ + str, MultiModalPlaceholderMap.IndexMap]] diff --git a/vllm/model_executor/models/molmo.py b/vllm/model_executor/models/molmo.py index 07c06149f0206..522aa748f78b6 100644 --- a/vllm/model_executor/models/molmo.py +++ b/vllm/model_executor/models/molmo.py @@ -21,8 +21,8 @@ get_tensor_model_parallel_world_size, split_tensor_along_last_dim, tensor_model_parallel_all_gather) -from vllm.inputs import (INPUT_REGISTRY, DecoderOnlyInputs, InputContext, - token_inputs) +from vllm.inputs import (INPUT_REGISTRY, DecoderOnlyInputs, DummyData, + InputContext, token_inputs) from vllm.model_executor import SamplingMetadata from vllm.model_executor.layers.activation import QuickGELU, SiluAndMul from vllm.model_executor.layers.layernorm import RMSNorm @@ -915,7 +915,7 @@ def dummy_data_for_molmo(ctx: InputContext, seq_len: int, if "image_masks" in out: dummy_imgdata["image_masks"] = out["image_masks"] dummy_imgdata["seq_len"] = torch.tensor(seq_len, dtype=torch.long) - return dummy_seqdata, {"image": dummy_imgdata} + return DummyData(dummy_seqdata, {"image": dummy_imgdata}) def pad_images( From 2bcbae704c0d52913c6a2887260fc6bde6c20361 Mon Sep 17 00:00:00 2001 From: Travis Johnson Date: Tue, 5 Nov 2024 21:28:29 -0700 Subject: [PATCH 10/22] [Bugfix] Fix edge-case crash when using chat with the Mistral Tekken Tokenizer (#10051) Signed-off-by: Travis Johnson --- tests/models/decoder_only/language/test_mistral.py | 9 ++++++--- vllm/transformers_utils/tokenizers/mistral.py | 8 ++++++-- 2 files changed, 12 insertions(+), 5 deletions(-) diff --git a/tests/models/decoder_only/language/test_mistral.py b/tests/models/decoder_only/language/test_mistral.py index 5be44c54a717c..6ec4b7e7e3f71 100644 --- a/tests/models/decoder_only/language/test_mistral.py +++ b/tests/models/decoder_only/language/test_mistral.py @@ -10,19 +10,22 @@ MODELS = [ "mistralai/Mistral-7B-Instruct-v0.1", - "mistralai/Mistral-7B-Instruct-v0.3", - # Mistral-Nemo is to big for CI, but passes locally - # "mistralai/Mistral-Nemo-Instruct-2407" ] MISTRAL_FORMAT_MODELS = [ "mistralai/Mistral-7B-Instruct-v0.3", + # uses the v3-Tekken tokenizer + "mistralai/Ministral-8B-Instruct-2410", + # Mistral-Nemo is to big for CI, but passes locally + # "mistralai/Mistral-Nemo-Instruct-2407" ] SAMPLING_PARAMS = SamplingParams(max_tokens=512, temperature=0.0, logprobs=5) SYMBOLIC_LANG_PROMPTS = [ "勇敢な船乗りについての詩を書く", # japanese "寫一首關於勇敢的水手的詩", # chinese + "ပုံပြင်လေးပြောပြပါ်:\n", # burmese + "Repeat the phrase 'URGENCY🌶️':\nURGENCY🌶️\nURGENCY🌶️\n", # see https://github.com/vllm-project/vllm/pull/9625 ] # for function calling diff --git a/vllm/transformers_utils/tokenizers/mistral.py b/vllm/transformers_utils/tokenizers/mistral.py index 896f70bc1dafd..ccffdcc2a4df2 100644 --- a/vllm/transformers_utils/tokenizers/mistral.py +++ b/vllm/transformers_utils/tokenizers/mistral.py @@ -254,7 +254,7 @@ def decode(self, skip_special_tokens: bool = True) -> str: assert ( skip_special_tokens - ), "Skipping special tokens is not supported for Mistral tokenizers." + ), "skip_special_tokens=False is not supported for Mistral tokenizers." if isinstance(ids, int): ids = [ids] @@ -268,12 +268,16 @@ def convert_ids_to_tokens( # TODO(Patrick) - potentially allow special tokens to not be skipped assert ( skip_special_tokens - ), "Skipping special tokens is not supported for Mistral tokenizers." + ), "skip_special_tokens=False is not supported for Mistral tokenizers." 
assert isinstance(self.tokenizer, (Tekkenizer, SentencePieceTokenizer)), type( self.tokenizer) + if isinstance(self.tokenizer, Tekkenizer): + # skip special tokens + ids = [i for i in ids if i > self.tokenizer.num_special_tokens] + tokens = [self.tokenizer.id_to_piece(id) for id in ids] if any("�" in t for t in tokens): From ea928f608c44b825d28609460e0d375a5f877940 Mon Sep 17 00:00:00 2001 From: arakowsk-amd <182798202+arakowsk-amd@users.noreply.github.com> Date: Tue, 5 Nov 2024 21:10:40 -0800 Subject: [PATCH 11/22] [Bugfix] Gpt-j-6B patch kv_scale to k_scale path (#10063) Signed-off-by: Alex Rakowski Signed-off-by: Alex Rakowski <182798202+arakowsk-amd@users.noreply.github.com> --- vllm/model_executor/models/gpt_j.py | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/vllm/model_executor/models/gpt_j.py b/vllm/model_executor/models/gpt_j.py index 0451d16b6c738..9a42b359ae44f 100644 --- a/vllm/model_executor/models/gpt_j.py +++ b/vllm/model_executor/models/gpt_j.py @@ -36,7 +36,8 @@ from vllm.model_executor.layers.sampler import Sampler, SamplerOutput from vllm.model_executor.layers.vocab_parallel_embedding import ( ParallelLMHead, VocabParallelEmbedding) -from vllm.model_executor.model_loader.weight_utils import default_weight_loader +from vllm.model_executor.model_loader.weight_utils import ( + default_weight_loader, maybe_remap_kv_scale_name) from vllm.model_executor.sampling_metadata import SamplingMetadata from vllm.sequence import IntermediateTensors @@ -308,6 +309,9 @@ def load_weights(self, weights: Iterable[Tuple[str, torch.Tensor]]): weight_loader(param, loaded_weight, shard_id) break else: + name = maybe_remap_kv_scale_name(name, params_dict) + if name is None: + continue # Skip loading extra bias for GPTQ models. if name.endswith(".bias") and name not in params_dict: continue From 9d59b755934899b7ec5d7bb5b90d15bfd2302475 Mon Sep 17 00:00:00 2001 From: zifeitong Date: Tue, 5 Nov 2024 21:13:09 -0800 Subject: [PATCH 12/22] [Bugfix] Remove CustomChatCompletionContentPartParam multimodal input type (#10054) Signed-off-by: Zifei Tong --- vllm/entrypoints/chat_utils.py | 13 ++----------- 1 file changed, 2 insertions(+), 11 deletions(-) diff --git a/vllm/entrypoints/chat_utils.py b/vllm/entrypoints/chat_utils.py index 8da08d4b2c93c..2b339ab6d44e4 100644 --- a/vllm/entrypoints/chat_utils.py +++ b/vllm/entrypoints/chat_utils.py @@ -22,7 +22,6 @@ ChatCompletionToolMessageParam) # yapf: enable # pydantic needs the TypedDict from typing_extensions -from pydantic import ConfigDict from transformers import PreTrainedTokenizer, PreTrainedTokenizerFast from typing_extensions import Required, TypeAlias, TypedDict @@ -52,17 +51,10 @@ class ChatCompletionContentPartAudioParam(TypedDict, total=False): """The type of the content part.""" -class CustomChatCompletionContentPartParam(TypedDict, total=False): - __pydantic_config__ = ConfigDict(extra="allow") # type: ignore - - type: Required[str] - """The type of the content part.""" - - class CustomChatCompletionContentSimpleImageParam(TypedDict, total=False): """A simpler version of the param that only accepts a plain image_url. This is supported by OpenAI API, although it is not documented. - + Example: { "image_url": "https://example.com/image.jpg" @@ -73,7 +65,7 @@ class CustomChatCompletionContentSimpleImageParam(TypedDict, total=False): class CustomChatCompletionContentSimpleAudioParam(TypedDict, total=False): """A simpler version of the param that only accepts a plain audio_url. 
- + Example: { "audio_url": "https://example.com/audio.mp3" @@ -85,7 +77,6 @@ class CustomChatCompletionContentSimpleAudioParam(TypedDict, total=False): ChatCompletionContentPartParam: TypeAlias = Union[ OpenAIChatCompletionContentPartParam, ChatCompletionContentPartAudioParam, ChatCompletionContentPartRefusalParam, - CustomChatCompletionContentPartParam, CustomChatCompletionContentSimpleImageParam, CustomChatCompletionContentSimpleAudioParam, str] From 40899855520eb9497606bdb2b1b4e619233e598a Mon Sep 17 00:00:00 2001 From: Woosuk Kwon Date: Tue, 5 Nov 2024 22:16:04 -0800 Subject: [PATCH 13/22] [V1] Integrate Piecewise CUDA graphs (#10058) Signed-off-by: Woosuk Kwon --- vllm/compilation/backends.py | 7 +- vllm/v1/attention/backends/flash_attn.py | 35 ++++--- vllm/v1/worker/gpu_model_runner.py | 127 +++++++++++++++++++---- 3 files changed, 133 insertions(+), 36 deletions(-) diff --git a/vllm/compilation/backends.py b/vllm/compilation/backends.py index 05deee7bd5473..abd1d16accaf7 100644 --- a/vllm/compilation/backends.py +++ b/vllm/compilation/backends.py @@ -496,8 +496,11 @@ def __call__(self, *args) -> Any: return entry.runnable(*args) if self.is_first_graph: - logger.info("Capturing a cudagraph for shape %s", - runtime_shape) + # Since we capture cudagraph for many different shapes and + # capturing is fast, we don't need to log it for every shape. + # We only log it in the debug mode. + logger.debug("Capturing a cudagraph for shape %s", + runtime_shape) input_addresses = [ x.data_ptr() for x in args if isinstance(x, torch.Tensor) diff --git a/vllm/v1/attention/backends/flash_attn.py b/vllm/v1/attention/backends/flash_attn.py index b2af89ebf854a..906f06777a136 100644 --- a/vllm/v1/attention/backends/flash_attn.py +++ b/vllm/v1/attention/backends/flash_attn.py @@ -51,6 +51,7 @@ class FlashAttentionMetadata: # |-------------------- seq_len ---------------------| # |-- query_len ---| + num_actual_tokens: int # Number of tokens excluding padding. max_query_len: int query_start_loc: torch.Tensor max_seq_len: int @@ -134,7 +135,9 @@ def forward( assert k_scale == 1.0 and v_scale == 1.0, ( "key/v_scale is not supported in FlashAttention.") - output = torch.ops.vllm.unified_flash_attention( + output = torch.empty_like(query) + torch.ops.vllm.unified_flash_attention( + output, query, key, value, @@ -154,6 +157,7 @@ def forward( def unified_flash_attention( + output: torch.Tensor, query: torch.Tensor, key: torch.Tensor, value: torch.Tensor, @@ -168,17 +172,17 @@ def unified_flash_attention( window_size: Optional[List[int]] = None, alibi_slopes: Optional[torch.Tensor] = None, logits_soft_cap: Optional[float] = None, -) -> torch.Tensor: +) -> None: current_metadata = get_forward_context() if current_metadata is None: # Profiling run. - return torch.empty_like(query) + return assert current_metadata is not None assert isinstance(current_metadata, FlashAttentionMetadata) attn_metadata: FlashAttentionMetadata = current_metadata + num_actual_tokens = attn_metadata.num_actual_tokens - num_tokens, hidden_size = query.shape # Reshape the query, key, and value tensors. 
query = query.view(-1, num_heads, head_size) key = key.view(-1, num_kv_heads, head_size) @@ -188,18 +192,18 @@ def unified_flash_attention( key_cache = kv_cache[0] value_cache = kv_cache[1] torch.ops._C_cache_ops.reshape_and_cache_flash( - key, - value, - kv_cache[0], - kv_cache[1], + key[:num_actual_tokens], + value[:num_actual_tokens], + key_cache, + value_cache, attn_metadata.slot_mapping, kv_cache_dtype, k_scale, v_scale, ) - output = flash_attn_varlen_func( - q=query, + attn_output = flash_attn_varlen_func( + q=query[:num_actual_tokens], k=key_cache, v=value_cache, cu_seqlens_q=attn_metadata.query_start_loc, @@ -213,10 +217,13 @@ def unified_flash_attention( block_table=attn_metadata.block_table, softcap=logits_soft_cap, ) - return output.view(num_tokens, hidden_size) + attn_output = attn_output.view(num_actual_tokens, -1) + # TODO(woosuk): Optimize this. + output[:num_actual_tokens].copy_(attn_output) def unified_flash_attention_fake( + output: torch.Tensor, query: torch.Tensor, key: torch.Tensor, value: torch.Tensor, @@ -231,13 +238,13 @@ def unified_flash_attention_fake( window_size: Optional[List[int]] = None, alibi_slopes: Optional[torch.Tensor] = None, logits_soft_cap: Optional[float] = None, -) -> torch.Tensor: - return torch.empty_like(query) +) -> None: + return direct_register_custom_op( op_name="unified_flash_attention", op_func=unified_flash_attention, - mutates_args=["kv_cache"], + mutates_args=["kv_cache", "output"], fake_impl=unified_flash_attention_fake, ) diff --git a/vllm/v1/worker/gpu_model_runner.py b/vllm/v1/worker/gpu_model_runner.py index ae4239f8e1fab..63bf7c2e605a2 100644 --- a/vllm/v1/worker/gpu_model_runner.py +++ b/vllm/v1/worker/gpu_model_runner.py @@ -1,3 +1,5 @@ +import os +import time from dataclasses import dataclass from typing import TYPE_CHECKING, Dict, List, Optional, Set from unittest.mock import patch @@ -7,11 +9,16 @@ import torch.distributed import torch.nn as nn +from vllm import envs +from vllm.compilation.compile_context import set_compile_context +from vllm.compilation.config import CompilationConfig +from vllm.compilation.levels import CompilationLevel from vllm.config import VllmConfig from vllm.forward_context import set_forward_context from vllm.logger import init_logger from vllm.model_executor.model_loader import get_model from vllm.multimodal import MultiModalDataDict +from vllm.plugins import set_compilation_config from vllm.sampling_params import SamplingParams, SamplingType from vllm.utils import (STR_DTYPE_TO_TORCH_DTYPE, DeviceMemoryProfiler, cdiv, is_pin_memory_available) @@ -86,6 +93,18 @@ def __init__( pin_memory=self.pin_memory, ) + self.use_cuda_graph = (envs.VLLM_TORCH_COMPILE_LEVEL + == CompilationLevel.PIECEWISE + and not self.model_config.enforce_eager) + # TODO(woosuk): Provide an option to tune the max cudagraph batch size. + self.cudagraph_batch_sizes = [1, 2, 4] + [i for i in range(8, 513, 8)] + self.input_ids = torch.zeros(self.max_num_tokens, + dtype=torch.int32, + device=self.device) + self.positions = torch.zeros(self.max_num_tokens, + dtype=torch.int64, + device=self.device) + def _update_states(self, scheduler_output: "SchedulerOutput") -> None: # Remove stopped requests from the cached states. # Keep the states of the pre-empted requests. 
@@ -268,12 +287,16 @@ def _prepare_inputs(self, scheduler_output: "SchedulerOutput"): seq_start_loc_np[0] = 0 np.cumsum(seq_lens, out=seq_start_loc_np[1:]) - input_ids = input_ids.to(self.device, non_blocking=True) - positions = positions.to(self.device, non_blocking=True).long() + self.input_ids[:total_num_scheduled_tokens].copy_(input_ids, + non_blocking=True) + self.positions[:total_num_scheduled_tokens].copy_(positions, + non_blocking=True) + query_start_loc = query_start_loc.to(self.device, non_blocking=True) seq_start_loc = seq_start_loc.to(self.device, non_blocking=True) slot_mapping = slot_mapping.to(self.device, non_blocking=True).long() attn_metadata = FlashAttentionMetadata( + num_actual_tokens=total_num_scheduled_tokens, max_query_len=max_num_scheduled_tokens, query_start_loc=query_start_loc, max_seq_len=max_seq_len, @@ -287,7 +310,7 @@ def _prepare_inputs(self, scheduler_output: "SchedulerOutput"): # token from the partial request. # TODO: Support prompt logprobs. logits_indices = query_start_loc[1:] - 1 - return input_ids, positions, attn_metadata, logits_indices + return attn_metadata, logits_indices def _prepare_sampling( self, @@ -310,16 +333,26 @@ def execute_model( scheduler_output: "SchedulerOutput", ) -> ModelRunnerOutput: self._update_states(scheduler_output) - inputs = self._prepare_inputs(scheduler_output) - input_ids, positions, attn_metadata, logits_indices = inputs + attn_metadata, logits_indices = self._prepare_inputs(scheduler_output) + num_scheduled_tokens = scheduler_output.total_num_scheduled_tokens + if (self.use_cuda_graph + and num_scheduled_tokens <= self.cudagraph_batch_sizes[-1]): + # Use piecewise CUDA graphs. + # Add padding to the batch size. + num_input_tokens = self._get_padded_batch_size( + num_scheduled_tokens) + else: + # Eager mode. + num_input_tokens = num_scheduled_tokens with set_forward_context(attn_metadata): hidden_states = self.model( - input_ids=input_ids, - positions=positions, + input_ids=self.input_ids[:num_input_tokens], + positions=self.positions[:num_input_tokens], kv_caches=self.kv_caches, - attn_metadata=attn_metadata, + attn_metadata=None, ) + hidden_states = hidden_states[:num_scheduled_tokens] hidden_states = hidden_states[logits_indices] logits = self.model.compute_logits(hidden_states, None) @@ -371,6 +404,18 @@ def execute_model( return model_runner_output def load_model(self) -> None: + if self.use_cuda_graph: + # FIXME(woosuk): Currently, the custom ops are not supported + # in the piecewise compilation mode. We rely on TorchInductor + # to optimize the model. + os.environ["VLLM_CUSTOM_OPS"] = "none" + set_compilation_config( + CompilationConfig( + use_cudagraph=True, + non_cudagraph_ops=["vllm.unified_flash_attention"], + use_inductor=True, + )) + logger.info("Starting to load model %s...", self.model_config.model) with DeviceMemoryProfiler() as m: # noqa: SIM117 with patch("vllm.model_executor.layers.sampler.Sampler", Sampler): @@ -381,26 +426,61 @@ def load_model(self) -> None: self.model_memory_usage / float(2**30)) def _dummy_run(self, model: nn.Module, num_tokens: int) -> None: - input_ids = torch.zeros(num_tokens, - dtype=torch.int32, - device=self.device) - positions = torch.zeros(num_tokens, - dtype=torch.long, - device=self.device) - kv_caches = [None for _ in range(self.num_attn_layers)] - model(input_ids, positions, kv_caches, attn_metadata=None) - return + # use an empty tensor instead of `None`` to force Dynamo to pass + # it by reference, rather by specializing on the value `None`. 
+ # the `dtype` argument does not matter, and we use `float32` as + # a placeholder (it has wide hardware support). + # it is important to create tensors inside the loop, rather than + # multiplying the list, to avoid Dynamo from treating them as + # tensor aliasing. + dummy_kv_caches = [ + torch.tensor([], dtype=torch.float32, device=self.device) + for _ in range(self.num_attn_layers) + ] + with set_forward_context(None): # noqa: SIM117 + with set_compile_context(self.cudagraph_batch_sizes): + # Trigger compilation for general shape. + model(self.input_ids, + self.positions, + dummy_kv_caches, + attn_metadata=None) @torch.inference_mode() def profile_run(self) -> None: self._dummy_run(self.model, self.max_num_tokens) torch.cuda.synchronize() - return @torch.inference_mode() def capture_model(self) -> None: - # TODO: Implement CUDA graph support. - return + if not self.use_cuda_graph: + logger.warning( + "Skipping CUDA graph capture. Please set " + "VLLM_TORCH_COMPILE_LEVEL=%d to use CUDA graphs.", + CompilationLevel.PIECEWISE) + return + + start_time = time.perf_counter() + start_free_gpu_memory = torch.cuda.mem_get_info()[0] + + with set_forward_context(None): + # Trigger CUDA graph capture for specific shapes. + # Capture the large shapes first so that the smaller shapes + # can reuse the memory pool allocated for the large shapes. + for num_tokens in reversed(self.cudagraph_batch_sizes): + self.model( + self.input_ids[:num_tokens], + self.positions[:num_tokens], + kv_caches=self.kv_caches, + attn_metadata=None, + ) + + end_time = time.perf_counter() + end_free_gpu_memory = torch.cuda.mem_get_info()[0] + elapsed_time = end_time - start_time + cuda_graph_size = start_free_gpu_memory - end_free_gpu_memory + # This usually takes 5~20 seconds. + logger.info("Graph capturing finished in %.0f secs, took %.2f GiB", + elapsed_time, cuda_graph_size / (1 << 30)) def initialize_kv_cache(self, num_blocks: int) -> None: assert len(self.kv_caches) == 0 @@ -412,6 +492,13 @@ def initialize_kv_cache(self, num_blocks: int) -> None: dtype=self.kv_cache_dtype, device=self.device)) + def _get_padded_batch_size(self, batch_size: int) -> Optional[int]: + # TODO: Optimize this? 
+ for size in self.cudagraph_batch_sizes: + if batch_size <= size: + return size + return None + @dataclass class CachedRequestState: From 4be3a45158a7fb707973d4b00410e0d2981e6825 Mon Sep 17 00:00:00 2001 From: youkaichao Date: Tue, 5 Nov 2024 22:35:03 -0800 Subject: [PATCH 14/22] [distributed] add function to create ipc buffers directly (#10064) Signed-off-by: youkaichao --- .buildkite/test-pipeline.yaml | 1 + tests/distributed/test_ca_buffer_sharing.py | 59 +++++++++++++++++++ .../device_communicators/custom_all_reduce.py | 31 ++++++++++ 3 files changed, 91 insertions(+) create mode 100644 tests/distributed/test_ca_buffer_sharing.py diff --git a/.buildkite/test-pipeline.yaml b/.buildkite/test-pipeline.yaml index 1eb749f64d36b..3e940549862ea 100644 --- a/.buildkite/test-pipeline.yaml +++ b/.buildkite/test-pipeline.yaml @@ -510,6 +510,7 @@ steps: # NOTE: don't test llama model here, it seems hf implementation is buggy # see https://github.com/vllm-project/vllm/pull/5689 for details - pytest -v -s distributed/test_custom_all_reduce.py + - torchrun --nproc_per_node=2 distributed/test_ca_buffer_sharing.py - TARGET_TEST_SUITE=A100 pytest basic_correctness/ -v -s -m distributed_2_gpus - pytest -v -s -x lora/test_mixtral.py diff --git a/tests/distributed/test_ca_buffer_sharing.py b/tests/distributed/test_ca_buffer_sharing.py new file mode 100644 index 0000000000000..fc4043cd3014e --- /dev/null +++ b/tests/distributed/test_ca_buffer_sharing.py @@ -0,0 +1,59 @@ +# can only run on machines with p2p access across GPUs +# can only run with torchrun: +# torchrun --nproc_per_node=2 tests/distributed/test_ca_buffer_sharing.py + +import ctypes + +import torch +import torch.distributed as dist + +from vllm.distributed.device_communicators.cuda_wrapper import CudaRTLibrary +from vllm.distributed.device_communicators.custom_all_reduce import ( # noqa + CustomAllreduce) + +# create a cpu process group for communicating metadata (ipc handle) +dist.init_process_group(backend="gloo") +rank = local_rank = dist.get_rank() +world_size = dist.get_world_size() + +# every process sets its own device (differently) +lib = CudaRTLibrary() +lib.cudaSetDevice(rank) + +buffer_size_in_bytes = 1024 +byte_value = 2 # the value we write to the buffer for verification + +pointers = CustomAllreduce.create_shared_buffer(buffer_size_in_bytes) + +print(f"Rank {rank} has pointers {pointers}") + +dist.barrier() +torch.cuda.synchronize() + +if rank == 0: + # the first rank tries to write to all buffers + for p in pointers: + pointer = ctypes.c_void_p(p) + lib.cudaMemset(pointer, byte_value, buffer_size_in_bytes) + +dist.barrier() +torch.cuda.synchronize() + +host_data = (ctypes.c_char * buffer_size_in_bytes)() + +# all ranks read from all buffers, and check if the data is correct +for p in pointers: + pointer = ctypes.c_void_p(p) + lib.cudaMemcpy(host_data, pointer, buffer_size_in_bytes) + for i in range(buffer_size_in_bytes): + assert ord(host_data[i]) == byte_value, ( + f"Rank {rank} failed" + f" to verify buffer {p}. 
Expected {byte_value}, " + f"got {ord(host_data[i])}") + +print(f"Rank {rank} verified all buffers") + +dist.barrier() +torch.cuda.synchronize() + +CustomAllreduce.free_shared_buffer(pointers) diff --git a/vllm/distributed/device_communicators/custom_all_reduce.py b/vllm/distributed/device_communicators/custom_all_reduce.py index c3632aee6d11a..3b5d92561cf25 100644 --- a/vllm/distributed/device_communicators/custom_all_reduce.py +++ b/vllm/distributed/device_communicators/custom_all_reduce.py @@ -1,3 +1,4 @@ +import ctypes from contextlib import contextmanager from typing import Any, List, Optional, Union @@ -7,6 +8,7 @@ import vllm.envs as envs from vllm import _custom_ops as ops +from vllm.distributed.device_communicators.cuda_wrapper import CudaRTLibrary from vllm.distributed.device_communicators.custom_all_reduce_utils import ( gpu_p2p_access_check) from vllm.distributed.parallel_state import in_the_same_node_as @@ -174,6 +176,35 @@ def __init__(self, offsets, rank, self.full_nvlink) self.register_buffer(self.buffer) + @staticmethod + def create_shared_buffer( + size_in_bytes: int, + group: Optional[ProcessGroup] = None) -> List[int]: + lib = CudaRTLibrary() + pointer = lib.cudaMalloc(size_in_bytes) + handle = lib.cudaIpcGetMemHandle(pointer) + world_size = dist.get_world_size(group=group) + rank = dist.get_rank(group=group) + handles = [None] * world_size + dist.all_gather_object(handles, handle, group=group) + + pointers: List[int] = [] + for i, h in enumerate(handles): + if i == rank: + pointers.append(pointer.value) # type: ignore + else: + pointers.append( + lib.cudaIpcOpenMemHandle(h).value) # type: ignore + + return pointers + + @staticmethod + def free_shared_buffer(pointers: List[int], + group: Optional[ProcessGroup] = None) -> None: + rank = dist.get_rank(group=group) + lib = CudaRTLibrary() + lib.cudaFree(ctypes.c_void_p(pointers[rank])) + @contextmanager def capture(self): """ From 21063c11c7d340dbb01460e22d98d3619737cd4d Mon Sep 17 00:00:00 2001 From: Aaron Pham Date: Wed, 6 Nov 2024 02:11:55 -0500 Subject: [PATCH 15/22] [CI/Build] drop support for Python 3.8 EOL (#8464) Signed-off-by: Aaron Pham --- .../convert-results-json-to-markdown.py | 10 +-- .../scripts/generate-nightly-markdown.py | 4 +- .../scripts/summary-nightly-results.py | 4 +- .github/workflows/mypy.yaml | 2 +- .github/workflows/publish.yml | 2 +- .github/workflows/ruff.yml | 32 ++++----- .github/workflows/yapf.yml | 26 ++++---- .readthedocs.yaml | 11 ++-- CMakeLists.txt | 36 +++++----- benchmarks/backend_request_func.py | 22 ++----- benchmarks/kernels/benchmark_machete.py | 6 +- csrc/quantization/machete/generate.py | 8 +-- docs/source/getting_started/installation.rst | 10 +-- pyproject.toml | 4 +- setup.py | 9 ++- tests/compile/piecewise/test_toy_llama.py | 4 +- tests/conftest.py | 29 +++----- tests/core/block/test_prefix_caching_block.py | 12 ++-- tests/kernels/test_mamba_ssm.py | 2 +- .../mm_processor_kwargs/test_qwen.py | 2 +- tests/samplers/test_rejection_sampler.py | 10 ++- tests/test_logger.py | 2 +- tests/tokenization/test_detokenize.py | 4 +- tools/profiler/print_layerwise_table.py | 2 +- tools/profiler/visualize_layerwise_profile.py | 2 +- tools/report_build_time_ninja.py | 32 ++++----- use_existing_torch.py | 2 +- .../ops/blocksparse_attention/interface.py | 6 +- vllm/config.py | 7 +- vllm/core/evictor.py | 2 +- .../custom_all_reduce_utils.py | 2 +- vllm/engine/async_llm_engine.py | 2 +- vllm/engine/llm_engine.py | 4 +- vllm/engine/metrics_types.py | 2 +- vllm/engine/output_processor/multi_step.py 
| 2 +- vllm/entrypoints/chat_utils.py | 2 +- vllm/entrypoints/openai/run_batch.py | 2 +- vllm/executor/ray_gpu_executor.py | 2 +- vllm/logger.py | 3 +- vllm/lora/models.py | 4 +- vllm/model_executor/custom_op.py | 2 +- vllm/model_executor/layers/resampler.py | 1 - .../model_executor/layers/rotary_embedding.py | 1 - vllm/model_executor/model_loader/loader.py | 2 +- vllm/model_executor/model_loader/openvino.py | 2 +- .../model_executor/model_loader/tensorizer.py | 5 +- .../model_loader/weight_utils.py | 9 ++- vllm/model_executor/models/arctic.py | 4 +- vllm/model_executor/models/baichuan.py | 1 - vllm/model_executor/models/bloom.py | 1 - vllm/model_executor/models/chatglm.py | 1 - vllm/model_executor/models/commandr.py | 1 - vllm/model_executor/models/dbrx.py | 1 - vllm/model_executor/models/decilm.py | 1 - vllm/model_executor/models/deepseek.py | 1 - vllm/model_executor/models/deepseek_v2.py | 1 - vllm/model_executor/models/exaone.py | 1 - vllm/model_executor/models/falcon.py | 1 - vllm/model_executor/models/fuyu.py | 1 - vllm/model_executor/models/gemma.py | 1 - vllm/model_executor/models/gemma2.py | 1 - .../models/glm4_vision_encoder.py | 1 - vllm/model_executor/models/gpt2.py | 1 - vllm/model_executor/models/gpt_bigcode.py | 1 - vllm/model_executor/models/gpt_j.py | 1 - vllm/model_executor/models/gpt_neox.py | 1 - vllm/model_executor/models/granite.py | 1 - vllm/model_executor/models/granitemoe.py | 1 - .../models/idefics2_vision_model.py | 2 - vllm/model_executor/models/internlm2.py | 1 - vllm/model_executor/models/internlm2_ve.py | 1 - vllm/model_executor/models/jais.py | 1 - vllm/model_executor/models/jamba.py | 1 - vllm/model_executor/models/llama.py | 1 - vllm/model_executor/models/mamba.py | 1 - vllm/model_executor/models/minicpm.py | 1 - vllm/model_executor/models/minicpm3.py | 1 - vllm/model_executor/models/minicpmv.py | 1 - vllm/model_executor/models/mixtral.py | 1 - vllm/model_executor/models/mixtral_quant.py | 1 - vllm/model_executor/models/mllama.py | 1 - vllm/model_executor/models/mlp_speculator.py | 2 +- vllm/model_executor/models/molmo.py | 6 +- vllm/model_executor/models/mpt.py | 1 - vllm/model_executor/models/nemotron.py | 1 - vllm/model_executor/models/olmo.py | 1 - vllm/model_executor/models/opt.py | 1 - vllm/model_executor/models/orion.py | 1 - vllm/model_executor/models/persimmon.py | 1 - vllm/model_executor/models/phi.py | 1 - vllm/model_executor/models/phi3.py | 1 - vllm/model_executor/models/phi3v.py | 1 - vllm/model_executor/models/phimoe.py | 1 - vllm/model_executor/models/pixtral.py | 10 +-- vllm/model_executor/models/qwen.py | 1 - vllm/model_executor/models/qwen2.py | 7 +- vllm/model_executor/models/qwen2_audio.py | 1 - vllm/model_executor/models/qwen2_cls.py | 7 +- vllm/model_executor/models/qwen2_moe.py | 1 - vllm/model_executor/models/qwen2_rm.py | 7 +- vllm/model_executor/models/qwen2_vl.py | 10 ++- vllm/model_executor/models/solar.py | 1 - vllm/model_executor/models/stablelm.py | 1 - vllm/model_executor/models/starcoder2.py | 1 - vllm/model_executor/models/xverse.py | 1 - vllm/multimodal/base.py | 66 +++++++++++-------- vllm/prompt_adapter/utils.py | 17 +++-- vllm/transformers_utils/config.py | 2 +- vllm/transformers_utils/configs/chatglm.py | 1 - vllm/transformers_utils/configs/exaone.py | 1 - vllm/transformers_utils/configs/jais.py | 1 - vllm/transformers_utils/configs/mpt.py | 7 +- vllm/transformers_utils/configs/nemotron.py | 7 +- vllm/transformers_utils/configs/solar.py | 1 - vllm/utils.py | 4 +- 115 files changed, 240 insertions(+), 322 deletions(-) 
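Roughly speaking, the churn below trades pre-3.9 workarounds for idioms that a Python 3.9 floor allows. As a loose sketch only (the literal strings and the NameList class are illustrative stand-ins, not code from this patch), two of the recurring patterns look like this:

from collections import UserList

# str.removeprefix() (Python 3.9+) replaces the manual
# startswith()/slice helpers that some scripts carried for 3.8.
chunk = 'data: {"text": "hello"}'
payload = chunk.removeprefix("data: ")
assert payload == '{"text": "hello"}'
# Unlike blind slicing, it is a no-op when the prefix is absent:
assert "no-prefix".removeprefix("data: ") == "no-prefix"

# PEP 585 (Python 3.9+) makes standard collections subscriptable at
# runtime, so version-gated base-class shims can subclass a
# parameterized UserList directly.
class NameList(UserList[str]):  # hypothetical example class
    pass

names = NameList(["alpha", "beta"])
assert names[0].upper() == "ALPHA"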
diff --git a/.buildkite/nightly-benchmarks/scripts/convert-results-json-to-markdown.py b/.buildkite/nightly-benchmarks/scripts/convert-results-json-to-markdown.py index f90e464288cf1..7cf05610b9953 100644 --- a/.buildkite/nightly-benchmarks/scripts/convert-results-json-to-markdown.py +++ b/.buildkite/nightly-benchmarks/scripts/convert-results-json-to-markdown.py @@ -56,7 +56,7 @@ def read_markdown(file): if os.path.exists(file): - with open(file, "r") as f: + with open(file) as f: return f.read() + "\n" else: return f"{file} not found.\n" @@ -75,14 +75,14 @@ def results_to_json(latency, throughput, serving): # collect results for test_file in results_folder.glob("*.json"): - with open(test_file, "r") as f: + with open(test_file) as f: raw_result = json.loads(f.read()) if "serving" in str(test_file): # this result is generated via `benchmark_serving.py` # attach the benchmarking command to raw_result - with open(test_file.with_suffix(".commands"), "r") as f: + with open(test_file.with_suffix(".commands")) as f: command = json.loads(f.read()) raw_result.update(command) @@ -97,7 +97,7 @@ def results_to_json(latency, throughput, serving): # this result is generated via `benchmark_latency.py` # attach the benchmarking command to raw_result - with open(test_file.with_suffix(".commands"), "r") as f: + with open(test_file.with_suffix(".commands")) as f: command = json.loads(f.read()) raw_result.update(command) @@ -119,7 +119,7 @@ def results_to_json(latency, throughput, serving): # this result is generated via `benchmark_throughput.py` # attach the benchmarking command to raw_result - with open(test_file.with_suffix(".commands"), "r") as f: + with open(test_file.with_suffix(".commands")) as f: command = json.loads(f.read()) raw_result.update(command) diff --git a/.buildkite/nightly-benchmarks/scripts/generate-nightly-markdown.py b/.buildkite/nightly-benchmarks/scripts/generate-nightly-markdown.py index 6059588fe7277..052060c576300 100644 --- a/.buildkite/nightly-benchmarks/scripts/generate-nightly-markdown.py +++ b/.buildkite/nightly-benchmarks/scripts/generate-nightly-markdown.py @@ -72,7 +72,7 @@ def main(args): # collect results for test_file in results_folder.glob("*_nightly_results.json"): - with open(test_file, "r") as f: + with open(test_file) as f: results = results + json.loads(f.read()) # generate markdown table @@ -80,7 +80,7 @@ def main(args): md_table = tabulate(df, headers='keys', tablefmt='pipe', showindex=False) - with open(args.description, "r") as f: + with open(args.description) as f: description = f.read() description = description.format( diff --git a/.buildkite/nightly-benchmarks/scripts/summary-nightly-results.py b/.buildkite/nightly-benchmarks/scripts/summary-nightly-results.py index 4e4d4cd4ca3c6..92d6fad73a94c 100644 --- a/.buildkite/nightly-benchmarks/scripts/summary-nightly-results.py +++ b/.buildkite/nightly-benchmarks/scripts/summary-nightly-results.py @@ -36,11 +36,11 @@ # collect results for test_file in results_folder.glob("*.json"): - with open(test_file, "r") as f: + with open(test_file) as f: raw_result = json.loads(f.read()) # attach the benchmarking command to raw_result - with open(test_file.with_suffix(".commands"), "r") as f: + with open(test_file.with_suffix(".commands")) as f: command = json.loads(f.read()) raw_result.update(command) diff --git a/.github/workflows/mypy.yaml b/.github/workflows/mypy.yaml index 18b354948f0cc..28d2e5fb8dbd9 100644 --- a/.github/workflows/mypy.yaml +++ b/.github/workflows/mypy.yaml @@ -25,7 +25,7 @@ jobs: runs-on: 
ubuntu-latest strategy: matrix: - python-version: ["3.8", "3.9", "3.10", "3.11", "3.12"] + python-version: ["3.9", "3.10", "3.11", "3.12"] steps: - uses: actions/checkout@eef61447b9ff4aafe5dcd4e0bbf5d482be7e7871 # v4.2.1 - name: Set up Python ${{ matrix.python-version }} diff --git a/.github/workflows/publish.yml b/.github/workflows/publish.yml index f959a1cacf866..578c3fbd4e816 100644 --- a/.github/workflows/publish.yml +++ b/.github/workflows/publish.yml @@ -48,7 +48,7 @@ jobs: fail-fast: false matrix: os: ['ubuntu-20.04'] - python-version: ['3.8', '3.9', '3.10', '3.11', '3.12'] + python-version: ['3.9', '3.10', '3.11', '3.12'] pytorch-version: ['2.4.0'] # Must be the most recent version that meets requirements-cuda.txt. cuda-version: ['11.8', '12.1'] diff --git a/.github/workflows/ruff.yml b/.github/workflows/ruff.yml index 197f918765e7d..edf98ce2fcab0 100644 --- a/.github/workflows/ruff.yml +++ b/.github/workflows/ruff.yml @@ -29,19 +29,19 @@ jobs: matrix: python-version: ["3.12"] steps: - - uses: actions/checkout@eef61447b9ff4aafe5dcd4e0bbf5d482be7e7871 # v4.2.1 - - name: Set up Python ${{ matrix.python-version }} - uses: actions/setup-python@f677139bbe7f9c59b41e40162b753c062f5d49a3 # v5.2.0 - with: - python-version: ${{ matrix.python-version }} - - name: Install dependencies - run: | - python -m pip install --upgrade pip - pip install -r requirements-lint.txt - - name: Analysing the code with ruff - run: | - echo "::add-matcher::.github/workflows/matchers/ruff.json" - ruff check --output-format github . - - name: Run isort - run: | - isort . --check-only + - uses: actions/checkout@eef61447b9ff4aafe5dcd4e0bbf5d482be7e7871 # v4.2.1 + - name: Set up Python ${{ matrix.python-version }} + uses: actions/setup-python@f677139bbe7f9c59b41e40162b753c062f5d49a3 # v5.2.0 + with: + python-version: ${{ matrix.python-version }} + - name: Install dependencies + run: | + python -m pip install --upgrade pip + pip install -r requirements-lint.txt + - name: Analysing the code with ruff + run: | + echo "::add-matcher::.github/workflows/matchers/ruff.json" + ruff check --output-format github . + - name: Run isort + run: | + isort . --check-only diff --git a/.github/workflows/yapf.yml b/.github/workflows/yapf.yml index 35579302c5c14..4221c139ccf79 100644 --- a/.github/workflows/yapf.yml +++ b/.github/workflows/yapf.yml @@ -23,16 +23,16 @@ jobs: matrix: python-version: ["3.12"] steps: - - uses: actions/checkout@eef61447b9ff4aafe5dcd4e0bbf5d482be7e7871 # v4.2.1 - - name: Set up Python ${{ matrix.python-version }} - uses: actions/setup-python@f677139bbe7f9c59b41e40162b753c062f5d49a3 # v5.2.0 - with: - python-version: ${{ matrix.python-version }} - - name: Install dependencies - run: | - python -m pip install --upgrade pip - pip install yapf==0.32.0 - pip install toml==0.10.2 - - name: Running yapf - run: | - yapf --diff --recursive . + - uses: actions/checkout@eef61447b9ff4aafe5dcd4e0bbf5d482be7e7871 # v4.2.1 + - name: Set up Python ${{ matrix.python-version }} + uses: actions/setup-python@f677139bbe7f9c59b41e40162b753c062f5d49a3 # v5.2.0 + with: + python-version: ${{ matrix.python-version }} + - name: Install dependencies + run: | + python -m pip install --upgrade pip + pip install yapf==0.32.0 + pip install toml==0.10.2 + - name: Running yapf + run: | + yapf --diff --recursive . 
diff --git a/.readthedocs.yaml b/.readthedocs.yaml index 42cbf18a0f712..34735700a224e 100644 --- a/.readthedocs.yaml +++ b/.readthedocs.yaml @@ -6,17 +6,16 @@ version: 2 build: os: ubuntu-22.04 tools: - python: "3.8" + python: '3.9' sphinx: - configuration: docs/source/conf.py - fail_on_warning: true + configuration: docs/source/conf.py + fail_on_warning: true # If using Sphinx, optionally build your docs in additional formats such as PDF formats: [] # Optionally declare the Python requirements required to build your docs python: - install: - - requirements: docs/requirements-docs.txt - + install: + - requirements: docs/requirements-docs.txt diff --git a/CMakeLists.txt b/CMakeLists.txt index 943424bc4edfa..c372ba98befbf 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -128,9 +128,9 @@ endif() if(VLLM_GPU_LANG STREQUAL "CUDA") # - # For cuda we want to be able to control which architectures we compile for on + # For cuda we want to be able to control which architectures we compile for on # a per-file basis in order to cut down on compile time. So here we extract - # the set of architectures we want to compile for and remove the from the + # the set of architectures we want to compile for and remove the from the # CMAKE_CUDA_FLAGS so that they are not applied globally. # clear_cuda_arches(CUDA_ARCH_FLAGS) @@ -138,7 +138,7 @@ if(VLLM_GPU_LANG STREQUAL "CUDA") message(STATUS "CUDA target architectures: ${CUDA_ARCHS}") # Filter the target architectures by the supported supported archs # since for some files we will build for all CUDA_ARCHS. - cuda_archs_loose_intersection(CUDA_ARCHS + cuda_archs_loose_intersection(CUDA_ARCHS "${CUDA_SUPPORTED_ARCHS}" "${CUDA_ARCHS}") message(STATUS "CUDA supported target architectures: ${CUDA_ARCHS}") else() @@ -236,7 +236,7 @@ if(VLLM_GPU_LANG STREQUAL "CUDA") # are not supported by Machete yet. cuda_archs_loose_intersection(MARLIN_ARCHS "8.0;8.6;8.9;9.0" ${CUDA_ARCHS}) if (MARLIN_ARCHS) - set(MARLIN_SRCS + set(MARLIN_SRCS "csrc/quantization/fp8/fp8_marlin.cu" "csrc/quantization/marlin/dense/marlin_cuda_kernel.cu" "csrc/quantization/marlin/sparse/marlin_24_cuda_kernel.cu" @@ -277,7 +277,7 @@ if(VLLM_GPU_LANG STREQUAL "CUDA") "in CUDA target architectures") endif() - # clear SCALED_MM_3X_ARCHS so the scaled_mm_c2x kernels know we didn't + # clear SCALED_MM_3X_ARCHS so the scaled_mm_c2x kernels know we didn't # build any 3x kernels set(SCALED_MM_3X_ARCHS) endif() @@ -285,7 +285,7 @@ if(VLLM_GPU_LANG STREQUAL "CUDA") # # For the cutlass_scaled_mm kernels we want to build the c2x (CUTLASS 2.x) # kernels for the remaining archs that are not already built for 3x. - cuda_archs_loose_intersection(SCALED_MM_2X_ARCHS + cuda_archs_loose_intersection(SCALED_MM_2X_ARCHS "7.5;8.0;8.6;8.9;9.0" "${CUDA_ARCHS}") # subtract out the archs that are already built for 3x list(REMOVE_ITEM SCALED_MM_2X_ARCHS ${SCALED_MM_3X_ARCHS}) @@ -316,10 +316,10 @@ if(VLLM_GPU_LANG STREQUAL "CUDA") cuda_archs_loose_intersection(MACHETE_ARCHS "9.0a" "${CUDA_ARCHS}") if(${CMAKE_CUDA_COMPILER_VERSION} VERSION_GREATER 12.0 AND MACHETE_ARCHS) # - # For the Machete kernels we automatically generate sources for various + # For the Machete kernels we automatically generate sources for various # preselected input type pairs and schedules. 
# Generate sources: - set(MACHETE_GEN_SCRIPT + set(MACHETE_GEN_SCRIPT ${CMAKE_CURRENT_SOURCE_DIR}/csrc/quantization/machete/generate.py) file(MD5 ${MACHETE_GEN_SCRIPT} MACHETE_GEN_SCRIPT_HASH) @@ -329,8 +329,8 @@ if(VLLM_GPU_LANG STREQUAL "CUDA") if (NOT DEFINED CACHE{MACHETE_GEN_SCRIPT_HASH} OR NOT $CACHE{MACHETE_GEN_SCRIPT_HASH} STREQUAL ${MACHETE_GEN_SCRIPT_HASH}) execute_process( - COMMAND ${CMAKE_COMMAND} -E env - PYTHONPATH=${CMAKE_CURRENT_SOURCE_DIR}/csrc/cutlass_extensions/:${CUTLASS_DIR}/python/:${VLLM_PYTHON_PATH}:$PYTHONPATH + COMMAND ${CMAKE_COMMAND} -E env + PYTHONPATH=${CMAKE_CURRENT_SOURCE_DIR}/csrc/cutlass_extensions/:${CUTLASS_DIR}/python/:${VLLM_PYTHON_PATH}:$PYTHONPATH ${Python_EXECUTABLE} ${MACHETE_GEN_SCRIPT} RESULT_VARIABLE machete_generation_result OUTPUT_VARIABLE machete_generation_output @@ -340,11 +340,11 @@ if(VLLM_GPU_LANG STREQUAL "CUDA") if (NOT machete_generation_result EQUAL 0) message(FATAL_ERROR "Machete generation failed." - " Result: \"${machete_generation_result}\"" + " Result: \"${machete_generation_result}\"" "\nCheck the log for details: " "${CMAKE_CURRENT_BINARY_DIR}/machete_generation.log") else() - set(MACHETE_GEN_SCRIPT_HASH ${MACHETE_GEN_SCRIPT_HASH} + set(MACHETE_GEN_SCRIPT_HASH ${MACHETE_GEN_SCRIPT_HASH} CACHE STRING "Last run machete generate script hash" FORCE) message(STATUS "Machete generation completed successfully.") endif() @@ -366,7 +366,7 @@ if(VLLM_GPU_LANG STREQUAL "CUDA") message(STATUS "Building Machete kernels for archs: ${MACHETE_ARCHS}") else() - if (NOT ${CMAKE_CUDA_COMPILER_VERSION} VERSION_GREATER 12.0 + if (NOT ${CMAKE_CUDA_COMPILER_VERSION} VERSION_GREATER 12.0 AND MACHETE_ARCHS) message(STATUS "Not building Machete kernels as CUDA Compiler version is " "not >= 12.0, we recommend upgrading to CUDA 12.0 or " @@ -392,8 +392,8 @@ define_gpu_extension_target( USE_SABI 3 WITH_SOABI) -# If CUTLASS is compiled on NVCC >= 12.5, it by default uses -# cudaGetDriverEntryPointByVersion as a wrapper to avoid directly calling the +# If CUTLASS is compiled on NVCC >= 12.5, it by default uses +# cudaGetDriverEntryPointByVersion as a wrapper to avoid directly calling the # driver API. This causes problems when linking with earlier versions of CUDA. # Setting this variable sidesteps the issue by calling the driver directly. target_compile_definitions(_C PRIVATE CUTLASS_ENABLE_DIRECT_CUDA_DRIVER_CALL=1) @@ -471,9 +471,9 @@ if (NOT VLLM_TARGET_DEVICE STREQUAL "cuda") return() endif () -# vLLM flash attention requires VLLM_GPU_ARCHES to contain the set of target -# arches in the CMake syntax (75-real, 89-virtual, etc), since we clear the -# arches in the CUDA case (and instead set the gencodes on a per file basis) +# vLLM flash attention requires VLLM_GPU_ARCHES to contain the set of target +# arches in the CMake syntax (75-real, 89-virtual, etc), since we clear the +# arches in the CUDA case (and instead set the gencodes on a per file basis) # we need to manually set VLLM_GPU_ARCHES here. if(VLLM_GPU_LANG STREQUAL "CUDA") foreach(_ARCH ${CUDA_ARCHS}) diff --git a/benchmarks/backend_request_func.py b/benchmarks/backend_request_func.py index 0a903877f000d..a42e70170ba28 100644 --- a/benchmarks/backend_request_func.py +++ b/benchmarks/backend_request_func.py @@ -79,7 +79,7 @@ async def async_request_tgi( # any data, we should skip it. 
if chunk_bytes.startswith(":"): continue - chunk = remove_prefix(chunk_bytes, "data:") + chunk = chunk_bytes.removeprefix("data:") data = json.loads(chunk) timestamp = time.perf_counter() @@ -144,8 +144,8 @@ async def async_request_trt_llm( if not chunk_bytes: continue - chunk = remove_prefix(chunk_bytes.decode("utf-8"), - "data:") + chunk = chunk_bytes.decode("utf-8").removeprefix( + "data:") data = json.loads(chunk) output.generated_text += data["text_output"] @@ -261,8 +261,8 @@ async def async_request_openai_completions( if not chunk_bytes: continue - chunk = remove_prefix(chunk_bytes.decode("utf-8"), - "data: ") + chunk = chunk_bytes.decode("utf-8").removeprefix( + "data: ") if chunk == "[DONE]": latency = time.perf_counter() - st else: @@ -349,8 +349,8 @@ async def async_request_openai_chat_completions( if not chunk_bytes: continue - chunk = remove_prefix(chunk_bytes.decode("utf-8"), - "data: ") + chunk = chunk_bytes.decode("utf-8").removeprefix( + "data: ") if chunk == "[DONE]": latency = time.perf_counter() - st else: @@ -389,14 +389,6 @@ async def async_request_openai_chat_completions( return output -# Since vllm must support Python 3.8, we can't use str.removeprefix(prefix) -# introduced in Python 3.9 -def remove_prefix(text: str, prefix: str) -> str: - if text.startswith(prefix): - return text[len(prefix):] - return text - - def get_model(pretrained_model_name_or_path: str) -> str: if os.getenv('VLLM_USE_MODELSCOPE', 'False').lower() == 'true': from modelscope import snapshot_download diff --git a/benchmarks/kernels/benchmark_machete.py b/benchmarks/kernels/benchmark_machete.py index b70c4b94c97a1..665b50bf18cf0 100644 --- a/benchmarks/kernels/benchmark_machete.py +++ b/benchmarks/kernels/benchmark_machete.py @@ -269,10 +269,10 @@ def run_square_bench(args): def run_range_bench(args): - m_start, k_start, n_start = [int(x) for x in args.dim_start.split(",")] - m_end, k_end, n_end = [int(x) for x in args.dim_end.split(",")] + m_start, k_start, n_start = (int(x) for x in args.dim_start.split(",")) + m_end, k_end, n_end = (int(x) for x in args.dim_end.split(",")) m_increment, k_increment, n_increment = \ - [int(x) for x in args.dim_increment.split(",")] + (int(x) for x in args.dim_increment.split(",")) Ms = list(range(m_start, m_end + 1, m_increment)) Ks = list(range(k_start, k_end + 1, k_increment)) Ns = list(range(n_start, n_end + 1, n_increment)) diff --git a/csrc/quantization/machete/generate.py b/csrc/quantization/machete/generate.py index ebbe76cfb944a..d126af1849024 100644 --- a/csrc/quantization/machete/generate.py +++ b/csrc/quantization/machete/generate.py @@ -468,7 +468,7 @@ def generate(): impl_configs = [] GPTQ_kernel_type_configs = list( - (TypeConfig( + TypeConfig( element_a=element_a, element_b=element_b, element_b_scale=element_a, @@ -476,7 +476,7 @@ def generate(): element_d=element_a, accumulator=DataType.f32, ) for element_b in (VLLMDataType.u4b8, VLLMDataType.u8b128) - for element_a in (DataType.f16, DataType.bf16))) + for element_a in (DataType.f16, DataType.bf16)) GPTQ_kernel_specializations = [ Specialization(with_C=False, with_zeropoints=False, with_scales=True) @@ -490,7 +490,7 @@ def generate(): ] AWQ_kernel_type_configs = list( - (TypeConfig( + TypeConfig( element_a=element_a, element_b=element_b, element_b_scale=element_a, @@ -498,7 +498,7 @@ def generate(): element_d=element_a, accumulator=DataType.f32, ) for element_b in (DataType.u4, DataType.u8) - for element_a in (DataType.f16, DataType.bf16))) + for element_a in (DataType.f16, DataType.bf16)) 
AWQ_kernel_specializations = [ Specialization(with_C=False, with_zeropoints=True, with_scales=True) diff --git a/docs/source/getting_started/installation.rst b/docs/source/getting_started/installation.rst index a706b285edede..61871cdf41125 100644 --- a/docs/source/getting_started/installation.rst +++ b/docs/source/getting_started/installation.rst @@ -10,7 +10,7 @@ Requirements ============ * OS: Linux -* Python: 3.8 - 3.12 +* Python: 3.9 -- 3.12 * GPU: compute capability 7.0 or higher (e.g., V100, T4, RTX20xx, A100, L4, H100, etc.) Install released versions @@ -148,7 +148,7 @@ If you want to modify C++ or CUDA code, you'll need to build vLLM from source. T .. tip:: Building from source requires a lot of compilation. If you are building from source repeatedly, it's more efficient to cache the compilation results. - For example, you can install `ccache `_ using ``conda install ccache`` or ``apt install ccache`` . + For example, you can install `ccache `_ using ``conda install ccache`` or ``apt install ccache`` . As long as ``which ccache`` command can find the ``ccache`` binary, it will be used automatically by the build system. After the first build, subsequent builds will be much faster. @@ -181,8 +181,8 @@ to be run simultaneously, via the environment variable ``MAX_JOBS``. For example $ export MAX_JOBS=6 $ pip install -e . -This is especially useful when you are building on less powerful machines. For example, when you use WSL it only `assigns 50% of the total memory by default `_, so using ``export MAX_JOBS=1`` can avoid compiling multiple files simultaneously and running out of memory. -A side effect is a much slower build process. +This is especially useful when you are building on less powerful machines. For example, when you use WSL it only `assigns 50% of the total memory by default `_, so using ``export MAX_JOBS=1`` can avoid compiling multiple files simultaneously and running out of memory. +A side effect is a much slower build process. Additionally, if you have trouble building vLLM, we recommend using the NVIDIA PyTorch Docker image. @@ -209,7 +209,7 @@ Here is a sanity check to verify that the CUDA Toolkit is correctly installed: Unsupported OS build -------------------- -vLLM can fully run only on Linux but for development purposes, you can still build it on other systems (for example, macOS), allowing for imports and a more convenient development environment. The binaries will not be compiled and won't work on non-Linux systems. +vLLM can fully run only on Linux but for development purposes, you can still build it on other systems (for example, macOS), allowing for imports and a more convenient development environment. The binaries will not be compiled and won't work on non-Linux systems. 
Simply disable the ``VLLM_TARGET_DEVICE`` environment variable before installing: diff --git a/pyproject.toml b/pyproject.toml index 0bbab3cd3fbc3..3562569647391 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -34,7 +34,7 @@ select = [ # Pyflakes "F", # pyupgrade - # "UP", + "UP", # flake8-bugbear "B", # flake8-simplify @@ -55,7 +55,7 @@ ignore = [ ] [tool.mypy] -python_version = "3.8" +python_version = "3.9" ignore_missing_imports = true check_untyped_defs = true diff --git a/setup.py b/setup.py index 8abeb0ba739db..f145a33258d70 100644 --- a/setup.py +++ b/setup.py @@ -1,5 +1,4 @@ import importlib.util -import io import logging import os import re @@ -327,7 +326,7 @@ def get_neuronxcc_version(): "__init__.py") # Check if the command was executed successfully - with open(version_file, "rt") as fp: + with open(version_file) as fp: content = fp.read() # Extract the version using a regular expression @@ -404,7 +403,8 @@ def read_readme() -> str: """Read the README file if present.""" p = get_path("README.md") if os.path.isfile(p): - return io.open(get_path("README.md"), "r", encoding="utf-8").read() + with open(get_path("README.md"), encoding="utf-8") as f: + return f.read() else: return "" @@ -498,7 +498,6 @@ def _read_requirements(filename: str) -> List[str]: "Documentation": "https://vllm.readthedocs.io/en/latest/", }, classifiers=[ - "Programming Language :: Python :: 3.8", "Programming Language :: Python :: 3.9", "Programming Language :: Python :: 3.10", "Programming Language :: Python :: 3.11", @@ -512,7 +511,7 @@ def _read_requirements(filename: str) -> List[str]: ], packages=find_packages(exclude=("benchmarks", "csrc", "docs", "examples", "tests*")), - python_requires=">=3.8", + python_requires=">=3.9", install_requires=get_requirements(), ext_modules=ext_modules, extras_require={ diff --git a/tests/compile/piecewise/test_toy_llama.py b/tests/compile/piecewise/test_toy_llama.py index 9c65059c6b348..73fa9e9906936 100644 --- a/tests/compile/piecewise/test_toy_llama.py +++ b/tests/compile/piecewise/test_toy_llama.py @@ -429,8 +429,8 @@ def benchmark(): # print in tabular format print("batch size\teager mode\tfull cudagraph\tpiecewise cudagraph") for b in cudagraph_sizes: - print((f"{b}\t{eager_time[b]:.3f}\t{full_cudagraph_time[b]:.3f}" - f"\t{piecewise_cudagraph_time[b]:.3f}")) + print(f"{b}\t{eager_time[b]:.3f}\t{full_cudagraph_time[b]:.3f}" + f"\t{piecewise_cudagraph_time[b]:.3f}") if __name__ == "__main__": diff --git a/tests/conftest.py b/tests/conftest.py index bdc6ffb148602..f9dfabc82639b 100644 --- a/tests/conftest.py +++ b/tests/conftest.py @@ -1,6 +1,5 @@ import json import os -import sys import tempfile from collections import UserList from enum import Enum @@ -52,7 +51,7 @@ def _read_prompts(filename: str) -> List[str]: - with open(filename, "r") as f: + with open(filename) as f: prompts = f.readlines() return prompts @@ -62,14 +61,8 @@ class _ImageAssetPrompts(TypedDict): cherry_blossom: str -if sys.version_info < (3, 9): - # UserList cannot be subscripted - class _ImageAssetsBase(UserList): - pass -else: - - class _ImageAssetsBase(UserList[ImageAsset]): - pass +class _ImageAssetsBase(UserList[ImageAsset]): + pass class _ImageAssets(_ImageAssetsBase): @@ -94,14 +87,8 @@ class _VideoAssetPrompts(TypedDict): sample_demo_1: str -if sys.version_info < (3, 9): - # UserList cannot be subscripted - class _VideoAssetsBase(UserList): - pass -else: - - class _VideoAssetsBase(UserList[VideoAsset]): - pass +class _VideoAssetsBase(UserList[VideoAsset]): + pass class 
_VideoAssets(_VideoAssetsBase): @@ -958,7 +945,7 @@ def dummy_opt_path(): "*.msgpack" ]) assert os.path.exists(json_path) - with open(json_path, "r") as f: + with open(json_path) as f: config = json.load(f) config["architectures"] = ["MyOPTForCausalLM"] with open(json_path, "w") as f: @@ -977,7 +964,7 @@ def dummy_llava_path(): "*.msgpack" ]) assert os.path.exists(json_path) - with open(json_path, "r") as f: + with open(json_path) as f: config = json.load(f) config["architectures"] = ["MyLlava"] with open(json_path, "w") as f: @@ -996,7 +983,7 @@ def dummy_gemma2_embedding_path(): "*.msgpack" ]) assert os.path.exists(json_path) - with open(json_path, "r") as f: + with open(json_path) as f: config = json.load(f) config["architectures"] = ["MyGemma2Embedding"] with open(json_path, "w") as f: diff --git a/tests/core/block/test_prefix_caching_block.py b/tests/core/block/test_prefix_caching_block.py index 1a6e17ef7b445..d325b9606843e 100644 --- a/tests/core/block/test_prefix_caching_block.py +++ b/tests/core/block/test_prefix_caching_block.py @@ -99,13 +99,11 @@ def test_blocks_have_correct_hash_in_chain(block_size: int, token_ids = [random.randint(0, 50_000) for _ in range(num_tokens)] - first_chain, second_chain = [ - TestPrefixCachingBlock.create_chain( - block_size=block_size, - token_ids=token_ids, - num_empty_trailing_blocks=num_empty_trailing_blocks) - for _ in range(2) - ] + first_chain, second_chain = (TestPrefixCachingBlock.create_chain( + block_size=block_size, + token_ids=token_ids, + num_empty_trailing_blocks=num_empty_trailing_blocks) + for _ in range(2)) for first_chain_block, second_chain_block in zip( first_chain, second_chain): diff --git a/tests/kernels/test_mamba_ssm.py b/tests/kernels/test_mamba_ssm.py index ad05a97685351..19d1158c79c73 100644 --- a/tests/kernels/test_mamba_ssm.py +++ b/tests/kernels/test_mamba_ssm.py @@ -510,7 +510,7 @@ def test_selective_scan_varlen(with_padding, is_variable_B, is_variable_C, for var in (u_ref, delta_ref, B_ref, C_ref, z_ref) ] for i in range(len(seqlens[0])): - u_s, delta_s, B_s, C_s, z_s = [v[i].unsqueeze(0) for v in splits] + u_s, delta_s, B_s, C_s, z_s = (v[i].unsqueeze(0) for v in splits) if padded_state_indices[i] == PAD_SLOT_ID: continue out_ref_s, _ = selective_scan_ref( diff --git a/tests/models/decoder_only/vision_language/mm_processor_kwargs/test_qwen.py b/tests/models/decoder_only/vision_language/mm_processor_kwargs/test_qwen.py index a01651b171d60..6ae8a6a704b0a 100644 --- a/tests/models/decoder_only/vision_language/mm_processor_kwargs/test_qwen.py +++ b/tests/models/decoder_only/vision_language/mm_processor_kwargs/test_qwen.py @@ -104,7 +104,7 @@ def test_input_mapper_valid_mm_data(input_mapper_for_qwen, # Sad path tests for the multimodal input processor and mapper, respectively @pytest.mark.parametrize("mm_data", [ { - "image": torch.rand((5)) + "image": torch.rand(5) }, { "image": torch.rand((5, 5, 5, 5, 5)) diff --git a/tests/samplers/test_rejection_sampler.py b/tests/samplers/test_rejection_sampler.py index a8deab3718be1..f5497976faf7a 100644 --- a/tests/samplers/test_rejection_sampler.py +++ b/tests/samplers/test_rejection_sampler.py @@ -413,12 +413,10 @@ def __init__(self, vocab_size: int, rejection_sampler: RejectionSampler): def generate_probs_for_test( self, draft_and_target_probs_equal: bool ) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]: - draft_probs, target_probs = [ - F.softmax( - torch.rand(self.vocab_size, dtype=torch.float32), - dim=-1, - ) for _ in range(2) - ] + draft_probs, target_probs = 
(F.softmax( + torch.rand(self.vocab_size, dtype=torch.float32), + dim=-1, + ) for _ in range(2)) num_reference_probs = 100 reference_probs = F.softmax( diff --git a/tests/test_logger.py b/tests/test_logger.py index fadf66f2b61d4..a937b0812ed0c 100644 --- a/tests/test_logger.py +++ b/tests/test_logger.py @@ -29,7 +29,7 @@ def test_trace_function_call(): cur_dir = os.path.dirname(__file__) enable_trace_function_call(path, cur_dir) f1(1) - with open(path, 'r') as f: + with open(path) as f: content = f.read() assert "f1" in content diff --git a/tests/tokenization/test_detokenize.py b/tests/tokenization/test_detokenize.py index a3e70a40db979..84348cbc0bced 100644 --- a/tests/tokenization/test_detokenize.py +++ b/tests/tokenization/test_detokenize.py @@ -93,10 +93,10 @@ def test_mistral_edge_case(tokenizer, truth): def skip_special_tokens(request, tokenizer_name) -> Generator[bool, Any, None]: if "mistral" in tokenizer_name: yield ( - bool(True) if request.param else + True if request.param else pytest.skip("mistral doesn't support skip_special_tokens=False")) else: - yield bool(True) if request.param else bool(False) + yield bool(request.param) @pytest.mark.parametrize("truth", TRUTH) diff --git a/tools/profiler/print_layerwise_table.py b/tools/profiler/print_layerwise_table.py index bbd24b085e3a7..081076ad7dbdc 100644 --- a/tools/profiler/print_layerwise_table.py +++ b/tools/profiler/print_layerwise_table.py @@ -46,7 +46,7 @@ def get_entries(node, curr_depth=0): args = parser.parse_args() - with open(args.json_trace, "r") as f: + with open(args.json_trace) as f: profile_data = json.load(f) if args.table == "summary": diff --git a/tools/profiler/visualize_layerwise_profile.py b/tools/profiler/visualize_layerwise_profile.py index 65ee3ae108ae1..efd6beee865c2 100644 --- a/tools/profiler/visualize_layerwise_profile.py +++ b/tools/profiler/visualize_layerwise_profile.py @@ -434,7 +434,7 @@ def make_plot_title_suffix(profile_json: dict) -> str: f"{', Sparsity ' + sparsity if sparsity else ''}") profile_json = None - with open(json_trace, "r") as f: + with open(json_trace) as f: profile_json = json.load(f) assert profile_json is not None diff --git a/tools/report_build_time_ninja.py b/tools/report_build_time_ninja.py index 33431a33ac837..51ad2adc74fe1 100644 --- a/tools/report_build_time_ninja.py +++ b/tools/report_build_time_ninja.py @@ -81,7 +81,7 @@ def WeightedDuration(self): # Allow for modest floating-point errors epsilon = 0.000002 if (self.weighted_duration > self.Duration() + epsilon): - print('%s > %s?' % (self.weighted_duration, self.Duration())) + print('{} > {}?'.format(self.weighted_duration, self.Duration())) assert (self.weighted_duration <= self.Duration() + epsilon) return self.weighted_duration @@ -104,7 +104,7 @@ def ReadTargets(log, show_all): The result is a list of Target objects.""" header = log.readline() assert header == '# ninja log v5\n', \ - 'unrecognized ninja log version %r' % header + 'unrecognized ninja log version {!r}'.format(header) targets_dict = {} last_end_seen = 0.0 for line in log: @@ -254,8 +254,8 @@ def SummarizeEntries(entries, extra_step_types): # Warn if the sum of weighted times is off by more than half a second. if abs(length - weighted_total) > 500: print('Warning: Possible corrupt ninja log, results may be ' - 'untrustworthy. Length = %.3f, weighted total = %.3f' % - (length, weighted_total)) + 'untrustworthy. 
Length = {:.3f}, weighted total = {:.3f}'.format( + length, weighted_total)) entries_by_ext = defaultdict(list) for target in entries: @@ -263,16 +263,17 @@ def SummarizeEntries(entries, extra_step_types): entries_by_ext[extension].append(target) for key, values in entries_by_ext.items(): - print(' Longest build steps for %s:' % key) + print(' Longest build steps for {}:'.format(key)) values.sort(key=lambda x: x.WeightedDuration()) for target in values[-long_count:]: - print(' %8.1f weighted s to build %s (%.1f s elapsed time)' % - (target.WeightedDuration(), target.DescribeTargets(), - target.Duration())) - - print(' %.1f s weighted time (%.1f s elapsed time sum, %1.1fx ' - 'parallelism)' % - (length, total_cpu_time, total_cpu_time * 1.0 / length)) + print( + ' {:8.1f} weighted s to build {} ({:.1f} s elapsed time)'. + format(target.WeightedDuration(), target.DescribeTargets(), + target.Duration())) + + print(' {:.1f} s weighted time ({:.1f} s elapsed time sum, {:1.1f}x ' + 'parallelism)'.format(length, total_cpu_time, + total_cpu_time * 1.0 / length)) print(' %d build steps completed, average of %1.2f/s' % (len(entries), len(entries) / (length))) @@ -298,11 +299,12 @@ def main(): long_ext_count += len(args.step_types.split(';')) try: - with open(log_file, 'r') as log: + with open(log_file) as log: entries = ReadTargets(log, False) SummarizeEntries(entries, args.step_types) - except IOError: - print('Log file %r not found, no build summary created.' % log_file) + except OSError: + print('Log file {!r} not found, no build summary created.'.format( + log_file)) return errno.ENOENT diff --git a/use_existing_torch.py b/use_existing_torch.py index e11746459908b..319d262898fe3 100644 --- a/use_existing_torch.py +++ b/use_existing_torch.py @@ -4,7 +4,7 @@ requires_files += ["pyproject.toml"] for file in requires_files: print(f">>> cleaning {file}") - with open(file, 'r') as f: + with open(file) as f: lines = f.readlines() if "torch" in "".join(lines).lower(): print("removed:") diff --git a/vllm/attention/ops/blocksparse_attention/interface.py b/vllm/attention/ops/blocksparse_attention/interface.py index a98eb431ac7fc..350f88c8f9740 100644 --- a/vllm/attention/ops/blocksparse_attention/interface.py +++ b/vllm/attention/ops/blocksparse_attention/interface.py @@ -192,10 +192,8 @@ def spda(self, q, k, v, cu_seqlens_k, cu_seqlens_q=None, sm_scale=None): attn_mask = self.dense_attn_mask[None, :, :maxlen, :maxlen] q2 = self.transpose_and_pad(q, cu_seqlens, maxlen, 1) - k2, v2 = [ - self.transpose_and_pad(x, cu_seqlens, maxlen, q_k_ratio) - for x in [k, v] - ] + k2, v2 = (self.transpose_and_pad(x, cu_seqlens, maxlen, q_k_ratio) + for x in [k, v]) spda_output = torch.nn.functional.scaled_dot_product_attention( q2, k2, v2, attn_mask=attn_mask, scale=sm_scale) return self.transpose_and_unpad(spda_output, cu_seqlens) diff --git a/vllm/config.py b/vllm/config.py index 814e00c8785f0..851d35dfd9fb0 100644 --- a/vllm/config.py +++ b/vllm/config.py @@ -668,9 +668,10 @@ def get_multimodal_config(self) -> "MultiModalConfig": @property def is_encoder_decoder_model(self) -> bool: """Extract the HF encoder/decoder model flag.""" - return getattr(self.hf_config, "is_encoder_decoder", False) or ( - (hasattr(self.hf_config, "text_config") and getattr( - self.hf_config.text_config, "is_encoder_decoder", False))) + return getattr( + self.hf_config, "is_encoder_decoder", + False) or (hasattr(self.hf_config, "text_config") and getattr( + self.hf_config.text_config, "is_encoder_decoder", False)) @property def 
is_multimodal_model(self) -> bool: diff --git a/vllm/core/evictor.py b/vllm/core/evictor.py index 0b943e6e65f1c..ed7e06cab2996 100644 --- a/vllm/core/evictor.py +++ b/vllm/core/evictor.py @@ -52,7 +52,7 @@ def num_blocks(self) -> int: pass -class BlockMetaData(): +class BlockMetaData: """Data structure for storing key data describe cached block, so that evitor could use to make its decision which one to choose for eviction diff --git a/vllm/distributed/device_communicators/custom_all_reduce_utils.py b/vllm/distributed/device_communicators/custom_all_reduce_utils.py index 983e772a3f79b..1f78e10cc1dcd 100644 --- a/vllm/distributed/device_communicators/custom_all_reduce_utils.py +++ b/vllm/distributed/device_communicators/custom_all_reduce_utils.py @@ -240,7 +240,7 @@ def gpu_p2p_access_check(src: int, tgt: int) -> bool: if is_distributed: get_world_group().barrier() logger.info("reading GPU P2P access cache from %s", path) - with open(path, "r") as f: + with open(path) as f: cache = json.load(f) _gpu_p2p_access_cache = cache return _gpu_p2p_access_cache[f"{src}->{tgt}"] diff --git a/vllm/engine/async_llm_engine.py b/vllm/engine/async_llm_engine.py index b0fdc67776bbd..161b85646b6e8 100644 --- a/vllm/engine/async_llm_engine.py +++ b/vllm/engine/async_llm_engine.py @@ -812,7 +812,7 @@ async def _engine_abort(self, request_ids: Iterable[str]): async def run_engine_loop(engine_ref: ReferenceType): """We use a weakref to the engine so that the running loop doesn't prevent the engine being garbage collected.""" - engine: Optional["AsyncLLMEngine"] = engine_ref() + engine: Optional[AsyncLLMEngine] = engine_ref() if not engine: return diff --git a/vllm/engine/llm_engine.py b/vllm/engine/llm_engine.py index a1809b1a9dd26..404e7ed2c6ef9 100644 --- a/vllm/engine/llm_engine.py +++ b/vllm/engine/llm_engine.py @@ -1541,8 +1541,8 @@ def _has_remaining_steps( seq_group.state.remaining_steps != ref_remaining_steps for seq_group in seq_group_metadata_list[1:] ]): - raise AssertionError(("All running sequence groups should " - "have the same remaining steps.")) + raise AssertionError("All running sequence groups should " + "have the same remaining steps.") return ref_remaining_steps > 0 diff --git a/vllm/engine/metrics_types.py b/vllm/engine/metrics_types.py index 25b7a7479672a..19dcbfe57d112 100644 --- a/vllm/engine/metrics_types.py +++ b/vllm/engine/metrics_types.py @@ -77,7 +77,7 @@ def __init__(self, local_interval: float) -> None: self.num_generation_tokens: List[int] = [] self.last_local_log = time.time() self.local_interval = local_interval - self.spec_decode_metrics: Optional["SpecDecodeWorkerMetrics"] = None + self.spec_decode_metrics: Optional[SpecDecodeWorkerMetrics] = None @abstractmethod def log(self, stats: Stats) -> None: diff --git a/vllm/engine/output_processor/multi_step.py b/vllm/engine/output_processor/multi_step.py index 3ed37a269c4b4..223790806ab18 100644 --- a/vllm/engine/output_processor/multi_step.py +++ b/vllm/engine/output_processor/multi_step.py @@ -63,7 +63,7 @@ def process_prompt_logprob(self, seq_group: SequenceGroup, single_step_process_prompt_logprob(self, seq_group, output) @staticmethod - @functools.lru_cache() + @functools.lru_cache def _log_prompt_logprob_unsupported_warning_once(): # Reminder: Please update docs/source/serving/compatibility_matrix.rst # If the feature combo become valid diff --git a/vllm/entrypoints/chat_utils.py b/vllm/entrypoints/chat_utils.py index 2b339ab6d44e4..0ada0aaacda24 100644 --- a/vllm/entrypoints/chat_utils.py +++ 
b/vllm/entrypoints/chat_utils.py @@ -362,7 +362,7 @@ def load_chat_template( if chat_template is None: return None try: - with open(chat_template, "r") as f: + with open(chat_template) as f: resolved_chat_template = f.read() except OSError as e: if isinstance(chat_template, Path): diff --git a/vllm/entrypoints/openai/run_batch.py b/vllm/entrypoints/openai/run_batch.py index a64467a311523..0d016d949d22b 100644 --- a/vllm/entrypoints/openai/run_batch.py +++ b/vllm/entrypoints/openai/run_batch.py @@ -120,7 +120,7 @@ async def read_file(path_or_url: str) -> str: session.get(path_or_url) as resp: return await resp.text() else: - with open(path_or_url, "r", encoding="utf-8") as f: + with open(path_or_url, encoding="utf-8") as f: return f.read() diff --git a/vllm/executor/ray_gpu_executor.py b/vllm/executor/ray_gpu_executor.py index 9433dce842b09..66bab2c686c67 100644 --- a/vllm/executor/ray_gpu_executor.py +++ b/vllm/executor/ray_gpu_executor.py @@ -32,7 +32,7 @@ class RayGPUExecutor(DistributedGPUExecutor): uses_ray: bool = True def _init_executor(self) -> None: - self.forward_dag: Optional["ray.dag.CompiledDAG"] = None + self.forward_dag: Optional[ray.dag.CompiledDAG] = None # If the env var is set, it uses the Ray's compiled DAG API # which optimizes the control plane overhead. # Run vLLM with VLLM_USE_RAY_COMPILED_DAG=1 to enable it. diff --git a/vllm/logger.py b/vllm/logger.py index ccf09691a052a..d6fcda02a0fb3 100644 --- a/vllm/logger.py +++ b/vllm/logger.py @@ -67,8 +67,7 @@ def _configure_vllm_root_logger() -> None: raise RuntimeError( "Could not load logging config. File does not exist: %s", VLLM_LOGGING_CONFIG_PATH) - with open(VLLM_LOGGING_CONFIG_PATH, encoding="utf-8", - mode="r") as file: + with open(VLLM_LOGGING_CONFIG_PATH, encoding="utf-8") as file: custom_config = json.loads(file.read()) if not isinstance(custom_config, dict): diff --git a/vllm/lora/models.py b/vllm/lora/models.py index d0279f273db7a..81e274612b73b 100644 --- a/vllm/lora/models.py +++ b/vllm/lora/models.py @@ -343,7 +343,7 @@ def __init__( # text modules (e.g. ChatGLM) and hasattr(self.model, "get_mm_mapping")) self.packed_modules: Dict[str, List[str]] = {} - self.modules: Dict[str, "BaseLayerWithLoRA"] = {} + self.modules: Dict[str, BaseLayerWithLoRA] = {} # Dict instead of a Set for compatibility with LRUCache. self._last_mapping: Optional[LoRAMapping] = None self._create_lora_modules() @@ -548,7 +548,7 @@ def create_dummy_lora( else: parts = module_name.split(".") replacements = self.packed_modules_mapping[parts[-1]] - subloras: List[Optional["LoRALayerWeights"]] = [] + subloras: List[Optional[LoRALayerWeights]] = [] for i, r in enumerate(replacements): lora = LoRALayerWeights.create_dummy_lora_weights( module_name + "." + r, diff --git a/vllm/model_executor/custom_op.py b/vllm/model_executor/custom_op.py index 764f4e9c99df8..bfca15c2b6a3e 100644 --- a/vllm/model_executor/custom_op.py +++ b/vllm/model_executor/custom_op.py @@ -103,7 +103,7 @@ def enabled(cls) -> bool: # On by default if VLLM_TORCH_COMPILE_LEVEL < CompilationLevel.PIECEWISE # Specifying 'all' or 'none' in VLLM_CUSTOM_OPS takes precedence. 
@staticmethod - @lru_cache() + @lru_cache def default_on() -> bool: count_none = envs.VLLM_CUSTOM_OPS.count("none") count_all = envs.VLLM_CUSTOM_OPS.count("all") diff --git a/vllm/model_executor/layers/resampler.py b/vllm/model_executor/layers/resampler.py index bca44d2bf2e28..aae806f6af323 100644 --- a/vllm/model_executor/layers/resampler.py +++ b/vllm/model_executor/layers/resampler.py @@ -1,4 +1,3 @@ -# coding=utf-8 # Adapted from # https://github.com/huggingface/transformers/blob/v4.28.0/src/transformers/models/llama/modeling_llama.py # https://huggingface.co/Qwen/Qwen-7B/blob/main/modeling_qwen.py diff --git a/vllm/model_executor/layers/rotary_embedding.py b/vllm/model_executor/layers/rotary_embedding.py index 2158ad3339673..ac60e0e6d48a0 100644 --- a/vllm/model_executor/layers/rotary_embedding.py +++ b/vllm/model_executor/layers/rotary_embedding.py @@ -1,4 +1,3 @@ -# coding=utf-8 # Adapted from # https://github.com/huggingface/transformers/blob/v4.33.2/src/transformers/models/llama/modeling_llama.py # Copyright 2023 The vLLM team. diff --git a/vllm/model_executor/model_loader/loader.py b/vllm/model_executor/model_loader/loader.py index 1f8d531198324..464915248c9ad 100644 --- a/vllm/model_executor/model_loader/loader.py +++ b/vllm/model_executor/model_loader/loader.py @@ -746,7 +746,7 @@ def __init__(self, load_config: LoadConfig): config_file_path = self._get_config_file(qlora_adapter) - with open(config_file_path, "r") as f: + with open(config_file_path) as f: config = json.load(f) self.target_modules = config["target_modules"] diff --git a/vllm/model_executor/model_loader/openvino.py b/vllm/model_executor/model_loader/openvino.py index 573f2a04895d9..e6299295c85a2 100644 --- a/vllm/model_executor/model_loader/openvino.py +++ b/vllm/model_executor/model_loader/openvino.py @@ -190,7 +190,7 @@ def get_model( kv_cache_dtype: ov.Type, **kwargs, ) -> torch.nn.Module: - lora_config = kwargs.get("lora_config", None) + lora_config = kwargs.get("lora_config") ov_core = kwargs.get("ov_core") if lora_config: raise ValueError( diff --git a/vllm/model_executor/model_loader/tensorizer.py b/vllm/model_executor/model_loader/tensorizer.py index 36f33d6d139ee..437d2772e1f28 100644 --- a/vllm/model_executor/model_loader/tensorizer.py +++ b/vllm/model_executor/model_loader/tensorizer.py @@ -280,7 +280,7 @@ def __init__(self, tensorizer_config: TensorizerConfig, self.tensorizer_args = ( self.tensorizer_config._construct_tensorizer_args()) self.extra_kwargs = extra_kwargs - if extra_kwargs.get("quant_config", None) is not None: + if extra_kwargs.get("quant_config") is not None: self.quant_config = extra_kwargs["quant_config"] else: self.quant_config = quant_config @@ -380,8 +380,7 @@ def tensorizer_weights_iterator( stream = open_stream(tensorizer_args.tensorizer_uri, **stream_params) with TensorDeserializer(stream, **deserializer_args, device="cpu") as state: - for name, param in state.items(): - yield name, param + yield from state.items() del state diff --git a/vllm/model_executor/model_loader/weight_utils.py b/vllm/model_executor/model_loader/weight_utils.py index 0c51314bc90df..9488d54edf365 100644 --- a/vllm/model_executor/model_loader/weight_utils.py +++ b/vllm/model_executor/model_loader/weight_utils.py @@ -188,7 +188,7 @@ def get_quant_config(model_config: ModelConfig, f"{quant_config_files}") quant_config_file = quant_config_files[0] - with open(quant_config_file, "r") as f: + with open(quant_config_file) as f: config = json.load(f) if model_config.quantization == "bitsandbytes": @@ -306,7 
+306,7 @@ def filter_duplicate_safetensors_files(hf_weights_files: List[str], # Iterate through the weight_map (weight_name: safetensors files) # to identify weights that we should use. - with open(index_file_name, "r") as f: + with open(index_file_name) as f: weight_map = json.load(f)["weight_map"] weight_files_in_index = set() for weight_name in weight_map: @@ -382,7 +382,7 @@ def np_cache_weights_iterator( with open(weight_names_file, "w") as f: json.dump(weight_names, f) - with open(weight_names_file, "r") as f: + with open(weight_names_file) as f: weight_names = json.load(f) for name in weight_names: @@ -423,8 +423,7 @@ def pt_weights_iterator( bar_format=_BAR_FORMAT, ): state = torch.load(bin_file, map_location="cpu") - for name, param in state.items(): - yield name, param + yield from state.items() del state torch.cuda.empty_cache() diff --git a/vllm/model_executor/models/arctic.py b/vllm/model_executor/models/arctic.py index fd29d4ccc59d8..5b712ba83c25a 100644 --- a/vllm/model_executor/models/arctic.py +++ b/vllm/model_executor/models/arctic.py @@ -48,7 +48,7 @@ def __init__(self, is_residual_mlp: bool = False, quant_config: Optional[QuantizationConfig] = None, reduce_results: bool = True): - super(ArcticMLP, self).__init__() + super().__init__() self.hidden_size = config.hidden_size self.expert_id = expert_id self.layer_id = layer_id @@ -89,7 +89,7 @@ def __init__(self, params_dtype: Optional[torch.dtype] = None, quant_config: Optional[QuantizationConfig] = None, reduce_results: bool = True): - super(ArcticMoE, self).__init__() + super().__init__() self.tp_size = tp_size or get_tensor_model_parallel_world_size() self.hidden_size = config.hidden_size diff --git a/vllm/model_executor/models/baichuan.py b/vllm/model_executor/models/baichuan.py index f2cfdf8ffd30a..1fbf4135add7a 100644 --- a/vllm/model_executor/models/baichuan.py +++ b/vllm/model_executor/models/baichuan.py @@ -1,4 +1,3 @@ -# coding=utf-8 # Copyright 2022 EleutherAI and the HuggingFace Inc. team. All rights reserved. # # This code is based on EleutherAI's GPT-NeoX library and the GPT-NeoX diff --git a/vllm/model_executor/models/bloom.py b/vllm/model_executor/models/bloom.py index 77ab7de6165fb..83ff39a30fbe3 100644 --- a/vllm/model_executor/models/bloom.py +++ b/vllm/model_executor/models/bloom.py @@ -1,4 +1,3 @@ -# coding=utf-8 # Adapted from # https://github.com/huggingface/transformers/blob/v4.28.0/src/transformers/models/bloom/modeling_bloom.py # Copyright 2023 The vLLM team. diff --git a/vllm/model_executor/models/chatglm.py b/vllm/model_executor/models/chatglm.py index 181f3c2b0fc35..881b86564e811 100644 --- a/vllm/model_executor/models/chatglm.py +++ b/vllm/model_executor/models/chatglm.py @@ -1,4 +1,3 @@ -# coding=utf-8 # Adapted from # https://github.com/THUDM/GLM-4 """Inference-only ChatGLM model compatible with THUDM weights.""" diff --git a/vllm/model_executor/models/commandr.py b/vllm/model_executor/models/commandr.py index 348e6d20f3297..835682ca3b379 100644 --- a/vllm/model_executor/models/commandr.py +++ b/vllm/model_executor/models/commandr.py @@ -1,4 +1,3 @@ -# coding=utf-8 # Copyright 2024 Cohere and the HuggingFace Inc. team. All rights reserved. 
# # This code is based on EleutherAI's GPT-NeoX library and the GPT-NeoX diff --git a/vllm/model_executor/models/dbrx.py b/vllm/model_executor/models/dbrx.py index aae7ab7370b74..3e60eee2d8fe2 100644 --- a/vllm/model_executor/models/dbrx.py +++ b/vllm/model_executor/models/dbrx.py @@ -1,4 +1,3 @@ -# coding=utf-8 from typing import Iterable, List, Optional, Tuple, Union import torch diff --git a/vllm/model_executor/models/decilm.py b/vllm/model_executor/models/decilm.py index 7ed2b96e65c49..8c9653463858b 100644 --- a/vllm/model_executor/models/decilm.py +++ b/vllm/model_executor/models/decilm.py @@ -1,4 +1,3 @@ -# coding=utf-8 # Adapted from # https://github.com/huggingface/transformers/blob/v4.28.0/src/transformers/models/llama/modeling_llama.py # Copyright 2023 DeciAI Research Team. All rights reserved. diff --git a/vllm/model_executor/models/deepseek.py b/vllm/model_executor/models/deepseek.py index 5b4db8f258711..d278ea5b6a991 100644 --- a/vllm/model_executor/models/deepseek.py +++ b/vllm/model_executor/models/deepseek.py @@ -1,4 +1,3 @@ -# coding=utf-8 # Adapted from # https://github.com/huggingface/transformers/blob/v4.28.0/src/transformers/models/llama/modeling_llama.py # Copyright 2023 The vLLM team. diff --git a/vllm/model_executor/models/deepseek_v2.py b/vllm/model_executor/models/deepseek_v2.py index d4ad0c6b5c99e..834be78bce87b 100644 --- a/vllm/model_executor/models/deepseek_v2.py +++ b/vllm/model_executor/models/deepseek_v2.py @@ -1,4 +1,3 @@ -# coding=utf-8 # Adapted from # https://github.com/huggingface/transformers/blob/v4.28.0/src/transformers/models/llama/modeling_llama.py # Copyright 2023 The vLLM team. diff --git a/vllm/model_executor/models/exaone.py b/vllm/model_executor/models/exaone.py index 22f194c776b69..23efe0359cb4a 100644 --- a/vllm/model_executor/models/exaone.py +++ b/vllm/model_executor/models/exaone.py @@ -1,4 +1,3 @@ -# coding=utf-8 # Adapted from # https://huggingface.co/LGAI-EXAONE/EXAONE-3.0-7.8B-Instruct/blob/main/modeling_exaone.py # Copyright 2024 The LG U+ CTO AI Tech Lab. diff --git a/vllm/model_executor/models/falcon.py b/vllm/model_executor/models/falcon.py index c376347811965..ad07fc3b3776e 100644 --- a/vllm/model_executor/models/falcon.py +++ b/vllm/model_executor/models/falcon.py @@ -1,4 +1,3 @@ -# coding=utf-8 # Adapted from # https://github.com/huggingface/transformers/blob/a5cc30d72ae2dc19af534e4b35c986cc28db1275/src/transformers/models/falcon/modeling_falcon.py # Copyright 2023 The vLLM team. diff --git a/vllm/model_executor/models/fuyu.py b/vllm/model_executor/models/fuyu.py index 0de590d1d8372..3db82a898159b 100644 --- a/vllm/model_executor/models/fuyu.py +++ b/vllm/model_executor/models/fuyu.py @@ -1,4 +1,3 @@ -# coding=utf-8 # adapted from https://github.com/huggingface/transformers/blob/v4.39.3/src/transformers/models/fuyu/modeling_fuyu.py # Copyright 2023 The vLLM team. # Copyright 2023 HuggingFace Inc. team. All rights reserved. diff --git a/vllm/model_executor/models/gemma.py b/vllm/model_executor/models/gemma.py index 029178af61da0..fc3f5cb20afb0 100644 --- a/vllm/model_executor/models/gemma.py +++ b/vllm/model_executor/models/gemma.py @@ -1,4 +1,3 @@ -# coding=utf-8 # Copyright 2023 The vLLM team. # Copyright (c) Google Inc. # diff --git a/vllm/model_executor/models/gemma2.py b/vllm/model_executor/models/gemma2.py index 9238ed839c9de..c365880109ef8 100644 --- a/vllm/model_executor/models/gemma2.py +++ b/vllm/model_executor/models/gemma2.py @@ -1,4 +1,3 @@ -# coding=utf-8 # Copyright 2024 The vLLM team. 
# Copyright 2024 Google Inc. HuggingFace Inc. team. All rights reserved. # diff --git a/vllm/model_executor/models/glm4_vision_encoder.py b/vllm/model_executor/models/glm4_vision_encoder.py index 3213a8b29a104..025615b0920fd 100644 --- a/vllm/model_executor/models/glm4_vision_encoder.py +++ b/vllm/model_executor/models/glm4_vision_encoder.py @@ -1,4 +1,3 @@ -# coding=utf-8 # Adapted from # https://github.com/THUDM/GLM-4 """Inference-only GLM-4v model visual encoder compatible with THUDM weights.""" diff --git a/vllm/model_executor/models/gpt2.py b/vllm/model_executor/models/gpt2.py index 3330d84021368..a06200c4b7e08 100644 --- a/vllm/model_executor/models/gpt2.py +++ b/vllm/model_executor/models/gpt2.py @@ -1,4 +1,3 @@ -# coding=utf-8 # Adapted from # https://github.com/huggingface/transformers/blob/v4.28.0/src/transformers/models/gpt2/modeling_gpt2.py # Copyright 2023 The vLLM team. diff --git a/vllm/model_executor/models/gpt_bigcode.py b/vllm/model_executor/models/gpt_bigcode.py index 24c79a8855475..7612ea641d95c 100644 --- a/vllm/model_executor/models/gpt_bigcode.py +++ b/vllm/model_executor/models/gpt_bigcode.py @@ -1,4 +1,3 @@ -# coding=utf-8 # Adapted from # https://github.com/huggingface/transformers/blob/v4.28.0/src/transformers/models/gpt2/modeling_gpt2.py # Copyright 2023 The vLLM team. diff --git a/vllm/model_executor/models/gpt_j.py b/vllm/model_executor/models/gpt_j.py index 9a42b359ae44f..b28a6081b868f 100644 --- a/vllm/model_executor/models/gpt_j.py +++ b/vllm/model_executor/models/gpt_j.py @@ -1,4 +1,3 @@ -# coding=utf-8 # Adapted from # https://github.com/huggingface/transformers/blob/v4.28.0/src/transformers/models/gptj/modeling_gptj.py # Copyright 2023 The vLLM team. diff --git a/vllm/model_executor/models/gpt_neox.py b/vllm/model_executor/models/gpt_neox.py index 1bccef7a5f173..931052c7cccf0 100644 --- a/vllm/model_executor/models/gpt_neox.py +++ b/vllm/model_executor/models/gpt_neox.py @@ -1,4 +1,3 @@ -# coding=utf-8 # Adapted from # https://github.com/huggingface/transformers/blob/v4.28.0/src/transformers/models/gpt_neox/modeling_gpt_neox.py # Copyright 2023 The vLLM team. diff --git a/vllm/model_executor/models/granite.py b/vllm/model_executor/models/granite.py index c968817747754..bee48f377e0f5 100644 --- a/vllm/model_executor/models/granite.py +++ b/vllm/model_executor/models/granite.py @@ -1,4 +1,3 @@ -# coding=utf-8 # Adapted from # https://github.com/huggingface/transformers/blob/v4.28.0/src/transformers/models/llama/modeling_llama.py # Copyright 2023 The vLLM team. diff --git a/vllm/model_executor/models/granitemoe.py b/vllm/model_executor/models/granitemoe.py index 5307bb21adb96..691a6e77c46c4 100644 --- a/vllm/model_executor/models/granitemoe.py +++ b/vllm/model_executor/models/granitemoe.py @@ -1,4 +1,3 @@ -# coding=utf-8 # Adapted from # https://github.com/huggingface/transformers/blob/v4.28.0/src/transformers/models/llama/modeling_llama.py # Copyright 2023 The vLLM team. diff --git a/vllm/model_executor/models/idefics2_vision_model.py b/vllm/model_executor/models/idefics2_vision_model.py index 43f4f29814e6d..53869b8fa6bd8 100644 --- a/vllm/model_executor/models/idefics2_vision_model.py +++ b/vllm/model_executor/models/idefics2_vision_model.py @@ -1,5 +1,3 @@ -# coding=utf-8 - # adapted from https://github.com/huggingface/transformers/blob/v4.43.2/src/transformers/models/idefics2/modeling_idefics2.py # Copyright 2024 The vLLM team. # Copyright 2024 the HuggingFace Inc. team. All rights reserved. 
diff --git a/vllm/model_executor/models/internlm2.py b/vllm/model_executor/models/internlm2.py index 313d98b649b48..afefb6cd9fa96 100644 --- a/vllm/model_executor/models/internlm2.py +++ b/vllm/model_executor/models/internlm2.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- from functools import partial from typing import Any, Dict, Iterable, List, Optional, Tuple, Union diff --git a/vllm/model_executor/models/internlm2_ve.py b/vllm/model_executor/models/internlm2_ve.py index edd867e4b6457..108fc8382049d 100644 --- a/vllm/model_executor/models/internlm2_ve.py +++ b/vllm/model_executor/models/internlm2_ve.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- from typing import List, Optional, Tuple, Union import torch diff --git a/vllm/model_executor/models/jais.py b/vllm/model_executor/models/jais.py index b947f24a693b5..301893f74cb87 100644 --- a/vllm/model_executor/models/jais.py +++ b/vllm/model_executor/models/jais.py @@ -1,4 +1,3 @@ -# coding=utf-8 # Adapted from # https://huggingface.co/inceptionai/jais-30b-chat-v3/blob/main/modeling_jais.py # Copyright 2023 The vLLM team. diff --git a/vllm/model_executor/models/jamba.py b/vllm/model_executor/models/jamba.py index 6f7949c880e61..81d88a47c1941 100644 --- a/vllm/model_executor/models/jamba.py +++ b/vllm/model_executor/models/jamba.py @@ -1,4 +1,3 @@ -# coding=utf-8 """Inference-only Jamba model.""" from typing import Iterable, List, Optional, Tuple diff --git a/vllm/model_executor/models/llama.py b/vllm/model_executor/models/llama.py index 38a31f420cec9..6c0a8b5ef8451 100644 --- a/vllm/model_executor/models/llama.py +++ b/vllm/model_executor/models/llama.py @@ -1,4 +1,3 @@ -# coding=utf-8 # Adapted from # https://github.com/huggingface/transformers/blob/v4.28.0/src/transformers/models/llama/modeling_llama.py # Copyright 2023 The vLLM team. diff --git a/vllm/model_executor/models/mamba.py b/vllm/model_executor/models/mamba.py index 985ba6f3c60c1..aac4b7aa2661d 100644 --- a/vllm/model_executor/models/mamba.py +++ b/vllm/model_executor/models/mamba.py @@ -1,4 +1,3 @@ -# coding=utf-8 """PyTorch MAMBA model.""" from typing import Iterable, List, Optional, Tuple diff --git a/vllm/model_executor/models/minicpm.py b/vllm/model_executor/models/minicpm.py index 03fb036020f2f..acf03cd8cb8ad 100644 --- a/vllm/model_executor/models/minicpm.py +++ b/vllm/model_executor/models/minicpm.py @@ -1,4 +1,3 @@ -# coding=utf-8 # Adapted from # https://github.com/huggingface/transformers/blob/v4.28.0/src/transformers/models/llama/modeling_llama.py # Copyright 2023 The vLLM team. diff --git a/vllm/model_executor/models/minicpm3.py b/vllm/model_executor/models/minicpm3.py index 3b5fd95328d74..eeedf55cf3e57 100644 --- a/vllm/model_executor/models/minicpm3.py +++ b/vllm/model_executor/models/minicpm3.py @@ -1,4 +1,3 @@ -# coding=utf-8 # Adapted from # https://github.com/huggingface/transformers/blob/v4.28.0/src/transformers/models/llama/modeling_llama.py # Copyright 2024 The ModelBest team. diff --git a/vllm/model_executor/models/minicpmv.py b/vllm/model_executor/models/minicpmv.py index f90df6b7df036..5acd3f65896c7 100644 --- a/vllm/model_executor/models/minicpmv.py +++ b/vllm/model_executor/models/minicpmv.py @@ -1,4 +1,3 @@ -# coding=utf-8 # Adapted from # https://github.com/huggingface/transformers/blob/v4.28.0/src/transformers/models/llama/modeling_llama.py # Copyright 2023 The vLLM team. 
diff --git a/vllm/model_executor/models/mixtral.py b/vllm/model_executor/models/mixtral.py index 1514243ad59c9..e9b9c4d838faa 100644 --- a/vllm/model_executor/models/mixtral.py +++ b/vllm/model_executor/models/mixtral.py @@ -1,4 +1,3 @@ -# coding=utf-8 # Adapted from # https://github.com/huggingface/transformers/blob/v4.28.0/src/transformers/models/llama/modeling_llama.py # Copyright 2023 The vLLM team. diff --git a/vllm/model_executor/models/mixtral_quant.py b/vllm/model_executor/models/mixtral_quant.py index 63e2c60a84271..9647d69be8a0a 100644 --- a/vllm/model_executor/models/mixtral_quant.py +++ b/vllm/model_executor/models/mixtral_quant.py @@ -1,4 +1,3 @@ -# coding=utf-8 # Adapted from # https://github.com/huggingface/transformers/blob/v4.28.0/src/transformers/models/llama/modeling_llama.py # Copyright 2023 The vLLM team. diff --git a/vllm/model_executor/models/mllama.py b/vllm/model_executor/models/mllama.py index 251bfc079684e..5fa8d19b97fe8 100644 --- a/vllm/model_executor/models/mllama.py +++ b/vllm/model_executor/models/mllama.py @@ -1,4 +1,3 @@ -# coding=utf-8 # Copyright 2024 the HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/vllm/model_executor/models/mlp_speculator.py b/vllm/model_executor/models/mlp_speculator.py index 42ccd01298169..ae218d749fc0b 100644 --- a/vllm/model_executor/models/mlp_speculator.py +++ b/vllm/model_executor/models/mlp_speculator.py @@ -37,7 +37,7 @@ def __init__( eps=1e-06, elementwise_scale_and_shift=True, ): - super(MLPSpeculatorLayerNorm, self).__init__() + super().__init__() self.elementwise_scale_and_shift = elementwise_scale_and_shift if self.elementwise_scale_and_shift: self.weight = nn.Parameter(torch.empty(normalized_shape)) diff --git a/vllm/model_executor/models/molmo.py b/vllm/model_executor/models/molmo.py index 522aa748f78b6..785b53670542f 100644 --- a/vllm/model_executor/models/molmo.py +++ b/vllm/model_executor/models/molmo.py @@ -1121,9 +1121,9 @@ def _merge_multimodal_embeddings( batch_size * num_image * num_patch, -1).contiguous() image_input_idx = image_input_idx * valid.to(image_input_idx.dtype) - offset = torch.cat( - [seq_len.new_zeros( - (1)), seq_len.cumsum(dim=0)[:-1]], dim=0)[:, None] + offset = torch.cat([seq_len.new_zeros(1), + seq_len.cumsum(dim=0)[:-1]], + dim=0)[:, None] image_input_idx = image_input_idx + offset.to(image_input_idx.dtype) image_input_idx = image_input_idx.flatten()[:, None] mat = image_input_idx == torch.arange( diff --git a/vllm/model_executor/models/mpt.py b/vllm/model_executor/models/mpt.py index ee802030a5ef3..fdd8af79b5470 100644 --- a/vllm/model_executor/models/mpt.py +++ b/vllm/model_executor/models/mpt.py @@ -1,4 +1,3 @@ -# coding=utf-8 # Adapted from https://huggingface.co/mosaicml/mpt-7b/tree/main import math from typing import Iterable, List, Optional, Tuple, Union diff --git a/vllm/model_executor/models/nemotron.py b/vllm/model_executor/models/nemotron.py index 72a09129fed63..b649064536dc2 100644 --- a/vllm/model_executor/models/nemotron.py +++ b/vllm/model_executor/models/nemotron.py @@ -1,4 +1,3 @@ -# coding=utf-8 # Adapted from # https://github.com/huggingface/transformers/blob/v4.28.0/src/transformers/models/llama/modeling_llama.py # Copyright 2023 The vLLM team. 
diff --git a/vllm/model_executor/models/olmo.py b/vllm/model_executor/models/olmo.py index 90ab8abcb84b4..dd3f58289a227 100644 --- a/vllm/model_executor/models/olmo.py +++ b/vllm/model_executor/models/olmo.py @@ -1,4 +1,3 @@ -# coding=utf-8 # Adapted from # https://github.com/huggingface/transformers/blob/v4.40.1/src/transformers/models/olmo/modeling_olmo.py # Copyright 2024 The vLLM team. diff --git a/vllm/model_executor/models/opt.py b/vllm/model_executor/models/opt.py index 7521ab749e10f..7a76e4a0906db 100644 --- a/vllm/model_executor/models/opt.py +++ b/vllm/model_executor/models/opt.py @@ -1,4 +1,3 @@ -# coding=utf-8 # Adapted from # https://github.com/huggingface/transformers/blob/v4.28.0/src/transformers/models/opt/modeling_opt.py # Copyright 2023 The vLLM team. diff --git a/vllm/model_executor/models/orion.py b/vllm/model_executor/models/orion.py index 055407587c598..a338a93c2dd9a 100644 --- a/vllm/model_executor/models/orion.py +++ b/vllm/model_executor/models/orion.py @@ -1,4 +1,3 @@ -# coding=utf-8 # Adapted from # https://huggingface.co/OrionStarAI/Orion-14B-Base/blob/main/modeling_orion.py # Copyright (c) OrionStar Inc. diff --git a/vllm/model_executor/models/persimmon.py b/vllm/model_executor/models/persimmon.py index fc9ef15db26c0..bd4a9f698bacd 100644 --- a/vllm/model_executor/models/persimmon.py +++ b/vllm/model_executor/models/persimmon.py @@ -1,4 +1,3 @@ -# coding=utf-8 # adapted from https://github.com/huggingface/transformers/blob/v4.39.3/src/transformers/models/persimmon/modeling_persimmon.py # Copyright 2023 The vLLM team. # Copyright 2023 EleutherAI and the HuggingFace Inc. team. All rights reserved. diff --git a/vllm/model_executor/models/phi.py b/vllm/model_executor/models/phi.py index 4e7935a7636c5..492122450b237 100644 --- a/vllm/model_executor/models/phi.py +++ b/vllm/model_executor/models/phi.py @@ -1,4 +1,3 @@ -# coding=utf-8 # Adapted from # https://huggingface.co/microsoft/phi-1_5/blob/main/modeling_phi.py # Copyright 2023 The vLLM team. diff --git a/vllm/model_executor/models/phi3.py b/vllm/model_executor/models/phi3.py index 02b2ff01c3832..34141511ea791 100644 --- a/vllm/model_executor/models/phi3.py +++ b/vllm/model_executor/models/phi3.py @@ -1,4 +1,3 @@ -# coding=utf-8 # Adapted from llama.py """Inference-only Phi3 model code inherit from Llama.py""" diff --git a/vllm/model_executor/models/phi3v.py b/vllm/model_executor/models/phi3v.py index 5b477a8ed5f49..1c41891ced416 100644 --- a/vllm/model_executor/models/phi3v.py +++ b/vllm/model_executor/models/phi3v.py @@ -1,4 +1,3 @@ -# coding=utf-8 # Copyright 2024 The vLLM team. # Copyright 2024 Microsoft and the HuggingFace Inc. team. All rights reserved. # diff --git a/vllm/model_executor/models/phimoe.py b/vllm/model_executor/models/phimoe.py index bb8a9327b4ac8..59843ae3dfd59 100644 --- a/vllm/model_executor/models/phimoe.py +++ b/vllm/model_executor/models/phimoe.py @@ -1,4 +1,3 @@ -# coding=utf-8 # Adapted from # https://github.com/huggingface/transformers/blob/v4.28.0/src/transformers/models/llama/modeling_llama.py # Copyright 2023 The vLLM team. 
diff --git a/vllm/model_executor/models/pixtral.py b/vllm/model_executor/models/pixtral.py index ee9f150b17cfc..6e9092432467a 100644 --- a/vllm/model_executor/models/pixtral.py +++ b/vllm/model_executor/models/pixtral.py @@ -136,11 +136,11 @@ def input_processor_for_pixtral(ctx: InputContext, inputs: DecoderOnlyInputs): if image_token_id not in inputs['prompt_token_ids']: raise ValueError( - (f"You've passed {inputs=} without {image_token_id=}" - " Make sure to process your input via mistral_common's" - " tokenizer or pass a chat completion request. For more" - " For more info, see: " - "https://github.com/vllm-project/vllm/issues/8411.")) + f"You've passed {inputs=} without {image_token_id=}" + " Make sure to process your input via mistral_common's" + " tokenizer or pass a chat completion request. For more" + " For more info, see: " + "https://github.com/vllm-project/vllm/issues/8411.") return inputs diff --git a/vllm/model_executor/models/qwen.py b/vllm/model_executor/models/qwen.py index b2b5c70182135..3a0e33e8a3eff 100644 --- a/vllm/model_executor/models/qwen.py +++ b/vllm/model_executor/models/qwen.py @@ -1,4 +1,3 @@ -# coding=utf-8 # Adapted from # https://huggingface.co/Qwen/Qwen-7B/blob/main/modeling_qwen.py # Copyright (c) Alibaba Cloud. diff --git a/vllm/model_executor/models/qwen2.py b/vllm/model_executor/models/qwen2.py index 72b286fe6f6d6..49b3de1304cca 100644 --- a/vllm/model_executor/models/qwen2.py +++ b/vllm/model_executor/models/qwen2.py @@ -1,4 +1,3 @@ -# coding=utf-8 # Adapted from # https://github.com/huggingface/transformers/blob/v4.28.0/src/transformers/models/qwen2/modeling_qwen2.py # Copyright 2024 The Qwen team. @@ -417,9 +416,9 @@ def __init__( and hasattr(config, "max_window_layers")): raise ValueError("Sliding window for some but all layers is not " "supported. This model uses sliding window " - "but `max_window_layers` = %s is less than " - "`num_hidden_layers` = %s. Please open an issue " - "to discuss this feature." % ( + "but `max_window_layers` = {} is less than " + "`num_hidden_layers` = {}. Please open an issue " + "to discuss this feature.".format( config.max_window_layers, config.num_hidden_layers, )) diff --git a/vllm/model_executor/models/qwen2_audio.py b/vllm/model_executor/models/qwen2_audio.py index 6114548bda42c..556c09400ee83 100644 --- a/vllm/model_executor/models/qwen2_audio.py +++ b/vllm/model_executor/models/qwen2_audio.py @@ -1,4 +1,3 @@ -# coding=utf-8 # Copyright 2024 The Qwen team. # Copyright 2023 The vLLM team. # Copyright 2022 EleutherAI and the HuggingFace Inc. team. All rights reserved. diff --git a/vllm/model_executor/models/qwen2_cls.py b/vllm/model_executor/models/qwen2_cls.py index 2d6f3e90f761c..b9e3b74c477e2 100644 --- a/vllm/model_executor/models/qwen2_cls.py +++ b/vllm/model_executor/models/qwen2_cls.py @@ -1,4 +1,3 @@ -# coding=utf-8 # Adapted from # https://huggingface.co/Qwen/Qwen2.5-Math-RM-72B/blob/main/modeling_qwen2_rm.py # Copyright 2024 Kakao Corp. (Kanana-X Team) @@ -60,9 +59,9 @@ def __init__( and hasattr(config, "max_window_layers")): raise ValueError("Sliding window for some but all layers is not " "supported. This model uses sliding window " - "but `max_window_layers` = %s is less than " - "`num_hidden_layers` = %s. Please open an issue " - "to discuss this feature." % ( + "but `max_window_layers` = {} is less than " + "`num_hidden_layers` = {}. 
Please open an issue " + "to discuss this feature.".format( config.max_window_layers, config.num_hidden_layers, )) diff --git a/vllm/model_executor/models/qwen2_moe.py b/vllm/model_executor/models/qwen2_moe.py index dac85e35d369d..98bb48a274e49 100644 --- a/vllm/model_executor/models/qwen2_moe.py +++ b/vllm/model_executor/models/qwen2_moe.py @@ -1,4 +1,3 @@ -# coding=utf-8 # Adapted from # https://github.com/huggingface/transformers/blob/v4.28.0/src/transformers/models/qwen2_moe/modeling_qwen2_moe.py # Copyright 2024 The Qwen team. diff --git a/vllm/model_executor/models/qwen2_rm.py b/vllm/model_executor/models/qwen2_rm.py index 901b1daaa14a4..0fbf305da8b94 100644 --- a/vllm/model_executor/models/qwen2_rm.py +++ b/vllm/model_executor/models/qwen2_rm.py @@ -1,4 +1,3 @@ -# coding=utf-8 # Adapted from # https://huggingface.co/Qwen/Qwen2.5-Math-RM-72B/blob/main/modeling_qwen2_rm.py # Copyright 2024 The Qwen team. @@ -71,9 +70,9 @@ def __init__( and hasattr(config, "max_window_layers")): raise ValueError("Sliding window for some but all layers is not " "supported. This model uses sliding window " - "but `max_window_layers` = %s is less than " - "`num_hidden_layers` = %s. Please open an issue " - "to discuss this feature." % ( + "but `max_window_layers` = {} is less than " + "`num_hidden_layers` = {}. Please open an issue " + "to discuss this feature.".format( config.max_window_layers, config.num_hidden_layers, )) diff --git a/vllm/model_executor/models/qwen2_vl.py b/vllm/model_executor/models/qwen2_vl.py index d801903f8f9fe..e30b84e8dd44c 100644 --- a/vllm/model_executor/models/qwen2_vl.py +++ b/vllm/model_executor/models/qwen2_vl.py @@ -1,4 +1,3 @@ -# coding=utf-8 # Adapted from # https://github.com/huggingface/transformers/blob/19e6e80e10118f855137b90740936c0b11ac397f/src/transformers/models/qwen2_vl/modeling_qwen2_vl.py # Copyright 2024 The Qwen team. @@ -246,9 +245,8 @@ def forward( q, k, v = dist_utils.split_tensor_along_last_dim(x, 3) batch_size = q.shape[1] - q, k, v = [ - rearrange(x, "s b ... -> b s ...").contiguous() for x in (q, k, v) - ] + q, k, v = (rearrange(x, "s b ... -> b s ...").contiguous() + for x in (q, k, v)) if rotary_pos_emb is not None: q = apply_rotary_pos_emb_vision(q, rotary_pos_emb) k = apply_rotary_pos_emb_vision(k, rotary_pos_emb) @@ -258,7 +256,7 @@ def forward( # flash_attn_varlen_func) from flash_attn import flash_attn_varlen_func - q, k, v = [rearrange(x, "b s ... -> (b s) ...") for x in [q, k, v]] + q, k, v = (rearrange(x, "b s ... -> (b s) ...") for x in [q, k, v]) max_seqlen = (cu_seqlens[1:] - cu_seqlens[:-1]).max().item() output = flash_attn_varlen_func(q, @@ -276,7 +274,7 @@ def forward( b=batch_size) elif self.attn_backend == _Backend.TORCH_SDPA: seq_length = q.size(1) - q, k, v = [rearrange(x, "b s h d -> b h s d") for x in [q, k, v]] + q, k, v = (rearrange(x, "b s h d -> b h s d") for x in [q, k, v]) attention_mask = torch.zeros([1, seq_length, seq_length], device=q.device, dtype=torch.bool) diff --git a/vllm/model_executor/models/solar.py b/vllm/model_executor/models/solar.py index e3e7ccb5cf179..1b233ac7427dd 100644 --- a/vllm/model_executor/models/solar.py +++ b/vllm/model_executor/models/solar.py @@ -1,4 +1,3 @@ -# coding=utf-8 # Adapted from # https://github.com/huggingface/transformers/blob/v4.28.0/src/transformers/models/llama/modeling_llama.py # Copyright 2023 The vLLM team. 
diff --git a/vllm/model_executor/models/stablelm.py b/vllm/model_executor/models/stablelm.py index 083a48588d01a..34389b645a7c1 100644 --- a/vllm/model_executor/models/stablelm.py +++ b/vllm/model_executor/models/stablelm.py @@ -1,4 +1,3 @@ -# coding=utf-8 # Copyright 2023 Stability AI, EleutherAI, and The HuggingFace Inc. team. # All rights reserved. # diff --git a/vllm/model_executor/models/starcoder2.py b/vllm/model_executor/models/starcoder2.py index 8f0644bca3e2e..b24c5dadb2b2b 100644 --- a/vllm/model_executor/models/starcoder2.py +++ b/vllm/model_executor/models/starcoder2.py @@ -1,4 +1,3 @@ -# coding=utf-8 # Copyright 2024 BigCode and the HuggingFace Inc. team. All rights reserved. # # This code is based on EleutherAI's GPT-NeoX library and the GPT-NeoX diff --git a/vllm/model_executor/models/xverse.py b/vllm/model_executor/models/xverse.py index 036789642d3c4..e559988ada753 100644 --- a/vllm/model_executor/models/xverse.py +++ b/vllm/model_executor/models/xverse.py @@ -1,4 +1,3 @@ -# coding=utf-8 # Adapted from # https://huggingface.co/xverse/XVERSE-7B/blob/main/modeling_xverse.py # Copyright 2022 EleutherAI and the HuggingFace Inc. team. All rights reserved. diff --git a/vllm/multimodal/base.py b/vllm/multimodal/base.py index 6b10d0c609f13..5ff6f93fb25b4 100644 --- a/vllm/multimodal/base.py +++ b/vllm/multimodal/base.py @@ -1,4 +1,3 @@ -import sys from abc import ABC, abstractmethod from collections import UserDict, defaultdict from typing import (TYPE_CHECKING, Any, Callable, Dict, List, Mapping, @@ -34,14 +33,9 @@ :meth:`MultiModalInputs.batch`. """ -if sys.version_info < (3, 9): - # UserDict cannot be subscripted - class _MultiModalInputsBase(UserDict): - pass -else: - class _MultiModalInputsBase(UserDict[str, NestedTensors]): - pass +class _MultiModalInputsBase(UserDict[str, NestedTensors]): + pass class MultiModalInputs(_MultiModalInputsBase): @@ -262,18 +256,23 @@ def wrapper(model_cls: N) -> N: logger.warning( "Model class %s already has an input mapper " "registered to %s. It is overwritten by the new one.", - model_cls, self) + model_cls, + self, + ) - self._input_mappers[model_cls] = mapper \ - or self._default_input_mapper + self._input_mappers[model_cls] = (mapper + or self._default_input_mapper) return model_cls return wrapper - def map_input(self, model_config: "ModelConfig", - data: MultiModalData[object], - mm_processor_kwargs: Dict[str, Any]) -> MultiModalInputs: + def map_input( + self, + model_config: "ModelConfig", + data: MultiModalData[object], + mm_processor_kwargs: Dict[str, Any], + ) -> MultiModalInputs: """ Transform the data into a dictionary of model inputs using the input mapper registered for that model. @@ -348,13 +347,15 @@ def wrapper(model_cls: N) -> N: logger.warning( "Model class %s already calculates maximum number of " "tokens in %s. 
It is overwritten by the new one.", - model_cls, self) + model_cls, + self, + ) if isinstance(max_mm_tokens, int): self._validate_max_multimodal_tokens(max_mm_tokens) - self._max_mm_tokens[model_cls] = max_mm_tokens \ - or self._default_max_multimodal_tokens + self._max_mm_tokens[model_cls] = ( + max_mm_tokens or self._default_max_multimodal_tokens) return model_cls @@ -482,8 +483,10 @@ def from_seq_group( placeholder_maps: Dict[str, MultiModalPlaceholderMap] = defaultdict( MultiModalPlaceholderMap) - for modality, placeholders in seq_group.multi_modal_placeholders.items( - ): + for ( + modality, + placeholders, + ) in seq_group.multi_modal_placeholders.items(): mm_items = mm_data.pop(modality) if not isinstance(mm_items, list): mm_items = [mm_items] @@ -499,8 +502,11 @@ def from_seq_group( return mm_data, placeholder_maps def append_items_from_seq_group( - self, positions: range, multi_modal_items: List[_T], - multi_modal_placeholders: List[PlaceholderRange]) -> List[_T]: + self, + positions: range, + multi_modal_items: List[_T], + multi_modal_placeholders: List[PlaceholderRange], + ) -> List[_T]: """ Adds the multi-modal items that intersect ```positions`` to this placeholder map and returns the intersecting items. @@ -515,20 +521,26 @@ def append_items_from_seq_group( multi_modal_items): placeholder = range( placeholder_dict["offset"], - placeholder_dict["offset"] + placeholder_dict["length"]) - intersection = range(max(positions.start, placeholder.start), - min(positions.stop, placeholder.stop)) + placeholder_dict["offset"] + placeholder_dict["length"], + ) + intersection = range( + max(positions.start, placeholder.start), + min(positions.stop, placeholder.stop), + ) if not intersection: # Skip this multi-modal item. continue - token_embedding_range = range(intersection.start - positions.start, - intersection.stop - positions.start) + token_embedding_range = range( + intersection.start - positions.start, + intersection.stop - positions.start, + ) multimodal_embedding_range = range( intersection.start - placeholder.start + self.src_len, - intersection.stop - placeholder.start + self.src_len) + intersection.stop - placeholder.start + self.src_len, + ) intersecting_items.append(mm_item) self.dest_ranges.append(token_embedding_range) diff --git a/vllm/prompt_adapter/utils.py b/vllm/prompt_adapter/utils.py index 4cde2a0254b90..473b87c89c21d 100644 --- a/vllm/prompt_adapter/utils.py +++ b/vllm/prompt_adapter/utils.py @@ -37,9 +37,8 @@ def load_peft_weights(model_id: str, Additional arguments to pass to the `hf_hub_download` method when loading from the HuggingFace Hub. 
""" - path = (os.path.join(model_id, hf_hub_download_kwargs["subfolder"]) - if hf_hub_download_kwargs.get("subfolder", None) is not None else - model_id) + path = (os.path.join(model_id, hf_hub_download_kwargs["subfolder"]) if + hf_hub_download_kwargs.get("subfolder") is not None else model_id) if device is None: device = infer_device() @@ -51,19 +50,19 @@ def load_peft_weights(model_id: str, filename = os.path.join(path, WEIGHTS_NAME) use_safetensors = False else: - token = hf_hub_download_kwargs.get("token", None) + token = hf_hub_download_kwargs.get("token") if token is None: - token = hf_hub_download_kwargs.get("use_auth_token", None) + token = hf_hub_download_kwargs.get("use_auth_token") hub_filename = (os.path.join(hf_hub_download_kwargs["subfolder"], SAFETENSORS_WEIGHTS_NAME) - if hf_hub_download_kwargs.get("subfolder", None) - is not None else SAFETENSORS_WEIGHTS_NAME) + if hf_hub_download_kwargs.get("subfolder") is not None + else SAFETENSORS_WEIGHTS_NAME) has_remote_safetensors_file = file_exists( repo_id=model_id, filename=hub_filename, - revision=hf_hub_download_kwargs.get("revision", None), - repo_type=hf_hub_download_kwargs.get("repo_type", None), + revision=hf_hub_download_kwargs.get("revision"), + repo_type=hf_hub_download_kwargs.get("repo_type"), token=token, ) use_safetensors = has_remote_safetensors_file diff --git a/vllm/transformers_utils/config.py b/vllm/transformers_utils/config.py index 08697274854e0..1a5870aa4f84c 100644 --- a/vllm/transformers_utils/config.py +++ b/vllm/transformers_utils/config.py @@ -308,7 +308,7 @@ def load_params_config(model, revision) -> PretrainedConfig: config_path = Path( hf_hub_download(model, config_file_name, revision=revision)) - with open(config_path, "r") as file: + with open(config_path) as file: config_dict = json.load(file) config_mapping = { diff --git a/vllm/transformers_utils/configs/chatglm.py b/vllm/transformers_utils/configs/chatglm.py index 49d2b8d8e21b1..e563bf6268d72 100644 --- a/vllm/transformers_utils/configs/chatglm.py +++ b/vllm/transformers_utils/configs/chatglm.py @@ -1,4 +1,3 @@ -# coding=utf-8 # Adapted from # https://github.com/THUDM/ChatGLM2-6B from transformers import PretrainedConfig diff --git a/vllm/transformers_utils/configs/exaone.py b/vllm/transformers_utils/configs/exaone.py index 805b8ad930039..f60a59f554133 100644 --- a/vllm/transformers_utils/configs/exaone.py +++ b/vllm/transformers_utils/configs/exaone.py @@ -1,4 +1,3 @@ -# coding=utf-8 # Copied from # https://huggingface.co/LGAI-EXAONE/EXAONE-3.0-7.8B-Instruct/blob/main/configuration_exaone.py # Copyright 2021 The LG AI Research EXAONE Lab. All rights reserved. diff --git a/vllm/transformers_utils/configs/jais.py b/vllm/transformers_utils/configs/jais.py index b06a946f34a47..82f129eb2018e 100644 --- a/vllm/transformers_utils/configs/jais.py +++ b/vllm/transformers_utils/configs/jais.py @@ -1,4 +1,3 @@ -# coding=utf-8 # Copyright 2023 The OpenAI Team Authors and HuggingFace Inc. team. # Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved. # Copyright 2023 Cerebras Systems. 
diff --git a/vllm/transformers_utils/configs/mpt.py b/vllm/transformers_utils/configs/mpt.py index 497db0ae48c96..0f047c8b0361c 100644 --- a/vllm/transformers_utils/configs/mpt.py +++ b/vllm/transformers_utils/configs/mpt.py @@ -1,4 +1,3 @@ -# coding=utf-8 # Copied from # https://huggingface.co/mosaicml/mpt-7b/blob/main/configuration_mpt.py """A HuggingFace-style model configuration.""" @@ -117,10 +116,10 @@ def _validate_config(self) -> None: init_config_defaults) if self.d_model % self.n_heads != 0: raise ValueError('d_model must be divisible by n_heads') - if any(( + if any( prob < 0 or prob > 1 for prob in - [self.attn_config['attn_pdrop'], self.resid_pdrop, self.emb_pdrop] - )): + [self.attn_config['attn_pdrop'], self.resid_pdrop, self.emb_pdrop + ]): raise ValueError( "self.attn_config['attn_pdrop'], resid_pdrop, emb_pdrop are " "probabilities and must be between 0 and 1") diff --git a/vllm/transformers_utils/configs/nemotron.py b/vllm/transformers_utils/configs/nemotron.py index 139e6b3cdacbe..93fec667d1cf3 100644 --- a/vllm/transformers_utils/configs/nemotron.py +++ b/vllm/transformers_utils/configs/nemotron.py @@ -1,4 +1,3 @@ -# coding=utf-8 # Copyright 2024 HuggingFace Inc. team. All rights reserved. # Copyright (c) 2024, NVIDIA CORPORATION. All rights reserved. # @@ -144,7 +143,7 @@ def __init__( self.intermediate_size = intermediate_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads - head_dim = head_dim or kwargs.get("kv_channels", None) + head_dim = head_dim or kwargs.get("kv_channels") self.head_dim = head_dim if head_dim is not None else ( hidden_size // num_attention_heads) @@ -160,8 +159,8 @@ def __init__( self.rope_theta = rope_theta self.rope_scaling = rope_scaling # for backward compatibility - partial_rotary_factor = kwargs.get("rope_percent", None) or kwargs.get( - "rope_percentage", None) or partial_rotary_factor + partial_rotary_factor = kwargs.get("rope_percent") or kwargs.get( + "rope_percentage") or partial_rotary_factor self.partial_rotary_factor = partial_rotary_factor self._rope_scaling_validation() self.attention_bias = attention_bias diff --git a/vllm/transformers_utils/configs/solar.py b/vllm/transformers_utils/configs/solar.py index d5113bf01695a..0c1c048f670ee 100644 --- a/vllm/transformers_utils/configs/solar.py +++ b/vllm/transformers_utils/configs/solar.py @@ -1,4 +1,3 @@ -# coding=utf-8 # Copyright 2022 EleutherAI and the HuggingFace Inc. team. All rights reserved. 
# # This code is based on EleutherAI's GPT-NeoX library and the GPT-NeoX diff --git a/vllm/utils.py b/vllm/utils.py index 0b75e8761c916..6edc8d72f6bcf 100644 --- a/vllm/utils.py +++ b/vllm/utils.py @@ -1153,7 +1153,7 @@ class SortedHelpFormatter(argparse.HelpFormatter): def add_arguments(self, actions): actions = sorted(actions, key=lambda x: x.option_strings) - super(SortedHelpFormatter, self).add_arguments(actions) + super().add_arguments(actions) class FlexibleArgumentParser(argparse.ArgumentParser): @@ -1279,7 +1279,7 @@ def _load_config_file(self, file_path: str) -> List[str]: config: Dict[str, Union[int, str]] = {} try: - with open(file_path, 'r') as config_file: + with open(file_path) as config_file: config = yaml.safe_load(config_file) except Exception as ex: logger.error( From a5fda50a10641e47c0c290907f30ef2add6d4e7a Mon Sep 17 00:00:00 2001 From: Isotr0py <2037008807@qq.com> Date: Wed, 6 Nov 2024 16:50:37 +0800 Subject: [PATCH 16/22] [CI/Build] Fix large_gpu_mark reason (#10070) Signed-off-by: Isotr0py <2037008807@qq.com> --- tests/utils.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/utils.py b/tests/utils.py index 16e21f68c7c96..00c7dabe16a7b 100644 --- a/tests/utils.py +++ b/tests/utils.py @@ -699,7 +699,7 @@ def large_gpu_mark(min_gb: int) -> pytest.MarkDecorator: return pytest.mark.skipif( memory_gb < min_gb, - reason=f"Need at least {memory_gb}GB GPU memory to run the test.", + reason=f"Need at least {min_gb}GB GPU memory to run the test.", ) From a02a50e6e5bb74f9d48f75942e47197d22ec6444 Mon Sep 17 00:00:00 2001 From: Konrad Zawora Date: Wed, 6 Nov 2024 10:09:10 +0100 Subject: [PATCH 17/22] [Hardware][Intel-Gaudi] Add Intel Gaudi (HPU) inference backend (#6143) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: yuwenzho Signed-off-by: Chendi.Xue Signed-off-by: Bob Zhu Signed-off-by: zehao-intel Signed-off-by: Konrad Zawora Co-authored-by: Kunshang Ji Co-authored-by: Sanju C Sudhakaran Co-authored-by: Michal Adamczyk Co-authored-by: Marceli Fylcek Co-authored-by: Himangshu Lahkar <49579433+hlahkar@users.noreply.github.com> Co-authored-by: Vivek Goel Co-authored-by: yuwenzho Co-authored-by: Dominika Olszewska Co-authored-by: barak goldberg <149692267+bgoldberg-habana@users.noreply.github.com> Co-authored-by: Michal Szutenberg <37601244+szutenberg@users.noreply.github.com> Co-authored-by: Jan Kaniecki Co-authored-by: Agata Dobrzyniewicz <160237065+adobrzyniewicz-habana@users.noreply.github.com> Co-authored-by: Krzysztof Wisniewski Co-authored-by: Dudi Lester <160421192+dudilester@users.noreply.github.com> Co-authored-by: Ilia Taraban Co-authored-by: Chendi.Xue Co-authored-by: Michał Kuligowski Co-authored-by: Jakub Maksymczuk Co-authored-by: Tomasz Zielinski <85164140+tzielinski-habana@users.noreply.github.com> Co-authored-by: Sun Choi Co-authored-by: Iryna Boiko Co-authored-by: Bob Zhu <41610754+czhu15@users.noreply.github.com> Co-authored-by: hlin99 <73271530+hlin99@users.noreply.github.com> Co-authored-by: Zehao Huang Co-authored-by: Andrzej Kotłowski Co-authored-by: Yan Tomsinsky <73292515+Yantom1@users.noreply.github.com> Co-authored-by: Nir David Co-authored-by: Yu-Zhou Co-authored-by: Ruheena Suhani Shaik Co-authored-by: Karol Damaszke Co-authored-by: Marcin Swiniarski Co-authored-by: Woosuk Kwon Co-authored-by: Jacek Czaja Co-authored-by: Jacek Czaja Co-authored-by: Yuan --- Dockerfile.hpu | 16 + .../getting_started/gaudi-installation.rst | 402 ++++ docs/source/index.rst | 3 +- 
requirements-hpu.txt | 11 + setup.py | 47 +- vllm/_custom_ops.py | 2 +- vllm/attention/backends/hpu_attn.py | 264 +++ vllm/attention/ops/hpu_paged_attn.py | 103 + vllm/attention/selector.py | 8 + vllm/config.py | 22 +- vllm/core/block/cpu_gpu_block_allocator.py | 7 +- .../device_communicators/hpu_communicator.py | 48 + vllm/distributed/parallel_state.py | 19 + vllm/engine/arg_utils.py | 11 +- vllm/engine/async_llm_engine.py | 8 + vllm/engine/llm_engine.py | 8 + vllm/executor/hpu_executor.py | 205 ++ vllm/executor/ray_hpu_executor.py | 554 +++++ vllm/executor/ray_utils.py | 6 +- vllm/model_executor/custom_op.py | 5 +- vllm/model_executor/layers/layernorm.py | 19 + .../model_executor/layers/logits_processor.py | 10 +- .../model_executor/layers/rotary_embedding.py | 55 + .../layers/vocab_parallel_embedding.py | 17 +- vllm/model_executor/sampling_metadata.py | 3 +- vllm/platforms/__init__.py | 10 + vllm/platforms/hpu.py | 11 + vllm/platforms/interface.py | 4 + vllm/utils.py | 3 + vllm/worker/hpu_model_runner.py | 2008 +++++++++++++++++ vllm/worker/hpu_worker.py | 410 ++++ 31 files changed, 4279 insertions(+), 20 deletions(-) create mode 100644 Dockerfile.hpu create mode 100644 docs/source/getting_started/gaudi-installation.rst create mode 100644 requirements-hpu.txt create mode 100644 vllm/attention/backends/hpu_attn.py create mode 100644 vllm/attention/ops/hpu_paged_attn.py create mode 100644 vllm/distributed/device_communicators/hpu_communicator.py create mode 100644 vllm/executor/hpu_executor.py create mode 100644 vllm/executor/ray_hpu_executor.py create mode 100644 vllm/platforms/hpu.py create mode 100644 vllm/worker/hpu_model_runner.py create mode 100644 vllm/worker/hpu_worker.py diff --git a/Dockerfile.hpu b/Dockerfile.hpu new file mode 100644 index 0000000000000..f481c8c6a57bf --- /dev/null +++ b/Dockerfile.hpu @@ -0,0 +1,16 @@ +FROM vault.habana.ai/gaudi-docker/1.18.0/ubuntu22.04/habanalabs/pytorch-installer-2.4.0:latest + +COPY ./ /workspace/vllm + +WORKDIR /workspace/vllm + +RUN pip install -v -r requirements-hpu.txt + +ENV no_proxy=localhost,127.0.0.1 +ENV PT_HPU_ENABLE_LAZY_COLLECTIVES=true + +RUN VLLM_TARGET_DEVICE=hpu python3 setup.py install + +WORKDIR /workspace/ + +ENTRYPOINT ["python3", "-m", "vllm.entrypoints.openai.api_server"] diff --git a/docs/source/getting_started/gaudi-installation.rst b/docs/source/getting_started/gaudi-installation.rst new file mode 100644 index 0000000000000..68c1a56660fa4 --- /dev/null +++ b/docs/source/getting_started/gaudi-installation.rst @@ -0,0 +1,402 @@ +Installation with Intel® Gaudi® AI Accelerators +=============================================== + +This README provides instructions on running vLLM with Intel Gaudi devices. + +Requirements and Installation +============================= + +Please follow the instructions provided in the `Gaudi Installation +Guide `__ +to set up the execution environment. To achieve the best performance, +please follow the methods outlined in the `Optimizing Training Platform +Guide `__. + +Requirements +------------ + +- OS: Ubuntu 22.04 LTS +- Python: 3.10 +- Intel Gaudi accelerator +- Intel Gaudi software version 1.18.0 + + +Quick start using Dockerfile +---------------------------- +.. code:: console + + $ docker build -f Dockerfile.hpu -t vllm-hpu-env . + $ docker run -it --runtime=habana -e HABANA_VISIBLE_DEVICES=all -e OMPI_MCA_btl_vader_single_copy_mechanism=none --cap-add=sys_nice --net=host --rm vllm-hpu-env + + +.. 
tip:: + If you're observing the following error: ``docker: Error response from daemon: Unknown runtime specified habana.``, please refer to the "Install Using Containers" section of `Intel Gaudi Software Stack and Driver Installation `__. Make sure you have the ``habana-container-runtime`` package installed and that the ``habana`` container runtime is registered. + + +Build from source +----------------- + +Environment verification +~~~~~~~~~~~~~~~~~~~~~~~~ + +To verify that the Intel Gaudi software was correctly installed, run: + +.. code:: console + + $ hl-smi # verify that hl-smi is in your PATH and each Gaudi accelerator is visible + $ apt list --installed | grep habana # verify that habanalabs-firmware-tools, habanalabs-graph, habanalabs-rdma-core, habanalabs-thunk and habanalabs-container-runtime are installed + $ pip list | grep habana # verify that habana-torch-plugin, habana-torch-dataloader, habana-pyhlml and habana-media-loader are installed + $ pip list | grep neural # verify that neural_compressor is installed + +Refer to `Intel Gaudi Software Stack +Verification `__ +for more details. + +Run Docker Image +~~~~~~~~~~~~~~~~ + +It is highly recommended to use the latest Docker image from the Intel Gaudi +vault. Refer to the `Intel Gaudi +documentation `__ +for more details. + +Use the following commands to run a Docker image: + +.. code:: console + + $ docker pull vault.habana.ai/gaudi-docker/1.18.0/ubuntu22.04/habanalabs/pytorch-installer-2.4.0:latest + $ docker run -it --runtime=habana -e HABANA_VISIBLE_DEVICES=all -e OMPI_MCA_btl_vader_single_copy_mechanism=none --cap-add=sys_nice --net=host --ipc=host vault.habana.ai/gaudi-docker/1.18.0/ubuntu22.04/habanalabs/pytorch-installer-2.4.0:latest + +Build and Install vLLM +~~~~~~~~~~~~~~~~~~~~~~ + +To build and install vLLM from source, run: + +.. code:: console + + $ git clone https://github.com/vllm-project/vllm.git + $ cd vllm + $ python setup.py develop + + +Currently, the latest features and performance optimizations are developed in Gaudi's `vLLM-fork `__ and we periodically upstream them to the vLLM main repo. To install the latest `HabanaAI/vLLM-fork `__, run the following: + +.. code:: console + + $ git clone https://github.com/HabanaAI/vllm-fork.git + $ cd vllm-fork + $ git checkout habana_main + $ python setup.py develop + + +Supported Features +================== + +- `Offline batched + inference `__ +- Online inference via `OpenAI-Compatible + Server `__ +- HPU autodetection - no need to manually select device within vLLM +- Paged KV cache with algorithms enabled for Intel Gaudi accelerators +- Custom Intel Gaudi implementations of Paged Attention, KV cache ops, + prefill attention, Root Mean Square Layer Normalization, Rotary + Positional Encoding +- Tensor parallelism support for multi-card inference +- Inference with `HPU Graphs `__ + for accelerating low-batch latency and throughput +- Attention with Linear Biases (ALiBi) + +Unsupported Features +==================== + +- Beam search +- LoRA adapters +- Quantization +- Prefill chunking (mixed-batch inferencing) + +Supported Configurations +======================== + +The following configurations have been validated to function with +Gaudi2 devices. Configurations that are not listed may or may not work.
+ +- `meta-llama/Llama-2-7b `__ + on single HPU, or with tensor parallelism on 2x and 8x HPU, BF16 + datatype with random or greedy sampling +- `meta-llama/Llama-2-7b-chat-hf `__ + on single HPU, or with tensor parallelism on 2x and 8x HPU, BF16 + datatype with random or greedy sampling +- `meta-llama/Meta-Llama-3-8B `__ + on single HPU, or with tensor parallelism on 2x and 8x HPU, BF16 + datatype with random or greedy sampling +- `meta-llama/Meta-Llama-3-8B-Instruct `__ + on single HPU, or with tensor parallelism on 2x and 8x HPU, BF16 + datatype with random or greedy sampling +- `meta-llama/Meta-Llama-3.1-8B `__ + on single HPU, or with tensor parallelism on 2x and 8x HPU, BF16 + datatype with random or greedy sampling +- `meta-llama/Meta-Llama-3.1-8B-Instruct `__ + on single HPU, or with tensor parallelism on 2x and 8x HPU, BF16 + datatype with random or greedy sampling +- `meta-llama/Llama-2-70b `__ + with tensor parallelism on 8x HPU, BF16 datatype with random or greedy sampling +- `meta-llama/Llama-2-70b-chat-hf `__ + with tensor parallelism on 8x HPU, BF16 datatype with random or greedy sampling +- `meta-llama/Meta-Llama-3-70B `__ + with tensor parallelism on 8x HPU, BF16 datatype with random or greedy sampling +- `meta-llama/Meta-Llama-3-70B-Instruct `__ + with tensor parallelism on 8x HPU, BF16 datatype with random or greedy sampling +- `meta-llama/Meta-Llama-3.1-70B `__ + with tensor parallelism on 8x HPU, BF16 datatype with random or greedy sampling +- `meta-llama/Meta-Llama-3.1-70B-Instruct `__ + with tensor parallelism on 8x HPU, BF16 datatype with random or greedy sampling + +Performance Tuning +================== + +Execution modes +--------------- + +Currently in vLLM for HPU we support four execution modes, depending on selected HPU PyTorch Bridge backend (via ``PT_HPU_LAZY_MODE`` environment variable), and ``--enforce-eager`` flag. + +.. list-table:: vLLM execution modes + :widths: 25 25 50 + :header-rows: 1 + + * - ``PT_HPU_LAZY_MODE`` + - ``enforce_eager`` + - execution mode + * - 0 + - 0 + - torch.compile + * - 0 + - 1 + - PyTorch eager mode + * - 1 + - 0 + - HPU Graphs + * - 1 + - 1 + - PyTorch lazy mode + +.. warning:: + In 1.18.0, all modes utilizing ``PT_HPU_LAZY_MODE=0`` are highly experimental and should be only used for validating functional correctness. Their performance will be improved in the next releases. For obtaining the best performance in 1.18.0, please use HPU Graphs, or PyTorch lazy mode. + + +Bucketing mechanism +------------------- + +Intel Gaudi accelerators work best when operating on models with fixed tensor shapes. `Intel Gaudi Graph Compiler `__ is responsible for generating optimized binary code that implements the given model topology on Gaudi. In its default configuration, the produced binary code may be heavily dependent on input and output tensor shapes, and can require graph recompilation when encountering differently shaped tensors within the same topology. While the resulting binaries utilize Gaudi efficiently, the compilation itself may introduce a noticeable overhead in end-to-end execution. +In a dynamic inference serving scenario, there is a need to minimize the number of graph compilations and reduce the risk of graph compilation occurring during server runtime. Currently it is achieved by "bucketing" model's forward pass across two dimensions - ``batch_size`` and ``sequence_length``. + +.. 
note:: + Bucketing allows us to reduce the number of required graphs significantly, but it does not handle any graph compilation and device code generation - this is done in warmup and HPUGraph capture phase. + +Bucketing ranges are determined with 3 parameters - ``min``, ``step`` and ``max``. They can be set separately for prompt and decode phase, and for batch size and sequence length dimension. These parameters can be observed in logs during vLLM startup: + +.. code-block:: + + INFO 08-01 21:37:59 hpu_model_runner.py:493] Prompt bucket config (min, step, max_warmup) bs:[1, 32, 4], seq:[128, 128, 1024] + INFO 08-01 21:37:59 hpu_model_runner.py:499] Generated 24 prompt buckets: [(1, 128), (1, 256), (1, 384), (1, 512), (1, 640), (1, 768), (1, 896), (1, 1024), (2, 128), (2, 256), (2, 384), (2, 512), (2, 640), (2, 768), (2, 896), (2, 1024), (4, 128), (4, 256), (4, 384), (4, 512), (4, 640), (4, 768), (4, 896), (4, 1024)] + INFO 08-01 21:37:59 hpu_model_runner.py:504] Decode bucket config (min, step, max_warmup) bs:[1, 128, 4], seq:[128, 128, 2048] + INFO 08-01 21:37:59 hpu_model_runner.py:509] Generated 48 decode buckets: [(1, 128), (1, 256), (1, 384), (1, 512), (1, 640), (1, 768), (1, 896), (1, 1024), (1, 1152), (1, 1280), (1, 1408), (1, 1536), (1, 1664), (1, 1792), (1, 1920), (1, 2048), (2, 128), (2, 256), (2, 384), (2, 512), (2, 640), (2, 768), (2, 896), (2, 1024), (2, 1152), (2, 1280), (2, 1408), (2, 1536), (2, 1664), (2, 1792), (2, 1920), (2, 2048), (4, 128), (4, 256), (4, 384), (4, 512), (4, 640), (4, 768), (4, 896), (4, 1024), (4, 1152), (4, 1280), (4, 1408), (4, 1536), (4, 1664), (4, 1792), (4, 1920), (4, 2048)] + +``min`` determines the lowest value of the bucket. ``step`` determines the interval between buckets, and ``max`` determines the upper bound of the bucket. Furthermore, interval between ``min`` and ``step`` has special handling - ``min`` gets multiplied by consecutive powers of two, until ``step`` gets reached. We call this the ramp-up phase and it is used for handling lower batch sizes with minimum wastage, while allowing larger padding on larger batch sizes. + +Example (with ramp-up) + +.. code-block:: + + min = 2, step = 32, max = 64 + => ramp_up = (2, 4, 8, 16) + => stable = (32, 64) + => buckets = ramp_up + stable => (2, 4, 8, 16, 32, 64) + +Example (without ramp-up) + +.. code-block:: + + min = 128, step = 128, max = 512 + => ramp_up = () + => stable = (128, 256, 384, 512) + => buckets = ramp_up + stable => (128, 256, 384, 512) + + +In the logged scenario, 24 buckets were generated for prompt (prefill) runs, and 48 buckets for decode runs. Each bucket corresponds to a separate optimized device binary for a given model with specified tensor shapes. Whenever a batch of requests is processed, it is padded across batch and sequence length dimension to the smallest possible bucket. + +.. warning:: + If a request exceeds maximum bucket size in any dimension, it will be processed without padding, and its processing may require a graph compilation, potentially significantly increasing end-to-end latency. The boundaries of the buckets are user-configurable via environment variables, and upper bucket boundaries can be increased to avoid such scenario. 
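+
+For illustration, the ramp-up and stable phases described above can be expressed as a small Python helper. This is a hypothetical sketch written for this guide (``generate_buckets`` is not part of vLLM's API); it only reproduces the arithmetic shown in the two examples:
+
+.. code-block:: python
+
+    def generate_buckets(bmin: int, step: int, bmax: int) -> list:
+        """Expand a (min, step, max) config into a list of bucket sizes (illustrative only)."""
+        # Ramp-up phase: keep doubling `bmin` until `step` is reached.
+        ramp_up = []
+        value = bmin
+        while value < step:
+            ramp_up.append(value)
+            value *= 2
+        # Stable phase: fixed `step` increments up to and including `bmax`.
+        stable = list(range(max(step, bmin), bmax + 1, step))
+        return ramp_up + stable
+
+    # Matches the examples above:
+    assert generate_buckets(2, 32, 64) == [2, 4, 8, 16, 32, 64]
+    assert generate_buckets(128, 128, 512) == [128, 256, 384, 512]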
+ +As an example, if a request of 3 sequences, with max sequence length of 412 comes in to an idle vLLM server, it will be padded and executed as a ``(4, 512)`` prefill bucket, as ``batch_size`` (number of sequences) will be padded to 4 (closest batch_size dimension higher than 3), and max sequence length will be padded to 512 (closest sequence length dimension higher than 412). After the prefill stage, it will be executed as a ``(4, 512)`` decode bucket and will continue as that bucket until either the batch dimension changes (due to a request being finished) - in which case it will become a ``(2, 512)`` bucket, or context length increases above 512 tokens, in which case it will become a ``(4, 640)`` bucket. + +.. note:: + Bucketing is transparent to a client - padding in the sequence length dimension is never returned to the client, and padding in the batch dimension does not create new requests. + +Warmup +------ + +Warmup is an optional, but highly recommended step occurring before the vLLM server starts listening. It executes a forward pass for each bucket with dummy data. The goal is to pre-compile all graphs and not incur any graph compilation overheads within bucket boundaries during server runtime. Each warmup step is logged during vLLM startup: + +.. code-block:: + + INFO 08-01 22:26:47 hpu_model_runner.py:1066] [Warmup][Prompt][1/24] batch_size:4 seq_len:1024 free_mem:79.16 GiB + INFO 08-01 22:26:47 hpu_model_runner.py:1066] [Warmup][Prompt][2/24] batch_size:4 seq_len:896 free_mem:55.43 GiB + INFO 08-01 22:26:48 hpu_model_runner.py:1066] [Warmup][Prompt][3/24] batch_size:4 seq_len:768 free_mem:55.43 GiB + ... + INFO 08-01 22:26:59 hpu_model_runner.py:1066] [Warmup][Prompt][24/24] batch_size:1 seq_len:128 free_mem:55.43 GiB + INFO 08-01 22:27:00 hpu_model_runner.py:1066] [Warmup][Decode][1/48] batch_size:4 seq_len:2048 free_mem:55.43 GiB + INFO 08-01 22:27:00 hpu_model_runner.py:1066] [Warmup][Decode][2/48] batch_size:4 seq_len:1920 free_mem:55.43 GiB + INFO 08-01 22:27:01 hpu_model_runner.py:1066] [Warmup][Decode][3/48] batch_size:4 seq_len:1792 free_mem:55.43 GiB + ... + INFO 08-01 22:27:16 hpu_model_runner.py:1066] [Warmup][Decode][47/48] batch_size:2 seq_len:128 free_mem:55.43 GiB + INFO 08-01 22:27:16 hpu_model_runner.py:1066] [Warmup][Decode][48/48] batch_size:1 seq_len:128 free_mem:55.43 GiB + +This example uses the same buckets as in the *Bucketing mechanism* section. Each output line corresponds to the execution of a single bucket. When a bucket is executed for the first time, its graph is compiled and can be reused later on, skipping further graph compilations. + +.. tip:: + Compiling all the buckets might take some time and can be turned off with the ``VLLM_SKIP_WARMUP=true`` environment variable. Keep in mind that if you do that, you may face graph compilations when executing a given bucket for the first time. It is fine to disable warmup for development, but it's highly recommended to enable it in deployment. + +HPU Graph capture +----------------- + +`HPU Graphs `__ are currently the most performant execution method of vLLM on Intel Gaudi. When HPU Graphs are enabled, execution graphs will be traced (recorded) ahead of time (after performing warmup), to be later replayed during inference, significantly reducing host overheads. Recording can take large amounts of memory, which needs to be taken into account when allocating KV cache. Enabling HPU Graphs will impact the number of available KV cache blocks, but vLLM provides user-configurable variables to control memory management.
+
+
+When HPU Graphs are used, they share a common memory pool ("usable memory") with the KV cache, as determined by the ``gpu_memory_utilization`` flag (``0.9`` by default).
+Before the KV cache gets allocated, model weights are loaded onto the device, and a forward pass of the model is executed on dummy data to estimate memory usage.
+Only after that is the ``gpu_memory_utilization`` flag applied - at its default value, it marks 90% of the free device memory at that point as usable.
+Next, the KV cache gets allocated, the model is warmed up, and HPU Graphs are captured.
+The environment variable ``VLLM_GRAPH_RESERVED_MEM`` defines the ratio of memory reserved for HPU Graph capture.
+With its default value (``VLLM_GRAPH_RESERVED_MEM=0.1``), 10% of usable memory will be reserved for graph capture (referred to below as "usable graph memory"), and the remaining 90% will be used for the KV cache.
+The environment variable ``VLLM_GRAPH_PROMPT_RATIO`` determines how usable graph memory is split between prefill and decode graphs. By default (``VLLM_GRAPH_PROMPT_RATIO=0.3``), 30% of usable graph memory is reserved for prefill graphs and the remaining 70% for decode graphs.
+A lower value reserves less usable graph memory for the prefill stage, e.g. ``VLLM_GRAPH_PROMPT_RATIO=0.2`` will reserve 20% of usable graph memory for prefill graphs and 80% of usable graph memory for decode graphs.
+
+.. note::
+  ``gpu_memory_utilization`` does not correspond to the absolute memory usage across the HPU. It specifies the memory margin after loading the model and performing a profile run. If a device has 100 GiB of total memory and 50 GiB of free memory after loading model weights and executing the profiling run, ``gpu_memory_utilization`` at its default value will mark 90% of those 50 GiB as usable (45 GiB), leaving 5 GiB of margin, regardless of total device memory.
+
+The user can also configure the strategy for capturing HPU Graphs separately for the prompt and decode stages. The strategy affects the order in which graphs are captured. Two strategies are implemented:
+
+- ``max_bs`` - the graph capture queue is sorted in descending order by batch size. Buckets with equal batch sizes are sorted by sequence length in ascending order (e.g. ``(64, 128)``, ``(64, 256)``, ``(32, 128)``, ``(32, 256)``, ``(1, 128)``, ``(1, 256)``); this is the default strategy for decode
+- ``min_tokens`` - the graph capture queue is sorted in ascending order by the number of tokens each graph processes (``batch_size*sequence_length``); this is the default strategy for prompt
+
+When there is a large number of requests pending, the vLLM scheduler will attempt to fill the maximum decode batch size as soon as possible. When a request is finished, the decode batch size decreases. When that happens, vLLM will attempt to schedule a prefill iteration for requests in the waiting queue, to fill the decode batch size back to its previous state. This means that in a full load scenario, the decode batch size is often at its maximum, which makes capturing large batch size HPU Graphs crucial, as reflected by the ``max_bs`` strategy. On the other hand, prefills will most frequently be executed with very low batch sizes (1-4), which is reflected in the ``min_tokens`` strategy.
+
+
+.. note::
+  ``VLLM_GRAPH_PROMPT_RATIO`` does not set a hard limit on the memory taken by graphs in each stage (prefill and decode). vLLM will first attempt to use up the entirety of usable prefill graph memory (usable graph memory * ``VLLM_GRAPH_PROMPT_RATIO``) for capturing prefill HPU Graphs, then it will attempt to do the same for decode graphs and the usable decode graph memory pool.
If one stage is fully captured, and there is unused memory left within usable graph memory pool, vLLM will attempt further graph capture for the other stage, until no more HPU Graphs can be captured without exceeding reserved memory pool. The behavior on that mechanism can be observed in the example below. + + +Each described step is logged by vLLM server, as follows (negative values correspond to memory being released): + +.. code-block:: + + INFO 08-02 17:37:44 hpu_model_runner.py:493] Prompt bucket config (min, step, max_warmup) bs:[1, 32, 4], seq:[128, 128, 1024] + INFO 08-02 17:37:44 hpu_model_runner.py:499] Generated 24 prompt buckets: [(1, 128), (1, 256), (1, 384), (1, 512), (1, 640), (1, 768), (1, 896), (1, 1024), (2, 128), (2, 256), (2, 384), (2, 512), (2, 640), (2, 768), (2, 896), (2, 1024), (4, 128), (4, 256), (4, 384), (4, 512), (4, 640), (4, 768), (4, 896), (4, 1024)] + INFO 08-02 17:37:44 hpu_model_runner.py:504] Decode bucket config (min, step, max_warmup) bs:[1, 128, 4], seq:[128, 128, 2048] + INFO 08-02 17:37:44 hpu_model_runner.py:509] Generated 48 decode buckets: [(1, 128), (1, 256), (1, 384), (1, 512), (1, 640), (1, 768), (1, 896), (1, 1024), (1, 1152), (1, 1280), (1, 1408), (1, 1536), (1, 1664), (1, 1792), (1, 1920), (1, 2048), (2, 128), (2, 256), (2, 384), (2, 512), (2, 640), (2, 768), (2, 896), (2, 1024), (2, 1152), (2, 1280), (2, 1408), (2, 1536), (2, 1664), (2, 1792), (2, 1920), (2, 2048), (4, 128), (4, 256), (4, 384), (4, 512), (4, 640), (4, 768), (4, 896), (4, 1024), (4, 1152), (4, 1280), (4, 1408), (4, 1536), (4, 1664), (4, 1792), (4, 1920), (4, 2048)] + INFO 08-02 17:37:52 hpu_model_runner.py:430] Pre-loading model weights on hpu:0 took 14.97 GiB of device memory (14.97 GiB/94.62 GiB used) and 2.95 GiB of host memory (475.2 GiB/1007 GiB used) + INFO 08-02 17:37:52 hpu_model_runner.py:438] Wrapping in HPU Graph took 0 B of device memory (14.97 GiB/94.62 GiB used) and -252 KiB of host memory (475.2 GiB/1007 GiB used) + INFO 08-02 17:37:52 hpu_model_runner.py:442] Loading model weights took in total 14.97 GiB of device memory (14.97 GiB/94.62 GiB used) and 2.95 GiB of host memory (475.2 GiB/1007 GiB used) + INFO 08-02 17:37:54 hpu_worker.py:134] Model profiling run took 504 MiB of device memory (15.46 GiB/94.62 GiB used) and 180.9 MiB of host memory (475.4 GiB/1007 GiB used) + INFO 08-02 17:37:54 hpu_worker.py:158] Free device memory: 79.16 GiB, 39.58 GiB usable (gpu_memory_utilization=0.5), 15.83 GiB reserved for HPUGraphs (VLLM_GRAPH_RESERVED_MEM=0.4), 23.75 GiB reserved for KV cache + INFO 08-02 17:37:54 hpu_executor.py:85] # HPU blocks: 1519, # CPU blocks: 0 + INFO 08-02 17:37:54 hpu_worker.py:190] Initializing cache engine took 23.73 GiB of device memory (39.2 GiB/94.62 GiB used) and -1.238 MiB of host memory (475.4 GiB/1007 GiB used) + INFO 08-02 17:37:54 hpu_model_runner.py:1066] [Warmup][Prompt][1/24] batch_size:4 seq_len:1024 free_mem:55.43 GiB + ... + INFO 08-02 17:38:22 hpu_model_runner.py:1066] [Warmup][Decode][48/48] batch_size:1 seq_len:128 free_mem:55.43 GiB + INFO 08-02 17:38:22 hpu_model_runner.py:1159] Using 15.85 GiB/55.43 GiB of free device memory for HPUGraphs, 7.923 GiB for prompt and 7.923 GiB for decode (VLLM_GRAPH_PROMPT_RATIO=0.3) + INFO 08-02 17:38:22 hpu_model_runner.py:1066] [Warmup][Graph/Prompt][1/24] batch_size:1 seq_len:128 free_mem:55.43 GiB + ... 
+ INFO 08-02 17:38:26 hpu_model_runner.py:1066] [Warmup][Graph/Prompt][11/24] batch_size:1 seq_len:896 free_mem:48.77 GiB + INFO 08-02 17:38:27 hpu_model_runner.py:1066] [Warmup][Graph/Decode][1/48] batch_size:4 seq_len:128 free_mem:47.51 GiB + ... + INFO 08-02 17:38:41 hpu_model_runner.py:1066] [Warmup][Graph/Decode][48/48] batch_size:1 seq_len:2048 free_mem:47.35 GiB + INFO 08-02 17:38:41 hpu_model_runner.py:1066] [Warmup][Graph/Prompt][12/24] batch_size:4 seq_len:256 free_mem:47.35 GiB + INFO 08-02 17:38:42 hpu_model_runner.py:1066] [Warmup][Graph/Prompt][13/24] batch_size:2 seq_len:512 free_mem:45.91 GiB + INFO 08-02 17:38:42 hpu_model_runner.py:1066] [Warmup][Graph/Prompt][14/24] batch_size:1 seq_len:1024 free_mem:44.48 GiB + INFO 08-02 17:38:43 hpu_model_runner.py:1066] [Warmup][Graph/Prompt][15/24] batch_size:2 seq_len:640 free_mem:43.03 GiB + INFO 08-02 17:38:43 hpu_model_runner.py:1128] Graph/Prompt captured:15 (62.5%) used_mem:14.03 GiB buckets:[(1, 128), (1, 256), (1, 384), (1, 512), (1, 640), (1, 768), (1, 896), (1, 1024), (2, 128), (2, 256), (2, 384), (2, 512), (2, 640), (4, 128), (4, 256)] + INFO 08-02 17:38:43 hpu_model_runner.py:1128] Graph/Decode captured:48 (100.0%) used_mem:161.9 MiB buckets:[(1, 128), (1, 256), (1, 384), (1, 512), (1, 640), (1, 768), (1, 896), (1, 1024), (1, 1152), (1, 1280), (1, 1408), (1, 1536), (1, 1664), (1, 1792), (1, 1920), (1, 2048), (2, 128), (2, 256), (2, 384), (2, 512), (2, 640), (2, 768), (2, 896), (2, 1024), (2, 1152), (2, 1280), (2, 1408), (2, 1536), (2, 1664), (2, 1792), (2, 1920), (2, 2048), (4, 128), (4, 256), (4, 384), (4, 512), (4, 640), (4, 768), (4, 896), (4, 1024), (4, 1152), (4, 1280), (4, 1408), (4, 1536), (4, 1664), (4, 1792), (4, 1920), (4, 2048)] + INFO 08-02 17:38:43 hpu_model_runner.py:1206] Warmup finished in 49 secs, allocated 14.19 GiB of device memory + INFO 08-02 17:38:43 hpu_executor.py:91] init_cache_engine took 37.92 GiB of device memory (53.39 GiB/94.62 GiB used) and 57.86 MiB of host memory (475.4 GiB/1007 GiB used) + + +Recommended vLLM Parameters +--------------------------- + +- We recommend running inference on Gaudi 2 with ``block_size`` of 128 + for BF16 data type. Using default values (16, 32) might lead to + sub-optimal performance due to Matrix Multiplication Engine + under-utilization (see `Gaudi + Architecture `__). +- For max throughput on Llama 7B, we recommend running with batch size + of 128 or 256 and max context length of 2048 with HPU Graphs enabled. + If you encounter out-of-memory issues, see troubleshooting section. + +Environment variables +--------------------- + +**Diagnostic and profiling knobs:** + +- ``VLLM_PROFILER_ENABLED``: if ``true``, high level profiler will be enabled. Resulting JSON traces can be viewed in `perfetto.habana.ai `__. Disabled by default. +- ``VLLM_HPU_LOG_STEP_GRAPH_COMPILATION``: if ``true``, will log graph compilations per each vLLM engine step, only when there was any - highly recommended to use alongside ``PT_HPU_METRICS_GC_DETAILS=1``. Disabled by default. +- ``VLLM_HPU_LOG_STEP_GRAPH_COMPILATION_ALL``: if ``true``, will log graph compilations per each vLLM engine step, always, even if there were none. Disabled by default. +- ``VLLM_HPU_LOG_STEP_CPU_FALLBACKS``: if ``true``, will log cpu fallbacks per each vLLM engine step, only when there was any. Disabled by default. +- ``VLLM_HPU_LOG_STEP_CPU_FALLBACKS_ALL``: if ``true``, will log cpu fallbacks per each vLLM engine step, always, even if there were none. Disabled by default. 
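+
+One way to enable the diagnostic knobs above for an offline, single-card run is to set them in the process environment before vLLM is imported. This is a minimal sketch (the model name is a placeholder); exporting the same variables in the shell before launching the server works equally well:
+
+.. code-block:: python
+
+    import os
+
+    # Enable the high-level profiler and per-step graph compilation logging.
+    os.environ["VLLM_PROFILER_ENABLED"] = "true"
+    os.environ["VLLM_HPU_LOG_STEP_GRAPH_COMPILATION"] = "true"
+    os.environ["PT_HPU_METRICS_GC_DETAILS"] = "1"
+
+    from vllm import LLM, SamplingParams
+
+    llm = LLM(model="meta-llama/Llama-2-7b-hf")  # placeholder model
+    outputs = llm.generate(["Hello"], SamplingParams(max_tokens=16))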
+
+**Performance tuning knobs:**
+
+- ``VLLM_SKIP_WARMUP``: if ``true``, warmup will be skipped, ``false`` by default
+- ``VLLM_GRAPH_RESERVED_MEM``: percentage of memory dedicated for HPUGraph capture, ``0.1`` by default
+- ``VLLM_GRAPH_PROMPT_RATIO``: percentage of reserved graph memory dedicated for prompt graphs, ``0.3`` by default
+- ``VLLM_GRAPH_PROMPT_STRATEGY``: strategy determining the order of prompt graph capture, ``min_tokens`` or ``max_bs``, ``min_tokens`` by default
+- ``VLLM_GRAPH_DECODE_STRATEGY``: strategy determining the order of decode graph capture, ``min_tokens`` or ``max_bs``, ``max_bs`` by default
+- ``VLLM_{phase}_{dim}_BUCKET_{param}`` - a collection of 12 environment variables configuring the ranges of the bucketing mechanism
+
+  - ``{phase}`` is either ``PROMPT`` or ``DECODE``
+  - ``{dim}`` is either ``BS``, ``SEQ`` or ``BLOCK``
+  - ``{param}`` is either ``MIN``, ``STEP`` or ``MAX``
+  - Default values:
+
+    - Prompt:
+      - batch size min (``VLLM_PROMPT_BS_BUCKET_MIN``): ``1``
+      - batch size step (``VLLM_PROMPT_BS_BUCKET_STEP``): ``min(max_num_seqs, 32)``
+      - batch size max (``VLLM_PROMPT_BS_BUCKET_MAX``): ``min(max_num_seqs, 64)``
+      - sequence length min (``VLLM_PROMPT_SEQ_BUCKET_MIN``): ``block_size``
+      - sequence length step (``VLLM_PROMPT_SEQ_BUCKET_STEP``): ``block_size``
+      - sequence length max (``VLLM_PROMPT_SEQ_BUCKET_MAX``): ``max_model_len``
+
+    - Decode:
+      - batch size min (``VLLM_DECODE_BS_BUCKET_MIN``): ``1``
+      - batch size step (``VLLM_DECODE_BS_BUCKET_STEP``): ``min(max_num_seqs, 32)``
+      - batch size max (``VLLM_DECODE_BS_BUCKET_MAX``): ``max_num_seqs``
+      - sequence length min (``VLLM_DECODE_BLOCK_BUCKET_MIN``): ``block_size``
+      - sequence length step (``VLLM_DECODE_BLOCK_BUCKET_STEP``): ``block_size``
+      - sequence length max (``VLLM_DECODE_BLOCK_BUCKET_MAX``): ``max(128, (max_num_seqs*max_model_len)/block_size)``
+
+
+Additionally, there are HPU PyTorch Bridge environment variables impacting vLLM execution:
+
+- ``PT_HPU_LAZY_MODE``: if ``0``, the PyTorch Eager backend for Gaudi will be used; if ``1``, the PyTorch Lazy backend for Gaudi will be used; ``1`` is the default
+- ``PT_HPU_ENABLE_LAZY_COLLECTIVES``: required to be ``true`` for tensor parallel inference with HPU Graphs
+
+Troubleshooting: Tweaking HPU Graphs
+====================================
+
+If you experience device out-of-memory issues or want to attempt
+inference at higher batch sizes, try tweaking HPU Graphs by following
+the steps below:
+
+- Tweak the ``gpu_memory_utilization`` knob. Lowering it decreases the
+  allocation of the KV cache, leaving some headroom for capturing graphs
+  with larger batch sizes. By default, ``gpu_memory_utilization`` is set
+  to 0.9, which attempts to allocate ~90% of the HBM left for the KV
+  cache after a short profiling run. Note that decreasing it reduces the
+  number of KV cache blocks available, and therefore reduces the
+  effective maximum number of tokens that can be handled at a given time.
+
+- If this method is not sufficient, you can disable ``HPUGraph``
+  completely. With HPU Graphs disabled, you are trading latency and
+  throughput at lower batches for potentially higher throughput at
+  higher batches. You can do that by adding the ``--enforce-eager`` flag
+  to the server (for online inference), or by passing the
+  ``enforce_eager=True`` argument to the LLM constructor (for offline
+  inference), as sketched below.
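+
+A minimal offline-inference sketch of the options above (the model name is a placeholder; ``block_size``, ``gpu_memory_utilization`` and ``enforce_eager`` are standard ``LLM`` constructor arguments):
+
+.. code-block:: python
+
+    from vllm import LLM, SamplingParams
+
+    llm = LLM(
+        model="meta-llama/Llama-2-7b-hf",   # placeholder model
+        block_size=128,                     # recommended block size for Gaudi 2 with BF16
+        gpu_memory_utilization=0.8,         # smaller KV cache, more headroom for graph capture
+        # enforce_eager=True,               # alternatively, disable HPU Graphs entirely
+    )
+    outputs = llm.generate(["Hello, my name is"], SamplingParams(max_tokens=32))
+    print(outputs[0].outputs[0].text)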
diff --git a/docs/source/index.rst b/docs/source/index.rst index 2399fcf5faec9..51add1fd4d0ab 100644 --- a/docs/source/index.rst +++ b/docs/source/index.rst @@ -43,7 +43,7 @@ vLLM is flexible and easy to use with: * Tensor parallelism and pipeline parallelism support for distributed inference * Streaming outputs * OpenAI-compatible API server -* Support NVIDIA GPUs, AMD CPUs and GPUs, Intel CPUs and GPUs, PowerPC CPUs, TPU, and AWS Trainium and Inferentia Accelerators. +* Support NVIDIA GPUs, AMD CPUs and GPUs, Intel CPUs, Gaudi® accelerators and GPUs, PowerPC CPUs, TPU, and AWS Trainium and Inferentia Accelerators. * Prefix caching support * Multi-lora support @@ -66,6 +66,7 @@ Documentation getting_started/amd-installation getting_started/openvino-installation getting_started/cpu-installation + getting_started/gaudi-installation getting_started/neuron-installation getting_started/tpu-installation getting_started/xpu-installation diff --git a/requirements-hpu.txt b/requirements-hpu.txt new file mode 100644 index 0000000000000..4674efb812cfd --- /dev/null +++ b/requirements-hpu.txt @@ -0,0 +1,11 @@ +# Common dependencies +-r requirements-common.txt + +# Dependencies for HPU code +ray +triton +pandas +tabulate +setuptools>=61 +setuptools-scm>=8 +vllm-hpu-extension @ git+https://github.com/HabanaAI/vllm-hpu-extension.git@fd7f2e6 diff --git a/setup.py b/setup.py index f145a33258d70..51ca5e2abecf7 100644 --- a/setup.py +++ b/setup.py @@ -253,6 +253,24 @@ def run(self): self.copy_file(file, dst_file) +def _is_hpu() -> bool: + is_hpu_available = True + try: + subprocess.run(["hl-smi"], capture_output=True, check=True) + except (FileNotFoundError, PermissionError, subprocess.CalledProcessError): + if not os.path.exists('/dev/accel/accel0') and not os.path.exists( + '/dev/accel/accel_controlD0'): + # last resort... + try: + output = subprocess.check_output( + 'lsmod | grep habanalabs | wc -l', shell=True) + is_hpu_available = int(output) > 0 + except (ValueError, FileNotFoundError, PermissionError, + subprocess.CalledProcessError): + is_hpu_available = False + return is_hpu_available or VLLM_TARGET_DEVICE == "hpu" + + def _no_device() -> bool: return VLLM_TARGET_DEVICE == "empty" @@ -260,7 +278,7 @@ def _no_device() -> bool: def _is_cuda() -> bool: has_cuda = torch.version.cuda is not None return (VLLM_TARGET_DEVICE == "cuda" and has_cuda - and not (_is_neuron() or _is_tpu())) + and not (_is_neuron() or _is_tpu() or _is_hpu())) def _is_hip() -> bool: @@ -356,6 +374,23 @@ def get_path(*filepath) -> str: return os.path.join(ROOT_DIR, *filepath) +def get_gaudi_sw_version(): + """ + Returns the driver version. 
+ """ + # Enable console printing for `hl-smi` check + output = subprocess.run("hl-smi", + shell=True, + text=True, + stdout=subprocess.PIPE, + stderr=subprocess.PIPE, + env={"ENABLE_CONSOLE": "true"}) + if output.returncode == 0 and output.stdout: + return output.stdout.split("\n")[2].replace( + " ", "").split(":")[1][:-1].split("-")[0] + return "0.0.0" # when hl-smi is not available + + def get_vllm_version() -> str: version = get_version( write_to="vllm/_version.py", # TODO: move this to pyproject.toml @@ -385,6 +420,12 @@ def get_vllm_version() -> str: if neuron_version != MAIN_CUDA_VERSION: neuron_version_str = neuron_version.replace(".", "")[:3] version += f"{sep}neuron{neuron_version_str}" + elif _is_hpu(): + # Get the Intel Gaudi Software Suite version + gaudi_sw_version = str(get_gaudi_sw_version()) + if gaudi_sw_version != MAIN_CUDA_VERSION: + gaudi_sw_version = gaudi_sw_version.replace(".", "")[:3] + version += f"{sep}gaudi{gaudi_sw_version}" elif _is_openvino(): version += f"{sep}openvino" elif _is_tpu(): @@ -443,6 +484,8 @@ def _read_requirements(filename: str) -> List[str]: requirements = _read_requirements("requirements-rocm.txt") elif _is_neuron(): requirements = _read_requirements("requirements-neuron.txt") + elif _is_hpu(): + requirements = _read_requirements("requirements-hpu.txt") elif _is_openvino(): requirements = _read_requirements("requirements-openvino.txt") elif _is_tpu(): @@ -453,7 +496,7 @@ def _read_requirements(filename: str) -> List[str]: requirements = _read_requirements("requirements-xpu.txt") else: raise ValueError( - "Unsupported platform, please use CUDA, ROCm, Neuron, " + "Unsupported platform, please use CUDA, ROCm, Neuron, HPU, " "OpenVINO, or CPU.") return requirements diff --git a/vllm/_custom_ops.py b/vllm/_custom_ops.py index 46a2fb8bc80a2..682e08db99fa9 100644 --- a/vllm/_custom_ops.py +++ b/vllm/_custom_ops.py @@ -12,7 +12,7 @@ logger = init_logger(__name__) -if not current_platform.is_tpu(): +if not current_platform.is_tpu() and not current_platform.is_hpu(): try: import vllm._C except ImportError as e: diff --git a/vllm/attention/backends/hpu_attn.py b/vllm/attention/backends/hpu_attn.py new file mode 100644 index 0000000000000..a8f4b09b67274 --- /dev/null +++ b/vllm/attention/backends/hpu_attn.py @@ -0,0 +1,264 @@ +############################################################################### +# Copyright (C) 2024 Habana Labs, Ltd. 
an Intel Company +############################################################################### + +import os +from dataclasses import dataclass +from typing import Any, Dict, List, Optional, Tuple, Type + +import torch +import vllm_hpu_extension.ops as ops +from vllm_hpu_extension.utils import Matmul, Softmax, VLLMKVCache + +from vllm.attention.backends.abstract import (AttentionBackend, AttentionImpl, + AttentionMetadata, AttentionType) +from vllm.attention.backends.utils import CommonAttentionState +from vllm.attention.ops.hpu_paged_attn import (HPUPagedAttention, + HPUPagedAttentionMetadata) +from vllm.logger import init_logger + +logger = init_logger(__name__) + + +class HPUAttentionBackend(AttentionBackend): + + @staticmethod + def get_impl_cls() -> Type["HPUAttentionImpl"]: + return HPUAttentionImpl + + @staticmethod + def get_metadata_cls() -> Type["AttentionMetadata"]: + return HPUAttentionMetadata + + @staticmethod + def get_state_cls() -> Type["CommonAttentionState"]: + return CommonAttentionState + + @staticmethod + def get_kv_cache_shape( + num_blocks: int, + block_size: int, + num_kv_heads: int, + head_size: int, + ) -> Tuple[int, ...]: + return HPUPagedAttention.get_kv_cache_shape(num_blocks, block_size, + num_kv_heads, head_size) + + @staticmethod + def swap_blocks( + src_kv_cache: torch.Tensor, + dst_kv_cache: torch.Tensor, + src_to_dst: Dict[int, int], + ) -> None: + HPUPagedAttention.swap_blocks(src_kv_cache, dst_kv_cache, src_to_dst) + + @staticmethod + def copy_blocks( + kv_caches: List[torch.Tensor], + src_to_dists: Dict[int, List[int]], + ) -> None: + HPUPagedAttention.copy_blocks(kv_caches, src_to_dists) + + +@dataclass +class HPUAttentionMetadata(HPUPagedAttentionMetadata, AttentionMetadata): + """Metadata for HPUAttentionbackend.""" + # Currently, input sequences can only contain all prompts + # or all decoding. True if all sequences are prompts. + is_prompt: bool + attn_bias: Optional[torch.Tensor] + seq_lens_tensor: Optional[torch.Tensor] + + +class HPUAttentionImpl(AttentionImpl, torch.nn.Module): + """ + If the input tensors contain prompt tokens, the layout is as follows: + |<--------------- num_prefill_tokens ----------------->| + |<--prefill_0-->|<--prefill_1-->|...|<--prefill_N-1--->| + + Otherwise, the layout is as follows: + |<----------------- num_decode_tokens ------------------>| + |<--decode_0-->|..........|<--decode_M-1-->|<--padding-->| + + Generation tokens can contain padding when cuda-graph is used. + Currently, prompt tokens don't contain any padding. + + The prompts might have different lengths, while the generation tokens + always have length 1. 
+ """ + + def __init__( + self, + num_heads: int, + head_size: int, + scale: float, + num_kv_heads: int, + alibi_slopes: Optional[List[float]], + sliding_window: Optional[int], + kv_cache_dtype: str, + blocksparse_params: Optional[Dict[str, Any]] = None, + max_seq_len: int = 4096, + ) -> None: + super(AttentionImpl, self).__init__() + self.kv_cache_dtype = kv_cache_dtype + self.num_heads = num_heads + self.head_size = head_size + self.scale = float(scale) + self.matmul_qk = Matmul() + self.softmax = Softmax() + self.matmul_av = Matmul() + self.k_cache = VLLMKVCache() + self.v_cache = VLLMKVCache() + self.num_kv_heads = num_heads if num_kv_heads is None else num_kv_heads + self.sliding_window = sliding_window + self.alibi_slopes = alibi_slopes + if alibi_slopes is not None: + alibi_slopes_tensor = torch.tensor(alibi_slopes, + dtype=torch.bfloat16) + self.alibi_slopes = alibi_slopes_tensor + assert self.num_heads % self.num_kv_heads == 0 + self.num_queries_per_kv = self.num_heads // self.num_kv_heads + + self.prefill_usefusedsdpa = os.getenv('VLLM_PROMPT_USE_FUSEDSDPA', + '0').lower() in ['1', 'true'] + if self.prefill_usefusedsdpa: + assert alibi_slopes is None, \ + 'Prefill with FusedSDPA not supported with alibi slopes!' + + suppored_head_sizes = HPUPagedAttention.get_supported_head_sizes() + if head_size not in suppored_head_sizes: + raise ValueError( + f"Head size {head_size} is not supported by PagedAttention. " + f"Supported head sizes are: {suppored_head_sizes}.") + + def forward( + self, + query: torch.Tensor, + key: torch.Tensor, + value: torch.Tensor, + kv_cache: torch.Tensor, + attn_metadata: HPUAttentionMetadata, + k_scale: float = 1.0, + v_scale: float = 1.0, + attn_type: AttentionType = AttentionType.DECODER, + ) -> torch.Tensor: + """Forward pass with xFormers and PagedAttention. + + Args: + query: shape = [num_tokens, num_heads * head_size] + key: shape = [num_tokens, num_kv_heads * head_size] + value: shape = [num_tokens, num_kv_heads * head_size] + kv_cache = [2, num_blocks, block_size * num_kv_heads * head_size] + attn_metadata: Metadata for attention. + Returns: + shape = [num_tokens, num_heads * head_size] + """ + if attn_type != AttentionType.DECODER: + raise NotImplementedError("Encoder self-attention and " + "encoder/decoder cross-attention " + "are not implemented for " + "HPUAttentionImpl") + batch_size, seq_len, hidden_size = query.shape + _, seq_len_kv, _ = key.shape + + query = query.view(-1, self.num_heads, self.head_size) + key = key.view(-1, self.num_kv_heads, self.head_size) + value = value.view(-1, self.num_kv_heads, self.head_size) + block_indices = attn_metadata.block_indices + block_offsets = attn_metadata.block_offsets + if attn_metadata.is_prompt: + key = key.unflatten(0, (block_indices.size(0), -1)) + value = value.unflatten(0, (block_indices.size(0), -1)) + if kv_cache is not None: + key_cache, value_cache = HPUPagedAttention.split_kv_cache( + kv_cache, self.num_kv_heads, self.head_size) + + # Reshape the input keys and values and store them in the cache. + # If kv_cache is not provided, the new key and value tensors are + # not cached. This happens during the initial memory profiling run. + key_cache = self.k_cache(key, key_cache, block_indices, + block_offsets) + value_cache = self.v_cache(value, value_cache, block_indices, + block_offsets) + + if attn_metadata.is_prompt: + # Prompt run. 
+ if not self.prefill_usefusedsdpa: + # TODO: move this outside of model + assert attn_metadata.attn_bias is not None, \ + 'attn_bias must be set before calling model.forward!' + attn_bias = attn_metadata.attn_bias + if self.alibi_slopes is not None: + position_bias = _make_alibi_bias(self.alibi_slopes, + self.num_kv_heads, + attn_bias.dtype, + attn_bias.shape[-1]) + attn_bias = attn_bias.tile((1, self.num_kv_heads, 1, 1)) + attn_bias.add_(position_bias) + else: + attn_bias = None + + query_shape = (batch_size, seq_len, self.num_heads, self.head_size) + kv_shape = (batch_size, seq_len_kv, self.num_kv_heads, + self.head_size) + out = ops.prompt_attention( + query.view(query_shape), + key.view(kv_shape), + value.view(kv_shape), + attn_bias=attn_bias, + p=0.0, + scale=self.scale, + matmul_qk_op=self.matmul_qk, + softmax_op=self.softmax, + matmul_av_op=self.matmul_av, + ) + output = out.reshape(batch_size, seq_len, hidden_size) + else: + # Decoding run. + output = HPUPagedAttention.forward_decode( + query=query, + key_cache=key_cache, + value_cache=value_cache, + block_list=attn_metadata.block_list, + block_mapping=attn_metadata.block_mapping, + block_bias=attn_metadata.attn_bias, + block_scales=attn_metadata.block_scales, + scale=self.scale, + matmul_qk_op=self.matmul_qk, + matmul_av_op=self.matmul_av, + keys_fetch_func=self.k_cache.fetch_from_cache, + values_fetch_func=self.v_cache.fetch_from_cache) + # Reshape the output tensor. + return output.view(batch_size, seq_len, hidden_size) + + +def _make_alibi_bias( + alibi_slopes: torch.Tensor, + num_kv_heads: int, + dtype: torch.dtype, + seq_len: int, +) -> torch.Tensor: + bias = torch.arange(seq_len, dtype=dtype) + # NOTE(zhuohan): HF uses + # `bias = bias[None, :].repeat(seq_len, 1)` + # here. We find that both biases give the same results, but + # the bias below more accurately follows the original ALiBi + # paper. + # Calculate a matrix where each element represents ith element- jth + # element. + bias = bias[None, :] - bias[:, None] + + padded_len = (seq_len + 7) // 8 * 8 + num_heads = alibi_slopes.shape[0] + bias = torch.empty( + 1, # batch size + num_heads, + seq_len, + padded_len, + device=alibi_slopes.device, + dtype=dtype, + )[:, :, :, :seq_len].copy_(bias) + bias.mul_(alibi_slopes[:, None, None]) + if num_heads != num_kv_heads: + bias = bias.unflatten(1, (num_kv_heads, num_heads // num_kv_heads)) + return bias diff --git a/vllm/attention/ops/hpu_paged_attn.py b/vllm/attention/ops/hpu_paged_attn.py new file mode 100644 index 0000000000000..4c0fb2a628361 --- /dev/null +++ b/vllm/attention/ops/hpu_paged_attn.py @@ -0,0 +1,103 @@ +############################################################################### +# Copyright (C) 2024 Habana Labs, Ltd. an Intel Company +############################################################################### + +from dataclasses import dataclass +from typing import Dict, List, Optional, Tuple + +import torch +from vllm_hpu_extension import cache_ops, ops + +# Should be the same as PARTITION_SIZE in `paged_attention_v2_launcher`. 
+_PARTITION_SIZE = 512 + + +@dataclass +class HPUPagedAttentionMetadata: + """Metadata for PagedAttention.""" + block_list: Optional[torch.Tensor] + block_mapping: Optional[torch.Tensor] + block_usage: Optional[torch.Tensor] + block_indices: Optional[torch.Tensor] + block_offsets: Optional[torch.Tensor] + block_scales: Optional[torch.Tensor] + + +class HPUPagedAttention: + + @staticmethod + def get_supported_head_sizes() -> List[int]: + return [64, 80, 96, 112, 128, 256] + + @staticmethod + def get_kv_cache_shape( + num_blocks: int, + block_size: int, + num_kv_heads: int, + head_size: int, + ) -> Tuple[int, ...]: + return (num_blocks, block_size, num_kv_heads, head_size) + + @staticmethod + def split_kv_cache( + kv_cache: torch.Tensor, + num_kv_heads: int, + head_size: int, + ) -> Tuple[torch.Tensor, torch.Tensor]: + key_cache = kv_cache[0] + value_cache = kv_cache[1] + return key_cache, value_cache + + @staticmethod + def write_to_paged_cache(key: torch.Tensor, value: torch.Tensor, + key_cache: torch.Tensor, + value_cache: torch.Tensor, + slot_mapping: torch.Tensor, kv_cache_dtype: str, + is_prompt: bool) -> None: + cache_ops.reshape_and_cache(key, value, key_cache, value_cache, + slot_mapping, kv_cache_dtype, is_prompt) + + @staticmethod + def forward_decode(**kwargs) -> torch.Tensor: + return ops.flat_pa(**kwargs) + + @staticmethod + def forward_prefix( + query: torch.Tensor, + key: torch.Tensor, + value: torch.Tensor, + key_cache: torch.Tensor, + value_cache: torch.Tensor, + block_tables: torch.Tensor, + subquery_start_loc: torch.Tensor, + seq_lens_tensor: torch.Tensor, + context_lens: torch.Tensor, + max_query_len: int, + alibi_slopes: Optional[torch.Tensor], + sliding_window: Optional[int], + ) -> torch.Tensor: + raise NotImplementedError( + "forward_prefix is not implemented for HPUPagedAttention") + + @staticmethod + def swap_blocks( + src_kv_cache: torch.Tensor, + dst_kv_cache: torch.Tensor, + src_to_dst: Dict[int, int], + ) -> None: + src_key_cache = src_kv_cache[0] + dst_key_cache = dst_kv_cache[0] + cache_ops.swap_blocks(src_key_cache, dst_key_cache, src_to_dst) + + src_value_cache = src_kv_cache[1] + dst_value_cache = dst_kv_cache[1] + cache_ops.swap_blocks(src_value_cache, dst_value_cache, src_to_dst) + + @staticmethod + def copy_blocks( + kv_caches: List[torch.Tensor], + src_to_dists: Dict[int, List[int]], + ) -> None: + key_caches = [kv_cache[0] for kv_cache in kv_caches] + value_caches = [kv_cache[1] for kv_cache in kv_caches] + cache_ops.copy_blocks(key_caches, value_caches, src_to_dists) diff --git a/vllm/attention/selector.py b/vllm/attention/selector.py index 8a59cf41a689e..991602da2853a 100644 --- a/vllm/attention/selector.py +++ b/vllm/attention/selector.py @@ -23,6 +23,7 @@ class _Backend(enum.Enum): TORCH_SDPA = enum.auto() OPENVINO = enum.auto() FLASHINFER = enum.auto() + HPU_ATTN = enum.auto() PALLAS = enum.auto() IPEX = enum.auto() NO_ATTENTION = enum.auto() @@ -145,6 +146,10 @@ def get_attn_backend( logger.info("Using Flashinfer backend.") from vllm.attention.backends.flashinfer import FlashInferBackend return FlashInferBackend + elif backend == _Backend.HPU_ATTN: + logger.info("Using HPUAttention backend.") + from vllm.attention.backends.hpu_attn import HPUAttentionBackend + return HPUAttentionBackend elif backend == _Backend.PALLAS: logger.info("Using Pallas backend.") from vllm.attention.backends.pallas import PallasAttentionBackend @@ -220,6 +225,9 @@ def which_attn_to_use( logger.info("%s is not supported in AMD GPUs.", selected_backend) return 
_Backend.ROCM_FLASH + if current_platform.is_hpu(): + return _Backend.HPU_ATTN + if envs.VLLM_USE_V1: return _Backend.FLASH_ATTN_VLLM_V1 diff --git a/vllm/config.py b/vllm/config.py index 851d35dfd9fb0..91bbbfec4b7b3 100644 --- a/vllm/config.py +++ b/vllm/config.py @@ -466,9 +466,10 @@ def verify_async_output_proc(self, parallel_config, speculative_config, # Reminder: Please update docs/source/serving/compatibility_matrix.rst # If the feature combo become valid - if device_config.device_type not in ("cuda", "tpu", "xpu"): + if device_config.device_type not in ("cuda", "tpu", "xpu", "hpu"): logger.warning( - "Async output processing is only supported for CUDA, TPU, XPU. " + "Async output processing is only supported for CUDA, TPU, XPU " + "and HPU." "Disabling it for other platforms.") self.use_async_output_proc = False return @@ -860,7 +861,6 @@ class LoadConfig: ignore_patterns: The list of patterns to ignore when loading the model. Default to "original/**/*" to avoid repeated loading of llama's checkpoints. - """ load_format: Union[str, LoadFormat, "BaseModelLoader"] = LoadFormat.AUTO @@ -964,6 +964,13 @@ def __init__( raise ValueError( "TPU backend only supports Ray for distributed inference.") + if current_platform.is_hpu() and self.world_size > 1: + if self.distributed_executor_backend is None: + self.distributed_executor_backend = "ray" + if self.distributed_executor_backend != "ray": + raise ValueError( + "HPU backend only supports Ray for distributed inference.") + if self.distributed_executor_backend is None and self.world_size > 1: # We use multiprocessing by default if world_size fits on the # current node and we aren't in a ray placement group. @@ -1166,6 +1173,8 @@ def __init__(self, device: str = "auto") -> None: self.device_type = "cuda" elif current_platform.is_neuron(): self.device_type = "neuron" + elif current_platform.is_hpu(): + self.device_type = "hpu" elif current_platform.is_openvino(): self.device_type = "openvino" elif current_platform.is_tpu(): @@ -1745,6 +1754,13 @@ def _get_and_verify_dtype( torch_dtype = torch.float16 else: torch_dtype = config_dtype + + if current_platform.is_hpu() and config_dtype == torch.float16: + logger.info( + "For HPU, we cast models to bfloat16 instead of" + "using float16 by default. Please specify `dtype` if you " + "want to use float16.") + torch_dtype = torch.bfloat16 else: if dtype not in _STR_DTYPE_TO_TORCH_DTYPE: raise ValueError(f"Unknown dtype: {dtype}") diff --git a/vllm/core/block/cpu_gpu_block_allocator.py b/vllm/core/block/cpu_gpu_block_allocator.py index 6eda5f99aa1c8..9727f6e19b84e 100644 --- a/vllm/core/block/cpu_gpu_block_allocator.py +++ b/vllm/core/block/cpu_gpu_block_allocator.py @@ -4,6 +4,7 @@ DeviceAwareBlockAllocator) from vllm.core.block.naive_block import NaiveBlock, NaiveBlockAllocator from vllm.core.block.prefix_caching_block import PrefixCachingBlockAllocator +from vllm.platforms import current_platform from vllm.utils import Device @@ -52,7 +53,11 @@ def create( - The block IDs are assigned contiguously, with GPU block IDs coming before CPU block IDs. 
""" - block_ids = list(range(num_gpu_blocks + num_cpu_blocks)) + # For HPU, block id 0 is used only for padding + reserved_blocks = 1 if current_platform.is_hpu() else 0 + block_ids = list( + range(reserved_blocks, num_gpu_blocks + num_cpu_blocks)) + num_gpu_blocks -= reserved_blocks gpu_block_ids = block_ids[:num_gpu_blocks] cpu_block_ids = block_ids[num_gpu_blocks:] diff --git a/vllm/distributed/device_communicators/hpu_communicator.py b/vllm/distributed/device_communicators/hpu_communicator.py new file mode 100644 index 0000000000000..cc9b19ce022b5 --- /dev/null +++ b/vllm/distributed/device_communicators/hpu_communicator.py @@ -0,0 +1,48 @@ +import torch +import torch.distributed as dist +from torch.distributed import ProcessGroup + +from vllm.platforms import current_platform + +if current_platform.is_hpu(): + import habana_frameworks.torch as htorch # noqa: F401 + + +class HpuCommunicator: + + def __init__(self, group: ProcessGroup): + if not current_platform.is_hpu(): + self.disabled = True + return + self.disabled = False + self.group = group + self.world_size = dist.get_world_size(self.group) + + def all_reduce(self, x: torch.Tensor) -> torch.Tensor: + # FIXME(kzawora): this is a workaround for a bug in Habana PT bridge + # occurring when PT_HPU_ENABLE_LAZY_COLLECTIVES=true env var is used + # (which is required for tensor parallel HPUGraph inference) + htorch.core.mark_step() + dist.all_reduce(x, group=self.group) + return x + + def all_gather(self, x: torch.Tensor, dim: int = -1) -> torch.Tensor: + world_size = self.world_size + if dim < 0: + # Convert negative dim to positive. + dim += x.dim() + input_size = x.size() + # Allocate output tensor. + output_tensor = torch.empty((world_size, ) + input_size, + dtype=x.dtype, + device=x.device) + # All-gather. + htorch.core.mark_step() + dist.all_gather_into_tensor(output_tensor, x, group=self.group) + # Reshape + output_tensor = output_tensor.movedim(0, dim) + output_tensor = output_tensor.reshape(input_size[:dim] + + (world_size * + input_size[dim], ) + + input_size[dim + 1:]) + return output_tensor diff --git a/vllm/distributed/parallel_state.py b/vllm/distributed/parallel_state.py index 94ba41a016f6d..efa3525910a5e 100644 --- a/vllm/distributed/parallel_state.py +++ b/vllm/distributed/parallel_state.py @@ -177,6 +177,7 @@ def __init__( use_pynccl: bool, use_custom_allreduce: bool, use_tpu_communicator: bool, + use_hpu_communicator: bool, use_message_queue_broadcaster: bool = False, group_name: Optional[str] = None, ): @@ -213,6 +214,7 @@ def __init__( self.use_pynccl = use_pynccl self.use_custom_allreduce = use_custom_allreduce self.use_tpu_communicator = use_tpu_communicator + self.use_hpu_communicator = use_hpu_communicator # lazy import to avoid documentation build error from vllm.distributed.device_communicators.custom_all_reduce import ( @@ -241,6 +243,12 @@ def __init__( if use_tpu_communicator and self.world_size > 1: self.tpu_communicator = TpuCommunicator(group=self.cpu_group) + from vllm.distributed.device_communicators.hpu_communicator import ( + HpuCommunicator) + self.hpu_communicator: Optional[HpuCommunicator] + if use_hpu_communicator and self.world_size > 1: + self.hpu_communicator = HpuCommunicator(group=self.device_group) + from vllm.distributed.device_communicators.shm_broadcast import ( MessageQueue) self.mq_broadcaster: Optional[MessageQueue] = None @@ -362,6 +370,10 @@ def all_reduce(self, input_: torch.Tensor) -> torch.Tensor: # TPU handles Dynamo with its own logic. 
return self.tpu_communicator.all_reduce(input_) + if self.hpu_communicator is not None and \ + not self.hpu_communicator.disabled: + return self.hpu_communicator.all_reduce(input_) + if self.ca_comm is not None and \ not self.ca_comm.disabled and \ self.ca_comm.should_custom_ar(input_): @@ -400,6 +412,11 @@ def all_gather(self, input_: torch.Tensor, dim: int = -1) -> torch.Tensor: if tpu_comm is not None and not tpu_comm.disabled: return tpu_comm.all_gather(input_, dim) + # For HPUs, use HPU communicator. + hpu_comm = self.hpu_communicator + if hpu_comm is not None and not hpu_comm.disabled: + return hpu_comm.all_gather(input_, dim) + if dim < 0: # Convert negative dim to positive. dim += input_.dim() @@ -879,6 +896,7 @@ def init_world_group(ranks: List[int], local_rank: int, use_pynccl=False, use_custom_allreduce=False, use_tpu_communicator=False, + use_hpu_communicator=False, group_name="world", ) @@ -900,6 +918,7 @@ def init_model_parallel_group( use_pynccl=True, use_custom_allreduce=use_custom_allreduce, use_tpu_communicator=True, + use_hpu_communicator=True, use_message_queue_broadcaster=use_message_queue_broadcaster, group_name=group_name, ) diff --git a/vllm/engine/arg_utils.py b/vllm/engine/arg_utils.py index bd39e72d58caa..b556c0eed3776 100644 --- a/vllm/engine/arg_utils.py +++ b/vllm/engine/arg_utils.py @@ -17,6 +17,7 @@ from vllm.executor.executor_base import ExecutorBase from vllm.logger import init_logger from vllm.model_executor.layers.quantization import QUANTIZATION_METHODS +from vllm.platforms import current_platform from vllm.transformers_utils.config import ( maybe_register_config_serialize_by_value) from vllm.transformers_utils.utils import check_gguf_file @@ -37,6 +38,7 @@ "openvino", "tpu", "xpu", + "hpu", ] @@ -110,7 +112,9 @@ class EngineArgs: pipeline_parallel_size: int = 1 tensor_parallel_size: int = 1 max_parallel_loading_workers: Optional[int] = None - block_size: int = 16 + # NOTE(kzawora): default block size for Gaudi should be 128 + # smaller sizes still work, but very inefficiently + block_size: int = 16 if not current_platform.is_hpu() else 128 enable_prefix_caching: bool = False disable_sliding_window: bool = False use_v2_block_manager: bool = True @@ -397,7 +401,7 @@ def add_cli_args(parser: FlexibleArgumentParser) -> FlexibleArgumentParser: parser.add_argument('--block-size', type=int, default=EngineArgs.block_size, - choices=[8, 16, 32], + choices=[8, 16, 32, 64, 128], help='Token block size for contiguous chunks of ' 'tokens. 
This is ignored on neuron devices and ' 'set to max-model-len') @@ -1132,8 +1136,7 @@ def create_engine_config(self) -> VllmConfig: multi_step_stream_outputs=self.multi_step_stream_outputs, send_delta_data=(envs.VLLM_USE_RAY_SPMD_WORKER and parallel_config.use_ray), - policy=self.scheduling_policy, - ) + policy=self.scheduling_policy) lora_config = LoRAConfig( max_lora_rank=self.max_lora_rank, max_loras=self.max_loras, diff --git a/vllm/engine/async_llm_engine.py b/vllm/engine/async_llm_engine.py index 161b85646b6e8..1a371b52bb64b 100644 --- a/vllm/engine/async_llm_engine.py +++ b/vllm/engine/async_llm_engine.py @@ -627,6 +627,14 @@ def _get_executor_cls( elif engine_config.device_config.device_type == "cpu": from vllm.executor.cpu_executor import CPUExecutorAsync executor_class = CPUExecutorAsync + elif engine_config.device_config.device_type == "hpu": + if distributed_executor_backend == "ray": + initialize_ray_cluster(engine_config.parallel_config) + from vllm.executor.ray_hpu_executor import RayHPUExecutorAsync + executor_class = RayHPUExecutorAsync + else: + from vllm.executor.hpu_executor import HPUExecutorAsync + executor_class = HPUExecutorAsync elif engine_config.device_config.device_type == "openvino": assert distributed_executor_backend is None, ( "Distributed execution is not supported with " diff --git a/vllm/engine/llm_engine.py b/vllm/engine/llm_engine.py index 404e7ed2c6ef9..5d321fc98aeb6 100644 --- a/vllm/engine/llm_engine.py +++ b/vllm/engine/llm_engine.py @@ -528,6 +528,14 @@ def _get_executor_cls(cls, elif engine_config.device_config.device_type == "cpu": from vllm.executor.cpu_executor import CPUExecutor executor_class = CPUExecutor + elif engine_config.device_config.device_type == "hpu": + if distributed_executor_backend == "ray": + initialize_ray_cluster(engine_config.parallel_config) + from vllm.executor.ray_hpu_executor import RayHPUExecutor + executor_class = RayHPUExecutor + else: + from vllm.executor.hpu_executor import HPUExecutor + executor_class = HPUExecutor elif engine_config.device_config.device_type == "openvino": from vllm.executor.openvino_executor import OpenVINOExecutor executor_class = OpenVINOExecutor diff --git a/vllm/executor/hpu_executor.py b/vllm/executor/hpu_executor.py new file mode 100644 index 0000000000000..220e9eee87bb3 --- /dev/null +++ b/vllm/executor/hpu_executor.py @@ -0,0 +1,205 @@ +############################################################################### +# Copyright (C) 2024 Habana Labs, Ltd. 
an Intel Company +############################################################################### + +import contextlib +import os +from typing import Any, Dict, List, Optional, Set, Tuple + +from vllm.executor.executor_base import ExecutorAsyncBase, ExecutorBase +from vllm.logger import init_logger +from vllm.lora.request import LoRARequest +from vllm.model_executor.layers.sampler import SamplerOutput +from vllm.prompt_adapter.request import PromptAdapterRequest +from vllm.sequence import ExecuteModelRequest +from vllm.utils import (get_distributed_init_method, get_ip, get_open_port, + make_async) +from vllm.worker.worker_base import WorkerWrapperBase + +logger = init_logger(__name__) + + +class HPUExecutor(ExecutorBase): + + uses_ray: bool = False + + def _init_executor(self) -> None: + """Initialize the worker and load the model.""" + self._init_worker() + + def _get_worker_kwargs( + self, + local_rank: int = 0, + rank: int = 0, + distributed_init_method: Optional[str] = None) -> Dict[str, Any]: + """Return worker init args for a given rank.""" + if distributed_init_method is None: + distributed_init_method = get_distributed_init_method( + get_ip(), get_open_port()) + return dict( + vllm_config=self.vllm_config, + local_rank=local_rank, + rank=rank, + distributed_init_method=distributed_init_method, + is_driver_worker=rank == 0, + ) + + def _create_worker(self, + local_rank: int = 0, + rank: int = 0, + distributed_init_method: Optional[str] = None): + wrapper = WorkerWrapperBase( + worker_module_name="vllm.worker.hpu_worker", + worker_class_name="HPUWorker", + ) + wrapper.init_worker(**self._get_worker_kwargs(local_rank, rank, + distributed_init_method)) + return wrapper.worker + + def _init_worker(self): + assert self.parallel_config.world_size == 1, ( + "GPUExecutor only supports single GPU.") + + self.driver_worker = self._create_worker() + self.driver_worker.init_device() + self.driver_worker.load_model() + + def determine_num_available_blocks(self) -> Tuple[int, int]: + """Determine the number of available KV blocks by invoking the + underlying worker. + """ + return self.driver_worker.determine_num_available_blocks() + + def initialize_cache(self, num_gpu_blocks: int, num_cpu_blocks) -> None: + """Initialize the KV cache by invoking the underlying worker. + """ + # NOTE: This is logged in the executor because there can be >1 worker + # with other executors. We could log in the engine level, but work + # remains to abstract away the device for non-GPU configurations. + logger.info("# HPU blocks: %d, # CPU blocks: %d", num_gpu_blocks, + num_cpu_blocks) + from vllm_hpu_extension.profiler import HabanaMemoryProfiler + with HabanaMemoryProfiler() as cache_init_m: + self.driver_worker.initialize_cache(num_gpu_blocks, num_cpu_blocks) + msg = f"init_cache_engine took {cache_init_m.get_summary_string()}" + logger.info(msg) + + def finish_measurements(self): + self.driver_worker.finish_measurements() + + def execute_model( + self, + execute_model_req: ExecuteModelRequest) -> List[SamplerOutput]: + # VLLM_HPU_LOG_STEP_GRAPH_COMPILATION - will log graph compilations per engine step, only when there was any - highly recommended to use alongside PT_HPU_METRICS_GC_DETAILS! 
# noqa:E501 + # VLLM_HPU_LOG_STEP_GRAPH_COMPILATION_ALL - will log graph compilations per engine step, always, even if there were none # noqa:E501 + # VLLM_HPU_LOG_STEP_CPU_FALLBACKS - will log cpu fallbacks per engine step, only when there was any # noqa:E501 + # VLLM_HPU_LOG_STEP_CPU_FALLBACKS_ALL - will log cpu fallbacks per engine step, always, even if there were none # noqa:E501 + log_graph_compilation_all = os.environ.get( + 'VLLM_HPU_LOG_STEP_GRAPH_COMPILATION_ALL', '0') != '0' + log_graph_compilation = os.environ.get( + 'VLLM_HPU_LOG_STEP_GRAPH_COMPILATION', + '0') != '0' or log_graph_compilation_all + log_cpu_fallbacks_all = os.environ.get( + 'VLLM_HPU_LOG_STEP_CPU_FALLBACKS_ALL', '0') != '0' + log_cpu_fallbacks = os.environ.get('VLLM_HPU_LOG_STEP_CPU_FALLBACKS', + '0') != '0' or log_cpu_fallbacks_all + if log_graph_compilation or log_cpu_fallbacks: + from habana_frameworks.torch.hpu.metrics import metric_localcontext + seq_group_metadata_list = execute_model_req.seq_group_metadata_list + is_prompt = any([ + seq_group_metadata.is_prompt + for seq_group_metadata in seq_group_metadata_list + ]) + max_context_len = max([ + max([ + len(v.prompt_token_ids) + len(v.output_token_ids) + for v in seq_group_metadata.seq_data.values() + ]) for seq_group_metadata in seq_group_metadata_list + ]) # whoa, that's some spicy stuff right here + max_num_blocks = ( + (max_context_len - 1) // self.cache_config.block_size) + 1 + input_stats = (f'is_prompt: {is_prompt}, ' + f'num_seqs: {len(seq_group_metadata_list)}, ' + f'max_context_len: {max_context_len}, ' + f'max_num_blocks {max_num_blocks}') + gc_ctx = metric_localcontext( + "graph_compilation" + ) if log_graph_compilation else contextlib.nullcontext() + cpu_fallback_ctx = metric_localcontext( + "cpu_fallback" + ) if log_cpu_fallbacks else contextlib.nullcontext() + with gc_ctx as gc_local_metric, \ + cpu_fallback_ctx as cpu_fallback_local_metric: + output = self.driver_worker.execute_model(execute_model_req) + if (log_graph_compilation and gc_local_metric.stats()[0][1] > 0 + ) or log_graph_compilation_all: + msg = ("VLLM_HPU_STEP_GRAPH_COMPILATION: " + f"{gc_local_metric.stats()}, {input_stats}") + logger.warning(msg) + if (log_cpu_fallbacks and cpu_fallback_local_metric.stats()[0][1] > + 0) or log_cpu_fallbacks_all: + msg = ("VLLM_HPU_STEP_CPU_FALLBACK: " + f"{cpu_fallback_local_metric.stats()}, {input_stats}") + logger.warning(msg) + + return output + + output = self.driver_worker.execute_model(execute_model_req) + return output + + def add_lora(self, lora_request: LoRARequest) -> bool: + assert lora_request.lora_int_id > 0, "lora_id must be greater than 0." + return self.driver_worker.add_lora(lora_request) + + def remove_lora(self, lora_id: int) -> bool: + assert lora_id > 0, "lora_id must be greater than 0." + return self.driver_worker.remove_lora(lora_id) + + def pin_lora(self, lora_id: int) -> bool: + assert lora_id > 0, "lora_id must be greater than 0." 
+ return self.driver_worker.pin_lora(lora_id) + + def list_loras(self) -> Set[int]: + return self.driver_worker.list_loras() + + def add_prompt_adapter( + self, prompt_adapter_request: PromptAdapterRequest) -> bool: + raise NotImplementedError( + "Prompt Adapter is not implemented for HPU backend.") + + def remove_prompt_adapter(self, prompt_adapter_id: int) -> bool: + raise NotImplementedError( + "Prompt Adapter is not implemented for HPU backend.") + + def pin_prompt_adapter(self, prompt_adapter_id: int) -> bool: + raise NotImplementedError( + "Prompt Adapter is not implemented for HPU backend.") + + def list_prompt_adapters(self) -> Set[int]: + raise NotImplementedError( + "Prompt Adapter is not implemented for HPU backend.") + + def check_health(self) -> None: + # GPUExecutor will always be healthy as long as + # it's running. + return + + def start_profile(self) -> None: + self.driver_worker.start_profile() + + def stop_profile(self) -> None: + self.driver_worker.stop_profile() + + def shutdown(self) -> None: + self.driver_worker.shutdown_inc() + + +class HPUExecutorAsync(HPUExecutor, ExecutorAsyncBase): + + async def execute_model_async( + self, + execute_model_req: ExecuteModelRequest, + ) -> List[SamplerOutput]: + output = await make_async(self.driver_worker.execute_model + )(execute_model_req=execute_model_req, ) + return output diff --git a/vllm/executor/ray_hpu_executor.py b/vllm/executor/ray_hpu_executor.py new file mode 100644 index 0000000000000..28d1882cb0db7 --- /dev/null +++ b/vllm/executor/ray_hpu_executor.py @@ -0,0 +1,554 @@ +import asyncio +import os +from collections import defaultdict +from itertools import islice, repeat +from typing import (TYPE_CHECKING, Any, Callable, Dict, List, Optional, Tuple, + Type) + +import msgspec + +import vllm.envs as envs +from vllm.executor.distributed_gpu_executor import ( # yapf: disable + DistributedGPUExecutor, DistributedGPUExecutorAsync) +from vllm.executor.msgspec_utils import encode_hook +from vllm.executor.ray_utils import RayWorkerWrapper, ray +from vllm.logger import init_logger +from vllm.model_executor.layers.sampler import SamplerOutput +from vllm.sequence import ExecuteModelRequest +from vllm.utils import (_run_task_with_lock, get_distributed_init_method, + get_ip, get_open_port, get_vllm_instance_id, + make_async) +from vllm.worker.worker_base import WorkerBase + +if ray is not None: + from ray.util.scheduling_strategies import PlacementGroupSchedulingStrategy + +if TYPE_CHECKING: + from ray.util.placement_group import PlacementGroup + +logger = init_logger(__name__) + + +class RayHPUExecutor(DistributedGPUExecutor): + + uses_ray: bool = True + + def _init_executor(self) -> None: + self.forward_dag: Optional["ray.dag.CompiledDAG"] = None + # If the env var is set, it uses the Ray's compiled DAG API + # which optimizes the control plane overhead. + # Run vLLM with VLLM_USE_RAY_COMPILED_DAG=1 to enable it. + # Currently, this requires USE_RAY_SPMD_WORKER=True. + self.use_ray_compiled_dag = envs.VLLM_USE_RAY_COMPILED_DAG + # If the env var is set, then we do not distinguish between the + # "driver worker" vs other workers. Also, the rank 0 worker will + # be executed in a remote Ray worker. Currently this requires + # USE_RAY_COMPILED_DAG=True. 
+ self.use_ray_spmd_worker = envs.VLLM_USE_RAY_SPMD_WORKER + if self.use_ray_compiled_dag: + assert self.use_ray_spmd_worker, ( + "VLLM_USE_RAY_COMPILED_DAG=1 requires " + "VLLM_USE_RAY_SPMD_WORKER=1") + if self.use_ray_spmd_worker: + # TODO: Support SPMD worker for non-DAG Ray executor. + assert self.use_ray_compiled_dag, ( + "VLLM_USE_RAY_SPMD_WORKER=1 requires " + "VLLM_USE_RAY_COMPILED_DAG=1") + + assert self.uses_ray + placement_group = self.parallel_config.placement_group + + # Disable Ray usage stats collection. + ray_usage = os.environ.get("RAY_USAGE_STATS_ENABLED", "0") + if ray_usage != "1": + os.environ["RAY_USAGE_STATS_ENABLED"] = "0" + + # Create the parallel GPU workers. + self._init_workers_ray(placement_group) + + self.input_encoder = msgspec.msgpack.Encoder(enc_hook=encode_hook) + self.output_decoder = msgspec.msgpack.Decoder( + Optional[List[SamplerOutput]]) + + def shutdown(self) -> None: + if hasattr(self, "forward_dag") and self.forward_dag is not None: + self.forward_dag.teardown() + import ray + for worker in self.workers: + ray.kill(worker) + self.forward_dag = None + + def finish_measurements(self): + self._run_workers("finish_measurements") + + def _get_worker_module_and_class( + self + ) -> Tuple[str, str, Optional[Callable[[], + Type[WorkerBase]]]]: # noqa: F821 + worker_class_fn = None + if self.scheduler_config.is_multi_step: + raise NotImplementedError( + "Multi-step execution is not implemented for HPU") + elif self.speculative_config: + raise NotImplementedError( + "Speculative decoding is not implemented for HPU") + else: + worker_module_name = "vllm.worker.hpu_worker" + worker_class_name = "HPUWorker" + return (worker_module_name, worker_class_name, worker_class_fn) + + def _get_worker_wrapper_args(self) -> Dict[str, Any]: + (worker_module_name, worker_class_name, + worker_class_fn) = self._get_worker_module_and_class() + + return dict( + worker_module_name=worker_module_name, + worker_class_name=worker_class_name, + worker_class_fn=worker_class_fn, + trust_remote_code=self.model_config.trust_remote_code, + ) + + def _init_workers_ray(self, placement_group: "PlacementGroup", + **ray_remote_kwargs): + # Otherwise, the ray workers are allocated with a full GPU. + num_gpus = 1 + + # The driver dummy worker does not actually use any resources. + # It holds the resource for the driver worker. + self.driver_dummy_worker: Optional[RayWorkerWrapper] = None + # The remaining workers are the actual ray actors. + self.workers: List[RayWorkerWrapper] = [] + + # Used in ray compiled DAG: indexed first by PP rank, + # and then TP rank. In other words, the inner list is + # the TP group of workers for a PP rank. + self.pp_tp_workers: List[List[RayWorkerWrapper]] = [] + + logger.info("use_ray_spmd_worker: %s", self.use_ray_spmd_worker) + + # Create the workers. 
+ driver_ip = get_ip() + worker_wrapper_kwargs = self._get_worker_wrapper_args() + for bundle_id, bundle in enumerate(placement_group.bundle_specs): + if not bundle.get("HPU", 0): + continue + scheduling_strategy = PlacementGroupSchedulingStrategy( + placement_group=placement_group, + placement_group_capture_child_tasks=True, + placement_group_bundle_index=bundle_id, + ) + + worker = ray.remote( + num_cpus=0, + num_gpus=0, + resources={'HPU': num_gpus}, + scheduling_strategy=scheduling_strategy, + **ray_remote_kwargs, + )(RayWorkerWrapper).remote(**worker_wrapper_kwargs) + + if self.use_ray_spmd_worker: + self.workers.append(worker) + else: + worker_ip = ray.get(worker.get_node_ip.remote()) + if worker_ip == driver_ip and self.driver_dummy_worker is None: + # If the worker is on the same node as the driver, we use it + # as the resource holder for the driver process. + self.driver_dummy_worker = worker + self.driver_worker = RayWorkerWrapper( + **worker_wrapper_kwargs) + else: + # Else, added to the list of workers. + self.workers.append(worker) + + logger.debug("workers: %s", self.workers) + logger.debug("driver_dummy_worker: %s", self.driver_dummy_worker) + if not self.use_ray_spmd_worker and self.driver_dummy_worker is None: + raise ValueError( + "Ray does not allocate any GPUs on the driver node. Consider " + "adjusting the Ray placement group or running the driver on a " + "GPU node.") + + worker_ips = [ + ray.get(worker.get_node_ip.remote()) # type: ignore[attr-defined] + for worker in self.workers + ] + ip_counts: Dict[str, int] = {} + for ip in worker_ips: + ip_counts[ip] = ip_counts.get(ip, 0) + 1 + + def sort_by_driver_then_worker_ip(worker): + """ + Sort the workers based on 3 properties: + 1. If the worker is on the same node as the driver (vllm engine), + it should be placed first. + 2. Then, if the worker is on a node with fewer workers, it should + be placed first. + 3. Finally, if the work is on a node with smaller IP address, it + should be placed first. + """ + ip = ray.get(worker.get_node_ip.remote()) + return (ip != driver_ip, ip_counts[ip], ip) + + # After sorting, the workers on the same node will be + # close to each other, and the workers on the driver + # node will be placed first. + self.workers = sorted(self.workers, key=sort_by_driver_then_worker_ip) + + # Get the set of GPU IDs used on each node. + worker_node_and_gpu_ids = self._run_workers("get_node_and_gpu_ids", + use_dummy_driver=True) + + node_workers = defaultdict(list) # node id -> list of worker ranks + node_gpus = defaultdict(list) # node id -> list of gpu ids + + for i, (node_id, gpu_ids) in enumerate(worker_node_and_gpu_ids): + node_workers[node_id].append(i) + # `gpu_ids` can be a list of strings or integers. + # convert them to integers for consistency. + # NOTE: gpu_ids can be larger than 9 (e.g. 16 GPUs), + # string sorting is not sufficient. + # see https://github.com/vllm-project/vllm/issues/5590 + gpu_ids = [int(x) for x in gpu_ids] + node_gpus[node_id].extend(gpu_ids) + for node_id, gpu_ids in node_gpus.items(): + node_gpus[node_id] = sorted(gpu_ids) + + all_ips = set(worker_ips + [driver_ip]) + n_ips = len(all_ips) + n_nodes = len(node_workers) + + if n_nodes != n_ips: + raise RuntimeError( + f"Every node should have a unique IP address. Got {n_nodes}" + f" nodes with node ids {list(node_workers.keys())} and " + f"{n_ips} unique IP addresses {all_ips}. Please check your" + " network configuration. 
If you set `VLLM_HOST_IP` or " + "`HOST_IP` environment variable, make sure it is unique for" + " each node.") + + VLLM_INSTANCE_ID = get_vllm_instance_id() + + # Set environment variables for the driver and workers. + all_args_to_update_environment_variables = [({ + "VLLM_INSTANCE_ID": + VLLM_INSTANCE_ID, + "VLLM_TRACE_FUNCTION": + str(envs.VLLM_TRACE_FUNCTION), + }, ) for (node_id, _) in worker_node_and_gpu_ids] + self._run_workers("update_environment_variables", + all_args=all_args_to_update_environment_variables) + + if len(node_gpus) == 1: + # in single node case, we don't need to get the IP address. + # the loopback address is sufficient + # NOTE: a node may have several IP addresses, one for each + # network interface. `get_ip()` might return any of them, + # while they might not work for communication inside the node + # if the network setup is complicated. Using the loopback address + # solves this issue, as it always works for communication inside + # the node. + driver_ip = "127.0.0.1" + distributed_init_method = get_distributed_init_method( + driver_ip, get_open_port()) + + # Initialize the actual workers inside worker wrapper. + init_worker_all_kwargs = [ + self._get_worker_kwargs( + local_rank=node_workers[node_id].index(rank), + rank=rank, + distributed_init_method=distributed_init_method, + ) for rank, (node_id, _) in enumerate(worker_node_and_gpu_ids) + ] + self._run_workers("init_worker", all_kwargs=init_worker_all_kwargs) + + self._run_workers("init_device") + self._run_workers("load_model", + max_concurrent_workers=self.parallel_config. + max_parallel_loading_workers) + + if self.use_ray_spmd_worker: + for pp_rank in range(self.parallel_config.pipeline_parallel_size): + self.pp_tp_workers.append([]) + for tp_rank in range( + self.parallel_config.tensor_parallel_size): + # PP=2, TP=4 + # pp_tp_workers = [[0, 1, 2, 3], [4, 5, 6, 7]] + rank = (pp_rank * self.parallel_config.tensor_parallel_size + ) + tp_rank + assert len(self.pp_tp_workers[pp_rank]) == tp_rank + assert pp_rank < len(self.pp_tp_workers) + self.pp_tp_workers[pp_rank].append(self.workers[rank]) + + # This is the list of workers that are rank 0 of each TP group EXCEPT + # global rank 0. These are the workers that will broadcast to the + # rest of the workers. + self.tp_driver_workers: List[RayWorkerWrapper] = [] + # This is the list of workers that are not drivers and not the first + # worker in a TP group. These are the workers that will be + # broadcasted to. + self.non_driver_workers: List[RayWorkerWrapper] = [] + + # Enforce rank order for correct rank to return final output. + for index, worker in enumerate(self.workers): + # The driver worker is rank 0 and not in self.workers. + rank = index + 1 + if rank % self.parallel_config.tensor_parallel_size == 0: + self.tp_driver_workers.append(worker) + else: + self.non_driver_workers.append(worker) + + def _driver_execute_model( + self, execute_model_req: Optional[ExecuteModelRequest] + ) -> Optional[List[SamplerOutput]]: + """Run execute_model in the driver worker. + + Passing None will cause the driver to stop the model execution + loop running in each of the remote workers. 
+ """ + assert not self.use_ray_spmd_worker, ( + "driver_worker does not exist for VLLM_USE_RAY_SPMD_WORKER=1") + return self.driver_worker.execute_method("execute_model", + execute_model_req) + + def execute_model( + self, + execute_model_req: ExecuteModelRequest) -> List[SamplerOutput]: + if not self.use_ray_spmd_worker: + return super().execute_model(execute_model_req) + + if self.forward_dag is None: + self.forward_dag = self._compiled_ray_dag(enable_asyncio=False) + + serialized_data = self.input_encoder.encode(execute_model_req) + outputs = ray.get(self.forward_dag.execute(serialized_data)) + output = self.output_decoder.decode(outputs[0]) + return output + + def _run_workers( + self, + method: str, + *args, + async_run_tensor_parallel_workers_only: bool = False, + all_args: Optional[List[Tuple[Any, ...]]] = None, + all_kwargs: Optional[List[Dict[str, Any]]] = None, + use_dummy_driver: bool = False, + max_concurrent_workers: Optional[int] = None, + **kwargs, + ) -> Any: + """Runs the given method on all workers. Can be used in the following + ways: + + Args: + - async_run_tensor_parallel_workers_only: If True the method will be + run only in the remote TP workers, not the driver worker. + It will also be run asynchronously and return a list of futures + rather than blocking on the results. + - args/kwargs: All workers share the same args/kwargs + - all_args/all_kwargs: args/kwargs for each worker are specified + individually + """ + if self.use_ray_spmd_worker: + assert not async_run_tensor_parallel_workers_only, ( + "async_run_tensor_parallel_workers_only is not supported for " + "spmd mode.") + + if max_concurrent_workers: + raise NotImplementedError( + "max_concurrent_workers is not supported yet.") + + count = len(self.workers) if not \ + async_run_tensor_parallel_workers_only \ + else len(self.non_driver_workers) + # If using SPMD worker, all workers are the same, so we should execute + # the args on all workers. Otherwise, we skip the first worker's args + # because those args will go to the driver worker. + first_worker_args_index: int = 0 if self.use_ray_spmd_worker else 1 + all_worker_args = repeat(args, count) if all_args is None \ + else islice(all_args, first_worker_args_index, None) + all_worker_kwargs = repeat(kwargs, count) if all_kwargs is None \ + else islice(all_kwargs, first_worker_args_index, None) + + # Start the ray workers first. + ray_workers = self.workers + if async_run_tensor_parallel_workers_only: + ray_workers = self.non_driver_workers + ray_worker_outputs = [ + worker.execute_method.remote(method, *worker_args, **worker_kwargs) + for (worker, worker_args, worker_kwargs + ) in zip(ray_workers, all_worker_args, all_worker_kwargs) + ] + + if async_run_tensor_parallel_workers_only: + # Just return futures + return ray_worker_outputs + + driver_worker_output = [] + # In SPMD mode, the driver worker is the same as any other worker, + # so we only explicitly execute on the driver worker if using a + # non-SPMD worker class. + if not self.use_ray_spmd_worker: + driver_args = args if all_args is None else all_args[0] + driver_kwargs = kwargs if all_kwargs is None else all_kwargs[0] + + # Start the driver worker after all the ray workers. 
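+            # With use_dummy_driver the call is routed through the dummy
+            # driver actor via Ray; otherwise it runs in-process on
+            # self.driver_worker.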
+ if not use_dummy_driver: + driver_worker_output = [ + self.driver_worker.execute_method(method, *driver_args, + **driver_kwargs) + ] + else: + assert self.driver_dummy_worker is not None + driver_worker_output = [ + ray.get( + self.driver_dummy_worker.execute_method.remote( + method, *driver_args, **driver_kwargs)) + ] + + # Get the results of the ray workers. + if self.workers: + ray_worker_outputs = ray.get(ray_worker_outputs) + + return driver_worker_output + ray_worker_outputs + + def _wait_for_tasks_completion(self, parallel_worker_tasks: Any) -> None: + """Wait for futures returned from _run_workers() with + async_run_remote_workers_only to complete.""" + ray.get(parallel_worker_tasks) + + def _check_ray_adag_installation(self): + import pkg_resources + from packaging import version + + required_version = version.parse("2.35") + current_version = version.parse( + pkg_resources.get_distribution("ray").version) + # TODO: update the constraint once we adapt to the backward + # incompatible API change from ray 2.36 + if current_version != required_version: + raise ValueError(f"Ray version {required_version} is " + f"required, but found {current_version}") + + import importlib.util + adag_spec = importlib.util.find_spec( + "ray.experimental.compiled_dag_ref") + if adag_spec is None: + raise ValueError("Ray accelerated DAG is not installed. " + "Run `pip install ray[adag]` to install it.") + + def _compiled_ray_dag(self, enable_asyncio: bool): + assert self.parallel_config.use_ray + self._check_ray_adag_installation() + from ray.dag import InputNode, MultiOutputNode + from ray.experimental.channel.torch_tensor_type import TorchTensorType + + with InputNode() as input_data: + # Example DAG: PP=2, TP=4 + # (ExecuteModelReq, None) -> 0 -> (ExecuteModelReq, IntermediateOutput) -> 4 -> SamplerOutput # noqa: E501 + # -> 1 -> (ExecuteModelReq, IntermediateOutput) -> 5 -> SamplerOutput # noqa: E501 + # -> 2 -> (ExecuteModelReq, IntermediateOutput) -> 6 -> SamplerOutput # noqa: E501 + # -> 3 -> (ExecuteModelReq, IntermediateOutput) -> 7 -> SamplerOutput # noqa: E501 + + # All workers in the first TP group will take in the + # ExecuteModelRequest as input. + outputs = [input_data for _ in self.pp_tp_workers[0]] + for pp_rank, tp_group in enumerate(self.pp_tp_workers): + # Each PP worker takes in the output of the previous PP worker, + # and the TP group executes in SPMD fashion. + outputs = [ + worker.execute_model_spmd. + bind( # type: ignore[attr-defined] + outputs[i]) for i, worker in enumerate(tp_group) + ] + + last_pp_rank = len(self.pp_tp_workers) - 1 + if pp_rank < last_pp_rank: + # Specify how intermediate tensors should be passed + # between pp stages, no need to specify for the last + # pp stage. 
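+                    # "auto" leaves the choice of channel for these
+                    # intermediate tensors to Ray's compiled graph runtime.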
+ transport = "auto" + outputs = [ + output.with_type_hint( + TorchTensorType(transport=transport)) + for output in outputs + ] + + forward_dag = MultiOutputNode(outputs) + + return forward_dag.experimental_compile(enable_asyncio=enable_asyncio) + + def __del__(self): + self.shutdown() + + +class RayHPUExecutorAsync(RayHPUExecutor, DistributedGPUExecutorAsync): + + def __init__(self, *args, **kwargs): + super().__init__(*args, **kwargs) + self.pp_locks: Optional[List[asyncio.Lock]] = None + self.use_ray_spmd_worker = envs.VLLM_USE_RAY_SPMD_WORKER + if not self.use_ray_compiled_dag: + self.driver_exec_method = make_async( + self.driver_worker.execute_method) + + async def execute_model_async( + self, + execute_model_req: ExecuteModelRequest) -> List[SamplerOutput]: + if not self.use_ray_spmd_worker: + return await super().execute_model_async(execute_model_req) + + if self.forward_dag is None: + self.forward_dag = self._compiled_ray_dag(enable_asyncio=True) + + serialized_data = self.input_encoder.encode(execute_model_req) + dag_future = await self.forward_dag.execute_async(serialized_data) + outputs = await dag_future + return self.output_decoder.decode(outputs[0]) + + async def _driver_execute_model_async( + self, + execute_model_req: Optional[ExecuteModelRequest] = None + ) -> List[SamplerOutput]: + assert not self.use_ray_spmd_worker, ( + "driver_worker does not exist for VLLM_USE_RAY_SPMD_WORKER=1") + if not self.tp_driver_workers: + return await self.driver_exec_method("execute_model", + execute_model_req) + if self.pp_locks is None: + # This locks each pipeline parallel stage so multiple virtual + # engines can't execute on the same stage at the same time + # We create the locks here to avoid creating them in the constructor + # which uses a different asyncio loop. + self.pp_locks = [ + asyncio.Lock() + for _ in range(self.parallel_config.pipeline_parallel_size) + ] + + tasks = [ + asyncio.create_task( + _run_task_with_lock(self.driver_exec_method, self.pp_locks[0], + "execute_model", execute_model_req)) + ] + for pp_rank, driver_worker in enumerate(self.tp_driver_workers, + start=1): + tasks.append( + asyncio.create_task( + _run_task_with_lock(driver_worker.execute_method.remote, + self.pp_locks[pp_rank], + "execute_model", execute_model_req))) + + results = await asyncio.gather(*tasks) + + # Only the last PP stage has the final results. + return results[-1] + + async def _start_worker_execution_loop(self): + assert not self.use_ray_spmd_worker, ( + "worker loop is disabled for VLLM_USE_RAY_SPMD_WORKER=1") + coros = [ + worker.execute_method.remote("start_worker_execution_loop") + for worker in self.non_driver_workers + ] + return await asyncio.gather(*coros) + + def __del__(self): + self.shutdown() diff --git a/vllm/executor/ray_utils.py b/vllm/executor/ray_utils.py index 993d279890820..41dd59bc65ec5 100644 --- a/vllm/executor/ray_utils.py +++ b/vllm/executor/ray_utils.py @@ -249,7 +249,11 @@ def initialize_ray_cluster( # Placement group is already set. 
return - device_str = "GPU" if not current_platform.is_tpu() else "TPU" + device_str = "GPU" + if current_platform.is_tpu(): + device_str = "TPU" + elif current_platform.is_hpu(): + device_str = 'HPU' # Create placement group for worker processes current_placement_group = ray.util.get_current_placement_group() if current_placement_group: diff --git a/vllm/model_executor/custom_op.py b/vllm/model_executor/custom_op.py index bfca15c2b6a3e..24d75f4df4e02 100644 --- a/vllm/model_executor/custom_op.py +++ b/vllm/model_executor/custom_op.py @@ -55,10 +55,9 @@ def forward_tpu(self, *args, **kwargs): # NOTE(woosuk): This is a placeholder for future extensions. return self.forward_native(*args, **kwargs) - def forward_gaudi(self, *args, **kwargs): + def forward_hpu(self, *args, **kwargs): # By default, we assume that Gaudi ops are compatible with the # PyTorch-native implementation. - # NOTE(woosuk): This is a placeholder for future extensions. return self.forward_native(*args, **kwargs) def dispatch_forward(self): @@ -76,6 +75,8 @@ def dispatch_forward(self): return self.forward_hip elif current_platform.is_cpu(): return self.forward_cpu + elif current_platform.is_hpu(): + return self.forward_hpu elif current_platform.is_tpu(): return self.forward_tpu elif current_platform.is_xpu(): diff --git a/vllm/model_executor/layers/layernorm.py b/vllm/model_executor/layers/layernorm.py index 30b43f375dd5c..345919c5d1636 100644 --- a/vllm/model_executor/layers/layernorm.py +++ b/vllm/model_executor/layers/layernorm.py @@ -92,6 +92,25 @@ def forward_cuda( ) return out + def forward_hpu( + self, + x: torch.Tensor, + residual: Optional[torch.Tensor] = None, + ) -> Union[torch.Tensor, Tuple[torch.Tensor, torch.Tensor]]: + from vllm_hpu_extension.ops import HPUFusedRMSNorm + if HPUFusedRMSNorm is None: + return self.forward_native(x, residual) + if residual is not None: + orig_shape = x.shape + residual += x.view(residual.shape) + # Note: HPUFusedRMSNorm requires 3D tensors as inputs + x = HPUFusedRMSNorm.apply(residual, self.weight, + self.variance_epsilon) + return x.view(orig_shape), residual + + x = HPUFusedRMSNorm.apply(x, self.weight, self.variance_epsilon) + return x + def forward_xpu( self, x: torch.Tensor, diff --git a/vllm/model_executor/layers/logits_processor.py b/vllm/model_executor/layers/logits_processor.py index 288f5a1134b6b..fb76b1b17925e 100644 --- a/vllm/model_executor/layers/logits_processor.py +++ b/vllm/model_executor/layers/logits_processor.py @@ -111,8 +111,14 @@ def _prune_hidden_states( hidden_states: torch.Tensor, sampling_metadata: SamplingMetadata, ) -> torch.Tensor: - return hidden_states.index_select(0, - sampling_metadata.selected_token_indices) + # NOTE(kzawora): The if guard is needed for Gaudi - in some scenarios + # (warmup, profile_run) we might not have selected_token_indices, + # so we skip pruning. 
+ if sampling_metadata.selected_token_indices is not None: + return hidden_states.index_select( + 0, sampling_metadata.selected_token_indices) + else: + return hidden_states def _apply_logits_processors( diff --git a/vllm/model_executor/layers/rotary_embedding.py b/vllm/model_executor/layers/rotary_embedding.py index ac60e0e6d48a0..63ceec63e8317 100644 --- a/vllm/model_executor/layers/rotary_embedding.py +++ b/vllm/model_executor/layers/rotary_embedding.py @@ -194,6 +194,61 @@ def forward_xpu( self.cos_sin_cache, self.is_neox_style) return query, key + def forward_hpu( + self, + positions: torch.Tensor, + query: torch.Tensor, + key: torch.Tensor, + offsets: Optional[torch.Tensor] = None, + ) -> Tuple[torch.Tensor, torch.Tensor]: + from habana_frameworks.torch.hpex.kernels import ( + RotaryPosEmbeddingMode, apply_rotary_pos_emb) + positions = positions.flatten() + if offsets is not None: + positions = positions + offsets + num_tokens = positions.shape[0] + cos_sin = self.cos_sin_cache.index_select(0, positions).view( + num_tokens, 1, -1) + cos, sin = cos_sin.chunk(2, dim=-1) + # HPU RoPE kernel requires hidden dimension for cos and sin to be equal + # to query hidden dimension, so the original tensors need to be + # expanded + # GPT-NeoX kernel requires position_ids = None, offset, mode = BLOCKWISE + # and expansion of cos/sin tensors via concatenation + # GPT-J kernel requires position_ids = None, offset = 0, mode = PAIRWISE + # and expansion of cos/sin tensors via repeat_interleave + rope_mode: RotaryPosEmbeddingMode + if self.is_neox_style: + rope_mode = RotaryPosEmbeddingMode.BLOCKWISE + cos = torch.cat((cos, cos), dim=-1) + sin = torch.cat((sin, sin), dim=-1) + else: + rope_mode = RotaryPosEmbeddingMode.PAIRWISE + sin = torch.repeat_interleave(sin, + 2, + dim=-1, + output_size=cos_sin.shape[-1]) + cos = torch.repeat_interleave(cos, + 2, + dim=-1, + output_size=cos_sin.shape[-1]) + + query_shape = query.shape + query = query.view(num_tokens, -1, self.head_size) + query_rot = query[..., :self.rotary_dim] + query_pass = query[..., self.rotary_dim:] + query_rot = apply_rotary_pos_emb(query_rot, cos, sin, None, 0, + rope_mode) + query = torch.cat((query_rot, query_pass), dim=-1).reshape(query_shape) + + key_shape = key.shape + key = key.view(num_tokens, -1, self.head_size) + key_rot = key[..., :self.rotary_dim] + key_pass = key[..., self.rotary_dim:] + key_rot = apply_rotary_pos_emb(key_rot, cos, sin, None, 0, rope_mode) + key = torch.cat((key_rot, key_pass), dim=-1).reshape(key_shape) + return query, key + def extra_repr(self) -> str: s = f"head_size={self.head_size}, rotary_dim={self.rotary_dim}" s += f", max_position_embeddings={self.max_position_embeddings}" diff --git a/vllm/model_executor/layers/vocab_parallel_embedding.py b/vllm/model_executor/layers/vocab_parallel_embedding.py index b448557af13b3..52771f50a7a23 100644 --- a/vllm/model_executor/layers/vocab_parallel_embedding.py +++ b/vllm/model_executor/layers/vocab_parallel_embedding.py @@ -12,6 +12,7 @@ QuantizationConfig, QuantizeMethodBase, method_has_implemented_embedding) from vllm.model_executor.parameter import BasevLLMParameter from vllm.model_executor.utils import set_weight_attrs +from vllm.platforms import current_platform DEFAULT_VOCAB_PADDING_SIZE = 64 @@ -382,8 +383,20 @@ def weight_loader(self, param: Parameter, loaded_weight: torch.Tensor): # Copy the data. 
loaded_weight = loaded_weight.narrow(output_dim, start_idx, shard_size) - param[:loaded_weight.shape[0]].data.copy_(loaded_weight) - param[loaded_weight.shape[0]:].data.fill_(0) + + if current_platform.is_hpu(): + # FIXME(kzawora): Weight copy with slicing bugs out on Gaudi here, + # so we're using a workaround. Remove this when fixed in + # HPU PT bridge. + padded_weight = torch.cat([ + loaded_weight, + torch.zeros(param.shape[0] - loaded_weight.shape[0], + *loaded_weight.shape[1:]) + ]) + param.data.copy_(padded_weight) + else: + param[:loaded_weight.shape[0]].data.copy_(loaded_weight) + param[loaded_weight.shape[0]:].data.fill_(0) def forward(self, input_): if self.tp_size > 1: diff --git a/vllm/model_executor/sampling_metadata.py b/vllm/model_executor/sampling_metadata.py index ee02368bec8a8..84f35f75a0c32 100644 --- a/vllm/model_executor/sampling_metadata.py +++ b/vllm/model_executor/sampling_metadata.py @@ -284,7 +284,8 @@ def _prepare_seq_groups( else: # Decode prompt_logprob_len = 0 - query_len = query_lens[i] if query_lens is not None else 1 + query_len = query_lens[i] if query_lens is not None and len( + query_lens) > 0 else 1 sample_len = len(seq_ids) * query_len if do_sample else 0 if sampling_params.seed is not None and generators is not None: diff --git a/vllm/platforms/__init__.py b/vllm/platforms/__init__.py index 524150920b854..9e740837381f8 100644 --- a/vllm/platforms/__init__.py +++ b/vllm/platforms/__init__.py @@ -42,6 +42,13 @@ except Exception: pass +is_hpu = False +try: + from importlib import util + is_hpu = util.find_spec('habana_frameworks') is not None +except Exception: + pass + is_xpu = False try: @@ -86,6 +93,9 @@ elif is_rocm: from .rocm import RocmPlatform current_platform = RocmPlatform() +elif is_hpu: + from .hpu import HpuPlatform + current_platform = HpuPlatform() elif is_xpu: from .xpu import XPUPlatform current_platform = XPUPlatform() diff --git a/vllm/platforms/hpu.py b/vllm/platforms/hpu.py new file mode 100644 index 0000000000000..170cfff94f90d --- /dev/null +++ b/vllm/platforms/hpu.py @@ -0,0 +1,11 @@ +import torch + +from .interface import Platform, PlatformEnum + + +class HpuPlatform(Platform): + _enum = PlatformEnum.HPU + + @staticmethod + def inference_mode(): + return torch.no_grad() diff --git a/vllm/platforms/interface.py b/vllm/platforms/interface.py index c3a3e7a284457..81d8bdae2383c 100644 --- a/vllm/platforms/interface.py +++ b/vllm/platforms/interface.py @@ -10,6 +10,7 @@ class PlatformEnum(enum.Enum): CUDA = enum.auto() ROCM = enum.auto() TPU = enum.auto() + HPU = enum.auto() XPU = enum.auto() CPU = enum.auto() NEURON = enum.auto() @@ -46,6 +47,9 @@ def is_rocm(self) -> bool: def is_tpu(self) -> bool: return self._enum == PlatformEnum.TPU + def is_hpu(self) -> bool: + return self._enum == PlatformEnum.HPU + def is_xpu(self) -> bool: return self._enum == PlatformEnum.XPU diff --git a/vllm/utils.py b/vllm/utils.py index 6edc8d72f6bcf..d78130873d3dc 100644 --- a/vllm/utils.py +++ b/vllm/utils.py @@ -728,6 +728,9 @@ def is_pin_memory_available() -> bool: elif current_platform.is_neuron(): print_warning_once("Pin memory is not supported on Neuron.") return False + elif current_platform.is_hpu(): + print_warning_once("Pin memory is not supported on HPU.") + return False elif current_platform.is_cpu() or current_platform.is_openvino(): return False return True diff --git a/vllm/worker/hpu_model_runner.py b/vllm/worker/hpu_model_runner.py new file mode 100644 index 0000000000000..5008a2abd22ea --- /dev/null +++ 
b/vllm/worker/hpu_model_runner.py @@ -0,0 +1,2008 @@ +############################################################################### +# Copyright (C) 2024 Habana Labs, Ltd. an Intel Company +############################################################################### + +import collections +import contextlib +import dataclasses +import functools +import gc +import itertools +import math +import operator +import os +import time +from array import array +from dataclasses import dataclass, field +from enum import IntEnum +from typing import (TYPE_CHECKING, Any, Callable, Dict, List, NamedTuple, + Optional, Set, Tuple, Type, TypeVar, Union) + +import habana_frameworks.torch as htorch +import habana_frameworks.torch.internal.bridge_config as bc +import torch +from vllm_hpu_extension.ops import LoraMask as LoraMask +from vllm_hpu_extension.profiler import (HabanaHighLevelProfiler, + HabanaMemoryProfiler, format_bytes) + +from vllm.attention import AttentionMetadata, get_attn_backend +from vllm.config import DeviceConfig, VllmConfig +from vllm.distributed.parallel_state import get_world_group +from vllm.logger import init_logger +from vllm.lora.layers import LoRAMapping +from vllm.lora.request import LoRARequest +from vllm.lora.worker_manager import LRUCacheWorkerLoRAManager +from vllm.model_executor import SamplingMetadata +from vllm.model_executor.layers.sampler import SamplerOutput +from vllm.model_executor.model_loader import get_model +from vllm.multimodal import (MULTIMODAL_REGISTRY, BatchedTensorInputs, + MultiModalInputs) +from vllm.sampling_params import SamplingParams +from vllm.sequence import (IntermediateTensors, SequenceData, + SequenceGroupMetadata) +from vllm.utils import is_pin_memory_available, make_tensor_with_pad +from vllm.worker.model_runner_base import ( + ModelRunnerBase, ModelRunnerInputBase, + _add_attn_metadata_broadcastable_dict, + _add_sampling_metadata_broadcastable_dict, + _init_attn_metadata_from_tensor_dict, + _init_sampling_metadata_from_tensor_dict) + +if TYPE_CHECKING: + from vllm.attention.backends.abstract import AttentionBackend + +logger = init_logger(__name__) + +_TYPE_CACHE = {} +# These values are assumed to be zero in several places. +# Use caution when updating them! 
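+# _PAD_SLOT_ID is used as the slot for padded, masked and profiling tokens;
+# _PAD_BLOCK_ID pads block tables and block lists.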
+_PAD_SLOT_ID = 0 +_PAD_BLOCK_ID = 0 + +LORA_WARMUP_RANK = 8 + + +class Singleton(type): + _instances: Dict[type, object] = {} + + def __call__(cls, *args, **kwargs): + if cls not in cls._instances: + cls._instances[cls] = super(Singleton, + cls).__call__(*args, **kwargs) + return cls._instances[cls] + + +@dataclass +class HPUBucketingGlobalState(metaclass=Singleton): + prompt_bs_bucket_cfg: Tuple[int, int, int] = field(init=False) + decode_bs_bucket_cfg: Tuple[int, int, int] = field(init=False) + prompt_seq_bucket_cfg: Tuple[int, int, int] = field(init=False) + decode_block_bucket_cfg: Tuple[int, int, int] = field(init=False) + prompt_buckets: List[Tuple[int, int]] = field(init=False) + decode_buckets: List[Tuple[int, int]] = field(init=False) + + +def subtuple(obj: object, + typename: str, + to_copy: List[str], + to_override: Optional[Dict[str, object]] = None): + if obj is None: + return None + if to_override is None: + to_override = {} + fields = set(to_copy) | set(to_override.keys()) + values = {f: to_override.get(f, getattr(obj, f)) for f in fields} + if typename not in _TYPE_CACHE: + _TYPE_CACHE[typename] = collections.namedtuple(typename, + ' '.join(fields)) + return _TYPE_CACHE[typename](**values) + + +def read_bucket_settings(phase: str, dim: str, **defaults): + """Read bucketing configuration from env variables. + + phase is either 'prompt' or 'decode' + dim is either 'bs', 'seq' or 'block' + param is either 'min', 'step' or 'max' + example env variable: VLLM_DECODE_BS_BUCKET_STEP=128 + """ + params = ['min', 'step', 'max'] + env_vars = [f'VLLM_{phase}_{dim}_BUCKET_{p}'.upper() for p in params] + default_values = [defaults[p] for p in params] + values = [ + int(os.environ.get(e, d)) for e, d in zip(env_vars, default_values) + ] + for e, v, d in zip(env_vars, values, default_values): + logger.info('%s=%s (default:%s)', e, v, d) + return values + + +def warmup_range(config: Tuple[int, int, int]): + """Generate a warmup range. + + Start from bmin and multiply by 2 until you reach bstep. + Then, increase the values in the range by the value of bstep until you + reach bmax. + + Example: + bmin = 2, bstep = 32, bmax = 64 + => ramp_up = (2, 4, 8, 16) + => stable = (32, 64) + => return ramp_up + stable => (2, 4, 8, 16, 32, 64) + """ + bmin, bstep, bmax = config + assert bmin <= bmax, ("Min. batch size cannot be greater than max. " + "batch size. 
If you want to skip warmup, " + "set VLLM_SKIP_WARMUP=true") + base = itertools.repeat(2) + ramp_up_acc = itertools.accumulate(base, func=operator.mul, initial=bmin) + ramp_up_tw = itertools.takewhile(lambda x: x < bstep and x <= bmax, \ + ramp_up_acc) + stable = range(bstep, bmax + 1, bstep) + buckets = list(ramp_up_tw) + list(stable) + return list(filter(lambda bucket: bucket >= bmin, buckets)) + + +def generate_prompt_buckets(bs_bucket_config, + seq_bucket_config, + max_num_batched_tokens=None): + buckets = list( + itertools.product(warmup_range(bs_bucket_config), + warmup_range(seq_bucket_config))) + if len(buckets) == 0: + msg = ("No buckets could be captured with following config " + f"(min, step, max_warmup): " + f"bs:{bs_bucket_config}, " + f"seq:{seq_bucket_config}") + raise ValueError(msg) + + filtered_buckets = buckets + if max_num_batched_tokens is not None: + # Remove buckets exceeding batch token budget + filtered_buckets = list( + filter( + lambda bucket: bucket[0] * bucket[1] <= max_num_batched_tokens, + buckets)) + + if len(filtered_buckets) == 0: + # we can handle this if we ignore max_num_batched_tokens + min_bucket_bs, min_bucket_seq = min(buckets, + key=lambda b: (b[0] * b[1])) + min_reqd_budget = min_bucket_bs * min_bucket_seq + msg = ( + "The current bucketing configuration " + f"(min, step, max_warmup): " + f"bs:{bs_bucket_config}, " + f"seq:{seq_bucket_config} cannot be used with specified " + f"max_num_batched_tokens ({max_num_batched_tokens}), as the " + f"smallest bucket ({min_reqd_budget}) would exceed token " + "budget. Please increase max_num_batched_tokens or decrease " + "bucket minimum Ignoring max_num_batched_tokens at risk of " + "out-of-memory errors.") + logger.error(msg) + return list( + sorted(buckets, key=lambda b: (b[0] * b[1], b[1], b[0]))), [] + + captured_buckets = list( + sorted(filtered_buckets, key=lambda b: (b[0] * b[1], b[1], b[0]))) + omitted_buckets = list( + sorted([x for x in buckets if x not in filtered_buckets])) + return captured_buckets, omitted_buckets + + +def generate_decode_buckets(bs_bucket_config, blocks_bucket_config, + max_blocks): + buckets = [] + bs_buckets = warmup_range(bs_bucket_config) + block_buckets = warmup_range(blocks_bucket_config) + bmin, bstep, bmax = blocks_bucket_config + last_bucket = round_up(max_blocks, bstep) + for bs in bs_buckets: + for blocks in block_buckets: + if blocks < bs: + continue + if blocks > last_bucket: + break + buckets.append((bs, blocks)) + return list(sorted(buckets, key=lambda b: (b[0] * b[1], b[1], b[0]))) + + +def next_pow2(value: int, base: int): + res = base + while value > 1: + value = (value + 1) // 2 + res *= 2 + return res + + +def round_up(value: int, k: int): + return (value + k - 1) // k * k + + +def find_bucket(value: int, config: Tuple[int, int, int]): + bmin, bstep, _ = config + next_step = round_up(value, bstep) + next_pow = next_pow2(value, bmin) + return max(bmin, min(next_step, next_pow)) + + +def align_workers(value, op): + group = get_world_group().cpu_group + world_size = torch.distributed.get_world_size() + if world_size <= 1: + return value + value_t = torch.tensor(value, device='cpu') + torch.distributed.all_reduce(value_t, op=op, group=group) + return value_t.item() + + +def setup_profiler(): + schedule = torch.profiler.schedule(wait=0, warmup=2, active=1, repeat=1) + DEVICE = 'hpu' + activities = [torch.profiler.ProfilerActivity.CPU] + activities.extend([torch.profiler.ProfilerActivity.HPU] if DEVICE == + 'hpu' else []) + #from 
habana_frameworks.torch.activity_profiler import DebugActivity + #debug_activities=[DebugActivity.BRIDGE_FUNCTION_CALLS] + + profiler = torch.profiler.profile( + schedule=schedule, + activities=activities, + #debug_activities=debug_activities, + on_trace_ready=torch.profiler.tensorboard_trace_handler('.', + use_gzip=True), + record_shapes=False, + with_stack=True) + return profiler + + +def pad_list(list, k, v): + target_len = round_up(len(list), k) + padding = target_len - len(list) + return list + [v] * padding + + +def precompute_indices_and_offsets(block_size, slot_mapping, is_prompt): + slot_mapping = slot_mapping.flatten() + indices = torch.div(slot_mapping, block_size, rounding_mode="floor") + if is_prompt: + indices = indices.unflatten(0, (-1, block_size))[:, 0] + offsets = None + else: + offsets = torch.fmod(slot_mapping, block_size) + return indices, offsets + + +class HpuModelAdapter(): + + def __init__(self, model, block_size, dtype, enforce_eager): + self.model = model + self.prefill_use_fusedsdpa = os.getenv('VLLM_PROMPT_USE_FUSEDSDPA', + '0').lower() in ['1', 'true'] + self.block_size = block_size + self.dtype = dtype + if not htorch.utils.internal.is_lazy() and not enforce_eager: + self.model = torch.compile(self.model, + backend='hpu_backend', + dynamic=False) + + def _set_attn_bias(self, attn_metadata, batch_size, seq_len, device, + dtype): + prefill_metadata = attn_metadata + if prefill_metadata is None or self.prefill_use_fusedsdpa: + return attn_metadata + + seq_lens_t = prefill_metadata.seq_lens_tensor + len_mask = (torch.arange(0, seq_len, device=device, + dtype=torch.int32).view(1, seq_len).ge( + seq_lens_t.unsqueeze(-1)).view( + batch_size, 1, 1, seq_len)) + causal_mask = torch.triu(torch.ones((batch_size, 1, seq_len, seq_len), + device=device, + dtype=torch.bool), + diagonal=1) + mask = causal_mask.logical_or(len_mask) + attn_bias = (torch.zeros_like(mask, dtype=dtype).masked_fill_( + mask, -math.inf)) + attn_metadata = prefill_metadata._replace(attn_bias=attn_bias) + return attn_metadata + + def _set_block_mapping(self, metadata, batch_size, device, dtype): + mask = torch.arange(0, + self.block_size, + device=device, + dtype=torch.int32).unsqueeze(0) + mask = mask >= metadata.block_usage.unsqueeze(-1) + attn_bias = (torch.zeros_like(mask, dtype=dtype).masked_fill_( + mask, -math.inf)) + block_mapping = torch.nn.functional.one_hot(metadata.block_mapping, + num_classes=batch_size) + block_mapping = block_mapping.to(dtype) + metadata = metadata._replace(block_mapping=block_mapping, + attn_bias=attn_bias) + return metadata + + def _update_metadata(self, attn_metadata, batch_size, seq_len, device, + dtype): + if attn_metadata.is_prompt: + meta = attn_metadata + attn_metadata = self._set_attn_bias(meta, batch_size, seq_len, + device, dtype) + else: + meta = attn_metadata + attn_metadata = self._set_block_mapping(meta, batch_size, device, + dtype) + return attn_metadata + + def forward(self, *args, **kwargs): + kwargs = kwargs.copy() + selected_token_indices = kwargs.pop('selected_token_indices') + if 'warmup_mode' in kwargs: + kwargs.pop('warmup_mode') + input_ids = kwargs['input_ids'] + kwargs['attn_metadata'] = self._update_metadata( + kwargs['attn_metadata'], input_ids.size(0), input_ids.size(1), + input_ids.device, self.dtype) + LoraMask.setLoraMask(kwargs.pop('lora_mask')) + hidden_states = self.model(*args, **kwargs) + hidden_states = hidden_states.view(-1, hidden_states.shape[-1]) + hidden_states = hidden_states.index_select(0, selected_token_indices) + return 
hidden_states + + def compute_logits(self, *args, **kwargs): + return self.model.compute_logits(*args, **kwargs) + + def sample(self, *args, **kwargs): + return self.model.sample(*args, **kwargs) + + +class PreparePromptMetadata(NamedTuple): + input_tokens: torch.Tensor + input_positions: List[List[int]] + attn_metadata: Optional[AttentionMetadata] + seq_lens: List[int] + query_lens: List[int] + lora_index_mapping: List[List[int]] + lora_prompt_mapping: List[List[int]] + lora_requests: Set[LoRARequest] + multi_modal_kwargs: Optional[Dict[str, BatchedTensorInputs]] + slot_mapping: List[List[int]] + lora_ids: List[int] + + @classmethod + def empty(cls): + return PreparePromptMetadata(input_tokens=[], + input_positions=[], + attn_metadata=None, + seq_lens=[], + query_lens=[], + lora_index_mapping=[], + lora_prompt_mapping=[], + lora_requests=set(), + multi_modal_kwargs=None, + slot_mapping=[], + lora_ids=[]) + + +class PrepareDecodeMetadata(NamedTuple): + input_tokens: torch.Tensor + input_positions: List[List[int]] + attn_metadata: Optional[AttentionMetadata] + lora_index_mapping: List[List[int]] + lora_prompt_mapping: List[List[int]] + lora_requests: Set[LoRARequest] + slot_mapping: List[List[int]] + lora_ids: List[int] + + @classmethod + def empty(cls): + return PrepareDecodeMetadata(input_tokens=[], + input_positions=[], + attn_metadata=None, + lora_index_mapping=[], + lora_prompt_mapping=[], + lora_requests=set(), + slot_mapping=[], + lora_ids=[]) + + +# How batches are constructed. +class BatchType(IntEnum): + # Every batch is prefill. + PREFILL = 0 + # Every batch is decode. + DECODE = 1 + # Batch is a mixture of prefill and decode. + MIXED = 2 + + +TModelInputForHPU = TypeVar('TModelInputForHPU', bound="ModelInputForHPU") + + +@dataclasses.dataclass(frozen=True) +class ModelInputForHPU(ModelRunnerInputBase): + """ + This base class contains metadata needed for the base model forward pass + but not metadata for possible additional steps, e.g., sampling. Model + runners that run additional steps should subclass this method to add + additional fields. 
+ """ + input_tokens: Optional[torch.Tensor] = None + input_positions: Optional[torch.Tensor] = None + seq_lens: Optional[List[int]] = None + query_lens: Optional[List[int]] = None + lora_mapping: Optional["LoRAMapping"] = None + lora_requests: Optional[Set[LoRARequest]] = None + attn_metadata: Optional["AttentionMetadata"] = None + multi_modal_kwargs: Optional[Dict[str, torch.Tensor]] = None + real_batch_size: Optional[int] = None + batch_size_padded: Optional[int] = None + virtual_engine: int = 0 + lora_ids: Optional[List[int]] = None + async_callback: Optional[Callable] = None + + def as_broadcastable_tensor_dict(self) -> Dict[str, Any]: + tensor_dict = { + "input_tokens": self.input_tokens, + "input_positions": self.input_positions, + "lora_requests": self.lora_requests, + "lora_mapping": self.lora_mapping, + "multi_modal_kwargs": self.multi_modal_kwargs, + "real_batch_size": self.real_batch_size, + "batch_size_padded": self.batch_size_padded, + "virtual_engine": self.virtual_engine, + "lora_ids": self.lora_ids, + } + _add_attn_metadata_broadcastable_dict(tensor_dict, self.attn_metadata) + return tensor_dict + + @classmethod + def from_broadcasted_tensor_dict( + cls: Type[TModelInputForHPU], + tensor_dict: Dict[str, Any], + attn_backend: Optional["AttentionBackend"] = None, + ) -> TModelInputForHPU: + if attn_backend is not None: + tensor_dict = _init_attn_metadata_from_tensor_dict( + attn_backend, tensor_dict) + return cls(**tensor_dict) + + +@dataclasses.dataclass(frozen=True) +class ModelInputForHPUWithSamplingMetadata(ModelInputForHPU): + """ + Used by the ModelRunner. + """ + sampling_metadata: Optional["SamplingMetadata"] = None + # Used for speculative decoding. We do not broadcast it because it is only + # used by the driver worker. + is_prompt: Optional[bool] = None + + def as_broadcastable_tensor_dict(self) -> Dict[str, Any]: + tensor_dict = { + "input_tokens": self.input_tokens, + "input_positions": self.input_positions, + "lora_requests": self.lora_requests, + "lora_mapping": self.lora_mapping, + "multi_modal_kwargs": self.multi_modal_kwargs, + "lora_ids": self.lora_ids, + } + _add_attn_metadata_broadcastable_dict(tensor_dict, self.attn_metadata) + _add_sampling_metadata_broadcastable_dict(tensor_dict, + self.sampling_metadata) + return tensor_dict + + @classmethod + def from_broadcasted_tensor_dict( + cls, + tensor_dict: Dict[str, Any], + attn_backend: Optional["AttentionBackend"] = None, + ) -> "ModelInputForHPUWithSamplingMetadata": + tensor_dict = _init_sampling_metadata_from_tensor_dict(tensor_dict) + # FIXME(kzawora): this fails for whatever reason - why? + if attn_backend is not None: + tensor_dict = _init_attn_metadata_from_tensor_dict( + attn_backend, tensor_dict) + return cls(**tensor_dict) + + +class HPUModelRunnerBase(ModelRunnerBase[TModelInputForHPU]): + """ + Helper class for shared methods between GPU model runners. 
+ """ + _model_input_cls: Type[TModelInputForHPU] + + def __init__( + self, + vllm_config: VllmConfig, + is_driver_worker: bool = False, + return_hidden_states: bool = False, + ): + ModelRunnerBase.__init__(self, vllm_config=vllm_config) + self.is_driver_worker = is_driver_worker + self.return_hidden_states = return_hidden_states + + self.sliding_window = (self.model_config.get_sliding_window() + if self.model_config is not None else None) + self.device_config = (self.device_config if self.device_config + is not None else DeviceConfig()) + self.device = self.device_config.device + self.enforce_eager = self.model_config.enforce_eager + self.max_num_seqs = self.scheduler_config.max_num_seqs + # NOTE(kzawora): Change that to scheduler_config.max_num_prefill_seqs + # once padding-aware scheduling gets merged + self.max_num_prefill_seqs = 64 + self.max_model_len = self.scheduler_config.max_model_len + self.max_num_batched_tokens = \ + self.scheduler_config.max_num_batched_tokens + self.block_size = self.cache_config.block_size + + self.pin_memory = is_pin_memory_available() + self.kv_cache_dtype = self.cache_config.cache_dtype + + self.attn_backend = get_attn_backend( + self.model_config.get_head_size(), + self.model_config.dtype, + self.kv_cache_dtype, + self.block_size, + self.model_config.is_attention_free, + ) + + # Lazy initialization + self.lora_manager: LRUCacheWorkerLoRAManager = None + self.model: torch.nn.Module = None + self.inc_initialized_successfully = False + + # Profiler stats + self.profiler = HabanaHighLevelProfiler() + self.profiler_counter_helper = HabanaProfilerCounterHelper() + self.seen_configs: set = set() + self._mem_margin: Optional[int] = None + self.bucketing_global_state = HPUBucketingGlobalState() + self._setup_buckets() + self._set_gc_threshold() + + def _set_gc_threshold(self) -> None: + # Read https://docs.python.org/3/library/gc.html#gc.set_threshold + # for comprehensive description of gc generations. + # We can either use VLLM_GC_THR_GEN[0-2] (this has higher priority) + # to set particular generation threshold or use simpler + # VLLM_GC_THR_MULTIPLIER to multiply default values. 
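+        # Example: VLLM_GC_THR_GEN0=100000 pins generation 0 (any non-default
+        # per-generation value disables the multiplier path below), while
+        # VLLM_GC_THR_MULTIPLIER=4 scales all three default thresholds.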
+ default_gc_thrs = list(gc.get_threshold()) + requested_gc_thrs = [0] * len(default_gc_thrs) + for i in range(len(default_gc_thrs)): + requested_gc_thrs[i] = int( + os.environ.get(f'VLLM_GC_THR_GEN{i}', default_gc_thrs[i])) + if requested_gc_thrs == default_gc_thrs: + gc_thr_multiplier = int(os.environ.get('VLLM_GC_THR_MULTIPLIER', + 2)) + requested_gc_thrs = [ + t * gc_thr_multiplier for t in default_gc_thrs + ] + gc.set_threshold(*requested_gc_thrs) + + # Multi-modal data support + self.multi_modal_input_mapper = MULTIMODAL_REGISTRY \ + .create_input_mapper(self.model_config) + + self.skip_warmup = os.environ.get('VLLM_SKIP_WARMUP', + 'false').lower() == 'true' + + def load_model(self) -> None: + import habana_frameworks.torch.core as htcore + if self.model_config.quantization == 'inc' or \ + self.model_config.quantization == 'fp8': + htcore.hpu_set_env() + with HabanaMemoryProfiler() as m: + with HabanaMemoryProfiler() as m_getmodel: + self.model = get_model(vllm_config=self.vllm_config) + msg = ("Pre-loading model weights on " + f"{next(self.model.parameters()).device} " + f"took {m_getmodel.get_summary_string()}") + logger.info(msg) + + if self.lora_config: + assert hasattr(self.model, "supported_lora_modules" + ) and self.model.supported_lora_modules, ( + "Model does not support LoRA") + assert hasattr(self.model, "embedding_modules" + ), "Model does not have embedding_modules" + assert hasattr( + self.model, "embedding_padding_modules" + ), "Model does not have embedding_padding_modules" + self.lora_manager = LRUCacheWorkerLoRAManager( + self.scheduler_config.max_num_seqs, + self.scheduler_config.max_num_batched_tokens, + self.vocab_size, self.lora_config, self.device, + self.model.embedding_modules, + self.model.embedding_padding_modules) + self.model = self.lora_manager.create_lora_manager(self.model) + + if self.model_config.quantization == 'inc': + logger.info("Preparing model with INC..") + with HabanaMemoryProfiler() as m_inc: + from neural_compressor.torch.quantization import ( + FP8Config, convert, prepare) + config = FP8Config.from_json_file( + os.getenv("QUANT_CONFIG", "")) + if config.measure: + self.model = prepare(self.model, config) + elif config.quantize: + self.model = convert(self.model, config) + htcore.hpu_initialize(self.model, + mark_only_scales_as_const=True) + self.inc_initialized_successfully = True + logger.info("Preparing model with INC took %s", + m_inc.get_summary_string()) + else: + self.model = self.model.to("hpu") + htcore.mark_step() + torch.hpu.synchronize() + + with HabanaMemoryProfiler() as m_wrap: + self.model = _maybe_wrap_in_hpu_graph( + self.model, + self.block_size, + dtype=self.model_config.dtype, + enforce_eager=self.enforce_eager) + msg = f"Wrapping in HPU Graph took {m_wrap.get_summary_string()}" + logger.info(msg) + + self.model_memory_usage = m.consumed_device_memory + msg = f"Loading model weights took in total {m.get_summary_string()}" + logger.info(msg) + + def _use_graphs(self, batch_size, seq_len, is_prompt): + if self.enforce_eager: + return False + if self.skip_warmup: + return True + return (batch_size, seq_len, is_prompt) in self.graphed_buckets + + def _is_valid_bucket(self, bucket): + return bucket[0] * bucket[1] <= self.max_num_batched_tokens + + def _setup_buckets(self) -> None: + align_bs = lambda x: min(self.max_num_seqs, x) + #FIXME: The default values should be max_model_len + max_prompt_seq = 1024 + max_decode_seq = 2048 + self.bucketing_global_state.prompt_bs_bucket_cfg = read_bucket_settings( + 'prompt', + 'bs', + 
min=1, + step=align_bs(32), + max=self.max_num_prefill_seqs) + self.bucketing_global_state.decode_bs_bucket_cfg = read_bucket_settings( + 'decode', 'bs', min=1, step=align_bs(32), max=self.max_num_seqs) + self.bucketing_global_state.prompt_seq_bucket_cfg = \ + read_bucket_settings( + 'prompt', + 'seq', + min=self.block_size, + step=self.block_size, + max=max_prompt_seq) + self.bucketing_global_state.decode_block_bucket_cfg = \ + read_bucket_settings( + 'decode', + 'block', + min=self.block_size, + step=self.block_size, + max=max(self.block_size, + self.max_num_seqs * max_decode_seq // self.block_size)) + self.graphed_buckets: Set[Any] = set() + + msg = ("Prompt bucket config (min, step, max_warmup) " + f"bs:{self.bucketing_global_state.prompt_bs_bucket_cfg}, " + f"seq:{self.bucketing_global_state.prompt_seq_bucket_cfg}") + logger.info(msg) + + msg = ("Decode bucket config (min, step, max_warmup) " + f"bs:{self.bucketing_global_state.decode_bs_bucket_cfg}, " + f"block:{self.bucketing_global_state.decode_block_bucket_cfg}") + logger.info(msg) + + def _prepare_prompt( + self, + seq_group_metadata_list: List[SequenceGroupMetadata], + ) -> PreparePromptMetadata: + input_tokens: List[List[int]] = [] + input_positions: List[List[int]] = [] + slot_mapping: List[List[int]] = [] + lora_index_mapping: List[List[int]] = [] + lora_prompt_mapping: List[List[int]] = [] + lora_requests: Set[LoRARequest] = set() + + seq_lens: List[int] = [] + context_lens: List[int] = [] + query_lens: List[int] = [] + prefix_block_tables: List[List[int]] = [] + multi_modal_inputs_list: List[MultiModalInputs] = [] + + if len(seq_group_metadata_list) == 0: + return PreparePromptMetadata.empty() + + for seq_group_metadata in seq_group_metadata_list: + assert seq_group_metadata.is_prompt + seq_ids = list(seq_group_metadata.seq_data.keys()) + assert len(seq_ids) == 1 + seq_id = seq_ids[0] + + computed_block_nums = seq_group_metadata.computed_block_nums + if (self.scheduler_config is not None + and self.scheduler_config.chunked_prefill_enabled + and not (computed_block_nums is None + or computed_block_nums == [])): + raise RuntimeError( + "chunked prefill cannot be used with prefix caching " + "now.") + + token_chunk_size = seq_group_metadata.token_chunk_size + seq_data = seq_group_metadata.seq_data[seq_id] + context_len = seq_data.get_num_computed_tokens() + # We should use get_len here because in case of preemption + # it contains output tokens. + seq_len = min(seq_data.get_len(), context_len + token_chunk_size) + prompt_tokens = seq_data.get_token_ids()[context_len:seq_len] + seq_lens.append(seq_len) + + # NOTE: This only works for oooooooxxx style attention. + if computed_block_nums is not None and len( + computed_block_nums) > 0 and self.sliding_window is None: + # Prefix is not supported with sliding_window + context_len = len(computed_block_nums) * self.block_size + prompt_tokens = prompt_tokens[context_len:] + prefix_block_tables.append(computed_block_nums) + elif self.scheduler_config.chunked_prefill_enabled: + if seq_group_metadata.block_tables is not None: + # Prefill has chunked before. + block_table = seq_group_metadata.block_tables[seq_id] + prefix_block_tables.append(block_table) + else: + # The first prefill. + prefix_block_tables.append([]) + else: + prefix_block_tables.append([]) + # Right now, prefill start is always 0. However, this + # assumption can be changed once chunked prefill is introduced. 
+ assert context_len == 0 + + # actual prompt lens + context_lens.append(context_len) + query_lens.append(seq_len - context_len) + input_tokens.append(prompt_tokens) + # NOTE(woosuk): Here we assume that the first token in the prompt + # is always the first token in the sequence. + input_positions.append(list(range(context_len, seq_len))) + + mm_data = seq_group_metadata.multi_modal_data + if mm_data: + mm_kwargs = self.multi_modal_input_mapper(mm_data) + multi_modal_inputs_list.append(mm_kwargs) + + if seq_group_metadata.block_tables is None: + # During memory profiling, the block tables are not initialized + # yet. In this case, we just use a dummy slot mapping. + slot_mapping.append([_PAD_SLOT_ID] * seq_len) + continue + + # Compute the slot mapping. + slot_mapping.append([]) + block_table = seq_group_metadata.block_tables[seq_id] + + # Mask the [0, start_idx) tokens of the prompt with _PAD_SLOT_ID, + # where start_idx is max(0, seq_len - sliding_window). + # For example, if the prompt len is 10, sliding window is 8, and + # block size is 4, the first two tokens are masked and the slot + # mapping will be [-1, -1, 2, 3, 4, 5, 6, 7, 0, 1]. + start_idx = 0 + if self.sliding_window is not None: + assert context_len == 0, ( + "Prefix caching is currently not supported with " + "sliding window attention") + start_idx = max(0, seq_len - self.sliding_window) + for i in range(context_len, seq_len): + if i < start_idx: + slot_mapping[-1].append(_PAD_SLOT_ID) + continue + + block_number = block_table[i // self.block_size] + block_offset = i % self.block_size + slot = block_number * self.block_size + block_offset + slot_mapping[-1].append(slot) + + max_query_len = max(query_lens) + sum_query_len = sum(query_lens) + real_num_seqs = len(query_lens) + assert max_query_len > 0 + + max_prompt_len = max( + find_bucket(max(seq_lens), + self.bucketing_global_state.prompt_seq_bucket_cfg), + self.block_size) + + lora_ids: List[int] = [] + for seq_group_metadata, context_len in zip(seq_group_metadata_list, + context_lens): + lora_id = seq_group_metadata.lora_int_id + lora_ids.append(lora_id) + + if lora_id > 0: + lora_requests.add(seq_group_metadata.lora_request) + + lora_index_mapping += [lora_id] * (max_prompt_len - context_len) + lora_prompt_mapping.extend( + [lora_id] * + (max_prompt_len - context_len + if seq_group_metadata.sampling_params.prompt_logprobs else 1)) + + input_tokens = make_tensor_with_pad(input_tokens, + max_len=max_prompt_len, + pad=0, + dtype=torch.long, + device=self.device) + + input_positions = make_tensor_with_pad(input_positions, + max_len=max_prompt_len, + pad=0, + dtype=torch.long, + device=self.device) + + slot_mapping = make_tensor_with_pad(slot_mapping, + max_len=max_prompt_len, + pad=_PAD_SLOT_ID, + dtype=torch.long, + device=self.device) + + seq_lens_tensor = torch.tensor(seq_lens, + dtype=torch.long, + device=self.device) + + block_indices, block_offsets = precompute_indices_and_offsets( + self.block_size, slot_mapping, True) + attn_metadata = self.attn_backend.make_metadata( + is_prompt=True, + block_list=None, + block_mapping=None, + block_usage=None, + block_indices=block_indices, + block_offsets=block_offsets, + block_scales=None, + attn_bias=None, + seq_lens_tensor=seq_lens_tensor, + num_prefills=real_num_seqs, + num_prefill_tokens=sum_query_len, + num_decode_tokens=0, + slot_mapping=slot_mapping, + multi_modal_placeholder_index_maps= + None # FIXME(kzawora): mutli-modality will not work here + ) + multi_modal_kwargs = MultiModalInputs.batch(multi_modal_inputs_list) + 
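+        # input_tokens, input_positions and slot_mapping were padded to
+        # max_prompt_len above so their shapes land on a prompt bucket boundary.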
+ return PreparePromptMetadata(input_tokens=input_tokens, + input_positions=input_positions, + attn_metadata=attn_metadata, + seq_lens=seq_lens, + query_lens=query_lens, + lora_index_mapping=lora_index_mapping, + lora_prompt_mapping=lora_prompt_mapping, + lora_requests=lora_requests, + multi_modal_kwargs=multi_modal_kwargs, + slot_mapping=slot_mapping, + lora_ids=lora_ids) + + def _prepare_decode( + self, + seq_group_metadata_list: List[SequenceGroupMetadata], + ) -> PrepareDecodeMetadata: + input_tokens: List[List[int]] = [] + input_positions: List[List[int]] = [] + slot_mapping: List[List[int]] = [] + seq_lens: List[int] = [] + block_tables: List[List[int]] = [] + lora_index_mapping: List[List[int]] = [] + lora_prompt_mapping: List[List[int]] = [] + lora_requests: Set[LoRARequest] = set() + + if len(seq_group_metadata_list) == 0: + return PrepareDecodeMetadata.empty() + lora_ids: List[int] = [] + + dummy_slots = itertools.cycle( + range(_PAD_SLOT_ID, _PAD_SLOT_ID + self.block_size)) + + for seq_group_metadata in seq_group_metadata_list: + assert not seq_group_metadata.is_prompt + assert seq_group_metadata.token_chunk_size == 1 + + seq_ids = list(seq_group_metadata.seq_data.keys()) + lora_id = seq_group_metadata.lora_int_id + lora_ids.append(lora_id) + + if lora_id > 0: + lora_requests.add(seq_group_metadata.lora_request) + + for seq_id in seq_ids: + seq_data = seq_group_metadata.seq_data[seq_id] + generation_token = seq_data.get_last_token_id() + input_tokens.append([generation_token]) + + seq_len = seq_data.get_len() + position = seq_len - 1 + input_positions.append([position]) + + seq_len = seq_len if self.sliding_window is None else min( + seq_len, self.sliding_window) + seq_lens.append(seq_len) + + block_table = seq_group_metadata.block_tables[seq_id] + if len(block_table) == 0: + block_number = _PAD_BLOCK_ID + else: + block_number = block_table[position // self.block_size] + if block_number == _PAD_BLOCK_ID: + slot = next(dummy_slots) + else: + block_offset = position % self.block_size + slot = block_number * self.block_size + block_offset + slot_mapping.append([slot]) + lora_index_mapping.append(lora_id) + lora_prompt_mapping.append(lora_id) + + if self.sliding_window is not None: + sliding_window_blocks = (self.sliding_window // + self.block_size) + block_table = block_table[-sliding_window_blocks:] + block_tables.append(block_table) + + input_tokens = torch.tensor(input_tokens, + dtype=torch.long, + device=self.device) + input_positions = torch.tensor(input_positions, + dtype=torch.long, + device=self.device) + + num_decode_tokens = sum(seq_lens) + + blocks_used = [len(bt) for bt in block_tables if bt] + block_list = [] + block_scales = [] + for i, bt in enumerate(block_tables): + block_list.extend(bt) + blocks_in_group = len(bt) + if blocks_in_group > 0: + scale = 1.0 / blocks_in_group + block_scales.extend([scale] * blocks_in_group) + + block_mapping_nested: List[List[int]] = [ + [i] * b_u for i, b_u in enumerate(blocks_used) + ] + block_mapping: List[int] = list( + itertools.chain.from_iterable(block_mapping_nested)) + + last_block = [ + sl % self.block_size + 1 for sl in itertools.chain(*slot_mapping) + ] + block_usage = [[self.block_size] * (b_u - 1) + [lb] + for b_u, lb in zip(blocks_used, last_block)] + block_usage = list(itertools.chain(*block_usage)) + + block_bucket_size = find_bucket( + len(block_list), + self.bucketing_global_state.decode_block_bucket_cfg) + block_list = pad_list(block_list, block_bucket_size, _PAD_BLOCK_ID) + block_mapping = pad_list(block_mapping, 
block_bucket_size, -1) + block_usage = pad_list(block_usage, block_bucket_size, 1) + block_scales = pad_list(block_scales, block_bucket_size, 0.0) + + block_list = torch.tensor(block_list, + dtype=torch.int, + device=self.device) + block_mapping = torch.tensor(block_mapping, + dtype=torch.long, + device=self.device) + block_usage = torch.tensor(block_usage, + dtype=self.model_config.dtype, + device=self.device) + + slot_mapping = torch.tensor(slot_mapping, + dtype=torch.long, + device=self.device) + + block_indices, block_offsets = precompute_indices_and_offsets( + self.block_size, slot_mapping, False) + block_scales = torch.tensor(block_scales, + dtype=self.model_config.dtype, + device=self.device) + + attn_metadata = self.attn_backend.make_metadata( + is_prompt=False, + block_list=block_list, + block_mapping=block_mapping, + block_usage=block_usage, + block_indices=block_indices, + block_offsets=block_offsets, + block_scales=block_scales, + attn_bias=None, + seq_lens_tensor=None, + num_prefills=0, + num_prefill_tokens=0, + num_decode_tokens=num_decode_tokens, + slot_mapping=slot_mapping, + multi_modal_placeholder_index_maps=None) + return PrepareDecodeMetadata(input_tokens=input_tokens, + input_positions=input_positions, + attn_metadata=attn_metadata, + lora_index_mapping=lora_index_mapping, + lora_prompt_mapping=lora_prompt_mapping, + lora_requests=lora_requests, + slot_mapping=slot_mapping, + lora_ids=lora_ids) + + def prepare_input_tensors( + self, + seq_group_metadata_list: List[SequenceGroupMetadata], + ) -> Tuple[TModelInputForHPU, SamplingMetadata]: + if len(seq_group_metadata_list) == 0: + return self._model_input_cls(), None + + input_tokens = None + input_positions = None + lora_mapping = None + lora_requests = None + multi_modal_kwargs = None + batch_type = None + seq_lens = None + query_lens = None + real_batch_size = None + batch_size_padded = None + + self.event_start = self.profiler.get_timestamp_us() + is_prompt = seq_group_metadata_list[0].is_prompt + base_event_name = 'prompt' if is_prompt else 'decode' + self.profiler.start('internal', base_event_name) + + real_batch_size = len(seq_group_metadata_list) + bucket_cfg = self.bucketing_global_state.prompt_bs_bucket_cfg \ + if is_prompt else self.bucketing_global_state.decode_bs_bucket_cfg + batch_size_padded = find_bucket(real_batch_size, bucket_cfg) + batch_size_padding = batch_size_padded - real_batch_size + seq_group_metadata_list = seq_group_metadata_list.copy() + if batch_size_padding > 0: + dummy_seq_group_metadata = self.create_dummy_seq_group_metadata( + 0, 0, is_prompt) + seq_group_metadata_list.extend(dummy_seq_group_metadata + for _ in range(batch_size_padding)) + + prefill_reqs = [] + decode_reqs = [] + for seq_group_meta in seq_group_metadata_list: + if seq_group_meta.is_prompt: + prefill_reqs.append(seq_group_meta) + else: + decode_reqs.append(seq_group_meta) + + # Prepare input tensors. 
+ ( + input_tokens, + input_positions, + prefill_attn_metadata, + seq_lens, + query_lens, + lora_index_mapping, + lora_prompt_mapping, + lora_requests, + multi_modal_kwargs, + slot_mapping, + lora_ids, + ) = self._prepare_prompt(prefill_reqs) + ( + decode_input_tokens, + decode_input_positions, + decode_attn_metadata, + decode_lora_index_mapping, + decode_lora_prompt_mapping, + decode_lora_requests, + decode_slot_mapping, + decode_lora_ids, + ) = self._prepare_decode(decode_reqs) + sampling_metadata = SamplingMetadata.prepare(seq_group_metadata_list, + seq_lens, query_lens, + self.device, + self.pin_memory) + + if not self.scheduler_config.chunked_prefill_enabled: + assert (len(prefill_reqs) and len(decode_reqs)) == 0 + + num_prefills = len(seq_lens) + num_prefill_tokens = len(input_tokens) + num_decode_tokens = len(decode_input_tokens) + + # NOTE(kzawora): Here we diverge from GPU code - we don't + # support mixed batches, so we either use decode or prefill + # inputs, without coalescing. + assert (num_prefills == 0 and num_decode_tokens > 0) or ( + num_prefills > 0 + and num_decode_tokens == 0), "HPU does not support mixed batches!" + if num_decode_tokens > 0: + input_tokens = decode_input_tokens + input_positions = decode_input_positions + slot_mapping = decode_slot_mapping + lora_index_mapping = decode_lora_index_mapping + lora_prompt_mapping = decode_lora_prompt_mapping + lora_requests = decode_lora_requests + lora_ids = decode_lora_ids + + # FIXME: We need to adjust selected_token_indices to accommodate + # for padding + max_len = input_tokens.size(1) + paddings = [max_len - s for s in seq_lens] + paddings = [0] + paddings[:-1] + paddings = list(itertools.accumulate(paddings)) + paddings_prompt_logprobs = [] + for i, seq_group_metadata in enumerate(seq_group_metadata_list): + if seq_group_metadata.sampling_params.prompt_logprobs is not None \ + and seq_group_metadata.is_prompt: + paddings_prompt_logprobs += ([paddings[i]] * seq_lens[i]) + paddings = torch.tensor( + paddings_prompt_logprobs if paddings_prompt_logprobs else paddings, + dtype=sampling_metadata.selected_token_indices.dtype, + device=sampling_metadata.selected_token_indices.device) + sampling_metadata.selected_token_indices.add_(paddings) + + if self.lora_config: + lora_mapping = LoRAMapping( + **dict(index_mapping=lora_index_mapping, + prompt_mapping=lora_prompt_mapping, + is_prefill=(num_prefills > 0))) + else: + lora_mapping = None + + if (prefill_attn_metadata is not None + and decode_attn_metadata is not None): + batch_type = BatchType.MIXED + raise NotImplementedError("Mixed batch is not supported on HPU") + elif prefill_attn_metadata is not None: + batch_type = BatchType.PREFILL + else: + batch_type = BatchType.DECODE + + metadata_dict = { + "input_tokens": input_tokens, + "input_positions": input_positions, + "selected_token_indices": sampling_metadata.selected_token_indices, + "lora_requests": lora_requests, + "lora_mapping": lora_mapping, + "multi_modal_kwargs": multi_modal_kwargs, + "num_prefill_tokens": num_prefill_tokens, + "num_decode_tokens": num_decode_tokens, + "slot_mapping": slot_mapping, + "num_prefills": num_prefills, + "batch_type": batch_type, + "seq_lens": seq_lens, + "query_lens": query_lens + } + if prefill_attn_metadata is not None: + metadata_dict.update(prefill_attn_metadata.asdict_zerocopy()) + else: + assert decode_attn_metadata is not None + metadata_dict.update(decode_attn_metadata.asdict_zerocopy()) + + attn_metadata = prefill_attn_metadata if \ + prefill_attn_metadata is not None else 
decode_attn_metadata + + return self._model_input_cls(input_tokens=input_tokens, + seq_lens=seq_lens, + query_lens=query_lens, + input_positions=input_positions, + attn_metadata=attn_metadata, + lora_requests=lora_requests, + lora_mapping=lora_mapping, + multi_modal_kwargs=multi_modal_kwargs, + real_batch_size=real_batch_size, + batch_size_padded=batch_size_padded, + lora_ids=lora_ids), \ + sampling_metadata + + def _seq_len(self, attn_metadata): + if attn_metadata.num_prefills != 0: + return attn_metadata.slot_mapping.size(1) + else: + return attn_metadata.block_list.numel() + + def trim_attn_metadata(self, metadata: AttentionMetadata) -> object: + # NOTE(kzawora): To anyone working on this in the future: + # Trimming metadata is required when using HPUGraphs. + # Attention metadata is going to be hashed by PT bridge, and + # appropriate HPUGraphs will be matched based on all inputs' hash. + + # Before you put more keys in here, make sure you know their + # value type and make sure you know how it's going to be hashed. + # You can find that information in input_hash function + # in habana_frameworks/torch/hpu/graphs.py. You can also hash + # it manually with torch.hpu.graphs.input_hash(attention_metadata) + + # If you use primitive types here - they will get hashed based + # on their value. You *will* get lots of excessive graph captures + # (and an OOM eventually) if you decide to put something like + # seq_len int here. + # If you absolutely need a scalar, put it in a tensor. Tensors + # get hashed using their metadata, not their values: + # input_hash(torch.tensor(123)) == input_hash(torch.tensor(321)) + # input_hash(123) != input_hash(321) + # input_hash("abc") != input_hash("cba") + attention_metadata = subtuple(metadata, 'TrimmedAttentionMetadata', [ + 'attn_bias', 'seq_lens_tensor', 'block_list', 'block_mapping', + 'block_usage', 'slot_mapping', 'is_prompt', 'block_indices', + 'block_offsets', 'block_scales' + ]) + return attention_metadata + + def create_dummy_seq_group_metadata(self, + group_id, + seq_len, + is_prompt, + lora_request=None): + sampling_params = SamplingParams(temperature=0) + num_blocks = math.ceil(seq_len / self.block_size) + seq_len = max(seq_len, 1) + if is_prompt: + input_len = seq_len + output_len = 0 + block_tables = None + else: + input_len = seq_len - 1 + output_len = 1 + block_tables = {group_id: [_PAD_BLOCK_ID] * num_blocks} + prompt_token_ids = [0] * input_len + output_token_ids = [1] * output_len + prompt_token_ids_array = array('l', prompt_token_ids) # noqa: F821 + seq_data = SequenceData(prompt_token_ids_array) + seq_data.output_token_ids = output_token_ids + return SequenceGroupMetadata(request_id=str(group_id), + is_prompt=(output_len == 0), + seq_data={group_id: seq_data}, + sampling_params=sampling_params, + block_tables=block_tables, + lora_request=lora_request) + + def profile_run(self) -> None: + num_layers = self.model_config.get_num_layers(self.parallel_config) + kv_caches = [None] * num_layers + max_batch_size = self.bucketing_global_state.prompt_bs_bucket_cfg[-1] + max_seq_len = min( + self.bucketing_global_state.prompt_seq_bucket_cfg[-1], + self.max_num_batched_tokens // max_batch_size) + + self.warmup_scenario(max_batch_size, max_seq_len, True, kv_caches, + False, True) + return + + def warmup_scenario(self, + batch_size, + seq_len, + is_prompt, + kv_caches, + is_pt_profiler_run=False, + is_lora_profile_run=False) -> None: + use_graphs = self._use_graphs(batch_size, seq_len, is_prompt) + scenario_name = ("warmup_" + f"{'prompt' if 
is_prompt else 'decode'}_"
+                         f"bs{batch_size}_"
+                         f"seq{seq_len}_"
+                         f"graphs{'T' if use_graphs else 'F'}")
+        max_num_seqs = self.scheduler_config.max_num_seqs
+        # This represents the maximum number of different requests
+        # that will have unique loras, and therefore the max amount of memory
+        # consumption. Create dummy lora request copies from the lora request
+        # passed in, which contains a lora from the lora warmup path.
+        dummy_lora_requests: List[LoRARequest] = []
+        dummy_lora_requests_per_seq: List[LoRARequest] = []
+        if self.lora_config and is_lora_profile_run:
+            assert self.lora_manager is not None
+            with self.lora_manager.dummy_lora_cache():
+                for idx in range(self.lora_config.max_loras):
+                    lora_id = idx + 1
+                    dummy_lora_request = LoRARequest(
+                        lora_name=f"warmup_{lora_id}",
+                        lora_int_id=lora_id,
+                        lora_local_path="/not/a/real/path",
+                    )
+                    self.lora_manager.add_dummy_lora(dummy_lora_request,
+                                                     rank=LORA_WARMUP_RANK)
+                    dummy_lora_requests.append(dummy_lora_request)
+                dummy_lora_requests_per_seq = [
+                    dummy_lora_requests[idx % len(dummy_lora_requests)]
+                    for idx in range(max_num_seqs)
+                ]
+        self.profiler.start('internal', scenario_name)
+        times = 3 if use_graphs or is_pt_profiler_run else 1
+        if self.lora_config and not is_lora_profile_run:
+            lora_mapping = LoRAMapping(
+                **dict(index_mapping=[0] * batch_size * seq_len,
+                       prompt_mapping=[0] * batch_size * seq_len,
+                       is_prefill=is_prompt))
+            self.set_active_loras(set(), lora_mapping)
+        if is_prompt:
+            seqs = [
+                self.create_dummy_seq_group_metadata(
+                    i,
+                    seq_len,
+                    is_prompt,
+                    lora_request=dummy_lora_requests_per_seq[i]
+                    if dummy_lora_requests_per_seq else None)
+                for i in range(batch_size)
+            ]
+        else:
+            # FIXME: seq_len is actually number of blocks
+            blocks = [seq_len // batch_size for _ in range(batch_size)]
+            blocks[0] += seq_len % batch_size
+            seqs = [
+                self.create_dummy_seq_group_metadata(
+                    i,
+                    b * self.block_size - 1,
+                    is_prompt,
+                    lora_request=dummy_lora_requests_per_seq[i]
+                    if dummy_lora_requests_per_seq else None)
+                for i, b in enumerate(blocks)
+            ]
+        torch.hpu.synchronize()
+        profiler = None
+        if is_pt_profiler_run and self.is_driver_worker:
+            profiler = setup_profiler()
+            profiler.start()
+        for _ in range(times):
+            inputs = self.prepare_model_input(seqs)
+            self.execute_model(inputs, kv_caches, warmup_mode=True)
+            torch.hpu.synchronize()
+            if profiler:
+                profiler.step()
+        if profiler:
+            profiler.stop()
+        self.profiler.end()
+        gc.collect()
+
+    def remove_all_loras(self):
+        if not self.lora_manager:
+            raise RuntimeError("LoRA is not enabled.")
+        self.lora_manager.remove_all_adapters()
+
+    def set_active_loras(self, lora_requests: Set[LoRARequest],
+                         lora_mapping: LoRAMapping) -> None:
+        if not self.lora_manager:
+            raise RuntimeError("LoRA is not enabled.")
+        self.lora_manager.set_active_adapters(lora_requests, lora_mapping)
+
+    def add_lora(self, lora_request: LoRARequest) -> bool:
+        if not self.lora_manager:
+            raise RuntimeError("LoRA is not enabled.")
+        return self.lora_manager.add_adapter(lora_request)
+
+    def remove_lora(self, lora_id: int) -> bool:
+        if not self.lora_manager:
+            raise RuntimeError("LoRA is not enabled.")
+        return self.lora_manager.remove_adapter(lora_id)
+
+    def pin_lora(self, lora_id: int) -> bool:
+        if not self.lora_manager:
+            raise RuntimeError("LoRA is not enabled.")
+        return self.lora_manager.pin_adapter(lora_id)
+
+    def list_loras(self) -> Set[int]:
+        if not self.lora_manager:
+            raise RuntimeError("LoRA is not enabled.")
+        return self.lora_manager.list_adapters()
+
+    def log_warmup(self, 
phase, i, max_i, batch_size, seq_len): + free_mem = format_bytes( + HabanaMemoryProfiler.current_free_device_memory()) + dim = "num_blocks" + if phase == "Prompt": + dim = "seq_len" + msg = (f"[Warmup][{phase}][{i+1}/{max_i}] " + f"batch_size:{batch_size} " + f"{dim}:{seq_len} " + f"free_mem:{free_mem}") + logger.info(msg) + + def warmup_all_buckets(self, buckets, is_prompt, kv_caches): + for i, (batch_size, seq_len) in enumerate(reversed(buckets)): + self.log_warmup('Prompt' if is_prompt else 'Decode', i, + len(buckets), batch_size, seq_len) + self.warmup_scenario(batch_size, seq_len, is_prompt, kv_caches) + + def warmup_graphs(self, + strategy, + buckets, + is_prompt, + kv_caches, + available_mem, + starting_mem=0, + total_batch_seq=0.001): + total_mem = starting_mem + idx = 0 + phase = f'Graph/{"Prompt" if is_prompt else "Decode"}' + num_candidates = len(buckets) + ordering : Union[Callable[[Any], Tuple[Any, Any]], \ + Callable[[Any], Tuple[Any, Any, Any]]] + if strategy == 'min_tokens': + ordering = lambda b: (b[0] * b[1], b[1], b[0]) + elif strategy == 'max_bs': + ordering = lambda b: (-b[0], b[1]) + else: + raise NotImplementedError( + f'Unsupported graph allocation strategy: {strategy}') + buckets = list(sorted(buckets, key=ordering)) + captured_all = True + for idx, (batch_size, seq_len) in enumerate(buckets): + # Graph memory usage is proportional to seq dimension in a batch + batch_seq = batch_size * seq_len if is_prompt else batch_size + mem_estimate = batch_seq / total_batch_seq * total_mem + if mem_estimate >= available_mem: + captured_all = False + continue + graphed_bucket = (batch_size, seq_len, is_prompt) + if graphed_bucket in self.graphed_buckets: + continue + self.graphed_buckets.add(graphed_bucket) + self.log_warmup(phase, idx, num_candidates, batch_size, seq_len) + with HabanaMemoryProfiler() as mem_prof: + self.warmup_scenario(batch_size, seq_len, is_prompt, kv_caches) + used_mem = align_workers(mem_prof.consumed_device_memory, + torch.distributed.ReduceOp.MAX) + available_mem -= used_mem + total_mem += used_mem + total_batch_seq += batch_seq + + return total_mem, total_batch_seq, captured_all + + def log_graph_warmup_summary(self, buckets, is_prompt, total_mem): + num_candidates = len(buckets) + phase = f'Graph/{"Prompt" if is_prompt else "Decode"}' + graphed = list(c[:2] for c in self.graphed_buckets + if c[2] == is_prompt) + if num_candidates == 0: + num_candidates = 1 + msg = (f'{phase} captured:{len(graphed)} ' + f'({100 * len(graphed) / num_candidates:.1f}%) ' + f'used_mem:{format_bytes(total_mem)} ' + f'buckets:{sorted(list(graphed))}') + logger.info(msg) + + @torch.inference_mode() + def warmup_model(self, kv_caches: List[torch.Tensor]) -> None: + if profile := os.environ.get('VLLM_PT_PROFILE', None): + phase, bs, seq_len, graph = profile.split('_') + is_prompt = phase == 'prompt' + graphs = graph == 't' + if graphs: + self.graphed_buckets.add((int(bs), int(seq_len), is_prompt)) + self.warmup_scenario(int(bs), int(seq_len), is_prompt, kv_caches, + True) + raise AssertionError("Finished profiling") + if self.skip_warmup: + logger.info("Skipping warmup...") + return + self.profiler.start('internal', 'warmup') + max_blocks = kv_caches[0][0].size(0) + + self.bucketing_global_state.prompt_buckets, prompt_omitted_buckets = \ + generate_prompt_buckets( + self.bucketing_global_state.prompt_bs_bucket_cfg, + self.bucketing_global_state.prompt_seq_bucket_cfg, + self.max_num_batched_tokens) + + msg = (f"Generated {len(self.bucketing_global_state.prompt_buckets)} " + 
f"prompt buckets [bs, seq]: \ + {list(sorted(self.bucketing_global_state.prompt_buckets))}") + logger.info(msg) + + msg = (f"Omitted {len(prompt_omitted_buckets)} " + "prompt buckets due to exceeded token budget " + f"(max_num_batched_tokens={self.max_num_batched_tokens})") + logger.info(msg) + + msg = f"Omitted prompt buckets: {list(sorted(prompt_omitted_buckets))}" + logger.debug(msg) + + self.bucketing_global_state.decode_buckets = generate_decode_buckets( + self.bucketing_global_state.decode_bs_bucket_cfg, + self.bucketing_global_state.decode_block_bucket_cfg, max_blocks) + logger.info("Generated %d decode buckets [bs, total_blocks]: %s", + len(self.bucketing_global_state.decode_buckets), + list(sorted(self.bucketing_global_state.decode_buckets))) + + if not htorch.utils.internal.is_lazy() and not self.enforce_eager: + cache_size_limit = len( + self.bucketing_global_state.prompt_buckets) + len( + self.bucketing_global_state.decode_buckets) + 1 + torch._dynamo.config.cache_size_limit = max( + cache_size_limit, torch._dynamo.config.cache_size_limit) + # Multiply by 8 to follow the original default ratio between + # the cache_size_limit and accumulated_cache_size_limit + torch._dynamo.config.accumulated_cache_size_limit = max( + cache_size_limit * 8, + torch._dynamo.config.accumulated_cache_size_limit) + + start_mem = HabanaMemoryProfiler.current_device_memory_usage() + start_time = time.perf_counter() + + compile_only_mode_context = functools.partial(bc.env_setting, + "PT_COMPILE_ONLY_MODE", + True) + can_use_compile_only_mode = True + try: + with compile_only_mode_context(): + pass + logger.debug("Using PT_COMPILE_ONLY_MODE.") + except KeyError: + can_use_compile_only_mode = False + logger.warning('Cannot use PT_COMPILE_ONLY_MODE. ' + 'Warmup time will be negatively impacted. 
' + 'Please update Gaudi Software Suite.') + with compile_only_mode_context( + ) if can_use_compile_only_mode else contextlib.nullcontext(): + self.warmup_all_buckets(self.bucketing_global_state.prompt_buckets, + True, kv_caches) + self.warmup_all_buckets(self.bucketing_global_state.decode_buckets, + False, kv_caches) + + if not self.enforce_eager and htorch.utils.internal.is_lazy(): + assert self.mem_margin is not None, \ + ("HabanaWorker.determine_num_available_blocks needs " + "to be called before warming up the model.") + free_mem = HabanaMemoryProfiler.current_free_device_memory() + graph_free_mem = free_mem - self.mem_margin + graph_free_mem = align_workers(graph_free_mem, + torch.distributed.ReduceOp.MIN) + prompt_graph_mem_ratio = float( + os.environ.get('VLLM_GRAPH_PROMPT_RATIO', '0.3')) + prompt_available_memory = (prompt_graph_mem_ratio * + graph_free_mem) + decode_available_memory = (graph_free_mem - + prompt_available_memory) + msg = ( + f"Using {format_bytes(graph_free_mem)}" + f"/{format_bytes(free_mem)} " + "of free device memory for HPUGraphs, " + f"{format_bytes(prompt_available_memory)} for prompt and " + f"{format_bytes(decode_available_memory)} for decode " + f"(VLLM_GRAPH_PROMPT_RATIO={prompt_graph_mem_ratio})") + logger.info(msg) + prompt_strategy = os.environ.get('VLLM_GRAPH_PROMPT_STRATEGY', + 'min_tokens') + decode_strategy = os.environ.get('VLLM_GRAPH_DECODE_STRATEGY', + 'max_bs') + mem_post_prompt, prompt_batch_seq, prompt_captured_all = \ + self.warmup_graphs( + prompt_strategy, self.bucketing_global_state.prompt_buckets, + True, kv_caches, prompt_available_memory) + mem_post_decode, decode_batch_seq, decode_captured_all = \ + self.warmup_graphs( + decode_strategy, self.bucketing_global_state.decode_buckets, + False, kv_caches, decode_available_memory) + + # Not all prompt buckets were captured, but all decode buckets + # were captured and we have some free graph-allocated space + # left. Let's try to use it for capturing more prompt buckets. + if (mem_post_decode + mem_post_prompt < graph_free_mem + and not prompt_captured_all and decode_captured_all): + mem_post_prompt, _, prompt_captured_all = ( + self.warmup_graphs( + prompt_strategy, + self.bucketing_global_state.prompt_buckets, True, + kv_caches, + graph_free_mem - mem_post_prompt - mem_post_decode, + mem_post_prompt, prompt_batch_seq)) + + # Not all decode buckets were captured, but all prompt buckets + # were captured and we have some free graph-allocated space + # left. Let's try to use it for capturing more decode buckets. 
+ if mem_post_decode + mem_post_prompt < graph_free_mem \ + and not decode_captured_all \ + and prompt_captured_all: + mem_post_decode, _, _ = self.warmup_graphs( + decode_strategy, + self.bucketing_global_state.decode_buckets, False, + kv_caches, + graph_free_mem - mem_post_prompt - mem_post_decode, + mem_post_decode, decode_batch_seq) + + self.log_graph_warmup_summary( + self.bucketing_global_state.prompt_buckets, True, + mem_post_prompt) + self.log_graph_warmup_summary( + self.bucketing_global_state.decode_buckets, False, + mem_post_decode) + + end_time = time.perf_counter() + end_mem = HabanaMemoryProfiler.current_device_memory_usage() + elapsed_time = end_time - start_time + msg = ( + f"Warmup finished in {elapsed_time:.0f} secs, " + f"allocated {format_bytes(end_mem - start_mem)} of device memory") + logger.info(msg) + self.profiler.end() + + @property + def vocab_size(self) -> int: + return self.model_config.get_vocab_size() + + @property + def mem_margin(self) -> Optional[int]: + return self._mem_margin + + @mem_margin.setter + def mem_margin(self, value): + self._mem_margin = value + + +def _maybe_wrap_in_hpu_graph(*args, **kwargs): + return htorch.hpu.wrap_in_hpu_graph( + HpuModelAdapter(*args, **kwargs), disable_tensor_cache=True + ) if htorch.utils.internal.is_lazy() else HpuModelAdapter(*args, **kwargs) + + +class HabanaProfilerCounterHelper(): + + def __init__(self): + self.niter = 0 + self.average_real_throughput = None + self.logged_once = False + self.real_seq_lens = [] + self.prompt_seq_lens = [] + + def capture_seq_group_metadata_stats(self, seq_group_metadata_list): + self.real_seq_lens = [ + len(seq_data.prompt_token_ids) + len(seq_data.output_token_ids) + for seq_group_metadata in seq_group_metadata_list + for seq_data in seq_group_metadata.seq_data.values() + ] + self.prompt_seq_lens = [ + len(seq_data.prompt_token_ids) + for seq_group_metadata in seq_group_metadata_list + for seq_data in seq_group_metadata.seq_data.values() + ] + + def get_counter_dict(self, cache_config, duration, seq_len, + batch_size_padded, real_batch_size, is_prompt): + throughput = batch_size_padded / (duration / 1e6) + throughput_effective = real_batch_size / (duration / 1e6) + + real_max_seq_len = max(self.real_seq_lens) + real_num_tokens = sum(self.real_seq_lens) + padded_num_tokens = batch_size_padded * seq_len + batch_token_utilization = real_num_tokens / padded_num_tokens + if self.average_real_throughput is None: + self.average_real_throughput = throughput_effective + else: # https://www.heikohoffmann.de/htmlthesis/node134.html + self.average_real_throughput = self.average_real_throughput + 1 / ( + self.niter + 1) * (throughput_effective - + self.average_real_throughput) + phase = "prompt" if is_prompt else "decode" + counters = { + f'{phase}_bucket_batch_size': batch_size_padded, + f'{phase}_batch_size': real_batch_size, + f'{phase}_bucket_seq_len': seq_len, + f'{phase}_seq_len': real_max_seq_len, + f'{phase}_bucket_gen_throughput': throughput, + f'{phase}_real_gen_throughput': throughput_effective, + f'{phase}_batch_token_utilization': batch_token_utilization, + 'average_real_throughput': self.average_real_throughput, + 'engine_iteration': self.niter, + } + self.niter += 1 + if is_prompt: + prompt_bucket_in_throughput = (seq_len * batch_size_padded) / ( + duration / 1e6) + prompt_real_in_throughput = sum( + self.prompt_seq_lens) / (duration / 1e6) + counters[ + f'{phase}_bucket_in_throughput'] = prompt_bucket_in_throughput + counters[f'{phase}_real_in_throughput'] = 
prompt_real_in_throughput
+
+        # KV cache might not be created yet (e.g. for profiling run)
+        if cache_config.num_gpu_blocks is not None and \
+                cache_config.num_gpu_blocks != 0:
+            cache_num_blocks_used = [
+                math.ceil(sl / cache_config.block_size)
+                for sl in self.real_seq_lens
+            ]
+            cache_total_num_blocks_used = sum(cache_num_blocks_used)
+            num_cache_blocks = cache_config.num_gpu_blocks
+            cache_total_num_free_blocks = \
+                num_cache_blocks - cache_total_num_blocks_used
+            cache_computed_utilization = \
+                cache_total_num_blocks_used / num_cache_blocks
+            max_blocks_per_seq = math.ceil(seq_len / cache_config.block_size)
+            batch_block_utilization = cache_total_num_blocks_used / (
+                batch_size_padded * max_blocks_per_seq)
+            counters['cache_num_blocks_used'] = cache_total_num_blocks_used
+            counters['cache_num_free_blocks'] = cache_total_num_free_blocks
+            counters['cache_computed_utilization'] = cache_computed_utilization
+            counters[
+                f'{phase}_batch_block_utilization'] = batch_block_utilization
+        if not self.logged_once:
+            counters['const_cache_num_blocks'] = cache_config.num_gpu_blocks
+            counters[
+                'const_gpu_memory_utilization'] = \
+                cache_config.gpu_memory_utilization
+            counters['const_block_size'] = cache_config.block_size
+            self.logged_once = True
+        return counters
+
+
+def unwrap_model(model):
+    if isinstance(model, torch._dynamo.eval_frame.OptimizedModule):
+        return unwrap_model(model._orig_mod)
+    else:
+        model = list(vars(model)['_modules'].values())[0]
+        modules = list(vars(model)['_modules'].values())
+        return modules
+
+
+class HPUModelRunner(HPUModelRunnerBase[ModelInputForHPUWithSamplingMetadata]):
+    """
+    HPU model runner with sampling step.
+    """
+    _model_input_cls: Type[ModelInputForHPUWithSamplingMetadata] = (
+        ModelInputForHPUWithSamplingMetadata)
+
+    def make_model_input_from_broadcasted_tensor_dict(
+        self,
+        tensor_dict: Dict[str, Any],
+    ) -> ModelInputForHPUWithSamplingMetadata:
+        return (
+            ModelInputForHPUWithSamplingMetadata.from_broadcasted_tensor_dict(
+                tensor_dict,
+                attn_backend=self.attn_backend,
+            ))
+
+    @torch.inference_mode()
+    def prepare_model_input(
+        self,
+        seq_group_metadata_list: List[SequenceGroupMetadata],
+        virtual_engine: int = 0,
+        finished_requests_ids: Optional[List[str]] = None
+    ) -> ModelInputForHPUWithSamplingMetadata:
+        """Prepare the model input based on a given sequence group, including
+        metadata for the sampling step.
+        The API assumes seq_group_metadata_list is sorted by prefill -> decode.
+        The result tensors and data structure also batches input in prefill
+        -> decode order. For example,
+        - input_tokens[:num_prefill_tokens] contains prefill tokens.
+        - input_tokens[num_prefill_tokens:] contains decode tokens.
+        If HPU graphs are required, this API automatically pads inputs.
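+        Shorter batches are topped up with dummy sequence groups so that the
+        effective batch size always lands on a warmed-up bucket.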
+        """
+        with self.profiler.record_event('internal', 'prepare_input_tensors'):
+            assert seq_group_metadata_list is not None
+            if self.profiler.enabled:
+                self.profiler_counter_helper.capture_seq_group_metadata_stats(
+                    seq_group_metadata_list=seq_group_metadata_list)
+            model_input, sampling_metadata = self.prepare_input_tensors(
+                seq_group_metadata_list)
+            assert model_input.attn_metadata is not None
+            is_prompt = model_input.attn_metadata.is_prompt
+
+        return dataclasses.replace(model_input,
+                                   sampling_metadata=sampling_metadata,
+                                   is_prompt=is_prompt,
+                                   virtual_engine=virtual_engine)
+
+    def finish_measurements(self):
+        from neural_compressor.torch.quantization import finalize_calibration
+        finalize_calibration(self.model.model)
+
+    def _check_config(self, batch_size, seq_len, is_prompt, warmup_mode):
+        cfg = (batch_size, seq_len, is_prompt)
+        seen = cfg in self.seen_configs
+        self.seen_configs.add(cfg)
+        if not seen and not warmup_mode:
+            phase = 'prompt' if is_prompt else 'decode'
+            logger.warning("Configuration: (%s, %s, %s) was not warmed-up!",
+                           phase, batch_size, seq_len)
+
+    def create_lora_mask(self, input_tokens: torch.Tensor, lora_ids: List[int],
+                         is_prompt: bool):
+        '''
+        This is a helper function to create the mask for lora computations.
+        Lora Mask is needed to ensure we match the correct lora weights for
+        the request.
+        For Prompt phase we have
+        lora_mask with shape (batch_size * seq_len, max_loras * max_rank)
+        lora_logits_mask with shape (batch_size, max_loras * max_rank)
+        For Decode phase we have both
+        lora_mask and lora_logits_mask with shape
+        (batch_size, max_loras * max_rank)
+        '''
+        lora_mask: torch.Tensor = None
+        lora_logits_mask: torch.Tensor = None
+        lora_index = 0
+
+        if self.lora_config:
+            if is_prompt:
+                lora_mask = torch.zeros(
+                    input_tokens.shape[0] * input_tokens.shape[1],
+                    (self.lora_config.max_loras) *\
+                    self.lora_config.max_lora_rank,
+                    dtype=self.lora_config.lora_dtype)
+                lora_logits_mask = torch.zeros(
+                    input_tokens.shape[0], (self.lora_config.max_loras) *
+                    self.lora_config.max_lora_rank,
+                    dtype=self.lora_config.lora_dtype)
+
+                ones = torch.ones(input_tokens.shape[1],
+                                  self.lora_config.max_lora_rank,
+                                  dtype=self.lora_config.lora_dtype)
+                logit_ones = torch.ones(1,
+                                        self.lora_config.max_lora_rank,
+                                        dtype=self.lora_config.lora_dtype)
+
+                for i in range(len(lora_ids)):
+                    if lora_ids[i] == 0:
+                        continue
+                    lora_index = self.lora_manager._adapter_manager.\
+                        lora_index_to_id.index(lora_ids[i])
+                    start_row = i * input_tokens.shape[1]
+                    end_row = start_row + input_tokens.shape[1]
+                    start_col = lora_index * self.lora_config.max_lora_rank
+                    end_col = start_col + self.lora_config.max_lora_rank
+                    lora_mask[start_row:end_row, start_col:end_col] = ones
+                    lora_logits_mask[i, start_col:end_col] = logit_ones
+                lora_mask = lora_mask.to('hpu')
+                lora_logits_mask = lora_logits_mask.to('hpu')
+            else:
+                lora_mask = torch.zeros(input_tokens.shape[0],
+                                        (self.lora_config.max_loras) *
+                                        self.lora_config.max_lora_rank,
+                                        dtype=self.lora_config.lora_dtype)
+                ones = torch.ones(1,
+                                  self.lora_config.max_lora_rank,
+                                  dtype=self.lora_config.lora_dtype)
+                for i in range(len(lora_ids)):
+                    if lora_ids[i] == 0:
+                        continue
+                    lora_index = self.lora_manager._adapter_manager.\
+                        lora_index_to_id.index(lora_ids[i])
+                    start_pos = lora_index * self.lora_config.max_lora_rank
+                    end_pos = start_pos + self.lora_config.max_lora_rank
+                    lora_mask[i, start_pos:end_pos] = ones
+                lora_mask = lora_mask.to('hpu')
+                lora_logits_mask = lora_mask
+
+        return lora_mask, lora_logits_mask
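+
+    # Shape summary for the masks built above (an illustrative example,
+    # assuming max_loras=2, max_lora_rank=16, batch_size=2, seq_len=3):
+    #   prompt phase: lora_mask is (2 * 3, 2 * 16), one row per token;
+    #                 lora_logits_mask is (2, 2 * 16), one row per sequence.
+    #   decode phase: both masks are (2, 2 * 16).
+    # Rows of requests without a LoRA (lora_id == 0) stay all-zero.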
+ + @torch.inference_mode() + def execute_model( + self, + model_input: ModelInputForHPUWithSamplingMetadata, + kv_caches: List[torch.Tensor], + intermediate_tensors: Optional[IntermediateTensors] = None, + num_steps: int = 1, + warmup_mode=False, + ) -> Optional[Union[List[SamplerOutput], IntermediateTensors]]: + if num_steps > 1: + raise ValueError( + "num_steps > 1 is not supported in HPUModelRunner") + + if self.lora_config: + assert model_input.lora_requests is not None + assert model_input.lora_mapping is not None + self.set_active_loras(model_input.lora_requests, + model_input.lora_mapping) + input_tokens = model_input.input_tokens + input_positions = model_input.input_positions + attn_metadata = model_input.attn_metadata + sampling_metadata = model_input.sampling_metadata + real_batch_size = model_input.real_batch_size + batch_size_padded = model_input.batch_size_padded + assert input_tokens is not None + assert input_positions is not None + assert sampling_metadata is not None + assert attn_metadata is not None + is_prompt = attn_metadata.is_prompt + assert is_prompt is not None + batch_size = input_tokens.size(0) + seq_len = self._seq_len(attn_metadata) + use_graphs = self._use_graphs(batch_size, seq_len, is_prompt) + self._check_config(batch_size, seq_len, is_prompt, warmup_mode) + + lora_mask: torch.Tensor = None + lora_logits_mask: torch.Tensor = None + if self.lora_config: + assert model_input.lora_ids is not None + lora_mask, lora_logits_mask = self.create_lora_mask( + input_tokens, model_input.lora_ids, attn_metadata.is_prompt) + + execute_model_kwargs = { + "input_ids": input_tokens, + "positions": input_positions, + "kv_caches": kv_caches, + "attn_metadata": self.trim_attn_metadata(attn_metadata), + "intermediate_tensors": intermediate_tensors, + "lora_mask": lora_mask, + **(model_input.multi_modal_kwargs or {}), + } + if htorch.utils.internal.is_lazy(): + execute_model_kwargs.update({"bypass_hpu_graphs": not use_graphs}) + + htorch.core.mark_step() + if self.is_driver_worker: + model_event_name = ("model_" + f"{'prompt' if is_prompt else 'decode'}_" + f"bs{batch_size}_" + f"seq{seq_len}_" + f"graphs{'T' if use_graphs else 'F'}") + else: + model_event_name = 'model_executable' + with self.profiler.record_event('internal', model_event_name): + hidden_states = self.model.forward( + **execute_model_kwargs, + selected_token_indices=sampling_metadata.selected_token_indices + ) + + if self.lora_config: + LoraMask.setLoraMask( + lora_logits_mask.index_select( + 0, sampling_metadata.selected_token_indices)) + + # Compute the logits. + with self.profiler.record_event( + 'internal', ('compute_logits_' + f'{"prompt" if is_prompt else "decode"}_bs' + f'{batch_size}_' + f'seq{seq_len}')): + sampling_metadata.selected_token_indices = None + logits = self.model.compute_logits(hidden_states, + sampling_metadata) + htorch.core.mark_step() + # Only perform sampling in the driver worker. + if not self.is_driver_worker: + return [] + + if model_input.async_callback is not None: + model_input.async_callback() + + # Sample the next token. 
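+        # Sampling below runs on the bucket-padded batch; outputs for the
+        # padding entries are dropped right after by trimming to
+        # real_batch_size.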
+ with self.profiler.record_event( + 'internal', ('sample_' + f'{"prompt" if is_prompt else "decode"}_' + f'bs{batch_size}_' + f'seq{seq_len}')): + output = self.model.sample( + logits=logits, + sampling_metadata=sampling_metadata, + ) + output.outputs = output.outputs[:real_batch_size] + htorch.core.mark_step() + + if self.is_driver_worker and self.profiler.enabled: + # Stop recording 'execute_model' event + self.profiler.end() + event_end = self.profiler.get_timestamp_us() + counters = self.profiler_counter_helper.get_counter_dict( + cache_config=self.cache_config, + duration=event_end - self.event_start, + seq_len=seq_len, + batch_size_padded=batch_size_padded, + real_batch_size=real_batch_size, + is_prompt=is_prompt) + self.profiler.record_counter(self.event_start, counters) + return [output] + + def shutdown_inc(self): + can_finalize_inc = False + from contextlib import suppress + with suppress(AttributeError): + can_finalize_inc = (self.model_config.quantization == 'inc') and \ + (self.model.model is not None) and \ + self.inc_initialized_successfully and \ + not getattr(self, "_is_inc_finalized", False) + if can_finalize_inc: + from neural_compressor.torch.quantization import ( + finalize_calibration) + finalize_calibration(self.model.model) + self._is_inc_finalized = True + + def __del__(self): + self.shutdown_inc() diff --git a/vllm/worker/hpu_worker.py b/vllm/worker/hpu_worker.py new file mode 100644 index 0000000000000..493f7a9fad098 --- /dev/null +++ b/vllm/worker/hpu_worker.py @@ -0,0 +1,410 @@ +############################################################################### +# Copyright (C) 2024 Habana Labs, Ltd. an Intel Company +############################################################################### + +import gc +import os +from typing import List, Optional, Set, Tuple, Type + +import habana_frameworks.torch as htorch # noqa:F401 +import torch +import torch.distributed +from vllm_hpu_extension.profiler import HabanaMemoryProfiler, format_bytes + +import vllm.envs as envs +from vllm.config import ParallelConfig, VllmConfig +from vllm.distributed import (ensure_model_parallel_initialized, + init_distributed_environment) +from vllm.logger import init_logger +from vllm.lora.request import LoRARequest +from vllm.model_executor import set_random_seed +from vllm.prompt_adapter.request import PromptAdapterRequest +from vllm.sequence import ExecuteModelRequest +from vllm.worker.cache_engine import CacheEngine +from vllm.worker.hpu_model_runner import HPUModelRunner +from vllm.worker.model_runner_base import ModelRunnerBase +from vllm.worker.worker_base import (LocalOrDistributedWorkerBase, WorkerBase, + WorkerInput) + +logger = init_logger(__name__) + + +class HPUWorker(LocalOrDistributedWorkerBase): + """A worker class that executes (a partition of) the model on a HPU. + + Each worker is associated with a single HPU. The worker is responsible for + maintaining the KV cache and executing the model on the HPU. In case of + distributed inference, each worker is assigned a partition of the model. 
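+    Workers communicate with each other through the HCCL backend of
+    torch.distributed.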
+    """
+
+    def __init__(
+        self,
+        vllm_config: VllmConfig,
+        local_rank: int,
+        rank: int,
+        distributed_init_method: str,
+        is_driver_worker: bool = False,
+        model_runner_cls: Optional[Type[ModelRunnerBase]] = None,
+    ) -> None:
+        WorkerBase.__init__(self, vllm_config=vllm_config)
+        self.parallel_config.rank = rank
+        self.local_rank = local_rank
+        self.rank = rank
+        self.distributed_init_method = distributed_init_method
+        self.is_driver_worker = is_driver_worker
+        if self.is_driver_worker:
+            assert self.rank == 0, "The driver worker must have rank 0."
+
+        if self.model_config.trust_remote_code:
+            # note: lazy import to avoid importing torch before initializing
+            from vllm.utils import init_cached_hf_modules
+            init_cached_hf_modules()
+
+        self.model_runner: HPUModelRunner = HPUModelRunner(
+            vllm_config=vllm_config, is_driver_worker=is_driver_worker)
+        # Uninitialized cache engine. Will be initialized by
+        # initialize_cache.
+        self.cache_engine: List[HPUCacheEngine]
+        # Initialize hpu_cache as embedding models don't initialize kv_caches
+        self.hpu_cache: Optional[List[List[torch.Tensor]]] = None
+        # Torch profiler. Enabled and configured through env vars:
+        # VLLM_TORCH_PROFILER_DIR=/path/to/save/trace
+        if envs.VLLM_TORCH_PROFILER_DIR:
+            torch_profiler_trace_dir = envs.VLLM_TORCH_PROFILER_DIR
+            logger.info("Profiling enabled. Traces will be saved to: %s",
+                        torch_profiler_trace_dir)
+            self.profiler = torch.profiler.profile(
+                activities=[
+                    torch.profiler.ProfilerActivity.CPU,
+                    torch.profiler.ProfilerActivity.HPU,
+                ],
+                with_stack=True,
+                on_trace_ready=torch.profiler.tensorboard_trace_handler(
+                    torch_profiler_trace_dir, use_gzip=True))
+        else:
+            self.profiler = None
+
+    def start_profile(self):
+        if self.profiler is None:
+            raise RuntimeError("Profiler is not enabled.")
+        self.profiler.start()
+
+    def stop_profile(self):
+        if self.profiler is None:
+            raise RuntimeError("Profiler is not enabled.")
+        self.profiler.stop()
+
+    def _set_env_vars(self):
+        local_rank = self.local_rank
+        if self.parallel_config.world_size == 1:
+            local_rank = -1
+        import os
+        os.environ["LOCAL_RANK"] = str(local_rank)
+        os.environ["ID"] = str(local_rank)
+        os.environ["WORLD_SIZE"] = str(self.parallel_config.world_size)
+        os.environ["RANK"] = str(self.rank)
+
+    def init_device(self) -> None:
+        if self.device_config.device.type == "hpu":
+            self.device = torch.device("hpu")
+            torch.hpu.set_device(self.device)
+        else:
+            raise RuntimeError(
+                f"Unsupported device type: {self.device_config.device}")
+        # Initialize the distributed environment.
+        if self.model_config.quantization == 'inc':
+            self._set_env_vars()
+        init_worker_distributed_environment(self.parallel_config, self.rank,
+                                            self.distributed_init_method,
+                                            self.local_rank)
+        # Set random seed.
+        set_random_seed(self.model_config.seed)
+
+    def load_model(self):
+        self.model_runner.load_model()
+
+    @torch.inference_mode()
+    def determine_num_available_blocks(self) -> Tuple[int, int]:
+        """Profiles the peak memory usage of the model to determine how many
+        KV blocks may be allocated without OOMs.
+
+        The engine will first conduct a profiling of the existing memory usage.
+        Then, it calculates the maximum possible number of HPU and CPU blocks
+        that can be allocated with the remaining free memory.
+
+        .. tip::
+            You may limit the usage of HPU memory
+            by adjusting the `gpu_memory_utilization` parameter.
+        """
+        # Profile the memory usage of the model and get the maximum number of
+        # cache blocks that can be allocated with the remaining free memory.
+ + # Execute a forward pass with dummy inputs to profile the memory usage + # of the model. + with HabanaMemoryProfiler() as m: + self.model_runner.profile_run() + torch.hpu.synchronize() + msg = ("Model profiling run " + f"took {m.get_summary_string()}") + logger.info(msg) + # At this point we should've allocated the maximum workspace for all + # recipes we will use the extra memory for graphs/blocks + free_hpu_memory = torch.hpu.mem_get_info()[0] + + cache_block_size = self.get_cache_block_size_bytes() + graph_reserved_mem = (float( + os.environ.get('VLLM_GRAPH_RESERVED_MEM', '0.1')) + if not self.model_config.enforce_eager else 0) + graph_headroom = 1 - graph_reserved_mem + available_hpu_memory = free_hpu_memory * \ + self.cache_config.gpu_memory_utilization + hpu_memory_margin = free_hpu_memory * ( + 1 - self.cache_config.gpu_memory_utilization) + self.model_runner.mem_margin = hpu_memory_margin + cache_size_bytes = available_hpu_memory * graph_headroom + graph_headroom_bytes = available_hpu_memory * (1 - graph_headroom) + msg = ( + f"Free device memory: {format_bytes(free_hpu_memory)}, " + f"{format_bytes(available_hpu_memory)} usable " + f"(gpu_memory_utilization={self.cache_config.gpu_memory_utilization})," + f" {format_bytes(graph_headroom_bytes)} reserved for HPUGraphs " + f"(VLLM_GRAPH_RESERVED_MEM={graph_reserved_mem}), " + f"{format_bytes(cache_size_bytes)} reserved for KV cache") + logger.info(msg) + num_hpu_blocks = int(cache_size_bytes // cache_block_size) + num_cpu_blocks = int(self.cache_config.swap_space_bytes // + cache_block_size) + num_hpu_blocks = max(num_hpu_blocks, 0) + num_cpu_blocks = max(num_cpu_blocks, 0) + + if self.model_runner.lora_manager: + self.model_runner.remove_all_loras() + + gc.collect() + return num_hpu_blocks, num_cpu_blocks + + def initialize_cache(self, num_gpu_blocks: int, + num_cpu_blocks: int) -> None: + """Allocate GPU and CPU KV cache with the specified number of blocks. + + This also warms up the model, which may record CUDA graphs. + """ + raise_if_cache_size_invalid(num_gpu_blocks, + self.cache_config.block_size, + self.model_config.max_model_len) + + self.cache_config.num_gpu_blocks = num_gpu_blocks + self.cache_config.num_cpu_blocks = num_cpu_blocks + + with HabanaMemoryProfiler() as m: + self._init_cache_engine() + torch.hpu.synchronize() + msg = ("Initializing cache engine " + f"took {m.get_summary_string()}") + logger.info(msg) + self._warm_up_model() + + def _init_cache_engine(self): + assert self.cache_config.num_gpu_blocks is not None + self.cache_engine = [ + HPUCacheEngine(self.cache_config, self.model_config, + self.parallel_config, self.device_config) + for _ in range(self.parallel_config.pipeline_parallel_size) + ] + self.hpu_cache = [ + self.cache_engine[ve].gpu_cache + for ve in range(self.parallel_config.pipeline_parallel_size) + ] + + def _warm_up_model(self) -> None: + # NOTE(kzawora): We should use virtual engine index here + # for pipeline parallelism. Using 0 for now. + assert self.hpu_cache is not None + self.model_runner.warmup_model(self.hpu_cache[0]) + # Reset the seed to ensure that the random state is not affected by + # the model initialization and profiling. 
+ set_random_seed(self.model_config.seed) + + def finish_measurements(self): + self.model_runner.finish_measurements() + + @property + def do_metadata_broadcast(self) -> bool: + return self.parallel_config.tensor_parallel_size > 1 + + @property + def kv_cache(self) -> Optional[List[List[torch.Tensor]]]: + return self.hpu_cache + + @torch.inference_mode() + def prepare_worker_input( + self, execute_model_req: ExecuteModelRequest) -> WorkerInput: + virtual_engine = execute_model_req.virtual_engine + num_seq_groups = len(execute_model_req.seq_group_metadata_list) + # `blocks_to_swap_in` and `blocks_to_swap_out` are cpu tensors. + # they contain parameters to launch cudamemcpyasync. + blocks_to_swap_in = torch.tensor(execute_model_req.blocks_to_swap_in, + device="cpu", + dtype=torch.int64).view(-1, 2) + blocks_to_swap_out = torch.tensor(execute_model_req.blocks_to_swap_out, + device="cpu", + dtype=torch.int64).view(-1, 2) + # `blocks_to_copy` is a gpu tensor. The src and tgt of + # blocks to copy are in the same device, and `blocks_to_copy` + # can be used directly within cuda kernels. + blocks_to_copy = torch.tensor(execute_model_req.blocks_to_copy, + device=self.device, + dtype=torch.int64).view(-1, 2) + + return WorkerInput( + num_seq_groups=num_seq_groups, + blocks_to_swap_in=blocks_to_swap_in, + blocks_to_swap_out=blocks_to_swap_out, + blocks_to_copy=blocks_to_copy, + virtual_engine=virtual_engine, + ) + + @torch.inference_mode() + def execute_worker(self, worker_input: WorkerInput) -> None: + virtual_engine = worker_input.virtual_engine + # Issue cache operations. + if (worker_input.blocks_to_swap_in is not None + and worker_input.blocks_to_swap_in.numel() > 0): + self.cache_engine[virtual_engine].swap_in( + worker_input.blocks_to_swap_in) + if (worker_input.blocks_to_swap_out is not None + and worker_input.blocks_to_swap_out.numel() > 0): + self.cache_engine[virtual_engine].swap_out( + worker_input.blocks_to_swap_out) + if (worker_input.blocks_to_copy is not None + and worker_input.blocks_to_copy.numel() > 0): + self.cache_engine[virtual_engine].copy(worker_input.blocks_to_copy) + + def add_lora(self, lora_request: LoRARequest) -> bool: + return self.model_runner.add_lora(lora_request) + + def remove_lora(self, lora_id: int) -> bool: + return self.model_runner.remove_lora(lora_id) + + def pin_lora(self, lora_id: int) -> bool: + return self.model_runner.pin_lora(lora_id) + + def list_loras(self) -> Set[int]: + return self.model_runner.list_loras() + + def add_prompt_adapter( + self, prompt_adapter_request: PromptAdapterRequest) -> bool: + raise NotImplementedError( + "Prompt Adapter is not implemented for HPU backend.") + + def remove_prompt_adapter(self, prompt_adapter_id: int) -> bool: + raise NotImplementedError( + "Prompt Adapter is not implemented for HPU backend.") + + def pin_prompt_adapter(self, prompt_adapter_id: int) -> bool: + raise NotImplementedError( + "Prompt Adapter is not implemented for HPU backend.") + + def list_prompt_adapters(self) -> Set[int]: + raise NotImplementedError( + "Prompt Adapter is not implemented for HPU backend.") + + def shutdown_inc(self): + self.model_runner.shutdown_inc() + + @property + def max_model_len(self) -> int: + return self.model_config.max_model_len + + @property + def vocab_size(self) -> int: + return self.model_runner.vocab_size + + def get_cache_block_size_bytes(self) -> int: + """Get the size of the KV cache block size in bytes. 
+ """ + return HPUCacheEngine.get_cache_block_size(self.cache_config, + self.model_config, + self.parallel_config) + + +def init_worker_distributed_environment( + parallel_config: ParallelConfig, + rank: int, + distributed_init_method: Optional[str] = None, + local_rank: int = -1, +) -> None: + """Initialize the distributed environment.""" + init_distributed_environment(parallel_config.world_size, + rank, + distributed_init_method, + local_rank, + backend='hccl') + + ensure_model_parallel_initialized(parallel_config.tensor_parallel_size, + parallel_config.pipeline_parallel_size) + + if torch.distributed.is_initialized(): + torch_world_size = torch.distributed.get_world_size() + if torch_world_size != parallel_config.world_size: + raise RuntimeError( + "torch.distributed is already initialized but the torch world " + "size does not match parallel_config.world_size " + f"({torch_world_size} vs. {parallel_config.world_size}).") + elif not distributed_init_method: + raise ValueError( + "distributed_init_method must be set if torch.distributed " + "is not already initialized") + else: + torch.distributed.init_process_group( + backend="hccl", + world_size=parallel_config.world_size, + rank=rank, + init_method=distributed_init_method, + ) + + # A small all_reduce for warmup & checking conformance. + dummy_tensor_hpu = torch.ones(1).to('hpu') + torch.distributed.all_reduce(dummy_tensor_hpu) + assert dummy_tensor_hpu.item() == parallel_config.world_size + ensure_model_parallel_initialized(parallel_config.tensor_parallel_size, + parallel_config.pipeline_parallel_size) + + +def raise_if_cache_size_invalid(num_gpu_blocks, block_size, + max_model_len) -> None: + if num_gpu_blocks <= 0: + raise ValueError("No available memory for the cache blocks. " + "Try increasing `gpu_memory_utilization` when " + "initializing the engine.") + max_seq_len = block_size * num_gpu_blocks + if max_model_len > max_seq_len: + raise ValueError( + f"The model's max seq len ({max_model_len}) " + "is larger than the maximum number of tokens that can be " + f"stored in KV cache ({max_seq_len}). 
Try increasing " + "`gpu_memory_utilization` or decreasing `max_model_len` when " + "initializing the engine.") + + +class HPUCacheEngine(CacheEngine): + + def _allocate_kv_cache( + self, + num_blocks: int, + device: str, + ) -> List[Tuple[torch.Tensor, torch.Tensor]]: + """Allocates KV cache on the specified device.""" + kv_cache_shape = self.attn_backend.get_kv_cache_shape( + num_blocks, self.block_size, self.num_kv_heads, self.head_size) + kv_cache: List[Tuple[torch.Tensor, torch.Tensor]] = [] + for _ in range(self.num_attention_layers): + key_cache = torch.zeros(kv_cache_shape, + dtype=self.dtype, + device=device) + value_cache = torch.zeros(kv_cache_shape, + dtype=self.dtype, + device=device) + kv_layer = (key_cache, value_cache) + kv_cache.append(kv_layer) + return kv_cache From 6a585a23d2e7960164c7bd9d767858d50ac54c47 Mon Sep 17 00:00:00 2001 From: Woosuk Kwon Date: Wed, 6 Nov 2024 01:24:28 -0800 Subject: [PATCH 18/22] [Hotfix] Fix ruff errors (#10073) Signed-off-by: Woosuk Kwon --- setup.py | 3 +-- vllm/executor/ray_hpu_executor.py | 2 +- vllm/worker/hpu_model_runner.py | 7 +++---- 3 files changed, 5 insertions(+), 7 deletions(-) diff --git a/setup.py b/setup.py index 51ca5e2abecf7..4a20e49235ac8 100644 --- a/setup.py +++ b/setup.py @@ -382,8 +382,7 @@ def get_gaudi_sw_version(): output = subprocess.run("hl-smi", shell=True, text=True, - stdout=subprocess.PIPE, - stderr=subprocess.PIPE, + capture_output=True, env={"ENABLE_CONSOLE": "true"}) if output.returncode == 0 and output.stdout: return output.stdout.split("\n")[2].replace( diff --git a/vllm/executor/ray_hpu_executor.py b/vllm/executor/ray_hpu_executor.py index 28d1882cb0db7..a24bab6df370e 100644 --- a/vllm/executor/ray_hpu_executor.py +++ b/vllm/executor/ray_hpu_executor.py @@ -34,7 +34,7 @@ class RayHPUExecutor(DistributedGPUExecutor): uses_ray: bool = True def _init_executor(self) -> None: - self.forward_dag: Optional["ray.dag.CompiledDAG"] = None + self.forward_dag: Optional[ray.dag.CompiledDAG] = None # If the env var is set, it uses the Ray's compiled DAG API # which optimizes the control plane overhead. # Run vLLM with VLLM_USE_RAY_COMPILED_DAG=1 to enable it. 
diff --git a/vllm/worker/hpu_model_runner.py b/vllm/worker/hpu_model_runner.py index 5008a2abd22ea..7e9b2bd13b48a 100644 --- a/vllm/worker/hpu_model_runner.py +++ b/vllm/worker/hpu_model_runner.py @@ -67,8 +67,7 @@ class Singleton(type): def __call__(cls, *args, **kwargs): if cls not in cls._instances: - cls._instances[cls] = super(Singleton, - cls).__call__(*args, **kwargs) + cls._instances[cls] = super().__call__(*args, **kwargs) return cls._instances[cls] @@ -273,7 +272,7 @@ def precompute_indices_and_offsets(block_size, slot_mapping, is_prompt): return indices, offsets -class HpuModelAdapter(): +class HpuModelAdapter: def __init__(self, model, block_size, dtype, enforce_eager): self.model = model @@ -1643,7 +1642,7 @@ def _maybe_wrap_in_hpu_graph(*args, **kwargs): ) if htorch.utils.internal.is_lazy() else HpuModelAdapter(*args, **kwargs) -class HabanaProfilerCounterHelper(): +class HabanaProfilerCounterHelper: def __init__(self): self.niter = 0 From 18634b46577c6e7c93c469f3890e2370cee010da Mon Sep 17 00:00:00 2001 From: DarkLight1337 Date: Wed, 6 Nov 2024 09:32:09 +0000 Subject: [PATCH 19/22] Remove note Signed-off-by: DarkLight1337 --- docs/source/getting_started/amd-installation.rst | 2 -- 1 file changed, 2 deletions(-) diff --git a/docs/source/getting_started/amd-installation.rst b/docs/source/getting_started/amd-installation.rst index 301337aebcf4c..ece5d785e0c65 100644 --- a/docs/source/getting_started/amd-installation.rst +++ b/docs/source/getting_started/amd-installation.rst @@ -13,8 +13,6 @@ Requirements * GPU: MI200s (gfx90a), MI300 (gfx942), Radeon RX 7900 series (gfx1100) * ROCm 6.2 -Note: PyTorch 2.5+/ROCm6.2 dropped the support for python 3.8. - Installation options: #. :ref:`Build from source with docker ` From 2442fb5da090dc175650318a137e092721e58c26 Mon Sep 17 00:00:00 2001 From: DarkLight1337 Date: Wed, 6 Nov 2024 09:32:23 +0000 Subject: [PATCH 20/22] Resolve type error Signed-off-by: DarkLight1337 --- vllm/distributed/parallel_state.py | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/vllm/distributed/parallel_state.py b/vllm/distributed/parallel_state.py index fcaf6a69749c9..0d15403264eee 100644 --- a/vllm/distributed/parallel_state.py +++ b/vllm/distributed/parallel_state.py @@ -89,13 +89,11 @@ def _get_unique_name(name: str) -> str: return newname -_groups: Dict[str, Callable[[], "GroupCoordinator"]] = {} +_groups: Dict[str, Callable[[], Optional["GroupCoordinator"]]] = {} def _register_group(group: "GroupCoordinator") -> None: - # TODO - revisit this now that we dropped 3.8 support - # looks like Python 3.8 does not understand `ReferenceType` - _groups[group.unique_name] = weakref.ref(group) # type: ignore + _groups[group.unique_name] = weakref.ref(group) if supports_custom_op(): From 5628293b519c4006d555db290920c071dba3bb2f Mon Sep 17 00:00:00 2001 From: DarkLight1337 Date: Wed, 6 Nov 2024 09:33:30 +0000 Subject: [PATCH 21/22] Format Signed-off-by: DarkLight1337 --- docs/source/getting_started/quickstart.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/source/getting_started/quickstart.rst b/docs/source/getting_started/quickstart.rst index eeb46699fe81b..0c0491c860563 100644 --- a/docs/source/getting_started/quickstart.rst +++ b/docs/source/getting_started/quickstart.rst @@ -12,7 +12,7 @@ This guide will help you quickly get started with vLLM to: Prerequisites -------------- - OS: Linux -- Python: 3.9 - 3.12 +- Python: 3.9 -- 3.12 - GPU: compute capability 7.0 or higher (e.g., V100, T4, RTX20xx, A100, L4, H100, 
etc.) Installation From 12b270ff47318ae027c186f6d58c4522bb4cc106 Mon Sep 17 00:00:00 2001 From: DarkLight1337 Date: Wed, 6 Nov 2024 09:35:21 +0000 Subject: [PATCH 22/22] format Signed-off-by: DarkLight1337 --- setup.py | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/setup.py b/setup.py index 82a38318e957c..d2438ae74c455 100644 --- a/setup.py +++ b/setup.py @@ -191,8 +191,10 @@ def build_extensions(self) -> None: os.makedirs(self.build_temp) targets = [] - target_name = lambda s: s.removeprefix("vllm.").removeprefix( - "vllm_flash_attn.") + + def target_name(s: str) -> str: + return s.removeprefix("vllm.").removeprefix("vllm_flash_attn.") + # Build all the extensions for ext in self.extensions: self.configure(ext)