From 905d0f0af4e2c07893e36778da9ab02bde01ace8 Mon Sep 17 00:00:00 2001 From: "Chendi.Xue" Date: Sun, 17 Nov 2024 00:58:22 -0600 Subject: [PATCH 001/122] [CI/Build] Fix IDC hpu [Device not found] issue (#10384) Signed-off-by: Chendi Xue --- .buildkite/run-hpu-test.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.buildkite/run-hpu-test.sh b/.buildkite/run-hpu-test.sh index 4505dc7a9373c..fa4f74fca7a11 100644 --- a/.buildkite/run-hpu-test.sh +++ b/.buildkite/run-hpu-test.sh @@ -13,4 +13,4 @@ trap remove_docker_container EXIT remove_docker_container # Run the image and launch offline inference -docker run --runtime=habana --name=hpu-test --network=host -e VLLM_SKIP_WARMUP=true --entrypoint="" hpu-test-env python3 examples/offline_inference.py \ No newline at end of file +docker run --runtime=habana --name=hpu-test --network=host -e HABANA_VISIBLE_DEVICES=all -e VLLM_SKIP_WARMUP=true --entrypoint="" hpu-test-env python3 examples/offline_inference.py \ No newline at end of file From cf349c4a97adb36354bdc2b14448ea55279d1575 Mon Sep 17 00:00:00 2001 From: Isotr0py <2037008807@qq.com> Date: Sun, 17 Nov 2024 15:12:04 +0800 Subject: [PATCH 002/122] [Bugfix][CPU] Fix CPU embedding runner with tensor parallel (#10394) Signed-off-by: Isotr0py <2037008807@qq.com> --- vllm/worker/cpu_embedding_model_runner.py | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/vllm/worker/cpu_embedding_model_runner.py b/vllm/worker/cpu_embedding_model_runner.py index 7053075bf4d8f..d0b8fec48d74f 100644 --- a/vllm/worker/cpu_embedding_model_runner.py +++ b/vllm/worker/cpu_embedding_model_runner.py @@ -66,6 +66,10 @@ def execute_model( hidden_states = model_executable(**execute_model_kwargs) + # Only perform pooling in the driver worker. + if not self.is_driver_worker: + return [] + return [ self.model.pooler(hidden_states=hidden_states, pooling_metadata=model_input.pooling_metadata) From 8d74b5aee9e780852de870c936b59707835e84f5 Mon Sep 17 00:00:00 2001 From: youkaichao Date: Sat, 16 Nov 2024 23:14:23 -0800 Subject: [PATCH 003/122] [platforms] refactor cpu code (#10402) Signed-off-by: youkaichao --- vllm/executor/cpu_executor.py | 68 +---------------------------------- vllm/platforms/cpu.py | 60 +++++++++++++++++++++++++++++++ 2 files changed, 61 insertions(+), 67 deletions(-) diff --git a/vllm/executor/cpu_executor.py b/vllm/executor/cpu_executor.py index 4ceb5a837dd7f..1542a2ae367eb 100644 --- a/vllm/executor/cpu_executor.py +++ b/vllm/executor/cpu_executor.py @@ -2,9 +2,6 @@ from functools import partial from typing import Any, Awaitable, List, Optional, Set, Tuple, Union -import vllm.envs as envs -from vllm.config import (CacheConfig, ModelConfig, ParallelConfig, - SchedulerConfig) from vllm.executor.executor_base import ExecutorAsyncBase, ExecutorBase from vllm.executor.multiproc_worker_utils import (ProcessWorkerWrapper, ResultHandler, WorkerMonitor) @@ -13,7 +10,7 @@ from vllm.model_executor.layers.sampler import SamplerOutput from vllm.prompt_adapter.request import PromptAdapterRequest from vllm.sequence import ExecuteModelRequest -from vllm.utils import (GiB_bytes, get_distributed_init_method, get_open_port, +from vllm.utils import (get_distributed_init_method, get_open_port, get_vllm_instance_id, make_async) from vllm.worker.worker_base import WorkerWrapperBase @@ -57,13 +54,6 @@ def _init_executor(self) -> None: os.environ["LOCAL_WORLD_SIZE"] = str( self.parallel_config.tensor_parallel_size) - self.model_config = _verify_and_get_model_config(self.model_config) - self.cache_config = 
_verify_and_get_cache_config(self.cache_config) - self.scheduler_config = _verify_and_get_scheduler_config( - self.scheduler_config) - self.parallel_config = _verify_and_get_parallel_config( - self.parallel_config) - # Multiprocessing-based executor does not support multi-node setting. # Since it only works for single node, we can use the loopback address # 127.0.0.1 for communication. @@ -313,62 +303,6 @@ async def check_health_async(self) -> None: self.check_health() -def _verify_and_get_model_config(config: ModelConfig) -> ModelConfig: - # Reminder: Please update docs/source/serving/compatibility_matrix.rst - # If the feature combo become valid - if not config.enforce_eager: - logger.warning( - "CUDA graph is not supported on CPU, fallback to the eager " - "mode.") - config.enforce_eager = True - return config - - -def _verify_and_get_scheduler_config( - config: SchedulerConfig) -> SchedulerConfig: - # Reminder: Please update docs/source/serving/compatibility_matrix.rst - # If the feature combo become valid - if config.chunked_prefill_enabled: - logger.warning("Chunked prefill is not supported on CPU, disable it.") - config.chunked_prefill_enabled = False - - return config - - -def _verify_and_get_cache_config(config: CacheConfig) -> CacheConfig: - # Reminder: Please update docs/source/serving/compatibility_matrix.rst - # If the feature combo become valid - if config.enable_prefix_caching: - logger.warning("Prefix caching is not supported on CPU, disable it.") - config.enable_prefix_caching = False - - kv_cache_space = envs.VLLM_CPU_KVCACHE_SPACE - - if kv_cache_space >= 0: - if kv_cache_space == 0: - config.cpu_kvcache_space_bytes = 4 * GiB_bytes # type: ignore - logger.warning("Environment variable VLLM_CPU_KVCACHE_SPACE (GB) " - "for CPU backend is not set, using 4 by default.") - else: - config.cpu_kvcache_space_bytes = kv_cache_space * GiB_bytes # type: ignore - else: - raise RuntimeError( - "Invalid environment variable VLLM_CPU_KVCACHE_SPACE" - f" {kv_cache_space}, expect a positive integer value.") - - return config - - -def _verify_and_get_parallel_config(config: ParallelConfig) -> ParallelConfig: - if (config.distributed_executor_backend is not None - and config.distributed_executor_backend != "mp"): - logger.warning( - "%s is not supported on CPU, fallback to mp distributed executor " - "backend.", config.distributed_executor_backend) - config.distributed_executor_backend = "mp" - return config - - def _driver_method_invoker(driver, method: str, *args, **kwargs): return getattr(driver, method)(*args, **kwargs) diff --git a/vllm/platforms/cpu.py b/vllm/platforms/cpu.py index 5243f59203afc..42bee31dfb0e9 100644 --- a/vllm/platforms/cpu.py +++ b/vllm/platforms/cpu.py @@ -1,8 +1,19 @@ +from typing import TYPE_CHECKING + import psutil import torch +from vllm.logger import init_logger + from .interface import Platform, PlatformEnum +if TYPE_CHECKING: + from vllm.config import VllmConfig +else: + VllmConfig = None + +logger = init_logger(__name__) + class CpuPlatform(Platform): _enum = PlatformEnum.CPU @@ -18,3 +29,52 @@ def get_device_total_memory(cls, device_id: int = 0) -> int: @classmethod def inference_mode(cls): return torch.no_grad() + + @classmethod + def check_and_update_config(cls, vllm_config: VllmConfig) -> None: + import vllm.envs as envs + from vllm.utils import GiB_bytes + model_config = vllm_config.model_config + # Reminder: Please update docs/source/serving/compatibility_matrix.rst + # If the feature combo become valid + if not model_config.enforce_eager: + 
logger.warning( + "CUDA graph is not supported on CPU, fallback to the eager " + "mode.") + model_config.enforce_eager = True + + cache_config = vllm_config.cache_config + + if cache_config.enable_prefix_caching: + logger.warning( + "Prefix caching is not supported on CPU, disable it.") + cache_config.enable_prefix_caching = False + + kv_cache_space = envs.VLLM_CPU_KVCACHE_SPACE + + if kv_cache_space >= 0: + if kv_cache_space == 0: + cache_config.cpu_kvcache_space_bytes = 4 * GiB_bytes # type: ignore + logger.warning( + "Environment variable VLLM_CPU_KVCACHE_SPACE (GB) " + "for CPU backend is not set, using 4 by default.") + else: + cache_config.cpu_kvcache_space_bytes = kv_cache_space * GiB_bytes # type: ignore # noqa + else: + raise RuntimeError( + "Invalid environment variable VLLM_CPU_KVCACHE_SPACE" + f" {kv_cache_space}, expect a positive integer value.") + + scheduler_config = vllm_config.scheduler_config + if scheduler_config.chunked_prefill_enabled: + logger.warning( + "Chunked prefill is not supported on CPU, disable it.") + scheduler_config.chunked_prefill_enabled = False + + parallel_config = vllm_config.parallel_config + if (parallel_config.distributed_executor_backend is not None + and parallel_config.distributed_executor_backend != "mp"): + logger.warning(("%s is not supported on CPU, fallback to mp " + "distributed executor backend."), + parallel_config.distributed_executor_backend) + parallel_config.distributed_executor_backend = "mp" From 76aab90ab68476c353ad58019fd51fd18622056a Mon Sep 17 00:00:00 2001 From: Kunshang Ji Date: Sun, 17 Nov 2024 16:44:44 +0800 Subject: [PATCH 004/122] [Hardware] [HPU]add `mark_step` for hpu (#10239) Signed-off-by: Kunshang Ji --- vllm/worker/hpu_model_runner.py | 14 ++++++++++++++ 1 file changed, 14 insertions(+) diff --git a/vllm/worker/hpu_model_runner.py b/vllm/worker/hpu_model_runner.py index 1ff30d685c6b1..99cf9a7e67256 100644 --- a/vllm/worker/hpu_model_runner.py +++ b/vllm/worker/hpu_model_runner.py @@ -272,6 +272,19 @@ def precompute_indices_and_offsets(block_size, slot_mapping, is_prompt): return indices, offsets +def modify_decoder_layer(module: torch.nn.Module, suffix="DecoderLayer"): + if module.__class__.__name__.endswith(suffix): + + def forward_hook(module, args, output): + htorch.core.mark_step() + return output + + module.register_forward_hook(forward_hook) + + for child_name, child_module in module.named_children(): + modify_decoder_layer(child_module) + + class HpuModelAdapter: def __init__(self, model, block_size, dtype, enforce_eager): @@ -636,6 +649,7 @@ def load_model(self) -> None: else: self.model = self.model.to("hpu") htcore.mark_step() + modify_decoder_layer(self.model) torch.hpu.synchronize() with HabanaMemoryProfiler() as m_wrap: From 80d85c5d7bc33ce0ae210ebad3c45e4361b57640 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E7=94=B5=E8=84=91=E6=98=9F=E4=BA=BA?= Date: Sun, 17 Nov 2024 16:50:24 +0800 Subject: [PATCH 005/122] [Bugfix] Fix mrope_position_delta in non-last prefill chunk (#10403) Signed-off-by: imkero --- vllm/model_executor/layers/rotary_embedding.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/vllm/model_executor/layers/rotary_embedding.py b/vllm/model_executor/layers/rotary_embedding.py index b01e4c61fe101..117fe086e5e87 100644 --- a/vllm/model_executor/layers/rotary_embedding.py +++ b/vllm/model_executor/layers/rotary_embedding.py @@ -922,9 +922,9 @@ def get_input_positions( torch.arange(text_len).view(1, -1).expand(3, -1) + st_idx) llm_positions = torch.cat(llm_pos_ids_list, 
dim=1).reshape(3, -1) - llm_positions = llm_positions[:, context_len:seq_len] mrope_position_delta = (llm_positions.max() + 1 - len(input_tokens)).item() + llm_positions = llm_positions[:, context_len:seq_len] return llm_positions.tolist(), mrope_position_delta From d1557e66d3227355e5aed8018a945a5e6a733147 Mon Sep 17 00:00:00 2001 From: wchen61 Date: Sun, 17 Nov 2024 19:32:40 +0800 Subject: [PATCH 006/122] =?UTF-8?q?[Misc]=20Enhance=20offline=5Finference?= =?UTF-8?q?=20to=20support=20user-configurable=20paramet=E2=80=A6=20(#1039?= =?UTF-8?q?2)?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: wchen61 --- examples/offline_inference.py | 98 ++++++++++++++++++++++++++++------- 1 file changed, 78 insertions(+), 20 deletions(-) diff --git a/examples/offline_inference.py b/examples/offline_inference.py index 9b758fa2479f6..391ac6b9b6b03 100644 --- a/examples/offline_inference.py +++ b/examples/offline_inference.py @@ -1,22 +1,80 @@ +from dataclasses import asdict + from vllm import LLM, SamplingParams +from vllm.engine.arg_utils import EngineArgs +from vllm.utils import FlexibleArgumentParser + + +def get_prompts(num_prompts: int): + # The default sample prompts. + prompts = [ + "Hello, my name is", + "The president of the United States is", + "The capital of France is", + "The future of AI is", + ] + + if num_prompts != len(prompts): + prompts = (prompts * ((num_prompts // len(prompts)) + 1))[:num_prompts] + + return prompts + + +def main(args): + # Create prompts + prompts = get_prompts(args.num_prompts) + + # Create a sampling params object. + sampling_params = SamplingParams(n=args.n, + temperature=args.temperature, + top_p=args.top_p, + top_k=args.top_k, + max_tokens=args.max_tokens) + + # Create an LLM. + # The default model is 'facebook/opt-125m' + engine_args = EngineArgs.from_cli_args(args) + llm = LLM(**asdict(engine_args)) + + # Generate texts from the prompts. + # The output is a list of RequestOutput objects + # that contain the prompt, generated text, and other information. + outputs = llm.generate(prompts, sampling_params) + # Print the outputs. + for output in outputs: + prompt = output.prompt + generated_text = output.outputs[0].text + print(f"Prompt: {prompt!r}, Generated text: {generated_text!r}") + + +if __name__ == '__main__': + parser = FlexibleArgumentParser() + parser = EngineArgs.add_cli_args(parser) + group = parser.add_argument_group("SamplingParams options") + group.add_argument("--num-prompts", + type=int, + default=4, + help="Number of prompts used for inference") + group.add_argument("--max-tokens", + type=int, + default=16, + help="Generated output length for sampling") + group.add_argument('--n', + type=int, + default=1, + help='Number of generated sequences per prompt') + group.add_argument('--temperature', + type=float, + default=0.8, + help='Temperature for text generation') + group.add_argument('--top-p', + type=float, + default=0.95, + help='top_p for text generation') + group.add_argument('--top-k', + type=int, + default=-1, + help='top_k for text generation') -# Sample prompts. -prompts = [ - "Hello, my name is", - "The president of the United States is", - "The capital of France is", - "The future of AI is", -] -# Create a sampling params object. -sampling_params = SamplingParams(temperature=0.8, top_p=0.95) - -# Create an LLM. -llm = LLM(model="facebook/opt-125m") -# Generate texts from the prompts. 
The output is a list of RequestOutput objects -# that contain the prompt, generated text, and other information. -outputs = llm.generate(prompts, sampling_params) -# Print the outputs. -for output in outputs: - prompt = output.prompt - generated_text = output.outputs[0].text - print(f"Prompt: {prompt!r}, Generated text: {generated_text!r}") + args = parser.parse_args() + main(args) From c4e464333eac5a46e1cc2701e095a44057c82927 Mon Sep 17 00:00:00 2001 From: Isotr0py <2037008807@qq.com> Date: Mon, 18 Nov 2024 09:07:46 +0800 Subject: [PATCH 007/122] [Misc] Add uninitialized params tracking for `AutoWeightsLoader` (#10327) Signed-off-by: Isotr0py <2037008807@qq.com> --- vllm/model_executor/model_loader/loader.py | 12 +++++++++++- vllm/model_executor/models/arctic.py | 8 ++++++-- vllm/model_executor/models/baichuan.py | 8 ++++++-- vllm/model_executor/models/bert.py | 8 ++++++-- vllm/model_executor/models/blip.py | 12 ++++++++---- vllm/model_executor/models/blip2.py | 7 ++++--- vllm/model_executor/models/bloom.py | 8 ++++++-- vllm/model_executor/models/chameleon.py | 8 ++++++-- vllm/model_executor/models/chatglm.py | 10 ++++++++-- vllm/model_executor/models/clip.py | 11 ++++++++--- vllm/model_executor/models/commandr.py | 4 +++- vllm/model_executor/models/dbrx.py | 8 ++++++-- vllm/model_executor/models/decilm.py | 8 ++++++-- vllm/model_executor/models/deepseek.py | 8 ++++++-- vllm/model_executor/models/deepseek_v2.py | 8 ++++++-- vllm/model_executor/models/exaone.py | 9 +++++++-- vllm/model_executor/models/falcon.py | 8 ++++++-- vllm/model_executor/models/florence2.py | 17 +++++++++++------ vllm/model_executor/models/fuyu.py | 8 +++++--- vllm/model_executor/models/gemma.py | 4 +++- vllm/model_executor/models/gemma2.py | 9 ++++++--- vllm/model_executor/models/gpt2.py | 8 ++++++-- vllm/model_executor/models/gpt_bigcode.py | 8 ++++++-- vllm/model_executor/models/gpt_j.py | 8 ++++++-- vllm/model_executor/models/gpt_neox.py | 8 ++++++-- vllm/model_executor/models/granite.py | 9 +++++++-- vllm/model_executor/models/granitemoe.py | 8 +++++--- .../models/idefics2_vision_model.py | 11 ++++++++--- vllm/model_executor/models/idefics3.py | 7 ++++--- vllm/model_executor/models/intern_vit.py | 8 ++++++-- vllm/model_executor/models/internlm2.py | 8 ++++++-- vllm/model_executor/models/internvl.py | 7 ++++--- vllm/model_executor/models/jais.py | 8 ++++++-- vllm/model_executor/models/jamba.py | 8 ++++++-- vllm/model_executor/models/llama.py | 15 ++++++++++----- vllm/model_executor/models/llava.py | 7 ++++--- vllm/model_executor/models/llava_next.py | 7 ++++--- vllm/model_executor/models/llava_next_video.py | 7 ++++--- vllm/model_executor/models/llava_onevision.py | 7 ++++--- vllm/model_executor/models/mamba.py | 8 ++++++-- vllm/model_executor/models/medusa.py | 9 +++++++-- vllm/model_executor/models/minicpm.py | 8 ++++++-- vllm/model_executor/models/minicpmv.py | 14 +++++++++----- vllm/model_executor/models/mixtral.py | 8 ++++++-- vllm/model_executor/models/mixtral_quant.py | 8 ++++++-- vllm/model_executor/models/mllama.py | 9 ++++++--- vllm/model_executor/models/mlp_speculator.py | 8 ++++++-- vllm/model_executor/models/mpt.py | 8 ++++++-- vllm/model_executor/models/nemotron.py | 8 ++++++-- vllm/model_executor/models/olmo.py | 8 ++++++-- vllm/model_executor/models/olmoe.py | 8 ++++++-- vllm/model_executor/models/opt.py | 8 ++++++-- vllm/model_executor/models/orion.py | 8 ++++++-- vllm/model_executor/models/paligemma.py | 7 ++++--- vllm/model_executor/models/persimmon.py | 8 ++++++-- 
vllm/model_executor/models/phi.py | 8 ++++++-- vllm/model_executor/models/phi3_small.py | 8 ++++++-- vllm/model_executor/models/phi3v.py | 9 ++++++--- vllm/model_executor/models/phimoe.py | 8 ++++++-- vllm/model_executor/models/pixtral.py | 12 ++++++++---- vllm/model_executor/models/qwen.py | 8 ++++++-- vllm/model_executor/models/qwen2.py | 18 ++++++++++++------ vllm/model_executor/models/qwen2_audio.py | 9 +++++++-- vllm/model_executor/models/qwen2_cls.py | 7 ++++--- vllm/model_executor/models/qwen2_moe.py | 8 ++++++-- vllm/model_executor/models/qwen2_rm.py | 7 ++++--- vllm/model_executor/models/qwen2_vl.py | 8 ++++++-- vllm/model_executor/models/siglip.py | 11 ++++++++--- vllm/model_executor/models/solar.py | 9 +++++++-- vllm/model_executor/models/stablelm.py | 8 ++++++-- vllm/model_executor/models/starcoder2.py | 8 ++++++-- vllm/model_executor/models/ultravox.py | 7 ++++--- vllm/model_executor/models/utils.py | 11 ++++++----- vllm/model_executor/models/xverse.py | 8 ++++++-- 74 files changed, 454 insertions(+), 185 deletions(-) diff --git a/vllm/model_executor/model_loader/loader.py b/vllm/model_executor/model_loader/loader.py index 0f8b81c3ef40c..d9ce85949e4ee 100644 --- a/vllm/model_executor/model_loader/loader.py +++ b/vllm/model_executor/model_loader/loader.py @@ -334,7 +334,17 @@ def load_model(self, vllm_config: VllmConfig) -> nn.Module: with target_device: model = _initialize_model(vllm_config=vllm_config) - model.load_weights(self._get_all_weights(model_config, model)) + weights_to_load = {name for name, _ in model.named_parameters()} + loaded_weights = model.load_weights( + self._get_all_weights(model_config, model)) + # We only enable strict check for non-quantiized models + # that have loaded weights tracking currently. + if model_config.quantization is None and loaded_weights is not None: + weights_not_loaded = weights_to_load - loaded_weights + if weights_not_loaded: + raise ValueError( + "Following weights were not initialized from " + f"checkpoint: {weights_not_loaded}") for _, module in model.named_modules(): quant_method = getattr(module, "quant_method", None) diff --git a/vllm/model_executor/models/arctic.py b/vllm/model_executor/models/arctic.py index d52418ee0f6f1..e58ad19cab54c 100644 --- a/vllm/model_executor/models/arctic.py +++ b/vllm/model_executor/models/arctic.py @@ -1,5 +1,5 @@ """Inference-only Snowflake Arctic model.""" -from typing import Iterable, List, Optional, Tuple, Union +from typing import Iterable, List, Optional, Set, Tuple, Union import torch from torch import nn @@ -480,7 +480,8 @@ def sample( next_tokens = self.sampler(logits, sampling_metadata) return next_tokens - def load_weights(self, weights: Iterable[Tuple[str, torch.Tensor]]): + def load_weights(self, weights: Iterable[Tuple[str, + torch.Tensor]]) -> Set[str]: stacked_params_mapping = [ # (param_name, shard_name, shard_id) ("qkv_proj", "q_proj", "q"), @@ -518,6 +519,7 @@ def load_weights(self, weights: Iterable[Tuple[str, torch.Tensor]]): ("ws", f"experts.{expert_id}.w3.weight", expert_id)) params_dict = dict(self.named_parameters()) + loaded_params: Set[str] = set() logger.info( "It will take ~10 minutes loading from the 16-bit weights. 
" @@ -573,3 +575,5 @@ def load_weights(self, weights: Iterable[Tuple[str, torch.Tensor]]): weight_loader = getattr(param, "weight_loader", default_weight_loader) weight_loader(param, loaded_weight) + loaded_params.add(name) + return loaded_params diff --git a/vllm/model_executor/models/baichuan.py b/vllm/model_executor/models/baichuan.py index 01ce7c42cd391..3749a16a38994 100644 --- a/vllm/model_executor/models/baichuan.py +++ b/vllm/model_executor/models/baichuan.py @@ -18,7 +18,7 @@ # limitations under the License. """Inference-only BaiChuan model compatible with HuggingFace weights.""" import math -from typing import Iterable, List, Optional, Tuple, Union +from typing import Iterable, List, Optional, Set, Tuple, Union import torch from torch import nn @@ -404,13 +404,15 @@ def sample( next_tokens = self.sampler(logits, sampling_metadata) return next_tokens - def load_weights(self, weights: Iterable[Tuple[str, torch.Tensor]]): + def load_weights(self, weights: Iterable[Tuple[str, + torch.Tensor]]) -> Set[str]: stacked_params_mapping = [ # (param_name, shard_name, shard_id) ("gate_up_proj", "gate_proj", 0), ("gate_up_proj", "up_proj", 1), ] params_dict = dict(self.named_parameters()) + loaded_params: Set[str] = set() for name, loaded_weight in weights: if "rotary_emb.inv_freq" in name: continue @@ -449,6 +451,8 @@ def load_weights(self, weights: Iterable[Tuple[str, torch.Tensor]]): weight_loader = getattr(param, "weight_loader", default_weight_loader) weight_loader(param, loaded_weight) + loaded_params.add(name) + return loaded_params class BaichuanForCausalLM(BaiChuanBaseForCausalLM): diff --git a/vllm/model_executor/models/bert.py b/vllm/model_executor/models/bert.py index 42dd6119e76f1..d8301a36acb01 100644 --- a/vllm/model_executor/models/bert.py +++ b/vllm/model_executor/models/bert.py @@ -1,4 +1,4 @@ -from typing import Iterable, List, Optional, Tuple +from typing import Iterable, List, Optional, Set, Tuple import torch from torch import nn @@ -337,7 +337,8 @@ def forward( return self.encoder(hidden_states, kv_caches, attn_metadata) - def load_weights(self, weights: Iterable[Tuple[str, torch.Tensor]]): + def load_weights(self, weights: Iterable[Tuple[str, + torch.Tensor]]) -> Set[str]: stacked_params_mapping = [ # (param_name, shard_name, shard_id) ("qkv_proj", "query", "q"), @@ -346,6 +347,7 @@ def load_weights(self, weights: Iterable[Tuple[str, torch.Tensor]]): ] params_dict = dict(self.named_parameters()) + loaded_params: Set[str] = set() for name, loaded_weight in weights: if "pooler" in name: continue @@ -368,6 +370,8 @@ def load_weights(self, weights: Iterable[Tuple[str, torch.Tensor]]): weight_loader = getattr(param, "weight_loader", default_weight_loader) weight_loader(param, loaded_weight) + loaded_params.add(name) + return loaded_params class BertEmbeddingModel(nn.Module): diff --git a/vllm/model_executor/models/blip.py b/vllm/model_executor/models/blip.py index e612010677364..6db6462e97f3f 100644 --- a/vllm/model_executor/models/blip.py +++ b/vllm/model_executor/models/blip.py @@ -1,6 +1,6 @@ """Minimal implementation of BlipVisionModel intended to be only used within a vision language model.""" -from typing import Iterable, Optional, Tuple, Union +from typing import Iterable, Optional, Set, Tuple, Union import torch import torch.nn as nn @@ -415,7 +415,8 @@ def forward(self, pixel_values: torch.Tensor) -> torch.Tensor: return self.post_layernorm(hidden_states) - def load_weights(self, weights: Iterable[Tuple[str, torch.Tensor]]): + def load_weights(self, weights: 
Iterable[Tuple[str, + torch.Tensor]]) -> Set[str]: stacked_params_mapping = [ # (param_name, shard_name, shard_id) ("qkv_proj", "q_proj", "q"), @@ -423,6 +424,7 @@ def load_weights(self, weights: Iterable[Tuple[str, torch.Tensor]]): ("qkv_proj", "v_proj", "v"), ] if self.shard_weight else [] params_dict = dict(self.named_parameters()) + loaded_params: Set[str] = set() layer_count = len(self.encoder.layers) for name, loaded_weight in weights: @@ -440,8 +442,8 @@ def load_weights(self, weights: Iterable[Tuple[str, torch.Tensor]]): for (param_name, weight_name, shard_id) in stacked_params_mapping: if weight_name not in name: continue - - param = params_dict[name.replace(weight_name, param_name)] + name = name.replace(weight_name, param_name) + param = params_dict[name] weight_loader = param.weight_loader weight_loader(param, loaded_weight, shard_id) break @@ -450,3 +452,5 @@ def load_weights(self, weights: Iterable[Tuple[str, torch.Tensor]]): weight_loader = getattr(param, "weight_loader", default_weight_loader) weight_loader(param, loaded_weight) + loaded_params.add(name) + return loaded_params diff --git a/vllm/model_executor/models/blip2.py b/vllm/model_executor/models/blip2.py index 03dc1d15ab697..7d7639b4a92ce 100644 --- a/vllm/model_executor/models/blip2.py +++ b/vllm/model_executor/models/blip2.py @@ -1,5 +1,5 @@ from functools import cached_property -from typing import (Iterable, List, Literal, Mapping, Optional, Tuple, +from typing import (Iterable, List, Literal, Mapping, Optional, Set, Tuple, TypedDict, Union) import torch @@ -692,6 +692,7 @@ def sample( ) -> Optional[SamplerOutput]: return self.language_model.sample(logits, sampling_metadata) - def load_weights(self, weights: Iterable[Tuple[str, torch.Tensor]]): + def load_weights(self, weights: Iterable[Tuple[str, + torch.Tensor]]) -> Set[str]: loader = AutoWeightsLoader(self) - loader.load_weights(weights) + return loader.load_weights(weights) diff --git a/vllm/model_executor/models/bloom.py b/vllm/model_executor/models/bloom.py index cf2eee8172769..1060d418474ef 100644 --- a/vllm/model_executor/models/bloom.py +++ b/vllm/model_executor/models/bloom.py @@ -16,7 +16,7 @@ # limitations under the License. 
"""Inference-only BLOOM model compatible with HuggingFace weights.""" import math -from typing import Iterable, List, Optional, Tuple, Union +from typing import Iterable, List, Optional, Set, Tuple, Union import torch from torch import nn @@ -341,8 +341,10 @@ def sample( next_tokens = self.sampler(logits, sampling_metadata) return next_tokens - def load_weights(self, weights: Iterable[Tuple[str, torch.Tensor]]): + def load_weights(self, weights: Iterable[Tuple[str, + torch.Tensor]]) -> Set[str]: params_dict = dict(self.named_parameters(remove_duplicate=False)) + loaded_params: Set[str] = set() for name, loaded_weight in weights: if name == "lm_head.weight": continue @@ -371,3 +373,5 @@ def load_weights(self, weights: Iterable[Tuple[str, torch.Tensor]]): weight_loader = getattr(param, "weight_loader", default_weight_loader) weight_loader(param, loaded_weight) + loaded_params.add(name) + return loaded_params diff --git a/vllm/model_executor/models/chameleon.py b/vllm/model_executor/models/chameleon.py index 7b59c818e0b60..8f91abffaea90 100644 --- a/vllm/model_executor/models/chameleon.py +++ b/vllm/model_executor/models/chameleon.py @@ -1,5 +1,5 @@ from functools import cached_property -from typing import (Any, Dict, Iterable, List, Literal, Mapping, Optional, +from typing import (Any, Dict, Iterable, List, Literal, Mapping, Optional, Set, Tuple, TypedDict, Union) import torch @@ -1034,7 +1034,8 @@ def sample( next_tokens = self.sampler(logits, sampling_metadata) return next_tokens - def load_weights(self, weights: Iterable[Tuple[str, torch.Tensor]]): + def load_weights(self, weights: Iterable[Tuple[str, + torch.Tensor]]) -> Set[str]: stacked_params_mapping = [ # (param_name, shard_name, shard_id) (".qkv_proj", ".q_proj", "q"), @@ -1044,6 +1045,7 @@ def load_weights(self, weights: Iterable[Tuple[str, torch.Tensor]]): (".gate_up_proj", ".up_proj", 1), ] params_dict = dict(self.named_parameters()) + loaded_params: Set[str] = set() for name, loaded_weight in weights: if "rotary_emb.inv_freq" in name: continue @@ -1111,3 +1113,5 @@ def load_weights(self, weights: Iterable[Tuple[str, torch.Tensor]]): weight_loader = getattr(param, "weight_loader", default_weight_loader) weight_loader(param, loaded_weight) + loaded_params.add(name) + return loaded_params diff --git a/vllm/model_executor/models/chatglm.py b/vllm/model_executor/models/chatglm.py index 70e9b607b0642..81e56381eabd8 100644 --- a/vllm/model_executor/models/chatglm.py +++ b/vllm/model_executor/models/chatglm.py @@ -3,7 +3,8 @@ """Inference-only ChatGLM model compatible with THUDM weights.""" from argparse import Namespace from array import array -from typing import Dict, Iterable, List, Mapping, Optional, Tuple, TypedDict +from typing import (Dict, Iterable, List, Mapping, Optional, Set, Tuple, + TypedDict) import torch from PIL import Image @@ -645,7 +646,8 @@ def sample( next_tokens = self.sampler(logits, sampling_metadata) return next_tokens - def load_weights(self, weights: Iterable[Tuple[str, torch.Tensor]]): + def load_weights(self, weights: Iterable[Tuple[str, + torch.Tensor]]) -> Set[str]: # Merge two ColumnParallelLinear into one MergedColumnParallelLinear merged_weights_dict: Dict[str, Dict[str, Optional[torch.Tensor]]] = { "transformer.vision.linear_proj.merged_proj.weight": { @@ -655,6 +657,7 @@ def load_weights(self, weights: Iterable[Tuple[str, torch.Tensor]]): } params_dict = dict(self.named_parameters(remove_duplicate=False)) + loaded_params: Set[str] = set() for name, loaded_weight in weights: is_weight_to_be_merge = 
False for _, merged_weight_dict in merged_weights_dict.items(): @@ -677,6 +680,7 @@ def load_weights(self, weights: Iterable[Tuple[str, torch.Tensor]]): weight_loader = getattr(param, "weight_loader", default_weight_loader) weight_loader(param, loaded_weight) + loaded_params.add(name) for combined_name, merged_weight_dict in merged_weights_dict.items(): if combined_name in params_dict: @@ -686,3 +690,5 @@ def load_weights(self, weights: Iterable[Tuple[str, torch.Tensor]]): weight_loader = getattr(param, "weight_loader", default_weight_loader) weight_loader(param, combined_weight) + loaded_params.add(combined_name) + return loaded_params diff --git a/vllm/model_executor/models/clip.py b/vllm/model_executor/models/clip.py index 2d81b9266826b..184758f4a8a45 100644 --- a/vllm/model_executor/models/clip.py +++ b/vllm/model_executor/models/clip.py @@ -1,6 +1,6 @@ """Minimal implementation of CLIPVisionModel intended to be only used within a vision language model.""" -from typing import Iterable, List, Optional, Tuple, Union +from typing import Iterable, List, Optional, Set, Tuple, Union import numpy as np import torch @@ -483,7 +483,8 @@ def device(self): # (TODO) Add prefix argument for filtering out weights to be loaded # ref: https://github.com/vllm-project/vllm/pull/7186#discussion_r1734163986 - def load_weights(self, weights: Iterable[Tuple[str, torch.Tensor]]): + def load_weights(self, weights: Iterable[Tuple[str, + torch.Tensor]]) -> Set[str]: stacked_params_mapping = [ # (param_name, shard_name, shard_id) ("qkv_proj", "q_proj", "q"), @@ -491,6 +492,7 @@ def load_weights(self, weights: Iterable[Tuple[str, torch.Tensor]]): ("qkv_proj", "v_proj", "v"), ] if self.shard_weight else [] params_dict = dict(self.named_parameters()) + loaded_params: Set[str] = set() layer_count = len(self.vision_model.encoder.layers) for name, loaded_weight in weights: @@ -508,8 +510,9 @@ def load_weights(self, weights: Iterable[Tuple[str, torch.Tensor]]): for (param_name, weight_name, shard_id) in stacked_params_mapping: if weight_name not in name: continue + name = name.replace(weight_name, param_name) - param = params_dict[name.replace(weight_name, param_name)] + param = params_dict[name] weight_loader = param.weight_loader weight_loader(param, loaded_weight, shard_id) break @@ -518,3 +521,5 @@ def load_weights(self, weights: Iterable[Tuple[str, torch.Tensor]]): weight_loader = getattr(param, "weight_loader", default_weight_loader) weight_loader(param, loaded_weight) + loaded_params.add(name) + return loaded_params diff --git a/vllm/model_executor/models/commandr.py b/vllm/model_executor/models/commandr.py index fbb09a64cde9b..9fd083e5a02a9 100644 --- a/vllm/model_executor/models/commandr.py +++ b/vllm/model_executor/models/commandr.py @@ -402,7 +402,8 @@ def sample( next_tokens = self.sampler(logits, sampling_metadata) return next_tokens - def load_weights(self, weights: Iterable[Tuple[str, torch.Tensor]]): + def load_weights(self, weights: Iterable[Tuple[str, + torch.Tensor]]) -> Set[str]: stacked_params_mapping = [ # (param_name, shard_name, shard_id) ("qkv_proj", "q_proj", "q"), @@ -447,3 +448,4 @@ def load_weights(self, weights: Iterable[Tuple[str, torch.Tensor]]): default_weight_loader) weight_loader(param, loaded_weight) loaded_params.add(name) + return loaded_params diff --git a/vllm/model_executor/models/dbrx.py b/vllm/model_executor/models/dbrx.py index 3952ff31e5cec..eab338800249e 100644 --- a/vllm/model_executor/models/dbrx.py +++ b/vllm/model_executor/models/dbrx.py @@ -1,4 +1,4 @@ -from typing 
import Iterable, List, Optional, Tuple, Union +from typing import Iterable, List, Optional, Set, Tuple, Union import torch import torch.nn as nn @@ -417,13 +417,15 @@ def sample( next_tokens = self.sampler(logits, sampling_metadata) return next_tokens - def load_weights(self, weights: Iterable[Tuple[str, torch.Tensor]]): + def load_weights(self, weights: Iterable[Tuple[str, + torch.Tensor]]) -> Set[str]: expert_params_mapping = [( "w13_weight" if weight_name in ["w1", "v1"] else "w2_weight", f"mlp.{weight_name}", ) for weight_name in ["w1", "v1", "w2"]] params_dict = dict(self.named_parameters(remove_duplicate=False)) + loaded_params: Set[str] = set() for name, loaded_weight in weights: for param_name, weight_name in expert_params_mapping: if weight_name not in name: @@ -447,3 +449,5 @@ def load_weights(self, weights: Iterable[Tuple[str, torch.Tensor]]): weight_loader = getattr(param, "weight_loader", default_weight_loader) weight_loader(param, loaded_weight) + loaded_params.add(name) + return loaded_params diff --git a/vllm/model_executor/models/decilm.py b/vllm/model_executor/models/decilm.py index b38fd9fa49c21..c551853956b92 100644 --- a/vllm/model_executor/models/decilm.py +++ b/vllm/model_executor/models/decilm.py @@ -22,7 +22,7 @@ # limitations under the License. """Inference-only DeciLM model compatible with HuggingFace weights.""" -from typing import Iterable, Tuple +from typing import Iterable, Set, Tuple import torch @@ -57,7 +57,8 @@ def __init__(self, *, vllm_config: VllmConfig, prefix: str = ""): delattr(config, "num_key_value_heads_per_layer") super().__init__(vllm_config=vllm_config) - def load_weights(self, weights: Iterable[Tuple[str, torch.Tensor]]): + def load_weights(self, weights: Iterable[Tuple[str, + torch.Tensor]]) -> Set[str]: stacked_params_mapping = [ # (param_name, shard_name, shard_id) ("qkv_proj", "q_proj", "q"), @@ -67,6 +68,7 @@ def load_weights(self, weights: Iterable[Tuple[str, torch.Tensor]]): ("gate_up_proj", "up_proj", 1), ] params_dict = dict(self.named_parameters()) + loaded_params: Set[str] = set() for name, loaded_weight in weights: if "rotary_emb.inv_freq" in name: continue @@ -97,6 +99,8 @@ def load_weights(self, weights: Iterable[Tuple[str, torch.Tensor]]): weight_loader = getattr(param, "weight_loader", default_weight_loader) weight_loader(param, loaded_weight) + loaded_params.add(name) + return loaded_params def _degroup_weight(self, loaded_weight: torch.Tensor) -> torch.Tensor: hidden_size = self.config.hidden_size diff --git a/vllm/model_executor/models/deepseek.py b/vllm/model_executor/models/deepseek.py index 36dfea5a65656..8c5ad9904e925 100644 --- a/vllm/model_executor/models/deepseek.py +++ b/vllm/model_executor/models/deepseek.py @@ -20,7 +20,7 @@ # See the License for the specific language governing permissions and # limitations under the License. 
"""Inference-only Deepseek model.""" -from typing import Any, Dict, Iterable, List, Optional, Tuple, Union +from typing import Any, Dict, Iterable, List, Optional, Set, Tuple, Union import torch from torch import nn @@ -442,7 +442,8 @@ def sample( next_tokens = self.sampler(logits, sampling_metadata) return next_tokens - def load_weights(self, weights: Iterable[Tuple[str, torch.Tensor]]): + def load_weights(self, weights: Iterable[Tuple[str, + torch.Tensor]]) -> Set[str]: stacked_params_mapping = [ # (param_name, shard_name, shard_id) ("qkv_proj", "q_proj", "q"), @@ -453,6 +454,7 @@ def load_weights(self, weights: Iterable[Tuple[str, torch.Tensor]]): ] params_dict = dict(self.named_parameters()) + loaded_params: Set[str] = set() for name, loaded_weight in weights: if "rotary_emb.inv_freq" in name: continue @@ -487,3 +489,5 @@ def load_weights(self, weights: Iterable[Tuple[str, torch.Tensor]]): weight_loader = getattr(param, "weight_loader", default_weight_loader) weight_loader(param, loaded_weight) + loaded_params.add(name) + return loaded_params diff --git a/vllm/model_executor/models/deepseek_v2.py b/vllm/model_executor/models/deepseek_v2.py index 1e32fe60c7a5b..d2c4ca0bf85e9 100644 --- a/vllm/model_executor/models/deepseek_v2.py +++ b/vllm/model_executor/models/deepseek_v2.py @@ -20,7 +20,7 @@ # See the License for the specific language governing permissions and # limitations under the License. """Inference-only DeepseekV2 model.""" -from typing import Any, Dict, Iterable, List, Optional, Tuple, Union +from typing import Any, Dict, Iterable, List, Optional, Set, Tuple, Union import torch from torch import nn @@ -550,7 +550,8 @@ def make_empty_intermediate_tensors( device=device), }) - def load_weights(self, weights: Iterable[Tuple[str, torch.Tensor]]): + def load_weights(self, weights: Iterable[Tuple[str, + torch.Tensor]]) -> Set[str]: stacked_params_mapping = [ # (param_name, shard_name, shard_id) ("gate_up_proj", "gate_proj", 0), @@ -566,6 +567,7 @@ def load_weights(self, weights: Iterable[Tuple[str, torch.Tensor]]): num_experts=self.config.n_routed_experts) params_dict = dict(self.named_parameters()) + loaded_params: Set[str] = set() for name, loaded_weight in weights: if "rotary_emb.inv_freq" in name: continue @@ -623,3 +625,5 @@ def load_weights(self, weights: Iterable[Tuple[str, torch.Tensor]]): weight_loader = getattr(param, "weight_loader", default_weight_loader) weight_loader(param, loaded_weight) + loaded_params.add(name) + return loaded_params diff --git a/vllm/model_executor/models/exaone.py b/vllm/model_executor/models/exaone.py index 52dd603ca558d..9d739d0479548 100644 --- a/vllm/model_executor/models/exaone.py +++ b/vllm/model_executor/models/exaone.py @@ -22,7 +22,7 @@ # limitations under the License. 
"""Inference-only Exaone model compatible with HuggingFace weights.""" -from typing import Any, Dict, Iterable, List, Optional, Tuple, Union +from typing import Any, Dict, Iterable, List, Optional, Set, Tuple, Union import torch from torch import nn @@ -513,7 +513,8 @@ def sample( next_tokens = self.sampler(logits, sampling_metadata) return next_tokens - def load_weights(self, weights: Iterable[Tuple[str, torch.Tensor]]): + def load_weights(self, weights: Iterable[Tuple[str, + torch.Tensor]]) -> Set[str]: stacked_params_mapping = [ # (param_name, shard_name, shard_id) (".qkv_proj", ".q_proj", "q"), @@ -523,6 +524,7 @@ def load_weights(self, weights: Iterable[Tuple[str, torch.Tensor]]): (".gate_up_proj", ".c_fc_1", 1), ] params_dict = dict(self.named_parameters()) + loaded_params: Set[str] = set() for name, loaded_weight in weights: if "rotary_emb.inv_freq" in name: continue @@ -543,6 +545,7 @@ def load_weights(self, weights: Iterable[Tuple[str, torch.Tensor]]): default_weight_loader) loaded_weight = loaded_weight[0] weight_loader(param, loaded_weight) + loaded_params.add(scale_name) continue for param_name, weight_name, shard_id in stacked_params_mapping: if weight_name not in name: @@ -576,6 +579,8 @@ def load_weights(self, weights: Iterable[Tuple[str, torch.Tensor]]): weight_loader = getattr(param, "weight_loader", default_weight_loader) weight_loader(param, loaded_weight) + loaded_params.add(name) + return loaded_params # If this function is called, it should always initialize KV cache scale # factors (or else raise an exception). Thus, handled exceptions should diff --git a/vllm/model_executor/models/falcon.py b/vllm/model_executor/models/falcon.py index e97abe949ccdb..2aa4b67d99894 100644 --- a/vllm/model_executor/models/falcon.py +++ b/vllm/model_executor/models/falcon.py @@ -18,7 +18,7 @@ """PyTorch Falcon model.""" import math -from typing import Iterable, List, Optional, Tuple, Union +from typing import Iterable, List, Optional, Set, Tuple, Union import torch from torch import nn @@ -473,7 +473,8 @@ def sample( next_tokens = self.sampler(logits, sampling_metadata) return next_tokens - def load_weights(self, weights: Iterable[Tuple[str, torch.Tensor]]): + def load_weights(self, weights: Iterable[Tuple[str, + torch.Tensor]]) -> Set[str]: total_num_heads = self.config.num_attention_heads if self.config.new_decoder_architecture: total_num_kv_heads = self.config.num_kv_heads @@ -483,6 +484,7 @@ def load_weights(self, weights: Iterable[Tuple[str, torch.Tensor]]): total_num_kv_heads = total_num_heads num_query_heads_per_kv_head = total_num_heads // total_num_kv_heads params_dict = dict(self.named_parameters(remove_duplicate=False)) + loaded_params: Set[str] = set() for name, loaded_weight in weights: if name == "lm_head.weight" and self.tie_word_embeddings: # Falcon uses tied embeddings except Falcon-11b. 
@@ -519,3 +521,5 @@ def load_weights(self, weights: Iterable[Tuple[str, torch.Tensor]]): weight_loader = getattr(param, "weight_loader", default_weight_loader) weight_loader(param, loaded_weight) + loaded_params.add(name) + return loaded_params diff --git a/vllm/model_executor/models/florence2.py b/vllm/model_executor/models/florence2.py index 971a71180164b..d3a9ff6915b84 100644 --- a/vllm/model_executor/models/florence2.py +++ b/vllm/model_executor/models/florence2.py @@ -1,5 +1,5 @@ import math -from typing import Iterable, List, Optional, Tuple +from typing import Iterable, List, Optional, Set, Tuple import torch import torch.nn as nn @@ -156,7 +156,8 @@ def sample(self, logits: torch.Tensor, next_tokens = self.sampler(logits, sampling_metadata) return next_tokens - def load_weights(self, weights: Iterable[Tuple[str, torch.Tensor]]): + def load_weights(self, weights: Iterable[Tuple[str, + torch.Tensor]]) -> Set[str]: stacked_params_mapping = [ # (param_name, shard_name, shard_id) ("qkv_proj", "q_proj", "q"), @@ -165,12 +166,13 @@ def load_weights(self, weights: Iterable[Tuple[str, torch.Tensor]]): ] params_dict = dict(self.named_parameters()) + loaded_params: Set[str] = set() for name, loaded_weight in weights: for (param_name, weight_name, shard_id) in stacked_params_mapping: if weight_name not in name: continue - - param = params_dict[name.replace(weight_name, param_name)] + name = name.replace(weight_name, param_name) + param = params_dict[name] weight_loader = param.weight_loader weight_loader(param, loaded_weight, shard_id) break @@ -183,6 +185,8 @@ def load_weights(self, weights: Iterable[Tuple[str, torch.Tensor]]): weight_loader = getattr(param, "weight_loader", default_weight_loader) weight_loader(param, loaded_weight) + loaded_params.add(name) + return loaded_params class Florence2ForConditionalGeneration(nn.Module): @@ -248,10 +252,11 @@ def sample( ) -> SamplerOutput: return self.language_model.sample(logits, sampling_metadata) - def load_weights(self, weights: Iterable[Tuple[str, torch.Tensor]]): + def load_weights(self, weights: Iterable[Tuple[str, + torch.Tensor]]) -> Set[str]: skip_prefixes = [ 'image_projection', "vision_tower", "image_proj_norm", "image_pos_embed", "visual_temporal_embed" ] loader = AutoWeightsLoader(self, skip_prefixes=skip_prefixes) - loader.load_weights(weights) + return loader.load_weights(weights) diff --git a/vllm/model_executor/models/fuyu.py b/vllm/model_executor/models/fuyu.py index 31fc098a8bb3f..7b46907ac83ab 100644 --- a/vllm/model_executor/models/fuyu.py +++ b/vllm/model_executor/models/fuyu.py @@ -16,7 +16,8 @@ """ PyTorch Fuyu model.""" import math from array import array -from typing import Iterable, List, Literal, Mapping, Optional, Tuple, TypedDict +from typing import (Iterable, List, Literal, Mapping, Optional, Set, Tuple, + TypedDict) import torch import torch.nn as nn @@ -354,6 +355,7 @@ def sample( next_tokens = self.language_model.sampler(logits, sampling_metadata) return next_tokens - def load_weights(self, weights: Iterable[Tuple[str, torch.Tensor]]): + def load_weights(self, weights: Iterable[Tuple[str, + torch.Tensor]]) -> Set[str]: loader = AutoWeightsLoader(self) - loader.load_weights(weights) + return loader.load_weights(weights) diff --git a/vllm/model_executor/models/gemma.py b/vllm/model_executor/models/gemma.py index ace13664c6ea6..64e03b30bf2f1 100644 --- a/vllm/model_executor/models/gemma.py +++ b/vllm/model_executor/models/gemma.py @@ -424,7 +424,8 @@ def sample( next_tokens = self.sampler(logits, sampling_metadata) 
return next_tokens - def load_weights(self, weights: Iterable[Tuple[str, torch.Tensor]]): + def load_weights(self, weights: Iterable[Tuple[str, + torch.Tensor]]) -> Set[str]: stacked_params_mapping = [ # (param_name, shard_name, shard_id) ("qkv_proj", "q_proj", "q"), @@ -469,3 +470,4 @@ def load_weights(self, weights: Iterable[Tuple[str, torch.Tensor]]): logger.warning( "Some weights are not initialized from checkpoints: %s", unloaded_params) + return loaded_params diff --git a/vllm/model_executor/models/gemma2.py b/vllm/model_executor/models/gemma2.py index a60b4e73a76d4..4ba39223cc07f 100644 --- a/vllm/model_executor/models/gemma2.py +++ b/vllm/model_executor/models/gemma2.py @@ -312,7 +312,8 @@ def forward( hidden_states, _ = self.norm(hidden_states, residual) return hidden_states - def load_weights(self, weights: Iterable[Tuple[str, torch.Tensor]]): + def load_weights(self, weights: Iterable[Tuple[str, + torch.Tensor]]) -> Set[str]: stacked_params_mapping = [ # (param_name, shard_name, shard_id) ("qkv_proj", "q_proj", "q"), @@ -354,6 +355,7 @@ def load_weights(self, weights: Iterable[Tuple[str, torch.Tensor]]): logger.warning( "Some weights are not initialized from checkpoints: %s", unloaded_params) + return loaded_params class Gemma2ForCausalLM(nn.Module, SupportsLoRA, SupportsPP): @@ -451,13 +453,14 @@ def sample( next_tokens = self.sampler(logits, sampling_metadata) return next_tokens - def load_weights(self, weights: Iterable[Tuple[str, torch.Tensor]]): + def load_weights(self, weights: Iterable[Tuple[str, + torch.Tensor]]) -> Set[str]: loader = AutoWeightsLoader( self, skip_prefixes=(["lm_head."] if self.config.tie_word_embeddings else None), ) - loader.load_weights(weights) + return loader.load_weights(weights) class Gemma2EmbeddingModel(nn.Module, SupportsPP): diff --git a/vllm/model_executor/models/gpt2.py b/vllm/model_executor/models/gpt2.py index fa0fdad28d161..1c61408ae1dd9 100644 --- a/vllm/model_executor/models/gpt2.py +++ b/vllm/model_executor/models/gpt2.py @@ -16,7 +16,7 @@ # See the License for the specific language governing permissions and # limitations under the License. """Inference-only GPT-2 model compatible with HuggingFace weights.""" -from typing import Iterable, List, Optional, Tuple, Union +from typing import Iterable, List, Optional, Set, Tuple, Union import torch from torch import nn @@ -298,8 +298,10 @@ def sample( next_tokens = self.sampler(logits, sampling_metadata) return next_tokens - def load_weights(self, weights: Iterable[Tuple[str, torch.Tensor]]): + def load_weights(self, weights: Iterable[Tuple[str, + torch.Tensor]]) -> Set[str]: params_dict = dict(self.named_parameters(remove_duplicate=False)) + loaded_params: Set[str] = set() for name, loaded_weight in weights: if "lm_head.weight" in name: # GPT-2 ties the weights of the embedding layer and the final @@ -328,3 +330,5 @@ def load_weights(self, weights: Iterable[Tuple[str, torch.Tensor]]): weight_loader = getattr(param, "weight_loader", default_weight_loader) weight_loader(param, loaded_weight) + loaded_params.add(name) + return loaded_params diff --git a/vllm/model_executor/models/gpt_bigcode.py b/vllm/model_executor/models/gpt_bigcode.py index b2fc79d0d36dc..50a143cb1b600 100644 --- a/vllm/model_executor/models/gpt_bigcode.py +++ b/vllm/model_executor/models/gpt_bigcode.py @@ -17,7 +17,7 @@ # See the License for the specific language governing permissions and # limitations under the License. 
"""Inference-only GPTBigCode model compatible with HuggingFace weights.""" -from typing import Iterable, List, Optional, Tuple, Union +from typing import Iterable, List, Optional, Set, Tuple, Union import torch from torch import nn @@ -323,8 +323,10 @@ def sample( next_tokens = self.sampler(logits, sampling_metadata) return next_tokens - def load_weights(self, weights: Iterable[Tuple[str, torch.Tensor]]): + def load_weights(self, weights: Iterable[Tuple[str, + torch.Tensor]]) -> Set[str]: params_dict = dict(self.named_parameters(remove_duplicate=False)) + loaded_params: Set[str] = set() for name, loaded_weight in weights: if "lm_head.weight" in name: continue @@ -344,3 +346,5 @@ def load_weights(self, weights: Iterable[Tuple[str, torch.Tensor]]): weight_loader(param, loaded_weight, 'v') else: weight_loader(param, loaded_weight) + loaded_params.add(name) + return loaded_params diff --git a/vllm/model_executor/models/gpt_j.py b/vllm/model_executor/models/gpt_j.py index cec3fd12a67d6..d5defc60764e6 100644 --- a/vllm/model_executor/models/gpt_j.py +++ b/vllm/model_executor/models/gpt_j.py @@ -15,7 +15,7 @@ # See the License for the specific language governing permissions and # limitations under the License. """Inference-only GPT-J model compatible with HuggingFace weights.""" -from typing import Iterable, List, Optional, Tuple, Union +from typing import Iterable, List, Optional, Set, Tuple, Union import torch from torch import nn @@ -291,7 +291,8 @@ def sample( next_tokens = self.sampler(logits, sampling_metadata) return next_tokens - def load_weights(self, weights: Iterable[Tuple[str, torch.Tensor]]): + def load_weights(self, weights: Iterable[Tuple[str, + torch.Tensor]]) -> Set[str]: stacked_params_mapping = [ # (param_name, shard_name, shard_id) ("qkv_proj", "q_proj", "q"), @@ -301,6 +302,7 @@ def load_weights(self, weights: Iterable[Tuple[str, torch.Tensor]]): ("gate_up_proj", "up_proj", 1), ] params_dict = dict(self.named_parameters()) + loaded_params: Set[str] = set() for name, loaded_weight in weights: if "attn.bias" in name or "attn.masked_bias" in name: continue @@ -330,3 +332,5 @@ def load_weights(self, weights: Iterable[Tuple[str, torch.Tensor]]): weight_loader = getattr(param, "weight_loader", default_weight_loader) weight_loader(param, loaded_weight) + loaded_params.add(name) + return loaded_params diff --git a/vllm/model_executor/models/gpt_neox.py b/vllm/model_executor/models/gpt_neox.py index 11f286d6bcba0..0bb5e2f9b95f9 100644 --- a/vllm/model_executor/models/gpt_neox.py +++ b/vllm/model_executor/models/gpt_neox.py @@ -15,7 +15,7 @@ # See the License for the specific language governing permissions and # limitations under the License. 
"""Inference-only GPT-NeoX model compatible with HuggingFace weights.""" -from typing import Iterable, List, Optional, Tuple, Union +from typing import Iterable, List, Optional, Set, Tuple, Union import torch from torch import nn @@ -303,8 +303,10 @@ def sample( next_tokens = self.sampler(logits, sampling_metadata) return next_tokens - def load_weights(self, weights: Iterable[Tuple[str, torch.Tensor]]): + def load_weights(self, weights: Iterable[Tuple[str, + torch.Tensor]]) -> Set[str]: params_dict = dict(self.named_parameters()) + loaded_params: Set[str] = set() for name, loaded_weight in weights: if ("attention.bias" in name or "attention.masked_bias" in name or "rotary_emb.inv_freq" in name): @@ -337,3 +339,5 @@ def load_weights(self, weights: Iterable[Tuple[str, torch.Tensor]]): weight_loader = getattr(param, "weight_loader", default_weight_loader) weight_loader(param, loaded_weight) + loaded_params.add(name) + return loaded_params diff --git a/vllm/model_executor/models/granite.py b/vllm/model_executor/models/granite.py index cb2583e69d88d..c1e2e87f08ec3 100644 --- a/vllm/model_executor/models/granite.py +++ b/vllm/model_executor/models/granite.py @@ -20,7 +20,7 @@ # See the License for the specific language governing permissions and # limitations under the License. """Inference-only IBM Granite model compatible with HuggingFace weights.""" -from typing import Any, Dict, Iterable, List, Optional, Tuple, Union +from typing import Any, Dict, Iterable, List, Optional, Set, Tuple, Union import torch from torch import nn @@ -455,7 +455,8 @@ def make_empty_intermediate_tensors( device=device), }) - def load_weights(self, weights: Iterable[Tuple[str, torch.Tensor]]): + def load_weights(self, weights: Iterable[Tuple[str, + torch.Tensor]]) -> Set[str]: stacked_params_mapping = [ # (param_name, shard_name, shard_id) (".qkv_proj", ".q_proj", "q"), @@ -465,6 +466,7 @@ def load_weights(self, weights: Iterable[Tuple[str, torch.Tensor]]): (".gate_up_proj", ".up_proj", 1), ] params_dict = dict(self.named_parameters()) + loaded_params: Set[str] = set() for name, loaded_weight in weights: if "rotary_emb.inv_freq" in name: continue @@ -485,6 +487,7 @@ def load_weights(self, weights: Iterable[Tuple[str, torch.Tensor]]): default_weight_loader) loaded_weight = loaded_weight[0] weight_loader(param, loaded_weight) + loaded_params.add(scale_name) continue for (param_name, weight_name, shard_id) in stacked_params_mapping: if weight_name not in name: @@ -518,6 +521,8 @@ def load_weights(self, weights: Iterable[Tuple[str, torch.Tensor]]): weight_loader = getattr(param, "weight_loader", default_weight_loader) weight_loader(param, loaded_weight) + loaded_params.add(name) + return loaded_params # If this function is called, it should always initialize KV cache scale # factors (or else raise an exception). Thus, handled exceptions should diff --git a/vllm/model_executor/models/granitemoe.py b/vllm/model_executor/models/granitemoe.py index f437dd521a7d5..a91a18816995f 100644 --- a/vllm/model_executor/models/granitemoe.py +++ b/vllm/model_executor/models/granitemoe.py @@ -20,7 +20,7 @@ # See the License for the specific language governing permissions and # limitations under the License. 
"""Inference-only GraniteMoe model.""" -from typing import Iterable, List, Optional, Tuple +from typing import Iterable, List, Optional, Set, Tuple import torch from torch import nn @@ -419,7 +419,8 @@ def sample( next_tokens = self.sampler(logits, sampling_metadata) return next_tokens - def load_weights(self, weights: Iterable[Tuple[str, torch.Tensor]]): + def load_weights(self, weights: Iterable[Tuple[str, + torch.Tensor]]) -> Set[str]: new_weights = {} for n, p in weights: if n.endswith('.block_sparse_moe.input_linear.weight'): @@ -452,4 +453,5 @@ def load_weights(self, weights: Iterable[Tuple[str, torch.Tensor]]): pass else: new_weights[n] = p - mixtral.MixtralForCausalLM.load_weights(self, new_weights.items()) + return mixtral.MixtralForCausalLM.load_weights(self, + new_weights.items()) diff --git a/vllm/model_executor/models/idefics2_vision_model.py b/vllm/model_executor/models/idefics2_vision_model.py index b21bc2a3f9ce1..16192928beb1f 100644 --- a/vllm/model_executor/models/idefics2_vision_model.py +++ b/vllm/model_executor/models/idefics2_vision_model.py @@ -15,7 +15,7 @@ # limitations under the License. """PyTorch Idefics2 model.""" -from typing import Iterable, Optional, Tuple +from typing import Iterable, Optional, Set, Tuple import torch from torch import nn @@ -331,7 +331,8 @@ def forward( last_hidden_state = self.post_layernorm(encoder_outputs) return last_hidden_state - def load_weights(self, weights: Iterable[Tuple[str, torch.Tensor]]): + def load_weights(self, weights: Iterable[Tuple[str, + torch.Tensor]]) -> Set[str]: stacked_params_mapping = [ # (param_name, shard_name, shard_id) ("qkv_proj", "q_proj", "q"), @@ -339,11 +340,13 @@ def load_weights(self, weights: Iterable[Tuple[str, torch.Tensor]]): ("qkv_proj", "v_proj", "v"), ] params_dict = dict(self.named_parameters()) + loaded_params: Set[str] = set() for name, loaded_weight in weights: for param_name, weight_name, shard_id in stacked_params_mapping: if weight_name not in name: continue - param = params_dict[name.replace(weight_name, param_name)] + name = name.replace(weight_name, param_name) + param = params_dict[name] weight_loader = param.weight_loader weight_loader(param, loaded_weight, shard_id) break @@ -352,3 +355,5 @@ def load_weights(self, weights: Iterable[Tuple[str, torch.Tensor]]): weight_loader = getattr(param, "weight_loader", default_weight_loader) weight_loader(param, loaded_weight) + loaded_params.add(name) + return loaded_params diff --git a/vllm/model_executor/models/idefics3.py b/vllm/model_executor/models/idefics3.py index 0cecc754e916f..5d176b2a4e416 100644 --- a/vllm/model_executor/models/idefics3.py +++ b/vllm/model_executor/models/idefics3.py @@ -15,7 +15,7 @@ import math from typing import (Dict, Iterable, List, Literal, Mapping, NamedTuple, - Optional, Tuple, TypedDict, Union) + Optional, Set, Tuple, TypedDict, Union) import torch import torch.utils.checkpoint @@ -751,9 +751,10 @@ def sample( next_tokens = self.sampler(logits, sampling_metadata) return next_tokens - def load_weights(self, weights: Iterable[Tuple[str, torch.Tensor]]): + def load_weights(self, weights: Iterable[Tuple[str, + torch.Tensor]]) -> Set[str]: loader = AutoWeightsLoader(self) - loader.load_weights(weights) + return loader.load_weights(weights) def get_mm_mapping(self) -> MultiModelKeys: """ diff --git a/vllm/model_executor/models/intern_vit.py b/vllm/model_executor/models/intern_vit.py index 9761635d2a6c2..bd91a0806ae5c 100644 --- a/vllm/model_executor/models/intern_vit.py +++ 
b/vllm/model_executor/models/intern_vit.py @@ -5,7 +5,7 @@ # Licensed under The MIT License [see LICENSE for details] # -------------------------------------------------------- from functools import partial -from typing import Iterable, Optional, Tuple +from typing import Iterable, Optional, Set, Tuple import torch import torch.nn as nn @@ -469,10 +469,14 @@ def forward( return encoder_outputs - def load_weights(self, weights: Iterable[Tuple[str, torch.Tensor]]): + def load_weights(self, weights: Iterable[Tuple[str, + torch.Tensor]]) -> Set[str]: params_dict = dict(self.named_parameters()) + loaded_params: Set[str] = set() for name, loaded_weight in weights: param = params_dict[name] weight_loader = getattr(param, "weight_loader", default_weight_loader) weight_loader(param, loaded_weight) + loaded_params.add(name) + return loaded_params diff --git a/vllm/model_executor/models/internlm2.py b/vllm/model_executor/models/internlm2.py index 19bfe16e4d5fc..94b819b5d9366 100644 --- a/vllm/model_executor/models/internlm2.py +++ b/vllm/model_executor/models/internlm2.py @@ -1,5 +1,5 @@ from functools import partial -from typing import Any, Dict, Iterable, List, Optional, Tuple, Union +from typing import Any, Dict, Iterable, List, Optional, Set, Tuple, Union import torch from torch import nn @@ -369,13 +369,15 @@ def sample( next_tokens = self.sampler(logits, sampling_metadata) return next_tokens - def load_weights(self, weights: Iterable[Tuple[str, torch.Tensor]]): + def load_weights(self, weights: Iterable[Tuple[str, + torch.Tensor]]) -> Set[str]: stacked_params_mapping = [ # (param_name, shard_name, shard_id) ("gate_up_proj", "w1", 0), ("gate_up_proj", "w3", 1), ] params_dict = dict(self.named_parameters()) + loaded_params: Set[str] = set() for name, loaded_weight in weights: if "rotary_emb.inv_freq" in name: continue @@ -402,3 +404,5 @@ def load_weights(self, weights: Iterable[Tuple[str, torch.Tensor]]): weight_loader = getattr(param, "weight_loader", default_weight_loader) weight_loader(param, loaded_weight) + loaded_params.add(name) + return loaded_params diff --git a/vllm/model_executor/models/internvl.py b/vllm/model_executor/models/internvl.py index 92579e3aae949..7ea2f9be2191d 100644 --- a/vllm/model_executor/models/internvl.py +++ b/vllm/model_executor/models/internvl.py @@ -6,7 +6,7 @@ # -------------------------------------------------------- import re from functools import cached_property, partial -from typing import (Iterable, List, Literal, Mapping, Optional, Tuple, +from typing import (Iterable, List, Literal, Mapping, Optional, Set, Tuple, TypedDict, Union) import torch @@ -663,6 +663,7 @@ def sample( ) -> Optional[SamplerOutput]: return self.language_model.sample(logits, sampling_metadata) - def load_weights(self, weights: Iterable[Tuple[str, torch.Tensor]]): + def load_weights(self, weights: Iterable[Tuple[str, + torch.Tensor]]) -> Set[str]: loader = AutoWeightsLoader(self) - loader.load_weights(weights) + return loader.load_weights(weights) diff --git a/vllm/model_executor/models/jais.py b/vllm/model_executor/models/jais.py index ee49ffb3cd87f..41db85b678456 100644 --- a/vllm/model_executor/models/jais.py +++ b/vllm/model_executor/models/jais.py @@ -19,7 +19,7 @@ """Inference-only Jais model compatible with HuggingFace weights.""" import math -from typing import Iterable, List, Optional, Tuple, Union +from typing import Iterable, List, Optional, Set, Tuple, Union import torch from torch import nn @@ -350,8 +350,10 @@ def sample( next_tokens = self.sampler(logits, 
sampling_metadata) return next_tokens - def load_weights(self, weights: Iterable[Tuple[str, torch.Tensor]]): + def load_weights(self, weights: Iterable[Tuple[str, + torch.Tensor]]) -> Set[str]: params_dict = dict(self.named_parameters(remove_duplicate=False)) + loaded_params: Set[str] = set() for name, loaded_weight in weights: if "lm_head.weight" in name: # GPT-2 ties the weights of the embedding layer and the final @@ -382,3 +384,5 @@ def load_weights(self, weights: Iterable[Tuple[str, torch.Tensor]]): weight_loader = getattr(param, "weight_loader", default_weight_loader) weight_loader(param, loaded_weight) + loaded_params.add(name) + return loaded_params diff --git a/vllm/model_executor/models/jamba.py b/vllm/model_executor/models/jamba.py index 5612dd6886385..f83f0fce7275f 100644 --- a/vllm/model_executor/models/jamba.py +++ b/vllm/model_executor/models/jamba.py @@ -1,5 +1,5 @@ """Inference-only Jamba model.""" -from typing import Iterable, List, Optional, Tuple +from typing import Iterable, List, Optional, Set, Tuple import torch from torch import nn @@ -462,7 +462,8 @@ def sample( next_tokens = self.sampler(logits, sampling_metadata) return next_tokens - def load_weights(self, weights: Iterable[Tuple[str, torch.Tensor]]): + def load_weights(self, weights: Iterable[Tuple[str, + torch.Tensor]]) -> Set[str]: stacked_params_mapping = [ # (param_name, shard_name, shard_id) ("qkv_proj", "q_proj", "q"), @@ -479,6 +480,7 @@ def load_weights(self, weights: Iterable[Tuple[str, torch.Tensor]]): num_experts=self.config.num_experts) params_dict = dict(self.named_parameters()) + loaded_params: Set[str] = set() for name, loaded_weight in weights: if "rotary_emb.inv_freq" in name: continue @@ -534,6 +536,8 @@ def load_weights(self, weights: Iterable[Tuple[str, torch.Tensor]]): weight_loader = getattr(param, "weight_loader", default_weight_loader) weight_loader(param, loaded_weight) + loaded_params.add(name) + return loaded_params def _is_moe_layer(name: str): diff --git a/vllm/model_executor/models/llama.py b/vllm/model_executor/models/llama.py index e53631ef19f31..2b40e9ec73fad 100644 --- a/vllm/model_executor/models/llama.py +++ b/vllm/model_executor/models/llama.py @@ -20,7 +20,7 @@ # See the License for the specific language governing permissions and # limitations under the License. 
"""Inference-only LLaMA model compatible with HuggingFace weights.""" -from typing import Any, Dict, Iterable, List, Optional, Tuple, Union +from typing import Any, Dict, Iterable, List, Optional, Set, Tuple, Union import torch from torch import nn @@ -350,7 +350,8 @@ def forward( hidden_states, _ = self.norm(hidden_states, residual) return hidden_states - def load_weights(self, weights: Iterable[Tuple[str, torch.Tensor]]): + def load_weights(self, weights: Iterable[Tuple[str, + torch.Tensor]]) -> Set[str]: stacked_params_mapping = [ # (param_name, shard_name, shard_id) (".qkv_proj", ".q_proj", "q"), @@ -360,6 +361,7 @@ def load_weights(self, weights: Iterable[Tuple[str, torch.Tensor]]): (".gate_up_proj", ".up_proj", 1), ] params_dict = dict(self.named_parameters()) + loaded_params: Set[str] = set() for name, loaded_weight in weights: if "rotary_emb.inv_freq" in name: continue @@ -375,6 +377,7 @@ def load_weights(self, weights: Iterable[Tuple[str, torch.Tensor]]): default_weight_loader) loaded_weight = loaded_weight[0] weight_loader(param, loaded_weight) + loaded_params.add(scale_name) continue for param_name, weight_name, shard_id in stacked_params_mapping: if weight_name not in name: @@ -390,7 +393,6 @@ def load_weights(self, weights: Iterable[Tuple[str, torch.Tensor]]): param = params_dict[name] weight_loader = param.weight_loader weight_loader(param, loaded_weight, shard_id) - break else: # Skip loading extra bias for GPTQ models. @@ -408,6 +410,8 @@ def load_weights(self, weights: Iterable[Tuple[str, torch.Tensor]]): weight_loader = getattr(param, "weight_loader", default_weight_loader) weight_loader(param, loaded_weight) + loaded_params.add(name) + return loaded_params # If this function is called, it should always initialize KV cache scale # factors (or else raise an exception). 
Thus, handled exceptions should @@ -577,13 +581,14 @@ def sample(self, logits: torch.Tensor, next_tokens = self.sampler(logits, sampling_metadata) return next_tokens - def load_weights(self, weights: Iterable[Tuple[str, torch.Tensor]]): + def load_weights(self, weights: Iterable[Tuple[str, + torch.Tensor]]) -> Set[str]: loader = AutoWeightsLoader( self, skip_prefixes=(["lm_head."] if self.config.tie_word_embeddings else None), ) - loader.load_weights( + return loader.load_weights( self.maybe_remap_mistral(name, loaded_weight) for name, loaded_weight in weights) diff --git a/vllm/model_executor/models/llava.py b/vllm/model_executor/models/llava.py index b13bcfa676811..e7d3161a7cb2d 100644 --- a/vllm/model_executor/models/llava.py +++ b/vllm/model_executor/models/llava.py @@ -1,5 +1,5 @@ from functools import cached_property -from typing import (Iterable, List, Literal, Mapping, Optional, Protocol, +from typing import (Iterable, List, Literal, Mapping, Optional, Protocol, Set, Tuple, TypedDict, Union) import torch @@ -547,6 +547,7 @@ def sample( ) -> Optional[SamplerOutput]: return self.language_model.sample(logits, sampling_metadata) - def load_weights(self, weights: Iterable[Tuple[str, torch.Tensor]]): + def load_weights(self, weights: Iterable[Tuple[str, + torch.Tensor]]) -> Set[str]: loader = AutoWeightsLoader(self) - loader.load_weights(weights) + return loader.load_weights(weights) diff --git a/vllm/model_executor/models/llava_next.py b/vllm/model_executor/models/llava_next.py index dd2fa6cac969f..37e2227a52dcd 100644 --- a/vllm/model_executor/models/llava_next.py +++ b/vllm/model_executor/models/llava_next.py @@ -1,5 +1,5 @@ from functools import cached_property -from typing import (Iterable, List, Literal, Mapping, Optional, Tuple, +from typing import (Iterable, List, Literal, Mapping, Optional, Set, Tuple, TypedDict, Union) import torch @@ -654,6 +654,7 @@ def pooler( ) -> Optional[PoolerOutput]: return self._pooler(hidden_states, pooling_metadata) - def load_weights(self, weights: Iterable[Tuple[str, torch.Tensor]]): + def load_weights(self, weights: Iterable[Tuple[str, + torch.Tensor]]) -> Set[str]: loader = AutoWeightsLoader(self) - loader.load_weights(weights) + return loader.load_weights(weights) diff --git a/vllm/model_executor/models/llava_next_video.py b/vllm/model_executor/models/llava_next_video.py index 5d5598d07bfde..e2880c76cf43d 100644 --- a/vllm/model_executor/models/llava_next_video.py +++ b/vllm/model_executor/models/llava_next_video.py @@ -1,6 +1,6 @@ import math from functools import cached_property -from typing import (Iterable, List, Literal, Mapping, Optional, Tuple, +from typing import (Iterable, List, Literal, Mapping, Optional, Set, Tuple, TypedDict, Union) import numpy as np @@ -445,10 +445,11 @@ def sample( ) -> Optional[SamplerOutput]: return self.language_model.sample(logits, sampling_metadata) - def load_weights(self, weights: Iterable[Tuple[str, torch.Tensor]]): + def load_weights(self, weights: Iterable[Tuple[str, + torch.Tensor]]) -> Set[str]: loader = AutoWeightsLoader( self, # This model doesn't support images for now ignore_unexpected_prefixes=["image_newline"], ) - loader.load_weights(weights) + return loader.load_weights(weights) diff --git a/vllm/model_executor/models/llava_onevision.py b/vllm/model_executor/models/llava_onevision.py index a5b2108177830..705ca1e4ab6e6 100644 --- a/vllm/model_executor/models/llava_onevision.py +++ b/vllm/model_executor/models/llava_onevision.py @@ -1,6 +1,6 @@ import math from functools import cached_property 
-from typing import (Iterable, List, Literal, Mapping, Optional, Tuple, +from typing import (Iterable, List, Literal, Mapping, Optional, Set, Tuple, TypedDict, Union) import numpy as np @@ -887,6 +887,7 @@ def sample( ) -> Optional[SamplerOutput]: return self.language_model.sample(logits, sampling_metadata) - def load_weights(self, weights: Iterable[Tuple[str, torch.Tensor]]): + def load_weights(self, weights: Iterable[Tuple[str, + torch.Tensor]]) -> Set[str]: loader = AutoWeightsLoader(self) - loader.load_weights(weights) + return loader.load_weights(weights) diff --git a/vllm/model_executor/models/mamba.py b/vllm/model_executor/models/mamba.py index ac0d265a961f0..405b8f7787ba8 100644 --- a/vllm/model_executor/models/mamba.py +++ b/vllm/model_executor/models/mamba.py @@ -1,5 +1,5 @@ """PyTorch MAMBA model.""" -from typing import Iterable, List, Optional, Tuple +from typing import Iterable, List, Optional, Set, Tuple import torch from torch import nn @@ -243,8 +243,10 @@ def sample( next_tokens = self.sampler(logits, sampling_metadata) return next_tokens - def load_weights(self, weights: Iterable[Tuple[str, torch.Tensor]]): + def load_weights(self, weights: Iterable[Tuple[str, + torch.Tensor]]) -> Set[str]: params_dict = dict(self.named_parameters()) + loaded_params: Set[str] = set() for name, loaded_weight in weights: if "A_log" in name: name = name.replace("A_log", "A") @@ -256,3 +258,5 @@ def load_weights(self, weights: Iterable[Tuple[str, torch.Tensor]]): weight_loader = getattr(param, "weight_loader", default_weight_loader) weight_loader(param, loaded_weight) + loaded_params.add(name) + return loaded_params diff --git a/vllm/model_executor/models/medusa.py b/vllm/model_executor/models/medusa.py index b05360b55466b..b4ed6538bddac 100644 --- a/vllm/model_executor/models/medusa.py +++ b/vllm/model_executor/models/medusa.py @@ -1,4 +1,4 @@ -from typing import Iterable, List, Optional, Tuple +from typing import Iterable, List, Optional, Set, Tuple import torch import torch.nn as nn @@ -156,8 +156,10 @@ def generate_proposals( sampling_metadata=sampling_metadata, ) - def load_weights(self, weights: Iterable[Tuple[str, torch.Tensor]]): + def load_weights(self, weights: Iterable[Tuple[str, + torch.Tensor]]) -> Set[str]: params_dict = dict(self.named_parameters()) + loaded_params: Set[str] = set() weights_map = {} @@ -181,9 +183,12 @@ def load_weights(self, weights: Iterable[Tuple[str, torch.Tensor]]): weight_loader = getattr(param, "weight_loader", default_weight_loader) weight_loader(param, loaded_weight) + loaded_params.add(name) if self.token_map is not None: self.token_map.to(device=self.lm_heads[0].weight.device) assert (self.truncated_vocab_size == self.orig_vocab_size) or (self.token_map is not None) + + return loaded_params diff --git a/vllm/model_executor/models/minicpm.py b/vllm/model_executor/models/minicpm.py index 6b67266c53362..b92bff4d7c28c 100644 --- a/vllm/model_executor/models/minicpm.py +++ b/vllm/model_executor/models/minicpm.py @@ -21,7 +21,7 @@ # limitations under the License. 
"""Inference-only MiniCPM model compatible with HuggingFace weights.""" import math -from typing import Any, Dict, Iterable, List, Optional, Tuple, Union +from typing import Any, Dict, Iterable, List, Optional, Set, Tuple, Union import torch from torch import nn @@ -539,7 +539,8 @@ def sample( next_tokens = self.sampler(logits, sampling_metadata) return next_tokens - def load_weights(self, weights: Iterable[Tuple[str, torch.Tensor]]): + def load_weights(self, weights: Iterable[Tuple[str, + torch.Tensor]]) -> Set[str]: stacked_params_mapping = [ # (param_name, shard_name, shard_id) ("qkv_proj", "q_proj", "q"), @@ -556,6 +557,7 @@ def load_weights(self, weights: Iterable[Tuple[str, torch.Tensor]]): for weight_name in ["w1", "w2", "w3"] ] params_dict = dict(self.named_parameters()) + loaded_params: Set[str] = set() for name, loaded_weight in weights: if "rotary_emb.inv_freq" in name: continue @@ -606,3 +608,5 @@ def load_weights(self, weights: Iterable[Tuple[str, torch.Tensor]]): weight_loader = getattr(param, "weight_loader", default_weight_loader) weight_loader(param, loaded_weight) + loaded_params.add(name) + return loaded_params diff --git a/vllm/model_executor/models/minicpmv.py b/vllm/model_executor/models/minicpmv.py index fd8eda997f76f..99bf1d42d0355 100644 --- a/vllm/model_executor/models/minicpmv.py +++ b/vllm/model_executor/models/minicpmv.py @@ -24,7 +24,7 @@ import re from functools import partial from typing import (Any, Callable, Iterable, List, Literal, Mapping, Optional, - Tuple, TypedDict, Union) + Set, Tuple, TypedDict, Union) import torch import torch.types @@ -602,7 +602,8 @@ def sample( next_tokens = self.sampler(logits, sampling_metadata) return next_tokens - def load_weights(self, weights: Iterable[Tuple[str, torch.Tensor]]): + def load_weights(self, weights: Iterable[Tuple[str, + torch.Tensor]]) -> Set[str]: stacked_params_mapping = [ # (param_name, shard_name, shard_id) ("qkv_proj", "q_proj", "q"), @@ -612,6 +613,7 @@ def load_weights(self, weights: Iterable[Tuple[str, torch.Tensor]]): ("gate_up_proj", "up_proj", 1), ] params_dict = dict(self.named_parameters()) + loaded_params: Set[str] = set() for name, loaded_weight in weights: for key_to_modify, new_key in _KEYS_TO_MODIFY_MAPPING.items(): if key_to_modify in name: @@ -630,10 +632,10 @@ def load_weights(self, weights: Iterable[Tuple[str, torch.Tensor]]): for param_name, weight_name, shard_id in stacked_params_mapping: if weight_name not in name: continue - if is_pp_missing_parameter( - name.replace(weight_name, param_name), self): + name = name.replace(weight_name, param_name) + if is_pp_missing_parameter(name, self): continue - param = params_dict[name.replace(weight_name, param_name)] + param = params_dict[name] weight_loader = param.weight_loader weight_loader(param, loaded_weight, shard_id) break @@ -646,6 +648,8 @@ def load_weights(self, weights: Iterable[Tuple[str, torch.Tensor]]): weight_loader = getattr(param, "weight_loader", default_weight_loader) weight_loader(param, loaded_weight) + loaded_params.add(name) + return loaded_params def get_mm_mapping(self) -> MultiModelKeys: """ diff --git a/vllm/model_executor/models/mixtral.py b/vllm/model_executor/models/mixtral.py index eebf5bab5a288..0faffb4f1b00c 100644 --- a/vllm/model_executor/models/mixtral.py +++ b/vllm/model_executor/models/mixtral.py @@ -20,7 +20,7 @@ # See the License for the specific language governing permissions and # limitations under the License. 
"""Inference-only Mixtral model.""" -from typing import Iterable, List, Optional, Tuple, Union +from typing import Iterable, List, Optional, Set, Tuple, Union import torch from torch import nn @@ -404,7 +404,8 @@ def sample( next_tokens = self.sampler(logits, sampling_metadata) return next_tokens - def load_weights(self, weights: Iterable[Tuple[str, torch.Tensor]]): + def load_weights(self, weights: Iterable[Tuple[str, + torch.Tensor]]) -> Set[str]: stacked_params_mapping = [ # (param_name, shard_name, shard_id) ("qkv_proj", "q_proj", "q"), @@ -421,6 +422,7 @@ def load_weights(self, weights: Iterable[Tuple[str, torch.Tensor]]): num_experts=self.config.num_local_experts) params_dict = dict(self.named_parameters()) + loaded_params: Set[str] = set() for name, loaded_weight in weights: if "rotary_emb.inv_freq" in name: continue @@ -478,3 +480,5 @@ def load_weights(self, weights: Iterable[Tuple[str, torch.Tensor]]): weight_loader = getattr(param, "weight_loader", default_weight_loader) weight_loader(param, loaded_weight) + loaded_params.add(name) + return loaded_params diff --git a/vllm/model_executor/models/mixtral_quant.py b/vllm/model_executor/models/mixtral_quant.py index af2e9586988df..ddd6afcf6a1b6 100644 --- a/vllm/model_executor/models/mixtral_quant.py +++ b/vllm/model_executor/models/mixtral_quant.py @@ -20,7 +20,7 @@ # See the License for the specific language governing permissions and # limitations under the License. """Inference-only Mixtral model.""" -from typing import Iterable, List, Optional, Tuple, Union +from typing import Iterable, List, Optional, Set, Tuple, Union import numpy as np import torch @@ -409,7 +409,8 @@ def sample( next_tokens = self.sampler(logits, sampling_metadata) return next_tokens - def load_weights(self, weights: Iterable[Tuple[str, torch.Tensor]]): + def load_weights(self, weights: Iterable[Tuple[str, + torch.Tensor]]) -> Set[str]: stacked_params_mapping = [ # (param_name, shard_name, shard_id) ("qkv_proj", "q_proj", "q"), @@ -418,6 +419,7 @@ def load_weights(self, weights: Iterable[Tuple[str, torch.Tensor]]): ] params_dict = dict(self.named_parameters()) + loaded_params: Set[str] = set() for name, loaded_weight in weights: if "rotary_emb.inv_freq" in name: continue @@ -448,3 +450,5 @@ def load_weights(self, weights: Iterable[Tuple[str, torch.Tensor]]): weight_loader = getattr(param, "weight_loader", default_weight_loader) weight_loader(param, loaded_weight) + loaded_params.add(name) + return loaded_params diff --git a/vllm/model_executor/models/mllama.py b/vllm/model_executor/models/mllama.py index db7ee7b2d8537..41f62b37f3bd9 100644 --- a/vllm/model_executor/models/mllama.py +++ b/vllm/model_executor/models/mllama.py @@ -13,7 +13,7 @@ # limitations under the License. 
"""PyTorch Mllama model.""" import math -from typing import (Iterable, List, Literal, Mapping, Optional, Tuple, +from typing import (Iterable, List, Literal, Mapping, Optional, Set, Tuple, TypedDict, Union) import numpy as np @@ -1427,7 +1427,8 @@ def forward( return outputs - def load_weights(self, weights: Iterable[Tuple[str, torch.Tensor]]): + def load_weights(self, weights: Iterable[Tuple[str, + torch.Tensor]]) -> Set[str]: stacked_params_mapping = [ # (param_name, shard_name, shard_id) (".qkv_proj", ".q_proj", "q"), @@ -1437,7 +1438,7 @@ def load_weights(self, weights: Iterable[Tuple[str, torch.Tensor]]): (".gate_up_proj", ".up_proj", 1), ] params_dict = dict(self.named_parameters()) - updated_params = set() + updated_params: Set[str] = set() for name, loaded_weight in weights: if 'patch_embedding.weight' in name: name = name.replace('patch_embedding.weight', @@ -1457,6 +1458,8 @@ def load_weights(self, weights: Iterable[Tuple[str, torch.Tensor]]): weight_loader = getattr(param, "weight_loader", default_weight_loader) weight_loader(param, loaded_weight) + updated_params.add(name) + return updated_params def skip_attention_mask(sparse_mask: List[List[int]]) -> bool: diff --git a/vllm/model_executor/models/mlp_speculator.py b/vllm/model_executor/models/mlp_speculator.py index 4d7e82880041d..f2aa2653c4f5c 100644 --- a/vllm/model_executor/models/mlp_speculator.py +++ b/vllm/model_executor/models/mlp_speculator.py @@ -1,5 +1,5 @@ import math -from typing import Iterable, List, Tuple +from typing import Iterable, List, Set, Tuple import torch import torch.nn as nn @@ -188,11 +188,15 @@ def generate_proposals( return next_tokens - def load_weights(self, weights: Iterable[Tuple[str, torch.Tensor]]): + def load_weights(self, weights: Iterable[Tuple[str, + torch.Tensor]]) -> Set[str]: params_dict = dict(self.named_parameters()) + loaded_params: Set[str] = set() for name, loaded_weight in weights: param = params_dict.get(name.replace("speculator.", "")) if param is not None: weight_loader = getattr(param, "weight_loader", default_weight_loader) weight_loader(param, loaded_weight) + loaded_params.add(name) + return loaded_params diff --git a/vllm/model_executor/models/mpt.py b/vllm/model_executor/models/mpt.py index 3c74ef2448abb..8716e92b0f1c2 100644 --- a/vllm/model_executor/models/mpt.py +++ b/vllm/model_executor/models/mpt.py @@ -1,6 +1,6 @@ # Adapted from https://huggingface.co/mosaicml/mpt-7b/tree/main import math -from typing import Iterable, List, Optional, Tuple, Union +from typing import Iterable, List, Optional, Set, Tuple, Union import torch import torch.nn as nn @@ -324,8 +324,10 @@ def sample( next_tokens = self.sampler(logits, sampling_metadata) return next_tokens - def load_weights(self, weights: Iterable[Tuple[str, torch.Tensor]]): + def load_weights(self, weights: Iterable[Tuple[str, + torch.Tensor]]) -> Set[str]: params_dict = dict(self.named_parameters(remove_duplicate=False)) + loaded_params: Set[str] = set() for name, loaded_weight in weights: # Skip loading extra bias for GPTQ models. 
if name.endswith(".bias") and name not in params_dict: @@ -336,3 +338,5 @@ def load_weights(self, weights: Iterable[Tuple[str, torch.Tensor]]): weight_loader = getattr(param, "weight_loader", default_weight_loader) weight_loader(param, loaded_weight) + loaded_params.add(name) + return loaded_params diff --git a/vllm/model_executor/models/nemotron.py b/vllm/model_executor/models/nemotron.py index eb45beae7d21a..ceab299a7950a 100644 --- a/vllm/model_executor/models/nemotron.py +++ b/vllm/model_executor/models/nemotron.py @@ -20,7 +20,7 @@ # See the License for the specific language governing permissions and # limitations under the License. """Inference-only Nemotron model compatible with HuggingFace weights.""" -from typing import Any, Dict, Iterable, List, Optional, Tuple, Union +from typing import Any, Dict, Iterable, List, Optional, Set, Tuple, Union import torch from torch import nn @@ -474,7 +474,8 @@ def sample( next_tokens = self.sampler(logits, sampling_metadata) return next_tokens - def load_weights(self, weights: Iterable[Tuple[str, torch.Tensor]]): + def load_weights(self, weights: Iterable[Tuple[str, + torch.Tensor]]) -> Set[str]: stacked_params_mapping = [ # (param_name, shard_name, shard_id) (".qkv_proj", ".q_proj", "q"), @@ -482,6 +483,7 @@ def load_weights(self, weights: Iterable[Tuple[str, torch.Tensor]]): (".qkv_proj", ".v_proj", "v"), ] params_dict = dict(self.named_parameters()) + loaded_params: Set[str] = set() for name, loaded_weight in weights: if "rotary_emb.inv_freq" in name: continue @@ -522,3 +524,5 @@ def load_weights(self, weights: Iterable[Tuple[str, torch.Tensor]]): weight_loader = getattr(param, "weight_loader", default_weight_loader) weight_loader(param, loaded_weight) + loaded_params.add(name) + return loaded_params diff --git a/vllm/model_executor/models/olmo.py b/vllm/model_executor/models/olmo.py index 98d4e1ec320a4..dc138e2e636ad 100644 --- a/vllm/model_executor/models/olmo.py +++ b/vllm/model_executor/models/olmo.py @@ -20,7 +20,7 @@ # See the License for the specific language governing permissions and # limitations under the License. 
"""Inference-only OLMo model compatible with HuggingFace weights.""" -from typing import Iterable, List, Optional, Tuple, Union +from typing import Iterable, List, Optional, Set, Tuple, Union import torch from torch import nn @@ -356,7 +356,8 @@ def sample( next_tokens = self.sampler(logits, sampling_metadata) return next_tokens - def load_weights(self, weights: Iterable[Tuple[str, torch.Tensor]]): + def load_weights(self, weights: Iterable[Tuple[str, + torch.Tensor]]) -> Set[str]: stacked_params_mapping = [ # (param_name, shard_name, shard_id) ("qkv_proj", "q_proj", "q"), @@ -366,6 +367,7 @@ def load_weights(self, weights: Iterable[Tuple[str, torch.Tensor]]): ("gate_up_proj", "up_proj", 1), ] params_dict = dict(self.named_parameters(remove_duplicate=False)) + loaded_params: Set[str] = set() for name, loaded_weight in weights: if "rotary_emb.inv_freq" in name: continue @@ -402,3 +404,5 @@ def load_weights(self, weights: Iterable[Tuple[str, torch.Tensor]]): weight_loader = getattr(param, "weight_loader", default_weight_loader) weight_loader(param, loaded_weight) + loaded_params.add(name) + return loaded_params diff --git a/vllm/model_executor/models/olmoe.py b/vllm/model_executor/models/olmoe.py index f4eebab8c98dd..ab87695d8e650 100644 --- a/vllm/model_executor/models/olmoe.py +++ b/vllm/model_executor/models/olmoe.py @@ -10,7 +10,7 @@ # See the License for the specific language governing permissions and # limitations under the License. """Inference-only OLMoE model compatible with HuggingFace weights.""" -from typing import Any, Dict, Iterable, List, Optional, Tuple, Union +from typing import Any, Dict, Iterable, List, Optional, Set, Tuple, Union import torch from torch import nn @@ -364,7 +364,8 @@ def sample( next_tokens = self.sampler(logits, sampling_metadata) return next_tokens - def load_weights(self, weights: Iterable[Tuple[str, torch.Tensor]]): + def load_weights(self, weights: Iterable[Tuple[str, + torch.Tensor]]) -> Set[str]: stacked_params_mapping = [ # (param_name, shard_name, shard_id) ("qkv_proj", "q_proj", "q"), @@ -383,6 +384,7 @@ def load_weights(self, weights: Iterable[Tuple[str, torch.Tensor]]): num_experts=self.config.num_experts) params_dict = dict(self.named_parameters()) + loaded_params: Set[str] = set() for name, loaded_weight in weights: if "rotary_emb.inv_freq" in name: continue @@ -455,3 +457,5 @@ def load_weights(self, weights: Iterable[Tuple[str, torch.Tensor]]): weight_loader = getattr(param, "weight_loader", default_weight_loader) weight_loader(param, loaded_weight) + loaded_params.add(name) + return loaded_params diff --git a/vllm/model_executor/models/opt.py b/vllm/model_executor/models/opt.py index 997fe642439e6..db85a494980a7 100644 --- a/vllm/model_executor/models/opt.py +++ b/vllm/model_executor/models/opt.py @@ -16,7 +16,7 @@ # See the License for the specific language governing permissions and # limitations under the License. 
"""Inference-only OPT model compatible with HuggingFace weights.""" -from typing import Iterable, List, Optional, Tuple, Union +from typing import Iterable, List, Optional, Set, Tuple, Union import torch from torch import nn @@ -394,7 +394,8 @@ def sample( next_tokens = self.sampler(logits, sampling_metadata) return next_tokens - def load_weights(self, weights: Iterable[Tuple[str, torch.Tensor]]): + def load_weights(self, weights: Iterable[Tuple[str, + torch.Tensor]]) -> Set[str]: stacked_params_mapping = [ # (param_name, shard_name, shard_id) ("qkv_proj", "q_proj", "q"), @@ -402,6 +403,7 @@ def load_weights(self, weights: Iterable[Tuple[str, torch.Tensor]]): ("qkv_proj", "v_proj", "v"), ] params_dict = dict(self.named_parameters(remove_duplicate=False)) + loaded_params: Set[str] = set() for name, loaded_weight in weights: if "lm_head.weight" in name and self.config.tie_word_embeddings: continue @@ -431,3 +433,5 @@ def load_weights(self, weights: Iterable[Tuple[str, torch.Tensor]]): weight_loader = getattr(param, "weight_loader", default_weight_loader) weight_loader(param, loaded_weight) + loaded_params.add(name) + return loaded_params diff --git a/vllm/model_executor/models/orion.py b/vllm/model_executor/models/orion.py index 39d659c49cbcf..b01734af8ddd8 100644 --- a/vllm/model_executor/models/orion.py +++ b/vllm/model_executor/models/orion.py @@ -3,7 +3,7 @@ # Copyright (c) OrionStar Inc. # LICENSE: https://huggingface.co/OrionStarAI/Orion-14B-Base/blob/main/LICENSE """Inference-only Orion-14B model compatible with HuggingFace weights.""" -from typing import Any, Dict, Iterable, List, Optional, Tuple, Union +from typing import Any, Dict, Iterable, List, Optional, Set, Tuple, Union import torch from torch import nn @@ -327,7 +327,8 @@ def sample( next_tokens = self.sampler(logits, sampling_metadata) return next_tokens - def load_weights(self, weights: Iterable[Tuple[str, torch.Tensor]]): + def load_weights(self, weights: Iterable[Tuple[str, + torch.Tensor]]) -> Set[str]: stacked_params_mapping = [ # (param_name, shard_name, shard_id) ("qkv_proj", "q_proj", "q"), @@ -337,6 +338,7 @@ def load_weights(self, weights: Iterable[Tuple[str, torch.Tensor]]): ("gate_up_proj", "up_proj", 1), ] params_dict = dict(self.named_parameters()) + loaded_params: Set[str] = set() for name, loaded_weight in weights: if "rotary_emb.inv_freq" in name: continue @@ -368,3 +370,5 @@ def load_weights(self, weights: Iterable[Tuple[str, torch.Tensor]]): weight_loader = getattr(param, "weight_loader", default_weight_loader) weight_loader(param, loaded_weight) + loaded_params.add(name) + return loaded_params diff --git a/vllm/model_executor/models/paligemma.py b/vllm/model_executor/models/paligemma.py index eea229359255e..dd5256eb87ab3 100644 --- a/vllm/model_executor/models/paligemma.py +++ b/vllm/model_executor/models/paligemma.py @@ -1,4 +1,4 @@ -from typing import (Iterable, List, Literal, Mapping, Optional, Tuple, +from typing import (Iterable, List, Literal, Mapping, Optional, Set, Tuple, TypedDict, Union) import torch @@ -295,6 +295,7 @@ def sample( ) -> Optional[SamplerOutput]: return self.language_model.sample(logits, sampling_metadata) - def load_weights(self, weights: Iterable[Tuple[str, torch.Tensor]]): + def load_weights(self, weights: Iterable[Tuple[str, + torch.Tensor]]) -> Set[str]: loader = AutoWeightsLoader(self) - loader.load_weights(weights) + return loader.load_weights(weights) diff --git a/vllm/model_executor/models/persimmon.py b/vllm/model_executor/models/persimmon.py index 
62c509153a111..3b8199f4f1661 100644 --- a/vllm/model_executor/models/persimmon.py +++ b/vllm/model_executor/models/persimmon.py @@ -19,7 +19,7 @@ # See the License for the specific language governing permissions and # limitations under the License. """Inference-only persimmon model compatible with HuggingFace weights.""" -from typing import Iterable, List, Optional, Tuple, Union +from typing import Iterable, List, Optional, Set, Tuple, Union import torch from torch import nn @@ -324,8 +324,10 @@ def sample( next_tokens = self.sampler(logits, sampling_metadata) return next_tokens - def load_weights(self, weights: Iterable[Tuple[str, torch.Tensor]]): + def load_weights(self, weights: Iterable[Tuple[str, + torch.Tensor]]) -> Set[str]: params_dict = dict(self.named_parameters(remove_duplicate=False)) + loaded_params: Set[str] = set() for name, loaded_weight in weights: if "rotary_emb.inv_freq" in name: continue @@ -358,3 +360,5 @@ def load_weights(self, weights: Iterable[Tuple[str, torch.Tensor]]): weight_loader = getattr(param, "weight_loader", default_weight_loader) weight_loader(param, loaded_weight) + loaded_params.add(name) + return loaded_params diff --git a/vllm/model_executor/models/phi.py b/vllm/model_executor/models/phi.py index a2ab0d74c48db..0a117bf16c9b3 100644 --- a/vllm/model_executor/models/phi.py +++ b/vllm/model_executor/models/phi.py @@ -34,7 +34,7 @@ # OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. """Inference-only Phi-1.5 model compatible with HuggingFace weights.""" -from typing import Iterable, List, Optional, Tuple, Union +from typing import Iterable, List, Optional, Set, Tuple, Union import torch from torch import nn @@ -345,7 +345,8 @@ def sample( next_tokens = self.sampler(logits, sampling_metadata) return next_tokens - def load_weights(self, weights: Iterable[Tuple[str, torch.Tensor]]): + def load_weights(self, weights: Iterable[Tuple[str, + torch.Tensor]]) -> Set[str]: stacked_params_mapping = [ # (param_name, shard_name, shard_id) ("qkv_proj", "q_proj", "q"), @@ -353,6 +354,7 @@ def load_weights(self, weights: Iterable[Tuple[str, torch.Tensor]]): ("qkv_proj", "v_proj", "v") ] params_dict = dict(self.named_parameters()) + loaded_params: Set[str] = set() for name, loaded_weight in weights: if "rotary_emb.inv_freq" in name: @@ -383,3 +385,5 @@ def load_weights(self, weights: Iterable[Tuple[str, torch.Tensor]]): weight_loader = getattr(param, "weight_loader", default_weight_loader) weight_loader(param, loaded_weight) + loaded_params.add(name) + return loaded_params diff --git a/vllm/model_executor/models/phi3_small.py b/vllm/model_executor/models/phi3_small.py index 2139cec441807..a78e4d355a314 100644 --- a/vllm/model_executor/models/phi3_small.py +++ b/vllm/model_executor/models/phi3_small.py @@ -1,5 +1,5 @@ import math -from typing import Iterable, List, Optional, Tuple, Union +from typing import Iterable, List, Optional, Set, Tuple, Union import torch from torch import nn @@ -457,9 +457,11 @@ def sample( sampling_metadata) return next_tokens - def load_weights(self, weights: Iterable[Tuple[str, torch.Tensor]]): + def load_weights(self, weights: Iterable[Tuple[str, + torch.Tensor]]) -> Set[str]: params_dict = dict(self.named_parameters()) + loaded_params: Set[str] = set() for name, loaded_weight in weights: if "rotary_emb.inv_freq" in name: continue @@ -471,3 +473,5 @@ def load_weights(self, weights: Iterable[Tuple[str, torch.Tensor]]): weight_loader = 
getattr(param, "weight_loader", default_weight_loader) weight_loader(param, loaded_weight) + loaded_params.add(name) + return loaded_params diff --git a/vllm/model_executor/models/phi3v.py b/vllm/model_executor/models/phi3v.py index 4db65edc174f1..2e583bb08e87a 100644 --- a/vllm/model_executor/models/phi3v.py +++ b/vllm/model_executor/models/phi3v.py @@ -15,7 +15,7 @@ import itertools import re from functools import cached_property, lru_cache -from typing import (Any, Dict, Iterable, List, Literal, Mapping, Optional, +from typing import (Any, Dict, Iterable, List, Literal, Mapping, Optional, Set, Tuple, TypedDict, Union) import numpy as np @@ -744,7 +744,8 @@ def pooler( ) -> Optional[PoolerOutput]: return self._pooler(hidden_states, pooling_metadata) - def load_weights(self, weights: Iterable[Tuple[str, torch.Tensor]]): + def load_weights(self, weights: Iterable[Tuple[str, + torch.Tensor]]) -> Set[str]: hf_to_vllm_mapper = WeightsMapper( orig_to_new_prefix={ "model.vision_embed_tokens.wte": "embed_tokens", @@ -759,5 +760,7 @@ def load_weights(self, weights: Iterable[Tuple[str, torch.Tensor]]): # The HF config doesn't specify whether these are tied, # so we detect it this way - if "embed_tokens" not in autoloaded_weights: + if "embed_tokens.weight" not in autoloaded_weights: self.embed_tokens = self.language_model.model.embed_tokens + autoloaded_weights.add("embed_tokens.weight") + return autoloaded_weights diff --git a/vllm/model_executor/models/phimoe.py b/vllm/model_executor/models/phimoe.py index b7e70f8fa2c6d..e475d286bd7ea 100644 --- a/vllm/model_executor/models/phimoe.py +++ b/vllm/model_executor/models/phimoe.py @@ -20,7 +20,7 @@ # See the License for the specific language governing permissions and # limitations under the License. """Inference-only PhiMoE model.""" -from typing import Iterable, List, Optional, Tuple, Union +from typing import Iterable, List, Optional, Set, Tuple, Union import torch from torch import nn @@ -598,7 +598,8 @@ def sample( next_tokens = self.sampler(logits, sampling_metadata) return next_tokens - def load_weights(self, weights: Iterable[Tuple[str, torch.Tensor]]): + def load_weights(self, weights: Iterable[Tuple[str, + torch.Tensor]]) -> Set[str]: stacked_params_mapping = [ # (param_name, shard_name, shard_id) ("qkv_proj", "q_proj", "q"), @@ -613,6 +614,7 @@ def load_weights(self, weights: Iterable[Tuple[str, torch.Tensor]]): num_experts=self.config.num_local_experts) params_dict = dict(self.named_parameters()) + loaded_params: Set[str] = set() for name, loaded_weight in weights: if "rotary_emb.inv_freq" in name: continue @@ -666,3 +668,5 @@ def load_weights(self, weights: Iterable[Tuple[str, torch.Tensor]]): weight_loader = getattr(param, "weight_loader", default_weight_loader) weight_loader(param, loaded_weight) + loaded_params.add(name) + return loaded_params diff --git a/vllm/model_executor/models/pixtral.py b/vllm/model_executor/models/pixtral.py index a3e30ea2dd299..307febde7eef0 100644 --- a/vllm/model_executor/models/pixtral.py +++ b/vllm/model_executor/models/pixtral.py @@ -1,7 +1,7 @@ from dataclasses import dataclass, fields from functools import cached_property from itertools import tee -from typing import Iterable, List, Mapping, Optional, Tuple, Union +from typing import Iterable, List, Mapping, Optional, Set, Tuple, Union import numpy import torch @@ -1053,7 +1053,8 @@ def forward( # (TODO) Add prefix argument for filtering out weights to be loaded # ref: https://github.com/vllm-project/vllm/pull/7186#discussion_r1734163986 - def 
load_weights(self, weights: Iterable[Tuple[str, torch.Tensor]]): + def load_weights(self, weights: Iterable[Tuple[str, + torch.Tensor]]) -> Set[str]: stacked_params_mapping = [ # (param_name, shard_name, shard_id) (".qkv_proj", ".q_proj", "q"), @@ -1063,6 +1064,7 @@ def load_weights(self, weights: Iterable[Tuple[str, torch.Tensor]]): (".gate_up_proj", ".up_proj", 1), ] params_dict = dict(self.named_parameters()) + loaded_params: Set[str] = set() layer_count = len(self.transformer.layers) for name, loaded_weight in weights: @@ -1075,8 +1077,8 @@ def load_weights(self, weights: Iterable[Tuple[str, torch.Tensor]]): for (param_name, weight_name, shard_id) in stacked_params_mapping: if weight_name not in name: continue - - param = params_dict[name.replace(weight_name, param_name)] + name = name.replace(weight_name, param_name) + param = params_dict[name] weight_loader = param.weight_loader weight_loader(param, loaded_weight, shard_id) break @@ -1085,3 +1087,5 @@ def load_weights(self, weights: Iterable[Tuple[str, torch.Tensor]]): weight_loader = getattr(param, "weight_loader", default_weight_loader) weight_loader(param, loaded_weight) + loaded_params.add(name) + return loaded_params diff --git a/vllm/model_executor/models/qwen.py b/vllm/model_executor/models/qwen.py index 447632cefcd9a..3978c176a2144 100644 --- a/vllm/model_executor/models/qwen.py +++ b/vllm/model_executor/models/qwen.py @@ -8,7 +8,7 @@ import re from functools import partial from typing import (Any, Callable, Dict, Iterable, List, Literal, Mapping, - Optional, Tuple, TypedDict, Union) + Optional, Set, Tuple, TypedDict, Union) import numpy as np import torch @@ -964,13 +964,15 @@ def sample( next_tokens = self.sampler(logits, sampling_metadata) return next_tokens - def load_weights(self, weights: Iterable[Tuple[str, torch.Tensor]]): + def load_weights(self, weights: Iterable[Tuple[str, + torch.Tensor]]) -> Set[str]: stacked_params_mapping = [ # (param_name, shard_name, shard_id) ("gate_up_proj", "w2", 0), ("gate_up_proj", "w1", 1), ] params_dict = dict(self.named_parameters()) + loaded_params: Set[str] = set() for name, loaded_weight in weights: if "rotary_emb.inv_freq" in name: continue @@ -999,6 +1001,8 @@ def load_weights(self, weights: Iterable[Tuple[str, torch.Tensor]]): weight_loader = getattr(param, "weight_loader", default_weight_loader) weight_loader(param, loaded_weight) + loaded_params.add(name) + return loaded_params class QWenLLM(QWenBaseModel): diff --git a/vllm/model_executor/models/qwen2.py b/vllm/model_executor/models/qwen2.py index 8f10df808c216..370cff5fa153f 100644 --- a/vllm/model_executor/models/qwen2.py +++ b/vllm/model_executor/models/qwen2.py @@ -21,7 +21,7 @@ # See the License for the specific language governing permissions and # limitations under the License. 
"""Inference-only Qwen2 model compatible with HuggingFace weights.""" -from typing import Iterable, List, Optional, Tuple, Union +from typing import Iterable, List, Optional, Set, Tuple, Union import torch from torch import nn @@ -332,7 +332,8 @@ def forward( hidden_states, _ = self.norm(hidden_states, residual) return hidden_states - def load_weights(self, weights: Iterable[Tuple[str, torch.Tensor]]): + def load_weights(self, weights: Iterable[Tuple[str, + torch.Tensor]]) -> Set[str]: stacked_params_mapping = [ # (param_name, shard_name, shard_id) ("qkv_proj", "q_proj", "q"), @@ -342,6 +343,7 @@ def load_weights(self, weights: Iterable[Tuple[str, torch.Tensor]]): ("gate_up_proj", "up_proj", 1), ] params_dict = dict(self.named_parameters(remove_duplicate=False)) + loaded_params: Set[str] = set() for name, loaded_weight in weights: if "rotary_emb.inv_freq" in name: continue @@ -372,6 +374,8 @@ def load_weights(self, weights: Iterable[Tuple[str, torch.Tensor]]): weight_loader = getattr(param, "weight_loader", default_weight_loader) weight_loader(param, loaded_weight) + loaded_params.add(name) + return loaded_params class Qwen2ForCausalLM(nn.Module, SupportsLoRA, SupportsPP): @@ -494,13 +498,14 @@ def pooler( ) -> Optional[PoolerOutput]: return self._pooler(hidden_states, pooling_metadata) - def load_weights(self, weights: Iterable[Tuple[str, torch.Tensor]]): + def load_weights(self, weights: Iterable[Tuple[str, + torch.Tensor]]) -> Set[str]: loader = AutoWeightsLoader( self, skip_prefixes=(["lm_head."] if self.config.tie_word_embeddings else None), ) - loader.load_weights(weights) + return loader.load_weights(weights) class Qwen2EmbeddingModel(nn.Module, SupportsLoRA, SupportsPP): @@ -564,7 +569,8 @@ def pooler( ) -> Optional[PoolerOutput]: return self._pooler(hidden_states, pooling_metadata) - def load_weights(self, weights: Iterable[Tuple[str, torch.Tensor]]): + def load_weights(self, weights: Iterable[Tuple[str, + torch.Tensor]]) -> Set[str]: loader = AutoWeightsLoader(self, ignore_unexpected_prefixes=["lm_head."]) - loader.load_weights(weights) + return loader.load_weights(weights) diff --git a/vllm/model_executor/models/qwen2_audio.py b/vllm/model_executor/models/qwen2_audio.py index d30950361ad89..a4965f34b1ca8 100644 --- a/vllm/model_executor/models/qwen2_audio.py +++ b/vllm/model_executor/models/qwen2_audio.py @@ -20,7 +20,8 @@ # limitations under the License. 
"""Inference-only Qwen2-Audio model compatible with HuggingFace weights.""" from functools import lru_cache -from typing import Iterable, List, Mapping, Optional, Tuple, TypedDict, Union +from typing import (Iterable, List, Mapping, Optional, Set, Tuple, TypedDict, + Union) import librosa import numpy as np @@ -420,7 +421,8 @@ def sample( next_tokens = self.sampler(logits, sampling_metadata) return next_tokens - def load_weights(self, weights: Iterable[Tuple[str, torch.Tensor]]): + def load_weights(self, weights: Iterable[Tuple[str, + torch.Tensor]]) -> Set[str]: stacked_params_mapping = [ # (param_name, shard_name, shard_id) ("qkv_proj", "q_proj", "q"), @@ -430,6 +432,7 @@ def load_weights(self, weights: Iterable[Tuple[str, torch.Tensor]]): ("gate_up_proj", "up_proj", 1), ] params_dict = dict(self.named_parameters(remove_duplicate=False)) + loaded_params: Set[str] = set() for name, loaded_weight in weights: if "rotary_emb.inv_freq" in name: continue @@ -463,3 +466,5 @@ def load_weights(self, weights: Iterable[Tuple[str, torch.Tensor]]): weight_loader = getattr(param, "weight_loader", default_weight_loader) weight_loader(param, loaded_weight) + loaded_params.add(name) + return loaded_params diff --git a/vllm/model_executor/models/qwen2_cls.py b/vllm/model_executor/models/qwen2_cls.py index 07eb330620a43..dc5dabf6fc38b 100644 --- a/vllm/model_executor/models/qwen2_cls.py +++ b/vllm/model_executor/models/qwen2_cls.py @@ -4,7 +4,7 @@ # Copyright 2024 The Qwen team. # Copyright 2023 The vLLM team. """Inference-only Qwen2-Classification model compatible with HF weights.""" -from typing import Iterable, List, Optional, Tuple +from typing import Iterable, List, Optional, Set, Tuple import torch from torch import nn @@ -97,7 +97,8 @@ def pooler( ) -> Optional[PoolerOutput]: return self._pooler(hidden_states, pooling_metadata) - def load_weights(self, weights: Iterable[Tuple[str, torch.Tensor]]): + def load_weights(self, weights: Iterable[Tuple[str, + torch.Tensor]]) -> Set[str]: loader = AutoWeightsLoader(self, ignore_unexpected_prefixes=["lm_head."]) - loader.load_weights(weights) + return loader.load_weights(weights) diff --git a/vllm/model_executor/models/qwen2_moe.py b/vllm/model_executor/models/qwen2_moe.py index 249d94b5d95e9..96a9bc451f4df 100644 --- a/vllm/model_executor/models/qwen2_moe.py +++ b/vllm/model_executor/models/qwen2_moe.py @@ -21,7 +21,7 @@ # See the License for the specific language governing permissions and # limitations under the License. 
"""Inference-only Qwen2MoE model compatible with HuggingFace weights.""" -from typing import Any, Dict, Iterable, List, Optional, Tuple, Union +from typing import Any, Dict, Iterable, List, Optional, Set, Tuple, Union import torch import torch.nn.functional as F @@ -436,7 +436,8 @@ def sample( next_tokens = self.sampler(logits, sampling_metadata) return next_tokens - def load_weights(self, weights: Iterable[Tuple[str, torch.Tensor]]): + def load_weights(self, weights: Iterable[Tuple[str, + torch.Tensor]]) -> Set[str]: stacked_params_mapping = [ # (param_name, shard_name, shard_id) ("qkv_proj", "q_proj", "q"), @@ -455,6 +456,7 @@ def load_weights(self, weights: Iterable[Tuple[str, torch.Tensor]]): num_experts=self.config.num_experts) params_dict = dict(self.named_parameters()) + loaded_params: Set[str] = set() for name, loaded_weight in weights: if "rotary_emb.inv_freq" in name: continue @@ -532,3 +534,5 @@ def load_weights(self, weights: Iterable[Tuple[str, torch.Tensor]]): weight_loader = getattr(param, "weight_loader", default_weight_loader) weight_loader(param, loaded_weight) + loaded_params.add(name) + return loaded_params diff --git a/vllm/model_executor/models/qwen2_rm.py b/vllm/model_executor/models/qwen2_rm.py index 6db467af334f5..988d682d36be3 100644 --- a/vllm/model_executor/models/qwen2_rm.py +++ b/vllm/model_executor/models/qwen2_rm.py @@ -3,7 +3,7 @@ # Copyright 2024 The Qwen team. # Copyright 2023 The vLLM team. """Inference-only Qwen2-RM model compatible with HuggingFace weights.""" -from typing import Iterable, List, Optional, Tuple, Union +from typing import Iterable, List, Optional, Set, Tuple, Union import torch from torch import nn @@ -110,7 +110,8 @@ def pooler( ) -> Optional[PoolerOutput]: return self._pooler(hidden_states, pooling_metadata) - def load_weights(self, weights: Iterable[Tuple[str, torch.Tensor]]): + def load_weights(self, weights: Iterable[Tuple[str, + torch.Tensor]]) -> Set[str]: loader = AutoWeightsLoader(self, ignore_unexpected_prefixes=["lm_head."]) - loader.load_weights(weights) + return loader.load_weights(weights) diff --git a/vllm/model_executor/models/qwen2_vl.py b/vllm/model_executor/models/qwen2_vl.py index 2335baf459771..ef6b52db6e17d 100644 --- a/vllm/model_executor/models/qwen2_vl.py +++ b/vllm/model_executor/models/qwen2_vl.py @@ -23,7 +23,7 @@ """Inference-only Qwen2-VL model compatible with HuggingFace weights.""" from functools import partial from typing import (Any, Callable, Dict, Iterable, List, Literal, Mapping, - Optional, Tuple, Type, TypedDict, Union) + Optional, Set, Tuple, Type, TypedDict, Union) import torch import torch.nn as nn @@ -1333,7 +1333,8 @@ def pooler( ) -> Optional[PoolerOutput]: return self._pooler(hidden_states, pooling_metadata) - def load_weights(self, weights: Iterable[Tuple[str, torch.Tensor]]): + def load_weights(self, weights: Iterable[Tuple[str, + torch.Tensor]]) -> Set[str]: stacked_params_mapping = [ # (param_name, shard_name, shard_id) ("qkv_proj", "q_proj", "q"), @@ -1343,6 +1344,7 @@ def load_weights(self, weights: Iterable[Tuple[str, torch.Tensor]]): ("gate_up_proj", "gate_proj", 0), ] params_dict = dict(self.named_parameters(remove_duplicate=False)) + loaded_params: Set[str] = set() for name, loaded_weight in weights: if "rotary_emb.inv_freq" in name: continue @@ -1392,3 +1394,5 @@ def load_weights(self, weights: Iterable[Tuple[str, torch.Tensor]]): weight_loader = getattr(param, "weight_loader", default_weight_loader) weight_loader(param, loaded_weight) + loaded_params.add(name) + return 
loaded_params diff --git a/vllm/model_executor/models/siglip.py b/vllm/model_executor/models/siglip.py index acaf4afdecfe5..c9e09b879843a 100644 --- a/vllm/model_executor/models/siglip.py +++ b/vllm/model_executor/models/siglip.py @@ -2,7 +2,7 @@ within a vision language model.""" import math -from typing import Iterable, List, Optional, Tuple, Union +from typing import Iterable, List, Optional, Set, Tuple, Union import numpy as np import torch @@ -594,7 +594,8 @@ def forward( interpolate_pos_encoding=interpolate_pos_encoding, ) - def load_weights(self, weights: Iterable[Tuple[str, torch.Tensor]]): + def load_weights(self, weights: Iterable[Tuple[str, + torch.Tensor]]) -> Set[str]: stacked_params_mapping = [ # (param_name, shard_name, shard_id) ("qkv_proj", "q_proj", "q"), @@ -602,6 +603,7 @@ def load_weights(self, weights: Iterable[Tuple[str, torch.Tensor]]): ("qkv_proj", "v_proj", "v"), ] if self.shard_weight else [] params_dict = dict(self.named_parameters()) + loaded_params: Set[str] = set() layer_count = len(self.vision_model.encoder.layers) for name, loaded_weight in weights: @@ -619,8 +621,9 @@ def load_weights(self, weights: Iterable[Tuple[str, torch.Tensor]]): for (param_name, weight_name, shard_id) in stacked_params_mapping: if weight_name not in name: continue + name = name.replace(weight_name, param_name) - param = params_dict[name.replace(weight_name, param_name)] + param = params_dict[name] weight_loader = param.weight_loader weight_loader(param, loaded_weight, shard_id) break @@ -629,3 +632,5 @@ def load_weights(self, weights: Iterable[Tuple[str, torch.Tensor]]): weight_loader = getattr(param, "weight_loader", default_weight_loader) weight_loader(param, loaded_weight) + loaded_params.add(name) + return loaded_params diff --git a/vllm/model_executor/models/solar.py b/vllm/model_executor/models/solar.py index affb2c975ce4a..6d6fafc5ab0eb 100644 --- a/vllm/model_executor/models/solar.py +++ b/vllm/model_executor/models/solar.py @@ -21,7 +21,7 @@ # limitations under the License. 
"""Inference-only Solar model compatible with HuggingFace weights.""" -from typing import Any, Dict, Iterable, List, Optional, Tuple, Union +from typing import Any, Dict, Iterable, List, Optional, Set, Tuple, Union import torch from torch import nn @@ -477,7 +477,8 @@ def sample( next_tokens = self.sampler(logits, sampling_metadata) return next_tokens - def load_weights(self, weights: Iterable[Tuple[str, torch.Tensor]]): + def load_weights(self, weights: Iterable[Tuple[str, + torch.Tensor]]) -> Set[str]: stacked_params_mapping = [ # (param_name, shard_name, shard_id) (".qkv_proj", ".q_proj", "q"), @@ -487,6 +488,7 @@ def load_weights(self, weights: Iterable[Tuple[str, torch.Tensor]]): (".gate_up_proj", ".up_proj", 1), ] params_dict = dict(self.named_parameters()) + loaded_params: Set[str] = set() for name, loaded_weight in weights: if "rotary_emb.inv_freq" in name: continue @@ -502,6 +504,7 @@ def load_weights(self, weights: Iterable[Tuple[str, torch.Tensor]]): default_weight_loader) loaded_weight = loaded_weight[0] weight_loader(param, loaded_weight) + loaded_params.add(scale_name) continue for param_name, weight_name, shard_id in stacked_params_mapping: if weight_name not in name: @@ -535,6 +538,8 @@ def load_weights(self, weights: Iterable[Tuple[str, torch.Tensor]]): weight_loader = getattr(param, "weight_loader", default_weight_loader) weight_loader(param, loaded_weight) + loaded_params.add(name) + return loaded_params # If this function is called, it should always initialize KV cache scale # factors (or else raise an exception). Thus, handled exceptions should diff --git a/vllm/model_executor/models/stablelm.py b/vllm/model_executor/models/stablelm.py index 99acce596602e..e11d2e916730a 100644 --- a/vllm/model_executor/models/stablelm.py +++ b/vllm/model_executor/models/stablelm.py @@ -18,7 +18,7 @@ # https://huggingface.co/stabilityai/stablelm-3b-4e1t/blob/main/config.json """Inference-only StabeLM (https://github.com/Stability-AI/StableLM) model compatible with HuggingFace weights.""" -from typing import Iterable, List, Optional, Tuple, Union +from typing import Iterable, List, Optional, Set, Tuple, Union import torch from torch import nn @@ -306,7 +306,8 @@ def sample( next_tokens = self.sampler(logits, sampling_metadata) return next_tokens - def load_weights(self, weights: Iterable[Tuple[str, torch.Tensor]]): + def load_weights(self, weights: Iterable[Tuple[str, + torch.Tensor]]) -> Set[str]: stacked_params_mapping = [ # (param_name, shard_name, shard_id) ("qkv_proj", "q_proj", "q"), @@ -316,6 +317,7 @@ def load_weights(self, weights: Iterable[Tuple[str, torch.Tensor]]): ("gate_up_proj", "up_proj", 1), ] params_dict = dict(self.named_parameters()) + loaded_params: Set[str] = set() for name, loaded_weight in weights: if "rotary_emb.inv_freq" in name: continue @@ -347,3 +349,5 @@ def load_weights(self, weights: Iterable[Tuple[str, torch.Tensor]]): weight_loader = getattr(param, "weight_loader", default_weight_loader) weight_loader(param, loaded_weight) + loaded_params.add(name) + return loaded_params diff --git a/vllm/model_executor/models/starcoder2.py b/vllm/model_executor/models/starcoder2.py index 0ef940acebb93..74c66042226de 100644 --- a/vllm/model_executor/models/starcoder2.py +++ b/vllm/model_executor/models/starcoder2.py @@ -17,7 +17,7 @@ # See the License for the specific language governing permissions and # limitations under the License. 
""" PyTorch Starcoder2 model.""" -from typing import Iterable, List, Optional, Tuple, Union +from typing import Iterable, List, Optional, Set, Tuple, Union import torch from torch import nn @@ -314,7 +314,8 @@ def sample( next_tokens = self.sampler(logits, sampling_metadata) return next_tokens - def load_weights(self, weights: Iterable[Tuple[str, torch.Tensor]]): + def load_weights(self, weights: Iterable[Tuple[str, + torch.Tensor]]) -> Set[str]: stacked_params_mapping = [ # (param_name, shard_name, shard_id) ("qkv_proj", "q_proj", "q"), @@ -323,6 +324,7 @@ def load_weights(self, weights: Iterable[Tuple[str, torch.Tensor]]): ] params_dict = dict(self.named_parameters(remove_duplicate=False)) + loaded_params: Set[str] = set() for name, loaded_weight in weights: if "rotary_emb.inv_freq" in name: continue @@ -346,3 +348,5 @@ def load_weights(self, weights: Iterable[Tuple[str, torch.Tensor]]): weight_loader = getattr(param, "weight_loader", default_weight_loader) weight_loader(param, loaded_weight) + loaded_params.add(name) + return loaded_params diff --git a/vllm/model_executor/models/ultravox.py b/vllm/model_executor/models/ultravox.py index 9fde22c016de0..512adbc7db35e 100644 --- a/vllm/model_executor/models/ultravox.py +++ b/vllm/model_executor/models/ultravox.py @@ -3,7 +3,7 @@ import math from functools import cached_property, lru_cache -from typing import (Iterable, List, Literal, Mapping, Optional, Tuple, +from typing import (Iterable, List, Literal, Mapping, Optional, Set, Tuple, TypedDict, Union, cast) import numpy as np @@ -504,10 +504,11 @@ def sample( ) -> Optional[SamplerOutput]: return self.language_model.sample(logits, sampling_metadata) - def load_weights(self, weights: Iterable[Tuple[str, torch.Tensor]]): + def load_weights(self, weights: Iterable[Tuple[str, + torch.Tensor]]) -> Set[str]: hf_to_vllm_mapper = WeightsMapper( orig_to_new_prefix={"audio_tower.model.encoder.": "audio_tower."}) loader = AutoWeightsLoader(self, ignore_unexpected_prefixes=["audio_tower."]) - loader.load_weights(weights, mapper=hf_to_vllm_mapper) + return loader.load_weights(weights, mapper=hf_to_vllm_mapper) diff --git a/vllm/model_executor/models/utils.py b/vllm/model_executor/models/utils.py index 1d51885f9094a..7a4fcce95603d 100644 --- a/vllm/model_executor/models/utils.py +++ b/vllm/model_executor/models/utils.py @@ -1,7 +1,7 @@ import itertools from dataclasses import dataclass, field from typing import (Any, Callable, Dict, Iterable, List, Literal, Mapping, - Optional, Protocol, Tuple, Union, overload) + Optional, Protocol, Set, Tuple, Union, overload) import torch import torch.nn as nn @@ -172,8 +172,9 @@ def _load_module( if module != self.module: module_load_weights = getattr(module, "load_weights", None) if callable(module_load_weights): - module_load_weights(weights) - return + loaded_params = module_load_weights(weights) + yield from map(lambda x: self._get_qualname(base_prefix, x), + loaded_params) child_modules = dict(module.named_children()) child_params = dict(module.named_parameters(recurse=False)) @@ -222,11 +223,11 @@ def load_weights( weights: Iterable[Tuple[str, torch.Tensor]], *, mapper: Optional[WeightsMapper] = None, - ) -> List[str]: + ) -> Set[str]: if mapper is not None: weights = mapper.apply(weights) - autoloaded_weights = list(self._load_module("", self.module, weights)) + autoloaded_weights = set(self._load_module("", self.module, weights)) return autoloaded_weights diff --git a/vllm/model_executor/models/xverse.py b/vllm/model_executor/models/xverse.py index 
51172d8782a70..bc37a997eabb5 100644 --- a/vllm/model_executor/models/xverse.py +++ b/vllm/model_executor/models/xverse.py @@ -19,7 +19,7 @@ # See the License for the specific language governing permissions and # limitations under the License. """Inference-only Xverse model compatible with HuggingFace weights.""" -from typing import Any, Dict, Iterable, List, Optional, Tuple, Union +from typing import Any, Dict, Iterable, List, Optional, Set, Tuple, Union import torch from torch import nn @@ -376,7 +376,8 @@ def sample( next_tokens = self.sampler(logits, sampling_metadata) return next_tokens - def load_weights(self, weights: Iterable[Tuple[str, torch.Tensor]]): + def load_weights(self, weights: Iterable[Tuple[str, + torch.Tensor]]) -> Set[str]: stacked_params_mapping = [ ("qkv_proj", "q_proj", "q"), ("qkv_proj", "k_proj", "k"), @@ -385,6 +386,7 @@ def load_weights(self, weights: Iterable[Tuple[str, torch.Tensor]]): ("gate_up_proj", "up_proj", 1), ] params_dict = dict(self.named_parameters()) + loaded_params: Set[str] = set() for name, loaded_weight in weights: if ("rotary_emb.inv_freq" in name or "rotary_emb.cos_cached" in name @@ -413,3 +415,5 @@ def load_weights(self, weights: Iterable[Tuple[str, torch.Tensor]]): weight_loader = getattr(param, "weight_loader", default_weight_loader) weight_loader(param, loaded_weight) + loaded_params.add(name) + return loaded_params From 47826cacf0e037b4e109f0b2d8d594e47def500e Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E2=84=8D=F0=9D=95=A0=F0=9D=95=9D=F0=9D=95=9D=F0=9D=95=A0?= =?UTF-8?q?=F0=9D=95=A8=20=F0=9D=95=84=F0=9D=95=92=F0=9D=95=9F?= Date: Mon, 18 Nov 2024 05:29:26 +0200 Subject: [PATCH 008/122] [Bugfix] Ignore ray reinit error when current platform is ROCm or XPU (#10375) Signed-off-by: Hollow Man --- vllm/executor/ray_utils.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/vllm/executor/ray_utils.py b/vllm/executor/ray_utils.py index 41dd59bc65ec5..4f28efd639084 100644 --- a/vllm/executor/ray_utils.py +++ b/vllm/executor/ray_utils.py @@ -234,7 +234,7 @@ def initialize_ray_cluster( if current_platform.is_rocm() or current_platform.is_xpu(): # Try to connect existing ray instance and create a new one if not found try: - ray.init("auto") + ray.init("auto", ignore_reinit_error=True) except ConnectionError: logger.warning( "No existing RAY instance detected. 
" From 51bb12d17b374d5c4521cd01e5b066fd2419a8fa Mon Sep 17 00:00:00 2001 From: youkaichao Date: Sun, 17 Nov 2024 23:57:20 -0800 Subject: [PATCH 009/122] [4/N][torch.compile] clean up set_torch_compile_backend (#10401) Signed-off-by: youkaichao --- vllm/compilation/backends.py | 16 ++-------------- vllm/compilation/wrapper.py | 11 +++-------- vllm/config.py | 31 ++++++++++++++++++++++++++++++- vllm/platforms/tpu.py | 7 +++---- vllm/plugins/__init__.py | 14 +------------- vllm/utils.py | 9 +++++++++ vllm/worker/model_runner.py | 3 +-- 7 files changed, 49 insertions(+), 42 deletions(-) diff --git a/vllm/compilation/backends.py b/vllm/compilation/backends.py index 22c613931f082..0cf1e3a95fcba 100644 --- a/vllm/compilation/backends.py +++ b/vllm/compilation/backends.py @@ -2,15 +2,14 @@ import dataclasses import operator from contextlib import ExitStack -from typing import (Any, Callable, Dict, List, Optional, Sequence, Set, Tuple, - Union) +from typing import Any, Callable, Dict, List, Optional, Sequence, Set, Tuple from unittest.mock import patch import torch import torch.fx as fx import vllm.envs as envs -from vllm.config import CompilationConfig, CompilationLevel +from vllm.config import CompilationConfig from vllm.logger import init_logger from vllm.utils import combine_fx_passes, weak_ref_tensors @@ -684,14 +683,3 @@ def __call__(self, *args) -> Any: entry.cudagraph.replay() return entry.output - - -def select_default_backend(level: int) -> Union[str, Callable]: - if level in [CompilationLevel.DYNAMO_AS_IS, CompilationLevel.DYNAMO_ONCE]: - backend_str = "eager" - return backend_str - assert level == CompilationLevel.PIECEWISE - - from vllm.plugins import get_current_vllm_config - compilation_config = get_current_vllm_config().compilation_config - return VllmBackend(compilation_config) diff --git a/vllm/compilation/wrapper.py b/vllm/compilation/wrapper.py index 2a1aecc11ce26..0143d0301ca1a 100644 --- a/vllm/compilation/wrapper.py +++ b/vllm/compilation/wrapper.py @@ -32,14 +32,9 @@ def __init__(self, # default compilation settings # compiling the forward method - # choose the compile backend - - # if the user has set the backend, use it - from vllm.plugins import get_torch_compile_backend - backend = get_torch_compile_backend() - if backend is None: - from vllm.compilation.backends import select_default_backend - backend = select_default_backend(compilation_level) + from vllm.plugins import get_current_vllm_config + backend = get_current_vllm_config( + ).compilation_config.init_backend() compiled_callable = torch.compile( self.forward, diff --git a/vllm/config.py b/vllm/config.py index 7e37edbe594b1..14017bbdb3cf2 100644 --- a/vllm/config.py +++ b/vllm/config.py @@ -22,7 +22,7 @@ get_hf_text_config, get_pooling_config, get_sentence_transformer_tokenizer_config, is_encoder_decoder, uses_mrope) from vllm.utils import (GiB_bytes, cuda_device_count_stateless, get_cpu_memory, - identity, print_warning_once) + identity, print_warning_once, resolve_obj_by_qualname) if TYPE_CHECKING: from ray.util.placement_group import PlacementGroup @@ -2072,6 +2072,13 @@ class CompilationConfig(BaseModel): - 1: dynamo as is. - 2: dynamo once. - 3: piecewise compilation. + - backend: the backend for compilation. It needs to be a string. + - "" (empty string): use the default backend. + - "eager"/"openxla"/...: use the specified backend registered in PyTorch. + - "full.module.name": a qualified name which can be used to import the backend function. 
+ We use string to avoid serialization issues when using compilation in a distributed setting. + When the compilation level is 1 or 2, the backend is used for the compilation directly (it sees the whole graph). + When the compilation level is 3, the backend is used for the piecewise compilation (it sees a part of the graph). - custom_ops: fine-grained control over which custom ops to enable/disable. Use 'all' to enable all, 'none' to disable all. Also specify a list of custom op names to enable (prefixed with a '+'), @@ -2139,6 +2146,7 @@ class CompilationConfig(BaseModel): certain small batchsizes, where inductor is good at optimizing. """ # noqa level: int = 0 + backend: str = "" custom_ops: List[str] = Field(default_factory=list) use_inductor: bool = True @@ -2182,6 +2190,27 @@ def model_post_init(self, __context: Any) -> None: func = __import__(module).__dict__[func_name] self.inductor_compile_config[k] = func + def init_backend(self) -> Union[str, Callable]: + if self.level == CompilationLevel.NO_COMPILATION: + raise ValueError("No compilation level is set.") + + from torch._dynamo.backends.registry import list_backends + torch_backends = list_backends(exclude_tags=tuple()) + if self.level in [ + CompilationLevel.DYNAMO_AS_IS, CompilationLevel.DYNAMO_ONCE + ]: + if self.backend == "": + return "eager" + if self.backend in torch_backends: + return self.backend + return resolve_obj_by_qualname(self.backend) + + # TODO: pass user-specified backend to piecewise compilation + # merge with the config use_inductor + assert self.level == CompilationLevel.PIECEWISE + from vllm.compilation.backends import VllmBackend + return VllmBackend(self) + def init_during_runtime(self): """To complete the initialization of config, we need to know the compile context, which is only available diff --git a/vllm/platforms/tpu.py b/vllm/platforms/tpu.py index c2e22bfc09f22..643db835c85ff 100644 --- a/vllm/platforms/tpu.py +++ b/vllm/platforms/tpu.py @@ -3,8 +3,6 @@ import torch -from vllm.plugins import set_torch_compile_backend - from .interface import Platform, PlatformEnum if TYPE_CHECKING: @@ -12,8 +10,6 @@ else: VllmConfig = None -set_torch_compile_backend("openxla") - class TpuPlatform(Platform): _enum = PlatformEnum.TPU @@ -38,3 +34,6 @@ def check_and_update_config(cls, vllm_config: VllmConfig) -> None: compilation_config.level = CompilationLevel.DYNAMO_ONCE assert compilation_config.level < CompilationLevel.PIECEWISE,\ "TPU does not support Inductor." 
+ + if compilation_config.backend == "": + compilation_config.backend = "openxla" diff --git a/vllm/plugins/__init__.py b/vllm/plugins/__init__.py index c20b9ec891d5d..a0c73a752b5e8 100644 --- a/vllm/plugins/__init__.py +++ b/vllm/plugins/__init__.py @@ -1,6 +1,6 @@ import logging from contextlib import contextmanager -from typing import TYPE_CHECKING, Callable, Optional, Union +from typing import TYPE_CHECKING, Optional import vllm.envs as envs @@ -50,18 +50,6 @@ def load_general_plugins(): logger.exception("Failed to load plugin %s", plugin.name) -_torch_compile_backend: Optional[Union[Callable, str]] = None - - -def set_torch_compile_backend(backend: Union[Callable, str]): - global _torch_compile_backend - _torch_compile_backend = backend - - -def get_torch_compile_backend() -> Optional[Union[Callable, str]]: - return _torch_compile_backend - - _compilation_config: Optional[CompilationConfig] = None diff --git a/vllm/utils.py b/vllm/utils.py index 111460a29de47..5d0514cd9d168 100644 --- a/vllm/utils.py +++ b/vllm/utils.py @@ -1600,3 +1600,12 @@ def direct_register_custom_op( my_lib.impl(op_name, op_func, "CUDA") if fake_impl is not None: my_lib._register_fake(op_name, fake_impl) + + +def resolve_obj_by_qualname(qualname: str) -> Any: + """ + Resolve an object by its fully qualified name. + """ + module_name, obj_name = qualname.rsplit(".", 1) + module = importlib.import_module(module_name) + return getattr(module, obj_name) diff --git a/vllm/worker/model_runner.py b/vllm/worker/model_runner.py index fd89f95445565..fb5813651680b 100644 --- a/vllm/worker/model_runner.py +++ b/vllm/worker/model_runner.py @@ -1143,8 +1143,7 @@ def load_model(self) -> None: if self.vllm_config.compilation_config.level ==\ CompilationLevel.DYNAMO_AS_IS and supports_dynamo(): - from vllm.plugins import get_torch_compile_backend - backend = get_torch_compile_backend() or "eager" + backend = self.vllm_config.compilation_config.init_backend() self.model = torch.compile( self.model, fullgraph=envs.VLLM_TEST_DYNAMO_FULLGRAPH_CAPTURE, From c7dec926f6f1beaed759b8689373926e68867358 Mon Sep 17 00:00:00 2001 From: lkchen Date: Mon, 18 Nov 2024 00:06:16 -0800 Subject: [PATCH 010/122] [VLM] Report multi_modal_placeholders in output (#10407) Signed-off-by: Linkun Chen --- .../vision_language/test_pixtral.py | 79 ++++++++++++++++++- vllm/model_executor/models/pixtral.py | 16 +++- vllm/outputs.py | 30 +++++-- 3 files changed, 115 insertions(+), 10 deletions(-) diff --git a/tests/models/decoder_only/vision_language/test_pixtral.py b/tests/models/decoder_only/vision_language/test_pixtral.py index d8a98a0f84d3b..6233860747b9c 100644 --- a/tests/models/decoder_only/vision_language/test_pixtral.py +++ b/tests/models/decoder_only/vision_language/test_pixtral.py @@ -8,13 +8,17 @@ from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple import pytest +from mistral_common.multimodal import download_image from mistral_common.protocol.instruct.messages import ImageURLChunk from mistral_common.protocol.instruct.request import ChatCompletionRequest from mistral_common.tokens.tokenizers.mistral import MistralTokenizer from mistral_common.tokens.tokenizers.multimodal import image_from_chunk +from transformers import AutoProcessor -from vllm import EngineArgs, LLMEngine, SamplingParams, TokensPrompt +from vllm import (EngineArgs, LLMEngine, RequestOutput, SamplingParams, + TextPrompt, TokensPrompt) from vllm.multimodal import MultiModalDataBuiltins +from vllm.multimodal.inputs import PlaceholderRange from vllm.sequence import 
Logprob, SampleLogprobs from ....utils import VLLM_PATH, large_gpu_test @@ -49,6 +53,20 @@ def _create_msg_format(urls: List[str]) -> List[Dict[str, Any]]: }] +def _create_msg_format_hf(urls: List[str]) -> List[Dict[str, Any]]: + return [{ + "role": + "user", + "content": [{ + "type": "text", + "content": PROMPT, + }, *({ + "type": "image", + "image": download_image(url) + } for url in urls)], + }] + + def _create_engine_inputs(urls: List[str]) -> TokensPrompt: msg = _create_msg_format(urls) @@ -70,6 +88,23 @@ def _create_engine_inputs(urls: List[str]) -> TokensPrompt: return engine_inputs +def _create_engine_inputs_hf(urls: List[str]) -> TextPrompt: + msg = _create_msg_format_hf(urls) + + tokenizer = AutoProcessor.from_pretrained("mistral-community/pixtral-12b") + prompt = tokenizer.apply_chat_template(msg) + + images = [] + for chunk in msg[0]["content"]: + if chunk["type"] == "image": + images.append(chunk["image"]) + + mm_data = MultiModalDataBuiltins(image=images) + engine_inputs = TextPrompt(prompt=prompt, multi_modal_data=mm_data) + + return engine_inputs + + MSGS = [ _create_msg_format(IMG_URLS[:1]), _create_msg_format(IMG_URLS[:2]), @@ -191,3 +226,45 @@ def test_model_engine(vllm_runner, model: str, dtype: str) -> None: outputs_1_lst=logprobs, name_0="h100_ref", name_1="output") + + +@large_gpu_test(min_gb=24) +@pytest.mark.parametrize( + "prompt,expected_ranges", + [(_create_engine_inputs_hf(IMG_URLS[:1]), [{ + "offset": 10, + "length": 494 + }]), + (_create_engine_inputs_hf(IMG_URLS[1:4]), [{ + "offset": 10, + "length": 266 + }, { + "offset": 276, + "length": 1056 + }, { + "offset": 1332, + "length": 418 + }])]) +def test_multi_modal_placeholders( + vllm_runner, prompt, expected_ranges: list[PlaceholderRange]) -> None: + with vllm_runner( + "mistral-community/pixtral-12b", + max_model_len=8192, + limit_mm_per_prompt=LIMIT_MM_PER_PROMPT, + ) as vllm_model: + outputs = vllm_model.model.generate(prompt) + + assert len(outputs) == 1, f"{len(outputs)=}" + output: RequestOutput = outputs[0] + assert hasattr(output, + "multi_modal_placeholders"), f"{output.__dict__=}" + assert "image" in output.multi_modal_placeholders, \ + f"{output.multi_modal_placeholders.keys()=}" + image_placeholder_ranges: list[ + PlaceholderRange] = output.multi_modal_placeholders["image"] + assert len(image_placeholder_ranges) == len( + expected_ranges), f"{image_placeholder_ranges=}" + for real_range, expected_range in zip(image_placeholder_ranges, + expected_ranges): + assert real_range == expected_range, \ + f"{real_range=} {expected_range=}" diff --git a/vllm/model_executor/models/pixtral.py b/vllm/model_executor/models/pixtral.py index 307febde7eef0..d44a538d56b8c 100644 --- a/vllm/model_executor/models/pixtral.py +++ b/vllm/model_executor/models/pixtral.py @@ -30,6 +30,7 @@ from vllm.model_executor.models.utils import merge_multimodal_embeddings from vllm.model_executor.sampling_metadata import SamplingMetadata from vllm.multimodal import MULTIMODAL_REGISTRY, MultiModalKwargs +from vllm.multimodal.inputs import PlaceholderRange from vllm.multimodal.utils import (cached_get_tokenizer, consecutive_placeholder_ranges) from vllm.sequence import IntermediateTensors, SequenceData @@ -773,15 +774,28 @@ def input_processor_for_pixtral_hf( replace_tokens[-1] = image_end_id replace_tokens_list.append(replace_tokens) + reverse_offsets: List[int] = [] # Backward iteration for replacement without affecting known indices for placeholder_idx, replace_tokens in zip(reversed(placeholder_indices), 
reversed(replace_tokens_list)): + reverse_offsets.append( + len(new_token_ids) - placeholder_idx + len(replace_tokens)) new_token_ids[placeholder_idx:placeholder_idx + 1] = replace_tokens + placeholder_ranges: List[PlaceholderRange] = [] + for reverse_offset, replace_tokens in zip(reversed(reverse_offsets), + replace_tokens_list): + placeholder_ranges.append( + PlaceholderRange( + offset=len(new_token_ids) - reverse_offset, + length=len(replace_tokens), + )) + # NOTE: Create a defensive copy of the original inputs return token_inputs(prompt_token_ids=new_token_ids, prompt=new_prompt, - multi_modal_data=multi_modal_data) + multi_modal_data=multi_modal_data, + multi_modal_placeholders={"image": placeholder_ranges}) class PixtralHFMLP(nn.Module): diff --git a/vllm/outputs.py b/vllm/outputs.py index badf50d0602d6..4ae9b377ae693 100644 --- a/vllm/outputs.py +++ b/vllm/outputs.py @@ -5,6 +5,7 @@ from typing import Union from vllm.lora.request import LoRARequest +from vllm.multimodal.inputs import MultiModalPlaceholderDict from vllm.sampling_params import RequestOutputKind from vllm.sequence import (PromptLogprobs, RequestMetrics, SampleLogprobs, SequenceGroup, SequenceGroupBase, SequenceStatus) @@ -103,10 +104,13 @@ def __init__( encoder_prompt: Optional[str] = None, encoder_prompt_token_ids: Optional[List[int]] = None, num_cached_tokens: Optional[int] = None, + *, + multi_modal_placeholders: Optional[MultiModalPlaceholderDict] = None, ) -> None: self.request_id = request_id self.prompt = prompt self.prompt_token_ids = prompt_token_ids + self.multi_modal_placeholders = multi_modal_placeholders or {} self.prompt_logprobs = prompt_logprobs self.outputs = outputs self.finished = finished @@ -275,17 +279,26 @@ def from_seq_group( finished_time = time.time() if finished else None seq_group.set_finished_time(finished_time) - init_args = (seq_group.request_id, prompt, prompt_token_ids, - prompt_logprobs, outputs, finished, seq_group.metrics, - seq_group.lora_request, encoder_prompt, - encoder_prompt_token_ids, num_cached_tokens) + init_kwargs = { + "request_id": seq_group.request_id, + "prompt": prompt, + "prompt_token_ids": prompt_token_ids, + "prompt_logprobs": prompt_logprobs, + "outputs": outputs, + "finished": finished, + "metrics": seq_group.metrics, + "lora_request": seq_group.lora_request, + "encoder_prompt": encoder_prompt, + "encoder_prompt_token_ids": encoder_prompt_token_ids, + "num_cached_tokens": num_cached_tokens, + "multi_modal_placeholders": seq_group.multi_modal_placeholders + } if use_cache: request_output = seq_group.cached_request_output - request_output.__init__(*init_args) # type: ignore - + request_output.__init__(**init_kwargs) # type: ignore else: - request_output = cls(*init_args) + request_output = cls(**init_kwargs) # type: ignore return request_output @@ -300,7 +313,8 @@ def __repr__(self) -> str: f"finished={self.finished}, " f"metrics={self.metrics}, " f"lora_request={self.lora_request}, " - f"num_cached_tokens={self.num_cached_tokens})") + f"num_cached_tokens={self.num_cached_tokens}, " + f"multi_modal_placeholders={self.multi_modal_placeholders})") class EmbeddingRequestOutput: From 01aae1cc68d6013dd91e87418a6d82fa02c58457 Mon Sep 17 00:00:00 2001 From: Maybewuss <38156589+Maybewuss@users.noreply.github.com> Date: Mon, 18 Nov 2024 18:05:36 +0800 Subject: [PATCH 011/122] [Model] Remove redundant softmax when using PoolingType.STEP (#10415) --- vllm/model_executor/layers/pooler.py | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git 
a/vllm/model_executor/layers/pooler.py b/vllm/model_executor/layers/pooler.py index 6fee57a0a03eb..bfe2d7d0f382e 100644 --- a/vllm/model_executor/layers/pooler.py +++ b/vllm/model_executor/layers/pooler.py @@ -118,14 +118,13 @@ def forward( if returned_token_ids is not None and len(returned_token_ids) > 0: hidden_states = hidden_states[:, returned_token_ids] - logits = hidden_states.softmax(dim=-1) step_tag_id = self.step_tag_id offset = 0 pooled_data_lst = [] for prompt_len, seq_data_i in zip( prompt_lens, pooling_metadata.seq_data.values()): - pooled_data_i = logits[offset:offset + prompt_len] + pooled_data_i = hidden_states[offset:offset + prompt_len] if step_tag_id is not None: token_ids = torch.tensor(seq_data_i.prompt_token_ids) pooled_data_i = pooled_data_i[token_ids == step_tag_id] From 5be4e52b6522113f7276e60b32cb5c1f912de6fd Mon Sep 17 00:00:00 2001 From: B-201 Date: Mon, 18 Nov 2024 20:57:10 +0800 Subject: [PATCH 012/122] [Model][LoRA]LoRA support added for glm-4v (#10418) Signed-off-by: B-201 --- vllm/model_executor/models/chatglm.py | 98 +++++++++++++++++++++------ 1 file changed, 79 insertions(+), 19 deletions(-) diff --git a/vllm/model_executor/models/chatglm.py b/vllm/model_executor/models/chatglm.py index 81e56381eabd8..625e31bb0d368 100644 --- a/vllm/model_executor/models/chatglm.py +++ b/vllm/model_executor/models/chatglm.py @@ -30,6 +30,7 @@ ParallelLMHead, VocabParallelEmbedding) from vllm.model_executor.model_loader.weight_utils import default_weight_loader from vllm.model_executor.models.glm4_vision_encoder import EVA2CLIPModel +from vllm.model_executor.models.module_mapping import MultiModelKeys from vllm.model_executor.sampling_metadata import SamplingMetadata from vllm.multimodal import MULTIMODAL_REGISTRY from vllm.multimodal.inputs import MultiModalData, MultiModalKwargs @@ -574,25 +575,8 @@ def forward( return hidden_states -@MULTIMODAL_REGISTRY.register_image_input_mapper(mm_input_mapper_for_glmv) -@MULTIMODAL_REGISTRY.register_max_image_tokens(get_max_glmv_image_tokens) -@INPUT_REGISTRY.register_dummy_data(dummy_data_for_glmv) -@INPUT_REGISTRY.register_input_processor(input_processor_for_glmv) -class ChatGLMForCausalLM(nn.Module, SupportsLoRA, SupportsPP, - SupportsMultiModal): - packed_modules_mapping = { - "query_key_value": ["query_key_value"], - "dense_h_to_4h": ["dense_h_to_4h"] - } - # LoRA specific attributes - supported_lora_modules = [ - "query_key_value", - "dense", - "dense_h_to_4h", - "dense_4h_to_h", - ] - embedding_modules = {} - embedding_padding_modules = [] +class ChatGLMBaseModel(nn.Module, SupportsLoRA, SupportsPP, + SupportsMultiModal): def __init__(self, *, vllm_config: VllmConfig, prefix: str = ""): super().__init__() @@ -692,3 +676,79 @@ def load_weights(self, weights: Iterable[Tuple[str, weight_loader(param, combined_weight) loaded_params.add(combined_name) return loaded_params + + +class ChatGLM(ChatGLMBaseModel): + packed_modules_mapping = { + "query_key_value": ["query_key_value"], + "dense_h_to_4h": ["dense_h_to_4h"] + } + # LoRA specific attributes + supported_lora_modules = [ + "query_key_value", + "dense", + "dense_h_to_4h", + "dense_4h_to_h", + ] + + embedding_modules = {} + embedding_padding_modules = [] + + +class ChatGLMV(ChatGLMBaseModel): + packed_modules_mapping = { + "query_key_value": ["query_key_value"], + "dense_h_to_4h": ["dense_h_to_4h"], + "merged_proj": ["gate_proj", "dense_h_to_4h"] + } + # LoRA specific attributes + supported_lora_modules = [ + "query_key_value", + "dense", + "dense_h_to_4h", + "dense_4h_to_h", 
+ # vision + "fc1", + "fc2", + "merged_proj", + "linear_proj" + ] + + embedding_modules = {} + embedding_padding_modules = [] + + def get_mm_mapping(self) -> MultiModelKeys: + """ + Get the module prefix in multimodal models + """ + return MultiModelKeys.from_string_field( + language_model="transformer.encoder", + connector="transformer.vision.linear_proj", + tower_model="transformer.vision.transformer") + + +@MULTIMODAL_REGISTRY.register_image_input_mapper(mm_input_mapper_for_glmv) +@MULTIMODAL_REGISTRY.register_max_image_tokens(get_max_glmv_image_tokens) +@INPUT_REGISTRY.register_dummy_data(dummy_data_for_glmv) +@INPUT_REGISTRY.register_input_processor(input_processor_for_glmv) +class ChatGLMForCausalLM(ChatGLMBaseModel, SupportsLoRA, SupportsPP, + SupportsMultiModal): + # Ensure that the LoRA support check passes when the class is not + # initialized, but set all these attributes to empty. + packed_modules_mapping = {} + supported_lora_modules = [] + embedding_modules = {} + embedding_padding_modules = [] + + def __new__( + cls, + vllm_config: VllmConfig, + prefix: str = "", + ) -> None: + config = vllm_config.model_config.hf_config + # Initialize VL + if hasattr(config, "visual"): + return ChatGLM(vllm_config=vllm_config, prefix=prefix) + # Initialize LLM + else: + return ChatGLMV(vllm_config=vllm_config, prefix=prefix) From e7ebb662d777a9617644428031c1cf80c38939ba Mon Sep 17 00:00:00 2001 From: Isotr0py <2037008807@qq.com> Date: Mon, 18 Nov 2024 21:45:21 +0800 Subject: [PATCH 013/122] [Model] Remove transformers attention porting in VITs (#10414) Signed-off-by: Isotr0py <2037008807@qq.com> --- vllm/model_executor/models/blip.py | 66 +++++++++++++----------- vllm/model_executor/models/clip.py | 65 ++++++++++++----------- vllm/model_executor/models/intern_vit.py | 32 ++++++++---- vllm/model_executor/models/molmo.py | 2 +- vllm/model_executor/models/qwen2_vl.py | 2 +- vllm/model_executor/models/siglip.py | 63 ++++++++++++---------- vllm/model_executor/models/utils.py | 11 ++-- 7 files changed, 139 insertions(+), 102 deletions(-) diff --git a/vllm/model_executor/models/blip.py b/vllm/model_executor/models/blip.py index 6db6462e97f3f..6af59697160a0 100644 --- a/vllm/model_executor/models/blip.py +++ b/vllm/model_executor/models/blip.py @@ -4,10 +4,11 @@ import torch import torch.nn as nn +import torch.nn.functional as F from PIL import Image from transformers import Blip2VisionConfig, BlipVisionConfig -from transformers.models.blip.modeling_blip import BlipAttention +from vllm.attention.selector import _Backend from vllm.config import ModelConfig from vllm.distributed import divide, get_tensor_model_parallel_world_size from vllm.inputs import DecoderOnlyInputs, token_inputs @@ -21,11 +22,7 @@ repeat_and_pad_placeholder_tokens) from vllm.sequence import SequenceData -try: - from xformers import ops as xops - USE_XFORMERS_OPS = True -except ImportError: - USE_XFORMERS_OPS = False +from .utils import get_vit_attn_backend def get_blip_patch_grid_length(*, image_size: int, patch_size: int) -> int: @@ -168,7 +165,7 @@ def forward(self, pixel_values: torch.Tensor) -> torch.Tensor: return embeddings -class BlipParallelAttention(nn.Module): +class BlipAttention(nn.Module): """Multi-headed attention from 'Attention Is All You Need' paper""" def __init__( @@ -208,6 +205,12 @@ def __init__( self.tp_size = get_tensor_model_parallel_world_size() self.num_heads_per_partition = divide(self.num_heads, self.tp_size) + # Detect attention implementation. 
+ self.attn_backend = get_vit_attn_backend(support_fa=False) + if self.attn_backend not in {_Backend.TORCH_SDPA, _Backend.XFORMERS}: + raise RuntimeError( + f"BLIP does not support {self.attn_backend} backend now.") + def _shape(self, tensor: torch.Tensor, seq_len: int, bsz: int): return tensor.view(bsz, seq_len, self.num_heads, self.head_dim).transpose(1, 2).contiguous() @@ -231,11 +234,26 @@ def forward( self.num_heads_per_partition, self.head_dim) - out = xops.memory_efficient_attention_forward(query_states, - key_states, - value_states, - p=self.dropout, - scale=self.scale) + if self.attn_backend == _Backend.XFORMERS: + from xformers import ops as xops + + out = xops.memory_efficient_attention_forward(query_states, + key_states, + value_states, + p=self.dropout, + scale=self.scale) + elif self.attn_backend == _Backend.TORCH_SDPA: + query_states, key_states, value_states = (x.transpose(1, 2) + for x in (query_states, + key_states, + value_states)) + out = F.scaled_dot_product_attention(query_states, + key_states, + value_states, + dropout_p=self.dropout, + scale=self.scale) + out = out.transpose(1, 2) + out = out.view(bsz, tgt_len, -1) attn_output, _ = self.projection(out) @@ -285,18 +303,11 @@ def __init__( super().__init__() # fallback to sdpa attention if tp unavailable - num_heads = config.num_attention_heads - tp_size = get_tensor_model_parallel_world_size() - if USE_XFORMERS_OPS and num_heads % tp_size == 0: - self.self_attn = BlipParallelAttention( - config, - quant_config=quant_config, - prefix=f"{prefix}.self_attn", - ) - else: - # Blip doesn't have SDPA attention implemented in transformers - # use eager attention instead for cpu backend - self.self_attn = BlipAttention(config) + self.self_attn = BlipAttention( + config, + quant_config=quant_config, + prefix=f"{prefix}.self_attn", + ) self.layer_norm1 = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) self.mlp = BlipMLP(config, @@ -374,11 +385,6 @@ def __init__( prefix: str = "", ) -> None: super().__init__() - - tp_size = get_tensor_model_parallel_world_size() - num_heads = config.num_attention_heads - self.shard_weight = USE_XFORMERS_OPS and num_heads % tp_size == 0 - self.config = config self.embeddings = BlipVisionEmbeddings(config) @@ -422,7 +428,7 @@ def load_weights(self, weights: Iterable[Tuple[str, ("qkv_proj", "q_proj", "q"), ("qkv_proj", "k_proj", "k"), ("qkv_proj", "v_proj", "v"), - ] if self.shard_weight else [] + ] params_dict = dict(self.named_parameters()) loaded_params: Set[str] = set() layer_count = len(self.encoder.layers) diff --git a/vllm/model_executor/models/clip.py b/vllm/model_executor/models/clip.py index 184758f4a8a45..7f638506f9fb2 100644 --- a/vllm/model_executor/models/clip.py +++ b/vllm/model_executor/models/clip.py @@ -5,10 +5,11 @@ import numpy as np import torch import torch.nn as nn +import torch.nn.functional as F from PIL import Image from transformers import CLIPVisionConfig -from transformers.models.clip.modeling_clip import CLIPSdpaAttention +from vllm.attention.selector import _Backend from vllm.config import ModelConfig from vllm.distributed import divide, get_tensor_model_parallel_world_size from vllm.inputs import DecoderOnlyInputs, token_inputs @@ -23,11 +24,7 @@ repeat_and_pad_placeholder_tokens) from vllm.sequence import SequenceData -try: - from xformers import ops as xops - USE_XFORMERS_OPS = True -except ImportError: - USE_XFORMERS_OPS = False +from .utils import get_vit_attn_backend def get_clip_patch_grid_length(*, image_size: int, patch_size: int) -> int: @@ 
-197,7 +194,7 @@ def forward(self, pixel_values: torch.Tensor) -> torch.Tensor: return embeddings -class CLIPParallelAttention(nn.Module): +class CLIPAttention(nn.Module): """Multi-headed attention from 'Attention Is All You Need' paper""" def __init__( @@ -237,6 +234,12 @@ def __init__( self.tp_size = get_tensor_model_parallel_world_size() self.num_heads_per_partition = divide(self.num_heads, self.tp_size) + # Detect attention implementation. + self.attn_backend = get_vit_attn_backend(support_fa=False) + if self.attn_backend not in {_Backend.TORCH_SDPA, _Backend.XFORMERS}: + raise RuntimeError( + f"CLIP does not support {self.attn_backend} backend now.") + def _shape(self, tensor: torch.Tensor, seq_len: int, bsz: int): return tensor.view(bsz, seq_len, self.num_heads, self.head_dim).transpose(1, 2).contiguous() @@ -261,11 +264,26 @@ def forward( self.num_heads_per_partition, self.head_dim) - out = xops.memory_efficient_attention_forward(query_states, - key_states, - value_states, - p=self.dropout, - scale=self.scale) + if self.attn_backend == _Backend.XFORMERS: + from xformers import ops as xops + + out = xops.memory_efficient_attention_forward(query_states, + key_states, + value_states, + p=self.dropout, + scale=self.scale) + elif self.attn_backend == _Backend.TORCH_SDPA: + query_states, key_states, value_states = (x.transpose(1, 2) + for x in (query_states, + key_states, + value_states)) + out = F.scaled_dot_product_attention(query_states, + key_states, + value_states, + dropout_p=self.dropout, + scale=self.scale) + out = out.transpose(1, 2) + out = out.view(bsz, tgt_len, -1) attn_output, _ = self.out_proj(out) @@ -311,17 +329,11 @@ def __init__( prefix: str = "", ) -> None: super().__init__() - - num_heads = config.num_attention_heads - tp_size = get_tensor_model_parallel_world_size() - if USE_XFORMERS_OPS and num_heads % tp_size == 0: - self.self_attn = CLIPParallelAttention( - config, - quant_config=quant_config, - prefix=f"{prefix}.self_attn", - ) - else: - self.self_attn = CLIPSdpaAttention(config) + self.self_attn = CLIPAttention( + config, + quant_config=quant_config, + prefix=f"{prefix}.self_attn", + ) self.layer_norm1 = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) self.mlp = CLIPMLP(config, @@ -461,11 +473,6 @@ def __init__( prefix: str = "", ) -> None: super().__init__() - - tp_size = get_tensor_model_parallel_world_size() - num_heads = config.num_attention_heads - self.shard_weight = USE_XFORMERS_OPS and num_heads % tp_size == 0 - self.vision_model = CLIPVisionTransformer( config=config, quant_config=quant_config, @@ -490,7 +497,7 @@ def load_weights(self, weights: Iterable[Tuple[str, ("qkv_proj", "q_proj", "q"), ("qkv_proj", "k_proj", "k"), ("qkv_proj", "v_proj", "v"), - ] if self.shard_weight else [] + ] params_dict = dict(self.named_parameters()) loaded_params: Set[str] = set() layer_count = len(self.vision_model.encoder.layers) diff --git a/vllm/model_executor/models/intern_vit.py b/vllm/model_executor/models/intern_vit.py index bd91a0806ae5c..c4346fcb3bd2a 100644 --- a/vllm/model_executor/models/intern_vit.py +++ b/vllm/model_executor/models/intern_vit.py @@ -12,6 +12,7 @@ import torch.nn.functional as F from transformers import PretrainedConfig +from vllm.attention.selector import _Backend from vllm.distributed import (divide, get_tensor_model_parallel_rank, get_tensor_model_parallel_world_size, split_tensor_along_last_dim, @@ -24,11 +25,7 @@ from vllm.model_executor.layers.quantization import QuantizationConfig from 
vllm.model_executor.model_loader.weight_utils import default_weight_loader -try: - from xformers import ops as xops - USE_XFORMERS_OPS = True -except ImportError: - USE_XFORMERS_OPS = False +from .utils import get_vit_attn_backend NORM2FN = { 'rms_norm': RMSNorm, @@ -186,6 +183,11 @@ def __init__( prefix=f"{prefix}.proj", ) + self.attn_backend = get_vit_attn_backend(support_fa=False) + if self.attn_backend not in {_Backend.TORCH_SDPA, _Backend.XFORMERS}: + raise RuntimeError( + f"InternViT does not support {self.attn_backend} backend now.") + def _apply_qk_norm(self, q: torch.Tensor, k: torch.Tensor): if self.tp_size > 1: q = tensor_model_parallel_all_gather(q.contiguous()) @@ -211,11 +213,21 @@ def forward(self, x: torch.Tensor) -> torch.Tensor: k = k.view(B, N, self.num_heads_per_partition, self.head_dim) v = v.view(B, N, self.num_heads_per_partition, self.head_dim) - x = xops.memory_efficient_attention_forward(q, k, v, scale=self.scale) - x = x.view(B, N, -1) + if self.attn_backend == _Backend.XFORMERS: + from xformers import ops as xops - x, _ = self.proj(x) - return x + out = xops.memory_efficient_attention_forward(q, + k, + v, + scale=self.scale) + elif self.attn_backend == _Backend.TORCH_SDPA: + q, k, v = (x.transpose(1, 2) for x in (q, k, v)) + out = F.scaled_dot_product_attention(q, k, v, scale=self.scale) + out = out.transpose(1, 2) + + out = out.view(B, N, -1) + out, _ = self.proj(out) + return out class InternSdpaAttention(nn.Module): @@ -362,7 +374,7 @@ def _init_attn( tp_size = get_tensor_model_parallel_world_size() num_heads = config.num_attention_heads - if USE_XFORMERS_OPS and (num_heads + num_dummy_heads) % tp_size == 0: + if (num_heads + num_dummy_heads) % tp_size == 0: return InternParallelAttention(config, quant_config=quant_config, num_dummy_heads=num_dummy_heads, diff --git a/vllm/model_executor/models/molmo.py b/vllm/model_executor/models/molmo.py index 035a1e2ab7b02..a7c90a3f5031b 100644 --- a/vllm/model_executor/models/molmo.py +++ b/vllm/model_executor/models/molmo.py @@ -187,7 +187,7 @@ def __init__( ) # Detect attention implementation. - self.attn_backend: _Backend = get_vit_attn_backend() + self.attn_backend: _Backend = get_vit_attn_backend(support_fa=True) if self.attn_backend not in { _Backend.FLASH_ATTN, _Backend.TORCH_SDPA, _Backend.XFORMERS }: diff --git a/vllm/model_executor/models/qwen2_vl.py b/vllm/model_executor/models/qwen2_vl.py index ef6b52db6e17d..a929b9323b245 100644 --- a/vllm/model_executor/models/qwen2_vl.py +++ b/vllm/model_executor/models/qwen2_vl.py @@ -260,7 +260,7 @@ def __init__( prefix=f"{prefix}.proj") # Detect attention implementation. 
- self.attn_backend: _Backend = get_vit_attn_backend() + self.attn_backend: _Backend = get_vit_attn_backend(support_fa=True) if self.attn_backend not in { _Backend.FLASH_ATTN, _Backend.TORCH_SDPA, _Backend.XFORMERS }: diff --git a/vllm/model_executor/models/siglip.py b/vllm/model_executor/models/siglip.py index c9e09b879843a..c58ad99692900 100644 --- a/vllm/model_executor/models/siglip.py +++ b/vllm/model_executor/models/siglip.py @@ -6,11 +6,12 @@ import numpy as np import torch +import torch.nn.functional as F from PIL import Image from torch import nn from transformers import SiglipVisionConfig -from transformers.models.siglip.modeling_siglip import SiglipSdpaAttention +from vllm.attention.selector import _Backend from vllm.config import ModelConfig from vllm.distributed import divide, get_tensor_model_parallel_world_size from vllm.inputs import DecoderOnlyInputs, token_inputs @@ -27,11 +28,7 @@ repeat_and_pad_placeholder_tokens) from vllm.sequence import SequenceData -try: - from xformers import ops as xops - USE_XFORMERS_OPS = True -except ImportError: - USE_XFORMERS_OPS = False +from .utils import get_vit_attn_backend def get_siglip_patch_grid_length(*, image_size: int, patch_size: int) -> int: @@ -254,7 +251,7 @@ def forward(self, return embeddings -class SiglipParallelAttention(nn.Module): +class SiglipAttention(nn.Module): def __init__( self, @@ -293,6 +290,11 @@ def __init__( self.tp_size = get_tensor_model_parallel_world_size() self.num_heads_per_partition = divide(self.num_heads, self.tp_size) + self.attn_backend = get_vit_attn_backend(support_fa=False) + if self.attn_backend not in {_Backend.TORCH_SDPA, _Backend.XFORMERS}: + raise RuntimeError( + f"SIGLIP does not support {self.attn_backend} backend now.") + def forward( self, hidden_states: torch.Tensor, @@ -313,11 +315,26 @@ def forward( self.num_heads_per_partition, self.head_dim) - out = xops.memory_efficient_attention_forward(query_states, - key_states, - value_states, - p=self.dropout, - scale=self.scale) + if self.attn_backend == _Backend.XFORMERS: + from xformers import ops as xops + + out = xops.memory_efficient_attention_forward(query_states, + key_states, + value_states, + p=self.dropout, + scale=self.scale) + elif self.attn_backend == _Backend.TORCH_SDPA: + query_states, key_states, value_states = (x.transpose(1, 2) + for x in (query_states, + key_states, + value_states)) + out = F.scaled_dot_product_attention(query_states, + key_states, + value_states, + dropout_p=self.dropout, + scale=self.scale) + out = out.transpose(1, 2) + out = out.view(batch_size, q_len, -1) attn_output, _ = self.out_proj(out) @@ -372,17 +389,11 @@ def __init__( self.embed_dim = config.hidden_size - num_heads = config.num_attention_heads - tp_size = get_tensor_model_parallel_world_size() - if USE_XFORMERS_OPS and num_heads % tp_size == 0: - self.self_attn = SiglipParallelAttention( - config, - quant_config=quant_config, - prefix=f"{prefix}.self_attn", - ) - else: - self.self_attn = SiglipSdpaAttention(config) - + self.self_attn = SiglipAttention( + config, + quant_config=quant_config, + prefix=f"{prefix}.self_attn", + ) self.layer_norm1 = nn.LayerNorm(self.embed_dim, eps=config.layer_norm_eps) self.mlp = SiglipMLP( @@ -569,10 +580,6 @@ def __init__( ) -> None: super().__init__() - num_heads = config.num_attention_heads - tp_size = get_tensor_model_parallel_world_size() - self.shard_weight = USE_XFORMERS_OPS and num_heads % tp_size == 0 - self.vision_model = SiglipVisionTransformer( config, quant_config, @@ -601,7 +608,7 @@ def 
load_weights(self, weights: Iterable[Tuple[str, ("qkv_proj", "q_proj", "q"), ("qkv_proj", "k_proj", "k"), ("qkv_proj", "v_proj", "v"), - ] if self.shard_weight else [] + ] params_dict = dict(self.named_parameters()) loaded_params: Set[str] = set() layer_count = len(self.vision_model.encoder.layers) diff --git a/vllm/model_executor/models/utils.py b/vllm/model_executor/models/utils.py index 7a4fcce95603d..03226f42ee053 100644 --- a/vllm/model_executor/models/utils.py +++ b/vllm/model_executor/models/utils.py @@ -587,7 +587,11 @@ def __call__(self, *args: Any, **kwargs: Any) -> Any: return llm(*args, **kwargs) -def get_vit_attn_backend() -> _Backend: +def get_vit_attn_backend(support_fa: bool = False) -> _Backend: + """ + Get the available attention backend for Vision Transformer. + """ + # TODO(Isotr0py): Remove `support_fa` after support FA for all ViTs attn. selected_backend: Optional[_Backend] = get_global_forced_attn_backend() if selected_backend is None: backend_by_env_var: Optional[str] = envs.VLLM_ATTENTION_BACKEND @@ -596,7 +600,7 @@ def get_vit_attn_backend() -> _Backend: if selected_backend is None: # For Volta and Turing GPUs, use xformers instead. device_available = current_platform.has_device_capability(80) - if device_available: + if device_available and support_fa: from transformers.utils import is_flash_attn_2_available if is_flash_attn_2_available(): selected_backend = _Backend.FLASH_ATTN @@ -606,7 +610,8 @@ def get_vit_attn_backend() -> _Backend: "so we use xformers backend instead. You can run " "`pip install flash-attn` to use flash-attention backend.") selected_backend = _Backend.XFORMERS - elif current_platform.is_cpu(): + elif current_platform.is_cpu() or current_platform.is_rocm(): + # ROCM doesn't support xformers selected_backend = _Backend.TORCH_SDPA else: selected_backend = _Backend.XFORMERS From 4186be8111e20c64d0cbcbdebbdd1081e77f1075 Mon Sep 17 00:00:00 2001 From: B-201 Date: Mon, 18 Nov 2024 23:08:30 +0800 Subject: [PATCH 014/122] [Doc] Update doc for LoRA support in GLM-4V (#10425) Signed-off-by: B-201 --- docs/source/models/supported_models.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/source/models/supported_models.rst b/docs/source/models/supported_models.rst index 96a513d42753b..e902d393f2f70 100644 --- a/docs/source/models/supported_models.rst +++ b/docs/source/models/supported_models.rst @@ -446,7 +446,7 @@ Text Generation - GLM-4V - T + I - :code:`THUDM/glm-4v-9b` etc. - - + - ✅︎ - ✅︎ * - :code:`H2OVLChatModel` - H2OVL From 7851b45196aff994277ec832c0cf5bec0073f08e Mon Sep 17 00:00:00 2001 From: youkaichao Date: Mon, 18 Nov 2024 07:20:06 -0800 Subject: [PATCH 015/122] [5/N][torch.compile] torch.jit.script --> torch.compile (#10406) Signed-off-by: youkaichao --- vllm/model_executor/layers/rejection_sampler.py | 2 +- vllm/model_executor/layers/vocab_parallel_embedding.py | 4 ++-- vllm/model_executor/models/phi3_small.py | 4 ++-- vllm/worker/model_runner.py | 2 +- 4 files changed, 6 insertions(+), 6 deletions(-) diff --git a/vllm/model_executor/layers/rejection_sampler.py b/vllm/model_executor/layers/rejection_sampler.py index 2e9a0e170693b..3ab0ba9e9f5c2 100644 --- a/vllm/model_executor/layers/rejection_sampler.py +++ b/vllm/model_executor/layers/rejection_sampler.py @@ -368,7 +368,7 @@ def _smallest_positive_value(self) -> float: # Note that we always sample with replacement. # probs will be modified in place, but this is fine, as we pass # in a copy already. 
-@torch.jit.script +@torch.compile(dynamic=True) def _multinomial( probs: torch.Tensor, num_samples: int, diff --git a/vllm/model_executor/layers/vocab_parallel_embedding.py b/vllm/model_executor/layers/vocab_parallel_embedding.py index 52771f50a7a23..30548e656c557 100644 --- a/vllm/model_executor/layers/vocab_parallel_embedding.py +++ b/vllm/model_executor/layers/vocab_parallel_embedding.py @@ -133,13 +133,13 @@ def __post_init__(self): assert self.num_added_elements <= self.num_added_elements_padded -@torch.jit.script +@torch.compile(dynamic=True) def get_masked_input_and_mask( input_: torch.Tensor, org_vocab_start_index: int, org_vocab_end_index: int, num_org_vocab_padding: int, added_vocab_start_index: int, added_vocab_end_index: int) -> Tuple[torch.Tensor, torch.Tensor]: - # torch.jit.script will fuse all of the pointwise ops below + # torch.compile will fuse all of the pointwise ops below # into a single kernel, making it very fast org_vocab_mask = (input_ >= org_vocab_start_index) & (input_ < org_vocab_end_index) diff --git a/vllm/model_executor/models/phi3_small.py b/vllm/model_executor/models/phi3_small.py index a78e4d355a314..f71cbd1264c45 100644 --- a/vllm/model_executor/models/phi3_small.py +++ b/vllm/model_executor/models/phi3_small.py @@ -54,12 +54,12 @@ def weight_loader(self, param: torch.nn.Parameter, return load_column_parallel_weight(param, loaded_weight) -@torch.jit.script +@torch.compile(dynamic=True) def quick_gelu(x): return x * torch.sigmoid(1.702 * x) -@torch.jit.script +@torch.compile(dynamic=True) def gegelu(input, limit: Optional[float] = None): a_gelu, a_linear = input[..., ::2], input[..., 1::2] if limit is not None: diff --git a/vllm/worker/model_runner.py b/vllm/worker/model_runner.py index fb5813651680b..ed0360fb7f727 100644 --- a/vllm/worker/model_runner.py +++ b/vllm/worker/model_runner.py @@ -1769,7 +1769,7 @@ def capture( # Run the model a few times without capturing the graph. # This is to make sure that the captured graph does not include the # kernel launches for initial benchmarking (e.g., Triton autotune). - # Note one iteration is not enough for torch.jit.script + # Note one iteration is not enough for torch.compile for _ in range(_NUM_WARMUP_ITERS): self.model( input_ids=input_ids, From 31894a21559436f4a9d72f751e8bd7ba4ab18613 Mon Sep 17 00:00:00 2001 From: ismael-dm Date: Mon, 18 Nov 2024 18:52:12 +0100 Subject: [PATCH 016/122] [Doc] Add documentation for Structured Outputs (#9943) Signed-off-by: ismael-dm --- docs/source/index.rst | 1 + docs/source/models/structured_outputs.rst | 173 ++++++++++++++++++ .../offline_inference_structured_outputs.py | 78 ++++++++ ...enai_chat_completion_structured_outputs.py | 94 ++++++++++ 4 files changed, 346 insertions(+) create mode 100644 docs/source/models/structured_outputs.rst create mode 100644 examples/offline_inference_structured_outputs.py create mode 100644 examples/openai_chat_completion_structured_outputs.py diff --git a/docs/source/index.rst b/docs/source/index.rst index 3b2698a8845ed..b04acbbce4169 100644 --- a/docs/source/index.rst +++ b/docs/source/index.rst @@ -101,6 +101,7 @@ Documentation models/engine_args models/lora models/vlm + models/structured_outputs models/spec_decode models/performance diff --git a/docs/source/models/structured_outputs.rst b/docs/source/models/structured_outputs.rst new file mode 100644 index 0000000000000..ff4ff7169fc5f --- /dev/null +++ b/docs/source/models/structured_outputs.rst @@ -0,0 +1,173 @@ +.. 
_structured_outputs: + +Structured Outputs +================== + +vLLM supports the generation of structured outputs using `outlines `_ or `lm-format-enforcer `_ as backends for the guided decoding. +This document shows you some examples of the different options that are available to generate structured outputs. + + +Online Inference (OpenAI API) +----------------------------- + +You can generate structured outputs using the OpenAI’s `Completions `_ and `Chat `_ API. + +The following parameters are supported, which must be added as extra parameters: + +- ``guided_choice``: the output will be exactly one of the choices. +- ``guided_regex``: the output will follow the regex pattern. +- ``guided_json``: the output will follow the JSON schema. +- ``guided_grammar``: the output will follow the context free grammar. +- ``guided_whitespace_pattern``: used to override the default whitespace pattern for guided json decoding. +- ``guided_decoding_backend``: used to select the guided decoding backend to use. + +You can see the complete list of supported parameters on the `OpenAI Compatible Server `_ page. + +Now let´s see an example for each of the cases, starting with the ``guided_choice``, as it´s the easiest one: + +.. code-block:: python + + from openai import OpenAI + client = OpenAI( + base_url="http://localhost:8000/v1", + api_key="-", + ) + + completion = client.chat.completions.create( + model="Qwen/Qwen2.5-3B-Instruct", + messages=[ + {"role": "user", "content": "Classify this sentiment: vLLM is wonderful!"} + ], + extra_body={"guided_choice": ["positive", "negative"]}, + ) + print(completion.choices[0].message.content) + + +The next example shows how to use the ``guided_regex``. The idea is to generate an email address, given a simple regex template: + +.. code-block:: python + + completion = client.chat.completions.create( + model="Qwen/Qwen2.5-3B-Instruct", + messages=[ + { + "role": "user", + "content": "Generate an example email address for Alan Turing, who works in Enigma. End in .com and new line. Example result: alan.turing@enigma.com\n", + } + ], + extra_body={"guided_regex": "\w+@\w+\.com\n", "stop": ["\n"]}, + ) + print(completion.choices[0].message.content) + +One of the most relevant features in structured text generation is the option to generate a valid JSON with pre-defined fields and formats. +For this we can use the ``guided_json`` parameter in two different ways: + +- Using directly a `JSON Schema `_ +- Defining a `Pydantic model `_ and then extracting the JSON Schema from it (which is normally an easier option). + +The next example shows how to use the ``guided_json`` parameter with a Pydantic model: + +.. code-block:: python + + from pydantic import BaseModel + from enum import Enum + + class CarType(str, Enum): + sedan = "sedan" + suv = "SUV" + truck = "Truck" + coupe = "Coupe" + + + class CarDescription(BaseModel): + brand: str + model: str + car_type: CarType + + + json_schema = CarDescription.model_json_schema() + + completion = client.chat.completions.create( + model="Qwen/Qwen2.5-3B-Instruct", + messages=[ + { + "role": "user", + "content": "Generate a JSON with the brand, model and car_type of the most iconic car from the 90's", + } + ], + extra_body={"guided_json": json_schema}, + ) + print(completion.choices[0].message.content) + +.. tip:: + While not strictly necessary, normally it´s better to indicate in the prompt that a JSON needs to be generated and which fields and how should the LLM fill them. + This can improve the results notably in most cases. 
+ + +Finally we have the ``guided_grammar``, which probably is the most difficult one to use but it´s really powerful, as it allows us to define complete languages like SQL queries. +It works by using a context free EBNF grammar, which for example we can use to define a specific format of simplified SQL queries, like in the example below: + +.. code-block:: python + + simplified_sql_grammar = """ + ?start: select_statement + + ?select_statement: "SELECT " column_list " FROM " table_name + + ?column_list: column_name ("," column_name)* + + ?table_name: identifier + + ?column_name: identifier + + ?identifier: /[a-zA-Z_][a-zA-Z0-9_]*/ + """ + + completion = client.chat.completions.create( + model="Qwen/Qwen2.5-3B-Instruct", + messages=[ + { + "role": "user", + "content": "Generate an SQL query to show the 'username' and 'email' from the 'users' table.", + } + ], + extra_body={"guided_grammar": simplified_sql_grammar}, + ) + print(completion.choices[0].message.content) + +The complete code of the examples can be found on `examples/openai_chat_completion_structured_outputs.py `_. + + +Offline Inference +----------------- + +Offline inference allows for the same types of guided decoding. +To use it, we´ll need to configure the guided decoding using the class ``GuidedDecodingParams`` inside ``SamplingParams``. +The main available options inside ``GuidedDecodingParams`` are: + +- ``json`` +- ``regex`` +- ``choice`` +- ``grammar`` +- ``backend`` +- ``whitespace_pattern`` + +These parameters can be used in the same way as the parameters from the Online Inference examples above. +One example for the usage of the ``choices`` parameter is shown below: + +.. code-block:: python + + from vllm import LLM, SamplingParams + from vllm.sampling_params import GuidedDecodingParams + + llm = LLM(model="HuggingFaceTB/SmolLM2-1.7B-Instruct") + + guided_decoding_params = GuidedDecodingParams(choice=["Positive", "Negative"]) + sampling_params = SamplingParams(guided_decoding=guided_decoding_params) + outputs = llm.generate( + prompts="Classify this sentiment: vLLM is wonderful!", + sampling_params=sampling_params, + ) + print(outputs[0].outputs[0].text) + +A complete example with all options can be found in `examples/offline_inference_structured_outputs.py `_. \ No newline at end of file diff --git a/examples/offline_inference_structured_outputs.py b/examples/offline_inference_structured_outputs.py new file mode 100644 index 0000000000000..00d864606eeff --- /dev/null +++ b/examples/offline_inference_structured_outputs.py @@ -0,0 +1,78 @@ +from enum import Enum + +from pydantic import BaseModel + +from vllm import LLM, SamplingParams +from vllm.sampling_params import GuidedDecodingParams + +llm = LLM(model="Qwen/Qwen2.5-3B-Instruct", max_model_len=100) + +# Guided decoding by Choice (list of possible options) +guided_decoding_params = GuidedDecodingParams(choice=["Positive", "Negative"]) +sampling_params = SamplingParams(guided_decoding=guided_decoding_params) +outputs = llm.generate( + prompts="Classify this sentiment: vLLM is wonderful!", + sampling_params=sampling_params, +) +print(outputs[0].outputs[0].text) + +# Guided decoding by Regex +guided_decoding_params = GuidedDecodingParams(regex="\w+@\w+\.com\n") +sampling_params = SamplingParams(guided_decoding=guided_decoding_params, + stop=["\n"]) +prompt = ("Generate an email address for Alan Turing, who works in Enigma." + "End in .com and new line. 
Example result:" + "alan.turing@enigma.com\n") +outputs = llm.generate(prompts=prompt, sampling_params=sampling_params) +print(outputs[0].outputs[0].text) + + +# Guided decoding by JSON using Pydantic schema +class CarType(str, Enum): + sedan = "sedan" + suv = "SUV" + truck = "Truck" + coupe = "Coupe" + + +class CarDescription(BaseModel): + brand: str + model: str + car_type: CarType + + +json_schema = CarDescription.model_json_schema() + +guided_decoding_params = GuidedDecodingParams(json=json_schema) +sampling_params = SamplingParams(guided_decoding=guided_decoding_params) +prompt = ("Generate a JSON with the brand, model and car_type of" + "the most iconic car from the 90's") +outputs = llm.generate( + prompts=prompt, + sampling_params=sampling_params, +) +print(outputs[0].outputs[0].text) + +# Guided decoding by Grammar +simplified_sql_grammar = """ + ?start: select_statement + + ?select_statement: "SELECT " column_list " FROM " table_name + + ?column_list: column_name ("," column_name)* + + ?table_name: identifier + + ?column_name: identifier + + ?identifier: /[a-zA-Z_][a-zA-Z0-9_]*/ +""" +guided_decoding_params = GuidedDecodingParams(grammar=simplified_sql_grammar) +sampling_params = SamplingParams(guided_decoding=guided_decoding_params) +prompt = ("Generate an SQL query to show the 'username' and 'email'" + "from the 'users' table.") +outputs = llm.generate( + prompts=prompt, + sampling_params=sampling_params, +) +print(outputs[0].outputs[0].text) diff --git a/examples/openai_chat_completion_structured_outputs.py b/examples/openai_chat_completion_structured_outputs.py new file mode 100644 index 0000000000000..8c059c7ca07ce --- /dev/null +++ b/examples/openai_chat_completion_structured_outputs.py @@ -0,0 +1,94 @@ +from enum import Enum + +from openai import OpenAI +from pydantic import BaseModel + +client = OpenAI( + base_url="http://localhost:8000/v1", + api_key="-", +) + +# Guided decoding by Choice (list of possible options) +completion = client.chat.completions.create( + model="Qwen/Qwen2.5-3B-Instruct", + messages=[{ + "role": "user", + "content": "Classify this sentiment: vLLM is wonderful!" + }], + extra_body={"guided_choice": ["positive", "negative"]}, +) +print(completion.choices[0].message.content) + +# Guided decoding by Regex +prompt = ("Generate an email address for Alan Turing, who works in Enigma." + "End in .com and new line. 
Example result:" + "alan.turing@enigma.com\n") + +completion = client.chat.completions.create( + model="Qwen/Qwen2.5-3B-Instruct", + messages=[{ + "role": "user", + "content": prompt, + }], + extra_body={ + "guided_regex": "\w+@\w+\.com\n", + "stop": ["\n"] + }, +) +print(completion.choices[0].message.content) + + +# Guided decoding by JSON using Pydantic schema +class CarType(str, Enum): + sedan = "sedan" + suv = "SUV" + truck = "Truck" + coupe = "Coupe" + + +class CarDescription(BaseModel): + brand: str + model: str + car_type: CarType + + +json_schema = CarDescription.model_json_schema() + +prompt = ("Generate a JSON with the brand, model and car_type of" + "the most iconic car from the 90's") +completion = client.chat.completions.create( + model="Qwen/Qwen2.5-3B-Instruct", + messages=[{ + "role": "user", + "content": prompt, + }], + extra_body={"guided_json": json_schema}, +) +print(completion.choices[0].message.content) + +# Guided decoding by Grammar +simplified_sql_grammar = """ + ?start: select_statement + + ?select_statement: "SELECT " column_list " FROM " table_name + + ?column_list: column_name ("," column_name)* + + ?table_name: identifier + + ?column_name: identifier + + ?identifier: /[a-zA-Z_][a-zA-Z0-9_]*/ +""" + +prompt = ("Generate an SQL query to show the 'username' and 'email'" + "from the 'users' table.") +completion = client.chat.completions.create( + model="Qwen/Qwen2.5-3B-Instruct", + messages=[{ + "role": "user", + "content": prompt, + }], + extra_body={"guided_grammar": simplified_sql_grammar}, +) +print(completion.choices[0].message.content) From 4f686d139f6acb31ea31eaf57ed1bb3920a77682 Mon Sep 17 00:00:00 2001 From: Andrew Nesbitt Date: Mon, 18 Nov 2024 17:52:42 +0000 Subject: [PATCH 017/122] Fix open_collective value in FUNDING.yml (#10426) Signed-off-by: Andrew Nesbitt --- .github/FUNDING.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/FUNDING.yml b/.github/FUNDING.yml index 71f4e520135d4..d1f6105a47166 100644 --- a/.github/FUNDING.yml +++ b/.github/FUNDING.yml @@ -1,2 +1,2 @@ github: [vllm-project] -open_collective: [vllm] +open_collective: vllm From 281cc4b3cd2f6c84c2cd8272ef83d97edd1c323a Mon Sep 17 00:00:00 2001 From: Michael Goin Date: Mon, 18 Nov 2024 13:04:14 -0500 Subject: [PATCH 018/122] [Model][Bugfix] Support TP for PixtralHF ViT (#10405) Signed-off-by: mgoin --- vllm/model_executor/models/pixtral.py | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/vllm/model_executor/models/pixtral.py b/vllm/model_executor/models/pixtral.py index d44a538d56b8c..f7f46770057e2 100644 --- a/vllm/model_executor/models/pixtral.py +++ b/vllm/model_executor/models/pixtral.py @@ -17,6 +17,7 @@ from vllm.attention import AttentionMetadata from vllm.config import ModelConfig, VllmConfig +from vllm.distributed import divide, get_tensor_model_parallel_world_size from vllm.inputs import (INPUT_REGISTRY, DecoderOnlyInputs, DummyData, InputContext, token_inputs) from vllm.model_executor.layers.activation import get_act_and_mul_fn @@ -843,17 +844,20 @@ def __init__( self.config = config assert not config.hidden_size % config.num_attention_heads - self.n_heads = config.num_attention_heads + self.total_num_heads = config.num_attention_heads + tp_size = get_tensor_model_parallel_world_size() + self.n_heads = divide(config.num_attention_heads, tp_size) self.head_dim = config.hidden_size // config.num_attention_heads self.qkv_proj = QKVParallelLinear( hidden_size=config.hidden_size, head_size=self.head_dim, - 
total_num_heads=self.n_heads, + total_num_heads=self.total_num_heads, bias=False, quant_config=quant_config, prefix=f"{prefix}.qkv_proj", ) + assert self.total_num_heads * self.head_dim == config.hidden_size self.o_proj = RowParallelLinear( input_size=config.hidden_size, output_size=config.hidden_size, From 6b2d25efc78f21867ca37e3f707c5a94f906478f Mon Sep 17 00:00:00 2001 From: Yan Ma Date: Tue, 19 Nov 2024 02:18:05 +0800 Subject: [PATCH 019/122] [Hardware][XPU] AWQ/GPTQ support for xpu backend (#10107) Signed-off-by: yan ma --- .../quantization/supported_hardware.rst | 8 +- tests/quantization/test_ipex_quant.py | 10 +- vllm/model_executor/layers/linear.py | 2 +- .../layers/quantization/gptq.py | 1 - .../layers/quantization/gptq_marlin.py | 4 + .../layers/quantization/ipex_quant.py | 169 +++++++++++++----- vllm/model_executor/model_loader/loader.py | 4 +- 7 files changed, 146 insertions(+), 52 deletions(-) diff --git a/docs/source/quantization/supported_hardware.rst b/docs/source/quantization/supported_hardware.rst index 9bf0cdb80376d..09f8e7112cf0c 100644 --- a/docs/source/quantization/supported_hardware.rst +++ b/docs/source/quantization/supported_hardware.rst @@ -27,7 +27,7 @@ The table below shows the compatibility of various quantization implementations - ✅︎ - ✅︎ - ✗ - - ✗ + - ✅︎ - ✅︎ - ✗ - ✗ @@ -38,8 +38,8 @@ The table below shows the compatibility of various quantization implementations - ✅︎ - ✅︎ - ✗ - - ✗ - - ✗ + - ✅︎ + - ✅︎ - ✗ - ✗ * - Marlin (GPTQ/AWQ/FP8) @@ -129,4 +129,4 @@ Notes: Please note that this compatibility chart may be subject to change as vLLM continues to evolve and expand its support for different hardware platforms and quantization methods. -For the most up-to-date information on hardware support and quantization methods, please check the `quantization directory `_ or consult with the vLLM development team. \ No newline at end of file +For the most up-to-date information on hardware support and quantization methods, please check the `quantization directory `_ or consult with the vLLM development team. diff --git a/tests/quantization/test_ipex_quant.py b/tests/quantization/test_ipex_quant.py index d541efcefcac3..68a73f0f8ab48 100644 --- a/tests/quantization/test_ipex_quant.py +++ b/tests/quantization/test_ipex_quant.py @@ -1,5 +1,5 @@ """Test model set-up and inference for quantized HF models supported - on the CPU backend using IPEX (including AWQ). + on the CPU/GPU backend using IPEX (including AWQ/GPTQ). Validating the configuration and printing results for manual checking. 
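For readers who want to reproduce the manual check outside of pytest, the same checkpoints can be driven through the public LLM entry point; a minimal sketch using one of the AWQ models added to MODELS below (sampling settings here are illustrative assumptions, not part of this patch):

from vllm import LLM, SamplingParams

# On an Intel CPU/XPU host, the checkpoint's AWQ quant config is routed
# to the IPEX backend by the override added in this patch.
llm = LLM(model="AMead10/Llama-3.2-1B-Instruct-AWQ", dtype="bfloat16")
params = SamplingParams(temperature=0.0, max_tokens=32)
outputs = llm.generate(["vLLM is a high-throughput inference engine that"], params)
print(outputs[0].outputs[0].text)
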
@@ -11,13 +11,15 @@ from vllm.platforms import current_platform MODELS = [ - "casperhansen/llama-3-8b-instruct-awq", + "AMead10/Llama-3.2-1B-Instruct-AWQ", + "shuyuej/Llama-3.2-1B-Instruct-GPTQ", # with g_idx ] DTYPE = ["bfloat16"] -@pytest.mark.skipif(not current_platform.is_cpu(), - reason="only supports the CPU backend.") +@pytest.mark.skipif(not current_platform.is_cpu() + and not current_platform.is_xpu(), + reason="only supports Intel CPU/XPU backend.") @pytest.mark.parametrize("model", MODELS) @pytest.mark.parametrize("dtype", DTYPE) def test_ipex_quant(vllm_runner, model, dtype): diff --git a/vllm/model_executor/layers/linear.py b/vllm/model_executor/layers/linear.py index 94f30412e43b3..e1f8a6e36d781 100644 --- a/vllm/model_executor/layers/linear.py +++ b/vllm/model_executor/layers/linear.py @@ -27,7 +27,7 @@ "AWQLinearMethod", "GPTQMarlinLinearMethod", "Fp8LinearMethod", "MarlinLinearMethod", "QQQLinearMethod", "GPTQMarlin24LinearMethod", "TPUInt8LinearMethod", "GPTQLinearMethod", "FBGEMMFp8LinearMethod", - "ModelOptFp8LinearMethod", "IPEXAWQLinearMethod" + "ModelOptFp8LinearMethod", "IPEXAWQLinearMethod", "IPEXGPTQLinearMethod" ] diff --git a/vllm/model_executor/layers/quantization/gptq.py b/vllm/model_executor/layers/quantization/gptq.py index 0aa605e62454e..abafad0f1047e 100644 --- a/vllm/model_executor/layers/quantization/gptq.py +++ b/vllm/model_executor/layers/quantization/gptq.py @@ -210,7 +210,6 @@ def create_weights( def process_weights_after_loading(self, layer: torch.nn.Module) -> None: # for torch.compile - layer.qweight = Parameter(layer.qweight.data, requires_grad=False) layer.qzeros = Parameter(layer.qzeros.data, requires_grad=False) layer.qweight = Parameter(layer.qweight.data, requires_grad=False) layer.g_idx = Parameter(layer.g_idx.data, requires_grad=False) diff --git a/vllm/model_executor/layers/quantization/gptq_marlin.py b/vllm/model_executor/layers/quantization/gptq_marlin.py index 1f72e3afbbce5..a3e58bf1b2a4c 100644 --- a/vllm/model_executor/layers/quantization/gptq_marlin.py +++ b/vllm/model_executor/layers/quantization/gptq_marlin.py @@ -23,6 +23,7 @@ PackedColumnParameter, PackedvLLMParameter, RowvLLMParameter) +from vllm.platforms import current_platform from vllm.scalar_type import scalar_types logger = init_logger(__name__) @@ -134,6 +135,9 @@ def is_gptq_marlin_compatible(cls, quant_config: Dict[str, Any]): sym = quant_config.get("sym") desc_act = quant_config.get("desc_act") + if not current_platform.is_cuda(): + return False + if quant_method != "gptq": return False diff --git a/vllm/model_executor/layers/quantization/ipex_quant.py b/vllm/model_executor/layers/quantization/ipex_quant.py index 330c2ad195d78..c16a962134d06 100644 --- a/vllm/model_executor/layers/quantization/ipex_quant.py +++ b/vllm/model_executor/layers/quantization/ipex_quant.py @@ -2,21 +2,26 @@ import torch -from vllm.model_executor.layers.linear import LinearBase, LinearMethodBase -from vllm.model_executor.layers.quantization.awq import AWQLinearMethod +from vllm.model_executor.layers.linear import (LinearBase, LinearMethodBase, + UnquantizedLinearMethod) +from vllm.model_executor.layers.quantization.awq import (AWQLinearMethod, + is_layer_skipped_awq) from vllm.model_executor.layers.quantization.base_config import ( QuantizationConfig) +from vllm.model_executor.layers.quantization.gptq import GPTQLinearMethod from vllm.platforms import current_platform +MIN_IPEX_VERSION = "2.5.0" + class IPEXConfig(QuantizationConfig): - """INT8 quantization config class using IPEX for the CPU 
backend, - including AWQ. + """INT8 quantization config class using IPEX for the CPU/XPU backend, + including AWQ, GPTQ. """ IPEX_QUANT_METHOD_MAP = { "awq": 1, - "gptq": 2, + "gptq": 0, } def __init__( @@ -24,29 +29,30 @@ def __init__( method: str, weight_bits: int, group_size: int, + modules_to_not_convert: Optional[List[str]] = None, + desc_act: Optional[bool] = None, + lm_head_quantized: Optional[bool] = None, ) -> None: self.method = method self.weight_bits = weight_bits self.group_size = group_size + self.modules_to_not_convert = modules_to_not_convert or [] + self.desc_act = desc_act + self.lm_head_quantized = lm_head_quantized self.pack_factor = 32 // self.weight_bits if self.weight_bits not in [4]: raise ValueError(f"IPEX quantization supports weight bits [4], " f"but got {self.weight_bits}.") - if self.method == "awq": - self.quant_method = IPEXAWQLinearMethod - else: - raise ValueError(f"IPEX quantization supports [awq], " + if self.method not in ["awq", "gptq"]: + raise ValueError(f"IPEX quantization supports [awq, gptq], " f"but got {self.method}.") def __repr__(self) -> str: - return (f"IPEXConfig(method={self.method}" + return (f"IPEXConfig(method={self.method}," f"weight_bits={self.weight_bits}, " - f"group_size={self.group_size}") - - def get_ipex_quant_method_id(self) -> int: - return IPEXConfig.IPEX_QUANT_METHOD_MAP[self.method] + f"group_size={self.group_size})") @classmethod def get_name(cls) -> str: @@ -70,19 +76,32 @@ def get_config_filenames() -> List[str]: @classmethod def from_config(cls, config: Dict[str, Any]) -> "IPEXConfig": method = cls.get_from_keys(config, ["quant_method"]).lower() - weight_bits = cls.get_from_keys(config, ["w_bit", "bits"]) - group_size = cls.get_from_keys(config, ["q_group_size", "group_size"]) - return cls(method, weight_bits, group_size) + if method == "awq": + weight_bits = cls.get_from_keys(config, ["w_bit", "bits"]) + group_size = cls.get_from_keys(config, + ["q_group_size", "group_size"]) + modules_to_not_convert = cls.get_from_keys_or( + config, ["modules_to_not_convert"], None) + return cls(method, weight_bits, group_size, modules_to_not_convert, + False, False) + # otherwise for gptq + weight_bits = cls.get_from_keys(config, ["bits"]) + group_size = cls.get_from_keys(config, ["group_size"]) + lm_head_quantized = cls.get_from_keys_or(config, ["lm_head"], + default=False) + desc_act = cls.get_from_keys_or(config, ["desc_act"], default=False) + return cls(method, weight_bits, group_size, [], desc_act, + lm_head_quantized) @classmethod def override_quantization_method(cls, hf_quant_cfg, user_quant) -> Optional[str]: - if not current_platform.is_cpu(): + if not current_platform.is_cpu() and not current_platform.is_xpu(): return None quant_method = hf_quant_cfg.get("quant_method", "").lower() - if quant_method in ["awq"]: + if quant_method in ["awq", "gptq"]: return cls.get_name() return None @@ -90,12 +109,81 @@ def override_quantization_method(cls, hf_quant_cfg, def get_quant_method(self, layer: torch.nn.Module, prefix: str) -> Optional["LinearMethodBase"]: if isinstance(layer, LinearBase): - return self.quant_method(self) + if self.method == "awq": + if is_layer_skipped_awq(prefix, self.modules_to_not_convert): + return UnquantizedLinearMethod() + return IPEXAWQLinearMethod(self) + if self.method == "gptq": + return IPEXGPTQLinearMethod(self) return None +class IPEXGPTQLinearMethod(GPTQLinearMethod): + """GPTQ linear method using IPEX for the CPU/XPU backend. 
+ """ + + def __init__(self, quant_config: IPEXConfig): + self.quant_config = quant_config # type: ignore + + def process_weights_after_loading(self, layer: torch.nn.Module) -> None: + bias = layer.bias if not layer.skip_bias_add else None + + try: + import intel_extension_for_pytorch as ipex + if ipex.__version__ < MIN_IPEX_VERSION: + raise ImportError( + "intel_extension_for_pytorch version is " + "wrong. Please install " + f"intel_extension_for_pytorch>={MIN_IPEX_VERSION}.") + except ImportError as err: + raise ImportError( + "Please install " + f"intel_extension_for_pytorch>={MIN_IPEX_VERSION} via " + f"`pip install intel_extension_for_pytorch>={MIN_IPEX_VERSION}`" + " to use IPEX-AWQ linear method.") from err + # Using the compute dtype (lowp_mode) as INT8 to leverage instructions + # with better performance. + lowp_mode = ipex.quantization.WoqLowpMode.INT8 + # The weight will be de-packed from INT4 to INT8. + weight_dtype = ipex.quantization.WoqWeightDtype.INT4 + # The float activation will be quantized (dynamic, per-token) to INT8. + act_quant_mode = ipex.quantization.WoqActQuantMode.PER_BATCH_IC_BLOCK + + qconfig = ipex.quantization.get_weight_only_quant_qconfig_mapping( + weight_dtype=weight_dtype, + lowp_mode=lowp_mode, + act_quant_mode=act_quant_mode, + group_size=self.quant_config.group_size, + ) + layer.ipex_output_size = layer.qweight.shape[-1] + g_idx = layer.g_idx if self.quant_config.desc_act else None + layer.ipex_qlinear = ipex.llm.quantization.woq_linear. \ + IPEXWeightOnlyQuantizedLinear.from_weight( + layer.qweight, + layer.scales, + layer.qzeros, + layer.qweight.size(0), + layer.ipex_output_size, + qconfig=qconfig, + g_idx=g_idx, + bias=bias, + group_size=self.quant_config.group_size, + quant_method=IPEXConfig.IPEX_QUANT_METHOD_MAP["gptq"] + ) + + def apply(self, + layer: torch.nn.Module, + x: torch.Tensor, + bias: Optional[torch.Tensor] = None) -> torch.Tensor: + reshaped_x = x.reshape(-1, x.shape[-1]) + out = layer.ipex_qlinear(reshaped_x) + if bias is not None: + out.add_(bias) + return out.reshape(x.shape[:-1] + (layer.ipex_output_size, )) + + class IPEXAWQLinearMethod(AWQLinearMethod): - """AWQ linear method using IPEX for the CPU backend. + """AWQ linear method using IPEX for the CPU/XPU backend. """ def __init__(self, quant_config: IPEXConfig): @@ -108,15 +196,16 @@ def process_weights_after_loading(self, layer: torch.nn.Module) -> None: try: import intel_extension_for_pytorch as ipex - if ipex.__version__ < "2.4.0": - raise ImportError("intel_extension_for_pytorch version is " - "wrong. Please install " - "intel_extension_for_pytorch>=2.4.0.") + if ipex.__version__ < MIN_IPEX_VERSION: + raise ImportError( + "intel_extension_for_pytorch version is " + "wrong. 
Please install " + f"intel_extension_for_pytorch>={MIN_IPEX_VERSION}.") except ImportError as err: raise ImportError( "Please install " - "intel_extension_for_pytorch>=2.4.0 via " - "`pip install intel_extension_for_pytorch>=2.4.0`" + f"intel_extension_for_pytorch>={MIN_IPEX_VERSION} via " + f"`pip install intel_extension_for_pytorch>={MIN_IPEX_VERSION}`" " to use IPEX-AWQ linear method.") from err # Using the compute dtype (lowp_mode) as INT8 to leverage instructions @@ -136,19 +225,18 @@ def process_weights_after_loading(self, layer: torch.nn.Module) -> None: layer.ipex_output_size = layer.qweight.size( 1) * self.quant_config.pack_factor - layer.ipex_qlinear = ipex.nn.modules.weight_only_quantization.\ - WeightOnlyQuantizedLinear.from_weight( - layer.qweight, - layer.scales, - layer.qzeros, - layer.qweight.size(0), - layer.ipex_output_size, - qconfig=qconfig, - bias=bias, - group_size=self.quant_config.group_size, - quant_method= - self.quant_config.get_ipex_quant_method_id() # type: ignore - ) + layer.ipex_qlinear = ipex.llm.quantization.woq_linear. \ + IPEXWeightOnlyQuantizedLinear.from_weight( + layer.qweight, + layer.scales, + layer.qzeros, + layer.qweight.size(0), + layer.ipex_output_size, + qconfig=qconfig, + bias=bias, + group_size=self.quant_config.group_size, + quant_method=IPEXConfig.IPEX_QUANT_METHOD_MAP["awq"] # type: ignore + ) def apply(self, layer: torch.nn.Module, @@ -156,5 +244,4 @@ def apply(self, bias: Optional[torch.Tensor] = None) -> torch.Tensor: reshaped_x = x.reshape(-1, x.shape[-1]) out = layer.ipex_qlinear(reshaped_x) - return out.reshape(x.shape[:-1] + (layer.ipex_output_size, )) diff --git a/vllm/model_executor/model_loader/loader.py b/vllm/model_executor/model_loader/loader.py index d9ce85949e4ee..b41c23704b7ff 100644 --- a/vllm/model_executor/model_loader/loader.py +++ b/vllm/model_executor/model_loader/loader.py @@ -29,6 +29,8 @@ from vllm.logger import init_logger from vllm.model_executor.layers.linear import (ReplicatedLinear, RowParallelLinear) +from vllm.model_executor.layers.quantization.base_config import ( + QuantizeMethodBase) from vllm.model_executor.model_loader.tensorizer import ( TensorizerConfig, is_vllm_tensorized, load_with_tensorizer, serialize_vllm_model, tensorizer_weights_iterator) @@ -348,7 +350,7 @@ def load_model(self, vllm_config: VllmConfig) -> nn.Module: for _, module in model.named_modules(): quant_method = getattr(module, "quant_method", None) - if quant_method is not None: + if isinstance(quant_method, QuantizeMethodBase): # When quant methods need to process weights after loading # (for repacking, quantizing, etc), they expect parameters # to be on the global target device. 
This scope is for the From c2170a5b395acb9f5f4ce8425c3be18aacb67513 Mon Sep 17 00:00:00 2001 From: Angus Wang Date: Mon, 18 Nov 2024 11:39:40 -0800 Subject: [PATCH 020/122] [Kernel] Explicitly specify other value in tl.load calls (#9014) Signed-off-by: Angus Wang --- .../blocksparse_attention_kernel.py | 13 ++++++++++--- vllm/lora/ops/bgmv_expand.py | 4 +++- vllm/lora/ops/bgmv_expand_slice.py | 8 +++++++- vllm/lora/ops/sgmv_expand.py | 5 ++++- vllm/lora/ops/sgmv_expand_slice.py | 5 ++++- .../layers/quantization/awq_triton.py | 14 +++++++------- 6 files changed, 35 insertions(+), 14 deletions(-) diff --git a/vllm/attention/ops/blocksparse_attention/blocksparse_attention_kernel.py b/vllm/attention/ops/blocksparse_attention/blocksparse_attention_kernel.py index ec1c37c5bcb0e..727a470ba6d0e 100644 --- a/vllm/attention/ops/blocksparse_attention/blocksparse_attention_kernel.py +++ b/vllm/attention/ops/blocksparse_attention/blocksparse_attention_kernel.py @@ -157,19 +157,22 @@ def _fwd_kernel_inner( k = tl.load( k_ptrs + start_n * stride_kt, mask=offs_n[None, :] + start_n < k_seqlen, + other=0.0, ) else: k = tl.load( k_ptrs + start_n * stride_kt, mask=(offs_n[None, :] + start_n < k_seqlen) & (offs_d[:, None] < D_HEAD), + other=0.0, ) else: if EVEN_D: k = tl.load(k_ptrs + start_n * stride_kt) else: k = tl.load(k_ptrs + start_n * stride_kt, - mask=offs_d[:, None] < D_HEAD) + mask=offs_d[:, None] < D_HEAD, + other=0.0) qk = tl.zeros([BLOCK_M_LOADING, BLOCK_N], dtype=tl.float32) qk += tl.dot(q, k) @@ -200,19 +203,22 @@ def _fwd_kernel_inner( v = tl.load( v_ptrs + start_n * stride_vt, mask=offs_n[:, None] + start_n < k_seqlen, + other=0.0, ) else: v = tl.load( v_ptrs + start_n * stride_vt, mask=(offs_n[:, None] + start_n < k_seqlen) & (offs_d[None, :] < D_HEAD), + other=0.0, ) else: if EVEN_D: v = tl.load(v_ptrs + start_n * stride_vt) else: v = tl.load(v_ptrs + start_n * stride_vt, - mask=offs_d[None, :] < D_HEAD) + mask=offs_d[None, :] < D_HEAD, + other=0.0) acc += tl.dot(p, v) @@ -318,12 +324,13 @@ def _fwd_kernel_batch_inference( q = tl.load( Q + offs_m[:, None] * stride_qt + offs_d[None, :] * stride_qd, mask=offs_m[:, None] < q_seqlen, + other=0.0, ) else: q = tl.load( Q + offs_m[:, None] * stride_qt + offs_d[None, :] * stride_qd, mask=(offs_m[:, None] < q_seqlen) & (offs_d[None, :] < D_HEAD), - other=0, + other=0.0, ) sparse_crow_ptr = (layout_crow_ptr + off_h * layout_crow_stride_h + diff --git a/vllm/lora/ops/bgmv_expand.py b/vllm/lora/ops/bgmv_expand.py index 6a32387a6f36c..f176259fddc78 100644 --- a/vllm/lora/ops/bgmv_expand.py +++ b/vllm/lora/ops/bgmv_expand.py @@ -75,7 +75,9 @@ def _bgmv_expand_kernel( other=0.0, ) # [BLOCK_N,BLOCK_K] if ADD_INPUTS: - tiled_out = tl.load(c_ptr + current_n * cn_stride, mask=c_mask) + tiled_out = tl.load(c_ptr + current_n * cn_stride, + mask=c_mask, + other=0.0) accumulator = tl.sum(tiled_a * tiled_b, 1) + tiled_out else: accumulator = tl.sum(tiled_a * tiled_b, 1) diff --git a/vllm/lora/ops/bgmv_expand_slice.py b/vllm/lora/ops/bgmv_expand_slice.py index 73628fd20d327..2c6ed96c253f0 100644 --- a/vllm/lora/ops/bgmv_expand_slice.py +++ b/vllm/lora/ops/bgmv_expand_slice.py @@ -78,7 +78,13 @@ def _bgmv_expand_slice_kernel( ) # [BLOCK_N,BLOCK_K] if ADD_INPUTS: - tiled_out = tl.load(c_ptr + current_n * cn_stride, mask=c_mask) + # explicitly pass in other=None to tell triton that masked values + # can be uninitialized. 
This is OK because the later tl.store + # operation uses the same mask, eliminating the risk of garbage + # values propagating + tiled_out = tl.load(c_ptr + current_n * cn_stride, + mask=c_mask, + other=None) accumulator = tl.sum(tiled_a * tiled_b, 1) + tiled_out else: accumulator = tl.sum(tiled_a * tiled_b, 1) diff --git a/vllm/lora/ops/sgmv_expand.py b/vllm/lora/ops/sgmv_expand.py index 4910cb4061298..ee2cd2e05e2ee 100644 --- a/vllm/lora/ops/sgmv_expand.py +++ b/vllm/lora/ops/sgmv_expand.py @@ -88,7 +88,10 @@ def _sgmv_expand_kernel( c_mask = (offset_cm[:, None] < (cur_seq_start + M)) & (offset_cn[None, :] < N) if ADD_INPUTS: - tiled_out = tl.load(c_ptr, mask=c_mask) + # explicitly pass in other=None to tell triton that masked values + # can be uninitialized. This is OK because the later tl.store operation + # uses the same mask, eliminating the risk of garbage values propagating + tiled_out = tl.load(c_ptr, mask=c_mask, other=None) tiled_c += tiled_out tl.store(c_ptr, tiled_c, mask=c_mask) diff --git a/vllm/lora/ops/sgmv_expand_slice.py b/vllm/lora/ops/sgmv_expand_slice.py index 844f5cec39e93..5244fa14913a4 100644 --- a/vllm/lora/ops/sgmv_expand_slice.py +++ b/vllm/lora/ops/sgmv_expand_slice.py @@ -94,7 +94,10 @@ def _sgmv_expand_slice_kernel( c_mask = (offset_cm[:, None] < (cur_seq_start + M)) & (offset_cn[None, :] < (slice_offset + N)) if ADD_INPUTS: - tiled_out = tl.load(c_ptr, mask=c_mask) + # explicitly pass in other=None to tell triton that masked values + # can be uninitialized. This is OK because the later tl.store operation + # uses the same mask, eliminating the risk of garbage values propagating + tiled_out = tl.load(c_ptr, mask=c_mask, other=None) tiled_c += tiled_out tl.store(c_ptr, tiled_c, mask=c_mask) diff --git a/vllm/model_executor/layers/quantization/awq_triton.py b/vllm/model_executor/layers/quantization/awq_triton.py index bbb7fc8ad5087..ace8f4a348812 100644 --- a/vllm/model_executor/layers/quantization/awq_triton.py +++ b/vllm/model_executor/layers/quantization/awq_triton.py @@ -42,7 +42,7 @@ def awq_dequantize_kernel( result_masks = result_masks_y[:, None] & result_masks_x[None, :] # Load the weights. - iweights = tl.load(qweight_ptr + offsets, masks) + iweights = tl.load(qweight_ptr + offsets, masks, 0.0) iweights = tl.interleave(iweights, iweights) iweights = tl.interleave(iweights, iweights) iweights = tl.interleave(iweights, iweights) @@ -71,7 +71,7 @@ def awq_dequantize_kernel( zero_masks = zero_masks_y[:, None] & zero_masks_x[None, :] # Load the zeros. - zeros = tl.load(zeros_ptr + zero_offsets, zero_masks) + zeros = tl.load(zeros_ptr + zero_offsets, zero_masks, 0.0) zeros = tl.interleave(zeros, zeros) zeros = tl.interleave(zeros, zeros) zeros = tl.interleave(zeros, zeros) @@ -91,7 +91,7 @@ def awq_dequantize_kernel( scale_masks = scale_masks_y[:, None] & scale_masks_x[None, :] # Load the scales. - scales = tl.load(scales_ptr + scale_offsets, scale_masks) + scales = tl.load(scales_ptr + scale_offsets, scale_masks, 0.0) scales = tl.broadcast_to(scales, (BLOCK_SIZE_Y, BLOCK_SIZE_X * 8)) # Dequantize. 
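Every hunk in this file applies the same fix: masked tl.load calls get an explicit other= default so masked-off lanes hold a defined value instead of uninitialized registers. A self-contained sketch of the idiom (kernel and tensor names are illustrative only, not taken from the patch):

import torch
import triton
import triton.language as tl


@triton.jit
def double_kernel(x_ptr, out_ptr, n_elements, BLOCK_SIZE: tl.constexpr):
    offsets = tl.program_id(0) * BLOCK_SIZE + tl.arange(0, BLOCK_SIZE)
    mask = offsets < n_elements
    # other=0.0 makes out-of-bounds lanes well-defined zeros; without it
    # their contents are unspecified and can leak garbage into later math.
    x = tl.load(x_ptr + offsets, mask=mask, other=0.0)
    tl.store(out_ptr + offsets, x * 2.0, mask=mask)


x = torch.randn(1024, device="cuda")
out = torch.empty_like(x)
grid = (triton.cdiv(x.numel(), 256), )
double_kernel[grid](x, out, x.numel(), BLOCK_SIZE=256)
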
@@ -165,10 +165,10 @@ def awq_gemm_kernel(a_ptr, b_ptr, c_ptr, zeros_ptr, scales_ptr, M, N, K, for k in range(0, tl.cdiv(K, BLOCK_SIZE_K * SPLIT_K)): masks_k = offsets_k < K masks_a = masks_am[:, None] & masks_k[None, :] - a = tl.load(a_ptrs, mask=masks_a) + a = tl.load(a_ptrs, mask=masks_a, other=0.0) masks_b = masks_k[:, None] & masks_bn[None, :] - b = tl.load(b_ptrs, mask=masks_b) + b = tl.load(b_ptrs, mask=masks_b, other=0.0) b = tl.interleave(b, b) b = tl.interleave(b, b) b = tl.interleave(b, b) @@ -181,7 +181,7 @@ def awq_gemm_kernel(a_ptr, b_ptr, c_ptr, zeros_ptr, scales_ptr, M, N, K, masks_zk = offsets_szk < K // group_size masks_z = masks_zk[:, None] & masks_zn[None, :] zeros_ptrs = zeros_ptr + offsets_z - zeros = tl.load(zeros_ptrs, mask=masks_z) + zeros = tl.load(zeros_ptrs, mask=masks_z, other=0.0) zeros = tl.interleave(zeros, zeros) zeros = tl.interleave(zeros, zeros) zeros = tl.interleave(zeros, zeros) @@ -191,7 +191,7 @@ def awq_gemm_kernel(a_ptr, b_ptr, c_ptr, zeros_ptr, scales_ptr, M, N, K, masks_sk = offsets_szk < K // group_size masks_s = masks_sk[:, None] & masks_sn[None, :] scales_ptrs = scales_ptr + offsets_s - scales = tl.load(scales_ptrs, mask=masks_s) + scales = tl.load(scales_ptrs, mask=masks_s, other=0.0) scales = tl.broadcast_to(scales, (BLOCK_SIZE_K, BLOCK_SIZE_N)) b = (b >> shifts) & 0xF From 96d999fbe8d610fa4c5b7cad6bb0d0158d1d5b8b Mon Sep 17 00:00:00 2001 From: Lucas Wilkinson Date: Mon, 18 Nov 2024 14:59:29 -0500 Subject: [PATCH 021/122] [Kernel] Initial Machete W4A8 support + Refactors (#9855) Signed-off-by: Lucas Wilkinson --- benchmarks/kernels/benchmark_machete.py | 519 +++++++++---- benchmarks/kernels/graph_machete_bench.py | 5 +- benchmarks/kernels/weight_shapes.py | 6 + csrc/cutlass_extensions/cute_utils.cuh | 4 +- .../epilogue}/broadcast_load_epilogue_c2x.hpp | 1 + .../epilogue}/broadcast_load_epilogue_c3x.hpp | 0 .../epilogue/scaled_mm_epilogues_c2x.hpp | 317 ++++++++ .../epilogue/scaled_mm_epilogues_c3x.hpp | 315 ++++++++ .../vllm_cutlass_library_extension.py | 29 + .../vllm_numeric_conversion.cuh | 239 +++++- csrc/cutlass_extensions/vllm_type_utils.cuh | 42 + .../cutlass_w8a8/scaled_mm_c2x.cu | 53 +- .../cutlass_w8a8/scaled_mm_c2x.cuh | 302 -------- .../cutlass_w8a8/scaled_mm_c3x.cu | 312 +------- csrc/quantization/machete/generate.py | 732 ++++++++++-------- .../quantization/machete/machete_mainloop.cuh | 25 +- .../machete/machete_mm_kernel.cuh | 206 +++-- .../machete/machete_mm_launcher.cuh | 90 +-- .../machete/machete_prepack_kernel.cuh | 63 +- .../machete/machete_prepack_launcher.cuh | 15 +- .../machete/machete_prepacked_layout.cuh | 54 +- csrc/quantization/machete/machete_pytorch.cu | 120 ++- csrc/torch_bindings.cpp | 35 +- tests/kernels/test_machete_gemm.py | 284 ------- tests/kernels/test_machete_mm.py | 406 ++++++++++ vllm/_custom_ops.py | 75 +- .../layers/quantization/kernels/machete.py | 16 +- .../layers/quantization/utils/quant_utils.py | 45 +- 28 files changed, 2616 insertions(+), 1694 deletions(-) rename csrc/{quantization/cutlass_w8a8 => cutlass_extensions/epilogue}/broadcast_load_epilogue_c2x.hpp (99%) rename csrc/{quantization/cutlass_w8a8 => cutlass_extensions/epilogue}/broadcast_load_epilogue_c3x.hpp (100%) create mode 100644 csrc/cutlass_extensions/epilogue/scaled_mm_epilogues_c2x.hpp create mode 100644 csrc/cutlass_extensions/epilogue/scaled_mm_epilogues_c3x.hpp create mode 100644 csrc/cutlass_extensions/vllm_type_utils.cuh delete mode 100644 tests/kernels/test_machete_gemm.py create mode 100644 
tests/kernels/test_machete_mm.py diff --git a/benchmarks/kernels/benchmark_machete.py b/benchmarks/kernels/benchmark_machete.py index 665b50bf18cf0..a0342d08f1db8 100644 --- a/benchmarks/kernels/benchmark_machete.py +++ b/benchmarks/kernels/benchmark_machete.py @@ -2,8 +2,10 @@ import copy import itertools import math +import os import pickle as pkl import time +from dataclasses import dataclass from itertools import product from typing import Callable, Iterable, List, Optional, Tuple @@ -15,11 +17,12 @@ from vllm import _custom_ops as ops from vllm.model_executor.layers.quantization.utils.marlin_utils import ( - GPTQ_MARLIN_MAX_PARALLEL, GPTQ_MARLIN_MIN_THREAD_N, marlin_permute_scales) + GPTQ_MARLIN_MAX_PARALLEL, GPTQ_MARLIN_MIN_THREAD_N, marlin_permute_scales, + marlin_zero_points) from vllm.model_executor.layers.quantization.utils.marlin_utils_test import ( MarlinWorkspace) from vllm.model_executor.layers.quantization.utils.quant_utils import ( - gptq_pack, pack_rows, quantize_weights) + pack_rows, quantize_weights) from vllm.scalar_type import ScalarType, scalar_types from vllm.utils import FlexibleArgumentParser @@ -27,149 +30,349 @@ DEFAULT_BATCH_SIZES = [1, 16, 32, 64, 128, 256, 512, 1024] DEFAULT_TP_SIZES = [1] +NVTX_PROFILE = os.environ.get("NVTX_PROFILE", False) + +if NVTX_PROFILE: + import nvtx + + +def terse_type_name(dt): + return { + torch.bfloat16: "bf16", + torch.float16: "fp16", + torch.int8: "int8", + torch.float8_e4m3fn: "fp8", + torch.bfloat16: "bf16", + torch.float: "float", + torch.int: "int", + }[dt] + + +@dataclass +class BenchmarkTensors: + w_ref: torch.Tensor + a: torch.Tensor + + w_q: torch.Tensor + group_size: Optional[int] + wtype: ScalarType + w_g_s: torch.Tensor + w_g_zp: Optional[torch.Tensor] + w_ch_s: Optional[torch.Tensor] + w_tok_s: Optional[torch.Tensor] + + +@dataclass +class TypeConfig: + act_type: torch.dtype + weight_type: ScalarType + output_type: Optional[torch.dtype] + group_scale_type: Optional[torch.dtype] + group_zero_type: Optional[torch.dtype] + channel_scale_type: Optional[torch.dtype] + token_scale_type: Optional[torch.dtype] + + +def rand_data(shape, dtype=torch.float16, scale=1): + if dtype.is_floating_point: + return (scale * torch.rand(shape, device="cuda") - 0.3).to(dtype) + else: + return torch.randint(-15, 15, shape, dtype=dtype, device="cuda") + + +def quantize_and_pack(atype: torch.dtype, + w: torch.Tensor, + wtype: ScalarType, + stype: Optional[torch.dtype], + group_size: Optional[int], + zero_points: bool = False): + assert wtype.is_integer(), "TODO: support floating point weights" + + w_ref, w_q, w_s, w_zp = quantize_weights( + w, + wtype, + group_size=group_size, + zero_points=zero_points, + # to match how the kernel applies zps + ref_zero_points_after_scales=True) -def machete_pack_weights(w_q: torch.tensor, wtype: ScalarType) -> torch.tensor: w_q = pack_rows(w_q, wtype.size_bits, *w_q.shape) - w_q = w_q.t().contiguous().t() # make col major - return ops.machete_prepack_B(w_q, wtype) + return w_ref, w_q, w_s, w_zp -def make_bench_tensors( - atype: torch.dtype, wtype: ScalarType, group_size: int, m: int, n: int, - k: int -) -> Tuple[torch.tensor, List[Tuple[torch.tensor, torch.tensor, torch.tensor, - torch.tensor]]]: - assert wtype.is_integer(), "TODO: support floating point weights" +def create_bench_tensors(shape: Tuple[int, int, int], types: TypeConfig, + group_size: Optional[int]) -> List[BenchmarkTensors]: + m, n, k = shape # we want to make sure that weights don't fit into L2 cache between runs so # we construct enough 
weights to exceed L2 cache, which is 50mb on a H100 # so we target total weight size > 2*50mb - num_weights = math.ceil(2 * 50 * 1024**2 * 8 / (k * n * wtype.size_bits)) - - a = torch.randn((m, k), device="cuda", dtype=atype) * 5 - weights = [ - torch.randn((k, n), device="cuda", dtype=atype) - for _ in range(num_weights) - ] - quanitized_weights = [ - quantize_weights(w, wtype, group_size) for w in weights - ] - - return a, quanitized_weights + num_weights = math.ceil(2 * 50 * 1024**2 * 8 / + (k * n * types.weight_type.size_bits)) + + a = rand_data((m, k), types.act_type, scale=5) + + benchmark_tensors: List[BenchmarkTensors] = [] + for _ in range(num_weights): + w = rand_data((k, n), types.act_type, scale=5) + + if types.group_scale_type is not None: + w = w.to(types.group_scale_type) + if w.dtype.itemsize == 1: + w = w.to(torch.float16) + + w_ref, w_q_packed, w_s, w_zp = quantize_and_pack( + a.dtype, w, types.weight_type, types.group_scale_type, group_size, + types.group_zero_type is not None) + + if not a.dtype.is_floating_point: + aiinfo = torch.iinfo(a.dtype) + w_ref = w_ref.round().clamp(aiinfo.min, aiinfo.max) + + w_ref = w_ref.to(torch.float32) + + w_ch_s = None if types.channel_scale_type is None else\ + rand_data((n,), types.channel_scale_type) + w_tok_s = None if types.token_scale_type is None else\ + rand_data((m,), types.token_scale_type) + + benchmark_tensors.append( + BenchmarkTensors(w_ref=w_ref, + a=a, + w_q=w_q_packed, + wtype=types.weight_type, + w_g_s=w_s, + w_g_zp=w_zp, + group_size=group_size, + w_ch_s=w_ch_s, + w_tok_s=w_tok_s)) + + return benchmark_tensors + + +def torch_matmul_f16_create_bench_fn(bt: BenchmarkTensors) -> Callable: + a = bt.a + w = bt.w_ref.to(bt.a.dtype) # use float reference tensor + if a.dtype not in [torch.float16, torch.bfloat16]: + a = a.to(torch.float16) + w = w.to(torch.float16) + return lambda: torch.matmul(a, w) + + +def cutlass_scaled_mm_create_bench_fn(bt: BenchmarkTensors) -> Callable: + if bt.w_ch_s is not None and bt.w_tok_s is not None: + scale_a = bt.w_tok_s.to(torch.float32) + scale_b = bt.w_ch_s.to(torch.float32) + else: + scale_a = torch.tensor(1.0, dtype=torch.float32, device=bt.a.device) + scale_b = torch.tensor(1.0, dtype=torch.float32, device=bt.a.device) + w_col_major = bt.w_ref.to(bt.a.dtype).t().contiguous().t() + return lambda: ops.cutlass_scaled_mm( + bt.a, w_col_major, scale_a, scale_b, out_dtype=torch.float16) + + +def marlin_create_bench_fn(bt: BenchmarkTensors) -> Callable: + device = bt.a.device + + workspace = MarlinWorkspace(bt.w_ref.shape[1], GPTQ_MARLIN_MIN_THREAD_N, + GPTQ_MARLIN_MAX_PARALLEL) + + if bt.w_g_zp is None: + w_zp = torch.empty(0, dtype=torch.int, device=device) + else: + w_zp = marlin_zero_points(bt.w_g_zp, bt.w_ref.shape[0], + bt.w_ref.shape[1], bt.wtype.size_bits) + + if bt.group_size is None: + w_s = torch.tensor([], device="cuda", dtype=torch.half) + else: + w_s = marlin_permute_scales(bt.w_g_s, bt.w_ref.shape[0], + bt.w_ref.shape[1], bt.group_size) + + sort_indices = torch.empty(0, dtype=torch.int, device=device) + g_idx = torch.empty(0, dtype=torch.int, device=device) + w_q = ops.gptq_marlin_repack(bt.w_q, sort_indices, bt.w_ref.shape[0], + bt.w_ref.shape[1], bt.wtype.size_bits) + + if bt.a.dtype.is_floating_point: + assert bt.w_ch_s is None + assert bt.w_tok_s is None + assert bt.group_size is not None + + fn = lambda: ops.gptq_marlin_gemm(a=bt.a, + b_q_weight=w_q, + b_scales=w_s, + b_zeros=w_zp, + g_idx=g_idx, + perm=sort_indices, + workspace=workspace.scratch, + b_q_type=bt.wtype, + 
size_m=bt.a.shape[0], + size_n=bt.w_ref.shape[1], + size_k=bt.w_ref.shape[0], + is_k_full=True) + else: + assert bt.a.dtype == torch.int8 + assert bt.wtype == scalar_types.uint4b8 + + if bt.w_ch_s is not None: + s_ch = bt.w_ch_s.to(torch.float32) + else: + s_ch = torch.ones(bt.w_ref.shape[1], + dtype=torch.float32, + device=device) + + if bt.w_tok_s is not None: + s_tok = bt.w_tok_s.to(torch.float32) + else: + s_tok = torch.ones(bt.a.shape[0], + dtype=torch.float32, + device=device) + + fn = lambda: ops.marlin_qqq_gemm(a=bt.a, + b_q_weight=w_q, + s_group=w_s, + s_tok=s_tok, + s_ch=s_ch, + workspace=workspace.scratch, + size_m=bt.a.shape[0], + size_n=bt.w_ref.shape[1], + size_k=bt.w_ref.shape[0]) + + return fn + + +def machete_create_bench_fn(bt: BenchmarkTensors, + out_type=torch.dtype, + schedule=None) -> Callable: + w_q = bt.w_q.t().contiguous().t() # make col major + w_q = ops.machete_prepack_B(w_q, bt.a.dtype, bt.wtype, + None if bt.w_g_s is None else bt.w_g_s.dtype) + + w_g_zp = bt.w_g_zp + if w_g_zp is not None: + w_g_zp = -1 * bt.w_g_s * (w_g_zp.to(bt.w_g_s.dtype)) + + return lambda: ops.machete_mm( + a=bt.a, + b_q=bt.w_q, + b_type=bt.wtype, + b_group_scales=bt.w_g_s, + b_group_zeros=w_g_zp, + b_group_size=bt.group_size, + b_channel_scales=bt.w_ch_s, + a_token_scales=bt.w_tok_s, + out_type=out_type, + schedule=schedule, + ) # impl - # bench -def bench_fn(label: str, sub_label: str, description: str, - fn: Callable) -> TMeasurement: - min_run_time = 1 - return TBenchmark.Timer( - stmt="fn()", + +def bench_fns(label: str, sub_label: str, description: str, + fns: List[Callable]): + + min_run_time = 1 if not NVTX_PROFILE else 0.1 + res = TBenchmark.Timer( + stmt=""" + for fn in fns: + fn() + """, globals={ - "fn": fn + "fns": fns }, label=label, sub_label=sub_label, description=description, ).blocked_autorange(min_run_time=min_run_time) + if NVTX_PROFILE: + with nvtx.annotate("mm-bench"), nvtx.annotate( + f"{label}|{sub_label}|{description}"): + fns[0]() -def loop_over_weights( - a: torch.tensor, weights: List[Tuple[torch.tensor, torch.tensor, - torch.tensor, torch.tensor]], - fn: Callable[[torch.tensor, torch.tensor, torch.tensor, torch.tensor], - None]): - for w_ref, w_q, w_s, _ in weights: - fn(a, w_ref, w_q, w_s) + return res _SWEEP_SCHEDULES_RESULTS: Optional[pd.DataFrame] = None _SWEEP_SCHEDULES_RESULTS_CSV: Optional[str] = None -def bench(atype: torch.dtype, - wtype: ScalarType, +def bench(types: TypeConfig, group_size: int, m: int, k: int, n: int, label: str, sub_label: str, - benchmark_marlinv1: bool = True, - sweep_schedules: bool = True) -> Iterable[TMeasurement]: - global _SWEEP_SCHEDULES_RESULTS - - a, weights = make_bench_tensors(atype, wtype, group_size, m, n, k) - sub_label += f", L={len(weights)}" - - weights_machete = [(w_ref, machete_pack_weights(w_q, wtype), w_s, w_zp) - for w_ref, w_q, w_s, w_zp in weights] + sweep_schedules: bool = True) -> List[TMeasurement]: + benchmark_tensors = create_bench_tensors((m, n, k), types, group_size) + sub_label += f", L={len(benchmark_tensors)}" + + name_type_string = f"W{types.weight_type}"+\ + f"-A{terse_type_name(types.act_type)}" + if types.group_scale_type is not None: + name_type_string += f"-GS{terse_type_name(types.group_scale_type)}" + if types.group_zero_type is not None: + name_type_string += f"-GZ{terse_type_name(types.group_zero_type)}" + if group_size is not None: + name_type_string += f"-G{group_size}" + if types.channel_scale_type is not None: + name_type_string += f"-CS{terse_type_name(types.channel_scale_type)}" + 
if types.token_scale_type is not None: + name_type_string += f"-TS{terse_type_name(types.token_scale_type)}" timers = [] # pytorch impl timers.append( - bench_fn( - label, sub_label, "torch.matmul", lambda: loop_over_weights( - a, - weights, - lambda a, w_ref, w_q, w_s: torch.matmul(a, w_ref), - ))) + bench_fns( + label, sub_label, "torch.matmul (fp16)", + [torch_matmul_f16_create_bench_fn(bt) + for bt in benchmark_tensors])) - if benchmark_marlinv1: - w_ref = weights[0][0] - - w_zp_empty = torch.empty(0, dtype=torch.int, device=w_ref.device) - sort_indices = torch.empty(0, dtype=torch.int, device=w_ref.device) - g_idx = torch.empty(0, dtype=torch.int, device=w_ref.device) - - def marlinv1_pack_weights(w_q: torch.tensor) -> torch.tensor: - w_q_gptq = gptq_pack(w_q, wtype.size_bits, *w_ref.shape) - return ops.gptq_marlin_repack(w_q_gptq, sort_indices, *w_ref.shape, - wtype.size_bits) - - def marlinv1_permute_scales(w_s: torch.tensor) -> torch.tensor: - return marlin_permute_scales(w_s, *w_ref.shape, group_size) - - weights_marlinv1 = [(w_ref, marlinv1_pack_weights(w_q), - marlinv1_permute_scales(w_s), w_zp) - for w_ref, w_q, w_s, w_zp in weights] - - workspace = MarlinWorkspace(w_ref.shape[1], GPTQ_MARLIN_MIN_THREAD_N, - GPTQ_MARLIN_MAX_PARALLEL) - - # marlinv1 + if types.act_type == torch.int8 or types.act_type == torch.float8_e4m3fn: + timers.append( + bench_fns( + label, sub_label, + f"cutlass_scaled_mm ({terse_type_name(types.act_type)})", [ + cutlass_scaled_mm_create_bench_fn(bt) + for bt in benchmark_tensors + ])) + + if types.act_type != torch.float8_e4m3fn: timers.append( - bench_fn( - label, sub_label, "marlin_orig", lambda: loop_over_weights( - a, weights_marlinv1, lambda a, w_ref, w_q, w_s: ops. - gptq_marlin_gemm(a, - w_q, - w_s, - w_zp_empty, - g_idx, - sort_indices, - workspace.scratch, - wtype, - size_m=a.shape[0], - size_n=w_ref.shape[1], - size_k=w_ref.shape[0], - is_k_full=True)))) + bench_fns(label, sub_label, f"marlin ({name_type_string})", + [marlin_create_bench_fn(bt) + for bt in benchmark_tensors])) # machete timers.append( - bench_fn( - label, sub_label, "machete_heuristic", lambda: loop_over_weights( - a, weights_machete, lambda a, _, w_q, w_s: ops.machete_gemm( - a, w_q, wtype, b_scales=w_s, b_group_size=group_size)))) + bench_fns(label, sub_label, f"machete ({name_type_string})", [ + machete_create_bench_fn(bt, out_type=types.output_type) + for bt in benchmark_tensors + ])) if sweep_schedules: + global _SWEEP_SCHEDULES_RESULTS + print("Finding best schedule for machete") best = None best_schedule = None - schedules = ops.machete_supported_schedules(wtype) + schedules = ops.machete_supported_schedules( + a_type=types.act_type, + b_type=types.weight_type, + group_scales_type=types.group_scale_type, + group_zeros_type=types.group_zero_type, + token_scales_type=types.token_scale_type, + channel_scales_type=types.channel_scale_type, + out_type=types.output_type) + + if schedules is None or len(schedules) == 0: + raise ValueError("No schedules found to sweep") + for schedule in reversed(schedules): schedule_M = int(schedule.split("_")[0].split("x")[1]) @@ -177,16 +380,11 @@ def marlinv1_permute_scales(w_s: torch.tensor) -> torch.tensor: if schedule_M >= 2 * max(m, 16) or schedule_M < m // 4: continue - def run(a, _, w_q, w_s, schedule=schedule): - ops.machete_gemm(a, - w_q, - wtype, - w_s, - b_group_size=group_size, - schedule=schedule) - - res = bench_fn(label, sub_label, "machete_best", - lambda: loop_over_weights(a, weights_machete, run)) + res = bench_fns(label, 
sub_label, "machete_best", [ + machete_create_bench_fn( + bt, out_type=types.output_type, schedule=schedule) + for bt in benchmark_tensors + ]) results_row = { "M": m, @@ -213,25 +411,33 @@ def run(a, _, w_q, w_s, schedule=schedule): # runner -def print_timers(timers: Iterable[TMeasurement]): +def print_timers(timers: List[TMeasurement]): compare = TBenchmark.Compare(timers) compare.print() -def run(dtype: torch.dtype, sweep_schedules: bool, - MKNs: Iterable[Tuple[int, int, int]]) -> Iterable[TMeasurement]: +def run(args, MKNs: Iterable[Tuple[int, int, int]]) -> Iterable[TMeasurement]: + types = TypeConfig( + act_type=args.act_type, + weight_type=scalar_types.uint4b8 if args.group_zero_type is None \ + else scalar_types.uint4, + output_type=args.out_type, + group_scale_type=args.group_scale_type, + group_zero_type=args.group_zero_type, + channel_scale_type=args.channel_scale_type, + token_scale_type=args.token_scale_type, + ) - results = [] + results: List[TMeasurement] = [] for m, k, n in MKNs: - timers = bench(dtype, - scalar_types.uint4b8, - 128, + timers = bench(types, + args.group_size, m, k, n, - f"{dtype}-gemm", + f"{args.act_type}-gemm", f"MKN=({m}x{k}x{n})", - sweep_schedules=sweep_schedules) + sweep_schedules=args.sweep_schedules) print_timers(timers) results.extend(timers) @@ -240,7 +446,7 @@ def run(dtype: torch.dtype, sweep_schedules: bool, # output makers def make_output( - data: Iterable[TMeasurement], + data: List[TMeasurement], MKNs: Iterable[Tuple[int, int, int]], base_description: str, timestamp=None, @@ -262,7 +468,6 @@ def run_square_bench(args): dim_sizes = list( range(args.dim_start, args.dim_end + 1, args.dim_increment)) MKNs = list(zip(dim_sizes, dim_sizes, dim_sizes)) - data = run(args.dtype, args.sweep_schedules, MKNs) make_output(data, MKNs, f"square_bench-{args.dtype}") @@ -306,33 +511,49 @@ def model_shapes(model_name: str, tp_size: int) -> List[Tuple[int, int]]: for k, n in KNs: MKNs.append((m, k, n)) - data = run(args.dtype, args.sweep_schedules, MKNs) + data = run(args, MKNs) model_bench_data.append(data) + type_string = f"{args.act_type}" + # Print all results for data, model_tp in zip(model_bench_data, models_tps): model, tp_size = model_tp - print(f"== Results {args.dtype} {model}-TP{tp_size} ====") + print(f"== Results {type_string} {model}-TP{tp_size} ====") print_timers(data) - timestamp = int(time.time()) + timestr = time.strftime("%Y%m%d-%H%M%S") - all_data = [] + all_results = [] for d in model_bench_data: - all_data.extend(d) + all_results.extend(d) + # pickle all data - with open(f"model_bench-{args.dtype}-{timestamp}.pkl", "wb") as f: - pkl.dump(all_data, f) + with open(f"model_bench-{type_string}-{timestr}.pkl", "wb") as f: + args_dict = vars(args) + args_dict.pop("func") + pkl.dump({ + "args": args_dict, + "results": all_results, + }, f) if __name__ == "__main__": def to_torch_dtype(dt): - if dt == "bfloat16": - return torch.bfloat16 - if dt == "float16": - return torch.float16 - raise ValueError("unsupported dtype") + return { + "bfloat16": torch.bfloat16, + "float16": torch.float16, + "int8": torch.int8, + "float8_e4m3fn": torch.float8_e4m3fn, + "int": torch.int, + "float": torch.float, + }[dt] + + class ToTorchDtype(argparse.Action): + + def __call__(self, parser, namespace, values, option_string=None): + setattr(namespace, self.dest, to_torch_dtype(values)) parser = FlexibleArgumentParser( description=""" @@ -352,12 +573,42 @@ def to_torch_dtype(dt): """, # noqa: E501 formatter_class=argparse.RawTextHelpFormatter, ) - parser.add_argument( 
- "--dtype", - type=to_torch_dtype, + "--act-type", + action=ToTorchDtype, required=True, - help="Available options are ['bfloat16', 'float16']", + choices=['bfloat16', 'float16', 'int8', 'float8_e4m3fn'], + ) + parser.add_argument( + "--group-scale-type", + action=ToTorchDtype, + choices=['bfloat16', 'float16'], + ) + parser.add_argument( + "--group-zero-type", + type=to_torch_dtype, + choices=['bfloat16', 'float16'], + ) + parser.add_argument( + "--channel-scale-type", + action=ToTorchDtype, + choices=['float'], + ) + parser.add_argument( + "--token-scale-type", + action=ToTorchDtype, + choices=['float'], + ) + parser.add_argument( + "--out-type", + action=ToTorchDtype, + choices=['bfloat16', 'float16'], + ) + parser.add_argument( + "--group-size", + type=int, + help="Available options are ['None', '-1', '128'], default=128", + default=128, ) parser.add_argument( "--sweep-schedules", diff --git a/benchmarks/kernels/graph_machete_bench.py b/benchmarks/kernels/graph_machete_bench.py index de608fd05af70..7d0bd84150a27 100644 --- a/benchmarks/kernels/graph_machete_bench.py +++ b/benchmarks/kernels/graph_machete_bench.py @@ -20,10 +20,11 @@ args = parser.parse_args() with open(args.filename, 'rb') as f: - data: List[TMeasurement] = pickle.load(f) + data = pickle.load(f) + raw_results: List[TMeasurement] = data["results"] results = defaultdict(lambda: list()) - for v in data: + for v in raw_results: result = re.search(r"MKN=\(\d+x(\d+x\d+)\)", v.task_spec.sub_label) if result is not None: KN = result.group(1) diff --git a/benchmarks/kernels/weight_shapes.py b/benchmarks/kernels/weight_shapes.py index 25ec9d6028627..51f24f3ba1774 100644 --- a/benchmarks/kernels/weight_shapes.py +++ b/benchmarks/kernels/weight_shapes.py @@ -40,4 +40,10 @@ ([8192, 57344], 1), ([28672, 8192], 0), ], + "meta-llama/Llama-3.1-405b-hf": [ + ([16384, 18432], 1), + ([16384, 16384], 0), + ([16384, 106496], 1), + ([53248, 16384], 0), + ], } diff --git a/csrc/cutlass_extensions/cute_utils.cuh b/csrc/cutlass_extensions/cute_utils.cuh index 1842fab8b2cac..f61fe3ceb978a 100644 --- a/csrc/cutlass_extensions/cute_utils.cuh +++ b/csrc/cutlass_extensions/cute_utils.cuh @@ -20,9 +20,9 @@ CUTE_HOST_DEVICE static constexpr auto permute_layout(Layout l) { // is the layout f(x) = x template CUTE_HOST_DEVICE static constexpr bool is_identity_layout() { - if constexpr (std::is_same_v) + if constexpr (std::is_same_v) { return true; - else { + } else { constexpr auto coalesced_layout = coalesce(Layout{}); if constexpr (rank(coalesced_layout) == 1 && stride<0>(coalesced_layout) == 1) { diff --git a/csrc/quantization/cutlass_w8a8/broadcast_load_epilogue_c2x.hpp b/csrc/cutlass_extensions/epilogue/broadcast_load_epilogue_c2x.hpp similarity index 99% rename from csrc/quantization/cutlass_w8a8/broadcast_load_epilogue_c2x.hpp rename to csrc/cutlass_extensions/epilogue/broadcast_load_epilogue_c2x.hpp index d407d66ab2aa6..7aa87feb4cce2 100644 --- a/csrc/quantization/cutlass_w8a8/broadcast_load_epilogue_c2x.hpp +++ b/csrc/cutlass_extensions/epilogue/broadcast_load_epilogue_c2x.hpp @@ -52,6 +52,7 @@ // clang-format off #include "cutlass/epilogue/threadblock/fusion/visitor_2x.hpp" +#include "cutlass/epilogue/threadblock/fusion/visitors.hpp" #include "cute/tensor.hpp" namespace cutlass::epilogue::threadblock { diff --git a/csrc/quantization/cutlass_w8a8/broadcast_load_epilogue_c3x.hpp b/csrc/cutlass_extensions/epilogue/broadcast_load_epilogue_c3x.hpp similarity index 100% rename from csrc/quantization/cutlass_w8a8/broadcast_load_epilogue_c3x.hpp 
rename to csrc/cutlass_extensions/epilogue/broadcast_load_epilogue_c3x.hpp diff --git a/csrc/cutlass_extensions/epilogue/scaled_mm_epilogues_c2x.hpp b/csrc/cutlass_extensions/epilogue/scaled_mm_epilogues_c2x.hpp new file mode 100644 index 0000000000000..c69e87999ae71 --- /dev/null +++ b/csrc/cutlass_extensions/epilogue/scaled_mm_epilogues_c2x.hpp @@ -0,0 +1,317 @@ +#include "cutlass_extensions/epilogue/broadcast_load_epilogue_c2x.hpp" + +/* + This file defines custom epilogues for fusing channel scales, token scales, + bias, and activation zero-points onto a GEMM operation using the + CUTLASS 2.x API, for sm80 (Ampere) NVIDIA GPUs. + + Epilogues must contain a public type named EVTCompute of type Sm80EVT, + as well as a static prepare_args function that constructs an + EVTCompute::Arguments struct. +*/ + +namespace vllm::c2x { + +using namespace cute; + +/* + * This class provides the common load descriptors for the + * ScaledEpilogue[...] classes + */ +template +struct ScaledEpilogueBase { + protected: + using Accum = cutlass::epilogue::threadblock::VisitorAccFetch; + + template + using ColOrScalarLoad = + cutlass::epilogue::threadblock::VisitorColOrScalarBroadcast< + OutputTileThreadMap, T, Stride, Int<0>, Int<0>>>; + + template + using RowOrScalarLoad = + cutlass::epilogue::threadblock::VisitorRowOrScalarBroadcast< + OutputTileThreadMap, T, Stride, Int<1>, Int<0>>>; + + template + using ColLoad = cutlass::epilogue::threadblock::VisitorColBroadcast< + OutputTileThreadMap, T, Stride, Int<0>, Int<0>>>; + + template + using RowLoad = cutlass::epilogue::threadblock::VisitorRowBroadcast< + OutputTileThreadMap, T, Stride, Int<1>, Int<0>>>; + + template + using RowOrZeroLoad = + cutlass::epilogue::threadblock::VisitorRowOrZeroBroadcast< + OutputTileThreadMap, T, Stride, Int<1>, Int<0>>>; + + // This utility function constructs the arguments for the load descriptors + // from a tensor. It can handle both row and column, as well as row/column or + // scalar cases. + template + static auto args_from_tensor(torch::Tensor const& tensor) { + using Arguments = typename Descriptor::Arguments; + auto* data_ptr = static_cast(tensor.data_ptr()); + if constexpr (std::is_same_v> || + std::is_same_v>) { + return Arguments{data_ptr, tensor.numel() != 1}; + } else { + // it would technically work but no use case as data_ptr is never nullptr + static_assert(!std::is_same_v>); + return Arguments{data_ptr}; + } + } + + // This overload handles the case where there might not be a tensor, in which + // case a nullptr is passed and a constant (0) is used. + template + static auto args_from_tensor(c10::optional const& tensor) { + static_assert(std::is_same_v>); + using Arguments = typename Descriptor::Arguments; + auto* data_ptr = tensor ? static_cast(tensor->data_ptr()) : nullptr; + return Arguments{data_ptr}; + } +}; + +/* + This epilogue function defines a quantized GEMM operation similar to + torch._scaled_mm. + + A and B may be both either int8 or fp8_e4m3. A can be quantized per-tensor or + per-row. B can be quantized per-tensor or per-column. + Any combination of per-tensor and per-row or column is supported. + A and B must have symmetric quantization (zero point == 0). + + So the GEMM operation is D = (a_scales * A) (b_scales * B), where the + scales are applied elementwise with numpy-style broadcasting. + + ScaleA and ScaleB define the epilogue functions that apply the scales for + the A and B operands respectively. These scales may be either per-tensor or + per row or column. 
+*/ +template +struct ScaledEpilogue + : private ScaledEpilogueBase { + private: + using SUPER = ScaledEpilogueBase; + using Accum = typename SUPER::Accum; + using ScaleA = typename SUPER::template ColOrScalarLoad; + using ScaleB = typename SUPER::template RowOrScalarLoad; + + using Compute0 = cutlass::epilogue::threadblock::VisitorCompute< + cutlass::multiplies, float, float, + cutlass::FloatRoundStyle::round_to_nearest>; + + using EVTCompute0 = + cutlass::epilogue::threadblock::Sm80EVT; + + using Compute1 = cutlass::epilogue::threadblock::VisitorCompute< + cutlass::multiplies, ElementD, float, + cutlass::FloatRoundStyle::round_to_nearest>; + + public: + using EVTCompute = + cutlass::epilogue::threadblock::Sm80EVT; + using ArgumentType = typename EVTCompute::Arguments; + + static ArgumentType prepare_args(torch::Tensor const& a_scales, + torch::Tensor const& b_scales) { + auto a_args = SUPER::template args_from_tensor(a_scales); + auto b_args = SUPER::template args_from_tensor(b_scales); + + typename EVTCompute0::Arguments evt0_args{b_args}; + return ArgumentType{a_args, evt0_args}; + } +}; + +/* + * This epilogue performs the same operation as ScaledEpilogue, but adds a bias. + * This bias can also be used in the per-tensor azp case, where the activation + * zero point (azp) is used to compute an azp correction term, + * which is folded into the bias. + * + * The bias tensor must be per-output channel. + * ScaleA and ScaleB can be per-tensor or per-token/per-channel. + */ +template +struct ScaledEpilogueBias + : protected ScaledEpilogueBase { + protected: + using SUPER = ScaledEpilogueBase; + using Accum = typename SUPER::Accum; + using ScaleA = typename SUPER::template ColOrScalarLoad; + using ScaleB = typename SUPER::template RowOrScalarLoad; + using Bias = typename SUPER::template RowLoad; + using Compute0 = cutlass::epilogue::threadblock::VisitorCompute< + cutlass::multiplies, float, float, + cutlass::FloatRoundStyle::round_to_nearest>; + + using EVTCompute0 = + cutlass::epilogue::threadblock::Sm80EVT; + + using Compute1 = cutlass::epilogue::threadblock::VisitorCompute< + cutlass::multiply_add, ElementD, float, + cutlass::FloatRoundStyle::round_to_nearest>; + + public: + using EVTCompute = cutlass::epilogue::threadblock::Sm80EVT; + using ArgumentType = typename EVTCompute::Arguments; + static ArgumentType prepare_args(torch::Tensor const& a_scales, + torch::Tensor const& b_scales, + torch::Tensor const& bias) { + auto a_args = SUPER::template args_from_tensor(a_scales); + auto b_args = SUPER::template args_from_tensor(b_scales); + auto bias_args = SUPER::template args_from_tensor(bias); + + typename EVTCompute0::Arguments evt0_args{b_args}; + return ArgumentType{a_args, evt0_args, bias_args}; + } +}; + +/* + * This epilogue directly supports per-tensor azp in int32 form. + * As opposed to the per-token epilogue below, this epilogue only has an azp_adj + * term, which should already be multiplied with the scalar azp. + * The azp_adj term is a 1D tensor of shape (1,n), computed as azp * J @ B. + * + * This epilogue also supports bias, which remains per-channel. 
+ */ +template +struct ScaledEpilogueBiasAzp + : protected ScaledEpilogueBase { + private: + using SUPER = ScaledEpilogueBase; + using Accum = typename SUPER::Accum; + using ScaleA = typename SUPER::template ColOrScalarLoad; + using ScaleB = typename SUPER::template RowOrScalarLoad; + using Bias = typename SUPER::template RowOrZeroLoad; + + // This is the full AZP term, azp * J @ B, shape (1,n) + using AzpWithAdj = typename SUPER::template RowLoad; + + // Compute float(accum - azp_adj), both operands are int32_t + using ComputeAzp = cutlass::epilogue::threadblock::VisitorCompute< + cutlass::minus, float, int32_t, + cutlass::FloatRoundStyle::round_to_nearest>; + + using EVTComputeAzp = + cutlass::epilogue::threadblock::Sm80EVT; + + using ComputeScaleB = cutlass::epilogue::threadblock::VisitorCompute< + cutlass::multiplies, float, float, + cutlass::FloatRoundStyle::round_to_nearest>; + + using EVTComputeScaleB = + cutlass::epilogue::threadblock::Sm80EVT; + + using ComputeScaleBiasA = cutlass::epilogue::threadblock::VisitorCompute< + cutlass::multiply_add, ElementD, float, + cutlass::FloatRoundStyle::round_to_nearest>; + + public: + using EVTCompute = + cutlass::epilogue::threadblock::Sm80EVT; + + using ArgumentType = typename EVTCompute::Arguments; + + static ArgumentType prepare_args(torch::Tensor const& a_scales, + torch::Tensor const& b_scales, + torch::Tensor const& azp_adj, + c10::optional const& bias) { + auto a_args = SUPER::template args_from_tensor(a_scales); + auto b_args = SUPER::template args_from_tensor(b_scales); + auto bias_args = SUPER::template args_from_tensor(bias); + auto azp_adj_args = + SUPER::template args_from_tensor(azp_adj); + + typename EVTComputeAzp::Arguments evt_azp_args{{}, azp_adj_args}; + typename EVTComputeScaleB::Arguments evt_scale_b_args{b_args, evt_azp_args}; + return ArgumentType{a_args, evt_scale_b_args, bias_args}; + } +}; + +/* + * This epilogue supports per-token azp by computing and applying + * the correction term using a rank-1 update. If the term were materialized, + * it would require O(m*n) space, and this way it only requires O(m+n) space. + * The azp term is a 1D tensor of shape (m,1), and represents the unscaled zero + * point for each row of A. + * The azp_adj term is a 1D tensor of shape (1,n), computed as J @ B. + * + * This epilogue also supports bias, which remains per-channel. 
+ */ +template +struct ScaledEpilogueBiasAzpToken + : protected ScaledEpilogueBase { + private: + using SUPER = ScaledEpilogueBase; + using Accum = typename SUPER::Accum; + using ScaleA = typename SUPER::template ColOrScalarLoad; + using ScaleB = typename SUPER::template RowOrScalarLoad; + using Bias = typename SUPER::template RowOrZeroLoad; + + // Per-token azp term, shape (m,1) + using Azp = typename SUPER::template ColLoad; + + // This is the AZP adjustment term, J @ B, shape (1,n) + using AzpAdj = typename SUPER::template RowLoad; + + // Compute azp * azp_adj + using ComputeAzp = cutlass::epilogue::threadblock::VisitorCompute< + cutlass::multiplies, int32_t, int32_t, + cutlass::FloatRoundStyle::round_to_nearest>; + + using EVTComputeAzp = + cutlass::epilogue::threadblock::Sm80EVT; + + // Compute float(accum - azp*azp_adj), all operands are int32_t + using ComputeAcc = cutlass::epilogue::threadblock::VisitorCompute< + cutlass::minus, float, int32_t, + cutlass::FloatRoundStyle::round_to_nearest>; + + using EVTComputeAcc = + cutlass::epilogue::threadblock::Sm80EVT; + + using ComputeScaleB = cutlass::epilogue::threadblock::VisitorCompute< + cutlass::multiplies, float, float, + cutlass::FloatRoundStyle::round_to_nearest>; + + using EVTComputeScaleB = + cutlass::epilogue::threadblock::Sm80EVT; + + using ComputeScaleBiasA = cutlass::epilogue::threadblock::VisitorCompute< + cutlass::multiply_add, ElementD, float, + cutlass::FloatRoundStyle::round_to_nearest>; + + public: + using EVTCompute = + cutlass::epilogue::threadblock::Sm80EVT; + + using ArgumentType = typename EVTCompute::Arguments; + + static ArgumentType prepare_args(torch::Tensor const& a_scales, + torch::Tensor const& b_scales, + torch::Tensor const& azp_adj, + torch::Tensor const& azp, + c10::optional const& bias) { + auto a_args = SUPER::template args_from_tensor(a_scales); + auto b_args = SUPER::template args_from_tensor(b_scales); + auto bias_args = SUPER::template args_from_tensor(bias); + auto azp_args = SUPER::template args_from_tensor(azp); + auto azp_adj_args = + SUPER::template args_from_tensor(azp_adj); + + typename EVTComputeAzp::Arguments evt_azp_args{azp_args, azp_adj_args}; + typename EVTComputeAcc::Arguments evt_acc_args{{}, evt_azp_args}; + typename EVTComputeScaleB::Arguments evt_scale_b_args{b_args, evt_acc_args}; + return ArgumentType{a_args, evt_scale_b_args, bias_args}; + } +}; + +}; // namespace vllm::c2x \ No newline at end of file diff --git a/csrc/cutlass_extensions/epilogue/scaled_mm_epilogues_c3x.hpp b/csrc/cutlass_extensions/epilogue/scaled_mm_epilogues_c3x.hpp new file mode 100644 index 0000000000000..95764ecddc79f --- /dev/null +++ b/csrc/cutlass_extensions/epilogue/scaled_mm_epilogues_c3x.hpp @@ -0,0 +1,315 @@ +#include "cutlass_extensions/epilogue/broadcast_load_epilogue_c3x.hpp" + +/* + This file defines custom epilogues for fusing channel scales, token scales, + bias, and activation zero-points onto a GEMM operation using the + CUTLASS 3.x API, for NVIDIA GPUs with sm90a (Hopper) or later. + + Epilogues must contain a public type named EVTCompute of type Sm90EVT, + as well as a static prepare_args function that constructs an + EVTCompute::Arguments struct. +*/ + +namespace vllm::c3x { + +using namespace cute; + +/* + * This class provides the common load descriptors for the + * ScaledEpilogue[...] 
classes + */ +template +struct ScaledEpilogueBase { + protected: + using Accum = cutlass::epilogue::fusion::Sm90AccFetch; + + template + using ColOrScalarLoad = cutlass::epilogue::fusion::Sm90ColOrScalarBroadcast< + 0 /*Stages*/, typename EpilogueDescriptor::TileShape, T, + Stride, Int<0>, Int<0>>>; + + template + using RowOrScalarLoad = cutlass::epilogue::fusion::Sm90RowOrScalarBroadcast< + 0 /*Stages*/, typename EpilogueDescriptor::TileShape, T, + Stride, Int<1>, Int<0>>>; + + // Don't want to support nullptr by default + template + using ColLoad = cutlass::epilogue::fusion::Sm90ColBroadcast< + 0 /*Stages*/, typename EpilogueDescriptor::TileShape, T, + Stride, Int<0>, Int<0>>, 128 / sizeof_bits_v, EnableNullPtr>; + + // Don't want to support nullptr by default + template + using RowLoad = cutlass::epilogue::fusion::Sm90RowBroadcast< + 0 /*Stages*/, typename EpilogueDescriptor::TileShape, T, + Stride, Int<1>, Int<0>>, 128 / sizeof_bits_v, EnableNullPtr>; + + // This utility function constructs the arguments for the load descriptors + // from a tensor. It can handle both row and column, as well as row/column or + // scalar cases. + template + static auto args_from_tensor(torch::Tensor const& tensor) { + using Arguments = typename Descriptor::Arguments; + auto* data_ptr = static_cast(tensor.data_ptr()); + if constexpr (std::is_same_v> || + std::is_same_v>) { + return Arguments{data_ptr, tensor.numel() != 1}; + } else { + static_assert(!std::is_same_v> && + !std::is_same_v>); + return Arguments{data_ptr}; + } + } + + // This overload handles the case where there might not be a tensor, in which + // case a nullptr is passed and a constant (0) is used. + template + static auto args_from_tensor(c10::optional const& tensor) { + using Arguments = typename Descriptor::Arguments; + auto* data_ptr = tensor ? static_cast(tensor->data_ptr()) : nullptr; + static_assert(std::is_same_v> || + std::is_same_v>); + return Arguments{data_ptr}; + } +}; + +/* + This epilogue function defines a quantized GEMM operation similar to + torch.scaled_mm_. + + A and B may be both either int8 or fp8_e4m3. A can be + quantized per-tensor or per-row. B can be quantized per-tensor or per-column. + Any combination of per-tensor and per-row or column is supported. + A and B must have symmetric quantization (zero point == 0). + + So the GEMM operation is D = (a_scales * A) (b_scales * B), where the + scales are applied elementwise with numpy-style broadcasting. + + ScaleA and ScaleB define the epilogue functions that apply the scales for + the A and B operands respectively. These scales may be either per-tensor or + per row or column. 
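Before the concrete epilogues, a small host-side sketch of the broadcasting convention described above may help: a scale tensor with a single element behaves as a per-tensor scalar, otherwise it is indexed per row (for A) or per column (for B), which is exactly the {data_ptr, tensor.numel() != 1} pair that args_from_tensor builds for the ColOrScalarLoad / RowOrScalarLoad descriptors. The struct and function names below are illustrative, not part of the CUTLASS API.

#include <cstddef>

// A scale vector with one element broadcasts as a per-tensor scalar;
// otherwise it is read per row/column (numpy-style broadcasting).
struct ScaleVec {
  const float* data;
  bool per_element;  // mirrors `tensor.numel() != 1`
  float at(std::size_t i) const { return per_element ? data[i] : data[0]; }
};

// D[i][j] = a_scales(i) * (b_scales(j) * acc[i][j])
void scaled_epilogue_ref(const float* acc, float* d, std::size_t m,
                         std::size_t n, ScaleVec a_scales, ScaleVec b_scales) {
  for (std::size_t i = 0; i < m; ++i)
    for (std::size_t j = 0; j < n; ++j)
      d[i * n + j] = a_scales.at(i) * (b_scales.at(j) * acc[i * n + j]);
}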
+*/ +template +struct ScaledEpilogue + : private ScaledEpilogueBase { + private: + using SUPER = ScaledEpilogueBase; + using Accum = typename SUPER::Accum; + using ScaleA = typename SUPER::template ColOrScalarLoad; + using ScaleB = typename SUPER::template RowOrScalarLoad; + + using Compute0 = cutlass::epilogue::fusion::Sm90Compute< + cutlass::multiplies, float, float, + cutlass::FloatRoundStyle::round_to_nearest>; + + using EVTCompute0 = + cutlass::epilogue::fusion::Sm90EVT; + + using Compute1 = cutlass::epilogue::fusion::Sm90Compute< + cutlass::multiplies, ElementD, float, + cutlass::FloatRoundStyle::round_to_nearest>; + + public: + using EVTCompute = + cutlass::epilogue::fusion::Sm90EVT; + using ArgumentType = typename EVTCompute::Arguments; + + static ArgumentType prepare_args(torch::Tensor const& a_scales, + torch::Tensor const& b_scales) { + auto a_args = SUPER::template args_from_tensor(a_scales); + auto b_args = SUPER::template args_from_tensor(b_scales); + + typename EVTCompute0::Arguments evt0_args{b_args}; + return ArgumentType{a_args, evt0_args}; + } +}; + +/* + * This epilogue performs the same operation as ScaledEpilogue, but adds a bias. + * This bias can also be used in the per-tensor azp case, where the activation + * zero point (azp) is used to compute an azp correction term, + * which is folded into the bias. + * + * The bias tensor must be per-output channel. + * ScaleA and ScaleB can be per-tensor or per-token/per-channel. + */ +template +struct ScaledEpilogueBias + : private ScaledEpilogueBase { + private: + using SUPER = ScaledEpilogueBase; + using Accum = typename SUPER::Accum; + using ScaleA = typename SUPER::template ColOrScalarLoad; + using ScaleB = typename SUPER::template RowOrScalarLoad; + using Bias = typename SUPER::template RowLoad; + + using Compute0 = cutlass::epilogue::fusion::Sm90Compute< + cutlass::multiplies, float, float, + cutlass::FloatRoundStyle::round_to_nearest>; + + using EVTCompute0 = + cutlass::epilogue::fusion::Sm90EVT; + + using Compute1 = cutlass::epilogue::fusion::Sm90Compute< + cutlass::multiply_add, ElementD, float, + cutlass::FloatRoundStyle::round_to_nearest>; + + public: + using EVTCompute = + cutlass::epilogue::fusion::Sm90EVT; + + using ArgumentType = typename EVTCompute::Arguments; + static ArgumentType prepare_args(torch::Tensor const& a_scales, + torch::Tensor const& b_scales, + torch::Tensor const& bias) { + auto a_args = SUPER::template args_from_tensor(a_scales); + auto b_args = SUPER::template args_from_tensor(b_scales); + auto bias_args = SUPER::template args_from_tensor(bias); + + typename EVTCompute0::Arguments evt0_args{b_args}; + return ArgumentType{a_args, evt0_args, bias_args}; + } +}; + +/* + * This epilogue directly supports per-tensor azp in int32 form. + * As opposed to the per-token epilogue below, this epilogue only has an azp_adj + * term, which should already be multiplied with the scalar azp. + * The azp_adj term is a 1D tensor of shape (1,n), computed as azp * J @ B. + * + * This epilogue also supports bias, which remains per-channel. 
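As a sanity check on the shapes involved, here is a host-side sketch of how the pre-multiplied azp_adj tensor consumed by this per-tensor epilogue could be produced, assuming a row-major int8 B_q; the kernel itself never builds it and only subtracts the resulting (1,n) vector from the int32 accumulator before scaling. Function and variable names are illustrative.

#include <cstdint>
#include <vector>

// For per-tensor azp: (A_q - azp * J) @ B_q = A_q @ B_q - azp * (J @ B_q),
// so the whole correction collapses to azp_adj[j] = azp * sum_k B_q[k][j].
std::vector<int32_t> make_per_tensor_azp_adj(const std::vector<int8_t>& b_q,
                                             int k, int n, int32_t azp) {
  std::vector<int32_t> azp_adj(n, 0);
  for (int kk = 0; kk < k; ++kk)
    for (int j = 0; j < n; ++j)
      azp_adj[j] += static_cast<int32_t>(b_q[kk * n + j]);  // J @ B_q
  for (int j = 0; j < n; ++j) azp_adj[j] *= azp;            // fold in the scalar azp
  return azp_adj;
}

The per-token variant below cannot pre-multiply in this way, because azp varies per row of A; that is why it carries azp and azp_adj as separate operands.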
+ */ +template +struct ScaledEpilogueBiasAzp + : private ScaledEpilogueBase { + private: + using SUPER = ScaledEpilogueBase; + using Accum = typename SUPER::Accum; + using ScaleA = typename SUPER::template ColOrScalarLoad; + using ScaleB = typename SUPER::template RowOrScalarLoad; + using Bias = typename SUPER::template RowLoad; + + // This is the full AZP term, azp * J @ B, shape (1,n) + using AzpWithAdj = typename SUPER::template RowLoad; + + // Compute float(accum - azp_adj), both operands are int32_t + using ComputeAzp = cutlass::epilogue::fusion::Sm90Compute< + cutlass::minus, float, int32_t, + cutlass::FloatRoundStyle::round_to_nearest>; + + using EVTComputeAzp = + cutlass::epilogue::fusion::Sm90EVT; + + using ComputeScaleB = cutlass::epilogue::fusion::Sm90Compute< + cutlass::multiplies, float, float, + cutlass::FloatRoundStyle::round_to_nearest>; + + using EVTComputeScaleB = + cutlass::epilogue::fusion::Sm90EVT; + + using ComputeScaleBiasA = cutlass::epilogue::fusion::Sm90Compute< + cutlass::multiply_add, ElementD, float, + cutlass::FloatRoundStyle::round_to_nearest>; + + public: + using EVTCompute = + cutlass::epilogue::fusion::Sm90EVT; + using ArgumentType = typename EVTCompute::Arguments; + + static ArgumentType prepare_args(torch::Tensor const& a_scales, + torch::Tensor const& b_scales, + torch::Tensor const& azp_adj, + c10::optional const& bias) { + auto a_args = SUPER::template args_from_tensor(a_scales); + auto b_args = SUPER::template args_from_tensor(b_scales); + auto bias_args = SUPER::template args_from_tensor(bias); + auto azp_adj_args = + SUPER::template args_from_tensor(azp_adj); + + typename EVTComputeAzp::Arguments evt_azp_args{{}, azp_adj_args}; + typename EVTComputeScaleB::Arguments evt_scale_b_args{b_args, evt_azp_args}; + return ArgumentType{a_args, evt_scale_b_args, bias_args}; + } +}; + +/* + * This epilogue supports per-token azp by computing and applying + * the correction term using a rank-1 update. If the term were materialized, + * it would require O(m*n) space, and this way it only requires O(m+n) space. + * The azp term is a 1D tensor of shape (m,1), and represents the unscaled zero + * point for each row of A. + * The azp_adj term is a 1D tensor of shape (1,n), computed as J @ B. + * + * This epilogue also supports bias, which remains per-channel. 
+ */ +template +struct ScaledEpilogueBiasAzpToken + : private ScaledEpilogueBase { + private: + using SUPER = ScaledEpilogueBase; + using Accum = typename SUPER::Accum; + using ScaleA = typename SUPER::template ColOrScalarLoad; + using ScaleB = typename SUPER::template RowOrScalarLoad; + using Bias = typename SUPER::template RowLoad; + + // Per-token azp term, shape (m,1) + using Azp = typename SUPER::template ColLoad; + + // This is the AZP adjustment term, J @ B, shape (1,n) + using AzpAdj = typename SUPER::template RowLoad; + + // Compute azp * azp_adj + using ComputeAzp = cutlass::epilogue::fusion::Sm90Compute< + cutlass::multiplies, int32_t, int32_t, + cutlass::FloatRoundStyle::round_to_nearest>; + + using EVTComputeAzp = + cutlass::epilogue::fusion::Sm90EVT; + + // Compute float(accum - azp*azp_adj), all operands are int32_t + using ComputeAcc = cutlass::epilogue::fusion::Sm90Compute< + cutlass::minus, float, int32_t, + cutlass::FloatRoundStyle::round_to_nearest>; + + using EVTComputeAcc = + cutlass::epilogue::fusion::Sm90EVT; + + using ComputeScaleB = cutlass::epilogue::fusion::Sm90Compute< + cutlass::multiplies, float, float, + cutlass::FloatRoundStyle::round_to_nearest>; + + using EVTComputeScaleB = + cutlass::epilogue::fusion::Sm90EVT; + + using ComputeScaleBiasA = cutlass::epilogue::fusion::Sm90Compute< + cutlass::multiply_add, ElementD, float, + cutlass::FloatRoundStyle::round_to_nearest>; + + public: + using EVTCompute = + cutlass::epilogue::fusion::Sm90EVT; + using ArgumentType = typename EVTCompute::Arguments; + + static ArgumentType prepare_args(torch::Tensor const& a_scales, + torch::Tensor const& b_scales, + torch::Tensor const& azp_adj, + torch::Tensor const& azp, + c10::optional const& bias) { + auto a_args = SUPER::template args_from_tensor(a_scales); + auto b_args = SUPER::template args_from_tensor(b_scales); + auto bias_args = SUPER::template args_from_tensor(bias); + auto azp_args = SUPER::template args_from_tensor(azp); + auto azp_adj_args = + SUPER::template args_from_tensor(azp_adj); + + typename EVTComputeAzp::Arguments evt_azp_args{azp_args, azp_adj_args}; + typename EVTComputeAcc::Arguments evt_acc_args{{}, evt_azp_args}; + typename EVTComputeScaleB::Arguments evt_scale_b_args{b_args, evt_acc_args}; + return ArgumentType{a_args, evt_scale_b_args, bias_args}; + } +}; + +}; // namespace vllm::c3x \ No newline at end of file diff --git a/csrc/cutlass_extensions/vllm_cutlass_library_extension.py b/csrc/cutlass_extensions/vllm_cutlass_library_extension.py index 4fcfcd311aa91..a5beea1a35e49 100644 --- a/csrc/cutlass_extensions/vllm_cutlass_library_extension.py +++ b/csrc/cutlass_extensions/vllm_cutlass_library_extension.py @@ -35,6 +35,35 @@ class MixedInputKernelScheduleType(enum.Enum): } } +VLLMDataTypeSize: Dict[Union[VLLMDataType, DataType], int] = { + **DataTypeSize, # type: ignore + **{ + VLLMDataType.u4b8: 4, + VLLMDataType.u8b128: 8, + } +} + +VLLMDataTypeVLLMScalarTypeTag: Dict[Union[VLLMDataType, DataType], str] = { + VLLMDataType.u4b8: "vllm::kU4B8", + VLLMDataType.u8b128: "vllm::kU8B128", + DataType.u4: "vllm::kU4", + DataType.u8: "vllm::kU8", + DataType.s4: "vllm::kS4", + DataType.s8: "vllm::kS8", + DataType.f16: "vllm::kFloat16", + DataType.bf16: "vllm::kBfloat16", +} + +VLLMDataTypeTorchDataTypeTag: Dict[Union[VLLMDataType, DataType], str] = { + DataType.u8: "at::ScalarType::Byte", + DataType.s8: "at::ScalarType::Char", + DataType.e4m3: "at::ScalarType::Float8_e4m3fn", + DataType.s32: "at::ScalarType::Int", + DataType.f16: "at::ScalarType::Half", + 
DataType.bf16: "at::ScalarType::BFloat16", + DataType.f32: "at::ScalarType::Float", +} + VLLMKernelScheduleTag: Dict[Union[ MixedInputKernelScheduleType, KernelScheduleType], str] = { **KernelScheduleTag, # type: ignore diff --git a/csrc/cutlass_extensions/vllm_numeric_conversion.cuh b/csrc/cutlass_extensions/vllm_numeric_conversion.cuh index 2ad914f8e9868..90f226cf64c0a 100644 --- a/csrc/cutlass_extensions/vllm_numeric_conversion.cuh +++ b/csrc/cutlass_extensions/vllm_numeric_conversion.cuh @@ -3,6 +3,7 @@ #include "cutlass/numeric_conversion.h" #include "cutlass_extensions/vllm_custom_types.cuh" #include "cutlass_extensions/cute_utils.cuh" +#include "cutlass_extensions/vllm_type_utils.cuh" // this file extends: // https://github.com/NVIDIA/cutlass/blob/cutlass-3.5.0/include/cutlass/numeric_conversion.h @@ -28,8 +29,19 @@ struct InterleavedNumericArrayConverter { CUTLASS_DEVICE static result_type convert(source_type const& source) { - CUTE_INVALID_CONTROL_PATH( - "InterleavedNumericArrayConverter not implemented\n"); + if (cute::elect_one_sync()) { + if constexpr (std::is_same_v) { + printf( + "Convert %s <= %s (N = %d, IlvBlkLayout = void), not implemented\n", + nameof_v, nameof_v, N); + } else { + printf( + "Convert %s <= %s (N = %d, size(IlvBlkLayout{}) = %d), not " + "implemented\n", + nameof_v, nameof_v, N, size(IlvBlkLayout{})); + } + __brkpt(); + } return {}; } @@ -56,11 +68,6 @@ struct InterleavedNumericArrayConverter< result_type operator()(source_type const& s) const { return convert(s); } }; -// TODO (LucasWilkinson): Implement -// for Array <= Array - -// .... - template struct ArrayConverterPacked32Bit { using result_type = Array; @@ -86,14 +93,16 @@ struct ArrayConverterPacked32Bit { using ScalarConverter = NumericConverter; template - CUTLASS_DEVICE static uint32_t to_reg(PackedSrc const& source) { + CUTLASS_DEVICE static auto to_regs(PackedSrc const& src) { if constexpr (sizeof(PackedSrc) == 1) { - return static_cast(reinterpret_cast(source)); + return Array{reinterpret_cast(src)}; } else if constexpr (sizeof(PackedSrc) == 2) { - return static_cast(reinterpret_cast(source)); + return Array{reinterpret_cast(src)}; + } else if constexpr (sizeof(PackedSrc) == 4) { + return Array{reinterpret_cast(src)}; } else { - static_assert(sizeof(PackedSrc) == 4); - return reinterpret_cast(source); + static_assert(sizeof(PackedSrc) == 8); + return reinterpret_cast const&>(src); } } @@ -110,7 +119,7 @@ struct ArrayConverterPacked32Bit { static_assert(std::is_same_v); static_assert(std::is_same_v); - return RegConvert32bit::template convert(to_reg(source)); + return RegConvert32bit::template convert(to_regs(source)); } friend class detail::VectorizedConverter; @@ -140,6 +149,131 @@ struct ArrayConverterPacked32Bit { } }; +// Convert 8 4bit values packed into a 32bit register to 8 8bit values packed +// into 2 32bit register. +template +CUTLASS_DEVICE cutlass::AlignedArray lut_4bit_to_8bit_convert( + uint32_t src) { + cutlass::AlignedArray r; + // Determines if the value is in the top half of the LUT if set or + // (i.e. LUT[8:15]) in the bottom half (i.e. LUT[0:7]) if not set. Then move + // into bit position 0x4 of each nibble so when or'd with final_prmt_base it + // selects the correct candidate. When elements in final_prmt_base + // are >= 0x4, the high candidate is selected (i.e. LUT[8:15]), when elements + // are < 0x4, the low candidate is selected (i.e. 
LUT[0:7]) + uint32_t high_bit = (src & 0x88888888) >> 1; + + // `high_bit` is OR'd with 0x31203120 to find the correct value in the LUT + // (selects correct high or low candidate) + const uint32_t final_prmt_base = 0x32103210; + + // Ignore the high bit when indexing into LUT, for each 4bit value + // we index into both the high and low candidates then use + // high_bit | final_prmt_base to select the correct candidate + uint32_t lut_idx = (src & 0x77777777); + + auto pack = [](uint8_t a, uint8_t b, uint8_t c, uint8_t d) { + return uint32_t(a) | (uint32_t(b) << 8) | (uint32_t(c) << 16) | + (uint32_t(d) << 24); + }; + + static constexpr uint32_t LOW_0 = pack(LUT0, LUT1, LUT2, LUT3); + static constexpr uint32_t LOW_1 = pack(LUT4, LUT5, LUT6, LUT7); + static constexpr uint32_t HIGH_0 = pack(LUT8, LUT9, LUT10, LUT11); + static constexpr uint32_t HIGH_1 = pack(LUT12, LUT13, LUT14, LUT15); + + CUTLASS_PRAGMA_UNROLL + for (int ii = 0; ii < 2; ++ii, lut_idx >>= 16, high_bit >>= 16) { + uint32_t final_prmt_idx = final_prmt_base | high_bit; + + // This uses a look up table to convert packed int4s to packed int8s, + // using the int4 value as the index to prmt. It first select both the + // high and low candidates, then uses the high bit (i.e. `high_bit`) to + // select the correct candidate. + asm volatile( + "{\n" + " .reg .b32 low, high;\n" + " prmt.b32 low, %1, %2, %5;\n" + " prmt.b32 high, %3, %4, %5;\n" + " prmt.b32 %0, low, high, %6;\n" + "}\n" + : "=r"(r[ii]) + : "n"(LOW_0), "n"(LOW_1), "n"(HIGH_0), "n"(HIGH_1), "r"(lut_idx), + "r"(final_prmt_idx)); + } + + return r; +}; + +// for Array <= Array +template +struct NumericArrayConverter { + using result_type = Array; + using source_type = Array; + + static FloatRoundStyle const round_style = Round; + + private: + struct RegConvert { + template + CUTLASS_DEVICE static PackedResultType convert(Array src_) { + // [-8, -7, -6, -5, -4, -3, -2, -1, 0, 1, 2, 3, 4, 5, 6, 7] as int8s + auto r = lut_4bit_to_8bit_convert<0xF8, 0xF9, 0xFA, 0xFB, // + 0xFC, 0xFD, 0xFE, 0xFF, // + 0x00, 0x01, 0x02, 0x03, // + 0x04, 0x05, 0x06, 0x07>(src_[0]); + return reinterpret_cast(r); + }; + }; + + public: + CUTLASS_DEVICE + static result_type convert(source_type const& source) { + return ArrayConverterPacked32Bit::convert(source); + } + + CUTLASS_DEVICE + result_type operator()(source_type const& s) const { return convert(s); } +}; + +// for Array <= Array +template +struct NumericArrayConverter { + using result_type = Array; + using source_type = Array; + + static FloatRoundStyle const round_style = Round; + + private: + struct RegConvert { + template + CUTLASS_DEVICE static PackedResultType convert(Array src_) { + // [-8, -7, -6, -5, -4, -3, -2, -1, 0, 1, 2, 3, 4, 5, 6, 7] as fp8s + auto r = lut_4bit_to_8bit_convert<0xD0, 0xCE, 0xCC, 0xCA, // + 0xC8, 0xC4, 0xC0, 0xB8, // + 0x00, 0x38, 0x40, 0x44, // + 0x48, 0x4A, 0x4C, 0x4E>(src_[0]); + return reinterpret_cast(r); + }; + }; + + public: + CUTLASS_DEVICE + static result_type convert(source_type const& source) { + return ArrayConverterPacked32Bit::convert(source); + } + + CUTLASS_DEVICE + result_type operator()(source_type const& s) const { return convert(s); } +}; + // for Array <= Array template struct NumericArrayConverter { @@ -148,7 +282,8 @@ struct NumericArrayConverter { struct RegConvert { template - CUTLASS_DEVICE static PackedResultType convert(uint32_t src) { + CUTLASS_DEVICE static PackedResultType convert(Array src_) { + uint32_t src = src_[0]; using RegArray = cutlass::AlignedArray; @@ -249,7 +384,8 @@ 
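For reference, the table lookup that lut_4bit_to_8bit_convert performs with three prmt instructions is equivalent to the scalar loop below: each 4-bit field of the packed source indexes a 16-entry byte table (for the int8 case above, the table is -8..7). The prmt formulation exists because prmt can only select among 8 bytes per instruction, hence the split into low/high LUT halves chosen by the nibble's top bit. This host loop only illustrates the semantics and is not the device path.

#include <array>
#include <cstdint>

// Scalar equivalent of lut_4bit_to_8bit_convert: expand 8 packed 4-bit
// values into 8 bytes via a 16-entry lookup table.
std::array<uint8_t, 8> lut_4bit_to_8bit_ref(uint32_t src,
                                            const std::array<uint8_t, 16>& lut) {
  std::array<uint8_t, 8> out{};
  for (int i = 0; i < 8; ++i) {
    uint32_t nibble = (src >> (4 * i)) & 0xF;  // top bit picks LUT[8:15] on device
    out[i] = lut[nibble];
  }
  return out;
}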
struct InterleavedNumericArrayConverter, Stride<_4, _1>>, private: struct RegConvert { template - CUTLASS_DEVICE static PackedResultType convert(uint32_t src) { + CUTLASS_DEVICE static PackedResultType convert(Array src_) { + uint32_t src = src_[0]; using RegArray = cutlass::AlignedArray; @@ -338,7 +474,8 @@ struct InterleavedNumericArrayConverter, Stride<_4, _1>>, private: struct RegConvert { template - CUTLASS_DEVICE static PackedResultType convert(uint32_t src) { + CUTLASS_DEVICE static PackedResultType convert(Array src_) { + uint32_t src = src_[0]; using RegArray = cutlass::AlignedArray; @@ -417,7 +554,8 @@ struct NumericArrayConverter { struct RegConvert { template - CUTLASS_DEVICE static PackedResultType convert(uint32_t src) { + CUTLASS_DEVICE static PackedResultType convert(Array src_) { + uint32_t src = src_[0]; // Hold output FP16s in reg. We need 1 reg for every 2 elements using RegArray = cutlass::AlignedArray { private: struct RegConvert { template - CUTLASS_DEVICE static PackedResultType convert(uint32_t src) { + CUTLASS_DEVICE static PackedResultType convert(Array src_) { + uint32_t src = src_[0]; PackedResultType r; // __byte_perm simulates the add.u32 0x4B000000 to every u8 element of @@ -513,7 +652,8 @@ struct NumericArrayConverter { private: struct RegConvert { template - CUTLASS_DEVICE static PackedResultType convert(uint32_t src_reg) { + CUTLASS_DEVICE static PackedResultType convert(Array src_) { + uint32_t src_reg = src_[0]; // Hold output BF16s in reg. We need 1 reg for every 2 elements using RegArray = cutlass::AlignedArray, Stride<_4, _1>>, private: struct RegConvert { template - CUTLASS_DEVICE static PackedResultType convert(uint32_t src) { + CUTLASS_DEVICE static PackedResultType convert(Array src_) { + uint32_t src = src_[0]; using RegArray = cutlass::AlignedArray; @@ -671,7 +812,8 @@ struct InterleavedNumericArrayConverter, Stride<_4, _1>>, private: struct RegConvert { template - CUTLASS_DEVICE static PackedResultType convert(uint32_t src) { + CUTLASS_DEVICE static PackedResultType convert(Array src_) { + uint32_t src = src_[0]; using RegArray = cutlass::AlignedArray; @@ -788,6 +930,61 @@ struct NumericArrayConverter { #endif +// for Array <= Array +// FastFP16toINT8 from https://arxiv.org/pdf/2406.09904 +template +struct NumericArrayConverter { + using result_type = Array; + using source_type = Array; + + struct RegConvert { + // FastFP16toINT8 from https://arxiv.org/pdf/2406.09904 + template + CUTLASS_DEVICE static PackedResultType convert( + Array src) { + // Hold output int8s in reg. We need 1 reg for every 4 elements + using RegArray = cutlass::AlignedArray< + uint32_t, std::max(PackedResultType::kElements / 4, size_t(1))>; + RegArray r; + + static constexpr uint32_t MAGIC_BIAS_ = 0x64806480; + auto MAGIC_BIAS = *reinterpret_cast(&MAGIC_BIAS_); + + *reinterpret_cast(&src[0]) = + __hadd2(*reinterpret_cast(&src[0]), MAGIC_BIAS); + + if constexpr (src_regs > 1) { + *reinterpret_cast(&src[1]) = + __hadd2(*reinterpret_cast(&src[1]), MAGIC_BIAS); + } + + static_assert(PackedResultType::kElements <= 4); + uint32_t uint8s; + static constexpr uint32_t MASK_0246 = 0x6420; + static constexpr uint32_t UINT8s_TO_INT8s_MASK = 0x80808080; + asm volatile("prmt.b32 %0,%1,%2,%3;\n" + : "=r"(uint8s) + : "r"(src[0]), "r"((src_regs > 1) ? 
src[1] : src[0]), + "n"(MASK_0246)); + + uint32_t int8s = (uint8s ^ UINT8s_TO_INT8s_MASK); + + return reinterpret_cast(int8s); + }; + }; + + public: + CUTLASS_DEVICE + static result_type convert(source_type const& source) { + return ArrayConverterPacked32Bit::convert(source); + } + + CUTLASS_DEVICE + result_type operator()(source_type const& s) const { return convert(s); } +}; + ///////////////////////////////////////////////////////////////////////////////////////////////// } // namespace cutlass diff --git a/csrc/cutlass_extensions/vllm_type_utils.cuh b/csrc/cutlass_extensions/vllm_type_utils.cuh new file mode 100644 index 0000000000000..500ed508c8303 --- /dev/null +++ b/csrc/cutlass_extensions/vllm_type_utils.cuh @@ -0,0 +1,42 @@ +#include "cutlass/bfloat16.h" +#include "cutlass/half.h" +#include "cuda_bf16.h" + +#include "cutlass_extensions/vllm_custom_types.cuh" + +namespace cutlass { + +template +struct nameof { + static constexpr char const* value = "unknown"; +}; + +template +inline constexpr auto nameof_v = nameof::value; + +#define NAMEOF_TYPE(T) \ + template <> \ + struct nameof { \ + static constexpr char const* value = #T; \ + }; + +NAMEOF_TYPE(float_e4m3_t) +NAMEOF_TYPE(float_e5m2_t) +NAMEOF_TYPE(half_t) +NAMEOF_TYPE(nv_bfloat16) +NAMEOF_TYPE(bfloat16_t) +NAMEOF_TYPE(float) + +NAMEOF_TYPE(int4b_t) +NAMEOF_TYPE(int8_t) +NAMEOF_TYPE(int32_t) +NAMEOF_TYPE(int64_t) + +NAMEOF_TYPE(vllm_uint4b8_t) +NAMEOF_TYPE(uint4b_t) +NAMEOF_TYPE(uint8_t) +NAMEOF_TYPE(vllm_uint8b128_t) +NAMEOF_TYPE(uint32_t) +NAMEOF_TYPE(uint64_t) + +}; // namespace cutlass \ No newline at end of file diff --git a/csrc/quantization/cutlass_w8a8/scaled_mm_c2x.cu b/csrc/quantization/cutlass_w8a8/scaled_mm_c2x.cu index ee801e16573d4..dbb72e8bbd3f5 100644 --- a/csrc/quantization/cutlass_w8a8/scaled_mm_c2x.cu +++ b/csrc/quantization/cutlass_w8a8/scaled_mm_c2x.cu @@ -8,6 +8,10 @@ #include "scaled_mm_c2x_sm89_fp8_dispatch.cuh" #include "scaled_mm_c2x_sm89_int8_dispatch.cuh" +#include "cutlass_extensions/epilogue/scaled_mm_epilogues_c2x.hpp" + +using namespace vllm; + /* This file defines quantized GEMM operations using the CUTLASS 2.x API, for NVIDIA GPUs with SM versions prior to sm90 (Hopper). 
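Returning briefly to the half-to-int8 converter added in vllm_numeric_conversion.cuh above: the FastFP16toINT8 trick (arXiv 2406.09904) works because adding the magic bias 1152.0 (fp16 bit pattern 0x6480, packed as 0x64806480 per half2) to an integer-valued half in [-128, 127] leaves value + 128 in the low byte of the result's bit pattern; prmt with MASK_0246 then gathers those bytes and a single XOR with 0x80808080 restores the sign. The check below verifies that bit-level identity on the host; it is not the device implementation.

#include <cassert>
#include <cstdint>

// For an integer x in [-128, 127], the fp16 value x + 1152.0 has the bit
// pattern 0x6400 | (x + 128): the exponent is pinned at 2^10, so the biased
// value x + 128 lands directly in the low mantissa byte. Stripping that byte
// and flipping its top bit recovers x.
inline void fast_fp16_to_int8_identity_check() {
  for (int x = -128; x <= 127; ++x) {
    uint16_t half_bits = static_cast<uint16_t>(0x6400 | (x + 128));  // bits of x + 1152.0
    int8_t recovered = static_cast<int8_t>(
        static_cast<uint8_t>(half_bits & 0xFF) ^ 0x80);              // undo the +128 bias
    assert(recovered == x);
  }
}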
@@ -22,12 +26,11 @@ void cutlass_scaled_mm_sm75_epilogue(torch::Tensor& out, torch::Tensor const& a, TORCH_CHECK(b.dtype() == torch::kInt8); if (out.dtype() == torch::kBFloat16) { - return vllm::cutlass_gemm_sm75_dispatch( + return cutlass_gemm_sm75_dispatch( out, a, b, std::forward(epilogue_args)...); } else { TORCH_CHECK(out.dtype() == torch::kFloat16); - return vllm::cutlass_gemm_sm75_dispatch( + return cutlass_gemm_sm75_dispatch( out, a, b, std::forward(epilogue_args)...); } } @@ -42,10 +45,10 @@ void cutlass_scaled_mm_sm75(torch::Tensor& out, torch::Tensor const& a, if (bias) { TORCH_CHECK(bias->dtype() == out.dtype(), "currently bias dtype must match output dtype ", out.dtype()); - return cutlass_scaled_mm_sm75_epilogue( + return cutlass_scaled_mm_sm75_epilogue( out, a, b, a_scales, b_scales, *bias); } else { - return cutlass_scaled_mm_sm75_epilogue( + return cutlass_scaled_mm_sm75_epilogue( out, a, b, a_scales, b_scales); } } @@ -61,10 +64,10 @@ void cutlass_scaled_mm_azp_sm75(torch::Tensor& out, torch::Tensor const& a, TORCH_CHECK(b_scales.dtype() == torch::kFloat32); if (azp) { - return cutlass_scaled_mm_sm75_epilogue( + return cutlass_scaled_mm_sm75_epilogue( out, a, b, a_scales, b_scales, azp_adj, *azp, bias); } else { - return cutlass_scaled_mm_sm75_epilogue( + return cutlass_scaled_mm_sm75_epilogue( out, a, b, a_scales, b_scales, azp_adj, bias); } } @@ -78,12 +81,11 @@ void cutlass_scaled_mm_sm80_epilogue(torch::Tensor& out, torch::Tensor const& a, TORCH_CHECK(b.dtype() == torch::kInt8); if (out.dtype() == torch::kBFloat16) { - return vllm::cutlass_gemm_sm80_dispatch( + return cutlass_gemm_sm80_dispatch( out, a, b, std::forward(epilogue_args)...); } else { TORCH_CHECK(out.dtype() == torch::kFloat16); - return vllm::cutlass_gemm_sm80_dispatch( + return cutlass_gemm_sm80_dispatch( out, a, b, std::forward(epilogue_args)...); } } @@ -98,10 +100,10 @@ void cutlass_scaled_mm_sm80(torch::Tensor& out, torch::Tensor const& a, if (bias) { TORCH_CHECK(bias->dtype() == out.dtype(), "currently bias dtype must match output dtype ", out.dtype()); - return cutlass_scaled_mm_sm80_epilogue( + return cutlass_scaled_mm_sm80_epilogue( out, a, b, a_scales, b_scales, *bias); } else { - return cutlass_scaled_mm_sm80_epilogue( + return cutlass_scaled_mm_sm80_epilogue( out, a, b, a_scales, b_scales); } } @@ -117,10 +119,10 @@ void cutlass_scaled_mm_azp_sm80(torch::Tensor& out, torch::Tensor const& a, TORCH_CHECK(b_scales.dtype() == torch::kFloat32); if (azp) { - return cutlass_scaled_mm_sm80_epilogue( + return cutlass_scaled_mm_sm80_epilogue( out, a, b, a_scales, b_scales, azp_adj, *azp, bias); } else { - return cutlass_scaled_mm_sm80_epilogue( + return cutlass_scaled_mm_sm80_epilogue( out, a, b, a_scales, b_scales, azp_adj, bias); } } @@ -134,13 +136,12 @@ void cutlass_scaled_mm_sm89_epilogue(torch::Tensor& out, torch::Tensor const& a, TORCH_CHECK(b.dtype() == torch::kInt8); if (out.dtype() == torch::kBFloat16) { - return vllm::cutlass_gemm_sm89_int8_dispatch( + return cutlass_gemm_sm89_int8_dispatch( out, a, b, std::forward(epilogue_args)...); } else { assert(out.dtype() == torch::kFloat16); - return vllm::cutlass_gemm_sm89_int8_dispatch( + return cutlass_gemm_sm89_int8_dispatch( out, a, b, std::forward(epilogue_args)...); } } else { @@ -148,13 +149,13 @@ void cutlass_scaled_mm_sm89_epilogue(torch::Tensor& out, torch::Tensor const& a, TORCH_CHECK(b.dtype() == torch::kFloat8_e4m3fn); if (out.dtype() == torch::kBFloat16) { - return vllm::cutlass_gemm_sm89_fp8_dispatch< - cutlass::float_e4m3_t, 
cutlass::bfloat16_t, Epilogue>( + return cutlass_gemm_sm89_fp8_dispatch( out, a, b, std::forward(epilogue_args)...); } else { TORCH_CHECK(out.dtype() == torch::kFloat16); - return vllm::cutlass_gemm_sm89_fp8_dispatch( + return cutlass_gemm_sm89_fp8_dispatch( out, a, b, std::forward(epilogue_args)...); } } @@ -170,10 +171,10 @@ void cutlass_scaled_mm_sm89(torch::Tensor& out, torch::Tensor const& a, if (bias) { TORCH_CHECK(bias->dtype() == out.dtype(), "currently bias dtype must match output dtype ", out.dtype()); - return cutlass_scaled_mm_sm89_epilogue( + return cutlass_scaled_mm_sm89_epilogue( out, a, b, a_scales, b_scales, *bias); } else { - return cutlass_scaled_mm_sm89_epilogue( + return cutlass_scaled_mm_sm89_epilogue( out, a, b, a_scales, b_scales); } } @@ -189,10 +190,10 @@ void cutlass_scaled_mm_azp_sm89(torch::Tensor& out, torch::Tensor const& a, TORCH_CHECK(b_scales.dtype() == torch::kFloat32); if (azp) { - return cutlass_scaled_mm_sm89_epilogue( + return cutlass_scaled_mm_sm89_epilogue( out, a, b, a_scales, b_scales, azp_adj, *azp, bias); } else { - return cutlass_scaled_mm_sm89_epilogue( + return cutlass_scaled_mm_sm89_epilogue( out, a, b, a_scales, b_scales, azp_adj, bias); } } diff --git a/csrc/quantization/cutlass_w8a8/scaled_mm_c2x.cuh b/csrc/quantization/cutlass_w8a8/scaled_mm_c2x.cuh index 6329ff63623e2..d03242f44ab1d 100644 --- a/csrc/quantization/cutlass_w8a8/scaled_mm_c2x.cuh +++ b/csrc/quantization/cutlass_w8a8/scaled_mm_c2x.cuh @@ -21,7 +21,6 @@ #include "cutlass/epilogue/threadblock/fusion/visitors.hpp" #include "cutlass/gemm/kernel/default_gemm_universal_with_visitor.h" -#include "broadcast_load_epilogue_c2x.hpp" #include "common.hpp" // clang-format on @@ -71,307 +70,6 @@ struct enable_sm89_to_sm90 : Kernel { #endif } }; - -/* - * This class provides the common load descriptors for the - * ScaledEpilogue[...] classes - */ -template -struct ScaledEpilogueBase { - protected: - using Accum = cutlass::epilogue::threadblock::VisitorAccFetch; - - template - using ColOrScalarLoad = - cutlass::epilogue::threadblock::VisitorColOrScalarBroadcast< - OutputTileThreadMap, T, Stride, Int<0>, Int<0>>>; - - template - using RowOrScalarLoad = - cutlass::epilogue::threadblock::VisitorRowOrScalarBroadcast< - OutputTileThreadMap, T, Stride, Int<1>, Int<0>>>; - - template - using ColLoad = cutlass::epilogue::threadblock::VisitorColBroadcast< - OutputTileThreadMap, T, Stride, Int<0>, Int<0>>>; - - template - using RowLoad = cutlass::epilogue::threadblock::VisitorRowBroadcast< - OutputTileThreadMap, T, Stride, Int<1>, Int<0>>>; - - template - using RowOrZeroLoad = - cutlass::epilogue::threadblock::VisitorRowOrZeroBroadcast< - OutputTileThreadMap, T, Stride, Int<1>, Int<0>>>; - - // This utility function constructs the arguments for the load descriptors - // from a tensor. It can handle both row and column, as well as row/column or - // scalar cases. - template - static auto args_from_tensor(torch::Tensor const& tensor) { - using Arguments = typename Descriptor::Arguments; - auto* data_ptr = static_cast(tensor.data_ptr()); - if constexpr (std::is_same_v> || - std::is_same_v>) { - return Arguments{data_ptr, tensor.numel() != 1}; - } else { - // it would technically work but no use case as data_ptr is never nullptr - static_assert(!std::is_same_v>); - return Arguments{data_ptr}; - } - } - - // This overload handles the case where there might not be a tensor, in which - // case a nullptr is passed and a constant (0) is used. 
- template - static auto args_from_tensor(c10::optional const& tensor) { - static_assert(std::is_same_v>); - using Arguments = typename Descriptor::Arguments; - auto* data_ptr = tensor ? static_cast(tensor->data_ptr()) : nullptr; - return Arguments{data_ptr}; - } -}; - -/* - This epilogue function defines a quantized GEMM operation similar to - torch._scaled_mm. - - A and B may be both either int8 or fp8_e4m3. A can be quantized per-tensor or - per-row. B can be quantized per-tensor or per-column. - Any combination of per-tensor and per-row or column is supported. - A and B must have symmetric quantization (zero point == 0). - - So the GEMM operation is D = (a_scales * A) (b_scales * B), where the - scales are applied elementwise with numpy-style broadcasting. - - ScaleA and ScaleB define the epilogue functions that apply the scales for - the A and B operands respectively. These scales may be either per-tensor or - per row or column. -*/ -template -struct ScaledEpilogue - : private ScaledEpilogueBase { - private: - using SUPER = ScaledEpilogueBase; - using Accum = typename SUPER::Accum; - using ScaleA = typename SUPER::template ColOrScalarLoad; - using ScaleB = typename SUPER::template RowOrScalarLoad; - - using Compute0 = cutlass::epilogue::threadblock::VisitorCompute< - cutlass::multiplies, float, float, - cutlass::FloatRoundStyle::round_to_nearest>; - - using EVTCompute0 = - cutlass::epilogue::threadblock::Sm80EVT; - - using Compute1 = cutlass::epilogue::threadblock::VisitorCompute< - cutlass::multiplies, ElementD, float, - cutlass::FloatRoundStyle::round_to_nearest>; - - public: - using EVTCompute = - cutlass::epilogue::threadblock::Sm80EVT; - using ArgumentType = typename EVTCompute::Arguments; - - static ArgumentType prepare_args(torch::Tensor const& a_scales, - torch::Tensor const& b_scales) { - auto a_args = SUPER::template args_from_tensor(a_scales); - auto b_args = SUPER::template args_from_tensor(b_scales); - - typename EVTCompute0::Arguments evt0_args{b_args}; - return ArgumentType{a_args, evt0_args}; - } -}; - -/* - * This epilogue performs the same operation as ScaledEpilogue, but adds a bias. - * This bias can also be used in the per-tensor azp case, where the activation - * zero point (azp) is used to compute an azp correction term, - * which is folded into the bias. - * - * The bias tensor must be per-output channel. - * ScaleA and ScaleB can be per-tensor or per-token/per-channel. 
- */ -template -struct ScaledEpilogueBias - : protected ScaledEpilogueBase { - protected: - using SUPER = ScaledEpilogueBase; - using Accum = typename SUPER::Accum; - using ScaleA = typename SUPER::template ColOrScalarLoad; - using ScaleB = typename SUPER::template RowOrScalarLoad; - using Bias = typename SUPER::template RowLoad; - using Compute0 = cutlass::epilogue::threadblock::VisitorCompute< - cutlass::multiplies, float, float, - cutlass::FloatRoundStyle::round_to_nearest>; - - using EVTCompute0 = - cutlass::epilogue::threadblock::Sm80EVT; - - using Compute1 = cutlass::epilogue::threadblock::VisitorCompute< - cutlass::multiply_add, ElementD, float, - cutlass::FloatRoundStyle::round_to_nearest>; - - public: - using EVTCompute = cutlass::epilogue::threadblock::Sm80EVT; - using ArgumentType = typename EVTCompute::Arguments; - static ArgumentType prepare_args(torch::Tensor const& a_scales, - torch::Tensor const& b_scales, - torch::Tensor const& bias) { - auto a_args = SUPER::template args_from_tensor(a_scales); - auto b_args = SUPER::template args_from_tensor(b_scales); - auto bias_args = SUPER::template args_from_tensor(bias); - - typename EVTCompute0::Arguments evt0_args{b_args}; - return ArgumentType{a_args, evt0_args, bias_args}; - } -}; - -/* - * This epilogue directly supports per-tensor azp in int32 form. - * As opposed to the per-token epilogue below, this epilogue only has an azp_adj - * term, which should already be multiplied with the scalar azp. - * The azp_adj term is a 1D tensor of shape (1,n), computed as azp * J @ B. - * - * This epilogue also supports bias, which remains per-channel. - */ -template -struct ScaledEpilogueBiasAzp - : protected ScaledEpilogueBase { - private: - using SUPER = ScaledEpilogueBase; - using Accum = typename SUPER::Accum; - using ScaleA = typename SUPER::template ColOrScalarLoad; - using ScaleB = typename SUPER::template RowOrScalarLoad; - using Bias = typename SUPER::template RowOrZeroLoad; - - // This is the full AZP term, azp * J @ B, shape (1,n) - using AzpWithAdj = typename SUPER::template RowLoad; - - // Compute float(accum - azp_adj), both operands are int32_t - using ComputeAzp = cutlass::epilogue::threadblock::VisitorCompute< - cutlass::minus, float, int32_t, - cutlass::FloatRoundStyle::round_to_nearest>; - - using EVTComputeAzp = - cutlass::epilogue::threadblock::Sm80EVT; - - using ComputeScaleB = cutlass::epilogue::threadblock::VisitorCompute< - cutlass::multiplies, float, float, - cutlass::FloatRoundStyle::round_to_nearest>; - - using EVTComputeScaleB = - cutlass::epilogue::threadblock::Sm80EVT; - - using ComputeScaleBiasA = cutlass::epilogue::threadblock::VisitorCompute< - cutlass::multiply_add, ElementD, float, - cutlass::FloatRoundStyle::round_to_nearest>; - - public: - using EVTCompute = - cutlass::epilogue::threadblock::Sm80EVT; - - using ArgumentType = typename EVTCompute::Arguments; - - static ArgumentType prepare_args(torch::Tensor const& a_scales, - torch::Tensor const& b_scales, - torch::Tensor const& azp_adj, - c10::optional const& bias) { - auto a_args = SUPER::template args_from_tensor(a_scales); - auto b_args = SUPER::template args_from_tensor(b_scales); - auto bias_args = SUPER::template args_from_tensor(bias); - auto azp_adj_args = - SUPER::template args_from_tensor(azp_adj); - - typename EVTComputeAzp::Arguments evt_azp_args{{}, azp_adj_args}; - typename EVTComputeScaleB::Arguments evt_scale_b_args{b_args, evt_azp_args}; - return ArgumentType{a_args, evt_scale_b_args, bias_args}; - } -}; - -/* - * This epilogue supports 
per-token azp by computing and applying - * the correction term using a rank-1 update. If the term were materialized, - * it would require O(m*n) space, and this way it only requires O(m+n) space. - * The azp term is a 1D tensor of shape (m,1), and represents the unscaled zero - * point for each row of A. - * The azp_adj term is a 1D tensor of shape (1,n), computed as J @ B. - * - * This epilogue also supports bias, which remains per-channel. - */ -template -struct ScaledEpilogueBiasAzpToken - : protected ScaledEpilogueBase { - private: - using SUPER = ScaledEpilogueBase; - using Accum = typename SUPER::Accum; - using ScaleA = typename SUPER::template ColOrScalarLoad; - using ScaleB = typename SUPER::template RowOrScalarLoad; - using Bias = typename SUPER::template RowOrZeroLoad; - - // Per-token azp term, shape (m,1) - using Azp = typename SUPER::template ColLoad; - - // This is the AZP adjustment term, J @ B, shape (1,n) - using AzpAdj = typename SUPER::template RowLoad; - - // Compute azp * azp_adj - using ComputeAzp = cutlass::epilogue::threadblock::VisitorCompute< - cutlass::multiplies, int32_t, int32_t, - cutlass::FloatRoundStyle::round_to_nearest>; - - using EVTComputeAzp = - cutlass::epilogue::threadblock::Sm80EVT; - - // Compute float(accum - azp*azp_adj), all operands are int32_t - using ComputeAcc = cutlass::epilogue::threadblock::VisitorCompute< - cutlass::minus, float, int32_t, - cutlass::FloatRoundStyle::round_to_nearest>; - - using EVTComputeAcc = - cutlass::epilogue::threadblock::Sm80EVT; - - using ComputeScaleB = cutlass::epilogue::threadblock::VisitorCompute< - cutlass::multiplies, float, float, - cutlass::FloatRoundStyle::round_to_nearest>; - - using EVTComputeScaleB = - cutlass::epilogue::threadblock::Sm80EVT; - - using ComputeScaleBiasA = cutlass::epilogue::threadblock::VisitorCompute< - cutlass::multiply_add, ElementD, float, - cutlass::FloatRoundStyle::round_to_nearest>; - - public: - using EVTCompute = - cutlass::epilogue::threadblock::Sm80EVT; - - using ArgumentType = typename EVTCompute::Arguments; - - static ArgumentType prepare_args(torch::Tensor const& a_scales, - torch::Tensor const& b_scales, - torch::Tensor const& azp_adj, - torch::Tensor const& azp, - c10::optional const& bias) { - auto a_args = SUPER::template args_from_tensor(a_scales); - auto b_args = SUPER::template args_from_tensor(b_scales); - auto bias_args = SUPER::template args_from_tensor(bias); - auto azp_args = SUPER::template args_from_tensor(azp); - auto azp_adj_args = - SUPER::template args_from_tensor(azp_adj); - - typename EVTComputeAzp::Arguments evt_azp_args{azp_args, azp_adj_args}; - typename EVTComputeAcc::Arguments evt_acc_args{{}, evt_azp_args}; - typename EVTComputeScaleB::Arguments evt_scale_b_args{b_args, evt_acc_args}; - return ArgumentType{a_args, evt_scale_b_args, bias_args}; - } -}; - template typename ArchGuard, typename ElementAB_, typename ElementD_, template typename Epilogue_, typename TileShape, diff --git a/csrc/quantization/cutlass_w8a8/scaled_mm_c3x.cu b/csrc/quantization/cutlass_w8a8/scaled_mm_c3x.cu index 292c9e4b34e1c..33581a63d4c3d 100644 --- a/csrc/quantization/cutlass_w8a8/scaled_mm_c3x.cu +++ b/csrc/quantization/cutlass_w8a8/scaled_mm_c3x.cu @@ -23,11 +23,12 @@ #include "cutlass/epilogue/collective/collective_builder.hpp" #include "cutlass/gemm/collective/collective_builder.hpp" -#include "broadcast_load_epilogue_c3x.hpp" +#include "cutlass_extensions/epilogue/scaled_mm_epilogues_c3x.hpp" #include "common.hpp" // clang-format on using namespace cute; +using 
namespace vllm; /* This file defines quantized GEMM operations using the CUTLASS 3.x API, for @@ -56,305 +57,6 @@ struct enable_sm90_or_later : Kernel { #endif } }; - -/* - * This class provides the common load descriptors for the - * ScaledEpilogue[...] classes - */ -template -struct ScaledEpilogueBase { - protected: - using Accum = cutlass::epilogue::fusion::Sm90AccFetch; - - template - using ColOrScalarLoad = cutlass::epilogue::fusion::Sm90ColOrScalarBroadcast< - 0 /*Stages*/, typename EpilogueDescriptor::TileShape, T, - Stride, Int<0>, Int<0>>>; - - template - using RowOrScalarLoad = cutlass::epilogue::fusion::Sm90RowOrScalarBroadcast< - 0 /*Stages*/, typename EpilogueDescriptor::TileShape, T, - Stride, Int<1>, Int<0>>>; - - // Don't want to support nullptr by default - template - using ColLoad = cutlass::epilogue::fusion::Sm90ColBroadcast< - 0 /*Stages*/, typename EpilogueDescriptor::TileShape, T, - Stride, Int<0>, Int<0>>, 128 / sizeof_bits_v, EnableNullPtr>; - - // Don't want to support nullptr by default - template - using RowLoad = cutlass::epilogue::fusion::Sm90RowBroadcast< - 0 /*Stages*/, typename EpilogueDescriptor::TileShape, T, - Stride, Int<1>, Int<0>>, 128 / sizeof_bits_v, EnableNullPtr>; - - // This utility function constructs the arguments for the load descriptors - // from a tensor. It can handle both row and column, as well as row/column or - // scalar cases. - template - static auto args_from_tensor(torch::Tensor const& tensor) { - using Arguments = typename Descriptor::Arguments; - auto* data_ptr = static_cast(tensor.data_ptr()); - if constexpr (std::is_same_v> || - std::is_same_v>) { - return Arguments{data_ptr, tensor.numel() != 1}; - } else { - static_assert(!std::is_same_v> && - !std::is_same_v>); - return Arguments{data_ptr}; - } - } - - // This overload handles the case where there might not be a tensor, in which - // case a nullptr is passed and a constant (0) is used. - template - static auto args_from_tensor(c10::optional const& tensor) { - using Arguments = typename Descriptor::Arguments; - auto* data_ptr = tensor ? static_cast(tensor->data_ptr()) : nullptr; - static_assert(std::is_same_v> || - std::is_same_v>); - return Arguments{data_ptr}; - } -}; - -/* - This epilogue function defines a quantized GEMM operation similar to - torch.scaled_mm_. - - A and B may be both either int8 or fp8_e4m3. A can be - quantized per-tensor or per-row. B can be quantized per-tensor or per-column. - Any combination of per-tensor and per-row or column is supported. - A and B must have symmetric quantization (zero point == 0). - - So the GEMM operation is D = (a_scales * A) (b_scales * B), where the - scales are applied elementwise with numpy-style broadcasting. - - ScaleA and ScaleB define the epilogue functions that apply the scales for - the A and B operands respectively. These scales may be either per-tensor or - per row or column. 
-*/ -template -struct ScaledEpilogue - : private ScaledEpilogueBase { - private: - using SUPER = ScaledEpilogueBase; - using Accum = typename SUPER::Accum; - using ScaleA = typename SUPER::template ColOrScalarLoad; - using ScaleB = typename SUPER::template RowOrScalarLoad; - - using Compute0 = cutlass::epilogue::fusion::Sm90Compute< - cutlass::multiplies, float, float, - cutlass::FloatRoundStyle::round_to_nearest>; - - using EVTCompute0 = - cutlass::epilogue::fusion::Sm90EVT; - - using Compute1 = cutlass::epilogue::fusion::Sm90Compute< - cutlass::multiplies, ElementD, float, - cutlass::FloatRoundStyle::round_to_nearest>; - - public: - using EVTCompute = - cutlass::epilogue::fusion::Sm90EVT; - using ArgumentType = typename EVTCompute::Arguments; - - static ArgumentType prepare_args(torch::Tensor const& a_scales, - torch::Tensor const& b_scales) { - auto a_args = SUPER::template args_from_tensor(a_scales); - auto b_args = SUPER::template args_from_tensor(b_scales); - - typename EVTCompute0::Arguments evt0_args{b_args}; - return ArgumentType{a_args, evt0_args}; - } -}; - -/* - * This epilogue performs the same operation as ScaledEpilogue, but adds a bias. - * This bias can also be used in the per-tensor azp case, where the activation - * zero point (azp) is used to compute an azp correction term, - * which is folded into the bias. - * - * The bias tensor must be per-output channel. - * ScaleA and ScaleB can be per-tensor or per-token/per-channel. - */ -template -struct ScaledEpilogueBias - : private ScaledEpilogueBase { - private: - using SUPER = ScaledEpilogueBase; - using Accum = typename SUPER::Accum; - using ScaleA = typename SUPER::template ColOrScalarLoad; - using ScaleB = typename SUPER::template RowOrScalarLoad; - using Bias = typename SUPER::template RowLoad; - - using Compute0 = cutlass::epilogue::fusion::Sm90Compute< - cutlass::multiplies, float, float, - cutlass::FloatRoundStyle::round_to_nearest>; - - using EVTCompute0 = - cutlass::epilogue::fusion::Sm90EVT; - - using Compute1 = cutlass::epilogue::fusion::Sm90Compute< - cutlass::multiply_add, ElementD, float, - cutlass::FloatRoundStyle::round_to_nearest>; - - public: - using EVTCompute = - cutlass::epilogue::fusion::Sm90EVT; - - using ArgumentType = typename EVTCompute::Arguments; - static ArgumentType prepare_args(torch::Tensor const& a_scales, - torch::Tensor const& b_scales, - torch::Tensor const& bias) { - auto a_args = SUPER::template args_from_tensor(a_scales); - auto b_args = SUPER::template args_from_tensor(b_scales); - auto bias_args = SUPER::template args_from_tensor(bias); - - typename EVTCompute0::Arguments evt0_args{b_args}; - return ArgumentType{a_args, evt0_args, bias_args}; - } -}; - -/* - * This epilogue directly supports per-tensor azp in int32 form. - * As opposed to the per-token epilogue below, this epilogue only has an azp_adj - * term, which should already be multiplied with the scalar azp. - * The azp_adj term is a 1D tensor of shape (1,n), computed as azp * J @ B. - * - * This epilogue also supports bias, which remains per-channel. 
- */ -template -struct ScaledEpilogueBiasAzp - : private ScaledEpilogueBase { - private: - using SUPER = ScaledEpilogueBase; - using Accum = typename SUPER::Accum; - using ScaleA = typename SUPER::template ColOrScalarLoad; - using ScaleB = typename SUPER::template RowOrScalarLoad; - using Bias = typename SUPER::template RowLoad; - - // This is the full AZP term, azp * J @ B, shape (1,n) - using AzpWithAdj = typename SUPER::template RowLoad; - - // Compute float(accum - azp_adj), both operands are int32_t - using ComputeAzp = cutlass::epilogue::fusion::Sm90Compute< - cutlass::minus, float, int32_t, - cutlass::FloatRoundStyle::round_to_nearest>; - - using EVTComputeAzp = - cutlass::epilogue::fusion::Sm90EVT; - - using ComputeScaleB = cutlass::epilogue::fusion::Sm90Compute< - cutlass::multiplies, float, float, - cutlass::FloatRoundStyle::round_to_nearest>; - - using EVTComputeScaleB = - cutlass::epilogue::fusion::Sm90EVT; - - using ComputeScaleBiasA = cutlass::epilogue::fusion::Sm90Compute< - cutlass::multiply_add, ElementD, float, - cutlass::FloatRoundStyle::round_to_nearest>; - - public: - using EVTCompute = - cutlass::epilogue::fusion::Sm90EVT; - using ArgumentType = typename EVTCompute::Arguments; - - static ArgumentType prepare_args(torch::Tensor const& a_scales, - torch::Tensor const& b_scales, - torch::Tensor const& azp_adj, - c10::optional const& bias) { - auto a_args = SUPER::template args_from_tensor(a_scales); - auto b_args = SUPER::template args_from_tensor(b_scales); - auto bias_args = SUPER::template args_from_tensor(bias); - auto azp_adj_args = - SUPER::template args_from_tensor(azp_adj); - - typename EVTComputeAzp::Arguments evt_azp_args{{}, azp_adj_args}; - typename EVTComputeScaleB::Arguments evt_scale_b_args{b_args, evt_azp_args}; - return ArgumentType{a_args, evt_scale_b_args, bias_args}; - } -}; - -/* - * This epilogue supports per-token azp by computing and applying - * the correction term using a rank-1 update. If the term were materialized, - * it would require O(m*n) space, and this way it only requires O(m+n) space. - * The azp term is a 1D tensor of shape (m,1), and represents the unscaled zero - * point for each row of A. - * The azp_adj term is a 1D tensor of shape (1,n), computed as J @ B. - * - * This epilogue also supports bias, which remains per-channel. 
- */ -template -struct ScaledEpilogueBiasAzpToken - : private ScaledEpilogueBase { - private: - using SUPER = ScaledEpilogueBase; - using Accum = typename SUPER::Accum; - using ScaleA = typename SUPER::template ColOrScalarLoad; - using ScaleB = typename SUPER::template RowOrScalarLoad; - using Bias = typename SUPER::template RowLoad; - - // Per-token azp term, shape (m,1) - using Azp = typename SUPER::template ColLoad; - - // This is the AZP adjustment term, J @ B, shape (1,n) - using AzpAdj = typename SUPER::template RowLoad; - - // Compute azp * azp_adj - using ComputeAzp = cutlass::epilogue::fusion::Sm90Compute< - cutlass::multiplies, int32_t, int32_t, - cutlass::FloatRoundStyle::round_to_nearest>; - - using EVTComputeAzp = - cutlass::epilogue::fusion::Sm90EVT; - - // Compute float(accum - azp*azp_adj), all operands are int32_t - using ComputeAcc = cutlass::epilogue::fusion::Sm90Compute< - cutlass::minus, float, int32_t, - cutlass::FloatRoundStyle::round_to_nearest>; - - using EVTComputeAcc = - cutlass::epilogue::fusion::Sm90EVT; - - using ComputeScaleB = cutlass::epilogue::fusion::Sm90Compute< - cutlass::multiplies, float, float, - cutlass::FloatRoundStyle::round_to_nearest>; - - using EVTComputeScaleB = - cutlass::epilogue::fusion::Sm90EVT; - - using ComputeScaleBiasA = cutlass::epilogue::fusion::Sm90Compute< - cutlass::multiply_add, ElementD, float, - cutlass::FloatRoundStyle::round_to_nearest>; - - public: - using EVTCompute = - cutlass::epilogue::fusion::Sm90EVT; - using ArgumentType = typename EVTCompute::Arguments; - - static ArgumentType prepare_args(torch::Tensor const& a_scales, - torch::Tensor const& b_scales, - torch::Tensor const& azp_adj, - torch::Tensor const& azp, - c10::optional const& bias) { - auto a_args = SUPER::template args_from_tensor(a_scales); - auto b_args = SUPER::template args_from_tensor(b_scales); - auto bias_args = SUPER::template args_from_tensor(bias); - auto azp_args = SUPER::template args_from_tensor(azp); - auto azp_adj_args = - SUPER::template args_from_tensor(azp_adj); - - typename EVTComputeAzp::Arguments evt_azp_args{azp_args, azp_adj_args}; - typename EVTComputeAcc::Arguments evt_acc_args{{}, evt_azp_args}; - typename EVTComputeScaleB::Arguments evt_scale_b_args{b_args, evt_acc_args}; - return ArgumentType{a_args, evt_scale_b_args, bias_args}; - } -}; - template typename Epilogue_, typename TileShape, typename ClusterShape, typename KernelSchedule, @@ -721,11 +423,11 @@ void cutlass_scaled_mm_sm90(torch::Tensor& c, torch::Tensor const& a, if (bias) { TORCH_CHECK(bias->dtype() == c.dtype(), "currently bias dtype must match output dtype ", c.dtype()); - return cutlass_scaled_mm_sm90_epilogue( + return cutlass_scaled_mm_sm90_epilogue( c, a, b, a_scales, b_scales, *bias); } else { - return cutlass_scaled_mm_sm90_epilogue(c, a, b, a_scales, - b_scales); + return cutlass_scaled_mm_sm90_epilogue( + c, a, b, a_scales, b_scales); } } @@ -740,10 +442,10 @@ void cutlass_scaled_mm_azp_sm90(torch::Tensor& out, torch::Tensor const& a, TORCH_CHECK(b_scales.dtype() == torch::kFloat32); if (azp) { - return cutlass_scaled_mm_sm90_epilogue( + return cutlass_scaled_mm_sm90_epilogue( out, a, b, a_scales, b_scales, azp_adj, *azp, bias); } else { - return cutlass_scaled_mm_sm90_epilogue( + return cutlass_scaled_mm_sm90_epilogue( out, a, b, a_scales, b_scales, azp_adj, bias); } } diff --git a/csrc/quantization/machete/generate.py b/csrc/quantization/machete/generate.py index d126af1849024..ac63afe79a255 100644 --- a/csrc/quantization/machete/generate.py +++ 
b/csrc/quantization/machete/generate.py @@ -3,8 +3,10 @@ import os import shutil from collections.abc import Iterable -from dataclasses import dataclass -from typing import List, Optional, Tuple, Union +from copy import deepcopy +from dataclasses import dataclass, fields +from functools import reduce +from typing import Dict, List, Optional, Tuple, Union import jinja2 # yapf conflicts with isort for this block @@ -14,7 +16,10 @@ MixedInputKernelScheduleType, TileSchedulerTag, TileSchedulerType, VLLMDataType, - VLLMDataTypeNames, VLLMDataTypeTag, + VLLMDataTypeNames, + VLLMDataTypeSize, VLLMDataTypeTag, + VLLMDataTypeTorchDataTypeTag, + VLLMDataTypeVLLMScalarTypeTag, VLLMKernelScheduleTag) # yapf: enable @@ -27,49 +32,125 @@ #include "../machete_mm_launcher.cuh" namespace machete { -using GemmDispatcher_ = GemmDispatcher< - {{DataTypeTag[type_config.element_a]}}, // ElementA - {{DataTypeTag[type_config.element_b]}}, // ElementB - {{DataTypeTag[type_config.element_d]}}, // ElementD - {{DataTypeTag[type_config.accumulator]}}, // Accumulator - {{DataTypeTag[type_config.element_b_scale]}}, // Scales - {{DataTypeTag[type_config.element_b_zeropoint]}}>; // Zeropoints - -{% for s in schedules %}extern torch::Tensor -impl_{{type_name}}_sch_{{ gen_sch_name(s) }}(PyTorchArguments args); -{% endfor %} -template <> -torch::Tensor GemmDispatcher_::dispatch(PyTorchArguments args) { + +{% for impl_config in impl_configs %} +{% set type_sig = gen_type_sig(impl_config.types) -%} +{% for s in impl_config.schedules %} +extern torch::Tensor impl_{{type_sig}}_sch_{{gen_sch_sig(s)}}(MMArgs); +{%- endfor %} + +torch::Tensor mm_dispatch_{{type_sig}}(MMArgs args) { [[maybe_unused]] auto M = args.A.size(0); [[maybe_unused]] auto N = args.B.size(1); [[maybe_unused]] auto K = args.A.size(1); - if (!args.schedule) { - {%- for cond, s in heuristic %} + if (!args.maybe_schedule) { + {%- for cond, s in impl_config.heuristic %} {%if cond is not none%}if ({{cond}}) {%- else %}else {%- endif %} - return impl_{{ type_name }}_sch_{{ gen_sch_name(s) }}(args);{% endfor %} + return impl_{{type_sig}}_sch_{{ gen_sch_sig(s) }}(args);{% endfor %} } - {% for s in schedules %} - if (*args.schedule == "{{ gen_sch_name(s) }}") { - return impl_{{ type_name }}_sch_{{ gen_sch_name(s) }}(args); - } - {% endfor %} + {%- for s in impl_config.schedules %} + if (*args.maybe_schedule == "{{ gen_sch_sig(s) }}") + return impl_{{type_sig}}_sch_{{ gen_sch_sig(s) }}(args); + {%- endfor %} TORCH_CHECK_NOT_IMPLEMENTED(false, "machete_gemm(..) 
is not implemented for " - "schedule = ", *args.schedule); + "schedule = ", *args.maybe_schedule); } +{%- endfor %} + -template <> -std::vector GemmDispatcher_::supported_schedules() { - return { - {% for s in schedules -%} - "{{ gen_sch_name(s) }}"{{ ", - " if not loop.last }}{%- endfor %} - }; +static inline std::optional maybe_scalartype( + c10::optional const& t) { + if (!t) { + return std::nullopt; + } else { + return t->scalar_type(); + }; +} + +torch::Tensor mm_dispatch(MMArgs args) { + auto out_type = args.maybe_out_type.value_or(args.A.scalar_type()); + auto a_type = args.A.scalar_type(); + auto maybe_g_scales_type = maybe_scalartype(args.maybe_group_scales); + auto maybe_g_zeros_type = maybe_scalartype(args.maybe_group_zeros); + auto maybe_ch_scales_type = maybe_scalartype(args.maybe_channel_scales); + auto maybe_tok_scales_type = maybe_scalartype(args.maybe_token_scales); + + {% for impl_config in impl_configs %} + {% set t = impl_config.types -%} + {% set type_sig = gen_type_sig(t) -%} + if (args.b_type == {{VLLMScalarTypeTag[t.b]}} + && a_type == {{TorchTypeTag[t.a]}} + && out_type == {{TorchTypeTag[t.out]}} + && {%if t.b_group_scale != void -%} + maybe_g_scales_type == {{TorchTypeTag[t.b_group_scale]}} + {%- else %}!maybe_g_scales_type{%endif%} + && {%if t.b_group_zeropoint != void -%} + maybe_g_zeros_type == {{TorchTypeTag[t.b_group_zeropoint]}} + {%- else %}!maybe_g_zeros_type{%endif%} + && {%if t.b_channel_scale != void -%} + maybe_ch_scales_type == {{TorchTypeTag[t.b_channel_scale]}} + {%- else %}!maybe_ch_scales_type{%endif%} + && {%if t.a_token_scale != void -%} + maybe_tok_scales_type == {{TorchTypeTag[t.a_token_scale]}} + {%- else %}!maybe_tok_scales_type{%endif%} + ) { + return mm_dispatch_{{type_sig}}(args); + } + {%- endfor %} + + TORCH_CHECK_NOT_IMPLEMENTED( + false, "machete_mm(..) is not implemented for " + "a_type=", args.A.scalar_type(), + ", b_type=", args.b_type.str(), + ", out_type=", out_type, + ", with_group_scale_type=", maybe_g_scales_type + ? toString(*maybe_g_scales_type) : "None", + ", with_group_zeropoint_type=", maybe_g_zeros_type + ? toString(*maybe_g_zeros_type) : "None", + ", with_channel_scale_type=", maybe_ch_scales_type + ? toString(*maybe_ch_scales_type) : "None", + ", with_token_scale_type=", maybe_tok_scales_type + ? 
toString(*maybe_tok_scales_type) : "None", + "; implemented types are: \\n", + {%- for impl_config in impl_configs %} + {% set t = impl_config.types -%} + "\\t{{gen_type_option_name(t)}}\\n", + {%- endfor %} + ""); } +std::vector supported_schedules_dispatch( + SupportedSchedulesArgs args) { + auto out_type = args.maybe_out_type.value_or(args.a_type); + + {% for impl_config in impl_configs %} + {% set t = impl_config.types -%} + {% set schs = impl_config.schedules -%} + if (args.b_type == {{VLLMScalarTypeTag[t.b]}} + && args.a_type == {{TorchTypeTag[t.a]}} + && out_type == {{TorchTypeTag[t.out]}} + && {%if t.b_group_scale != void -%} + args.maybe_group_scales_type == {{TorchTypeTag[t.b_group_scale]}} + {%- else %}!args.maybe_group_scales_type{%endif%} + && {%if t.b_group_zeropoint != void-%} + args.maybe_group_zeros_type == {{TorchTypeTag[t.b_group_zeropoint]}} + {%- else %}!args.maybe_group_zeros_type{%endif%} + ) { + return { + {%- for s in impl_config.schedules %} + "{{gen_sch_sig(s)}}"{% if not loop.last %},{% endif %} + {%- endfor %} + }; + } + {%- endfor %} + + return {}; +}; + }; // namespace machete """ @@ -77,20 +158,10 @@ #include "../machete_mm_launcher.cuh" namespace machete { -template -using Kernel = MacheteKernelTemplate< - {{DataTypeTag[type_config.element_a]}}, // ElementA - {{DataTypeTag[type_config.element_b]}}, // ElementB - {{DataTypeTag[type_config.element_d]}}, // ElementD - {{DataTypeTag[type_config.accumulator]}}, // Accumulator - {{DataTypeTag[type_config.element_b_scale]}}, // Scales - {{DataTypeTag[type_config.element_b_zeropoint]}}, // Zeropoints - cutlass::gemm::KernelTmaWarpSpecializedCooperativeMixedInput, - Config, with_C, with_scales, with_zeropoints>; - -{% for sch in schedules %} -{% set schedule_name = gen_sch_name(sch) -%} -struct sch_{{schedule_name}} { + +{% for sch in unique_schedules(impl_configs) %} +{% set sch_sig = gen_sch_sig(sch) -%} +struct sch_{{sch_sig}} { using TileShapeNM = Shape<{{ to_cute_constant(sch.tile_shape_mn)|join(', ')}}>; using ClusterShape = Shape<{{ @@ -101,27 +172,34 @@ using TileScheduler = {{TileSchedulerTag[sch.tile_scheduler]}}; using EpilogueTileType = cutlass::epilogue::collective::EpilogueTileAuto; }; - +{% endfor %} + +{% for impl_config in impl_configs %} +{% set t = impl_config.types -%} +{% set schs = impl_config.schedules -%} +{% set type_sig = gen_type_sig(t) -%} + +template +using Kernel_{{type_sig}} = MacheteKernelTemplate< + {{DataTypeTag[t.a]}}, // ElementA + {{DataTypeTag[t.b]}}, // ElementB + {{DataTypeTag[t.out]}}, // ElementD + {{DataTypeTag[t.accumulator]}}, // Accumulator + {{DataTypeTag[t.b_group_scale]}}, // GroupScaleT + {{DataTypeTag[t.b_group_zeropoint]}}, // GroupZeroT + {{DataTypeTag[t.b_channel_scale]}}, // ChannelScaleT + {{DataTypeTag[t.a_token_scale]}}, // TokenScaleT + cutlass::gemm::KernelTmaWarpSpecializedCooperativeMixedInput, + Sch>; + +{% for sch in schs %} +{% set sch_sig = gen_sch_sig(sch) -%} torch::Tensor -impl_{{type_name}}_sch_{{schedule_name}}(PyTorchArguments args) { - bool with_C = args.C.has_value(), with_scales = args.scales.has_value(), - with_zeropoints = args.zeros.has_value(); - - {% for s in specializations %} - if (with_C == {{s.with_C|lower}} - && with_zeropoints == {{s.with_zeropoints|lower}} - && with_scales == {{s.with_scales|lower}}) { - return run_impl>(args); - }{% endfor %} - - TORCH_CHECK_NOT_IMPLEMENTED( - false, "for the sake of compile times and binary size machete_mm(..) 
is " - " not implemented for with_C=", with_C, ", with_scales=", with_scales, - ", with_zeropoints=", with_zeropoints, - " (for {{type_name}}_sch_{{schedule_name}})"); +impl_{{type_sig}}_sch_{{sch_sig}}(MMArgs args) { + return run_impl>(args); } -{% endfor %} +{%- endfor %} +{%- endfor %} }; // namespace machete """ @@ -130,26 +208,34 @@ #include "../machete_prepack_launcher.cuh" namespace machete { -using PrepackBDispatcher_ = PrepackBDispatcher< - {{DataTypeTag[type_config.element_a]}}, // ElementA - {{DataTypeTag[type_config.element_b]}}, // ElementB - {{DataTypeTag[type_config.element_d]}}, // ElementD - {{DataTypeTag[type_config.accumulator]}}, // Accumulator - {{DataTypeTag[type_config.element_b_scale]}}, // Scales - {{DataTypeTag[type_config.element_b_zeropoint]}}>; // Zeropoints - -using PrepackedLayoutB = PrepackedLayoutBTemplate< - {{DataTypeTag[type_config.element_a]}}, // ElementA - {{DataTypeTag[type_config.element_b]}}, // ElementB - {{DataTypeTag[type_config.element_d]}}, // ElementD - {{DataTypeTag[type_config.accumulator]}}, // Accumulator - cutlass::layout::ColumnMajor, - cutlass::gemm::KernelTmaWarpSpecializedCooperativeMixedInput>; - -template <> -torch::Tensor PrepackBDispatcher_::dispatch(torch::Tensor B) { - return prepack_impl(B); + +torch::Tensor prepack_B_dispatch(PrepackBArgs args) { + auto convert_type = args.maybe_group_scales_type.value_or(args.a_type); + {%- for t in types %} + {% set b_type = unsigned_type_with_bitwidth(t.b_num_bits) %} + if (args.a_type == {{TorchTypeTag[t.a]}} + && args.b_type.size_bits() == {{t.b_num_bits}} + && convert_type == {{TorchTypeTag[t.convert]}}) { + return prepack_impl< + PrepackedLayoutBTemplate< + {{DataTypeTag[t.a]}}, // ElementA + {{DataTypeTag[b_type]}}, // ElementB + {{DataTypeTag[t.convert]}}, // ElementConvert + {{DataTypeTag[t.accumulator]}}, // Accumulator + cutlass::layout::ColumnMajor, + cutlass::gemm::KernelTmaWarpSpecializedCooperativeMixedInput> + >(args.B); + } + {%- endfor %} + + TORCH_CHECK_NOT_IMPLEMENTED(false, + "prepack_B_dispatch(..) is not implemented for " + "atype = ", args.a_type, + ", b_type = ", args.b_type.str(), + ", with_group_scales_type= ", args.maybe_group_scales_type ? 
+ toString(*args.maybe_group_scales_type) : "None"); } + }; // namespace machete """ @@ -166,32 +252,34 @@ class ScheduleConfig: tile_scheduler: TileSchedulerType -@dataclass +@dataclass(frozen=True) class TypeConfig: - element_a: DataType - element_b: Union[DataType, VLLMDataType] - element_b_scale: DataType - element_b_zeropoint: DataType - element_d: DataType + a: DataType + b: Union[DataType, VLLMDataType] + b_group_scale: DataType + b_group_zeropoint: DataType + b_channel_scale: DataType + a_token_scale: DataType + out: DataType accumulator: DataType -@dataclass -class Specialization: - with_C: bool - with_zeropoints: bool - with_scales: bool +@dataclass(frozen=True) +class PrepackTypeConfig: + a: DataType + b_num_bits: int + convert: DataType + accumulator: DataType @dataclass class ImplConfig: - type_config: TypeConfig - schedule_configs: List[ScheduleConfig] - specializations: List[Specialization] + types: TypeConfig + schedules: List[ScheduleConfig] heuristic: List[Tuple[Optional[str], ScheduleConfig]] -def generate_schedule_name(schedule_config: ScheduleConfig) -> str: +def generate_sch_sig(schedule_config: ScheduleConfig) -> str: tile_shape = ( f"{schedule_config.tile_shape_mn[0]}x{schedule_config.tile_shape_mn[1]}" ) @@ -209,40 +297,34 @@ def generate_schedule_name(schedule_config: ScheduleConfig) -> str: f"_{epilogue_schedule}_{tile_scheduler}") -# mostly unique shorter schedule_name -def generate_terse_schedule_name(schedule_config: ScheduleConfig) -> str: +# mostly unique shorter sch_sig +def generate_terse_sch_sig(schedule_config: ScheduleConfig) -> str: kernel_terse_names_replace = { "KernelTmaWarpSpecializedCooperativeMixedInput_": "TmaMI_", "TmaWarpSpecializedCooperative_": "TmaCoop_", "StreamKScheduler": "streamK", } - schedule_name = generate_schedule_name(schedule_config) + sch_sig = generate_sch_sig(schedule_config) for orig, terse in kernel_terse_names_replace.items(): - schedule_name = schedule_name.replace(orig, terse) - return schedule_name + sch_sig = sch_sig.replace(orig, terse) + return sch_sig # unique type_name -def generate_type_signature(kernel_type_config: TypeConfig): - element_a = VLLMDataTypeNames[kernel_type_config.element_a] - element_b = VLLMDataTypeNames[kernel_type_config.element_b] - element_d = VLLMDataTypeNames[kernel_type_config.element_d] - accumulator = VLLMDataTypeNames[kernel_type_config.accumulator] - element_scale = VLLMDataTypeNames[kernel_type_config.element_b_scale] - element_zeropoint = VLLMDataTypeNames[ - kernel_type_config.element_b_zeropoint] - - return (f"{element_a}{element_b}{element_d}" - f"{accumulator}{element_scale}{element_zeropoint}") - +def generate_type_signature(kernel_types: TypeConfig): + return str("".join([ + VLLMDataTypeNames[getattr(kernel_types, field.name)] + for field in fields(TypeConfig) + ])) -# non-unique shorter type_name -def generate_terse_type_signature(kernel_type_config: TypeConfig): - element_a = VLLMDataTypeNames[kernel_type_config.element_a] - element_b = VLLMDataTypeNames[kernel_type_config.element_b] - return f"{element_a}{element_b}" +def generate_type_option_name(kernel_types: TypeConfig): + return ", ".join([ + f"{field.name.replace('b_', 'with_')+'_type'}=" + + VLLMDataTypeNames[getattr(kernel_types, field.name)] + for field in fields(TypeConfig) + ]) def is_power_of_two(n): @@ -263,13 +345,36 @@ def _to_cute_constant(value: int): return _to_cute_constant(value) +def unique_schedules(impl_configs: List[ImplConfig]): + return list( + set(sch for impl_config in impl_configs + for sch in 
impl_config.schedules)) + + +def unsigned_type_with_bitwidth(num_bits): + return { + 4: DataType.u4, + 8: DataType.u8, + 16: DataType.u16, + 32: DataType.u32, + 64: DataType.u64, + }[num_bits] + + template_globals = { + "void": DataType.void, "DataTypeTag": VLLMDataTypeTag, + "VLLMScalarTypeTag": VLLMDataTypeVLLMScalarTypeTag, + "TorchTypeTag": VLLMDataTypeTorchDataTypeTag, "KernelScheduleTag": VLLMKernelScheduleTag, "EpilogueScheduleTag": EpilogueScheduleTag, "TileSchedulerTag": TileSchedulerTag, "to_cute_constant": to_cute_constant, - "gen_sch_name": generate_terse_schedule_name, + "gen_sch_sig": generate_terse_sch_sig, + "gen_type_sig": generate_type_signature, + "unique_schedules": unique_schedules, + "unsigned_type_with_bitwidth": unsigned_type_with_bitwidth, + "gen_type_option_name": generate_type_option_name } @@ -284,42 +389,82 @@ def create_template(template_str): prepack_dispatch_template = create_template(PREPACK_TEMPLATE) -def create_sources(impl_config: ImplConfig, num_impl_files=1): +def create_sources(impl_configs: List[ImplConfig], num_impl_files=8): sources = [] - type_name = generate_type_signature(impl_config.type_config) - terse_type_name = generate_terse_type_signature(impl_config.type_config) - sources.append(( - f"machete_mm_{terse_type_name}", - mm_dispatch_template.render(type_name=type_name, - type_config=impl_config.type_config, - schedules=impl_config.schedule_configs, - heuristic=impl_config.heuristic), + "machete_mm_dispatch", + mm_dispatch_template.render(impl_configs=impl_configs), )) + prepack_types = [] + for impl_config in impl_configs: + convert_type = impl_config.types.a \ + if impl_config.types.b_group_scale == DataType.void \ + else impl_config.types.b_group_scale + prepack_types.append( + PrepackTypeConfig( + a=impl_config.types.a, + b_num_bits=VLLMDataTypeSize[impl_config.types.b], + convert=convert_type, + accumulator=impl_config.types.accumulator, + )) + + def prepacked_type_key(prepack_type: PrepackTypeConfig): + # For now we we can just use the first accumulator type seen since + # the tensor core shapes/layouts don't vary based on accumulator + # type so we can generate less code this way + return (prepack_type.a, prepack_type.b_num_bits, prepack_type.convert) + + unique_prepack_types = [] + prepack_types_seen = set() + for prepack_type in prepack_types: + key = prepacked_type_key(prepack_type) + if key not in prepack_types_seen: + unique_prepack_types.append(prepack_type) + prepack_types_seen.add(key) + sources.append(( - f"machete_prepack_{terse_type_name}", - prepack_dispatch_template.render( - type_name=type_name, - type_config=impl_config.type_config, - ), + "machete_prepack", + prepack_dispatch_template.render(types=unique_prepack_types, ), )) - num_schedules = len(impl_config.schedule_configs) - schedules_per_file = math.ceil(num_schedules / num_impl_files) - for part, i in enumerate(range(0, num_schedules, schedules_per_file)): - file_schedules = impl_config.schedule_configs[i:i + schedules_per_file] + # Split up impls across files + num_impls = reduce(lambda x, y: x + len(y.schedules), impl_configs, 0) + num_impls_per_file = math.ceil(num_impls / num_impl_files) + + files_impls: List[List[ImplConfig]] = [[]] + + curr_num_impls_assigned = 0 + curr_impl_in_file = 0 + curr_impl_configs = deepcopy(list(reversed(impl_configs))) + + while curr_num_impls_assigned < num_impls: + room_left_in_file = num_impls_per_file - curr_impl_in_file + if room_left_in_file == 0: + files_impls.append([]) + room_left_in_file = num_impls_per_file + 
curr_impl_in_file = 0 + + curr_ic = curr_impl_configs[-1] + if len(curr_ic.schedules) >= room_left_in_file: + # Break apart the current impl config + tmp_ic = deepcopy(curr_ic) + tmp_ic.schedules = curr_ic.schedules[:room_left_in_file] + curr_ic.schedules = curr_ic.schedules[room_left_in_file:] + files_impls[-1].append(tmp_ic) + else: + files_impls[-1].append(curr_ic) + curr_impl_configs.pop() + curr_num_impls_assigned += len(files_impls[-1][-1].schedules) + curr_impl_in_file += len(files_impls[-1][-1].schedules) + for part, file_impls in enumerate(files_impls): sources.append(( - f"machete_mm_{terse_type_name}_impl_part{part}", - mm_impl_template.render( - type_name=type_name, - type_config=impl_config.type_config, - schedules=file_schedules, - specializations=impl_config.specializations, - ), + f"machete_mm_impl_part{part+1}", + mm_impl_template.render(impl_configs=file_impls), )) + return sources @@ -328,187 +473,169 @@ def generate(): # about how this works SCRIPT_DIR = os.path.dirname(__file__) - schedule_common_params = dict( + sch_common_params = dict( kernel_schedule=TmaMI, epilogue_schedule=TmaCoop, tile_scheduler=TileSchedulerType.StreamK, ) - # For now we use the same heuristic for all types - # Heuristic is currently tuned for H100s - default_heuristic = [ + # Stored as "condition": ((tile_shape_mn), (cluster_shape_mnk)) + default_tile_heuristic_config = { #### M = 257+ - ( - "M > 256 && K <= 16384 && N <= 4096", - ScheduleConfig( - tile_shape_mn=(128, 128), - cluster_shape_mnk=(2, 1, 1), - **schedule_common_params # type: ignore - )), - ( - "M > 256", - ScheduleConfig( - tile_shape_mn=(128, 256), - cluster_shape_mnk=(2, 1, 1), - **schedule_common_params # type: ignore - )), + "M > 256 && K <= 16384 && N <= 4096": ((128, 128), (2, 1, 1)), + "M > 256": ((128, 256), (2, 1, 1)), #### M = 129-256 - ( - "M > 128 && K <= 4096 && N <= 4096", - ScheduleConfig( - tile_shape_mn=(128, 64), - cluster_shape_mnk=(2, 1, 1), - **schedule_common_params # type: ignore - )), - ( - "M > 128 && K <= 8192 && N <= 8192", - ScheduleConfig( - tile_shape_mn=(128, 128), - cluster_shape_mnk=(2, 1, 1), - **schedule_common_params # type: ignore - )), - ( - "M > 128", - ScheduleConfig( - tile_shape_mn=(128, 256), - cluster_shape_mnk=(2, 1, 1), - **schedule_common_params # type: ignore - )), + "M > 128 && K <= 4096 && N <= 4096": ((128, 64), (2, 1, 1)), + "M > 128 && K <= 8192 && N <= 8192": ((128, 128), (2, 1, 1)), + "M > 128": ((128, 256), (2, 1, 1)), #### M = 65-128 - ( - "M > 64 && K <= 4069 && N <= 4069", - ScheduleConfig( - tile_shape_mn=(128, 32), - cluster_shape_mnk=(2, 1, 1), - **schedule_common_params # type: ignore - )), - ( - "M > 64 && K <= 4069 && N <= 8192", - ScheduleConfig( - tile_shape_mn=(128, 64), - cluster_shape_mnk=(2, 1, 1), - **schedule_common_params # type: ignore - )), - ( - "M > 64 && K >= 8192 && N >= 12288", - ScheduleConfig( - tile_shape_mn=(256, 128), - cluster_shape_mnk=(2, 1, 1), - **schedule_common_params # type: ignore - )), - ( - "M > 64", - ScheduleConfig( - tile_shape_mn=(128, 128), - cluster_shape_mnk=(2, 1, 1), - **schedule_common_params # type: ignore - )), + "M > 64 && K <= 4069 && N <= 4069": ((128, 32), (2, 1, 1)), + "M > 64 && K <= 4069 && N <= 8192": ((128, 64), (2, 1, 1)), + "M > 64 && K >= 8192 && N >= 12288": ((256, 128), (2, 1, 1)), + "M > 64": ((128, 128), (2, 1, 1)), #### M = 33-64 - ( - "M > 32 && K <= 6144 && N <= 6144", - ScheduleConfig( - tile_shape_mn=(128, 16), - cluster_shape_mnk=(1, 1, 1), - **schedule_common_params # type: ignore - )), - ( - "M > 32 
&& K >= 16384 && N >= 12288", - ScheduleConfig( - tile_shape_mn=(256, 64), - cluster_shape_mnk=(2, 1, 1), - **schedule_common_params # type: ignore - )), - ( - "M > 32", - ScheduleConfig( - tile_shape_mn=(128, 64), - cluster_shape_mnk=(2, 1, 1), - **schedule_common_params # type: ignore - )), + "M > 32 && K <= 6144 && N <= 6144": ((128, 16), (1, 1, 1)), + "M > 32 && K >= 16384 && N >= 12288": ((256, 64), (2, 1, 1)), + "M > 32": ((128, 64), (2, 1, 1)), #### M = 17-32 - ( - "M > 16 && K <= 12288 && N <= 8192", - ScheduleConfig( - tile_shape_mn=(128, 32), - cluster_shape_mnk=(2, 1, 1), - **schedule_common_params # type: ignore - )), - ( - "M > 16", - ScheduleConfig( - tile_shape_mn=(256, 32), - cluster_shape_mnk=(2, 1, 1), - **schedule_common_params # type: ignore - )), + "M > 16 && K <= 12288 && N <= 8192": ((128, 32), (2, 1, 1)), + "M > 16": ((256, 32), (2, 1, 1)), #### M = 1-16 - ( - "N >= 26624", - ScheduleConfig( - tile_shape_mn=(256, 16), - cluster_shape_mnk=(1, 1, 1), - **schedule_common_params # type: ignore - )), - ( - None, - ScheduleConfig( - tile_shape_mn=(128, 16), - cluster_shape_mnk=(1, 1, 1), - **schedule_common_params # type: ignore - )), + "N >= 26624": ((256, 16), (1, 1, 1)), + None: ((128, 16), (1, 1, 1)), + } + + # For now we use the same heuristic for all types + # Heuristic is currently tuned for H100s + default_heuristic = [ + (cond, ScheduleConfig(*tile_config, + **sch_common_params)) # type: ignore + for cond, tile_config in default_tile_heuristic_config.items() ] - # Do not use schedules = list(set(...)) because we need to make sure - # the output list is deterministic; otherwise the generated kernel file - # will be non-deterministic and causes ccache miss. - schedules = [] - for _, schedule_config in default_heuristic: - if schedule_config not in schedules: - schedules.append(schedule_config) + def get_unique_schedules(heuristic: Dict[str, ScheduleConfig]): + # Do not use schedules = list(set(...)) because we need to make sure + # the output list is deterministic; otherwise the generated kernel file + # will be non-deterministic and causes ccache miss. 
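        # e.g. [("M > 256", cfg_a), ("M > 128", cfg_b), (None, cfg_a)]
        # returns [cfg_a, cfg_b]: first-seen order with duplicates dropped,
        # so repeated generator runs produce byte-identical files.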
+ schedules = [] + for _, schedule_config in heuristic: + if schedule_config not in schedules: + schedules.append(schedule_config) + return schedules impl_configs = [] GPTQ_kernel_type_configs = list( TypeConfig( - element_a=element_a, - element_b=element_b, - element_b_scale=element_a, - element_b_zeropoint=element_a, - element_d=element_a, + a=a, + b=b, + b_group_scale=a, + b_group_zeropoint=DataType.void, + b_channel_scale=DataType.void, + a_token_scale=DataType.void, + out=a, accumulator=DataType.f32, - ) for element_b in (VLLMDataType.u4b8, VLLMDataType.u8b128) - for element_a in (DataType.f16, DataType.bf16)) - - GPTQ_kernel_specializations = [ - Specialization(with_C=False, with_zeropoints=False, with_scales=True) - ] + ) for b in (VLLMDataType.u4b8, VLLMDataType.u8b128) + for a in (DataType.f16, DataType.bf16)) impl_configs += [ - ImplConfig(x[0], x[1], x[2], x[3]) - for x in zip(GPTQ_kernel_type_configs, itertools.repeat(schedules), - itertools.repeat(GPTQ_kernel_specializations), + ImplConfig(x[0], x[1], x[2]) + for x in zip(GPTQ_kernel_type_configs, + itertools.repeat(get_unique_schedules(default_heuristic)), itertools.repeat(default_heuristic)) ] AWQ_kernel_type_configs = list( TypeConfig( - element_a=element_a, - element_b=element_b, - element_b_scale=element_a, - element_b_zeropoint=element_a, - element_d=element_a, + a=a, + b=b, + b_group_scale=a, + b_group_zeropoint=a, + b_channel_scale=DataType.void, + a_token_scale=DataType.void, + out=a, accumulator=DataType.f32, - ) for element_b in (DataType.u4, DataType.u8) - for element_a in (DataType.f16, DataType.bf16)) + ) for b in (DataType.u4, DataType.u8) + for a in (DataType.f16, DataType.bf16)) + + impl_configs += [ + ImplConfig(x[0], x[1], x[2]) + for x in zip(AWQ_kernel_type_configs, + itertools.repeat(get_unique_schedules(default_heuristic)), + itertools.repeat(default_heuristic)) + ] - AWQ_kernel_specializations = [ - Specialization(with_C=False, with_zeropoints=True, with_scales=True) + # Stored as "condition": ((tile_shape_mn), (cluster_shape_mnk)) + # TODO (LucasWilkinson): Further tuning required + qqq_tile_heuristic_config = { + #### M = 257+ + # ((128, 256), (2, 1, 1)) Broken for QQQ types + # TODO (LucasWilkinson): Investigate further + # "M > 256 && K <= 16384 && N <= 4096": ((128, 128), (2, 1, 1)), + # "M > 256": ((128, 256), (2, 1, 1)), + "M > 256": ((128, 128), (2, 1, 1)), + #### M = 129-256 + "M > 128 && K <= 4096 && N <= 4096": ((128, 64), (2, 1, 1)), + "M > 128 && K <= 8192 && N <= 8192": ((128, 128), (2, 1, 1)), + # ((128, 256), (2, 1, 1)) Broken for QQQ types + # TODO (LucasWilkinson): Investigate further + # "M > 128": ((128, 256), (2, 1, 1)), + "M > 128": ((128, 128), (2, 1, 1)), + #### M = 65-128 + "M > 64 && K <= 4069 && N <= 4069": ((128, 32), (2, 1, 1)), + "M > 64 && K <= 4069 && N <= 8192": ((128, 64), (2, 1, 1)), + "M > 64 && K >= 8192 && N >= 12288": ((256, 128), (2, 1, 1)), + "M > 64": ((128, 128), (2, 1, 1)), + #### M = 33-64 + "M > 32 && K <= 6144 && N <= 6144": ((128, 16), (1, 1, 1)), + # Broken for QQQ types + # TODO (LucasWilkinson): Investigate further + #"M > 32 && K >= 16384 && N >= 12288": ((256, 64), (2, 1, 1)), + "M > 32": ((128, 64), (2, 1, 1)), + #### M = 17-32 + "M > 16 && K <= 12288 && N <= 8192": ((128, 32), (2, 1, 1)), + "M > 16": ((256, 32), (2, 1, 1)), + #### M = 1-16 + "N >= 26624": ((256, 16), (1, 1, 1)), + None: ((128, 16), (1, 1, 1)), + } + + # For now we use the same heuristic for all types + # Heuristic is currently tuned for H100s + qqq_heuristic = [ + (cond, 
ScheduleConfig(*tile_config, + **sch_common_params)) # type: ignore + for cond, tile_config in qqq_tile_heuristic_config.items() + ] + + QQQ_kernel_types = [ + *(TypeConfig( + a=DataType.s8, + b=VLLMDataType.u4b8, + b_group_scale=b_group_scale, + b_group_zeropoint=DataType.void, + b_channel_scale=DataType.f32, + a_token_scale=DataType.f32, + out=DataType.f16, + accumulator=DataType.s32, + ) for b_group_scale in (DataType.f16, DataType.void)), + *(TypeConfig( + a=DataType.e4m3, + b=VLLMDataType.u4b8, + b_group_scale=b_group_scale, + b_group_zeropoint=DataType.void, + b_channel_scale=DataType.f32, + a_token_scale=DataType.f32, + out=DataType.f16, + accumulator=DataType.f32, + ) for b_group_scale in (DataType.f16, DataType.void)), ] impl_configs += [ - ImplConfig(x[0], x[1], x[2], x[3]) - for x in zip(AWQ_kernel_type_configs, itertools.repeat(schedules), - itertools.repeat(AWQ_kernel_specializations), - itertools.repeat(default_heuristic)) + ImplConfig(x[0], x[1], x[2]) + for x in zip(QQQ_kernel_types, + itertools.repeat(get_unique_schedules(qqq_heuristic)), + itertools.repeat(qqq_heuristic)) ] output_dir = os.path.join(SCRIPT_DIR, "generated") @@ -521,12 +648,11 @@ def generate(): os.makedirs(output_dir) # Render each group of configurations into separate files - for impl_config in impl_configs: - for filename, code in create_sources(impl_config): - filepath = os.path.join(output_dir, f"{filename}.cu") - with open(filepath, "w") as output_file: - output_file.write(code) - print(f"Rendered template to {filepath}") + for filename, code in create_sources(impl_configs): + filepath = os.path.join(output_dir, f"{filename}.cu") + with open(filepath, "w") as output_file: + output_file.write(code) + print(f"Rendered template to {filepath}") if __name__ == "__main__": diff --git a/csrc/quantization/machete/machete_mainloop.cuh b/csrc/quantization/machete/machete_mainloop.cuh index e8e7b14de0da1..816f33a1078e5 100644 --- a/csrc/quantization/machete/machete_mainloop.cuh +++ b/csrc/quantization/machete/machete_mainloop.cuh @@ -171,6 +171,10 @@ struct MacheteCollectiveMma { make_shape(size<0>(TileShape_MNK{}), size<2>(TileShape_MNK{}), Int{}))); + using SmemLayoutACopy = decltype(GmemLayoutA::TVbNbKL_to_offset_copy( + make_shape(size<0>(TileShape_MNK{}), size<2>(TileShape_MNK{}), + Int{}))); + using SmemLayoutAtomARowMajor = decltype(rs_smem_selector(TileShape_MNK{})), @@ -288,14 +292,7 @@ struct MacheteCollectiveMma { static_assert((size<2>(TileShape{}) % size<1>(SmemLayoutAtomScale{})) == 0, "SmemLayoutAtomScale must evenly divide tile k shape."); - // Tile along modes in a way that maximizes the TMA box size. 
- using SmemLayoutACopy = decltype(tile_to_shape( - SmemLayoutAtomARowMajor{}, - make_shape(shape<0>(TileShape{}), shape<2>(TileShape{}), - Int{}), - conditional_t<::cutlass::gemm::detail::is_major<0, StrideA>(), - Step<_2, _1, _3>, Step<_1, _2, _3>>{})); - + // Tile along modes in a way that maximizes the TMA box size using SmemLayoutB = decltype(tile_to_shape( SmemLayoutAtomB{}, make_shape(shape<1>(TileShape{}), shape<2>(TileShape{}), @@ -428,12 +425,12 @@ struct MacheteCollectiveMma { // clang-format on // ((athrid, val), (BlocksM, BlockK), L) -> (storage_idx) - using PrepackedStrideA = decltype(stride(GmemLayoutA::TVbNbKL_to_offset( + using PrepackedStrideA = decltype(stride(GmemLayoutA::TVbNbKL_to_offset_copy( make_shape(int32_t(0), int32_t(0), int32_t(0))))); using ATensor = decltype(make_tensor( get_logical_ptr(static_cast(nullptr)), - shape(GmemLayoutA::TVbNbKL_to_offset( + shape(GmemLayoutA::TVbNbKL_to_offset_copy( make_shape(int32_t(0), int32_t(0), int32_t(0)))), PrepackedStrideA{})); @@ -450,8 +447,8 @@ struct MacheteCollectiveMma { static constexpr auto make_tma_copy_A(ATensor tensor_a = ATensor{}) { return make_tma_copy( - GmemTiledCopyA{}, tensor_a, SmemLayoutA{}(_, _, cute::Int<0>{}), - shape(SmemLayoutA{}(_, _, cute::Int<0>{})), + GmemTiledCopyA{}, tensor_a, SmemLayoutACopy{}(_, _, cute::Int<0>{}), + shape(SmemLayoutACopy{}(_, _, cute::Int<0>{})), size<1>(ClusterShape{})); // mcast along N mode for this M load, if any } @@ -584,7 +581,7 @@ struct MacheteCollectiveMma { typename Params::TMA_Scale tma_load_scale; typename Params::TMA_Zero tma_load_zero; - auto layout = GmemLayoutA::TVbNbKL_to_offset(make_shape(M, K, L)); + auto layout = GmemLayoutA::TVbNbKL_to_offset_copy(make_shape(M, K, L)); tma_load_a = make_tma_copy_A( make_logical_tensor(ptr_A, shape(layout), stride(layout))); @@ -722,7 +719,7 @@ struct MacheteCollectiveMma { // (TILE_V,TILE_B,m,k,l) auto make_gA_mkl = [&]() { // ((athrid, val), (BlocksM, BlockK), L) -> (storage_idx) - auto layout = GmemLayoutA::TVbNbKL_to_offset(make_shape(M, K, L)); + auto layout = GmemLayoutA::TVbNbKL_to_offset_copy(make_shape(M, K, L)); Tensor mA_mkl = mainloop_params.tma_load_a.get_tma_tensor(shape(layout)); return local_tile(mA_mkl, make_shape(size<0>(layout), PPBlocksPerTile_MK{}), diff --git a/csrc/quantization/machete/machete_mm_kernel.cuh b/csrc/quantization/machete/machete_mm_kernel.cuh index 4d41b8d291484..d4d19ae5deec7 100644 --- a/csrc/quantization/machete/machete_mm_kernel.cuh +++ b/csrc/quantization/machete/machete_mm_kernel.cuh @@ -21,6 +21,8 @@ #include "cutlass_extensions/cute_utils.cuh" #include "cutlass_extensions/vllm_numeric_conversion.cuh" +#include "cutlass_extensions/epilogue/scaled_mm_epilogues_c3x.hpp" +#include "cutlass_extensions/torch_utils.hpp" #include "machete_collective_builder.cuh" #include "machete_prepacked_layout.cuh" #include "machete_interleaving_utils.cuh" @@ -37,27 +39,42 @@ using namespace cute; // W is quantized, in this situation or right-hand operand is quantized so // we compute the transpose to move it to the left-hand side. 
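// i.e. with B (= W) quantized, D = A * B is computed as D^T = B^T * A^T,
// so the quantized operand becomes the left-hand input of the underlying MMA.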
template + typename AccumulatorT, typename GroupScaleT, typename GroupZeroT, + typename ChannelScaleT, typename TokenScaleT, class KernelSchedule, + typename ScheduleConfig> struct MacheteKernelTemplate { + static constexpr bool with_C = false; // not ever used + static constexpr bool with_group_scales = !std::is_same_v; + static constexpr bool with_group_zeropoints = + !std::is_same_v; + static constexpr bool with_channel_scales = + !std::is_same_v; + static constexpr bool with_token_scales = !std::is_same_v; + using MmaType = ElementA_; using ElementA = ElementA_; using ElementB = ElementB_; using ElementD = ElementD_; using ElementC = cute::conditional_t; - using ElementZ = ZeroT; - using ElementS = ScaleT; - - using ElementAccumulator = - AccumulatorT; // Element type for internal accumulation + using ElementAccumulator = AccumulatorT; using ElementCompute = AccumulatorT; // For Epilogue + // Use dummy values when we don't have scales or zeropoints + using ElementZGroup = + cute::conditional_t; + using ElementSGroup = + cute::conditional_t; + using ElementConvertGroup = + cute::conditional_t; + using ElementSChannel = + cute::conditional_t; + using ElementSToken = + cute::conditional_t; using BTypeTuple = cute::conditional_t< - with_scales, - cute::conditional_t, - cute::tuple>, + with_group_scales, + cute::conditional_t, + cute::tuple>, ElementB>; using LayoutA = cutlass::layout::RowMajor; @@ -71,8 +88,8 @@ struct MacheteKernelTemplate { using StrideA = cutlass::detail::TagToStrideA_t; using StrideC = cutlass::detail::TagToStrideA_t; using StrideD = cutlass::detail::TagToStrideA_t; - using StrideS = cutlass::detail::TagToStrideA_t; - using StrideZ = StrideS; + using StrideSGroup = cutlass::detail::TagToStrideA_t; + using StrideZGroup = StrideSGroup; using LayoutA_Transpose = typename cutlass::layout::LayoutTranspose::type; @@ -85,8 +102,8 @@ struct MacheteKernelTemplate { using OperatorClass = cutlass::arch::OpClassTensorOp; using PrepackedLayoutB = - PrepackedLayoutBTemplate; + PrepackedLayoutBTemplate; static int constexpr TileShapeK = 128 * 8 / cutlass::sizeof_bits::value; @@ -103,12 +120,42 @@ struct MacheteKernelTemplate { using EpilogueTileType = typename ScheduleConfig::EpilogueTileType; using TileScheduler = typename ScheduleConfig::TileScheduler; + static_assert( + (!with_channel_scales && !with_token_scales) || + ((with_channel_scales && with_token_scales) && + std::is_same_v), + "Currently token and channel scales (if present) must be the same type"); + + using EpilogueDescriptor = + cutlass::epilogue::collective::detail::EpilogueDescriptor< + TileShape, cutlass::epilogue::collective::EpilogueTileAuto, ElementD, + ElementD, EpilogueSchedule>; + + // Currently only supports float scales + using ChTokScalesEpilogue = + typename vllm::c3x::ScaledEpilogue; + static_assert((with_channel_scales || with_token_scales) || + (std::is_same_v && + std::is_same_v), + "Currently token and channel scales (if present) must be float " + "(and if one is present the other must be too)"); + + using StoreEpilogueCompute = typename cutlass::epilogue::fusion::Sm90EVT< + cutlass::epilogue::fusion::Sm90AccFetch>; + + using EVTCompute = + std::conditional_t; + + // EVTCompute using CollectiveEpilogue = typename cutlass::epilogue::collective::CollectiveBuilder< ArchTag, OperatorClass, TileShape, ClusterShape, EpilogueTileType, - ElementAccumulator, ElementAccumulator, ElementC, LayoutC_Transpose, - AlignmentC, ElementD, LayoutD_Transpose, AlignmentD, - EpilogueSchedule>::CollectiveOp; + 
ElementAccumulator, ElementSChannel, ElementC, LayoutC_Transpose, + AlignmentC, ElementD, LayoutD_Transpose, AlignmentD, EpilogueSchedule, + EVTCompute>::CollectiveOp; using CollectiveMainloop = typename cutlass::gemm::collective::VLLMCollectiveBuilder< @@ -131,26 +178,44 @@ struct MacheteKernelTemplate { using MainloopArguments = typename GemmKernel::MainloopArguments; using EpilogueArguments = typename GemmKernel::EpilogueArguments; - template static Arguments create_arguments( cudaStream_t stream, - ElementA const* A_ptr, // A is an MxK matrix - Layout const& layout_A, - ElementB const* B_ptr, // B is an KxN prepacked matrix - ElementD* D_ptr, // D is an MxN matrix - Layout const& layout_D, - ElementC const* C_ptr, // C is an MxN matrix - std::optional> const& layout_C, - ElementS const* S_ptr, // S is an scale_KxN matrix - std::optional> const& layout_S, - ElementZ const* Z_ptr, // Z is an scale_KxN matrix - std::optional> const& layout_Z, - ElementCompute alpha, ElementCompute beta, - std::optional maybe_group_size) { - static_assert(!with_zeropoints || with_scales); - - int M = size<0>(layout_A), N = size<1>(layout_D), K = size<1>(layout_A); + torch::Tensor const& A, // MxK matrix + torch::Tensor const& B, // KxN prepacked matrix + torch::Tensor& D, // MxN matrix + c10::optional const& maybe_g_scales, // scale_KxN matrix + c10::optional const& maybe_g_zeros, // scale_KxN matrix + c10::optional maybe_group_size, + c10::optional const& maybe_ch_scales, // len N vector + c10::optional const& maybe_tok_scales) // len M vector + { + static_assert(!with_group_zeropoints || with_group_scales); + + int M = A.size(0), N = B.size(1), K = A.size(1); + TORCH_CHECK(D.size(0) == M && D.size(1) == N); + + auto layout_A = make_cute_layout(A, "A"); + auto layout_D = make_cute_layout(D, "D"); + auto layout_S_group = + maybe_make_cute_layout(maybe_g_scales, "group_scales"); + auto layout_Z_group = + maybe_make_cute_layout(maybe_g_zeros, "group_zeros"); + int64_t numel_S_channel = maybe_ch_scales ? maybe_ch_scales->numel() : 0; + int64_t numel_S_token = maybe_tok_scales ? maybe_tok_scales->numel() : 0; + + auto unwrap = [](auto const& t) { + return t ? t->const_data_ptr() : nullptr; + }; + auto A_ptr = static_cast(A.const_data_ptr()); + auto B_ptr = static_cast(B.const_data_ptr()); + auto D_ptr = static_cast(D.mutable_data_ptr()); + auto S_group_ptr = + static_cast(unwrap(maybe_g_scales)); + auto Z_group_ptr = static_cast(unwrap(maybe_g_zeros)); + auto S_channel_ptr = + static_cast(unwrap(maybe_ch_scales)); + auto S_token_ptr = + static_cast(unwrap(maybe_tok_scales)); int const group_size = maybe_group_size == -1 ? 
K : maybe_group_size.value_or(K); @@ -159,26 +224,28 @@ struct MacheteKernelTemplate { TORCH_CHECK(size<0>(layout_A) == M && size<1>(layout_A) == K); TORCH_CHECK(size<0>(layout_D) == M && size<1>(layout_D) == N); - if constexpr (with_C) { - TORCH_CHECK(C_ptr && layout_C); + if constexpr (with_group_scales) { + TORCH_CHECK(S_group_ptr && layout_S_group); + TORCH_CHECK((size<0>(*layout_S_group) == scale_k && + size<1>(*layout_S_group) == N)); } else { - TORCH_CHECK(!C_ptr, "C not supported"); + TORCH_CHECK(!S_group_ptr, "Scales not supported"); } - if constexpr (with_scales) { - TORCH_CHECK(S_ptr && layout_S); - TORCH_CHECK((size<0>(*layout_S) == scale_k && size<1>(*layout_S) == N)); + if constexpr (with_group_zeropoints) { + TORCH_CHECK(Z_group_ptr && layout_Z_group); + TORCH_CHECK((size<0>(*layout_Z_group) == scale_k && + size<1>(*layout_Z_group) == N)); + TORCH_CHECK(layout_S_group && *layout_Z_group == *layout_S_group, + "Scales and zeros must have the same layout"); } else { - TORCH_CHECK(!S_ptr, "Scales not supported"); + TORCH_CHECK(!Z_group_ptr, "Zeropoints not supported"); } - if constexpr (with_zeropoints) { - TORCH_CHECK(Z_ptr && layout_Z); - TORCH_CHECK((size<0>(*layout_Z) == scale_k && size<1>(*layout_Z) == N)); - TORCH_CHECK(layout_S && *layout_Z == *layout_S, - "Scales and zeros must have the same layout"); - } else { - TORCH_CHECK(!Z_ptr, "Zeropoints not supported"); + if constexpr (with_channel_scales || with_token_scales) { + TORCH_CHECK( + (maybe_ch_scales->numel() == N || maybe_ch_scales->numel() == 1) && + (maybe_tok_scales->numel() == M || maybe_tok_scales->numel() == 1)); } // Transpose A and D @@ -186,24 +253,33 @@ struct MacheteKernelTemplate { // for B (which is At) auto stride_At = layout_A.stride(); auto stride_Dt = permute_layout<1, 0, 2>(layout_D).stride(); - auto stride_Ct = stride_Dt; - if (layout_C) { - stride_Ct = permute_layout<1, 0, 2>(*layout_C).stride(); - } MainloopArguments mainloop_arguments{}; - EpilogueArguments epilogue_arguments{ - {alpha, beta}, C_ptr, stride_Ct, D_ptr, stride_Dt}; + // {Accum, C, C_layout, D, D} + EpilogueArguments epilogue_arguments{}; + + if constexpr (with_channel_scales || with_token_scales) { + epilogue_arguments = + EpilogueArguments{ChTokScalesEpilogue::prepare_args( + *maybe_ch_scales, *maybe_tok_scales), + nullptr, + {}, + D_ptr, + stride_Dt}; + } else { + epilogue_arguments = EpilogueArguments{{}, nullptr, {}, D_ptr, stride_Dt}; + } - if constexpr (with_scales && with_zeropoints) { - auto stride_S = permute_layout<1, 0, 2>(*layout_S).stride(); - mainloop_arguments = - MainloopArguments{B_ptr, _StrideB{}, A_ptr, stride_At, - S_ptr, stride_S, group_size, Z_ptr}; - } else if constexpr (with_scales) { - auto stride_S = permute_layout<1, 0, 2>(*layout_S).stride(); + if constexpr (with_group_scales && with_group_zeropoints) { + auto stride_S_group = permute_layout<1, 0, 2>(*layout_S_group).stride(); mainloop_arguments = MainloopArguments{ - B_ptr, _StrideB{}, A_ptr, stride_At, S_ptr, stride_S, group_size}; + B_ptr, _StrideB{}, A_ptr, stride_At, + S_group_ptr, stride_S_group, group_size, Z_group_ptr}; + } else if constexpr (with_group_scales) { + auto stride_S_group = permute_layout<1, 0, 2>(*layout_S_group).stride(); + mainloop_arguments = + MainloopArguments{B_ptr, _StrideB{}, A_ptr, stride_At, + S_group_ptr, stride_S_group, group_size}; } else { mainloop_arguments = MainloopArguments{B_ptr, _StrideB{}, A_ptr, stride_At}; diff --git a/csrc/quantization/machete/machete_mm_launcher.cuh 
b/csrc/quantization/machete/machete_mm_launcher.cuh index 60a4ed60535b7..4b0da5b303e0c 100644 --- a/csrc/quantization/machete/machete_mm_launcher.cuh +++ b/csrc/quantization/machete/machete_mm_launcher.cuh @@ -5,73 +5,61 @@ #include "machete_mm_kernel.cuh" #include "cutlass_extensions/torch_utils.hpp" +#include "core/scalar_type.hpp" namespace machete { -struct PyTorchArguments { +struct MMArgs { torch::Tensor const& A; torch::Tensor const& B; - c10::optional const& scales; - c10::optional const& zeros; - c10::optional group_size; - c10::optional const& C; - c10::optional alpha; - c10::optional beta; - c10::optional schedule; + vllm::ScalarType const& b_type; + c10::optional const& maybe_out_type; + c10::optional const& maybe_group_scales; + c10::optional const& maybe_group_zeros; + c10::optional maybe_group_size; + c10::optional const& maybe_channel_scales; + c10::optional const& maybe_token_scales; + c10::optional maybe_schedule; }; +struct SupportedSchedulesArgs { + at::ScalarType a_type; + vllm::ScalarType b_type; + c10::optional maybe_group_scales_type; + c10::optional maybe_group_zeros_type; + c10::optional maybe_channel_scales_type; + c10::optional maybe_token_scales_type; + c10::optional maybe_out_type; +}; + +torch::Tensor mm_dispatch(MMArgs args); + +std::vector supported_schedules_dispatch( + SupportedSchedulesArgs args); + template -torch::Tensor run_impl(PyTorchArguments args) { +torch::Tensor run_impl(MMArgs args) { const at::cuda::OptionalCUDAGuard device_guard(device_of(args.A)); auto device = args.A.device(); auto stream = at::cuda::getCurrentCUDAStream(device.index()); - using EleA = typename MacheteKernel::ElementA; - using EleB = typename MacheteKernel::ElementB; - using EleC = typename MacheteKernel::ElementC; - using EleD = typename MacheteKernel::ElementD; - using EleScale = typename MacheteKernel::ElementS; - using EleZero = typename MacheteKernel::ElementZ; - - using StrideA = typename MacheteKernel::StrideA; - using StrideC = typename MacheteKernel::StrideC; - using StrideD = typename MacheteKernel::StrideD; - using StrideS = typename MacheteKernel::StrideS; - using StrideZ = typename MacheteKernel::StrideZ; - int M = args.A.size(0); int N = args.B.size(1); int K = args.A.size(1); // Allocate output - torch::Tensor D = - torch::empty({M, N}, torch::TensorOptions() - .dtype(equivalent_scalar_type_v) - .device(device)); - - auto const &A = args.A, &B = args.B; - auto const &C = args.C, &scales = args.scales, &zeros = args.zeros; - - auto layout_A = make_cute_layout(A, "A"); - auto layout_D = make_cute_layout(D, "D"); - auto layout_C = maybe_make_cute_layout(C, "C"); - auto layout_S = maybe_make_cute_layout(scales, "scales"); - auto layout_Z = maybe_make_cute_layout(zeros, "zeros"); - - auto A_ptr = static_cast(A.const_data_ptr()); - auto B_ptr = static_cast(B.const_data_ptr()); - auto D_ptr = static_cast(D.mutable_data_ptr()); - auto C_ptr = static_cast(C ? C->const_data_ptr() : nullptr); - auto S_ptr = - static_cast(scales ? scales->const_data_ptr() : nullptr); - auto Z_ptr = - static_cast(zeros ? 
zeros->const_data_ptr() : nullptr); + torch::Tensor D = torch::empty( + {M, N}, + torch::TensorOptions() + .dtype(equivalent_scalar_type_v) + .device(device)); auto arguments = MacheteKernel::create_arguments( - stream, A_ptr, layout_A, B_ptr, D_ptr, layout_D, C_ptr, layout_C, S_ptr, - layout_S, Z_ptr, layout_Z, args.alpha.value_or(1), args.beta.value_or(0), - args.group_size); + stream, // + args.A, args.B, D, args.maybe_group_scales, args.maybe_group_zeros, + args.maybe_group_size, args.maybe_channel_scales, + args.maybe_token_scales); TORCH_CHECK(MacheteKernel::can_implement(arguments), "Machete kernel cannot be run with these arguments"); @@ -84,12 +72,4 @@ torch::Tensor run_impl(PyTorchArguments args) { return D; }; -template -struct GemmDispatcher { - static torch::Tensor dispatch(PyTorchArguments args); - static std::vector supported_schedules(); -}; - }; // namespace machete \ No newline at end of file diff --git a/csrc/quantization/machete/machete_prepack_kernel.cuh b/csrc/quantization/machete/machete_prepack_kernel.cuh index f23483f928b47..d002355ca49d6 100644 --- a/csrc/quantization/machete/machete_prepack_kernel.cuh +++ b/csrc/quantization/machete/machete_prepack_kernel.cuh @@ -6,31 +6,49 @@ namespace machete { -template -static __global__ void prepack_B_kernel(BInTensor B_in, - BTiledOutTensor B_tiled_out) { - auto tB_in = local_tile(B_in, TileShapeNKL{}, - make_coord(blockIdx.x, blockIdx.y, blockIdx.z)); - auto tB_out = B_tiled_out(make_coord(_, _), - make_coord(blockIdx.x, blockIdx.y), blockIdx.z); +template +static __global__ void prepack_B_kernel(BInTensor B_in, ElementB* B_out_ptr) { + auto constexpr block_size = + Int{}; + auto constexpr eles_per_thread = Int{}; + static_assert(block_size % threads == 0, + "block_size must be divisible by the number of threads"); - auto tiled_copy = make_tiled_copy(Copy_Atom{}, - Layout, Stride<_32, _1>>{}, - Layout>{}); + // Which pre-packed are we responsible for + auto blk_coord = make_coord(blockIdx.x, blockIdx.y, blockIdx.z); + auto tB_in = local_tile( + B_in, append(typename PrepackedLayoutB::PPBlockShape_NK{}, _1{}), + blk_coord); - auto thr_copy = tiled_copy.get_thread_slice(threadIdx.x); + // Find the start offset in the output for this pre-packed block + auto bNbKL_to_offset = PrepackedLayoutB::bNbKL_to_offset(shape(B_in)); - Tensor thr_tile_S = thr_copy.partition_S(tB_in); - Tensor thr_tile_D = thr_copy.partition_D(tB_out); + // Tensor representing a 1:1 mapping to the output space in 1D + auto tB_out_linear = + make_tensor(get_logical_ptr(B_out_ptr) + bNbKL_to_offset(blk_coord), + make_layout(make_shape(block_size))); + // Mapping from output space (1D) to input space + auto tB_in_linear = make_tensor( + tB_in.data(), + tB_in.layout() + .compose(right_inverse(PrepackedLayoutB::ppblock_ilvd_NK_to_offset())) + .with_shape(make_shape(block_size))); + + // Tile for this specific thread (could have used a TiledCopy but these work + // best with 2d layouts, this is a simple 1d layout so local_tile is enough, + // we are also not that concerned with performance for this kernel) + auto thr_tB_in_linear = + local_tile(tB_in_linear, make_shape(eles_per_thread), threadIdx.x); + auto thr_tB_out_linear = + local_tile(tB_out_linear, make_shape(eles_per_thread), threadIdx.x); // Construct a register-backed Tensor with the same shape as each thread's // partition - auto fragment = make_tensor(shape(thr_tile_D)); + auto fragment = make_tensor(shape(thr_tB_in_linear)); - // Copy from GMEM to RMEM and from RMEM to GMEM - copy(tiled_copy, 
thr_tile_S, fragment); - copy(Copy_Atom{}, fragment, thr_tile_D); + copy(thr_tB_in_linear, fragment); + copy(Copy_Atom{}, fragment, thr_tB_out_linear); } template @@ -44,18 +62,15 @@ static void prepack_B_template( TORCH_CHECK(size<0>(B_layout) % size<0>(TileShapeNKL{}) == 0); TORCH_CHECK(size<1>(B_layout) % size<1>(TileShapeNKL{}) == 0); - TORCH_CHECK(size<2>(B_layout) % size<2>(TileShapeNKL{}) == 0); auto N_tiles = size<0>(B_layout) / size<0>(TileShapeNKL{}); auto K_tiles = size<1>(B_layout) / size<1>(TileShapeNKL{}); - auto L_tiles = size<2>(B_layout) / size<2>(TileShapeNKL{}); + auto L_tiles = size<2>(B_layout); auto B_in = make_tensor(get_logical_ptr(B_in_ptr), B_layout); - auto B_tiled_out = - make_tensor(get_logical_ptr(B_out_ptr), ilvd_NKbNbKL_to_offset); - prepack_B_kernel - <<>>(B_in, B_tiled_out); + prepack_B_kernel<128, PrepackedLayoutB> + <<>>(B_in, B_out_ptr); } }; // namespace machete \ No newline at end of file diff --git a/csrc/quantization/machete/machete_prepack_launcher.cuh b/csrc/quantization/machete/machete_prepack_launcher.cuh index a33d8f9484cfe..3486d28be2126 100644 --- a/csrc/quantization/machete/machete_prepack_launcher.cuh +++ b/csrc/quantization/machete/machete_prepack_launcher.cuh @@ -2,9 +2,17 @@ #include "machete_prepack_kernel.cuh" #include "cutlass_extensions/torch_utils.hpp" +#include "core/scalar_type.hpp" namespace machete { +struct PrepackBArgs { + torch::Tensor const& B; + at::ScalarType a_type; + vllm::ScalarType b_type; + c10::optional maybe_group_scales_type; +}; + template torch::Tensor prepack_impl(torch::Tensor const B) { const at::cuda::OptionalCUDAGuard device_guard(device_of(B)); @@ -61,11 +69,6 @@ torch::Tensor prepack_impl(torch::Tensor const B) { return D; }; -template -struct PrepackBDispatcher { - static torch::Tensor dispatch(torch::Tensor B); -}; +torch::Tensor prepack_B_dispatch(PrepackBArgs args); }; // namespace machete \ No newline at end of file diff --git a/csrc/quantization/machete/machete_prepacked_layout.cuh b/csrc/quantization/machete/machete_prepacked_layout.cuh index 78e2cc5eec7d8..680a858a893c1 100644 --- a/csrc/quantization/machete/machete_prepacked_layout.cuh +++ b/csrc/quantization/machete/machete_prepacked_layout.cuh @@ -41,7 +41,7 @@ struct IlvBlkLayoutAuto {}; // The contract here is that the `TiledMma` determined below matches the one // ultimately used in the kernel. 
(this is also why the other element types are // required along with the kernel schedule) -template // clang-format on @@ -49,20 +49,27 @@ struct PrepackedLayoutBTemplate { using MmaType = ElementA_; using ElementA = ElementA_; using ElementB = ElementB_; - using ElementD = ElementD_; - using ElementAccumulator = - AccumulatorT; // Element type for internal accumulation + using ElementAccumulator = AccumulatorT; using ElementMma = MmaType; - // Only use interleaved layouts for subbyte weights, prmt instructions makes - // non-interleaved layouts for 8bit+ weights efficient enough we don't need - // iterleaved layouts + // Interleave for 4bit bit types when we are not upconverting to fp8 or int8, + // in those cases case we use a LUT using prmt instructions to upconvert and + // is more efficient if the data is not interleaved For 8bit+ prmt + // instructions makes non-interleaved layouts efficient enough we don't need + // iterleaved layouts (and can reuse more of the existing cutlass converts) + static constexpr bool should_interleave = + sizeof_bits_v <= 4 && + !std::is_same_v && + !std::is_same_v; + + // Only use interleaved layouts for subbyte weights, using IlvdBlkLayout = std::conditional_t< std::is_same_v, - std::conditional_t <= 4, - decltype(get_interleaved_blk_layout< - ElementB, sizeof_bits_v, 32>()), - void>, + std::conditional_t< + should_interleave, + decltype(get_interleaved_blk_layout< + ElementB, sizeof_bits_v, 32>()), + void>, IlvBlkLayout_>; // TODO (LucasWilkinson): compare the performance for other sizes @@ -135,7 +142,8 @@ struct PrepackedLayoutBTemplate { // then ((IlvBlk), FrgB) is {A, C, B, D, C, G, D, H} auto frgV = get<1, 0>(layout_no_interleave); auto ilvdBlk = IlvdBlkLayout{}; - static_assert(size(frgV) % 4 == 0, "FrgV must be divisible by 4"); + static_assert(size(frgV) % size(ilvdBlk) == 0, + "FrgV must be divisible by size(ilvdBlk)"); auto ilvd_FrgV = make_layout( make_shape(shape(ilvdBlk), Int{}), make_stride(stride(ilvdBlk), size(ilvdBlk))); @@ -175,6 +183,15 @@ struct PrepackedLayoutBTemplate { return group<1, 3>(result(_, repeat(result)>(_))); } + // ((athrid_val), (BlocksN, BlocksK, L)) -> (N, K, L) + template + CUTE_HOST_DEVICE static constexpr auto TVbNbKL_to_offset_copy( + Shape_NKL shape_mkl) { + auto layout = TVbNbKL_to_offset(shape_mkl); + return make_layout(coalesce(get<0>(layout)), get<1>(layout), + get<2>(layout)); + } + // ((BlockN, BlockK), (BlocksN, BlocksK), L) -> (storage_idx) template CUTE_HOST_DEVICE static constexpr auto ilvd_NKbNbKL_to_offset( @@ -197,6 +214,19 @@ struct PrepackedLayoutBTemplate { return group<1, 3>(result(_, repeat(result)>(_))); } + // (BlocksN, BlocksK, L) -> (storage_idx) + template + CUTE_HOST_DEVICE static constexpr auto bNbKL_to_offset(Shape_NKL shape_mkl) { + // (BlocksN, BlocksK, L) + auto blocks_shape = + cute::transform(shape_mkl, append(PPBlockShape_NK{}, _1{}), + [](auto x, auto y) { return x / y; }); + auto stride = size(PPBlockShape_NK{}); + + // (BlocksN, BlocksK, L) -> (storage_idx) + return make_layout(blocks_shape, compact_col_major(blocks_shape, stride)); + } + // ((athrid, val), (BlocksN, BlocksK, L)) -> (N, K, L) template CUTE_HOST_DEVICE static auto TVbNbK_to_NKL(Shape_NKL shape_mkl) { diff --git a/csrc/quantization/machete/machete_pytorch.cu b/csrc/quantization/machete/machete_pytorch.cu index 9f9073ded6191..da2c2fb0d3e77 100644 --- a/csrc/quantization/machete/machete_pytorch.cu +++ b/csrc/quantization/machete/machete_pytorch.cu @@ -8,89 +8,61 @@ namespace machete { using namespace vllm; -// -// 
Utils (type dispatching) -// - -template -static auto scalar_type_dispatch(ScalarType const& type, Fn fn) { - if (type == vllm::kU4) { - return fn(cutlass::uint4b_t{}); - } else if (type == vllm::kU8) { - return fn(cutlass::uint8_t{}); - } else if (type == vllm::kU4B8) { - return fn(cutlass::vllm_uint4b8_t{}); - } else if (type == vllm::kU8B128) { - return fn(cutlass::vllm_uint8b128_t{}); - } else { - TORCH_CHECK(false, "Unsupported type ", type.str()); - } -} - -#define AT_DISPATCH_CASE_SUPPORTED_COMPUTE_TYPES(...) \ - AT_DISPATCH_CASE_REDUCED_FLOATING_TYPES(__VA_ARGS__) - -#define AT_DISPATCH_SUPPORTED_COMPUTE_TYPES(TYPE, NAME, ...) \ - AT_DISPATCH_SWITCH(TYPE, NAME, \ - AT_DISPATCH_CASE_SUPPORTED_COMPUTE_TYPES(__VA_ARGS__)) - -// -// Interface -// - -std::vector supported_schedules(ScalarTypeId const btype_id) { -#if defined(__CUDACC_VER_MAJOR__) && __CUDACC_VER_MAJOR__ >= 12 - vllm::ScalarType b_type = ScalarType::from_id(btype_id); - return scalar_type_dispatch(b_type, [&](auto BType) { - return GemmDispatcher::supported_schedules(); +std::vector supported_schedules( + at::ScalarType a_type, int64_t b_type_id, + c10::optional maybe_group_scales_type, + c10::optional maybe_group_zeros_type, + c10::optional maybe_channel_scales_type, + c10::optional maybe_token_scales_type, + c10::optional maybe_out_type) { + ScalarType const b_type = ScalarType::from_id(b_type_id); + return supported_schedules_dispatch({ + .a_type = a_type, + .b_type = b_type, + .maybe_group_scales_type = maybe_group_scales_type, + .maybe_group_zeros_type = maybe_group_zeros_type, + .maybe_channel_scales_type = maybe_channel_scales_type, + .maybe_token_scales_type = maybe_token_scales_type, + .maybe_out_type = maybe_out_type, }); -#else - TORCH_CHECK(false, "Machete requires CUDA 12.0 or later"); -#endif } -torch::Tensor gemm(torch::Tensor const& A, torch::Tensor const& B, - ScalarTypeId const btype_id, - c10::optional const& scales, - c10::optional const& zeros, - c10::optional group_size, - c10::optional const& C, - c10::optional alpha, c10::optional beta, - c10::optional schedule) { -#if defined(__CUDACC_VER_MAJOR__) && __CUDACC_VER_MAJOR__ >= 12 - ScalarType const btype = ScalarType::from_id(btype_id); - auto args = PyTorchArguments{.A = A, - .B = B, - .scales = scales, - .zeros = zeros, - .group_size = group_size, - .C = C, - .alpha = alpha, - .beta = beta, - .schedule = schedule}; - - return scalar_type_dispatch(btype, [&](auto BType) { - return AT_DISPATCH_SUPPORTED_COMPUTE_TYPES( - A.scalar_type(), "machete_gemm", [&] { - using ComputeType = equivalent_cutlass_type_t; - return GemmDispatcher::dispatch(args); - }); - }); -#else - TORCH_CHECK(false, "Machete requires CUDA 12.0 or later"); -#endif +torch::Tensor mm(torch::Tensor const& A, torch::Tensor const& B, + int64_t b_type_id, + c10::optional const& maybe_out_type, + c10::optional const& maybe_group_scales, + c10::optional const& maybe_group_zeros, + c10::optional maybe_group_size, + c10::optional const& maybe_channel_scales, + c10::optional const& maybe_token_scales, + c10::optional maybe_schedule) { + ScalarType const b_type = ScalarType::from_id(b_type_id); + return mm_dispatch({.A = A, + .B = B, + .b_type = b_type, + .maybe_out_type = maybe_out_type, + .maybe_group_scales = maybe_group_scales, + .maybe_group_zeros = maybe_group_zeros, + .maybe_group_size = maybe_group_size, + .maybe_channel_scales = maybe_channel_scales, + .maybe_token_scales = maybe_token_scales, + .maybe_schedule = maybe_schedule}); } -torch::Tensor prepack_B(torch::Tensor const& B, 
ScalarTypeId const btype_id) { - ScalarType const btype = ScalarType::from_id(btype_id); - return scalar_type_dispatch(btype, [&](auto BType) { - return PrepackBDispatcher::dispatch(B); - }); +torch::Tensor prepack_B( + torch::Tensor const& B, at::ScalarType const& a_type, int64_t b_type_id, + c10::optional const& maybe_group_scales_type) { + ScalarType const b_type = ScalarType::from_id(b_type_id); + return prepack_B_dispatch( + {.B = B, + .a_type = a_type, + .b_type = b_type, + .maybe_group_scales_type = maybe_group_scales_type}); } TORCH_LIBRARY_IMPL_EXPAND(TORCH_EXTENSION_NAME, CUDA, m) { m.impl("machete_prepack_B", &prepack_B); - m.impl("machete_gemm", &gemm); + m.impl("machete_mm", &mm); } // use CatchAll since supported_schedules has no tensor arguments diff --git a/csrc/torch_bindings.cpp b/csrc/torch_bindings.cpp index 229fd554d3eee..e4cc7ec951848 100644 --- a/csrc/torch_bindings.cpp +++ b/csrc/torch_bindings.cpp @@ -203,13 +203,36 @@ TORCH_LIBRARY_EXPAND(TORCH_EXTENSION_NAME, ops) { // conditionally compiled so impl in source file // Machete (Dense) Optimized Mixed Precision GEMM for Hopper. - ops.def("machete_supported_schedules(int btype) -> str[]"); ops.def( - "machete_gemm(Tensor A, Tensor B, int btype, " - " Tensor? scales, Tensor? zeros, int? group_size, " - " Tensor? C, float? alpha, float? beta, str? schedule)" - "-> Tensor"); - ops.def("machete_prepack_B(Tensor B, int btype) -> Tensor"); + "machete_supported_schedules(" + " ScalarType a_type," + " int b_type," + " ScalarType? maybe_group_scales_type," + " ScalarType? maybe_group_zeros_type," + " ScalarType? maybe_channel_scales_type," + " ScalarType? maybe_token_scales_type," + " ScalarType? maybe_out_type" + ") -> str[]"); + ops.def( + "machete_mm(" + " Tensor A," + " Tensor B," + " int b_type," + " ScalarType? out_type," + " Tensor? group_scales," + " Tensor? group_zeros," + " int? group_size," + " Tensor? channel_scales," + " Tensor? token_scales," + " str? schedule" + ") -> Tensor"); + ops.def( + "machete_prepack_B(" + " Tensor B," + " ScalarType a_type," + " int b_type," + " ScalarType? group_scales_type" + ") -> Tensor"); // conditionally compiled so impl registration is in source file ops.def("permute_cols(Tensor A, Tensor perm) -> Tensor"); diff --git a/tests/kernels/test_machete_gemm.py b/tests/kernels/test_machete_gemm.py deleted file mode 100644 index 59c0a24753c3b..0000000000000 --- a/tests/kernels/test_machete_gemm.py +++ /dev/null @@ -1,284 +0,0 @@ -"""Tests for the machete kernel. - -Run `pytest tests/kernels/test_machete_gemm.py`. 
-""" - -import math -from typing import Optional, Tuple - -import pytest -import torch - -from tests.kernels.utils import opcheck -from vllm import _custom_ops as ops -from vllm.model_executor.layers.quantization.utils.quant_utils import ( - pack_rows, quantize_weights) -from vllm.platforms import current_platform -from vllm.scalar_type import ScalarType, scalar_types - -CUDA_DEVICES = [ - f"cuda:{i}" for i in range(1 if torch.cuda.device_count() == 1 else 2) -] - -MNK_SHAPES = [ - (1, 128, 128), - (1, 512, 1024), - (1, 4096, 4096), - (1, 8192, 28672), - (13, 8192, 4096), - (26, 4096, 8192), - (64, 4096, 4096), - (64, 8192, 28672), - (257, 128, 4096), - (257, 4224, 4160), - (257, 4096, 4096), - (1024, 4096, 8192), - (1024, 8192, 4096), -] - -ACT_TYPES = [torch.float16, torch.bfloat16] -WTYPE_ZEROPOINTS = [ - # GPTQ style - (scalar_types.uint4b8, False), - (scalar_types.uint8b128, False), - # AWQ style - (scalar_types.uint4, True), - (scalar_types.uint8, True), -] - -# TODO: in future PR refactor this and `is_quant_method_supported` in the kernel -# unit tests to a common utility function. Currently the use of -# `is_quant_method_supported` conflates kernels with quantization methods -# an assumption which is breaking down as quantizations methods can have -# have kernels and some kernels support multiple quantization methods. -IS_SUPPORTED_BY_GPU = current_platform.has_device_capability(90) - - -def rand_data(shape, dtype=torch.float16): - return 10 * (torch.rand(shape, dtype=dtype, device="cuda") - 0.3) - - -def maybe_convert_zeropoints(zps: Optional[torch.Tensor], s: torch.Tensor): - return zps if zps is None else -1 * s * (zps.to(s.dtype)) - - -def machete_quantize_and_pack(w: torch.Tensor, - wtype: ScalarType, - group_size: int, - zero_points: bool = False): - assert wtype.is_integer(), "TODO: support floating point weights" - - w_ref, w_q, w_s, w_zp = quantize_weights( - w, - wtype, - group_size, - zero_points=zero_points, - # to match how the kernel applies zps - ref_zero_points_after_scales=True) - - w_q = pack_rows(w_q, wtype.size_bits, *w_q.shape) - w_q = w_q.t().contiguous().t() # convert to col major - w_q_machete = ops.machete_prepack_B(w_q, wtype) - - opcheck(torch.ops._C.machete_prepack_B, (w_q, wtype.id)) - - return w_ref, w_q_machete, w_s, w_zp - - -def machete_gemm_test_helper(a: torch.Tensor, b: torch.Tensor, - wtype: ScalarType, group_size: int, - zero_points: bool): - w_ref, w_q_packed, w_s, w_zp = machete_quantize_and_pack( - b, wtype, group_size, zero_points) - - output_ref = torch.matmul(a, w_ref) - - output = ops.machete_gemm( - a=a, - b_q=w_q_packed, - b_type=wtype, - b_scales=w_s, - b_zeros=maybe_convert_zeropoints(w_zp, w_s), - b_group_size=group_size, - ) - - # Relax atol as our reduction dim becomes larger (more rounding error) - # Relax atol when we have zeropoints since the way machete applies - # zeropoints (after scales) causes noise around 0 - atol = 1 if zero_points else min(5e-2 * math.sqrt(a.shape[1]), 1) - torch.testing.assert_close(output, output_ref, rtol=1e-1, atol=atol) - - -@pytest.mark.skipif(not IS_SUPPORTED_BY_GPU, - reason="Machete is not supported on this GPU type.") -@pytest.mark.parametrize("shape", - MNK_SHAPES, - ids=lambda x: "x".join(str(v) for v in x)) -@pytest.mark.parametrize("atype", ACT_TYPES, ids=lambda x: str(x)) -@pytest.mark.parametrize("wtype_zeropoints", WTYPE_ZEROPOINTS) -@pytest.mark.parametrize("group_size", [128, None]) -def test_machete_all_schedules(shape, atype: torch.dtype, - wtype_zeropoints: Tuple[ScalarType, bool], 
- group_size: Optional[int]): - m, n, k = shape - wtype, zero_points = wtype_zeropoints - - if group_size is not None and k % group_size != 0: - return - - print(f"MNK = {m} {n} {k}") - - # Normalize group_size - if group_size is None: - group_size = k - assert group_size <= k - - a = rand_data((m, k), atype) - w = rand_data((k, n), atype) - - w_ref, w_q_machete, w_s, w_zp = machete_quantize_and_pack( - w, wtype, group_size, zero_points) - - output_ref = torch.matmul(a, w_ref) - - for schedule in ops.machete_supported_schedules(wtype): - print(f"Testing schedule {schedule}") - output = ops.machete_gemm( - a, - b_q=w_q_machete, - b_type=wtype, - b_scales=w_s, - b_zeros=maybe_convert_zeropoints(w_zp, w_s), - b_group_size=group_size, - schedule=schedule, - ) - - opcheck( - torch.ops._C.machete_gemm, - (a, w_q_machete, wtype.id, w_s, maybe_convert_zeropoints( - w_zp, w_s), group_size, None, None, None, schedule)) - - # Relax atol as our reduction dim becomes larger (more rounding error) - # Relax atol when we have zeropoints since the way machete applies - # zeropoints (after scales) causes noise around 0 - atol = 1 if zero_points else min(5e-2 * math.sqrt(k), 1) - torch.testing.assert_close(output, output_ref, rtol=1e-1, atol=atol),\ - f"Schedule failed {schedule}" - - -@pytest.mark.skipif(not IS_SUPPORTED_BY_GPU, - reason="Machete is not supported on this GPU type.") -@pytest.mark.parametrize("shape", - MNK_SHAPES, - ids=lambda x: "x".join(str(v) for v in x)) -@pytest.mark.parametrize("atype", ACT_TYPES, ids=lambda x: str(x)) -@pytest.mark.parametrize("wtype_zeropoints", WTYPE_ZEROPOINTS) -@pytest.mark.parametrize("group_size", [128, None]) -def test_machete_heuristic(shape, atype: torch.dtype, - wtype_zeropoints: Tuple[ScalarType, bool], - group_size: Optional[int]): - m, n, k = shape - wtype, zero_points = wtype_zeropoints - - if group_size is not None and k % group_size != 0: - return - - # Normalize group_size - if group_size is None: - group_size = k - assert group_size <= k - - a = rand_data((m, k), atype) - b = rand_data((k, n), atype) - - machete_gemm_test_helper(a, b, wtype, group_size, zero_points) - - -# Test working on other devices -@pytest.mark.skipif(not IS_SUPPORTED_BY_GPU, - reason="Machete is not supported on this GPU type.") -@pytest.mark.parametrize("device", CUDA_DEVICES) -def test_machete_devices(device: str): - m, n, k = 512, 4096, 4096 - wtype = scalar_types.uint4b8 - group_size = 128 - zero_points = False - - print(f"MNK = {m} {n} {k}, device = {device}") - - a = rand_data((m, k), torch.float16).to(device) - b = rand_data((k, n), torch.float16).to(device) - - machete_gemm_test_helper(a, b, wtype, group_size, zero_points) - - -# Test working with a subset of A and B -@pytest.mark.skipif(not IS_SUPPORTED_BY_GPU, - reason="Machete is not supported on this GPU type.") -def test_machete_subset(): - big_m, big_n, big_k = 1024, 1024, 1024 - m, n, k = 512, 512, 512 - wtype = scalar_types.uint4b8 - group_size = 128 - zero_points = False - - whole_a = rand_data((big_m, big_k), torch.float16) - whole_b = rand_data((big_k, big_n), torch.float16) - - a = whole_a[0:m, 0:k] - b = whole_b[0:k, 0:n] - - machete_gemm_test_helper(a, b, wtype, group_size, zero_points) - - -# Test to make sure cuda graphs work -class MacheteLayer(torch.nn.Module): - - def __init__(self, **kwargs): - super().__init__() - self.kwargs = kwargs - - def forward(self, a): - return ops.machete_gemm(**self.kwargs) - - -@pytest.mark.skipif(not IS_SUPPORTED_BY_GPU, - reason="Machete is not supported on this GPU 
type.") -def test_machete_cuda_graph(): - m, n, k = 512, 4096, 4096 - - a = rand_data((m, k), torch.float16) - b = rand_data((k, n), torch.float16) - wtype = scalar_types.uint4b8 - group_size = 128 - zero_points = False - - w_ref, w_q_packed, w_s, w_zp = machete_quantize_and_pack( - b, wtype, group_size, zero_points) - - # Construct a trivial model with a single layer that calls a machete kernel - model = MacheteLayer( - a=a, - b_q=w_q_packed, - b_type=wtype, - b_scales=w_s, - b_zeros=maybe_convert_zeropoints(w_zp, w_s), - b_group_size=group_size, - ) - - output_ref = torch.matmul(a, w_ref) - - # Run the model with a cuda graph - stream = torch.cuda.Stream() - with torch.cuda.stream(stream): - g = torch.cuda.CUDAGraph() - with torch.cuda.graph(g): - output = model(a) - output.zero_() - g.replay() - - # Relax atol as our reduction dim becomes larger (more rounding error) - # Relax atol when we have zeropoints since the way machete applies - # zeropoints (after scales) causes noise around 0 - atol = 1 if zero_points else min(5e-2 * math.sqrt(k), 1) - torch.testing.assert_close(output, output_ref, rtol=1e-1, atol=atol) diff --git a/tests/kernels/test_machete_mm.py b/tests/kernels/test_machete_mm.py new file mode 100644 index 0000000000000..1c6eb2dd9a228 --- /dev/null +++ b/tests/kernels/test_machete_mm.py @@ -0,0 +1,406 @@ +"""Tests for the machete kernel. + +Run `pytest tests/kernels/test_machete_mm.py`. +""" + +import math +from dataclasses import dataclass, fields +from typing import List, Optional, Tuple + +import pytest +import torch + +from tests.kernels.utils import opcheck +from vllm import _custom_ops as ops +from vllm.model_executor.layers.quantization.utils.quant_utils import ( + pack_rows, quantize_weights) +from vllm.platforms import current_platform +from vllm.scalar_type import ScalarType, scalar_types + +CUDA_DEVICES = [ + f"cuda:{i}" for i in range(1 if torch.cuda.device_count() == 1 else 2) +] + +# TODO: in future PR refactor this and `is_quant_method_supported` in the kernel +# unit tests to a common utility function. Currently the use of +# `is_quant_method_supported` conflates kernels with quantization methods +# an assumption which is breaking down as quantizations methods can have +# have kernels and some kernels support multiple quantization methods. 
+IS_SUPPORTED_BY_GPU = current_platform.get_device_capability()[0] >= 9 + +MNK_SHAPES = [ + (1, 128, 128), + (1, 512, 1024), + (1, 4096, 4096), + (1, 8192, 28672), + (13, 8192, 4096), + (26, 4096, 8192), + (64, 4096, 4096), + (64, 8192, 28672), + (257, 128, 4096), + (257, 4224, 4160), + (257, 4096, 4096), + (1024, 4096, 8192), + (1024, 8192, 4096), +] + +GROUP_SIZES_TO_TEST: List[Optional[int]] = [128, -1] + + +@dataclass +class TypeConfig: + act_type: torch.dtype + weight_type: ScalarType + output_type: Optional[torch.dtype] + group_scale_type: Optional[torch.dtype] + group_zero_type: Optional[torch.dtype] + channel_scale_type: Optional[torch.dtype] + token_scale_type: Optional[torch.dtype] + + +@dataclass +class Tensors: + w_ref: torch.Tensor + a_ref: torch.Tensor + a: torch.Tensor + w_q: torch.Tensor + w_g_s: Optional[torch.Tensor] + w_g_zp: Optional[torch.Tensor] + w_ch_s: Optional[torch.Tensor] + w_tok_s: Optional[torch.Tensor] + + +# (Act Type, Weight Type, Output Type, Scale Type, ZeroPoints, +# Ch Scales Type, Tok Scales Type) +# NOTE: None "Scale Type" means the act type is floating point +# None "Output Type" means the output type is the same as the act type +TestTypeTuple = Tuple[List[torch.dtype], ScalarType, Optional[torch.dtype], + Optional[torch.dtype], bool] +TEST_TYPES = [ + # GPTQ style + *(TypeConfig(act_type=a_type, + weight_type=w_type, + output_type=None, + group_scale_type=a_type, + group_zero_type=None, + channel_scale_type=None, + token_scale_type=None) + for w_type in [scalar_types.uint4b8, scalar_types.uint8b128] + for a_type in [torch.float16, torch.bfloat16]), + # AWQ style + *(TypeConfig(act_type=a_type, + weight_type=w_type, + output_type=None, + group_scale_type=a_type, + group_zero_type=a_type, + channel_scale_type=None, + token_scale_type=None) + for w_type in [scalar_types.uint4, scalar_types.uint8] + for a_type in [torch.float16, torch.bfloat16]), + # QQQ style + *(TypeConfig(act_type=torch.int8, + weight_type=scalar_types.uint4b8, + output_type=torch.float16, + group_scale_type=group_scale_type, + group_zero_type=None, + channel_scale_type=torch.float, + token_scale_type=torch.float) + for group_scale_type in [None, torch.float16]), + *(TypeConfig(act_type=torch.float8_e4m3fn, + weight_type=scalar_types.uint4b8, + output_type=torch.float16, + group_scale_type=group_scale_type, + group_zero_type=None, + channel_scale_type=torch.float, + token_scale_type=torch.float) + for group_scale_type in [None, torch.float16]), +] + +# TODO: in future PR refactor this and `is_quant_method_supported` in the kernel +# unit tests to a common utility function. Currently the use of +# `is_quant_method_supported` conflates kernels with quantization methods +# an assumption which is breaking down as quantizations methods can have +# have kernels and some kernels support multiple quantization methods. 
+IS_SUPPORTED_BY_GPU = current_platform.has_device_capability(90) + + +def rand_data(shape, dtype=torch.float16, scale=1, offset=0): + if dtype.is_floating_point: + return (scale * torch.rand(shape, device="cuda") - offset).to(dtype) + else: + return torch.randint(-8, 7, shape, dtype=dtype, device="cuda") + + +def maybe_convert_zeropoints(zps: Optional[torch.Tensor], s: torch.Tensor): + return zps if zps is None else -1 * s * (zps.to(s.dtype)) + + +def group_size_valid(shape: Tuple[int, int, int], + group_size: Optional[int]) -> bool: + return group_size is None or group_size == -1 or group_size % shape[2] == 0 + + +def machete_quantize_and_pack(atype: torch.dtype, + w: torch.Tensor, + wtype: ScalarType, + stype: Optional[torch.dtype], + group_size: Optional[int], + zero_points: bool = False): + assert wtype.is_integer(), "TODO: support floating point weights" + + w_ref, w_q, w_s, w_zp = quantize_weights( + w, + wtype, + group_size=group_size, + zero_points=zero_points, + # to match how the kernel applies zps + ref_zero_points_after_scales=True) + + w_q = pack_rows(w_q, wtype.size_bits, *w_q.shape) + w_q = w_q.t().contiguous().t() # convert to col major + + w_q_machete = ops.machete_prepack_B(w_q, atype, wtype, stype) + opcheck(torch.ops._C.machete_prepack_B, (w_q, atype, wtype.id, stype)) + + return w_ref, w_q_machete, w_s, w_zp + + +def create_test_tensors(shape: Tuple[int, int, int], + types: TypeConfig, + group_size: Optional[int], + subset_stride_factor: Optional[int] = None) -> Tensors: + m, n, k = shape + factor = subset_stride_factor or 1 + + print("create_test_tensors, shape:", shape, "types:", types, "group_size:", + group_size) + + a = rand_data((m * factor, k * factor), types.act_type, scale=3, offset=2) + w = rand_data((k * factor, n * factor), types.act_type, scale=3, offset=1) + + if factor > 1: + a = a[0:m, 0:k] + w = w[0:k, 0:n] + + if types.group_scale_type is not None: + w = w.to(types.group_scale_type) + if w.dtype.itemsize == 1: + w = w.to(torch.float16) + + w_ref, w_q_packed, w_s, w_zp = machete_quantize_and_pack( + a.dtype, w, types.weight_type, types.group_scale_type, group_size, + types.group_zero_type is not None) + + if not a.dtype.is_floating_point: + aiinfo = torch.iinfo(a.dtype) + w_ref = w_ref.round().clamp(aiinfo.min, aiinfo.max) + + a_ref = a.to(torch.float32) + w_ref = w_ref.to(torch.float32) + + w_ch_s = None if types.channel_scale_type is None else\ + rand_data((n,), types.channel_scale_type) + w_tok_s = None if types.token_scale_type is None else\ + rand_data((m,), types.token_scale_type) + + return Tensors(w_ref=w_ref, + a_ref=a_ref, + a=a, + w_q=w_q_packed, + w_g_s=w_s, + w_g_zp=maybe_convert_zeropoints(w_zp, w_s), + w_ch_s=w_ch_s, + w_tok_s=w_tok_s) + + +# None stype means scales use the same dtype as a +def machete_mm_test_helper(types: TypeConfig, + tensors: Tensors, + group_size: Optional[int] = None, + schedule: Optional[str] = None): + output_ref = torch.matmul(tensors.a_ref, tensors.w_ref) + output_ref_type = output_ref.dtype + + if tensors.w_ch_s is not None: + output_ref = (output_ref.to(tensors.w_ch_s.dtype) * + tensors.w_ch_s.unsqueeze(0)).to(output_ref_type) + if tensors.w_tok_s is not None: + output_ref = (output_ref.to(tensors.w_tok_s.dtype) * + tensors.w_tok_s.unsqueeze(1)).to(output_ref_type) + + output = ops.machete_mm( + a=tensors.a, + b_q=tensors.w_q, + b_type=types.weight_type, + b_group_scales=tensors.w_g_s, + b_group_zeros=tensors.w_g_zp, + b_group_size=group_size, + b_channel_scales=tensors.w_ch_s, + 
a_token_scales=tensors.w_tok_s, + out_type=types.output_type, + schedule=schedule, + ) + + print(output) + print(output_ref) + + # Relax atol as our reduction dim becomes larger (more rounding error) + # Relax atol when we have zeropoints since the way machete applies + # zeropoints (after scales) causes noise around 0 + atol = 1 if tensors.w_g_zp is not None\ + else min(5e-2 * math.sqrt(tensors.a.shape[1]), 1) + rtol = 1e-1 if tensors.a.element_size() >= 2 else 2e-1 + torch.testing.assert_close(output, + output_ref.to(output.dtype), + rtol=rtol, + atol=atol) + + +@pytest.mark.skipif(not IS_SUPPORTED_BY_GPU, + reason="Machete is not supported on this GPU type.") +@pytest.mark.parametrize("shape", + MNK_SHAPES, + ids=lambda x: "x".join(str(v) for v in x)) +@pytest.mark.parametrize("types", TEST_TYPES) +def test_machete_all_schedules(shape, types: TypeConfig): + + group_sizes: List[Optional[int]] = [] + if types.group_scale_type is None: + group_sizes = [None] + else: + group_sizes = GROUP_SIZES_TO_TEST + + for group_size in group_sizes: + if not group_size_valid(shape, group_size): + continue + + tensors = create_test_tensors(shape, types, group_size) + print(f"MNK = {shape}") + for schedule in ops.machete_supported_schedules( + types.act_type, + types.weight_type, + group_scales_type=types.group_scale_type, + group_zeros_type=types.group_scale_type, + out_type=types.output_type): + print(f"Testing schedule {schedule}") + machete_mm_test_helper(types, tensors, group_size, schedule) + + +@pytest.mark.skipif(not IS_SUPPORTED_BY_GPU, + reason="Machete is not supported on this GPU type.") +@pytest.mark.parametrize("shape", + MNK_SHAPES, + ids=lambda x: "x".join(str(v) for v in x)) +@pytest.mark.parametrize("types", TEST_TYPES) +def test_machete_heuristic(shape, types: TypeConfig): + group_sizes: List[Optional[int]] = [] + if types.group_scale_type is None: + group_sizes = [None] + else: + group_sizes = GROUP_SIZES_TO_TEST + + for group_size in group_sizes: + if not group_size_valid(shape, group_size): + continue + + tensors = create_test_tensors(shape, types, group_size) + machete_mm_test_helper(types, tensors, group_size) + + +# Test working on other devices +@pytest.mark.skipif(not IS_SUPPORTED_BY_GPU, + reason="Machete is not supported on this GPU type.") +@pytest.mark.parametrize("device", CUDA_DEVICES) +def test_machete_devices(device: str): + group_size = 128 + + type_config = TypeConfig(act_type=torch.float16, + weight_type=scalar_types.uint4b8, + output_type=None, + group_scale_type=torch.float16, + group_zero_type=None, + channel_scale_type=None, + token_scale_type=None) + + tensors = create_test_tensors((512, 4096, 4096), type_config, group_size) + + for field in fields(Tensors): + tensor = getattr(tensors, field.name) + if isinstance(tensor, torch.Tensor): + setattr(tensors, field.name, tensor.to(device)) + + machete_mm_test_helper(type_config, tensors, group_size) + + +# Test working with a subset of A and B +@pytest.mark.skipif(not IS_SUPPORTED_BY_GPU, + reason="Machete is not supported on this GPU type.") +def test_machete_subset(): + group_size = 128 + + type_config = TypeConfig(act_type=torch.float16, + weight_type=scalar_types.uint4b8, + output_type=None, + group_scale_type=torch.float16, + group_zero_type=None, + channel_scale_type=None, + token_scale_type=None) + + tensors = create_test_tensors((512, 4096, 4096), + type_config, + group_size, + subset_stride_factor=2) + machete_mm_test_helper(type_config, tensors, group_size) + + +# Test to make sure cuda graphs work +class 
MacheteLayer(torch.nn.Module): + + def __init__(self, **kwargs): + super().__init__() + self.kwargs = kwargs + + def forward(self, a): + return ops.machete_mm(a=a, **self.kwargs) + + +@pytest.mark.skipif(not IS_SUPPORTED_BY_GPU, + reason="Machete is not supported on this GPU type.") +def test_machete_cuda_graph(): + m, n, k = 512, 4096, 4096 + + a = rand_data((m, k), torch.float16) + b = rand_data((k, n), torch.float16) + wtype = scalar_types.uint4b8 + stype = torch.float16 + group_size = 128 + zero_points = False + + w_ref, w_q_packed, w_s, w_zp = machete_quantize_and_pack( + a.dtype, b, wtype, stype, group_size, zero_points) + + # Construct a trivial model with a single layer that calls a machete kernel + model = MacheteLayer( + b_q=w_q_packed, + b_type=wtype, + b_group_scales=w_s, + b_group_zeros=maybe_convert_zeropoints(w_zp, w_s), + b_group_size=group_size, + ) + + output_ref = torch.matmul(a, w_ref) + + # Run the model with a cuda graph + stream = torch.cuda.Stream() + with torch.cuda.stream(stream): + g = torch.cuda.CUDAGraph() + with torch.cuda.graph(g): + output = model(a) + output.zero_() + g.replay() + + # Relax atol as our reduction dim becomes larger (more rounding error) + # Relax atol when we have zeropoints since the way machete applies + # zeropoints (after scales) causes noise around 0 + atol = 1 if zero_points else min(5e-2 * math.sqrt(k), 1) + torch.testing.assert_close(output, output_ref, rtol=1e-1, atol=atol) diff --git a/vllm/_custom_ops.py b/vllm/_custom_ops.py index b276b8fc25473..aa89010ca8ecd 100644 --- a/vllm/_custom_ops.py +++ b/vllm/_custom_ops.py @@ -444,18 +444,18 @@ def _fp8_marlin_gemm_fake(a: torch.Tensor, b_q_weight: torch.Tensor, size_k: torch.SymInt) -> torch.Tensor: return torch.empty((size_m, size_n), dtype=a.dtype, device=a.device) - @register_fake("_C::machete_gemm") - def machete_gemm_fake( + @register_fake("_C::machete_mm") + def machete_mm_fake( a: torch.Tensor, - # Should be the tensor returned by machete_prepack_B + # b_q Should be the tensor returned by machete_prepack_B b_q: torch.Tensor, b_type: ScalarType, - b_scales: Optional[torch.Tensor] = None, - b_zeros: Optional[torch.Tensor] = None, + out_type: Optional[torch.dtype] = None, + b_group_scales: Optional[torch.Tensor] = None, + b_group_zeros: Optional[torch.Tensor] = None, b_group_size: Optional[int] = None, - c: Optional[torch.Tensor] = None, - alpha: Optional[float] = None, - beta: Optional[float] = None, + b_channel_scales: Optional[torch.Tensor] = None, + a_token_scales: Optional[torch.Tensor] = None, schedule: Optional[str] = None, ) -> torch.Tensor: m = a.size(0) @@ -463,8 +463,9 @@ def machete_gemm_fake( return torch.empty((m, n), device=a.device, dtype=a.dtype) @register_fake("_C::machete_prepack_B") - def machete_prepack_B_fake(b_q_weight: torch.Tensor, - b_type: ScalarType) -> torch.Tensor: + def machete_prepack_B_fake( + b_q_weight: torch.Tensor, a_type: torch.dtype, b_type: ScalarType, + group_scales_type: Optional[torch.dtype]) -> torch.Tensor: return torch.empty_like(b_q_weight, memory_format=torch.contiguous_format) @@ -617,29 +618,41 @@ def fp8_marlin_gemm(a: torch.Tensor, b_q_weight: torch.Tensor, # machete -def machete_supported_schedules(b_type: ScalarType) -> List[str]: - return torch.ops._C.machete_supported_schedules(b_type.id) - - -def machete_gemm( - a: torch.Tensor, - b_q: torch.Tensor, # Should be the tensor returned by machete_prepack_B - b_type: ScalarType, - b_scales: Optional[torch.Tensor] = None, - b_zeros: Optional[torch.Tensor] = None, - b_group_size: 
Optional[int] = None, - c: Optional[torch.Tensor] = None, - alpha: Optional[float] = None, - beta: Optional[float] = None, - schedule: Optional[str] = None, -) -> torch.Tensor: - return torch.ops._C.machete_gemm(a, b_q, b_type.id, b_scales, b_zeros, - b_group_size, c, alpha, beta, schedule) +def machete_supported_schedules( + a_type: torch.dtype, + b_type: ScalarType, + group_scales_type: Optional[torch.dtype], + group_zeros_type: Optional[torch.dtype] = None, + channel_scales_type: Optional[torch.dtype] = None, + token_scales_type: Optional[torch.dtype] = None, + out_type: Optional[torch.dtype] = None) -> List[str]: + return torch.ops._C.machete_supported_schedules( + a_type, b_type.id, group_scales_type, group_zeros_type, + channel_scales_type, token_scales_type, out_type) -def machete_prepack_B(b_q_weight: torch.Tensor, - b_type: ScalarType) -> torch.Tensor: - return torch.ops._C.machete_prepack_B(b_q_weight, b_type.id) +def machete_mm( + a: torch.Tensor, + # b_q Should be the tensor returned by machete_prepack_B + b_q: torch.Tensor, + b_type: ScalarType, + out_type: Optional[torch.dtype] = None, + b_group_scales: Optional[torch.Tensor] = None, + b_group_zeros: Optional[torch.Tensor] = None, + b_group_size: Optional[int] = None, + b_channel_scales: Optional[torch.Tensor] = None, + a_token_scales: Optional[torch.Tensor] = None, + schedule: Optional[str] = None) -> torch.Tensor: + return torch.ops._C.machete_mm(a, b_q, b_type.id, out_type, b_group_scales, + b_group_zeros, b_group_size, + b_channel_scales, a_token_scales, schedule) + + +def machete_prepack_B( + b_q_weight: torch.Tensor, a_type: torch.dtype, b_type: ScalarType, + group_scales_type: Optional[torch.dtype]) -> torch.Tensor: + return torch.ops._C.machete_prepack_B(b_q_weight, a_type, b_type.id, + group_scales_type) if hasattr(torch.ops._C, "permute_cols"): diff --git a/vllm/model_executor/layers/quantization/kernels/machete.py b/vllm/model_executor/layers/quantization/kernels/machete.py index e5696d08f30f5..15df0200f30b5 100644 --- a/vllm/model_executor/layers/quantization/kernels/machete.py +++ b/vllm/model_executor/layers/quantization/kernels/machete.py @@ -79,7 +79,9 @@ def transform_w_q(x): c.weight_type, packed_dim=0) x.data = ops.machete_prepack_B(x.data.t().contiguous().t(), - self.config.weight_type) + a_type=c.act_type, + b_type=c.weight_type, + group_scales_type=c.act_type) return x def transform_w_s(x): @@ -105,12 +107,12 @@ def apply_weights(self, if c.has_g_idx: x_2d = self.act_perm(x_2d) - output = ops.machete_gemm(a=x_2d, - b_q=w_q, - b_type=c.weight_type, - b_zeros=None, - b_scales=w_s, - b_group_size=c.group_size) + output = ops.machete_mm(a=x_2d, + b_q=w_q, + b_type=c.weight_type, + b_group_zeros=None, + b_group_scales=w_s, + b_group_size=c.group_size) if bias is not None: output.add_(bias) # In-place add diff --git a/vllm/model_executor/layers/quantization/utils/quant_utils.py b/vllm/model_executor/layers/quantization/utils/quant_utils.py index c217f5ca620a1..83055d6000d83 100644 --- a/vllm/model_executor/layers/quantization/utils/quant_utils.py +++ b/vllm/model_executor/layers/quantization/utils/quant_utils.py @@ -126,11 +126,14 @@ def permute_rows(q_w: torch.Tensor, def quantize_weights(w: torch.Tensor, quant_type: ScalarType, - group_size: int, + group_size: Optional[int], zero_points: bool = False, ref_zero_points_after_scales: bool = False): assert quant_type.is_integer(), \ "Floating point quantization may work but has not been tested" + assert not zero_points or group_size is not None, \ + "to have 
group zero points, group_size must be provided "\ + "(-1 group_size is channelwise)" orig_device = w.device orig_type = w.dtype @@ -140,10 +143,9 @@ def quantize_weights(w: torch.Tensor, if group_size == -1: group_size = size_k - assert group_size <= size_k # Reshape to [groupsize, -1] - if group_size < size_k: + if group_size is not None and group_size < size_k: w = w.reshape((-1, group_size, size_n)) w = w.permute(1, 0, 2) w = w.reshape((group_size, -1)) @@ -155,18 +157,20 @@ def quantize_weights(w: torch.Tensor, max_q_val = quant_type.max() min_q_val = quant_type.min() - if zero_points: - assert not quant_type.is_signed() and quant_type.max() > 0 - w_s = (max_val - min_val).clamp(min=1e-5) / quant_type.max() - maybe_w_zp = torch.round(torch.abs(min_val / w_s)) \ - .clamp(min_q_val, max_q_val).int() - else: - # If the bias is such that there are no possible negative/positive - # values, set the max value to inf to avoid divide by 0 - w_s = torch.max( - abs(max_val / (max_q_val if max_q_val != 0 else torch.inf)), - abs(min_val / (min_q_val if min_q_val != 0 else torch.inf))) - maybe_w_zp = None + w_s = torch.Tensor([1.0]).to(w.device) # unscaled case + maybe_w_zp = None + if group_size is not None: + if zero_points: + assert not quant_type.is_signed() and quant_type.max() > 0 + w_s = (max_val - min_val).clamp(min=1e-5) / quant_type.max() + maybe_w_zp = torch.round(torch.abs(min_val / w_s)) \ + .clamp(min_q_val, max_q_val).int() + else: + # If the bias is such that there are no possible negative/positive + # values, set the max value to inf to avoid divide by 0 + w_s = torch.max( + abs(max_val / (max_q_val if max_q_val != 0 else torch.inf)), + abs(min_val / (min_q_val if min_q_val != 0 else torch.inf))) # Quantize w_q = torch.round(w / w_s).int() + (maybe_w_zp if zero_points else 0) @@ -176,7 +180,7 @@ def quantize_weights(w: torch.Tensor, # For some kernels (namely Machete) the zero-points are applied after the # scales are applied, for this case computing the reference in similar way # allows us to use tighter error tolerances in our unit tests. 
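# The comment above is the crux of this change: dequantizing as (w_q - zp) * s
# versus w_q * s - zp * s is algebraically identical, but the fp16 intermediates
# round differently, so the reference must mirror the kernel's ordering to keep
# tight test tolerances. A minimal standalone sketch with made-up values
# (illustrative only, not taken from this diff):
import torch

w_q_demo = torch.tensor([3.0, 7.0, 12.0], dtype=torch.float16)  # fake quantized weights
s_demo = torch.tensor([0.0173], dtype=torch.float16)            # fake group scale
zp_demo = torch.tensor([8.0], dtype=torch.float16)              # fake group zero-point

ref_zp_before_scales = (w_q_demo - zp_demo) * s_demo        # conventional dequantization
ref_zp_after_scales = w_q_demo * s_demo - zp_demo * s_demo  # ordering used by Machete

# Equal in exact arithmetic; matching the kernel's ordering in the reference
# avoids folding this rounding noise into the test tolerance.
print((ref_zp_before_scales - ref_zp_after_scales).abs().max())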
- if ref_zero_points_after_scales and zero_points: + if ref_zero_points_after_scales and maybe_w_zp is not None: w_ref = w_q.to(orig_type) * w_s - maybe_w_zp.to(orig_type) * w_s else: w_ref = (w_q - (maybe_w_zp if zero_points else 0)).to(orig_type) * w_s @@ -185,7 +189,7 @@ def quantize_weights(w: torch.Tensor, w_q += quant_type.bias # Restore original shapes - if group_size < size_k: + if group_size is not None and group_size < size_k: def reshape_w(w): w = w.reshape((group_size, -1, size_n)) @@ -195,17 +199,16 @@ def reshape_w(w): w_q = reshape_w(w_q) w_ref = reshape_w(w_ref) + w_s = w_s.reshape((-1, size_n)).contiguous() - w_s = w_s.reshape((-1, size_n)).contiguous() - - if zero_points: + if maybe_w_zp is not None: maybe_w_zp = maybe_w_zp.reshape((-1, size_n)).contiguous() maybe_w_zp = maybe_w_zp.to(device=orig_device) return ( w_ref.to(device=orig_device), w_q.to(device=orig_device), - w_s.to(device=orig_device), + w_s if group_size is not None else None, maybe_w_zp, ) From a03ea40792201ac8ff547d37d9f9255b347b9ccd Mon Sep 17 00:00:00 2001 From: youkaichao Date: Mon, 18 Nov 2024 15:14:59 -0800 Subject: [PATCH 022/122] [3/N][torch.compile] consolidate custom op logging (#10399) Signed-off-by: youkaichao --- vllm/config.py | 12 ++++++++++-- vllm/model_executor/custom_op.py | 9 ++++++--- vllm/plugins/__init__.py | 4 ++++ 3 files changed, 20 insertions(+), 5 deletions(-) diff --git a/vllm/config.py b/vllm/config.py index 14017bbdb3cf2..ea9ec43cc5a15 100644 --- a/vllm/config.py +++ b/vllm/config.py @@ -4,8 +4,9 @@ import warnings from dataclasses import dataclass, field, replace from pathlib import Path -from typing import (TYPE_CHECKING, Any, Callable, ClassVar, Dict, Final, List, - Literal, Mapping, Optional, Set, Tuple, Type, Union) +from typing import (TYPE_CHECKING, Any, Callable, ClassVar, Counter, Dict, + Final, List, Literal, Mapping, Optional, Set, Tuple, Type, + Union) import torch from pydantic import BaseModel, Field, PrivateAttr @@ -2169,6 +2170,10 @@ class CompilationConfig(BaseModel): compile_sizes: List[int] = PrivateAttr capture_sizes: List[int] = PrivateAttr + # keep track of enabled and disabled custom ops + enabled_custom_ops: Counter[str] = PrivateAttr + disabled_custom_ops: Counter[str] = PrivateAttr + def model_post_init(self, __context: Any) -> None: self.level = envs.VLLM_TORCH_COMPILE_LEVEL @@ -2190,6 +2195,9 @@ def model_post_init(self, __context: Any) -> None: func = __import__(module).__dict__[func_name] self.inductor_compile_config[k] = func + self.enabled_custom_ops = Counter() + self.disabled_custom_ops = Counter() + def init_backend(self) -> Union[str, Callable]: if self.level == CompilationLevel.NO_COMPILATION: raise ValueError("No compilation level is set.") diff --git a/vllm/model_executor/custom_op.py b/vllm/model_executor/custom_op.py index 6ae7d7cf6964f..b07966f2ab7d0 100644 --- a/vllm/model_executor/custom_op.py +++ b/vllm/model_executor/custom_op.py @@ -61,10 +61,13 @@ def forward_hpu(self, *args, **kwargs): def dispatch_forward(self): # NOTE(woosuk): Here we assume that vLLM was built for only one # specific backend. Currently, we do not support dynamic dispatching. 
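# What this patch changes, in isolation: instead of one debug line per custom op
# at dispatch time, each op's enabled/disabled decision is tallied in a Counter
# on the compilation config and logged once as a summary. A standalone toy of
# that bookkeeping pattern (nothing below is vLLM API, just the idea):
from collections import Counter

enabled_ops_demo: Counter[str] = Counter()
disabled_ops_demo: Counter[str] = Counter()

def record_dispatch(op_name: str, enabled: bool) -> None:
    # Tally the decision instead of logging it immediately.
    (enabled_ops_demo if enabled else disabled_ops_demo).update([op_name])

for name, on in [("rms_norm", True), ("rms_norm", True), ("silu_and_mul", True),
                 ("rotary_embedding", False)]:
    record_dispatch(name, on)

# One summary at the end, mirroring the debug logs added in plugins/__init__.py.
print("enabled custom ops:", enabled_ops_demo)
print("disabled custom ops:", disabled_ops_demo)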
- + compilation_config = get_current_vllm_config().compilation_config enabled = self.enabled() - logger.debug("custom op %s %s", self.__class__.name, - "enabled" if enabled else "disabled") + if enabled: + compilation_config.enabled_custom_ops.update([self.__class__.name]) + else: + compilation_config.disabled_custom_ops.update( + [self.__class__.name]) if not enabled: return self.forward_native diff --git a/vllm/plugins/__init__.py b/vllm/plugins/__init__.py index a0c73a752b5e8..c5182139db50b 100644 --- a/vllm/plugins/__init__.py +++ b/vllm/plugins/__init__.py @@ -80,6 +80,10 @@ def set_current_vllm_config(vllm_config: VllmConfig): _current_vllm_config = vllm_config yield finally: + logger.debug("enabled custom ops: %s", + vllm_config.compilation_config.enabled_custom_ops) + logger.debug("disabled custom ops: %s", + vllm_config.compilation_config.disabled_custom_ops) _current_vllm_config = old_vllm_config From 2298e69b5f1dc77f00aee687a3843a4dae12cb91 Mon Sep 17 00:00:00 2001 From: youkaichao Date: Mon, 18 Nov 2024 15:29:37 -0800 Subject: [PATCH 023/122] [ci][bugfix] fix kernel tests (#10431) Signed-off-by: youkaichao --- vllm/plugins/__init__.py | 23 +++++++++++++---------- 1 file changed, 13 insertions(+), 10 deletions(-) diff --git a/vllm/plugins/__init__.py b/vllm/plugins/__init__.py index c5182139db50b..fdc848cedf054 100644 --- a/vllm/plugins/__init__.py +++ b/vllm/plugins/__init__.py @@ -6,9 +6,6 @@ if TYPE_CHECKING: from vllm.config import CompilationConfig, VllmConfig -else: - CompilationConfig = None - VllmConfig = None logger = logging.getLogger(__name__) @@ -50,23 +47,23 @@ def load_general_plugins(): logger.exception("Failed to load plugin %s", plugin.name) -_compilation_config: Optional[CompilationConfig] = None +_compilation_config: Optional["CompilationConfig"] = None -def set_compilation_config(config: Optional[CompilationConfig]): +def set_compilation_config(config: Optional["CompilationConfig"]): global _compilation_config _compilation_config = config -def get_compilation_config() -> Optional[CompilationConfig]: +def get_compilation_config() -> Optional["CompilationConfig"]: return _compilation_config -_current_vllm_config: Optional[VllmConfig] = None +_current_vllm_config: Optional["VllmConfig"] = None @contextmanager -def set_current_vllm_config(vllm_config: VllmConfig): +def set_current_vllm_config(vllm_config: "VllmConfig"): """ Temporarily set the current VLLM config. Used during model initialization. @@ -87,6 +84,12 @@ def set_current_vllm_config(vllm_config: VllmConfig): _current_vllm_config = old_vllm_config -def get_current_vllm_config() -> VllmConfig: - assert _current_vllm_config is not None, "Current VLLM config is not set." +def get_current_vllm_config() -> "VllmConfig": + if _current_vllm_config is None: + # in ci, usually when we test custom ops/modules directly, + # we don't set the vllm config. In that case, we set a default + # config. 
+ logger.warning("Current VLLM config is not set.") + from vllm.config import VllmConfig + return VllmConfig() return _current_vllm_config From 90a6c759caf84ff7722449a33895e397ccf1a2af Mon Sep 17 00:00:00 2001 From: Ricky Xu Date: Mon, 18 Nov 2024 15:39:14 -0800 Subject: [PATCH 024/122] [misc] partial prefix & random input generation benchmark (#9929) Signed-off-by: rickyx --- benchmarks/benchmark_prefix_caching.py | 116 +++++++++++++++++++------ 1 file changed, 91 insertions(+), 25 deletions(-) diff --git a/benchmarks/benchmark_prefix_caching.py b/benchmarks/benchmark_prefix_caching.py index 6d33096ca1d11..5e9381f712e10 100644 --- a/benchmarks/benchmark_prefix_caching.py +++ b/benchmarks/benchmark_prefix_caching.py @@ -54,13 +54,30 @@ def test_prefix(llm=None, sampling_params=None, prompts=None): print(f"cost time {end_time - start_time}") -def sample_requests( +@dataclasses.dataclass +class Request: + prompt: str + prompt_len: int + output_len: int + + +def sample_tokens(tokenizer: PreTrainedTokenizerBase, length: int) -> str: + vocab = tokenizer.get_vocab() + # Remove the special tokens. + vocab = { + k: v + for k, v in vocab.items() if k not in tokenizer.all_special_ids + } + return random.choices(list(vocab.values()), k=length) + + +def sample_requests_from_dataset( dataset_path: str, num_requests: int, tokenizer: PreTrainedTokenizerBase, input_length_range: Tuple[int, int], fixed_output_len: Optional[int], -) -> List[Tuple[str, int, int]]: +) -> List[Request]: if fixed_output_len is not None and fixed_output_len < 4: raise ValueError("output_len too small") @@ -77,31 +94,55 @@ def sample_requests( random.shuffle(dataset) min_len, max_len = input_length_range + assert min_len >= 0 and max_len >= min_len, "input_length_range too small" # Filter out sequences that are too long or too short - filtered_dataset: List[Tuple[str, int, int]] = [] + filtered_requests: List[Request] = [] + for i in range(len(dataset)): - if len(filtered_dataset) == num_requests: + if len(filtered_requests) == num_requests: break # Tokenize the prompts and completions. - prompt = dataset[i][0] - prompt_token_ids = tokenizer(prompt).input_ids + prompt_token_ids = tokenizer(dataset[i][0]).input_ids + prompt = tokenizer.decode(prompt_token_ids) completion = dataset[i][1] completion_token_ids = tokenizer(completion).input_ids prompt_len = len(prompt_token_ids) - output_len = len(completion_token_ids - ) if fixed_output_len is None else fixed_output_len - if prompt_len < 4 or output_len < 4: - # Prune too short sequences. 
- continue + output_len = (len(completion_token_ids) + if fixed_output_len is None else fixed_output_len) if min_len <= prompt_len <= max_len: - filtered_dataset.append((prompt, prompt_len, output_len)) + filtered_requests.append(Request(prompt, prompt_len, output_len)) + + return filtered_requests + + +def sample_requests_from_random( + num_requests: int, + tokenizer: PreTrainedTokenizerBase, + input_length_range: Tuple[int, int], + fixed_output_len: Optional[int], + prefix_len: int, +) -> List[Request]: - return filtered_dataset + requests = [] + prefix_token_ids = sample_tokens(tokenizer, prefix_len) + min_len, max_len = input_length_range + + for i in range(num_requests): + unique_part_token_ids = sample_tokens( + tokenizer, + random.randint(min_len - prefix_len, max_len - prefix_len)) + prompt_token_ids = prefix_token_ids + unique_part_token_ids + prompt = tokenizer.decode(prompt_token_ids) + prompt_len = len(prompt_token_ids) + assert (min_len <= prompt_len <= max_len + ), f"prompt_len {prompt_len} out of range {min_len}:{max_len}" + requests.append(Request(prompt, prompt_len, fixed_output_len)) + return requests -def repeat_and_sort_requests(requests: List[Tuple[str, int, int]], +def repeat_and_sort_requests(requests: List[Request], repeat_count: int, sort: bool = False) -> List[str]: repeated_requests = requests * repeat_count @@ -109,7 +150,7 @@ def repeat_and_sort_requests(requests: List[Tuple[str, int, int]], repeated_requests.sort(key=lambda x: x[1]) else: random.shuffle(repeated_requests) - return [req[0] for req in repeated_requests] + return [req.prompt for req in repeated_requests] def main(args): @@ -117,9 +158,12 @@ def main(args): input_length_range = tuple(map(int, args.input_length_range.split(':'))) random.seed(args.seed) if args.dataset_path is not None: - print(f"Start to sample {args.num_prompts} prompts" + if args.prefix_len > 0: + raise ValueError("prefix-len is not supported when " + "dataset-path is provided.") + print(f"Start to sample {args.num_prompts} prompts " f"from {args.dataset_path}") - filtered_datasets = sample_requests( + filtered_requests = sample_requests_from_dataset( dataset_path=args.dataset_path, num_requests=args.num_prompts, tokenizer=tokenizer, @@ -127,9 +171,22 @@ def main(args): fixed_output_len=args.output_len, ) else: - prompt_len = len(tokenizer(PROMPT).input_ids) - filtered_datasets = [(PROMPT, prompt_len, args.output_len) - ] * args.num_prompts + print(f"Start to sample {args.num_prompts} prompts from random") + filtered_requests = sample_requests_from_random( + num_requests=args.num_prompts, + tokenizer=tokenizer, + input_length_range=input_length_range, + fixed_output_len=args.output_len, + prefix_len=args.prefix_len, + ) + + # Print some helpful stats of the requests. 
+ print(f"Sampled {len(filtered_requests)} requests.") + prompt_lens = [req.prompt_len for req in filtered_requests] + print(f"Average input length: {sum(prompt_lens) / len(prompt_lens)}") + print(f"P50 input length: {sorted(prompt_lens)[len(prompt_lens) // 2]}") + print(f"Min Prompt Length: {min(prompt_lens)}") + print(f"Max Prompt Length: {max(prompt_lens)}") engine_args = EngineArgs.from_cli_args(args) @@ -137,8 +194,8 @@ def main(args): sampling_params = SamplingParams(temperature=0, max_tokens=args.output_len) - print("Testing filtered datasets") - prompts = repeat_and_sort_requests(filtered_datasets, + print("Testing filtered requests") + prompts = repeat_and_sort_requests(filtered_requests, repeat_count=args.repeat_count, sort=args.sort) @@ -161,20 +218,29 @@ def main(args): parser.add_argument('--output-len', type=int, default=10) parser.add_argument('--num-prompts', type=int, - default=1, + required=True, help="Number of the prompts sampled from dataset") parser.add_argument('--repeat-count', type=int, - default=100, + default=1, help='Number of times to repeat each prompt') parser.add_argument('--sort', action='store_true', help='Sort prompts by input length') parser.add_argument('--input-length-range', type=str, - default='128:256', + required=True, help='Range of input lengths for sampling prompts,' 'specified as "min:max" (e.g., "128:256").') + parser.add_argument( + "--prefix-len", + type=int, + default=0, + help="Specifies the length of a common prefix to be " + "added to the input prompt. The input-length-range will " + "subtract this length when filtering prompts. Only used " + "when dataset-path is not provided.", + ) parser = EngineArgs.add_cli_args(parser) args = parser.parse_args() From 284203f171d86a9581295436d6175246215437fd Mon Sep 17 00:00:00 2001 From: "Kevin H. Luu" Date: Mon, 18 Nov 2024 15:04:25 -1000 Subject: [PATCH 025/122] [ci/build] Have dependabot ignore all patch update (#10436) We have too many dependencies and all patch updates can be a little noisy. This is to have dependabot ignore all patch version updates. 
--- .github/dependabot.yml | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/.github/dependabot.yml b/.github/dependabot.yml index 4f54eea564ecb..683b70cd89989 100644 --- a/.github/dependabot.yml +++ b/.github/dependabot.yml @@ -15,6 +15,8 @@ updates: allow: - dependency-type: "all" ignore: + - dependency-name: "*" + update-types: ["version-update:semver-patch"] - dependency-name: "torch" - dependency-name: "torchvision" - dependency-name: "xformers" @@ -24,9 +26,6 @@ updates: - dependency-name: "ray[adag]" - dependency-name: "lm-eval" groups: - patch-update: - applies-to: version-updates - update-types: ["patch"] minor-update: applies-to: version-updates update-types: ["minor"] From 7eb719df13cf8059485f52648a6a115700158301 Mon Sep 17 00:00:00 2001 From: Jee Jee Li Date: Tue, 19 Nov 2024 11:21:42 +0800 Subject: [PATCH 026/122] [Bugfix]Fix Phi-3 BNB online quantization (#10417) Signed-off-by: Jee Jee Li --- vllm/model_executor/layers/linear.py | 12 +++++++++--- vllm/model_executor/models/phi3.py | 10 ++++++++++ 2 files changed, 19 insertions(+), 3 deletions(-) diff --git a/vllm/model_executor/layers/linear.py b/vllm/model_executor/layers/linear.py index e1f8a6e36d781..9da38d4857d6d 100644 --- a/vllm/model_executor/layers/linear.py +++ b/vllm/model_executor/layers/linear.py @@ -470,7 +470,8 @@ def weight_loader(self, needs_scalar_to_array = getattr(param, "needs_scalar_to_array", False) if loaded_shard_id is None: - # Loaded weight is already fused on disk (qkv/mlp). + # Loaded weight is already fused on disk (mlp). + # (e.g., Phi-3's gate_up_proj). if output_dim is None: if needs_scalar_to_array: param_data, loaded_weight = adjust_scalar_to_fused_array( @@ -480,6 +481,8 @@ def weight_loader(self, param_data.copy_(loaded_weight) return current_shard_offset = 0 + use_bitsandbytes_4bit = getattr(param, "use_bitsandbytes_4bit", + False) shard_offsets: List[Tuple[int, int, int]] = [] for i, output_size in enumerate(self.output_sizes): shard_offsets.append((i, current_shard_offset, output_size)) @@ -495,7 +498,9 @@ def weight_loader(self, # Special case for Marlin. shard_size, shard_offset = adjust_marlin_shard( param, shard_size, shard_offset) - + if use_bitsandbytes_4bit: + shard_size = loaded_weight.shape[output_dim] // 2 + shard_offset = shard_size * shard_id loaded_weight_shard = loaded_weight.narrow( output_dim, shard_offset, shard_size) self.weight_loader(param, loaded_weight_shard, shard_id) @@ -808,7 +813,8 @@ def weight_loader(self, needs_scalar_to_array = getattr(param, "needs_scalar_to_array", False) if loaded_shard_id is None: - # Loaded weight is already fused on disk (qkv/mlp). + # Loaded weight is already fused on disk (qkv). + # (e.g., Phi-3's qkv_proj). if output_dim is None: if needs_scalar_to_array: param_data, loaded_weight = adjust_scalar_to_fused_array( diff --git a/vllm/model_executor/models/phi3.py b/vllm/model_executor/models/phi3.py index 34141511ea791..54158bc141235 100644 --- a/vllm/model_executor/models/phi3.py +++ b/vllm/model_executor/models/phi3.py @@ -14,3 +14,13 @@ class Phi3ForCausalLM(LlamaForCausalLM): "gate_up_proj", ], } + + # BitandBytes specific attributes + default_bitsandbytes_target_modules = [ + ".gate_up_proj.", + ".down_proj.", + ".qkv_proj.", + ".o_proj.", + ] + # Initialize an empty dict when there is no stacked parameter mapping. 
+ bitsandbytes_stacked_params_mapping = {} From 8c1fb507052d385d94ac49a7388fd6db5d0069e7 Mon Sep 17 00:00:00 2001 From: Mengqing Cao Date: Tue, 19 Nov 2024 11:22:26 +0800 Subject: [PATCH 027/122] [Platform][Refactor] Extract func `get_default_attn_backend` to `Platform` (#10358) Signed-off-by: Mengqing Cao --- tests/kernels/test_attention_selector.py | 19 ++++---- vllm/attention/selector.py | 56 +++--------------------- vllm/model_executor/models/molmo.py | 2 +- vllm/model_executor/models/qwen2_vl.py | 2 +- vllm/model_executor/models/utils.py | 4 +- vllm/platforms/__init__.py | 1 + vllm/platforms/cpu.py | 10 ++++- vllm/platforms/hpu.py | 6 ++- vllm/platforms/interface.py | 19 ++++++++ vllm/platforms/openvino.py | 8 +++- vllm/platforms/rocm.py | 14 +++++- vllm/platforms/tpu.py | 12 ++++- vllm/platforms/xpu.py | 12 ++++- vllm/worker/enc_dec_model_runner.py | 3 +- 14 files changed, 99 insertions(+), 69 deletions(-) diff --git a/tests/kernels/test_attention_selector.py b/tests/kernels/test_attention_selector.py index 169ce040d370c..d37f95d48d5b2 100644 --- a/tests/kernels/test_attention_selector.py +++ b/tests/kernels/test_attention_selector.py @@ -5,6 +5,7 @@ from tests.kernels.utils import override_backend_env_variable from vllm.attention.selector import which_attn_to_use +from vllm.platforms import cpu, cuda, openvino, rocm from vllm.utils import STR_FLASH_ATTN_VAL, STR_INVALID_VAL @@ -19,26 +20,28 @@ def test_env(name: str, device: str, monkeypatch): override_backend_env_variable(monkeypatch, name) if device == "cpu": - with patch("vllm.attention.selector.current_platform.is_cpu", - return_value=True): + with patch("vllm.attention.selector.current_platform", + cpu.CpuPlatform()): backend = which_attn_to_use(16, torch.float16, torch.float16, 16, False) assert backend.name == "TORCH_SDPA" elif device == "hip": - with patch("vllm.attention.selector.current_platform.is_rocm", - return_value=True): + with patch("vllm.attention.selector.current_platform", + rocm.RocmPlatform()): backend = which_attn_to_use(16, torch.float16, torch.float16, 16, False) assert backend.name == "ROCM_FLASH" elif device == "openvino": - with patch("vllm.attention.selector.current_platform.is_openvino", - return_value=True): + with patch("vllm.attention.selector.current_platform", + openvino.OpenVinoPlatform()): backend = which_attn_to_use(16, torch.float16, torch.float16, 16, False) assert backend.name == "OPENVINO" else: - backend = which_attn_to_use(16, torch.float16, torch.float16, 16, - False) + with patch("vllm.attention.selector.current_platform", + cuda.CudaPlatform()): + backend = which_attn_to_use(16, torch.float16, torch.float16, 16, + False) assert backend.name == name diff --git a/vllm/attention/selector.py b/vllm/attention/selector.py index 664707e9dc65d..d263839705690 100644 --- a/vllm/attention/selector.py +++ b/vllm/attention/selector.py @@ -1,4 +1,3 @@ -import enum import os from contextlib import contextmanager from functools import lru_cache @@ -9,26 +8,12 @@ import vllm.envs as envs from vllm.attention.backends.abstract import AttentionBackend from vllm.logger import init_logger -from vllm.platforms import current_platform +from vllm.platforms import _Backend, current_platform from vllm.utils import STR_BACKEND_ENV_VAR logger = init_logger(__name__) -class _Backend(enum.Enum): - FLASH_ATTN = enum.auto() - FLASH_ATTN_VLLM_V1 = enum.auto() - XFORMERS = enum.auto() - ROCM_FLASH = enum.auto() - TORCH_SDPA = enum.auto() - OPENVINO = enum.auto() - FLASHINFER = enum.auto() - HPU_ATTN = enum.auto() - 
PALLAS = enum.auto() - IPEX = enum.auto() - NO_ATTENTION = enum.auto() - - def backend_name_to_enum(backend_name: str) -> _Backend: assert backend_name is not None @@ -216,40 +201,11 @@ def which_attn_to_use(head_size: int, if backend_by_env_var is not None: selected_backend = backend_name_to_enum(backend_by_env_var) - if current_platform.is_cpu(): - if selected_backend != _Backend.TORCH_SDPA: - logger.info("Cannot use %s backend on CPU.", selected_backend) - return _Backend.TORCH_SDPA - - if current_platform.is_openvino(): - if selected_backend != _Backend.OPENVINO: - logger.info("Cannot use %s backend on OpenVINO.", selected_backend) - return _Backend.OPENVINO - - if current_platform.is_xpu(): - if selected_backend != _Backend.IPEX: - logger.info("Cannot use %s backend on XPU.", selected_backend) - return _Backend.IPEX - - if current_platform.is_tpu(): - if selected_backend != _Backend.PALLAS: - logger.info("Cannot use %s backend on TPU.", selected_backend) - return _Backend.PALLAS - - if current_platform.is_rocm(): - # AMD GPUs. - selected_backend = (_Backend.ROCM_FLASH if selected_backend - == _Backend.FLASH_ATTN else selected_backend) - if selected_backend == _Backend.ROCM_FLASH: - if not current_platform.has_device_capability(90): - # not Instinct series GPUs. - logger.info("flash_attn is not supported on NAVI GPUs.") - else: - logger.info("%s is not supported in AMD GPUs.", selected_backend) - return _Backend.ROCM_FLASH - - if current_platform.is_hpu(): - return _Backend.HPU_ATTN + # get device-specific default attn_backend + default_backend = current_platform.get_default_attn_backend( + selected_backend) + if default_backend is not None: + return default_backend if use_v1: return _Backend.FLASH_ATTN_VLLM_V1 diff --git a/vllm/model_executor/models/molmo.py b/vllm/model_executor/models/molmo.py index a7c90a3f5031b..2528f741864b3 100644 --- a/vllm/model_executor/models/molmo.py +++ b/vllm/model_executor/models/molmo.py @@ -13,7 +13,6 @@ from transformers import PretrainedConfig from vllm.attention import Attention, AttentionMetadata -from vllm.attention.selector import _Backend from vllm.compilation.decorators import support_torch_compile from vllm.config import CacheConfig, VllmConfig from vllm.distributed import (get_pp_group, get_tensor_model_parallel_rank, @@ -38,6 +37,7 @@ from vllm.model_executor.model_loader.weight_utils import default_weight_loader from vllm.multimodal import MULTIMODAL_REGISTRY, MultiModalKwargs from vllm.multimodal.utils import cached_get_tokenizer +from vllm.platforms import _Backend from vllm.sequence import (VLLM_TOKEN_ID_ARRAY_TYPE, IntermediateTensors, SequenceData) from vllm.transformers_utils.processor import get_processor diff --git a/vllm/model_executor/models/qwen2_vl.py b/vllm/model_executor/models/qwen2_vl.py index a929b9323b245..0ac81387b1bd8 100644 --- a/vllm/model_executor/models/qwen2_vl.py +++ b/vllm/model_executor/models/qwen2_vl.py @@ -39,7 +39,6 @@ make_batched_images, make_batched_videos, smart_resize) from vllm.attention import AttentionMetadata -from vllm.attention.selector import _Backend from vllm.config import VllmConfig from vllm.distributed import get_pp_group, parallel_state from vllm.distributed import utils as dist_utils @@ -65,6 +64,7 @@ from vllm.multimodal.inputs import (MultiModalData, MultiModalDataDict, MultiModalKwargs) from vllm.multimodal.utils import cached_get_tokenizer +from vllm.platforms import _Backend from vllm.sequence import IntermediateTensors, PoolerOutput, SequenceData from vllm.transformers_utils.config 
import uses_mrope from vllm.transformers_utils.processor import cached_get_processor diff --git a/vllm/model_executor/models/utils.py b/vllm/model_executor/models/utils.py index 03226f42ee053..2ab9b19e22068 100644 --- a/vllm/model_executor/models/utils.py +++ b/vllm/model_executor/models/utils.py @@ -9,13 +9,13 @@ from transformers import PretrainedConfig import vllm.envs as envs -from vllm.attention.selector import (_Backend, backend_name_to_enum, +from vllm.attention.selector import (backend_name_to_enum, get_global_forced_attn_backend) from vllm.config import VllmConfig from vllm.logger import init_logger from vllm.model_executor.model_loader.weight_utils import default_weight_loader from vllm.multimodal import MultiModalPlaceholderMap, NestedTensors -from vllm.platforms import current_platform +from vllm.platforms import _Backend, current_platform from vllm.sequence import IntermediateTensors from vllm.utils import is_pin_memory_available diff --git a/vllm/platforms/__init__.py b/vllm/platforms/__init__.py index 9e740837381f8..1f68fc2e25df3 100644 --- a/vllm/platforms/__init__.py +++ b/vllm/platforms/__init__.py @@ -1,3 +1,4 @@ +from .interface import _Backend # noqa: F401 from .interface import Platform, PlatformEnum, UnspecifiedPlatform current_platform: Platform diff --git a/vllm/platforms/cpu.py b/vllm/platforms/cpu.py index 42bee31dfb0e9..f9a34a47959ec 100644 --- a/vllm/platforms/cpu.py +++ b/vllm/platforms/cpu.py @@ -5,7 +5,9 @@ from vllm.logger import init_logger -from .interface import Platform, PlatformEnum +from .interface import Platform, PlatformEnum, _Backend + +logger = init_logger(__name__) if TYPE_CHECKING: from vllm.config import VllmConfig @@ -22,6 +24,12 @@ class CpuPlatform(Platform): def get_device_name(cls, device_id: int = 0) -> str: return "cpu" + @classmethod + def get_default_attn_backend(cls, selected_backend: _Backend) -> _Backend: + if selected_backend != _Backend.TORCH_SDPA: + logger.info("Cannot use %s backend on CPU.", selected_backend) + return _Backend.TORCH_SDPA + @classmethod def get_device_total_memory(cls, device_id: int = 0) -> int: return psutil.virtual_memory().total diff --git a/vllm/platforms/hpu.py b/vllm/platforms/hpu.py index 170cfff94f90d..1e0888a30ba96 100644 --- a/vllm/platforms/hpu.py +++ b/vllm/platforms/hpu.py @@ -1,11 +1,15 @@ import torch -from .interface import Platform, PlatformEnum +from .interface import Platform, PlatformEnum, _Backend class HpuPlatform(Platform): _enum = PlatformEnum.HPU + @classmethod + def get_default_attn_backend(cls, selected_backend: _Backend) -> _Backend: + return _Backend.HPU_ATTN + @staticmethod def inference_mode(): return torch.no_grad() diff --git a/vllm/platforms/interface.py b/vllm/platforms/interface.py index 970c0d1be617e..f4849fa2ccfb0 100644 --- a/vllm/platforms/interface.py +++ b/vllm/platforms/interface.py @@ -11,6 +11,20 @@ VllmConfig = None +class _Backend(enum.Enum): + FLASH_ATTN = enum.auto() + FLASH_ATTN_VLLM_V1 = enum.auto() + XFORMERS = enum.auto() + ROCM_FLASH = enum.auto() + TORCH_SDPA = enum.auto() + OPENVINO = enum.auto() + FLASHINFER = enum.auto() + HPU_ATTN = enum.auto() + PALLAS = enum.auto() + IPEX = enum.auto() + NO_ATTENTION = enum.auto() + + class PlatformEnum(enum.Enum): CUDA = enum.auto() ROCM = enum.auto() @@ -71,6 +85,11 @@ def is_cuda_alike(self) -> bool: """Stateless version of :func:`torch.cuda.is_available`.""" return self._enum in (PlatformEnum.CUDA, PlatformEnum.ROCM) + @classmethod + def get_default_attn_backend(cls, selected_backend: _Backend): + """Get the 
default attention backend of a device.""" + return None + @classmethod def get_device_capability( cls, diff --git a/vllm/platforms/openvino.py b/vllm/platforms/openvino.py index 31fe3f1fcbfe4..ad69ced5417b3 100644 --- a/vllm/platforms/openvino.py +++ b/vllm/platforms/openvino.py @@ -3,7 +3,7 @@ import vllm.envs as envs from vllm.logger import init_logger -from .interface import Platform, PlatformEnum +from .interface import Platform, PlatformEnum, _Backend logger = init_logger(__name__) @@ -11,6 +11,12 @@ class OpenVinoPlatform(Platform): _enum = PlatformEnum.OPENVINO + @classmethod + def get_default_attn_backend(cls, selected_backend: _Backend) -> _Backend: + if selected_backend != _Backend.OPENVINO: + logger.info("Cannot use %s backend on OpenVINO.", selected_backend) + return _Backend.OPENVINO + @classmethod def get_device_name(self, device_id: int = 0) -> str: return "openvino" diff --git a/vllm/platforms/rocm.py b/vllm/platforms/rocm.py index fd8afc92b0f28..022256996f97b 100644 --- a/vllm/platforms/rocm.py +++ b/vllm/platforms/rocm.py @@ -5,7 +5,7 @@ from vllm.logger import init_logger -from .interface import DeviceCapability, Platform, PlatformEnum +from .interface import DeviceCapability, Platform, PlatformEnum, _Backend logger = init_logger(__name__) @@ -19,6 +19,18 @@ class RocmPlatform(Platform): _enum = PlatformEnum.ROCM + @classmethod + def get_default_attn_backend(cls, selected_backend: _Backend) -> _Backend: + selected_backend = (_Backend.ROCM_FLASH if selected_backend + == _Backend.FLASH_ATTN else selected_backend) + if selected_backend == _Backend.ROCM_FLASH: + if not cls.has_device_capability(90): + # not Instinct series GPUs. + logger.info("flash_attn is not supported on NAVI GPUs.") + else: + logger.info("%s is not supported in AMD GPUs.", selected_backend) + return _Backend.ROCM_FLASH + @classmethod @lru_cache(maxsize=8) def get_device_capability(cls, device_id: int = 0) -> DeviceCapability: diff --git a/vllm/platforms/tpu.py b/vllm/platforms/tpu.py index 643db835c85ff..9057afb6514e4 100644 --- a/vllm/platforms/tpu.py +++ b/vllm/platforms/tpu.py @@ -3,17 +3,27 @@ import torch -from .interface import Platform, PlatformEnum +from vllm.logger import init_logger + +from .interface import Platform, PlatformEnum, _Backend if TYPE_CHECKING: from vllm.config import VllmConfig else: VllmConfig = None +logger = init_logger(__name__) + class TpuPlatform(Platform): _enum = PlatformEnum.TPU + @classmethod + def get_default_attn_backend(cls, selected_backend: _Backend) -> _Backend: + if selected_backend != _Backend.PALLAS: + logger.info("Cannot use %s backend on TPU.", selected_backend) + return _Backend.PALLAS + @classmethod def get_device_name(cls, device_id: int = 0) -> str: raise NotImplementedError diff --git a/vllm/platforms/xpu.py b/vllm/platforms/xpu.py index 106e8eddf458f..d0b3dca9a4195 100644 --- a/vllm/platforms/xpu.py +++ b/vllm/platforms/xpu.py @@ -1,11 +1,21 @@ import torch -from .interface import DeviceCapability, Platform, PlatformEnum +from vllm.logger import init_logger + +from .interface import DeviceCapability, Platform, PlatformEnum, _Backend + +logger = init_logger(__name__) class XPUPlatform(Platform): _enum = PlatformEnum.XPU + @classmethod + def get_default_attn_backend(cls, selected_backend: _Backend) -> _Backend: + if selected_backend != _Backend.IPEX: + logger.info("Cannot use %s backend on XPU.", selected_backend) + return _Backend.IPEX + @staticmethod def get_device_capability(device_id: int = 0) -> DeviceCapability: major, minor, *_ = 
torch.xpu.get_device_capability( diff --git a/vllm/worker/enc_dec_model_runner.py b/vllm/worker/enc_dec_model_runner.py index 82824faa6629a..687d2cc79360f 100644 --- a/vllm/worker/enc_dec_model_runner.py +++ b/vllm/worker/enc_dec_model_runner.py @@ -8,7 +8,7 @@ from vllm.attention.backends.abstract import (AttentionBackend, AttentionMetadata) from vllm.attention.backends.utils import PAD_SLOT_ID -from vllm.attention.selector import (_Backend, get_env_variable_attn_backend, +from vllm.attention.selector import (get_env_variable_attn_backend, get_global_forced_attn_backend) from vllm.config import VllmConfig from vllm.forward_context import set_forward_context @@ -18,6 +18,7 @@ from vllm.model_executor.layers.sampler import SamplerOutput from vllm.multimodal import (MULTIMODAL_REGISTRY, MultiModalKwargs, MultiModalRegistry) +from vllm.platforms import _Backend from vllm.sampling_params import SamplingParams from vllm.sequence import (IntermediateTensors, PoolerOutput, SequenceGroupMetadata) From 74f8c2cf5f6a34fd21cfbe6d72bcc1b2a2a6754a Mon Sep 17 00:00:00 2001 From: Michael Goin Date: Mon, 18 Nov 2024 23:37:46 -0500 Subject: [PATCH 028/122] Add openai.beta.chat.completions.parse example to structured_outputs.rst (#10433) --- docs/source/models/structured_outputs.rst | 98 ++++++++++++++++++++++- 1 file changed, 96 insertions(+), 2 deletions(-) diff --git a/docs/source/models/structured_outputs.rst b/docs/source/models/structured_outputs.rst index ff4ff7169fc5f..484e1f17d191e 100644 --- a/docs/source/models/structured_outputs.rst +++ b/docs/source/models/structured_outputs.rst @@ -10,7 +10,7 @@ This document shows you some examples of the different options that are availabl Online Inference (OpenAI API) ----------------------------- -You can generate structured outputs using the OpenAI’s `Completions `_ and `Chat `_ API. +You can generate structured outputs using the OpenAI's `Completions `_ and `Chat `_ API. The following parameters are supported, which must be added as extra parameters: @@ -137,6 +137,100 @@ It works by using a context free EBNF grammar, which for example we can use to d The complete code of the examples can be found on `examples/openai_chat_completion_structured_outputs.py `_. +Experimental Automatic Parsing (OpenAI API) +-------------------------------------------- + +This section covers the OpenAI beta wrapper over the ``client.chat.completions.create()`` method that provides richer integrations with Python specific types. + +At the time of writing (``openai==1.54.4``), this is a "beta" feature in the OpenAI client library. Code reference can be found `here `_. + +For the following examples, vLLM was setup using ``vllm serve meta-llama/Llama-3.1-8B-Instruct`` + +Here is a simple example demonstrating how to get structured output using Pydantic models: + +.. code-block:: python + + from pydantic import BaseModel + from openai import OpenAI + + + class Info(BaseModel): + name: str + age: int + + + client = OpenAI(base_url="http://0.0.0.0:8000/v1", api_key="dummy") + completion = client.beta.chat.completions.parse( + model="meta-llama/Llama-3.1-8B-Instruct", + messages=[ + {"role": "system", "content": "You are a helpful assistant."}, + {"role": "user", "content": "My name is Cameron, I'm 28. 
What's my name and age?"}, + ], + response_format=Info, + extra_body=dict(guided_decoding_backend="outlines"), + ) + + message = completion.choices[0].message + print(message) + assert message.parsed + print("Name:", message.parsed.name) + print("Age:", message.parsed.age) + +Output: + +.. code-block:: console + + ParsedChatCompletionMessage[Testing](content='{"name": "Cameron", "age": 28}', refusal=None, role='assistant', audio=None, function_call=None, tool_calls=[], parsed=Testing(name='Cameron', age=28)) + Name: Cameron + Age: 28 + + +Here is a more complex example using nested Pydantic models to handle a step-by-step math solution: + +.. code-block:: python + + from typing import List + from pydantic import BaseModel + from openai import OpenAI + + + class Step(BaseModel): + explanation: str + output: str + + + class MathResponse(BaseModel): + steps: List[Step] + final_answer: str + + + client = OpenAI(base_url="http://0.0.0.0:8000/v1", api_key="dummy") + completion = client.beta.chat.completions.parse( + model="meta-llama/Llama-3.1-8B-Instruct", + messages=[ + {"role": "system", "content": "You are a helpful expert math tutor."}, + {"role": "user", "content": "Solve 8x + 31 = 2."}, + ], + response_format=MathResponse, + extra_body=dict(guided_decoding_backend="outlines"), + ) + + message = completion.choices[0].message + print(message) + assert message.parsed + for i, step in enumerate(message.parsed.steps): + print(f"Step #{i}:", step) + print("Answer:", message.parsed.final_answer) + +Output: + +.. code-block:: console + + ParsedChatCompletionMessage[MathResponse](content='{ "steps": [{ "explanation": "First, let\'s isolate the term with the variable \'x\'. To do this, we\'ll subtract 31 from both sides of the equation.", "output": "8x + 31 - 31 = 2 - 31"}, { "explanation": "By subtracting 31 from both sides, we simplify the equation to 8x = -29.", "output": "8x = -29"}, { "explanation": "Next, let\'s isolate \'x\' by dividing both sides of the equation by 8.", "output": "8x / 8 = -29 / 8"}], "final_answer": "x = -29/8" }', refusal=None, role='assistant', audio=None, function_call=None, tool_calls=[], parsed=MathResponse(steps=[Step(explanation="First, let's isolate the term with the variable 'x'. To do this, we'll subtract 31 from both sides of the equation.", output='8x + 31 - 31 = 2 - 31'), Step(explanation='By subtracting 31 from both sides, we simplify the equation to 8x = -29.', output='8x = -29'), Step(explanation="Next, let's isolate 'x' by dividing both sides of the equation by 8.", output='8x / 8 = -29 / 8')], final_answer='x = -29/8')) + Step #0: explanation="First, let's isolate the term with the variable 'x'. To do this, we'll subtract 31 from both sides of the equation." output='8x + 31 - 31 = 2 - 31' + Step #1: explanation='By subtracting 31 from both sides, we simplify the equation to 8x = -29.' output='8x = -29' + Step #2: explanation="Next, let's isolate 'x' by dividing both sides of the equation by 8." output='8x / 8 = -29 / 8' + Answer: x = -29/8 Offline Inference ----------------- @@ -170,4 +264,4 @@ One example for the usage of the ``choices`` parameter is shown below: ) print(outputs[0].outputs[0].text) -A complete example with all options can be found in `examples/offline_inference_structured_outputs.py `_. \ No newline at end of file +A complete example with all options can be found in `examples/offline_inference_structured_outputs.py `_. 
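Beyond the Pydantic examples above, field values can also be restricted to a fixed set of options by declaring a Python ``Enum`` field on the Pydantic model passed to ``client.beta.chat.completions.parse()``. The sketch below is illustrative only: it reuses the ``meta-llama/Llama-3.1-8B-Instruct`` server setup and the ``guided_decoding_backend="outlines"`` extra parameter from the earlier examples, the ``Sentiment``/``Review`` class names are hypothetical, and it is an assumption here that the configured backend handles enum-valued JSON schemas.

.. code-block:: python

    from enum import Enum

    from openai import OpenAI
    from pydantic import BaseModel


    class Sentiment(str, Enum):
        positive = "positive"
        negative = "negative"
        neutral = "neutral"


    class Review(BaseModel):
        # The enum restricts this field to the three values above.
        sentiment: Sentiment
        summary: str


    client = OpenAI(base_url="http://0.0.0.0:8000/v1", api_key="dummy")
    completion = client.beta.chat.completions.parse(
        model="meta-llama/Llama-3.1-8B-Instruct",
        messages=[
            {"role": "system", "content": "You are a helpful assistant."},
            {"role": "user", "content": "The food was great, but the service was slow."},
        ],
        response_format=Review,
        extra_body=dict(guided_decoding_backend="outlines"),
    )

    message = completion.choices[0].message
    assert message.parsed
    print("Sentiment:", message.parsed.sentiment.value)
    print("Summary:", message.parsed.summary)

As in the earlier examples, ``message.parsed`` should be checked (or ``message.refusal`` inspected) before the parsed fields are used.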
From 272e31c0bd8640c15e85211c74fc9b428ad86902 Mon Sep 17 00:00:00 2001 From: Travis Johnson Date: Mon, 18 Nov 2024 21:57:10 -0700 Subject: [PATCH 029/122] [Bugfix] Guard for negative counter metrics to prevent crash (#10430) Signed-off-by: Travis Johnson --- vllm/engine/llm_engine.py | 2 +- vllm/engine/metrics.py | 5 +++++ 2 files changed, 6 insertions(+), 1 deletion(-) diff --git a/vllm/engine/llm_engine.py b/vllm/engine/llm_engine.py index 9a2d73a020c8f..e72dc81f35b67 100644 --- a/vllm/engine/llm_engine.py +++ b/vllm/engine/llm_engine.py @@ -1716,7 +1716,7 @@ def _get_stats(self, # not counted (to avoid double counting) actual_num_batched_tokens = scheduler_outputs.num_batched_tokens # type: ignore - num_generation_tokens_from_prefill_groups = 0. + num_generation_tokens_from_prefill_groups = 0 # NOTE: if scheduler_outputs.num_prefill_groups > 0 and # the len of scheduler_outputs.scheduled_seq_groups is != # scheduler_outputs.num_prefill_groups, this means that diff --git a/vllm/engine/metrics.py b/vllm/engine/metrics.py index e896bcdded2d1..47472c274ccb6 100644 --- a/vllm/engine/metrics.py +++ b/vllm/engine/metrics.py @@ -512,6 +512,11 @@ def _log_gauge(self, gauge, data: Union[int, float]) -> None: def _log_counter(self, counter, data: Union[int, float]) -> None: # Convenience function for logging to counter. + # Prevent ValueError from negative increment + if data < 0: + logger.warning("Skipping negative increment of %g to %s", data, + counter) + return counter.labels(**self.labels).inc(data) def _log_counter_labels(self, counter, data: CollectionsCounter, From 382b6a4852b9afc9a740b02736688e20f7d58446 Mon Sep 17 00:00:00 2001 From: Jee Jee Li Date: Tue, 19 Nov 2024 16:54:58 +0800 Subject: [PATCH 030/122] [Misc] Avoid misleading warning messages (#10438) Signed-off-by: Jee Jee Li --- vllm/model_executor/models/chatglm.py | 5 ++--- vllm/model_executor/models/qwen.py | 10 +++++----- 2 files changed, 7 insertions(+), 8 deletions(-) diff --git a/vllm/model_executor/models/chatglm.py b/vllm/model_executor/models/chatglm.py index 625e31bb0d368..2ea592aaba9f9 100644 --- a/vllm/model_executor/models/chatglm.py +++ b/vllm/model_executor/models/chatglm.py @@ -575,8 +575,7 @@ def forward( return hidden_states -class ChatGLMBaseModel(nn.Module, SupportsLoRA, SupportsPP, - SupportsMultiModal): +class ChatGLMBaseModel(nn.Module, SupportsLoRA, SupportsPP): def __init__(self, *, vllm_config: VllmConfig, prefix: str = ""): super().__init__() @@ -695,7 +694,7 @@ class ChatGLM(ChatGLMBaseModel): embedding_padding_modules = [] -class ChatGLMV(ChatGLMBaseModel): +class ChatGLMV(ChatGLMBaseModel, SupportsMultiModal): packed_modules_mapping = { "query_key_value": ["query_key_value"], "dense_h_to_4h": ["dense_h_to_4h"], diff --git a/vllm/model_executor/models/qwen.py b/vllm/model_executor/models/qwen.py index 3978c176a2144..44ce6eda42943 100644 --- a/vllm/model_executor/models/qwen.py +++ b/vllm/model_executor/models/qwen.py @@ -870,7 +870,7 @@ def dummy_data_for_qwen( return DummyData(seq_data, mm_data) -class QWenBaseModel(nn.Module, SupportsMultiModal, SupportsPP, SupportsLoRA): +class QWenBaseModel(nn.Module, SupportsPP, SupportsLoRA): def __init__(self, *, vllm_config: VllmConfig, prefix: str = ""): super().__init__() @@ -1024,7 +1024,7 @@ class QWenLLM(QWenBaseModel): embedding_padding_modules = [] -class QWenVL(QWenBaseModel): +class QWenVL(QWenBaseModel, SupportsMultiModal): packed_modules_mapping = { "c_attn": ["c_attn"], "gate_up_proj": [ @@ -1062,7 +1062,7 @@ def get_mm_mapping(self) -> 
MultiModelKeys: @MULTIMODAL_REGISTRY.register_max_image_tokens(MAX_QWEN_IMG_TOKENS) @INPUT_REGISTRY.register_dummy_data(dummy_data_for_qwen) @INPUT_REGISTRY.register_input_processor(input_processor_for_qwen) -class QWenLMHeadModel(QWenBaseModel, SupportsLoRA): +class QWenLMHeadModel(QWenBaseModel, SupportsMultiModal, SupportsLoRA): """ QWenLMHeadModel is not only applicable to LLM but also to VL, which is not conducive to the current integration logic of LoRA in vLLM. Therefore, it @@ -1083,7 +1083,7 @@ def __new__( config = vllm_config.model_config.hf_config # Initialize VL if hasattr(config, "visual"): - return QWenVL(vllm_config=vllm_config) + return QWenVL(vllm_config=vllm_config, prefix=prefix) # Initialize LLM else: - return QWenLLM(vllm_config=vllm_config) + return QWenLLM(vllm_config=vllm_config, prefix=prefix) From 5390d6664f65d84f37a5fb524e967b01baad9100 Mon Sep 17 00:00:00 2001 From: Russell Bryant Date: Tue, 19 Nov 2024 04:52:11 -0500 Subject: [PATCH 031/122] [Doc] Add the start of an arch overview page (#10368) --- .github/workflows/png-lint.yml | 37 +++ .../arch_overview/entrypoints.excalidraw.png | Bin 0 -> 123422 bytes .../arch_overview/llm_engine.excalidraw.png | Bin 0 -> 178116 bytes docs/source/design/arch_overview.rst | 274 ++++++++++++++++++ docs/source/design/class_hierarchy.rst | 74 ----- docs/source/design/plugin_system.rst | 4 +- docs/source/index.rst | 2 +- format.sh | 4 + tools/png-lint.sh | 15 + vllm/engine/arg_utils.py | 2 +- 10 files changed, 334 insertions(+), 78 deletions(-) create mode 100644 .github/workflows/png-lint.yml create mode 100644 docs/source/assets/design/arch_overview/entrypoints.excalidraw.png create mode 100644 docs/source/assets/design/arch_overview/llm_engine.excalidraw.png create mode 100644 docs/source/design/arch_overview.rst delete mode 100644 docs/source/design/class_hierarchy.rst create mode 100755 tools/png-lint.sh diff --git a/.github/workflows/png-lint.yml b/.github/workflows/png-lint.yml new file mode 100644 index 0000000000000..4932af943a07b --- /dev/null +++ b/.github/workflows/png-lint.yml @@ -0,0 +1,37 @@ +name: Lint PNG exports from excalidraw +on: + push: + branches: + - "main" + paths: + - '*.excalidraw.png' + - '.github/workflows/png-lint.yml' + pull_request: + branches: + - "main" + paths: + - '*.excalidraw.png' + - '.github/workflows/png-lint.yml' + +env: + LC_ALL: en_US.UTF-8 + +defaults: + run: + shell: bash + +permissions: + contents: read + +jobs: + actionlint: + runs-on: ubuntu-latest + steps: + - name: "Checkout" + uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + with: + fetch-depth: 0 + + - name: "Run png-lint.sh to check excalidraw exported images" + run: | + tools/png-lint.sh diff --git a/docs/source/assets/design/arch_overview/entrypoints.excalidraw.png b/docs/source/assets/design/arch_overview/entrypoints.excalidraw.png new file mode 100644 index 0000000000000000000000000000000000000000..bbf46286cfe5d0820e4183827f9c2b852b005b4b GIT binary patch literal 123422 zcmeEu^)B1}casAfPfRAfR-IqR2?cD2;T7l(blg0g8-tNOyONib#%tbSm9F z#1QX3eg(as_j&I7AGm)I{it*1oa^js@3q%j`|_5Tl{|8g>fo+jyN*cRlu+EYYk%;r zU3=6H?1R6#^I5DEe%o!UD0y{PS_944u3hxIq$I?Z?`uu=99maV+T5Cx8$(l`@Q%C3 z6eW1A@Fa>k0DaMq5qPD8`TJ}emUC%+XU=oF;XBQQmp8+E23+xLDMT*I(>-Qf#K**T?fwrxX!ag_ zZS3HG?(l#5kH^I5kI^^Qy?U^V=m7n{|Iv>Ui|TmsLgznxHF9@k_1$~^)8*jHw1^mj zOA8L%_>Yf++@kQmzq?=m`rljpw@dloRs6q${J*jIZ_~!8-rT{!a7M-7#8J5|l#$w7+e)VKb*j3IKFx`{6d ze_!SaTt@k#E$Q~X(9<29?98p@vFtYP%CpF}D&J_$v+AQ=cU`FGt=MAID9AYD(0&(_ 
z_sKL2&_Z;Yyr&5Nl^BL}HbT3*=W{4~IM0sQ!5XJfql(*UmUun_{N})i7eonKO2-9j zIDs36HM-a+8^s2aYT5s7lk`glBCgj&;;N>)UwM6xY~*H>p`OJjYcd)RIxvL~#7v1HLiny54vBV_m&T&<7fuu7eA_-pO-H0pU<3#pXq&ZC$HoU3b7$T=c z?M<|?i5@cHM0ivPgXl}))&2F(1jN`1F!sI4SnL+=naFr`spqd!8)|`JyPLk_xSzmQ z1NRS25&(DUJR(xn3~QZSy_Xt-?Mqr4q5l9W>7N(@z^dU^Z`cTI#7p;3-}>>c##4_N zj_nuJo|--m=v~2okz8~AA5i)a-@D=s(xc}6VdIXyV^)bKC&xj6H1+OlqBtg;Yx@G& zTOf)jUZdQqL4d=p{UA`a*YGV8=cc^o+uu+atWWMhC%~3`XdOkc@30uX-Ng8l1J>&A z6v{|=ysy8154qLoKVAa=3djj|Y< zKgE9=X@HNB4<|Ho${uM$8Cj=r{D)Q{Z_(-;@I$za?1=_R3oNtE22fm!{<*RL1_c59 zY7jg=$+XZC;L%4fqmiJM*u7!i!b?=R|4D5+asUq2F~|NAYc{lDjc&4`a!P_$grw|$ zo(9mJFl3*74E;hZ=JngZ(ve^G&6&11YWNe$$0v~o`7k^A_8Mdn&&)VUd&~_U^p|wq zM#ejihUEv~4u#sq@C&lC(xgJF?u=mc6*4Icu(Zs+{lUCmY}cL==6uN&Z%QQk!2k)` zjT<^MsnY-USGWdI;8R|(aUtoGCkWpd52~ge$#%=f4j%89J3ENwSmC{}|7j~C`ljw0Jzi* zh~Hk9WW)af6o#-8op;|{rH!G#Vd1cKM@5DENJRjP|3pEV-h{iE3SG;eQm{W z4**pv`CV-P@4X_s;U32<^Eh6Y8{kvyP#)q~GsJ;T1v&EeR zL*x%z*OkXyB^Vst1df3EdXNMy)Wn^+ywi`xlf+({AUUPOi-(*FDqU~imc5h*ZfA}0 znklUDp3t~lpUPy9-=9!2IGmpLp}4b&O8|`yTQn5^t^a90K#A+-{Zgog1L!Ye`HOF+ zemp)b2QDnJQhlc5e;a#mWAgH7HTz|mUDSJJcI{@!{vLnOV{JOWaq?ZcQ@E-9QR%fQ zoRVGWSm6vQq(1EU;lN4jI)Mn8 zP{E6G=qAO~T)B;>;5GyF!uadw(O_JBOmc)}w8e5qwO!&2IQQfxT+#<#4<*N97tX%> zZ*WrDz+#*GRR1JwE(+qhQ88x>ldO2(;CkirEUJ>!LmO7+hYv*;f*Z&hz)d?hGbdD* zx~eoXxkc}Uv#jS@+>c@~1jEtM>+>}+v)*G$IgOhm68DbXNnyA2xPt4t>K|1&8ZELE z?-YI% zZes_V&6@1;nzTTAD(C^gCMV;Y9T`CaN2R?*N_L$VZl1z%vL;=+LHAHAK~=@8x^Ln% zQmO6nUM_n}uj$-JOO7cj_a>4f8nUlc9qci4Z9UWY^^Jb!^VRaLjdvzjd|CfaRg}+` zt!YX!vdz>M&z3pJ_1N+)G%3w`T>e{cRCcvVb#1Ee!TbfjESW|5e%H{bW7p1Jea`n> zxos-9)AGr?F*Ue3ebC8I__8x(F#vrpn0F0lOnZpYYfoFbryV1g>2AI`U~n~&0Xjum z4#qNI%^Flamu{KeJG^fC?8yOQuqYV9$mWBWET5_#?~}i`xsb4SDX&55OC-3~pmgJG zTIJgc@SlfqV5F@a-13G5vtJL@iL0Mzpi+(WYX&2YZi5cz=5BXd*r1}U!)sMz!R-{N zj}K|@mW9_e%>tt9R-UWaHTwC`O_~42)K`Z^xp&bDG6o3hkvK|NM=1f7lEwfOq`NU- zXi#A2R8c8CN=OVKNHZWXbc=%0H4F?5!caqZ-u?Q0-@W$_pYuHW9GH3E-*4}=)?Rz> zlhT`J7B3{XKa@VB6W!k#J`h32Jqv^xjX~-ju2PqG=F0{2|GK7Ep8g@Blt0`G%I(XH zaAVT0x?J>=wW8S_$&-#deV@Mt-=Pk7vnI0!jNX;o}R-L(b|&+Ic!y_=h43ZY9tz>Ws>* z+Bmqr|F+;suW}FJJKriqpySKR{y4x_?zS1egE&^W9 zV4TNH20fvA@f)2*k3PDP@YJEwb1v_7DLLLVrpW1FZzIJks%tad7TgAf{NR61frk%o?%4j&^cbLKQ{$BQlp^vK4 zeXH4H7=`3RNdajK6)P$Z+WfB7vr@loaw~hB|Az6%aeZ*5SgT+e%z`IbPn0*x7!$4U zI_Lr)QaRBb&Y-BFU15YiY_DyZD`*?~U>K=*8TX1RMl?ktw^L;}`JFUrcbixpCe$uA zCQkIoDV?<5`1iMu0)z_o+g;F1@f$e5n5ZHTz?G!qG+pJWo%&XMQVZiAlF))UYuetA zwH%lF1K9r*gXh*^F-4)+YWrP7ox~OG4G0f&p^j-T{xG$Nt#KWA_WG=_wN}>-zBa7!^QpDfUEwZ@Vy3Dcuw<44{ z0N<)f%6FB6iY5jWk7G&FG*tifar648CvaZV8;kx=B`mh4xM~eIba7*KwcBnpx?yKB z*1@Bk?_e$0w=nxIdUPJ&r4rrW5qjQh(cgXP2TO~RK;>`BS7S4ORE5yDz3nxdEED_v zJ7%3<9$4&dF442m&e#?8S@gU~($JMz{&{(5fW)uZHpp{fu2{x<+c}xNz()+iUyHKn zU^m_q8JXK>nH(D9vj-bCg%vp5pZ+EmzVLvTn^*K=peQ4Fd(EL$Ti4heQb9$aud98< zl6*lUPgcnkwcYQ3YT5?lE@Xg5BMuNB=G()|$1aR$Twr5d)O`ApqkrOBJ2Q|UH9o-k z%mFQg7*qK_Mc#G;KbE1s`1d!S)PFLxFTJvEK?$;R33SpzDO!1L!0xF7Zf~dSmHEDv z8Z5)dOzJfw>~q@b_#S4dyFK~;#Nrw=lJ27Q{zUdj)G-mmBdPzv z>ECDf8%?*xovQOLm2KK)luY9Ug)Bs zK2~z(*#4z@dqLNEJR&Z2%Uxiz?Rk9lrbV~;`dLoo?V3`VxTVEtjw7-HOl~m+FAvs>tD_+!HmCE4x_?v_xe?4)r1#SM)Z;g=?C#2(an>oIO%S7#P)00o`JQ(XY(Vi|FNIN zK_z8%Wu=~5D)f&wB13wlbaVPHlbW@3D(5hgyv^>1;F%hAw(urDuk-45Z4=^5S^j{Vrwpzd-Y5F}iNA0ykLg;7=)Vihn~rm_ZI5i9)410 z(uUP&7Ud<4`oLL_0g6#E`r%-Iqgo}tuVkrr*MqY@U2Dk0!Ie@-yn2oVTY;TclF#pP zbrfn>ypeTcLPy-VsAde=@Q6K7VR>x0sF{G(UP(s_Ap?kKF&K z{qXlxiw461Bq0%CPoIm&F|h8&E?upbg)&ALIlMb5I+-T3XSEy{dc$;wL~OnkeC;vb zP;)wo9F68!2{zsR)-2L*%QxF+_JY2Gv8(ojkT5wCr{7`(+On(4hO2$-uu~?$SP<-9;aq?ODkl94JcYv+(X3qo#c8`BX4BqH=Rm zC1eS}j%c#NJC)ALUSC#CU5!pw(CMAWrbMWP5gOk07TtSHOZh@cZ1e>uf<@F40;hhQ 
zxezSBH2IR_&zbB6c`B}1XByWKa<+^77@L)~;-WoYGR`1q-JKq2lhk?L=BQ29;HOMk zs$OH)K?0rGtkJ}^48MrwTRd`w8tB}DR`I;UzB3tVF{$r+RM5TyK612)HpMwXt>{6w z@leB5!y&-C&A&Jk2Jq)ewb=zB5gcL=0(DySvy#F`MzzTMN&a}smCF4$m&2|jY_e|R z92v>e_@JVkEa6`F4!W&fRWzs(>F$eVOIvCd`MlS@*7r2PlVzym5t@LrGZ zC~bLgcmTJJAi;H?VV8gomN@@KZ6#}l~d9J1!-w%8skr{|H&Oc7>`vsqyN>s zL4ITM=UM(nJd8u?=;)l{ArEioL*;`q{d4pt=8#sGaIp!(;MX!J&RmEzj zQn9U8wWlR|74p{U;ZkPZdnpf8KBjH~roKV?!Ol}hV@^GFVv(J%E_xgM^Dx8rM#)2 z=xhZNnO8|CS+HU`BgL)9i3L%Gmb$AQ3g-ux*Wt8_bba&B_SR@zdvQ{_x7@gP6(UH{ z=S<0jKC>SV6-$BYBrjCHArsamhn-T;Cha+Qs?hDO3(ukIiAY2FBWuQCH~3+^-%UW8 zMNuAu!*U!PPmMJ#&sc_e50nD*qea9J*^jq%0RYF!E=CEMtfB>f|8YlyBv;EwvNHLZ z>hMn*6z970)i1vt;t3%1l!_lt8-mVEw3Ie6S|0>wup4}MA9E_ngC??@D zy6b6$c;zq>a%fWKLT!>qMAfYaeb!mHOKoyGj`oEF9YGshq0U`l(r7k&jLc47+x8sE zuE#`+G^K0e{=+u^*Hz`iAH-dCYwu#0vTX_lw*W;^y1v75*QY?BlYgd~x{ab7irv2c zb3FX&GaF0`LD$p{UnimmxkFL_TM(Qi7#?9SW#p_=GE{jl+%;xm9Cn>+^Bhy!lMolb>@zlkmW|lGB??|6x zezO@DcG$8M2e_UiLy|`87`qSx=egFFL3v=ECS(tvGws!_wZ<;6X)}gv`q=>4vfIqp zFykFzyM}9()`=lI`tklMZME8cLF5T74ItNIv83Bl-rK81ER6q5f5b%d;!0bOGqCX8 z{{$($vbv{l^jW#1?`raSce2yb1Ips8)*%^5(S4%xP*JBGnw!>SX4jTdWdcgbW7k8x zsN&Y!45=wSy>?Os=RStKlU%;<1j#V{a`i;JS6j~idm0KZctE_+S^bXc9d zDT(2eymhd<(jVWSuU$iK&Tsj9PcCr?VC<~aK?sL`CmjvaGr_;-eDe^ZLOWA`Y2zOZ z|AEnS${fi*Z@KPZ*5GlCodu5fD=+~lsM;5>SJEEhwmj$J^IXeJ7fr)^eGj|f)zGhe zRx)@EnB)qEk2jHBm(hzr9$TEB(%G11Gf%N#i2SDUmrchQwo+A4J`NF#eW`mDYYTbBZ^IsJ&6$ zFlUC0eVBo_RNT5Av21tm^54C*>nr(9{DQMN<@R@pv6L~QT&-Zriy{+*Xxbf{(D z&ARhNPrLaSfDa6E+3jd3@evr@6FlJ{Ti>tkJbvcty>;CuojK+xgQF+@HpBNDaOV)t zQv!5k_38NV*T;0Lx3%xG*WyW|#yjeLhit>t*+E+-S75i)J-AJZq~R?#LsDP%T?S>g6#2ua5F;Krc zcXmn!Llh4@Vdu-*1HNqIAX=}?oRTEjuz*OCvSib>0(^xL`UbCy>sPKy{EVe`4CB{2u6)g@PW7BXbY6F_DpzXwh z4$PPDnNOGcwMR}f_2G?}^Q;qqbfjnIJJud>q%eiO+~DD7 zv+U5vv(a{i!EO=SI!iYXdyTUWTZ(;2FNkot>)0jd#+es?F~#I0v*DA~{x^#1!>vYx zz6X02BQ^dME%ioE_hJ1;tK_a@-AMD3U0S^VWi@)BV_Hu*&LZ9?H47>e>Tsv^v4Ey? 
z5!B5d?rAdINCd~CHU|i<>nFv2pK>U+(o%Os1Q7e-FKz80a#1L&bW!i5w|{cs3NbJD zL5B#%sZfby-|p@$_*PpTS|53FQbdu=evLFyj!3Y$17Ny~GbdQ~RKDP=eBP3n*s2uD zCF{`3Ja4kio`2mW?g3v}$J_}F4v1DHO4vs?WSIPA7ujC{5xTOlBP z;&kmQvz08co(mCdy`C2LTrmN|^i(HSeRyFsxbXd*@~AqwVW;gOP4yb_xO|y8v`vWh z0VLwJu()4Yc*R!b%iUmpQP7&pO_?; z>wlH#6-C7{{r!%Xfp;_Ig5FmliDEsXrEZ1>b2Y#@@4(!_V!CTzWfz7Me72`2yERqq z8o%HhNlWMW6`9zi9f&f?>j?+DY5MS_b_SBMW1s>#^J_kb#3)SDtUV2)kv$^O8_m4S zd1!W=H+ZDeQPl8 zMy3E~yxZf1g)>qLji=uK9?l#oDHIg1C?8zkxbyD}=l>?WoC2aNrt&8DvY+fD)UAa7 zwisP_;b)T0UHIr0pK)Mj$&b84GVj=4pe_0BH}Vgt!v~wrI+j9t*BX@Qt%EeNUuW~J z&eNje7)ypcW^qq6WnRzvEL3h(u(ysHC#rk*0_Lo!vA}YnmzawEYZ}4Z9m|#pXslVZon(* zSJfV2UG4MtC1YJZ&>vn0GvAQh-yE;>WslevE4piEWHuhH?fR{)VE9L@ec7*U*tN^& zxr+#X!^!)7W3|C+l^*b&)>Czf2R$3^U#AXiJ_nlGhN`bAq{M9;9-OIWE_~TF^!jM7 z?@0i21JdMx;v2Y>iU?UJ?zB*q%W8%Yc?+=~N5W*cP;Pmq`(jUNj$K%M<(F_oIcq1v zKt-UFYvS~UI{6!99{!SSlaJ@hKRGGn!s&K{GhhAb<7*C&pSH06DVp(hoe(c<0~h^xRe5Ib`Sp>%nVLIa)iQpgm{||d9y$`- zs@h|(E_20MVohPNd@jFfJz5ksPhc^WD}P#;WmImR97G)eH7SSw7Tzl*8ZV9px3azy zuQsfXMYUZdR?LQ&q$KJ3MueI8`anTW-l`iAi}6A;MRdrmZl`LyL1ur{TVSfqY}-T1 zMqJdGyQFZ|#CPUk)@Wu#Y_cf6Z*gVLkSjeCTg99G_0D9H2m(crEYyUqk#xC{+{p2_`}3 zsdnkL3dmy0FL=cT^=}UVsNxoBM=r7(6`FF)79)?xv{`>n4vogl4f%(=7H7Gym%Cr zll??0s=6V7ezy5ZN@|Z_ceU?9WL>nFlZAt)PwciPmF${vo9b6QsgHCG=gzE3G166> zGgBYgFPqJ=*_4n9M)bePt2c+wNDGj91hb4u2KbKbso~>%w>mb3(c+w5+pE)jETL1L znskzr@R5eBlQkk{gKJmEi|W&VaM2DN4Q76YNW#(Nv{uo-EOS=o1Ti#?5Ti=HoaEn~ zmL2g$G5{l73m)pxh#iB{DhP}^=57(^VGc;EO5X0;FpOxbkV&SMh?0;{z(arO1WUSS zq8N|bWLqXRH|?gS?ib7ocVe21A^U3>(4*=y2+;x&dS+L?P7ORkv1)gR1?197)>E>z z{WPiK7q^;x4vR~;1MFVf|D%ICb6EFu&RrXY_dps=XDoASzM;@G$!OU%KWo;Z=YuGn zh=hCM?9DIJd(S(Q{6a-YKb6|p&`de^CEn&%u9Nf_D2(j491}$aNXA*R%OKB9tmW#z zAI{a9NarknitAO`=FFE?)}EZr3dQaXY<%67 zD5Lz($E8F`Qgd)uykH+_CHt?@TtBWfBZ)kaqgRZ#Q|9e9=N+gOTxO1y_Ijs`Obv$T_5&62y}9Ad50z%kY3r?ftsOy#p z-F;4W-N;#diEgmh-s6>M!nZ?wkAK?4$D$nTo*YYSEfSWeq!TxEOJ0)2nj{HJ97FCs z&w9=ZQcoaq+)!yU`B=A!pJ|yj;^dGuhvp{}pQT{aPgvKX(wpO1b+R+N)I$@y^{6NU z3r3(k?lcMNXl2K`*T`|L5rnOLb-C$?lM-`ZBPF7e=WU8?G`XX-O-tgumVe0ypGpo2 z(Am!!Oczv+klz6~cU|GUxARl$IRth0pUq0b=8?qqh}HYTgeU9~)P1?K+|^97Yr#J9 zvy!AQa#mkqvG{gi(+h6D!dV@vWxQOu9>D>Xz~rcV$OFj3JZ=)mn$R%lyPAeyD6f6@ zK)GL_XfPz;VhgMO*^mNNYDlc~^?4t+geIo%y5g&n!OTi8U5O6B1t+r$T4(I!+lG#? 
z+P+5@424;xV2SR{+YX@AZ?s}LuABL7B*goh%~>%FW z@&{>EK**(tEk%;r#hYs3ao{ojas`2p4iEh4e%*OjO!ur#}B!khLEZ|@{aijU6X46IxZv1_Xy z`Sc|01}UOHd(*0+a9m$P!G+nbGil#aG(RCwJdRp#?Wfb~REPWuK}EgnQRolVy)38a zYdf3pSO-)J-_8@l+_7)71?^q?%vl7c9LgWYOZLUmp*#sWrr#vIR%R5sw<^m= zye?||DRFf`v5Ue|DWPdt``9LgGtTkO(^JND;yGnAJjqLHAC51!3PzvN1!Kew@ba-d z;g|vn^7Pb5RL+S%b7WsJ9!vPuLm-&rU%b%Z@E5Q0D;AURan!eSoX=ObnnBEaAC7k~ zk>fB!9M6AV^!5wS_kgnV!y_!u2u^KMXc0(N(xwOizN|UJGg8`bU+zvtkPN2oLaG+7 zMb0H$TX7%xvws$&vSMJ6B&kNvZ4RsQe zMzW)Nj2-qFZp84qWlWC}RTFq`*XA{JdCVx^39aiTf>B*X6Q;S(xXnv_b@EzWd!V*Z zr0pEG2@4m;Lst+1qP3vM{xKz!cqs^KkWw3|5%J!GE<}n`Bp%aoRI~22Yrnk9y|$dz zg&wg%k~_O3Su8iK-^XIlXXfDqf=(pRhE(gSGJD;06|?$AmY-i$)Yx|OKcd`6iWMAU zMk!Mu|HE*yfIwfg;wau)kaHx&>-~^Pg&G8x$B9 zP_D^A8e+4SOeo0}9?*~rdko%YX&!n&m20N{Uezknjs-Qw_%Ng&o9P!GP(4{fr--S-TTfim_Zj^!N=dtjRCy|!Bv;F~!pMvm*6jSYD2 z6Pu|(se;ooG~Xi`KaOAVfv*t*9;W;@yHTydnJnty?f9+)kQcD(+va@r?P2CJgPx>y zW0Qo{=+N1ZlS<|OFXWCOYI2j|TIC`D$_`JJ2&Ni&D-n6V?RIisdJa1k|ZYt%YwS1lZqRNSC&}c)L5yie(RXmsMQQh^)qIcK({8x3v90iTJ56 z9OM79$_Wpot&AqO_NW+_{$LHKtu&=!r)uF*^gjdI zDH|JI1BPt!3R#m_w8EAZ45iUP8qAK@gHsCJv_8{3tw!xsZPzHIEq22k4NThRL#K$!PUxDYtwbPtBtkT@Hr2G!k}WqxxWw>&I8> zbVVi{zM&E}uNCu*oXZ=QoL}dDr(D1X7;GQJ zn4%==Borh%e+7wVVh2f-bCTy?9r9gy0F0==5-q3~z9oD1RFDxq*fg3o#@x!Xcwx@c zUibYD;-B{d*^oT@0CLBOTH-N(iAN3rOb{pawzyA2^FR+1As zE~{DltL%5Cm@OdIOQbB{rgnSy*GEoI^u<0NsO`QhzSM1stAa_V(TJr>$BBLopwZrQrA5%qiF=2&_TH*IDp4YcH$mAHxHHRj5z3cJp!Y$WQ z$u>zTF!XvHUv$bbSz+g(z*l?90Cv5=YMuA`wH6?x_nz5=qW#cU-d;lKlzZ6y=)*`U zB{|WlCn@|u+`udgSCrWzk)cwx(k!luG)1o=_-VKl{rgzBN#e(9XG&fJhrIu=Sx2AP z-p)qxSf*?@6nregOgWGOG15FmWl|%mLX(k4w`|sl0E!Eo@UACl^gFEs5-NphD5fJb}&qmE6;Vm|mYdy0+(H)P2+IeXPj&uFohIhLvq8HJuOM1A3*bvZP zY(@*iUd4h-utMvg@rx)FL`v`STPk5a$ogswWgy?7U`T+`;_)Py7T9i-s9Ei7MP7za z-qW6kY*P>*+C>BlRQ0mCbgR=75}Z3945>WnzmjQJD2AAzq4+Ce=L3?XAYx~%BUY~$ z=0<}@=?5ar{v5EwqIU+I+T0}?5r#FSHKxzwHJ8`Wyt~qtFwt{oXk{iVH%74aRvp*R z;apQ0fyzN}nHv49>dZJ`X+LU<>t=`qtIv1CT4nBcvc{%0XN>sU1Qk`0_<^CB*>=id zFUTCoYAa2{VhrjWbE@|}1jd;`1e-hVwOD)9M83$Z6rfql`-^wygcVy}ou2BJX!~{u z>Dl3mK>XgyELgZciLz-v)7udKTmDkI)^YtT68Y852s-ZY?Y3zm5KX*b;CWM@b$;;> z1oJfIwob(Is_(V*>Q`R1}W*D{V#4h=wg zULMV+{h3Q{i`mfjku$96H3G+ocfcp@za1j#6P<{ny=E^0FLGmh4Qz1QDUWn~uDN(n zwUstS_{N>R0P(F|AyGw!{AvGyWfR;TWYdt*%vP+}q$~+2?>cBT`F(g8v9hjgQ(rTkkPU zku9KZRxgi=xN0QPv~}Hj?^)x3Y&RWE|3T{0`D>ygOzCxWnsJ0KYqx>}k}u72rbZ64IF~pK=zfKa;(BA1cm^Px9CDbk|C5KO@+pZ6n0`An zgyqp85hk_nFw=fHtISx6o=(4_zmy#bFP+slkq6R+JVy0=*s6affp!yDL~2AN$&A1> zkBB?gp~5}ahxgZuD_6Y1v0|TGt!oHWu1pLOP&Mpy!H`P6+x9@KBZq>7n}S3}RoU{@ z>W#_;VdsazhgaD&8!xL7SQekvnzx)H^GA`zQqqt|q2r&qAJ;Ua9<}4l*oI`M>qg%D zJ5bHhXQ8ftE1N^@A#l3YDCKg!J;Ye);5t>Y3C0D>Yt<*%#k-AM2->CVuN#AS+cR9_ ze=vPULoR#@>X%$R`!+O)@u9?*lytRbuBY%KcBkwXoo*EZl(jyAZ`Z)4D_1WgbG-GS z<7g6MZTKu8F5~`AYTc3~gZpO3wEFOzr4Bm~2V5WMF)d&pd|I_TAy6#j4m}N(X4x7{ zyc4mlZYGYkD4ax@rEK|67mJOMvx1jmS*5*75Hpjp^D7Yz+I!{?E?hBC{7!9TlESh` z6GgWu+krxep&7OIwwUg9(l-pba${&k`U$EDei53#a<%i@dUV)DHD@aJa^^xS*|W>e za|A!C!yc~_$NTVy`UFT@Uf|+m^nSA{!0E42vC^C|s|99BVMGV4dw~9VOiM2DOtmwx z3S$e1J=PcEZaqsn3o9yxdzQu2RZTGt z{G8Aa_vzu}~L#$uaUSQ2r5Vubo18Dq?Y)VKCU zr_A`>CE;mA$TIn~-l&_V;ji9(?gV8pp@A>MO!%Ks&=B26;{(O`XsGu~EL-r+sa?Hk zZV~LklDu&ZE~HIS;Gb*Y2G`)9Ki2>(&_a*(1R%*w>_#GQ@6( zbgEP;^F*wLb8B`!H!s_V%Pj(Owq=uNl+p8FfhQFei*c(2#M#`n6Aw~QU_{v{oeRUi zt(R$8Qf73v(;<&abh*tu|EO^T5tQhy3h_IJ(?7e8OwK;;`SlE`UJG8;!CXsmPb}$UPB}Jhe{=#pGQa$dZ5$RWpSY2LMho`^vr>foMCX@H7645LMmY;u%I;9Bug9ULI1bhBU%Jf-c z*kpbL(ZsXG&^hTKyzqi`-rM?A)U{DxrLGWywuII87n`PiYUxcuVM=c)7^F|hj?ht| zHejkI?=SWB1gD5N#^4u0jU~x))wuk4y*UQ$%W7w-%rEwX?QWaq-;XEWuXKFm(k7oH zLMpi2a34@S^6 zt{Fu%R8(EzU1>M|hzNP7Of7WLY{dd#k$Nvsqb%T$sE10|`T?MpKi*S^+{V?vRLSwf 
z0I!BNQZdb(+{1hGt~+c;Pq`*!VaB#QW@feBs1ly)I^A+9GI3MSyePYvcy8q*mO{bGXn!xj7hX1{Ds?>PU43AaQ<{1fd=O&`=TX)# zGDM>EEzUYYq()B^`Xt0Vp;(xV5WQNnIBNCE+boR6$;4mu+8$KrU3Ci1+vYqE>aB>U zo4K9%k@^~kW3K6h^8^HE^8PO_(_r`^3DJ`7&+j(;7geiO)iS0xY)g{n$!*m1uXFGh z7l9yqHW*I}KKt-(YwAIGrc3CoYdoRzi4ZWn|2rqze}Kja2-3-yv>Yhc-mCPHPuUmY zOp#H1FDAfCnjr=)7Wq2SGt#Is#yKSeXPrWm`;w4d^NP6~x1h9MIpH!YyK)lreChJ? zWEkTx{bL4PbW;=|g7xTPpM333;K}pz*c~>NUpO=Tq-?onVV%%S#Xvgaa3&eFLm}SW zis#vP(*)XI8=~G<1FaJod;}07Eml)is_=6b?MstqKbZ1@QAzb_FgRAc4WUoThf~3=&h&E`B%0Rz~&SPgq zsI^ZyZYf1!7AVkeMQ{73K3(0^CCpQk#vTrww6P#M)rB1d#(&Yx+`{XxSWJ4!(v%{d z_L~pX&n-jyPEmyrn`Mue0;+`Zl+V~(&gKQ*15bz78JO5qK5-jE^7IzfExv1;LmO4Z z@_wE3bNo=L3^cBQ=K5-5)apD`sUG_GBDLhHfvY&@cXah>?C3?076Rbp4%U~*k`c@L zcjt%I%(Z4P2LX@j0^P5wQ*EaEa39M&G!7V1!__cSiLX#=W@owLw!mxfaOK{k;|`!) zJ)Rlwz1}B(5Wjgv$=XWl9VgaVqltwv)0FO_iuBe~qZsXiRy)pps;MU7+`9%BzE=cfPjsV88xu+_jO{X~OI674w(L z?rE3Yc>NSfB>Gpb6cxjZiy=!4F4y~dxZKZc;4I89XAo1sU4 zx{*shBf+o9HGZwj{5#~qV&H=nav3n2Kkj(Rc8XH%caY|iFQ$JoZ+LX>?-V8-Yn++b zJ15*o_YhnT>z!v!TNFPG;-A^g&KkJq`ekfilt@X^IW>pK zKq=Z12z$r87;C*`B&A@F{&I z?qehZ3%OccjnvzFhV5|dsWAe+w)>Tc4Y9HyF+fhDWv%nBM1vvrG*bm9Ki1Q&*nZK3 z;t`QW(o#QV>Xhqe?p9n9kf+!`pU?Fw`dSQ@6Gv8(5Gl00v!}gnjg^5aaTN!MXUgR{ zOV$dNreiro7jYRQZyV-M8wtLB)_E`XyY<^g|1vV`Xw=a^(gWx!@EYCdtoi4v*9nC} zPLr8fA#X(wyFt?$r+BAM&R57hMgQlt-FsvsF?6U2i=1r2IK!30au6&yHXfutRAY zw0y>>Ss!)z%4PT5u_3tyEh{0)6i3g3ve{d$5NC0RCKM4MT?S1#h|D^)l|8y*HeR1R z5=C96AJl0G1-0P~%o0ENF!Ff)meVGh(0!$cg-CGcagx!H%(JISbE>Ya1GC0u^F zjGpS`bH$Cv9HBH64Z>fRs<#J!Ve)`^M?i3TcpvSU%BSnK;P|=bU6yRzPNF{_S6;GG zrk#Mn?#K=6svp@dGjcJ(W8MWe{rvZGCF-0y`BIjtf!k?qq2dRc1l3rPmi`Cqx~ozd z%_sItC!Y~tSI=i+@q5-TQUW8%E{x7SUh+}}`|Kn_n|rIfbZ9rIoX36()U=iEDre+L zQ#YD?8FmejYmh7acsyO&BZLupPvAZ1tLh_*T0}tn?N>`tp*iJvX=E%a2WlB@bQg{& z!Bn(Dc0Jll+}F=AT<5_>VtAlYlpDt$rfv>pB@5_DrtE2C$tLkHw}toQEdF3N%vOyM zkG)mF+FgAgeRTH3sE(WjCsPPWBX0BDKR(W-ukNHq9RdA4<}GP2$0j^r;Cyu_HSbFUcF1Zqx*y25U<2}K?Pq*>v0LCMaCPv15kOY|;aapOHA2ii zk2Y zfU;e+3lawlj3-X|sNSQ@*rDx(HpDdzYF%}&K->h=elZ}y0_7# zE%9tTbVpoZWc=m+!E78kZXk#k(ZZ9Cfs%^Mwrb=$wI*g*eXuc0%nhmg+y>N3wq{*I z_2}Ki<1j-4Ss-&xS>&!FnJ(ofd6zVqJ99aw*PL$ZU(n z2@S^(#qz8I>lbol5>Mrq`)v;Y^8)0WhTqX8YoL@0%wlqmCtS($BZHDe`mF4-z@%$5 zifT7t!kSlryFJ)mY*GNeOouGY`!nu$+`xaEnNw7Wzb|t#VUK5HyL~!x zMl&?iLtSiND^gRSCMd-{LY3Y$oW@`zpL{2##<(8mi7`MXo zpk`L0I{-yzy)9|<&&hgF|7xip46mG+r!qvY%qWnB*Ts)v?A^~#vYqG4@Ix6Aw?wVG zxp?11-P25T1+wn&+|r!LlPoxmfmD3X{C8RG&BzFf9@uJ{vk4+)R#fxro?r4$rwq z=9h~tJ1#kTt&rP_8_}M^l@{n-5A&B(JN3K2UfJ&0k+T)6M9p(>~rFXtXzziB2ThDN0w4w;HDZPfA}y_Vt%SUUoG>~Z`)vIKAyVvvzMul-DAAJ9gqHQ zpNF&;jlkfWXgV-`Jm-DC`kEgFzq=Cp9&XUea&|~>{XS#kZQ-gZMuA~u7vE}%aCGsy zSyumzugb2(24wr34FWHEMjr@+y!D9}l6q2}M|Egl&sU1#XX^Ke@KO2ep3S*oFhj(r zsa@X#&@T)PX;tLe`9nFz+g@ukg!q;8hi2xN`_1e3>V-YmZ3UnHJk=KBiTuB!DhvZj!0y02DGW!BOpRMzux}4k!B=-!t&gb2w)U zRPS~+Ix=W3`$F4k;^@>l)8EV$QqD>ERI43H%irKjL;i>V*@PCZ8xXsy4$IW13> z{!ff)QD**OrV+(VKBJO$9&Oy3=^)N?V_x2W@>YzlSr_k{%BR2n>MD0#2-4&La@1!ECmslH!Q_3K0k{PK2g>p((lG_yKjoeo5FH`+-J^d>Mb`{v^{#^;(aia;%75jB-|W9aIAP#pCyn?2({d z_o;b~f^jlGZiU9*@v!uLF4F8HC7anPuwuT${I&jP1jsSQ5pVwYZ|F}gGPQSVY(KOW zCA;41g;z)=fV`q4Wa-OyIR0WP%3*+)GHCzV``qN9JO;B@3@4lI?Y=e^|)ILqu=0IWF3w3 znfRFe5Py?*uvPfckP@Ox@iEYBGaBkd0fYkOIYQmxhmrD&a7mZdXY;&u&GbiQ? 
zMWUP}P~AE)OYNImBY>l_y5QcUeb&D09eK>eVOBxBfKp-z~|<4H9Gb z?c9KK$Nr5hB{2|%Q1m_^pcKQLzrQbhl;_0)%K#y!(V)%He5vwg(^^c}s7XSb*9$#(&JX z1I{R;qJjw!5DC)cBmxFdBu9ZJ(+vnTIU^E`2#69SgOWo75+#EIN=9-91<6q|h~8Dz zbG|wAeRI$Gf1k?_x@y<1T~#}*z4qGgde@~X2nO592I43SaSG=fYweTA)z931tUXtvB&o!AGs&KNhB&5I1@O9sZqcg+-r1+A zWis?Y^L}~vu|lzU+q6Xcke7vezDlEV7W>2o5WA4cnLz+u4oBNHT=YI3F z@`ReUPAI$E+RTkG`^$r;Vbh5QQ`Tzx2Lp|6)oxqC6=R95@!-#gKgB}ZxS(xwHi(?b zOEMiKnd-L*h`mRXg3Q4o#gW~wB!)1W3`$zbdfzO^96H{HJaq*;X83&3$S_;)Y?m5^ zc6GwL{`REX#HgPOc+;K!$_?t9AG`Kdj-|?dkC*bL*Ix(cYRP0T1Em$}VUCk&mI?wm z_D1#U;R(S?vMf!8J}uD!?zSHQ9n|UTl(GU-ipHbzya2FzL?c*j@<{|SEPv?r%YHXu zCPjJmy@3)xHBG&r?+>+0Hx@Wa^sia+WbJ#SMQ_Z!mk}CqU8_Qn^`Skw1&4&39mHq1!SE!ReiV* zH#WdUT&Z!Cx7%~&Qhk45!aj>t)pE z*mi|U`+2giUq@6|=6kJqYzqD`>E?p#lE^8PM2o=(tyQ|z_FVG<01e%t%VWPCEuEM5 z6wK(EexrdcM1NV_(V0qHL8=Qw)htBKB!k$Ybexs+{4*bY>L1gU+kKXK&2j^UR>X5S zlXR1UyfiE*J<8UU@2r`pgtf2r6WCcY_t8b_T$1;u#W9vfQM_DWNXo4j=(!8EmJoVt zk#j_&aeIvYKz4j7AD@oaAp7q>!F=>FH9x=f3}6VPr5bapds6MsYkhPhPpcwQY}5L z5-Mp0h;&))gL?8kFLKul){8X-D0O$hk{DPFn}q(n=|l72o@#|XCrVuDe4vFhCRBZ7 zq1zQkg#1L7rx{=f5Vg=H?x1yT*#37^Ey82J@!=Xm>huRjVuBFy|AX>n#pF{Y+73+$ zECPfDM1W~1kv*vHah5hOeUzBXz2!k*Cdl*(bPpJrI6Gi{pGu;fH(-da!Zv2aVpor9 z^jL2}=R-qQ$k#~kb#Hw#5PgKVljTVyf7`yd+t(eGMOB>MbYo%IL&J(6a=fc zYbS*uyUuyArT+Nr7?2#`5gv-Vc}L5bF@U`1v?LT14$0HC(&v>jJ4^+4n_p9>Yx5;v z7wuFoeLbrzWn{Wu2J>`=n2w%()=FAUd8xVl0<1P3%si4co;3`NUmGuPcrB&~_*riL zz3IN)^hF!0UfRZlzTBkGYsm#)%Y7PKR@n9vE42#pTaHrm)<_R}9rh0S%DTOAllFM} z?$FF@hTLe?=wk{nPZl4CX|<lF~XU!B;SxcLu$r4QJPNg0;T& zJMKEp_X{{^7N~i3dpSvT=742M>o8Mq&yyeO3j?!;K5Z9PdX-q&840H4DUPudt9`}X zLZf3Z{eKCA(ph;?24u$f14~ylD@06dmJht!)7*j!_$qc#25gs12NE4y^KRmiNL4e4 zdy?mRG;py-UJP1)oTZ+jp(T}tYYdC9uGp%Z`USjeJ<_-bp|xqeZ>iZ!?jIww>PipQ zDt9Z`0vujbN}T6is-uplcQ+cjEBV27$T3*u<5y+YBwjMwrTo^EUs+mtbqzFXWNIHZ z3*W4iZO8b8&cv;EJ*V)aHUVQxP=aN3H&I8({`lRIpMhnx<^!&U!;%}c8 zhDxLE{}?-m@6OZ;d%+uD)Q3OS$UiH;k}+DYdEkroc2?CyT1u?;+eUNtSrtn1T7wfy9P?c+L+2=o`xTBcDMPD^ zUKDfS`xw3EjJ0qF`kbf`i=L5^h6#>$nU7B$yM8#hAoITJF$ay~L_aw0#!O{s`bj`ZI2b$>2>Floe#pP!UKZ~xlgmt?{c38XXgFJd2KF4#X zRYXfa5H!hJ7!_H)D~(exY1=*&!sedk*sXlI;Im7K2z!esh55*e2`jREm;fF$Lg3jQ zm{N7AHrv!Se}1~4c|5Uu)=>y2mmKq&-j(L!q6_#b=~%FfMK!o;9_&mWtYuA6t+IoT zS#UON$)X{#o4$_P$9bx)+Rf%GY|0ik_|HnqDc!Qg3HR)H?-7@aw{TmZ>+A0SKs~29 zTl90N-IwKtIrM^L#DO<+AH^F%;)|@8GhJsN-JHFw{4sv_9r?)V^3@HT_~dG{2&@su zb-Xr;JzCF|XJ>nb(b#$Fb?-^mR=LnBf4Dv4dstXS`<@oJcdkHyg+X{AGC!!`IK(l( zfT~dY@bs+Von80D$F1i3k7;?i$KF8E?8~6qoj}p-Nq>x9)AUTymMR9KLp4gq6yWp1 z8MQ}JW1aeN-J+jck}dVK*WB~QQ!Gu1R?%t)TyW(*+CE~xpQDO9aS3Y{VoN+XbWFLU zQtR<)yPJu;-^^D_MJDg$nmjG&E|GQ3uq+kFXB(&cc%Q|zDUMX!3+UE7JOK8G-k-un zpr0Nqu$=qg*3uWg&kZ6n^dZ7*VCC9kE>}ED0KO^wb8kIRN9c?^Ug=chnh({~d)plu zD`KClRp#>6bOY8evi5+Nx^Xs=qH+6Y>dF+O-o2uwy8sgw()QtKIHL4I>|@~mM7ApN z)?;r)ASx9p!a8n}hOui-c)ZQ0A3)gbIWk1e(li@OptOmOC ztw*jDp1KN}o<%xMoN}S zYwb*;{bUOsK|~jifLnoA0e{Gl+8;qY_C zB(XeTRgATa;YiI5y=S?`^_1n!-D$A_rl1hAjmyDcbb!r8B-k3Wng|42OJu5goTU~n za%(T8nooZOqZ@dnM*spsXpi{8k%!1~Zx9f0u@+p9Mf?bJ2t;V2?b>01%AKJ_`ks|T zdE5gGL669UMmv-g7EkLX6&&DCnQV=KCAB+6Su4(3VM>{oqVW~CknzVr0R)?5T7~eN z3T$7|X!LD&Q?lyI#RZ%>E8$tDyd?5W{;*w3q(Gx~1DL7Krk5OhssHN)=N$uaOZrZ= z-CN|AF(2uk9PV%RjA_LiCiPx=PBFOBG?w|?9uk#_+(a zV#*mXbw;;`HkY&~MQgsyR6K1+&iL(J?3KfAD&>lCSMWd_#RLAhDbt_@TWaMv%Ea$^ zcx-g92V+VXeTYvBtXH`g$j}(-G*@BB(cH9r>Fy2s=m&p#0rpPeK#_@)3A~>TDbfLe zs#@zDmrAa}^*JHQWHsT}P%iGaMYEB3NJha6F^4IG{sM~=>A&<&rSY!={FkCKMO~~K zSflX-ppQ+zw+SExi@k?VpS<|a^4uL`&$TX1`3wyr)ALtn{`iy&4VQOWY!zZiA-t5u zAgRD&;W5a08u2Z4Nw}WW2|FITv(irqL6(63 zc}?gyu{T*w_oo>2;vUQ7L*mEA1DL-x)HJJ6xV^xl2iZM7Nfl{f?zX9k%hAa<`%)Se zNKASYJr5*WndWp1xSg45q0rDncVR**5dj2}ub{>Ek``r)u(ejtbv_GAz^(Ei`vL{n 
z7U1dh&#l<)2Xjahpw<>k>+*ov)2b+jRtectm!It>%7d=eAD@e!pap- ztwP`MN>`>}#F zdDF}`CKcZc@B9pjp6Y$|;L_`iwvNV8&m-@Ee3@)@xF<6+yUB5DEXn9G3WzW8{uo*Ep!g4W_kK0I3wiReMs)G1_#X$QKu;W+OjCc!{ei|rms_8RWm9H0~Yxq5^F`$l@ez7*y2n5H{*as zadz6yw4-?+RY(tPWc!U|V0)V)_)gt-1_xM;T_ZhyjyhjSJ<50FajzoY#Gxx3Ww13q zdsBBs>v)?p=e?fqcd1A3mQMiDOv%@8R0ofr1g$rU*9KvN-8$q5GyVi@-{t zEaUdqa8&JJ%VJLxe^LMv_bJMi>~Ni~p%IcZk?exDX^9XVehlcp+CS&}Z{`Gt0xY+w15*Le%;3fyW4@01t!UHvukC3}e>B z9p-x9dJk|!>5!Y^9u>i-+qh5eMOOB%pal5KI+adV-(v{SNvipV;2Sy}&I(F1WFr&Y zY}04!|6;&zN&SQE0gO{>o`9s&H9!xiTfiI<`)08(&$PB5Ja7oIn?nX{EUdoL4Cj%L z{dBzz@lPW2zK)p1AB6gL->N$Eqo8ydP*^9a9DNb%55RIfjE{wPD;BK7VCz6AvIC;j z8ojOn%&{3NMb}n1bQuBC$$`ASr+74f;q6)cS>Rc2%VF1x6LZd@rPPpiQtAkQ^!q7% z=y29r>5LyOM-#$1Y;rF7;%H|3mouUc`}0IVQ_z7%oN$_fNV-V~FuuXuv~B;fYtV?b z;U9SezXJDv`gUh&+^x24eO$gt(|(g1UZ<9{A`Wb$;kiI3o7+@4~l`=zCb$@hTwXH|S|vP`hxL>E6~Ubs{3{ zMMRaaUEQyp01O4ceHhX>FS=aGD03$M~ z_CsO!G5}E1*Qb%a43G-O29-Oj(__V~j-!DRR`HHW=iH_k`CyJixO6p#q< zp(|azSM|=oJbB@QB-3r1O;1Gc!_N@VVDuo2H!3Q$MUw;1(%6c)Pd|4 zLaE1`h(lmDtOQFdu-ggJJ}vK)1-^$#kz>^Xr z3TSwiz=dc;%$_5NP5`tQ@3yfpFIIufMjp^Yco1{Hao!_dxfq_sOjhZYM==N~*kDUr zD+z>(9VYR|)C-jWct!LN0mKo>dZgUaIO66ZA4wz*s_NcJ2SAi^BE#UTu_*umQpo5` z);cE#ZyjK3^MJk@g}6P4KCBpgeoCk%-)cZGP1Ozy9>oY04^NRx(laJtzuDB&Z^|Hm z0;c`K2GN~@k)I!^?BB+iY<qkolbcupVi{EQ4MRU>)m7QM z22d^QZvFea^x69bWkB^STf|#jnSvN%YDh3-TG&i4M*8AFTEZAP1#I*QXg|LT^9Fy6 zsN;KpT=ODsJN=w4oMcyGi4?!>dcW znTuwIdmH7#I6md^^`Xo$MWv@es6GUGLN5JJqJ&1~T?T|hL;sVeDdF!fr=}38mGB^uO_ILIZz_syiZh+5cWKj+NTMAwF zyAT>>0t9Ei&*(z(TI2?F+p3COatbY2y$dzwT^oTI6wtpTEFf-(9aq)9Y^>tB7$k}l zHRGrU_ZzG}V=@GeM^ue>l$*n~EXvotkv;in087WRB>UE7d<$?}oibe2r-}zP!=mHO zjc2T-_aIn9GOUi}JyCCs`Vi33pEpCS6k3NFzn)NY51@-jILxc;D~s(XwY>>cPP_f! zh1h=bdiVBc1cEcT+H3s0zy~!~f|^LkZAR89(IWPla?2N0llfhD!MBhXzzcC}J7BD% zBR-w)8c%N@`Ex&()eWOVH=4dTIli#;cf{`voX!M9a(O7Uj>2bK@LjzRE0RcXam)fp zn1ndn(UMm<+qxc8mt={P382RzFl7Ai%H;Z?1Kry# zv&163+PBA=5U*?wPa%ks!trUIK)uS|*qiYq-(3f%{rrqrw0|AS{u}xjDD7u0aPG3L(iIKY-BbByM7+h{aMP5r% zrbUcQL=$za=D}zV)txubE>WM}bBZAdM_yr!Co!JBgTZGn53ij?75z=N!09D#J;~g^ zKaoYUZ!SMkrsufPhrdfWiUa~%zjVHwqJ-hdtA?yOwrSDac#xK(ia`Apr(?k*~;O0xxE zuN+y0Y}kar!!o?U=j$o!GTYiO(3Ub_Vc7_W?NR;)^UE8`OKu>2;${#BVw2mWC_mzDb7tiOf~yEg|tDAUJA}I2(ephk(z2?TCR@lUu#L9{M%1k+u*_ z1;$Z`6im!?_z0d7DsZr|Sb=qPWAZJ>h}zFV(1pJ5cdTb5o@%qS*sfukbG)9$#=34I~9)l^mkH!~TY_ z8hz5c=|HA#X*Z{S#UR_57OoK1=J#f0Sl$&utf)JU8u!7~MS9j0dd-GLvK<(3GQD!U zLoUfqxJv~Z$xyDItKbI6i-qC)TPJrSet<9@8)?RFH z8r}dYWQL;U)SReDUsIYQ`31x}N|TyEMw>W8`ciUAS;ngErlfHE-lvjZ5Zg6DmW~GI zkG$sndu>2E!JF{QNFL@9;O9TN+e4s?h&ta;fqsYUF@{F==Xug`P+<@ain)qc-ZheI z8zL(e?65V^ORO+1P+Uqs0DUg z!Gn2{=Rn?TKtC(+dn*o01nDY{UO6uYp$-`tq-{#5m(HqadMY5W7KR{Y@^15%shTz} zba6I#@Li?vCGC8+4PzrRXwySb%C~S+>8FxuR6l&E8Y~yOGG5{MK&MqX#e;-ijGqI{ zaCa-%PB>&u1}?d^DzWmZ^E7xSL98^mI!KY(fnC@#T}9uo{y@$Zd%MEZu=Dg1(KIr- zWn{>_e><>A6&)i*DYi8CL-)}mNg!v3`Y@ug$tsvTamT6RFpWVVdCWDmDvw>QDpttb z%WvM!eD*ziO%v~a^tIg%DhZYj^HptufK3MlE!A!g^w;8JN;z6YKv%rlO(BIjoZcT) z9UaJLimn+x#d+m<6b^IKS|*dK*GtlYJU1d?;4K6r>C}gQwM*jiilOv@os=}4Xf&8F zg!6Q|WtsbG)~od*+H!U*k{b3|$+m_xR0B+ zst#QilTuR&Mf)evvbaZ3PP#Q_mSZzT@Zbyxou!^SsEbrD$Pj!EL-1RKg|wffGW5?A zj(EBgq!Ebb{qtXt7nxS`fq1){XjP~nBagJ2m4#1uog7?^*4*F!)gOe%l<}A%HBG}- z=(SR=c3wB%O!@7;rYUH)AQ;sLMTo!sJsdik`KCfuOjlFn%X%$)G_tSv*8{@0@fHq1 z#L3G!tyboWzy6od|A&};JO|%dzdULgNEiMAt#vS*Y_wig;PAZ)5N68A+hS|K0i6#? 
z2BHr{|Cz;-(P=fW7+a&?Xm>#Ak3{TBv)LbPzR=pdO}xhF!2Byea&+W~O?FRa6Taa- z>w{)PQd4onGuXo{^Z9<)s$Sa5K7J^Up%osxa@l-fM`Wb2pRkSkA(l;Ajp_Us{0qWWKIMJ((+KEHTeeb(SE;u zx$m^0g4y7i&v=6oXvcqh9QxqY1g}zNHlg-eVY+B~%V2S@5`pHdoIa?^dWmaI@oRcdvAb&E(FY4 zd7{_S+DhaPTtK6r-jwp%s5}-Vf>5s^Jt;Zax8>h&gO9AGS<)`*J4;7CY1G7%fPMj{ zj>Elgnis}shKV5T6wt7}fmUK_VK+wMc4(e|{o`dCeKnM{FkR;k>zmBjL+7qsJ};hp ziu8t?2ibBmyW83fc6)yQY7xKlEP?|j&ITl2F(+TMFxsYns{(IPYPQk2GM>dATAWx$ zU!T5-cb9EDf-zuFvLiR8Db_aV!!uQ3zaM<`<-F2WkZvr|4v&Bxly{DN0wZe)sLdOQ<)}srC-?7n!?baTaxOlvXC1Kiw74GMpFaz`yCA-|t@^NrP;MFbg{U!-)A?hlkPduMNk4c(uRf z^}Y#pfBx-Ti23)wJY7i-nfGJdrBNN>e`@&s*N-r?oqie+^S3Acr&I%(^&Rn_TO8zr zfY$%ZoBsbU`u|!NUAo^xOB<+iV7-lu?9g$C8+RNGZEP&fjSU^ltqI&t1d~fR7h^+9 za}zs5XO6q})&w$lCMTSoJzQlHW#VDtWWo`Qtxe1crf_Ox?_h@= zchDm~&qary^1mKl82(g2x>R0_H4@a@m|QgKmOxX4qs10Vr(X%*6PS0fkQ+zRFgVi9 z7f2a)2tTK=gnsmjz~?6xEQ!gTY@E4|?JZbwPj0fJ>HLD|kDzLx`TA-*dY=)CsX3a6 z_ZJQeiHdsOP!e4{yvgZ(MV7&e-;^EAg#XbWq=_DxL>JWDLXY@89rYi3qKCp-7WdC* z?FJ9HeZ}G{0~2h}R6+mmSF&s$FPINccCPG7@hZwZs(tuy?vnqn={n2Yw&7J$E{`vp zoHk?cOe};QAL$vPX+lp%%eu6M=AXCU+ZHRjXvazzA*f+ib~F2s%NQf+EZv`3=Wmbv z`4~dQyi@QBdSYTC#5H0+pxc9H#K{SxsY2*~UXBL2b>)YxVtRWpgE>mGS~Di*Ihw`j!4F%9rc1~% zMsV9;{Nv6K17g(L&d*%W-q}|Y)@UY^X!JV&ms0ul=>U-*p9+`J1l+*0#u2-nT?I53 z|HD@gZLK{Uig2VSnuzqQF#DmGc+vvUU)~(_$8Zb3t+(W8+xo+1Q$RlwO+!BrBjEej z2@gg~q7h5LGTnaWfVGoOdV#-#UgWvRKYX`O0sqpKh_roa0V2yr!y^rR>T~|kc@ii)oXdygd(+2J2vR z)vSTp@T&}(j47&d_I~ZHzxbuJm_r-APv}AY;{3exBT}zN@8%J57~{v{UwUse_siR+ zIW|VP{4C3Nb&3`sHMh|KTA08T=~2}tLNjPlf|`uWl0CVkR5XRpnxB294L!u5$%2k$ zRRP->SR>NIgCYhzwx393)Dd3fHm zSdh(Koa-!Q*U>`zGmi*qlnD=6Szwt=e|95fU<4*V@;y?VDv{Wpocew( zhA3{qnM^9&A17-pM>?!i`R-SMX6HX&7mB9wZxjgX4hY=VGtE*(ukfJBJWWZPmM^|x zvA>whu-J=e8fl30j!66V{92W>77O2)nAAkC7_D}=XycS`H<1J-?>7~OTC9U6X*}|U zVT8U7K9Kr~WqQK!VENJDb(Me+l9Uv1yWALomVn1%i+26T--g?d*O)GJs7%&PNm7V8p*I}8o1Y{cj9fIactfmFnXNGwS{~^^q;EC)9D=3^ zEw5crjN={U3!M6eb=Xow3&8VPNtB^SxYy_pf9L}XT~dCJ7Bt1Mb2Ydzi5Nu&;pkEw6z`b79`V zGVxeuv@w#(P-h-ND-J{I3j(4un%Shh89Qi>r7Iv4)h0)WkRp= zG(KAPNho0cy0JDqh+Yv{v8%>o9+}zSs`mY)GK)46kI>}>rRGQcjqQniwioFyH>fmk1Q^s_hTM5YswnweYPX$9T z_+Q21xt@Hzix$T7W&6?tRe!7zqqNr#G^Y_3&s+_HaIzI^WG%TlJkw4LtqPJ?ztP_y z+8vMw7efb&*`goXK7r{2F>@ z@{&{lmML~+2|W}{TqN~u42CqJ2z0EGbRQusR&W?gV30)T1|DNxj;-0z%Atg3qUeo6 z@@R>8wR@yi!)IWbT8J^aoRfhH({n!PMV|DxXkH#QpAuOt_LG}fpP`p@5dUpU+2Eav zrf41^blCCmHF#f3#4@cnrB6h&m`;bvdAvby<$G4JJJkGheQ9Y-Pw|BfdJF%CvqB-a z##8@9=1*pvAz>k3FK6DuI(Xks#M*sqX2IIYVja+oyrWLqw+wBp`h7|FC4Jv2D~dqh z7lYS)9{A)vDl9R)_4OrKrX;qhek_wXsd)v&J~<9N#M+@RA@C;MK6}%9%8c*kn=P!J zdUx%0v_RwYhUj%XdWOsGEEz(NFc+~7De}w8s1C>@_^euq*f(Dzx$mC-LMoE15Nt=nRTMr(zz%f=!Zi5@yR=^dz++&t$Glh5wIwW$T_%7h@Z@WvB1ANi|NI zT=`92Y={bzL>X`WwpJEHR;2j@hhC%C1&huU&xF|G)X1E{CZ4Mtpr@^*KTEwW@mkS@byL= zmO!c^VXU1)vvwsrF|8^ZeUiZLR?1ME<#*+si*BY@G;XBpa9rh_e4&trHIkYWd9EaD z#`zGe?XB^K-X$;}=>w#*@P~n&}?pokB-4_`7vO;d<6p1E2o` DhPovE literal 0 HcmV?d00001 diff --git a/docs/source/assets/design/arch_overview/llm_engine.excalidraw.png b/docs/source/assets/design/arch_overview/llm_engine.excalidraw.png new file mode 100644 index 0000000000000000000000000000000000000000..ade1d602a918726f5a407f9422eadc0b9c25639f GIT binary patch literal 178116 zcmeFZWmwc}*ETL8f{1|%f`up`0;04Cf`W8|AZ<`XDk%+u4GMyENq5cAZ4kmR^hl#L zL#GV&u2J0LzMtcH-~Xrom-~b4eT*{mo9nvPTIV{?b6qc#6eOwk((m21YZsNYl(@>S zU3=gUX&5;fyfQj?u?7A^WTPT^V^?P5iSb>#7(O_9 zFI6sxsc_wl^*pcoKJd!kGcrt!suWhw##}S zF*(Ek`eR+&L-c|o>HqDYG5E>BzkBn4|97Gc?~MPiPj`^~qNv0LMe_fC>W9ex{&P+K8#(`tod198>!*au_hQcs8Y8acns;)oj)Yqc4b=sx zWa?Z=(JL!H!m4ua;);0*tB?8Tu{Zw%O!1QyJHIy`+@Bi?K2j9V)$Ia(w zpWjAf*+&Jehn@$rs!A+RbsO{8cIR7A(C+$JJJwA874gBFq9xy=+kK;qY*@R%l0)&@ 
zYCK_eP?VSGJui`;5PGEKwmv6$<-vf*Lc*5w*5=0iXtev6<~YgW*jE?drfVW)UZ1#f zPBBX8?NtZMx+vk2c-i2K`_k^p{2L42ll2faHSgtAf3^M4M5Ge0Pq0P`+HuXZ>J&a` zYN?P2_;iQGr?ekcWHTx=SnW%q5JS`+CG0Ho?!v87{ZIGe$v&@4cAnMDHF)^=PS_b) zv4OS})%lH;{;PB=Y3do)n>|G~Nh~4MOfmwa(zNH5D(lJ{L%EZZ9FKYbYoSEhDGy5d z4wuw)9r^`ZD-SgdRP{RabCfvD@KLAeSCrdMb+sp3mD+wdkGiS}=cBXSwFF1l1ONRe z1-*H*9p{InSxD?x=ZB54ah!X4To%xc!A3^7Xw(ndCgZ#HK^(c(!!q!!FBTH6FF!z? z5_YOpRTMt@Iwkb*zksr5@=*pq`n-atPtN>;#U3e@-#@>KtbB{WwWkHQW$JdLoOKpI z)~;E!aWxIs2lGb_Bn0cdrQy&zwo)+lwZy@SPT;|vrn_O0y~)aP*<`jBSb2AMQWVcj zLnyalN4h4L`=*OKGjGIAcS}>zrN@*PFP*r~{~tR9sN2-MUqt=)$!DBjU%=GEep{Mf zwwq8Zn#%9?S*sC~gf05494Ap{mQ`lN>lvh6zS8HMQ5|BtIbAX{^zGSczVV4gTx-%B zgD-(~$s&=l8UsJ(26IiC8AEvxZ{@=IIBwBu<(q5LaTz=}Xb8D{3tRPhdmH`%>Ce4) zdA3QNx1rj8Nb)p~pz!sHv@D;j_r~PRS_gNX9jqBFeT@$14<2t#(w2Yl`KX3~XBK>4 zZC}a`&3bN)OdU-=i!K8pgJSh${OF?}3k}k5>v8pB>Tk=g(IObRqj{oNFNxWNVcTSWW9)XkVxRbWN zgw;^q&#-|sD4g1So{dGM)x06xP50Ne- z$RpTdI9+^B32Wh3W}?3@q|u=xuQ~*kIxY}D`|F5*BCKCXp*%bF_E~#OG1*(9FtwdNq5l2stuARvi4d=6nkTVTP9L6lI zF~JvL?j85~YZc?e;IY->Mn7yP@qu6u`8ri9dmV>fJ2zcHqmioG?hJZ?2QiD&z1`gd zTfX#{?|BDu>R*e$Vms@1Ml9ZWWjbn)LYOep~O- zF@eYdjL24?pxs1mqI~#w+h^0=jD8cAfl|UKmmiX+T@A{H@k_X?BHbo`6){oNUO4w1 zS3eeSKS<^67^odwwqr`z&0DEz{zN2Xg})y~E6f4ck%PgJ$;x~fmLZ#R| zO~iJ@TqhZCNqB|*=5tk26se@|G@jticg8QQsbH%V-T$!WWK_M{n-f=5rps3JME}YV zp2u2ZNqKU8}Jl|{nqmk)dCH*#mzeRrSRn#Fj3g}d5=u`2=epDrlHc#OH@ zG|m&Tx=uscV8Pp*y?xe?{%C=SDb1mYSEukouK4jJ>*kxZH_=hI|At{PaIzft|2|iV zyPcLNS!c>tqHw3IM;ch`gE<|3F6O+w1}&!^I{_pVRI&PWW#9Dzu+Z7+a@Bs)FW0c% ziQajdK}2TXv(o1RAu*~+3bDgds%ePOdFSb(8j3|6hOeUAlVp#$?DZ^7B=bNUsmg}> zo2k_>9t0!O7_%R7SgTJ1S;ng!_VlkX6n_O_Xi{`Ion$+Er0;%xTIAx*mq+~1O1>_9 z4FDc5^Y5LtmLGityI*b?=L(ITiMi-8BV**UM~!;TEEN~ z!+BvzIc{=3V+vY+S)O1VhETT#x_SK>}i?AgQ< zS;X=}@th;XKiPp(ldg{q&ZcQ(U4&!108i-OZk_u+|IIagZJj~<6XvGao0Oq^7Fs3& z$9y`t%AE2u$p*5DM(Di`ZQ6HMxL5F)f1NFDP@@Vd+r;An?=A&j9Wh?Ot=)*`6mE_9 z*9v+ORCiAF=5sRIU z1!_e-z^aTIrqh@1eIjSoFL$Y3IEN?w#y;gtD7m4oTIDMp+E(&*rfAdtT*Fr;Pg65F z9Hx6nl~gw}#Yc$&60_ooUj~g*<-tBFTr8Q)RZ6-m^jC9uFc%)rk0SJU`BxFU168z| z=nTh;D|bJ5x7{{M)jJ&5^rQYA8%uYA)d7pQ261Hu-kS>;k=W1|Zah>Egk~#A+~nRh z*(^?VV{PmoG6+yTz-H<9Us(yvO{Ca(^m%<`X?IdznR9*Wtu&z-!%7dL#(Gkozw>L} zZu0oV<&y*W-|1dD#uyV7Ypf84dVTV$OaQaITygcS)3AdI==3k){8=j$#6Ia7Sy8sQ z4zy`sR>fu)>&%3t^VY^n0-Uj*&$I9JHJxJH@$auZjiRruO(F@A{DbRs0K-8J;)PL+ zgUq637pl%lI&dYA`Tk$0-dh?DQ^=iAn(Zz7z!H+e&&V`OpwZS zavOd__`8QguDKTTSoQUoxS{7;L`~s%-zXV;MZ@v7*beuUPuimy7R_3Z^pZu`^$Dgp zwuIF^RP^WGv1#y?@oW&2k2$`7viIoxWNW%6H=?&Vj{q1KPV5tB2qJ|#f0mN=V&LWb z)wPqINZsH}PCp_1SX}1D*bWE=3+7Egt%XlQPj4@i3oV2$ivbiCiG_UgP8B_uxtnq2 zb8QrHP17UKPP?54z_j1u?XI{bCXZl!w@#C3z>u==7(VzgTSiFc1|3OIBDfsq4Ummn zxCG5)rvo=AK4|IsHIj?JuY@CXQ(wK=rqrF#7PAhm$hkf*NYXrnQ(y^fSZ2w8qHf(&W2N6j;v_s6gq?PkT+ruKB<;Qm3RhdhleL%VogDF)gY!xKlanuJ<_)pO2R-97f= zucJ#KkW((eS-G`ZB(NRnU-y)AX+a&_%vrYh z76pBRNfzeXS_7-qxba!PiKIjUW9$T7exgNhv7L3NVQ{E@(&@j_2jM`(6H3{3sCxHax@A%L>b!qyG;*EmHqhs$`A_E=0%TA zLYzhKhMt-cI#C*7E8PnFsV=69_4!xptE15h5@nlf0828h-Q16|s)*+rW2P7OhV0+| z&hX2gW7L3*bXUCEw^@AAgPz;%w671L67|=|((4MmyL$Hso~p@Q4J|`O0BGTDX&N)7 z^(O=#h{Ew2#&sr3(>Cw2$TMyKP?qN%+RyCg1c~cs(y4yx%jh9GNj~{CM@-_)DUQP| z3aCu)7?JKT!JM2<`H#MRcyiykHT}q+XCe|KB2Ia#WGnV7nRvF*KAQVXYuHoxP=Ye_ z^8Kd|ms6X)FSkI1TDAc@6R-6C#(VjD0$N@P(GUxcWoye*Y)~S(BR*$_8+AP`G0*9J zd9S2Eo+0mc&b?}pihJ5jP8wOE_G}uNwp_SHb^{B^H>ZwLG3q)FziXu22Vh-e(bD5x zLUH9vzx&Jy&KL|PMKx&%T04afqn4T1Ycm!4xkl))2^^1RC3KU9bKgHV7A-(}=BNAK zB`F)pZ_%X&z3nyW=xb$W?t(k?k%N?gobG?Sm?NmRCpr94N_dVvg^ELZ!3#4@{Z%4ASSh2&*MXi^iPetE)kjXANcy-fF zY3MwBvcTvQ_3$KGo+OFTSi+yDhN1YGylbv_sdAUq4_qPvJ7VCVG;FMY+lbchf~hHr zWR7x~myVm`tXR(_6ozCgA-fOf7fVt+X{ZXtQxVH4WR`EWk`y8hhHCxqmbhS|goj_S 
zM3Hx|zu?P}XIM%}ih2uFiFlxtS8UkzB+U>woB>u98|wL%y%THnJY!$WU7b?^@Xgpf z6MS#fo3hsQUd%V?f@-1lixP zQ)VJD#g`O2&(>&9Kv-q zMj=u#s{Bpt)N@bh>~sWe#|99b?|TH%)mV02Jvs85{yBe2L<%cpXx^nFzU^dsjKUNsEE z6G~bw{eB}4vhHvm#652B^6RAL#3^AptCL6!etH#RjsUtMV#a|j{GguNYWoe4`@_af zbRcU$v!BlSAD#i$s^8fkcRS8 zEis!Y_BZP#>~9nobYA%eYJJe_z%>(LH^7s*ZW~UH0R1v*y7o8S`3Tt5>Z;Es5n(Od z$a0okTA|6P1FbwlR361krAKu_py~YaYfo7yZ^YlPpbK0URld8}Z(px-S3 z=vZyV_|8cDbvKpcJSKjnusIzI;-;cgU3m!_S$h7(Iz9R|i6>A|D=9=u=+w3aQQox0 zTK}`wi(|^8iX6?(BMqVMp^?{IwNQ%-z()AUgi9VSG^(IrI-U^~?qi zlZuk>^Ut7vd_w6E}@kY z<>=^yooqvl6iX2nUPqlqlJqq#n=>=6M*w$9raNLB?aHi`kBuK+&2`c<%}yN6P)}C6 zZa>p!!Ig09v@quapxb=Mp8;d zR6eGTf&uxC3r2O2ccig}6JD^ns`VEM{g)}Du!9wlhoio8DvVzzK8)Hbe3`h6R%sQD zG9qbSKQsmiY2Qh~w&=R=<}~OD=cRTHuW3`*(pVyu03(&KyzP!5U_56IPtRvQhT~ma@d?V|-zW&_{u~he$pMGy-*KY- zx&Vx*uI+!|y1l>T=VMNHg&KuO;Kim*iY$&jabw{#|2hy@jN6fY0$1u!$ywUex^!&O$`ce7Z>F`&ry}$nhmRrv!8B!?a&5*ljOt_q`Yk&B8m&$iC#% zDV){Uc7m>F7^*TAA{;775CBwq^ktVHvmTm7PL1iCr!h zp|8_4-{a8ly3$G8MnFKL!)IJItJ;K`?0Am%%3g6Tuv=L9Q`^YB#h~R9Q2R$~aXHlV zwp=T}Bi{GqRdt?Ze-o;>rkTQ^(@WO*mh-vg(rj03$NY|5P&>c)q^KdLsw;>?TX58v z52)xLPWP*QUQEn?tUfM1{F?sJm4bAe3!E%N^=2KRM(oCo5v!BIQM8T&l|-Ymn0p;9 zMa-QTr~R=;;UkTaSI3HN%eyU0&?Zw=xhcH$m#GcS_Ut=0u(p~e9WDah&ACzwVXk&5bp~iMEuMEPau8L?^Hd`+(3De?E(bK3QaWk#A(jROM5pICZ08} zE{qmL)nL>rHXk0MOkQSJR9y;LBsi;h|9vQ!wjLjxYarauW-@cC{41UV0aZ=d~DDk;v-0=kfeM#R@X^Lm=pI~86nC2zsIM+8y6 z--iF_4U#u_A(jIp^!hzw zHAmUhrIsfDoL_q8C7iT)wZCsY;t*s4wp&Dzf<_E?z#oqw`hJ@s3?@gI@a@w5lY6;&EX0Eim!O zu@wse%+S94TELq9thx9?m;=Up zLKnRroW$8TMh0=TQ75_ab5z_euY@tfxpjSVJ_ix?+W%`8?vy-k@ zl03`t_|ccw7MsM}{>w` zHqo$?KpY1!?U_P4%;*)3MmY;bzDn)T>g&u}D7E7tbfN80s7-^`4RG@%%`>u9g*=Om9BjU65r$)(cdpg|K2WC!@>iaInHb-n?)G!xFTB zu?gBpgSzjCxKa8#8_sBOTBfoVQUF&J(S# zi6(GvNfX3)#ww_NuCCv{i)^jFDP3s1DzS3?YEdM&rO=_1JJhG=9>6q_GuTjU7uid+ z*;Ln79rYqtFQ^uB_5Tb)HB-M+s|NmaiJcz2^&!Wj@Sj)*-j}abZXvlh|rA zEfS7_FjY&Z!6yX89ge|?CAqewC?}c4 zh=zD1121wZr{)wgN>SEK-+3ypgNEM2c|0LFz2?b%4xg83pe-iO0%T=GV!JzXHp71|ziK)2_8R8sa9h)A#!yS@T>> zD18`*LCYdZFPxW9Ti>*6fI+T0gFrV^zLvQpPXDOszi%7qLo@gJvNAR=M zb5;mAuWqi*7=C<8#Tpb1;%Ty6$fYx%Wle&zjT|RtzJbUT|5Y*z=tr|rAFTrF(L~QS z;H9OYJoe_h_Vwe0^HxL6T|Kv`nuLKCu3J)ntJ zkGihjI)&Ma^tO`lxltf&KN)gZeBY6CPc#2Br*>D6do|#iewi z)XkVujO&RYYf5LnZu#>W3qP}7($IJ1HK~rUkkExFjTDVV ztSxt2C78CSCiZ$Qw=yILF<~d0;v2f|kStCD-=HDb*@{OhVoqnNrMxBBI6mLSKYmee za<7GlZ4Iaa@~&H6Wgyl#L@HoTbQt&MBSH;0f=@Ii5=l{YkU71$PV&+T7@tHGSusg3 zNkQ4y2ukN^9s0J8U@|V_#z|6D-d!`pI$DvcvSt+koMSmx9wG3~%IKY1^ry!Vinj}8 z(+~a3TArkbn)cCccyyr<`Mz!pAyNd4mmg$g4z2Z)INlRUCQjgoc$Y!KRVjCIO!FxS zym$#521Q$&YpSKo1XmMCS-g6|WT#OLOz091ve-OSVZS^#53iDXu7Nax1@3Q^YY%^lEFznVc{vFb0D zuroX%n?{{OYPY@})++AFzmLn3ZxKJ_x7=(OWUtSu+4(w=9OC^5Xc8kTQ0p%7%y$^ zIRFRbbp{}~wmT;0nbb?{a3c}IV+n|CX%eogu&B;L`zfu#aWPg7gp0qxR5Fnx$16nF z+b_msdu^Rr=`5V4Os0*U^W)KaJZ5TM3W=I!`PX%Y``f2nngA=yLO!twa@FgXdiSkE zB?BM(+QOzjy{*9ko#_53-}ye>R(fptW4L66*d{su{XSc@CQaKVMxh z^NHc0MD-iVN`g~rCGbTGlb!ryqAK;|(e7*Ovmez*$C%Jb!$$~J=g0sE3C(T7kTfFmAKUb6%UX0x?~(JisBgXkVB6{Tj6!wk{qbV*`vgH14drXIWC zy|mUow68h-Rd*D3IwuF%o0tZ=c(W7fo-vPlk%fuN7MUelpX<`LDr2H=DzE_Qn+6?n zpv_hxvOC_QGxzgq)Fq2=``F#Ur=Z|y5uV6dY9iVf&x>dghXQWWvSfa`qOm)_I2jXZ zfuUVqotN9;GV)*nj2iXzAFH1mn3l$Y9^{Ks!Gs^@H4S{Y^=Rz5@|eRMR^L5u^N!GX zD&+MeRE^$Ui@QAt*wr*FDcVg-&v}X$dl_2RhhN;o@_}_1G#+f8+|Bbmk*<4 z#v!0^2A5AePxllNtZ-8xttbF;&Qa|`62rdtvR9HuoPuc(FoM+5kZ7KDmW&? 
zNZYEX@bZ|U#h2q|s|(=Cp|iRVWl$wUi+6C7`Q(zZWtLu*$z_*{Is=rJ1=bV$@rZ^~-_dS+-JE*6bcy{``Y4-nthbr;rX8-M?(7?(dprd} z*W6cn9enYoNsbF6(s2^LK5Hd*U<1ll4dVx%!BUrG)AhHLY0#qnvsvYBk{UjFec~?2 zy=cGeB{&?pJ>YA*8y@{)2P!2`f>QWkf{TB9X$tp_?Lk8la+q?kdd31a(h4|Q> z1<*C3#cq`5I`Wyzj2ENRj7h_TZc*`ILGWQrka=fa61cEJ@3L=Ecw*@@ z6-QnMQ^DEJ8>_27zOZE#8cBB(W|wUY{8U-S!-zSs9ip-obkv6b9ZaZd5Ou`WGap zR;f&!)TdU8-OUWGskRvRU9CLRVFY#e{HuK0)$ycgaoLKAXXxE~k349++b&X;5A0oC z+?#8rLOR@Rr=7;mfbn5H%Y*E^9Ld7By`O6EWlia5TvkP2u*DHJb-8OBi>h(dU#l)Y znrLG!`u6m=Psb5B*r-=APr^7UQjo|qUpXB#<}7t}>aV_TgE zGzL1e8Xlrw@?BX|i$?CIq&e1Wy0MW^Q6m@LcNiU_a6vNbs0b?cW_*W{c3zeOoGbM1Yuk%9JujY| z7IQBe`hz^HO(N>cr&H`qv+Gd?i9vj2lezVbd8m-0Mo`Mcxm7kyWP{lK%`%Is-=v7k zW?J?Zx7%fc4O9wbNY%+C;|n6ILjjpK=VlI$FH~5_(6Q*J1y%8K)Medbk*h!90G{c> z(%hhHrv+`3^K9mcoFvx3Buq0eBv=I|j+@a`0r^l-p)ANYWj;G_I^V6x2>Sm~Lyrl% zE+>V;OipU~iZ=vfo7?hD;&8J+fY_Vhtor(*=(j9KdblaqM(c57B7)`_qQehe+T2A8 zZdM8IK6)M#2Kq-UEtbXO%%K1{2nL7xS+(PFbSYt>rY(#6%D_M!ZkvGFB2|bGIK$uf zC@!Z4#I$oZBMsFXhZ6>Y`5OX5@IJ>9u#ml@=QZ^fQfwiGuHZrLYL8a7Xe?RAsYe4u z1+0y0&K#U+A~uVd2mHd}>h9_xWC8(Ml3v!JnsaF-mN(>raUiao&-htbU)mko4tH4- z>3#q&bQze80JM#;^^jE3-s{;$;l1e##CSr9WUek7CZG}I~ z23!dWn$ts5;B9&fjw5=sf2k^8FS=lIkT=kJqHdJXxAa)ob4Xm&%3b-{F024P3qEvK z;utz95>DJX5D6)|^rJn2@)9sEa@$-Tt+v^NOU9-{ty)kKQ<1)_&up|E@jt1%SW!45 zM|wzuerY_SjFcvH6=0#%JakTr;po}4nvdfrWa?y2W_dm~tPhf|_Mw~NjQVnR1Rc%- z+#r|U6C;-KJp(-4o=~|JPPzTKi)>`f1-+m@x0hH~jl^Nk{u4jdN_~7qfIGn?h>}Zo zL(s$;br6hb*6TkWUU8g{KfGQz^j3DLMV8Z!xI)_9lrMvfGOHYKS~OjZ8|pI4dI!Yl zlZcdzVM$Oi?@xx5#<{(jKt}PJsbg&7d6Y~#r=IVgg0T;cS>k0+n>LIB1b7U4IDfDe^>3qU7>Ru7}1R_N$7uee9c}R8w`XU>P z0h3*Mvi8$Gh*BDns}q<3(8>f2J3#iC3Ot`jVY>g=B~f4)2OXu|f*0krYC*$~hwLta z+iRbQNM2*p&QITWO6Y5_xzR@GETsA`B{Su?qChL^=N5dauUjWKp)JPLs$CuLL|Tse zbOn7?aC_*=Dq26H_n>NbHI8d4`MOc-+j*x`Rxy)8O1o$G(L9t3avG_-K6=fKKEM2|MVLL2&aQrLZhpQpc4| z&^2p0Jp$5LfkLAqEO_5Ch^&H`rwHe<_s=32tg{n25B0dN^d_oCVKGB4Dp>&)poLa5 zC3zY4Pj@0EdCb~TtGbC_7mNQ|jy}S2terG+|FsE~KA>7%{-a-3gJ5*(5LM#SwDHa~P(Jy?u6>my*Gr{{w^gQ6F}s4G>X~q? zCV-iRcY*RLo{k(->)`*XCurbM;%S7v)VJJ4!m;cM-N6SQ?@kU_jx$C%34hd*W$8Y} zdS@^>G!r}zJoGKUAo9bP0IK8ch|xjV$p`d1`cLv7kf_`B6^(wM30Y5Cy+_xp-<-C( zGST15i<1;lYCq5cMTc&_)7A1-a|4&KnL@?lM>CFs(8UBgLl_;fD_<0|K%c*|^3hya zaS{Xwhw{bA>j6*wU%agaBs3K0y)OY#d4nX^Ny31gKiuY&T{MB2E5%Qw<&+LT&*;sU zLwus4VWMrBloqb(${5B(tGzgr@!Jc~uNtH1-hmS)H+Cin2Ozlc7mH_emlS|{7k2U# zr##zrwG6Fxr>kM#tZeopr6p3!&&P85JVI#M^8>xF_mtkZ*_}3o4DU5%_q(SsGi7l# zVpC5y>4|>51Gh8Wfp)yT;f3DzYX_N$(A7=U&Tf1hPm)fL^g|&~SZ*Zf+N2!3Qi=Ld zVyqu$fVTFq@njn}d0tN4Feb)zx=~A*RQlS3b%6w0uiL_voUG z9@psN=m^xWVOC_nQ8(;}KlFiJCeO7~x*yfu=`e2tlqO)9YS9hq0_OptS&z9(RK=-g z*@?!PP87+&^J{hGTbPy7WS!)VDMGX|W540$a7BH1tG7dFzO%P5mL2_Ge>sh3HGtf! 
zh-&yPSO*b98h;DgCe~VQ-4?qmh)jmh0svql9;MqG8s9DODR;jznalkULO z>#Nh*f;QYe!zzPSYAZ2x{#xpRG`B_TzfVxISA_+OrbZz(p}ER##ZhwOH}1Ej{<4@; zeqDl5g2o~={jS%;9WXSOaw{z;iDOJw1{@A0bGKdVlA*i5zUB<03e%=A@1J3!1rwza*5Uz{sH+JU zdHU>py@T*|*Z={|qFgdI@2JXbGbvs%*m-KHlb7yzsHLcUZarnb2ii^9MHia@nlT;A z?k~hn8Q@Zp_Gi2)cTx8AHo5#7{>H-yM@WIW5Tmm?3{Q>P%|i(W zIS7KSJX4>|=(B$Q%S{bN=)R>E9wh?rRASWZ7I6L}}q z2v@DO5O=~7V#NRy83}EEibq{!D!hfs#*#^DH?StkL6M769s~=k?_S%?*}4E0CTP|r zY|h!W(7s;3cCpHo$aY)XA#j8Y`w1!fo4ldycEt+wa3jy4JCEAh8*i(?BC+KrsKV9q z_9<9|mlux_JgY50^(o$uv-6AE(k^^(H;vo10(?uOE_b!=5Owdm!>AcXrraL?%i+Xx z_LOUp!KdekBwRNpc(%t~n{BrdXVG#ycVV7TrgUN&&fw5w_JeOX(tKJ1gA$oS?M?+G zhA!eC-^xzxEaU4;NKr2CVRja7!szwJSuk^JrZ6kgx$cl|FVVtgS>9CK-azsI52J}d zcFmRx1BVk6#L=ia5%SMt>)HW5QY6plA6!N|$%H8*mBCxX9IR`v+1BG;K{F5FtqwXl z-z2l{{0x(2I*x2n#@#AQ4WFTzQD4LWf|CXlMCAM+p^gB{brTqCc0$f-Jle69e?|W` za*9VOg-NNm3((6DLDj{L;QCKrwU;~PICN+*W)uu9K5kD56M3iH*B=f#y7mS>t3$=- zMt!3*ZADhfb2G{7k2PH&W**Hij=X<-qL0`&THs+U_^_%?oHq0x0U?9Ibwf1-{|&17 zgmfg&ng(~_*>|pHc>;nvJ7X^bc$-bkG5EC{-<25x%|AR^Y-;#2WN3OBzY?iXSIhj0 z9g+WN|EY_bxt~~BOuN?ihK1;H(yVTcS^ z1ID$xWjD9h7v9)hoP$o4L@x=nn-(Y_*_mgXh6pB-J*Qoo#<~)#L?S20n&ZIUZ~(U} zwE2e1bbRPX{0)g@dF~tX$W4t5tzpt>`7T21KyZV0-6E;NBRP1rz^rWm15%m69o&x3 zsE*s~iF8v}OCKf`bZECbW4rUp8P5e$^7whBZR9H2M*h{nN*o>g5#=X3dh6ce)&MIV z!F>g?x6!S=%Js~q_PRoRXgt)akjd@>%p+$);&)k{r(01(y@qzj=~I0}7rj}Nlq?U< zwIP7zjSj(0%$*q;AEq33`MyLq>&K2Q5DZ(O!SpcH2Zy$75%^L*+PfCK=>IjERiVvq zykTn5y67y=T?vD1_qf?(T}9R=xmoo}AJt;pQul%|7v3xcdP@KR^W#hH3RR6>nVQH% zo8?EQylBx5q-*bVPS9*S@@@bmwKX(y3rrI}7=%2>>CE3l7!a+Z>;${hlaf#L+jb`g zq78=2ydsM}%oEXWc-GIucNVwUfw1mENne2CY|GZ~gyTqRFcRVyR#dMS3d@HBDYG)& zD__E*Q?We{5qqyc>}G0J%KGZU)$6Jp?rGO+C@&T=T<9<^Cg+IjPF6nKJo&*UJSwsm zSn5{9hZaR_qGpOE(?_twO_8QpKnvg;4+W)q^jTNqY11u&;i+I$dL_lP^UhDeNhI3M z2Xo1Yui4)!o{m%QhF;S~@0=7mak#xV*U2lgwKqeXpAMLWC7MOYHaP_PQqm6g)5F67 z2;aV`W4}ia?ou~V=i@!oy7yr37Q3j)t-;t!9r$KQGIyW#zazn1Wi%D1DLU7uviDNq zlqPrS_xIiz-Pg)CH-P!MpykH%R-Ri9aJd2uqh}MQxfPn)M&w$gGTZC(P(2wzXWOld zoi3UI#T9Qo{6#Nu-qk0nIn}dI6m2%kiMWQ2@6Wma%*Jwp021(AwNTAzTV_P5V#krO z82Oe3>mRds)V z1F-EdxCh<=YsS)`@v?E8*Gs9Mh|KKRZoZaE?4%D3dTrTsiU4)TDC%x3*M3Jl4R)-F=#3=km z)<095Gko)9BcJVz;7JbjXEh}aJ7EmA4${@xN5gF;fl(VNqr{1_!Hp(|jwLkMe*Z+l zlBOXmZEBAsQ-CqcvtXaPtrmF*VH+Se*_J4Oym`USq6<{0c*qNUV$`WKARSBvQp=A| zegu~23&gA;aLnq=Vb_xly$B8l+(1G5lQDA+KcR3HFIC`ytjiaNeQukO?Aad(lS9Wj zZ$;0YxMF>5g@619c!8uKy*088rr%#$#m3me7^Ga{%c5&A1X;h%oX2JOMF8_gUE={; zO~$AWPWGr?xKWln$nlM#oy}eQPY5u;gh&u@0#`XHO@fMp3@^esYkO!>bFoOr?*l;2 zaIl0>0ap5QtbbJiY6Ve|+pX4NTJuWvBBp*fWc$YV_tQd93DCl)qS~z8MjC-=^>degBZ|Ifvt3b(TohfEKLhMwOFED zXnHl1im`CN3TCOzBfxEDxGtcEBGafo2wit-65!`cB6C;GSSf!-1)BTsZ#;<*?J*42 zmB?oJa{AOpXl8{o4@kSWZL%ZGvBg_IgkQ@=EM26ewhiO%CsYrlZ zbR-bjkz2!B78r{~G{-){(IMZbKilDqCocq@!S3+)nQeldftmE@7~`1)qe~`Asuhc3 zSE!7}o}ut7_yps3mzt|l$?ERC&{xox$^N)Es@%X|YjrWU^94XrkL z{@!zNG#uP$Zl;b`pcl;@gptmWMmj29@yw=f(Zub@q<-{pL{!Bh&HkC}s3YYePrt55 z)z_jzQ+Xc6^3jYhz(@(f$45m(dQY3deRI9~7B%Wzak4QmDY)a#CNrrZQ8Ri@R40_Bmz=xC2i|s&;&uCt8POyIm4_7k+AkU;ckoh^MDw9j`KGrb4IF2-l_FF(^BT_(9fwCD^9@* z6CpZa%CY|GU6*FETQYydp`0m?@!oT{J&#&VPV~Yl%wEy_6D#f8I zd_lbv%RWEs2>@9J0?gMdCG4}{y-&{tI)%GM87}WOYK&loA-VMC5nd8q?q&qsA>;Dn z0XR}*#c3U3F!zUw`d9mAQWcAx7ugJeNTeph8%p`lhwkz`cJK$xro{u-EkP%azq>(b zF-S`8y@1pqRmtr25X3OTMEa%sqLsK#?B(n{cMfHwE*Kz=o7EeY{ps-^G6FHJV0o^4 z2PmtQARJ4D)kfH*-6s+aUBL`}0nrkl6Bx<~AVpLQ2 z%0&oV&g6a$_W>}$r8rkAo~bOc4u1drQKI+Y?iTsUHO%y%gF~$&iYYoS;#S%KEgg`D zj`>3~z-qGKiCm$|QtLdQI(u-9SgUaj=u_KXDHu^R{1V6tv!n-gi6PlN)O*0;YTq8G#D@rup|%{1zg_^F#@JI4x68SX5l9VFeNRXSiPUVtpH!J@voyrjbppxfS-@>vS z<`|cMr3-Q43a)cQ@KjzdprDE|UABeAOMZIL#^KHGe-Qs1K^<_L;kf!&Z6rM?^}ti# 
zed88{2O>VEssIY&@udV+TS^nwQ9x;5z`z&l{GL%IPJ${0(B~0OJsmhmv`aWc&p+Sy z1!4F%XPUoc$KVVhoTM0ts9u};eaS%Z4FHSQTRgXN`P>8iB7!ixb6AO&b?U^648pJ? zn1)ygw^8U6SxXZx4FZkbzmNy}&zA<;AOOy%qWQi*VfyPY8SuLalpiPwcDl(yi|sAt zs2_)$<{Lq;q;{o&r>AMAGWF1l&H#6WOo~dvIoQ)fW!GqTtOA1?0EG8@xqgp_{*HUk z^GCGI+OiR*&T#Jt$DcqT5kBEfB}|POKpf>-_6ia% zqA~zFLY>J2+K_QlnWZ$y_13tIf-k=Oedob5{pR#J`^Ucz9fRL2oSeg(hj#sw&$bsz zZ#Urk^Kh|=b#EW2Y(rsYnGdHVpTU(xDwSbHPGe1H2*x?MqiK3F5vFZbK#F^%<1+Vo z?m*T4KjC!n;31fYBHQuxezsMg-g}UJnjb;qin*y?T71p+q)yLN`MU`dKMbC76NiY6 ze`?9EH#z5B33Zo%qcWZ0BA0X@dfqR2r%(Rgg`fGz^MCneA3E-Mn3KqKhFVfYz$yPR z2Y!xlSBrSb;N+i-`g3s@&X8XFF+eij9QO>Tw*~_?yk;G?Q<8iB3OR;mXdaonP8wJI z>pN4NnT1L1dcc4>D}rw8j|kXXXRZ17H449;Uy29L)(iWZPR_qKU-Sk!4|rFMFi}&* z*lp*w?*E5hR!hIF3>O0p6=PuxNDAQL|NAGWQl7Zxa@z=1f6$KML)y-=+CQ)z;jHn z*ydXGmmk4C0DDLs+Y-n+OEzLJj6K%v^duJfW-yu7?R^gX;Tl1r@@0q_qkxrvxowqz^=`qyO*E ztW1Qyk)d0Xzr+i${sN${VA%cJzEf=}0X`?LwAeL(?a)_dSBZl7zYgEOh_`>%znQ5i z=f33XSlk;Sj#T+@KEice+P)kx>N?p|)N0oNIhqNsx%$sgh$VzVDQ@zQnfq5L#1cby zT44^N{sJ$Ua1A4xQ;F25;=@qTn_nkfZxfKY`1_WcU-3b$0BP+*uQlJVn+yJI5P~oi zR6E^UB33-P-63V#5NhW<&|QVgCEs7N8NdkMe?D6(;n~U-CXfHYAcX&8xJL|bm0+-B zdAkTxy@lb!%L78tT-oF*%V*w#YwtoY-Mvw3c8~b)xYZ+Ymbgem{eSJU@>xH)kFLLi zFz*bvxw*YuC|m9_<1%eKA7?gxLXZ}K?9SZ7UZA@|lC04L8ouo2+x-@##! z2A*{|NZaz^$U2NG#smG!1^9i?46c9D2BWOuzeEoY(O5tY@~EqgZaZ=-AyYG6%!by5 zP-|zf?enEtUmP~qS98G{k+zy7AI=Q!qFj2hf8GA%Pe@TMy+&F8ddd`Lu(=b6K-*MZ zk<}*!=ADe7G~I1WfA-{`ll1df{^KH|?In0F>v_NCwKPmJW^hY>^n9(Y)~yPRwuv|`_H{wzaZ1US*~9t4<6*Z!X23TC2Tdg zxs1hE)VGg|cvju#ox8lt^ETnys8nQlcOrf`p19uxO-fQA&3RA}J`{7L9%_(V_vA%hr4iAZ z(5>;vIrT5Ah?@wQPt1sL(#qnvM9vmM;l)6eBNXrq&y53G{;>*tZUO)51i9N&v42hz z3A_x@P1B%qkvx^@pMnt96vA04|C?}NpeQU_M>oYmihntFI1fUCcLgpoOaQiBb3OUH zDOmI74`}Guh+afw5T`2J#{Mx(aLLnn4}qQc#(T-BdC-uFSV2%{0P=qOWG3pj2-D9$ zB1Zx*LFyPg$oQ`>aUcieexNhP0mlQr>iffUH3P!O1V9<++0<3~fBZNk5Xok~<`w>P z#qaTnqP*bsy$o_QRYd>JssmO^xW{63f4A=be_35{l@xfoDDLqui!+eM2fDRCRu?df znrs#zk?P6O>n@Y{uZ#|4A?=tTlH+^-@m?@UJZEk|hZafivMOwll~F3~|E4ynhHS7FC>H-H#Q`Wf;R|4)RgnH0l_a9xr2fw*QIJT% zqeyNY`j=E2K5`#Yh~Tqm4*}k!0wBCT=T$C*A_@_@KP`bYC(b0a{h#7@|FSBPe|05v#J?;awcSZy-3T>O_<_22&W zKQH-b7XE~@e-7vWeKn6JtX>_n$=xo`&u$Z5^y*t4>6|j&@?0|>pQ>4G^a~A^+(Srl zcMlQ$9s-j8@gGeoCFKr3WL*9qfAC+P5X5jdBqmC&fbxHS+@D{AfGA0p>CbL)-WM8^2}r-y>~pCfy?YzYf9ACxcoAeX9TE(A)pc zZyPBj<&>3J2TA_-!Ts@MilYR6JGOY>Hsr}K&)f|;aa4Zt z-}x=-5{w#8(zUa{hw*=zZ%JL4j)pTW7yq5#n!%_sGl$y}{hyZ_1<6C04qe7*;(z0} za))5l{;7jE{|+JlS;W6X$bTa7uMpz%pGf@c9F+V|B>r_~0&nI2OFDsjRJXgc zxSRfEU~4z~5b^MR|ssWIkdZ)d#vx_>{x$=*rQIZ-ZDVO^)u@Lh+QqfcRNh%ya z1X#?*SKi(_;^*%xM+b^`F$8{4OFv`M@@Lmd7+jCA`n zn_o|&+u=`t5;Vzn(ray!Ork84^{+gCd|tBxeQajyxTHO#orx5L3rg!|icgLeh(jar z#|yT~*Ky@k7>J@9l4SgPoW`tj_WemC*=6b??#Hx>PlTJ^zxT4>$4IE%B=ow+Ze~4r zss0{CAT@!*%#8vib87LppNoQ*;)afp;*c}5Uk~qyBqzb&XUW$wCj$lS>APy|p#wIT zkiT8>73tF2i;oO-1SB#sk|oyM8yB7x1+;9;$G`aTcTt!SzM?y!pmFlBU(fp3>sw@h zo>qW*F7}<_AU1aY&rx3TCbBhSAIutIpvbf*l-k!5E{Xf*Z~WrO*P@C=hMVN@2kC=| zMqXOadS@4TQq&79B;-?Sgx$JQ$ns}_UVXHYu)C4KPv*G$UP0~G%1NWY8>N2$*G}rQ zPT9qm+qCXF$*C(P-PoXBS`jO%KtW(^0n1xbfOgDqUbRu0B``o+Nc$r^g$`D^rFg<@&nS=PxqA5{v0&e8!|zA&RB;h`<(mnTeyC=DuqSG@HY08U8eA6 zpoqIGbTEFt`~CcJ(w{@$9C`Pd&Y(N4De1r=Y3rJ|78n0)h`{p%ZYDitScRbT+J?SA zA_V)H_tcsj9{QK-Ewj;5bz5}(>B9W#)Jc|MKmR%c!jeP1ncicva=~@Mz6bw)mZzGNWI$HLJx|8o(2yj*8sA%Tm| zBFXw%k-v2G&mifNmYqIy8Sl%I^y9M#NE(Co1cZM)!gc4qq&?xn(c|IYIyxRw{(OBO zCyKyJOvhm>jgW%=`|s7Ipj5YgH88Y>$-m=gq*N0iYF5aNQ7e4wN3~>8wRBtf1A(Q- z&vD@(9INeC&pz%Gaw2<})bei-V(mr4aq z>oKB1EOkB&jR8@IrD2x`iur(aRBUm-vFPd+iTPgj+%xc9vM$kk{|vBJ;2u3*YQoN6 z5e7{(;_Z;qSD1w(thZnYAtoTBog{de^kZxw{6Ljm2!O5vpjXZh7zF_+Q}ct8h2!9M 
z;Lttw?_0rO?o&zR{Wm7k5shTb&kRQpU+_m>2o}^gdbqX|bNWlupCM|N4VI(7@$+SJ zh@f?F4O(;eV){jZ2xVXPVgGd+^AYcclpBGR4A^1RpLcEyzgy}}ByFv_Yp9K3=|S+jHyHK?h`IXr1+W zP3O@L@RtmWIS{|gy$$+AILga?on!7V-s!<=9*Ow#X!>+W1k6ud81etHL3L4q`av&r zr5mkC!hQA(W@7HPqkaxEzYhQ=N8+5{2n#HKpE&2u1Mh9ADcYe$)eGQRz zzOmgi&sDubml=)j5<3IUTCeRcAwJ;krRoqHfFIug`GzlY>%LSJ znKubRa42AAw6Zi>#1-c&&E&>}+u2_9Ca3N|glQ3%Ai*i1K-vfnT~AT#4_dyazVRK_ z<5VTd`0rhusZV&35jWIT==dWxWK#HKUTn_lTGcyB_=3L9^0TfVDa)<|!M0I3a6USq zU*4f@U^hV`goxR}r~4eBx&=a&?I7nEKly6kF~g9)PX0s35cvz>2!|A99p%)$3Q~kr zO)EiBXW0{KDK)4ZMPHEq4n(-%J>)?+u8;G_ek^5R@fK{V2NH@))+OBC5)%Qn*A(87 z<3!1Fh}9ZkeFSOp04bBr$Rv}}yeEM^B)ASZph|!NXxF#NMc-6I#A$_>+xfcts)2~B z^n^}K8V~j@JQkoNVC6jP)&L~jQ?nwD37iLzclqRX?1RMb@Qjw4|tEm6yCOZ z!S;I!LsjSxuzU-B6Se&-oveaQUCu%e=sNj}*Du%&@h<}hEEc5tx>*%xa!bc}O+4{f z2Z|Mn8fc77LM##jxTUv?FM8rxu5tAc;3gW8l}^J*=eeTZU1X)n!9bdXNQ$2gMeOxx z%#oh_U9+EXjB^)pvLX`l_S7+Veqh(`rf2a5rC&?d*} ze0mR?3wPeZ$U4!*s!xHVg0X;(8-y3&H4dG|ZEb>lucL8TV#=Y<;hrENHVLG$&NlT^ z;?n@~Fz!EpoG^{~{LOh?XJGzhaA`g$s2E&Ls!5wQnH8+AEde6ET4{bw_QLq`?|3V1 zbe1g)y?GuET#W_B)!*m7B%1QQls)=>Y{!p-M^Xm)gfl*Mhe>ovv^E)Rbfq>x=)N5| zIMd+^ha2)hHPCtG?Uh()Cg0~d|kz^B9GN9y*$|6+BJ9AkJsdd5+Qcy(BqI+qhFaGoH=H4iSJIIF%LWx`73r^rh32V z@RlTc43t#l`_B0ERp%}gdM$)l}jBC!Qn8YKWabQ?bd ztP^0ut~}rOjTR4TdC`Xj2k1MqkCXYlqbviAM>~L8-W^qInS}!FOtZH+s^C{4omZ0gaIQh5P?G9B>v z^CP}YUDp))^YHUYMn+sk{f#pZ5lV%AA>j$S0!_Y%<-xXv%d{?xMs698J07QTJb%? zI%^HBR36N1Rlvt=?$0mo)7Og(ziZtl-n;b$u>I!^Y+qb%!h?{(c_2z;Zj^5p;#6%0 z`m%M-@c}wE#tKmuSJ${OFYrmk&pk0>+erDW_5t3=AKQ6(;o9L;!@v;Z`pPO-SjMFogL~3;(+Jei`I!ov=~J?VYS&KavB*$ zOxj+EMk?RdLS_D0ui^H5iI0FJQ3jQy!UGc}8)O%kBntt37tWNMP+1g{Sog69XwQ8kmWzhMu zFaoM>`XtPJ$GYbb_|K&9&Vfbd9O8OXxAomW&asI`!1Iag#M3KuRzPaQjx(ZpE8Zp4 zQ)*;?mNk$4oxm_YM23Nh#7x%g!JWnKf=@ zo#17TZgK)E23ZV;T_508GIZ$S%Lm<4T4QauDY341AWb-lcem|I^cl2>1&6u{aEo|F z8D9ltOVKa<$eH@hA(WayjmQT$vu{D|;6hhYsBK%EYoDIQ`#PUD6x%9V3D@zbO9mMN zXw87s-xm`JbVc)<1Ev8*N$k0vU=eZT1>aVhF+=aUWxnf>1Ps&=TUMty*O%CR&c)H2jo$bYYtI0 zmC6N%u8W5b1k?cm@G2P~Fmu8HX6|JHXUy$$(_G{@QY|_>1pZDBZtlC1S){Dpn$>r) zNx4p+6TElIohc-xGei#-9hU&wj}?GFwD?{DoHcGd0VElURkp@=clvG1KKQ?}`6fk^ zWnVd+{IDG&sNN&RJ{wq9F3UhG>Z?k(-U}!^a?f~1rS(Pc-AZNyooWm=$x!8z92bjS z-@QLulL0A)kTP~lCO#m^pdJo1-K&r3fqx!D&=`k*}eXzl}I| z3b4P$h30I;w6239MVQ$}X*+9eIXEb)-TJagT>N)d=6 zAdL`dq>Q*~Jsi8a%iiGf`jlJyEj=cm#fmv@mP%0!J8_P9BVc}?EuS?|MPpBrRjR(| z(=eF_{8`_6GN(SK+bkM6SULXOB;ew}3>OC>UKm3(Rae$1kbFg%K4~2dAIc?@X@jePe;!lc=N{tjTN*co}Q|O7h86uS|^@NYz?j7fYH$5*u@~?w&Ql z&H($i+p|YKlBN81q7v-XwZv5b z!*TZH3EFI@A797nRcqUGzU?TGI9kDp=?z=jjDVYPoAesRsw+6BRBJ9Q!_nkQLYX7NFVL(Kf?&r2+_0dT>>DS{gB!Au<__ zg*>e0ti?>12|M5OuG{6@F+>HYXh~*}^9M0R!=4-iZ@uYlQv&4$@EpPg6Lyy|Hjs?gT-T;n_aI~$~6-3#SCUP%j55=Otf z?)k-FR%|B(A}Cxfz5#yp<|iqZm|%Z)5cph21*U{-0x>1VmrR6p?{$EFw*yB{-Gv$F z@ey`oedcRizt(2|7EQgF>;!jjt?ITI20kj@`GO6cVge9R?XL?pz^1c$=Dl+piOgw1 z2eS6LkSJ}FqW2eupk5na!P0;>*`Z~SXo*5X&UG)$^wHSuIio3HT%Cti9CsV23EjpT zEu%2JVqX$t_q)uzdo^wRrhn(_6Yo^a#thvFA$!A`?r=<%8CTM5$*{e%yV;P^y#jZQ zibA4g#{~!td=*n5Xnw6`XVV6`KV1pW2NzzxjD=5NbK9t|%iiz7cY2|`X-HvmvVs|3 zA=8fBKP=O&x;6@eD?!n?9a7>r!O!sj{0o-nO>edvS-JpcK#vX7bvCA1y}K8%uY#`6V{wpw6yEvSsJ`?8d-l9t+Q z>W#v@aBC&WriBS)^fRl(O|MUF-_e${G70jB{Ly&`)p`?uKCbHjUM z;+STsy;0#zdTb1wej^)^s_7|X9soHnvUvTlwsl$XdRLvEVz2WEv!nVMb77@LJ%>SO zypVRw`{x}VB+|7f8S^I15#^o+3-I*u;qG3fDR$ShjCUmre$q)fyUVc?l>P13$ctST>Chvl@5S61b@a|YXeo9D!{Be%hWqXH7hp(Je;K-U5f2X9~us#)Qjt& z8h|Ncap zGuta(tpFm&vi=YkcPh6Qb4Knt8)K?~3%WF!6}wKDprMGKY9_~)&N0t5BfeWpSI2ql zH*7zFu$YUx`B9CJdfWaiN>T4eB8M^xb`Yzqryp{jVa<{by$-i}(wdC+&q%<|RkN9@ zNGIoJ6eKu+dUK(L@m6rZ&wk8fjs}D{NYl`$8XaS=Ir&^jkw0NzlCLk831@>)<8AJ^ 

q zZupGUhJR3foHm9KN`#RROd1$BTRx0q*6OsHauAv9AlpVcH#CoWmfAff!$8NWe2nU@ zT%uh>R`9*W=c3TNUMTM0?O+dm@+8r1P(k)30~2Ub;VHwcXw1 zWF0079Vzo1n!UhPq;JY^p~jjHhniQJ|6$jTPF#c8;M4u-a5H<8@m8+5zvlMZg^BaS zj^j;skLqqIG*CHm(|G$%{5NTY62RRrGv#v>Mvv5gV#lYm{tVy6oPK=8r}Bne&E<9hgA{VCu?B2o&I`SL(i$%z{P*n6UVLw&)~4=p6lv) z8*|Etd{XzB`86(6R4@*?LoUOBtNO2a;7}xf3s!D^U{LjrX$4>L^77K^cWhDqIoB-d z-M7cvlUx_cHCI2sYjuCrpetC{*II-q9#9FoczOKX;P6^c^ZapnfpE_qR`olXejXpu zzg80~u(1x#_dK>?+c$e{ruL>PM=RbwLf#TYt@BKl5*HUDd+Mn+=9;@F=Jc+jwZHrS z^s*m!W+-JET-T$nJPl;4l3I>t(nVc)HBj-ZzDyMpC2SwaqExLv?-%(uPrT#D2b|*E z{jk$jy)!k3)4L+Bt-&x$@+ON;BX=IyimmcEAdV|SJX;*5y3l7ycdlrrW6rNP&&|1g z>c-!2?0j&TM+CscLKTv3-4OoY)Ta$UMlp-#!K`XL&w7!@QinIV6f_Rdj_o9g+LJvr z`P{ww)IaQ#Qm**PY!ETWMSKewoMT(bVu8GZ{j&?+=T}Z-jbm7+=}kU6tQ|vN>fUJ-b`3s@6P+W~N4a zA^DMy?45|1?f(FvfUlu|Wlnrd;Y$z7LVz7-E=mko@kzYY zi#Il2e>SCHDX6*GUeu59W;HT+9=0%?)1|)6YtP;DSRHs{HI2*o_-q1BI_RvfN5w68 z&sPD4tG$Rwr(vyy0tdQ}Z~Xn>Nm8sK9FDrLoXoG0L&(eO^fQu7gaOz62!?6Q;XhG7 z^sC4Ip0zZ47kU37QB|AedK5`hqVLFl4U$6bs;^zDQ8xVc%b#mW_>EI|?C;D@%%5lf zug@vu_o=a-jfefkF1MfgUyrRRi3ZE!9z)Xcbe&;*Q+vb!o!dO@zjI~)$g#gCtv?r0 z-)-1b7uZeDv4<`hSFa2M69cg1?6YV#csrv1;ftH-HyZ;{do-b?$^h@t_N3zS8{PYe z%N>Zo-%w2h?{nNh{0LMcDZ^|xYN2YcB5Lc;m6Z=n8$%ImUkCq{*a`%~w7-bedi_@j zh&2C(7w;44yAvJSN%)*nKDMWttbTqy?`VU1Se<7mgkSipw-QD%lzdAMI{~M|v0@#i z0=7qgLqd)n#*uH~VCECC{t!f2PJ9z|0_vh<6cvWbJbQX$T;y!(wK)%)GY2TKQG4lX|;EWZ(0cQiJB? zcNRA6B3?)L#|{ZGHLv8+_`YQjDkYIznz&TD6%Y^sDoK z%jkuKtx$nH-hY%lW#W(0Dtm0>TtUU<@!;&Do_OmLim>%GL?14F(KCGV=4z2VwXu7n z7L(!2a>etXDz#v`SQ>T}>sslqazSs|aqt1tY;% zrxhp7Zye2DRfF&IRbGu z^g$N%|C&`&*bO@nJUoB+_c&qjf z`14Qj%S|lT?F!9_21#f=F3ESKcy3gp`ND}_xEb%IT3h}E%fT6u?`w%tS0jzb z5!*HQKCBI8Nx7YmYPqfU=m^5vf5JX0RiG#=8ZYjnx<+r9N*qhvveTn$u)kJ!bE~Nf zULQy)7$1*QP*&zX-Wl%Lbas+w;;|*>s~)QyhJ*&W-I<%Ww}vGv3ZkflsG8QX(YIw$ zF0kj|p}$=vZ3>!Ri%7A{UlD&Vj{P>ptVeF2IDzbD8q=i75IasCIX>!m$U4dJ`0^U-Y*xCkh0gzt7A7 z7awvkl^V8R&$AaUP)*PE_pht$z{17Nvg}W>y{3+8-Wo67!lfKg8tb*2E{?0>!tRV2 z6uXGPr4~@mEWG)qBxlJtAmTAbYD2do02aj`IBq62bW2LdoA1uJ$5O;sVP2iCMA!I$ z_s#meCl6Yb-nDyoigDAaO>`!*I&#jdAeE&DAeNv9ij9sn>68{YwB!OE>6`8A15nj( zl+`bI*fyIJ7&Tc&#xw6Q?arc){K}-EH~O4~5BON?tpywLlgqx9lV4zCvdee2oD+Nm zNmiqHgT6GVp}BrsBCuGzIFcwgIszMPuPeuAuU0y*8iH?}K*q|z(M925WS=&%Y#Tn} zV_YOy@WuTh8tk1`kcYrWoQ7!TZ^)`e2OM)DF9fzF4(GrU=+Uy%ZTUz?6~}AU6XFMD zROmvQWwNoyq42A0wHCex)O(6!fTyr@hPP0_|G_)JjF`!_%C=IDA0dY#A(3u^OGdc> zu6P#)ndH!Y-9mL*4(#hZN;)A;)0sS#ccHX�I{A80I6!QJ{#|%$QnA>vD}m;BwhZ z1n@sllZ11$9Q`+iw63SzovKyme<^ z1hU|aUz-3q@pK4`9wqSsFhJXMIMcsD>(2H${gt(5Z=TwX4Flj}(+Ymg)*6O4Zr< z;M2r@eX_i{MT5YIW;YZLHj(H)#}LjK{2`SW7*fak>;q`j6TP{VCeErry*!5IU1>n;KT6LWJC1#m(LP>iV6GrcgK*ukC zq-lGbfuN>mf4Vx@>(AEa!IggmA!(4n?7)h;D|+Y+B=anAKvv<`B*lJqvX=)c8~VVc z&#$D+VS=d74}B_IxEc0i^`M}9L z;9=kEI>gW}G5p4?j&yvY?#a%9iTE517aYVrNRZ4geY~~^cV}upz|pLiH%|Szx_UgC zSUGSs9q-;2_8t}a$$F8tMqiKX%GyIEj;U6^R$06-f8AG}<0YmUZ}IS+$%W=Id7IpY zZqjULcb!I2-)_Ef-1W>OVf{fXd7D}WA2 z;_GY#^vH#yf^OZnBa`Poj1*}-2hBCjHI>($p9$K0TY_tY36s_ya98t9n!Opp)B&KV z-ql?+a}iXyj|xqmyCW^5;IAwG^7mREl-=8@1=m;Swq++ZEEXRPa+AEqA3U2gd40pi zLhs-JKk)F3WReM@FXL~(euZP}_SAv`N#VkJziFE%j*@y5H9cIf=xVrLNJ#cPx4h(l ztyQ8sI#xF7Uhnhx7(abwKy1= zfYsCgeDuL+5jKb}O6fENmU6`?J0TmA<_YpoK2r$8-k46>B~8RT9+zrLO+!N?Ke7-( zG6UhUCK#REF(+^;Kmf%RKw7*-CaUa7ni-RHn?f1PVw5(P8H)Z+$0X;*r!a0aAS1Al zvd1K{z42^3rLH#ltmYxywiBsj=V$m4M-L!~4x^hh6?CF17QF&m@{J>MubMkxElRfw zM|c-GQvh}TsU#*eLbuKmN=(PNOHEXa>d(gt3Tl7f&={5wZe45^5o! 
z)hdfU6R84_K(bN=T$yXvWh2^pU*7;*<=LC06M2#}D9B{`Kk#58>ZG2S8|@9cu_Y=e zw?9l#?1hFg9RcW`{qN(lqT%a=2RdP3aC80Jzk^HR5$Zsih!VIoyKP;ch7yEEiDuw* zrZ39E2N+MAsoB(n?NygN0nHm|85eVFtQUt&U0KOg5b z{yLbcaMyiUvgm( z&J*(79|8gkW*`0O^%K_bsb`cA)f?hkTSoI6@x2FzLwh(Y2601QPm!4RdMjxf@nCtogf|l>b?7s+MFEcd)BSQuyEuZJ! z%Mxq_vnTRGXP#X0(49HWv^paHf5pUx4V z_k5GrDj{1y|o|FJVNMIDGX%H%`o!e?*6rk~O_7mJJ|aHiI^ zTwwEzXobpz81?Vp3&$A(A{lT(t9+#xg^gN8R&+wy@#)pklyjY$ehb+I%TL!~L*cCc zl)*0*wYJvq_EJ?oo?&hF{u32{J5W>k-|}(ZtLKTMezg}5(3!@ZVwNz_ESz=)-8bev z7kTV5NXclP-njb?y0b)e;yr}UFtrPQr|?O^v2JXi%hO6?;X~k@Gu#LI$E4_|PDU!Vtu}x z{rR=jGpKUe=jRi*WlQ=}_%dux&kd_?-j3mVHKlIt!7@o#iMp0xh-sy}w|K4D>(uB< zeneQmF$09?pCAD=K0k`(Q6fgTTga-#u;{QvKS3fZYRc^T@9zUvGf>sTL1cLfAVN!c=Bjm?~mCgu8tB)19D(Uj}PzFjMpBp+ORcpn)u35 zLSH{#scFC2zrNHRLuch9{d!c}5)55xZ6|fpvbBUz(R~iS@ie*}-21i@In1Pgj2f^9 zG|X7R);fo2NmvrPSPT1{pNyv(X0q_HB1+YKi+ebg-(lpM@Fo09H^iVT|HwAVa0e-n+KD4DlX3AiL8;z^)*>u$=<Q+dW+UwH59|N31X zpc4Vfz)3Fk{D>PtJc!?ohxaS>l8pD=mXPZS1$=4)6=M8}qD@|L>B2tQ8s?Y3cF-07 z8oHEC5N&X9S`2^-(E3uem(=~{RwSFfGy&IS*d|*gyw33ZJMA=-X^4sS`1$m?81Tgr z3Jjouzc<04M6Y$5c7fvZep$aEz!QPw?HiF$5&dJ;SmNKliG_w9tX{p=-(Rfw>jW`w z+<8%rWZ-zR(iJ_5o!~VBm3wP0FWlNJZCzuX%ESY8Nn4yasmC*QT%Tst*7Yaxa!c?i zl4=qWYdluuZWaNfjRg9Gh46n+WF(ZM$V)OC`mYog5Q~i(e^W$aZRv`=b!}G>MMFq zH2!jGkIqqc`fgD5B4$mU>kfr{H+JdckLkn>zd143{FXk;y(Y?jIhv=Wba!Du{0xgQ zL@x*9UWNGh!Q#i(35sb)NGSs}u*wcQ#0h=%&PJN}ho>jc!fO{kS#uFFYkpm|K<`9& zVa{tbZT778<{xVp8zsfF8C#x<_`>qID^dyEV#<7)= z%S7!IKyHU%0BNOfP;VbTX2U)B>4nc1?8GNhu0VoYXiLKJkQ}>;%d(G~o70=hc#oF( zUIloTKz5MfXBA4-z{@kNA;7QHpcl>pUK;7h;g3>rl?EwvJbdg`*!zxEnaD;`0O?0^PdHWf9ij_UhqF7s!H5*VQxl$X0ci@Nw<$g36Un zOjC_49KO-Xp{$NVpC_3|$a63AZ{UxK%sZ4KmZiOqZw!k=&=Jj;udv5ah^D=N^Tml4!nxn) zUP^7PA8SIYjZ;3_&B#oMDo#wt6D@U^|6HVD=6?^RhB{;hIg-_igY$W(%&u|t-*=|X zE^nZrFMV9`8N?ih+D6cGDi3O`-du9JoZ>KSwycd6L>~C#V$o|&QVF}D!otYWH9#I} zJRKv5@4niecsRO0+gjl=&XQNidEsd{{jrdwR1Z(e;)zT+aUR$L7jZ{U@QJww?pbmm4BTp$^@3@!`*IUA>`5y;iKMY3wFoC4Wci{Onx zD)cFI=6#LtQ8W2o1F?UkW|Qj&f8$c3Vauz#{mBBOUpH1&menb_JScdqhT3YRT)$kN z!B)_%zH>NFNso-bn}@~R?LkSJt%o{-V6o$I57TaOA@6}YKg;22@5ku|F3TSI&C$Gq z*dk5M9zteji(+lbBZT{YbefiBx#8CP-Hp8$-AhQ~MKC)%(hhVyrklKV%~o(em;)WL7IH zP_O+R%0asOsmZj}*SPnc^lkIv*rJ-9ZamS;cQD&0GEHm&cuhD|7inSk?Gjk5DhQe5 zk8feNi|pXCUZE_;s`v^)?Y&BVxh;U9+xR8uPJR@foh|raE^o8m{0wn|4wJ>P%yLUh zzlm6Ro>Nq*bwyH|y|lc4F8kybQ@_RchbJp2O7pSbKUi;$(8t?J&h;OVJIX#no2rg+ zd-mo@S~R7Y=>WF&|KS2Gn=8dt^7ouePgp>qF7k>bQ`9ex08#!#LpLGeZG#syA` z9!>C-(P{jpPp*+f#9zAnsgA?|$9%Yr*M33y4OYr?8fi;!;I2t}uXnEjpG4LmqPWk0 z%o;gB ztp7)K%B!;^yF09%GE8q1u0-b~wW~j*M#S_h#zO-cCoYF(>FH@jmS(W<1fxqYas5e2 z)8Rqg9~k%TS=(2hq9P#6Ht;wv%#S)KM(~*}`wbOg%%DhovgFKNp4dS5f_x3p&4j@s zz`$bd)FVJ*SfkEicCXe#)hR<}`i^=}L%U^8V0anMxun7!b=r1(8*Meb{Q!JSp7qN8~LgkdC!cvR5#m^S8V_%>LqI1O4IG*^^wbUF?Jc6hX94_ zlR1i@OG!zk2DX=FTwz64#EcbcNSV1YnPvq#c*$zPlV{CqsBhjk)1K|XN*KG9r* zPYR@J2X*9C1u=IQwok&f3x;?SUK)>|I98%&Gc?@*WguhI?Oa<61Tk zrlOdwNV3RsZs1ji;cgVk`F6RY9K>uCaR7m31KWbsJx2t6*R|TR|6z%U?RJgfILpk{ zH+d($dP_?-IoJMdg>FBq;; zHksXv$71hU-0DwM2hl+uhBJQ{t1i?2$lorio)kXz<<>%ctS;d|T=C%m4DP%1Axh!2 zFBIGi62>hkfh0=op2v79GzVbC>eaJR96?<3La`U8@BV%e_Sj;&lE^Qyx!22IF z-Qph+N*I|%LM>A?Maa>2Ck^Y1;Wz*zoQhe*qaiVL{dnzI3r}EbCi(Ihr<;W(-6Bp4_C)U@iTjbywZYR%rAYQ_i*72ypka9&{`q`33r#CWXb5W?39T1s#!MSl|VN8F*{TwlMs_+!13IINgwZFdEZ z)ZyoN8~ZL#?(GdhX>-qR9YrC)9S0!M2JIPF;IWh+)Bdu9v0$@rjLPE%O&Kn!4|T?$ z%`wkTwJW6)7-Nl^z2bQ7=L&!7`y<^Am;{3Ec*|oA-tG^9cEvzb7Dx-ulmoo7Fwf65 zJvCUK5AHnO9&~}t)ooh_E0+i>rCQfg@c`sx96E5;=xL2+5~&2ssLhHaCNq2XF9Nk- zo|Vk)oUL@i+%;g|%GEs+RCa6qY})lXCK8)$pr^C!+CbXlDZ<$H!?k`M&78DKjFaF< z+^fBX=Es73kGA`hMY_}r%8-7c?Rf?M)p$N(nbOg`dU{}n$SY>Xh5DP>f}6JwUxj3v 
zpvNN$@*QWVmY4U`^MM40AY68Eyb#PnLGkBu0cB6688avWz@t)DuuiSf1D|1V?$eo? z0AH-T?Ln`-a5t0je7K}oE&48PnS{KLLAMfTfswGRa>b0D9*FS~Vh}34j zQ?U5^sSyf7n`95|UrC?Qn!JeT-@rRcID2b)<=_v<7)22AmZej7_!Am7sl2h+*;l7- zl=D9YltOh~*1;pBfbcr^R-&NqrUT~{6v2(MU0~ip(DHESrbOop_WS%(PM1|^U`jo) zWtW6M=D#;rl%w>eLl(`k%bBKwx>T;NkC7i2kzUh$aWot#QAS*PlQcv+aQ_!r=vg1H zEY*ms+e~xjy;%&lop@_q)3U%&QP5Txp-iHOx*fdV3uaDa@n0)9yJl$3&KBmIVnyU| ziw$d&xwk3k?U2E1*w-~DmOxREUUwN-OmVwK(wy6QzCubP-T%@$CVn)Poi_vo&0%!GJVZ);Z~awY$Ta$nrWUQTa>Be zimUyatr6fHVdf$#wQ`Es#0-1|$CMc-QjIqeKv(V4$U!nm#+pvA=++&zwjEb%Ha^Ij_9E{;&jHDL(^7~LxyIdOW2B$Y994vF7 zXBL{+friqWX8!bGrAB0(Nw3^De;#=5R(tPvUw&zq(S``tc_06(M@)Dt#XGZp{tJHB zK6<9n34Z_pWPikXraBdKzE2FxBQf`jT-(nM7!>coB!(e_MYP>Zx9-al^0HP#)t&`r zOezl9xrT33DIdiE_(_C-^_cQDUnyV24AQr^-9wfmuA`iUS~fG4Lef2`@>%K zvF#f%9k6_7rZf)`M&O}1)w;|6($Xo-{#zK+nJj_Py}nl$tbRtt8`3EpIdZHrI6(`j zBpQ8_A6&rv`k~@2?@tDY#Esl`RlOn22c34AsBFE@2hzESh$3lTFTH!hEb>Xjqzym8 zO!}o)6HM~6dLdn1Ks!^wL?V53c}YgLz^DBPU&Ev5^EwWK{d1=C3s`Jhr4~_RYHArV zXgBUz0ri@|?N^(GpwpAq$FepT&VlwArQDnnG!~WFKW21+0yCzVn@Du2YByV_gSDfD z30?{uk|j?q3FcbmP;nCC+tn%YQez(1IcSw1qcx6uAlb(iu9=&)QBp15kWK1p>#mn| ze`?zQxF!>mz{r!v%nkzs%~ti?ZL`Su!lM#f;r@krYu}le+jtBhLx@D;p_{+=qN6w*l zoZmi^4tKN#lN02!c++Qu2ofX@5SjYT>*@IK>uin1uFgL&R9CXd=+UmoF|n-*C{!y+mb1n9XTBqLM*k1Kk5mmV=&6c2)arYA3x>~ zfPLHM5etQHe+rLIxk`9j1udkdZp@Y4DIEvBQ|#E66W(WE<%_lI%a@a~bBD@fx?_sv zTJhu zEVn%Hv>S}l_Qe1BN9P~0fN43{0%CJn5|bQ)qPvA(9@=P_o?*R`K092^fe}M}TER?P z*|)7NV63*pxrQsdm@$Hyy)Twag!x`s+hx}`vHS-Rm8X%pG%e=3R`Y%tNt@`?RETuk z0Ldq`8ruR5tN8|>;OuMiw2dW~`#R1kiD=C|LjyS_Q zyHNxI{+sZ}-|tY9<0yFh0-ZBjT3#)MH%^@0?tXVF+P7fFo5N0-eASo}dqc}~n(G{? z*V$Gt$yM%d9oC#US_oDd^URPZ1W#8RhsQ=h@+hdwHx&5iC;Fqb>uJqz{)6`Q!w}d% z_Y9-w>QPd4=zABcL(8DbrV_`?79br)%3Y~pO*c;r4yxrOdH=nXJ6}!6*aRk4L9O5U zu9i|Df%FPW2WFLodG@;eHzD2$n(>AeA(s0u0+;zBnnf>WDuL>hD&BXYip_=bbkCsC8F>w;#|zNAKSe9;h1k-oe%ps>xU?#<0HD|_lll( zae2po@APk7zxM{~F0S(n^>@TH=5MPgF&}i_x!S27e0^Rpm?&|)b>-?{H&+o582DDH zE31cvHrqO0;1Q+JXf?q~y&c$7zdg>d2zb7s#9j!#wQ^*x8H$lAv!8^(mzz@1t)}nb zt@008sF3a5e2fzNYiqiM4^XIYWqr06xH3^=Ce2=29~YY6tZV^QU7rPsS54$?dtPt6 zt%M$>AnGjx&7+-pNb{mCtqv2%Q=|;BA)H00Yp;Lg@)g7W%DL-{uOs!837>>=o}cF| zym-c_XFfy#@tm_O0teEiL|*{62_Ox1U~p|T+wE(BaZ61vmaq0)*)hk9A#rQzM};iy z?GwzV@Bf}}v)?kUR}Ej_kfOJ3O>$xyOyM)tzqmt^^6>G!S}V3@_lPuDRyUmUw8=bqPN(>4)qZf(O0^lzPzNpb&@ z+^E4Oj-a_xaaWb8V=fpEFS}DEndxIg4_<)DQRd1;NHH}Pce_~)ZnjsY7p{>3hllhc z(f{P>Ki+d-5I)Zo93>dLfqa#ySko#$2t?=_!@CO)KuEGSzFf%xAcUsO4UL^oC8YDU z=BHz2dTQbC)k^;fepBIs>pY(a@z~}KL7jQa*-Z;f@q`#UBF}4)VUAbrUStUMcx@1VP_aU@OhAJ2nuAvV7h!7Uk6 zM%DC@G7{qdDRyM}?{n3J-Me|Og9W?0Q0=*_x$Lvy`rNzCRduK7(~TBbUy5)PWl29< zL?uFE!_e!4mAR^n!ic*b!4>Y?sZ2ZvY@O;Ysa#5T5*sZCyzs{!n6bY<-cQUb<5YFD z>Hr2%$x`HcW?uk0BR=VkJDL+^Sfhyp6gQ^}n>^&0XMaSjsTX)WoPUzVtW^N{wm6Yv z7#Va~_a0R=Kk6k2yx$t_w=p#P^h^u)LmWuw`dix!x?usM#?U5h5Jm9D7ry z*V>{hHh1EUEBh8UHY2dEcT*yv>A?3SO=|o95%t$$S#96U&Q%#s%zk9E@PVgXY6dWZYfF)l?^G>-lmoyJ_^(U<@+f!nF%<$CsZg(t}5x%(dk0gJPBUKIu@bo*-zgw%2eH1w-7 z4c{-HO7VWf?*QHdX`wQ((|-vP7jwhVC*WoP9J$$=Ps3Q37vZ`w`1Ob-slogx^g-e_ zAPpddla#*I&fe$~5~9_Q2V^Qg5YAMfKC7oq7ne-VK|2=C++VF@-wULogDDQ?^GeCave`%CB zB<@M$wlA9V8w1P~?)Ret@A#$e(-Yx5_8-@C`eiH9I_0{Ifq=CT@CR*n)J^j8k+@jQ zVbb0c$eUwv`V!c$Hmm$4D3>+*;38>ukFkMkgvU;OL``M2J%p)L6-g{?U@B3oTK(jH zG#h=j7R|r1*4kx+RD1Se&cnn9;WLMmQ*TN3J4K?qKi^pPk6Y^GZbHo)97SzYS*l#o z4DK1NI2x|)OeHlpz;uRTGYVfHw3#vLfL^}#m|Y{8|479<*BO{|IXx?{9E<5W0uL9j zGrj}QST0aAl$2pLCYZP7v)O2aO)!6Z@NDk1M6f913t+Mow%wlEKuf~`dlTi9A>fL-YS&=+%=C@Ta$K)y-PJDA)e}Iq zifmi^@WQZ=dDFW!|3rkvXI+y-csKf~Jm=jfF#f=`Lq1no+y+V9>QxS;YxlV00%kht zr6?@-_Z+`q4v^QS&cLpYNa^nY2TIJhROc)B^nc@Ce;^@Y(rKI}G5o9I@^xk-^D7LK 
z^%Rwfl8>(fItk|x=}pZU74qMQ7T4C+*h*na-o?Z%)X?RUIO+oOvTi)MAxho=+l4YZ z(pOYVniU4(gd7&Lq{Wd%|3JgP=Em9#`S)Z$ftk`xBQ!J)4yRZOGK>_g^6-dTHy=$z zXRpxlLK8+Hm8`~Zqy9}tjS59&&VX?Vjn{7@pVB$)ZEOtp5AgX^G3WL^?ynb_4#)r0 zL8GmMX+={ORzyY11rQxXL+NcZ07=cv>e-A^pwNh$oqea(iJ|C*Vylgkz> z@8-3BS}dqjzer@?Tm08?A%I>LfX5&!$Ozy4xHsI4aAw={!g2_I+1f2CETJ^K3}@ld zTfnW@)-Ak5F;&RVyOcihxO?qjw<2BVS}0OoDo<=MSt(m9pXmGdk&#p_&R5B0L=~j}6JeqEU}js?Iz+H1f8eN)IPH`SLF&-%IF~x zaKJT+nR*n^2lOkhNp1d*R&MVbjhmLNv+id9<<6sICU`QmmWjZh6@$l^%mQd106pk4hFBP|24exLBo6AdxuMXy{UoDurtVG zqe``*%>1VP`Yt96u~>y#`Vf6e^AKhOOtU+O=dF8 zJ^C1=Ru5tm#fmG-ElMNdr^&u>+4hyZxDoWs&{guweL5oLH1MdWa;|L}Q|UNftpOb( zY~wp=(VQK`;0_aus+G=AiKR@)jjKJsDAU>v)0i_ z<#jA=8^XpB;I7WO=k+>0tvGhc-m{Oq(Mt1u&-2H1%DcPcg5=t9PVW1^J+y`d^JABb z9hskeJQl|vn|NT3xrLG#kX{AnnVRfhY&p{ARPY|nX$**P1Opdf{`5absbW7}Wf&Ii z*3m-#yqqtRuTcnaqsatqY!LUGd~V5Jir@eC;97H|g^yiAVsuyYkj|4%*p((~$x1sB zhGdrF$;sEngK=@XB}Tc;4ZbhBl0vz%F~zF!vSZY&|KKy7=eGN6t~T@Kk{p&}?DiYk zJdVs`X>`nJ!P^{*AFx#y1If2L ziSdFeNZNe!0uwUO_5PYY;-imXo|TJ=aL#a=kUjs`&?+@TDLZ04A%4#TIl$FnyU8kH zqdJ$0L}b_ajFu8psV|(X`BZ^g_u2eaqCV<`VRMO!C|`sDnz0HwL`i+cHSP*;LX1YK z;GLp@gkIpl&n^x5wIQz#qTX7*KC6J%pHysl?_7Oy(Qv@&GlN*?|KGi%Tpa3_JmXt! zEpi?d<&k%cup(HS@8|z_={V%;iTxANy{KE|Ab44-D#>#+i@QdDR-$}EseA^;%*(ew z;<^JP*hzqRcE%n#l<{m~!z$O_tEDh9(~ zM{r%!B2*>G;&YWUg>gyz`V9{oMo|*R=J#}Ae`;?O5fA9;j&iIm-W^Qk3!FrSJ0H#` zEgQ4zECc)4%s$(x0Ln1}-LFJ#Ft4O`ebEr$H56C>{iFVFmh7d7Y%&Re+TYPN=zmiFlh}&8_mETiFNw^TO)d9 z=Lul+d1yxq3~Q-&nM?5!54uf7z^At#ZF~>0=X46-aP6<&6vZJRAlTEVhL_Scg^j7u zYtXUjO3w6yX!kq4%MYoPF{*kG47S!%qOfaRC5DEr$+Y*ZM%^M!^P_7iU<$_^^w5d? za?P0b|G3HOf}@-aIatwxla6Uv>ab0@rif&brq8Qf-KjtjDcPVNN$ifXFCNb8ali0=!ayS9ycf?K9#kZUq3tzHlt+XK=!(91ou){)m zjuD?w))-z~3LN)P9-}$F$JD3IZOO;i(xMecn1U#fcg(QdaZzIsk`%4kYAlta;qyLe z-<}v`nN2~rzhSEH8kcTV>_JOMqoA4eKjqVUAXLzT$H}s6NxnHp<7Pf#k7zX4a))XE zt~9tPTzyaV>jQRcdRJbTZA5oq1}rR{hp+E=dV12q5q?s_(gCh8dE13@XIp~50tgAp z0DysdGj!VBh_8}G<+$Fi6Tl`HVFqTmnl$VPz*HmmD}^a3nN!ph6n9hnrSN{;B>-$= zNHR!#m<8$oi_sS?%0;z zxI}WQHmKUHyV0-P5~e^0%N5x5SVkQN$TD~wJGtR__2tt>^=kP*y3^yHqv`Y0C2+YLP5t;oy-xGVf!p;C5bg@VMCl;;)hagWiA2}g zX=|!qHfgB^<`$&#g~3NvLD5>G%#&xN9Q=k7&JA#oBk|BFg-A;Oev*oFW`HFR`b*)K z_&Zj)Ot-1B9 zcP({NBDMQG5I%CIVZ|BwV;(K24Meh_D$x6EUrz}pcK$ytz`=};A8l&bAw$W21Mo_! zR*1y|zY;j?Uw0u=Y@wv8gweBTwASn12K*&$adxz0*K3m`0mIE? z5SJt<*MTU;D)y4(H)w^};!iz4Njr8tHYSIc;WyubsVXMznS=?41cT?3qqwj9-9N5G z|Ab~Sd`1+A18Agf-Y?l;PHm3_DVs;5Mp|F*v(2_feTS&Jk9v=vGH40Upgxvh9s`wo zncUrAcxx75g%RCHGv?rm;?jKJ6HrYQkPirXDZ^TuGK0OBuJewak1(eeXzPA`%#5SY zcdIh!;2$$I5`ymLW_XCcJzNqWsWi$j?eafKvk1mQ8Xvkj;!)0O_Y#D8?tJn6dhV6$ zVy07NRMOsKws_Sg4v>uZ*6xhhKo<2{gwBV)pIm%Z1T|jj2)YKZhcmkNTc;9cgZfx* z2b?-5UdSt-!^<6pfY$M++s4ffb}d<~?PWERa1syCE>_NvagsZ2(P? 
z+&;WJOy0>htq=OTEpwl@Dl2&eRP`mJH#lBo7vVCyByoHWqfo~Vx`aEl*8`!{4Zz!5 z@7>^|GDYx%4DNN_p0aeUa!=7^8ry#)v z{Gue_@L$b(MUtkX($?Tz$6K!fhp_WoH26h2QdT9ivPmKNCPn)d#^G0wqxnoAdMObI z0c=nkC41>ONp?I2o&M^qayhegv#NJ9wdhfY++`4J zcY#%mP&Cp!bDuKg3< zm<(6N$N5$gb41H7O^@0F+#l6%oQi=+=Zh*wG!|w}mWXg^3hwUTap9XD&q3$B<>euW zi5Nm%n!ceB{uq_T>iWvKs9>v-AiwKYs?Lm!ZwRicBT zPY#DLslVJ|Yraj2H?M2pB`; zenr`3kp5uY-pm;i@}*UuRiD$-j6H}kQ2Q8PC6c4}yX}OzY$- z*buLr?w693RLNfN$Qa0zO9($(05M5wZf=~9msb-ynjfz4mdgYtZfEpFaUshg{en);NmB9Nx5JSFqy&RrUOe-FOdaCqxeSHtqFC`r(BoZe^ zue4^Uy$dHrH zM}$Oqg@Dc$&9O8k@kYzHEx=E#c_$({DI5gn|9rw@9p#crX2uolH%0t~v~;~1ndO<_ z#Q|MQME?KGb^FXBiYl{sxWk~WZ%X?96y0U6ED7hmF$f`k$jK7)TxS|wxitS}^+$s3 z@wE^mkF>+oe%3WRO2zgdzVX|YSB+s>D*}yu{s48=+sb4WD&FW~PEM~|nIRPy_uk9J zGe!E=&*@~V56Rt29k#(7^b{;wo0}DQNXm|Y#=!*S8VhJi2O(k3zJ zN-fUXMG5jg2pueIq_mV+P36G`muZW>Lt>XBk({%lyX#zVMIVW5kKQmOh#8G5+k4N$ z@eJCCuQN`lYWU35pQXGZ$C5zq5a;m>m^}%Geeg;XAm@RdL;A-G@E>vItq+doXkp1D z3pxnWyF_`5?$P2G=^PGQjpi^_=oU9R4Aa3zaav8rf&vQfmlISQE{Ww!gS|#AN3$sc z%^rYM0AI9%x6k`}KMwEM_)Rrh&|$mHoJ}H&p%6`)PyQ`w_Ae}T^$EDT$AboGLGAKa z`0&QoLmt?ACKDk2+_4pLBJsOFu{dVTR%7MGuh}Ndv)WN%KnGLporw& z_%vgwRe{3(6%6S2swmN$mAf0|K~aBIybfB3WRD;fNaAyHz9r&1l(ZJHVK? zj_${7OCfM6JM;{)FuWKXsNx^b-S-#cLeTv#^2@_Y3&)hnaXBlgYS`sm7REDYs|*c| zh5r-&Jl>^dEno=0&o5Ogma~-;%k4;@2jE`)!T6;};W_AERGphc{Gb7MT^kH8iawqP z|Ht`K0dj6S>F5k3ycTZ&LQUiGjK;cw5!%ORGB8=*&kLnPjtvd1qBip=<5dTP8!>-S z7Ha(G2P)iD4fqwP$hw!zv`v^kgt*=zAUB-K#6i1s*7iRrcJ=9e@6U=)7k?5#hmCiD zS`YdEm|gU9X59$FLJ!zFpqVhq(Ea#-tCN?i8}v5M!uto&B^qZ7V)(ZqWrYzVV{>H-FoFA51QuDf!j=#r#=9)ZZ_$U^9kAR#qQ zkR6YGv>QX17967X3*9~u5|bXsVC%q(FyZU4K%2Y zgHP=Dau}O?xvwUKo_q7__oWbzl0?Od_}S+x;wy9;Cx7?YG}JpYi>lkHL$>;SQ1F=J z`9Z>BuQkZv98nAEd?8Rxnfxqq%I(!5)!-n^*q4#|7i~H8{R=l11}{A|4}cJb-& z(f*DCqn@Cm5SX^(yss#QH#3rZ-$oq};o;v%rWP^}WWb}7(2m98&IzPshDJlYQ>o`7 z$EL}YG3!F9JD$KfUdfjR^al<$OzOmKBh9&G8+HO71!|7~q{sMz9h{4k<)4J+Z#vhx(2ZNCFi0f?fx6WPx)Ab1k{@p>!f zdS*g+iu`$l_N%{vY_fuB?(la?YV)D*aUi!cecfLmcsIZ;R8c~T!-n&7-T3TetdX%7 zk{M{`%o@A~2;E_mrUJNMbat{PLulc8YxS?UE@6%hP?d zJirPoxB~4?7;u#yolHfUr>&MI4F7@pp>|aJxGkYrXzm2>W{h5|Y=5>$KBbb=DR%K+ zz;An4XMu?Qbd?Z8&_A|*f^p^}r_MMh(dO>(hQwB)mB?QRA`)_V*E5q+JHkdfMiT22&w`!j{srGTs9usmf;tf`!-}pAOgyLA%?V0*C96PxZu}KH|_V zeY42~B*NGG+&VDK@*M=Ap}VtPZ4c-F5dI$`cJq?pw;KqjiXCVLR0`u48yU7=d525K z)1Uk|X^$UY+s$?A3mmMnWswQ`sDQ|nqzlnYXh@ulgeRY%rZ_L{U?d@k;XL42dGGYQVO>C}cNK15yVnG;jbpc8tszo;P_ ztY<<-l9}**n){$pbWnKLL=FNl?31kUrzm5{{{qz=(dCw@?~vSe#pX-6&;oL28#QY* z4vVIGN$bGM*W!MZEahpB z?fV6tgf|Ryw74bTK7!VVPKP({W(AYw2iyHhN>YE5g zcE?UtypN-zeJr4yT^^QN6Wsh4amwFyJwX^A?|xNXUrh)V}Kv+Mm$ z@f{<+j*H)?dX>Ij$Vf;U=n=sx1fnJ08zxc&-<^v_O5EcP!tPsSSfzZ=fM+=!V0YAi z$c7xJ0RGLw#>AJ#r$wlW8A-sAM#gbhca^pWD)KIzb%@`-RdG!= z8w_JC^z8^k)klmkc~TujrAG9K$CIKe2|65RD=*!#?;VvzqN~Vuz5ozIFHmUgc{JUU zP}=R)aSKE%EvVKDm_pozk1a4IpILER-Fn z%j-!C*O(aHEFfZ|%s{`A7b%cUzvRjL;Uh59!V?4PwbZxD#o8_EZ(&R{!# zi0=;RFF0|hnr<#oQWR&X6VgxpR{ovWEwC4fd{i@>kwdMB93eH8Q6Qac@@+7;_M4$1 zRJ)DwL<5otr;;Et>d%2NFHuS}v5U{Ym%C7EfT>C;>EG&af~hy`-*~MDBuZUY_PeZGMek0ib`+# z(;n>TF-why1flC>F@NOky!zeqN(W47^4(?=1gEAF85R&~6t552@Sgvg0_VE22fh9O z6j%c87}90H3tDGur{Oc+UxPf1@7;cK|2n!j<{Lit@<@9wF@3f3dr!>u>9`k9tn1BzC8r1yc5 zL>omL3d%`8OnZIG+)dUF`kwUTTvZ1ujWx2Py(M~N^6pXXioc~WVxqq)_s*rH)L{ji z_sm+JsgTDpfjgodc`dZh=b) zHBp12?dKHg-i!PgL}p%16kKKrazFLS*lFp3iWoj}0sr1}KZfc+bnOn!bpJ`k7y`EG zS>tcCKR^X9%i%|w>A@a>7zw=?q*Us4g$Eb~q|{Z%7q!J!wU76AVTeS0E?=V>9Z1A+jcZ? 
z_TTmOoSiyWeZ1(4USI#d3@%|J7M3`*oUz%=?-koW5`GB8B;h4$+8_fgXzDdixfF~P zE-cFJ=}ZbhVI0ruY0fYRMO}DnM?qe}|55;DCDQ?V!lP@D9I<&r8A z3+(@oJV2AsvXw&8;}TH*sa&|zT&~^m4B>RY>lFY27V}U@AKab zViqj5F|mTSZ}L!y1&O9ARvK+_Qp>1*m8;h1mq0cTfJ?7|%-HckNRRci(Aal2!Ev*W zLdMs2+FX9Nyyioj$6`SdG;n^>($X`jGuc@2p50kSZ+LY@XLKsO-A`_NBkT-d-Ixq( z^oA-eILSKpfDm|Kp5&_r4*gU-OtbWT$^^E{+?qBK5rt(CGq1n z*e%f~Gns6xcgzB+aMPWNAK|E*QAr&>uPYc+nS4(wTMkuedxe03lnO%{jQE=OD^%Iv z*Tmo6`Xd-2Ncw%$_(wziSNHC8<%|}X8^6?%dP?16;m#B(6-s`~c$InmT|oW5sg?Bq zv9)9_qtzdj1LigGI?QlR4i4-K2PZ1=cm9kwn+YeL?n~g{3XRs84d)fWYex<#%-!>< zsa`u$OR!MhW2NxUk_`cec1E9Ql@fXhS1&bH;^E_W!i4e)MuGWGL3Z9>Ni5R;uBcz} z1aouas1EuM82|k>Av`lTQ91q3bq|##9=L0#k=IsWZ&Zv6rm%l&x71=J(P{_PnvY4n zc%(t)mS;9>Fn8Ca@;NYXYjp|44>V`s$!_x|sw9U!k=0120o3C?&+%#KmlBJFZTH#ha)d>;YWeTsp5O~7`l3soa$qFI-2RTJMJh%E=(`rx;uJ1 z>av^h40wWz?}~-HPFpv#9CkRbWDphrad|%xQr_5=<$H;=LQ%HPsTJ#c4>_5Jc&65A z%rx=Iz7I$-hiULD3SA&H^P|VISKMboH zoWfH>Hv2@DX_xye;V8GR>a6{Wd16HbuBw4UGSm2JZ+)+5)}F(bEDlWMa1WCk4*=a& z1E|&*o*y8(OBcbA506Hf#qI}BifKcEis^Dds)&+IgLZzvt)CtC`S?%sN_Q@}i z*KM~7{;Za?RdR7|K`)`UR;LnSz0p3M@zbNbEmsS^VTNP+t!x5K@v%VJ4?S*OrgLB%}r&{ea$E6n1xQ zTi&wls5=m}gz>glPNwBq;_!FJ$A%&t@{3-rDexl}q^T1Xrl%|-s5V$-62<$^GhiGh z4L8F3Q$h$QeUA+L`O}WpeNG{`L$e_<-0Sx@XJ^(DR=rBnyC$<}q69$WwTiHXLhFWGLHm5PSFN2z?S1$loi#a2hS6^nNg zPNa;A9T&4?oKDiXTrPeJNE_(;jWtXI=0^KfRVIJytkxw$)tUifII-{LFwNy;FvfgL zxf1$QF-uR8=)YhLp~b`VR367Hdoo2`F?!}G7noA7=Mg7L7(|NKts;7dMU%z}4pt@z zF3d4@xO z7f7Pq`MP{?M_n#d_tfwi4Q!76>u4 z_L~8f#Uu>3!&mn<`pnXgTDQs9`(Dw-B3Uaw5E+$jya%i8%3HRR^SuQKw!8;3aI@n7!{h8e9aP{#Ls5hSTWy{64fI z=^6wTx*15lWOx4aMMTQ=?PY#`og4PQe&>mXN zDb50DU)r+#c!aEd0PjfHR!Sn_@wN#?qqeVF&U=mE66(q>AzKmU zl-r|)v-XUWd^i+*i{pRXJDZ_91H{j}-($ATVO=fe_mGwkgzOfTQ?IUPgw&S3yiC&Z z^;I>ssBC76qHq_-J@$_)DT6yEEapYyJ=eMD%_pmwPfA4j+O%bqg2gf~ck30Zv`RoU zSXgCs!cnzu;js1OkD@|oQvFQnd$pkV^z;lB9xrW})9>Yt+K!zEMTyYREf#AQzw&3G zJlZz0DYCgr>c59$CvL|aObVf{C*pX*le!DkaoK9>iVC9{@WVa3X&#KVW*3c(_%COd z=;i^(^Ah$Cbk3zJXVZU!jrLhpbHqz%1?N z)Y@eBJv3TV2k_AL+hR99Tx(*Dtt97^ysSE?;^O^Cr+tlQ_NENt_CqVT5*Ql z7YwMu^4H1bt#B(=N@oV}k*1E&C#6;IBPKop%Lg;%l=s9O5TQYge9$H|F!=n)wbbaI z-r)H&%jfi9&m*hy1;J{T?FSkAF8iAfGc;l5R#_&ah!mdaH_t(syd@W!xn$Z#)Bn=~ zB&_pa20_6fn&H1J%F$bjh(L8x*yW4w`B!9-B^*SbSBjzyoxj=%V;xt%*a9BzfXx=ayXOk+E%273jo%-Mc0TtmKg zdrDqiGilKFmP+Z|YZ}t^<2G~MR+r9T{y6;kNoyBsvY&5Rn$>TabHltia~^!8Mb}3g zEY#;`>JH1+{zptrAA_;$6@6AtsQnt&%6^oY0gl&A)652W=5Ts@y$3-T#=q5IaEq{6 zs><~vXs4rFMtO6m)8q?DwPIDh??Jcsuo}T?_0T%Znt2>QB9?hBNw1x947Q%JY}Rph zx;gZeJQ7_c3(!c+K$F#jrp(DFOy*wNzO?J@csC{3N!lY}nV+^idwbolw zgRSb>^1+3zFhLHaP7-Y%^AzQ*jeJg92Ia0VGOr_Dwny-#yLBhXN?$gzjLLA8n#r-S zz^W#Uxc!XaKYDiCkY%Zx=>5c0AD5F9P^nmn#e z?~Z*yIy|~@xYe9a4F=}GSWPBfZ4O!Y0pmZDwbKs=gF*0OdlTs?br(N4#P_=Xst!=^ z4;`mFw6g8p7-Z`UtGji*(MM)j$(_jUnaz<3quG#`PC39LRI&Q*eKY3%#Kf~<*B09M zxHa$%K1GrL&$PCp_)-O<>WI3bdSZPDu-{Xez$8`{AX^Ozx$D}&Bi%gxAQTC%RgwY(DGxe++sBd zToXNxd-B(Juv$-V-)XpB`jK-Hl#rGH^G@P~(7n2wI~ke{nWx?^PnE~J3I9VnLY~tn z5rL4p6Rq?oy=9v7OL>-G0%z^ij$IwSHld|o8@s^ZfU#9BF1^syJw2`1aw1tfW+J}p z^Fz(*r!B?#GDU+JFzt+JB)#MVJm@$1Wo2rb1H;MDji&C|o7>tm6GFG`KLT2lGaWGM zx1)5Xx`b=p(`YVp>U6FJC!y=^vO*e)GZF%Y#Bb{+YAsZ)?|u8jj;LqO#3;D3H)2;8 zzVjlz@vP*dR)T4j=&YpG3S*jzCv8}t7yOa%nWmlp=ndtgk7iTrw7hHGZQ!}>la*bHihTdp8lf%9i>CP6(IDGe6^`9Z> zoJg6_MKV*iwkWIquml3yVSHBtjbgIl(`7*Bz17pMll6RiOR?kT!&|kgB(7F`SX{0qt5-(|{5LAG>uJ+Fjm| z`(4M)&NC;AKW{19O4$kx0@0>9fZSeAICdqGJ{ye5%}hnRu6_91U8GnDrdj{q=ug_# zCm33OA^r3_5U%6HWVjj&JX0%22k_Bpzj}2I7|dz)EDQ{zQ8KvD*)Yop{Qj$t?2ha2 zv*{i`wLn}w_)dBGTKlG4mJf~kD2-K zoUqD8fBWb*Y?$fygLsYwj4g=VPjMD!;p;-1tt_tWV3H{s7n6N%XYW+E3_);)ie$9Vu9fvvp1DD=^JNRm!gm^{ zXTRIw5*5!vGX*WzRDoN#Eq3{p$!03LB~(lqrlq`{o(E(`px>(`;U1mY-|#&x^;ttF 
zde3|ImWi{uRNm~s1*etsYxJf4WR9qi_V7Is+O9tcB%SAYbD8tY-N_8uD&QHM85R#B z(svk)EIbj64)>^&%G`%H)EtNu0Ra`=#0?G8U@&!;ZNFTPR0@OmT80U#EUZD{7)d|o z0}|{ZR(tE0%Jdx*lsJaWnjb$3KMA^hkRPk_Sl+EOiCPgU)9(~|yjHWt%>gZcEzR~0 zF+ApvM(GG>P#C1#3T_)qCFJ}oGPiwe?Ov`U`TKS&uHHBCGfD!2E_Jl|2Jectg-oF>-JHim z0N)FWd3x|UPICe|^a(~b*HKO48zsRT^-{3O2oK|J>%f%48NXE_4$Om9{YS8=9~sdN zK&TZR@MPZSJtDVsb1}(Tf&eNo{+?rAepPBm5RpB5`QKEF+F<0HB})pKorh_v*tBe7 zElRN9jFGDmls+w&&ngqx>KpIohLR;>2>(@%=KX`n_ssH{?(SHWRK(l=`RS{dQM$Uj zR3Au3#az4`Y28RY9WIqewynPV#nM>~pK_R0(E2DxN7{OPTBgCUbAQ&^lw3A1LNDD8 zSAvhC^>nR|H=dk`Dbj5hc)B)`+@w`5Gi)YlUa03XTqtG!-T_-AvNy)q0kjg(+a!K*BGN53n+z3K%@mRZQ-`irJ7I4T{ zwLyqVD=L_*JlweZ#+p`%KE7_qNqy@KOGP2l@o`biL|MhyPnHxKb`%>VQj!+{GySl(Upwtx||K9P9Jkc^O9$Yr|Y9GoxB^YBl5FH|X#`_Gm# z&>%{bwqE3tB0DXOXv!Rw51BfaL}u;;!HpOudKzysx^6GaPt%QM!52P;|6sp-SSgcV zH(0JM%PFYgEfN9@A}qB?2SH@M8a*mKeK|`hbwGd@3{Z*1nMR++#?|a)eKr6_eh2lC zEpF(3+6MxQBC9}kAt{r;jTb|R6cnCoUd55bo}VMWfJDwMJefH$<5-NYf|oCB3C6iR zJwk&Dm6blZo$yw_)+}5h<2)9SR3}_R++vOPfRg;GW^hDB+TG|(X(C83AixLuQ1Vvu zUxw_4xsZ)sFci!?q<#8ttqN7ADJ!k+#K7_9+Gy?KuT@@>K(^Lq`M$TzQ%gP=9!yO#jY}hHfh^AojU-p-41;PEAyd%s;4RD1 z52PvX>z~6vnPDUFWOhvZB@djCVGt8DM4B9EihR5GUjY{Z)AUt_Yozyl^C^c2#azik z?C8r9rh{>=NRRkORIMt`cGCjV&?#c|1H9QXc;2)yXZ-KPMA;iyg#(M{obtTt^P{{H z-@k&ylYOk=gEGpkfv6_ZzuYE&ol@CMy+<-9_(8JdBP3491^c0JHn1l>-oyi%8m2Vu zXT#BYe)($=sK6nl{jdiv+Xb0~0mX9oxexxA*n^gfHKw5tts1`~vnR^E`iVLmr{mh+ zOPQM&s;jGiPwWWVDQs=kDJv^0zgEWt#i(bLMTLeH!}$xc0(TC7RLW@=-R10MyselrIv9*ElrL1t?+_dHgXsSBI*`zmXWiv`Lk*r%Bt*o;R68YO;pK~C zh+Q=x{%FAb3LzOE5>gNi3O|b9Isx8OTuD{f2`gb~udzjaon6>q5Wl=}IupG8xZ+_h zz}#k+lW6Gg^+i!W91)~Cj z3ynK6W-HQ7HCG-WO@Ml*z67cdwec3yZ5c0cMWOrbG0H}2{-;1zO3-6fn!C;|L8Xd1 zRTMl9V_-AJCEc!V_qx+h5J>s53Ob-}Oqw0xUw!Mn*>wOvr=bx5Lru4+Nk1cn59zuy zu;1~%lI4w&AOJ@$Q3@=T@+`RZLjRJ4Q;mKDDsM_;MxAEmyW6vrn`kiqj0ZFL5r?)D zi_GHm_Uy@0_BfUt65y9J2X6Yv5{pmwUV5I)u z=QI+$P+p{dzjIr*byRpfT=7gFs(=9UK){F{Mq7wEPxwOO|9A89FZPE!IqIQrgfch< z@2tHrX}u|uRP+rZzPW-k4HlXwP<^a;T%n@8y?43Ke*2{9SAi@xN3F0)|IKuyXfF! 
zXzy10AV>!?Z$<<*0gzSJJJZKueF4GC_--4c;<=t|TZ95NFb_fXpc*~@d&|q7PgiX^ zh|YI4%3(HkIMMc1$p;O5DjoO*Q&=sdR`Un_~UlG*~;Z03C{vVMOv+&ZFazJ zJzEmDcZBQ=ebOsd5?`bXy#-ThZf?#F(7;Xq_TGc)C5INq-Q9hwWe0M zrk0kxhK9yti|s3k~~+0bCItYKt>JKCn|DfNi*|8{g2E zlIQ6J-s^Js_&)2sm!$tyn1mVs4&A z7LowA!@z1d;O8e!<#&&0kQe|F04E2KcT7jWE(ua4Q-p>>y@zLx#)e-$A;-XLQK3kF zh~gmwG=H?;g)`WID1Pe(M4N4V349|W!1h`S3#21aq3o}hQUlTV$*)y#mEDPZ_E$&e z-_N1ibJ;tYI9idmv9+yueivpV5!cU-4ZeAe>;W&v4N)?p!1&jT?Ot_Y03(6NqO*PI zwcKdW01A*H=CKAhM52+FqgZbU2?p^K_TR%FRok1PH1h7#@jFbq4e=mkPUg~vb6ph;#0fJ7-@Ivf zfQv*fU@v2M4%(Ij_n*%0Xi@>QKmADTmyne@3yT;*NNn-mXxu9dJDqCB~Pd02k1X2;3*%IdgG7V`7n!HI-LQHUY@Du~LE)1v55 z{4}c~Evu-gcp$|3!J<9DIZz!p-`U~8M;sp9RYC*Cl`=W@tXr<41td})=frObnr|9c zB9Zdg%a$H(N2e7}D|o?v{`#Cj#InY)r`KQtD!=zGo*%sYSmxMOhkWX#@>^;@_Mz@e z7ee0Z)pCV+;+N+!fjRiGr*bP<4kgdoCIzp$^to7KgPRN`kO0{iR92V0t1Y$Quav*9 zWv7Nn%$XQ9#@wc`d3{EdBKEo6LI(ed?kMnnS7~vdN#F@^yHJOqVG#Sp13c3I?PQQC z6joUIr^bnciy8P6*Jm;umg|RCQ4{JG5eW!oH2kF8f9{fQi-JGU44P=`Um(X2jWAE; z%XhnqtSsD;#J=@j6O%okWC#x2fGS1qA~Qxt_FbyzgiFCwYpax6YYM~1`fx>?8TkvJ0S0Fl2iI(ybW)s2mfPrHnJ#QuCv zgU2vZ2&za$mv>ZP74;xhtN5@c!>R>4zrlU^MJYvIbG4qqxSP2Bip~HR>lT;;QS~vp zUqorT2E%-HC`DMxQBq5B@?Zr=cYFEW7r-iW;lrwYQ^Q;3`V*+$W<*a;**^dBj6Tq9 zar%rXJL?mt|6hC885dQO?H$M2b#~_c#+h+v#+lt6lH_o2N1B|Yl7qk%=u3AaAWi62 z5F~>VCAJ_rN)iy13<3fUf`TAO5)ma!5Xm6=y6dtZXWy6ialcg6Ip=>)g?s9ps(bqf zXXI`i?0OJ+PxFM1o2;p_=IVsKTirb86ova4g!R=@a1oJ6lytekj31qi9xeV}0;=MM zrkrm^hP+b0UTk_675rPnNyuNz`3+^~xQA74`ZaFg68d_?b-+O65>#<~(*(Ma&rP9g zC+*`5oj8r>A@l99=Lb`=ya0Uc<<}$C6Z1xw9Ub3TjWP;vR?FnwAXGl<`tj)TlSpg% zKa0rppPbtk>Vph}x{jG_Qmh2p9i3Pdt|w@O<`nl` z52^m|)7+FMYW_u`T;w%Y@y)seJ+=G&dbLegaNJEPtuiq`apz?DGg98axEbn3Y+_1r ziO92?l3^xur%r@l4}9TL?KT+s$tHFz;IsU}!YRpt9&^ZoO^)z|$TV{0d{l4BE{SWT!r;hzHZ7Mr1n*U^JP&Da>E8OYd{i^7<_Mgbx z{-eU-`EzoVy42%$g|pf-r)O{CDeBoLkNwcr-*N9kt640f`h40^B+&YAZfZ?g{2#a- zuGOCx4(UhM{HLz?9LtrA+fnJC$=a}IC)XF;lE^Nhh!i<9cjNKLPITD?sw(Y{a~3(H zM@t5;7g#SU{LJ;LZ-n{=DqY$u)xKQSFIPGn%DXCv{_TW93|buTC=!(Lb-wUl%{E)^ zNhRheI~ITIFUAcbv@dE^pLgKjd!P)bS?3cX76$flvA1Tl%egzMHgJ3|(nq|OE|xWQ zTPa^!a)un}sLq#pwEizY8r9Y1NEJ$ooO9pkn($4-vGS+zBGH&Dh@Cuc^fnUU@2t9+k5s$nGuYM={ zPR%EBbH}eUy3*f=mj;e!J3YKECtmIrxz1`V{_Yg5!hyZEpH? z$t=%R#7^lQvyDpAU*X;d3dH07b#j^erlE)Zb2B|l|Gv83 zG+ud#?&V#fC!>(}Q5aYTg`QqutF!LkMZODB>Hq4NGiDnCd|&>1B7xr@J!TRk`WGB8 z$nX~IPYdC%PjJ5-5xTzla|ovU3qSWZ9**+;_@|~01@;L?g3J>2LcsT({{;T;Aphf& zInMXrmHbcc!*%Na`sDwyYybatH=-dyJ96a6_qrOYX8-QgKB4vV7e{d~f9w#T>K!@q z6P>Mjj!kuUzv$#ZWjnbsWiB&m(sXYJ>P07-C)G>BndQbj;wx~O?#bdx1>^)^0Z9Qm z)4`4A#B}76D2wgMYe@B|UWWjU^cT!J7N@7GuH zo@&c$L*%oTtBP*IfG_I~SU6C^Xw7sQ1@Ijl00$k9AYUAnWPO-yaRv z>B(aNgseb?QHRNeGh_VV>x0wTt>GB6UD z+>)CZfU$h@R9!tC@d&!tuKamN@an_TO}TBuUDuYkULpLiwv|76<-G5kcKdkSytWT$ z(3N5u(|9eI(NlX}-QraRf|wt)f7+g!?tgqXHU@|&JJs1^uJ;e#mjV)PlCE33|He!| zFB%a*_h!mmZv$J9p?=AGc>~c;R3d%1AsLC^OkRT9>>1Zg~m- z>60?$XIJ+(8~3ZXfi*^!>#`l&_xkC}dylUwQpai6m$6Z zUY3Q;1V5eMuLV$W|7*4MS*P&lkpPYhNm{pMmWygLfB;(;Ndw? 
zL!Aj&oFWY-stv?VT6$-;-OIGJaw@3`o(k6MJujOqciu|O6hQ!-@>Km(FXB0`2Pm{W z?~#Jcv;AfZ1Zil`LYPxRXFkCPyQ`BKUKNEHr075)m9di=%A+7i5J5s>n+-4-(f%{w z(l27Jk2~xHH3MvN8>mRVj-}%%8MPR16$A&EUY^3EVaURj0E4XoF|=bv0S2$rs*$~| z4%&p+moJL8^0wqF_tHjhpI+)%eUZhq=hI#;gXiW1wWQ~@8b7@q->fVRdRG&sp(E{) zOM&fEatQqzD+v2WM=ri(IbjZ%a?VsDr`*(vta&2FQ7?t@>I?>-G|MF=-w zI=_2vQUr_%z6Iqd5Ne~lLQM|+)*Zwt7!GiX2ip|JWswPE!1xY2LU`4 z7v=d>;XXhS^I+Lac`rp{0fl18h!B_`?IfPaXLm&t>`@4syrtZ2oSm-`<246N5J8E1 zp9$1(w~RMpgkqnx;p5UVajwflS>J1G4Z-Qy7A9klhdc?ylxYpMx5EhPnQ5e_9S2x= zH(w>y-r2QJPnMM~i6=H1JayM0ju=OX- z^AsR*^JVcG6-QH`jVAaOtxp;)_32BHhQcj7`2iR18>z%5ip37pBVGTjD8g(&5TT69 zY;+Z%Xc=Vz&XY@C)8_ThF$RKElbGct71?)gtxiZ*rA4i;ZHAp5}3 z!`5iPf^#`$!5X%A6P6BL?}2;^&A~{=Jvd2%%0B?KvBW_FZ7@v;Dz?^*ucetXpC#I8 zRYkRrGndaVY++y!Hekx|GUg8SX~9>c={Gq!9kOHULziSguQ+TT1O=c%Mi}ZIw-L4@ zf?TsxMpHfdUQ1zqNCP1TBE|AqUFgMTjAbrr(mAR;Fhv~q?GX`+=TysU>RO4vi2+Ok z$P5sK#$S+^SJ%aYJrpISIh~esW~}f)ND6<|SS^~>W6Uts;!n-Plmx+Oh^p;slmaZa zLq_q9E9(IRc|tea#GCx961+8_V#Kq(ze6z>; zEVh5>?cDM%{|GD-3jmvbAxQ7=B5s|-vKv?M@{y95CE`QhCRL8DOJB-}#xnGYLSa3pDB{gVNP(H(%L0Mf42 zJvycaN@^Iw=nKafGA1EdqdNC6b~+Sf4;!v0FyyAJEBt_r;x%7m(bz~yut7YZ^XHAd zazD6ZH>Quu5tDPk6NGRGsz1EQ14H=kJ0fC;Kw9Cj=7NYN&}Vd%e8jQ}MC{_stOKTO z$F)L)nIs>t`qr`#8y%$!i`RsYR1Z88qaN&&w)QaltI88FeB^n93kshj)Nfjy9ndmScc{CKv_Y@2T8$vtIpGv!(_K&JZ|PLZr`< zK**4BP6jII`r@iZ!C2!1%IN2fwG}K$(z1YzhLtOJ2Ti^D0k>XbF^Rf6U(uM%nwLog zVnTcosEH$Cl+%bnX1^jJp#m(x$Z~ijL{hi+0(@E(oT-7$kDMWHGt~vSgtNIGGYDD; zu<+UjU{XE+5=9u{DPmKxv-{7JR|r1ZKqmVlK@(wuCgC0iC+7na0!oBTYBRzIGvEX-2D<4 zwN~bsG%zn&k2xzzSXYPoyj|iJmYLCs%>p`DXBdYtTnH@bwa091F6i{(bpaa*z5*NY zOm}~B_wLh~WkWzSI*Ke@m(0$&1=w=R4zkG{b=g;#0IWSkU-f5Z_*vweJvO8VDM}c0 zspZ{EW)6Btll9+; zU--z^n=w^-X3-UJA@U#f@u@A0j}3n6!%p}1mM?#{VHxd@HMnq;TzgCJw6D#!c~sV1 z(FX=mdnL=BNolg)ZYTH72w0R<(@!H4J(MJKggN8U$m2o89w3rE?FwqJ@Gw<9_e1!# zXdhyTtQJ)h;K(VW!k4CZ%|Gs8ApxW%7zGguLM3uzoYh~3JTK)rMCKwtPAxKG=Ct^ATpy8m{rA@6}sgb_6v7>|8 z1nvZpE#)1(IOFSQwGG5w!(Jc)i3SeCwBQs3#WE42*<@tD9`iN)@Y4cdjfgoMK3b)| z-=I!AFj)a?JqVX_)29;gJROCwmmZ%FBZIYr$f_~bMm%eI5{P&s0(*Q6s!Pv3lBWk#LHaq+ak#*4@ffqzG1T7cD~Rts4d6I^ z7sod9M~krlLP%gSYylryuzshbx@p5dK~LGb2ZZ#o=@!s%*cdezv663=wI9@Pb0_t0 z2rY(91V0@ZzJ;KVI>+k!fd!PpnF&bh4gdssb-7^8pt}DuCTf=8{>RkB*}xGze~j=h z;Nh{OIUFy3=4ta3!Zy37<)j=tyCY|0IIhk)FJ+RV_?~;023SI_P~iE5+j?5P{XQd_ znu;{vPqtphbeZRlplcYX70koIz@s3%;FD%Npm8FnXM)UjS_X~US@QgcDa!WE)k2UV zAiCHk;PMcIdoiwgiUt-$n*wV5+-lHY@E8K+u^r*J-jM8xGEe$9}gvh8#)ADGr$f& z9j7e}211$;WFV+d%mSRo*9|zd`)wW7siVm)X#PWlA-H+dD;_&rlClE<0N0yzaj(A( zEF*-5XvIBf1;;Pg!G62`0p)a=h~#sBM1ZtgGo93&fY&!Lfj}hVz9x}V(=niWy6PD` ze0vQH1CKc;{rq-jmzb}hhcHEV8D?PpfKv@brogRYw{Z?!2W;AjG^zu$htbH{$V7&0 zaSK5NJN!UuU~G8D#gyBiMY0+Fz_yq1{MgB=O>IFK{HLS){X?H5UK{8V_LW(c9CdfD zQ>-~m1^CBcG*GL@<@#u#&bWx3KFGsQIwSjJTe|?48l{TWwT1r9+)S_Fl>=xf)83$( z*Mgw6tvfqYR13g{5#&HG@CA0$q5(Nb6cE9w6aeU@r|YndN5WDuo)L>6Ag#hgRsnM} z7rgVi2AEw9{%Le47HEIAIy_h^3t<_JA4N+0)>mtGtm?-&ApzW?eq=e~_TqN{13 JQLbhm{J$t~5mx{J literal 0 HcmV?d00001 diff --git a/docs/source/design/arch_overview.rst b/docs/source/design/arch_overview.rst new file mode 100644 index 0000000000000..a9e7b4bd69bc7 --- /dev/null +++ b/docs/source/design/arch_overview.rst @@ -0,0 +1,274 @@ +.. _arch_overview: + +Architecture Overview +====================== + +This document provides an overview of the vLLM architecture. + +.. contents:: Table of Contents + :local: + :depth: 2 + +Entrypoints +----------- + +vLLM provides a number of entrypoints for interacting with the system. The +following diagram shows the relationship between them. + +.. 
image:: /assets/design/arch_overview/entrypoints.excalidraw.png + :alt: Entrypoints Diagram + +LLM Class +^^^^^^^^^ + +The LLM class provides the primary Python interface for doing offline inference, +which is interacting with a model without using a separate model inference +server. + +Here is a sample of `LLM` class usage: + +.. code-block:: python + + from vllm import LLM, SamplingParams + + # Define a list of input prompts + prompts = [ + "Hello, my name is", + "The capital of France is", + "The largest ocean is", + ] + + # Define sampling parameters + sampling_params = SamplingParams(temperature=0.8, top_p=0.95) + + # Initialize the LLM engine with the OPT-125M model + llm = LLM(model="Qwen/Qwen2.5-1.5B-Instruct") + + # Generate outputs for the input prompts + outputs = llm.generate(prompts, sampling_params) + + # Print the generated outputs + for output in outputs: + prompt = output.prompt + generated_text = output.outputs[0].text + print(f"Prompt: {prompt!r}, Generated text: {generated_text!r}") + +More API details can be found in the :doc:`Offline Inference +` section of the API docs. + +The code for the `LLM` class can be found in `vllm/entrypoints/llm.py +`_. + +OpenAI-compatible API server +^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +The second primary interface to vLLM is via its OpenAI-compatible API server. +This server can be started using the `vllm serve` command. + +.. code-block:: bash + + vllm serve + +The code for the `vllm` CLI can be found in `vllm/scripts.py +`_. + +Sometimes you may see the API server entrypoint used directly instead of via the +`vllm` CLI command. For example: + +.. code-block:: bash + + python -m vllm.entrypoints.openai.api_server --model + +That code can be found in `vllm/entrypoints/openai/api_server.py +`_. + +More details on the API server can be found in the :doc:`OpenAI Compatible +Server ` document. + +LLM Engine +---------- + +The `LLMEngine` and `AsyncLLMEngine` classes are central to the functioning of +the vLLM system, handling model inference and asynchronous request processing. + +.. image:: /assets/design/arch_overview/llm_engine.excalidraw.png + :alt: LLMEngine Diagram + +LLMEngine +^^^^^^^^^ + +The `LLMEngine` class is the core component of the vLLM engine. It is +responsible for receiving requests from clients and generating outputs from the +model. The `LLMEngine` includes input processing, model execution (possibly +distributed across multiple hosts and/or GPUs), scheduling, and output +processing. + +- **Input Processing**: Handles tokenization of input text using the specified + tokenizer. + +- **Scheduling**: Chooses which requests are processed in each step. + +- **Model Execution**: Manages the execution of the language model, including + distributed execution across multiple GPUs. + +- **Output Processing**: Processes the outputs generated by the model, decoding the + token IDs from a language model into human-readable text. + +The code for `LLMEngine` can be found in `vllm/engine/llm_engine.py`_. + +.. _vllm/engine/llm_engine.py: https://github.com/vllm-project/vllm/tree/main/vllm/engine/llm_engine.py + +AsyncLLMEngine +^^^^^^^^^^^^^^ + +The `AsyncLLMEngine` class is an asynchronous wrapper for the `LLMEngine` class. +It uses `asyncio` to create a background loop that continuously processes +incoming requests. The `AsyncLLMEngine` is designed for online serving, where it +can handle multiple concurrent requests and stream outputs to clients. + +The OpenAI-compatible API server uses the `AsyncLLMEngine`. 
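As a rough sketch of the background-loop idea described above (illustrative plain asyncio only, not vLLM's actual AsyncLLMEngine code; the queue and the print stand in for request scheduling and model execution):

.. code-block:: python

    import asyncio


    async def main() -> None:
        queue: asyncio.Queue = asyncio.Queue()

        async def background_loop() -> None:
            # Continuously drain queued requests, the way the engine's loop
            # repeatedly steps the underlying LLMEngine.
            while True:
                prompt = await queue.get()
                print(f"processed: {prompt!r}")
                queue.task_done()

        worker = asyncio.create_task(background_loop())

        # Clients simply enqueue requests and await their results.
        for prompt in ["Hello, my name is", "The capital of France is"]:
            await queue.put(prompt)

        await queue.join()
        worker.cancel()


    asyncio.run(main())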
There is also a demo +API server that serves as a simpler example in +`vllm/entrypoints/api_server.py`_. + +.. _vllm/entrypoints/api_server.py: https://github.com/vllm-project/vllm/tree/main/vllm/entrypoints/api_server.py + +The code for `AsyncLLMEngine` can be found in `vllm/engine/async_llm_engine.py`_. + +.. _vllm/engine/async_llm_engine.py: https://github.com/vllm-project/vllm/tree/main/vllm/engine/async_llm_engine.py + +Worker +------ + +A worker is a process that runs the model inference. vLLM follows the common +practice of using one process to control one accelerator device, such as GPUs. +For example, if we use tensor parallelism of size 2 and pipeline parallelism of +size 2, we will have 4 workers in total. Workers are identified by their +``rank`` and ``local_rank``. ``rank`` is used for global orchestration, while +``local_rank`` is mainly used for assigning the accelerator device and accessing +local resources such as the file system and shared memory. + +Model Runner +------------ + +Every worker has one model runner object, responsible for loading and running +the model. Much of the model execution logic resides here, such as preparing +input tensors and capturing cudagraphs. + +Model +----- + +Every model runner object has one model object, which is the actual +``torch.nn.Module`` instance. See :ref:`huggingface_integration` for how various +configurations affect the class we ultimately get. + +Class Hierarchy +--------------- + +The following figure shows the class hierarchy of vLLM: + + .. figure:: /assets/design/hierarchy.png + :alt: query + :width: 100% + :align: center + +There are several important design choices behind this class hierarchy: + +1. **Extensibility**: All classes in the hierarchy accept a configuration object +containing all the necessary information. The `VllmConfig +`__ +class is the main configuration object that is passed around. The class +hierarchy is quite deep, and every class needs to read the configuration it is +interested in. By encapsulating all configurations in one object, we can easily +pass the configuration object around and access the configuration we need. +Suppose we want to add a new feature (this is often the case given how fast the +field of LLM inference is evolving) that only touches the model runner. We will +have to add a new configuration option in the `VllmConfig` class. Since we pass +the whole config object around, we only need to add the configuration option to +the `VllmConfig` class, and the model runner can access it directly. We don't +need to change the constructor of the engine, worker, or model class to pass the +new configuration option. + +2. **Uniformity**: The model runner needs a unified interface to create and +initialize the model. vLLM supports more than 50 types of popular open-source +models. Each model has its own initialization logic. If the constructor +signature varies with models, the model runner does not know how to call the +constructor accordingly, without complicated and error-prone inspection logic. +By making the constructor of the model class uniform, the model runner can +easily create and initialize the model without knowing the specific model type. +This is also useful for composing models. Vision-language models often consist +of a vision model and a language model. By making the constructor uniform, we +can easily create a vision model and a language model and compose them into a +vision-language model. + +.. 
note:: + + To support this change, all vLLM models' signatures have been updated to: + + .. code-block:: python + + def __init__(self, *, vllm_config: VllmConfig, prefix: str = ""): + + To avoid accidentally passing incorrect arguments, the constructor is now keyword-only. This ensures that the constructor will raise an error if old configurations are passed. vLLM developers have already made this change for all models within vLLM. For out-of-tree registered models, developers need to update their models, for example by adding shim code to adapt the old constructor signature to the new one: + + .. code-block:: python + + class MyOldModel(nn.Module): + def __init__( + self, + config, + cache_config: Optional[CacheConfig] = None, + quant_config: Optional[QuantizationConfig] = None, + lora_config: Optional[LoRAConfig] = None, + prefix: str = "", + ) -> None: + ... + + from vllm.config import VllmConfig + class MyNewModel(MyOldModel): + def __init__(self, *, vllm_config: VllmConfig, prefix: str = ""): + config = vllm_config.model_config.hf_config + cache_config = vllm_config.cache_config + quant_config = vllm_config.quant_config + lora_config = vllm_config.lora_config + super().__init__(config, cache_config, quant_config, lora_config, prefix) + + if __version__ >= "0.6.4": + MyModel = MyNewModel + else: + MyModel = MyOldModel + + This way, the model can work with both old and new versions of vLLM. + +3. **Sharding and Quantization at Initialization**: Certain features require +changing the model weights. For example, tensor parallelism needs to shard the +model weights, and quantization needs to quantize the model weights. There are +two possible ways to implement this feature. One way is to change the model +weights after the model is initialized. The other way is to change the model +weights during the model initialization. vLLM chooses the latter. The first +approach is not scalable to large models. Suppose we want to run a 405B model +(with roughly 810GB weights) with 16 H100 80GB GPUs. Ideally, every GPU should +only load 50GB weights. If we change the model weights after the model is +initialized, we need to load the full 810GB weights to every GPU and then shard +the weights, leading to a huge memory overhead. Instead, if we shard the weights +during the model initialization, every layer will only create a shard of the +weights it needs, leading to a much smaller memory overhead. The same idea +applies to quantization. Note that we also add an additional argument ``prefix`` +to the model's constructor so that the model can initialize itself differently +based on the prefix. This is useful for non-uniform quantization, where +different parts of the model are quantized differently. The ``prefix`` is +usually an empty string for the top-level model and a string like ``"vision"`` +or ``"language"`` for the sub-models. In general, it matches the name of the +module's state dict in the checkpoint file. + +One disadvantage of this design is that it is hard to write unit tests for +individual components in vLLM because every component needs to be initialized by +a complete config object. We solve this problem by providing a default +initialization function that creates a default config object with all fields set +to ``None``. If the component we want to test only cares about a few fields in +the config object, we can create a default config object and set the fields we +care about. This way, we can test the component in isolation. 
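A minimal sketch of that testing pattern, using hypothetical stand-in classes rather than vLLM's real config types (the names ``ToyModelConfig`` and ``ToyVllmConfig`` are assumptions made only for illustration):

.. code-block:: python

    from dataclasses import dataclass, field
    from typing import Optional


    # Hypothetical stand-ins for the real config classes, used only to show
    # the "default config, set just what the test needs" pattern.
    @dataclass
    class ToyModelConfig:
        dtype: Optional[str] = None


    @dataclass
    class ToyVllmConfig:
        model_config: ToyModelConfig = field(default_factory=ToyModelConfig)
        cache_config: Optional[object] = None  # everything else stays None


    def test_component_reads_dtype() -> None:
        cfg = ToyVllmConfig()                # default config object
        cfg.model_config.dtype = "float16"   # set only the field under test
        assert cfg.model_config.dtype == "float16"


    test_component_reads_dtype()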
Note that many +tests in vLLM are end-to-end tests that test the whole system, so this is not a +big problem. + +In summary, the complete config object ``VllmConfig`` can be treated as an +engine-level global state that is shared among all vLLM classes. diff --git a/docs/source/design/class_hierarchy.rst b/docs/source/design/class_hierarchy.rst deleted file mode 100644 index 58a888b17ba53..0000000000000 --- a/docs/source/design/class_hierarchy.rst +++ /dev/null @@ -1,74 +0,0 @@ -.. _class_hierarchy: - -vLLM's Class Hierarchy -======================= - -This document describes the class hierarchy of vLLM. We will explain the relationships between the core classes, their responsibilities, and the design choices behind them to make vLLM more modular and extensible. - -1. **Entrypoints**: vLLM has two entrypoints: `command line usage `__ with ``vllm serve`` for launching an OpenAI-API compatible server, and `library-style usage `__ with the ``vllm.LLM`` class for running inference in a Python script. These are user-facing entrypoints that end-users interact with. Under the hood, both create an engine object to handle model inference. - -2. **Engine**: Each vLLM instance contains one engine object, orchestrating and serving as the control plane for model inference. Depending on the configuration, the engine can create multiple workers to handle the inference workload. - -3. **Worker**: A worker is a process that runs the model inference. vLLM follows the common practice of using one process to control one accelerator device, such as GPUs. For example, if we use tensor parallelism of size 2 and pipeline parallelism of size 2, we will have 4 workers in total. Workers are identified by their ``rank`` and ``local_rank``. ``rank`` is used for global orchestration, while ``local_rank`` is mainly used for assigning the accelerator device and accessing local resources such as the file system and shared memory. - -4. **Model Runner**: Every worker has one model runner object, responsible for loading and running the model. Much of the model execution logic resides here, such as preparing input tensors and capturing cudagraphs. - -5. **Model**: Every model runner object has one model object, which is the actual ``torch.nn.Module`` instance. See :ref:`huggingface_integration` for how various configurations affect the class we ultimately get. - -The following figure shows the class hierarchy of vLLM: - - .. figure:: ../assets/design/hierarchy.png - :alt: query - :width: 100% - :align: center - -There are several important design choices behind this class hierarchy: - -1. **Extensibility**: All classes in the hierarchy accept a configuration object containing all the necessary information. The `VllmConfig `__ class is the main configuration object that is passed around. The class hierarchy is quite deep, and every class needs to read the configuration it is interested in. By encapsulating all configurations in one object, we can easily pass the configuration object around and access the configuration we need. Suppose we want to add a new feature (this is often the case given how fast the field of LLM inference is evolving) that only touches the model runner. We will have to add a new configuration option in the `VllmConfig` class. Since we pass the whole config object around, we only need to add the configuration option to the `VllmConfig` class, and the model runner can access it directly. We don't need to change the constructor of the engine, worker, or model class to pass the new configuration option. 
- -2. **Uniformity**: The model runner needs a unified interface to create and initialize the model. vLLM supports more than 50 types of popular open-source models. Each model has its own initialization logic. If the constructor signature varies with models, the model runner does not know how to call the constructor accordingly, without complicated and error-prone inspection logic. By making the constructor of the model class uniform, the model runner can easily create and initialize the model without knowing the specific model type. This is also useful for composing models. Vision-language models often consist of a vision model and a language model. By making the constructor uniform, we can easily create a vision model and a language model and compose them into a vision-language model. - -.. note:: - - To support this change, all vLLM models' signatures have been updated to: - - .. code-block:: python - - def __init__(self, *, vllm_config: VllmConfig, prefix: str = ""): - - To avoid accidentally passing incorrect arguments, the constructor is now keyword-only. This ensures that the constructor will raise an error if old configurations are passed. vLLM developers have already made this change for all models within vLLM. For out-of-tree registered models, developers need to update their models, for example by adding shim code to adapt the old constructor signature to the new one: - - .. code-block:: python - - class MyOldModel(nn.Module): - def __init__( - self, - config, - cache_config: Optional[CacheConfig] = None, - quant_config: Optional[QuantizationConfig] = None, - lora_config: Optional[LoRAConfig] = None, - prefix: str = "", - ) -> None: - ... - - from vllm.config import VllmConfig - class MyNewModel(MyOldModel): - def __init__(self, *, vllm_config: VllmConfig, prefix: str = ""): - config = vllm_config.model_config.hf_config - cache_config = vllm_config.cache_config - quant_config = vllm_config.quant_config - lora_config = vllm_config.lora_config - super().__init__(config, cache_config, quant_config, lora_config, prefix) - - if __version__ >= "0.6.4": - MyModel = MyNewModel - else: - MyModel = MyOldModel - - This way, the model can work with both old and new versions of vLLM. - -3. **Sharding and Quantization at Initialization**: Certain features require changing the model weights. For example, tensor parallelism needs to shard the model weights, and quantization needs to quantize the model weights. There are two possible ways to implement this feature. One way is to change the model weights after the model is initialized. The other way is to change the model weights during the model initialization. vLLM chooses the latter. The first approach is not scalable to large models. Suppose we want to run a 405B model (with roughly 810GB weights) with 16 H100 80GB GPUs. Ideally, every GPU should only load 50GB weights. If we change the model weights after the model is initialized, we need to load the full 810GB weights to every GPU and then shard the weights, leading to a huge memory overhead. Instead, if we shard the weights during the model initialization, every layer will only create a shard of the weights it needs, leading to a much smaller memory overhead. The same idea applies to quantization. Note that we also add an additional argument ``prefix`` to the model's constructor so that the model can initialize itself differently based on the prefix. This is useful for non-uniform quantization, where different parts of the model are quantized differently. 
The ``prefix`` is usually an empty string for the top-level model and a string like ``"vision"`` or ``"language"`` for the sub-models. In general, it matches the name of the module's state dict in the checkpoint file. - -One disadvantage of this design is that it is hard to write unit tests for individual components in vLLM because every component needs to be initialized by a complete config object. We solve this problem by providing a default initialization function that creates a default config object with all fields set to ``None``. If the component we want to test only cares about a few fields in the config object, we can create a default config object and set the fields we care about. This way, we can test the component in isolation. Note that many tests in vLLM are end-to-end tests that test the whole system, so this is not a big problem. - -In summary, the complete config object ``VllmConfig`` can be treated as an engine-level global state that is shared among all vLLM classes. diff --git a/docs/source/design/plugin_system.rst b/docs/source/design/plugin_system.rst index bfca702b9267a..5a96cc8b3a464 100644 --- a/docs/source/design/plugin_system.rst +++ b/docs/source/design/plugin_system.rst @@ -8,7 +8,7 @@ The community frequently requests the ability to extend vLLM with custom feature How Plugins Work in vLLM ------------------------ -Plugins are user-registered code that vLLM executes. Given vLLM's architecture (see :ref:`class_hierarchy`), multiple processes may be involved, especially when using distributed inference with various parallelism techniques. To enable plugins successfully, every process created by vLLM needs to load the plugin. This is done by the `load_general_plugins `__ function in the ``vllm.plugins`` module. This function is called for every process created by vLLM before it starts any work. +Plugins are user-registered code that vLLM executes. Given vLLM's architecture (see :ref:`arch_overview`), multiple processes may be involved, especially when using distributed inference with various parallelism techniques. To enable plugins successfully, every process created by vLLM needs to load the plugin. This is done by the `load_general_plugins `__ function in the ``vllm.plugins`` module. This function is called for every process created by vLLM before it starts any work. How vLLM Discovers Plugins -------------------------- @@ -59,4 +59,4 @@ Guidelines for Writing Plugins Compatibility Guarantee ----------------------- -vLLM guarantees the interface of documented plugins, such as ``ModelRegistry.register_model``, will always be available for plugins to register models. However, it is the responsibility of plugin developers to ensure their plugins are compatible with the version of vLLM they are targeting. For example, ``"vllm_add_dummy_model.my_llava:MyLlava"`` should be compatible with the version of vLLM that the plugin targets. The interface for the model may change during vLLM's development. \ No newline at end of file +vLLM guarantees the interface of documented plugins, such as ``ModelRegistry.register_model``, will always be available for plugins to register models. However, it is the responsibility of plugin developers to ensure their plugins are compatible with the version of vLLM they are targeting. For example, ``"vllm_add_dummy_model.my_llava:MyLlava"`` should be compatible with the version of vLLM that the plugin targets. The interface for the model may change during vLLM's development. 
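To illustrate the registration interface mentioned in the guarantee above: a plugin might register an out-of-tree model from the function its entry point exposes. The package name, entry-point group, and function name below are assumptions made for the sketch, not taken from this patch; only ``ModelRegistry.register_model`` and the model path come from the text.

.. code-block:: python

    # my_vllm_plugin/__init__.py  (hypothetical plugin package)
    #
    # Exposed to vLLM via an entry point in the plugin's packaging metadata,
    # for example something like:
    #   [project.entry-points."vllm.general_plugins"]
    #   my_vllm_plugin = "my_vllm_plugin:register"
    # (the group name here is an assumption).

    def register() -> None:
        # load_general_plugins() runs plugin entry points in every process
        # vLLM creates, so keep this cheap and idempotent.
        from vllm import ModelRegistry

        ModelRegistry.register_model(
            "MyLlava", "vllm_add_dummy_model.my_llava:MyLlava")

Keeping the heavy imports inside ``register()`` keeps plugin discovery fast even when the plugin is installed but unused.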
diff --git a/docs/source/index.rst b/docs/source/index.rst index b04acbbce4169..c2afd806c50f9 100644 --- a/docs/source/index.rst +++ b/docs/source/index.rst @@ -157,7 +157,7 @@ Documentation :maxdepth: 2 :caption: Design - design/class_hierarchy + design/arch_overview design/huggingface_integration design/plugin_system design/input_processing/model_inputs_index diff --git a/format.sh b/format.sh index a57882d2ac3f9..b3dcdc15bf948 100755 --- a/format.sh +++ b/format.sh @@ -299,6 +299,10 @@ echo 'vLLM shellcheck:' tools/shellcheck.sh echo 'vLLM shellcheck: Done' +echo 'excalidraw png check:' +tools/png-lint.sh +echo 'excalidraw png check: Done' + if ! git diff --quiet &>/dev/null; then echo echo "🔍🔍There are files changed by the format checker or by you that are not added and committed:" diff --git a/tools/png-lint.sh b/tools/png-lint.sh new file mode 100755 index 0000000000000..a80fe9837342f --- /dev/null +++ b/tools/png-lint.sh @@ -0,0 +1,15 @@ +#!/bin/bash + +# Ensure that *.excalidraw.png files have the excalidraw metadata +# embedded in them. This ensures they can be loaded back into +# the tool and edited in the future. + +find . -iname '*.excalidraw.png' | while read -r file; do + if git check-ignore -q "$file"; then + continue + fi + if ! grep -q "excalidraw+json" "$file"; then + echo "$file was not exported from excalidraw with 'Embed Scene' enabled." + exit 1 + fi +done diff --git a/vllm/engine/arg_utils.py b/vllm/engine/arg_utils.py index 92fa87c7fa45b..ee4b6addfd466 100644 --- a/vllm/engine/arg_utils.py +++ b/vllm/engine/arg_utils.py @@ -793,7 +793,7 @@ def add_cli_args(parser: FlexibleArgumentParser) -> FlexibleArgumentParser: type=str, default=[], help="The pattern(s) to ignore when loading the model." - "Default to 'original/**/*' to avoid repeated loading of llama's " + "Default to `original/**/*` to avoid repeated loading of llama's " "checkpoints.") parser.add_argument( '--preemption-mode', From 25f9c78961daae10b9084d78901d71bc56691aa1 Mon Sep 17 00:00:00 2001 From: youkaichao Date: Tue, 19 Nov 2024 02:43:21 -0800 Subject: [PATCH 032/122] [misc][plugin] improve plugin loading (#10443) Signed-off-by: youkaichao --- vllm/plugins/__init__.py | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/vllm/plugins/__init__.py b/vllm/plugins/__init__.py index fdc848cedf054..05a9739d99e71 100644 --- a/vllm/plugins/__init__.py +++ b/vllm/plugins/__init__.py @@ -9,12 +9,19 @@ logger = logging.getLogger(__name__) +# make sure one process only loads plugins once +plugins_loaded = False + def load_general_plugins(): """WARNING: plugins can be loaded for multiple times in different processes. They should be designed in a way that they can be loaded multiple times without causing issues. """ + global plugins_loaded + if plugins_loaded: + return + plugins_loaded = True import sys if sys.version_info < (3, 10): from importlib_metadata import entry_points From b4614656b832aa8ac95e5450ca7b861f46049635 Mon Sep 17 00:00:00 2001 From: Yuan Date: Tue, 19 Nov 2024 21:16:43 +0800 Subject: [PATCH 033/122] [CI][CPU] adding numa node number as container name suffix (#10441) Signed-off-by: Yuan Zhou --- .buildkite/run-cpu-test.sh | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/.buildkite/run-cpu-test.sh b/.buildkite/run-cpu-test.sh index 14756b5964aaf..f0128f091b742 100644 --- a/.buildkite/run-cpu-test.sh +++ b/.buildkite/run-cpu-test.sh @@ -13,26 +13,26 @@ numactl -C "$CORE_RANGE" -N "$NUMA_NODE" docker build -t cpu-test -f Dockerfile. 
numactl -C "$CORE_RANGE" -N "$NUMA_NODE" docker build --build-arg VLLM_CPU_DISABLE_AVX512="true" -t cpu-test-avx2 -f Dockerfile.cpu . # Setup cleanup -remove_docker_container() { docker rm -f cpu-test cpu-test-avx2 || true; } +remove_docker_container() { docker rm -f cpu-test-"$NUMA_NODE" cpu-test-avx2-"$NUMA_NODE" || true; } trap remove_docker_container EXIT remove_docker_container # Run the image, setting --shm-size=4g for tensor parallel. docker run -itd --entrypoint /bin/bash -v ~/.cache/huggingface:/root/.cache/huggingface --cpuset-cpus="$CORE_RANGE" \ - --cpuset-mems="$NUMA_NODE" --privileged=true --network host -e HF_TOKEN --env VLLM_CPU_KVCACHE_SPACE=4 --shm-size=4g --name cpu-test cpu-test + --cpuset-mems="$NUMA_NODE" --privileged=true --network host -e HF_TOKEN --env VLLM_CPU_KVCACHE_SPACE=4 --shm-size=4g --name cpu-test-"$NUMA_NODE" cpu-test docker run -itd --entrypoint /bin/bash -v ~/.cache/huggingface:/root/.cache/huggingface --cpuset-cpus="$CORE_RANGE" \ - --cpuset-mems="$NUMA_NODE" --privileged=true --network host -e HF_TOKEN --env VLLM_CPU_KVCACHE_SPACE=4 --shm-size=4g --name cpu-test-avx2 cpu-test-avx2 + --cpuset-mems="$NUMA_NODE" --privileged=true --network host -e HF_TOKEN --env VLLM_CPU_KVCACHE_SPACE=4 --shm-size=4g --name cpu-test-avx2-"$NUMA_NODE" cpu-test-avx2 function cpu_tests() { set -e # offline inference - docker exec cpu-test-avx2 bash -c " + docker exec cpu-test-avx2-"$NUMA_NODE" bash -c " set -e python3 examples/offline_inference.py" # Run basic model test - docker exec cpu-test bash -c " + docker exec cpu-test-"$NUMA_NODE" bash -c " set -e pip install pytest pytest-asyncio \ decord einops librosa peft Pillow sentence-transformers soundfile \ @@ -45,20 +45,20 @@ function cpu_tests() { pytest -v -s tests/models/decoder_only/vision_language -m cpu_model" # Run compressed-tensor test - docker exec cpu-test bash -c " + docker exec cpu-test-"$NUMA_NODE" bash -c " set -e pytest -s -v \ tests/quantization/test_compressed_tensors.py::test_compressed_tensors_w8a8_static_setup \ tests/quantization/test_compressed_tensors.py::test_compressed_tensors_w8a8_dynamic_per_token" # Run AWQ test - docker exec cpu-test bash -c " + docker exec cpu-test-"$NUMA_NODE" bash -c " set -e pytest -s -v \ tests/quantization/test_ipex_quant.py" # online inference - docker exec cpu-test bash -c " + docker exec cpu-test-"$NUMA_NODE" bash -c " set -e export VLLM_CPU_KVCACHE_SPACE=10 export VLLM_CPU_OMP_THREADS_BIND=$1 From f028dff33d3d0b0dfe71e0e0354b355b8232a4ec Mon Sep 17 00:00:00 2001 From: COSMOPlat Date: Tue, 19 Nov 2024 21:42:50 +0800 Subject: [PATCH 034/122] [BugFix] Fix hermes tool parser output error stream arguments in some cases (#10395) (#10398) Signed-off-by: xiyuan lee --- .../openai/tool_parsers/hermes_tool_parser.py | 25 +++++++++---------- 1 file changed, 12 insertions(+), 13 deletions(-) diff --git a/vllm/entrypoints/openai/tool_parsers/hermes_tool_parser.py b/vllm/entrypoints/openai/tool_parsers/hermes_tool_parser.py index faa6f653b835c..18816cd665b3e 100644 --- a/vllm/entrypoints/openai/tool_parsers/hermes_tool_parser.py +++ b/vllm/entrypoints/openai/tool_parsers/hermes_tool_parser.py @@ -12,8 +12,6 @@ FunctionCall, ToolCall) from vllm.entrypoints.openai.tool_parsers.abstract_tool_parser import ( ToolParser, ToolParserManager) -from vllm.entrypoints.openai.tool_parsers.utils import ( - extract_intermediate_diff) from vllm.logger import init_logger from vllm.transformers_utils.tokenizer import AnyTokenizer, MistralTokenizer from vllm.utils import random_uuid @@ -190,8 
+188,11 @@ def extract_tool_calls_streaming( diff = self.prev_tool_call_arr[self.current_tool_id].get( "arguments") if diff: - diff = json.dumps(diff).replace( - self.streamed_args_for_tool[self.current_tool_id], "") + diff = diff.encode('utf-8').decode( + 'unicode_escape') if diff is str else diff + diff = json.dumps( + diff, ensure_ascii=False + )[len(self.streamed_args_for_tool[self.current_tool_id]):] logger.debug( "Finishing tool and found diff that had not " "been streamed yet: %s", diff) @@ -307,22 +308,20 @@ def extract_tool_calls_streaming( # last case -- we have an update to existing arguments. elif cur_arguments and prev_arguments: + if isinstance(delta_text, str) and len(delta_text.rstrip( + )) >= 1 and delta_text.rstrip()[-1] == '}': + delta_text = delta_text.rstrip()[:-1] + + logger.debug("got diff %s", delta_text) - cur_args_json = json.dumps(cur_arguments) - prev_args_json = json.dumps(prev_arguments) - logger.debug("Searching for diff between\n%s", cur_args_json) - logger.debug("and\n%s", prev_args_json) - argument_diff = extract_intermediate_diff( - cur_args_json, prev_args_json) - logger.debug("got argument diff %s", argument_diff) delta = DeltaMessage(tool_calls=[ DeltaToolCall(index=self.current_tool_id, function=DeltaFunctionCall( - arguments=argument_diff).model_dump( + arguments=delta_text).model_dump( exclude_none=True)) ]) self.streamed_args_for_tool[self.current_tool_id] \ - += argument_diff + += delta_text # handle saving the state for the current tool into # the "prev" list for use in diffing for the next iteration From 11fd7ea639cf3c4fae29322d8e5c839ff6f8a1ca Mon Sep 17 00:00:00 2001 From: Patrick von Platen Date: Tue, 19 Nov 2024 18:33:06 +0100 Subject: [PATCH 035/122] [Pixtral-Large] Pixtral actually has no bias in vision-lang adapter (#10449) --- vllm/model_executor/models/pixtral.py | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/vllm/model_executor/models/pixtral.py b/vllm/model_executor/models/pixtral.py index f7f46770057e2..d14b89d6b3f85 100644 --- a/vllm/model_executor/models/pixtral.py +++ b/vllm/model_executor/models/pixtral.py @@ -331,6 +331,7 @@ class VisionEncoderArgs: num_attention_heads: int rope_theta: float # for rope-2D image_token_id: int + adapter_bias: bool = True def _reshape_for_broadcast(freqs_cis: torch.Tensor, @@ -595,10 +596,10 @@ def __init__(self, args: VisionEncoderArgs, dim: int): self.w_in = nn.Linear( args.hidden_size, dim, - bias=True, + bias=args.adapter_bias, ) self.gelu = nn.GELU() - self.w_out = nn.Linear(dim, dim, bias=True) + self.w_out = nn.Linear(dim, dim, bias=args.adapter_bias) def forward(self, x: torch.Tensor) -> torch.Tensor: return self.w_out(self.gelu(self.w_in(x))) From 1ea291a4173a82c537ab42487e23375be4926d30 Mon Sep 17 00:00:00 2001 From: Manjul Mohan <49657164+mikejuliet13@users.noreply.github.com> Date: Tue, 19 Nov 2024 23:04:57 +0530 Subject: [PATCH 036/122] Fix: Build error seen on Power Architecture (#10421) Signed-off-by: Manjul Mohan Signed-off-by: B-201 Signed-off-by: Isotr0py <2037008807@qq.com> Signed-off-by: youkaichao Signed-off-by: ismael-dm Signed-off-by: Andrew Nesbitt Signed-off-by: mgoin Signed-off-by: yan ma Signed-off-by: Angus Wang Signed-off-by: Lucas Wilkinson Signed-off-by: rickyx Signed-off-by: Jee Jee Li Signed-off-by: Mengqing Cao Signed-off-by: Travis Johnson Co-authored-by: Manjul Mohan manjul.mohan@ibm.com Co-authored-by: B-201 Co-authored-by: Isotr0py <2037008807@qq.com> Co-authored-by: youkaichao Co-authored-by: ismael-dm Co-authored-by: Andrew Nesbitt 
Co-authored-by: Michael Goin Co-authored-by: Yan Ma Co-authored-by: Angus Wang Co-authored-by: Lucas Wilkinson Co-authored-by: Ricky Xu Co-authored-by: Kevin H. Luu Co-authored-by: Jee Jee Li Co-authored-by: Mengqing Cao Co-authored-by: Travis Johnson Co-authored-by: Russell Bryant --- cmake/cpu_extension.cmake | 14 ++++++++++---- csrc/cpu/attention.cpp | 12 ++++++++++-- csrc/cpu/quant.cpp | 6 ++++++ 3 files changed, 26 insertions(+), 6 deletions(-) diff --git a/cmake/cpu_extension.cmake b/cmake/cpu_extension.cmake index 5912c5c02ede7..426189481575b 100644 --- a/cmake/cpu_extension.cmake +++ b/cmake/cpu_extension.cmake @@ -16,10 +16,16 @@ include_directories("${CMAKE_SOURCE_DIR}/csrc") # # Check the compile flags # -list(APPEND CXX_COMPILE_FLAGS - "-fopenmp" - "-mf16c" - "-DVLLM_CPU_EXTENSION") +if (CMAKE_SYSTEM_PROCESSOR STREQUAL "ppc64le") + list(APPEND CXX_COMPILE_FLAGS + "-fopenmp" + "-DVLLM_CPU_EXTENSION") +else() + list(APPEND CXX_COMPILE_FLAGS + "-fopenmp" + "-mf16c" + "-DVLLM_CPU_EXTENSION") +endif() execute_process(COMMAND cat /proc/cpuinfo RESULT_VARIABLE CPUINFO_RET diff --git a/csrc/cpu/attention.cpp b/csrc/cpu/attention.cpp index e73eca1b345fd..e6c03dcb034fd 100644 --- a/csrc/cpu/attention.cpp +++ b/csrc/cpu/attention.cpp @@ -24,12 +24,20 @@ struct KernelVecType { template <> struct KernelVecType { +#ifdef __powerpc64__ + // Power architecture-specific vector types + using q_load_vec_type = vec_op::FP32Vec8; + using k_load_vec_type = vec_op::FP32Vec16; + using v_load_vec_type = vec_op::FP32Vec16; +#else + // Fallback for other architectures, including x86 using q_load_vec_type = vec_op::FP16Vec8; - using q_vec_type = vec_op::FP32Vec16; using k_load_vec_type = vec_op::FP16Vec16; + using v_load_vec_type = vec_op::FP16Vec16; +#endif + using q_vec_type = vec_op::FP32Vec16; using k_vec_type = vec_op::FP32Vec16; using qk_acc_vec_type = vec_op::FP32Vec16; - using v_load_vec_type = vec_op::FP16Vec16; }; #ifdef __AVX512BF16__ diff --git a/csrc/cpu/quant.cpp b/csrc/cpu/quant.cpp index f42fa2361a2db..d9aed657a3113 100644 --- a/csrc/cpu/quant.cpp +++ b/csrc/cpu/quant.cpp @@ -25,7 +25,13 @@ struct KernelVecType { template <> struct KernelVecType { +#ifdef __powerpc64__ + // Power architecture-specific vector type + using load_vec_type = vec_op::FP32Vec16; +#else + // Fallback for other architectures using load_vec_type = vec_op::FP16Vec16; +#endif using azp_adj_load_vec_type = vec_op::INT32Vec16; using cvt_vec_type = vec_op::FP32Vec16; }; From fd9f124971c58376ca294091951dfcc96cc03474 Mon Sep 17 00:00:00 2001 From: Russell Bryant Date: Tue, 19 Nov 2024 12:48:30 -0500 Subject: [PATCH 037/122] [Doc] fix link for page that was renamed (#10455) Signed-off-by: Russell Bryant --- vllm/model_executor/model_loader/loader.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/vllm/model_executor/model_loader/loader.py b/vllm/model_executor/model_loader/loader.py index b41c23704b7ff..936c2fe415375 100644 --- a/vllm/model_executor/model_loader/loader.py +++ b/vllm/model_executor/model_loader/loader.py @@ -105,7 +105,7 @@ def _initialize_model(vllm_config: VllmConfig, prefix: str = "") -> nn.Module: msg = ("vLLM model class should accept `vllm_config` and `prefix` as " "input arguments. Possibly you have an old-style model class" " registered from out of tree and it is used for new vLLM version. 
" - "Check https://docs.vllm.ai/en/latest/design/class_hierarchy.html " + "Check https://docs.vllm.ai/en/latest/design/arch_overview.html " "for the design and update the model class accordingly.") logger.warning(msg) logger.warning( From 803f37eaaa11568f65acbf0bcd1044fb9b1610bf Mon Sep 17 00:00:00 2001 From: youkaichao Date: Tue, 19 Nov 2024 10:09:03 -0800 Subject: [PATCH 038/122] [6/N] torch.compile rollout to users (#10437) Signed-off-by: youkaichao --- .../piecewise_compilation_config.json | 5 -- tests/compile/piecewise/test_simple.py | 18 +++---- tests/compile/piecewise/test_toy_llama.py | 45 +++++++----------- tests/compile/test_basic_correctness.py | 13 +++-- tests/compile/utils.py | 4 +- .../model_executor/test_enabled_custom_ops.py | 4 +- tests/tpu/test_compilation.py | 47 ++++++++++++++----- tests/tpu/test_custom_dispatcher.py | 10 ++-- vllm/config.py | 43 ++++++++--------- vllm/engine/arg_utils.py | 29 +++++++++--- vllm/engine/llm_engine.py | 4 +- vllm/envs.py | 8 ---- vllm/platforms/tpu.py | 4 +- vllm/plugins/__init__.py | 14 +----- vllm/v1/worker/gpu_model_runner.py | 22 ++------- 15 files changed, 129 insertions(+), 141 deletions(-) delete mode 100644 tests/compile/piecewise/piecewise_compilation_config.json diff --git a/tests/compile/piecewise/piecewise_compilation_config.json b/tests/compile/piecewise/piecewise_compilation_config.json deleted file mode 100644 index 798a34e8dd92d..0000000000000 --- a/tests/compile/piecewise/piecewise_compilation_config.json +++ /dev/null @@ -1,5 +0,0 @@ -{ - "use_cudagraph": true, - "non_cudagraph_ops": ["silly.attention"], - "cudagraph_copy_inputs": true -} \ No newline at end of file diff --git a/tests/compile/piecewise/test_simple.py b/tests/compile/piecewise/test_simple.py index 45f56cbbd4b16..0e40e3b4ebc96 100644 --- a/tests/compile/piecewise/test_simple.py +++ b/tests/compile/piecewise/test_simple.py @@ -2,7 +2,6 @@ Test the piecewise compilation with a simple model so that we can exactly calculate the expected output and side effects. 
""" -import os import torch from torch import nn @@ -11,7 +10,7 @@ from vllm.compilation.compile_context import set_compile_context from vllm.compilation.counter import compilation_counter from vllm.compilation.decorators import support_torch_compile -from vllm.config import CompilationLevel, VllmConfig +from vllm.config import CompilationConfig, CompilationLevel, VllmConfig from vllm.plugins import set_current_vllm_config from vllm.utils import direct_register_custom_op @@ -77,12 +76,12 @@ def forward(self, x: torch.Tensor) -> torch.Tensor: def test_simple_piecewise_compile(): - directory = os.path.dirname(__file__) - config = os.path.join(directory, "piecewise_compilation_config.json") - os.environ["VLLM_TORCH_COMPILE_CONFIG"] = config - os.environ["VLLM_TORCH_COMPILE_LEVEL"] = str(CompilationLevel.PIECEWISE) - - vllm_config = VllmConfig() + vllm_config = VllmConfig(compilation_config=CompilationConfig( + level=CompilationLevel.PIECEWISE, + use_cudagraph=True, + non_cudagraph_ops=["silly.attention"], + cudagraph_copy_inputs=True, + )) with set_current_vllm_config(vllm_config): model = SillyModel(vllm_config=vllm_config, prefix='') @@ -109,6 +108,3 @@ def test_simple_piecewise_compile(): output = model(input) assert global_counter == 2 assert torch.allclose(output.cpu(), torch.tensor([3., 1.])) - - # clean up to avoid side effects for other tests - del os.environ["VLLM_TORCH_COMPILE_CONFIG"] diff --git a/tests/compile/piecewise/test_toy_llama.py b/tests/compile/piecewise/test_toy_llama.py index 8032304e95806..356d119a40334 100644 --- a/tests/compile/piecewise/test_toy_llama.py +++ b/tests/compile/piecewise/test_toy_llama.py @@ -6,7 +6,6 @@ if the config `tractable_init` is set to True. Otherwise, the weights are initialized randomly with a fixed seed. 
""" -import os from dataclasses import dataclass from typing import Optional, Tuple @@ -18,7 +17,7 @@ from vllm.compilation.counter import compilation_counter from vllm.compilation.decorators import support_torch_compile from vllm.config import CompilationConfig, CompilationLevel, VllmConfig -from vllm.plugins import set_compilation_config, set_current_vllm_config +from vllm.plugins import set_current_vllm_config from vllm.utils import direct_register_custom_op # create a library to hold the custom op @@ -254,23 +253,17 @@ def run_model(llama_config, split_attn: bool = False) -> torch.Tensor: if use_compile: - os.environ["VLLM_TORCH_COMPILE_LEVEL"] = str( - CompilationLevel.PIECEWISE) - + compilation_config = CompilationConfig( + level=CompilationLevel.PIECEWISE, + use_cudagraph=True, + ) if split_attn: - set_compilation_config( - CompilationConfig( - use_cudagraph=True, - non_cudagraph_ops=["silly.attention"], - )) - else: - set_compilation_config(CompilationConfig(use_cudagraph=True, )) + compilation_config.non_cudagraph_ops = ["silly.attention"] else: - os.environ["VLLM_TORCH_COMPILE_LEVEL"] = str( - CompilationLevel.NO_COMPILATION) - set_compilation_config(None) + compilation_config = CompilationConfig( + level=CompilationLevel.NO_COMPILATION, ) - vllm_config = VllmConfig() + vllm_config = VllmConfig(compilation_config=compilation_config) with set_current_vllm_config(vllm_config): model = LlamaModel(config=llama_config, vllm_config=vllm_config, @@ -288,10 +281,6 @@ def run_model(llama_config, input_ids[:2].zero_() output = model(input_ids[:2], positions[:2]) - # manual cleanup - del os.environ["VLLM_TORCH_COMPILE_LEVEL"] - set_compilation_config(None) - output = output.cpu() if llama_config.tractable_init: @@ -361,7 +350,6 @@ def test_toy_llama(): @torch.inference_mode def benchmark(): - os.environ["VLLM_TORCH_COMPILE_LEVEL"] = str(CompilationLevel.PIECEWISE) from triton.testing import do_bench # similar to llama 3.1-8B @@ -387,15 +375,16 @@ def benchmark(): for piecewise in [False, True]: if piecewise: - set_compilation_config( - CompilationConfig( - use_cudagraph=True, - non_cudagraph_ops=["silly.attention"], - )) + compilation_config = CompilationConfig( + level=CompilationLevel.PIECEWISE, + use_cudagraph=True, + non_cudagraph_ops=["silly.attention"], + ) else: - set_compilation_config(None) + compilation_config = CompilationConfig( + level=CompilationLevel.PIECEWISE, ) - vllm_config = VllmConfig() + vllm_config = VllmConfig(compilation_config=compilation_config) with set_current_vllm_config(vllm_config): model = LlamaModel(config=llama_config, vllm_config=vllm_config, diff --git a/tests/compile/test_basic_correctness.py b/tests/compile/test_basic_correctness.py index 08747ebc58b75..c0db2e78824be 100644 --- a/tests/compile/test_basic_correctness.py +++ b/tests/compile/test_basic_correctness.py @@ -96,31 +96,36 @@ def test_compile_correctness(test_setting: TestSetting): final_args = ["--enforce-eager"] + model_args + ["-pp", str(pp_size)] + \ ["-tp", str(tp_size)] + all_args: List[List[str]] = [] all_envs: List[Optional[Dict[str, str]]] = [] for level in [ CompilationLevel.NO_COMPILATION, CompilationLevel.PIECEWISE, ]: - all_envs.append({"VLLM_TORCH_COMPILE_LEVEL": str(level)}) + all_args.append(final_args + ["-O", str(level)]) + all_envs.append({}) # inductor will change the output, so we only compare if the output # is close, not exactly the same. 
compare_all_settings( - model, [final_args] * 2, + model, + all_args, all_envs, method=method if method != "generate" else "generate_close") all_envs.clear() + all_args.clear() for level in [ CompilationLevel.NO_COMPILATION, CompilationLevel.DYNAMO_AS_IS, CompilationLevel.DYNAMO_ONCE, ]: - all_envs.append({"VLLM_TORCH_COMPILE_LEVEL": str(level)}) + all_args.append(final_args + ["-O", str(level)]) + all_envs.append({}) if level != CompilationLevel.DYNAMO_ONCE and not fullgraph: # "DYNAMO_ONCE" will always use fullgraph all_envs[-1][ "VLLM_TEST_DYNAMO_FULLGRAPH_CAPTURE"] = "0" # type: ignore - compare_all_settings(model, [final_args] * 3, all_envs, method=method) + compare_all_settings(model, all_args * 3, all_envs, method=method) diff --git a/tests/compile/utils.py b/tests/compile/utils.py index 729f10676888b..078c6bf9ea1df 100644 --- a/tests/compile/utils.py +++ b/tests/compile/utils.py @@ -4,7 +4,7 @@ from tests.quantization.utils import is_quant_method_supported from vllm import LLM, SamplingParams -from vllm.config import CompilationLevel +from vllm.config import CompilationConfig, CompilationLevel from vllm.platforms import current_platform TEST_MODELS = [ @@ -65,7 +65,6 @@ def check_full_graph_support(model, optimization_level, tp_size=1): # make sure these models can be captured in full graph mode - os.environ["VLLM_TORCH_COMPILE_LEVEL"] = str(optimization_level) os.environ["VLLM_TEST_DYNAMO_FULLGRAPH_CAPTURE"] = "1" # The base meta llama uses too much memory. @@ -86,6 +85,7 @@ def check_full_graph_support(model, enforce_eager=True, tensor_parallel_size=tp_size, disable_custom_all_reduce=True, + compilation_config=CompilationConfig(level=optimization_level), **model_kwargs) outputs = llm.generate(prompts, sampling_params) diff --git a/tests/model_executor/test_enabled_custom_ops.py b/tests/model_executor/test_enabled_custom_ops.py index c3219bc50646b..c54e30995da49 100644 --- a/tests/model_executor/test_enabled_custom_ops.py +++ b/tests/model_executor/test_enabled_custom_ops.py @@ -1,4 +1,3 @@ -import os from typing import List import pytest @@ -53,9 +52,8 @@ class Relu3(ReLUSquaredActivation): ]) def test_enabled_ops(env: str, torch_level: int, ops_enabled: List[int], default_on: bool): - os.environ["VLLM_TORCH_COMPILE_LEVEL"] = str(torch_level) vllm_config = VllmConfig(compilation_config=CompilationConfig( - custom_ops=env.split(","))) + level=torch_level, custom_ops=env.split(","))) with set_current_vllm_config(vllm_config): assert CustomOp.default_on() == default_on diff --git a/tests/tpu/test_compilation.py b/tests/tpu/test_compilation.py index 941abe17a3378..65bee85e7a1ea 100644 --- a/tests/tpu/test_compilation.py +++ b/tests/tpu/test_compilation.py @@ -1,24 +1,47 @@ import glob import os -import runpy import tempfile import depyf -from vllm.config import CompilationLevel - -# disable custom dispatcher, let Dynamo takes over -# all the control -os.environ['VLLM_TORCH_COMPILE_LEVEL'] = str(CompilationLevel.DYNAMO_AS_IS) +from vllm.config import CompilationConfig, CompilationLevel temp_dir = tempfile.mkdtemp() with depyf.prepare_debug(temp_dir): - cur_dir = os.path.dirname(__file__) - parent_dir = os.path.dirname(cur_dir) - root_dir = os.path.dirname(parent_dir) - example_file = os.path.join(root_dir, "examples", - "offline_inference_tpu.py") - runpy.run_path(example_file) + from vllm import LLM, SamplingParams + + prompts = [ + "A robot may not injure a human being", + "It is only with the heart that one can see rightly;", + "The greatest glory in living lies not in never 
falling,", + ] + answers = [ + " or, through inaction, allow a human being to come to harm.", + " what is essential is invisible to the eye.", + " but in rising every time we fall.", + ] + N = 1 + # Currently, top-p sampling is disabled. `top_p` should be 1.0. + sampling_params = SamplingParams(temperature=0.7, + top_p=1.0, + n=N, + max_tokens=16) + + # Set `enforce_eager=True` to avoid ahead-of-time compilation. + # In real workloads, `enforace_eager` should be `False`. + + # disable custom dispatcher, let Dynamo takes over + # all the control + llm = LLM(model="google/gemma-2b", + enforce_eager=True, + compilation_config=CompilationConfig( + level=CompilationLevel.DYNAMO_AS_IS)) + outputs = llm.generate(prompts, sampling_params) + for output, answer in zip(outputs, answers): + prompt = output.prompt + generated_text = output.outputs[0].text + print(f"Prompt: {prompt!r}, Generated text: {generated_text!r}") + assert generated_text.startswith(answer) compiled_code = sorted( glob.glob(os.path.join(temp_dir, "__transformed_code*.py"))) diff --git a/tests/tpu/test_custom_dispatcher.py b/tests/tpu/test_custom_dispatcher.py index 53b10c06135a1..df348258efcba 100644 --- a/tests/tpu/test_custom_dispatcher.py +++ b/tests/tpu/test_custom_dispatcher.py @@ -13,7 +13,9 @@ def test_custom_dispatcher(): compare_two_settings( "google/gemma-2b", - arg1=["--enforce-eager"], - arg2=["--enforce-eager"], - env1={"VLLM_TORCH_COMPILE_LEVEL": str(CompilationLevel.DYNAMO_ONCE)}, - env2={"VLLM_TORCH_COMPILE_LEVEL": str(CompilationLevel.DYNAMO_AS_IS)}) + arg1=["--enforce-eager", "-O", + str(CompilationLevel.DYNAMO_ONCE)], + arg2=["--enforce-eager", "-O", + str(CompilationLevel.DYNAMO_AS_IS)], + env1={}, + env2={}) diff --git a/vllm/config.py b/vllm/config.py index ea9ec43cc5a15..e69cbd3eb402a 100644 --- a/vllm/config.py +++ b/vllm/config.py @@ -2174,8 +2174,14 @@ class CompilationConfig(BaseModel): enabled_custom_ops: Counter[str] = PrivateAttr disabled_custom_ops: Counter[str] = PrivateAttr + @classmethod + def from_cli(cls, cli_value: str) -> "CompilationConfig": + """Parse the CLI value for the compilation config.""" + if cli_value in ["0", "1", "2", "3"]: + return cls(level=int(cli_value)) + return CompilationConfig.model_validate_json(cli_value) + def model_post_init(self, __context: Any) -> None: - self.level = envs.VLLM_TORCH_COMPILE_LEVEL count_none = self.custom_ops.count("none") count_all = self.custom_ops.count("all") @@ -2249,26 +2255,6 @@ def init_during_runtime(self): "inductor_specialize_for_cudagraph_no_more_than is None") self.compile_sizes = self.inductor_compile_sizes - @staticmethod - def select_and_init_config() -> "CompilationConfig": - """The order of selecting config is: - 1. Use the config specified in environment variable. - 2. Use the config specified in plugins. - 3. Use the default config. 
- """ - config_path = envs.VLLM_TORCH_COMPILE_CONFIG - if config_path is not None: - with open(config_path) as json_file: - config = CompilationConfig.model_validate_json( - json_file.read()) - else: - from vllm.plugins import get_compilation_config - predefined_config = get_compilation_config() - config = predefined_config if predefined_config is not None else ( - CompilationConfig()) - - return config - @dataclass class VllmConfig: @@ -2354,8 +2340,19 @@ def __post_init__(self): self.model_config, self.load_config) if self.compilation_config is None: - self.compilation_config = CompilationConfig.select_and_init_config( - ) + self.compilation_config = CompilationConfig() + if envs.VLLM_USE_V1: + # NOTE(woosuk): Currently, we use inductor because the piecewise + # CUDA graphs do not work properly with the custom CUDA kernels. + # FIXME(woosuk): Disable inductor to reduce the compilation time + # and avoid any potential issues with the inductor. + self.compilation_config.custom_ops = ["none"] + self.compilation_config.use_cudagraph = True + self.compilation_config.non_cudagraph_ops = [ + "vllm.unified_v1_flash_attention" + ] + self.compilation_config.use_inductor = True + self.compilation_config.enable_fusion = False current_platform.check_and_update_config(self) diff --git a/vllm/engine/arg_utils.py b/vllm/engine/arg_utils.py index ee4b6addfd466..a3ae1889774f3 100644 --- a/vllm/engine/arg_utils.py +++ b/vllm/engine/arg_utils.py @@ -8,12 +8,13 @@ import torch import vllm.envs as envs -from vllm.config import (CacheConfig, ConfigFormat, DecodingConfig, - DeviceConfig, HfOverrides, LoadConfig, LoadFormat, - LoRAConfig, ModelConfig, ObservabilityConfig, - ParallelConfig, PoolerConfig, PromptAdapterConfig, - SchedulerConfig, SpeculativeConfig, TaskOption, - TokenizerPoolConfig, VllmConfig) +from vllm.config import (CacheConfig, CompilationConfig, ConfigFormat, + DecodingConfig, DeviceConfig, HfOverrides, LoadConfig, + LoadFormat, LoRAConfig, ModelConfig, + ObservabilityConfig, ParallelConfig, PoolerConfig, + PromptAdapterConfig, SchedulerConfig, + SpeculativeConfig, TaskOption, TokenizerPoolConfig, + VllmConfig) from vllm.executor.executor_base import ExecutorBase from vllm.logger import init_logger from vllm.model_executor.layers.quantization import QUANTIZATION_METHODS @@ -189,6 +190,7 @@ class EngineArgs: override_neuron_config: Optional[Dict[str, Any]] = None override_pooler_config: Optional[PoolerConfig] = None + compilation_config: Optional[CompilationConfig] = None def __post_init__(self): if not self.tokenizer: @@ -868,6 +870,20 @@ def add_cli_args(parser: FlexibleArgumentParser) -> FlexibleArgumentParser: help="Override or set the pooling method in the embedding model. " "e.g. {\"pooling_type\": \"mean\", \"normalize\": false}.'") + parser.add_argument('--compilation-config', + '-O', + type=CompilationConfig.from_cli, + default=None, + help='torch.compile configuration for the model.' + 'When it is a number (0, 1, 2, 3), it will be ' + 'interpreted as the optimization level.\n' + 'NOTE: level 0 is the default level without ' + 'any optimization. level 1 and 2 are for internal ' + 'testing only. 
level 3 is the recommended level ' + 'for production.\n' + 'To specify the full compilation config, ' + 'use a JSON string.') + return parser @classmethod @@ -1142,6 +1158,7 @@ def create_engine_config(self) -> VllmConfig: decoding_config=decoding_config, observability_config=observability_config, prompt_adapter_config=prompt_adapter_config, + compilation_config=self.compilation_config, ) diff --git a/vllm/engine/llm_engine.py b/vllm/engine/llm_engine.py index e72dc81f35b67..2a5eaf1340762 100644 --- a/vllm/engine/llm_engine.py +++ b/vllm/engine/llm_engine.py @@ -262,7 +262,8 @@ def __init__( "num_scheduler_steps=%d, chunked_prefill_enabled=%s " "multi_step_stream_outputs=%s, enable_prefix_caching=%s, " "use_async_output_proc=%s, use_cached_outputs=%s, " - "mm_processor_kwargs=%s, pooler_config=%r)", + "mm_processor_kwargs=%s, pooler_config=%r," + "compilation_config=%r", VLLM_VERSION, model_config.model, speculative_config, @@ -297,6 +298,7 @@ def __init__( use_cached_outputs, model_config.mm_processor_kwargs, model_config.pooler_config, + vllm_config.compilation_config, ) # TODO(woosuk): Print more configs in debug mode. self.model_config = model_config diff --git a/vllm/envs.py b/vllm/envs.py index 716e835a555f1..853c49bc4dbc1 100644 --- a/vllm/envs.py +++ b/vllm/envs.py @@ -67,8 +67,6 @@ VLLM_USE_TRITON_AWQ: bool = False VLLM_ALLOW_RUNTIME_LORA_UPDATING: bool = False VLLM_SKIP_P2P_CHECK: bool = False - VLLM_TORCH_COMPILE_LEVEL: int = 0 - VLLM_TORCH_COMPILE_CONFIG: Optional[str] = None VLLM_DISABLED_KERNELS: List[str] = [] VLLM_USE_V1: bool = False VLLM_ENABLE_V1_MULTIPROCESSING: bool = False @@ -209,12 +207,6 @@ def get_default_config_root(): "VLLM_TEST_DYNAMO_FULLGRAPH_CAPTURE": lambda: bool( os.environ.get("VLLM_TEST_DYNAMO_FULLGRAPH_CAPTURE", "1") != "0"), - "VLLM_TORCH_COMPILE_LEVEL": - lambda: int(os.environ.get("VLLM_TORCH_COMPILE_LEVEL", "0")), - - # Path to the config file for torch compile - "VLLM_TORCH_COMPILE_CONFIG": - lambda: os.environ.get("VLLM_TORCH_COMPILE_CONFIG", None), # local rank of the process in the distributed setting, used to determine # the GPU device id diff --git a/vllm/platforms/tpu.py b/vllm/platforms/tpu.py index 9057afb6514e4..2a7ca9fb8c576 100644 --- a/vllm/platforms/tpu.py +++ b/vllm/platforms/tpu.py @@ -1,4 +1,3 @@ -import os from typing import TYPE_CHECKING import torch @@ -40,7 +39,8 @@ def inference_mode(cls): def check_and_update_config(cls, vllm_config: VllmConfig) -> None: from vllm.config import CompilationLevel compilation_config = vllm_config.compilation_config - if "VLLM_TORCH_COMPILE_LEVEL" not in os.environ: + if compilation_config.level == CompilationLevel.NO_COMPILATION: + # TPU does not support NO_COMPILATION compilation_config.level = CompilationLevel.DYNAMO_ONCE assert compilation_config.level < CompilationLevel.PIECEWISE,\ "TPU does not support Inductor." 
diff --git a/vllm/plugins/__init__.py b/vllm/plugins/__init__.py index 05a9739d99e71..dc183dbfc9b96 100644 --- a/vllm/plugins/__init__.py +++ b/vllm/plugins/__init__.py @@ -5,7 +5,7 @@ import vllm.envs as envs if TYPE_CHECKING: - from vllm.config import CompilationConfig, VllmConfig + from vllm.config import VllmConfig logger = logging.getLogger(__name__) @@ -54,18 +54,6 @@ def load_general_plugins(): logger.exception("Failed to load plugin %s", plugin.name) -_compilation_config: Optional["CompilationConfig"] = None - - -def set_compilation_config(config: Optional["CompilationConfig"]): - global _compilation_config - _compilation_config = config - - -def get_compilation_config() -> Optional["CompilationConfig"]: - return _compilation_config - - _current_vllm_config: Optional["VllmConfig"] = None diff --git a/vllm/v1/worker/gpu_model_runner.py b/vllm/v1/worker/gpu_model_runner.py index d60f93a44f6dd..1f9b544637bf7 100644 --- a/vllm/v1/worker/gpu_model_runner.py +++ b/vllm/v1/worker/gpu_model_runner.py @@ -8,13 +8,12 @@ import torch.nn as nn from vllm.compilation.compile_context import set_compile_context -from vllm.config import CompilationConfig, CompilationLevel, VllmConfig +from vllm.config import CompilationLevel, VllmConfig from vllm.forward_context import set_forward_context from vllm.inputs import INPUT_REGISTRY, InputRegistry from vllm.logger import init_logger from vllm.model_executor.model_loader import get_model from vllm.multimodal import MultiModalKwargs -from vllm.plugins import set_compilation_config from vllm.sampling_params import SamplingParams, SamplingType from vllm.utils import (STR_DTYPE_TO_TORCH_DTYPE, DeviceMemoryProfiler, cdiv, is_pin_memory_available) @@ -508,20 +507,6 @@ def execute_model( return model_runner_output def load_model(self) -> None: - if self.use_cuda_graph: - # NOTE(woosuk): Currently, we use inductor because the piecewise - # CUDA graphs do not work properly with the custom CUDA kernels. - # FIXME(woosuk): Disable inductor to reduce the compilation time - # and avoid any potential issues with the inductor. - set_compilation_config( - CompilationConfig( - custom_ops=["none"], - use_cudagraph=True, - non_cudagraph_ops=["vllm.unified_v1_flash_attention"], - use_inductor=True, - enable_fusion=False, - )) - logger.info("Starting to load model %s...", self.model_config.model) with DeviceMemoryProfiler() as m: # noqa: SIM117 self.model = get_model(vllm_config=self.vllm_config) @@ -562,9 +547,8 @@ def profile_run(self) -> None: def capture_model(self) -> None: if not self.use_cuda_graph: logger.warning( - "Skipping CUDA graph capture. Please set " - "VLLM_TORCH_COMPILE_LEVEL=%d to use CUDA graphs.", - CompilationLevel.PIECEWISE) + "Skipping CUDA graph capture. 
Please add " + "-O 3 to use CUDA graphs.", CompilationLevel.PIECEWISE) return start_time = time.perf_counter() From efa9084628b32787ae1901a2d1e9b80f7d08809b Mon Sep 17 00:00:00 2001 From: Russell Bryant Date: Tue, 19 Nov 2024 16:05:25 -0500 Subject: [PATCH 039/122] [Core] Avoid metrics log noise when idle (#8868) Signed-off-by: Russell Bryant --- vllm/engine/metrics.py | 30 +++++++++++++++++++++++------- 1 file changed, 23 insertions(+), 7 deletions(-) diff --git a/vllm/engine/metrics.py b/vllm/engine/metrics.py index 47472c274ccb6..5bfd6a9f4b386 100644 --- a/vllm/engine/metrics.py +++ b/vllm/engine/metrics.py @@ -421,6 +421,11 @@ def get_throughput(tracked_stats: List[int], now: float, class LoggingStatLogger(StatLoggerBase): """LoggingStatLogger is used in LLMEngine to log to Stdout.""" + def __init__(self, *args, **kwargs) -> None: + super().__init__(*args, **kwargs) + self.last_prompt_throughput: Optional[float] = None + self.last_generation_throughput: Optional[float] = None + def log(self, stats: Stats) -> None: """Called by LLMEngine. Logs to Stdout every self.local_interval seconds.""" @@ -445,8 +450,14 @@ def log(self, stats: Stats) -> None: now=stats.now, last_log=self.last_local_log) - # Log to stdout. - logger.info( + log_fn = logger.info + if not any((prompt_throughput, generation_throughput, + self.last_prompt_throughput, + self.last_generation_throughput)): + # Avoid log noise on an idle production system + log_fn = logger.debug + + log_fn( "Avg prompt throughput: %.1f tokens/s, " "Avg generation throughput: %.1f tokens/s, " "Running: %d reqs, Swapped: %d reqs, " @@ -472,11 +483,16 @@ def log(self, stats: Stats) -> None: self._format_spec_decode_metrics_str( self.spec_decode_metrics)) - # Reset tracked stats for next interval. - self.num_prompt_tokens = [] - self.num_generation_tokens = [] - self.last_local_log = stats.now - self.spec_decode_metrics = None + self._reset(stats, prompt_throughput, generation_throughput) + + def _reset(self, stats, prompt_throughput, generation_throughput) -> None: + # Reset tracked stats for next interval. 
+ self.num_prompt_tokens = [] + self.num_generation_tokens = [] + self.last_local_log = stats.now + self.spec_decode_metrics = None + self.last_prompt_throughput = prompt_throughput + self.last_generation_throughput = generation_throughput def _format_spec_decode_metrics_str( self, metrics: "SpecDecodeWorkerMetrics") -> str: From b00b33d77e33c5516e73de663539dff96e8b61a4 Mon Sep 17 00:00:00 2001 From: ElizaWszola Date: Tue, 19 Nov 2024 22:31:12 +0100 Subject: [PATCH 040/122] [Model][Quantization] HQQ support through Marlin kernel expansion (#9766) Signed-off-by: ElizaWszola --- benchmarks/kernels/benchmark_machete.py | 3 +- benchmarks/kernels/benchmark_marlin.py | 4 +- csrc/quantization/gptq_marlin/gptq_marlin.cu | 277 ++++++++++----- csrc/torch_bindings.cpp | 2 +- tests/kernels/test_marlin_gemm.py | 88 ++++- tests/weight_loading/models.txt | 3 +- vllm/_custom_ops.py | 8 +- vllm/model_executor/layers/linear.py | 3 +- .../layers/quantization/__init__.py | 2 + .../layers/quantization/hqq_marlin.py | 325 ++++++++++++++++++ .../layers/quantization/utils/marlin_utils.py | 6 +- 11 files changed, 632 insertions(+), 89 deletions(-) create mode 100644 vllm/model_executor/layers/quantization/hqq_marlin.py diff --git a/benchmarks/kernels/benchmark_machete.py b/benchmarks/kernels/benchmark_machete.py index a0342d08f1db8..46bab74ae8adf 100644 --- a/benchmarks/kernels/benchmark_machete.py +++ b/benchmarks/kernels/benchmark_machete.py @@ -210,7 +210,8 @@ def marlin_create_bench_fn(bt: BenchmarkTensors) -> Callable: size_m=bt.a.shape[0], size_n=bt.w_ref.shape[1], size_k=bt.w_ref.shape[0], - is_k_full=True) + is_k_full=True, + is_zp_float=False) else: assert bt.a.dtype == torch.int8 assert bt.wtype == scalar_types.uint4b8 diff --git a/benchmarks/kernels/benchmark_marlin.py b/benchmarks/kernels/benchmark_marlin.py index 536c133bb3341..8fb44e3a3dbd8 100644 --- a/benchmarks/kernels/benchmark_marlin.py +++ b/benchmarks/kernels/benchmark_marlin.py @@ -131,7 +131,7 @@ def bench_run(results: List[benchmark.Measurement], model: str, results.append( benchmark.Timer( stmt= - "output = gptq_marlin_gemm(a, marlin_q_w, marlin_s, marlin_zp, marlin_g_idx, marlin_sort_indices, marlin_workspace.scratch, quant_type, size_m, size_n, size_k, is_k_full, False, False)", # noqa: E501 + "output = gptq_marlin_gemm(a, marlin_q_w, marlin_s, marlin_zp, marlin_g_idx, marlin_sort_indices, marlin_workspace.scratch, quant_type, size_m, size_n, size_k, is_k_full, False, False, False)", # noqa: E501 globals=globals, label=label, sub_label=sub_label, @@ -141,7 +141,7 @@ def bench_run(results: List[benchmark.Measurement], model: str, results.append( benchmark.Timer( stmt= - "output = gptq_marlin_gemm(a, marlin_q_w, marlin_s, marlin_zp, marlin_g_idx, marlin_sort_indices, marlin_workspace.scratch, quant_type, size_m, size_n, size_k, is_k_full, False, True)", # noqa: E501 + "output = gptq_marlin_gemm(a, marlin_q_w, marlin_s, marlin_zp, marlin_g_idx, marlin_sort_indices, marlin_workspace.scratch, quant_type, size_m, size_n, size_k, is_k_full, False, True, False)", # noqa: E501 globals=globals, label=label, sub_label=sub_label, diff --git a/csrc/quantization/gptq_marlin/gptq_marlin.cu b/csrc/quantization/gptq_marlin/gptq_marlin.cu index 6dbf9594e8492..0c698ced7713d 100644 --- a/csrc/quantization/gptq_marlin/gptq_marlin.cu +++ b/csrc/quantization/gptq_marlin/gptq_marlin.cu @@ -54,9 +54,10 @@ template shared // fetch pipeline - const bool has_act_order, // whether act_order is enabled - const int group_blocks = -1 // number of consecutive 16x16 
blocks - // with a separate quantization scale + const bool has_act_order, // whether act_order is enabled + const int group_blocks = -1, // number of consecutive 16x16 blocks + // with a separate quantization scale + const bool is_zp_float // is zero point of float16 type? > __global__ void Marlin( const int4* __restrict__ A, // fp16 input matrix of shape mxk @@ -82,7 +83,7 @@ torch::Tensor gptq_marlin_gemm(torch::Tensor& a, torch::Tensor& b_q_weight, torch::Tensor& workspace, vllm::ScalarTypeId const b_q_type_id, int64_t size_m, int64_t size_n, int64_t size_k, - bool is_k_full, bool has_zp) { + bool is_k_full, bool has_zp, bool is_zp_float) { TORCH_CHECK_NOT_IMPLEMENTED(false, "marlin_gemm(..) requires CUDA_ARCH >= 8.0"); return torch::empty({1, 1}); @@ -516,10 +517,11 @@ template shared // fetch pipeline - const bool has_act_order, // whether act_order is enabled - const bool has_zp, // whether zero-points are enabled - const int group_blocks = -1 // number of consecutive 16x16 blocks - // with a separate quantization scale + const bool has_act_order, // whether act_order is enabled + const bool has_zp, // whether zero-points are enabled + const int group_blocks = -1, // number of consecutive 16x16 blocks + // with a separate quantization scale + const bool is_zp_float // is zero point of float16 type? > __global__ void Marlin( const int4* __restrict__ A, // fp16 input matrix of shape mxk @@ -692,8 +694,10 @@ __global__ void Marlin( int act_s_col_tb_stride = act_s_col_warp_stride * tb_n_warps; // Zero-points sizes/strides - int zp_gl_stride = (prob_n / pack_factor) / 4; - constexpr int zp_sh_stride = ((16 * thread_n_blocks) / pack_factor) / 4; + int zp_gl_stride = is_zp_float ? prob_n / 8 : (prob_n / pack_factor) / 4; + constexpr int zp_sh_stride = is_zp_float + ? 16 * thread_n_blocks / 8 + : ((16 * thread_n_blocks) / pack_factor) / 4; constexpr int zp_tb_groups = s_tb_groups; constexpr int zp_sh_stage = has_zp ? zp_tb_groups * zp_sh_stride : 0; int zp_gl_rd_delta = zp_gl_stride; @@ -768,9 +772,16 @@ __global__ void Marlin( constexpr int num_ints_per_thread = 8 / pack_factor; int zp_sh_rd; if constexpr (has_zp) { - zp_sh_rd = num_ints_per_thread * num_col_threads * - ((threadIdx.x / 32) % (thread_n_blocks / 4)) + - num_ints_per_thread * ((threadIdx.x % 32) / num_row_threads); + if constexpr (is_zp_float) { + if constexpr (group_blocks != -1) { + zp_sh_rd = 8 * ((threadIdx.x / 32) % (thread_n_blocks / 4)) + + (threadIdx.x % 32) / 4; + } + } else { + zp_sh_rd = num_ints_per_thread * num_col_threads * + ((threadIdx.x / 32) % (thread_n_blocks / 4)) + + num_ints_per_thread * ((threadIdx.x % 32) / num_row_threads); + } } // Precompute which thread should not read memory in which iterations; this is @@ -832,6 +843,7 @@ __global__ void Marlin( FragS act_frag_s[2][4][4]; // For act-order int frag_qzp[2][num_ints_per_thread]; // Zero-points FragZP frag_zp; // Zero-points in fp16 + FragZP frag_zpf[2]; // Zero-points in fp16 in HQQ // Zero accumulators. 
auto zero_accums = [&]() { @@ -1126,7 +1138,7 @@ __global__ void Marlin( // has_zp implies AWQ, which doesn't have act_order, static_assert(!has_zp || group_blocks != 0); - if constexpr (has_zp) { + if constexpr (has_zp && !is_zp_float) { int pipe = full_pipe % stages; if constexpr (group_blocks == -1) { @@ -1170,11 +1182,44 @@ __global__ void Marlin( } } } + + else if constexpr (has_zp && is_zp_float) { + int pipe = full_pipe % stages; + + if constexpr (group_blocks != -1) { + if constexpr (group_blocks >= thread_k_blocks) { + int4* sh_zp_stage = + sh_zp + zp_sh_stage * ((group_blocks / thread_k_blocks) * + (pipe / (group_blocks / thread_k_blocks))); + reinterpret_cast(&frag_zpf[k % 2])[0] = sh_zp_stage[zp_sh_rd]; + } else { + int warp_id = threadIdx.x / 32; + int n_warps = thread_n_blocks / 4; + + int warp_row = warp_id / n_warps; + + int cur_k = warp_row * 16; + cur_k += k_iter_size * (k % b_sh_wr_iters); + + int k_blocks = cur_k / 16; + // Suppress bogus and persistent divide-by-zero warning + #pragma nv_diagnostic push + #pragma nv_diag_suppress divide_by_zero + int cur_group_id = k_blocks / group_blocks; + #pragma nv_diagnostic pop + + int4* sh_zp_stage = sh_zp + zp_sh_stage * pipe; + + reinterpret_cast(&frag_zpf[k % 2])[0] = + sh_zp_stage[zp_sh_rd + cur_group_id * zp_sh_stride]; + } + } + } }; // Execute the actual tensor core matmul of a sub-tile. auto matmul = [&](int k) { - if constexpr (has_zp) { + if constexpr (has_zp && !is_zp_float) { FragB frag_zp_0; FragB frag_zp_1; int zp_quant_0, zp_quant_1; @@ -1219,10 +1264,14 @@ __global__ void Marlin( frag_b1 = dequant(b_quant_1); // Apply zero-point to frag_b0 - if constexpr (has_zp) { + if constexpr (has_zp && !is_zp_float) { sub_zp(frag_b0, frag_zp[j], 0); } + else if constexpr (has_zp && is_zp_float && group_blocks != -1) { + sub_zp(frag_b0, frag_zpf[k % 2][j], 0); + } + // Apply scale to frag_b0 if constexpr (has_act_order) { scale4(frag_b0, act_frag_s[k % 2][0][j], @@ -1235,10 +1284,14 @@ __global__ void Marlin( } // Apply zero-point to frag_b1 - if constexpr (has_zp) { + if constexpr (has_zp && !is_zp_float) { sub_zp(frag_b1, frag_zp[j], 1); } + else if constexpr (has_zp && is_zp_float && group_blocks != -1) { + sub_zp(frag_b1, frag_zpf[k % 2][j], 1); + } + // Apply scale to frag_b1 if constexpr (has_act_order) { scale4(frag_b1, act_frag_s[k % 2][0][j], @@ -1510,7 +1563,7 @@ __global__ void Marlin( fetch_scales_to_shared(true, g_idx[slice_k_start], g_idx[last_g_idx]); } - if constexpr (has_zp && group_blocks == -1) { + if constexpr (has_zp && !is_zp_float && group_blocks == -1) { if (i == 0) { fetch_zp_to_shared(); } @@ -1697,23 +1750,27 @@ __global__ void Marlin( } #define __CALL_IF(W_TYPE, THREAD_M_BLOCKS, THREAD_N_BLOCKS, THREAD_K_BLOCKS, \ - HAS_ACT_ORDER, HAS_ZP, GROUP_BLOCKS, NUM_THREADS) \ + HAS_ACT_ORDER, HAS_ZP, GROUP_BLOCKS, NUM_THREADS, \ + IS_ZP_FLOAT) \ else if (q_type == W_TYPE && thread_m_blocks == THREAD_M_BLOCKS && \ thread_n_blocks == THREAD_N_BLOCKS && \ thread_k_blocks == THREAD_K_BLOCKS && \ has_act_order == HAS_ACT_ORDER && has_zp == HAS_ZP && \ - group_blocks == GROUP_BLOCKS && num_threads == NUM_THREADS) { \ - cudaFuncSetAttribute( \ - Marlin, \ - cudaFuncAttributeMaxDynamicSharedMemorySize, max_shared_mem); \ - Marlin \ - <<>>( \ - A_ptr, B_ptr, C_ptr, C_tmp_ptr, s_ptr, zp_ptr, g_idx_ptr, \ - num_groups, prob_m, prob_n, prob_k, locks, use_fp32_reduce); \ + group_blocks == GROUP_BLOCKS && num_threads == NUM_THREADS && \ + is_zp_float == IS_ZP_FLOAT) { \ + if constexpr (!IS_ZP_FLOAT || 
std::is_same::value) { \ + cudaFuncSetAttribute( \ + Marlin, \ + cudaFuncAttributeMaxDynamicSharedMemorySize, max_shared_mem); \ + Marlin \ + <<>>( \ + A_ptr, B_ptr, C_ptr, C_tmp_ptr, s_ptr, zp_ptr, g_idx_ptr, \ + num_groups, prob_m, prob_n, prob_k, locks, use_fp32_reduce); \ + } \ } typedef struct { @@ -1905,51 +1962,96 @@ exec_config_t determine_thread_config(int prob_m, int prob_n, int prob_k, } #define GPTQ_CALL_IF(W_TYPE, N_BLOCKS, K_BLOCKS, NUM_THREADS) \ - __CALL_IF(W_TYPE, 1, N_BLOCKS, K_BLOCKS, true, false, 0, NUM_THREADS) \ - __CALL_IF(W_TYPE, 2, N_BLOCKS, K_BLOCKS, true, false, 0, NUM_THREADS) \ - __CALL_IF(W_TYPE, 3, N_BLOCKS, K_BLOCKS, true, false, 0, NUM_THREADS) \ - __CALL_IF(W_TYPE, 4, N_BLOCKS, K_BLOCKS, true, false, 0, NUM_THREADS) \ + __CALL_IF(W_TYPE, 1, N_BLOCKS, K_BLOCKS, true, false, 0, NUM_THREADS, \ + false) \ + __CALL_IF(W_TYPE, 2, N_BLOCKS, K_BLOCKS, true, false, 0, NUM_THREADS, \ + false) \ + __CALL_IF(W_TYPE, 3, N_BLOCKS, K_BLOCKS, true, false, 0, NUM_THREADS, \ + false) \ + __CALL_IF(W_TYPE, 4, N_BLOCKS, K_BLOCKS, true, false, 0, NUM_THREADS, \ + false) \ \ - __CALL_IF(W_TYPE, 1, N_BLOCKS, K_BLOCKS, false, false, -1, NUM_THREADS) \ - __CALL_IF(W_TYPE, 1, N_BLOCKS, K_BLOCKS, false, false, 2, NUM_THREADS) \ - __CALL_IF(W_TYPE, 1, N_BLOCKS, K_BLOCKS, false, false, 4, NUM_THREADS) \ - __CALL_IF(W_TYPE, 1, N_BLOCKS, K_BLOCKS, false, false, 8, NUM_THREADS) \ + __CALL_IF(W_TYPE, 1, N_BLOCKS, K_BLOCKS, false, false, -1, NUM_THREADS, \ + false) \ + __CALL_IF(W_TYPE, 1, N_BLOCKS, K_BLOCKS, false, false, 2, NUM_THREADS, \ + false) \ + __CALL_IF(W_TYPE, 1, N_BLOCKS, K_BLOCKS, false, false, 4, NUM_THREADS, \ + false) \ + __CALL_IF(W_TYPE, 1, N_BLOCKS, K_BLOCKS, false, false, 8, NUM_THREADS, \ + false) \ \ - __CALL_IF(W_TYPE, 2, N_BLOCKS, K_BLOCKS, false, false, -1, NUM_THREADS) \ - __CALL_IF(W_TYPE, 2, N_BLOCKS, K_BLOCKS, false, false, 2, NUM_THREADS) \ - __CALL_IF(W_TYPE, 2, N_BLOCKS, K_BLOCKS, false, false, 4, NUM_THREADS) \ - __CALL_IF(W_TYPE, 2, N_BLOCKS, K_BLOCKS, false, false, 8, NUM_THREADS) \ + __CALL_IF(W_TYPE, 2, N_BLOCKS, K_BLOCKS, false, false, -1, NUM_THREADS, \ + false) \ + __CALL_IF(W_TYPE, 2, N_BLOCKS, K_BLOCKS, false, false, 2, NUM_THREADS, \ + false) \ + __CALL_IF(W_TYPE, 2, N_BLOCKS, K_BLOCKS, false, false, 4, NUM_THREADS, \ + false) \ + __CALL_IF(W_TYPE, 2, N_BLOCKS, K_BLOCKS, false, false, 8, NUM_THREADS, \ + false) \ \ - __CALL_IF(W_TYPE, 3, N_BLOCKS, K_BLOCKS, false, false, -1, NUM_THREADS) \ - __CALL_IF(W_TYPE, 3, N_BLOCKS, K_BLOCKS, false, false, 2, NUM_THREADS) \ - __CALL_IF(W_TYPE, 3, N_BLOCKS, K_BLOCKS, false, false, 4, NUM_THREADS) \ - __CALL_IF(W_TYPE, 3, N_BLOCKS, K_BLOCKS, false, false, 8, NUM_THREADS) \ + __CALL_IF(W_TYPE, 3, N_BLOCKS, K_BLOCKS, false, false, -1, NUM_THREADS, \ + false) \ + __CALL_IF(W_TYPE, 3, N_BLOCKS, K_BLOCKS, false, false, 2, NUM_THREADS, \ + false) \ + __CALL_IF(W_TYPE, 3, N_BLOCKS, K_BLOCKS, false, false, 4, NUM_THREADS, \ + false) \ + __CALL_IF(W_TYPE, 3, N_BLOCKS, K_BLOCKS, false, false, 8, NUM_THREADS, \ + false) \ \ - __CALL_IF(W_TYPE, 4, N_BLOCKS, K_BLOCKS, false, false, -1, NUM_THREADS) \ - __CALL_IF(W_TYPE, 4, N_BLOCKS, K_BLOCKS, false, false, 2, NUM_THREADS) \ - __CALL_IF(W_TYPE, 4, N_BLOCKS, K_BLOCKS, false, false, 4, NUM_THREADS) \ - __CALL_IF(W_TYPE, 4, N_BLOCKS, K_BLOCKS, false, false, 8, NUM_THREADS) + __CALL_IF(W_TYPE, 4, N_BLOCKS, K_BLOCKS, false, false, -1, NUM_THREADS, \ + false) \ + __CALL_IF(W_TYPE, 4, N_BLOCKS, K_BLOCKS, false, false, 2, NUM_THREADS, \ + false) \ + __CALL_IF(W_TYPE, 4, N_BLOCKS, 
K_BLOCKS, false, false, 4, NUM_THREADS, \ + false) \ + __CALL_IF(W_TYPE, 4, N_BLOCKS, K_BLOCKS, false, false, 8, NUM_THREADS, \ + false) #define AWQ_CALL_IF(W_TYPE, N_BLOCKS, K_BLOCKS, NUM_THREADS) \ - __CALL_IF(W_TYPE, 1, N_BLOCKS, K_BLOCKS, false, true, -1, NUM_THREADS) \ - __CALL_IF(W_TYPE, 1, N_BLOCKS, K_BLOCKS, false, true, 2, NUM_THREADS) \ - __CALL_IF(W_TYPE, 1, N_BLOCKS, K_BLOCKS, false, true, 4, NUM_THREADS) \ - __CALL_IF(W_TYPE, 1, N_BLOCKS, K_BLOCKS, false, true, 8, NUM_THREADS) \ + __CALL_IF(W_TYPE, 1, N_BLOCKS, K_BLOCKS, false, true, -1, NUM_THREADS, \ + false) \ + __CALL_IF(W_TYPE, 1, N_BLOCKS, K_BLOCKS, false, true, 2, NUM_THREADS, \ + false) \ + __CALL_IF(W_TYPE, 1, N_BLOCKS, K_BLOCKS, false, true, 4, NUM_THREADS, \ + false) \ + __CALL_IF(W_TYPE, 1, N_BLOCKS, K_BLOCKS, false, true, 8, NUM_THREADS, \ + false) \ \ - __CALL_IF(W_TYPE, 2, N_BLOCKS, K_BLOCKS, false, true, -1, NUM_THREADS) \ - __CALL_IF(W_TYPE, 2, N_BLOCKS, K_BLOCKS, false, true, 2, NUM_THREADS) \ - __CALL_IF(W_TYPE, 2, N_BLOCKS, K_BLOCKS, false, true, 4, NUM_THREADS) \ - __CALL_IF(W_TYPE, 2, N_BLOCKS, K_BLOCKS, false, true, 8, NUM_THREADS) \ + __CALL_IF(W_TYPE, 2, N_BLOCKS, K_BLOCKS, false, true, -1, NUM_THREADS, \ + false) \ + __CALL_IF(W_TYPE, 2, N_BLOCKS, K_BLOCKS, false, true, 2, NUM_THREADS, \ + false) \ + __CALL_IF(W_TYPE, 2, N_BLOCKS, K_BLOCKS, false, true, 4, NUM_THREADS, \ + false) \ + __CALL_IF(W_TYPE, 2, N_BLOCKS, K_BLOCKS, false, true, 8, NUM_THREADS, \ + false) \ \ - __CALL_IF(W_TYPE, 3, N_BLOCKS, K_BLOCKS, false, true, -1, NUM_THREADS) \ - __CALL_IF(W_TYPE, 3, N_BLOCKS, K_BLOCKS, false, true, 2, NUM_THREADS) \ - __CALL_IF(W_TYPE, 3, N_BLOCKS, K_BLOCKS, false, true, 4, NUM_THREADS) \ - __CALL_IF(W_TYPE, 3, N_BLOCKS, K_BLOCKS, false, true, 8, NUM_THREADS) \ + __CALL_IF(W_TYPE, 3, N_BLOCKS, K_BLOCKS, false, true, -1, NUM_THREADS, \ + false) \ + __CALL_IF(W_TYPE, 3, N_BLOCKS, K_BLOCKS, false, true, 2, NUM_THREADS, \ + false) \ + __CALL_IF(W_TYPE, 3, N_BLOCKS, K_BLOCKS, false, true, 4, NUM_THREADS, \ + false) \ + __CALL_IF(W_TYPE, 3, N_BLOCKS, K_BLOCKS, false, true, 8, NUM_THREADS, \ + false) \ \ - __CALL_IF(W_TYPE, 4, N_BLOCKS, K_BLOCKS, false, true, -1, NUM_THREADS) \ - __CALL_IF(W_TYPE, 4, N_BLOCKS, K_BLOCKS, false, true, 2, NUM_THREADS) \ - __CALL_IF(W_TYPE, 4, N_BLOCKS, K_BLOCKS, false, true, 4, NUM_THREADS) \ - __CALL_IF(W_TYPE, 4, N_BLOCKS, K_BLOCKS, false, true, 8, NUM_THREADS) + __CALL_IF(W_TYPE, 4, N_BLOCKS, K_BLOCKS, false, true, -1, NUM_THREADS, \ + false) \ + __CALL_IF(W_TYPE, 4, N_BLOCKS, K_BLOCKS, false, true, 2, NUM_THREADS, \ + false) \ + __CALL_IF(W_TYPE, 4, N_BLOCKS, K_BLOCKS, false, true, 4, NUM_THREADS, \ + false) \ + __CALL_IF(W_TYPE, 4, N_BLOCKS, K_BLOCKS, false, true, 8, NUM_THREADS, false) + + // We currently have 4-bit models only with group_blocks == 4 + #define HQQ_CALL_IF(W_TYPE, N_BLOCKS, K_BLOCKS, NUM_THREADS) \ + __CALL_IF(W_TYPE, 1, N_BLOCKS, K_BLOCKS, false, true, 4, NUM_THREADS, \ + true) \ + __CALL_IF(W_TYPE, 2, N_BLOCKS, K_BLOCKS, false, true, 4, NUM_THREADS, \ + true) \ + __CALL_IF(W_TYPE, 3, N_BLOCKS, K_BLOCKS, false, true, 4, NUM_THREADS, \ + true) \ + __CALL_IF(W_TYPE, 4, N_BLOCKS, K_BLOCKS, false, true, 4, NUM_THREADS, true) template void marlin_mm(const void* A, const void* B, void* C, void* C_tmp, void* s, @@ -1958,7 +2060,7 @@ void marlin_mm(const void* A, const void* B, void* C, void* C_tmp, void* s, vllm::ScalarType const& q_type, bool has_act_order, bool is_k_full, bool has_zp, int num_groups, int group_size, int dev, cudaStream_t stream, int thread_k, int 
thread_n, - int sms, int max_par, bool use_fp32_reduce) { + int sms, int max_par, bool use_fp32_reduce, bool is_zp_float) { if (has_zp) { TORCH_CHECK( q_type == vllm::kU4 || q_type == vllm::kU8, @@ -2111,6 +2213,11 @@ void marlin_mm(const void* A, const void* B, void* C, void* C_tmp, void* s, AWQ_CALL_IF(vllm::kU8, 8, 8, 256) AWQ_CALL_IF(vllm::kU8, 8, 4, 128) AWQ_CALL_IF(vllm::kU8, 4, 8, 128) + + HQQ_CALL_IF(vllm::kU4, 16, 4, 256) + HQQ_CALL_IF(vllm::kU4, 8, 8, 256) + HQQ_CALL_IF(vllm::kU4, 8, 4, 128) + HQQ_CALL_IF(vllm::kU4, 4, 8, 128) else { TORCH_CHECK(false, "Unsupported shapes: MNK = [", prob_m, ", ", prob_n, ", ", prob_k, "]", ", has_act_order = ", has_act_order, @@ -2135,7 +2242,7 @@ torch::Tensor gptq_marlin_gemm(torch::Tensor& a, torch::Tensor& b_q_weight, vllm::ScalarTypeId const& b_q_type_id, int64_t size_m, int64_t size_n, int64_t size_k, bool is_k_full, bool has_zp, - bool use_fp32_reduce) { + bool use_fp32_reduce, bool is_zp_float) { vllm::ScalarType const b_q_type = vllm::ScalarType::from_id(b_q_type_id); if (has_zp) { TORCH_CHECK( @@ -2148,6 +2255,12 @@ torch::Tensor gptq_marlin_gemm(torch::Tensor& a, torch::Tensor& b_q_weight, b_q_type.str()); } + if (has_zp && is_zp_float) { + TORCH_CHECK(a.scalar_type() == at::ScalarType::Half, + "Computation type must be float16 (half) when using float zero " + "points."); + } + int pack_factor = 32 / b_q_type.size_bits(); // Verify A @@ -2257,12 +2370,22 @@ torch::Tensor gptq_marlin_gemm(torch::Tensor& a, torch::Tensor& b_q_weight, if (has_zp) { int rank = b_zeros.sizes().size(); TORCH_CHECK(rank == 2, "b_zeros rank = ", rank, " is not 2"); - TORCH_CHECK(b_zeros.size(0) == num_groups, - "b_zeros dim 0 = ", b_zeros.size(0), - " is not num_groups = ", num_groups); - TORCH_CHECK(b_zeros.size(1) == size_n / pack_factor, - "b_zeros dim 1 = ", b_zeros.size(1), - " is not size_n / pack_factor = ", size_n / pack_factor); + if (is_zp_float) { + TORCH_CHECK(b_zeros.size(1) == size_n, + "b_zeros dim 1 = ", b_zeros.size(1), + " is not size_n = ", size_n); + TORCH_CHECK(num_groups == b_zeros.size(0), + "b_zeros dim 0 = ", b_zeros.size(0), + " is not num_groups = ", num_groups); + TORCH_CHECK(num_groups != -1, "num_groups must be != -1"); + } else { + TORCH_CHECK(b_zeros.size(0) == num_groups, + "b_zeros dim 0 = ", b_zeros.size(0), + " is not num_groups = ", num_groups); + TORCH_CHECK(b_zeros.size(1) == size_n / pack_factor, + "b_zeros dim 1 = ", b_zeros.size(1), + " is not size_n / pack_factor = ", size_n / pack_factor); + } } // Verify workspace size @@ -2282,7 +2405,7 @@ torch::Tensor gptq_marlin_gemm(torch::Tensor& a, torch::Tensor& b_q_weight, a_tmp.data_ptr(), size_m, size_n, size_k, workspace.data_ptr(), b_q_type, has_act_order, is_k_full, has_zp, num_groups, group_size, dev, at::cuda::getCurrentCUDAStream(dev), - thread_k, thread_n, sms, marlin::max_par, use_fp32_reduce); + thread_k, thread_n, sms, marlin::max_par, use_fp32_reduce, is_zp_float); } else if (a.scalar_type() == at::ScalarType::BFloat16) { marlin::marlin_mm( a.data_ptr(), b_q_weight.data_ptr(), @@ -2291,7 +2414,7 @@ torch::Tensor gptq_marlin_gemm(torch::Tensor& a, torch::Tensor& b_q_weight, perm.data_ptr(), a_tmp.data_ptr(), size_m, size_n, size_k, workspace.data_ptr(), b_q_type, has_act_order, is_k_full, has_zp, num_groups, group_size, dev, at::cuda::getCurrentCUDAStream(dev), - thread_k, thread_n, sms, marlin::max_par, use_fp32_reduce); + thread_k, thread_n, sms, marlin::max_par, use_fp32_reduce, is_zp_float); } else { TORCH_CHECK(false, "gpt_marlin_gemm only supports bfloat16 
and float16"); } diff --git a/csrc/torch_bindings.cpp b/csrc/torch_bindings.cpp index e4cc7ec951848..3dccdf61abf3b 100644 --- a/csrc/torch_bindings.cpp +++ b/csrc/torch_bindings.cpp @@ -244,7 +244,7 @@ TORCH_LIBRARY_EXPAND(TORCH_EXTENSION_NAME, ops) { "Tensor b_zeros, Tensor g_idx, Tensor perm, Tensor workspace, " "int b_q_type, " "SymInt size_m, SymInt size_n, SymInt size_k, bool is_k_full, " - "bool has_zp, bool use_fp32_reduce) -> Tensor"); + "bool has_zp, bool use_fp32_reduce, bool is_zp_float) -> Tensor"); // conditionally compiled so impl registration is in source file // gptq_marlin repack from GPTQ. diff --git a/tests/kernels/test_marlin_gemm.py b/tests/kernels/test_marlin_gemm.py index b6dd68cc51a9f..3899ad1a325cf 100644 --- a/tests/kernels/test_marlin_gemm.py +++ b/tests/kernels/test_marlin_gemm.py @@ -29,6 +29,7 @@ marlin_qqq_quantize) from vllm.model_executor.layers.quantization.utils.quant_utils import ( awq_pack, gptq_pack, gptq_quantize_weights, quantize_weights, sort_weights) +from vllm.scalar_type import scalar_types ACT_ORDER_OPTS = [False, True] K_FULL_OPTS = [False, True] @@ -40,6 +41,8 @@ MARLIN_24_K_CHUNKS = [128] MARLIN_24_N_CHUNKS = [512] +HQQ_SUPPORTED_GROUP_SIZES = [64] + MNK_FACTORS = [ (1, 1, 1), (1, 4, 8), @@ -226,7 +229,7 @@ def test_gptq_marlin_gemm( torch.ops._C.gptq_marlin_gemm, (a_input, marlin_q_w, marlin_s, marlin_zp, g_idx, sort_indices, workspace.scratch, quant_type.id, a_input.shape[0], b_weight.shape[1], - a_input.shape[1], is_k_full, False, use_fp32_reduce), + a_input.shape[1], is_k_full, False, use_fp32_reduce, False), test_utils=DEFAULT_OPCHECK_TEST_UTILS) output = ops.gptq_marlin_gemm( @@ -244,6 +247,7 @@ def test_gptq_marlin_gemm( is_k_full=is_k_full, has_zp=False, use_fp32_reduce=use_fp32_reduce, + is_zp_float=False, ) output_ref = torch.matmul(a_input, w_ref) @@ -441,6 +445,7 @@ def test_awq_marlin_gemm( is_k_full=is_k_full, has_zp=has_zp, use_fp32_reduce=use_fp32_reduce, + is_zp_float=False, ) output_ref = torch.matmul(a_input, w_ref) @@ -451,6 +456,87 @@ def test_awq_marlin_gemm( assert max_diff < 0.04 +@pytest.mark.skipif(not is_quant_method_supported("gptq_marlin"), + reason="Marlin is not supported on this GPU type.") +@pytest.mark.parametrize("k_chunk", MARLIN_K_CHUNKS) +@pytest.mark.parametrize("n_chunk", MARLIN_N_CHUNKS) +@pytest.mark.parametrize("group_size", HQQ_SUPPORTED_GROUP_SIZES) +@pytest.mark.parametrize("mnk_factors", MNK_FACTORS) +@pytest.mark.parametrize("use_fp32_reduce", USE_FP32_REDUCE_OPTS) +def test_hqq_marlin_gemm( + k_chunk, + n_chunk, + group_size, + mnk_factors, + use_fp32_reduce, +): + m_factor, n_factor, k_factor = mnk_factors + + size_m = m_factor + size_k = k_chunk * k_factor + size_n = n_chunk * n_factor + + quant_type = scalar_types.uint4 + + a_input = rand_data((size_m, size_k)) + dev = a_input.device + + b_weight = torch.randint(0, + 10, (size_n, size_k), + dtype=torch.uint8, + device=dev) + scale = rand_data((size_n, size_k // group_size)) + zero = rand_data((size_n, size_k // group_size)) + + gptq_w_q = gptq_pack(b_weight.transpose(1, 0), 4, size_k, size_n) + + sort_indices = torch.empty(0, dtype=torch.int, device=dev) + marlin_w_q = ops.gptq_marlin_repack(gptq_w_q, sort_indices, size_k, size_n, + 4).to(dev) + marlin_s = marlin_permute_scales(scale.transpose(1, 0), size_k, size_n, + group_size).to(dev) + marlin_zp = marlin_permute_scales(zero.transpose(1, 0), size_k, size_n, + group_size).to(dev) + + g_idx = marlin_make_empty_g_idx(dev) + g_idx_sort_indices = marlin_make_empty_g_idx(dev) + + workspace = 
MarlinWorkspace(size_n, GPTQ_MARLIN_MIN_THREAD_N, + GPTQ_MARLIN_MAX_PARALLEL) + + output = ops.gptq_marlin_gemm( + a_input, + marlin_w_q, + marlin_s, + marlin_zp, + g_idx, + g_idx_sort_indices, + workspace.scratch, + quant_type, + a_input.shape[0], + b_weight.shape[0], + a_input.shape[1], + is_k_full=True, + has_zp=True, + use_fp32_reduce=use_fp32_reduce, + is_zp_float=True, + ) + + b_flat = b_weight.reshape(-1, group_size) + zp_flat = zero.reshape(-1, 1) + s_flat = scale.reshape(-1, 1) + dequant = (b_flat - zp_flat) * s_flat + + output_ref = torch.matmul(a_input, + dequant.reshape(b_weight.shape).transpose(1, 0)) + + torch.cuda.synchronize() + + max_diff = compute_max_diff(output, output_ref) + + assert max_diff < 0.04 + + @pytest.mark.skipif(not is_quant_method_supported("qqq"), reason="Marlin is not supported on this GPU type.") @pytest.mark.parametrize("k_chunk", MARLIN_K_CHUNKS) diff --git a/tests/weight_loading/models.txt b/tests/weight_loading/models.txt index a4ee9538d646b..2afffb5b9d1c8 100644 --- a/tests/weight_loading/models.txt +++ b/tests/weight_loading/models.txt @@ -27,4 +27,5 @@ fp8, neuralmagic/Meta-Llama-3-8B-Instruct-FP8-KV, main marlin, nm-testing/zephyr-beta-7b-marlin-g128, main marlin, robertgshaw2/zephyr-7b-beta-channelwise-marlin, main qqq, HandH1998/QQQ-Llama-3-8b-g128, main -qqq, HandH1998/QQQ-Llama-3-8b, main \ No newline at end of file +qqq, HandH1998/QQQ-Llama-3-8b, main +hqq, nm-testing/Llama-3.2-1B-Instruct-HQQ, main \ No newline at end of file diff --git a/vllm/_custom_ops.py b/vllm/_custom_ops.py index aa89010ca8ecd..782dc6aed1b8c 100644 --- a/vllm/_custom_ops.py +++ b/vllm/_custom_ops.py @@ -343,7 +343,8 @@ def _gptq_marlin_gemm_fake(a: torch.Tensor, size_k: torch.SymInt, is_k_full: bool, has_zp: bool = False, - use_fp32_reduce: bool = False) -> torch.Tensor: + use_fp32_reduce: bool = False, + is_zp_float: bool = False) -> torch.Tensor: return torch.empty((size_m, size_n), device=a.device, dtype=a.dtype) @register_fake("_C::ggml_dequantize") @@ -601,11 +602,12 @@ def gptq_marlin_gemm(a: torch.Tensor, size_k: int, is_k_full: bool, has_zp: bool = False, - use_fp32_reduce: bool = False) -> torch.Tensor: + use_fp32_reduce: bool = False, + is_zp_float: bool = False) -> torch.Tensor: return torch.ops._C.gptq_marlin_gemm(a, b_q_weight, b_scales, b_zeros, g_idx, perm, workspace, b_q_type.id, size_m, size_n, size_k, is_k_full, - has_zp, use_fp32_reduce) + has_zp, use_fp32_reduce, is_zp_float) # fp8 marlin diff --git a/vllm/model_executor/layers/linear.py b/vllm/model_executor/layers/linear.py index 9da38d4857d6d..2471c160d66b7 100644 --- a/vllm/model_executor/layers/linear.py +++ b/vllm/model_executor/layers/linear.py @@ -27,7 +27,8 @@ "AWQLinearMethod", "GPTQMarlinLinearMethod", "Fp8LinearMethod", "MarlinLinearMethod", "QQQLinearMethod", "GPTQMarlin24LinearMethod", "TPUInt8LinearMethod", "GPTQLinearMethod", "FBGEMMFp8LinearMethod", - "ModelOptFp8LinearMethod", "IPEXAWQLinearMethod", "IPEXGPTQLinearMethod" + "ModelOptFp8LinearMethod", "IPEXAWQLinearMethod", "IPEXGPTQLinearMethod", + "HQQMarlinMethod" ] diff --git a/vllm/model_executor/layers/quantization/__init__.py b/vllm/model_executor/layers/quantization/__init__.py index da841d052d728..ff342c4f9479e 100644 --- a/vllm/model_executor/layers/quantization/__init__.py +++ b/vllm/model_executor/layers/quantization/__init__.py @@ -21,6 +21,7 @@ GPTQMarlinConfig) from vllm.model_executor.layers.quantization.gptq_marlin_24 import ( GPTQMarlin24Config) +from vllm.model_executor.layers.quantization.hqq_marlin import 
HQQMarlinConfig from vllm.model_executor.layers.quantization.ipex_quant import IPEXConfig from vllm.model_executor.layers.quantization.marlin import MarlinConfig from vllm.model_executor.layers.quantization.modelopt import ModelOptFp8Config @@ -48,6 +49,7 @@ "compressed-tensors": CompressedTensorsConfig, "bitsandbytes": BitsAndBytesConfig, "qqq": QQQConfig, + "hqq": HQQMarlinConfig, "experts_int8": ExpertsInt8Config, "neuron_quant": NeuronQuantConfig, "ipex": IPEXConfig, diff --git a/vllm/model_executor/layers/quantization/hqq_marlin.py b/vllm/model_executor/layers/quantization/hqq_marlin.py new file mode 100644 index 0000000000000..28538d2993355 --- /dev/null +++ b/vllm/model_executor/layers/quantization/hqq_marlin.py @@ -0,0 +1,325 @@ +from typing import Any, Dict, List, Optional + +import torch + +from vllm import _custom_ops as ops +from vllm.logger import init_logger +from vllm.model_executor.layers.linear import (LinearBase, LinearMethodBase, + UnquantizedLinearMethod) +from vllm.model_executor.layers.quantization.base_config import ( + QuantizationConfig, QuantizeMethodBase) +from vllm.model_executor.layers.quantization.utils.marlin_utils import ( + GPTQ_MARLIN_MAX_PARALLEL, GPTQ_MARLIN_MIN_THREAD_N, + marlin_make_empty_g_idx, marlin_permute_scales) +from vllm.model_executor.layers.quantization.utils.marlin_utils_test import ( + MarlinWorkspace) +from vllm.model_executor.layers.quantization.utils.quant_utils import gptq_pack +from vllm.model_executor.parameter import (BasevLLMParameter, + GroupQuantScaleParameter, + PackedvLLMParameter) +from vllm.scalar_type import scalar_types + +logger = init_logger(__name__) + + +class HQQMarlinConfig(QuantizationConfig): + """Config class for HQQ Marlin""" + + def __init__( + self, + weight_bits: int, + group_size: int, + skip_modules: Optional[List[str]] = None, + ) -> None: + assert group_size == 64, ("The only supported HQQ group size is " + "currently 64.") + assert weight_bits == 4, ("The only supported HQQ quantization " + "bitsize is currently 4.") + + self.weight_bits = weight_bits + self.group_size = group_size + self.pack_factor = 32 // weight_bits # packed into int32 in GPTQ format + self.quant_type = scalar_types.uint4 + self.skip_modules = skip_modules + + def __repr__(self) -> str: + return (f"HQQMarlinConfig(quant_type={self.quant_type}, " + f"group_size={self.group_size})") + + @classmethod + def get_name(cls) -> str: + return "hqq" + + @classmethod + def get_supported_act_dtypes(cls) -> List[torch.dtype]: + return [torch.half, torch.bfloat16] + + @classmethod + def get_min_capability(cls) -> int: + return 80 + + @classmethod + def get_config_filenames(cls) -> List[str]: + return ["quantize_config.json"] + + @classmethod + def from_config(cls, config: Dict[str, Any]) -> "HQQMarlinConfig": + wq_params = (config["quant_config"]["weight_quant_params"]) + weight_bits = cls.get_from_keys(wq_params, ["nbits"]) + group_size = cls.get_from_keys(wq_params, ["group_size"]) + skip_modules = config["skip_modules"] + return cls(weight_bits, group_size, skip_modules) + + def is_layer_skipped(self, prefix: str) -> bool: + # Split the prefix into its dot-separated components + components = prefix.split('.') + + # Check if any of the skip modules exactly matches any component + return self.skip_modules is not None and any( + module_name in components for module_name in self.skip_modules) + + def get_quant_method(self, layer: torch.nn.Module, + prefix: str) -> Optional["QuantizeMethodBase"]: + if isinstance(layer, LinearBase): + if 
self.is_layer_skipped(prefix): + return UnquantizedLinearMethod() + return HQQMarlinMethod(self) + return None + + +# Empty HQQ parameter, will be ignored during loading +class HQQEmptyParameter(BasevLLMParameter): + + def load_merged_column_weight(self, loaded_weight: torch.Tensor, **kwargs): + pass + + def load_row_parallel_weight(self, loaded_weight: torch.Tensor): + pass + + def load_qkv_weight(self, loaded_weight: torch.Tensor, **kwargs): + pass + + +def error_loader(param: torch.Tensor, loaded_weight: torch.Tensor) -> None: + raise ValueError("No loader provided for HQQ parameter!") + + +# HQQ packing creates issues with sharding - therefore, prior to loading, we +# repack to GPTQ. We also reshape the weights to their proper GPTQ shape. +class HQQweightParameter(PackedvLLMParameter): + + # unpack function from https://github.com/mobiusml/hqq + def unpack_4bit_u8(self, + W_q: torch.Tensor) -> torch.Tensor: # uint8/2 > uint8 + assert self.weight_bits == 4, "Unsupported quant bitsize (must be 4)" + + dtype = torch.uint8 + step = W_q.shape[0] + tmp = torch.empty([2 * step, W_q.shape[1]], + dtype=dtype, + device=W_q.device) + tmp[:step] = (W_q & 0b11110000) >> 4 + tmp[step:] = W_q & 0b00001111 + return tmp + + def __init__(self, packed_factor: int, packed_dim: int, weight_bits: int, + **kwargs): + super().__init__(packed_factor, packed_dim, None, **kwargs) + self.weight_bits = weight_bits + self.input_shape = self.shape[self.input_dim] * self.packed_factor + self.output_shape = self.shape[self.output_dim] + + def load_merged_column_weight(self, loaded_weight: torch.Tensor, **kwargs): + loaded_weight = self.unpack_4bit_u8(loaded_weight) + loaded_weight = loaded_weight.reshape(-1, self.input_shape).transpose( + 1, 0) + loaded_weight = gptq_pack(loaded_weight, self.weight_bits, + loaded_weight.shape[0], + loaded_weight.shape[1]) + super().load_merged_column_weight(loaded_weight, **kwargs) + + def load_row_parallel_weight(self, loaded_weight: torch.Tensor): + loaded_weight = self.unpack_4bit_u8(loaded_weight) + loaded_weight = loaded_weight.reshape(self.output_shape, + -1).transpose(1, 0) + loaded_weight = gptq_pack(loaded_weight, self.weight_bits, + loaded_weight.shape[0], + loaded_weight.shape[1]) + super().load_row_parallel_weight(loaded_weight) + + def load_qkv_weight(self, loaded_weight: torch.Tensor, **kwargs): + loaded_weight = self.unpack_4bit_u8(loaded_weight) + loaded_weight = loaded_weight.reshape(-1, self.input_shape).transpose( + 1, 0) + loaded_weight = gptq_pack(loaded_weight, self.weight_bits, + loaded_weight.shape[0], + loaded_weight.shape[1]) + super().load_qkv_weight(loaded_weight, **kwargs) + + +# Zero points and scales in HQQ must also be reshaped to correspond to W_q's +# GPTQ shape (transposed - we transpose them too when processing weights). +class HQQZeroScaleParameter(GroupQuantScaleParameter): + + def load_merged_column_weight(self, loaded_weight: torch.Tensor, **kwargs): + loaded_weight = loaded_weight.reshape(-1, self.shape[1]) + super().load_merged_column_weight(loaded_weight, **kwargs) + + def load_row_parallel_weight(self, loaded_weight: torch.Tensor): + loaded_weight = loaded_weight.reshape(self.shape[0], -1) + super().load_row_parallel_weight(loaded_weight) + + def load_qkv_weight(self, loaded_weight: torch.Tensor, **kwargs): + loaded_weight = loaded_weight.reshape(-1, self.shape[1]) + super().load_qkv_weight(loaded_weight, **kwargs) + + +class HQQMarlinMethod(LinearMethodBase): + """Linear method for HQQ Marlin. 
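The loaders above all funnel through unpack_4bit_u8: HQQ packs two 4-bit values per uint8 along dim 0, with row i in the high nibble and row i + step in the low nibble. A minimal self-contained sketch of that unpacking, using toy values rather than real checkpoint data:

    import torch

    packed = torch.tensor([[0xAB], [0x4F]], dtype=torch.uint8)  # shape (step, cols), step = 2
    step = packed.shape[0]
    unpacked = torch.empty((2 * step, packed.shape[1]), dtype=torch.uint8)
    unpacked[:step] = (packed & 0xF0) >> 4  # high nibbles -> rows 0..step-1: [[0x0A], [0x04]]
    unpacked[step:] = packed & 0x0F         # low nibbles  -> rows step..2*step-1: [[0x0B], [0x0F]]

The unpacked tensor is then reshaped, transposed and re-packed with gptq_pack so that tensor-parallel sharding and the Marlin repack kernel see a standard GPTQ layout.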
+ """ + + def __init__( + self, + quant_config: HQQMarlinConfig, + ): + self.quant_config = quant_config + + def create_weights( + self, + layer: torch.nn.Module, + input_size_per_partition: int, + output_partition_sizes: List[int], + input_size: int, + output_size: int, + params_dtype: torch.dtype, + **extra_weight_attrs, + ) -> None: + self.output_size_per_partition = sum(output_partition_sizes) + self.input_size_per_partition = input_size_per_partition + + weight_loader = extra_weight_attrs.get("weight_loader", error_loader) + + self.scales_and_zp_size = (input_size_per_partition // + self.quant_config.group_size) + + qweight = HQQweightParameter( + data=torch.empty( + self.input_size_per_partition // self.quant_config.pack_factor, + self.output_size_per_partition, + dtype=torch.int32, + ), + input_dim=0, + output_dim=1, + packed_dim=0, + packed_factor=self.quant_config.pack_factor, + weight_bits=self.quant_config.weight_bits, + weight_loader=weight_loader) + + zeros = HQQZeroScaleParameter(data=torch.empty( + self.output_size_per_partition, + self.scales_and_zp_size, + dtype=params_dtype, + ), + input_dim=1, + output_dim=0, + weight_loader=weight_loader) + + scales = HQQZeroScaleParameter(data=torch.empty( + self.output_size_per_partition, + self.scales_and_zp_size, + dtype=params_dtype, + ), + input_dim=1, + output_dim=0, + weight_loader=weight_loader) + + layer.register_parameter("W_q", qweight) + layer.register_parameter("zero", zeros) + layer.register_parameter("scale", scales) + + # Ignore extra parameters in the HQQ model. + # To be added as needed. + ignore_parameters = ("axis", "channel_wise", "compute_dtype", + "encoded_state_dict", "group_size", "nbits", + "offload_meta", "optimize", "packing", + "quant_scale", "quant_zero", "round_zero", + "shape", "stores_quant_config", + "unpack_view_dtype", "view_as_float") + for name in ignore_parameters: + layer.register_parameter( + name, + HQQEmptyParameter(data=torch.empty(0), + weight_loader=weight_loader)) + + def process_weights_after_loading(self, layer: torch.nn.Module) -> None: + dev = layer.W_q.device + + # Repack to Marlin + sort_indices = torch.empty(0, dtype=torch.int, device=dev) + marlin_w_q = ops.gptq_marlin_repack( + layer.W_q, + sort_indices, + self.input_size_per_partition, + self.output_size_per_partition, + self.quant_config.weight_bits, + ).to(dev) + marlin_s = marlin_permute_scales(layer.scale.transpose(1, 0), + self.input_size_per_partition, + self.output_size_per_partition, + self.quant_config.group_size).to(dev) + marlin_zp = marlin_permute_scales(layer.zero.transpose(1, 0), + self.input_size_per_partition, + self.output_size_per_partition, + self.quant_config.group_size).to(dev) + + layer.g_idx = marlin_make_empty_g_idx(dev) + layer.g_idx_sort_indices = marlin_make_empty_g_idx(dev) + + layer.marlin_qweight = marlin_w_q + layer.marlin_zeros = marlin_zp + layer.marlin_scales = marlin_s + + def apply( + self, + layer: torch.nn.Module, + x: torch.Tensor, + bias: Optional[torch.Tensor] = None, + ) -> torch.Tensor: + workspace = MarlinWorkspace(self.output_size_per_partition, + GPTQ_MARLIN_MIN_THREAD_N, + GPTQ_MARLIN_MAX_PARALLEL) + + scales = layer.marlin_scales + zeros = layer.marlin_zeros + orig_type = x.dtype + + if orig_type != torch.float16: + x = x.to(torch.float16) + scales = scales.to(torch.float16) + zeros = zeros.to(torch.float16) + + marlin_out = ops.gptq_marlin_gemm( + x, + layer.marlin_qweight, + scales, + zeros, + layer.g_idx, + layer.g_idx_sort_indices, + workspace.scratch, + scalar_types.uint4, + 
x.shape[0], + self.output_size_per_partition, + self.input_size_per_partition, + True, # is_k_full + True, # has_zp + True, # use 32-bit reduce + True, # use float zp + ) + + if orig_type != torch.float16: + marlin_out = marlin_out.to(orig_type) + + if bias is not None: + marlin_out.add_(bias) + + return marlin_out diff --git a/vllm/model_executor/layers/quantization/utils/marlin_utils.py b/vllm/model_executor/layers/quantization/utils/marlin_utils.py index 9a1defa409714..c9366ca97d149 100644 --- a/vllm/model_executor/layers/quantization/utils/marlin_utils.py +++ b/vllm/model_executor/layers/quantization/utils/marlin_utils.py @@ -303,7 +303,8 @@ def apply_gptq_marlin_linear( size_k=input_size_per_partition, is_k_full=is_k_full, has_zp=False, - use_fp32_reduce=use_fp32_reduce) + use_fp32_reduce=use_fp32_reduce, + is_zp_float=False) if bias is not None: output.add_(bias) # In-place add @@ -340,7 +341,8 @@ def apply_awq_marlin_linear( size_k=input_size_per_partition, is_k_full=True, has_zp=True, - use_fp32_reduce=use_fp32_reduce) + use_fp32_reduce=use_fp32_reduce, + is_zp_float=False) if bias is not None: output.add_(bias) # In-place add From a324d3a1a74ab0a3fafc0f2d19860bd1d1301a85 Mon Sep 17 00:00:00 2001 From: Maximilien de Bayser Date: Tue, 19 Nov 2024 22:16:54 -0300 Subject: [PATCH 041/122] Change granite chat template to keep json list formatting for tool calls (#10452) Signed-off-by: Max de Bayser --- examples/tool_chat_template_granite.jinja | 6 +----- 1 file changed, 1 insertion(+), 5 deletions(-) diff --git a/examples/tool_chat_template_granite.jinja b/examples/tool_chat_template_granite.jinja index 2cc19e77188dc..467dcb2d10237 100644 --- a/examples/tool_chat_template_granite.jinja +++ b/examples/tool_chat_template_granite.jinja @@ -21,11 +21,7 @@ {{- '<|start_of_role|>user<|end_of_role|>' + message['content'] + '<|end_of_text|> ' }} {%- elif message['role'] == 'assistant_tool_call' or (message['role'] == 'assistant' and message.tool_calls is defined) %} - {{- '<|start_of_role|>assistant<|end_of_role|>' }} - {% for tc in message.tool_calls %} - {{- '<|tool_call|> ' + {'name': tc.function.name, 'arguments': tc.function.arguments}|tojson }} - {% endfor %} - {{- '<|end_of_text|> + {{- '<|start_of_role|>assistant<|end_of_role|><|tool_call|>' + message.tool_calls|map(attribute='function')|list|tojson(indent=4) + '<|end_of_text|> ' }} {%- elif message['role'] == 'assistant' %} {{- '<|start_of_role|>assistant<|end_of_role|>' + message['content'] + '<|end_of_text|> From d5b68aba2ff6dd17060a62c0cb799c0acedb524f Mon Sep 17 00:00:00 2001 From: Alexei-V-Ivanov-AMD <156011006+Alexei-V-Ivanov-AMD@users.noreply.github.com> Date: Tue, 19 Nov 2024 19:19:59 -0600 Subject: [PATCH 042/122] [CI/Build] Update Dockerfile.rocm (#10434) Signed-off-by: Alexei V. 
Ivanov --- Dockerfile.rocm | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/Dockerfile.rocm b/Dockerfile.rocm index 8fb79afaebe97..62d4a9b4909c3 100644 --- a/Dockerfile.rocm +++ b/Dockerfile.rocm @@ -51,9 +51,9 @@ RUN --mount=type=cache,target=/root/.cache/pip \ *"rocm-6.2"*) \ python3 -m pip uninstall -y torch torchvision \ && python3 -m pip install --pre \ - torch==2.6.0.dev20240918 \ + torch==2.6.0.dev20241113+rocm6.2 \ 'setuptools-scm>=8' \ - torchvision==0.20.0.dev20240918 \ + torchvision==0.20.0.dev20241113+rocm6.2 \ --extra-index-url https://download.pytorch.org/whl/nightly/rocm6.2;; \ *) ;; esac From d200972e7f4969da50f533b46c856c5ff5a9d27d Mon Sep 17 00:00:00 2001 From: Lucas Wilkinson Date: Tue, 19 Nov 2024 22:40:33 -0500 Subject: [PATCH 043/122] [Bugfix] Marlin 2:4 temp fix for large M dim (>256) (#10464) Signed-off-by: Lucas Wilkinson --- .../marlin/sparse/marlin_24_cuda_kernel.cu | 15 +++++++++++---- tests/kernels/test_marlin_gemm.py | 2 ++ 2 files changed, 13 insertions(+), 4 deletions(-) diff --git a/csrc/quantization/marlin/sparse/marlin_24_cuda_kernel.cu b/csrc/quantization/marlin/sparse/marlin_24_cuda_kernel.cu index a33e2660d760e..8fce76eb52f9b 100644 --- a/csrc/quantization/marlin/sparse/marlin_24_cuda_kernel.cu +++ b/csrc/quantization/marlin/sparse/marlin_24_cuda_kernel.cu @@ -910,13 +910,16 @@ void marlin_cuda_2_4(const void* A, const void* B, const void* meta, void* C, // than better compute utilization thread_k = 128; thread_m = 128; - } else if (prob_n <= 256) { + } else { thread_k = 64; thread_m = 256; - } else { - thread_k = 32; - thread_m = 512; } + // Also had + // if prob_n > 256 + // thread_k = 32; + // thread_m = 512; + // but this is broken, + // TODO(Lucas, Alex M): figure out why } int thread_k_blocks = thread_k / 32; // 2:4 version with m16n8k32 instruction @@ -1079,6 +1082,8 @@ torch::Tensor gptq_marlin_24_gemm(torch::Tensor& a, torch::Tensor& b_q_weight, // Verify A device and strides TORCH_CHECK(a.device().is_cuda(), "A is not on GPU"); TORCH_CHECK(a.is_contiguous(), "A is not contiguous"); + TORCH_CHECK(a.dtype() == torch::kFloat16, + "A is not float16, currently only float16 is supported"); // Verify B device and strides TORCH_CHECK(b_q_weight.device().is_cuda(), "b_q_weight is not on GPU"); @@ -1091,6 +1096,8 @@ torch::Tensor gptq_marlin_24_gemm(torch::Tensor& a, torch::Tensor& b_q_weight, // Verify scales device and strides TORCH_CHECK(b_scales.device().is_cuda(), "b_scales is not on GPU"); TORCH_CHECK(b_scales.is_contiguous(), "b_scales is not contiguous"); + TORCH_CHECK(b_scales.dtype() == torch::kFloat16, + "A is not float16, currently only float16 is supported"); // Alloc C matrix const at::cuda::OptionalCUDAGuard device_guard(device_of(a)); diff --git a/tests/kernels/test_marlin_gemm.py b/tests/kernels/test_marlin_gemm.py index 3899ad1a325cf..5e047f4b099f1 100644 --- a/tests/kernels/test_marlin_gemm.py +++ b/tests/kernels/test_marlin_gemm.py @@ -50,6 +50,8 @@ (13, 17, 67), (26, 37, 13), (67, 13, 11), + (257, 13, 11), + (658, 13, 11), ] DTYPES = [torch.float16, torch.bfloat16] From 9e05252b46a92a5d14e4e6fd02b75383c5cf243b Mon Sep 17 00:00:00 2001 From: Yanyi Liu Date: Wed, 20 Nov 2024 12:44:57 +0800 Subject: [PATCH 044/122] [Misc] Add __setitem__ for LazyDict (#10469) Signed-off-by: Yanyi Liu --- vllm/utils.py | 3 +++ 1 file changed, 3 insertions(+) diff --git a/vllm/utils.py b/vllm/utils.py index 5d0514cd9d168..2bbdc8d1ebde8 100644 --- a/vllm/utils.py +++ b/vllm/utils.py @@ -1491,6 +1491,9 @@ def __getitem__(self, key) -> 
T: self._dict[key] = self._factory[key]() return self._dict[key] + def __setitem__(self, key: str, value: Callable[[], T]): + self._factory[key] = value + def __iter__(self): return iter(self._factory) From ad44437ba33e8d31962d272be238eeed4a1b4f84 Mon Sep 17 00:00:00 2001 From: Isotr0py <2037008807@qq.com> Date: Wed, 20 Nov 2024 13:04:05 +0800 Subject: [PATCH 045/122] [Bugfix] Fix Mamba model initialization and MLP Speculator weights loading (#10456) Signed-off-by: Isotr0py <2037008807@qq.com> --- vllm/model_executor/models/mamba.py | 8 ++------ vllm/model_executor/models/mlp_speculator.py | 3 ++- 2 files changed, 4 insertions(+), 7 deletions(-) diff --git a/vllm/model_executor/models/mamba.py b/vllm/model_executor/models/mamba.py index 405b8f7787ba8..ac0d265a961f0 100644 --- a/vllm/model_executor/models/mamba.py +++ b/vllm/model_executor/models/mamba.py @@ -1,5 +1,5 @@ """PyTorch MAMBA model.""" -from typing import Iterable, List, Optional, Set, Tuple +from typing import Iterable, List, Optional, Tuple import torch from torch import nn @@ -243,10 +243,8 @@ def sample( next_tokens = self.sampler(logits, sampling_metadata) return next_tokens - def load_weights(self, weights: Iterable[Tuple[str, - torch.Tensor]]) -> Set[str]: + def load_weights(self, weights: Iterable[Tuple[str, torch.Tensor]]): params_dict = dict(self.named_parameters()) - loaded_params: Set[str] = set() for name, loaded_weight in weights: if "A_log" in name: name = name.replace("A_log", "A") @@ -258,5 +256,3 @@ def load_weights(self, weights: Iterable[Tuple[str, weight_loader = getattr(param, "weight_loader", default_weight_loader) weight_loader(param, loaded_weight) - loaded_params.add(name) - return loaded_params diff --git a/vllm/model_executor/models/mlp_speculator.py b/vllm/model_executor/models/mlp_speculator.py index f2aa2653c4f5c..d49da5f29aa14 100644 --- a/vllm/model_executor/models/mlp_speculator.py +++ b/vllm/model_executor/models/mlp_speculator.py @@ -193,7 +193,8 @@ def load_weights(self, weights: Iterable[Tuple[str, params_dict = dict(self.named_parameters()) loaded_params: Set[str] = set() for name, loaded_weight in weights: - param = params_dict.get(name.replace("speculator.", "")) + name = name.replace("speculator.", "") + param = params_dict.get(name) if param is not None: weight_loader = getattr(param, "weight_loader", default_weight_loader) From b4be5a8adba95020187ae3cb43a7db7eef20c0ff Mon Sep 17 00:00:00 2001 From: Cyrus Leung Date: Wed, 20 Nov 2024 13:12:51 +0800 Subject: [PATCH 046/122] [Bugfix] Enforce no chunked prefill for embedding models (#10470) Signed-off-by: DarkLight1337 --- docs/source/serving/compatibility_matrix.rst | 69 ++++++++++++++++---- vllm/engine/arg_utils.py | 6 +- 2 files changed, 60 insertions(+), 15 deletions(-) diff --git a/docs/source/serving/compatibility_matrix.rst b/docs/source/serving/compatibility_matrix.rst index f629b3ca78318..5fc86ab0a11d5 100644 --- a/docs/source/serving/compatibility_matrix.rst +++ b/docs/source/serving/compatibility_matrix.rst @@ -39,12 +39,13 @@ Feature x Feature - :abbr:`prmpt adptr (Prompt Adapter)` - :ref:`SD ` - CUDA graph + - :abbr:`emd (Embedding Models)` - :abbr:`enc-dec (Encoder-Decoder Models)` - :abbr:`logP (Logprobs)` - :abbr:`prmpt logP (Prompt Logprobs)` - :abbr:`async output (Async Output Processing)` - multi-step - - :abbr:`MM (Multimodal)` + - :abbr:`mm (Multimodal)` - best-of - beam-search - :abbr:`guided dec (Guided Decoding)` @@ -64,6 +65,7 @@ Feature x Feature - - - + - * - :ref:`APC ` - ✅ - @@ -80,6 +82,7 @@ Feature x Feature 
- - - + - * - :ref:`LoRA ` - `✗ `__ - ✅ @@ -96,6 +99,7 @@ Feature x Feature - - - + - * - :abbr:`prmpt adptr (Prompt Adapter)` - ✅ - ✅ @@ -112,6 +116,7 @@ Feature x Feature - - - + - * - :ref:`SD ` - ✗ - ✅ @@ -128,6 +133,7 @@ Feature x Feature - - - + - * - CUDA graph - ✅ - ✅ @@ -144,6 +150,24 @@ Feature x Feature - - - + - + * - :abbr:`emd (Embedding Models)` + - ✗ + - ✗ + - ✗ + - ✗ + - ✗ + - ✗ + - + - + - + - + - + - + - + - + - + - * - :abbr:`enc-dec (Encoder-Decoder Models)` - ✗ - `✗ `__ @@ -151,6 +175,7 @@ Feature x Feature - ✗ - `✗ `__ - ✅ + - ✅ - - - @@ -166,7 +191,8 @@ Feature x Feature - ✅ - ✅ - ✅ - - ✅ + - ✅ + - ✗ - ✅ - - @@ -183,7 +209,8 @@ Feature x Feature - ✅ - `✗ `__ - ✅ - - ✅ + - ✗ + - ✅ - ✅ - - @@ -199,6 +226,7 @@ Feature x Feature - ✅ - ✗ - ✅ + - ✗ - ✗ - ✅ - ✅ @@ -215,6 +243,7 @@ Feature x Feature - ✅ - ✗ - ✅ + - ✗ - ✗ - ✅ - `✗ `__ @@ -224,14 +253,15 @@ Feature x Feature - - - - * - :abbr:`MM (Multimodal)` - - `✗ `__ + * - :abbr:`mm (Multimodal)` + - ✅ - `✗ `__ - `✗ `__ - ? - ? - ✅ - - ✗ + - ✅ + - ✅ - ✅ - ✅ - ✅ @@ -247,6 +277,7 @@ Feature x Feature - ✅ - `✗ `__ - ✅ + - ✗ - ✅ - ✅ - ✅ @@ -263,6 +294,7 @@ Feature x Feature - ✅ - `✗ `__ - ✅ + - ✗ - ✅ - ✅ - ✅ @@ -279,6 +311,7 @@ Feature x Feature - ? - ✅ - ✅ + - ✗ - ? - ✅ - ✅ @@ -353,6 +386,14 @@ Feature x Hardware - ✅ - ✗ - ✅ + * - :abbr:`emd (Embedding Models)` + - ✅ + - ✅ + - ✅ + - ✅ + - ✅ + - ✅ + - ✗ * - :abbr:`enc-dec (Encoder-Decoder Models)` - ✅ - ✅ @@ -361,7 +402,7 @@ Feature x Hardware - ✅ - ✅ - ✗ - * - :abbr:`logP (Logprobs)` + * - :abbr:`mm (Multimodal)` - ✅ - ✅ - ✅ @@ -369,7 +410,7 @@ Feature x Hardware - ✅ - ✅ - ✅ - * - :abbr:`prmpt logP (Prompt Logprobs)` + * - :abbr:`logP (Logprobs)` - ✅ - ✅ - ✅ @@ -377,29 +418,29 @@ Feature x Hardware - ✅ - ✅ - ✅ - * - :abbr:`async output (Async Output Processing)` + * - :abbr:`prmpt logP (Prompt Logprobs)` - ✅ - ✅ - ✅ - ✅ - ✅ - - ✗ - - ✗ - * - multi-step - ✅ - ✅ + * - :abbr:`async output (Async Output Processing)` - ✅ - ✅ - ✅ - - `✗ `__ - ✅ - * - :abbr:`MM (Multimodal)` - ✅ + - ✗ + - ✗ + * - multi-step - ✅ - ✅ - ✅ - ✅ - ✅ + - `✗ `__ - ✅ * - best-of - ✅ diff --git a/vllm/engine/arg_utils.py b/vllm/engine/arg_utils.py index a3ae1889774f3..9288cd22c0036 100644 --- a/vllm/engine/arg_utils.py +++ b/vllm/engine/arg_utils.py @@ -1014,7 +1014,8 @@ def create_engine_config(self) -> VllmConfig: use_spec_decode = self.speculative_model is not None if (is_gpu and not use_sliding_window and not use_spec_decode and not self.enable_lora - and not self.enable_prompt_adapter): + and not self.enable_prompt_adapter + and model_config.task != "embedding"): self.enable_chunked_prefill = True logger.warning( "Chunked prefill is enabled by default for models with " @@ -1031,6 +1032,9 @@ def create_engine_config(self) -> VllmConfig: "errors during the initial memory profiling phase, or result " "in low performance due to small KV cache space. 
Consider " "setting --max-model-len to a smaller value.", max_model_len) + elif self.enable_chunked_prefill and model_config.task == "embedding": + msg = "Chunked prefill is not supported for embedding models" + raise ValueError(msg) speculative_config = SpeculativeConfig.maybe_create_spec_config( target_model_config=model_config, From 709c9f1f257fd15545ad19b89ed5019cb5ea338b Mon Sep 17 00:00:00 2001 From: Rafael Vasquez Date: Wed, 20 Nov 2024 00:35:31 -0500 Subject: [PATCH 047/122] [CI/Build] Add sphinx/rst linter for docs (#10366) --- .github/workflows/sphinx-lint.yml | 32 +++++++++++++++++++++++++++++++ format.sh | 6 ++++++ requirements-lint.txt | 1 + tools/sphinx-lint.sh | 3 +++ 4 files changed, 42 insertions(+) create mode 100644 .github/workflows/sphinx-lint.yml create mode 100755 tools/sphinx-lint.sh diff --git a/.github/workflows/sphinx-lint.yml b/.github/workflows/sphinx-lint.yml new file mode 100644 index 0000000000000..e0bb24276a653 --- /dev/null +++ b/.github/workflows/sphinx-lint.yml @@ -0,0 +1,32 @@ +name: Lint documentation + +on: + push: + branches: + - main + paths: + - "docs/**" + pull_request: + branches: + - main + paths: + - "docs/**" + +jobs: + sphinx-lint: + runs-on: ubuntu-latest + strategy: + matrix: + python-version: ["3.12"] + steps: + - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + - name: Set up Python ${{ matrix.python-version }} + uses: actions/setup-python@0b93645e9fea7318ecaed2b359559ac225c90a2b # v5.3.0 + with: + python-version: ${{ matrix.python-version }} + - name: Install dependencies + run: | + python -m pip install --upgrade pip + pip install -r requirements-lint.txt + - name: Linting docs + run: tools/sphinx-lint.sh diff --git a/format.sh b/format.sh index b3dcdc15bf948..0b196de9d0773 100755 --- a/format.sh +++ b/format.sh @@ -41,6 +41,7 @@ MYPY_VERSION=$(mypy --version | awk '{print $2}') CODESPELL_VERSION=$(codespell --version) ISORT_VERSION=$(isort --vn) CLANGFORMAT_VERSION=$(clang-format --version | awk '{print $3}') +SPHINX_LINT_VERSION=$(sphinx-lint --version | awk '{print $2}') # # params: tool name, tool version, required version tool_version_check() { @@ -57,6 +58,7 @@ tool_version_check "mypy" "$MYPY_VERSION" tool_version_check "isort" "$ISORT_VERSION" tool_version_check "codespell" "$CODESPELL_VERSION" tool_version_check "clang-format" "$CLANGFORMAT_VERSION" +tool_version_check "sphinx-lint" "$SPHINX_LINT_VERSION" YAPF_FLAGS=( '--recursive' @@ -313,3 +315,7 @@ if ! git diff --quiet &>/dev/null; then else echo "✨🎉 Format check passed! Congratulations! 
🎉✨" fi + +echo 'vLLM sphinx-lint:' +tools/sphinx-lint.sh +echo 'vLLM sphinx-lint: Done' diff --git a/requirements-lint.txt b/requirements-lint.txt index f9132bbf96437..711bb50a0e936 100644 --- a/requirements-lint.txt +++ b/requirements-lint.txt @@ -6,6 +6,7 @@ ruff==0.6.5 codespell==2.3.0 isort==5.13.2 clang-format==18.1.5 +sphinx-lint==1.0.0 # type checking mypy==1.11.1 diff --git a/tools/sphinx-lint.sh b/tools/sphinx-lint.sh new file mode 100755 index 0000000000000..04f8075c5527f --- /dev/null +++ b/tools/sphinx-lint.sh @@ -0,0 +1,3 @@ +#!/bin/bash + +sphinx-lint --disable trailing-whitespace,missing-final-newline docs From 7629a9c6e5e29d60be9ef60e4afb9842effcdc73 Mon Sep 17 00:00:00 2001 From: wchen61 Date: Wed, 20 Nov 2024 13:35:50 +0800 Subject: [PATCH 048/122] [CI/Build] Support compilation with local cutlass path (#10423) (#10424) --- CMakeLists.txt | 17 +++++++++++++++-- docs/source/getting_started/installation.rst | 12 ++++++++++++ 2 files changed, 27 insertions(+), 2 deletions(-) diff --git a/CMakeLists.txt b/CMakeLists.txt index 5acbd762ee957..bfe435937e3bb 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -206,7 +206,19 @@ if(VLLM_GPU_LANG STREQUAL "CUDA") # Set CUTLASS_REVISION manually -- its revision detection doesn't work in this case. set(CUTLASS_REVISION "v3.5.1" CACHE STRING "CUTLASS revision to use") - FetchContent_Declare( + # Use the specified CUTLASS source directory for compilation if VLLM_CUTLASS_SRC_DIR is provided + if (DEFINED ENV{VLLM_CUTLASS_SRC_DIR}) + set(VLLM_CUTLASS_SRC_DIR $ENV{VLLM_CUTLASS_SRC_DIR}) + endif() + + if(VLLM_CUTLASS_SRC_DIR) + if(NOT IS_ABSOLUTE VLLM_CUTLASS_SRC_DIR) + get_filename_component(VLLM_CUTLASS_SRC_DIR "${VLLM_CUTLASS_SRC_DIR}" ABSOLUTE) + endif() + message(STATUS "The VLLM_CUTLASS_SRC_DIR is set, using ${VLLM_CUTLASS_SRC_DIR} for compilation") + FetchContent_Declare(cutlass SOURCE_DIR ${VLLM_CUTLASS_SRC_DIR}) + else() + FetchContent_Declare( cutlass GIT_REPOSITORY https://github.com/nvidia/cutlass.git GIT_TAG v3.5.1 @@ -216,7 +228,8 @@ if(VLLM_GPU_LANG STREQUAL "CUDA") # Important: If GIT_SHALLOW is enabled then GIT_TAG works only with branch names and tags. # So if the GIT_TAG above is updated to a commit hash, GIT_SHALLOW must be set to FALSE GIT_SHALLOW TRUE - ) + ) + endif() FetchContent_MakeAvailable(cutlass) list(APPEND VLLM_EXT_SRC diff --git a/docs/source/getting_started/installation.rst b/docs/source/getting_started/installation.rst index f02626bda4c64..e3dbbc9affe66 100644 --- a/docs/source/getting_started/installation.rst +++ b/docs/source/getting_started/installation.rst @@ -170,6 +170,18 @@ To build vLLM using an existing PyTorch installation: $ pip install -e . --no-build-isolation +Use the local cutlass for compilation +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +Currently, before starting the build process, vLLM fetches cutlass code from GitHub. However, there may be scenarios where you want to use a local version of cutlass instead. +To achieve this, you can set the environment variable VLLM_CUTLASS_SRC_DIR to point to your local cutlass directory. + +.. code-block:: console + + $ git clone https://github.com/vllm-project/vllm.git + $ cd vllm + $ VLLM_CUTLASS_SRC_DIR=/path/to/cutlass pip install -e . + + Troubleshooting ~~~~~~~~~~~~~~~ From ed701ca9637306a44ba8403ba9e85be024e0dafd Mon Sep 17 00:00:00 2001 From: "Kevin H. 
Luu" Date: Tue, 19 Nov 2024 19:36:03 -1000 Subject: [PATCH 049/122] [ci/build] Combine nightly and optional (#10465) --- .buildkite/test-pipeline.yaml | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/.buildkite/test-pipeline.yaml b/.buildkite/test-pipeline.yaml index 24bf223fb12c0..501743c887596 100644 --- a/.buildkite/test-pipeline.yaml +++ b/.buildkite/test-pipeline.yaml @@ -9,8 +9,7 @@ # label(str): the name of the test. emoji allowed. # fast_check(bool): whether to run this on each commit on fastcheck pipeline. # fast_check_only(bool): run this test on fastcheck pipeline only -# nightly(bool): run this test in nightly pipeline only -# optional(bool): never run this test by default (i.e. need to unblock manually) +# optional(bool): never run this test by default (i.e. need to unblock manually) unless it's scheduled nightly run. # command(str): the single command to run for tests. incompatible with commands. # commands(list): the list of commands to run for test. incompatbile with command. # mirror_hardwares(list): the list of hardwares to run the test on as well. currently only supports [amd] @@ -336,7 +335,7 @@ steps: - pytest -v -s models/embedding/vision_language -m core_model - label: Language Models Test (Extended) # 50min - nightly: true + optional: true source_file_dependencies: - vllm/ - tests/models/decoder_only/language @@ -362,7 +361,7 @@ steps: - pytest -v -s models/encoder_decoder/vision_language -m core_model - label: Multi-Modal Models Test (Extended) # 1h15m - nightly: true + optional: true source_file_dependencies: - vllm/ - tests/models/decoder_only/audio_language @@ -513,6 +512,7 @@ steps: - label: Distributed Tests (A100) # optional gpu: a100 + optional: true num_gpus: 4 source_file_dependencies: - vllm/ @@ -526,6 +526,7 @@ steps: - label: LM Eval Large Models # optional gpu: a100 + optional: true num_gpus: 4 working_dir: "/vllm-workspace/.buildkite/lm-eval-harness" source_file_dependencies: From 343041c4c4db93b4693ba437df7ae8bea485d18e Mon Sep 17 00:00:00 2001 From: Sky Lee <46676799+skylee-01@users.noreply.github.com> Date: Wed, 20 Nov 2024 14:05:55 +0800 Subject: [PATCH 050/122] [model] Reduce medusa weight (#10454) Signed-off-by: skylee-01 <497627264@qq.com> --- vllm/model_executor/models/medusa.py | 22 ++++++++++++++++++---- 1 file changed, 18 insertions(+), 4 deletions(-) diff --git a/vllm/model_executor/models/medusa.py b/vllm/model_executor/models/medusa.py index b4ed6538bddac..66bdcb89a0213 100644 --- a/vllm/model_executor/models/medusa.py +++ b/vllm/model_executor/models/medusa.py @@ -61,14 +61,25 @@ def __init__(self, *, vllm_config: VllmConfig, prefix: str = "") -> None: self.truncated_vocab_size = config.truncated_vocab_size self.unpadded_vocab_size = self.truncated_vocab_size - self.lm_heads = nn.ModuleList([ - ParallelLMHead( + if getattr(config, "original_lm_head", False): + self.lm_head = ParallelLMHead( self.unpadded_vocab_size, config.hidden_size, org_num_embeddings=self.truncated_vocab_size, padding_size=DEFAULT_VOCAB_PADDING_SIZE, - ) for _ in range(self.config.num_heads) - ]) + ) + self.lm_heads = [ + self.lm_head for _ in range(self.config.num_heads) + ] + else: + self.lm_heads = nn.ModuleList([ + ParallelLMHead( + self.unpadded_vocab_size, + config.hidden_size, + org_num_embeddings=self.truncated_vocab_size, + padding_size=DEFAULT_VOCAB_PADDING_SIZE, + ) for _ in range(self.config.num_heads) + ]) logit_scale = getattr(config, "logit_scale", 1.0) self.logits_processor = LogitsProcessor(self.unpadded_vocab_size, @@ 
-172,6 +183,9 @@ def load_weights(self, weights: Iterable[Tuple[str, requires_grad=False) elif name in params_dict: weights_map[name] = loaded_weight + elif (getattr(self.config, "original_lm_head", False) + and name == "lm_heads.0.weight"): + weights_map["lm_head.weight"] = loaded_weight for name, loaded_weight in weights_map.items(): if "lm_head" in name and self.token_map is not None and\ From 09dbf9ff16410d0f83adcc9705764ea1c7f5f017 Mon Sep 17 00:00:00 2001 From: Cyrus Leung Date: Wed, 20 Nov 2024 14:45:08 +0800 Subject: [PATCH 051/122] [Bugfix] Handle conflicts between modern and legacy fields (#10471) Signed-off-by: DarkLight1337 --- vllm/transformers_utils/config.py | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/vllm/transformers_utils/config.py b/vllm/transformers_utils/config.py index 054845584c2ef..59096753c395d 100644 --- a/vllm/transformers_utils/config.py +++ b/vllm/transformers_utils/config.py @@ -107,6 +107,15 @@ def patch_rope_scaling(config: PretrainedConfig) -> None: def patch_rope_scaling_dict(rope_scaling: Dict[str, Any]) -> None: + if "rope_type" in rope_scaling and "type" in rope_scaling: + rope_type = rope_scaling["rope_type"] + rope_type_legacy = rope_scaling["type"] + if rope_type != rope_type_legacy: + raise ValueError( + f"Found conflicts between 'rope_type={rope_type}' (modern " + f"field) and 'type={rope_type_legacy}' (legacy field). " + "You should only specify one of them.") + if "rope_type" not in rope_scaling and "type" in rope_scaling: rope_scaling["rope_type"] = rope_scaling["type"] logger.info("Replacing legacy 'type' key with 'rope_type'") From d5b28447e005a79dec417a706900db0dad4e1a47 Mon Sep 17 00:00:00 2001 From: Mengqing Cao Date: Wed, 20 Nov 2024 14:52:13 +0800 Subject: [PATCH 052/122] [Platforms] Refactor xpu code (#10468) Signed-off-by: MengqingCao --- vllm/executor/xpu_executor.py | 27 --------------------------- vllm/platforms/xpu.py | 21 +++++++++++++++++++++ 2 files changed, 21 insertions(+), 27 deletions(-) diff --git a/vllm/executor/xpu_executor.py b/vllm/executor/xpu_executor.py index 36b7e2265efab..ba6177e51a453 100644 --- a/vllm/executor/xpu_executor.py +++ b/vllm/executor/xpu_executor.py @@ -1,8 +1,5 @@ from typing import Callable, List, Optional, Tuple, Type, Union -import torch - -from vllm.config import ModelConfig, ParallelConfig from vllm.executor.executor_base import ExecutorAsyncBase from vllm.executor.gpu_executor import GPUExecutor from vllm.logger import init_logger @@ -23,7 +20,6 @@ def _init_executor(self) -> None: assert self.speculative_config is None, ( "Speculative decoding not yet supported for XPU backend") - self.model_config = _verify_and_get_model_config(self.model_config) GPUExecutor._init_executor(self) def _get_worker_module_and_class( @@ -53,26 +49,3 @@ async def execute_model_async( output = await make_async(self.driver_worker.execute_model )(execute_model_req=execute_model_req) return output - - -def _verify_and_get_model_config(config: ModelConfig) -> ModelConfig: - if config.dtype == torch.bfloat16: - logger.warning( - "bfloat16 is not fully supported on XPU, casting to float16.") - config.dtype = torch.float16 - if not config.enforce_eager: - logger.warning( - "CUDA graph is not supported on XPU, fallback to the eager " - "mode.") - config.enforce_eager = True - return config - - -def _verify_and_get_parallel_config(config: ParallelConfig) -> ParallelConfig: - if (config.distributed_executor_backend is not None - and config.distributed_executor_backend != "ray"): - logger.warning( - "%s is not 
supported on XPU, fallback to ray distributed executor " - "backend.", config.distributed_executor_backend) - config.distributed_executor_backend = "ray" - return config diff --git a/vllm/platforms/xpu.py b/vllm/platforms/xpu.py index d0b3dca9a4195..62db285f6696a 100644 --- a/vllm/platforms/xpu.py +++ b/vllm/platforms/xpu.py @@ -1,9 +1,16 @@ +from typing import TYPE_CHECKING + import torch from vllm.logger import init_logger from .interface import DeviceCapability, Platform, PlatformEnum, _Backend +if TYPE_CHECKING: + from vllm.config import VllmConfig +else: + VllmConfig = None + logger = init_logger(__name__) @@ -34,3 +41,17 @@ def get_device_total_memory(cls, device_id: int = 0) -> int: @staticmethod def inference_mode(): return torch.no_grad() + + @classmethod + def check_and_update_config(cls, vllm_config: VllmConfig) -> None: + # check and update model config + model_config = vllm_config.model_config + if model_config.dtype == torch.bfloat16: + logger.warning( + "bfloat16 is not fully supported on XPU, casting to float16.") + model_config.dtype = torch.float16 + if not model_config.enforce_eager: + logger.warning( + "CUDA graph is not supported on XPU, fallback to the eager " + "mode.") + model_config.enforce_eager = True From 63f1fde277d063fbd36ccf43cb709fafca754ed5 Mon Sep 17 00:00:00 2001 From: "Li, Jiang" Date: Wed, 20 Nov 2024 18:57:39 +0800 Subject: [PATCH 053/122] [Hardware][CPU] Support chunked-prefill and prefix-caching on CPU (#10355) Signed-off-by: jiang1.li --- .buildkite/run-cpu-test.sh | 9 +- .../getting_started/cpu-installation.rst | 10 +- docs/source/serving/compatibility_matrix.rst | 4 +- .../basic_correctness/test_chunked_prefill.py | 63 ++- vllm/attention/backends/torch_sdpa.py | 189 +++++-- vllm/attention/ops/ipex_attn.py | 150 ++++-- vllm/platforms/cpu.py | 15 +- vllm/worker/cpu_model_runner.py | 488 ++++++++---------- 8 files changed, 559 insertions(+), 369 deletions(-) diff --git a/.buildkite/run-cpu-test.sh b/.buildkite/run-cpu-test.sh index f0128f091b742..4f1729d46dae2 100644 --- a/.buildkite/run-cpu-test.sh +++ b/.buildkite/run-cpu-test.sh @@ -25,6 +25,7 @@ docker run -itd --entrypoint /bin/bash -v ~/.cache/huggingface:/root/.cache/hugg function cpu_tests() { set -e + export NUMA_NODE=$2 # offline inference docker exec cpu-test-avx2-"$NUMA_NODE" bash -c " @@ -57,6 +58,12 @@ function cpu_tests() { pytest -s -v \ tests/quantization/test_ipex_quant.py" + # Run chunked-prefill and prefix-cache test + docker exec cpu-test-"$NUMA_NODE" bash -c " + set -e + pytest -s -v -k cpu_model \ + tests/basic_correctness/test_chunked_prefill.py" + # online inference docker exec cpu-test-"$NUMA_NODE" bash -c " set -e @@ -75,4 +82,4 @@ function cpu_tests() { # All of CPU tests are expected to be finished less than 25 mins. export -f cpu_tests -timeout 25m bash -c "cpu_tests $CORE_RANGE" +timeout 30m bash -c "cpu_tests $CORE_RANGE $NUMA_NODE" diff --git a/docs/source/getting_started/cpu-installation.rst b/docs/source/getting_started/cpu-installation.rst index 69530fd778c55..649de1cd9b53c 100644 --- a/docs/source/getting_started/cpu-installation.rst +++ b/docs/source/getting_started/cpu-installation.rst @@ -5,11 +5,11 @@ Installation with CPU vLLM initially supports basic model inferencing and serving on x86 CPU platform, with data types FP32, FP16 and BF16. vLLM CPU backend supports the following vLLM features: -- Tensor Parallel (``-tp = N``) -- Quantization (``INT8 W8A8, AWQ``) - -.. 
note:: - More advanced features on `chunked-prefill`, `prefix-caching` and `FP8 KV cache` are under development and will be available soon. +- Tensor Parallel +- Model Quantization (``INT8 W8A8, AWQ``) +- Chunked-prefill +- Prefix-caching +- FP8-E5M2 KV-Caching (TODO) Table of contents: diff --git a/docs/source/serving/compatibility_matrix.rst b/docs/source/serving/compatibility_matrix.rst index 5fc86ab0a11d5..a4300761d2635 100644 --- a/docs/source/serving/compatibility_matrix.rst +++ b/docs/source/serving/compatibility_matrix.rst @@ -344,7 +344,7 @@ Feature x Hardware - ✅ - ✅ - ✅ - - ✗ + - ✅ - ✅ * - :ref:`APC ` - `✗ `__ @@ -352,7 +352,7 @@ Feature x Hardware - ✅ - ✅ - ✅ - - ✗ + - ✅ - ✅ * - :ref:`LoRA ` - ✅ diff --git a/tests/basic_correctness/test_chunked_prefill.py b/tests/basic_correctness/test_chunked_prefill.py index cc5bc2aca27c9..469d18a4dd7af 100644 --- a/tests/basic_correctness/test_chunked_prefill.py +++ b/tests/basic_correctness/test_chunked_prefill.py @@ -12,6 +12,7 @@ import pytest from tests.kernels.utils import override_backend_env_variable +from vllm.platforms import current_platform from ..models.utils import check_logprobs_close, check_outputs_equal from ..utils import multi_gpu_test @@ -206,12 +207,14 @@ def test_models_with_fp8_kv_cache( # NOTE: Increasing this in this suite will fail CI because we currently cannot # reset distributed env properly. Use a value > 1 just when you test. @pytest.mark.parametrize("tensor_parallel_size", [1]) +@pytest.mark.parametrize("dtype", ["half"]) def test_with_prefix_caching( vllm_runner, max_tokens: int, enforce_eager: bool, chunk_size: int, tensor_parallel_size: int, + dtype: str, ) -> None: """ Checks exact match decode with and without prefix caching @@ -233,7 +236,7 @@ def test_with_prefix_caching( for enable in (True, False): with vllm_runner( model, - dtype="half", + dtype=dtype, max_num_batched_tokens=max_num_batched_tokens, enable_chunked_prefill=True, enable_prefix_caching=enable, @@ -260,3 +263,61 @@ def test_with_prefix_caching( name_0="w/o prefix caching", name_1="with prefix caching", ) + + +@pytest.mark.parametrize("model", ["facebook/opt-125m"]) +@pytest.mark.parametrize("dtype", ["bfloat16"]) +@pytest.mark.parametrize("max_tokens", [32]) +@pytest.mark.parametrize("chunked_prefill_token_size", [1, 4, 16]) +@pytest.mark.parametrize("enforce_eager", [False]) +@pytest.mark.parametrize("attention_backend", ["TORCH_SDPA"]) +@pytest.mark.cpu_model +@pytest.mark.skipif(not current_platform.is_cpu(), reason="CPU only") +def test_models_cpu( + hf_runner, + vllm_runner, + example_prompts, + model: str, + dtype: str, + max_tokens: int, + chunked_prefill_token_size: int, + enforce_eager: bool, + attention_backend: str, + monkeypatch, +) -> None: + test_models( + hf_runner, + vllm_runner, + example_prompts, + model, + dtype, + max_tokens, + chunked_prefill_token_size, + enforce_eager, + 1, + attention_backend, + monkeypatch, + ) + + +@pytest.mark.parametrize("max_tokens", [16]) +@pytest.mark.parametrize("enforce_eager", [False]) +@pytest.mark.parametrize("chunk_size", [30, 32]) +@pytest.mark.parametrize("dtype", ["bfloat16"]) +@pytest.mark.cpu_model +@pytest.mark.skipif(not current_platform.is_cpu(), reason="CPU only") +def test_with_prefix_caching_cpu( + vllm_runner, + max_tokens: int, + enforce_eager: bool, + chunk_size: int, + dtype: str, +) -> None: + test_with_prefix_caching( + vllm_runner, + max_tokens, + enforce_eager, + chunk_size, + 1, + dtype, + ) diff --git a/vllm/attention/backends/torch_sdpa.py 
b/vllm/attention/backends/torch_sdpa.py index 563178d3ab60d..3d025df26a7a1 100644 --- a/vllm/attention/backends/torch_sdpa.py +++ b/vllm/attention/backends/torch_sdpa.py @@ -7,18 +7,14 @@ from torch.nn.functional import scaled_dot_product_attention from vllm.attention.backends.abstract import (AttentionBackend, AttentionImpl, - AttentionMetadata, AttentionType) + AttentionMetadata, + AttentionMetadataBuilder, + AttentionType) from vllm.attention.backends.utils import CommonAttentionState +from vllm.attention.ops.ipex_attn import PagedAttention from vllm.attention.ops.paged_attn import PagedAttentionMetadata -from vllm.platforms import current_platform - -if current_platform.is_cpu(): - try: - from vllm.attention.ops.ipex_attn import PagedAttention - except ImportError: - from vllm.attention.ops.paged_attn import PagedAttention -else: - from vllm.attention.ops.paged_attn import PagedAttention +from vllm.utils import make_tensor_with_pad +from vllm.worker.cpu_model_runner import ModelInputForCPUBuilder class TorchSDPABackend(AttentionBackend): @@ -39,6 +35,10 @@ def get_metadata_cls() -> Type["AttentionMetadata"]: def get_state_cls() -> Type["CommonAttentionState"]: return CommonAttentionState + @staticmethod + def get_builder_cls() -> Type["TorchSDPAMetadataBuilder"]: + return TorchSDPAMetadataBuilder + @staticmethod def get_kv_cache_shape( num_blocks: int, @@ -71,9 +71,15 @@ class TorchSDPAMetadata(AttentionMetadata, PagedAttentionMetadata): """ # Currently, input sequences can only contain all prompts # or all decoding. True if all sequences are prompts. - is_prompt: bool - slot_mapping: torch.Tensor - seq_lens: Optional[List[int]] + chunked_prefill: bool + seq_lens: Optional[List[int]] = None # For non-chunked prefill + + # For chunked prefill only + max_query_len: Optional[int] = None + max_kv_len: Optional[int] = None + query_start_loc: Optional[torch.Tensor] = None + kv_start_loc: Optional[torch.Tensor] = None + prefill_block_tables: Optional[torch.Tensor] = None # Begin encoder attn & enc/dec cross-attn fields... 
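    # (For the chunked-prefill fields above: query_start_loc/kv_start_loc are
    # exclusive prefix sums over the per-prefill query/KV lengths, e.g. query
    # lens [5, 3] give query_start_loc [0, 5, 8]; max_query_len/max_kv_len bound
    # the ragged batch, and prefill_block_tables maps partially computed
    # requests to their cached KV blocks.)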
# Encoder sequence lengths representation @@ -123,20 +129,14 @@ def is_all_cross_attn_metadata_set(self): @property def prefill_metadata(self) -> Optional["TorchSDPAMetadata"]: - # Currently chunked prefill is not supported - if self.num_decode_tokens == 0: - assert self.num_prefills > 0 - return self - - return None + if self.num_prefill_tokens == 0: + return None + return self @property def decode_metadata(self) -> Optional["TorchSDPAMetadata"]: - # Currently chunked prefill is not supported - if self.num_prefills > 0: - assert self.num_decode_tokens == 0 + if self.num_decode_tokens == 0: return None - return self def get_seq_lens( @@ -274,6 +274,105 @@ def get_seq_len_block_table_args( raise AttributeError(f"Invalid attention type {str(attn_type)}") +class TorchSDPAMetadataBuilder(AttentionMetadataBuilder[TorchSDPAMetadata]): + + def __init__(self, input_builder: ModelInputForCPUBuilder) -> None: + self.chunked_prefill = input_builder.chunked_prefill + self.input_data = input_builder.input_data + + def build(self, seq_lens: List[int], query_lens: List[int], + cuda_graph_pad_size: int, batch_size: int) -> TorchSDPAMetadata: + input_data = self.input_data + prefill_seq_lens = seq_lens[0:input_data.num_prefills] + prefill_query_lens = query_lens[0:input_data.num_prefills] + slot_mapping = torch.tensor(input_data.slot_mapping, + dtype=torch.long, + device="cpu") + + # For chunked-prefill + if self.chunked_prefill and input_data.num_prefill_tokens != 0: + prefill_block_tables = make_tensor_with_pad( + self.input_data.prefill_block_tables, + pad=0, + dtype=torch.int32, + device="cpu", + ) + query_lens_tensor = torch.tensor(prefill_query_lens, + dtype=torch.int32, + device="cpu") + kv_lens_tensor = torch.tensor(prefill_seq_lens, + dtype=torch.int32, + device="cpu") + query_start_loc = torch.zeros(input_data.num_prefills + 1, + dtype=torch.int32, + device="cpu") + kv_start_loc = torch.zeros(input_data.num_prefills + 1, + dtype=torch.int32, + device="cpu") + torch.cumsum(query_lens_tensor, + dim=0, + dtype=torch.int32, + out=query_start_loc[1:]) + torch.cumsum(kv_lens_tensor, + dim=0, + dtype=torch.int32, + out=kv_start_loc[1:]) + max_query_len = max(prefill_query_lens) + max_kv_len = max(prefill_seq_lens) + else: + prefill_block_tables = None + query_start_loc = None + kv_start_loc = None + max_query_len = None + max_kv_len = None + + # For paged attention + if input_data.num_decode_tokens != 0: + seq_lens_tensor = torch.tensor( + input_data.seq_lens[input_data.num_prefills:], + dtype=torch.int32, + device="cpu", + ) + block_tables = make_tensor_with_pad( + self.input_data.decode_block_tables, + pad=0, + dtype=torch.int32, + device="cpu", + ) + else: + block_tables = torch.tensor([]) + seq_lens_tensor = torch.tensor([]) + + # For multi-modal models + placeholder_index_maps = None + if len(input_data.multi_modal_inputs_list) != 0: + placeholder_index_maps = { + modality: placeholder_map.index_map() + for modality, placeholder_map in + input_data.multi_modal_placeholder_maps.items() + } + + attn_metadata = TorchSDPAMetadata( + chunked_prefill=self.chunked_prefill, + seq_lens=prefill_seq_lens, + seq_lens_tensor=seq_lens_tensor, + max_query_len=max_query_len, + max_kv_len=max_kv_len, + query_start_loc=query_start_loc, + kv_start_loc=kv_start_loc, + max_decode_seq_len=input_data.max_decode_seq_len, + num_prefills=input_data.num_prefills, + num_prefill_tokens=input_data.num_prefill_tokens, + num_decode_tokens=input_data.num_decode_tokens, + block_tables=block_tables, + 
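            # block_tables covers only the decode requests here; chunked-prefill
            # requests read their previously cached KV blocks through
            # prefill_block_tables in the varlen prefill kernel.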
prefill_block_tables=prefill_block_tables, + slot_mapping=slot_mapping, + multi_modal_placeholder_index_maps=placeholder_index_maps, + ) + + return attn_metadata + + class TorchSDPABackendImpl(AttentionImpl[TorchSDPAMetadata]): def __init__( @@ -409,19 +508,35 @@ def forward( assert key.shape[0] == num_prefill_tokens + num_decode_tokens assert value.shape[0] == num_prefill_tokens + num_decode_tokens + output = torch.empty_like(query) if prefill_meta := attn_metadata.prefill_metadata: assert attn_metadata.seq_lens is not None - if (kv_cache.numel() == 0 - or prefill_meta.block_tables.numel() == 0): - output = self._run_sdpa_forward(query, - key, - value, - prefill_meta, - attn_type=attn_type) + if not prefill_meta.prefill_metadata.chunked_prefill: # type: ignore + self._run_sdpa_forward(output, + query, + key, + value, + prefill_meta, + attn_type=attn_type) else: # prefix-enabled attention - raise RuntimeError( - "Torch SDPA backend doesn't support prefix decoding.") + assert not self.need_mask + import intel_extension_for_pytorch.llm.modules as ipex_modules + output = torch.empty_like(query) + ipex_modules.PagedAttention.flash_attn_varlen_func( + output[:prefill_meta.num_prefill_tokens, :, :], + query[:prefill_meta.num_prefill_tokens, :, :], + key_cache, + value_cache, + prefill_meta.query_start_loc, + prefill_meta.kv_start_loc, + prefill_meta.max_query_len, + prefill_meta.max_kv_len, + self.scale, + True, + prefill_meta.prefill_block_tables, + self.alibi_slopes, + ) if decode_meta := attn_metadata.decode_metadata: assert attn_type != AttentionType.ENCODER_ONLY, ( @@ -433,8 +548,9 @@ def forward( block_tables_arg, ) = decode_meta.get_seq_len_block_table_args(attn_type) - output = PagedAttention.forward_decode( - query, + PagedAttention.forward_decode( + output[attn_metadata.num_prefill_tokens:, :, :], + query[attn_metadata.num_prefill_tokens:, :, :], key_cache, value_cache, block_tables_arg, @@ -453,12 +569,13 @@ def forward( def _run_sdpa_forward( self, + output: torch.Tensor, query: torch.Tensor, key: torch.Tensor, value: torch.Tensor, attn_metadata: TorchSDPAMetadata, attn_type: AttentionType = AttentionType.DECODER, - ): + ) -> None: if self.num_kv_heads != self.num_heads: key = key.repeat_interleave(self.num_queries_per_kv, dim=1) value = value.repeat_interleave(self.num_queries_per_kv, dim=1) @@ -479,7 +596,6 @@ def _run_sdpa_forward( attn_masks = [None] * len(seq_lens) attn_metadata.set_attn_bias(attn_masks, attn_type) - output = torch.empty_like(query) query = query.movedim(0, query.dim() - 2) key = key.movedim(0, key.dim() - 2) value = value.movedim(0, value.dim() - 2) @@ -502,7 +618,6 @@ def _run_sdpa_forward( scale=self.scale).squeeze(0).movedim(query.dim() - 2, 0) output[start_q:end_q, :, :] = sub_out start_q, start_kv = end_q, end_kv - return output def _make_alibi_bias( diff --git a/vllm/attention/ops/ipex_attn.py b/vllm/attention/ops/ipex_attn.py index 8df6d4ced9dc6..cbc6c74acf09a 100644 --- a/vllm/attention/ops/ipex_attn.py +++ b/vllm/attention/ops/ipex_attn.py @@ -1,12 +1,17 @@ from typing import Dict, List, Optional, Tuple -import intel_extension_for_pytorch.llm.modules as ipex_modules +try: + import intel_extension_for_pytorch.llm.modules as ipex_modules + _use_ipex = True +except ImportError: + _use_ipex = False + import torch from vllm import _custom_ops as ops -class PagedAttention: +class _PagedAttention: @staticmethod def get_supported_head_sizes() -> List[int]: @@ -22,6 +27,105 @@ def get_kv_cache_shape( ) -> Tuple[int, ...]: return (2, num_blocks, block_size * 
num_kv_heads * head_size) + @staticmethod + def split_kv_cache( + kv_cache: torch.Tensor, + num_kv_heads: int, + head_size: int, + *args, + ) -> Tuple[torch.Tensor, torch.Tensor]: + x = 16 // kv_cache.element_size() + num_blocks = kv_cache.shape[1] + + key_cache = kv_cache[0] + key_cache = key_cache.view(num_blocks, num_kv_heads, head_size // x, + -1, x) + value_cache = kv_cache[1] + value_cache = value_cache.view(num_blocks, num_kv_heads, head_size, -1) + return key_cache, value_cache + + @staticmethod + def write_to_paged_cache( + key: torch.Tensor, + value: torch.Tensor, + key_cache: torch.Tensor, + value_cache: torch.Tensor, + slot_mapping: torch.Tensor, + kv_cache_dtype: str, + k_scale: float, + v_scale: float, + *args, + ) -> None: + ops.reshape_and_cache( + key, + value, + key_cache, + value_cache, + slot_mapping.flatten(), + kv_cache_dtype, + k_scale, + v_scale, + ) + + @staticmethod + def forward_decode( + output: torch.Tensor, + query: torch.Tensor, + key_cache: torch.Tensor, + value_cache: torch.Tensor, + block_tables: torch.Tensor, + context_lens: torch.Tensor, + max_context_len: int, + kv_cache_dtype: str, + num_kv_heads: int, + scale: float, + alibi_slopes: Optional[torch.Tensor], + k_scale: float, + v_scale: float, + *args, + ) -> None: + tp_rank: int = 0 + blocksparse_local_blocks: int = 0 + blocksparse_vert_stride: int = 0 + blocksparse_block_size: int = 64 + blocksparse_head_sliding_step: int = 0 + block_size = value_cache.shape[3] + + ops.paged_attention_v1( + output, + query, + key_cache, + value_cache, + num_kv_heads, + scale, + block_tables, + context_lens, + block_size, + max_context_len, + alibi_slopes, + kv_cache_dtype, + k_scale, + v_scale, + tp_rank, + blocksparse_local_blocks, + blocksparse_vert_stride, + blocksparse_block_size, + blocksparse_head_sliding_step, + ) + + @staticmethod + def copy_blocks( + kv_caches: List[torch.Tensor], + src_to_dists: Dict[int, List[int]], + *args, + ) -> None: + key_caches = [kv_cache[0] for kv_cache in kv_caches] + value_caches = [kv_cache[1] for kv_cache in kv_caches] + ops.copy_blocks(key_caches, value_caches, src_to_dists) + + +class _IPEXPagedAttention(_PagedAttention): + @staticmethod def split_kv_cache( kv_cache: torch.Tensor, @@ -55,6 +159,7 @@ def write_to_paged_cache( @staticmethod def forward_decode( + output: torch.Tensor, query: torch.Tensor, key_cache: torch.Tensor, value_cache: torch.Tensor, @@ -68,8 +173,7 @@ def forward_decode( k_scale: float, v_scale: float, *args, - ) -> torch.Tensor: - output = torch.empty_like(query) + ) -> None: block_size = value_cache.shape[2] head_mapping = torch.arange( 0, @@ -83,41 +187,5 @@ def forward_decode( scale, block_tables, context_lens, block_size, max_context_len, alibi_slopes) - return output - - @staticmethod - def forward_prefix( - query: torch.Tensor, - key: torch.Tensor, - value: torch.Tensor, - kv_cache_dtype: str, - key_cache: torch.Tensor, - value_cache: torch.Tensor, - block_tables: torch.Tensor, - subquery_start_loc: torch.Tensor, - prompt_lens_tensor: torch.Tensor, - context_lens: torch.Tensor, - max_subquery_len: int, - alibi_slopes: Optional[torch.Tensor], - *args, - ) -> torch.Tensor: - raise NotImplementedError - - @staticmethod - def swap_blocks( - src_kv_cache: torch.Tensor, - dst_kv_cache: torch.Tensor, - src_to_dst: Dict[int, int], - *args, - ) -> None: - raise NotImplementedError - @staticmethod - def copy_blocks( - kv_caches: List[torch.Tensor], - src_to_dists: Dict[int, List[int]], - *args, - ) -> None: - key_caches = [kv_cache[0] for kv_cache in 
kv_caches] - value_caches = [kv_cache[1] for kv_cache in kv_caches] - ops.copy_blocks(key_caches, value_caches, src_to_dists) +PagedAttention = _IPEXPagedAttention if _use_ipex else _PagedAttention diff --git a/vllm/platforms/cpu.py b/vllm/platforms/cpu.py index f9a34a47959ec..43cbafe709d84 100644 --- a/vllm/platforms/cpu.py +++ b/vllm/platforms/cpu.py @@ -53,11 +53,6 @@ def check_and_update_config(cls, vllm_config: VllmConfig) -> None: cache_config = vllm_config.cache_config - if cache_config.enable_prefix_caching: - logger.warning( - "Prefix caching is not supported on CPU, disable it.") - cache_config.enable_prefix_caching = False - kv_cache_space = envs.VLLM_CPU_KVCACHE_SPACE if kv_cache_space >= 0: @@ -74,10 +69,12 @@ def check_and_update_config(cls, vllm_config: VllmConfig) -> None: f" {kv_cache_space}, expect a positive integer value.") scheduler_config = vllm_config.scheduler_config - if scheduler_config.chunked_prefill_enabled: - logger.warning( - "Chunked prefill is not supported on CPU, disable it.") - scheduler_config.chunked_prefill_enabled = False + if ((scheduler_config.chunked_prefill_enabled + or cache_config.enable_prefix_caching) + and model_config.dtype == torch.half): + logger.warning("Chunked-prefill on the CPU backend only does not" + " support fp16 for now, cast to bf16.") + model_config.dtype = torch.bfloat16 parallel_config = vllm_config.parallel_config if (parallel_config.distributed_executor_backend is not None diff --git a/vllm/worker/cpu_model_runner.py b/vllm/worker/cpu_model_runner.py index d3e1202c15e61..66bd844c94901 100644 --- a/vllm/worker/cpu_model_runner.py +++ b/vllm/worker/cpu_model_runner.py @@ -2,8 +2,8 @@ import weakref from collections import defaultdict from dataclasses import dataclass -from typing import (TYPE_CHECKING, Any, Dict, List, Optional, Tuple, Type, - TypeVar, Union) +from typing import (TYPE_CHECKING, Any, Dict, List, Optional, Type, TypeVar, + Union) import torch from torch import nn @@ -19,7 +19,6 @@ MultiModalKwargs, MultiModalPlaceholderMap) from vllm.sequence import (IntermediateTensors, SequenceData, SequenceGroupMetadata) -from vllm.utils import make_tensor_with_pad from vllm.worker.model_runner_base import ( ModelRunnerBase, ModelRunnerInputBase, ModelRunnerInputBuilderBase, _add_attn_metadata_broadcastable_dict, @@ -104,65 +103,223 @@ def from_broadcasted_tensor_dict( class ModelInputForCPUBuilder(ModelRunnerInputBuilderBase[ModelInputForCPU]): + class ModelInputData: + + def __init__(self, use_mrope: bool): + self.use_mrope = use_mrope + self.input_tokens: List[int] = [] + self.input_positions: Optional[ + List[int]] = [] if not self.use_mrope else None + self.seq_lens: List[int] = [] + self.query_lens: List[int] = [] + self.prefill_block_tables: List[List[int]] = [] + self.decode_block_tables: List[List[int]] = [] + self.max_decode_seq_len: int = 0 + self.num_prefills: int = 0 + self.num_prefill_tokens: int = 0 + self.num_decode_tokens: int = 0 + self.slot_mapping: List[int] = [] + self.multi_modal_inputs_list: List[MultiModalKwargs] = [] + self.multi_modal_placeholder_maps: Dict[ + str, MultiModalPlaceholderMap] = defaultdict( + MultiModalPlaceholderMap) + self.input_mrope_positions: Optional[List[List[int]]] = [ + [] for _ in range(3) + ] if self.use_mrope else None + def __init__(self, runner: "CPUModelRunner", finished_requests_ids: Optional[List[str]] = None) -> None: super().__init__() self.seq_group_metadata_list: List[SequenceGroupMetadata] = [] self.runner = runner + + self.chunked_prefill = 
(runner.scheduler_config.chunked_prefill_enabled + or runner.cache_config.enable_prefix_caching) self.model_input_cls = self.runner._model_input_cls self.attn_backend = self.runner.attn_backend - self.sliding_window = self.runner.sliding_window - self.block_size = self.runner.block_size - self.device = self.runner.device self.multi_modal_input_mapper = self.runner.multi_modal_input_mapper + self.input_data = ModelInputForCPUBuilder.ModelInputData( + self.runner.model_config.uses_mrope) + self.att_metadata_builder = self.runner.attn_backend.get_builder_cls()( + self) def add_seq_group(self, seq_group_metadata: SequenceGroupMetadata): self.seq_group_metadata_list.append(seq_group_metadata) + def set_seq_group_list( + self, seq_group_metadata_list: List[SequenceGroupMetadata]): + self.seq_group_metadata_list = seq_group_metadata_list + def build(self) -> ModelInputForCPU: + self._build_input_data() + + input_data = self.input_data + input_tokens = torch.tensor(input_data.input_tokens, + dtype=torch.long, + device="cpu") + input_positions = torch.tensor( + input_data.input_positions + if not input_data.use_mrope else input_data.input_mrope_positions, + dtype=torch.long, + device="cpu") + + # For multi-modal models multi_modal_kwargs = None - # NOTE: We assume that all sequences in the group are all prompts or - # all decodes. - is_prompt = self.seq_group_metadata_list[0].is_prompt - # Prepare input tensors. - if is_prompt: - (input_tokens, input_positions, attn_metadata, seq_lens, - multi_modal_kwargs) = self._prepare_prompt( - self.seq_group_metadata_list) - else: - (input_tokens, input_positions, - attn_metadata) = self._prepare_decode( - self.seq_group_metadata_list) - seq_lens = None + if len(input_data.multi_modal_inputs_list) != 0: + multi_modal_kwargs = MultiModalKwargs.batch( + input_data.multi_modal_inputs_list) + + attn_metadata = self.att_metadata_builder.build( + input_data.seq_lens, input_data.query_lens, -1, -1) return self.model_input_cls( input_tokens=input_tokens, input_positions=input_positions, + seq_lens=input_data.seq_lens, + query_lens=input_data.query_lens, attn_metadata=attn_metadata, multi_modal_kwargs=multi_modal_kwargs, - # query_lens is not needed if chunked prefill is not - # supported. Since CPU worker doesn't support chunked prefill - # just use seq_lens instead. - seq_lens=seq_lens, - query_lens=seq_lens, ) - def _compute_multi_modal_input( - self, - seq_data: SequenceData, - computed_len: int, - seq_group_metadata: SequenceGroupMetadata, - ): + def _build_input_data(self): + for seq_group_metadata in self.seq_group_metadata_list: + for seq_id, seq_data in seq_group_metadata.seq_data.items(): + if seq_group_metadata.is_prompt: + self._compute_prompt_input_tokens(self.input_data, + seq_group_metadata, + seq_data, seq_id) + if seq_group_metadata.multi_modal_data: + self._compute_multi_modal_input( + seq_group_metadata, seq_data) + else: + self._compute_decode_input_tokens(self.input_data, + seq_group_metadata, + seq_data, seq_id) + + def _compute_decode_input_tokens(self, data: ModelInputData, + seq_group_metadata: SequenceGroupMetadata, + seq_data: SequenceData, seq_id: int): + """ + Compute decode input tokens, positions, block table and slot mapping. 
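+        Decode consumes exactly one new token per sequence, so each query
+        length recorded here is 1.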
+ """ + block_size = self.runner.block_size + + block_table = seq_group_metadata.block_tables[seq_id] + seq_len = seq_data.get_len() + context_len = seq_data.get_num_computed_tokens() + + tokens = seq_data.get_last_token_id() + token_positions = seq_len - 1 + block_number = block_table[token_positions // block_size] + block_offset = token_positions % block_size + slot = block_number * block_size + block_offset + + # For paged_attention kernel + if self.runner.sliding_window: + start_idx = max(0, seq_len - self.runner.sliding_window) + start_block = start_idx // block_size + start_idx = start_block * block_size + seq_len = seq_len - start_idx + block_table = block_table[start_block:] + + # For MRotaryEmbedding + if data.input_positions is None: + next_pos = MRotaryEmbedding.get_next_input_positions( + seq_data.mrope_position_delta, + context_len, + seq_len, + ) + for idx in range(3): + data.input_mrope_positions[idx].extend( # type: ignore + next_pos[idx]) + else: + data.input_positions.append(token_positions) # type: ignore + + # Update fields + data.input_tokens.append(tokens) + data.max_decode_seq_len = max(data.max_decode_seq_len, seq_len) + data.num_decode_tokens += 1 + data.slot_mapping.append(slot) + data.decode_block_tables.append(block_table) + data.query_lens.append(1) + data.seq_lens.append(seq_len) + + def _compute_prompt_input_tokens(self, data: ModelInputData, + seq_group_metadata: SequenceGroupMetadata, + seq_data: SequenceData, seq_id: int): + """ + Compute prompt input tokens, positions, block table and slot mapping. + """ + token_chunk_size = seq_group_metadata.token_chunk_size + block_size = self.runner.block_size + + block_table = seq_group_metadata.block_tables[seq_id] + seq_len = seq_data.get_len() + context_len = seq_data.get_num_computed_tokens() + seq_len = min(seq_len, context_len + token_chunk_size) + + # For prefix caching + prefix_cache_block_num = len(seq_group_metadata.computed_block_nums) + if prefix_cache_block_num > 0: + prefix_cache_len = (prefix_cache_block_num * + self.runner.block_size) + if prefix_cache_len <= context_len: + # We already passed the cache hit region, + # so do normal computation. + pass + elif context_len < prefix_cache_len < seq_len: + # Partial hit. Compute the missing part. + context_len = prefix_cache_len + token_chunk_size = seq_len - context_len + elif seq_len <= prefix_cache_len: + # Full hit. Only compute the last token to avoid + # erroneous behavior. FIXME: Ideally we should directly + # mark all tokens as computed in the scheduler and do not + # schedule this sequence, so this case should not happen. + context_len = seq_len - 1 + token_chunk_size = 1 + + tokens = seq_data.get_token_ids() + tokens = tokens[context_len:seq_len] + token_positions = range(context_len, seq_len) + + # For encoder-only models, the block_table is None, + # and there is no need to initialize the slot_mapping. 
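+        # Each token's slot is block_number * block_size + block_offset;
+        # _PAD_SLOT_ID is only a placeholder until the loop below fills it.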
+ if block_table is not None: + slot_mapping = [_PAD_SLOT_ID] * len(token_positions) + for i, pos in enumerate(token_positions): + block_number = block_table[pos // block_size] + block_offset = pos % block_size + slot = block_number * block_size + block_offset + slot_mapping[i] = slot + data.slot_mapping.extend(slot_mapping) + + # The MROPE positions are prepared in _compute_multi_modal_input + if data.input_positions is not None: + data.input_positions.extend(token_positions) + + # Update fields + data.input_tokens.extend(tokens) + data.num_prefills += 1 + data.num_prefill_tokens += len(tokens) + data.query_lens.append(len(tokens)) + data.prefill_block_tables.append(block_table) + data.seq_lens.append(seq_len) + + def _compute_multi_modal_input(self, + seq_group_metadata: SequenceGroupMetadata, + seq_data: SequenceData): + computed_len = seq_data.get_num_computed_tokens() + seq_len = self.input_data.seq_lens[-1] + # NOTE: mm_data only includes the subset of multi-modal items that # intersect with the current prefill positions. mm_data, placeholder_maps = MultiModalPlaceholderMap.from_seq_group( - seq_group_metadata, - range(computed_len, len(seq_data.get_token_ids())), - ) + seq_group_metadata, range(computed_len, seq_len)) if not mm_data: - return None, None, None + return if self.runner.mm_registry.has_processor(self.runner.model_config): mm_kwargs = mm_data @@ -173,8 +330,10 @@ def _compute_multi_modal_input( ) # special processing for mrope position deltas. - mrope_positions = None if self.runner.model_config.uses_mrope: + assert not self.chunked_prefill, \ + "MROPE on CPU does not support chunked-prefill." + image_grid_thw = mm_kwargs.get("image_grid_thw", None) video_grid_thw = mm_kwargs.get("video_grid_thw", None) assert image_grid_thw is not None or video_grid_thw is not None, ( @@ -198,226 +357,15 @@ def _compute_multi_modal_input( context_len=computed_len, ) seq_data.mrope_position_delta = mrope_position_delta - return mm_kwargs, placeholder_maps, mrope_positions - def _prepare_prompt( - self, - seq_group_metadata_list: List[SequenceGroupMetadata], - ) -> Tuple[torch.Tensor, torch.Tensor, AttentionMetadata, List[int], - BatchedTensorInputs]: - assert len(seq_group_metadata_list) > 0 - input_tokens: List[int] = [] - input_positions: List[int] = [] - input_mrope_positions: List[List[int]] = [[] for _ in range(3)] - - slot_mapping: List[int] = [] - seq_lens: List[int] = [] - multi_modal_kwargs_list: List[MultiModalKwargs] = [] - multi_modal_placeholder_maps: Dict[ - str, - MultiModalPlaceholderMap] = defaultdict(MultiModalPlaceholderMap) - - for seq_group_metadata in seq_group_metadata_list: - assert seq_group_metadata.is_prompt - seq_ids = list(seq_group_metadata.seq_data.keys()) - assert len(seq_ids) == 1 - seq_id = seq_ids[0] - - seq_data = seq_group_metadata.seq_data[seq_id] - prompt_tokens = seq_data.get_token_ids() - computed_len = seq_data.get_num_computed_tokens() - seq_len = len(prompt_tokens) - - seq_lens.append(seq_len) # Prompt token num - input_tokens.extend(prompt_tokens) # Token ids - - mrope_positions = None - if seq_group_metadata.multi_modal_data: - ( - mm_kwargs, - placeholder_maps, - mrope_positions, - ) = self._compute_multi_modal_input(seq_data, computed_len, - seq_group_metadata) - - multi_modal_kwargs_list.append(mm_kwargs) - for modality, placeholder_map in placeholder_maps.items(): - multi_modal_placeholder_maps[modality].extend( - placeholder_map) - - # Token position ids - # NOTE(woosuk): Here we assume that the first token in the prompt - # is always 
the first token in the sequence. - if mrope_positions: - for idx in range(3): - input_mrope_positions[idx].extend(mrope_positions[idx]) - else: - input_positions.extend(list(range(computed_len, seq_len))) - - # Compute the slot mapping. - block_table = seq_group_metadata.block_tables[seq_id] - # Mask the [0, start_idx) tokens of the prompt with _PAD_SLOT_ID, - # where start_idx is max(0, seq_len - sliding_window). - # For example, if the prompt len is 10, sliding window is 8, and - # block size is 4, the first two tokens are masked and the slot - # mapping will be [-1, -1, 2, 3, 4, 5, 6, 7, 0, 1]. - start_idx = 0 - if self.sliding_window is not None: - start_idx = max(0, seq_len - self.sliding_window) - - for i in range(computed_len, seq_len): - if i < start_idx: - slot_mapping.append(_PAD_SLOT_ID) - continue - - # For encoder-only models, the block_table is None, - # and there is no need to initialize the slot_mapping. - if block_table is not None: - block_number = block_table[i // - self.block_size] # type: ignore - block_offset = i % self.block_size # type: ignore - slot = block_number * self.block_size + block_offset - slot_mapping.append(slot) - - if any(input_mrope_positions): - input_positions = None # type: ignore - else: - input_mrope_positions = None # type: ignore + for i in range(3): + self.input_data.input_mrope_positions[ # type: ignore + i].extend(mrope_positions[i]) - num_prompt_tokens = len(input_tokens) - - input_tokens = torch.tensor(input_tokens, - dtype=torch.long, - device=self.device) # type: ignore - input_positions = torch.tensor(input_positions - or input_mrope_positions, - dtype=torch.long, - device=self.device) # type: ignore - slot_mapping = torch.tensor(slot_mapping, - dtype=torch.long, - device=self.device) # type: ignore - placeholder_index_maps = { - modality: placeholder_map.index_map() - for modality, placeholder_map in - multi_modal_placeholder_maps.items() - } - - attn_metadata = self.attn_backend.make_metadata( - is_prompt=True, - seq_lens=seq_lens, - seq_lens_tensor=torch.tensor([]), - max_decode_seq_len=0, - num_prefills=len(seq_lens), - num_prefill_tokens=num_prompt_tokens, - num_decode_tokens=0, - block_tables=torch.tensor([]), - slot_mapping=slot_mapping, - multi_modal_placeholder_index_maps=placeholder_index_maps, - ) - - multi_modal_kwargs = MultiModalKwargs.batch(multi_modal_kwargs_list) - - return (input_tokens, input_positions, attn_metadata, seq_lens, - multi_modal_kwargs) - - def _prepare_decode( - self, - seq_group_metadata_list: List[SequenceGroupMetadata], - ) -> Tuple[torch.Tensor, torch.Tensor, AttentionMetadata]: - assert len(seq_group_metadata_list) > 0 - input_tokens: List[int] = [] - input_positions: List[int] = [] - input_mrope_positions: List[List[int]] = [[] for _ in range(3)] - slot_mapping: List[int] = [] - seq_lens: List[int] = [] - block_tables: List[List[int]] = [] - - for seq_group_metadata in seq_group_metadata_list: - assert not seq_group_metadata.is_prompt - assert seq_group_metadata.token_chunk_size == 1 - - seq_ids = list(seq_group_metadata.seq_data.keys()) - - for seq_id in seq_ids: - seq_data = seq_group_metadata.seq_data[seq_id] - generation_token = seq_data.get_last_token_id() - input_tokens.append(generation_token) - - seq_len = seq_data.get_len() - position = seq_len - 1 - if seq_data.mrope_position_delta is not None: - context_len = seq_data.get_num_computed_tokens() - next_pos = MRotaryEmbedding.get_next_input_positions( - seq_data.mrope_position_delta, - context_len, - seq_len, - ) - for idx in range(3): - 
input_mrope_positions[idx].extend(next_pos[idx]) - else: - input_positions.append(position) - - seq_len = seq_len if self.sliding_window is None else min( - seq_len, self.sliding_window) - seq_lens.append(seq_len) - - block_table = seq_group_metadata.block_tables[seq_id] - block_number = block_table[position // self.block_size] - block_offset = position % self.block_size - slot = block_number * self.block_size + block_offset - slot_mapping.append(slot) - - if self.sliding_window is not None: - sliding_window_blocks = (self.sliding_window // - self.block_size) - block_table = block_table[-sliding_window_blocks:] - block_tables.append(block_table) - - if any(input_mrope_positions): - input_positions = None # type: ignore - else: - input_mrope_positions = None # type: ignore - - max_decode_seq_len = max(seq_lens) - - input_tokens = torch.tensor(input_tokens, - dtype=torch.long, - device=self.device) - input_positions = torch.tensor(input_positions - or input_mrope_positions, - dtype=torch.long, - device=self.device) - slot_mapping = torch.tensor(slot_mapping, - dtype=torch.long, - device=self.device) - seq_lens_tensor = torch.tensor(seq_lens, - dtype=torch.int, - device=self.device) - - block_tables = make_tensor_with_pad( - block_tables, - pad=0, - dtype=torch.int, - device=self.device, - ) - - attn_metadata = self.attn_backend.make_metadata( - is_prompt=False, - slot_mapping=slot_mapping, - multi_modal_placeholder_index_maps=None, - seq_lens=seq_lens, - seq_lens_tensor=seq_lens_tensor, - max_decode_seq_len=max_decode_seq_len, - num_prefill_tokens=0, - num_decode_tokens=len(input_tokens), - num_prefills=0, - block_tables=block_tables, - ) - return ( - input_tokens, - input_positions, - attn_metadata, - ) + self.input_data.multi_modal_inputs_list.append(mm_kwargs) + for modality, placeholder_map in placeholder_maps.items(): + self.input_data.multi_modal_placeholder_maps[modality].extend( + placeholder_map) class CPUModelRunnerBase(ModelRunnerBase[TModelInputForCPU]): @@ -436,8 +384,6 @@ def __init__( **kwargs, ): ModelRunnerBase.__init__(self, vllm_config) - # Currently, CPU worker doesn't support chunked prefill. 
- assert self.scheduler_config.chunked_prefill_enabled is False model_config = self.model_config cache_config = self.cache_config @@ -479,8 +425,7 @@ def _prepare_model_input_tensors( """ builder = self._builder_cls(weakref.proxy(self), finished_requests_ids) - for seq_group_metadata in seq_group_metadata_list: - builder.add_seq_group(seq_group_metadata) + builder.set_seq_group_list(seq_group_metadata_list) return builder.build() # type: ignore @@ -537,22 +482,19 @@ def execute_model( "CPU worker does not support multi-step execution.") model_executable = self.model - execute_model_kwargs = { - "input_ids": - model_input.input_tokens, - "positions": - model_input.input_positions, - "kv_caches": - kv_caches, - "attn_metadata": - model_input.attn_metadata, - **MultiModalKwargs.as_kwargs(model_input.multi_modal_kwargs or {}, - device=self.device), - "intermediate_tensors": - intermediate_tensors, - } - - hidden_states = model_executable(**execute_model_kwargs) + multimodal_kwargs = {} + if model_input.multi_modal_kwargs is not None: + multimodal_kwargs = MultiModalKwargs.as_kwargs( + model_input.multi_modal_kwargs, device=self.device) + + hidden_states = model_executable( + input_ids=model_input.input_tokens, + positions=model_input.input_positions, + kv_caches=kv_caches, + attn_metadata=model_input.attn_metadata, + intermediate_tensors=intermediate_tensors, + **multimodal_kwargs, + ) # Compute the logits. logits = self.model.compute_logits(hidden_states, From 772a66732d0ff58a43dbd1ae79c0d165659aa96d Mon Sep 17 00:00:00 2001 From: youkaichao Date: Wed, 20 Nov 2024 09:13:28 -0800 Subject: [PATCH 054/122] [platforms] restore xpu check for parallel config (#10479) Signed-off-by: youkaichao --- vllm/platforms/xpu.py | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/vllm/platforms/xpu.py b/vllm/platforms/xpu.py index 62db285f6696a..c3c4746d3cc25 100644 --- a/vllm/platforms/xpu.py +++ b/vllm/platforms/xpu.py @@ -55,3 +55,13 @@ def check_and_update_config(cls, vllm_config: VllmConfig) -> None: "CUDA graph is not supported on XPU, fallback to the eager " "mode.") model_config.enforce_eager = True + + # check and update parallel config + parallel_config = vllm_config.parallel_config + if (parallel_config.distributed_executor_backend is not None + and parallel_config.distributed_executor_backend != "ray"): + logger.warning( + "%s is not supported on XPU, fallback to ray distributed" + " executor backend.", + parallel_config.distributed_executor_backend) + parallel_config.distributed_executor_backend = "ray" From 5f1d6af2b619b07b2af3151d6aa59f9adc17e1eb Mon Sep 17 00:00:00 2001 From: Simon Mo Date: Wed, 20 Nov 2024 11:06:56 -0800 Subject: [PATCH 055/122] [perf bench] H200 development (#9768) Signed-off-by: simon-mo --- .../benchmark-pipeline.yaml | 23 +++++++++++++++++++ .../convert-results-json-to-markdown.py | 5 ++++ .../scripts/run-performance-benchmarks.sh | 11 ++++----- 3 files changed, 32 insertions(+), 7 deletions(-) diff --git a/.buildkite/nightly-benchmarks/benchmark-pipeline.yaml b/.buildkite/nightly-benchmarks/benchmark-pipeline.yaml index eec2a51e2f8fd..5c069b38b2d7d 100644 --- a/.buildkite/nightly-benchmarks/benchmark-pipeline.yaml +++ b/.buildkite/nightly-benchmarks/benchmark-pipeline.yaml @@ -9,7 +9,9 @@ steps: - image: badouralix/curl-jq command: - sh .buildkite/nightly-benchmarks/scripts/wait-for-image.sh + - wait + - label: "A100" agents: queue: A100 @@ -41,6 +43,27 @@ steps: - name: devshm emptyDir: medium: Memory + + - label: "H200" + agents: + queue: H200 + plugins: + - 
docker#v5.12.0: + image: public.ecr.aws/q9t5s3a7/vllm-ci-test-repo:$BUILDKITE_COMMIT + command: + - bash + - .buildkite/nightly-benchmarks/scripts/run-performance-benchmarks.sh + mount-buildkite-agent: true + propagate-environment: true + ipc: host + gpus: 4,5,6,7 + volumes: + - /data/benchmark-hf-cache:/root/.cache/huggingface + environment: + - VLLM_USAGE_SOURCE + - HF_TOKEN + + # - label: "H100" # agents: # queue: H100 diff --git a/.buildkite/nightly-benchmarks/scripts/convert-results-json-to-markdown.py b/.buildkite/nightly-benchmarks/scripts/convert-results-json-to-markdown.py index 7cf05610b9953..d640563252a0c 100644 --- a/.buildkite/nightly-benchmarks/scripts/convert-results-json-to-markdown.py +++ b/.buildkite/nightly-benchmarks/scripts/convert-results-json-to-markdown.py @@ -157,6 +157,11 @@ def results_to_json(latency, throughput, serving): throughput_results, serving_results) + # Sort all dataframes by their respective "Test name" columns + for df in [latency_results, serving_results, throughput_results]: + if not df.empty: + df.sort_values(by="Test name", inplace=True) + # get markdown tables latency_md_table = tabulate(latency_results, headers='keys', diff --git a/.buildkite/nightly-benchmarks/scripts/run-performance-benchmarks.sh b/.buildkite/nightly-benchmarks/scripts/run-performance-benchmarks.sh index d397b05cdff23..0d16a83781ab2 100644 --- a/.buildkite/nightly-benchmarks/scripts/run-performance-benchmarks.sh +++ b/.buildkite/nightly-benchmarks/scripts/run-performance-benchmarks.sh @@ -6,6 +6,7 @@ # Do not set -e, as the mixtral 8x22B model tends to crash occasionally # and we still want to see other benchmarking results even when mixtral crashes. +set -x set -o pipefail check_gpus() { @@ -85,11 +86,7 @@ kill_gpu_processes() { ps -aux lsof -t -i:8000 | xargs -r kill -9 - pkill -f pt_main_thread - # this line doesn't work now - # ps aux | grep python | grep openai | awk '{print $2}' | xargs -r kill -9 - pkill -f python3 - pkill -f /usr/bin/python3 + pgrep python3 | xargs -r kill -9 # wait until GPU memory usage smaller than 1GB @@ -289,7 +286,7 @@ run_serving_tests() { # run the server echo "Running test case $test_name" echo "Server command: $server_command" - eval "$server_command" & + bash -c "$server_command" & server_pid=$! 
# wait until the server is alive @@ -322,7 +319,7 @@ run_serving_tests() { echo "Running test case $test_name with qps $qps" echo "Client command: $client_command" - eval "$client_command" + bash -c "$client_command" # record the benchmarking commands jq_output=$(jq -n \ From 0cd3d9717e38c7a122ed01fe2a8fddd8b37dff4b Mon Sep 17 00:00:00 2001 From: youkaichao Date: Wed, 20 Nov 2024 11:20:38 -0800 Subject: [PATCH 056/122] [7/N] torch.compile, reduce compilation time (#10460) Signed-off-by: youkaichao --- tests/compile/piecewise/test_simple.py | 2 +- tests/compile/piecewise/test_toy_llama.py | 4 ++-- vllm/compilation/backends.py | 2 +- vllm/config.py | 17 ++++++++++------- vllm/worker/worker.py | 18 +++++++++++++----- 5 files changed, 27 insertions(+), 16 deletions(-) diff --git a/tests/compile/piecewise/test_simple.py b/tests/compile/piecewise/test_simple.py index 0e40e3b4ebc96..0db12d6b6a43c 100644 --- a/tests/compile/piecewise/test_simple.py +++ b/tests/compile/piecewise/test_simple.py @@ -79,7 +79,7 @@ def test_simple_piecewise_compile(): vllm_config = VllmConfig(compilation_config=CompilationConfig( level=CompilationLevel.PIECEWISE, use_cudagraph=True, - non_cudagraph_ops=["silly.attention"], + splitting_ops=["silly.attention"], cudagraph_copy_inputs=True, )) with set_current_vllm_config(vllm_config): diff --git a/tests/compile/piecewise/test_toy_llama.py b/tests/compile/piecewise/test_toy_llama.py index 356d119a40334..cfe661b8871e0 100644 --- a/tests/compile/piecewise/test_toy_llama.py +++ b/tests/compile/piecewise/test_toy_llama.py @@ -258,7 +258,7 @@ def run_model(llama_config, use_cudagraph=True, ) if split_attn: - compilation_config.non_cudagraph_ops = ["silly.attention"] + compilation_config.splitting_ops = ["silly.attention"] else: compilation_config = CompilationConfig( level=CompilationLevel.NO_COMPILATION, ) @@ -378,7 +378,7 @@ def benchmark(): compilation_config = CompilationConfig( level=CompilationLevel.PIECEWISE, use_cudagraph=True, - non_cudagraph_ops=["silly.attention"], + splitting_ops=["silly.attention"], ) else: compilation_config = CompilationConfig( diff --git a/vllm/compilation/backends.py b/vllm/compilation/backends.py index 0cf1e3a95fcba..416cffd326489 100644 --- a/vllm/compilation/backends.py +++ b/vllm/compilation/backends.py @@ -447,7 +447,7 @@ def __call__(self, graph: fx.GraphModule, example_inputs) -> Callable: self.add_passes_to_config() self.split_gm, self.piecewise_graphs = split_graph( - graph, self.compilation_configs.non_cudagraph_ops) + graph, self.compilation_configs.splitting_ops) from torch._dynamo.utils import lazy_format_graph_code logger.debug("%s", lazy_format_graph_code("before split", self.graph)) diff --git a/vllm/config.py b/vllm/config.py index e69cbd3eb402a..3d0c616868225 100644 --- a/vllm/config.py +++ b/vllm/config.py @@ -2089,13 +2089,15 @@ class CompilationConfig(BaseModel): - 'none,+op1,+op2' to enable only op1 and op2 By default, all custom ops are enabled when running without Inductor and disabled when running with Inductor (compile_level >= Inductor). + - splitting_ops: a list of ops to split the full graph into subgraphs, used in piecewise compilation. - CudaGraph capture: - use_cudagraph: whether to use cudagraph inside compilation. - False: cudagraph inside compilation is not used. - True: cudagraph inside compilation is used. It requires - that all input buffers have fixed addresses. - Note that this is orthogonal to the cudagraph capture out - side of compilation. 
+ that all input buffers have fixed addresses, and all + splitting ops write their outputs to input buffers. + Note that this is orthogonal to the cudagraph capture logic + outside of compilation. TODO: move outside cudagraph logic into compilation. torch.compile will handle cudagraph capture logic in the future. - cudagraph_capture_sizes: sizes to capture cudagraph. @@ -2149,6 +2151,11 @@ class CompilationConfig(BaseModel): level: int = 0 backend: str = "" custom_ops: List[str] = Field(default_factory=list) + splitting_ops: List[str] = Field(default_factory=lambda: [ + "vllm.unified_flash_attention", + "vllm.unified_flash_infer", + "vllm.unified_v1_flash_attention", + ]) use_inductor: bool = True inductor_specialize_for_cudagraph_no_more_than: Optional[int] = None @@ -2157,7 +2164,6 @@ class CompilationConfig(BaseModel): inductor_passes: Dict[str, str] = Field(default_factory=dict) use_cudagraph: bool = False - non_cudagraph_ops: List[str] = Field(default_factory=list) cudagraph_num_of_warmups: int = 0 cudagraph_capture_sizes: Optional[List[int]] = None cudagraph_copy_inputs: bool = False @@ -2348,9 +2354,6 @@ def __post_init__(self): # and avoid any potential issues with the inductor. self.compilation_config.custom_ops = ["none"] self.compilation_config.use_cudagraph = True - self.compilation_config.non_cudagraph_ops = [ - "vllm.unified_v1_flash_attention" - ] self.compilation_config.use_inductor = True self.compilation_config.enable_fusion = False diff --git a/vllm/worker/worker.py b/vllm/worker/worker.py index d3ca6d9d0b17e..80fd7bc3b67cc 100644 --- a/vllm/worker/worker.py +++ b/vllm/worker/worker.py @@ -1,6 +1,7 @@ """A GPU worker class.""" import gc import os +import time from typing import Dict, List, Optional, Set, Tuple, Type, Union import torch @@ -189,6 +190,7 @@ def determine_num_available_blocks(self) -> Tuple[int, int]: torch.cuda.reset_peak_memory_stats() free_memory_pre_profile, total_gpu_memory = torch.cuda.mem_get_info() + start_time = time.time() # Execute a forward pass with dummy inputs to profile the memory usage # of the model. 
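For reference, a minimal sketch of how the timing and memory fields added to the log line in the next hunk fit together (hypothetical values; only the relationships visible in this diff are used, and the remaining log fields come from code not shown here):

    # Illustrative sketch only -- hypothetical numbers, not from a real run.
    GiB = 1024**3
    start_time = 100.00                 # time.time() before the dummy forward pass
    end_time = 112.34                   # time.time() after profiling
    total_gpu_memory = 80 * GiB         # from torch.cuda.mem_get_info()
    free_memory_pre_profile = 78 * GiB  # from torch.cuda.mem_get_info()
    peak_memory = 12 * GiB              # peak allocation seen while profiling
    non_torch_allocations = 1 * GiB     # e.g. NCCL / driver buffers

    duration = end_time - start_time                                   # "duration=%.2f seconds"
    initial_memory_usage = total_gpu_memory - free_memory_pre_profile  # "initial_memory_usage"
    peak_torch_memory = peak_memory - non_torch_allocations            # "peak_torch_memory"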
@@ -229,12 +231,18 @@ def determine_num_available_blocks(self) -> Tuple[int, int]: num_gpu_blocks = max(num_gpu_blocks, 0) num_cpu_blocks = max(num_cpu_blocks, 0) + end_time = time.time() logger.info( - "Memory profiling results: total_gpu_memory=%.2fGiB" - " initial_memory_usage=%.2fGiB peak_torch_memory=%.2fGiB" - " memory_usage_post_profile=%.2fGiB" - " non_torch_memory=%.2fGiB kv_cache_size=%.2fGiB" - " gpu_memory_utilization=%.2f", total_gpu_memory / (1024**3), + "Memory profiling results: " + "duration=%.2f seconds, " + "total_gpu_memory=%.2fGiB, " + "initial_memory_usage=%.2fGiB, " + "peak_torch_memory=%.2fGiB, " + "memory_usage_post_profile=%.2fGiB, " + "non_torch_memory=%.2fGiB, " + "kv_cache_size=%.2fGiB, " + "gpu_memory_utilization=%.2f.", end_time - start_time, + total_gpu_memory / (1024**3), (total_gpu_memory - free_memory_pre_profile) / (1024**3), (peak_memory - non_torch_allocations) / (1024**3), total_allocated_bytes / (1024**3), From c68f7ede6a4aef0cd31f531b5d7ec22ab224de95 Mon Sep 17 00:00:00 2001 From: Guillaume Calmettes Date: Wed, 20 Nov 2024 22:42:21 +0100 Subject: [PATCH 057/122] [Bugfix]: allow extra fields in requests to openai compatible server (#10463) Signed-off-by: Guillaume Calmettes --- tests/entrypoints/openai/test_chat.py | 26 +++++++++++++------------- vllm/entrypoints/openai/protocol.py | 18 ++++++++++++++++-- 2 files changed, 29 insertions(+), 15 deletions(-) diff --git a/tests/entrypoints/openai/test_chat.py b/tests/entrypoints/openai/test_chat.py index 8d13f64dce01c..843d15e768093 100644 --- a/tests/entrypoints/openai/test_chat.py +++ b/tests/entrypoints/openai/test_chat.py @@ -899,19 +899,19 @@ async def test_response_format_json_schema(client: openai.AsyncOpenAI): @pytest.mark.asyncio -async def test_extra_fields(client: openai.AsyncOpenAI): - with pytest.raises(BadRequestError) as exc_info: - await client.chat.completions.create( - model=MODEL_NAME, - messages=[{ - "role": "system", - "content": "You are a helpful assistant.", - "extra_field": "0", - }], # type: ignore - temperature=0, - seed=0) - - assert "extra_forbidden" in exc_info.value.message +async def test_extra_fields_allowed(client: openai.AsyncOpenAI): + resp = await client.chat.completions.create( + model=MODEL_NAME, + messages=[{ + "role": "user", + "content": "what is 1+1?", + "extra_field": "0", + }], # type: ignore + temperature=0, + seed=0) + + content = resp.choices[0].message.content + assert content is not None @pytest.mark.asyncio diff --git a/vllm/entrypoints/openai/protocol.py b/vllm/entrypoints/openai/protocol.py index b7b064ae01f05..a82212677f63a 100644 --- a/vllm/entrypoints/openai/protocol.py +++ b/vllm/entrypoints/openai/protocol.py @@ -9,12 +9,15 @@ from typing_extensions import Annotated from vllm.entrypoints.chat_utils import ChatCompletionMessageParam +from vllm.logger import init_logger from vllm.pooling_params import PoolingParams from vllm.sampling_params import (BeamSearchParams, GuidedDecodingParams, RequestOutputKind, SamplingParams) from vllm.sequence import Logprob from vllm.utils import random_uuid +logger = init_logger(__name__) + # torch is mocked during docs generation, # so we have to provide the values as literals _MOCK_LONG_INFO = Namespace(min=-9223372036854775808, max=9223372036854775807) @@ -35,8 +38,19 @@ class OpenAIBaseModel(BaseModel): - # OpenAI API does not allow extra fields - model_config = ConfigDict(extra="forbid") + # OpenAI API does allow extra fields + model_config = ConfigDict(extra="allow") + + @model_validator(mode="before") + 
@classmethod + def __log_extra_fields__(cls, data): + if isinstance(data, dict): + extra_fields = data.keys() - cls.model_fields.keys() + if extra_fields: + logger.warning( + "The following fields were present in the request " + "but ignored: %s", extra_fields) + return data class ErrorResponse(OpenAIBaseModel): From 2f77b6cfec32c8054f996aee4b021f511630ea6f Mon Sep 17 00:00:00 2001 From: Woosuk Kwon Date: Wed, 20 Nov 2024 13:54:15 -0800 Subject: [PATCH 058/122] [TPU] Implement prefix caching for TPUs (#10307) Signed-off-by: Woosuk Kwon --- requirements-tpu.txt | 6 +- vllm/attention/backends/pallas.py | 66 ++++++---- vllm/worker/tpu_model_runner.py | 211 +++++++++++++++++++----------- vllm/worker/tpu_worker.py | 4 +- 4 files changed, 182 insertions(+), 105 deletions(-) diff --git a/requirements-tpu.txt b/requirements-tpu.txt index f9a0770804e55..3d1e80f6be620 100644 --- a/requirements-tpu.txt +++ b/requirements-tpu.txt @@ -16,8 +16,8 @@ ray[default] --find-links https://storage.googleapis.com/libtpu-releases/index.html --find-links https://storage.googleapis.com/jax-releases/jax_nightly_releases.html --find-links https://storage.googleapis.com/jax-releases/jaxlib_nightly_releases.html -torch==2.6.0.dev20241028+cpu -torchvision==0.20.0.dev20241028+cpu -torch_xla[tpu] @ https://storage.googleapis.com/pytorch-xla-releases/wheels/tpuvm/torch_xla-2.6.0.dev20241028-cp310-cp310-linux_x86_64.whl +torch==2.6.0.dev20241114+cpu +torchvision==0.20.0.dev20241114+cpu +torch_xla[tpu] @ https://storage.googleapis.com/pytorch-xla-releases/wheels/tpuvm/torch_xla-2.6.0.dev20241114-cp310-cp310-linux_x86_64.whl jaxlib==0.4.32.dev20240829 jax==0.4.32.dev20240829 diff --git a/vllm/attention/backends/pallas.py b/vllm/attention/backends/pallas.py index 6fee81de14420..eeab8731a2c39 100644 --- a/vllm/attention/backends/pallas.py +++ b/vllm/attention/backends/pallas.py @@ -65,6 +65,7 @@ class PallasMetadata(AttentionMetadata): # or all decoding. block_tables: Optional[torch.Tensor] = None context_lens: Optional[torch.Tensor] = None + effective_query_lens: Optional[torch.Tensor] = None @property def prefill_metadata(self) -> Optional["PallasMetadata"]: @@ -72,8 +73,6 @@ def prefill_metadata(self) -> Optional["PallasMetadata"]: return None assert self.num_decode_tokens == 0 - assert self.block_tables is None - assert self.context_lens is None return self @property @@ -186,29 +185,50 @@ def forward( query = query * self.scale if attn_metadata.num_prefills > 0: - assert seq_len % 16 == 0, ( - "Pallas FlashAttention kernel requires seq_len to be a " - f"multiple of 16 but got {seq_len}") - - # Handle GQA/MQA. - if self.num_kv_heads != self.num_heads: - key = key.repeat_interleave(self.num_queries_per_kv, dim=-2) - key = key.view(batch_size, seq_len, self.num_heads, - self.head_size) - value = value.repeat_interleave(self.num_queries_per_kv, + if attn_metadata.block_tables is None: + # Prefill without paged KV cache. + assert seq_len % 16 == 0, ( + "Pallas FlashAttention kernel requires seq_len to be a " + f"multiple of 16 but got {seq_len}") + + # Handle GQA/MQA. + if self.num_kv_heads != self.num_heads: + key = key.repeat_interleave(self.num_queries_per_kv, dim=-2) - value = value.view(batch_size, seq_len, self.num_heads, + key = key.view(batch_size, seq_len, self.num_heads, self.head_size) - # FlashAttention requires [batch_size, num_heads, seq_len, d_model] - # while the input is [batch_size, seq_len, num_heads, d_model]. - # Permute the input to match the required format. 
- output = torch.ops.xla.flash_attention( - query.permute(0, 2, 1, 3), - key.permute(0, 2, 1, 3), - value.permute(0, 2, 1, 3), - True, - ) - output = output.permute(0, 2, 1, 3) + value = value.repeat_interleave(self.num_queries_per_kv, + dim=-2) + value = value.view(batch_size, seq_len, self.num_heads, + self.head_size) + # FlashAttention kernel requires the input shape to be + # [batch_size, num_heads, seq_len, d_model] + # while the input is [batch_size, seq_len, num_heads, d_model]. + # Permute the input to match the required format. + output = torch.ops.xla.flash_attention( + query.permute(0, 2, 1, 3), + key.permute(0, 2, 1, 3), + value.permute(0, 2, 1, 3), + True, + ) + output = output.permute(0, 2, 1, 3) + else: + # Prefill with paged KV cache. + # TODO(woosuk): Tune the below knobs. + num_kv_pages_per_compute_block = 16 + num_queries_per_compute_block = 16 + assert seq_len % num_queries_per_compute_block == 0 + output = torch.ops.xla.multi_queries_paged_attention( + query, + key_cache, + value_cache, + attn_metadata.context_lens, + attn_metadata.block_tables, + attn_metadata.effective_query_lens, + num_kv_pages_per_compute_block, + num_queries_per_compute_block, + use_kernel=True, + ) else: # Decoding run. assert kv_cache[0].numel() > 0 diff --git a/vllm/worker/tpu_model_runner.py b/vllm/worker/tpu_model_runner.py index d7a641857a613..9a054eb8a4cf7 100644 --- a/vllm/worker/tpu_model_runner.py +++ b/vllm/worker/tpu_model_runner.py @@ -1,3 +1,4 @@ +import enum import time from dataclasses import dataclass from typing import (TYPE_CHECKING, Any, Callable, Dict, List, Optional, Tuple, @@ -11,7 +12,6 @@ import torch_xla.runtime as xr from vllm.attention import AttentionMetadata, get_attn_backend -from vllm.compilation.wrapper import TorchCompileWrapperWithCustomDispatcher from vllm.config import VllmConfig from vllm.logger import init_logger from vllm.model_executor.layers.sampler import SamplerOutput @@ -39,6 +39,15 @@ _MAX_NUM_SAMPLES = 128 +class ExecutionMode(enum.Enum): + PREFILL = enum.auto() + DECODE = enum.auto() + PREFIX_PREFILL = enum.auto() + + def is_prefill(self) -> bool: + return self in (ExecutionMode.PREFILL, ExecutionMode.PREFIX_PREFILL) + + @dataclass(frozen=True) class ModelInputForTPU(ModelRunnerInputBase): token_ids: torch.Tensor @@ -140,16 +149,21 @@ def load_model(self) -> None: model = get_model(vllm_config=self.vllm_config) model = model.eval() xm.wait_device_ops() - self.model = ModelWrapper(model, self.vllm_config) + model = ModelWrapper(model) + self.model = torch.compile(model, + backend="openxla", + fullgraph=True, + dynamic=False) def _dummy_run( self, batch_size: int, seq_len: int, kv_caches: List[Tuple[torch.Tensor, torch.Tensor]], - is_prompt: bool, + exec_mode: ExecutionMode, ) -> None: - if is_prompt: + exec_mode = ExecutionMode(exec_mode) + if exec_mode.is_prefill(): seq_len = (seq_len + 15) // 16 * 16 token_ids = torch.zeros((batch_size, seq_len), dtype=torch.int32, @@ -160,18 +174,38 @@ def _dummy_run( slot_mapping = torch.zeros((batch_size, seq_len), dtype=torch.int64, device=self.device) - attn_metadata = self.attn_backend.make_metadata( - num_prefills=batch_size, - num_prefill_tokens=batch_size * seq_len, - num_decode_tokens=0, - slot_mapping=slot_mapping, - multi_modal_placeholder_index_maps=None, - block_tables=None, - context_lens=None, - ) input_lens = torch.ones((batch_size, ), dtype=torch.int32, device=self.device) + if exec_mode == ExecutionMode.PREFILL: + attn_metadata = self.attn_backend.make_metadata( + num_prefills=batch_size, + 
num_prefill_tokens=batch_size * seq_len, + num_decode_tokens=0, + slot_mapping=slot_mapping, + multi_modal_placeholder_index_maps=None, + block_tables=None, + context_lens=None, + effective_query_lens=None, + ) + else: + context_lens = torch.ones((batch_size, ), + dtype=torch.int32, + device=self.device) + block_tables = torch.tensor(self.block_tables[:batch_size], + dtype=torch.int32, + device=self.device) + effective_query_lens = torch.ones_like(context_lens) + attn_metadata = self.attn_backend.make_metadata( + num_prefills=batch_size, + num_prefill_tokens=batch_size * seq_len, + num_decode_tokens=0, + slot_mapping=slot_mapping, + multi_modal_placeholder_index_maps=None, + block_tables=block_tables, + context_lens=context_lens, + effective_query_lens=effective_query_lens, + ) else: assert seq_len == 1 token_ids = torch.zeros((batch_size, seq_len), @@ -204,7 +238,7 @@ def _dummy_run( ) t = torch.ones((batch_size, ), dtype=torch.float32, device=self.device) p = torch.ones((batch_size, ), dtype=torch.float32, device=self.device) - num_samples = _MAX_NUM_SAMPLES if is_prompt else 1 + num_samples = _MAX_NUM_SAMPLES if exec_mode.is_prefill() else 1 # NOTE(woosuk): There are two stages of compilation: torch.compile and # XLA compilation. Using `mark_dynamic` can reduce the torch.compile @@ -213,7 +247,7 @@ def _dummy_run( # be re-compiled for every different shapes. This overhead is inevitable # in the first run, but can be skipped afterwards as we cache the XLA # graphs in the disk (VLLM_XLA_CACHE_PATH). - if is_prompt: + if exec_mode.is_prefill(): # Prefll torch._dynamo.mark_dynamic(token_ids, 1) torch._dynamo.mark_dynamic(position_ids, 1) @@ -229,15 +263,8 @@ def _dummy_run( torch._dynamo.mark_dynamic(t, 0) torch._dynamo.mark_dynamic(p, 0) # Dummy run. 
- self.model(token_ids, - position_ids, - attn_metadata, - input_lens, - t, - p, - num_samples, - kv_caches, - is_prompt=is_prompt) + self.model(token_ids, position_ids, attn_metadata, input_lens, t, p, + num_samples, kv_caches) def warmup_model( self, @@ -248,13 +275,13 @@ def warmup_model( start = time.time() for batch_size in [1]: seq_len = 16 - while True: - self._dummy_run(batch_size, seq_len, kv_caches, is_prompt=True) + while seq_len <= self.model_config.max_model_len: + self._dummy_run(batch_size, + seq_len, + kv_caches, + exec_mode=ExecutionMode.PREFILL) xm.wait_device_ops() logger.info("batch_size: %d, seq_len: %d", batch_size, seq_len) - - if seq_len >= self.model_config.max_model_len: - break num_tokens = batch_size * seq_len if num_tokens >= self.scheduler_config.max_num_batched_tokens: break @@ -263,12 +290,39 @@ def warmup_model( end = time.time() logger.info("Compilation for prefill done in %.2f s.", end - start) + # Prefix prefill + if self.cache_config.enable_prefix_caching: + logger.info("Compiling the model with different input shapes for " + "prefix prefill...") + start = time.time() + for batch_size in [1]: + seq_len = 16 + while seq_len <= self.model_config.max_model_len: + self._dummy_run(batch_size, + seq_len, + kv_caches, + exec_mode=ExecutionMode.PREFIX_PREFILL) + xm.wait_device_ops() + logger.info("batch_size: %d, seq_len: %d", batch_size, + seq_len) + num_tokens = batch_size * seq_len + if (num_tokens >= + self.scheduler_config.max_num_batched_tokens): + break + seq_len = seq_len * 2 + end = time.time() + logger.info("Compilation for prefix prefill done in %.2f s.", + end - start) + # Decode start = time.time() seq_len = 1 batch_size = 8 # Must be in sync with _get_padded_batch_size() while True: - self._dummy_run(batch_size, seq_len, kv_caches, is_prompt=False) + self._dummy_run(batch_size, + seq_len, + kv_caches, + exec_mode=ExecutionMode.DECODE) xm.wait_device_ops() logger.info("batch_size: %d, seq_len: %d", batch_size, seq_len) @@ -287,9 +341,11 @@ def _prepare_prompt( input_tokens: List[int] = [] input_positions: List[int] = [] prompt_lens: List[int] = [] + context_lens: List[int] = [] slot_mapping: List[int] = [] - for seq_group_metadata in seq_group_metadata_list: + for batch_idx, seq_group_metadata in enumerate( + seq_group_metadata_list): assert seq_group_metadata.is_prompt seq_ids = list(seq_group_metadata.seq_data.keys()) assert len(seq_ids) == 1 @@ -298,19 +354,31 @@ def _prepare_prompt( seq_data = seq_group_metadata.seq_data[seq_id] # Could include output tokens when a request is preempted. 
prompt_tokens = seq_data.get_token_ids() + seq_len = len(prompt_tokens) + + num_computed_blocks = len(seq_group_metadata.computed_block_nums) + num_computed_tokens = num_computed_blocks * self.block_size + if num_computed_tokens > 0: + prompt_tokens = prompt_tokens[num_computed_tokens:] + context_lens.append(seq_len) + else: + context_lens.append(0) + prompt_len = len(prompt_tokens) prompt_lens.append(prompt_len) input_tokens.extend(prompt_tokens) - input_positions.extend(list(range(prompt_len))) + input_positions.extend(range(num_computed_tokens, seq_len)) assert seq_group_metadata.block_tables is not None block_table = seq_group_metadata.block_tables[seq_id] - for i in range(prompt_len): + for i in range(num_computed_tokens, seq_len): block_number = block_table[i // self.block_size] block_offset = i % self.block_size slot = block_number * self.block_size + block_offset slot_mapping.append(slot) + if num_computed_tokens > 0: + self.block_tables[batch_idx, :len(block_table)] = block_table # Add paddings to EACH prompt to the smallest power of 2 that is # greater than or equal to the prompt length. @@ -338,14 +406,21 @@ def _prepare_prompt( prompt_lens = torch.tensor(prompt_lens, dtype=torch.int32, device="cpu") + context_lens = torch.tensor(context_lens, + dtype=torch.int32, + device="cpu") + block_tables = torch.tensor(self.block_tables[:num_prefills], + dtype=torch.int32, + device="cpu") attn_metadata = self.attn_backend.make_metadata( num_prefills=num_prefills, num_prefill_tokens=0, # NOTE: This is not used. num_decode_tokens=0, slot_mapping=slot_mapping, multi_modal_placeholder_index_maps=None, - block_tables=None, - context_lens=None, + block_tables=block_tables, + context_lens=context_lens, + effective_query_lens=prompt_lens, ) return input_tokens, input_positions, attn_metadata, prompt_lens @@ -550,6 +625,10 @@ def execute_model( # process them separately. This is a temporary hack that should be # optimized by using SplashAttention. 
orig_slot_mapping = model_input.attn_metadata.slot_mapping + orig_block_tables = model_input.attn_metadata.block_tables + orig_context_lens = model_input.attn_metadata.context_lens + orig_effective_query_lens = \ + model_input.attn_metadata.effective_query_lens batch_size = model_input.input_lens.shape[0] start_idx = 0 next_token_ids = [] @@ -568,18 +647,24 @@ def execute_model( attn_metadata.num_prefills = 1 attn_metadata.slot_mapping = orig_slot_mapping[ None, start_idx:end_idx].to(self.device) + if orig_context_lens[i].item() > 0: + attn_metadata.context_lens = orig_context_lens[i:i + 1].to( + self.device) + attn_metadata.block_tables = orig_block_tables[ + i].unsqueeze(0).to(self.device) + attn_metadata.effective_query_lens = \ + orig_effective_query_lens[i:i + 1].to(self.device) + else: + attn_metadata.context_lens = None + attn_metadata.block_tables = None + attn_metadata.effective_query_lens = None input_lens = model_input.input_lens[i:i + 1].to(self.device) t = model_input.t[i:i + 1].to(self.device) p = model_input.p[i:i + 1].to(self.device) - output_token_ids = self.model(token_ids, - position_ids, - attn_metadata, - input_lens, - t, - p, + output_token_ids = self.model(token_ids, position_ids, + attn_metadata, input_lens, t, p, model_input.num_samples, - kv_caches, - is_prompt=True) + kv_caches) next_token_ids.append(output_token_ids[0]) start_idx = end_idx @@ -624,15 +709,10 @@ def execute_model( input_lens = model_input.input_lens.to(self.device) for i in range(num_steps): slot_mapping = attn_metadata.slot_mapping - output_token_ids = self.model(token_ids, - position_ids, - attn_metadata, - input_lens, - t, - p, + output_token_ids = self.model(token_ids, position_ids, + attn_metadata, input_lens, t, p, model_input.num_samples, - kv_caches, - is_prompt=False) + kv_caches) self.cached_step_outputs.append(output_token_ids) if i < num_steps - 1: @@ -667,34 +747,11 @@ def execute_model( return [sampler_output] -class ModelWrapper(TorchCompileWrapperWithCustomDispatcher): +class ModelWrapper(nn.Module): - def __init__(self, model: nn.Module, vllm_config: VllmConfig): + def __init__(self, model: nn.Module): + super().__init__() self.model = model - compiled_callable = torch.compile(self.forward, - backend="openxla", - fullgraph=True, - dynamic=False) - super().__init__( - compiled_callable, - compilation_level=vllm_config.compilation_config.level) - - def __call__(self, *args, is_prompt: bool, **kwargs): - if len(self.compiled_codes) < 3 or not self.use_custom_dispatcher: - # not fully compiled yet, or not using the custom dispatcher, - # let PyTorch handle it - return self.compiled_callable(*args, **kwargs) - # the 3 compiled codes are: - # 0: for profiling - # 1: for prompt - # 2: for decode - # dispatch to the compiled code directly, skip PyTorch - if is_prompt: - with self.dispatch_to_code(1): - return self.forward(*args, **kwargs) - else: - with self.dispatch_to_code(2): - return self.forward(*args, **kwargs) def forward( self, diff --git a/vllm/worker/tpu_worker.py b/vllm/worker/tpu_worker.py index 096cb23416909..8754f7538f251 100644 --- a/vllm/worker/tpu_worker.py +++ b/vllm/worker/tpu_worker.py @@ -13,7 +13,7 @@ from vllm.model_executor import set_random_seed from vllm.sequence import ExecuteModelRequest from vllm.utils import STR_DTYPE_TO_TORCH_DTYPE, get_dtype_size -from vllm.worker.tpu_model_runner import TPUModelRunner +from vllm.worker.tpu_model_runner import ExecutionMode, TPUModelRunner from vllm.worker.worker_base import (LocalOrDistributedWorkerBase, 
LoraNotSupportedWorkerBase, WorkerBase, WorkerInput) @@ -112,7 +112,7 @@ def determine_num_available_blocks(self) -> Tuple[int, int]: batch_size=1, seq_len=self.scheduler_config.max_num_batched_tokens, kv_caches=kv_caches, - is_prompt=True, + exec_mode=ExecutionMode.PREFILL, ) # Synchronize before measuring the memory usage. xm.wait_device_ops() From 388ee3de665c3055fbe610b66ebeef096a23cfe1 Mon Sep 17 00:00:00 2001 From: youkaichao Date: Wed, 20 Nov 2024 18:36:33 -0800 Subject: [PATCH 059/122] [torch.compile] limit inductor threads and lazy import quant (#10482) Signed-off-by: youkaichao --- .buildkite/test-pipeline.yaml | 2 + tests/quantization/utils.py | 4 +- tests/test_lazy_torch_compile.py | 68 ++++++++++ vllm/_custom_ops.py | 3 - vllm/config.py | 8 +- .../layers/quantization/__init__.py | 124 +++++++++++------- vllm/model_executor/models/internvl.py | 4 +- vllm/model_executor/models/qwen2_vl.py | 7 +- vllm/platforms/cuda.py | 2 + vllm/platforms/rocm.py | 11 ++ vllm/plugins/__init__.py | 9 ++ 11 files changed, 178 insertions(+), 64 deletions(-) create mode 100644 tests/test_lazy_torch_compile.py diff --git a/.buildkite/test-pipeline.yaml b/.buildkite/test-pipeline.yaml index 501743c887596..c436d2b48d20f 100644 --- a/.buildkite/test-pipeline.yaml +++ b/.buildkite/test-pipeline.yaml @@ -50,7 +50,9 @@ steps: - tests/multimodal - tests/test_utils - tests/worker + - tests/test_lazy_torch_compile.py commands: + - python3 test_lazy_torch_compile.py - pytest -v -s mq_llm_engine # MQLLMEngine - pytest -v -s async_engine # AsyncLLMEngine - NUM_SCHEDULER_STEPS=4 pytest -v -s async_engine/test_async_llm_engine.py diff --git a/tests/quantization/utils.py b/tests/quantization/utils.py index 061a077592e80..8ebd8dd2be0d5 100644 --- a/tests/quantization/utils.py +++ b/tests/quantization/utils.py @@ -1,4 +1,4 @@ -from vllm.model_executor.layers.quantization import QUANTIZATION_METHODS +from vllm.model_executor.layers.quantization import get_quantization_config from vllm.platforms import current_platform @@ -10,6 +10,6 @@ def is_quant_method_supported(quant_method: str) -> bool: capability = current_platform.get_device_capability() assert capability is not None - min_capability = QUANTIZATION_METHODS[quant_method].get_min_capability() + min_capability = get_quantization_config(quant_method).get_min_capability() return capability.to_int() >= min_capability diff --git a/tests/test_lazy_torch_compile.py b/tests/test_lazy_torch_compile.py new file mode 100644 index 0000000000000..b8ac4dd93732b --- /dev/null +++ b/tests/test_lazy_torch_compile.py @@ -0,0 +1,68 @@ +# Description: Test the lazy import module +# The utility function cannot be placed in `vllm.utils` +# this needs to be a standalone script + +import contextlib +import dataclasses +import sys +import traceback +from typing import Callable, Generator + + +@dataclasses.dataclass +class BlameResult: + found: bool = False + trace_stack: str = "" + + +@contextlib.contextmanager +def blame(func: Callable) -> Generator[BlameResult, None, None]: + """ + Trace the function calls to find the first function that satisfies the + condition. The trace stack will be stored in the result. 
+ + Usage: + + ```python + with blame(lambda: some_condition()) as result: + # do something + + if result.found: + print(result.trace_stack) + """ + result = BlameResult() + + def _trace_calls(frame, event, arg=None): + nonlocal result + if event in ['call', 'return']: + # for every function call or return + try: + # Temporarily disable the trace function + sys.settrace(None) + # check condition here + if not result.found and func(): + result.found = True + result.trace_stack = "".join(traceback.format_stack()) + # Re-enable the trace function + sys.settrace(_trace_calls) + except NameError: + # modules are deleted during shutdown + pass + return _trace_calls + + sys.settrace(_trace_calls) + + yield result + + sys.settrace(None) + + +module_name = "torch._inductor.async_compile" + +with blame(lambda: module_name in sys.modules) as result: + import vllm # noqa + +assert not result.found, (f"Module {module_name} is already imported, the" + f" first import location is:\n{result.trace_stack}") + +print(f"Module {module_name} is not imported yet") diff --git a/vllm/_custom_ops.py b/vllm/_custom_ops.py index 782dc6aed1b8c..41892e4dddf7e 100644 --- a/vllm/_custom_ops.py +++ b/vllm/_custom_ops.py @@ -19,9 +19,6 @@ except ImportError as e: logger.warning("Failed to import from vllm._C with %r", e) -if current_platform.is_rocm(): - import vllm._rocm_C # noqa: F401 - supports_moe_ops = False with contextlib.suppress(ImportError): import vllm._moe_C # noqa: F401 diff --git a/vllm/config.py b/vllm/config.py index 3d0c616868225..7522486782cc9 100644 --- a/vllm/config.py +++ b/vllm/config.py @@ -14,7 +14,8 @@ import vllm.envs as envs from vllm.logger import init_logger -from vllm.model_executor.layers.quantization import QUANTIZATION_METHODS +from vllm.model_executor.layers.quantization import (QUANTIZATION_METHODS, + get_quantization_config) from vllm.model_executor.models import ModelRegistry from vllm.platforms import current_platform from vllm.tracing import is_otel_available, otel_import_error_traceback @@ -370,7 +371,7 @@ def _parse_quant_hf_config(self): return quant_cfg def _verify_quantization(self) -> None: - supported_quantization = [*QUANTIZATION_METHODS] + supported_quantization = QUANTIZATION_METHODS rocm_supported_quantization = [ "awq", "gptq", "fp8", "compressed_tensors", "compressed-tensors", "fbgemm_fp8" @@ -392,7 +393,8 @@ def _verify_quantization(self) -> None: quant_method = quant_cfg.get("quant_method", "").lower() # Detect which checkpoint is it - for _, method in QUANTIZATION_METHODS.items(): + for name in QUANTIZATION_METHODS: + method = get_quantization_config(name) quantization_override = method.override_quantization_method( quant_cfg, self.quantization) if quantization_override: diff --git a/vllm/model_executor/layers/quantization/__init__.py b/vllm/model_executor/layers/quantization/__init__.py index ff342c4f9479e..dd10c434f0752 100644 --- a/vllm/model_executor/layers/quantization/__init__.py +++ b/vllm/model_executor/layers/quantization/__init__.py @@ -1,65 +1,87 @@ -from typing import Dict, Type +from typing import Dict, List, Type -from vllm.model_executor.layers.quantization.aqlm import AQLMConfig -from vllm.model_executor.layers.quantization.awq import AWQConfig -from vllm.model_executor.layers.quantization.awq_marlin import AWQMarlinConfig from vllm.model_executor.layers.quantization.base_config import ( QuantizationConfig) -from vllm.model_executor.layers.quantization.bitsandbytes import ( - BitsAndBytesConfig) -from 
vllm.model_executor.layers.quantization.compressed_tensors.compressed_tensors import ( # noqa: E501 - CompressedTensorsConfig) -from vllm.model_executor.layers.quantization.deepspeedfp import ( - DeepSpeedFPConfig) -from vllm.model_executor.layers.quantization.experts_int8 import ( - ExpertsInt8Config) -from vllm.model_executor.layers.quantization.fbgemm_fp8 import FBGEMMFp8Config -from vllm.model_executor.layers.quantization.fp8 import Fp8Config -from vllm.model_executor.layers.quantization.gguf import GGUFConfig -from vllm.model_executor.layers.quantization.gptq import GPTQConfig -from vllm.model_executor.layers.quantization.gptq_marlin import ( - GPTQMarlinConfig) -from vllm.model_executor.layers.quantization.gptq_marlin_24 import ( - GPTQMarlin24Config) -from vllm.model_executor.layers.quantization.hqq_marlin import HQQMarlinConfig -from vllm.model_executor.layers.quantization.ipex_quant import IPEXConfig -from vllm.model_executor.layers.quantization.marlin import MarlinConfig -from vllm.model_executor.layers.quantization.modelopt import ModelOptFp8Config -from vllm.model_executor.layers.quantization.neuron_quant import ( - NeuronQuantConfig) -from vllm.model_executor.layers.quantization.qqq import QQQConfig -from vllm.model_executor.layers.quantization.tpu_int8 import Int8TpuConfig -QUANTIZATION_METHODS: Dict[str, Type[QuantizationConfig]] = { - "aqlm": AQLMConfig, - "awq": AWQConfig, - "deepspeedfp": DeepSpeedFPConfig, - "tpu_int8": Int8TpuConfig, - "fp8": Fp8Config, - "fbgemm_fp8": FBGEMMFp8Config, - "modelopt": ModelOptFp8Config, +QUANTIZATION_METHODS: List[str] = [ + "aqlm", + "awq", + "deepspeedfp", + "tpu_int8", + "fp8", + "fbgemm_fp8", + "modelopt", # The order of gptq methods is important for config.py iteration over # override_quantization_method(..) 
- "marlin": MarlinConfig, - "gguf": GGUFConfig, - "gptq_marlin_24": GPTQMarlin24Config, - "gptq_marlin": GPTQMarlinConfig, - "awq_marlin": AWQMarlinConfig, - "gptq": GPTQConfig, - "compressed-tensors": CompressedTensorsConfig, - "bitsandbytes": BitsAndBytesConfig, - "qqq": QQQConfig, - "hqq": HQQMarlinConfig, - "experts_int8": ExpertsInt8Config, - "neuron_quant": NeuronQuantConfig, - "ipex": IPEXConfig, -} + "marlin", + "gguf", + "gptq_marlin_24", + "gptq_marlin", + "awq_marlin", + "gptq", + "compressed-tensors", + "bitsandbytes", + "qqq", + "hqq", + "experts_int8", + "neuron_quant", + "ipex", +] def get_quantization_config(quantization: str) -> Type[QuantizationConfig]: if quantization not in QUANTIZATION_METHODS: raise ValueError(f"Invalid quantization method: {quantization}") - return QUANTIZATION_METHODS[quantization] + + # lazy import to avoid triggering `torch.compile` too early + from .aqlm import AQLMConfig + from .awq import AWQConfig + from .awq_marlin import AWQMarlinConfig + from .bitsandbytes import BitsAndBytesConfig + from .compressed_tensors.compressed_tensors import ( # noqa: E501 + CompressedTensorsConfig) + from .deepspeedfp import DeepSpeedFPConfig + from .experts_int8 import ExpertsInt8Config + from .fbgemm_fp8 import FBGEMMFp8Config + from .fp8 import Fp8Config + from .gguf import GGUFConfig + from .gptq import GPTQConfig + from .gptq_marlin import GPTQMarlinConfig + from .gptq_marlin_24 import GPTQMarlin24Config + from .hqq_marlin import HQQMarlinConfig + from .ipex_quant import IPEXConfig + from .marlin import MarlinConfig + from .modelopt import ModelOptFp8Config + from .neuron_quant import NeuronQuantConfig + from .qqq import QQQConfig + from .tpu_int8 import Int8TpuConfig + + method_to_config: Dict[str, Type[QuantizationConfig]] = { + "aqlm": AQLMConfig, + "awq": AWQConfig, + "deepspeedfp": DeepSpeedFPConfig, + "tpu_int8": Int8TpuConfig, + "fp8": Fp8Config, + "fbgemm_fp8": FBGEMMFp8Config, + "modelopt": ModelOptFp8Config, + # The order of gptq methods is important for config.py iteration over + # override_quantization_method(..) 
+ "marlin": MarlinConfig, + "gguf": GGUFConfig, + "gptq_marlin_24": GPTQMarlin24Config, + "gptq_marlin": GPTQMarlinConfig, + "awq_marlin": AWQMarlinConfig, + "gptq": GPTQConfig, + "compressed-tensors": CompressedTensorsConfig, + "bitsandbytes": BitsAndBytesConfig, + "qqq": QQQConfig, + "hqq": HQQMarlinConfig, + "experts_int8": ExpertsInt8Config, + "neuron_quant": NeuronQuantConfig, + "ipex": IPEXConfig, + } + + return method_to_config[quantization] __all__ = [ diff --git a/vllm/model_executor/models/internvl.py b/vllm/model_executor/models/internvl.py index 7ea2f9be2191d..5d38b4b1ef14b 100644 --- a/vllm/model_executor/models/internvl.py +++ b/vllm/model_executor/models/internvl.py @@ -19,8 +19,8 @@ from vllm.config import VllmConfig from vllm.inputs import (INPUT_REGISTRY, DecoderOnlyInputs, DummyData, InputContext, token_inputs) -from vllm.model_executor.layers.quantization import (AWQConfig, - QuantizationConfig) +from vllm.model_executor.layers.quantization import QuantizationConfig +from vllm.model_executor.layers.quantization.awq import AWQConfig from vllm.model_executor.layers.sampler import SamplerOutput, get_sampler from vllm.model_executor.models.intern_vit import (InternVisionModel, InternVisionPatchModel) diff --git a/vllm/model_executor/models/qwen2_vl.py b/vllm/model_executor/models/qwen2_vl.py index 0ac81387b1bd8..531608a877f2f 100644 --- a/vllm/model_executor/models/qwen2_vl.py +++ b/vllm/model_executor/models/qwen2_vl.py @@ -51,9 +51,10 @@ RowParallelLinear) from vllm.model_executor.layers.logits_processor import LogitsProcessor from vllm.model_executor.layers.pooler import Pooler, PoolingType -from vllm.model_executor.layers.quantization import (GPTQConfig, - GPTQMarlinConfig, - QuantizationConfig) +from vllm.model_executor.layers.quantization import QuantizationConfig +from vllm.model_executor.layers.quantization.gptq import GPTQConfig +from vllm.model_executor.layers.quantization.gptq_marlin import ( + GPTQMarlinConfig) from vllm.model_executor.layers.sampler import SamplerOutput, get_sampler from vllm.model_executor.layers.vocab_parallel_embedding import ParallelLMHead from vllm.model_executor.model_loader.weight_utils import default_weight_loader diff --git a/vllm/platforms/cuda.py b/vllm/platforms/cuda.py index 9c5212ace1346..d2911ef650743 100644 --- a/vllm/platforms/cuda.py +++ b/vllm/platforms/cuda.py @@ -10,6 +10,8 @@ import torch from typing_extensions import ParamSpec +# import custom ops, trigger op registration +import vllm._C # noqa from vllm.logger import init_logger from .interface import DeviceCapability, Platform, PlatformEnum diff --git a/vllm/platforms/rocm.py b/vllm/platforms/rocm.py index 022256996f97b..bb3a49c8b73bc 100644 --- a/vllm/platforms/rocm.py +++ b/vllm/platforms/rocm.py @@ -9,6 +9,17 @@ logger = init_logger(__name__) +try: + import vllm._C # noqa: F401 +except ImportError as e: + logger.warning("Failed to import from vllm._C with %r", e) + +# import custom ops, trigger op registration +try: + import vllm._rocm_C # noqa: F401 +except ImportError as e: + logger.warning("Failed to import from vllm._rocm_C with %r", e) + if os.environ.get("VLLM_WORKER_MULTIPROC_METHOD", None) in ["fork", None]: logger.warning("`fork` method is not supported by ROCm. 
" "VLLM_WORKER_MULTIPROC_METHOD is overridden to" diff --git a/vllm/plugins/__init__.py b/vllm/plugins/__init__.py index dc183dbfc9b96..d5056b18fe968 100644 --- a/vllm/plugins/__init__.py +++ b/vllm/plugins/__init__.py @@ -1,4 +1,5 @@ import logging +import os from contextlib import contextmanager from typing import TYPE_CHECKING, Optional @@ -18,6 +19,14 @@ def load_general_plugins(): processes. They should be designed in a way that they can be loaded multiple times without causing issues. """ + + # all processes created by vllm will load plugins, + # and here we can inject some common environment variables + # for all processes. + + # see https://github.com/vllm-project/vllm/issues/10480 + os.environ['TORCHINDUCTOR_COMPILE_THREADS'] = '1' + global plugins_loaded if plugins_loaded: return From 6c1208d083fbaaf89c6d812f4d3424e15182f652 Mon Sep 17 00:00:00 2001 From: Pavani Majety Date: Wed, 20 Nov 2024 19:56:47 -0800 Subject: [PATCH 060/122] [Core] Add Sliding Window Support with Flashinfer (#10462) Signed-off-by: Pavani Majety --- .../block/e2e/test_correctness_sliding_window.py | 12 ++++++++++-- vllm/attention/backends/flashinfer.py | 13 ++++++++----- 2 files changed, 18 insertions(+), 7 deletions(-) diff --git a/tests/core/block/e2e/test_correctness_sliding_window.py b/tests/core/block/e2e/test_correctness_sliding_window.py index 9320a9ef62314..415d0bd8237df 100644 --- a/tests/core/block/e2e/test_correctness_sliding_window.py +++ b/tests/core/block/e2e/test_correctness_sliding_window.py @@ -3,6 +3,7 @@ import pytest +from tests.kernels.utils import override_backend_env_variable from vllm import LLM, SamplingParams from .conftest import get_text_from_llm_generator @@ -28,8 +29,9 @@ @pytest.mark.parametrize("test_llm_kwargs", [{}]) @pytest.mark.parametrize("batch_size", [5]) @pytest.mark.parametrize("seed", [1]) +@pytest.mark.parametrize("backend", ["FLASH_ATTN", "FLASHINFER", "XFORMERS"]) def test_sliding_window_retrival(baseline_llm_generator, test_llm_generator, - batch_size, seed): + batch_size, seed, backend, monkeypatch): """ The test does a bunch of assignments "x1 = 10\nx2 = 33\n..." and then asks for value of one of them (which is outside the sliding window). @@ -38,6 +40,8 @@ def test_sliding_window_retrival(baseline_llm_generator, test_llm_generator, Additionally, we compare the results of the v1 and v2 managers. """ + override_backend_env_variable(monkeypatch, backend) + sampling_params = SamplingParams( max_tokens=1024, ignore_eos=True, @@ -84,7 +88,9 @@ def test_sliding_window_retrival(baseline_llm_generator, test_llm_generator, @pytest.mark.parametrize("test_llm_kwargs", [{"enable_chunked_prefill": True}]) @pytest.mark.parametrize("batch_size", [5]) @pytest.mark.parametrize("seed", [1]) -def test_sliding_window_chunked_prefill(test_llm_generator, batch_size, seed): +@pytest.mark.parametrize("backend", ["FLASH_ATTN", "FLASHINFER", "XFORMERS"]) +def test_sliding_window_chunked_prefill(test_llm_generator, batch_size, seed, + backend, monkeypatch): """ This is similar to test_sliding_window_retrival, however, it doesn't compare against the v1 block manager since v1 doesn't support @@ -93,6 +99,8 @@ def test_sliding_window_chunked_prefill(test_llm_generator, batch_size, seed): The results with and without chunked prefill are not the same due to numerical instabilities. 
""" + override_backend_env_variable(monkeypatch, backend) + sampling_params = SamplingParams( max_tokens=10, ignore_eos=True, diff --git a/vllm/attention/backends/flashinfer.py b/vllm/attention/backends/flashinfer.py index 107e3bbf79666..b61c660e3e280 100644 --- a/vllm/attention/backends/flashinfer.py +++ b/vllm/attention/backends/flashinfer.py @@ -757,9 +757,8 @@ def __init__( if alibi_slopes is not None: alibi_slopes = torch.tensor(alibi_slopes, dtype=torch.float32) self.alibi_slopes = alibi_slopes - if sliding_window is not None: - raise ValueError("Sliding window is not supported in FlashInfer.") - self.sliding_window = (-1, -1) + self.sliding_window = ((sliding_window - 1, + 0) if sliding_window is not None else (-1, -1)) self.kv_cache_dtype = kv_cache_dtype self.logits_soft_cap = logits_soft_cap @@ -865,6 +864,8 @@ def unified_flash_infer( assert query.shape[0] == num_prefill_tokens assert decode_query.shape[0] == num_decode_tokens + window_left = window_size[0] if window_size is not None else -1 + prefill_output: Optional[torch.Tensor] = None decode_output: Optional[torch.Tensor] = None if prefill_meta := attn_metadata.prefill_metadata: @@ -895,7 +896,8 @@ def unified_flash_infer( logits_soft_cap=logits_soft_cap, causal=True, k_scale=k_scale, - v_scale=v_scale) + v_scale=v_scale, + window_left=window_left) if decode_meta := attn_metadata.decode_metadata: assert attn_metadata.decode_metadata is not None assert attn_metadata.decode_metadata.decode_wrapper is not None @@ -905,7 +907,8 @@ def unified_flash_infer( sm_scale=softmax_scale, logits_soft_cap=logits_soft_cap, k_scale=k_scale, - v_scale=v_scale) + v_scale=v_scale, + window_left=window_left) if prefill_output is None and decode_output is not None: # Decode only batch. From 9d827170a3aa586dfb458bf28d18fd279bdbf580 Mon Sep 17 00:00:00 2001 From: Mengqing Cao Date: Thu, 21 Nov 2024 12:44:20 +0800 Subject: [PATCH 061/122] [Platforms] Add `device_type` in `Platform` (#10508) Signed-off-by: MengqingCao --- vllm/config.py | 17 ++--------------- vllm/platforms/cpu.py | 1 + vllm/platforms/cuda.py | 1 + vllm/platforms/hpu.py | 1 + vllm/platforms/interface.py | 1 + vllm/platforms/neuron.py | 1 + vllm/platforms/openvino.py | 1 + vllm/platforms/rocm.py | 1 + vllm/platforms/tpu.py | 1 + vllm/platforms/xpu.py | 1 + 10 files changed, 11 insertions(+), 15 deletions(-) diff --git a/vllm/config.py b/vllm/config.py index 7522486782cc9..0ed92f370cf50 100644 --- a/vllm/config.py +++ b/vllm/config.py @@ -1193,21 +1193,8 @@ class DeviceConfig: def __init__(self, device: str = "auto") -> None: if device == "auto": # Automated device type detection - if current_platform.is_cuda_alike(): - self.device_type = "cuda" - elif current_platform.is_neuron(): - self.device_type = "neuron" - elif current_platform.is_hpu(): - self.device_type = "hpu" - elif current_platform.is_openvino(): - self.device_type = "openvino" - elif current_platform.is_tpu(): - self.device_type = "tpu" - elif current_platform.is_cpu(): - self.device_type = "cpu" - elif current_platform.is_xpu(): - self.device_type = "xpu" - else: + self.device_type = current_platform.device_type + if self.device_type is None: raise RuntimeError("Failed to infer device type") else: # Device type is assigned explicitly diff --git a/vllm/platforms/cpu.py b/vllm/platforms/cpu.py index 43cbafe709d84..0c4c916406223 100644 --- a/vllm/platforms/cpu.py +++ b/vllm/platforms/cpu.py @@ -19,6 +19,7 @@ class CpuPlatform(Platform): _enum = PlatformEnum.CPU + device_type: str = "cpu" @classmethod def 
get_device_name(cls, device_id: int = 0) -> str: diff --git a/vllm/platforms/cuda.py b/vllm/platforms/cuda.py index d2911ef650743..07562a8c3d71e 100644 --- a/vllm/platforms/cuda.py +++ b/vllm/platforms/cuda.py @@ -109,6 +109,7 @@ def device_id_to_physical_device_id(device_id: int) -> int: class CudaPlatform(Platform): _enum = PlatformEnum.CUDA + device_type: str = "cuda" @classmethod def get_device_capability(cls, device_id: int = 0) -> DeviceCapability: diff --git a/vllm/platforms/hpu.py b/vllm/platforms/hpu.py index 1e0888a30ba96..36d944b3f24b8 100644 --- a/vllm/platforms/hpu.py +++ b/vllm/platforms/hpu.py @@ -5,6 +5,7 @@ class HpuPlatform(Platform): _enum = PlatformEnum.HPU + device_type: str = "hpu" @classmethod def get_default_attn_backend(cls, selected_backend: _Backend) -> _Backend: diff --git a/vllm/platforms/interface.py b/vllm/platforms/interface.py index f4849fa2ccfb0..68abec28ad71e 100644 --- a/vllm/platforms/interface.py +++ b/vllm/platforms/interface.py @@ -56,6 +56,7 @@ def to_int(self) -> int: class Platform: _enum: PlatformEnum + device_type: str def is_cuda(self) -> bool: return self._enum == PlatformEnum.CUDA diff --git a/vllm/platforms/neuron.py b/vllm/platforms/neuron.py index 07d8398eda525..57e3c0dfae84c 100644 --- a/vllm/platforms/neuron.py +++ b/vllm/platforms/neuron.py @@ -3,6 +3,7 @@ class NeuronPlatform(Platform): _enum = PlatformEnum.NEURON + device_type: str = "neuron" @classmethod def get_device_name(cls, device_id: int = 0) -> str: diff --git a/vllm/platforms/openvino.py b/vllm/platforms/openvino.py index ad69ced5417b3..130b8eec1b386 100644 --- a/vllm/platforms/openvino.py +++ b/vllm/platforms/openvino.py @@ -10,6 +10,7 @@ class OpenVinoPlatform(Platform): _enum = PlatformEnum.OPENVINO + device_type: str = "openvino" @classmethod def get_default_attn_backend(cls, selected_backend: _Backend) -> _Backend: diff --git a/vllm/platforms/rocm.py b/vllm/platforms/rocm.py index bb3a49c8b73bc..c62241d8bb47b 100644 --- a/vllm/platforms/rocm.py +++ b/vllm/platforms/rocm.py @@ -29,6 +29,7 @@ class RocmPlatform(Platform): _enum = PlatformEnum.ROCM + device_type: str = "cuda" @classmethod def get_default_attn_backend(cls, selected_backend: _Backend) -> _Backend: diff --git a/vllm/platforms/tpu.py b/vllm/platforms/tpu.py index 2a7ca9fb8c576..863875ef5c2d6 100644 --- a/vllm/platforms/tpu.py +++ b/vllm/platforms/tpu.py @@ -16,6 +16,7 @@ class TpuPlatform(Platform): _enum = PlatformEnum.TPU + device_type: str = "tpu" @classmethod def get_default_attn_backend(cls, selected_backend: _Backend) -> _Backend: diff --git a/vllm/platforms/xpu.py b/vllm/platforms/xpu.py index c3c4746d3cc25..536e17a5f93e8 100644 --- a/vllm/platforms/xpu.py +++ b/vllm/platforms/xpu.py @@ -16,6 +16,7 @@ class XPUPlatform(Platform): _enum = PlatformEnum.XPU + device_type: str = "xpu" @classmethod def get_default_attn_backend(cls, selected_backend: _Backend) -> _Backend: From 8b0fe06c890a202eba24d517cc77562e4a8b0d0c Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Luka=20Govedi=C4=8D?= Date: Thu, 21 Nov 2024 00:44:57 -0500 Subject: [PATCH 062/122] [torch.compile] Inductor code caching fix (#10273) Signed-off-by: luka Signed-off-by: Luka Govedic --- tests/compile/backend.py | 16 +- tests/compile/test_functionalization.py | 95 +++++++++ tests/compile/test_fusion.py | 11 +- tests/compile/test_pass_manager.py | 35 ++++ vllm/compilation/backends.py | 236 ++-------------------- vllm/compilation/fix_functionalization.py | 177 ++++++++++++++++ vllm/compilation/fusion.py | 13 +- vllm/compilation/inductor_pass.py | 100 
++++++--- vllm/compilation/pass_manager.py | 77 +++++++ vllm/compilation/reshapes.py | 8 +- vllm/compilation/vllm_inductor_pass.py | 53 +++++ vllm/config.py | 60 ++++-- vllm/utils.py | 9 - vllm/v1/worker/gpu_model_runner.py | 2 +- 14 files changed, 604 insertions(+), 288 deletions(-) create mode 100644 tests/compile/test_functionalization.py create mode 100644 tests/compile/test_pass_manager.py create mode 100644 vllm/compilation/fix_functionalization.py create mode 100644 vllm/compilation/pass_manager.py create mode 100644 vllm/compilation/vllm_inductor_pass.py diff --git a/tests/compile/backend.py b/tests/compile/backend.py index 9d5c68274374e..8fa10e5bd1b37 100644 --- a/tests/compile/backend.py +++ b/tests/compile/backend.py @@ -1,7 +1,9 @@ from copy import deepcopy -from typing import Callable +from typing import Callable, Union -import torch +from torch import fx + +from vllm.compilation.inductor_pass import InductorPass class TestBackend: @@ -11,19 +13,21 @@ class TestBackend: It also saves the graph before and after the custom passes for inspection. """ - def __init__(self, *args: Callable[[torch.fx.Graph], None]): - self.custom_passes = args + def __init__(self, *passes: Union[InductorPass, Callable[[fx.Graph], + None]]): + self.custom_passes = list(passes) from torch._inductor import config self.current_config = config.shallow_copy_dict() + self.current_config['force_disable_caches'] = True self.current_config['post_grad_custom_post_pass'] = self.post_pass - def __call__(self, graph: torch.fx.GraphModule, example_inputs): + def __call__(self, graph: fx.GraphModule, example_inputs): from torch._inductor.compile_fx import compile_fx return compile_fx(graph, example_inputs, config_patches=self.current_config) - def post_pass(self, graph: torch.fx.Graph): + def post_pass(self, graph: fx.Graph): self.graph_pre_pass = deepcopy(graph) for pass_ in self.custom_passes: pass_(graph) diff --git a/tests/compile/test_functionalization.py b/tests/compile/test_functionalization.py new file mode 100644 index 0000000000000..5036189077be2 --- /dev/null +++ b/tests/compile/test_functionalization.py @@ -0,0 +1,95 @@ +import pytest +import torch + +import vllm.envs as envs +from vllm import LLM, SamplingParams +from vllm.compilation.fix_functionalization import FixFunctionalizationPass +from vllm.compilation.fusion import (FusionPass, find_auto_fn, + find_auto_fn_maybe) +from vllm.compilation.reshapes import RedundantReshapesPass +from vllm.compilation.vllm_inductor_pass import is_func +from vllm.config import CompilationConfig + +from .backend import TestBackend + +OPS_IN_MODEL = [ + torch.ops._C.rotary_embedding.default, + torch.ops._C.fused_add_rms_norm.default, + torch.ops._C.silu_and_mul.default, +] + +RMS_OP = torch.ops._C.rms_norm.default + +RMS_QUANT_OPS = { + "static_fp8": [ + torch.ops._C.rms_norm_static_fp8_quant.default, + torch.ops._C.fused_add_rms_norm_static_fp8_quant.default + ], +} + +prompts = [ + "Hello, my name is", + "The president of the United States is", + "The capital of France is", + "The future of AI is", +] + + +@pytest.mark.parametrize("model", + ["nm-testing/TinyLlama-1.1B-Chat-v1.0-FP8-e2e"]) +@pytest.mark.parametrize("do_fusion", [True, False]) +@pytest.mark.skipif(envs.VLLM_TARGET_DEVICE != "cuda", + reason="Only test on CUDA") +def test_fix_functionalization(model: str, do_fusion: bool): + torch.set_default_device("cuda") + + config = CompilationConfig.PassConfig(enable_fusion=do_fusion, + enable_reshape=True) + reshape_pass = RedundantReshapesPass(config) + 
fusion_pass = FusionPass.instance(config) + + passes = [reshape_pass, fusion_pass] if do_fusion else [reshape_pass] + func_pass = FixFunctionalizationPass(config) + backend_func = TestBackend(*passes, func_pass) + backend_no_func = TestBackend(*passes) + + # instantiate a full engine and manually compile the model 2x + # (with and without FixFunctionalizationPass) + llm = LLM(model=model, enforce_eager=True) + model_runner = llm.llm_engine.model_executor.driver_worker.model_runner + orig_model = model_runner.model + # TODO mark inputs dynamic? (currently torch.compile is triggered 4x) + # Can only do that by using the decorator but then we'd have to instantiate + # 2 LLM instances. + + sampling_params = SamplingParams(temperature=0.0, top_p=1.0) + model_runner.model = torch.compile(orig_model, + fullgraph=True, + backend=backend_func) + gen_func = llm.generate(prompts, sampling_params) + + model_runner.model = torch.compile(orig_model, + fullgraph=True, + backend=backend_no_func) + gen_no_func = llm.generate(prompts, sampling_params) + + for output_func, output_no_func in zip(gen_func, gen_no_func): + assert output_func.outputs[0].text == output_no_func.outputs[0].text + + # OPS_IN_MODEL always appear. RMS_OP is fused away if we run fusion, + # and replaced by fused quantized ops in RMS_QUANT_OPS. + ops = OPS_IN_MODEL + (RMS_QUANT_OPS["static_fp8"] + if do_fusion else [RMS_OP]) + + for op in ops: + find_auto_fn(backend_no_func.graph_post_pass.nodes, op) + assert find_auto_fn_maybe(backend_func.graph_post_pass.nodes, + op) is None # noqa: E501 + + # make sure the ops were all de-functionalized + found = dict() + for node in backend_func.graph_post_pass.nodes: + for op in ops: + if is_func(node, op): + found[op] = True + assert all(found[op] for op in ops) diff --git a/tests/compile/test_fusion.py b/tests/compile/test_fusion.py index 4db79b070fd8d..f92ec8d0de5f1 100644 --- a/tests/compile/test_fusion.py +++ b/tests/compile/test_fusion.py @@ -38,12 +38,6 @@ def forward(self, x): return y3 -# Init does pattern registration, which can only happen once -config = CompilationConfig(enable_fusion=True) -reshape_pass = RedundantReshapesPass(config) -fusion_pass = FusionPass.instance(config) - - @pytest.mark.parametrize("dtype", [torch.float16, torch.bfloat16]) @pytest.mark.parametrize("hidden_size", [64, 3392, 4096]) @pytest.mark.parametrize("num_tokens", [7, 256, 533, 2048, 2049]) @@ -58,6 +52,11 @@ def test_fusion_rmsnorm_quant(dtype, hidden_size, num_tokens, eps): pytest.skip("Only test eps=1e-5 for now") # Reshape pass is needed for the fusion pass to work + config = CompilationConfig.PassConfig(enable_fusion=True, + enable_reshape=True) + reshape_pass = RedundantReshapesPass(config) + fusion_pass = FusionPass.instance(config) + backend = TestBackend(reshape_pass, fusion_pass) model = TestModel(hidden_size, eps) diff --git a/tests/compile/test_pass_manager.py b/tests/compile/test_pass_manager.py new file mode 100644 index 0000000000000..03e7535093c5d --- /dev/null +++ b/tests/compile/test_pass_manager.py @@ -0,0 +1,35 @@ +import pickle + +import pytest +import torch +from torch._inductor.codecache import BypassFxGraphCache + +from vllm.compilation.config import CompilationConfig +from vllm.compilation.inductor_pass import (CallableInductorPass, + as_inductor_pass) +from vllm.compilation.pass_manager import PostGradPassManager + + +def simple_callable(graph: torch.fx.Graph): + pass + + +@as_inductor_pass(files=(__file__, )) +def callable_decorated(graph: torch.fx.Graph): + pass + + 
+@pytest.mark.parametrize( + "works, callable", + [(False, simple_callable), (True, callable_decorated), + (True, CallableInductorPass(simple_callable, "simple_callable"))]) +def test_pass_manager(works: bool, callable): + config = CompilationConfig().pass_config + pass_manager = PostGradPassManager([callable]) + pass_manager.configure(config) # Adds default passes + + if works: + pickle.dumps(pass_manager) + else: + with pytest.raises(BypassFxGraphCache): + pickle.dumps(pass_manager) diff --git a/vllm/compilation/backends.py b/vllm/compilation/backends.py index 416cffd326489..464bc2af8fd6d 100644 --- a/vllm/compilation/backends.py +++ b/vllm/compilation/backends.py @@ -1,6 +1,5 @@ import copy import dataclasses -import operator from contextlib import ExitStack from typing import Any, Callable, Dict, List, Optional, Sequence, Set, Tuple from unittest.mock import patch @@ -11,205 +10,15 @@ import vllm.envs as envs from vllm.config import CompilationConfig from vllm.logger import init_logger -from vllm.utils import combine_fx_passes, weak_ref_tensors +from vllm.utils import weak_ref_tensors from .counter import compilation_counter -from .fusion import FusionPass -from .reshapes import RedundantReshapesPass +from .inductor_pass import InductorPass +from .pass_manager import PostGradPassManager logger = init_logger(__name__) -def fix_functionalization(graph: fx.Graph): - """ - Rewrite the graph module to replace the pattern involving - torch._higher_order_ops.auto_functionalize.auto_functionalized - with a direct call to the inplace custom op. - - # TODO: check if PyTorch nightly has fixed this issue - """ - - # debug code, if we want to see the graph before the transformation - # with open("before.py", "w") as f: - # print(graph.python_code(root_module="self", verbose=True).src, file=f) - - nodes_to_remove = [] - - for node in graph.nodes: - # Identify the auto_functionalized node - if node.op == 'call_function' and node.target == torch._higher_order_ops.auto_functionalize.auto_functionalized: # noqa - if node.args[0] == torch.ops._C.rotary_embedding.default: - # manual replace for rotary_embedding - - # Now, collect the arguments - kwargs = node.kwargs - - query = kwargs['query'] - mm_node = query.args[0].args[0] - - # Create a new call to torch.ops._C.rotary_embedding.default - with graph.inserting_before(node): - # just insert the call to the custom op - # NOTE: don't run dead code elimination, - # otherwise this op will be removed - graph.call_function(torch.ops._C.rotary_embedding.default, - kwargs=kwargs) - - # Remove the auto_functionalized node - # Since the node may have outputs, we need to handle its users - # Replace uses of the outputs (getitem nodes) with mm_node - for user in list(node.users): - if user.op == 'call_function' and user.target == operator.getitem: # noqa - # Remove the getitem node - for getitem_user in list(user.users): - if (getitem_user.op == 'call_function' - and getitem_user.target - == torch.ops.aten.slice_scatter.default): - # Replace the uses of slice_scatter node - # with mm_node - getitem_user.replace_all_uses_with(mm_node) - nodes_to_remove.append(getitem_user) - nodes_to_remove.append(user) - nodes_to_remove.append(node) - - elif node.args[0] == torch.ops._C.fused_add_rms_norm.default: - # manual replace for fused_add_rms_norm - # this is the most effective optimization for llama - # failing to do this will result in many unnecessary copies - - kwargs = node.kwargs - - input = kwargs['input'] - residual = kwargs['residual'] - - # Create a new call to 
torch.ops._C.rotary_embedding.default - with graph.inserting_before(node): - # just insert the call to the custom op - # NOTE: don't run dead code elimination, - # otherwise this op will be removed - graph.call_function( - torch.ops._C.fused_add_rms_norm.default, kwargs=kwargs) - - for user in list(node.users): - if user.op == 'call_function' and user.target == operator.getitem: # noqa - # Remove the getitem node - if user.args[1] == 1: - replace_node = input - elif user.args[1] == 2: - replace_node = residual - user.replace_all_uses_with(replace_node) - nodes_to_remove.append(user) - nodes_to_remove.append(node) - elif (node.args[0] == - torch.ops._C.fused_add_rms_norm_static_fp8_quant.default): - # manual replace for fused_add_rms_norm_static_fp8_quant - # this is the most effective optimization for llama - # failing to do this will result in many unnecessary copies - - kwargs = node.kwargs - - result = kwargs['result'] - residual = kwargs['residual'] - - # Create a new call to - # torch.ops._C.fused_add_rms_norm_static_fp8_quant.default - with graph.inserting_before(node): - # just insert the call to the custom op - # NOTE: don't run dead code elimination, - # otherwise this op will be removed - graph.call_function( - torch.ops._C.fused_add_rms_norm_static_fp8_quant. - default, - kwargs=kwargs) - - for user in list(node.users): - if user.op == 'call_function' and user.target == operator.getitem: # noqa - # Remove the getitem node - if user.args[1] == 1: - replace_node = result - elif user.args[1] == 2: - replace_node = residual - user.replace_all_uses_with(replace_node) - nodes_to_remove.append(user) - nodes_to_remove.append(node) - - elif node.args[0] == torch.ops._C.rms_norm.default: - # manual replace for rms_norm - - kwargs = node.kwargs - - replace_node = kwargs['result'] - # Create a new call to torch.ops._C.rms_norm.default - with graph.inserting_before(node): - # just insert the call to the custom op - # NOTE: don't run dead code elimination, - # otherwise this op will be removed - graph.call_function(torch.ops._C.rms_norm.default, - kwargs=kwargs) - - for user in list(node.users): - if user.op == 'call_function' and user.target == operator.getitem: # noqa - user.replace_all_uses_with(replace_node) - nodes_to_remove.append(user) - nodes_to_remove.append(node) - - elif node.args[ - 0] == torch.ops._C.rms_norm_static_fp8_quant.default: # noqa - # manual replace for rms_norm_static_fp8_quant - - kwargs = node.kwargs - - replace_node = kwargs['result'] - # Create a new call to torch.ops._C.rms_norm_static_fp8_quant.default # noqa - with graph.inserting_before(node): - # just insert the call to the custom op - # NOTE: don't run dead code elimination, - # otherwise this op will be removed - graph.call_function( - torch.ops._C.rms_norm_static_fp8_quant.default, - kwargs=kwargs) - - for user in list(node.users): - if user.op == 'call_function' and user.target == operator.getitem: # noqa - user.replace_all_uses_with(replace_node) - nodes_to_remove.append(user) - nodes_to_remove.append(node) - - elif node.args[0] == torch.ops._C.silu_and_mul.default: - # manual replace for silu_and_mul - - kwargs = node.kwargs - - input = kwargs['input'] - out = kwargs['out'] - - # Create a new call to torch.ops._C.silu_and_mul.default - # cannot use kwargs, because we have an `out`, see https://github.com/pytorch/pytorch/blob/a00faf440888ffb724bad413f329a49e2b6388e7/torch/_inductor/lowering.py#L351 # noqa - with graph.inserting_before(node): - # just insert the call to the custom op - # NOTE: don't run 
dead code elimination, - # otherwise this op will be removed - graph.call_function( - torch.ops._C.silu_and_mul.default, - args=(out, input), - ) - replace_node = out - - for user in list(node.users): - if user.op == 'call_function' and user.target == operator.getitem: # noqa - user.replace_all_uses_with(replace_node) - nodes_to_remove.append(user) - nodes_to_remove.append(node) - - # Remove the nodes all at once - for node in nodes_to_remove: - graph.erase_node(node) - - # debug code, if we want to see the graph after the transformation - # with open("after.py", "w") as f: - # print(graph.python_code(root_module="self", verbose=True).src, file=f) - - def wrap_inductor(graph, example_inputs, additional_inductor_config, @@ -368,12 +177,8 @@ class VllmBackend: The major work of this backend is to split the graph into piecewise graphs, and pass them to the piecewise backend. - This backend also handles custom passes and adds them to Inductor config. - The order of the post-grad post-passes is: - 1. post_grad_passes (constructor parameter) - 2. config["post_grad_custom_post_pass"] - 3. fix_functionalization - This way, all passes operate on a functionalized graph. + This backend also adds the PostGradPassManager to Inductor config, + which handles the post-grad passes. """ compilation_configs: CompilationConfig @@ -402,7 +207,9 @@ def __init__( # streams, it might not be safe to share a global pool. # only investigate this when we use multiple streams self.graph_pool = global_graph_pool - self.post_grad_passes = [] + + # Passes to run on the graph post-grad. + self.post_grad_pass_manager = PostGradPassManager() self.sym_tensor_indices = [] self.input_buffers = [] @@ -412,24 +219,19 @@ def __init__( # `torch.compile` is JIT compiled, so we don't need to # do anything here - def add_passes_to_config(self): + def configure_post_pass(self): config = self.compilation_configs - passes = list(self.post_grad_passes) - - passes = passes + [RedundantReshapesPass(config)] - - if config.enable_fusion: - passes = passes + [FusionPass.instance(config)] + self.post_grad_pass_manager.configure(config.pass_config) + # Post-grad custom passes are run using the post_grad_custom_post_pass + # hook. If a pass for that hook exists, add it to the pass manager. 
inductor_config = config.inductor_compile_config - if "post_grad_custom_post_pass" in inductor_config: - passes = passes + [inductor_config["post_grad_custom_post_pass"]] - - # add the fix_functionalization pass last, so that all other - # passes operate on a functionalized graph - passes = passes + [fix_functionalization] - combined_pass = combine_fx_passes(passes) - inductor_config["post_grad_custom_post_pass"] = combined_pass + PASS_KEY = "post_grad_custom_post_pass" + if PASS_KEY in inductor_config: + # Config should automatically wrap all inductor passes + assert isinstance(inductor_config[PASS_KEY], InductorPass) + self.post_grad_pass_manager.add(inductor_config[PASS_KEY]) + inductor_config[PASS_KEY] = self.post_grad_pass_manager def __call__(self, graph: fx.GraphModule, example_inputs) -> Callable: @@ -444,7 +246,7 @@ def __call__(self, graph: fx.GraphModule, example_inputs) -> Callable: # we get the sizes to capture for cudagraph # from compilation context self.compilation_configs.init_during_runtime() - self.add_passes_to_config() + self.configure_post_pass() self.split_gm, self.piecewise_graphs = split_graph( graph, self.compilation_configs.splitting_ops) diff --git a/vllm/compilation/fix_functionalization.py b/vllm/compilation/fix_functionalization.py new file mode 100644 index 0000000000000..3584cc3608caf --- /dev/null +++ b/vllm/compilation/fix_functionalization.py @@ -0,0 +1,177 @@ +import operator +from typing import Dict, Iterable, List, Optional, Tuple, Union + +import torch +from torch._higher_order_ops.auto_functionalize import auto_functionalized + +from vllm.logger import init_logger + +from .vllm_inductor_pass import VllmInductorPass, is_func + +logger = init_logger(__name__) + + +class FixFunctionalizationPass(VllmInductorPass): + """ + This pass defunctionalizes certain nodes to avoid redundant tensor copies. + After this pass, DCE (dead-code elimination) should never be run, + as de-functionalized nodes may appear as dead code. + + To add new nodes to defunctionalize, add to the if-elif chain in __call__. + """ + + def __call__(self, graph: torch.fx.Graph): + self.begin() + self.dump_graph(graph, "before_fix_functionalization") + + self.nodes_to_remove: List[torch.fx.Node] = [] + count = 0 + for node in graph.nodes: + if not is_func(node, auto_functionalized): + continue # Avoid deep if-elif nesting + + kwargs = node.kwargs + at_target = node.args[0] + + if at_target == torch.ops._C.rotary_embedding.default: + query = kwargs['query'] + mm_node = query.args[0].args[0] + + # rotary_embedding is a special case: the two mutating inputs + # are query and key, which are slices of mm_node. + # While functionalized, results at[1] and at[2] are scattered + # back into mm_node. After de-functionalization, we can just + # use mm_node directly. + for idx, user in self.getitem_users(node).items(): + for user_of_getitem in user.users: + if is_func(user_of_getitem, + torch.ops.aten.slice_scatter.default): + user_of_getitem.replace_all_uses_with(mm_node) + self._remove(user_of_getitem) + self._remove(user) + + self.insert_defunctionalized(graph, node) + self._remove(node) + + # These 2 replacements avoid the most copies for LLaMa. 
+ elif at_target == torch.ops._C.fused_add_rms_norm.default: + mutated_args = {1: 'input', 2: 'residual'} + self.defunctionalize(graph, node, mutated_args) + elif at_target == torch.ops._C.fused_add_rms_norm_static_fp8_quant.default: # noqa: E501 + mutated_args = {1: 'result', 2: 'residual'} + self.defunctionalize(graph, node, mutated_args) + + elif at_target in [ + torch.ops._C.rms_norm.default, + torch.ops._C.rms_norm_static_fp8_quant.default + ]: + mutated_args = {1: 'result'} + self.defunctionalize(graph, node, mutated_args) + + elif at_target == torch.ops._C.silu_and_mul.default: + mutated_args = {1: 'out'} + # Because we have an 'out', need to specify args directly + self.defunctionalize(graph, + node, + mutated_args, + args=('out', 'input')) + else: + continue # skip the count + + count += 1 + + self.dump_graph(graph, "before_fix_functionalization_cleanup") + + # Remove the nodes all at once + count_removed = len(self.nodes_to_remove) + for node in self.nodes_to_remove: + graph.erase_node(node) + + logger.debug("De-functionalized %s nodes, removed %s nodes", count, + count_removed) + self.dump_graph(graph, "after_fix_functionalization") + self.end_and_log() + + def _remove(self, node_or_nodes: Union[torch.fx.Node, + Iterable[torch.fx.Node]]): + """ + Stage a node (or nodes) for removal at the end of the pass. + """ + if isinstance(node_or_nodes, torch.fx.Node): + self.nodes_to_remove.append(node_or_nodes) + else: + self.nodes_to_remove.extend(node_or_nodes) + + def defunctionalize(self, + graph: torch.fx.Graph, + node: torch.fx.Node, + mutated_args: Dict[int, Union[torch.fx.Node, str]], + args: Optional[Tuple[Union[torch.fx.Node, str], + ...]] = None): + """ + De-functionalize a node by replacing it with a call to the original. + It also replaces the getitem users with the mutated arguments. + See replace_users_with_mutated_args and insert_defunctionalized. + """ + self.replace_users_with_mutated_args(node, mutated_args) + self.insert_defunctionalized(graph, node, args=args) + self._remove(node) + + def replace_users_with_mutated_args(self, node: torch.fx.Node, + mutated_args: Dict[int, + Union[torch.fx.Node, + str]]): + """ + Replace all getitem users of the auto-functionalized node with the + mutated arguments. + :param node: The auto-functionalized node + :param mutated_args: The mutated arguments, indexed by getitem index. + If the value of an arg is a string, `node.kwargs[arg]` is used. + """ + for idx, user in self.getitem_users(node).items(): + arg = mutated_args[idx] + arg = node.kwargs[arg] if isinstance(arg, str) else arg + user.replace_all_uses_with(arg) + self._remove(user) + + def getitem_users(self, node: torch.fx.Node) -> Dict[int, torch.fx.Node]: + """ + Returns the operator.getitem users of the auto-functionalized node, + indexed by the index they are getting. + """ + users = {} + for user in node.users: + if is_func(user, operator.getitem): + idx = user.args[1] + users[idx] = user + return users + + def insert_defunctionalized(self, + graph: torch.fx.Graph, + node: torch.fx.Node, + args: Optional[Tuple[Union[torch.fx.Node, str], + ...]] = None): + """ + Insert a new defunctionalized node into the graph before node. + If one of the kwargs is 'out', provide args directly, + as node.kwargs cannot be used. 
+ See https://github.com/pytorch/pytorch/blob/a00faf440888ffb724bad413f329a49e2b6388e7/torch/_inductor/lowering.py#L351 + + :param graph: Graph to insert the defunctionalized node into + :param node: The auto-functionalized node to defunctionalize + :param args: If we cannot use kwargs, specify args directly. + If an arg is a string, `node.kwargs[arg]` is used. + """ # noqa: E501 + assert is_func(node, auto_functionalized), \ + f"node must be auto-functionalized, is {node} instead" + + # Create a new call to the original function + with graph.inserting_before(node): + function = node.args[0] + if args is None: + graph.call_function(function, kwargs=node.kwargs) + else: + # Args passed as strings refer to items in node.kwargs + args = tuple(node.kwargs[arg] if isinstance(arg, str) else arg + for arg in args) + graph.call_function(function, args=args) diff --git a/vllm/compilation/fusion.py b/vllm/compilation/fusion.py index e6a3afef85e1b..5efa410fab6a0 100644 --- a/vllm/compilation/fusion.py +++ b/vllm/compilation/fusion.py @@ -6,10 +6,11 @@ from torch._inductor.pattern_matcher import (Match, PatternMatcherPass, fwd_only, register_replacement) -from vllm.compilation.inductor_pass import InductorPass from vllm.config import CompilationConfig from vllm.logger import init_logger +from .vllm_inductor_pass import VllmInductorPass, is_func + logger = init_logger(__name__) @@ -90,8 +91,6 @@ def empty_fp32(*args, **kwargs): # Utilities for post-processing multi-output matches -def is_func(node: torch.fx.Node, target) -> bool: - return node.op == "call_function" and node.target == target # Returns the first auto_functionalized node with the given op (if it exists) @@ -127,7 +126,7 @@ def find_getitem(node: torch.fx.Node, idx: int) -> torch.fx.Node: return ret -class FusionPass(InductorPass): +class FusionPass(VllmInductorPass): """ This pass fuses a pre-defined set of custom ops into fused ops. It uses the torch pattern matcher to find the patterns and replace them. @@ -142,7 +141,7 @@ class FusionPass(InductorPass): _instance: 'Optional[FusionPass]' = None @classmethod - def instance(cls, config: CompilationConfig): + def instance(cls, config: CompilationConfig.PassConfig): """ Get the singleton instance of the FusionPass. 
If the instance exists, the config is updated but @@ -154,7 +153,7 @@ def instance(cls, config: CompilationConfig): cls._instance.config = config return cls._instance - def __init__(self, config: CompilationConfig): + def __init__(self, config: CompilationConfig.PassConfig): assert self.__class__._instance is None, \ "FusionPass singleton instance already exists" super().__init__(config) @@ -278,6 +277,7 @@ def process_matches(self, graph: torch.fx.Graph): for node in match.nodes) def __call__(self, graph: torch.fx.Graph): + self.begin() self.dump_graph(graph, "before_fusion") count = self.patterns.apply(graph) @@ -289,3 +289,4 @@ def __call__(self, graph: torch.fx.Graph): logger.debug("Post-processed %s matches", len(self.matches)) self.dump_graph(graph, "after_fusion") self.matches.clear() + self.end_and_log() diff --git a/vllm/compilation/inductor_pass.py b/vllm/compilation/inductor_pass.py index 8082a08b40019..f6846c08ac841 100644 --- a/vllm/compilation/inductor_pass.py +++ b/vllm/compilation/inductor_pass.py @@ -1,38 +1,84 @@ +import hashlib +import inspect +import types from abc import ABC, abstractmethod +from typing import Any, Callable, Optional, Union import torch - -from vllm.config import CompilationConfig -# yapf: disable -from vllm.distributed import get_tensor_model_parallel_rank as get_tp_rank -from vllm.distributed import ( - get_tensor_model_parallel_world_size as get_tp_world_size) -from vllm.distributed import model_parallel_is_initialized as p_is_init -# yapf: enable -from vllm.logger import init_logger - -logger = init_logger(__name__) +from torch import fx class InductorPass(ABC): + """ + General custom inductor pass interface. + TODO(torch==2.6) use torch._inductor.custom_graph_pass.CustomGraphPass + """ @abstractmethod def __call__(self, graph: torch.fx.Graph): + """ + Execute the pass on the given graph. + """ raise NotImplementedError - def __init__(self, config: CompilationConfig): - self.config = config - - def dump_graph(self, graph: torch.fx.Graph, stage: str): - if stage in self.config.dump_graph_stages: - # Make sure filename includes rank in the distributed setting - parallel = p_is_init() and get_tp_world_size() > 1 - rank = f"-{get_tp_rank()}" if parallel else "" - filepath = self.config.dump_graph_dir / f"{stage}{rank}.py" - - logger.info("Printing graph to %s", filepath) - with open(filepath, "w") as f: - src = graph.python_code(root_module="self", verbose=True).src - # Add imports so it's not full of errors - print("import torch; from torch import device", file=f) - print(src, file=f) + def uuid(self) -> Any: + """ + Provide a unique identifier for the pass, used in Inductor code cache. + This should depend on the pass implementation, so that changes to the + pass result in recompilation. + By default, the object source is hashed. + """ + return InductorPass.hash_source(self) + + @staticmethod + def hash_source(*srcs: Union[str, Any]): + """ + Utility method to hash the sources of functions or objects. + :param srcs: strings or objects to add to the hash. + Objects and functions have their source inspected. 
+ :return: + """ + hasher = hashlib.sha256() + for src in srcs: + if isinstance(src, str): + src_str = src + elif isinstance(src, types.FunctionType): + src_str = inspect.getsource(src) + else: + src_str = inspect.getsource(src.__class__) + hasher.update(src_str.encode("utf-8")) + return hasher.digest() + + +class CallableInductorPass(InductorPass): + """ + This class is a wrapper for a callable that automatically provides an + implementation of the UUID. + """ + + def __init__(self, + callable: Callable[[fx.Graph], None], + uuid: Optional[Any] = None): + self.callable = callable + if uuid is None: + uuid = InductorPass.hash_source(callable) + self._uuid = uuid + + def __call__(self, graph: torch.fx.Graph): + self.callable(graph) + + def uuid(self) -> Any: + return self._uuid + + def __getstate__(self): + """ + Pickling occurs in the Inductor code cache if a pass is not given to + the pass manager but is instead directly added to config as a pass. + See PostGradPassManager for more. + + TODO(torch==2.6), use the `uuid` method in CustomGraphPass instead. + """ + return self._uuid + + def __setstate__(self, state): + raise ValueError("Cannot unpickle CallableInductorPass") diff --git a/vllm/compilation/pass_manager.py b/vllm/compilation/pass_manager.py new file mode 100644 index 0000000000000..fb522ae053e97 --- /dev/null +++ b/vllm/compilation/pass_manager.py @@ -0,0 +1,77 @@ +from typing import List + +from torch import fx as fx + +from vllm.config import CompilationConfig +from vllm.logger import init_logger + +from .fix_functionalization import FixFunctionalizationPass +from .fusion import FusionPass +from .inductor_pass import InductorPass +from .reshapes import RedundantReshapesPass + +logger = init_logger(__name__) + + +class PostGradPassManager: + """ + The pass manager for post-grad passes. + It handles configuration, adding custom passes, and running passes. + It also supports pickling, which is used by the Inductor code cache. + TODO(torch==2.6), use CustomGraphPass + (torch._inductor.custom_graph_pass.CustomGraphPass) + + The order of the post-grad post-passes is: + 1. passes (constructor parameter) + 2. default passes (RedundantReshapesPass, FusionPass) + 3. config["post_grad_custom_post_pass"] (if it exists) + 4. fix_functionalization + This way, all passes operate on a functionalized graph. + """ + + def __init__(self): + self.passes: List[InductorPass] = [] + + def __call__(self, graph: fx.Graph): + for pass_ in self.passes: + pass_(graph) + + # always run fix_functionalization last + self.fix_functionalization(graph) + + def configure(self, pass_config: CompilationConfig.PassConfig): + self.pass_config = pass_config + if pass_config.enable_reshape: + self.passes += [RedundantReshapesPass(pass_config)] + + if pass_config.enable_fusion: + self.passes += [FusionPass.instance(pass_config)] + + self.fix_functionalization = FixFunctionalizationPass(pass_config) + + def add(self, pass_: InductorPass): + assert isinstance(pass_, InductorPass) + self.passes.append(pass_) + + def __getstate__(self): + """ + Custom pickling for the pass manager, as some passes cannot be pickled. + Pickling occurs because the pass manager is set as the value of + `config["post_grad_custom_post_pass"]` in the Inductor config. + The config is pickled to act as a key in the Inductor code cache. + Any other passes in the config are pickled as well. + + TODO(torch==2.6), use the `uuid` method in CustomGraphPass instead. 
+ """ + state = {"pass_config": self.pass_config.uuid(), "passes": []} + for pass_ in self.passes: + state["passes"].append(pass_.uuid()) + state["passes"].append(self.fix_functionalization.uuid()) + return state + + def __setstate__(self, state): + """ + Do not allow unpickling of the pass manager. + If this is needed in the future, it should properly pickle the passes. + """ + raise ValueError("Cannot unpickle PostGradPassManager") diff --git a/vllm/compilation/reshapes.py b/vllm/compilation/reshapes.py index 36597e119d2e1..63a369fe8d966 100644 --- a/vllm/compilation/reshapes.py +++ b/vllm/compilation/reshapes.py @@ -3,14 +3,14 @@ import torch.fx from torch import SymInt -from vllm.compilation.fusion import is_func -from vllm.compilation.inductor_pass import InductorPass from vllm.logger import init_logger +from .vllm_inductor_pass import VllmInductorPass, is_func + logger = init_logger(__name__) -class RedundantReshapesPass(InductorPass): +class RedundantReshapesPass(VllmInductorPass): """ This is an inductor pass that removes redundant reshape operations. It is required for RMSNorm-quant fusion to work properly. @@ -31,6 +31,7 @@ class RedundantReshapesPass(InductorPass): """ def __call__(self, graph: torch.fx.Graph): + self.begin() self.dump_graph(graph, "before_reshapes") count = 0 # Remove no-op reshapes/views: @@ -56,6 +57,7 @@ def __call__(self, graph: torch.fx.Graph): logger.debug("Removed %s no-op reshapes", count) self.dump_graph(graph, "after_reshapes") + self.end_and_log() def dims_equivalent(self, dim: Union[int, torch.fx.Node], i_dim: Union[int, SymInt]) -> bool: diff --git a/vllm/compilation/vllm_inductor_pass.py b/vllm/compilation/vllm_inductor_pass.py new file mode 100644 index 0000000000000..dbf6b8f7789e1 --- /dev/null +++ b/vllm/compilation/vllm_inductor_pass.py @@ -0,0 +1,53 @@ +import time + +import torch + +from vllm.config import CompilationConfig +# yapf: disable +from vllm.distributed import get_tensor_model_parallel_rank as get_tp_rank +from vllm.distributed import ( + get_tensor_model_parallel_world_size as get_tp_world_size) +from vllm.distributed import model_parallel_is_initialized as p_is_init +# yapf: enable +from vllm.logger import init_logger + +from .inductor_pass import InductorPass + +logger = init_logger(__name__) + + +def is_func(node: torch.fx.Node, target) -> bool: + return node.op == "call_function" and node.target == target + + +class VllmInductorPass(InductorPass): + """ + An inductor pass with access to vLLM PassConfig. + It provides timing, logging, and dumping utilities. 
+ """ + + def __init__(self, config: CompilationConfig.PassConfig): + self.config = config + self.pass_name = self.__class__.__name__ + + def dump_graph(self, graph: torch.fx.Graph, stage: str): + if stage in self.config.dump_graph_stages: + # Make sure filename includes rank in the distributed setting + parallel = p_is_init() and get_tp_world_size() > 1 + rank = f"-{get_tp_rank()}" if parallel else "" + filepath = self.config.dump_graph_dir / f"{stage}{rank}.py" + + logger.info("%s printing graph to %s", self.pass_name, filepath) + with open(filepath, "w") as f: + src = graph.python_code(root_module="self", verbose=True).src + # Add imports so it's not full of errors + print("import torch; from torch import device", file=f) + print(src, file=f) + + def begin(self): + self._start_time = time.perf_counter_ns() + + def end_and_log(self): + self._end_time = time.perf_counter_ns() + duration_ms = float(self._end_time - self._start_time) / 1.0e6 + logger.debug("%s completed in %.1f ms", self.pass_name, duration_ms) diff --git a/vllm/config.py b/vllm/config.py index 0ed92f370cf50..b2785e1ce2d5f 100644 --- a/vllm/config.py +++ b/vllm/config.py @@ -1,5 +1,6 @@ import copy import enum +import hashlib import json import warnings from dataclasses import dataclass, field, replace @@ -13,6 +14,7 @@ from transformers import PretrainedConfig import vllm.envs as envs +from vllm.compilation.inductor_pass import CallableInductorPass, InductorPass from vllm.logger import init_logger from vllm.model_executor.layers.quantization import (QUANTIZATION_METHODS, get_quantization_config) @@ -2120,12 +2122,7 @@ class CompilationConfig(BaseModel): name because the config uses json format. If we pass the config from Python, functions can also be passed directly via Python object constructor, e.g. `CompilationConfig(inductor_passes={"a": func})` - - custom inductor passes: - - dump_graph_stages: list of stages for which we want to dump the graph. - Each pass defines its own stages (before, after, maybe in-between). - - dump_graph_dir: directory to dump the graph. Default is . - - enable_fusion: whether to enable the custom fusion pass. - TODO better pass enabling system. + - custom inductor passes: see PassConfig for more details Why we have different sizes for cudagraph and inductor: - cudagraph: a cudagraph captured for a specific size can only be used @@ -2157,9 +2154,43 @@ class CompilationConfig(BaseModel): cudagraph_capture_sizes: Optional[List[int]] = None cudagraph_copy_inputs: bool = False - dump_graph_stages: List[str] = Field(default_factory=list) - dump_graph_dir: Path = Field(default=Path(".")) - enable_fusion: bool = True + class PassConfig(BaseModel): + """ + Configuration for custom Inductor passes. + This is separate from general CompilationConfig so that inductor passes + don't all have access to full configuration - that would create a cycle + as the PassManager is set as a property of config. + - dump_graph_stages: list of stages for which we want to dump the graph. + Each pass defines its own stages (before, after, maybe in-between). + - dump_graph_dir: directory to dump the graphs. Default is . + - enable_fusion: whether to enable the custom fusion pass. + - enable_reshape: whether to enable the custom reshape elimination pass. + TODO better pass enabling system. 
+ """ + dump_graph_stages: List[str] = Field(default_factory=list) + dump_graph_dir: Path = Field(default=Path(".")) + enable_fusion: bool = True + enable_reshape: bool = True + + def uuid(self): + """ + Produces a hash unique to the pass configuration. + Any new fields that affect compilation should be added to the hash. + Do not include dump_graph_* in the hash - they don't affect + compilation. + """ + dict_ = self.model_dump( + include={"enable_fusion", "enable_reshape"}) + encoded = json.dumps(dict_, sort_keys=True).encode("utf-8") + return hashlib.sha256(encoded).digest() + + def model_post_init(self, __context: Any) -> None: + if not self.enable_reshape and self.enable_fusion: + print_warning_once( + "Fusion enabled but reshape elimination disabled." + "RMSNorm + quant (fp8) fusion might not work") + + pass_config: PassConfig = Field(default_factory=PassConfig) # not configurable, computed after init compile_sizes: List[int] = PrivateAttr @@ -2185,8 +2216,9 @@ def model_post_init(self, __context: Any) -> None: for k, v in self.inductor_passes.items(): if not isinstance(v, str): assert callable(v), ( - f"pass {k} should be a function or a qualified name") - self.inductor_compile_config[k] = v + f"pass {k} should be callable or a qualified name") + self.inductor_compile_config[k] = v if isinstance( + v, InductorPass) else CallableInductorPass(v) continue # resolve function from qualified name @@ -2194,7 +2226,8 @@ def model_post_init(self, __context: Any) -> None: module = ".".join(names[:-1]) func_name = names[-1] func = __import__(module).__dict__[func_name] - self.inductor_compile_config[k] = func + self.inductor_compile_config[k] = func if isinstance( + func, InductorPass) else CallableInductorPass(func) self.enabled_custom_ops = Counter() self.disabled_custom_ops = Counter() @@ -2344,7 +2377,8 @@ def __post_init__(self): self.compilation_config.custom_ops = ["none"] self.compilation_config.use_cudagraph = True self.compilation_config.use_inductor = True - self.compilation_config.enable_fusion = False + self.compilation_config.pass_config.enable_fusion = False + self.compilation_config.pass_config.enable_reshape = False current_platform.check_and_update_config(self) diff --git a/vllm/utils.py b/vllm/utils.py index 2bbdc8d1ebde8..cb2ad43a2ae8d 100644 --- a/vllm/utils.py +++ b/vllm/utils.py @@ -1501,15 +1501,6 @@ def __len__(self): return len(self._factory) -def combine_fx_passes(passes: List[Callable]) -> Callable: - - def combined_fx(graph) -> None: - for fx in passes: - fx(graph) - - return combined_fx - - def weak_ref_tensor(tensor: torch.Tensor) -> torch.Tensor: """ Create a weak reference to a tensor. diff --git a/vllm/v1/worker/gpu_model_runner.py b/vllm/v1/worker/gpu_model_runner.py index 1f9b544637bf7..5f66293cbe8e4 100644 --- a/vllm/v1/worker/gpu_model_runner.py +++ b/vllm/v1/worker/gpu_model_runner.py @@ -548,7 +548,7 @@ def capture_model(self) -> None: if not self.use_cuda_graph: logger.warning( "Skipping CUDA graph capture. 
Please add " - "-O 3 to use CUDA graphs.", CompilationLevel.PIECEWISE) + "-O %s to use CUDA graphs.", CompilationLevel.PIECEWISE) return start_time = time.perf_counter() From 3430857b641131ffabf215ab569c41696b57b953 Mon Sep 17 00:00:00 2001 From: Cyrus Leung Date: Thu, 21 Nov 2024 15:06:42 +0800 Subject: [PATCH 063/122] [Misc] Increase default video fetch timeout (#10495) Signed-off-by: DarkLight1337 --- vllm/envs.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/vllm/envs.py b/vllm/envs.py index 853c49bc4dbc1..14c1617f1be19 100644 --- a/vllm/envs.py +++ b/vllm/envs.py @@ -49,7 +49,7 @@ VLLM_WORKER_MULTIPROC_METHOD: str = "fork" VLLM_ASSETS_CACHE: str = os.path.join(VLLM_CACHE_ROOT, "assets") VLLM_IMAGE_FETCH_TIMEOUT: int = 5 - VLLM_VIDEO_FETCH_TIMEOUT: int = 15 + VLLM_VIDEO_FETCH_TIMEOUT: int = 30 VLLM_AUDIO_FETCH_TIMEOUT: int = 10 VLLM_TARGET_DEVICE: str = "cuda" MAX_JOBS: Optional[str] = None From aaddce5d268d2c82d49b0240d6c112ba4941f69e Mon Sep 17 00:00:00 2001 From: youkaichao Date: Wed, 20 Nov 2024 23:07:56 -0800 Subject: [PATCH 064/122] [platforms] improve error message for unspecified platforms (#10520) Signed-off-by: youkaichao --- vllm/config.py | 3 ++- vllm/platforms/interface.py | 1 + 2 files changed, 3 insertions(+), 1 deletion(-) diff --git a/vllm/config.py b/vllm/config.py index b2785e1ce2d5f..ed09f8ae31863 100644 --- a/vllm/config.py +++ b/vllm/config.py @@ -1191,12 +1191,13 @@ def is_multi_step(self) -> bool: class DeviceConfig: device: Optional[torch.device] + device_type: str def __init__(self, device: str = "auto") -> None: if device == "auto": # Automated device type detection self.device_type = current_platform.device_type - if self.device_type is None: + if not self.device_type: raise RuntimeError("Failed to infer device type") else: # Device type is assigned explicitly diff --git a/vllm/platforms/interface.py b/vllm/platforms/interface.py index 68abec28ad71e..07f23167d509a 100644 --- a/vllm/platforms/interface.py +++ b/vllm/platforms/interface.py @@ -170,3 +170,4 @@ def check_and_update_config(cls, vllm_config: VllmConfig) -> None: class UnspecifiedPlatform(Platform): _enum = PlatformEnum.UNSPECIFIED + device_type = "" From f0e02380169b99a20cc5a4cd1848bbe085b50d5c Mon Sep 17 00:00:00 2001 From: Zhong Qishuai Date: Thu, 21 Nov 2024 17:05:23 +0800 Subject: [PATCH 065/122] [Doc] fix a small typo in docstring of llama_tool_parser (#10513) --- vllm/entrypoints/openai/tool_parsers/llama_tool_parser.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/vllm/entrypoints/openai/tool_parsers/llama_tool_parser.py b/vllm/entrypoints/openai/tool_parsers/llama_tool_parser.py index a5f44d69e5fd2..1856308b88cfa 100644 --- a/vllm/entrypoints/openai/tool_parsers/llama_tool_parser.py +++ b/vllm/entrypoints/openai/tool_parsers/llama_tool_parser.py @@ -29,7 +29,8 @@ class Llama3JsonToolParser(ToolParser): Tool call parser for Llama 3.1 models intended for use with the examples/tool_chat_template_llama.jinja template. 
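One line of context for the device-type guard changed in the platforms patch above: an unspecified platform now reports an empty string rather than None, so a truthiness check is the one that still catches it. The class below is a minimal stand-in for illustration, not the real Platform hierarchy.

class UnspecifiedPlatform:
    device_type = ""


device_type = UnspecifiedPlatform.device_type
assert device_type is not None   # the old `is None` check would pass silently
assert not device_type           # the new `if not self.device_type` check still trips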
- Used when --enable-auto-tool-choice --tool-call-parser mistral are all set + Used when --enable-auto-tool-choice --tool-call-parser llama3_json + are all set """ def __init__(self, tokenizer: PreTrainedTokenizerBase): From 1cfde82ffd6edfca6029a7e312c848386ea322c1 Mon Sep 17 00:00:00 2001 From: Alex Brooks Date: Thu, 21 Nov 2024 03:46:20 -0700 Subject: [PATCH 066/122] [Model] Add Support for Multimodal Granite Models (#10291) Signed-off-by: Alex-Brooks Co-authored-by: Cyrus Leung --- vllm/model_executor/models/clip.py | 47 ++++++++++++++++++------ vllm/model_executor/models/llava.py | 45 +++++++++++++++++++---- vllm/model_executor/models/llava_next.py | 20 +++++++++- vllm/model_executor/models/pixtral.py | 28 ++++++++++++-- vllm/model_executor/models/siglip.py | 42 ++++++++++++++++----- vllm/multimodal/utils.py | 44 ++++++++++++++++++++++ 6 files changed, 191 insertions(+), 35 deletions(-) diff --git a/vllm/model_executor/models/clip.py b/vllm/model_executor/models/clip.py index 7f638506f9fb2..cd89519e95986 100644 --- a/vllm/model_executor/models/clip.py +++ b/vllm/model_executor/models/clip.py @@ -21,7 +21,8 @@ from vllm.model_executor.model_loader.weight_utils import default_weight_loader from vllm.multimodal.utils import (cached_get_tokenizer, consecutive_placeholder_ranges, - repeat_and_pad_placeholder_tokens) + repeat_and_pad_placeholder_tokens, + resolve_visual_encoder_outputs) from vllm.sequence import SequenceData from .utils import get_vit_attn_backend @@ -389,12 +390,20 @@ def __init__( for layer_idx in range(num_hidden_layers) ]) - def forward(self, inputs_embeds: torch.Tensor): - + def forward( + self, inputs_embeds: torch.Tensor, return_all_hidden_states: bool + ) -> Union[torch.Tensor, list[torch.Tensor]]: + hidden_states_pool = [] hidden_states = inputs_embeds + for encoder_layer in self.layers: hidden_states = encoder_layer(hidden_states) - + if return_all_hidden_states: + hidden_states_pool.append(hidden_states) + # If we have multiple feature sample layers, we return all hidden + # states in order and grab the ones we need by index. + if return_all_hidden_states: + return hidden_states_pool return hidden_states @@ -419,6 +428,7 @@ def __init__( # NOTE: This typo of "layrnorm" is not fixed on purpose to match # the original transformers code and name of the model weights. 
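The encoder change above follows a pattern that is easier to see outside the CLIP-specific code: run the layer stack once and either return only the final output or collect every intermediate hidden state so specific layers can be selected later. Below is a minimal, self-contained sketch with a toy module; none of these names are vLLM's.

from typing import List, Union

import torch
import torch.nn as nn


class TinyEncoder(nn.Module):

    def __init__(self, num_layers: int = 4, dim: int = 8):
        super().__init__()
        self.layers = nn.ModuleList(
            nn.Linear(dim, dim) for _ in range(num_layers))

    def forward(
        self, x: torch.Tensor, return_all_hidden_states: bool
    ) -> Union[torch.Tensor, List[torch.Tensor]]:
        hidden_states_pool = []
        for layer in self.layers:
            x = layer(x)
            if return_all_hidden_states:
                hidden_states_pool.append(x)
        # Either every per-layer hidden state, or just the last one
        return hidden_states_pool if return_all_hidden_states else x


enc = TinyEncoder()
assert len(enc(torch.randn(2, 8), return_all_hidden_states=True)) == 4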
self.pre_layrnorm = nn.LayerNorm(embed_dim, eps=config.layer_norm_eps) + self.encoder = CLIPEncoder( config=config, quant_config=quant_config, @@ -446,16 +456,26 @@ def __init__( def forward( self, pixel_values: torch.Tensor, + feature_sample_layers: Optional[list[int]] = None, ) -> torch.Tensor: hidden_states = self.embeddings(pixel_values) hidden_states = self.pre_layrnorm(hidden_states) - hidden_states = self.encoder(inputs_embeds=hidden_states) - if self.post_layernorm is None: - return hidden_states + return_all_hidden_states = feature_sample_layers is not None + + # Produces either the last layer output or all of the hidden states, + # depending on if we have feature_sample_layers or not + encoder_outputs = self.encoder( + inputs_embeds=hidden_states, + return_all_hidden_states=return_all_hidden_states) + + # Handle post-norm (if applicable) and stacks feature layers if needed + encoder_outputs = resolve_visual_encoder_outputs( + encoder_outputs, feature_sample_layers, self.post_layernorm, + self.config.num_hidden_layers) - return self.post_layernorm(hidden_states) + return encoder_outputs class CLIPVisionModel(nn.Module): @@ -478,11 +498,14 @@ def __init__( quant_config=quant_config, num_hidden_layers_override=num_hidden_layers_override, require_post_norm=require_post_norm, - prefix=f"{prefix}.vision_model", - ) + prefix=f"{prefix}.vision_model") - def forward(self, pixel_values: torch.Tensor) -> torch.Tensor: - return self.vision_model(pixel_values) + def forward( + self, + pixel_values: torch.Tensor, + feature_sample_layers: Optional[list[int]] = None, + ) -> torch.Tensor: + return self.vision_model(pixel_values, feature_sample_layers) @property def device(self): diff --git a/vllm/model_executor/models/llava.py b/vllm/model_executor/models/llava.py index e7d3161a7cb2d..05c6cc62efcd7 100644 --- a/vllm/model_executor/models/llava.py +++ b/vllm/model_executor/models/llava.py @@ -204,7 +204,41 @@ def input_processor_for_llava(ctx: InputContext, inputs: DecoderOnlyInputs): class LlavaLikeConfig(Protocol): vision_config: PretrainedConfig - vision_feature_layer: int + vision_feature_layer: Union[int, List[int]] + + +def _get_num_hidden_layers(hf_config: LlavaLikeConfig) -> int: + """Determine the number of hidden layers to initialize up to in the + visual encoder. + + Args: + hf_config: Model config with vision feature layer(s). + """ + feature_layers = hf_config.vision_feature_layer + num_hidden_layers = hf_config.vision_config.num_hidden_layers + # If we have one feature layer, initialize up to that layer + if isinstance(feature_layers, int): + return _get_layer_index(feature_layers, num_hidden_layers) + # If we have multiple feature layers, initialize up to the deepest one + elif isinstance(feature_layers, (list, tuple)): + return max( + _get_layer_index(idx, num_hidden_layers) for idx in feature_layers) + raise TypeError(f"vision_layer_feature type: {type(feature_layers)}" + " is not supported") + + +def _get_layer_index(feature_layer_index: int, num_hidden_layers: int) -> int: + """Given an signed vision feature layer, get the number of hidden layers + needed to leverage it. + + Args: + feature_layer_index: Index of a required layer in the visual encoder. + num_hidden_layers: The total number of hidden layers in the visual + encoder. 
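The helper documented above boils down to a small piece of index arithmetic. The sketch below restates it as plain functions so the negative-index case can be checked by hand; the names are illustrative, not the vLLM helpers themselves.

from typing import Iterable


def layer_count_needed(feature_layer_index: int, num_hidden_layers: int) -> int:
    # A negative index counts back from the end of the full encoder
    if feature_layer_index < 0:
        return num_hidden_layers + feature_layer_index + 1
    return feature_layer_index + 1


def layers_to_initialize(feature_layers: Iterable[int],
                         num_hidden_layers: int) -> int:
    # Initialize up to the deepest layer any requested feature needs
    return max(layer_count_needed(i, num_hidden_layers) for i in feature_layers)


# With a 24-layer vision tower, feature layers [-2, 10] require 23 layers
assert layers_to_initialize([-2, 10], 24) == 23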
+ """ + if feature_layer_index < 0: + return num_hidden_layers + feature_layer_index + 1 + return feature_layer_index + 1 def init_vision_tower_for_llava( @@ -216,13 +250,8 @@ def init_vision_tower_for_llava( ): vision_config = hf_config.vision_config - # Initialize the vision tower only up to the required feature layer - vision_feature_layer = hf_config.vision_feature_layer - if vision_feature_layer < 0: - num_hidden_layers = hf_config.vision_config.num_hidden_layers \ - + vision_feature_layer + 1 - else: - num_hidden_layers = vision_feature_layer + 1 + # Initialize the vision tower only up to the deepest required feature layer + num_hidden_layers = _get_num_hidden_layers(hf_config) if isinstance(vision_config, CLIPVisionConfig): return CLIPVisionModel( diff --git a/vllm/model_executor/models/llava_next.py b/vllm/model_executor/models/llava_next.py index 37e2227a52dcd..abeebb45fc4a7 100644 --- a/vllm/model_executor/models/llava_next.py +++ b/vllm/model_executor/models/llava_next.py @@ -288,6 +288,21 @@ def __init__(self, *, vllm_config: VllmConfig, prefix: str = "") -> None: pooler_config = vllm_config.model_config.pooler_config multimodal_config = vllm_config.model_config.multimodal_config + vision_feature_layer = config.vision_feature_layer + # Determine the layer up to which we will initialize the vision tower + if isinstance(vision_feature_layer, int): + vision_hidden_size = config.vision_config.hidden_size + self.feature_sample_layers = None + # Used for multimodal granite models to control encoder outputs + elif isinstance(vision_feature_layer, (list, tuple)): + vision_hidden_size = config.vision_config.hidden_size * len( + vision_feature_layer) + self.feature_sample_layers = vision_feature_layer + else: + raise TypeError( + f"vision_layer_feature type: {type(vision_feature_layer)}" + " is not supported") + self.config = config self.multimodal_config = multimodal_config @@ -300,7 +315,7 @@ def __init__(self, *, vllm_config: VllmConfig, prefix: str = "") -> None: self.image_newline = nn.Parameter( torch.empty(config.text_config.hidden_size)) self.multi_modal_projector = LlavaMultiModalProjector( - vision_hidden_size=config.vision_config.hidden_size, + vision_hidden_size=vision_hidden_size, text_hidden_size=config.text_config.hidden_size, projector_hidden_act=config.projector_hidden_act) @@ -419,7 +434,8 @@ def _image_pixels_to_features( # NOTE: we skip the step to select the vision feature layer since # this is already done inside the vision tower - image_features = vision_tower(pixel_values) + image_features = vision_tower( + pixel_values, feature_sample_layers=self.feature_sample_layers) return self._select_image_features( image_features, diff --git a/vllm/model_executor/models/pixtral.py b/vllm/model_executor/models/pixtral.py index d14b89d6b3f85..6711cbf5694b9 100644 --- a/vllm/model_executor/models/pixtral.py +++ b/vllm/model_executor/models/pixtral.py @@ -33,7 +33,8 @@ from vllm.multimodal import MULTIMODAL_REGISTRY, MultiModalKwargs from vllm.multimodal.inputs import PlaceholderRange from vllm.multimodal.utils import (cached_get_tokenizer, - consecutive_placeholder_ranges) + consecutive_placeholder_ranges, + resolve_visual_encoder_outputs) from vllm.sequence import IntermediateTensors, SequenceData from vllm.transformers_utils.processor import cached_get_processor from vllm.utils import is_list_of @@ -970,9 +971,18 @@ def forward( x: torch.Tensor, attention_mask: torch.Tensor, position_embeddings: torch.Tensor, + return_all_hidden_states: bool, ) -> torch.Tensor: + 
hidden_states_pool = [] + for layer in self.layers: x = layer(x, attention_mask, position_embeddings) + if return_all_hidden_states: + hidden_states_pool.append(x) + # If we have multiple feature sample layers, we return all hidden + # states in order and grab the ones we need by index. + if return_all_hidden_states: + return hidden_states_pool return x @@ -990,6 +1000,7 @@ def __init__( super().__init__() self.config = config + self.patch_conv = nn.Conv2d( in_channels=config.num_channels, out_channels=config.hidden_size, @@ -1024,6 +1035,7 @@ def __init__( def forward( self, pixel_values: List[torch.Tensor], + feature_sample_layers: Optional[list[int]] = None, ) -> torch.Tensor: """ Args: @@ -1031,6 +1043,9 @@ def forward( in pixel_values. This means it will be a list of tensors because multiple requests batched can have multiple images, each with their own shape potentially + feature_sample_layers: Layer indices whose features should be + concatenated and used as the visual encoder output. If none + are provided, the last layer is used. Returns: image_features: tensor of token features for @@ -1065,8 +1080,15 @@ def forward( [p.shape[-2] * p.shape[-1] for p in patch_embeds_list], patch_embeds) - out = self.transformer(patch_embeds, attention_mask, - position_embedding) + return_all_hidden_states = feature_sample_layers is not None + out = self.transformer( + patch_embeds, + attention_mask, + position_embedding, + return_all_hidden_states=return_all_hidden_states) + + out = resolve_visual_encoder_outputs(out, feature_sample_layers, None, + self.config.num_hidden_layers) return out diff --git a/vllm/model_executor/models/siglip.py b/vllm/model_executor/models/siglip.py index c58ad99692900..deaed0ba7e4ce 100644 --- a/vllm/model_executor/models/siglip.py +++ b/vllm/model_executor/models/siglip.py @@ -25,7 +25,8 @@ from vllm.model_executor.model_loader.weight_utils import default_weight_loader from vllm.multimodal.utils import (cached_get_tokenizer, consecutive_placeholder_ranges, - repeat_and_pad_placeholder_tokens) + repeat_and_pad_placeholder_tokens, + resolve_visual_encoder_outputs) from vllm.sequence import SequenceData from .utils import get_vit_attn_backend @@ -450,11 +451,19 @@ def __init__( def forward( self, inputs_embeds: torch.Tensor, - ) -> torch.Tensor: + return_all_hidden_states: bool, + ) -> Union[torch.Tensor, list[torch.Tensor]]: + hidden_states_pool = [] hidden_states = inputs_embeds + for encoder_layer in self.layers: hidden_states, _ = encoder_layer(hidden_states) - + if return_all_hidden_states: + hidden_states_pool.append(hidden_states) + # If we have multiple feature sample layers, we return all hidden + # states in order and grab the ones we need by index. 
+ if return_all_hidden_states: + return hidden_states_pool return hidden_states @@ -509,6 +518,7 @@ def __init__( embed_dim = config.hidden_size self.embeddings = SiglipVisionEmbeddings(config) + self.encoder = SiglipEncoder( config, quant_config=quant_config, @@ -546,23 +556,33 @@ def forward( self, pixel_values: torch.Tensor, interpolate_pos_encoding: bool = True, + feature_sample_layers: Optional[list[int]] = None, ) -> torch.Tensor: + hidden_states = self.embeddings( pixel_values, interpolate_pos_encoding=interpolate_pos_encoding, ) - encoder_outputs = self.encoder(inputs_embeds=hidden_states) + return_all_hidden_states = feature_sample_layers is not None + + # Produces either the last layer output or all of the hidden states, + # depending on if we have feature_sample_layers or not + encoder_outputs = self.encoder( + inputs_embeds=hidden_states, + return_all_hidden_states=return_all_hidden_states, + ) - if self.post_layernorm is None: - return encoder_outputs + # Handle post-norm (if applicable) and stacks feature layers if needed + encoder_outputs = resolve_visual_encoder_outputs( + encoder_outputs, feature_sample_layers, self.post_layernorm, + self.config.num_hidden_layers) - last_hidden_state = self.post_layernorm(encoder_outputs) - # TODO: add this back when pooled_output is used in inference + # TODO: add this back when pooled_output is used in inference. # if self.use_head: - # pooled_output = self.head(last_hidden_state) + # pooled_output = self.head(encoder_outputs) - return last_hidden_state + return encoder_outputs class SiglipVisionModel(nn.Module): @@ -595,10 +615,12 @@ def forward( self, pixel_values: torch.Tensor, interpolate_pos_encoding: bool = False, + feature_sample_layers: Optional[list[int]] = None, ) -> torch.Tensor: return self.vision_model( pixel_values=pixel_values, interpolate_pos_encoding=interpolate_pos_encoding, + feature_sample_layers=feature_sample_layers, ) def load_weights(self, weights: Iterable[Tuple[str, diff --git a/vllm/multimodal/utils.py b/vllm/multimodal/utils.py index 40194716bbf94..d4333b7519b47 100644 --- a/vllm/multimodal/utils.py +++ b/vllm/multimodal/utils.py @@ -6,6 +6,7 @@ import numpy as np import numpy.typing as npt +import torch from PIL import Image import vllm.envs as envs @@ -392,6 +393,49 @@ def encode_video_base64(frames: npt.NDArray): return ",".join(base64_frames) +def resolve_visual_encoder_outputs( + encoder_outputs: Union[torch.Tensor, list[torch.Tensor]], + feature_sample_layers: Optional[list[int]], + post_layer_norm: Optional[torch.nn.LayerNorm], + max_possible_layers: int, +) -> torch.Tensor: + """Given the outputs a visual encoder module that may correspond to the + output of the last layer, or a list of hidden states to be stacked, + handle post normalization and resolve it into a single output tensor. + + Args: + encoder_outputs: Output of encoder's last layer or all hidden states. + feature_sample_layers: Optional layer indices to grab from the encoder + outputs; if provided, encoder outputs must be a list. + post_layer_norm: Post norm to apply to the output of the encoder. + max_possible_layers: Total layers in the fully loaded visual encoder. + + """ + if feature_sample_layers is None: + if post_layer_norm is not None: + return post_layer_norm(encoder_outputs) + return encoder_outputs + + # Get the hidden states corresponding to the layer indices. + # Negative values are relative to the full visual encoder, + # so offset them depending on how many layers were loaded. 
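A toy example makes the offset handling above concrete. Assume a six-layer encoder of which only five layers were actually loaded; all numbers below are made up for illustration.

import torch

max_possible_layers = 6
# Five loaded layers, each producing a (1, 4) hidden state
encoder_outputs = [torch.full((1, 4), float(i)) for i in range(5)]
feature_sample_layers = [-4, -2]  # indices relative to the full 6-layer encoder

offset = max_possible_layers - len(encoder_outputs)  # one layer was not loaded
hs_pool = [
    encoder_outputs[idx] if idx >= 0 else encoder_outputs[idx + offset]
    for idx in feature_sample_layers
]
# Selected features are concatenated, so the projector input width grows
assert torch.cat(hs_pool, dim=-1).shape == (1, 8)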
+ # NOTE: this assumes that encoder_outputs contains a list + # of hidden states in the same order as the encoder layers + # that produced them. + offset = max_possible_layers - len(encoder_outputs) + hs_pool = [ + encoder_outputs[layer_idx] + if layer_idx >= 0 else encoder_outputs[layer_idx + offset] + for layer_idx in feature_sample_layers + ] + + # Apply post-norm on the final hidden state if we are using it + uses_last_layer = feature_sample_layers[-1] in (len(hs_pool) - 1, -1) + if post_layer_norm is not None and uses_last_layer: + hs_pool[-1] = post_layer_norm(encoder_outputs) + return torch.cat(hs_pool, dim=-1) + + # Utilities for input processors _T = TypeVar("_T", str, int) From 8a93a598d9ac265882e55432e7aef55c8bff23f4 Mon Sep 17 00:00:00 2001 From: "Wang, Yi" Date: Thu, 21 Nov 2024 19:15:36 +0800 Subject: [PATCH 067/122] fix the issue that len(tokenizer(prompt)["input_ids"]) > prompt_len (#10524) Signed-off-by: Wang, Yi A --- benchmarks/backend_request_func.py | 1 + 1 file changed, 1 insertion(+) diff --git a/benchmarks/backend_request_func.py b/benchmarks/backend_request_func.py index 25c8b1bbf3e22..c3fed56e8a956 100644 --- a/benchmarks/backend_request_func.py +++ b/benchmarks/backend_request_func.py @@ -54,6 +54,7 @@ async def async_request_tgi( "do_sample": True, "temperature": 0.01, # TGI does not accept 0.0 temperature. "top_p": 0.99, # TGI does not accept 1.0 top_p. + "truncate": request_func_input.prompt_len, # TGI does not accept ignore_eos flag. } payload = { From d5ec121f95f51184acce4e2c27ad8fc01904d3d9 Mon Sep 17 00:00:00 2001 From: Isotr0py <2037008807@qq.com> Date: Thu, 21 Nov 2024 22:20:08 +0800 Subject: [PATCH 068/122] [Model] Expose `dynamic_image_size` as mm_processor_kwargs for InternVL2 models (#10518) Signed-off-by: Isotr0py <2037008807@qq.com> --- .../mm_processor_kwargs/test_internvl.py | 206 ++++++++++++++++++ vllm/model_executor/models/internvl.py | 63 ++++-- 2 files changed, 255 insertions(+), 14 deletions(-) create mode 100644 tests/models/decoder_only/vision_language/mm_processor_kwargs/test_internvl.py diff --git a/tests/models/decoder_only/vision_language/mm_processor_kwargs/test_internvl.py b/tests/models/decoder_only/vision_language/mm_processor_kwargs/test_internvl.py new file mode 100644 index 0000000000000..af0c2aa211998 --- /dev/null +++ b/tests/models/decoder_only/vision_language/mm_processor_kwargs/test_internvl.py @@ -0,0 +1,206 @@ +"""Tests for InternVL's multimodal preprocessing kwargs.""" +from typing import Callable, Optional + +import pytest +from transformers import AutoTokenizer + +from vllm.inputs import InputContext, token_inputs +from vllm.multimodal import MultiModalRegistry + +from .....conftest import _ImageAssets +from ....utils import build_model_context + +models = ["OpenGVLab/InternVL2-2B"] + + +# Wrap lazy imports to avoid initializing CUDA during test collection +@pytest.fixture() +def input_processor_for_internvl(): + from vllm.model_executor.models.internvl import InternVLInputPipeline + + pipeline = InternVLInputPipeline('', '', '') + return pipeline.input_processor + + +@pytest.fixture() +def dummy_data_for_internvl(): + from vllm.model_executor.models.internvl import InternVLInputPipeline + + pipeline = InternVLInputPipeline('', '', '') + return pipeline.dummy_data + + +@pytest.fixture() +def get_max_internvl_image_tokens(): + from vllm.model_executor.models.internvl import ( + get_max_internvl_image_tokens) + return get_max_internvl_image_tokens + + +@pytest.mark.parametrize("model", models) 
+@pytest.mark.parametrize("max_dynamic_patch", [1, 4]) +@pytest.mark.parametrize("dynamic_image_size", [True, False, None]) +def test_input_mapper_override( + model: str, + image_assets: _ImageAssets, + max_dynamic_patch: int, + dynamic_image_size: Optional[bool], +): + mm_processor_kwargs = { + "max_dynamic_patch": max_dynamic_patch, + } + if dynamic_image_size is not None: + mm_processor_kwargs["dynamic_image_size"] = dynamic_image_size + + expected_num_patches = max_dynamic_patch + 1 if max_dynamic_patch > 1 else 1 + if dynamic_image_size is False: + expected_num_patches = 1 + + ctx = build_model_context( + model_name=model, + tokenizer_name=model, + trust_remote_code=True, + mm_processor_kwargs=mm_processor_kwargs, + ) + + mm_registry = MultiModalRegistry() + mm_registry.init_mm_limits_per_prompt(ctx.model_config) + + image = image_assets[0].pil_image.resize((448 * 2, 448 * 2)) + vllm_result = mm_registry.map_input( + ctx.model_config, + {"image": image}, + ) + assert vllm_result["pixel_values"].size(1) == expected_num_patches + + +@pytest.mark.parametrize("model", models) +@pytest.mark.parametrize("max_dynamic_patch", [1, 4, None]) +@pytest.mark.parametrize("dynamic_image_size", [True, False, None]) +def test_max_tokens_override( + get_max_internvl_image_tokens: Callable, + model: str, + max_dynamic_patch: Optional[int], + dynamic_image_size: Optional[bool], +): + """Ensure get_max_internvl_image_tokens handles mm_processor_kwargs.""" + ctx = build_model_context( + model_name=model, + tokenizer_name=model, + trust_remote_code=True, + mm_processor_kwargs=None, + ) + + if max_dynamic_patch is None: + max_dynamic_patch = ctx.get_hf_config().max_dynamic_patch + expected_num_patches = max_dynamic_patch + 1 if max_dynamic_patch > 1 else 1 + if dynamic_image_size is False: + expected_num_patches = 1 + expected_max_tokens = 256 * expected_num_patches + + actual_max_tokens = get_max_internvl_image_tokens( + ctx=InputContext(ctx.model_config), + max_dynamic_patch=max_dynamic_patch, + dynamic_image_size=dynamic_image_size, + ) + assert expected_max_tokens == actual_max_tokens + + +@pytest.mark.parametrize("model", models) +@pytest.mark.parametrize("num_imgs", [1, 2]) +@pytest.mark.parametrize("max_dynamic_patch", [1, 4, None]) +@pytest.mark.parametrize("dynamic_image_size", [True, False, None]) +def test_dummy_data_override( + dummy_data_for_internvl: Callable, + model: str, + num_imgs: int, + max_dynamic_patch: Optional[int], + dynamic_image_size: Optional[bool], +): + """Ensure dummy_data_for_internvl handles kwargs properly.""" + # Same as the previous test - don't initialize mm_processor_kwargs + # in this test and assume that the kwargs will be correctly expanded by + # the partial when calling the dummy data func. 
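The expected_num_patches arithmetic repeated in these tests can be read as one small rule, restated below as a helper. The function is illustrative only and assumes use_thumbnail is enabled, as it is for the InternVL2 checkpoints these tests target.

def expected_num_patches(max_dynamic_patch: int,
                         dynamic_image_size: bool = True) -> int:
    # Disabling dynamic_image_size collapses everything to a single patch
    if not dynamic_image_size:
        return 1
    # With tiling enabled, the thumbnail contributes one extra patch
    return max_dynamic_patch + 1 if max_dynamic_patch > 1 else 1


assert expected_num_patches(4) == 5
assert expected_num_patches(1) == 1
assert expected_num_patches(4, dynamic_image_size=False) == 1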
+ ctx = build_model_context( + model_name=model, + tokenizer_name=model, + trust_remote_code=True, + mm_processor_kwargs=None, + ) + + if max_dynamic_patch is None: + max_dynamic_patch = ctx.get_hf_config().max_dynamic_patch + expected_num_patches = max_dynamic_patch + 1 if max_dynamic_patch > 1 else 1 + if dynamic_image_size is False: + expected_num_patches = 1 + expected_max_tokens = 256 * expected_num_patches + + dummy_data = dummy_data_for_internvl( + ctx=ctx, + seq_len=8192, # Should be bigger than num_imgs * toks_per_img + mm_counts={"image": num_imgs}, + max_dynamic_patch=max_dynamic_patch, + dynamic_image_size=dynamic_image_size, + ) + sequence_data = dummy_data.seq_data + + tokenizer = AutoTokenizer.from_pretrained(model, trust_remote_code=True) + image_token_id = tokenizer.encode('', + add_special_tokens=False)[0] + + # Ensure we have the right number of placeholders per size + img_tok_count = sequence_data.get_token_ids().count(image_token_id) + assert img_tok_count == expected_max_tokens * num_imgs + + +@pytest.mark.parametrize("model", models) +@pytest.mark.parametrize("max_dynamic_patch", [1, 4]) +@pytest.mark.parametrize("dynamic_image_size", [True, False, None]) +@pytest.mark.parametrize("num_imgs", [1, 2]) +def test_input_processor_override( + input_processor_for_internvl: Callable, + image_assets: _ImageAssets, + model: str, + num_imgs: int, + max_dynamic_patch: int, + dynamic_image_size: Optional[bool], +): + """Ensure input_processor_for_internvl handles kwargs properly.""" + # Same as the previous test - don't initialize mm_processor_kwargs + # in this test and assume that the kwargs will be correctly expanded by + # the partial when calling the custom input processor. + expected_num_patches = max_dynamic_patch + 1 if max_dynamic_patch > 1 else 1 + if dynamic_image_size is False: + expected_num_patches = 1 + + ctx = build_model_context( + model_name=model, + tokenizer_name=model, + trust_remote_code=True, + mm_processor_kwargs=None, + ) + expected_toks_per_img = 256 * expected_num_patches + + # Build the image str / prompt based on the number of images we pass + tokenizer = AutoTokenizer.from_pretrained(model, trust_remote_code=True) + placeholders = "" if num_imgs == 1 else "\n".join( + f"Image-{i}: \n" for i in range(1, num_imgs + 1)) + prompt = placeholders + images = [image_assets[0].pil_image.resize((448 * 2, 448 * 2))] * num_imgs + + inputs = token_inputs(prompt_token_ids=tokenizer.encode(prompt), + prompt=prompt, + multi_modal_data={"image": images}) + + processed_inputs = input_processor_for_internvl( + ctx, + inputs, + max_dynamic_patch=max_dynamic_patch, + dynamic_image_size=dynamic_image_size, + ) + + # Ensure we have the right number of placeholders per num_crops size + image_token_id = tokenizer.encode('', + add_special_tokens=False)[0] + img_tok_count = processed_inputs["prompt_token_ids"].count(image_token_id) + assert img_tok_count == expected_toks_per_img * num_imgs diff --git a/vllm/model_executor/models/internvl.py b/vllm/model_executor/models/internvl.py index 5d38b4b1ef14b..47ac00b6afe9b 100644 --- a/vllm/model_executor/models/internvl.py +++ b/vllm/model_executor/models/internvl.py @@ -123,8 +123,15 @@ def calculate_num_blocks(orig_width: int, orig_height: int, min_num: int, return blocks, target_width, target_height -def calculate_num_blocks_wrapper(hf_config: PretrainedConfig, - max_dynamic_patch: Optional[int] = None): +def calculate_num_blocks_wrapper( + hf_config: PretrainedConfig, + max_dynamic_patch: Optional[int] = None, + 
dynamic_image_size: Optional[bool] = None, +): + if dynamic_image_size is None: + dynamic_image_size = hf_config.dynamic_image_size + + max_dynamic_patch = max_dynamic_patch if dynamic_image_size else 1 if max_dynamic_patch is None: max_dynamic_patch = hf_config.max_dynamic_patch min_num = hf_config.min_dynamic_patch @@ -183,10 +190,17 @@ def image_to_pixel_values(image: Image.Image, input_size: int, min_num: int, return pixel_values -def image_to_pixel_values_wrapper(hf_config: PretrainedConfig, - max_dynamic_patch: Optional[int] = None): +def image_to_pixel_values_wrapper( + hf_config: PretrainedConfig, + max_dynamic_patch: Optional[int] = None, + dynamic_image_size: Optional[bool] = None, +): image_size = hf_config.vision_config.image_size min_num = hf_config.min_dynamic_patch + if dynamic_image_size is None: + dynamic_image_size = hf_config.dynamic_image_size + + max_dynamic_patch = max_dynamic_patch if dynamic_image_size else 1 if max_dynamic_patch is None: max_dynamic_patch = hf_config.max_dynamic_patch use_thumbnail = hf_config.use_thumbnail @@ -207,11 +221,17 @@ def get_internvl_num_patches(hf_config: PretrainedConfig): (downsample_ratio**2)) -def get_max_internvl_image_tokens(ctx: InputContext, - *, - max_dynamic_patch: Optional[int] = None): +def get_max_internvl_image_tokens( + ctx: InputContext, + *, + max_dynamic_patch: Optional[int] = None, + dynamic_image_size: Optional[bool] = None, +): hf_config = ctx.get_hf_config() + if dynamic_image_size is None: + dynamic_image_size = hf_config.dynamic_image_size + max_dynamic_patch = max_dynamic_patch if dynamic_image_size else 1 if max_dynamic_patch is None: max_dynamic_patch = hf_config.max_dynamic_patch use_thumbnail = hf_config.use_thumbnail @@ -222,12 +242,18 @@ def get_max_internvl_image_tokens(ctx: InputContext, return num_patches * max_dynamic_patch -def get_max_internvl_image_size(ctx: InputContext, - *, - max_dynamic_patch: Optional[int] = None): +def get_max_internvl_image_size( + ctx: InputContext, + *, + max_dynamic_patch: Optional[int] = None, + dynamic_image_size: Optional[bool] = None, +): hf_config = ctx.get_hf_config() image_size = hf_config.vision_config.image_size + if dynamic_image_size is None: + dynamic_image_size = hf_config.dynamic_image_size + max_dynamic_patch = max_dynamic_patch if dynamic_image_size else 1 if max_dynamic_patch is None: max_dynamic_patch = hf_config.max_dynamic_patch use_thumbnail = hf_config.use_thumbnail @@ -281,6 +307,7 @@ def input_processor( inputs: DecoderOnlyInputs, *, max_dynamic_patch: Optional[int] = None, + dynamic_image_size: Optional[bool] = None, ) -> DecoderOnlyInputs: multi_modal_data = inputs.get("multi_modal_data") if multi_modal_data is None or "image" not in multi_modal_data: @@ -292,7 +319,7 @@ def input_processor( image_data = multi_modal_data["image"] num_patches = get_internvl_num_patches(hf_config) num_blocks_calculator = calculate_num_blocks_wrapper( - hf_config, max_dynamic_patch) + hf_config, max_dynamic_patch, dynamic_image_size) if isinstance(image_data, Image.Image): width, height = image_data.size num_blocks, _, _ = num_blocks_calculator(width, height) @@ -332,11 +359,12 @@ def input_mapper( data: object, *, max_dynamic_patch: Optional[int] = None, + dynamic_image_size: Optional[bool] = None, ): hf_config = ctx.get_hf_config() image_pixel_values_mapper = image_to_pixel_values_wrapper( - hf_config, max_dynamic_patch) + hf_config, max_dynamic_patch, dynamic_image_size) if isinstance(data, Image.Image): data = image_pixel_values_mapper(data) # Add an N dimension 
for number of images per prompt (currently 1). @@ -366,13 +394,17 @@ def dummy_data( mm_counts: Mapping[str, int], *, max_dynamic_patch: Optional[int] = None, + dynamic_image_size: Optional[bool] = None, ): num_images = mm_counts["image"] hf_config = ctx.get_hf_config() image_feature_size = get_max_internvl_image_tokens( - ctx, max_dynamic_patch=max_dynamic_patch) + ctx, + max_dynamic_patch=max_dynamic_patch, + dynamic_image_size=dynamic_image_size, + ) model_config = ctx.model_config tokenizer = cached_get_tokenizer( model_config.tokenizer, @@ -388,7 +420,10 @@ def dummy_data( ) max_image_width, max_image_height = get_max_internvl_image_size( - ctx, max_dynamic_patch=max_dynamic_patch) + ctx, + max_dynamic_patch=max_dynamic_patch, + dynamic_image_size=dynamic_image_size, + ) mm_data = dummy_image_for_clip( hf_config.vision_config, From 4d676f085295d92a9248c4944433b4ade52a8ff3 Mon Sep 17 00:00:00 2001 From: Xiaoyu Zhang <35585791+BBuf@users.noreply.github.com> Date: Thu, 21 Nov 2024 22:40:02 +0800 Subject: [PATCH 069/122] [Bugfix] Embedding model pooling_type equals ALL and multi input's bug (#10494) --- vllm/model_executor/layers/pooler.py | 29 ++++++++++++++++------------ 1 file changed, 17 insertions(+), 12 deletions(-) diff --git a/vllm/model_executor/layers/pooler.py b/vllm/model_executor/layers/pooler.py index bfe2d7d0f382e..df1978241340b 100644 --- a/vllm/model_executor/layers/pooler.py +++ b/vllm/model_executor/layers/pooler.py @@ -94,14 +94,10 @@ def forward( pooled_data = hidden_states[last_token_flat_indices] elif self.pooling_type == PoolingType.ALL: offset = 0 - pooled_data_lst = [] + pooled_data = [] for prompt_len in prompt_lens: - pooled_data_i = hidden_states[offset:offset + prompt_len] - - pooled_data_lst.append(pooled_data_i) + pooled_data.append(hidden_states[offset:offset + prompt_len]) offset += prompt_len - - pooled_data = torch.stack(pooled_data_lst) elif self.pooling_type == PoolingType.MEAN: # Calculate mean pooling cumsum = torch.cumsum(hidden_states, dim=0) @@ -121,7 +117,7 @@ def forward( step_tag_id = self.step_tag_id offset = 0 - pooled_data_lst = [] + pooled_data = [] for prompt_len, seq_data_i in zip( prompt_lens, pooling_metadata.seq_data.values()): pooled_data_i = hidden_states[offset:offset + prompt_len] @@ -130,17 +126,26 @@ def forward( pooled_data_i = pooled_data_i[token_ids == step_tag_id] offset += prompt_len - pooled_data_lst.append(pooled_data_i) - - pooled_data = torch.stack(pooled_data_lst) + pooled_data.append(pooled_data_i) else: raise ValueError(f"Invalid pooling type: {self.pooling_type}") if self.normalize: - pooled_data = nn.functional.normalize(pooled_data, p=2, dim=1) + if isinstance(pooled_data, list): + pooled_data = [ + nn.functional.normalize(data, p=2, dim=1) + for data in pooled_data + ] + else: + pooled_data = nn.functional.normalize(pooled_data, p=2, dim=1) if self.softmax: - pooled_data = nn.functional.softmax(pooled_data, dim=-1) + if isinstance(pooled_data, list): + pooled_data = [ + nn.functional.softmax(data, dim=-1) for data in pooled_data + ] + else: + pooled_data = nn.functional.softmax(pooled_data, dim=-1) pooled_outputs = [ EmbeddingSequenceGroupOutput(data.tolist()) for data in pooled_data From da7e702c6fae521bf8633affb8fe7b834f5cb94b Mon Sep 17 00:00:00 2001 From: Chauncey Date: Fri, 22 Nov 2024 00:24:32 +0800 Subject: [PATCH 070/122] [Bug]: When apply continue_final_message for OpenAI server, the "echo":false is ignored (#10180) Signed-off-by: chaunceyjiang --- tests/entrypoints/openai/test_chat_echo.py | 79 
++++++++++++++++++++++ vllm/entrypoints/openai/serving_chat.py | 4 +- 2 files changed, 81 insertions(+), 2 deletions(-) create mode 100644 tests/entrypoints/openai/test_chat_echo.py diff --git a/tests/entrypoints/openai/test_chat_echo.py b/tests/entrypoints/openai/test_chat_echo.py new file mode 100644 index 0000000000000..223ac5b41aa83 --- /dev/null +++ b/tests/entrypoints/openai/test_chat_echo.py @@ -0,0 +1,79 @@ +from typing import NamedTuple + +import openai # use the official client for correctness check +import pytest +import pytest_asyncio + +from ...utils import RemoteOpenAIServer + +# # any model with a chat template should work here +MODEL_NAME = "Qwen/Qwen2-1.5B-Instruct" +DUMMY_CHAT_TEMPLATE = """{% for message in messages %}{{message['role'] + ': ' + message['content'] + '\\n'}}{% endfor %}""" # noqa: E501 + + +@pytest.fixture(scope="module") +def server(): + args = [ + # use half precision for speed and memory savings in CI environment + "--dtype", + "float16", + "--enforce-eager", + "--max-model-len", + "4080", + "--chat-template", + DUMMY_CHAT_TEMPLATE, + ] + + with RemoteOpenAIServer(MODEL_NAME, args) as remote_server: + yield remote_server + + +@pytest_asyncio.fixture +async def client(server): + async with server.get_async_client() as async_client: + yield async_client + + +class TestCase(NamedTuple): + model_name: str + echo: bool + + +@pytest.mark.asyncio +@pytest.mark.parametrize( + "test_case", + [ + TestCase(model_name=MODEL_NAME, echo=True), + TestCase(model_name=MODEL_NAME, echo=False) + ], +) +async def test_chat_session_with_echo_and_continue_final_message( + client: openai.AsyncOpenAI, test_case: TestCase): + saying: str = "Here is a common saying about apple. An apple a day, keeps" + # test echo with continue_final_message parameter + chat_completion = await client.chat.completions.create( + model=test_case.model_name, + messages=[{ + "role": "user", + "content": "tell me a common saying" + }, { + "role": "assistant", + "content": saying + }], + extra_body={ + "echo": test_case.echo, + "continue_final_message": True, + "add_generation_prompt": False + }) + assert chat_completion.id is not None + assert len(chat_completion.choices) == 1 + + choice = chat_completion.choices[0] + assert choice.finish_reason == "stop" + + message = choice.message + if test_case.echo: + assert message.content is not None and saying in message.content + else: + assert message.content is not None and saying not in message.content + assert message.role == "assistant" diff --git a/vllm/entrypoints/openai/serving_chat.py b/vllm/entrypoints/openai/serving_chat.py index 2eef909eb9319..54ca0463bcab1 100644 --- a/vllm/entrypoints/openai/serving_chat.py +++ b/vllm/entrypoints/openai/serving_chat.py @@ -361,7 +361,7 @@ async def chat_completion_stream_generator( # Send response to echo the input portion of the # last message - if request.echo or request.continue_final_message: + if request.echo: last_msg_content: Union[str, List[Dict[str, str]]] = "" if conversation and "content" in conversation[ -1] and conversation[-1].get("role") == role: @@ -706,7 +706,7 @@ async def chat_completion_full_generator( stop_reason=output.stop_reason) choices.append(choice_data) - if request.echo or request.continue_final_message: + if request.echo: last_msg_content: Union[str, List[Dict[str, str]]] = "" if conversation and "content" in conversation[-1] and conversation[ -1].get("role") == role: From 2385b60d8300ce730ae67d9ea945f06de9ec4e21 Mon Sep 17 00:00:00 2001 From: Jee Jee Li Date: Fri, 22 Nov 2024 
01:18:11 +0800 Subject: [PATCH 071/122] [Kernel] Register punica ops directly (#10522) Signed-off-by: Jee Jee Li --- tests/lora/test_punica_variation.py | 23 ++++++++++++++++------ vllm/lora/ops/bgmv_expand.py | 23 +++++++++++++++++++--- vllm/lora/ops/bgmv_expand_slice.py | 25 +++++++++++++++++++++--- vllm/lora/ops/bgmv_shrink.py | 23 +++++++++++++++++++--- vllm/lora/ops/sgmv_expand.py | 29 +++++++++++++++++++++++++--- vllm/lora/ops/sgmv_expand_slice.py | 30 ++++++++++++++++++++++++++--- vllm/lora/ops/sgmv_shrink.py | 28 ++++++++++++++++++++++++--- 7 files changed, 157 insertions(+), 24 deletions(-) diff --git a/tests/lora/test_punica_variation.py b/tests/lora/test_punica_variation.py index 52b82f25d23e1..3b20033271d26 100644 --- a/tests/lora/test_punica_variation.py +++ b/tests/lora/test_punica_variation.py @@ -6,12 +6,13 @@ import pytest import torch -from vllm.lora.ops.bgmv_expand import bgmv_expand -from vllm.lora.ops.bgmv_expand_slice import bgmv_expand_slice -from vllm.lora.ops.bgmv_shrink import bgmv_shrink -from vllm.lora.ops.sgmv_expand import sgmv_expand -from vllm.lora.ops.sgmv_expand_slice import sgmv_expand_slice -from vllm.lora.ops.sgmv_shrink import sgmv_shrink +# Enable custom op register +import vllm.lora.ops.bgmv_expand +import vllm.lora.ops.bgmv_expand_slice +import vllm.lora.ops.bgmv_shrink +import vllm.lora.ops.sgmv_expand +import vllm.lora.ops.sgmv_expand_slice +import vllm.lora.ops.sgmv_shrink # noqa: F401 from vllm.platforms import current_platform from .utils import (generate_data, generate_data_for_expand_nslices, @@ -37,6 +38,16 @@ def assert_close(a, b): torch.testing.assert_close(a, b, rtol=rtol, atol=atol) +# Unlike test_punica_sizes.py, we directly utilize custom op for +# testing, which verifies the correct registration of these ops. 
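Before the per-kernel diffs below, here is a self-contained sketch of the registration pattern itself, written against the plain torch.library API rather than vLLM's direct_register_custom_op helper. The demo::scale_inplace name is purely illustrative, and the snippet assumes PyTorch 2.4+ where torch.library.custom_op and register_fake are available: an op that mutates its output buffer, plus a no-op fake implementation so the compiler can trace it without running the real kernel.

import torch


@torch.library.custom_op("demo::scale_inplace", mutates_args=["output_tensor"])
def scale_inplace(inputs: torch.Tensor, output_tensor: torch.Tensor,
                  scaling: float = 1.0) -> None:
    output_tensor.copy_(inputs * scaling)


@scale_inplace.register_fake
def _(inputs: torch.Tensor, output_tensor: torch.Tensor,
      scaling: float = 1.0) -> None:
    # Fake impl: no computation; output_tensor already carries the metadata
    return


x = torch.randn(4)
out = torch.empty(4)
torch.ops.demo.scale_inplace(x, out, scaling=2.0)  # dispatched through torch.ops
assert torch.allclose(out, 2.0 * x)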
+bgmv_expand = torch.ops.vllm.bgmv_expand +bgmv_expand_slice = torch.ops.vllm.bgmv_expand_slice +bgmv_shrink = torch.ops.vllm.bgmv_shrink +sgmv_expand = torch.ops.vllm.sgmv_expand +sgmv_expand_slice = torch.ops.vllm.sgmv_expand_slice +sgmv_shrink = torch.ops.vllm.sgmv_shrink + + @pytest.mark.parametrize("batches", BATCHES) @pytest.mark.parametrize("num_loras", NUM_LORA) @pytest.mark.parametrize("rank", MAX_RANKS) diff --git a/vllm/lora/ops/bgmv_expand.py b/vllm/lora/ops/bgmv_expand.py index f176259fddc78..42adb191b8ead 100644 --- a/vllm/lora/ops/bgmv_expand.py +++ b/vllm/lora/ops/bgmv_expand.py @@ -9,6 +9,8 @@ import triton import triton.language as tl +from vllm.utils import direct_register_custom_op + from .utils import get_lora_op_configs @@ -162,9 +164,24 @@ def _bgmv_expand( return +def bgmv_expand_fake( + inputs: torch.Tensor, + lora_b_weights: torch.Tensor, + output_tensor: torch.Tensor, + lora_indices_tensor: torch.Tensor, + add_inputs: bool = True, +) -> None: + return + + try: - bgmv_expand = torch.library.custom_op("lora::bgmv_expand", - _bgmv_expand, - mutates_args=["output_tensor"]) + direct_register_custom_op( + op_name="bgmv_expand", + op_func=_bgmv_expand, + mutates_args=["output_tensor"], + fake_impl=bgmv_expand_fake, + ) + bgmv_expand = torch.ops.vllm.bgmv_expand + except AttributeError: bgmv_expand = _bgmv_expand diff --git a/vllm/lora/ops/bgmv_expand_slice.py b/vllm/lora/ops/bgmv_expand_slice.py index 2c6ed96c253f0..f397d752a3ea9 100644 --- a/vllm/lora/ops/bgmv_expand_slice.py +++ b/vllm/lora/ops/bgmv_expand_slice.py @@ -9,6 +9,8 @@ import triton import triton.language as tl +from vllm.utils import direct_register_custom_op + from .utils import get_lora_op_configs @@ -179,9 +181,26 @@ def _bgmv_expand_slice( return +def bgmv_expand_slice_fake( + inputs: torch.Tensor, + lora_b_weights: torch.Tensor, + output_tensor: torch.Tensor, + lora_indices_tensor: torch.Tensor, + slice_offset: int, + slice_size: int, + add_inputs: bool = True, +) -> None: + return + + try: - bgmv_expand_slice = torch.library.custom_op("lora::bgmv_expand_slice", - _bgmv_expand_slice, - mutates_args=["output_tensor"]) + direct_register_custom_op( + op_name="bgmv_expand_slice", + op_func=_bgmv_expand_slice, + mutates_args=["output_tensor"], + fake_impl=bgmv_expand_slice_fake, + ) + bgmv_expand_slice = torch.ops.vllm.bgmv_expand_slice + except AttributeError: bgmv_expand_slice = _bgmv_expand_slice diff --git a/vllm/lora/ops/bgmv_shrink.py b/vllm/lora/ops/bgmv_shrink.py index 0846ff36b1692..f3ef01d39e776 100644 --- a/vllm/lora/ops/bgmv_shrink.py +++ b/vllm/lora/ops/bgmv_shrink.py @@ -9,6 +9,8 @@ import triton import triton.language as tl +from vllm.utils import direct_register_custom_op + from .utils import get_lora_op_configs @@ -142,9 +144,24 @@ def _bgmv_shrink( return +def bgmv_shrink_fake( + inputs: torch.Tensor, + lora_a_weights: torch.Tensor, + output_tensor: torch.Tensor, + lora_indices_tensor: torch.Tensor, + scaling: float = 1.0, +) -> None: + return + + try: - bgmv_shrink = torch.library.custom_op("lora::bgmv_shrink", - _bgmv_shrink, - mutates_args=["output_tensor"]) + direct_register_custom_op( + op_name="bgmv_shrink", + op_func=_bgmv_shrink, + mutates_args=["output_tensor"], + fake_impl=bgmv_shrink_fake, + ) + bgmv_shrink = torch.ops.vllm.bgmv_shrink + except AttributeError: bgmv_shrink = _bgmv_shrink diff --git a/vllm/lora/ops/sgmv_expand.py b/vllm/lora/ops/sgmv_expand.py index ee2cd2e05e2ee..77c5178493c44 100644 --- a/vllm/lora/ops/sgmv_expand.py +++ b/vllm/lora/ops/sgmv_expand.py @@ -9,6 
+9,8 @@ import triton import triton.language as tl +from vllm.utils import direct_register_custom_op + @triton.jit def _sgmv_expand_kernel( @@ -196,9 +198,30 @@ def _sgmv_expand( return +def sgmv_expand_fake( + inputs: torch.Tensor, + lora_b_weights: torch.Tensor, + output_tensor: torch.Tensor, + b_seq_start_loc: torch.Tensor, + seq_len_tensor: torch.Tensor, + lora_indices_tensor: torch.Tensor, + batches: int, + max_seq_length: int, + token_nums: int, + add_inputs: bool = False, +) -> None: + return + + try: - sgmv_expand = torch.library.custom_op("lora::sgmv_expand", - _sgmv_expand, - mutates_args=["output_tensor"]) + + direct_register_custom_op( + op_name="sgmv_expand", + op_func=_sgmv_expand, + mutates_args=["output_tensor"], + fake_impl=sgmv_expand_fake, + ) + sgmv_expand = torch.ops.vllm.sgmv_expand + except AttributeError: sgmv_expand = _sgmv_expand diff --git a/vllm/lora/ops/sgmv_expand_slice.py b/vllm/lora/ops/sgmv_expand_slice.py index 5244fa14913a4..55c4fb68ed128 100644 --- a/vllm/lora/ops/sgmv_expand_slice.py +++ b/vllm/lora/ops/sgmv_expand_slice.py @@ -9,6 +9,8 @@ import triton import triton.language as tl +from vllm.utils import direct_register_custom_op + @triton.jit def _sgmv_expand_slice_kernel( @@ -209,9 +211,31 @@ def _sgmv_expand_slice( return +def sgmv_expand_slice_fake( + inputs: torch.Tensor, + lora_b_weights: torch.Tensor, + output_tensor: torch.Tensor, + b_seq_start_loc: torch.Tensor, + seq_len_tensor: torch.Tensor, + lora_indices_tensor: torch.Tensor, + batches: int, + max_seq_length: int, + token_nums: int, + slice_offset: int, + slice_size: int, + add_inputs: bool = False, +) -> None: + return + + try: - sgmv_expand_slice = torch.library.custom_op("lora::sgmv_expand_slice", - _sgmv_expand_slice, - mutates_args=["output_tensor"]) + direct_register_custom_op( + op_name="sgmv_expand_slice", + op_func=_sgmv_expand_slice, + mutates_args=["output_tensor"], + fake_impl=sgmv_expand_slice_fake, + ) + sgmv_expand_slice = torch.ops.vllm.sgmv_expand_slice + except AttributeError: sgmv_expand_slice = _sgmv_expand_slice diff --git a/vllm/lora/ops/sgmv_shrink.py b/vllm/lora/ops/sgmv_shrink.py index b4d893047b06b..37d1dc84eebca 100644 --- a/vllm/lora/ops/sgmv_shrink.py +++ b/vllm/lora/ops/sgmv_shrink.py @@ -9,6 +9,8 @@ import triton import triton.language as tl +from vllm.utils import direct_register_custom_op + @triton.jit def _sgmv_shrink_kernel( @@ -190,9 +192,29 @@ def _sgmv_shrink( return +def sgmv_shrink_fake( + inputs: torch.Tensor, + lora_a_weights: torch.Tensor, + output_tensor: torch.Tensor, + b_seq_start_loc: torch.Tensor, + seq_len_tensor: torch.Tensor, + lora_indices_tensor: torch.Tensor, + batches: int, + max_seq_length: int, + token_nums: int, + scaling: float, +) -> None: + return + + try: - sgmv_shrink = torch.library.custom_op("lora::sgmv_shrink", - _sgmv_shrink, - mutates_args=["output_tensor"]) + direct_register_custom_op( + op_name="sgmv_shrink", + op_func=_sgmv_shrink, + mutates_args=["output_tensor"], + fake_impl=sgmv_shrink_fake, + ) + sgmv_shrink = torch.ops.vllm.sgmv_shrink + except AttributeError: sgmv_shrink = _sgmv_shrink From c51e397fe8db2ef0664814ef3f80e1237c7283da Mon Sep 17 00:00:00 2001 From: Roger Wang <136131678+ywang96@users.noreply.github.com> Date: Thu, 21 Nov 2024 09:21:31 -0800 Subject: [PATCH 072/122] [Misc] Suppress duplicated logging regarding multimodal input pipeline (#10530) Signed-off-by: Roger Wang --- vllm/inputs/preprocess.py | 4 ++-- vllm/utils.py | 6 ++++++ 2 files changed, 8 insertions(+), 2 deletions(-) diff --git 
a/vllm/inputs/preprocess.py b/vllm/inputs/preprocess.py index aacff87df6d79..853257c5ad71f 100644 --- a/vllm/inputs/preprocess.py +++ b/vllm/inputs/preprocess.py @@ -10,7 +10,7 @@ from vllm.multimodal.processing import MultiModalDataDict, MultiModalInputsV2 from vllm.prompt_adapter.request import PromptAdapterRequest from vllm.transformers_utils.tokenizer_group import BaseTokenizerGroup -from vllm.utils import print_warning_once +from vllm.utils import print_info_once, print_warning_once from .data import (DecoderOnlyInputs, EncoderDecoderInputs, ProcessorInputs, PromptType, SingletonInputs, SingletonPrompt, token_inputs) @@ -212,7 +212,7 @@ def _can_process_multimodal(self) -> bool: # updated to use the new multi-modal processor can_process_multimodal = self.mm_registry.has_processor(model_config) if not can_process_multimodal: - logger.info( + print_info_once( "Your model uses the legacy input pipeline instead of the new " "multi-modal processor. Please note that the legacy pipeline " "will be removed in a future release. For more details, see: " diff --git a/vllm/utils.py b/vllm/utils.py index cb2ad43a2ae8d..424e7d0947790 100644 --- a/vllm/utils.py +++ b/vllm/utils.py @@ -705,6 +705,12 @@ def create_kv_caches_with_random( return key_caches, value_caches +@lru_cache +def print_info_once(msg: str) -> None: + # Set the stacklevel to 2 to print the caller's line info + logger.info(msg, stacklevel=2) + + @lru_cache def print_warning_once(msg: str) -> None: # Set the stacklevel to 2 to print the caller's line info From e7a8341c7c7481a0c797d50ead7a698255ac8a9f Mon Sep 17 00:00:00 2001 From: Cyrus Leung Date: Fri, 22 Nov 2024 02:09:43 +0800 Subject: [PATCH 073/122] [Bugfix] Allow token ID-only inputs in Qwen2-Audio (#10536) Signed-off-by: DarkLight1337 --- vllm/model_executor/models/qwen2_audio.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/vllm/model_executor/models/qwen2_audio.py b/vllm/model_executor/models/qwen2_audio.py index a4965f34b1ca8..0c2374c3c3fc9 100644 --- a/vllm/model_executor/models/qwen2_audio.py +++ b/vllm/model_executor/models/qwen2_audio.py @@ -212,7 +212,7 @@ def input_processor_for_qwen2_audio( return token_inputs( prompt_token_ids=new_input_ids, - prompt=inputs['prompt'], + prompt=inputs.get("prompt"), multi_modal_data=multi_modal_data, ) From 7560ae5cafbae3af9967ac7dc979cb31a40fc572 Mon Sep 17 00:00:00 2001 From: youkaichao Date: Thu, 21 Nov 2024 12:30:42 -0800 Subject: [PATCH 074/122] [8/N] enable cli flag without a space (#10529) Signed-off-by: youkaichao --- tests/compile/test_basic_correctness.py | 4 ++-- tests/engine/test_arg_utils.py | 28 +++++++++++++++++++++++++ tests/tpu/test_custom_dispatcher.py | 9 ++++---- vllm/engine/arg_utils.py | 5 ++++- vllm/utils.py | 4 ++++ 5 files changed, 43 insertions(+), 7 deletions(-) diff --git a/tests/compile/test_basic_correctness.py b/tests/compile/test_basic_correctness.py index c0db2e78824be..b7170886d2556 100644 --- a/tests/compile/test_basic_correctness.py +++ b/tests/compile/test_basic_correctness.py @@ -103,7 +103,7 @@ def test_compile_correctness(test_setting: TestSetting): CompilationLevel.NO_COMPILATION, CompilationLevel.PIECEWISE, ]: - all_args.append(final_args + ["-O", str(level)]) + all_args.append(final_args + [f"-O{level}"]) all_envs.append({}) # inductor will change the output, so we only compare if the output @@ -121,7 +121,7 @@ def test_compile_correctness(test_setting: TestSetting): CompilationLevel.DYNAMO_AS_IS, CompilationLevel.DYNAMO_ONCE, ]: - all_args.append(final_args + ["-O", 
str(level)]) + all_args.append(final_args + [f"-O{level}"]) all_envs.append({}) if level != CompilationLevel.DYNAMO_ONCE and not fullgraph: # "DYNAMO_ONCE" will always use fullgraph diff --git a/tests/engine/test_arg_utils.py b/tests/engine/test_arg_utils.py index 7b1be5a9802fd..5b0e76fe53685 100644 --- a/tests/engine/test_arg_utils.py +++ b/tests/engine/test_arg_utils.py @@ -31,6 +31,34 @@ def test_limit_mm_per_prompt_parser(arg, expected): assert args.limit_mm_per_prompt == expected +def test_compilation_config(): + parser = EngineArgs.add_cli_args(FlexibleArgumentParser()) + + # default value + args = parser.parse_args([]) + assert args.compilation_config is None + + # set to O3 + args = parser.parse_args(["-O3"]) + assert args.compilation_config.level == 3 + + # set to O 3 (space) + args = parser.parse_args(["-O", "3"]) + assert args.compilation_config.level == 3 + + # set to O 3 (equals) + args = parser.parse_args(["-O=3"]) + assert args.compilation_config.level == 3 + + # set to json + args = parser.parse_args(["--compilation-config", '{"level": 3}']) + assert args.compilation_config.level == 3 + + # set to json + args = parser.parse_args(['--compilation-config={"level": 3}']) + assert args.compilation_config.level == 3 + + def test_valid_pooling_config(): parser = EngineArgs.add_cli_args(FlexibleArgumentParser()) args = parser.parse_args([ diff --git a/tests/tpu/test_custom_dispatcher.py b/tests/tpu/test_custom_dispatcher.py index df348258efcba..bb1379deba3fc 100644 --- a/tests/tpu/test_custom_dispatcher.py +++ b/tests/tpu/test_custom_dispatcher.py @@ -13,9 +13,10 @@ def test_custom_dispatcher(): compare_two_settings( "google/gemma-2b", - arg1=["--enforce-eager", "-O", - str(CompilationLevel.DYNAMO_ONCE)], - arg2=["--enforce-eager", "-O", - str(CompilationLevel.DYNAMO_AS_IS)], + arg1=[ + "--enforce-eager", + f"-O{CompilationLevel.DYNAMO_ONCE}", + ], + arg2=["--enforce-eager", f"-O{CompilationLevel.DYNAMO_AS_IS}"], env1={}, env2={}) diff --git a/vllm/engine/arg_utils.py b/vllm/engine/arg_utils.py index 9288cd22c0036..88862a185ac75 100644 --- a/vllm/engine/arg_utils.py +++ b/vllm/engine/arg_utils.py @@ -882,7 +882,10 @@ def add_cli_args(parser: FlexibleArgumentParser) -> FlexibleArgumentParser: 'testing only. level 3 is the recommended level ' 'for production.\n' 'To specify the full compilation config, ' - 'use a JSON string.') + 'use a JSON string.\n' + 'Following the convention of traditional ' + 'compilers, using -O without space is also ' + 'supported. -O3 is equivalent to -O 3.') return parser diff --git a/vllm/utils.py b/vllm/utils.py index 424e7d0947790..67b2629ecc933 100644 --- a/vllm/utils.py +++ b/vllm/utils.py @@ -1192,6 +1192,10 @@ def parse_args(self, args=None, namespace=None): else: processed_args.append('--' + arg[len('--'):].replace('_', '-')) + elif arg.startswith('-O') and arg != '-O' and len(arg) == 2: + # allow -O flag to be used without space, e.g. 
-O3 + processed_args.append('-O') + processed_args.append(arg[2:]) else: processed_args.append(arg) From f9310cbd0c1109c4f22cf9f1dc615b2d08f06408 Mon Sep 17 00:00:00 2001 From: Woosuk Kwon Date: Thu, 21 Nov 2024 12:53:39 -0800 Subject: [PATCH 075/122] [V1] Fix Compilation config & Enable CUDA graph by default (#10528) Signed-off-by: Woosuk Kwon --- vllm/config.py | 3 +- vllm/v1/worker/gpu_model_runner.py | 62 ++++++++++++++++-------------- vllm/v1/worker/gpu_worker.py | 39 ++++++++++++------- 3 files changed, 62 insertions(+), 42 deletions(-) diff --git a/vllm/config.py b/vllm/config.py index ed09f8ae31863..d1c6a850cb78c 100644 --- a/vllm/config.py +++ b/vllm/config.py @@ -2370,7 +2370,7 @@ def __post_init__(self): if self.compilation_config is None: self.compilation_config = CompilationConfig() - if envs.VLLM_USE_V1: + if envs.VLLM_USE_V1 and not self.model_config.enforce_eager: # NOTE(woosuk): Currently, we use inductor because the piecewise # CUDA graphs do not work properly with the custom CUDA kernels. # FIXME(woosuk): Disable inductor to reduce the compilation time @@ -2380,6 +2380,7 @@ def __post_init__(self): self.compilation_config.use_inductor = True self.compilation_config.pass_config.enable_fusion = False self.compilation_config.pass_config.enable_reshape = False + self.compilation_config.level = CompilationLevel.PIECEWISE current_platform.check_and_update_config(self) diff --git a/vllm/v1/worker/gpu_model_runner.py b/vllm/v1/worker/gpu_model_runner.py index 5f66293cbe8e4..2cf55cd497659 100644 --- a/vllm/v1/worker/gpu_model_runner.py +++ b/vllm/v1/worker/gpu_model_runner.py @@ -1,3 +1,4 @@ +import gc import time from dataclasses import dataclass from typing import TYPE_CHECKING, Dict, List, Optional, Set, Tuple @@ -515,7 +516,25 @@ def load_model(self) -> None: logger.info("Loading model weights took %.4f GB", self.model_memory_usage / float(2**30)) - def _dummy_run(self, model: nn.Module, num_tokens: int) -> None: + @torch.inference_mode() + def _dummy_run( + self, + model: nn.Module, + num_tokens: int, + kv_caches: List[torch.Tensor], + ) -> torch.Tensor: + with set_forward_context(None): + hidden_states = model( + input_ids=None, + positions=self.positions[:num_tokens], + kv_caches=kv_caches, + attn_metadata=None, + inputs_embeds=self.inputs_embeds[:num_tokens]) + return hidden_states + + def profile_run(self) -> None: + # TODO(woosuk): Profile the max memory usage of the encoder and + # the encoder cache. # use an empty tensor instead of `None`` to force Dynamo to pass # it by reference, rather by specializing on the value `None`. # the `dtype` argument does not matter, and we use `float32` as @@ -527,23 +546,17 @@ def _dummy_run(self, model: nn.Module, num_tokens: int) -> None: torch.tensor([], dtype=torch.float32, device=self.device) for _ in range(self.num_attn_layers) ] - with set_forward_context(None): # noqa: SIM117 - with set_compile_context(self.cudagraph_batch_sizes): - # Trigger compilation for general shape. - model(input_ids=None, - positions=self.positions, - kv_caches=dummy_kv_caches, - attn_metadata=None, - inputs_embeds=self.inputs_embeds) - - @torch.inference_mode() - def profile_run(self) -> None: - # TODO(woosuk): Profile the max memory usage of the encoder and - # the encoder cache. - self._dummy_run(self.model, self.max_num_tokens) + with set_compile_context(self.cudagraph_batch_sizes): + # Trigger compilation for general shape. 
+ hidden_states = self._dummy_run(self.model, self.max_num_tokens, + dummy_kv_caches) + logits = self.model.compute_logits(hidden_states, None) + logits = logits[:self.max_num_tokens] + # TODO(woosuk): Consider the memory usage of the sampler. torch.cuda.synchronize() + del hidden_states, logits + gc.collect() - @torch.inference_mode() def capture_model(self) -> None: if not self.use_cuda_graph: logger.warning( @@ -554,18 +567,11 @@ def capture_model(self) -> None: start_time = time.perf_counter() start_free_gpu_memory = torch.cuda.mem_get_info()[0] - with set_forward_context(None): - # Trigger CUDA graph capture for specific shapes. - # Capture the large shapes first so that the smaller shapes - # can reuse the memory pool allocated for the large shapes. - for num_tokens in reversed(self.cudagraph_batch_sizes): - self.model( - input_ids=None, - positions=self.positions[:num_tokens], - kv_caches=self.kv_caches, - attn_metadata=None, - inputs_embeds=self.inputs_embeds[:num_tokens], - ) + # Trigger CUDA graph capture for specific shapes. + # Capture the large shapes first so that the smaller shapes + # can reuse the memory pool allocated for the large shapes. + for num_tokens in reversed(self.cudagraph_batch_sizes): + self._dummy_run(self.model, num_tokens, self.kv_caches) end_time = time.perf_counter() end_free_gpu_memory = torch.cuda.mem_get_info()[0] diff --git a/vllm/v1/worker/gpu_worker.py b/vllm/v1/worker/gpu_worker.py index c8192b7f86eb0..7973349f14a5d 100644 --- a/vllm/v1/worker/gpu_worker.py +++ b/vllm/v1/worker/gpu_worker.py @@ -105,35 +105,48 @@ def determine_num_available_blocks(self) -> Tuple[int, int]: # Profile the memory usage of the model and get the maximum number of # cache blocks that can be allocated with the remaining free memory. torch.cuda.empty_cache() + torch.cuda.reset_peak_memory_stats() + _, total_gpu_memory = torch.cuda.mem_get_info() # Execute a forward pass with dummy inputs to profile the memory usage # of the model. self.model_runner.profile_run() - - # Calculate the number of blocks that can be allocated with the - # profiled peak memory. torch.cuda.synchronize() - free_gpu_memory, total_gpu_memory = torch.cuda.mem_get_info() + + free_gpu_memory, _ = torch.cuda.mem_get_info() # NOTE(woosuk): Here we assume that the other processes using the same # GPU did not change their memory usage during the profiling. - peak_memory = self.init_gpu_memory - free_gpu_memory - assert peak_memory > 0, ( + assert self.init_gpu_memory > free_gpu_memory, ( "Error in memory profiling. " f"Initial free memory {self.init_gpu_memory}, current free memory" f" {free_gpu_memory}. This happens when the GPU memory was " "not properly cleaned up before initializing the vLLM instance.") + # Get the peak memory allocation recorded by torch + peak_memory = torch.cuda.memory_stats()["allocated_bytes.all.peak"] + + # Check for any memory left around that may have been allocated on the + # gpu outside of `torch`. 
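This profiling arithmetic, shown in full over the next few hunks, can be summarized as a standalone helper. A sketch only (not the vLLM implementation; assumes a CUDA device and a caller-supplied dummy forward pass), returning the bytes left over for the KV cache:

    import torch

    def kv_cache_budget(run_profile_pass, gpu_memory_utilization: float = 0.9) -> int:
        torch.cuda.empty_cache()
        torch.cuda.reset_peak_memory_stats()
        _, total = torch.cuda.mem_get_info()

        run_profile_pass()  # dummy forward at the maximum batch size
        torch.cuda.synchronize()

        # Peak memory that torch itself allocated during the pass.
        peak = torch.cuda.memory_stats()["allocated_bytes.all.peak"]

        # Anything else still resident (e.g. NCCL workspaces) is charged too.
        torch.cuda.empty_cache()
        torch_current = torch.cuda.memory_stats()["allocated_bytes.all.current"]
        free, _ = torch.cuda.mem_get_info()
        non_torch = (total - free) - torch_current
        if non_torch > 0:
            peak += non_torch

        return int(total * gpu_memory_utilization - peak)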
NCCL operations, for example, can use a few + # GB during a forward pass + torch.cuda.empty_cache() + torch_allocated_bytes = torch.cuda.memory_stats( + )["allocated_bytes.all.current"] + total_allocated_bytes = torch.cuda.mem_get_info( + )[1] - torch.cuda.mem_get_info()[0] + non_torch_allocations = total_allocated_bytes - torch_allocated_bytes + if non_torch_allocations > 0: + peak_memory += non_torch_allocations + available_kv_cache_memory = ( + total_gpu_memory * self.cache_config.gpu_memory_utilization - + peak_memory) + + # Calculate the number of blocks that can be allocated with the + # profiled peak memory. cache_block_size = _get_cache_block_size(self.cache_config, self.model_config, self.parallel_config) - num_gpu_blocks = int( - (total_gpu_memory * self.cache_config.gpu_memory_utilization - - peak_memory) // cache_block_size) + num_gpu_blocks = int(available_kv_cache_memory // cache_block_size) num_gpu_blocks = max(num_gpu_blocks, 0) - # if self.model_runner.lora_manager: - # self.model_runner.remove_all_loras() - gc.collect() - torch.cuda.empty_cache() return num_gpu_blocks, 0 def initialize_cache(self, num_gpu_blocks: int) -> None: From edec3385b641afb22739a6ec0fd0145f8f1141c5 Mon Sep 17 00:00:00 2001 From: Yunmeng Date: Fri, 22 Nov 2024 05:03:58 +0800 Subject: [PATCH 076/122] [CI][Installation] Avoid uploading CUDA 11.8 wheel (#10535) Signed-off-by: simon-mo Co-authored-by: simon-mo --- .buildkite/upload-wheels.sh | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/.buildkite/upload-wheels.sh b/.buildkite/upload-wheels.sh index 541b395eddbe7..7345dd4e66b29 100644 --- a/.buildkite/upload-wheels.sh +++ b/.buildkite/upload-wheels.sh @@ -25,7 +25,12 @@ echo "Version: $version" # If the version contains "dev", rename it to v1.0.0.dev for consistency if [[ $version == *dev* ]]; then - new_version="1.0.0.dev" + suffix="${version##*.}" + if [[ $suffix == cu* ]]; then + new_version="1.0.0.dev+${suffix}" + else + new_version="1.0.0.dev" + fi new_wheel="${wheel/$version/$new_version}" mv -- "$wheel" "$new_wheel" wheel="$new_wheel" From cf656f5a022c1ef6f0513c53c5106c8eeff7fdaa Mon Sep 17 00:00:00 2001 From: youkaichao Date: Thu, 21 Nov 2024 13:13:17 -0800 Subject: [PATCH 077/122] [misc] improve error message (#10553) Signed-off-by: youkaichao --- vllm/platforms/cuda.py | 10 ++++++++-- 1 file changed, 8 insertions(+), 2 deletions(-) diff --git a/vllm/platforms/cuda.py b/vllm/platforms/cuda.py index 07562a8c3d71e..b38dd7c936896 100644 --- a/vllm/platforms/cuda.py +++ b/vllm/platforms/cuda.py @@ -99,8 +99,14 @@ def device_id_to_physical_device_id(device_id: int) -> int: if "CUDA_VISIBLE_DEVICES" in os.environ: device_ids = os.environ["CUDA_VISIBLE_DEVICES"].split(",") if device_ids == [""]: - raise RuntimeError("CUDA_VISIBLE_DEVICES is set to empty string," - " which means GPU support is disabled.") + msg = ( + "CUDA_VISIBLE_DEVICES is set to empty string, which means" + " GPU support is disabled. If you are using ray, please unset" + " the environment variable `CUDA_VISIBLE_DEVICES` inside the" + " worker/actor. 
" + "Check https://github.com/vllm-project/vllm/issues/8402 for" + " more information.") + raise RuntimeError(msg) physical_device_id = device_ids[device_id] return int(physical_device_id) else: From 46fe9b46d83e733130ce952eb3967a9c96713583 Mon Sep 17 00:00:00 2001 From: Woosuk Kwon Date: Thu, 21 Nov 2024 13:28:16 -0800 Subject: [PATCH 078/122] [Minor] Revert change in offline inference example (#10545) Signed-off-by: Woosuk Kwon --- examples/offline_inference.py | 98 +++++++------------------------ examples/offline_inference_cli.py | 80 +++++++++++++++++++++++++ 2 files changed, 100 insertions(+), 78 deletions(-) create mode 100644 examples/offline_inference_cli.py diff --git a/examples/offline_inference.py b/examples/offline_inference.py index 391ac6b9b6b03..9b758fa2479f6 100644 --- a/examples/offline_inference.py +++ b/examples/offline_inference.py @@ -1,80 +1,22 @@ -from dataclasses import asdict - from vllm import LLM, SamplingParams -from vllm.engine.arg_utils import EngineArgs -from vllm.utils import FlexibleArgumentParser - - -def get_prompts(num_prompts: int): - # The default sample prompts. - prompts = [ - "Hello, my name is", - "The president of the United States is", - "The capital of France is", - "The future of AI is", - ] - - if num_prompts != len(prompts): - prompts = (prompts * ((num_prompts // len(prompts)) + 1))[:num_prompts] - - return prompts - - -def main(args): - # Create prompts - prompts = get_prompts(args.num_prompts) - - # Create a sampling params object. - sampling_params = SamplingParams(n=args.n, - temperature=args.temperature, - top_p=args.top_p, - top_k=args.top_k, - max_tokens=args.max_tokens) - - # Create an LLM. - # The default model is 'facebook/opt-125m' - engine_args = EngineArgs.from_cli_args(args) - llm = LLM(**asdict(engine_args)) - - # Generate texts from the prompts. - # The output is a list of RequestOutput objects - # that contain the prompt, generated text, and other information. - outputs = llm.generate(prompts, sampling_params) - # Print the outputs. - for output in outputs: - prompt = output.prompt - generated_text = output.outputs[0].text - print(f"Prompt: {prompt!r}, Generated text: {generated_text!r}") - - -if __name__ == '__main__': - parser = FlexibleArgumentParser() - parser = EngineArgs.add_cli_args(parser) - group = parser.add_argument_group("SamplingParams options") - group.add_argument("--num-prompts", - type=int, - default=4, - help="Number of prompts used for inference") - group.add_argument("--max-tokens", - type=int, - default=16, - help="Generated output length for sampling") - group.add_argument('--n', - type=int, - default=1, - help='Number of generated sequences per prompt') - group.add_argument('--temperature', - type=float, - default=0.8, - help='Temperature for text generation') - group.add_argument('--top-p', - type=float, - default=0.95, - help='top_p for text generation') - group.add_argument('--top-k', - type=int, - default=-1, - help='top_k for text generation') - args = parser.parse_args() - main(args) +# Sample prompts. +prompts = [ + "Hello, my name is", + "The president of the United States is", + "The capital of France is", + "The future of AI is", +] +# Create a sampling params object. +sampling_params = SamplingParams(temperature=0.8, top_p=0.95) + +# Create an LLM. +llm = LLM(model="facebook/opt-125m") +# Generate texts from the prompts. The output is a list of RequestOutput objects +# that contain the prompt, generated text, and other information. 
+outputs = llm.generate(prompts, sampling_params) +# Print the outputs. +for output in outputs: + prompt = output.prompt + generated_text = output.outputs[0].text + print(f"Prompt: {prompt!r}, Generated text: {generated_text!r}") diff --git a/examples/offline_inference_cli.py b/examples/offline_inference_cli.py new file mode 100644 index 0000000000000..391ac6b9b6b03 --- /dev/null +++ b/examples/offline_inference_cli.py @@ -0,0 +1,80 @@ +from dataclasses import asdict + +from vllm import LLM, SamplingParams +from vllm.engine.arg_utils import EngineArgs +from vllm.utils import FlexibleArgumentParser + + +def get_prompts(num_prompts: int): + # The default sample prompts. + prompts = [ + "Hello, my name is", + "The president of the United States is", + "The capital of France is", + "The future of AI is", + ] + + if num_prompts != len(prompts): + prompts = (prompts * ((num_prompts // len(prompts)) + 1))[:num_prompts] + + return prompts + + +def main(args): + # Create prompts + prompts = get_prompts(args.num_prompts) + + # Create a sampling params object. + sampling_params = SamplingParams(n=args.n, + temperature=args.temperature, + top_p=args.top_p, + top_k=args.top_k, + max_tokens=args.max_tokens) + + # Create an LLM. + # The default model is 'facebook/opt-125m' + engine_args = EngineArgs.from_cli_args(args) + llm = LLM(**asdict(engine_args)) + + # Generate texts from the prompts. + # The output is a list of RequestOutput objects + # that contain the prompt, generated text, and other information. + outputs = llm.generate(prompts, sampling_params) + # Print the outputs. + for output in outputs: + prompt = output.prompt + generated_text = output.outputs[0].text + print(f"Prompt: {prompt!r}, Generated text: {generated_text!r}") + + +if __name__ == '__main__': + parser = FlexibleArgumentParser() + parser = EngineArgs.add_cli_args(parser) + group = parser.add_argument_group("SamplingParams options") + group.add_argument("--num-prompts", + type=int, + default=4, + help="Number of prompts used for inference") + group.add_argument("--max-tokens", + type=int, + default=16, + help="Generated output length for sampling") + group.add_argument('--n', + type=int, + default=1, + help='Number of generated sequences per prompt') + group.add_argument('--temperature', + type=float, + default=0.8, + help='Temperature for text generation') + group.add_argument('--top-p', + type=float, + default=0.95, + help='top_p for text generation') + group.add_argument('--top-k', + type=int, + default=-1, + help='top_k for text generation') + + args = parser.parse_args() + main(args) From 9afa01455237892c878bb2810912c487d66149a9 Mon Sep 17 00:00:00 2001 From: Michael Goin Date: Thu, 21 Nov 2024 18:43:43 -0500 Subject: [PATCH 079/122] Add small example to metrics.rst (#10550) --- docs/source/serving/metrics.rst | 27 ++++++++++++++++++++++++++- 1 file changed, 26 insertions(+), 1 deletion(-) diff --git a/docs/source/serving/metrics.rst b/docs/source/serving/metrics.rst index 15e57bd3fec65..231111cd7b738 100644 --- a/docs/source/serving/metrics.rst +++ b/docs/source/serving/metrics.rst @@ -2,9 +2,34 @@ Production Metrics ================== vLLM exposes a number of metrics that can be used to monitor the health of the -system. These metrics are exposed via the `/metrics` endpoint on the vLLM +system. These metrics are exposed via the ``/metrics`` endpoint on the vLLM OpenAI compatible API server. +You can start the server using Python, or using [Docker](deploying_with_docker.rst): + +.. 
code-block:: console + + $ vllm serve unsloth/Llama-3.2-1B-Instruct + +Then query the endpoint to get the latest metrics from the server: + +.. code-block:: console + + $ curl http://0.0.0.0:8000/metrics + + # HELP vllm:iteration_tokens_total Histogram of number of tokens per engine_step. + # TYPE vllm:iteration_tokens_total histogram + vllm:iteration_tokens_total_sum{model_name="unsloth/Llama-3.2-1B-Instruct"} 0.0 + vllm:iteration_tokens_total_bucket{le="1.0",model_name="unsloth/Llama-3.2-1B-Instruct"} 3.0 + vllm:iteration_tokens_total_bucket{le="8.0",model_name="unsloth/Llama-3.2-1B-Instruct"} 3.0 + vllm:iteration_tokens_total_bucket{le="16.0",model_name="unsloth/Llama-3.2-1B-Instruct"} 3.0 + vllm:iteration_tokens_total_bucket{le="32.0",model_name="unsloth/Llama-3.2-1B-Instruct"} 3.0 + vllm:iteration_tokens_total_bucket{le="64.0",model_name="unsloth/Llama-3.2-1B-Instruct"} 3.0 + vllm:iteration_tokens_total_bucket{le="128.0",model_name="unsloth/Llama-3.2-1B-Instruct"} 3.0 + vllm:iteration_tokens_total_bucket{le="256.0",model_name="unsloth/Llama-3.2-1B-Instruct"} 3.0 + vllm:iteration_tokens_total_bucket{le="512.0",model_name="unsloth/Llama-3.2-1B-Instruct"} 3.0 + ... + The following metrics are exposed: .. literalinclude:: ../../../vllm/engine/metrics.py From aed074860a46536faf77bacd76d02efccbaf4a5d Mon Sep 17 00:00:00 2001 From: Simon Mo Date: Thu, 21 Nov 2024 18:27:20 -0800 Subject: [PATCH 080/122] [Benchmark] Add new H100 machine (#10547) --- .../benchmark-pipeline.yaml | 39 ++++++++++--------- .../convert-results-json-to-markdown.py | 13 +++++-- 2 files changed, 31 insertions(+), 21 deletions(-) diff --git a/.buildkite/nightly-benchmarks/benchmark-pipeline.yaml b/.buildkite/nightly-benchmarks/benchmark-pipeline.yaml index 5c069b38b2d7d..3db77d5f16022 100644 --- a/.buildkite/nightly-benchmarks/benchmark-pipeline.yaml +++ b/.buildkite/nightly-benchmarks/benchmark-pipeline.yaml @@ -13,6 +13,7 @@ steps: - wait - label: "A100" + # skip: "use this flag to conditionally skip the benchmark step, useful for PR testing" agents: queue: A100 plugins: @@ -45,6 +46,7 @@ steps: medium: Memory - label: "H200" + # skip: "use this flag to conditionally skip the benchmark step, useful for PR testing" agents: queue: H200 plugins: @@ -63,21 +65,22 @@ steps: - VLLM_USAGE_SOURCE - HF_TOKEN - - # - label: "H100" - # agents: - # queue: H100 - # plugins: - # - docker#v5.11.0: - # image: public.ecr.aws/q9t5s3a7/vllm-ci-test-repo:$BUILDKITE_COMMIT - # command: - # - bash - # - .buildkite/nightly-benchmarks/run-benchmarks-suite.sh - # mount-buildkite-agent: true - # propagate-environment: true - # ipc: host - # gpus: all - # environment: - # - VLLM_USAGE_SOURCE - # - HF_TOKEN - + - label: "H100" + # skip: "use this flag to conditionally skip the benchmark step, useful for PR testing" + agents: + queue: H100 + plugins: + - docker#v5.12.0: + image: public.ecr.aws/q9t5s3a7/vllm-ci-test-repo:$BUILDKITE_COMMIT + command: + - bash + - .buildkite/nightly-benchmarks/scripts/run-performance-benchmarks.sh + mount-buildkite-agent: true + propagate-environment: true + ipc: host + gpus: all # see CUDA_VISIBLE_DEVICES for actual GPUs used + volumes: + - /data/benchmark-hf-cache:/root/.cache/huggingface + environment: + - VLLM_USAGE_SOURCE + - HF_TOKEN diff --git a/.buildkite/nightly-benchmarks/scripts/convert-results-json-to-markdown.py b/.buildkite/nightly-benchmarks/scripts/convert-results-json-to-markdown.py index d640563252a0c..9d3646e2f6a15 100644 --- 
a/.buildkite/nightly-benchmarks/scripts/convert-results-json-to-markdown.py +++ b/.buildkite/nightly-benchmarks/scripts/convert-results-json-to-markdown.py @@ -157,10 +157,17 @@ def results_to_json(latency, throughput, serving): throughput_results, serving_results) - # Sort all dataframes by their respective "Test name" columns for df in [latency_results, serving_results, throughput_results]: - if not df.empty: - df.sort_values(by="Test name", inplace=True) + if df.empty: + continue + + # Sort all dataframes by their respective "Test name" columns + df.sort_values(by="Test name", inplace=True) + + # The GPUs sometimes come in format of "GPUTYPE\nGPUTYPE\n...", + # we want to turn it into "8xGPUTYPE" + df["GPU"] = df["GPU"].apply( + lambda x: f"{len(x.split('\n'))}x{x.split('\n')[0]}") # get markdown tables latency_md_table = tabulate(latency_results, From 33e0a2540a6bff23cbc6a4b8f7a6784a2bc87d47 Mon Sep 17 00:00:00 2001 From: youkaichao Date: Thu, 21 Nov 2024 19:13:31 -0800 Subject: [PATCH 081/122] [9/N] torch.compile LLM usage (#10552) Signed-off-by: youkaichao --- tests/tpu/test_compilation.py | 5 ++--- vllm/entrypoints/llm.py | 15 ++++++++++++++- 2 files changed, 16 insertions(+), 4 deletions(-) diff --git a/tests/tpu/test_compilation.py b/tests/tpu/test_compilation.py index 65bee85e7a1ea..b7124ebc1b0f3 100644 --- a/tests/tpu/test_compilation.py +++ b/tests/tpu/test_compilation.py @@ -4,7 +4,7 @@ import depyf -from vllm.config import CompilationConfig, CompilationLevel +from vllm.config import CompilationLevel temp_dir = tempfile.mkdtemp() with depyf.prepare_debug(temp_dir): @@ -34,8 +34,7 @@ # all the control llm = LLM(model="google/gemma-2b", enforce_eager=True, - compilation_config=CompilationConfig( - level=CompilationLevel.DYNAMO_AS_IS)) + compilation_config={"level": CompilationLevel.DYNAMO_AS_IS}) outputs = llm.generate(prompts, sampling_params) for output, answer in zip(outputs, answers): prompt = output.prompt diff --git a/vllm/entrypoints/llm.py b/vllm/entrypoints/llm.py index 86b0b6893f1d9..2446a64a02eb2 100644 --- a/vllm/entrypoints/llm.py +++ b/vllm/entrypoints/llm.py @@ -1,4 +1,5 @@ import itertools +import json import warnings from contextlib import contextmanager from typing import (Any, ClassVar, Dict, List, Optional, Sequence, Tuple, Type, @@ -9,6 +10,7 @@ from vllm import envs from vllm.beam_search import (BeamSearchInstance, BeamSearchOutput, BeamSearchSequence, get_beam_search_score) +from vllm.config import CompilationConfig from vllm.engine.arg_utils import (EngineArgs, HfOverrides, PoolerConfig, TaskOption) from vllm.engine.llm_engine import LLMEngine @@ -107,13 +109,16 @@ class LLM: hf_overrides: If a dictionary, contains arguments to be forwarded to the HuggingFace config. If a callable, it is called to update the HuggingFace config. + compilation_config: Either an integer or a dictionary. If it is an integer, + it is used as the level of compilation optimization. If it is a dictionary, + it can specify the full compilation configuration. **kwargs: Arguments for :class:`~vllm.EngineArgs`. (See :ref:`engine_args`) Note: This class is intended to be used for offline inference. For online serving, use the :class:`~vllm.AsyncLLMEngine` class instead. 
- """ + """ # noqa DEPRECATE_LEGACY: ClassVar[bool] = False """A flag to toggle whether to deprecate the legacy generate/encode API.""" @@ -166,6 +171,7 @@ def __init__( # After positional args are removed, move this right below `model` task: TaskOption = "auto", override_pooler_config: Optional[PoolerConfig] = None, + compilation_config: Optional[Union[int, Dict[str, Any]]] = None, **kwargs, ) -> None: ''' @@ -178,6 +184,12 @@ def __init__( if "disable_log_stats" not in kwargs: kwargs["disable_log_stats"] = True + if compilation_config is not None: + compilation_config_instance = CompilationConfig.from_cli( + json.dumps(compilation_config)) + else: + compilation_config_instance = None + engine_args = EngineArgs( model=model, task=task, @@ -202,6 +214,7 @@ def __init__( hf_overrides=hf_overrides, mm_processor_kwargs=mm_processor_kwargs, override_pooler_config=override_pooler_config, + compilation_config=compilation_config_instance, **kwargs, ) # Logic to switch between engines is done at runtime instead of import From 446c7806b21d810b90604097487cc87393542aad Mon Sep 17 00:00:00 2001 From: Woosuk Kwon Date: Thu, 21 Nov 2024 19:40:40 -0800 Subject: [PATCH 082/122] [Minor] Fix line-too-long (#10563) Signed-off-by: Woosuk Kwon --- vllm/entrypoints/llm.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/vllm/entrypoints/llm.py b/vllm/entrypoints/llm.py index 2446a64a02eb2..c211ec5aee080 100644 --- a/vllm/entrypoints/llm.py +++ b/vllm/entrypoints/llm.py @@ -109,16 +109,16 @@ class LLM: hf_overrides: If a dictionary, contains arguments to be forwarded to the HuggingFace config. If a callable, it is called to update the HuggingFace config. - compilation_config: Either an integer or a dictionary. If it is an integer, - it is used as the level of compilation optimization. If it is a dictionary, - it can specify the full compilation configuration. + compilation_config: Either an integer or a dictionary. If it is an + integer, it is used as the level of compilation optimization. If it + is a dictionary, it can specify the full compilation configuration. **kwargs: Arguments for :class:`~vllm.EngineArgs`. (See :ref:`engine_args`) Note: This class is intended to be used for offline inference. For online serving, use the :class:`~vllm.AsyncLLMEngine` class instead. 
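With this plumbing in place, offline users can opt into compilation without going through the CLI. A usage sketch (any small HF causal LM works the same way; a CUDA-capable setup is assumed):

    from vllm import LLM, SamplingParams

    # Either pass a dict for the full config, or just an integer level;
    # both forms feed CompilationConfig.from_cli under the hood.
    llm = LLM(model="facebook/opt-125m", compilation_config={"level": 3})
    # llm = LLM(model="facebook/opt-125m", compilation_config=3)  # equivalent

    outputs = llm.generate(["The future of AI is"],
                           SamplingParams(temperature=0.8, max_tokens=16))
    print(outputs[0].outputs[0].text)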
- """ # noqa + """ DEPRECATE_LEGACY: ClassVar[bool] = False """A flag to toggle whether to deprecate the legacy generate/encode API.""" From a111d0151ffed94582bec65635979e04e5b63676 Mon Sep 17 00:00:00 2001 From: youkaichao Date: Thu, 21 Nov 2024 21:00:32 -0800 Subject: [PATCH 083/122] [platforms] absorb worker cls difference into platforms folder (#10555) Signed-off-by: youkaichao Co-authored-by: Nick Hill --- vllm/config.py | 238 ++++++++++++------------ vllm/engine/arg_utils.py | 11 +- vllm/executor/cpu_executor.py | 7 +- vllm/executor/gpu_executor.py | 49 +---- vllm/executor/hpu_executor.py | 5 +- vllm/executor/multiproc_gpu_executor.py | 2 +- vllm/executor/neuron_executor.py | 5 +- vllm/executor/openvino_executor.py | 8 +- vllm/executor/ray_gpu_executor.py | 16 +- vllm/executor/ray_hpu_executor.py | 36 +--- vllm/executor/ray_tpu_executor.py | 19 +- vllm/executor/xpu_executor.py | 14 +- vllm/platforms/cpu.py | 2 + vllm/platforms/cuda.py | 21 ++- vllm/platforms/hpu.py | 23 +++ vllm/platforms/neuron.py | 14 ++ vllm/platforms/openvino.py | 18 ++ vllm/platforms/rocm.py | 20 ++ vllm/platforms/tpu.py | 12 ++ vllm/platforms/xpu.py | 6 + vllm/worker/worker_base.py | 30 +-- 21 files changed, 273 insertions(+), 283 deletions(-) diff --git a/vllm/config.py b/vllm/config.py index d1c6a850cb78c..b5f2116e3557b 100644 --- a/vllm/config.py +++ b/vllm/config.py @@ -926,56 +926,56 @@ def _verify_load_format(self) -> None: f"{rocm_supported_load_format}") +@dataclass class ParallelConfig: - """Configuration for the distributed execution. + """Configuration for the distributed execution.""" - Args: - pipeline_parallel_size: Number of pipeline parallel groups. - tensor_parallel_size: Number of tensor parallel groups. - worker_use_ray: Deprecated, use distributed_executor_backend instead. - max_parallel_loading_workers: Maximum number of multiple batches - when load model sequentially. To avoid RAM OOM when using tensor - parallel and large models. - disable_custom_all_reduce: Disable the custom all-reduce kernel and - fall back to NCCL. - tokenizer_pool_config: Config for the tokenizer pool. - If None, will use synchronous tokenization. - ray_workers_use_nsight: Whether to profile Ray workers with nsight, see - https://docs.ray.io/en/latest/ray-observability/user-guides/profiling.html#profiling-nsight-profiler. - placement_group: ray distributed model workers placement group. - distributed_executor_backend: Backend to use for distributed model - workers, either "ray" or "mp" (multiprocessing). If the product - of pipeline_parallel_size and tensor_parallel_size is less than - or equal to the number of GPUs available, "mp" will be used to - keep processing on a single host. Otherwise, this will default - to "ray" if Ray is installed and fail otherwise. Note that tpu - and hpu only support Ray for distributed inference. - """ + pipeline_parallel_size: int = 1 # Number of pipeline parallel groups. + tensor_parallel_size: int = 1 # Number of tensor parallel groups. 
- def __init__( - self, - pipeline_parallel_size: int, - tensor_parallel_size: int, - worker_use_ray: Optional[bool] = None, - max_parallel_loading_workers: Optional[int] = None, - disable_custom_all_reduce: bool = False, - tokenizer_pool_config: Optional[TokenizerPoolConfig] = None, - ray_workers_use_nsight: bool = False, - placement_group: Optional["PlacementGroup"] = None, - distributed_executor_backend: Optional[Union[ - str, Type["ExecutorBase"]]] = None, - ) -> None: - self.pipeline_parallel_size = pipeline_parallel_size - self.tensor_parallel_size = tensor_parallel_size - self.distributed_executor_backend = distributed_executor_backend - self.max_parallel_loading_workers = max_parallel_loading_workers - self.disable_custom_all_reduce = disable_custom_all_reduce - self.tokenizer_pool_config = tokenizer_pool_config - self.ray_workers_use_nsight = ray_workers_use_nsight - self.placement_group = placement_group - self.world_size = pipeline_parallel_size * self.tensor_parallel_size - - if worker_use_ray: + # Deprecated, use distributed_executor_backend instead. + worker_use_ray: Optional[bool] = None + + # Maximum number of multiple batches + # when load model sequentially. To avoid RAM OOM when using tensor + # parallel and large models. + max_parallel_loading_workers: Optional[int] = None + + # Disable the custom all-reduce kernel and fall back to NCCL. + disable_custom_all_reduce: bool = False + + # Config for the tokenizer pool. If None, will use synchronous tokenization. + tokenizer_pool_config: Optional[TokenizerPoolConfig] = None + + # Whether to profile Ray workers with nsight, see https://docs.ray.io/en/latest/ray-observability/user-guides/profiling.html#profiling-nsight-profiler. + ray_workers_use_nsight: bool = False + + # ray distributed model workers placement group. + placement_group: Optional["PlacementGroup"] = None + + # Backend to use for distributed model + # workers, either "ray" or "mp" (multiprocessing). If the product + # of pipeline_parallel_size and tensor_parallel_size is less than + # or equal to the number of GPUs available, "mp" will be used to + # keep processing on a single host. Otherwise, this will default + # to "ray" if Ray is installed and fail otherwise. Note that tpu + # and hpu only support Ray for distributed inference. + distributed_executor_backend: Optional[Union[str, + Type["ExecutorBase"]]] = None + + # the full name of the worker class to use. If "auto", the worker class + # will be determined based on the platform. + worker_cls: str = "auto" + + world_size: int = field(init=False) + + rank: int = 0 + + def __post_init__(self) -> None: + self.world_size = self.pipeline_parallel_size * \ + self.tensor_parallel_size + + if self.worker_use_ray: if self.distributed_executor_backend is None: self.distributed_executor_backend = "ray" elif not self.use_ray: @@ -1026,7 +1026,6 @@ def __init__( backend) self._verify_args() - self.rank: int = 0 @property def use_ray(self) -> bool: @@ -1059,100 +1058,97 @@ def _verify_args(self) -> None: "run with Ray.") +@dataclass class SchedulerConfig: - """Scheduler configuration. + """Scheduler configuration.""" - Args: - task: The task to use the model for. - max_num_batched_tokens: Maximum number of tokens to be processed in - a single iteration. - max_num_seqs: Maximum number of sequences to be processed in a single - iteration. - max_model_len: Maximum length of a sequence (including prompt - and generated text). 
- num_lookahead_slots: The number of slots to allocate per sequence per - step, beyond the known token ids. This is used in speculative - decoding to store KV activations of tokens which may or may not be - accepted. - delay_factor: Apply a delay (of delay factor multiplied by previous - prompt latency) before scheduling next prompt. - enable_chunked_prefill: If True, prefill requests can be chunked based - on the remaining max_num_batched_tokens. - preemption_mode: Whether to perform preemption by swapping or - recomputation. If not specified, we determine the mode as follows: - We use recomputation by default since it incurs lower overhead than - swapping. However, when the sequence group has multiple sequences - (e.g., beam search), recomputation is not currently supported. In - such a case, we use swapping instead. - send_delta_data: Private API. If used, scheduler sends delta data to - workers instead of an entire data. It should be enabled only - when SPMD worker architecture is enabled. I.e., - VLLM_USE_RAY_SPMD_WORKER=1 - policy: The scheduling policy to use. "fcfs" (default) or "priority". - """ + task: str = "generate" # The task to use the model for. + + # Maximum number of tokens to be processed in a single iteration. + max_num_batched_tokens: int = field(default=None) # type: ignore + + # Maximum number of sequences to be processed in a single iteration. + max_num_seqs: int = 128 + + # Maximum length of a sequence (including prompt and generated text). + max_model_len: int = 8192 + + # The number of slots to allocate per sequence per + # step, beyond the known token ids. This is used in speculative + # decoding to store KV activations of tokens which may or may not be + # accepted. + num_lookahead_slots: int = 0 + + # Apply a delay (of delay factor multiplied by previous + # prompt latency) before scheduling next prompt. + delay_factor: float = 0.0 + + # If True, prefill requests can be chunked based + # on the remaining max_num_batched_tokens. + enable_chunked_prefill: bool = False + + is_multimodal_model: bool = False - def __init__(self, - task: _Task, - max_num_batched_tokens: Optional[int], - max_num_seqs: int, - max_model_len: int, - num_lookahead_slots: int = 0, - delay_factor: float = 0.0, - enable_chunked_prefill: bool = False, - is_multimodal_model: bool = False, - preemption_mode: Optional[str] = None, - num_scheduler_steps: int = 1, - multi_step_stream_outputs: bool = False, - send_delta_data: bool = False, - policy: str = "fcfs") -> None: - if max_num_batched_tokens is None: - if enable_chunked_prefill: - if num_scheduler_steps > 1: + # Whether to perform preemption by swapping or + # recomputation. If not specified, we determine the mode as follows: + # We use recomputation by default since it incurs lower overhead than + # swapping. However, when the sequence group has multiple sequences + # (e.g., beam search), recomputation is not currently supported. In + # such a case, we use swapping instead. + preemption_mode: Optional[str] = None + + num_scheduler_steps: int = 1 + + multi_step_stream_outputs: bool = False + + # Private API. If used, scheduler sends delta data to + # workers instead of an entire data. It should be enabled only + # when SPMD worker architecture is enabled. I.e., + # VLLM_USE_RAY_SPMD_WORKER=1 + send_delta_data: bool = False + + # The scheduling policy to use. "fcfs" (default) or "priority". 
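Both configs now follow the same shape: plain dataclass fields for the user-facing knobs, plus derived state filled in by __post_init__. The pattern in miniature (standalone illustration, not vLLM code):

    from dataclasses import dataclass, field

    @dataclass
    class MiniParallelConfig:
        pipeline_parallel_size: int = 1
        tensor_parallel_size: int = 1
        world_size: int = field(init=False)  # derived, not user-settable

        def __post_init__(self) -> None:
            self.world_size = (self.pipeline_parallel_size *
                               self.tensor_parallel_size)

    assert MiniParallelConfig(tensor_parallel_size=4).world_size == 4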
+ policy: str = "fcfs" + + chunked_prefill_enabled: bool = field(init=False) + + def __post_init__(self) -> None: + if self.max_num_batched_tokens is None: + if self.enable_chunked_prefill: + if self.num_scheduler_steps > 1: # Multi-step Chunked-Prefill doesn't allow prompt-chunking # for now. Have max_num_batched_tokens set to max_model_len # so we don't reject sequences on account of a short # max_num_batched_tokens. - max_num_batched_tokens = max(max_model_len, 2048) + self.max_num_batched_tokens = max(self.max_model_len, 2048) else: # It is the values that have the best balance between ITL # and TTFT on A100. Note it is not optimized for throughput. - max_num_batched_tokens = 512 + self.max_num_batched_tokens = 512 else: # If max_model_len is too short, use 2048 as the default value # for higher throughput. - max_num_batched_tokens = max(max_model_len, 2048) + self.max_num_batched_tokens = max(self.max_model_len, 2048) - if task == "embedding": + if self.task == "embedding": # For embedding, choose specific value for higher throughput - max_num_batched_tokens = max( - max_num_batched_tokens, + self.max_num_batched_tokens = max( + self.max_num_batched_tokens, _EMBEDDING_MODEL_MAX_NUM_BATCHED_TOKENS, ) - if is_multimodal_model: + if self.is_multimodal_model: # The value needs to be at least the number of multimodal tokens - max_num_batched_tokens = max( - max_num_batched_tokens, + self.max_num_batched_tokens = max( + self.max_num_batched_tokens, _MULTIMODAL_MODEL_MAX_NUM_BATCHED_TOKENS, ) - self.max_num_batched_tokens = max_num_batched_tokens - - if enable_chunked_prefill: + if self.enable_chunked_prefill: logger.info( "Chunked prefill is enabled with max_num_batched_tokens=%d.", self.max_num_batched_tokens) - self.task: Final = task - self.max_num_seqs = max_num_seqs - self.max_model_len = max_model_len - self.num_lookahead_slots = num_lookahead_slots - self.delay_factor = delay_factor - self.chunked_prefill_enabled = enable_chunked_prefill - self.preemption_mode = preemption_mode - self.num_scheduler_steps = num_scheduler_steps - self.multi_step_stream_outputs = multi_step_stream_outputs - self.send_delta_data = send_delta_data - self.policy = policy + self.chunked_prefill_enabled = self.enable_chunked_prefill self._verify_args() def _verify_args(self) -> None: @@ -2293,10 +2289,10 @@ class VllmConfig: model_config: ModelConfig = field(default=None, init=True) # type: ignore cache_config: CacheConfig = field(default=None, init=True) # type: ignore - parallel_config: ParallelConfig = field(default=None, - init=True) # type: ignore - scheduler_config: SchedulerConfig = field(default=None, - init=True) # type: ignore + parallel_config: ParallelConfig = field(default_factory=ParallelConfig, + init=True) + scheduler_config: SchedulerConfig = field(default_factory=SchedulerConfig, + init=True) device_config: DeviceConfig = field(default=None, init=True) # type: ignore load_config: LoadConfig = field(default=None, init=True) # type: ignore diff --git a/vllm/engine/arg_utils.py b/vllm/engine/arg_utils.py index 88862a185ac75..82f1ef51255e9 100644 --- a/vllm/engine/arg_utils.py +++ b/vllm/engine/arg_utils.py @@ -191,6 +191,7 @@ class EngineArgs: override_neuron_config: Optional[Dict[str, Any]] = None override_pooler_config: Optional[PoolerConfig] = None compilation_config: Optional[CompilationConfig] = None + worker_cls: str = "auto" def __post_init__(self): if not self.tokenizer: @@ -887,6 +888,12 @@ def add_cli_args(parser: FlexibleArgumentParser) -> FlexibleArgumentParser: 'compilers, using 
-O without space is also ' 'supported. -O3 is equivalent to -O 3.') + parser.add_argument( + '--worker-cls', + type=str, + default="auto", + help='The worker class to use for distributed execution.') + return parser @classmethod @@ -999,7 +1006,9 @@ def create_engine_config(self) -> VllmConfig: self.tokenizer_pool_extra_config, ), ray_workers_use_nsight=self.ray_workers_use_nsight, - distributed_executor_backend=self.distributed_executor_backend) + distributed_executor_backend=self.distributed_executor_backend, + worker_cls=self.worker_cls, + ) max_model_len = model_config.max_model_len use_long_context = max_model_len > 32768 diff --git a/vllm/executor/cpu_executor.py b/vllm/executor/cpu_executor.py index 1542a2ae367eb..336f9bc8efb20 100644 --- a/vllm/executor/cpu_executor.py +++ b/vllm/executor/cpu_executor.py @@ -115,13 +115,8 @@ def _create_worker( local_rank: int = 0, rank: int = 0, ): - worker_module_name = "vllm.worker.cpu_worker" - worker_class_name = "CPUWorker" - wrapper = WorkerWrapperBase( - worker_module_name=worker_module_name, - worker_class_name=worker_class_name, - ) + wrapper = WorkerWrapperBase(vllm_config=self.vllm_config) assert self.distributed_init_method is not None diff --git a/vllm/executor/gpu_executor.py b/vllm/executor/gpu_executor.py index c65d0836e5ff7..7fa34456028dd 100644 --- a/vllm/executor/gpu_executor.py +++ b/vllm/executor/gpu_executor.py @@ -1,4 +1,4 @@ -from typing import Any, Callable, Dict, List, Optional, Set, Tuple, Type, Union +from typing import Any, Dict, List, Optional, Set, Tuple, Union from vllm.executor.executor_base import ExecutorAsyncBase, ExecutorBase from vllm.logger import init_logger @@ -8,19 +8,14 @@ from vllm.sequence import ExecuteModelRequest, PoolerOutput from vllm.utils import (get_distributed_init_method, get_ip, get_open_port, make_async) -from vllm.worker.worker_base import WorkerBase, WorkerWrapperBase +from vllm.worker.worker_base import WorkerWrapperBase logger = init_logger(__name__) -def create_worker(worker_module_name: str, worker_class_name: str, - worker_class_fn: Optional[Callable[[], Type[WorkerBase]]], - **kwargs): - wrapper = WorkerWrapperBase( - worker_module_name=worker_module_name, - worker_class_name=worker_class_name, - worker_class_fn=worker_class_fn, - ) +def create_worker(**kwargs): + vllm_config = kwargs.get("vllm_config") + wrapper = WorkerWrapperBase(vllm_config=vllm_config) wrapper.init_worker(**kwargs) return wrapper.worker @@ -57,43 +52,11 @@ def _get_worker_kwargs( or (rank % self.parallel_config.tensor_parallel_size == 0), ) - def _get_worker_module_and_class( - self) -> Tuple[str, str, Optional[Callable[[], Type[WorkerBase]]]]: - worker_class_fn = None - if self.scheduler_config.is_multi_step: - worker_module_name = "vllm.worker.multi_step_worker" - worker_class_name = "MultiStepWorker" - elif self.speculative_config: - worker_module_name = "vllm.spec_decode.spec_decode_worker" - worker_class_name = "create_spec_worker" - else: - worker_module_name = "vllm.worker.worker" - worker_class_name = "Worker" - return (worker_module_name, worker_class_name, worker_class_fn) - - def _get_create_worker_kwargs( - self, - local_rank: int = 0, - rank: int = 0, - distributed_init_method: Optional[str] = None) -> Dict: - worker_kwargs = self._get_worker_kwargs(local_rank, rank, - distributed_init_method) - - (worker_module_name, worker_class_name, - worker_class_fn) = self._get_worker_module_and_class() - worker_kwargs.update( - worker_module_name=worker_module_name, - worker_class_name=worker_class_name, - 
worker_class_fn=worker_class_fn, - ) - - return worker_kwargs - def _create_worker(self, local_rank: int = 0, rank: int = 0, distributed_init_method: Optional[str] = None): - return create_worker(**self._get_create_worker_kwargs( + return create_worker(**self._get_worker_kwargs( local_rank=local_rank, rank=rank, distributed_init_method=distributed_init_method)) diff --git a/vllm/executor/hpu_executor.py b/vllm/executor/hpu_executor.py index 220e9eee87bb3..c9b7bfa71edfa 100644 --- a/vllm/executor/hpu_executor.py +++ b/vllm/executor/hpu_executor.py @@ -48,10 +48,7 @@ def _create_worker(self, local_rank: int = 0, rank: int = 0, distributed_init_method: Optional[str] = None): - wrapper = WorkerWrapperBase( - worker_module_name="vllm.worker.hpu_worker", - worker_class_name="HPUWorker", - ) + wrapper = WorkerWrapperBase(vllm_config=self.vllm_config) wrapper.init_worker(**self._get_worker_kwargs(local_rank, rank, distributed_init_method)) return wrapper.worker diff --git a/vllm/executor/multiproc_gpu_executor.py b/vllm/executor/multiproc_gpu_executor.py index 3eb14fb931925..a6c05a71d2b6f 100644 --- a/vllm/executor/multiproc_gpu_executor.py +++ b/vllm/executor/multiproc_gpu_executor.py @@ -90,7 +90,7 @@ def _init_executor(self) -> None: result_handler, partial( create_worker, - **self._get_create_worker_kwargs( + **self._get_worker_kwargs( rank=rank, local_rank=rank, distributed_init_method=distributed_init_method, diff --git a/vllm/executor/neuron_executor.py b/vllm/executor/neuron_executor.py index 02d37cd7fbf23..31e6fdc3ab1bb 100644 --- a/vllm/executor/neuron_executor.py +++ b/vllm/executor/neuron_executor.py @@ -7,6 +7,7 @@ from vllm.sequence import ExecuteModelRequest from vllm.utils import (get_distributed_init_method, get_ip, get_open_port, make_async) +from vllm.worker.worker_base import WorkerWrapperBase logger = init_logger(__name__) @@ -25,10 +26,10 @@ def _init_executor(self) -> None: self._init_worker() def _init_worker(self): - from vllm.worker.neuron_worker import NeuronWorker + wrapper = WorkerWrapperBase(vllm_config=self.vllm_config) distributed_init_method = get_distributed_init_method( get_ip(), get_open_port()) - self.driver_worker = NeuronWorker( + self.driver_worker = wrapper.init_worker( vllm_config=self.vllm_config, local_rank=0, rank=0, diff --git a/vllm/executor/openvino_executor.py b/vllm/executor/openvino_executor.py index d06b0ccb7906e..dcd4b7621381d 100644 --- a/vllm/executor/openvino_executor.py +++ b/vllm/executor/openvino_executor.py @@ -14,6 +14,7 @@ from vllm.sequence import ExecuteModelRequest from vllm.utils import (GiB_bytes, get_distributed_init_method, get_ip, get_open_port, make_async) +from vllm.worker.worker_base import WorkerWrapperBase logger = init_logger(__name__) @@ -38,15 +39,12 @@ def _init_executor(self) -> None: self._init_worker() def _init_worker(self): - from vllm.worker.openvino_worker import OpenVINOWorker - assert ( - self.parallel_config.world_size == 1 - ), "OpenVINOExecutor only supports single CPU socket currently." 
+ wrapper = WorkerWrapperBase(vllm_config=self.vllm_config) distributed_init_method = get_distributed_init_method( get_ip(), get_open_port()) - self.driver_worker = OpenVINOWorker( + self.driver_worker = wrapper.init_worker( ov_core=self.ov_core, vllm_config=self.vllm_config, local_rank=0, diff --git a/vllm/executor/ray_gpu_executor.py b/vllm/executor/ray_gpu_executor.py index 66bab2c686c67..810b0f06ff7b2 100644 --- a/vllm/executor/ray_gpu_executor.py +++ b/vllm/executor/ray_gpu_executor.py @@ -91,17 +91,6 @@ def _configure_ray_workers_use_nsight(self, return ray_remote_kwargs - def _get_worker_wrapper_args(self) -> Dict[str, Any]: - (worker_module_name, worker_class_name, - worker_class_fn) = self._get_worker_module_and_class() - - return dict( - worker_module_name=worker_module_name, - worker_class_name=worker_class_name, - worker_class_fn=worker_class_fn, - trust_remote_code=self.model_config.trust_remote_code, - ) - # child class could overwrite this to return actual env vars. def _get_env_vars_to_be_updated(self): return self._env_vars_for_all_workers @@ -135,7 +124,6 @@ def _init_workers_ray(self, placement_group: "PlacementGroup", # Create the workers. driver_ip = get_ip() - worker_wrapper_kwargs = self._get_worker_wrapper_args() for bundle_id, bundle in enumerate(placement_group.bundle_specs): if not bundle.get("GPU", 0): continue @@ -150,7 +138,7 @@ def _init_workers_ray(self, placement_group: "PlacementGroup", num_gpus=num_gpus, scheduling_strategy=scheduling_strategy, **ray_remote_kwargs, - )(RayWorkerWrapper).remote(**worker_wrapper_kwargs) + )(RayWorkerWrapper).remote(vllm_config=self.vllm_config) if self.use_ray_spmd_worker: self.workers.append(worker) @@ -161,7 +149,7 @@ def _init_workers_ray(self, placement_group: "PlacementGroup", # as the resource holder for the driver process. self.driver_dummy_worker = worker self.driver_worker = RayWorkerWrapper( - **worker_wrapper_kwargs) + vllm_config=self.vllm_config) else: # Else, added to the list of workers. 
self.workers.append(worker) diff --git a/vllm/executor/ray_hpu_executor.py b/vllm/executor/ray_hpu_executor.py index a24bab6df370e..6fe8c6c403358 100644 --- a/vllm/executor/ray_hpu_executor.py +++ b/vllm/executor/ray_hpu_executor.py @@ -2,8 +2,7 @@ import os from collections import defaultdict from itertools import islice, repeat -from typing import (TYPE_CHECKING, Any, Callable, Dict, List, Optional, Tuple, - Type) +from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple import msgspec @@ -18,7 +17,6 @@ from vllm.utils import (_run_task_with_lock, get_distributed_init_method, get_ip, get_open_port, get_vllm_instance_id, make_async) -from vllm.worker.worker_base import WorkerBase if ray is not None: from ray.util.scheduling_strategies import PlacementGroupSchedulingStrategy @@ -81,33 +79,6 @@ def shutdown(self) -> None: def finish_measurements(self): self._run_workers("finish_measurements") - def _get_worker_module_and_class( - self - ) -> Tuple[str, str, Optional[Callable[[], - Type[WorkerBase]]]]: # noqa: F821 - worker_class_fn = None - if self.scheduler_config.is_multi_step: - raise NotImplementedError( - "Multi-step execution is not implemented for HPU") - elif self.speculative_config: - raise NotImplementedError( - "Speculative decoding is not implemented for HPU") - else: - worker_module_name = "vllm.worker.hpu_worker" - worker_class_name = "HPUWorker" - return (worker_module_name, worker_class_name, worker_class_fn) - - def _get_worker_wrapper_args(self) -> Dict[str, Any]: - (worker_module_name, worker_class_name, - worker_class_fn) = self._get_worker_module_and_class() - - return dict( - worker_module_name=worker_module_name, - worker_class_name=worker_class_name, - worker_class_fn=worker_class_fn, - trust_remote_code=self.model_config.trust_remote_code, - ) - def _init_workers_ray(self, placement_group: "PlacementGroup", **ray_remote_kwargs): # Otherwise, the ray workers are allocated with a full GPU. @@ -128,7 +99,6 @@ def _init_workers_ray(self, placement_group: "PlacementGroup", # Create the workers. driver_ip = get_ip() - worker_wrapper_kwargs = self._get_worker_wrapper_args() for bundle_id, bundle in enumerate(placement_group.bundle_specs): if not bundle.get("HPU", 0): continue @@ -144,7 +114,7 @@ def _init_workers_ray(self, placement_group: "PlacementGroup", resources={'HPU': num_gpus}, scheduling_strategy=scheduling_strategy, **ray_remote_kwargs, - )(RayWorkerWrapper).remote(**worker_wrapper_kwargs) + )(RayWorkerWrapper).remote(vllm_config=self.vllm_config) if self.use_ray_spmd_worker: self.workers.append(worker) @@ -155,7 +125,7 @@ def _init_workers_ray(self, placement_group: "PlacementGroup", # as the resource holder for the driver process. self.driver_dummy_worker = worker self.driver_worker = RayWorkerWrapper( - **worker_wrapper_kwargs) + vllm_config=self.vllm_config) else: # Else, added to the list of workers. 
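All of these executors now hand the full VllmConfig to their worker wrapper; the concrete worker class is chosen later from parallel_config.worker_cls, a dotted string such as "vllm.worker.worker.Worker" that the wrapper resolves at init time (see the worker_base.py hunk further down). A sketch of that resolution step, equivalent in spirit to the resolve_obj_by_qualname helper it relies on:

    import importlib

    def resolve_obj_by_qualname(qualname: str):
        # Map "pkg.module.Name" to the Name object it refers to.
        module_name, _, obj_name = qualname.rpartition(".")
        return getattr(importlib.import_module(module_name), obj_name)

    # The platform sets e.g. parallel_config.worker_cls = "vllm.worker.worker.Worker",
    # and the wrapper later does: worker_cls = resolve_obj_by_qualname(...); worker_cls(...)
    assert resolve_obj_by_qualname("collections.OrderedDict").__name__ == "OrderedDict"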
self.workers.append(worker) diff --git a/vllm/executor/ray_tpu_executor.py b/vllm/executor/ray_tpu_executor.py index d02fecb46f007..c227b5e283c68 100644 --- a/vllm/executor/ray_tpu_executor.py +++ b/vllm/executor/ray_tpu_executor.py @@ -69,14 +69,6 @@ def _init_workers_ray(self, placement_group: "PlacementGroup", placement_group_bundle_index=bundle_id, ) - assert self.speculative_config is None - if self.scheduler_config.is_multi_step: - worker_module_name = "vllm.worker.multi_step_tpu_worker" - worker_class_name = "MultiStepTPUWorker" - else: - worker_module_name = "vllm.worker.tpu_worker" - worker_class_name = "TPUWorker" - # GKE does not fetch environment information from metadata server # and instead sets these from within the Ray process. Therefore we # need to override the Ray environment variables manually. @@ -95,11 +87,7 @@ def _init_workers_ray(self, placement_group: "PlacementGroup", resources={"TPU": 1}, scheduling_strategy=scheduling_strategy, **ray_remote_kwargs, - )(RayWorkerWrapper).remote( - worker_module_name=worker_module_name, - worker_class_name=worker_class_name, - trust_remote_code=self.model_config.trust_remote_code, - ) + )(RayWorkerWrapper).remote(vllm_config=self.vllm_config) if override_env: worker.override_env_vars.remote(override_env) @@ -109,10 +97,7 @@ def _init_workers_ray(self, placement_group: "PlacementGroup", # as the resource holder for the driver process. self.driver_dummy_worker = worker self.driver_worker = RayWorkerWrapper( - worker_module_name=worker_module_name, - worker_class_name=worker_class_name, - trust_remote_code=self.model_config.trust_remote_code, - ) + vllm_config=self.vllm_config) else: # Else, added to the list of workers. self.workers.append(worker) diff --git a/vllm/executor/xpu_executor.py b/vllm/executor/xpu_executor.py index ba6177e51a453..722b86a95ff8a 100644 --- a/vllm/executor/xpu_executor.py +++ b/vllm/executor/xpu_executor.py @@ -1,4 +1,4 @@ -from typing import Callable, List, Optional, Tuple, Type, Union +from typing import List, Optional, Union from vllm.executor.executor_base import ExecutorAsyncBase from vllm.executor.gpu_executor import GPUExecutor @@ -6,7 +6,6 @@ from vllm.model_executor.layers.sampler import SamplerOutput from vllm.sequence import ExecuteModelRequest, PoolerOutput from vllm.utils import make_async -from vllm.worker.worker_base import WorkerBase logger = init_logger(__name__) @@ -22,17 +21,6 @@ def _init_executor(self) -> None: GPUExecutor._init_executor(self) - def _get_worker_module_and_class( - self) -> Tuple[str, str, Optional[Callable[[], Type[WorkerBase]]]]: - worker_class_fn = None - if self.speculative_config is not None: - raise NotImplementedError( - "XPU does not support speculative decoding") - else: - worker_module_name = "vllm.worker.xpu_worker" - worker_class_name = "XPUWorker" - return (worker_module_name, worker_class_name, worker_class_fn) - def execute_model( self, execute_model_req: ExecuteModelRequest ) -> Optional[List[Union[SamplerOutput, PoolerOutput]]]: diff --git a/vllm/platforms/cpu.py b/vllm/platforms/cpu.py index 0c4c916406223..9be9031dc3baf 100644 --- a/vllm/platforms/cpu.py +++ b/vllm/platforms/cpu.py @@ -84,3 +84,5 @@ def check_and_update_config(cls, vllm_config: VllmConfig) -> None: "distributed executor backend."), parallel_config.distributed_executor_backend) parallel_config.distributed_executor_backend = "mp" + if parallel_config.worker_cls == "auto": + parallel_config.worker_cls = "vllm.worker.cpu_worker.CPUWorker" diff --git a/vllm/platforms/cuda.py 
b/vllm/platforms/cuda.py index b38dd7c936896..cf0d41081a5aa 100644 --- a/vllm/platforms/cuda.py +++ b/vllm/platforms/cuda.py @@ -4,7 +4,7 @@ import os from functools import lru_cache, wraps -from typing import Callable, List, Tuple, TypeVar +from typing import TYPE_CHECKING, Callable, List, Tuple, TypeVar import pynvml import torch @@ -16,6 +16,11 @@ from .interface import DeviceCapability, Platform, PlatformEnum +if TYPE_CHECKING: + from vllm.config import VllmConfig +else: + VllmConfig = None + logger = init_logger(__name__) _P = ParamSpec("_P") @@ -157,3 +162,17 @@ def is_full_nvlink(cls, physical_device_ids: List[int]) -> bool: " machine has no NVLink equipped.") return False return True + + @classmethod + def check_and_update_config(cls, vllm_config: VllmConfig) -> None: + parallel_config = vllm_config.parallel_config + scheduler_config = vllm_config.scheduler_config + if parallel_config.worker_cls == "auto": + if scheduler_config.is_multi_step: + parallel_config.worker_cls = \ + "vllm.worker.multi_step_worker.MultiStepWorker" + elif vllm_config.speculative_config: + parallel_config.worker_cls = \ + "vllm.spec_decode.spec_decode_worker.create_spec_worker" + else: + parallel_config.worker_cls = "vllm.worker.worker.Worker" diff --git a/vllm/platforms/hpu.py b/vllm/platforms/hpu.py index 36d944b3f24b8..a8f568d31d5a7 100644 --- a/vllm/platforms/hpu.py +++ b/vllm/platforms/hpu.py @@ -1,7 +1,14 @@ +from typing import TYPE_CHECKING + import torch from .interface import Platform, PlatformEnum, _Backend +if TYPE_CHECKING: + from vllm.config import VllmConfig +else: + VllmConfig = None + class HpuPlatform(Platform): _enum = PlatformEnum.HPU @@ -14,3 +21,19 @@ def get_default_attn_backend(cls, selected_backend: _Backend) -> _Backend: @staticmethod def inference_mode(): return torch.no_grad() + + @classmethod + def check_and_update_config(cls, vllm_config: VllmConfig) -> None: + + scheduler_config = vllm_config.scheduler_config + if scheduler_config.is_multi_step: + raise NotImplementedError( + "Multi-step execution is not implemented for HPU") + + if vllm_config.speculative_config is not None: + raise NotImplementedError( + "Speculative decoding is not implemented for HPU") + + parallel_config = vllm_config.parallel_config + if parallel_config.worker_cls == "auto": + parallel_config.worker_cls = "vllm.worker.hpu_worker.HPUWorker" diff --git a/vllm/platforms/neuron.py b/vllm/platforms/neuron.py index 57e3c0dfae84c..4c4d778ed3dd4 100644 --- a/vllm/platforms/neuron.py +++ b/vllm/platforms/neuron.py @@ -1,5 +1,12 @@ +from typing import TYPE_CHECKING + from .interface import Platform, PlatformEnum +if TYPE_CHECKING: + from vllm.config import VllmConfig +else: + VllmConfig = None + class NeuronPlatform(Platform): _enum = PlatformEnum.NEURON @@ -8,3 +15,10 @@ class NeuronPlatform(Platform): @classmethod def get_device_name(cls, device_id: int = 0) -> str: return "neuron" + + @classmethod + def check_and_update_config(cls, vllm_config: VllmConfig) -> None: + parallel_config = vllm_config.parallel_config + if parallel_config.worker_cls == "auto": + parallel_config.worker_cls = \ + "vllm.worker.neuron_worker.NeuronWorker" diff --git a/vllm/platforms/openvino.py b/vllm/platforms/openvino.py index 130b8eec1b386..33a41933e9fff 100644 --- a/vllm/platforms/openvino.py +++ b/vllm/platforms/openvino.py @@ -1,3 +1,5 @@ +from typing import TYPE_CHECKING + import torch import vllm.envs as envs @@ -5,6 +7,11 @@ from .interface import Platform, PlatformEnum, _Backend +if TYPE_CHECKING: + from vllm.config import 
VllmConfig +else: + VllmConfig = None + logger = init_logger(__name__) @@ -38,3 +45,14 @@ def is_openvino_gpu(self) -> bool: def is_pin_memory_available(self) -> bool: logger.warning("Pin memory is not supported on OpenViNO.") return False + + @classmethod + def check_and_update_config(cls, vllm_config: VllmConfig) -> None: + parallel_config = vllm_config.parallel_config + assert ( + parallel_config.world_size == 1 + ), "OpenVINOExecutor only supports single CPU socket currently." + + if parallel_config.worker_cls == "auto": + parallel_config.worker_cls = \ + "vllm.worker.openvino_worker.OpenVINOWorker" diff --git a/vllm/platforms/rocm.py b/vllm/platforms/rocm.py index c62241d8bb47b..3fe8c01c15787 100644 --- a/vllm/platforms/rocm.py +++ b/vllm/platforms/rocm.py @@ -1,5 +1,6 @@ import os from functools import lru_cache +from typing import TYPE_CHECKING import torch @@ -7,6 +8,11 @@ from .interface import DeviceCapability, Platform, PlatformEnum, _Backend +if TYPE_CHECKING: + from vllm.config import VllmConfig +else: + VllmConfig = None + logger = init_logger(__name__) try: @@ -58,3 +64,17 @@ def get_device_name(cls, device_id: int = 0) -> str: def get_device_total_memory(cls, device_id: int = 0) -> int: device_props = torch.cuda.get_device_properties(device_id) return device_props.total_memory + + @classmethod + def check_and_update_config(cls, vllm_config: VllmConfig) -> None: + parallel_config = vllm_config.parallel_config + scheduler_config = vllm_config.scheduler_config + if parallel_config.worker_cls == "auto": + if scheduler_config.is_multi_step: + parallel_config.worker_cls = \ + "vllm.worker.multi_step_worker.MultiStepWorker" + elif vllm_config.speculative_config: + parallel_config.worker_cls = \ + "vllm.spec_decode.spec_decode_worker.create_spec_worker" + else: + parallel_config.worker_cls = "vllm.worker.worker.Worker" diff --git a/vllm/platforms/tpu.py b/vllm/platforms/tpu.py index 863875ef5c2d6..513cfa54687dc 100644 --- a/vllm/platforms/tpu.py +++ b/vllm/platforms/tpu.py @@ -48,3 +48,15 @@ def check_and_update_config(cls, vllm_config: VllmConfig) -> None: if compilation_config.backend == "": compilation_config.backend = "openxla" + + assert vllm_config.speculative_config is None, \ + "TPU does not support speculative decoding" + + parallel_config = vllm_config.parallel_config + scheduler_config = vllm_config.scheduler_config + if parallel_config.worker_cls == "auto": + if scheduler_config.is_multi_step: + parallel_config.worker_cls = \ + "vllm.worker.multi_step_tpu_worker.MultiStepTPUWorker" + else: + parallel_config.worker_cls = "vllm.worker.tpu_worker.TPUWorker" diff --git a/vllm/platforms/xpu.py b/vllm/platforms/xpu.py index 536e17a5f93e8..b2ee0ef2f71cd 100644 --- a/vllm/platforms/xpu.py +++ b/vllm/platforms/xpu.py @@ -57,6 +57,10 @@ def check_and_update_config(cls, vllm_config: VllmConfig) -> None: "mode.") model_config.enforce_eager = True + if vllm_config.speculative_config is not None: + raise NotImplementedError( + "XPU does not support speculative decoding") + # check and update parallel config parallel_config = vllm_config.parallel_config if (parallel_config.distributed_executor_backend is not None @@ -66,3 +70,5 @@ def check_and_update_config(cls, vllm_config: VllmConfig) -> None: " executor backend.", parallel_config.distributed_executor_backend) parallel_config.distributed_executor_backend = "ray" + if parallel_config.worker_cls == "auto": + parallel_config.worker_cls = "vllm.worker.xpu_worker.XPUWorker" diff --git a/vllm/worker/worker_base.py 
b/vllm/worker/worker_base.py index cf8a4946a71c4..e7fec6d17eecd 100644 --- a/vllm/worker/worker_base.py +++ b/vllm/worker/worker_base.py @@ -1,9 +1,8 @@ import dataclasses -import importlib import os import time from abc import ABC, abstractmethod -from typing import Any, Callable, Dict, List, Optional, Set, Tuple, Type, Union +from typing import Any, Dict, List, Optional, Set, Tuple, Type, Union import torch @@ -15,7 +14,7 @@ from vllm.platforms import current_platform from vllm.sequence import ExecuteModelRequest, IntermediateTensors from vllm.utils import (enable_trace_function_call_for_thread, - update_environment_variables) + resolve_obj_by_qualname, update_environment_variables) from vllm.worker.model_runner_base import (BroadcastableModelInput, ModelRunnerBase, ModelRunnerInputBase) @@ -411,23 +410,14 @@ class WorkerWrapperBase: We first instantiate the WorkerWrapper, which remembers the worker module and class name. Then, when we call `update_environment_variables`, and the real initialization happens in `init_worker`. - - If worker_class_fn is specified, it will be executed to get the worker - class. - Otherwise, the worker class will be obtained by dynamically importing it - using worker_module_name and worker_class_name. """ def __init__( self, - worker_module_name: str, - worker_class_name: str, - trust_remote_code: bool = False, - worker_class_fn: Optional[Callable[[], - Type[WorkerBase]]] = None) -> None: - self.worker_module_name = worker_module_name - self.worker_class_name = worker_class_name - self.worker_class_fn = worker_class_fn + vllm_config: VllmConfig, + ) -> None: + self.vllm_config = vllm_config + trust_remote_code = vllm_config.model_config.trust_remote_code self.worker: Optional[WorkerBase] = None if trust_remote_code: # note: lazy import to avoid importing torch before initializing @@ -456,12 +446,8 @@ def init_worker(self, *args, **kwargs): from vllm.plugins import load_general_plugins load_general_plugins() - if self.worker_class_fn: - worker_class = self.worker_class_fn() - else: - mod = importlib.import_module(self.worker_module_name) - worker_class = getattr(mod, self.worker_class_name) - + worker_class = resolve_obj_by_qualname( + self.vllm_config.parallel_config.worker_cls) self.worker = worker_class(*args, **kwargs) assert self.worker is not None From b6374e09b0af4f8fa4c0b911b3cd1bd45342ead6 Mon Sep 17 00:00:00 2001 From: Isotr0py Date: Fri, 22 Nov 2024 15:01:56 +0800 Subject: [PATCH 084/122] [Bugfix] Fix Phi-3 BNB quantization with tensor parallel (#9948) Signed-off-by: Isotr0py <2037008807@qq.com> --- vllm/model_executor/layers/linear.py | 19 +++++++--- vllm/model_executor/model_loader/loader.py | 43 +++++++++++++++++++++- 2 files changed, 56 insertions(+), 6 deletions(-) diff --git a/vllm/model_executor/layers/linear.py b/vllm/model_executor/layers/linear.py index 2471c160d66b7..46ef11e7d02c6 100644 --- a/vllm/model_executor/layers/linear.py +++ b/vllm/model_executor/layers/linear.py @@ -1,3 +1,4 @@ +import itertools from abc import abstractmethod from typing import Dict, List, Optional, Tuple @@ -41,12 +42,12 @@ def adjust_marlin_shard(param, shard_size, shard_offset): def adjust_bitsandbytes_4bit_shard(param: Parameter, - qkv_offsets: Dict[str, Tuple[int, int]], + shard_offsets: Dict[str, Tuple[int, int]], loaded_shard_id: str) -> Tuple[int, int]: """Adjust the quantization offsets and sizes for BitsAndBytes sharding.""" - total, _ = qkv_offsets["total"] - orig_offset, orig_size = qkv_offsets[loaded_shard_id] + total, _ = shard_offsets["total"] + 
orig_offset, orig_size = shard_offsets[loaded_shard_id] quantized_total = param.data.shape[0] quantized_offset = orig_offset * quantized_total // total @@ -499,9 +500,17 @@ def weight_loader(self, # Special case for Marlin. shard_size, shard_offset = adjust_marlin_shard( param, shard_size, shard_offset) + if use_bitsandbytes_4bit: - shard_size = loaded_weight.shape[output_dim] // 2 - shard_offset = shard_size * shard_id + index = list(itertools.accumulate([0] + self.output_sizes)) + orig_offsets = { + str(i): (index[i], size) + for i, size in enumerate(self.output_sizes) + } + orig_offsets["total"] = (self.output_size, 0) + shard_size, shard_offset = adjust_bitsandbytes_4bit_shard( + param, orig_offsets, str(shard_id)) + loaded_weight_shard = loaded_weight.narrow( output_dim, shard_offset, shard_size) self.weight_loader(param, loaded_weight_shard, shard_id) diff --git a/vllm/model_executor/model_loader/loader.py b/vllm/model_executor/model_loader/loader.py index 936c2fe415375..34e0860162260 100644 --- a/vllm/model_executor/model_loader/loader.py +++ b/vllm/model_executor/model_loader/loader.py @@ -5,6 +5,7 @@ import fnmatch import glob import inspect +import itertools import json import math import os @@ -27,7 +28,9 @@ get_tensor_model_parallel_world_size) from vllm.envs import VLLM_USE_MODELSCOPE from vllm.logger import init_logger -from vllm.model_executor.layers.linear import (ReplicatedLinear, +from vllm.model_executor.layers.linear import (MergedColumnParallelLinear, + QKVParallelLinear, + ReplicatedLinear, RowParallelLinear) from vllm.model_executor.layers.quantization.base_config import ( QuantizeMethodBase) @@ -936,6 +939,34 @@ def _unquantized_generator(self, hf_weights_files, use_safetensors, end_index = total_size // tp_size * (tp_rank + 1) weight_sub_tensor = weight_tensor[..., start_index:end_index] + # Weights have fused on disk. In this case, we assume that the + # weight and module use same name. + elif any( + weight_name.startswith(module) + for module in self.maybe_fused_weights_modules): + # special case for fused weights + # get the size of each shard weight tensor + total_shard_sizes = next( + (sizes for module, sizes in + self.maybe_fused_weights_modules.items() + if weight_name.startswith(module))) + total_size = weight_tensor.size(0) + assert total_size == sum(total_shard_sizes) + # get the start/end index of each shard weight tensor + total_start_index = list( + itertools.accumulate([0] + total_shard_sizes))[:-1] + shard_weights_index = [ + (idx + size // tp_size * tp_rank, + idx + size // tp_size * (tp_rank + 1)) + for idx, size in zip(total_start_index, + total_shard_sizes) + ] + # slice and reorder the weight tensor + weight_tensor = [ + weight_tensor[start_index:end_index, ...] + for start_index, end_index in shard_weights_index + ] + weight_sub_tensor = torch.cat(weight_tensor, dim=0) # Shard by row else: total_size = weight_tensor.size(0) @@ -985,12 +1016,22 @@ def _load_weights(self, model_config: ModelConfig, else: self.target_modules = self.default_target_modules + # Modules whose weights might have fused on disk + # we need their output_sizes to make shard in flight correctly with TP + self.maybe_fused_weights_modules: Dict[str, List[int]] = {} + for name, module in model.named_modules(): # Some modules like `ReplicatedLinear` should not have their weights # sharded. The reason for implementing it this way is to avoid new # static variable in the model implementation. 
if isinstance(module, (ReplicatedLinear, )): self.unsharded_weights_modules.append(name) + # `QKVParallelLinear` and `MergedColumnParallelLinear` might have + # fused weights on disk. We need to use the output sizes of these + # modules to shard the weights correctly. + elif isinstance(module, + (QKVParallelLinear, MergedColumnParallelLinear)): + self.maybe_fused_weights_modules[name] = module.output_sizes # In TP, these weights are partitioned along the column # dimension (dim=-1) elif isinstance(module, (RowParallelLinear, )): From 11fcf0e0661365f24bfff9591434a0cec640df6c Mon Sep 17 00:00:00 2001 From: Noam Gat Date: Fri, 22 Nov 2024 09:59:47 +0200 Subject: [PATCH 085/122] Remove token-adding chat embedding params (#10551) Signed-off-by: Noam Gat --- vllm/entrypoints/openai/protocol.py | 16 ---------------- vllm/entrypoints/openai/serving_embedding.py | 6 ++++-- 2 files changed, 4 insertions(+), 18 deletions(-) diff --git a/vllm/entrypoints/openai/protocol.py b/vllm/entrypoints/openai/protocol.py index a82212677f63a..9db5951e5fe5b 100644 --- a/vllm/entrypoints/openai/protocol.py +++ b/vllm/entrypoints/openai/protocol.py @@ -760,22 +760,6 @@ class EmbeddingChatRequest(OpenAIBaseModel): # doc: end-chat-embedding-pooling-params # doc: begin-chat-embedding-extra-params - add_generation_prompt: bool = Field( - default=True, - description= - ("If true, the generation prompt will be added to the chat template. " - "This is a parameter used by chat template in tokenizer config of the " - "model."), - ) - continue_final_message: bool = Field( - default=False, - description= - ("If this is set, the chat will be formatted so that the final " - "message in the chat is open-ended, without any EOS tokens. The " - "model will continue this message rather than starting a new one. " - "This allows you to \"prefill\" part of the model's response for it. " - "Cannot be used at the same time as `add_generation_prompt`."), - ) add_special_tokens: bool = Field( default=False, description=( diff --git a/vllm/entrypoints/openai/serving_embedding.py b/vllm/entrypoints/openai/serving_embedding.py index 74ad7389784fc..c84a7d2d8e13e 100644 --- a/vllm/entrypoints/openai/serving_embedding.py +++ b/vllm/entrypoints/openai/serving_embedding.py @@ -148,8 +148,10 @@ async def create_embedding( chat_template=request.chat_template or self.chat_template, chat_template_content_format=self. 
chat_template_content_format, - add_generation_prompt=request.add_generation_prompt, - continue_final_message=request.continue_final_message, + # In embedding requests, we are not generating tokens, + # so there is no need to append extra tokens to the input + add_generation_prompt=False, + continue_final_message=False, truncate_prompt_tokens=truncate_prompt_tokens, add_special_tokens=request.add_special_tokens, ) From db100c5cdebc7140b57cbb40b20b5a28d7bff386 Mon Sep 17 00:00:00 2001 From: youkaichao Date: Fri, 22 Nov 2024 10:02:14 -0800 Subject: [PATCH 086/122] [bugfix] fix full graph tests (#10581) Signed-off-by: youkaichao --- tests/compile/utils.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/tests/compile/utils.py b/tests/compile/utils.py index 078c6bf9ea1df..7c92d165d05f7 100644 --- a/tests/compile/utils.py +++ b/tests/compile/utils.py @@ -4,7 +4,7 @@ from tests.quantization.utils import is_quant_method_supported from vllm import LLM, SamplingParams -from vllm.config import CompilationConfig, CompilationLevel +from vllm.config import CompilationLevel from vllm.platforms import current_platform TEST_MODELS = [ @@ -85,7 +85,7 @@ def check_full_graph_support(model, enforce_eager=True, tensor_parallel_size=tp_size, disable_custom_all_reduce=True, - compilation_config=CompilationConfig(level=optimization_level), + compilation_config=optimization_level, **model_kwargs) outputs = llm.generate(prompts, sampling_params) From eebad39f265606cfe35af4d1e0bea678516648a3 Mon Sep 17 00:00:00 2001 From: youkaichao Date: Fri, 22 Nov 2024 14:04:42 -0800 Subject: [PATCH 087/122] [torch.compile] support all attention backends (#10558) Signed-off-by: youkaichao --- tests/kernels/test_encoder_decoder_attn.py | 37 +- vllm/attention/backends/abstract.py | 23 +- vllm/attention/backends/blocksparse_attn.py | 2 +- vllm/attention/backends/flash_attn.py | 412 ++++++++---------- vllm/attention/backends/flashinfer.py | 280 +++++------- vllm/attention/backends/hpu_attn.py | 2 +- vllm/attention/backends/ipex_attn.py | 2 +- vllm/attention/backends/pallas.py | 2 +- vllm/attention/backends/rocm_flash_attn.py | 2 +- vllm/attention/backends/torch_sdpa.py | 12 +- vllm/attention/backends/utils.py | 4 +- vllm/attention/backends/xformers.py | 8 +- vllm/attention/layer.py | 81 +++- vllm/config.py | 9 +- vllm/forward_context.py | 27 +- vllm/model_executor/models/arctic.py | 15 +- vllm/model_executor/models/baichuan.py | 18 +- vllm/model_executor/models/bart.py | 48 +- vllm/model_executor/models/bloom.py | 14 +- vllm/model_executor/models/chameleon.py | 11 +- vllm/model_executor/models/chatglm.py | 25 +- vllm/model_executor/models/commandr.py | 14 +- vllm/model_executor/models/dbrx.py | 21 +- vllm/model_executor/models/deepseek.py | 9 +- vllm/model_executor/models/deepseek_v2.py | 3 +- vllm/model_executor/models/exaone.py | 3 +- vllm/model_executor/models/falcon.py | 22 +- vllm/model_executor/models/florence2.py | 10 +- vllm/model_executor/models/gemma.py | 3 +- vllm/model_executor/models/gemma2.py | 15 +- .../models/glm4_vision_encoder.py | 17 +- vllm/model_executor/models/gpt2.py | 3 +- vllm/model_executor/models/gpt_bigcode.py | 13 +- vllm/model_executor/models/gpt_j.py | 13 +- vllm/model_executor/models/gpt_neox.py | 13 +- vllm/model_executor/models/granite.py | 3 +- vllm/model_executor/models/granitemoe.py | 3 +- vllm/model_executor/models/internlm2.py | 21 +- vllm/model_executor/models/internlm2_ve.py | 23 +- vllm/model_executor/models/jais.py | 13 +- vllm/model_executor/models/jamba.py | 8 +- 
vllm/model_executor/models/llama.py | 1 + vllm/model_executor/models/minicpm.py | 11 +- vllm/model_executor/models/minicpm3.py | 9 +- vllm/model_executor/models/mixtral.py | 3 +- vllm/model_executor/models/mixtral_quant.py | 12 +- vllm/model_executor/models/molmo.py | 13 +- vllm/model_executor/models/mpt.py | 13 +- vllm/model_executor/models/nemotron.py | 3 +- vllm/model_executor/models/olmo.py | 16 +- vllm/model_executor/models/olmoe.py | 13 +- vllm/model_executor/models/orion.py | 11 +- vllm/model_executor/models/persimmon.py | 16 +- vllm/model_executor/models/phi.py | 17 +- vllm/model_executor/models/phi3_small.py | 26 +- vllm/model_executor/models/phimoe.py | 8 +- vllm/model_executor/models/qwen.py | 11 +- vllm/model_executor/models/qwen2_moe.py | 9 +- vllm/model_executor/models/solar.py | 1 + vllm/model_executor/models/stablelm.py | 16 +- vllm/model_executor/models/starcoder2.py | 15 +- vllm/model_executor/models/xverse.py | 10 +- vllm/platforms/cpu.py | 1 + vllm/platforms/cuda.py | 1 + vllm/platforms/hpu.py | 1 + vllm/platforms/interface.py | 4 + vllm/platforms/openvino.py | 1 + vllm/platforms/rocm.py | 1 + vllm/platforms/tpu.py | 1 + vllm/platforms/xpu.py | 1 + vllm/spec_decode/draft_model_runner.py | 3 +- vllm/utils.py | 3 +- vllm/v1/attention/backends/flash_attn.py | 3 +- vllm/v1/worker/gpu_model_runner.py | 4 +- vllm/worker/embedding_model_runner.py | 2 +- vllm/worker/enc_dec_model_runner.py | 2 +- vllm/worker/model_runner.py | 4 +- 77 files changed, 879 insertions(+), 651 deletions(-) diff --git a/tests/kernels/test_encoder_decoder_attn.py b/tests/kernels/test_encoder_decoder_attn.py index 3d3724c50421d..c4b72ba6bf4ee 100644 --- a/tests/kernels/test_encoder_decoder_attn.py +++ b/tests/kernels/test_encoder_decoder_attn.py @@ -18,8 +18,10 @@ from vllm.attention.backends.utils import STR_NOT_IMPL_ENC_DEC_ROCM_HIP from vllm.attention.selector import (_Backend, _cached_get_attn_backend, global_force_attn_backend_context_manager) +from vllm.config import VllmConfig from vllm.forward_context import set_forward_context from vllm.platforms import current_platform +from vllm.plugins import set_current_vllm_config # List of support backends for encoder/decoder models LIST_ENC_DEC_SUPPORTED_BACKENDS = [_Backend.XFORMERS, _Backend.FLASH_ATTN] @@ -594,6 +596,7 @@ def _run_encoder_attention_test( encoder_test_params: PhaseTestParameters, attn_metadata: AttentionMetadata, test_pt: TestPoint, + vllm_config: VllmConfig, ) -> torch.Tensor: ''' Run encoder attention. @@ -623,7 +626,7 @@ def _run_encoder_attention_test( attn_type = AttentionType.ENCODER packed_qkv = encoder_test_params.packed_qkvo.packed_qkv assert packed_qkv is not None - with set_forward_context(attn_metadata): + with set_forward_context(attn_metadata, vllm_config): # In the test setup the shape of the query is # [batch_size, seq_len, num_heads, head_size]. However # the attention backend expect the shape to be @@ -648,6 +651,7 @@ def _run_decoder_self_attention_test( decoder_test_params: PhaseTestParameters, attn_metadata: AttentionMetadata, test_pt: TestPoint, + vllm_config: VllmConfig, ) -> torch.Tensor: ''' Run decoder self-attention test. @@ -677,7 +681,7 @@ def _run_decoder_self_attention_test( kv_cache = test_rsrcs.kv_cache packed_qkv = decoder_test_params.packed_qkvo.packed_qkv assert packed_qkv is not None - with set_forward_context(attn_metadata): + with set_forward_context(attn_metadata, vllm_config): # In the test setup the shape of the query is # [batch_size, seq_len, num_heads, head_size]. 
However # the attention backend expect the shape to be @@ -701,6 +705,7 @@ def _run_encoder_decoder_cross_attention_test( cross_test_params: Optional[PhaseTestParameters], attn_metadata: AttentionMetadata, test_pt: TestPoint, + vllm_config: VllmConfig, ) -> torch.Tensor: ''' Run encoder/decoder cross-attention test. @@ -748,7 +753,7 @@ def _run_encoder_decoder_cross_attention_test( cross_pckd_qkv = cross_test_params.packed_qkvo.packed_qkv key = (None if cross_pckd_qkv is None else cross_pckd_qkv.key) value = (None if cross_pckd_qkv is None else cross_pckd_qkv.value) - with set_forward_context(attn_metadata): + with set_forward_context(attn_metadata, vllm_config): # In the test setup the shape of the query is # [batch_size, seq_len, num_heads, head_size]. However # the attention backend expect the shape to be @@ -839,7 +844,9 @@ def test_encoder_only( # Attention scale factor, attention backend instance, attention wrapper # instance, KV cache init - test_rsrcs = _make_test_resources(test_pt) + vllm_config = VllmConfig() + with set_current_vllm_config(vllm_config): + test_rsrcs = _make_test_resources(test_pt) # Construct encoder attention test params (only used # during prefill) @@ -863,7 +870,8 @@ def test_encoder_only( test_rsrcs.attn, enc_test_params, prephase_attn_metadata, - test_pt=test_pt)) + test_pt=test_pt, + vllm_config=vllm_config)) # - Is encoder attention result correct? assert_actual_matches_ideal(enc_test_params, enc_pckd_act_out, @@ -960,7 +968,9 @@ def test_e2e_enc_dec_attn( # Attention scale factor, attention backend instance, attention wrapper # instance, KV cache init - test_rsrcs = _make_test_resources(test_pt) + vllm_config = VllmConfig() + with set_current_vllm_config(vllm_config): + test_rsrcs = _make_test_resources(test_pt) # Construct encoder attention test params (only used # during prefill) @@ -1011,7 +1021,8 @@ def test_e2e_enc_dec_attn( enc_pckd_act_out = _run_encoder_attention_test(test_rsrcs.attn, enc_test_params, prephase_attn_metadata, - test_pt=test_pt) + test_pt=test_pt, + vllm_config=vllm_config) # - Is encoder attention result correct? assert_actual_matches_ideal(enc_test_params, enc_pckd_act_out, @@ -1023,7 +1034,8 @@ def test_e2e_enc_dec_attn( test_rsrcs, prephase_dec_test_params, prephase_attn_metadata, - test_pt=test_pt) + test_pt=test_pt, + vllm_config=vllm_config) # - Is prefill decoder self-attention correct? assert_actual_matches_ideal(prephase_dec_test_params, @@ -1037,7 +1049,8 @@ def test_e2e_enc_dec_attn( prephase_dec_test_params, prephase_cross_test_params, prephase_attn_metadata, - test_pt=test_pt) + test_pt=test_pt, + vllm_config=vllm_config) # - Is prefill encoder/decoder cross-attention correct? assert_actual_matches_ideal(prephase_cross_test_params, @@ -1061,7 +1074,8 @@ def test_e2e_enc_dec_attn( test_rsrcs, decphase_dec_test_params, decphase_attn_metadata, - test_pt=test_pt) + test_pt=test_pt, + vllm_config=vllm_config) # - Is decode-phase decoder self-attention correct? assert_actual_matches_ideal(decphase_dec_test_params, @@ -1075,7 +1089,8 @@ def test_e2e_enc_dec_attn( decphase_dec_test_params, None, decphase_attn_metadata, - test_pt=test_pt) + test_pt=test_pt, + vllm_config=vllm_config) # - Is decode-phase encoder/decoder cross-attention correct? 
assert_actual_matches_ideal(decphase_cross_test_params, diff --git a/vllm/attention/backends/abstract.py b/vllm/attention/backends/abstract.py index a504cb1f7e318..5be2d83346d00 100644 --- a/vllm/attention/backends/abstract.py +++ b/vllm/attention/backends/abstract.py @@ -1,7 +1,6 @@ from abc import ABC, abstractmethod from contextlib import contextmanager from dataclasses import dataclass, fields -from enum import Enum, auto from typing import (TYPE_CHECKING, Any, Dict, Generic, List, Optional, Set, Tuple, Type, TypeVar) @@ -15,13 +14,19 @@ ModelRunnerInputBuilderBase) -class AttentionType(Enum): - DECODER = auto() # Decoder attention between previous layer Q/K/V - ENCODER = auto( - ) # Encoder attention between previous layer Q/K/V for encoder-decoder - ENCODER_ONLY = auto() # Encoder attention between previous layer Q/K/V - ENCODER_DECODER = auto( - ) # Attention between dec. Q and enc. K/V for encoder-decoder +class AttentionType: + """ + Attention type. + Use string to be compatible with `torch.compile`. + """ + # Decoder attention between previous layer Q/K/V + DECODER = "decoder" + # Encoder attention between previous layer Q/K/V for encoder-decoder + ENCODER = "encoder" + # Encoder attention between previous layer Q/K/V + ENCODER_ONLY = "encoder_only" + # Attention between dec. Q and enc. K/V for encoder-decoder + ENCODER_DECODER = "encoder_decoder" class AttentionBackend(ABC): @@ -241,6 +246,6 @@ def forward( attn_metadata: T, k_scale: float = 1.0, v_scale: float = 1.0, - attn_type: AttentionType = AttentionType.DECODER, + attn_type: str = AttentionType.DECODER, ) -> torch.Tensor: raise NotImplementedError diff --git a/vllm/attention/backends/blocksparse_attn.py b/vllm/attention/backends/blocksparse_attn.py index 409a42187f46c..94002e36db2bb 100644 --- a/vllm/attention/backends/blocksparse_attn.py +++ b/vllm/attention/backends/blocksparse_attn.py @@ -354,7 +354,7 @@ def forward( attn_metadata: BlocksparseFlashAttentionMetadata, k_scale: float = 1.0, v_scale: float = 1.0, - attn_type: AttentionType = AttentionType.DECODER, + attn_type: str = AttentionType.DECODER, ) -> torch.Tensor: """Forward pass with FlashAttention and PagedAttention. diff --git a/vllm/attention/backends/flash_attn.py b/vllm/attention/backends/flash_attn.py index 314822b695722..32738d1043b1d 100644 --- a/vllm/attention/backends/flash_attn.py +++ b/vllm/attention/backends/flash_attn.py @@ -16,10 +16,8 @@ compute_slot_mapping_start_idx, get_num_prefill_decode_query_kv_tokens, get_seq_len_block_table_args, is_all_cross_attn_metadata_set, is_all_encoder_attn_metadata_set, is_block_tables_empty) -from vllm.forward_context import get_forward_context from vllm.multimodal import MultiModalPlaceholderMap -from vllm.utils import (async_tensor_h2d, direct_register_custom_op, - make_tensor_with_pad) +from vllm.utils import async_tensor_h2d, make_tensor_with_pad if TYPE_CHECKING: from vllm.worker.model_runner import (ModelInputForGPUBuilder, @@ -639,7 +637,7 @@ def forward( attn_metadata: FlashAttentionMetadata, k_scale: float = 1.0, v_scale: float = 1.0, - attn_type: AttentionType = AttentionType.DECODER, + attn_type: str = AttentionType.DECODER, ) -> torch.Tensor: """Forward pass with FlashAttention. 
@@ -668,23 +666,174 @@ def forward( "requires setting cross-attention " "metadata attributes.") - output = torch.ops.vllm.unified_flash_attention( - query, - key, - value, - self.num_heads, - self.head_size, - self.num_kv_heads, - kv_cache, - self.kv_cache_dtype, - k_scale, - v_scale, - self.scale, - attn_type.value, - self.sliding_window, - self.alibi_slopes, - self.logits_soft_cap, - ) + num_heads: int = self.num_heads + head_size: int = self.head_size + num_kv_heads: int = self.num_kv_heads + kv_cache_dtype: str = self.kv_cache_dtype + softmax_scale: float = self.scale + window_size = self.sliding_window + alibi_slopes: Optional[torch.Tensor] = self.alibi_slopes + logits_soft_cap: Optional[float] = self.logits_soft_cap + + num_tokens, hidden_size = query.shape + + # Reshape the query, key, and value tensors. + query = query.view(-1, num_heads, head_size) + if (key is not None) and (value is not None): + key = key.view(-1, num_kv_heads, head_size) + value = value.view(-1, num_kv_heads, head_size) + + if kv_cache.numel() > 0: + key_cache = kv_cache[0] + value_cache = kv_cache[1] + # We skip updating the KV cache under two conditions: + # a. When the Attention Type is ENCODER. In this phase, we compute + # only the encoder attention without updating the cache. + # b. When both Key and Value are None. This occurs during + # cross-attention computation in the decoding phase, where the + # KV cache is already populated with the cross-attention + # tensor. Thus, we skip cache updates during this time. + if (attn_type != AttentionType.ENCODER) and (key is not None) and ( + value is not None): + if attn_type == AttentionType.ENCODER_DECODER: + # Update cross-attention KV cache (prefill-only) + updated_slot_mapping = attn_metadata.cross_slot_mapping + else: + # Update self-attention KV cache (prefill/decode) + updated_slot_mapping = attn_metadata.slot_mapping + + # Reshape the input keys and values and store them in the cache. + # If kv_cache is not provided, the new key and value tensors are + # not cached. This happens during the initial memory + # profiling run. + torch.ops._C_cache_ops.reshape_and_cache_flash( + key, + value, + kv_cache[0], + kv_cache[1], + updated_slot_mapping.flatten(), # type: ignore[union-attr] + kv_cache_dtype, + k_scale, + v_scale, + ) + + (num_prefill_query_tokens, num_prefill_kv_tokens, + num_decode_query_tokens) = \ + get_num_prefill_decode_query_kv_tokens(attn_metadata, attn_type) + decode_query = query[num_prefill_query_tokens:] + # QKV for prefill. + query = query[:num_prefill_query_tokens] + assert query.shape[0] == num_prefill_query_tokens + assert decode_query.shape[0] == num_decode_query_tokens + + prefill_output: Optional[torch.Tensor] = None + decode_output: Optional[torch.Tensor] = None + if prefill_meta := attn_metadata.prefill_metadata: + # Prompt run. + if (kv_cache.numel() == 0 or prefill_meta.block_tables is None + or prefill_meta.block_tables.numel() == 0): + # normal attention + # When block_tables are not filled, it means q and k are the + # prompt, and they have the same length. 
+ q_seq_start_loc, q_seq_len, k_seq_start_loc, k_seq_len = \ + _get_query_key_seq_metadata(prefill_meta, True, attn_type) + + key = key[:num_prefill_kv_tokens] + value = value[:num_prefill_kv_tokens] + + prefill_output = flash_attn_varlen_func( + q=query, + k=key, + v=value, + cu_seqlens_q=q_seq_start_loc, + cu_seqlens_k=k_seq_start_loc, + max_seqlen_q=q_seq_len, + max_seqlen_k=k_seq_len, + softmax_scale=softmax_scale, + causal=_get_causal_option(attn_type), + window_size=window_size, + alibi_slopes=alibi_slopes, + softcap=logits_soft_cap, + ) + else: + # prefix-enabled attention + assert attn_type == AttentionType.DECODER, ( + "Only decoder-only models support prefix caching") + assert prefill_meta.seq_lens is not None + max_seq_len = max(prefill_meta.seq_lens) + prefill_output = flash_attn_varlen_func( # noqa + q=query, + k=key_cache, + v=value_cache, + cu_seqlens_q=prefill_meta.query_start_loc, + max_seqlen_q=prefill_meta.max_query_len, + cu_seqlens_k=prefill_meta.seq_start_loc, + max_seqlen_k=max_seq_len, + softmax_scale=softmax_scale, + causal=True, + window_size=window_size, + alibi_slopes=alibi_slopes, + block_table=prefill_meta.block_tables, + softcap=logits_soft_cap, + ) + + if decode_meta := attn_metadata.decode_metadata: + # Decoding run. + # Use flash_attn_varlen_func kernel for speculative decoding + # because different queries might have different lengths. + + assert decode_meta.max_decode_query_len is not None + # use only for actual varlen decoding + if decode_meta.max_decode_query_len > 1: + assert attn_type == AttentionType.DECODER, ( + "Only decoder-only models support max_decode_query_len > 1" + ) + decode_output = flash_attn_varlen_func( + q=decode_query, + k=key_cache, + v=value_cache, + cu_seqlens_q=decode_meta.query_start_loc, + max_seqlen_q=decode_meta.max_decode_query_len, + cu_seqlens_k=decode_meta.seq_start_loc, + max_seqlen_k=decode_meta.max_decode_seq_len, + softmax_scale=softmax_scale, + causal=True, + window_size=window_size, + alibi_slopes=alibi_slopes, + softcap=logits_soft_cap, + block_table=decode_meta.block_tables, + ) + else: + # Use flash_attn_with_kvcache for normal decoding. 
+ ( + seq_lens_arg, + _, + block_tables_arg, + ) = get_seq_len_block_table_args(decode_meta, False, attn_type) + decode_output = flash_attn_with_kvcache( + q=decode_query.unsqueeze(1), + k_cache=key_cache, + v_cache=value_cache, + block_table=block_tables_arg, + cache_seqlens=seq_lens_arg, + softmax_scale=softmax_scale, + causal=True, + window_size=window_size, + alibi_slopes=alibi_slopes, + softcap=logits_soft_cap, + ).squeeze(1) + + if prefill_output is None: + assert decode_output is not None + return decode_output.view(num_decode_query_tokens, hidden_size) + if decode_output is None: + assert prefill_output is not None + return prefill_output.view(num_prefill_query_tokens, hidden_size) + + assert decode_meta is not None + decode_output = decode_output.squeeze(1) + output = torch.cat([prefill_output, decode_output], dim=0) + return output.view(num_tokens, hidden_size) return output @@ -692,7 +841,7 @@ def forward( def _get_query_key_seq_metadata( attn_metadata, is_prompt: bool, - attn_type: AttentionType, + attn_type: str, ) -> tuple: """ Returns sequence metadata for key and query based on the specified @@ -754,7 +903,7 @@ def _get_query_key_seq_metadata( raise AttributeError(f"Invalid attention type {str(attn_type)}") -def _get_causal_option(attn_type: AttentionType) -> bool: +def _get_causal_option(attn_type: str) -> bool: """ Determine whether the given attention type is suitable for causal attention mechanisms. @@ -770,220 +919,3 @@ def _get_causal_option(attn_type: AttentionType) -> bool: return not (attn_type == AttentionType.ENCODER or attn_type == AttentionType.ENCODER_ONLY or attn_type == AttentionType.ENCODER_DECODER) - - -def unified_flash_attention( - query: torch.Tensor, - key: torch.Tensor, - value: torch.Tensor, - num_heads: int, - head_size: int, - num_kv_heads: int, - kv_cache: torch.Tensor, - kv_cache_dtype: str, - k_scale: float, - v_scale: float, - softmax_scale: float, - attn_type_int_val: int, - window_size: Optional[List[int]] = None, - alibi_slopes: Optional[torch.Tensor] = None, - logits_soft_cap: Optional[float] = None, -) -> torch.Tensor: - - # Convert integer attn_type to enum - try: - attn_type = AttentionType(attn_type_int_val) - except ValueError as err: - raise AttributeError( - f"Invalid attention type {str(attn_type_int_val)}") from err - - current_metadata = get_forward_context() - assert current_metadata is not None - assert isinstance(current_metadata, FlashAttentionMetadata) - attn_metadata: FlashAttentionMetadata = current_metadata - - num_tokens, hidden_size = query.shape - - # Reshape the query, key, and value tensors. - query = query.view(-1, num_heads, head_size) - if (key is not None) and (value is not None): - key = key.view(-1, num_kv_heads, head_size) - value = value.view(-1, num_kv_heads, head_size) - - if kv_cache.numel() > 0: - key_cache = kv_cache[0] - value_cache = kv_cache[1] - # We skip updating the KV cache under two conditions: - # a. When the Attention Type is ENCODER. In this phase, we compute - # only the encoder attention without updating the cache. - # b. When both Key and Value are None. This occurs during - # cross-attention computation in the decoding phase, where the KV - # cache is already populated with the cross-attention tensor. - # Thus, we skip cache updates during this time. 
- if (attn_type != AttentionType.ENCODER) and (key is not None) and ( - value is not None): - if attn_type == AttentionType.ENCODER_DECODER: - # Update cross-attention KV cache (prefill-only) - updated_slot_mapping = attn_metadata.cross_slot_mapping - else: - # Update self-attention KV cache (prefill/decode) - updated_slot_mapping = attn_metadata.slot_mapping - - # Reshape the input keys and values and store them in the cache. - # If kv_cache is not provided, the new key and value tensors are - # not cached. This happens during the initial memory profiling run. - torch.ops._C_cache_ops.reshape_and_cache_flash( - key, - value, - kv_cache[0], - kv_cache[1], - updated_slot_mapping.flatten(), # type: ignore[union-attr] - kv_cache_dtype, - k_scale, - v_scale, - ) - - (num_prefill_query_tokens, num_prefill_kv_tokens, - num_decode_query_tokens) = \ - get_num_prefill_decode_query_kv_tokens(attn_metadata, attn_type) - decode_query = query[num_prefill_query_tokens:] - # QKV for prefill. - query = query[:num_prefill_query_tokens] - assert query.shape[0] == num_prefill_query_tokens - assert decode_query.shape[0] == num_decode_query_tokens - - prefill_output: Optional[torch.Tensor] = None - decode_output: Optional[torch.Tensor] = None - if prefill_meta := attn_metadata.prefill_metadata: - # Prompt run. - if (kv_cache.numel() == 0 or prefill_meta.block_tables is None - or prefill_meta.block_tables.numel() == 0): - # normal attention - # When block_tables are not filled, it means q and k are the - # prompt, and they have the same length. - q_seq_start_loc, q_seq_len, k_seq_start_loc, k_seq_len = \ - _get_query_key_seq_metadata(prefill_meta, True, attn_type) - - key = key[:num_prefill_kv_tokens] - value = value[:num_prefill_kv_tokens] - - prefill_output = flash_attn_varlen_func( - q=query, - k=key, - v=value, - cu_seqlens_q=q_seq_start_loc, - cu_seqlens_k=k_seq_start_loc, - max_seqlen_q=q_seq_len, - max_seqlen_k=k_seq_len, - softmax_scale=softmax_scale, - causal=_get_causal_option(attn_type), - window_size=window_size, - alibi_slopes=alibi_slopes, - softcap=logits_soft_cap, - ) - else: - # prefix-enabled attention - assert attn_type == AttentionType.DECODER, ( - "Only decoder-only models support prefix caching") - assert prefill_meta.seq_lens is not None - max_seq_len = max(prefill_meta.seq_lens) - prefill_output = flash_attn_varlen_func( # noqa - q=query, - k=key_cache, - v=value_cache, - cu_seqlens_q=prefill_meta.query_start_loc, - max_seqlen_q=prefill_meta.max_query_len, - cu_seqlens_k=prefill_meta.seq_start_loc, - max_seqlen_k=max_seq_len, - softmax_scale=softmax_scale, - causal=True, - window_size=window_size, - alibi_slopes=alibi_slopes, - block_table=prefill_meta.block_tables, - softcap=logits_soft_cap, - ) - - if decode_meta := attn_metadata.decode_metadata: - # Decoding run. - # Use flash_attn_varlen_func kernel for speculative decoding - # because different queries might have different lengths. 
- - assert decode_meta.max_decode_query_len is not None - # use only for actual varlen decoding - if decode_meta.max_decode_query_len > 1: - assert attn_type == AttentionType.DECODER, ( - "Only decoder-only models support max_decode_query_len > 1") - decode_output = flash_attn_varlen_func( - q=decode_query, - k=key_cache, - v=value_cache, - cu_seqlens_q=decode_meta.query_start_loc, - max_seqlen_q=decode_meta.max_decode_query_len, - cu_seqlens_k=decode_meta.seq_start_loc, - max_seqlen_k=decode_meta.max_decode_seq_len, - softmax_scale=softmax_scale, - causal=True, - window_size=window_size, - alibi_slopes=alibi_slopes, - softcap=logits_soft_cap, - block_table=decode_meta.block_tables, - ) - else: - # Use flash_attn_with_kvcache for normal decoding. - ( - seq_lens_arg, - _, - block_tables_arg, - ) = get_seq_len_block_table_args(decode_meta, False, attn_type) - decode_output = flash_attn_with_kvcache( - q=decode_query.unsqueeze(1), - k_cache=key_cache, - v_cache=value_cache, - block_table=block_tables_arg, - cache_seqlens=seq_lens_arg, - softmax_scale=softmax_scale, - causal=True, - window_size=window_size, - alibi_slopes=alibi_slopes, - softcap=logits_soft_cap, - ).squeeze(1) - - if prefill_output is None: - assert decode_output is not None - return decode_output.view(num_decode_query_tokens, hidden_size) - if decode_output is None: - assert prefill_output is not None - return prefill_output.view(num_prefill_query_tokens, hidden_size) - - assert decode_meta is not None - decode_output = decode_output.squeeze(1) - output = torch.cat([prefill_output, decode_output], dim=0) - return output.view(num_tokens, hidden_size) - - -def unified_flash_attention_fake( - query: torch.Tensor, - key: torch.Tensor, - value: torch.Tensor, - num_heads: int, - head_size: int, - num_kv_heads: int, - kv_cache: torch.Tensor, - kv_cache_dtype: str, - k_scale: float, - v_scale: float, - softmax_scale: float, - attn_type_int_val: int, - window_size: Optional[List[int]] = None, - alibi_slopes: Optional[torch.Tensor] = None, - logits_soft_cap: Optional[float] = None, -) -> torch.Tensor: - return torch.empty_like(query) - - -direct_register_custom_op( - op_name="unified_flash_attention", - op_func=unified_flash_attention, - mutates_args=["kv_cache"], - fake_impl=unified_flash_attention_fake, -) diff --git a/vllm/attention/backends/flashinfer.py b/vllm/attention/backends/flashinfer.py index b61c660e3e280..1a2024705eb04 100644 --- a/vllm/attention/backends/flashinfer.py +++ b/vllm/attention/backends/flashinfer.py @@ -30,9 +30,8 @@ compute_slot_mapping_start_idx, is_block_tables_empty) from vllm.attention.ops.paged_attn import PagedAttention -from vllm.forward_context import get_forward_context -from vllm.utils import (async_tensor_h2d, direct_register_custom_op, - get_kv_cache_torch_dtype, make_tensor_with_pad) +from vllm.utils import (async_tensor_h2d, get_kv_cache_torch_dtype, + make_tensor_with_pad) if TYPE_CHECKING: from vllm.worker.model_runner import (ModelInputForGPUBuilder, @@ -774,7 +773,7 @@ def forward( attn_metadata: FlashInferMetadata, k_scale: float = 1.0, v_scale: float = 1.0, - attn_type: AttentionType = AttentionType.DECODER, + attn_type: str = AttentionType.DECODER, ) -> torch.Tensor: if attn_type != AttentionType.DECODER: raise NotImplementedError("Encoder self-attention and " @@ -782,174 +781,117 @@ def forward( "are not implemented for " "FlashInferImpl") - return torch.ops.vllm.unified_flash_infer( - query, - key, - value, - self.num_heads, - self.head_size, - self.num_kv_heads, - kv_cache, - 
self.kv_cache_dtype, - k_scale, - v_scale, - self.scale, - self.sliding_window, - self.alibi_slopes, - self.logits_soft_cap, - ) - - -def unified_flash_infer( - query: torch.Tensor, - key: torch.Tensor, - value: torch.Tensor, - num_heads: int, - head_size: int, - num_kv_heads: int, - kv_cache: torch.Tensor, - kv_cache_dtype: str, - k_scale: float, - v_scale: float, - softmax_scale: float, - window_size: Optional[List[int]] = None, - alibi_slopes: Optional[torch.Tensor] = None, - logits_soft_cap: Optional[float] = None, -) -> torch.Tensor: - - current_metadata = get_forward_context() - assert current_metadata is not None - assert isinstance(current_metadata, FlashInferMetadata) - attn_metadata: FlashInferMetadata = current_metadata - - num_tokens, hidden_size = query.shape - query = query.view(-1, num_heads, head_size) - key = key.view(-1, num_kv_heads, head_size) - value = value.view(-1, num_kv_heads, head_size) - - if kv_cache.numel() > 0: - # Use the same reshape and cache kernel as flash attention. - ops.reshape_and_cache_flash( - key, - value, - kv_cache[:, 0], - kv_cache[:, 1], - attn_metadata.slot_mapping.flatten(), - kv_cache_dtype, - k_scale, - v_scale, - ) - # The FlashInfer api requires data to be in fp8_e4m3 or fp8_e5m2 - # to process the cache when the kv_cache_dtype is fp8 - if kv_cache_dtype.startswith("fp8"): - torch_dtype = FlashInferBackend.get_fp8_dtype_for_flashinfer( - kv_cache_dtype) - kv_cache = kv_cache.view(torch_dtype) - - num_prefill_tokens = attn_metadata.num_prefill_tokens - num_decode_tokens = attn_metadata.num_decode_tokens - assert key.shape[0] == num_prefill_tokens + num_decode_tokens, \ - f"key : {key.shape} : #prefill tokens {num_prefill_tokens} : #decode tokens {num_decode_tokens}" # noqa - assert value.shape[0] == num_prefill_tokens + num_decode_tokens, \ - f"value : {value.shape} : #prefill toks {num_prefill_tokens} : #decode toks {num_decode_tokens}" # noqa - query = query.contiguous() # Flashinfer requires query to be contiguous - # Query for decode. KV is not needed because it is already cached. - # QKV for prefill. - decode_query = query[num_prefill_tokens:] - query = query[:num_prefill_tokens] - - key = key[:num_prefill_tokens] - value = value[:num_prefill_tokens] - - assert query.shape[0] == num_prefill_tokens - assert decode_query.shape[0] == num_decode_tokens - - window_left = window_size[0] if window_size is not None else -1 - - prefill_output: Optional[torch.Tensor] = None - decode_output: Optional[torch.Tensor] = None - if prefill_meta := attn_metadata.prefill_metadata: - # We will use flash attention for prefill - # when kv_cache is not provided. - # This happens when vllm runs the profiling to - # determine the number of blocks. 
- if kv_cache.numel() == 0: - prefill_output = flash_attn_varlen_func( - q=query, - k=key, - v=value, - cu_seqlens_q=prefill_meta.seq_start_loc, - cu_seqlens_k=prefill_meta.seq_start_loc, - max_seqlen_q=prefill_meta.max_prefill_seq_len, - max_seqlen_k=prefill_meta.max_prefill_seq_len, - softmax_scale=softmax_scale, - causal=True, - window_size=window_size, - alibi_slopes=alibi_slopes, + num_heads: int = self.num_heads + head_size: int = self.head_size + num_kv_heads: int = self.num_kv_heads + kv_cache_dtype: str = self.kv_cache_dtype + softmax_scale: float = self.scale + window_size = self.sliding_window + alibi_slopes = self.alibi_slopes + logits_soft_cap = self.logits_soft_cap + + num_tokens, hidden_size = query.shape + query = query.view(-1, num_heads, head_size) + key = key.view(-1, num_kv_heads, head_size) + value = value.view(-1, num_kv_heads, head_size) + + if kv_cache.numel() > 0: + # Use the same reshape and cache kernel as flash attention. + ops.reshape_and_cache_flash( + key, + value, + kv_cache[:, 0], + kv_cache[:, 1], + attn_metadata.slot_mapping.flatten(), + kv_cache_dtype, + k_scale, + v_scale, ) - else: - assert prefill_meta is not None - assert prefill_meta.prefill_wrapper is not None - prefill_output = prefill_meta.prefill_wrapper.forward( - query, + # The FlashInfer api requires data to be in fp8_e4m3 or fp8_e5m2 + # to process the cache when the kv_cache_dtype is fp8 + if kv_cache_dtype.startswith("fp8"): + torch_dtype = FlashInferBackend.get_fp8_dtype_for_flashinfer( + kv_cache_dtype) + kv_cache = kv_cache.view(torch_dtype) + + num_prefill_tokens = attn_metadata.num_prefill_tokens + num_decode_tokens = attn_metadata.num_decode_tokens + assert key.shape[0] == num_prefill_tokens + num_decode_tokens, \ + f"key : {key.shape} : #prefill tokens {num_prefill_tokens} : #decode tokens {num_decode_tokens}" # noqa + assert value.shape[0] == num_prefill_tokens + num_decode_tokens, \ + f"value : {value.shape} : #prefill toks {num_prefill_tokens} : #decode toks {num_decode_tokens}" # noqa + query = query.contiguous( + ) # Flashinfer requires query to be contiguous + # Query for decode. KV is not needed because it is already cached. + # QKV for prefill. + decode_query = query[num_prefill_tokens:] + query = query[:num_prefill_tokens] + + key = key[:num_prefill_tokens] + value = value[:num_prefill_tokens] + + assert query.shape[0] == num_prefill_tokens + assert decode_query.shape[0] == num_decode_tokens + + window_left = window_size[0] if window_size is not None else -1 + + prefill_output: Optional[torch.Tensor] = None + decode_output: Optional[torch.Tensor] = None + if prefill_meta := attn_metadata.prefill_metadata: + # We will use flash attention for prefill + # when kv_cache is not provided. + # This happens when vllm runs the profiling to + # determine the number of blocks. 
+ if kv_cache.numel() == 0: + prefill_output = flash_attn_varlen_func( + q=query, + k=key, + v=value, + cu_seqlens_q=prefill_meta.seq_start_loc, + cu_seqlens_k=prefill_meta.seq_start_loc, + max_seqlen_q=prefill_meta.max_prefill_seq_len, + max_seqlen_k=prefill_meta.max_prefill_seq_len, + softmax_scale=softmax_scale, + causal=True, + window_size=window_size, + alibi_slopes=alibi_slopes, + ) + else: + assert prefill_meta is not None + assert prefill_meta.prefill_wrapper is not None + prefill_output = prefill_meta.prefill_wrapper.forward( + query, + kv_cache, + logits_soft_cap=logits_soft_cap, + causal=True, + k_scale=k_scale, + v_scale=v_scale, + window_left=window_left) + if decode_meta := attn_metadata.decode_metadata: + assert decode_meta is not None + assert decode_meta.decode_wrapper is not None + decode_output = decode_meta.decode_wrapper.forward( + decode_query, kv_cache, + sm_scale=softmax_scale, logits_soft_cap=logits_soft_cap, - causal=True, k_scale=k_scale, v_scale=v_scale, window_left=window_left) - if decode_meta := attn_metadata.decode_metadata: - assert attn_metadata.decode_metadata is not None - assert attn_metadata.decode_metadata.decode_wrapper is not None - decode_output = attn_metadata.decode_metadata.decode_wrapper.forward( - decode_query, - kv_cache, - sm_scale=softmax_scale, - logits_soft_cap=logits_soft_cap, - k_scale=k_scale, - v_scale=v_scale, - window_left=window_left) - - if prefill_output is None and decode_output is not None: - # Decode only batch. - output, num_tokens = decode_output, num_decode_tokens - elif decode_output is None and prefill_output is not None: - # Prefill only batch. - output, num_tokens = prefill_output, num_prefill_tokens - else: - # Chunked prefill batch does not work with speculative decoding in - # FlashInfer backend, so the query length for decode should be 1. - assert prefill_output is not None - assert decode_output is not None - assert decode_meta is not None - assert decode_meta.decode_query_len == 1 - decode_output = decode_output.squeeze(1) - output = torch.cat([prefill_output, decode_output], dim=0) - return output.view(num_tokens, hidden_size) - - -def unified_flash_infer_fake( - query: torch.Tensor, - key: torch.Tensor, - value: torch.Tensor, - num_heads: int, - head_size: int, - num_kv_heads: int, - kv_cache: torch.Tensor, - kv_cache_dtype: str, - k_scale: float, - v_scale: float, - softmax_scale: float, - window_size: Optional[List[int]] = None, - alibi_slopes: Optional[torch.Tensor] = None, - logits_soft_cap: Optional[float] = None, -) -> torch.Tensor: - return torch.empty_like(query).contiguous() - - -direct_register_custom_op( - op_name="unified_flash_infer", - op_func=unified_flash_infer, - mutates_args=["kv_cache"], - fake_impl=unified_flash_infer_fake, -) + + if prefill_output is None and decode_output is not None: + # Decode only batch. + output, num_tokens = decode_output, num_decode_tokens + elif decode_output is None and prefill_output is not None: + # Prefill only batch. + output, num_tokens = prefill_output, num_prefill_tokens + else: + # Chunked prefill batch does not work with speculative decoding in + # FlashInfer backend, so the query length for decode should be 1. 
+ assert prefill_output is not None + assert decode_output is not None + assert decode_meta is not None + assert decode_meta.decode_query_len == 1 + decode_output = decode_output.squeeze(1) + output = torch.cat([prefill_output, decode_output], dim=0) + return output.view(num_tokens, hidden_size) diff --git a/vllm/attention/backends/hpu_attn.py b/vllm/attention/backends/hpu_attn.py index a8f4b09b67274..4a3ddd5db94e5 100644 --- a/vllm/attention/backends/hpu_attn.py +++ b/vllm/attention/backends/hpu_attn.py @@ -140,7 +140,7 @@ def forward( attn_metadata: HPUAttentionMetadata, k_scale: float = 1.0, v_scale: float = 1.0, - attn_type: AttentionType = AttentionType.DECODER, + attn_type: str = AttentionType.DECODER, ) -> torch.Tensor: """Forward pass with xFormers and PagedAttention. diff --git a/vllm/attention/backends/ipex_attn.py b/vllm/attention/backends/ipex_attn.py index 87bdb1e0e6565..3b0d51ea4a3d8 100644 --- a/vllm/attention/backends/ipex_attn.py +++ b/vllm/attention/backends/ipex_attn.py @@ -172,7 +172,7 @@ def forward( attn_metadata: IpexAttnMetadata, # type: ignore k_scale: float = 1.0, v_scale: float = 1.0, - attn_type: AttentionType = AttentionType.DECODER, + attn_type: str = AttentionType.DECODER, ) -> torch.Tensor: """Forward pass with IPEX varlen_attention and PagedAttention. diff --git a/vllm/attention/backends/pallas.py b/vllm/attention/backends/pallas.py index eeab8731a2c39..5988be0e6b687 100644 --- a/vllm/attention/backends/pallas.py +++ b/vllm/attention/backends/pallas.py @@ -150,7 +150,7 @@ def forward( attn_metadata: PallasMetadata, k_scale: float = 1.0, v_scale: float = 1.0, - attn_type: AttentionType = AttentionType.DECODER, + attn_type: str = AttentionType.DECODER, ) -> torch.Tensor: """Forward pass with Pallas attention. diff --git a/vllm/attention/backends/rocm_flash_attn.py b/vllm/attention/backends/rocm_flash_attn.py index 2bae370eaa90f..6a494f4e73cb4 100644 --- a/vllm/attention/backends/rocm_flash_attn.py +++ b/vllm/attention/backends/rocm_flash_attn.py @@ -414,7 +414,7 @@ def forward( attn_metadata: ROCmFlashAttentionMetadata, k_scale: float = 1.0, v_scale: float = 1.0, - attn_type: AttentionType = AttentionType.DECODER, + attn_type: str = AttentionType.DECODER, ) -> torch.Tensor: """Forward pass with FlashAttention and PagedAttention. 
diff --git a/vllm/attention/backends/torch_sdpa.py b/vllm/attention/backends/torch_sdpa.py index 3d025df26a7a1..16e044b618c40 100644 --- a/vllm/attention/backends/torch_sdpa.py +++ b/vllm/attention/backends/torch_sdpa.py @@ -141,7 +141,7 @@ def decode_metadata(self) -> Optional["TorchSDPAMetadata"]: def get_seq_lens( self, - attn_type: AttentionType, + attn_type: str, ): ''' Extract appropriate sequence lengths from attention metadata @@ -174,7 +174,7 @@ def get_seq_lens( def get_attn_bias( self, - attn_type: AttentionType, + attn_type: str, ) -> Optional[List[torch.Tensor]]: ''' Extract appropriate attention bias from attention metadata @@ -203,7 +203,7 @@ def get_attn_bias( def set_attn_bias( self, attn_bias: List[torch.Tensor], - attn_type: AttentionType, + attn_type: str, ) -> None: ''' Update appropriate attention bias field of attention metadata, @@ -229,7 +229,7 @@ def set_attn_bias( def get_seq_len_block_table_args( self, - attn_type: AttentionType, + attn_type: str, ) -> tuple: ''' The particular choice of sequence-length- and block-table-related @@ -426,7 +426,7 @@ def forward( attn_metadata: TorchSDPAMetadata, # type: ignore k_scale: float = 1.0, v_scale: float = 1.0, - attn_type: AttentionType = AttentionType.DECODER, + attn_type: str = AttentionType.DECODER, ) -> torch.Tensor: """Forward pass with torch SDPA and PagedAttention. @@ -574,7 +574,7 @@ def _run_sdpa_forward( key: torch.Tensor, value: torch.Tensor, attn_metadata: TorchSDPAMetadata, - attn_type: AttentionType = AttentionType.DECODER, + attn_type: str = AttentionType.DECODER, ) -> None: if self.num_kv_heads != self.num_heads: key = key.repeat_interleave(self.num_queries_per_kv, dim=1) diff --git a/vllm/attention/backends/utils.py b/vllm/attention/backends/utils.py index 12800668af223..56cc43430301f 100644 --- a/vllm/attention/backends/utils.py +++ b/vllm/attention/backends/utils.py @@ -478,7 +478,7 @@ def is_all_cross_attn_metadata_set(attn_metadata): def get_seq_len_block_table_args( attn_metadata, is_prompt: bool, - attn_type: AttentionType, + attn_type: str, ) -> tuple: ''' The particular choice of sequence-length- and block-table-related @@ -529,7 +529,7 @@ def get_seq_len_block_table_args( def get_num_prefill_decode_query_kv_tokens( attn_metadata, - attn_type: AttentionType, + attn_type: str, ) -> Tuple[int, int, int]: """ Calculate the number of prefill and decode tokens for query, key/value diff --git a/vllm/attention/backends/xformers.py b/vllm/attention/backends/xformers.py index 83d03606524dc..292575a8736bc 100644 --- a/vllm/attention/backends/xformers.py +++ b/vllm/attention/backends/xformers.py @@ -284,7 +284,7 @@ def decode_metadata(self) -> Optional["XFormersMetadata"]: def _get_attn_bias( attn_metadata: XFormersMetadata, - attn_type: AttentionType, + attn_type: str, ) -> Optional[AttentionBias]: ''' Extract appropriate attention bias from attention metadata @@ -314,7 +314,7 @@ def _get_attn_bias( def _set_attn_bias( attn_metadata: XFormersMetadata, attn_bias: List[Optional[AttentionBias]], - attn_type: AttentionType, + attn_type: str, ) -> None: ''' Update appropriate attention bias field of attention metadata, @@ -416,7 +416,7 @@ def forward( attn_metadata: "XFormersMetadata", k_scale: float = 1.0, v_scale: float = 1.0, - attn_type: AttentionType = AttentionType.DECODER, + attn_type: str = AttentionType.DECODER, ) -> torch.Tensor: """Forward pass with xFormers and PagedAttention. 
@@ -617,7 +617,7 @@ def _run_memory_efficient_xformers_forward( key: torch.Tensor, value: torch.Tensor, attn_metadata: XFormersMetadata, - attn_type: AttentionType = AttentionType.DECODER, + attn_type: str = AttentionType.DECODER, ) -> torch.Tensor: """Attention for 1D query of multiple prompts. Multiple prompt tokens are flattened in to `query` input. diff --git a/vllm/attention/layer.py b/vllm/attention/layer.py index 33d05cbd3fe01..8acbeaf12b0cf 100644 --- a/vllm/attention/layer.py +++ b/vllm/attention/layer.py @@ -4,12 +4,17 @@ import torch import torch.nn as nn +import vllm.envs as envs from vllm.attention import AttentionMetadata, AttentionType from vllm.attention.selector import get_attn_backend from vllm.config import CacheConfig +from vllm.forward_context import ForwardContext, get_forward_context from vllm.model_executor.layers.quantization.base_config import ( QuantizationConfig) from vllm.model_executor.layers.quantization.kv_cache import BaseKVCacheMethod +from vllm.platforms import current_platform +from vllm.plugins import get_current_vllm_config +from vllm.utils import direct_register_custom_op class Attention(nn.Module): @@ -86,6 +91,18 @@ def __init__( alibi_slopes, sliding_window, kv_cache_dtype, blocksparse_params, logits_soft_cap) + # For cuda-alike (CUDA and ROCM) and cpu platforms, we control how + # torch.compile works by registering the attention as one giant + # opaque custom op. For other platforms, we directly call them + # and let torch.compile handle them. + self.use_direct_call = envs.VLLM_USE_V1 or not ( + current_platform.is_cuda_alike() or current_platform.is_cpu()) + compilation_config = get_current_vllm_config().compilation_config + if prefix in compilation_config.static_forward_context: + raise ValueError(f"Duplicate layer name: {prefix}") + compilation_config.static_forward_context[prefix] = self + self.layer_name = prefix + def forward( self, query: torch.Tensor, @@ -93,17 +110,22 @@ def forward( value: torch.Tensor, kv_cache: torch.Tensor, attn_metadata: AttentionMetadata, - attn_type: AttentionType = AttentionType.DECODER, + attn_type: str = AttentionType.DECODER, ) -> torch.Tensor: - return self.impl.forward(query, - key, - value, - kv_cache, - attn_metadata, - self._k_scale, - self._v_scale, - attn_type=attn_type) + if self.use_direct_call: + return self.impl.forward(query, + key, + value, + kv_cache, + attn_metadata, + self._k_scale, + self._v_scale, + attn_type=attn_type) + else: + return torch.ops.vllm.unified_attention(query, key, value, + kv_cache, attn_type, + self.layer_name) def extra_repr(self) -> str: s = f"head_size={self.impl.head_size}" # type: ignore @@ -112,3 +134,44 @@ def extra_repr(self) -> str: s += f", scale={self.impl.scale}" # type: ignore s += f", backend={self.impl.__class__.__name__}" return s + + +def unified_attention( + query: torch.Tensor, + key: torch.Tensor, + value: torch.Tensor, + kv_cache: torch.Tensor, + attn_type: str, + layer_name: str, +) -> torch.Tensor: + forward_context: ForwardContext = get_forward_context() + attn_metadata = forward_context.dynamic_forward_context + self = forward_context.static_forward_context[layer_name] + return self.impl.forward(query, + key, + value, + kv_cache, + attn_metadata, + self._k_scale, + self._v_scale, + attn_type=attn_type) + + +def unified_attention_fake( + query: torch.Tensor, + key: torch.Tensor, + value: torch.Tensor, + kv_cache: torch.Tensor, + attn_type: str, + layer_name: str, +) -> torch.Tensor: + return torch.empty_like(query).contiguous() + + 
+direct_register_custom_op( + op_name="unified_attention", + op_func=unified_attention, + mutates_args=["kv_cache"], + fake_impl=unified_attention_fake, + dispatch_key=current_platform.dispatch_key, +) diff --git a/vllm/config.py b/vllm/config.py index b5f2116e3557b..bb02c2ad4c7d4 100644 --- a/vllm/config.py +++ b/vllm/config.py @@ -2135,8 +2135,7 @@ class CompilationConfig(BaseModel): backend: str = "" custom_ops: List[str] = Field(default_factory=list) splitting_ops: List[str] = Field(default_factory=lambda: [ - "vllm.unified_flash_attention", - "vllm.unified_flash_infer", + "vllm.unified_attention", "vllm.unified_v1_flash_attention", ]) @@ -2197,6 +2196,11 @@ def model_post_init(self, __context: Any) -> None: enabled_custom_ops: Counter[str] = PrivateAttr disabled_custom_ops: Counter[str] = PrivateAttr + # Per-model forward context + # Mainly used to store attention cls + # Map from layer name to the attention cls + static_forward_context: Dict[str, Any] = PrivateAttr + @classmethod def from_cli(cls, cli_value: str) -> "CompilationConfig": """Parse the CLI value for the compilation config.""" @@ -2228,6 +2232,7 @@ def model_post_init(self, __context: Any) -> None: self.enabled_custom_ops = Counter() self.disabled_custom_ops = Counter() + self.static_forward_context = {} def init_backend(self) -> Union[str, Callable]: if self.level == CompilationLevel.NO_COMPILATION: diff --git a/vllm/forward_context.py b/vllm/forward_context.py index 777747505e14a..aaa3e4bb3a1e8 100644 --- a/vllm/forward_context.py +++ b/vllm/forward_context.py @@ -1,21 +1,38 @@ from contextlib import contextmanager -from typing import Any +from dataclasses import dataclass +from typing import Any, Dict, Optional -_forward_context: Any = None +from vllm.config import VllmConfig -def get_forward_context() -> Any: +@dataclass +class ForwardContext: + static_forward_context: Dict[str, Any] + # TODO: extend to support per-layer dynamic forward context + dynamic_forward_context: Any + + +_forward_context: Optional[ForwardContext] = None + + +def get_forward_context() -> ForwardContext: """Get the current forward context.""" + assert _forward_context is not None, ( + "Forward context is not set. " + "Please use `set_forward_context` to set the forward context.") return _forward_context @contextmanager -def set_forward_context(context: Any): +def set_forward_context(context: Any, vllm_config: VllmConfig): """A context manager that stores the current forward context, can be attention metadata, etc.""" global _forward_context prev_context = _forward_context - _forward_context = context + _forward_context = ForwardContext( + static_forward_context=vllm_config.compilation_config. 
+ static_forward_context, + dynamic_forward_context=context) try: yield finally: diff --git a/vllm/model_executor/models/arctic.py b/vllm/model_executor/models/arctic.py index e58ad19cab54c..ac4c464aa10ac 100644 --- a/vllm/model_executor/models/arctic.py +++ b/vllm/model_executor/models/arctic.py @@ -223,6 +223,7 @@ def __init__( layer_idx: Optional[int] = None, cache_config: Optional[CacheConfig] = None, quant_config: Optional[QuantizationConfig] = None, + prefix: str = "", ): super().__init__() self.config = config @@ -274,7 +275,8 @@ def __init__( self.scaling, num_kv_heads=self.num_kv_heads, cache_config=cache_config, - quant_config=quant_config) + quant_config=quant_config, + prefix=f"{prefix}.attn") def forward( self, @@ -299,6 +301,7 @@ def __init__( layer_idx: int, cache_config: Optional[CacheConfig] = None, quant_config: Optional[QuantizationConfig] = None, + prefix: str = "", ) -> None: super().__init__() self.layer_idx = layer_idx @@ -308,7 +311,8 @@ def __init__( self.self_attn = ArcticAttention(config, layer_idx, cache_config, - quant_config=quant_config) + quant_config=quant_config, + prefix=f"{prefix}.self_attn") self.block_sparse_moe = ArcticMoE( config, layer_id=layer_idx, @@ -380,8 +384,11 @@ def __init__(self, *, vllm_config: VllmConfig, prefix: str = ""): org_num_embeddings=self.vocab_size) self.start_layer, self.end_layer, self.layers = make_layers( config.num_hidden_layers, - lambda prefix: ArcticDecoderLayer(config, int( - prefix.split(".")[-1]), cache_config, quant_config), + lambda prefix: ArcticDecoderLayer(config, + int(prefix.split(".")[-1]), + cache_config, + quant_config, + prefix=prefix), prefix=f"{prefix}.layers") self._attn_implementation = config._attn_implementation self.norm = RMSNorm(config.hidden_size, eps=config.rms_norm_eps) diff --git a/vllm/model_executor/models/baichuan.py b/vllm/model_executor/models/baichuan.py index 3749a16a38994..a923ed36a9db2 100644 --- a/vllm/model_executor/models/baichuan.py +++ b/vllm/model_executor/models/baichuan.py @@ -116,6 +116,7 @@ def __init__( max_position_embeddings: int = 8192, cache_config: Optional[CacheConfig] = None, quant_config: Optional[QuantizationConfig] = None, + prefix: str = "", ): super().__init__() self.hidden_size = hidden_size @@ -158,7 +159,8 @@ def __init__( self.head_dim, scaling, alibi_slopes=alibi_slopes, - quant_config=quant_config) + quant_config=quant_config, + prefix=f"{prefix}.attn") else: self.rotary_emb = get_rope( self.head_dim, @@ -171,7 +173,8 @@ def __init__( self.head_dim, self.scaling, cache_config=cache_config, - quant_config=quant_config) + quant_config=quant_config, + prefix=f"{prefix}.attn") def forward( self, @@ -195,7 +198,8 @@ def __init__(self, config: PretrainedConfig, position_embedding: str, cache_config: Optional[CacheConfig] = None, - quant_config: Optional[QuantizationConfig] = None): + quant_config: Optional[QuantizationConfig] = None, + prefix: str = ""): super().__init__() self.hidden_size = config.hidden_size rope_theta = getattr(config, "rope_theta", 10000) @@ -209,6 +213,7 @@ def __init__(self, max_position_embeddings=max_position_embeddings, cache_config=cache_config, quant_config=quant_config, + prefix=f"{prefix}.self_attn", ) self.mlp = BaiChuanMLP( hidden_size=self.hidden_size, @@ -275,8 +280,11 @@ def __init__( ) self.start_layer, self.end_layer, self.layers = make_layers( config.num_hidden_layers, - lambda prefix: BaiChuanDecoderLayer(config, position_embedding, - cache_config, quant_config), + lambda prefix: BaiChuanDecoderLayer(config, + 
position_embedding, + cache_config, + quant_config, + prefix=prefix), prefix=f"{prefix}.layers", ) self.norm = RMSNorm(config.hidden_size, eps=config.rms_norm_eps) diff --git a/vllm/model_executor/models/bart.py b/vllm/model_executor/models/bart.py index a50a5a5b018e1..3776490cb3465 100644 --- a/vllm/model_executor/models/bart.py +++ b/vllm/model_executor/models/bart.py @@ -126,6 +126,7 @@ def __init__( config: Optional[BartConfig] = None, cache_config: Optional[CacheConfig] = None, quant_config: Optional[QuantizationConfig] = None, + prefix: str = "", ): super().__init__() self.d_model = config.d_model @@ -178,7 +179,8 @@ def __init__( self.scaling, num_kv_heads=self.num_kv_heads, cache_config=cache_config, - quant_config=quant_config) + quant_config=quant_config, + prefix=f"{prefix}.attn") def forward(self, hidden_states: torch.Tensor, kv_cache: torch.Tensor, attn_metadata: AttentionMetadata) -> torch.Tensor: @@ -208,6 +210,7 @@ def __init__( config: Optional[BartConfig] = None, cache_config: Optional[CacheConfig] = None, quant_config: Optional[QuantizationConfig] = None, + prefix: str = "", ): super().__init__() self.d_model = config.d_model @@ -260,7 +263,8 @@ def __init__( self.scaling, num_kv_heads=self.num_kv_heads, cache_config=cache_config, - quant_config=quant_config) + quant_config=quant_config, + prefix=f"{prefix}.attn") def forward(self, hidden_states: torch.Tensor, kv_cache: torch.Tensor, attn_metadata: AttentionMetadata) -> torch.Tensor: @@ -290,6 +294,7 @@ def __init__( config: Optional[BartConfig] = None, cache_config: Optional[CacheConfig] = None, quant_config: Optional[QuantizationConfig] = None, + prefix: str = "", ): super().__init__() self.d_model = config.d_model @@ -342,7 +347,8 @@ def __init__( self.scaling, num_kv_heads=self.num_kv_heads, cache_config=cache_config, - quant_config=quant_config) + quant_config=quant_config, + prefix=f"{prefix}.attn") def forward( self, @@ -384,6 +390,7 @@ def __init__( config: BartConfig, cache_config: Optional[CacheConfig] = None, quant_config: Optional[QuantizationConfig] = None, + prefix: str = "", ): super().__init__() self.embed_dim = config.d_model @@ -393,7 +400,9 @@ def __init__( num_heads=config.encoder_attention_heads, config=config, cache_config=cache_config, - quant_config=quant_config) + quant_config=quant_config, + prefix=f"{prefix}.self_attn", + ) self.self_attn_layer_norm = nn.LayerNorm(self.embed_dim) self.activation_fn = get_act_fn(config.activation_function) @@ -464,6 +473,7 @@ def __init__( config: BartConfig, cache_config: Optional[CacheConfig] = None, quant_config: Optional[QuantizationConfig] = None, + prefix: str = "", ): super().__init__() self.embed_dim = config.d_model @@ -473,7 +483,9 @@ def __init__( num_heads=config.decoder_attention_heads, config=config, cache_config=cache_config, - quant_config=quant_config) + quant_config=quant_config, + prefix=f"{prefix}.self_attn", + ) self.activation_fn = get_act_fn(config.activation_function) self.self_attn_layer_norm = nn.LayerNorm(self.embed_dim) @@ -486,6 +498,7 @@ def __init__( self.embed_dim, config.decoder_attention_heads, config=config, + prefix=f"{prefix}.encoder_attn", ) self.encoder_attn_layer_norm = nn.LayerNorm(self.embed_dim) @@ -578,7 +591,8 @@ def __init__(self, cache_config: Optional[CacheConfig] = None, quant_config: Optional[QuantizationConfig] = None, lora_config: Optional[LoRAConfig] = None, - embed_tokens: Optional[nn.Embedding] = None): + embed_tokens: Optional[nn.Embedding] = None, + prefix: str = ""): super().__init__() self.cache_config = 
cache_config @@ -599,9 +613,13 @@ def __init__(self, config.max_position_embeddings, embed_dim, ) - self.layers = nn.ModuleList( - [BartEncoderLayer(config,cache_config,quant_config) \ - for _ in range(config.encoder_layers)]) + self.layers = nn.ModuleList([ + BartEncoderLayer(config, + cache_config, + quant_config, + prefix=f"{prefix}.layers.{layer_idx}") + for layer_idx in range(config.encoder_layers) + ]) self.layernorm_embedding = nn.LayerNorm(embed_dim) @@ -661,6 +679,7 @@ def __init__( quant_config: Optional[QuantizationConfig] = None, lora_config: Optional[LoRAConfig] = None, embed_tokens: Optional[nn.Embedding] = None, + prefix: str = "", ): super().__init__() self.cache_config = cache_config @@ -683,8 +702,9 @@ def __init__( ) self.layers = nn.ModuleList( - [BartDecoderLayer(config,cache_config,quant_config) \ - for _ in range(config.decoder_layers)]) + [BartDecoderLayer(config,cache_config,quant_config, + prefix=f"{prefix}.layers.{layer_idx}") \ + for layer_idx in range(config.decoder_layers)]) self.layernorm_embedding = nn.LayerNorm(config.d_model) @@ -759,10 +779,12 @@ def __init__(self, *, vllm_config: VllmConfig, prefix: str = ""): self.encoder = BartEncoder(config, cache_config, - quant_config=quant_config) + quant_config=quant_config, + prefix=f"{prefix}.encoder") self.decoder = BartDecoder(config, cache_config, - quant_config=quant_config) + quant_config=quant_config, + prefix=f"{prefix}.decoder") def forward(self, input_ids: torch.Tensor, positions: torch.Tensor, encoder_input_ids: torch.Tensor, diff --git a/vllm/model_executor/models/bloom.py b/vllm/model_executor/models/bloom.py index 1060d418474ef..fee74f491acc1 100644 --- a/vllm/model_executor/models/bloom.py +++ b/vllm/model_executor/models/bloom.py @@ -78,6 +78,7 @@ def __init__( config: BloomConfig, cache_config: Optional[CacheConfig] = None, quant_config: Optional[QuantizationConfig] = None, + prefix: str = "", ): super().__init__() self.hidden_size = config.hidden_size @@ -116,7 +117,8 @@ def __init__( scaling, alibi_slopes=alibi_slopes, cache_config=cache_config, - quant_config=quant_config) + quant_config=quant_config, + prefix=f"{prefix}.attn") def forward( self, @@ -168,14 +170,17 @@ def __init__( config: BloomConfig, cache_config: Optional[CacheConfig] = None, quant_config: Optional[QuantizationConfig] = None, + prefix: str = "", ): super().__init__() hidden_size = config.hidden_size self.input_layernorm = nn.LayerNorm(hidden_size, eps=config.layer_norm_epsilon) - self.self_attention = BloomAttention(config, cache_config, - quant_config) + self.self_attention = BloomAttention(config, + cache_config, + quant_config, + prefix=f"{prefix}.self_attention") self.post_attention_layernorm = nn.LayerNorm( hidden_size, eps=config.layer_norm_epsilon) self.mlp = BloomMLP(config, quant_config) @@ -242,7 +247,8 @@ def __init__(self, *, vllm_config: VllmConfig, prefix: str = ""): # Transformer blocks self.start_layer, self.end_layer, self.h = make_layers( config.num_hidden_layers, - lambda prefix: BloomBlock(config, cache_config, quant_config), + lambda prefix: BloomBlock( + config, cache_config, quant_config, prefix=prefix), prefix=f"{prefix}.h") # Final Layer Norm diff --git a/vllm/model_executor/models/chameleon.py b/vllm/model_executor/models/chameleon.py index 8f91abffaea90..5a6d6432112f0 100644 --- a/vllm/model_executor/models/chameleon.py +++ b/vllm/model_executor/models/chameleon.py @@ -223,6 +223,7 @@ def __init__( quant_config: Optional[QuantizationConfig] = None, bias: bool = False, cache_config: 
Optional[CacheConfig] = None, + prefix: str = "", ) -> None: super().__init__() self.hidden_size = hidden_size @@ -276,7 +277,8 @@ def __init__( self.scaling, num_kv_heads=self.num_kv_heads, cache_config=cache_config, - quant_config=quant_config) + quant_config=quant_config, + prefix=f"{prefix}.attn") def _apply_qk_norm(self, q: torch.Tensor, k: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]: @@ -313,6 +315,7 @@ def __init__( config: ChameleonConfig, cache_config: Optional[CacheConfig] = None, quant_config: Optional[QuantizationConfig] = None, + prefix: str = "", ) -> None: super().__init__() self.hidden_size = config.hidden_size @@ -336,6 +339,7 @@ def __init__( quant_config=quant_config, bias=False, cache_config=cache_config, + prefix=f"{prefix}.self_attn", ) self.mlp = ChameleonMLP( hidden_size=self.hidden_size, @@ -386,6 +390,7 @@ def __init__( config: ChameleonConfig, cache_config: Optional[CacheConfig] = None, quant_config: Optional[QuantizationConfig] = None, + prefix: str = "", ) -> None: super().__init__() self.hidden_size = config.hidden_size @@ -409,6 +414,7 @@ def __init__( quant_config=quant_config, bias=False, cache_config=cache_config, + prefix=f"{prefix}.self_attn", ) self.mlp = ChameleonMLP( hidden_size=self.hidden_size, @@ -855,7 +861,8 @@ def __init__(self, *, vllm_config: VllmConfig, prefix: str = ""): config.num_hidden_layers, lambda prefix: decoder_layer(config=config, cache_config=cache_config, - quant_config=quant_config), + quant_config=quant_config, + prefix=prefix), prefix=f"{prefix}.layers", ) diff --git a/vllm/model_executor/models/chatglm.py b/vllm/model_executor/models/chatglm.py index 2ea592aaba9f9..e3a068908b7f3 100644 --- a/vllm/model_executor/models/chatglm.py +++ b/vllm/model_executor/models/chatglm.py @@ -230,6 +230,7 @@ def __init__( config: ChatGLMConfig, cache_config: Optional[CacheConfig] = None, quant_config: Optional[QuantizationConfig] = None, + prefix: str = "", ): super().__init__() self.hidden_size = config.hidden_size @@ -285,7 +286,8 @@ def __init__( self.scaling, num_kv_heads=self.num_kv_heads, cache_config=cache_config, - quant_config=quant_config) + quant_config=quant_config, + prefix=f"{prefix}.attn") def forward( self, @@ -364,6 +366,7 @@ def __init__( config: ChatGLMConfig, cache_config: Optional[CacheConfig] = None, quant_config: Optional[QuantizationConfig] = None, + prefix: str = "", ): super().__init__() self.apply_residual_connection_post_layernorm = ( @@ -377,7 +380,10 @@ def __init__( eps=config.layernorm_epsilon) # Self attention. - self.self_attention = GLMAttention(config, cache_config, quant_config) + self.self_attention = GLMAttention(config, + cache_config, + quant_config, + prefix=f"{prefix}.self_attention") self.hidden_dropout = config.hidden_dropout # Layernorm on the attention output @@ -446,7 +452,8 @@ def __init__( # Transformer layers. 
self.start_layer, self.end_layer, self.layers = make_layers( self.num_layers, - lambda prefix: GLMBlock(config, cache_config, quant_config), + lambda prefix: GLMBlock( + config, cache_config, quant_config, prefix=prefix), prefix=f"{prefix}.layers", ) @@ -500,16 +507,22 @@ def __init__(self, *, vllm_config: VllmConfig, prefix: str = ""): self.num_layers = config.num_layers self.multi_query_group_num = config.multi_query_group_num self.kv_channels = config.kv_channels - self.encoder = GLMTransformer(config, cache_config, quant_config) + self.encoder = GLMTransformer(config, + cache_config, + quant_config, + prefix=f"{prefix}.encoder") self.output_layer = ParallelLMHead(config.padded_vocab_size, config.hidden_size, - quant_config=quant_config) + quant_config=quant_config, + prefix=f"{prefix}.output_layer") vision_config_flag = getattr(config, 'vision_config', None) if vision_config_flag is not None: self.vision_config = Namespace(**config.vision_config) - self.vision = EVA2CLIPModel(self.config, quant_config) + self.vision = EVA2CLIPModel(self.config, + quant_config, + prefix=f"{prefix}.vision") else: self.vision = None diff --git a/vllm/model_executor/models/commandr.py b/vllm/model_executor/models/commandr.py index 9fd083e5a02a9..85e24ca660686 100644 --- a/vllm/model_executor/models/commandr.py +++ b/vllm/model_executor/models/commandr.py @@ -120,6 +120,7 @@ def __init__( config: CohereConfig, cache_config: Optional[CacheConfig] = None, quant_config: Optional[QuantizationConfig] = None, + prefix: str = "", ): super().__init__() tp_size = get_tensor_model_parallel_world_size() @@ -175,7 +176,8 @@ def __init__( self.scaling, num_kv_heads=self.num_kv_heads, cache_config=cache_config, - quant_config=quant_config) + quant_config=quant_config, + prefix=f"{prefix}.attn") if self.use_qk_norm: self.q_norm = LayerNorm(param_shape=(self.num_heads, self.head_dim), @@ -215,13 +217,15 @@ class CohereDecoderLayer(nn.Module): def __init__(self, config: CohereConfig, cache_config: Optional[CacheConfig] = None, - quant_config: Optional[QuantizationConfig] = None): + quant_config: Optional[QuantizationConfig] = None, + prefix: str = ""): super().__init__() self.hidden_size = config.hidden_size self.self_attn = CohereAttention(config, cache_config, - quant_config=quant_config) + quant_config=quant_config, + prefix=f"{prefix}.self_attn") self.mlp = CohereMLP(config, quant_config=quant_config) self.input_layernorm = LayerNorm(param_shape=(config.hidden_size), @@ -271,8 +275,8 @@ def __init__(self, *, vllm_config: VllmConfig, prefix: str = ""): config.hidden_size) self.start_layer, self.end_layer, self.layers = make_layers( config.num_hidden_layers, - lambda prefix: CohereDecoderLayer(config, cache_config, - quant_config), + lambda prefix: CohereDecoderLayer( + config, cache_config, quant_config, prefix=prefix), prefix=f"{prefix}.layers") self.norm = LayerNorm(param_shape=(config.hidden_size), eps=config.layer_norm_eps) diff --git a/vllm/model_executor/models/dbrx.py b/vllm/model_executor/models/dbrx.py index eab338800249e..3932d8b52a9d1 100644 --- a/vllm/model_executor/models/dbrx.py +++ b/vllm/model_executor/models/dbrx.py @@ -154,6 +154,7 @@ def __init__( config: DbrxConfig, cache_config: Optional[CacheConfig] = None, quant_config: Optional[QuantizationConfig] = None, + prefix: str = "", ): super().__init__() self.d_model = config.d_model @@ -208,7 +209,8 @@ def __init__( self.scaling, num_kv_heads=self.num_kv_heads, cache_config=cache_config, - quant_config=quant_config) + quant_config=quant_config, + 
prefix=f"{prefix}.attn") def forward( self, @@ -234,10 +236,14 @@ def __init__( config: DbrxConfig, cache_config: Optional[CacheConfig] = None, quant_config: Optional[QuantizationConfig] = None, + prefix: str = "", ): super().__init__() self.d_model = config.d_model - self.attn = DbrxAttention(config, cache_config, quant_config) + self.attn = DbrxAttention(config, + cache_config, + quant_config, + prefix=f"{prefix}.attn") self.norm_1 = nn.LayerNorm(self.d_model) self.norm_2 = nn.LayerNorm(self.d_model) @@ -269,10 +275,14 @@ def __init__( config: DbrxConfig, cache_config: Optional[CacheConfig] = None, quant_config: Optional[QuantizationConfig] = None, + prefix: str = "", ): super().__init__() - self.norm_attn_norm = DbrxFusedNormAttention(config, cache_config, - quant_config) + self.norm_attn_norm = DbrxFusedNormAttention( + config, + cache_config, + quant_config, + prefix=f"{prefix}.norm_attn_norm") self.ffn = DbrxMoE(config, quant_config) def forward( @@ -308,7 +318,8 @@ def __init__(self, *, vllm_config: VllmConfig, prefix: str = ""): ) self.start_layer, self.end_layer, self.blocks = make_layers( config.n_layers, - lambda prefix: DbrxBlock(config, cache_config, quant_config), + lambda prefix: DbrxBlock( + config, cache_config, quant_config, prefix=prefix), prefix=f"{prefix}.blocks", ) self.norm_f = nn.LayerNorm(config.d_model, eps=1e-5) diff --git a/vllm/model_executor/models/deepseek.py b/vllm/model_executor/models/deepseek.py index 8c5ad9904e925..32488d931ea1c 100644 --- a/vllm/model_executor/models/deepseek.py +++ b/vllm/model_executor/models/deepseek.py @@ -184,6 +184,7 @@ def __init__( max_position_embeddings: int = 8192, cache_config: Optional[CacheConfig] = None, quant_config: Optional[QuantizationConfig] = None, + prefix: str = "", ) -> None: super().__init__() self.hidden_size = hidden_size @@ -236,7 +237,8 @@ def __init__( self.scaling, num_kv_heads=self.num_kv_heads, cache_config=cache_config, - quant_config=quant_config) + quant_config=quant_config, + prefix=f"{prefix}.attn") def forward( self, @@ -261,6 +263,7 @@ def __init__( layer_idx: int, cache_config: Optional[CacheConfig] = None, quant_config: Optional[QuantizationConfig] = None, + prefix: str = "", ) -> None: super().__init__() self.hidden_size = config.hidden_size @@ -277,6 +280,7 @@ def __init__( max_position_embeddings=max_position_embeddings, cache_config=cache_config, quant_config=quant_config, + prefix=f"{prefix}.self_attn", ) if (config.n_routed_experts is not None and layer_idx >= config.first_k_dense_replace @@ -346,7 +350,8 @@ def __init__(self, *, vllm_config: VllmConfig, prefix: str = ""): lambda prefix: DeepseekDecoderLayer(config, int(prefix.split(".")[-1]), cache_config, - quant_config=quant_config), + quant_config=quant_config, + prefix=prefix), prefix=f"{prefix}.layers") self.norm = RMSNorm(config.hidden_size, eps=config.rms_norm_eps) self.make_empty_intermediate_tensors = ( diff --git a/vllm/model_executor/models/deepseek_v2.py b/vllm/model_executor/models/deepseek_v2.py index d2c4ca0bf85e9..4cf4e6c358bf2 100644 --- a/vllm/model_executor/models/deepseek_v2.py +++ b/vllm/model_executor/models/deepseek_v2.py @@ -268,7 +268,8 @@ def __init__( self.scaling, num_kv_heads=self.num_local_heads, cache_config=cache_config, - quant_config=quant_config) + quant_config=quant_config, + prefix=f"{prefix}.attn") def forward( self, diff --git a/vllm/model_executor/models/exaone.py b/vllm/model_executor/models/exaone.py index 9d739d0479548..5ca26d53a17e7 100644 --- a/vllm/model_executor/models/exaone.py +++ 
b/vllm/model_executor/models/exaone.py @@ -174,6 +174,7 @@ def __init__( num_kv_heads=self.num_kv_heads, cache_config=cache_config, quant_config=quant_config, + prefix=f"{prefix}.attn", ) def forward( @@ -219,7 +220,7 @@ def __init__( quant_config=quant_config, bias=bias, cache_config=cache_config, - prefix=prefix, + prefix=f"{prefix}.attention", ) def forward( diff --git a/vllm/model_executor/models/falcon.py b/vllm/model_executor/models/falcon.py index 2aa4b67d99894..096ad32b38e86 100644 --- a/vllm/model_executor/models/falcon.py +++ b/vllm/model_executor/models/falcon.py @@ -84,6 +84,7 @@ def __init__( config: FalconConfig, cache_config: Optional[CacheConfig] = None, quant_config: Optional[QuantizationConfig] = None, + prefix: str = "", ): super().__init__() @@ -158,7 +159,8 @@ def __init__( self.head_dim, self.inv_norm_factor, num_kv_heads=self.num_kv_heads, - quant_config=quant_config) + quant_config=quant_config, + prefix=f"{prefix}.attn") elif self.use_alibi: tp_rank = get_tensor_model_parallel_rank() head_start = tp_rank * self.num_heads @@ -171,14 +173,16 @@ def __init__( self.inv_norm_factor, num_kv_heads=self.num_kv_heads, alibi_slopes=alibi_slopes, - quant_config=quant_config) + quant_config=quant_config, + prefix=f"{prefix}.attn") else: self.attn = Attention(self.num_heads, self.head_dim, scale=self.inv_norm_factor, num_kv_heads=self.num_kv_heads, cache_config=cache_config, - quant_config=quant_config) + quant_config=quant_config, + prefix=f"{prefix}.attn") def forward( self, @@ -241,12 +245,16 @@ def __init__( config: FalconConfig, cache_config: Optional[CacheConfig] = None, quant_config: Optional[QuantizationConfig] = None, + prefix: str = "", ): super().__init__() hidden_size = config.hidden_size self.num_heads = config.num_attention_heads - self.self_attention = FalconAttention(config, cache_config, - quant_config) + self.self_attention = FalconAttention( + config, + cache_config, + quant_config, + prefix=f"{prefix}.self_attention") self.mlp = FalconMLP(config, quant_config) self.config = config @@ -357,8 +365,8 @@ def __init__(self, *, vllm_config: VllmConfig, prefix: str = ""): # Transformer blocks self.start_layer, self.end_layer, self.h = make_layers( config.num_hidden_layers, - lambda prefix: FalconDecoderLayer(config, cache_config, - quant_config), + lambda prefix: FalconDecoderLayer( + config, cache_config, quant_config, prefix=prefix), prefix=f"{prefix}.h") # Final Layer Norm diff --git a/vllm/model_executor/models/florence2.py b/vllm/model_executor/models/florence2.py index d3a9ff6915b84..3a5fe8e1f4144 100644 --- a/vllm/model_executor/models/florence2.py +++ b/vllm/model_executor/models/florence2.py @@ -35,10 +35,12 @@ def __init__(self, *, vllm_config: VllmConfig, prefix: str = ""): self.shared = BartScaledWordEmbedding(self.vocab_size, config.d_model) self.encoder = BartEncoder(config, cache_config=cache_config, - quant_config=quant_config) + quant_config=quant_config, + prefix=f"{prefix}.encoder") self.decoder = BartDecoder(config, cache_config=cache_config, - quant_config=quant_config) + quant_config=quant_config, + prefix=f"{prefix}.decoder") if self.config.tie_word_embeddings: self.encoder.embed_tokens.weight = self.shared.weight @@ -99,7 +101,7 @@ def __init__(self, *, vllm_config: VllmConfig, prefix: str = ""): self.config = config self.model = Florence2LanguageModel(vllm_config=vllm_config, - prefix=prefix) + prefix=f"{prefix}.model") embed_scale = math.sqrt( config.d_model) if config.scale_embedding else 1.0 @@ -198,7 +200,7 @@ def __init__(self, *, 
vllm_config: VllmConfig, prefix: str = ""): # TODO(Isotr0py): Add vision backbone self.language_model = Florence2LanguageForConditionalGeneration( vllm_config=vllm_config.with_hf_config(config.text_config), - prefix=prefix, + prefix=f"{prefix}.language_model", ) @property diff --git a/vllm/model_executor/models/gemma.py b/vllm/model_executor/models/gemma.py index 64e03b30bf2f1..131e9af139c2a 100644 --- a/vllm/model_executor/models/gemma.py +++ b/vllm/model_executor/models/gemma.py @@ -174,7 +174,8 @@ def __init__( self.scaling, num_kv_heads=self.num_kv_heads, cache_config=cache_config, - quant_config=quant_config) + quant_config=quant_config, + prefix=f"{prefix}.attn") def forward( self, diff --git a/vllm/model_executor/models/gemma2.py b/vllm/model_executor/models/gemma2.py index 4ba39223cc07f..839130364ef4d 100644 --- a/vllm/model_executor/models/gemma2.py +++ b/vllm/model_executor/models/gemma2.py @@ -95,7 +95,8 @@ def __init__(self, rope_theta: float, cache_config: Optional[CacheConfig] = None, quant_config: Optional[QuantizationConfig] = None, - attn_logits_soft_cap: Optional[float] = None) -> None: + attn_logits_soft_cap: Optional[float] = None, + prefix: str = "") -> None: super().__init__() self.layer_idx = layer_idx self.config = config @@ -154,7 +155,8 @@ def __init__(self, num_kv_heads=self.num_kv_heads, cache_config=cache_config, quant_config=quant_config, - logits_soft_cap=attn_logits_soft_cap) + logits_soft_cap=attn_logits_soft_cap, + prefix=f"{prefix}.attn") def forward( self, @@ -179,6 +181,7 @@ def __init__( config: Gemma2Config, cache_config: Optional[CacheConfig] = None, quant_config: Optional[QuantizationConfig] = None, + prefix: str = "", ) -> None: super().__init__() self.hidden_size = config.hidden_size @@ -194,6 +197,7 @@ def __init__( cache_config=cache_config, quant_config=quant_config, attn_logits_soft_cap=config.attn_logit_softcapping, + prefix=f"{prefix}.self_attn", ) self.hidden_size = config.hidden_size self.mlp = Gemma2MLP( @@ -257,8 +261,11 @@ def __init__(self, *, vllm_config: VllmConfig, prefix: str = ""): ) self.start_layer, self.end_layer, self.layers = make_layers( config.num_hidden_layers, - lambda prefix: Gemma2DecoderLayer(int(prefix.split(".")[ - -1]), config, cache_config, quant_config), + lambda prefix: Gemma2DecoderLayer(int(prefix.split(".")[-1]), + config, + cache_config, + quant_config, + prefix=prefix), prefix=f"{prefix}.layers") self.norm = GemmaRMSNorm(config.hidden_size, eps=config.rms_norm_eps) diff --git a/vllm/model_executor/models/glm4_vision_encoder.py b/vllm/model_executor/models/glm4_vision_encoder.py index 025615b0920fd..f37ab0f82d52a 100644 --- a/vllm/model_executor/models/glm4_vision_encoder.py +++ b/vllm/model_executor/models/glm4_vision_encoder.py @@ -56,6 +56,7 @@ def __init__( self, config, quant_config: Optional[QuantizationConfig] = None, + prefix: str = '', ): super().__init__() self.hidden_size = config.hidden_size @@ -135,11 +136,14 @@ def __init__( self, config, quant_config: Optional[QuantizationConfig] = None, + prefix: str = '', ): super().__init__() self.input_layernorm = LayerNorm(config.hidden_size, eps=config.layer_norm_eps) - self.attention = Attention(config, quant_config=quant_config) + self.attention = Attention(config, + quant_config=quant_config, + prefix=f"{prefix}.attention") self.mlp = MLP(config, quant_config=quant_config) self.post_attention_layernorm = LayerNorm(config.hidden_size, eps=config.layer_norm_eps) @@ -161,11 +165,14 @@ def __init__( self, config, quant_config: Optional[QuantizationConfig] 
= None, + prefix: str = '', ): super().__init__() self.layers = nn.ModuleList([ - TransformerLayer(config, quant_config=quant_config) - for _ in range(config.num_hidden_layers) + TransformerLayer(config, + quant_config=quant_config, + prefix=f"{prefix}.layer.{layer_idx}") + for layer_idx in range(config.num_hidden_layers) ]) def forward(self, hidden_states): @@ -252,12 +259,14 @@ def __init__( self, config, quant_config: Optional[QuantizationConfig] = None, + prefix: str = '', ): super().__init__() vision_config = Namespace(**config.vision_config) self.patch_embedding = PatchEmbedding(vision_config) self.transformer = Transformer(vision_config, - quant_config=quant_config) + quant_config=quant_config, + prefix=f"{prefix}.transformer") self.linear_proj = GLU(config, in_features=config.hidden_size, quant_config=quant_config) diff --git a/vllm/model_executor/models/gpt2.py b/vllm/model_executor/models/gpt2.py index 1c61408ae1dd9..fd926ff0254d4 100644 --- a/vllm/model_executor/models/gpt2.py +++ b/vllm/model_executor/models/gpt2.py @@ -84,7 +84,8 @@ def __init__( self.head_dim, scale=self.scale, cache_config=cache_config, - quant_config=quant_config) + quant_config=quant_config, + prefix=f"{prefix}.attn") def forward( self, diff --git a/vllm/model_executor/models/gpt_bigcode.py b/vllm/model_executor/models/gpt_bigcode.py index 50a143cb1b600..c64bc70688806 100644 --- a/vllm/model_executor/models/gpt_bigcode.py +++ b/vllm/model_executor/models/gpt_bigcode.py @@ -52,6 +52,7 @@ def __init__( config: GPTBigCodeConfig, cache_config: Optional[CacheConfig] = None, quant_config: Optional[QuantizationConfig] = None, + prefix: str = "", ): super().__init__() self.hidden_size = config.hidden_size @@ -92,7 +93,8 @@ def __init__( scale=self.scale, num_kv_heads=self.num_kv_heads, cache_config=cache_config, - quant_config=quant_config) + quant_config=quant_config, + prefix=f"{prefix}.attn") def forward( self, @@ -151,6 +153,7 @@ def __init__( config: GPTBigCodeConfig, cache_config: Optional[CacheConfig] = None, quant_config: Optional[QuantizationConfig] = None, + prefix: str = "", ): super().__init__() hidden_size = config.hidden_size @@ -158,7 +161,10 @@ def __init__( hidden_size) self.ln_1 = nn.LayerNorm(hidden_size, eps=config.layer_norm_epsilon) - self.attn = GPTBigCodeAttention(config, cache_config, quant_config) + self.attn = GPTBigCodeAttention(config, + cache_config, + quant_config, + prefix=f"{prefix}.attn") self.ln_2 = nn.LayerNorm(hidden_size, eps=config.layer_norm_epsilon) self.mlp = GPTBigMLP(inner_dim, config, quant_config) @@ -210,7 +216,8 @@ def __init__(self, *, vllm_config: VllmConfig, prefix: str = ""): self.wpe = nn.Embedding(config.max_position_embeddings, self.embed_dim) self.start_layer, self.end_layer, self.h = make_layers( config.num_hidden_layers, - lambda prefix: GPTBigCodeBlock(config, cache_config, quant_config), + lambda prefix: GPTBigCodeBlock( + config, cache_config, quant_config, prefix=prefix), prefix=f"{prefix}.h", ) self.ln_f = nn.LayerNorm(self.embed_dim, eps=config.layer_norm_epsilon) diff --git a/vllm/model_executor/models/gpt_j.py b/vllm/model_executor/models/gpt_j.py index d5defc60764e6..4829578a56959 100644 --- a/vllm/model_executor/models/gpt_j.py +++ b/vllm/model_executor/models/gpt_j.py @@ -53,6 +53,7 @@ def __init__( config: GPTJConfig, cache_config: Optional[CacheConfig] = None, quant_config: Optional[QuantizationConfig] = None, + prefix: str = "", ): super().__init__() self.total_num_heads = config.num_attention_heads @@ -94,7 +95,8 @@ def __init__( 
self.head_size, scaling, cache_config=cache_config, - quant_config=quant_config) + quant_config=quant_config, + prefix=f"{prefix}.attn") def forward( self, @@ -147,12 +149,16 @@ def __init__( config: GPTJConfig, cache_config: Optional[CacheConfig] = None, quant_config: Optional[QuantizationConfig] = None, + prefix: str = "", ): super().__init__() inner_dim = (4 * config.n_embd if config.n_inner is None else config.n_inner) self.ln_1 = nn.LayerNorm(config.n_embd, eps=config.layer_norm_epsilon) - self.attn = GPTJAttention(config, cache_config, quant_config) + self.attn = GPTJAttention(config, + cache_config, + quant_config, + prefix=f"{prefix}.attn") self.mlp = GPTJMLP(inner_dim, config, quant_config) def forward( @@ -193,7 +199,8 @@ def __init__(self, *, vllm_config: VllmConfig, prefix: str = ""): ) self.start_layer, self.end_layer, self.h = make_layers( config.n_layer, - lambda prefix: GPTJBlock(config, cache_config, quant_config), + lambda prefix: GPTJBlock( + config, cache_config, quant_config, prefix=prefix), prefix=f"{prefix}.h", ) self.ln_f = nn.LayerNorm(self.embed_dim, eps=config.layer_norm_epsilon) diff --git a/vllm/model_executor/models/gpt_neox.py b/vllm/model_executor/models/gpt_neox.py index 0bb5e2f9b95f9..731642772011c 100644 --- a/vllm/model_executor/models/gpt_neox.py +++ b/vllm/model_executor/models/gpt_neox.py @@ -52,6 +52,7 @@ def __init__( config: GPTNeoXConfig, cache_config: Optional[CacheConfig] = None, quant_config: Optional[QuantizationConfig] = None, + prefix: str = "", ): super().__init__() self.total_num_heads = config.num_attention_heads @@ -94,7 +95,8 @@ def __init__( self.head_size, scaling, cache_config=cache_config, - quant_config=quant_config) + quant_config=quant_config, + prefix=f"{prefix}.attn") def forward( self, @@ -145,6 +147,7 @@ def __init__( config: GPTNeoXConfig, cache_config: Optional[CacheConfig] = None, quant_config: Optional[QuantizationConfig] = None, + prefix: str = "", ): super().__init__() self.use_parallel_residual = config.use_parallel_residual @@ -152,7 +155,10 @@ def __init__( eps=config.layer_norm_eps) self.post_attention_layernorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) - self.attention = GPTNeoXAttention(config, cache_config, quant_config) + self.attention = GPTNeoXAttention(config, + cache_config, + quant_config, + prefix=f"{prefix}.attention") self.mlp = GPTNeoXMLP(config, quant_config) def forward( @@ -205,7 +211,8 @@ def __init__(self, *, vllm_config: VllmConfig, prefix: str = ""): ) self.start_layer, self.end_layer, self.layers = make_layers( config.num_hidden_layers, - lambda prefix: GPTNeoXLayer(config, cache_config, quant_config), + lambda prefix: GPTNeoXLayer( + config, cache_config, quant_config, prefix=prefix), prefix=f"{prefix}.layers", ) self.final_layer_norm = nn.LayerNorm(config.hidden_size, diff --git a/vllm/model_executor/models/granite.py b/vllm/model_executor/models/granite.py index c1e2e87f08ec3..bd2394e71c973 100644 --- a/vllm/model_executor/models/granite.py +++ b/vllm/model_executor/models/granite.py @@ -161,7 +161,8 @@ def __init__( self.scaling, num_kv_heads=self.num_kv_heads, cache_config=cache_config, - quant_config=quant_config) + quant_config=quant_config, + prefix=f"{prefix}.attn") def forward( self, diff --git a/vllm/model_executor/models/granitemoe.py b/vllm/model_executor/models/granitemoe.py index a91a18816995f..51296ef0cc08e 100644 --- a/vllm/model_executor/models/granitemoe.py +++ b/vllm/model_executor/models/granitemoe.py @@ -164,7 +164,8 @@ def __init__( self.scaling, 
num_kv_heads=self.num_kv_heads, cache_config=cache_config, - quant_config=quant_config) + quant_config=quant_config, + prefix=f"{prefix}.attn") def forward( self, diff --git a/vllm/model_executor/models/internlm2.py b/vllm/model_executor/models/internlm2.py index 94b819b5d9366..906128940ff76 100644 --- a/vllm/model_executor/models/internlm2.py +++ b/vllm/model_executor/models/internlm2.py @@ -1,5 +1,5 @@ from functools import partial -from typing import Any, Dict, Iterable, List, Optional, Set, Tuple, Union +from typing import Any, Dict, Iterable, List, Optional, Set, Tuple, Type, Union import torch from torch import nn @@ -250,7 +250,12 @@ def forward( @support_torch_compile class InternLM2Model(nn.Module): - def __init__(self, *, vllm_config: VllmConfig, prefix: str = ""): + def __init__( + self, + *, + vllm_config: VllmConfig, + prefix: str = "", + layer_type: Type[InternLMDecoderLayer] = InternLMDecoderLayer): super().__init__() config = vllm_config.model_config.hf_config @@ -266,7 +271,7 @@ def __init__(self, *, vllm_config: VllmConfig, prefix: str = ""): ) self.start_layer, self.end_layer, self.layers = make_layers( config.num_hidden_layers, - lambda prefix: InternLMDecoderLayer( + lambda prefix: layer_type( config, cache_config, quant_config, prefix=prefix), prefix=f"{prefix}.layers") self.norm = RMSNorm(config.hidden_size, eps=config.rms_norm_eps) @@ -316,14 +321,18 @@ def forward( class InternLM2ForCausalLM(nn.Module, SupportsPP): - def __init__(self, *, vllm_config: VllmConfig, prefix: str = ""): + def __init__(self, + *, + vllm_config: VllmConfig, + prefix: str = "", + model_type: Type[InternLM2Model] = InternLM2Model): super().__init__() config = vllm_config.model_config.hf_config quant_config = vllm_config.quant_config self.config = config self.quant_config = quant_config - self.model = InternLM2Model(vllm_config=vllm_config, - prefix=maybe_prefix(prefix, "model")) + self.model = model_type(vllm_config=vllm_config, + prefix=maybe_prefix(prefix, "model")) self.output = ParallelLMHead(config.vocab_size, config.hidden_size, quant_config=quant_config, diff --git a/vllm/model_executor/models/internlm2_ve.py b/vllm/model_executor/models/internlm2_ve.py index f1b7c896cadfe..93ac2dcf8d587 100644 --- a/vllm/model_executor/models/internlm2_ve.py +++ b/vllm/model_executor/models/internlm2_ve.py @@ -14,8 +14,6 @@ InternLM2MLP, InternLM2Model) from vllm.sequence import IntermediateTensors -from .utils import make_layers, maybe_prefix - class InternLM2VEDecoderLayer(nn.Module): @@ -105,17 +103,9 @@ def forward( class InternLM2VEModel(InternLM2Model): def __init__(self, *, vllm_config: VllmConfig, prefix: str = ""): - super().__init__(vllm_config=vllm_config, prefix=prefix) - - config = vllm_config.model_config.hf_config - cache_config = vllm_config.cache_config - quant_config = vllm_config.quant_config - - self.start_layer, self.end_layer, self.layers = make_layers( - config.num_hidden_layers, - lambda prefix: InternLM2VEDecoderLayer( - config, cache_config, quant_config, prefix=prefix), - prefix=f"{prefix}.layers") + super().__init__(vllm_config=vllm_config, + prefix=prefix, + layer_type=InternLM2VEDecoderLayer) def forward( self, @@ -159,7 +149,6 @@ def forward( class InternLM2VEForCausalLM(InternLM2ForCausalLM): def __init__(self, *, vllm_config: VllmConfig, prefix: str = ""): - super().__init__(vllm_config=vllm_config, prefix=prefix) - - self.model = InternLM2VEModel(vllm_config=vllm_config, - prefix=maybe_prefix(prefix, "model")) + super().__init__(vllm_config=vllm_config, + 
prefix=prefix, + model_type=InternLM2VEModel) diff --git a/vllm/model_executor/models/jais.py b/vllm/model_executor/models/jais.py index 41db85b678456..8c81dff6b5768 100644 --- a/vllm/model_executor/models/jais.py +++ b/vllm/model_executor/models/jais.py @@ -76,6 +76,7 @@ def __init__( config: JAISConfig, cache_config: Optional[CacheConfig] = None, quant_config: Optional[QuantizationConfig] = None, + prefix: str = "", ): super().__init__() self.hidden_size = config.hidden_size @@ -114,7 +115,8 @@ def __init__( scale=self.scale, alibi_slopes=alibi_slopes, cache_config=cache_config, - quant_config=quant_config) + quant_config=quant_config, + prefix=f"{prefix}.attn") def forward( self, @@ -178,6 +180,7 @@ def __init__( config: JAISConfig, cache_config: Optional[CacheConfig] = None, quant_config: Optional[QuantizationConfig] = None, + prefix: str = "", ): super().__init__() hidden_size = config.hidden_size @@ -185,7 +188,10 @@ def __init__( hidden_size) self.ln_1 = nn.LayerNorm(hidden_size, eps=config.layer_norm_epsilon) - self.attn = JAISAttention(config, cache_config, quant_config) + self.attn = JAISAttention(config, + cache_config, + quant_config, + prefix=f"{prefix}.attn") self.ln_2 = nn.LayerNorm(hidden_size, eps=config.layer_norm_epsilon) self.mlp = JAISMLP(inner_dim, config, quant_config) @@ -241,7 +247,8 @@ def __init__(self, *, vllm_config: VllmConfig, prefix: str = ""): config.num_hidden_layers, lambda prefix: JAISBlock(config=config, cache_config=cache_config, - quant_config=quant_config), + quant_config=quant_config, + prefix=prefix), prefix=f"{prefix}.h", ) diff --git a/vllm/model_executor/models/jamba.py b/vllm/model_executor/models/jamba.py index f83f0fce7275f..099ca7e12b288 100644 --- a/vllm/model_executor/models/jamba.py +++ b/vllm/model_executor/models/jamba.py @@ -102,7 +102,8 @@ def __init__(self, config: JambaConfig, layer_idx: int, cache_config: Optional[CacheConfig] = None, - quant_config: Optional[QuantizationConfig] = None) -> None: + quant_config: Optional[QuantizationConfig] = None, + prefix: str = "") -> None: super().__init__() self.config = config self.mamba = MambaMixer(hidden_size= config.hidden_size, @@ -157,6 +158,7 @@ def __init__( layer_idx: int, cache_config: Optional[CacheConfig] = None, quant_config: Optional[QuantizationConfig] = None, + prefix: str = "", ) -> None: super().__init__() self.hidden_size = config.hidden_size @@ -198,6 +200,7 @@ def __init__( self.scaling, num_kv_heads=self.num_kv_heads, cache_config=cache_config, + prefix=f"{prefix}.attn", ) num_experts = config.layers_num_experts[layer_idx] @@ -287,7 +290,8 @@ def __init__(self, *, vllm_config: VllmConfig, prefix: str = ""): layer_class(config, layer_idx=i, cache_config=cache_config, - quant_config=quant_config)) + quant_config=quant_config, + prefix=f"{prefix}.layers.{i}")) self.layers = nn.ModuleList(decoder_layers) self.final_layernorm = RMSNorm(config.hidden_size, eps=config.rms_norm_eps) diff --git a/vllm/model_executor/models/llama.py b/vllm/model_executor/models/llama.py index 2b40e9ec73fad..66b29e72cfa89 100644 --- a/vllm/model_executor/models/llama.py +++ b/vllm/model_executor/models/llama.py @@ -174,6 +174,7 @@ def __init__( num_kv_heads=self.num_kv_heads, cache_config=cache_config, quant_config=quant_config, + prefix=f"{prefix}.attn", ) def forward( diff --git a/vllm/model_executor/models/minicpm.py b/vllm/model_executor/models/minicpm.py index b92bff4d7c28c..c9a573278a136 100644 --- a/vllm/model_executor/models/minicpm.py +++ b/vllm/model_executor/models/minicpm.py @@ -192,6 
+192,7 @@ def __init__( max_position_embeddings: int = 8192, cache_config: Optional[CacheConfig] = None, quant_config: Optional[QuantizationConfig] = None, + prefix: str = "", ) -> None: super().__init__() self.hidden_size = hidden_size @@ -246,7 +247,8 @@ def __init__( self.scaling, num_kv_heads=self.num_kv_heads, cache_config=cache_config, - quant_config=quant_config) + quant_config=quant_config, + prefix=f"{prefix}.attn") def forward( self, @@ -273,6 +275,7 @@ def __init__( config: PretrainedConfig, cache_config: Optional[CacheConfig] = None, quant_config: Optional[QuantizationConfig] = None, + prefix: str = "", ) -> None: super().__init__() self.config = config @@ -283,6 +286,7 @@ def __init__( self.rope_scaling = getattr(config, "rope_scaling", None) self.max_position_embeddings = getattr(config, "max_position_embeddings", 8192) + self.prefix = prefix self._init_attn_block() self._init_ffn_block() @@ -298,6 +302,7 @@ def _init_attn_block(self): max_position_embeddings=self.max_position_embeddings, cache_config=self.cache_config, quant_config=self.quant_config, + prefix=f"{self.prefix}.self_attn", ) def _init_ffn_block(self): @@ -388,8 +393,8 @@ def _init_layers( ): self.start_layer, self.end_layer, self.layers = make_layers( config.num_hidden_layers, - lambda prefix: MiniCPMDecoderLayer(config, cache_config, - quant_config), + lambda prefix: MiniCPMDecoderLayer( + config, cache_config, quant_config, prefix=prefix), prefix=f"{prefix}.layers") def get_input_embeddings(self, input_ids: torch.Tensor) -> torch.Tensor: diff --git a/vllm/model_executor/models/minicpm3.py b/vllm/model_executor/models/minicpm3.py index 278c4bbe6e563..c38c31a0d4953 100644 --- a/vllm/model_executor/models/minicpm3.py +++ b/vllm/model_executor/models/minicpm3.py @@ -60,6 +60,7 @@ def __init__( max_position_embeddings: int = 8192, cache_config: Optional[CacheConfig] = None, quant_config: Optional[QuantizationConfig] = None, + prefix: str = "", ) -> None: super().__init__() self.hidden_size = hidden_size @@ -119,7 +120,8 @@ def __init__( self.scaling, num_kv_heads=self.num_local_heads, cache_config=cache_config, - quant_config=quant_config) + quant_config=quant_config, + prefix=f"{prefix}.attn") def forward( self, @@ -195,6 +197,7 @@ def _init_attn_block(self): max_position_embeddings=self.max_position_embeddings, cache_config=self.cache_config, quant_config=self.quant_config, + prefix=f"{self.prefix}.self_attn", ) @@ -209,8 +212,8 @@ def _init_layers( ): self.start_layer, self.end_layer, self.layers = make_layers( config.num_hidden_layers, - lambda prefix: MiniCPM3DecoderLayer(config, cache_config, - quant_config), + lambda prefix: MiniCPM3DecoderLayer( + config, cache_config, quant_config, prefix=prefix), prefix=f"{prefix}.layers") diff --git a/vllm/model_executor/models/mixtral.py b/vllm/model_executor/models/mixtral.py index 0faffb4f1b00c..a5b364fe5ec85 100644 --- a/vllm/model_executor/models/mixtral.py +++ b/vllm/model_executor/models/mixtral.py @@ -166,7 +166,8 @@ def __init__( self.scaling, num_kv_heads=self.num_kv_heads, cache_config=cache_config, - quant_config=quant_config) + quant_config=quant_config, + prefix=f"{prefix}.attn") def forward( self, diff --git a/vllm/model_executor/models/mixtral_quant.py b/vllm/model_executor/models/mixtral_quant.py index ddd6afcf6a1b6..7a9b8cd88cfd0 100644 --- a/vllm/model_executor/models/mixtral_quant.py +++ b/vllm/model_executor/models/mixtral_quant.py @@ -170,6 +170,7 @@ def __init__( rope_theta: float = 10000, quant_config: Optional[QuantizationConfig] = None, 
cache_config: Optional[CacheConfig] = None, + prefix: str = "", ) -> None: super().__init__() self.hidden_size = hidden_size @@ -219,7 +220,8 @@ def __init__( self.scaling, num_kv_heads=self.num_kv_heads, cache_config=cache_config, - quant_config=quant_config) + quant_config=quant_config, + prefix=f"{prefix}.attn") def forward( self, @@ -243,6 +245,7 @@ def __init__( config: MixtralConfig, cache_config: Optional[CacheConfig] = None, quant_config: Optional[QuantizationConfig] = None, + prefix: str = "", ) -> None: super().__init__() self.hidden_size = config.hidden_size @@ -255,7 +258,9 @@ def __init__( num_kv_heads=config.num_key_value_heads, rope_theta=rope_theta, cache_config=cache_config, - quant_config=quant_config) + quant_config=quant_config, + prefix=f"{prefix}.self_attn", + ) self.block_sparse_moe = MixtralMoE(config=config, quant_config=quant_config) self.input_layernorm = RMSNorm(config.hidden_size, @@ -311,7 +316,8 @@ def __init__(self, *, vllm_config: VllmConfig, prefix: str = ""): self.start_layer, self.end_layer, self.layers = make_layers( config.num_hidden_layers, lambda prefix: MixtralDecoderLayer( - config, cache_config, quant_config=quant_config), + config, cache_config, quant_config=quant_config, prefix=prefix + ), prefix=f"{prefix}.layers") self.norm = RMSNorm(config.hidden_size, eps=config.rms_norm_eps) self.make_empty_intermediate_tensors = ( diff --git a/vllm/model_executor/models/molmo.py b/vllm/model_executor/models/molmo.py index 2528f741864b3..ee7b560fe1ee4 100644 --- a/vllm/model_executor/models/molmo.py +++ b/vllm/model_executor/models/molmo.py @@ -370,6 +370,7 @@ def __init__( config: PretrainedConfig, cache_config: Optional[CacheConfig] = None, quant_config: Optional[QuantizationConfig] = None, + prefix: str = "", ) -> None: super().__init__() self.hidden_size = config.hidden_size @@ -427,7 +428,8 @@ def __init__( self.scaling, num_kv_heads=self.num_kv_heads, cache_config=cache_config, - quant_config=quant_config) + quant_config=quant_config, + prefix=f"{prefix}.attn") # Attention output projection. self.o_proj = RowParallelLinear( @@ -517,10 +519,14 @@ def __init__( config: PretrainedConfig, cache_config: Optional[CacheConfig] = None, quant_config: Optional[QuantizationConfig] = None, + prefix: str = "", ) -> None: super().__init__() # Attention block. - self.self_attn = MolmoAttention(config, cache_config, quant_config) + self.self_attn = MolmoAttention(config, + cache_config, + quant_config, + prefix=f"{prefix}.self_attn") # MLP block. 
self.mlp = MolmoMLP(config, quant_config=quant_config) @@ -738,7 +744,8 @@ def __init__(self, *, vllm_config: VllmConfig, prefix: str = ""): else MolmoDecoderLayer self.start_layer, self.end_layer, self.layers = make_layers( config.num_hidden_layers, - lambda prefix: decoder_layer(config, cache_config, quant_config), + lambda prefix: decoder_layer( + config, cache_config, quant_config, prefix=prefix), prefix=f"{prefix}.layers", ) diff --git a/vllm/model_executor/models/mpt.py b/vllm/model_executor/models/mpt.py index 8716e92b0f1c2..1235816413a44 100644 --- a/vllm/model_executor/models/mpt.py +++ b/vllm/model_executor/models/mpt.py @@ -50,6 +50,7 @@ def __init__( config: MPTConfig, cache_config: Optional[CacheConfig] = None, quant_config: Optional[QuantizationConfig] = None, + prefix: str = "", ): super().__init__() self.d_model = config.d_model @@ -115,7 +116,8 @@ def __init__( alibi_slopes=alibi_slopes, num_kv_heads=self.num_kv_heads, cache_config=cache_config, - quant_config=quant_config) + quant_config=quant_config, + prefix=f"{prefix}.attn") def forward( self, @@ -176,11 +178,15 @@ def __init__( config: MPTConfig, cache_config: Optional[CacheConfig] = None, quant_config: Optional[QuantizationConfig] = None, + prefix: str = "", ): super().__init__() hidden_size = config.d_model self.norm_1 = nn.LayerNorm(hidden_size) - self.attn = MPTAttention(config, cache_config, quant_config) + self.attn = MPTAttention(config, + cache_config, + quant_config, + prefix=f"{prefix}.attn") self.norm_2 = nn.LayerNorm(hidden_size) self.ffn = MPTMLP(config, quant_config) @@ -224,7 +230,8 @@ def __init__(self, *, vllm_config: VllmConfig, prefix: str = ""): ) self.start_layer, self.end_layer, self.blocks = make_layers( config.n_layers, - lambda prefix: MPTBlock(config, cache_config, quant_config), + lambda prefix: MPTBlock( + config, cache_config, quant_config, prefix=prefix), prefix=f"{prefix}.blocks") self.norm_f = nn.LayerNorm(config.d_model) if config.no_bias: diff --git a/vllm/model_executor/models/nemotron.py b/vllm/model_executor/models/nemotron.py index ceab299a7950a..c7b4c22b6896b 100644 --- a/vllm/model_executor/models/nemotron.py +++ b/vllm/model_executor/models/nemotron.py @@ -195,7 +195,8 @@ def __init__( self.scaling, num_kv_heads=self.num_kv_heads, cache_config=cache_config, - quant_config=quant_config) + quant_config=quant_config, + prefix=f"{prefix}.attn") def forward( self, diff --git a/vllm/model_executor/models/olmo.py b/vllm/model_executor/models/olmo.py index dc138e2e636ad..538e31ec91699 100644 --- a/vllm/model_executor/models/olmo.py +++ b/vllm/model_executor/models/olmo.py @@ -62,6 +62,7 @@ def __init__( config: OlmoConfig, cache_config: Optional[CacheConfig] = None, quant_config: Optional[QuantizationConfig] = None, + prefix: str = "", ): super().__init__() self.config = config @@ -101,7 +102,8 @@ def __init__( self.head_dim, scale=self.scaling, cache_config=cache_config, - quant_config=quant_config) + quant_config=quant_config, + prefix=f"{prefix}.attn") # Attention output projection. self.o_proj = RowParallelLinear( @@ -184,10 +186,14 @@ class OlmoDecoderLayer(nn.Module): def __init__(self, config: OlmoConfig, cache_config: Optional[CacheConfig] = None, - quant_config: Optional[QuantizationConfig] = None): + quant_config: Optional[QuantizationConfig] = None, + prefix: str = ""): super().__init__() # Attention block. 
- self.self_attn = OlmoAttention(config, cache_config, quant_config) + self.self_attn = OlmoAttention(config, + cache_config, + quant_config, + prefix=f"{prefix}.self_attn") # MLP block. self.mlp = OlmoMLP(config, quant_config) @@ -238,8 +244,8 @@ def __init__(self, *, vllm_config: VllmConfig, prefix: str = ""): config.hidden_size) self.start_layer, self.end_layer, self.layers = make_layers( config.num_hidden_layers, - lambda prefix: OlmoDecoderLayer(config, cache_config, quant_config - ), + lambda prefix: OlmoDecoderLayer( + config, cache_config, quant_config, prefix=prefix), prefix=f"{prefix}.layers") self.norm = nn.LayerNorm(config.hidden_size, elementwise_affine=False, diff --git a/vllm/model_executor/models/olmoe.py b/vllm/model_executor/models/olmoe.py index ab87695d8e650..5b5b3ef48b035 100644 --- a/vllm/model_executor/models/olmoe.py +++ b/vllm/model_executor/models/olmoe.py @@ -102,6 +102,7 @@ def __init__( max_position_embeddings: int = 4096, cache_config: Optional[CacheConfig] = None, quant_config: Optional[QuantizationConfig] = None, + prefix: str = "", ) -> None: super().__init__() self.hidden_size = hidden_size @@ -156,7 +157,8 @@ def __init__( self.scaling, num_kv_heads=self.num_kv_heads, cache_config=cache_config, - quant_config=quant_config) + quant_config=quant_config, + prefix=f"{prefix}.attn") def forward( self, @@ -182,6 +184,7 @@ def __init__( layer_idx: int, cache_config: Optional[CacheConfig] = None, quant_config: Optional[QuantizationConfig] = None, + prefix: str = "", ) -> None: super().__init__() self.hidden_size = config.hidden_size @@ -199,6 +202,7 @@ def __init__( max_position_embeddings=max_position_embeddings, cache_config=cache_config, quant_config=quant_config, + prefix=f"{prefix}.self_attn", ) self.mlp = OlmoeMoE( @@ -260,8 +264,11 @@ def __init__(self, *, vllm_config: VllmConfig, prefix: str = ""): ) self.start_layer, self.end_layer, self.layers = make_layers( config.num_hidden_layers, - lambda prefix: OlmoeDecoderLayer(config, int( - prefix.split(".")[-1]), cache_config, quant_config), + lambda prefix: OlmoeDecoderLayer(config, + int(prefix.split(".")[-1]), + cache_config, + quant_config, + prefix=prefix), prefix=f"{prefix}.layers") self.norm = RMSNorm(config.hidden_size, eps=1e-5) diff --git a/vllm/model_executor/models/orion.py b/vllm/model_executor/models/orion.py index b01734af8ddd8..a3757b5c8808e 100644 --- a/vllm/model_executor/models/orion.py +++ b/vllm/model_executor/models/orion.py @@ -75,6 +75,7 @@ def __init__( max_position_embeddings: int = 8192, cache_config: Optional[CacheConfig] = None, quant_config: Optional[QuantizationConfig] = None, + prefix: str = "", ) -> None: super().__init__() self.hidden_size = hidden_size @@ -126,7 +127,8 @@ def __init__( self.scaling, num_kv_heads=self.num_kv_heads, cache_config=cache_config, - quant_config=quant_config) + quant_config=quant_config, + prefix=f"{prefix}.attn") def forward( self, @@ -150,6 +152,7 @@ def __init__( config: PretrainedConfig, cache_config: Optional[CacheConfig] = None, quant_config: Optional[QuantizationConfig] = None, + prefix: str = "", ) -> None: super().__init__() self.hidden_size = config.hidden_size @@ -166,6 +169,7 @@ def __init__( max_position_embeddings=max_position_embeddings, cache_config=cache_config, quant_config=quant_config, + prefix=f"{prefix}.self_attn", ) self.mlp = OrionMLP( hidden_size=self.hidden_size, @@ -226,10 +230,7 @@ def __init__(self, *, vllm_config: VllmConfig, prefix: str = ""): self.start_layer, self.end_layer, self.layers = make_layers( 
config.num_hidden_layers, lambda prefix: OrionDecoderLayer( - config, - cache_config, - quant_config, - ), + config, cache_config, quant_config, prefix=prefix), prefix=f"{prefix}.layers") self.norm = nn.LayerNorm(config.hidden_size, eps=config.rms_norm_eps) self.make_empty_intermediate_tensors = ( diff --git a/vllm/model_executor/models/persimmon.py b/vllm/model_executor/models/persimmon.py index 3b8199f4f1661..14dd4b5b1b4da 100644 --- a/vllm/model_executor/models/persimmon.py +++ b/vllm/model_executor/models/persimmon.py @@ -75,7 +75,8 @@ class PersimmonAttention(nn.Module): def __init__(self, config: PersimmonConfig, cache_config: Optional[CacheConfig] = None, - quant_config: Optional[QuantizationConfig] = None): + quant_config: Optional[QuantizationConfig] = None, + prefix: str = ""): super().__init__() self.config = config tensor_parallel_world_size = get_tensor_model_parallel_world_size() @@ -122,7 +123,8 @@ def __init__(self, self.head_dim, scale=self.scaling, cache_config=cache_config, - quant_config=quant_config) + quant_config=quant_config, + prefix=f"{prefix}.attn") def _split_heads(self, x: torch.Tensor) -> torch.Tensor: # [seq_length, hidden_size] -> [seq_length, num_heads, head_dim] @@ -167,12 +169,14 @@ class PersimmonDecoderLayer(nn.Module): def __init__(self, config: PersimmonConfig, cache_config: Optional[CacheConfig] = None, - quant_config: Optional[QuantizationConfig] = None): + quant_config: Optional[QuantizationConfig] = None, + prefix: str = ""): super().__init__() self.hidden_size = config.hidden_size self.self_attn = PersimmonAttention(config=config, cache_config=cache_config, - quant_config=quant_config) + quant_config=quant_config, + prefix=f"{prefix}.self_attn") self.mlp = PersimmonMLP(config, quant_config=quant_config) self.input_layernorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) @@ -226,8 +230,8 @@ def __init__(self, *, vllm_config: VllmConfig, prefix: str = ""): config.hidden_size) self.start_layer, self.end_layer, self.layers = make_layers( config.num_hidden_layers, - lambda prefix: PersimmonDecoderLayer(config, cache_config, - quant_config), + lambda prefix: PersimmonDecoderLayer( + config, cache_config, quant_config, prefix=prefix), prefix=f"{prefix}.layers") self.final_layernorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) diff --git a/vllm/model_executor/models/phi.py b/vllm/model_executor/models/phi.py index 0a117bf16c9b3..998d3723a0d7d 100644 --- a/vllm/model_executor/models/phi.py +++ b/vllm/model_executor/models/phi.py @@ -69,7 +69,8 @@ class PhiAttention(nn.Module): def __init__(self, config: PhiConfig, cache_config: Optional[CacheConfig] = None, - quant_config: Optional[QuantizationConfig] = None): + quant_config: Optional[QuantizationConfig] = None, + prefix: str = ""): super().__init__() self.total_num_heads = config.num_attention_heads self.hidden_size = config.hidden_size @@ -116,7 +117,8 @@ def __init__(self, self.head_size, scaling, cache_config=cache_config, - quant_config=quant_config) + quant_config=quant_config, + prefix=f"{prefix}.attn") def forward( self, @@ -167,11 +169,15 @@ class PhiLayer(nn.Module): def __init__(self, config: PhiConfig, cache_config: Optional[CacheConfig] = None, - quant_config: Optional[QuantizationConfig] = None): + quant_config: Optional[QuantizationConfig] = None, + prefix: str = ""): super().__init__() self.input_layernorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) - self.self_attn = PhiAttention(config, cache_config, quant_config) + self.self_attn = 
PhiAttention(config, + cache_config, + quant_config, + prefix=f"{prefix}.self_attn") self.mlp = PhiMLP(config, quant_config) def forward( @@ -210,7 +216,8 @@ def __init__(self, *, vllm_config: VllmConfig, prefix: str = ""): config.hidden_size) self.start_layer, self.end_layer, self.layers = make_layers( config.num_hidden_layers, - lambda prefix: PhiLayer(config, cache_config, quant_config), + lambda prefix: PhiLayer( + config, cache_config, quant_config, prefix=prefix), prefix=f"{prefix}.layers") self.final_layernorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) diff --git a/vllm/model_executor/models/phi3_small.py b/vllm/model_executor/models/phi3_small.py index f71cbd1264c45..da7e4cdbc6940 100644 --- a/vllm/model_executor/models/phi3_small.py +++ b/vllm/model_executor/models/phi3_small.py @@ -117,6 +117,7 @@ def __init__( layer_idx: int, cache_config: Optional[CacheConfig] = None, quant_config: Optional[QuantizationConfig] = None, + prefix: str = "", ) -> None: super().__init__() self.layer_idx = layer_idx @@ -214,15 +215,14 @@ def __init__( "homo_head": self.homo_heads } - self.attn = Attention( - self.num_heads_per_partition, - self.head_dim, - self.scale, - num_kv_heads=self.num_kv_heads_per_partion, - cache_config=cache_config, - quant_config=quant_config, - blocksparse_params=bs_params, - ) + self.attn = Attention(self.num_heads_per_partition, + self.head_dim, + self.scale, + num_kv_heads=self.num_kv_heads_per_partion, + cache_config=cache_config, + quant_config=quant_config, + blocksparse_params=bs_params, + prefix=f"{prefix}.attn") def forward( self, @@ -259,13 +259,15 @@ def __init__( layer_idx: int, cache_config: Optional[CacheConfig] = None, quant_config: Optional[QuantizationConfig] = None, + prefix: str = "", ): super().__init__() self.hidden_size = config.hidden_size self.self_attn = Phi3SmallSelfAttention(config, layer_idx, cache_config=cache_config, - quant_config=quant_config) + quant_config=quant_config, + prefix=f"{prefix}.self_attn") self.mlp = Phi3SmallMLP(config, quant_config) self.input_layernorm = nn.LayerNorm(config.hidden_size, @@ -315,7 +317,9 @@ def __init__(self, *, vllm_config: VllmConfig, prefix: str = ""): config.num_hidden_layers, lambda prefix: Phi3SmallDecoderLayer(config, int(prefix.split('.')[-1]), - cache_config, quant_config), + cache_config, + quant_config, + prefix=prefix), prefix=f"{prefix}.layers") self.final_layernorm = nn.LayerNorm(config.hidden_size, diff --git a/vllm/model_executor/models/phimoe.py b/vllm/model_executor/models/phimoe.py index e475d286bd7ea..1febd62f2f705 100644 --- a/vllm/model_executor/models/phimoe.py +++ b/vllm/model_executor/models/phimoe.py @@ -294,6 +294,7 @@ def __init__( cache_config: Optional[CacheConfig] = None, quant_config: Optional[QuantizationConfig] = None, rope_scaling: Optional[dict] = None, + prefix: str = "", ) -> None: super().__init__() self.hidden_size = hidden_size @@ -347,6 +348,7 @@ def __init__( num_kv_heads=self.num_kv_heads, cache_config=cache_config, quant_config=quant_config, + prefix=f"{prefix}.attn", ) def forward( @@ -371,6 +373,7 @@ def __init__( config: PhiMoEConfig, cache_config: Optional[CacheConfig] = None, quant_config: Optional[QuantizationConfig] = None, + prefix: str = "", ) -> None: super().__init__() self.hidden_size = config.hidden_size @@ -385,6 +388,7 @@ def __init__( cache_config=cache_config, quant_config=quant_config, rope_scaling=config.rope_scaling, + prefix=f"{prefix}.self_attn", ) self.block_sparse_moe = PhiMoE( num_experts=config.num_local_experts, @@ 
-454,8 +458,8 @@ def __init__(self, *, vllm_config: VllmConfig, prefix: str = ""): ) self.start_layer, self.end_layer, self.layers = make_layers( config.num_hidden_layers, - lambda prefix: PhiMoEDecoderLayer(config, cache_config, - quant_config), + lambda prefix: PhiMoEDecoderLayer( + config, cache_config, quant_config, prefix=prefix), prefix=f"{prefix}.layers") self.norm = nn.LayerNorm(config.hidden_size, eps=config.rms_norm_eps, diff --git a/vllm/model_executor/models/qwen.py b/vllm/model_executor/models/qwen.py index 44ce6eda42943..d3a776f665c74 100644 --- a/vllm/model_executor/models/qwen.py +++ b/vllm/model_executor/models/qwen.py @@ -442,6 +442,7 @@ def __init__( rope_scaling: Optional[Dict[str, Any]] = None, cache_config: Optional[CacheConfig] = None, quant_config: Optional[QuantizationConfig] = None, + prefix: str = "", ): super().__init__() self.hidden_size = hidden_size @@ -478,7 +479,8 @@ def __init__( self.head_dim, self.scaling, cache_config=cache_config, - quant_config=quant_config) + quant_config=quant_config, + prefix=f"{prefix}.attn") def forward( self, @@ -502,6 +504,7 @@ def __init__( config: PretrainedConfig, cache_config: Optional[CacheConfig] = None, quant_config: Optional[QuantizationConfig] = None, + prefix: str = "", ): super().__init__() self.ln_1 = RMSNorm(config.hidden_size, eps=config.layer_norm_epsilon) @@ -514,7 +517,8 @@ def __init__( rope_theta=rope_theta, rope_scaling=rope_scaling, cache_config=cache_config, - quant_config=quant_config) + quant_config=quant_config, + prefix=f"{prefix}.attn") self.ln_2 = RMSNorm(config.hidden_size, eps=config.layer_norm_epsilon) @@ -568,7 +572,8 @@ def __init__(self, *, vllm_config: VllmConfig, prefix: str = ""): ) self.start_layer, self.end_layer, self.h = make_layers( config.num_hidden_layers, - lambda prefix: QWenBlock(config, cache_config, quant_config), + lambda prefix: QWenBlock( + config, cache_config, quant_config, prefix=prefix), prefix=f"{prefix}.h") self.ln_f = RMSNorm(config.hidden_size, eps=config.layer_norm_epsilon) self.make_empty_intermediate_tensors = ( diff --git a/vllm/model_executor/models/qwen2_moe.py b/vllm/model_executor/models/qwen2_moe.py index 96a9bc451f4df..1091f88ab2534 100644 --- a/vllm/model_executor/models/qwen2_moe.py +++ b/vllm/model_executor/models/qwen2_moe.py @@ -168,6 +168,7 @@ def __init__( max_position_embeddings: int = 8192, cache_config: Optional[CacheConfig] = None, quant_config: Optional[QuantizationConfig] = None, + prefix: str = "", ) -> None: super().__init__() self.hidden_size = hidden_size @@ -220,7 +221,8 @@ def __init__( self.scaling, num_kv_heads=self.num_kv_heads, cache_config=cache_config, - quant_config=quant_config) + quant_config=quant_config, + prefix=f"{prefix}.attn") def forward( self, @@ -245,6 +247,7 @@ def __init__( layer_idx: int, cache_config: Optional[CacheConfig] = None, quant_config: Optional[QuantizationConfig] = None, + prefix: str = "", ) -> None: super().__init__() self.hidden_size = config.hidden_size @@ -261,6 +264,7 @@ def __init__( max_position_embeddings=max_position_embeddings, cache_config=cache_config, quant_config=quant_config, + prefix=f"{prefix}.self_attn", ) # Note: Qwen/Qwen2-57B-A14B-Instruct does not have @@ -336,7 +340,8 @@ def __init__(self, *, vllm_config: VllmConfig, prefix: str = ""): layer_idx=int( prefix.split(".")[-1]), cache_config=cache_config, - quant_config=quant_config), + quant_config=quant_config, + prefix=prefix), prefix=f"{prefix}.layers", ) self.norm = RMSNorm(config.hidden_size, eps=config.rms_norm_eps) diff --git 
a/vllm/model_executor/models/solar.py b/vllm/model_executor/models/solar.py index 6d6fafc5ab0eb..f58710d215056 100644 --- a/vllm/model_executor/models/solar.py +++ b/vllm/model_executor/models/solar.py @@ -167,6 +167,7 @@ def __init__( num_kv_heads=self.num_kv_heads, cache_config=cache_config, quant_config=quant_config, + prefix=f"{prefix}.attn", ) def forward( diff --git a/vllm/model_executor/models/stablelm.py b/vllm/model_executor/models/stablelm.py index e11d2e916730a..6b2107bef0a66 100644 --- a/vllm/model_executor/models/stablelm.py +++ b/vllm/model_executor/models/stablelm.py @@ -77,7 +77,8 @@ class StablelmAttention(nn.Module): def __init__(self, config: PretrainedConfig, cache_config: Optional[CacheConfig] = None, - quant_config: Optional[QuantizationConfig] = None) -> None: + quant_config: Optional[QuantizationConfig] = None, + prefix: str = "") -> None: super().__init__() self.config = config self.hidden_size = config.hidden_size @@ -131,7 +132,8 @@ def __init__(self, self.scaling, num_kv_heads=self.num_key_value_heads, cache_config=cache_config, - quant_config=quant_config) + quant_config=quant_config, + prefix=f"{prefix}.attn") def forward( self, @@ -155,9 +157,13 @@ def __init__( config: PretrainedConfig, cache_config: Optional[CacheConfig] = None, quant_config: Optional[QuantizationConfig] = None, + prefix: str = "", ) -> None: super().__init__() - self.self_attn = StablelmAttention(config, cache_config, quant_config) + self.self_attn = StablelmAttention(config, + cache_config, + quant_config, + prefix=f"{prefix}.self_attn") self.mlp = StablelmMLP(config, quant_config) norm_eps = getattr(config, "norm_eps", getattr(config, "layer_norm_eps", 1e-05)) @@ -207,8 +213,8 @@ def __init__(self, *, vllm_config: VllmConfig, prefix: str = ""): ) self.start_layer, self.end_layer, self.layers = make_layers( config.num_hidden_layers, - lambda prefix: StablelmDecoderLayer(config, cache_config, - quant_config), + lambda prefix: StablelmDecoderLayer( + config, cache_config, quant_config, prefix=prefix), prefix=f"{prefix}.layers", ) norm_eps = getattr(config, "norm_eps", diff --git a/vllm/model_executor/models/starcoder2.py b/vllm/model_executor/models/starcoder2.py index 74c66042226de..15e8f2af52cda 100644 --- a/vllm/model_executor/models/starcoder2.py +++ b/vllm/model_executor/models/starcoder2.py @@ -52,7 +52,8 @@ class Starcoder2Attention(nn.Module): def __init__(self, config: Starcoder2Config, cache_config: Optional[CacheConfig] = None, - quant_config: Optional[QuantizationConfig] = None): + quant_config: Optional[QuantizationConfig] = None, + prefix: str = ""): super().__init__() self.config = config @@ -105,7 +106,8 @@ def __init__(self, self.scaling, num_kv_heads=self.num_kv_heads, cache_config=cache_config, - quant_config=quant_config) + quant_config=quant_config, + prefix=f"{prefix}.attn") def forward( self, @@ -154,12 +156,14 @@ class Starcoder2DecoderLayer(nn.Module): def __init__(self, config: Starcoder2Config, cache_config: Optional[CacheConfig] = None, - quant_config: Optional[QuantizationConfig] = None): + quant_config: Optional[QuantizationConfig] = None, + prefix: str = ""): super().__init__() self.hidden_size = config.hidden_size self.self_attn = Starcoder2Attention(config, cache_config, - quant_config=quant_config) + quant_config=quant_config, + prefix=f"{prefix}.self_attn") self.mlp = Starcoder2MLP(config, quant_config=quant_config) self.input_layernorm = nn.LayerNorm(config.hidden_size, eps=config.norm_epsilon) @@ -213,7 +217,8 @@ def __init__(self, *, vllm_config: 
VllmConfig, prefix: str = ""): self.start_layer, self.end_layer, self.layers = make_layers( config.num_hidden_layers, lambda prefix: Starcoder2DecoderLayer( - config, cache_config, quant_config=quant_config), + config, cache_config, quant_config=quant_config, prefix=prefix + ), prefix=f"{prefix}.layers", ) self.norm = nn.LayerNorm(config.hidden_size, eps=config.norm_epsilon) diff --git a/vllm/model_executor/models/xverse.py b/vllm/model_executor/models/xverse.py index bc37a997eabb5..25a0d474e2863 100644 --- a/vllm/model_executor/models/xverse.py +++ b/vllm/model_executor/models/xverse.py @@ -93,6 +93,7 @@ def __init__( quant_config: Optional[QuantizationConfig] = None, bias: bool = False, cache_config: Optional[CacheConfig] = None, + prefix: str = "", ) -> None: super().__init__() self.hidden_size = hidden_size @@ -138,7 +139,8 @@ def __init__( self.scaling, num_kv_heads=self.num_kv_heads, cache_config=cache_config, - quant_config=quant_config) + quant_config=quant_config, + prefix=f"{prefix}.attn") def forward( self, @@ -162,6 +164,7 @@ def __init__( config: PretrainedConfig, cache_config: Optional[CacheConfig] = None, quant_config: Optional[QuantizationConfig] = None, + prefix: str = "", ) -> None: super().__init__() self.hidden_size = config.hidden_size @@ -180,6 +183,7 @@ def __init__( quant_config=quant_config, bias=getattr(config, "bias", False), cache_config=cache_config, + prefix=f"{prefix}.self_attn", ) self.mlp = XverseMLP( hidden_size=self.hidden_size, @@ -243,8 +247,8 @@ def __init__(self, *, vllm_config: VllmConfig, prefix: str = ""): ) self.start_layer, self.end_layer, self.layers = make_layers( config.num_hidden_layers, - lambda prefix: XverseDecoderLayer(config, cache_config, - quant_config), + lambda prefix: XverseDecoderLayer( + config, cache_config, quant_config, prefix=prefix), prefix=f"{prefix}.layers", ) self.norm = RMSNorm(config.hidden_size, eps=config.rms_norm_eps) diff --git a/vllm/platforms/cpu.py b/vllm/platforms/cpu.py index 9be9031dc3baf..cbc982752c6b4 100644 --- a/vllm/platforms/cpu.py +++ b/vllm/platforms/cpu.py @@ -20,6 +20,7 @@ class CpuPlatform(Platform): _enum = PlatformEnum.CPU device_type: str = "cpu" + dispatch_key: str = "CPU" @classmethod def get_device_name(cls, device_id: int = 0) -> str: diff --git a/vllm/platforms/cuda.py b/vllm/platforms/cuda.py index cf0d41081a5aa..70724b8be4c45 100644 --- a/vllm/platforms/cuda.py +++ b/vllm/platforms/cuda.py @@ -121,6 +121,7 @@ def device_id_to_physical_device_id(device_id: int) -> int: class CudaPlatform(Platform): _enum = PlatformEnum.CUDA device_type: str = "cuda" + dispatch_key: str = "CUDA" @classmethod def get_device_capability(cls, device_id: int = 0) -> DeviceCapability: diff --git a/vllm/platforms/hpu.py b/vllm/platforms/hpu.py index a8f568d31d5a7..3071136e43b85 100644 --- a/vllm/platforms/hpu.py +++ b/vllm/platforms/hpu.py @@ -13,6 +13,7 @@ class HpuPlatform(Platform): _enum = PlatformEnum.HPU device_type: str = "hpu" + dispatch_key: str = "HPU" @classmethod def get_default_attn_backend(cls, selected_backend: _Backend) -> _Backend: diff --git a/vllm/platforms/interface.py b/vllm/platforms/interface.py index 07f23167d509a..3328665029039 100644 --- a/vllm/platforms/interface.py +++ b/vllm/platforms/interface.py @@ -57,6 +57,10 @@ def to_int(self) -> int: class Platform: _enum: PlatformEnum device_type: str + # available dispatch keys: + # check https://github.com/pytorch/pytorch/blob/313dac6c1ca0fa0cde32477509cce32089f8532a/torchgen/model.py#L134 # noqa + # use "CPU" as a fallback for platforms not 
registered in PyTorch + dispatch_key: str = "CPU" def is_cuda(self) -> bool: return self._enum == PlatformEnum.CUDA diff --git a/vllm/platforms/openvino.py b/vllm/platforms/openvino.py index 33a41933e9fff..694de836e1517 100644 --- a/vllm/platforms/openvino.py +++ b/vllm/platforms/openvino.py @@ -18,6 +18,7 @@ class OpenVinoPlatform(Platform): _enum = PlatformEnum.OPENVINO device_type: str = "openvino" + dispatch_key: str = "CPU" @classmethod def get_default_attn_backend(cls, selected_backend: _Backend) -> _Backend: diff --git a/vllm/platforms/rocm.py b/vllm/platforms/rocm.py index 3fe8c01c15787..d2f44c3e423e3 100644 --- a/vllm/platforms/rocm.py +++ b/vllm/platforms/rocm.py @@ -36,6 +36,7 @@ class RocmPlatform(Platform): _enum = PlatformEnum.ROCM device_type: str = "cuda" + dispatch_key: str = "CUDA" @classmethod def get_default_attn_backend(cls, selected_backend: _Backend) -> _Backend: diff --git a/vllm/platforms/tpu.py b/vllm/platforms/tpu.py index 513cfa54687dc..137af57023ea9 100644 --- a/vllm/platforms/tpu.py +++ b/vllm/platforms/tpu.py @@ -17,6 +17,7 @@ class TpuPlatform(Platform): _enum = PlatformEnum.TPU device_type: str = "tpu" + dispatch_key: str = "XLA" @classmethod def get_default_attn_backend(cls, selected_backend: _Backend) -> _Backend: diff --git a/vllm/platforms/xpu.py b/vllm/platforms/xpu.py index b2ee0ef2f71cd..69388a8e0f27c 100644 --- a/vllm/platforms/xpu.py +++ b/vllm/platforms/xpu.py @@ -17,6 +17,7 @@ class XPUPlatform(Platform): _enum = PlatformEnum.XPU device_type: str = "xpu" + dispatch_key: str = "XPU" @classmethod def get_default_attn_backend(cls, selected_backend: _Backend) -> _Backend: diff --git a/vllm/spec_decode/draft_model_runner.py b/vllm/spec_decode/draft_model_runner.py index cd4d7eb0e6e4e..cf166e3eb5bad 100644 --- a/vllm/spec_decode/draft_model_runner.py +++ b/vllm/spec_decode/draft_model_runner.py @@ -273,7 +273,8 @@ def execute_model( if previous_hidden_states is not None else {} # Run model - with set_forward_context(model_input.attn_metadata): + with set_forward_context(model_input.attn_metadata, + self.vllm_config): hidden_states = model_executable( input_ids=model_input.input_tokens, positions=model_input.input_positions, diff --git a/vllm/utils.py b/vllm/utils.py index 67b2629ecc933..30c371b0e3591 100644 --- a/vllm/utils.py +++ b/vllm/utils.py @@ -1573,6 +1573,7 @@ def direct_register_custom_op( mutates_args: List[str], fake_impl: Optional[Callable] = None, target_lib: Optional[Library] = None, + dispatch_key: str = "CUDA", ): """ `torch.library.custom_op` can have significant overhead because it @@ -1601,7 +1602,7 @@ def direct_register_custom_op( schema_str = torch._custom_op.impl.infer_schema(op_func, mutates_args) my_lib = target_lib or vllm_lib my_lib.define(op_name + schema_str) - my_lib.impl(op_name, op_func, "CUDA") + my_lib.impl(op_name, op_func, dispatch_key=dispatch_key) if fake_impl is not None: my_lib._register_fake(op_name, fake_impl) diff --git a/vllm/v1/attention/backends/flash_attn.py b/vllm/v1/attention/backends/flash_attn.py index e73a1e60b2730..d98bb5a716e97 100644 --- a/vllm/v1/attention/backends/flash_attn.py +++ b/vllm/v1/attention/backends/flash_attn.py @@ -173,7 +173,8 @@ def unified_v1_flash_attention( alibi_slopes: Optional[torch.Tensor] = None, logits_soft_cap: Optional[float] = None, ) -> None: - current_metadata = get_forward_context() + context = get_forward_context() + current_metadata = context.dynamic_forward_context if current_metadata is None: # Profiling run. 
return diff --git a/vllm/v1/worker/gpu_model_runner.py b/vllm/v1/worker/gpu_model_runner.py index 2cf55cd497659..02f9498142bb7 100644 --- a/vllm/v1/worker/gpu_model_runner.py +++ b/vllm/v1/worker/gpu_model_runner.py @@ -447,7 +447,7 @@ def execute_model( # Run the decoder. # Use persistent buffers for CUDA graphs. - with set_forward_context(attn_metadata): + with set_forward_context(attn_metadata, self.vllm_config): hidden_states = self.model( input_ids=None, positions=self.positions[:num_input_tokens], @@ -523,7 +523,7 @@ def _dummy_run( num_tokens: int, kv_caches: List[torch.Tensor], ) -> torch.Tensor: - with set_forward_context(None): + with set_forward_context(None, self.vllm_config): hidden_states = model( input_ids=None, positions=self.positions[:num_tokens], diff --git a/vllm/worker/embedding_model_runner.py b/vllm/worker/embedding_model_runner.py index 37cfcbf13d7a3..4a55d91e71484 100644 --- a/vllm/worker/embedding_model_runner.py +++ b/vllm/worker/embedding_model_runner.py @@ -97,7 +97,7 @@ def execute_model( model_forward_end = torch.cuda.Event(enable_timing=True) model_forward_start.record() - with set_forward_context(model_input.attn_metadata): + with set_forward_context(model_input.attn_metadata, self.vllm_config): hidden_or_intermediate_states = model_executable( input_ids=model_input.input_tokens, positions=model_input.input_positions, diff --git a/vllm/worker/enc_dec_model_runner.py b/vllm/worker/enc_dec_model_runner.py index 687d2cc79360f..ae18c79c980c8 100644 --- a/vllm/worker/enc_dec_model_runner.py +++ b/vllm/worker/enc_dec_model_runner.py @@ -176,7 +176,7 @@ def execute_model( } if self.has_inner_state else {} multi_modal_kwargs = model_input.multi_modal_kwargs or {} - with set_forward_context(model_input.attn_metadata): + with set_forward_context(model_input.attn_metadata, self.vllm_config): hidden_or_intermediate_states = model_executable( input_ids=model_input.input_tokens, positions=model_input.input_positions, diff --git a/vllm/worker/model_runner.py b/vllm/worker/model_runner.py index ed0360fb7f727..13301b876217d 100644 --- a/vllm/worker/model_runner.py +++ b/vllm/worker/model_runner.py @@ -1503,7 +1503,7 @@ def capture_model(self, kv_caches: List[List[torch.Tensor]]) -> None: self._update_inputs_to_capture_for_enc_dec_model( capture_inputs) - with set_forward_context(attn_metadata): + with set_forward_context(attn_metadata, self.vllm_config): graph_runner.capture(**capture_inputs) self.graph_memory_pool = graph_runner.graph.pool() self.graph_runners[virtual_engine][batch_size] = ( @@ -1649,7 +1649,7 @@ def execute_model( model_forward_end = torch.cuda.Event(enable_timing=True) model_forward_start.record() - with set_forward_context(model_input.attn_metadata): + with set_forward_context(model_input.attn_metadata, self.vllm_config): hidden_or_intermediate_states = model_executable( input_ids=model_input.input_tokens, positions=model_input.input_positions, From 97814fbf0f847a11d2e0eb339e3e7572ca69379d Mon Sep 17 00:00:00 2001 From: Ricky Xu Date: Fri, 22 Nov 2024 15:27:25 -0800 Subject: [PATCH 088/122] [v1] Refactor KVCacheManager for more hash input than token ids (#10507) Signed-off-by: rickyx Signed-off-by: Cody Yu Co-authored-by: Cody Yu --- tests/v1/core/test_prefix_caching.py | 225 +++++++++++++++++++-- vllm/v1/core/kv_cache_manager.py | 289 +++++++++++++-------------- vllm/v1/core/kv_cache_utils.py | 37 ++-- 3 files changed, 365 insertions(+), 186 deletions(-) diff --git a/tests/v1/core/test_prefix_caching.py b/tests/v1/core/test_prefix_caching.py index 
d614d3e67460f..83bfbb6ade8d7 100644 --- a/tests/v1/core/test_prefix_caching.py +++ b/tests/v1/core/test_prefix_caching.py @@ -1,8 +1,11 @@ """Compare the with and without prefix caching.""" +import pytest + from vllm.inputs import token_inputs from vllm.sampling_params import SamplingParams +from vllm.utils import cdiv from vllm.v1.core.kv_cache_manager import KVCacheManager, Request -from vllm.v1.core.kv_cache_utils import hash_block_tokens +from vllm.v1.core.kv_cache_utils import KVCacheBlock, hash_block_tokens def make_request(request_id, prompt_token_ids): @@ -31,7 +34,8 @@ def test_prefill(): # Fully cache miss # Incomplete 1 block (7 tokens) unique_token_ids = [3] * 7 - req0 = make_request("0", common_token_ids + unique_token_ids) + all_token_ids = common_token_ids + unique_token_ids + req0 = make_request("0", all_token_ids) computed_blocks = manager.get_computed_blocks(req0) assert not computed_blocks blocks = manager.allocate_slots(req0, 55, computed_blocks) @@ -40,24 +44,16 @@ def test_prefill(): # Check full block metadata parent_block_hash = None for block_id in (0, 1, 2): - block_hash = hash_block_tokens(parent_block_hash, - manager.block_pool[block_id].token_ids) + block_tokens = tuple(all_token_ids[block_id * 16:(block_id + 1) * 16]) + block_hash = hash_block_tokens(parent_block_hash, block_tokens) assert manager.block_pool[block_id].block_hash == block_hash assert manager.block_pool[block_id].ref_cnt == 1 - assert manager.block_pool[block_id].num_hashed_tokens == 16 * ( - block_id + 1) - assert manager.block_pool[block_id].token_ids == tuple([block_id] * 16) parent_block_hash = block_hash # Check partial/preallocated block metadata for block_id in (3, 4): assert manager.block_pool[block_id].block_hash is None assert manager.block_pool[block_id].ref_cnt == 1 - assert manager.block_pool[block_id].num_hashed_tokens == 0 - if block_id == 3: - assert manager.block_pool[block_id].token_ids == [3] * 7 - else: - assert not manager.block_pool[block_id].token_ids # Cache hit in the common prefix when the original block is still in use. # Incomplete 1 block (5 tokens) @@ -113,7 +109,7 @@ def test_prefill(): req3 = make_request("3", [99] * (16 * 9)) computed_blocks = manager.get_computed_blocks(req3) assert not computed_blocks - blocks = manager.allocate_slots(req2, 16 * 9, computed_blocks) + blocks = manager.allocate_slots(req3, 16 * 9, computed_blocks) # This block ID order also checks the eviction order. assert [b.block_id for b in blocks] == [9, 4, 3, 6, 5, 8, 7, 2, 1, 0] assert manager.free_block_queue.num_free_blocks == 0 @@ -148,7 +144,7 @@ def test_decode(): req0.append_output_token_ids(8) new_blocks = manager.append_slots(req0, 4) assert new_blocks is not None and len(new_blocks) == 0 - assert len(manager.block_pool[3].token_ids) == 11 + assert manager.req_to_blocks[req0.request_id][-2].block_hash is None # Append slots without allocating a new block, but start using the # preallocated block. @@ -159,8 +155,7 @@ def test_decode(): req0.append_output_token_ids(7) new_blocks = manager.append_slots(req0, 15) assert new_blocks is not None and len(new_blocks) == 0 - assert len(manager.block_pool[3].token_ids) == 16 - assert len(manager.block_pool[4].token_ids) == 10 + assert manager.req_to_blocks[req0.request_id][-2].block_hash is not None # Append slots with allocating a new block. req0.num_computed_tokens = 74 @@ -171,9 +166,6 @@ def test_decode(): new_blocks = manager.append_slots(req0, 17) # Plus one preallocated block. 
assert new_blocks is not None and len(new_blocks) == 2 - assert len(manager.block_pool[4].token_ids) == 16 - assert len(manager.block_pool[5].token_ids) == 11 - assert len(manager.block_pool[6].token_ids) == 0 def test_evict(): @@ -217,3 +209,198 @@ def test_evict(): blocks = manager.allocate_slots(req2, 3, computed_blocks) assert [b.block_id for b in blocks] == [6, 5] assert manager.free_block_queue.num_free_blocks == 6 + + +def test_hash_block_correct_reuse(): + """ + This tests when a previously cached block is reused as a new block, + its hash metadata should be correctly reset. + """ + block_size = 16 + manager = KVCacheManager( + block_size=block_size, + num_gpu_blocks=1, + sliding_window=False, + enable_caching=True, + num_preallocate_tokens=0, + ) + + # Allocate 1 block and cache it. + num_tokens = block_size * 1 + req = make_request("0", list(range(num_tokens))) + computed_blocks = manager.get_computed_blocks(req) + assert not computed_blocks + blocks = manager.allocate_slots(req, num_tokens, computed_blocks) + assert len(blocks) == 1 + + # Deallocate the block. + manager.free(req) + + # Allocate a new block that's not full, make sure hash info on the + # block is cleared. + req = make_request("1", list(range(num_tokens - 1))) + computed_blocks = manager.get_computed_blocks(req) + assert not computed_blocks + blocks = manager.allocate_slots(req, num_tokens - 1, computed_blocks) + assert len(blocks) == 1 + + assert manager.block_pool[blocks[0].block_id].block_hash is None + + +def test_computed_blocks_not_evicted(): + """ + Test that the computed blocks are not evicted when getting new blocks + for a request if there are any other free blocks. + """ + block_size = 16 + manager = KVCacheManager( + block_size=block_size, + num_gpu_blocks=2, + sliding_window=False, + enable_caching=True, + num_preallocate_tokens=0, + ) + + # Allocate a block and cache it. + num_tokens = block_size * 1 + req0 = make_request("0", list(range(num_tokens))) + computed_blocks = manager.get_computed_blocks(req0) + assert not computed_blocks + blocks = manager.allocate_slots(req0, num_tokens, computed_blocks) + assert len(blocks) == 1 + assert blocks[0].block_id == 0 + + # Allocate another block. + req1 = make_request("1", list(range(num_tokens, num_tokens * 2))) + computed_blocks = manager.get_computed_blocks(req1) + assert not computed_blocks + blocks = manager.allocate_slots(req1, num_tokens, computed_blocks) + assert len(blocks) == 1 + assert blocks[0].block_id == 1 + + # Free the blocks. + manager.free(req0) + manager.free(req1) + + # Now if we have a cache hit on the first block, we should evict the second + # cached block rather than the first one. + req2 = make_request("2", list(range(num_tokens * 2))) + computed_blocks = manager.get_computed_blocks(req2) + assert len(computed_blocks) == 1 + assert computed_blocks[0].block_id == 0 + + blocks = manager.allocate_slots(req2, num_tokens * 2 - num_tokens, + computed_blocks) + assert len(blocks) == 1 + assert blocks[0].block_id == 1 + + +def test_basic_prefix_caching_disabled(): + """ + This tests that the prefix caching is disabled. 
+ """ + block_size = 4 + manager = KVCacheManager( + block_size=block_size, + num_gpu_blocks=4, + sliding_window=False, + enable_caching=False, + num_preallocate_tokens=0, + ) + + req1 = make_request("1", list(range(10))) # 2 blocks and some more + + computed_blocks = manager.get_computed_blocks(req1) + assert not computed_blocks + blocks = manager.allocate_slots(req1, 10, computed_blocks) + assert len(blocks) == 3 + + # Free the blocks. + manager.free(req1) + + # No caching. + req2 = make_request("2", list(range(16))) # shared prefix + computed_blocks = manager.get_computed_blocks(req2) + assert not computed_blocks + blocks = manager.allocate_slots(req2, 16, computed_blocks) + assert len(blocks) == 4 + + # New requests should not have any blocks. + req3 = make_request("3", list(range(4))) + computed_blocks = manager.get_computed_blocks(req3) + assert not computed_blocks + blocks = manager.allocate_slots(req3, 4, computed_blocks) + assert not blocks + + +@pytest.mark.parametrize("num_preallocate_tokens", list(range(0, 8))) +@pytest.mark.parametrize("block_size", [4]) +def test_preallocate_blocks(num_preallocate_tokens: int, block_size: int): + """ + This tests that the preallocated blocks are correctly added. + """ + manager = KVCacheManager( + block_size=block_size, + num_gpu_blocks=10, + sliding_window=False, + enable_caching=True, + num_preallocate_tokens=num_preallocate_tokens, + ) + num_preallocated_blocks = cdiv(num_preallocate_tokens, block_size) + + req = make_request("0", list(range(block_size * 30))) + computed_blocks = manager.get_computed_blocks(req) + assert not computed_blocks + # Just ask for 1 block. + blocks = manager.allocate_slots(req, block_size, computed_blocks) + assert len(blocks) == 1 + num_preallocated_blocks + + # Append slots to the block. + req.num_computed_tokens = block_size * len(blocks) # Assume all used. + blocks = manager.append_slots(req, block_size) # Append 1 block. + assert len(blocks) == 1 + num_preallocated_blocks + + +def test_cache_blocks(): + """ + This is a unit test that tests the correctness of the _cache_full_blocks + function of KVCacheManager. + """ + block_size = 4 + manager = KVCacheManager( + block_size=block_size, + num_gpu_blocks=5, + sliding_window=False, + enable_caching=True, + num_preallocate_tokens=0, + ) + # Req: + # Block 0: [0, 1, 2, 3] + # Block 1: [4, 5, 6, 7] + # Block 2: [8, 9, 10, 11] + # Block 3: [12, 13] + req = make_request("0", list(range(14))) + + # Test that blocks are cached correctly for 2 full blocks from the start. + blocks = [KVCacheBlock(block_id=i) for i in range(2)] + + manager._cache_full_blocks( + request=req, + blk_start_idx=0, + full_blocks=blocks, + prev_block=None, + ) + + assert len(manager.cached_block_hash_to_block) == 2 + assert all([block.block_hash is not None for block in blocks]) + + # Test that blocks that don't start from the beginning are cached correctly. 
+ blocks = [KVCacheBlock(block_id=2)] + manager._cache_full_blocks( + request=req, + blk_start_idx=2, + full_blocks=blocks, + prev_block=None, + ) + assert len(manager.cached_block_hash_to_block) == 3 + assert blocks[0].block_hash is not None diff --git a/vllm/v1/core/kv_cache_manager.py b/vllm/v1/core/kv_cache_manager.py index 38f1c03a4d3ac..8eb3fb976eb87 100644 --- a/vllm/v1/core/kv_cache_manager.py +++ b/vllm/v1/core/kv_cache_manager.py @@ -79,6 +79,9 @@ def get_computed_blocks(self, request: Request) -> List[KVCacheBlock]: return [] computed_blocks = [] + + # TODO(rickyx): potentially we could cache this so we don't have to + # recompute it every time. block_hashes = hash_request_tokens(self.block_size, request.all_token_ids) @@ -120,47 +123,45 @@ def append_slots( # slots, but we cannot allocate new blocks due to the limit. return None - # When caching is enabled, assign token IDs to already allocated blocks. - new_token_ids = None - parent_block = None - if self.enable_caching: - # Figure out the token IDs to add to the blocks. - new_token_ids = request.all_token_ids[ - request.num_computed_tokens:request.num_computed_tokens + - num_tokens] - - # Find the last full block index. - # TODO: This may be optimized by calculating the computed tokens. - last_full_block_idx = len(req_blocks) - 1 - while (last_full_block_idx >= 0 - and req_blocks[last_full_block_idx].block_hash is None): - last_full_block_idx -= 1 - - parent_block = (req_blocks[last_full_block_idx] - if last_full_block_idx >= 0 else None) - token_id_idx = self._add_token_ids_to_blocks( - blocks=req_blocks[last_full_block_idx + 1:], - token_ids=new_token_ids, - parent_block=parent_block) - - new_token_ids = new_token_ids[token_id_idx:] - parent_block = req_blocks[-1] - - # No new block is needed. When caching is enabled, we make sure - # token_id_idx is equal to len(new_token_ids), meaning that all tokens - # are added to allocated blocks. - if num_required_blocks <= len(req_blocks): - assert not self.enable_caching or token_id_idx == num_tokens, \ - f"{token_id_idx=} != {num_tokens=}" - return [] + if num_new_blocks <= 0: + # No new block is needed. + new_blocks = [] + else: + # Get new blocks from the free block pool considering + # preallocated blocks. + num_new_blocks = min( + num_new_blocks + self.num_preallocate_blocks, + self.free_block_queue.num_free_blocks, + ) + + new_blocks = self._get_new_blocks(num_new_blocks) + req_blocks.extend(new_blocks) + + if not self.enable_caching: + return new_blocks + + num_computed_full_blocks = (request.num_computed_tokens // + self.block_size) + + # NOTE(rickyx): We are assuming the `num_tokens` are actual + # tokens rather than lookahead slots (e.g. for speculative decoding). + # TODO(rickyx): When supporting speculative decoding, we will need to + # differentiate between them so that we can know how many blocks are + # full after appending the actual tokens. + num_full_blocks_after_append = (request.num_computed_tokens + + num_tokens) // self.block_size + assert num_full_blocks_after_append <= len(req_blocks) + + new_full_blocks = req_blocks[ + num_computed_full_blocks:num_full_blocks_after_append] + self._cache_full_blocks( + request=request, + blk_start_idx=num_computed_full_blocks, + full_blocks=new_full_blocks, + prev_block=req_blocks[num_computed_full_blocks - 1] + if num_computed_full_blocks >= 1 else None, + ) - # Allocate new blocks considering preallocated blocks, and - # add token IDs to them if caching is enabled. 
- num_new_blocks = min(num_new_blocks + self.num_preallocate_blocks, - self.free_block_queue.num_free_blocks) - new_blocks = self._get_new_blocks(num_new_blocks, new_token_ids, - parent_block) - req_blocks.extend(new_blocks) return new_blocks def allocate_slots( @@ -184,11 +185,20 @@ def allocate_slots( raise ValueError( f"num_tokens must be greater than 0, got {num_tokens}") - # If a computed block of a request is an eviction candidate (in the - # free queue and ref_cnt == 0), it cannot be counted as a free block - # when allocating this request. - num_evictable_computed_blocks = len( - [blk for blk in computed_blocks if blk.ref_cnt == 0]) + # Touch the computed blocks to make sure they won't be evicted. + num_evictable_computed_blocks = 0 + if self.enable_caching: + self._touch(computed_blocks) + + # If a computed block of a request is an eviction candidate (in the + # free queue and ref_cnt == 0), it cannot be counted as a free block + # when allocating this request. + num_evictable_computed_blocks = len( + [blk for blk in computed_blocks if blk.ref_cnt == 0]) + else: + assert not computed_blocks, ( + "Computed blocks should be empty when " + "prefix caching is disabled") num_required_blocks = cdiv(num_tokens, self.block_size) if (num_required_blocks > self.free_block_queue.num_free_blocks - @@ -201,35 +211,28 @@ def allocate_slots( num_new_blocks = min( num_required_blocks + self.num_preallocate_blocks, self.free_block_queue.num_free_blocks - - num_evictable_computed_blocks) - - num_computed_tokens = len(computed_blocks) * self.block_size + num_evictable_computed_blocks, + ) - # When caching is enabled, get the new token IDs and the parent block - # ID to generate cache keys. - new_token_ids = None - parent_block = None - if self.enable_caching: - # Touch the computed blocks to make sure they won't be evicted. - self._touch(computed_blocks) + # Concatenate the computed block IDs and the new block IDs. + new_blocks = self._get_new_blocks(num_new_blocks) + self.req_to_blocks[request.request_id] = computed_blocks + new_blocks - # Get the token IDs for the blocks being allocated for hashing. - new_token_ids = request.all_token_ids[ - num_computed_tokens:num_computed_tokens + num_tokens] - if not new_token_ids: - raise RuntimeError( - "Failed to infer the token IDs for allocation. " - f"#all_tokens={len(request.all_token_ids)} < " - f"#computed_tokens={num_computed_tokens}") + if not self.enable_caching: + return new_blocks - # Get the parent block ID to construct the block chain. - parent_block = computed_blocks[-1] if computed_blocks else None + num_computed_tokens = len(computed_blocks) * self.block_size + num_full_blocks = (num_computed_tokens + num_tokens) // self.block_size - new_blocks = self._get_new_blocks(num_new_blocks, new_token_ids, - parent_block) + self._cache_full_blocks( + request=request, + blk_start_idx=len(computed_blocks), + # The new full blocks are the full blocks that are not computed. + full_blocks=self.req_to_blocks[request.request_id] + [len(computed_blocks):num_full_blocks], + prev_block=computed_blocks[-1] if computed_blocks else None, + ) - # Concatenate the computed block IDs and the new block IDs. 
- self.req_to_blocks[request.request_id] = computed_blocks + new_blocks return new_blocks def free(self, request: Request) -> None: @@ -248,24 +251,17 @@ def free(self, request: Request) -> None: blocks = reversed(blocks) for block in blocks: - block.ref_cnt -= 1 + block.decr_ref() if block.ref_cnt == 0: self.free_block_queue.append(block) - def _get_new_blocks( - self, - num_blocks: int, - token_ids: Optional[List[int]] = None, - parent_block: Optional[int] = None) -> List[KVCacheBlock]: - """Get new blocks from the free block pool, and add token IDs to - allocated blocks if caching is enabled. + def _get_new_blocks(self, num_blocks: int) -> List[KVCacheBlock]: + """Get new blocks from the free block pool. + Note that we do not check block cache in this function. Args: num_blocks: The number of blocks to allocate. - token_ids: The token IDs in the blocks. None if caching is disabled. - parent_block: The parent block. Used to include block chain - in the block hash. Returns: A list of new block. @@ -274,56 +270,38 @@ def _get_new_blocks( raise ValueError( f"Cannot get {num_blocks} free blocks from the pool") - # First allocate blocks. ret: List[KVCacheBlock] = [] idx = 0 while idx < num_blocks: + # First allocate blocks. curr_block = self.free_block_queue.popleft() assert curr_block.ref_cnt == 0 - # Evict blocks from the cache. + # If the block is cached, evict it. if self.enable_caching: - block_hash = curr_block.block_hash - if (block_hash is not None - and block_hash in self.cached_block_hash_to_block): - if len(self.cached_block_hash_to_block[block_hash]) == 1: - del self.cached_block_hash_to_block[block_hash] - else: - del self.cached_block_hash_to_block[block_hash][ - curr_block.block_id] - curr_block.reset() - - curr_block.ref_cnt = 1 + self._evict_cached_block(curr_block) + + curr_block.incr_ref() ret.append(curr_block) idx += 1 - # Then assign token IDs to the allocated blocks. - if self.enable_caching: - assert token_ids is not None - token_id_idx = self._add_token_ids_to_blocks( - blocks=ret, token_ids=token_ids, parent_block=parent_block) - assert token_id_idx == len(token_ids) - return ret - def _cache_full_block(self, - block: KVCacheBlock, - parent_block: Optional[KVCacheBlock] = None) -> None: - """Cache a full block for prefix caching. + def _evict_cached_block(self, block: KVCacheBlock) -> None: + """ + If a block is cached in `cached_block_hash_to_block`, we reset its hash + metadata and evict it from the cache. Args: - block: The block to cache. - parent_block: The parent block. None if this is the first block. + block: The block to evict. 
""" - parent_block_hash = (parent_block.block_hash - if parent_block is not None else None) - assert len(block.token_ids) == self.block_size - block.token_ids = tuple(block.token_ids) - block_hash = hash_block_tokens(parent_block_hash, block.token_ids) - block.block_hash = block_hash - block.num_hashed_tokens = self.block_size + ( - parent_block.num_hashed_tokens if parent_block is not None else 0) - self.cached_block_hash_to_block[block_hash][block.block_id] = block + block_hash = block.block_hash + if block_hash and block_hash in self.cached_block_hash_to_block: + block.reset_hash() + del self.cached_block_hash_to_block[block_hash][block.block_id] + + if len(self.cached_block_hash_to_block[block_hash]) == 0: + del self.cached_block_hash_to_block[block_hash] def _get_cached_block(self, block_hash: BlockHashType) -> Optional[KVCacheBlock]: @@ -355,43 +333,50 @@ def _touch(self, blocks: List[KVCacheBlock]) -> None: # candidate), so remove it. if block.ref_cnt == 0: self.free_block_queue.remove(block) - block.ref_cnt += 1 - - def _add_token_ids_to_blocks( - self, - blocks: List[KVCacheBlock], - token_ids: List[int], - parent_block: Optional[KVCacheBlock] = None) -> int: - """Add token IDs to a list of allocated blocks. - If a block becomes full after adding token IDs, cache it. - Return the token ID index that has not been added to the blocks - if the blocks are not enough to hold all the token IDs. + block.incr_ref() - Args: - blocks: A list of blocks to add token IDs. - token_ids: A list of token IDs to add. - parent_block: The parent block. None if this is the - first block. + def _cache_full_blocks( + self, + request: Request, + blk_start_idx: int, + full_blocks: List[KVCacheBlock], + prev_block: Optional[KVCacheBlock], + ) -> None: + """Cache a list of full blocks for prefix caching. - Returns: - The starting token ID index that has not been added to the blocks - due to insufficient given blocks. + This function takes a list of blocks that will have their block hash + metadata to be updated and cached. Given a request, it computes the + block hashes for the blocks starting from `blk_start_idx` to the end + of the request's full blocks, updating the metadata for each block + and caching them in the `cached_block_hash_to_block`. + + Args: + request: The request to cache the blocks. + blk_start_idx: The index of the first block in the request's blocks + to cache. + full_blocks: The list of blocks to update hash metadata. + prev_block: The previous block in the chain. """ - token_id_start = 0 - for curr_block in blocks: - # If all token IDs are added, then the rest of the blocks are - # preallocated blocks, so we only need to update the - # parent_block_id. FIXME - if token_id_start == len(token_ids): - continue - - # Add token IDs to the empty slots in the block. - empty_slots = self.block_size - len(curr_block.token_ids) - token_id_end = min(token_id_start + empty_slots, len(token_ids)) - curr_block.token_ids.extend(token_ids[token_id_start:token_id_end]) - # Cache the block if it becomes full. - if len(curr_block.token_ids) == self.block_size: - self._cache_full_block(curr_block, parent_block) - parent_block = curr_block - token_id_start = token_id_end - return token_id_start + # Update the new blocks with the block hashes through the chain. 
+ prev_block_hash = (prev_block.block_hash + if prev_block is not None else None) + for i, blk in enumerate(full_blocks): + blk_idx = blk_start_idx + i + + block_tokens = request.all_token_ids[blk_idx * + self.block_size:(blk_idx + + 1) * + self.block_size] + assert len(block_tokens) == self.block_size, ( + f"Expected {self.block_size} tokens, got {len(block_tokens)} " + f"at {blk_idx}th block for request " + f"{request.request_id}({request})") + + # Compute the hash of the current block. + block_hash = hash_block_tokens(prev_block_hash, + tuple(block_tokens)) + + # Update and added the full block to the cache. + blk.block_hash = block_hash + self.cached_block_hash_to_block[block_hash][blk.block_id] = blk + prev_block_hash = block_hash diff --git a/vllm/v1/core/kv_cache_utils.py b/vllm/v1/core/kv_cache_utils.py index 33dbfb7377bfd..fb666c364bfb2 100644 --- a/vllm/v1/core/kv_cache_utils.py +++ b/vllm/v1/core/kv_cache_utils.py @@ -1,6 +1,6 @@ """KV-Cache Utilities.""" -from dataclasses import dataclass, field -from typing import List, Optional, Tuple, Union +from dataclasses import dataclass +from typing import List, Optional, Tuple from vllm.logger import init_logger @@ -16,27 +16,34 @@ class KVCacheBlock: block_id: int # Reference count. ref_cnt: int = 0 - # Token IDs in the block. When the block is full, the type of token_ids - # should be Tuple[int] for fast matching. - token_ids: Union[List[int], Tuple[int]] = field(default_factory=list) # The hash of the block composed of (block hash, tuple of token IDs). # It is only available when the block is full. - block_hash: Optional[BlockHashType] = None - # The number of hashed tokens. More hashed tokens means the block - # is closer to the end of a prompt and more likely to be evicted. - num_hashed_tokens: int = 0 + _block_hash: Optional[BlockHashType] = None # Used to construct a doubly linked list for free blocks. # These two attributes should only be manipulated by FreeKVCacheBlockQueue. prev_free_block: Optional["KVCacheBlock"] = None next_free_block: Optional["KVCacheBlock"] = None - def reset(self): - """Reset the block metadata.""" - self.ref_cnt = 0 - self.token_ids = [] - self.block_hash = None - self.num_hashed_tokens = 0 + def incr_ref(self): + self.ref_cnt += 1 + + def decr_ref(self): + self.ref_cnt -= 1 + + @property + def block_hash(self) -> Optional[BlockHashType]: + return self._block_hash + + @block_hash.setter + def block_hash(self, block_hash: BlockHashType): + assert self.block_hash is None, ( + "The block already has a hash. 
This should not happen.") + self._block_hash = block_hash + + def reset_hash(self): + """Reset the block hash when the block is evicted.""" + self._block_hash = None class FreeKVCacheBlockQueue: From 948c859571af9588e344079cc0e79bbf8597cb18 Mon Sep 17 00:00:00 2001 From: zixuanzhang226 Date: Fri, 22 Nov 2024 16:16:14 -0800 Subject: [PATCH 089/122] support bitsandbytes quantization with qwen model (#10549) Signed-off-by: Ubuntu --- vllm/model_executor/models/qwen.py | 12 ++++++++++++ 1 file changed, 12 insertions(+) diff --git a/vllm/model_executor/models/qwen.py b/vllm/model_executor/models/qwen.py index d3a776f665c74..8f001200308fe 100644 --- a/vllm/model_executor/models/qwen.py +++ b/vllm/model_executor/models/qwen.py @@ -1028,6 +1028,18 @@ class QWenLLM(QWenBaseModel): embedding_modules = {} embedding_padding_modules = [] + default_bitsandbytes_target_modules = [ + ".c_attn.", + ".c_proj.", + ".w1.", + ".w2.", + ] + bitsandbytes_stacked_params_mapping = { + # shard_name, weight_name, index + "w2": ("gate_up_proj", 0), + "w1": ("gate_up_proj", 1), + } + class QWenVL(QWenBaseModel, SupportsMultiModal): packed_modules_mapping = { From 28598f3939f9a04800f514e7fe62ab9bb8f617ec Mon Sep 17 00:00:00 2001 From: Russell Bryant Date: Fri, 22 Nov 2024 19:22:53 -0500 Subject: [PATCH 090/122] [Core] remove temporary local variables in LLMEngine.__init__ (#10577) Signed-off-by: Russell Bryant --- vllm/engine/llm_engine.py | 143 ++++++++++++++++++-------------------- 1 file changed, 66 insertions(+), 77 deletions(-) diff --git a/vllm/engine/llm_engine.py b/vllm/engine/llm_engine.py index 2a5eaf1340762..fb21b2dedeb74 100644 --- a/vllm/engine/llm_engine.py +++ b/vllm/engine/llm_engine.py @@ -231,19 +231,18 @@ def __init__( use_cached_outputs: bool = False, ) -> None: - # TODO: remove the local variables and use self.* throughout the class. 
- model_config = self.model_config = vllm_config.model_config - cache_config = self.cache_config = vllm_config.cache_config - lora_config = self.lora_config = vllm_config.lora_config - parallel_config = self.parallel_config = vllm_config.parallel_config - scheduler_config = self.scheduler_config = vllm_config.scheduler_config - device_config = self.device_config = vllm_config.device_config - speculative_config = self.speculative_config = vllm_config.speculative_config # noqa - load_config = self.load_config = vllm_config.load_config - decoding_config = self.decoding_config = vllm_config.decoding_config or DecodingConfig( # noqa + self.model_config = vllm_config.model_config + self.cache_config = vllm_config.cache_config + self.lora_config = vllm_config.lora_config + self.parallel_config = vllm_config.parallel_config + self.scheduler_config = vllm_config.scheduler_config + self.device_config = vllm_config.device_config + self.speculative_config = vllm_config.speculative_config # noqa + self.load_config = vllm_config.load_config + self.decoding_config = vllm_config.decoding_config or DecodingConfig( # noqa ) - prompt_adapter_config = self.prompt_adapter_config = vllm_config.prompt_adapter_config # noqa - observability_config = self.observability_config = vllm_config.observability_config or ObservabilityConfig( # noqa + self.prompt_adapter_config = vllm_config.prompt_adapter_config # noqa + self.observability_config = vllm_config.observability_config or ObservabilityConfig( # noqa ) logger.info( @@ -265,54 +264,43 @@ def __init__( "mm_processor_kwargs=%s, pooler_config=%r," "compilation_config=%r", VLLM_VERSION, - model_config.model, - speculative_config, - model_config.tokenizer, - model_config.skip_tokenizer_init, - model_config.tokenizer_mode, - model_config.revision, - model_config.override_neuron_config, - model_config.tokenizer_revision, - model_config.trust_remote_code, - model_config.dtype, - model_config.max_model_len, - load_config.download_dir, - load_config.load_format, - parallel_config.tensor_parallel_size, - parallel_config.pipeline_parallel_size, - parallel_config.disable_custom_all_reduce, - model_config.quantization, - model_config.enforce_eager, - cache_config.cache_dtype, - model_config.quantization_param_path, - device_config.device, - decoding_config, - observability_config, - model_config.seed, - model_config.served_model_name, - scheduler_config.num_scheduler_steps, - scheduler_config.chunked_prefill_enabled, - scheduler_config.multi_step_stream_outputs, - cache_config.enable_prefix_caching, - model_config.use_async_output_proc, + self.model_config.model, + self.speculative_config, + self.model_config.tokenizer, + self.model_config.skip_tokenizer_init, + self.model_config.tokenizer_mode, + self.model_config.revision, + self.model_config.override_neuron_config, + self.model_config.tokenizer_revision, + self.model_config.trust_remote_code, + self.model_config.dtype, + self.model_config.max_model_len, + self.load_config.download_dir, + self.load_config.load_format, + self.parallel_config.tensor_parallel_size, + self.parallel_config.pipeline_parallel_size, + self.parallel_config.disable_custom_all_reduce, + self.model_config.quantization, + self.model_config.enforce_eager, + self.cache_config.cache_dtype, + self.model_config.quantization_param_path, + self.device_config.device, + self.decoding_config, + self.observability_config, + self.model_config.seed, + self.model_config.served_model_name, + self.scheduler_config.num_scheduler_steps, + 
self.scheduler_config.chunked_prefill_enabled, + self.scheduler_config.multi_step_stream_outputs, + self.cache_config.enable_prefix_caching, + self.model_config.use_async_output_proc, use_cached_outputs, - model_config.mm_processor_kwargs, - model_config.pooler_config, + self.model_config.mm_processor_kwargs, + self.model_config.pooler_config, vllm_config.compilation_config, ) # TODO(woosuk): Print more configs in debug mode. - self.model_config = model_config - self.cache_config = cache_config - self.lora_config = lora_config - self.parallel_config = parallel_config - self.scheduler_config = scheduler_config - self.device_config = device_config - self.speculative_config = speculative_config - self.load_config = load_config - self.decoding_config = decoding_config or DecodingConfig() - self.prompt_adapter_config = prompt_adapter_config - self.observability_config = observability_config or ObservabilityConfig( - ) + self.log_stats = log_stats self.use_cached_outputs = use_cached_outputs @@ -334,15 +322,15 @@ def get_tokenizer_for_seq(sequence: Sequence) -> AnyTokenizer: self.seq_counter = Counter() self.generation_config_fields = _load_generation_config_dict( - model_config) + self.model_config) - self.input_preprocessor = InputPreprocessor(model_config, + self.input_preprocessor = InputPreprocessor(self.model_config, self.tokenizer, mm_registry) self.input_registry = input_registry self.input_processor = input_registry.create_input_processor( - model_config) + self.model_config) self.model_executor = executor_class(vllm_config=vllm_config, ) @@ -354,36 +342,36 @@ def get_tokenizer_for_seq(sequence: Sequence) -> AnyTokenizer: from vllm.model_executor.model_loader import ( get_architecture_class_name) usage_message.report_usage( - get_architecture_class_name(model_config), + get_architecture_class_name(self.model_config), usage_context, extra_kvs={ # Common configuration "dtype": - str(model_config.dtype), + str(self.model_config.dtype), "tensor_parallel_size": - parallel_config.tensor_parallel_size, + self.parallel_config.tensor_parallel_size, "block_size": - cache_config.block_size, + self.cache_config.block_size, "gpu_memory_utilization": - cache_config.gpu_memory_utilization, + self.cache_config.gpu_memory_utilization, # Quantization "quantization": - model_config.quantization, + self.model_config.quantization, "kv_cache_dtype": - str(cache_config.cache_dtype), + str(self.cache_config.cache_dtype), # Feature flags "enable_lora": - bool(lora_config), + bool(self.lora_config), "enable_prompt_adapter": - bool(prompt_adapter_config), + bool(self.prompt_adapter_config), "enable_prefix_caching": - cache_config.enable_prefix_caching, + self.cache_config.enable_prefix_caching, "enforce_eager": - model_config.enforce_eager, + self.model_config.enforce_eager, "disable_custom_all_reduce": - parallel_config.disable_custom_all_reduce, + self.parallel_config.disable_custom_all_reduce, }) if self.tokenizer: @@ -402,7 +390,7 @@ def get_tokenizer_for_seq(sequence: Sequence) -> AnyTokenizer: for _ in range(self.parallel_config.pipeline_parallel_size) ] - if model_config.use_async_output_proc: + if self.model_config.use_async_output_proc: process_model_outputs = weak_bind(self._process_model_outputs) self.async_callbacks = [ @@ -422,11 +410,11 @@ def get_tokenizer_for_seq(sequence: Sequence) -> AnyTokenizer: # GPU and CPU blocks, which are profiled in the distributed executor. 
self.scheduler = [ Scheduler( - scheduler_config, cache_config, lora_config, - parallel_config.pipeline_parallel_size, + self.scheduler_config, self.cache_config, self.lora_config, + self.parallel_config.pipeline_parallel_size, self.async_callbacks[v_id] - if model_config.use_async_output_proc else None) - for v_id in range(parallel_config.pipeline_parallel_size) + if self.model_config.use_async_output_proc else None) + for v_id in range(self.parallel_config.pipeline_parallel_size) ] # Metric Logging. @@ -448,7 +436,8 @@ def get_tokenizer_for_seq(sequence: Sequence) -> AnyTokenizer: "prometheus": PrometheusStatLogger( local_interval=_LOCAL_LOGGING_INTERVAL_SEC, - labels=dict(model_name=model_config.served_model_name), + labels=dict( + model_name=self.model_config.served_model_name), max_model_len=self.model_config.max_model_len), } self.stat_loggers["prometheus"].info("cache_config", From d345f409b7478c0e547b238916ec9e90b6156bbc Mon Sep 17 00:00:00 2001 From: Zhonghua Deng Date: Sat, 23 Nov 2024 09:16:15 +0800 Subject: [PATCH 091/122] [V1] EngineCore supports profiling (#10564) Signed-off-by: Abatom --- vllm/v1/engine/__init__.py | 6 ++++++ vllm/v1/engine/async_llm.py | 4 ++-- vllm/v1/engine/core.py | 14 ++++++++++++-- vllm/v1/engine/core_client.py | 28 +++++++++++++++++++++++----- vllm/v1/worker/gpu_worker.py | 25 +++++++++++++++++++++++++ 5 files changed, 68 insertions(+), 9 deletions(-) diff --git a/vllm/v1/engine/__init__.py b/vllm/v1/engine/__init__.py index edfb8bd7c2fc1..967124fd850ea 100644 --- a/vllm/v1/engine/__init__.py +++ b/vllm/v1/engine/__init__.py @@ -68,6 +68,11 @@ class EngineCoreOutputs(msgspec.Struct, outputs: List[EngineCoreOutput] +@dataclass +class EngineCoreProfile: + is_start: bool + + class EngineCoreRequestType(enum.Enum): """ Request types defined as hex byte strings, so it can be sent over sockets @@ -75,3 +80,4 @@ class EngineCoreRequestType(enum.Enum): """ ADD = b'\x00' ABORT = b'\x01' + PROFILE = b'\x02' diff --git a/vllm/v1/engine/async_llm.py b/vllm/v1/engine/async_llm.py index 09bff9655a882..c44ebb2a85ba0 100644 --- a/vllm/v1/engine/async_llm.py +++ b/vllm/v1/engine/async_llm.py @@ -346,10 +346,10 @@ async def check_health(self) -> None: logger.debug("Called check_health.") async def start_profile(self) -> None: - raise ValueError("Not supported on V1 yet.") + await self.engine_core.profile(True) async def stop_profile(self) -> None: - raise ValueError("Not supported on V1 yet.") + await self.engine_core.profile(False) @property def is_running(self) -> bool: diff --git a/vllm/v1/engine/core.py b/vllm/v1/engine/core.py index 35ed131d50de9..1a978fbe7355f 100644 --- a/vllm/v1/engine/core.py +++ b/vllm/v1/engine/core.py @@ -1,4 +1,5 @@ import multiprocessing +import pickle import queue import threading import time @@ -16,7 +17,8 @@ from vllm.usage.usage_lib import UsageContext from vllm.v1.core.scheduler import Scheduler from vllm.v1.engine import (EngineCoreOutput, EngineCoreOutputs, - EngineCoreRequest, EngineCoreRequestType) + EngineCoreProfile, EngineCoreRequest, + EngineCoreRequestType) from vllm.v1.engine.mm_input_mapper import MMInputMapper from vllm.v1.executor.gpu_executor import GPUExecutor from vllm.v1.request import Request, RequestStatus @@ -126,6 +128,9 @@ def step(self) -> List[EngineCoreOutput]: scheduler_output, output) return engine_core_outputs + def profile(self, is_start=True): + self.model_executor.worker.profile(is_start) + class EngineCoreProc(EngineCore): """ZMQ-wrapper for running EngineCore in background process.""" @@ -312,11 
+317,14 @@ def _log_stats(self): self._last_logging_time = now def _handle_client_request( - self, request: Union[EngineCoreRequest, List[str]]) -> None: + self, request: Union[EngineCoreRequest, EngineCoreProfile, + List[str]]) -> None: """Handle EngineCoreRequest or EngineCoreABORT from Client.""" if isinstance(request, EngineCoreRequest): self.add_request(request) + elif isinstance(request, EngineCoreProfile): + self.model_executor.worker.profile(request.is_start) else: # TODO: make an EngineCoreAbort wrapper assert isinstance(request, list) @@ -341,6 +349,8 @@ def process_input_socket(self, input_path: str): request = decoder_add_req.decode(request_data) elif request_type == EngineCoreRequestType.ABORT.value: request = decoder_abort_req.decode(request_data) + elif request_type == EngineCoreRequestType.PROFILE.value: + request = pickle.loads(request_data) else: raise ValueError(f"Unknown RequestType: {request_type}") diff --git a/vllm/v1/engine/core_client.py b/vllm/v1/engine/core_client.py index 09801e20e16ca..835963f7ee86c 100644 --- a/vllm/v1/engine/core_client.py +++ b/vllm/v1/engine/core_client.py @@ -9,7 +9,8 @@ from vllm.logger import init_logger from vllm.utils import get_open_zmq_ipc_path from vllm.v1.engine import (EngineCoreOutput, EngineCoreOutputs, - EngineCoreRequest, EngineCoreRequestType) + EngineCoreProfile, EngineCoreRequest, + EngineCoreRequestType) from vllm.v1.engine.core import EngineCore, EngineCoreProc from vllm.v1.serial_utils import PickleEncoder @@ -58,6 +59,9 @@ def get_output(self) -> List[EngineCoreOutput]: def add_request(self, request: EngineCoreRequest) -> None: raise NotImplementedError + async def profile(self, is_start=True) -> None: + raise NotImplementedError + def abort_requests(self, request_ids: List[str]) -> None: raise NotImplementedError @@ -95,6 +99,9 @@ def add_request(self, request: EngineCoreRequest) -> None: def abort_requests(self, request_ids: List[str]) -> None: self.engine_core.abort_requests(request_ids) + async def profile(self, is_start=True) -> None: + self.engine_core.profile(is_start) + class MPClient(EngineCoreClient): """ @@ -177,8 +184,10 @@ def get_output(self) -> List[EngineCoreOutput]: engine_core_outputs = self.decoder.decode(frame.buffer).outputs return engine_core_outputs - def _send_input(self, request_type: EngineCoreRequestType, - request: Union[EngineCoreRequest, List[str]]) -> None: + def _send_input( + self, request_type: EngineCoreRequestType, + request: Union[EngineCoreRequest, EngineCoreProfile, + List[str]]) -> None: # (RequestType, SerializedRequest) msg = (request_type.value, self.encoder.encode(request)) @@ -190,6 +199,10 @@ def add_request(self, request: EngineCoreRequest) -> None: def abort_requests(self, request_ids: List[str]) -> None: self._send_input(EngineCoreRequestType.ABORT, request_ids) + async def profile(self, is_start=True) -> None: + self._send_input(EngineCoreRequestType.PROFILE, + EngineCoreProfile(is_start)) + class AsyncMPClient(MPClient): """Asyncio-compatible client for multi-proc EngineCore.""" @@ -205,8 +218,9 @@ async def get_output_async(self) -> List[EngineCoreOutput]: return engine_core_outputs async def _send_input( - self, request_type: EngineCoreRequestType, - request: Union[EngineCoreRequest, List[str]]) -> None: + self, request_type: EngineCoreRequestType, + request: Union[EngineCoreRequest, EngineCoreProfile, + List[str]]) -> None: msg = (request_type.value, self.encoder.encode(request)) await self.input_socket.send_multipart(msg, copy=False) @@ -217,3 +231,7 @@ async def 
add_request_async(self, request: EngineCoreRequest) -> None: async def abort_requests_async(self, request_ids: List[str]) -> None: if len(request_ids) > 0: await self._send_input(EngineCoreRequestType.ABORT, request_ids) + + async def profile(self, is_start=True) -> None: + await self._send_input(EngineCoreRequestType.PROFILE, + EngineCoreProfile(is_start)) diff --git a/vllm/v1/worker/gpu_worker.py b/vllm/v1/worker/gpu_worker.py index 7973349f14a5d..d33b55a8a9f9a 100644 --- a/vllm/v1/worker/gpu_worker.py +++ b/vllm/v1/worker/gpu_worker.py @@ -6,6 +6,7 @@ import torch import torch.distributed +import vllm.envs as envs from vllm.config import CacheConfig, ModelConfig, ParallelConfig, VllmConfig from vllm.distributed import (ensure_model_parallel_initialized, init_distributed_environment, @@ -56,6 +57,22 @@ def __init__( init_cached_hf_modules() self.model_runner = GPUModelRunner(vllm_config) + # Torch profiler. Enabled and configured through env vars: + # VLLM_TORCH_PROFILER_DIR=/path/to/save/trace + if envs.VLLM_TORCH_PROFILER_DIR: + torch_profiler_trace_dir = envs.VLLM_TORCH_PROFILER_DIR + logger.info("Profiling enabled. Traces will be saved to: %s", + torch_profiler_trace_dir) + self.profiler = torch.profiler.profile( + activities=[ + torch.profiler.ProfilerActivity.CPU, + torch.profiler.ProfilerActivity.CUDA, + ], + with_stack=True, + on_trace_ready=torch.profiler.tensorboard_trace_handler( + torch_profiler_trace_dir, use_gzip=True)) + else: + self.profiler = None def initialize(self): if self.device_config.device.type == "cuda": @@ -184,6 +201,14 @@ def execute_model( # TODO(woosuk): Send the output to the engine process. return output + def profile(self, is_start=True): + if self.profiler is None: + raise RuntimeError("Profiler is not enabled.") + if is_start: + self.profiler.start() + else: + self.profiler.stop() + def init_worker_distributed_environment( parallel_config: ParallelConfig, From d559979c548c4bee6eca089d5e6dc318630bf465 Mon Sep 17 00:00:00 2001 From: youkaichao Date: Fri, 22 Nov 2024 17:34:03 -0800 Subject: [PATCH 092/122] [bugfix] fix cpu tests (#10585) Signed-off-by: youkaichao --- vllm/worker/cpu_embedding_model_runner.py | 4 +++- vllm/worker/cpu_enc_dec_model_runner.py | 4 +++- vllm/worker/cpu_model_runner.py | 18 ++++++++++-------- 3 files changed, 16 insertions(+), 10 deletions(-) diff --git a/vllm/worker/cpu_embedding_model_runner.py b/vllm/worker/cpu_embedding_model_runner.py index d0b8fec48d74f..978de73df6b70 100644 --- a/vllm/worker/cpu_embedding_model_runner.py +++ b/vllm/worker/cpu_embedding_model_runner.py @@ -3,6 +3,7 @@ import torch +from vllm.forward_context import set_forward_context from vllm.model_executor.pooling_metadata import PoolingMetadata from vllm.multimodal import MultiModalKwargs from vllm.pooling_params import PoolingParams @@ -64,7 +65,8 @@ def execute_model( intermediate_tensors, } - hidden_states = model_executable(**execute_model_kwargs) + with set_forward_context(model_input.attn_metadata, self.vllm_config): + hidden_states = model_executable(**execute_model_kwargs) # Only perform pooling in the driver worker. 
if not self.is_driver_worker: diff --git a/vllm/worker/cpu_enc_dec_model_runner.py b/vllm/worker/cpu_enc_dec_model_runner.py index d040831870bd8..1f8e2d2d88a23 100644 --- a/vllm/worker/cpu_enc_dec_model_runner.py +++ b/vllm/worker/cpu_enc_dec_model_runner.py @@ -4,6 +4,7 @@ import torch from vllm.attention import AttentionMetadata +from vllm.forward_context import set_forward_context from vllm.model_executor import SamplingMetadata from vllm.model_executor.layers.sampler import SamplerOutput from vllm.multimodal import MultiModalKwargs @@ -303,7 +304,8 @@ def execute_model( intermediate_tensors, } - hidden_states = model_executable(**execute_model_kwargs) + with set_forward_context(model_input.attn_metadata, self.vllm_config): + hidden_states = model_executable(**execute_model_kwargs) # Compute the logits. logits = self.model.compute_logits(hidden_states, diff --git a/vllm/worker/cpu_model_runner.py b/vllm/worker/cpu_model_runner.py index 66bd844c94901..2cf573625401a 100644 --- a/vllm/worker/cpu_model_runner.py +++ b/vllm/worker/cpu_model_runner.py @@ -10,6 +10,7 @@ from vllm.attention import AttentionMetadata, get_attn_backend from vllm.config import VllmConfig +from vllm.forward_context import set_forward_context from vllm.logger import init_logger from vllm.model_executor import SamplingMetadata from vllm.model_executor.layers.rotary_embedding import MRotaryEmbedding @@ -487,14 +488,15 @@ def execute_model( multimodal_kwargs = MultiModalKwargs.as_kwargs( model_input.multi_modal_kwargs, device=self.device) - hidden_states = model_executable( - input_ids=model_input.input_tokens, - positions=model_input.input_positions, - kv_caches=kv_caches, - attn_metadata=model_input.attn_metadata, - intermediate_tensors=intermediate_tensors, - **multimodal_kwargs, - ) + with set_forward_context(model_input.attn_metadata, self.vllm_config): + hidden_states = model_executable( + input_ids=model_input.input_tokens, + positions=model_input.input_positions, + kv_caches=kv_caches, + attn_metadata=model_input.attn_metadata, + intermediate_tensors=intermediate_tensors, + **multimodal_kwargs, + ) # Compute the logits. logits = self.model.compute_logits(hidden_states, From 9195dbdbcadb681db67181a664521bd6ef98deee Mon Sep 17 00:00:00 2001 From: Travis Johnson Date: Fri, 22 Nov 2024 19:17:38 -0700 Subject: [PATCH 093/122] [Bugfix][Frontend] Update Llama Chat Templates to also support Non-Tool use (#10164) Signed-off-by: Travis Johnson --- .../tool_chat_template_llama3.1_json.jinja | 46 +++++++-- .../tool_chat_template_llama3.2_json.jinja | 96 ++++++++++++++----- tests/entrypoints/test_chat_utils.py | 4 +- 3 files changed, 110 insertions(+), 36 deletions(-) diff --git a/examples/tool_chat_template_llama3.1_json.jinja b/examples/tool_chat_template_llama3.1_json.jinja index c24a7e51335ef..033830936a56b 100644 --- a/examples/tool_chat_template_llama3.1_json.jinja +++ b/examples/tool_chat_template_llama3.1_json.jinja @@ -19,10 +19,18 @@ {#- This block extracts the system message, so we can slot it into the right place. #} {%- if messages[0]['role'] == 'system' %} - {%- set system_message = messages[0]['content']|trim %} + {%- if messages[0]['content'] is string %} + {%- set system_message = messages[0]['content']|trim %} + {%- else %} + {%- set system_message = messages[0]['content'][0]['text']|trim %} + {%- endif %} {%- set messages = messages[1:] %} {%- else %} - {%- set system_message = "You are a helpful assistant with tool calling capabilities. 
Only reply with a tool call if the function exists in the library provided by the user. If it doesn't exist, just reply directly in natural language. When you receive a tool call response, use the output to format an answer to the original user question." %} + {%- if tools is not none %} + {%- set system_message = "You are a helpful assistant with tool calling capabilities. Only reply with a tool call if the function exists in the library provided by the user. If it doesn't exist, just reply directly in natural language. When you receive a tool call response, use the output to format an answer to the original user question." %} + {%- else %} + {%- set system_message = "" %} + {%- endif %} {%- endif %} {#- System message #} @@ -33,8 +41,8 @@ {{- "Cutting Knowledge Date: December 2023\n" }} {{- "Today Date: " + date_string + "\n\n" }} {%- if tools is not none and not tools_in_user_message %} - {{- "You have access to the following functions. To call a function, please respond with JSON for a function call." }} - {{- 'Respond in the format {"name": function name, "parameters": dictionary of argument name and its value}.' }} + {{- "You have access to the following functions. To call a function, please respond with JSON for a function call. " }} + {{- 'Respond in the format {"name": function name, "parameters": dictionary of argument name and its value}. ' }} {{- "Do not use variables.\n\n" }} {%- for t in tools %} {{- t | tojson(indent=4) }} @@ -48,7 +56,11 @@ {%- if tools_in_user_message and not tools is none %} {#- Extract the first user message so we can plug it in here #} {%- if messages | length != 0 %} - {%- set first_user_message = messages[0]['content']|trim %} + {%- if messages[0]['content'] is string %} + {%- set first_user_message = messages[0]['content']|trim %} + {%- else %} + {%- set first_user_message = messages[0]['content'] | selectattr('type', 'equalto', 'text') | map(attribute='text') | map('trim') | join('\n') %} + {%- endif %} {%- set messages = messages[1:] %} {%- else %} {{- raise_exception("Cannot put tools in the first user message when there's no first user message!") }} @@ -56,7 +68,7 @@ {{- '<|start_header_id|>user<|end_header_id|>\n\n' -}} {{- "Given the following functions, please respond with a JSON for a function call " }} {{- "with its proper arguments that best answers the given prompt.\n\n" }} - {{- 'Respond in the format {"name": function name, "parameters": dictionary of argument name and its value}.' }} + {{- 'Respond in the format {"name": function name, "parameters": dictionary of argument name and its value}. 
' }} {{- "Do not use variables.\n\n" }} {%- for t in tools %} {{- t | tojson(indent=4) }} @@ -67,7 +79,17 @@ {%- for message in messages %} {%- if not (message.role == 'ipython' or message.role == 'tool' or 'tool_calls' in message) %} - {{- '<|start_header_id|>' + message['role'] + '<|end_header_id|>\n\n'+ message['content'] | trim + '<|eot_id|>' }} + {{- '<|start_header_id|>' + message['role'] + '<|end_header_id|>\n\n' }} + {%- if message['content'] is string %} + {{- message['content'] | trim}} + {%- else %} + {%- for content in message['content'] %} + {%- if content['type'] == 'text' %} + {{- content['text'] | trim }} + {%- endif %} + {%- endfor %} + {%- endif %} + {{- '<|eot_id|>' }} {%- elif 'tool_calls' in message %} {%- if not message.tool_calls|length == 1 %} {{- raise_exception("This model only supports single tool-calls at once!") }} @@ -81,10 +103,14 @@ {{- "<|eot_id|>" }} {%- elif message.role == "tool" or message.role == "ipython" %} {{- "<|start_header_id|>ipython<|end_header_id|>\n\n" }} - {%- if message.content is mapping %} - {{- message.content | tojson }} - {%- else %} + {%- if message.content is string %} {{- { "output": message.content } | tojson }} + {%- else %} + {%- for content in message['content'] %} + {%- if content['type'] == 'text' %} + {{- { "output": content['text'] } | tojson }} + {%- endif %} + {%- endfor %} {%- endif %} {{- "<|eot_id|>" }} {%- endif %} diff --git a/examples/tool_chat_template_llama3.2_json.jinja b/examples/tool_chat_template_llama3.2_json.jinja index 7e24777726a35..39f902c1c3c40 100644 --- a/examples/tool_chat_template_llama3.2_json.jinja +++ b/examples/tool_chat_template_llama3.2_json.jinja @@ -16,38 +16,70 @@ {%- set tools = none %} {%- endif %} +{#- Find out if there are any images #} +{% set image_ns = namespace(has_images=false) %} +{%- for message in messages %} + {%- for content in message['content'] %} + {%- if content['type'] == 'image' %} + {%- set image_ns.has_images = true %} + {%- endif %} + {%- endfor %} +{%- endfor %} + + {#- This block extracts the system message, so we can slot it into the right place. #} {%- if messages[0]['role'] == 'system' %} - {%- set system_message = messages[0]['content']|trim %} + {%- if messages[0]['content'] is string %} + {%- set system_message = messages[0]['content']|trim %} + {%- else %} + {#- Support vLLM's transforming of a content string to JSON. #} + {%- set system_message = messages[0]['content'][0]['text']|trim %} + {%- endif %} {%- set messages = messages[1:] %} {%- else %} - {%- set system_message = "You are a helpful assistant with tool calling capabilities. Only reply with a tool call if the function exists in the library provided by the user. If it doesn't exist, just reply directly in natural language. When you receive a tool call response, use the output to format an answer to the original user question." %} + {%- if tools is not none %} + {%- set system_message = "You are a helpful assistant with tool calling capabilities. Only reply with a tool call if the function exists in the library provided by the user. If it doesn't exist, just reply directly in natural language. When you receive a tool call response, use the output to format an answer to the original user question." 
%} + {%- else %} + {%- set system_message = "" %} + {%- endif %} {%- endif %} -{#- System message #} -{{- "<|start_header_id|>system<|end_header_id|>\n\n" }} -{%- if tools is not none %} - {{- "Environment: ipython\n" }} +{#- Including an image is not compatible with a system message #} +{%- if image_ns.has_images and not system_message == "" %} + {{- raise_exception("Prompting with images is incompatible with system messages and tool use.") }} {%- endif %} -{{- "Cutting Knowledge Date: December 2023\n" }} -{{- "Today Date: " + date_string + "\n\n" }} -{%- if tools is not none and not tools_in_user_message %} - {{- "You have access to the following functions. To call a function, please respond with JSON for a function call." }} - {{- 'Respond in the format {"name": function name, "parameters": dictionary of argument name and its value}.' }} - {{- "Do not use variables.\n\n" }} - {%- for t in tools %} - {{- t | tojson(indent=4) }} - {{- "\n\n" }} - {%- endfor %} + + +{#- System message, if there are no images #} +{%- if not image_ns.has_images %} + {{- "<|start_header_id|>system<|end_header_id|>\n\n" }} + {%- if tools is not none %} + {{- "Environment: ipython\n" }} + {%- endif %} + {{- "Cutting Knowledge Date: December 2023\n" }} + {{- "Today Date: " + date_string + "\n\n" }} + {%- if tools is not none and not tools_in_user_message %} + {{- "You have access to the following functions. To call a function, please respond with JSON for a function call. " }} + {{- 'Respond in the format {"name": function name, "parameters": dictionary of argument name and its value}. ' }} + {{- "Do not use variables.\n\n" }} + {%- for t in tools %} + {{- t | tojson(indent=4) }} + {{- "\n\n" }} + {%- endfor %} + {%- endif %} + {{- system_message }} + {{- "<|eot_id|>" }} {%- endif %} -{{- system_message }} -{{- "<|eot_id|>" }} {#- Custom tools are passed in a user message with some extra guidance #} {%- if tools_in_user_message and not tools is none %} {#- Extract the first user message so we can plug it in here #} {%- if messages | length != 0 %} - {%- set first_user_message = messages[0]['content']|trim %} + {%- if messages[0]['content'] is string %} + {%- set first_user_message = messages[0]['content']|trim %} + {%- else %} + {%- set first_user_message = messages[0]['content'] | selectattr('type', 'equalto', 'text') | map(attribute='text') | map('trim') | join('\n') %} + {%- endif %} {%- set messages = messages[1:] %} {%- else %} {{- raise_exception("Cannot put tools in the first user message when there's no first user message!") }} @@ -55,7 +87,7 @@ {{- '<|start_header_id|>user<|end_header_id|>\n\n' -}} {{- "Given the following functions, please respond with a JSON for a function call " }} {{- "with its proper arguments that best answers the given prompt.\n\n" }} - {{- 'Respond in the format {"name": function name, "parameters": dictionary of argument name and its value}.' }} + {{- 'Respond in the format {"name": function name, "parameters": dictionary of argument name and its value}. 
' }} {{- "Do not use variables.\n\n" }} {%- for t in tools %} {{- t | tojson(indent=4) }} @@ -66,7 +98,19 @@ {%- for message in messages %} {%- if not (message.role == 'ipython' or message.role == 'tool' or 'tool_calls' in message) %} - {{- '<|start_header_id|>' + message['role'] + '<|end_header_id|>\n\n'+ message['content'] | trim + '<|eot_id|>' }} + {{- '<|start_header_id|>' + message['role'] + '<|end_header_id|>\n\n' }} + {%- if message['content'] is string %} + {{- message['content'] | trim}} + {%- else %} + {%- for content in message['content'] %} + {%- if content['type'] == 'image' %} + {{- '<|image|>' }} + {%- elif content['type'] == 'text' %} + {{- content['text'] | trim }} + {%- endif %} + {%- endfor %} + {%- endif %} + {{- '<|eot_id|>' }} {%- elif 'tool_calls' in message %} {%- if not message.tool_calls|length == 1 %} {{- raise_exception("This model only supports single tool-calls at once!") }} @@ -80,10 +124,14 @@ {{- "<|eot_id|>" }} {%- elif message.role == "tool" or message.role == "ipython" %} {{- "<|start_header_id|>ipython<|end_header_id|>\n\n" }} - {%- if message.content is mapping %} - {{- message.content | tojson }} - {%- else %} + {%- if message.content is string %} {{- { "output": message.content } | tojson }} + {%- else %} + {%- for content in message['content'] %} + {%- if content['type'] == 'text' %} + {{- { "output": content['text'] } | tojson }} + {%- endif %} + {%- endfor %} {%- endif %} {{- "<|eot_id|>" }} {%- endif %} diff --git a/tests/entrypoints/test_chat_utils.py b/tests/entrypoints/test_chat_utils.py index 72477e048eafa..996e60bfee592 100644 --- a/tests/entrypoints/test_chat_utils.py +++ b/tests/entrypoints/test_chat_utils.py @@ -766,8 +766,8 @@ def test_resolve_content_format_hf_defined(model, expected_format): ("tool_chat_template_granite_20b_fc.jinja", "string"), ("tool_chat_template_hermes.jinja", "string"), ("tool_chat_template_internlm2_tool.jinja", "string"), - ("tool_chat_template_llama3.1_json.jinja", "string"), - ("tool_chat_template_llama3.2_json.jinja", "string"), + ("tool_chat_template_llama3.1_json.jinja", "openai"), + ("tool_chat_template_llama3.2_json.jinja", "openai"), ("tool_chat_template_mistral_parallel.jinja", "string"), ("tool_chat_template_mistral.jinja", "string")], ) From ebda51968b12b85c8b5b82727b2b7713dfc44f88 Mon Sep 17 00:00:00 2001 From: Russell Bryant Date: Fri, 22 Nov 2024 21:23:51 -0500 Subject: [PATCH 094/122] [Core] Fix broken log configuration (#10458) Signed-off-by: Russell Bryant --- examples/logging_configuration.md | 2 +- vllm/logger.py | 7 ++++++- 2 files changed, 7 insertions(+), 2 deletions(-) diff --git a/examples/logging_configuration.md b/examples/logging_configuration.md index 0d278b0392403..9ac8b13cd5eaf 100644 --- a/examples/logging_configuration.md +++ b/examples/logging_configuration.md @@ -118,7 +118,7 @@ configuration for the root vLLM logger and for the logger you wish to silence: { "formatters": { "vllm": { - "class": "vllm.logging.NewLineFormatter", + "class": "vllm.logging_utils.NewLineFormatter", "datefmt": "%m-%d %H:%M:%S", "format": "%(levelname)s %(asctime)s %(filename)s:%(lineno)d] %(message)s" } diff --git a/vllm/logger.py b/vllm/logger.py index 9e16e591315ba..538db0dcf19aa 100644 --- a/vllm/logger.py +++ b/vllm/logger.py @@ -50,7 +50,7 @@ def _configure_vllm_root_logger() -> None: - logging_config: Optional[Dict] = None + logging_config: Dict = {} if not VLLM_CONFIGURE_LOGGING and VLLM_LOGGING_CONFIG_PATH: raise RuntimeError( @@ -75,6 +75,11 @@ def _configure_vllm_root_logger() -> None: 
type(custom_config).__name__) logging_config = custom_config + for formatter in logging_config.get("formatters", {}).values(): + # This provides backwards compatibility after #10134. + if formatter.get("class") == "vllm.logging.NewLineFormatter": + formatter["class"] = "vllm.logging_utils.NewLineFormatter" + if logging_config: dictConfig(logging_config) From 978b39744b22e90d49a0f5367c3d933ed26d66c8 Mon Sep 17 00:00:00 2001 From: Tyler Michael Smith Date: Fri, 22 Nov 2024 22:14:03 -0500 Subject: [PATCH 095/122] [Misc] Add pynccl wrappers for all_gather and reduce_scatter (#9432) --- tests/distributed/test_pynccl.py | 69 +++++++++++++++++++ .../device_communicators/pynccl.py | 42 +++++++++++ .../device_communicators/pynccl_wrapper.py | 44 ++++++++++++ 3 files changed, 155 insertions(+) diff --git a/tests/distributed/test_pynccl.py b/tests/distributed/test_pynccl.py index e0e424439e3a5..f702d7c46ea73 100644 --- a/tests/distributed/test_pynccl.py +++ b/tests/distributed/test_pynccl.py @@ -150,6 +150,75 @@ def worker_fn_with_cudagraph(): assert a.mean().cpu().item() == pynccl_comm.world_size**1 +@worker_fn_wrapper +def all_gather_worker_fn(): + pynccl_comm = PyNcclCommunicator(get_world_group().cpu_group, + device=get_world_group().device) + + rank = pynccl_comm.rank + world_size = pynccl_comm.world_size + device = f'cuda:{pynccl_comm.rank}' + + num_elems = 1000 + tensor = torch.arange(num_elems, dtype=torch.float32, + device=device) + rank * num_elems + result = torch.zeros(num_elems * world_size, + dtype=torch.float32, + device=device) + + expected = torch.cat([ + torch.arange(num_elems, dtype=torch.float32) + r * num_elems + for r in range(world_size) + ]).to(device) + + with pynccl_comm.change_state(enable=True): + pynccl_comm.all_gather(result, tensor) + torch.testing.assert_close(result, expected, rtol=1e-5, atol=1e-8) + + +@pytest.mark.skipif(torch.cuda.device_count() < 2, + reason="Need at least 2 GPUs to run the test.") +def test_pynccl_all_gather(): + distributed_run(all_gather_worker_fn, 2) + + +@worker_fn_wrapper +def reduce_scatter_worker_fn(): + pynccl_comm = PyNcclCommunicator(get_world_group().cpu_group, + device=get_world_group().device) + + rank = pynccl_comm.rank + world_size = pynccl_comm.world_size + device = f'cuda:{pynccl_comm.rank}' + + num_elems = 1000 + tensor = torch.arange(num_elems, dtype=torch.float32, + device=device) + rank * num_elems + assert (num_elems % world_size == 0) + result = torch.zeros(num_elems // world_size, + dtype=torch.float32, + device=device) + + # Calculate expected result for this rank's chunk + scattered_size = num_elems // world_size + all_tensors = [ + torch.arange(num_elems, dtype=torch.float32) + r * num_elems + for r in range(world_size) + ] + expected = sum(tensor[rank * scattered_size:(rank + 1) * scattered_size] + for tensor in all_tensors).to(device) + + with pynccl_comm.change_state(enable=True): + pynccl_comm.reduce_scatter(result, tensor) + torch.testing.assert_close(result, expected, rtol=1e-5, atol=1e-8) + + +@pytest.mark.skipif(torch.cuda.device_count() < 2, + reason="Need at least 2 GPUs to run the test.") +def test_pynccl_reduce_scatter(): + distributed_run(reduce_scatter_worker_fn, 2) + + @pytest.mark.skipif(torch.cuda.device_count() < 2, reason="Need at least 2 GPUs to run the test.") def test_pynccl_with_cudagraph(): diff --git a/vllm/distributed/device_communicators/pynccl.py b/vllm/distributed/device_communicators/pynccl.py index 7c6f48e88637b..7411304eb18fa 100644 --- a/vllm/distributed/device_communicators/pynccl.py 
+++ b/vllm/distributed/device_communicators/pynccl.py @@ -131,6 +131,48 @@ def all_reduce(self, ncclRedOpTypeEnum.from_torch(op), self.comm, cudaStream_t(stream.cuda_stream)) + def all_gather(self, + output_tensor: torch.Tensor, + input_tensor: torch.Tensor, + stream=None): + if self.disabled: + return + # nccl communicator created on a specific device + # will only work on tensors on the same device + # otherwise it will cause "illegal memory access" + assert input_tensor.device == self.device, ( + f"this nccl communicator is created to work on {self.device}, " + f"but the input tensor is on {input_tensor.device}") + if stream is None: + stream = self.stream + self.nccl.ncclAllGather( + buffer_type(input_tensor.data_ptr()), + buffer_type(output_tensor.data_ptr()), input_tensor.numel(), + ncclDataTypeEnum.from_torch(input_tensor.dtype), self.comm, + cudaStream_t(stream.cuda_stream)) + + def reduce_scatter(self, + output_tensor: torch.Tensor, + input_tensor: torch.Tensor, + op: ReduceOp = ReduceOp.SUM, + stream=None): + if self.disabled: + return + # nccl communicator created on a specific device + # will only work on tensors on the same device + # otherwise it will cause "illegal memory access" + assert input_tensor.device == self.device, ( + f"this nccl communicator is created to work on {self.device}, " + f"but the input tensor is on {input_tensor.device}") + if stream is None: + stream = self.stream + self.nccl.ncclReduceScatter( + buffer_type(input_tensor.data_ptr()), + buffer_type(output_tensor.data_ptr()), output_tensor.numel(), + ncclDataTypeEnum.from_torch(input_tensor.dtype), + ncclRedOpTypeEnum.from_torch(op), self.comm, + cudaStream_t(stream.cuda_stream)) + def send(self, tensor: torch.Tensor, dst: int, stream=None): if self.disabled: return diff --git a/vllm/distributed/device_communicators/pynccl_wrapper.py b/vllm/distributed/device_communicators/pynccl_wrapper.py index 7619c98f22148..ff88f72470b27 100644 --- a/vllm/distributed/device_communicators/pynccl_wrapper.py +++ b/vllm/distributed/device_communicators/pynccl_wrapper.py @@ -151,6 +151,28 @@ class NCCLLibrary: ncclRedOp_t, ncclComm_t, cudaStream_t ]), + # ncclResult_t ncclAllGather( + # const void* sendbuff, void* recvbuff, size_t count, + # ncclDataType_t datatype, ncclComm_t comm, + # cudaStream_t stream); + # note that cudaStream_t is a pointer type, so the last argument + # is a pointer + Function("ncclAllGather", ncclResult_t, [ + buffer_type, buffer_type, ctypes.c_size_t, ncclDataType_t, + ncclComm_t, cudaStream_t + ]), + + # ncclResult_t ncclReduceScatter( + # const void* sendbuff, void* recvbuff, size_t count, + # ncclDataType_t datatype, ncclRedOp_t op, ncclComm_t comm, + # cudaStream_t stream); + # note that cudaStream_t is a pointer type, so the last argument + # is a pointer + Function("ncclReduceScatter", ncclResult_t, [ + buffer_type, buffer_type, ctypes.c_size_t, ncclDataType_t, + ncclRedOp_t, ncclComm_t, cudaStream_t + ]), + # ncclResult_t ncclSend( # const void* sendbuff, size_t count, ncclDataType_t datatype, # int dest, ncclComm_t comm, cudaStream_t stream); @@ -258,6 +280,28 @@ def ncclAllReduce(self, sendbuff: buffer_type, recvbuff: buffer_type, datatype, op, comm, stream)) + def ncclReduceScatter(self, sendbuff: buffer_type, recvbuff: buffer_type, + count: int, datatype: int, op: int, comm: ncclComm_t, + stream: cudaStream_t) -> None: + # `datatype` actually should be `ncclDataType_t` + # and `op` should be `ncclRedOp_t` + # both are aliases of `ctypes.c_int` + # when we pass int to a function, it 
will be converted to `ctypes.c_int` + # by ctypes automatically + self.NCCL_CHECK(self._funcs["ncclReduceScatter"](sendbuff, recvbuff, + count, datatype, op, + comm, stream)) + + def ncclAllGather(self, sendbuff: buffer_type, recvbuff: buffer_type, + count: int, datatype: int, comm: ncclComm_t, + stream: cudaStream_t) -> None: + # `datatype` actually should be `ncclDataType_t` + # which is an aliases of `ctypes.c_int` + # when we pass int to a function, it will be converted to `ctypes.c_int` + # by ctypes automatically + self.NCCL_CHECK(self._funcs["ncclAllGather"](sendbuff, recvbuff, count, + datatype, comm, stream)) + def ncclSend(self, sendbuff: buffer_type, count: int, datatype: int, dest: int, comm: ncclComm_t, stream: cudaStream_t) -> None: self.NCCL_CHECK(self._funcs["ncclSend"](sendbuff, count, datatype, From 4aba6e3d1a0cc5cec45efdee0adeaa09278f7518 Mon Sep 17 00:00:00 2001 From: youkaichao Date: Fri, 22 Nov 2024 20:13:54 -0800 Subject: [PATCH 096/122] [core] gemma2 full context length support (#10584) Signed-off-by: youkaichao --- .../test_basic_correctness.py | 25 +++++++++++----- vllm/attention/layer.py | 12 ++++++-- vllm/config.py | 29 +++++++++++++------ vllm/model_executor/models/gemma2.py | 13 +++++---- 4 files changed, 55 insertions(+), 24 deletions(-) diff --git a/tests/basic_correctness/test_basic_correctness.py b/tests/basic_correctness/test_basic_correctness.py index 7f16baa65a644..fcba253d159f3 100644 --- a/tests/basic_correctness/test_basic_correctness.py +++ b/tests/basic_correctness/test_basic_correctness.py @@ -14,11 +14,12 @@ from vllm.platforms import current_platform from vllm.worker.model_runner import ModelInputForGPUWithSamplingMetadata +from ..conftest import VllmRunner from ..models.utils import check_outputs_equal from ..utils import multi_gpu_test MODELS = [ - "facebook/opt-125m", + "google/gemma-2-2b-it", "meta-llama/Llama-3.2-1B", ] @@ -42,8 +43,6 @@ def test_vllm_gc_ed(): @pytest.mark.parametrize("enforce_eager", [False, True]) def test_models( hf_runner, - vllm_runner, - example_prompts, model: str, backend: str, dtype: str, @@ -54,15 +53,27 @@ def test_models( if backend == "FLASHINFER" and current_platform.is_rocm(): pytest.skip("Flashinfer does not support ROCm/HIP.") + if backend == "XFORMERS" and model == "google/gemma-2-2b-it": + pytest.skip( + "XFORMERS does not support gemma2 with full context length.") + os.environ["VLLM_ATTENTION_BACKEND"] = backend + # 5042 tokens for gemma2 + # gemma2 has alternating sliding window size of 4096 + # we need a prompt with more than 4096 tokens to test the sliding window + prompt = "The following numbers of the sequence " + ", ".join( + str(i) for i in range(1024)) + " are:" + example_prompts = [prompt] + with hf_runner(model, dtype=dtype) as hf_model: hf_outputs = hf_model.generate_greedy(example_prompts, max_tokens) - with vllm_runner(model, - dtype=dtype, - enforce_eager=enforce_eager, - gpu_memory_utilization=0.7) as vllm_model: + with VllmRunner(model, + max_model_len=8192, + dtype=dtype, + enforce_eager=enforce_eager, + gpu_memory_utilization=0.7) as vllm_model: vllm_outputs = vllm_model.generate_greedy(example_prompts, max_tokens) check_outputs_equal( diff --git a/vllm/attention/layer.py b/vllm/attention/layer.py index 8acbeaf12b0cf..cb4dedf481c77 100644 --- a/vllm/attention/layer.py +++ b/vllm/attention/layer.py @@ -40,18 +40,26 @@ def __init__( quant_config: Optional[QuantizationConfig] = None, blocksparse_params: Optional[Dict[str, Any]] = None, logits_soft_cap: Optional[float] = None, + 
per_layer_sliding_window: Optional[int] = None, prefix: str = "", ) -> None: super().__init__() + if per_layer_sliding_window is not None: + # per-layer sliding window + sliding_window = per_layer_sliding_window + elif cache_config is not None: + # model-level sliding window + sliding_window = cache_config.sliding_window + else: + sliding_window = None + if cache_config is not None: kv_cache_dtype = cache_config.cache_dtype block_size = cache_config.block_size - sliding_window = cache_config.sliding_window is_attention_free = cache_config.is_attention_free else: kv_cache_dtype = "auto" block_size = 16 - sliding_window = None is_attention_free = False if num_kv_heads is None: num_kv_heads = num_heads diff --git a/vllm/config.py b/vllm/config.py index bb02c2ad4c7d4..730b069e076fb 100644 --- a/vllm/config.py +++ b/vllm/config.py @@ -233,15 +233,26 @@ def __init__( (self.hf_text_config.model_type in ["gemma2"])) if (not self.disable_sliding_window and has_interleaved_attention): - sliding_window_len_min = get_min_sliding_window( - self.hf_text_config.sliding_window) - - print_warning_once( - f"{self.hf_text_config.model_type} has interleaved attention, " - "which is currently not supported by vLLM. Disabling sliding " - "window and capping the max length to the sliding window size " - f"({sliding_window_len_min}).") - self.disable_sliding_window = True + if envs.VLLM_ATTENTION_BACKEND == "XFORMERS": + sliding_window_len_min = get_min_sliding_window( + self.hf_text_config.sliding_window) + + print_warning_once( + f"{self.hf_text_config.model_type} has interleaved " + "attention, which is currently not supported by the " + "XFORMERS backend. Disabling sliding window and capping " + "the max length to the sliding window size " + f"({sliding_window_len_min}).") + self.disable_sliding_window = True + else: + # for a model with interleaved attention, + # the scheduler and the model treat it as full attention + # (i.e., not dropping any tokens outside the window). + # only the attention layer itself is aware of the sliding + # window, and use the window size to compute the attention. + self.hf_text_config.interleaved_sliding_window = sliding_window + delattr(self.hf_text_config, "sliding_window") + sliding_window = None self.max_model_len = _get_and_verify_max_len( hf_config=self.hf_text_config, diff --git a/vllm/model_executor/models/gemma2.py b/vllm/model_executor/models/gemma2.py index 839130364ef4d..9309cced61bb3 100644 --- a/vllm/model_executor/models/gemma2.py +++ b/vllm/model_executor/models/gemma2.py @@ -143,12 +143,12 @@ def __init__(self, is_neox_style=True, ) - # FIXME(woosuk): While Gemma 2 uses sliding window attention for every - # odd layer, vLLM currently ignores it and uses global attention for - # all layers. - use_sliding_window = (layer_idx % 2 == 1 - and config.sliding_window is not None) - del use_sliding_window # Unused. 
+ # reference: + # https://github.com/huggingface/transformers/blob/54be2d7ae87e873482b984cc956e165ca4dc0ba3/src/transformers/models/gemma2/modeling_gemma2.py#L312 # noqa + use_sliding_window = (layer_idx % 2 == 0 and + config.interleaved_sliding_window is not None) + sliding_window = config.interleaved_sliding_window if \ + use_sliding_window else None self.attn = Attention(self.num_heads, self.head_dim, self.scaling, @@ -156,6 +156,7 @@ def __init__(self, cache_config=cache_config, quant_config=quant_config, logits_soft_cap=attn_logits_soft_cap, + per_layer_sliding_window=sliding_window, prefix=f"{prefix}.attn") def forward( From 7d8ffb344f3b9a571d94073644b829eb4baa0a65 Mon Sep 17 00:00:00 2001 From: Varun Vinayak Shenoy Date: Fri, 22 Nov 2024 21:13:29 -0800 Subject: [PATCH 097/122] [Bugfix] Internal Server Error when tool_choice is incorrect. (#10567) Signed-off-by: Varun Shenoy --- tests/entrypoints/openai/test_chat.py | 14 ++++++++++++++ vllm/entrypoints/openai/protocol.py | 12 ++++++------ 2 files changed, 20 insertions(+), 6 deletions(-) diff --git a/tests/entrypoints/openai/test_chat.py b/tests/entrypoints/openai/test_chat.py index 843d15e768093..8d23a2be6f9bb 100644 --- a/tests/entrypoints/openai/test_chat.py +++ b/tests/entrypoints/openai/test_chat.py @@ -829,6 +829,20 @@ async def test_inconsistent_tool_choice_and_tools(client: openai.AsyncOpenAI, "name": "nondefined_function_name" } }) + with pytest.raises(openai.BadRequestError): + await client.chat.completions.create( + model=MODEL_NAME, + messages=messages, + max_completion_tokens=1000, + tools=[{ + "type": "function", + "function": { + "name": "dummy_function_name", + "description": "This is a dummy function", + "parameters": sample_json_schema + } + }], + tool_choice={}) @pytest.mark.asyncio diff --git a/vllm/entrypoints/openai/protocol.py b/vllm/entrypoints/openai/protocol.py index 9db5951e5fe5b..f343732174014 100644 --- a/vllm/entrypoints/openai/protocol.py +++ b/vllm/entrypoints/openai/protocol.py @@ -478,17 +478,17 @@ def check_tool_usage(cls, data): # it matches a valid tool if isinstance(data["tool_choice"], dict): valid_tool = False - specified_function = data["tool_choice"]["function"] + specified_function = data["tool_choice"].get("function") if not specified_function: raise ValueError( - "Incorrectly formatted `tool_choice`. Should be like " - "`{\"type\": \"function\"," + "Expected field `function` in `tool_choice`." + " Correct usage: `{\"type\": \"function\"," " \"function\": {\"name\": \"my_function\"}}`") - specified_function_name = specified_function["name"] + specified_function_name = specified_function.get("name") if not specified_function_name: raise ValueError( - "Incorrectly formatted `tool_choice`. Should be like " - "`{\"type\": \"function\", " + "Expected field `name` in `function` in `tool_choice`." 
+ "Correct usage: `{\"type\": \"function\", " "\"function\": {\"name\": \"my_function\"}}`") for tool in data["tools"]: if tool["function"]["name"] == specified_function_name: From cfea9c04ef43420be594f23fc1773009d1fe88c3 Mon Sep 17 00:00:00 2001 From: Chen Wu <72850361+CNTRYROA@users.noreply.github.com> Date: Sat, 23 Nov 2024 13:13:59 +0800 Subject: [PATCH 098/122] [Model] Fix Baichuan BNB online quantization (#10572) Signed-off-by: Chen Wu --- vllm/model_executor/models/baichuan.py | 15 +++++++++++++++ 1 file changed, 15 insertions(+) diff --git a/vllm/model_executor/models/baichuan.py b/vllm/model_executor/models/baichuan.py index a923ed36a9db2..39cb5a8b2cbbe 100644 --- a/vllm/model_executor/models/baichuan.py +++ b/vllm/model_executor/models/baichuan.py @@ -350,6 +350,21 @@ class BaiChuanBaseForCausalLM(nn.Module, SupportsLoRA, SupportsPP): embedding_modules = {} embedding_padding_modules = [] + # BitandBytes specific attributes + default_bitsandbytes_target_modules = [ + ".W_pack.", + ".o_proj.", + ".down_proj.", + ".up_proj.", + ".gate_proj.", + ".up_proj.", + ] + bitsandbytes_stacked_params_mapping = { + # shard_name, weight_name, index + "gate_proj": ("gate_up_proj", 0), + "up_proj": ("gate_up_proj", 1), + } + def __init__( self, *, From 02a43f82a97e37581b48f1c177d3393aca4fe3f2 Mon Sep 17 00:00:00 2001 From: Michael Goin Date: Sat, 23 Nov 2024 00:14:19 -0500 Subject: [PATCH 099/122] Update default max_num_batch_tokens for chunked prefill to 2048 (#10544) --- vllm/config.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/vllm/config.py b/vllm/config.py index 730b069e076fb..42a44f5415e9f 100644 --- a/vllm/config.py +++ b/vllm/config.py @@ -1133,9 +1133,9 @@ def __post_init__(self) -> None: # max_num_batched_tokens. self.max_num_batched_tokens = max(self.max_model_len, 2048) else: - # It is the values that have the best balance between ITL - # and TTFT on A100. Note it is not optimized for throughput. - self.max_num_batched_tokens = 512 + # This value is chosen to have a balance between ITL + # and TTFT. Note it is not optimized for throughput. + self.max_num_batched_tokens = 2048 else: # If max_model_len is too short, use 2048 as the default value # for higher throughput. 
From 7c25fe45a6ef4fb5be148217cc7110e88e186446 Mon Sep 17 00:00:00 2001 From: kliuae <17350011+kliuae@users.noreply.github.com> Date: Sat, 23 Nov 2024 13:14:49 +0800 Subject: [PATCH 100/122] [AMD] Add support for GGUF quantization on ROCm (#10254) --- .buildkite/run-amd-test.sh | 1 - CMakeLists.txt | 2 +- csrc/ops.h | 2 + csrc/quantization/gguf/ggml-common.h | 17 +- csrc/quantization/gguf/gguf_kernel.cu | 6 +- csrc/quantization/gguf/mmq.cuh | 70 +++---- csrc/quantization/gguf/mmvq.cuh | 4 +- csrc/quantization/gguf/vecdotq.cuh | 286 +++++++++++++------------- csrc/torch_bindings.cpp | 2 + vllm/_custom_ops.py | 53 ++--- vllm/config.py | 2 +- 11 files changed, 234 insertions(+), 211 deletions(-) diff --git a/.buildkite/run-amd-test.sh b/.buildkite/run-amd-test.sh index 902e162720b89..3515ccd65667e 100755 --- a/.buildkite/run-amd-test.sh +++ b/.buildkite/run-amd-test.sh @@ -85,7 +85,6 @@ if [[ $commands == *" kernels "* ]]; then --ignore=kernels/test_encoder_decoder_attn.py \ --ignore=kernels/test_flash_attn.py \ --ignore=kernels/test_flashinfer.py \ - --ignore=kernels/test_gguf.py \ --ignore=kernels/test_int8_quant.py \ --ignore=kernels/test_machete_gemm.py \ --ignore=kernels/test_mamba_ssm.py \ diff --git a/CMakeLists.txt b/CMakeLists.txt index bfe435937e3bb..ff34225537cdd 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -196,6 +196,7 @@ set(VLLM_EXT_SRC "csrc/quantization/gptq/q_gemm.cu" "csrc/quantization/compressed_tensors/int8_quant_kernels.cu" "csrc/quantization/fp8/common.cu" + "csrc/quantization/gguf/gguf_kernel.cu" "csrc/cuda_utils_kernels.cu" "csrc/prepare_inputs/advance_step.cu" "csrc/torch_bindings.cpp") @@ -237,7 +238,6 @@ if(VLLM_GPU_LANG STREQUAL "CUDA") "csrc/mamba/causal_conv1d/causal_conv1d.cu" "csrc/quantization/aqlm/gemm_kernels.cu" "csrc/quantization/awq/gemm_kernels.cu" - "csrc/quantization/gguf/gguf_kernel.cu" "csrc/custom_all_reduce.cu" "csrc/permute_cols.cu" "csrc/quantization/cutlass_w8a8/scaled_mm_entry.cu") diff --git a/csrc/ops.h b/csrc/ops.h index 672e608e9c47e..ea001190bc202 100644 --- a/csrc/ops.h +++ b/csrc/ops.h @@ -128,6 +128,7 @@ torch::Tensor awq_dequantize(torch::Tensor _kernel, int64_t thx, int64_t thy); torch::Tensor permute_cols(torch::Tensor const& A, torch::Tensor const& perm); +#endif torch::Tensor ggml_dequantize(torch::Tensor W, int64_t type, int64_t m, int64_t n); @@ -138,6 +139,7 @@ torch::Tensor ggml_mul_mat_vec_a8(torch::Tensor W, torch::Tensor X, torch::Tensor ggml_mul_mat_a8(torch::Tensor W, torch::Tensor X, int64_t type, int64_t row); +#ifndef USE_ROCM bool cutlass_scaled_mm_supports_fp8(int64_t cuda_device_capability); void cutlass_scaled_mm(torch::Tensor& out, torch::Tensor const& a, diff --git a/csrc/quantization/gguf/ggml-common.h b/csrc/quantization/gguf/ggml-common.h index fba94fd1d157b..d42205a6571db 100644 --- a/csrc/quantization/gguf/ggml-common.h +++ b/csrc/quantization/gguf/ggml-common.h @@ -1,7 +1,7 @@ // copied from https://github.com/ggerganov/llama.cpp/blob/b2899/ggml-common.h #define QK_K 256 #define K_QUANTS_PER_ITERATION 2 -#define WARP_SIZE 32 +#define WARP_SIZE_GGUF 32 #define K_SCALE_SIZE 12 #define CUDA_DEQUANTIZE_BLOCK_SIZE 256 #define CUDA_QUANTIZE_BLOCK_SIZE 256 @@ -1112,4 +1112,19 @@ static __device__ __forceinline__ int __dp4a(const int a, const int b, int c) { #endif return c; } + +static __device__ __forceinline__ uint32_t __vcmpeq4(const uint32_t a, const uint32_t b) { + uint32_t neq = a^b; + return !(neq & 0xff000000) * 0xff000000 | + !(neq & 0x00ff0000) * 0x00ff0000 | + !(neq & 0x0000ff00) * 0x0000ff00 | + 
!(neq & 0x000000ff) * 0x000000ff; +} + +static __device__ __forceinline__ uint32_t __vsub4(const uint32_t a, const uint32_t b) { + return (static_cast(((a & 0xff000000) >> 24) - ((b & 0xff000000) >> 24)) << 24) + + (static_cast(((a & 0x00ff0000) >> 16) - ((b & 0x00ff0000) >> 16)) << 16) + + (static_cast(((a & 0x0000ff00) >> 8) - ((b & 0x0000ff00) >> 8)) << 8) + + (static_cast(((a & 0x000000ff) >> 0) - ((b & 0x000000ff) >> 0)) << 0); +} #endif // defined(USE_ROCM) diff --git a/csrc/quantization/gguf/gguf_kernel.cu b/csrc/quantization/gguf/gguf_kernel.cu index 37e4de4e14dd3..5f0eaf5a973fb 100644 --- a/csrc/quantization/gguf/gguf_kernel.cu +++ b/csrc/quantization/gguf/gguf_kernel.cu @@ -4,6 +4,8 @@ #include #include +#include "cuda_compat.h" + #include "ggml-common.h" #include "vecdotq.cuh" #include "dequantize.cuh" @@ -32,8 +34,8 @@ static __global__ void quantize_q8_1(const half* __restrict__ x, #pragma unroll for (int mask = 16; mask > 0; mask >>= 1) { - amax = fmaxf(amax, __shfl_xor_sync(0xffffffff, amax, mask, 32)); - sum += __shfl_xor_sync(0xffffffff, sum, mask, 32); + amax = fmaxf(amax, VLLM_SHFL_XOR_SYNC_WIDTH(amax, mask, 32)); + sum += VLLM_SHFL_XOR_SYNC_WIDTH(sum, mask, 32); } const float d = amax / 127; diff --git a/csrc/quantization/gguf/mmq.cuh b/csrc/quantization/gguf/mmq.cuh index d13efd5965313..c935faa07df0c 100644 --- a/csrc/quantization/gguf/mmq.cuh +++ b/csrc/quantization/gguf/mmq.cuh @@ -10,7 +10,7 @@ static __device__ __forceinline__ void mul_mat_q( const int blocks_per_row_x = ncols_x / qk; const int blocks_per_col_y = nrows_y / QK8_1; - const int blocks_per_warp = WARP_SIZE / qi; + const int blocks_per_warp = WARP_SIZE_GGUF / qi; const int & ncols_dst = ncols_y; @@ -27,10 +27,10 @@ static __device__ __forceinline__ void mul_mat_q( allocate_tiles(&tile_x_ql, &tile_x_dm, &tile_x_qh, &tile_x_sc); - __shared__ int tile_y_qs[mmq_x * WARP_SIZE]; - __shared__ half2 tile_y_ds[mmq_x * WARP_SIZE/QI8_1]; + __shared__ int tile_y_qs[mmq_x * WARP_SIZE_GGUF]; + __shared__ half2 tile_y_ds[mmq_x * WARP_SIZE_GGUF/QI8_1]; - float sum[mmq_y/WARP_SIZE][mmq_x/nwarps] = {{0.0f}}; + float sum[mmq_y/WARP_SIZE_GGUF][mmq_x/nwarps] = {{0.0f}}; for (int ib0 = 0; ib0 < blocks_per_row_x; ib0 += blocks_per_warp) { @@ -39,26 +39,26 @@ static __device__ __forceinline__ void mul_mat_q( #pragma unroll for (int ir = 0; ir < qr; ++ir) { - const int kqs = ir*WARP_SIZE + threadIdx.x; + const int kqs = ir*WARP_SIZE_GGUF + threadIdx.x; const int kbxd = kqs / QI8_1; #pragma unroll for (int i = 0; i < mmq_x; i += nwarps) { const int col_y_eff = min(col_y_0 + threadIdx.y + i, ncols_y-1); // to prevent out-of-bounds memory accesses const block_q8_1 * by0 = &y[col_y_eff*blocks_per_col_y + ib0 * (qk/QK8_1) + kbxd]; - const int index_y = (threadIdx.y + i) * WARP_SIZE + kqs % WARP_SIZE; + const int index_y = (threadIdx.y + i) * WARP_SIZE_GGUF + kqs % WARP_SIZE_GGUF; tile_y_qs[index_y] = get_int_from_int8_aligned(by0->qs, threadIdx.x % QI8_1); } #pragma unroll for (int ids0 = 0; ids0 < mmq_x; ids0 += nwarps * QI8_1) { - const int ids = (ids0 + threadIdx.y * QI8_1 + threadIdx.x / (WARP_SIZE/QI8_1)) % mmq_x; - const int kby = threadIdx.x % (WARP_SIZE/QI8_1); + const int ids = (ids0 + threadIdx.y * QI8_1 + threadIdx.x / (WARP_SIZE_GGUF/QI8_1)) % mmq_x; + const int kby = threadIdx.x % (WARP_SIZE_GGUF/QI8_1); const int col_y_eff = min(col_y_0 + ids, ncols_y-1); // if the sum is not needed it's faster to transform the scale to f32 ahead of time - const half2 * dsi_src = &y[col_y_eff*blocks_per_col_y + ib0 * (qk/QK8_1) + 
ir*(WARP_SIZE/QI8_1) + kby].ds; - half2 * dsi_dst = &tile_y_ds[ids * (WARP_SIZE/QI8_1) + kby]; + const half2 * dsi_src = &y[col_y_eff*blocks_per_col_y + ib0 * (qk/QK8_1) + ir*(WARP_SIZE_GGUF/QI8_1) + kby].ds; + half2 * dsi_dst = &tile_y_ds[ids * (WARP_SIZE_GGUF/QI8_1) + kby]; if (need_sum) { *dsi_dst = *dsi_src; } else { @@ -70,12 +70,12 @@ static __device__ __forceinline__ void mul_mat_q( __syncthreads(); // #pragma unroll // unrolling this loop causes too much register pressure - for (int k = ir*WARP_SIZE/qr; k < (ir+1)*WARP_SIZE/qr; k += vdr) { + for (int k = ir*WARP_SIZE_GGUF/qr; k < (ir+1)*WARP_SIZE_GGUF/qr; k += vdr) { #pragma unroll for (int j = 0; j < mmq_x; j += nwarps) { #pragma unroll - for (int i = 0; i < mmq_y; i += WARP_SIZE) { - sum[i/WARP_SIZE][j/nwarps] += vec_dot( + for (int i = 0; i < mmq_y; i += WARP_SIZE_GGUF) { + sum[i/WARP_SIZE_GGUF][j/nwarps] += vec_dot( tile_x_ql, tile_x_dm, tile_x_qh, tile_x_sc, tile_y_qs, tile_y_ds, threadIdx.x + i, threadIdx.y + j, k); } @@ -93,12 +93,12 @@ static __device__ __forceinline__ void mul_mat_q( } #pragma unroll - for (int i = 0; i < mmq_y; i += WARP_SIZE) { + for (int i = 0; i < mmq_y; i += WARP_SIZE_GGUF) { const int row_dst = row_dst_0 + threadIdx.x + i; if (row_dst >= nrows_dst) { continue; } - dst[col_dst*nrows_dst + row_dst] = __float2half(sum[i/WARP_SIZE][j/nwarps]); + dst[col_dst*nrows_dst + row_dst] = __float2half(sum[i/WARP_SIZE_GGUF][j/nwarps]); } } } @@ -115,7 +115,7 @@ static __device__ __forceinline__ void mul_mat_q( template static __global__ void #if defined(USE_ROCM) -__launch_bounds__(WARP_SIZE*NWARPS_Q4_0, 2) +__launch_bounds__(WARP_SIZE_GGUF*NWARPS_Q4_0, 2) #endif mul_mat_q4_0( const void * __restrict__ vx, const void * __restrict__ vy, half * __restrict__ dst, @@ -140,7 +140,7 @@ static void ggml_mul_mat_q4_0_q8_1_cuda( const int block_num_x = (nrows_x + mmq_y - 1) / mmq_y; const int block_num_y = (ncols_y + mmq_x - 1) / mmq_x; const dim3 block_nums(block_num_x, block_num_y, 1); - const dim3 block_dims(WARP_SIZE, nwarps, 1); + const dim3 block_dims(WARP_SIZE_GGUF, nwarps, 1); if (nrows_x % mmq_y == 0) { const bool need_check = false; @@ -165,7 +165,7 @@ static void ggml_mul_mat_q4_0_q8_1_cuda( template static __global__ void #if defined(USE_ROCM) -__launch_bounds__(WARP_SIZE*NWARPS_Q4_1, 2) +__launch_bounds__(WARP_SIZE_GGUF*NWARPS_Q4_1, 2) #endif mul_mat_q4_1( const void * __restrict__ vx, const void * __restrict__ vy, half * __restrict__ dst, @@ -190,7 +190,7 @@ static void ggml_mul_mat_q4_1_q8_1_cuda( const int block_num_x = (nrows_x + mmq_y - 1) / mmq_y; const int block_num_y = (ncols_y + mmq_x - 1) / mmq_x; const dim3 block_nums(block_num_x, block_num_y, 1); - const dim3 block_dims(WARP_SIZE, nwarps, 1); + const dim3 block_dims(WARP_SIZE_GGUF, nwarps, 1); if (nrows_x % mmq_y == 0) { const bool need_check = false; @@ -215,7 +215,7 @@ static void ggml_mul_mat_q4_1_q8_1_cuda( template static __global__ void #if defined(USE_ROCM) -__launch_bounds__(WARP_SIZE*NWARPS_Q5_0, 2) +__launch_bounds__(WARP_SIZE_GGUF*NWARPS_Q5_0, 2) #endif mul_mat_q5_0( const void * __restrict__ vx, const void * __restrict__ vy, half * __restrict__ dst, @@ -240,7 +240,7 @@ static void ggml_mul_mat_q5_0_q8_1_cuda( const int block_num_x = (nrows_x + mmq_y - 1) / mmq_y; const int block_num_y = (ncols_y + mmq_x - 1) / mmq_x; const dim3 block_nums(block_num_x, block_num_y, 1); - const dim3 block_dims(WARP_SIZE, nwarps, 1); + const dim3 block_dims(WARP_SIZE_GGUF, nwarps, 1); if (nrows_x % mmq_y == 0) { const bool need_check = false; @@ -265,7 
+265,7 @@ static void ggml_mul_mat_q5_0_q8_1_cuda( template static __global__ void #if defined(USE_ROCM) -__launch_bounds__(WARP_SIZE*NWARPS_Q5_1, 2) +__launch_bounds__(WARP_SIZE_GGUF*NWARPS_Q5_1, 2) #endif mul_mat_q5_1( const void * __restrict__ vx, const void * __restrict__ vy, half * __restrict__ dst, @@ -289,7 +289,7 @@ static void ggml_mul_mat_q5_1_q8_1_cuda( const int block_num_x = (nrows_x + mmq_y - 1) / mmq_y; const int block_num_y = (ncols_y + mmq_x - 1) / mmq_x; const dim3 block_nums(block_num_x, block_num_y, 1); - const dim3 block_dims(WARP_SIZE, nwarps, 1); + const dim3 block_dims(WARP_SIZE_GGUF, nwarps, 1); if (nrows_x % mmq_y == 0) { const bool need_check = false; @@ -314,7 +314,7 @@ static void ggml_mul_mat_q5_1_q8_1_cuda( template static __global__ void #if defined(USE_ROCM) -__launch_bounds__(WARP_SIZE*NWARPS_Q8_0, 2) +__launch_bounds__(WARP_SIZE_GGUF*NWARPS_Q8_0, 2) #endif mul_mat_q8_0( const void * __restrict__ vx, const void * __restrict__ vy, half * __restrict__ dst, @@ -338,7 +338,7 @@ static void ggml_mul_mat_q8_0_q8_1_cuda( const int block_num_x = (nrows_x + mmq_y - 1) / mmq_y; const int block_num_y = (ncols_y + mmq_x - 1) / mmq_x; const dim3 block_nums(block_num_x, block_num_y, 1); - const dim3 block_dims(WARP_SIZE, nwarps, 1); + const dim3 block_dims(WARP_SIZE_GGUF, nwarps, 1); if (nrows_x % mmq_y == 0) { const bool need_check = false; @@ -363,7 +363,7 @@ static void ggml_mul_mat_q8_0_q8_1_cuda( template static __global__ void #if defined(USE_ROCM) -__launch_bounds__(WARP_SIZE*NWARPS_Q2_K, 2) +__launch_bounds__(WARP_SIZE_GGUF*NWARPS_Q2_K, 2) #endif mul_mat_q2_K( const void * __restrict__ vx, const void * __restrict__ vy, half * __restrict__ dst, @@ -387,7 +387,7 @@ static void ggml_mul_mat_q2_K_q8_1_cuda( const int block_num_x = (nrows_x + mmq_y - 1) / mmq_y; const int block_num_y = (ncols_y + mmq_x - 1) / mmq_x; const dim3 block_nums(block_num_x, block_num_y, 1); - const dim3 block_dims(WARP_SIZE, nwarps, 1); + const dim3 block_dims(WARP_SIZE_GGUF, nwarps, 1); if (nrows_x % mmq_y == 0) { const bool need_check = false; @@ -412,7 +412,7 @@ static void ggml_mul_mat_q2_K_q8_1_cuda( template static __global__ void #if defined(USE_ROCM) -__launch_bounds__(WARP_SIZE*NWARPS_Q3_K, 2) +__launch_bounds__(WARP_SIZE_GGUF*NWARPS_Q3_K, 2) #endif mul_mat_q3_K( const void * __restrict__ vx, const void * __restrict__ vy, half * __restrict__ dst, @@ -438,7 +438,7 @@ static void ggml_mul_mat_q3_K_q8_1_cuda( const int block_num_x = (nrows_x + mmq_y - 1) / mmq_y; const int block_num_y = (ncols_y + mmq_x - 1) / mmq_x; const dim3 block_nums(block_num_x, block_num_y, 1); - const dim3 block_dims(WARP_SIZE, nwarps, 1); + const dim3 block_dims(WARP_SIZE_GGUF, nwarps, 1); if (nrows_x % mmq_y == 0) { const bool need_check = false; @@ -463,7 +463,7 @@ static void ggml_mul_mat_q3_K_q8_1_cuda( template static __global__ void #if defined(USE_ROCM) -__launch_bounds__(WARP_SIZE*NWARPS_Q4_K, 2) +__launch_bounds__(WARP_SIZE_GGUF*NWARPS_Q4_K, 2) #endif mul_mat_q4_K( const void * __restrict__ vx, const void * __restrict__ vy, half * __restrict__ dst, @@ -487,7 +487,7 @@ static void ggml_mul_mat_q4_K_q8_1_cuda( const int block_num_x = (nrows_x + mmq_y - 1) / mmq_y; const int block_num_y = (ncols_y + mmq_x - 1) / mmq_x; const dim3 block_nums(block_num_x, block_num_y, 1); - const dim3 block_dims(WARP_SIZE, nwarps, 1); + const dim3 block_dims(WARP_SIZE_GGUF, nwarps, 1); if (nrows_x % mmq_y == 0) { const bool need_check = false; @@ -512,7 +512,7 @@ static void ggml_mul_mat_q4_K_q8_1_cuda( template static 
__global__ void #if defined(USE_ROCM) -__launch_bounds__(WARP_SIZE*NWARPS_Q5_K, 2) +__launch_bounds__(WARP_SIZE_GGUF*NWARPS_Q5_K, 2) #endif mul_mat_q5_K( const void * __restrict__ vx, const void * __restrict__ vy, half * __restrict__ dst, @@ -537,7 +537,7 @@ static void ggml_mul_mat_q5_K_q8_1_cuda( const int block_num_x = (nrows_x + mmq_y - 1) / mmq_y; const int block_num_y = (ncols_y + mmq_x - 1) / mmq_x; const dim3 block_nums(block_num_x, block_num_y, 1); - const dim3 block_dims(WARP_SIZE, nwarps, 1); + const dim3 block_dims(WARP_SIZE_GGUF, nwarps, 1); if (nrows_x % mmq_y == 0) { const bool need_check = false; @@ -562,7 +562,7 @@ static void ggml_mul_mat_q5_K_q8_1_cuda( template static __global__ void #if defined(USE_ROCM) -__launch_bounds__(WARP_SIZE*NWARPS_Q6_K, 2) +__launch_bounds__(WARP_SIZE_GGUF*NWARPS_Q6_K, 2) #endif mul_mat_q6_K( const void * __restrict__ vx, const void * __restrict__ vy, half * __restrict__ dst, @@ -586,7 +586,7 @@ static void ggml_mul_mat_q6_K_q8_1_cuda( const int block_num_x = (nrows_x + mmq_y - 1) / mmq_y; const int block_num_y = (ncols_y + mmq_x - 1) / mmq_x; const dim3 block_nums(block_num_x, block_num_y, 1); - const dim3 block_dims(WARP_SIZE, nwarps, 1); + const dim3 block_dims(WARP_SIZE_GGUF, nwarps, 1); if (nrows_x % mmq_y == 0) { const bool need_check = false; diff --git a/csrc/quantization/gguf/mmvq.cuh b/csrc/quantization/gguf/mmvq.cuh index b221ae7896138..b01e939808a3f 100644 --- a/csrc/quantization/gguf/mmvq.cuh +++ b/csrc/quantization/gguf/mmvq.cuh @@ -28,8 +28,8 @@ static __global__ void mul_mat_vec_q(const void * __restrict__ vx, const void * // sum up partial sums and write back result #pragma unroll - for (int mask = 16; mask > 0; mask >>= 1) { - tmp += __shfl_xor_sync(0xffffffff, tmp, mask, 32); + for (int mask = WARP_SIZE/2; mask > 0; mask >>= 1) { + tmp += VLLM_SHFL_XOR_SYNC(tmp, mask); } if (threadIdx.x == 0) { diff --git a/csrc/quantization/gguf/vecdotq.cuh b/csrc/quantization/gguf/vecdotq.cuh index d5af345a6b26f..e00422637c65b 100644 --- a/csrc/quantization/gguf/vecdotq.cuh +++ b/csrc/quantization/gguf/vecdotq.cuh @@ -43,7 +43,7 @@ static __device__ __forceinline__ int get_int_from_uint8_aligned(const uint8_t * template static __device__ __forceinline__ float vec_dot_q4_0_q8_1_impl( const int * v, const int * u, const float & d4, const half2 & ds8) { -#if defined __CUDA_ARCH__ && __CUDA_ARCH__ >= 610 +#if defined __CUDA_ARCH__ && __CUDA_ARCH__ >= 610 || defined USE_ROCM int sumi = 0; #pragma unroll @@ -68,7 +68,7 @@ template static __device__ __forceinline__ float vec_dot_q4_0_q8_1_imp template static __device__ __forceinline__ float vec_dot_q4_1_q8_1_impl( const int * v, const int * u, const half2 & dm4, const half2 & ds8) { -#if defined __CUDA_ARCH__ && __CUDA_ARCH__ >= 610 +#if defined __CUDA_ARCH__ && __CUDA_ARCH__ >= 610 || defined USE_ROCM int sumi = 0; #pragma unroll @@ -95,7 +95,7 @@ template static __device__ __forceinline__ float vec_dot_q4_1_q8_1_imp template static __device__ __forceinline__ float vec_dot_q5_0_q8_1_impl( const int * vl, const int * vh, const int * u, const float & d5, const half2 & ds8) { -#if defined __CUDA_ARCH__ && __CUDA_ARCH__ >= 610 +#if defined __CUDA_ARCH__ && __CUDA_ARCH__ >= 610 || defined USE_ROCM int sumi = 0; #pragma unroll @@ -128,7 +128,7 @@ template static __device__ __forceinline__ float vec_dot_q5_0_q8_1_imp template static __device__ __forceinline__ float vec_dot_q5_1_q8_1_impl( const int * vl, const int * vh, const int * u, const half2 & dm5, const half2 & ds8) { -#if defined __CUDA_ARCH__ 
&& __CUDA_ARCH__ >= 610 +#if defined __CUDA_ARCH__ && __CUDA_ARCH__ >= 610 || defined USE_ROCM int sumi = 0; #pragma unroll @@ -162,7 +162,7 @@ template static __device__ __forceinline__ float vec_dot_q5_1_q8_1_imp template static __device__ __forceinline__ float vec_dot_q8_0_q8_1_impl( const int * v, const int * u, const float & d8_0, const float & d8_1) { -#if defined __CUDA_ARCH__ && __CUDA_ARCH__ >= 610 +#if defined __CUDA_ARCH__ && __CUDA_ARCH__ >= 610 || defined USE_ROCM int sumi = 0; #pragma unroll @@ -176,7 +176,7 @@ template static __device__ __forceinline__ float vec_dot_q8_0_q8_1_imp template static __device__ __forceinline__ float vec_dot_q8_1_q8_1_impl( const int * v, const int * u, const half2 & dm8, const half2 & ds8) { -#if defined __CUDA_ARCH__ && __CUDA_ARCH__ >= 610 +#if defined __CUDA_ARCH__ && __CUDA_ARCH__ >= 610 || defined USE_ROCM int sumi = 0; @@ -202,7 +202,7 @@ template static __device__ __forceinline__ float vec_dot_q8_1_q8_1_imp static __device__ __forceinline__ float vec_dot_q2_K_q8_1_impl_mmvq( const int & v, const int * __restrict__ u, const uint8_t * __restrict__ scales, const half2 & dm2, const float * __restrict__ d8) { -#if defined __CUDA_ARCH__ && __CUDA_ARCH__ >= 610 +#if defined __CUDA_ARCH__ && __CUDA_ARCH__ >= 610 || defined USE_ROCM float sumf_d = 0.0f; float sumf_m = 0.0f; @@ -230,7 +230,7 @@ static __device__ __forceinline__ float vec_dot_q2_K_q8_1_impl_mmvq( static __device__ __forceinline__ float vec_dot_q2_K_q8_1_impl_mmq( const int * __restrict__ v, const int * __restrict__ u, const uint8_t * __restrict__ scales, const half2 & dm2, const float & d8) { -#if defined __CUDA_ARCH__ && __CUDA_ARCH__ >= 610 +#if defined __CUDA_ARCH__ && __CUDA_ARCH__ >= 610 || defined USE_ROCM int sumi_d = 0; int sumi_m = 0; @@ -267,7 +267,7 @@ static __device__ __forceinline__ float vec_dot_q2_K_q8_1_impl_mmq( static __device__ __forceinline__ float vec_dot_q3_K_q8_1_impl_mmvq( const int & vl, const int & vh, const int * __restrict__ u, const uint8_t * __restrict__ scales, const int & scale_offset, const float & d3, const float * __restrict__ d8) { -#if defined __CUDA_ARCH__ && __CUDA_ARCH__ >= 610 +#if defined __CUDA_ARCH__ && __CUDA_ARCH__ >= 610 || defined USE_ROCM float sumf = 0.0f; @@ -301,7 +301,7 @@ static __device__ __forceinline__ float vec_dot_q3_K_q8_1_impl_mmvq( static __device__ __forceinline__ float vec_dot_q3_K_q8_1_impl_mmq( const int * __restrict__ v, const int * __restrict__ u, const int8_t * __restrict__ scales, const float & d3, const float & d8) { -#if defined __CUDA_ARCH__ && __CUDA_ARCH__ >= 610 +#if defined __CUDA_ARCH__ && __CUDA_ARCH__ >= 610 || defined USE_ROCM int sumi = 0; #pragma unroll @@ -326,7 +326,7 @@ static __device__ __forceinline__ float vec_dot_q3_K_q8_1_impl_mmq( static __device__ __forceinline__ float vec_dot_q4_K_q8_1_impl_vmmq( const int * __restrict__ v, const int * __restrict__ u, const uint8_t * __restrict__ sc, const uint8_t * __restrict__ m, const half2 & dm4, const float * __restrict__ d8) { -#if defined __CUDA_ARCH__ && __CUDA_ARCH__ >= 610 +#if defined __CUDA_ARCH__ && __CUDA_ARCH__ >= 610 || defined USE_ROCM float sumf_d = 0.0f; float sumf_m = 0.0f; @@ -351,7 +351,7 @@ static __device__ __forceinline__ float vec_dot_q4_K_q8_1_impl_vmmq( static __device__ __forceinline__ float vec_dot_q4_K_q8_1_impl_mmq( const int * __restrict__ v, const int * __restrict__ u, const uint8_t * __restrict__ sc, const uint8_t * __restrict__ m, const half2 & dm4, const half2 * __restrict__ ds8) { -#if defined __CUDA_ARCH__ && 
__CUDA_ARCH__ >= 610 +#if defined __CUDA_ARCH__ && __CUDA_ARCH__ >= 610 || defined USE_ROCM float sumf_d = 0.0f; float sumf_m = 0.0f; @@ -382,7 +382,7 @@ static __device__ __forceinline__ float vec_dot_q4_K_q8_1_impl_mmq( static __device__ __forceinline__ float vec_dot_q5_K_q8_1_impl_vmmq( const int * __restrict__ vl, const int * __restrict__ vh, const int * __restrict__ u, const uint8_t * __restrict__ sc, const uint8_t * __restrict__ m, const half2 & dm5, const float * __restrict__ d8) { -#if defined __CUDA_ARCH__ && __CUDA_ARCH__ >= 610 +#if defined __CUDA_ARCH__ && __CUDA_ARCH__ >= 610 || defined USE_ROCM float sumf_d = 0.0f; float sumf_m = 0.0f; @@ -413,7 +413,7 @@ static __device__ __forceinline__ float vec_dot_q5_K_q8_1_impl_vmmq( static __device__ __forceinline__ float vec_dot_q5_K_q8_1_impl_mmq( const int * __restrict__ v, const int * __restrict__ u, const uint8_t * __restrict__ sc, const uint8_t * __restrict__ m, const half2 & dm4, const half2 * __restrict__ ds8) { -#if defined __CUDA_ARCH__ && __CUDA_ARCH__ >= 610 +#if defined __CUDA_ARCH__ && __CUDA_ARCH__ >= 610 || defined USE_ROCM float sumf_d = 0.0f; float sumf_m = 0.0f; @@ -445,7 +445,7 @@ static __device__ __forceinline__ float vec_dot_q5_K_q8_1_impl_mmq( static __device__ __forceinline__ float vec_dot_q6_K_q8_1_impl_mmvq( const int & vl, const int & vh, const int * __restrict__ u, const int8_t * __restrict__ scales, const float & d, const float * __restrict__ d8) { -#if defined __CUDA_ARCH__ && __CUDA_ARCH__ >= 610 +#if defined __CUDA_ARCH__ && __CUDA_ARCH__ >= 610 || defined USE_ROCM float sumf = 0.0f; #pragma unroll @@ -465,7 +465,7 @@ static __device__ __forceinline__ float vec_dot_q6_K_q8_1_impl_mmvq( static __device__ __forceinline__ float vec_dot_q6_K_q8_1_impl_mmq( const int * __restrict__ v, const int * __restrict__ u, const int8_t * __restrict__ sc, const float & d6, const float * __restrict__ d8) { -#if defined __CUDA_ARCH__ && __CUDA_ARCH__ >= 610 +#if defined __CUDA_ARCH__ && __CUDA_ARCH__ >= 610 || defined USE_ROCM float sumf_d = 0.0f; #pragma unroll @@ -507,8 +507,8 @@ static __device__ __forceinline__ float vec_dot_q4_0_q8_1( } template static __device__ __forceinline__ void allocate_tiles_q4_0(int ** x_ql, half2 ** x_dm, int ** x_qh, int ** x_sc) { - __shared__ int tile_x_qs[mmq_y * (WARP_SIZE) + mmq_y]; - __shared__ float tile_x_d[mmq_y * (WARP_SIZE/QI4_0) + mmq_y/QI4_0]; + __shared__ int tile_x_qs[mmq_y * (WARP_SIZE_GGUF) + mmq_y]; + __shared__ float tile_x_d[mmq_y * (WARP_SIZE_GGUF/QI4_0) + mmq_y/QI4_0]; *x_ql = tile_x_qs; *x_dm = (half2 *) tile_x_d; } @@ -529,11 +529,11 @@ template static __device__ __forceinlin i = min(i, i_max); } const block_q4_0 * bxi = bx0 + i*blocks_per_row + kbx; - x_ql[i * (WARP_SIZE + 1) + k] = get_int_from_uint8(bxi->qs, kqsx); - // x_dmf[i * (WARP_SIZE/QI4_0) + i / QI4_0 + kbx] = bxi->d; + x_ql[i * (WARP_SIZE_GGUF + 1) + k] = get_int_from_uint8(bxi->qs, kqsx); + // x_dmf[i * (WARP_SIZE_GGUF/QI4_0) + i / QI4_0 + kbx] = bxi->d; } - const int blocks_per_tile_x_row = WARP_SIZE / QI4_0; + const int blocks_per_tile_x_row = WARP_SIZE_GGUF / QI4_0; const int kbxd = k % blocks_per_tile_x_row; #pragma unroll @@ -543,7 +543,7 @@ template static __device__ __forceinlin i = min(i, i_max); } const block_q4_0 * bxi = bx0 + i*blocks_per_row + kbxd; - x_dmf[i * (WARP_SIZE/QI4_0) + i / QI4_0 + kbxd] = __half2float(bxi->d); + x_dmf[i * (WARP_SIZE_GGUF/QI4_0) + i / QI4_0 + kbxd] = __half2float(bxi->d); } } @@ -559,13 +559,13 @@ static __device__ __forceinline__ float vec_dot_q4_0_q8_1_mul_mat( 
#pragma unroll for (int l = 0; l < VDR_Q4_0_Q8_1_MMQ; ++l) { - u[2*l+0] = y_qs[j * WARP_SIZE + (kyqs + l) % WARP_SIZE]; - u[2*l+1] = y_qs[j * WARP_SIZE + (kyqs + l + QI4_0) % WARP_SIZE]; + u[2*l+0] = y_qs[j * WARP_SIZE_GGUF + (kyqs + l) % WARP_SIZE_GGUF]; + u[2*l+1] = y_qs[j * WARP_SIZE_GGUF + (kyqs + l + QI4_0) % WARP_SIZE_GGUF]; } return vec_dot_q4_0_q8_1_impl - (&x_ql[i * (WARP_SIZE + 1) + k], u, x_dmf[i * (WARP_SIZE/QI4_0) + i/QI4_0 + k/QI4_0], - y_ds[j * (WARP_SIZE/QI8_1) + (2*k/QI8_1) % (WARP_SIZE/QI8_1)]); + (&x_ql[i * (WARP_SIZE_GGUF + 1) + k], u, x_dmf[i * (WARP_SIZE_GGUF/QI4_0) + i/QI4_0 + k/QI4_0], + y_ds[j * (WARP_SIZE_GGUF/QI8_1) + (2*k/QI8_1) % (WARP_SIZE_GGUF/QI8_1)]); } static __device__ __forceinline__ float vec_dot_q4_1_q8_1( @@ -587,8 +587,8 @@ static __device__ __forceinline__ float vec_dot_q4_1_q8_1( } template static __device__ __forceinline__ void allocate_tiles_q4_1(int ** x_ql, half2 ** x_dm, int ** x_qh, int ** x_sc) { - __shared__ int tile_x_qs[mmq_y * (WARP_SIZE) + + mmq_y]; - __shared__ half2 tile_x_dm[mmq_y * (WARP_SIZE/QI4_1) + mmq_y/QI4_1]; + __shared__ int tile_x_qs[mmq_y * (WARP_SIZE_GGUF) + + mmq_y]; + __shared__ half2 tile_x_dm[mmq_y * (WARP_SIZE_GGUF/QI4_1) + mmq_y/QI4_1]; *x_ql = tile_x_qs; *x_dm = tile_x_dm; } @@ -608,10 +608,10 @@ template static __device__ __forceinlin i = min(i, i_max); } const block_q4_1 * bxi = bx0 + i*blocks_per_row + kbx; - x_ql[i * (WARP_SIZE + 1) + k] = get_int_from_uint8_aligned(bxi->qs, kqsx); + x_ql[i * (WARP_SIZE_GGUF + 1) + k] = get_int_from_uint8_aligned(bxi->qs, kqsx); } - const int blocks_per_tile_x_row = WARP_SIZE / QI4_1; + const int blocks_per_tile_x_row = WARP_SIZE_GGUF / QI4_1; const int kbxd = k % blocks_per_tile_x_row; #pragma unroll @@ -621,7 +621,7 @@ template static __device__ __forceinlin i = min(i, i_max); } const block_q4_1 * bxi = bx0 + i*blocks_per_row + kbxd; - x_dm[i * (WARP_SIZE/QI4_1) + i / QI4_1 + kbxd] = bxi->dm; + x_dm[i * (WARP_SIZE_GGUF/QI4_1) + i / QI4_1 + kbxd] = bxi->dm; } } @@ -634,13 +634,13 @@ static __device__ __forceinline__ float vec_dot_q4_1_q8_1_mul_mat( #pragma unroll for (int l = 0; l < VDR_Q4_1_Q8_1_MMQ; ++l) { - u[2*l+0] = y_qs[j * WARP_SIZE + (kyqs + l) % WARP_SIZE]; - u[2*l+1] = y_qs[j * WARP_SIZE + (kyqs + l + QI4_1) % WARP_SIZE]; + u[2*l+0] = y_qs[j * WARP_SIZE_GGUF + (kyqs + l) % WARP_SIZE_GGUF]; + u[2*l+1] = y_qs[j * WARP_SIZE_GGUF + (kyqs + l + QI4_1) % WARP_SIZE_GGUF]; } return vec_dot_q4_1_q8_1_impl - (&x_ql[i * (WARP_SIZE + 1) + k], u, x_dm[i * (WARP_SIZE/QI4_1) + i/QI4_1 + k/QI4_1], - y_ds[j * (WARP_SIZE/QI8_1) + (2*k/QI8_1) % (WARP_SIZE/QI8_1)]); + (&x_ql[i * (WARP_SIZE_GGUF + 1) + k], u, x_dm[i * (WARP_SIZE_GGUF/QI4_1) + i/QI4_1 + k/QI4_1], + y_ds[j * (WARP_SIZE_GGUF/QI8_1) + (2*k/QI8_1) % (WARP_SIZE_GGUF/QI8_1)]); } static __device__ __forceinline__ float vec_dot_q5_0_q8_1( @@ -664,8 +664,8 @@ static __device__ __forceinline__ float vec_dot_q5_0_q8_1( } template static __device__ __forceinline__ void allocate_tiles_q5_0(int ** x_ql, half2 ** x_dm, int ** x_qh, int ** x_sc) { - __shared__ int tile_x_ql[mmq_y * (2*WARP_SIZE) + mmq_y]; - __shared__ float tile_x_d[mmq_y * (WARP_SIZE/QI5_0) + mmq_y/QI5_0]; + __shared__ int tile_x_ql[mmq_y * (2*WARP_SIZE_GGUF) + mmq_y]; + __shared__ float tile_x_d[mmq_y * (WARP_SIZE_GGUF/QI5_0) + mmq_y/QI5_0]; *x_ql = tile_x_ql; *x_dm = (half2 *) tile_x_d; @@ -697,7 +697,7 @@ template static __device__ __forceinlin qs0 |= (qh << 25) & 0x10000000; // 3 -> 28 qs0 = __vsubss4(qs0, 0x10101010); // subtract 16 - x_ql[i * (2*WARP_SIZE + 1) + 
2*k+0] = qs0; + x_ql[i * (2*WARP_SIZE_GGUF + 1) + 2*k+0] = qs0; int qs1 = (ql >> 4) & 0x0F0F0F0F; qs1 |= (qh >> 12) & 0x00000010; // 16 -> 4 @@ -706,10 +706,10 @@ template static __device__ __forceinlin qs1 |= (qh << 9) & 0x10000000; // 19 -> 28 qs1 = __vsubss4(qs1, 0x10101010); // subtract 16 - x_ql[i * (2*WARP_SIZE + 1) + 2*k+1] = qs1; + x_ql[i * (2*WARP_SIZE_GGUF + 1) + 2*k+1] = qs1; } - const int blocks_per_tile_x_row = WARP_SIZE / QI5_0; + const int blocks_per_tile_x_row = WARP_SIZE_GGUF / QI5_0; const int kbxd = k % blocks_per_tile_x_row; float * x_dmf = (float *) x_dm; @@ -722,7 +722,7 @@ template static __device__ __forceinlin } const block_q5_0 * bxi = bx0 + i*blocks_per_row + kbxd; - x_dmf[i * (WARP_SIZE/QI5_0) + i / QI5_0 + kbxd] = __half2float(bxi->d); + x_dmf[i * (WARP_SIZE_GGUF/QI5_0) + i / QI5_0 + kbxd] = __half2float(bxi->d); } } @@ -730,7 +730,7 @@ static __device__ __forceinline__ float vec_dot_q5_0_q8_1_mul_mat( const int * __restrict__ x_ql, const half2 * __restrict__ x_dm, const int * __restrict__ x_qh, const int * __restrict__ x_sc, const int * __restrict__ y_qs, const half2 * __restrict__ y_ds, const int & i, const int & j, const int & k) { const int kyqs = k % (QI8_1/2) + QI8_1 * (k / (QI8_1/2)); - const int index_bx = i * (WARP_SIZE/QI5_0) + i/QI5_0 + k/QI5_0; + const int index_bx = i * (WARP_SIZE_GGUF/QI5_0) + i/QI5_0 + k/QI5_0; const float * x_dmf = (const float *) x_dm; const float * y_df = (const float *) y_ds; @@ -738,12 +738,12 @@ static __device__ __forceinline__ float vec_dot_q5_0_q8_1_mul_mat( #pragma unroll for (int l = 0; l < VDR_Q5_0_Q8_1_MMQ; ++l) { - u[2*l+0] = y_qs[j * WARP_SIZE + (kyqs + l) % WARP_SIZE]; - u[2*l+1] = y_qs[j * WARP_SIZE + (kyqs + l + QI5_0) % WARP_SIZE]; + u[2*l+0] = y_qs[j * WARP_SIZE_GGUF + (kyqs + l) % WARP_SIZE_GGUF]; + u[2*l+1] = y_qs[j * WARP_SIZE_GGUF + (kyqs + l + QI5_0) % WARP_SIZE_GGUF]; } return vec_dot_q8_0_q8_1_impl - (&x_ql[i * (2*WARP_SIZE + 1) + 2 * k], u, x_dmf[index_bx], y_df[j * (WARP_SIZE/QI8_1) + (2*k/QI8_1) % (WARP_SIZE/QI8_1)]); + (&x_ql[i * (2*WARP_SIZE_GGUF + 1) + 2 * k], u, x_dmf[index_bx], y_df[j * (WARP_SIZE_GGUF/QI8_1) + (2*k/QI8_1) % (WARP_SIZE_GGUF/QI8_1)]); } static __device__ __forceinline__ float vec_dot_q5_1_q8_1( @@ -767,8 +767,8 @@ static __device__ __forceinline__ float vec_dot_q5_1_q8_1( } template static __device__ __forceinline__ void allocate_tiles_q5_1(int ** x_ql, half2 ** x_dm, int ** x_qh, int ** x_sc) { - __shared__ int tile_x_ql[mmq_y * (2*WARP_SIZE) + mmq_y]; - __shared__ half2 tile_x_dm[mmq_y * (WARP_SIZE/QI5_1) + mmq_y/QI5_1]; + __shared__ int tile_x_ql[mmq_y * (2*WARP_SIZE_GGUF) + mmq_y]; + __shared__ half2 tile_x_dm[mmq_y * (WARP_SIZE_GGUF/QI5_1) + mmq_y/QI5_1]; *x_ql = tile_x_ql; *x_dm = tile_x_dm; @@ -801,7 +801,7 @@ template static __device__ __forceinlin qs0 |= (qh << 18) & 0x00100000; // 2 -> 20 qs0 |= (qh << 25) & 0x10000000; // 3 -> 28 - x_ql[i * (2*WARP_SIZE + 1) + 2*k+0] = qs0; + x_ql[i * (2*WARP_SIZE_GGUF + 1) + 2*k+0] = qs0; int qs1 = (ql >> 4) & 0x0F0F0F0F; qs1 |= (qh >> 12) & 0x00000010; // 16 -> 4 @@ -809,10 +809,10 @@ template static __device__ __forceinlin qs1 |= (qh << 2) & 0x00100000; // 18 -> 20 qs1 |= (qh << 9) & 0x10000000; // 19 -> 28 - x_ql[i * (2*WARP_SIZE + 1) + 2*k+1] = qs1; + x_ql[i * (2*WARP_SIZE_GGUF + 1) + 2*k+1] = qs1; } - const int blocks_per_tile_x_row = WARP_SIZE / QI5_1; + const int blocks_per_tile_x_row = WARP_SIZE_GGUF / QI5_1; const int kbxd = k % blocks_per_tile_x_row; #pragma unroll @@ -825,7 +825,7 @@ template static __device__ __forceinlin 
const block_q5_1 * bxi = bx0 + i*blocks_per_row + kbxd; - x_dm[i * (WARP_SIZE/QI5_1) + i / QI5_1 + kbxd] = bxi->dm; + x_dm[i * (WARP_SIZE_GGUF/QI5_1) + i / QI5_1 + kbxd] = bxi->dm; } } @@ -833,18 +833,18 @@ static __device__ __forceinline__ float vec_dot_q5_1_q8_1_mul_mat( const int * __restrict__ x_ql, const half2 * __restrict__ x_dm, const int * __restrict__ x_qh, const int * __restrict__ x_sc, const int * __restrict__ y_qs, const half2 * __restrict__ y_ds, const int & i, const int & j, const int & k) { const int kyqs = k % (QI8_1/2) + QI8_1 * (k / (QI8_1/2)); - const int index_bx = i * (WARP_SIZE/QI5_1) + + i/QI5_1 + k/QI5_1; + const int index_bx = i * (WARP_SIZE_GGUF/QI5_1) + + i/QI5_1 + k/QI5_1; int u[2*VDR_Q5_1_Q8_1_MMQ]; #pragma unroll for (int l = 0; l < VDR_Q5_1_Q8_1_MMQ; ++l) { - u[2*l+0] = y_qs[j * WARP_SIZE + (kyqs + l) % WARP_SIZE]; - u[2*l+1] = y_qs[j * WARP_SIZE + (kyqs + l + QI5_1) % WARP_SIZE]; + u[2*l+0] = y_qs[j * WARP_SIZE_GGUF + (kyqs + l) % WARP_SIZE_GGUF]; + u[2*l+1] = y_qs[j * WARP_SIZE_GGUF + (kyqs + l + QI5_1) % WARP_SIZE_GGUF]; } return vec_dot_q8_1_q8_1_impl - (&x_ql[i * (2*WARP_SIZE + 1) + 2 * k], u, x_dm[index_bx], y_ds[j * (WARP_SIZE/QI8_1) + (2*k/QI8_1) % (WARP_SIZE/QI8_1)]); + (&x_ql[i * (2*WARP_SIZE_GGUF + 1) + 2 * k], u, x_dm[index_bx], y_ds[j * (WARP_SIZE_GGUF/QI8_1) + (2*k/QI8_1) % (WARP_SIZE_GGUF/QI8_1)]); } static __device__ __forceinline__ float vec_dot_q8_0_q8_1( @@ -865,8 +865,8 @@ static __device__ __forceinline__ float vec_dot_q8_0_q8_1( } template static __device__ __forceinline__ void allocate_tiles_q8_0(int ** x_ql, half2 ** x_dm, int ** x_qh, int ** x_sc) { - __shared__ int tile_x_qs[mmq_y * (WARP_SIZE) + mmq_y]; - __shared__ float tile_x_d[mmq_y * (WARP_SIZE/QI8_0) + mmq_y/QI8_0]; + __shared__ int tile_x_qs[mmq_y * (WARP_SIZE_GGUF) + mmq_y]; + __shared__ float tile_x_d[mmq_y * (WARP_SIZE_GGUF/QI8_0) + mmq_y/QI8_0]; *x_ql = tile_x_qs; *x_dm = (half2 *) tile_x_d; @@ -889,10 +889,10 @@ template static __device__ __forceinlin i = min(i, i_max); } const block_q8_0 * bxi = bx0 + i*blocks_per_row + kbx; - x_ql[i * (WARP_SIZE + 1) + k] = get_int_from_int8(bxi->qs, kqsx); + x_ql[i * (WARP_SIZE_GGUF + 1) + k] = get_int_from_int8(bxi->qs, kqsx); } - const int blocks_per_tile_x_row = WARP_SIZE / QI8_0; + const int blocks_per_tile_x_row = WARP_SIZE_GGUF / QI8_0; const int kbxd = k % blocks_per_tile_x_row; #pragma unroll @@ -903,7 +903,7 @@ template static __device__ __forceinlin i = min(i, i_max); } const block_q8_0 * bxi = bx0 + i*blocks_per_row + kbxd; - x_dmf[i * (WARP_SIZE/QI8_0) + i / QI8_0 + kbxd] = __half2float(bxi->d); + x_dmf[i * (WARP_SIZE_GGUF/QI8_0) + i / QI8_0 + kbxd] = __half2float(bxi->d); } } @@ -914,8 +914,8 @@ static __device__ __forceinline__ float vec_dot_q8_0_q8_1_mul_mat( const float * y_df = (const float *) y_ds; return vec_dot_q8_0_q8_1_impl - (&x_ql[i * (WARP_SIZE + 1) + k], &y_qs[j * WARP_SIZE + k], x_dmf[i * (WARP_SIZE/QI8_0) + i/QI8_0 + k/QI8_0], - y_df[j * (WARP_SIZE/QI8_1) + k/QI8_1]); + (&x_ql[i * (WARP_SIZE_GGUF + 1) + k], &y_qs[j * WARP_SIZE_GGUF + k], x_dmf[i * (WARP_SIZE_GGUF/QI8_0) + i/QI8_0 + k/QI8_0], + y_df[j * (WARP_SIZE_GGUF/QI8_1) + k/QI8_1]); } static __device__ __forceinline__ float vec_dot_q2_K_q8_1( @@ -942,9 +942,9 @@ static __device__ __forceinline__ float vec_dot_q2_K_q8_1( } template static __device__ __forceinline__ void allocate_tiles_q2_K(int ** x_ql, half2 ** x_dm, int ** x_qh, int ** x_sc) { - __shared__ int tile_x_ql[mmq_y * (WARP_SIZE) + mmq_y]; - __shared__ half2 tile_x_dm[mmq_y * 
(WARP_SIZE/QI2_K) + mmq_y/QI2_K]; - __shared__ int tile_x_sc[mmq_y * (WARP_SIZE/4) + mmq_y/4]; + __shared__ int tile_x_ql[mmq_y * (WARP_SIZE_GGUF) + mmq_y]; + __shared__ half2 tile_x_dm[mmq_y * (WARP_SIZE_GGUF/QI2_K) + mmq_y/QI2_K]; + __shared__ int tile_x_sc[mmq_y * (WARP_SIZE_GGUF/4) + mmq_y/4]; *x_ql = tile_x_ql; *x_dm = tile_x_dm; @@ -967,10 +967,10 @@ template static __device__ __forceinlin i = min(i, i_max); } const block_q2_K * bxi = bx0 + i*blocks_per_row + kbx; - x_ql[i * (WARP_SIZE + 1) + k] = get_int_from_uint8_aligned(bxi->qs, kqsx); + x_ql[i * (WARP_SIZE_GGUF + 1) + k] = get_int_from_uint8_aligned(bxi->qs, kqsx); } - const int blocks_per_tile_x_row = WARP_SIZE / QI2_K; + const int blocks_per_tile_x_row = WARP_SIZE_GGUF / QI2_K; const int kbxd = k % blocks_per_tile_x_row; #pragma unroll @@ -981,18 +981,18 @@ template static __device__ __forceinlin i = min(i, i_max); } const block_q2_K * bxi = bx0 + i*blocks_per_row + kbxd; - x_dm[i * (WARP_SIZE/QI2_K) + i / QI2_K + kbxd] = bxi->dm; + x_dm[i * (WARP_SIZE_GGUF/QI2_K) + i / QI2_K + kbxd] = bxi->dm; } #pragma unroll for (int i0 = 0; i0 < mmq_y; i0 += nwarps * 4) { - int i = i0 + i_offset * 4 + k / (WARP_SIZE/4); + int i = i0 + i_offset * 4 + k / (WARP_SIZE_GGUF/4); if (need_check) { i = min(i, i_max); } - const block_q2_K * bxi = bx0 + i*blocks_per_row + (k % (WARP_SIZE/4)) / (QI2_K/4); - x_sc[i * (WARP_SIZE/4) + i / 4 + k % (WARP_SIZE/4)] = get_int_from_uint8_aligned(bxi->scales, k % (QI2_K/4)); + const block_q2_K * bxi = bx0 + i*blocks_per_row + (k % (WARP_SIZE_GGUF/4)) / (QI2_K/4); + x_sc[i * (WARP_SIZE_GGUF/4) + i / 4 + k % (WARP_SIZE_GGUF/4)] = get_int_from_uint8_aligned(bxi->scales, k % (QI2_K/4)); } } @@ -1005,7 +1005,7 @@ static __device__ __forceinline__ float vec_dot_q2_K_q8_1_mul_mat( int v[QR2_K*VDR_Q2_K_Q8_1_MMQ]; - const int kqsx = i * (WARP_SIZE + 1) + kbx*QI2_K + (QI2_K/2) * (ky/(2*QI2_K)) + ky % (QI2_K/2); + const int kqsx = i * (WARP_SIZE_GGUF + 1) + kbx*QI2_K + (QI2_K/2) * (ky/(2*QI2_K)) + ky % (QI2_K/2); const int shift = 2 * ((ky % (2*QI2_K)) / (QI2_K/2)); #pragma unroll @@ -1013,10 +1013,10 @@ static __device__ __forceinline__ float vec_dot_q2_K_q8_1_mul_mat( v[l] = (x_ql[kqsx + l] >> shift) & 0x03030303; } - const uint8_t * scales = ((const uint8_t *) &x_sc[i * (WARP_SIZE/4) + i/4 + kbx*4]) + ky/4; + const uint8_t * scales = ((const uint8_t *) &x_sc[i * (WARP_SIZE_GGUF/4) + i/4 + kbx*4]) + ky/4; - const int index_y = j * WARP_SIZE + (QR2_K*k) % WARP_SIZE; - return vec_dot_q2_K_q8_1_impl_mmq(v, &y_qs[index_y], scales, x_dm[i * (WARP_SIZE/QI2_K) + i/QI2_K + kbx], y_df[index_y/QI8_1]); + const int index_y = j * WARP_SIZE_GGUF + (QR2_K*k) % WARP_SIZE_GGUF; + return vec_dot_q2_K_q8_1_impl_mmq(v, &y_qs[index_y], scales, x_dm[i * (WARP_SIZE_GGUF/QI2_K) + i/QI2_K + kbx], y_df[index_y/QI8_1]); } static __device__ __forceinline__ float vec_dot_q3_K_q8_1( @@ -1047,10 +1047,10 @@ static __device__ __forceinline__ float vec_dot_q3_K_q8_1( } template static __device__ __forceinline__ void allocate_tiles_q3_K(int ** x_ql, half2 ** x_dm, int ** x_qh, int ** x_sc) { - __shared__ int tile_x_ql[mmq_y * (WARP_SIZE) + mmq_y]; - __shared__ half2 tile_x_dm[mmq_y * (WARP_SIZE/QI3_K) + mmq_y/QI3_K]; - __shared__ int tile_x_qh[mmq_y * (WARP_SIZE/2) + mmq_y/2]; - __shared__ int tile_x_sc[mmq_y * (WARP_SIZE/4) + mmq_y/4]; + __shared__ int tile_x_ql[mmq_y * (WARP_SIZE_GGUF) + mmq_y]; + __shared__ half2 tile_x_dm[mmq_y * (WARP_SIZE_GGUF/QI3_K) + mmq_y/QI3_K]; + __shared__ int tile_x_qh[mmq_y * (WARP_SIZE_GGUF/2) + mmq_y/2]; + __shared__ 
int tile_x_sc[mmq_y * (WARP_SIZE_GGUF/4) + mmq_y/4]; *x_ql = tile_x_ql; *x_dm = tile_x_dm; @@ -1073,10 +1073,10 @@ template static __device__ __forceinlin i = min(i, i_max); } const block_q3_K * bxi = bx0 + i*blocks_per_row + kbx; - x_ql[i * (WARP_SIZE + 1) + k] = get_int_from_uint8(bxi->qs, kqsx); + x_ql[i * (WARP_SIZE_GGUF + 1) + k] = get_int_from_uint8(bxi->qs, kqsx); } - const int blocks_per_tile_x_row = WARP_SIZE / QI3_K; + const int blocks_per_tile_x_row = WARP_SIZE_GGUF / QI3_K; const int kbxd = k % blocks_per_tile_x_row; float * x_dmf = (float *) x_dm; @@ -1087,27 +1087,27 @@ template static __device__ __forceinlin i = min(i, i_max); } const block_q3_K * bxi = bx0 + i*blocks_per_row + kbxd; - x_dmf[i * (WARP_SIZE/QI3_K) + i / QI3_K + kbxd] = __half2float(bxi->d); + x_dmf[i * (WARP_SIZE_GGUF/QI3_K) + i / QI3_K + kbxd] = __half2float(bxi->d); } #pragma unroll for (int i0 = 0; i0 < mmq_y; i0 += nwarps * 2) { - int i = i0 + i_offset * 2 + k / (WARP_SIZE/2); + int i = i0 + i_offset * 2 + k / (WARP_SIZE_GGUF/2); if (need_check) { i = min(i, i_max); } - const block_q3_K * bxi = bx0 + i*blocks_per_row + (k % (WARP_SIZE/2)) / (QI3_K/2); + const block_q3_K * bxi = bx0 + i*blocks_per_row + (k % (WARP_SIZE_GGUF/2)) / (QI3_K/2); // invert the mask with ~ so that a 0/1 results in 4/0 being subtracted - x_qh[i * (WARP_SIZE/2) + i / 2 + k % (WARP_SIZE/2)] = ~get_int_from_uint8(bxi->hmask, k % (QI3_K/2)); + x_qh[i * (WARP_SIZE_GGUF/2) + i / 2 + k % (WARP_SIZE_GGUF/2)] = ~get_int_from_uint8(bxi->hmask, k % (QI3_K/2)); } #pragma unroll for (int i0 = 0; i0 < mmq_y; i0 += nwarps * 4) { - int i = i0 + i_offset * 4 + k / (WARP_SIZE/4); + int i = i0 + i_offset * 4 + k / (WARP_SIZE_GGUF/4); if (need_check) { i = min(i, i_max); } - const block_q3_K * bxi = bx0 + i*blocks_per_row + (k % (WARP_SIZE/4)) / (QI3_K/4); + const block_q3_K * bxi = bx0 + i*blocks_per_row + (k % (WARP_SIZE_GGUF/4)) / (QI3_K/4); const int ksc = k % (QI3_K/4); @@ -1121,7 +1121,7 @@ template static __device__ __forceinlin const int sc = __vsubss4(sc_low | sc_high, 0x20202020); - x_sc[i * (WARP_SIZE/4) + i / 4 + k % (WARP_SIZE/4)] = sc; + x_sc[i * (WARP_SIZE_GGUF/4) + i / 4 + k % (WARP_SIZE_GGUF/4)] = sc; } } @@ -1134,24 +1134,24 @@ static __device__ __forceinline__ float vec_dot_q3_K_q8_1_mul_mat( const float * x_dmf = (const float *) x_dm; const float * y_df = (const float *) y_ds; - const int8_t * scales = ((const int8_t *) (x_sc + i * (WARP_SIZE/4) + i/4 + kbx*4)) + ky/4; + const int8_t * scales = ((const int8_t *) (x_sc + i * (WARP_SIZE_GGUF/4) + i/4 + kbx*4)) + ky/4; int v[QR3_K*VDR_Q3_K_Q8_1_MMQ]; #pragma unroll for (int l = 0; l < QR3_K*VDR_Q3_K_Q8_1_MMQ; ++l) { - const int kqsx = i * (WARP_SIZE + 1) + kbx*QI3_K + (QI3_K/2) * (ky/(2*QI3_K)) + ky % (QI3_K/2); + const int kqsx = i * (WARP_SIZE_GGUF + 1) + kbx*QI3_K + (QI3_K/2) * (ky/(2*QI3_K)) + ky % (QI3_K/2); const int shift = 2 * ((ky % 32) / 8); const int vll = (x_ql[kqsx + l] >> shift) & 0x03030303; - const int vh = x_qh[i * (WARP_SIZE/2) + i/2 + kbx * (QI3_K/2) + (ky+l)%8] >> ((ky+l) / 8); + const int vh = x_qh[i * (WARP_SIZE_GGUF/2) + i/2 + kbx * (QI3_K/2) + (ky+l)%8] >> ((ky+l) / 8); const int vlh = (vh << 2) & 0x04040404; v[l] = __vsubss4(vll, vlh); } - const int index_y = j * WARP_SIZE + (k*QR3_K) % WARP_SIZE; - return vec_dot_q3_K_q8_1_impl_mmq(v, &y_qs[index_y], scales, x_dmf[i * (WARP_SIZE/QI3_K) + i/QI3_K + kbx], y_df[index_y/QI8_1]); + const int index_y = j * WARP_SIZE_GGUF + (k*QR3_K) % WARP_SIZE_GGUF; + return vec_dot_q3_K_q8_1_impl_mmq(v, &y_qs[index_y], scales, 
x_dmf[i * (WARP_SIZE_GGUF/QI3_K) + i/QI3_K + kbx], y_df[index_y/QI8_1]); } static __device__ __forceinline__ float vec_dot_q4_K_q8_1( @@ -1200,9 +1200,9 @@ static __device__ __forceinline__ float vec_dot_q4_K_q8_1( } template static __device__ __forceinline__ void allocate_tiles_q4_K(int ** x_ql, half2 ** x_dm, int ** x_qh, int ** x_sc) { - __shared__ int tile_x_ql[mmq_y * (WARP_SIZE) + mmq_y]; - __shared__ half2 tile_x_dm[mmq_y * (WARP_SIZE/QI4_K) + mmq_y/QI4_K]; - __shared__ int tile_x_sc[mmq_y * (WARP_SIZE/8) + mmq_y/8]; + __shared__ int tile_x_ql[mmq_y * (WARP_SIZE_GGUF) + mmq_y]; + __shared__ half2 tile_x_dm[mmq_y * (WARP_SIZE_GGUF/QI4_K) + mmq_y/QI4_K]; + __shared__ int tile_x_sc[mmq_y * (WARP_SIZE_GGUF/8) + mmq_y/8]; *x_ql = tile_x_ql; *x_dm = tile_x_dm; @@ -1225,10 +1225,10 @@ template static __device__ __forceinlin i = min(i, i_max); } const block_q4_K * bxi = bx0 + i*blocks_per_row + kbx; - x_ql[i * (WARP_SIZE + 1) + k] = get_int_from_uint8_aligned(bxi->qs, kqsx); + x_ql[i * (WARP_SIZE_GGUF + 1) + k] = get_int_from_uint8_aligned(bxi->qs, kqsx); } - const int blocks_per_tile_x_row = WARP_SIZE / QI4_K; // == 1 if QK_K == 256 + const int blocks_per_tile_x_row = WARP_SIZE_GGUF / QI4_K; // == 1 if QK_K == 256 const int kbxd = k % blocks_per_tile_x_row; // == 0 if QK_K == 256 #pragma unroll @@ -1238,27 +1238,27 @@ template static __device__ __forceinlin i = min(i, i_max); } const block_q4_K * bxi = bx0 + i*blocks_per_row + kbxd; - x_dm[i * (WARP_SIZE/QI4_K) + i / QI4_K + kbxd] = bxi->dm; + x_dm[i * (WARP_SIZE_GGUF/QI4_K) + i / QI4_K + kbxd] = bxi->dm; } #pragma unroll for (int i0 = 0; i0 < mmq_y; i0 += nwarps * 8) { - int i = (i0 + i_offset * 8 + k / (WARP_SIZE/8)) % mmq_y; + int i = (i0 + i_offset * 8 + k / (WARP_SIZE_GGUF/8)) % mmq_y; if (need_check) { i = min(i, i_max); } - const block_q4_K * bxi = bx0 + i*blocks_per_row + (k % (WARP_SIZE/8)) / (QI4_K/8); + const block_q4_K * bxi = bx0 + i*blocks_per_row + (k % (WARP_SIZE_GGUF/8)) / (QI4_K/8); const int * scales = (const int *) bxi->scales; - const int ksc = k % (WARP_SIZE/8); + const int ksc = k % (WARP_SIZE_GGUF/8); // scale arrangement after the following two lines: sc0,...,sc3, sc4,...,sc7, m0,...,m3, m4,...,m8 int scales8 = (scales[(ksc%2) + (ksc!=0)] >> (4 * (ksc & (ksc/2)))) & 0x0F0F0F0F; // lower 4 bits scales8 |= (scales[ksc/2] >> (2 * (ksc % 2))) & 0x30303030; // upper 2 bits - x_sc[i * (WARP_SIZE/8) + i / 8 + ksc] = scales8; + x_sc[i * (WARP_SIZE_GGUF/8) + i / 8 + ksc] = scales8; } } @@ -1267,11 +1267,11 @@ static __device__ __forceinline__ float vec_dot_q4_K_q8_1_mul_mat( const int * __restrict__ y_qs, const half2 * __restrict__ y_ds, const int & i, const int & j, const int & k) { (void)x_qh; - const uint8_t * sc = ((const uint8_t *) &x_sc[i * (WARP_SIZE/8) + i/8 + k/16]) + 2*((k % 16) / 8); + const uint8_t * sc = ((const uint8_t *) &x_sc[i * (WARP_SIZE_GGUF/8) + i/8 + k/16]) + 2*((k % 16) / 8); - const int index_y = j * WARP_SIZE + (QR4_K*k) % WARP_SIZE; - return vec_dot_q4_K_q8_1_impl_mmq(&x_ql[i * (WARP_SIZE + 1) + k], &y_qs[index_y], sc, sc+8, - x_dm[i * (WARP_SIZE/QI4_K) + i/QI4_K], &y_ds[index_y/QI8_1]); + const int index_y = j * WARP_SIZE_GGUF + (QR4_K*k) % WARP_SIZE_GGUF; + return vec_dot_q4_K_q8_1_impl_mmq(&x_ql[i * (WARP_SIZE_GGUF + 1) + k], &y_qs[index_y], sc, sc+8, + x_dm[i * (WARP_SIZE_GGUF/QI4_K) + i/QI4_K], &y_ds[index_y/QI8_1]); } static __device__ __forceinline__ float vec_dot_q5_K_q8_1( @@ -1321,9 +1321,9 @@ static __device__ __forceinline__ float vec_dot_q5_K_q8_1( } template static __device__ 
__forceinline__ void allocate_tiles_q5_K(int ** x_ql, half2 ** x_dm, int ** x_qh, int ** x_sc) { - __shared__ int tile_x_ql[mmq_y * (2*WARP_SIZE) + mmq_y]; - __shared__ half2 tile_x_dm[mmq_y * (WARP_SIZE/QI5_K) + mmq_y/QI5_K]; - __shared__ int tile_x_sc[mmq_y * (WARP_SIZE/8) + mmq_y/8]; + __shared__ int tile_x_ql[mmq_y * (2*WARP_SIZE_GGUF) + mmq_y]; + __shared__ half2 tile_x_dm[mmq_y * (WARP_SIZE_GGUF/QI5_K) + mmq_y/QI5_K]; + __shared__ int tile_x_sc[mmq_y * (WARP_SIZE_GGUF/8) + mmq_y/8]; *x_ql = tile_x_ql; *x_dm = tile_x_dm; @@ -1360,11 +1360,11 @@ template static __device__ __forceinlin const int kq0 = ky - ky % (QI5_K/2) + k % (QI5_K/4) + 0; const int kq1 = ky - ky % (QI5_K/2) + k % (QI5_K/4) + (QI5_K/4); - x_ql[i * (2*WARP_SIZE + 1) + kq0] = ql0 | qh0; - x_ql[i * (2*WARP_SIZE + 1) + kq1] = ql1 | qh1; + x_ql[i * (2*WARP_SIZE_GGUF + 1) + kq0] = ql0 | qh0; + x_ql[i * (2*WARP_SIZE_GGUF + 1) + kq1] = ql1 | qh1; } - const int blocks_per_tile_x_row = WARP_SIZE / QI5_K; // == 1 if QK_K == 256 + const int blocks_per_tile_x_row = WARP_SIZE_GGUF / QI5_K; // == 1 if QK_K == 256 const int kbxd = k % blocks_per_tile_x_row; // == 0 if QK_K == 256 #pragma unroll @@ -1376,40 +1376,40 @@ template static __device__ __forceinlin } const block_q5_K * bxi = bx0 + i*blocks_per_row + kbxd; - x_dm[i * (WARP_SIZE/QI5_K) + i / QI5_K + kbxd] = bxi->dm; + x_dm[i * (WARP_SIZE_GGUF/QI5_K) + i / QI5_K + kbxd] = bxi->dm; } #pragma unroll for (int i0 = 0; i0 < mmq_y; i0 += nwarps * 8) { - int i = (i0 + i_offset * 8 + k / (WARP_SIZE/8)) % mmq_y; + int i = (i0 + i_offset * 8 + k / (WARP_SIZE_GGUF/8)) % mmq_y; if (need_check) { i = min(i, i_max); } - const block_q5_K * bxi = bx0 + i*blocks_per_row + (k % (WARP_SIZE/8)) / (QI5_K/8); + const block_q5_K * bxi = bx0 + i*blocks_per_row + (k % (WARP_SIZE_GGUF/8)) / (QI5_K/8); const int * scales = (const int *) bxi->scales; - const int ksc = k % (WARP_SIZE/8); + const int ksc = k % (WARP_SIZE_GGUF/8); // scale arrangement after the following two lines: sc0,...,sc3, sc4,...,sc7, m0,...,m3, m4,...,m8 int scales8 = (scales[(ksc%2) + (ksc!=0)] >> (4 * (ksc & (ksc/2)))) & 0x0F0F0F0F; // lower 4 bits scales8 |= (scales[ksc/2] >> (2 * (ksc % 2))) & 0x30303030; // upper 2 bits - x_sc[i * (WARP_SIZE/8) + i / 8 + ksc] = scales8; + x_sc[i * (WARP_SIZE_GGUF/8) + i / 8 + ksc] = scales8; } } static __device__ __forceinline__ float vec_dot_q5_K_q8_1_mul_mat( const int * __restrict__ x_ql, const half2 * __restrict__ x_dm, const int * __restrict__ x_qh, const int * __restrict__ x_sc, const int * __restrict__ y_qs, const half2 * __restrict__ y_ds, const int & i, const int & j, const int & k) { - const uint8_t * sc = ((const uint8_t *) &x_sc[i * (WARP_SIZE/8) + i/8 + k/16]) + 2 * ((k % 16) / 8); + const uint8_t * sc = ((const uint8_t *) &x_sc[i * (WARP_SIZE_GGUF/8) + i/8 + k/16]) + 2 * ((k % 16) / 8); - const int index_x = i * (QR5_K*WARP_SIZE + 1) + QR5_K*k; - const int index_y = j * WARP_SIZE + (QR5_K*k) % WARP_SIZE; + const int index_x = i * (QR5_K*WARP_SIZE_GGUF + 1) + QR5_K*k; + const int index_y = j * WARP_SIZE_GGUF + (QR5_K*k) % WARP_SIZE_GGUF; return vec_dot_q5_K_q8_1_impl_mmq(&x_ql[index_x], &y_qs[index_y], sc, sc+8, - x_dm[i * (WARP_SIZE/QI5_K) + i/QI5_K], &y_ds[index_y/QI8_1]); + x_dm[i * (WARP_SIZE_GGUF/QI5_K) + i/QI5_K], &y_ds[index_y/QI8_1]); } static __device__ __forceinline__ float vec_dot_q6_K_q8_1( @@ -1439,9 +1439,9 @@ static __device__ __forceinline__ float vec_dot_q6_K_q8_1( } template static __device__ __forceinline__ void allocate_tiles_q6_K(int ** x_ql, half2 ** x_dm, int 
** x_qh, int ** x_sc) { - __shared__ int tile_x_ql[mmq_y * (2*WARP_SIZE) + mmq_y]; - __shared__ half2 tile_x_dm[mmq_y * (WARP_SIZE/QI6_K) + mmq_y/QI6_K]; - __shared__ int tile_x_sc[mmq_y * (WARP_SIZE/8) + mmq_y/8]; + __shared__ int tile_x_ql[mmq_y * (2*WARP_SIZE_GGUF) + mmq_y]; + __shared__ half2 tile_x_dm[mmq_y * (WARP_SIZE_GGUF/QI6_K) + mmq_y/QI6_K]; + __shared__ int tile_x_sc[mmq_y * (WARP_SIZE_GGUF/8) + mmq_y/8]; *x_ql = tile_x_ql; *x_dm = tile_x_dm; @@ -1478,11 +1478,11 @@ template static __device__ __forceinlin const int kq0 = ky - ky % QI6_K + k % (QI6_K/2) + 0; const int kq1 = ky - ky % QI6_K + k % (QI6_K/2) + (QI6_K/2); - x_ql[i * (2*WARP_SIZE + 1) + kq0] = __vsubss4(ql0 | qh0, 0x20202020); - x_ql[i * (2*WARP_SIZE + 1) + kq1] = __vsubss4(ql1 | qh1, 0x20202020); + x_ql[i * (2*WARP_SIZE_GGUF + 1) + kq0] = __vsubss4(ql0 | qh0, 0x20202020); + x_ql[i * (2*WARP_SIZE_GGUF + 1) + kq1] = __vsubss4(ql1 | qh1, 0x20202020); } - const int blocks_per_tile_x_row = WARP_SIZE / QI6_K; // == 1 if QK_K == 256 + const int blocks_per_tile_x_row = WARP_SIZE_GGUF / QI6_K; // == 1 if QK_K == 256 const int kbxd = k % blocks_per_tile_x_row; // == 0 if QK_K == 256 float * x_dmf = (float *) x_dm; @@ -1496,20 +1496,20 @@ template static __device__ __forceinlin const block_q6_K * bxi = bx0 + i*blocks_per_row + kbxd; - x_dmf[i * (WARP_SIZE/QI6_K) + i / QI6_K + kbxd] = __half2float(bxi->d); + x_dmf[i * (WARP_SIZE_GGUF/QI6_K) + i / QI6_K + kbxd] = __half2float(bxi->d); } #pragma unroll for (int i0 = 0; i0 < mmq_y; i0 += nwarps * 8) { - int i = (i0 + i_offset * 8 + k / (WARP_SIZE/8)) % mmq_y; + int i = (i0 + i_offset * 8 + k / (WARP_SIZE_GGUF/8)) % mmq_y; if (need_check) { i = min(i, i_max); } - const block_q6_K * bxi = bx0 + i*blocks_per_row + (k % (WARP_SIZE/8)) / 4; + const block_q6_K * bxi = bx0 + i*blocks_per_row + (k % (WARP_SIZE_GGUF/8)) / 4; - x_sc[i * (WARP_SIZE/8) + i / 8 + k % (WARP_SIZE/8)] = get_int_from_int8(bxi->scales, k % (QI6_K/8)); + x_sc[i * (WARP_SIZE_GGUF/8) + i / 8 + k % (WARP_SIZE_GGUF/8)] = get_int_from_int8(bxi->scales, k % (QI6_K/8)); } } @@ -1519,11 +1519,11 @@ static __device__ __forceinline__ float vec_dot_q6_K_q8_1_mul_mat( const float * x_dmf = (const float *) x_dm; const float * y_df = (const float *) y_ds; - const int8_t * sc = ((const int8_t *) &x_sc[i * (WARP_SIZE/8) + i/8 + k/8]); + const int8_t * sc = ((const int8_t *) &x_sc[i * (WARP_SIZE_GGUF/8) + i/8 + k/8]); - const int index_x = i * (QR6_K*WARP_SIZE + 1) + QR6_K*k; - const int index_y = j * WARP_SIZE + (QR6_K*k) % WARP_SIZE; - return vec_dot_q6_K_q8_1_impl_mmq(&x_ql[index_x], &y_qs[index_y], sc, x_dmf[i * (WARP_SIZE/QI6_K) + i/QI6_K], &y_df[index_y/QI8_1]); + const int index_x = i * (QR6_K*WARP_SIZE_GGUF + 1) + QR6_K*k; + const int index_y = j * WARP_SIZE_GGUF + (QR6_K*k) % WARP_SIZE_GGUF; + return vec_dot_q6_K_q8_1_impl_mmq(&x_ql[index_x], &y_qs[index_y], sc, x_dmf[i * (WARP_SIZE_GGUF/QI6_K) + i/QI6_K], &y_df[index_y/QI8_1]); } static __device__ __forceinline__ float vec_dot_iq2_xxs_q8_1( @@ -1582,7 +1582,7 @@ static __device__ __forceinline__ float vec_dot_iq2_xs_q8_1( static __device__ __forceinline__ float vec_dot_iq2_s_q8_1( const void * __restrict__ vbq, const block_q8_1 * __restrict__ bq8_1, const int & iqs) { -#if defined __CUDA_ARCH__ && __CUDA_ARCH__ >= 610 +#if defined __CUDA_ARCH__ && __CUDA_ARCH__ >= 610 || defined USE_ROCM const block_iq2_s * bq2 = (const block_iq2_s *) vbq; const int ib32 = iqs; @@ -1619,7 +1619,7 @@ static __device__ __forceinline__ float vec_dot_iq2_s_q8_1( static __device__ 
__forceinline__ float vec_dot_iq3_xxs_q8_1( const void * __restrict__ vbq, const block_q8_1 * __restrict__ bq8_1, const int & iqs) { -#if defined __CUDA_ARCH__ && __CUDA_ARCH__ >= 610 +#if defined __CUDA_ARCH__ && __CUDA_ARCH__ >= 610 || defined USE_ROCM const block_iq3_xxs * bq2 = (const block_iq3_xxs *) vbq; const int ib32 = iqs; @@ -1646,7 +1646,7 @@ static __device__ __forceinline__ float vec_dot_iq3_xxs_q8_1( static __device__ __forceinline__ float vec_dot_iq3_s_q8_1( const void * __restrict__ vbq, const block_q8_1 * __restrict__ bq8_1, const int & iqs) { -#if defined __CUDA_ARCH__ && __CUDA_ARCH__ >= 610 +#if defined __CUDA_ARCH__ && __CUDA_ARCH__ >= 610 || defined USE_ROCM const block_iq3_s * bq2 = (const block_iq3_s *) vbq; const int ib32 = iqs; @@ -1671,7 +1671,7 @@ static __device__ __forceinline__ float vec_dot_iq3_s_q8_1( static __device__ __forceinline__ float vec_dot_iq1_s_q8_1( const void * __restrict__ vbq, const block_q8_1 * __restrict__ bq8_1, const int & iqs) { -#if defined __CUDA_ARCH__ && __CUDA_ARCH__ >= 610 +#if defined __CUDA_ARCH__ && __CUDA_ARCH__ >= 610 || defined USE_ROCM const block_iq1_s * bq1 = (const block_iq1_s *) vbq; const int qs_packed = get_int_b2(bq1->qs, iqs); @@ -1703,7 +1703,7 @@ static __device__ __forceinline__ float vec_dot_iq1_s_q8_1( static __device__ __forceinline__ float vec_dot_iq1_m_q8_1( const void * __restrict__ vbq, const block_q8_1 * __restrict__ bq8_1, const int & iqs) { -#if defined __CUDA_ARCH__ && __CUDA_ARCH__ >= 610 +#if defined __CUDA_ARCH__ && __CUDA_ARCH__ >= 610 || defined USE_ROCM const block_iq1_m * bq1 = (const block_iq1_m *) vbq; @@ -1763,7 +1763,7 @@ static __device__ __forceinline__ void get_int_from_table_16(const uint32_t & q4 static __device__ __forceinline__ float vec_dot_iq4_nl_q8_1( const void * __restrict__ vbq, const block_q8_1 * __restrict__ bq8_1, const int & iqs) { -#if defined __CUDA_ARCH__ && __CUDA_ARCH__ >= 610 +#if defined __CUDA_ARCH__ && __CUDA_ARCH__ >= 610 || defined USE_ROCM const block_iq4_nl * bq = (const block_iq4_nl *) vbq; @@ -1788,7 +1788,7 @@ static __device__ __forceinline__ float vec_dot_iq4_nl_q8_1( static __device__ __forceinline__ float vec_dot_iq4_xs_q8_1( const void * __restrict__ vbq, const block_q8_1 * __restrict__ bq8_1, const int & iqs) { -#if defined __CUDA_ARCH__ && __CUDA_ARCH__ >= 610 +#if defined __CUDA_ARCH__ && __CUDA_ARCH__ >= 610 || defined USE_ROCM const block_iq4_xs * bq4 = (const block_iq4_xs *) vbq; const uint8_t * values = (const uint8_t *)kvalues_iq4nl; diff --git a/csrc/torch_bindings.cpp b/csrc/torch_bindings.cpp index 3dccdf61abf3b..4e64b9c92773a 100644 --- a/csrc/torch_bindings.cpp +++ b/csrc/torch_bindings.cpp @@ -258,6 +258,7 @@ TORCH_LIBRARY_EXPAND(TORCH_EXTENSION_NAME, ops) { "awq_marlin_repack(Tensor b_q_weight, SymInt size_k, " "SymInt size_n, int num_bits) -> Tensor"); // conditionally compiled so impl registrations are in source file +#endif // Dequantization for GGML. ops.def("ggml_dequantize(Tensor W, int type, SymInt m, SymInt n) -> Tensor"); @@ -274,6 +275,7 @@ TORCH_LIBRARY_EXPAND(TORCH_EXTENSION_NAME, ops) { "ggml_mul_mat_a8(Tensor W, Tensor X, int type, SymInt row) -> Tensor"); ops.impl("ggml_mul_mat_a8", torch::kCUDA, &ggml_mul_mat_a8); +#ifndef USE_ROCM // fp8_marlin Optimized Quantized GEMM for FP8 weight-only. 
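The reduction loop in the mmvq.cuh hunk earlier in this patch replaces the hard-coded mask 16 and width 32 with `WARP_SIZE/2` and `VLLM_SHFL_XOR_SYNC`, which makes the partial-sum reduction warp-size agnostic (ROCm wavefronts are typically 64 lanes wide). The host-side Python sketch below is only an illustration of why halving the XOR mask from `WARP_SIZE/2` accumulates every lane's contribution for either warp width; `butterfly_reduce` is an invented name, not part of the kernels.

```python
# Host-side model of the XOR-shuffle butterfly reduction in mul_mat_vec_q.
# One round with mask m adds lane (i ^ m)'s value into lane i for all lanes
# at once; after log2(warp_size) rounds every lane holds the full sum, so
# lane 0 (which writes dst in the kernel) has the result for 32 or 64 lanes.

def butterfly_reduce(partials):
    lanes = list(partials)
    warp_size = len(lanes)
    mask = warp_size // 2        # mirrors: for (mask = WARP_SIZE/2; mask > 0; mask >>= 1)
    while mask > 0:
        lanes = [lanes[i] + lanes[i ^ mask] for i in range(warp_size)]
        mask >>= 1
    return lanes

for warp_size in (32, 64):       # CUDA warp, ROCm wavefront
    partials = list(range(warp_size))
    assert all(v == sum(partials) for v in butterfly_reduce(partials))
```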
ops.def( "fp8_marlin_gemm(Tensor a, Tensor b_q_weight, Tensor b_scales, " diff --git a/vllm/_custom_ops.py b/vllm/_custom_ops.py index 41892e4dddf7e..c192c9a7b0e4d 100644 --- a/vllm/_custom_ops.py +++ b/vllm/_custom_ops.py @@ -344,31 +344,6 @@ def _gptq_marlin_gemm_fake(a: torch.Tensor, is_zp_float: bool = False) -> torch.Tensor: return torch.empty((size_m, size_n), device=a.device, dtype=a.dtype) - @register_fake("_C::ggml_dequantize") - def _ggml_dequantize_fake(W: torch.Tensor, quant_type: int, - m: torch.SymInt, - n: torch.SymInt) -> torch.Tensor: - return torch.empty((m, n), dtype=torch.float16, device=W.device) - - @register_fake("_C::ggml_mul_mat_vec_a8") - def _ggml_mul_mat_vec_a8_fake( - W: torch.Tensor, - X: torch.Tensor, - quant_type: int, - row: torch.SymInt, - ) -> torch.Tensor: - return torch.empty((1, row), dtype=torch.float16, device=W.device) - - @register_fake("_C::ggml_mul_mat_a8") - def _ggml_mul_mat_a8_fake( - W: torch.Tensor, - X: torch.Tensor, - quant_type: int, - row: torch.SymInt, - ) -> torch.Tensor: - batch = X.size(0) - return torch.empty((batch, row), dtype=torch.float16, device=W.device) - @register_fake("_C::marlin_qqq_gemm") def _marlin_qqq_gemm_fake(a: torch.Tensor, b_q_weight: torch.Tensor, s_tok: torch.Tensor, s_ch: torch.Tensor, @@ -468,6 +443,34 @@ def machete_prepack_B_fake( memory_format=torch.contiguous_format) +if hasattr(torch.ops._C, "ggml_dequantize"): + + @register_fake("_C::ggml_dequantize") + def _ggml_dequantize_fake(W: torch.Tensor, quant_type: int, + m: torch.SymInt, + n: torch.SymInt) -> torch.Tensor: + return torch.empty((m, n), dtype=torch.float16, device=W.device) + + @register_fake("_C::ggml_mul_mat_vec_a8") + def _ggml_mul_mat_vec_a8_fake( + W: torch.Tensor, + X: torch.Tensor, + quant_type: int, + row: torch.SymInt, + ) -> torch.Tensor: + return torch.empty((1, row), dtype=torch.float16, device=W.device) + + @register_fake("_C::ggml_mul_mat_a8") + def _ggml_mul_mat_a8_fake( + W: torch.Tensor, + X: torch.Tensor, + quant_type: int, + row: torch.SymInt, + ) -> torch.Tensor: + batch = X.size(0) + return torch.empty((batch, row), dtype=torch.float16, device=W.device) + + # cutlass def cutlass_scaled_mm_supports_fp8(cuda_device_capability: int) -> bool: return torch.ops._C.cutlass_scaled_mm_supports_fp8(cuda_device_capability) diff --git a/vllm/config.py b/vllm/config.py index 42a44f5415e9f..f163665e2c063 100644 --- a/vllm/config.py +++ b/vllm/config.py @@ -387,7 +387,7 @@ def _verify_quantization(self) -> None: supported_quantization = QUANTIZATION_METHODS rocm_supported_quantization = [ "awq", "gptq", "fp8", "compressed_tensors", "compressed-tensors", - "fbgemm_fp8" + "fbgemm_fp8", "gguf" ] optimized_quantization_methods = [ "fp8", "marlin", "modelopt", "gptq_marlin_24", "gptq_marlin", From 4634a89d18569ef0ee2d7dd2d535377a1f460188 Mon Sep 17 00:00:00 2001 From: Ricky Xu Date: Fri, 22 Nov 2024 21:15:55 -0800 Subject: [PATCH 101/122] Prefix Cache Aware Scheduling [1/n] (#10128) Signed-off-by: rickyx --- tests/core/block/test_prefix_caching_block.py | 181 +++++++++- tests/core/test_scheduler.py | 179 +++++++++- tests/core/utils.py | 51 ++- tests/prefix_caching/test_prefix_caching.py | 106 +++++- vllm/core/block/cpu_gpu_block_allocator.py | 15 +- vllm/core/block/interfaces.py | 36 +- vllm/core/block/naive_block.py | 11 +- vllm/core/block/prefix_caching_block.py | 258 ++++++++----- vllm/core/block_manager.py | 23 +- vllm/core/interfaces.py | 4 + vllm/core/placeholder_block_space_manager.py | 3 + vllm/core/scheduler.py | 338 +++++++++++++----- 
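In the `vllm/_custom_ops.py` hunk above, the GGML fake (meta) implementations move behind a `hasattr(torch.ops._C, "ggml_dequantize")` check, so they are only registered when the compiled extension actually exposes those ops (the torch_bindings.cpp change now binds them outside the `#ifndef USE_ROCM` region, but other build variants may still omit them). A minimal self-contained sketch of the guard pattern follows; `_Ext`, `ext` and the dictionary-backed `register_fake` are stand-ins for illustration, not the real torch APIs.

```python
# Only register a fake (shape-only) implementation for ops the extension exposes.

class _Ext:
    """Stand-in for the compiled op namespace. Remove the method to simulate
    a build where the GGML kernels were not compiled in."""

    def ggml_dequantize(self, W, quant_type, m, n):
        raise NotImplementedError("real kernel lives in the C++/CUDA extension")


ext = _Ext()
_fake_impls = {}


def register_fake(name):
    def decorator(fn):
        _fake_impls[name] = fn
        return fn
    return decorator


if hasattr(ext, "ggml_dequantize"):          # same shape as the real check
    @register_fake("ggml_dequantize")
    def _ggml_dequantize_fake(W, quant_type, m, n):
        # The real fake returns torch.empty((m, n), dtype=torch.float16);
        # here we just return the shape it would allocate.
        return (m, n)

assert ("ggml_dequantize" in _fake_impls) == hasattr(ext, "ggml_dequantize")
```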
vllm/sequence.py | 3 + 13 files changed, 967 insertions(+), 241 deletions(-) diff --git a/tests/core/block/test_prefix_caching_block.py b/tests/core/block/test_prefix_caching_block.py index d325b9606843e..bbeb4b3a58f2a 100644 --- a/tests/core/block/test_prefix_caching_block.py +++ b/tests/core/block/test_prefix_caching_block.py @@ -5,9 +5,14 @@ import pytest +from tests.core.utils import create_dummy_sequence +from vllm.core.block.cpu_gpu_block_allocator import CpuGpuBlockAllocator from vllm.core.block.interfaces import Block, BlockAllocator -from vllm.core.block.prefix_caching_block import (PrefixCachingBlock, +from vllm.core.block.prefix_caching_block import (ComputedBlocksTracker, + PrefixCachingBlock, PrefixCachingBlockAllocator) +from vllm.sequence import Logprob +from vllm.utils import Device class TestPrefixCachingBlock: @@ -726,18 +731,71 @@ def test_touch_block(): token_ids=common_token_ids, allocator=allocator, ) - block_ids = [block.block_id for block in blocks] + block_hashes = [block.content_hash for block in blocks] # The allocated blocks should be marked as touched # but not computed. - computed_block_ids = allocator.get_computed_block_ids( - [], block_ids, skip_last_block_id=False) + computed_block_ids = allocator.find_cached_blocks_prefix( + block_hashes) assert len(computed_block_ids) == 0 allocator.mark_blocks_as_computed([]) - computed_block_ids = allocator.get_computed_block_ids( - [], block_ids, skip_last_block_id=False) + computed_block_ids = allocator.find_cached_blocks_prefix( + block_hashes=block_hashes) assert len(computed_block_ids) == common_blocks + @staticmethod + def test_find_cached_blocks_prefix(): + """ + This test verifies the behavior of find_cached_blocks_prefix. + """ + block_size = 4 + num_blocks = 8 + total_test_blocks = 12 + allocator = PrefixCachingBlockAllocator(num_blocks=num_blocks, + block_size=block_size) + + token_ids = list(range(total_test_blocks * block_size)) + block_tokens_seq1 = token_ids[:num_blocks * block_size] + blocks_seq1 = TestPrefixCachingBlockAllocator.create_immutable_chain( + block_size=block_size, + token_ids=block_tokens_seq1, + allocator=allocator, + ) + block_hashes_seq1 = [block.content_hash for block in blocks_seq1] + allocator.mark_blocks_as_computed([]) + + # All blocks should be cached. + cached_blocks_seq1 = allocator.find_cached_blocks_prefix( + block_hashes=block_hashes_seq1) + assert len(cached_blocks_seq1) == num_blocks + + # Free the first sequence. + for block in blocks_seq1: + allocator.free(block) + + # All blocks should be still be cached if not required to be allocated. + cached_blocks = allocator.find_cached_blocks_prefix( + block_hashes=block_hashes_seq1) + assert len(cached_blocks) == num_blocks + + block_tokens_seq2 = token_ids[num_blocks * block_size:] + blocks_seq2 = TestPrefixCachingBlockAllocator.create_immutable_chain( + block_size=block_size, + token_ids=block_tokens_seq2, + allocator=allocator, + ) + block_hashes_seq2 = [block.content_hash for block in blocks_seq2] + allocator.mark_blocks_as_computed([]) + cached_blocks = allocator.find_cached_blocks_prefix( + block_hashes=block_hashes_seq2) + assert len(cached_blocks) == len(blocks_seq2) + + # Half of the blocks from seq1 should still be cached. 
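The rewritten tests above query the cache by `content_hash` rather than block id: `find_cached_blocks_prefix` takes the hashes of a sequence's full blocks and returns the leading run that is already cached. Each block's hash is chained from the previous block's hash plus the block's own token ids, so a matching hash implies the entire prefix up to that block matches. The sketch below is schematic only; the real allocator's hashing, refcounting and eviction are more involved, and `chained_block_hashes` is an invented helper.

```python
# Schematic prefix lookup by chained block hashes (not the real allocator).

BLOCK_SIZE = 4


def chained_block_hashes(token_ids, block_size=BLOCK_SIZE):
    """Hash of block i covers block i-1's hash plus block i's tokens, so one
    hash identifies the whole prefix that ends at that block."""
    hashes, prev = [], None
    for b in range(len(token_ids) // block_size):
        block = tuple(token_ids[b * block_size:(b + 1) * block_size])
        prev = hash((prev, block))
        hashes.append(prev)
    return hashes


def find_cached_blocks_prefix(block_hashes, cached_hashes):
    """Longest leading run of block_hashes that is already cached."""
    prefix = []
    for h in block_hashes:
        if h not in cached_hashes:
            break
        prefix.append(h)
    return prefix


cache = set()
seq1 = list(range(8 * BLOCK_SIZE))
cache.update(chained_block_hashes(seq1))                       # seq1 cached

assert len(find_cached_blocks_prefix(chained_block_hashes(seq1), cache)) == 8

# A sequence that shares only the first block diverges after one cached block.
seq2 = seq1[:BLOCK_SIZE] + [999] * (4 * BLOCK_SIZE)
assert len(find_cached_blocks_prefix(chained_block_hashes(seq2), cache)) == 1
```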
+ num_evicted_blocks = len(blocks_seq2) + cached_blocks = allocator.find_cached_blocks_prefix( + block_hashes=block_hashes_seq1) + assert len(cached_blocks) == len(blocks_seq1) - num_evicted_blocks + @staticmethod def create_immutable_chain( block_size: int, @@ -762,3 +820,114 @@ def create_immutable_chain( blocks.append(prev_block) return blocks + + +class TestComputedBlocksTracker: + + @staticmethod + def _get_mock_allocator(): + return MagicMock(spec=PrefixCachingBlockAllocator) + + @staticmethod + def test_get_num_cached_tokens(): + """ + Test it correctly computes the number of cached tokens for a given + sequence: + + - The cache token count is derived from the number of cached blocks. + - The cache token count is updated when the allocator is updated. + - When a sequence is removed, the cache token count should be updated + accordingly. + + # TODO(rickyx): This behaviour for prefill sequence is a hack until + we fix the computed blocks tracking. + - The cache token count for prefill sequence doesn't change while + the sequence is in continuous prefill (chunked prefill). + """ + block_size = 4 + mock_allocator = TestComputedBlocksTracker._get_mock_allocator() + tracker = ComputedBlocksTracker( + allocator=mock_allocator, + block_size=block_size, + enable_caching=True, + ) + + # Not yet allocated. + tokens = [0, 1, 2, 3, 4, 5] + seq1 = create_dummy_sequence(request_id=0, + token_ids=tokens, + block_size=block_size) + mock_allocator.find_cached_blocks_prefix.return_value = [] + assert tracker.get_num_cached_tokens(seq1) == 0 + + mock_allocator.find_cached_blocks_prefix.return_value = [ + None + ] # 1 block cached. + # Result is cached for prefill sequence. + assert tracker.get_num_cached_tokens(seq1) == 0 + + # Mark the sequence as non-prefill. + seq1.data.update_num_computed_tokens(len(tokens)) # 6 tokens computed. + assert not seq1.is_prefill() + + # Recomputes for decoding sequence. + assert tracker.get_num_cached_tokens(seq1) == 4 + + # Append new tokens to the sequence. + num_new_tokens = 3 + for i in range(num_new_tokens): + seq1.append_token_id(i, {i: Logprob(logprob=0.0)}) + + assert tracker.get_num_cached_tokens(seq1) == 4 + + # Update the allocator. + mock_allocator.find_cached_blocks_prefix.return_value = [ + None + ] * 2 # 2 blocks cached. + assert tracker.get_num_cached_tokens(seq1) == 8 + + # Remove the sequence. + tracker.remove_seq(seq1.seq_id) + + # Re-create the sequence with the same request id to simulate recompute. + seq1 = create_dummy_sequence(request_id=0, + token_ids=tokens, + block_size=block_size) + mock_allocator.find_cached_blocks_prefix.return_value = [ + ] # no cached block + assert tracker.get_num_cached_tokens(seq1) == 0 + + @staticmethod + def test_correct_block_hash(): + """ + Test that the block hash is correctly computed for a sequence (should + match the underlying block allocator's block hash). So the number of + cached tokens is correctly retrieved. + """ + block_size = 4 + allocator = CpuGpuBlockAllocator.create( + allocator_type="prefix_caching", + num_gpu_blocks=16, + num_cpu_blocks=16, + block_size=block_size, + ) + gpu_allocator = allocator._allocators[Device.GPU] + + tracker = ComputedBlocksTracker( + allocator=allocator, + block_size=block_size, + enable_caching=True, + ) + + tokens = list(range(block_size * 4)) # 4 blocks. 
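`test_get_num_cached_tokens` above pins down what the new `ComputedBlocksTracker` is expected to do: translate the cached-block prefix into a token count (cached blocks times the block size), keep returning the first value it computed while a sequence is still prefilling (the hack the TODO in the docstring refers to), recompute once the sequence is decoding, and forget the sequence in `remove_seq`. The condensed stand-in below mirrors only that observable behaviour; every class in it is an illustrative fake, not the vLLM implementation.

```python
# Reduced stand-in for ComputedBlocksTracker, matching the behaviour asserted
# in the test above. All classes here are illustrative fakes.

class TinyComputedBlocksTracker:
    def __init__(self, allocator, block_size):
        self._allocator = allocator
        self._block_size = block_size
        self._frozen = {}                      # seq_id -> cached token count

    def get_num_cached_tokens(self, seq):
        if seq.is_prefill() and seq.seq_id in self._frozen:
            return self._frozen[seq.seq_id]    # fixed while still prefilling
        cached = self._allocator.find_cached_blocks_prefix(seq.block_hashes)
        num_cached_tokens = len(cached) * self._block_size
        if seq.is_prefill():
            self._frozen[seq.seq_id] = num_cached_tokens
        return num_cached_tokens

    def remove_seq(self, seq_id):
        self._frozen.pop(seq_id, None)         # recomputed requests start clean


class FakeSeq:
    def __init__(self, seq_id, block_hashes, prefill=True):
        self.seq_id, self.block_hashes, self._prefill = seq_id, block_hashes, prefill

    def is_prefill(self):
        return self._prefill


class FakeAllocator:
    cached = []

    def find_cached_blocks_prefix(self, block_hashes):
        return self.cached


tracker = TinyComputedBlocksTracker(FakeAllocator(), block_size=4)
seq = FakeSeq(seq_id=0, block_hashes=["h0", "h1"])
assert tracker.get_num_cached_tokens(seq) == 0     # nothing cached yet
FakeAllocator.cached = ["h0"]
assert tracker.get_num_cached_tokens(seq) == 0     # frozen during prefill
seq._prefill = False
assert tracker.get_num_cached_tokens(seq) == 4     # recomputed for decode
```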
+ seq = create_dummy_sequence(request_id=0, + token_ids=tokens, + block_size=block_size) + _ = TestPrefixCachingBlockAllocator.create_immutable_chain( + block_size=block_size, + token_ids=tokens, + allocator=gpu_allocator, + ) + allocator.mark_blocks_as_computed([]) + + assert tracker.get_num_cached_tokens(seq) == len(tokens) diff --git a/tests/core/test_scheduler.py b/tests/core/test_scheduler.py index 5ff32be611592..8f6de84e566e7 100644 --- a/tests/core/test_scheduler.py +++ b/tests/core/test_scheduler.py @@ -12,9 +12,9 @@ from vllm.lora.request import LoRARequest from vllm.sequence import SequenceGroup -from .utils import (append_new_token, append_new_token_seq_group, - create_dummy_prompt, get_sequence_groups, - schedule_and_update_computed_tokens) +from .utils import (append_new_token, append_new_token_seq, + append_new_token_seq_group, create_dummy_prompt, + get_sequence_groups, schedule_and_update_computed_tokens) def test_scheduler_add_seq_group(): @@ -305,6 +305,8 @@ def initialize_scheduler( block_size=4, num_cpu_blocks=8, num_gpu_blocks=8, + enable_prefix_caching=False, + enable_chunked_prefill=False, ): block_size = block_size scheduler_config = SchedulerConfig( @@ -312,8 +314,15 @@ def initialize_scheduler( max_num_batched_tokens=max_token_budget, max_num_seqs=max_num_seqs, max_model_len=max_model_len, + enable_chunked_prefill=enable_chunked_prefill, + ) + cache_config = CacheConfig( + block_size, + 1.0, + 1, + "auto", + enable_prefix_caching=enable_prefix_caching, ) - cache_config = CacheConfig(block_size, 1.0, 1, "auto") cache_config.num_cpu_blocks = num_cpu_blocks cache_config.num_gpu_blocks = num_gpu_blocks scheduler = Scheduler(scheduler_config, cache_config, lora_config) @@ -800,3 +809,165 @@ def test_scheduling_budget(): assert budget.num_curr_seqs == 0 budget.subtract_num_seqs(seq_group.request_id, 2) assert budget.num_curr_seqs == 0 + + +@pytest.mark.parametrize("enable_prefix_caching", [True, False]) +def test_prefix_caching_aware_prefills(enable_prefix_caching): + """ + Test the below scenario: + + For 3 sequences, seqA, seqB, seqC, share the first block as prefix. + + The test verifies the below scenarios: + 1. SeqA is first scheduled. + 2. SeqB and SeqC can be prefilled together in a single schedule round + even though there are not enough token budgets to prefill both without + considering prefix caching. + """ + + block_size = 4 + max_num_batched_tokens = 12 + max_seq_group = 3 + scheduler = initialize_scheduler( + block_size=block_size, + num_cpu_blocks=16, + num_gpu_blocks=16, + max_token_budget=max_num_batched_tokens, + max_num_seqs=max_seq_group, + max_model_len=max_num_batched_tokens, + enable_prefix_caching=enable_prefix_caching, + ) + + seqA_tokens = list(range(8)) + num_shared_tokens = 4 + seqB_tokens = seqA_tokens[:num_shared_tokens] + list(range( + 12, 16)) # Shared prefix first 4. + seqC_tokens = seqA_tokens[:num_shared_tokens] + list(range( + 16, 20)) # Shared prefix first 4. + + seqA, seqA_group = create_dummy_prompt("0", + prompt_tokens=seqA_tokens, + block_size=block_size) + seqB, seqB_group = create_dummy_prompt("1", + prompt_tokens=seqB_tokens, + block_size=block_size) + seqC, seqC_group = create_dummy_prompt("2", + prompt_tokens=seqC_tokens, + block_size=block_size) + + # Schedule seqA prefill. 
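`test_prefix_caching_aware_prefills` above only passes if the scheduler charges its token budget for uncached prompt tokens: seqB and seqC are 8 tokens each, 4 of which are already cached from seqA's prefill, so together they cost 2 * (8 - 4) = 8 of the 12-token budget and can be prefilled in one step, whereas without prefix caching 8 + 8 = 16 exceeds the budget and only one of them is scheduled. The same arithmetic as a quick check, with plain numbers rather than scheduler objects:

```python
# Budget arithmetic behind test_prefix_caching_aware_prefills: only uncached
# prompt tokens count against max_num_batched_tokens when caching is enabled.

budget = 12                                   # max_num_batched_tokens
prompt_len, cached_len = 8, 4                 # seqB/seqC share 4 tokens with seqA

with_prefix_caching = 2 * (prompt_len - cached_len)   # 8 tokens charged
without_prefix_caching = 2 * prompt_len               # 16 tokens charged

assert with_prefix_caching <= budget          # both prefills fit in one step
assert without_prefix_caching > budget        # otherwise only one fits
```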
+ scheduler.add_seq_group(seqA_group) + metas, out, _ = scheduler.schedule() + assert (len(out.scheduled_seq_groups) == 1 + and out.scheduled_seq_groups[0].seq_group == seqA_group) + assert out.scheduled_seq_groups[0].token_chunk_size == len(seqA_tokens) + + # Schedule seqA decode. + append_new_token_seq_group(len(seqA_tokens), seqA_group, 999) + metas, out, _ = scheduler.schedule() + + assert len(out.scheduled_seq_groups) == 1 + assert out.scheduled_seq_groups[0].seq_group == seqA_group + assert out.scheduled_seq_groups[0].token_chunk_size == 1 + + # Schedule seqB and seqC prefills should work with prefix caching. + scheduler.add_seq_group(seqB_group) + scheduler.add_seq_group(seqC_group) + metas, out, _ = scheduler.schedule() + + if enable_prefix_caching: + assert len(out.scheduled_seq_groups) == 2 + assert set([ + out.scheduled_seq_groups[0].seq_group, + out.scheduled_seq_groups[1].seq_group, + ]) == set([seqB_group, seqC_group]) + assert len(metas) == 2 + for meta in metas: + assert meta.token_chunk_size == 8 + assert (len(meta.computed_block_nums) == num_shared_tokens // + block_size) # 1 Block for the 8 tokens. + else: + assert len(out.scheduled_seq_groups) == 1 + assert len(metas) == 1 + assert metas[0].token_chunk_size == 8 + assert len(metas[0].computed_block_nums) == 0 # No blocks computed. + + +def test_no_multiple_partial_prefills_with_chunked_prefill_and_prefix_caching( +): + """ + This test verifies that we don't schedule new prefills if there's already + a continuous prefill in progress even though the new prefills with shared + prefix can fit in the token budget: + + - SeqA is being chunked prefill. + - SeqB with the same prompt shouldn't be scheduled for prefill even though + there's enough token budget to prefill the cached tokens. + - Neither should seqC be scheduled. + + - When seqA is in decoding phase, seqB and seqC can be scheduled. + - Entire seqB should be prefilled since it's a full prefix cache hit. + - SeqC would be partially prefilled with the prefix shared, and the + remaining unique tokens would be prefilled (rounded down to be + block-size aligned). + """ + + block_size = 2 + max_num_batched_tokens = 4 + max_seq_group = 3 + scheduler = initialize_scheduler( + block_size=block_size, + num_cpu_blocks=16, + num_gpu_blocks=16, + max_token_budget=max_num_batched_tokens, + max_num_seqs=max_seq_group, + max_model_len=100, + enable_prefix_caching=True, + enable_chunked_prefill=True, + ) + + seqA_tokens = list(range(8)) + seqB_tokens = seqA_tokens + seqC_shared_prefix_len = 4 + seqC_tokens = seqA_tokens[:seqC_shared_prefix_len] + list(range(12, 20)) + + seqA, seqA_group = create_dummy_prompt("0", + prompt_tokens=seqA_tokens, + block_size=block_size) + seqB, seqB_group = create_dummy_prompt("1", + prompt_tokens=seqB_tokens, + block_size=block_size) + + # Chunked prefill seqA. + scheduler.add_seq_group(seqA_group) + metas, out = schedule_and_update_computed_tokens(scheduler) + assert len(out.scheduled_seq_groups) == 1 + assert out.scheduled_seq_groups[0].seq_group == seqA_group + assert out.scheduled_seq_groups[0].token_chunk_size == 4 + + # seqB should not be scheduled with ongoing prefills. + scheduler.add_seq_group(seqB_group) + metas, out = schedule_and_update_computed_tokens(scheduler) + assert len(out.scheduled_seq_groups) == 1 + assert out.scheduled_seq_groups[0].seq_group == seqA_group + assert out.scheduled_seq_groups[0].token_chunk_size == 4 + + # both seqB and seqC can now be scheduled with seqA is over. + # seqA is in decoding phase. 
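The tail of this test (continued below) fixes how a partially cached prompt is chunked when chunked prefill and prefix caching are combined: seqC's 4 cached prefix tokens are free, the budget left after seqA's decode is 4 - 1 = 3 uncached tokens, and that is rounded down to the 2-token block size, giving a 4 + 2 = 6-token chunk, exactly what the assertion message below spells out. A sketch of that arithmetic; the rounding rule is inferred from the assertion message, not quoted from the scheduler code.

```python
# Chunk-size arithmetic for seqC in the test below: cached tokens are free,
# the uncached remainder is capped by the leftover budget and rounded down
# to a block boundary (rule inferred from the assertion message).

def prefill_chunk(prompt_len, num_cached, budget_left, block_size):
    uncached = min(prompt_len - num_cached, budget_left)
    uncached -= uncached % block_size          # block-size align, round down
    return num_cached + uncached

block_size, budget = 2, 4                      # from the test setup
budget_left = budget - 1                       # 1 token goes to seqA's decode

assert prefill_chunk(prompt_len=12, num_cached=4,
                     budget_left=budget_left,
                     block_size=block_size) == 6
```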
+ append_new_token_seq(seqA, 999) + seqC, seqC_group = create_dummy_prompt("2", + prompt_tokens=seqC_tokens, + block_size=block_size) + scheduler.add_seq_group(seqC_group) + metas, out = schedule_and_update_computed_tokens(scheduler) + assert len(out.scheduled_seq_groups) == 3 + + metas = {meta.request_id: meta for meta in metas} + assert metas[seqA_group.request_id].token_chunk_size == 1 # Decode + assert (metas[seqB_group.request_id].token_chunk_size == 8 + ) # Fully cached prefill + assert ( + metas[seqC_group.request_id].token_chunk_size == 6 + ), "A partial prefix of C (4 tokens) should be prefilled, with the " + "remaining tokens fit into 3 token budget (4-1 from the seqA). It will " + "then be rounded down to 2 tokens on block size, thus 6 tokens in total." diff --git a/tests/core/utils.py b/tests/core/utils.py index cd0caa4704e11..277368b57b938 100644 --- a/tests/core/utils.py +++ b/tests/core/utils.py @@ -1,17 +1,20 @@ import time -from typing import List, Optional +from collections import defaultdict +from typing import Any, Dict, List, Optional from typing import Sequence as GenericSequence from typing import Tuple from vllm import SamplingParams +from vllm.core.scheduler import Scheduler, SchedulerOutputs from vllm.inputs import EncoderDecoderInputs, token_inputs from vllm.lora.request import LoRARequest -from vllm.sequence import Logprob, Sequence, SequenceGroup +from vllm.sequence import (Logprob, Sequence, SequenceGroup, + SequenceGroupMetadata) def create_dummy_prompt( request_id: str, - prompt_length: int, + prompt_length: int = -1, block_size: Optional[int] = None, lora_request: Optional[LoRARequest] = None, best_of: int = 1, @@ -26,6 +29,7 @@ def create_dummy_prompt( # Create dummy prompt sequence with tokens 0...block_size-1 # and prompt "0 ... block_size". prompt_tokens = list(range(prompt_length)) + prompt_str = " ".join([str(t) for t in prompt_tokens]) prompt = Sequence(int(request_id), inputs=token_inputs(prompt_tokens, prompt=prompt_str), @@ -42,6 +46,15 @@ def create_dummy_prompt( return prompt, seq_group +def create_dummy_sequence(request_id: int, token_ids: List[int], + block_size: int) -> Sequence: + return Sequence( + seq_id=request_id, + inputs=token_inputs(token_ids), + block_size=block_size, + ) + + def create_dummy_prompt_encoder_decoder( request_id: str, decoder_prompt_length: int, @@ -194,12 +207,40 @@ def append_new_token(out, token_id: int): def schedule_and_update_computed_tokens(scheduler): metas, out, _ = scheduler.schedule() - for s, meta in zip(out.scheduled_seq_groups, metas): - s.seq_group.update_num_computed_tokens(meta.token_chunk_size) + for s in out.scheduled_seq_groups: + s.seq_group.update_num_computed_tokens(s.token_chunk_size) return metas, out +def append_new_token_seq(seq: Sequence, token_id: int): + seq.append_token_id(token_id, {token_id: Logprob(token_id)}) + + def append_new_token_seq_group(token_chunk_size, seq_group, token_id: int): seq_group.update_num_computed_tokens(token_chunk_size) for seq in seq_group.get_seqs(): seq.append_token_id(token_id, {token_id: Logprob(token_id)}) + + +class SchedulerProxy: + """ + A proxy class to forward calls to the scheduler. 
+ """ + + def __init__(self, scheduler: Scheduler): + self.scheduler_ = scheduler + self.call_history: Dict[str, List[Any]] = defaultdict(list) + + def __getattr__(self, name: str) -> Any: + + def wrapper(*args, **kwargs): + result = getattr(self.scheduler_, name)(*args, **kwargs) + self.call_history[name].append((args, kwargs, result)) + return result + + return wrapper + + def last_schedule_ret( + self, ) -> Tuple[List[SequenceGroupMetadata], SchedulerOutputs, Any]: + _, _, ret = self.call_history["schedule"][-1] + return ret diff --git a/tests/prefix_caching/test_prefix_caching.py b/tests/prefix_caching/test_prefix_caching.py index 50723dbb610ac..8d16710f14585 100644 --- a/tests/prefix_caching/test_prefix_caching.py +++ b/tests/prefix_caching/test_prefix_caching.py @@ -2,10 +2,15 @@ Run `pytest tests/prefix_caching/test_prefix_caching.py`. """ + import pytest +from tests.conftest import VllmRunner +from tests.core.utils import SchedulerProxy, create_dummy_prompt from tests.kernels.utils import override_backend_env_variable from vllm import SamplingParams, TokensPrompt +from vllm.core.scheduler import Scheduler +from vllm.engine.llm_engine import LLMEngine from ..models.utils import check_outputs_equal @@ -27,6 +32,7 @@ @pytest.mark.parametrize("dtype", ["half"]) @pytest.mark.parametrize("max_tokens", [5]) @pytest.mark.parametrize("cached_position", [0, 1]) +@pytest.mark.parametrize("enable_chunked_prefill", [True, False]) @pytest.mark.parametrize("block_size", [16]) def test_mixed_requests( hf_runner, @@ -37,6 +43,7 @@ def test_mixed_requests( dtype: str, max_tokens: int, cached_position: int, + enable_chunked_prefill: bool, block_size: int, monkeypatch, ) -> None: @@ -55,6 +62,7 @@ def test_mixed_requests( model, dtype=dtype, enable_prefix_caching=True, + enable_chunked_prefill=enable_chunked_prefill, block_size=block_size, ) as vllm_model: # Run the first prompt so the cache is populated @@ -72,13 +80,13 @@ def test_mixed_requests( block_size) * block_size else: expected_num_cached_tokens = 0 - assert req_outputs[ - i].num_cached_tokens == expected_num_cached_tokens + assert ( + req_outputs[i].num_cached_tokens == expected_num_cached_tokens) - vllm_outputs = [ - (output.prompt_token_ids + list(output.outputs[0].token_ids), - output.prompt + output.outputs[0].text) for output in req_outputs - ] + vllm_outputs = [( + output.prompt_token_ids + list(output.outputs[0].token_ids), + output.prompt + output.outputs[0].text, + ) for output in req_outputs] check_outputs_equal( outputs_0_lst=hf_outputs, @@ -105,3 +113,89 @@ def test_unstable_prompt_sequence( for prompt in UNSTABLE_PROMPT_SEQUENCE: vllm_model.generate(TokensPrompt(prompt_token_ids=prompt), SamplingParams(max_tokens=1)) + + +@pytest.mark.parametrize("model", MODELS) +def test_fully_cached_prefill_needs_uncached_token(model): + block_size = 16 + max_num_batched_tokens = 16 + num_output_tokens = 5 + # Make a vllm engine + runner = VllmRunner( + model_name=model, + gpu_memory_utilization=0.7, + enable_chunked_prefill=True, + enforce_eager=True, + enable_prefix_caching=True, + block_size=block_size, + max_num_batched_tokens=max_num_batched_tokens, + max_num_seqs=max_num_batched_tokens, + ) + engine: LLMEngine = runner.model.llm_engine + + scheduler: Scheduler = SchedulerProxy(engine.scheduler[0]) # type: ignore + engine.scheduler[0] = scheduler + + # SeqA + seqA_tokens = list(range(2 * block_size)) + seqA, seq_groupA = create_dummy_prompt( + request_id="0", + prompt_tokens=seqA_tokens, + max_tokens=num_output_tokens, + 
block_size=block_size, + ) + + scheduler.add_seq_group(seq_groupA) + + assert seqA.data.get_num_computed_tokens() == 0 + + # Prefill seqA + while not seqA.is_finished(): + engine.step() + + # seqB + seqB_tokens = [t + 1 for t in seqA_tokens] # shift by 1 + seqB, seq_groupB = create_dummy_prompt( + request_id="1", + prompt_tokens=seqB_tokens, + max_tokens=num_output_tokens, + block_size=block_size, + ) + + # seqC is the same as seqA + seqC, seq_groupC = create_dummy_prompt( + request_id="2", + prompt_tokens=seqA_tokens, + max_tokens=num_output_tokens, + block_size=block_size, + ) + + scheduler.add_seq_group(seq_groupB) + scheduler.add_seq_group(seq_groupC) + + # Even seqC is fully cached, it should not be prefilled since we + # require at least 1 uncached token. + engine.step() + + sched_metas, sched_out, _ = scheduler.last_schedule_ret() + assert len(sched_out.scheduled_seq_groups) == 1 + assert (sched_out.scheduled_seq_groups[0].seq_group.request_id == + seq_groupB.request_id) + assert (sched_out.scheduled_seq_groups[0].token_chunk_size == + max_num_batched_tokens) + + # When seqB is finished, seqC could be prefilled. + while not seqB.is_finished(): + engine.step() + sched_metas, sched_out, _ = scheduler.last_schedule_ret() + assert len(sched_out.scheduled_seq_groups) == 1 + assert (sched_out.scheduled_seq_groups[0].seq_group.request_id == + seq_groupB.request_id) + + engine.step() + sched_metas, sched_out, _ = scheduler.last_schedule_ret() + assert len(sched_out.scheduled_seq_groups) == 1 + assert (sched_out.scheduled_seq_groups[0].seq_group.request_id == + seq_groupC.request_id) + assert sched_out.scheduled_seq_groups[0].token_chunk_size == len( + seqA_tokens) diff --git a/vllm/core/block/cpu_gpu_block_allocator.py b/vllm/core/block/cpu_gpu_block_allocator.py index 9727f6e19b84e..3197af3c2b7a4 100644 --- a/vllm/core/block/cpu_gpu_block_allocator.py +++ b/vllm/core/block/cpu_gpu_block_allocator.py @@ -306,14 +306,6 @@ def mark_blocks_as_computed(self, block_ids: List[int]) -> None: device = Device.GPU return self._allocators[device].mark_blocks_as_computed(block_ids) - def get_computed_block_ids(self, prev_computed_block_ids: List[int], - block_ids: List[int], - skip_last_block_id: bool) -> List[int]: - # Prefix caching only supported on GPU. - device = Device.GPU - return self._allocators[device].get_computed_block_ids( - prev_computed_block_ids, block_ids, skip_last_block_id) - def get_common_computed_block_ids( self, computed_seq_block_ids: List[List[int]]) -> List[int]: # Prefix caching only supported on GPU. 
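# The engine test above inspects scheduling decisions through SchedulerProxy,
# which forwards attribute access via __getattr__ and records every call made
# to the wrapped Scheduler. A minimal, self-contained sketch of that
# recording-proxy idea (illustrative only, not part of the patch; the class
# and method names below are made up for the example):
from collections import defaultdict
from typing import Any, Dict, List


class RecordingProxy:
    """Wrap any object and remember (args, kwargs, result) per method call."""

    def __init__(self, target: Any):
        self._target = target
        self.call_history: Dict[str, List[Any]] = defaultdict(list)

    def __getattr__(self, name: str) -> Any:
        attr = getattr(self._target, name)
        if not callable(attr):
            return attr

        def wrapper(*args, **kwargs):
            result = attr(*args, **kwargs)
            self.call_history[name].append((args, kwargs, result))
            return result

        return wrapper


class Counter:

    def __init__(self) -> None:
        self.value = 0

    def add(self, n: int) -> int:
        self.value += n
        return self.value


proxy = RecordingProxy(Counter())
proxy.add(2)
proxy.add(3)
# The last recorded call carries its arguments and return value, which is how
# the test reads back the most recent SchedulerOutputs.
assert proxy.call_history["add"][-1] == ((3,), {}, 5)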
@@ -342,6 +334,13 @@ def get_and_reset_swaps(self) -> List[Tuple[int, int]]: self._swap_mapping.clear() return list(mapping.items()) + def find_cached_blocks_prefix( + self, + block_hashes: List[int], + device: Device = Device.GPU, + ) -> List[int]: + return self._allocators[device].find_cached_blocks_prefix(block_hashes) + class NullBlock(Block): """ diff --git a/vllm/core/block/interfaces.py b/vllm/core/block/interfaces.py index 72bbab1dcea5d..06f4851af3466 100644 --- a/vllm/core/block/interfaces.py +++ b/vllm/core/block/interfaces.py @@ -159,12 +159,6 @@ def mark_blocks_as_accessed(self, block_ids: List[int], def mark_blocks_as_computed(self, block_ids: List[int]) -> None: pass - @abstractmethod - def get_computed_block_ids(self, prev_computed_block_ids: List[int], - block_ids: List[int], - skip_last_block_id: bool) -> List[int]: - pass - @abstractmethod def get_common_computed_block_ids( self, computed_seq_block_ids: List[List[int]]) -> List[int]: @@ -192,6 +186,13 @@ def get_prefix_cache_hit_rate(self) -> float: class NoFreeBlocksError(ValueError): pass + @abstractmethod + def find_cached_blocks_prefix( + self, + block_hashes: List[int], + ) -> List[int]: + pass + class DeviceAwareBlockAllocator(ABC): @@ -207,9 +208,12 @@ def allocate_immutable_block(self, prev_block: Optional[Block], pass @abstractmethod - def allocate_immutable_blocks(self, prev_block: Optional[Block], - block_token_ids: List[List[int]], - device: Device) -> List[Block]: + def allocate_immutable_blocks( + self, + prev_block: Optional[Block], + block_token_ids: List[List[int]], + device: Device, + ) -> List[Block]: pass @abstractmethod @@ -246,12 +250,6 @@ def mark_blocks_as_accessed(self, block_ids: List[int], def mark_blocks_as_computed(self, block_ids: List[int]) -> None: pass - @abstractmethod - def get_computed_block_ids(self, prev_computed_block_ids: List[int], - block_ids: List[int], - skip_last_block_id: bool) -> List[int]: - pass - @abstractmethod def get_common_computed_block_ids( self, computed_seq_block_ids: List[List[int]]) -> List[int]: @@ -284,3 +282,11 @@ def allocate_or_get_null_block(self) -> Block: def get_prefix_cache_hit_rate(self, device: Device) -> float: """Prefix cache hit rate. -1 means not supported or disabled.""" pass + + @abstractmethod + def find_cached_blocks_prefix( + self, + block_hashes: List[int], + device: Device = Device.GPU, + ) -> List[int]: + pass diff --git a/vllm/core/block/naive_block.py b/vllm/core/block/naive_block.py index 9341a518d11c6..a2af5ad6362c1 100644 --- a/vllm/core/block/naive_block.py +++ b/vllm/core/block/naive_block.py @@ -262,13 +262,6 @@ def mark_blocks_as_computed(self, block_ids: List[int]) -> None: """ pass - def get_computed_block_ids(self, prev_computed_block_ids: List[int], - block_ids: List[int], - skip_last_block_id: bool) -> List[int]: - """No prefix caching here => return empty list - """ - return [] - def get_common_computed_block_ids( self, computed_seq_block_ids: List[List[int]]) -> List[int]: """Determine blocks that can be skipped in prefill. @@ -329,6 +322,10 @@ def swap_in(self, blocks: List[Block]) -> None: def get_prefix_cache_hit_rate(self) -> float: return -1 + def find_cached_blocks_prefix(self, block_hashes: List[int]) -> List[int]: + # Not applicable for naive block allocator. 
+ return [] + class NaiveBlock(Block): """An implementation of the Block class that does not support prefix diff --git a/vllm/core/block/prefix_caching_block.py b/vllm/core/block/prefix_caching_block.py index 57527e39b9bdd..b736167f6ceb4 100644 --- a/vllm/core/block/prefix_caching_block.py +++ b/vllm/core/block/prefix_caching_block.py @@ -1,13 +1,18 @@ """Token blocks.""" +import sys +from bisect import bisect_left from os.path import commonprefix -from typing import Dict, FrozenSet, Iterable, List, Optional, Set, Tuple +from typing import (Callable, Dict, FrozenSet, Iterable, List, Optional, Set, + Tuple) from vllm.core.block.common import (CacheMetricData, CopyOnWriteTracker, get_all_blocks_recursively) -from vllm.core.block.interfaces import Block, BlockAllocator, BlockId, Device +from vllm.core.block.interfaces import (Block, BlockAllocator, BlockId, Device, + DeviceAwareBlockAllocator) from vllm.core.block.naive_block import (BlockPool, NaiveBlock, NaiveBlockAllocator) from vllm.core.evictor import EvictionPolicy, Evictor, make_evictor +from vllm.sequence import Sequence PrefixHash = int @@ -534,26 +539,6 @@ def block_is_computed(self, block_id: int) -> bool: else: return block_id in self.evictor - def get_computed_block_ids(self, - prev_computed_block_ids: List[int], - block_ids: List[int], - skip_last_block_id: bool = True) -> List[int]: - prev_prefix_size = len(prev_computed_block_ids) - cur_size = len(block_ids) - if skip_last_block_id: - cur_size -= 1 - - # Sanity checks - assert cur_size >= 0 - assert prev_prefix_size <= cur_size - - ret = prev_computed_block_ids - for i in range(prev_prefix_size, cur_size): - block_id = block_ids[i] - if self.block_is_computed(block_id): - ret.append(block_id) - return ret - def get_common_computed_block_ids( self, computed_seq_block_ids: List[List[int]]) -> List[int]: """Return the block ids that are common for a given sequence group. @@ -634,6 +619,47 @@ def swap_in(self, blocks: List[Block]) -> None: block.block_id = block_id # Assign block_id + def find_cached_blocks_prefix(self, block_hashes: List[int]) -> List[int]: + """ + Given a list of block hashes, return the prefix of the block hashes that + are all cached. + + Since a block's block hash includes the hashes of all previous blocks, + and we only allocate/deallocate blocks in the entire sequence, so if a + block is cached, then all previous blocks are also cached. With this + property, we can use binary search to find the prefix of cached blocks. + + Args: + block_hashes (List[int]): The list of block hashes. + + Returns: + List[int]: The prefix of the `block_hashes` that are cached. + """ + + def _block_is_cached(block_hash: PrefixHash) -> bool: + if block_hash not in self._cached_blocks: + return False + + cached_block_id = self._cached_blocks[block_hash] + # We only consider the blocks that are marked as computed. + return self.block_is_computed(cached_block_id) + + def _bisect_left(a, x, key: Callable[[PrefixHash], bool]) -> int: + + # python <= 3.10 don't have the key argument + if sys.version_info < (3, 10): + a = [key(e) for e in a] + return bisect_left(a, x) + else: + return bisect_left(a, x, key=key) + + # Look for the first block that's not cached, and returns the prefix + # i.e. blocks that are cached. + idx = _bisect_left(block_hashes, + True, + key=lambda x: not _block_is_cached(x)) + return block_hashes[:idx] + class PrefixCachingBlock(Block): """A block implementation that supports prefix caching. 
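# find_cached_blocks_prefix above relies on the invariant that cached full
# blocks always form a prefix of a sequence's block hashes, so the first
# uncached hash can be located by binary search. A tiny standalone demo of the
# bisect_left(..., key=...) trick (key= requires Python 3.10+; older versions
# must materialize the keys first, as in the fallback shim above). The hash
# values and cached set below are made up for illustration.
from bisect import bisect_left

block_hashes = [101, 202, 303, 404, 505]
cached = {101, 202, 303}  # cached blocks are always a prefix

# False sorts before True, so searching for True finds the first uncached hash.
idx = bisect_left(block_hashes, True, key=lambda h: h not in cached)
assert block_hashes[:idx] == [101, 202, 303]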
@@ -843,86 +869,126 @@ def hash_block_tokens(is_first_block: bool, prev_block_hash: Optional[int], class ComputedBlocksTracker: - """Handles caching of per-sequence computed block ids. - When a sequence appears for the first time, it traverses all of the - blocks and detects the prefix of blocks that is computed. On the - subsequent times, it only traverses the new blocks that were added - and updates the already recorded prefix of blocks with the newly - computed blocks. - - To avoid redundant traversals, the algorithm also detects when there - is a "gap" in the computed prefix. For example, if we have blocks = - [1,2,3,4,5], and we have detected [1,2,3] as the computed prefix, then - we won't try to add more computed blocks to [1,2,3] in this sequence - iteration, and will add more computed blocks only after the sequence is - freed and reused again. - - Note that currently, for a given sequence, we also skip the last - block id for caching purposes, to avoid caching of a full sequence """ + Tracks the computed blocks for each sequence. - def __init__(self, allocator): - self._allocator = allocator - self._cached_computed_seq_blocks: Dict[int, Tuple[List[int], - bool]] = {} + Internally, it maintains a map from sequence id to the list of block hashes + for the sequence. We cache the hashes of the full blocks for each sequence, + and make sure the hash is calculated in the same way as the allocator. + When a sequence is being decoded, we also update the sequence's hash + accordingly and incrementally. - def add_seq(self, seq_id: int) -> None: - """Start tracking seq_id - """ - assert seq_id not in self._cached_computed_seq_blocks - self._cached_computed_seq_blocks[seq_id] = ([], False) - - def remove_seq(self, seq_id: int) -> None: - """Stop tracking seq_id - """ - assert seq_id in self._cached_computed_seq_blocks - del self._cached_computed_seq_blocks[seq_id] - - def get_cached_computed_blocks_and_update( - self, seq_id: int, block_ids: List[int]) -> List[int]: - """ Look at the class documentation for details - """ - # Ensure seq_id is already tracked - assert seq_id in self._cached_computed_seq_blocks - - # Get cached data (may be empty on the first time) - prev_computed_block_ids, has_gap = self._cached_computed_seq_blocks[ - seq_id] - - if has_gap: - # When gap is detected, we do not add more computed blocks at this - # sequence iteration - return prev_computed_block_ids - - # We do not consider the last block id for caching purposes. - num_cur_blocks = len(block_ids) - 1 - assert num_cur_blocks >= 0 - - if len(prev_computed_block_ids) >= num_cur_blocks: - # Cache HIT - assert len(prev_computed_block_ids) == num_cur_blocks - return prev_computed_block_ids - - # If here, then we may possibly add more computed blocks. As a result, - # traverse the additional blocks after prev_computed_block_ids to - # detect more computed blocks and add them. - - # Incremental init for seq_id => Look only at the new blocks - computed_block_ids = self._allocator.get_computed_block_ids( # noqa: E501 - prev_computed_block_ids, - block_ids, - skip_last_block_id= - True, # We skip last block id to avoid caching of full seq - ) + From the sequence hash, with prefix caching enabled, we could also calculate + the number of cached tokens for the sequence by looking up the number of + cached block hashes in the allocator. 
+ """ - # Detect if there is a "gap" - has_gap = len(computed_block_ids) < num_cur_blocks + def __init__( + self, + allocator: DeviceAwareBlockAllocator, + block_size: int, + enable_caching: bool, + ): + self._allocator = allocator + self._block_size = block_size + self._enable_caching = enable_caching + + # A map from seq_id to the list of block hashes for the + # sequence. This is so that we don't have to recompute the block hashes + # for the sequence when we need to check if the sequence is cached. + # Note a block that's not full will not have its hash calculated and + # recorded. + self._seq_id_to_blocks_hashes: Dict[int, List[int]] = {} + + # A map from seq_id to the number of tokens that are cached for the + # sequence. + # We need this so that a sequence in continuous prefill doesn't + # accidentally see its cached token count change. See comments in + # `get_num_cached_tokens` for more details. + self._seq_id_to_num_tokens_computed: Dict[int, int] = {} + + def _update_seq_hashes(self, seq: Sequence) -> None: + """Incrementally update the sequence's block hashes and record them.""" + assert self._enable_caching + + block_hashes_recorded = self._seq_id_to_blocks_hashes.get( + seq.seq_id, []) + cur_num_blocks_recorded = len(block_hashes_recorded) + token_ids = seq.get_token_ids() + assert len(token_ids) >= cur_num_blocks_recorded * self._block_size, ( + f"The sequence has {len(token_ids)} tokens, but" + f" already recorded {cur_num_blocks_recorded} blocks. " + "This should not happen since we assume blocks are " + "only appended other than recomputation. When the sequence is " + "recomputed, we should have removed the info of the old blocks.") + # Update the computed block hashes for the sequence. Since only full + # blocks are considered as "computed", we take floor here. + num_computed_blocks = len(token_ids) // self._block_size + + # We need to know the hash of the previous block to compute the hash of + # the current block so that blocks could be uniquely identified across + # sequences of prefixes. + prev_block_hash = (None if cur_num_blocks_recorded == 0 else + block_hashes_recorded[-1]) + # Only update the computed block hashes for the new blocks + for i in range(cur_num_blocks_recorded, num_computed_blocks): + assert len(token_ids) >= (i + 1) * self._block_size + block_token_ids = token_ids[i * self._block_size:(i + 1) * + self._block_size] + # This has to be kept in sync with the allocator's hash + # calculation. + block_hash = PrefixCachingBlock.hash_block_tokens( + is_first_block=prev_block_hash is None, + prev_block_hash=prev_block_hash, + cur_block_token_ids=block_token_ids, + ) + block_hashes_recorded.append(block_hash) + prev_block_hash = block_hash + + self._seq_id_to_blocks_hashes[seq.seq_id] = block_hashes_recorded + + def get_num_cached_tokens(self, seq: Sequence) -> int: + if not self._enable_caching: + return 0 + + # We always try to update the sequence hashes on the fly. + # This is to ensure that we don't miss any cached tokens for the + # sequence during decode. + # This routine should only update hash for any new blocks too. + self._update_seq_hashes(seq) + + num_computed_tokens_prev = self._seq_id_to_num_tokens_computed.get( + seq.seq_id, None) + + # TODO(rickyx): This hack could be removed once we mark blocks as + # computed correctly with chunked prefills. + if num_computed_tokens_prev is not None and seq.is_prefill(): + # For a sequence that is still in prefill, we don't + # recompute the number of cached tokens. 
+ # This also handles correctly chunked prefill since currently + # we mark blocks as computed even if the sequence is still partially + # prefilled. So a continuously prefilled sequence should not + # see its cached token count change while running. + return num_computed_tokens_prev + + block_hashes = self._seq_id_to_blocks_hashes[seq.seq_id] + + # This is O(logN), where N is the number of blocks. + num_cached_blocks = len( + self._allocator.find_cached_blocks_prefix(block_hashes)) + num_cached_tokens = num_cached_blocks * self._block_size + self._seq_id_to_num_tokens_computed[seq.seq_id] = num_cached_tokens + return num_cached_tokens - # Record - self._cached_computed_seq_blocks[seq_id] = (computed_block_ids, - has_gap) + def remove_seq(self, seq_id: int) -> None: + """Stop tracking the sequence.""" + if not self._enable_caching: + return + assert seq_id in self._seq_id_to_blocks_hashes + del self._seq_id_to_blocks_hashes[seq_id] - return computed_block_ids + assert seq_id in self._seq_id_to_num_tokens_computed + del self._seq_id_to_num_tokens_computed[seq_id] class LastAccessBlocksTracker: diff --git a/vllm/core/block_manager.py b/vllm/core/block_manager.py index 21f4c63b6572d..209487c6b4f9e 100644 --- a/vllm/core/block_manager.py +++ b/vllm/core/block_manager.py @@ -101,7 +101,7 @@ def __init__( self.cross_block_tables: Dict[EncoderSeqId, BlockTable] = {} self._computed_blocks_tracker = ComputedBlocksTracker( - self.block_allocator) + self.block_allocator, self.block_size, self.enable_caching) self._last_access_blocks_tracker = LastAccessBlocksTracker( self.block_allocator) @@ -170,7 +170,6 @@ def allocate(self, seq_group: SequenceGroup) -> None: self.block_tables[seq.seq_id] = block_table # Track seq - self._computed_blocks_tracker.add_seq(seq.seq_id) self._last_access_blocks_tracker.add_seq(seq.seq_id) # Assign the block table for each sequence. @@ -178,7 +177,6 @@ def allocate(self, seq_group: SequenceGroup) -> None: self.block_tables[seq.seq_id] = block_table.fork() # Track seq - self._computed_blocks_tracker.add_seq(seq.seq_id) self._last_access_blocks_tracker.add_seq(seq.seq_id) # Allocate cross-attention block table for encoder sequence @@ -314,11 +312,13 @@ def get_common_computed_block_ids( """ computed_seq_block_ids = [] for seq in seqs: - computed_seq_block_ids.append( - self._computed_blocks_tracker. - get_cached_computed_blocks_and_update( - seq.seq_id, - self.block_tables[seq.seq_id].physical_block_ids)) + all_blocks = self.block_tables[seq.seq_id].physical_block_ids + num_cached_tokens = ( + self._computed_blocks_tracker.get_num_cached_tokens(seq)) + assert num_cached_tokens % self.block_size == 0 + num_cached_blocks = num_cached_tokens // self.block_size + computed_block_ids = all_blocks[:num_cached_blocks] + computed_seq_block_ids.append(computed_block_ids) # NOTE(sang): This assumes seq_block_ids doesn't contain any None. 
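# The ComputedBlocksTracker above records one chained hash per *full* block:
# each block's hash folds in the previous block's hash, so two sequences that
# share a token prefix produce identical leading hashes, which is what lets
# the block manager slice the first num_cached_tokens // block_size physical
# blocks as "computed". A minimal sketch of the chaining idea; the plain
# hash() call below merely stands in for the allocator's
# PrefixCachingBlock.hash_block_tokens and is not the real hashing scheme.
from typing import List, Optional


def chained_block_hashes(token_ids: List[int], block_size: int) -> List[int]:
    hashes: List[int] = []
    prev_hash: Optional[int] = None
    # Only full blocks get a hash; a trailing partial block is skipped.
    for start in range(0, len(token_ids) - block_size + 1, block_size):
        block = tuple(token_ids[start:start + block_size])
        prev_hash = hash((prev_hash, block))
        hashes.append(prev_hash)
    return hashes


seq_a = list(range(10))          # 2 full blocks + 2 leftover tokens
seq_b = list(range(8)) + [7, 7]  # same first 2 blocks, different tail
a_hashes = chained_block_hashes(seq_a, block_size=4)
b_hashes = chained_block_hashes(seq_b, block_size=4)
assert a_hashes == b_hashes  # shared 8-token prefix -> identical block hashes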
return self.block_allocator.get_common_computed_block_ids( @@ -332,7 +332,6 @@ def fork(self, parent_seq: Sequence, child_seq: Sequence) -> None: self.block_tables[child_seq.seq_id] = src_block_table.fork() # Track child seq - self._computed_blocks_tracker.add_seq(child_seq.seq_id) self._last_access_blocks_tracker.add_seq(child_seq.seq_id) def can_swap_in(self, seq_group: SequenceGroup, @@ -503,3 +502,9 @@ def _can_swap(self, return AllocStatus.OK else: return AllocStatus.LATER + + def get_num_cached_tokens(self, seq: Sequence) -> int: + """Get the number of tokens in blocks that are already computed and + cached in the block manager for the sequence. + """ + return self._computed_blocks_tracker.get_num_cached_tokens(seq) diff --git a/vllm/core/interfaces.py b/vllm/core/interfaces.py index 9501a516bf020..b10b8d3f4a5bf 100644 --- a/vllm/core/interfaces.py +++ b/vllm/core/interfaces.py @@ -121,3 +121,7 @@ def mark_blocks_as_computed(self, seq_group: SequenceGroup, def get_prefix_cache_hit_rate(self, device: Device) -> float: """Prefix cache hit rate. -1 means not supported or disabled.""" pass + + @abstractmethod + def get_num_cached_tokens(self, seq: Sequence) -> int: + pass diff --git a/vllm/core/placeholder_block_space_manager.py b/vllm/core/placeholder_block_space_manager.py index a337392bbed53..26d42b7f1790e 100644 --- a/vllm/core/placeholder_block_space_manager.py +++ b/vllm/core/placeholder_block_space_manager.py @@ -89,3 +89,6 @@ def mark_blocks_as_computed(self, seq_group: SequenceGroup, def get_prefix_cache_hit_rate(self, device: Device) -> float: return -1 + + def get_num_cached_tokens(self, seq: Sequence) -> int: + return 0 diff --git a/vllm/core/scheduler.py b/vllm/core/scheduler.py index af4671ec29be9..841e65c488fc6 100644 --- a/vllm/core/scheduler.py +++ b/vllm/core/scheduler.py @@ -56,11 +56,16 @@ class SchedulingBudget: max_num_seqs: int _request_ids_num_batched_tokens: Set[str] = field(default_factory=set) _request_ids_num_curr_seqs: Set[str] = field(default_factory=set) + # Number of cached tokens in the batch. + _num_cached_tokens: int = 0 + # Number of actual non-cached tokens in the batch. _num_batched_tokens: int = 0 _num_curr_seqs: int = 0 def can_schedule(self, *, num_new_tokens: int, num_new_seqs: int): - assert num_new_tokens != 0 + # We allow num_new_tokens to be 0 when the entire sequence has + # been cached. 
+ assert num_new_tokens >= 0 assert num_new_seqs != 0 return (self.num_batched_tokens + num_new_tokens <= self.token_budget and self.num_curr_seqs + num_new_seqs <= self.max_num_seqs) @@ -68,12 +73,18 @@ def can_schedule(self, *, num_new_tokens: int, num_new_seqs: int): def remaining_token_budget(self): return self.token_budget - self.num_batched_tokens - def add_num_batched_tokens(self, req_id: str, num_batched_tokens: int): + def add_num_batched_tokens(self, + req_id: str, + num_batched_tokens: int, + num_cached_tokens: int = 0): if req_id in self._request_ids_num_batched_tokens: return + assert num_cached_tokens >= 0 + assert num_batched_tokens >= 0 self._request_ids_num_batched_tokens.add(req_id) self._num_batched_tokens += num_batched_tokens + self._num_cached_tokens += num_cached_tokens def subtract_num_batched_tokens(self, req_id: str, num_batched_tokens: int): @@ -101,6 +112,10 @@ def num_batched_tokens(self): def num_curr_seqs(self): return self._num_curr_seqs + @property + def num_cached_tokens(self): + return self._num_cached_tokens + @dataclass class ScheduledSequenceGroup: @@ -541,9 +556,19 @@ def _schedule_running( assert len(self._async_stopped) == 0 while running_queue: seq_group = running_queue[0] - num_running_tokens = self._get_num_new_tokens( - seq_group, SequenceStatus.RUNNING, enable_chunking, budget) - + # We discard the cached tokens info here because we don't need it + # for running sequence: + # 1. If a sequence is running with chunked prefill, the cached + # tokens info was already used for the first prefill. + # 2. If a sequence is running with non-chunked prefill, then + # there it's a decoding sequence, and the cached tokens info is + # irrelevant. + num_uncached_new_tokens, _ = ( + self._get_num_new_uncached_and_cached_tokens( + seq_group, SequenceStatus.RUNNING, enable_chunking, + budget)) + + num_running_tokens = num_uncached_new_tokens if num_running_tokens == 0: # No budget => Stop break @@ -715,13 +740,15 @@ def _schedule_swapped( # The total number of sequences in the RUNNING state should not # exceed the maximum number of sequences. 
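# The SchedulingBudget changes above charge only *uncached* tokens against the
# per-step token budget, while cached tokens are tracked separately and added
# back when reporting the batch size. A compact, simplified sketch of that
# bookkeeping (the real SchedulingBudget also tracks sequence counts and is
# not reproduced here):
from dataclasses import dataclass, field
from typing import Set


@dataclass
class TinyBudget:
    token_budget: int
    _request_ids: Set[str] = field(default_factory=set)
    _num_batched_tokens: int = 0   # uncached tokens actually computed
    _num_cached_tokens: int = 0    # tokens served from the prefix cache

    def can_schedule(self, num_new_tokens: int) -> bool:
        # num_new_tokens may be 0 for a fully cached sequence.
        assert num_new_tokens >= 0
        return self._num_batched_tokens + num_new_tokens <= self.token_budget

    def add(self, req_id: str, num_batched: int, num_cached: int = 0) -> None:
        if req_id in self._request_ids:
            return
        self._request_ids.add(req_id)
        self._num_batched_tokens += num_batched
        self._num_cached_tokens += num_cached

    @property
    def total_scheduled_tokens(self) -> int:
        return self._num_batched_tokens + self._num_cached_tokens


budget = TinyBudget(token_budget=4)
budget.add("decode", num_batched=1)                        # running decode
budget.add("cached_prefill", num_batched=1, num_cached=7)  # mostly cached
assert budget.can_schedule(2) and not budget.can_schedule(3)
assert budget.total_scheduled_tokens == 9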
num_new_seqs = seq_group.get_max_num_running_seqs() - num_new_tokens = self._get_num_new_tokens(seq_group, - SequenceStatus.SWAPPED, - enable_chunking, budget) - - if (num_new_tokens == 0 - or not budget.can_schedule(num_new_tokens=num_new_tokens, - num_new_seqs=num_new_seqs)): + num_new_tokens_uncached, num_new_tokens_cached = ( + self._get_num_new_uncached_and_cached_tokens( + seq_group, SequenceStatus.SWAPPED, enable_chunking, + budget)) + + if num_new_tokens_uncached == 0 or not budget.can_schedule( + num_new_tokens=num_new_tokens_uncached, + num_new_seqs=num_new_seqs, + ): break if lora_int_id > 0 and curr_loras is not None: @@ -732,12 +759,19 @@ def _schedule_swapped( is_prefill = seq_group.is_prefill() if is_prefill: prefill_seq_groups.append( - ScheduledSequenceGroup(seq_group, - token_chunk_size=num_new_tokens)) + ScheduledSequenceGroup( + seq_group, + token_chunk_size=num_new_tokens_uncached + + num_new_tokens_cached, + )) else: decode_seq_groups.append( ScheduledSequenceGroup(seq_group, token_chunk_size=1)) - budget.add_num_batched_tokens(seq_group.request_id, num_new_tokens) + budget.add_num_batched_tokens( + seq_group.request_id, + num_batched_tokens=num_new_tokens_uncached, + num_cached_tokens=num_new_tokens_cached, + ) budget.add_num_seqs(seq_group.request_id, num_new_seqs) swapped_queue.extendleft(leftover_swapped) @@ -803,26 +837,30 @@ def _schedule_priority_preemption( if waiting_queue: seq_group = waiting_queue.popleft() num_new_seqs = seq_group.get_max_num_running_seqs() - num_new_tokens = self._get_num_new_tokens(seq_group, - SequenceStatus.WAITING, - False, budget) + num_new_tokens_uncached, _ = ( + self._get_num_new_uncached_and_cached_tokens( + seq_group, SequenceStatus.WAITING, False, budget)) #Only preempt if priority inversion exists while running_queue and self._get_priority( running_queue[-1]) > self._get_priority(seq_group): #Only preempt if waiting sequence cannot be allocated can_allocate = self.block_manager.can_allocate(seq_group) - if (num_new_tokens and can_allocate == AllocStatus.OK - and budget.can_schedule(num_new_tokens=num_new_tokens, - num_new_seqs=num_new_seqs)): + if (num_new_tokens_uncached > 0 + and can_allocate == AllocStatus.OK + and budget.can_schedule( + num_new_tokens=num_new_tokens_uncached, + num_new_seqs=num_new_seqs, + )): break #Adjust budget to remove the victim sequence group vseq_group = running_queue.pop() - num_running_tokens = self._get_num_new_tokens( - vseq_group, SequenceStatus.RUNNING, False, budget) - budget.subtract_num_batched_tokens(vseq_group.request_id, - num_running_tokens) + num_running_tokens_uncached, _ = ( + self._get_num_new_uncached_and_cached_tokens( + vseq_group, SequenceStatus.RUNNING, False, budget)) + budget.subtract_num_batched_tokens( + vseq_group.request_id, num_running_tokens_uncached) num_running_seqs = vseq_group.get_max_num_running_seqs() budget.subtract_num_seqs(vseq_group.request_id, num_running_seqs) @@ -882,9 +920,12 @@ def _schedule_prefills( assert len(waiting_seqs) == 1, ( "Waiting sequence group should have only one prompt " "sequence.") - num_new_tokens = self._get_num_new_tokens(seq_group, - SequenceStatus.WAITING, - enable_chunking, budget) + num_new_tokens_uncached, num_new_tokens_cached = ( + self._get_num_new_uncached_and_cached_tokens( + seq_group, SequenceStatus.WAITING, enable_chunking, + budget)) + num_new_tokens = num_new_tokens_uncached + num_new_tokens_cached + if not enable_chunking: num_prompt_tokens = waiting_seqs[0].get_len() assert num_new_tokens == num_prompt_tokens @@ 
-935,10 +976,18 @@ def _schedule_prefills( waiting_queue.popleft() continue + if (budget.num_batched_tokens >= + self.scheduler_config.max_num_batched_tokens): + # We've reached the budget limit - since there might be + # continuous prefills in the running queue, we should break + # to avoid scheduling any new prefills. + break + num_new_seqs = seq_group.get_max_num_running_seqs() - if (num_new_tokens == 0 - or not budget.can_schedule(num_new_tokens=num_new_tokens, - num_new_seqs=num_new_seqs)): + if num_new_tokens_uncached == 0 or not budget.can_schedule( + num_new_tokens=num_new_tokens_uncached, + num_new_seqs=num_new_seqs, + ): break # Can schedule this request. @@ -967,7 +1016,11 @@ def _schedule_prefills( seq_groups.append( ScheduledSequenceGroup(seq_group=seq_group, token_chunk_size=num_new_tokens)) - budget.add_num_batched_tokens(seq_group.request_id, num_new_tokens) + budget.add_num_batched_tokens( + seq_group.request_id, + num_batched_tokens=num_new_tokens_uncached, + num_cached_tokens=num_new_tokens_cached, + ) budget.add_num_seqs(seq_group.request_id, num_new_seqs) # Queue requests that couldn't be scheduled. @@ -1075,7 +1128,8 @@ def _schedule_default(self) -> SchedulerOutputs: return SchedulerOutputs( scheduled_seq_groups=scheduled_seq_groups, num_prefill_groups=num_prefill_groups, - num_batched_tokens=budget.num_batched_tokens, + num_batched_tokens=budget.num_batched_tokens + + budget.num_cached_tokens, blocks_to_swap_in=swapped_in.blocks_to_swap_in, blocks_to_swap_out=running_scheduled.blocks_to_swap_out, blocks_to_copy=blocks_to_copy, @@ -1119,7 +1173,6 @@ def _schedule_chunked_prefill(self) -> SchedulerOutputs: running_scheduled.swapped_out) == 0: swapped_in = self._schedule_swapped(budget, curr_loras) - # Schedule new prefills. prefills = self._schedule_prefills(budget, curr_loras, enable_chunking=True) @@ -1157,7 +1210,8 @@ def _schedule_chunked_prefill(self) -> SchedulerOutputs: num_prefill_groups=(len(prefills.seq_groups) + len(swapped_in.prefill_seq_groups) + len(running_scheduled.prefill_seq_groups)), - num_batched_tokens=budget.num_batched_tokens, + num_batched_tokens=budget.num_batched_tokens + + budget.num_cached_tokens, blocks_to_swap_in=swapped_in.blocks_to_swap_in, blocks_to_swap_out=running_scheduled.blocks_to_swap_out, blocks_to_copy=running_scheduled.blocks_to_copy + @@ -1584,64 +1638,178 @@ def _get_num_lookahead_slots(self, is_prefill: bool, return self.scheduler_config.num_lookahead_slots - def _get_num_new_tokens(self, seq_group: SequenceGroup, - status: SequenceStatus, enable_chunking: bool, - budget: SchedulingBudget) -> int: - """Get the next new tokens to compute for a given sequence group - that's in a given `status`. + def _get_num_new_uncached_and_cached_tokens( + self, + seq_group: SequenceGroup, + status: SequenceStatus, + enable_chunking: bool, + budget: SchedulingBudget, + ) -> Tuple[int, int]: + """ + Returns the number of new uncached and cached tokens to schedule for a + given sequence group that's in a given `status`. The API could chunk the number of tokens to compute based on `budget` if `enable_chunking` is True. If a sequence group has multiple sequences (e.g., running beam search), it means it is in decoding phase, so chunking doesn't happen. - Returns 0 if the new token cannot be computed due to token budget. + Returns (0, 0) if the new token cannot be computed due to token budget. + + The cached tokens's blocks are already computed, and the attention + backend will reuse the cached blocks rather than recomputing them. 
So + the scheduler could schedule these cached tokens "for free". + + Args: + seq_group: The sequence group to get the number of new tokens to + schedule. + status: The status of the sequences to get the number of new tokens + to schedule. + enable_chunking: Whether to chunk the number of tokens to compute. + budget: The budget to chunk the number of tokens to compute. + + + Returns: + A tuple of two ints. The first int is the number of new uncached + tokens to schedule. The second int is the number of cached tokens. + If no more new tokens can be scheduled, returns (0, 0). """ - num_new_tokens = 0 + num_cached_new_tokens = 0 + num_uncached_new_tokens = 0 + seqs = seq_group.get_seqs(status=status) + # Compute the number of new uncached and cached tokens for + # each sequence. for seq in seqs: - num_new_tokens += seq.get_num_new_tokens() - assert num_new_tokens > 0 - # Chunk if a running request cannot fit in the given budget. - # If number of seq > 1, it means it is doing beam search - # in a decode phase. Do not chunk. + if not seq.is_prefill(): + # Decode sequences should always just have 1 uncached token + # TODO(rickyx): Actually is this still correct for multi-step? + num_uncached_new_tokens += 1 + continue + + num_computed_tokens_seq = seq.get_num_computed_tokens() + all_num_new_tokens_seq = seq.get_len() - num_computed_tokens_seq + if not self.cache_config.enable_prefix_caching: + # If prefix caching is not enabled, all new tokens are uncached. + num_uncached_new_tokens += all_num_new_tokens_seq + continue + + # NOTE: the cache token might be currently in a block that's in an + # evictor meaning that it's not yet allocated. However, we don't + # exclude such tokens in the cache count because it will be + # guaranteed to be allocated later if the sequence can be allocated. + num_cached_tokens_seq = self.block_manager.get_num_cached_tokens( + seq) + + # Sanity check. + if num_cached_tokens_seq < num_computed_tokens_seq: + # This should only happen with chunked prefill, and + # the seq is still in prefill. The `num_cached_tokens_seq` + # is the value we calculated on scheduling the first prefill. + # For subsequent continuous prefill steps, we cached the + # number of cache tokens for the sequence so the cached token + # count could be less than the number of computed tokens. + # See comments on `ComputedBlocksTracker` for more details. + assert ( + seq.is_prefill() and seq.status == SequenceStatus.RUNNING + and self.scheduler_config.chunked_prefill_enabled + ), ("Number of cached tokens should not be less than the " + "number of computed tokens for a sequence that's still " + f"in prefill. But there are {num_cached_tokens_seq} cached " + f"tokens and {num_computed_tokens_seq} computed tokens " + f"for sequence {seq.seq_id}.") + + num_cached_new_tokens_seq = max( + 0, num_cached_tokens_seq - num_computed_tokens_seq) + num_uncached_new_tokens_seq = (all_num_new_tokens_seq - + num_cached_new_tokens_seq) + + num_uncached_new_tokens += num_uncached_new_tokens_seq + num_cached_new_tokens += num_cached_new_tokens_seq + + if num_uncached_new_tokens == 0 and num_cached_new_tokens > 0: + # For a fully cached hit sequence, we actually need to recompute the + # last token. So we need at least 1 uncached token to schedule. + # See ModelRunner._compute_for_prefix_cache_hit for more details. 
+ num_uncached_new_tokens = 1 + num_cached_new_tokens -= 1 + if enable_chunking and len(seqs) == 1: - remaining_token_budget = budget.remaining_token_budget() - if self.scheduler_config.is_multi_step: - # The current multi-step + chunked prefill capability does - # not actually support chunking prompts. - # - # Therefore, `num_new_tokens` is computed in the same fashion - # for both multi-step+chunked-prefill & - # multi-step+chunked-prefill+APC - # - # Prompts with more tokens than the current remaining budget - # are postponed to future scheduler steps - if num_new_tokens > self._get_prompt_limit(seq_group): - # If the seq_group is in prompt-stage, pass the - # num_new_tokens as-is so the caller can ignore - # the sequence. - pass - else: - num_new_tokens = 0 \ - if num_new_tokens > remaining_token_budget \ - else num_new_tokens - elif self.cache_config.enable_prefix_caching: - # When prefix caching is enabled, we always allocate - # the number of new tokens that is dividable by the block - # size to avoid partial block matching. - block_size = self.cache_config.block_size - remainder = budget.token_budget % block_size - if remainder != 0: - raise ValueError("When enabling chunked prefill and " - "prefix caching, max_num_batched_tokens " - "(chunk size) must be dividable by " - "block size, but got chunk_size " - f"({budget.token_budget}) % block_size " - f"({block_size}) = {remainder}") - if remaining_token_budget < num_new_tokens: - num_new_tokens = (remaining_token_budget // - block_size) * block_size - else: - num_new_tokens = min(num_new_tokens, remaining_token_budget) + # Chunk if a running request cannot fit in the given budget. + # If number of seq > 1, it means it is doing beam search + # in a decode phase. Do not chunk. + num_uncached_new_tokens = self._chunk_new_tokens_to_schedule( + self.scheduler_config, + self.cache_config, + budget, + self._get_prompt_limit(seq_group), + num_uncached_new_tokens, + ) + + return num_uncached_new_tokens, num_cached_new_tokens + + @staticmethod + def _chunk_new_tokens_to_schedule( + scheduler_config: SchedulerConfig, + cache_config: CacheConfig, + budget: SchedulingBudget, + prompt_limit: int, + num_new_tokens: int, + ) -> int: + """ + Chunks the number of new tokens to schedule based on the budget when + chunked prefill is enabled. + + Args: + scheduler_config: The scheduler config. + cache_config: The cache config. + budget: The budget to chunk the number of tokens to compute. + prompt_limit: The maximum number of tokens allowed in a prompt. + num_new_tokens: The number of new tokens to schedule. + + Returns: + The number of new tokens to schedule after chunking. + """ + remaining_token_budget = budget.remaining_token_budget() + if scheduler_config.is_multi_step: + # The current multi-step + chunked prefill capability does + # not actually support chunking prompts. + # + # Therefore, `num_new_tokens` is computed in the same fashion + # for both multi-step+chunked-prefill & + # multi-step+chunked-prefill+APC + # + # Prompts with more tokens than the current remaining budget + # are postponed to future scheduler steps + if num_new_tokens > prompt_limit: + # If the seq_group is in prompt-stage, pass the + # num_new_tokens as-is so the caller can ignore + # the sequence. + return num_new_tokens + + return (0 if num_new_tokens > remaining_token_budget else + num_new_tokens) + + if cache_config.enable_prefix_caching: + # Adjust the remaining token budget to be divisible by the block + # size when prefix caching is enabled. 
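# Two rules from the logic above, shown with concrete numbers in a simplified
# helper (illustrative only, not the scheduler's actual code path): a fully
# cached prompt still needs one uncached token recomputed, and with prefix
# caching the uncached chunk is capped by a budget rounded down to a multiple
# of the block size.
from typing import Tuple


def split_and_chunk(num_prompt_tokens: int, num_cached_tokens: int,
                    remaining_budget: int,
                    block_size: int) -> Tuple[int, int]:
    num_uncached = num_prompt_tokens - num_cached_tokens
    num_cached = num_cached_tokens
    if num_uncached == 0 and num_cached > 0:
        # A fully cached hit still recomputes its last token.
        num_uncached, num_cached = 1, num_cached - 1
    # Round the remaining budget down to the block size before chunking.
    usable_budget = (remaining_budget // block_size) * block_size
    return min(num_uncached, usable_budget), num_cached


# Fully cached 16-token prompt: 1 uncached token is still scheduled.
assert split_and_chunk(16, 16, remaining_budget=4, block_size=4) == (1, 15)
# 12-token prompt with 4 cached: budget 3 rounds down to 2 uncached tokens.
assert split_and_chunk(12, 4, remaining_budget=3, block_size=2) == (2, 4)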
+ + # When prefix caching is enabled, we always allocate + # the number of new tokens that is dividable by the block + # size to avoid partial block matching. + block_size = cache_config.block_size + remainder = budget.token_budget % block_size + if remainder != 0: + raise ValueError("When enabling chunked prefill and " + "prefix caching, max_num_batched_tokens " + "(chunk size) must be dividable by " + "block size, but got chunk_size " + f"({budget.token_budget}) % block_size " + f"({block_size}) = {remainder}") + # Round down to block size. + remaining_token_budget = (remaining_token_budget // block_size * + block_size) + + num_new_tokens = min(num_new_tokens, remaining_token_budget) + return num_new_tokens diff --git a/vllm/sequence.py b/vllm/sequence.py index 3b41d25a2fe42..a1cc8fc3b09de 100644 --- a/vllm/sequence.py +++ b/vllm/sequence.py @@ -579,6 +579,9 @@ def get_num_new_tokens(self) -> int: return 1 return self.data.get_num_uncomputed_tokens() + def get_num_computed_tokens(self) -> int: + return self.data.get_num_computed_tokens() + def is_prefill(self) -> bool: return self.data.stage == SequenceStage.PREFILL From c8acd80548c77bd5d6302353708dd16ea705f031 Mon Sep 17 00:00:00 2001 From: Cyrus Leung Date: Sat, 23 Nov 2024 13:25:09 +0800 Subject: [PATCH 102/122] [2/N] handling placeholders in merged multi-modal processor (#10485) Signed-off-by: DarkLight1337 --- tests/multimodal/test_processing.py | 370 ++++++++++++++ tests/multimodal/test_utils.py | 3 +- vllm/multimodal/inputs.py | 9 +- vllm/multimodal/processing.py | 720 ++++++++++++++++++++++------ vllm/utils.py | 20 +- 5 files changed, 975 insertions(+), 147 deletions(-) create mode 100644 tests/multimodal/test_processing.py diff --git a/tests/multimodal/test_processing.py b/tests/multimodal/test_processing.py new file mode 100644 index 0000000000000..b2367060c6c1b --- /dev/null +++ b/tests/multimodal/test_processing.py @@ -0,0 +1,370 @@ +from typing import cast + +import pytest +from transformers import BatchFeature + +from vllm.multimodal.processing import (PromptReplacement, find_text_matches, + find_token_matches, iter_token_matches, + iter_token_runs, replace_text_matches) +from vllm.transformers_utils.tokenizer import AnyTokenizer +from vllm.utils import full_groupby + + +# yapf: disable +@pytest.mark.parametrize( + ("token_ids", "expected"), + [ + ([], []), + ( + [32000, 32000, 32000], + [{ "token_id": 32000, "start_idx": 0, "length": 3 }], + ), + ( + [9833, 28747, 32000, 32000, 32000, 9833, 28747, 32000, 32000, 918], + [ + { "token_id": 9833, "start_idx": 0, "length": 1 }, + { "token_id": 28747, "start_idx": 1, "length": 1 }, + { "token_id": 32000, "start_idx": 2, "length": 3 }, + { "token_id": 9833, "start_idx": 5, "length": 1 }, + { "token_id": 28747, "start_idx": 6, "length": 1 }, + { "token_id": 32000, "start_idx": 7, "length": 2 }, + { "token_id": 918, "start_idx": 9, "length": 1 }, + ], + ), + ], +) +# yapf: enable +def test_iter_token_runs(token_ids, expected): + result = list(iter_token_runs(token_ids)) + + # Only displayed on error + print("result:", result) + + # Manually constructed results + assert [item._asdict() for item in result] == expected + + # Invariants + assert sum(run_info.length for run_info in result) == len(token_ids) + + +# yapf: disable +@pytest.mark.parametrize( + ("token_ids", "match_ids", "expected"), + [ + ([], [], [{ "start_idx": 0, "end_idx": 0 }]), + ([], [32000], []), + ( + [32000, 32000, 32000], + [32000], + [ + { "start_idx": 0, "end_idx": 1 }, + { "start_idx": 1, "end_idx": 2 }, + { 
"start_idx": 2, "end_idx": 3 }, + ], + ), + ( + [32000, 32000, 32000], + [32000, 32000], + [{ "start_idx": 0, "end_idx": 2 }], + ), + ( + [32000, 32000, 32000], + [32000, 32000, 32000], + [{ "start_idx": 0, "end_idx": 3 }], + ), + ( + [9833, 28747, 32000, 32000, 32000, 9833, 28747, 32000, 32000, 918], + [28747, 32000], + [ + { "start_idx": 1, "end_idx": 3 }, + { "start_idx": 6, "end_idx": 8 }, + ], + ), + ( + [9833, 28747, 32000, 32000, 32000, 9833, 28747, 32000, 32000, 918], + [28747, 32000, 32000, 32000], + [ + { "start_idx": 1, "end_idx": 5 }, + ], + ), + ( + [9833, 28747, 32000, 32000, 32000, 9833, 28747, 32000, 32000, 918], + [28747, 0, 32000], + [], + ), + ], +) +# yapf: enable +def test_iter_token_matches(token_ids, match_ids, expected): + result = list(iter_token_matches(token_ids, match_ids)) + + # Manually constructed results + assert [item._asdict() for item in result] == expected + + # Invariants + match_lens = [end - start for start, end in result] + print("match_lens:", match_lens) # Only displayed on error + assert all(match_len == len(match_ids) for match_len in match_lens) + + +# yapf: disable +@pytest.mark.parametrize( + ("prompt", "target_by_key", "expected_by_key"), + [ + ( + [], + { + "pattern_1": [], + "pattern_2": [32000], + }, + { + "pattern_1": [{ "start_idx": 0, "end_idx": 0 }], + "pattern_2": [], + } + ), + ( + [32000, 32000, 32000, 32000], + { + "pattern_1": [32000], + "pattern_2": [32000, 32000], + "pattern_3": [32000, 32000, 32000], + }, + { + "pattern_1": [ + { "start_idx": 0, "end_idx": 1 }, + { "start_idx": 1, "end_idx": 2 }, + { "start_idx": 2, "end_idx": 3 }, + { "start_idx": 3, "end_idx": 4 }, + ], + "pattern_2": [ + { "start_idx": 0, "end_idx": 2 }, + { "start_idx": 2, "end_idx": 4 }, + ], + "pattern_3": [ + { "start_idx": 0, "end_idx": 3 }, + ], + }, + ), + ( + [9833, 28747, 32000, 32000, 32000, 9833, 28747, 32000, 32000, 918], + { + "pattern_1": [28747, 32000], + "pattern_2": [28747, 32000, 32000, 32000], + "pattern_3": [28747, 0, 32000], + }, + { + "pattern_1": [ + { "start_idx": 1, "end_idx": 3 }, + { "start_idx": 6, "end_idx": 8 }, + ], + "pattern_2": [ + { "start_idx": 1, "end_idx": 5 }, + ], + "pattern_3": [], + }, + ), + ], +) +# yapf: enable +def test_find_token_matches(prompt, target_by_key, expected_by_key): + # Should not be used since there is nothing to convert to token IDs + mock_tokenizer = cast(AnyTokenizer, object()) + + result = find_token_matches( + prompt, + [ + PromptReplacement(target, [], 0).bind(key, mock_tokenizer) + for key, target in target_by_key.items() + ], + ) + + # Only displayed on error + print("result:", result) + + # Manually constructed results + result_groups = dict(full_groupby(result, key=lambda x: x.modality)) + assert { + key: [ + dict(start_idx=item.start_idx, end_idx=item.end_idx) + for item in result_groups.get(key, []) + ] + for key in expected_by_key + } == expected_by_key + + +# yapf: disable +@pytest.mark.parametrize( + ("prompt", "target_by_key", "expected_by_key"), + [ + # Detokenized test cases of `test_find_token_matches` + # using the vocab of llava-hf/llava-v1.6-mistral-7b-hf + ( + "", + { + "pattern_1": "", + "pattern_2": "", + }, + { + "pattern_1": [{ "start_idx": 0, "end_idx": 0 }], + "pattern_2": [], + } + ), + ( + "", + { + "pattern_1": "", + "pattern_2": "", + "pattern_3": "", + }, + { + "pattern_1": [ + { "start_idx": 0, "end_idx": 7 }, + { "start_idx": 7, "end_idx": 14 }, + { "start_idx": 14, "end_idx": 21 }, + { "start_idx": 21, "end_idx": 28 }, + ], + "pattern_2": [ + { "start_idx": 0, 
"end_idx": 14 }, + { "start_idx": 14, "end_idx": 28 }, + ], + "pattern_3": [ + { "start_idx": 0, "end_idx": 21 }, + ], + }, + ), + ( + "Image:Image:!", + { + "pattern_1": "Image:", + "pattern_2": "Image:", + "pattern_3": "Image:", + }, + { + "pattern_1": [ + { "start_idx": 0, "end_idx": 13 }, + { "start_idx": 27, "end_idx": 40 }, + ], + "pattern_2": [ + { "start_idx": 0, "end_idx": 27 }, + ], + "pattern_3": [], + }, + ), + # Test regex escape + ( + "<|image|><|image|>", + { + "pattern_1": "<|image|>", + "pattern_2": "<|image|>", + "pattern_3": "<|image|><|image|>", + }, + { + "pattern_1": [ + { "start_idx": 0, "end_idx": 9 }, + { "start_idx": 16, "end_idx": 25 }, + ], + "pattern_2": [ + { "start_idx": 0, "end_idx": 16 }, + { "start_idx": 16, "end_idx": 32 }, + ], + "pattern_3": [ + { "start_idx": 0, "end_idx": 25 }, + ], + }, + ), + ], +) +# yapf: enable +def test_find_text_matches(prompt, target_by_key, expected_by_key): + # Should not be used since there is nothing to convert to text + mock_tokenizer = cast(AnyTokenizer, object()) + + result = find_text_matches( + prompt, + [ + PromptReplacement(target, [], 0).bind(key, mock_tokenizer) + for key, target in target_by_key.items() + ], + ) + + # Only displayed on error + print("result:", result) + + # Manually constructed results + result_groups = dict(full_groupby(result, key=lambda x: x.modality)) + assert { + key: [ + dict(start_idx=item.start_idx, end_idx=item.end_idx) + for item in result_groups.get(key, []) + ] + for key in expected_by_key + } == expected_by_key + + +# yapf: disable +@pytest.mark.parametrize( + ("prompt", "target_by_key", "repl_by_key", "expected_by_mm_count"), + [ + ( + "Image:Image:!", + { + # We use `` before `Image:` to test matches that + # occur out of order + "pattern_1": "", + "pattern_2": "Image:", + "pattern_3": "!", + }, + { + # Test whether target is confused with repl_unit + "pattern_1": ("", 1), + # Test empty repl_unit + "pattern_2": ("", 1), + # Test multiple repl_count + "pattern_3": ("?", 2), + }, + { + # Test no replacement + 0: "Image:Image:!", + # Test single replacement + 1: "Image:??", + # Test repeated replacement + 2: "??", + }, + ), + ] +) +# yapf: enable +def test_find_replace_text( + prompt, + target_by_key, + repl_by_key, + expected_by_mm_count, +): + # Should not be used since there is nothing to convert to text + mock_tokenizer = cast(AnyTokenizer, object()) + + matches = find_text_matches( + prompt, + [ + PromptReplacement(target, *repl_by_key[key]) \ + .bind(key, mock_tokenizer) + for key, target in target_by_key.items() + ], + ) + result_by_mm_count = { + mm_count: replace_text_matches( + prompt, + matches, + {key: list(range(mm_count)) + for key in repl_by_key}, + BatchFeature(), + ) + for mm_count in expected_by_mm_count + } + + # Only displayed on error + print("matches:", matches) + print("result_by_mm_count:", result_by_mm_count) + + # Manually constructed results + assert result_by_mm_count == expected_by_mm_count diff --git a/tests/multimodal/test_utils.py b/tests/multimodal/test_utils.py index 9869c8123f001..fd82fb0c55fd7 100644 --- a/tests/multimodal/test_utils.py +++ b/tests/multimodal/test_utils.py @@ -139,7 +139,8 @@ def test_repeat_and_pad_placeholder_tokens(model): 2, "", [32000, 32000, 32000], - [{ "offset": 0, "length": 2 }]), + [{ "offset": 0, "length": 2 }], + ), ( "", [3, 2], diff --git a/vllm/multimodal/inputs.py b/vllm/multimodal/inputs.py index 64a4c58d5509c..8e67a552afe12 100644 --- a/vllm/multimodal/inputs.py +++ b/vllm/multimodal/inputs.py @@ -203,14 +203,7 @@ 
class MultiModalInputsV2(TypedDict): """The type of inputs.""" prompt: str - """ - The original, unprocessed prompt text. - - Note: - Since prompt text is not required by vLLM internals, we leave this - unprocessed to save CPU computation. You can still call - :code:`tokenizer.decode(prompt_token_ids)` to get the processed text. - """ + """The processed prompt text.""" prompt_token_ids: List[int] """The processed token IDs which includes placeholder tokens.""" diff --git a/vllm/multimodal/processing.py b/vllm/multimodal/processing.py index 88a924da174a6..28c8dda581982 100644 --- a/vllm/multimodal/processing.py +++ b/vllm/multimodal/processing.py @@ -1,34 +1,91 @@ +import re +from abc import ABC, abstractmethod +from collections.abc import Callable, ItemsView, Iterable, Mapping, Sequence from dataclasses import dataclass -from functools import lru_cache, partial -from typing import (Any, Callable, Collection, Generic, List, Mapping, - Optional, TypedDict, TypeVar, final) +from functools import lru_cache +from itertools import groupby +from typing import Any, Generic, NamedTuple, Optional, Protocol, TypeVar, Union +import numpy as np from transformers import BatchFeature -from typing_extensions import TypeAlias +from typing_extensions import TypeAlias, TypedDict from vllm.inputs import InputProcessingContext from vllm.transformers_utils.tokenizer import AnyTokenizer, MistralTokenizer -from vllm.utils import is_list_of +from vllm.utils import flatten_2d_lists, full_groupby, is_list_of from .inputs import (AudioItem, ImageItem, MultiModalDataDict, MultiModalInputsV2, MultiModalKwargs, PlaceholderRange, VideoItem) + +def bind_prompt_sequence( + seq: Union[str, list[int]], + tokenizer: AnyTokenizer, +) -> "_BoundPromptSequence": + """ + Bind a text or token sequence to a tokenizer so that it can be + lazily converted into the other format on demand. + """ + return _BoundPromptSequence( + tokenizer=tokenizer, + _text=seq if isinstance(seq, str) else None, + _token_ids=seq if isinstance(seq, list) else None, + ) + + _T = TypeVar("_T") +_S = TypeVar("_S", str, list[int]) -ReplacementFunc: TypeAlias = Callable[[_T, BatchFeature, int], List[int]] -""" -Given the original data item, HF-processed data, and index of the processed -item, output the replacement token IDs to be allocated in vLLM. -""" + +@dataclass +class PromptReplacement(Generic[_S, _T]): + target: _S + """The text or token sequence to find and replace.""" + + repl_unit: _S + """ + The unit making up the replacement text or token sequence. + + See :code:`repl_count` for more details. + """ + + repl_count: Union[Callable[[list[_T], BatchFeature, int], int], int] + """ + Given the original multi-modal items for this modality, HF-processed data, + and index of the processed item, output the number of repetitions of + :code:`repl_unit` to build up the replacement text or token sequence. + + For convenience, you can pass in an integer if the number of repetitions is + a constant. 
+ """ + + def __repr__(self) -> str: + return (f"{type(self).__name__}(target={self.target!r}, " + f"repl_unit={self.repl_unit!r})") + + def bind( + self, + modality: str, + tokenizer: AnyTokenizer, + ) -> "_BoundPromptReplacement[_T]": + return _BoundPromptReplacement( + modality=modality, + target=bind_prompt_sequence(self.target, tokenizer), + repl_unit=bind_prompt_sequence(self.repl_unit, tokenizer), + repl_count=self.repl_count, + ) @dataclass class ModalityProcessingMetadata(Generic[_T]): - placeholder_replacements: Mapping[str, ReplacementFunc] + prompt_repls: Sequence[Union[PromptReplacement[str, _T], + PromptReplacement[list[int], _T]]] """ - A dictionary where each item represents the original placeholder in the - prompt text and the corresponding replacement. + Defines each text or token sequence to replace in the HF-processed prompt. + + This is skipped if the HF-processed prompt is found to already contain + the replacement prompts. """ @@ -52,46 +109,138 @@ class MultiModalProcessingMetadataBuiltins(TypedDict, total=False): Read more on that :ref:`here `. """ -MultiModalMultiData: TypeAlias = List[_T] -""" -A list of data items, where the number of data items allowed -per modality is restricted by :code:`--limit-mm-per-prompt`. -""" +def _encode( + tokenizer: AnyTokenizer, + text: str, + *, + add_special_tokens: bool = False, +) -> list[int]: + """ + Backend-agnostic equivalent of HF's + :code:`tokenizer.encode(text, add_special_tokens=...)`. + """ + if isinstance(tokenizer, MistralTokenizer): + return tokenizer.tokenizer.encode(text, + bos=add_special_tokens, + eos=add_special_tokens) -@final -class MultiModalMultiDataBuiltins(TypedDict, total=False): - """Type annotations for modality types predefined by vLLM.""" + return tokenizer.encode(text, add_special_tokens=add_special_tokens) - image: MultiModalMultiData[ImageItem] - """The input images.""" - video: MultiModalMultiData[VideoItem] - """The input videos.""" +@lru_cache(maxsize=2048) +def _cached_encode( + tokenizer: AnyTokenizer, + text: str, + *, + add_special_tokens: bool = False, +) -> list[int]: + return _encode(tokenizer, text, add_special_tokens=add_special_tokens) - audio: MultiModalMultiData[AudioItem] - """The input audios.""" +def _decode( + tokenizer: AnyTokenizer, + token_ids: list[int], + *, + skip_special_tokens: bool = False, +) -> str: + """ + Backend-agnostic equivalent of HF's + :code:`tokenizer.decode(token_ids, skip_special_tokens=...)`. + """ + return tokenizer.decode(token_ids, skip_special_tokens=skip_special_tokens) -MultiModalMultiDataDict: TypeAlias = Mapping[str, MultiModalMultiData[Any]] -""" -A dictionary containing an entry for each modality type to input. -Note: - This dictionary also accepts modality keys defined outside - :class:`MultiModalMultiDataBuiltins` as long as a customized plugin - is registered through the :class:`~vllm.multimodal.MULTIMODAL_REGISTRY`. - Read more on that :ref:`here `. -""" +@lru_cache(maxsize=2048) +def _cached_decode( + tokenizer: AnyTokenizer, + token_ids: tuple[int, ...], + *, + skip_special_tokens: bool = False, +) -> str: + return _decode(tokenizer, + list(token_ids), + skip_special_tokens=skip_special_tokens) + + +class _HasModalityAttr(Protocol): + modality: str + +class _HasModalityProp(Protocol): -def to_multi_format(data: MultiModalDataDict) -> MultiModalMultiDataDict: + @property + def modality(self) -> str: + ... 
+ + +_M = TypeVar("_M", bound=Union[_HasModalityAttr, _HasModalityProp]) + + +def full_groupby_modality(values: Iterable[_M]) -> ItemsView[str, list[_M]]: + """Convenience function to apply :func:`full_groupby` based on modality.""" + return full_groupby(values, key=lambda x: x.modality) + + +@dataclass +class _BoundPromptSequence: + tokenizer: AnyTokenizer + _text: Optional[str] + _token_ids: Optional[list[int]] + + def __post_init__(self) -> None: + if self._text is None and self._token_ids is None: + raise ValueError("At least one of 'text' and 'token_ids' must be " + "specified") + + @property + def text(self) -> str: + if self._text is None: + assert self._token_ids is not None + self._text = _cached_decode(self.tokenizer, tuple(self._token_ids)) + + return self._text + + @property + def token_ids(self) -> list[int]: + if self._token_ids is None: + assert self._text is not None + self._token_ids = _cached_encode(self.tokenizer, self._text) + + return self._token_ids + + def __repr__(self) -> str: + return (f"{type(self).__name__}(_text={self._text!r}, " + f"_token_ids={self._token_ids!r})") + + +@dataclass +class _BoundPromptReplacement(Generic[_T]): + modality: str + target: _BoundPromptSequence + repl_unit: _BoundPromptSequence + repl_count: Union[Callable[[list[_T], BatchFeature, int], int], int] + + def get_count( + self, + mm_items: list[_T], + hf_inputs: BatchFeature, + item_idx: int, + ) -> int: + repl_count = self.repl_count + if isinstance(repl_count, int): + return repl_count + + return repl_count(mm_items, hf_inputs, item_idx) + + +def to_multi_format(data: MultiModalDataDict) -> dict[str, list[Any]]: """ Convert a :class:`MultiModalDataDict` containing single data items to a :class:`MultiModalMultiDataDict` containing multiple data items per entry. """ - multi_data: Mapping[str, MultiModalMultiData[Any]] = {} + multi_data = dict[str, list[Any]]() for k, v in data.items(): # yapf: disable @@ -107,86 +256,279 @@ def to_multi_format(data: MultiModalDataDict) -> MultiModalMultiDataDict: return multi_data -def encode_no_special_tokens( - tokenizer: AnyTokenizer, - text: str, -) -> List[int]: +class _TokenRun(NamedTuple): + token_id: int + + start_idx: int + length: int + + +def iter_token_runs(token_ids: list[int]) -> Iterable[_TokenRun]: """ - Backend-agnostic equivalent of HF's - :code:`tokenizer.encode(text, add_special_tokens=False)`. + Yield the starting index and length of each run of tokens that are the same. 
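For intuition, a small illustrative check of iter_token_runs (the token IDs below are made up): it collapses consecutive equal IDs into (token_id, start_idx, length) runs via itertools.groupby.

# Hypothetical token IDs; 32000 stands in for a placeholder token.
runs = list(iter_token_runs([32000, 32000, 32000, 5, 5, 7]))
assert runs == [
    _TokenRun(token_id=32000, start_idx=0, length=3),
    _TokenRun(token_id=5, start_idx=3, length=2),
    _TokenRun(token_id=7, start_idx=5, length=1),
]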
""" - if isinstance(tokenizer, MistralTokenizer): - return tokenizer.tokenizer.encode(text, bos=False, eos=False) + start_idx = 0 + + for token_id, it in groupby(token_ids): + length = sum(1 for _ in it) + yield _TokenRun(token_id=token_id, start_idx=start_idx, length=length) + + start_idx += length + + +class _PlaceholderInfo(NamedTuple): + modality: str + offset: int + length: int + + def to_range(self) -> PlaceholderRange: + return PlaceholderRange(offset=self.offset, length=self.length) + + +def iter_placeholders( + prompt_repls: Sequence[_BoundPromptReplacement[Any]], + token_ids: list[int], + *, + min_placeholder_count: int, +) -> Iterable[_PlaceholderInfo]: + """Yield each set of placeholder tokens found in :code:`token_ids`.""" + placeholder_ids_by_modality = { + modality: { + token_id + for prompt_repl in repls + for token_id in prompt_repl.repl_unit.token_ids + } + for modality, repls in full_groupby_modality(prompt_repls) + } - return tokenizer.encode(text, add_special_tokens=False) + for run_info in iter_token_runs(token_ids): + if run_info.length > min_placeholder_count: + for (modality, + placeholder_ids) in placeholder_ids_by_modality.items(): + if run_info.token_id in placeholder_ids: + yield _PlaceholderInfo( + modality=modality, + offset=run_info.start_idx, + length=run_info.length, + ) -@lru_cache -def candidate_placeholders( - tokenizer: AnyTokenizer, - placeholder_text: str, -) -> Collection[List[int]]: - """Generate token ID sequences that may represent a placeholder text.""" - # When the placeholder text is not mapped to a special token ID, - # it may be tokenized differently based on whether it is at the start/end - # of the string. So, we go through each combination of whether the text - # is at the start and end boundaries of the string - - # Matches the placeholder when it is in the middle of the string - start_id, = encode_no_special_tokens(tokenizer, "a") - end_id, = encode_no_special_tokens(tokenizer, "b") - - candidate_basic = encode_no_special_tokens(tokenizer, placeholder_text) - - start_id_, *candidate_a = encode_no_special_tokens( - tokenizer, - f"a{placeholder_text}", - ) - assert start_id == start_id_ +class _TokenMatch(NamedTuple): + start_idx: int + end_idx: int - start_id_, *candidate_ab, end_id_ = encode_no_special_tokens( - tokenizer, - f"a{placeholder_text}b", - ) - assert start_id == start_id_ and end_id == end_id_ - *candidate_b, end_id_ = encode_no_special_tokens( - tokenizer, - f"{placeholder_text}b", - ) - assert end_id == end_id_ +def iter_token_matches( + token_ids: list[int], + match_ids: list[int], +) -> Iterable[_TokenMatch]: + """Yield each occurrence of :code:`match_ids` in :code:`token_ids`.""" + match_len = len(match_ids) - # Remove duplicates (need to convert to tuple to be hashable) - unique_candidates = { - tuple(c) - for c in [candidate_basic, candidate_a, candidate_ab, candidate_b] - } + last_end_idx = 0 + for start_idx in range(len(token_ids) - match_len + 1): + if start_idx < last_end_idx: + continue # Exclude overlapping matches - # Convert back to list - return [list(c) for c in unique_candidates] + end_idx = start_idx + match_len + if token_ids[start_idx:end_idx] == match_ids: + yield _TokenMatch(start_idx=start_idx, end_idx=end_idx) + last_end_idx = end_idx -def apply_placeholders( - token_ids: List[int], - placeholder_ids: List[int], - get_replacement_ids: Callable[[], List[int]], -) -> Optional[PlaceholderRange]: - """ - Find the first occurrence of :code:`placeholder_ids`, - and replace it with the output of 
:code:`get_replacement_ids`. +class _PromptReplacementMatch(ABC, Generic[_T, _S]): + prompt_repl: _BoundPromptReplacement[_T] + + @property + def modality(self) -> str: + return self.prompt_repl.modality + + @property + @abstractmethod + def start_idx(self) -> int: + raise NotImplementedError + + @property + @abstractmethod + def end_idx(self) -> int: + raise NotImplementedError + + @abstractmethod + def get_repl( + self, + mm_items: list[_T], + hf_inputs: BatchFeature, + item_idx: int, + ) -> _S: + raise NotImplementedError + + def __repr__(self) -> str: + return (f"{type(self).__name__}(modality={self.modality!r}, " + f"start_idx={self.start_idx!r}, end_idx={self.end_idx!r})") + + +@dataclass(repr=False) +class _PromptReplacementTokenMatch(_PromptReplacementMatch[_T, list[int]]): + prompt_repl: _BoundPromptReplacement[_T] + match: _TokenMatch + + @property + def start_idx(self) -> int: + return self.match.start_idx + + @property + def end_idx(self) -> int: + return self.match.end_idx + + def get_repl( + self, + mm_items: list[_T], + hf_inputs: BatchFeature, + item_idx: int, + ) -> list[int]: + prompt_repl = self.prompt_repl + count = prompt_repl.get_count(mm_items, hf_inputs, item_idx) + return prompt_repl.repl_unit.token_ids * count - This function updates :code:`token_ids` in place. + +@dataclass(repr=False) +class _PromptReplacementTextMatch(_PromptReplacementMatch[_T, str]): + prompt_repl: _BoundPromptReplacement[_T] + match: re.Match[str] + + @property + def start_idx(self) -> int: + return self.match.start() + + @property + def end_idx(self) -> int: + return self.match.end() + + def get_repl( + self, + mm_items: list[_T], + hf_inputs: BatchFeature, + item_idx: int, + ) -> str: + prompt_repl = self.prompt_repl + count = prompt_repl.get_count(mm_items, hf_inputs, item_idx) + return prompt_repl.repl_unit.text * count + + +def find_token_matches( + prompt: list[int], + prompt_repls: Sequence[_BoundPromptReplacement[_T]], +) -> list[_PromptReplacementTokenMatch[_T]]: + """Return each target of :code:`prompt_repls` found in :code:`prompt`.""" + return [ + _PromptReplacementTokenMatch(prompt_repl, match) + for prompt_repl in prompt_repls + for match in iter_token_matches(prompt, prompt_repl.target.token_ids) + ] + + +def find_text_matches( + prompt: str, + prompt_repls: Sequence[_BoundPromptReplacement[_T]], +) -> list[_PromptReplacementTextMatch[_T]]: + """Return each target of :code:`prompt_repls` found in :code:`prompt`.""" + return [ + _PromptReplacementTextMatch(prompt_repl, match) + for prompt_repl in prompt_repls + for match in re.finditer(re.escape(prompt_repl.target.text), prompt) + ] + + +def _resolve_matches( + prompt: _S, + matches: Sequence[_PromptReplacementMatch[_T, _S]], +) -> list[_PromptReplacementMatch[_T, _S]]: + """ + Resolve :code:`matches` to ensure that there are no overlapping matches, + and sort them such that earlier matches take priority over later ones. 
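To make the overlap rule in iter_token_matches concrete (token IDs made up): once a match is emitted, any candidate window that starts before its end index is skipped, so repeated patterns never yield overlapping matches.

# The pattern [1, 2, 1] matches at index 0; the overlapping candidate at index 2 is skipped.
matches = list(iter_token_matches([1, 2, 1, 2, 1, 9], [1, 2, 1]))
assert matches == [_TokenMatch(start_idx=0, end_idx=3)]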
""" - placeholder_length = len(placeholder_ids) + num_matches_by_idx = np.zeros(len(prompt), dtype=int) + for match in matches: + num_matches_by_idx[match.start_idx:match.end_idx] += 1 + + duplicate_matches_idxs, = np.nonzero(num_matches_by_idx > 1) + if len(duplicate_matches_idxs) > 0: + raise ValueError("Unable to find a unique replacement " + f"at indices={duplicate_matches_idxs} " + f"of prompt={prompt}") + + return sorted(matches, key=lambda x: x.start_idx) + + +def _replace_matches( + prompt: _S, + matches: Sequence[_PromptReplacementMatch[_T, _S]], + mm_items_by_modality: Mapping[str, list[_T]], + hf_inputs: BatchFeature, +) -> list[_S]: + out_seqs = list[_S]() + prev_end_idx = 0 + next_idx_by_modality = {modality: 0 for modality in mm_items_by_modality} + + for match in _resolve_matches(prompt, matches): + modality = match.modality + mm_items = mm_items_by_modality[modality] + + item_idx = next_idx_by_modality[modality] + if item_idx >= len(mm_items): + continue + + start_idx = match.start_idx + end_idx = match.end_idx + repl_ids = match.get_repl(mm_items, hf_inputs, item_idx) + + out_seqs.append(prompt[prev_end_idx:start_idx] + repl_ids) + prev_end_idx = end_idx + next_idx_by_modality[modality] += 1 + + out_seqs.append(prompt[prev_end_idx:]) + + return out_seqs + + +def replace_token_matches( + prompt: list[int], + matches: Sequence[_PromptReplacementMatch[_T, list[int]]], + mm_items_by_modality: Mapping[str, list[_T]], + hf_inputs: BatchFeature, +) -> list[int]: + """Apply :code:`prompt_repls` to :code:`prompt`.""" + if not matches: + return prompt + + token_id_seqs = _replace_matches( + prompt, + matches, + mm_items_by_modality, + hf_inputs, + ) + + return flatten_2d_lists(token_id_seqs) - for start_idx in range(len(token_ids) - placeholder_length + 1): - if token_ids[start_idx:placeholder_length] == placeholder_ids: - token_ids[start_idx:placeholder_length] = get_replacement_ids() - return PlaceholderRange(offset=start_idx, - length=placeholder_length) +def replace_text_matches( + prompt: str, + matches: Sequence[_PromptReplacementMatch[_T, str]], + mm_items_by_modality: Mapping[str, list[_T]], + hf_inputs: BatchFeature, +) -> str: + """Apply :code:`prompt_repls` to :code:`prompt`.""" + if not matches: + return prompt - return None + texts = _replace_matches( + prompt, + matches, + mm_items_by_modality, + hf_inputs, + ) + + return "".join(texts) class MultiModalProcessor: @@ -212,62 +554,166 @@ def __call__( ) -> MultiModalInputsV2: return self.apply(prompt, mm_data, mm_processor_kwargs) - def apply( + def _find_placeholders( + self, + all_prompt_repls: Sequence[_BoundPromptReplacement[Any]], + new_token_ids: list[int], + *, + # To avoid false positives from multi-input when detecting + # whether placeholder tokens have been inserted, in case + # the target sequence is a subset of the replacement tokens + min_placeholder_count: int = 16, + ) -> list[_PlaceholderInfo]: + return list( + iter_placeholders( + all_prompt_repls, + new_token_ids, + min_placeholder_count=min_placeholder_count, + )) + + def _apply_hf_processor( self, prompt: str, mm_data: MultiModalDataDict, mm_processor_kwargs: Mapping[str, object], - ) -> MultiModalInputsV2: - tokenizer = self.ctx.tokenizer + ) -> BatchFeature: hf_processor = self.ctx.get_hf_processor() - processed_inputs = hf_processor( + return hf_processor( text=prompt, # type: ignore **mm_data, **mm_processor_kwargs, ) - new_token_ids, = processed_inputs.pop("input_ids").tolist() - mm_kwargs = MultiModalKwargs(processed_inputs) - mm_placeholders: 
Mapping[str, List[PlaceholderRange]] = {} + def _bind_prompt_replacements( + self, + mm_data: MultiModalDataDict, + ) -> list[_BoundPromptReplacement[Any]]: + tokenizer = self.ctx.tokenizer - for modality, orig_inputs in to_multi_format(mm_data).items(): - assert isinstance(orig_inputs, list) + return [ + prompt_repl.bind(modality, tokenizer) + for modality, metadata in self.metadata.items() + if modality in mm_data for prompt_repl in metadata.prompt_repls + ] - metadata = self.metadata[modality] - placeholder_replacements = metadata.placeholder_replacements + def _apply_prompt_replacements( + self, + mm_data: MultiModalDataDict, + hf_inputs: BatchFeature, + token_ids: list[int], + prompt_repls: Sequence[_BoundPromptReplacement[Any]], + ) -> tuple[list[int], str, list[_PlaceholderInfo]]: + tokenizer = self.ctx.tokenizer - modality_placeholders: List[PlaceholderRange] = [] + mm_items = to_multi_format(mm_data) + token_matches = find_token_matches(token_ids, prompt_repls) + + # If the search text does not represent a special token, + # it may have different token IDs in the prompt, because + # the tokens may go across the boundaries of the search text. + # ---- + # e.g. when searching for "foo" in "food", if "food" itself makes + # up a token, then the token ID of "foo" will not appear at all + # ---- + # Since it is inefficient to search for all possible tokenizations + # of the search text in the prompt, we instead perform string + # replacement on the decoded token IDs, then encode them back. + if all( + len(matches) >= len(mm_data[modality]) + for modality, matches in full_groupby_modality(token_matches) + ): # yapf: disable + token_ids = replace_token_matches( + token_ids, + token_matches, + mm_items, + hf_inputs, + ) + + text = _decode(tokenizer, token_ids) + matched_repls = [match.prompt_repl for match in token_matches] + else: + text = _decode(tokenizer, token_ids) + + text_matches = find_text_matches(text, prompt_repls) + text = replace_text_matches( + text, + text_matches, + mm_items, + hf_inputs, + ) + + token_ids = _encode(tokenizer, text) + matched_repls = [match.prompt_repl for match in text_matches] + + placeholders = self._find_placeholders(matched_repls, token_ids) + + # Sanity check + assert len(placeholders) == len(matched_repls), dict( + # Log this information for easier debugging + text=text, + token_ids=token_ids, + placeholders=placeholders, + matched_repls=matched_repls, + ) - for item_idx, orig_item in enumerate(orig_inputs): - for match_text, replace_fn in placeholder_replacements.items(): - candidates = candidate_placeholders(tokenizer, match_text) - get_replacement_ids = partial( - replace_fn, - orig_item, - processed_inputs, - item_idx, - ) + return token_ids, text, placeholders - for match_ids in candidates: - # TODO(youkaichao): Don't update new_token_ids - placeholders = apply_placeholders( - new_token_ids, - match_ids, - get_replacement_ids, - ) + def apply( + self, + prompt_text: str, + mm_data: MultiModalDataDict, + mm_processor_kwargs: Mapping[str, object], + ) -> MultiModalInputsV2: + """ + Process multi-modal inputs to be used in vLLM. + + The main steps are: + + 1. Apply HF Processor on prompt text and multi-modal data together, + outputting token IDs and processed tensors. + 2. Find and replace sequences in the token IDs with placeholder tokens. + The number of placeholder tokens equals the feature size of the + multi-modal data outputted by the multi-modal encoder. + 3. Extract information about the placeholder tokens from the + processed token IDs. 
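As a usage sketch of steps 1 and 2 (the placeholder string and count below are purely illustrative, not taken from any real model), a model's processing metadata could declare how each "<image>" in the prompt gets expanded:

def get_image_repl_count(mm_items, hf_inputs, item_idx):
    # Hypothetical: a real model would derive this from hf_inputs, e.g. the
    # number of vision-encoder patches produced for image `item_idx`.
    return 576

image_metadata = ModalityProcessingMetadata(prompt_repls=[
    PromptReplacement(
        target="<image>",        # text (or token IDs) to search for
        repl_unit="<image>",     # unit repeated to build the replacement
        repl_count=get_image_repl_count,  # or just an int if it is constant
    ),
])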
+ """ + tokenizer = self.ctx.tokenizer + + hf_inputs = self._apply_hf_processor(prompt_text, mm_data, + mm_processor_kwargs) + prompt_ids, = hf_inputs.pop("input_ids").tolist() + mm_kwargs = MultiModalKwargs(hf_inputs) - if placeholders is not None: - modality_placeholders.append(placeholders) + all_prompt_repls = self._bind_prompt_replacements(mm_data) - # yapf: disable - mm_placeholders[modality] = modality_placeholders # type: ignore[index] - # yapf: enable + # If HF processor already inserts placeholder tokens, + # there is no need for us to insert them + all_placeholders = self._find_placeholders(all_prompt_repls, + prompt_ids) + if all_placeholders: + prompt_text = _decode(tokenizer, prompt_ids) + else: + ( + prompt_ids, + prompt_text, + all_placeholders, + ) = self._apply_prompt_replacements( + mm_data, + hf_inputs, + prompt_ids, + all_prompt_repls, + ) + + mm_placeholders = { + modality: [item.to_range() for item in items] + for modality, items in full_groupby_modality(all_placeholders) + } return MultiModalInputsV2( type="multimodal", - prompt=prompt, - prompt_token_ids=new_token_ids, + prompt=prompt_text, + prompt_token_ids=prompt_ids, mm_kwargs=mm_kwargs, mm_placeholders=mm_placeholders, ) diff --git a/vllm/utils.py b/vllm/utils.py index 30c371b0e3591..dd4283e3ac381 100644 --- a/vllm/utils.py +++ b/vllm/utils.py @@ -19,7 +19,8 @@ import warnings import weakref from asyncio import FIRST_COMPLETED, AbstractEventLoop, Future, Task -from collections.abc import Mapping +from collections import defaultdict +from collections.abc import Iterable, Mapping from functools import lru_cache, partial, wraps from platform import uname from typing import (Any, AsyncGenerator, Awaitable, Callable, Dict, Generic, @@ -905,6 +906,23 @@ def flatten_2d_lists(lists: List[List[T]]) -> List[T]: return [item for sublist in lists for item in sublist] +_K = TypeVar("_K", bound=Hashable) +_V = TypeVar("_V") + + +def full_groupby(values: Iterable[_V], *, key: Callable[[_V], _K]): + """ + Unlike :class:`itertools.groupby`, groups are not broken by + non-contiguous data. 
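A quick contrast with itertools.groupby (illustrative values): full_groupby collects every value with the same key regardless of where it appears, so non-contiguous members land in the same group.

pairs = [("image", 1), ("audio", 2), ("image", 3)]
grouped = dict(full_groupby(pairs, key=lambda x: x[0]))
assert grouped == {"image": [("image", 1), ("image", 3)],
                   "audio": [("audio", 2)]}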
+ """ + groups = defaultdict[_K, list[_V]](list) + + for value in values: + groups[key(value)].append(value) + + return groups.items() + + # TODO: This function can be removed if transformer_modules classes are # serialized by value when communicating between processes def init_cached_hf_modules() -> None: From 4cfe5d2bcafe1f47d1df046e6788ebbe038eaf3f Mon Sep 17 00:00:00 2001 From: Isotr0py Date: Sat, 23 Nov 2024 13:25:46 +0800 Subject: [PATCH 103/122] [Bugfix] `multi_modal_kwargs` broadcast for CPU tensor parallel (#10541) Signed-off-by: Isotr0py <2037008807@qq.com> --- vllm/worker/cpu_enc_dec_model_runner.py | 1 + vllm/worker/cpu_model_runner.py | 1 + 2 files changed, 2 insertions(+) diff --git a/vllm/worker/cpu_enc_dec_model_runner.py b/vllm/worker/cpu_enc_dec_model_runner.py index 1f8e2d2d88a23..cc24cfe04d2ba 100644 --- a/vllm/worker/cpu_enc_dec_model_runner.py +++ b/vllm/worker/cpu_enc_dec_model_runner.py @@ -35,6 +35,7 @@ def as_broadcastable_tensor_dict(self) -> Dict[str, Any]: "input_positions": self.input_positions, "encoder_input_tokens": self.encoder_input_tokens, "encoder_input_positions": self.encoder_input_positions, + "multi_modal_kwargs": self.multi_modal_kwargs, } _add_attn_metadata_broadcastable_dict(tensor_dict, self.attn_metadata) _add_sampling_metadata_broadcastable_dict(tensor_dict, diff --git a/vllm/worker/cpu_model_runner.py b/vllm/worker/cpu_model_runner.py index 2cf573625401a..7cab476d7fca4 100644 --- a/vllm/worker/cpu_model_runner.py +++ b/vllm/worker/cpu_model_runner.py @@ -83,6 +83,7 @@ def as_broadcastable_tensor_dict(self) -> Dict[str, Any]: tensor_dict = { "input_tokens": self.input_tokens, "input_positions": self.input_positions, + "multi_modal_kwargs": self.multi_modal_kwargs, } _add_attn_metadata_broadcastable_dict(tensor_dict, self.attn_metadata) _add_sampling_metadata_broadcastable_dict(tensor_dict, From 86a44fb8967f757b0701aaa33aeaa8a431714a27 Mon Sep 17 00:00:00 2001 From: JiHuazhong Date: Sat, 23 Nov 2024 14:23:12 +0800 Subject: [PATCH 104/122] [Platforms] Refactor openvino code (#10573) Signed-off-by: statelesshz --- vllm/executor/openvino_executor.py | 81 ++---------------------------- vllm/platforms/openvino.py | 69 +++++++++++++++++++++++++ 2 files changed, 72 insertions(+), 78 deletions(-) diff --git a/vllm/executor/openvino_executor.py b/vllm/executor/openvino_executor.py index dcd4b7621381d..db0070ce510ee 100644 --- a/vllm/executor/openvino_executor.py +++ b/vllm/executor/openvino_executor.py @@ -1,19 +1,16 @@ from typing import List, Set, Tuple import openvino as ov -import openvino.properties.hint as hints -import torch import vllm.envs as envs -from vllm.config import CacheConfig, ModelConfig from vllm.executor.executor_base import ExecutorAsyncBase, ExecutorBase from vllm.logger import init_logger from vllm.lora.request import LoRARequest from vllm.model_executor.layers.sampler import SamplerOutput from vllm.platforms import current_platform from vllm.sequence import ExecuteModelRequest -from vllm.utils import (GiB_bytes, get_distributed_init_method, get_ip, - get_open_port, make_async) +from vllm.utils import (get_distributed_init_method, get_ip, get_open_port, + make_async) from vllm.worker.worker_base import WorkerWrapperBase logger = init_logger(__name__) @@ -30,11 +27,6 @@ def _init_executor(self) -> None: current_platform.is_openvino_gpu(), \ "OpenVINO backend supports only CPU and GPU devices" - self.ov_core = ov.Core() - self.model_config = _verify_and_get_model_config(self.model_config) - self.cache_config = 
_verify_and_get_cache_config( - self.ov_core, self.cache_config) - # Instantiate the worker and load the model to CPU. self._init_worker() @@ -45,7 +37,7 @@ def _init_worker(self): distributed_init_method = get_distributed_init_method( get_ip(), get_open_port()) self.driver_worker = wrapper.init_worker( - ov_core=self.ov_core, + ov_core=ov.Core(), vllm_config=self.vllm_config, local_rank=0, rank=0, @@ -130,70 +122,3 @@ async def check_health_async(self) -> None: # OpenVINOExecutor will always be healthy as long as # it's running. return - - -def _verify_and_get_model_config(config: ModelConfig) -> ModelConfig: - if config.dtype != torch.float32: - logger.warning( - f"Only float32 dtype is supported on OpenVINO, casting from {config.dtype}." # noqa: G004, E501 - ) - config.dtype = torch.float32 - if not config.enforce_eager: - logger.warning( - "CUDA graph is not supported on OpenVINO backend, fallback to the " - "eager mode.") - config.enforce_eager = True - return config - - -def _verify_and_get_cache_config(ov_core: ov.Core, - config: CacheConfig) -> CacheConfig: - if envs.VLLM_OPENVINO_CPU_KV_CACHE_PRECISION == "u8": - if not current_platform.is_openvino_cpu(): - logger.info("VLLM_OPENVINO_CPU_KV_CACHE_PRECISION is" - "ignored for GPU, f16 data type will be used.") - config.cache_dtype = ov.Type.f16 - else: - logger.info("KV cache type is overridden to u8 via " - "VLLM_OPENVINO_CPU_KV_CACHE_PRECISION env var.") - config.cache_dtype = ov.Type.u8 - else: - if current_platform.is_openvino_cpu(): - ov_device = envs.VLLM_OPENVINO_DEVICE - inference_precision = ov_core.get_property( - ov_device, hints.inference_precision) - if inference_precision == ov.Type.bf16: - config.cache_dtype = ov.Type.bf16 - else: - config.cache_dtype = ov.Type.f16 - else: - config.cache_dtype = ov.Type.f16 - - if current_platform.is_openvino_cpu(): - if config.block_size != 32: - logger.info( - f"OpenVINO CPU optimal block size is 32, overriding currently set {config.block_size}" # noqa: G004, E501 - ) - config.block_size = 32 - else: - if config.block_size != 16: - logger.info( - f"OpenVINO GPU optimal block size is 16, overriding currently set {config.block_size}" # noqa: G004, E501 - ) - config.block_size = 16 - - kv_cache_space = envs.VLLM_OPENVINO_KVCACHE_SPACE - if kv_cache_space >= 0: - if kv_cache_space == 0 and current_platform.is_openvino_cpu(): - config.openvino_kvcache_space_bytes = 4 * GiB_bytes # type: ignore - logger.warning( - "Environment variable VLLM_OPENVINO_KVCACHE_SPACE (GB) " - "for OpenVINO backend is not set, using 4 by default.") - else: - config.openvino_kvcache_space_bytes = kv_cache_space * GiB_bytes # type: ignore - else: - raise RuntimeError( - "Invalid environment variable VLLM_OPENVINO_KVCACHE_SPACE" - f" {kv_cache_space}, expect a positive integer value.") - - return config diff --git a/vllm/platforms/openvino.py b/vllm/platforms/openvino.py index 694de836e1517..91e615481ff8e 100644 --- a/vllm/platforms/openvino.py +++ b/vllm/platforms/openvino.py @@ -1,5 +1,7 @@ from typing import TYPE_CHECKING +import openvino as ov +import openvino.properties.hint as hints import torch import vllm.envs as envs @@ -49,6 +51,8 @@ def is_pin_memory_available(self) -> bool: @classmethod def check_and_update_config(cls, vllm_config: VllmConfig) -> None: + from vllm.utils import GiB_bytes + parallel_config = vllm_config.parallel_config assert ( parallel_config.world_size == 1 @@ -57,3 +61,68 @@ def check_and_update_config(cls, vllm_config: VllmConfig) -> None: if parallel_config.worker_cls == "auto": 
parallel_config.worker_cls = \ "vllm.worker.openvino_worker.OpenVINOWorker" + + # check and update model config + model_config = vllm_config.model_config + if model_config.dtype != torch.float32: + logger.warning( + f"Only float32 dtype is supported on OpenVINO, casting from {model_config.dtype}." # noqa: G004, E501 + ) + model_config.dtype = torch.float32 + if not model_config.enforce_eager: + logger.warning( + "CUDA graph is not supported on OpenVINO backend, fallback to " + "the eager mode.") + model_config.enforce_eager = True + + # check and update cache config + ov_core = ov.Core() + cache_config = vllm_config.cache_config + if envs.VLLM_OPENVINO_CPU_KV_CACHE_PRECISION == "u8": + if not OpenVinoPlatform.is_openvino_cpu(): + logger.info("VLLM_OPENVINO_CPU_KV_CACHE_PRECISION is" + "ignored for GPU, f16 data type will be used.") + cache_config.cache_dtype = ov.Type.f16 + else: + logger.info("KV cache type is overridden to u8 via " + "VLLM_OPENVINO_CPU_KV_CACHE_PRECISION env var.") + cache_config.cache_dtype = ov.Type.u8 + else: + if OpenVinoPlatform.is_openvino_cpu(): + ov_device = envs.VLLM_OPENVINO_DEVICE + inference_precision = ov_core.get_property( + ov_device, hints.inference_precision) + if inference_precision == ov.Type.bf16: + cache_config.cache_dtype = ov.Type.bf16 + else: + cache_config.cache_dtype = ov.Type.f16 + else: + cache_config.cache_dtype = ov.Type.f16 + + if OpenVinoPlatform.is_openvino_cpu(): + if cache_config.block_size != 32: + logger.info( + f"OpenVINO CPU optimal block size is 32, overriding currently set {cache_config.block_size}" # noqa: G004, E501 + ) + cache_config.block_size = 32 + else: + if cache_config.block_size != 16: + logger.info( + f"OpenVINO GPU optimal block size is 16, overriding currently set {cache_config.block_size}" # noqa: G004, E501 + ) + cache_config.block_size = 16 + + kv_cache_space = envs.VLLM_OPENVINO_KVCACHE_SPACE + if kv_cache_space >= 0: + if kv_cache_space == 0 and OpenVinoPlatform.is_openvino_cpu(): + cache_config.openvino_kvcache_space_bytes = 4 * GiB_bytes # type: ignore + logger.warning( + "Environment variable VLLM_OPENVINO_KVCACHE_SPACE (GB) " + "for OpenVINO backend is not set, using 4 by default.") + else: + cache_config.openvino_kvcache_space_bytes = ( # type: ignore + kv_cache_space * GiB_bytes) + else: + raise RuntimeError( + "Invalid environment variable VLLM_OPENVINO_KVCACHE_SPACE" + f" {kv_cache_space}, expect a positive integer value.") From 651f6c31ac86f29aa72fa682ef6c34349bcc75db Mon Sep 17 00:00:00 2001 From: Nishidha Date: Sat, 23 Nov 2024 15:03:53 +0530 Subject: [PATCH 105/122] For ppc64le, disabled tests for now and addressed space issues (#10538) --- .buildkite/run-cpu-test-ppc64le.sh | 44 ++---------------------------- 1 file changed, 3 insertions(+), 41 deletions(-) diff --git a/.buildkite/run-cpu-test-ppc64le.sh b/.buildkite/run-cpu-test-ppc64le.sh index 5d7a0bff90963..bc06838d804ff 100755 --- a/.buildkite/run-cpu-test-ppc64le.sh +++ b/.buildkite/run-cpu-test-ppc64le.sh @@ -4,49 +4,11 @@ # It serves a sanity check for compilation and basic model usage. set -ex -# Try building the docker image -docker build -t cpu-test -f Dockerfile.ppc64le . - # Setup cleanup -remove_docker_container() { docker rm -f cpu-test || true; } +remove_docker_container() { docker rm -f cpu-test || true; docker system prune -f; } trap remove_docker_container EXIT remove_docker_container -# Run the image, setting --shm-size=4g for tensor parallel. 
-source /etc/environment -#docker run -itd --entrypoint /bin/bash -v ~/.cache/huggingface:/root/.cache/huggingface --privileged=true --network host -e HF_TOKEN --env VLLM_CPU_KVCACHE_SPACE=4 --shm-size=4g --name cpu-test cpu-test -docker run -itd --entrypoint /bin/bash -v ~/.cache/huggingface:/root/.cache/huggingface --privileged=true --network host -e HF_TOKEN="$HF_TOKEN" --name cpu-test cpu-test - -function cpu_tests() { - set -e - - # Run basic model test - docker exec cpu-test bash -c " - set -e - pip install pytest pytest-asyncio \ - decord einops librosa peft Pillow sentence-transformers soundfile \ - transformers_stream_generator matplotlib datamodel_code_generator - pip install torchvision --index-url https://download.pytorch.org/whl/cpu - pytest -v -s tests/models/decoder_only/language -m cpu_model - pytest -v -s tests/models/embedding/language -m cpu_model - pytest -v -s tests/models/encoder_decoder/language -m cpu_model - pytest -v -s tests/models/decoder_only/audio_language -m cpu_model - pytest -v -s tests/models/decoder_only/vision_language -m cpu_model" - - # online inference - docker exec cpu-test bash -c " - set -e - python3 -m vllm.entrypoints.openai.api_server --model facebook/opt-125m & - timeout 600 bash -c 'until curl localhost:8000/v1/models; do sleep 1; done' || exit 1 - python3 benchmarks/benchmark_serving.py \ - --backend vllm \ - --dataset-name random \ - --model facebook/opt-125m \ - --num-prompts 20 \ - --endpoint /v1/completions \ - --tokenizer facebook/opt-125m" -} +# Try building the docker image +docker build -t cpu-test -f Dockerfile.ppc64le . -# All of CPU tests are expected to be finished less than 25 mins. -export -f cpu_tests -timeout 25m bash -c "cpu_tests" From 04668ebe7a35b69f1d2f8b04ef255bb16c8d2a01 Mon Sep 17 00:00:00 2001 From: Isotr0py Date: Sun, 24 Nov 2024 02:12:20 +0800 Subject: [PATCH 106/122] [Bugfix] Avoid import AttentionMetadata explicitly in Mllama (#10593) Signed-off-by: Isotr0py <2037008807@qq.com> --- vllm/attention/backends/blocksparse_attn.py | 5 +++++ vllm/attention/layer.py | 3 ++- vllm/model_executor/models/mllama.py | 14 +++++++------- vllm/platforms/openvino.py | 8 ++++++-- vllm/v1/attention/backends/flash_attn.py | 2 +- 5 files changed, 21 insertions(+), 11 deletions(-) diff --git a/vllm/attention/backends/blocksparse_attn.py b/vllm/attention/backends/blocksparse_attn.py index 94002e36db2bb..9e54c3b40c54e 100644 --- a/vllm/attention/backends/blocksparse_attn.py +++ b/vllm/attention/backends/blocksparse_attn.py @@ -87,6 +87,11 @@ def __post_init__(self): class BlocksparseFlashAttentionBackend(AttentionBackend): + @staticmethod + def get_name() -> str: + # For attention layer compatibility + return "FLASH_ATTN" + @staticmethod def get_impl_cls() -> Type["BlocksparseFlashAttentionImpl"]: return BlocksparseFlashAttentionImpl diff --git a/vllm/attention/layer.py b/vllm/attention/layer.py index cb4dedf481c77..1bb335909484b 100644 --- a/vllm/attention/layer.py +++ b/vllm/attention/layer.py @@ -6,7 +6,7 @@ import vllm.envs as envs from vllm.attention import AttentionMetadata, AttentionType -from vllm.attention.selector import get_attn_backend +from vllm.attention.selector import backend_name_to_enum, get_attn_backend from vllm.config import CacheConfig from vllm.forward_context import ForwardContext, get_forward_context from vllm.model_executor.layers.quantization.base_config import ( @@ -98,6 +98,7 @@ def __init__( self.impl = impl_cls(num_heads, head_size, scale, num_kv_heads, alibi_slopes, sliding_window, kv_cache_dtype, 
blocksparse_params, logits_soft_cap) + self.backend = backend_name_to_enum(attn_backend.get_name()) # For cuda-alike (CUDA and ROCM) and cpu platforms, we control how # torch.compile works by registering the attention as one giant diff --git a/vllm/model_executor/models/mllama.py b/vllm/model_executor/models/mllama.py index 41f62b37f3bd9..9e6634a9a7579 100644 --- a/vllm/model_executor/models/mllama.py +++ b/vllm/model_executor/models/mllama.py @@ -32,9 +32,8 @@ import vllm.distributed.parallel_state as ps from vllm.attention import Attention, AttentionMetadata, AttentionType -from vllm.attention.backends.flash_attn import FlashAttentionMetadata -from vllm.attention.backends.xformers import XFormersMetadata from vllm.attention.ops.paged_attn import PagedAttention +from vllm.attention.selector import _Backend from vllm.config import VllmConfig from vllm.distributed import get_tensor_model_parallel_world_size from vllm.inputs import (INPUT_REGISTRY, DummyData, EncoderDecoderInputs, @@ -828,7 +827,8 @@ def _attention_with_mask( ) -> torch.Tensor: # Skip writing kv-cache for the initial profiling run. if len(kv_cache.shape) > 1: - if isinstance(attn_metadata, FlashAttentionMetadata): + if self.attn.backend in (_Backend.FLASH_ATTN, + _Backend.FLASH_ATTN_VLLM_V1): cached_k = torch.cat([k[s:e] for s, e in kv_range_for_decode]) cached_v = torch.cat([v[s:e] for s, e in kv_range_for_decode]) torch.ops._C_cache_ops.reshape_and_cache_flash( @@ -842,7 +842,7 @@ def _attention_with_mask( 1.0, 1.0, ) - elif isinstance(attn_metadata, XFormersMetadata): + elif self.attn.backend in (_Backend.XFORMERS, _Backend.TORCH_SDPA): key_cache, value_cache = PagedAttention.split_kv_cache( kv_cache, self.num_local_key_value_heads, self.head_dim) cached_k = torch.cat([k[s:e] for s, e in kv_range_for_decode]) @@ -852,9 +852,9 @@ def _attention_with_mask( attn_metadata.cross_slot_mapping, "auto", 1.0, 1.0) else: raise ValueError( - f"Unsupported AttentionMetadata {type(attn_metadata)} " - f"class found. Expected the AttentionMetadata to " - f"be either XFormersMetadata or FlashAttentionMetadata.") + f"Unsupported Attention backend {self.attn.backend} " + "enum found. Expected the Attention backend to be " + "FLASH_ATTN, FLASH_ATTN_VLLM_V1, XFORMERS or TORCH_SDPA.") # We have to call torch.sdpa for prefill when using a # custom cross-attention mask. 
Because the mask is not a diff --git a/vllm/platforms/openvino.py b/vllm/platforms/openvino.py index 91e615481ff8e..ea5ec7b40b95c 100644 --- a/vllm/platforms/openvino.py +++ b/vllm/platforms/openvino.py @@ -1,7 +1,5 @@ from typing import TYPE_CHECKING -import openvino as ov -import openvino.properties.hint as hints import torch import vllm.envs as envs @@ -16,6 +14,12 @@ logger = init_logger(__name__) +try: + import openvino as ov + import openvino.properties.hint as hints +except ImportError as e: + logger.warning("Failed to import OpenVINO with %r", e) + class OpenVinoPlatform(Platform): _enum = PlatformEnum.OPENVINO diff --git a/vllm/v1/attention/backends/flash_attn.py b/vllm/v1/attention/backends/flash_attn.py index d98bb5a716e97..5f8535eaa303f 100644 --- a/vllm/v1/attention/backends/flash_attn.py +++ b/vllm/v1/attention/backends/flash_attn.py @@ -19,7 +19,7 @@ def get_supported_head_sizes() -> List[int]: @staticmethod def get_name() -> str: - return "flash-attn-vllm-v1" + return "FLASH_ATTN_VLLM_V1" @staticmethod def get_impl_cls() -> Type["FlashAttentionImpl"]: From 17d8fc1806c61e3f859a45b69be9f8dccf9a5fcc Mon Sep 17 00:00:00 2001 From: Jee Jee Li Date: Sun, 24 Nov 2024 09:22:33 +0800 Subject: [PATCH 107/122] [bugfix] Fix example/tensorize_vllm_model tests (#10595) Signed-off-by: Jee Jee Li --- vllm/model_executor/model_loader/tensorizer.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/vllm/model_executor/model_loader/tensorizer.py b/vllm/model_executor/model_loader/tensorizer.py index c48b287ed181a..3fd668765a1b1 100644 --- a/vllm/model_executor/model_loader/tensorizer.py +++ b/vllm/model_executor/model_loader/tensorizer.py @@ -19,6 +19,7 @@ from vllm.logger import init_logger from vllm.model_executor.layers.vocab_parallel_embedding import ( VocabParallelEmbedding) +from vllm.plugins import set_current_vllm_config from vllm.utils import FlexibleArgumentParser tensorizer_error_msg = None @@ -284,7 +285,8 @@ def _init_model(self): model_args = self.tensorizer_config.hf_config model_args.torch_dtype = self.tensorizer_config.dtype assert self.tensorizer_config.model_class is not None - with no_init_or_tensor(): + # TODO: Do we need to consider old-style model class? 
+ with no_init_or_tensor(), set_current_vllm_config(self.vllm_config): return self.tensorizer_config.model_class( vllm_config=self.vllm_config, ) From 1700c543a556e669e559c369a36c0a0d36a8de19 Mon Sep 17 00:00:00 2001 From: Jee Jee Li Date: Sun, 24 Nov 2024 09:23:17 +0800 Subject: [PATCH 108/122] [Bugfix] Fix LoRA weight sharding (#10450) Signed-off-by: Jee Jee Li Co-authored-by: Cyrus Leung --- .buildkite/test-pipeline.yaml | 13 +- .../{test_chatglm3.py => test_chatglm3_tp.py} | 63 +++++-- tests/lora/test_llama.py | 146 ---------------- tests/lora/test_llama_tp.py | 161 ++++++++++++++++++ vllm/lora/fully_sharded_layers.py | 5 + vllm/lora/layers.py | 34 +++- vllm/model_executor/models/chatglm.py | 4 +- 7 files changed, 258 insertions(+), 168 deletions(-) rename tests/lora/{test_chatglm3.py => test_chatglm3_tp.py} (56%) delete mode 100644 tests/lora/test_llama.py create mode 100644 tests/lora/test_llama_tp.py diff --git a/.buildkite/test-pipeline.yaml b/.buildkite/test-pipeline.yaml index c436d2b48d20f..bff33d35b423e 100644 --- a/.buildkite/test-pipeline.yaml +++ b/.buildkite/test-pipeline.yaml @@ -230,7 +230,7 @@ steps: source_file_dependencies: - vllm/lora - tests/lora - command: pytest -v -s lora --shard-id=$$BUILDKITE_PARALLEL_JOB --num-shards=$$BUILDKITE_PARALLEL_JOB_COUNT --ignore=lora/test_long_context.py + command: pytest -v -s lora --shard-id=$$BUILDKITE_PARALLEL_JOB --num-shards=$$BUILDKITE_PARALLEL_JOB_COUNT --ignore lora/test_long_context.py lora/test_chatglm3_tp.py lora/test_llama_tp.py parallelism: 4 - label: "PyTorch Fullgraph Smoke Test" # 9min @@ -475,18 +475,23 @@ steps: - pytest -v -s distributed/test_pp_cudagraph.py - pytest -v -s distributed/test_pipeline_parallel.py -- label: LoRA Long Context (Distributed) # 11min - # This test runs llama 13B, so it is required to run on 4 GPUs. +- label: LoRA TP Test (Distributed) num_gpus: 4 soft_fail: true source_file_dependencies: - vllm/lora - - tests/lora/test_long_context + - tests/lora commands: # FIXIT: find out which code initialize cuda before running the test # before the fix, we need to use spawn to test it - export VLLM_WORKER_MULTIPROC_METHOD=spawn + # This test runs llama 13B, so it is required to run on 4 GPUs. - pytest -v -s -x lora/test_long_context.py + # There is some Tensor Parallelism related processing logic in LoRA that + # requires multi-GPU testing for validation. + - pytest -v -s -x lora/test_chatglm3_tp.py + - pytest -v -s -x lora/test_llama_tp.py + - label: Weight Loading Multiple GPU Test # 33min working_dir: "/vllm-workspace/tests" diff --git a/tests/lora/test_chatglm3.py b/tests/lora/test_chatglm3_tp.py similarity index 56% rename from tests/lora/test_chatglm3.py rename to tests/lora/test_chatglm3_tp.py index de4cbea80924e..f17464573459f 100644 --- a/tests/lora/test_chatglm3.py +++ b/tests/lora/test_chatglm3_tp.py @@ -1,12 +1,21 @@ from typing import List import vllm +from tests.utils import fork_new_process_for_each_test from vllm.lora.request import LoRARequest +from ..utils import multi_gpu_test + MODEL_PATH = "THUDM/chatglm3-6b" PROMPT_TEMPLATE = """I want you to act as a SQL terminal in front of an example database, you need only to return the sql command to me.Below is an instruction that describes a task, Write a response that appropriately completes the request.\n"\n##Instruction:\nconcert_singer contains tables such as stadium, singer, concert, singer_in_concert. Table stadium has columns such as Stadium_ID, Location, Name, Capacity, Highest, Lowest, Average. 
Stadium_ID is the primary key.\nTable singer has columns such as Singer_ID, Name, Country, Song_Name, Song_release_year, Age, Is_male. Singer_ID is the primary key.\nTable concert has columns such as concert_ID, concert_Name, Theme, Stadium_ID, Year. concert_ID is the primary key.\nTable singer_in_concert has columns such as concert_ID, Singer_ID. concert_ID is the primary key.\nThe Stadium_ID of concert is the foreign key of Stadium_ID of stadium.\nThe Singer_ID of singer_in_concert is the foreign key of Singer_ID of singer.\nThe concert_ID of singer_in_concert is the foreign key of concert_ID of concert.\n\n###Input:\n{query}\n\n###Response:""" # noqa: E501 +EXPECTED_LORA_OUTPUT = [ + "SELECT count(*) FROM singer", + "SELECT avg(age) , min(age) , max(age) FROM singer WHERE country = 'France'", # noqa: E501 + "SELECT name , country , age FROM singer ORDER BY age", +] + def do_sample(llm: vllm.LLM, lora_path: str, lora_id: int) -> List[str]: prompts = [ @@ -20,7 +29,6 @@ def do_sample(llm: vllm.LLM, lora_path: str, lora_id: int) -> List[str]: "Show name, country, age for all singers ordered by age from the oldest to the youngest." # noqa: E501 ), ] - print(prompts) sampling_params = vllm.SamplingParams(temperature=0, max_tokens=32) outputs = llm.generate( prompts, @@ -37,23 +45,58 @@ def do_sample(llm: vllm.LLM, lora_path: str, lora_id: int) -> List[str]: return generated_texts +@fork_new_process_for_each_test def test_chatglm3_lora(chatglm3_lora_files): llm = vllm.LLM(MODEL_PATH, max_model_len=1024, enable_lora=True, max_loras=4, max_lora_rank=64, + tensor_parallel_size=1, trust_remote_code=True) - expected_lora_output = [ - "SELECT count(*) FROM singer", - "SELECT avg(age) , min(age) , max(age) FROM singer WHERE country = 'France'", # noqa: E501 - "SELECT name , country , age FROM singer ORDER BY age", - ] + output1 = do_sample(llm, chatglm3_lora_files, lora_id=1) + for i in range(len(EXPECTED_LORA_OUTPUT)): + assert output1[i] == EXPECTED_LORA_OUTPUT[i] + output2 = do_sample(llm, chatglm3_lora_files, lora_id=2) + for i in range(len(EXPECTED_LORA_OUTPUT)): + assert output2[i] == EXPECTED_LORA_OUTPUT[i] + +@multi_gpu_test(num_gpus=4) +@fork_new_process_for_each_test +def test_chatglm3_lora_tp4(chatglm3_lora_files): + llm = vllm.LLM(MODEL_PATH, + max_model_len=1024, + enable_lora=True, + max_loras=4, + max_lora_rank=64, + tensor_parallel_size=4, + trust_remote_code=True, + fully_sharded_loras=False) + + output1 = do_sample(llm, chatglm3_lora_files, lora_id=1) + for i in range(len(EXPECTED_LORA_OUTPUT)): + assert output1[i] == EXPECTED_LORA_OUTPUT[i] + output2 = do_sample(llm, chatglm3_lora_files, lora_id=2) + for i in range(len(EXPECTED_LORA_OUTPUT)): + assert output2[i] == EXPECTED_LORA_OUTPUT[i] + + +@multi_gpu_test(num_gpus=4) +@fork_new_process_for_each_test +def test_chatglm3_lora_tp4_fully_sharded_loras(chatglm3_lora_files): + llm = vllm.LLM(MODEL_PATH, + max_model_len=1024, + enable_lora=True, + max_loras=4, + max_lora_rank=64, + tensor_parallel_size=4, + trust_remote_code=True, + fully_sharded_loras=True) output1 = do_sample(llm, chatglm3_lora_files, lora_id=1) - for i in range(len(expected_lora_output)): - assert output1[i] == expected_lora_output[i] + for i in range(len(EXPECTED_LORA_OUTPUT)): + assert output1[i] == EXPECTED_LORA_OUTPUT[i] output2 = do_sample(llm, chatglm3_lora_files, lora_id=2) - for i in range(len(expected_lora_output)): - assert output2[i] == expected_lora_output[i] + for i in range(len(EXPECTED_LORA_OUTPUT)): + assert output2[i] == EXPECTED_LORA_OUTPUT[i] 
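The decorator pattern above generalizes to other tensor-parallel degrees; a minimal sketch of what an additional case could look like (this TP=2 variant is illustrative and not part of the patch):

@multi_gpu_test(num_gpus=2)
@fork_new_process_for_each_test
def test_chatglm3_lora_tp2(chatglm3_lora_files):
    # As in the CI pipeline, run with VLLM_WORKER_MULTIPROC_METHOD=spawn.
    llm = vllm.LLM(MODEL_PATH,
                   max_model_len=1024,
                   enable_lora=True,
                   max_loras=4,
                   max_lora_rank=64,
                   tensor_parallel_size=2,
                   trust_remote_code=True)
    output = do_sample(llm, chatglm3_lora_files, lora_id=1)
    for i in range(len(EXPECTED_LORA_OUTPUT)):
        assert output[i] == EXPECTED_LORA_OUTPUT[i]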
diff --git a/tests/lora/test_llama.py b/tests/lora/test_llama.py deleted file mode 100644 index e2a4f1ed0496a..0000000000000 --- a/tests/lora/test_llama.py +++ /dev/null @@ -1,146 +0,0 @@ -from typing import List - -import pytest -import ray - -import vllm -from vllm.distributed import cleanup_dist_env_and_memory -from vllm.lora.request import LoRARequest - -MODEL_PATH = "meta-llama/Llama-2-7b-hf" - - -def do_sample(llm: vllm.LLM, lora_path: str, lora_id: int) -> List[str]: - prompts = [ - "[user] Write a SQL query to answer the question based on the table schema.\n\n context: CREATE TABLE table_name_74 (icao VARCHAR, airport VARCHAR)\n\n question: Name the ICAO for lilongwe international airport [/user] [assistant]", # noqa: E501 - "[user] Write a SQL query to answer the question based on the table schema.\n\n context: CREATE TABLE table_name_11 (nationality VARCHAR, elector VARCHAR)\n\n question: When Anchero Pantaleone was the elector what is under nationality? [/user] [assistant]", # noqa: E501 - "[user] Write a SQL query to answer the question based on the table schema.\n\n context: CREATE TABLE table_name_95 (one_mora VARCHAR, gloss VARCHAR, accented_mora VARCHAR)\n\n question: What is the one mora for a low tone mora with a gloss of /˩okiru/ [òkìɽɯ́]? [/user] [assistant]", # noqa: E501 - "[user] Write a SQL query to answer the question based on the table schema.\n\n context: CREATE TABLE candidate (people_id VARCHAR, unsure_rate INTEGER); CREATE TABLE people (sex VARCHAR, people_id VARCHAR)\n\n question: which gender got the highest average uncertain ratio. [/user] [assistant]", # noqa: E501 - "[user] Write a SQL query to answer the question based on the table schema.\n\n context: CREATE TABLE table_name_60 (pick INTEGER, former_wnba_team VARCHAR)\n\n question: What pick was a player that previously played for the Minnesota Lynx? [/user] [assistant]", # noqa: E501 - "[user] Write a SQL query to answer the question based on the table schema.\n\n context: CREATE TABLE table_28138035_4 (womens_doubles VARCHAR, mens_singles VARCHAR)\n\n question: Name the women's doubles for werner schlager [/user] [assistant]" # noqa: E501 - ] - sampling_params = vllm.SamplingParams(temperature=0, - max_tokens=256, - stop=["[/assistant]"]) - outputs = llm.generate( - prompts, - sampling_params, - lora_request=LoRARequest(str(lora_id), lora_id, lora_path) - if lora_id else None) - # Print the outputs. 
- generated_texts: List[str] = [] - for output in outputs: - prompt = output.prompt - generated_text = output.outputs[0].text - generated_texts.append(generated_text) - print(f"Prompt: {prompt!r}, Generated text: {generated_text!r}") - return generated_texts - - -@pytest.mark.parametrize("tp_size", [1, 2, 4]) -def test_llama_lora(sql_lora_files, tp_size, num_gpus_available): - if num_gpus_available < tp_size: - pytest.skip(f"Not enough GPUs for tensor parallelism {tp_size}") - - llm = vllm.LLM(MODEL_PATH, - enable_lora=True, - max_num_seqs=16, - max_loras=4, - tensor_parallel_size=tp_size) - - expected_no_lora_output = [ - "\n\n [user] Write a SQL query to answer the question based on the table schema.\n\n context: CREATE TABLE table_name_75 (icao VARCHAR, airport VARCHAR)\n\n question: Name the ICAO for lilongwe international airport [/user] [assistant]\n\n [user] Write a SQL query to answer the question based on the table schema.\n\n context: CREATE TABLE table_name_76 (icao VARCHAR, airport VARCHAR)\n\n question: Name the ICAO for lilongwe international airport [/user] [assistant]\n\n [user] Write a SQL query to answer the question based on the table schema.\n\n context: CREATE TABLE table_name_77 (icao VARCHAR, airport VARCHAR)\n\n question: Name the ICAO for lilongwe international airport [/user] [assistant]\n\n [user] Write a SQL query to answer the question based on the table schema.\n\n context: CREATE TABLE table_name_78 (icao VARCHAR, airport VARCHAR)\n\n question: Name the ICAO for lilongwe international airport [/user]", # noqa: E501 - " Write a SQL query to answer the question based on the table schema.\n\n context: CREATE TABLE table_name_11 (nationality VARCHAR, elector VARCHAR)\n\n question: When Anchero Pantaleone was the elector what is under nationality? ", # noqa: E501 - "\n\n answer: 1\n\n [user] Write a SQL query to answer the question based on the table schema.\n\n context: CREATE TABLE table_name_96 (one_mora VARCHAR, gloss VARCHAR, accented_mora VARCHAR)\n\n question: What is the one mora for a high tone mora with a gloss of /˧kot/ [kòt]? [/user] [assistant]\n\n answer: 2\n\n [user] Write a SQL query to answer the question based on the table schema.\n\n context: CREATE TABLE table_name_97 (one_mora VARCHAR, gloss VARCHAR, accented_mora VARCHAR)\n\n question: What is the one mora for a high tone mora with a gloss of /˧kot/ [kòt]? [/user] [assistant]\n\n answer: 2\n\n [user] Write a SQL query to answer the question based on the table schema.\n\n context: CREATE TABLE table_name_98 (one_mora VARCHAR, gloss VARCHAR, accented_mora VARCHAR)\n\n question: What is the one m", # noqa: E501 - " Write a SQL query to answer the question based on the table schema.\n\n context: CREATE TABLE candidate (people_id VARCHAR, unsure_rate INTEGER); CREATE TABLE people (sex VARCHAR, people_id VARCHAR)\n\n question: which gender got the highest average uncertain ratio. ", # noqa: E501 - " Write a SQL query to answer the question based on the table schema.\n\n context: CREATE TABLE table_name_60 (pick INTEGER, former_wnba_team VARCHAR)\n\n question: What pick was a player that previously played for the Minnesota Lynx? 
", # noqa: E501 - "\n\n [user] Write a SQL query to answer the question based on the table schema.\n\n context: CREATE TABLE table_28138035_4 (womens_doubles VARCHAR, mens_singles VARCHAR)\n\n question: Name the women's doubles for werner schlager [/user] [assistant]\n\n [user] Write a SQL query to answer the question based on the table schema.\n\n context: CREATE TABLE table_28138035_4 (womens_doubles VARCHAR, mens_singles VARCHAR)\n\n question: Name the women's doubles for werner schlager [/user] [assistant]\n\n [user] Write a SQL query to answer the question based on the table schema.\n\n context: CREATE TABLE table_28138035_4 (womens_doubles VARCHAR, mens_singles VARCHAR)\n\n question: Name the women's doubles for werner schlager [/user] [assistant]\n\n [user] Write a SQL query to answer the question based on the table schema.\n\n context: CREATE TABLE", # noqa: E501 - ] - expected_lora_output = [ - " SELECT icao FROM table_name_74 WHERE airport = 'lilongwe international airport' ", # noqa: E501 - " SELECT nationality FROM table_name_11 WHERE elector = 'anchero pantaleone' ", # noqa: E501 - " SELECT one_mora FROM table_name_95 WHERE gloss = 'low tone mora with a gloss of /˩okiru/' [òkìɽɯ́] AND accented_mora = 'low tone mora with a gloss of /˩okiru/' [òkìɽɯ́] ", # noqa: E501 - " SELECT sex FROM people WHERE people_id IN (SELECT people_id FROM candidate GROUP BY sex ORDER BY COUNT(people_id) DESC LIMIT 1) ", # noqa: E501 - " SELECT pick FROM table_name_60 WHERE former_wnba_team = 'Minnesota Lynx' ", # noqa: E501 - " SELECT womens_doubles FROM table_28138035_4 WHERE mens_singles = 'Werner Schlager' " # noqa: E501 - ] - - print("lora adapter created") - assert do_sample(llm, sql_lora_files, lora_id=0) == expected_no_lora_output - - print("lora 1") - assert do_sample(llm, sql_lora_files, lora_id=1) == expected_lora_output - - print("no lora") - assert do_sample(llm, sql_lora_files, lora_id=0) == expected_no_lora_output - - print("lora 2") - assert do_sample(llm, sql_lora_files, lora_id=2) == expected_lora_output - - print("removing lora") - - -def test_llama_tensor_parallel_equality(sql_lora_files, num_gpus_available): - if num_gpus_available < 4: - pytest.skip("Not enough GPUs for tensor parallelism 4") - - llm_tp1 = vllm.LLM(MODEL_PATH, - enable_lora=True, - max_num_seqs=16, - max_loras=4, - tensor_parallel_size=1) - output_tp1 = do_sample(llm_tp1, sql_lora_files, lora_id=1) - - del llm_tp1 - cleanup_dist_env_and_memory() - - llm_tp2 = vllm.LLM(MODEL_PATH, - enable_lora=True, - max_num_seqs=16, - max_loras=4, - tensor_parallel_size=2) - output_tp2 = do_sample(llm_tp2, sql_lora_files, lora_id=1) - - del llm_tp2 - cleanup_dist_env_and_memory() - - assert output_tp1 == output_tp2 - - llm_tp4 = vllm.LLM(MODEL_PATH, - enable_lora=True, - max_num_seqs=16, - max_loras=4, - tensor_parallel_size=4) - output_tp4 = do_sample(llm_tp4, sql_lora_files, lora_id=1) - - del llm_tp4 - cleanup_dist_env_and_memory() - - assert output_tp1 == output_tp4 - - -def test_llama_lora_warmup(sql_lora_files): - """Test that the LLM initialization works with a warmup LORA path and - is more conservative""" - - @ray.remote(num_gpus=1) - def get_num_gpu_blocks_lora(): - llm = vllm.LLM(MODEL_PATH, enable_lora=True, max_num_seqs=16) - num_gpu_blocks_lora_warmup = llm.llm_engine.cache_config.num_gpu_blocks - return num_gpu_blocks_lora_warmup - - @ray.remote(num_gpus=1) - def get_num_gpu_blocks_no_lora(): - llm = vllm.LLM(MODEL_PATH, max_num_seqs=16) - num_gpu_blocks_no_lora_warmup = ( - 
llm.llm_engine.cache_config.num_gpu_blocks) - return num_gpu_blocks_no_lora_warmup - - num_gpu_blocks_lora_warmup = ray.get(get_num_gpu_blocks_lora.remote()) - num_gpu_blocks_no_lora_warmup = ray.get( - get_num_gpu_blocks_no_lora.remote()) - assert num_gpu_blocks_lora_warmup < num_gpu_blocks_no_lora_warmup, ( - "The warmup with lora should be more " - "conservative than without lora, therefore the number of " - "memory blocks for the KV cache should be " - "less when using lora than when not using lora") diff --git a/tests/lora/test_llama_tp.py b/tests/lora/test_llama_tp.py new file mode 100644 index 0000000000000..aae6310a2a213 --- /dev/null +++ b/tests/lora/test_llama_tp.py @@ -0,0 +1,161 @@ +from typing import List + +import ray + +import vllm +from tests.utils import fork_new_process_for_each_test +from vllm.lora.request import LoRARequest + +from ..utils import multi_gpu_test + +MODEL_PATH = "meta-llama/Llama-2-7b-hf" + +EXPECTED_NO_LORA_OUTPUT = [ + "\n\n [user] Write a SQL query to answer the question based on the table schema.\n\n context: CREATE TABLE table_name_75 (icao VARCHAR, airport VARCHAR)\n\n question: Name the ICAO for lilongwe international airport [/user] [assistant]\n\n [user] Write a SQL query to answer the question based on the table schema.\n\n context: CREATE TABLE table_name_76 (icao VARCHAR, airport VARCHAR)\n\n question: Name the ICAO for lilongwe international airport [/user] [assistant]\n\n [user] Write a SQL query to answer the question based on the table schema.\n\n context: CREATE TABLE table_name_77 (icao VARCHAR, airport VARCHAR)\n\n question: Name the ICAO for lilongwe international airport [/user] [assistant]\n\n [user] Write a SQL query to answer the question based on the table schema.\n\n context: CREATE TABLE table_name_78 (icao VARCHAR, airport VARCHAR)\n\n question: Name the ICAO for lilongwe international airport [/user]", # noqa: E501 + " Write a SQL query to answer the question based on the table schema.\n\n context: CREATE TABLE table_name_11 (nationality VARCHAR, elector VARCHAR)\n\n question: When Anchero Pantaleone was the elector what is under nationality? ", # noqa: E501 + "\n\n answer: 1\n\n [user] Write a SQL query to answer the question based on the table schema.\n\n context: CREATE TABLE table_name_96 (one_mora VARCHAR, gloss VARCHAR, accented_mora VARCHAR)\n\n question: What is the one mora for a high tone mora with a gloss of /˧kot/ [kòt]? [/user] [assistant]\n\n answer: 2\n\n [user] Write a SQL query to answer the question based on the table schema.\n\n context: CREATE TABLE table_name_97 (one_mora VARCHAR, gloss VARCHAR, accented_mora VARCHAR)\n\n question: What is the one mora for a high tone mora with a gloss of /˧kot/ [kòt]? [/user] [assistant]\n\n answer: 2\n\n [user] Write a SQL query to answer the question based on the table schema.\n\n context: CREATE TABLE table_name_98 (one_mora VARCHAR, gloss VARCHAR, accented_mora VARCHAR)\n\n question: What is the one m", # noqa: E501 + " Write a SQL query to answer the question based on the table schema.\n\n context: CREATE TABLE candidate (people_id VARCHAR, unsure_rate INTEGER); CREATE TABLE people (sex VARCHAR, people_id VARCHAR)\n\n question: which gender got the highest average uncertain ratio. ", # noqa: E501 + " Write a SQL query to answer the question based on the table schema.\n\n context: CREATE TABLE table_name_60 (pick INTEGER, former_wnba_team VARCHAR)\n\n question: What pick was a player that previously played for the Minnesota Lynx? 
", # noqa: E501 + "\n\n [user] Write a SQL query to answer the question based on the table schema.\n\n context: CREATE TABLE table_28138035_4 (womens_doubles VARCHAR, mens_singles VARCHAR)\n\n question: Name the women's doubles for werner schlager [/user] [assistant]\n\n [user] Write a SQL query to answer the question based on the table schema.\n\n context: CREATE TABLE table_28138035_4 (womens_doubles VARCHAR, mens_singles VARCHAR)\n\n question: Name the women's doubles for werner schlager [/user] [assistant]\n\n [user] Write a SQL query to answer the question based on the table schema.\n\n context: CREATE TABLE table_28138035_4 (womens_doubles VARCHAR, mens_singles VARCHAR)\n\n question: Name the women's doubles for werner schlager [/user] [assistant]\n\n [user] Write a SQL query to answer the question based on the table schema.\n\n context: CREATE TABLE", # noqa: E501 +] +EXPECTED_LORA_OUTPUT = [ + " SELECT icao FROM table_name_74 WHERE airport = 'lilongwe international airport' ", # noqa: E501 + " SELECT nationality FROM table_name_11 WHERE elector = 'anchero pantaleone' ", # noqa: E501 + " SELECT one_mora FROM table_name_95 WHERE gloss = 'low tone mora with a gloss of /˩okiru/' [òkìɽɯ́] AND accented_mora = 'low tone mora with a gloss of /˩okiru/' [òkìɽɯ́] ", # noqa: E501 + " SELECT sex FROM people WHERE people_id IN (SELECT people_id FROM candidate GROUP BY sex ORDER BY COUNT(people_id) DESC LIMIT 1) ", # noqa: E501 + " SELECT pick FROM table_name_60 WHERE former_wnba_team = 'Minnesota Lynx' ", # noqa: E501 + " SELECT womens_doubles FROM table_28138035_4 WHERE mens_singles = 'Werner Schlager' " # noqa: E501 +] + + +def do_sample(llm: vllm.LLM, lora_path: str, lora_id: int) -> List[str]: + prompts = [ + "[user] Write a SQL query to answer the question based on the table schema.\n\n context: CREATE TABLE table_name_74 (icao VARCHAR, airport VARCHAR)\n\n question: Name the ICAO for lilongwe international airport [/user] [assistant]", # noqa: E501 + "[user] Write a SQL query to answer the question based on the table schema.\n\n context: CREATE TABLE table_name_11 (nationality VARCHAR, elector VARCHAR)\n\n question: When Anchero Pantaleone was the elector what is under nationality? [/user] [assistant]", # noqa: E501 + "[user] Write a SQL query to answer the question based on the table schema.\n\n context: CREATE TABLE table_name_95 (one_mora VARCHAR, gloss VARCHAR, accented_mora VARCHAR)\n\n question: What is the one mora for a low tone mora with a gloss of /˩okiru/ [òkìɽɯ́]? [/user] [assistant]", # noqa: E501 + "[user] Write a SQL query to answer the question based on the table schema.\n\n context: CREATE TABLE candidate (people_id VARCHAR, unsure_rate INTEGER); CREATE TABLE people (sex VARCHAR, people_id VARCHAR)\n\n question: which gender got the highest average uncertain ratio. [/user] [assistant]", # noqa: E501 + "[user] Write a SQL query to answer the question based on the table schema.\n\n context: CREATE TABLE table_name_60 (pick INTEGER, former_wnba_team VARCHAR)\n\n question: What pick was a player that previously played for the Minnesota Lynx? 
[/user] [assistant]", # noqa: E501 + "[user] Write a SQL query to answer the question based on the table schema.\n\n context: CREATE TABLE table_28138035_4 (womens_doubles VARCHAR, mens_singles VARCHAR)\n\n question: Name the women's doubles for werner schlager [/user] [assistant]" # noqa: E501 + ] + sampling_params = vllm.SamplingParams(temperature=0, + max_tokens=256, + stop=["[/assistant]"]) + outputs = llm.generate( + prompts, + sampling_params, + lora_request=LoRARequest(str(lora_id), lora_id, lora_path) + if lora_id else None) + # Print the outputs. + generated_texts: List[str] = [] + for output in outputs: + prompt = output.prompt + generated_text = output.outputs[0].text + generated_texts.append(generated_text) + print(f"Prompt: {prompt!r}, Generated text: {generated_text!r}") + return generated_texts + + +@fork_new_process_for_each_test +def test_llama_lora(sql_lora_files): + + llm = vllm.LLM(MODEL_PATH, + enable_lora=True, + max_num_seqs=16, + max_loras=4, + tensor_parallel_size=1) + + print("lora adapter created") + assert do_sample(llm, sql_lora_files, lora_id=0) == EXPECTED_NO_LORA_OUTPUT + + print("lora 1") + assert do_sample(llm, sql_lora_files, lora_id=1) == EXPECTED_LORA_OUTPUT + + print("no lora") + assert do_sample(llm, sql_lora_files, lora_id=0) == EXPECTED_NO_LORA_OUTPUT + + print("lora 2") + assert do_sample(llm, sql_lora_files, lora_id=2) == EXPECTED_LORA_OUTPUT + + print("removing lora") + + +@fork_new_process_for_each_test +def test_llama_lora_warmup(sql_lora_files): + """Test that the LLM initialization works with a warmup LORA path and + is more conservative""" + + @ray.remote(num_gpus=1) + def get_num_gpu_blocks_lora(): + llm = vllm.LLM(MODEL_PATH, enable_lora=True, max_num_seqs=16) + num_gpu_blocks_lora_warmup = llm.llm_engine.cache_config.num_gpu_blocks + return num_gpu_blocks_lora_warmup + + @ray.remote(num_gpus=1) + def get_num_gpu_blocks_no_lora(): + llm = vllm.LLM(MODEL_PATH, max_num_seqs=16) + num_gpu_blocks_no_lora_warmup = ( + llm.llm_engine.cache_config.num_gpu_blocks) + return num_gpu_blocks_no_lora_warmup + + num_gpu_blocks_lora_warmup = ray.get(get_num_gpu_blocks_lora.remote()) + num_gpu_blocks_no_lora_warmup = ray.get( + get_num_gpu_blocks_no_lora.remote()) + assert num_gpu_blocks_lora_warmup < num_gpu_blocks_no_lora_warmup, ( + "The warmup with lora should be more " + "conservative than without lora, therefore the number of " + "memory blocks for the KV cache should be " + "less when using lora than when not using lora") + + +@multi_gpu_test(num_gpus=4) +@fork_new_process_for_each_test +def test_llama_lora_tp4(sql_lora_files): + + llm = vllm.LLM( + MODEL_PATH, + enable_lora=True, + max_num_seqs=16, + max_loras=4, + tensor_parallel_size=4, + ) + + print("lora adapter created") + assert do_sample(llm, sql_lora_files, lora_id=0) == EXPECTED_NO_LORA_OUTPUT + + print("lora 1") + assert do_sample(llm, sql_lora_files, lora_id=1) == EXPECTED_LORA_OUTPUT + + print("no lora") + assert do_sample(llm, sql_lora_files, lora_id=0) == EXPECTED_NO_LORA_OUTPUT + + print("lora 2") + assert do_sample(llm, sql_lora_files, lora_id=2) == EXPECTED_LORA_OUTPUT + + print("removing lora") + + +@multi_gpu_test(num_gpus=4) +@fork_new_process_for_each_test +def test_llama_lora_tp4_fully_sharded_loras(sql_lora_files): + + llm = vllm.LLM( + MODEL_PATH, + enable_lora=True, + max_num_seqs=16, + max_loras=4, + tensor_parallel_size=4, + fully_sharded_loras=True, + ) + print("lora adapter created") + assert do_sample(llm, sql_lora_files, lora_id=0) == EXPECTED_NO_LORA_OUTPUT + + 
print("lora 1") + assert do_sample(llm, sql_lora_files, lora_id=1) == EXPECTED_LORA_OUTPUT + + print("no lora") + assert do_sample(llm, sql_lora_files, lora_id=0) == EXPECTED_NO_LORA_OUTPUT + + print("lora 2") + assert do_sample(llm, sql_lora_files, lora_id=2) == EXPECTED_LORA_OUTPUT + + print("removing lora") diff --git a/vllm/lora/fully_sharded_layers.py b/vllm/lora/fully_sharded_layers.py index 3443c3feb4d2a..f5c2eced9d2bb 100644 --- a/vllm/lora/fully_sharded_layers.py +++ b/vllm/lora/fully_sharded_layers.py @@ -44,6 +44,11 @@ class ColumnParallelLinearWithShardedLoRA(ColumnParallelLinearWithLoRA): Based on S-LoRA, slicing happens along the rank dim. """ + # For all LoRA layers where the `base_layer` is `ColumnParallelLinear`, + # their `lora_a` and `lora_b` have different sharding patterns. After + # completing the `lora_a` GEMM , a gather operation is performed. + # Therefore, the sharding of `lora_a` only needs to correspond with the + # gather operation. def slice_lora_a(self, lora_a: torch.Tensor) -> torch.Tensor: tp_rank = get_tensor_model_parallel_rank() shard_size = self.lora_a_stacked.shape[2] diff --git a/vllm/lora/layers.py b/vllm/lora/layers.py index 6afe80219fe07..3701988ff692f 100644 --- a/vllm/lora/layers.py +++ b/vllm/lora/layers.py @@ -451,6 +451,12 @@ class ColumnParallelLinearWithLoRA(BaseLayerWithLoRA): def __init__(self, base_layer: ColumnParallelLinear) -> None: super().__init__() + # The base_layer type is ColumnParallelLinear or + # MergedColumnParallelLinear, their weight sharding logic is + # inconsistent when TP is greater than 1. + self.is_merged_col_linear = type( + base_layer) is MergedColumnParallelLinear + self.base_layer = base_layer self.tp_size = get_tensor_model_parallel_world_size() self.input_size = self.base_layer.input_size @@ -508,14 +514,30 @@ def slice_lora_a(self, lora_a: torch.Tensor) -> torch.Tensor: return lora_a def slice_lora_b(self, lora_b: torch.Tensor) -> torch.Tensor: - tensor_model_parallel_rank = get_tensor_model_parallel_rank() - shard_size = self.output_dim - start_idx = tensor_model_parallel_rank * shard_size - end_idx = (tensor_model_parallel_rank + 1) * shard_size - lora_b = lora_b[:, start_idx:end_idx] + # Applicable to cases where the base_layer is + # MergedColumnParallelLinear. + if self.is_merged_col_linear: + tp_rank = get_tensor_model_parallel_rank() + shard_size = self.output_size // 2 + offset = lora_b.shape[-1] // 2 + + left_weight = lora_b[:, tp_rank * shard_size:(tp_rank + 1) * + shard_size] + right_weight = lora_b[:, offset + tp_rank * shard_size:offset + + (tp_rank + 1) * shard_size] + lora_b = torch.cat([left_weight, right_weight], dim=1) + # Applicable to cases where the base_layer is + # ColumnParallelLinear. + else: + tensor_model_parallel_rank = get_tensor_model_parallel_rank() + shard_size = self.output_dim + start_idx = tensor_model_parallel_rank * shard_size + end_idx = (tensor_model_parallel_rank + 1) * shard_size + lora_b = lora_b[:, start_idx:end_idx] return lora_b def slice_bias(self, bias: torch.Tensor) -> torch.Tensor: + # TODO: Fix the slicing logic of bias. if bias is None: return bias tensor_model_parallel_rank = get_tensor_model_parallel_rank() @@ -779,7 +801,7 @@ def can_replace_layer( class QKVParallelLinearWithLora(ColumnParallelLinearWithLoRA): """ ColumnParallelLinear layer that is specifically designed for - qkv_proj. Certain models, such as chtglm3 and baichuan-7b, + qkv_proj. Certain models, such as chatglm3 and baichuan-7b, only contains a single LoRA within their qkv_proj layer. 
During inference with Tensor Parallel, the weights of lora_b diff --git a/vllm/model_executor/models/chatglm.py b/vllm/model_executor/models/chatglm.py index e3a068908b7f3..5bcbce7180ca4 100644 --- a/vllm/model_executor/models/chatglm.py +++ b/vllm/model_executor/models/chatglm.py @@ -760,7 +760,7 @@ def __new__( config = vllm_config.model_config.hf_config # Initialize VL if hasattr(config, "visual"): - return ChatGLM(vllm_config=vllm_config, prefix=prefix) + return ChatGLMV(vllm_config=vllm_config, prefix=prefix) # Initialize LLM else: - return ChatGLMV(vllm_config=vllm_config, prefix=prefix) + return ChatGLM(vllm_config=vllm_config, prefix=prefix) From 1c445dca51a877ac6a5b7e03ecdb73e0e34d139e Mon Sep 17 00:00:00 2001 From: Jee Jee Li Date: Sun, 24 Nov 2024 11:57:13 +0800 Subject: [PATCH 109/122] [CI/Build] Print running script to enhance CI log readability (#10594) Signed-off-by: Jee Jee Li --- .buildkite/test-pipeline.yaml | 11 +++++++++++ 1 file changed, 11 insertions(+) diff --git a/.buildkite/test-pipeline.yaml b/.buildkite/test-pipeline.yaml index bff33d35b423e..ed8c84ce9f5c0 100644 --- a/.buildkite/test-pipeline.yaml +++ b/.buildkite/test-pipeline.yaml @@ -52,6 +52,7 @@ steps: - tests/worker - tests/test_lazy_torch_compile.py commands: + - echo 'Running test_lazy_torch_compile.py...' # print running script to enhance CI log readability - python3 test_lazy_torch_compile.py - pytest -v -s mq_llm_engine # MQLLMEngine - pytest -v -s async_engine # AsyncLLMEngine @@ -182,15 +183,25 @@ steps: - examples/ commands: - pip install awscli tensorizer # for llava example and tensorizer test + - echo 'Running offline_inference.py...' # print running script to enhance CI log readability - python3 offline_inference.py + - echo 'Running cpu_offload.py...' - python3 cpu_offload.py + - echo 'Running offline_inference_chat.py...' - python3 offline_inference_chat.py + - echo 'Running offline_inference_with_prefix.py...' - python3 offline_inference_with_prefix.py + - echo 'Running llm_engine_example.py...' - python3 llm_engine_example.py + - echo 'Running offline_inference_vision_language.py...' - python3 offline_inference_vision_language.py + - echo 'Running offline_inference_vision_language_multi_image.py...' - python3 offline_inference_vision_language_multi_image.py + - echo 'Running tensorize_vllm_model.py...' - python3 tensorize_vllm_model.py --model facebook/opt-125m serialize --serialized-directory /tmp/ --suffix v1 && python3 tensorize_vllm_model.py --model facebook/opt-125m deserialize --path-to-tensors /tmp/vllm/facebook/opt-125m/v1/model.tensors + - echo 'Running offline_inference_encoder_decoder.py...' - python3 offline_inference_encoder_decoder.py + - echo 'Running offline_profile.py...' - python3 offline_profile.py --model facebook/opt-125m - label: Prefix Caching Test # 9min From eda2b3589c8b27a9b8f8aea24afe1673890d19d2 Mon Sep 17 00:00:00 2001 From: youkaichao Date: Sat, 23 Nov 2024 21:31:47 -0800 Subject: [PATCH 110/122] Revert "Print running script to enhance CI log readability" (#10601) --- .buildkite/test-pipeline.yaml | 11 ----------- 1 file changed, 11 deletions(-) diff --git a/.buildkite/test-pipeline.yaml b/.buildkite/test-pipeline.yaml index ed8c84ce9f5c0..bff33d35b423e 100644 --- a/.buildkite/test-pipeline.yaml +++ b/.buildkite/test-pipeline.yaml @@ -52,7 +52,6 @@ steps: - tests/worker - tests/test_lazy_torch_compile.py commands: - - echo 'Running test_lazy_torch_compile.py...' 
# print running script to enhance CI log readability - python3 test_lazy_torch_compile.py - pytest -v -s mq_llm_engine # MQLLMEngine - pytest -v -s async_engine # AsyncLLMEngine @@ -183,25 +182,15 @@ steps: - examples/ commands: - pip install awscli tensorizer # for llava example and tensorizer test - - echo 'Running offline_inference.py...' # print running script to enhance CI log readability - python3 offline_inference.py - - echo 'Running cpu_offload.py...' - python3 cpu_offload.py - - echo 'Running offline_inference_chat.py...' - python3 offline_inference_chat.py - - echo 'Running offline_inference_with_prefix.py...' - python3 offline_inference_with_prefix.py - - echo 'Running llm_engine_example.py...' - python3 llm_engine_example.py - - echo 'Running offline_inference_vision_language.py...' - python3 offline_inference_vision_language.py - - echo 'Running offline_inference_vision_language_multi_image.py...' - python3 offline_inference_vision_language_multi_image.py - - echo 'Running tensorize_vllm_model.py...' - python3 tensorize_vllm_model.py --model facebook/opt-125m serialize --serialized-directory /tmp/ --suffix v1 && python3 tensorize_vllm_model.py --model facebook/opt-125m deserialize --path-to-tensors /tmp/vllm/facebook/opt-125m/v1/model.tensors - - echo 'Running offline_inference_encoder_decoder.py...' - python3 offline_inference_encoder_decoder.py - - echo 'Running offline_profile.py...' - python3 offline_profile.py --model facebook/opt-125m - label: Prefix Caching Test # 9min From c055747867e771dbc791c9aa3c394c4d4489cd82 Mon Sep 17 00:00:00 2001 From: youkaichao Date: Sat, 23 Nov 2024 22:22:54 -0800 Subject: [PATCH 111/122] [model][utils] add extract_layer_index utility function (#10599) Signed-off-by: youkaichao --- vllm/model_executor/models/arctic.py | 41 +++++++++++-------------- vllm/model_executor/models/deepseek.py | 19 +++++++----- vllm/model_executor/models/gemma2.py | 15 +++------ vllm/model_executor/models/olmoe.py | 8 ++--- vllm/model_executor/models/qwen2_moe.py | 6 ++-- vllm/model_executor/models/utils.py | 21 +++++++++++++ 6 files changed, 59 insertions(+), 51 deletions(-) diff --git a/vllm/model_executor/models/arctic.py b/vllm/model_executor/models/arctic.py index ac4c464aa10ac..fd6b5659df5d1 100644 --- a/vllm/model_executor/models/arctic.py +++ b/vllm/model_executor/models/arctic.py @@ -33,7 +33,7 @@ from vllm.transformers_utils.configs.arctic import ArcticConfig from .interfaces import SupportsPP -from .utils import (is_pp_missing_parameter, +from .utils import (extract_layer_index, is_pp_missing_parameter, make_empty_intermediate_tensors_factory, make_layers, maybe_prefix) @@ -44,15 +44,14 @@ class ArcticMLP(nn.Module): def __init__(self, config: ArcticConfig, - layer_id: int, expert_id: int = -1, is_residual_mlp: bool = False, quant_config: Optional[QuantizationConfig] = None, - reduce_results: bool = True): + reduce_results: bool = True, + prefix: str = ""): super().__init__() self.hidden_size = config.hidden_size self.expert_id = expert_id - self.layer_id = layer_id self.ffn_dim = config.intermediate_size if not is_residual_mlp \ else self.hidden_size @@ -85,13 +84,14 @@ class ArcticMoE(nn.Module): def __init__(self, config: ArcticConfig, - layer_id: int, tp_size: Optional[int] = None, params_dtype: Optional[torch.dtype] = None, quant_config: Optional[QuantizationConfig] = None, - reduce_results: bool = True): + reduce_results: bool = True, + prefix: str = ""): super().__init__() + layer_id = extract_layer_index(prefix) self.tp_size = tp_size or 
get_tensor_model_parallel_world_size() self.hidden_size = config.hidden_size self.num_experts = config.num_local_experts @@ -109,15 +109,16 @@ def __init__(self, if not self.is_moe_layer: self.mlp = ArcticMLP(config, - layer_id=layer_id, quant_config=quant_config, - reduce_results=reduce_results) + reduce_results=reduce_results, + prefix=f"{prefix}.mlp") else: self.gate = ReplicatedLinear(self.hidden_size, self.num_experts, bias=False, params_dtype=self.params_dtype, - quant_config=quant_config) + quant_config=quant_config, + prefix=f"{prefix}.gate") if self.is_quant: self.ws = DeepSpeedFPParameter( torch.Size((self.num_experts, 2 * self.intermediate_size, @@ -220,14 +221,12 @@ class ArcticAttention(nn.Module): def __init__( self, config: ArcticConfig, - layer_idx: Optional[int] = None, cache_config: Optional[CacheConfig] = None, quant_config: Optional[QuantizationConfig] = None, prefix: str = "", ): super().__init__() self.config = config - self.layer_idx = layer_idx self.hidden_size = config.hidden_size tp_size = get_tensor_model_parallel_world_size() @@ -298,26 +297,25 @@ class ArcticDecoderLayer(nn.Module): def __init__( self, config: ArcticConfig, - layer_idx: int, cache_config: Optional[CacheConfig] = None, quant_config: Optional[QuantizationConfig] = None, prefix: str = "", ) -> None: super().__init__() - self.layer_idx = layer_idx self.hidden_size = config.hidden_size + layer_idx = extract_layer_index(prefix) is_moe_layer = (layer_idx + 1) % config.moe_layer_frequency == 0 self.use_residual = config.use_residual and is_moe_layer self.self_attn = ArcticAttention(config, - layer_idx, cache_config, quant_config=quant_config, prefix=f"{prefix}.self_attn") self.block_sparse_moe = ArcticMoE( config, - layer_id=layer_idx, quant_config=quant_config, - reduce_results=(not self.use_residual)) + reduce_results=(not self.use_residual), + prefix=f"{prefix}.block_sparse_moe", + ) self.input_layernorm = RMSNorm(config.hidden_size, eps=config.rms_norm_eps) @@ -328,9 +326,9 @@ def __init__( self.residual_layernorm = RMSNorm(config.hidden_size, eps=config.rms_norm_eps) self.residual_mlp = ArcticMLP(config, - layer_id=layer_idx, is_residual_mlp=True, - reduce_results=False) + reduce_results=False, + prefix=f"{prefix}.residual_mlp") def forward( self, @@ -384,11 +382,8 @@ def __init__(self, *, vllm_config: VllmConfig, prefix: str = ""): org_num_embeddings=self.vocab_size) self.start_layer, self.end_layer, self.layers = make_layers( config.num_hidden_layers, - lambda prefix: ArcticDecoderLayer(config, - int(prefix.split(".")[-1]), - cache_config, - quant_config, - prefix=prefix), + lambda prefix: ArcticDecoderLayer( + config, cache_config, quant_config, prefix=prefix), prefix=f"{prefix}.layers") self._attn_implementation = config._attn_implementation self.norm = RMSNorm(config.hidden_size, eps=config.rms_norm_eps) diff --git a/vllm/model_executor/models/deepseek.py b/vllm/model_executor/models/deepseek.py index 32488d931ea1c..74b6bfdf21909 100644 --- a/vllm/model_executor/models/deepseek.py +++ b/vllm/model_executor/models/deepseek.py @@ -49,7 +49,7 @@ from vllm.sequence import IntermediateTensors from .interfaces import SupportsPP -from .utils import (is_pp_missing_parameter, +from .utils import (extract_layer_index, is_pp_missing_parameter, make_empty_intermediate_tensors_factory, make_layers, maybe_prefix) @@ -63,6 +63,7 @@ def __init__( hidden_act: str, quant_config: Optional[QuantizationConfig] = None, reduce_results: bool = True, + prefix: str = "", ) -> None: super().__init__() self.gate_up_proj 
= MergedColumnParallelLinear( @@ -92,6 +93,7 @@ def __init__( self, config: PretrainedConfig, quant_config: Optional[QuantizationConfig] = None, + prefix: str = "", ): super().__init__() self.config = config @@ -260,12 +262,12 @@ class DeepseekDecoderLayer(nn.Module): def __init__( self, config: PretrainedConfig, - layer_idx: int, cache_config: Optional[CacheConfig] = None, quant_config: Optional[QuantizationConfig] = None, prefix: str = "", ) -> None: super().__init__() + layer_idx = extract_layer_index(prefix) self.hidden_size = config.hidden_size rope_theta = getattr(config, "rope_theta", 10000) rope_scaling = getattr(config, "rope_scaling", None) @@ -285,13 +287,16 @@ def __init__( if (config.n_routed_experts is not None and layer_idx >= config.first_k_dense_replace and layer_idx % config.moe_layer_freq == 0): - self.mlp = DeepseekMoE(config=config, quant_config=quant_config) + self.mlp = DeepseekMoE(config=config, + quant_config=quant_config, + prefix=f"{prefix}.mlp") else: self.mlp = DeepseekMLP( hidden_size=config.hidden_size, intermediate_size=config.intermediate_size, hidden_act=config.hidden_act, quant_config=quant_config, + prefix=f"{prefix}.mlp", ) self.input_layernorm = RMSNorm(config.hidden_size, eps=config.rms_norm_eps) @@ -347,11 +352,9 @@ def __init__(self, *, vllm_config: VllmConfig, prefix: str = ""): ) self.start_layer, self.end_layer, self.layers = make_layers( config.num_hidden_layers, - lambda prefix: DeepseekDecoderLayer(config, - int(prefix.split(".")[-1]), - cache_config, - quant_config=quant_config, - prefix=prefix), + lambda prefix: DeepseekDecoderLayer( + config, cache_config, quant_config=quant_config, prefix=prefix + ), prefix=f"{prefix}.layers") self.norm = RMSNorm(config.hidden_size, eps=config.rms_norm_eps) self.make_empty_intermediate_tensors = ( diff --git a/vllm/model_executor/models/gemma2.py b/vllm/model_executor/models/gemma2.py index 9309cced61bb3..fd8223dd9be1b 100644 --- a/vllm/model_executor/models/gemma2.py +++ b/vllm/model_executor/models/gemma2.py @@ -42,7 +42,8 @@ from vllm.sequence import IntermediateTensors, PoolerOutput from .interfaces import SupportsLoRA, SupportsPP -from .utils import (AutoWeightsLoader, is_pp_missing_parameter, +from .utils import (AutoWeightsLoader, extract_layer_index, + is_pp_missing_parameter, make_empty_intermediate_tensors_factory, make_layers, maybe_prefix) @@ -85,7 +86,6 @@ def forward(self, x: torch.Tensor) -> torch.Tensor: class Gemma2Attention(nn.Module): def __init__(self, - layer_idx: int, config: Gemma2Config, hidden_size: int, num_heads: int, @@ -98,7 +98,6 @@ def __init__(self, attn_logits_soft_cap: Optional[float] = None, prefix: str = "") -> None: super().__init__() - self.layer_idx = layer_idx self.config = config self.hidden_size = hidden_size tp_size = get_tensor_model_parallel_world_size() @@ -145,6 +144,7 @@ def __init__(self, # reference: # https://github.com/huggingface/transformers/blob/54be2d7ae87e873482b984cc956e165ca4dc0ba3/src/transformers/models/gemma2/modeling_gemma2.py#L312 # noqa + layer_idx = extract_layer_index(prefix) use_sliding_window = (layer_idx % 2 == 0 and config.interleaved_sliding_window is not None) sliding_window = config.interleaved_sliding_window if \ @@ -178,7 +178,6 @@ class Gemma2DecoderLayer(nn.Module): def __init__( self, - layer_idx: int, config: Gemma2Config, cache_config: Optional[CacheConfig] = None, quant_config: Optional[QuantizationConfig] = None, @@ -187,7 +186,6 @@ def __init__( super().__init__() self.hidden_size = config.hidden_size self.self_attn = 
Gemma2Attention( - layer_idx=layer_idx, config=config, hidden_size=self.hidden_size, num_heads=config.num_attention_heads, @@ -262,11 +260,8 @@ def __init__(self, *, vllm_config: VllmConfig, prefix: str = ""): ) self.start_layer, self.end_layer, self.layers = make_layers( config.num_hidden_layers, - lambda prefix: Gemma2DecoderLayer(int(prefix.split(".")[-1]), - config, - cache_config, - quant_config, - prefix=prefix), + lambda prefix: Gemma2DecoderLayer( + config, cache_config, quant_config, prefix=prefix), prefix=f"{prefix}.layers") self.norm = GemmaRMSNorm(config.hidden_size, eps=config.rms_norm_eps) diff --git a/vllm/model_executor/models/olmoe.py b/vllm/model_executor/models/olmoe.py index 5b5b3ef48b035..5d9091cfb9311 100644 --- a/vllm/model_executor/models/olmoe.py +++ b/vllm/model_executor/models/olmoe.py @@ -181,7 +181,6 @@ class OlmoeDecoderLayer(nn.Module): def __init__( self, config: PretrainedConfig, - layer_idx: int, cache_config: Optional[CacheConfig] = None, quant_config: Optional[QuantizationConfig] = None, prefix: str = "", @@ -264,11 +263,8 @@ def __init__(self, *, vllm_config: VllmConfig, prefix: str = ""): ) self.start_layer, self.end_layer, self.layers = make_layers( config.num_hidden_layers, - lambda prefix: OlmoeDecoderLayer(config, - int(prefix.split(".")[-1]), - cache_config, - quant_config, - prefix=prefix), + lambda prefix: OlmoeDecoderLayer( + config, cache_config, quant_config, prefix=prefix), prefix=f"{prefix}.layers") self.norm = RMSNorm(config.hidden_size, eps=1e-5) diff --git a/vllm/model_executor/models/qwen2_moe.py b/vllm/model_executor/models/qwen2_moe.py index 1091f88ab2534..ba70243c6533d 100644 --- a/vllm/model_executor/models/qwen2_moe.py +++ b/vllm/model_executor/models/qwen2_moe.py @@ -53,7 +53,7 @@ from vllm.utils import print_warning_once from .interfaces import SupportsPP -from .utils import (is_pp_missing_parameter, +from .utils import (extract_layer_index, is_pp_missing_parameter, make_empty_intermediate_tensors_factory, make_layers, maybe_prefix) @@ -244,7 +244,6 @@ class Qwen2MoeDecoderLayer(nn.Module): def __init__( self, config: PretrainedConfig, - layer_idx: int, cache_config: Optional[CacheConfig] = None, quant_config: Optional[QuantizationConfig] = None, prefix: str = "", @@ -269,6 +268,7 @@ def __init__( # Note: Qwen/Qwen2-57B-A14B-Instruct does not have # `mlp_only_layers` in the config. + layer_idx = extract_layer_index(prefix) mlp_only_layers = ([] if not hasattr(config, "mlp_only_layers") else config.mlp_only_layers) if (layer_idx not in mlp_only_layers) and ( @@ -337,8 +337,6 @@ def __init__(self, *, vllm_config: VllmConfig, prefix: str = ""): self.start_layer, self.end_layer, self.layers = make_layers( config.num_hidden_layers, lambda prefix: Qwen2MoeDecoderLayer(config=config, - layer_idx=int( - prefix.split(".")[-1]), cache_config=cache_config, quant_config=quant_config, prefix=prefix), diff --git a/vllm/model_executor/models/utils.py b/vllm/model_executor/models/utils.py index 2ab9b19e22068..dcfd2cb7d2622 100644 --- a/vllm/model_executor/models/utils.py +++ b/vllm/model_executor/models/utils.py @@ -629,3 +629,24 @@ def maybe_prefix(prefix: str, name: str) -> str: The string "prefix.name" if prefix was non-empty, otherwise just "name". """ return name if not prefix else f"{prefix}.{name}" + + +def extract_layer_index(layer_name: str) -> int: + """ + Extract the layer index from the module name. 
+ Examples: + - "encoder.layers.0" -> 0 + - "encoder.layers.1.self_attn" -> 1 + - "2.self_attn" -> 2 + - "model.encoder.layers.0.sub.1" -> ValueError + """ + subnames = layer_name.split(".") + int_vals: List[int] = [] + for subname in subnames: + try: + int_vals.append(int(subname)) + except ValueError: + continue + assert len(int_vals) == 1, (f"layer name {layer_name} should" + " only contain one integer") + return int_vals[0] From e4fbb1441454847fdd871c9959b5cb05b5037aa2 Mon Sep 17 00:00:00 2001 From: youkaichao Date: Sun, 24 Nov 2024 11:21:40 -0800 Subject: [PATCH 112/122] [doc] update the code to add models (#10603) Signed-off-by: youkaichao Co-authored-by: Cyrus Leung --- docs/source/models/adding_model.rst | 85 +++++++++++++++++++---------- 1 file changed, 57 insertions(+), 28 deletions(-) diff --git a/docs/source/models/adding_model.rst b/docs/source/models/adding_model.rst index a70ebf99c746f..df06d736ca86b 100644 --- a/docs/source/models/adding_model.rst +++ b/docs/source/models/adding_model.rst @@ -38,41 +38,70 @@ For instance, vLLM's `OPT model Union[Tuple, CausalLMOutputWithPast]: - + positions: torch.Tensor, - + kv_caches: List[torch.Tensor], - + attn_metadata: AttentionMetadata, - + ) -> Optional[SamplerOutput]: - -1. Update the code by considering that :code:`input_ids` and :code:`positions` are now flattened tensors. -2. Replace the attention operation with either :code:`PagedAttention`, :code:`PagedAttentionWithRoPE`, or :code:`PagedAttentionWithALiBi` depending on the model's architecture. +To ensure compatibility with vLLM, your model must meet the following requirements: + +Initialization Code +^^^^^^^^^^^^^^^^^^^ + +All vLLM modules within the model must include a ``prefix`` argument in their constructor. This ``prefix`` is typically the full name of the module in the model's state dictionary and is crucial for: + +* Runtime support: vLLM's attention operators are registered in a model's state by their full names. Each attention operator must have a unique prefix as its layer name to avoid conflicts. +* Non-uniform quantization support: A quantized checkpoint can selectively quantize certain layers while keeping others in full precision. By providing the ``prefix`` during initialization, vLLM can match the current layer's ``prefix`` with the quantization configuration to determine if the layer should be initialized in quantized mode. + +The initialization code should look like this: + +.. code-block:: python + + from torch import nn + from vllm.config import VllmConfig + from vllm.attention import Attention + + class MyAttention(nn.Module): + def __init__(self, vllm_config: VllmConfig, prefix: str): + super().__init__() + self.attn = Attention(prefix=f"{prefix}.attn") + + class MyDecoderLayer(nn.Module): + def __init__(self, vllm_config: VllmConfig, prefix: str): + super().__init__() + self.self_attn = MyAttention(prefix=f"{prefix}.self_attn") + + class MyModel(nn.Module): + def __init__(self, vllm_config: VllmConfig, prefix: str): + super().__init__() + self.layers = nn.ModuleList( + [MyDecoderLayer(vllm_config, prefix=f"{prefix}.layers.{i}") for i in range(vllm_config.model_config.hf_config.num_hidden_layers)] + ) + + class MyModelForCausalLM(nn.Module): + def __init__(self, vllm_config: VllmConfig, prefix: str = ""): + super().__init__() + self.model = MyModel(vllm_config, prefix=f"{prefix}.model") + +Computation Code +^^^^^^^^^^^^^^^^ + +Rewrite the :meth:`~torch.nn.Module.forward` method of your model to remove any unnecessary code, such as training-specific code. 
Modify the input parameters to treat ``input_ids`` and ``positions`` as flattened tensors with a single batch size dimension, without a max-sequence length dimension. + +.. code-block:: python + + def forward( + self, + input_ids: torch.Tensor, + positions: torch.Tensor, + kv_caches: List[torch.Tensor], + attn_metadata: AttentionMetadata, + ) -> torch.Tensor: + ... .. note:: Currently, vLLM supports the basic multi-head attention mechanism and its variant with rotary positional embeddings. If your model employs a different attention mechanism, you will need to implement a new attention layer in vLLM. +For reference, check out the `LLAMA model `__. vLLM already supports a large number of models. It is recommended to find a model similar to yours and adapt it to your model's architecture. Check out the `vLLM models `__ directory for more examples. 3. (Optional) Implement tensor parallelism and quantization support ------------------------------------------------------------------- From 49628fe13e1021ce036bbae257242ab71e40aa25 Mon Sep 17 00:00:00 2001 From: Zhuohan Li Date: Sun, 24 Nov 2024 16:45:09 -0800 Subject: [PATCH 113/122] [Doc] Update README.md with Ray Summit talk links (#10610) --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index 0ef073210d070..4e1353d98f1dc 100644 --- a/README.md +++ b/README.md @@ -18,7 +18,7 @@ Easy, fast, and cheap LLM serving for everyone *Latest News* 🔥 - [2024/11] We hosted [the seventh vLLM meetup](https://lu.ma/h0qvrajz) with Snowflake! Please find the meetup slides [here](https://docs.google.com/presentation/d/1e3CxQBV3JsfGp30SwyvS3eM_tW-ghOhJ9PAJGK6KR54/edit?usp=sharing). - [2024/10] We have just created a developer slack ([slack.vllm.ai](https://slack.vllm.ai)) focusing on coordinating contributions and discussing features. Please feel free to join us there! -- [2024/10] Ray Summit 2024 held a special track for vLLM! Please find the opening talk slides from the vLLM team [here](https://docs.google.com/presentation/d/1B_KQxpHBTRa_mDF-tR6i8rWdOU5QoTZNcEg2MKZxEHM/edit?usp=sharing). Learn more from the [talks](https://raysummit.anyscale.com/flow/anyscale/raysummit2024/landing/page/sessioncatalog?tab.day=20241001&search.sessiontracks=1719251906298001uzJ2) from other vLLM contributors and users! +- [2024/10] Ray Summit 2024 held a special track for vLLM! Please find the opening talk slides from the vLLM team [here](https://docs.google.com/presentation/d/1B_KQxpHBTRa_mDF-tR6i8rWdOU5QoTZNcEg2MKZxEHM/edit?usp=sharing). Learn more from the [talks](https://www.youtube.com/playlist?list=PLzTswPQNepXl6AQwifuwUImLPFRVpksjR) from other vLLM contributors and users! - [2024/09] We hosted [the sixth vLLM meetup](https://lu.ma/87q3nvnh) with NVIDIA! Please find the meetup slides [here](https://docs.google.com/presentation/d/1wrLGwytQfaOTd5wCGSPNhoaW3nq0E-9wqyP7ny93xRs/edit?usp=sharing). - [2024/07] We hosted [the fifth vLLM meetup](https://lu.ma/lp0gyjqr) with AWS! Please find the meetup slides [here](https://docs.google.com/presentation/d/1RgUD8aCfcHocghoP3zmXzck9vX3RCI9yfUAB2Bbcl4Y/edit?usp=sharing). - [2024/07] In partnership with Meta, vLLM officially supports Llama 3.1 with FP8 quantization and pipeline parallelism! Please check out our blog post [here](https://blog.vllm.ai/2024/07/23/llama31.html). 
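
For reference, the `extract_layer_index` helper introduced in [PATCH 111/122] above can be exercised on its own. The snippet below is a minimal sketch, not part of any patch in this series; it assumes a vLLM checkout that already contains that helper, and only demonstrates how dotted module prefixes (of the form used in the helper's own docstring and in the `make_layers` factories above) map to layer indices:

```python
# Minimal sketch (assumes vLLM with the extract_layer_index helper from
# PATCH 111/122 installed): layer indices are recovered from the dotted
# module prefix instead of being passed as an explicit constructor argument.
from vllm.model_executor.models.utils import extract_layer_index

# Prefixes such as "model.layers.0" or "encoder.layers.1.self_attn" contain
# exactly one integer component, which the helper returns.
assert extract_layer_index("model.layers.0") == 0
assert extract_layer_index("model.layers.7.self_attn") == 7
assert extract_layer_index("2.self_attn") == 2

# Inside a decoder layer, this replaces the old explicit `layer_idx` argument:
#     layer_idx = extract_layer_index(prefix)
```

This is the reason the Arctic, Deepseek, Gemma2, OLMoE, and Qwen2-MoE layer constructors in that patch drop their `layer_idx` parameters and derive the index from `prefix` instead.
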
From 214efc2c3cb568e8eb3f7d234f3bd8f5bbe24795 Mon Sep 17 00:00:00 2001 From: Maximilien de Bayser Date: Sun, 24 Nov 2024 23:56:20 -0300 Subject: [PATCH 114/122] Support Cross encoder models (#10400) Signed-off-by: Max de Bayser Signed-off-by: Max de Bayser Signed-off-by: Flavia Beo Co-authored-by: Flavia Beo --- .../serving/openai_compatible_server.md | 142 ++++++++++++ examples/openai_cross_encoder_score.py | 58 +++++ tests/conftest.py | 20 ++ tests/entrypoints/openai/test_score.py | 93 ++++++++ .../models/embedding/language/test_scoring.py | 95 ++++++++ tests/models/registry.py | 9 + tests/models/test_registry.py | 23 +- vllm/config.py | 5 + vllm/core/scheduler.py | 1 + vllm/entrypoints/llm.py | 124 +++++++++- vllm/entrypoints/openai/api_server.py | 35 ++- vllm/entrypoints/openai/protocol.py | 36 +++ vllm/entrypoints/openai/serving_score.py | 215 ++++++++++++++++++ vllm/inputs/data.py | 18 ++ vllm/inputs/preprocess.py | 2 + vllm/model_executor/layers/pooler.py | 64 ++++++ vllm/model_executor/models/bert.py | 128 ++++++++++- vllm/model_executor/models/interfaces.py | 36 +++ vllm/model_executor/models/registry.py | 23 +- vllm/model_executor/models/roberta.py | 179 ++++++++++++--- vllm/multimodal/inputs.py | 5 +- vllm/outputs.py | 45 +++- vllm/sequence.py | 9 + vllm/transformers_utils/config.py | 15 ++ vllm/worker/cpu_embedding_model_runner.py | 4 + vllm/worker/cpu_model_runner.py | 13 ++ vllm/worker/embedding_model_runner.py | 7 +- vllm/worker/model_runner.py | 28 +++ 28 files changed, 1370 insertions(+), 62 deletions(-) create mode 100644 examples/openai_cross_encoder_score.py create mode 100644 tests/entrypoints/openai/test_score.py create mode 100644 tests/models/embedding/language/test_scoring.py create mode 100644 vllm/entrypoints/openai/serving_score.py diff --git a/docs/source/serving/openai_compatible_server.md b/docs/source/serving/openai_compatible_server.md index 79d032bf8b211..c39cef85897ed 100644 --- a/docs/source/serving/openai_compatible_server.md +++ b/docs/source/serving/openai_compatible_server.md @@ -44,6 +44,148 @@ We currently support the following OpenAI APIs: - This enables multi-modal inputs to be passed to embedding models, see [Using VLMs](../models/vlm.rst). - *Note: You should run `vllm serve` with `--task embedding` to ensure that the model is being run in embedding mode.* +## Score API for Cross Encoder Models + +vLLM supports *cross encoders models* at the **/v1/score** endpoint, which is not an OpenAI API standard endpoint. You can find the documentation for these kind of models at [sbert.net](https://www.sbert.net/docs/package_reference/cross_encoder/cross_encoder.html). + +A ***Cross Encoder*** takes exactly two sentences / texts as input and either predicts a score or label for this sentence pair. It can for example predict the similarity of the sentence pair on a scale of 0 … 1. + +### Example of usage for a pair of a string and a list of texts + +In this case, the model will compare the first given text to each of the texts containing the list. + +```bash +curl -X 'POST' \ + 'http://127.0.0.1:8000/v1/score' \ + -H 'accept: application/json' \ + -H 'Content-Type: application/json' \ + -d '{ + "model": "BAAI/bge-reranker-v2-m3", + "text_1": "What is the capital of France?", + "text_2": [ + "The capital of Brazil is Brasilia.", + "The capital of France is Paris." 
+ ] +}' +``` + +Response: + +```bash +{ + "id": "score-request-id", + "object": "list", + "created": 693570, + "model": "BAAI/bge-reranker-v2-m3", + "data": [ + { + "index": 0, + "object": "score", + "score": [ + 0.001094818115234375 + ] + }, + { + "index": 1, + "object": "score", + "score": [ + 1 + ] + } + ], + "usage": {} +} +``` + +### Example of usage for a pair of two lists of texts + +In this case, the model will compare the one by one, making pairs by same index correspondent in each list. + +```bash +curl -X 'POST' \ + 'http://127.0.0.1:8000/v1/score' \ + -H 'accept: application/json' \ + -H 'Content-Type: application/json' \ + -d '{ + "model": "BAAI/bge-reranker-v2-m3", + "encoding_format": "float", + "text_1": [ + "What is the capital of Brazil?", + "What is the capital of France?" + ], + "text_2": [ + "The capital of Brazil is Brasilia.", + "The capital of France is Paris." + ] +}' +``` + +Response: + +```bash +{ + "id": "score-request-id", + "object": "list", + "created": 693447, + "model": "BAAI/bge-reranker-v2-m3", + "data": [ + { + "index": 0, + "object": "score", + "score": [ + 1 + ] + }, + { + "index": 1, + "object": "score", + "score": [ + 1 + ] + } + ], + "usage": {} +} +``` + +### Example of usage for a pair of two strings + +In this case, the model will compare the strings of texts. + +```bash +curl -X 'POST' \ + 'http://127.0.0.1:8000/v1/score' \ + -H 'accept: application/json' \ + -H 'Content-Type: application/json' \ + -d '{ + "model": "BAAI/bge-reranker-v2-m3", + "encoding_format": "float", + "text_1": "What is the capital of France?", + "text_2": "The capital of France is Paris." +}' +``` + +Response: + +```bash +{ + "id": "score-request-id", + "object": "list", + "created": 693447, + "model": "BAAI/bge-reranker-v2-m3", + "data": [ + { + "index": 0, + "object": "score", + "score": [ + 1 + ] + } + ], + "usage": {} +} +``` + ## Extra Parameters vLLM supports a set of parameters that are not part of the OpenAI API. diff --git a/examples/openai_cross_encoder_score.py b/examples/openai_cross_encoder_score.py new file mode 100644 index 0000000000000..8c32eea5dd252 --- /dev/null +++ b/examples/openai_cross_encoder_score.py @@ -0,0 +1,58 @@ +"""Examples Python client Score for Cross Encoder Models +""" + +import argparse +import json +import pprint + +import requests + + +def post_http_request(prompt: json, api_url: str) -> requests.Response: + headers = {"User-Agent": "Test Client"} + response = requests.post(api_url, headers=headers, json=prompt) + return response + + +if __name__ == "__main__": + parser = argparse.ArgumentParser() + parser.add_argument("--host", type=str, default="localhost") + parser.add_argument("--port", type=int, default=8000) + parser.add_argument("--model", type=str, default="BAAI/bge-reranker-v2-m3") + args = parser.parse_args() + api_url = f"http://{args.host}:{args.port}/v1/score" + + model_name = args.model + + text_1 = "What is the capital of France?" + text_2 = [ + "The capital of Brazil is Brasilia.", "The capital of France is Paris." + ] + prompt = {"model": model_name, "text_1": text_1, "text_2": text_2} + score_response = post_http_request(prompt=prompt, api_url=api_url) + print("Prompt for text_1 is string and text_2 is a list:") + pprint.pprint(prompt) + print("Score Response:") + pprint.pprint(score_response.data) + + text_1 = [ + "What is the capital of Brazil?", "What is the capital of France?" + ] + text_2 = [ + "The capital of Brazil is Brasilia.", "The capital of France is Paris." 
+ ] + prompt = {"model": model_name, "text_1": text_1, "text_2": text_2} + score_response = post_http_request(prompt=prompt, api_url=api_url) + print("Prompt for text_1 and text_2 are lists:") + pprint.pprint(prompt) + print("Score Response:") + pprint.pprint(score_response.data) + + text_1 = "What is the capital of Brazil?" + text_2 = "The capital of Brazil is Brasilia." + prompt = {"model": model_name, "text_1": text_1, "text_2": text_2} + score_response = post_http_request(prompt=prompt, api_url=api_url) + print("Prompt for text_1 and text_2 are strings:") + pprint.pprint(prompt) + print("Score Response:") + pprint.pprint(score_response.data) \ No newline at end of file diff --git a/tests/conftest.py b/tests/conftest.py index 0dc1cc6e83c18..29707f975e2a0 100644 --- a/tests/conftest.py +++ b/tests/conftest.py @@ -265,6 +265,7 @@ def __init__( model_kwargs: Optional[Dict[str, Any]] = None, is_embedding_model: bool = False, is_sentence_transformer: bool = False, + is_cross_encoder: bool = False, skip_tokenizer_init: bool = False, auto_cls: Type[_BaseAutoModelClass] = AutoModelForCausalLM, postprocess_inputs: Callable[..., BatchEncoding] = identity, @@ -282,6 +283,14 @@ def __init__( device="cpu", trust_remote_code=True, ).to(dtype=torch_dtype)) + elif is_cross_encoder: + # Lazy init required for AMD CI + from sentence_transformers import CrossEncoder + self.model = CrossEncoder(model_name, + device="cpu", + trust_remote_code=True) + self.model.model = self.wrap_device(self.model.model)\ + .to(dtype=torch_dtype) else: model_kwargs = model_kwargs if model_kwargs is not None else {} self.model = self.wrap_device( @@ -625,6 +634,9 @@ def generate_encoder_decoder_greedy_logprobs_limit( def encode(self, prompts: List[str]) -> List[List[torch.Tensor]]: return self.model.encode(prompts) + def predict(self, prompts: List[List[str]]) -> torch.Tensor: + return self.model.predict(prompts, convert_to_tensor=True) + def __enter__(self): return self @@ -898,6 +910,14 @@ def encode( req_outputs = self.model.encode(inputs) return [req_output.outputs.embedding for req_output in req_outputs] + def score( + self, + text_1: Union[str, List[str]], + text_2: Union[str, List[str]], + ) -> List[List[float]]: + req_outputs = self.model.score(text_1, text_2) + return [req_output.outputs.embedding for req_output in req_outputs] + def __enter__(self): return self diff --git a/tests/entrypoints/openai/test_score.py b/tests/entrypoints/openai/test_score.py new file mode 100644 index 0000000000000..7565ff7192f67 --- /dev/null +++ b/tests/entrypoints/openai/test_score.py @@ -0,0 +1,93 @@ +import pytest +import requests + +from vllm.entrypoints.openai.protocol import ScoreResponse + +from ...utils import RemoteOpenAIServer + +MODEL_NAME = "BAAI/bge-reranker-v2-m3" + + +@pytest.fixture(scope="module") +def server(): + args = [ + "--enforce-eager", + ] + + with RemoteOpenAIServer(MODEL_NAME, args) as remote_server: + yield remote_server + + +@pytest.mark.asyncio +@pytest.mark.parametrize("model_name", [MODEL_NAME]) +async def test_text_1_str_text_2_list(server: RemoteOpenAIServer, + model_name: str): + text_1 = "What is the capital of France?" + text_2 = [ + "The capital of Brazil is Brasilia.", "The capital of France is Paris." 
+ ] + + score_response = requests.post(server.url_for("v1/score"), + json={ + "model": model_name, + "text_1": text_1, + "text_2": text_2, + }) + score_response.raise_for_status() + score = ScoreResponse.model_validate(score_response.json()) + + assert score.id is not None + assert score.data is not None + assert len(score.data) == 2 + assert score.data[0].score[0] <= 0.01 + assert score.data[1].score[0] >= 0.9 + + +@pytest.mark.asyncio +@pytest.mark.parametrize("model_name", [MODEL_NAME]) +async def test_text_1_list_text_2_list(server: RemoteOpenAIServer, + model_name: str): + text_1 = [ + "What is the capital of the United States?", + "What is the capital of France?" + ] + text_2 = [ + "The capital of Brazil is Brasilia.", "The capital of France is Paris." + ] + + score_response = requests.post(server.url_for("v1/score"), + json={ + "model": model_name, + "text_1": text_1, + "text_2": text_2, + }) + score_response.raise_for_status() + score = ScoreResponse.model_validate(score_response.json()) + + assert score.id is not None + assert score.data is not None + assert len(score.data) == 2 + assert score.data[0].score[0] <= 0.01 + assert score.data[1].score[0] >= 0.9 + + +@pytest.mark.asyncio +@pytest.mark.parametrize("model_name", [MODEL_NAME]) +async def test_text_1_str_text_2_str(server: RemoteOpenAIServer, + model_name: str): + text_1 = "What is the capital of France?" + text_2 = "The capital of France is Paris." + + score_response = requests.post(server.url_for("v1/score"), + json={ + "model": model_name, + "text_1": text_1, + "text_2": text_2, + }) + score_response.raise_for_status() + score = ScoreResponse.model_validate(score_response.json()) + + assert score.id is not None + assert score.data is not None + assert len(score.data) == 1 + assert score.data[0].score[0] >= 0.9 diff --git a/tests/models/embedding/language/test_scoring.py b/tests/models/embedding/language/test_scoring.py new file mode 100644 index 0000000000000..30fa5ea7b36c0 --- /dev/null +++ b/tests/models/embedding/language/test_scoring.py @@ -0,0 +1,95 @@ +"""Compare the embedding outputs of HF and vLLM models. + +Run `pytest tests/models/embedding/language/test_embedding.py`. 
+""" +import math + +import pytest + +MODELS = [ + "cross-encoder/ms-marco-MiniLM-L-6-v2", # Bert + "BAAI/bge-reranker-v2-m3", # Roberta +] + +TEXTS_1 = [ + "What is the capital of France?", + "What is the capital of Germany?", +] + +TEXTS_2 = [ + "The capital of France is Paris.", + "The capital of Germany is Berlin.", +] + + +@pytest.fixture(scope="module", params=MODELS) +def model_name(request): + yield request.param + + +@pytest.mark.parametrize("dtype", ["half"]) +def test_llm_1_to_1(vllm_runner, hf_runner, model_name, dtype: str): + + text_pair = [TEXTS_1[0], TEXTS_2[0]] + + with hf_runner(model_name, dtype=dtype, is_cross_encoder=True) as hf_model: + hf_outputs = hf_model.predict([text_pair]).tolist() + + with vllm_runner(model_name, + task="embedding", + dtype=dtype, + max_model_len=None) as vllm_model: + vllm_outputs = vllm_model.score(text_pair[0], text_pair[1]) + + assert len(vllm_outputs) == 1 + assert len(hf_outputs) == 1 + + assert math.isclose(hf_outputs[0], vllm_outputs[0][0], rel_tol=0.01) + + +@pytest.mark.parametrize("dtype", ["half"]) +def test_llm_1_to_N(vllm_runner, hf_runner, model_name, dtype: str): + + text_pairs = [ + [TEXTS_1[0], TEXTS_2[0]], + [TEXTS_1[0], TEXTS_2[1]], + ] + + with hf_runner(model_name, dtype=dtype, is_cross_encoder=True) as hf_model: + hf_outputs = hf_model.predict(text_pairs).tolist() + + with vllm_runner(model_name, + task="embedding", + dtype=dtype, + max_model_len=None) as vllm_model: + vllm_outputs = vllm_model.score(TEXTS_1[0], TEXTS_2) + + assert len(vllm_outputs) == 2 + assert len(hf_outputs) == 2 + + assert math.isclose(hf_outputs[0], vllm_outputs[0][0], rel_tol=0.01) + assert math.isclose(hf_outputs[1], vllm_outputs[1][0], rel_tol=0.01) + + +@pytest.mark.parametrize("dtype", ["half"]) +def test_llm_N_to_N(vllm_runner, hf_runner, model_name, dtype: str): + + text_pairs = [ + [TEXTS_1[0], TEXTS_2[0]], + [TEXTS_1[1], TEXTS_2[1]], + ] + + with hf_runner(model_name, dtype=dtype, is_cross_encoder=True) as hf_model: + hf_outputs = hf_model.predict(text_pairs).tolist() + + with vllm_runner(model_name, + task="embedding", + dtype=dtype, + max_model_len=None) as vllm_model: + vllm_outputs = vllm_model.score(TEXTS_1, TEXTS_2) + + assert len(vllm_outputs) == 2 + assert len(hf_outputs) == 2 + + assert math.isclose(hf_outputs[0], vllm_outputs[0][0], rel_tol=0.01) + assert math.isclose(hf_outputs[1], vllm_outputs[1][0], rel_tol=0.01) diff --git a/tests/models/registry.py b/tests/models/registry.py index 3848367b6126c..fa0818c4f0bd1 100644 --- a/tests/models/registry.py +++ b/tests/models/registry.py @@ -135,6 +135,7 @@ class _HfExamplesInfo: "Qwen2ForRewardModel": _HfExamplesInfo("Qwen/Qwen2.5-Math-RM-72B"), "Qwen2ForSequenceClassification": _HfExamplesInfo("jason9693/Qwen2.5-1.5B-apeach"), # noqa: E501 "RobertaModel": _HfExamplesInfo("sentence-transformers/stsb-roberta-base-v2"), # noqa: E501 + "RobertaForMaskedLM": _HfExamplesInfo("sentence-transformers/all-roberta-large-v1"), # noqa: E501 "XLMRobertaModel": _HfExamplesInfo("intfloat/multilingual-e5-large"), # [Multimodal] "LlavaNextForConditionalGeneration": _HfExamplesInfo("royokong/e5-v"), @@ -143,6 +144,13 @@ class _HfExamplesInfo: "Qwen2VLForConditionalGeneration": _HfExamplesInfo("MrLight/dse-qwen2-2b-mrl-v1"), # noqa: E501 } +_CROSS_ENCODER_EXAMPLE_MODELS = { + # [Text-only] + "BertForSequenceClassification": _HfExamplesInfo("cross-encoder/ms-marco-MiniLM-L-6-v2"), # noqa: E501 + "RobertaForSequenceClassification": _HfExamplesInfo("cross-encoder/quora-roberta-base"), # noqa: E501 + 
"XLMRobertaForSequenceClassification": _HfExamplesInfo("BAAI/bge-reranker-v2-m3"), # noqa: E501 +} + _MULTIMODAL_EXAMPLE_MODELS = { # [Decoder-only] "Blip2ForConditionalGeneration": _HfExamplesInfo("Salesforce/blip2-opt-2.7b"), # noqa: E501 @@ -195,6 +203,7 @@ class _HfExamplesInfo: _EXAMPLE_MODELS = { **_TEXT_GENERATION_EXAMPLE_MODELS, **_EMBEDDING_EXAMPLE_MODELS, + **_CROSS_ENCODER_EXAMPLE_MODELS, **_MULTIMODAL_EXAMPLE_MODELS, **_SPECULATIVE_DECODING_EXAMPLE_MODELS, } diff --git a/tests/models/test_registry.py b/tests/models/test_registry.py index e462dae3dc688..289ea66b5ebc5 100644 --- a/tests/models/test_registry.py +++ b/tests/models/test_registry.py @@ -6,7 +6,10 @@ from vllm.model_executor.models import (is_embedding_model, is_text_generation_model, supports_multimodal) -from vllm.model_executor.models.registry import (_EMBEDDING_MODELS, +# yapf conflicts with isort for this block +# yapf: disable +from vllm.model_executor.models.registry import (_CROSS_ENCODER_MODELS, + _EMBEDDING_MODELS, _MULTIMODAL_MODELS, _SPECULATIVE_DECODING_MODELS, _TEXT_GENERATION_MODELS, @@ -29,22 +32,28 @@ def test_registry_imports(model_arch): model_arch in _TEXT_GENERATION_MODELS or model_arch in _MULTIMODAL_MODELS) + embedding_models = {**_EMBEDDING_MODELS, **_CROSS_ENCODER_MODELS} assert is_embedding_model(model_cls) is (model_arch - in _EMBEDDING_MODELS) + in embedding_models) assert supports_multimodal(model_cls) is (model_arch in _MULTIMODAL_MODELS) @fork_new_process_for_each_test -@pytest.mark.parametrize("model_arch,is_mm,init_cuda", [ - ("LlamaForCausalLM", False, False), - ("MllamaForConditionalGeneration", True, False), - ("LlavaForConditionalGeneration", True, True), +@pytest.mark.parametrize("model_arch,is_mm,init_cuda,is_ce", [ + ("LlamaForCausalLM", False, False, False), + ("MllamaForConditionalGeneration", True, False, False), + ("LlavaForConditionalGeneration", True, True, False), + ("BertForSequenceClassification", False, False, True), + ("RobertaForSequenceClassification", False, False, True), + ("XLMRobertaForSequenceClassification", False, False, True), ]) -def test_registry_is_multimodal(model_arch, is_mm, init_cuda): +def test_registry_model_property(model_arch, is_mm, init_cuda, is_ce): assert ModelRegistry.is_multimodal_model(model_arch) is is_mm + assert ModelRegistry.is_cross_encoder_model(model_arch) is is_ce + if init_cuda and current_platform.is_cuda_alike(): assert not torch.cuda.is_initialized() diff --git a/vllm/config.py b/vllm/config.py index f163665e2c063..4ea56a14cabba 100644 --- a/vllm/config.py +++ b/vllm/config.py @@ -712,6 +712,11 @@ def uses_mrope(self) -> bool: def is_multimodal_model(self) -> bool: return self.multimodal_config is not None + @property + def is_cross_encoder(self) -> bool: + architectures = getattr(self.hf_config, "architectures", []) + return ModelRegistry.is_cross_encoder_model(architectures) + class CacheConfig: """Configuration for the KV cache. diff --git a/vllm/core/scheduler.py b/vllm/core/scheduler.py index 841e65c488fc6..530cbdc3a9190 100644 --- a/vllm/core/scheduler.py +++ b/vllm/core/scheduler.py @@ -1357,6 +1357,7 @@ def schedule( encoder_seq_data=encoder_seq_data, cross_block_table=cross_block_table, state=seq_group.state, + token_type_ids=seq_group.token_type_ids, # `multi_modal_data` will only be present for the 1st comm # between engine and worker. 
# the subsequent comms can still use delta, but diff --git a/vllm/entrypoints/llm.py b/vllm/entrypoints/llm.py index c211ec5aee080..e07f4c04abd84 100644 --- a/vllm/entrypoints/llm.py +++ b/vllm/entrypoints/llm.py @@ -20,7 +20,7 @@ apply_mistral_chat_template, parse_chat_messages, resolve_chat_template_content_format) -from vllm.inputs import PromptType, TextPrompt, TokensPrompt +from vllm.inputs import PromptType, SingletonPrompt, TextPrompt, TokensPrompt from vllm.inputs.parse import parse_and_batch_prompt from vllm.logger import init_logger from vllm.lora.request import LoRARequest @@ -817,6 +817,128 @@ def encode( return self.engine_class.validate_outputs(outputs, EmbeddingRequestOutput) + def score( + self, + text_1: Union[SingletonPrompt, Sequence[SingletonPrompt]], + text_2: Union[SingletonPrompt, Sequence[SingletonPrompt]], + /, + truncate_prompt_tokens: Optional[int] = None, + use_tqdm: bool = True, + lora_request: Optional[Union[List[LoRARequest], LoRARequest]] = None, + prompt_adapter_request: Optional[PromptAdapterRequest] = None, + ) -> List[EmbeddingRequestOutput]: + """Generates similarity scores for all pairs . + + The inputs can be 1 -> 1, 1 -> N or N -> N. In the 1 - N case + the text_1 sentence will be replicated N times to pair with the text_2 + sentences. The input pairs are used to build a list of prompts for the + cross encoder model. This class automatically batches the prompts, + considering the memory constraint. For the best performance, put all + of your texts into a single list and pass it to this method. + + Args: + text_1: can be a single prompt or a list of prompts, in which + case it has to have the same length as the text_2 list + text_2: The texts to pair with the query to form the input + to the LLM. See :class:`~vllm.inputs.PromptType` for + more details about the format of each prompts. + use_tqdm: Whether to use tqdm to display the progress bar. + lora_request: LoRA request to use for generation, if any. + prompt_adapter_request: Prompt Adapter request to use for + generation, if any. + + Returns: + A list of ``EmbeddingRequestOutput`` objects containing the + generated scores in the same order as the input prompts. + """ + task = self.llm_engine.model_config.task + if task != "embedding": + messages = ["LLM.score() is only supported for embedding models."] + + supported_tasks = self.llm_engine.model_config.supported_tasks + if "embedding" in supported_tasks: + messages.append( + "Your model supports the 'embedding' task, but is " + f"currently initialized for the '{task}' task. 
Please " + "initialize the model using `--task embedding`.") + + raise ValueError(" ".join(messages)) + + if not self.llm_engine.model_config.is_cross_encoder: + raise ValueError("Your model does not support the cross encoding") + + tokenizer = self.llm_engine.get_tokenizer() + + if isinstance(tokenizer, MistralTokenizer): + raise ValueError( + "MistralTokenizer not supported for cross-encoding") + + # the tokenizer for models such as + # "cross-encoder/ms-marco-MiniLM-L-6-v2" doesn't support passing + # lists of tokens to the `text` and `text_pair` kwargs + def ensure_str(prompt: SingletonPrompt): + if isinstance(prompt, dict): + if "multi_modal_data" in prompt: + raise ValueError("Multi-modal prompt is not " + "supported for cross encoding") + elif "prompt_token_ids" in prompt: + prompt = tokenizer.decode( + cast(TokensPrompt, prompt)["prompt_token_ids"]) + elif "prompt" in prompt: + prompt = cast(TextPrompt, prompt)["prompt"] + assert type(prompt) is str + return prompt + + if isinstance(text_1, (str, dict)): + # Convert a single prompt to a list. + text_1 = [text_1] + text_1 = [ensure_str(t) for t in text_1] + + if isinstance(text_2, (str, dict)): + # Convert a single prompt to a list. + text_2 = [text_2] + text_2 = [ensure_str(t) for t in text_2] + + if len(text_1) > 1 and len(text_1) != len(text_2): + raise ValueError("Input lengths must be either 1:1, 1:N or N:N") + if len(text_1) == 0: + raise ValueError("At least one text element must be given") + if len(text_2) == 0: + raise ValueError("At least one text_pair element must be given") + + if len(text_1) == 1: + text_1 = text_1 * len(text_2) + + input_pairs = [(t1, t2) for t1, t2 in zip(text_1, text_2)] + pooling_params = PoolingParams() + + tokenization_kwargs: Dict[str, Any] = {} + if truncate_prompt_tokens is not None: + tokenization_kwargs["truncation"] = True + tokenization_kwargs["max_length"] = truncate_prompt_tokens + + parsed_prompts = [] + + for q, t in input_pairs: + prompt_inputs = tokenizer(text=q, + text_pair=t, + **tokenization_kwargs) + engine_prompt = TokensPrompt( + prompt_token_ids=prompt_inputs["input_ids"], + token_type_ids=prompt_inputs.get("token_type_ids")) + parsed_prompts.append(engine_prompt) + + self._validate_and_add_requests( + prompts=parsed_prompts, + params=pooling_params, + lora_request=lora_request, + prompt_adapter_request=prompt_adapter_request, + ) + + outputs = self._run_engine(use_tqdm=use_tqdm) + return self.engine_class.validate_outputs(outputs, + EmbeddingRequestOutput) + def start_profile(self) -> None: self.llm_engine.start_profile() diff --git a/vllm/entrypoints/openai/api_server.py b/vllm/entrypoints/openai/api_server.py index b0fe061f5db4a..2b1f14b89b1f2 100644 --- a/vllm/entrypoints/openai/api_server.py +++ b/vllm/entrypoints/openai/api_server.py @@ -45,6 +45,7 @@ EmbeddingRequest, EmbeddingResponse, ErrorResponse, LoadLoraAdapterRequest, + ScoreRequest, ScoreResponse, TokenizeRequest, TokenizeResponse, UnloadLoraAdapterRequest) @@ -53,6 +54,7 @@ from vllm.entrypoints.openai.serving_completion import OpenAIServingCompletion from vllm.entrypoints.openai.serving_embedding import OpenAIServingEmbedding from vllm.entrypoints.openai.serving_engine import BaseModelPath, OpenAIServing +from vllm.entrypoints.openai.serving_score import OpenAIServingScores from vllm.entrypoints.openai.serving_tokenization import ( OpenAIServingTokenization) from vllm.entrypoints.openai.tool_parsers import ToolParserManager @@ -280,6 +282,10 @@ def embedding(request: Request) -> 
Optional[OpenAIServingEmbedding]: return request.app.state.openai_serving_embedding +def score(request: Request) -> Optional[OpenAIServingScores]: + return request.app.state.openai_serving_scores + + def tokenization(request: Request) -> OpenAIServingTokenization: return request.app.state.openai_serving_tokenization @@ -391,6 +397,23 @@ async def create_embedding(request: EmbeddingRequest, raw_request: Request): assert_never(generator) +@router.post("/v1/score") +async def create_score(request: ScoreRequest, raw_request: Request): + handler = score(raw_request) + if handler is None: + return base(raw_request).create_error_response( + message="The model does not support Score API") + + generator = await handler.create_score(request, raw_request) + if isinstance(generator, ErrorResponse): + return JSONResponse(content=generator.model_dump(), + status_code=generator.code) + elif isinstance(generator, ScoreResponse): + return JSONResponse(content=generator.model_dump()) + + assert_never(generator) + + if envs.VLLM_TORCH_PROFILER_DIR: logger.warning( "Torch Profiler is enabled in the API server. This should ONLY be " @@ -466,8 +489,9 @@ def build_app(args: Namespace) -> FastAPI: @app.exception_handler(RequestValidationError) async def validation_exception_handler(_, exc): - chat = app.state.openai_serving_chat - err = chat.create_error_response(message=str(exc)) + err = ErrorResponse(message=str(exc), + type="BadRequestError", + code=HTTPStatus.BAD_REQUEST) return JSONResponse(err.model_dump(), status_code=HTTPStatus.BAD_REQUEST) @@ -565,6 +589,13 @@ def init_app_state( chat_template=resolved_chat_template, chat_template_content_format=args.chat_template_content_format, ) if model_config.task == "embedding" else None + state.openai_serving_scores = OpenAIServingScores( + engine_client, + model_config, + base_model_paths, + request_logger=request_logger + ) if (model_config.task == "embedding" \ + and model_config.is_cross_encoder) else None state.openai_serving_tokenization = OpenAIServingTokenization( engine_client, model_config, diff --git a/vllm/entrypoints/openai/protocol.py b/vllm/entrypoints/openai/protocol.py index f343732174014..ee94a9413f098 100644 --- a/vllm/entrypoints/openai/protocol.py +++ b/vllm/entrypoints/openai/protocol.py @@ -806,6 +806,27 @@ def to_pooling_params(self): EmbeddingRequest = Union[EmbeddingCompletionRequest, EmbeddingChatRequest] +class ScoreRequest(OpenAIBaseModel): + model: str + text_1: Union[List[str], str] + text_2: Union[List[str], str] + truncate_prompt_tokens: Optional[Annotated[int, Field(ge=1)]] = None + + # doc: begin-chat-embedding-pooling-params + additional_data: Optional[Any] = None + # doc: end-chat-embedding-pooling-params + + priority: int = Field( + default=0, + description=( + "The priority of the request (lower means earlier handling; " + "default: 0). 
Any priority other than 0 will raise an error " + "if the served model does not use priority scheduling.")) + + def to_pooling_params(self): + return PoolingParams(additional_data=self.additional_data) + + class CompletionLogProbs(OpenAIBaseModel): text_offset: List[int] = Field(default_factory=list) token_logprobs: List[Optional[float]] = Field(default_factory=list) @@ -876,6 +897,21 @@ class EmbeddingResponse(OpenAIBaseModel): usage: UsageInfo +class ScoreResponseData(OpenAIBaseModel): + index: int + object: str = "score" + score: Union[List[float], str] + + +class ScoreResponse(OpenAIBaseModel): + id: str = Field(default_factory=lambda: f"embd-{random_uuid()}") + object: str = "list" + created: int = Field(default_factory=lambda: int(time.time())) + model: str + data: List[ScoreResponseData] + usage: UsageInfo + + class FunctionCall(OpenAIBaseModel): name: str arguments: str diff --git a/vllm/entrypoints/openai/serving_score.py b/vllm/entrypoints/openai/serving_score.py new file mode 100644 index 0000000000000..156fea6f47982 --- /dev/null +++ b/vllm/entrypoints/openai/serving_score.py @@ -0,0 +1,215 @@ +import asyncio +import time +from typing import Any, AsyncGenerator, Dict, List, Optional, Union, cast + +from fastapi import Request + +from vllm.config import ModelConfig +from vllm.engine.protocol import EngineClient +from vllm.entrypoints.logger import RequestLogger +from vllm.entrypoints.openai.protocol import (ErrorResponse, ScoreRequest, + ScoreResponse, ScoreResponseData, + UsageInfo) +from vllm.entrypoints.openai.serving_engine import BaseModelPath, OpenAIServing +from vllm.inputs.data import TokensPrompt +from vllm.logger import init_logger +from vllm.outputs import EmbeddingRequestOutput +from vllm.transformers_utils.tokenizers.mistral import MistralTokenizer +from vllm.utils import merge_async_iterators, random_uuid + +logger = init_logger(__name__) + + +def request_output_to_score_response( + final_res_batch: List[EmbeddingRequestOutput], request_id: str, + created_time: int, model_name: str) -> ScoreResponse: + data: List[ScoreResponseData] = [] + score = None + num_prompt_tokens = 0 + for idx, final_res in enumerate(final_res_batch): + if final_res is not None: + score = final_res.outputs.embedding + score_data = ScoreResponseData(index=idx, score=score) + data.append(score_data) + + usage = UsageInfo( + prompt_tokens=num_prompt_tokens, + total_tokens=num_prompt_tokens, + ) + + return ScoreResponse( + id=request_id, + created=created_time, + model=model_name, + data=data, + usage=usage, + ) + + +def make_pairs(text_1: Union[List[str], str], text_2: Union[List[str], + str]) -> List: + if isinstance(text_1, (str, dict)): + # Convert a single prompt to a list. + text_1 = [text_1] + text_1 = [t for t in text_1] + + if isinstance(text_2, (str, dict)): + # Convert a single prompt to a list. 
+ text_2 = [text_2] + text_2 = [t for t in text_2] + if len(text_1) > 1 and len(text_1) != len(text_2): + raise ValueError("Input lengths must be either 1:1, 1:N or N:N") + if len(text_1) == 0: + raise ValueError("At least one text element must be given") + if len(text_2) == 0: + raise ValueError("At least one text_pair element must be given") + + if len(text_1) == 1: + text_1 = text_1 * len(text_2) + + return [(t1, t2) for t1, t2 in zip(text_1, text_2)] + + +class OpenAIServingScores(OpenAIServing): + + def __init__( + self, + engine_client: EngineClient, + model_config: ModelConfig, + base_model_paths: List[BaseModelPath], + *, + request_logger: Optional[RequestLogger], + ) -> None: + super().__init__(engine_client=engine_client, + model_config=model_config, + base_model_paths=base_model_paths, + lora_modules=None, + prompt_adapters=None, + request_logger=request_logger) + + async def create_score( + self, + request: ScoreRequest, + raw_request: Optional[Request] = None, + ) -> Union[ScoreResponse, ErrorResponse]: + """ + Score API similar to Sentence Transformers cross encoder + + See https://sbert.net/docs/package_reference/cross_encoder + """ + error_check_ret = await self._check_model(request) + if error_check_ret is not None: + return error_check_ret + + model_name = request.model + request_id = f"score-{random_uuid()}" + created_time = int(time.monotonic()) + truncate_prompt_tokens = request.truncate_prompt_tokens + + request_prompts = [] + engine_prompts = [] + + try: + ( + lora_request, + prompt_adapter_request, + ) = self._maybe_get_adapters(request) + + tokenizer = await self.engine_client.get_tokenizer(lora_request) + + if prompt_adapter_request is not None: + raise NotImplementedError("Prompt adapter is not supported " + "for embedding models") + + if isinstance(tokenizer, MistralTokenizer): + raise ValueError( + "MistralTokenizer not supported for cross-encoding") + + if not self.model_config.is_cross_encoder: + raise ValueError("Model is not cross encoder.") + + except ValueError as e: + logger.exception("Error in preprocessing prompt inputs") + return self.create_error_response(str(e)) + + # Schedule the request and get the result generator. 
+ generators: List[AsyncGenerator[EmbeddingRequestOutput, None]] = [] + + input_pairs = make_pairs(request.text_1, request.text_2) + + for q, t in input_pairs: + request_prompt = f"{q}{tokenizer.sep_token}{t}" + + tokenization_kwargs: Dict[str, Any] = {} + if truncate_prompt_tokens is not None: + tokenization_kwargs["truncation"] = True + tokenization_kwargs["max_length"] = truncate_prompt_tokens + + prompt_inputs = tokenizer(text=q, + text_pair=t, + **tokenization_kwargs) + engine_prompt = TokensPrompt( + prompt_token_ids=prompt_inputs["input_ids"], + token_type_ids=prompt_inputs.get("token_type_ids")) + + request_prompts.append(request_prompt) + engine_prompts.append(engine_prompt) + + try: + pooling_params = request.to_pooling_params() + + for i, engine_prompt in enumerate(engine_prompts): + request_id_item = f"{request_id}-{i}" + + self._log_inputs(request_id_item, + request_prompts[i], + params=pooling_params, + lora_request=lora_request, + prompt_adapter_request=prompt_adapter_request) + + trace_headers = (None if raw_request is None else await + self._get_trace_headers(raw_request.headers)) + + generator = self.engine_client.encode( + engine_prompt, + pooling_params, + request_id_item, + lora_request=lora_request, + trace_headers=trace_headers, + priority=request.priority, + ) + + generators.append(generator) + except ValueError as e: + # TODO: Use a vllm-specific Validation Error + return self.create_error_response(str(e)) + + result_generator = merge_async_iterators( + *generators, + is_cancelled=raw_request.is_disconnected if raw_request else None, + ) + + num_prompts = len(engine_prompts) + + # Non-streaming response + final_res_batch: List[Optional[EmbeddingRequestOutput]] + final_res_batch = [None] * num_prompts + + try: + async for i, res in result_generator: + final_res_batch[i] = res + + assert all(final_res is not None for final_res in final_res_batch) + + final_res_batch_checked = cast(List[EmbeddingRequestOutput], + final_res_batch) + + response = request_output_to_score_response( + final_res_batch_checked, request_id, created_time, model_name) + except asyncio.CancelledError: + return self.create_error_response("Client disconnected") + except ValueError as e: + # TODO: Use a vllm-specific Validation Error + return self.create_error_response(str(e)) + + return response diff --git a/vllm/inputs/data.py b/vllm/inputs/data.py index 07ff9faa50f13..fb7dbbebd7b90 100644 --- a/vllm/inputs/data.py +++ b/vllm/inputs/data.py @@ -38,6 +38,9 @@ class TokensPrompt(TypedDict): prompt_token_ids: List[int] """A list of token IDs to pass to the model.""" + token_type_ids: NotRequired[List[int]] + """A list of token type IDs to pass to the cross encoder model.""" + multi_modal_data: NotRequired["MultiModalDataDict"] """ DEPRECATED: Optional multi-modal data to pass to the model, @@ -133,6 +136,9 @@ class TokenInputs(TypedDict): prompt_token_ids: List[int] """The token IDs of the prompt.""" + token_type_ids: NotRequired[List[int]] + """The token type IDs of the prompt.""" + prompt: NotRequired[str] """ The original prompt text corresponding to the token IDs, if available. 
@@ -160,6 +166,7 @@ class TokenInputs(TypedDict): def token_inputs( prompt_token_ids: List[int], + token_type_ids: Optional[List[int]] = None, prompt: Optional[str] = None, multi_modal_data: Optional["MultiModalDataDict"] = None, multi_modal_placeholders: Optional["MultiModalPlaceholderDict"] = None, @@ -170,6 +177,8 @@ def token_inputs( if prompt is not None: inputs["prompt"] = prompt + if token_type_ids is not None: + inputs["token_type_ids"] = token_type_ids if multi_modal_data is not None: inputs["multi_modal_data"] = multi_modal_data if multi_modal_placeholders is not None: @@ -234,6 +243,15 @@ def prompt_token_ids(self) -> List[int]: assert_never(inputs) + @cached_property + def token_type_ids(self) -> List[int]: + inputs = self.inputs + + if inputs["type"] == "token" or inputs["type"] == "multimodal": + return inputs.get("token_type_ids", []) + + assert_never(inputs) + @cached_property def prompt_embeds(self) -> Optional[torch.Tensor]: inputs = self.inputs diff --git a/vllm/inputs/preprocess.py b/vllm/inputs/preprocess.py index 853257c5ad71f..3d606817e90aa 100644 --- a/vllm/inputs/preprocess.py +++ b/vllm/inputs/preprocess.py @@ -305,6 +305,7 @@ def _prompt_to_llm_inputs( tokens_content = parsed["content"] prompt_token_ids = tokens_content["prompt_token_ids"] + token_type_ids = tokens_content.get("token_type_ids") multi_modal_data = tokens_content.get("multi_modal_data") mm_processor_kwargs = tokens_content.get("mm_processor_kwargs") @@ -318,6 +319,7 @@ def _prompt_to_llm_inputs( return token_inputs( prompt_token_ids=prompt_token_ids, + token_type_ids=token_type_ids, multi_modal_data=multi_modal_data, mm_processor_kwargs=mm_processor_kwargs, ) diff --git a/vllm/model_executor/layers/pooler.py b/vllm/model_executor/layers/pooler.py index df1978241340b..f9437b4112ceb 100644 --- a/vllm/model_executor/layers/pooler.py +++ b/vllm/model_executor/layers/pooler.py @@ -3,11 +3,14 @@ import torch import torch.nn as nn +from transformers import PretrainedConfig from vllm.config import PoolerConfig from vllm.model_executor.pooling_metadata import (PoolingMetadata, PoolingTensors) from vllm.sequence import EmbeddingSequenceGroupOutput, PoolerOutput +from vllm.transformers_utils.config import ( + get_cross_encoder_activation_function) class PoolingType(IntEnum): @@ -152,3 +155,64 @@ def forward( ] return PoolerOutput(outputs=pooled_outputs) + + +class CrossEncodingPooler(nn.Module): + """A layer that pools specific information from hidden states. + + This layer does the following: + 1. Extracts specific tokens or aggregates data based on pooling method. + 2. Normalizes output if specified. + 3. Returns structured results as `PoolerOutput`. + + Attributes: + pooling_type: The type of pooling to use. + normalize: Whether to normalize the pooled data. 
+ """ + + def __init__( + self, + config: PretrainedConfig, + classifier: nn.Module, + pooler: Optional[nn.Module] = None, + ): + super().__init__() + self.classifier = classifier + self.pooler = pooler + self.default_activation_function = \ + get_cross_encoder_activation_function(config) + + def forward( + self, + hidden_states: torch.Tensor, + pooling_metadata: PoolingMetadata, + ) -> PoolerOutput: + """Pools sentence pair scores from the hidden_states.""" + + prompt_lens = PoolingTensors.from_pooling_metadata( + pooling_metadata, hidden_states.device).prompt_lens + + offset = 0 + pooled_data_lst = [] + for prompt_len in prompt_lens: + pooled_data_i = hidden_states[offset:offset + prompt_len] + + if self.pooler is not None: + final_shape_tensor = self.pooler(pooled_data_i) + else: + final_shape_tensor = self.classifier(pooled_data_i) + + pooled_data_lst.append(final_shape_tensor) + offset += prompt_len + + pooled_output = torch.stack(pooled_data_lst) + + if self.pooler is not None: + # apply classifier once on the full batch if possible + pooled_output = self.classifier(pooled_output) + logits = self.default_activation_function(pooled_output) + + pooled_outputs = [ + EmbeddingSequenceGroupOutput(data.tolist()) for data in logits + ] + return PoolerOutput(outputs=pooled_outputs) diff --git a/vllm/model_executor/models/bert.py b/vllm/model_executor/models/bert.py index d8301a36acb01..1fc87bc650d92 100644 --- a/vllm/model_executor/models/bert.py +++ b/vllm/model_executor/models/bert.py @@ -11,14 +11,18 @@ from vllm.model_executor.layers.linear import (ColumnParallelLinear, QKVParallelLinear, RowParallelLinear) -from vllm.model_executor.layers.pooler import Pooler, PoolingType +from vllm.model_executor.layers.pooler import (CrossEncodingPooler, Pooler, + PoolingType) from vllm.model_executor.layers.quantization.base_config import ( QuantizationConfig) from vllm.model_executor.layers.vocab_parallel_embedding import ( VocabParallelEmbedding) from vllm.model_executor.model_loader.weight_utils import default_weight_loader +from vllm.model_executor.models.interfaces import SupportsCrossEncoding from vllm.model_executor.pooling_metadata import PoolingMetadata from vllm.sequence import IntermediateTensors, PoolerOutput +from vllm.transformers_utils.config import ( + get_cross_encoder_activation_function) from .utils import maybe_prefix @@ -48,7 +52,9 @@ def __init__(self, config: BertConfig): def forward( self, input_ids: torch.Tensor, - position_ids: Optional[torch.Tensor] = None, + seq_lens: torch.Tensor, + position_ids: torch.Tensor, + token_type_ids: Optional[torch.Tensor] = None, ) -> torch.Tensor: input_shape = input_ids.size() @@ -58,17 +64,34 @@ def forward( # Position embeddings. position_embeddings = self.position_embeddings(position_ids) - # Token type embeddings. (TODO: move off hotpath?) 
- token_type_embeddings = self.token_type_embeddings( - torch.zeros(input_shape, - dtype=torch.long, - device=inputs_embeds.device)) + if token_type_ids is None: + token_type_ids = torch.zeros(input_shape, + dtype=torch.long, + device=inputs_embeds.device) + + token_type_embeddings = self.token_type_embeddings(token_type_ids) embeddings = inputs_embeds + token_type_embeddings + position_embeddings embeddings = self.LayerNorm(embeddings) return embeddings +class BertPooler(nn.Module): + + def __init__(self, config: BertConfig): + super().__init__() + self.dense = nn.Linear(config.hidden_size, config.hidden_size) + self.activation = nn.Tanh() + + def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: + # We "pool" the model by simply taking the hidden state corresponding + # to the first token. + first_token_tensor = hidden_states[0, :] + pooled_output = self.dense(first_token_tensor) + pooled_output = self.activation(pooled_output) + return pooled_output + + class BertEncoder(nn.Module): def __init__(self, @@ -309,7 +332,8 @@ def __init__(self, *, vllm_config: VllmConfig, prefix: str = "", - embedding_class: type = BertEmbedding): + embedding_class: type = BertEmbedding, + add_pooling_layer: bool = False): super().__init__() config = vllm_config.model_config.hf_config cache_config = vllm_config.cache_config @@ -319,6 +343,7 @@ def __init__(self, cache_config, quant_config, prefix=f"{prefix}.encoder") + self.pooler = BertPooler(config) if add_pooling_layer else None def forward( self, @@ -328,13 +353,17 @@ def forward( attn_metadata: AttentionMetadata, intermediate_tensors: Optional[IntermediateTensors] = None, inputs_embeds: Optional[torch.Tensor] = None, + token_type_ids: Optional[torch.Tensor] = None, ) -> torch.Tensor: if inputs_embeds is not None: hidden_states = inputs_embeds else: - hidden_states = self.embeddings(input_ids=input_ids, - position_ids=position_ids) - + assert hasattr(attn_metadata, "seq_lens_tensor") + hidden_states = self.embeddings( + input_ids=input_ids, + seq_lens=attn_metadata.seq_lens_tensor, + position_ids=position_ids, + token_type_ids=token_type_ids) return self.encoder(hidden_states, kv_caches, attn_metadata) def load_weights(self, weights: Iterable[Tuple[str, @@ -349,7 +378,7 @@ def load_weights(self, weights: Iterable[Tuple[str, params_dict = dict(self.named_parameters()) loaded_params: Set[str] = set() for name, loaded_weight in weights: - if "pooler" in name: + if self.pooler is None and "pooler" in name: continue for (param_name, weight_name, shard_id) in stacked_params_mapping: if weight_name not in name: @@ -430,3 +459,78 @@ def _build_pooler(self, pooler_config: PoolerConfig) -> Pooler: pooling_type=PoolingType.CLS, normalize=True, softmax=False) + + +class BertForSequenceClassification(nn.Module, SupportsCrossEncoding): + """A model that uses Bert to provide embedding functionalities. + + This class encapsulates the BertModel and provides an interface for + embedding operations and customized pooling functions. + + Attributes: + model: An instance of BertModel used for forward operations. + _pooler: An instance of Pooler used for pooling operations. 
+ """ + + def __init__(self, *, vllm_config: VllmConfig, prefix: str = ""): + super().__init__() + config = vllm_config.model_config.hf_config + + self.default_activation_function = \ + get_cross_encoder_activation_function(config) + + self.num_labels = config.num_labels + self.bert = BertModel(vllm_config=vllm_config, + prefix=maybe_prefix(prefix, "bert"), + embedding_class=BertEmbedding, + add_pooling_layer=True) + self.classifier = nn.Linear(config.hidden_size, config.num_labels) + self._pooler = CrossEncodingPooler(config, self.classifier, + self.bert.pooler) + + def load_weights(self, weights: Iterable[Tuple[str, torch.Tensor]]): + + self_weights = [] + + def weight_filter(): + for name, weight in weights: + if name.startswith("bert."): + yield (name[len("bert."):], weight) + else: + self_weights.append((name, weight)) + + self.bert.load_weights(weight_filter()) + + params_dict = dict(self.named_parameters()) + + for name, loaded_weight in self_weights: + if name.startswith("classifier"): + param = params_dict[name] + weight_loader = getattr(param, "weight_loader", + default_weight_loader) + weight_loader(param, loaded_weight) + + def pooler( + self, + hidden_states: torch.Tensor, + pooling_metadata: PoolingMetadata, + ) -> Optional[PoolerOutput]: + return self._pooler(hidden_states, pooling_metadata) + + def forward( + self, + input_ids: Optional[torch.Tensor], + positions: torch.Tensor, + kv_caches: List[torch.Tensor], + attn_metadata: AttentionMetadata, + intermediate_tensors: Optional[IntermediateTensors] = None, + inputs_embeds: Optional[torch.Tensor] = None, + token_type_ids: Optional[torch.Tensor] = None, + ) -> torch.Tensor: + return self.bert(input_ids=input_ids, + position_ids=positions, + kv_caches=kv_caches, + inputs_embeds=inputs_embeds, + intermediate_tensors=intermediate_tensors, + attn_metadata=attn_metadata, + token_type_ids=token_type_ids) diff --git a/vllm/model_executor/models/interfaces.py b/vllm/model_executor/models/interfaces.py index dcead65115132..4f0c75b2c6a57 100644 --- a/vllm/model_executor/models/interfaces.py +++ b/vllm/model_executor/models/interfaces.py @@ -7,6 +7,8 @@ from vllm.logger import init_logger from vllm.utils import supports_kw +from .interfaces_base import is_embedding_model + if TYPE_CHECKING: from vllm.config import LoRAConfig, MultiModalConfig, SchedulerConfig from vllm.sequence import IntermediateTensors @@ -350,3 +352,37 @@ def is_attention_free( return isinstance(model, _IsAttentionFreeType) return isinstance(model, IsAttentionFree) + + +@runtime_checkable +class SupportsCrossEncoding(Protocol): + """The interface required for all models that support cross encoding.""" + + supports_cross_encoding: ClassVar[Literal[True]] = True + + +@overload +def supports_cross_encoding( + model: Type[object]) -> TypeIs[Type[SupportsCrossEncoding]]: + ... + + +@overload +def supports_cross_encoding(model: object) -> TypeIs[SupportsCrossEncoding]: + ... 
+ + +def _supports_cross_encoding( + model: Union[Type[object], object], +) -> Union[TypeIs[Type[SupportsCrossEncoding]], TypeIs[SupportsCrossEncoding]]: + + if isinstance(model, type): + return isinstance(model, SupportsCrossEncoding) + + return isinstance(model, SupportsCrossEncoding) + + +def supports_cross_encoding( + model: Union[Type[object], object], +) -> Union[TypeIs[Type[SupportsCrossEncoding]], TypeIs[SupportsCrossEncoding]]: + return is_embedding_model(model) and _supports_cross_encoding(model) diff --git a/vllm/model_executor/models/registry.py b/vllm/model_executor/models/registry.py index 22c2e328bfb65..789ffb4d3bde0 100644 --- a/vllm/model_executor/models/registry.py +++ b/vllm/model_executor/models/registry.py @@ -21,7 +21,8 @@ from vllm.platforms import current_platform from .interfaces import (has_inner_state, is_attention_free, - supports_multimodal, supports_pp) + supports_cross_encoding, supports_multimodal, + supports_pp) from .interfaces_base import is_embedding_model, is_text_generation_model logger = init_logger(__name__) @@ -100,6 +101,7 @@ # [Text-only] "BertModel": ("bert", "BertEmbeddingModel"), "RobertaModel": ("roberta", "RobertaEmbeddingModel"), + "RobertaForMaskedLM": ("roberta", "RobertaEmbeddingModel"), "XLMRobertaModel": ("roberta", "RobertaEmbeddingModel"), "DeciLMForCausalLM": ("decilm", "DeciLMForCausalLM"), "Gemma2Model": ("gemma2", "Gemma2EmbeddingModel"), @@ -121,6 +123,14 @@ "Qwen2VLForConditionalGeneration": ("qwen2_vl", "Qwen2VLForConditionalGeneration") # noqa: E501, } +_CROSS_ENCODER_MODELS = { + "BertForSequenceClassification": ("bert", "BertForSequenceClassification"), + "RobertaForSequenceClassification": ("roberta", + "RobertaForSequenceClassification"), + "XLMRobertaForSequenceClassification": ("roberta", + "RobertaForSequenceClassification"), +} + _MULTIMODAL_MODELS = { # [Decoder-only] "Blip2ForConditionalGeneration": ("blip2", "Blip2ForConditionalGeneration"), @@ -159,6 +169,7 @@ _VLLM_MODELS = { **_TEXT_GENERATION_MODELS, **_EMBEDDING_MODELS, + **_CROSS_ENCODER_MODELS, **_MULTIMODAL_MODELS, **_SPECULATIVE_DECODING_MODELS, } @@ -193,6 +204,7 @@ class _ModelInfo: is_text_generation_model: bool is_embedding_model: bool + supports_cross_encoding: bool supports_multimodal: bool supports_pp: bool has_inner_state: bool @@ -203,6 +215,7 @@ def from_model_cls(model: Type[nn.Module]) -> "_ModelInfo": return _ModelInfo( is_text_generation_model=is_text_generation_model(model), is_embedding_model=is_embedding_model(model), + supports_cross_encoding=supports_cross_encoding(model), supports_multimodal=supports_multimodal(model), supports_pp=supports_pp(model), has_inner_state=has_inner_state(model), @@ -415,6 +428,12 @@ def is_embedding_model( ) -> bool: return self.inspect_model_cls(architectures).is_embedding_model + def is_cross_encoder_model( + self, + architectures: Union[str, List[str]], + ) -> bool: + return self.inspect_model_cls(architectures).supports_cross_encoding + def is_multimodal_model( self, architectures: Union[str, List[str]], @@ -489,4 +508,4 @@ def _run() -> None: if __name__ == "__main__": - _run() \ No newline at end of file + _run() diff --git a/vllm/model_executor/models/roberta.py b/vllm/model_executor/models/roberta.py index c1dcdd36ec3de..5a296e311f079 100644 --- a/vllm/model_executor/models/roberta.py +++ b/vllm/model_executor/models/roberta.py @@ -1,4 +1,4 @@ -from typing import List, Optional +from typing import Iterable, List, Optional, Tuple import torch from torch import nn @@ -6,10 +6,17 @@ from vllm.attention 
import AttentionMetadata from vllm.config import VllmConfig +from vllm.model_executor.layers.pooler import CrossEncodingPooler from vllm.model_executor.layers.vocab_parallel_embedding import ( VocabParallelEmbedding) +from vllm.model_executor.model_loader.weight_utils import default_weight_loader from vllm.model_executor.models.bert import BertEmbeddingModel, BertModel -from vllm.sequence import IntermediateTensors +from vllm.model_executor.models.interfaces import SupportsCrossEncoding +from vllm.model_executor.models.utils import maybe_prefix +from vllm.model_executor.pooling_metadata import PoolingMetadata +from vllm.sequence import IntermediateTensors, PoolerOutput +from vllm.transformers_utils.config import ( + get_cross_encoder_activation_function) class RobertaEmbedding(nn.Module): @@ -39,34 +46,93 @@ def __init__(self, config: RobertaConfig): def forward( self, input_ids: torch.Tensor, - position_ids: Optional[torch.Tensor] = None, + seq_lens: torch.Tensor, + position_ids: torch.Tensor, + token_type_ids: Optional[torch.Tensor] = None, ) -> torch.Tensor: input_shape = input_ids.size() - - # Input embeddings. inputs_embeds = self.word_embeddings(input_ids) - # TODO: figure out if there is a better way - # to make to make position ids start at padding_idx + 1 + # Replace position ids because in RoBERTa models + # they have to start at padding_idx + 1 and ignore + # existing padding tokens # References: # - https://github.com/huggingface/transformers/blob/a3d69a8994d673899608a7c17fbf4f953f50474e/src/transformers/models/roberta/modeling_roberta.py#L133 # - https://github.com/huggingface/transformers/blob/a3d69a8994d673899608a7c17fbf4f953f50474e/src/transformers/models/roberta/modeling_roberta.py#L1669 - position_ids += self.padding_idx + 1 + pos_list = [] + token_list = [] + offset = 0 + for seq_len in seq_lens: + pos_list.append(position_ids[offset:offset + seq_len]) + token_list.append(input_ids[offset:offset + seq_len]) + offset += seq_len + + new_pos_list = [] + for positions, tokens in zip(pos_list, token_list): + # Verify assumption that incoming position are + # always a sequence from 0 to N. + expected_pos = torch.arange(positions.size()[0], + dtype=torch.long, + device=inputs_embeds.device) + assert torch.equal(positions, expected_pos) + new_pos_list.append( + create_position_ids_from_input_ids(tokens, self.padding_idx)) + position_ids = torch.cat(new_pos_list) # Position embeddings. position_embeddings = self.position_embeddings(position_ids) + if token_type_ids is None: + token_type_ids = torch.zeros(input_shape, + dtype=torch.long, + device=inputs_embeds.device) - # Token type embeddings. (TODO: move off hotpath?) - token_type_embeddings = self.token_type_embeddings( - torch.zeros(input_shape, - dtype=torch.long, - device=inputs_embeds.device)) - + token_type_embeddings = self.token_type_embeddings(token_type_ids) embeddings = inputs_embeds + token_type_embeddings + position_embeddings embeddings = self.LayerNorm(embeddings) return embeddings +# Adapted from transformers +def create_position_ids_from_input_ids(input_ids, + padding_idx, + past_key_values_length=0): + """ + Replace non-padding symbols with their position numbers. + Position numbers begin at padding_idx+1. Padding symbols + are ignored. This is modified from fairseq's `utils.make_positions`. + + Args: + x: torch.Tensor x: + + Returns: torch.Tensor + """ + # The series of casts and type-conversions here are carefully + # balanced to both work with ONNX export and XLA. 
+ mask = input_ids.ne(padding_idx).int() + + incremental_indices = (torch.cumsum(mask, dim=0).type_as(mask) + + past_key_values_length) * mask + + return incremental_indices.long() + padding_idx + + +# Adapted from transformers +class RobertaClassificationHead(nn.Module): + """Head for sentence-level classification tasks.""" + + def __init__(self, config: RobertaConfig): + super().__init__() + self.dense = nn.Linear(config.hidden_size, config.hidden_size) + self.out_proj = nn.Linear(config.hidden_size, config.num_labels) + + def forward(self, features, **kwargs): + x = features[0, :] # take token (equiv. to [CLS]) + x = self.dense(x) + x = torch.tanh(x) + x = self.out_proj(x) + return x + + class RobertaEmbeddingModel(BertEmbeddingModel): """A model that uses Roberta to provide embedding functionalities. @@ -85,6 +151,62 @@ def _build_model(self, prefix=prefix, embedding_class=RobertaEmbedding) + +class RobertaForSequenceClassification(nn.Module, SupportsCrossEncoding): + """A model that uses Roberta to provide embedding functionalities. + + This class encapsulates the BertModel and provides an interface for + embedding operations and customized pooling functions. + + Attributes: + roberta: An instance of BertModel used for forward operations. + _pooler: An instance of Pooler used for pooling operations. + """ + + def __init__(self, *, vllm_config: VllmConfig, prefix: str = ""): + super().__init__() + config = vllm_config.model_config.hf_config + + self.default_activation_function = \ + get_cross_encoder_activation_function(config) + + self.num_labels = config.num_labels + self.roberta = BertModel(vllm_config=vllm_config, + prefix=maybe_prefix(prefix, "bert"), + embedding_class=RobertaEmbedding, + add_pooling_layer=False) + self.classifier = RobertaClassificationHead(config) + self._pooler = CrossEncodingPooler(config, self.classifier) + + def load_weights(self, weights: Iterable[Tuple[str, torch.Tensor]]): + + self_weights = [] + + def weight_filter(): + for name, weight in weights: + if name.startswith("roberta."): + yield (name[len("roberta."):], weight) + else: + self_weights.append((name, weight)) + + self.roberta.load_weights(weight_filter()) + + params_dict = dict(self.named_parameters()) + + for name, loaded_weight in self_weights: + if name.startswith("classifier"): + param = params_dict[name] + weight_loader = getattr(param, "weight_loader", + default_weight_loader) + weight_loader(param, loaded_weight) + + def pooler( + self, + hidden_states: torch.Tensor, + pooling_metadata: PoolingMetadata, + ) -> Optional[PoolerOutput]: + return self._pooler(hidden_states, pooling_metadata) + def forward( self, input_ids: Optional[torch.Tensor], @@ -93,25 +215,12 @@ def forward( attn_metadata: AttentionMetadata, intermediate_tensors: Optional[IntermediateTensors] = None, inputs_embeds: Optional[torch.Tensor] = None, + token_type_ids: Optional[torch.Tensor] = None, ) -> torch.Tensor: - - # Verify assumption that position are always a sequence from - # 0 to N. (Actually here we just check 0 and N to simplify). - # This is important to fix the position which are assumed to - # start from padding_idx + 1 instead of 0 in the Roberta models. 
- assert hasattr(attn_metadata, "seq_lens_tensor") - cumulative = attn_metadata.seq_lens_tensor.cumsum(dim=0) - start_pos = torch.cat( - (torch.tensor([0], device=attn_metadata.seq_lens_tensor.device), - cumulative[:-1])) - assert len(torch.nonzero(positions[start_pos])) == 0 - end_pos = cumulative - 1 - last_tokens = attn_metadata.seq_lens_tensor - 1 - assert len(torch.nonzero(positions[end_pos] - last_tokens)) == 0 - - return super().forward(input_ids=input_ids, - positions=positions, - kv_caches=kv_caches, - attn_metadata=attn_metadata, - intermediate_tensors=intermediate_tensors, - inputs_embeds=inputs_embeds) + return self.roberta(input_ids=input_ids, + position_ids=positions, + kv_caches=kv_caches, + inputs_embeds=inputs_embeds, + intermediate_tensors=intermediate_tensors, + attn_metadata=attn_metadata, + token_type_ids=token_type_ids) diff --git a/vllm/multimodal/inputs.py b/vllm/multimodal/inputs.py index 8e67a552afe12..640c7c04b8817 100644 --- a/vllm/multimodal/inputs.py +++ b/vllm/multimodal/inputs.py @@ -6,7 +6,7 @@ import torch import torch.types from PIL.Image import Image -from typing_extensions import TypeAlias +from typing_extensions import NotRequired, TypeAlias from vllm.utils import JSONTree, is_list_of, json_map_leaves @@ -208,6 +208,9 @@ class MultiModalInputsV2(TypedDict): prompt_token_ids: List[int] """The processed token IDs which includes placeholder tokens.""" + token_type_ids: NotRequired[List[int]] + """The token type IDs of the prompt.""" + mm_kwargs: MultiModalKwargs """Keyword arguments to be directly passed to the model after batching.""" diff --git a/vllm/outputs.py b/vllm/outputs.py index 4ae9b377ae693..2d256803edfe8 100644 --- a/vllm/outputs.py +++ b/vllm/outputs.py @@ -60,7 +60,6 @@ class EmbeddingOutput: embedding: The embedding vector, which is a list of floats. The length of vector depends on the model as listed in the embedding guide. """ - embedding: List[float] def __repr__(self) -> str: @@ -363,6 +362,50 @@ def __repr__(self): f"finished={self.finished})") +@dataclass +class ScoreOutput: + """The output data of one completion output of a request. + + Args: + score: The score, which is a list of floats. + index: The correspondent text index of the score. + """ + index: int + score: List[float] + + def __repr__(self) -> str: + return (f"ScoreOutput(" + f"score={self.score}), " + f"index={self.index})") + + +class ScoreRequestOutput: + """ + The output data of an score request to the LLM. + + Args: + request_id (str): A unique identifier for the score request. + outputs (score): The embedding results for the given input. + """ + + def __init__(self, request_id: str, outputs: "ScoreOutput"): + self.request_id = request_id + self.outputs = outputs + + def __repr__(self): + """ + Returns a string representation of an ScoreRequestOutput instance. + + The representation includes the request_id and the number of outputs, + providing a quick overview of the embedding request's results. + + Returns: + str: A string representation of the ScoreRequestOutput instance. 
+ """ + return (f"ScoreRequestOutput(request_id='{self.request_id}', " + f"outputs={repr(self.outputs)}") + + class RequestOutputFactory: @staticmethod diff --git a/vllm/sequence.py b/vllm/sequence.py index a1cc8fc3b09de..669124319c4f4 100644 --- a/vllm/sequence.py +++ b/vllm/sequence.py @@ -449,6 +449,10 @@ def prompt_token_ids(self) -> List[int]: def prompt_embeds(self) -> Optional[torch.Tensor]: return self.inputs.prompt_embeds + @property + def token_type_ids(self) -> List[int]: + return self.inputs.token_type_ids + @property def multi_modal_data(self) -> "MultiModalDataDict": return self.inputs.multi_modal_data @@ -687,6 +691,10 @@ def encoder_prompt_token_ids(self) -> Optional[List[int]]: return (self.encoder_seq.prompt_token_ids if self.encoder_seq is not None else None) + @property + def token_type_ids(self) -> Optional[List[int]]: + return self.first_seq.token_type_ids + @property def multi_modal_data(self) -> MultiModalDataDict: return self.first_seq.multi_modal_data @@ -909,6 +917,7 @@ class SequenceGroupMetadata( default_factory=lambda: SequenceGroupState()) # "MultiModalDataDict" types. We have to use Any due to msgspec # doesn't allow to have union of 2 different dicts. + token_type_ids: Optional[List[int]] = None multi_modal_data: Optional[Any] = None multi_modal_placeholders: Optional[MultiModalPlaceholderDict] = None mm_processor_kwargs: Optional[Dict[str, Any]] = None diff --git a/vllm/transformers_utils/config.py b/vllm/transformers_utils/config.py index 59096753c395d..70d18d40b7aa7 100644 --- a/vllm/transformers_utils/config.py +++ b/vllm/transformers_utils/config.py @@ -9,6 +9,7 @@ from huggingface_hub.utils import (EntryNotFoundError, LocalEntryNotFoundError, RepositoryNotFoundError, RevisionNotFoundError) +from torch import nn from transformers import GenerationConfig, PretrainedConfig from transformers.models.auto.image_processing_auto import ( get_image_processor_config) @@ -31,6 +32,7 @@ UltravoxConfig) # yapf: enable from vllm.transformers_utils.utils import check_gguf_file +from vllm.utils import resolve_obj_by_qualname if VLLM_USE_MODELSCOPE: from modelscope import AutoConfig @@ -577,3 +579,16 @@ def try_get_generation_config( return GenerationConfig.from_model_config(config) except OSError: # Not found return None + + +def get_cross_encoder_activation_function(config: PretrainedConfig): + if (hasattr(config, "sbert_ce_default_activation_function") + and config.sbert_ce_default_activation_function is not None): + + function_name = config.sbert_ce_default_activation_function + assert function_name.startswith("torch.nn.modules."), \ + "Loading of activation functions is restricted to " \ + "torch.nn.modules for security reasons" + return resolve_obj_by_qualname(function_name)() + else: + return nn.Sigmoid() if config.num_labels == 1 else nn.Identity() diff --git a/vllm/worker/cpu_embedding_model_runner.py b/vllm/worker/cpu_embedding_model_runner.py index 978de73df6b70..3954e4c4c8a5b 100644 --- a/vllm/worker/cpu_embedding_model_runner.py +++ b/vllm/worker/cpu_embedding_model_runner.py @@ -50,6 +50,9 @@ def execute_model( ] model_executable = self.model + cross_enc_kwargs = {} + if model_input.token_type_ids is not None: + cross_enc_kwargs["token_type_ids"] = model_input.token_type_ids execute_model_kwargs = { "input_ids": model_input.input_tokens, @@ -61,6 +64,7 @@ def execute_model( model_input.attn_metadata, **MultiModalKwargs.as_kwargs(model_input.multi_modal_kwargs or {}, device=self.device), + **cross_enc_kwargs, "intermediate_tensors": intermediate_tensors, 
} diff --git a/vllm/worker/cpu_model_runner.py b/vllm/worker/cpu_model_runner.py index 7cab476d7fca4..b08171d79f002 100644 --- a/vllm/worker/cpu_model_runner.py +++ b/vllm/worker/cpu_model_runner.py @@ -43,6 +43,7 @@ class ModelInputForCPU(ModelRunnerInputBase): """ input_tokens: Optional[torch.Tensor] = None input_positions: Optional[torch.Tensor] = None + token_type_ids: Optional[torch.Tensor] = None attn_metadata: Optional["AttentionMetadata"] = None multi_modal_kwargs: Optional[BatchedTensorInputs] = None virtual_engine: Optional[int] = None @@ -54,6 +55,7 @@ def as_broadcastable_tensor_dict( tensor_dict = { "input_tokens": self.input_tokens, "input_positions": self.input_positions, + "token_type_ids": self.token_type_ids, "multi_modal_kwargs": self.multi_modal_kwargs, } _add_attn_metadata_broadcastable_dict(tensor_dict, self.attn_metadata) @@ -83,6 +85,7 @@ def as_broadcastable_tensor_dict(self) -> Dict[str, Any]: tensor_dict = { "input_tokens": self.input_tokens, "input_positions": self.input_positions, + "token_type_ids": self.token_type_ids, "multi_modal_kwargs": self.multi_modal_kwargs, } _add_attn_metadata_broadcastable_dict(tensor_dict, self.attn_metadata) @@ -112,6 +115,7 @@ def __init__(self, use_mrope: bool): self.input_tokens: List[int] = [] self.input_positions: Optional[ List[int]] = [] if not self.use_mrope else None + self.token_type_ids: Optional[List[int]] = [] self.seq_lens: List[int] = [] self.query_lens: List[int] = [] self.prefill_block_tables: List[List[int]] = [] @@ -165,6 +169,10 @@ def build(self) -> ModelInputForCPU: if not input_data.use_mrope else input_data.input_mrope_positions, dtype=torch.long, device="cpu") + token_type_ids = torch.tensor(input_data.token_type_ids, + dtype=torch.long, + device="cpu") \ + if input_data.token_type_ids else None # For multi-modal models multi_modal_kwargs = None @@ -178,6 +186,7 @@ def build(self) -> ModelInputForCPU: return self.model_input_cls( input_tokens=input_tokens, input_positions=input_positions, + token_type_ids=token_type_ids, seq_lens=input_data.seq_lens, query_lens=input_data.query_lens, attn_metadata=attn_metadata, @@ -285,6 +294,7 @@ def _compute_prompt_input_tokens(self, data: ModelInputData, tokens = seq_data.get_token_ids() tokens = tokens[context_len:seq_len] token_positions = range(context_len, seq_len) + token_types = seq_group_metadata.token_type_ids # For encoder-only models, the block_table is None, # and there is no need to initialize the slot_mapping. 
@@ -301,6 +311,9 @@ def _compute_prompt_input_tokens(self, data: ModelInputData, if data.input_positions is not None: data.input_positions.extend(token_positions) + if data.token_type_ids is not None: + data.token_type_ids.extend(token_types if token_types else []) + # Update fields data.input_tokens.extend(tokens) data.num_prefills += 1 diff --git a/vllm/worker/embedding_model_runner.py b/vllm/worker/embedding_model_runner.py index 4a55d91e71484..f56805918fd15 100644 --- a/vllm/worker/embedding_model_runner.py +++ b/vllm/worker/embedding_model_runner.py @@ -97,6 +97,10 @@ def execute_model( model_forward_end = torch.cuda.Event(enable_timing=True) model_forward_start.record() + cross_enc_kwargs = {} + if model_input.token_types is not None: + cross_enc_kwargs["token_type_ids"] = model_input.token_types + with set_forward_context(model_input.attn_metadata, self.vllm_config): hidden_or_intermediate_states = model_executable( input_ids=model_input.input_tokens, @@ -105,7 +109,8 @@ def execute_model( attn_metadata=model_input.attn_metadata, intermediate_tensors=intermediate_tensors, **MultiModalKwargs.as_kwargs(multi_modal_kwargs, - device=self.device)) + device=self.device), + **cross_enc_kwargs) if (self.observability_config is not None and self.observability_config.collect_model_forward_time): diff --git a/vllm/worker/model_runner.py b/vllm/worker/model_runner.py index 13301b876217d..1f654a9cce465 100644 --- a/vllm/worker/model_runner.py +++ b/vllm/worker/model_runner.py @@ -92,6 +92,7 @@ class ModelInputForGPU(ModelRunnerInputBase): """ input_tokens: Optional[torch.Tensor] = None input_positions: Optional[torch.Tensor] = None + token_types: Optional[torch.Tensor] = None seq_lens: Optional[List[int]] = None query_lens: Optional[List[int]] = None lora_mapping: Optional["LoRAMapping"] = None @@ -200,6 +201,7 @@ class InterDataForSeqGroup: def simple_reinit(self): self.input_tokens[0].clear() # type: ignore self.input_positions[0].clear() # type: ignore + self.token_types[0].clear() # type: ignore self.mrope_input_positions = None # type: ignore self.seq_lens[0] = 0 # type: ignore self.orig_seq_lens[0] = 0 # type: ignore @@ -226,6 +228,7 @@ def __init__( # Input tokens and positions. input_tokens: Optional[List[List[int]]] = None, input_positions: Optional[List[List[int]]] = None, + token_types: Optional[List[List[int]]] = None, mrope_input_positions: Optional[List[List[List[int]]]] = None, # The sequence length (may be capped to the sliding window). @@ -291,6 +294,12 @@ def __init__( for seq_id in range(len(self.seq_ids)): self.input_positions[seq_id].clear() + if token_types: + self.token_types = token_types + else: + for seq_id in range(len(self.seq_ids)): + self.token_types[seq_id].clear() + self.mrope_input_positions = None if seq_lens: @@ -354,6 +363,7 @@ def __init__( else: self.input_tokens = input_tokens or [] self.input_positions = input_positions or [] + self.token_types = token_types or [] self.mrope_input_positions = mrope_input_positions or None self.seq_lens = seq_lens or [] self.orig_seq_lens = orig_seq_lens or [] @@ -386,6 +396,7 @@ def __post_init__(self): self.input_tokens = [[] for _ in range(self.n_seqs)] self.input_positions = [[] for _ in range(self.n_seqs)] + self.token_types = [[] for _ in range(self.n_seqs)] self.mrope_input_positions = None self.seq_lens = [0] * self.n_seqs self.orig_seq_lens = [0] * self.n_seqs @@ -498,12 +509,15 @@ def _compute_lens(self, inter_data: InterDataForSeqGroup, seq_idx: int, # Compute tokens. 
tokens = seq_data.get_token_ids()[context_len:seq_len] + token_types = seq_group_metadata.token_type_ids inter_data.seq_lens[seq_idx] = seq_len inter_data.orig_seq_lens[seq_idx] = seq_len inter_data.context_lens[seq_idx] = context_len inter_data.input_tokens[seq_idx].extend(tokens) inter_data.input_positions[seq_idx].extend(range(context_len, seq_len)) + inter_data.token_types[seq_idx].extend( + token_types if token_types else []) inter_data.query_lens[seq_idx] = seq_len - context_len if seq_data.mrope_position_delta is not None: @@ -561,6 +575,8 @@ def _compute_for_prefix_cache_hit( seq_idx][uncomputed_start:] inter_data.input_positions[seq_idx] = inter_data.input_positions[ seq_idx][uncomputed_start:] + inter_data.token_types[seq_idx] = inter_data.token_types[seq_idx][ + uncomputed_start:] context_len = prefix_cache_len inter_data.context_lens[seq_idx] = context_len @@ -575,6 +591,8 @@ def _compute_for_prefix_cache_hit( seq_idx][-1:] inter_data.input_positions[seq_idx] = inter_data.input_positions[ seq_idx][-1:] + inter_data.token_types[seq_idx] = inter_data.token_types[seq_idx][ + -1:] inter_data.query_lens[seq_idx] = 1 inter_data.context_lens[seq_idx] = inter_data.seq_lens[seq_idx] - 1 @@ -803,9 +821,12 @@ def build(self) -> ModelInputForGPU: """ # Combine and flatten intermediate data. input_tokens = [] + token_types = [] for inter_data in self.inter_data_list: for cur_input_tokens in inter_data.input_tokens: input_tokens.extend(cur_input_tokens) + for cur_token_types in inter_data.token_types: + token_types.extend(cur_token_types) if not input_tokens: # This may happen when all prefill requests hit @@ -874,6 +895,12 @@ def build(self) -> ModelInputForGPU: input_tokens_tensor = async_tensor_h2d(input_tokens, torch.long, self.runner.device, self.runner.pin_memory) + + token_types_tensor = async_tensor_h2d(token_types, torch.long, + self.runner.device, + self.runner.pin_memory) \ + if token_types else None + if mrope_input_positions is not None: for idx in range(3): mrope_input_positions[idx].extend( @@ -952,6 +979,7 @@ def build(self) -> ModelInputForGPU: return self.model_input_cls( input_tokens=input_tokens_tensor, input_positions=input_positions_tensor, + token_types=token_types_tensor, attn_metadata=attn_metadata, seq_lens=seq_lens, query_lens=query_lens, From 7ea3cd7c3e9fa1db06cdf8ad1973237b061b7d64 Mon Sep 17 00:00:00 2001 From: Mengqing Cao Date: Mon, 25 Nov 2024 13:14:56 +0800 Subject: [PATCH 115/122] [Refactor][MISC] del redundant code in ParallelConfig.postinit (#10614) Signed-off-by: MengqingCao --- vllm/config.py | 15 +++++---------- 1 file changed, 5 insertions(+), 10 deletions(-) diff --git a/vllm/config.py b/vllm/config.py index 4ea56a14cabba..dcdaf58b5ccdb 100644 --- a/vllm/config.py +++ b/vllm/config.py @@ -998,20 +998,15 @@ def __post_init__(self) -> None: raise ValueError(f"worker-use-ray can't be used with " f"distributed executor backend " f"'{self.distributed_executor_backend}'.") - - if current_platform.is_tpu() and self.world_size > 1: - if self.distributed_executor_backend is None: - self.distributed_executor_backend = "ray" - if self.distributed_executor_backend != "ray": - raise ValueError( - "TPU backend only supports Ray for distributed inference.") - - if current_platform.is_hpu() and self.world_size > 1: + ray_only_devices = ["tpu", "hpu"] + if (current_platform.device_type in ray_only_devices + and self.world_size > 1): if self.distributed_executor_backend is None: self.distributed_executor_backend = "ray" if self.distributed_executor_backend != "ray": 
raise ValueError( - "HPU backend only supports Ray for distributed inference.") + f"{current_platform.device_type.upper()} backend only " + "supports Ray for distributed inference.") if self.distributed_executor_backend is None and self.world_size > 1: # We use multiprocessing by default if world_size fits on the From 571841b7fcc67f8b1d171522f6249ed4224033e1 Mon Sep 17 00:00:00 2001 From: youkaichao Date: Sun, 24 Nov 2024 21:24:33 -0800 Subject: [PATCH 116/122] [torch.compile] support encoder based models (#10613) Signed-off-by: youkaichao --- tests/compile/test_basic_correctness.py | 10 ++++++++++ vllm/model_executor/models/bert.py | 17 +++++++---------- 2 files changed, 17 insertions(+), 10 deletions(-) diff --git a/tests/compile/test_basic_correctness.py b/tests/compile/test_basic_correctness.py index b7170886d2556..99781c55b672e 100644 --- a/tests/compile/test_basic_correctness.py +++ b/tests/compile/test_basic_correctness.py @@ -62,6 +62,16 @@ class TestSetting: method="encode", fullgraph=True, ), + # encoder-based embedding model (BERT) + TestSetting( + model="BAAI/bge-base-en-v1.5", + model_args=["--task", "embedding"], + pp_size=1, + tp_size=1, + attn_backend="XFORMERS", + method="encode", + fullgraph=True, + ), # vision language model TestSetting( model="microsoft/Phi-3.5-vision-instruct", diff --git a/vllm/model_executor/models/bert.py b/vllm/model_executor/models/bert.py index 1fc87bc650d92..f570d6d3c12b3 100644 --- a/vllm/model_executor/models/bert.py +++ b/vllm/model_executor/models/bert.py @@ -5,6 +5,7 @@ from transformers import BertConfig from vllm.attention import Attention, AttentionMetadata, AttentionType +from vllm.compilation.decorators import support_torch_compile from vllm.config import CacheConfig, PoolerConfig, VllmConfig from vllm.distributed import get_tensor_model_parallel_world_size from vllm.model_executor.layers.activation import get_act_fn @@ -92,14 +93,14 @@ def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: return pooled_output +@support_torch_compile class BertEncoder(nn.Module): - def __init__(self, - config: BertConfig, - cache_config: Optional[CacheConfig] = None, - quant_config: Optional[QuantizationConfig] = None, - prefix: str = ""): + def __init__(self, vllm_config: VllmConfig, prefix: str = ""): super().__init__() + config = vllm_config.model_config.hf_config + cache_config = vllm_config.cache_config + quant_config = vllm_config.quant_config self.layer = nn.ModuleList([ BertLayer(config=config, cache_config=cache_config, @@ -336,12 +337,8 @@ def __init__(self, add_pooling_layer: bool = False): super().__init__() config = vllm_config.model_config.hf_config - cache_config = vllm_config.cache_config - quant_config = vllm_config.quant_config self.embeddings = embedding_class(config) - self.encoder = BertEncoder(config, - cache_config, - quant_config, + self.encoder = BertEncoder(vllm_config=vllm_config, prefix=f"{prefix}.encoder") self.pooler = BertPooler(config) if add_pooling_layer else None From a30a605d214e051c31057f8c0cb948c841a2f743 Mon Sep 17 00:00:00 2001 From: Cyrus Leung Date: Mon, 25 Nov 2024 14:34:07 +0800 Subject: [PATCH 117/122] [Doc] Add encoder-based models to Supported Models page (#10616) Signed-off-by: DarkLight1337 --- docs/source/models/supported_models.rst | 45 +++++++++++++++++++++++++ 1 file changed, 45 insertions(+) diff --git a/docs/source/models/supported_models.rst b/docs/source/models/supported_models.rst index e902d393f2f70..ccd2d8de8ec0b 100644 --- a/docs/source/models/supported_models.rst +++ 
b/docs/source/models/supported_models.rst @@ -325,6 +325,11 @@ Text Embedding - Example HF Models - :ref:`LoRA ` - :ref:`PP ` + * - :code:`BertModel` + - BERT-based + - :code:`BAAI/bge-base-en-v1.5`, etc. + - + - * - :code:`Gemma2Model` - Gemma2-based - :code:`BAAI/bge-multilingual-gemma2`, etc. @@ -340,6 +345,16 @@ Text Embedding - :code:`ssmits/Qwen2-7B-Instruct-embed-base`, :code:`Alibaba-NLP/gte-Qwen2-1.5B-instruct`, etc. - ✅︎ - ✅︎ + * - :code:`RobertaModel`, :code:`RobertaForMaskedLM` + - RoBERTa-based + - :code:`sentence-transformers/all-roberta-large-v1`, :code:`sentence-transformers/all-roberta-large-v1`, etc. + - + - + * - :code:`XLMRobertaModel` + - XLM-RoBERTa-based + - :code:`intfloat/multilingual-e5-large`, etc. + - + - .. important:: Some model architectures support both generation and embedding tasks. @@ -390,6 +405,36 @@ Classification .. note:: As an interim measure, these models are supported in both offline and online inference via Embeddings API. +Sentence Pair Scoring +--------------------- + +.. list-table:: + :widths: 25 25 50 5 5 + :header-rows: 1 + + * - Architecture + - Models + - Example HF Models + - :ref:`LoRA ` + - :ref:`PP ` + * - :code:`BertForSequenceClassification` + - BERT-based + - :code:`cross-encoder/ms-marco-MiniLM-L-6-v2`, etc. + - + - + * - :code:`RobertaForSequenceClassification` + - RoBERTa-based + - :code:`cross-encoder/quora-roberta-base`, etc. + - + - + * - :code:`XLMRobertaForSequenceClassification` + - XLM-RoBERTa-based + - :code:`BAAI/bge-reranker-v2-m3`, etc. + - + - + +.. note:: + These models are supported in both offline and online inference via Score API. Multimodal Language Models ^^^^^^^^^^^^^^^^^^^^^^^^^^ From 7c2134beda9a4f72c71c4faffcca22cebd4e1c3c Mon Sep 17 00:00:00 2001 From: Jee Jee Li Date: Mon, 25 Nov 2024 15:04:21 +0800 Subject: [PATCH 118/122] [torch.compile] force inductor threads (#10620) Signed-off-by: Jee Jee Li --- vllm/plugins/__init__.py | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/vllm/plugins/__init__.py b/vllm/plugins/__init__.py index d5056b18fe968..bd4764c5cc79c 100644 --- a/vllm/plugins/__init__.py +++ b/vllm/plugins/__init__.py @@ -3,6 +3,8 @@ from contextlib import contextmanager from typing import TYPE_CHECKING, Optional +import torch + import vllm.envs as envs if TYPE_CHECKING: @@ -26,7 +28,8 @@ def load_general_plugins(): # see https://github.com/vllm-project/vllm/issues/10480 os.environ['TORCHINDUCTOR_COMPILE_THREADS'] = '1' - + # see https://github.com/vllm-project/vllm/issues/10619 + torch._inductor.config.compile_threads = 1 global plugins_loaded if plugins_loaded: return From 65813781a2e2e76d18741601afe66b870a90a717 Mon Sep 17 00:00:00 2001 From: youkaichao Date: Sun, 24 Nov 2024 23:27:51 -0800 Subject: [PATCH 119/122] [torch.compile] add warning for unsupported models (#10622) Signed-off-by: youkaichao --- vllm/compilation/counter.py | 1 + vllm/compilation/decorators.py | 2 ++ vllm/plugins/__init__.py | 15 +++++++++++++++ 3 files changed, 18 insertions(+) diff --git a/vllm/compilation/counter.py b/vllm/compilation/counter.py index 100a49aba74ac..6385f1c5dbf81 100644 --- a/vllm/compilation/counter.py +++ b/vllm/compilation/counter.py @@ -5,6 +5,7 @@ @dataclasses.dataclass class CompilationCounter: + num_models_seen: int = 0 num_graphs_seen: int = 0 # including the splitting ops num_piecewise_graphs_seen: int = 0 diff --git a/vllm/compilation/decorators.py b/vllm/compilation/decorators.py index 4b78491bc5a48..8b81a29936989 100644 --- a/vllm/compilation/decorators.py +++ 
b/vllm/compilation/decorators.py @@ -3,6 +3,7 @@ import torch +from vllm.compilation.counter import compilation_counter from vllm.compilation.wrapper import TorchCompileWrapperWithCustomDispatcher from vllm.config import CompilationLevel, VllmConfig from vllm.logger import init_logger @@ -130,6 +131,7 @@ def __init__(self, *, vllm_config: VllmConfig, prefix: str = '', **kwargs): ] or not supports_dynamo() if self.do_not_compile: return + compilation_counter.num_models_seen += 1 TorchCompileWrapperWithCustomDispatcher.__init__( self, compilation_level=vllm_config.compilation_config.level) diff --git a/vllm/plugins/__init__.py b/vllm/plugins/__init__.py index bd4764c5cc79c..8b43167693598 100644 --- a/vllm/plugins/__init__.py +++ b/vllm/plugins/__init__.py @@ -80,6 +80,9 @@ def set_current_vllm_config(vllm_config: "VllmConfig"): """ global _current_vllm_config old_vllm_config = _current_vllm_config + from vllm.compilation.counter import compilation_counter + from vllm.config import CompilationLevel + num_models_seen = compilation_counter.num_models_seen try: _current_vllm_config = vllm_config yield @@ -88,6 +91,18 @@ def set_current_vllm_config(vllm_config: "VllmConfig"): vllm_config.compilation_config.enabled_custom_ops) logger.debug("disabled custom ops: %s", vllm_config.compilation_config.disabled_custom_ops) + if vllm_config.compilation_config.level == CompilationLevel.PIECEWISE \ + and compilation_counter.num_models_seen == num_models_seen: + # If the model supports compilation, + # compilation_counter.num_models_seen should be increased + # by at least 1. + # If it is not increased, it means the model does not support + # compilation (does not have @support_torch_compile decorator). + logger.warning( + "`torch.compile` is turned on, but the model %s" + " does not support it. Please open an issue on GitHub" + "if you want it to be supported.", + vllm_config.model_config.model) _current_vllm_config = old_vllm_config From 25d806e95391a8556deb69bdb214714425f776c9 Mon Sep 17 00:00:00 2001 From: youkaichao Date: Sun, 24 Nov 2024 23:40:08 -0800 Subject: [PATCH 120/122] [misc] add torch.compile compatibility check (#10618) Signed-off-by: youkaichao --- tests/v1/engine/test_engine_core_client.py | 2 +- vllm/config.py | 14 ++++++++++++++ vllm/engine/arg_utils.py | 7 +++++++ 3 files changed, 22 insertions(+), 1 deletion(-) diff --git a/tests/v1/engine/test_engine_core_client.py b/tests/v1/engine/test_engine_core_client.py index 7b241bf836a0e..e248e35ae4069 100644 --- a/tests/v1/engine/test_engine_core_client.py +++ b/tests/v1/engine/test_engine_core_client.py @@ -81,7 +81,7 @@ def test_engine_core_client(monkeypatch, multiprocessing_mode: bool): with monkeypatch.context() as m: m.setenv("VLLM_USE_V1", "1") - engine_args = EngineArgs(model=MODEL_NAME) + engine_args = EngineArgs(model=MODEL_NAME, compilation_config=3) vllm_config = engine_args.create_engine_config() executor_class = AsyncLLM._get_executor_cls(vllm_config) client = EngineCoreClient.make_client( diff --git a/vllm/config.py b/vllm/config.py index dcdaf58b5ccdb..68720f3a3034d 100644 --- a/vllm/config.py +++ b/vllm/config.py @@ -2394,6 +2394,20 @@ def __post_init__(self): self.compilation_config.pass_config.enable_reshape = False self.compilation_config.level = CompilationLevel.PIECEWISE + if self.cache_config is not None and \ + self.cache_config.cpu_offload_gb > 0 and \ + self.compilation_config.level != CompilationLevel.NO_COMPILATION: + logger.warning( + "CPU offload is not supported with `torch.compile` yet." 
+ " Disabling `torch.compile`.") + self.compilation_config.level = CompilationLevel.NO_COMPILATION + + if self.lora_config is not None and self.compilation_config.level !=\ + CompilationLevel.NO_COMPILATION: + logger.warning("LoRA is not supported with `torch.compile` yet. " + "Disabling `torch.compile`.") + self.compilation_config.level = CompilationLevel.NO_COMPILATION + current_platform.check_and_update_config(self) def __str__(self): diff --git a/vllm/engine/arg_utils.py b/vllm/engine/arg_utils.py index 82f1ef51255e9..a43e133f21ac2 100644 --- a/vllm/engine/arg_utils.py +++ b/vllm/engine/arg_utils.py @@ -197,6 +197,13 @@ def __post_init__(self): if not self.tokenizer: self.tokenizer = self.model + # support `EngineArgs(compilation_config={...})` + # without having to manually construct a + # CompilationConfig object + if isinstance(self.compilation_config, (int, dict)): + self.compilation_config = CompilationConfig.from_cli( + json.dumps(self.compilation_config)) + # Setup plugins from vllm.plugins import load_general_plugins load_general_plugins() From 05d1f8c9c64b4458ae7cee2650eb97498146ee50 Mon Sep 17 00:00:00 2001 From: youkaichao Date: Mon, 25 Nov 2024 01:27:30 -0800 Subject: [PATCH 121/122] [misc] move functions to config.py (#10624) Signed-off-by: youkaichao --- tests/compile/piecewise/test_simple.py | 4 +- tests/compile/piecewise/test_toy_llama.py | 4 +- tests/kernels/test_encoder_decoder_attn.py | 3 +- .../model_executor/test_enabled_custom_ops.py | 3 +- vllm/attention/layer.py | 3 +- vllm/compilation/wrapper.py | 3 +- vllm/config.py | 51 +++++++++++++++++ vllm/model_executor/custom_op.py | 2 +- vllm/model_executor/model_loader/loader.py | 3 +- .../model_executor/model_loader/tensorizer.py | 3 +- vllm/plugins/__init__.py | 56 ------------------- 11 files changed, 62 insertions(+), 73 deletions(-) diff --git a/tests/compile/piecewise/test_simple.py b/tests/compile/piecewise/test_simple.py index 0db12d6b6a43c..7ef502abee345 100644 --- a/tests/compile/piecewise/test_simple.py +++ b/tests/compile/piecewise/test_simple.py @@ -10,8 +10,8 @@ from vllm.compilation.compile_context import set_compile_context from vllm.compilation.counter import compilation_counter from vllm.compilation.decorators import support_torch_compile -from vllm.config import CompilationConfig, CompilationLevel, VllmConfig -from vllm.plugins import set_current_vllm_config +from vllm.config import (CompilationConfig, CompilationLevel, VllmConfig, + set_current_vllm_config) from vllm.utils import direct_register_custom_op global_counter = 0 diff --git a/tests/compile/piecewise/test_toy_llama.py b/tests/compile/piecewise/test_toy_llama.py index cfe661b8871e0..dbd5a3bbffeab 100644 --- a/tests/compile/piecewise/test_toy_llama.py +++ b/tests/compile/piecewise/test_toy_llama.py @@ -16,8 +16,8 @@ from vllm.compilation.compile_context import set_compile_context from vllm.compilation.counter import compilation_counter from vllm.compilation.decorators import support_torch_compile -from vllm.config import CompilationConfig, CompilationLevel, VllmConfig -from vllm.plugins import set_current_vllm_config +from vllm.config import (CompilationConfig, CompilationLevel, VllmConfig, + set_current_vllm_config) from vllm.utils import direct_register_custom_op # create a library to hold the custom op diff --git a/tests/kernels/test_encoder_decoder_attn.py b/tests/kernels/test_encoder_decoder_attn.py index c4b72ba6bf4ee..d943b048b7934 100644 --- a/tests/kernels/test_encoder_decoder_attn.py +++ b/tests/kernels/test_encoder_decoder_attn.py 
@@ -18,10 +18,9 @@ from vllm.attention.backends.utils import STR_NOT_IMPL_ENC_DEC_ROCM_HIP from vllm.attention.selector import (_Backend, _cached_get_attn_backend, global_force_attn_backend_context_manager) -from vllm.config import VllmConfig +from vllm.config import VllmConfig, set_current_vllm_config from vllm.forward_context import set_forward_context from vllm.platforms import current_platform -from vllm.plugins import set_current_vllm_config # List of support backends for encoder/decoder models LIST_ENC_DEC_SUPPORTED_BACKENDS = [_Backend.XFORMERS, _Backend.FLASH_ATTN] diff --git a/tests/model_executor/test_enabled_custom_ops.py b/tests/model_executor/test_enabled_custom_ops.py index c54e30995da49..0a3aba255fd76 100644 --- a/tests/model_executor/test_enabled_custom_ops.py +++ b/tests/model_executor/test_enabled_custom_ops.py @@ -2,13 +2,12 @@ import pytest -from vllm.config import CompilationConfig, VllmConfig +from vllm.config import CompilationConfig, VllmConfig, set_current_vllm_config from vllm.model_executor.custom_op import CustomOp from vllm.model_executor.layers.activation import (GeluAndMul, ReLUSquaredActivation, SiluAndMul) from vllm.model_executor.layers.layernorm import RMSNorm -from vllm.plugins import set_current_vllm_config # Registered subclass for test diff --git a/vllm/attention/layer.py b/vllm/attention/layer.py index 1bb335909484b..17157617248f7 100644 --- a/vllm/attention/layer.py +++ b/vllm/attention/layer.py @@ -7,13 +7,12 @@ import vllm.envs as envs from vllm.attention import AttentionMetadata, AttentionType from vllm.attention.selector import backend_name_to_enum, get_attn_backend -from vllm.config import CacheConfig +from vllm.config import CacheConfig, get_current_vllm_config from vllm.forward_context import ForwardContext, get_forward_context from vllm.model_executor.layers.quantization.base_config import ( QuantizationConfig) from vllm.model_executor.layers.quantization.kv_cache import BaseKVCacheMethod from vllm.platforms import current_platform -from vllm.plugins import get_current_vllm_config from vllm.utils import direct_register_custom_op diff --git a/vllm/compilation/wrapper.py b/vllm/compilation/wrapper.py index 0143d0301ca1a..bc4d292fef402 100644 --- a/vllm/compilation/wrapper.py +++ b/vllm/compilation/wrapper.py @@ -8,7 +8,7 @@ import torch import vllm.envs as envs -from vllm.config import CompilationLevel +from vllm.config import CompilationLevel, get_current_vllm_config class TorchCompileWrapperWithCustomDispatcher: @@ -32,7 +32,6 @@ def __init__(self, # default compilation settings # compiling the forward method - from vllm.plugins import get_current_vllm_config backend = get_current_vllm_config( ).compilation_config.init_backend() diff --git a/vllm/config.py b/vllm/config.py index 68720f3a3034d..0a390c4311ba6 100644 --- a/vllm/config.py +++ b/vllm/config.py @@ -3,6 +3,7 @@ import hashlib import json import warnings +from contextlib import contextmanager from dataclasses import dataclass, field, replace from pathlib import Path from typing import (TYPE_CHECKING, Any, Callable, ClassVar, Counter, Dict, @@ -2450,3 +2451,53 @@ def __str__(self): self.cache_config.enable_prefix_caching, self.model_config.use_async_output_proc, self.model_config.mm_processor_kwargs) + + +_current_vllm_config: Optional[VllmConfig] = None + + +@contextmanager +def set_current_vllm_config(vllm_config: VllmConfig): + """ + Temporarily set the current VLLM config. + Used during model initialization. 
+ We save the current VLLM config in a global variable, + so that all modules can access it, e.g. custom ops + can access the VLLM config to determine how to dispatch. + """ + global _current_vllm_config + old_vllm_config = _current_vllm_config + from vllm.compilation.counter import compilation_counter + num_models_seen = compilation_counter.num_models_seen + try: + _current_vllm_config = vllm_config + yield + finally: + logger.debug("enabled custom ops: %s", + vllm_config.compilation_config.enabled_custom_ops) + logger.debug("disabled custom ops: %s", + vllm_config.compilation_config.disabled_custom_ops) + if vllm_config.compilation_config.level == CompilationLevel.PIECEWISE \ + and compilation_counter.num_models_seen == num_models_seen: + # If the model supports compilation, + # compilation_counter.num_models_seen should be increased + # by at least 1. + # If it is not increased, it means the model does not support + # compilation (does not have @support_torch_compile decorator). + logger.warning( + "`torch.compile` is turned on, but the model %s" + " does not support it. Please open an issue on GitHub" + "if you want it to be supported.", + vllm_config.model_config.model) + _current_vllm_config = old_vllm_config + + +def get_current_vllm_config() -> VllmConfig: + if _current_vllm_config is None: + # in ci, usually when we test custom ops/modules directly, + # we don't set the vllm config. In that case, we set a default + # config. + logger.warning("Current VLLM config is not set.") + from vllm.config import VllmConfig + return VllmConfig() + return _current_vllm_config diff --git a/vllm/model_executor/custom_op.py b/vllm/model_executor/custom_op.py index b07966f2ab7d0..fddc8bad09ef5 100644 --- a/vllm/model_executor/custom_op.py +++ b/vllm/model_executor/custom_op.py @@ -2,9 +2,9 @@ import torch.nn as nn +from vllm.config import get_current_vllm_config from vllm.logger import init_logger from vllm.platforms import current_platform -from vllm.plugins import get_current_vllm_config from vllm.utils import print_warning_once logger = init_logger(__name__) diff --git a/vllm/model_executor/model_loader/loader.py b/vllm/model_executor/model_loader/loader.py index 34e0860162260..441dd409b4f9d 100644 --- a/vllm/model_executor/model_loader/loader.py +++ b/vllm/model_executor/model_loader/loader.py @@ -23,7 +23,7 @@ from transformers.utils import SAFE_WEIGHTS_INDEX_NAME from vllm.config import (LoadConfig, LoadFormat, ModelConfig, ParallelConfig, - VllmConfig) + VllmConfig, set_current_vllm_config) from vllm.distributed import (get_tensor_model_parallel_rank, get_tensor_model_parallel_world_size) from vllm.envs import VLLM_USE_MODELSCOPE @@ -47,7 +47,6 @@ safetensors_weights_iterator) from vllm.model_executor.utils import set_weight_attrs from vllm.platforms import current_platform -from vllm.plugins import set_current_vllm_config from vllm.utils import is_pin_memory_available diff --git a/vllm/model_executor/model_loader/tensorizer.py b/vllm/model_executor/model_loader/tensorizer.py index 3fd668765a1b1..87f3fcb5cae00 100644 --- a/vllm/model_executor/model_loader/tensorizer.py +++ b/vllm/model_executor/model_loader/tensorizer.py @@ -13,13 +13,12 @@ from transformers import PretrainedConfig import vllm.envs as envs -from vllm.config import ModelConfig, ParallelConfig +from vllm.config import ModelConfig, ParallelConfig, set_current_vllm_config from vllm.engine.arg_utils import EngineArgs from vllm.engine.llm_engine import LLMEngine from vllm.logger import init_logger from 
vllm.model_executor.layers.vocab_parallel_embedding import ( VocabParallelEmbedding) -from vllm.plugins import set_current_vllm_config from vllm.utils import FlexibleArgumentParser tensorizer_error_msg = None diff --git a/vllm/plugins/__init__.py b/vllm/plugins/__init__.py index 8b43167693598..3c64726ca3344 100644 --- a/vllm/plugins/__init__.py +++ b/vllm/plugins/__init__.py @@ -1,15 +1,10 @@ import logging import os -from contextlib import contextmanager -from typing import TYPE_CHECKING, Optional import torch import vllm.envs as envs -if TYPE_CHECKING: - from vllm.config import VllmConfig - logger = logging.getLogger(__name__) # make sure one process only loads plugins once @@ -64,54 +59,3 @@ def load_general_plugins(): logger.info("plugin %s loaded.", plugin.name) except Exception: logger.exception("Failed to load plugin %s", plugin.name) - - -_current_vllm_config: Optional["VllmConfig"] = None - - -@contextmanager -def set_current_vllm_config(vllm_config: "VllmConfig"): - """ - Temporarily set the current VLLM config. - Used during model initialization. - We save the current VLLM config in a global variable, - so that all modules can access it, e.g. custom ops - can access the VLLM config to determine how to dispatch. - """ - global _current_vllm_config - old_vllm_config = _current_vllm_config - from vllm.compilation.counter import compilation_counter - from vllm.config import CompilationLevel - num_models_seen = compilation_counter.num_models_seen - try: - _current_vllm_config = vllm_config - yield - finally: - logger.debug("enabled custom ops: %s", - vllm_config.compilation_config.enabled_custom_ops) - logger.debug("disabled custom ops: %s", - vllm_config.compilation_config.disabled_custom_ops) - if vllm_config.compilation_config.level == CompilationLevel.PIECEWISE \ - and compilation_counter.num_models_seen == num_models_seen: - # If the model supports compilation, - # compilation_counter.num_models_seen should be increased - # by at least 1. - # If it is not increased, it means the model does not support - # compilation (does not have @support_torch_compile decorator). - logger.warning( - "`torch.compile` is turned on, but the model %s" - " does not support it. Please open an issue on GitHub" - "if you want it to be supported.", - vllm_config.model_config.model) - _current_vllm_config = old_vllm_config - - -def get_current_vllm_config() -> "VllmConfig": - if _current_vllm_config is None: - # in ci, usually when we test custom ops/modules directly, - # we don't set the vllm config. In that case, we set a default - # config. 
- logger.warning("Current VLLM config is not set.") - from vllm.config import VllmConfig - return VllmConfig() - return _current_vllm_config From ed46f143212203b7afcbc8538119b6e8155c643e Mon Sep 17 00:00:00 2001 From: Cyrus Leung Date: Mon, 25 Nov 2024 17:51:20 +0800 Subject: [PATCH 122/122] [Model] Support `is_causal` HF config field for Qwen2 model (#10621) Signed-off-by: DarkLight1337 --- docs/source/models/supported_models.rst | 13 +++++++++--- .../embedding/language/test_embedding.py | 12 +++++++++-- tests/models/embedding/utils.py | 4 ++-- vllm/config.py | 15 ++++++++++---- vllm/model_executor/models/qwen2.py | 20 +++++++++++++++++-- 5 files changed, 51 insertions(+), 13 deletions(-) diff --git a/docs/source/models/supported_models.rst b/docs/source/models/supported_models.rst index ccd2d8de8ec0b..54e2c4479c2c9 100644 --- a/docs/source/models/supported_models.rst +++ b/docs/source/models/supported_models.rst @@ -342,7 +342,7 @@ Text Embedding - ✅︎ * - :code:`Qwen2Model`, :code:`Qwen2ForCausalLM` - Qwen2-based - - :code:`ssmits/Qwen2-7B-Instruct-embed-base`, :code:`Alibaba-NLP/gte-Qwen2-1.5B-instruct`, etc. + - :code:`ssmits/Qwen2-7B-Instruct-embed-base`, :code:`Alibaba-NLP/gte-Qwen2-7B-instruct` (see note), etc. - ✅︎ - ✅︎ * - :code:`RobertaModel`, :code:`RobertaForMaskedLM` @@ -363,6 +363,13 @@ Text Embedding .. tip:: You can override the model's pooling method by passing :code:`--override-pooler-config`. +.. note:: + Unlike base Qwen2, :code:`Alibaba-NLP/gte-Qwen2-7B-instruct` uses bi-directional attention. + You can set `--hf-overrides '{"is_causal": false}'` to change the attention mask accordingly. + + On the other hand, its 1.5B variant (:code:`Alibaba-NLP/gte-Qwen2-1.5B-instruct`) uses causal attention + despite being described otherwise on its model card. + Reward Modeling --------------- @@ -606,10 +613,10 @@ Text Generation | :sup:`+` Multiple items can be inputted per text prompt for this modality. .. note:: - vLLM currently only supports adding LoRA to the language backbone of multimodal models. + vLLM currently only supports adding LoRA to the language backbone of multimodal models. .. note:: - For :code:`openbmb/MiniCPM-V-2`, the official repo doesn't work yet, so we need to use a fork (:code:`HwwwH/MiniCPM-V-2`) for now. + The official :code:`openbmb/MiniCPM-V-2` doesn't work yet, so we need to use a fork (:code:`HwwwH/MiniCPM-V-2`) for now. 
For more details, please see: https://github.com/vllm-project/vllm/pull/4087#issuecomment-2250397630 Multimodal Embedding diff --git a/tests/models/embedding/language/test_embedding.py b/tests/models/embedding/language/test_embedding.py index c3f351ef707be..36b1e5887981c 100644 --- a/tests/models/embedding/language/test_embedding.py +++ b/tests/models/embedding/language/test_embedding.py @@ -21,6 +21,7 @@ marks=[pytest.mark.core_model]), pytest.param("ssmits/Qwen2-7B-Instruct-embed-base"), pytest.param("Alibaba-NLP/gte-Qwen2-1.5B-instruct"), + pytest.param("Alibaba-NLP/gte-Qwen2-7B-instruct"), ], ) @pytest.mark.parametrize("dtype", ["half"]) @@ -31,6 +32,10 @@ def test_models( model, dtype: str, ) -> None: + vllm_extra_kwargs = {} + if model == "Alibaba-NLP/gte-Qwen2-7B-instruct": + vllm_extra_kwargs["hf_overrides"] = {"is_causal": False} + # The example_prompts has ending "\n", for example: # "Write a short story about a robot that dreams for the first time.\n" # sentence_transformers will strip the input texts, see: @@ -43,8 +48,11 @@ def test_models( is_sentence_transformer=True) as hf_model: hf_outputs = hf_model.encode(example_prompts) - with vllm_runner(model, task="embedding", dtype=dtype, - max_model_len=None) as vllm_model: + with vllm_runner(model, + task="embedding", + dtype=dtype, + max_model_len=None, + **vllm_extra_kwargs) as vllm_model: vllm_outputs = vllm_model.encode(example_prompts) # This test is for verifying whether the model's extra_repr # can be printed correctly. diff --git a/tests/models/embedding/utils.py b/tests/models/embedding/utils.py index fd1c44d9c117e..f96c7d2b176db 100644 --- a/tests/models/embedding/utils.py +++ b/tests/models/embedding/utils.py @@ -24,7 +24,7 @@ def check_embeddings_close( dim=0) fail_msg = (f"Test{prompt_idx}:" - f"\n{name_0}:\t{embeddings_0!r}" - f"\n{name_1}:\t{embeddings_1!r}") + f"\n{name_0}:\t{embeddings_0[:16]!r}" + f"\n{name_1}:\t{embeddings_1[:16]!r}") assert sim >= 1 - tol, fail_msg diff --git a/vllm/config.py b/vllm/config.py index 0a390c4311ba6..f9ecb02cd5bde 100644 --- a/vllm/config.py +++ b/vllm/config.py @@ -27,7 +27,7 @@ get_hf_text_config, get_pooling_config, get_sentence_transformer_tokenizer_config, is_encoder_decoder, uses_mrope) from vllm.utils import (GiB_bytes, cuda_device_count_stateless, get_cpu_memory, - identity, print_warning_once, resolve_obj_by_qualname) + print_warning_once, resolve_obj_by_qualname) if TYPE_CHECKING: from ray.util.placement_group import PlacementGroup @@ -183,7 +183,7 @@ def __init__( hf_overrides_fn = hf_overrides else: hf_overrides_kw = hf_overrides - hf_overrides_fn = identity + hf_overrides_fn = None if rope_scaling is not None: hf_override: Dict[str, Any] = {"rope_scaling": rope_scaling} @@ -212,8 +212,15 @@ def __init__( self.skip_tokenizer_init = skip_tokenizer_init hf_config = get_config(self.model, trust_remote_code, revision, - code_revision, config_format, **hf_overrides_kw) - hf_config = hf_overrides_fn(hf_config) + code_revision, config_format) + + if hf_overrides_kw: + logger.info("Overriding HF config with %s", hf_overrides_kw) + hf_config.update(hf_overrides_kw) + if hf_overrides_fn: + logger.info("Overriding HF config with %s", hf_overrides_fn) + hf_config = hf_overrides_fn(hf_config) + self.hf_config = hf_config self.hf_text_config = get_hf_text_config(self.hf_config) diff --git a/vllm/model_executor/models/qwen2.py b/vllm/model_executor/models/qwen2.py index 370cff5fa153f..8da75c9935a13 100644 --- a/vllm/model_executor/models/qwen2.py +++ 
b/vllm/model_executor/models/qwen2.py @@ -27,7 +27,7 @@ from torch import nn from transformers import Qwen2Config -from vllm.attention import Attention, AttentionMetadata +from vllm.attention import Attention, AttentionMetadata, AttentionType from vllm.compilation.decorators import support_torch_compile from vllm.config import CacheConfig, VllmConfig from vllm.distributed import get_pp_group, get_tensor_model_parallel_world_size @@ -164,11 +164,17 @@ def forward( hidden_states: torch.Tensor, kv_cache: torch.Tensor, attn_metadata: AttentionMetadata, + attn_type: str = AttentionType.DECODER, ) -> torch.Tensor: qkv, _ = self.qkv_proj(hidden_states) q, k, v = qkv.split([self.q_size, self.kv_size, self.kv_size], dim=-1) q, k = self.rotary_emb(positions, q, k) - attn_output = self.attn(q, k, v, kv_cache, attn_metadata) + attn_output = self.attn(q, + k, + v, + kv_cache, + attn_metadata, + attn_type=attn_type) output, _ = self.o_proj(attn_output) return output @@ -210,6 +216,15 @@ def __init__( self.post_attention_layernorm = RMSNorm(config.hidden_size, eps=config.rms_norm_eps) + # By default, Qwen2 uses causal attention as it is a decoder-only model. + # You can override the HF config with `is_causal=False` to enable + # bidirectional attention, which is used in some embedding models + # (e.g. Alibaba-NLP/gte-Qwen2-7B-instruct) + if getattr(config, "is_causal", True): + self._attn_type = AttentionType.DECODER + else: + self._attn_type = AttentionType.ENCODER_ONLY + def forward( self, positions: torch.Tensor, @@ -230,6 +245,7 @@ def forward( hidden_states=hidden_states, kv_cache=kv_cache, attn_metadata=attn_metadata, + attn_type=self._attn_type, ) # Fully Connected
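
For readers who want to exercise the `is_causal` override that the final patch documents, here is a minimal, illustrative sketch of the Python-API equivalent of the `--hf-overrides '{"is_causal": false}'` CLI example in the updated docs. The model name and the override come from the patch; the prompt, the `task="embedding"` setting, the result handling, and the assumption that the `LLM` constructor accepts `hf_overrides` the same way the CLI flag does are illustrative and not part of the patch series itself.

# Hypothetical usage sketch (not part of the patch series): load
# Alibaba-NLP/gte-Qwen2-7B-instruct as an embedding model and flip its
# attention mask to bidirectional via hf_overrides, mirroring the
# `--hf-overrides '{"is_causal": false}'` note in supported_models.rst.
from vllm import LLM

llm = LLM(
    model="Alibaba-NLP/gte-Qwen2-7B-instruct",
    task="embedding",
    # Overrides the HF config field read by Qwen2DecoderLayer so its
    # attention layers run with AttentionType.ENCODER_ONLY instead of
    # the default causal (DECODER) mask.
    hf_overrides={"is_causal": False},
)

outputs = llm.encode(["What is the capital of France?"])
for output in outputs:
    embedding = output.outputs.embedding  # list of floats
    print(len(embedding))

This mirrors the change to tests/models/embedding/language/test_embedding.py above, which passes hf_overrides={"is_causal": False} only for the 7B variant; per the docs note in the same patch, the 1.5B variant keeps causal attention.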