From 1fa020c539485e398d10ca9be376c1d0d87ae19b Mon Sep 17 00:00:00 2001
From: Nick Hill
Date: Thu, 7 Nov 2024 05:06:57 +0000
Subject: [PATCH] [V1][BugFix] Fix Generator construction in greedy + seed
 case (#10097)

Signed-off-by: Nick Hill
---
 vllm/v1/worker/gpu_model_runner.py | 5 +++--
 1 file changed, 3 insertions(+), 2 deletions(-)

diff --git a/vllm/v1/worker/gpu_model_runner.py b/vllm/v1/worker/gpu_model_runner.py
index e6383b59cf7a3..9bb49a21453d0 100644
--- a/vllm/v1/worker/gpu_model_runner.py
+++ b/vllm/v1/worker/gpu_model_runner.py
@@ -146,7 +146,7 @@ def _update_states(self, scheduler_output: "SchedulerOutput") -> None:
         for req_data in scheduler_output.scheduled_new_reqs:
             req_id = req_data.req_id
             sampling_params = req_data.sampling_params
-            if sampling_params.seed is not None:
+            if sampling_params.sampling_type == SamplingType.RANDOM_SEED:
                 generator = torch.Generator(device=self.device)
                 generator.manual_seed(sampling_params.seed)
             else:
@@ -382,7 +382,8 @@ def execute_model(
                 # Rewind the generator state as if the token was not sampled.
                 generator = self.input_batch.generators.get(i)
                 if generator is not None:
-                    generator.set_offset(generator.get_offset() - 1)
+                    # This relies on cuda-specific torch-internal impl details
+                    generator.set_offset(generator.get_offset() - 4)
 
         if sampler_output.logprob_token_ids is None:
             logprob_token_ids = None
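
Note on the first hunk: `sampling_params.seed is not None` also matches greedy
requests that happen to carry a seed, so a per-request torch.Generator was
constructed for requests that never draw from one. `SamplingType.RANDOM_SEED`
is reported only for seeded random sampling. A minimal sketch of the
distinction, assuming vLLM's `SamplingParams.sampling_type` property
classifies temperature-0 requests as greedy regardless of seed:

from vllm.sampling_params import SamplingParams, SamplingType

# Greedy decoding with a seed attached: before this fix, the
# `seed is not None` check constructed a Generator for this request
# even though greedy decoding never samples from it.
greedy = SamplingParams(temperature=0.0, seed=42)
assert greedy.sampling_type == SamplingType.GREEDY

# Seeded random sampling: the only case that actually needs a
# per-request Generator.
seeded = SamplingParams(temperature=1.0, seed=42)
assert seeded.sampling_type == SamplingType.RANDOM_SEED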
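
Note on the second hunk: on CUDA, torch's Philox-based generator tracks its
position via an offset that advances in steps of 4 per sampling call rather
than 1, so undoing one sampled token means subtracting 4. This is the
torch-internal detail the new in-code comment flags. A hedged probe, assuming
a CUDA device and a torch build that exposes `Generator.get_offset` and
`Generator.set_offset`:

import torch

# Observe how much a single sampling call advances the CUDA generator's
# Philox offset. The exact increment is a torch/CUDA implementation
# detail; at the time of this patch it moves in steps of 4.
gen = torch.Generator(device="cuda")
gen.manual_seed(42)

probs = torch.ones(16, device="cuda")
before = gen.get_offset()
torch.multinomial(probs, num_samples=1, generator=gen)
after = gen.get_offset()
print(after - before)  # expected: 4 (CUDA/Philox-specific, may change)

# Rewinding "one token", as the patched code does:
gen.set_offset(gen.get_offset() - 4)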