From b431117e3fad25651724cb398275894cfab4faca Mon Sep 17 00:00:00 2001
From: Owen
Date: Wed, 20 Sep 2023 21:10:04 -0400
Subject: [PATCH] fix batch inference

---
 sciphi/llm/vllm_llm.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/sciphi/llm/vllm_llm.py b/sciphi/llm/vllm_llm.py
index 67a21ce..8258b48 100644
--- a/sciphi/llm/vllm_llm.py
+++ b/sciphi/llm/vllm_llm.py
@@ -59,5 +59,5 @@ def get_instruct_completion(self, prompt: str) -> str:
 
     def get_batch_instruct_completion(self, prompts: list[str]) -> list[str]:
         """Get batch instruction completion from local vLLM."""
-        raw_outputs = self.model.generate([prompts], self.sampling_params)
+        raw_outputs = self.model.generate(prompts, self.sampling_params)
         return [ele.outputs[0].text for ele in raw_outputs]
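
Note (not part of the patch): a minimal standalone sketch of the fixed batch path, assuming vLLM is installed locally; the model name and prompts below are placeholders for illustration, not values SciPhi configures.

    # Standalone sketch of the corrected batch call, assuming vLLM is installed.
    from vllm import LLM, SamplingParams

    prompts = [
        "Summarize the theory of relativity in one sentence.",
        "What is the capital of France?",
    ]

    model = LLM(model="facebook/opt-125m")  # placeholder model for illustration
    sampling_params = SamplingParams(temperature=0.7, max_tokens=64)

    # Passing the flat list of prompts yields one RequestOutput per prompt,
    # which is what get_batch_instruct_completion expects. Wrapping it again
    # as [prompts] (the bug this patch removes) hands vLLM a single nested
    # "prompt" instead of a batch.
    raw_outputs = model.generate(prompts, sampling_params)
    completions = [ele.outputs[0].text for ele in raw_outputs]

    for prompt, completion in zip(prompts, completions):
        print(prompt, "->", completion)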