From bc80d1ed87aa5e6b1917309ce965fbcb0872309a Mon Sep 17 00:00:00 2001
From: xiangw2
Date: Wed, 27 Nov 2024 14:36:05 +0800
Subject: [PATCH] fix: document missing qkv_proj/gate_up_proj bias in TeleChat2

---
 vllm/model_executor/models/telechat2.py | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/vllm/model_executor/models/telechat2.py b/vllm/model_executor/models/telechat2.py
index ff536115f7b1e..0ea79a1717712 100644
--- a/vllm/model_executor/models/telechat2.py
+++ b/vllm/model_executor/models/telechat2.py
@@ -39,7 +39,8 @@ def __init__(self, *, vllm_config: VllmConfig, prefix: str = ""):
         vllm_config.model_config.hf_config.mlp_bias = True
         super().__init__(vllm_config=vllm_config, prefix=prefix)
         # 2. Remove the bias from the qkv_proj and gate_up_proj based on config
-        # FIXME: Handle qkv_bias etc
+        # Telechat2's gate_up_proj and qkv_proj don't have bias
+        # see: https://github.com/vllm-project/vllm/pull/10311#issuecomment-2490297566
         for layer in self.layers:
             layer.self_attn.qkv_proj.bias = layer.mlp.gate_up_proj.bias = None
             layer.self_attn.qkv_proj.skip_bias_add = True
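
Note for reviewers: below is a minimal standalone sketch, in plain PyTorch rather
than the actual vLLM classes, of the pattern this patch documents: the parent
model is first initialized with bias enabled everywhere, then the bias is
stripped from the projections whose checkpoint weights have none. The Layer
class, names, and sizes are illustrative assumptions, not vLLM code; vLLM's
parallel linear layers additionally expose skip_bias_add (which tells the layer
not to add the bias in forward), an attribute plain nn.Linear does not have.

    import torch
    import torch.nn as nn

    class Layer(nn.Module):
        """Illustrative stand-in for one decoder layer (not vLLM's class)."""

        def __init__(self, hidden: int):
            super().__init__()
            # Step 1 analogue: everything is created with bias=True, as the
            # hf_config.bias / hf_config.mlp_bias overrides force in the patch.
            self.qkv_proj = nn.Linear(hidden, 3 * hidden, bias=True)
            self.gate_up_proj = nn.Linear(hidden, 2 * hidden, bias=True)
            self.down_proj = nn.Linear(2 * hidden, hidden, bias=True)  # keeps bias

    layers = nn.ModuleList(Layer(16) for _ in range(2))

    # Step 2 analogue: TeleChat2 checkpoints ship no bias for qkv_proj and
    # gate_up_proj, so drop those parameters; forward then runs with bias=None.
    for layer in layers:
        layer.qkv_proj.bias = None
        layer.gate_up_proj.bias = None

    x = torch.randn(1, 16)
    print(layers[0].qkv_proj(x).shape)  # torch.Size([1, 48])

Dropping the parameter (rather than zeroing it) also keeps the weight loader
from expecting a bias tensor that the TeleChat2 checkpoint never provides.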