Commit e56a741
[fix] fix CI warnings
Signed-off-by: Jingxin Pan <[email protected]>
lyppg committed Dec 30, 2024
1 parent 7a43c65 commit e56a741
Showing 2 changed files with 5 additions and 5 deletions.
6 changes: 3 additions & 3 deletions vllm/worker/model_runner.py
@@ -436,7 +436,7 @@ def reset_cached_inter_data(self):
     def __init__(self,
                  runner: "GPUModelRunnerBase",
                  finished_requests_ids: Optional[List[str]] = None,
-                 kv_caches: Optional[List[torch.Tensor]] = []):
+                 kv_caches: Optional[List[torch.Tensor]] = None):
         super().__init__()
         # Compute functions for each sequence in a sequence group.
         # WARNING: The order of the functions matters!
@@ -1246,7 +1246,7 @@ def _prepare_model_input_tensors(
         self,
         seq_group_metadata_list: List[SequenceGroupMetadata],
         finished_requests_ids: Optional[List[str]] = None,
-        kv_caches: Optional[List[torch.Tensor]] = [],
+        kv_caches: Optional[List[torch.Tensor]] = None,
     ) -> TModelInputForGPU:
         """Helper method to prepare the model input based on a given sequence
         group. Prepares metadata needed for the base model forward pass but not
@@ -1621,7 +1621,7 @@ def prepare_model_input(
         seq_group_metadata_list: List[SequenceGroupMetadata],
         virtual_engine: int = 0,
         finished_requests_ids: Optional[List[str]] = None,
-        kv_caches: Optional[List[torch.Tensor]] = [],
+        kv_caches: Optional[List[torch.Tensor]] = None,
     ) -> ModelInputForGPUWithSamplingMetadata:
         """Prepare the model input based on a given sequence group, including
         metadata for the sampling step.
4 changes: 2 additions & 2 deletions vllm/worker/worker_base.py
@@ -259,7 +259,7 @@ def _get_worker_input_from_broadcast(
 
     def _get_driver_input_and_broadcast(
         self, execute_model_req: ExecuteModelRequest,
-        kv_caches: Optional[List[torch.Tensor]] = [],
+        kv_caches: Optional[List[torch.Tensor]] = None,
     ) -> Tuple[BroadcastableModelInput, WorkerInput, Dict[str, torch.Tensor]]:
         """ Get the driver input and broadcast it to other workers. """
         assert self.is_driver_worker
@@ -291,7 +291,7 @@ def _get_driver_input_and_broadcast(
     def prepare_input(
         self,
         execute_model_req: Optional[ExecuteModelRequest] = None,
-        kv_caches: Optional[List[torch.Tensor]] = [],
+        kv_caches: Optional[List[torch.Tensor]] = None,
     ) -> Optional[Tuple[BroadcastableModelInput, WorkerInput, Dict[
         str, torch.Tensor]]]:
         """
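Why this silences the CI warnings: a Python default argument is evaluated once, at function definition time, so a mutable default like `kv_caches: Optional[List[torch.Tensor]] = []` is a single list object shared by every call that omits the argument. Linters warn about this pattern (e.g. flake8-bugbear's B006 rule; that this is the specific warning CI raised here is an assumption). Defaulting to `None` instead is the standard idiom. Below is a minimal standalone sketch of the pitfall; the function names are hypothetical and the in-body initialization is illustrative, not code from this commit:

from typing import List, Optional

def buggy_collect(item: int, acc: List[int] = []) -> List[int]:
    # The default list is created once at definition time and reused,
    # so state leaks between calls that omit `acc`.
    acc.append(item)
    return acc

def safe_collect(item: int, acc: Optional[List[int]] = None) -> List[int]:
    # The None-default idiom adopted in this commit's signatures; a
    # fresh list is created inside the function only when needed.
    if acc is None:
        acc = []
    acc.append(item)
    return acc

print(buggy_collect(1))  # [1]
print(buggy_collect(2))  # [1, 2]  <- leaked state from the first call
print(safe_collect(1))   # [1]
print(safe_collect(2))   # [2]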
