Commit

lint
sfc-gh-aqiao committed Dec 20, 2024
1 parent 1c16ad2 commit 7282280
Showing 3 changed files with 12 additions and 10 deletions.
7 changes: 3 additions & 4 deletions vllm/model_executor/models/whisper.py
@@ -1,6 +1,6 @@
 import math
-from typing import (Iterable, List, Literal, Mapping, Optional, Set, Tuple,
-                    TypedDict, Union)
+from typing import (Iterable, List, Mapping, Optional, Set, Tuple, TypedDict,
+                    Union)
 
 import numpy as np
 import torch
@@ -589,7 +589,6 @@ def dummy_encoder_data_for_whisper(ctx: InputContext, seq_len: int,
 
 
 def input_processor_for_whisper(ctx: InputContext, inputs):
-
     multi_modal_data = inputs["encoder"]["multi_modal_data"]
     if isinstance(multi_modal_data["audio"], list):
         assert len(multi_modal_data["audio"]) == 1
@@ -710,7 +709,7 @@ def _parse_and_validate_audio_input(
         if input_features is not None:
             if not isinstance(input_features, (torch.Tensor, list)):
                 raise ValueError("Incorrect type of audio features. "
-                                f"Got type: {type(input_features)}")
+                                 f"Got type: {type(input_features)}")
             input_features = [feat.to(self.dtype) for feat in input_features]
 
         return WhisperAudioInputs(input_features=input_features)
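
Note: the whisper.py hunks are formatting-only — an unused Literal import is dropped, a stray blank line is removed, and the f-string continuation is realigned. For readers skimming the diff, here is a minimal self-contained sketch of the validation pattern touched by the last hunk; WhisperAudioInputs and self.dtype are vLLM-internal, so the names below are hypothetical stand-ins:

    from typing import List, Optional, Union

    import torch


    def parse_audio_features(
            input_features: Union[torch.Tensor, List[torch.Tensor], None],
            dtype: torch.dtype) -> Optional[List[torch.Tensor]]:
        # Hypothetical stand-in for _parse_and_validate_audio_input:
        # reject unexpected container types, then normalize each feature
        # tensor to the model dtype. Iterating a torch.Tensor yields row
        # sub-tensors, so both a batched tensor and a list of tensors
        # are accepted.
        if input_features is None:
            return None
        if not isinstance(input_features, (torch.Tensor, list)):
            raise ValueError("Incorrect type of audio features. "
                             f"Got type: {type(input_features)}")
        return [feat.to(dtype) for feat in input_features]
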
6 changes: 4 additions & 2 deletions vllm/multimodal/processing.py
@@ -62,7 +62,8 @@ def _cached_encode(
     *,
     add_special_tokens: bool = False,
 ) -> list[int]:
-    return encode_tokens(tokenizer, text,
+    return encode_tokens(tokenizer,
+                         text,
                          add_special_tokens=add_special_tokens)
 
 
@@ -746,7 +747,8 @@ def _apply_prompt_replacements(
             mm_item_counts,
         )
 
-        token_ids = encode_tokens(tokenizer, text,
+        token_ids = encode_tokens(tokenizer,
+                                  text,
                                   add_special_tokens=False)
         matched_repls = [match.prompt_repl for match in text_matches]
 
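
Both processing.py hunks rewrap the same call shape, one argument per line. Judging only from these call sites, encode_tokens is a thin helper over an HF-style tokenizer; a plausible sketch, with the caveat that the real implementation in vllm.transformers_utils.tokenizer may differ:

    from typing import List


    def encode_tokens(tokenizer,
                      text: str,
                      *,
                      add_special_tokens: bool = False) -> List[int]:
        # Assumed behavior: forward to the underlying tokenizer's
        # encode(), making the special-tokens choice explicit at every
        # call site.
        return tokenizer.encode(text, add_special_tokens=add_special_tokens)
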
9 changes: 5 additions & 4 deletions vllm/transformers_utils/tokenizer_group/tokenizer_group.py
@@ -2,8 +2,7 @@
 
 from vllm.config import TokenizerPoolConfig
 from vllm.lora.request import LoRARequest
-from vllm.transformers_utils.tokenizer import (AnyTokenizer,
-                                               encode_tokens,
+from vllm.transformers_utils.tokenizer import (AnyTokenizer, encode_tokens,
                                                get_lora_tokenizer,
                                                get_lora_tokenizer_async,
                                                get_tokenizer)
@@ -59,7 +58,8 @@ def encode(self,
                lora_request: Optional[LoRARequest] = None,
                add_special_tokens: Optional[bool] = None) -> List[int]:
         tokenizer = self.get_lora_tokenizer(lora_request)
-        ret = encode_tokens(tokenizer, prompt,
+        ret = encode_tokens(tokenizer,
+                            prompt,
                             add_special_tokens=add_special_tokens)
         self._raise_if_input_too_long(ret, lora_request)
         return ret
@@ -71,7 +71,8 @@ async def encode_async(
             lora_request: Optional[LoRARequest] = None,
             add_special_tokens: Optional[bool] = None) -> List[int]:
         tokenizer = await self.get_lora_tokenizer_async(lora_request)
-        ret = encode_tokens(tokenizer, prompt,
+        ret = encode_tokens(tokenizer,
+                            prompt,
                             add_special_tokens=add_special_tokens)
         self._raise_if_input_too_long(ret, lora_request)
         return ret
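
The tokenizer_group.py hunks touch the sync/async encode pair: both paths resolve a (possibly LoRA-specific) tokenizer, encode, and run the same length check. A reduced sketch of that pattern — the LoRA lookup is omitted and every name other than encode/encode_async is a stand-in, not the real vLLM API:

    from typing import List, Optional


    class TokenizerGroupSketch:
        """Illustrative reduction of TokenizerGroup's encode paths."""

        def __init__(self, tokenizer, max_input_length: Optional[int] = None):
            self._tokenizer = tokenizer
            self._max_input_length = max_input_length

        def _raise_if_input_too_long(self, token_ids: List[int]) -> None:
            # Mirror of the length check called after both encode paths.
            if (self._max_input_length is not None
                    and len(token_ids) > self._max_input_length):
                raise ValueError("Input too long.")

        def encode(self,
                   prompt: str,
                   add_special_tokens: bool = False) -> List[int]:
            token_ids = self._tokenizer.encode(
                prompt, add_special_tokens=add_special_tokens)
            self._raise_if_input_too_long(token_ids)
            return token_ids

        async def encode_async(self,
                               prompt: str,
                               add_special_tokens: bool = False) -> List[int]:
            # The real class awaits a per-LoRA tokenizer lookup here;
            # this sketch has no awaitable work, so it reuses the sync
            # path.
            return self.encode(prompt, add_special_tokens=add_special_tokens)
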
