diff --git a/tests/v1/core/test_kv_cache_utils.py b/tests/v1/core/test_kv_cache_utils.py
index 3b0c481813af5..faa3a91de151f 100644
--- a/tests/v1/core/test_kv_cache_utils.py
+++ b/tests/v1/core/test_kv_cache_utils.py
@@ -2,14 +2,11 @@
 
 from vllm.inputs import token_inputs
 from vllm.sampling_params import SamplingParams
-from vllm.v1.core.kv_cache_utils import (
-    BlockHashType,
-    FreeKVCacheBlockQueue,
-    KVCacheBlock,
-    generate_block_hash_extra_keys,
-    hash_block_tokens,
-    hash_request_tokens,
-)
+from vllm.v1.core.kv_cache_utils import (BlockHashType, FreeKVCacheBlockQueue,
+                                         KVCacheBlock,
+                                         generate_block_hash_extra_keys,
+                                         hash_block_tokens,
+                                         hash_request_tokens)
 from vllm.v1.request import Request
 
 
@@ -19,10 +16,12 @@ def make_request(request_id,
                  mm_hashes=None):
     return Request(
         request_id=request_id,
-        inputs=token_inputs(prompt_token_ids=prompt_token_ids,
-                            multi_modal_placeholders={"image": mm_positions}
-                            if mm_positions else None,
-                            multi_modal_hashes=mm_hashes),
+        inputs=token_inputs(
+            prompt_token_ids=prompt_token_ids,
+            multi_modal_placeholders={"image": mm_positions}
+            if mm_positions else None,
+            multi_modal_hashes=mm_hashes,
+        ),
         sampling_params=SamplingParams(max_tokens=17),
         eos_token_id=100,
         arrival_time=0,