Commit e5985e5: Format
Signed-off-by: DarkLight1337 <[email protected]>
DarkLight1337 committed Dec 24, 2024
1 parent 80fde42 commit e5985e5
Showing 2 changed files with 15 additions and 13 deletions.
19 changes: 11 additions & 8 deletions examples/openai_pooling_client.py
@@ -20,7 +20,8 @@ def post_http_request(prompt: dict, api_url: str) -> requests.Response:
     parser = argparse.ArgumentParser()
     parser.add_argument("--host", type=str, default="localhost")
     parser.add_argument("--port", type=int, default=8000)
-    parser.add_argument("--model", type=str,
+    parser.add_argument("--model",
+                        type=str,
                         default="jason9693/Qwen2.5-1.5B-apeach")
 
     args = parser.parse_args()
@@ -35,13 +36,15 @@ def post_http_request(prompt: dict, api_url: str) -> requests.Response:
 
     # Input like Chat API
     prompt = {
-        "model": model_name,
-        "messages": [
-            {
-                "role": "user",
-                "content": [{"type": "text", "text": "vLLM is great!"}],
-            }
-        ]
+        "model":
+        model_name,
+        "messages": [{
+            "role": "user",
+            "content": [{
+                "type": "text",
+                "text": "vLLM is great!"
+            }],
+        }]
     }
     pooling_response = post_http_request(prompt=prompt, api_url=api_url)
     print("Pooling Response:")
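
For context, this example builds a Chat-style request body and posts it to the server's pooling endpoint. Below is a minimal sketch of the round trip, assuming the /pooling route and the argparse defaults shown above; the body of post_http_request is an assumption based only on the signature visible in the hunk headers, not the file's actual implementation:

    import requests

    def post_http_request(prompt: dict, api_url: str) -> requests.Response:
        # Assumed helper body: POST the dict as JSON and return the raw response.
        return requests.post(api_url, json=prompt)

    # Assumed route; host and port are the argparse defaults from the diff above.
    api_url = "http://localhost:8000/pooling"
    prompt = {
        "model": "jason9693/Qwen2.5-1.5B-apeach",
        "messages": [{
            "role": "user",
            "content": [{
                "type": "text",
                "text": "vLLM is great!"
            }],
        }]
    }
    pooling_response = post_http_request(prompt=prompt, api_url=api_url)
    print("Pooling Response:", pooling_response.json())
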
9 changes: 4 additions & 5 deletions vllm/entrypoints/openai/serving_pooling.py
@@ -11,11 +11,10 @@
 from vllm.engine.protocol import EngineClient
 from vllm.entrypoints.chat_utils import ChatTemplateContentFormatOption
 from vllm.entrypoints.logger import RequestLogger
-from vllm.entrypoints.openai.protocol import (PoolingChatRequest,
-                                              PoolingRequest,
-                                              PoolingResponse,
-                                              PoolingResponseData,
-                                              ErrorResponse, UsageInfo)
+from vllm.entrypoints.openai.protocol import (ErrorResponse,
+                                              PoolingChatRequest,
+                                              PoolingRequest, PoolingResponse,
+                                              PoolingResponseData, UsageInfo)
 from vllm.entrypoints.openai.serving_engine import BaseModelPath, OpenAIServing
 from vllm.logger import init_logger
 from vllm.outputs import PoolingOutput, PoolingRequestOutput
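
This hunk only reorders the imported names into alphabetical order (ErrorResponse first, UsageInfo last), consistent with isort-style formatting; no names are added or removed and no behavior changes.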
