
Commit

fix: ark token usage is none
sinomoe committed Sep 13, 2024
1 parent d109881 commit 7d8cd64
Showing 2 changed files with 11 additions and 14 deletions.
@@ -208,11 +208,11 @@ def stream_chat(
             presence_penalty=presence_penalty,
             top_p=top_p,
             temperature=temperature,
+            stream_options={
+                "include_usage": True
+            }
         )
-        for chunk in chunks:
-            if not chunk.choices:
-                continue
-            yield chunk
+        yield from chunks

     def embeddings(self, texts: list[str]) -> CreateEmbeddingResponse:
         return self.ark.embeddings.create(model=self.endpoint_id, input=texts)
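Context for the hunk above (an illustrative sketch, not code from this commit): with OpenAI-compatible streaming APIs such as Ark's, passing stream_options={"include_usage": True} makes the server append one final chunk whose choices list is empty and whose usage field carries the token counts. The old loop skipped every chunk without choices, so that usage-only chunk never reached the caller and token usage came back as None; yield from chunks now forwards it. A minimal, self-contained simulation of that behaviour (the Chunk, Usage, and fake_stream names below are hypothetical stand-ins, not the SDK's types):

# Illustrative stand-ins for the SDK's streaming chunk objects (not the real types).
from dataclasses import dataclass, field

@dataclass
class Usage:
    prompt_tokens: int
    completion_tokens: int

@dataclass
class Chunk:
    choices: list = field(default_factory=list)  # empty on the usage-only chunk
    usage: Usage | None = None

def fake_stream():
    yield Chunk(choices=["Hello"])
    yield Chunk(choices=[" world"])
    # With include_usage enabled, the last chunk carries no choices, only usage.
    yield Chunk(choices=[], usage=Usage(prompt_tokens=12, completion_tokens=2))

# Old behaviour: filtering on chunk.choices drops the usage-only chunk entirely.
filtered = [c for c in fake_stream() if c.choices]
assert all(c.usage is None for c in filtered)

# New behaviour: forward every chunk; the consumer reads usage off the last one.
forwarded = list(fake_stream())
assert forwarded[-1].usage.prompt_tokens == 12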
api/core/model_runtime/model_providers/volcengine_maas/llm/llm.py (17 changes: 7 additions & 10 deletions)
@@ -241,25 +241,22 @@ def _generate_v3(

         def _handle_stream_chat_response(chunks: Generator[ChatCompletionChunk]) -> Generator:
             for chunk in chunks:
-                if not chunk.choices:
-                    continue
-                choice = chunk.choices[0]
-
                 yield LLMResultChunk(
                     model=model,
                     prompt_messages=prompt_messages,
                     delta=LLMResultChunkDelta(
-                        index=choice.index,
-                        message=AssistantPromptMessage(content=choice.delta.content, tool_calls=[]),
+                        index=0,
+                        message=AssistantPromptMessage(
+                            content=chunk.choices[0].delta.content if chunk.choices else "",
+                            tool_calls=[]
+                        ),
                         usage=self._calc_response_usage(
                             model=model,
                             credentials=credentials,
                             prompt_tokens=chunk.usage.prompt_tokens,
                             completion_tokens=chunk.usage.completion_tokens,
-                        )
-                        if chunk.usage
-                        else None,
-                        finish_reason=choice.finish_reason,
+                        ) if chunk.usage else None,
+                        finish_reason=chunk.choices[0].finish_reason if chunk.choices else None,
                     ),
                 )
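Because the handler above can now receive that usage-only final chunk, every chunk.choices[0] access is guarded and usage is computed only when chunk.usage is present. A stripped-down mirror of that guard pattern (delta_fields and the SimpleNamespace chunks are hypothetical illustrations, not this repository's API):

from types import SimpleNamespace

def delta_fields(chunk):
    # Tolerate a usage-only chunk: no IndexError on choices[0], usage only when present.
    has_choice = bool(chunk.choices)
    return {
        "content": chunk.choices[0].delta.content if has_choice else "",
        "finish_reason": chunk.choices[0].finish_reason if has_choice else None,
        "usage": (chunk.usage.prompt_tokens, chunk.usage.completion_tokens) if chunk.usage else None,
    }

# A normal content chunk and the final usage-only chunk both pass through safely.
content_chunk = SimpleNamespace(
    choices=[SimpleNamespace(delta=SimpleNamespace(content="Hi"), finish_reason=None)],
    usage=None,
)
usage_chunk = SimpleNamespace(choices=[], usage=SimpleNamespace(prompt_tokens=12, completion_tokens=2))
print(delta_fields(content_chunk))  # {'content': 'Hi', 'finish_reason': None, 'usage': None}
print(delta_fields(usage_chunk))    # {'content': '', 'finish_reason': None, 'usage': (12, 2)}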
