How to get response_metadata from a result with StructuredOutputParser? #25604
-
Checked other resources
Commit to Help
# Example Code
# ChatOpenAI client; temperature=0 for deterministic output, 2 retries on
# transient API errors. Credentials come from the surrounding api_info dict.
llm = ChatOpenAI(
    model="gpt-4o",
    temperature=0,
    max_tokens=None,  # no explicit cap on completion length
    timeout=None,
    max_retries=2,
    api_key=api_info['api_key'],
    organization=api_info['organization']
    # base_url="...",
    # other params...
)
class InMemoryHistory(BaseChatMessageHistory):
    """Chat message history backed by a plain in-memory list.

    Also implements the sequence dunder protocol (__getitem__,
    __setitem__, __delitem__, __len__) so the history can be inspected
    and edited like a list.
    """

    def __init__(self):
        # Backing list holding the conversation's messages in order.
        self.messages = []

    def add_messages(self, messages: List[BaseMessage]) -> None:
        """Append a batch of messages to the end of the history."""
        self.messages += messages

    def clear(self) -> None:
        """Reset the history by rebinding to a fresh empty list."""
        self.messages = []

    def __getitem__(self, index):
        return self.messages[index]

    def __setitem__(self, index, value):
        self.messages[index] = value

    def __delitem__(self, index):
        del self.messages[index]

    def __len__(self):
        return len(self.messages)
# Store for chat histories, keyed by (user_id, conversation_id).
store = {}


def get_session_history(user_id: str, conversation_id: str) -> BaseChatMessageHistory:
    """Return the history for this user/conversation pair, creating it on first use."""
    key = (user_id, conversation_id)
    if key not in store:
        store[key] = InMemoryHistory()
    return store[key]
# Define the response schemas for JSON output.
response_schemas = [
    ResponseSchema(name="name", description="A good name for the company"),
    ResponseSchema(name="industry", description="The industry the company is in")
]
# Parser built from the schemas; its output is a dict with keys "name" and "industry".
output_parser = StructuredOutputParser.from_response_schemas(response_schemas)
# First chain: generate a company name and its industry.
prompt_template_1 = ChatPromptTemplate(
    messages=[
        HumanMessagePromptTemplate.from_template(
            "What is a good name for a company that makes {product}? "
            "Also, specify the industry.\n{format_instructions}"
        )
    ],
    input_variables=["product"],
    # Pre-fill the parser's format instructions so callers only supply {product}.
    partial_variables={"format_instructions": output_parser.get_format_instructions()}
)
# Compose prompt -> model -> parser. NOTE: because output_parser is the last
# step, the chain returns the parsed dict, not the model's AIMessage — so
# response_metadata (e.g. token usage) is not available on the result.
chain_1 = (
    prompt_template_1
    | llm
    | output_parser
)
# Run the chain and print the result.
product = "eco-friendly water bottles"
result_1 = chain_1.invoke({"product": product})
print("Chain 1 output:", result_1)

Description
If I don't set an output parser, the result's format is {'content', 'response_metadata'}. I need the 'response_metadata' information to check token usage.

System Info
langchain==0.2.9
linux (ubuntu)
Beta Was this translation helpful? Give feedback.
Replies: 1 comment 7 replies
-
To get the response metadata, call the model directly and read it off the returned message:
from langchain_openai import ChatOpenAI
# Call the model directly (no output parser); the returned message object
# carries the response_metadata attribute read below.
llm = ChatOpenAI(model="gpt-4-turbo")
msg = llm.invoke([("human", "What's the oldest known example of cuneiform")])
response_metadata = msg.response_metadata
This code snippet demonstrates how to retrieve the response metadata, which looks like:
{
"token_usage": {
"completion_tokens": 5,
"prompt_tokens": 28,
"total_tokens": 33,
},
"model_name": "gpt-4o",
"system_fingerprint": "fp_319be4768e",
"finish_reason": "stop",
"logprobs": None,
}
In your specific case, you can modify your code to access the response metadata as follows:
# Chain execution and result output
product = "eco-friendly water bottles"
result_1 = chain_1.invoke({"product": product})
# Accessing response_metadata
# NOTE(review): this assumes result_1 is a message object (a chain that ends
# at the model). With StructuredOutputParser as the last step the result is a
# plain dict and has no response_metadata attribute — confirm which chain is used.
response_metadata = result_1.response_metadata
print("Chain 1 output:", result_1)
print("Response Metadata:", response_metadata)
This will allow you to retrieve and print the response metadata.
Beta Was this translation helpful? Give feedback.
To access response_metadata when using StructuredOutputParser with response_schemas, you can directly access the dictionary keys, since the result is a dictionary. Here is an example of how you can do it: