docs: add span events documentation for chat completion traces #114

Open · wants to merge 1 commit into base: main
104 changes: 102 additions & 2 deletions api-reference/traces/POST-trace.mdx
@@ -82,7 +82,45 @@ curl -X POST 'https://app.langtrace.ai/api/trace' \
"llm.response.choices": {
"stringValue": "[{\"role\":\"assistant\",\"content\":\"I'm doing well, thank you for asking!\"}]"
}
}
},
"events": [
{
"name": "token_stream",
"timeUnixNano": "1701555556000000000",
"attributes": {
"llm.token.text": {
"stringValue": "I'm"
},
"llm.token.index": {
"intValue": 0
}
}
},
{
"name": "token_stream",
"timeUnixNano": "1701555556200000000",
"attributes": {
"llm.token.text": {
"stringValue": " doing"
},
"llm.token.index": {
"intValue": 1
}
}
},
{
"name": "function_call",
"timeUnixNano": "1701555556500000000",
"attributes": {
"llm.function.name": {
"stringValue": "get_current_weather"
},
"llm.function.arguments": {
"stringValue": "{\"location\":\"San Francisco\",\"unit\":\"celsius\"}"
}
}
}
]
}]
}]
}]
@@ -122,7 +160,33 @@ payload = {
"llm.request.type": {"stringValue": "chat_completion"},
"llm.request.messages": {"stringValue": json.dumps([{"role": "user", "content": "Hello, how are you?"}])},
"llm.response.choices": {"stringValue": json.dumps([{"role": "assistant", "content": "I'm doing well, thank you for asking!"}])}
}
},
"events": [
{
"name": "token_stream",
"timeUnixNano": "1701555556000000000",
"attributes": {
"llm.token.text": {"stringValue": "I'm"},
"llm.token.index": {"intValue": 0}
}
},
{
"name": "token_stream",
"timeUnixNano": "1701555556200000000",
"attributes": {
"llm.token.text": {"stringValue": " doing"},
"llm.token.index": {"intValue": 1}
}
},
{
"name": "function_call",
"timeUnixNano": "1701555556500000000",
"attributes": {
"llm.function.name": {"stringValue": "get_current_weather"},
"llm.function.arguments": {"stringValue": "{\"location\":\"San Francisco\",\"unit\":\"celsius\"}"}
}
}
]
}]
}]
}]
@@ -133,6 +197,42 @@ print(response.json())
```
</RequestExample>

### Span Events

The trace endpoint supports capturing detailed events within spans for chat completions:

<ParamField body="resourceSpans[].scopeSpans[].spans[].events" type="array">
Array of events that occurred during the span
</ParamField>

<ParamField body="resourceSpans[].scopeSpans[].spans[].events[].name" type="string" required>
Name of the event (e.g., "token_stream", "function_call")
</ParamField>

<ParamField body="resourceSpans[].scopeSpans[].spans[].events[].timeUnixNano" type="string" required>
Timestamp of the event in Unix nanoseconds
</ParamField>

<ParamField body="resourceSpans[].scopeSpans[].spans[].events[].attributes" type="object" required>
Attributes specific to the event type
</ParamField>

#### Event Types

**Token Stream Events**
- `name`: `"token_stream"`
- `attributes`:
  - `llm.token.text`: Text content of the token
  - `llm.token.index`: Position in the response sequence
  - `llm.token.logprob`: Token log probability (optional)

**Function Call Events**
- `name`: `"function_call"`
- `attributes`:
  - `llm.function.name`: Name of the called function
  - `llm.function.arguments`: JSON string of function arguments
  - `llm.function.response`: Function response (optional)
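
To make both shapes concrete, here is a hedged sketch that assembles one event of each type and attaches them to a span payload. The `make_event` helper and the span name are illustrative, not part of the Langtrace API.

```python
import json
import time

def make_event(name, attributes):
    # Hypothetical helper: wraps a name and pre-encoded attributes
    # in the event shape documented above.
    return {
        "name": name,
        "timeUnixNano": str(time.time_ns()),
        "attributes": attributes,
    }

span = {
    "name": "openai.chat.completion",  # illustrative span name
    "events": [
        make_event("token_stream", {
            "llm.token.text": {"stringValue": "I'm"},
            "llm.token.index": {"intValue": 0},
        }),
        make_event("function_call", {
            "llm.function.name": {"stringValue": "get_current_weather"},
            "llm.function.arguments": {
                "stringValue": json.dumps({"location": "San Francisco", "unit": "celsius"})
            },
        }),
    ],
}
```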

### Response

<ResponseField name="status" type="number">
127 changes: 110 additions & 17 deletions tracing/send_traces.mdx
@@ -62,9 +62,9 @@ Once the SDK is initialized, Langtrace will automatically capture traces from su

### Example: Sending Traces from an OpenAI Application

Here's an example of how to send traces from an LLM application using the OpenAI API. This example demonstrates how to use the Langtrace SDK to send traces from an OpenAI completion request, including span events that capture detailed response information.

<Info>
Make sure to pip or npm install the required packages before running the code.

```bash
@@ -81,48 +81,141 @@ npm install @langtrase/typescript-sdk openai

```python Python
from langtrace_python_sdk import langtrace, with_langtrace_root_span

from openai import OpenAI

langtrace.init(
    api_key="<YOUR API KEY>")

@with_langtrace_root_span()
def example():
    client = OpenAI()
    # Enable streaming to capture token-by-token events
    response = client.chat.completions.create(
        model="gpt-3.5-turbo",
        messages=[
            {
                "role": "system",
                "content": "You are a helpful assistant that can also check the weather."
            },
            {
                "role": "user",
                "content": "What's the weather like in San Francisco?"
            }
        ],
        stream=True,  # Enable streaming for token events
        functions=[{  # Define functions to demonstrate function calling
            "name": "get_current_weather",
            "description": "Get the current weather in a given location",
            "parameters": {
                "type": "object",
                "properties": {
                    "location": {
                        "type": "string",
                        "description": "The city and state, e.g. San Francisco, CA"
                    },
                    "unit": {
                        "type": "string",
                        "enum": ["celsius", "fahrenheit"]
                    }
                },
                "required": ["location"]
            }
        }]
    )

    # Process the streamed response
    for chunk in response:
        if chunk.choices[0].delta.content:
            print(chunk.choices[0].delta.content, end="")
        if chunk.choices[0].delta.function_call:
            print(f"\nFunction Call: {chunk.choices[0].delta.function_call}")

example()
```

```javascript Javascript
import * as Langtrace from "@langtrase/typescript-sdk";
import OpenAI from "openai";

Langtrace.init({
api_key: "<YOUR API KEY>",
});

const openai = new OpenAI();

async function example() {
  const stream = await openai.chat.completions.create({
    model: "gpt-3.5-turbo",
    messages: [
      {
        role: "system",
        content: "You are a helpful assistant that can also check the weather.",
      },
      {
        role: "user",
        content: "What's the weather like in San Francisco?",
      },
    ],
    stream: true, // Enable streaming for token events
    functions: [{ // Define functions to demonstrate function calling
      name: "get_current_weather",
      description: "Get the current weather in a given location",
      parameters: {
        type: "object",
        properties: {
          location: {
            type: "string",
            description: "The city and state, e.g. San Francisco, CA",
          },
          unit: {
            type: "string",
            enum: ["celsius", "fahrenheit"],
          },
        },
        required: ["location"],
      },
    }],
  });

  // Process the streamed response
  for await (const chunk of stream) {
    if (chunk.choices[0].delta.content) {
      process.stdout.write(chunk.choices[0].delta.content);
    }
    if (chunk.choices[0].delta.function_call) {
      console.log("\nFunction Call:", chunk.choices[0].delta.function_call);
    }
  }
}

example().then(() => {
  console.log("\nDone");
});
```
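
The Langtrace SDKs build on OpenTelemetry, so spans recorded this way carry their events in the OTLP-style format shown in the API reference. If you want to record a custom span event yourself, one option is to use the OpenTelemetry API directly. A minimal sketch, assuming the SDK has been initialized as above; the span name, event name, and attributes are illustrative:

```python
from opentelemetry import trace

tracer = trace.get_tracer(__name__)

with tracer.start_as_current_span("custom-llm-step") as span:
    # Events recorded on the active span are exported in the
    # span's `events` array, as described in the API reference.
    span.add_event(
        "token_stream",
        attributes={"llm.token.text": "Hi", "llm.token.index": 0},
    )
```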