The ZeroEval Python SDK automatically instruments the supported integrations, so the only thing you need to do is initialize the SDK before importing the frameworks you want to trace.
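The ordering is the one thing to get right: `ze.init()` has to run before the framework import so the SDK can instrument it. A minimal sketch of the rule:

import zeroeval as ze

ze.init()  # initialize first...

import openai  # ...then import the framework; calls are now traced

# Anti-pattern: importing openai before ze.init() may leave it uninstrumented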
OpenAI
import zeroeval as ze
ze.init()

import openai

client = openai.OpenAI()

# This call is automatically traced
response = client.chat.completions.create(
    model="gpt-4",
    messages=[{"role": "user", "content": "Hello!"}]
)

# Streaming is also automatically traced
stream = client.chat.completions.create(
    model="gpt-4",
    messages=[{"role": "user", "content": "Tell me a story"}],
    stream=True
)

for chunk in stream:
    if chunk.choices[0].delta.content:
        print(chunk.choices[0].delta.content, end="")
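The async client follows the same pattern; whether `openai.AsyncOpenAI` is instrumented identically is an assumption here, extrapolating from the init-before-import rule above:

import asyncio

import zeroeval as ze
ze.init()

import openai

async def main():
    client = openai.AsyncOpenAI()
    # Assumption: async calls are traced like their sync counterparts
    response = await client.chat.completions.create(
        model="gpt-4",
        messages=[{"role": "user", "content": "Hello!"}]
    )
    print(response.choices[0].message.content)

asyncio.run(main())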
LangChain
import zeroeval as ze
ze.init()

from langchain_openai import ChatOpenAI
from langchain_core.prompts import ChatPromptTemplate

# All components are automatically traced
model = ChatOpenAI()
prompt = ChatPromptTemplate.from_template("Tell me about {topic}")
chain = prompt | model

response = chain.invoke({"topic": "AI"})
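Streaming the same chain works through LCEL's `stream` method; that streamed runs are traced identically is an assumption here, extrapolating from the OpenAI streaming example above:

# Continuing the example above: stream tokens through the same chain
for chunk in chain.stream({"topic": "AI"}):
    print(chunk.content, end="")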
LangGraph
import zeroeval as ze
ze.init()

from typing import Annotated, TypedDict

from langchain_core.messages import HumanMessage
from langgraph.graph import StateGraph, START, END
from langgraph.graph.message import add_messages

class AgentState(TypedDict):
    messages: Annotated[list, add_messages]

# Stub nodes for illustration; replace with your real agent logic
def reasoning_node(state: AgentState): return {"messages": []}
def agent_node(state: AgentState): return {"messages": []}
def tool_node(state: AgentState): return {"messages": []}
def should_continue(state: AgentState) -> str: return "end"

# Define a multi-node graph
workflow = StateGraph(AgentState)
workflow.add_node("reasoning", reasoning_node)
workflow.add_node("agent", agent_node)
workflow.add_node("tools", tool_node)
workflow.add_edge(START, "reasoning")
workflow.add_edge("reasoning", "agent")
workflow.add_conditional_edges(
    "agent",
    should_continue,
    {"tools": "tools", "end": END}
)
workflow.add_edge("tools", "agent")
app = workflow.compile()

# Full graph execution is automatically traced
result = app.invoke({"messages": [HumanMessage(content="Help me plan a trip")]})

# Streaming is also supported
for chunk in app.stream({"messages": [HumanMessage(content="Hello")]}):
    print(chunk)
PydanticAI
PydanticAI agents are automatically traced, including multi-turn conversations. The SDK ensures that all LLM calls within an agent execution share the same trace, and consecutive conversation turns share the same trace ID when using shared message history.
import asyncio

import zeroeval as ze
ze.init()

from pydantic import BaseModel
from pydantic_ai import Agent

class Response(BaseModel):
    message: str
    sentiment: str

# Create an agent with structured output
agent = Agent(
    model="openai:gpt-4o-mini",
    output_type=Response,
    system_prompt="You are a helpful assistant."
)

async def main():
    # Single execution - automatically traced
    result = await agent.run("Hello!")

    # Multi-turn conversation - all turns share the same trace
    message_history = []
    async with agent.iter("First message", message_history=message_history) as run:
        async for node in run:
            pass
        message_history = run.result.all_messages()

    # Second turn reuses the same trace_id
    async with agent.iter("Follow-up message", message_history=message_history) as run:
        async for node in run:
            pass
        message_history = run.result.all_messages()

asyncio.run(main())
When you pass the same message_history list across multiple agent runs, ZeroEval automatically groups all runs under a single trace. This provides a unified view of the entire conversation.
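For longer conversations, the same pattern can be wrapped in a small loop. This helper is purely illustrative (`converse` is not part of either SDK) and uses `Agent.run` with `message_history`, which accepts the history the same way `Agent.iter` does:

# Hypothetical helper, not part of the ZeroEval or PydanticAI APIs:
# reusing one history list keeps every turn in the same trace.
async def converse(agent, turns):
    history = []
    for text in turns:
        result = await agent.run(text, message_history=history)
        history = result.all_messages()
        print(result.output)
    return history

# e.g. await converse(agent, ["Hello!", "What did I just say?"])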
LiveKit
The SDK automatically creates traces for LiveKit agents, including events from the following plugins:
- Cartesia (TTS)
- Deepgram (STT)
- OpenAI (LLM)
import zeroeval as ze
ze.init()

from livekit import agents
from livekit.agents import AgentSession, Agent
from livekit.plugins import openai

async def entrypoint(ctx: agents.JobContext):
    await ctx.connect()

    # All agent sessions are automatically traced
    session = AgentSession(
        llm=openai.realtime.RealtimeModel(voice="coral")
    )

    await session.start(
        room=ctx.room,
        agent=Agent(instructions="You are a helpful voice AI assistant.")
    )

    # Agent interactions are automatically captured
    await session.generate_reply(
        instructions="Greet the user and offer your assistance."
    )

if __name__ == "__main__":
    agents.cli.run_app(agents.WorkerOptions(entrypoint_fnc=entrypoint))
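To try it locally, the LiveKit agents CLI provides a dev mode: running `python agent.py dev` (assuming the file is named `agent.py`) starts the worker and connects it to your LiveKit project, with sessions traced as above.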