The ZeroEval Python SDK automatically instruments the supported integrations, meaning the only thing you need to do is initialize the SDK before importing the frameworks you want to trace.

OpenAI

import zeroeval as ze

# Initialize tracing first, then import the framework being traced.
ze.init()

import openai

client = openai.OpenAI()

# A standard chat completion — traced with no extra code.
reply = client.chat.completions.create(
    model="gpt-4",
    messages=[{"role": "user", "content": "Hello!"}],
)

# Streaming responses are traced as well.
story_stream = client.chat.completions.create(
    model="gpt-4",
    messages=[{"role": "user", "content": "Tell me a story"}],
    stream=True,
)

for part in story_stream:
    delta = part.choices[0].delta.content
    if delta:
        print(delta, end="")

LangChain

import zeroeval as ze

# Initialize tracing before importing LangChain so its components are instrumented.
ze.init()

from langchain_core.prompts import ChatPromptTemplate
from langchain_openai import ChatOpenAI

# Every component in the chain is traced transparently.
llm = ChatOpenAI()
template = ChatPromptTemplate.from_template("Tell me about {topic}")
pipeline = template | llm

answer = pipeline.invoke({"topic": "AI"})

LangGraph

import zeroeval as ze
ze.init()

from langgraph.graph import StateGraph, START, END
from langchain_core.messages import HumanMessage

# Define a multi-node graph.
# NOTE: AgentState, reasoning_node, agent_node, tool_node and should_continue
# are assumed to be defined elsewhere in your application.
workflow = StateGraph(AgentState)
workflow.add_node("reasoning", reasoning_node)
workflow.add_node("agent", agent_node)
workflow.add_node("tools", tool_node)

# The graph needs an entry point: without an edge from START,
# workflow.compile() raises because the graph has no entrypoint.
workflow.add_edge(START, "reasoning")
workflow.add_edge("reasoning", "agent")

workflow.add_conditional_edges(
    "agent",
    should_continue,
    {"tools": "tools", "end": END}
)

# Route tool results back to the agent for another turn.
workflow.add_edge("tools", "agent")

app = workflow.compile()

# Full graph execution is automatically traced
result = app.invoke({"messages": [HumanMessage(content="Help me plan a trip")]})

# Streaming is also supported
for chunk in app.stream({"messages": [HumanMessage(content="Hello")]}):
    print(chunk)

LiveKit

The SDK automatically creates traces for LiveKit agents, including events from the following plugins:
  • Cartesia (TTS)
  • Deepgram (STT)
  • OpenAI (LLM)
# Initialize ZeroEval before importing LiveKit so the agent plugins are instrumented.
import zeroeval as ze
ze.init()

from livekit import agents
from livekit.agents import AgentSession, Agent
from livekit.plugins import openai

async def entrypoint(ctx: agents.JobContext):
    """Agent entrypoint invoked by the LiveKit worker for each job.

    Connects to the room, starts a realtime voice agent session, and sends
    an initial greeting. All session activity is traced by ZeroEval.
    """
    await ctx.connect()

    # All agent sessions are automatically traced
    session = AgentSession(
        llm=openai.realtime.RealtimeModel(voice="coral")
    )

    # Attach the agent to the job's room with its system instructions.
    await session.start(
        room=ctx.room,
        agent=Agent(instructions="You are a helpful voice AI assistant.")
    )

    # Agent interactions are automatically captured
    await session.generate_reply(
        instructions="Greet the user and offer your assistance."
    )

if __name__ == "__main__":
    # Run the worker process; LiveKit calls `entrypoint` for each assigned job.
    agents.cli.run_app(agents.WorkerOptions(entrypoint_fnc=entrypoint))
Need help? Contact us at [email protected] or join our Discord.