OpenInference OpenAI Agents Instrumentation
This library provides OpenTelemetry instrumentation for OpenAI Agents (Assistants API). It automatically captures trace data for interactions with OpenAI's `client.beta.assistants`, `client.beta.threads`, `client.beta.threads.messages`, and `client.beta.threads.runs`, converting them into OpenInference-compliant spans. The current version is 1.4.1, and it's part of the broader OpenInference monorepo, which releases frequently, often with per-library updates.
Warnings
- breaking This instrumentation is designed for `openai` library versions 1.0.0 and above. Using `openai` versions 0.x.x will lead to `AttributeError` or incorrect instrumentation as the API structure changed significantly.
- gotcha The OpenTelemetry `TracerProvider` must be configured and set *before* calling `OpenAIInstrumentor().instrument()`. If the order is reversed, the instrumentor binds to the default (no-op) tracer provider, and no traces will be emitted.
- gotcha By default, the instrumentation captures full inputs and outputs, which may include sensitive or personally identifiable information (PII). Be mindful of data privacy requirements.
Install
pip install openinference-instrumentation-openai-agents openai opentelemetry-sdk
Imports
- OpenAIInstrumentor
from openinference.instrumentation.openai_agents import OpenAIInstrumentor
Quickstart
"""Quickstart: trace OpenAI Assistants API calls with OpenInference instrumentation.

Flow: configure OpenTelemetry -> instrument the openai client -> create an
assistant, thread, message, and run -> poll the run to completion -> print the
conversation -> delete the assistant and thread.
"""
import os
import time

from opentelemetry import trace
from opentelemetry.sdk.resources import Resource
from opentelemetry.sdk.trace import TracerProvider
from opentelemetry.sdk.trace.export import SimpleSpanProcessor, ConsoleSpanExporter

import openai
from openinference.instrumentation.openai_agents import OpenAIInstrumentor

# 1. Setup OpenTelemetry (before instrumentation).
# The provider must be set globally before instrument() is called, otherwise
# the instrumentor binds to the default no-op provider and emits nothing.
resource = Resource.create({"service.name": "openai-agents-quickstart"})
tracer_provider = TracerProvider(resource=resource)
tracer_provider.add_span_processor(SimpleSpanProcessor(ConsoleSpanExporter()))
trace.set_tracer_provider(tracer_provider)

# 2. Instrument OpenAI Agents
OpenAIInstrumentor().instrument()

# 3. Initialize OpenAI Client
# Ensure OPENAI_API_KEY is set in your environment
client = openai.OpenAI(api_key=os.environ.get("OPENAI_API_KEY", ""))

try:
    # 4. Interact with OpenAI Assistants API
    assistant = client.beta.assistants.create(
        name="Math Tutor",
        instructions="You are a personal math tutor. Answer questions briefly.",
        model="gpt-4o",
    )
    print(f"Created Assistant: {assistant.id}")

    thread = client.beta.threads.create()
    print(f"Created Thread: {thread.id}")

    message = client.beta.threads.messages.create(
        thread_id=thread.id,
        role="user",
        content="What is 1 + 1?",
    )
    print(f"Added Message: {message.id}")

    run = client.beta.threads.runs.create(
        thread_id=thread.id,
        assistant_id=assistant.id,
        instructions="Please address the user as Professor.",
    )
    print(f"Started Run: {run.id}")

    # 5. Wait for the run to complete (simulated polling).
    # Re-fetch the run each second until it leaves a non-terminal state.
    while run.status in ['queued', 'in_progress', 'cancelling']:
        time.sleep(1)
        run = client.beta.threads.runs.retrieve(
            thread_id=thread.id,
            run_id=run.id,
        )
    print(f"Run status: {run.status}")

    # 6. Retrieve messages after the run
    messages = client.beta.threads.messages.list(
        thread_id=thread.id,
    )
    print("\nConversation:")
    # The list endpoint returns newest-first; reverse for chronological order.
    for m in reversed(messages.data):
        print(f"{m.role}: {m.content[0].text.value}")
# BUG FIX: `openai.APIAssistantError` does not exist in openai>=1.0 — referencing
# it would raise AttributeError before any handling ran. `openai.APIError` is the
# base class for API-side failures (auth, rate limits, server errors).
except openai.APIError as e:
    print(f"OpenAI API Error: {e}")
    print("Please ensure you have an OpenAI API key set and have access to the Assistants API.")
except Exception as e:
    print(f"An unexpected error occurred: {e}")
finally:
    # Clean up resources. Guard with `in locals()` because either create call
    # may have failed before its variable was bound.
    try:
        if 'thread' in locals() and thread.id:
            client.beta.threads.delete(thread.id)
            print(f"Deleted Thread: {thread.id}")
        if 'assistant' in locals() and assistant.id:
            client.beta.assistants.delete(assistant.id)
            print(f"Deleted Assistant: {assistant.id}")
    except Exception as e:
        print(f"Error during cleanup: {e}")