OpenInference OpenAI Agents Instrumentation

1.4.1 · active · verified Tue Apr 14

This library provides OpenTelemetry instrumentation for OpenAI Agents (Assistants API). It automatically captures trace data for interactions with OpenAI's `client.beta.assistants`, `client.beta.threads`, `client.beta.threads.messages`, and `client.beta.threads.runs`, converting them into OpenInference-compliant spans. The current version is 1.4.1. This library is part of the broader OpenInference monorepo, which releases frequently, often with per-library updates.

Warnings

Install

Imports

Quickstart

This quickstart demonstrates how to instrument an OpenAI Assistants API workflow. It sets up an OpenTelemetry TracerProvider, instruments the OpenAI library, creates an assistant, thread, message, and runs the assistant, capturing traces for each step. Remember to set your `OPENAI_API_KEY` environment variable.

import os
import time

import openai
from opentelemetry import trace
from opentelemetry.sdk.resources import Resource
from opentelemetry.sdk.trace import TracerProvider
from opentelemetry.sdk.trace.export import ConsoleSpanExporter, SimpleSpanProcessor

# NOTE(review): the openinference-instrumentation-openai-agents package
# documents its entry point as ``OpenAIAgentsInstrumentor``; confirm that
# ``OpenAIInstrumentor`` is the name actually exported by the installed version.
from openinference.instrumentation.openai_agents import OpenAIInstrumentor

# Upper bound on how long we poll a run before giving up (seconds).
RUN_POLL_TIMEOUT_S = 120

# 1. Setup OpenTelemetry (must happen before instrumentation so the
#    instrumentor picks up the configured tracer provider).
resource = Resource.create({"service.name": "openai-agents-quickstart"})
tracer_provider = TracerProvider(resource=resource)
tracer_provider.add_span_processor(SimpleSpanProcessor(ConsoleSpanExporter()))
trace.set_tracer_provider(tracer_provider)

# 2. Instrument OpenAI Agents
OpenAIInstrumentor().instrument()

# 3. Initialize OpenAI Client
# Ensure OPENAI_API_KEY is set in your environment
client = openai.OpenAI(api_key=os.environ.get("OPENAI_API_KEY", ""))

try:
    # 4. Interact with OpenAI Assistants API: create an assistant, a thread,
    #    and a user message, then start a run on that thread.
    assistant = client.beta.assistants.create(
        name="Math Tutor",
        instructions="You are a personal math tutor. Answer questions briefly.",
        model="gpt-4o",
    )
    print(f"Created Assistant: {assistant.id}")

    thread = client.beta.threads.create()
    print(f"Created Thread: {thread.id}")

    message = client.beta.threads.messages.create(
        thread_id=thread.id,
        role="user",
        content="What is 1 + 1?",
    )
    print(f"Added Message: {message.id}")

    run = client.beta.threads.runs.create(
        thread_id=thread.id,
        assistant_id=assistant.id,
        instructions="Please address the user as Professor."
    )
    print(f"Started Run: {run.id}")

    # 5. Poll until the run leaves its transient states. A deadline prevents
    #    an infinite loop if the run never progresses server-side.
    deadline = time.monotonic() + RUN_POLL_TIMEOUT_S
    while run.status in ['queued', 'in_progress', 'cancelling']:
        if time.monotonic() > deadline:
            print(f"Timed out waiting for run {run.id} (last status: {run.status})")
            break
        time.sleep(1)
        run = client.beta.threads.runs.retrieve(
            thread_id=thread.id,
            run_id=run.id
        )
        print(f"Run status: {run.status}")

    # 6. Retrieve messages after the run. Messages are listed newest-first,
    #    so reverse to print the conversation in chronological order.
    messages = client.beta.threads.messages.list(
        thread_id=thread.id
    )
    print("\nConversation:")
    for m in reversed(messages.data):
        # Guard against messages whose content list is empty (e.g. a run
        # that failed before producing text).
        if m.content:
            print(f"{m.role}: {m.content[0].text.value}")

# Bug fix: the original caught ``openai.APIAssistantError``, which does not
# exist in the openai-python library and would itself raise AttributeError.
# ``openai.APIError`` is the base class for API failures (auth, status,
# connection errors all derive from it).
except openai.APIError as e:
    print(f"OpenAI API Error: {e}")
    print("Please ensure you have an OpenAI API key set and have access to the Assistants API.")
except Exception as e:
    print(f"An unexpected error occurred: {e}")
finally:
    # Clean up remote resources; each may be absent if an earlier step failed,
    # hence the ``in locals()`` checks. Best-effort: cleanup errors are
    # reported but never raised.
    try:
        if 'thread' in locals() and thread.id:
            client.beta.threads.delete(thread.id)
            print(f"Deleted Thread: {thread.id}")
        if 'assistant' in locals() and assistant.id:
            client.beta.assistants.delete(assistant.id)
            print(f"Deleted Assistant: {assistant.id}")
    except Exception as e:
        print(f"Error during cleanup: {e}")

view raw JSON →