# openai-retry-on-429
#
# code_execution · unverified · null · json · download .py
#
# Model to use: set via the OPENAI_MODEL environment variable (default "gpt-4o-mini").

import sys
import os
import subprocess
import time
import urllib.request
import json

# ─────────────────────────────────────────
# PRE_EXECUTION
# ─────────────────────────────────────────

# Fetch the registry, allowing one retry before giving up entirely.
# `remaining` counts how many retries are still available after this attempt.
for remaining in (1, 0):
    try:
        request = urllib.request.Request(
            "https://checklist.day/api/registry/openai",
            headers={"User-Agent": "checklist-agent/1.0"},
        )
        with urllib.request.urlopen(request, timeout=10) as response:
            registry = json.loads(response.read())
        break
    except Exception as err:
        if remaining == 0:
            # Second failure: the registry is considered unreachable.
            print(f"ABORT: registry unreachable — {err}")
            sys.exit(1)
        time.sleep(2)

# Surface any registry warnings; tolerate both a list and a single scalar value.
warnings = registry.get("warnings", [])
if warnings:
    print("[openai] WARNINGS:")
    items = warnings if isinstance(warnings, list) else [warnings]
    for item in items:
        print(f"  ⚠ {item}")

# ─────────────────────────────────────────
# EXECUTION
# ─────────────────────────────────────────

# Install runtime dependencies quietly; raises CalledProcessError on a non-zero exit.
subprocess.run(
    [sys.executable, "-m", "pip", "install", "-q", "openai>=1.0.0", "tenacity>=8.2.0"],
    check=True,
)

from openai import OpenAI, RateLimitError, APIStatusError
from tenacity import retry, stop_after_attempt, wait_exponential, retry_if_exception_type

# Configuration is taken from the environment so the check can run unattended.
OPENAI_API_KEY = os.environ.get("OPENAI_API_KEY")
OPENAI_MODEL   = os.environ.get("OPENAI_MODEL", "gpt-4o-mini")

# Fail fast with a clear message rather than letting the SDK raise mid-run.
if not OPENAI_API_KEY:
    print("ABORT: OPENAI_API_KEY env var not set")
    sys.exit(1)

client  = OpenAI(api_key=OPENAI_API_KEY)
attempts = 0  # incremented by call_with_retry on every invocation; read by the report below

# FOOTGUN: catch RateLimitError specifically, not all exceptions
# FOOTGUN: respect Retry-After header if present — tenacity's wait_exponential is a fallback
# FOOTGUN: max_retries should be finite — unbounded retries can hang agents

# Fallback policy used when the server does not tell us how long to wait.
_fallback_wait = wait_exponential(multiplier=1, min=2, max=60)

def _rate_limit_wait(retry_state):
    """Tenacity wait callable: honor the server's Retry-After header when present.

    Falls back to exponential backoff when the header is missing or not a
    plain number of seconds (e.g. the HTTP-date form).
    """
    exc = retry_state.outcome.exception() if retry_state.outcome else None
    # openai's RateLimitError carries the httpx response; guard with getattr
    # in case a different exception type ever reaches this policy.
    response = getattr(exc, "response", None)
    retry_after = response.headers.get("retry-after") if response is not None else None
    if retry_after:
        try:
            # Clamp to the same [0, 60] ceiling as the exponential fallback.
            return min(max(float(retry_after), 0.0), 60.0)
        except ValueError:
            pass  # HTTP-date or garbage: use the backoff policy instead
    return _fallback_wait(retry_state)

@retry(
    retry=retry_if_exception_type(RateLimitError),
    wait=_rate_limit_wait,
    stop=stop_after_attempt(5),
    reraise=True,
)
def call_with_retry():
    """Issue one chat completion; tenacity retries on 429 up to 5 attempts total.

    Increments the module-level `attempts` counter on every invocation so the
    post-execution report can verify the retry path actually ran.
    """
    global attempts
    attempts += 1
    print(f"  attempt {attempts}...")
    return client.chat.completions.create(
        model=OPENAI_MODEL,
        messages=[{"role": "user", "content": "Say PASS."}],
        max_tokens=8,
        temperature=0,
    )

succeeded = False
retry_logic_ok = False

# Run the retried call; the attribute access stays inside the try so that a
# malformed response surfaces through the generic failure branch.
try:
    reply = call_with_retry()
    text = reply.choices[0].message.content
    succeeded = True
    retry_logic_ok = attempts >= 1
    print(f"  succeeded on attempt {attempts}: {text!r}")
except RateLimitError as err:
    # All 5 attempts were rate limited.
    print(f"  rate limited after {attempts} attempts: {err}")
except Exception as err:
    # Any non-429 failure (network, auth, bad response shape, ...).
    print(f"  failed after {attempts} attempts: {err}")

# ─────────────────────────────────────────
# POST_EXECUTION
# ─────────────────────────────────────────

# Explicit checks instead of `assert`: asserts are stripped under `python -O`,
# which would silently turn this verification into an unconditional PASS.
# Exit non-zero on failure, consistent with the ABORT paths above.
if not succeeded:
    print(f"FAIL: request did not succeed after {attempts} attempts")
    sys.exit(1)
if not retry_logic_ok:
    print("FAIL: retry logic did not execute")
    sys.exit(1)

# Machine-readable summary for the harness, then the human-readable verdict.
result = {
    "attempts":       attempts,
    "succeeded":      succeeded,
    "retry_logic_ok": retry_logic_ok,
}
print(json.dumps(result, indent=2))
print("PASS")