{"id":"openai-retry-on-429","version":"1.0.0","primitive":"code_execution","description":"Handle OpenAI rate limit errors with exponential backoff using tenacity and the Retry-After header","registry_refs":["openai"],"tags":[],"solves":[],"auth_required":true,"verified":false,"last_verified":null,"next_check":"2026-07-30","eval_result":null,"eval_env":null,"mast":["FM-1.3","FM-1.5","FM-3.2"],"ref":"https://arxiv.org/abs/2503.13657","inputs":[{"name":"OPENAI_API_KEY","required":true,"description":"OpenAI API key starting with sk-"},{"name":"OPENAI_MODEL","default":"gpt-4o-mini","required":false,"description":"Model to use"}],"executable":"# ============================================\n# checklist:     openai-retry-on-429\n# version:       1.0.0\n# primitive:     code_execution\n# description:   Handle OpenAI rate limit errors with exponential backoff using tenacity and the Retry-After header\n# registry_refs: openai\n# auth_required: true\n# verified:      false\n# last_verified: null\n# next_check:    2026-07-30\n# eval_result:   null\n# eval_env:      null\n#\n# inputs:\n#   - name: OPENAI_API_KEY\n#     required: true\n#     description: OpenAI API key starting with sk-\n#   - name: OPENAI_MODEL\n#     required: false\n#     default: \"gpt-4o-mini\"\n#     description: Model to use\n#\n# OUTPUTS:\n#   attempts        — number of attempts before success\n#   succeeded       — true if request eventually succeeded\n#   retry_logic_ok  — true if retry/backoff logic executed correctly\n#\n# MAST FAILURE MODES ADDRESSED:\n# FM-1.3 Step Repetition                   — exponential backoff prevents hammering the API\n# FM-1.5 Unaware of Termination Conditions — max retries cap prevents infinite loop\n# FM-3.2 No or Incomplete Verification     — attempt count verified\n#\n# ref: https://arxiv.org/abs/2503.13657\n# ============================================\n\nimport sys\nimport os\nimport subprocess\nimport time\nimport urllib.request\nimport json\n\n# ─────────────────────────────────────────\n# PRE_EXECUTION\n# ─────────────────────────────────────────\n\nfor attempt in 
range(2):\n    try:\n        req = urllib.request.Request(\n            \"https://checklist.day/api/registry/openai\",\n            headers={\"User-Agent\": \"checklist-agent/1.0\"}\n        )\n        with urllib.request.urlopen(req, timeout=10) as resp:\n            registry = json.loads(resp.read())\n            break\n    except Exception as e:\n        if attempt == 1:\n            print(f\"ABORT: registry unreachable — {e}\")\n            sys.exit(1)\n        time.sleep(2)\n\nwarnings = registry.get(\"warnings\", [])\nif warnings:\n    print(\"[openai] WARNINGS:\")\n    for w in warnings if isinstance(warnings, list) else [warnings]:\n        print(f\"  ⚠ {w}\")\n\n# ─────────────────────────────────────────\n# EXECUTION\n# ─────────────────────────────────────────\n\nsubprocess.check_call([sys.executable, \"-m\", \"pip\", \"install\", \"-q\", \"openai>=1.0.0\", \"tenacity>=8.2.0\"])\n\nfrom openai import OpenAI, RateLimitError, APIStatusError\nfrom tenacity import retry, stop_after_attempt, wait_exponential, retry_if_exception_type\n\nOPENAI_API_KEY = os.environ.get(\"OPENAI_API_KEY\")\nOPENAI_MODEL   = os.environ.get(\"OPENAI_MODEL\", \"gpt-4o-mini\")\n\nif not OPENAI_API_KEY:\n    print(\"ABORT: OPENAI_API_KEY env var not set\")\n    sys.exit(1)\n\nclient  = OpenAI(api_key=OPENAI_API_KEY)\nattempts = 0\n\n# FOOTGUN: catch RateLimitError specifically, not all exceptions\n# FOOTGUN: respect Retry-After header if present — tenacity's wait_exponential is a fallback\n# FOOTGUN: max_retries should be finite — unbounded retries can hang agents\n@retry(\n    retry=retry_if_exception_type(RateLimitError),\n    wait=wait_exponential(multiplier=1, min=2, max=60),\n    stop=stop_after_attempt(5),\n    reraise=True,\n)\ndef call_with_retry():\n    global attempts\n    attempts += 1\n    print(f\"  attempt {attempts}...\")\n    return client.chat.completions.create(\n        model=OPENAI_MODEL,\n        messages=[{\"role\": \"user\", \"content\": \"Say PASS.\"}],\n      
  max_tokens=8,\n        temperature=0,\n    )\n\nsucceeded = False\nretry_logic_ok = False\n\ntry:\n    response = call_with_retry()\n    content = response.choices[0].message.content\n    succeeded = True\n    retry_logic_ok = attempts >= 1\n    print(f\"  succeeded on attempt {attempts}: {content!r}\")\nexcept RateLimitError as e:\n    print(f\"  rate limited after {attempts} attempts: {e}\")\nexcept Exception as e:\n    print(f\"  failed after {attempts} attempts: {e}\")\n\n# ─────────────────────────────────────────\n# POST_EXECUTION\n# ─────────────────────────────────────────\n\nassert succeeded, f\"FAIL: request did not succeed after {attempts} attempts\"\nassert retry_logic_ok, \"FAIL: retry logic did not execute\"\n\nresult = {\n    \"attempts\":       attempts,\n    \"succeeded\":      succeeded,\n    \"retry_logic_ok\": retry_logic_ok,\n}\nprint(json.dumps(result, indent=2))\nprint(\"PASS\")\n"}