# openai-chat-completion — smoke-test for the OpenAI chat-completions API.
# OPENAI_MODEL env var selects the model to use for completion (default: gpt-4o-mini).
import sys
import os
import subprocess
import time
import urllib.request
import json
# ─────────────────────────────────────────
# PRE_EXECUTION
# ─────────────────────────────────────────
# Fetch the registry document, retrying once (2 attempts total) with a
# 2-second pause between attempts; abort the whole run if both fail.
for attempt in range(2):
    try:
        req = urllib.request.Request(
            "https://checklist.day/api/registry/openai",
            headers={"User-Agent": "checklist-agent/1.0"},
        )
        with urllib.request.urlopen(req, timeout=10) as resp:
            # json.loads accepts the raw bytes payload directly.
            registry = json.loads(resp.read())
        break
    except Exception as e:
        # Broad catch is deliberate: any failure (DNS, timeout, bad JSON)
        # should trigger the retry, and on the last attempt, the abort.
        if attempt == 1:
            print(f"ABORT: registry unreachable — {e}")
            sys.exit(1)
        time.sleep(2)

# Surface any registry-provided warnings; tolerate a single non-list value.
warnings = registry.get("warnings", [])
if warnings:
    print("[openai] WARNINGS:")
    for w in warnings if isinstance(warnings, list) else [warnings]:
        print(f" ⚠ {w}")
# ─────────────────────────────────────────
# EXECUTION
# ─────────────────────────────────────────
# Install the SDK at runtime so the script is self-contained.
subprocess.check_call([sys.executable, "-m", "pip", "install", "-q", "openai>=1.0.0"])

# FOOTGUN: import is `from openai import OpenAI`, not `import openai` then `openai.ChatCompletion.create()`
# FOOTGUN: openai.ChatCompletion.create() was removed in v1.0 — use client.chat.completions.create()
from openai import OpenAI

# Credentials/config come from the environment; the key is mandatory.
OPENAI_API_KEY = os.environ.get("OPENAI_API_KEY")
OPENAI_MODEL = os.environ.get("OPENAI_MODEL", "gpt-4o-mini")
if not OPENAI_API_KEY:
    print("ABORT: OPENAI_API_KEY env var not set")
    sys.exit(1)

# Single deterministic round-trip: temperature=0, tiny completion budget.
client = OpenAI(api_key=OPENAI_API_KEY)
response = client.chat.completions.create(
    model=OPENAI_MODEL,
    messages=[
        {"role": "system", "content": "You are a helpful assistant. Be concise."},
        {"role": "user", "content": "Say the word CHECKLIST and nothing else."},
    ],
    max_tokens=16,
    temperature=0,
)

# FOOTGUN: content is at response.choices[0].message.content, not response.content
# FOOTGUN: content can be None if finish_reason is "content_filter"
content = response.choices[0].message.content
finish_reason = response.choices[0].finish_reason
prompt_tokens = response.usage.prompt_tokens
completion_tokens = response.usage.completion_tokens
print(f" model: {response.model}")
print(f" content: {content!r}")
print(f" finish_reason: {finish_reason}")
print(f" tokens: {prompt_tokens} prompt + {completion_tokens} completion")
# ─────────────────────────────────────────
# POST_EXECUTION
# ─────────────────────────────────────────
# Validate the completion before declaring success; each assert carries a
# FAIL message explaining which invariant was violated.
assert content is not None, "FAIL: content is None (content_filter?)"
assert "CHECKLIST" in content.upper(), f"FAIL: expected 'CHECKLIST' in response, got: {content!r}"
assert finish_reason == "stop", f"FAIL: unexpected finish_reason '{finish_reason}'"
assert prompt_tokens > 0, "FAIL: prompt_tokens is 0"
assert completion_tokens > 0, "FAIL: completion_tokens is 0"

# Emit a machine-readable summary, then the PASS marker on its own line.
result = dict(
    model=response.model,
    content=content,
    prompt_tokens=prompt_tokens,
    completion_tokens=completion_tokens,
    finish_reason=finish_reason,
)
print(json.dumps(result, indent=2))
print("PASS")