{"id":22513,"library":"unclecode-litellm","title":"LiteLLM (Unclecode fork)","description":"A pre-compromise fork of the original LiteLLM, providing a unified interface to call 100+ LLM providers (OpenAI, Anthropic, Cohere, Azure, Bedrock, etc.) with standardized input/output formats. Current version 1.81.13, actively maintained with monthly releases.","status":"active","version":"1.81.13","language":"python","source_language":"en","source_url":"https://github.com/unclecode/litellm","tags":["llm","api","openai","anthropic","cohere","azure","fork"],"install":[{"cmd":"pip install unclecode-litellm","lang":"bash","label":"Install fork"}],"dependencies":[],"imports":[{"note":"The fork preserves the same top-level package name 'litellm' as the original.","wrong":"from unclecode import litellm","symbol":"","correct":"import litellm"},{"note":"Function is singular 'completion', not 'completions'.","wrong":"from litellm import completions","symbol":"completion","correct":"from litellm import completion"}],"quickstart":{"code":"import litellm\nimport os\n\nresponse = litellm.completion(\n    model=\"gpt-4\",\n    messages=[{\"role\": \"user\", \"content\": \"Hello world\"}],\n    api_key=os.environ.get(\"OPENAI_API_KEY\", \"\"),\n)\nprint(response.choices[0].message.content)","lang":"python","description":"Call any provider by changing the model string (e.g., 'claude-2', 'command-nightly')."},"warnings":[{"fix":"Set os.environ['LITELLM_LOG'] = 'WARN' before importing litellm.","message":"Environment variable LITELLM_LOG controls logging level. Not setting it may result in excessive debug output.","severity":"gotcha","affected_versions":">=1.0.0"},{"fix":"Pass request_timeout=600 to completion() or set litellm.request_timeout = 600.","message":"This fork changed the default timeout from 600s to 60s. Some long-running completions may fail with timeout.","severity":"breaking","affected_versions":">=1.0.0"},{"fix":"Use litellm._turn_on_debug() or set environment variable LITELLM_LOG=DEBUG.","message":"The 'litellm.set_verbose' method is deprecated. Use 'litellm._turn_on_debug()' or set LITELLM_LOG=DEBUG.","severity":"deprecated","affected_versions":">=1.40.0"},{"fix":"Always set max_tokens=1024 (or desired value) when using Anthropic models.","message":"Anthropic models default to a max_tokens of 256. Many users expect 4096 or higher.","severity":"gotcha","affected_versions":">=1.0.0"}],"env_vars":null,"last_verified":"2026-04-27T00:00:00.000Z","next_check":"2026-07-26T00:00:00.000Z","problems":[{"fix":"Run 'pip install unclecode-litellm' and import as 'import litellm' (package name is 'litellm').","cause":"Installed the fork under the name 'unclecode-litellm' but importing as 'unclecode_litellm' or not installing at all.","error":"ModuleNotFoundError: No module named 'litellm'"},{"fix":"Verify API key is set and has credits. Use litellm.utils.RateLimitError for catching.","cause":"API key missing or insufficient quota. The fork does not wrap rate limit errors differently.","error":"openai.RateLimitError: You exceeded your current quota"},{"fix":"Import directly with 'from litellm import completion' or call litellm.completion (standard path works).","cause":"Unclecode fork removed some aliases, but the standard 'litellm.completion' path still works.","error":"AttributeError: module 'litellm' has no attribute 'completion'"}],"ecosystem":"pypi","meta_description":null,"install_score":null,"install_tag":null,"quickstart_score":null,"quickstart_tag":null}