{"id":23323,"library":"axolotl","title":"Axolotl","description":"A user-friendly LLM training framework supporting a wide range of models, including Llama, Mistral, and others. Axolotl streamlines fine-tuning with PEFT, FSDP, and other optimizations, currently at version 0.16.1. Release cadence is regular, with multiple releases per month.","status":"active","version":"0.16.1","language":"python","source_language":"en","source_url":"https://github.com/axolotl-ai-cloud/axolotl.git","tags":["llm","fine-tuning","training","transformers","peft","deepspeed","flash-attention"],"install":[{"cmd":"pip install axolotl","lang":"bash","label":"PyPI"},{"cmd":"pip install axolotl[flash-attn]","lang":"bash","label":"With Flash Attention"},{"cmd":"pip install axolotl[deepspeed]","lang":"bash","label":"With DeepSpeed"}],"dependencies":[{"reason":"Core dependency for model operations","package":"torch","optional":false},{"reason":"Hugging Face transformers library","package":"transformers","optional":false},{"reason":"Dataset loading and processing","package":"datasets","optional":false},{"reason":"Training acceleration","package":"accelerate","optional":false},{"reason":"Parameter-Efficient Fine-Tuning","package":"peft","optional":true},{"reason":"DeepSpeed integration for distributed training","package":"deepspeed","optional":true},{"reason":"Flash Attention for memory-efficient attention","package":"flash-attn","optional":true}],"imports":[{"note":"train() is no longer top-level; moved to utils.trainer","wrong":"from axolotl import train","symbol":"train","correct":"from axolotl.utils.trainer import train"},{"note":"Config utilities moved under utils","wrong":"from axolotl.config import load_config","symbol":"load_config","correct":"from axolotl.utils.config import load_config"}],"quickstart":{"code":"from axolotl.utils.config import load_config\nfrom axolotl.utils.trainer import train\n\nconfig = load_config('config.yml')\n# config must include 'model_name_or_path' and other required fields\ntrain(cfg=config)","lang":"python","description":"Basic training using a YAML config file."},"warnings":[{"fix":"Use load_config to load a YAML or dict config, then call train(cfg=config).","message":"Version 0.4.0 changed the trainer API: train() now requires a config object, not kwargs.","severity":"breaking","affected_versions":">=0.4.0"},{"fix":"Install with pip install axolotl[flash-attn] and ensure GPU is available.","message":"Flash Attention must be installed separately and CUDA-compatible GPU required.","severity":"gotcha","affected_versions":"all"},{"fix":"Refer to the official config documentation for correct key names.","message":"YAML config keys are case-sensitive and must match exactly with axolotl's schema.","severity":"gotcha","affected_versions":"all"},{"fix":"Use `python -m axolotl.cli.train config.yml` or the Python API.","message":"The cli.py entry point is deprecated; use the Python API or `axolotl` command-line tool.","severity":"deprecated","affected_versions":">=0.5.0"}],"env_vars":null,"last_verified":"2026-05-01T00:00:00.000Z","next_check":"2026-07-30T00:00:00.000Z","problems":[{"fix":"Use load_config to load config from YAML and pass config object: train(cfg=config).","cause":"Using old API where train was called with direct arguments instead of a config object.","error":"TypeError: train() got an unexpected keyword argument 'model'"},{"fix":"Use: from axolotl.utils.trainer import train","cause":"train() is no longer in the top-level module; moved to utils.trainer.","error":"ImportError: cannot import name 'train' from 'axolotl'"},{"fix":"Enable gradient checkpointing, use smaller batch size, or activate DeepSpeed ZeRO stage 2/3.","cause":"Model too large for available GPU memory, or not using memory optimizations.","error":"RuntimeError: CUDA out of memory"},{"fix":"Add 'model_name_or_path' to the YAML config file with the model identifier.","cause":"Required config key missing from YAML.","error":"KeyError: 'model_name_or_path' in config"}],"ecosystem":"pypi","meta_description":null,"install_score":null,"install_tag":null,"quickstart_score":null,"quickstart_tag":null}