{"id":26714,"library":"alora","title":"Activated LoRA (aLoRA)","description":"Activated LoRA (aLoRA) is a low rank adapter architecture that allows reusing existing base model KV cache, built on top of PEFT. Version 0.3.0 is current; pre-1.0 release cadence is intermittent.","status":"active","version":"0.3.0","language":"python","source_language":"en","source_url":"https://github.com/IBM/activated-lora","tags":["lora","fine-tuning","parameter-efficient","transformers","peft","kv-cache","activated-lora"],"install":[{"cmd":"pip install alora","lang":"bash","label":"Install from PyPI"}],"dependencies":[{"reason":"aLoRA subclasses PEFT models and requires bitsandbytes integration","package":"peft","optional":false},{"reason":"Underlying tensor operations and model loading","package":"torch","optional":false},{"reason":"Required for quantization support (bnb import from PEFT)","package":"bitsandbytes","optional":true}],"imports":[{"note":"Correct import path since v0.1.0","wrong":null,"symbol":"AloraConfig","correct":"from alora import AloraConfig"},{"note":"Correct import path since v0.1.0","wrong":null,"symbol":"AloraModel","correct":"from alora import AloraModel"}],"quickstart":{"code":"from transformers import AutoModelForCausalLM, AutoTokenizer\nfrom alora import AloraConfig, AloraModel\n\nmodel_name = \"gpt2\"\nmodel = AutoModelForCausalLM.from_pretrained(model_name)\ntokenizer = AutoTokenizer.from_pretrained(model_name)\n\nalora_config = AloraConfig(\n    r=8,\n    lora_alpha=16,\n    target_modules=[\"q_proj\", \"v_proj\"],\n    use_activated_lora=True\n)\n\nmodel = AloraModel(model, alora_config)\n\ninputs = tokenizer(\"Hello, I'm\", return_tensors=\"pt\")\noutputs = model.generate(**inputs, max_new_tokens=20)\nprint(tokenizer.decode(outputs[0]))","lang":"python","description":"Instantiate aLoraConfig, wrap a base model with AloraModel, and generate text."},"warnings":[{"fix":"Update any custom subclass to use PeftModel instead of LoraModel, or pin alora==0.2.0.","message":"v0.3.0 changed base class from PEFT's LoraModel to PeftModel. Custom subclasses of AloraModel may break if they rely on LoraModel internals.","severity":"breaking","affected_versions":"0.2.0 -> 0.3.0"},{"fix":"Ensure bitsandbytes is installed and use from peft.import_utils import is_bnb_available instead of direct bnb imports.","message":"bitsandbytes import path changed in PEFT; direct bnb imports are deprecated. Alora uses peft.utils for bnb-related modules.","severity":"deprecated","affected_versions":">=0.2.0"},{"fix":"Always call with an instantiated model object: AloraModel(model, config), not AloraModel('model_name', config).","message":"AloraModel expects a HuggingFace model as the first argument, not model id. 
Passing a string causes AttributeError.","severity":"gotcha","affected_versions":"all"}],"env_vars":null,"last_verified":"2026-05-01T00:00:00.000Z","next_check":"2026-07-30T00:00:00.000Z","problems":[{"fix":"Instantiate model first: model = AutoModelForCausalLM.from_pretrained('gpt2'), then wrap with AloraModel(model, config).","cause":"Passing a model name string instead of a model object to AloraModel.","error":"AttributeError: 'str' object has no attribute 'config'"},{"fix":"Install bitsandbytes: pip install bitsandbytes, or use the CPU-only fallback by disabling quantization in model config.","cause":"bitsandbytes is not installed but required by PEFT for certain features.","error":"ModuleNotFoundError: No module named 'bitsandbytes'"},{"fix":"Set use_activated_lora=True explicitly in AloraConfig to enable aLoRA behavior.","cause":"Omitting use_activated_lora in AloraConfig leads to silent fallback to standard LoRA.","error":"ValueError: AloraConfig requires use_activated_lora=True"}],"ecosystem":"pypi","meta_description":null,"install_score":null,"install_tag":null,"quickstart_score":null,"quickstart_tag":null}
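The record's central claim is that aLoRA can reuse a KV cache produced by the unadapted base model. Below is a minimal sketch of that pattern, assuming AloraModel passes standard transformers generate kwargs (input_ids, attention_mask, past_key_values) through unchanged; the prompt, split point, and variable names are illustrative, not part of the documented API.

import torch
from transformers import AutoModelForCausalLM, AutoTokenizer
from alora import AloraConfig, AloraModel

tokenizer = AutoTokenizer.from_pretrained("gpt2")
base = AutoModelForCausalLM.from_pretrained("gpt2")

inputs = tokenizer("A long shared context that several adapter calls will reuse.", return_tensors="pt")

# Prefill: run all but the last few tokens through the plain base model once,
# so the cache reflects base-model weights only.
split = inputs.input_ids.shape[1] - 3  # illustrative split point
with torch.no_grad():
    cache = base(input_ids=inputs.input_ids[:, :split], use_cache=True).past_key_values

config = AloraConfig(
    r=8,
    lora_alpha=16,
    target_modules=["c_attn"],  # GPT-2's fused attention projection
    use_activated_lora=True,
)
model = AloraModel(base, config)

# Hand the base-model cache to the adapted model: only the uncached tail and
# the newly generated tokens pass through the aLoRA-modified layers.
outputs = model.generate(
    input_ids=inputs.input_ids,
    attention_mask=inputs.attention_mask,
    past_key_values=cache,
    max_new_tokens=20,
)
print(tokenizer.decode(outputs[0]))

Splitting at the token level, rather than re-tokenizing a prefix string, guarantees the cached prefix matches the full input exactly.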