{"id":22471,"library":"torch-xla","title":"PyTorch/XLA","description":"PyTorch/XLA is a Python package that bridges PyTorch with XLA devices (TPU, GPU, CPU) to enable high-performance machine learning. The current stable version is 2.9.0, with releases aligned to PyTorch minor versions. It supports Python 3.10-3.13 and uses the PJRT runtime (XRT was deprecated and removed in 2.1+).","status":"active","version":"2.9.0","language":"python","source_language":"en","source_url":"https://github.com/pytorch/xla","tags":["pytorch","xla","tpu","google","machine-learning","deep-learning"],"install":[{"cmd":"pip install torch torch_xla[tpu] -f https://storage.googleapis.com/libtpu-releases/index.html","lang":"bash","label":"Install with TPU support"}],"dependencies":[{"reason":"PyTorch/XLA is an XLA bridge for PyTorch","package":"torch","optional":false},{"reason":"Experimental JAX interoperability","package":"jax","optional":true}],"imports":[{"note":"torch_xla is a package; importing directly works. 'from torch_xla import xla_model' is outdated and may miss other submodules.","wrong":"from torch_xla import xla_model","symbol":"torch_xla","correct":"import torch_xla"},{"note":"The correct module path is 'torch_xla.core.xla_model', not a top-level xla_model.","wrong":"import torch_xla.xla_model as xm","symbol":"torch_xla.core.xla_model","correct":"import torch_xla.core.xla_model as xm"}],"quickstart":{"code":"import torch\nimport torch_xla\nimport torch_xla.core.xla_model as xm\n\n# Get XLA device\ndevice = xm.xla_device()\n\n# Create tensor on XLA device\nt = torch.randn(3, 3, device=device)\nprint(f\"Tensor device: {t.device}\")\n\n# Perform operations\nresult = t + t\nprint(f\"Result: {result}\")\n\n# Mark step and synchronize (required for XLA)\nxm.mark_step()\nxm.wait_device_ops()","lang":"python","description":"Basic example: get XLA device, create tensor, run ops, mark step."},"warnings":[{"fix":"Ensure you are using PJRT (default). If you explicitly used XRT, switch to PJRT by not setting XRT runtime env vars.","message":"XRT runtime is deprecated and removed in PyTorch/XLA 2.1+. Use PJRT runtime for all new code.","severity":"breaking","affected_versions":">=2.1.0"},{"fix":"Call xm.mark_step() after each training step and xm.wait_device_ops() before measuring time or synchronizing.","message":"Missing call to xm.mark_step() or xm.wait_device_ops() causes lazy execution to not materialize, leading to hangs or incorrect results.","severity":"gotcha","affected_versions":"all"},{"fix":"For PyTorch/XLA versions before 2.9, install libtpu explicitly via: pip install torch_xla[tpu] -f https://storage.googleapis.com/libtpu-releases/index.html. From 2.9 onward, the [tpu] extra installs a compatible libtpu automatically.","message":"On older releases, the [tpu] extra may pull in a libtpu version that conflicts with newer libtpu releases; install with the libtpu releases index URL to get a compatible version.","severity":"deprecated","affected_versions":"<2.9.0"}],"env_vars":null,"last_verified":"2026-04-27T00:00:00.000Z","next_check":"2026-07-26T00:00:00.000Z","problems":[{"fix":"Verify you are on a TPU VM or have a GPU with XLA support. For PJRT (the default runtime), ensure libtpu is installed and set the environment variable PJRT_DEVICE=TPU.","cause":"No XLA device (TPU/GPU) available or PJRT runtime not initialized properly.","error":"RuntimeError: XLA device not found"},{"fix":"Use 'import torch_xla.core.xla_model as xm' instead of 'import torch_xla.xla_model as xm'.","cause":"Incorrect import path for xla_model; it resides in torch_xla.core.xla_model.","error":"ImportError: cannot import name 'xla_model' from 'torch_xla'"},{"fix":"Move tensors to the XLA device (xm.xla_device()) instead of calling .cuda() or using device='cuda'. Install a CUDA-enabled PyTorch wheel from pytorch.org only if you actually need CUDA GPUs.","cause":"The code requested a CUDA device (e.g. .cuda() or device='cuda') on a CPU-only PyTorch build; torch_xla does not require CUDA for TPU use.","error":"AssertionError: Torch not compiled with CUDA enabled"}],"ecosystem":"pypi","meta_description":null,"install_score":null,"install_tag":null,"quickstart_score":null,"quickstart_tag":null}