{"id":21124,"library":"dgl","title":"Deep Graph Library (DGL)","description":"An open-source Python library for deep learning on graphs, supporting PyTorch, TensorFlow, and Apache MXNet. Currently at version 2.4.0, with releases every few months. Focuses on graph neural networks (GNNs) and scalable graph computation.","status":"active","version":"2.4.0","language":"python","source_language":"en","source_url":"https://github.com/dmlc/dgl","tags":["graph-neural-networks","deep-learning","gpu","pytorch"],"install":[{"cmd":"pip install dgl","lang":"bash","label":"CPU version (PyTorch backend)"},{"cmd":"pip install dgl -f https://data.dgl.ai/wheels/cu118/repo.html","lang":"bash","label":"CUDA 11.8 version (PyTorch)"},{"cmd":"pip install dgl -f https://data.dgl.ai/wheels/cu121/repo.html","lang":"bash","label":"CUDA 12.1 version (PyTorch)"}],"dependencies":[{"reason":"Primary deep learning backend (>=2.1.0, <2.5). Must be installed separately.","package":"torch","optional":false},{"reason":"Array operations; version must be <2.0.0 for DGL <2.4.0, but 2.4.0 may support numpy>=2.0.0.","package":"numpy","optional":true},{"reason":"Optional backend (DGL TF support is legacy, use PyTorch).","package":"tensorflow","optional":true}],"imports":[{"note":"dgl.graph is not a direct module; use dgl.graph() function after import dgl.","wrong":"import dgl.graph","symbol":"dgl.graph","correct":"import dgl"},{"note":"SAGEConv is in dgl.nn submodule, not top-level.","wrong":"from dgl import SAGEConv","symbol":"dgl.nn.SAGEConv","correct":"import dgl; from dgl.nn import SAGEConv"},{"note":"DataLoader is in dgl.dataloading submodule.","wrong":"from dgl import DataLoader","symbol":"dgl.dataloading.DataLoader","correct":"from dgl.dataloading import DataLoader"}],"quickstart":{"code":"import dgl\nimport torch\n\n# Create a simple graph with 3 nodes and 2 edges\ng = dgl.graph(([0, 1], [1, 2]), num_nodes=3)\n\n# Assign node features\nx = torch.randn(3, 5)\n\n# Create a Graph Neural Network layer\nfrom dgl.nn import SAGEConv\nconv = SAGEConv(5, 2, 'mean')\n\n# Forward pass\nh = conv(g, x)\nprint(h.shape)  # torch.Size([3, 2])","lang":"python","description":"Basic DGL usage: create graph, assign features, apply SAGEConv layer."},"warnings":[{"fix":"Upgrade PyTorch to 2.1.0 or later (or as recommended by DGL version).","message":"Support for PyTorch 1.13 and earlier versions is deprecated. DGL 2.1+ requires PyTorch >=2.0.0.","severity":"deprecated","affected_versions":">=2.0.0"},{"fix":"pip install 'numpy<2.0.0'","message":"numpy 2.0.0 is not fully compatible with DGL <2.4.0. Pin numpy<2.0.0.","severity":"gotcha","affected_versions":"<2.4.0"},{"fix":"import dgl.distributed before using distributed features.","message":"In DGL 2.4, distributed module is not imported by default. Users must import dgl.distributed manually.","severity":"breaking","affected_versions":"2.4.0 and later"},{"fix":"Use HeteroItemSet instead of ItemSetDict.","message":"HeteroItemSet replaces ItemSetDict. ItemSetDict is deprecated and will be removed.","severity":"breaking","affected_versions":"2.3.0 and later"},{"fix":"Upgrade CUDA to 11.7 or later.","message":"CUDA 11.6 support dropped in DGL 2.1.0. Supported CUDA: 11.7, 11.8, 12.1.","severity":"deprecated","affected_versions":">=2.1.0"},{"fix":"Access 'seeds' attribute on MiniBatch objects.","message":"In DGL 2.2 GraphBolt, MiniBatch now uses 'seeds' attribute instead of 'seed_nodes' and 'node_pairs'.","severity":"breaking","affected_versions":"2.2.0 and later"}],"env_vars":null,"last_verified":"2026-04-27T00:00:00.000Z","next_check":"2026-07-26T00:00:00.000Z","problems":[{"fix":"Use 'from dgl.nn import SAGEConv'.","cause":"SAGEConv is in dgl.nn submodule, not top-level.","error":"ImportError: cannot import name 'SAGEConv' from 'dgl'"},{"fix":"Ensure you have DGL >=1.0 and use 'dgl.graph(...)' (lowercase g).","cause":"Incorrect import or outdated DGL version (pre-1.0).","error":"AttributeError: module 'dgl' has no attribute 'graph'"},{"fix":"Ensure all tensors have consistent dtype (use .float() or .half() accordingly).","cause":"Mixing float16 and float32 tensors in graph operations.","error":"RuntimeError: expected scalar type Float but found Half"}],"ecosystem":"pypi","meta_description":null,"install_score":null,"install_tag":null,"quickstart_score":null,"quickstart_tag":null}