{"id":18514,"library":"lop","title":"lop","description":"lop is a JavaScript parsing library using parser combinators that emphasizes helpful error messages. Current stable version is 0.4.2, with infrequent releases. Unlike many parser libraries (e.g., PEG.js, nearley), lop integrates directly with tokenisers and provides cut operators to control backtracking and produce precise error locations. Its unique selling point is the generation of detailed, file-and-line-anchored error messages when parsing fails. It supports both automatic regex-based tokenisation and custom tokenisers. The library is untyped (no TypeScript definitions). It is lightweight with no dependencies.","status":"active","version":"0.4.2","language":"javascript","source_language":"en","source_url":"https://github.com/mwilliamson/lop","tags":["javascript","parse","parser","combinator"],"install":[{"cmd":"npm install lop","lang":"bash","label":"npm"},{"cmd":"yarn add lop","lang":"bash","label":"yarn"},{"cmd":"pnpm add lop","lang":"bash","label":"pnpm"}],"dependencies":[],"imports":[{"note":"lop does not have a default export; use named import or destructure from require.","wrong":"const lop = require('lop'); const Parser = lop.Parser;","symbol":"lop.Parser","correct":"import { Parser } from 'lop';"},{"note":"Named export; same pattern as other symbols.","wrong":"const RegexTokeniser = require('lop').RegexTokeniser;","symbol":"RegexTokeniser","correct":"import { RegexTokeniser } from 'lop';"},{"note":"rules object contains combinators like tokenOfType, sequence, firstOf.","wrong":"const { rules } = require('lop');","symbol":"rules","correct":"import { rules } from 'lop';"},{"note":"Used for custom tokeniser input.","wrong":"const StringSource = require('lop').StringSource;","symbol":"StringSource","correct":"import { StringSource } from 'lop';"},{"note":"No default export; namespace import works.","wrong":"import lop from 'lop';","symbol":"default (none)","correct":"import * as lop from 'lop';"}],"quickstart":{"code":"import { Parser, rules } from 'lop';\nimport { RegexTokeniser } from 'lop';\n\n// Define tokeniser\nconst tokeniser = new RegexTokeniser([\n  { name: 'integer', regex: /(\\d+)/ },\n  { name: 'keyword', regex: /(if|then|else)/ },\n  { name: 'whitespace', regex: /(\\s+)/ },\n]);\n\nclass IntegerNode { constructor(value) { this.value = value; } }\nclass IfNode { constructor(condition, trueBranch, falseBranch) { this.condition = condition; this.trueBranch = trueBranch; this.falseBranch = falseBranch; } }\n\nconst integerRule = rules.then(\n  rules.tokenOfType('integer'),\n  (value) => new IntegerNode(parseInt(value, 10))\n);\n\nconst expressionRule = rules.rule(() =>\n  rules.firstOf('expression', integerRule, ifRule)\n);\n\nconst ifRule = rules.sequence(\n  rules.token('keyword', 'if'),\n  rules.sequence.cut(),\n  rules.sequence.capture(expressionRule),\n  rules.token('keyword', 'then'),\n  rules.sequence.capture(expressionRule),\n  rules.token('keyword', 'else'),\n  rules.sequence.capture(expressionRule)\n).map((condition, trueBranch, falseBranch) =>\n  new IfNode(condition, trueBranch, falseBranch)\n);\n\nconst parser = new Parser();\nconst tokens = tokeniser.tokenise('if 1 then 2 else 3');\ntry {\n  const result = parser.parseTokens(expressionRule, tokens);\n  console.log(result);\n} catch (e) {\n  console.error(e.message);\n}","lang":"javascript","description":"Demonstrates a simple parser using lop with a regex tokeniser and parser combinators, including handling of cut and custom token types."},"warnings":[{"fix":"Use rules.rule(() => ...) or lop.rule(...) for mutually recursive rules.","message":"Rules must be wrapped in lop.rule() to defer evaluation when referencing rules that are defined later.","severity":"gotcha","affected_versions":">=0.0.0"},{"fix":"Add a capture group to your regex: /(\\d+)/ instead of /\\d+/.","message":"Token values are the first regex capture group; if your regex has no groups, value is undefined.","severity":"gotcha","affected_versions":">=0.0.0"},{"fix":"List specific patterns before generic ones (e.g., keywords before identifiers).","message":"The lexer's rule order matters: the first matching rule produces the token. Unrecognized characters become 'unrecognisedCharacter' tokens.","severity":"gotcha","affected_versions":">=0.0.0"},{"fix":"","message":"No known deprecations in current version; the library is stable but low-maintenance.","severity":"deprecated","affected_versions":"0.4.2"},{"fix":"Wrap input in new StringSource(string, 'description') before tokenising.","message":"Error messages reference 'File', 'Line number', and 'Character number' only if tokens have source info from StringSource. Using raw strings without source produces less informative errors.","severity":"gotcha","affected_versions":">=0.0.0"}],"env_vars":null,"last_verified":"2026-04-25T00:00:00.000Z","next_check":"2026-07-24T00:00:00.000Z","problems":[{"fix":"Check that the token sequence matches the rule exactly. The error indicates a specific mismatch after the cut point.","cause":"Parsing failed inside a rule after a cut, preventing backtracking.","error":"Error: Expected keyword \"then\" but got integer \"42\""},{"fix":"Use const { Parser } = require('lop'); or import { Parser } from 'lop';","cause":"Using import or require incorrectly; likely used default import instead of named.","error":"TypeError: lop.Parser is not a constructor"},{"fix":"Call rules.firstOf('expressionName', rule1, rule2) with a string name first.","cause":"Missing rule name as first argument to firstOf.","error":"Error: First argument to rules.firstOf must be a string (name), followed by rules"},{"fix":"Use import * as lop from 'lop' or import { specific } from 'lop'.","cause":"Attempted import lop from 'lop' which is incorrect.","error":"Error: The lop module does not have a default export"}],"ecosystem":"npm","meta_description":null,"install_score":null,"install_tag":null,"quickstart_score":null,"quickstart_tag":null}