{"id":24510,"library":"recipe-scrapers","title":"recipe-scrapers","description":"A Python package for scraping recipe data from hundreds of websites. Provides a unified interface to extract structured data such as ingredients, instructions, cook times, and nutrition. Requires Python >=3.10. Current version: 15.11.0. Active development with frequent releases every few weeks.","status":"active","version":"15.11.0","language":"python","source_language":"en","source_url":"https://github.com/hhursev/recipe-scrapers","tags":["recipe","scraping","food","web-scraping"],"install":[{"cmd":"pip install recipe-scrapers","lang":"bash","label":"Install from PyPI"}],"dependencies":[{"reason":"HTML parsing","package":"beautifulsoup4","optional":false},{"reason":"HTTP requests","package":"requests","optional":false},{"reason":"XML parser","package":"lxml","optional":false}],"imports":[{"note":"scrape_me is a top-level function, not from the scraper module.","wrong":"from recipe_scrapers.scraper import scrape_me","symbol":"scrape_me","correct":"from recipe_scrapers import scrape_me"},{"note":"AbstractScraper is exported from the package root for convenience.","wrong":"from recipe_scrapers.abstract import AbstractScraper","symbol":"AbstractScraper","correct":"from recipe_scrapers import AbstractScraper"}],"quickstart":{"code":"from recipe_scrapers import scrape_me\n\nurl = 'https://www.allrecipes.com/recipe/12345/'\nscraper = scrape_me(url)\nprint(scraper.title())\nprint(scraper.ingredients())\nprint(scraper.instructions())","lang":"python","description":"Fetch and parse a recipe from a given URL. scrape_me returns a scraper object with methods like title(), ingredients(), instructions(), etc."},"warnings":[{"fix":"Upgrade Python to 3.10 or higher.","message":"Python 3.9 dropped as of v15.11.0. Must use Python >=3.10.","severity":"breaking","affected_versions":">=15.11.0"},{"fix":"Use `cook_time()` or `prep_time()` instead if available.","message":"The method `total_time()` may be deprecated in favor of `cook_time()` and `prep_time()`. Check each scraper for support.","severity":"deprecated","affected_versions":">=15.0.0"},{"fix":"Check the list of supported sites in the docs or pass `wild_mode=True` as fallback.","message":"Not all websites are supported. Trying an unsupported URL raises an exception. Use `scrape_me(url, wild_mode=True)` to attempt generic scraping.","severity":"gotcha","affected_versions":"all"}],"env_vars":null,"last_verified":"2026-05-01T00:00:00.000Z","next_check":"2026-07-30T00:00:00.000Z","problems":[{"fix":"Change import to: from recipe_scrapers import scrape_me","cause":"Importing the module name with a hyphen (recipe-scrapers) instead of the correct package name (recipe_scrapers).","error":"AttributeError: module 'recipe_scrapers' has no attribute 'scrape_me'"},{"fix":"Pass a custom headers dict to scrape_me: scrape_me(url, headers={'User-Agent': 'Mozilla/5.0'})","cause":"Some websites block automated requests. The scraper does not handle this gracefully.","error":"requests.exceptions.HTTPError: 403 Client Error"},{"fix":"Ensure the URL is from a supported source. Use wild_mode=True for generic parsing.","cause":"The URL does not correspond to a recipe page or the scraper cannot parse it.","error":"ValueError: No recipe found at this URL"}],"ecosystem":"pypi","meta_description":null,"install_score":null,"install_tag":null,"quickstart_score":null,"quickstart_tag":null}