Fickling

0.1.10 · active · verified Sat Apr 11

Fickling is a static analyzer and interpreter for Python pickle data. It identifies dangerous modules, functions, and attributes used within pickle files to prevent arbitrary code execution vulnerabilities. The current version is 0.1.10, and it maintains an active release cadence, frequently publishing security updates and expanded blocklists.

Warnings

Install

Imports

Quickstart

This quickstart demonstrates how to use `fickling.analysis.analyze_pickle` to check both benign and potentially malicious pickle data. It shows how to catch `UnsafeError` and iterate through detected violations.

import pickle
from fickling.analysis import analyze_pickle
from fickling.errors import UnsafeError

# Create a benign pickle (for demonstration)
class MyObject:
    """Minimal container class used to produce a harmless pickle payload."""

    def __init__(self, name):
        """Remember *name* so the pickled payload carries some state."""
        self.name = name

# Build a harmless pickle payload to feed through the analyzer.
sample = MyObject("safe_data")
benign_pickled_data = pickle.dumps(sample)

# Statically analyze the payload and report anything Fickling flags.
try:
    print("\n--- Analyzing benign pickle ---")
    report = analyze_pickle(benign_pickled_data)
    if not report.is_safe():
        print("Pickle is potentially unsafe. Violations:")
        for finding in report.violations:
            print(f"- {finding.severity.name}: {finding.message}")
    else:
        print("Pickle is safe. No violations found.")
except UnsafeError as err:
    # Raised when the analyzer refuses the payload outright.
    print(f"Analysis detected an unsafe pickle: {err}")
except Exception as err:
    # Catch-all so a demo failure prints rather than crashes the script.
    print(f"An unexpected error occurred during analysis: {err}")

# Example of a potentially unsafe pickle (e.g., using os.system)
# NOTE: Do NOT run this with untrusted data in production!
# This is purely illustrative of what Fickling detects.
import os  # imported BEFORE the class that uses it (the original imported it after)


class MaliciousObject:
    """Illustrative object whose unpickling would invoke a shell command."""

    def __reduce__(self):
        # Pickling this object embeds a call to os.system in the payload;
        # the command runs when (and only when) the payload is unpickled.
        # os.system is referenced directly instead of via getattr(os, 'system').
        return (os.system, ('echo malicious command executed!',))


malicious_obj = MaliciousObject()
# dumps() only records the os.system reference — it does NOT execute it here.
unsafe_pickled_data = pickle.dumps(malicious_obj)

# Run the same analysis against the deliberately dangerous payload.
try:
    print("\n--- Analyzing potentially unsafe pickle ---")
    findings = analyze_pickle(unsafe_pickled_data)
    verdict_safe = findings.is_safe()
    if verdict_safe:
        print("Pickle is safe. No violations found.")
    else:
        print("Pickle is potentially unsafe. Violations:")
        for item in findings.violations:
            print(f"- {item.severity.name}: {item.message}")
except UnsafeError as err:
    # The analyzer may raise instead of returning a violation report.
    print(f"Analysis detected an unsafe pickle: {err}")
except Exception as err:
    # Keep the demo resilient: report unexpected failures and continue.
    print(f"An unexpected error occurred during analysis: {err}")

view raw JSON →