Flagsmith Flag Engine
The `flagsmith-flag-engine` package is the core evaluation engine for the Flagsmith API, enabling feature flag and remote configuration management. It processes environment, feature, segment, and identity data to determine flag states locally. The library is actively maintained — version 10.0.3 is currently available — and releases frequently, with major versions often introducing breaking changes.
Warnings
- breaking Version 10.0.0 removed `FeatureContext.feature_key` and `SegmentResult.key`.
- breaking Version 9.0.0 changed multivariate evaluation to use `FeatureValue.priority`.
- breaking Version 8.0.0 introduced a new `EvaluationResult` model and updated test data structures.
- breaking Version 7.0.0 dropped support for Python 3.8 and removed deprecated APIs and Pydantic models.
- gotcha Relying on implicit identity keys with the 'PERCENTAGE_SPLIT' operator can lead to incorrect evaluations. This bug was fixed in v10.0.1.
- gotcha Not cleaning up unused feature flags can lead to cluttered codebases, technical debt, and potential system failures.
Install
- pip install flagsmith-flag-engine
Imports
- EnvironmentModel
from flagsmith_engine.environments import EnvironmentModel
- IdentityModel
from flagsmith_engine.identities import IdentityModel
- TraitModel
from flagsmith_engine.features import TraitModel
- evaluate_identity_flags
from flagsmith_engine.evaluator import evaluate_identity_flags
Quickstart
import os
from flagsmith_engine.environments import EnvironmentModel, FeatureStateModel, SegmentModel
from flagsmith_engine.identities import IdentityModel, TraitModel
from flagsmith_engine.evaluator import evaluate_identity_flags
from flagsmith_engine.segments import SegmentCondition, SegmentRule, SEGMENT_CONDITION_OPERATORS

# A segment whose single ALL-rule matches identities with trait plan == "premium".
premium_segment = {
    "id": 201,
    "name": "Test Segment",
    "rules": [
        {
            "type": "ALL",
            "conditions": [
                {
                    "operator": SEGMENT_CONDITION_OPERATORS["EQUAL"],
                    "property": "plan",
                    "value": "premium",
                }
            ],
        }
    ],
}

# Hand-built environment document (normally this payload is fetched from the Flagsmith API).
env_payload = {
    "id": 1,
    "api_key": os.environ.get('FLAGSMITH_ENVIRONMENT_KEY', 'your_environment_key'),
    "project": {"id": 1, "name": "Test Project", "organisation": {"id": 1, "name": "Test Org"}},
    "feature_states": [
        {
            "id": 101,
            "feature": {"id": 1, "name": "my_test_feature", "type": "STANDARD"},
            "enabled": True,
            "feature_state_value": "default_value",
        },
        {
            "id": 102,
            "feature": {"id": 2, "name": "another_feature", "type": "STANDARD"},
            "enabled": False,
            "feature_state_value": "false_value",
        },
    ],
    "segments": [premium_segment],
}
environment = EnvironmentModel.parse_obj(env_payload)

# Evaluate for a premium-plan user — this identity satisfies the segment rule above.
premium_user = IdentityModel(
    identifier="test_user_123",
    environment_api_key=environment.api_key,
    traits=[TraitModel(trait_key="plan", trait_value="premium")],
)
flags, matched_segments = evaluate_identity_flags(premium_user, environment, None)
print(f"Identity '{premium_user.identifier}' is in segments: {[s.name for s in matched_segments]}")
for state in flags:
    print(f"Feature '{state.feature.name}': Enabled={state.enabled}, Value='{state.feature_state_value}'")

# Evaluate for a basic-plan user — this identity falls outside the segment.
basic_user = IdentityModel(
    identifier="basic_user_456",
    environment_api_key=environment.api_key,
    traits=[TraitModel(trait_key="plan", trait_value="basic")],
)
flags, matched_segments = evaluate_identity_flags(basic_user, environment, None)
print(f"\nIdentity '{basic_user.identifier}' is in segments: {[s.name for s in matched_segments]}")
for state in flags:
    print(f"Feature '{state.feature.name}': Enabled={state.enabled}, Value='{state.feature_state_value}'")