Python gRPC Prometheus Interceptors

0.8.0 · active · verified Sun Apr 12

py-grpc-prometheus is an instrumentation library that provides Prometheus metrics for gRPC services in Python. It offers client and server interceptors to expose standard gRPC metrics, aiming for parity with similar libraries in Java and Go. The current version is 0.8.0, released in February 2024, with releases occurring on an 'as-needed' basis.

Warnings

Install

Imports

Quickstart

This quickstart demonstrates how to set up both gRPC server and client interceptors with `py-grpc-prometheus` to expose metrics. It includes starting a Prometheus HTTP server and enabling histogram metrics for latency tracking. Note: for a fully functional gRPC application, you would replace the placeholder `greeter_pb2` and `GreeterServicer` with generated code from your `.proto` definitions.

import grpc
from concurrent import futures
from prometheus_client import start_http_server
from py_grpc_prometheus.prometheus_server_interceptor import PromServerInterceptor
from py_grpc_prometheus.prometheus_client_interceptor import PromClientInterceptor

# --- Example gRPC Service (replace with your actual service) ---
# In a real application, you would generate this from a .proto file
class GreeterServicer(grpc.ServerInterceptor):
    """Toy handler that also doubles as a pass-through server interceptor.

    It is used both as the "service" in run_client() and as a member of
    the interceptors tuple in serve(), which works only because it
    implements intercept_service().
    """

    def SayHello(self, request, context):
        """Build and return a greeting reply from the request's name."""
        print(f"Server received: {request.name}")
        reply_text = f"Hello, {request.name}!"
        return greeter_pb2.HelloReply(message=reply_text)

    def intercept_service(self, continuation, handler_call_details):
        """Log the invoked method name, then delegate to the next handler."""
        print(f"Custom server interceptor: {handler_call_details.method}")
        return continuation(handler_call_details)

# To make this example runnable, we'll mock proto definitions
class greeter_pb2:
    """Stand-in for the module that protoc would generate from greeter.proto."""

    class HelloRequest:
        # Mimics a generated message with a single `name` string field.
        def __init__(self, name: str = '') -> None:
            self.name = name

    class HelloReply:
        # Mimics a generated message with a single `message` string field.
        def __init__(self, message: str = '') -> None:
            self.message = message


# --- Server Setup ---
def serve():
    """Start a Prometheus metrics endpoint and a gRPC server instrumented
    with the Prometheus server interceptor.

    Blocks forever in wait_for_termination(); run this in its own process.
    """
    # Expose the default Prometheus registry over HTTP on port 8000.
    start_http_server(8000)
    print("Prometheus metrics exposed on port 8000")

    # enable_handling_time_histogram=True additionally records per-RPC
    # latency histograms on top of the default counters.
    prom_server_interceptor = PromServerInterceptor(enable_handling_time_histogram=True)

    # NOTE: GreeterServicer is passed as an *interceptor* here; this works
    # only because it implements intercept_service(). A real servicer would
    # instead be registered via greeter_pb2_grpc.add_GreeterServicer_to_server.
    server = grpc.server(futures.ThreadPoolExecutor(max_workers=10),
                         interceptors=(prom_server_interceptor, GreeterServicer()))

    # Add your actual service to the server
    # In a real app, this would be `greeter_pb2_grpc.add_GreeterServicer_to_server`
    # For this example, we'll just bind the port for a placeholder
    server.add_insecure_port('[::]:50051')
    server.start()
    # Fix: announce only after start() has actually run, so the log does not
    # claim the server is up before it is.
    print("gRPC server started on port 50051")
    server.wait_for_termination()

# --- Client Setup ---
def run_client():
    """Create an instrumented gRPC channel and issue one (mock) request.

    The Prometheus client interceptor records metrics for calls made through
    ``channel``. NOTE(review): the mock ``stub`` below bypasses the channel
    entirely, so no real RPC (and no client metric) occurs until the
    generated ``greeter_pb2_grpc.GreeterStub(channel)`` is used instead.
    """
    # enable_client_handling_time_histogram=True records per-RPC latency
    # histograms in addition to the default client counters.
    prom_client_interceptor = PromClientInterceptor(enable_client_handling_time_histogram=True)

    # Create gRPC channel with interceptor
    channel = grpc.intercept_channel(
        grpc.insecure_channel('localhost:50051'),
        prom_client_interceptor
    )
    try:
        # In a real app, this would be `greeter_pb2_grpc.GreeterStub(channel)`
        stub = GreeterServicer()  # Using the mock servicer for simplicity

        print("Client sending request...")
        try:
            response = stub.SayHello(greeter_pb2.HelloRequest(name='World'), context=None)
            print(f"Client received: {response.message}")
        except grpc.RpcError as e:
            print(f"Client received RPC error: {e.code()} - {e.details()}")
        print("Client finished.")
    finally:
        # Fix: the original leaked the channel; close it so its connection
        # resources are released.
        channel.close()

if __name__ == '__main__':
    # Illustrative entry point only: without code generated from a .proto
    # file this module cannot perform a real end-to-end RPC, so we just
    # print instructions instead of launching anything.
    # In practice you would run `serve()` in one process and `run_client()`
    # in another (server first, so the client has something to reach).
    
    # The two prints below are the script's entire runtime behavior; the
    # commented-out snippet afterwards shows the typical orchestration once
    # proto generation is in place.
    print("Quickstart shows client and server setup. Requires gRPC proto generation for full functionality.")
    print("To test metrics, run `serve()` in a separate process, then `run_client()`.")
    # Example of how you'd typically start them:
    # import multiprocessing
    # server_process = multiprocessing.Process(target=serve)
    # server_process.start()
    # time.sleep(2) # Give server time to start
    # run_client()
    # server_process.terminate()
    # server_process.join()

view raw JSON →