Python Examples

Advanced Python integration examples for AletheionGuard, including FastAPI, Django, Flask, and async patterns.

FastAPI Integration

Build a REST API with FastAPI that includes epistemic uncertainty auditing for all responses.

from fastapi import FastAPI, HTTPException
from pydantic import BaseModel
from aletheion_guard import EpistemicAuditor

app = FastAPI()
auditor = EpistemicAuditor()

class QueryRequest(BaseModel):
    question: str
    context: str | None = None

class QueryResponse(BaseModel):
    answer: str
    verdict: str
    q1: float
    q2: float
    height: float
    confidence_score: float

@app.post("/query", response_model=QueryResponse)
async def query_with_audit(request: QueryRequest):
    """Answer questions with epistemic uncertainty auditing."""
    # Generate answer (placeholder for your LLM)
    answer = generate_answer(request.question, request.context)

    # Audit the answer
    result = auditor.evaluate(
        text=answer,
        context=request.context
    )

    # Return only if confidence is acceptable
    if result.verdict == "REFUSED":
        raise HTTPException(
            status_code=503,
            detail="Cannot provide confident answer"
        )

    return QueryResponse(
        answer=answer,
        verdict=result.verdict,
        q1=result.q1,
        q2=result.q2,
        height=result.height,
        confidence_score=result.height
    )

def generate_answer(question: str, context: str | None) -> str:
    # Your LLM integration here
    return "Example answer"

Tip: Use dependency injection to share a single auditor instance across all requests for better performance.
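
As a minimal sketch of that pattern, FastAPI's Depends can hand the same module-level auditor to every route. The get_auditor provider below is a hypothetical helper for illustration, not part of AletheionGuard.

from fastapi import Body, Depends, FastAPI
from aletheion_guard import EpistemicAuditor

app = FastAPI()
_auditor = EpistemicAuditor()  # created once at import time

def get_auditor() -> EpistemicAuditor:
    # Routes (and tests) resolve the shared instance through this provider
    return _auditor

@app.post("/audit")
async def audit(
    text: str = Body(..., embed=True),
    auditor: EpistemicAuditor = Depends(get_auditor),
):
    result = auditor.evaluate(text)
    return {"verdict": result.verdict, "height": result.height}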

Django Integration

Add epistemic uncertainty auditing to Django views and models.

views.py

from django.http import JsonResponse
from django.views.decorators.http import require_http_methods
from aletheion_guard import EpistemicAuditor
import json

# Initialize once at module level
auditor = EpistemicAuditor()

@require_http_methods(["POST"])
def audit_content(request):
    """Audit user-generated content before publishing."""
    data = json.loads(request.body)
    content = data.get("content")

    # Audit the content
    result = auditor.evaluate(content)

    response_data = {
        "verdict": result.verdict,
        "q1": result.q1,
        "q2": result.q2,
        "height": result.height,
        "action": {
            "ACCEPT": "publish",
            "MAYBE": "review",
            "REFUSED": "reject"
        }.get(result.verdict)
    }
    return JsonResponse(response_data)
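
urls.py - URL Configuration

To expose the view, add a route for it in your URL configuration. The myapp module path below is a placeholder for your own project layout, not something AletheionGuard provides.

from django.urls import path

from myapp import views  # adjust to your app's import path

urlpatterns = [
    path("api/audit/", views.audit_content, name="audit_content"),
]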

middleware.py - Audit Middleware

from aletheion_guard import EpistemicAuditor
import json

class AuditMiddleware:
    """Middleware to audit all API responses."""

    def __init__(self, get_response):
        self.get_response = get_response
        self.auditor = EpistemicAuditor()

    def __call__(self, request):
        response = self.get_response(request)

        # Only audit JSON responses
        if response.get("Content-Type") == "application/json":
            try:
                data = json.loads(response.content)
                # Audit main content field
                if "content" in data:
                    result = self.auditor.evaluate(data["content"])
                    # Add audit metadata to response
                    data["_audit"] = {
                        "verdict": result.verdict,
                        "height": result.height
                    }
                    response.content = json.dumps(data)
            except json.JSONDecodeError:
                pass
        return response
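
The middleware only takes effect once it is registered in settings.py. The dotted path below assumes the class lives in myapp/middleware.py and is a placeholder for your project layout.

MIDDLEWARE = [
    # ... Django's default middleware ...
    "myapp.middleware.AuditMiddleware",
]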

Flask Integration

Integrate AletheionGuard with Flask applications using decorators.

from flask import Flask, request, jsonify
from functools import wraps
from aletheion_guard import EpistemicAuditor

app = Flask(__name__)
auditor = EpistemicAuditor()

def audit_response(min_height=0.6):
    """Decorator to audit route responses."""
    def decorator(f):
        @wraps(f)
        def decorated_function(*args, **kwargs):
            # Get the response
            response = f(*args, **kwargs)

            # Extract text to audit
            if isinstance(response, dict) and "answer" in response:
                result = auditor.evaluate(response["answer"])

                # Add audit info
                response["audit"] = {
                    "verdict": result.verdict,
                    "height": result.height,
                    "q1": result.q1,
                    "q2": result.q2
                }

                # Reject if below threshold
                if result.height < min_height:
                    return jsonify({
                        "error": "Low confidence response",
                        "height": result.height
                    }), 503
            return jsonify(response)
        return decorated_function
    return decorator

def generate_answer(question: str) -> str:
    # Your LLM integration here
    return "Example answer"

@app.route("/ask", methods=["POST"])
@audit_response(min_height=0.7)
def ask_question():
    question = request.json.get("question")
    answer = generate_answer(question)
    return {"answer": answer}
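
A quick way to exercise the route locally is Flask's built-in test client; in production the endpoint would be called over HTTP instead.

with app.test_client() as client:
    resp = client.post("/ask", json={"question": "What is the capital of France?"})
    print(resp.status_code, resp.get_json())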

Async Batch Processing

Process large volumes of text asynchronously using Python's async/await.

import asyncio
from typing import List
from aletheion_guard import EpistemicAuditor

class AsyncAuditor:
    """Async wrapper for batch auditing."""

    def __init__(self):
        self.auditor = EpistemicAuditor()

    async def audit_batch_async(
        self,
        texts: List[str],
        batch_size: int = 32
    ) -> List[dict]:
        """Process texts in batches asynchronously."""
        results = []
        for i in range(0, len(texts), batch_size):
            batch = texts[i:i + batch_size]

            # Run in executor to avoid blocking the event loop
            loop = asyncio.get_running_loop()
            batch_results = await loop.run_in_executor(
                None,
                self.auditor.batch_evaluate,
                batch
            )
            results.extend(batch_results)
        return results

async def main():
    auditor = AsyncAuditor()

    # Large dataset
    texts = [f"Statement {i}" for i in range(1000)]

    # Process asynchronously
    results = await auditor.audit_batch_async(texts)
    print(f"Processed {len(results)} texts")

# Run
asyncio.run(main())
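
Passing None as the executor hands each batch to asyncio's default thread pool, so the synchronous model inference never blocks the event loop and other coroutines keep running while a batch is evaluated.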

Celery Task Queue

Use Celery to audit content asynchronously in background tasks.

from celery import Celery
from aletheion_guard import EpistemicAuditor

# Initialize Celery
app = Celery(
    "tasks",
    broker="redis://localhost:6379/0",
    backend="redis://localhost:6379/0"
)

# Initialize auditor once
auditor = EpistemicAuditor()

@app.task
def audit_content_task(content_id: int, text: str):
    """Background task to audit content."""
    result = auditor.evaluate(text)

    # Save results to database
    # (save_audit_result is your own persistence helper, not part of AletheionGuard)
    save_audit_result(
        content_id=content_id,
        verdict=result.verdict,
        q1=result.q1,
        q2=result.q2,
        height=result.height
    )
    return {
        "content_id": content_id,
        "verdict": result.verdict
    }

@app.task
def audit_batch_task(items: list):
    """Batch audit task."""
    texts = [item["text"] for item in items]
    results = auditor.batch_evaluate(texts)

    for item, result in zip(items, results):
        save_audit_result(
            content_id=item["id"],
            verdict=result.verdict,
            q1=result.q1,
            q2=result.q2,
            height=result.height
        )
    return len(results)

# Usage
audit_content_task.delay(content_id=123, text="Example text")
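
Note that .delay() only enqueues the task; a worker must be running to execute it, for example one started with celery -A tasks worker --loglevel=info (the -A value matches the "tasks" module name used above).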

OpenAI Integration

Audit OpenAI responses before returning them to users.

from openai import OpenAI
from aletheion_guard import EpistemicAuditor

client = OpenAI()
auditor = EpistemicAuditor()

def ask_with_audit(
    question: str,
    model: str = "gpt-4",
    min_height: float = 0.6
) -> dict:
    """Ask OpenAI and audit the response."""
    # Get OpenAI response
    response = client.chat.completions.create(
        model=model,
        messages=[{"role": "user", "content": question}]
    )
    answer = response.choices[0].message.content

    # Audit the answer
    audit_result = auditor.evaluate(
        text=answer,
        context=question
    )

    # Prepare response
    result = {
        "question": question,
        "answer": answer,
        "model": model,
        "audit": {
            "verdict": audit_result.verdict,
            "q1": audit_result.q1,
            "q2": audit_result.q2,
            "height": audit_result.height
        }
    }

    # Add warning if low confidence
    if audit_result.height < min_height:
        result["warning"] = "Low confidence response. Verify independently."
    return result

# Usage
result = ask_with_audit("What is the capital of France?")
print(f"Answer: {result['answer']}")
print(f"Verdict: {result['audit']['verdict']}")

Pandas DataFrame Processing

Audit entire datasets loaded into Pandas DataFrames using batch evaluation, then filter and summarize the results with DataFrame operations.

import pandas as pd
from aletheion_guard import EpistemicAuditor

auditor = EpistemicAuditor()

# Load dataset
df = pd.read_csv("content.csv")

# Batch audit all texts
texts = df["content"].tolist()
results = auditor.batch_evaluate(texts, batch_size=64)

# Add audit columns
df["verdict"] = [r.verdict for r in results]
df["q1"] = [r.q1 for r in results]
df["q2"] = [r.q2 for r in results]
df["height"] = [r.height for r in results]

# Filter by verdict
accepted = df[df["verdict"] == "ACCEPT"]
review_needed = df[df["verdict"] == "MAYBE"]
rejected = df[df["verdict"] == "REFUSED"]

# Statistics
print(f"Total: {len(df)}")
print(f"Accepted: {len(accepted)} ({len(accepted)/len(df)*100:.1f}%)")
print(f"Review: {len(review_needed)} ({len(review_needed)/len(df)*100:.1f}%)")
print(f"Rejected: {len(rejected)} ({len(rejected)/len(df)*100:.1f}%)")

# Save results
df.to_csv("content_audited.csv", index=False)

Redis Caching Layer

Add a Redis caching layer to avoid redundant audits and improve performance.

import redis
import hashlib
import json
from aletheion_guard import EpistemicAuditor

class CachedAuditor:
    """Auditor with Redis caching."""

    def __init__(self, redis_url: str = "redis://localhost:6379"):
        self.auditor = EpistemicAuditor()
        self.redis = redis.from_url(redis_url)
        self.cache_ttl = 3600  # 1 hour

    def _cache_key(self, text: str, context: str | None) -> str:
        """Generate cache key."""
        content = f"{text}:{context or ''}"
        hash_obj = hashlib.sha256(content.encode())
        return f"audit:{hash_obj.hexdigest()}"

    def evaluate(self, text: str, context: str | None = None):
        """Evaluate with caching."""
        cache_key = self._cache_key(text, context)

        # Check cache
        cached = self.redis.get(cache_key)
        if cached:
            return json.loads(cached)

        # Audit and cache
        result = self.auditor.evaluate(text, context)
        result_dict = {
            "verdict": result.verdict,
            "q1": result.q1,
            "q2": result.q2,
            "height": result.height
        }
        self.redis.setex(
            cache_key,
            self.cache_ttl,
            json.dumps(result_dict)
        )
        return result_dict

# Usage
auditor = CachedAuditor()
result = auditor.evaluate("Paris is the capital of France")
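
Because cache hits return the stored JSON, evaluate always yields a plain dict with the same keys that were cached, so callers see a consistent shape whether or not the result came from Redis.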

Next Steps