LangChain Integration

Integrate AletheionGuard's epistemic uncertainty detection into your LangChain workflows, agents, and RAG systems.

Installation

Install the required packages for LangChain and AletheionGuard integration.

pip install langchain langchain-openai aletheion-guard
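
The RAG and state-machine examples further down also use the Chroma vector store and LangGraph; if you plan to run those sections, install their packages as well:

pip install langgraph chromadb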

Basic Chain Integration

Add epistemic uncertainty auditing to any LangChain chain by wrapping the chain invocation in a small audit function; a callback-based alternative is sketched after the example.

from langchain_openai import ChatOpenAI
from langchain.prompts import ChatPromptTemplate
from langchain.schema import StrOutputParser
from aletheion_guard import EpistemicAuditor

# Initialize components
llm = ChatOpenAI(model="gpt-4")
auditor = EpistemicAuditor()

# Create prompt
prompt = ChatPromptTemplate.from_messages([
    ("system", "You are a helpful assistant."),
    ("user", "{question}")
])

# Build chain with audit wrapper
def chain_with_audit(question: str):
    # Generate response
    chain = prompt | llm | StrOutputParser()
    answer = chain.invoke({"question": question})

    # Audit the response
    audit_result = auditor.evaluate(
        text=answer,
        context=question
    )

    return {
        "answer": answer,
        "verdict": audit_result.verdict,
        "q1": audit_result.q1,
        "q2": audit_result.q2,
        "height": audit_result.height
    }

# Usage
result = chain_with_audit("What is the capital of France?")
print(f"Answer: {result['answer']}")
print(f"Verdict: {result['verdict']} (height: {result['height']:.2f})")

RAG System Integration

Enhance your RAG (Retrieval-Augmented Generation) system with epistemic uncertainty checks, and trigger re-retrieval with more documents whenever Q2 (epistemic uncertainty) is high.

from langchain_openai import ChatOpenAI, OpenAIEmbeddings
from langchain.vectorstores import Chroma
from langchain.chains import RetrievalQA
from langchain.text_splitter import RecursiveCharacterTextSplitter
from aletheion_guard import EpistemicAuditor

class AuditedRAG:
    """RAG system with epistemic uncertainty auditing."""

    def __init__(self, documents, q2_threshold=0.35):
        self.auditor = EpistemicAuditor()
        self.q2_threshold = q2_threshold

        # Setup RAG components
        text_splitter = RecursiveCharacterTextSplitter(
            chunk_size=1000,
            chunk_overlap=200
        )
        splits = text_splitter.split_documents(documents)

        embeddings = OpenAIEmbeddings()
        self.vectorstore = Chroma.from_documents(splits, embeddings)
        self.llm = ChatOpenAI(model="gpt-4", temperature=0)
        self.qa_chain = RetrievalQA.from_chain_type(
            llm=self.llm,
            retriever=self.vectorstore.as_retriever(
                search_kwargs={"k": 3}
            )
        )

    def query(self, question: str, max_retries=2):
        """Query with automatic re-retrieval on high Q2."""
        k = 3  # Initial number of documents

        for attempt in range(max_retries + 1):
            # Retrieve and generate
            self.qa_chain.retriever.search_kwargs["k"] = k
            answer = self.qa_chain.invoke(question)["result"]

            # Audit the answer
            audit = self.auditor.evaluate(answer, context=question)

            # Check Q2 (epistemic uncertainty)
            if audit.q2 < self.q2_threshold:
                # Confident answer
                return {
                    "answer": answer,
                    "verdict": audit.verdict,
                    "q2": audit.q2,
                    "height": audit.height,
                    "attempts": attempt + 1,
                    "documents_used": k
                }

            # High Q2: retrieve more context
            if attempt < max_retries:
                k += 3  # Increase retrieval
                print(f"High Q2 ({audit.q2:.2f}). Retrieving more context...")

        # Max retries reached
        return {
            "answer": "I cannot provide a confident answer with the available information.",
            "verdict": "REFUSED",
            "q2": audit.q2,
            "height": audit.height
        }

# Usage
rag = AuditedRAG(documents)
result = rag.query("What are the key findings?")
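
The usage above assumes documents is already a list of LangChain Document objects. A minimal way to produce one, assuming your source material sits in a local text file (the path below is illustrative), is with a document loader:

from langchain.document_loaders import TextLoader

# Load a local file into Document objects
documents = TextLoader("reports/findings.txt").load()

rag = AuditedRAG(documents, q2_threshold=0.35)
result = rag.query("What are the key findings?")
print(result["answer"])
print(f"Attempts: {result.get('attempts')}, documents used: {result.get('documents_used')}")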

Agent Integration

Create LangChain agents that use AletheionGuard as a tool to assess their own outputs.

from langchain.agents import initialize_agent, AgentType, Tool
from langchain_openai import ChatOpenAI
from aletheion_guard import EpistemicAuditor

auditor = EpistemicAuditor()

def audit_tool(text: str) -> str:
    """Tool for agents to audit their own responses."""
    result = auditor.evaluate(text)
    return f"""Verdict: {result.verdict}
Q1 (Aleatoric): {result.q1:.3f}
Q2 (Epistemic): {result.q2:.3f}
Height (Confidence): {result.height:.3f}
Recommendation: {'Accept' if result.verdict == 'ACCEPT' else 'Verify or gather more info'}"""

# Define tools
tools = [
    Tool(
        name="EpistemicAudit",
        func=audit_tool,
        description="""Use this tool to audit the epistemic uncertainty of a statement.
        Input should be the text to audit. Returns verdict and uncertainty metrics."""
    ),
    # Add other tools...
]

# Create agent
llm = ChatOpenAI(model="gpt-4", temperature=0)
agent = initialize_agent(
    tools=tools,
    llm=llm,
    agent=AgentType.OPENAI_FUNCTIONS,
    verbose=True
)

# Agent can now audit its own outputs
response = agent.run(
    """Make a claim about quantum computing and then audit
    your claim using the EpistemicAudit tool."""
)
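
The agent decides for itself when to call the tool, so nothing forces the final answer through an audit. A simple safeguard, assuming the same auditor instance as above, is to run one last check on whatever the agent returns:

# Final check on the agent's answer before showing it to the user
final_audit = auditor.evaluate(response)
if final_audit.verdict != "ACCEPT":
    response += "\n\n[Low-confidence answer - please verify independently.]"
print(response)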

Custom Runnable

Create a custom LangChain Runnable that integrates auditing into the pipeline.

from langchain.schema.runnable import Runnable, RunnableConfig
from typing import Any, Dict
from aletheion_guard import EpistemicAuditor

class AuditRunnable(Runnable[str, Dict[str, Any]]):
    """Custom runnable that audits text."""

    def __init__(self, min_height: float = 0.6):
        self.auditor = EpistemicAuditor()
        self.min_height = min_height

    def invoke(
        self,
        input: str,
        config: RunnableConfig | None = None
    ) -> Dict[str, Any]:
        result = self.auditor.evaluate(input)
        return {
            "text": input,
            "verdict": result.verdict,
            "q1": result.q1,
            "q2": result.q2,
            "height": result.height,
            "passed": result.height >= self.min_height
        }

# Usage in a chain
from langchain_openai import ChatOpenAI
from langchain.prompts import ChatPromptTemplate
from langchain.schema import StrOutputParser

prompt = ChatPromptTemplate.from_template("Answer: {question}")
llm = ChatOpenAI()
audit = AuditRunnable(min_height=0.7)

# Chain: prompt → LLM → audit
chain = prompt | llm | StrOutputParser() | audit

result = chain.invoke({"question": "What is quantum entanglement?"})
print(f"Passed audit: {result['passed']}")

Conversational Memory with Audit

Track epistemic uncertainty across conversation history to identify when the assistant becomes uncertain.

from langchain.memory import ConversationBufferMemory
from langchain.chains import ConversationChain
from langchain_openai import ChatOpenAI
from aletheion_guard import EpistemicAuditor

class AuditedConversation:
    """Conversational chain with uncertainty tracking."""

    def __init__(self):
        self.auditor = EpistemicAuditor()
        self.llm = ChatOpenAI(model="gpt-4")
        self.memory = ConversationBufferMemory()
        self.chain = ConversationChain(
            llm=self.llm,
            memory=self.memory
        )
        self.audit_history = []

    def chat(self, user_input: str):
        # Generate response
        response = self.chain.predict(input=user_input)

        # Audit the response
        audit = self.auditor.evaluate(response, context=user_input)

        # Track audit history
        self.audit_history.append({
            "user": user_input,
            "assistant": response,
            "verdict": audit.verdict,
            "q2": audit.q2,
            "height": audit.height
        })

        # Warn if uncertainty is increasing
        if len(self.audit_history) > 1:
            prev_q2 = self.audit_history[-2]["q2"]
            if audit.q2 > prev_q2 + 0.15:
                response += "\n\n[Note: My confidence has decreased. Consider verifying this information.]"

        return {
            "response": response,
            "audit": audit
        }

    def get_audit_summary(self):
        """Get summary of conversation uncertainty."""
        if not self.audit_history:
            return "No conversation history"

        avg_height = sum(h["height"] for h in self.audit_history) / len(self.audit_history)
        refused_count = sum(1 for h in self.audit_history if h["verdict"] == "REFUSED")

        return {
            "turns": len(self.audit_history),
            "avg_confidence": avg_height,
            "refused_responses": refused_count
        }

# Usage
conv = AuditedConversation()
result = conv.chat("Tell me about quantum physics")
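
After a few more turns, get_audit_summary gives the aggregate picture of the conversation:

# Continue the conversation, then inspect the aggregate audit metrics
conv.chat("How is entanglement used in quantum computing?")
conv.chat("Will quantum computers break all encryption by 2030?")

summary = conv.get_audit_summary()
print(f"Turns: {summary['turns']}")
print(f"Average confidence: {summary['avg_confidence']:.2f}")
print(f"Refused responses: {summary['refused_responses']}")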

LangGraph State Machine

Use LangGraph to build a state machine that routes based on epistemic uncertainty.

from langgraph.graph import StateGraph, END
from typing import TypedDict
from aletheion_guard import EpistemicAuditor
from langchain_openai import ChatOpenAI

class GraphState(TypedDict):
    question: str
    answer: str
    verdict: str
    q2: float
    retry_count: int

auditor = EpistemicAuditor()
llm = ChatOpenAI(model="gpt-4")

def generate_node(state: GraphState):
    """Generate answer."""
    answer = llm.invoke(state["question"]).content
    return {"answer": answer}

def audit_node(state: GraphState):
    """Audit the answer."""
    result = auditor.evaluate(state["answer"], context=state["question"])
    return {
        "verdict": result.verdict,
        "q2": result.q2
    }

def should_retry(state: GraphState):
    """Decide if we should retry."""
    if state["q2"] > 0.4 and state.get("retry_count", 0) < 2:
        return "retry"
    return "end"

def retry_node(state: GraphState):
    """Increment retry counter."""
    return {"retry_count": state.get("retry_count", 0) + 1}

# Build graph
workflow = StateGraph(GraphState)
workflow.add_node("generate", generate_node)
workflow.add_node("audit", audit_node)
workflow.add_node("retry", retry_node)

workflow.set_entry_point("generate")
workflow.add_edge("generate", "audit")
workflow.add_conditional_edges("audit", should_retry, {
    "retry": "retry",
    "end": END
})
workflow.add_edge("retry", "generate")

app = workflow.compile()
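
The compiled graph is itself a runnable: invoking it with an initial state runs generate → audit and loops through retry for as long as Q2 stays above the threshold.

# Run the graph with an initial state
final_state = app.invoke({
    "question": "What is the capital of Australia?",
    "retry_count": 0
})
print(final_state["answer"])
print(f"Verdict: {final_state['verdict']}, Q2: {final_state['q2']:.2f}")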

Batch Document Processing

Process and audit multiple documents efficiently using LangChain's batch processing.

from langchain_openai import ChatOpenAI
from langchain.prompts import ChatPromptTemplate
from aletheion_guard import EpistemicAuditor

def process_documents_with_audit(documents: list[str]):
    """Process and audit multiple documents."""
    auditor = EpistemicAuditor()
    llm = ChatOpenAI(model="gpt-4")

    prompt = ChatPromptTemplate.from_template(
        "Summarize the following document:\n\n{document}"
    )

    # Generate summaries
    chain = prompt | llm
    summaries = chain.batch([{"document": doc} for doc in documents])
    summary_texts = [s.content for s in summaries]

    # Batch audit
    audit_results = auditor.batch_evaluate(summary_texts)

    # Combine results
    results = []
    for doc, summary, audit in zip(documents, summary_texts, audit_results):
        results.append({
            "document": doc[:100],  # Preview
            "summary": summary,
            "verdict": audit.verdict,
            "confidence": audit.height
        })

    return results
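
Calling the function with a list of raw document strings returns one audited summary per document (the sample strings below are placeholders):

# Usage
documents = [
    "Quarterly revenue grew 12% driven by subscription renewals...",
    "The clinical trial enrolled 240 patients across three sites...",
]
for item in process_documents_with_audit(documents):
    print(item["verdict"], f"confidence={item['confidence']:.2f}")
    print(item["summary"][:200])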

Next Steps