LangChain & LangGraph Integration
Humancheck provides built-in adapters for integrating with LangChain and LangGraph agents. This allows your agents to automatically request human review for tool calls that require approval.

Overview

The LangChain integration works by:
  1. Intercepting tool calls from your agent
  2. Converting them to Humancheck reviews
  3. Waiting for human decisions (or checking back later)
  4. Resuming agent execution with the decision

Installation

Install Humancheck with LangChain dependencies:
pip install humancheck[langchain]
Or with Poetry:
poetry add "humancheck[langchain]"

Basic Integration

Simple Example

from humancheck.adapters.langchain_hitl import LangChainHITLAdapter
from humancheck.database import init_db
from humancheck.config import get_config

# Initialize database
config = get_config()
db = init_db(config.get_database_url())
# NOTE(review): top-level `await` only works inside an async context
# (e.g. an async REPL or an `async def main()`); wrap accordingly in a script.
await db.create_tables()

# Create adapter
adapter = LangChainHITLAdapter(db.session)

# Configure which tools require approval.
# Each value is the list of decisions a reviewer may take for that tool;
# None means the tool executes without any human review.
tool_approval_rules = {
    "write_file": ["approve", "edit", "reject"],
    "execute_sql": ["approve", "reject"],
    "send_email": ["approve", "edit", "reject"],
    "read_data": None,  # No approval needed
}

# In your agent execution
async def execute_with_review(tool_calls):
    """Execute tool calls with human review.

    Tool calls whose entry in ``tool_approval_rules`` is a non-empty list
    of decisions are routed through Humancheck; all others run directly.

    Args:
        tool_calls: iterable of dicts with at least a "name" key.

    Returns:
        list: one result per tool call, in input order.
    """
    results = []
    for tool_call in tool_calls:
        tool_name = tool_call["name"]

        # A rule of None (e.g. "read_data") or an unknown tool means no
        # approval is needed — the original membership test wrongly sent
        # None-ruled tools into review.
        allowed_decisions = tool_approval_rules.get(tool_name)
        if allowed_decisions:
            # Create review
            review = await adapter.create_review_from_tool_call(
                tool_call,
                allowed_decisions
            )

            # Wait for decision (5-minute timeout)
            decision = await adapter.handle_blocking(review.id, timeout=300)

            # Process decision
            if decision["decision_type"] == "approve":
                # Execute tool call as-is
                result = await execute_tool(tool_call)
            elif decision["decision_type"] == "modify":
                # Use modified arguments supplied by the reviewer
                modified_call = {**tool_call, "args": decision["args"]}
                result = await execute_tool(modified_call)
            else:
                # Rejected
                result = {"error": "Tool call rejected"}
        else:
            # No approval needed
            result = await execute_tool(tool_call)

        # Fix: collect each result — the original dropped them and then
        # returned an undefined name `results`.
        results.append(result)

    return results

LangGraph Integration

Using with LangGraph HITL

Humancheck integrates with LangGraph’s Human-in-the-Loop (HITL) feature:
from langgraph.graph import StateGraph
from langgraph.checkpoint.memory import MemorySaver
from humancheck.adapters.langchain_hitl import LangChainHITLAdapter

# Create adapter
# NOTE(review): `db_session_factory` is not defined in this snippet —
# presumably the session factory from the database setup above; confirm.
adapter = LangChainHITLAdapter(db_session_factory)

# Create HITL interrupt handler
async def handle_hitl_interrupt(state, config):
    """Route the state's pending tool calls through Humancheck.

    Creates one review per tool call, then blocks on each review in order
    and returns the collected decisions.
    """
    pending_calls = state.get("tool_calls", [])

    # One review per pending tool call.
    reviews = [
        await adapter.create_review_from_tool_call(
            call,
            allowed_decisions=["approve", "edit", "reject"]
        )
        for call in pending_calls
    ]

    # Block on each review sequentially (5-minute timeout apiece).
    return [
        await adapter.handle_blocking(review.id, timeout=300)
        for review in reviews
    ]

# In your LangGraph workflow
# Build the workflow: tool execution flows into the HITL review node,
# which resumes the rest of the workflow once decisions arrive.
graph = StateGraph(...)
graph.add_node("tool_execution", execute_tools)
graph.add_node("hitl_review", handle_hitl_interrupt)

# Add interrupt point
graph.add_edge("tool_execution", "hitl_review")
# NOTE(review): "continue_workflow" must be added as a node elsewhere.
graph.add_edge("hitl_review", "continue_workflow")

Complete Example

Here’s a complete example of integrating Humancheck with a LangChain agent:
import asyncio
from langchain.agents import AgentExecutor
from langchain.tools import Tool
from humancheck.adapters.langchain_hitl import LangChainHITLAdapter
from humancheck.database import init_db
from humancheck.config import get_config

async def create_agent_with_humancheck():
    """Create a LangChain agent with Humancheck integration.

    Returns:
        tuple: ``(agent, execute_with_review)`` — the ``AgentExecutor``
        plus an async helper that runs one tool call, pausing for human
        review when the tool is approval-gated.
    """

    # Initialize Humancheck
    config = get_config()
    db = init_db(config.get_database_url())
    await db.create_tables()

    adapter = LangChainHITLAdapter(db.session)

    # Define tools; the first two are approval-gated below.
    tools = [
        Tool(
            name="execute_sql",
            func=lambda query: f"SQL: {query}",
            description="Execute SQL query (requires approval)"
        ),
        Tool(
            name="send_email",
            func=lambda to, subject, body: f"Email sent to {to}",
            description="Send email (requires approval)"
        ),
        Tool(
            name="read_file",
            func=lambda path: f"File content: {path}",
            description="Read file (no approval needed)"
        ),
    ]

    # Wrap tool execution with Humancheck
    async def execute_with_review(tool_name, tool_input):
        """Execute one tool call, blocking for human review if gated."""
        tool_call = {
            "name": tool_name,
            "arguments": tool_input,
            # Fix: LangChain's Tool exposes `.description`, not `.desc`.
            "description": next(
                t.description for t in tools if t.name == tool_name
            )
        }

        # Tools requiring approval
        approval_required = ["execute_sql", "send_email"]

        if tool_name in approval_required:
            # Create review
            review = await adapter.create_review_from_tool_call(
                tool_call,
                allowed_decisions=["approve", "edit", "reject"]
            )

            print(f"Review created (ID: {review.id})")
            print("View in dashboard: http://localhost:8501")

            # Wait for decision (5-minute timeout)
            decision = await adapter.handle_blocking(review.id, timeout=300)

            # NOTE(review): the allowed decision is "edit" but the type
            # checked here is "modify" — assumes the adapter maps
            # "edit" -> "modify"; confirm against LangChainHITLAdapter.
            if decision["decision_type"] == "approve":
                # Execute tool as-is
                tool = next(t for t in tools if t.name == tool_name)
                return tool.func(**tool_input)
            elif decision["decision_type"] == "modify":
                # Use reviewer-modified arguments, falling back to the
                # original input if none were supplied.
                modified_input = decision.get("args", tool_input)
                tool = next(t for t in tools if t.name == tool_name)
                return tool.func(**modified_input)
            else:
                return {"error": "Tool call rejected by reviewer"}
        else:
            # No approval needed
            tool = next(t for t in tools if t.name == tool_name)
            return tool.func(**tool_input)

    # Create agent executor
    agent = AgentExecutor(
        tools=tools,
        # ... other agent configuration
    )

    return agent, execute_with_review

# Usage
async def main():
    """Demo entry point: run the agent once with a review-gated query."""
    agent, execute_fn = await create_agent_with_humancheck()

    # Run agent
    result = await agent.arun("Execute SELECT * FROM users")

    print(result)

# Guard the entry point so importing this module does not start the
# event loop (the original ran asyncio.run unconditionally on import).
if __name__ == "__main__":
    asyncio.run(main())

Tool Approval Configuration

Configure Approval Rules

# Map each tool name to the list of decisions a reviewer may take;
# None disables review entirely for that tool.
tool_approval_rules = {
    # Tool name: allowed decisions
    "execute_sql": ["approve", "reject"],  # No editing
    "write_file": ["approve", "edit", "reject"],  # All decisions
    "send_email": ["approve", "edit", "reject"],
    "delete_data": ["approve", "reject"],
    "read_file": None,  # No approval needed
}

Conditional Approval

def requires_approval(tool_name, tool_args):
    """Return True when this tool call must be routed to a human reviewer.

    SQL calls need approval when the query contains a destructive keyword;
    file writes need approval when the target is under /etc/ or /var/.
    Everything else runs without review.
    """
    if tool_name == "execute_sql":
        # Flag destructive SQL (substring match, case-insensitive).
        sql = tool_args.get("query", "").lower()
        return any(word in sql for word in ("delete", "drop", "truncate"))
    if tool_name == "write_file":
        # Flag writes into system directories.
        target = tool_args.get("path", "")
        return target.startswith(("/etc/", "/var/"))
    return False

Handling Multiple Tool Calls

async def handle_multiple_tool_calls(tool_calls):
    """Create one review per tool call, then block until each is decided.

    Returns:
        list: dicts of the form {"tool_call": ..., "decision": ...},
        paired in the same order as *tool_calls*.
    """
    reviews = []
    for tool_call in tool_calls:
        review = await adapter.create_review_from_tool_call(
            tool_call,
            allowed_decisions=["approve", "edit", "reject"]
        )
        reviews.append(review)

    # Wait for all decisions. Fix: pair each decision with its own tool
    # call — the original reused the stale loop variable, attaching the
    # LAST tool call to every decision.
    decisions = []
    for tool_call, review in zip(tool_calls, reviews):
        decision = await adapter.handle_blocking(review.id, timeout=300)
        decisions.append({
            "tool_call": tool_call,
            "decision": decision
        })

    return decisions

Non-blocking Pattern

For async workflows where you don’t want to block:
async def create_review_non_blocking(tool_call):
    """Open a review for *tool_call* and return its id without waiting.

    The caller can check for a decision later (see check_decision /
    poll_for_decision below) instead of blocking here.
    """
    new_review = await adapter.create_review_from_tool_call(
        tool_call,
        allowed_decisions=["approve", "edit", "reject"]
    )
    return new_review.id

# Later, check for decision
# Later, check for decision
async def check_decision(review_id):
    """Return the decision for *review_id*, or None while still pending."""
    decision = await adapter.get_decision(review_id)
    # Normalize any falsy result to None so callers can test `is None`.
    return decision if decision else None

# Poll periodically
# Poll periodically
async def poll_for_decision(review_id):
    """Block until a decision is available, polling every five seconds."""
    while True:
        if result := await check_decision(review_id):
            return result
        # Still pending — back off before asking again.
        await asyncio.sleep(5)

Best Practices

  1. Define clear approval rules: Specify which tools require approval and which decisions are allowed
  2. Provide context: Include agent reasoning and confidence scores in reviews
  3. Use appropriate urgency: Set urgency based on the impact of the tool call
  4. Handle timeouts: Implement timeout handling for blocking requests
  5. Test with dashboard: Use the Streamlit dashboard to test your integration

Next Steps