import asyncio
import uuid

import httpx

async def moderate_content_workflow(content, content_type="text"):
    """Moderate content with human review for borderline cases."""
    async with httpx.AsyncClient() as client:
        # Analyze content with AI. `confidence` is the model's confidence
        # that the content is safe to publish (1.0 = certainly safe).
        analysis = await analyze_content(content, content_type)
        confidence = analysis["confidence"]
        violation_type = analysis["violation_type"]

        # Thresholds for auto-decision vs. human review
        AUTO_APPROVE_THRESHOLD = 0.95
        AUTO_REJECT_THRESHOLD = 0.05

        if AUTO_REJECT_THRESHOLD < confidence < AUTO_APPROVE_THRESHOLD:
            # Borderline case - request human review
            response = await client.post(
                "https://api.humancheck.dev/reviews",
                headers={
                    "Authorization": "Bearer your-api-key-here",
                    "Content-Type": "application/json",
                },
                json={
                    "task_type": "content_moderation",
                    "proposed_action": f"Flag content as {violation_type or 'possible violation'}",
                    "agent_reasoning": (
                        f"Borderline confidence ({confidence:.2%}) that the "
                        "content is safe. Content may violate policy."
                    ),
                    "confidence_score": confidence,
                    "urgency": "high",  # Content moderation is time-sensitive
                    "metadata": {
                        "content": content[:500],  # First 500 chars
                        "content_type": content_type,
                        "violation_type": violation_type,
                        "content_length": len(content),
                        "analysis_details": analysis,
                        "user_id": analysis.get("user_id"),
                        "content_id": analysis.get("content_id"),
                    },
                    "blocking": True,  # Wait for decision before publishing
                },
            )
            response.raise_for_status()
            review = response.json()

            # A blocking review should return the decision inline; fall back
            # to fetching it from the decision endpoint if it is missing
            decision = review.get("decision")
            if not decision:
                decision_response = await client.get(
                    f"https://api.humancheck.dev/reviews/{review['id']}/decision",
                    headers={"Authorization": "Bearer your-api-key-here"},
                )
                decision_response.raise_for_status()
                decision = decision_response.json()
            # Apply decision. Use the content_id from our own analysis rather
            # than relying on the API echoing metadata back.
            content_id = analysis["content_id"]
            if decision["decision_type"] == "approve":
                # Approve content (allow publishing)
                await approve_content(content_id)
                return {"status": "approved", "content_id": content_id}
            elif decision["decision_type"] == "modify":
                # Apply modification (e.g., redact specific parts)
                modified_content = decision.get("modified_action")
                await update_content(content_id, modified_content)
                return {"status": "modified", "content_id": content_id}
            else:
                # Reject content
                await reject_content(content_id, decision.get("notes"))
                return {"status": "rejected", "reason": decision.get("notes")}
        elif confidence >= AUTO_APPROVE_THRESHOLD:
            # High confidence the content is safe - auto-approve
            await approve_content(analysis["content_id"])
            return {"status": "auto_approved"}
        else:
            # Confidence at or below the reject threshold means a likely
            # violation - auto-reject
            await reject_content(analysis["content_id"], "High confidence violation detected")
            return {"status": "auto_rejected"}

async def analyze_content(content, content_type):
    """Analyze content for policy violations."""
    # Your AI moderation logic here. This is a simplified example with
    # hard-coded category probabilities that sum to 1.0.
    scores = {
        "hate_speech": 0.3,
        "spam": 0.1,
        "harassment": 0.2,
        "none": 0.4,
    }

    # The most likely category, and the confidence that the content is safe
    # (the probability mass assigned to "none"), which is what the
    # thresholds in moderate_content_workflow compare against
    top_category = max(scores, key=scores.get)
    confidence = scores["none"]

    return {
        "confidence": confidence,
        "violation_type": None if top_category == "none" else top_category,
        "content_id": generate_content_id(),
        "user_id": extract_user_id(content),
    }
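
# `generate_content_id` and `extract_user_id` are also left undefined above.
# Minimal placeholder sketches, assuming content IDs are random UUIDs and
# the author is not recoverable from raw text:
def generate_content_id():
    """Placeholder: generate a unique ID for the analyzed content."""
    return str(uuid.uuid4())


def extract_user_id(content):
    """Placeholder: look up the author of the content, if known."""
    return None


# Example usage: with the hard-coded scores above (confidence 0.40), this
# takes the human-review path, so it makes a real API call and needs a
# valid HumanCheck API key in place of the placeholder token.
if __name__ == "__main__":
    result = asyncio.run(moderate_content_workflow("Example user post"))
    print(result)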