Python SDK
Installation
pip install archetypal-govern
# or with poetry
poetry add archetypal-govern

# or with uv
uv add archetypal-govern

Requires Python 3.9+.
Initialization
from archetypal_govern import GovernClient
govern = GovernClient(
    api_key=os.environ["GOVERN_API_KEY"],
    org_id=os.environ["GOVERN_ORG_ID"],
    # Optional
    base_url="https://api.govern.archetypal.ai",  # default
    timeout=5.0,      # seconds, default: 5.0
    mode="flag",      # log | flag | block, default: uses org policy
    fail_open=True,   # default: True
)

Basic assessment
import anthropic
from archetypal_govern import GovernClient, PolicyViolationError

client = anthropic.Anthropic()
govern = GovernClient(api_key=os.environ["GOVERN_API_KEY"], org_id=os.environ["GOVERN_ORG_ID"])
message = client.messages.create(
    model="claude-sonnet-4-20250514",
    max_tokens=1024,
    messages=[{"role": "user", "content": user_input}],
)

assessment = govern.assess(
    model="claude-sonnet-4-20250514",
    prompt=[{"role": "user", "content": user_input}],
    response=message.content[0].text,
    metadata={
        "user_id": current_user.id,
        "feature": "document-summary",
    },
)

if assessment.action == "block":
    raise PolicyViolationError(assessment.violations)

return message.content[0].text

Async support
import asyncio
from archetypal_govern.async_client import AsyncGovernClient

govern = AsyncGovernClient(
    api_key=os.environ["GOVERN_API_KEY"],
    org_id=os.environ["GOVERN_ORG_ID"],
)

async def process_with_governance(user_input: str) -> str:
    client = anthropic.AsyncAnthropic()

    message = await client.messages.create(
        model="claude-sonnet-4-20250514",
        max_tokens=1024,
        messages=[{"role": "user", "content": user_input}],
    )

    assessment = await govern.assess(
        model="claude-sonnet-4-20250514",
        prompt=[{"role": "user", "content": user_input}],
        response=message.content[0].text,
    )

    if assessment.action == "block":
        raise PolicyViolationError(assessment.violations)

    return message.content[0].text

FastAPI integration
from fastapi import FastAPI, HTTPException
from archetypal_govern import GovernClient
from archetypal_govern.middleware import GovernFastAPIMiddleware

app = FastAPI()
govern = GovernClient(...)

app.add_middleware(
    GovernFastAPIMiddleware,
    govern=govern,
    extract_prompt=lambda req, body: body.get("messages"),
    extract_response=lambda req, resp_body: resp_body.get("content"),
)

@app.post("/api/chat")
async def chat(body: ChatRequest):
    response = await call_model(body.messages)
    # Middleware handles assessment automatically
    return {"content": response}

Batch assessment
results = govern.assess_batch([
    {
        "model": "claude-sonnet-4-20250514",
        "prompt": [{"role": "user", "content": prompts[i]}],
        "response": responses[i],
    }
    for i in range(len(prompts))
])

violations = [r for r in results if r.action != "pass"]
print(f"{len(violations)}/{len(results)} assessments have violations")

# Fail CI if violation rate > 5%
if len(violations) / len(results) > 0.05:
    sys.exit(1)

LangChain integration
from langchain_anthropic import ChatAnthropic
from archetypal_govern.langchain import GovernCallbackHandler

govern_handler = GovernCallbackHandler(
    govern=govern,
    mode="flag",
)

llm = ChatAnthropic(
    model="claude-sonnet-4-20250514",
    callbacks=[govern_handler],
)

# All LangChain calls through this LLM are automatically assessed
response = llm.invoke("Summarize this document...")

LlamaIndex integration
from llama_index.llms.anthropic import Anthropic
from archetypal_govern.llamaindex import GovernObserver

observer = GovernObserver(govern=govern)

llm = Anthropic(model="claude-sonnet-4-20250514")
llm.set_callback_manager(observer.callback_manager)

Assessment result dataclass
@dataclass
class AssessmentResult:
    assessment_id: str
    action: Literal["pass", "flag", "block"]
    scores: Scores
    violations: list[Violation]
    latency_ms: int
    model: str
    timestamp: datetime
@dataclass
class Scores:
    security: float
    bias: float
    accuracy: float
    drift: float | None
    cost: float