Skip to content

Python SDK

Installation

Install with your preferred package manager:
pip install archetypal-govern
# or with poetry
poetry add archetypal-govern
# or with uv
uv add archetypal-govern

Requires Python 3.9+.

Initialization

import os

from archetypal_govern import GovernClient

# Create a client once at startup and reuse it; credentials come from the
# environment so they never appear in source control.
govern = GovernClient(
    api_key=os.environ["GOVERN_API_KEY"],
    org_id=os.environ["GOVERN_ORG_ID"],
    # Optional
    base_url="https://api.govern.archetypal.ai",  # default
    timeout=5.0,        # seconds, default: 5.0
    mode="flag",        # log | flag | block, default: uses org policy
    fail_open=True,     # default: True
)

Basic assessment

import os

import anthropic
from archetypal_govern import GovernClient, PolicyViolationError

client = anthropic.Anthropic()
govern = GovernClient(
    api_key=os.environ["GOVERN_API_KEY"],
    org_id=os.environ["GOVERN_ORG_ID"],
)


def summarize(user_input: str, current_user) -> str:
    """Generate a model response, assess it, and return the text.

    Raises:
        PolicyViolationError: if the assessment's action is "block".
    """
    message = client.messages.create(
        model="claude-sonnet-4-20250514",
        max_tokens=1024,
        messages=[{"role": "user", "content": user_input}],
    )
    text = message.content[0].text
    assessment = govern.assess(
        model="claude-sonnet-4-20250514",
        prompt=[{"role": "user", "content": user_input}],
        response=text,
        metadata={
            # Attach request context so violations can be traced back.
            "user_id": current_user.id,
            "feature": "document-summary",
        },
    )
    if assessment.action == "block":
        raise PolicyViolationError(assessment.violations)
    return text

Async support

import asyncio
import os

import anthropic
from archetypal_govern import PolicyViolationError
from archetypal_govern.async_client import AsyncGovernClient

govern = AsyncGovernClient(
    api_key=os.environ["GOVERN_API_KEY"],
    org_id=os.environ["GOVERN_ORG_ID"],
)


async def process_with_governance(user_input: str) -> str:
    """Generate a model response and assess it before returning the text.

    Raises:
        PolicyViolationError: if the assessment's action is "block".
    """
    client = anthropic.AsyncAnthropic()
    message = await client.messages.create(
        model="claude-sonnet-4-20250514",
        max_tokens=1024,
        messages=[{"role": "user", "content": user_input}],
    )
    text = message.content[0].text
    assessment = await govern.assess(
        model="claude-sonnet-4-20250514",
        prompt=[{"role": "user", "content": user_input}],
        response=text,
    )
    if assessment.action == "block":
        raise PolicyViolationError(assessment.violations)
    return text

FastAPI integration

from fastapi import FastAPI, HTTPException
from archetypal_govern import GovernClient
from archetypal_govern.middleware import GovernFastAPIMiddleware

app = FastAPI()
govern = GovernClient(...)  # configure as shown under Initialization

app.add_middleware(
    GovernFastAPIMiddleware,
    govern=govern,
    # Tell the middleware where the prompt/response live in the JSON bodies.
    extract_prompt=lambda req, body: body.get("messages"),
    extract_response=lambda req, resp_body: resp_body.get("content"),
)


@app.post("/api/chat")
async def chat(body: ChatRequest):  # ChatRequest: your own request model
    response = await call_model(body.messages)  # call_model: your model wrapper
    # Middleware handles assessment automatically
    return {"content": response}

Batch assessment

import sys

# Assess prompt/response pairs in one batched call; `prompts` and
# `responses` are parallel sequences of equal length.
results = govern.assess_batch([
    {
        "model": "claude-sonnet-4-20250514",
        "prompt": [{"role": "user", "content": prompt}],
        "response": response,
    }
    for prompt, response in zip(prompts, responses)
])

violations = [r for r in results if r.action != "pass"]
print(f"{len(violations)}/{len(results)} assessments have violations")

# Fail CI if violation rate > 5%
if len(violations) / len(results) > 0.05:
    sys.exit(1)

LangChain integration

# Route every LangChain call through Govern with a callback handler.
from langchain_anthropic import ChatAnthropic
from archetypal_govern.langchain import GovernCallbackHandler

# `govern` is a configured GovernClient (see Initialization).
govern_handler = GovernCallbackHandler(
    govern=govern,
    mode="flag",
)

llm = ChatAnthropic(
    model="claude-sonnet-4-20250514",
    callbacks=[govern_handler],
)

# All LangChain calls through this LLM are automatically assessed
response = llm.invoke("Summarize this document...")

LlamaIndex integration

# Wire Govern into LlamaIndex: attaching the observer's callback manager to
# the LLM routes its calls through Govern for assessment.
from llama_index.llms.anthropic import Anthropic
from archetypal_govern.llamaindex import GovernObserver
# NOTE(review): `govern` is assumed to be a configured GovernClient from the
# Initialization section — confirm against the surrounding page.
observer = GovernObserver(govern=govern)
llm = Anthropic(model="claude-sonnet-4-20250514")
llm.set_callback_manager(observer.callback_manager)

Assessment result dataclass

from dataclasses import dataclass
from datetime import datetime
from typing import Literal, Optional


@dataclass
class Scores:
    """Per-dimension governance scores for one assessment."""

    security: float
    bias: float
    accuracy: float
    # Optional[float] (not `float | None`) so the class imports on the
    # documented minimum of Python 3.9, where `X | Y` raises at runtime.
    drift: Optional[float]
    cost: float


@dataclass
class AssessmentResult:
    """Result of a single governance assessment."""

    assessment_id: str
    action: Literal["pass", "flag", "block"]
    scores: Scores  # defined above so the annotation resolves at class creation
    # "Violation" is quoted: the type is declared elsewhere in the SDK and is
    # not in scope in this snippet — TODO confirm its import path.
    violations: list["Violation"]
    latency_ms: int
    model: str
    timestamp: datetime