LLM Provider Integration
Secure direct integrations with Anthropic Claude, OpenAI GPT, and other LLM providers. AIM auto-detects your provider from imports.
Auto-Detection from Imports
The AIM SDK automatically detects which LLM provider you're using based on your Python imports:
import anthropic → Agent type: anthropic
import openai → Agent type: openai
import google.generativeai → Agent type: google
import cohere → Agent type: cohere
Anthropic Claude Integration
Secure your Anthropic Claude API calls with automatic request/response logging:
from aim_sdk import secure
import anthropic # SDK auto-detects this!
# Initialize secure agent (auto-detected as "anthropic")
agent = secure("claude-assistant")
client = anthropic.Anthropic()  # reads ANTHROPIC_API_KEY from the environment by default
@agent.perform_action(capability="llm:chat", risk_level="low")
def chat_with_claude(message: str) -> str:
    """Send a single user message to Claude; the call is verified and logged."""
    user_turn = {"role": "user", "content": message}
    reply = client.messages.create(
        model="claude-sonnet-4-20250514",
        max_tokens=1024,
        messages=[user_turn],
    )
    # The reply's first content block is the assistant's text answer.
    return reply.content[0].text
@agent.perform_action(capability="llm:analyze", risk_level="medium")
def analyze_document(document: str, question: str) -> str:
    """Ask Claude a question about a document; every call leaves an audit trail."""
    prompt = f"Document:\n{document}\n\nQuestion: {question}"
    reply = client.messages.create(
        model="claude-sonnet-4-20250514",
        max_tokens=2048,
        messages=[{"role": "user", "content": prompt}],
    )
    # Return the text of the first (and only) content block.
    return reply.content[0].text
# Usage: the decorated function is called like any plain function.
result = chat_with_claude("What is the capital of France?")
print(result)
Tool Use with Claude
from aim_sdk import secure
import anthropic
# Agent type auto-detected as "anthropic" from the import above.
agent = secure("claude-tool-agent")
client = anthropic.Anthropic()
# Define tools (Anthropic tool-use schema: name, description, JSON input schema)
tools = [
    {
        "name": "get_weather",
        "description": "Get current weather for a location",
        "input_schema": {
            "type": "object",
            "properties": {
                "location": {"type": "string", "description": "City name"}
            },
            # Claude must always supply a location when calling this tool.
            "required": ["location"]
        }
    }
]
@agent.perform_action(capability="weather:fetch", risk_level="low")
def get_weather(location: str) -> str:
    """Return a (stubbed) weather report; execution is secured and logged."""
    report = f"Weather in {location}: Sunny, 72°F"
    return report
@agent.perform_action(capability="llm:tool_use", risk_level="medium")
def chat_with_tools(message: str) -> str:
    """Claude with tool use - all tool calls logged.

    Fixes over the original example:
    - The executed tool's result was assigned but never used; it is now
      returned to the caller (this example does not continue the
      conversation with a tool_result turn).
    - ``response.content[0].text`` raised AttributeError when the first
      block was a ``tool_use`` block; only ``text`` blocks are read now.
    """
    response = client.messages.create(
        model="claude-sonnet-4-20250514",
        max_tokens=1024,
        tools=tools,
        messages=[{"role": "user", "content": message}]
    )
    # Handle tool use: execute the secured tool and surface its result.
    for block in response.content:
        if block.type == "tool_use" and block.name == "get_weather":
            return get_weather(block.input["location"])
    # No tool requested: concatenate the text blocks of the reply.
    return "".join(b.text for b in response.content if b.type == "text")