🔌 Framework Integrations
Wrap your existing AI agents with kernel-level governance in one line
Agent OS provides drop-in integrations for the major AI agent frameworks. Each integration wraps your existing agents with kernel-level governance, so your agent code stays unchanged; the only addition is a single wrap call.
# The universal pattern: wrap any agent with one line
from agent_os.integrations import <framework>_kernel
governed_agent = <framework>_kernel.wrap(your_agent)
$ pip install agent-os-kernel[all]
Supported Frameworks
LangChain
Full support for LangChain agents, chains, and tools with automatic policy enforcement.
from langchain.agents import AgentExecutor
from agent_os.integrations import langchain_kernel
# Your existing LangChain agent
agent = AgentExecutor.from_agent_and_tools(
agent=llm_agent,
tools=tools
)
# Wrap with Agent OS governance
governed = langchain_kernel.wrap(
agent,
policy="strict",
audit_log=True
)
# Execute with kernel protection
result = governed.invoke({"input": "analyze data"})
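If a requested action violates the active policy, the kernel blocks it before it reaches the agent or its tools. A minimal sketch of handling that case, assuming the LangChain wrapper surfaces the same PolicyViolation exception used by the custom adapters later on this page:
from agent_os.core import PolicyViolation

try:
    result = governed.invoke({"input": "delete all customer records"})
except PolicyViolation as violation:
    # Inspect or log the violation instead of letting the agent proceed
    print(f"Blocked by policy: {violation}")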
CrewAI
Multi-agent crew governance with inter-agent trust and coordinated policy enforcement.
from crewai import Crew, Agent, Task
from agent_os.integrations import crewai_kernel
# Your existing CrewAI setup
researcher = Agent(role="Researcher", ...)
writer = Agent(role="Writer", ...)
crew = Crew(agents=[researcher, writer], tasks=[...])
# Wrap entire crew with governance
governed_crew = crewai_kernel.wrap(
crew,
trust_protocol="iatp",
isolation="process"
)
# All agents governed by kernel
result = governed_crew.kickoff()
AutoGen
Govern Microsoft AutoGen conversations with message interception and code execution controls.
from autogen import AssistantAgent, UserProxyAgent
from agent_os.integrations import autogen_kernel
# Your existing AutoGen agents
assistant = AssistantAgent("assistant", llm_config={...})
user_proxy = UserProxyAgent("user", code_execution_config={...})
# Wrap with Agent OS
governed_assistant = autogen_kernel.wrap(
assistant,
code_policy="sandboxed",
max_iterations=10
)
# Safe multi-agent chat
user_proxy.initiate_chat(governed_assistant, message="...")
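The proxy that actually executes code can be wrapped the same way. A sketch, assuming autogen_kernel.wrap accepts any AutoGen conversable agent and returns an object that behaves like the original:
# Wrap the user proxy so its code execution is also sandboxed
governed_proxy = autogen_kernel.wrap(
    user_proxy,
    code_policy="sandboxed"
)

# Both sides of the conversation are now governed
governed_proxy.initiate_chat(governed_assistant, message="...")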
OpenAI Assistants
Wrap OpenAI Assistants API with kernel-level tool call interception and file access controls.
from openai import OpenAI
from agent_os.integrations import openai_kernel
client = OpenAI()
# Create assistant through governed client
governed_client = openai_kernel.wrap(
client,
policy={
"file_access": ["read"],
"code_interpreter": True,
"retrieval": True,
"function_calling": "restricted"
}
)
# All assistant actions pass through kernel
assistant = governed_client.beta.assistants.create(
name="Data Analyst",
instructions="Analyze uploaded data files",
tools=[{"type": "code_interpreter"}],
model="gpt-4-turbo"
)
Semantic Kernel
Microsoft Semantic Kernel integration with plugin governance and memory isolation.
import semantic_kernel as sk
from semantic_kernel.connectors.ai.open_ai import AzureChatCompletion
from agent_os.integrations import semantic_kernel_adapter
# Your existing Semantic Kernel setup
kernel = sk.Kernel()
kernel.add_chat_service("chat", AzureChatCompletion(...))
kernel.import_plugin(MyPlugin(), "my_plugin")
# Wrap with Agent OS governance
governed_kernel = semantic_kernel_adapter.wrap(
kernel,
policy="enterprise",
plugin_allowlist=["my_plugin", "core"]
)
# Plugin calls filtered by kernel policy
result = await governed_kernel.invoke(
governed_kernel.plugins["my_plugin"]["process"],
input="sensitive data"
)
LlamaIndex
Govern LlamaIndex query engines and agents with data access policies and retrieval controls.
from llama_index.core import VectorStoreIndex
from llama_index.agent.openai import OpenAIAgent
from agent_os.integrations import llamaindex_kernel
# Your existing LlamaIndex setup
index = VectorStoreIndex.from_documents(documents)
query_engine = index.as_query_engine()
# Wrap with data access governance
governed_engine = llamaindex_kernel.wrap(
query_engine,
data_policy={
"pii_filter": True,
"source_attribution": "required",
"max_context_docs": 5
}
)
# Queries governed by kernel
response = governed_engine.query("What are our Q4 results?")
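The section intro above also covers agents. A sketch of wrapping a LlamaIndex agent with the same data policy, using the OpenAIAgent import above and assuming llamaindex_kernel.wrap accepts agents as well as query engines (query_engine_tools is a hypothetical list of tools built from your indexes):
# Your existing LlamaIndex agent (query_engine_tools is a placeholder)
agent = OpenAIAgent.from_tools(query_engine_tools, verbose=True)

# Wrap with the same data access governance
governed_agent = llamaindex_kernel.wrap(
    agent,
    data_policy={
        "pii_filter": True,
        "source_attribution": "required"
    }
)

# Agent tool calls and retrievals are governed by the kernel
response = governed_agent.chat("Summarize customer feedback from Q4")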
Integration Status
Current availability and stability of each integration.
IDE Extensions
Develop governed agents with full IDE support.
VS Code Extension
IntelliSense for policies, real-time violation warnings, and an integrated flight recorder viewer.
# Install from VS Code Marketplace
ext install agent-os.vscode-agent-os
# Features:
# • Policy autocomplete & validation
# • Inline violation warnings
# • Flight recorder timeline view
# • Quick-fix suggestions
# • Kernel status in status bar
JetBrains Plugin
Full support for PyCharm and IntelliJ IDEA, with agent debugging and policy inspection.
# Install from JetBrains Marketplace
# Preferences → Plugins → Agent OS
# Features:
# • Advanced policy editor
# • Agent execution debugger
# • Memory graph visualizer
# • Trust chain inspector
# • Integrated kernel console
Cursor AI Integration
AI-assisted policy generation and automatic governance suggestions in Cursor.
# Cursor automatically detects Agent OS
# AI Features:
# • "Generate policy for this agent"
# • "Add HIPAA compliance rules"
# • "Explain this violation"
# • "Suggest safer alternative"
# • Auto-import kernel decorators
Creating Custom Integrations
Build your own integration for any framework using the Agent OS adapter API.
Implement the Adapter Interface
from agent_os.integrations.base import BaseAdapter, AdapterConfig
from agent_os import KernelSpace
from typing import Any

class MyFrameworkAdapter(BaseAdapter):
    """Custom adapter for MyFramework agents."""

    def __init__(self, config: AdapterConfig | None = None):
super().__init__(config or AdapterConfig())
self.kernel = KernelSpace(policy=self.config.policy)
def wrap(self, agent: Any, **kwargs) -> Any:
"""Wrap a MyFramework agent with kernel governance."""
# Store original execution method
original_execute = agent.execute
# Create governed wrapper
@self.kernel.register
async def governed_execute(task: str):
# Pre-execution hook
self.kernel.emit("agent:execute:start", {"task": task})
# Execute through kernel
result = await original_execute(task)
# Post-execution hook
self.kernel.emit("agent:execute:complete", {"result": result})
return result
# Replace execution method
agent.execute = governed_execute
return agent
Add Action Interception
from agent_os.core import PolicyViolation
class MyFrameworkAdapter(BaseAdapter):
def _intercept_tool_call(self, tool_name: str, args: dict) -> dict:
"""Intercept and validate tool calls before execution."""
# Check against kernel policy
decision = self.kernel.check_action({
"type": "tool_call",
"tool": tool_name,
"arguments": args
})
if decision.blocked:
raise PolicyViolation(
f"Tool '{tool_name}' blocked by policy: {decision.reason}"
)
# Apply any argument transformations
if decision.modified_args:
args = decision.modified_args
return args
    def _wrap_tools(self, agent: Any) -> None:
        """Wrap all agent tools with interception."""
        for tool in agent.tools:
            original_fn = tool.function

            # Bind the current tool's name and function as default arguments so
            # each wrapper keeps its own tool (avoids the late-binding closure bug)
            def governed_fn(*args, _tool_name=tool.name, _fn=original_fn, **kwargs):
                kwargs = self._intercept_tool_call(_tool_name, kwargs)
                return _fn(*args, **kwargs)

            tool.function = governed_fn
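To connect the two steps, one option is to call _wrap_tools from the wrap() method shown in step 1, before the execute method is replaced. A sketch that merges the two (same class, shown piecewise as above):
class MyFrameworkAdapter(BaseAdapter):
    def wrap(self, agent: Any, **kwargs) -> Any:
        """Wrap a MyFramework agent with kernel governance."""
        # Intercept every tool call first (step 2) ...
        self._wrap_tools(agent)

        # ... then govern the top-level execute method as in step 1
        original_execute = agent.execute

        @self.kernel.register
        async def governed_execute(task: str):
            self.kernel.emit("agent:execute:start", {"task": task})
            result = await original_execute(task)
            self.kernel.emit("agent:execute:complete", {"result": result})
            return result

        agent.execute = governed_execute
        return agent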
Register and Use
from agent_os.integrations import register_adapter
# Register your custom adapter
register_adapter("myframework", MyFrameworkAdapter)
# Now use it like built-in integrations
from agent_os.integrations import myframework_kernel
# Wrap your agent
my_agent = MyFrameworkAgent(...)
governed = myframework_kernel.wrap(my_agent, policy="strict")
# Execute with full kernel protection (awaited, since the governed
# execute method from step 1 is async)
result = await governed.execute("process sensitive data")
Ready to Govern Your Agents?
Get started with your preferred framework in minutes.