Skip to main content

Agent

The main class for creating LLM agents.

Constructor

Agent(
# Core
model: str | None = None,
tools: list[Tool] | None = None,
api_key: str | None = None,
max_steps: int = 10,
system_prompt: str | None = None,
template: str | None = None,
debug: bool = False,
provider: str = "mistral",
session: str | None = None,
memory: Memory | None = None,
# Hooks
on_tool_call: Callable | None = None,
on_tool_result: Callable | None = None,
on_thinking: Callable | None = None,
on_plan: Callable | None = None,
# Reliability
retries: int = 0,
timeout: float | None = None,
max_messages: int | None = None,
fallback: str | None = None,
cache: bool = False,
# Tool control
tool_choice: str | None = None,
enabled_groups: list[str] | None = None,
# Validation
validator: Callable[[str], bool] | None = None,
validation_retries: int = 0,
)

Parameters

Core

| Parameter | Type | Default | Description |
|---|---|---|---|
| `model` | `str` | Provider default | Model to use |
| `tools` | `list[Tool]` | `None` | Tools the agent can use |
| `api_key` | `str` | From env | API key |
| `max_steps` | `int` | `10` | Max ReAct loop iterations |
| `system_prompt` | `str` | Auto | Custom system prompt |
| `template` | `str` | `None` | Predefined template |
| `debug` | `bool` | `False` | Print debug info |
| `provider` | `str` | `"mistral"` | LLM provider |
| `session` | `str` | `None` | Session ID for persistence |
| `memory` | `Memory` | `JSONMemory` | Custom memory backend |

Hooks

| Parameter | Type | Description |
|---|---|---|
| `on_tool_call` | `Callable[[str, dict], None]` | Before tool execution |
| `on_tool_result` | `Callable[[str, str], None]` | After tool execution |
| `on_thinking` | `Callable[[str], None]` | On intermediate text |
| `on_plan` | `Callable[[str], None]` | When plan is created (with `plan=True`) |

Reliability

| Parameter | Type | Default | Description |
|---|---|---|---|
| `retries` | `int` | `0` | Retry attempts on failure |
| `timeout` | `float` | `None` | Request timeout (seconds) |
| `max_messages` | `int` | `None` | Max messages in context |
| `fallback` | `str` | `None` | Fallback provider |
| `cache` | `bool` | `False` | Enable response caching |

Tool Control

| Parameter | Type | Default | Description |
|---|---|---|---|
| `tool_choice` | `str` | `None` | `"auto"`, `"required"`, or `"none"` |
| `enabled_groups` | `list[str]` | `None` | Enabled tool groups |

Validation

| Parameter | Type | Default | Description |
|---|---|---|---|
| `validator` | `Callable[[str], bool]` | `None` | Response validator |
| `validation_retries` | `int` | `0` | Retries on validation fail |

Methods

run

async def run(
self,
prompt: str,
output: type[T] | None = None,
images: list[str] | None = None,
plan: bool = False,
) -> str | T

Run the agent. Returns response or dataclass instance.

| Parameter | Type | Default | Description |
|---|---|---|---|
| `prompt` | `str` | Required | The prompt |
| `output` | `type` | `None` | Dataclass for structured output |
| `images` | `list[str]` | `None` | Image paths to include |
| `plan` | `bool` | `False` | Create plan before executing |

run_sync

def run_sync(prompt, output=None, images=None, plan=False) -> str | T

Synchronous version of run().

batch

async def batch(self, prompts: list[str]) -> list[str]

Run multiple prompts in parallel.

stream

async def stream(self, prompt: str) -> AsyncIterator[str]

Stream response token by token.

save / load / clear

def save(self, path: str) -> None
def load(self, path: str) -> None
def clear(self) -> None

Manage conversation history.

enable_group / disable_group

def enable_group(self, group: str) -> None
def disable_group(self, group: str) -> None

Dynamically enable/disable tool groups.

Attributes

| Attribute | Type | Description |
|---|---|---|
| `messages` | `list[Message]` | Conversation history |
| `tools` | `dict[str, Tool]` | Active tools |
| `usage` | `Usage` | Token usage tracking |

Example

from pure_agents import Agent, tool

@tool(timeout=10, group="search")
def search(query: str) -> str:
"""Search the web."""
return f"Results for {query}"

agent = Agent(
provider="openai",
fallback="mistral",
template="researcher",
tools=[search],
retries=2,
timeout=30.0,
cache=True,
tool_choice="auto",
on_tool_call=lambda n, a: print(f"Calling {n}"),
)

result = await agent.run("Search for Python tutorials")
print(f"Tokens: {agent.usage.total_tokens}")
print(f"Cost: ${agent.usage.cost('openai'):.4f}")