Documentation ¶
Index ¶
- type AgentToolApprovalPolicy
- type Conversation
- type JSONSchema
- type LLMClient
- type LLMProvider
- type LLMRequest
- type LLMResponse
- type LLMStream
- type LLMStreamChunk
- type ListMessagesOption
- type ListMessagesOptions
- type Message
- type MessageRole
- type ResponseFormat
- type ResponseFormatType
- type Tool
- type ToolApproval
- type ToolCall
- type ToolRegistry
- type ToolSpec
Constants ¶
This section is empty.
Variables ¶
This section is empty.
Functions ¶
This section is empty.
Types ¶
type AgentToolApprovalPolicy ¶
AgentToolApprovalPolicy determines whether a tool execution requires approval. Implement it for custom behavior. Built-in policies: agent.RequireAllToolApprovalPolicy (default), agent.AutoToolApprovalPolicy(), agent.AllowlistToolApprovalPolicy(names...).
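A hedged sketch of wiring a policy follows. The agent package's constructor is not documented here, so agent.New and the exact option plumbing are assumptions; the policy constructors are the built-ins named above, and WithToolApprovalPolicy is assumed to live in the agent package (error handling omitted).

ag, err := agent.New(
    // Policy constructors as named above; see the agent package for their exact semantics.
    agent.WithToolApprovalPolicy(agent.AllowlistToolApprovalPolicy("search", "calculator")),
)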
type Conversation ¶
type Conversation interface {
    // AddMessage adds a message to the conversation identified by id. The id is
    // supplied at runtime (e.g. from Run input or a workflow).
    AddMessage(ctx context.Context, id string, msg Message) error
    // ListMessages returns the messages for the conversation identified by id.
    ListMessages(ctx context.Context, id string, opts ...ListMessagesOption) ([]Message, error)
    // Clear removes all messages for the conversation identified by id. Typically
    // called when ending a session.
    Clear(ctx context.Context, id string) error
    // IsDistributed reports whether the implementation uses distributed storage
    // (Redis, Postgres, etc.). In-memory implementations return false. Use a
    // distributed implementation when running remote workers.
    IsDistributed() bool
}
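A minimal usage sketch, assuming an implementation of Conversation is supplied by the caller; the import alias core for this package is illustrative, and imports (context, time) are omitted.

// recordTurn appends a user turn and reads the history back for the next prompt.
func recordTurn(ctx context.Context, conv core.Conversation, sessionID, userText string) ([]core.Message, error) {
    if err := conv.AddMessage(ctx, sessionID, core.Message{
        Role:      core.MessageRoleUser,
        Content:   userText,
        CreatedAt: time.Now(),
    }); err != nil {
        return nil, err
    }
    // Read the full history; see ListMessagesOption for limiting and filtering.
    return conv.ListMessages(ctx, sessionID)
}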
type JSONSchema ¶
func (JSONSchema) MarshalJSON ¶
func (s JSONSchema) MarshalJSON() ([]byte, error)
type LLMClient ¶
type LLMClient interface {
    Generate(ctx context.Context, request *LLMRequest) (*LLMResponse, error)
    GenerateStream(ctx context.Context, request *LLMRequest) (LLMStream, error)
    GetModel() string
    GetProvider() LLMProvider
    // IsStreamSupported returns true if the client supports streaming (e.g. OpenAI, Anthropic).
    IsStreamSupported() bool
}
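A caller sketch, assuming a provider client constructed elsewhere (constructors are provider-specific and not shown here); core is the illustrative alias used in these examples, and the log import is omitted.

// answer issues a single, non-streaming generation and logs which backend served it.
func answer(ctx context.Context, client core.LLMClient, req *core.LLMRequest) (*core.LLMResponse, error) {
    log.Printf("provider=%s model=%s streaming=%t",
        client.GetProvider(), client.GetModel(), client.IsStreamSupported())
    return client.Generate(ctx, req)
}

For clients that support streaming, prefer GenerateStream; see the LLMStream example below.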
type LLMProvider ¶
type LLMProvider string
const (
    LLMProviderOpenAI    LLMProvider = "openai"
    LLMProviderAnthropic LLMProvider = "anthropic"
    LLMProviderGemini    LLMProvider = "gemini"
)
type LLMRequest ¶
type LLMRequest struct {
    SystemMessage  string
    ResponseFormat *ResponseFormat
    Tools          []ToolSpec // Tool specs for the LLM to choose from
    // Messages is the conversation history. For the first turn, use a single user
    // message. To continue after tool use, append the assistant message (with
    // ToolCalls) followed by the tool result messages.
    Messages []Message
    // Sampling (per-request; typically set from agent config). nil/0 = provider default.
    Temperature *float64 // 0-2 OpenAI, 0-1 Anthropic
    MaxTokens   int      // 0 = provider default
    TopP        *float64 // 0-1; OpenAI only
    TopK        *int     // Anthropic only
}
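A first-turn request sketch; the field values are illustrative, and nil/zero sampling fields fall back to provider defaults as documented above.

temp := 0.2
req := &core.LLMRequest{
    SystemMessage: "You are a concise research assistant.",
    Messages: []core.Message{
        {Role: core.MessageRoleUser, Content: "Summarize the attached report in three bullet points."},
    },
    Temperature: &temp, // pointer: nil would mean "use the provider default"
    MaxTokens:   1024,  // 0 would mean "use the provider default"
}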
type LLMResponse ¶
type LLMStream ¶
type LLMStream interface {
    Next() bool
    Current() *LLMStreamChunk
    Err() error
    // GetResult returns the accumulated content and tool calls after streaming
    // completes. Call it after the Next loop; it returns nil if streaming failed
    // or did not complete.
    GetResult() *LLMResponse
}
LLMStream yields partial content and optional thinking/tool-call chunks from a streaming LLM response.
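A sketch of draining a stream; client and req are assumed to be set up as in the LLMClient example above, and the fmt import is omitted.

// streamAnswer prints partial text as it arrives, then returns the accumulated response.
func streamAnswer(ctx context.Context, client core.LLMClient, req *core.LLMRequest) (*core.LLMResponse, error) {
    stream, err := client.GenerateStream(ctx, req)
    if err != nil {
        return nil, err
    }
    for stream.Next() {
        fmt.Print(stream.Current().ContentDelta)
    }
    if err := stream.Err(); err != nil {
        return nil, err
    }
    // GetResult is only meaningful once the Next loop has finished.
    return stream.GetResult(), nil
}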
type LLMStreamChunk ¶
type LLMStreamChunk struct {
    ContentDelta  string      // partial text content
    ThinkingDelta string      // Anthropic extended thinking (optional)
    ToolCalls     []*ToolCall // set on final chunk when tool calls are present
}
LLMStreamChunk is a single chunk from a streaming LLM response.
type ListMessagesOption ¶
type ListMessagesOption func(*ListMessagesOptions)
func WithLimit ¶
func WithLimit(limit int) ListMessagesOption
WithLimit sets the maximum number of messages to retrieve.
func WithOffset ¶
func WithOffset(offset int) ListMessagesOption
WithOffset sets the number of messages to skip.
func WithRoles ¶
func WithRoles(roles ...MessageRole) ListMessagesOption
WithRoles filters messages by role.
type ListMessagesOptions ¶
type ListMessagesOptions struct {
    // Limit is the maximum number of messages to retrieve, counted from the most
    // recent. -1 = all.
    Limit int
    // Offset is the number of most recent messages to skip. -1 is treated as 0
    // (the default).
    Offset int
    // Roles filters messages by role.
    Roles []MessageRole
}
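For example, to page through older assistant turns (conv and sessionID as in the Conversation example above):

msgs, err := conv.ListMessages(ctx, sessionID,
    core.WithLimit(10),                        // at most 10 messages
    core.WithOffset(10),                       // skip the 10 most recent
    core.WithRoles(core.MessageRoleAssistant), // assistant turns only
)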
type Message ¶
type Message struct {
    Role       MessageRole    `json:"role"`
    Content    string         `json:"content"`
    ToolName   string         `json:"tool_name"`
    ToolCallID string         `json:"tool_call_id"`
    ToolCalls  []*ToolCall    `json:"tool_calls"`
    Metadata   map[string]any `json:"metadata"`
    CreatedAt  time.Time      `json:"created_at"`
}
Message represents a single turn in a multi-turn conversation, including tool use.
type MessageRole ¶
type MessageRole string
const (
    MessageRoleSystem    MessageRole = "system"
    MessageRoleUser      MessageRole = "user"
    MessageRoleAssistant MessageRole = "assistant"
    MessageRoleTool      MessageRole = "tool"
)
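A sketch of the message sequence for continuing after tool use, as described under LLMRequest.Messages; the tool name, call ID, and result value are illustrative.

call := &core.ToolCall{ToolCallID: "call_1", ToolName: "add", Args: map[string]any{"a": 2, "b": 3}}
msgs := []core.Message{
    {Role: core.MessageRoleUser, Content: "What is 2+3?"},
    // The assistant turn carries the tool calls it decided to make.
    {Role: core.MessageRoleAssistant, ToolCalls: []*core.ToolCall{call}},
    // One tool result per call, matched back by ToolCallID.
    {Role: core.MessageRoleTool, ToolName: "add", ToolCallID: "call_1", Content: "5"},
}
next := &core.LLMRequest{Messages: msgs}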
type ResponseFormat ¶
type ResponseFormat struct {
    Type   ResponseFormatType
    Name   string
    Schema JSONSchema
}
type ResponseFormatType ¶
type ResponseFormatType string
const (
    ResponseFormatJSON ResponseFormatType = "json"
    ResponseFormatText ResponseFormatType = "text"
)
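A sketch of requesting structured JSON output; the schema value is assumed to be built elsewhere, since the concrete shape of JSONSchema is not shown above.

// withJSONOutput asks the model to return JSON matching the given schema.
func withJSONOutput(req *core.LLMRequest, schema core.JSONSchema) {
    req.ResponseFormat = &core.ResponseFormat{
        Type:   core.ResponseFormatJSON,
        Name:   "summary", // illustrative schema name
        Schema: schema,
    }
}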
type Tool ¶
type Tool interface {
    // Name returns the tool identifier (e.g. "search", "calculator"). Used by the
    // LLM in tool calls.
    Name() string
    // Description describes when and how to use this tool. Shown to the LLM for
    // tool selection.
    Description() string
    // Parameters returns the JSON schema for the tool's input. The LLM produces
    // args matching this schema. Use tools.Params with tools.ParamString,
    // ParamInteger, etc. for type-safe construction.
    Parameters() JSONSchema
    // Execute runs the tool with the given args. Args match the Parameters schema.
    // Called by the agent when the LLM returns a tool call for this tool.
    Execute(ctx context.Context, args map[string]any) (any, error)
}
Tool is a callable capability the agent can offer to the LLM. Register tools via agent.WithTools. The LLM receives tool definitions and chooses which to call; the agent executes the chosen tool.
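A minimal Tool implementation sketch; the schema field is assumed to be built with the tools package helpers mentioned above, whose exact signatures are not reproduced here.

// addTool adds two numbers supplied by the LLM.
type addTool struct {
    schema core.JSONSchema // e.g. built via tools.Params(...); construction omitted
}

func (t addTool) Name() string                { return "add" }
func (t addTool) Description() string         { return "Add two numbers a and b and return the sum." }
func (t addTool) Parameters() core.JSONSchema { return t.schema }

func (t addTool) Execute(ctx context.Context, args map[string]any) (any, error) {
    a, _ := args["a"].(float64) // JSON numbers decode as float64
    b, _ := args["b"].(float64)
    return a + b, nil
}

Register the tool via agent.WithTools (or a ToolRegistry) so the agent can offer it to the LLM.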
type ToolApproval ¶
type ToolApproval interface {
    ApprovalRequired() bool
}
ToolApproval is an optional interface for tools that require user approval before execution. When a tool implements it, the agent honors ApprovalRequired() as long as no agent-level policy is set; WithToolApprovalPolicy overrides this tool-level default.
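A tool opts in by also implementing the interface; deleteFileTool below is hypothetical and is assumed to already implement Tool.

// ApprovalRequired marks every execution of this tool as needing user approval,
// unless an agent-level policy overrides it.
func (t deleteFileTool) ApprovalRequired() bool { return true }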
type ToolCall ¶
type ToolCall struct {
    ToolCallID string         `json:"tool_call_id"` // from API; needed to match tool results
    ToolName   string         `json:"tool_name"`
    Args       map[string]any `json:"args"`
}
ToolCall is the LLM's decision to invoke a tool.
type ToolRegistry ¶
type ToolRegistry interface {
    // Register adds a tool. Overwrites if a tool with the same name exists.
    Register(tool Tool)
    // Get returns the tool by name, or (nil, false) if not found.
    Get(name string) (Tool, bool)
    // Tools returns all registered tools in registration order.
    Tools() []Tool
}
ToolRegistry manages a collection of tools. Use for registering and looking up tools by name.
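A dispatch sketch tying the registry to ToolCall above; the registry implementation is assumed to be provided elsewhere, and the fmt import is omitted.

// dispatch looks up the tool named in a ToolCall and executes it with the LLM's args.
func dispatch(ctx context.Context, reg core.ToolRegistry, call *core.ToolCall) (any, error) {
    tool, ok := reg.Get(call.ToolName)
    if !ok {
        return nil, fmt.Errorf("unknown tool %q", call.ToolName)
    }
    return tool.Execute(ctx, call.Args)
}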
type ToolSpec ¶
type ToolSpec struct {
    Name        string     `json:"name"`
    Description string     `json:"description"`
    Parameters  JSONSchema `json:"parameters"`
}
ToolSpec is the schema sent to the LLM for tool selection. Convert from Tool via ToolToSpec.
func ToolToSpec ¶
ToolToSpec converts a Tool to its spec for the LLM.
func ToolsToSpecs ¶
ToolsToSpecs converts a slice of Tool to specs for the LLM.
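A usage sketch, assuming the obvious shapes (Tool to ToolSpec, and []Tool to []ToolSpec); the exact signatures are not reproduced in this documentation. reg is a ToolRegistry as in the dispatch example above.

// Attach every registered tool to a request so the LLM can choose among them.
req := &core.LLMRequest{
    SystemMessage: "You may call tools when helpful.",
    Messages:      []core.Message{{Role: core.MessageRoleUser, Content: "What is 2+3?"}},
    Tools:         core.ToolsToSpecs(reg.Tools()),
}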