Documentation
¶
Overview ¶
Package llm defines types and interfaces for Large Language Model interactions including message handlers, threads, configuration, and usage tracking for different LLM providers.
Index ¶
- Constants
- Variables
- func DefaultContextPatterns() []string
- type AnthropicAPIAccess
- type Config
- type ConsoleMessageHandler
- func (h *ConsoleMessageHandler) HandleContentBlockEnd()
- func (h *ConsoleMessageHandler) HandleDone()
- func (h *ConsoleMessageHandler) HandleText(text string)
- func (h *ConsoleMessageHandler) HandleTextDelta(delta string)
- func (h *ConsoleMessageHandler) HandleThinking(thinking string)
- func (h *ConsoleMessageHandler) HandleThinkingBlockEnd()
- func (h *ConsoleMessageHandler) HandleThinkingDelta(delta string)
- func (h *ConsoleMessageHandler) HandleThinkingStart()
- func (h *ConsoleMessageHandler) HandleToolResult(_, _ string, result tooltypes.ToolResult)
- func (h *ConsoleMessageHandler) HandleToolUse(_ string, toolName string, input string)
- type ContextConfig
- type CustomModels
- type CustomPricing
- type DeltaEntry
- type GoogleConfig
- type HeadlessStreamHandler
- func (h *HeadlessStreamHandler) HandleContentBlockEnd()
- func (h *HeadlessStreamHandler) HandleDone()
- func (h *HeadlessStreamHandler) HandleText(_ string)
- func (h *HeadlessStreamHandler) HandleTextDelta(delta string)
- func (h *HeadlessStreamHandler) HandleThinking(_ string)
- func (h *HeadlessStreamHandler) HandleThinkingBlockEnd()
- func (h *HeadlessStreamHandler) HandleThinkingDelta(delta string)
- func (h *HeadlessStreamHandler) HandleThinkingStart()
- func (h *HeadlessStreamHandler) HandleToolResult(_, _ string, _ tooltypes.ToolResult)
- func (h *HeadlessStreamHandler) HandleToolUse(_, _, _ string)
- type HookConfig
- type Message
- type MessageEvent
- type MessageHandler
- type MessageOpt
- type ModelPricing
- type OpenAIAPIMode
- type OpenAIConfig
- type ProfileConfig
- type RetryConfig
- type SkillsConfig
- type StreamingMessageHandler
- type StringCollectorHandler
- func (h *StringCollectorHandler) CollectedText() string
- func (h *StringCollectorHandler) HandleContentBlockEnd()
- func (h *StringCollectorHandler) HandleDone()
- func (h *StringCollectorHandler) HandleText(text string)
- func (h *StringCollectorHandler) HandleTextDelta(delta string)
- func (h *StringCollectorHandler) HandleThinking(thinking string)
- func (h *StringCollectorHandler) HandleThinkingBlockEnd()
- func (h *StringCollectorHandler) HandleThinkingDelta(delta string)
- func (h *StringCollectorHandler) HandleThinkingStart()
- func (h *StringCollectorHandler) HandleToolResult(_, _ string, result tooltypes.ToolResult)
- func (h *StringCollectorHandler) HandleToolUse(_ string, toolName string, input string)
- type Thread
- type ToolMode
- type Usage
Constants ¶
const (
	// AnthropicAPIAccessAuto uses subscription auth if available, then falls back to API key
	AnthropicAPIAccessAuto AnthropicAPIAccess = "auto"
	// AnthropicAPIAccessSubscription forces use of subscription-based OAuth auth only
	AnthropicAPIAccessSubscription AnthropicAPIAccess = "subscription"
	// AnthropicAPIAccessAPIKey forces use of API key-based auth only
	AnthropicAPIAccessAPIKey AnthropicAPIAccess = "api-key"

	// ToolModeFull allows the standard direct file tools.
	ToolModeFull ToolMode = "full"
	// ToolModePatch restricts file operations to apply_patch plus search/navigation tools.
	ToolModePatch ToolMode = "patch"
)
const (
	EventTypeThinking   = "thinking"
	EventTypeText       = "text"
	EventTypeToolUse    = "tool_use"
	EventTypeToolResult = "tool_result"

	// Streaming event types
	EventTypeTextDelta        = "text_delta"
	EventTypeThinkingStart    = "thinking_start"
	EventTypeThinkingDelta    = "thinking_delta"
	EventTypeThinkingBlockEnd = "thinking_block_end"
	EventTypeContentBlockEnd  = "content_block_end"
)
Event types
Variables ¶
var DefaultRetryConfig = RetryConfig{
Attempts: 3,
InitialDelay: 1000,
MaxDelay: 10000,
BackoffType: "exponential",
}
DefaultRetryConfig holds the default retry configuration
Functions ¶
func DefaultContextPatterns ¶
func DefaultContextPatterns() []string
DefaultContextPatterns returns the default context file patterns.
Types ¶
type AnthropicAPIAccess ¶
type AnthropicAPIAccess string
AnthropicAPIAccess defines the mode for Anthropic API access
type Config ¶
type Config struct {
IsSubAgent bool `mapstructure:"is_sub_agent" json:"is_sub_agent" yaml:"is_sub_agent"` // IsSubAgent is true if the LLM is a sub-agent
Provider string `mapstructure:"provider" json:"provider" yaml:"provider"` // Provider is the LLM provider (anthropic, openai)
Model string `mapstructure:"model" json:"model" yaml:"model"` // Model is the main driver
WeakModel string `mapstructure:"weak_model" json:"weak_model" yaml:"weak_model"` // WeakModel is the less capable but faster model to use
MaxTokens int `mapstructure:"max_tokens" json:"max_tokens" yaml:"max_tokens"`
WeakModelMaxTokens int `mapstructure:"weak_model_max_tokens" json:"weak_model_max_tokens" yaml:"weak_model_max_tokens"` // WeakModelMaxTokens is the maximum tokens for the weak model
ThinkingBudgetTokens int `mapstructure:"thinking_budget_tokens" json:"thinking_budget_tokens" yaml:"thinking_budget_tokens"` // ThinkingBudgetTokens is the budget for the thinking capability
ReasoningEffort string `mapstructure:"reasoning_effort" json:"reasoning_effort" yaml:"reasoning_effort"` // ReasoningEffort is used for OpenAI models (none, minimal, low, medium, high, xhigh)
AllowedCommands []string `mapstructure:"allowed_commands" json:"allowed_commands" yaml:"allowed_commands"` // AllowedCommands is a list of allowed command patterns for the bash tool
AllowedDomainsFile string `mapstructure:"allowed_domains_file" json:"allowed_domains_file" yaml:"allowed_domains_file"` // AllowedDomainsFile is the path to the file containing allowed domains for web_fetch tool
AllowedTools []string `mapstructure:"allowed_tools" json:"allowed_tools" yaml:"allowed_tools"` // AllowedTools is a list of allowed tools for the main agent (empty means use defaults)
WorkingDirectory string `mapstructure:"working_directory" json:"working_directory" yaml:"working_directory"`
ToolMode ToolMode `mapstructure:"tool_mode" json:"tool_mode" yaml:"tool_mode"` // ToolMode controls file-interaction behavior (e.g. full or patch)
AnthropicAPIAccess AnthropicAPIAccess `mapstructure:"anthropic_api_access" json:"anthropic_api_access" yaml:"anthropic_api_access"` // AnthropicAPIAccess controls how to authenticate with Anthropic API
AnthropicAccount string `mapstructure:"anthropic_account" json:"anthropic_account" yaml:"anthropic_account"` // AnthropicAccount specifies which Anthropic subscription account to use
UseCopilot bool `mapstructure:"use_copilot" json:"use_copilot" yaml:"use_copilot"` // UseCopilot enables GitHub Copilot subscription for OpenAI requests
Aliases map[string]string `mapstructure:"aliases" json:"aliases,omitempty" yaml:"aliases,omitempty"` // Aliases maps short model names to full model names
Retry RetryConfig `mapstructure:"retry" json:"retry" yaml:"retry"` // Retry configuration for API calls
MCPExecutionMode string `mapstructure:"mcp_execution_mode" json:"mcp_execution_mode" yaml:"mcp_execution_mode"` // MCP execution mode (code, direct, or empty)
MCPWorkspaceDir string `mapstructure:"mcp_workspace_dir" json:"mcp_workspace_dir" yaml:"mcp_workspace_dir"` // MCP workspace directory for code execution mode
Sysprompt string `mapstructure:"sysprompt" json:"sysprompt,omitempty" yaml:"sysprompt,omitempty"` // Sysprompt is the path to a custom system prompt template file
SyspromptArgs map[string]string `mapstructure:"sysprompt_args" json:"sysprompt_args,omitempty" yaml:"sysprompt_args,omitempty"` // SyspromptArgs are custom template arguments for system prompt rendering
// Profile system configuration
Profile string `mapstructure:"profile" json:"profile,omitempty" yaml:"profile,omitempty"` // Active profile name
Profiles map[string]ProfileConfig `mapstructure:"profiles" json:"profiles,omitempty" yaml:"profiles,omitempty"` // Named configuration profiles
// Provider-specific configurations
OpenAI *OpenAIConfig `mapstructure:"openai" json:"openai,omitempty" yaml:"openai,omitempty"` // OpenAI-specific configuration including compatible providers
Google *GoogleConfig `mapstructure:"google" json:"google,omitempty" yaml:"google,omitempty"` // Google GenAI-specific configuration
// SubagentArgs is the CLI argument string to pass when spawning subagents via shell-out
// Example: "--profile cheap" or "--use-weak-model"
SubagentArgs string `mapstructure:"subagent_args" json:"subagent_args,omitempty" yaml:"subagent_args,omitempty"`
// Skills configuration
Skills *SkillsConfig `mapstructure:"skills" json:"skills,omitempty" yaml:"skills,omitempty"` // Skills configuration for agentic skills system
// Context configuration
Context *ContextConfig `mapstructure:"context" json:"context,omitempty" yaml:"context,omitempty"` // Context configuration for context file discovery
// Hooks and feature toggle configuration
NoHooks bool `mapstructure:"no_hooks" json:"no_hooks" yaml:"no_hooks"` // NoHooks disables agent lifecycle hooks
DisableFSSearchTools bool `mapstructure:"disable_fs_search_tools" json:"disable_fs_search_tools" yaml:"disable_fs_search_tools"` // DisableFSSearchTools disables glob_tool and grep_tool and updates prompt/tool guidance accordingly
DisableSubagent bool `mapstructure:"disable_subagent" json:"disable_subagent" yaml:"disable_subagent"` // DisableSubagent disables the subagent tool and removes subagent-related system prompt context
EnableTodos bool `mapstructure:"enable_todos" json:"enable_todos" yaml:"enable_todos"` // EnableTodos enables todo_read and todo_write tools for the main agent
RecipeName string `mapstructure:"recipe_name" json:"recipe_name" yaml:"recipe_name"` // RecipeName is the active recipe/fragment name for hooks
}
Config holds the configuration for the LLM client
type ConsoleMessageHandler ¶
type ConsoleMessageHandler struct {
Silent bool
}
ConsoleMessageHandler prints messages to the console
func (*ConsoleMessageHandler) HandleContentBlockEnd ¶
func (h *ConsoleMessageHandler) HandleContentBlockEnd()
HandleContentBlockEnd prints a newline when a content block ends unless Silent is true
func (*ConsoleMessageHandler) HandleDone ¶
func (h *ConsoleMessageHandler) HandleDone()
HandleDone is called when message processing is complete
func (*ConsoleMessageHandler) HandleText ¶
func (h *ConsoleMessageHandler) HandleText(text string)
HandleText prints the text to the console unless Silent is true
func (*ConsoleMessageHandler) HandleTextDelta ¶
func (h *ConsoleMessageHandler) HandleTextDelta(delta string)
HandleTextDelta prints streamed text chunks to the console unless Silent is true
func (*ConsoleMessageHandler) HandleThinking ¶
func (h *ConsoleMessageHandler) HandleThinking(thinking string)
HandleThinking prints thinking content to the console unless Silent is true
func (*ConsoleMessageHandler) HandleThinkingBlockEnd ¶
func (h *ConsoleMessageHandler) HandleThinkingBlockEnd()
HandleThinkingBlockEnd prints a separator when a thinking block ends unless Silent is true
func (*ConsoleMessageHandler) HandleThinkingDelta ¶
func (h *ConsoleMessageHandler) HandleThinkingDelta(delta string)
HandleThinkingDelta prints streamed thinking chunks to the console unless Silent is true
func (*ConsoleMessageHandler) HandleThinkingStart ¶
func (h *ConsoleMessageHandler) HandleThinkingStart()
HandleThinkingStart prints the thinking prefix to the console unless Silent is true
func (*ConsoleMessageHandler) HandleToolResult ¶
func (h *ConsoleMessageHandler) HandleToolResult(_, _ string, result tooltypes.ToolResult)
HandleToolResult prints tool execution results to the console unless Silent is true
func (*ConsoleMessageHandler) HandleToolUse ¶
func (h *ConsoleMessageHandler) HandleToolUse(_ string, toolName string, input string)
HandleToolUse prints tool invocation details to the console unless Silent is true
type ContextConfig ¶
type ContextConfig struct {
// Patterns is a list of filenames to search for in each directory.
// Default is ["AGENTS.md"]. Files are searched in order; first match wins per directory.
Patterns []string `mapstructure:"patterns" json:"patterns" yaml:"patterns"`
}
ContextConfig holds configuration for context file discovery. Context files provide project-specific instructions and guidelines to the agent.
type CustomModels ¶
type CustomModels struct {
Reasoning []string `mapstructure:"reasoning" json:"reasoning" yaml:"reasoning"` // Models that support reasoning (o1, o3, etc.)
NonReasoning []string `mapstructure:"non_reasoning" json:"non_reasoning" yaml:"non_reasoning"` // Models that don't support reasoning (gpt-4, etc.)
}
CustomModels holds model categorization for custom configurations
type CustomPricing ¶
type CustomPricing map[string]ModelPricing
CustomPricing maps model names to their pricing information
type DeltaEntry ¶
type DeltaEntry struct {
Kind string `json:"kind"`
Delta string `json:"delta,omitempty"`
Content string `json:"content,omitempty"`
ConversationID string `json:"conversation_id"`
Role string `json:"role"`
}
DeltaEntry represents a streaming delta event for headless mode output
type GoogleConfig ¶
type GoogleConfig struct {
Backend string `mapstructure:"backend" json:"backend" yaml:"backend"` // Backend to use: "gemini" or "vertexai" (auto-detected if not specified)
APIKey string `mapstructure:"api_key" json:"api_key" yaml:"api_key"` // API key for Gemini API
Project string `mapstructure:"project" json:"project" yaml:"project"` // Google Cloud project ID for Vertex AI
Location string `mapstructure:"location" json:"location" yaml:"location"` // Google Cloud region for Vertex AI (e.g., "us-central1")
ThinkingBudget int32 `mapstructure:"thinking_budget" json:"thinking_budget" yaml:"thinking_budget"` // Token budget for thinking capability
}
GoogleConfig holds Google GenAI-specific configuration for both Vertex AI and Gemini API
type HeadlessStreamHandler ¶
type HeadlessStreamHandler struct {
// contains filtered or unexported fields
}
HeadlessStreamHandler outputs streaming events as JSON to stdout for headless mode with --stream-deltas enabled.
func NewHeadlessStreamHandler ¶
func NewHeadlessStreamHandler(conversationID string) *HeadlessStreamHandler
NewHeadlessStreamHandler creates a new HeadlessStreamHandler with the given conversation ID
func (*HeadlessStreamHandler) HandleContentBlockEnd ¶
func (h *HeadlessStreamHandler) HandleContentBlockEnd()
HandleContentBlockEnd outputs content block end event
func (*HeadlessStreamHandler) HandleDone ¶
func (h *HeadlessStreamHandler) HandleDone()
HandleDone is called when message processing is complete
func (*HeadlessStreamHandler) HandleText ¶
func (h *HeadlessStreamHandler) HandleText(_ string)
HandleText is a no-op as complete text is handled by ConversationStreamer
func (*HeadlessStreamHandler) HandleTextDelta ¶
func (h *HeadlessStreamHandler) HandleTextDelta(delta string)
HandleTextDelta outputs text delta events
func (*HeadlessStreamHandler) HandleThinking ¶
func (h *HeadlessStreamHandler) HandleThinking(_ string)
HandleThinking is a no-op as complete thinking is handled by ConversationStreamer
func (*HeadlessStreamHandler) HandleThinkingBlockEnd ¶
func (h *HeadlessStreamHandler) HandleThinkingBlockEnd()
HandleThinkingBlockEnd outputs thinking block end event
func (*HeadlessStreamHandler) HandleThinkingDelta ¶
func (h *HeadlessStreamHandler) HandleThinkingDelta(delta string)
HandleThinkingDelta outputs thinking delta events
func (*HeadlessStreamHandler) HandleThinkingStart ¶
func (h *HeadlessStreamHandler) HandleThinkingStart()
HandleThinkingStart outputs thinking block start event
func (*HeadlessStreamHandler) HandleToolResult ¶
func (h *HeadlessStreamHandler) HandleToolResult(_, _ string, _ tooltypes.ToolResult)
HandleToolResult is a no-op as tool results are handled by ConversationStreamer
func (*HeadlessStreamHandler) HandleToolUse ¶
func (h *HeadlessStreamHandler) HandleToolUse(_, _, _ string)
HandleToolUse is a no-op as tool calls are handled by ConversationStreamer
type HookConfig ¶
type HookConfig struct {
Handler string // Built-in handler name (e.g., "swap_context")
Once bool // If true, only execute on the first turn
}
HookConfig is a forward declaration of hooks.HookConfig to avoid circular imports. The actual type is defined in pkg/hooks/builtin.go.
type MessageEvent ¶
MessageEvent represents an event from processing a message
type MessageHandler ¶
type MessageHandler interface {
HandleText(text string)
HandleToolUse(toolCallID string, toolName string, input string)
HandleToolResult(toolCallID string, toolName string, result tooltypes.ToolResult)
HandleThinking(thinking string)
HandleDone()
}
MessageHandler defines how message events should be processed
type MessageOpt ¶
type MessageOpt struct {
// PromptCache indicates if prompt caching should be used
PromptCache bool
// UseWeakModel allows temporarily overriding the model for this message
UseWeakModel bool
// NoToolUse indicates that no tool use should be performed
NoToolUse bool
// NoSaveConversation indicates that the following conversation should not be saved
NoSaveConversation bool
// Images contains image paths or URLs to include with the message
Images []string
// MaxTurns limits the number of turns within a single SendMessage call
// A value of 0 means no limit, and negative values are treated as 0
MaxTurns int
// CompactRatio is the ratio of context window at which to trigger auto-compact (0.0-1.0)
CompactRatio float64
// DisableAutoCompact disables auto-compact functionality
DisableAutoCompact bool
// DisableUsageLog disables LLM usage logging for this message
DisableUsageLog bool
}
MessageOpt represents options for sending messages
type ModelPricing ¶
type ModelPricing struct {
Input float64 `mapstructure:"input" json:"input" yaml:"input"` // Input token cost per token
CachedInput float64 `mapstructure:"cached_input" json:"cached_input" yaml:"cached_input"` // Cached input token cost per token
Output float64 `mapstructure:"output" json:"output" yaml:"output"` // Output token cost per token
ContextWindow int `mapstructure:"context_window" json:"context_window" yaml:"context_window"` // Maximum context window size
}
ModelPricing holds the per-token pricing for different operations
type OpenAIAPIMode ¶
type OpenAIAPIMode string
OpenAIAPIMode defines which OpenAI-compatible API surface to use.
const (
	// OpenAIAPIModeChatCompletions routes requests via chat completions API.
	OpenAIAPIModeChatCompletions OpenAIAPIMode = "chat_completions"
	// OpenAIAPIModeResponses routes requests via responses API.
	OpenAIAPIModeResponses OpenAIAPIMode = "responses"
)
type OpenAIConfig ¶
type OpenAIConfig struct {
Platform string `mapstructure:"platform" json:"platform" yaml:"platform"` // Canonical platform name for OpenAI-compatible APIs (e.g., openai, xai, codex)
BaseURL string `mapstructure:"base_url" json:"base_url" yaml:"base_url"` // Custom API base URL (overrides platform defaults)
APIKeyEnvVar string `mapstructure:"api_key_env_var" json:"api_key_env_var" yaml:"api_key_env_var"` // Environment variable name for API key (overrides platform default)
APIMode OpenAIAPIMode `mapstructure:"api_mode" json:"api_mode" yaml:"api_mode"` // Preferred API mode selection (chat_completions or responses)
EnableSearch *bool `mapstructure:"enable_search" json:"enable_search,omitempty" yaml:"enable_search,omitempty"` // Enable native OpenAI Responses web_search tool when supported (defaults to true)
ManualCache bool `mapstructure:"manual_cache" json:"manual_cache" yaml:"manual_cache"` // Enables manual cache affinity headers for Chat Completions when prompt caching is requested
Models *CustomModels `mapstructure:"models" json:"models,omitempty" yaml:"models,omitempty"` // Custom model configuration
Pricing map[string]ModelPricing `mapstructure:"pricing" json:"pricing,omitempty" yaml:"pricing,omitempty"` // Custom pricing configuration
}
OpenAIConfig holds OpenAI-specific configuration including support for compatible APIs
type ProfileConfig ¶
ProfileConfig holds the configuration values for a named profile
type RetryConfig ¶
type RetryConfig struct {
Attempts int `mapstructure:"attempts" json:"attempts" yaml:"attempts"` // Maximum number of retry attempts (default: 3)
InitialDelay int `mapstructure:"initial_delay" json:"initial_delay" yaml:"initial_delay"` // Initial delay in milliseconds (default: 1000) - OpenAI only
MaxDelay int `mapstructure:"max_delay" json:"max_delay" yaml:"max_delay"` // Maximum delay in milliseconds (default: 10000) - OpenAI only
BackoffType string `mapstructure:"backoff_type" json:"backoff_type" yaml:"backoff_type"` // Backoff strategy: "fixed", "exponential" (default: "exponential") - OpenAI only
}
RetryConfig holds the retry configuration for API calls. Note: Anthropic only uses Attempts (relies on SDK retry); OpenAI uses all fields.
type SkillsConfig ¶
type SkillsConfig struct {
// Enabled controls whether skills are active. When the SkillsConfig is nil
// (not specified in config), skills default to enabled. Set to false to disable.
Enabled bool `mapstructure:"enabled" json:"enabled" yaml:"enabled"`
// Allowed is an allowlist of skill names. When empty, all discovered skills are available.
// When specified, only the listed skills will be enabled.
Allowed []string `mapstructure:"allowed" json:"allowed" yaml:"allowed"`
}
SkillsConfig holds configuration for the agentic skills system. When this config is nil or omitted, skills are enabled by default. To disable skills, explicitly set Enabled to false.
type StreamingMessageHandler ¶
type StreamingMessageHandler interface {
MessageHandler
HandleTextDelta(delta string) // Called for each text chunk as it streams
HandleThinkingStart() // Called when a thinking block starts
HandleThinkingDelta(delta string) // Called for each thinking chunk as it streams
HandleThinkingBlockEnd() // Called when a thinking block ends (for visual separation)
HandleContentBlockEnd() // Called when any content block ends
}
StreamingMessageHandler extends MessageHandler with delta streaming support. Handlers implementing this interface will receive content as it streams from the LLM.
type StringCollectorHandler ¶
type StringCollectorHandler struct {
Silent bool
// contains filtered or unexported fields
}
StringCollectorHandler collects text responses into a string
func (*StringCollectorHandler) CollectedText ¶
func (h *StringCollectorHandler) CollectedText() string
CollectedText returns the accumulated text responses as a single string
func (*StringCollectorHandler) HandleContentBlockEnd ¶
func (h *StringCollectorHandler) HandleContentBlockEnd()
HandleContentBlockEnd optionally prints a newline when a content block ends
func (*StringCollectorHandler) HandleDone ¶
func (h *StringCollectorHandler) HandleDone()
HandleDone is called when message processing is complete
func (*StringCollectorHandler) HandleText ¶
func (h *StringCollectorHandler) HandleText(text string)
HandleText collects the text in a string builder and optionally prints to console
func (*StringCollectorHandler) HandleTextDelta ¶
func (h *StringCollectorHandler) HandleTextDelta(delta string)
HandleTextDelta collects streamed text chunks and optionally prints to console
func (*StringCollectorHandler) HandleThinking ¶
func (h *StringCollectorHandler) HandleThinking(thinking string)
HandleThinking optionally prints thinking content to the console (does not affect collection)
func (*StringCollectorHandler) HandleThinkingBlockEnd ¶
func (h *StringCollectorHandler) HandleThinkingBlockEnd()
HandleThinkingBlockEnd optionally prints a separator when a thinking block ends
func (*StringCollectorHandler) HandleThinkingDelta ¶
func (h *StringCollectorHandler) HandleThinkingDelta(delta string)
HandleThinkingDelta optionally prints streamed thinking chunks to the console
func (*StringCollectorHandler) HandleThinkingStart ¶
func (h *StringCollectorHandler) HandleThinkingStart()
HandleThinkingStart optionally prints the thinking prefix to the console
func (*StringCollectorHandler) HandleToolResult ¶
func (h *StringCollectorHandler) HandleToolResult(_, _ string, result tooltypes.ToolResult)
HandleToolResult optionally prints tool execution results to the console (does not affect collection)
func (*StringCollectorHandler) HandleToolUse ¶
func (h *StringCollectorHandler) HandleToolUse(_ string, toolName string, input string)
HandleToolUse optionally prints tool invocation details to the console (does not affect collection)
type Thread ¶
type Thread interface {
// SetState sets the state for the thread
SetState(s tooltypes.State)
// GetState returns the current state of the thread
GetState() tooltypes.State
// AddUserMessage adds a user message with optional images to the thread
AddUserMessage(ctx context.Context, message string, imagePaths ...string)
// SendMessage sends a message to the LLM and processes the response
SendMessage(ctx context.Context, message string, handler MessageHandler, opt MessageOpt) (finalOutput string, err error)
// GetUsage returns the current token usage for the thread
GetUsage() Usage
// GetConversationID returns the current conversation ID
GetConversationID() string
// SetConversationID sets the conversation ID
SetConversationID(id string)
// SaveConversation saves the current thread to the conversation store
SaveConversation(ctx context.Context, summarise bool) error
// IsPersisted returns whether this thread is being persisted
IsPersisted() bool
// EnablePersistence enables conversation persistence for this thread
EnablePersistence(ctx context.Context, enabled bool)
// Provider returns the provider of the thread
Provider() string
// GetMessages returns the messages from the thread
GetMessages() ([]Message, error)
// GetConfig returns the configuration of the thread
GetConfig() Config
// AggregateSubagentUsage aggregates usage from a subagent into this thread's usage
// This aggregates token counts and costs but NOT context window (which should remain isolated)
AggregateSubagentUsage(usage Usage)
// SetRecipeHooks sets the recipe hook configurations for the thread
SetRecipeHooks(hooks map[string]HookConfig)
// GetRecipeHooks returns the recipe hook configurations for the thread
GetRecipeHooks() map[string]HookConfig
}
Thread represents a conversation thread with an LLM
type ToolMode ¶
type ToolMode string
ToolMode defines how the agent can interact with project files.
func (ToolMode) IsPatchMode ¶
IsPatchMode reports whether the tool mode should use apply_patch-only workflows.
type Usage ¶
type Usage struct {
InputTokens int `json:"inputTokens"` // Regular input tokens count
OutputTokens int `json:"outputTokens"` // Output tokens generated
CacheCreationInputTokens int `json:"cacheCreationInputTokens"` // Tokens used for creating cache entries
CacheReadInputTokens int `json:"cacheReadInputTokens"` // Tokens used for reading from cache
InputCost float64 `json:"inputCost"` // Cost for input tokens in USD
OutputCost float64 `json:"outputCost"` // Cost for output tokens in USD
CacheCreationCost float64 `json:"cacheCreationCost"` // Cost for cache creation in USD
CacheReadCost float64 `json:"cacheReadCost"` // Cost for cache read in USD
CurrentContextWindow int `json:"currentContextWindow"` // Current context window size
MaxContextWindow int `json:"maxContextWindow"` // Max context window size
}
Usage represents token usage information from LLM API calls
func (*Usage) TotalTokens ¶
TotalTokens returns the total number of tokens used