Documentation ¶
Index ¶
- Constants
- Variables
- func BuildResponsesToolChoice(choice ToolChoice) interface{}
- func BuildResponsesTools(specs []ToolSpec) []any
- func CleanupOldLogs(baseDir string, maxAge time.Duration) error
- func DebugRawEvent(enabled bool, event Event)
- func DebugRawRequest(enabled bool, providerName, credential string, req Request, label string)
- func DebugRawSection(enabled bool, label, body string)
- func DebugRawToolCall(enabled bool, call ToolCall)
- func DebugRawToolResult(enabled bool, id, name, content string)
- func DebugToolCall(enabled bool, call ToolCall)
- func DebugToolResult(enabled bool, id, name, content string)
- func EditToolSchema() map[string]interface{}
- func FilterOpenRouterModels(models []string, prefix string) []string
- func GetBuiltInProviderNames() []string
- func GetCachedOpenRouterModels(apiKey string) []string
- func GetImageProviderNames() []string
- func GetProviderCompletions(toComplete string, isImage bool, cfg *config.Config) []string
- func GetProviderNames(cfg *config.Config) []string
- func ParseProviderModel(s string, cfg *config.Config) (string, string, error)
- func ParseUnifiedDiff(call ToolCall) (string, error)
- func RefreshOpenRouterCacheSync(apiKey string, models []ModelInfo)
- func UnifiedDiffToolSchema() map[string]interface{}
- type AnthropicProvider
- func (p *AnthropicProvider) Capabilities() Capabilities
- func (p *AnthropicProvider) Credential() string
- func (p *AnthropicProvider) ListModels(ctx context.Context) ([]ModelInfo, error)
- func (p *AnthropicProvider) Name() string
- func (p *AnthropicProvider) Stream(ctx context.Context, req Request) (Stream, error)
- type Capabilities
- type ChatGPTProvider
- type ClaudeBinProvider
- type CommandSuggestion
- type CopilotProvider
- func (p *CopilotProvider) Capabilities() Capabilities
- func (p *CopilotProvider) Credential() string
- func (p *CopilotProvider) GetUsage(ctx context.Context) (*CopilotUsage, error)
- func (p *CopilotProvider) ListModels(ctx context.Context) ([]ModelInfo, error)
- func (p *CopilotProvider) Name() string
- func (p *CopilotProvider) ResetConversation()
- func (p *CopilotProvider) Stream(ctx context.Context, req Request) (Stream, error)
- type CopilotQuota
- type CopilotUsage
- type DebugLogger
- func (l *DebugLogger) Close() error
- func (l *DebugLogger) Flush()
- func (l *DebugLogger) LogEvent(event Event)
- func (l *DebugLogger) LogRequest(provider, model string, req Request)
- func (l *DebugLogger) LogSessionStart(command string, args []string, cwd string)
- func (l *DebugLogger) LogTurnRequest(turn int, provider, model string, req Request)
- type EditToolCall
- type Engine
- func (e *Engine) ClearAllowedTools()
- func (e *Engine) IsToolAllowed(name string) bool
- func (e *Engine) RegisterTool(tool Tool)
- func (e *Engine) SetAllowedTools(tools []string)
- func (e *Engine) SetDebugLogger(logger *DebugLogger)
- func (e *Engine) Stream(ctx context.Context, req Request) (Stream, error)
- func (e *Engine) Tools() *ToolRegistry
- func (e *Engine) UnregisterTool(name string)
- type Event
- type EventType
- type GeminiCLIProvider
- type GeminiProvider
- type Message
- type MockProvider
- func (m *MockProvider) AddError(err error) *MockProvider
- func (m *MockProvider) AddTextResponse(text string) *MockProvider
- func (m *MockProvider) AddToolCall(id, name string, args any) *MockProvider
- func (m *MockProvider) AddTurn(t MockTurn) *MockProvider
- func (m *MockProvider) Capabilities() Capabilities
- func (m *MockProvider) Credential() string
- func (m *MockProvider) CurrentTurn() int
- func (m *MockProvider) Name() string
- func (m *MockProvider) Reset()
- func (m *MockProvider) ResetTurns()
- func (m *MockProvider) Stream(ctx context.Context, req Request) (Stream, error)
- func (m *MockProvider) TurnCount() int
- func (m *MockProvider) WithCapabilities(c Capabilities) *MockProvider
- type MockTurn
- type ModelInfo
- type OpenAICompatProvider
- func NewOpenAICompatProvider(baseURL, apiKey, model, name string) *OpenAICompatProvider
- func NewOpenAICompatProviderFull(baseURL, chatURL, apiKey, model, name string, headers map[string]string) *OpenAICompatProvider
- func NewOpenAICompatProviderWithHeaders(baseURL, apiKey, model, name string, headers map[string]string) *OpenAICompatProvider
- func NewOpenRouterProvider(apiKey, model, appURL, appTitle string) *OpenAICompatProvider
- func (p *OpenAICompatProvider) Capabilities() Capabilities
- func (p *OpenAICompatProvider) Credential() string
- func (p *OpenAICompatProvider) ListModels(ctx context.Context) ([]ModelInfo, error)
- func (p *OpenAICompatProvider) Name() string
- func (p *OpenAICompatProvider) Stream(ctx context.Context, req Request) (Stream, error)
- type OpenAIProvider
- func (p *OpenAIProvider) Capabilities() Capabilities
- func (p *OpenAIProvider) Credential() string
- func (p *OpenAIProvider) ListModels(ctx context.Context) ([]ModelInfo, error)
- func (p *OpenAIProvider) Name() string
- func (p *OpenAIProvider) ResetConversation()
- func (p *OpenAIProvider) Stream(ctx context.Context, req Request) (Stream, error)
- type Part
- type PartType
- type Provider
- type RateLimitError
- type ReadURLTool
- type Request
- type ResponsesClient
- type ResponsesContentPart
- type ResponsesInputItem
- type ResponsesReasoning
- type ResponsesRequest
- type ResponsesTool
- type ResponsesWebSearchTool
- type RetryConfig
- type RetryProvider
- type Role
- type Stream
- type Tool
- type ToolCall
- type ToolChoice
- type ToolChoiceMode
- type ToolRegistry
- type ToolResult
- type ToolSpec
- type Usage
- type WebSearchTool
- type XAIProvider
- type ZenProvider
Constants ¶
const (
	SuggestCommandsToolName = "suggest_commands"
	EditToolName            = "edit"
	UnifiedDiffToolName     = "unified_diff"
	WebSearchToolName       = "web_search"
)
const EditToolDescription = "" /* 215-byte string literal not displayed */
EditToolDescription is the description for the edit tool.
const (
ReadURLToolName = "read_url"
)
const UnifiedDiffToolDescription = `` /* 929-byte string literal not displayed */
UnifiedDiffToolDescription is the description for the unified diff tool.
Variables ¶
var ImageProviderModels = map[string][]string{
"debug": {"random"},
"gemini": {"gemini-2.5-flash-image", "gemini-3-pro-image-preview"},
"openai": {"gpt-image-1.5", "gpt-image-1-mini"},
"flux": {"flux-2-pro", "flux-kontext-pro", "flux-2-max"},
"openrouter": {"google/gemini-2.5-flash-image", "google/gemini-3-pro-image-preview", "openai/gpt-5-image", "openai/gpt-5-image-mini", "bytedance-seed/seedream-4.5", "black-forest-labs/flux.2-pro"},
}
var ProviderModels = map[string][]string{
"anthropic": {
"claude-sonnet-4-5",
"claude-sonnet-4-5-thinking",
"claude-opus-4-5",
"claude-opus-4-5-thinking",
"claude-haiku-4-5",
"claude-haiku-4-5-thinking",
},
"openai": {
"gpt-5.2-codex",
"gpt-5.2-codex-medium",
"gpt-5.2-codex-high",
"gpt-5.2-codex-xhigh",
"gpt-5.2",
"gpt-5.2-high",
"gpt-5.2-xhigh",
"gpt-5.1",
"gpt-5.1-high",
"gpt-5",
"gpt-5-mini",
"gpt-5-nano",
"o3-mini",
},
"chatgpt": {
"gpt-5.2-codex",
"gpt-5.2-codex-low",
"gpt-5.2-codex-medium",
"gpt-5.2-codex-high",
"gpt-5.2-codex-xhigh",
"gpt-5.2",
"gpt-5.2-low",
"gpt-5.2-medium",
"gpt-5.2-high",
"gpt-5.2-xhigh",
"gpt-5.1-codex-max",
"gpt-5.1-codex",
"gpt-5.1-codex-mini",
"gpt-5.1",
"gpt-5-codex",
"gpt-5-codex-mini",
"gpt-5",
},
"copilot": {
"gpt-4.1",
"gpt-5.2-codex",
"gpt-5.1-codex",
"gpt-5.1-codex-max",
"gpt-5.1-codex-mini",
"gpt-5.2",
"gpt-5.1",
"gpt-5-mini",
"claude-opus-4.5",
"claude-sonnet-4.5",
"claude-sonnet-4",
"claude-haiku-4.5",
"gemini-3-pro",
"gemini-3-flash",
"grok-code-fast-1",
"raptor-mini",
},
"openrouter": {
"x-ai/grok-code-fast-1",
},
"gemini": {
"gemini-3-pro-preview",
"gemini-3-pro-preview-thinking",
"gemini-3-flash-preview",
"gemini-3-flash-preview-thinking",
"gemini-2.5-flash",
"gemini-2.5-flash-lite",
},
"gemini-cli": {
"gemini-3-pro-preview",
"gemini-3-pro-preview-thinking",
"gemini-3-flash-preview",
"gemini-3-flash-preview-thinking",
"gemini-2.5-flash",
"gemini-2.5-flash-lite",
},
"zen": {
"minimax-m2.1-free",
"glm-4.7-free",
"grok-code",
"big-pickle",
"gpt-5-nano",
},
"claude-bin": {
"opus",
"sonnet",
"haiku",
},
"xai": {
"grok-4-1-fast",
"grok-4-1-fast-reasoning",
"grok-4-1-fast-non-reasoning",
"grok-4",
"grok-4-fast-reasoning",
"grok-4-fast-non-reasoning",
"grok-3",
"grok-3-fast",
"grok-3-mini",
"grok-3-mini-fast",
"grok-code-fast-1",
"grok-2",
},
}
ProviderModels contains the curated list of common models per LLM provider type
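For example, the curated entries for a single provider type can be read straight from the map (a minimal sketch; in this and the following sketches the package is assumed to be imported as llm):
for _, model := range llm.ProviderModels["anthropic"] {
	fmt.Println(model)
}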
Functions ¶
func BuildResponsesToolChoice ¶ added in v0.0.34
func BuildResponsesToolChoice(choice ToolChoice) interface{}
BuildResponsesToolChoice converts ToolChoice to Open Responses format
func BuildResponsesTools ¶ added in v0.0.34
BuildResponsesTools converts []ToolSpec to Open Responses format with schema normalization
func CleanupOldLogs ¶ added in v0.0.34
CleanupOldLogs removes JSONL log files older than maxAge from the specified directory. This prevents debug logs from accumulating indefinitely.
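A minimal sketch of a manual cleanup pass, assuming logs live under a directory of your choosing:
logDir := filepath.Join(os.TempDir(), "llm-debug") // hypothetical log directory
if err := llm.CleanupOldLogs(logDir, 7*24*time.Hour); err != nil {
	log.Printf("log cleanup failed: %v", err)
}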
func DebugRawEvent ¶ added in v0.0.10
DebugRawEvent prints each stream event with a timestamp.
func DebugRawRequest ¶ added in v0.0.10
DebugRawRequest prints the raw request with all message parts in debug mode.
func DebugRawSection ¶ added in v0.0.10
DebugRawSection prints a timestamped debug section.
func DebugRawToolCall ¶ added in v0.0.10
DebugRawToolCall prints a tool call with raw JSON arguments and a timestamp.
func DebugRawToolResult ¶ added in v0.0.10
DebugRawToolResult prints a tool result payload with a timestamp.
func DebugToolCall ¶ added in v0.0.10
DebugToolCall prints a tool call in debug mode with readable formatting.
func DebugToolResult ¶ added in v0.0.10
DebugToolResult prints a tool result in debug mode with readable formatting.
func EditToolSchema ¶ added in v0.0.9
func EditToolSchema() map[string]interface{}
EditToolSchema returns the JSON schema for the edit tool.
func FilterOpenRouterModels ¶ added in v0.0.24
func FilterOpenRouterModels(models []string, prefix string) []string
func GetBuiltInProviderNames ¶ added in v0.0.15
func GetBuiltInProviderNames() []string
GetBuiltInProviderNames returns the built-in provider type names
func GetCachedOpenRouterModels ¶ added in v0.0.24
func GetCachedOpenRouterModels(apiKey string) []string
func GetImageProviderNames ¶ added in v0.0.6
func GetImageProviderNames() []string
GetImageProviderNames returns valid provider names for image generation
func GetProviderCompletions ¶ added in v0.0.6
GetProviderCompletions returns completions for the --provider flag. It handles both provider-only and provider:model completion scenarios. For LLM providers, pass a config to include custom provider names.
func GetProviderNames ¶ added in v0.0.6
GetProviderNames returns valid provider names from config plus built-in types. If cfg is nil, returns only built-in provider names.
func ParseProviderModel ¶ added in v0.0.6
ParseProviderModel parses "provider:model" or just "provider" from a flag value. Returns (provider, model, error). Model will be empty if not specified. For the new config format, we validate against configured providers or built-in types.
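A minimal sketch of parsing a --provider flag value; passing a nil *config.Config is assumed to limit validation to the built-in provider types:
provider, model, err := llm.ParseProviderModel("openai:gpt-5.2", nil) // nil cfg: built-in types only (assumption)
if err != nil {
	log.Fatalf("invalid --provider value: %v", err)
}
if model == "" {
	if curated := llm.ProviderModels[provider]; len(curated) > 0 {
		model = curated[0] // fall back to the first curated model
	}
}
fmt.Println(provider, model)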
func ParseUnifiedDiff ¶ added in v0.0.9
ParseUnifiedDiff parses a unified_diff tool call payload.
func RefreshOpenRouterCacheSync ¶ added in v0.0.24
func RefreshOpenRouterCacheSync(apiKey string, models []ModelInfo)
func UnifiedDiffToolSchema ¶ added in v0.0.9
func UnifiedDiffToolSchema() map[string]interface{}
UnifiedDiffToolSchema returns the JSON schema for the unified diff tool.
Types ¶
type AnthropicProvider ¶
type AnthropicProvider struct {
// contains filtered or unexported fields
}
AnthropicProvider implements Provider using the Anthropic API.
func NewAnthropicProvider ¶
func NewAnthropicProvider(apiKey, model string) *AnthropicProvider
func (*AnthropicProvider) Capabilities ¶ added in v0.0.10
func (p *AnthropicProvider) Capabilities() Capabilities
func (*AnthropicProvider) Credential ¶ added in v0.0.10
func (p *AnthropicProvider) Credential() string
func (*AnthropicProvider) ListModels ¶ added in v0.0.8
func (p *AnthropicProvider) ListModels(ctx context.Context) ([]ModelInfo, error)
ListModels returns available models from Anthropic.
func (*AnthropicProvider) Name ¶
func (p *AnthropicProvider) Name() string
type Capabilities ¶ added in v0.0.10
type Capabilities struct {
NativeWebSearch bool // Provider has native web search capability
NativeWebFetch bool // Provider has native URL fetch capability
ToolCalls bool
}
Capabilities describe optional provider features.
type ChatGPTProvider ¶ added in v0.0.32
type ChatGPTProvider struct {
// contains filtered or unexported fields
}
ChatGPTProvider implements Provider using the ChatGPT backend API with native OAuth.
func NewChatGPTProvider ¶ added in v0.0.32
func NewChatGPTProvider(model string) (*ChatGPTProvider, error)
NewChatGPTProvider creates a new ChatGPT provider. If credentials are not available or expired, it will prompt the user to authenticate.
func NewChatGPTProviderWithCreds ¶ added in v0.0.32
func NewChatGPTProviderWithCreds(creds *credentials.ChatGPTCredentials, model string) *ChatGPTProvider
NewChatGPTProviderWithCreds creates a ChatGPT provider with pre-loaded credentials. This is used by the factory when credentials are already resolved.
func (*ChatGPTProvider) Capabilities ¶ added in v0.0.32
func (p *ChatGPTProvider) Capabilities() Capabilities
func (*ChatGPTProvider) Credential ¶ added in v0.0.32
func (p *ChatGPTProvider) Credential() string
func (*ChatGPTProvider) Name ¶ added in v0.0.32
func (p *ChatGPTProvider) Name() string
type ClaudeBinProvider ¶ added in v0.0.21
type ClaudeBinProvider struct {
// contains filtered or unexported fields
}
ClaudeBinProvider implements Provider using the claude CLI binary. This provider shells out to the claude command for inference, using Claude Code's existing authentication.
func NewClaudeBinProvider ¶ added in v0.0.21
func NewClaudeBinProvider(model string) *ClaudeBinProvider
NewClaudeBinProvider creates a new provider that uses the claude binary.
func (*ClaudeBinProvider) Capabilities ¶ added in v0.0.21
func (p *ClaudeBinProvider) Capabilities() Capabilities
func (*ClaudeBinProvider) Credential ¶ added in v0.0.21
func (p *ClaudeBinProvider) Credential() string
func (*ClaudeBinProvider) Name ¶ added in v0.0.21
func (p *ClaudeBinProvider) Name() string
type CommandSuggestion ¶
type CommandSuggestion struct {
Command string `json:"command"`
Explanation string `json:"explanation"`
Likelihood int `json:"likelihood"` // 1-10, how likely this matches user intent
}
CommandSuggestion represents a single command suggestion from the LLM.
func ParseCommandSuggestions ¶ added in v0.0.10
func ParseCommandSuggestions(call ToolCall) ([]CommandSuggestion, error)
ParseCommandSuggestions parses a suggest_commands tool call.
type CopilotProvider ¶ added in v0.0.34
type CopilotProvider struct {
// contains filtered or unexported fields
}
CopilotProvider implements Provider using GitHub Copilot's OpenAI-compatible API.
func NewCopilotProvider ¶ added in v0.0.34
func NewCopilotProvider(model string) (*CopilotProvider, error)
NewCopilotProvider creates a new Copilot provider. If credentials are not available or expired, it will prompt the user to authenticate.
func NewCopilotProviderWithCreds ¶ added in v0.0.34
func NewCopilotProviderWithCreds(creds *credentials.CopilotCredentials, model string) *CopilotProvider
NewCopilotProviderWithCreds creates a Copilot provider with pre-loaded credentials. This is used by the factory when credentials are already resolved.
func (*CopilotProvider) Capabilities ¶ added in v0.0.34
func (p *CopilotProvider) Capabilities() Capabilities
func (*CopilotProvider) Credential ¶ added in v0.0.34
func (p *CopilotProvider) Credential() string
func (*CopilotProvider) GetUsage ¶ added in v0.0.34
func (p *CopilotProvider) GetUsage(ctx context.Context) (*CopilotUsage, error)
GetUsage fetches the current Copilot usage and quota information. This uses GitHub's internal API which requires the VS Code OAuth client ID.
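A sketch of checking remaining quota, assuming Copilot credentials are already cached locally:
p, err := llm.NewCopilotProvider("gpt-5.1")
if err != nil {
	log.Fatal(err)
}
usage, err := p.GetUsage(context.Background())
if err != nil {
	log.Fatal(err)
}
fmt.Println("plan:", usage.Plan, "resets:", usage.ResetDate.Format("2006-01-02"))
if q := usage.PremiumChat; q != nil && !q.Unlimited {
	fmt.Printf("premium chat: %d of %d used (%.0f%% remaining)\n", q.Used, q.Entitlement, q.PercentRemaining)
}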
func (*CopilotProvider) ListModels ¶ added in v0.0.34
func (p *CopilotProvider) ListModels(ctx context.Context) ([]ModelInfo, error)
ListModels returns available models from the GitHub Copilot API
func (*CopilotProvider) Name ¶ added in v0.0.34
func (p *CopilotProvider) Name() string
func (*CopilotProvider) ResetConversation ¶ added in v0.0.34
func (p *CopilotProvider) ResetConversation()
ResetConversation clears server state for the Responses API client. Called on /clear or new conversation.
type CopilotQuota ¶ added in v0.0.34
type CopilotQuota struct {
Entitlement int `json:"entitlement"`
Used int `json:"used"`
Remaining int `json:"remaining"`
PercentRemaining float64 `json:"percent_remaining"`
Unlimited bool `json:"unlimited"`
}
CopilotQuota represents quota information for a specific feature
type CopilotUsage ¶ added in v0.0.34
type CopilotUsage struct {
Plan string `json:"plan"`
ResetDate time.Time `json:"reset_date"`
PremiumChat *CopilotQuota `json:"premium_chat,omitempty"`
Chat *CopilotQuota `json:"chat,omitempty"`
Completions *CopilotQuota `json:"completions,omitempty"`
}
CopilotUsage represents the usage data from GitHub Copilot
type DebugLogger ¶ added in v0.0.34
type DebugLogger struct {
// contains filtered or unexported fields
}
DebugLogger logs LLM requests and events to JSONL files for debugging. Each session gets its own file based on the session ID.
func NewDebugLogger ¶ added in v0.0.34
func NewDebugLogger(baseDir, sessionID string) (*DebugLogger, error)
NewDebugLogger creates a new DebugLogger. The sessionID is used to create a unique filename for this session. Old log files (>7 days) are automatically cleaned up.
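A sketch of wiring up session logging; the base directory and session ID below are placeholders:
logger, err := llm.NewDebugLogger(filepath.Join(os.TempDir(), "llm-debug"), "session-20240101-abc")
if err != nil {
	log.Fatal(err)
}
defer logger.Close() // idempotent; flushes buffered data

cwd, _ := os.Getwd()
logger.LogSessionStart("chat", os.Args[1:], cwd)
logger.LogRequest("openai", "gpt-5.2", llm.Request{Model: "gpt-5.2"})
// Attach to an Engine with engine.SetDebugLogger(logger) to capture per-turn requests and events.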
func (*DebugLogger) Close ¶ added in v0.0.34
func (l *DebugLogger) Close() error
Close closes the debug logger and flushes any buffered data. Close is idempotent and safe to call multiple times.
func (*DebugLogger) Flush ¶ added in v0.0.34
func (l *DebugLogger) Flush()
Flush flushes the buffered writer to disk.
func (*DebugLogger) LogEvent ¶ added in v0.0.34
func (l *DebugLogger) LogEvent(event Event)
LogEvent logs an LLM event.
func (*DebugLogger) LogRequest ¶ added in v0.0.34
func (l *DebugLogger) LogRequest(provider, model string, req Request)
LogRequest logs an LLM request.
func (*DebugLogger) LogSessionStart ¶ added in v0.0.35
func (l *DebugLogger) LogSessionStart(command string, args []string, cwd string)
LogSessionStart logs the session start with CLI invocation details.
func (*DebugLogger) LogTurnRequest ¶ added in v0.0.34
func (l *DebugLogger) LogTurnRequest(turn int, provider, model string, req Request)
LogTurnRequest logs a request for a specific turn in an agentic loop. This captures the state after tool results have been appended.
type EditToolCall ¶ added in v0.0.5
type EditToolCall struct {
FilePath string `json:"file_path"`
OldString string `json:"old_string"`
NewString string `json:"new_string"`
}
EditToolCall represents a single edit tool call (find/replace).
func ParseEditToolCall ¶ added in v0.0.10
func ParseEditToolCall(call ToolCall) (EditToolCall, error)
ParseEditToolCall parses a single edit tool call payload.
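A sketch of decoding an edit call; the arguments JSON mirrors the EditToolCall field tags:
call := llm.ToolCall{
	ID:        "call-1",
	Name:      llm.EditToolName,
	Arguments: json.RawMessage(`{"file_path":"main.go","old_string":"Hello, world","new_string":"Hello, gophers"}`),
}
edit, err := llm.ParseEditToolCall(call)
if err != nil {
	log.Fatal(err)
}
fmt.Printf("replace %q with %q in %s\n", edit.OldString, edit.NewString, edit.FilePath)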
type Engine ¶ added in v0.0.10
type Engine struct {
// contains filtered or unexported fields
}
Engine orchestrates provider calls and external tool execution.
func NewEngine ¶ added in v0.0.10
func NewEngine(provider Provider, tools *ToolRegistry) *Engine
func (*Engine) ClearAllowedTools ¶ added in v0.0.37
func (e *Engine) ClearAllowedTools()
ClearAllowedTools removes the tool filter, allowing all registered tools.
func (*Engine) IsToolAllowed ¶ added in v0.0.37
IsToolAllowed checks if a tool can be executed under current restrictions.
func (*Engine) RegisterTool ¶ added in v0.0.15
RegisterTool adds a tool to the engine's registry.
func (*Engine) SetAllowedTools ¶ added in v0.0.37
SetAllowedTools sets the list of tools that can be executed. When set, only tools in this list can run; all others are blocked. Pass nil or empty slice to allow all tools. The list is intersected with registered tools (can't allow unregistered tools).
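A sketch of an allow-list in practice, assuming p is an already constructed Provider and searcher implements search.Searcher:
registry := llm.NewToolRegistry()
registry.Register(llm.NewReadURLTool())
registry.Register(llm.NewWebSearchTool(searcher))

engine := llm.NewEngine(p, registry)
engine.SetAllowedTools([]string{llm.ReadURLToolName}) // web_search stays registered but is blocked

fmt.Println(engine.IsToolAllowed(llm.ReadURLToolName))   // true
fmt.Println(engine.IsToolAllowed(llm.WebSearchToolName)) // false
engine.ClearAllowedTools() // allow every registered tool again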
func (*Engine) SetDebugLogger ¶ added in v0.0.34
func (e *Engine) SetDebugLogger(logger *DebugLogger)
SetDebugLogger sets the debug logger for this engine.
func (*Engine) Stream ¶ added in v0.0.10
Stream returns a stream, applying external tools when needed.
func (*Engine) Tools ¶ added in v0.0.15
func (e *Engine) Tools() *ToolRegistry
Tools returns the engine's tool registry.
func (*Engine) UnregisterTool ¶ added in v0.0.15
UnregisterTool removes a tool from the engine's registry.
type Event ¶ added in v0.0.10
type Event struct {
Type EventType
Text string
Tool *ToolCall
ToolCallID string // For EventToolExecStart/End: unique ID of this tool invocation
ToolName string // For EventToolExecStart/End: name of tool being executed
ToolInfo string // For EventToolExecStart/End: additional info (e.g., URL being fetched)
ToolSuccess bool // For EventToolExecEnd: whether tool execution succeeded
ToolOutput string // For EventToolExecEnd: the tool's output (for image marker parsing)
Use *Usage
Err error
// Retry fields (for EventRetry)
RetryAttempt int
RetryMaxAttempts int
RetryWaitSecs float64
}
Event represents a streamed output update.
type EventType ¶ added in v0.0.10
type EventType string
EventType describes streaming events.
const (
	EventTextDelta     EventType = "text_delta"
	EventToolCall      EventType = "tool_call"
	EventToolExecStart EventType = "tool_exec_start" // Emitted when tool execution begins
	EventToolExecEnd   EventType = "tool_exec_end"   // Emitted when tool execution completes
	EventUsage         EventType = "usage"
	EventPhase         EventType = "phase" // Emitted for high-level phase changes (Thinking, Searching, etc.)
	EventDone          EventType = "done"
	EventError         EventType = "error"
	EventRetry         EventType = "retry" // Emitted when retrying after rate limit
)
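A sketch of a handler that switches over these event types while a Stream is being drained:
func handleEvent(ev llm.Event) {
	switch ev.Type {
	case llm.EventTextDelta:
		fmt.Print(ev.Text)
	case llm.EventToolExecStart:
		fmt.Printf("\n[%s started: %s]\n", ev.ToolName, ev.ToolInfo)
	case llm.EventToolExecEnd:
		fmt.Printf("[%s finished, success=%v]\n", ev.ToolName, ev.ToolSuccess)
	case llm.EventUsage:
		if ev.Use != nil {
			fmt.Printf("[tokens: in=%d out=%d cached=%d]\n", ev.Use.InputTokens, ev.Use.OutputTokens, ev.Use.CachedInputTokens)
		}
	case llm.EventRetry:
		fmt.Printf("[retry %d/%d in %.0fs]\n", ev.RetryAttempt, ev.RetryMaxAttempts, ev.RetryWaitSecs)
	case llm.EventError:
		fmt.Println("error:", ev.Err)
	}
}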
type GeminiCLIProvider ¶ added in v0.0.31
type GeminiCLIProvider struct {
// contains filtered or unexported fields
}
GeminiCLIProvider implements Provider using the Google Code Assist API with OAuth
func NewGeminiCLIProvider ¶ added in v0.0.31
func NewGeminiCLIProvider(creds *credentials.GeminiOAuthCredentials, model string) *GeminiCLIProvider
func (*GeminiCLIProvider) Capabilities ¶ added in v0.0.31
func (p *GeminiCLIProvider) Capabilities() Capabilities
func (*GeminiCLIProvider) Credential ¶ added in v0.0.31
func (p *GeminiCLIProvider) Credential() string
func (*GeminiCLIProvider) Name ¶ added in v0.0.31
func (p *GeminiCLIProvider) Name() string
type GeminiProvider ¶
type GeminiProvider struct {
// contains filtered or unexported fields
}
GeminiProvider implements Provider using the Google Gemini API.
func NewGeminiProvider ¶
func NewGeminiProvider(apiKey, model string) *GeminiProvider
func (*GeminiProvider) Capabilities ¶ added in v0.0.10
func (p *GeminiProvider) Capabilities() Capabilities
func (*GeminiProvider) Credential ¶ added in v0.0.10
func (p *GeminiProvider) Credential() string
func (*GeminiProvider) Name ¶
func (p *GeminiProvider) Name() string
type Message ¶ added in v0.0.10
Message holds a role with structured parts.
func AssistantText ¶ added in v0.0.10
func SystemText ¶ added in v0.0.10
func ToolErrorMessage ¶ added in v0.0.23
ToolErrorMessage creates a tool result message that indicates an error. The error is passed to the LLM so it can respond gracefully instead of failing the stream.
func ToolResultMessage ¶ added in v0.0.10
type MockProvider ¶ added in v0.0.25
type MockProvider struct {
Requests []Request // Recorded requests for verification
// contains filtered or unexported fields
}
MockProvider is a configurable provider for testing. It returns scripted responses and records all requests for verification.
func NewMockProvider ¶ added in v0.0.25
func NewMockProvider(name string) *MockProvider
NewMockProvider creates a new mock provider with the given name.
func (*MockProvider) AddError ¶ added in v0.0.25
func (m *MockProvider) AddError(err error) *MockProvider
AddError adds a turn that returns an error.
func (*MockProvider) AddTextResponse ¶ added in v0.0.25
func (m *MockProvider) AddTextResponse(text string) *MockProvider
AddTextResponse is a convenience method to add a simple text response.
func (*MockProvider) AddToolCall ¶ added in v0.0.25
func (m *MockProvider) AddToolCall(id, name string, args any) *MockProvider
AddToolCall is a convenience method to add a turn with a single tool call.
func (*MockProvider) AddTurn ¶ added in v0.0.25
func (m *MockProvider) AddTurn(t MockTurn) *MockProvider
AddTurn adds a response turn and returns the provider for chaining.
func (*MockProvider) Capabilities ¶ added in v0.0.25
func (m *MockProvider) Capabilities() Capabilities
Capabilities returns the provider capabilities.
func (*MockProvider) Credential ¶ added in v0.0.25
func (m *MockProvider) Credential() string
Credential returns "mock" for the mock provider.
func (*MockProvider) CurrentTurn ¶ added in v0.0.25
func (m *MockProvider) CurrentTurn() int
CurrentTurn returns the current turn index (0-based).
func (*MockProvider) Name ¶ added in v0.0.25
func (m *MockProvider) Name() string
Name returns the provider name.
func (*MockProvider) Reset ¶ added in v0.0.25
func (m *MockProvider) Reset()
Reset clears recorded requests and resets the turn index.
func (*MockProvider) ResetTurns ¶ added in v0.0.25
func (m *MockProvider) ResetTurns()
ResetTurns clears the scripted turns and resets the turn index.
func (*MockProvider) TurnCount ¶ added in v0.0.25
func (m *MockProvider) TurnCount() int
TurnCount returns the number of scripted turns.
func (*MockProvider) WithCapabilities ¶ added in v0.0.25
func (m *MockProvider) WithCapabilities(c Capabilities) *MockProvider
WithCapabilities sets the provider capabilities and returns the provider for chaining.
type MockTurn ¶ added in v0.0.25
type MockTurn struct {
Text string // Text to emit (will be chunked for realistic streaming)
ToolCalls []ToolCall // Tool calls to emit
Usage Usage // Token usage to report
Delay time.Duration // Optional delay before responding (for timeout tests)
Error error // Return this error instead of responding
}
MockTurn represents a single response turn from the mock provider.
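A sketch of a test scripted with MockProvider and MockTurn:
func TestScriptedProvider(t *testing.T) {
	mock := llm.NewMockProvider("mock").
		AddTextResponse("Hello from the first turn.").
		AddTurn(llm.MockTurn{
			Text:  "Second turn with reported usage.",
			Usage: llm.Usage{InputTokens: 12, OutputTokens: 5},
		})

	stream, err := mock.Stream(context.Background(), llm.Request{Model: "mock-model"})
	if err != nil {
		t.Fatal(err)
	}
	_ = stream // drain until io.EOF and assert on the emitted events

	if mock.TurnCount() != 2 {
		t.Fatalf("expected 2 scripted turns, got %d", mock.TurnCount())
	}
	if len(mock.Requests) != 1 {
		t.Fatalf("expected 1 recorded request, got %d", len(mock.Requests))
	}
}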
type ModelInfo ¶ added in v0.0.8
type ModelInfo struct {
ID string
DisplayName string
Created int64
OwnedBy string
// Pricing per 1M tokens (0 = free, -1 = unknown)
InputPrice float64
OutputPrice float64
}
ModelInfo represents a model available from a provider.
type OpenAICompatProvider ¶ added in v0.0.8
type OpenAICompatProvider struct {
// contains filtered or unexported fields
}
OpenAICompatProvider implements Provider for OpenAI-compatible APIs. Used by Ollama, LM Studio, and other compatible servers.
func NewOpenAICompatProvider ¶ added in v0.0.8
func NewOpenAICompatProvider(baseURL, apiKey, model, name string) *OpenAICompatProvider
func NewOpenAICompatProviderFull ¶ added in v0.0.15
func NewOpenAICompatProviderFull(baseURL, chatURL, apiKey, model, name string, headers map[string]string) *OpenAICompatProvider
NewOpenAICompatProviderFull creates a provider with full control over URLs. If chatURL is provided, it's used directly for chat completions (no path appending). If only baseURL is provided, /chat/completions is appended. baseURL is normalized to strip /chat/completions if accidentally included.
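A sketch that targets a local Ollama server through the simpler constructor; the URL and model name describe an assumed local setup:
// Ollama's OpenAI-compatible endpoint is typically http://localhost:11434/v1 (assumed local setup).
p := llm.NewOpenAICompatProvider("http://localhost:11434/v1", "", "llama3.1", "ollama")
models, err := p.ListModels(context.Background())
if err != nil {
	log.Fatal(err)
}
for _, m := range models {
	fmt.Println(m.ID)
}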
func NewOpenAICompatProviderWithHeaders ¶ added in v0.0.10
func NewOpenAICompatProviderWithHeaders(baseURL, apiKey, model, name string, headers map[string]string) *OpenAICompatProvider
func NewOpenRouterProvider ¶ added in v0.0.10
func NewOpenRouterProvider(apiKey, model, appURL, appTitle string) *OpenAICompatProvider
NewOpenRouterProvider creates an OpenRouter provider using OpenAI-compatible APIs.
func (*OpenAICompatProvider) Capabilities ¶ added in v0.0.10
func (p *OpenAICompatProvider) Capabilities() Capabilities
func (*OpenAICompatProvider) Credential ¶ added in v0.0.10
func (p *OpenAICompatProvider) Credential() string
func (*OpenAICompatProvider) ListModels ¶ added in v0.0.8
func (p *OpenAICompatProvider) ListModels(ctx context.Context) ([]ModelInfo, error)
ListModels returns available models from the server.
func (*OpenAICompatProvider) Name ¶ added in v0.0.8
func (p *OpenAICompatProvider) Name() string
type OpenAIProvider ¶
type OpenAIProvider struct {
// contains filtered or unexported fields
}
OpenAIProvider implements Provider using the standard OpenAI API.
func NewOpenAIProvider ¶
func NewOpenAIProvider(apiKey, model string) *OpenAIProvider
func (*OpenAIProvider) Capabilities ¶ added in v0.0.10
func (p *OpenAIProvider) Capabilities() Capabilities
func (*OpenAIProvider) Credential ¶ added in v0.0.10
func (p *OpenAIProvider) Credential() string
func (*OpenAIProvider) ListModels ¶ added in v0.0.31
func (p *OpenAIProvider) ListModels(ctx context.Context) ([]ModelInfo, error)
func (*OpenAIProvider) Name ¶
func (p *OpenAIProvider) Name() string
func (*OpenAIProvider) ResetConversation ¶ added in v0.0.34
func (p *OpenAIProvider) ResetConversation()
ResetConversation clears server state for the Responses API client. Called on /clear or new conversation.
type Part ¶ added in v0.0.10
type Part struct {
Type PartType
Text string
ToolCall *ToolCall
ToolResult *ToolResult
}
Part represents a single content part.
type Provider ¶
type Provider interface {
Name() string
Credential() string // Returns credential type for debugging (e.g., "api_key", "codex", "claude-code")
Capabilities() Capabilities
Stream(ctx context.Context, req Request) (Stream, error)
}
Provider streams model output events for a request.
func NewProvider ¶
NewProvider creates a new LLM provider based on the config. Providers are wrapped with automatic retry for rate limits (429) and transient errors.
func NewProviderByName ¶ added in v0.0.15
NewProviderByName creates a provider by name from the config, with an optional model override. This is useful for per-command provider overrides. If the provider is a built-in type but not explicitly configured, it will be created with default settings.
func WrapWithRetry ¶ added in v0.0.12
func WrapWithRetry(p Provider, config RetryConfig) Provider
WrapWithRetry wraps a provider with retry logic.
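A sketch of wrapping a concrete provider by hand; the environment variable name is an assumption:
base := llm.NewAnthropicProvider(os.Getenv("ANTHROPIC_API_KEY"), "claude-sonnet-4-5") // env var name assumed
p := llm.WrapWithRetry(base, llm.DefaultRetryConfig())
fmt.Println(p.Name(), p.Credential())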
type RateLimitError ¶ added in v0.0.33
type RateLimitError struct {
Message string
RetryAfter time.Duration
PlanType string
PrimaryUsed int
SecondaryUsed int
}
RateLimitError represents a rate limit error with retry information.
func (*RateLimitError) Error ¶ added in v0.0.33
func (e *RateLimitError) Error() string
func (*RateLimitError) IsLongWait ¶ added in v0.0.33
func (e *RateLimitError) IsLongWait() bool
IsLongWait returns true if the retry wait is too long for automatic retry.
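A sketch of inspecting the failure with errors.As, assuming p, ctx, and req are already in scope:
_, err := p.Stream(ctx, req)
var rle *llm.RateLimitError
if errors.As(err, &rle) {
	if rle.IsLongWait() {
		log.Printf("rate limited (%s plan); retry manually after %s", rle.PlanType, rle.RetryAfter)
	} else {
		time.Sleep(rle.RetryAfter)
		// retry the request here
	}
}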
type ReadURLTool ¶ added in v0.0.11
type ReadURLTool struct {
// contains filtered or unexported fields
}
ReadURLTool fetches web pages using Jina AI Reader.
func NewReadURLTool ¶ added in v0.0.11
func NewReadURLTool() *ReadURLTool
func (*ReadURLTool) Execute ¶ added in v0.0.11
func (t *ReadURLTool) Execute(ctx context.Context, args json.RawMessage) (string, error)
func (*ReadURLTool) Preview ¶ added in v0.0.25
func (t *ReadURLTool) Preview(args json.RawMessage) string
func (*ReadURLTool) Spec ¶ added in v0.0.11
func (t *ReadURLTool) Spec() ToolSpec
type Request ¶ added in v0.0.10
type Request struct {
Model string
Messages []Message
Tools []ToolSpec
ToolChoice ToolChoice
LastTurnToolChoice *ToolChoice // If set, force this tool choice on the last agentic turn
ParallelToolCalls bool
Search bool
ForceExternalSearch bool // If true, use external search even if provider supports native
ReasoningEffort string
MaxOutputTokens int
Temperature float32
TopP float32
MaxTurns int // Max agentic turns for tool execution (0 = use default)
Debug bool
DebugRaw bool
}
Request represents a single model turn.
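A sketch of a typical request, assuming a populated ToolRegistry named registry; Messages is left to the message helpers (SystemText, AssistantText, ToolResultMessage), whose signatures are not reproduced here:
req := llm.Request{
	Model:           "gpt-5.2",
	Tools:           registry.AllSpecs(), // expose every registered tool
	ToolChoice:      llm.ToolChoice{Mode: llm.ToolChoiceAuto},
	Search:          true, // prefer native web search when the provider supports it
	ReasoningEffort: "high",
	MaxTurns:        6, // cap agentic tool-execution turns (0 = default)
}
// Populate req.Messages with the message helpers before calling Engine.Stream or Provider.Stream.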
type ResponsesClient ¶ added in v0.0.34
type ResponsesClient struct {
BaseURL string // Full URL for responses endpoint (e.g., "https://api.openai.com/v1/responses")
GetAuthHeader func() string // Dynamic auth (allows token refresh)
ExtraHeaders map[string]string // Provider-specific headers
HTTPClient *http.Client // HTTP client to use
LastResponseID string // Track for conversation continuity (server state)
DisableServerState bool // Set to true to disable previous_response_id (e.g., for Copilot)
}
ResponsesClient makes raw HTTP calls to Open Responses-compliant endpoints. See https://www.openresponses.org/specification
func (*ResponsesClient) ResetConversation ¶ added in v0.0.34
func (c *ResponsesClient) ResetConversation()
ResetConversation clears server state (called on /clear or new conversation)
func (*ResponsesClient) Stream ¶ added in v0.0.34
func (c *ResponsesClient) Stream(ctx context.Context, req ResponsesRequest, debugRaw bool) (Stream, error)
Stream makes a streaming request to the Responses API and returns events via a Stream
type ResponsesContentPart ¶ added in v0.0.34
type ResponsesContentPart struct {
Type string `json:"type"`
Text string `json:"text,omitempty"`
ImageURL string `json:"image_url,omitempty"` // Plain URL string for Responses API (not object)
}
ResponsesContentPart represents a content part (text or image)
type ResponsesInputItem ¶ added in v0.0.34
type ResponsesInputItem struct {
Type string `json:"type"`
Role string `json:"role,omitempty"`
Content interface{} `json:"content,omitempty"` // string or []ResponsesContentPart
// For function_call type
CallID string `json:"call_id,omitempty"`
Name string `json:"name,omitempty"`
Arguments string `json:"arguments,omitempty"`
// For function_call_output type
Output string `json:"output,omitempty"`
}
ResponsesInputItem represents an input item in the Open Responses format
func BuildResponsesInput ¶ added in v0.0.34
func BuildResponsesInput(messages []Message) []ResponsesInputItem
BuildResponsesInput converts []Message to Open Responses input format
type ResponsesReasoning ¶ added in v0.0.34
type ResponsesReasoning struct {
Effort string `json:"effort,omitempty"` // "low", "medium", "high", "xhigh"
}
ResponsesReasoning configures reasoning effort for models that support it
type ResponsesRequest ¶ added in v0.0.34
type ResponsesRequest struct {
Model string `json:"model"`
Input []ResponsesInputItem `json:"input"`
Tools []any `json:"tools,omitempty"` // Can contain ResponsesTool or ResponsesWebSearchTool
ToolChoice any `json:"tool_choice,omitempty"`
ParallelToolCalls *bool `json:"parallel_tool_calls,omitempty"`
MaxOutputTokens int `json:"max_output_tokens,omitempty"`
Temperature *float64 `json:"temperature,omitempty"`
TopP *float64 `json:"top_p,omitempty"`
Reasoning *ResponsesReasoning `json:"reasoning,omitempty"`
Stream bool `json:"stream"`
PreviousResponseID string `json:"previous_response_id,omitempty"`
}
ResponsesRequest follows the Open Responses spec
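A sketch that assembles a ResponsesRequest from the higher-level types and streams it through a ResponsesClient; the endpoint URL repeats the BaseURL example above, the auth header format is an assumption, and messages and specs are assumed to already exist:
rreq := llm.ResponsesRequest{
	Model:      "gpt-5.2",
	Input:      llm.BuildResponsesInput(messages), // messages is an existing []llm.Message
	Tools:      llm.BuildResponsesTools(specs),    // specs is an existing []llm.ToolSpec
	ToolChoice: llm.BuildResponsesToolChoice(llm.ToolChoice{Mode: llm.ToolChoiceAuto}),
	Reasoning:  &llm.ResponsesReasoning{Effort: "medium"},
	Stream:     true,
}
client := &llm.ResponsesClient{
	BaseURL:       "https://api.openai.com/v1/responses",
	GetAuthHeader: func() string { return "Bearer " + os.Getenv("OPENAI_API_KEY") }, // header value format assumed
	HTTPClient:    http.DefaultClient,
}
stream, err := client.Stream(context.Background(), rreq, false)
if err != nil {
	log.Fatal(err)
}
_ = stream // drain until io.EOF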
type ResponsesTool ¶ added in v0.0.34
type ResponsesTool struct {
Type string `json:"type"`
Name string `json:"name"`
Description string `json:"description,omitempty"`
Parameters map[string]interface{} `json:"parameters"`
Strict bool `json:"strict,omitempty"`
}
ResponsesTool represents a tool definition in Open Responses format
type ResponsesWebSearchTool ¶ added in v0.0.34
type ResponsesWebSearchTool struct {
Type string `json:"type"` // "web_search_preview"
}
ResponsesWebSearchTool represents the web search tool for OpenAI
type RetryConfig ¶ added in v0.0.12
RetryConfig configures retry behavior.
func DefaultRetryConfig ¶ added in v0.0.12
func DefaultRetryConfig() RetryConfig
DefaultRetryConfig returns sensible defaults for rate limit retries.
type RetryProvider ¶ added in v0.0.12
type RetryProvider struct {
// contains filtered or unexported fields
}
RetryProvider wraps a provider with automatic retry on transient errors.
func (*RetryProvider) Capabilities ¶ added in v0.0.12
func (r *RetryProvider) Capabilities() Capabilities
func (*RetryProvider) Credential ¶ added in v0.0.12
func (r *RetryProvider) Credential() string
func (*RetryProvider) Name ¶ added in v0.0.12
func (r *RetryProvider) Name() string
type Stream ¶ added in v0.0.10
Stream yields events until io.EOF.
func WrapDebugStream ¶ added in v0.0.10
type Tool ¶ added in v0.0.10
type Tool interface {
Spec() ToolSpec
Execute(ctx context.Context, args json.RawMessage) (string, error)
// Preview returns a human-readable description of what the tool will do,
// shown to the user before execution starts (e.g., "Generating image: a cat").
// Returns empty string if no preview is available.
Preview(args json.RawMessage) string
}
Tool describes a callable external tool.
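A sketch of a hypothetical custom tool; the ToolSpec field names used in Spec (Name, Description, Parameters) are assumptions modeled on ResponsesTool, so check the actual ToolSpec definition first:
// clockTool is a hypothetical tool that reports the current time.
type clockTool struct{}

func (clockTool) Spec() llm.ToolSpec {
	return llm.ToolSpec{ // field names assumed; see ToolSpec
		Name:        "clock",
		Description: "Returns the current time in RFC 3339 format",
		Parameters:  map[string]interface{}{"type": "object", "properties": map[string]interface{}{}},
	}
}

func (clockTool) Execute(ctx context.Context, args json.RawMessage) (string, error) {
	return time.Now().Format(time.RFC3339), nil
}

func (clockTool) Preview(args json.RawMessage) string {
	return "Reading the system clock"
}
// Register with engine.RegisterTool(clockTool{}) or ToolRegistry.Register.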
type ToolCall ¶ added in v0.0.10
type ToolCall struct {
ID string
Name string
Arguments json.RawMessage
ThoughtSig []byte // Gemini 3 thought signature (must be passed back in result)
}
ToolCall is a model-requested tool invocation.
type ToolChoice ¶ added in v0.0.10
type ToolChoice struct {
Mode ToolChoiceMode
Name string
}
ToolChoice configures which tool the model should call.
type ToolChoiceMode ¶ added in v0.0.10
type ToolChoiceMode string
ToolChoiceMode controls tool selection behavior.
const (
	ToolChoiceAuto     ToolChoiceMode = "auto"
	ToolChoiceNone     ToolChoiceMode = "none"
	ToolChoiceRequired ToolChoiceMode = "required"
	ToolChoiceName     ToolChoiceMode = "name"
)
type ToolRegistry ¶ added in v0.0.10
type ToolRegistry struct {
// contains filtered or unexported fields
}
ToolRegistry stores tools by name for execution.
func NewToolRegistry ¶ added in v0.0.10
func NewToolRegistry() *ToolRegistry
func (*ToolRegistry) AllSpecs ¶ added in v0.0.15
func (r *ToolRegistry) AllSpecs() []ToolSpec
AllSpecs returns the specs for all registered tools.
func (*ToolRegistry) Register ¶ added in v0.0.10
func (r *ToolRegistry) Register(tool Tool)
func (*ToolRegistry) Unregister ¶ added in v0.0.15
func (r *ToolRegistry) Unregister(name string)
type ToolResult ¶ added in v0.0.10
type ToolResult struct {
ID string
Name string
Content string
IsError bool // True if this result represents a tool execution error
ThoughtSig []byte // Gemini 3 thought signature (passed through from ToolCall)
}
ToolResult is the output from executing a tool call.
type ToolSpec ¶ added in v0.0.10
ToolSpec describes a callable tool.
func EditToolSpec ¶ added in v0.0.10
func EditToolSpec() ToolSpec
EditToolSpec returns the tool spec for the edit tool.
func ReadURLToolSpec ¶ added in v0.0.11
func ReadURLToolSpec() ToolSpec
ReadURLToolSpec returns the tool spec for reading web pages.
func SuggestCommandsToolSpec ¶ added in v0.0.10
SuggestCommandsToolSpec returns the tool spec for command suggestions.
func UnifiedDiffToolSpec ¶ added in v0.0.10
func UnifiedDiffToolSpec() ToolSpec
UnifiedDiffToolSpec returns the tool spec for unified diff edits.
func WebSearchToolSpec ¶ added in v0.0.10
func WebSearchToolSpec() ToolSpec
WebSearchToolSpec returns the tool spec for external web search.
type Usage ¶ added in v0.0.10
type Usage struct {
InputTokens int
OutputTokens int
CachedInputTokens int // Tokens read from cache
}
Usage captures token usage if available.
type WebSearchTool ¶ added in v0.0.10
type WebSearchTool struct {
// contains filtered or unexported fields
}
WebSearchTool executes searches through a Searcher.
func NewWebSearchTool ¶ added in v0.0.10
func NewWebSearchTool(searcher search.Searcher) *WebSearchTool
func (*WebSearchTool) Execute ¶ added in v0.0.10
func (t *WebSearchTool) Execute(ctx context.Context, args json.RawMessage) (string, error)
func (*WebSearchTool) Preview ¶ added in v0.0.25
func (t *WebSearchTool) Preview(args json.RawMessage) string
func (*WebSearchTool) Spec ¶ added in v0.0.10
func (t *WebSearchTool) Spec() ToolSpec
type XAIProvider ¶ added in v0.0.31
type XAIProvider struct {
// contains filtered or unexported fields
}
XAIProvider implements Provider for the xAI (Grok) API. Uses OpenAI-compatible chat completions for tool calling, and the Responses API for native web/X search.
func NewXAIProvider ¶ added in v0.0.31
func NewXAIProvider(apiKey, model string) *XAIProvider
NewXAIProvider creates a new xAI provider.
func (*XAIProvider) Capabilities ¶ added in v0.0.31
func (p *XAIProvider) Capabilities() Capabilities
func (*XAIProvider) Credential ¶ added in v0.0.31
func (p *XAIProvider) Credential() string
func (*XAIProvider) ListModels ¶ added in v0.0.31
func (p *XAIProvider) ListModels(ctx context.Context) ([]ModelInfo, error)
ListModels returns available models from the xAI API.
func (*XAIProvider) Name ¶ added in v0.0.31
func (p *XAIProvider) Name() string
type ZenProvider ¶ added in v0.0.2
type ZenProvider struct {
*OpenAICompatProvider
}
ZenProvider wraps OpenAICompatProvider with models.dev pricing data.
func NewZenProvider ¶ added in v0.0.2
func NewZenProvider(apiKey, model string) *ZenProvider
NewZenProvider creates a ZenProvider preconfigured for OpenCode Zen. Zen provides free access to models like GLM 4.7 via opencode.ai. API key is optional: empty for free tier, or set ZEN_API_KEY for paid models.
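A sketch of the free tier:
zen := llm.NewZenProvider("", "glm-4.7-free") // empty key selects the free tier
models, err := zen.ListModels(context.Background())
if err != nil {
	log.Fatal(err)
}
for _, m := range models {
	fmt.Printf("%s ($%.2f in / $%.2f out per 1M tokens)\n", m.ID, m.InputPrice, m.OutputPrice)
}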
func (*ZenProvider) ListModels ¶ added in v0.0.25
func (p *ZenProvider) ListModels(ctx context.Context) ([]ModelInfo, error)
ListModels returns available models with pricing from models.dev.
Source Files ¶
- anthropic.go
- chatgpt.go
- claude_bin.go
- copilot.go
- debug.go
- debug_logger.go
- engine.go
- factory.go
- gemini.go
- gemini_cli.go
- gemini_schema.go
- helpers.go
- message_helpers.go
- mock_provider.go
- models.go
- openai.go
- openai_compat.go
- openrouter.go
- openrouter_models.go
- read_url_tool.go
- responses_api.go
- retry.go
- schema.go
- stream.go
- tools.go
- types.go
- web_search_tool.go
- xai.go
- zen.go