Documentation
¶
Overview ¶
Package providers implements multi-LLM provider support with unified interfaces.
This package provides a common abstraction for chat-based LLM providers including OpenAI, Anthropic Claude, and Google Gemini. It handles:
- Chat completion requests with streaming support
- Tool/function calling with provider-specific formats
- Cost tracking and token usage calculation
- Rate limiting and error handling
All providers implement the Provider interface for basic chat, and the ToolSupport interface for function-calling capabilities.
Index ¶
- func IsValidationAbort(err error) bool
- type ChatRequest
- type ChatResponse
- type ClaudeProvider
- func (p *ClaudeProvider) CalculateCost(tokensIn, tokensOut, cachedTokens int) types.CostInfo
- func (p *ClaudeProvider) Chat(ctx context.Context, req ChatRequest) (ChatResponse, error)
- func (p *ClaudeProvider) ChatStream(ctx context.Context, req ChatRequest) (<-chan StreamChunk, error)
- func (p *ClaudeProvider) Close() error
- func (p *ClaudeProvider) ID() string
- func (p *ClaudeProvider) ShouldIncludeRawOutput() bool
- func (p *ClaudeProvider) SupportsStreaming() bool
- type ClaudeToolProvider
- func (p *ClaudeToolProvider) BuildTooling(descriptors []*ToolDescriptor) (interface{}, error)
- func (p *ClaudeToolProvider) ChatWithTools(ctx context.Context, req ChatRequest, tools interface{}, toolChoice string) (ChatResponse, []types.MessageToolCall, error)
- func (p *ClaudeToolProvider) ContinueWithToolResults(ctx context.Context, req ChatRequest, prior ChatResponse, tools interface{}, ...) (ChatResponse, []types.MessageToolCall, error)
- type ExecutionResult
- type GeminiProvider
- func (p *GeminiProvider) CalculateCost(tokensIn, tokensOut, cachedTokens int) types.CostInfo
- func (p *GeminiProvider) Chat(ctx context.Context, req ChatRequest) (ChatResponse, error)
- func (p *GeminiProvider) ChatStream(ctx context.Context, req ChatRequest) (<-chan StreamChunk, error)
- func (p *GeminiProvider) Close() error
- func (p *GeminiProvider) ID() string
- func (p *GeminiProvider) ShouldIncludeRawOutput() bool
- func (p *GeminiProvider) SupportsStreaming() bool
- type GeminiToolProvider
- func (p *GeminiToolProvider) BuildTooling(descriptors []*ToolDescriptor) (interface{}, error)
- func (p *GeminiToolProvider) ChatWithTools(ctx context.Context, req ChatRequest, tools interface{}, toolChoice string) (ChatResponse, []types.MessageToolCall, error)
- func (p *GeminiToolProvider) ContinueWithToolResults(ctx context.Context, req ChatRequest, prior ChatResponse, tools interface{}, ...) (ChatResponse, []types.MessageToolCall, error)
- type MockProvider
- func (m *MockProvider) CalculateCost(inputTokens, outputTokens, cachedTokens int) types.CostInfo
- func (m *MockProvider) Chat(ctx context.Context, req ChatRequest) (ChatResponse, error)
- func (m *MockProvider) ChatStream(ctx context.Context, req ChatRequest) (<-chan StreamChunk, error)
- func (m *MockProvider) Close() error
- func (m *MockProvider) ID() string
- func (m *MockProvider) ShouldIncludeRawOutput() bool
- func (m *MockProvider) SupportsStreaming() bool
- type OpenAIProvider
- func (p *OpenAIProvider) CalculateCost(tokensIn, tokensOut, cachedTokens int) types.CostInfo
- func (p *OpenAIProvider) Chat(ctx context.Context, req ChatRequest) (ChatResponse, error)
- func (p *OpenAIProvider) ChatStream(ctx context.Context, req ChatRequest) (<-chan StreamChunk, error)
- func (p *OpenAIProvider) Close() error
- func (p *OpenAIProvider) ID() string
- func (p *OpenAIProvider) ShouldIncludeRawOutput() bool
- func (p *OpenAIProvider) SupportsStreaming() bool
- type OpenAIToolProvider
- func (p *OpenAIToolProvider) BuildTooling(descriptors []*ToolDescriptor) (interface{}, error)
- func (p *OpenAIToolProvider) ChatWithTools(ctx context.Context, req ChatRequest, tools interface{}, toolChoice string) (ChatResponse, []types.MessageToolCall, error)
- func (p *OpenAIToolProvider) ContinueWithToolResults(ctx context.Context, req ChatRequest, prior ChatResponse, tools interface{}, ...) (ChatResponse, []types.MessageToolCall, error)
- type Pricing
- type Provider
- type ProviderDefaults
- type ProviderSpec
- type Registry
- type SSEScanner
- type StreamChunk
- type StreamEvent
- type StreamObserver
- type ToolDescriptor
- type ToolResult
- type ToolSupport
- type UnsupportedProviderError
- type ValidationAbortError
Constants ¶
This section is empty.
Variables ¶
This section is empty.
Functions ¶
func IsValidationAbort ¶
IsValidationAbort reports whether an error is a validation abort.
Types ¶
type ChatRequest ¶
type ChatRequest struct {
System string `json:"system"`
Messages []types.Message `json:"messages"`
Temperature float32 `json:"temperature"`
TopP float32 `json:"top_p"`
MaxTokens int `json:"max_tokens"`
Seed *int `json:"seed,omitempty"`
}
ChatRequest represents a request to a chat provider
type ChatResponse ¶
type ChatResponse struct {
Content string `json:"content"`
CostInfo *types.CostInfo `json:"cost_info,omitempty"` // Cost breakdown for this response (includes token counts)
Latency time.Duration `json:"latency"`
Raw []byte `json:"raw,omitempty"`
RawRequest interface{} `json:"raw_request,omitempty"` // Raw API request (for debugging)
ToolCalls []types.MessageToolCall `json:"tool_calls,omitempty"` // Tools called in this response
}
ChatResponse represents a response from a chat provider
type ClaudeProvider ¶
type ClaudeProvider struct {
// contains filtered or unexported fields
}
ClaudeProvider implements the Provider interface for Anthropic Claude
func NewClaudeProvider ¶
func NewClaudeProvider(id, model, baseURL string, defaults ProviderDefaults, includeRawOutput bool) *ClaudeProvider
NewClaudeProvider creates a new Claude provider
func (*ClaudeProvider) CalculateCost ¶
func (p *ClaudeProvider) CalculateCost(tokensIn, tokensOut, cachedTokens int) types.CostInfo
CalculateCost calculates detailed cost breakdown including optional cached tokens
func (*ClaudeProvider) Chat ¶
func (p *ClaudeProvider) Chat(ctx context.Context, req ChatRequest) (ChatResponse, error)
Chat sends a chat request to Claude
func (*ClaudeProvider) ChatStream ¶
func (p *ClaudeProvider) ChatStream(ctx context.Context, req ChatRequest) (<-chan StreamChunk, error)
ChatStream streams a chat response from Claude
func (*ClaudeProvider) Close ¶
func (p *ClaudeProvider) Close() error
Close closes the HTTP client and cleans up idle connections
func (*ClaudeProvider) ShouldIncludeRawOutput ¶
func (p *ClaudeProvider) ShouldIncludeRawOutput() bool
ShouldIncludeRawOutput returns whether to include raw API requests in output
func (*ClaudeProvider) SupportsStreaming ¶
func (p *ClaudeProvider) SupportsStreaming() bool
SupportsStreaming returns true for Claude
type ClaudeToolProvider ¶
type ClaudeToolProvider struct {
*ClaudeProvider
}
ClaudeToolProvider extends ClaudeProvider with tool support
func NewClaudeToolProvider ¶
func NewClaudeToolProvider(id, model, baseURL string, defaults ProviderDefaults, includeRawOutput bool) *ClaudeToolProvider
NewClaudeToolProvider creates a new Claude provider with tool support
func (*ClaudeToolProvider) BuildTooling ¶
func (p *ClaudeToolProvider) BuildTooling(descriptors []*ToolDescriptor) (interface{}, error)
BuildTooling converts tool descriptors to Claude format
func (*ClaudeToolProvider) ChatWithTools ¶
func (p *ClaudeToolProvider) ChatWithTools(ctx context.Context, req ChatRequest, tools interface{}, toolChoice string) (ChatResponse, []types.MessageToolCall, error)
ChatWithTools performs a chat request with tool support
func (*ClaudeToolProvider) ContinueWithToolResults ¶
func (p *ClaudeToolProvider) ContinueWithToolResults(ctx context.Context, req ChatRequest, prior ChatResponse, tools interface{}, toolChoice string, results []types.MessageToolResult) (ChatResponse, []types.MessageToolCall, error)
ContinueWithToolResults continues conversation with tool results
type ExecutionResult ¶
type ExecutionResult interface{}
ExecutionResult is forward-declared as an empty interface to avoid a circular import.
type GeminiProvider ¶
type GeminiProvider struct {
Model string
BaseURL string
ApiKey string
Defaults ProviderDefaults
Client *http.Client
// contains filtered or unexported fields
}
GeminiProvider implements the Provider interface for Google Gemini
func NewGeminiProvider ¶
func NewGeminiProvider(id, model, baseURL string, defaults ProviderDefaults, includeRawOutput bool) *GeminiProvider
NewGeminiProvider creates a new Gemini provider
func (*GeminiProvider) CalculateCost ¶
func (p *GeminiProvider) CalculateCost(tokensIn, tokensOut, cachedTokens int) types.CostInfo
CalculateCost calculates detailed cost breakdown including optional cached tokens
func (*GeminiProvider) Chat ¶
func (p *GeminiProvider) Chat(ctx context.Context, req ChatRequest) (ChatResponse, error)
Chat sends a chat request to Gemini
func (*GeminiProvider) ChatStream ¶
func (p *GeminiProvider) ChatStream(ctx context.Context, req ChatRequest) (<-chan StreamChunk, error)
ChatStream streams a chat response from Gemini
func (*GeminiProvider) Close ¶
func (p *GeminiProvider) Close() error
Close closes the HTTP client and cleans up idle connections
func (*GeminiProvider) ShouldIncludeRawOutput ¶
func (p *GeminiProvider) ShouldIncludeRawOutput() bool
ShouldIncludeRawOutput returns whether to include raw API requests in output
func (*GeminiProvider) SupportsStreaming ¶
func (p *GeminiProvider) SupportsStreaming() bool
SupportsStreaming returns true for Gemini
type GeminiToolProvider ¶
type GeminiToolProvider struct {
*GeminiProvider
// contains filtered or unexported fields
}
GeminiToolProvider extends GeminiProvider with tool support
func NewGeminiToolProvider ¶
func NewGeminiToolProvider(id, model, baseURL string, defaults ProviderDefaults, includeRawOutput bool) *GeminiToolProvider
NewGeminiToolProvider creates a new Gemini provider with tool support
func (*GeminiToolProvider) BuildTooling ¶
func (p *GeminiToolProvider) BuildTooling(descriptors []*ToolDescriptor) (interface{}, error)
BuildTooling converts tool descriptors to Gemini format
func (*GeminiToolProvider) ChatWithTools ¶
func (p *GeminiToolProvider) ChatWithTools(ctx context.Context, req ChatRequest, tools interface{}, toolChoice string) (ChatResponse, []types.MessageToolCall, error)
ChatWithTools performs a chat request with tool support
func (*GeminiToolProvider) ContinueWithToolResults ¶
func (p *GeminiToolProvider) ContinueWithToolResults(ctx context.Context, req ChatRequest, prior ChatResponse, tools interface{}, toolChoice string, results []types.MessageToolResult) (ChatResponse, []types.MessageToolCall, error)
ContinueWithToolResults continues conversation with tool results
type MockProvider ¶
type MockProvider struct {
// contains filtered or unexported fields
}
MockProvider is a simple mock provider for testing and development. It returns canned responses without making any API calls.
func NewMockProvider ¶
func NewMockProvider(id, model string, includeRawOutput bool) *MockProvider
NewMockProvider creates a new mock provider.
func (*MockProvider) CalculateCost ¶
func (m *MockProvider) CalculateCost(inputTokens, outputTokens, cachedTokens int) types.CostInfo
CalculateCost calculates cost breakdown for given token counts.
func (*MockProvider) Chat ¶
func (m *MockProvider) Chat(ctx context.Context, req ChatRequest) (ChatResponse, error)
Chat returns a mock response.
func (*MockProvider) ChatStream ¶
func (m *MockProvider) ChatStream(ctx context.Context, req ChatRequest) (<-chan StreamChunk, error)
ChatStream returns a mock streaming response.
func (*MockProvider) Close ¶
func (m *MockProvider) Close() error
Close is a no-op for the mock provider.
func (*MockProvider) ShouldIncludeRawOutput ¶
func (m *MockProvider) ShouldIncludeRawOutput() bool
ShouldIncludeRawOutput returns whether raw API responses should be included.
func (*MockProvider) SupportsStreaming ¶
func (m *MockProvider) SupportsStreaming() bool
SupportsStreaming indicates whether the provider supports streaming.
type OpenAIProvider ¶
type OpenAIProvider struct {
// contains filtered or unexported fields
}
OpenAIProvider implements the Provider interface for OpenAI
func NewOpenAIProvider ¶
func NewOpenAIProvider(id, model, baseURL string, defaults ProviderDefaults, includeRawOutput bool) *OpenAIProvider
NewOpenAIProvider creates a new OpenAI provider
func (*OpenAIProvider) CalculateCost ¶
func (p *OpenAIProvider) CalculateCost(tokensIn, tokensOut, cachedTokens int) types.CostInfo
CalculateCost calculates detailed cost breakdown including optional cached tokens
func (*OpenAIProvider) Chat ¶
func (p *OpenAIProvider) Chat(ctx context.Context, req ChatRequest) (ChatResponse, error)
Chat sends a chat request to OpenAI
func (*OpenAIProvider) ChatStream ¶
func (p *OpenAIProvider) ChatStream(ctx context.Context, req ChatRequest) (<-chan StreamChunk, error)
ChatStream streams a chat response from OpenAI
func (*OpenAIProvider) Close ¶
func (p *OpenAIProvider) Close() error
Close closes the HTTP client and cleans up idle connections
func (*OpenAIProvider) ShouldIncludeRawOutput ¶
func (p *OpenAIProvider) ShouldIncludeRawOutput() bool
ShouldIncludeRawOutput returns whether to include raw API requests in output
func (*OpenAIProvider) SupportsStreaming ¶
func (p *OpenAIProvider) SupportsStreaming() bool
SupportsStreaming returns true for OpenAI
type OpenAIToolProvider ¶
type OpenAIToolProvider struct {
*OpenAIProvider
}
OpenAIToolProvider extends OpenAIProvider with tool support
func NewOpenAIToolProvider ¶
func NewOpenAIToolProvider(id, model, baseURL string, defaults ProviderDefaults, includeRawOutput bool) *OpenAIToolProvider
NewOpenAIToolProvider creates a new OpenAI provider with tool support
func (*OpenAIToolProvider) BuildTooling ¶
func (p *OpenAIToolProvider) BuildTooling(descriptors []*ToolDescriptor) (interface{}, error)
BuildTooling converts tool descriptors to OpenAI format
func (*OpenAIToolProvider) ChatWithTools ¶
func (p *OpenAIToolProvider) ChatWithTools(ctx context.Context, req ChatRequest, tools interface{}, toolChoice string) (ChatResponse, []types.MessageToolCall, error)
ChatWithTools performs a chat request with tool support
func (*OpenAIToolProvider) ContinueWithToolResults ¶
func (p *OpenAIToolProvider) ContinueWithToolResults(ctx context.Context, req ChatRequest, prior ChatResponse, tools interface{}, toolChoice string, results []types.MessageToolResult) (ChatResponse, []types.MessageToolCall, error)
ContinueWithToolResults continues conversation with tool results
type Provider ¶
type Provider interface {
ID() string
Chat(ctx context.Context, req ChatRequest) (ChatResponse, error)
// Streaming support
ChatStream(ctx context.Context, req ChatRequest) (<-chan StreamChunk, error)
SupportsStreaming() bool
ShouldIncludeRawOutput() bool
Close() error // Close cleans up provider resources (e.g., HTTP connections)
// CalculateCost calculates cost breakdown for given token counts
CalculateCost(inputTokens, outputTokens, cachedTokens int) types.CostInfo
}
Provider interface defines the contract for chat providers
func CreateProviderFromSpec ¶
func CreateProviderFromSpec(spec ProviderSpec) (Provider, error)
CreateProviderFromSpec creates a provider implementation from a spec. Returns an error if the provider type is unsupported.
type ProviderDefaults ¶
ProviderDefaults holds default parameters for providers
type ProviderSpec ¶
type ProviderSpec struct {
ID string
Type string
Model string
BaseURL string
Defaults ProviderDefaults
IncludeRawOutput bool
}
ProviderSpec holds the configuration needed to create a provider instance
type Registry ¶
type Registry struct {
// contains filtered or unexported fields
}
Registry manages available providers
type SSEScanner ¶
type SSEScanner struct {
// contains filtered or unexported fields
}
SSEScanner scans Server-Sent Events (SSE) streams
func NewSSEScanner ¶
func NewSSEScanner(r io.Reader) *SSEScanner
NewSSEScanner creates a new SSE scanner
type StreamChunk ¶
type StreamChunk struct {
// Content is the accumulated content so far
Content string `json:"content"`
// Delta is the new content in this chunk
Delta string `json:"delta"`
// TokenCount is the total number of tokens so far
TokenCount int `json:"token_count"`
// DeltaTokens is the number of tokens in this delta
DeltaTokens int `json:"delta_tokens"`
// ToolCalls contains accumulated tool calls (for assistant messages that invoke tools)
ToolCalls []types.MessageToolCall `json:"tool_calls,omitempty"`
// FinishReason is nil until stream is complete
// Values: "stop", "length", "content_filter", "tool_calls", "error", "validation_failed", "cancelled"
FinishReason *string `json:"finish_reason,omitempty"`
// Error is set if an error occurred during streaming
Error error `json:"error,omitempty"`
// Metadata contains provider-specific metadata
Metadata map[string]interface{} `json:"metadata,omitempty"`
// FinalResult contains the complete execution result (only set in the final chunk)
FinalResult ExecutionResult `json:"final_result,omitempty"`
// CostInfo contains cost breakdown (only present in final chunk when FinishReason != nil)
CostInfo *types.CostInfo `json:"cost_info,omitempty"`
}
StreamChunk represents a batch of tokens with metadata
type StreamEvent ¶
type StreamEvent struct {
// Type is the event type: "chunk", "complete", "error"
Type string `json:"type"`
// Chunk contains the stream chunk data
Chunk *StreamChunk `json:"chunk,omitempty"`
// Error is set for error events
Error error `json:"error,omitempty"`
// Timestamp is when the event occurred
Timestamp time.Time `json:"timestamp"`
}
StreamEvent is sent to observers for monitoring
type StreamObserver ¶
type StreamObserver interface {
OnChunk(chunk StreamChunk)
OnComplete(totalTokens int, duration time.Duration)
OnError(err error)
}
StreamObserver receives stream events for monitoring
type ToolDescriptor ¶
type ToolDescriptor struct {
Name string `json:"name"`
Description string `json:"description"`
InputSchema json.RawMessage `json:"input_schema"`
OutputSchema json.RawMessage `json:"output_schema"`
}
ToolDescriptor represents a tool that can be used by providers
type ToolResult ¶
type ToolResult = types.MessageToolResult
ToolResult represents the result of a tool execution. It is an alias for types.MessageToolResult, provided for provider-specific context.
type ToolSupport ¶
type ToolSupport interface {
Provider // Extends the base Provider interface
// BuildTooling converts tool descriptors to provider-native format
BuildTooling(descriptors []*ToolDescriptor) (interface{}, error)
// ChatWithTools performs a chat request with tool support
ChatWithTools(ctx context.Context, req ChatRequest, tools interface{}, toolChoice string) (ChatResponse, []types.MessageToolCall, error)
// ContinueWithToolResults continues conversation with tool results
// req contains the original messages, prior is the response with tool calls, results are tool outputs
ContinueWithToolResults(ctx context.Context, req ChatRequest, prior ChatResponse, tools interface{}, toolChoice string, results []types.MessageToolResult) (ChatResponse, []types.MessageToolCall, error)
}
ToolSupport interface for providers that support tool/function calling
type UnsupportedProviderError ¶
type UnsupportedProviderError struct {
ProviderType string
}
UnsupportedProviderError is returned when a provider type is not recognized
func (*UnsupportedProviderError) Error ¶
func (e *UnsupportedProviderError) Error() string
type ValidationAbortError ¶
type ValidationAbortError struct {
Reason string
Chunk StreamChunk
}
ValidationAbortError is returned when a streaming validator aborts a stream
func (*ValidationAbortError) Error ¶
func (e *ValidationAbortError) Error() string