Documentation ¶
Overview ¶
Package llm provides LLM client implementations.
Index ¶
- Constants
- func EstimateTokens(text string) int
- type AnthropicClient
- func (c *AnthropicClient) Chat(ctx context.Context, model string, messages []Message, tools []map[string]any) (*ChatResponse, error)
- func (c *AnthropicClient) ChatStream(ctx context.Context, model string, messages []Message, tools []map[string]any, ...) (*ChatResponse, error)
- func (c *AnthropicClient) Ping(ctx context.Context) error
- type ChatRequest
- type ChatResponse
- type Client
- type Message
- type MultiClient
- func (m *MultiClient) AddModel(modelName, providerName string)
- func (m *MultiClient) AddProvider(name string, client Client)
- func (m *MultiClient) Chat(ctx context.Context, model string, messages []Message, tools []map[string]any) (*ChatResponse, error)
- func (m *MultiClient) ChatStream(ctx context.Context, model string, messages []Message, tools []map[string]any, ...) (*ChatResponse, error)
- func (m *MultiClient) Ping(ctx context.Context) error
- type OllamaClient
- func (c *OllamaClient) Chat(ctx context.Context, model string, messages []Message, tools []map[string]any) (*ChatResponse, error)
- func (c *OllamaClient) ChatStream(ctx context.Context, model string, messages []Message, tools []map[string]any, ...) (*ChatResponse, error)
- func (c *OllamaClient) IsReady() bool
- func (c *OllamaClient) ListModels(ctx context.Context) ([]string, error)
- func (c *OllamaClient) Ping(ctx context.Context) error
- func (c *OllamaClient) SetWatcher(w readyChecker)
- type Options
- type StreamCallback
- type StreamEvent
- type StreamEventKind
- type ToolCall
Constants ¶
const LevelTrace = slog.Level(-8)
LevelTrace is below Debug, used for wire-level payload logging.
Variables ¶
This section is empty.
Functions ¶
func EstimateTokens ¶ added in v0.8.0
func EstimateTokens(text string) int
EstimateTokens returns a rough token count estimate for English text. Rule of thumb: ~4 characters per token.
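A minimal usage sketch, assuming this package is imported as llm; the 4096-token budget is illustrative:

	// Rough budget check before sending a prompt.
	prompt := "Summarize the following document in three bullet points."
	if llm.EstimateTokens(prompt) > 4096 {
		// Trim or chunk the prompt before calling Chat.
	}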
Types ¶
type AnthropicClient ¶
type AnthropicClient struct {
// contains filtered or unexported fields
}
AnthropicClient is a client for the Anthropic Messages API.
func NewAnthropicClient ¶
func NewAnthropicClient(apiKey string, logger *slog.Logger) *AnthropicClient
NewAnthropicClient creates a new Anthropic client.
func (*AnthropicClient) Chat ¶
func (c *AnthropicClient) Chat(ctx context.Context, model string, messages []Message, tools []map[string]any) (*ChatResponse, error)
Chat sends a non-streaming chat completion request.
func (*AnthropicClient) ChatStream ¶
func (c *AnthropicClient) ChatStream(ctx context.Context, model string, messages []Message, tools []map[string]any, callback StreamCallback) (*ChatResponse, error)
ChatStream sends a chat request, optionally streaming tokens via callback.
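A minimal one-shot sketch, assuming the usual imports (context, fmt, os, log/slog) and a ctx in scope; the model identifier is illustrative, not prescribed by this package:

	client := llm.NewAnthropicClient(os.Getenv("ANTHROPIC_API_KEY"), slog.Default())

	resp, err := client.Chat(ctx, "claude-sonnet-4-20250514", []llm.Message{
		{Role: "user", Content: "Hello!"},
	}, nil)
	if err != nil {
		return err
	}
	fmt.Println(resp.Message.Content)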
type ChatRequest ¶
type ChatRequest struct {
Model string `json:"model"`
Messages []Message `json:"messages"`
Stream bool `json:"stream"`
Tools []map[string]any `json:"tools,omitempty"`
Options *Options `json:"options,omitempty"`
}
ChatRequest is the request format for Ollama chat API.
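For reference, a request like the following (model name illustrative) marshals to the Ollama wire format shown in the comment, with the omitempty fields dropped:

	req := llm.ChatRequest{
		Model:    "llama3.1",
		Messages: []llm.Message{{Role: "user", Content: "Hi"}},
		Stream:   false,
		Options:  &llm.Options{Temperature: 0.2},
	}
	body, err := json.Marshal(req)
	// body: {"model":"llama3.1","messages":[{"role":"user","content":"Hi"}],
	//        "stream":false,"options":{"temperature":0.2}}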
type ChatResponse ¶
type ChatResponse struct {
Model string
CreatedAt time.Time
Message Message
Done bool
// Token usage (provider-neutral)
InputTokens int
OutputTokens int
// Timing (populated when available)
TotalDuration time.Duration
LoadDuration time.Duration
EvalDuration time.Duration
}
ChatResponse is the unified response from any LLM provider. All fields use proper Go types — wire format conversion happens at provider boundaries (ollama.go, anthropic.go).
type Client ¶
type Client interface {
// Chat sends a chat completion request and returns the response.
Chat(ctx context.Context, model string, messages []Message, tools []map[string]any) (*ChatResponse, error)
// ChatStream sends a streaming chat request. If callback is non-nil, tokens are streamed to it.
ChatStream(ctx context.Context, model string, messages []Message, tools []map[string]any, callback StreamCallback) (*ChatResponse, error)
// Ping checks if the provider is reachable.
Ping(ctx context.Context) error
}
Client is the interface that all LLM providers must implement.
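Because every provider satisfies this interface, calling code can stay provider-agnostic. A sketch:

	// ask works with any llm.Client, so the same code runs against
	// an OllamaClient, an AnthropicClient, or a MultiClient.
	func ask(ctx context.Context, c llm.Client, model, prompt string) (string, error) {
		resp, err := c.Chat(ctx, model, []llm.Message{{Role: "user", Content: prompt}}, nil)
		if err != nil {
			return "", err
		}
		return resp.Message.Content, nil
	}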
type Message ¶
type Message struct {
Role string `json:"role"`
Content string `json:"content"`
ToolCalls []ToolCall `json:"tool_calls,omitempty"`
ToolCallID string `json:"tool_call_id,omitempty"` // For tool responses
}
Message represents a chat message for the LLM.
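A typical multi-turn history is a slice of Messages replayed in full on each request:

	messages := []llm.Message{
		{Role: "user", Content: "What is a goroutine?"},
		{Role: "assistant", Content: "A lightweight thread managed by the Go runtime."},
		{Role: "user", Content: "How do I start one?"},
	}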
type MultiClient ¶
type MultiClient struct {
// contains filtered or unexported fields
}
MultiClient routes requests to the appropriate provider based on model name.
func NewMultiClient ¶
func NewMultiClient(fallback Client) *MultiClient
NewMultiClient creates a client that routes to multiple providers.
func (*MultiClient) AddModel ¶
func (m *MultiClient) AddModel(modelName, providerName string)
AddModel maps a model name to a provider.
func (*MultiClient) AddProvider ¶
func (m *MultiClient) AddProvider(name string, client Client)
AddProvider registers a client for a provider name.
func (*MultiClient) Chat ¶
func (m *MultiClient) Chat(ctx context.Context, model string, messages []Message, tools []map[string]any) (*ChatResponse, error)
Chat sends a request to the appropriate provider for the model.
func (*MultiClient) ChatStream ¶
func (m *MultiClient) ChatStream(ctx context.Context, model string, messages []Message, tools []map[string]any, callback StreamCallback) (*ChatResponse, error)
ChatStream sends a streaming request to the appropriate provider.
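A wiring sketch, assuming ctx and messages are in scope. That unmapped models are routed to the constructor's fallback client is an assumption based on the NewMultiClient signature:

	ollama := llm.NewOllamaClient("http://localhost:11434", slog.Default())
	anthropic := llm.NewAnthropicClient(os.Getenv("ANTHROPIC_API_KEY"), slog.Default())

	mc := llm.NewMultiClient(ollama) // assumed: unmapped models fall back to Ollama
	mc.AddProvider("anthropic", anthropic)
	mc.AddModel("claude-sonnet", "anthropic")

	// "claude-sonnet" is routed to the Anthropic provider by name.
	resp, err := mc.Chat(ctx, "claude-sonnet", messages, nil)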
type OllamaClient ¶
type OllamaClient struct {
// contains filtered or unexported fields
}
OllamaClient is a client for the Ollama API.
func NewOllamaClient ¶
func NewOllamaClient(baseURL string, logger *slog.Logger) *OllamaClient
NewOllamaClient creates a new Ollama client.
func (*OllamaClient) Chat ¶
func (c *OllamaClient) Chat(ctx context.Context, model string, messages []Message, tools []map[string]any) (*ChatResponse, error)
Chat sends a chat completion request to Ollama.
func (*OllamaClient) ChatStream ¶
func (c *OllamaClient) ChatStream(ctx context.Context, model string, messages []Message, tools []map[string]any, callback StreamCallback) (*ChatResponse, error)
ChatStream sends a streaming chat request to Ollama. If callback is non-nil, tokens are streamed to it.
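A streaming sketch that prints tokens as they arrive, assuming ctx and messages are in scope; the base URL is Ollama's default and the model name is illustrative:

	client := llm.NewOllamaClient("http://localhost:11434", slog.Default())

	resp, err := client.ChatStream(ctx, "llama3.1", messages, nil,
		func(ev llm.StreamEvent) {
			if ev.Kind == llm.KindToken {
				fmt.Print(ev.Token) // incremental text as it is generated
			}
		})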
func (*OllamaClient) IsReady ¶ added in v0.5.0
func (c *OllamaClient) IsReady() bool
IsReady reports whether Ollama is currently reachable. Returns true if no watcher is configured (backward compatible).
func (*OllamaClient) ListModels ¶
func (c *OllamaClient) ListModels(ctx context.Context) ([]string, error)
ListModels returns available models.
func (*OllamaClient) Ping ¶
func (c *OllamaClient) Ping(ctx context.Context) error
Ping checks if Ollama is reachable.
func (*OllamaClient) SetWatcher ¶ added in v0.5.0
func (c *OllamaClient) SetWatcher(w readyChecker)
SetWatcher sets the connection watcher for health status queries.
type Options ¶
type Options struct {
Temperature float64 `json:"temperature,omitempty"`
NumPredict int `json:"num_predict,omitempty"`
}
Options are model parameters.
type StreamCallback ¶
type StreamCallback func(event StreamEvent)
StreamCallback receives streaming events. For backward compatibility, pure-text consumers can check event.Kind == KindToken.
type StreamEvent ¶
type StreamEvent struct {
Kind StreamEventKind
// Token is set for KindToken events.
Token string
// ToolCall is set for KindToolCallStart events.
ToolCall *ToolCall
// ToolName and ToolResult are set for KindToolCallDone events.
ToolName string
ToolResult string
ToolError string
// Response is set for KindDone events (final summary).
Response *ChatResponse
}
StreamEvent represents a single event in a streaming response. Consumers switch on Kind to determine what data is available.
type StreamEventKind ¶
type StreamEventKind int
StreamEventKind identifies the type of stream event.
const (
	// KindToken is an incremental text token from the model.
	KindToken StreamEventKind = iota
	// KindToolCallStart fires when the model invokes a tool.
	KindToolCallStart
	// KindToolCallDone fires when a tool execution completes.
	KindToolCallDone
	// KindDone signals the stream is complete. Response carries final metadata.
	KindDone
)
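A sketch of a callback that switches on Kind, per the StreamEvent documentation above:

	func handle(ev llm.StreamEvent) {
		switch ev.Kind {
		case llm.KindToken:
			fmt.Print(ev.Token)
		case llm.KindToolCallStart:
			fmt.Printf("\n[calling %s]\n", ev.ToolCall.Function.Name)
		case llm.KindToolCallDone:
			if ev.ToolError != "" {
				fmt.Printf("[%s failed: %s]\n", ev.ToolName, ev.ToolError)
			} else {
				fmt.Printf("[%s -> %s]\n", ev.ToolName, ev.ToolResult)
			}
		case llm.KindDone:
			fmt.Printf("\n(%d output tokens)\n", ev.Response.OutputTokens)
		}
	}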
type ToolCall ¶
type ToolCall struct {
ID string `json:"id,omitempty"` // Provider-assigned ID (required by Anthropic for tool_result correlation)
Function struct {
Name string `json:"name"`
Arguments map[string]any `json:"arguments"`
} `json:"function"`
}
ToolCall represents a tool call from the model.
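A sketch of the tool-call round trip. The "tool" role string and the append pattern follow common chat-API conventions rather than anything documented here, and runTool is a hypothetical executor for your own tool registry:

	// resp.Message is the assistant turn that requested the tool.
	call := resp.Message.ToolCalls[0]
	result := runTool(call.Function.Name, call.Function.Arguments)

	messages = append(messages, resp.Message, llm.Message{
		Role:       "tool",
		Content:    result,
		ToolCallID: call.ID, // correlates the result (required by Anthropic)
	})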