Documentation
¶
Overview ¶
Package llm provides LLM backend implementations for the vega package.
Anthropic Backend ¶
The primary backend is Anthropic's Claude API:
llm := llm.NewAnthropic() // Uses ANTHROPIC_API_KEY env var
// Or with custom API key
llm := llm.NewAnthropic(llm.WithAPIKey("sk-..."))
// Or with custom model
llm := llm.NewAnthropic(llm.WithModel("claude-opus-4-20250514"))
Using with Orchestrator ¶
Configure the orchestrator to use the LLM:
llm := llm.NewAnthropic()
orch := vega.NewOrchestrator(vega.WithLLM(llm))
Streaming ¶
The Anthropic backend supports streaming responses:
stream, err := proc.SendStream(ctx, "Tell me a story")
for chunk := range stream.Chunks() {
fmt.Print(chunk)
}
Tool Support ¶
Tools are automatically converted to the Anthropic tool format:
tools := tools.NewTools()
tools.Register("search", searchFunc)
agent := vega.Agent{
Tools: tools,
// ...
}
When the model decides to use a tool, the tool is executed and the result is sent back automatically in a multi-turn conversation.
Rate Limiting ¶
The Anthropic API has rate limits. Configure rate limiting on the orchestrator:
orch := vega.NewOrchestrator(
vega.WithLLM(llm),
vega.WithRateLimits(map[string]vega.RateLimitConfig{
"claude-sonnet-4-20250514": {
RequestsPerMinute: 60,
TokensPerMinute: 100000,
},
}),
)
Implementing Custom Backends ¶
To implement a custom LLM backend, implement the llm.LLM interface:
type LLM interface {
Generate(ctx context.Context, messages []Message, tools []ToolSchema) (*LLMResponse, error)
GenerateStream(ctx context.Context, messages []Message, tools []ToolSchema) (<-chan StreamEvent, error)
}
Index ¶
- Constants
- func CalculateCost(model string, inputTokens, outputTokens int) float64
- type AnthropicLLM
- func (a *AnthropicLLM) Generate(ctx context.Context, messages []Message, tools []ToolSchema) (*LLMResponse, error)
- func (a *AnthropicLLM) GenerateStream(ctx context.Context, messages []Message, tools []ToolSchema) (<-chan StreamEvent, error)
- func (a *AnthropicLLM) ValidateKey(ctx context.Context) error
- type AnthropicOption
- type LLM
- type LLMResponse
- type Message
- type Role
- type StopReason
- type StreamEvent
- type StreamEventType
- type ToolCall
- type ToolSchema
Constants ¶
const (
	DefaultAnthropicTimeout = 5 * time.Minute
	DefaultAnthropicModel   = "claude-sonnet-4-20250514"
	DefaultAnthropicBaseURL = "https://api.anthropic.com"
)
Default Anthropic configuration values.
Variables ¶
This section is empty.
Functions ¶
func CalculateCost ¶
CalculateCost calculates the cost of a request.
Types ¶
type AnthropicLLM ¶
type AnthropicLLM struct {
// contains filtered or unexported fields
}
AnthropicLLM is an LLM implementation using the Anthropic API.
func NewAnthropic ¶
func NewAnthropic(opts ...AnthropicOption) *AnthropicLLM
NewAnthropic creates a new Anthropic LLM client.
func (*AnthropicLLM) Generate ¶
func (a *AnthropicLLM) Generate(ctx context.Context, messages []Message, tools []ToolSchema) (*LLMResponse, error)
Generate sends a request and returns the complete response.
func (*AnthropicLLM) GenerateStream ¶
func (a *AnthropicLLM) GenerateStream(ctx context.Context, messages []Message, tools []ToolSchema) (<-chan StreamEvent, error)
GenerateStream sends a request and returns a channel of streaming events.
func (*AnthropicLLM) ValidateKey ¶
func (a *AnthropicLLM) ValidateKey(ctx context.Context) error
ValidateKey makes a minimal API call to verify the API key is valid. Returns nil on success, or an error describing the failure (empty key, authentication failure, or network/other error).
type AnthropicOption ¶
type AnthropicOption func(*AnthropicLLM)
AnthropicOption configures the Anthropic client.
func WithHTTPClient ¶
func WithHTTPClient(client *http.Client) AnthropicOption
WithHTTPClient sets a custom HTTP client.
type LLM ¶
type LLM interface {
// Generate sends a request and returns the complete response.
Generate(ctx context.Context, messages []Message, tools []ToolSchema) (*LLMResponse, error)
// GenerateStream sends a request and returns a channel of streaming events.
GenerateStream(ctx context.Context, messages []Message, tools []ToolSchema) (<-chan StreamEvent, error)
}
LLM is the interface for language model backends.
type LLMResponse ¶
type LLMResponse struct {
// Content is the text response
Content string
// ToolCalls are any tool calls the model wants to make
ToolCalls []ToolCall
// Token counts
InputTokens int
OutputTokens int
// Cost in USD
CostUSD float64
// Latency in milliseconds
LatencyMs int64
// StopReason indicates why generation stopped
StopReason StopReason
}
LLMResponse is the response from an LLM call.
type StopReason ¶
type StopReason string
StopReason indicates why the LLM stopped generating.
const (
	StopReasonEnd      StopReason = "end_turn"
	StopReasonToolUse  StopReason = "tool_use"
	StopReasonLength   StopReason = "max_tokens"
	StopReasonStop     StopReason = "stop_sequence"
	StopReasonFiltered StopReason = "content_filter"
)
type StreamEvent ¶
type StreamEvent struct {
// Type of event
Type StreamEventType
// Delta is new content for ContentDelta events
Delta string
// ToolCall for ToolCallStart events
ToolCall *ToolCall
// Error if something went wrong
Error error
// InputTokens after message start
InputTokens int
// OutputTokens after message end
OutputTokens int
}
StreamEvent is an event from streaming generation.
type StreamEventType ¶
type StreamEventType string
StreamEventType categorizes stream events.
const (
	StreamEventMessageStart StreamEventType = "message_start"
	StreamEventContentStart StreamEventType = "content_start"
	StreamEventContentDelta StreamEventType = "content_delta"
	StreamEventContentEnd   StreamEventType = "content_end"
	StreamEventToolStart    StreamEventType = "tool_start"
	StreamEventToolDelta    StreamEventType = "tool_delta"
	StreamEventToolEnd      StreamEventType = "tool_end"
	StreamEventMessageEnd   StreamEventType = "message_end"
	StreamEventError        StreamEventType = "error"
)
type ToolCall ¶
type ToolCall struct {
// ID is the unique identifier for this tool call
ID string
// Name is the tool being called
Name string
// Arguments are the parameters passed to the tool
Arguments map[string]any
}
ToolCall represents a tool call from the LLM.
type ToolSchema ¶
type ToolSchema struct {
// Name of the tool
Name string `json:"name"`
// Description of what the tool does
Description string `json:"description"`
// InputSchema is the JSON Schema for parameters
InputSchema map[string]any `json:"input_schema"`
}
ToolSchema describes a tool for the LLM.