Documentation
¶
Overview ¶
Package llm provides LLM provider abstraction for test generation.
This package implements a provider interface supporting multiple LLM backends (Anthropic Claude, OpenAI GPT, Google Gemini, and Groq) with cost optimization features like caching and batching.
Index ¶
- Constants
- Variables
- func GetDefaultModel(providerName string) string
- type AnthropicProvider
- func (p *AnthropicProvider) BatchComplete(ctx context.Context, reqs []CompletionRequest) ([]*CompletionResponse, error)
- func (p *AnthropicProvider) Complete(ctx context.Context, req CompletionRequest) (*CompletionResponse, error)
- func (p *AnthropicProvider) Configure(config ProviderConfig) error
- func (p *AnthropicProvider) CountTokens(text string) int
- func (p *AnthropicProvider) GetUsage() *UsageMetrics
- func (p *AnthropicProvider) Name() string
- type Batcher
- type Cache
- func (c *Cache) Clear()
- func (c *Cache) GenerateKey(prompt string, systemRole string, model string) string
- func (c *Cache) Get(key string) (*CompletionResponse, bool)
- func (c *Cache) Set(key string, response *CompletionResponse)
- func (c *Cache) Stats() (size int, hits int, misses int, hitRate float64)
- type CachedProvider
- type CompletionRequest
- type CompletionResponse
- type GeminiProvider
- func (p *GeminiProvider) BatchComplete(ctx context.Context, reqs []CompletionRequest) ([]*CompletionResponse, error)
- func (p *GeminiProvider) Complete(ctx context.Context, req CompletionRequest) (*CompletionResponse, error)
- func (p *GeminiProvider) Configure(config ProviderConfig) error
- func (p *GeminiProvider) CountTokens(text string) int
- func (p *GeminiProvider) GetUsage() *UsageMetrics
- func (p *GeminiProvider) Name() string
- type GroqProvider
- func (p *GroqProvider) BatchComplete(ctx context.Context, reqs []CompletionRequest) ([]*CompletionResponse, error)
- func (p *GroqProvider) Complete(ctx context.Context, req CompletionRequest) (*CompletionResponse, error)
- func (p *GroqProvider) Configure(config ProviderConfig) error
- func (p *GroqProvider) CountTokens(text string) int
- func (p *GroqProvider) GetUsage() *UsageMetrics
- func (p *GroqProvider) Name() string
- type Message
- type OpenAIProvider
- func (p *OpenAIProvider) BatchComplete(ctx context.Context, reqs []CompletionRequest) ([]*CompletionResponse, error)
- func (p *OpenAIProvider) Complete(ctx context.Context, req CompletionRequest) (*CompletionResponse, error)
- func (p *OpenAIProvider) Configure(config ProviderConfig) error
- func (p *OpenAIProvider) CountTokens(text string) int
- func (p *OpenAIProvider) GetUsage() *UsageMetrics
- func (p *OpenAIProvider) Name() string
- type Provider
- type ProviderConfig
- type RateLimiter
- type UsageMetrics
Constants ¶
const ( AnthropicDefaultModel = "claude-3-5-sonnet-20241022" OpenAIDefaultModel = "gpt-4-turbo-preview" GeminiDefaultModel = "gemini-1.5-pro" GroqDefaultModel = "llama-3.3-70b-versatile" )
Default model identifiers for each supported provider.
Variables ¶
var ( ErrNoAPIKey = errors.New("API key not configured") ErrRateLimited = errors.New("rate limited by provider") ErrContextLength = errors.New("context length exceeded") ErrInvalidModel = errors.New("invalid model specified") )
Common errors
Functions ¶
func GetDefaultModel ¶
GetDefaultModel returns the default model for a provider
Types ¶
type AnthropicProvider ¶
type AnthropicProvider struct {
// contains filtered or unexported fields
}
AnthropicProvider implements the Provider interface for Anthropic Claude
func NewAnthropicProvider ¶
func NewAnthropicProvider() *AnthropicProvider
NewAnthropicProvider creates a new Anthropic provider
func (*AnthropicProvider) BatchComplete ¶
func (p *AnthropicProvider) BatchComplete(ctx context.Context, reqs []CompletionRequest) ([]*CompletionResponse, error)
BatchComplete processes multiple requests
func (*AnthropicProvider) Complete ¶
func (p *AnthropicProvider) Complete(ctx context.Context, req CompletionRequest) (*CompletionResponse, error)
Complete sends a completion request to Anthropic
func (*AnthropicProvider) Configure ¶
func (p *AnthropicProvider) Configure(config ProviderConfig) error
Configure sets up the Anthropic provider
func (*AnthropicProvider) CountTokens ¶
func (p *AnthropicProvider) CountTokens(text string) int
CountTokens estimates token count (rough approximation)
func (*AnthropicProvider) GetUsage ¶
func (p *AnthropicProvider) GetUsage() *UsageMetrics
GetUsage returns usage metrics
func (*AnthropicProvider) Name ¶
func (p *AnthropicProvider) Name() string
Name returns the provider name
type Batcher ¶
type Batcher struct {
// contains filtered or unexported fields
}
Batcher batches multiple requests for efficiency
func NewBatcher ¶
NewBatcher creates a request batcher
func (*Batcher) Flush ¶
func (b *Batcher) Flush(ctx context.Context) ([]*CompletionResponse, error)
Flush processes all pending requests
func (*Batcher) GetBatchSize ¶
GetBatchSize returns the configured batch size
func (*Batcher) PendingCount ¶
PendingCount returns the number of pending requests
type Cache ¶
type Cache struct {
// contains filtered or unexported fields
}
Cache provides semantic caching for LLM responses
func (*Cache) GenerateKey ¶
GenerateKey creates a cache key from the request parameters
func (*Cache) Get ¶
func (c *Cache) Get(key string) (*CompletionResponse, bool)
Get retrieves a cached response
func (*Cache) Set ¶
func (c *Cache) Set(key string, response *CompletionResponse)
Set stores a response in the cache
type CachedProvider ¶
type CachedProvider struct {
// contains filtered or unexported fields
}
CachedProvider wraps a Provider with caching
func NewCachedProvider ¶
func NewCachedProvider(provider Provider, cache *Cache) *CachedProvider
NewCachedProvider creates a provider wrapper with caching
func (*CachedProvider) GetCache ¶
func (p *CachedProvider) GetCache() *Cache
GetCache returns the underlying cache
func (*CachedProvider) GetProvider ¶
func (p *CachedProvider) GetProvider() Provider
GetProvider returns the underlying provider
type CompletionRequest ¶
type CompletionRequest struct {
Prompt string
SystemRole string
MaxTokens int
Temperature float32
Seed *int // For reproducibility
}
CompletionRequest represents a completion request
type CompletionResponse ¶
type CompletionResponse struct {
Content string
TokensInput int
TokensOutput int
Cached bool
Model string
FinishReason string
}
CompletionResponse represents a completion response
type GeminiProvider ¶
type GeminiProvider struct {
// contains filtered or unexported fields
}
GeminiProvider implements the Provider interface for Google Gemini
func NewGeminiProvider ¶
func NewGeminiProvider() *GeminiProvider
NewGeminiProvider creates a new Gemini provider
func (*GeminiProvider) BatchComplete ¶
func (p *GeminiProvider) BatchComplete(ctx context.Context, reqs []CompletionRequest) ([]*CompletionResponse, error)
BatchComplete processes multiple requests
func (*GeminiProvider) Complete ¶
func (p *GeminiProvider) Complete(ctx context.Context, req CompletionRequest) (*CompletionResponse, error)
Complete sends a completion request to Gemini
func (*GeminiProvider) Configure ¶
func (p *GeminiProvider) Configure(config ProviderConfig) error
Configure sets up the Gemini provider
func (*GeminiProvider) CountTokens ¶
func (p *GeminiProvider) CountTokens(text string) int
CountTokens estimates token count (rough approximation)
func (*GeminiProvider) GetUsage ¶
func (p *GeminiProvider) GetUsage() *UsageMetrics
GetUsage returns usage metrics
type GroqProvider ¶
type GroqProvider struct {
// contains filtered or unexported fields
}
GroqProvider implements the Provider interface for Groq Cloud
func NewGroqProvider ¶
func NewGroqProvider() *GroqProvider
NewGroqProvider creates a new Groq provider
func (*GroqProvider) BatchComplete ¶
func (p *GroqProvider) BatchComplete(ctx context.Context, reqs []CompletionRequest) ([]*CompletionResponse, error)
BatchComplete processes multiple requests
func (*GroqProvider) Complete ¶
func (p *GroqProvider) Complete(ctx context.Context, req CompletionRequest) (*CompletionResponse, error)
Complete sends a completion request to Groq
func (*GroqProvider) Configure ¶
func (p *GroqProvider) Configure(config ProviderConfig) error
Configure sets up the Groq provider
func (*GroqProvider) CountTokens ¶
func (p *GroqProvider) CountTokens(text string) int
CountTokens estimates token count (rough approximation)
func (*GroqProvider) GetUsage ¶
func (p *GroqProvider) GetUsage() *UsageMetrics
GetUsage returns usage metrics
type OpenAIProvider ¶
type OpenAIProvider struct {
// contains filtered or unexported fields
}
OpenAIProvider implements the Provider interface for OpenAI
func NewOpenAIProvider ¶
func NewOpenAIProvider() *OpenAIProvider
NewOpenAIProvider creates a new OpenAI provider
func (*OpenAIProvider) BatchComplete ¶
func (p *OpenAIProvider) BatchComplete(ctx context.Context, reqs []CompletionRequest) ([]*CompletionResponse, error)
BatchComplete processes multiple requests
func (*OpenAIProvider) Complete ¶
func (p *OpenAIProvider) Complete(ctx context.Context, req CompletionRequest) (*CompletionResponse, error)
Complete sends a completion request to OpenAI
func (*OpenAIProvider) Configure ¶
func (p *OpenAIProvider) Configure(config ProviderConfig) error
Configure sets up the OpenAI provider
func (*OpenAIProvider) CountTokens ¶
func (p *OpenAIProvider) CountTokens(text string) int
CountTokens estimates token count
func (*OpenAIProvider) GetUsage ¶
func (p *OpenAIProvider) GetUsage() *UsageMetrics
GetUsage returns usage metrics
type Provider ¶
type Provider interface {
// Name returns the provider name (e.g., "anthropic", "openai")
Name() string
// Configure sets up the provider with credentials
Configure(config ProviderConfig) error
// Complete sends a prompt and returns a completion
Complete(ctx context.Context, req CompletionRequest) (*CompletionResponse, error)
// BatchComplete processes multiple prompts
BatchComplete(ctx context.Context, reqs []CompletionRequest) ([]*CompletionResponse, error)
// CountTokens estimates token count for text
CountTokens(text string) int
// GetUsage returns usage metrics
GetUsage() *UsageMetrics
}
Provider defines the interface for LLM providers
type ProviderConfig ¶
type ProviderConfig struct {
APIKey string
Model string
MaxTokens int
Temperature float32
BaseURL string // Optional custom endpoint
}
ProviderConfig contains provider configuration
type RateLimiter ¶
type RateLimiter struct {
// contains filtered or unexported fields
}
RateLimiter controls request rate to LLM providers
func NewRateLimiter ¶
func NewRateLimiter(requestsPerMinute int) *RateLimiter
NewRateLimiter creates a rate limiter with the given requests per minute