llm

package
v0.1.9
Published: Feb 5, 2026 License: Apache-2.0 Imports: 12 Imported by: 0

Documentation

Overview

Package llm provides LLM provider abstraction for test generation.

This package implements a provider interface supporting multiple LLM backends (Anthropic Claude, OpenAI GPT, Google Gemini, and Groq) with cost-optimization features such as response caching, request batching, and rate limiting.

Constants

const (
	AnthropicDefaultModel = "claude-3-5-sonnet-20241022"
	OpenAIDefaultModel    = "gpt-4-turbo-preview"
	GeminiDefaultModel    = "gemini-1.5-pro"
	GroqDefaultModel      = "llama-3.3-70b-versatile"
)

Default models for each provider

Variables

var (
	ErrNoAPIKey      = errors.New("API key not configured")
	ErrRateLimited   = errors.New("rate limited by provider")
	ErrContextLength = errors.New("context length exceeded")
	ErrInvalidModel  = errors.New("invalid model specified")
)

Common errors

Functions

func GetDefaultModel

func GetDefaultModel(providerName string) string

GetDefaultModel returns the default model for a provider
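
A minimal lookup sketch. The key "anthropic" is assumed to match the name returned by the corresponding provider's Name method; this listing does not specify what GetDefaultModel returns for an unknown provider name.

model := llm.GetDefaultModel("anthropic") // assumed to return llm.AnthropicDefaultModel
fmt.Println(model)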

Types

type AnthropicProvider

type AnthropicProvider struct {
	// contains filtered or unexported fields
}

AnthropicProvider implements the Provider interface for Anthropic Claude

func NewAnthropicProvider

func NewAnthropicProvider() *AnthropicProvider

NewAnthropicProvider creates a new Anthropic provider

func (*AnthropicProvider) BatchComplete

func (p *AnthropicProvider) BatchComplete(ctx context.Context, reqs []CompletionRequest) ([]*CompletionResponse, error)

BatchComplete processes multiple requests

func (*AnthropicProvider) Complete

func (p *AnthropicProvider) Complete(ctx context.Context, req CompletionRequest) (*CompletionResponse, error)

Complete sends a completion request to Anthropic

func (*AnthropicProvider) Configure

func (p *AnthropicProvider) Configure(config ProviderConfig) error

Configure sets up the Anthropic provider
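
A minimal configuration sketch, assuming the API key is supplied via an ANTHROPIC_API_KEY environment variable (the package does not mandate any particular source):

p := llm.NewAnthropicProvider()
err := p.Configure(llm.ProviderConfig{
	APIKey:      os.Getenv("ANTHROPIC_API_KEY"), // assumed variable name
	Model:       llm.AnthropicDefaultModel,
	MaxTokens:   1024,
	Temperature: 0.2,
})
if err != nil {
	log.Fatal(err) // plausibly llm.ErrNoAPIKey when the key is empty (assumed)
}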

func (*AnthropicProvider) CountTokens

func (p *AnthropicProvider) CountTokens(text string) int

CountTokens estimates token count (rough approximation)

func (*AnthropicProvider) GetUsage

func (p *AnthropicProvider) GetUsage() *UsageMetrics

GetUsage returns usage metrics

func (*AnthropicProvider) Name

func (p *AnthropicProvider) Name() string

Name returns the provider name

type Batcher

type Batcher struct {
	// contains filtered or unexported fields
}

Batcher batches multiple requests for efficiency

func NewBatcher

func NewBatcher(provider Provider, batchSize int, flushTimeout time.Duration) *Batcher

NewBatcher creates a request batcher

func (*Batcher) Add

func (b *Batcher) Add(req CompletionRequest)

Add adds a request to the batch

func (*Batcher) Flush

func (b *Batcher) Flush(ctx context.Context) ([]*CompletionResponse, error)

Flush processes all pending requests
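
A sketch of explicit batching; p is assumed to be a configured Provider. Whether flushTimeout also triggers an automatic flush is not specified in this listing, so the sketch calls Flush directly.

b := llm.NewBatcher(p, 10, 2*time.Second)
b.Add(llm.CompletionRequest{Prompt: "Write a table-driven test for Sum.", MaxTokens: 512})
b.Add(llm.CompletionRequest{Prompt: "Write a benchmark for Sum.", MaxTokens: 512})

resps, err := b.Flush(context.Background())
if err != nil {
	log.Fatal(err)
}
for _, r := range resps {
	fmt.Println(r.Model, r.TokensOutput)
}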

func (*Batcher) GetBatchSize

func (b *Batcher) GetBatchSize() int

GetBatchSize returns the configured batch size

func (*Batcher) PendingCount

func (b *Batcher) PendingCount() int

PendingCount returns the number of pending requests

type Cache

type Cache struct {
	// contains filtered or unexported fields
}

Cache provides semantic caching for LLM responses

func NewCache

func NewCache(maxSize int) *Cache

NewCache creates a new cache with the given maximum size

func (*Cache) Clear

func (c *Cache) Clear()

Clear removes all entries from the cache

func (*Cache) GenerateKey

func (c *Cache) GenerateKey(prompt string, systemRole string, model string) string

GenerateKey creates a cache key from the request parameters

func (*Cache) Get

func (c *Cache) Get(key string) (*CompletionResponse, bool)

Get retrieves a cached response

func (*Cache) Set

func (c *Cache) Set(key string, response *CompletionResponse)

Set stores a response in the cache

func (*Cache) Stats

func (c *Cache) Stats() (size int, hits int, misses int, hitRate float64)

Stats returns cache statistics
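
A sketch of the cache-aside pattern these methods support; p and ctx are assumed to be a configured Provider and a context.Context already in scope.

c := llm.NewCache(1000) // eviction behavior beyond the size cap is unspecified
key := c.GenerateKey("Write a test for Sum.", "You are a Go testing expert.", llm.AnthropicDefaultModel)

resp, ok := c.Get(key)
if !ok {
	var err error
	resp, err = p.Complete(ctx, llm.CompletionRequest{
		Prompt:     "Write a test for Sum.",
		SystemRole: "You are a Go testing expert.",
	})
	if err != nil {
		log.Fatal(err)
	}
	c.Set(key, resp)
}

size, hits, misses, hitRate := c.Stats()
fmt.Printf("cache: size=%d hits=%d misses=%d hitRate=%.2f\n", size, hits, misses, hitRate)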

type CachedProvider

type CachedProvider struct {
	// contains filtered or unexported fields
}

CachedProvider wraps a Provider with caching

func NewCachedProvider

func NewCachedProvider(provider Provider, cache *Cache) *CachedProvider

NewCachedProvider creates a provider wrapper with caching

func (*CachedProvider) GetCache

func (p *CachedProvider) GetCache() *Cache

GetCache returns the underlying cache

func (*CachedProvider) GetProvider

func (p *CachedProvider) GetProvider() Provider

GetProvider returns the underlying provider
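
A construction sketch. Whether CachedProvider itself satisfies the Provider interface is not shown in this listing, so only the documented accessors are used; p is any configured Provider.

cached := llm.NewCachedProvider(p, llm.NewCache(500))

fmt.Println("wrapping:", cached.GetProvider().Name())
size, hits, misses, hitRate := cached.GetCache().Stats()
fmt.Printf("cache: size=%d hits=%d misses=%d hitRate=%.2f\n", size, hits, misses, hitRate)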

type CompletionRequest

type CompletionRequest struct {
	Prompt      string
	SystemRole  string
	MaxTokens   int
	Temperature float32
	Seed        *int // For reproducibility
}

CompletionRequest represents a completion request
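
Because Seed is a *int, a reproducible request takes the address of a local variable; a nil Seed presumably leaves sampling non-deterministic.

seed := 42
req := llm.CompletionRequest{
	Prompt:      "Generate a unit test for ParseConfig.",
	SystemRole:  "You are a Go testing expert.",
	MaxTokens:   1024,
	Temperature: 0,
	Seed:        &seed, // fixed seed for reproducibility
}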

type CompletionResponse

type CompletionResponse struct {
	Content      string
	TokensInput  int
	TokensOutput int
	Cached       bool
	Model        string
	FinishReason string
}

CompletionResponse represents a completion response

type GeminiProvider

type GeminiProvider struct {
	// contains filtered or unexported fields
}

GeminiProvider implements the Provider interface for Google Gemini

func NewGeminiProvider

func NewGeminiProvider() *GeminiProvider

NewGeminiProvider creates a new Gemini provider

func (*GeminiProvider) BatchComplete

func (p *GeminiProvider) BatchComplete(ctx context.Context, reqs []CompletionRequest) ([]*CompletionResponse, error)

BatchComplete processes multiple requests

func (*GeminiProvider) Complete

func (p *GeminiProvider) Complete(ctx context.Context, req CompletionRequest) (*CompletionResponse, error)

Complete sends a completion request to Gemini

func (*GeminiProvider) Configure

func (p *GeminiProvider) Configure(config ProviderConfig) error

Configure sets up the Gemini provider

func (*GeminiProvider) CountTokens

func (p *GeminiProvider) CountTokens(text string) int

CountTokens estimates token count (rough approximation)

func (*GeminiProvider) GetUsage

func (p *GeminiProvider) GetUsage() *UsageMetrics

GetUsage returns usage metrics

func (*GeminiProvider) Name

func (p *GeminiProvider) Name() string

Name returns the provider name

type GroqProvider

type GroqProvider struct {
	// contains filtered or unexported fields
}

GroqProvider implements the Provider interface for Groq Cloud

func NewGroqProvider

func NewGroqProvider() *GroqProvider

NewGroqProvider creates a new Groq provider

func (*GroqProvider) BatchComplete

func (p *GroqProvider) BatchComplete(ctx context.Context, reqs []CompletionRequest) ([]*CompletionResponse, error)

BatchComplete processes multiple requests

func (*GroqProvider) Complete

func (p *GroqProvider) Complete(ctx context.Context, req CompletionRequest) (*CompletionResponse, error)

Complete sends a completion request to Groq

func (*GroqProvider) Configure

func (p *GroqProvider) Configure(config ProviderConfig) error

Configure sets up the Groq provider

func (*GroqProvider) CountTokens

func (p *GroqProvider) CountTokens(text string) int

CountTokens estimates token count (rough approximation)

func (*GroqProvider) GetUsage

func (p *GroqProvider) GetUsage() *UsageMetrics

GetUsage returns usage metrics

func (*GroqProvider) Name

func (p *GroqProvider) Name() string

Name returns the provider name

type Message

type Message struct {
	Role    string `json:"role"`
	Content string `json:"content"`
}

Message represents a chat message

type OpenAIProvider

type OpenAIProvider struct {
	// contains filtered or unexported fields
}

OpenAIProvider implements the Provider interface for OpenAI

func NewOpenAIProvider

func NewOpenAIProvider() *OpenAIProvider

NewOpenAIProvider creates a new OpenAI provider

func (*OpenAIProvider) BatchComplete

func (p *OpenAIProvider) BatchComplete(ctx context.Context, reqs []CompletionRequest) ([]*CompletionResponse, error)

BatchComplete processes multiple requests

func (*OpenAIProvider) Complete

func (p *OpenAIProvider) Complete(ctx context.Context, req CompletionRequest) (*CompletionResponse, error)

Complete sends a completion request to OpenAI

func (*OpenAIProvider) Configure

func (p *OpenAIProvider) Configure(config ProviderConfig) error

Configure sets up the OpenAI provider

func (*OpenAIProvider) CountTokens

func (p *OpenAIProvider) CountTokens(text string) int

CountTokens estimates token count

func (*OpenAIProvider) GetUsage

func (p *OpenAIProvider) GetUsage() *UsageMetrics

GetUsage returns usage metrics

func (*OpenAIProvider) Name

func (p *OpenAIProvider) Name() string

Name returns the provider name

type Provider

type Provider interface {
	// Name returns the provider name (e.g., "anthropic", "openai")
	Name() string

	// Configure sets up the provider with credentials
	Configure(config ProviderConfig) error

	// Complete sends a prompt and returns a completion
	Complete(ctx context.Context, req CompletionRequest) (*CompletionResponse, error)

	// BatchComplete processes multiple prompts
	BatchComplete(ctx context.Context, reqs []CompletionRequest) ([]*CompletionResponse, error)

	// CountTokens estimates token count for text
	CountTokens(text string) int

	// GetUsage returns usage metrics
	GetUsage() *UsageMetrics
}

Provider defines the interface for LLM providers
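
Because every backend satisfies this interface, calling code can stay provider-agnostic. The helper below is hypothetical, not part of the package; reusing ErrContextLength for a local pre-check is a sketch choice.

// completeWithBudget refuses prompts whose estimated token count
// exceeds the given budget before spending an API call.
func completeWithBudget(ctx context.Context, p llm.Provider, prompt string, budget int) (*llm.CompletionResponse, error) {
	if p.CountTokens(prompt) > budget {
		return nil, llm.ErrContextLength
	}
	return p.Complete(ctx, llm.CompletionRequest{Prompt: prompt, MaxTokens: 512})
}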

type ProviderConfig

type ProviderConfig struct {
	APIKey      string
	Model       string
	MaxTokens   int
	Temperature float32
	BaseURL     string // Optional custom endpoint
}

ProviderConfig contains provider configuration

type RateLimiter

type RateLimiter struct {
	// contains filtered or unexported fields
}

RateLimiter controls request rate to LLM providers

func NewRateLimiter

func NewRateLimiter(requestsPerMinute int) *RateLimiter

NewRateLimiter creates a rate limiter with the given requests per minute

func (*RateLimiter) Wait

func (rl *RateLimiter) Wait(ctx context.Context) error

Wait blocks until a request can proceed
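
A throttling sketch inside some calling function; p, ctx, and prompts are assumed in scope, and Wait is assumed to return early with an error if ctx is cancelled.

rl := llm.NewRateLimiter(60) // roughly one request per second

for _, prompt := range prompts {
	if err := rl.Wait(ctx); err != nil {
		return err // context cancelled while waiting (assumed)
	}
	if _, err := p.Complete(ctx, llm.CompletionRequest{Prompt: prompt}); err != nil {
		return err
	}
}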

type UsageMetrics

type UsageMetrics struct {
	TotalRequests    int
	TotalTokensIn    int
	TotalTokensOut   int
	CachedTokens     int
	EstimatedCostUSD float64
}

UsageMetrics tracks API usage
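
A reporting sketch over the documented fields; p is any configured Provider.

u := p.GetUsage()
fmt.Printf("requests=%d tokens in/out=%d/%d cached=%d est. cost=$%.4f\n",
	u.TotalRequests, u.TotalTokensIn, u.TotalTokensOut, u.CachedTokens, u.EstimatedCostUSD)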
