llm

package
v1.2.0 Latest
Warning

This package is not in the latest version of its module.

Go to latest
Published: Feb 9, 2026 License: MIT Imports: 11 Imported by: 0

Documentation

Overview

Package llm provides LLM provider implementations for AI-powered analysis.

Index

Constants

This section is empty.

Variables

View Source
var (
	ErrNoAPIKey       = errors.New("API key not configured")
	ErrInvalidConfig  = errors.New("invalid provider configuration")
	ErrProviderError  = errors.New("provider returned an error")
	ErrRateLimited    = errors.New("rate limited by provider")
	ErrContextTooLong = errors.New("context length exceeded")
	ErrInvalidJSON    = errors.New("failed to parse response as JSON")
)

Errors: sentinel errors returned by the providers in this package; compare with errors.Is.

View Source
var ProviderCosts = map[string]*CostEstimator{
	"openai": {
		// contains filtered or unexported fields
	},
	"anthropic": {
		// contains filtered or unexported fields
	},
	"ollama": {
		// contains filtered or unexported fields
	},
	"lmstudio": {
		// contains filtered or unexported fields
	},
}

ProviderCosts holds cost information for different providers

Functions

func DefaultRateLimitConfigs

func DefaultRateLimitConfigs() map[string]*RateLimitConfig

DefaultRateLimitConfigs returns default rate limits per provider

func ParseJSONResponse

func ParseJSONResponse(content string, result interface{}) error

ParseJSONResponse attempts to parse a JSON response from LLM output

Types

type AnthropicContentBlock

type AnthropicContentBlock struct {
	Type string `json:"type"`
	Text string `json:"text"`
}

AnthropicContentBlock represents a content block

type AnthropicError

type AnthropicError struct {
	Type    string `json:"type"`
	Message string `json:"message"`
}

AnthropicError represents an API error

type AnthropicMessage

type AnthropicMessage struct {
	Role    string `json:"role"`
	Content string `json:"content"`
}

AnthropicMessage represents a message

type AnthropicProvider

type AnthropicProvider struct {
	BaseProvider
	// contains filtered or unexported fields
}

AnthropicProvider implements the Provider interface for Anthropic Claude

func NewAnthropicProvider

func NewAnthropicProvider(config types.ProviderConfig) (*AnthropicProvider, error)

NewAnthropicProvider creates a new Anthropic provider

func (*AnthropicProvider) Analyze

func (p *AnthropicProvider) Analyze(ctx context.Context, prompt string) (string, error)

Analyze sends a prompt to Anthropic and returns the response

func (*AnthropicProvider) AnalyzeStructured

func (p *AnthropicProvider) AnalyzeStructured(ctx context.Context, prompt string, result interface{}) error

AnalyzeStructured sends a prompt and parses the response as JSON

func (*AnthropicProvider) AnalyzeWithSystem

func (p *AnthropicProvider) AnalyzeWithSystem(ctx context.Context, system, prompt string) (string, error)

AnalyzeWithSystem sends a prompt with a system message

type AnthropicRequest

type AnthropicRequest struct {
	Model       string             `json:"model"`
	MaxTokens   int                `json:"max_tokens"`
	Messages    []AnthropicMessage `json:"messages"`
	System      string             `json:"system,omitempty"`
	Temperature float64            `json:"temperature,omitempty"`
}

AnthropicRequest represents an Anthropic API request

type AnthropicResponse

type AnthropicResponse struct {
	ID           string                  `json:"id"`
	Type         string                  `json:"type"`
	Role         string                  `json:"role"`
	Content      []AnthropicContentBlock `json:"content"`
	Model        string                  `json:"model"`
	StopReason   string                  `json:"stop_reason"`
	StopSequence string                  `json:"stop_sequence"`
	Usage        AnthropicUsage          `json:"usage"`
}

AnthropicResponse represents an Anthropic API response

type AnthropicUsage

type AnthropicUsage struct {
	InputTokens  int `json:"input_tokens"`
	OutputTokens int `json:"output_tokens"`
}

AnthropicUsage represents token usage

type BaseProvider

type BaseProvider struct {
	// contains filtered or unexported fields
}

BaseProvider provides common functionality for providers

func (*BaseProvider) Model

func (p *BaseProvider) Model() string

Model returns the configured model

func (*BaseProvider) Name

func (p *BaseProvider) Name() string

Name returns the provider name

type ChatRequest

type ChatRequest struct {
	Messages    []Message `json:"messages"`
	MaxTokens   int       `json:"max_tokens,omitempty"`
	Temperature float64   `json:"temperature,omitempty"`
	Model       string    `json:"model,omitempty"`
}

ChatRequest represents a chat completion request

type ChatResponse

type ChatResponse struct {
	Content      string `json:"content"`
	FinishReason string `json:"finish_reason,omitempty"`
	Usage        *Usage `json:"usage,omitempty"`
}

ChatResponse represents a chat completion response

type CostEstimator

type CostEstimator struct {
	// contains filtered or unexported fields
}

CostEstimator estimates API costs

func NewCostEstimator

func NewCostEstimator(provider string) *CostEstimator

NewCostEstimator creates a cost estimator for a provider

func (*CostEstimator) EstimateCost

func (c *CostEstimator) EstimateCost(stats *UsageStats) float64

EstimateCost estimates the cost for given usage

type LMStudioChoice

type LMStudioChoice struct {
	Index        int             `json:"index"`
	Message      LMStudioMessage `json:"message"`
	FinishReason string          `json:"finish_reason"`
}

LMStudioChoice represents a choice in the response

type LMStudioMessage

type LMStudioMessage struct {
	Role    string `json:"role"`
	Content string `json:"content"`
}

LMStudioMessage represents a message in LM Studio format

type LMStudioProvider

type LMStudioProvider struct {
	BaseProvider
	// contains filtered or unexported fields
}

LMStudioProvider implements the Provider interface for LM Studio (OpenAI-compatible)

func NewLMStudioProvider

func NewLMStudioProvider(config types.ProviderConfig) (*LMStudioProvider, error)

NewLMStudioProvider creates a new LM Studio provider

func (*LMStudioProvider) Analyze

func (p *LMStudioProvider) Analyze(ctx context.Context, prompt string) (string, error)

Analyze sends a prompt to LM Studio and returns the response

func (*LMStudioProvider) AnalyzeStructured

func (p *LMStudioProvider) AnalyzeStructured(ctx context.Context, prompt string, result interface{}) error

AnalyzeStructured sends a prompt and parses the response as JSON

func (*LMStudioProvider) AnalyzeWithSystem

func (p *LMStudioProvider) AnalyzeWithSystem(ctx context.Context, system, prompt string) (string, error)

AnalyzeWithSystem sends a prompt with a system message

type LMStudioRequest

type LMStudioRequest struct {
	Model       string            `json:"model"`
	Messages    []LMStudioMessage `json:"messages"`
	MaxTokens   int               `json:"max_tokens,omitempty"`
	Temperature float64           `json:"temperature,omitempty"`
	Stream      bool              `json:"stream"`
}

LMStudioRequest represents an LM Studio chat request (OpenAI-compatible)

type LMStudioResponse

type LMStudioResponse struct {
	ID      string           `json:"id"`
	Object  string           `json:"object"`
	Created int64            `json:"created"`
	Model   string           `json:"model"`
	Choices []LMStudioChoice `json:"choices"`
	Usage   *LMStudioUsage   `json:"usage,omitempty"`
}

LMStudioResponse represents an LM Studio chat response

type LMStudioUsage

type LMStudioUsage struct {
	PromptTokens     int `json:"prompt_tokens"`
	CompletionTokens int `json:"completion_tokens"`
	TotalTokens      int `json:"total_tokens"`
}

LMStudioUsage represents token usage

type Message

type Message struct {
	Role    string `json:"role"`
	Content string `json:"content"`
}

Message represents a chat message

type MockProvider

type MockProvider struct {
	BaseProvider
	// contains filtered or unexported fields
}

MockProvider is a mock LLM provider for testing

func NewMockProvider

func NewMockProvider(opts ...MockProviderOption) *MockProvider

NewMockProvider creates a new mock provider for testing

func (*MockProvider) Analyze

func (m *MockProvider) Analyze(ctx context.Context, prompt string) (string, error)

Analyze sends a prompt and returns a mock response

func (*MockProvider) AnalyzeStructured

func (m *MockProvider) AnalyzeStructured(ctx context.Context, prompt string, result interface{}) error

AnalyzeStructured sends a prompt and parses response into a struct

func (*MockProvider) AnalyzeWithSystem

func (m *MockProvider) AnalyzeWithSystem(ctx context.Context, system, prompt string) (string, error)

AnalyzeWithSystem sends a prompt with a system message

func (*MockProvider) CallCount

func (m *MockProvider) CallCount() int

CallCount returns the number of times the provider was called

func (*MockProvider) LastPrompt

func (m *MockProvider) LastPrompt() string

LastPrompt returns the last prompt received

func (*MockProvider) Model

func (m *MockProvider) Model() string

Model returns the model name

func (*MockProvider) Name

func (m *MockProvider) Name() string

Name returns the provider name

func (*MockProvider) Reset

func (m *MockProvider) Reset()

Reset resets the mock provider state

func (*MockProvider) SetDefaultResponse

func (m *MockProvider) SetDefaultResponse(response string)

SetDefaultResponse sets the default response

func (*MockProvider) SetError

func (m *MockProvider) SetError(err error)

SetError sets an error to return

func (*MockProvider) SetResponse

func (m *MockProvider) SetResponse(prompt, response string)

SetResponse sets a response for a specific prompt

type MockProviderOption

type MockProviderOption func(*MockProvider)

MockProviderOption is a function that configures a MockProvider

func WithDefaultResponse

func WithDefaultResponse(response string) MockProviderOption

WithDefaultResponse sets the default response

func WithError

func WithError(err error) MockProviderOption

WithError sets an error to return on calls

func WithResponse

func WithResponse(prompt, response string) MockProviderOption

WithResponse adds a specific response for a prompt

func WithStructuredResponse

func WithStructuredResponse(resp interface{}) MockProviderOption

WithStructuredResponse sets a pre-defined structured response

type OllamaChatRequest

type OllamaChatRequest struct {
	Model    string          `json:"model"`
	Messages []OllamaMessage `json:"messages"`
	Stream   bool            `json:"stream"`
	Options  *OllamaOptions  `json:"options,omitempty"`
}

OllamaChatRequest represents an Ollama chat request

type OllamaChatResponse

type OllamaChatResponse struct {
	Model     string        `json:"model"`
	CreatedAt string        `json:"created_at"`
	Message   OllamaMessage `json:"message"`
	Done      bool          `json:"done"`
}

OllamaChatResponse represents an Ollama chat response

type OllamaMessage

type OllamaMessage struct {
	Role    string `json:"role"`
	Content string `json:"content"`
}

OllamaMessage represents a message in Ollama format

type OllamaOptions

type OllamaOptions struct {
	Temperature float64 `json:"temperature,omitempty"`
	NumPredict  int     `json:"num_predict,omitempty"`
}

OllamaOptions represents Ollama-specific options

type OllamaProvider

type OllamaProvider struct {
	BaseProvider
	// contains filtered or unexported fields
}

OllamaProvider implements the Provider interface for Ollama

func NewOllamaProvider

func NewOllamaProvider(config types.ProviderConfig) (*OllamaProvider, error)

NewOllamaProvider creates a new Ollama provider

func (*OllamaProvider) Analyze

func (p *OllamaProvider) Analyze(ctx context.Context, prompt string) (string, error)

Analyze sends a prompt to Ollama and returns the response

func (*OllamaProvider) AnalyzeStructured

func (p *OllamaProvider) AnalyzeStructured(ctx context.Context, prompt string, result interface{}) error

AnalyzeStructured sends a prompt and parses the response as JSON

func (*OllamaProvider) AnalyzeWithSystem

func (p *OllamaProvider) AnalyzeWithSystem(ctx context.Context, system, prompt string) (string, error)

AnalyzeWithSystem sends a prompt with a system message

type OpenAIProvider

type OpenAIProvider struct {
	BaseProvider
	// contains filtered or unexported fields
}

OpenAIProvider implements the Provider interface for OpenAI

func NewOpenAIProvider

func NewOpenAIProvider(config types.ProviderConfig) (*OpenAIProvider, error)

NewOpenAIProvider creates a new OpenAI provider

func (*OpenAIProvider) Analyze

func (p *OpenAIProvider) Analyze(ctx context.Context, prompt string) (string, error)

Analyze sends a prompt to OpenAI and returns the response

func (*OpenAIProvider) AnalyzeStructured

func (p *OpenAIProvider) AnalyzeStructured(ctx context.Context, prompt string, result interface{}) error

AnalyzeStructured sends a prompt and parses the response as JSON

func (*OpenAIProvider) AnalyzeWithSystem

func (p *OpenAIProvider) AnalyzeWithSystem(ctx context.Context, system, prompt string) (string, error)

AnalyzeWithSystem sends a prompt with a system message

type Provider

type Provider interface {
	// Analyze sends a prompt and returns the response
	Analyze(ctx context.Context, prompt string) (string, error)

	// AnalyzeStructured sends a prompt and parses response into a struct
	AnalyzeStructured(ctx context.Context, prompt string, result interface{}) error

	// AnalyzeWithSystem sends a prompt with a system message
	AnalyzeWithSystem(ctx context.Context, system, prompt string) (string, error)

	// Name returns the provider name
	Name() string

	// Model returns the model being used
	Model() string
}

Provider defines the interface for LLM providers

func NewProvider

func NewProvider(config types.ProviderConfig) (Provider, error)

NewProvider creates a new LLM provider based on configuration

type RateLimitConfig

type RateLimitConfig struct {
	RequestsPerMinute int           // Max requests per minute
	MinInterval       time.Duration // Minimum time between requests
	BurstSize         int           // Maximum burst size
}

RateLimitConfig holds rate limiter configuration

type RateLimitedProvider

type RateLimitedProvider struct {
	// contains filtered or unexported fields
}

RateLimitedProvider wraps a provider with rate limiting

func NewRateLimitedProvider

func NewRateLimitedProvider(provider Provider, config *RateLimitConfig) *RateLimitedProvider

NewRateLimitedProvider creates a rate-limited provider wrapper

func (*RateLimitedProvider) Analyze

func (p *RateLimitedProvider) Analyze(ctx context.Context, prompt string) (string, error)

Analyze sends a prompt with rate limiting

func (*RateLimitedProvider) AnalyzeStructured

func (p *RateLimitedProvider) AnalyzeStructured(ctx context.Context, prompt string, result interface{}) error

AnalyzeStructured sends a prompt with rate limiting

func (*RateLimitedProvider) AnalyzeWithSystem

func (p *RateLimitedProvider) AnalyzeWithSystem(ctx context.Context, system, prompt string) (string, error)

AnalyzeWithSystem sends a prompt with system message and rate limiting

func (*RateLimitedProvider) GetUsage

func (p *RateLimitedProvider) GetUsage() *UsageStats

GetUsage returns usage statistics

func (*RateLimitedProvider) Model

func (p *RateLimitedProvider) Model() string

Model returns the model name

func (*RateLimitedProvider) Name

func (p *RateLimitedProvider) Name() string

Name returns the provider name

type RateLimiter

type RateLimiter struct {
	// contains filtered or unexported fields
}

RateLimiter provides rate limiting for LLM API calls

func NewRateLimiter

func NewRateLimiter(config *RateLimitConfig) *RateLimiter

NewRateLimiter creates a new rate limiter

func (*RateLimiter) OnRateLimitError

func (r *RateLimiter) OnRateLimitError(retryAfter time.Duration)

OnRateLimitError handles a rate limit error from the API

func (*RateLimiter) OnSuccess

func (r *RateLimiter) OnSuccess()

OnSuccess resets backoff count on successful request

func (*RateLimiter) Wait

func (r *RateLimiter) Wait(ctx context.Context) error

Wait blocks until a request can be made

type Usage

type Usage struct {
	PromptTokens     int `json:"prompt_tokens"`
	CompletionTokens int `json:"completion_tokens"`
	TotalTokens      int `json:"total_tokens"`
}

Usage tracks token usage

type UsageBudget

type UsageBudget struct {
	// contains filtered or unexported fields
}

UsageBudget tracks usage against a budget

func NewUsageBudget

func NewUsageBudget(maxTokens int64, maxRequests int) *UsageBudget

NewUsageBudget creates a usage budget

func (*UsageBudget) CanMakeRequest

func (b *UsageBudget) CanMakeRequest(estimatedTokens int64) bool

CanMakeRequest checks if a request can be made within budget

func (*UsageBudget) GetRemaining

func (b *UsageBudget) GetRemaining() (tokens int64, requests int)

GetRemaining returns remaining budget

func (*UsageBudget) IsExceeded

func (b *UsageBudget) IsExceeded() bool

IsExceeded returns whether the budget has been exceeded

func (*UsageBudget) RecordUsage

func (b *UsageBudget) RecordUsage(tokens int64)

RecordUsage records token usage

type UsageStats

type UsageStats struct {
	TotalRequests      int           `json:"total_requests"`
	SuccessfulRequests int           `json:"successful_requests"`
	FailedRequests     int           `json:"failed_requests"`
	TotalPromptChars   int64         `json:"total_prompt_chars"`
	TotalResponseChars int64         `json:"total_response_chars"`
	EstimatedTokens    int64         `json:"estimated_tokens"`
	AverageLatency     time.Duration `json:"average_latency"`
	TotalLatency       time.Duration `json:"total_latency"`
	RateLimitHits      int           `json:"rate_limit_hits"`
	StartTime          time.Time     `json:"start_time"`
	LastRequestTime    time.Time     `json:"last_request_time"`
}

UsageStats holds usage statistics

type UsageTracker

type UsageTracker struct {
	// contains filtered or unexported fields
}

UsageTracker tracks LLM API usage

func NewUsageTracker

func NewUsageTracker() *UsageTracker

NewUsageTracker creates a new usage tracker

func (*UsageTracker) GetStats

func (u *UsageTracker) GetStats() *UsageStats

GetStats returns current usage statistics

func (*UsageTracker) RecordFailure

func (u *UsageTracker) RecordFailure()

RecordFailure records a failed request

func (*UsageTracker) RecordRateLimitHit

func (u *UsageTracker) RecordRateLimitHit()

RecordRateLimitHit records a rate limit hit

func (*UsageTracker) RecordRequest

func (u *UsageTracker) RecordRequest(promptSize, responseSize int)

RecordRequest records a successful request

func (*UsageTracker) RecordRequestWithLatency

func (u *UsageTracker) RecordRequestWithLatency(promptSize, responseSize int, latency time.Duration)

RecordRequestWithLatency records a request with latency

func (*UsageTracker) Reset

func (u *UsageTracker) Reset()

Reset resets usage statistics

Jump to

Keyboard shortcuts

? : This menu
/ : Search site
f or F : Jump to
y or Y : Canonical URL