Documentation
¶
Index ¶
- type Client
- type Connection
- type Cost
- type CostInfo
- type Costs
- type LLMBackend
- type ModelInfo
- type ModelType
- type OpenRouterModel
- type OpenRouterPricing
- type OpenRouterResponse
- type PriceTier
- type PricingCache
- type Provider
- type ProviderRequest
- type ProviderResponse
- type RequestBuilder
- func (b *RequestBuilder) Execute(ctx context.Context) (*Response, error)
- func (b *RequestBuilder) WithMaxTokens(n int) *RequestBuilder
- func (b *RequestBuilder) WithPrompt(prompt string) *RequestBuilder
- func (b *RequestBuilder) WithProvider(provider Provider) *RequestBuilder
- func (b *RequestBuilder) WithStructuredOutput(schema interface{}) *RequestBuilder
- func (b *RequestBuilder) WithSystemPrompt(prompt string) *RequestBuilder
- func (b *RequestBuilder) WithTimeout(d time.Duration) *RequestBuilder
- type Response
- type Session
- type TopProvider
Constants ¶
This section is empty.
Variables ¶
This section is empty.
Functions ¶
This section is empty.
Types ¶
type Client ¶
type Client interface {
// NewRequest creates a new request builder for constructing LLM requests.
NewRequest() *RequestBuilder
}
Client provides a fluent interface for making LLM requests.
type Connection ¶
type Connection struct {
types.HTTP
Backend LLMBackend
Model string
}
Connection represents a resolved connection with provider-specific configuration. It embeds types.HTTP to reuse URL and Bearer token fields with EnvVar lookup support.
type Cost ¶
type Cost struct {
Model string `json:"model,omitempty"`
// Multiple models can be used to fulfill a single request (e.g., cache + reasoning + LLM)
ModelType ModelType `json:"model_type,omitempty"`
InputTokens int `json:"input_tokens,omitempty"`
OutputTokens int `json:"output_tokens,omitempty"`
TotalTokens int `json:"total_tokens,omitempty"`
InputCost float64 `json:"input_cost,omitempty"`
OutputCost float64 `json:"output_cost,omitempty"`
}
type CostInfo ¶
type CostInfo struct {
InputTokens int `json:"inputTokens"`
OutputTokens int `json:"outputTokens"`
ReasoningTokens *int `json:"reasoningTokens,omitempty"`
CacheReadTokens *int `json:"cacheReadTokens,omitempty"`
CacheWriteTokens *int `json:"cacheWriteTokens,omitempty"`
Cost float64 `json:"cost"`
Model string `json:"model"`
CostError *string `json:"costCalculationError,omitempty"`
}
CostInfo contains token usage and cost information for an LLM request.
type LLMBackend ¶
type LLMBackend string
LLMBackend represents the supported LLM providers.
const (
	LLMBackendOpenAI     LLMBackend = "openai"
	LLMBackendAnthropic  LLMBackend = "anthropic"
	LLMBackendGemini     LLMBackend = "gemini"
	LLMBackendClaudeCode LLMBackend = "claude-code"
)
type ModelInfo ¶
type ModelInfo struct {
Provider LLMBackend
ModelID string
MaxTokens int
ContextWindow int
SupportsImages bool
SupportsPromptCache bool
InputPrice float64 // Per million tokens
OutputPrice float64 // Per million tokens
InputPriceTiers []PriceTier
OutputPriceTiers []PriceTier
CacheWritesPrice float64 // Per million tokens
CacheReadsPrice float64 // Per million tokens
}
ModelInfo contains pricing and capability information for a specific LLM model.
func GetModelInfo ¶
GetModelInfo retrieves pricing information for a specific model from the registry. Returns the ModelInfo and a boolean indicating whether the model was found.
type OpenRouterModel ¶
type OpenRouterModel struct {
ID string `json:"id"`
Name string `json:"name"`
Pricing OpenRouterPricing `json:"pricing"`
ContextLength int `json:"context_length"`
TopProvider *TopProvider `json:"top_provider,omitempty"`
}
OpenRouterModel represents a single model from OpenRouter API
type OpenRouterPricing ¶
type OpenRouterPricing struct {
Prompt string `json:"prompt"` // Per-token input price
Completion string `json:"completion"` // Per-token output price
Request string `json:"request,omitempty"` // Per-request fee
Image string `json:"image,omitempty"` // Image processing cost
InternalReasoning string `json:"internal_reasoning,omitempty"` // Reasoning token cost
InputCacheRead string `json:"input_cache_read,omitempty"` // Cache read cost
InputCacheWrite string `json:"input_cache_write,omitempty"` // Cache write cost
}
OpenRouterPricing contains pricing information for a model
type OpenRouterResponse ¶
type OpenRouterResponse struct {
Data []OpenRouterModel `json:"data"`
}
OpenRouterResponse represents the response from OpenRouter API
type PriceTier ¶
type PriceTier struct {
TokenLimit int // Upper limit (inclusive)
Price float64 // Per million tokens
}
PriceTier represents a pricing tier for models with tiered pricing.
type PricingCache ¶
type PricingCache struct {
Timestamp time.Time `json:"timestamp"`
Models map[string]*ModelInfo `json:"models"`
}
PricingCache represents the cached pricing data
func NewPricingCache ¶
func NewPricingCache() (*PricingCache, error)
NewPricingCache loads or fetches pricing cache, handling the full lifecycle: memory cache → disk cache → API fetch. Automatically merges into model registry.
func (*PricingCache) IsExpired ¶
func (c *PricingCache) IsExpired() bool
IsExpired returns true if the cache is older than the expiry duration
func (*PricingCache) Save ¶
func (c *PricingCache) Save() error
Save writes the pricing cache to disk
type Provider ¶
type Provider interface {
// Execute sends a request to the LLM provider and returns the response.
Execute(ctx *Session, req ProviderRequest) (ProviderResponse, error)
// GetModel returns the model name configured for this provider.
GetModel() string
// GetBackend returns the backend type for this provider.
GetBackend() LLMBackend
// GetOpenRouterModelID returns the OpenRouter model identifier for pricing lookups.
// Returns empty string if the model is not available on OpenRouter.
GetOpenRouterModelID() string
}
Provider is the interface that all LLM provider implementations must satisfy.
type ProviderRequest ¶
type ProviderRequest struct {
SystemPrompt string
Prompt string
MaxTokens *int
StructuredOutput interface{} // Schema for structured JSON output
Model string
APIKey string
APIURL string
}
ProviderRequest contains all the information needed to make an LLM request.
func (ProviderRequest) PrettShort ¶
func (r ProviderRequest) PrettShort() api.Text
PrettShort returns a short formatted representation of the ProviderRequest for display.
func (ProviderRequest) Pretty ¶
func (r ProviderRequest) Pretty() api.Text
Pretty returns a formatted representation of the ProviderRequest for display.
type ProviderResponse ¶
type ProviderResponse struct {
Cached bool
Text string
StructuredData interface{} // Populated if structured output was requested
Model string
InputTokens int
OutputTokens int
ReasoningTokens *int
CacheReadTokens *int
CacheWriteTokens *int
Raw interface{} // Raw provider-specific response
}
ProviderResponse contains the raw response from an LLM provider.
func (ProviderResponse) Pretty ¶
func (r ProviderResponse) Pretty() api.Text
Pretty returns a formatted representation of the ProviderResponse for display.
func (ProviderResponse) PrettyShort ¶
func (r ProviderResponse) PrettyShort() api.Text
PrettyShort returns a short formatted representation of the ProviderResponse for display.
type RequestBuilder ¶
type RequestBuilder struct {
Provider Provider
SystemPrompt string
Prompt string
MaxTokens *int
Timeout time.Duration
StructuredOutput interface{}
}
RequestBuilder provides a fluent interface for building LLM requests.
func (*RequestBuilder) Execute ¶
func (b *RequestBuilder) Execute(ctx context.Context) (*Response, error)
Execute sends the request to the configured LLM provider and returns the response.
func (*RequestBuilder) WithMaxTokens ¶
func (b *RequestBuilder) WithMaxTokens(n int) *RequestBuilder
WithMaxTokens sets the maximum number of output tokens.
func (*RequestBuilder) WithPrompt ¶
func (b *RequestBuilder) WithPrompt(prompt string) *RequestBuilder
WithPrompt sets the user prompt (required).
func (*RequestBuilder) WithProvider ¶
func (b *RequestBuilder) WithProvider(provider Provider) *RequestBuilder
WithProvider sets the LLM provider to use for this request.
func (*RequestBuilder) WithStructuredOutput ¶
func (b *RequestBuilder) WithStructuredOutput(schema interface{}) *RequestBuilder
WithStructuredOutput configures the request to return structured JSON output matching the schema of the provided pointer to a struct.
func (*RequestBuilder) WithSystemPrompt ¶
func (b *RequestBuilder) WithSystemPrompt(prompt string) *RequestBuilder
WithSystemPrompt sets the system prompt that establishes context and behavior.
func (*RequestBuilder) WithTimeout ¶
func (b *RequestBuilder) WithTimeout(d time.Duration) *RequestBuilder
WithTimeout sets the request timeout duration.
type Response ¶
type Response struct {
// Text is the text response from the LLM (empty if structured output was requested).
Text string
// StructuredData contains the parsed structured output if WithStructuredOutput was used.
StructuredData interface{}
// CostInfo provides token usage and cost information for this request.
CostInfo CostInfo
// Model is the specific model that generated the response.
Model string
// Provider is the LLM provider (openai, anthropic, gemini).
Provider string
}
Response contains the LLM response data, including text, structured output, and cost information.
type Session ¶
type Session struct {
context.Context
Model string
ID string
ProjectName string
Costs Costs
// contains filtered or unexported fields
}
Session tracks costs across multiple agent calls
func NewSession ¶
NewSession creates a new session for tracking costs
func (*Session) GetCostsByModel ¶
GetCostsByModel returns costs grouped by model
func (*Session) GetTotalCost ¶
GetTotalCost returns the aggregated cost across all entries
type TopProvider ¶
type TopProvider struct {
MaxCompletionTokens *int `json:"max_completion_tokens,omitempty"`
}
TopProvider contains provider-specific limits