Documentation
¶
Index ¶
- func GetRateLimiter() *llm.RateLimiter
- func GetRouter() *llm.Router
- func Init(logger *slog.Logger) error
- func SetGlobal(b Brain)
- type Brain
- type BudgetConfig
- type BudgetControlledBrain
- type CacheConfig
- type CircuitBreakerConfig
- type Config
- type CostConfig
- type FailoverConfig
- type HealthStatus
- type MetricsConfig
- type ModelConfig
- type ObservableBrain
- type PriorityBrain
- type PriorityConfig
- type RateLimitConfig
- type ResilientBrain
- type RetryConfig
- type RoutableBrain
- type RouterConfig
- type StreamingBrain
Constants ¶
This section is empty.
Variables ¶
This section is empty.
Functions ¶
func GetRateLimiter ¶ added in v0.18.0
func GetRateLimiter() *llm.RateLimiter
GetRateLimiter returns the global rate limiter if available.
func GetRouter ¶ added in v0.18.0
func GetRouter() *llm.Router
GetRouter returns the global router if the brain supports routing.
Types ¶
type Brain ¶
type Brain interface {
// Chat generates a plain text response for a given prompt.
// Best used for simple questions, greetings, or summarization.
Chat(ctx context.Context, prompt string) (string, error)
// Analyze performs structured analysis and returns the result in the target struct.
// The target must be a pointer to a struct that can be unmarshaled from JSON.
// Useful for intent routing, safety checks, and complex data extraction.
Analyze(ctx context.Context, prompt string, target any) error
}
Brain represents the core "System 1" intelligence for HotPlex. It provides fast, structured, and low-cost reasoning capabilities.
type BudgetConfig ¶ added in v0.18.0
type BudgetControlledBrain ¶ added in v0.18.0
type BudgetControlledBrain interface {
Brain
// GetBudgetTracker returns the budget tracker for a session.
GetBudgetTracker(sessionID string) *llm.BudgetTracker
// GetBudgetManager returns the budget manager.
GetBudgetManager() *llm.BudgetManager
}
BudgetControlledBrain extends Brain with budget control capabilities.
type CacheConfig ¶ added in v0.18.0
type CircuitBreakerConfig ¶ added in v0.18.0
type Config ¶
type Config struct {
// Enabled is automatically determined based on APIKey presence.
Enabled bool
// Model is the model configuration.
Model ModelConfig
// Cache is the cache configuration.
Cache CacheConfig
// Retry is the retry configuration.
Retry RetryConfig
// Metrics is the metrics configuration.
Metrics MetricsConfig
// Cost is the cost configuration.
Cost CostConfig
// RateLimit is the rate limit configuration.
RateLimit RateLimitConfig
// Router is the router configuration.
Router RouterConfig
// CircuitBreaker is the circuit breaker configuration.
CircuitBreaker CircuitBreakerConfig
// Failover is the failover configuration.
Failover FailoverConfig
// Budget is the budget configuration.
Budget BudgetConfig
// Priority is the priority configuration.
Priority PriorityConfig
}
Config holds the configuration for the Global Brain.
func LoadConfigFromEnv ¶
func LoadConfigFromEnv() Config
LoadConfigFromEnv loads the brain configuration from environment variables.
type CostConfig ¶ added in v0.18.0
type FailoverConfig ¶ added in v0.18.0
type HealthStatus ¶ added in v0.18.0
type HealthStatus = llm.HealthStatus
HealthStatus represents the health status of the Brain service. Re-exported from llm package for convenience.
type MetricsConfig ¶ added in v0.18.0
type ModelConfig ¶ added in v0.18.0
type ObservableBrain ¶ added in v0.18.0
type ObservableBrain interface {
Brain
// GetMetrics returns current metrics statistics.
GetMetrics() llm.MetricsStats
// GetCostCalculator returns the cost calculator.
GetCostCalculator() *llm.CostCalculator
}
ObservableBrain provides observability and metrics access.
type PriorityBrain ¶ added in v0.18.0
type PriorityBrain interface {
Brain
// GetPriorityScheduler returns the priority scheduler.
GetPriorityScheduler() *llm.PriorityScheduler
// SubmitWithPriority submits a request with specified priority.
SubmitWithPriority(ctx context.Context, prompt string, priority llm.Priority) (string, error)
}
PriorityBrain extends Brain with priority queue capabilities.
type PriorityConfig ¶ added in v0.18.0
type RateLimitConfig ¶ added in v0.18.0
type ResilientBrain ¶ added in v0.18.0
type ResilientBrain interface {
Brain
// GetCircuitBreaker returns the circuit breaker.
GetCircuitBreaker() *llm.CircuitBreaker
// GetFailoverManager returns the failover manager.
GetFailoverManager() *llm.FailoverManager
// ResetCircuitBreaker manually resets the circuit breaker.
ResetCircuitBreaker()
// ManualFailover manually switches to a specific provider.
ManualFailover(providerName string) error
}
ResilientBrain extends Brain with circuit breaker and failover capabilities.
type RetryConfig ¶ added in v0.18.0
type RoutableBrain ¶ added in v0.18.0
type RoutableBrain interface {
Brain
// ChatWithModel generates a response using a specific model.
ChatWithModel(ctx context.Context, model string, prompt string) (string, error)
// AnalyzeWithModel performs analysis using a specific model.
AnalyzeWithModel(ctx context.Context, model string, prompt string, target any) error
}
RoutableBrain extends Brain with model routing capability.
type RouterConfig ¶ added in v0.18.0
type RouterConfig struct {
Enabled bool
DefaultStage string
Models []llm.ModelConfig
}
type StreamingBrain ¶ added in v0.18.0
type StreamingBrain interface {
Brain
// ChatStream returns a channel that streams tokens as they are generated.
// The channel is closed when the stream completes or an error occurs.
// Best used for long responses, real-time UI updates, or progressive rendering.
ChatStream(ctx context.Context, prompt string) (<-chan string, error)
}
StreamingBrain extends Brain with streaming capabilities. It provides token-by-token streaming for real-time responses.