Documentation
Index
- Constants
- Variables
- type APIError
- type ChatClient
- func (c *ChatClient) AppendMessage(ctx context.Context, sessionID string, message provider.Message) error
- func (c *ChatClient) Close() error
- func (c *ChatClient) CreateChatCompletion(ctx context.Context, req *provider.ChatCompletionRequest) (*provider.ChatCompletionResponse, error)
- func (c *ChatClient) CreateChatCompletionStream(ctx context.Context, req *provider.ChatCompletionRequest) (provider.ChatCompletionStream, error)
- func (c *ChatClient) CreateChatCompletionStreamWithMemory(ctx context.Context, sessionID string, req *provider.ChatCompletionRequest) (provider.ChatCompletionStream, error)
- func (c *ChatClient) CreateChatCompletionWithMemory(ctx context.Context, sessionID string, req *provider.ChatCompletionRequest) (*provider.ChatCompletionResponse, error)
- func (c *ChatClient) CreateConversationWithSystemMessage(ctx context.Context, sessionID, systemMessage string) error
- func (c *ChatClient) DeleteConversation(ctx context.Context, sessionID string) error
- func (c *ChatClient) GetConversationMessages(ctx context.Context, sessionID string) ([]provider.Message, error)
- func (c *ChatClient) HasMemory() bool
- func (c *ChatClient) LoadConversation(ctx context.Context, sessionID string) (*ConversationMemory, error)
- func (c *ChatClient) Logger() *slog.Logger
- func (c *ChatClient) Memory() *MemoryManager
- func (c *ChatClient) Provider() provider.Provider
- func (c *ChatClient) SaveConversation(ctx context.Context, conversation *ConversationMemory) error
- type ChatCompletionChoice
- type ChatCompletionChunk
- type ChatCompletionRequest
- type ChatCompletionResponse
- type ChatCompletionStream
- type ClientConfig
- type ConversationMemory
- type LLMCallInfo
- type MemoryConfig
- type MemoryManager
- func (m *MemoryManager) AppendMessage(ctx context.Context, sessionID string, message Message) error
- func (m *MemoryManager) AppendMessages(ctx context.Context, sessionID string, messages []Message) error
- func (m *MemoryManager) CreateConversationWithSystemMessage(ctx context.Context, sessionID, systemMessage string) error
- func (m *MemoryManager) DeleteConversation(ctx context.Context, sessionID string) error
- func (m *MemoryManager) GetMessages(ctx context.Context, sessionID string) ([]Message, error)
- func (m *MemoryManager) LoadConversation(ctx context.Context, sessionID string) (*ConversationMemory, error)
- func (m *MemoryManager) SaveConversation(ctx context.Context, conversation *ConversationMemory) error
- func (m *MemoryManager) SetMetadata(ctx context.Context, sessionID string, metadata map[string]any) error
- type Message
- type ModelInfo
- type ObservabilityHook
- type Provider
- type ProviderName
- type Role
- type Tool
- type ToolCall
- type ToolFunction
- type ToolSpec
- type Usage
Constants
const (
	EnvVarAnthropicAPIKey = "ANTHROPIC_API_KEY" // #nosec G101
	EnvVarOpenAIAPIKey    = "OPENAI_API_KEY"    // #nosec G101
	EnvVarGeminiAPIKey    = "GEMINI_API_KEY"    // #nosec G101
	EnvVarXAIAPIKey       = "XAI_API_KEY"       // #nosec G101
)
const (
	// Bedrock Models - Re-exported from models package
	ModelBedrockClaude3Opus   = models.BedrockClaude3Opus
	ModelBedrockClaude3Sonnet = models.BedrockClaude3Sonnet
	ModelBedrockClaudeOpus4   = models.BedrockClaudeOpus4
	ModelBedrockTitan         = models.BedrockTitan

	// Claude Models - Re-exported from models package
	ModelClaudeOpus4_1   = models.ClaudeOpus4_1
	ModelClaudeOpus4     = models.ClaudeOpus4
	ModelClaudeSonnet4   = models.ClaudeSonnet4
	ModelClaude3_7Sonnet = models.Claude3_7Sonnet
	ModelClaude3_5Haiku  = models.Claude3_5Haiku
	ModelClaude3Opus     = models.Claude3Opus
	ModelClaude3Sonnet   = models.Claude3Sonnet
	ModelClaude3Haiku    = models.Claude3Haiku

	// Gemini Models - Re-exported from models package
	ModelGemini2_5Pro       = models.Gemini2_5Pro
	ModelGemini2_5Flash     = models.Gemini2_5Flash
	ModelGeminiLive2_5Flash = models.GeminiLive2_5Flash
	ModelGemini1_5Pro       = models.Gemini1_5Pro
	ModelGemini1_5Flash     = models.Gemini1_5Flash
	ModelGeminiPro          = models.GeminiPro

	// Ollama Models - Re-exported from models package
	ModelOllamaLlama3_8B   = models.OllamaLlama3_8B
	ModelOllamaLlama3_70B  = models.OllamaLlama3_70B
	ModelOllamaMistral7B   = models.OllamaMistral7B
	ModelOllamaMixtral8x7B = models.OllamaMixtral8x7B
	ModelOllamaCodeLlama   = models.OllamaCodeLlama
	ModelOllamaGemma2B     = models.OllamaGemma2B
	ModelOllamaGemma7B     = models.OllamaGemma7B
	ModelOllamaQwen2_5     = models.OllamaQwen2_5
	ModelOllamaDeepSeek    = models.OllamaDeepSeek

	// OpenAI Models - Re-exported from models package
	ModelGPT5           = models.GPT5
	ModelGPT5Mini       = models.GPT5Mini
	ModelGPT5Nano       = models.GPT5Nano
	ModelGPT5ChatLatest = models.GPT5ChatLatest
	ModelGPT4_1         = models.GPT4_1
	ModelGPT4_1Mini     = models.GPT4_1Mini
	ModelGPT4_1Nano     = models.GPT4_1Nano
	ModelGPT4o          = models.GPT4o
	ModelGPT4oMini      = models.GPT4oMini
	ModelGPT4Turbo      = models.GPT4Turbo
	ModelGPT35Turbo     = models.GPT35Turbo

	// Vertex AI Models - Re-exported from models package
	ModelVertexClaudeOpus4 = models.VertexClaudeOpus4

	// X.AI Grok Models - Re-exported from models package
	// Grok 4.1 (Latest - November 2025)
	ModelGrok4_1FastReasoning    = models.Grok4_1FastReasoning
	ModelGrok4_1FastNonReasoning = models.Grok4_1FastNonReasoning
	// Grok 4 (July 2025)
	ModelGrok4_0709            = models.Grok4_0709
	ModelGrok4FastReasoning    = models.Grok4FastReasoning
	ModelGrok4FastNonReasoning = models.Grok4FastNonReasoning
	ModelGrokCodeFast1         = models.GrokCodeFast1
	// Grok 3
	ModelGrok3     = models.Grok3
	ModelGrok3Mini = models.Grok3Mini
	// Grok 2
	ModelGrok2_1212   = models.Grok2_1212
	ModelGrok2_Vision = models.Grok2_Vision
	// Deprecated models
	ModelGrokBeta   = models.GrokBeta
	ModelGrokVision = models.GrokVision
)
Common model constants for each provider.
NOTE: For new code, prefer importing "github.com/agentplexus/omnillm/models" directly for better organization and documentation. These constants are maintained for backwards compatibility with existing code.
const (
	RoleSystem    = provider.RoleSystem
	RoleUser      = provider.RoleUser
	RoleAssistant = provider.RoleAssistant
	RoleTool      = provider.RoleTool
)
Role constants re-exported from the provider package for convenience.
Variables
var (
	// Common errors
	ErrUnsupportedProvider  = errors.New("unsupported provider")
	ErrBedrockExternal      = errors.New("bedrock provider moved to github.com/agentplexus/omnillm-bedrock; use CustomProvider to inject it")
	ErrInvalidConfiguration = errors.New("invalid configuration")
	ErrEmptyAPIKey          = errors.New("API key cannot be empty")
	ErrEmptyModel           = errors.New("model cannot be empty")
	ErrEmptyMessages        = errors.New("messages cannot be empty")
	ErrStreamClosed         = errors.New("stream is closed")
	ErrInvalidResponse      = errors.New("invalid response format")
	ErrRateLimitExceeded    = errors.New("rate limit exceeded")
	ErrQuotaExceeded        = errors.New("quota exceeded")
	ErrInvalidRequest       = errors.New("invalid request")
	ErrModelNotFound        = errors.New("model not found")
	ErrServerError          = errors.New("server error")
	ErrNetworkError         = errors.New("network error")
)
Functions
This section is empty.
Types
type APIError
type APIError struct {
StatusCode int `json:"status_code"`
Message string `json:"message"`
Type string `json:"type"`
Code string `json:"code"`
Provider ProviderName `json:"provider"`
}
APIError represents an error response from the API
func NewAPIError
func NewAPIError(provider ProviderName, statusCode int, message, errorType, code string) *APIError
NewAPIError creates a new API error
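A hedged error-handling sketch using the standard errors package. It assumes the root package is imported as omnillm and that provider calls return errors wrapping *APIError and the sentinel errors listed under Variables; this page does not guarantee that wrapping, so verify against the source.

	resp, err := client.CreateChatCompletion(ctx, req)
	var apiErr *omnillm.APIError
	switch {
	case err == nil:
		_ = resp // success
	case errors.As(err, &apiErr):
		// Inspect the structured error fields.
		log.Printf("%s returned HTTP %d: %s", apiErr.Provider, apiErr.StatusCode, apiErr.Message)
	case errors.Is(err, omnillm.ErrRateLimitExceeded):
		// Back off and retry.
	default:
		log.Fatal(err)
	}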
type ChatClient
type ChatClient struct {
// contains filtered or unexported fields
}
ChatClient is the main client interface that wraps a Provider
func NewClient
func NewClient(config ClientConfig) (*ChatClient, error)
NewClient creates a new ChatClient based on the provider
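A minimal construction sketch, assuming the root package import path github.com/agentplexus/omnillm and that Message carries Role and Content fields (Message is re-exported from the provider package and not expanded on this page):

	client, err := omnillm.NewClient(omnillm.ClientConfig{
		Provider: omnillm.ProviderNameOpenAI,
		APIKey:   os.Getenv(omnillm.EnvVarOpenAIAPIKey),
	})
	if err != nil {
		log.Fatal(err)
	}
	resp, err := client.CreateChatCompletion(context.Background(), &omnillm.ChatCompletionRequest{
		Model: omnillm.ModelGPT4oMini,
		Messages: []omnillm.Message{
			{Role: omnillm.RoleUser, Content: "Say hello."}, // Role/Content fields assumed
		},
	})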
func (*ChatClient) AppendMessage
func (c *ChatClient) AppendMessage(ctx context.Context, sessionID string, message provider.Message) error
AppendMessage appends a message to a conversation in memory
func (*ChatClient) Close
func (c *ChatClient) Close() error
func (*ChatClient) CreateChatCompletion
func (c *ChatClient) CreateChatCompletion(ctx context.Context, req *provider.ChatCompletionRequest) (*provider.ChatCompletionResponse, error)
CreateChatCompletion creates a chat completion
func (*ChatClient) CreateChatCompletionStream
func (c *ChatClient) CreateChatCompletionStream(ctx context.Context, req *provider.ChatCompletionRequest) (provider.ChatCompletionStream, error)
CreateChatCompletionStream creates a streaming chat completion
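A consumption sketch. The Recv and Close method names on the stream are assumed from common Go streaming conventions; the provider.ChatCompletionStream interface is not expanded on this page, so check the provider package for the exact method set.

	stream, err := client.CreateChatCompletionStream(ctx, req)
	if err != nil {
		log.Fatal(err)
	}
	defer stream.Close() // Close is assumed; see provider.ChatCompletionStream
	for {
		chunk, err := stream.Recv() // Recv is assumed; io.EOF taken to signal completion
		if errors.Is(err, io.EOF) {
			break
		}
		if err != nil {
			log.Fatal(err)
		}
		_ = chunk // each chunk is a ChatCompletionChunk delta
	}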
func (*ChatClient) CreateChatCompletionStreamWithMemory
func (c *ChatClient) CreateChatCompletionStreamWithMemory(ctx context.Context, sessionID string, req *provider.ChatCompletionRequest) (provider.ChatCompletionStream, error)
CreateChatCompletionStreamWithMemory creates a streaming chat completion using conversation memory
func (*ChatClient) CreateChatCompletionWithMemory
func (c *ChatClient) CreateChatCompletionWithMemory(ctx context.Context, sessionID string, req *provider.ChatCompletionRequest) (*provider.ChatCompletionResponse, error)
CreateChatCompletionWithMemory creates a chat completion using conversation memory
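A session-scoped sketch. It assumes the client was created with a Memory store in ClientConfig (HasMemory reports this) and that stored history for the session is combined with the request; the exact load/append semantics are inferred from the method set on this page.

	// "session-123" is an arbitrary session identifier chosen by the caller.
	resp, err := client.CreateChatCompletionWithMemory(ctx, "session-123", &omnillm.ChatCompletionRequest{
		Model:    omnillm.ModelClaude3_5Haiku,
		Messages: []omnillm.Message{{Role: omnillm.RoleUser, Content: "Continue the story."}},
	})
	if err != nil {
		log.Fatal(err)
	}
	_ = resp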
func (*ChatClient) CreateConversationWithSystemMessage
func (c *ChatClient) CreateConversationWithSystemMessage(ctx context.Context, sessionID, systemMessage string) error
CreateConversationWithSystemMessage creates a new conversation with a system message
func (*ChatClient) DeleteConversation
func (c *ChatClient) DeleteConversation(ctx context.Context, sessionID string) error
DeleteConversation removes a conversation from memory
func (*ChatClient) GetConversationMessages
func (c *ChatClient) GetConversationMessages(ctx context.Context, sessionID string) ([]provider.Message, error)
GetConversationMessages retrieves messages from a conversation
func (*ChatClient) HasMemory
func (c *ChatClient) HasMemory() bool
HasMemory returns true if memory is configured
func (*ChatClient) LoadConversation
func (c *ChatClient) LoadConversation(ctx context.Context, sessionID string) (*ConversationMemory, error)
LoadConversation loads a conversation from memory
func (*ChatClient) Logger
func (c *ChatClient) Logger() *slog.Logger
Logger returns the client's logger
func (*ChatClient) Memory
func (c *ChatClient) Memory() *MemoryManager
Memory returns the memory manager (nil if not configured)
func (*ChatClient) Provider
func (c *ChatClient) Provider() provider.Provider
Provider returns the underlying provider
func (*ChatClient) SaveConversation
func (c *ChatClient) SaveConversation(ctx context.Context, conversation *ConversationMemory) error
SaveConversation saves a conversation to memory
type ChatCompletionChoice
type ChatCompletionChoice = provider.ChatCompletionChoice
type ChatCompletionChunk
type ChatCompletionChunk = provider.ChatCompletionChunk
type ChatCompletionRequest
type ChatCompletionRequest = provider.ChatCompletionRequest
type ChatCompletionResponse
type ChatCompletionResponse = provider.ChatCompletionResponse
type ChatCompletionStream
type ChatCompletionStream = provider.ChatCompletionStream
ChatCompletionStream is an alias for the provider.ChatCompletionStream interface, kept for backward compatibility.
type ClientConfig
type ClientConfig struct {
Provider ProviderName
APIKey string
BaseURL string
Region string // For AWS Bedrock
// Timeout sets the HTTP client timeout for API calls.
// If zero, providers use their default timeouts.
// Recommended: 300 * time.Second for reasoning models.
Timeout time.Duration
// HTTPClient is an optional HTTP client with custom transport (e.g., retry transport).
// If nil, providers will create clients using the Timeout value above.
// This can be used to add retry logic, tracing, or other middleware.
// Example with retry:
// rt := retryhttp.NewWithOptions(retryhttp.WithMaxRetries(3))
// config.HTTPClient = &http.Client{Transport: rt}
HTTPClient *http.Client
// Memory configuration (optional)
Memory kvs.Client
MemoryConfig *MemoryConfig
// Direct provider injection (for 3rd party providers)
CustomProvider provider.Provider
// ObservabilityHook is called before/after LLM calls (optional)
ObservabilityHook ObservabilityHook
// Logger for internal logging (optional, defaults to null logger)
Logger *slog.Logger
// Provider-specific configurations can be added here
Extra map[string]any
}
ClientConfig holds configuration for creating a client
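A configuration sketch following the field comments above. The 300-second timeout comes from the Timeout field comment; everything else is an illustrative choice, not a required setting.

	cfg := omnillm.ClientConfig{
		Provider: omnillm.ProviderNameAnthropic,
		APIKey:   os.Getenv(omnillm.EnvVarAnthropicAPIKey),
		Timeout:  300 * time.Second, // recommended for reasoning models per the field comment
		Logger:   slog.Default(),    // optional; defaults to a null logger
	}
	client, err := omnillm.NewClient(cfg)
	if err != nil {
		log.Fatal(err)
	}
	defer client.Close()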
type ConversationMemory
type ConversationMemory struct {
SessionID string `json:"session_id"`
Messages []Message `json:"messages"`
CreatedAt time.Time `json:"created_at"`
UpdatedAt time.Time `json:"updated_at"`
Metadata map[string]any `json:"metadata,omitempty"`
}
ConversationMemory represents stored conversation data
type LLMCallInfo
type LLMCallInfo struct {
CallID string // Unique identifier for correlating BeforeRequest/AfterResponse
ProviderName string // e.g., "openai", "anthropic"
StartTime time.Time // When the call started
}
LLMCallInfo provides metadata about the LLM call for observability
type MemoryConfig
type MemoryConfig struct {
// MaxMessages limits the number of messages to keep in memory per session
MaxMessages int
// TTL sets the time-to-live for stored conversations (0 for no expiration)
TTL time.Duration
// KeyPrefix allows customizing the key prefix for stored conversations
KeyPrefix string
}
MemoryConfig holds configuration for conversation memory
func DefaultMemoryConfig
func DefaultMemoryConfig() MemoryConfig
DefaultMemoryConfig returns sensible defaults for memory configuration
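The default values themselves are not documented on this page. A sketch of taking the defaults and overriding individual fields (the specific numbers are illustrative):

	memCfg := omnillm.DefaultMemoryConfig()
	memCfg.MaxMessages = 50     // cap stored history per session
	memCfg.TTL = 24 * time.Hour // expire conversations after a day; 0 means no expiration
	memCfg.KeyPrefix = "chat:"  // namespace keys in a shared store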
type MemoryManager
type MemoryManager struct {
// contains filtered or unexported fields
}
MemoryManager handles conversation persistence using KVS
func NewMemoryManager
func NewMemoryManager(kvsClient kvs.Client, config MemoryConfig) *MemoryManager
NewMemoryManager creates a new memory manager with the given KVS client and config
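A standalone-use sketch. Here kvsClient stands for any kvs.Client implementation (its construction is outside this package), and "session-123" is an arbitrary session identifier:

	mm := omnillm.NewMemoryManager(kvsClient, omnillm.DefaultMemoryConfig())
	if err := mm.CreateConversationWithSystemMessage(ctx, "session-123", "You are a helpful assistant."); err != nil {
		log.Fatal(err)
	}
	msgs, err := mm.GetMessages(ctx, "session-123")
	if err != nil {
		log.Fatal(err)
	}
	_ = msgs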
func (*MemoryManager) AppendMessage
func (m *MemoryManager) AppendMessage(ctx context.Context, sessionID string, message Message) error
AppendMessage adds a message to the conversation and saves it
func (*MemoryManager) AppendMessages
func (m *MemoryManager) AppendMessages(ctx context.Context, sessionID string, messages []Message) error
AppendMessages adds multiple messages to the conversation and saves it
func (*MemoryManager) CreateConversationWithSystemMessage
func (m *MemoryManager) CreateConversationWithSystemMessage(ctx context.Context, sessionID, systemMessage string) error
CreateConversationWithSystemMessage creates a new conversation with a system message
func (*MemoryManager) DeleteConversation
func (m *MemoryManager) DeleteConversation(ctx context.Context, sessionID string) error
DeleteConversation removes a conversation from memory
func (*MemoryManager) GetMessages
func (m *MemoryManager) GetMessages(ctx context.Context, sessionID string) ([]Message, error)
GetMessages returns just the messages from a conversation
func (*MemoryManager) LoadConversation
func (m *MemoryManager) LoadConversation(ctx context.Context, sessionID string) (*ConversationMemory, error)
LoadConversation retrieves a conversation from memory
func (*MemoryManager) SaveConversation
func (m *MemoryManager) SaveConversation(ctx context.Context, conversation *ConversationMemory) error
SaveConversation stores a conversation in memory
func (*MemoryManager) SetMetadata
func (m *MemoryManager) SetMetadata(ctx context.Context, sessionID string, metadata map[string]any) error
SetMetadata sets metadata for a conversation
type ModelInfo
type ModelInfo struct {
ID string `json:"id"`
Provider ProviderName `json:"provider"`
Name string `json:"name"`
MaxTokens int `json:"max_tokens"`
}
ModelInfo represents information about a model
func GetModelInfo
GetModelInfo returns model information
type ObservabilityHook
type ObservabilityHook interface {
// BeforeRequest is called before each LLM call.
// Returns a new context for trace/span propagation.
// The hook should not modify the request.
BeforeRequest(ctx context.Context, info LLMCallInfo, req *provider.ChatCompletionRequest) context.Context
// AfterResponse is called after each LLM call completes.
// This is called for both successful and failed requests.
AfterResponse(ctx context.Context, info LLMCallInfo, req *provider.ChatCompletionRequest, resp *provider.ChatCompletionResponse, err error)
// WrapStream wraps a stream for observability.
// This allows the hook to observe streaming responses.
// The returned stream must implement the same interface as the input.
//
// Note: For streaming, AfterResponse is only called if stream creation fails.
// To track streaming completion timing and content, the wrapper returned here
// should handle Close() or detect EOF in Recv() to finalize metrics/traces.
WrapStream(ctx context.Context, info LLMCallInfo, req *provider.ChatCompletionRequest, stream provider.ChatCompletionStream) provider.ChatCompletionStream
}
ObservabilityHook allows external packages to observe LLM calls. Implementations can use this to add tracing, logging, or metrics without modifying the core OmniLLM library.
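A sketch of a latency-logging hook implementing the interface above. The type name latencyHook and its field are illustrative; the method signatures are taken from the interface as documented.

	// latencyHook logs the duration of each LLM call.
	type latencyHook struct{ log *slog.Logger }

	func (h *latencyHook) BeforeRequest(ctx context.Context, info omnillm.LLMCallInfo, req *provider.ChatCompletionRequest) context.Context {
		return ctx // start a span here and return the derived context
	}

	func (h *latencyHook) AfterResponse(ctx context.Context, info omnillm.LLMCallInfo, req *provider.ChatCompletionRequest, resp *provider.ChatCompletionResponse, err error) {
		h.log.Info("llm call finished",
			"call_id", info.CallID,
			"provider", info.ProviderName,
			"duration", time.Since(info.StartTime),
			"err", err)
	}

	func (h *latencyHook) WrapStream(ctx context.Context, info omnillm.LLMCallInfo, req *provider.ChatCompletionRequest, stream provider.ChatCompletionStream) provider.ChatCompletionStream {
		return stream // wrap the stream to finalize metrics on Close or EOF, per the note above
	}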
type ProviderName
type ProviderName string
ProviderName represents the different LLM provider names
const (
	ProviderNameOpenAI    ProviderName = "openai"
	ProviderNameAnthropic ProviderName = "anthropic"
	ProviderNameBedrock   ProviderName = "bedrock"
	ProviderNameOllama    ProviderName = "ollama"
	ProviderNameGemini    ProviderName = "gemini"
	ProviderNameXAI       ProviderName = "xai"
)
type ToolFunction
type ToolFunction = provider.ToolFunction
Source Files
Directories
| Path | Synopsis |
|---|---|
| examples | |
| examples/anthropic_streaming | command |
| examples/architecture_demo | command |
| examples/basic | command |
| examples/conversation | command |
| examples/custom_provider | command |
| examples/gemini | command |
| examples/memory_demo | command |
| examples/ollama | command |
| examples/ollama_streaming | command |
| examples/providers_demo | command |
| examples/streaming | command |
| examples/xai | command |
| models | Package models provides a comprehensive catalog of LLM model identifiers and documentation references for all supported providers. |
| provider | Package provider defines the core interfaces that external LLM providers must implement. |
| providers | |
| providers/anthropic | Package anthropic provides Anthropic provider adapter for the OmniLLM unified interface |
| providers/gemini | Package gemini provides Google Gemini provider adapter for the OmniLLM unified interface |
| providers/ollama | Package ollama provides Ollama provider adapter for the OmniLLM unified interface |
| providers/openai | Package openai provides OpenAI provider adapter for the OmniLLM unified interface |
| providers/xai | Package xai provides X.AI Grok provider adapter for the OmniLLM unified interface |
| testing | Package testing provides mock implementations for testing |