providers

package
v0.1.2 Latest
Warning

This package is not in the latest version of its module.

Go to latest
Published: Jan 8, 2026 License: MIT Imports: 5 Imported by: 0

README

LLM Provider Interface

A unified, extensible Go interface for integrating multiple Large Language Model (LLM) providers including Anthropic, OpenAI, Google Gemini, AWS Bedrock, Azure OpenAI, and Ollama.

Architecture Overview

The provider interface implements several Go design patterns to create a clean, maintainable, and type-safe abstraction:

  • Provider Interface Pattern: Unified contract for all LLM providers
  • Functional Options Pattern: Flexible request configuration
  • Factory Pattern: Dynamic provider instantiation
  • Registry Pattern: Centralized provider management
  • Event Streaming: Real-time response handling via Go channels

Core Components

1. Provider Interface

The Provider interface defines the contract that all LLM provider implementations must satisfy:

// Provider is the unified contract that every LLM backend (Anthropic,
// OpenAI, Gemini, Bedrock, Azure OpenAI, Ollama, ...) implements.
type Provider interface {
    // Chat sends a single request and returns the complete response
    Chat(ctx context.Context, req *ChatRequest, opts ...Option) (*Response, error)

    // Stream sends a request and returns a channel for streaming events
    Stream(ctx context.Context, req *StreamRequest, opts ...Option) (<-chan Event, error)

    // Name returns the provider's identifier
    Name() string

    // Models returns available models for this provider
    Models(ctx context.Context) ([]Model, error)

    // Close releases provider resources
    Close() error
}
2. Type System
Messages and Roles
// Role values identify the author of a message.
const (
    RoleUser      Role = "user"
    RoleAssistant Role = "assistant"
    RoleSystem    Role = "system"
)

// Message represents a single turn in a conversation.
type Message struct {
    Role    Role
    Content string
}
Request Types
// ChatRequest is the payload for Provider.Chat.
type ChatRequest struct {
    Messages      []Message
    Model         string
    MaxTokens     int
    Temperature   float64
    TopP          float64
    StopSequences []string
    Metadata      map[string]interface{}
}

// StreamRequest is the payload for Provider.Stream.
// NOTE(review): field-for-field identical to ChatRequest; kept as a distinct
// type so the two call paths can diverge independently — confirm intent.
type StreamRequest struct {
    Messages      []Message
    Model         string
    MaxTokens     int
    Temperature   float64
    TopP          float64
    StopSequences []string
    Metadata      map[string]interface{}
}
Response Types
// Response is a complete (non-streaming) LLM reply in unified form.
type Response struct {
    Content      string
    Model        string
    Provider     string
    FinishReason string
    Usage        *UsageInfo // may be nil when the provider reports no usage
    Metadata     map[string]interface{}
    CreatedAt    time.Time
}

// UsageInfo holds token accounting for a single request.
type UsageInfo struct {
    PromptTokens     int
    CompletionTokens int
    TotalTokens      int
}
Event System
// EventType values emitted on a Stream channel.
const (
    EventTextDelta     EventType = "text_delta"
    EventContentStart  EventType = "content_start"
    EventContentEnd    EventType = "content_end"
    EventMessageStart  EventType = "message_start"
    EventMessageStop   EventType = "message_stop"
    EventError         EventType = "error"
    EventUsage         EventType = "usage"
    EventThinking      EventType = "thinking"
)

// Event is one streaming event from the LLM.
// NOTE(review): the generated API docs below declare Data as string (plus an
// Error field), but this README declares interface{}; the streaming example
// asserts event.Data.(string), which only compiles against interface{}.
// Confirm which definition matches the current source.
type Event struct {
    Type      EventType
    Data      interface{}
    Usage     *UsageInfo
    Timestamp time.Time
}

Usage Examples

Example 1: Basic Chat Request
package main

import (
    "context"
    "fmt"
    "log"

    "github.com/AINative-studio/ainative-code/internal/providers"
)

// main demonstrates the simplest request/response flow: create a provider
// through the global registry, send one chat request, and print the result.
func main() {
    // Get the global registry
    registry := providers.GetGlobalRegistry()

    // Create a provider instance (assumes factory is registered)
    config := providers.Config{
        APIKey:  "your-api-key",
        BaseURL: "https://api.anthropic.com",
    }

    provider, err := registry.Create("anthropic", config)
    if err != nil {
        log.Fatalf("Failed to create provider: %v", err)
    }
    defer provider.Close()

    // Create a chat request
    req := &providers.ChatRequest{
        Messages: []providers.Message{
            {Role: providers.RoleUser, Content: "What is the capital of France?"},
        },
        Model: "claude-3-sonnet-20240229",
    }

    // Send the request
    ctx := context.Background()
    resp, err := provider.Chat(ctx, req)
    if err != nil {
        log.Fatalf("Chat request failed: %v", err)
    }

    // Display the response
    fmt.Printf("Response: %s\n", resp.Content)
    fmt.Printf("Model: %s\n", resp.Model)
    // Usage is a *UsageInfo and may be nil when the provider does not
    // report token accounting — guard before dereferencing.
    if resp.Usage != nil {
        fmt.Printf("Tokens used: %d\n", resp.Usage.TotalTokens)
    }
}
Example 2: Using Functional Options
package main

import (
    "context"
    "fmt"
    "log"

    "github.com/AINative-studio/ainative-code/internal/providers"
)

// main demonstrates configuring a request with functional options.
func main() {
    registry := providers.GetGlobalRegistry()

    // Check the lookup error: a missing provider would otherwise be a nil
    // interface and panic at the Chat call below.
    provider, err := registry.Get("anthropic")
    if err != nil {
        log.Fatalf("Provider not found: %v", err)
    }

    req := &providers.ChatRequest{
        Messages: []providers.Message{
            {Role: providers.RoleSystem, Content: "You are a helpful assistant."},
            {Role: providers.RoleUser, Content: "Write a haiku about coding."},
        },
        Model: "claude-3-sonnet-20240229",
    }

    // Apply functional options for fine-grained control
    providers.ApplyOptions(req,
        providers.WithMaxTokens(1024),
        providers.WithTemperature(0.7),
        providers.WithTopP(0.9),
        providers.WithStopSequences("\n\n"),
        providers.WithMetadata("session_id", "abc-123"),
        providers.WithMetadata("user_id", 42),
    )

    ctx := context.Background()
    resp, err := provider.Chat(ctx, req)
    if err != nil {
        log.Fatalf("Chat failed: %v", err)
    }

    fmt.Printf("Haiku:\n%s\n", resp.Content)
}
Example 3: Streaming Responses
package main

import (
    "context"
    "fmt"
    "log"

    "github.com/AINative-studio/ainative-code/internal/providers"
)

// main demonstrates consuming a streaming response event by event.
func main() {
    registry := providers.GetGlobalRegistry()

    // Fail fast on a missing provider instead of panicking on a nil
    // interface at the Stream call below.
    provider, err := registry.Get("anthropic")
    if err != nil {
        log.Fatalf("Provider not found: %v", err)
    }

    req := &providers.StreamRequest{
        Messages: []providers.Message{
            {Role: providers.RoleUser, Content: "Tell me a story about a brave knight."},
        },
        Model: "claude-3-sonnet-20240229",
    }

    providers.ApplyStreamOptions(req,
        providers.WithMaxTokens(2048),
        providers.WithTemperature(0.8),
    )

    ctx := context.Background()
    eventChan, err := provider.Stream(ctx, req)
    if err != nil {
        log.Fatalf("Stream failed: %v", err)
    }

    // Process streaming events; the loop ends when the provider closes
    // the channel, so the producer goroutine is never leaked.
    var fullContent string
    for event := range eventChan {
        switch event.Type {
        case providers.EventMessageStart:
            fmt.Println("Stream started...")

        case providers.EventTextDelta:
            // Use the checked assertion form: an unexpected payload type
            // must not panic the consumer.
            if delta, ok := event.Data.(string); ok {
                fmt.Print(delta)
                fullContent += delta
            }

        case providers.EventUsage:
            // Usage is a pointer and may be absent on some events.
            if event.Usage != nil {
                fmt.Printf("\nTokens used: %d\n", event.Usage.TotalTokens)
            }

        case providers.EventError:
            fmt.Printf("Error: %v\n", event.Data)

        case providers.EventMessageStop:
            fmt.Println("\nStream complete")
        }
    }
}
Example 4: Provider Registry Management
package main

import (
    "context"
    "fmt"
    "log"

    "github.com/AINative-studio/ainative-code/internal/providers"
)

// createOpenAIProvider is a custom provider factory function.
// A real implementation would construct an OpenAI client; this is a
// placeholder example.
func createOpenAIProvider(config providers.Config) (providers.Provider, error) {
    return &CustomOpenAIProvider{
        apiKey:  config.APIKey,
        baseURL: config.BaseURL,
    }, nil
}

func main() {
    registry := providers.NewRegistry()
    // Clean up - close all providers when main returns.
    defer registry.Close()

    // Register a provider factory
    if err := registry.RegisterFactory("openai", createOpenAIProvider); err != nil {
        log.Fatalf("Factory registration failed: %v", err)
    }

    // Create a provider instance with a specific configuration
    config1 := providers.Config{
        APIKey:  "key-for-service-a",
        BaseURL: "https://api.openai.com/v1",
    }
    provider1, err := registry.Create("openai", config1)
    if err != nil {
        log.Fatalf("Provider creation failed: %v", err)
    }
    fmt.Printf("Created provider: %s\n", provider1.Name())

    // List all registered providers.
    // NOTE: do not name this variable "providers" — that would shadow the
    // imported package and break every providers.* reference below.
    names := registry.List()
    fmt.Printf("Registered providers: %v\n", names)

    // Retrieve a specific provider
    provider, err := registry.Get("openai")
    if err != nil {
        log.Fatalf("Provider not found: %v", err)
    }

    // Use the provider
    req := &providers.ChatRequest{
        Messages: []providers.Message{
            {Role: providers.RoleUser, Content: "Hello!"},
        },
        Model: "gpt-4",
    }

    resp, err := provider.Chat(context.Background(), req)
    if err != nil {
        log.Fatalf("Chat failed: %v", err)
    }
    fmt.Printf("Response: %s\n", resp.Content)
}
Example 5: Context-Based Cancellation and Timeout
package main

import (
    "context"
    "fmt"
    "log"
    "time"

    "github.com/AINative-studio/ainative-code/internal/providers"
)

// main demonstrates bounding a chat request with a 10-second deadline and
// distinguishing a context timeout from an ordinary provider error.
func main() {
    registry := providers.GetGlobalRegistry()
    // NOTE(review): the Get error is discarded for brevity; a missing
    // provider would make the Chat call below panic on a nil interface.
    provider, _ := registry.Get("anthropic")

    req := &providers.ChatRequest{
        Messages: []providers.Message{
            {Role: providers.RoleUser, Content: "Explain quantum computing in detail."},
        },
        Model: "claude-3-sonnet-20240229",
    }

    // Set a 10-second timeout
    ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
    defer cancel()

    resp, err := provider.Chat(ctx, req)
    if err != nil {
        // Inspect the context first so a deadline hit is reported
        // distinctly from a provider-side failure.
        if ctx.Err() == context.DeadlineExceeded {
            log.Fatal("Request timed out after 10 seconds")
        }
        log.Fatalf("Request failed: %v", err)
    }

    fmt.Printf("Response: %s\n", resp.Content)
}

// streamWithCancellation demonstrates aborting an in-flight stream by
// cancelling its context from another goroutine. The event loop keeps
// draining until the provider closes the channel, so no goroutine leaks.
func streamWithCancellation() {
    registry := providers.GetGlobalRegistry()
    // NOTE(review): Get error discarded for brevity, as in main above.
    provider, _ := registry.Get("anthropic")

    req := &providers.StreamRequest{
        Messages: []providers.Message{
            {Role: providers.RoleUser, Content: "Write a long essay."},
        },
        Model: "claude-3-sonnet-20240229",
    }

    // Create cancellable context
    ctx, cancel := context.WithCancel(context.Background())

    // Cancel after 5 seconds
    go func() {
        time.Sleep(5 * time.Second)
        cancel()
    }()

    eventChan, err := provider.Stream(ctx, req)
    if err != nil {
        log.Fatalf("Stream failed: %v", err)
    }

    // Process events until cancelled
    for event := range eventChan {
        if event.Type == providers.EventTextDelta {
            fmt.Print(event.Data.(string))
        }
    }

    if ctx.Err() == context.Canceled {
        fmt.Println("\nStream cancelled by user")
    }
}
Example 6: Implementing a Custom Provider
package main

import (
    "bufio"
    "context"
    "fmt"
    "net/http"
    "time"

    "github.com/AINative-studio/ainative-code/internal/providers"
)

// CustomAnthropicProvider implements the providers.Provider interface for
// the Anthropic API.
// NOTE: the original import block omitted "bufio" and "net/http", both of
// which this example uses; they are included above so it compiles.
type CustomAnthropicProvider struct {
    name    string
    apiKey  string
    baseURL string
    client  *http.Client
}

// Chat sends a single request and converts the Anthropic response into the
// unified providers.Response format.
func (p *CustomAnthropicProvider) Chat(ctx context.Context, req *providers.ChatRequest, opts ...providers.Option) (*providers.Response, error) {
    // Apply any functional options
    providers.ApplyOptions(req, opts...)

    // Build API request (implementation specific)
    apiReq := buildAnthropicRequest(req)

    // Make HTTP call to Anthropic API
    apiResp, err := p.client.Do(apiReq.WithContext(ctx))
    if err != nil {
        return nil, fmt.Errorf("API request failed: %w", err)
    }
    defer apiResp.Body.Close()

    // Parse response and convert to unified format
    response := &providers.Response{
        Content:      parseContent(apiResp),
        Model:        req.Model,
        Provider:     p.name,
        FinishReason: parseFinishReason(apiResp),
        Usage: &providers.UsageInfo{
            PromptTokens:     parsePromptTokens(apiResp),
            CompletionTokens: parseCompletionTokens(apiResp),
            TotalTokens:      parseTotalTokens(apiResp),
        },
        CreatedAt: time.Now(),
    }

    return response, nil
}

// Stream sends a streaming request and translates Anthropic SSE lines into
// unified providers.Event values on the returned channel. The channel is
// closed when the stream ends, errors out, or ctx is cancelled.
func (p *CustomAnthropicProvider) Stream(ctx context.Context, req *providers.StreamRequest, opts ...providers.Option) (<-chan providers.Event, error) {
    providers.ApplyStreamOptions(req, opts...)

    // Buffered so a slow consumer does not immediately block the parser.
    eventChan := make(chan providers.Event, 100)

    // Stream in a goroutine; closing the channel signals completion.
    go func() {
        defer close(eventChan)

        // Send message start event
        eventChan <- providers.Event{
            Type:      providers.EventMessageStart,
            Timestamp: time.Now(),
        }

        // Build and execute streaming request
        apiReq := buildAnthropicStreamRequest(req)
        resp, err := p.client.Do(apiReq.WithContext(ctx))
        if err != nil {
            eventChan <- providers.Event{
                Type:      providers.EventError,
                Data:      err.Error(),
                Timestamp: time.Now(),
            }
            return
        }
        defer resp.Body.Close()

        // Parse SSE stream line by line, bailing out on cancellation.
        scanner := bufio.NewScanner(resp.Body)
        for scanner.Scan() {
            select {
            case <-ctx.Done():
                return
            default:
                line := scanner.Text()
                event := parseSSEEvent(line)

                // Convert to unified event format
                switch event.Type {
                case "content_block_delta":
                    eventChan <- providers.Event{
                        Type:      providers.EventTextDelta,
                        Data:      event.Delta.Text,
                        Timestamp: time.Now(),
                    }
                case "message_stop":
                    eventChan <- providers.Event{
                        Type:      providers.EventMessageStop,
                        Usage:     parseUsage(event),
                        Timestamp: time.Now(),
                    }
                }
            }
        }
        // Surface scanner failures (e.g. a dropped connection) to consumers
        // instead of silently closing the channel.
        if err := scanner.Err(); err != nil {
            eventChan <- providers.Event{
                Type:      providers.EventError,
                Data:      err.Error(),
                Timestamp: time.Now(),
            }
        }
    }()

    return eventChan, nil
}

// Name returns the provider identifier.
func (p *CustomAnthropicProvider) Name() string {
    return p.name
}

// Models returns the static list of Anthropic models this provider exposes.
func (p *CustomAnthropicProvider) Models(ctx context.Context) ([]providers.Model, error) {
    return []providers.Model{
        {
            ID:           "claude-3-opus-20240229",
            Name:         "Claude 3 Opus",
            Provider:     p.name,
            MaxTokens:    200000,
            Capabilities: []string{"chat", "streaming", "vision"},
        },
        {
            ID:           "claude-3-sonnet-20240229",
            Name:         "Claude 3 Sonnet",
            Provider:     p.name,
            MaxTokens:    200000,
            Capabilities: []string{"chat", "streaming", "vision"},
        },
    }, nil
}

// Close releases HTTP resources held by the provider.
func (p *CustomAnthropicProvider) Close() error {
    // Clean up resources (close connections, etc.)
    p.client.CloseIdleConnections()
    return nil
}

// NewAnthropicProvider is the factory function for creating Anthropic
// provider instances; it validates that an API key is present.
func NewAnthropicProvider(config providers.Config) (providers.Provider, error) {
    if config.APIKey == "" {
        return nil, fmt.Errorf("API key is required")
    }

    return &CustomAnthropicProvider{
        name:    "anthropic",
        apiKey:  config.APIKey,
        baseURL: config.BaseURL,
        client:  &http.Client{Timeout: 60 * time.Second},
    }, nil
}

Best Practices

1. Always Use Context

Pass context to all Chat() and Stream() calls to enable cancellation and timeout control:

ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
defer cancel()

resp, err := provider.Chat(ctx, req)
2. Handle Streaming Events Properly

Always consume the entire event channel to prevent goroutine leaks:

eventChan, err := provider.Stream(ctx, req)
if err != nil {
    return err
}

for event := range eventChan {
    // Process event
}
3. Close Providers When Done

Use defer to ensure providers are properly closed:

provider, err := registry.Create("anthropic", config)
if err != nil {
    return err
}
defer provider.Close()
4. Use Functional Options for Flexibility

Leverage functional options to keep the API clean while supporting advanced configuration:

// Reusable options
productionOpts := []providers.Option{
    providers.WithMaxTokens(4096),
    providers.WithTemperature(0.7),
    providers.WithMetadata("env", "production"),
}

providers.ApplyOptions(req, productionOpts...)
5. Implement Proper Error Handling

Check for context errors separately from provider errors:

resp, err := provider.Chat(ctx, req)
if err != nil {
    if ctx.Err() == context.DeadlineExceeded {
        // Handle timeout
    } else if ctx.Err() == context.Canceled {
        // Handle cancellation
    } else {
        // Handle provider error
    }
}
6. Use Registry for Multi-Provider Applications

When supporting multiple providers, use the registry pattern:

registry := providers.GetGlobalRegistry()

// Register multiple providers
registry.RegisterFactory("anthropic", NewAnthropicProvider)
registry.RegisterFactory("openai", NewOpenAIProvider)
registry.RegisterFactory("gemini", NewGeminiProvider)

// Use provider by name
provider, _ := registry.Get(userSelectedProvider)
resp, _ := provider.Chat(ctx, req)

Thread Safety

The Registry implementation is thread-safe and can be accessed concurrently:

  • Get(), List(), and Create() use read locks
  • Register(), RegisterFactory(), Unregister(), and Close() use write locks
  • Provider implementations should ensure their own thread-safety

Testing

The package includes comprehensive test coverage (100%) with examples of:

  • Unit tests for all types and constants
  • Functional options pattern testing
  • Registry operations and thread safety
  • Mock provider implementations
  • Error handling and edge cases

See *_test.go files for testing patterns and examples.

Contributing

When implementing a new provider:

  1. Implement all methods of the Provider interface
  2. Handle context cancellation properly in both Chat() and Stream()
  3. Convert provider-specific types to unified types
  4. Map provider events to unified event types
  5. Implement proper resource cleanup in Close()
  6. Create a factory function that accepts Config
  7. Write comprehensive unit tests
  8. Document provider-specific configuration requirements

License

See LICENSE file for details.

Documentation

Index

Constants

This section is empty.

Variables

This section is empty.

Functions

func ApplyOptions

func ApplyOptions(req *ChatRequest, opts ...Option)

ApplyOptions applies all functional options to a ChatRequest

func ApplyStreamOptions

func ApplyStreamOptions(req *StreamRequest, opts ...Option)

ApplyStreamOptions applies all functional options to a StreamRequest

Types

type ChatRequest

type ChatRequest struct {
	Messages      []Message              `json:"messages"`
	Model         string                 `json:"model"`
	MaxTokens     int                    `json:"max_tokens,omitempty"`
	Temperature   float64                `json:"temperature,omitempty"`
	TopP          float64                `json:"top_p,omitempty"`
	StopSequences []string               `json:"stop_sequences,omitempty"`
	Metadata      map[string]interface{} `json:"metadata,omitempty"`
}

ChatRequest represents a request to the Chat method

type Config

type Config struct {
	APIKey       string
	BaseURL      string
	MaxRetries   int
	Timeout      int // in seconds
	DefaultModel string
	Metadata     map[string]interface{}
}

Config holds common provider configuration

type Event

type Event struct {
	Type      EventType              `json:"type"`
	Data      string                 `json:"data,omitempty"`
	Error     error                  `json:"error,omitempty"`
	Usage     *UsageInfo             `json:"usage,omitempty"`
	Metadata  map[string]interface{} `json:"metadata,omitempty"`
	Timestamp time.Time              `json:"timestamp"`
}

Event represents a streaming event from the LLM

type EventType

type EventType string

EventType represents the type of streaming event

const (
	EventTextDelta    EventType = "text_delta"
	EventContentStart EventType = "content_start"
	EventContentEnd   EventType = "content_end"
	EventMessageStart EventType = "message_start"
	EventMessageStop  EventType = "message_stop"
	EventError        EventType = "error"
	EventUsage        EventType = "usage"
	EventThinking     EventType = "thinking"
)

type Message

type Message struct {
	Role    Role   `json:"role"`
	Content string `json:"content"`
}

Message represents a chat message

type Model

type Model struct {
	ID           string   `json:"id"`
	Name         string   `json:"name"`
	Provider     string   `json:"provider"`
	MaxTokens    int      `json:"max_tokens"`
	Capabilities []string `json:"capabilities,omitempty"`
}

Model represents an LLM model

type Option

type Option func(*RequestOptions)

Option is a functional option for configuring requests

func WithMaxTokens

func WithMaxTokens(tokens int) Option

WithMaxTokens sets the maximum number of tokens to generate

func WithMetadata

func WithMetadata(key string, value interface{}) Option

WithMetadata adds custom metadata to the request

func WithStopSequences

func WithStopSequences(sequences ...string) Option

WithStopSequences sets sequences where the model will stop generating

func WithTemperature

func WithTemperature(temp float64) Option

WithTemperature sets the sampling temperature (typically 0.0 to 1.0)

func WithTopP

func WithTopP(topP float64) Option

WithTopP sets the nucleus sampling parameter (typically 0.0 to 1.0)

type Provider

type Provider interface {
	// Chat sends a chat request and returns a complete response
	Chat(ctx context.Context, req *ChatRequest, opts ...Option) (*Response, error)

	// Stream sends a streaming chat request and returns a channel of events
	Stream(ctx context.Context, req *StreamRequest, opts ...Option) (<-chan Event, error)

	// Name returns the provider's name (e.g., "anthropic", "openai")
	Name() string

	// Models returns the list of available models for this provider
	Models(ctx context.Context) ([]Model, error)

	// Close closes the provider and releases any resources
	io.Closer
}

Provider defines the unified interface for all LLM providers

type ProviderFactory

type ProviderFactory func(config Config) (Provider, error)

ProviderFactory is a function that creates a new provider instance

type Registry

type Registry struct {
	// contains filtered or unexported fields
}

Registry manages multiple provider instances

func GetGlobalRegistry

func GetGlobalRegistry() *Registry

GetGlobalRegistry returns the global registry instance

func NewRegistry

func NewRegistry() *Registry

NewRegistry creates a new provider registry

func (*Registry) Close

func (r *Registry) Close() error

Close closes all registered providers

func (*Registry) Create

func (r *Registry) Create(name string, config Config) (Provider, error)

Create creates a new provider instance using a registered factory

func (*Registry) Get

func (r *Registry) Get(name string) (Provider, error)

Get retrieves a provider by name

func (*Registry) List

func (r *Registry) List() []string

List returns all registered provider names

func (*Registry) Register

func (r *Registry) Register(name string, provider Provider) error

Register registers a provider instance

func (*Registry) RegisterFactory

func (r *Registry) RegisterFactory(name string, factory ProviderFactory) error

RegisterFactory registers a provider factory function

func (*Registry) Unregister

func (r *Registry) Unregister(name string) error

Unregister removes a provider from the registry

type RequestOptions

type RequestOptions struct {
	MaxTokens     *int
	Temperature   *float64
	TopP          *float64
	StopSequences []string
	Metadata      map[string]interface{}
}

RequestOptions holds optional parameters for Chat and Stream requests

type Response

type Response struct {
	Content      string                 `json:"content"`
	Model        string                 `json:"model"`
	Provider     string                 `json:"provider"`
	FinishReason string                 `json:"finish_reason,omitempty"`
	Usage        *UsageInfo             `json:"usage,omitempty"`
	Metadata     map[string]interface{} `json:"metadata,omitempty"`
	CreatedAt    time.Time              `json:"created_at"`
}

Response represents a complete LLM response

type Role

type Role string

Role represents the role of a message sender

const (
	RoleUser      Role = "user"
	RoleAssistant Role = "assistant"
	RoleSystem    Role = "system"
)

type StreamRequest

type StreamRequest struct {
	Messages      []Message              `json:"messages"`
	Model         string                 `json:"model"`
	MaxTokens     int                    `json:"max_tokens,omitempty"`
	Temperature   float64                `json:"temperature,omitempty"`
	TopP          float64                `json:"top_p,omitempty"`
	StopSequences []string               `json:"stop_sequences,omitempty"`
	Metadata      map[string]interface{} `json:"metadata,omitempty"`
}

StreamRequest represents a request to the Stream method

type UsageInfo

type UsageInfo struct {
	PromptTokens     int `json:"prompt_tokens"`
	CompletionTokens int `json:"completion_tokens"`
	TotalTokens      int `json:"total_tokens"`
}

UsageInfo contains token usage information

Jump to

Keyboard shortcuts

? : This menu
/ : Search site
f or F : Jump to
y or Y : Canonical URL