meta

package
v0.1.2
Published: Jan 8, 2026 License: MIT Imports: 8 Imported by: 0

Documentation

Index

Constants

const (
	ErrTypeInvalidRequest    = "invalid_request_error"
	ErrTypeAuthentication    = "authentication_error"
	ErrTypePermission        = "permission_error"
	ErrTypeNotFound          = "not_found_error"
	ErrTypeRateLimit         = "rate_limit_error"
	ErrTypeAPI               = "api_error"
	ErrTypeTimeout           = "timeout_error"
	ErrTypeInvalidAPIKey     = "invalid_api_key"
	ErrTypeInsufficientQuota = "insufficient_quota"
)

Error types returned by the Meta LLAMA API

const (
	// Llama 4 Models - Mixture of Experts
	ModelLlama4Maverick = "Llama-4-Maverick-17B-128E-Instruct-FP8" // 400B total params, 17B active
	ModelLlama4Scout    = "Llama-4-Scout-17B-16E"                  // 109B total params, 17B active

	// Llama 3.3 Models - Dense
	ModelLlama33_70B = "Llama-3.3-70B-Instruct" // 70B params
	ModelLlama33_8B  = "Llama-3.3-8B-Instruct"  // 8B params (fast)

	// API endpoints
	DefaultBaseURL = "https://api.llama.com/compat/v1"
	DefaultTimeout = 60 * time.Second
)

Supported Meta LLAMA models and default API settings

Variables

This section is empty.

Functions

func IsValidModel

func IsValidModel(modelID string) bool

IsValidModel reports whether modelID is a supported Meta LLAMA model identifier
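
A minimal sketch of guarding a model choice with IsValidModel. The import path example.com/meta is hypothetical and stands in for wherever this package actually lives; the sketch assumes only the model constants listed above are accepted.

package main

import (
	"fmt"

	meta "example.com/meta" // hypothetical import path for this package
)

func main() {
	for _, id := range []string{meta.ModelLlama4Scout, "Llama-2-7B"} {
		if meta.IsValidModel(id) {
			fmt.Printf("%s: supported\n", id)
		} else {
			// Assumption: IDs outside the constants above are rejected.
			fmt.Printf("%s: not supported\n", id)
		}
	}
}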

Types

type ChatRequest

type ChatRequest struct {
	Model            string    `json:"model"`
	Messages         []Message `json:"messages"`
	Temperature      float64   `json:"temperature,omitempty"`
	TopP             float64   `json:"top_p,omitempty"`
	MaxTokens        int       `json:"max_tokens,omitempty"`
	Stream           bool      `json:"stream,omitempty"`
	Stop             []string  `json:"stop,omitempty"`
	PresencePenalty  float64   `json:"presence_penalty,omitempty"`
	FrequencyPenalty float64   `json:"frequency_penalty,omitempty"`
	N                int       `json:"n,omitempty"`
	User             string    `json:"user,omitempty"`
}

ChatRequest represents a Meta LLAMA API chat completion request. It is compatible with the OpenAI Chat Completions API format.
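
Because ChatRequest mirrors the OpenAI wire format, building one and marshaling it shows exactly what goes over the wire. A minimal sketch (the hypothetical import path example.com/meta again stands in for the real one):

package main

import (
	"encoding/json"
	"fmt"

	meta "example.com/meta" // hypothetical import path
)

func main() {
	req := meta.ChatRequest{
		Model: meta.ModelLlama33_8B,
		Messages: []meta.Message{
			{Role: "system", Content: "You are a terse assistant."},
			{Role: "user", Content: "Name one Go proverb."},
		},
		Temperature: 0.7,
		MaxTokens:   128,
	}

	body, err := json.Marshal(req)
	if err != nil {
		panic(err)
	}
	// The omitempty tags drop every optional field left at its zero value.
	fmt.Println(string(body))
}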

type ChatResponse

type ChatResponse struct {
	ID      string   `json:"id"`
	Object  string   `json:"object"`
	Created int64    `json:"created"`
	Model   string   `json:"model"`
	Choices []Choice `json:"choices"`
	Usage   Usage    `json:"usage"`
}

ChatResponse represents a Meta LLAMA API chat completion response
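
Decoding a response body into ChatResponse and reading the first choice; a sketch with a hand-written payload in the OpenAI-compatible shape:

package main

import (
	"encoding/json"
	"fmt"

	meta "example.com/meta" // hypothetical import path
)

func main() {
	payload := []byte(`{
		"id": "chatcmpl-123",
		"object": "chat.completion",
		"created": 1736300000,
		"model": "Llama-3.3-8B-Instruct",
		"choices": [{"index": 0, "message": {"role": "assistant", "content": "Clear is better than clever."}, "finish_reason": "stop"}],
		"usage": {"prompt_tokens": 20, "completion_tokens": 8, "total_tokens": 28}
	}`)

	var resp meta.ChatResponse
	if err := json.Unmarshal(payload, &resp); err != nil {
		panic(err)
	}
	fmt.Println(resp.Choices[0].Message.Content)
	fmt.Println("total tokens:", resp.Usage.TotalTokens)
}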

type Choice

type Choice struct {
	Index        int     `json:"index"`
	Message      Message `json:"message"`
	FinishReason string  `json:"finish_reason"`
}

Choice represents a completion choice

type Config

type Config struct {
	// APIKey is the Meta LLAMA API key (required)
	// Format: LLM|<app_id>|<token>
	APIKey string

	// BaseURL is the Meta LLAMA API endpoint
	// Default: https://api.llama.com/compat/v1
	BaseURL string

	// Model is the Meta LLAMA model to use
	// Options: Llama-4-Maverick-17B-128E-Instruct-FP8, Llama-4-Scout-17B-16E,
	//          Llama-3.3-70B-Instruct, Llama-3.3-8B-Instruct
	Model string

	// Temperature controls randomness (0.0 to 2.0, default: 0.7)
	Temperature float64

	// TopP controls nucleus sampling (0.0 to 1.0, default: 0.9)
	TopP float64

	// MaxTokens is the maximum number of tokens to generate
	MaxTokens int

	// Timeout is the request timeout
	Timeout time.Duration

	// HTTPClient is the HTTP client to use (optional)
	HTTPClient *http.Client

	// PresencePenalty reduces repetition (-2.0 to 2.0, default: 0.0)
	PresencePenalty float64

	// FrequencyPenalty reduces repetition of token sequences (-2.0 to 2.0, default: 0.0)
	FrequencyPenalty float64

	// Stop sequences where the API will stop generating
	Stop []string
}

Config holds configuration for the Meta LLAMA provider

func DefaultConfig

func DefaultConfig() *Config

DefaultConfig returns a Config with sensible defaults

func (*Config) SetDefaults

func (c *Config) SetDefaults()

SetDefaults sets default values for unspecified fields

func (*Config) Validate

func (c *Config) Validate() error

Validate checks if the configuration is valid
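
A sketch of the likely configuration lifecycle: start from DefaultConfig, override what you need, then Validate before constructing the provider. The API key value below is a placeholder in the documented LLM|<app_id>|<token> format, not a real credential.

package main

import (
	"log"
	"time"

	meta "example.com/meta" // hypothetical import path
)

func main() {
	cfg := meta.DefaultConfig()
	cfg.APIKey = "LLM|<app_id>|<token>" // placeholder; substitute your real key
	cfg.Model = meta.ModelLlama4Scout
	cfg.MaxTokens = 512
	cfg.Timeout = 30 * time.Second

	if err := cfg.Validate(); err != nil {
		log.Fatalf("bad config: %v", err)
	}
}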

type ErrorDetail

type ErrorDetail struct {
	Message string `json:"message"`
	Type    string `json:"type"`
	Param   string `json:"param,omitempty"`
	Code    string `json:"code,omitempty"`
}

ErrorDetail contains error information

type ErrorResponse

type ErrorResponse struct {
	Error ErrorDetail `json:"error"`
}

ErrorResponse represents an API error response
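
A sketch of decoding an error body into ErrorResponse; the payload is hand-written, using the type and code strings from the constants above:

package main

import (
	"encoding/json"
	"fmt"

	meta "example.com/meta" // hypothetical import path
)

func main() {
	payload := []byte(`{"error":{"message":"Invalid API key provided","type":"authentication_error","code":"invalid_api_key"}}`)

	var er meta.ErrorResponse
	if err := json.Unmarshal(payload, &er); err != nil {
		panic(err)
	}
	fmt.Println(er.Error.Type, "-", er.Error.Message)
}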

type Message

type Message struct {
	Role    string `json:"role"`    // "system", "user", or "assistant"
	Content string `json:"content"` // Message content
}

Message represents a chat message

type MessageDelta

type MessageDelta struct {
	Role    string `json:"role,omitempty"`
	Content string `json:"content,omitempty"`
}

MessageDelta represents a partial message in a streaming response

type MetaError

type MetaError struct {
	StatusCode int
	Type       string
	Message    string
	Param      string
	Code       string
}

MetaError represents an error from the Meta LLAMA API

func NewAuthenticationError

func NewAuthenticationError(message string) *MetaError

NewAuthenticationError creates a new authentication error

func NewInvalidRequestError

func NewInvalidRequestError(message string) *MetaError

NewInvalidRequestError creates a new invalid request error

func NewRateLimitError

func NewRateLimitError(message string) *MetaError

NewRateLimitError creates a new rate limit error

func NewTimeoutError

func NewTimeoutError(message string) *MetaError

NewTimeoutError creates a new timeout error

func (*MetaError) Error

func (e *MetaError) Error() string

Error implements the error interface

func (*MetaError) IsAuthenticationError

func (e *MetaError) IsAuthenticationError() bool

IsAuthenticationError checks if the error is an authentication error

func (*MetaError) IsQuotaError

func (e *MetaError) IsQuotaError() bool

IsQuotaError checks if the error is an insufficient quota error

func (*MetaError) IsRateLimitError

func (e *MetaError) IsRateLimitError() bool

IsRateLimitError checks if the error is a rate limit error

func (*MetaError) IsRetryable

func (e *MetaError) IsRetryable() bool

IsRetryable checks if the error is retryable
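
A sketch of classifying a failed call with errors.As and the helper predicates. The retry policy shown (retry only when IsRetryable reports true) is illustrative, not prescribed by the package.

package main

import (
	"errors"
	"fmt"

	meta "example.com/meta" // hypothetical import path
)

func classify(err error) {
	var apiErr *meta.MetaError
	if !errors.As(err, &apiErr) {
		fmt.Println("not an API error:", err)
		return
	}
	switch {
	case apiErr.IsAuthenticationError():
		fmt.Println("check the API key:", apiErr.Message)
	case apiErr.IsRateLimitError():
		fmt.Println("rate limited; back off and retry")
	case apiErr.IsRetryable():
		fmt.Println("transient failure; safe to retry")
	default:
		fmt.Println("permanent failure:", apiErr)
	}
}

func main() {
	classify(meta.NewRateLimitError("too many requests"))
}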

type MetaProvider

type MetaProvider struct {
	// contains filtered or unexported fields
}

MetaProvider implements the Provider interface for Meta LLAMA API

func NewMetaProvider

func NewMetaProvider(config *Config) (*MetaProvider, error)

NewMetaProvider creates a new Meta LLAMA provider instance

func (*MetaProvider) Chat

func (p *MetaProvider) Chat(ctx context.Context, messages []provider.Message, opts ...provider.ChatOption) (provider.Response, error)

Chat sends a complete chat request and waits for the full response
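
A sketch of a complete request/response round trip. The provider package referenced in the signature is not documented on this page, so both its import path and the Role/Content fields of provider.Message are assumptions, modeled on this package's own Message type.

package main

import (
	"context"
	"fmt"
	"log"

	meta "example.com/meta" // hypothetical import path
	"example.com/provider"  // hypothetical path for the provider interfaces
)

func main() {
	cfg := meta.DefaultConfig()
	cfg.APIKey = "LLM|<app_id>|<token>" // placeholder
	cfg.Model = meta.ModelLlama33_70B

	p, err := meta.NewMetaProvider(cfg)
	if err != nil {
		log.Fatal(err)
	}
	defer p.Close()

	// Assumption: provider.Message carries Role and Content, mirroring meta.Message.
	msgs := []provider.Message{{Role: "user", Content: "Summarize the Go memory model in one sentence."}}

	resp, err := p.Chat(context.Background(), msgs)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(resp) // provider.Response's shape is not documented on this page
}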

func (*MetaProvider) Close

func (p *MetaProvider) Close() error

Close releases any resources held by the provider

func (*MetaProvider) Models

func (p *MetaProvider) Models() []string

Models returns the list of supported model identifiers

func (*MetaProvider) Name

func (p *MetaProvider) Name() string

Name returns the provider's name

func (*MetaProvider) Stream

func (p *MetaProvider) Stream(ctx context.Context, messages []provider.Message, opts ...provider.StreamOption) (<-chan provider.Event, error)

Stream sends a streaming chat request and returns a channel for events
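
Consuming the Stream channel; again a sketch under the same provider-package assumptions as above, printing each event with %v since provider.Event's fields are not documented here. The sketch assumes the provider closes the channel when the stream ends.

package main

import (
	"context"
	"fmt"
	"log"

	meta "example.com/meta" // hypothetical import path
	"example.com/provider"  // hypothetical path for the provider interfaces
)

func main() {
	cfg := meta.DefaultConfig()
	cfg.APIKey = "LLM|<app_id>|<token>" // placeholder
	p, err := meta.NewMetaProvider(cfg)
	if err != nil {
		log.Fatal(err)
	}
	defer p.Close()

	events, err := p.Stream(context.Background(),
		[]provider.Message{{Role: "user", Content: "Count to five."}})
	if err != nil {
		log.Fatal(err)
	}
	// Assumption: the channel is closed by the provider when the stream ends.
	for ev := range events {
		fmt.Printf("%v", ev)
	}
	fmt.Println()
}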

type ModelInfo

type ModelInfo struct {
	ID             string
	Name           string
	Description    string
	ParameterCount string
	Architecture   string
	MaxTokens      int
	Recommended    bool
}

ModelInfo contains information about a Meta LLAMA model

func GetSupportedModels

func GetSupportedModels() []ModelInfo

GetSupportedModels returns information about all supported Meta LLAMA models
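
A small sketch that lists every supported model, flagging the recommended ones:

package main

import (
	"fmt"

	meta "example.com/meta" // hypothetical import path
)

func main() {
	for _, m := range meta.GetSupportedModels() {
		marker := " "
		if m.Recommended {
			marker = "*"
		}
		fmt.Printf("%s %-45s %s (%s)\n", marker, m.ID, m.ParameterCount, m.Architecture)
	}
}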

type StreamChoice

type StreamChoice struct {
	Index        int          `json:"index"`
	Delta        MessageDelta `json:"delta"`
	FinishReason string       `json:"finish_reason,omitempty"`
}

StreamChoice represents a streaming completion choice

type StreamResponse

type StreamResponse struct {
	ID      string         `json:"id"`
	Object  string         `json:"object"`
	Created int64          `json:"created"`
	Model   string         `json:"model"`
	Choices []StreamChoice `json:"choices"`
}

StreamResponse represents a streaming chat completion chunk
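
Streaming responses in the OpenAI-compatible format typically arrive as server-sent events whose data lines are JSON-encoded StreamResponse chunks. A sketch of decoding one chunk; the payload is hand-written and the "data: " framing is an assumption based on that format, not confirmed by this page:

package main

import (
	"encoding/json"
	"fmt"
	"strings"

	meta "example.com/meta" // hypothetical import path
)

func main() {
	// One hypothetical SSE data line carrying a chat.completion.chunk payload.
	line := `data: {"id":"chatcmpl-123","object":"chat.completion.chunk","created":1736300000,"model":"Llama-3.3-8B-Instruct","choices":[{"index":0,"delta":{"content":"Hel"}}]}`

	var chunk meta.StreamResponse
	payload := strings.TrimPrefix(line, "data: ")
	if err := json.Unmarshal([]byte(payload), &chunk); err != nil {
		panic(err)
	}
	fmt.Print(chunk.Choices[0].Delta.Content) // the incremental text for this chunk
}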

type Usage

type Usage struct {
	PromptTokens     int `json:"prompt_tokens"`
	CompletionTokens int `json:"completion_tokens"`
	TotalTokens      int `json:"total_tokens"`
}

Usage represents token usage information
