models

package
v0.1.4 Latest
Warning

This package is not in the latest version of its module.

Go to latest
Published: Jul 22, 2025 License: Apache-2.0 Imports: 15 Imported by: 0

Documentation

Overview

Package models - AmazonBedrockModel implementation

Package models - InferenceClientModel implementation

Package models - LiteLLMModel implementation

Package models provides interfaces and implementations for different LLM backends.

This includes local models, API-based models, and utilities for message processing, tool calling, and structured generation.

Package models - Multimodal support for vision and audio

Package models - OpenAIServerModel implementation

Package models - Structured generation and response format handling

Index

Constants

This section is empty.

Variables

View Source
var (
	JSONObjectFormat = &ResponseFormat{
		Type:        "json_object",
		Description: "A valid JSON object",
	}

	TextFormat = &ResponseFormat{
		Type:        "text",
		Description: "Plain text response",
	}
)

Common response formats for convenience

View Source
var BedrockModelRegistry = map[string]map[string]interface{}{
	"amazon.titan-text-express-v1": {
		"max_tokens":  8192,
		"temperature": 0.7,
	},
	"amazon.titan-text-lite-v1": {
		"max_tokens":  4096,
		"temperature": 0.7,
	},
	"anthropic.claude-3-sonnet-20240229-v1:0": {
		"max_tokens":  4096,
		"temperature": 0.7,
	},
	"anthropic.claude-3-haiku-20240307-v1:0": {
		"max_tokens":  4096,
		"temperature": 0.7,
	},
	"anthropic.claude-3-opus-20240229-v1:0": {
		"max_tokens":  4096,
		"temperature": 0.7,
	},
	"meta.llama3-8b-instruct-v1:0": {
		"max_tokens":  2048,
		"temperature": 0.7,
	},
	"meta.llama3-70b-instruct-v1:0": {
		"max_tokens":  2048,
		"temperature": 0.7,
	},
	"us.amazon.nova-pro-v1:0": {
		"max_tokens":  4096,
		"temperature": 0.7,
	},
	"us.amazon.nova-lite-v1:0": {
		"max_tokens":  4096,
		"temperature": 0.7,
	},
	"us.amazon.nova-micro-v1:0": {
		"max_tokens":  4096,
		"temperature": 0.7,
	},
}

BedrockModelRegistry contains model-specific configurations for Bedrock

View Source
var CodeAgentResponseFormat = &ResponseFormat{
	Type: "json_schema",
	JSONSchema: &JSONSchema{
		Name:        "ThoughtAndCodeAnswer",
		Description: "Structured response format for code agents with thought process and code",
		Schema: map[string]interface{}{
			"type":                 "object",
			"additionalProperties": false,
			"properties": map[string]interface{}{
				"thought": map[string]interface{}{
					"type":        "string",
					"description": "A free form text description of the thought process.",
					"title":       "Thought",
				},
				"code": map[string]interface{}{
					"type":        "string",
					"description": "Valid Go code snippet implementing the thought.",
					"title":       "Code",
				},
			},
			"required": []string{"thought", "code"},
			"title":    "ThoughtAndCodeAnswer",
		},
		Strict: true,
	},
	Strict: true,
}

CodeAgentResponseFormat defines the JSON schema for code agent responses

View Source
var DefaultMultimodalSupport = NewMultimodalSupport()

DefaultMultimodalSupport is the default multimodal support instance

View Source
var DefaultStructuredGenerator = NewStructuredGenerator()

DefaultStructuredGenerator is the default structured generator instance

View Source
var ModelsWithoutStopSequences = []string{
	"moonshotai/Kimi-K2-Instruct",
}

ModelsWithoutStopSequences lists models that don't support stop sequences parameter

View Source
var StructuredGenerationProviders = []string{"cerebras", "fireworks-ai"}

StructuredGenerationProviders lists providers that support structured generation

ToolRoleConversions maps tool-specific roles to standard roles

Functions

func GenerateStructuredPrompt

func GenerateStructuredPrompt(basePrompt string, format *ResponseFormat) string

func GetBedrockModelDefaults

func GetBedrockModelDefaults(modelID string) map[string]interface{}

GetBedrockModelDefaults returns default configuration for a Bedrock model

func GetCleanMessageList

func GetCleanMessageList(
	messages []interface{},
	roleConversions map[MessageRole]MessageRole,
	convertImagesToImageURLs bool,
	flattenMessagesAsText bool,
) ([]map[string]interface{}, error)

GetCleanMessageList preprocesses messages for model consumption

func GetModelInfo

func GetModelInfo(modelType ModelType, modelID string) map[string]interface{}

GetModelInfo returns information about a model

func GetToolJSONSchema

func GetToolJSONSchema(tool tools.Tool) map[string]interface{}

GetToolJSONSchema converts a tool to JSON schema format

func IsSupportedBedrockModel

func IsSupportedBedrockModel(modelID string) bool

IsSupportedBedrockModel checks if a model ID is supported by Bedrock

func ParseJSONIfNeeded

func ParseJSONIfNeeded(arguments interface{}) interface{}

ParseJSONIfNeeded parses arguments as JSON if they're a string

func RemoveStopSequences

func RemoveStopSequences(content string, stopSequences []string) string

RemoveStopSequences removes stop sequences from content

func SupportsStopParameter

func SupportsStopParameter(modelID string) bool

SupportsStopParameter checks if a model supports stop parameters

func ValidateModelConfiguration

func ValidateModelConfiguration(modelType ModelType, modelID string, options map[string]interface{}) error

ValidateModelConfiguration validates a model configuration

Types

type AmazonBedrockModel

type AmazonBedrockModel struct {
	*BaseModel
	Client                BedrockClient          `json:"-"`
	ClientKwargs          map[string]interface{} `json:"client_kwargs"`
	CustomRoleConversions map[string]string      `json:"custom_role_conversions"`
	InferenceConfig       map[string]interface{} `json:"inference_config"`
	GuardrailConfig       map[string]interface{} `json:"guardrail_config"`
	AdditionalModelFields map[string]interface{} `json:"additional_model_fields"`
}

AmazonBedrockModel represents a model using Amazon Bedrock API

func NewAmazonBedrockModel

func NewAmazonBedrockModel(modelID string, options map[string]interface{}) *AmazonBedrockModel

NewAmazonBedrockModel creates a new Amazon Bedrock model

func (*AmazonBedrockModel) Generate

func (bm *AmazonBedrockModel) Generate(messages []interface{}, options *GenerateOptions) (*ChatMessage, error)

Generate implements Model interface

func (*AmazonBedrockModel) GenerateStream

func (bm *AmazonBedrockModel) GenerateStream(messages []interface{}, options *GenerateOptions) (<-chan *ChatMessageStreamDelta, error)

GenerateStream implements Model interface

func (*AmazonBedrockModel) SupportsStreaming

func (bm *AmazonBedrockModel) SupportsStreaming() bool

SupportsStreaming implements Model interface

func (*AmazonBedrockModel) ToDict

func (bm *AmazonBedrockModel) ToDict() map[string]interface{}

ToDict implements Model interface

type AudioData

type AudioData struct {
	Data   string `json:"data"`   // Base64 encoded audio data
	Format string `json:"format"` // "mp3", "wav", "ogg", etc.
	URL    string `json:"url,omitempty"`
}

AudioData represents audio content

type AzureOpenAIServerModel

type AzureOpenAIServerModel struct {
	*OpenAIServerModel
	APIVersion   string `json:"api_version"`
	Deployment   string `json:"deployment"`
	ResourceName string `json:"resource_name"`
}

AzureOpenAIServerModel extends OpenAIServerModel for Azure-specific configurations

func NewAzureOpenAIServerModel

func NewAzureOpenAIServerModel(modelID string, resourceName string, deployment string, apiKey string, options map[string]interface{}) *AzureOpenAIServerModel

NewAzureOpenAIServerModel creates a new Azure OpenAI server model

func (*AzureOpenAIServerModel) ToDict

func (azm *AzureOpenAIServerModel) ToDict() map[string]interface{}

ToDict implements Model interface for Azure variant

type BaseModel

type BaseModel struct {
	FlattenMessagesAsText bool   `json:"flatten_messages_as_text"`
	ToolNameKey           string `json:"tool_name_key"`
	ToolArgumentsKey      string `json:"tool_arguments_key"`
	ModelID               string `json:"model_id"`

	// Deprecated token counting properties (maintained for compatibility)
	LastInputTokenCount  *int `json:"_last_input_token_count,omitempty"`
	LastOutputTokenCount *int `json:"_last_output_token_count,omitempty"`

	CustomParams map[string]interface{} `json:"custom_params,omitempty"`
}

BaseModel provides common functionality for model implementations

func NewBaseModel

func NewBaseModel(modelID string, options map[string]interface{}) *BaseModel

NewBaseModel creates a new base model

func (*BaseModel) Close

func (bm *BaseModel) Close() error

Close implements Model (default: no-op)

func (*BaseModel) ConvertResponseFormat

func (bm *BaseModel) ConvertResponseFormat(format *ResponseFormat) map[string]interface{}

ConvertResponseFormat converts ResponseFormat to API-compatible format

func (*BaseModel) GenerateWithStructuredOutput

func (bm *BaseModel) GenerateWithStructuredOutput(
	model Model,
	messages []interface{},
	options *GenerateOptions,
) (*StructuredOutput, error)

GenerateWithStructuredOutput generates output with structured response format

func (*BaseModel) GetModelID

func (bm *BaseModel) GetModelID() string

GetModelID implements Model

func (*BaseModel) ParseToolCalls

func (bm *BaseModel) ParseToolCalls(message *ChatMessage) (*ChatMessage, error)

ParseToolCalls implements Model

func (*BaseModel) PrepareCompletionKwargs

func (bm *BaseModel) PrepareCompletionKwargs(
	options *GenerateOptions,
	defaultParams map[string]interface{},
	priorityParams map[string]interface{},
) map[string]interface{}

PrepareCompletionKwargs prepares keyword arguments for model completion

func (*BaseModel) PrepareStructuredPrompt

func (bm *BaseModel) PrepareStructuredPrompt(
	basePrompt string,
	format *ResponseFormat,
) string

PrepareStructuredPrompt prepares a prompt for structured generation

func (*BaseModel) SupportsStreaming

func (bm *BaseModel) SupportsStreaming() bool

SupportsStreaming implements Model (default: false)

func (*BaseModel) ToDict

func (bm *BaseModel) ToDict() map[string]interface{}

ToDict implements Model

type BedrockClient

type BedrockClient interface {
	Converse(input map[string]interface{}) (map[string]interface{}, error)
	ConverseStream(input map[string]interface{}) (<-chan map[string]interface{}, error)
}

BedrockClient interface for AWS Bedrock client abstraction

type ChatMessage

type ChatMessage struct {
	Role       string                 `json:"role"`
	Content    *string                `json:"content,omitempty"`
	ToolCalls  []ChatMessageToolCall  `json:"tool_calls,omitempty"`
	Raw        interface{}            `json:"-"` // Stores raw output from API
	TokenUsage *monitoring.TokenUsage `json:"token_usage,omitempty"`
}

ChatMessage represents a message in the conversation

func NewChatMessage

func NewChatMessage(role string, content string) *ChatMessage

NewChatMessage creates a new chat message

func (*ChatMessage) FromDict

func (cm *ChatMessage) FromDict(data map[string]interface{}, raw interface{}, tokenUsage *monitoring.TokenUsage) error

FromDict creates a ChatMessage from a dictionary

func (*ChatMessage) ModelDumpJSON

func (cm *ChatMessage) ModelDumpJSON() ([]byte, error)

ModelDumpJSON returns JSON representation excluding raw field

func (*ChatMessage) ToDict

func (cm *ChatMessage) ToDict() map[string]interface{}

ToDict returns a dictionary representation of the message

type ChatMessageStreamDelta

type ChatMessageStreamDelta struct {
	Content    *string                `json:"content,omitempty"`
	ToolCalls  []ChatMessageToolCall  `json:"tool_calls,omitempty"`
	TokenUsage *monitoring.TokenUsage `json:"token_usage,omitempty"`
}

ChatMessageStreamDelta represents a streaming delta for chat messages

type ChatMessageToolCall

type ChatMessageToolCall struct {
	Function ChatMessageToolCallDefinition `json:"function"`
	ID       string                        `json:"id"`
	Type     string                        `json:"type"`
}

ChatMessageToolCall represents a tool call made by the model

type ChatMessageToolCallDefinition

type ChatMessageToolCallDefinition struct {
	Arguments   interface{} `json:"arguments"`
	Name        string      `json:"name"`
	Description *string     `json:"description,omitempty"`
}

ChatMessageToolCallDefinition defines a function call within a tool call

type DefaultBedrockClient

type DefaultBedrockClient struct {
	// contains filtered or unexported fields
}

DefaultBedrockClient is a placeholder implementation of BedrockClient

func NewDefaultBedrockClient

func NewDefaultBedrockClient(kwargs map[string]interface{}) *DefaultBedrockClient

NewDefaultBedrockClient creates a new default Bedrock client

func (*DefaultBedrockClient) Converse

func (dbc *DefaultBedrockClient) Converse(input map[string]interface{}) (map[string]interface{}, error)

Converse implements BedrockClient interface (placeholder)

func (*DefaultBedrockClient) ConverseStream

func (dbc *DefaultBedrockClient) ConverseStream(input map[string]interface{}) (<-chan map[string]interface{}, error)

ConverseStream implements BedrockClient interface (placeholder)

type GenerateOptions

type GenerateOptions struct {
	StopSequences     []string               `json:"stop_sequences,omitempty"`
	ResponseFormat    *ResponseFormat        `json:"response_format,omitempty"`     // Enhanced structured format
	ResponseFormatRaw map[string]interface{} `json:"response_format_raw,omitempty"` // Raw format for backwards compatibility
	ToolsToCallFrom   []tools.Tool           `json:"tools_to_call_from,omitempty"`
	Grammar           map[string]string      `json:"grammar,omitempty"`
	MaxTokens         *int                   `json:"max_tokens,omitempty"`
	Temperature       *float64               `json:"temperature,omitempty"`
	TopP              *float64               `json:"top_p,omitempty"`
	TopK              *int                   `json:"top_k,omitempty"`
	FrequencyPenalty  *float64               `json:"frequency_penalty,omitempty"`
	PresencePenalty   *float64               `json:"presence_penalty,omitempty"`
	Seed              *int                   `json:"seed,omitempty"`
	ValidateOutput    bool                   `json:"validate_output"`  // Whether to validate structured output
	RetryOnFailure    bool                   `json:"retry_on_failure"` // Whether to retry on validation failure
	MaxRetries        int                    `json:"max_retries"`      // Maximum number of retries
	CustomParams      map[string]interface{} `json:"custom_params,omitempty"`
}

GenerateOptions represents options for model generation

type ImageURL

type ImageURL struct {
	URL    string `json:"url"`
	Detail string `json:"detail,omitempty"` // "low", "high", "auto"
}

ImageURL represents an image reference

type InferenceClientModel

type InferenceClientModel struct {
	*BaseModel
	Provider string            `json:"provider"`
	Client   interface{}       `json:"-"` // HTTP client or SDK client
	Token    string            `json:"-"` // API token
	BaseURL  string            `json:"base_url"`
	Headers  map[string]string `json:"headers"`
}

InferenceClientModel represents a model using Hugging Face Inference API

func NewInferenceClientModel

func NewInferenceClientModel(modelID string, token string, options map[string]interface{}) *InferenceClientModel

NewInferenceClientModel creates a new inference client model

func (*InferenceClientModel) Generate

func (icm *InferenceClientModel) Generate(messages []interface{}, options *GenerateOptions) (*ChatMessage, error)

Generate implements Model interface

func (*InferenceClientModel) GenerateStream

func (icm *InferenceClientModel) GenerateStream(messages []interface{}, options *GenerateOptions) (<-chan *ChatMessageStreamDelta, error)

GenerateStream implements Model interface

func (*InferenceClientModel) SupportsStreaming

func (icm *InferenceClientModel) SupportsStreaming() bool

SupportsStreaming implements Model interface

func (*InferenceClientModel) ToDict

func (icm *InferenceClientModel) ToDict() map[string]interface{}

ToDict implements Model interface

type JSONSchema

type JSONSchema struct {
	Name        string                 `json:"name"`
	Description string                 `json:"description,omitempty"`
	Schema      map[string]interface{} `json:"schema"`
	Strict      bool                   `json:"strict,omitempty"`
}

JSONSchema represents a JSON schema for structured generation

func CreateFunctionCallSchema

func CreateFunctionCallSchema() *JSONSchema

CreateFunctionCallSchema creates a schema for function calls

func CreateJSONSchema

func CreateJSONSchema(name, description string, example interface{}) *JSONSchema

CreateJSONSchema creates a JSON schema from a Go struct or map

func CreateToolCallSchema

func CreateToolCallSchema() *JSONSchema

CreateToolCallSchema creates a schema for tool calls

type LiteLLMModel

type LiteLLMModel struct {
	*BaseModel
	APIBase               string            `json:"api_base"`
	APIKey                string            `json:"-"` // API key (not serialized)
	Headers               map[string]string `json:"headers"`
	Client                interface{}       `json:"-"`       // HTTP client
	Timeout               int               `json:"timeout"` // Request timeout in seconds
	CustomRoleConversions map[string]string `json:"custom_role_conversions"`
	Organization          string            `json:"organization"`
	Project               string            `json:"project"`
}

LiteLLMModel represents a model using LiteLLM proxy

func NewLiteLLMModel

func NewLiteLLMModel(modelID string, options map[string]interface{}) *LiteLLMModel

NewLiteLLMModel creates a new LiteLLM model

func (*LiteLLMModel) Generate

func (llm *LiteLLMModel) Generate(messages []interface{}, options *GenerateOptions) (*ChatMessage, error)

Generate implements Model interface

func (*LiteLLMModel) GenerateStream

func (llm *LiteLLMModel) GenerateStream(messages []interface{}, options *GenerateOptions) (<-chan *ChatMessageStreamDelta, error)

GenerateStream implements Model interface

func (*LiteLLMModel) SupportsStreaming

func (llm *LiteLLMModel) SupportsStreaming() bool

SupportsStreaming implements Model interface

func (*LiteLLMModel) ToDict

func (llm *LiteLLMModel) ToDict() map[string]interface{}

ToDict implements Model interface

type MediaContent

type MediaContent struct {
	Type      MediaType   `json:"type"`
	Text      *string     `json:"text,omitempty"`
	ImageURL  *ImageURL   `json:"image_url,omitempty"`
	AudioData *AudioData  `json:"audio_data,omitempty"`
	VideoData *VideoData  `json:"video_data,omitempty"`
	Metadata  interface{} `json:"metadata,omitempty"`
}

MediaContent represents multimodal content

func CreateText

func CreateText(text string) *MediaContent

CreateText is a convenience function to create text content using the default instance

func LoadAudio

func LoadAudio(filePath string) (*MediaContent, error)

LoadAudio is a convenience function to load audio using the default instance

func LoadImage

func LoadImage(filePath string) (*MediaContent, error)

LoadImage is a convenience function to load an image using the default instance

func LoadImageURL

func LoadImageURL(url string, detail string) (*MediaContent, error)

LoadImageURL is a convenience function to load an image from URL using the default instance

func LoadVideo

func LoadVideo(filePath string) (*MediaContent, error)

LoadVideo is a convenience function to load video using the default instance

type MediaType

type MediaType string

MediaType represents different types of media content

const (
	MediaTypeText  MediaType = "text"
	MediaTypeImage MediaType = "image"
	MediaTypeAudio MediaType = "audio"
	MediaTypeVideo MediaType = "video"
)

type MessageRole

type MessageRole string

MessageRole represents the role of a message in conversation

const (
	RoleUser         MessageRole = "user"
	RoleAssistant    MessageRole = "assistant"
	RoleSystem       MessageRole = "system"
	RoleToolCall     MessageRole = "tool-call"
	RoleToolResponse MessageRole = "tool-response"
)

func (MessageRole) Roles

func (MessageRole) Roles() []string

Roles returns all available message roles

type Model

type Model interface {
	// Generate generates a response from the model
	Generate(
		messages []interface{},
		options *GenerateOptions,
	) (*ChatMessage, error)

	// GenerateStream generates a streaming response (if supported)
	GenerateStream(
		messages []interface{},
		options *GenerateOptions,
	) (<-chan *ChatMessageStreamDelta, error)

	// ParseToolCalls parses tool calls from message content
	ParseToolCalls(message *ChatMessage) (*ChatMessage, error)

	// ToDict converts the model to a dictionary representation
	ToDict() map[string]interface{}

	// GetModelID returns the model identifier
	GetModelID() string

	// SupportsStreaming returns true if the model supports streaming
	SupportsStreaming() bool

	// Close cleans up model resources
	Close() error
}

Model represents the main interface for all LLM models

func CreateModel

func CreateModel(modelType ModelType, modelID string, options map[string]interface{}) (Model, error)

CreateModel creates a model of the specified type

type ModelType

type ModelType string

ModelType represents different types of models supported

const (
	ModelTypeInferenceClient ModelType = "inference_client"
	ModelTypeOpenAIServer    ModelType = "openai_server"
	ModelTypeAzureOpenAI     ModelType = "azure_openai"
	ModelTypeLiteLLM         ModelType = "litellm"
	ModelTypeBedrockModel    ModelType = "bedrock"
	ModelTypeTransformers    ModelType = "transformers"
)

func AutoDetectModelType

func AutoDetectModelType(modelID string) ModelType

AutoDetectModelType attempts to detect the appropriate model type from modelID

type MultimodalMessage

type MultimodalMessage struct {
	Role    string          `json:"role"`
	Content []*MediaContent `json:"content"`
}

MultimodalMessage represents a message with mixed content types

func CreateMessage

func CreateMessage(role string, contents ...*MediaContent) *MultimodalMessage

CreateMessage is a convenience function to create a multimodal message using the default instance

type MultimodalSupport

type MultimodalSupport struct {
	MaxImageSize int64    `json:"max_image_size"` // Maximum image size in bytes
	MaxAudioSize int64    `json:"max_audio_size"` // Maximum audio size in bytes
	MaxVideoSize int64    `json:"max_video_size"` // Maximum video size in bytes
	ImageFormats []string `json:"image_formats"`  // Supported image formats
	AudioFormats []string `json:"audio_formats"`  // Supported audio formats
	VideoFormats []string `json:"video_formats"`  // Supported video formats
}

MultimodalSupport provides utilities for handling multimodal content

func NewMultimodalSupport

func NewMultimodalSupport() *MultimodalSupport

NewMultimodalSupport creates a new multimodal support instance

func (*MultimodalSupport) CompressImage

func (ms *MultimodalSupport) CompressImage(content *MediaContent, quality int) (*MediaContent, error)

CompressImage compresses an image for better performance (placeholder implementation)

func (*MultimodalSupport) ConvertToStandardFormat

func (ms *MultimodalSupport) ConvertToStandardFormat(messages []*MultimodalMessage) []map[string]interface{}

ConvertToStandardFormat converts multimodal messages to standard message format

func (*MultimodalSupport) CreateMultimodalMessage

func (ms *MultimodalSupport) CreateMultimodalMessage(role string, contents ...*MediaContent) *MultimodalMessage

CreateMultimodalMessage creates a multimodal message

func (*MultimodalSupport) CreateTextContent

func (ms *MultimodalSupport) CreateTextContent(text string) *MediaContent

CreateTextContent creates text content

func (*MultimodalSupport) ExtractText

func (ms *MultimodalSupport) ExtractText(message *MultimodalMessage) string

ExtractText extracts all text content from a multimodal message

func (*MultimodalSupport) GenerateImageThumbnail

func (ms *MultimodalSupport) GenerateImageThumbnail(content *MediaContent, width, height int) (*MediaContent, error)

GenerateImageThumbnail generates a thumbnail for an image (placeholder implementation)

func (*MultimodalSupport) GetMediaCount

func (ms *MultimodalSupport) GetMediaCount(message *MultimodalMessage) map[MediaType]int

GetMediaCount returns the count of different media types in a message

func (*MultimodalSupport) LoadAudioFromFile

func (ms *MultimodalSupport) LoadAudioFromFile(filePath string) (*MediaContent, error)

LoadAudioFromFile loads audio from a file path

func (*MultimodalSupport) LoadAudioFromURL

func (ms *MultimodalSupport) LoadAudioFromURL(url string) (*MediaContent, error)

LoadAudioFromURL loads audio from a URL

func (*MultimodalSupport) LoadImageFromFile

func (ms *MultimodalSupport) LoadImageFromFile(filePath string) (*MediaContent, error)

LoadImageFromFile loads an image from a file path

func (*MultimodalSupport) LoadImageFromURL

func (ms *MultimodalSupport) LoadImageFromURL(url string, detail string) (*MediaContent, error)

LoadImageFromURL loads an image from a URL

func (*MultimodalSupport) LoadVideoFromFile

func (ms *MultimodalSupport) LoadVideoFromFile(filePath string) (*MediaContent, error)

LoadVideoFromFile loads video from a file path

func (*MultimodalSupport) ValidateMessage

func (ms *MultimodalSupport) ValidateMessage(message *MultimodalMessage) error

ValidateMessage validates a multimodal message

type OpenAIServerModel

type OpenAIServerModel struct {
	*BaseModel
	BaseURL string            `json:"base_url"`
	APIKey  string            `json:"-"` // API key (not serialized)
	Headers map[string]string `json:"headers"`
	Client  interface{}       `json:"-"`       // HTTP client
	Timeout int               `json:"timeout"` // Request timeout in seconds
}

OpenAIServerModel represents a model using OpenAI-compatible API servers

func NewOpenAIServerModel

func NewOpenAIServerModel(modelID string, baseURL string, apiKey string, options map[string]interface{}) *OpenAIServerModel

NewOpenAIServerModel creates a new OpenAI server model

func (*OpenAIServerModel) Generate

func (osm *OpenAIServerModel) Generate(messages []interface{}, options *GenerateOptions) (*ChatMessage, error)

Generate implements Model interface

func (*OpenAIServerModel) GenerateStream

func (osm *OpenAIServerModel) GenerateStream(messages []interface{}, options *GenerateOptions) (<-chan *ChatMessageStreamDelta, error)

GenerateStream implements Model interface

func (*OpenAIServerModel) SupportsStreaming

func (osm *OpenAIServerModel) SupportsStreaming() bool

SupportsStreaming implements Model interface

func (*OpenAIServerModel) ToDict

func (osm *OpenAIServerModel) ToDict() map[string]interface{}

ToDict implements Model interface

type ResponseFormat

type ResponseFormat struct {
	Type        string                 `json:"type"`                  // "json_object", "json_schema", "text"
	JSONSchema  *JSONSchema            `json:"json_schema,omitempty"` // For structured JSON output
	Schema      map[string]interface{} `json:"schema,omitempty"`      // Raw schema definition
	Strict      bool                   `json:"strict,omitempty"`      // Whether to enforce strict schema compliance
	Name        string                 `json:"name,omitempty"`        // Name for the response format
	Description string                 `json:"description,omitempty"` // Description of the format
}

ResponseFormat represents different structured output formats

type SchemaValidator

type SchemaValidator struct {
	// contains filtered or unexported fields
}

SchemaValidator provides schema validation functionality

func NewSchemaValidator

func NewSchemaValidator() *SchemaValidator

NewSchemaValidator creates a new schema validator

func (*SchemaValidator) RegisterSchema

func (sv *SchemaValidator) RegisterSchema(schema *JSONSchema)

RegisterSchema registers a JSON schema for validation

func (*SchemaValidator) ValidateJSON

func (sv *SchemaValidator) ValidateJSON(data interface{}, schemaName string) (bool, []string)

ValidateJSON validates JSON data against a schema

type StructuredGenerator

type StructuredGenerator struct {
	// contains filtered or unexported fields
}

StructuredGenerator provides structured generation capabilities

func NewStructuredGenerator

func NewStructuredGenerator() *StructuredGenerator

NewStructuredGenerator creates a new structured generator

func (*StructuredGenerator) GenerateStructuredPrompt

func (sg *StructuredGenerator) GenerateStructuredPrompt(basePrompt string, format *ResponseFormat) string

GenerateStructuredPrompt generates a prompt that encourages structured output

func (*StructuredGenerator) ParseStructuredOutput

func (sg *StructuredGenerator) ParseStructuredOutput(output string, format *ResponseFormat) (*StructuredOutput, error)

ParseStructuredOutput parses model output according to specified format

type StructuredOutput

type StructuredOutput struct {
	Content  interface{}            `json:"content"`  // The parsed content
	Raw      string                 `json:"raw"`      // Raw text output
	Format   *ResponseFormat        `json:"format"`   // The format used
	Valid    bool                   `json:"valid"`    // Whether output is valid according to schema
	Errors   []string               `json:"errors"`   // Validation errors if any
	Metadata map[string]interface{} `json:"metadata"` // Additional metadata
}

StructuredOutput represents a parsed structured output

func ParseStructuredOutput

func ParseStructuredOutput(output string, format *ResponseFormat) (*StructuredOutput, error)

Convenience functions using the default generator

type TransformersModel

type TransformersModel struct {
	*BaseModel
	ModelPath        string                 `json:"model_path"`
	TokenizerPath    string                 `json:"tokenizer_path"`
	Device           string                 `json:"device"`
	TorchDtype       string                 `json:"torch_dtype"`
	ModelKwargs      map[string]interface{} `json:"model_kwargs"`
	TokenizerKwargs  map[string]interface{} `json:"tokenizer_kwargs"`
	StoppingCriteria interface{}            `json:"-"` // StoppingCriteriaList
}

TransformersModel represents a local model using Hugging Face Transformers

func NewTransformersModelImpl

func NewTransformersModelImpl(modelID string, options map[string]interface{}) *TransformersModel

NewTransformersModelImpl creates a new transformers model implementation

func (*TransformersModel) Generate

func (tm *TransformersModel) Generate(messages []interface{}, options *GenerateOptions) (*ChatMessage, error)

Generate implements Model interface for Transformers

func (*TransformersModel) GenerateStream

func (tm *TransformersModel) GenerateStream(messages []interface{}, options *GenerateOptions) (<-chan *ChatMessageStreamDelta, error)

GenerateStream implements Model interface for Transformers

func (*TransformersModel) SupportsStreaming

func (tm *TransformersModel) SupportsStreaming() bool

SupportsStreaming implements Model interface for Transformers

func (*TransformersModel) ToDict

func (tm *TransformersModel) ToDict() map[string]interface{}

ToDict implements Model interface for Transformers

type VideoData

type VideoData struct {
	Data   string `json:"data"`   // Base64 encoded video data
	Format string `json:"format"` // "mp4", "webm", "avi", etc.
	URL    string `json:"url,omitempty"`
}

VideoData represents video content

Jump to

Keyboard shortcuts

? : This menu
/ : Search site
f or F : Jump to
y or Y : Canonical URL