model

package
v0.9.0-alpha.2 Latest
Warning

This package is not in the latest version of its module.

Go to latest
Published: Jan 22, 2026 License: Apache-2.0 Imports: 3 Imported by: 115

Documentation

Overview

Package model defines callback payloads and configuration types for chat models.

Index

Constants

This section is empty.

Variables

This section is empty.

Functions

func GetImplSpecificOptions

func GetImplSpecificOptions[T any](base *T, opts ...Option) *T

GetImplSpecificOptions extracts the implementation-specific options from an Option list, optionally providing a base option struct with default values. e.g.

myOption := &MyOption{
	Field1: "default_value",
}

myOption = model.GetImplSpecificOptions(myOption, opts...)

Types

type AgenticCallbackInput

type AgenticCallbackInput struct {
	// Messages is the agentic messages to be sent to the agentic model.
	Messages []*schema.AgenticMessage
	// Tools is the tools to be used in the agentic model.
	Tools []*schema.ToolInfo
	// Config is the config for the agentic model.
	Config *AgenticConfig
	// Extra is the extra information for the callback.
	Extra map[string]any
}

AgenticCallbackInput is the input for the agentic model callback.

func ConvAgenticCallbackInput

func ConvAgenticCallbackInput(src callbacks.CallbackInput) *AgenticCallbackInput

ConvAgenticCallbackInput converts the callback input to the agentic model callback input.

type AgenticCallbackOutput

type AgenticCallbackOutput struct {
	// Message is the agentic message generated by the agentic model.
	Message *schema.AgenticMessage
	// Config is the config for the agentic model.
	Config *AgenticConfig
	// TokenUsage is the token usage of this request.
	TokenUsage *TokenUsage
	// Extra is the extra information for the callback.
	Extra map[string]any
}

AgenticCallbackOutput is the output for the agentic model callback.

func ConvAgenticCallbackOutput

func ConvAgenticCallbackOutput(src callbacks.CallbackOutput) *AgenticCallbackOutput

ConvAgenticCallbackOutput converts the callback output to the agentic model callback output.

type AgenticConfig

type AgenticConfig struct {
	// Model is the model name.
	Model string
	// MaxTokens is the max number of output tokens, if reached the max tokens, the model will stop generating.
	MaxTokens int
	// Temperature is the temperature, which controls the randomness of the agentic model.
	Temperature float32
	// TopP is the top p, which controls the diversity of the agentic model.
	TopP float32
}

AgenticConfig is the config for the agentic model.

type AgenticModel

type AgenticModel interface {
	Generate(ctx context.Context, input []*schema.AgenticMessage, opts ...Option) (*schema.AgenticMessage, error)
	Stream(ctx context.Context, input []*schema.AgenticMessage, opts ...Option) (*schema.StreamReader[*schema.AgenticMessage], error)

	// WithTools returns a new Model instance with the specified tools bound.
	// This method does not modify the current instance, making it safer for concurrent use.
	WithTools(tools []*schema.ToolInfo) (AgenticModel, error)
}

AgenticModel defines the interface for agentic models that support AgenticMessage. It provides methods for generating complete and streaming outputs, and supports tool calling via the WithTools method.

type BaseChatModel added in v0.3.23

type BaseChatModel interface {
	Generate(ctx context.Context, input []*schema.Message, opts ...Option) (*schema.Message, error)
	Stream(ctx context.Context, input []*schema.Message, opts ...Option) (
		*schema.StreamReader[*schema.Message], error)
}

BaseChatModel defines the basic interface for chat models. It provides methods for generating complete outputs and streaming outputs. This interface serves as the foundation for all chat model implementations.

type CallbackInput

type CallbackInput struct {
	// Messages is the messages to be sent to the model.
	Messages []*schema.Message
	// Tools is the tools to be used in the model.
	Tools []*schema.ToolInfo
	// ToolChoice is the tool choice, which controls the tool to be used in the model.
	ToolChoice *schema.ToolChoice
	// Config is the config for the model.
	Config *Config
	// Extra is the extra information for the callback.
	Extra map[string]any
}

CallbackInput is the input for the model callback.

func ConvCallbackInput

func ConvCallbackInput(src callbacks.CallbackInput) *CallbackInput

ConvCallbackInput converts the callback input to the model callback input.

type CallbackOutput

type CallbackOutput struct {
	// Message is the message generated by the model.
	Message *schema.Message
	// Config is the config for the model.
	Config *Config
	// TokenUsage is the token usage of this request.
	TokenUsage *TokenUsage
	// Extra is the extra information for the callback.
	Extra map[string]any
}

CallbackOutput is the output for the model callback.

func ConvCallbackOutput

func ConvCallbackOutput(src callbacks.CallbackOutput) *CallbackOutput

ConvCallbackOutput converts the callback output to the model callback output.

type ChatModel deprecated

type ChatModel interface {
	BaseChatModel

	// BindTools bind tools to the model.
	// Generally, call BindTools before requesting the ChatModel.
	// Note the non-atomic behavior between BindTools and Generate.
	BindTools(tools []*schema.ToolInfo) error
}

Deprecated: Please use ToolCallingChatModel interface instead, which provides a safer way to bind tools without the concurrency issues and tool overwriting problems that may arise from the BindTools method.

type CompletionTokensDetails added in v0.7.10

type CompletionTokensDetails struct {
	// ReasoningTokens tokens generated by the model for reasoning.
	// This is currently supported by OpenAI, Gemini, ARK and Qwen chat models.
	// For other models, this field will be 0.
	ReasoningTokens int `json:"reasoning_tokens,omitempty"`
}

type Config

type Config struct {
	// Model is the model name.
	Model string
	// MaxTokens is the max number of tokens, if reached the max tokens, the model will stop generating, and mostly return a finish reason of "length".
	MaxTokens int
	// Temperature is the temperature, which controls the randomness of the model.
	Temperature float32
	// TopP is the top p, which controls the diversity of the model.
	TopP float32
	// Stop is the stop words, which controls the stopping condition of the model.
	Stop []string
}

Config is the config for the model.

type Option

type Option struct {
	// contains filtered or unexported fields
}

Option is the call option for ChatModel component.

func WithAgenticToolChoice

func WithAgenticToolChoice(toolChoice *schema.AgenticToolChoice) Option

WithAgenticToolChoice is the option to set tool choice for the agentic model. Only available for AgenticModel.

func WithMaxTokens

func WithMaxTokens(maxTokens int) Option

WithMaxTokens is the option to set the max tokens for the model.

func WithModel

func WithModel(name string) Option

WithModel is the option to set the model name.

func WithStop

func WithStop(stop []string) Option

WithStop is the option to set the stop words for the model.

func WithTemperature

func WithTemperature(temperature float32) Option

WithTemperature is the option to set the temperature for the model.

func WithToolChoice added in v0.3.8

func WithToolChoice(toolChoice schema.ToolChoice, allowedToolNames ...string) Option

WithToolChoice sets the tool choice for the model. It also allows for providing a list of tool names to constrain the model to a specific subset of the available tools. Only available for ChatModel.

func WithTools added in v0.3.8

func WithTools(tools []*schema.ToolInfo) Option

WithTools is the option to set tools for the model.

func WithTopP

func WithTopP(topP float32) Option

WithTopP is the option to set the top p for the model.

func WrapImplSpecificOptFn

func WrapImplSpecificOptFn[T any](optFn func(*T)) Option

WrapImplSpecificOptFn is the option to wrap the implementation specific option function.

type Options

type Options struct {
	// Temperature is the temperature for the model, which controls the randomness of the model.
	Temperature *float32
	// Model is the model name.
	Model *string
	// TopP is the top p for the model, which controls the diversity of the model.
	TopP *float32
	// Tools is a list of tools the model may call.
	Tools []*schema.ToolInfo
	// MaxTokens is the max number of tokens, if reached the max tokens, the model will stop generating, and mostly return a finish reason of "length".
	MaxTokens *int
	// Stop is the stop words for the model, which controls the stopping condition of the model.
	Stop []string

	// ToolChoice controls which tool is called by the model.
	ToolChoice *schema.ToolChoice
	// AllowedToolNames specifies a list of tool names that the model is allowed to call.
	// This allows for constraining the model to a specific subset of the available tools.
	AllowedToolNames []string

	// AgenticToolChoice controls how the agentic model calls tools.
	AgenticToolChoice *schema.AgenticToolChoice
}

Options is the common options for the model.

func GetCommonOptions

func GetCommonOptions(base *Options, opts ...Option) *Options

GetCommonOptions extract model Options from Option list, optionally providing a base Options with default values.

type PromptTokenDetails added in v0.4.2

type PromptTokenDetails struct {
	// Cached tokens present in the prompt.
	CachedTokens int
}

PromptTokenDetails provides a breakdown of prompt token usage.

type TokenUsage

type TokenUsage struct {
	// PromptTokens is the number of prompt tokens, including all the input tokens of this request.
	PromptTokens int
	// PromptTokenDetails is a breakdown of the prompt tokens.
	PromptTokenDetails PromptTokenDetails
	// CompletionTokens is the number of completion tokens.
	CompletionTokens int
	// TotalTokens is the total number of tokens.
	TotalTokens int
	// CompletionTokensDetails is a breakdown of completion tokens.
	CompletionTokensDetails CompletionTokensDetails `json:"completion_token_details"`
}

TokenUsage is the token usage for the model.

type ToolCallingChatModel added in v0.3.23

type ToolCallingChatModel interface {
	BaseChatModel

	// WithTools returns a new ToolCallingChatModel instance with the specified tools bound.
	// This method does not modify the current instance, making it safer for concurrent use.
	WithTools(tools []*schema.ToolInfo) (ToolCallingChatModel, error)
}

ToolCallingChatModel extends BaseChatModel with tool calling capabilities. It provides a WithTools method that returns a new instance with the specified tools bound, avoiding state mutation and concurrency issues.

Jump to

Keyboard shortcuts

? : This menu
/ : Search site
f or F : Jump to
y or Y : Canonical URL