Documentation
¶
Overview ¶
Package chatmodel provides functionalities for working with Large Language Models (LLMs).
Index ¶
Constants ¶
This section is empty.
Variables ¶
This section is empty.
Functions ¶
This section is empty.
Types ¶
type Anthropic ¶
Anthropic is a chat model based on the Anthropic API.
func NewAnthropic ¶
func NewAnthropic(apiKey string, optFns ...func(o *AnthropicOptions)) (*Anthropic, error)
NewAnthropic creates a new instance of the Anthropic chat model with the provided options.
func (*Anthropic) Generate ¶
func (cm *Anthropic) Generate(ctx context.Context, messages schema.ChatMessages, optFns ...func(o *schema.GenerateOptions)) (*schema.ModelResult, error)
Generate generates text based on the provided chat messages and options.
func (*Anthropic) InvocationParams ¶ added in v0.0.27
InvocationParams returns the parameters used in the model invocation.
type AnthropicOptions ¶
type AnthropicOptions struct {
*schema.CallbackOptions `map:"-"`
schema.Tokenizer `map:"-"`
// Model name to use.
ModelName string `map:"model_name,omitempty"`
// Temperature parameter controls the randomness of the generation output.
Temperature float64 `map:"temperature,omitempty"`
// Denotes the number of tokens to predict per generation.
MaxTokens int `map:"max_tokens,omitempty"`
// TopK parameter specifies the number of highest probability tokens to consider for generation.
TopK int `map:"top_k,omitempty"`
// TopP parameter specifies the cumulative probability threshold for generating tokens.
TopP float64 `map:"top_p,omitempty"`
}
AnthropicOptions contains options for configuring the Anthropic chat model.
type AzureOpenAIOptions ¶ added in v0.0.26
type AzureOpenAIOptions struct {
OpenAIOptions
Deployment string
}
type Fake ¶ added in v0.0.14
func (*Fake) Generate ¶ added in v0.0.14
func (cm *Fake) Generate(ctx context.Context, messages schema.ChatMessages, optFns ...func(o *schema.GenerateOptions)) (*schema.ModelResult, error)
Generate generates text based on the provided chat messages and options.
func (*Fake) InvocationParams ¶ added in v0.0.27
InvocationParams returns the parameters used in the model invocation.
type OpenAI ¶
func NewAzureOpenAI ¶ added in v0.0.26
func NewAzureOpenAI(apiKey, baseURL string, optFns ...func(o *AzureOpenAIOptions)) (*OpenAI, error)
func (*OpenAI) Generate ¶
func (cm *OpenAI) Generate(ctx context.Context, messages schema.ChatMessages, optFns ...func(o *schema.GenerateOptions)) (*schema.ModelResult, error)
Generate generates text based on the provided chat messages and options.
func (*OpenAI) InvocationParams ¶ added in v0.0.27
InvocationParams returns the parameters used in the model invocation.
type OpenAIOptions ¶
type OpenAIOptions struct {
*schema.CallbackOptions `map:"-"`
schema.Tokenizer `map:"-"`
// Model name to use.
ModelName string
// Sampling temperature to use.
Temperatur float32
// The maximum number of tokens to generate in the completion.
// -1 returns as many tokens as possible given the prompt and
// the model's maximal context size.
MaxTokens int
// Total probability mass of tokens to consider at each step.
TopP float32
// Penalizes repeated tokens.
PresencePenalty float32
// Penalizes repeated tokens according to frequency.
FrequencyPenalty float32
// How many completions to generate for each prompt.
N int
}