Documentation
¶
Overview ¶
Package llm provides the main API for making LLM calls.
Index ¶
- Constants
- Variables
- type FinishReason
- type Message
- func AssistantMessage(content string) Message
- func AssistantMessageWithToolCalls(content string, toolCalls []ToolCall) Message
- func ExecuteToolCalls(ctx context.Context, toolCalls []ToolCall, registry *ToolRegistry) ([]Message, error)
- func SystemMessage(content string) Message
- func ToolMessage(toolCallID, content string) Message
- func UserMessage(content string) Message
- type Model
- func (m *Model) Call(ctx context.Context, prompt string, opts ...Option) (Response[string], error)
- func (m *Model) CallMessages(ctx context.Context, messages []Message, opts ...Option) (Response[string], error)
- func (m *Model) CallParse(ctx context.Context, prompt string, target any, opts ...Option) error
- type Option
- func WithMaxTokens(n int) Option
- func WithMessages(msgs ...Message) Option
- func WithModel(name string) Option
- func WithProvider(name string) Option
- func WithSeed(seed int) Option
- func WithStopSequences(seqs ...string) Option
- func WithSystemMessage(msg string) Option
- func WithTemperature(t float64) Option
- func WithTools(tools ...Tool) Option
- func WithTopK(k int) Option
- func WithTopP(p float64) Option
- type ParseError
- type ProviderError
- type Response
- func Call(ctx context.Context, prompt string, opts ...Option) (Response[string], error)
- func CallMessages(ctx context.Context, messages []Message, opts ...Option) (Response[string], error)
- func CallMessagesParse[T any](ctx context.Context, messages []Message, opts ...Option) (Response[T], error)
- func CallParse[T any](ctx context.Context, prompt string, opts ...Option) (Response[T], error)
- func (r Response[T]) FinishReason() FinishReason
- func (r Response[T]) HasToolCalls() bool
- func (r Response[T]) Messages() []Message
- func (r Response[T]) MustParse() T
- func (r Response[T]) Parsed() (T, error)
- func (r Response[T]) Raw() *provider.Response
- func (r Response[T]) Resume(ctx context.Context, content string, opts ...Option) (Response[string], error)
- func (r Response[T]) ResumeWithToolOutputs(ctx context.Context, toolOutputs []Message, opts ...Option) (Response[string], error)
- func (r Response[T]) Text() string
- func (r Response[T]) ToolCalls() []ToolCall
- func (r Response[T]) Usage() Usage
- type Role
- type Stream
- type StreamChunk
- type Tool
- type ToolCall
- type ToolCallDelta
- type ToolError
- type ToolNotFoundError
- type ToolRegistry
- type TypedTool
- func (t *TypedTool[In, Out]) Description() string
- func (t *TypedTool[In, Out]) Execute(ctx context.Context, args json.RawMessage) (any, error)
- func (t *TypedTool[In, Out]) Name() string
- func (t *TypedTool[In, Out]) Parameters() *jsonschema.Schema
- func (t *TypedTool[In, Out]) TypedCall(ctx context.Context, input In) (Out, error)
- type Usage
Constants ¶
const (
	RoleSystem    = provider.RoleSystem
	RoleUser      = provider.RoleUser
	RoleAssistant = provider.RoleAssistant
	RoleTool      = provider.RoleTool
)
Role constants.
Variables ¶
var (
	// ErrProviderRequired is returned when WithProvider is not specified.
	ErrProviderRequired = errors.New("provider is required: use WithProvider option")

	// ErrModelRequired is returned when WithModel is not specified.
	ErrModelRequired = errors.New("model is required: use WithModel option")

	// ErrNotParsed is returned when Parsed() is called but no parsing occurred.
	ErrNotParsed = errors.New("response was not parsed: use CallParse to get structured output")
)
Common errors.
Functions ¶
This section is empty.
Types ¶
type FinishReason ¶
type FinishReason string
FinishReason indicates why the model stopped generating.
const (
	FinishReasonStop      FinishReason = "stop"
	FinishReasonToolCalls FinishReason = "tool_calls"
	FinishReasonLength    FinishReason = "length"
)
type Message ¶
Message is an alias for provider.Message for convenience.
func AssistantMessage ¶
AssistantMessage creates an assistant message.
func AssistantMessageWithToolCalls ¶
AssistantMessageWithToolCalls creates an assistant message with tool calls.
func ExecuteToolCalls ¶
func ExecuteToolCalls(ctx context.Context, toolCalls []ToolCall, registry *ToolRegistry) ([]Message, error)
ExecuteToolCalls executes tool calls and returns tool result messages.
func SystemMessage ¶
SystemMessage creates a system message.
func ToolMessage ¶
ToolMessage creates a tool result message.
type Model ¶
type Model struct {
// contains filtered or unexported fields
}
Model represents a configured LLM model with default options. It provides a convenient way to reuse common configuration.
Example:
model := llm.NewModel("openai", "o4-mini",
llm.WithTemperature(0.7),
)
resp, err := model.Call(ctx, "Tell me a joke")
func NewModel ¶
NewModel creates a new Model with the given provider and model name. Additional options can be provided as default configuration.
func (*Model) Call ¶
Call makes an LLM call using this model's configuration. Per-call options override the model's base options.
type Option ¶
type Option func(*callConfig)
Option configures an LLM call.
func WithMaxTokens ¶
WithMaxTokens sets the maximum tokens in the response.
func WithMessages ¶
WithMessages sets the conversation history. This is useful for multi-turn conversations with Call.
func WithProvider ¶
WithProvider sets the LLM provider (e.g., "openai", "anthropic").
func WithStopSequences ¶
WithStopSequences sets stop sequences to end generation. The model will stop generating text if one of these strings is encountered.
func WithSystemMessage ¶
WithSystemMessage sets a system message.
func WithTemperature ¶
WithTemperature sets the sampling temperature.
type ParseError ¶
ParseError represents a failure to parse the LLM response.
func (*ParseError) Error ¶
func (e *ParseError) Error() string
func (*ParseError) Unwrap ¶
func (e *ParseError) Unwrap() error
type ProviderError ¶
ProviderError represents an error from the LLM provider.
func (*ProviderError) Error ¶
func (e *ProviderError) Error() string
func (*ProviderError) Unwrap ¶
func (e *ProviderError) Unwrap() error
type Response ¶
type Response[T any] struct {
	// contains filtered or unexported fields
}
Response wraps the provider response with type-safe parsed content. T is the type of structured output expected from the LLM.
func Call ¶
Call makes an LLM call and returns a text response.
Example:
resp, err := llm.Call(ctx, "Recommend a fantasy book",
llm.WithProvider("openai"),
llm.WithModel("o4-mini"),
)
if err != nil {
return err
}
fmt.Println(resp.Text())
func CallMessages ¶
func CallMessages(ctx context.Context, messages []Message, opts ...Option) (Response[string], error)
CallMessages makes an LLM call with a full message history. This is useful for multi-turn conversations.
Example:
messages := []llm.Message{
llm.SystemMessage("You are a helpful assistant"),
llm.UserMessage("Hello"),
llm.AssistantMessage("Hi! How can I help?"),
llm.UserMessage("Tell me a joke"),
}
resp, err := llm.CallMessages(ctx, messages,
llm.WithProvider("openai"),
llm.WithModel("o4-mini"),
)
func CallMessagesParse ¶
func CallMessagesParse[T any](ctx context.Context, messages []Message, opts ...Option) (Response[T], error)
CallMessagesParse makes an LLM call with messages and parses the response. Combines CallMessages with structured output parsing.
func CallParse ¶
CallParse makes an LLM call with structured output and parses the response into type T. The JSON schema is automatically generated from T.
Example:
type Book struct {
Title string `json:"title" jsonschema:"required,description=Book title"`
Author string `json:"author" jsonschema:"required"`
}
resp, err := llm.CallParse[Book](ctx, "Recommend a sci-fi book",
llm.WithProvider("openai"),
llm.WithModel("o4-mini"),
)
if err != nil {
return err
}
book := resp.MustParse()
fmt.Printf("%s by %s\n", book.Title, book.Author)
func (Response[T]) FinishReason ¶
func (r Response[T]) FinishReason() FinishReason
FinishReason returns why the model stopped generating.
func (Response[T]) HasToolCalls ¶
HasToolCalls returns true if the response contains tool calls.
func (Response[T]) Messages ¶
Messages returns the full conversation history including the assistant's response.
func (Response[T]) MustParse ¶
func (r Response[T]) MustParse() T
MustParse returns the parsed value or panics. Useful in tests or when you're certain parsing succeeded.
func (Response[T]) Parsed ¶
Parsed returns the structured output with compile-time type safety. Returns ErrNotParsed if the response was not created via CallParse.
func (Response[T]) Raw ¶
Raw returns the underlying provider response. This can be useful for debugging or accessing provider-specific data.
func (Response[T]) Resume ¶
func (r Response[T]) Resume(ctx context.Context, content string, opts ...Option) (Response[string], error)
Resume continues the conversation with additional user content. It uses the same provider, model, and tools from the original call.
Example:
resp, _ := llm.Call(ctx, "Recommend a book", opts...)
continuation, _ := resp.Resume(ctx, "Why did you recommend that one?")
fmt.Println(continuation.Text())
func (Response[T]) ResumeWithToolOutputs ¶
func (r Response[T]) ResumeWithToolOutputs(ctx context.Context, toolOutputs []Message, opts ...Option) (Response[string], error)
ResumeWithToolOutputs continues the conversation with tool execution results. This is used after the LLM has requested tool calls.
Example:
if resp.HasToolCalls() {
toolMessages, _ := llm.ExecuteToolCalls(ctx, resp.ToolCalls(), registry)
continuation, _ := resp.ResumeWithToolOutputs(ctx, toolMessages)
fmt.Println(continuation.Text())
}
type Stream ¶
type Stream struct {
// contains filtered or unexported fields
}
Stream represents a streaming response from an LLM.
func CallMessagesStream ¶
CallMessagesStream makes a streaming LLM call with message history.
func CallStream ¶
CallStream makes a streaming LLM call.
Example:
stream, err := llm.CallStream(ctx, "Write a short story",
llm.WithProvider("openai"),
llm.WithModel("o4-mini"),
)
if err != nil {
return err
}
defer stream.Close()
for chunk := range stream.Chunks() {
fmt.Print(chunk.Delta)
}
if err := stream.Err(); err != nil {
return err
}
func (*Stream) Chunks ¶
func (s *Stream) Chunks() iter.Seq[StreamChunk]
Chunks returns an iterator over the stream chunks. This uses Go 1.23+ range-over-func.
Example:
stream, err := llm.CallStream(ctx, "Write a story", opts...)
if err != nil {
return err
}
defer stream.Close()
for chunk := range stream.Chunks() {
fmt.Print(chunk.Delta)
}
type StreamChunk ¶
type StreamChunk struct {
Delta string
ToolCallDelta *ToolCallDelta
FinishReason FinishReason
}
StreamChunk represents a single chunk in a streaming response.
type Tool ¶
type Tool interface {
// Name returns the tool's name as seen by the LLM.
Name() string
// Description returns the tool's description for the LLM.
Description() string
// Parameters returns the JSON schema for the tool's parameters.
Parameters() *jsonschema.Schema
// Execute runs the tool with the given JSON arguments.
Execute(ctx context.Context, args json.RawMessage) (any, error)
}
Tool represents an executable tool that the LLM can call. This interface allows for heterogeneous collections of tools.
type ToolCallDelta ¶
ToolCallDelta represents incremental tool call data.
type ToolNotFoundError ¶
type ToolNotFoundError struct {
Name string
}
ToolNotFoundError is returned when a tool is not found.
func (*ToolNotFoundError) Error ¶
func (e *ToolNotFoundError) Error() string
type ToolRegistry ¶
type ToolRegistry struct {
// contains filtered or unexported fields
}
ToolRegistry manages a collection of tools.
func NewToolRegistry ¶
func NewToolRegistry() *ToolRegistry
NewToolRegistry creates a new tool registry.
func (*ToolRegistry) Get ¶
func (r *ToolRegistry) Get(name string) (Tool, bool)
Get retrieves a tool by name.
func (*ToolRegistry) Register ¶
func (r *ToolRegistry) Register(tools ...Tool)
Register adds a tool to the registry.
type TypedTool ¶
TypedTool provides type-safe tool creation with auto-generated schema. In is the input type, Out is the output type.
func MustNewTool ¶
func MustNewTool[In any, Out any](
	name, description string,
	fn func(ctx context.Context, in In) (Out, error),
) *TypedTool[In, Out]
MustNewTool is like NewTool but panics on error. Useful for package-level tool definitions.
func NewTool ¶
func NewTool[In any, Out any](
	name, description string,
	fn func(ctx context.Context, in In) (Out, error),
) (*TypedTool[In, Out], error)
NewTool creates a type-safe tool from a function. The input type In is used to generate the JSON schema automatically.
Example:
type WeatherInput struct {
City string `json:"city" jsonschema:"required,description=City name"`
}
type WeatherOutput struct {
Temperature float64 `json:"temperature"`
Conditions string `json:"conditions"`
}
weatherTool, err := llm.NewTool("get_weather", "Get weather for a city",
func(ctx context.Context, in WeatherInput) (WeatherOutput, error) {
return WeatherOutput{Temperature: 72.5, Conditions: "Sunny"}, nil
},
)
func (*TypedTool[In, Out]) Description ¶
Description returns the tool's description.
func (*TypedTool[In, Out]) Execute ¶
Execute runs the tool with the given JSON arguments. Implements the Tool interface.
func (*TypedTool[In, Out]) Parameters ¶
func (t *TypedTool[In, Out]) Parameters() *jsonschema.Schema
Parameters returns the JSON schema for the tool's parameters.