Documentation ¶
Overview ¶
Package llm provides a unified abstraction layer for Large Language Model interactions within the Mattermost AI plugin.
This package defines the core interfaces and data structures for working with various LLM providers (OpenAI, Anthropic, etc.) in a consistent manner. It handles:
- LanguageModel interface abstraction for different LLM providers
- Conversation management with structured posts, roles, and context
- Prompt template system with embedded templates and variable substitution
- Streaming text responses for real-time chat interactions
- Tool/function calling capabilities with JSON schema validation
- Request/response structures with token counting and truncation
- Context management including user info, channels, and bot configurations
The package is designed to be provider-agnostic, allowing the plugin to work with multiple LLM services through a common interface while preserving provider-specific capabilities like vision, JSON output, and tool calling.
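As a minimal sketch of that provider-agnostic design (the complete function below is illustrative, not part of the package; it assumes a model and request built elsewhere in the plugin):

// complete runs a non-streaming completion against any provider that
// implements llm.LanguageModel, regardless of the concrete service.
func complete(model llm.LanguageModel, req llm.CompletionRequest) (string, error) {
	// Provider behavior such as the output token budget is tuned
	// through functional options rather than provider-specific types.
	return model.ChatCompletionNoStream(req, llm.WithMaxGeneratedTokens(512))
}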
Index ¶
- Constants
- func CreateTokenLogger() (*mlog.Logger, error)
- func NewJSONSchemaFromStruct[T any]() *jsonschema.Schema
- type BotConfig
- type ChannelAccessLevel
- type CompletionRequest
- type Context
- type ContextOption
- type EventType
- type File
- type LanguageModel
- type LanguageModelConfig
- type LanguageModelLogWrapper
- func (w *LanguageModelLogWrapper) ChatCompletion(request CompletionRequest, opts ...LanguageModelOption) (*TextStreamResult, error)
- func (w *LanguageModelLogWrapper) ChatCompletionNoStream(request CompletionRequest, opts ...LanguageModelOption) (string, error)
- func (w *LanguageModelLogWrapper) CountTokens(text string) int
- func (w *LanguageModelLogWrapper) InputTokenLimit() int
- type LanguageModelOption
- type LanguageModelTestLogWrapper
- func (w *LanguageModelTestLogWrapper) ChatCompletion(request CompletionRequest, opts ...LanguageModelOption) (*TextStreamResult, error)
- func (w *LanguageModelTestLogWrapper) ChatCompletionNoStream(request CompletionRequest, opts ...LanguageModelOption) (string, error)
- func (w *LanguageModelTestLogWrapper) CountTokens(text string) int
- func (w *LanguageModelTestLogWrapper) InputTokenLimit() int
- type LanguageModelWrapper
- type Post
- type PostRole
- type Prompts
- type ServiceConfig
- type TextStreamEvent
- type TextStreamResult
- type TokenUsage
- type TokenUsageLoggingWrapper
- func (w *TokenUsageLoggingWrapper) ChatCompletion(request CompletionRequest, opts ...LanguageModelOption) (*TextStreamResult, error)
- func (w *TokenUsageLoggingWrapper) ChatCompletionNoStream(request CompletionRequest, opts ...LanguageModelOption) (string, error)
- func (w *TokenUsageLoggingWrapper) CountTokens(text string) int
- func (w *TokenUsageLoggingWrapper) InputTokenLimit() int
- type Tool
- type ToolArgumentGetter
- type ToolAuthError
- type ToolCall
- type ToolCallStatus
- type ToolResolver
- type ToolStore
- func (s *ToolStore) AddAuthError(authError ToolAuthError)
- func (s *ToolStore) AddTools(tools []Tool)
- func (s *ToolStore) GetAuthErrors() []ToolAuthError
- func (s *ToolStore) GetTools() []Tool
- func (s *ToolStore) ResolveTool(name string, argsGetter ToolArgumentGetter, context *Context) (string, error)
- func (s *ToolStore) TraceResolved(name string, argsGetter ToolArgumentGetter, result string, err error)
- func (s *ToolStore) TraceUnknown(name string, argsGetter ToolArgumentGetter)
- type TraceLog
- type TruncationWrapper
- func (w *TruncationWrapper) ChatCompletion(request CompletionRequest, opts ...LanguageModelOption) (*TextStreamResult, error)
- func (w *TruncationWrapper) ChatCompletionNoStream(request CompletionRequest, opts ...LanguageModelOption) (string, error)
- func (w *TruncationWrapper) CountTokens(text string) int
- func (w *TruncationWrapper) InputTokenLimit() int
- type UserAccessLevel
Constants ¶
const (
	ServiceTypeOpenAI           = "openai"
	ServiceTypeOpenAICompatible = "openaicompatible"
	ServiceTypeAzure            = "azure"
	ServiceTypeASage            = "asage"
	ServiceTypeAnthropic        = "anthropic"
	ServiceTypeCohere           = "cohere"
)
const FunctionsTokenBudget = 200
const MinTokens = 100
const PromptExtension = "tmpl"
const TokenLimitBufferSize = 0.9
Variables ¶
This section is empty.
Functions ¶
func CreateTokenLogger ¶ added in v1.4.0
func CreateTokenLogger() (*mlog.Logger, error)
CreateTokenLogger creates a dedicated logger for token usage metrics
func NewJSONSchemaFromStruct ¶
func NewJSONSchemaFromStruct[T any]() *jsonschema.Schema
NewJSONSchemaFromStruct creates a JSON schema from a Go struct using generics. It's a helper function for tool providers that currently define schemas as structs.
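For instance, a tool provider might derive its argument schema from a plain struct (the struct below is a hypothetical example):

// LookupUserArgs is a hypothetical tool-argument struct.
type LookupUserArgs struct {
	Username string `json:"username"`
}

// Derive a *jsonschema.Schema suitable for a Tool definition.
var lookupUserSchema = llm.NewJSONSchemaFromStruct[LookupUserArgs]()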
Types ¶
type BotConfig ¶
type BotConfig struct {
ID string `json:"id"`
Name string `json:"name"`
DisplayName string `json:"displayName"`
CustomInstructions string `json:"customInstructions"`
Service ServiceConfig `json:"service"`
EnableVision bool `json:"enableVision"`
DisableTools bool `json:"disableTools"`
ChannelAccessLevel ChannelAccessLevel `json:"channelAccessLevel"`
ChannelIDs []string `json:"channelIDs"`
UserAccessLevel UserAccessLevel `json:"userAccessLevel"`
UserIDs []string `json:"userIDs"`
TeamIDs []string `json:"teamIDs"`
MaxFileSize int64 `json:"maxFileSize"`
}
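A filled-in sketch of a BotConfig (every value is illustrative; the Service field is omitted here and shown separately under ServiceConfig below):

var exampleBot = llm.BotConfig{
	ID:                 "bot-id",
	Name:               "ai",
	DisplayName:        "AI Assistant",
	CustomInstructions: "Answer concisely.",
	EnableVision:       true,
	ChannelAccessLevel: llm.ChannelAccessLevelAllow,
	ChannelIDs:         []string{"channel-id-1"},
	UserAccessLevel:    llm.UserAccessLevelAll,
	MaxFileSize:        1024 * 1024, // 1 MiB
}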
type ChannelAccessLevel ¶
type ChannelAccessLevel int
const (
	ChannelAccessLevelAll ChannelAccessLevel = iota
	ChannelAccessLevelAllow
	ChannelAccessLevelBlock
	ChannelAccessLevelNone
)
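These levels appear to gate bot availability against BotConfig.ChannelIDs. One plausible evaluation, as a sketch only (this helper is not part of the package, and the actual enforcement lives elsewhere in the plugin):

// channelAllowed sketches how an access level and an ID list might combine.
func channelAllowed(level llm.ChannelAccessLevel, channelIDs []string, channelID string) bool {
	listed := false
	for _, id := range channelIDs {
		if id == channelID {
			listed = true
		}
	}
	switch level {
	case llm.ChannelAccessLevelAll:
		return true // available in every channel
	case llm.ChannelAccessLevelAllow:
		return listed // only the listed channels
	case llm.ChannelAccessLevelBlock:
		return !listed // every channel except the listed ones
	default: // llm.ChannelAccessLevelNone
		return false
	}
}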
type CompletionRequest ¶
func (CompletionRequest) ExtractSystemMessage ¶
func (b CompletionRequest) ExtractSystemMessage() string
ExtractSystemMessage extracts the system message from the conversation.
func (CompletionRequest) String ¶
func (b CompletionRequest) String() string
type Context ¶
type Context struct {
// Server
Time string
ServerName string
CompanyName string
// Location
Team *model.Team
Channel *model.Channel
Thread []Post // Normalized posts that have already been formatted. nil if not in a thread or if this is a root post
// User that is making the request
RequestingUser *model.User
// Bot Specific
BotName string
BotUsername string
BotModel string
CustomInstructions string
Tools *ToolStore
Parameters map[string]interface{}
}
Context represents the data necessary to build the context for the LLM. Consumers cannot assume that any of the fields are present.
func NewContext ¶
func NewContext(opts ...ContextOption) *Context
NewContext creates a new Context with the given options
type ContextOption ¶
type ContextOption func(*Context)
ContextOption defines a function that configures a Context
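NewContext follows the functional-options pattern: each ContextOption mutates the Context under construction. WithRequestingUser below is a hypothetical option written for illustration, not one exported by this package:

// WithRequestingUser is a hypothetical ContextOption that fills in the
// requesting user on the Context being built.
func WithRequestingUser(user *model.User) llm.ContextOption {
	return func(c *llm.Context) {
		c.RequestingUser = user
	}
}

// ctx := llm.NewContext(WithRequestingUser(user))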
type EventType ¶
type EventType int
EventType represents the type of event in the text stream
const (
	// EventTypeText represents a text chunk event
	EventTypeText EventType = iota
	// EventTypeEnd represents the end of the stream
	EventTypeEnd
	// EventTypeError represents an error event
	EventTypeError
	// EventTypeToolCalls represents a tool call event
	EventTypeToolCalls
	// EventTypeReasoning represents a reasoning summary chunk event
	EventTypeReasoning
	// EventTypeReasoningEnd represents the end of the reasoning summary
	EventTypeReasoningEnd
	// EventTypeUsage represents token usage data
	EventTypeUsage
)
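A sketch of consuming these events from a TextStreamResult. The TextStreamEvent field names used here (Type, Value) are assumptions, since the struct body is not shown on this page:

func drainStream(result *llm.TextStreamResult) (string, error) {
	text := ""
	for event := range result.Stream {
		switch event.Type { // field name assumed
		case llm.EventTypeText:
			// The text chunk is assumed to arrive as event.Value.
			if chunk, ok := event.Value.(string); ok {
				text += chunk
			}
		case llm.EventTypeError:
			// The error is assumed to arrive as event.Value.
			if err, ok := event.Value.(error); ok {
				return text, err
			}
		case llm.EventTypeEnd:
			return text, nil
		}
	}
	return text, nil
}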
type LanguageModel ¶
type LanguageModel interface {
ChatCompletion(conversation CompletionRequest, opts ...LanguageModelOption) (*TextStreamResult, error)
ChatCompletionNoStream(conversation CompletionRequest, opts ...LanguageModelOption) (string, error)
CountTokens(text string) int
InputTokenLimit() int
}
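Any type with these four methods can stand in for a real provider, which keeps tests cheap. A minimal test-double sketch (echoModel is illustrative, not part of the package):

// echoModel is a hypothetical stub that satisfies llm.LanguageModel.
type echoModel struct{}

func (echoModel) ChatCompletion(req llm.CompletionRequest, opts ...llm.LanguageModelOption) (*llm.TextStreamResult, error) {
	return llm.NewStreamFromString("hello"), nil
}

func (echoModel) ChatCompletionNoStream(req llm.CompletionRequest, opts ...llm.LanguageModelOption) (string, error) {
	return "hello", nil
}

func (echoModel) CountTokens(text string) int { return len(text) / 4 }

func (echoModel) InputTokenLimit() int { return 8192 }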
type LanguageModelConfig ¶
type LanguageModelConfig struct {
Model string
MaxGeneratedTokens int
EnableVision bool
JSONOutputFormat *jsonschema.Schema
}
type LanguageModelLogWrapper ¶
type LanguageModelLogWrapper struct {
// contains filtered or unexported fields
}
func NewLanguageModelLogWrapper ¶
func NewLanguageModelLogWrapper(log pluginapi.LogService, wrapped LanguageModel) *LanguageModelLogWrapper
func (*LanguageModelLogWrapper) ChatCompletion ¶
func (w *LanguageModelLogWrapper) ChatCompletion(request CompletionRequest, opts ...LanguageModelOption) (*TextStreamResult, error)
func (*LanguageModelLogWrapper) ChatCompletionNoStream ¶
func (w *LanguageModelLogWrapper) ChatCompletionNoStream(request CompletionRequest, opts ...LanguageModelOption) (string, error)
func (*LanguageModelLogWrapper) CountTokens ¶
func (w *LanguageModelLogWrapper) CountTokens(text string) int
func (*LanguageModelLogWrapper) InputTokenLimit ¶
func (w *LanguageModelLogWrapper) InputTokenLimit() int
type LanguageModelOption ¶
type LanguageModelOption func(*LanguageModelConfig)
func WithJSONOutput ¶
func WithJSONOutput[T any]() LanguageModelOption
func WithMaxGeneratedTokens ¶
func WithMaxGeneratedTokens(maxGeneratedTokens int) LanguageModelOption
func WithModel ¶
func WithModel(model string) LanguageModelOption
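These options compose per request; a sketch combining all three (the model name and output struct are examples only):

func summarize(model llm.LanguageModel, req llm.CompletionRequest) (string, error) {
	// jsonOut constrains the response to a JSON object of this shape.
	type jsonOut struct {
		Summary string `json:"summary"`
	}
	return model.ChatCompletionNoStream(req,
		llm.WithModel("gpt-4o"), // example model name
		llm.WithMaxGeneratedTokens(256),
		llm.WithJSONOutput[jsonOut](),
	)
}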
type LanguageModelTestLogWrapper ¶
type LanguageModelTestLogWrapper struct {
// contains filtered or unexported fields
}
func NewLanguageModelTestLogWrapper ¶
func NewLanguageModelTestLogWrapper(t *testing.T, wrapped LanguageModel) *LanguageModelTestLogWrapper
func (*LanguageModelTestLogWrapper) ChatCompletion ¶
func (w *LanguageModelTestLogWrapper) ChatCompletion(request CompletionRequest, opts ...LanguageModelOption) (*TextStreamResult, error)
func (*LanguageModelTestLogWrapper) ChatCompletionNoStream ¶
func (w *LanguageModelTestLogWrapper) ChatCompletionNoStream(request CompletionRequest, opts ...LanguageModelOption) (string, error)
func (*LanguageModelTestLogWrapper) CountTokens ¶
func (w *LanguageModelTestLogWrapper) CountTokens(text string) int
func (*LanguageModelTestLogWrapper) InputTokenLimit ¶
func (w *LanguageModelTestLogWrapper) InputTokenLimit() int
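In tests, the wrapper logs model traffic through testing.T; a sketch using the echoModel stub from above:

func TestModelLogging(t *testing.T) {
	model := llm.NewLanguageModelTestLogWrapper(t, echoModel{})
	out, err := model.ChatCompletionNoStream(llm.CompletionRequest{})
	if err != nil || out != "hello" {
		t.Fatalf("unexpected result: %q, %v", out, err)
	}
}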
type LanguageModelWrapper ¶
type LanguageModelWrapper func(LanguageModel) LanguageModel
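The concrete wrappers in this package satisfy LanguageModel themselves, so they stack like middleware. One plausible ordering, as a sketch:

// buildModel sketches stacking wrappers around a base provider model.
func buildModel(base llm.LanguageModel, log pluginapi.LogService) llm.LanguageModel {
	truncated := llm.NewLLMTruncationWrapper(base)
	return llm.NewLanguageModelLogWrapper(log, truncated)
}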
type Prompts ¶
type Prompts struct {
// contains filtered or unexported fields
}
type ServiceConfig ¶
type ServiceConfig struct {
Name string `json:"name"`
Type string `json:"type"`
APIKey string `json:"apiKey"`
OrgID string `json:"orgId"`
DefaultModel string `json:"defaultModel"`
APIURL string `json:"apiURL"`
// Renaming the JSON field to inputTokenLimit would require a migration, leaving as is for now.
InputTokenLimit int `json:"tokenLimit"`
StreamingTimeoutSeconds int `json:"streamingTimeoutSeconds"`
SendUserID bool `json:"sendUserID"`
// Otherwise known as maxTokens
OutputTokenLimit int `json:"outputTokenLimit"`
// UseResponsesAPI determines whether to use the new OpenAI Responses API
// Only applicable to OpenAI and OpenAI-compatible services
UseResponsesAPI bool `json:"useResponsesAPI"`
// EnabledNativeTools contains the list of enabled OpenAI native tools
// Only works when UseResponsesAPI is true
// Example: ["web_search", "file_search", "code_interpreter"]
EnabledNativeTools []string `json:"enabledNativeTools"`
}
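A filled-in sketch (every value is illustrative):

var exampleService = llm.ServiceConfig{
	Name:             "my-openai",
	Type:             llm.ServiceTypeOpenAI,
	APIKey:           "sk-...", // placeholder; never hard-code real keys
	DefaultModel:     "gpt-4o",
	InputTokenLimit:  128000,
	OutputTokenLimit: 4096,
}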
type TextStreamEvent ¶
TextStreamEvent represents an event in the text stream
type TextStreamResult ¶
type TextStreamResult struct {
Stream <-chan TextStreamEvent
}
TextStreamResult represents a stream of text events
func NewStreamFromString ¶
func NewStreamFromString(text string) *TextStreamResult
func (*TextStreamResult) ReadAll ¶
func (t *TextStreamResult) ReadAll() (string, error)
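For callers that don't need incremental chunks, a sketch of draining a streaming completion into one string:

func completeText(model llm.LanguageModel, req llm.CompletionRequest) (string, error) {
	result, err := model.ChatCompletion(req)
	if err != nil {
		return "", err
	}
	// ReadAll blocks until the stream ends or errors.
	return result.ReadAll()
}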
type TokenUsage ¶ added in v1.4.0
type TokenUsage struct {
InputTokens int64 `json:"input_tokens"`
OutputTokens int64 `json:"output_tokens"`
}
TokenUsage represents token usage statistics for an LLM request
type TokenUsageLoggingWrapper ¶ added in v1.4.0
type TokenUsageLoggingWrapper struct {
// contains filtered or unexported fields
}
TokenUsageLoggingWrapper wraps a LanguageModel to log token usage
func NewTokenUsageLoggingWrapper ¶ added in v1.4.0
func NewTokenUsageLoggingWrapper(wrapped LanguageModel, botUsername string, tokenLogger *mlog.Logger) *TokenUsageLoggingWrapper
NewTokenUsageLoggingWrapper creates a new wrapper that logs token usage
func (*TokenUsageLoggingWrapper) ChatCompletion ¶ added in v1.4.0
func (w *TokenUsageLoggingWrapper) ChatCompletion(request CompletionRequest, opts ...LanguageModelOption) (*TextStreamResult, error)
ChatCompletion intercepts the streaming response to extract and log token usage
func (*TokenUsageLoggingWrapper) ChatCompletionNoStream ¶ added in v1.4.0
func (w *TokenUsageLoggingWrapper) ChatCompletionNoStream(request CompletionRequest, opts ...LanguageModelOption) (string, error)
ChatCompletionNoStream uses the streaming method internally, so token usage logging happens automatically when ReadAll() processes the intercepted stream
func (*TokenUsageLoggingWrapper) CountTokens ¶ added in v1.4.0
func (w *TokenUsageLoggingWrapper) CountTokens(text string) int
CountTokens delegates to the wrapped model
func (*TokenUsageLoggingWrapper) InputTokenLimit ¶ added in v1.4.0
func (w *TokenUsageLoggingWrapper) InputTokenLimit() int
InputTokenLimit delegates to the wrapped model
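A sketch wiring CreateTokenLogger into this wrapper:

func withTokenLogging(model llm.LanguageModel, botUsername string) (llm.LanguageModel, error) {
	// CreateTokenLogger builds the dedicated token-usage logger.
	logger, err := llm.CreateTokenLogger()
	if err != nil {
		return nil, err
	}
	return llm.NewTokenUsageLoggingWrapper(model, botUsername, logger), nil
}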
type Tool ¶
type Tool struct {
Name string
Description string
Schema *jsonschema.Schema
Resolver ToolResolver
}
Tool represents a function that can be called by the language model during a conversation.
Each tool has a name, description, and schema that defines its parameters. These are passed to the LLM so that it understands what capabilities it has; the Resolver function implements the actual functionality.
The Schema field should contain a JSONSchema that defines the expected structure of the tool's arguments. The Resolver function receives the conversation context and a way to access the parsed arguments, and returns either a result that will be passed to the LLM or an error.
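Putting the pieces together, a tool definition might look like the following sketch. The argument struct and resolver body are hypothetical, and the call shape of ToolArgumentGetter (unmarshalling into a struct pointer) is an assumption, since its signature is not shown on this page:

// EchoArgs is a hypothetical argument struct for the example tool.
type EchoArgs struct {
	Message string `json:"message"`
}

var echoTool = llm.Tool{
	Name:        "echo",
	Description: "Echo the provided message back to the model.",
	Schema:      llm.NewJSONSchemaFromStruct[EchoArgs](),
	Resolver: func(ctx *llm.Context, argsGetter llm.ToolArgumentGetter) (string, error) {
		var args EchoArgs
		// Assumed: the getter unmarshals the LLM-supplied JSON arguments.
		if err := argsGetter(&args); err != nil {
			return "", err
		}
		return args.Message, nil
	},
}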
type ToolArgumentGetter ¶
type ToolAuthError ¶ added in v1.4.0
type ToolAuthError struct {
ServerName string `json:"server_name"`
AuthURL string `json:"auth_url"`
Error error `json:"error"`
}
ToolAuthError represents an authentication error that occurred during tool creation
type ToolCall ¶
type ToolCall struct {
ID string `json:"id"`
Name string `json:"name"`
Description string `json:"description"`
Arguments json.RawMessage `json:"arguments"`
Result string `json:"result"`
Status ToolCallStatus `json:"status"`
}
ToolCall represents a tool call. An empty result indicates that the tool has not yet been resolved.
type ToolCallStatus ¶
type ToolCallStatus int
ToolCallStatus represents the current status of a tool call
const (
	// ToolCallStatusPending indicates the tool call is waiting for user approval or rejection
	ToolCallStatusPending ToolCallStatus = iota
	// ToolCallStatusAccepted indicates the user has accepted the tool call but it is not resolved yet
	ToolCallStatusAccepted
	// ToolCallStatusRejected indicates the user has rejected the tool call
	ToolCallStatusRejected
	// ToolCallStatusError indicates the tool call was accepted but errored during resolution
	ToolCallStatusError
	// ToolCallStatusSuccess indicates the tool call was accepted and resolved successfully
	ToolCallStatusSuccess
)
type ToolResolver ¶
type ToolResolver func(context *Context, argsGetter ToolArgumentGetter) (string, error)
type ToolStore ¶
type ToolStore struct {
// contains filtered or unexported fields
}
func NewNoTools ¶
func NewNoTools() *ToolStore
func NewToolStore ¶
func (*ToolStore) AddAuthError ¶ added in v1.4.0
func (s *ToolStore) AddAuthError(authError ToolAuthError)
AddAuthError adds an authentication error to the tool store
func (*ToolStore) AddTools ¶
func (s *ToolStore) AddTools(tools []Tool)
func (*ToolStore) GetAuthErrors ¶ added in v1.4.0
func (s *ToolStore) GetAuthErrors() []ToolAuthError
GetAuthErrors returns all authentication errors collected during tool creation
func (*ToolStore) GetTools ¶
func (s *ToolStore) GetTools() []Tool
func (*ToolStore) ResolveTool ¶
func (s *ToolStore) ResolveTool(name string, argsGetter ToolArgumentGetter, context *Context) (string, error)
func (*ToolStore) TraceResolved ¶
func (s *ToolStore) TraceResolved(name string, argsGetter ToolArgumentGetter, result string, err error)
func (*ToolStore) TraceUnknown ¶
func (s *ToolStore) TraceUnknown(name string, argsGetter ToolArgumentGetter)
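A sketch tying the store to the echo example above; argsGetter is assumed to come from the provider's tool-call plumbing:

func resolveByName(store *llm.ToolStore, name string, argsGetter llm.ToolArgumentGetter, ctx *llm.Context) (string, error) {
	// Register the example tool, then dispatch to its Resolver by name.
	store.AddTools([]llm.Tool{echoTool})
	return store.ResolveTool(name, argsGetter, ctx)
}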
type TruncationWrapper ¶
type TruncationWrapper struct {
// contains filtered or unexported fields
}
func NewLLMTruncationWrapper ¶
func NewLLMTruncationWrapper(llm LanguageModel) *TruncationWrapper
func (*TruncationWrapper) ChatCompletion ¶
func (w *TruncationWrapper) ChatCompletion(request CompletionRequest, opts ...LanguageModelOption) (*TextStreamResult, error)
func (*TruncationWrapper) ChatCompletionNoStream ¶
func (w *TruncationWrapper) ChatCompletionNoStream(request CompletionRequest, opts ...LanguageModelOption) (string, error)
func (*TruncationWrapper) CountTokens ¶
func (w *TruncationWrapper) CountTokens(text string) int
func (*TruncationWrapper) InputTokenLimit ¶
func (w *TruncationWrapper) InputTokenLimit() int
type UserAccessLevel ¶
type UserAccessLevel int
const (
	UserAccessLevelAll UserAccessLevel = iota
	UserAccessLevelAllow
	UserAccessLevelBlock
	UserAccessLevelNone
)