Documentation
¶
Index ¶
- Constants
- Variables
- func AppendCacheTTLTimestamp(meta *PortalMetadata)
- func BuildDebounceKey(roomID id.RoomID, sender id.UserID) string
- func CombineDebounceEntries(entries []DebounceEntry) (string, int)
- func DedupeChatToolParams(tools []openai.ChatCompletionToolUnionParam) []openai.ChatCompletionToolUnionParam
- func DefaultChatPortalKey(loginID networkid.UserLoginID) networkid.PortalKey
- func DefaultLinkPreviewConfig() linkpreview.Config
- func EnqueueReactionFeedback(roomID id.RoomID, feedback ReactionFeedback)
- func EstimateMessageChars(msg openai.ChatCompletionMessageParamUnion) int
- func ExtractBeeperPreviews(previews []*linkpreview.PreviewWithImage) []*event.BeeperLinkPreview
- func ExtractSystemPrompt(messages []UnifiedMessage) string
- func ExtractURLs(text string, maxURLs int) []string
- func FormatPreviewsForContext(previews []*event.BeeperLinkPreview, maxChars int) string
- func FormatReactionFeedback(feedback []ReactionFeedback) string
- func GetPDFEngineFromContext(ctx context.Context) string
- func HumanUserID(loginID networkid.UserLoginID) networkid.UserID
- func IsCacheTTLEligibleProvider(model string) bool
- func JoinProxyPath(base, suffix string) string
- func LimitHistoryTurns(prompt []openai.ChatCompletionMessageParamUnion, limit int) []openai.ChatCompletionMessageParamUnion
- func MakeMessageID(eventID id.EventID) networkid.MessageID
- func MakePDFPluginMiddleware(defaultEngine string) option.Middleware
- func MakeToolDedupMiddleware(log zerolog.Logger) option.Middleware
- func ModelOverrideFromContext(ctx context.Context) (string, bool)
- func ModelUserID(modelID string) networkid.UserID
- func NewCallID() string
- func NewLinkPreviewer(config linkpreview.Config) *linkpreview.Previewer
- func NewTurnID() string
- func NormalizeMagicProxyBaseURL(raw string) string
- func NormalizeMimeString(value string) string
- func NormalizeToolAlias(name string) string
- func ParseExistingLinkPreviews(rawContent map[string]any) []*event.BeeperLinkPreview
- func ParseModelFromGhostID(ghostID string) string
- func PreviewsToMapSlice(previews []*event.BeeperLinkPreview) []map[string]any
- func PruneContext(prompt []openai.ChatCompletionMessageParamUnion, config *PruningConfig, ...) []openai.ChatCompletionMessageParamUnion
- func RegisterAICommand(def commandregistry.Definition) *commands.FullHandler
- func RequireClientMeta(ce *commands.Event) (*AIClient, *PortalMetadata, bool)
- func SanitizeToolCallID(id string, mode string) string
- func ServiceTokensEmpty(tokens *ServiceTokens) bool
- func ShouldDebounce(evt *event.Event, body string) bool
- func ShouldFallbackOnError(err error) bool
- func ShouldIncludeInHistory(meta *MessageMetadata) bool
- func ShouldRefreshCacheTTL(meta *PortalMetadata) bool
- func SmartTruncatePrompt(prompt []openai.ChatCompletionMessageParamUnion, targetReduction float64) []openai.ChatCompletionMessageParamUnion
- func StripEnvelope(text string) string
- func StripSilentToken(text string) string
- func ToOpenAIChatTools(tools []ToolDefinition, log *zerolog.Logger) []openai.ChatCompletionToolUnionParam
- func ToOpenAIResponsesInput(messages []UnifiedMessage) responses.ResponseInputParam
- func ToOpenAITools(tools []ToolDefinition, strictMode ToolStrictMode, log *zerolog.Logger) []responses.ToolUnionParam
- func ToolSchemaToMap(schema any) map[string]any
- func TraceEnabled(meta *PortalMetadata) bool
- func TraceFull(meta *PortalMetadata) bool
- func UploadPreviewImages(ctx context.Context, previews []*linkpreview.PreviewWithImage, ...) []*event.BeeperLinkPreview
- func WithBridgeToolContext(ctx context.Context, btc *BridgeToolContext) context.Context
- func WithModelOverride(ctx context.Context, model string) context.Context
- func WithPDFEngine(ctx context.Context, engine string) context.Context
- func WithTypingContext(ctx context.Context, typing *TypingContext) context.Context
- type AIClient
- func (oc *AIClient) API() openai.Client
- func (oc *AIClient) APIKey() string
- func (oc *AIClient) BackgroundContext(ctx context.Context) context.Context
- func (oc *AIClient) BroadcastRoomState(ctx context.Context, portal *bridgev2.Portal) error
- func (oc *AIClient) BuildBasePrompt(ctx context.Context, portal *bridgev2.Portal, meta *PortalMetadata) ([]openai.ChatCompletionMessageParamUnion, error)
- func (oc *AIClient) BuildMatrixInboundBody(ctx context.Context, portal *bridgev2.Portal, meta *PortalMetadata, ...) string
- func (oc *AIClient) BuildPrompt(ctx context.Context, portal *bridgev2.Portal, meta *PortalMetadata, ...) ([]openai.ChatCompletionMessageParamUnion, error)
- func (oc *AIClient) BuildPromptWithLinkContext(ctx context.Context, portal *bridgev2.Portal, meta *PortalMetadata, ...) ([]openai.ChatCompletionMessageParamUnion, error)
- func (oc *AIClient) CanUseImageGeneration() bool
- func (oc *AIClient) CancelRoomRun(roomID id.RoomID) bool
- func (oc *AIClient) ClearActiveRoomsAndQueues()
- func (oc *AIClient) ClearPendingQueue(roomID id.RoomID)
- func (oc *AIClient) Connect(ctx context.Context)
- func (oc *AIClient) Connector() *OpenAIConnector
- func (oc *AIClient) Disconnect()
- func (oc *AIClient) DisconnectCtx() context.Context
- func (oc *AIClient) DispatchInternalMessage(ctx context.Context, portal *bridgev2.Portal, meta *PortalMetadata, ...) (id.EventID, bool, error)
- func (oc *AIClient) DownloadAndEncodeMedia(ctx context.Context, mxcURL string, encryptedFile *event.EncryptedFileInfo, ...) (string, string, error)
- func (oc *AIClient) EffectiveModel(meta *PortalMetadata) string
- func (oc *AIClient) EnsureModelInRoom(ctx context.Context, portal *bridgev2.Portal) error
- func (oc *AIClient) ExecuteBuiltinTool(ctx context.Context, portal *bridgev2.Portal, toolName string, argsJSON string) (string, error)
- func (oc *AIClient) FindModelInfo(modelID string) *ModelInfo
- func (oc *AIClient) GetCapabilities(ctx context.Context, portal *bridgev2.Portal) *event.RoomFeatures
- func (oc *AIClient) GetChatInfo(ctx context.Context, portal *bridgev2.Portal) (*bridgev2.ChatInfo, error)
- func (oc *AIClient) GetContactList(ctx context.Context) ([]*bridgev2.ResolveIdentifierResponse, error)
- func (oc *AIClient) GetModelContextWindow(meta *PortalMetadata) int
- func (oc *AIClient) GetModelIntent(ctx context.Context, portal *bridgev2.Portal) bridgev2.MatrixAPI
- func (oc *AIClient) GetStreamingHooks() StreamingHooks
- func (oc *AIClient) GetUserInfo(ctx context.Context, ghost *bridgev2.Ghost) (*bridgev2.UserInfo, error)
- func (oc *AIClient) HandleMatrixDisappearingTimer(ctx context.Context, msg *bridgev2.MatrixDisappearingTimer) (bool, error)
- func (oc *AIClient) HandleMatrixEdit(ctx context.Context, edit *bridgev2.MatrixEdit) error
- func (oc *AIClient) HandleMatrixMessage(ctx context.Context, msg *bridgev2.MatrixMessage) (*bridgev2.MatrixMessageResponse, error)
- func (oc *AIClient) HandleMatrixMessageRemove(ctx context.Context, msg *bridgev2.MatrixMessageRemove) error
- func (oc *AIClient) HandleMatrixReaction(ctx context.Context, msg *bridgev2.MatrixReaction) (*database.Reaction, error)
- func (oc *AIClient) HandleMatrixReactionRemove(ctx context.Context, msg *bridgev2.MatrixReactionRemove) error
- func (oc *AIClient) HandleMatrixTyping(ctx context.Context, typing *bridgev2.MatrixTyping) error
- func (oc *AIClient) HasQueuedItems(roomID id.RoomID) (hasItems bool, droppedCount int)
- func (oc *AIClient) ImplicitModelCatalogEntries(meta *UserLoginMetadata) []ModelCatalogEntry
- func (oc *AIClient) InitPortalForChat(ctx context.Context, opts PortalInitOpts) (*bridgev2.Portal, *bridgev2.ChatInfo, error)
- func (oc *AIClient) IsLoggedIn() bool
- func (oc *AIClient) IsThisUser(ctx context.Context, userID networkid.UserID) bool
- func (oc *AIClient) ListAllChatPortals(ctx context.Context) ([]*bridgev2.Portal, error)
- func (oc *AIClient) ListAvailableModels(ctx context.Context, forceRefresh bool) ([]ModelInfo, error)
- func (oc *AIClient) Log() *zerolog.Logger
- func (oc *AIClient) Logger() *zerolog.Logger
- func (oc *AIClient) LoggerForContext(ctx context.Context) *zerolog.Logger
- func (oc *AIClient) LogoutRemote(ctx context.Context)
- func (oc *AIClient) MarkMessageSendSuccess(ctx context.Context, portal *bridgev2.Portal, evt *event.Event)
- func (oc *AIClient) ModelFallbackChain(ctx context.Context, meta *PortalMetadata) []string
- func (oc *AIClient) ModelIDForAPI(modelID string) string
- func (oc *AIClient) OverrideModel(meta *PortalMetadata, modelID string) *PortalMetadata
- func (oc *AIClient) PreHandleMatrixReaction(ctx context.Context, msg *bridgev2.MatrixReaction) (bridgev2.MatrixReactionPreResponse, error)
- func (oc *AIClient) Provider() AIProvider
- func (oc *AIClient) ResolveIdentifier(ctx context.Context, identifier string, createChat bool) (*bridgev2.ResolveIdentifierResponse, error)
- func (oc *AIClient) ResolveModelID(ctx context.Context, modelID string) (string, bool, error)
- func (oc *AIClient) ResolveServiceConfig() ServiceConfigMap
- func (oc *AIClient) ResolveUserTimezone() (string, *time.Location)
- func (oc *AIClient) ResolveVisionModelForImage(ctx context.Context, meta *PortalMetadata) (string, bool)
- func (oc *AIClient) ResponseWithRetryAndReasoningFallback(ctx context.Context, evt *event.Event, portal *bridgev2.Portal, ...) (bool, error)
- func (oc *AIClient) SavePortalQuiet(ctx context.Context, portal *bridgev2.Portal, action string)
- func (oc *AIClient) SearchUsers(ctx context.Context, query string) ([]*bridgev2.ResolveIdentifierResponse, error)
- func (oc *AIClient) SelectResponseFn(meta *PortalMetadata, prompt []openai.ChatCompletionMessageParamUnion) (ResponseFunc, string)
- func (oc *AIClient) SendPlainAssistantMessage(ctx context.Context, portal *bridgev2.Portal, text string) error
- func (oc *AIClient) SendPlainAssistantMessageWithResult(ctx context.Context, portal *bridgev2.Portal, text string) error
- func (oc *AIClient) SendSystemNotice(ctx context.Context, portal *bridgev2.Portal, text string)
- func (oc *AIClient) SendWelcomeMessage(ctx context.Context, portal *bridgev2.Portal)
- func (oc *AIClient) SetAPI(c openai.Client)
- func (oc *AIClient) SetClientHooks(hooks AIClientHooks)
- func (oc *AIClient) SetDisconnectCtx(ctx context.Context, cancel context.CancelFunc)
- func (oc *AIClient) SetProvider(p AIProvider)
- func (oc *AIClient) SetRoomName(ctx context.Context, portal *bridgev2.Portal, name string) error
- func (oc *AIClient) SetRoomNameNoSave(ctx context.Context, portal *bridgev2.Portal, name string) error
- func (oc *AIClient) SetRoomSystemPrompt(ctx context.Context, portal *bridgev2.Portal, prompt string) error
- func (oc *AIClient) SetRoomSystemPromptNoSave(ctx context.Context, portal *bridgev2.Portal, prompt string) error
- func (oc *AIClient) SetRoomTopic(ctx context.Context, portal *bridgev2.Portal, topic string) error
- func (oc *AIClient) SetStreamingHooks(h StreamingHooks)
- func (oc *AIClient) StreamingResponseWithRetry(ctx context.Context, evt *event.Event, portal *bridgev2.Portal, ...)
- type AIClientHooks
- type AIErrorContent
- type AIErrorData
- type AIProvider
- type AckReactionGateParams
- type AckReactionScope
- type Annotation
- type AnnotationSource
- type AssistantTurnAI
- type AssistantTurnContent
- type AttachmentMetadata
- type BeeperConfig
- type BridgeConfig
- type BridgePolicy
- type BridgeToolContext
- type ChannelConfig
- type ChannelDefaultsConfig
- type ChannelsConfig
- type ClientFactory
- type CommandsConfig
- type Config
- type ConnectorHooks
- type ContentPart
- type ContentPartType
- type DebounceBuffer
- type DebounceEntry
- type Debouncer
- type DirectChatConfig
- type EffectiveSettings
- type EnvelopeFormatOptions
- type EventUsageInfo
- type FetchConfig
- type FileAnnotation
- type GenerateParams
- type GenerateResponse
- type GeneratedFileRef
- type GenerationDetails
- type GenerationProgress
- type GenerationStatusContent
- type GhostMetadata
- type GravatarProfile
- type GravatarState
- type GroupChatConfig
- type ImageGenerationMetadata
- type InboundConfig
- type InboundDebounceConfig
- type MediaToolsConfig
- type MediaUnderstandingAttachmentDecision
- type MediaUnderstandingAttachmentsConfig
- type MediaUnderstandingCapability
- type MediaUnderstandingConfig
- type MediaUnderstandingDecision
- type MediaUnderstandingDeepgramConfig
- type MediaUnderstandingKind
- type MediaUnderstandingModelConfig
- type MediaUnderstandingModelDecision
- type MediaUnderstandingOutput
- type MediaUnderstandingScopeConfig
- type MediaUnderstandingScopeMatch
- type MediaUnderstandingScopeRule
- type MessageMetadata
- type MessageRole
- type MessagesConfig
- type ModelAPI
- type ModelCache
- type ModelCapabilities
- type ModelCapabilitiesEventContent
- type ModelCatalogEntry
- type ModelDefinitionConfig
- type ModelInfo
- type ModelProviderConfig
- type ModelsConfig
- type NoopStreamingHooks
- func (NoopStreamingHooks) AdditionalTools(context.Context, *PortalMetadata) []responses.ToolUnionParam
- func (NoopStreamingHooks) OnContinuationPreSend(_ context.Context, _ *streamingState, outputs []functionCallOutput) (responses.ResponseInputParam, []functionCallOutput)
- func (NoopStreamingHooks) OnStreamFinished(context.Context, *bridgev2.Portal, *streamingState, *PortalMetadata)
- func (NoopStreamingHooks) OnToolCallComplete(context.Context, string, string, *streamingState)
- func (NoopStreamingHooks) ShouldContinue(*streamingState) bool
- type NormalizedLocation
- type OpenAIConnector
- func (oc *OpenAIConnector) ApplyRuntimeDefaults()
- func (oc *OpenAIConnector) Bridge() *bridgev2.Bridge
- func (oc *OpenAIConnector) CreateLogin(ctx context.Context, user *bridgev2.User, flowID string) (bridgev2.LoginProcess, error)
- func (oc *OpenAIConnector) FillPortalBridgeInfo(portal *bridgev2.Portal, content *event.BridgeEventContent)
- func (oc *OpenAIConnector) GetBridgeInfoVersion() (info, capabilities int)
- func (oc *OpenAIConnector) GetCapabilities() *bridgev2.NetworkGeneralCapabilities
- func (oc *OpenAIConnector) GetClient(id networkid.UserLoginID) (bridgev2.NetworkAPI, bool)
- func (oc *OpenAIConnector) GetClients() map[networkid.UserLoginID]bridgev2.NetworkAPI
- func (oc *OpenAIConnector) GetConfig() (example string, data any, upgrader configupgrade.Upgrader)
- func (oc *OpenAIConnector) GetDBMetaTypes() database.MetaTypes
- func (oc *OpenAIConnector) GetLoginFlows() []bridgev2.LoginFlow
- func (oc *OpenAIConnector) GetName() bridgev2.BridgeName
- func (oc *OpenAIConnector) Init(bridge *bridgev2.Bridge)
- func (oc *OpenAIConnector) LoadUserLogin(ctx context.Context, login *bridgev2.UserLogin) error
- func (oc *OpenAIConnector) Policy() BridgePolicy
- func (oc *OpenAIConnector) RemoveClient(id networkid.UserLoginID)
- func (oc *OpenAIConnector) ResolveBeeperBaseURL(meta *UserLoginMetadata) string
- func (oc *OpenAIConnector) ResolveBeeperToken(meta *UserLoginMetadata) string
- func (oc *OpenAIConnector) ResolveExaProxyBaseURL(meta *UserLoginMetadata) string
- func (oc *OpenAIConnector) ResolveOpenAIAPIKey(meta *UserLoginMetadata) string
- func (oc *OpenAIConnector) ResolveOpenAIBaseURL() string
- func (oc *OpenAIConnector) ResolveOpenRouterAPIKey(meta *UserLoginMetadata) string
- func (oc *OpenAIConnector) ResolveProviderAPIKey(meta *UserLoginMetadata) string
- func (oc *OpenAIConnector) ResolveProxyRoot(meta *UserLoginMetadata) string
- func (oc *OpenAIConnector) ResolveServiceConfig(meta *UserLoginMetadata) ServiceConfigMap
- func (oc *OpenAIConnector) SetClient(id networkid.UserLoginID, client bridgev2.NetworkAPI)
- func (oc *OpenAIConnector) SetClientFactory(f ClientFactory)
- func (oc *OpenAIConnector) SetHooks(hooks ConnectorHooks)
- func (oc *OpenAIConnector) SetMatrixCredentials(accessToken, homeserver string)
- func (oc *OpenAIConnector) SetPolicy(policy BridgePolicy)
- func (oc *OpenAIConnector) Start(ctx context.Context) error
- func (oc *OpenAIConnector) Stop(ctx context.Context)
- type OpenAILogin
- type OpenAIProvider
- func NewOpenAIProvider(apiKey string, log zerolog.Logger) (*OpenAIProvider, error)
- func NewOpenAIProviderWithBaseURL(apiKey, baseURL string, log zerolog.Logger) (*OpenAIProvider, error)
- func NewOpenAIProviderWithPDFPlugin(apiKey, baseURL, userID, pdfEngine string, headers map[string]string, ...) (*OpenAIProvider, error)
- func NewOpenAIProviderWithUserID(apiKey, baseURL, userID string, log zerolog.Logger) (*OpenAIProvider, error)
- func (o *OpenAIProvider) Client() openai.Client
- func (o *OpenAIProvider) Generate(ctx context.Context, params GenerateParams) (*GenerateResponse, error)
- func (o *OpenAIProvider) GenerateStream(ctx context.Context, params GenerateParams) (<-chan StreamEvent, error)
- func (o *OpenAIProvider) ListModels(ctx context.Context) ([]ModelInfo, error)
- func (o *OpenAIProvider) Name() string
- type OpenAIRemoteMessage
- func (m *OpenAIRemoteMessage) AddLogContext(c zerolog.Context) zerolog.Context
- func (m *OpenAIRemoteMessage) ConvertMessage(ctx context.Context, portal *bridgev2.Portal, intent bridgev2.MatrixAPI) (*bridgev2.ConvertedMessage, error)
- func (m *OpenAIRemoteMessage) GetID() networkid.MessageID
- func (m *OpenAIRemoteMessage) GetPortalKey() networkid.PortalKey
- func (m *OpenAIRemoteMessage) GetSender() bridgev2.EventSender
- func (m *OpenAIRemoteMessage) GetStreamOrder() int64
- func (m *OpenAIRemoteMessage) GetTimestamp() time.Time
- func (m *OpenAIRemoteMessage) GetTransactionID() networkid.TransactionID
- func (m *OpenAIRemoteMessage) GetType() bridgev2.RemoteEventType
- type PDFConfig
- type PDFPluginConfig
- type PortalInitOpts
- type PortalMetadata
- type ProviderBraveConfig
- type ProviderConfig
- type ProviderDirectConfig
- type ProviderExaConfig
- type ProviderOpenRouterConfig
- type ProviderPerplexityConfig
- type ProvidersConfig
- type ProvisioningAPI
- type PruningConfig
- type QueueConfig
- type ReactionFeedback
- type ReactionQueue
- type ReasoningEffortOption
- type ReplyTarget
- type ReqSetDefaults
- type ResponseDirectives
- type ResponseFunc
- type ResponsePrefixContext
- type ResultStatus
- type RoomCapabilitiesEventContent
- type RoomSettingsEventContent
- type SearchConfig
- type ServiceConfig
- type ServiceConfigMap
- type ServiceTokens
- type SettingExplanation
- type SettingSource
- type StatusDisplay
- type StepBoundaryContent
- type StepDisplay
- type StreamContentType
- type StreamCursor
- type StreamDeltaContent
- type StreamEvent
- type StreamEventType
- type StreamingConfig
- type StreamingHooks
- type ThinkingContent
- type TimingInfo
- type ToolArtifact
- type ToolCallContent
- type ToolCallData
- type ToolCallMetadata
- type ToolCallResult
- type ToolDefinition
- type ToolDisplay
- type ToolExecutor
- type ToolInfo
- type ToolOutputPreview
- type ToolProgressContent
- type ToolProgressDetails
- type ToolProvidersConfig
- type ToolResultContent
- type ToolResultData
- type ToolResultDisplay
- type ToolStatus
- type ToolStrictMode
- type ToolType
- type TurnCancelledContent
- type TurnStatus
- type TypingContext
- type TypingController
- type TypingControllerOptions
- type TypingMode
- type TypingSignaler
- type UnifiedMessage
- type UsageInfo
- type UserDefaults
- type UserLoginMetadata
Constants ¶
const ( ToolNameCalculator = toolspec.CalculatorName ToolNameWebSearch = toolspec.WebSearchName )
Tool name constants
const ( AIMaxTextLength = 100000 AIEditMaxAge = 24 * time.Hour )
AI bridge capability constants
const ( // Retryable errors ErrorContextTooLong = "context_too_long" ErrorContentFilter = "content_filter" ErrorToolFailed = "tool_failed" ErrorToolTimeout = "tool_timeout" // Non-retryable errors ErrorCancelled = "cancelled" ErrorInvalidInput = "invalid_input" )
const ( ToolWebSearch = aimodels.ToolWebSearch ToolFunctionCalling = aimodels.ToolFunctionCalling )
Tool constants for model capabilities
const ( RelReplace = matrixevents.RelReplace RelReference = matrixevents.RelReference RelThread = matrixevents.RelThread RelInReplyTo = matrixevents.RelInReplyTo )
Relation types
const ( BeeperAIKey = matrixevents.BeeperAIKey BeeperAIToolCallKey = matrixevents.BeeperAIToolCallKey BeeperAIToolResultKey = matrixevents.BeeperAIToolResultKey )
Content field keys
const ( ProviderBeeper = "beeper" // Legacy alias for magic_proxy ProviderOpenAI = "openai" // Direct OpenAI API ProviderOpenRouter = "openrouter" // Direct OpenRouter API ProviderMagicProxy = "magic_proxy" // Magic Proxy (OpenRouter-style) FlowCustom = "custom" // Custom login flow (provider resolved during login) )
Provider constants — all use the OpenAI SDK with different base URLs.
const ( ToolNameMessage = toolspec.MessageName ImageResultPrefix = "IMAGE:" ImagesResultPrefix = "IMAGES:" TTSResultPrefix = "AUDIO:" )
const DefaultDebounceMs = 0
DefaultDebounceMs is the default debounce delay in milliseconds.
const DefaultRawModeSystemPrompt = defaultRawModeSystemPrompt
DefaultRawModeSystemPrompt is the default system prompt for model-only (raw) rooms.
const SessionGreetingPrompt = sessionGreetingPrompt
SessionGreetingPrompt is the greeting prompt for new sessions.
const SilentReplyToken = "NO_REPLY"
SilentReplyToken is the token used to indicate no response is needed. Matches clawdbot/OpenClaw's SILENT_REPLY_TOKEN.
const ToolNameBetterWebSearch = "better_web_search"
Variables ¶
var ( IsGoogleModel = turnval.IsGoogleModel ValidateGeminiTurns = turnval.ValidateGeminiTurns SanitizeGoogleTurnOrdering = turnval.SanitizeGoogleTurnOrdering )
Re-export turn validation functions from the library package.
var AIErrorEventType = matrixevents.AIErrorEventType
AIErrorEventType represents AI generation errors that are part of the conversation.
var AssistantTurnEventType = matrixevents.AssistantTurnEventType
AssistantTurnEventType is the container event for an assistant's response.
var BuildSessionIdentityHint = buildSessionIdentityHint
BuildSessionIdentityHint builds the session identity hint from portal and metadata.
var CommandActivation = registerAICommand(commandregistry.Definition{ Name: "activation", Description: "Set group activation policy (mention|always)", Args: "<mention|always>", Section: HelpSectionAI, RequiresPortal: true, RequiresLogin: true, Handler: fnActivation, })
CommandActivation handles the !ai activation command.
var CommandCommands = registerAICommand(commandregistry.Definition{ Name: "commands", Aliases: []string{"cmds"}, Description: "Show AI command groups and recommended command forms", Section: HelpSectionAI, RequiresPortal: false, RequiresLogin: true, Handler: fnCommands, })
var CommandConfig = registerAICommand(commandregistry.Definition{ Name: "config", Description: "Show current chat configuration", Section: HelpSectionAI, RequiresPortal: true, RequiresLogin: true, Handler: fnConfig, })
CommandConfig handles the !ai config command
var CommandContext = registerAICommand(commandregistry.Definition{ Name: "context", Description: "Get or set context message limit (1-100)", Args: "[_count_]", Section: HelpSectionAI, RequiresPortal: true, RequiresLogin: true, Handler: fnContext, })
CommandContext handles the !ai context command
var CommandDebounce = registerAICommand(commandregistry.Definition{ Name: "debounce", Description: "Get or set message debounce delay (ms), 'off' to disable, 'default' to reset", Args: "[_delay_|off|default]", Section: HelpSectionAI, RequiresPortal: true, RequiresLogin: true, Handler: fnDebounce, })
CommandDebounce handles the !ai debounce command
var CommandFork = registerAICommand(commandregistry.Definition{ Name: "fork", Description: "Fork conversation to a new chat", Args: "[_event_id_]", Section: HelpSectionAI, RequiresPortal: true, RequiresLogin: true, Handler: fnFork, })
CommandFork handles the !ai fork command
var CommandMode = registerAICommand(commandregistry.Definition{ Name: "mode", Description: "Set conversation mode (messages|responses)", Args: "[_mode_]", Section: HelpSectionAI, RequiresPortal: true, RequiresLogin: true, Handler: fnMode, })
CommandMode handles the !ai mode command
var CommandModel = registerAICommand(commandregistry.Definition{ Name: "model", Description: "Get or set the AI model for this chat", Args: "[_model name_]", Section: HelpSectionAI, RequiresPortal: true, RequiresLogin: true, Handler: fnModel, })
CommandModel handles the !ai model command
var CommandModels = registerAICommand(commandregistry.Definition{ Name: "models", Description: "List all available models", Section: HelpSectionAI, RequiresLogin: true, Handler: fnModels, })
CommandModels handles the !ai models command
var CommandNew = registerAICommand(commandregistry.Definition{ Name: "new", Description: "Create a new chat (model-first)", Args: "[model]", Section: HelpSectionAI, RequiresPortal: true, RequiresLogin: true, Handler: fnNew, })
CommandNew handles the !ai new command
var CommandPlayground = registerAICommand(commandregistry.Definition{ Name: "playground", Aliases: []string{"sandbox"}, Description: "Manage AI chat rooms (new, list)", Args: "<new [model] | list>", Section: HelpSectionAI, RequiresLogin: true, Handler: fnPlayground, })
CommandPlayground handles the !ai playground command with sub-commands.
var CommandQueue = registerAICommand(commandregistry.Definition{ Name: "queue", Description: "Inspect or configure the message queue", Args: "[status|reset|<mode>] [debounce:<dur>] [cap:<n>] [drop:<old|new|summarize>]", Section: HelpSectionAI, RequiresPortal: true, RequiresLogin: true, Handler: fnQueue, })
CommandQueue handles the !ai queue command.
var CommandReasoning = registerAICommand(commandregistry.Definition{ Name: "reasoning", Description: "Get or set reasoning visibility/effort (off|on|low|medium|high|xhigh)", Args: "[level]", Section: HelpSectionAI, RequiresPortal: true, RequiresLogin: true, Handler: fnReasoning, })
CommandReasoning handles the !ai reasoning command.
var CommandRegenerate = registerAICommand(commandregistry.Definition{ Name: "regenerate", Description: "Regenerate the last AI response", Section: HelpSectionAI, RequiresPortal: true, RequiresLogin: true, Handler: fnRegenerate, })
CommandRegenerate handles the !ai regenerate command
var CommandReset = registerAICommand(commandregistry.Definition{ Name: "reset", Description: "Start a new session/thread in this room", Section: HelpSectionAI, RequiresPortal: true, RequiresLogin: true, Handler: fnReset, })
CommandReset handles the !ai reset command.
var CommandSend = registerAICommand(commandregistry.Definition{ Name: "send", Description: "Allow/deny sending messages (on|off|inherit)", Args: "<on|off|inherit>", Section: HelpSectionAI, RequiresPortal: true, RequiresLogin: true, Handler: fnSend, })
CommandSend handles the !ai send command.
var CommandStatus = registerAICommand(commandregistry.Definition{ Name: "status", Description: "Show current session status", Section: HelpSectionAI, RequiresPortal: true, RequiresLogin: true, Handler: fnStatus, })
CommandStatus handles the !ai status command.
var CommandStop = registerAICommand(commandregistry.Definition{ Name: "stop", Aliases: []string{"abort", "interrupt"}, Description: "Abort the current run and clear the pending queue", Section: HelpSectionAI, RequiresPortal: true, RequiresLogin: true, Handler: fnStop, })
CommandStop handles the !ai stop command.
var CommandSystemPrompt = registerAICommand(commandregistry.Definition{ Name: "system-prompt", Aliases: []string{"prompt", "system"}, Description: "Get or set the system prompt (shows full constructed prompt)", Args: "[_text_]", Section: HelpSectionAI, RequiresPortal: true, RequiresLogin: true, Handler: fnSystemPrompt, })
CommandSystemPrompt handles the !ai system-prompt command
var CommandTemp = registerAICommand(commandregistry.Definition{ Name: "temp", Aliases: []string{"temperature"}, Description: "Get or set the temperature (0-2)", Args: "[_value_]", Section: HelpSectionAI, RequiresPortal: true, RequiresLogin: true, Handler: fnTemp, })
CommandTemp handles the !ai temp command
var CommandThink = registerAICommand(commandregistry.Definition{ Name: "think", Description: "Get or set thinking level (off|minimal|low|medium|high|xhigh)", Args: "[level]", Section: HelpSectionAI, RequiresPortal: true, RequiresLogin: true, Handler: fnThink, })
CommandThink handles the !ai think command.
var CommandTimezone = registerAICommand(commandregistry.Definition{ Name: "timezone", Aliases: []string{"tz"}, Description: "Get or set your timezone for all chats (IANA name)", Args: "[_timezone_|reset]", Section: HelpSectionAI, RequiresPortal: true, RequiresLogin: true, Handler: fnTimezone, })
CommandTimezone handles the !ai timezone command
var CommandTitle = registerAICommand(commandregistry.Definition{ Name: "title", Aliases: []string{"retitle"}, Description: "Regenerate the chat room title", Section: HelpSectionAI, RequiresPortal: true, RequiresLogin: true, Handler: fnTitle, })
CommandTitle handles the !ai title command
var CommandTokens = registerAICommand(commandregistry.Definition{ Name: "tokens", Aliases: []string{"maxtokens"}, Description: "Get or set max completion tokens (1-16384)", Args: "[_count_]", Section: HelpSectionAI, RequiresPortal: true, RequiresLogin: true, Handler: fnTokens, })
CommandTokens handles the !ai tokens command
var CommandTools = registerAICommand(commandregistry.Definition{ Name: "tools", Description: "Enable/disable tools", Args: "[on|off] [_tool_]", Section: HelpSectionAI, RequiresPortal: true, RequiresLogin: true, Handler: fnTools, })
CommandTools handles the !ai tools command
var CommandTyping = registerAICommand(commandregistry.Definition{ Name: "typing", Description: "Get or set typing indicator behavior for this chat", Args: "[never|instant|thinking|message|off|reset|interval <seconds>]", Section: HelpSectionAI, RequiresPortal: true, RequiresLogin: true, Handler: fnTyping, })
CommandTyping handles the !ai typing command
var CommandVerbose = registerAICommand(commandregistry.Definition{ Name: "verbose", Aliases: []string{"v"}, Description: "Get or set verbosity (off|on|full)", Args: "[level]", Section: HelpSectionAI, RequiresPortal: true, RequiresLogin: true, Handler: fnVerbose, })
CommandVerbose handles the !ai verbose command.
var CommandWhoami = registerAICommand(commandregistry.Definition{ Name: "whoami", Aliases: []string{"id"}, Description: "Show your Matrix user ID", Section: HelpSectionAI, RequiresPortal: false, RequiresLogin: false, Handler: fnWhoami, })
CommandWhoami handles the !ai whoami command.
var CompactionStatusEventType = matrixevents.CompactionStatusEventType
CompactionStatusEventType notifies clients about context compaction.
var GenerationStatusEventType = matrixevents.GenerationStatusEventType
GenerationStatusEventType provides rich status updates during generation.
var HelpSectionAI = commands.HelpSection{
Name: "AI Chat",
Order: 30,
}
HelpSectionAI is the help section for AI-related commands
var MaybePrependSessionGreeting = maybePrependSessionGreeting
MaybePrependSessionGreeting prepends a session greeting if applicable.
var ModelCapabilitiesEventType = matrixevents.ModelCapabilitiesEventType
ModelCapabilitiesEventType is the Matrix state event type for broadcasting available models.
var RoomCapabilitiesEventType = matrixevents.RoomCapabilitiesEventType
RoomCapabilitiesEventType is the Matrix state event type for bridge-controlled capabilities. It is protected by power levels (100) so only the bridge bot can modify it.
var RoomSettingsEventType = matrixevents.RoomSettingsEventType
RoomSettingsEventType is the Matrix state event type for user-editable settings. It has a normal power level (0) so users can modify it.
var StepBoundaryEventType = matrixevents.StepBoundaryEventType
StepBoundaryEventType represents multi-step boundaries within a turn.
var StreamDeltaEventType = matrixevents.StreamDeltaEventType
StreamDeltaEventType is the custom event type for streaming token updates (ephemeral).
var StreamEventMessageType = matrixevents.StreamEventMessageType
StreamEventMessageType is the unified event type for AI streaming updates (ephemeral).
var ToolCallEventType = matrixevents.ToolCallEventType
ToolCallEventType represents a tool invocation
var ToolProgressEventType = matrixevents.ToolProgressEventType
ToolProgressEventType provides tool execution progress updates
var ToolResultEventType = matrixevents.ToolResultEventType
ToolResultEventType represents a tool execution result
var TurnCancelledEventType = matrixevents.TurnCancelledEventType
TurnCancelledEventType represents a cancelled turn
Functions ¶
func AppendCacheTTLTimestamp ¶
func AppendCacheTTLTimestamp(meta *PortalMetadata)
AppendCacheTTLTimestamp records the current time as the last cache-eligible request timestamp on the portal metadata.
func BuildDebounceKey ¶
BuildDebounceKey creates a key for debouncing: room+sender.
func CombineDebounceEntries ¶
func CombineDebounceEntries(entries []DebounceEntry) (string, int)
CombineDebounceEntries combines multiple entries into a single body. Returns the combined body and the count of combined messages.
func DedupeChatToolParams ¶
func DedupeChatToolParams(tools []openai.ChatCompletionToolUnionParam) []openai.ChatCompletionToolUnionParam
DedupeChatToolParams deduplicates tool parameters by name.
func DefaultChatPortalKey ¶
func DefaultChatPortalKey(loginID networkid.UserLoginID) networkid.PortalKey
DefaultChatPortalKey returns the default portal key for a login.
func DefaultLinkPreviewConfig ¶
func DefaultLinkPreviewConfig() linkpreview.Config
DefaultLinkPreviewConfig returns sensible defaults.
func EnqueueReactionFeedback ¶
func EnqueueReactionFeedback(roomID id.RoomID, feedback ReactionFeedback)
EnqueueReactionFeedback adds reaction feedback for a room.
func EstimateMessageChars ¶
func EstimateMessageChars(msg openai.ChatCompletionMessageParamUnion) int
EstimateMessageChars estimates the character count of a message.
func ExtractBeeperPreviews ¶
func ExtractBeeperPreviews(previews []*linkpreview.PreviewWithImage) []*event.BeeperLinkPreview
ExtractBeeperPreviews extracts just the BeeperLinkPreview from PreviewWithImage slice.
func ExtractSystemPrompt ¶
func ExtractSystemPrompt(messages []UnifiedMessage) string
ExtractSystemPrompt extracts the system prompt from unified messages.
func ExtractURLs ¶
ExtractURLs extracts URLs from text, returning up to maxURLs unique URLs.
func FormatPreviewsForContext ¶
func FormatPreviewsForContext(previews []*event.BeeperLinkPreview, maxChars int) string
FormatPreviewsForContext formats link previews for injection into LLM context.
func FormatReactionFeedback ¶
func FormatReactionFeedback(feedback []ReactionFeedback) string
FormatReactionFeedback formats reaction feedback as context for the AI. Keep the string stable and channel-specific so the model can reason about where the reaction happened.
func GetPDFEngineFromContext ¶
GetPDFEngineFromContext retrieves the PDF engine override from context
func HumanUserID ¶
func HumanUserID(loginID networkid.UserLoginID) networkid.UserID
HumanUserID returns the ghost UserID for a human login.
func IsCacheTTLEligibleProvider ¶
IsCacheTTLEligibleProvider returns true if the model is served by Anthropic (directly or via OpenRouter) and thus eligible for prompt caching.
func JoinProxyPath ¶
JoinProxyPath joins a base URL and suffix path.
func LimitHistoryTurns ¶
func LimitHistoryTurns(prompt []openai.ChatCompletionMessageParamUnion, limit int) []openai.ChatCompletionMessageParamUnion
func MakeMessageID ¶
MakeMessageID creates a message ID from a Matrix event ID
func MakePDFPluginMiddleware ¶
func MakePDFPluginMiddleware(defaultEngine string) option.Middleware
MakePDFPluginMiddleware creates middleware that injects the file-parser plugin for PDFs. The defaultEngine parameter is used as a fallback when no per-request engine is set in context. To set a per-request engine, use WithPDFEngine() to add it to the request context.
func MakeToolDedupMiddleware ¶
func MakeToolDedupMiddleware(log zerolog.Logger) option.Middleware
MakeToolDedupMiddleware removes duplicate tool names from outbound Responses requests.
func ModelOverrideFromContext ¶
ModelOverrideFromContext extracts a model override from context.
func ModelUserID ¶
ModelUserID returns the ghost UserID for a model.
func NewLinkPreviewer ¶
func NewLinkPreviewer(config linkpreview.Config) *linkpreview.Previewer
NewLinkPreviewer creates a new link previewer with the given config.
func NormalizeMagicProxyBaseURL ¶
NormalizeMagicProxyBaseURL normalizes the magic proxy base URL.
func NormalizeMimeString ¶
NormalizeMimeString normalizes a MIME type string.
func NormalizeToolAlias ¶
NormalizeToolAlias resolves tool name aliases.
func ParseExistingLinkPreviews ¶
func ParseExistingLinkPreviews(rawContent map[string]any) []*event.BeeperLinkPreview
ParseExistingLinkPreviews extracts link previews from a Matrix event's raw content.
func ParseModelFromGhostID ¶
ParseModelFromGhostID extracts the model ID from a ghost user ID.
func PreviewsToMapSlice ¶
func PreviewsToMapSlice(previews []*event.BeeperLinkPreview) []map[string]any
PreviewsToMapSlice converts BeeperLinkPreviews to a format suitable for JSON serialization.
func PruneContext ¶
func PruneContext(prompt []openai.ChatCompletionMessageParamUnion, config *PruningConfig, contextWindowTokens int) []openai.ChatCompletionMessageParamUnion
func RegisterAICommand ¶
func RegisterAICommand(def commandregistry.Definition) *commands.FullHandler
RegisterAICommand registers a command definition and returns the handler.
func RequireClientMeta ¶
func RequireClientMeta(ce *commands.Event) (*AIClient, *PortalMetadata, bool)
RequireClientMeta extracts the AIClient and PortalMetadata from a command event.
func SanitizeToolCallID ¶
SanitizeToolCallID cleans a tool call ID for provider requirements.
Modes:
- "strict": strips all non-alphanumeric characters, preserves "call_" prefix
- "strict9": strips non-alphanumeric, truncates to 9 chars (some providers require short IDs)
If the ID is empty after sanitization, a new random call ID is generated.
func ServiceTokensEmpty ¶
func ServiceTokensEmpty(tokens *ServiceTokens) bool
ServiceTokensEmpty reports whether service tokens are empty.
func ShouldDebounce ¶
ShouldDebounce returns false for messages that shouldn't be debounced. Media, commands, and empty messages are processed immediately.
func ShouldFallbackOnError ¶
ShouldFallbackOnError reports whether the error warrants model fallback.
func ShouldIncludeInHistory ¶
func ShouldIncludeInHistory(meta *MessageMetadata) bool
ShouldIncludeInHistory reports whether a message should be included in history.
func ShouldRefreshCacheTTL ¶
func ShouldRefreshCacheTTL(meta *PortalMetadata) bool
ShouldRefreshCacheTTL returns true if the Anthropic prompt cache TTL window is about to expire (or has expired) and a cache-warming request should include a cache_control breakpoint.
func SmartTruncatePrompt ¶
func SmartTruncatePrompt( prompt []openai.ChatCompletionMessageParamUnion, targetReduction float64, ) []openai.ChatCompletionMessageParamUnion
SmartTruncatePrompt truncates a prompt to reduce tokens.
func StripEnvelope ¶
StripEnvelope removes the [Channel Timestamp] envelope prefix from a message body. This is useful when replaying historical messages to the model — the envelope is informative for the current turn but noisy in history.
func StripSilentToken ¶
StripSilentToken removes the silent token from text if present. Returns the cleaned text.
func ToOpenAIChatTools ¶
func ToOpenAIChatTools(tools []ToolDefinition, log *zerolog.Logger) []openai.ChatCompletionToolUnionParam
ToOpenAIChatTools converts tool definitions to OpenAI Chat Completions tool format.
func ToOpenAIResponsesInput ¶
func ToOpenAIResponsesInput(messages []UnifiedMessage) responses.ResponseInputParam
ToOpenAIResponsesInput converts unified messages to OpenAI Responses API format. Supports text + image/PDF inputs for user messages; audio/video are intentionally excluded (caller should fall back to Chat Completions for those).
func ToOpenAITools ¶
func ToOpenAITools(tools []ToolDefinition, strictMode ToolStrictMode, log *zerolog.Logger) []responses.ToolUnionParam
ToOpenAITools converts tool definitions to OpenAI Responses API format
func ToolSchemaToMap ¶
ToolSchemaToMap converts a tool schema to a map.
func TraceEnabled ¶
func TraceEnabled(meta *PortalMetadata) bool
TraceEnabled reports whether tracing is enabled for the portal.
func TraceFull ¶
func TraceFull(meta *PortalMetadata) bool
TraceFull reports whether full tracing is enabled for the portal.
func UploadPreviewImages ¶
func UploadPreviewImages(ctx context.Context, previews []*linkpreview.PreviewWithImage, intent bridgev2.MatrixAPI, roomID id.RoomID) []*event.BeeperLinkPreview
UploadPreviewImages uploads images from PreviewWithImage to Matrix and returns final BeeperLinkPreviews.
func WithBridgeToolContext ¶
func WithBridgeToolContext(ctx context.Context, btc *BridgeToolContext) context.Context
func WithModelOverride ¶
WithModelOverride returns a context with a model override set.
func WithPDFEngine ¶
WithPDFEngine adds a PDF engine override to the context
func WithTypingContext ¶
func WithTypingContext(ctx context.Context, typing *TypingContext) context.Context
Types ¶
type AIClient ¶
AIClient handles communication with AI providers
func NewAIClient ¶
func NewAIClient(login *bridgev2.UserLogin, connector *OpenAIConnector, apiKey string) (*AIClient, error)
NewAIClient creates a new base AIClient. Exported for use by downstream bridges that need to create a base client before wrapping it.
func RequireClient ¶
RequireClient extracts the AIClient from a command event.
func (*AIClient) BackgroundContext ¶
BackgroundContext returns a context that survives request cancellation.
func (*AIClient) BroadcastRoomState ¶
BroadcastRoomState sends current room capabilities and settings to Matrix room state
func (*AIClient) BuildBasePrompt ¶
func (oc *AIClient) BuildBasePrompt(ctx context.Context, portal *bridgev2.Portal, meta *PortalMetadata) ([]openai.ChatCompletionMessageParamUnion, error)
BuildBasePrompt builds only the base prompt (system + history) for a portal.
func (*AIClient) BuildMatrixInboundBody ¶
func (oc *AIClient) BuildMatrixInboundBody( ctx context.Context, portal *bridgev2.Portal, meta *PortalMetadata, evt *event.Event, rawBody string, senderName string, roomName string, isGroup bool, ) string
BuildMatrixInboundBody builds the message body for an inbound Matrix event.
func (*AIClient) BuildPrompt ¶
func (oc *AIClient) BuildPrompt(ctx context.Context, portal *bridgev2.Portal, meta *PortalMetadata, latest string, eventID id.EventID) ([]openai.ChatCompletionMessageParamUnion, error)
BuildPrompt builds the complete prompt for a portal conversation.
func (*AIClient) BuildPromptWithLinkContext ¶
func (oc *AIClient) BuildPromptWithLinkContext( ctx context.Context, portal *bridgev2.Portal, meta *PortalMetadata, latest string, rawEventContent map[string]any, eventID id.EventID, ) ([]openai.ChatCompletionMessageParamUnion, error)
BuildPromptWithLinkContext builds a prompt with link preview context.
func (*AIClient) CanUseImageGeneration ¶
CanUseImageGeneration checks if image generation is available.
func (*AIClient) CancelRoomRun ¶
CancelRoomRun cancels the active room run for a room.
func (*AIClient) ClearActiveRoomsAndQueues ¶
func (oc *AIClient) ClearActiveRoomsAndQueues()
ClearActiveRoomsAndQueues clears in-flight rooms and pending queues (for logout).
func (*AIClient) ClearPendingQueue ¶
ClearPendingQueue clears the pending message queue for a room.
func (*AIClient) Connector ¶
func (oc *AIClient) Connector() *OpenAIConnector
Connector returns the OpenAIConnector that owns this client.
func (*AIClient) Disconnect ¶
func (oc *AIClient) Disconnect()
func (*AIClient) DisconnectCtx ¶
DisconnectCtx returns the per-login cancellation context.
func (*AIClient) DispatchInternalMessage ¶
func (oc *AIClient) DispatchInternalMessage(ctx context.Context, portal *bridgev2.Portal, meta *PortalMetadata, body, source string, excludeFromHistory bool) (id.EventID, bool, error)
DispatchInternalMessage dispatches an internal message to a portal.
func (*AIClient) DownloadAndEncodeMedia ¶
func (oc *AIClient) DownloadAndEncodeMedia(ctx context.Context, mxcURL string, encryptedFile *event.EncryptedFileInfo, maxSizeMB int) (string, string, error)
DownloadAndEncodeMedia downloads an mxc URL and returns base64 + MIME type.
func (*AIClient) EffectiveModel ¶
func (oc *AIClient) EffectiveModel(meta *PortalMetadata) string
EffectiveModel returns the effective model ID for a portal.
func (*AIClient) EnsureModelInRoom ¶
EnsureModelInRoom ensures the AI ghost is present in the portal room.
func (*AIClient) ExecuteBuiltinTool ¶
func (oc *AIClient) ExecuteBuiltinTool(ctx context.Context, portal *bridgev2.Portal, toolName string, argsJSON string) (string, error)
ExecuteBuiltinTool executes a named builtin tool.
func (*AIClient) FindModelInfo ¶
FindModelInfo looks up model information by ID.
func (*AIClient) GetCapabilities ¶
func (*AIClient) GetChatInfo ¶
func (*AIClient) GetContactList ¶
func (oc *AIClient) GetContactList(ctx context.Context) ([]*bridgev2.ResolveIdentifierResponse, error)
GetContactList returns model contacts only.
func (*AIClient) GetModelContextWindow ¶
func (oc *AIClient) GetModelContextWindow(meta *PortalMetadata) int
GetModelContextWindow returns the context window size for the portal's model.
func (*AIClient) GetModelIntent ¶
GetModelIntent returns the Matrix API for the model ghost.
func (*AIClient) GetStreamingHooks ¶
func (oc *AIClient) GetStreamingHooks() StreamingHooks
GetStreamingHooks returns the streaming hooks for this client.
func (*AIClient) GetUserInfo ¶
func (*AIClient) HandleMatrixDisappearingTimer ¶
func (oc *AIClient) HandleMatrixDisappearingTimer(ctx context.Context, msg *bridgev2.MatrixDisappearingTimer) (bool, error)
HandleMatrixDisappearingTimer handles disappearing message timer changes from Matrix. For the AI bridge, we just update the portal's disappear field — the bridge framework handles the actual deletion.
func (*AIClient) HandleMatrixEdit ¶
HandleMatrixEdit handles edits to previously sent messages
func (*AIClient) HandleMatrixMessage ¶
func (oc *AIClient) HandleMatrixMessage(ctx context.Context, msg *bridgev2.MatrixMessage) (*bridgev2.MatrixMessageResponse, error)
HandleMatrixMessage processes incoming Matrix messages and dispatches them to the AI
func (*AIClient) HandleMatrixMessageRemove ¶
func (oc *AIClient) HandleMatrixMessageRemove(ctx context.Context, msg *bridgev2.MatrixMessageRemove) error
HandleMatrixMessageRemove handles message deletions from Matrix. For the AI bridge, we just delete from our database — there's no "remote" to sync to.
func (*AIClient) HandleMatrixReaction ¶
func (*AIClient) HandleMatrixReactionRemove ¶
func (*AIClient) HandleMatrixTyping ¶
HandleMatrixTyping tracks local user typing state for auto-greeting delays.
func (*AIClient) HasQueuedItems ¶
HasQueuedItems checks if a room has any pending queued items.
func (*AIClient) ImplicitModelCatalogEntries ¶
func (oc *AIClient) ImplicitModelCatalogEntries(meta *UserLoginMetadata) []ModelCatalogEntry
ImplicitModelCatalogEntries returns implicit model catalog entries.
func (*AIClient) InitPortalForChat ¶
func (oc *AIClient) InitPortalForChat(ctx context.Context, opts PortalInitOpts) (*bridgev2.Portal, *bridgev2.ChatInfo, error)
InitPortalForChat initializes a portal for a new chat.
func (*AIClient) IsLoggedIn ¶
func (*AIClient) IsThisUser ¶
func (*AIClient) ListAllChatPortals ¶
ListAllChatPortals lists all chat portals for this user.
func (*AIClient) ListAvailableModels ¶
func (oc *AIClient) ListAvailableModels(ctx context.Context, forceRefresh bool) ([]ModelInfo, error)
ListAvailableModels lists all available models.
func (*AIClient) LoggerForContext ¶
LoggerForContext returns the logger enriched with context metadata.
func (*AIClient) LogoutRemote ¶
func (*AIClient) MarkMessageSendSuccess ¶
func (oc *AIClient) MarkMessageSendSuccess(ctx context.Context, portal *bridgev2.Portal, evt *event.Event)
MarkMessageSendSuccess marks a streaming message as sent.
func (*AIClient) ModelFallbackChain ¶
func (oc *AIClient) ModelFallbackChain(ctx context.Context, meta *PortalMetadata) []string
ModelFallbackChain returns the fallback model chain for a portal.
func (*AIClient) ModelIDForAPI ¶
ModelIDForAPI returns the model ID suitable for API calls.
func (*AIClient) OverrideModel ¶
func (oc *AIClient) OverrideModel(meta *PortalMetadata, modelID string) *PortalMetadata
OverrideModel returns portal metadata with the model overridden.
func (*AIClient) PreHandleMatrixReaction ¶
func (oc *AIClient) PreHandleMatrixReaction(ctx context.Context, msg *bridgev2.MatrixReaction) (bridgev2.MatrixReactionPreResponse, error)
func (*AIClient) Provider ¶
func (oc *AIClient) Provider() AIProvider
Provider returns the AI provider for this client.
func (*AIClient) ResolveIdentifier ¶
func (oc *AIClient) ResolveIdentifier(ctx context.Context, identifier string, createChat bool) (*bridgev2.ResolveIdentifierResponse, error)
ResolveIdentifier resolves a model identifier to a ghost and optionally creates a chat.
func (*AIClient) ResolveModelID ¶
ResolveModelID validates and resolves a model ID.
func (*AIClient) ResolveServiceConfig ¶
func (oc *AIClient) ResolveServiceConfig() ServiceConfigMap
ResolveServiceConfig returns the service config for the current login.
func (*AIClient) ResolveUserTimezone ¶
ResolveUserTimezone returns the user's timezone name and location.
func (*AIClient) ResolveVisionModelForImage ¶
func (oc *AIClient) ResolveVisionModelForImage(ctx context.Context, meta *PortalMetadata) (string, bool)
ResolveVisionModelForImage returns the vision model for image understanding.
func (*AIClient) ResponseWithRetryAndReasoningFallback ¶
func (oc *AIClient) ResponseWithRetryAndReasoningFallback( ctx context.Context, evt *event.Event, portal *bridgev2.Portal, meta *PortalMetadata, prompt []openai.ChatCompletionMessageParamUnion, responseFn ResponseFunc, logLabel string, ) (bool, error)
ResponseWithRetryAndReasoningFallback runs response with retry and reasoning fallback.
func (*AIClient) SavePortalQuiet ¶
SavePortalQuiet saves a portal without sending Matrix events.
func (*AIClient) SearchUsers ¶
func (oc *AIClient) SearchUsers(ctx context.Context, query string) ([]*bridgev2.ResolveIdentifierResponse, error)
SearchUsers returns model contacts that match the query.
func (*AIClient) SelectResponseFn ¶
func (oc *AIClient) SelectResponseFn(meta *PortalMetadata, prompt []openai.ChatCompletionMessageParamUnion) (ResponseFunc, string)
SelectResponseFn selects the appropriate response function and log label.
func (*AIClient) SendPlainAssistantMessage ¶
func (oc *AIClient) SendPlainAssistantMessage(ctx context.Context, portal *bridgev2.Portal, text string) error
SendPlainAssistantMessage sends a plain text assistant message.
func (*AIClient) SendPlainAssistantMessageWithResult ¶
func (oc *AIClient) SendPlainAssistantMessageWithResult(ctx context.Context, portal *bridgev2.Portal, text string) error
SendPlainAssistantMessageWithResult sends a plain assistant message and returns error.
func (*AIClient) SendSystemNotice ¶
SendSystemNotice sends a system notice to a portal.
func (*AIClient) SendWelcomeMessage ¶
SendWelcomeMessage sends the welcome message to a new chat.
func (*AIClient) SetClientHooks ¶
func (oc *AIClient) SetClientHooks(hooks AIClientHooks)
SetClientHooks sets the AIClientHooks for this client.
func (*AIClient) SetDisconnectCtx ¶
func (oc *AIClient) SetDisconnectCtx(ctx context.Context, cancel context.CancelFunc)
SetDisconnectCtx sets the per-login cancellation context and cancel func.
func (*AIClient) SetProvider ¶
func (oc *AIClient) SetProvider(p AIProvider)
SetProvider replaces the AI provider for this client.
func (*AIClient) SetRoomName ¶
SetRoomName sets the room name (with save).
func (*AIClient) SetRoomNameNoSave ¶
func (oc *AIClient) SetRoomNameNoSave(ctx context.Context, portal *bridgev2.Portal, name string) error
SetRoomNameNoSave sets the room name without persisting.
func (*AIClient) SetRoomSystemPrompt ¶
func (oc *AIClient) SetRoomSystemPrompt(ctx context.Context, portal *bridgev2.Portal, prompt string) error
SetRoomSystemPrompt sets the room's system prompt and saves.
func (*AIClient) SetRoomSystemPromptNoSave ¶
func (oc *AIClient) SetRoomSystemPromptNoSave(ctx context.Context, portal *bridgev2.Portal, prompt string) error
SetRoomSystemPromptNoSave sets the room's system prompt without saving.
func (*AIClient) SetRoomTopic ¶
SetRoomTopic sets the room topic.
func (*AIClient) SetStreamingHooks ¶
func (oc *AIClient) SetStreamingHooks(h StreamingHooks)
SetStreamingHooks replaces the streaming hooks for this client.
func (*AIClient) StreamingResponseWithRetry ¶
func (oc *AIClient) StreamingResponseWithRetry( ctx context.Context, evt *event.Event, portal *bridgev2.Portal, meta *PortalMetadata, prompt []openai.ChatCompletionMessageParamUnion, )
StreamingResponseWithRetry runs a streaming response with retry logic.
type AIClientHooks ¶
type AIClientHooks interface {
BeforeResponse(ctx context.Context, portal *bridgev2.Portal, meta *PortalMetadata) error
AfterResponse(ctx context.Context, portal *bridgev2.Portal, meta *PortalMetadata) error
GetExtraSystemPrompt(ctx context.Context, portal *bridgev2.Portal, meta *PortalMetadata) string
AdditionalTools(ctx context.Context, portal *bridgev2.Portal, meta *PortalMetadata) []ToolDefinition
}
AIClientHooks allows downstream bridges to inject behavior into the message handling pipeline.
type AIErrorContent ¶
type AIErrorContent struct {
Body string `json:"body"`
MsgType string `json:"msgtype"`
Error *AIErrorData `json:"com.beeper.ai.error"`
}
AIErrorContent represents an AI error timeline event
type AIErrorData ¶
type AIErrorData struct {
TurnID string `json:"turn_id,omitempty"`
ErrorCode string `json:"error_code"`
ErrorMessage string `json:"error_message"`
Retryable bool `json:"retryable"`
Suggestion string `json:"suggestion,omitempty"`
}
AIErrorData contains error details
type AIProvider ¶
type AIProvider interface {
Name() string
GenerateStream(ctx context.Context, params GenerateParams) (<-chan StreamEvent, error)
Generate(ctx context.Context, params GenerateParams) (*GenerateResponse, error)
ListModels(ctx context.Context) ([]ModelInfo, error)
}
type AckReactionGateParams ¶
type AckReactionScope ¶
type AckReactionScope string
const ( AckScopeAll AckReactionScope = "all" AckScopeDirect AckReactionScope = "direct" AckScopeGroupAll AckReactionScope = "group-all" AckScopeGroupMention AckReactionScope = "group-mentions" AckScopeOff AckReactionScope = "off" AckScopeNone AckReactionScope = "none" )
type Annotation ¶
type Annotation struct {
Type string `json:"type"` // "citation", "reference"
Index int `json:"index,omitempty"` // Citation number [1], [2], etc.
StartChar int `json:"start_char,omitempty"`
EndChar int `json:"end_char,omitempty"`
Source *AnnotationSource `json:"source,omitempty"`
}
Annotation represents a citation or reference in the text
type AnnotationSource ¶
type AnnotationSource struct {
Type string `json:"type"` // "web", "document", "file"
URL string `json:"url,omitempty"`
Title string `json:"title,omitempty"`
Snippet string `json:"snippet,omitempty"`
FileID string `json:"file_id,omitempty"`
Filename string `json:"filename,omitempty"`
Page int `json:"page,omitempty"`
}
AnnotationSource provides source information for a citation
type AssistantTurnAI ¶
type AssistantTurnAI struct {
TurnID string `json:"turn_id"`
Model string `json:"model"`
Status TurnStatus `json:"status"`
FinishReason string `json:"finish_reason,omitempty"`
// Embedded thinking (not separate event)
Thinking *ThinkingContent `json:"thinking,omitempty"`
// Token usage
Usage *EventUsageInfo `json:"usage,omitempty"`
// Related events
ToolCalls []string `json:"tool_calls,omitempty"`
Images []string `json:"images,omitempty"`
// Timing information
Timing *TimingInfo `json:"timing,omitempty"`
// Annotations/citations
Annotations []Annotation `json:"annotations,omitempty"`
}
AssistantTurnAI contains the AI-specific metadata for an assistant turn
type AssistantTurnContent ¶
type AssistantTurnContent struct {
// Standard Matrix fallback fields
Body string `json:"body"`
MsgType string `json:"msgtype"`
Format string `json:"format,omitempty"`
FormattedBody string `json:"formatted_body,omitempty"`
// AI-specific metadata
AI *AssistantTurnAI `json:"com.beeper.ai,omitempty"`
}
AssistantTurnContent represents the content of an assistant turn event
type AttachmentMetadata ¶
type AttachmentMetadata struct {
Type string `json:"type"` // "file", "image"
FileID string `json:"file_id,omitempty"`
Filename string `json:"filename,omitempty"`
MxcURI string `json:"mxc_uri,omitempty"`
Mimetype string `json:"mimetype,omitempty"`
Size int `json:"size,omitempty"`
Width int `json:"width,omitempty"` // For images
Height int `json:"height,omitempty"` // For images
}
AttachmentMetadata describes files attached to user messages
type BeeperConfig ¶
type BeeperConfig struct {
BaseURL string `yaml:"base_url"` // Beeper AI proxy endpoint
Token string `yaml:"token"` // Beeper Matrix access token
}
BeeperConfig contains Beeper AI proxy credentials for automatic login. If both BaseURL and Token are set, users don't need to manually log in.
type BridgeConfig ¶
type BridgeConfig struct {
CommandPrefix string `yaml:"command_prefix"`
LogEphemeralEvents *bool `yaml:"log_ephemeral_events"`
}
BridgeConfig tweaks Matrix-side behaviour for the AI bridge.
type BridgePolicy ¶
type BridgePolicy struct {
Name string
NetworkID string
BeeperBridgeType string
ProvisioningEnabled bool
ResolveIdentifier bridgev2.ResolveIdentifierCapabilities
}
BridgePolicy configures connector composition for a dedicated bridge binary. This keeps feature selection out of runtime config flags.
type BridgeToolContext ¶
type BridgeToolContext struct {
Client *AIClient
Portal *bridgev2.Portal
Meta *PortalMetadata
SourceEventID id.EventID
SenderID string
}
BridgeToolContext carries runtime data for tool execution.
func GetBridgeToolContext ¶
func GetBridgeToolContext(ctx context.Context) *BridgeToolContext
type ChannelConfig ¶
type ChannelDefaultsConfig ¶
type ChannelDefaultsConfig struct {
ResponsePrefix string `yaml:"responsePrefix"`
}
type ChannelsConfig ¶
type ChannelsConfig struct {
Defaults *ChannelDefaultsConfig `yaml:"defaults"`
Matrix *ChannelConfig `yaml:"matrix"`
}
ChannelsConfig defines per-channel settings (OpenClaw-style subset for Matrix).
type ClientFactory ¶
type ClientFactory func(login *bridgev2.UserLogin, connector *OpenAIConnector, apiKey string) (bridgev2.NetworkAPI, error)
ClientFactory creates a bridgev2.NetworkAPI for a login. Downstream bridges can use this to create their own client types.
type CommandsConfig ¶
type CommandsConfig struct {
OwnerAllowFrom []string `yaml:"ownerAllowFrom"`
}
CommandsConfig defines command authorization settings (OpenClaw-style).
type Config ¶
type Config struct {
Beeper BeeperConfig `yaml:"beeper"`
Providers ProvidersConfig `yaml:"providers"`
Models *ModelsConfig `yaml:"models"`
Bridge BridgeConfig `yaml:"bridge"`
Tools ToolProvidersConfig `yaml:"tools"`
Channels *ChannelsConfig `yaml:"channels"`
Messages *MessagesConfig `yaml:"messages"`
Commands *CommandsConfig `yaml:"commands"`
// Global settings
DefaultSystemPrompt string `yaml:"default_system_prompt"`
ModelCacheDuration time.Duration `yaml:"model_cache_duration"`
// Context pruning configuration (OpenClaw-style)
Pruning *PruningConfig `yaml:"pruning"`
// Link preview configuration
LinkPreviews *linkpreview.Config `yaml:"link_previews"`
// Inbound message processing configuration
Inbound *InboundConfig `yaml:"inbound"`
// Extra holds bridge-specific config that the core engine carries
// opaquely.
Extra any `yaml:"-"`
}
Config represents the connector-specific configuration that is nested under the `network:` block in the main bridge config.
type ConnectorHooks ¶
type ConnectorHooks interface {
OnStart(ctx context.Context) error
OnNewClient(client *AIClient) error
RegisterCommands(proc *commands.Processor)
RegisterEventHandlers(conn *matrix.Connector)
}
ConnectorHooks allows downstream bridges to inject behavior at key lifecycle points.
type ContentPart ¶
type ContentPartType ¶
type ContentPartType string
const ( ContentTypeText ContentPartType = "text" ContentTypeImage ContentPartType = "image" ContentTypePDF ContentPartType = "pdf" ContentTypeAudio ContentPartType = "audio" ContentTypeVideo ContentPartType = "video" )
Re-export content type constants.
type DebounceBuffer ¶
type DebounceBuffer struct {
// contains filtered or unexported fields
}
DebounceBuffer holds pending messages for a key.
type DebounceEntry ¶
type DebounceEntry struct {
Event *event.Event
Portal *bridgev2.Portal
Meta *PortalMetadata
RawBody string
SenderName string
RoomName string
IsGroup bool
WasMentioned bool
AckEventID id.EventID // Track ack reaction for removal after flush
PendingSent bool // Whether a pending status was already sent for this event
}
DebounceEntry represents a buffered message waiting to be processed.
type Debouncer ¶
type Debouncer struct {
// contains filtered or unexported fields
}
Debouncer buffers rapid messages and combines them. Based on clawdbot's inbound-debounce.ts implementation.
func NewDebouncer ¶
func NewDebouncer(delayMs int, onFlush func([]DebounceEntry), onError func(error, []DebounceEntry)) *Debouncer
NewDebouncer creates a new debouncer with the given delay and callbacks.
func NewDebouncerWithLogger ¶
func NewDebouncerWithLogger(delayMs int, onFlush func([]DebounceEntry), onError func(error, []DebounceEntry), log zerolog.Logger) *Debouncer
NewDebouncerWithLogger creates a new debouncer with logging support.
func (*Debouncer) Enqueue ¶
func (d *Debouncer) Enqueue(key string, entry DebounceEntry, shouldDebounce bool)
Enqueue adds a message to the debounce buffer. If shouldDebounce is false, the message is processed immediately.
func (*Debouncer) EnqueueWithDelay ¶
func (d *Debouncer) EnqueueWithDelay(key string, entry DebounceEntry, shouldDebounce bool, delayMs int)
EnqueueWithDelay adds a message with a custom debounce delay. delayMs: 0 = use default, -1 = immediate (no debounce), >0 = custom delay
func (*Debouncer) FlushAll ¶
func (d *Debouncer) FlushAll()
FlushAll flushes all pending buffers (e.g., on shutdown).
func (*Debouncer) FlushKey ¶
FlushKey immediately flushes the buffer for a key (e.g., when media arrives).
func (*Debouncer) PendingCount ¶
PendingCount returns the number of keys with pending buffers.
type DirectChatConfig ¶
type DirectChatConfig struct {
HistoryLimit int `yaml:"historyLimit"`
}
DirectChatConfig defines direct message defaults.
type EffectiveSettings ¶
type EffectiveSettings struct {
Model SettingExplanation `json:"model"`
SystemPrompt SettingExplanation `json:"system_prompt"`
Temperature SettingExplanation `json:"temperature"`
ReasoningEffort SettingExplanation `json:"reasoning_effort"`
}
EffectiveSettings shows current values with source explanations
type EnvelopeFormatOptions ¶
type EventUsageInfo ¶
type EventUsageInfo struct {
PromptTokens int64 `json:"prompt_tokens,omitempty"`
CompletionTokens int64 `json:"completion_tokens,omitempty"`
ReasoningTokens int64 `json:"reasoning_tokens,omitempty"`
}
EventUsageInfo contains token usage information for Matrix events. This is separate from the internal UsageInfo in provider.go to allow different serialization formats (int64 for Matrix JSON vs int for internal use).
type FetchConfig ¶
type FetchConfig struct {
Provider string `yaml:"provider"`
Fallbacks []string `yaml:"fallbacks"`
Exa ProviderExaConfig `yaml:"exa"`
Direct ProviderDirectConfig `yaml:"direct"`
}
type FileAnnotation ¶
type FileAnnotation struct {
FileHash string `json:"file_hash"` // SHA256 hash of the file content
ParsedText string `json:"parsed_text"` // Extracted text content
PageCount int `json:"page_count,omitempty"` // Number of pages
CreatedAt int64 `json:"created_at"` // Unix timestamp when cached
}
FileAnnotation stores cached parsed PDF content from OpenRouter's file-parser plugin
type GenerateParams ¶
type GenerateParams struct {
Model string
Messages []UnifiedMessage
SystemPrompt string
Temperature float64
MaxCompletionTokens int
Tools []ToolDefinition
ReasoningEffort string
PreviousResponseID string
WebSearchEnabled bool
}
type GenerateResponse ¶
type GenerateResponse struct {
Content string
FinishReason string
ResponseID string
ToolCalls []ToolCallResult
Usage UsageInfo
}
type GeneratedFileRef ¶
GeneratedFileRef stores a reference to a file generated by the assistant (e.g., image generation).
type GenerationDetails ¶
type GenerationDetails struct {
CurrentTool string `json:"current_tool,omitempty"`
CallID string `json:"call_id,omitempty"`
ToolsCompleted int `json:"tools_completed,omitempty"`
ToolsTotal int `json:"tools_total,omitempty"`
}
GenerationDetails provides detailed status information
type GenerationProgress ¶
type GenerationProgress struct {
TokensGenerated int `json:"tokens_generated,omitempty"`
ThinkingTokens int `json:"thinking_tokens,omitempty"`
}
GenerationProgress tracks token generation progress
type GenerationStatusContent ¶
type GenerationStatusContent struct {
TurnID string `json:"turn_id"`
TargetEvent string `json:"target_event,omitempty"`
Status string `json:"status"` // "starting", "thinking", "generating", "tool_use", etc.
StatusMessage string `json:"status_message,omitempty"`
Details *GenerationDetails `json:"details,omitempty"`
Progress *GenerationProgress `json:"progress,omitempty"`
Display *StatusDisplay `json:"display,omitempty"`
}
GenerationStatusContent represents a generation status update
type GhostMetadata ¶
GhostMetadata stores metadata for AI model ghosts
type GravatarProfile ¶
type GravatarProfile struct {
Email string `json:"email,omitempty"`
Hash string `json:"hash,omitempty"`
Profile map[string]any `json:"profile,omitempty"` // Full profile payload
FetchedAt int64 `json:"fetched_at,omitempty"`
}
GravatarProfile stores the selected Gravatar profile for a login.
type GravatarState ¶
type GravatarState struct {
Primary *GravatarProfile `json:"primary,omitempty"`
}
GravatarState stores Gravatar profile state for a login.
type GroupChatConfig ¶
type GroupChatConfig struct {
MentionPatterns []string `yaml:"mentionPatterns"`
Activation string `yaml:"activation"` // mention|always
HistoryLimit int `yaml:"historyLimit"`
}
GroupChatConfig mirrors OpenClaw's group chat settings.
type ImageGenerationMetadata ¶
type ImageGenerationMetadata struct {
TurnID string `json:"turn_id,omitempty"`
Prompt string `json:"prompt,omitempty"`
RevisedPrompt string `json:"revised_prompt,omitempty"`
Model string `json:"model,omitempty"`
Style string `json:"style,omitempty"` // "vivid", "natural"
Quality string `json:"quality,omitempty"` // "standard", "hd"
}
ImageGenerationMetadata is added to m.image events for AI-generated images
type InboundConfig ¶
type InboundConfig struct {
// Deduplication settings
DedupeTTL time.Duration `yaml:"dedupe_ttl"` // Time-to-live for dedupe entries (default: 20m)
DedupeMaxSize int `yaml:"dedupe_max_size"` // Max entries in dedupe cache (default: 5000)
// Debounce settings
DefaultDebounceMs int `yaml:"default_debounce_ms"` // Default debounce delay in ms (default: 500)
}
InboundConfig contains settings for inbound message processing including deduplication and debouncing.
func (*InboundConfig) WithDefaults ¶
func (c *InboundConfig) WithDefaults() *InboundConfig
WithDefaults returns the InboundConfig with default values applied.
type InboundDebounceConfig ¶
type InboundDebounceConfig struct {
DebounceMs int `yaml:"debounceMs"`
ByChannel map[string]int `yaml:"byChannel"`
}
InboundDebounceConfig mirrors OpenClaw's inbound debounce config.
type MediaToolsConfig ¶
type MediaToolsConfig struct {
Models []MediaUnderstandingModelConfig `yaml:"models" json:"models,omitempty"`
Concurrency int `yaml:"concurrency" json:"concurrency,omitempty"`
Image *MediaUnderstandingConfig `yaml:"image" json:"image,omitempty"`
Audio *MediaUnderstandingConfig `yaml:"audio" json:"audio,omitempty"`
Video *MediaUnderstandingConfig `yaml:"video" json:"video,omitempty"`
}
type MediaUnderstandingAttachmentDecision ¶
type MediaUnderstandingAttachmentDecision struct {
AttachmentIndex int `json:"attachment_index"`
Attempts []MediaUnderstandingModelDecision `json:"attempts,omitempty"`
Chosen *MediaUnderstandingModelDecision `json:"chosen,omitempty"`
}
type MediaUnderstandingCapability ¶
type MediaUnderstandingCapability string
const ( MediaCapabilityImage MediaUnderstandingCapability = "image" MediaCapabilityAudio MediaUnderstandingCapability = "audio" MediaCapabilityVideo MediaUnderstandingCapability = "video" )
type MediaUnderstandingConfig ¶
type MediaUnderstandingConfig struct {
Enabled *bool `yaml:"enabled" json:"enabled,omitempty"`
Scope *MediaUnderstandingScopeConfig `yaml:"scope" json:"scope,omitempty"`
MaxBytes int `yaml:"maxBytes" json:"maxBytes,omitempty"`
MaxChars int `yaml:"maxChars" json:"maxChars,omitempty"`
Prompt string `yaml:"prompt" json:"prompt,omitempty"`
TimeoutSeconds int `yaml:"timeoutSeconds" json:"timeoutSeconds,omitempty"`
Language string `yaml:"language" json:"language,omitempty"`
ProviderOptions map[string]map[string]any `yaml:"providerOptions" json:"providerOptions,omitempty"`
Deepgram *MediaUnderstandingDeepgramConfig `yaml:"deepgram" json:"deepgram,omitempty"`
BaseURL string `yaml:"baseUrl" json:"baseUrl,omitempty"`
Headers map[string]string `yaml:"headers" json:"headers,omitempty"`
Attachments *MediaUnderstandingAttachmentsConfig `yaml:"attachments" json:"attachments,omitempty"`
Models []MediaUnderstandingModelConfig `yaml:"models" json:"models,omitempty"`
}
type MediaUnderstandingDecision ¶
type MediaUnderstandingDecision struct {
Capability MediaUnderstandingCapability `json:"capability"`
Outcome string `json:"outcome,omitempty"`
Attachments []MediaUnderstandingAttachmentDecision `json:"attachments,omitempty"`
}
type MediaUnderstandingKind ¶
type MediaUnderstandingKind string
const ( MediaKindAudioTranscription MediaUnderstandingKind = "audio.transcription" MediaKindImageDescription MediaUnderstandingKind = "image.description" MediaKindVideoDescription MediaUnderstandingKind = "video.description" )
type MediaUnderstandingModelConfig ¶
type MediaUnderstandingModelConfig struct {
Provider string `yaml:"provider" json:"provider,omitempty"`
Model string `yaml:"model" json:"model,omitempty"`
Capabilities []string `yaml:"capabilities" json:"capabilities,omitempty"`
Type string `yaml:"type" json:"type,omitempty"`
Command string `yaml:"command" json:"command,omitempty"`
Args []string `yaml:"args" json:"args,omitempty"`
Prompt string `yaml:"prompt" json:"prompt,omitempty"`
MaxChars int `yaml:"maxChars" json:"maxChars,omitempty"`
MaxBytes int `yaml:"maxBytes" json:"maxBytes,omitempty"`
TimeoutSeconds int `yaml:"timeoutSeconds" json:"timeoutSeconds,omitempty"`
Language string `yaml:"language" json:"language,omitempty"`
ProviderOptions map[string]map[string]any `yaml:"providerOptions" json:"providerOptions,omitempty"`
Deepgram *MediaUnderstandingDeepgramConfig `yaml:"deepgram" json:"deepgram,omitempty"`
BaseURL string `yaml:"baseUrl" json:"baseUrl,omitempty"`
Headers map[string]string `yaml:"headers" json:"headers,omitempty"`
Profile string `yaml:"profile" json:"profile,omitempty"`
PreferredProfile string `yaml:"preferredProfile" json:"preferredProfile,omitempty"`
}
type MediaUnderstandingOutput ¶
type MediaUnderstandingOutput struct {
Kind MediaUnderstandingKind `json:"kind"`
AttachmentIndex int `json:"attachment_index"`
Text string `json:"text"`
Provider string `json:"provider"`
Model string `json:"model,omitempty"`
}
type MediaUnderstandingScopeConfig ¶
type MediaUnderstandingScopeConfig struct {
Default string `yaml:"default" json:"default,omitempty"`
Rules []MediaUnderstandingScopeRule `yaml:"rules" json:"rules,omitempty"`
}
type MediaUnderstandingScopeRule ¶
type MediaUnderstandingScopeRule struct {
Action string `yaml:"action" json:"action,omitempty"`
Match *MediaUnderstandingScopeMatch `yaml:"match" json:"match,omitempty"`
}
type MessageMetadata ¶
type MessageMetadata struct {
Role string `json:"role,omitempty"`
Body string `json:"body,omitempty"`
CompletionID string `json:"completion_id,omitempty"`
FinishReason string `json:"finish_reason,omitempty"`
PromptTokens int64 `json:"prompt_tokens,omitempty"`
CompletionTokens int64 `json:"completion_tokens,omitempty"`
Model string `json:"model,omitempty"`
ReasoningTokens int64 `json:"reasoning_tokens,omitempty"`
HasToolCalls bool `json:"has_tool_calls,omitempty"`
Transcript string `json:"transcript,omitempty"`
// Media understanding (OpenClaw-style)
MediaUnderstanding []MediaUnderstandingOutput `json:"media_understanding,omitempty"`
MediaUnderstandingDecisions []MediaUnderstandingDecision `json:"media_understanding_decisions,omitempty"`
// Turn tracking for the new schema
TurnID string `json:"turn_id,omitempty"` // Unique identifier for this assistant turn
// Tool call tracking
ToolCalls []ToolCallMetadata `json:"tool_calls,omitempty"` // List of tool calls in this turn
// Canonical internal schema payload (AI SDK).
CanonicalSchema string `json:"canonical_schema,omitempty"` // e.g. ai-sdk-ui-message-v1
CanonicalUIMessage map[string]any `json:"canonical_ui_message,omitempty"` // AI SDK UIMessage payload
// Timing information
StartedAtMs int64 `json:"started_at_ms,omitempty"` // Unix ms when generation started
FirstTokenAtMs int64 `json:"first_token_at_ms,omitempty"` // Unix ms of first token
CompletedAtMs int64 `json:"completed_at_ms,omitempty"` // Unix ms when completed
// Thinking/reasoning content (embedded, not separate)
ThinkingContent string `json:"thinking_content,omitempty"` // Full thinking text
ThinkingTokenCount int `json:"thinking_token_count,omitempty"` // Number of thinking tokens
// History exclusion
ExcludeFromHistory bool `json:"exclude_from_history,omitempty"` // Exclude from LLM context (e.g., welcome messages)
// Multimodal history: media attached to this message for re-injection into prompts.
MediaURL string `json:"media_url,omitempty"` // mxc:// URL for user-sent media (image, PDF, audio, video)
MimeType string `json:"mime_type,omitempty"` // MIME type of user-sent media
GeneratedFiles []GeneratedFileRef `json:"generated_files,omitempty"` // Files generated by the assistant in this turn
}
MessageMetadata keeps a tiny summary of each exchange so we can rebuild prompts using database history.
func MessageMeta ¶
func MessageMeta(msg *database.Message) *MessageMetadata
MessageMeta extracts the MessageMetadata from a database.Message.
func (*MessageMetadata) CopyFrom ¶
func (mm *MessageMetadata) CopyFrom(other any)
CopyFrom allows the metadata struct to participate in mautrix's meta merge.
type MessageRole ¶
type MessageRole string
const ( RoleSystem MessageRole = "system" RoleUser MessageRole = "user" RoleAssistant MessageRole = "assistant" RoleTool MessageRole = "tool" )
Re-export role constants.
type MessagesConfig ¶
type MessagesConfig struct {
ResponsePrefix string `yaml:"responsePrefix"`
AckReaction string `yaml:"ackReaction"`
AckReactionScope string `yaml:"ackReactionScope"` // group-mentions|group-all|direct|all|off|none
RemoveAckAfter bool `yaml:"removeAckAfterReply"`
GroupChat *GroupChatConfig `yaml:"groupChat"`
DirectChat *DirectChatConfig `yaml:"directChat"`
Queue *QueueConfig `yaml:"queue"`
InboundDebounce *InboundDebounceConfig `yaml:"inbound"`
}
MessagesConfig defines message rendering settings (OpenClaw-style).
type ModelAPI ¶
type ModelAPI string
const ( ModelAPIResponses ModelAPI = ModelAPI(aimodels.ModelAPIResponses) ModelAPIChatCompletions ModelAPI = ModelAPI(aimodels.ModelAPIChatCompletions) )
type ModelCache ¶
type ModelCache struct {
Models []ModelInfo `json:"models,omitempty"`
LastRefresh int64 `json:"last_refresh,omitempty"`
CacheDuration int64 `json:"cache_duration,omitempty"` // seconds
}
ModelCache stores available models (cached in UserLoginMetadata). It uses the provider-agnostic ModelInfo instead of openai.Model.
type ModelCapabilities ¶
type ModelCapabilities struct {
SupportsVision bool `json:"supports_vision"`
SupportsReasoning bool `json:"supports_reasoning"` // Models that support reasoning_effort parameter
SupportsPDF bool `json:"supports_pdf"`
SupportsImageGen bool `json:"supports_image_gen"`
SupportsAudio bool `json:"supports_audio"` // Models that accept audio input
SupportsVideo bool `json:"supports_video"` // Models that accept video input
SupportsToolCalling bool `json:"supports_tool_calling"` // Models that support function calling
}
ModelCapabilities stores computed capabilities for a model. This is NOT sent to the API; it is only used for local caching.
func GetModelCapabilities ¶
func GetModelCapabilities(modelID string, info *ModelInfo) ModelCapabilities
GetModelCapabilities returns the capabilities of a model.
type ModelCapabilitiesEventContent ¶
type ModelCapabilitiesEventContent struct {
AvailableModels []ModelInfo `json:"available_models"`
}
ModelCapabilitiesEventContent represents available models and their capabilities
type ModelCatalogEntry ¶
type ModelCatalogEntry struct {
ID string `json:"id"`
Name string `json:"name,omitempty"`
Provider string `json:"provider"`
ContextWindow int `json:"contextWindow,omitempty"`
MaxOutputTokens int `json:"maxTokens,omitempty"`
Reasoning bool `json:"reasoning,omitempty"`
Input []string `json:"input,omitempty"`
}
type ModelDefinitionConfig ¶
type ModelDefinitionConfig struct {
ID string `yaml:"id"`
Name string `yaml:"name"`
Reasoning bool `yaml:"reasoning"`
Input []string `yaml:"input"`
ContextWindow int `yaml:"context_window"`
MaxTokens int `yaml:"max_tokens"`
}
ModelDefinitionConfig defines a model entry for catalog seeding.
type ModelProviderConfig ¶
type ModelProviderConfig struct {
Models []ModelDefinitionConfig `yaml:"models"`
}
ModelProviderConfig describes models for a specific provider.
type ModelsConfig ¶
type ModelsConfig struct {
Mode string `yaml:"mode"` // merge | replace
Providers map[string]ModelProviderConfig `yaml:"providers"`
}
ModelsConfig configures model catalog seeding (OpenClaw-style).
type NoopStreamingHooks ¶
type NoopStreamingHooks struct{}
NoopStreamingHooks provides default no-op implementations of StreamingHooks. Used by the simple bridge where no extension behavior is needed.
func (NoopStreamingHooks) AdditionalTools ¶
func (NoopStreamingHooks) AdditionalTools(context.Context, *PortalMetadata) []responses.ToolUnionParam
func (NoopStreamingHooks) OnContinuationPreSend ¶
func (NoopStreamingHooks) OnContinuationPreSend(_ context.Context, _ *streamingState, outputs []functionCallOutput) (responses.ResponseInputParam, []functionCallOutput)
func (NoopStreamingHooks) OnStreamFinished ¶
func (NoopStreamingHooks) OnStreamFinished(context.Context, *bridgev2.Portal, *streamingState, *PortalMetadata)
func (NoopStreamingHooks) OnToolCallComplete ¶
func (NoopStreamingHooks) OnToolCallComplete(context.Context, string, string, *streamingState)
func (NoopStreamingHooks) ShouldContinue ¶
func (NoopStreamingHooks) ShouldContinue(*streamingState) bool
type NormalizedLocation ¶
type OpenAIConnector ¶
type OpenAIConnector struct {
Config Config
// contains filtered or unexported fields
}
OpenAIConnector wires mautrix bridgev2 to the OpenAI chat APIs.
func (*OpenAIConnector) ApplyRuntimeDefaults ¶
func (oc *OpenAIConnector) ApplyRuntimeDefaults()
ApplyRuntimeDefaults applies default configuration values.
func (*OpenAIConnector) Bridge ¶
func (oc *OpenAIConnector) Bridge() *bridgev2.Bridge
Bridge returns the underlying bridgev2.Bridge instance.
func (*OpenAIConnector) CreateLogin ¶
func (oc *OpenAIConnector) CreateLogin(ctx context.Context, user *bridgev2.User, flowID string) (bridgev2.LoginProcess, error)
func (*OpenAIConnector) FillPortalBridgeInfo ¶
func (oc *OpenAIConnector) FillPortalBridgeInfo(portal *bridgev2.Portal, content *event.BridgeEventContent)
FillPortalBridgeInfo sets custom room type for AI rooms
func (*OpenAIConnector) GetBridgeInfoVersion ¶
func (oc *OpenAIConnector) GetBridgeInfoVersion() (info, capabilities int)
func (*OpenAIConnector) GetCapabilities ¶
func (oc *OpenAIConnector) GetCapabilities() *bridgev2.NetworkGeneralCapabilities
func (*OpenAIConnector) GetClient ¶
func (oc *OpenAIConnector) GetClient(id networkid.UserLoginID) (bridgev2.NetworkAPI, bool)
GetClient returns the NetworkAPI for a given user login ID.
func (*OpenAIConnector) GetClients ¶
func (oc *OpenAIConnector) GetClients() map[networkid.UserLoginID]bridgev2.NetworkAPI
GetClients returns a snapshot of all registered clients.
func (*OpenAIConnector) GetConfig ¶
func (oc *OpenAIConnector) GetConfig() (example string, data any, upgrader configupgrade.Upgrader)
func (*OpenAIConnector) GetDBMetaTypes ¶
func (oc *OpenAIConnector) GetDBMetaTypes() database.MetaTypes
func (*OpenAIConnector) GetLoginFlows ¶
func (oc *OpenAIConnector) GetLoginFlows() []bridgev2.LoginFlow
Package-level flow definitions (use Provider* constants as flow IDs)
func (*OpenAIConnector) GetName ¶
func (oc *OpenAIConnector) GetName() bridgev2.BridgeName
func (*OpenAIConnector) Init ¶
func (oc *OpenAIConnector) Init(bridge *bridgev2.Bridge)
func (*OpenAIConnector) LoadUserLogin ¶
func (*OpenAIConnector) Policy ¶
func (oc *OpenAIConnector) Policy() BridgePolicy
Policy returns the BridgePolicy for this connector.
func (*OpenAIConnector) RemoveClient ¶
func (oc *OpenAIConnector) RemoveClient(id networkid.UserLoginID)
RemoveClient removes the NetworkAPI for a given user login ID.
func (*OpenAIConnector) ResolveBeeperBaseURL ¶
func (oc *OpenAIConnector) ResolveBeeperBaseURL(meta *UserLoginMetadata) string
ResolveBeeperBaseURL returns the Beeper base URL for the given login metadata.
func (*OpenAIConnector) ResolveBeeperToken ¶
func (oc *OpenAIConnector) ResolveBeeperToken(meta *UserLoginMetadata) string
ResolveBeeperToken returns the Beeper token for the given login metadata.
func (*OpenAIConnector) ResolveExaProxyBaseURL ¶
func (oc *OpenAIConnector) ResolveExaProxyBaseURL(meta *UserLoginMetadata) string
ResolveExaProxyBaseURL returns the Exa proxy base URL for the given login metadata.
func (*OpenAIConnector) ResolveOpenAIAPIKey ¶
func (oc *OpenAIConnector) ResolveOpenAIAPIKey(meta *UserLoginMetadata) string
ResolveOpenAIAPIKey returns the OpenAI API key for the given login metadata.
func (*OpenAIConnector) ResolveOpenAIBaseURL ¶
func (oc *OpenAIConnector) ResolveOpenAIBaseURL() string
ResolveOpenAIBaseURL returns the OpenAI base URL.
func (*OpenAIConnector) ResolveOpenRouterAPIKey ¶
func (oc *OpenAIConnector) ResolveOpenRouterAPIKey(meta *UserLoginMetadata) string
ResolveOpenRouterAPIKey returns the OpenRouter API key for the given login metadata.
func (*OpenAIConnector) ResolveProviderAPIKey ¶
func (oc *OpenAIConnector) ResolveProviderAPIKey(meta *UserLoginMetadata) string
ResolveProviderAPIKey returns the API key for the given login metadata.
func (*OpenAIConnector) ResolveProxyRoot ¶
func (oc *OpenAIConnector) ResolveProxyRoot(meta *UserLoginMetadata) string
ResolveProxyRoot returns the proxy root URL for the given login metadata.
func (*OpenAIConnector) ResolveServiceConfig ¶
func (oc *OpenAIConnector) ResolveServiceConfig(meta *UserLoginMetadata) ServiceConfigMap
ResolveServiceConfig returns the service configuration map for the given login metadata.
func (*OpenAIConnector) SetClient ¶
func (oc *OpenAIConnector) SetClient(id networkid.UserLoginID, client bridgev2.NetworkAPI)
SetClient registers a NetworkAPI for a given user login ID.
func (*OpenAIConnector) SetClientFactory ¶
func (oc *OpenAIConnector) SetClientFactory(f ClientFactory)
SetClientFactory sets a custom client factory on the connector. When set, LoadUserLogin will use this factory instead of the default newAIClient.
func (*OpenAIConnector) SetHooks ¶
func (oc *OpenAIConnector) SetHooks(hooks ConnectorHooks)
SetHooks sets the ConnectorHooks for this connector.
func (*OpenAIConnector) SetMatrixCredentials ¶
func (oc *OpenAIConnector) SetMatrixCredentials(accessToken, homeserver string)
SetMatrixCredentials seeds Beeper provider config from the Matrix account, if unset.
func (*OpenAIConnector) SetPolicy ¶
func (oc *OpenAIConnector) SetPolicy(policy BridgePolicy)
func (*OpenAIConnector) Stop ¶
func (oc *OpenAIConnector) Stop(ctx context.Context)
type OpenAILogin ¶
type OpenAILogin struct {
User *bridgev2.User
Connector *OpenAIConnector
FlowID string
}
OpenAILogin maps a Matrix user to a synthetic OpenAI "login".
func (*OpenAILogin) Cancel ¶
func (ol *OpenAILogin) Cancel()
func (*OpenAILogin) SubmitUserInput ¶
type OpenAIProvider ¶
type OpenAIProvider struct {
// contains filtered or unexported fields
}
OpenAIProvider implements AIProvider for OpenAI's API
func NewOpenAIProvider ¶
func NewOpenAIProvider(apiKey string, log zerolog.Logger) (*OpenAIProvider, error)
NewOpenAIProvider creates a new OpenAI provider
func NewOpenAIProviderWithBaseURL ¶
func NewOpenAIProviderWithBaseURL(apiKey, baseURL string, log zerolog.Logger) (*OpenAIProvider, error)
NewOpenAIProviderWithBaseURL creates an OpenAI provider with a custom base URL. Used for OpenRouter, the Beeper proxy, or custom endpoints.
func NewOpenAIProviderWithPDFPlugin ¶
func NewOpenAIProviderWithPDFPlugin(apiKey, baseURL, userID, pdfEngine string, headers map[string]string, log zerolog.Logger) (*OpenAIProvider, error)
NewOpenAIProviderWithPDFPlugin creates an OpenAI provider with PDF plugin middleware. Used for OpenRouter/Beeper to enable universal PDF support via file-parser plugin.
func NewOpenAIProviderWithUserID ¶
func NewOpenAIProviderWithUserID(apiKey, baseURL, userID string, log zerolog.Logger) (*OpenAIProvider, error)
NewOpenAIProviderWithUserID creates an OpenAI provider that passes user_id with each request. Used for Beeper proxy to ensure correct rate limiting and feature flags per user.
func (*OpenAIProvider) Client ¶
func (o *OpenAIProvider) Client() openai.Client
Client returns the underlying OpenAI client for direct access. Used by the bridge for advanced features like the Responses API.
func (*OpenAIProvider) Generate ¶
func (o *OpenAIProvider) Generate(ctx context.Context, params GenerateParams) (*GenerateResponse, error)
Generate performs a non-streaming generation using Responses API
func (*OpenAIProvider) GenerateStream ¶
func (o *OpenAIProvider) GenerateStream(ctx context.Context, params GenerateParams) (<-chan StreamEvent, error)
GenerateStream generates a streaming response from OpenAI using Responses API
func (*OpenAIProvider) ListModels ¶
func (o *OpenAIProvider) ListModels(ctx context.Context) ([]ModelInfo, error)
ListModels returns available OpenAI models
func (*OpenAIProvider) Name ¶
func (o *OpenAIProvider) Name() string
type OpenAIRemoteMessage ¶
type OpenAIRemoteMessage struct {
PortalKey networkid.PortalKey
ID networkid.MessageID
Sender bridgev2.EventSender
Content string
Timestamp time.Time
Metadata *MessageMetadata
FormattedContent string
ReplyToEventID id.EventID
ToolCallEventIDs []string
ImageEventIDs []string
}
OpenAIRemoteMessage represents a GPT answer that should be bridged to Matrix.
func (*OpenAIRemoteMessage) AddLogContext ¶
func (m *OpenAIRemoteMessage) AddLogContext(c zerolog.Context) zerolog.Context
func (*OpenAIRemoteMessage) ConvertMessage ¶
func (m *OpenAIRemoteMessage) ConvertMessage(ctx context.Context, portal *bridgev2.Portal, intent bridgev2.MatrixAPI) (*bridgev2.ConvertedMessage, error)
func (*OpenAIRemoteMessage) GetID ¶
func (m *OpenAIRemoteMessage) GetID() networkid.MessageID
func (*OpenAIRemoteMessage) GetPortalKey ¶
func (m *OpenAIRemoteMessage) GetPortalKey() networkid.PortalKey
func (*OpenAIRemoteMessage) GetSender ¶
func (m *OpenAIRemoteMessage) GetSender() bridgev2.EventSender
func (*OpenAIRemoteMessage) GetStreamOrder ¶
func (m *OpenAIRemoteMessage) GetStreamOrder() int64
func (*OpenAIRemoteMessage) GetTimestamp ¶
func (m *OpenAIRemoteMessage) GetTimestamp() time.Time
func (*OpenAIRemoteMessage) GetTransactionID ¶
func (m *OpenAIRemoteMessage) GetTransactionID() networkid.TransactionID
GetTransactionID implements RemoteMessageWithTransactionID
func (*OpenAIRemoteMessage) GetType ¶
func (m *OpenAIRemoteMessage) GetType() bridgev2.RemoteEventType
type PDFConfig ¶
type PDFConfig struct {
Engine string `json:"engine,omitempty"` // pdf-text (free), mistral-ocr (OCR, paid, default), native
}
PDFConfig stores per-room PDF processing configuration
type PDFPluginConfig ¶
type PDFPluginConfig struct {
ID string `json:"id"`
Config json.RawMessage `json:"config,omitempty"`
}
PDFPluginConfig holds configuration for the PDF file-parser plugin
type PortalInitOpts ¶
type PortalInitOpts struct {
ModelID string
Title string
SystemPrompt string
CopyFrom *PortalMetadata // For forked chats - copies config from source
PortalKey *networkid.PortalKey
}
PortalInitOpts contains options for initializing a chat portal
type PortalMetadata ¶
type PortalMetadata struct {
Model string `json:"model,omitempty"` // Set from room state
SystemPrompt string `json:"system_prompt,omitempty"` // Set from room state
ResponsePrefix string `json:"response_prefix,omitempty"` // Per-room response prefix override
Temperature float64 `json:"temperature,omitempty"` // Set from room state
MaxContextMessages int `json:"max_context_messages,omitempty"` // Set from room state
MaxCompletionTokens int `json:"max_completion_tokens,omitempty"` // Set from room state
ReasoningEffort string `json:"reasoning_effort,omitempty"` // none, low, medium, high, xhigh
Slug string `json:"slug,omitempty"`
Title string `json:"title,omitempty"`
TitleGenerated bool `json:"title_generated,omitempty"` // True if title was auto-generated
WelcomeSent bool `json:"welcome_sent,omitempty"`
AutoGreetingSent bool `json:"auto_greeting_sent,omitempty"`
Capabilities ModelCapabilities `json:"capabilities,omitempty"`
LastRoomStateSync int64 `json:"last_room_state_sync,omitempty"` // Track when we've synced room state
PDFConfig *PDFConfig `json:"pdf_config,omitempty"` // Per-room PDF processing configuration
ConversationMode string `json:"conversation_mode,omitempty"`
LastResponseID string `json:"last_response_id,omitempty"`
EmitThinking bool `json:"emit_thinking,omitempty"`
EmitToolArgs bool `json:"emit_tool_args,omitempty"`
ThinkingLevel string `json:"thinking_level,omitempty"` // off|minimal|low|medium|high|xhigh
VerboseLevel string `json:"verbose_level,omitempty"` // off|on|full
GroupActivation string `json:"group_activation,omitempty"` // mention|always
GroupActivationNeedsIntro bool `json:"group_activation_needs_intro,omitempty"`
GroupIntroSent bool `json:"group_intro_sent,omitempty"`
SendPolicy string `json:"send_policy,omitempty"` // allow|deny
SessionResetAt int64 `json:"session_reset_at,omitempty"`
AbortedLastRun bool `json:"aborted_last_run,omitempty"`
CompactionCount int `json:"compaction_count,omitempty"`
SessionBootstrappedAt int64 `json:"session_bootstrapped_at,omitempty"`
IsRawMode bool `json:"is_raw_mode,omitempty"` // True if this is a playground/raw mode room (no directive processing)
// Ack reaction config - similar to OpenClaw's ack reactions
AckReactionEmoji string `json:"ack_reaction_emoji,omitempty"` // Emoji to react with when message received (e.g., "👀", "🤔"). Empty = disabled.
AckReactionRemoveAfter bool `json:"ack_reaction_remove_after,omitempty"` // Remove the ack reaction after replying
// Runtime-only overrides (not persisted)
DisabledTools []string `json:"-"`
// Debounce configuration (0 = use default, -1 = disabled)
DebounceMs int `json:"debounce_ms,omitempty"`
// Per-session typing overrides (OpenClaw-style).
TypingMode string `json:"typing_mode,omitempty"` // never|instant|thinking|message
TypingIntervalSeconds *int `json:"typing_interval_seconds,omitempty"` // Optional per-session override
// Anthropic prompt cache TTL tracking
LastCacheTTLRefresh int64 `json:"last_cache_ttl_refresh,omitempty"` // Unix millis of last cache-eligible request
// Extra holds bridge-specific metadata that the core engine carries
// opaquely.
Extra any `json:"extra,omitempty"`
}
PortalMetadata stores per-room tuning knobs for the assistant.
func ClonePortalMetadata ¶
func ClonePortalMetadata(src *PortalMetadata) *PortalMetadata
ClonePortalMetadata returns a deep copy of portal metadata.
func PortalMeta ¶
func PortalMeta(portal *bridgev2.Portal) *PortalMetadata
PortalMeta extracts the PortalMetadata from a bridgev2.Portal.
type ProviderBraveConfig ¶
type ProviderBraveConfig struct {
Enabled *bool `yaml:"enabled"`
BaseURL string `yaml:"base_url"`
APIKey string `yaml:"api_key"`
TimeoutSecs int `yaml:"timeout_seconds"`
CacheTtlSecs int `yaml:"cache_ttl_seconds"`
SearchLang string `yaml:"search_lang"`
UILang string `yaml:"ui_lang"`
DefaultCountry string `yaml:"default_country"`
DefaultFreshness string `yaml:"default_freshness"`
}
type ProviderConfig ¶
type ProviderConfig struct {
APIKey string `yaml:"api_key"`
BaseURL string `yaml:"base_url"`
DefaultModel string `yaml:"default_model"`
DefaultPDFEngine string `yaml:"default_pdf_engine"` // pdf-text, mistral-ocr (default), native
}
ProviderConfig holds settings for a specific AI provider.
type ProviderDirectConfig ¶
type ProviderExaConfig ¶
type ProviderExaConfig struct {
Enabled *bool `yaml:"enabled"`
BaseURL string `yaml:"base_url"`
APIKey string `yaml:"api_key"`
Type string `yaml:"type"`
Category string `yaml:"category"`
NumResults int `yaml:"num_results"`
IncludeText bool `yaml:"include_text"`
TextMaxCharacters int `yaml:"text_max_chars"`
Highlights bool `yaml:"highlights"`
}
type ProvidersConfig ¶
type ProvidersConfig struct {
Beeper ProviderConfig `yaml:"beeper"`
OpenAI ProviderConfig `yaml:"openai"`
OpenRouter ProviderConfig `yaml:"openrouter"`
}
ProvidersConfig contains per-provider configuration.
type ProvisioningAPI ¶
type ProvisioningAPI struct {
// contains filtered or unexported fields
}
ProvisioningAPI handles the provisioning endpoints for user defaults
type PruningConfig ¶
func DefaultPruningConfig ¶
func DefaultPruningConfig() *PruningConfig
type QueueConfig ¶
type QueueConfig struct {
Mode string `yaml:"mode"`
ByChannel map[string]string `yaml:"byChannel"`
DebounceMs *int `yaml:"debounceMs"`
DebounceMsByChannel map[string]int `yaml:"debounceMsByChannel"`
Cap *int `yaml:"cap"`
Drop string `yaml:"drop"`
}
QueueConfig mirrors OpenClaw's queue settings.
type ReactionFeedback ¶
type ReactionFeedback struct {
Emoji string // The emoji used (e.g., "👍", "👎")
Timestamp time.Time // When the reaction was added
Sender string // Who sent the reaction (display name or user ID)
MessageID string // Which message was reacted to (event ID or timestamp)
RoomName string // Room/channel name for context
Action string // "added" or "removed"
}
ReactionFeedback represents a user reaction to an AI message. Similar to OpenClaw's system events, these are queued and drained when building the next prompt.
func DrainReactionFeedback ¶
func DrainReactionFeedback(roomID id.RoomID) []ReactionFeedback
DrainReactionFeedback returns and clears all reaction feedback for a room.
type ReactionQueue ¶
type ReactionQueue struct {
// contains filtered or unexported fields
}
ReactionQueue holds reaction feedback for a room.
func (*ReactionQueue) AddReaction ¶
func (q *ReactionQueue) AddReaction(feedback ReactionFeedback)
AddReaction adds a reaction feedback to the queue. Skips consecutive duplicates like OpenClaw does.
func (*ReactionQueue) DrainFeedback ¶
func (q *ReactionQueue) DrainFeedback() []ReactionFeedback
DrainFeedback returns all queued feedback and clears the queue.
type ReasoningEffortOption ¶
type ReasoningEffortOption struct {
Value string `json:"value"` // minimal, low, medium, high, xhigh
Label string `json:"label"` // Display name
}
ReasoningEffortOption represents an available reasoning effort level
type ReplyTarget ¶
func (ReplyTarget) EffectiveReplyTo ¶
func (t ReplyTarget) EffectiveReplyTo() id.EventID
type ReqSetDefaults ¶
type ReqSetDefaults struct {
Model *string `json:"model,omitempty"`
SystemPrompt *string `json:"system_prompt,omitempty"`
Temperature *float64 `json:"temperature,omitempty"`
ReasoningEffort *string `json:"reasoning_effort,omitempty"`
}
ReqSetDefaults is the request body for PUT /v1/defaults
type ResponseDirectives ¶
type ResponseDirectives struct {
// Text is the cleaned response text with directives stripped.
Text string
// IsSilent indicates the response should not be sent (NO_REPLY token present).
IsSilent bool
// ReplyToEventID is the Matrix event ID to reply to (from [[reply_to:<id>]] or [[reply_to_current]]).
ReplyToEventID id.EventID
// ReplyToCurrent indicates [[reply_to_current]] was used (reply to triggering message).
ReplyToCurrent bool
// HasReplyTag indicates a reply tag was present in the original text.
HasReplyTag bool
}
ResponseDirectives contains parsed directives from an LLM response. Matches OpenClaw's directive parsing behavior.
func ParseResponseDirectives ¶
func ParseResponseDirectives(text string, currentEventID id.EventID) *ResponseDirectives
ParseResponseDirectives extracts directives from LLM response text. currentEventID is the triggering message's event ID (used for [[reply_to_current]]).
type ResponseFunc ¶
type ResponseFunc func(ctx context.Context, evt *event.Event, portal *bridgev2.Portal, meta *PortalMetadata, prompt []openai.ChatCompletionMessageParamUnion) (bool, *aierrors.ContextLengthError, error)
ResponseFunc is the exported alias for the response handler type.
type ResponsePrefixContext ¶
type ResponsePrefixContext struct {
Model string
ModelFull string
Provider string
ThinkingLevel string
}
ResponsePrefixContext mirrors OpenClaw's template context.
type ResultStatus ¶
type ResultStatus string
ResultStatus represents the status of a tool result
const ( ResultStatusSuccess ResultStatus = "success" ResultStatusError ResultStatus = "error" ResultStatusPartial ResultStatus = "partial" ResultStatusDenied ResultStatus = "denied" )
type RoomCapabilitiesEventContent ¶
type RoomCapabilitiesEventContent struct {
Capabilities *ModelCapabilities `json:"capabilities,omitempty"`
AvailableTools []ToolInfo `json:"available_tools,omitempty"`
ReasoningEffortOptions []ReasoningEffortOption `json:"reasoning_effort_options,omitempty"`
Provider string `json:"provider,omitempty"`
EffectiveSettings *EffectiveSettings `json:"effective_settings,omitempty"`
}
RoomCapabilitiesEventContent represents bridge-controlled room capabilities. This state event is protected by power levels (100) so only the bridge bot can modify it.
type RoomSettingsEventContent ¶
type RoomSettingsEventContent struct {
Model string `json:"model,omitempty"`
SystemPrompt string `json:"system_prompt,omitempty"`
Temperature *float64 `json:"temperature,omitempty"`
MaxContextMessages int `json:"max_context_messages,omitempty"`
MaxCompletionTokens int `json:"max_completion_tokens,omitempty"`
ReasoningEffort string `json:"reasoning_effort,omitempty"`
ConversationMode string `json:"conversation_mode,omitempty"` // "messages" or "responses"
EmitThinking *bool `json:"emit_thinking,omitempty"`
EmitToolArgs *bool `json:"emit_tool_args,omitempty"`
}
RoomSettingsEventContent represents user-editable room settings. This state event uses normal power levels (0) so users can modify it.
type SearchConfig ¶
type SearchConfig struct {
Provider string `yaml:"provider"`
Fallbacks []string `yaml:"fallbacks"`
Exa ProviderExaConfig `yaml:"exa"`
Brave ProviderBraveConfig `yaml:"brave"`
Perplexity ProviderPerplexityConfig `yaml:"perplexity"`
OpenRouter ProviderOpenRouterConfig `yaml:"openrouter"`
}
type ServiceConfig ¶
type ServiceConfigMap ¶
type ServiceConfigMap map[string]ServiceConfig
type ServiceTokens ¶
type ServiceTokens struct {
OpenAI string `json:"openai,omitempty"`
OpenRouter string `json:"openrouter,omitempty"`
Exa string `json:"exa,omitempty"`
Brave string `json:"brave,omitempty"`
Perplexity string `json:"perplexity,omitempty"`
}
ServiceTokens stores optional per-login credentials for external services.
type SettingExplanation ¶
type SettingExplanation struct {
Value any `json:"value"`
Source SettingSource `json:"source"`
Reason string `json:"reason,omitempty"` // Only when limited/unavailable
}
SettingExplanation describes why a setting has its current value
type SettingSource ¶
type SettingSource string
SettingSource indicates where a setting value came from
const ( SourceRoomOverride SettingSource = "room_override" SourceUserDefault SettingSource = "user_default" SourceProviderConfig SettingSource = "provider_config" SourceGlobalDefault SettingSource = "global_default" SourceModelLimit SettingSource = "model_limitation" SourceProviderLimit SettingSource = "provider_limitation" )
type StatusDisplay ¶
type StatusDisplay struct {
Icon string `json:"icon,omitempty"`
Animation string `json:"animation,omitempty"` // "pulse", "spin", etc.
Color string `json:"color,omitempty"`
}
StatusDisplay contains display hints for status indicators
type StepBoundaryContent ¶
type StepBoundaryContent struct {
TurnID string `json:"turn_id"`
StepNumber int `json:"step_number"`
StepType string `json:"step_type"` // "tool_response_processed", etc.
PreviousToolCalls []string `json:"previous_tool_calls,omitempty"`
Display *StepDisplay `json:"display,omitempty"`
}
StepBoundaryContent represents a step boundary within a turn
type StepDisplay ¶
type StepDisplay struct {
Label string `json:"label,omitempty"`
}
StepDisplay contains display hints for step boundaries
type StreamContentType ¶
type StreamContentType string
StreamContentType identifies the type of content in a stream delta
const ( StreamContentText StreamContentType = "text" StreamContentReasoning StreamContentType = "reasoning" StreamContentToolInput StreamContentType = "tool_input" StreamContentToolResult StreamContentType = "tool_result" StreamContentCode StreamContentType = "code" StreamContentImage StreamContentType = "image" )
type StreamCursor ¶
type StreamCursor struct {
BlockType string `json:"block_type,omitempty"` // "text", "code", etc.
CharOffset int `json:"char_offset,omitempty"`
Field string `json:"field,omitempty"` // For tool_input, which field
}
StreamCursor provides position information for streaming
type StreamDeltaContent ¶
type StreamDeltaContent struct {
TurnID string `json:"turn_id"`
TargetEvent string `json:"target_event,omitempty"` // Event ID being updated
ContentType StreamContentType `json:"content_type"`
Delta string `json:"delta"`
Seq int `json:"seq"`
// For tool_input streaming
CallID string `json:"call_id,omitempty"`
ToolName string `json:"tool_name,omitempty"`
// Cursor information
Cursor *StreamCursor `json:"cursor,omitempty"`
}
StreamDeltaContent represents a streaming delta event
type StreamEvent ¶
type StreamEvent struct {
Type StreamEventType
Delta string
ReasoningDelta string
ToolCall *ToolCallResult
FinishReason string
ResponseID string
Usage *UsageInfo
Error error
}
type StreamEventType ¶
type StreamEventType string
const ( StreamEventDelta StreamEventType = "delta" StreamEventReasoning StreamEventType = "reasoning" StreamEventToolCall StreamEventType = "tool_call" StreamEventComplete StreamEventType = "complete" StreamEventError StreamEventType = "error" )
Re-export StreamEventType constants.
type StreamingConfig ¶
type StreamingConfig struct {
Enabled bool `json:"enabled,omitempty"`
}
StreamingConfig contains streaming behavior settings
type StreamingHooks ¶
type StreamingHooks interface {
// AdditionalTools returns extra tool parameters to include in API requests.
// Called during buildResponsesAPIParams after builtin tools are added.
// Downstream bridges can inject additional tool definitions.
AdditionalTools(ctx context.Context, meta *PortalMetadata) []responses.ToolUnionParam
// OnContinuationPreSend is called before each continuation round in the
// streaming loop. It may modify the pending function outputs (e.g. to
// inject bridge-specific events) and return additional input items to prepend
// to the continuation request.
OnContinuationPreSend(ctx context.Context, state *streamingState, outputs []functionCallOutput) (extraInput responses.ResponseInputParam, modifiedOutputs []functionCallOutput)
// ShouldContinue is called at the top of each continuation iteration.
// Return false to break out of the tool-call loop early.
ShouldContinue(state *streamingState) bool
// OnToolCallComplete is called after a builtin tool finishes execution,
// before the result is recorded for continuation.
OnToolCallComplete(ctx context.Context, toolCallID, toolName string, state *streamingState)
// OnStreamFinished is called after the streaming response is fully
// complete (messages sent, state saved).
OnStreamFinished(ctx context.Context, portal *bridgev2.Portal, state *streamingState, meta *PortalMetadata)
}
StreamingHooks defines extension points for the streaming engine. Downstream bridges can override these for custom behavior. The simple bridge uses NoopStreamingHooks.
type ThinkingContent ¶
type ThinkingContent struct {
Content string `json:"content,omitempty"`
TokenCount int `json:"token_count,omitempty"`
DurationMs int64 `json:"duration_ms,omitempty"`
Summary string `json:"summary,omitempty"`
}
ThinkingContent represents embedded thinking/reasoning content
type TimingInfo ¶
type TimingInfo struct {
StartedAt int64 `json:"started_at,omitempty"` // Unix ms
FirstTokenAt int64 `json:"first_token_at,omitempty"` // Unix ms
CompletedAt int64 `json:"completed_at,omitempty"` // Unix ms
}
TimingInfo contains timing information for events
type ToolArtifact ¶
type ToolArtifact struct {
Type string `json:"type"` // "file", "image"
MxcURI string `json:"mxc_uri,omitempty"`
Filename string `json:"filename,omitempty"`
Mimetype string `json:"mimetype,omitempty"`
Size int `json:"size,omitempty"`
}
ToolArtifact represents a file or image generated by a tool
type ToolCallContent ¶
type ToolCallContent struct {
// Standard Matrix fallback
Body string `json:"body"`
MsgType string `json:"msgtype"`
// Tool call details
ToolCall *ToolCallData `json:"com.beeper.ai.tool_call"`
}
ToolCallContent represents a tool call timeline event
type ToolCallData ¶
type ToolCallData struct {
CallID string `json:"call_id"`
TurnID string `json:"turn_id"`
ToolName string `json:"tool_name"`
ToolType ToolType `json:"tool_type"`
Status ToolStatus `json:"status"`
// Input arguments (fully accumulated)
Input map[string]any `json:"input,omitempty"`
// Display hints
Display *ToolDisplay `json:"display,omitempty"`
// Reference to result event (set after completion)
ResultEvent string `json:"result_event,omitempty"`
// Timing
Timing *TimingInfo `json:"timing,omitempty"`
}
ToolCallData contains the tool call metadata
type ToolCallMetadata ¶
type ToolCallMetadata struct {
CallID string `json:"call_id"`
ToolName string `json:"tool_name"`
ToolType string `json:"tool_type"` // builtin, provider, function
Input map[string]any `json:"input,omitempty"`
Output map[string]any `json:"output,omitempty"`
Status string `json:"status"` // pending, running, completed, failed, timeout, cancelled
ResultStatus string `json:"result_status,omitempty"` // success, error, partial
ErrorMessage string `json:"error_message,omitempty"`
StartedAtMs int64 `json:"started_at_ms,omitempty"`
CompletedAtMs int64 `json:"completed_at_ms,omitempty"`
// Event IDs for timeline events (if emitted as separate events)
CallEventID string `json:"call_event_id,omitempty"`
ResultEventID string `json:"result_event_id,omitempty"`
}
ToolCallMetadata tracks a tool call within a message
type ToolCallResult ¶
type ToolDefinition ¶
type ToolDefinition struct {
Name string
Description string
Parameters map[string]any
Execute func(ctx context.Context, args map[string]any) (string, error)
}
ToolDefinition defines a tool callable by the model.
func BuiltinTools ¶
func BuiltinTools() []ToolDefinition
BuiltinTools returns builtin tools enabled by this bridge profile.
func GetBuiltinTool ¶
func GetBuiltinTool(name string) *ToolDefinition
func GetEnabledBuiltinTools ¶
func GetEnabledBuiltinTools(isToolEnabled func(string) bool) []ToolDefinition
type ToolDisplay ¶
type ToolDisplay struct {
Title string `json:"title,omitempty"`
Icon string `json:"icon,omitempty"` // mxc:// URL
Collapsed bool `json:"collapsed,omitempty"`
}
ToolDisplay contains display hints for tool rendering
type ToolExecutor ¶
ToolExecutor is the exported alias for the builtin tool executor signature.
type ToolInfo ¶
type ToolInfo struct {
Name string `json:"name"`
DisplayName string `json:"display_name"` // Human-readable name for UI
Type string `json:"type"` // "builtin", "provider", "plugin"
Description string `json:"description,omitempty"`
Enabled bool `json:"enabled"`
Available bool `json:"available"` // Based on model capabilities and provider
Source SettingSource `json:"source,omitempty"` // Where enabled state came from
Reason string `json:"reason,omitempty"` // Only when limited/unavailable
}
ToolInfo describes a tool and its status for room state broadcasting
type ToolOutputPreview ¶
type ToolOutputPreview struct {
Stdout string `json:"stdout,omitempty"`
Stderr string `json:"stderr,omitempty"`
Truncated bool `json:"truncated,omitempty"`
}
ToolOutputPreview contains preview of tool output
type ToolProgressContent ¶
type ToolProgressContent struct {
CallID string `json:"call_id"`
TurnID string `json:"turn_id"`
ToolName string `json:"tool_name"`
Status ToolStatus `json:"status"`
Progress *ToolProgressDetails `json:"progress,omitempty"`
// Output preview (for long-running tools, etc.)
OutputPreview *ToolOutputPreview `json:"output_preview,omitempty"`
}
ToolProgressContent represents tool execution progress
type ToolProgressDetails ¶
type ToolProgressDetails struct {
Stage string `json:"stage,omitempty"` // "executing", "processing", etc.
Percent int `json:"percent,omitempty"` // 0-100
Message string `json:"message,omitempty"`
}
ToolProgressDetails contains progress information
type ToolProvidersConfig ¶
type ToolProvidersConfig struct {
Search *SearchConfig `yaml:"search"`
Fetch *FetchConfig `yaml:"fetch"`
Media *MediaToolsConfig `yaml:"media"`
}
ToolProvidersConfig configures external tool providers like search and fetch.
type ToolResultContent ¶
type ToolResultContent struct {
// Standard Matrix fallback
Body string `json:"body"`
MsgType string `json:"msgtype"`
Format string `json:"format,omitempty"`
FormattedBody string `json:"formatted_body,omitempty"`
// Tool result details
ToolResult *ToolResultData `json:"com.beeper.ai.tool_result"`
}
ToolResultContent represents a tool result timeline event
type ToolResultData ¶
type ToolResultData struct {
CallID string `json:"call_id"`
TurnID string `json:"turn_id"`
ToolName string `json:"tool_name"`
Status ResultStatus `json:"status"`
// Output data
Output map[string]any `json:"output,omitempty"`
// Artifacts (files, images generated by tool)
Artifacts []ToolArtifact `json:"artifacts,omitempty"`
// Display hints
Display *ToolResultDisplay `json:"display,omitempty"`
}
ToolResultData contains the tool result metadata
type ToolResultDisplay ¶
type ToolResultDisplay struct {
Format string `json:"format,omitempty"` // "search_results", "code_output", etc.
Expandable bool `json:"expandable,omitempty"`
DefaultExpanded bool `json:"default_expanded,omitempty"`
ShowStdout bool `json:"show_stdout,omitempty"`
ShowArtifacts bool `json:"show_artifacts,omitempty"`
}
ToolResultDisplay contains display hints for tool result rendering
type ToolStatus ¶
type ToolStatus string
ToolStatus represents the state of a tool call
const ( ToolStatusPending ToolStatus = "pending" ToolStatusRunning ToolStatus = "running" ToolStatusCompleted ToolStatus = "completed" ToolStatusFailed ToolStatus = "failed" ToolStatusTimeout ToolStatus = "timeout" ToolStatusCancelled ToolStatus = "cancelled" )
type ToolStrictMode ¶
type ToolStrictMode string
ToolStrictMode controls whether OpenAI strict mode is used for tool schemas.
const ( ToolStrictModeOff ToolStrictMode = "off" ToolStrictModeOn ToolStrictMode = "on" )
type TurnCancelledContent ¶
type TurnCancelledContent struct {
TurnID string `json:"turn_id"`
CancelledAt int64 `json:"cancelled_at"` // Unix ms
Reason string `json:"reason,omitempty"`
PartialContent string `json:"partial_content,omitempty"`
ToolCallsCancelled []string `json:"tool_calls_cancelled,omitempty"`
}
TurnCancelledContent represents a cancelled turn event
type TurnStatus ¶
type TurnStatus string
TurnStatus represents the state of an assistant turn
const ( TurnStatusPending TurnStatus = "pending" TurnStatusThinking TurnStatus = "thinking" TurnStatusGenerating TurnStatus = "generating" TurnStatusToolUse TurnStatus = "tool_use" TurnStatusCompleted TurnStatus = "completed" TurnStatusFailed TurnStatus = "failed" TurnStatusCancelled TurnStatus = "cancelled" )
type TypingContext ¶
type TypingController ¶
type TypingController struct {
// contains filtered or unexported fields
}
TypingController manages typing indicators with TTL and refresh. Similar to OpenClaw's TypingController pattern.
func NewTypingController ¶
func NewTypingController(client *AIClient, ctx context.Context, portal *bridgev2.Portal, opts TypingControllerOptions) *TypingController
NewTypingController creates a new typing controller.
func (*TypingController) IsActive ¶
func (tc *TypingController) IsActive() bool
IsActive returns whether typing is currently active.
func (*TypingController) MarkDispatchIdle ¶
func (tc *TypingController) MarkDispatchIdle()
MarkDispatchIdle marks the dispatcher as idle.
func (*TypingController) MarkRunComplete ¶
func (tc *TypingController) MarkRunComplete()
MarkRunComplete marks the AI run as complete. Typing will stop when both run is complete and dispatch is idle.
func (*TypingController) RefreshTTL ¶
func (tc *TypingController) RefreshTTL()
RefreshTTL resets the TTL timer, keeping typing active longer. Call this when activity occurs (tool calls, text chunks).
func (*TypingController) Start ¶
func (tc *TypingController) Start()
Start begins the typing indicator with automatic refresh.
func (*TypingController) Stop ¶
func (tc *TypingController) Stop()
Stop stops the typing indicator and cleans up.
type TypingControllerOptions ¶
type TypingMode ¶
type TypingMode string
const ( TypingModeNever TypingMode = TypingMode(aityping.TypingModeNever) TypingModeInstant TypingMode = TypingMode(aityping.TypingModeInstant) TypingModeThinking TypingMode = TypingMode(aityping.TypingModeThinking) TypingModeMessage TypingMode = TypingMode(aityping.TypingModeMessage) )
type TypingSignaler ¶
type TypingSignaler struct {
// contains filtered or unexported fields
}
func NewTypingSignaler ¶
func NewTypingSignaler(typing *TypingController, mode TypingMode) *TypingSignaler
func (*TypingSignaler) SignalMessageStart ¶
func (ts *TypingSignaler) SignalMessageStart()
func (*TypingSignaler) SignalReasoningDelta ¶
func (ts *TypingSignaler) SignalReasoningDelta()
func (*TypingSignaler) SignalRunStart ¶
func (ts *TypingSignaler) SignalRunStart()
func (*TypingSignaler) SignalTextDelta ¶
func (ts *TypingSignaler) SignalTextDelta(text string)
func (*TypingSignaler) SignalToolStart ¶
func (ts *TypingSignaler) SignalToolStart()
type UnifiedMessage ¶
type UnifiedMessage struct {
Role MessageRole
Content []ContentPart
ToolCalls []ToolCallResult
ToolCallID string
Name string
}
func NewImageMessage ¶
func NewImageMessage(role MessageRole, imageURL, mimeType string) UnifiedMessage
func NewTextMessage ¶
func NewTextMessage(role MessageRole, text string) UnifiedMessage
func (*UnifiedMessage) HasImages ¶
func (m *UnifiedMessage) HasImages() bool
func (*UnifiedMessage) HasMultimodalContent ¶
func (m *UnifiedMessage) HasMultimodalContent() bool
func (*UnifiedMessage) Text ¶
func (m *UnifiedMessage) Text() string
type UserDefaults ¶
type UserDefaults struct {
Model string `json:"model,omitempty"`
SystemPrompt string `json:"system_prompt,omitempty"`
Temperature *float64 `json:"temperature,omitempty"`
ReasoningEffort string `json:"reasoning_effort,omitempty"`
}
UserDefaults stores user-level default settings for new chats
type UserLoginMetadata ¶
type UserLoginMetadata struct {
Persona string `json:"persona,omitempty"`
Provider string `json:"provider,omitempty"` // Selected provider
APIKey string `json:"api_key,omitempty"`
BaseURL string `json:"base_url,omitempty"` // Per-user API endpoint
TitleGenerationModel string `json:"title_generation_model,omitempty"` // Model to use for generating chat titles
NextChatIndex int `json:"next_chat_index,omitempty"`
DefaultChatPortalID string `json:"default_chat_portal_id,omitempty"`
ModelCache *ModelCache `json:"model_cache,omitempty"`
ChatsSynced bool `json:"chats_synced,omitempty"` // True after initial bootstrap completed successfully
Gravatar *GravatarState `json:"gravatar,omitempty"`
Timezone string `json:"timezone,omitempty"`
ResponsePrefix string `json:"response_prefix,omitempty"`
// FileAnnotationCache stores parsed PDF content from OpenRouter's file-parser plugin
// Key is the file hash (SHA256), pruned after 7 days
FileAnnotationCache map[string]FileAnnotation `json:"file_annotation_cache,omitempty"`
// User-level defaults for new chats (set via provisioning API)
Defaults *UserDefaults `json:"defaults,omitempty"`
// Optional per-login tokens for external services
ServiceTokens *ServiceTokens `json:"service_tokens,omitempty"`
// Provider health tracking
ConsecutiveErrors int `json:"consecutive_errors,omitempty"`
LastErrorAt int64 `json:"last_error_at,omitempty"` // Unix timestamp
// Extra holds bridge-specific login metadata that the core engine
// carries opaquely.
Extra any `json:"extra,omitempty"`
}
UserLoginMetadata is stored on each login row to keep per-user settings.
func LoginMeta ¶
func LoginMeta(login *bridgev2.UserLogin) *UserLoginMetadata
LoginMeta extracts the UserLoginMetadata from a bridgev2.UserLogin.
Source Files
¶
- abort_helpers.go
- abort_triggers.go
- ack_reactions.go
- audio_analysis.go
- audio_generation.go
- audio_mime.go
- bot_check.go
- cache_ttl.go
- chat.go
- client.go
- command_registry.go
- commands.go
- commands_helpers.go
- commands_parity.go
- commands_playground.go
- config.go
- connector.go
- context_overrides.go
- context_pruning.go
- debounce.go
- envelope.go
- error_logging.go
- events.go
- exported_api.go
- exported_functions.go
- group_activation.go
- group_history.go
- handleai.go
- handlematrix.go
- hardcut_stubs.go
- identifiers.go
- image_analysis.go
- image_generation.go
- image_understanding.go
- inbound_commands.go
- inbound_debounce.go
- inbound_directive_apply.go
- internal_dispatch.go
- linkpreview.go
- login.go
- matrix_helpers.go
- matrix_payload.go
- media_download.go
- media_helpers.go
- media_prompt.go
- media_send.go
- media_understanding_attachments.go
- media_understanding_cli.go
- media_understanding_conversions.go
- media_understanding_defaults.go
- media_understanding_format.go
- media_understanding_providers.go
- media_understanding_resolve.go
- media_understanding_runner.go
- media_understanding_scope.go
- media_understanding_types.go
- mentions.go
- message_status.go
- messages.go
- metadata.go
- model_api.go
- model_catalog.go
- model_fallback.go
- model_info_conversions.go
- owner_allowlist.go
- pending_queue.go
- policy.go
- provider.go
- provider_openai.go
- provisioning.go
- pruning.go
- queue_helpers.go
- queue_resolution.go
- queue_settings.go
- raw_mode_prompt.go
- reaction_feedback.go
- reaction_handling.go
- reasoning_fallback.go
- remote_message.go
- reply_mentions.go
- reply_policy.go
- response_directives.go
- response_finalization.go
- response_prefix.go
- response_prefix_template.go
- response_retry.go
- room_capabilities.go
- room_runs.go
- session_greeting.go
- source_citations.go
- status_events_context.go
- status_text.go
- stream_events.go
- streaming.go
- streaming_directives.go
- streaming_hooks.go
- system_ack.go
- system_prompts.go
- text_files.go
- timezone.go
- token_resolver.go
- tool_aliases.go
- tool_call_id.go
- tool_descriptions.go
- tool_execution.go
- tool_registry.go
- tools.go
- trace.go
- turn_validation.go
- typing_context.go
- typing_controller.go
- typing_mode.go
- typing_queue.go
- typing_state.go
- video_analysis.go