core

package
v0.2.21 Latest
Warning

This package is not in the latest version of its module.

Go to latest
Published: Nov 14, 2025 License: Apache-2.0 Imports: 20 Imported by: 0

Documentation

Index

Constants

View Source
const Name = "llm/core"

Variables

View Source
var ErrContextLimitExceeded = errors.New("llm/core: context limit exceeded")

ErrContextLimitExceeded signals that a provider/model rejected the request due to exceeding the maximum context window (prompt too long / too many tokens).

Functions

func ContainsContextLimitError added in v0.2.9

func ContainsContextLimitError(input string) bool

func IsContinuationEnabled added in v0.2.18

func IsContinuationEnabled(model llm.Model, opts *llm.Options) bool

IsContinuationEnabled is an exported wrapper for continuationEnabled so that other packages (e.g., providers) can check whether continuation-by-response-id should be considered for a given model and options.

Types

type GenerateInput

type GenerateInput struct {
	llm.ModelSelection
	SystemPrompt *prompt.Prompt

	Prompt  *prompt.Prompt
	Binding *prompt.Binding
	Message []llm.Message
	// Participant identities for multi-user/agent attribution
	UserID  string `yaml:"userID" json:"userID"`
	AgentID string `yaml:"agentID" json:"agentID"`
}

func (*GenerateInput) Init

func (i *GenerateInput) Init(ctx context.Context) error

func (*GenerateInput) MatchModelIfNeeded

func (i *GenerateInput) MatchModelIfNeeded(matcher llm.Matcher)

func (*GenerateInput) Validate

func (i *GenerateInput) Validate(ctx context.Context) error

type GenerateOutput

type GenerateOutput struct {
	Response  *llm.GenerateResponse
	Content   string
	MessageID string
}

GenerateOutput represents the output from extraction.

type Service

type Service struct {
	// contains filtered or unexported fields
}

func New

func New(finder llm.Finder, registry tool.Registry, convClient apiconv.Client) *Service

New creates a new extractor service.

func (*Service) AttachmentUsage added in v0.2.1

func (s *Service) AttachmentUsage(convID string) int64

AttachmentUsage returns cumulative attachment bytes recorded for a conversation.

func (*Service) BuildContinuationRequest added in v0.2.18

func (s *Service) BuildContinuationRequest(ctx context.Context, req *llm.GenerateRequest, history *prompt.History) *llm.GenerateRequest

BuildContinuationRequest constructs a continuation request by selecting the latest assistant response anchor (resp.id) and including only tool-call messages that map to that anchor.

func (*Service) Generate

func (s *Service) Generate(ctx context.Context, input *GenerateInput, output *GenerateOutput) error

func (*Service) Method

func (s *Service) Method(name string) (svc.Executable, error)

Method returns the specified method

func (*Service) Methods

func (s *Service) Methods() svc.Signatures

Methods returns the service methods

func (*Service) ModelFinder

func (s *Service) ModelFinder() llm.Finder

func (*Service) ModelImplements

func (s *Service) ModelImplements(ctx context.Context, modelName, feature string) bool

ModelImplements reports whether a given model supports a feature. When modelName is empty or not found, it returns false.

func (*Service) ModelMatcher

func (s *Service) ModelMatcher() llm.Matcher

func (*Service) ModelToolPreviewLimit added in v0.2.6

func (s *Service) ModelToolPreviewLimit(model string) int

ModelToolPreviewLimit returns the preview limit in bytes for a model or 0 when not configured.

func (*Service) Name

func (s *Service) Name() string

Name returns the service name.

func (*Service) ProviderAttachmentLimit added in v0.2.1

func (s *Service) ProviderAttachmentLimit(model llm.Model) int64

ProviderAttachmentLimit returns the provider-configured attachment cap for the given model. Zero means unlimited/not enforced by this provider.

func (*Service) SetAttachmentUsage added in v0.2.1

func (s *Service) SetAttachmentUsage(convID string, used int64)

SetAttachmentUsage sets cumulative attachment bytes for a conversation.

func (*Service) SetConversationClient

func (s *Service) SetConversationClient(c apiconv.Client)

func (*Service) SetModelPreviewLimits added in v0.2.6

func (s *Service) SetModelPreviewLimits(m map[string]int)

SetModelPreviewLimits sets per-model preview byte limits used by binding to trim tool results.

func (*Service) Stream

func (s *Service) Stream(ctx context.Context, in, out interface{}) (func(), error)

Stream handles streaming LLM responses, structuring JSON output for text chunks, function calls and finish reasons.

func (*Service) ToolDefinitions

func (s *Service) ToolDefinitions() []llm.ToolDefinition

ToolDefinitions returns every tool definition registered in the tool registry. The slice may be empty when no registry is configured (unit tests or mis-configuration).

type StreamInput

type StreamInput struct {
	*GenerateInput
	StreamID string
}

type StreamOutput

type StreamOutput struct {
	Events    []stream.Event `json:"events"`
	MessageID string         `json:"messageId,omitempty"`
}

StreamOutput aggregates streaming events into a slice.

Directories

Path Synopsis

Jump to

Keyboard shortcuts

? : This menu
/ : Search site
f or F : Jump to
y or Y : Canonical URL