Documentation ¶
Overview ¶
Package rlm provides a native Recursive Language Model (RLM) implementation for dspy-go. RLM enables LLMs to explore large contexts programmatically through a Go REPL, making iterative queries to sub-LLMs until a final answer is reached.
The package also provides context structure pre-analysis: the analyzer detects the structure of the input context (JSON, markdown, code, etc.) and provides hints for optimal processing strategies.
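A minimal end-to-end sketch. The import paths, the exact NewFromLLM signature, and the llm value are assumptions; Complete and CompletionResult are documented below.

package main

import (
    "context"
    "fmt"
    "log"

    "github.com/XiaoConstantine/dspy-go/pkg/core" // import paths are assumed; adjust to your module layout
    "github.com/XiaoConstantine/dspy-go/pkg/rlm"
)

func main() {
    var llm core.LLM // obtain a configured LLM from your provider setup (assumption)

    // NewFromLLM uses the same model for the root loop and for sub-queries;
    // options such as WithMaxIterations layer on top of DefaultConfig.
    r := rlm.NewFromLLM(llm, rlm.WithMaxIterations(20), rlm.WithVerbose(true))

    document := "... large context to explore ..."
    result, err := r.Complete(context.Background(), document, "Summarize the key findings.")
    if err != nil {
        log.Fatal(err)
    }
    fmt.Println(result.Response)
    fmt.Printf("iterations=%d duration=%s\n", result.Iterations, result.Duration)
}

Since RLM also implements core.Module (see Process below), it can be composed into larger dspy-go programs using "context" and "query" inputs.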
Index ¶
- Constants
- Variables
- func ChunkAnalysisSignature() core.Signature
- func CompactIterationSignature() core.Signature
- func ContextMetadata(payload any) string
- func DefaultIterationInstruction(compact bool) string
- func DefaultOuterInstruction() string
- func FindCodeBlocks(text string) []string
- func FormatExecutionResult(result *ExecutionResult) string
- func IterationDemos() []core.Example
- func IterationSignature() core.Signature
- func RLMSignature() core.Signature
- func RLMSignatureWithInstruction(instruction string) core.Signature
- func SubQueryDemos() []core.Example
- func SubQuerySignature() core.Signature
- func SynthesisSignature() core.Signature
- type AdaptiveIterationConfig
- type AnalyzerConfig
- type AsyncBatchHandle
- type AsyncQueryHandle
- type Chunk
- type ChunkConfig
- type ChunkStrategy
- type CodeBlock
- type CompletionResult
- type Config
- type ContextAnalysis
- type ContextIndex
- func (idx *ContextIndex) AllChunks() []Chunk
- func (idx *ContextIndex) ChunkCount() int
- func (idx *ContextIndex) FindRelevant(ctx context.Context, query string, topK int) ([]Chunk, error)
- func (idx *ContextIndex) GetChunk(id int) (Chunk, bool)
- func (idx *ContextIndex) GetContext(startLine, endLine int) string
- func (idx *ContextIndex) GetRawContent() string
- func (idx *ContextIndex) IndexEagerly(ctx context.Context) error
- func (idx *ContextIndex) IsIndexed() bool
- func (idx *ContextIndex) LineCount() int
- func (idx *ContextIndex) SetChunkSummary(chunkID int, summary string)
- func (idx *ContextIndex) SetEmbeddingFunc(fn EmbeddingFunc)
- type ContextPolicyPreset
- type ContextType
- type EmbeddingFunc
- type ExecutionResult
- type FinalAnswer
- type FinalAnswerType
- type HistoryCompressionConfig
- type HistoryEntry
- type ImmutableHistory
- func (h *ImmutableHistory) Append(entry HistoryEntry)
- func (h *ImmutableHistory) CountAction(action string) int
- func (h *ImmutableHistory) Entries() []HistoryEntry
- func (h *ImmutableHistory) Len() int
- func (h *ImmutableHistory) RenderCheckpointed(maxEntryLen, verbatimEntries, maxSummaryTokens int) string
- func (h *ImmutableHistory) String(maxEntryLen int) string
- type IterationProgress
- type LLMCall
- type LLMSubClient
- type Option
- func WithAdaptiveCheckpointThreshold(n int) Option
- func WithAdaptiveIteration() Option
- func WithAdaptiveIterationConfig(cfg AdaptiveIterationConfig) Option
- func WithCompactIterationInstructions(enabled bool) Option
- func WithContextInfoPreviewChars(n int) Option
- func WithContextPolicyPreset(preset ContextPolicyPreset) Option
- func WithHistoryCompression(verbatimIterations, maxSummaryTokens int) Option
- func WithIterationDemos(enabled bool) Option
- func WithMaxFullContextQueryChars(n int) Option
- func WithMaxIterations(n int) Option
- func WithMaxTokens(n int) Option
- func WithOutputTruncation() Option
- func WithOutputTruncationConfig(cfg OutputTruncationConfig) Option
- func WithProgressHandler(handler func(IterationProgress)) Option
- func WithREPLSetup(fn func(repl *YaegiREPL) error) Option
- func WithSubRLM() Option
- func WithSubRLMConfig(cfg SubRLMConfig) Option
- func WithTimeout(d time.Duration) Option
- func WithTraceDir(dir string) Option
- func WithVerbose(v bool) Option
- type OutputFieldSpec
- type OutputTruncationConfig
- type QueryResponse
- type REPLEnvironment
- type REPLVariable
- type RLM
- func (r *RLM) Clone() core.Module
- func (r *RLM) Complete(ctx context.Context, contextPayload any, query string) (*CompletionResult, error)
- func (r *RLM) CompleteWithTrace(ctx context.Context, contextPayload any, query string) (*CompletionResult, *RLMTrace, error)
- func (r *RLM) Config() Config
- func (r *RLM) GetTokenTracker() *TokenTracker
- func (r *RLM) Process(ctx context.Context, inputs map[string]any, opts ...core.Option) (map[string]any, error)
- func (r *RLM) ProcessWithInterceptors(ctx context.Context, inputs map[string]any, ...) (map[string]any, error)
- func (r *RLM) SetConfig(cfg Config)
- func (r *RLM) SetLLM(llm core.LLM)
- func (r *RLM) WithOptions(opts ...Option) *RLM
- type RLMTrace
- type RLMTraceStep
- type RootIterationSnapshot
- type StructureHints
- type SubLLMClient
- type SubRLMCall
- type SubRLMConfig
- type SubRLMEntry
- type TokenTracker
- func (t *TokenTracker) AddRootUsage(promptTokens, completionTokens int)
- func (t *TokenTracker) AddRootUsageForIteration(iteration, promptTokens, completionTokens int)
- func (t *TokenTracker) AddSubCall(call LLMCall)
- func (t *TokenTracker) AddSubCalls(calls []LLMCall)
- func (t *TokenTracker) AddSubRLMCall(call SubRLMCall)
- func (t *TokenTracker) ClearSubCalls()
- func (t *TokenTracker) GetMaxRootPromptTokens() int
- func (t *TokenTracker) GetMeanRootPromptTokens() int
- func (t *TokenTracker) GetRootSnapshots() []RootIterationSnapshot
- func (t *TokenTracker) GetRootUsage() core.TokenUsage
- func (t *TokenTracker) GetSubCalls() []LLMCall
- func (t *TokenTracker) GetSubRLMCalls() []SubRLMCall
- func (t *TokenTracker) GetSubRLMUsage() core.TokenUsage
- func (t *TokenTracker) GetSubUsage() core.TokenUsage
- func (t *TokenTracker) GetTotalUsage() core.TokenUsage
- func (t *TokenTracker) Reset()
- type YaegiREPL
- func (r *YaegiREPL) ClearAsyncQueries()
- func (r *YaegiREPL) ClearFinal()
- func (r *YaegiREPL) ClearLLMCalls()
- func (r *YaegiREPL) ClearSubmit()
- func (r *YaegiREPL) ContextInfo() string
- func (r *YaegiREPL) Execute(ctx context.Context, code string) (result *ExecutionResult, err error)
- func (r *YaegiREPL) Final() string
- func (r *YaegiREPL) GetAsyncQuery(handleID string) (*AsyncQueryHandle, bool)
- func (r *YaegiREPL) GetContextIndex() *ContextIndex
- func (r *YaegiREPL) GetLLMCalls() []LLMCall
- func (r *YaegiREPL) GetLocals() map[string]any
- func (r *YaegiREPL) GetSubmitOutput() map[string]any
- func (r *YaegiREPL) GetVariable(name string) (string, error)
- func (r *YaegiREPL) GetVariableMetadata() []REPLVariable
- func (r *YaegiREPL) HasFinal() bool
- func (r *YaegiREPL) HasSubmit() bool
- func (r *YaegiREPL) IndexContext(ctx context.Context) error
- func (r *YaegiREPL) InjectSymbols(symbols map[string]reflect.Value) error
- func (r *YaegiREPL) LoadContext(payload any) error
- func (r *YaegiREPL) PendingAsyncQueries() int
- func (r *YaegiREPL) QueryAsync(prompt string) *AsyncQueryHandle
- func (r *YaegiREPL) QueryBatchedAsync(prompts []string) *AsyncBatchHandle
- func (r *YaegiREPL) Reset() error
- func (r *YaegiREPL) SetChunkConfig(config ChunkConfig)
- func (r *YaegiREPL) SetContext(ctx context.Context)
- func (r *YaegiREPL) SetContextInfoPreviewChars(n int)
- func (r *YaegiREPL) SetEmbeddingFunc(fn EmbeddingFunc)
- func (r *YaegiREPL) SetMaxFullContextQueryChars(n int)
- func (r *YaegiREPL) SetSubmitSchema(schema map[string]OutputFieldSpec)
- func (r *YaegiREPL) SetVariable(name, value string) error
- func (r *YaegiREPL) WaitAllAsyncQueries()
Constants ¶
const (
    TraceMetadataIterations                    = "iterations"
    TraceMetadataTerminationCause              = "termination_cause"
    TraceMetadataAdaptiveIterationEnabled      = "adaptive_iteration_enabled"
    TraceMetadataCompactIterationInstructions  = "compact_iteration_instructions"
    TraceMetadataUseIterationDemos             = "use_iteration_demos"
    TraceMetadataMaxIterations                 = "max_iterations"
    TraceMetadataMaxTokens                     = "max_tokens"
    TraceMetadataAdaptiveBaseIterations        = "adaptive_base_iterations"
    TraceMetadataAdaptiveMaxIterations         = "adaptive_max_iterations"
    TraceMetadataAdaptiveConfidenceThreshold   = "adaptive_confidence_threshold"
    TraceMetadataContextPolicyPreset           = "context_policy_preset"
    TraceMetadataSubRLMMaxDirectCalls          = "sub_rlm_max_direct_calls"
    TraceMetadataSubRLMMaxTotalCalls           = "sub_rlm_max_total_calls"
    TraceMetadataSubLLMCallCount               = "sub_llm_call_count"
    TraceMetadataSubRLMCallCount               = "sub_rlm_call_count"
    TraceMetadataConfidenceSignals             = "confidence_signals"
    TraceMetadataHistoryCompressions           = "history_compressions"
    TraceMetadataRootPromptMeanTokens          = "root_prompt_mean_tokens"
    TraceMetadataRootPromptMaxTokens           = "root_prompt_max_tokens"
)
Variables ¶
Functions ¶
func ChunkAnalysisSignature ¶
ChunkAnalysisSignature for analyzing individual chunks of large contexts.
func CompactIterationSignature ¶ added in v0.76.0
CompactIterationSignature is a shorter runtime-oriented variant of IterationSignature. It keeps the same schema but substantially reduces repeated static prompt overhead.
func ContextMetadata ¶
ContextMetadata returns a string describing the context.
func DefaultIterationInstruction ¶ added in v0.80.0
DefaultIterationInstruction returns the built-in iteration instruction for the given mode.
func DefaultOuterInstruction ¶ added in v0.80.0
func DefaultOuterInstruction() string
DefaultOuterInstruction returns the built-in outer RLM instruction.
func FindCodeBlocks ¶
FindCodeBlocks extracts all ```go or ```repl code blocks from the LLM response. Returns an empty slice if no code blocks are found.
func FormatExecutionResult ¶
func FormatExecutionResult(result *ExecutionResult) string
FormatExecutionResult formats an execution result for display.
func IterationDemos ¶
IterationDemos provides few-shot examples for the iteration module.
func IterationSignature ¶
IterationSignature defines the signature for each RLM iteration. This powers the inner loop where the LLM decides what to do next.
func RLMSignature ¶
RLMSignature creates the main RLM module signature. This is the outer interface: takes context + query, returns answer.
func RLMSignatureWithInstruction ¶ added in v0.80.0
RLMSignatureWithInstruction creates the outer RLM signature with an explicit instruction override.
func SubQueryDemos ¶
SubQueryDemos provides few-shot examples for sub-LLM queries.
func SubQuerySignature ¶
SubQuerySignature defines the signature for sub-LLM queries. This is used by Query() and QueryBatched() internally.
func SynthesisSignature ¶
SynthesisSignature for combining results from multiple chunk analyses.
Types ¶
type AdaptiveIterationConfig ¶ added in v0.75.1
type AdaptiveIterationConfig struct {
// Enabled turns on adaptive iteration (default: false).
Enabled bool
// BaseIterations is the base number of iterations before context scaling.
// Default: 10.
BaseIterations int
// MaxIterations caps the total iterations regardless of context size.
// Default: 50.
MaxIterations int
// ContextScaleFactor determines how much context size increases iterations.
// iterations = BaseIterations + (contextSize / ContextScaleFactor)
// Default: 100000 (100KB per additional iteration).
ContextScaleFactor int
// EnableEarlyTermination allows early exit when model signals confidence.
// Default: true.
EnableEarlyTermination bool
// ConfidenceThreshold is the number of confidence signals needed for early termination.
// Default: 1.
ConfidenceThreshold int
// ConfidenceDetector is a custom function to detect confidence signals in responses.
// If nil, a default heuristic based on FINAL markers is used.
// The function returns true if the response indicates high confidence.
ConfidenceDetector func(response string) bool
}
AdaptiveIterationConfig configures adaptive iteration behavior.
func DefaultAdaptiveIterationConfig ¶ added in v0.80.0
func DefaultAdaptiveIterationConfig() AdaptiveIterationConfig
DefaultAdaptiveIterationConfig returns the built-in adaptive iteration policy.
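A sketch of tightening the adaptive policy on an existing *RLM (r; see New and WithOptions below). The field values are illustrative only.

cfg := rlm.DefaultAdaptiveIterationConfig()
cfg.BaseIterations = 8         // start lower than the default
cfg.MaxIterations = 30         // hard cap regardless of context size
cfg.ContextScaleFactor = 50000 // roughly one extra iteration per 50KB of context
cfg.ConfidenceThreshold = 2    // require two confidence signals before early exit

r = r.WithOptions(rlm.WithAdaptiveIterationConfig(cfg))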
type AnalyzerConfig ¶ added in v0.75.1
type AnalyzerConfig struct {
// SmallContextThreshold is the size below which no chunking is needed.
SmallContextThreshold int
// TokenEstimateRatio is the character-to-token ratio estimate.
TokenEstimateRatio float64
}
AnalyzerConfig holds analyzer configuration options.
func DefaultAnalyzerConfig ¶ added in v0.75.1
func DefaultAnalyzerConfig() AnalyzerConfig
DefaultAnalyzerConfig returns the default analyzer configuration.
type AsyncBatchHandle ¶ added in v0.75.1
type AsyncBatchHandle struct {
// contains filtered or unexported fields
}
AsyncBatchHandle represents a batch of pending async queries.
func (*AsyncBatchHandle) CompletedCount ¶ added in v0.75.1
func (bh *AsyncBatchHandle) CompletedCount() int
CompletedCount returns the number of completed queries.
func (*AsyncBatchHandle) Handles ¶ added in v0.75.1
func (bh *AsyncBatchHandle) Handles() []*AsyncQueryHandle
Handles returns the individual query handles.
func (*AsyncBatchHandle) Ready ¶ added in v0.75.1
func (bh *AsyncBatchHandle) Ready() bool
Ready returns true if all queries have completed.
func (*AsyncBatchHandle) TotalCount ¶ added in v0.75.1
func (bh *AsyncBatchHandle) TotalCount() int
TotalCount returns the total number of queries in the batch.
func (*AsyncBatchHandle) WaitAll ¶ added in v0.75.1
func (bh *AsyncBatchHandle) WaitAll() ([]string, error)
WaitAll blocks until all queries complete and returns all results.
type AsyncQueryHandle ¶ added in v0.75.1
type AsyncQueryHandle struct {
// contains filtered or unexported fields
}
AsyncQueryHandle represents a pending async query.
func (*AsyncQueryHandle) Duration ¶ added in v0.75.1
func (h *AsyncQueryHandle) Duration() time.Duration
Duration returns the time elapsed since the query was started.
func (*AsyncQueryHandle) Error ¶ added in v0.75.1
func (h *AsyncQueryHandle) Error() error
Error returns any error that occurred during the query.
func (*AsyncQueryHandle) ID ¶ added in v0.75.1
func (h *AsyncQueryHandle) ID() string
ID returns the unique identifier for this async query.
func (*AsyncQueryHandle) Ready ¶ added in v0.75.1
func (h *AsyncQueryHandle) Ready() bool
Ready returns true if the result is available.
func (*AsyncQueryHandle) Result ¶ added in v0.75.1
func (h *AsyncQueryHandle) Result() (string, bool)
Result returns the result if ready, or empty string if not.
func (*AsyncQueryHandle) Wait ¶ added in v0.75.1
func (h *AsyncQueryHandle) Wait() (string, error)
Wait blocks until the query completes and returns the result.
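Async handles are obtained from YaegiREPL.QueryAsync and QueryBatchedAsync (documented below). A sketch of the waiting pattern, assuming repl is a *YaegiREPL with a sub-client configured; the ordering of batch results is an assumption.

h := repl.QueryAsync("Summarize the error-handling section")
// ... issue more queries or run other REPL code while the sub-LLM works ...
answer, err := h.Wait() // blocks until the sub-LLM responds
if err != nil {
    // handle the sub-query failure
}
fmt.Println(h.ID(), h.Duration(), answer)

batch := repl.QueryBatchedAsync([]string{"List all services", "List all error codes"})
results, err := batch.WaitAll() // results assumed to be returned in prompt order
if err != nil {
    // handle batch failure
}
fmt.Println(len(results), "batched answers")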
type Chunk ¶ added in v0.75.2
type Chunk struct {
ID int // Unique identifier
Content string // The actual text content
StartLine int // Starting line number (1-indexed)
EndLine int // Ending line number (1-indexed)
Summary string // Optional summary of the chunk
Embedding []float32 // Embedding vector for semantic search
}
Chunk represents a segment of the context with metadata.
type ChunkConfig ¶ added in v0.75.2
type ChunkConfig struct {
// MaxChunkSize is the maximum number of characters per chunk
MaxChunkSize int
// OverlapSize is the number of characters to overlap between chunks
OverlapSize int
// ChunkByLines if true, chunks by line count instead of character count
ChunkByLines bool
// LinesPerChunk when ChunkByLines is true
LinesPerChunk int
}
ChunkConfig configures how content is chunked.
func DefaultChunkConfig ¶ added in v0.75.2
func DefaultChunkConfig() ChunkConfig
DefaultChunkConfig returns sensible defaults for chunking.
type ChunkStrategy ¶ added in v0.75.1
type ChunkStrategy string
ChunkStrategy represents the recommended chunking approach.
const ( // StrategyNone means no chunking needed - context is small enough. StrategyNone ChunkStrategy = "none" // StrategyFixed means split into fixed-size chunks. StrategyFixed ChunkStrategy = "fixed" // StrategyDelimiter means split on natural delimiters. StrategyDelimiter ChunkStrategy = "delimiter" // StrategyHierarchical means split based on document structure. StrategyHierarchical ChunkStrategy = "hierarchical" // StrategySemantic means split based on semantic units. StrategySemantic ChunkStrategy = "semantic" )
type CodeBlock ¶
type CodeBlock struct {
Code string
Result ExecutionResult
}
CodeBlock represents an extracted and executed code block.
type CompletionResult ¶
type CompletionResult struct {
Response string
Iterations int
Duration time.Duration
Usage core.TokenUsage
}
CompletionResult represents the final result of an RLM completion.
type Config ¶
type Config struct {
// OuterInstruction overrides the top-level RLM module instruction.
// Empty string uses the default outer instruction.
OuterInstruction string
// IterationInstruction overrides the iteration module instruction.
// Empty string uses the compact/full built-in instruction based on config.
IterationInstruction string
// MaxIterations is the maximum number of iteration loops (default: 30).
MaxIterations int
// MaxTokens is the maximum cumulative token budget across root, sub-LLM,
// and nested sub-RLM calls for a single completion. Zero disables the limit.
MaxTokens int
// Verbose enables verbose logging.
Verbose bool
// Timeout is the maximum duration for the entire RLM completion.
// Zero means no timeout (default).
Timeout time.Duration
// TraceDir is the directory for RLM trace logs (JSONL format compatible with rlm-viewer).
// Empty string disables tracing.
TraceDir string
// UseIterationDemos enables few-shot demos for the iteration module.
// Disabled by default to avoid repeating large static prompt examples on every iteration.
UseIterationDemos bool
// CompactIterationInstructions uses a shorter runtime instruction for the
// iteration module. Enabled by default to reduce repeated prompt overhead.
CompactIterationInstructions bool
// HistoryCompression configures incremental history compression.
// When enabled, older iterations are replayed as checkpoint summaries while
// recent iterations stay verbatim.
HistoryCompression *HistoryCompressionConfig
// ContextPolicy controls how iteration history is replayed into subsequent
// prompts. "full" replays everything verbatim, "checkpointed" summarizes
// older entries and keeps recent entries verbatim, and "adaptive" switches
// between the two based on history size and compression settings.
ContextPolicy ContextPolicyPreset
// AdaptiveCheckpointThreshold is the history-entry count where the adaptive
// context policy switches from full replay to checkpointed replay when no
// explicit HistoryCompression policy is configured. Values <= 0 use the
// built-in default.
AdaptiveCheckpointThreshold int
// AdaptiveIteration configures adaptive iteration strategy.
// When enabled, max iterations are dynamically calculated based on context size.
AdaptiveIteration *AdaptiveIterationConfig
// SubRLM configures nested sub-RLM behavior.
// When enabled, allows spawning nested RLM loops that share REPL state.
SubRLM *SubRLMConfig
// OutputTruncation configures output truncation settings.
// Controls max lengths for execution output, variable previews, and history entries.
OutputTruncation *OutputTruncationConfig
// ContextInfoPreviewChars controls how many characters of the loaded context are
// exposed in context_info metadata. Set to 0 to disable raw preview text.
ContextInfoPreviewChars int
// MaxFullContextQueryChars limits Query()/QueryBatched() calls that auto-prepend the
// full loaded context. Zero disables the guardrail.
MaxFullContextQueryChars int
// OnProgress is called at the start of each iteration with progress info.
// Can be used to display progress to users or implement custom termination logic.
OnProgress func(progress IterationProgress)
// REPLSetup is called after REPL creation and context loading, but before
// the iteration loop starts. Use this to inject additional REPL symbols.
REPLSetup func(repl *YaegiREPL) error
}
Config holds RLM configuration.
func DefaultConfig ¶
func DefaultConfig() Config
DefaultConfig returns the default RLM configuration.
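Rather than mutating Config directly, most callers compose Option values on top of DefaultConfig at construction time. A sketch combining several of the options listed below; rootLLM and subClient are assumptions, and the values are illustrative.

r := rlm.New(rootLLM, subClient,
    rlm.WithMaxIterations(40),
    rlm.WithMaxTokens(200000),
    rlm.WithTimeout(2*time.Minute),
    rlm.WithTraceDir("./rlm-traces"),
    rlm.WithContextPolicyPreset(rlm.ContextPolicyAdaptive),
    rlm.WithHistoryCompression(3, 500),
)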
type ContextAnalysis ¶ added in v0.75.1
type ContextAnalysis struct {
// Type is the detected primary content type.
Type ContextType
// SecondaryTypes are additional types detected in mixed content.
SecondaryTypes []ContextType
// Size is the total size in bytes.
Size int
// EstimatedTokens is the estimated token count (rough approximation).
EstimatedTokens int
// RecommendedStrategy is the suggested chunking strategy.
RecommendedStrategy ChunkStrategy
// RecommendedChunkSize is the suggested chunk size in bytes.
RecommendedChunkSize int
// Delimiters are natural delimiters found in the content.
Delimiters []string
// Structure provides hints about the content structure.
Structure StructureHints
// LLMHint is a string hint to prepend to LLM prompts about the context.
LLMHint string
}
ContextAnalysis contains the analysis results for a context payload.
func AnalyzeContext ¶ added in v0.75.1
func AnalyzeContext(payload any) *ContextAnalysis
AnalyzeContext examines the context payload and returns analysis results.
func AnalyzeContextWithConfig ¶ added in v0.75.1
func AnalyzeContextWithConfig(payload any, cfg AnalyzerConfig) *ContextAnalysis
AnalyzeContextWithConfig performs analysis with custom configuration.
func (*ContextAnalysis) IsLargeContext ¶ added in v0.75.1
func (a *ContextAnalysis) IsLargeContext() bool
IsLargeContext returns true if the context is considered large and would benefit from chunking.
func (*ContextAnalysis) ShouldUseBatching ¶ added in v0.75.1
func (a *ContextAnalysis) ShouldUseBatching() bool
ShouldUseBatching returns true if the context is large enough to warrant batched queries.
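A sketch of inspecting an analysis before choosing a processing strategy; payload is any supported context value (assumption), and the printed fields match the struct above.

analysis := rlm.AnalyzeContext(payload)
fmt.Printf("type=%s size=%dB est_tokens=%d strategy=%s\n",
    analysis.Type, analysis.Size, analysis.EstimatedTokens, analysis.RecommendedStrategy)

if analysis.IsLargeContext() {
    // Large contexts benefit from chunked exploration and batched sub-queries.
}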
type ContextIndex ¶ added in v0.75.2
type ContextIndex struct {
// contains filtered or unexported fields
}
ContextIndex provides efficient access to context slices. It supports both line-based access and semantic search via embeddings.
func NewContextIndex ¶ added in v0.75.2
func NewContextIndex(content string, config ChunkConfig) *ContextIndex
NewContextIndex creates a new context index from raw content.
func (*ContextIndex) AllChunks ¶ added in v0.75.2
func (idx *ContextIndex) AllChunks() []Chunk
AllChunks returns all chunks.
func (*ContextIndex) ChunkCount ¶ added in v0.75.2
func (idx *ContextIndex) ChunkCount() int
ChunkCount returns the number of chunks.
func (*ContextIndex) FindRelevant ¶ added in v0.75.2
FindRelevant returns the top-k most relevant chunks for a query. If embeddings are not indexed, returns chunks based on keyword matching.
func (*ContextIndex) GetChunk ¶ added in v0.75.2
func (idx *ContextIndex) GetChunk(id int) (Chunk, bool)
GetChunk returns a chunk by its ID.
func (*ContextIndex) GetContext ¶ added in v0.75.2
func (idx *ContextIndex) GetContext(startLine, endLine int) string
GetContext returns content between start and end lines (1-indexed, inclusive).
func (*ContextIndex) GetRawContent ¶ added in v0.75.2
func (idx *ContextIndex) GetRawContent() string
GetRawContent returns the raw content string.
func (*ContextIndex) IndexEagerly ¶ added in v0.75.2
func (idx *ContextIndex) IndexEagerly(ctx context.Context) error
IndexEagerly computes embeddings for all chunks immediately.
func (*ContextIndex) IsIndexed ¶ added in v0.75.2
func (idx *ContextIndex) IsIndexed() bool
IsIndexed returns whether embeddings have been computed.
func (*ContextIndex) LineCount ¶ added in v0.75.2
func (idx *ContextIndex) LineCount() int
LineCount returns the number of lines.
func (*ContextIndex) SetChunkSummary ¶ added in v0.75.2
func (idx *ContextIndex) SetChunkSummary(chunkID int, summary string)
SetChunkSummary sets a summary for a chunk.
func (*ContextIndex) SetEmbeddingFunc ¶ added in v0.75.2
func (idx *ContextIndex) SetEmbeddingFunc(fn EmbeddingFunc)
SetEmbeddingFunc sets the function used to compute embeddings.
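A sketch of building and querying an index. rawText, ctx, and embedFn are assumptions; embedFn must satisfy EmbeddingFunc, whose exact signature is not reproduced in this listing. Without an embedding function, FindRelevant falls back to keyword matching.

idx := rlm.NewContextIndex(rawText, rlm.DefaultChunkConfig())
idx.SetEmbeddingFunc(embedFn) // optional

if err := idx.IndexEagerly(ctx); err != nil {
    // embeddings failed; keyword fallback still works
}

chunks, err := idx.FindRelevant(ctx, "timeout handling", 3)
if err != nil {
    // handle lookup error
}
for _, c := range chunks {
    fmt.Printf("chunk %d (lines %d-%d)\n", c.ID, c.StartLine, c.EndLine)
    fmt.Println(idx.GetContext(c.StartLine, c.EndLine))
}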
type ContextPolicyPreset ¶ added in v0.83.1
type ContextPolicyPreset string
ContextPolicyPreset controls how structured iteration history is replayed back to the root model on subsequent iterations.
const (
    ContextPolicyFull         ContextPolicyPreset = "full"
    ContextPolicyCheckpointed ContextPolicyPreset = "checkpointed"
    ContextPolicyAdaptive     ContextPolicyPreset = "adaptive"
)
type ContextType ¶ added in v0.75.1
type ContextType string
ContextType represents the detected type of content.
const (
    // TypeUnknown is used when the content type cannot be determined.
    TypeUnknown ContextType = "unknown"
    // TypeJSON indicates JSON-structured content.
    TypeJSON ContextType = "json"
    // TypeMarkdown indicates markdown-formatted content.
    TypeMarkdown ContextType = "markdown"
    // TypeCode indicates source code content.
    TypeCode ContextType = "code"
    // TypePlainText indicates plain text content.
    TypePlainText ContextType = "plaintext"
    // TypeCSV indicates CSV/tabular data.
    TypeCSV ContextType = "csv"
    // TypeXML indicates XML-structured content.
    TypeXML ContextType = "xml"
    // TypeLog indicates log file content.
    TypeLog ContextType = "log"
    // TypeMixed indicates mixed content with multiple types.
    TypeMixed ContextType = "mixed"
)
type EmbeddingFunc ¶ added in v0.75.2
EmbeddingFunc computes embeddings for text inputs.
type ExecutionResult ¶
ExecutionResult represents the result of executing code in the REPL.
type FinalAnswer ¶
type FinalAnswer struct {
Type FinalAnswerType
Content string
}
FinalAnswer represents a detected FINAL or FINAL_VAR signal.
func FindFinalAnswer ¶
func FindFinalAnswer(text string) *FinalAnswer
FindFinalAnswer detects FINAL() or FINAL_VAR() signals in the LLM response. Returns nil if no final answer is found. Note: Code blocks are filtered out first to avoid false positives when FINAL appears in code examples.
type FinalAnswerType ¶
type FinalAnswerType string
FinalAnswerType indicates whether the answer is direct or a variable reference.
const (
    // FinalTypeDirect indicates a direct value like FINAL(42).
    FinalTypeDirect FinalAnswerType = "FINAL"
    // FinalTypeVariable indicates a variable reference like FINAL_VAR(answer).
    FinalTypeVariable FinalAnswerType = "FINAL_VAR"
)
type HistoryCompressionConfig ¶ added in v0.75.1
type HistoryCompressionConfig struct {
// Enabled turns on checkpointed replay (default: false).
Enabled bool
// VerbatimIterations is the number of recent iterations to keep verbatim.
// Older iterations will be summarized. Default: 3.
VerbatimIterations int
// MaxSummaryTokens is the approximate maximum tokens for summarized history.
// Default: 500.
MaxSummaryTokens int
}
HistoryCompressionConfig configures checkpointed replay of older iterations.
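A sketch of enabling checkpointed replay on an existing *RLM (r) via the corresponding option; the iteration and token values are illustrative.

// Keep the last 3 iterations verbatim; summarize older ones into roughly 400 tokens.
r = r.WithOptions(rlm.WithHistoryCompression(3, 400))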
type HistoryEntry ¶ added in v0.75.2
type HistoryEntry struct {
Iteration int // 1-indexed iteration number
Timestamp time.Time // When this iteration started
Action string // Action type: explore, query, compute, final, subrlm
Code string // Code that was executed (if any)
Output string // Execution output (truncated)
Duration time.Duration
Success bool
Error string
SubRLM *SubRLMEntry // Non-nil if this was a subrlm action
}
HistoryEntry represents a single iteration in the RLM history. This provides immutable, structured history for debugging/checkpointing.
type ImmutableHistory ¶ added in v0.75.2
type ImmutableHistory struct {
// contains filtered or unexported fields
}
ImmutableHistory provides append-only history for RLM iterations.
func NewImmutableHistory ¶ added in v0.75.2
func NewImmutableHistory() *ImmutableHistory
NewImmutableHistory creates a new empty history.
func (*ImmutableHistory) Append ¶ added in v0.75.2
func (h *ImmutableHistory) Append(entry HistoryEntry)
Append adds a new entry to the history.
func (*ImmutableHistory) CountAction ¶ added in v0.83.1
func (h *ImmutableHistory) CountAction(action string) int
CountAction returns the number of entries with the provided action.
func (*ImmutableHistory) Entries ¶ added in v0.75.2
func (h *ImmutableHistory) Entries() []HistoryEntry
Entries returns all history entries (immutable copy).
func (*ImmutableHistory) Len ¶ added in v0.75.2
func (h *ImmutableHistory) Len() int
Len returns the number of entries.
func (*ImmutableHistory) RenderCheckpointed ¶ added in v0.83.1
func (h *ImmutableHistory) RenderCheckpointed(maxEntryLen, verbatimEntries, maxSummaryTokens int) string
RenderCheckpointed summarizes older entries and keeps recent entries verbatim.
func (*ImmutableHistory) String ¶ added in v0.75.2
func (h *ImmutableHistory) String(maxEntryLen int) string
String converts history to a string for LLM prompts. Uses configurable truncation settings.
type IterationProgress ¶ added in v0.75.1
type IterationProgress struct {
// CurrentIteration is the current iteration number (1-indexed).
CurrentIteration int
// MaxIterations is the computed maximum iterations for this request.
MaxIterations int
// ConfidenceSignals counts how many times the model has signaled confidence.
ConfidenceSignals int
// HasFinalAttempt indicates the model tried to give a final answer.
HasFinalAttempt bool
// ContextSize is the size of the input context in bytes.
ContextSize int
// RootPromptTokens is the prompt token count for this iteration's root LLM call,
// as reported by the provider via LLMResponse.Usage.PromptTokens.
// This is the per-call value (not cumulative) — divide by context window size
// to get context_fill_ratio for this iteration.
RootPromptTokens int
}
IterationProgress tracks progress and confidence during iteration.
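A sketch of surfacing progress to the caller via WithProgressHandler (documented below); r is an existing *RLM.

r = r.WithOptions(rlm.WithProgressHandler(func(p rlm.IterationProgress) {
    fmt.Printf("iteration %d/%d confidence=%d root_prompt_tokens=%d\n",
        p.CurrentIteration, p.MaxIterations, p.ConfidenceSignals, p.RootPromptTokens)
}))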
type LLMCall ¶
type LLMCall struct {
Prompt string `json:"prompt"`
Response string `json:"response"`
Duration time.Duration `json:"duration"`
PromptTokens int `json:"prompt_tokens"`
CompletionTokens int `json:"completion_tokens"`
}
LLMCall represents a sub-LLM call made from within the REPL.
type LLMSubClient ¶
type LLMSubClient struct {
// contains filtered or unexported fields
}
LLMSubClient adapts a prompt-capable model to the SubLLMClient interface. This allows any compatible dspy-go model to be used for sub-queries in RLM.
func NewLLMSubClient ¶
func NewLLMSubClient(llm core.PromptModel) *LLMSubClient
NewLLMSubClient creates a SubLLMClient from a prompt-capable model.
func (*LLMSubClient) Query ¶
func (c *LLMSubClient) Query(ctx context.Context, prompt string) (QueryResponse, error)
Query implements SubLLMClient.
func (*LLMSubClient) QueryBatched ¶
func (c *LLMSubClient) QueryBatched(ctx context.Context, prompts []string) ([]QueryResponse, error)
QueryBatched implements SubLLMClient with concurrent queries.
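A sketch of using a cheaper model for sub-queries while keeping a stronger root model for orchestration; both model values are assumptions.

sub := rlm.NewLLMSubClient(cheapModel) // cheapModel is assumed to implement core.PromptModel
r := rlm.New(strongLLM, sub, rlm.WithMaxIterations(25))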
type Option ¶
type Option func(*Config)
Option configures the RLM.
func WithAdaptiveCheckpointThreshold ¶ added in v0.83.1
WithAdaptiveCheckpointThreshold sets the history-entry count where adaptive replay switches from full to checkpointed mode.
func WithAdaptiveIteration ¶ added in v0.75.1
func WithAdaptiveIteration() Option
WithAdaptiveIteration enables adaptive iteration strategy with default configuration. This dynamically adjusts max iterations based on context size and enables early termination when the model signals confidence.
func WithAdaptiveIterationConfig ¶ added in v0.75.1
func WithAdaptiveIterationConfig(cfg AdaptiveIterationConfig) Option
WithAdaptiveIterationConfig enables adaptive iteration with custom configuration.
func WithCompactIterationInstructions ¶ added in v0.76.0
WithCompactIterationInstructions enables or disables the shorter runtime iteration instruction.
func WithContextInfoPreviewChars ¶ added in v0.80.0
WithContextInfoPreviewChars sets the maximum number of raw context characters exposed in context_info metadata. Use 0 to disable preview text entirely.
func WithContextPolicyPreset ¶ added in v0.83.1
func WithContextPolicyPreset(preset ContextPolicyPreset) Option
WithContextPolicyPreset sets how structured history is replayed into prompts.
func WithHistoryCompression ¶ added in v0.75.1
WithHistoryCompression enables checkpointed replay for older iterations. verbatimIterations is how many recent iterations to keep in full (default: 3). maxSummaryTokens is the approximate max tokens for the summary (default: 500).
func WithIterationDemos ¶ added in v0.76.0
WithIterationDemos enables or disables runtime iteration demos.
func WithMaxFullContextQueryChars ¶ added in v0.80.0
WithMaxFullContextQueryChars limits Query()/QueryBatched() calls that auto-prepend the full loaded context. Use 0 to disable the guardrail.
func WithMaxIterations ¶
WithMaxIterations sets the maximum number of iterations. Values <= 0 are ignored and the default is used.
func WithMaxTokens ¶ added in v0.76.0
WithMaxTokens sets the maximum cumulative token budget for a completion. Values <= 0 disable the budget.
func WithOutputTruncation ¶ added in v0.75.2
func WithOutputTruncation() Option
WithOutputTruncation enables output truncation with default configuration.
func WithOutputTruncationConfig ¶ added in v0.75.2
func WithOutputTruncationConfig(cfg OutputTruncationConfig) Option
WithOutputTruncationConfig enables output truncation with custom configuration.
func WithProgressHandler ¶ added in v0.75.1
func WithProgressHandler(handler func(IterationProgress)) Option
WithProgressHandler sets a callback for iteration progress updates.
func WithREPLSetup ¶ added in v0.76.0
WithREPLSetup sets a callback that runs after REPL creation + context load and before the iteration loop begins.
func WithSubRLM ¶ added in v0.75.2
func WithSubRLM() Option
WithSubRLM enables sub-RLM support with default configuration. Sub-RLMs allow nested RLM loops that share REPL state for complex multi-step analysis.
func WithSubRLMConfig ¶ added in v0.75.2
func WithSubRLMConfig(cfg SubRLMConfig) Option
WithSubRLMConfig enables sub-RLM support with custom configuration.
func WithTimeout ¶
WithTimeout sets the maximum duration for the completion.
func WithTraceDir ¶
WithTraceDir enables JSONL tracing to the specified directory. The trace files are compatible with rlm-go's rlm-viewer command.
type OutputFieldSpec ¶ added in v0.75.2
type OutputFieldSpec struct {
Type string // Expected type: "string", "int", "float", "bool", "[]string", "map"
Required bool // Whether the field is required
Description string // Description of what this field should contain
}
OutputFieldSpec defines expected type and validation for SUBMIT output fields.
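A sketch of declaring a SUBMIT schema on a REPL (repl is a *YaegiREPL; the field names are illustrative). See YaegiREPL.SetSubmitSchema below.

repl.SetSubmitSchema(map[string]rlm.OutputFieldSpec{
    "answer":     {Type: "string", Required: true, Description: "final answer text"},
    "confidence": {Type: "float", Required: false, Description: "confidence between 0 and 1"},
})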
type OutputTruncationConfig ¶ added in v0.75.2
type OutputTruncationConfig struct {
// Enabled turns on output truncation (default: true).
Enabled bool
// MaxOutputLen is the maximum characters in execution output (default: 5000).
MaxOutputLen int
// MaxVarPreviewLen is the maximum characters in variable preview (default: 100).
MaxVarPreviewLen int
// MaxHistoryEntryLen is the maximum characters per history entry (default: 1000).
MaxHistoryEntryLen int
}
OutputTruncationConfig configures output truncation settings.
func DefaultOutputTruncationConfig ¶ added in v0.75.2
func DefaultOutputTruncationConfig() OutputTruncationConfig
DefaultOutputTruncationConfig returns the default output truncation configuration.
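A sketch of tightening truncation limits on an existing *RLM (r); the values are illustrative.

r = r.WithOptions(rlm.WithOutputTruncationConfig(rlm.OutputTruncationConfig{
    Enabled:            true,
    MaxOutputLen:       2000,
    MaxVarPreviewLen:   80,
    MaxHistoryEntryLen: 600,
}))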
type QueryResponse ¶
QueryResponse contains the LLM response with usage metadata.
type REPLEnvironment ¶
type REPLEnvironment interface {
// LoadContext loads the context payload into the REPL environment.
LoadContext(payload any) error
// Execute runs Go code in the interpreter and returns the result.
Execute(ctx context.Context, code string) (*ExecutionResult, error)
// GetVariable retrieves a variable value from the interpreter.
GetVariable(name string) (string, error)
// SetVariable sets a variable in the interpreter.
SetVariable(name, value string) error
// Reset clears the interpreter state.
Reset() error
// ContextInfo returns metadata about the loaded context.
ContextInfo() string
// GetLocals extracts commonly used variables from the interpreter.
GetLocals() map[string]any
// GetVariableMetadata returns rich metadata about REPL variables.
GetVariableMetadata() []REPLVariable
// HasFinal returns true if FINAL() or FINAL_VAR() has been called.
// This provides state-verified completion detection (Nightjar Algorithm 1 Gap 1).
HasFinal() bool
// Final returns the value passed to FINAL() or FINAL_VAR().
// Returns empty string if not set.
Final() string
// ClearFinal resets the final state.
ClearFinal()
// HasSubmit returns true if SUBMIT() has been called with valid output.
HasSubmit() bool
// GetSubmitOutput returns the submitted output fields.
GetSubmitOutput() map[string]any
// SetSubmitSchema sets the expected output schema for SUBMIT validation.
SetSubmitSchema(schema map[string]OutputFieldSpec)
// GetLLMCalls returns the LLM calls made during code execution.
GetLLMCalls() []LLMCall
}
REPLEnvironment defines the interface for a REPL that can execute code and make LLM queries.
type REPLVariable ¶ added in v0.75.2
type REPLVariable struct {
Name string // Variable name
Value any // The actual value
Type string // Type description (string, int, []string, map, etc.)
Length int // Length/size for strings and collections (-1 if N/A)
Preview string // Truncated representation for display
IsImportant bool // True for key variables (result, answer, etc.)
}
REPLVariable provides rich metadata about a variable in the REPL.
type RLM ¶
type RLM struct {
core.BaseModule
// contains filtered or unexported fields
}
RLM is the main Recursive Language Model module implementation. It enables LLMs to explore large contexts programmatically through a Go REPL, making iterative queries to sub-LLMs until a final answer is reached.
func New ¶
func New(rootLLM core.LLM, subLLMClient SubLLMClient, opts ...Option) *RLM
New creates a new RLM module instance with separate LLMs. rootLLM is used for the main orchestration loop. subLLMClient is used for Query/QueryBatched calls from within the REPL. For most use cases, use NewFromLLM instead, which uses the same LLM for both.
func NewFromLLM ¶
NewFromLLM creates a new RLM module using a single core.LLM for both root orchestration and sub-queries. This is the recommended constructor for most use cases.
func (*RLM) Complete ¶
func (r *RLM) Complete(ctx context.Context, contextPayload any, query string) (*CompletionResult, error)
Complete runs an RLM completion. contextPayload is the context data (string, map, or slice). query is the user's question.
func (*RLM) CompleteWithTrace ¶ added in v0.80.0
func (r *RLM) CompleteWithTrace(ctx context.Context, contextPayload any, query string) (*CompletionResult, *RLMTrace, error)
CompleteWithTrace runs an RLM completion and returns a structured trace of the execution.
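A sketch of walking the returned trace; ctx, payload, and the query are assumptions.

result, trace, err := r.CompleteWithTrace(ctx, payload, "Which services report timeouts?")
if err != nil {
    // handle completion failure
}
for _, step := range trace.Steps {
    fmt.Printf("step %d [%s] success=%v: %s\n", step.Index, step.Action, step.Success, step.Thought)
}
fmt.Printf("answer=%q iterations=%d cause=%s\n", result.Response, trace.Iterations, trace.TerminationCause)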
func (*RLM) Config ¶ added in v0.80.0
Config returns a defensive copy of the current RLM configuration.
func (*RLM) GetTokenTracker ¶
func (r *RLM) GetTokenTracker() *TokenTracker
GetTokenTracker returns the token tracker for inspecting usage.
func (*RLM) Process ¶
func (r *RLM) Process(ctx context.Context, inputs map[string]any, opts ...core.Option) (map[string]any, error)
Process implements the core.Module interface. It takes inputs with "context" and "query" fields and returns the answer.
func (*RLM) ProcessWithInterceptors ¶
func (r *RLM) ProcessWithInterceptors(ctx context.Context, inputs map[string]any, interceptors []core.ModuleInterceptor, opts ...core.Option) (map[string]any, error)
ProcessWithInterceptors executes the RLM module's logic with interceptor support.
func (*RLM) SetConfig ¶ added in v0.80.0
SetConfig applies a new RLM configuration and rebuilds prompt-bearing internals.
func (*RLM) WithOptions ¶
WithOptions applies additional options to the RLM module.
type RLMTrace ¶ added in v0.80.0
type RLMTrace struct {
Input map[string]any
Output map[string]any
Steps []RLMTraceStep
StartedAt time.Time
CompletedAt time.Time
ProcessingTime time.Duration
Iterations int
Usage core.TokenUsage
RootUsage core.TokenUsage
SubUsage core.TokenUsage
SubRLMUsage core.TokenUsage
RootSnapshots []RootIterationSnapshot
SubLLMCallCount int
SubRLMCallCount int
ConfidenceSignals int
CompressionCount int
TerminationCause string
Error string
// contains filtered or unexported fields
}
RLMTrace captures a structured RLM completion, including iterative steps.
type RLMTraceStep ¶ added in v0.80.0
type RLMTraceStep struct {
Index int
Thought string
Action string
Code string
SubQuery string
Observation string
Duration time.Duration
Success bool
Error string
}
RLMTraceStep captures one iteration of the RLM loop.
type RootIterationSnapshot ¶ added in v0.76.0
type RootIterationSnapshot struct {
Iteration int `json:"iteration"` // 1-indexed iteration number
PromptTokens int `json:"prompt_tokens"` // Root LLM prompt tokens for this iteration
CompletionTokens int `json:"completion_tokens"` // Root LLM completion tokens for this iteration
}
RootIterationSnapshot captures per-iteration root LLM prompt token counts. This is the key data needed to prove that RLM context stays bounded: if PromptTokens stays flat as iterations increase, context is not accumulating.
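A sketch of checking that root prompts stay bounded after a completion; r is an *RLM that has already run Complete, and contextWindow is an assumed model context limit.

snapshots := r.GetTokenTracker().GetRootSnapshots()
for _, s := range snapshots {
    // A flat PromptTokens curve across iterations indicates bounded context.
    fillRatio := float64(s.PromptTokens) / float64(contextWindow)
    fmt.Printf("iter %d: prompt=%d completion=%d fill=%.2f\n",
        s.Iteration, s.PromptTokens, s.CompletionTokens, fillRatio)
}
fmt.Println("max root prompt tokens:", r.GetTokenTracker().GetMaxRootPromptTokens())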
type StructureHints ¶ added in v0.75.1
type StructureHints struct {
// HasHeaders indicates if the content has section headers.
HasHeaders bool
// HeaderCount is the number of headers detected.
HeaderCount int
// HasCodeBlocks indicates if code blocks are present.
HasCodeBlocks bool
// CodeBlockCount is the number of code blocks detected.
CodeBlockCount int
// HasLists indicates if lists are present.
HasLists bool
// ListCount is the number of lists detected.
ListCount int
// HasTables indicates if tables are present.
HasTables bool
// NestingDepth is the maximum nesting depth for structured content.
NestingDepth int
// LineCount is the total number of lines.
LineCount int
// AvgLineLength is the average line length.
AvgLineLength int
}
StructureHints provides detailed structure information.
type SubLLMClient ¶
type SubLLMClient interface {
// Query makes a single LLM query.
Query(ctx context.Context, prompt string) (QueryResponse, error)
// QueryBatched makes concurrent LLM queries.
QueryBatched(ctx context.Context, prompts []string) ([]QueryResponse, error)
}
SubLLMClient defines the interface for making LLM calls from within the REPL.
type SubRLMCall ¶ added in v0.75.2
type SubRLMCall struct {
Query string `json:"query"`
Result string `json:"result"`
Iterations int `json:"iterations"`
Depth int `json:"depth"`
Duration time.Duration `json:"duration"`
PromptTokens int `json:"prompt_tokens"`
CompletionTokens int `json:"completion_tokens"`
}
SubRLMCall represents a nested sub-RLM invocation.
type SubRLMConfig ¶ added in v0.75.2
type SubRLMConfig struct {
// MaxDepth is the maximum nesting depth for sub-RLM calls (default: 3).
// A value of 1 means no nesting allowed, 2 means one level of nesting, etc.
MaxDepth int
// CurrentDepth tracks the current nesting level (0 = root RLM).
// This is set internally and should not be configured by users.
CurrentDepth int
// MaxIterationsPerSubRLM limits iterations for each sub-RLM call.
// Default: 10. Use 0 to inherit parent's max iterations.
MaxIterationsPerSubRLM int
// MaxDirectSubRLMCalls limits direct child sub-RLM invocations from one RLM
// node. Zero disables the budget.
MaxDirectSubRLMCalls int
// MaxTotalSubRLMCalls limits total sub-RLM invocations across the whole
// request tree. Zero disables the budget.
MaxTotalSubRLMCalls int
}
SubRLMConfig configures nested sub-RLM behavior.
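A sketch of bounding nested exploration on an existing *RLM (r); the values are illustrative, and CurrentDepth is managed internally so it is left at zero.

r = r.WithOptions(rlm.WithSubRLMConfig(rlm.SubRLMConfig{
    MaxDepth:               2, // one level of nesting below the root
    MaxIterationsPerSubRLM: 8,
    MaxDirectSubRLMCalls:   4,
    MaxTotalSubRLMCalls:    12,
}))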
type SubRLMEntry ¶ added in v0.75.2
type SubRLMEntry struct {
Query string // The sub-RLM query
Result string // The sub-RLM result
Iterations int // How many iterations the sub-RLM took
Duration time.Duration // How long the sub-RLM ran
}
SubRLMEntry captures sub-RLM invocation details.
type TokenTracker ¶
type TokenTracker struct {
// contains filtered or unexported fields
}
TokenTracker aggregates token usage across root LLM and sub-LLM calls.
func NewTokenTracker ¶
func NewTokenTracker() *TokenTracker
NewTokenTracker creates a new token tracker.
func (*TokenTracker) AddRootUsage ¶
func (t *TokenTracker) AddRootUsage(promptTokens, completionTokens int)
AddRootUsage adds token usage from a root LLM call.
func (*TokenTracker) AddRootUsageForIteration ¶ added in v0.76.0
func (t *TokenTracker) AddRootUsageForIteration(iteration, promptTokens, completionTokens int)
AddRootUsageForIteration adds token usage from a root LLM call and records a per-iteration snapshot. The snapshot captures the exact PromptTokens the provider reported for this single root call — not a cumulative delta. This is the data needed to compute context_fill_ratio per iteration.
func (*TokenTracker) AddSubCall ¶
func (t *TokenTracker) AddSubCall(call LLMCall)
AddSubCall adds a sub-LLM call with its token usage.
func (*TokenTracker) AddSubCalls ¶
func (t *TokenTracker) AddSubCalls(calls []LLMCall)
AddSubCalls adds multiple sub-LLM calls.
func (*TokenTracker) AddSubRLMCall ¶ added in v0.75.2
func (t *TokenTracker) AddSubRLMCall(call SubRLMCall)
AddSubRLMCall adds a sub-RLM call with its token usage.
func (*TokenTracker) ClearSubCalls ¶
func (t *TokenTracker) ClearSubCalls()
ClearSubCalls clears the recorded sub-LLM calls but preserves the counts.
func (*TokenTracker) GetMaxRootPromptTokens ¶ added in v0.76.0
func (t *TokenTracker) GetMaxRootPromptTokens() int
GetMaxRootPromptTokens returns the largest single root prompt across all iterations. Returns 0 if no snapshots have been recorded.
func (*TokenTracker) GetMeanRootPromptTokens ¶ added in v0.76.0
func (t *TokenTracker) GetMeanRootPromptTokens() int
GetMeanRootPromptTokens returns the mean root prompt tokens across all iterations. Returns 0 if no snapshots have been recorded.
func (*TokenTracker) GetRootSnapshots ¶ added in v0.76.0
func (t *TokenTracker) GetRootSnapshots() []RootIterationSnapshot
GetRootSnapshots returns a copy of all per-iteration root LLM snapshots.
func (*TokenTracker) GetRootUsage ¶
func (t *TokenTracker) GetRootUsage() core.TokenUsage
GetRootUsage returns token usage from root LLM calls only.
func (*TokenTracker) GetSubCalls ¶
func (t *TokenTracker) GetSubCalls() []LLMCall
GetSubCalls returns a copy of all sub-LLM calls.
func (*TokenTracker) GetSubRLMCalls ¶ added in v0.75.2
func (t *TokenTracker) GetSubRLMCalls() []SubRLMCall
GetSubRLMCalls returns a copy of all sub-RLM calls.
func (*TokenTracker) GetSubRLMUsage ¶ added in v0.75.2
func (t *TokenTracker) GetSubRLMUsage() core.TokenUsage
GetSubRLMUsage returns token usage from sub-RLM calls only.
func (*TokenTracker) GetSubUsage ¶
func (t *TokenTracker) GetSubUsage() core.TokenUsage
GetSubUsage returns token usage from sub-LLM calls only.
func (*TokenTracker) GetTotalUsage ¶
func (t *TokenTracker) GetTotalUsage() core.TokenUsage
GetTotalUsage returns the total aggregated token usage.
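A sketch of inspecting per-call usage after a completion; r is an *RLM that has already run Complete.

tracker := r.GetTokenTracker()
for _, call := range tracker.GetSubCalls() {
    fmt.Printf("sub-LLM call: prompt=%d completion=%d duration=%s\n",
        call.PromptTokens, call.CompletionTokens, call.Duration)
}
for _, call := range tracker.GetSubRLMCalls() {
    fmt.Printf("sub-RLM call (depth %d): %d iterations in %s\n",
        call.Depth, call.Iterations, call.Duration)
}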
type YaegiREPL ¶
type YaegiREPL struct {
// contains filtered or unexported fields
}
YaegiREPL is a Yaegi-based Go interpreter with RLM capabilities.
SECURITY NOTE: The interpreter is sandboxed by restricting imports to a safe subset of the standard library (no os, net, syscall, etc.). However, it does NOT protect against resource exhaustion attacks. LLM-generated code could potentially allocate large amounts of memory or create infinite loops that exceed the execution timeout. If running untrusted code in production, consider additional OS-level resource limits (e.g., cgroups, containers) or running the interpreter in a separate process with strict memory limits.
func NewYaegiREPL ¶
func NewYaegiREPL(client SubLLMClient) (*YaegiREPL, error)
NewYaegiREPL creates a new YaegiREPL instance. Returns an error if initialization fails (e.g., stdlib loading or builtin injection).
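A sketch of standalone REPL use outside the full RLM loop. subClient is an assumed SubLLMClient, ctx and document are assumptions, and the executed snippet assumes the loaded payload is exposed as a string variable named context (per LoadContext below) with fmt available inside the sandbox.

repl, err := rlm.NewYaegiREPL(subClient)
if err != nil {
    // handle initialization failure
}
repl.SetContext(ctx) // execution context for LLM calls

if err := repl.LoadContext(document); err != nil {
    // handle context loading failure
}
fmt.Println(repl.ContextInfo())

res, err := repl.Execute(ctx, `fmt.Println("context length:", len(context))`)
if err != nil {
    // handle execution failure
}
fmt.Println(rlm.FormatExecutionResult(res))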
func (*YaegiREPL) ClearAsyncQueries ¶ added in v0.75.1
func (r *YaegiREPL) ClearAsyncQueries()
ClearAsyncQueries clears all tracked async queries.
func (*YaegiREPL) ClearFinal ¶ added in v0.75.2
func (r *YaegiREPL) ClearFinal()
ClearFinal resets the final state. Call this at the start of each iteration or when reusing the REPL.
func (*YaegiREPL) ClearLLMCalls ¶
func (r *YaegiREPL) ClearLLMCalls()
ClearLLMCalls clears the recorded LLM calls.
func (*YaegiREPL) ClearSubmit ¶ added in v0.75.2
func (r *YaegiREPL) ClearSubmit()
ClearSubmit resets the submit state.
func (*YaegiREPL) ContextInfo ¶
ContextInfo returns metadata about the loaded context.
func (*YaegiREPL) Execute ¶
Execute runs Go code in the interpreter. Execution errors are captured in stderr rather than returned, allowing the caller to inspect all output. The mutex is held for the entire duration to ensure thread safety, as the yaegi interpreter is not safe for concurrent use. Panics from the Yaegi interpreter are recovered and reported as stderr errors.
func (*YaegiREPL) Final ¶ added in v0.75.2
Final returns the value passed to FINAL() or FINAL_VAR(). Returns empty string if not set.
func (*YaegiREPL) GetAsyncQuery ¶ added in v0.75.1
func (r *YaegiREPL) GetAsyncQuery(handleID string) (*AsyncQueryHandle, bool)
GetAsyncQuery returns the async query handle by ID.
func (*YaegiREPL) GetContextIndex ¶ added in v0.75.2
func (r *YaegiREPL) GetContextIndex() *ContextIndex
GetContextIndex returns the current context index (for external access).
func (*YaegiREPL) GetLLMCalls ¶
GetLLMCalls returns and clears the recorded LLM calls. Returns a copy of the calls slice to prevent external modification.
func (*YaegiREPL) GetSubmitOutput ¶ added in v0.75.2
GetSubmitOutput returns the submitted output fields.
func (*YaegiREPL) GetVariable ¶
GetVariable retrieves a variable value from the interpreter.
func (*YaegiREPL) GetVariableMetadata ¶ added in v0.75.2
func (r *YaegiREPL) GetVariableMetadata() []REPLVariable
GetVariableMetadata returns rich metadata about REPL variables. This provides type info, length, and preview for better LLM context.
func (*YaegiREPL) HasFinal ¶ added in v0.75.2
HasFinal returns true if FINAL() or FINAL_VAR() has been called. This provides state-verified completion detection (Nightjar Algorithm 1 Gap 1).
func (*YaegiREPL) HasSubmit ¶ added in v0.75.2
HasSubmit returns true if SUBMIT() has been called with valid output.
func (*YaegiREPL) IndexContext ¶ added in v0.75.2
IndexContext eagerly indexes the loaded context for semantic search. Call this after LoadContext if you want embedding-based FindRelevant.
func (*YaegiREPL) InjectSymbols ¶ added in v0.76.0
InjectSymbols merges external symbols into the REPL's "rlm/rlm" namespace. Call this after REPL creation and context loading, before Execute().
func (*YaegiREPL) LoadContext ¶
LoadContext injects the context payload into the interpreter as the `context` variable.
func (*YaegiREPL) PendingAsyncQueries ¶ added in v0.75.1
PendingAsyncQueries returns the number of pending async queries.
func (*YaegiREPL) QueryAsync ¶ added in v0.75.1
func (r *YaegiREPL) QueryAsync(prompt string) *AsyncQueryHandle
QueryAsync starts an async query and returns a handle. This is the Go API for async queries.
func (*YaegiREPL) QueryBatchedAsync ¶ added in v0.75.1
func (r *YaegiREPL) QueryBatchedAsync(prompts []string) *AsyncBatchHandle
QueryBatchedAsync starts batch async queries and returns a batch handle. This is the Go API for batch async queries.
func (*YaegiREPL) SetChunkConfig ¶ added in v0.75.2
func (r *YaegiREPL) SetChunkConfig(config ChunkConfig)
SetChunkConfig sets the chunking configuration for context indexing.
func (*YaegiREPL) SetContext ¶
SetContext sets the execution context for LLM calls.
func (*YaegiREPL) SetContextInfoPreviewChars ¶ added in v0.80.0
SetContextInfoPreviewChars sets how many raw context characters may appear in context_info. Use 0 to expose only structural metadata.
func (*YaegiREPL) SetEmbeddingFunc ¶ added in v0.75.2
func (r *YaegiREPL) SetEmbeddingFunc(fn EmbeddingFunc)
SetEmbeddingFunc sets the function used for semantic search. If not set, FindRelevant falls back to keyword matching.
func (*YaegiREPL) SetMaxFullContextQueryChars ¶ added in v0.80.0
SetMaxFullContextQueryChars limits Query()/QueryBatched() calls that auto-prepend the full context. Use 0 to disable the guardrail.
func (*YaegiREPL) SetSubmitSchema ¶ added in v0.75.2
func (r *YaegiREPL) SetSubmitSchema(schema map[string]OutputFieldSpec)
SetSubmitSchema sets the expected output schema for SUBMIT validation.
func (*YaegiREPL) SetVariable ¶ added in v0.75.2
SetVariable sets a variable in the interpreter that can be accessed in subsequent code executions. This is used by sub-RLM to store results that the parent RLM can access.
func (*YaegiREPL) WaitAllAsyncQueries ¶ added in v0.75.1
func (r *YaegiREPL) WaitAllAsyncQueries()
WaitAllAsyncQueries waits for all pending async queries to complete.