Documentation
¶
Index ¶
Constants ¶
This section is empty.
Variables ¶
This section is empty.
Functions ¶
func ComputeConfidence ¶
func ComputeConfidence(answer string, chunks []store.RetrievalResult, weights ConfidenceWeights) float64
ComputeConfidence calculates a confidence score for an answer.
Types ¶
type Answer ¶
type Answer struct {
Text string `json:"text"`
Confidence float64 `json:"confidence"`
Sources []Source `json:"sources"`
Reasoning []Step `json:"reasoning"`
ModelUsed string `json:"model_used"`
Rounds int `json:"rounds"`
PromptTokens int `json:"prompt_tokens"`
CompletionTokens int `json:"completion_tokens"`
TotalTokens int `json:"total_tokens"`
}
Answer is the final output of the reasoning pipeline.
type Citation ¶
type Citation struct {
Text string `json:"text"` // The cited text
SourceRef string `json:"source_ref"` // Reference string (e.g., "doc.pdf, Section 3.2")
ChunkID int64 `json:"chunk_id"` // Matched chunk ID, 0 if unmatched
Verified bool `json:"verified"` // Whether the citation was verified against sources
}
Citation represents an extracted citation from an answer.
func ExtractCitations ¶
func ExtractCitations(answer string, chunks []store.RetrievalResult) []Citation
ExtractCitations finds citation references in the text of an answer.
type ConfidenceWeights ¶
type ConfidenceWeights struct {
SourceCoverage float64 // How many sources are referenced
CitationAccuracy float64 // How accurate citations are
SelfConsistency float64 // Internal consistency of the answer
AnswerLength float64 // Whether the answer is substantive
}
ConfidenceWeights controls the relative importance of confidence factors.
func DefaultConfidenceWeights ¶
func DefaultConfidenceWeights() ConfidenceWeights
DefaultConfidenceWeights returns balanced weights.
type Engine ¶
type Engine struct {
// contains filtered or unexported fields
}
Engine runs multi-round reasoning with validation between rounds.
func (*Engine) Reason ¶
func (e *Engine) Reason(ctx context.Context, question string, chunks []store.RetrievalResult, opts Options) (*Answer, error)
Reason runs the multi-round reasoning pipeline. Round 1: generate an initial answer from the retrieved context. Round 2: validate citations and check for gaps. Round 3: if confidence is below the threshold, refine and re-answer.
type Options ¶
type Options struct {
MaxRounds int
}
Options configures a single reasoning operation.
type Source ¶
type Source struct {
ChunkID int64 `json:"chunk_id"`
DocumentID int64 `json:"document_id"`
Filename string `json:"filename"`
Path string `json:"path"`
Content string `json:"content"`
Heading string `json:"heading"`
ChunkType string `json:"chunk_type"`
PageNumber int `json:"page_number"`
PositionInDoc int `json:"position_in_doc"`
Score float64 `json:"score"`
ChunkMeta string `json:"chunk_metadata,omitempty"`
DocMeta string `json:"doc_metadata,omitempty"`
}
Source tracks a chunk used in the answer.
type Step ¶
type Step struct {
Round int `json:"round"`
Action string `json:"action"`
Input string `json:"input,omitempty"`
Output string `json:"output,omitempty"`
Prompt string `json:"prompt,omitempty"` // full prompt sent to LLM (for replay)
Response string `json:"response,omitempty"` // raw LLM response
Validation string `json:"validation,omitempty"`
ChunksUsed int `json:"chunks_used,omitempty"`
Tokens int `json:"tokens,omitempty"`
ElapsedMs int64 `json:"elapsed_ms,omitempty"`
Issues []string `json:"issues,omitempty"` // validation issues found
}
Step records a single round of the reasoning pipeline.