Documentation
¶
Overview ¶
Example (PlanningAgent) ¶
Example demonstrates how to use CreatePlanningAgent to build a dynamic workflow based on user requests.
package main
import (
"context"
"fmt"
"github.com/smallnest/langgraphgo/graph"
"github.com/tmc/langchaingo/llms"
)
func main() {
	// Step 1: Define your custom nodes that can be used in workflows.
	// Each node carries a Name (referenced by the generated plan), a
	// Description (shown to the LLM so it can select appropriate nodes),
	// and a Function executed when the planned workflow runs.
	nodes := []*graph.Node{
		{
			Name:        "fetch_data",
			Description: "Fetch data from external API or database",
			Function: func(ctx context.Context, state interface{}) (interface{}, error) {
				// State is a map whose "messages" key holds the running
				// conversation history.
				mState := state.(map[string]interface{})
				messages := mState["messages"].([]llms.MessageContent)
				// Simulate fetching data
				fmt.Println("Fetching data from API...")
				msg := llms.MessageContent{
					Role:  llms.ChatMessageTypeAI,
					Parts: []llms.ContentPart{llms.TextPart("Data fetched successfully: [item1, item2, item3]")},
				}
				// Return the next state with the AI message appended.
				return map[string]interface{}{
					"messages": append(messages, msg),
				}, nil
			},
		},
		{
			Name:        "validate_data",
			Description: "Validate the integrity and format of data",
			Function: func(ctx context.Context, state interface{}) (interface{}, error) {
				mState := state.(map[string]interface{})
				messages := mState["messages"].([]llms.MessageContent)
				// Simulate validation
				fmt.Println("Validating data...")
				msg := llms.MessageContent{
					Role:  llms.ChatMessageTypeAI,
					Parts: []llms.ContentPart{llms.TextPart("Data validation passed")},
				}
				return map[string]interface{}{
					"messages": append(messages, msg),
				}, nil
			},
		},
		{
			Name:        "transform_data",
			Description: "Transform and normalize data into required format",
			Function: func(ctx context.Context, state interface{}) (interface{}, error) {
				mState := state.(map[string]interface{})
				messages := mState["messages"].([]llms.MessageContent)
				// Simulate transformation
				fmt.Println("Transforming data...")
				msg := llms.MessageContent{
					Role:  llms.ChatMessageTypeAI,
					Parts: []llms.ContentPart{llms.TextPart("Data transformed to JSON format")},
				}
				return map[string]interface{}{
					"messages": append(messages, msg),
				}, nil
			},
		},
		{
			// Deliberately unused by the expected workflow below: it
			// demonstrates that the LLM only selects the nodes the user
			// request actually needs.
			Name:        "analyze_data",
			Description: "Perform statistical analysis on the data",
			Function: func(ctx context.Context, state interface{}) (interface{}, error) {
				mState := state.(map[string]interface{})
				messages := mState["messages"].([]llms.MessageContent)
				// Simulate analysis
				fmt.Println("Analyzing data...")
				msg := llms.MessageContent{
					Role:  llms.ChatMessageTypeAI,
					Parts: []llms.ContentPart{llms.TextPart("Analysis complete: mean=42, median=40, std=5.2")},
				}
				return map[string]interface{}{
					"messages": append(messages, msg),
				}, nil
			},
		},
		{
			Name:        "save_results",
			Description: "Save processed results to storage",
			Function: func(ctx context.Context, state interface{}) (interface{}, error) {
				mState := state.(map[string]interface{})
				messages := mState["messages"].([]llms.MessageContent)
				// Simulate saving
				fmt.Println("Saving results...")
				msg := llms.MessageContent{
					Role:  llms.ChatMessageTypeAI,
					Parts: []llms.ContentPart{llms.TextPart("Results saved to database")},
				}
				return map[string]interface{}{
					"messages": append(messages, msg),
				}, nil
			},
		},
	}
	// Step 2: Create your LLM model
	// In a real application, you would use an actual LLM like OpenAI, Anthropic, etc.
	// var model llms.Model = openai.New(...)
	// For this example, we'll skip the actual LLM call
	// The LLM would receive the user request and available nodes,
	// then generate a workflow plan like:
	// {
	//   "nodes": [
	//     {"name": "fetch_data", "type": "process"},
	//     {"name": "validate_data", "type": "process"},
	//     {"name": "transform_data", "type": "process"},
	//     {"name": "save_results", "type": "process"}
	//   ],
	//   "edges": [
	//     {"from": "START", "to": "fetch_data"},
	//     {"from": "fetch_data", "to": "validate_data"},
	//     {"from": "validate_data", "to": "transform_data"},
	//     {"from": "transform_data", "to": "save_results"},
	//     {"from": "save_results", "to": "END"}
	//   ]
	// }
	fmt.Println("CreatePlanningAgent example:")
	fmt.Println("This agent dynamically creates workflows based on user requests")
	fmt.Println()
	// List the node catalog the LLM planner would choose from.
	fmt.Println("Available nodes:")
	for i, node := range nodes {
		fmt.Printf("%d. %s: %s\n", i+1, node.Name, node.Description)
	}
	fmt.Println()
	fmt.Println("User request: 'Fetch data, validate it, transform it, and save the results'")
	fmt.Println()
	fmt.Println("The LLM will:")
	fmt.Println("1. Analyze the user request")
	fmt.Println("2. Select appropriate nodes from available nodes")
	fmt.Println("3. Generate a workflow plan (similar to a mermaid diagram)")
	fmt.Println("4. The agent will execute the planned workflow")
	fmt.Println()
	fmt.Println("Expected workflow:")
	fmt.Println("START -> fetch_data -> validate_data -> transform_data -> save_results -> END")
}
Output: CreatePlanningAgent example: This agent dynamically creates workflows based on user requests Available nodes: 1. fetch_data: Fetch data from external API or database 2. validate_data: Validate the integrity and format of data 3. transform_data: Transform and normalize data into required format 4. analyze_data: Perform statistical analysis on the data 5. save_results: Save processed results to storage User request: 'Fetch data, validate it, transform it, and save the results' The LLM will: 1. Analyze the user request 2. Select appropriate nodes from available nodes 3. Generate a workflow plan (similar to a mermaid diagram) 4. The agent will execute the planned workflow Expected workflow: START -> fetch_data -> validate_data -> transform_data -> save_results -> END
Example (PlanningAgentRealUsage) ¶
Example showing real usage pattern
package main
import (
"fmt"
)
func main() {
	// Walk through the real-world setup of a planning agent by printing
	// each line of the usage pattern, in order, from a single table.
	usage := []string{
		"Real usage pattern:",
		"",
		"// 1. Define your nodes",
		"nodes := []*graph.Node{...}",
		"",
		"// 2. Initialize your LLM model",
		"model := openai.New()",
		"",
		"// 3. Create the planning agent",
		"agent, err := prebuilt.CreatePlanningAgent(",
		" model,",
		" nodes,",
		" []tools.Tool{},",
		" prebuilt.WithVerbose(true),",
		")",
		"",
		"// 4. Prepare initial state with user request",
		"initialState := map[string]interface{}{",
		" \"messages\": []llms.MessageContent{",
		" llms.TextParts(llms.ChatMessageTypeHuman,",
		" \"Fetch, validate, and save the customer data\"),",
		" },",
		"}",
		"",
		"// 5. Execute the agent",
		"result, err := agent.Invoke(context.Background(), initialState)",
		"",
		"// 6. Access results",
		"mState := result.(map[string]interface{})",
		"messages := mState[\"messages\"].([]llms.MessageContent)",
	}
	for _, line := range usage {
		fmt.Println(line)
	}
}
Output: Real usage pattern: // 1. Define your nodes nodes := []*graph.Node{...} // 2. Initialize your LLM model model := openai.New() // 3. Create the planning agent agent, err := prebuilt.CreatePlanningAgent( model, nodes, []tools.Tool{}, prebuilt.WithVerbose(true), ) // 4. Prepare initial state with user request initialState := map[string]interface{}{ "messages": []llms.MessageContent{ llms.TextParts(llms.ChatMessageTypeHuman, "Fetch, validate, and save the customer data"), }, } // 5. Execute the agent result, err := agent.Invoke(context.Background(), initialState) // 6. Access results mState := result.(map[string]interface{}) messages := mState["messages"].([]llms.MessageContent)
Example (PlanningAgentWithVerbose) ¶
Example showing how to use the planning agent with verbose mode
package main
import (
"fmt"
)
func main() {
	// In a real application, you would define nodes and create the agent:
	// nodes := []*graph.Node{...}
	// agent, err := prebuilt.CreatePlanningAgent(model, nodes, []tools.Tool{}, prebuilt.WithVerbose(true))

	// Show the log lines a user can expect when verbose mode is enabled.
	for _, line := range []string{
		"With verbose mode enabled, you will see:",
		"🤔 Planning workflow...",
		"📋 Generated plan: {...}",
		"🚀 Executing planned workflow...",
		" ✓ Added node: step1",
		" ✓ Added node: step2",
		" ✓ Added edge: step1 -> step2",
		" ✓ Added edge: step2 -> END",
		"✅ Workflow execution completed",
	} {
		fmt.Println(line)
	}
}
Output: With verbose mode enabled, you will see: 🤔 Planning workflow... 📋 Generated plan: {...} 🚀 Executing planned workflow... ✓ Added node: step1 ✓ Added node: step2 ✓ Added edge: step1 -> step2 ✓ Added edge: step2 -> END ✅ Workflow execution completed
Example (WorkflowPlanFormat) ¶
Example showing how the LLM generates workflow plans
package main
import (
"fmt"
)
func main() {
	// Print the JSON schema the LLM is asked to emit for a workflow plan,
	// followed by the planning rules, line by line from a single table.
	for _, line := range []string{
		"Workflow Plan JSON Format:",
		"",
		"{",
		" \"nodes\": [",
		" {\"name\": \"node_name\", \"type\": \"process\"},",
		" {\"name\": \"another_node\", \"type\": \"process\"}",
		" ],",
		" \"edges\": [",
		" {\"from\": \"START\", \"to\": \"node_name\"},",
		" {\"from\": \"node_name\", \"to\": \"another_node\"},",
		" {\"from\": \"another_node\", \"to\": \"END\"}",
		" ]",
		"}",
		"",
		"Rules:",
		"1. Workflow must start with edge from 'START'",
		"2. Workflow must end with edge to 'END'",
		"3. Only use nodes from available nodes list",
		"4. Create logical flow based on user request",
	} {
		fmt.Println(line)
	}
}
Output: Workflow Plan JSON Format: { "nodes": [ {"name": "node_name", "type": "process"}, {"name": "another_node", "type": "process"} ], "edges": [ {"from": "START", "to": "node_name"}, {"from": "node_name", "to": "another_node"}, {"from": "another_node", "to": "END"} ] } Rules: 1. Workflow must start with edge from 'START' 2. Workflow must end with edge to 'END' 3. Only use nodes from available nodes list 4. Create logical flow based on user request
Index ¶
- func CreateAgent(model llms.Model, inputTools []tools.Tool, opts ...CreateAgentOption) (*graph.StateRunnable, error)
- func CreatePlanningAgent(model llms.Model, nodes []*graph.Node, inputTools []tools.Tool, ...) (*graph.StateRunnable, error)
- func CreateReactAgent(model llms.Model, inputTools []tools.Tool) (*graph.StateRunnable, error)
- func CreateReflectionAgent(config ReflectionAgentConfig) (*graph.StateRunnable, error)
- func CreateSupervisor(model llms.Model, members map[string]*graph.StateRunnable) (*graph.StateRunnable, error)
- type ChatMessageHistory
- func (h *ChatMessageHistory) AddAIMessage(ctx context.Context, message string) error
- func (h *ChatMessageHistory) AddMessage(ctx context.Context, message llms.ChatMessage) error
- func (h *ChatMessageHistory) AddUserMessage(ctx context.Context, message string) error
- func (h *ChatMessageHistory) Clear(ctx context.Context) error
- func (h *ChatMessageHistory) GetHistory() schema.ChatMessageHistory
- func (h *ChatMessageHistory) Messages(ctx context.Context) ([]llms.ChatMessage, error)
- func (h *ChatMessageHistory) SetMessages(ctx context.Context, messages []llms.ChatMessage) error
- type CreateAgentOption
- func WithCheckpointer(checkpointer graph.CheckpointStore) CreateAgentOption
- func WithSkillDir(skillDir string) CreateAgentOption
- func WithStateModifier(modifier func(messages []llms.MessageContent) []llms.MessageContent) CreateAgentOption
- func WithSystemMessage(message string) CreateAgentOption
- func WithVerbose(verbose bool) CreateAgentOption
- type CreateAgentOptions
- type Document
- type DocumentLoader
- type DocumentWithScore
- type Embedder
- type InMemoryVectorStore
- func (s *InMemoryVectorStore) AddDocuments(ctx context.Context, documents []Document, embeddings [][]float64) error
- func (s *InMemoryVectorStore) SimilaritySearch(ctx context.Context, query string, k int) ([]Document, error)
- func (s *InMemoryVectorStore) SimilaritySearchWithScore(ctx context.Context, query string, k int) ([]DocumentWithScore, error)
- type LangChainDocumentLoader
- type LangChainEmbedder
- type LangChainMemory
- func NewConversationBufferMemory(options ...memory.ConversationBufferOption) *LangChainMemory
- func NewConversationTokenBufferMemory(llm llms.Model, maxTokenLimit int, options ...memory.ConversationBufferOption) *LangChainMemory
- func NewConversationWindowBufferMemory(windowSize int, options ...memory.ConversationBufferOption) *LangChainMemory
- func NewLangChainMemory(buffer schema.Memory) *LangChainMemory
- func (m *LangChainMemory) Clear(ctx context.Context) error
- func (m *LangChainMemory) GetMessages(ctx context.Context) ([]llms.ChatMessage, error)
- func (m *LangChainMemory) LoadMemoryVariables(ctx context.Context, inputs map[string]any) (map[string]any, error)
- func (m *LangChainMemory) SaveContext(ctx context.Context, inputValues map[string]any, outputValues map[string]any) error
- type LangChainTextSplitter
- type LangChainVectorStore
- func (s *LangChainVectorStore) AddDocuments(ctx context.Context, documents []Document, embeddings [][]float64) error
- func (s *LangChainVectorStore) SimilaritySearch(ctx context.Context, query string, k int) ([]Document, error)
- func (s *LangChainVectorStore) SimilaritySearchWithScore(ctx context.Context, query string, k int) ([]DocumentWithScore, error)
- type Memory
- type MockEmbedder
- type RAGConfig
- type RAGPipeline
- type RAGState
- type ReflectionAgentConfig
- type Reranker
- type Retriever
- type SimpleReranker
- type SimpleTextSplitter
- type StaticDocumentLoader
- type TextSplitter
- type ToolExecutor
- type ToolInvocation
- type ToolNode
- type VectorStore
- type VectorStoreRetriever
- type WorkflowEdge
- type WorkflowNode
- type WorkflowPlan
Examples ¶
Constants ¶
This section is empty.
Variables ¶
This section is empty.
Functions ¶
func CreateAgent ¶ added in v0.3.1
func CreateAgent(model llms.Model, inputTools []tools.Tool, opts ...CreateAgentOption) (*graph.StateRunnable, error)
CreateAgent creates a new agent graph with options
func CreatePlanningAgent ¶ added in v0.4.0
func CreatePlanningAgent(model llms.Model, nodes []*graph.Node, inputTools []tools.Tool, opts ...CreateAgentOption) (*graph.StateRunnable, error)
CreatePlanningAgent creates an agent that first plans the workflow using LLM, then executes according to the generated plan
Example ¶
Example usage documentation
// Define your custom nodes: each has a Name the plan refers to, a
// Description the LLM uses to pick it, and the Function to execute.
nodes := []*graph.Node{
	{
		Name:        "fetch_data",
		Description: "Fetch data from API",
		Function: func(ctx context.Context, state interface{}) (interface{}, error) {
			// Your implementation
			fmt.Println("Fetching data...")
			return state, nil
		},
	},
	{
		Name:        "transform_data",
		Description: "Transform the fetched data",
		Function: func(ctx context.Context, state interface{}) (interface{}, error) {
			// Your implementation
			fmt.Println("Transforming data...")
			return state, nil
		},
	},
}
// Create your LLM model (this is a placeholder)
var model llms.Model // = your actual LLM model
// Create the planning agent
agent, _ := CreatePlanningAgent(
	model,
	nodes,
	[]tools.Tool{},
	WithVerbose(true), // log planning and execution steps
)
// Use the agent: seed the state with the user's natural-language request
initialState := map[string]interface{}{
	"messages": []llms.MessageContent{
		llms.TextParts(llms.ChatMessageTypeHuman, "Fetch and transform the data"),
	},
}
result, _ := agent.Invoke(context.Background(), initialState)
fmt.Printf("Result: %v\n", result)
// Output will show the planning and execution steps
func CreateReactAgent ¶
CreateReactAgent creates a new ReAct agent graph
func CreateReflectionAgent ¶ added in v0.5.0
func CreateReflectionAgent(config ReflectionAgentConfig) (*graph.StateRunnable, error)
CreateReflectionAgent creates a new Reflection Agent that iteratively improves its responses through self-reflection
The Reflection pattern involves:
1. Generate: Create an initial response.
2. Reflect: Critique the response and suggest improvements.
3. Revise: Generate an improved version based on the reflection.
4. Repeat until the response is satisfactory or the maximum number of iterations is reached.
Example ¶
package main
import (
"context"
"fmt"
"log"
"github.com/smallnest/langgraphgo/prebuilt"
"github.com/tmc/langchaingo/llms"
"github.com/tmc/langchaingo/llms/openai"
)
func main() {
	// Create LLM client. openai.New reads its configuration (API key,
	// base URL, model) from the environment.
	model, err := openai.New()
	if err != nil {
		log.Fatal(err)
	}
	// Configure reflection agent: up to 3 generate/reflect/revise cycles,
	// with verbose logging of each step.
	config := prebuilt.ReflectionAgentConfig{
		Model:         model,
		MaxIterations: 3,
		Verbose:       true,
		SystemMessage: "You are an expert technical writer. Create clear, accurate, and comprehensive responses.",
	}
	// Create agent
	agent, err := prebuilt.CreateReflectionAgent(config)
	if err != nil {
		log.Fatal(err)
	}
	// Prepare initial state: the conversation starts with a single human message.
	initialState := map[string]interface{}{
		"messages": []llms.MessageContent{
			{
				Role:  llms.ChatMessageTypeHuman,
				Parts: []llms.ContentPart{llms.TextPart("Explain the CAP theorem in distributed systems")},
			},
		},
	}
	// Invoke agent
	result, err := agent.Invoke(context.Background(), initialState)
	if err != nil {
		log.Fatal(err)
	}
	// Extract final response: walk every AI message in the resulting
	// state and print its text parts.
	finalState := result.(map[string]interface{})
	messages := finalState["messages"].([]llms.MessageContent)
	fmt.Println("=== Final Response ===")
	for _, msg := range messages {
		if msg.Role == llms.ChatMessageTypeAI {
			for _, part := range msg.Parts {
				if textPart, ok := part.(llms.TextContent); ok {
					fmt.Println(textPart.Text)
				}
			}
		}
	}
}
Output:
Example (CustomCriteria) ¶
package main
import (
"context"
"fmt"
"log"
"github.com/smallnest/langgraphgo/prebuilt"
"github.com/tmc/langchaingo/llms"
"github.com/tmc/langchaingo/llms/openai"
)
func main() {
	// Create the LLM client; configuration comes from the environment.
	model, err := openai.New()
	if err != nil {
		log.Fatal(err)
	}
	// Custom reflection criteria for code quality: ReflectionPrompt
	// replaces the default critique instructions used in the reflect step.
	config := prebuilt.ReflectionAgentConfig{
		Model:         model,
		MaxIterations: 2,
		Verbose:       true,
		SystemMessage: "You are a senior software engineer reviewing code.",
		ReflectionPrompt: `Evaluate the code review for:
1. **Security**: Are security issues identified?
2. **Performance**: Are performance concerns addressed?
3. **Maintainability**: Are code quality issues noted?
4. **Best Practices**: Are language/framework best practices mentioned?
Provide specific, actionable feedback.`,
	}
	agent, err := prebuilt.CreateReflectionAgent(config)
	if err != nil {
		log.Fatal(err)
	}
	// Seed the conversation with the review request.
	initialState := map[string]interface{}{
		"messages": []llms.MessageContent{
			{
				Role:  llms.ChatMessageTypeHuman,
				Parts: []llms.ContentPart{llms.TextPart("Review this SQL query function for issues")},
			},
		},
	}
	result, err := agent.Invoke(context.Background(), initialState)
	if err != nil {
		log.Fatal(err)
	}
	// The agent stores the latest revised response under the "draft" key.
	finalState := result.(map[string]interface{})
	draft := finalState["draft"].(string)
	fmt.Printf("Code review:\n%s\n", draft)
}
Output:
Example (WithSeparateReflector) ¶
package main
import (
"context"
"fmt"
"log"
"github.com/smallnest/langgraphgo/prebuilt"
"github.com/tmc/langchaingo/llms"
"github.com/tmc/langchaingo/llms/openai"
)
func main() {
	// Create generation model
	generationModel, err := openai.New(openai.WithModel("gpt-4"))
	if err != nil {
		log.Fatal(err)
	}
	// Create separate reflection model (could be a different model)
	reflectionModel, err := openai.New(openai.WithModel("gpt-4"))
	if err != nil {
		log.Fatal(err)
	}
	// Configure with separate models: Model generates drafts,
	// ReflectionModel critiques them.
	config := prebuilt.ReflectionAgentConfig{
		Model:           generationModel,
		ReflectionModel: reflectionModel,
		MaxIterations:   2,
		Verbose:         true,
		SystemMessage:   "You are a helpful assistant providing detailed explanations.",
		ReflectionPrompt: `You are a senior technical reviewer.
Evaluate the response for:
1. Technical accuracy
2. Completeness of explanation
3. Clarity for the target audience
4. Use of examples
Be specific in your feedback.`,
	}
	agent, err := prebuilt.CreateReflectionAgent(config)
	if err != nil {
		log.Fatal(err)
	}
	// Seed the conversation with the user's question.
	initialState := map[string]interface{}{
		"messages": []llms.MessageContent{
			{
				Role:  llms.ChatMessageTypeHuman,
				Parts: []llms.ContentPart{llms.TextPart("What is a Merkle tree and how is it used in blockchain?")},
			},
		},
	}
	result, err := agent.Invoke(context.Background(), initialState)
	if err != nil {
		log.Fatal(err)
	}
	// The agent exposes the final revised text under "draft" and the
	// number of completed cycles under "iteration".
	finalState := result.(map[string]interface{})
	draft := finalState["draft"].(string)
	iteration := finalState["iteration"].(int)
	fmt.Printf("Final draft (after %d iterations):\n%s\n", iteration, draft)
}
Output:
func CreateSupervisor ¶
func CreateSupervisor(model llms.Model, members map[string]*graph.StateRunnable) (*graph.StateRunnable, error)
CreateSupervisor creates a supervisor graph that orchestrates multiple agents
Types ¶
type ChatMessageHistory ¶
type ChatMessageHistory struct {
// contains filtered or unexported fields
}
ChatMessageHistory provides direct access to chat message history
func NewChatMessageHistory ¶
func NewChatMessageHistory(options ...memory.ChatMessageHistoryOption) *ChatMessageHistory
NewChatMessageHistory creates a new chat message history
func (*ChatMessageHistory) AddAIMessage ¶
func (h *ChatMessageHistory) AddAIMessage(ctx context.Context, message string) error
AddAIMessage adds an AI message to the history
func (*ChatMessageHistory) AddMessage ¶
func (h *ChatMessageHistory) AddMessage(ctx context.Context, message llms.ChatMessage) error
AddMessage adds a message to the history
func (*ChatMessageHistory) AddUserMessage ¶
func (h *ChatMessageHistory) AddUserMessage(ctx context.Context, message string) error
AddUserMessage adds a user message to the history
func (*ChatMessageHistory) Clear ¶
func (h *ChatMessageHistory) Clear(ctx context.Context) error
Clear clears all messages from the history
func (*ChatMessageHistory) GetHistory ¶
func (h *ChatMessageHistory) GetHistory() schema.ChatMessageHistory
GetHistory returns the underlying langchaingo ChatMessageHistory
func (*ChatMessageHistory) Messages ¶
func (h *ChatMessageHistory) Messages(ctx context.Context) ([]llms.ChatMessage, error)
Messages returns all messages in the history
func (*ChatMessageHistory) SetMessages ¶
func (h *ChatMessageHistory) SetMessages(ctx context.Context, messages []llms.ChatMessage) error
SetMessages sets the messages in the history
type CreateAgentOption ¶ added in v0.3.1
type CreateAgentOption func(*CreateAgentOptions)
CreateAgentOption is a function that configures CreateAgentOptions
func WithCheckpointer ¶ added in v0.3.1
func WithCheckpointer(checkpointer graph.CheckpointStore) CreateAgentOption
WithCheckpointer sets the checkpointer for the agent Note: Currently this is a placeholder and may not be fully integrated into the graph execution yet
func WithSkillDir ¶ added in v0.3.2
func WithSkillDir(skillDir string) CreateAgentOption
WithSkillDir sets the skill directory for the agent
func WithStateModifier ¶ added in v0.3.1
func WithStateModifier(modifier func(messages []llms.MessageContent) []llms.MessageContent) CreateAgentOption
WithStateModifier sets a function to modify messages before they are sent to the model
func WithSystemMessage ¶ added in v0.3.1
func WithSystemMessage(message string) CreateAgentOption
WithSystemMessage sets the system message for the agent
func WithVerbose ¶ added in v0.3.2
func WithVerbose(verbose bool) CreateAgentOption
WithVerbose sets the verbose mode for the agent
type CreateAgentOptions ¶ added in v0.3.1
type CreateAgentOptions struct {
Verbose bool
SystemMessage string
StateModifier func(messages []llms.MessageContent) []llms.MessageContent
Checkpointer graph.CheckpointStore
// contains filtered or unexported fields
}
CreateAgentOptions contains options for creating an agent
type DocumentLoader ¶
DocumentLoader loads documents from various sources
type DocumentWithScore ¶
DocumentWithScore represents a document with its similarity score
type Embedder ¶
type Embedder interface {
EmbedDocuments(ctx context.Context, texts []string) ([][]float64, error)
EmbedQuery(ctx context.Context, text string) ([]float64, error)
}
Embedder generates embeddings for text
type InMemoryVectorStore ¶
type InMemoryVectorStore struct {
// contains filtered or unexported fields
}
InMemoryVectorStore is a simple in-memory vector store implementation
func NewInMemoryVectorStore ¶
func NewInMemoryVectorStore(embedder Embedder) *InMemoryVectorStore
NewInMemoryVectorStore creates a new InMemoryVectorStore
func (*InMemoryVectorStore) AddDocuments ¶
func (s *InMemoryVectorStore) AddDocuments(ctx context.Context, documents []Document, embeddings [][]float64) error
AddDocuments adds documents with their embeddings to the store
func (*InMemoryVectorStore) SimilaritySearch ¶
func (s *InMemoryVectorStore) SimilaritySearch(ctx context.Context, query string, k int) ([]Document, error)
SimilaritySearch performs similarity search and returns top k documents
func (*InMemoryVectorStore) SimilaritySearchWithScore ¶
func (s *InMemoryVectorStore) SimilaritySearchWithScore(ctx context.Context, query string, k int) ([]DocumentWithScore, error)
SimilaritySearchWithScore performs similarity search and returns documents with scores
type LangChainDocumentLoader ¶
type LangChainDocumentLoader struct {
// contains filtered or unexported fields
}
LangChainDocumentLoader adapts langchaingo's documentloaders.Loader to our DocumentLoader interface
func NewLangChainDocumentLoader ¶
func NewLangChainDocumentLoader(loader documentloaders.Loader) *LangChainDocumentLoader
NewLangChainDocumentLoader creates a new adapter for langchaingo document loaders
func (*LangChainDocumentLoader) Load ¶
func (l *LangChainDocumentLoader) Load(ctx context.Context) ([]Document, error)
Load loads documents using the underlying langchaingo loader
func (*LangChainDocumentLoader) LoadAndSplit ¶
func (l *LangChainDocumentLoader) LoadAndSplit(ctx context.Context, splitter textsplitter.TextSplitter) ([]Document, error)
LoadAndSplit loads and splits documents using langchaingo's text splitter
type LangChainEmbedder ¶
type LangChainEmbedder struct {
// contains filtered or unexported fields
}
LangChainEmbedder adapts langchaingo's embeddings.Embedder to our Embedder interface
func NewLangChainEmbedder ¶
func NewLangChainEmbedder(embedder embeddings.Embedder) *LangChainEmbedder
NewLangChainEmbedder creates a new adapter for langchaingo embedders
func (*LangChainEmbedder) EmbedDocuments ¶
func (e *LangChainEmbedder) EmbedDocuments(ctx context.Context, texts []string) ([][]float64, error)
EmbedDocuments generates embeddings for multiple documents
func (*LangChainEmbedder) EmbedQuery ¶
EmbedQuery generates an embedding for a single query
type LangChainMemory ¶
type LangChainMemory struct {
// contains filtered or unexported fields
}
LangChainMemory adapts langchaingo's memory implementations to our Memory interface
func NewConversationBufferMemory ¶
func NewConversationBufferMemory(options ...memory.ConversationBufferOption) *LangChainMemory
NewConversationBufferMemory creates a new conversation buffer memory with default settings
func NewConversationTokenBufferMemory ¶
func NewConversationTokenBufferMemory(llm llms.Model, maxTokenLimit int, options ...memory.ConversationBufferOption) *LangChainMemory
NewConversationTokenBufferMemory creates a new conversation token buffer memory that keeps conversation history within a token limit
func NewConversationWindowBufferMemory ¶
func NewConversationWindowBufferMemory(windowSize int, options ...memory.ConversationBufferOption) *LangChainMemory
NewConversationWindowBufferMemory creates a new conversation window buffer memory that keeps only the last N conversation turns
func NewLangChainMemory ¶
func NewLangChainMemory(buffer schema.Memory) *LangChainMemory
NewLangChainMemory creates a new adapter for langchaingo memory Supports ConversationBuffer, ConversationWindowBuffer, ConversationTokenBuffer, etc.
func (*LangChainMemory) Clear ¶
func (m *LangChainMemory) Clear(ctx context.Context) error
Clear clears memory contents
func (*LangChainMemory) GetMessages ¶
func (m *LangChainMemory) GetMessages(ctx context.Context) ([]llms.ChatMessage, error)
GetMessages returns all messages in memory This is a convenience method that extracts messages from the memory buffer
func (*LangChainMemory) LoadMemoryVariables ¶
func (m *LangChainMemory) LoadMemoryVariables(ctx context.Context, inputs map[string]any) (map[string]any, error)
LoadMemoryVariables loads memory variables
func (*LangChainMemory) SaveContext ¶
func (m *LangChainMemory) SaveContext(ctx context.Context, inputValues map[string]any, outputValues map[string]any) error
SaveContext saves the context from this conversation to buffer
type LangChainTextSplitter ¶
type LangChainTextSplitter struct {
// contains filtered or unexported fields
}
LangChainTextSplitter adapts langchaingo's textsplitter.TextSplitter to our TextSplitter interface
func NewLangChainTextSplitter ¶
func NewLangChainTextSplitter(splitter textsplitter.TextSplitter) *LangChainTextSplitter
NewLangChainTextSplitter creates a new adapter for langchaingo text splitters
func (*LangChainTextSplitter) SplitDocuments ¶
func (s *LangChainTextSplitter) SplitDocuments(documents []Document) ([]Document, error)
SplitDocuments splits documents using the underlying langchaingo splitter
type LangChainVectorStore ¶
type LangChainVectorStore struct {
// contains filtered or unexported fields
}
LangChainVectorStore adapts langchaingo's vectorstores.VectorStore to our VectorStore interface
func NewLangChainVectorStore ¶
func NewLangChainVectorStore(store vectorstores.VectorStore) *LangChainVectorStore
NewLangChainVectorStore creates a new adapter for langchaingo vector stores
func (*LangChainVectorStore) AddDocuments ¶
func (s *LangChainVectorStore) AddDocuments(ctx context.Context, documents []Document, embeddings [][]float64) error
AddDocuments adds documents to the vector store
func (*LangChainVectorStore) SimilaritySearch ¶
func (s *LangChainVectorStore) SimilaritySearch(ctx context.Context, query string, k int) ([]Document, error)
SimilaritySearch searches for similar documents
func (*LangChainVectorStore) SimilaritySearchWithScore ¶
func (s *LangChainVectorStore) SimilaritySearchWithScore(ctx context.Context, query string, k int) ([]DocumentWithScore, error)
SimilaritySearchWithScore searches for similar documents and returns them with scores
type Memory ¶
type Memory interface {
// SaveContext saves the context from this conversation to buffer
SaveContext(ctx context.Context, inputValues map[string]any, outputValues map[string]any) error
// LoadMemoryVariables loads memory variables
LoadMemoryVariables(ctx context.Context, inputs map[string]any) (map[string]any, error)
// Clear clears memory contents
Clear(ctx context.Context) error
// GetMessages returns all messages in memory
GetMessages(ctx context.Context) ([]llms.ChatMessage, error)
}
Memory is the interface for conversation memory management in langgraphgo
type MockEmbedder ¶
type MockEmbedder struct {
Dimension int
}
MockEmbedder is a simple mock embedder for testing
func NewMockEmbedder ¶
func NewMockEmbedder(dimension int) *MockEmbedder
NewMockEmbedder creates a new MockEmbedder
func (*MockEmbedder) EmbedDocuments ¶
EmbedDocuments generates mock embeddings for documents
func (*MockEmbedder) EmbedQuery ¶
EmbedQuery generates a mock embedding for a query
type RAGConfig ¶
type RAGConfig struct {
// Retrieval configuration
TopK int // Number of documents to retrieve
ScoreThreshold float64 // Minimum relevance score
UseReranking bool // Whether to use reranking
UseFallback bool // Whether to use fallback search
// Generation configuration
SystemPrompt string
IncludeCitations bool
MaxTokens int
Temperature float64
// Components
Loader DocumentLoader
Splitter TextSplitter
Embedder Embedder
VectorStore VectorStore
Retriever Retriever
Reranker Reranker
LLM llms.Model
}
RAGConfig configures a RAG pipeline
func DefaultRAGConfig ¶
func DefaultRAGConfig() *RAGConfig
DefaultRAGConfig returns a default RAG configuration
type RAGPipeline ¶
type RAGPipeline struct {
// contains filtered or unexported fields
}
RAGPipeline represents a complete RAG pipeline
func NewRAGPipeline ¶
func NewRAGPipeline(config *RAGConfig) *RAGPipeline
NewRAGPipeline creates a new RAG pipeline with the given configuration
func (*RAGPipeline) BuildAdvancedRAG ¶
func (p *RAGPipeline) BuildAdvancedRAG() error
BuildAdvancedRAG builds an advanced RAG pipeline: Retrieve -> Rerank -> Generate
func (*RAGPipeline) BuildBasicRAG ¶
func (p *RAGPipeline) BuildBasicRAG() error
BuildBasicRAG builds a basic RAG pipeline: Retrieve -> Generate
func (*RAGPipeline) BuildConditionalRAG ¶
func (p *RAGPipeline) BuildConditionalRAG() error
BuildConditionalRAG builds a RAG pipeline with conditional routing based on relevance
func (*RAGPipeline) Compile ¶
func (p *RAGPipeline) Compile() (*graph.Runnable, error)
Compile compiles the RAG pipeline into a runnable graph
func (*RAGPipeline) GetGraph ¶
func (p *RAGPipeline) GetGraph() *graph.MessageGraph
GetGraph returns the underlying graph for visualization
type RAGState ¶
type RAGState struct {
Query string
Documents []Document
RetrievedDocuments []Document
RankedDocuments []DocumentWithScore
Context string
Answer string
Citations []string
Metadata map[string]interface{}
}
RAGState represents the state flowing through a RAG pipeline
type ReflectionAgentConfig ¶ added in v0.5.0
type ReflectionAgentConfig struct {
// Model is the LLM to use for both generation and reflection
Model llms.Model
// ReflectionModel is an optional separate model for reflection
// If nil, uses the same model as generation
ReflectionModel llms.Model
// MaxIterations is the maximum number of generation-reflection cycles
MaxIterations int
// SystemMessage is the system message for the generation step
SystemMessage string
// ReflectionPrompt is the system message for the reflection step
ReflectionPrompt string
// Verbose enables detailed logging
Verbose bool
}
ReflectionAgentConfig configures the reflection agent
type Reranker ¶
type Reranker interface {
Rerank(ctx context.Context, query string, documents []Document) ([]DocumentWithScore, error)
}
Reranker reranks retrieved documents based on relevance
type Retriever ¶
type Retriever interface {
GetRelevantDocuments(ctx context.Context, query string) ([]Document, error)
}
Retriever retrieves relevant documents for a query
type SimpleReranker ¶
type SimpleReranker struct {
}
SimpleReranker is a simple reranker that scores documents based on keyword matching
func NewSimpleReranker ¶
func NewSimpleReranker() *SimpleReranker
NewSimpleReranker creates a new SimpleReranker
func (*SimpleReranker) Rerank ¶
func (r *SimpleReranker) Rerank(ctx context.Context, query string, documents []Document) ([]DocumentWithScore, error)
Rerank reranks documents based on query relevance
type SimpleTextSplitter ¶
SimpleTextSplitter splits text into chunks of a given size
func NewSimpleTextSplitter ¶
func NewSimpleTextSplitter(chunkSize, chunkOverlap int) *SimpleTextSplitter
NewSimpleTextSplitter creates a new SimpleTextSplitter
func (*SimpleTextSplitter) SplitDocuments ¶
func (s *SimpleTextSplitter) SplitDocuments(documents []Document) ([]Document, error)
SplitDocuments splits documents into smaller chunks
type StaticDocumentLoader ¶
type StaticDocumentLoader struct {
Documents []Document
}
StaticDocumentLoader loads documents from a static list
func NewStaticDocumentLoader ¶
func NewStaticDocumentLoader(documents []Document) *StaticDocumentLoader
NewStaticDocumentLoader creates a new StaticDocumentLoader
type TextSplitter ¶
TextSplitter splits documents into smaller chunks
type ToolExecutor ¶
type ToolExecutor struct {
// contains filtered or unexported fields
}
ToolExecutor executes tools based on invocations
func NewToolExecutor ¶
func NewToolExecutor(inputTools []tools.Tool) *ToolExecutor
NewToolExecutor creates a new ToolExecutor with the given tools
func (*ToolExecutor) Execute ¶
func (te *ToolExecutor) Execute(ctx context.Context, invocation ToolInvocation) (string, error)
Execute executes a single tool invocation
func (*ToolExecutor) ExecuteMany ¶
func (te *ToolExecutor) ExecuteMany(ctx context.Context, invocations []ToolInvocation) ([]string, error)
ExecuteMany executes multiple tool invocations sequentially (parallel execution could be added if needed). In a real graph, this might be a ParallelNode, but here we provide a helper.
func (*ToolExecutor) ToolNode ¶
func (te *ToolExecutor) ToolNode(ctx context.Context, state interface{}) (interface{}, error)
ToolNode is a graph node function that executes tools. It expects the state to contain a list of ToolInvocation or a single ToolInvocation. This is a simplified version; in a real agent, it would parse messages.
type ToolInvocation ¶
ToolInvocation represents a request to execute a tool
type ToolNode ¶
type ToolNode struct {
Executor *ToolExecutor
}
ToolNode is a reusable node that executes tool calls from the last AI message. It expects the state to be a map[string]interface{} with a "messages" key containing []llms.MessageContent.
func NewToolNode ¶
NewToolNode creates a new ToolNode with the given tools.
type VectorStore ¶
type VectorStore interface {
AddDocuments(ctx context.Context, documents []Document, embeddings [][]float64) error
SimilaritySearch(ctx context.Context, query string, k int) ([]Document, error)
SimilaritySearchWithScore(ctx context.Context, query string, k int) ([]DocumentWithScore, error)
}
VectorStore stores and retrieves document embeddings
type VectorStoreRetriever ¶
type VectorStoreRetriever struct {
VectorStore VectorStore
TopK int
}
VectorStoreRetriever implements Retriever using a VectorStore
func NewVectorStoreRetriever ¶
func NewVectorStoreRetriever(vectorStore VectorStore, topK int) *VectorStoreRetriever
NewVectorStoreRetriever creates a new VectorStoreRetriever
func (*VectorStoreRetriever) GetRelevantDocuments ¶
func (r *VectorStoreRetriever) GetRelevantDocuments(ctx context.Context, query string) ([]Document, error)
GetRelevantDocuments retrieves relevant documents for a query
type WorkflowEdge ¶ added in v0.4.0
type WorkflowEdge struct {
From string `json:"from"`
To string `json:"to"`
Condition string `json:"condition,omitempty"` // For conditional edges
}
WorkflowEdge represents an edge in the workflow plan
type WorkflowNode ¶ added in v0.4.0
type WorkflowNode struct {
Name string `json:"name"`
Type string `json:"type"` // "start", "process", "end", "conditional"
}
WorkflowNode represents a node in the workflow plan
type WorkflowPlan ¶ added in v0.4.0
type WorkflowPlan struct {
Nodes []WorkflowNode `json:"nodes"`
Edges []WorkflowEdge `json:"edges"`
}
WorkflowPlan represents the parsed workflow plan from LLM