Documentation
¶
Overview ¶
Package prebuilt provides ready-to-use agent implementations for common AI patterns.
This package offers a collection of pre-built agents that implement various reasoning and execution patterns, from simple tool-using agents to complex multi-agent systems. Each agent is implemented using the core graph package and can be easily customized or extended for specific use cases.
Available Agents ¶
## ReAct Agent (Reason + Act) The ReAct agent combines reasoning and acting by having the model think about what to do, choose tools to use, and act on the results. It's suitable for general-purpose tasks.
import (
"github.com/smallnest/langgraphgo/prebuilt"
"github.com/tmc/langchaingo/llms"
"github.com/tmc/langchaingo/tools"
)
// Create a ReAct agent with tools
agent, err := prebuilt.CreateReactAgent(
llm, // Language model
[]tools.Tool{ // Available tools
&tools.CalculatorTool{},
weatherTool,
},
10, // Max iterations
)
// Execute agent
result, err := agent.Invoke(ctx, map[string]any{
"messages": []llms.MessageContent{
{
Role: llms.ChatMessageTypeHuman,
Parts: []llms.ContentPart{
llms.TextPart("What's the weather in London and calculate 15% of 100?"),
},
},
},
})
## Typed ReAct Agent A type-safe version of the ReAct agent using Go generics:
type AgentState struct {
Messages []llms.MessageContent `json:"messages"`
IterationCount int `json:"iteration_count"`
}
agent, err := prebuilt.CreateReactAgentTyped[AgentState](
llm,
tools,
10,
func() AgentState { return AgentState{} },
)
## Supervisor Agent Orchestrates multiple specialized agents, routing tasks to the appropriate agent:
// Create specialized agents
weatherAgent, _ := prebuilt.CreateReactAgent(llm, weatherTools, 5)
calcAgent, _ := prebuilt.CreateReactAgent(llm, calcTools, 5)
searchAgent, _ := prebuilt.CreateReactAgent(llm, searchTools, 5)
// Create supervisor
members := map[string]*graph.StateRunnable[map[string]any]{
"weather": weatherAgent,
"calculator": calcAgent,
"search": searchAgent,
}
supervisor, err := prebuilt.CreateSupervisor(
llm,
members,
"Router", // Router agent name
)
// Use supervisor to route tasks
result, err := supervisor.Invoke(ctx, map[string]any{
"messages": []llms.MessageContent{
{
Role: llms.ChatMessageTypeHuman,
Parts: []llms.ContentPart{
llms.TextPart("Calculate the distance between London and Paris"),
},
},
},
})
## Planning Agent Creates and executes plans for complex tasks:
planner, err := prebuilt.CreatePlanningAgent(
llm,
planningTools,
executionTools,
)
// The agent will create a plan, then execute each step
result, err := planner.Invoke(ctx, map[string]any{
"messages": []llms.MessageContent{
{
Role: llms.ChatMessageTypeHuman,
Parts: []llms.ContentPart{
llms.TextPart("Plan and execute a research report on renewable energy"),
},
},
},
})
## Reflection Agent Uses self-reflection to improve responses:
reflectionAgent, err := prebuilt.CreateReflectionAgent(
llm,
tools,
)
// The agent will reflect on and potentially revise its answers
result, err := reflectionAgent.Invoke(ctx, map[string]any{
"messages": []llms.MessageContent{
{
Role: llms.ChatMessageTypeHuman,
Parts: []llms.ContentPart{
llms.TextPart("Explain quantum computing"),
},
},
},
})
## Tree of Thoughts Agent Explores multiple reasoning paths before choosing the best:
totAgent, err := prebuilt.CreateTreeOfThoughtsAgent(
llm,
3, // Number of thoughts to generate
5, // Maximum steps
)
// The agent will generate and evaluate multiple reasoning paths
result, err := totAgent.Invoke(ctx, map[string]any{
"messages": []llms.MessageContent{
{
Role: llms.ChatMessageTypeHuman,
Parts: []llms.ContentPart{
llms.TextPart("Solve this complex math problem step by step"),
},
},
},
})
RAG (Retrieval-Augmented Generation) ¶
## Basic RAG Agent Combines document retrieval with generation:
rag, err := prebuilt.CreateRAGAgent(
llm,
documentLoader, // Loads documents
textSplitter, // Splits text into chunks
embedder, // Creates embeddings
vectorStore, // Stores and searches embeddings
5, // Number of documents to retrieve
)
// The agent will retrieve relevant documents and generate answers
result, err := rag.Invoke(ctx, map[string]any{
"messages": []llms.MessageContent{
{
Role: llms.ChatMessageTypeHuman,
Parts: []llms.ContentPart{
llms.TextPart("What are the benefits of renewable energy?"),
},
},
},
})
## Advanced RAG with Conditional Processing
rag, err := prebuilt.CreateConditionalRAGAgent(
llm,
loader,
splitter,
embedder,
vectorStore,
3, // Retrieve count
// Condition function to decide whether to use RAG
func(ctx context.Context, query string) bool {
return len(strings.Fields(query)) > 5
},
)
## Chat Agent For conversational applications:
chatAgent, err := prebuilt.CreateChatAgent(
llm,
systemPrompt, // Optional system prompt
memory, // Memory for conversation history
)
// The agent maintains conversation context
result, err := chatAgent.Invoke(ctx, map[string]any{
"messages": []llms.MessageContent{
{
Role: llms.ChatMessageTypeHuman,
Parts: []llms.ContentPart{
llms.TextPart("Hello! How are you?"),
},
},
},
})
Custom Tools ¶
Create custom tools for agents:
type WeatherTool struct{}
func (t *WeatherTool) Name() string { return "get_weather" }
func (t *WeatherTool) Description() string {
return "Get current weather for a city"
}
func (t *WeatherTool) Call(ctx context.Context, input string) (string, error) {
// Parse the city from input
var data struct {
City string `json:"city"`
}
if err := json.Unmarshal([]byte(input), &data); err != nil {
return "", err
}
// Call weather API
// Implementation here...
return fmt.Sprintf("The weather in %s is 22°C and sunny", data.City), nil
}
// Use with any agent
weatherTool := &WeatherTool{}
agent, err := prebuilt.CreateReactAgent(llm, []tools.Tool{weatherTool}, 10)
Agent Configuration ¶
Most agents support configuration through options:
agent, err := prebuilt.CreateReactAgent(llm, tools, 10,
    prebuilt.WithMaxTokens(4000),
    prebuilt.WithTemperature(0.7),
    prebuilt.WithStreaming(true),
    prebuilt.WithCheckpointing(checkpointer),
    prebuilt.WithMemory(memory),
)
Streaming Support ¶
Enable real-time streaming of agent thoughts and actions:
// Create streaming agent
agent, _ := prebuilt.CreateReactAgent(llm, tools, 10)
streaming := prebuilt.NewStreamingAgent(agent)
// Stream execution
stream, _ := streaming.Stream(ctx, input)
for event := range stream.Events {
fmt.Printf("Event: %v\n", event)
}
Memory Integration ¶
Agents can integrate with various memory strategies:
import "github.com/smallnest/langgraphgo/memory" // Use buffer memory bufferMemory := memory.NewBufferMemory(100) agent, _ := prebuilt.CreateChatAgent(llm, "", bufferMemory) // Use summarization memory summMemory := memory.NewSummarizationMemory(llm, 2000) agent, _ := prebuilt.CreateChatAgent(llm, "", summMemory)
Best Practices ¶
- Choose the right agent pattern for your use case
- Provide clear tool descriptions and examples
- Set appropriate iteration limits to prevent infinite loops
- Use memory for conversational applications
- Enable streaming for better user experience
- Use checkpointing for long-running tasks
- Test with various input patterns
- Monitor token usage and costs
Error Handling ¶
Agents include built-in error handling:
- Tool execution failures
- LLM API errors
- Timeout protection
- Iteration limit enforcement
- Graceful degradation strategies
Performance Considerations ¶
- Use typed agents for better performance
- Cache tool results when appropriate
- Batch tool calls when possible
- Monitor resource usage
- Consider parallel execution for independent tasks
Example (ManusAgent) ¶
Example_manusAgent demonstrates how to use CreateManusAgent with persistent Markdown files for planning and tracking.
package main
import (
"context"
"fmt"
"os"
"github.com/smallnest/langgraphgo/graph"
"github.com/smallnest/langgraphgo/prebuilt"
"github.com/tmc/langchaingo/llms"
)
func main() {
	// newNode builds a TypedNode that prints a progress line and appends a
	// canned AI reply to the "messages" slice carried in the shared state.
	newNode := func(name, desc, progress, reply string) graph.TypedNode[map[string]any] {
		return graph.TypedNode[map[string]any]{
			Name:        name,
			Description: desc,
			Function: func(ctx context.Context, state map[string]any) (map[string]any, error) {
				history := state["messages"].([]llms.MessageContent)
				fmt.Println(progress)
				aiMsg := llms.MessageContent{
					Role:  llms.ChatMessageTypeAI,
					Parts: []llms.ContentPart{llms.TextPart(reply)},
				}
				return map[string]any{"messages": append(history, aiMsg)}, nil
			},
		}
	}

	// Define available nodes that can be used in the plan.
	nodes := []graph.TypedNode[map[string]any]{
		newNode("research",
			"Research and gather information from external sources",
			"🔍 Researching...",
			"Research complete: Found 15 relevant sources"),
		newNode("compile",
			"Compile findings into organized notes",
			"📝 Compiling findings...",
			"Notes compiled: 5 key findings organized"),
		newNode("write",
			"Write final deliverable based on research",
			"✍️ Writing final output...",
			"Final document written: 2000 words summary"),
		newNode("review",
			"Review and validate the output",
			"✅ Reviewing...",
			"Review complete: Output validated successfully"),
	}

	// Configure the Manus agent (shown for illustration; unused in this demo).
	_ = prebuilt.ManusConfig{
		WorkDir:    "./work",
		PlanPath:   "./work/task_plan.md",
		NotesPath:  "./work/notes.md",
		OutputPath: "./work/output.md",
		AutoSave:   true,
		Verbose:    true,
	}

	// Initial state carrying the user request and the overall goal
	// (also illustrative only — the agent is not invoked here).
	_ = map[string]any{
		"messages": []llms.MessageContent{
			{
				Role:  llms.ChatMessageTypeHuman,
				Parts: []llms.ContentPart{llms.TextPart("Research TypeScript benefits and write a summary")},
			},
		},
		"goal": "Research and document the benefits of TypeScript for development teams",
	}

	for _, line := range []string{
		"🚀 Manus Agent Example",
		"=====================",
		"",
		"This example demonstrates:",
		"1. Persistent Markdown planning (task_plan.md)",
		"2. Research notes storage (notes.md)",
		"3. Progress tracking with checkboxes",
		"4. Final output generation (output.md)",
		"",
		"Available nodes:",
	} {
		fmt.Println(line)
	}
	for i, node := range nodes {
		fmt.Printf(" %d. %s: %s\n", i+1, node.Name, node.Description)
	}
	for _, line := range []string{
		"",
		"Generated files:",
		" 📄 task_plan.md - Workflow plan with progress checkboxes",
		" 📄 notes.md - Research findings and error logs",
		" 📄 output.md - Final deliverable",
		"",
	} {
		fmt.Println(line)
	}

	// Clean up work directory for demo.
	os.RemoveAll("./work")
}
Example (ManusAgentFileStructure) ¶
Example_manusAgentFileStructure shows the file structure
package main
import (
"fmt"
)
// main prints the on-disk layout produced by the Manus agent: a work/
// directory holding task_plan.md (plan with checkboxes), notes.md
// (findings and error log), and output.md (final deliverable).
func main() {
	fmt.Println("📁 Manus Agent File Structure")
	fmt.Println("=============================")
	fmt.Println()
	fmt.Println("work/")
	fmt.Println("├── task_plan.md # Workflow plan with checkboxes")
	// FIX: the plan file uses Markdown headings; "%%" was a typo for "##".
	// fmt.Println does no %-formatting, so "%%" printed literally and was
	// inconsistent with the notes.md listing below ("# Research Notes").
	fmt.Println("│ ## Goal")
	fmt.Println("│ Research TypeScript benefits")
	fmt.Println("│ ")
	fmt.Println("│ ## Phases")
	fmt.Println("│ - [x] Phase 1: Research")
	fmt.Println("│ - [ ] Phase 2: Compile")
	fmt.Println("│ - [ ] Phase 3: Write")
	fmt.Println("│")
	fmt.Println("├── notes.md # Research findings & errors")
	fmt.Println("│ # Research Notes")
	fmt.Println("│ ")
	fmt.Println("│ ## TypeScript Benefits")
	fmt.Println("│ - Type safety")
	fmt.Println("│ - Better IDE support")
	fmt.Println("│ ")
	fmt.Println("│ ## Error Log")
	fmt.Println("│ [Error entries here]")
	fmt.Println("│")
	fmt.Println("└── output.md # Final deliverable")
	fmt.Println(" # TypeScript Benefits Summary")
	fmt.Println(" ...")
	fmt.Println()
}
Example (ManusAgentIntegration) ¶
Example_manusAgentIntegration shows real usage pattern
package main
import (
"fmt"
)
func main() {
fmt.Println("💻 Integration Example")
fmt.Println("=====================")
fmt.Println()
fmt.Println("// 1. Define your nodes")
fmt.Println("nodes := []graph.TypedNode[map[string]any]{")
fmt.Println(" {Name: \"research\", Description: \"...\", Function: ...},")
fmt.Println(" {Name: \"compile\", Description: \"...\", Function: ...},")
fmt.Println(" {Name: \"write\", Description: \"...\", Function: ...},")
fmt.Println("}")
fmt.Println()
fmt.Println("// 2. Configure Manus agent")
fmt.Println("config := prebuilt.ManusConfig{")
fmt.Println(" WorkDir: \"./work\",")
fmt.Println(" PlanPath: \"./work/task_plan.md\",")
fmt.Println(" NotesPath: \"./work/notes.md\",")
fmt.Println(" OutputPath: \"./work/output.md\",")
fmt.Println(" AutoSave: true,")
fmt.Println(" Verbose: true,")
fmt.Println("}")
fmt.Println()
fmt.Println("// 3. Create the agent")
fmt.Println("agent, err := prebuilt.CreateManusAgent(")
fmt.Println(" model,")
fmt.Println(" nodes,")
fmt.Println(" []tools.Tool{},")
fmt.Println(" config,")
fmt.Println(")")
fmt.Println()
fmt.Println("// 4. Execute")
fmt.Println("result, err := agent.Invoke(ctx, initialState)")
fmt.Println()
fmt.Println("// 5. Check results in work/")
fmt.Println("// - task_plan.md shows progress")
fmt.Println("// - notes.md contains research")
fmt.Println("// - output.md has final deliverable")
fmt.Println()
}
Example (ManusAgentWithErrors) ¶
Example_manusAgentWithErrors shows error handling and recovery
package main
import (
"fmt"
)
// main describes how the Manus agent logs failures to notes.md, keeps
// task_plan.md checkboxes accurate, and supports resuming a failed phase.
func main() {
	messages := []string{
		"🔄 Manus Agent with Error Handling",
		"===================================",
		"",
		"The Manus agent handles errors by:",
		"1. Logging errors to notes.md",
		"2. Updating checkboxes in task_plan.md",
		"3. Maintaining state for recovery",
		"",
		"Example error flow:",
		" ❌ Phase 2 fails → error logged to notes.md",
		" 📋 task_plan.md shows Phase 1 complete, Phase 2 pending",
		" 🔄 Agent can resume and retry Phase 2",
		"",
		"Error logging format in notes.md:",
		" ## Error [2025-01-07 15:30:45]",
		" Error in phase 2 (compile): connection timeout",
	}
	for _, m := range messages {
		fmt.Println(m)
	}
}
Example (ManusVsPlanningAgent) ¶
Example_manusVsPlanningAgent compares both approaches
package main
import (
"fmt"
)
// main contrasts the in-memory Planning agent with the file-backed Manus
// agent and summarizes when each is the better fit.
func main() {
	comparison := []string{
		"📊 Manus Agent vs Planning Agent",
		"=================================",
		"",
		"Planning Agent (prebuilt.CreatePlanningAgent):",
		" ✅ Dynamic workflow generation",
		" ✅ JSON-based plan format",
		" ✅ In-memory state management",
		" ✅ Fast execution",
		"",
		"Manus Agent (prebuilt.CreateManusAgent):",
		" ✅ Persistent Markdown files",
		" ✅ Human-readable plans",
		" ✅ Progress tracking with checkboxes",
		" ✅ Error logging to notes.md",
		" ✅ Resume capability",
		" ✅ Knowledge accumulation",
		"",
		"When to use:",
		" • Planning Agent - Quick tasks, automated workflows",
		" • Manus Agent - Complex multi-step tasks, research, documentation",
		"",
	}
	for _, line := range comparison {
		fmt.Println(line)
	}
}
Example (PlanningAgent) ¶
Example demonstrates how to use CreatePlanningAgentMap to build a dynamic workflow based on user requests
package main
import (
"context"
"fmt"
"github.com/smallnest/langgraphgo/graph"
"github.com/tmc/langchaingo/llms"
)
func main() {
	// Step 1: Define your custom nodes that can be used in workflows.
	// newNode builds a node that simulates work by printing a progress line
	// and appending a canned AI result message to state["messages"].
	newNode := func(name, desc, progress, result string) graph.TypedNode[map[string]any] {
		return graph.TypedNode[map[string]any]{
			Name:        name,
			Description: desc,
			Function: func(ctx context.Context, state map[string]any) (map[string]any, error) {
				history := state["messages"].([]llms.MessageContent)
				fmt.Println(progress)
				aiMsg := llms.MessageContent{
					Role:  llms.ChatMessageTypeAI,
					Parts: []llms.ContentPart{llms.TextPart(result)},
				}
				return map[string]any{"messages": append(history, aiMsg)}, nil
			},
		}
	}

	nodes := []graph.TypedNode[map[string]any]{
		newNode("fetch_data",
			"Fetch data from external API or database",
			"Fetching data from API...",
			"Data fetched successfully: [item1, item2, item3]"),
		newNode("validate_data",
			"Validate the integrity and format of data",
			"Validating data...",
			"Data validation passed"),
		newNode("transform_data",
			"Transform and normalize data into required format",
			"Transforming data...",
			"Data transformed to JSON format"),
		newNode("analyze_data",
			"Perform statistical analysis on the data",
			"Analyzing data...",
			"Analysis complete: mean=42, median=40, std=5.2"),
		newNode("save_results",
			"Save processed results to storage",
			"Saving results...",
			"Results saved to database"),
	}

	// Step 2: Create your LLM model.
	// In a real application, you would use an actual LLM like OpenAI, Anthropic, etc.
	// var model llms.Model = openai.New(...)
	// For this example, we'll skip the actual LLM call.
	//
	// The LLM would receive the user request and available nodes,
	// then generate a workflow plan like:
	// {
	//   "nodes": [
	//     {"name": "fetch_data", "type": "process"},
	//     {"name": "validate_data", "type": "process"},
	//     {"name": "transform_data", "type": "process"},
	//     {"name": "save_results", "type": "process"}
	//   ],
	//   "edges": [
	//     {"from": "START", "to": "fetch_data"},
	//     {"from": "fetch_data", "to": "validate_data"},
	//     {"from": "validate_data", "to": "transform_data"},
	//     {"from": "transform_data", "to": "save_results"},
	//     {"from": "save_results", "to": "END"}
	//   ]
	// }
	fmt.Println("CreatePlanningAgent example:")
	fmt.Println("This agent dynamically creates workflows based on user requests")
	fmt.Println()
	fmt.Println("Available nodes:")
	for i, node := range nodes {
		fmt.Printf("%d. %s: %s\n", i+1, node.Name, node.Description)
	}
	for _, line := range []string{
		"",
		"User request: 'Fetch data, validate it, transform it, and save the results'",
		"",
		"The LLM will:",
		"1. Analyze the user request",
		"2. Select appropriate nodes from available nodes",
		"3. Generate a workflow plan (similar to a mermaid diagram)",
		"4. The agent will execute the planned workflow",
		"",
		"Expected workflow:",
		"START -> fetch_data -> validate_data -> transform_data -> save_results -> END",
	} {
		fmt.Println(line)
	}
}
Output: CreatePlanningAgent example: This agent dynamically creates workflows based on user requests Available nodes: 1. fetch_data: Fetch data from external API or database 2. validate_data: Validate the integrity and format of data 3. transform_data: Transform and normalize data into required format 4. analyze_data: Perform statistical analysis on the data 5. save_results: Save processed results to storage User request: 'Fetch data, validate it, transform it, and save the results' The LLM will: 1. Analyze the user request 2. Select appropriate nodes from available nodes 3. Generate a workflow plan (similar to a mermaid diagram) 4. The agent will execute the planned workflow Expected workflow: START -> fetch_data -> validate_data -> transform_data -> save_results -> END
Example (PlanningAgentRealUsage) ¶
Example showing real usage pattern
package main
import (
"fmt"
)
// main prints an annotated end-to-end usage listing for
// prebuilt.CreatePlanningAgentMap: define nodes, create the model and
// agent, build the initial state, invoke, and read back the messages.
func main() {
	steps := []string{
		"Real usage pattern:",
		"",
		"// 1. Define your nodes",
		"nodes := []graph.TypedNode[map[string]any]{...}",
		"",
		"// 2. Initialize your LLM model",
		"model := openai.New()",
		"",
		"// 3. Create the planning agent",
		"agent, err := prebuilt.CreatePlanningAgentMap(",
		" model,",
		" nodes,",
		" []tools.Tool{},",
		" prebuilt.WithVerbose(true),",
		")",
		"",
		"// 4. Prepare initial state with user request",
		"initialState := map[string]any{",
		" \"messages\": []llms.MessageContent{",
		" llms.TextParts(llms.ChatMessageTypeHuman,",
		" \"Fetch, validate, and save the customer data\"),",
		" },",
		"}",
		"",
		"// 5. Execute the agent",
		"result, err := agent.Invoke(context.Background(), initialState)",
		"",
		"// 6. Access results",
		"mState := result",
		"messages := mState[\"messages\"].([]llms.MessageContent)",
	}
	for _, step := range steps {
		fmt.Println(step)
	}
}
Output: Real usage pattern: // 1. Define your nodes nodes := []graph.TypedNode[map[string]any]{...} // 2. Initialize your LLM model model := openai.New() // 3. Create the planning agent agent, err := prebuilt.CreatePlanningAgentMap( model, nodes, []tools.Tool{}, prebuilt.WithVerbose(true), ) // 4. Prepare initial state with user request initialState := map[string]any{ "messages": []llms.MessageContent{ llms.TextParts(llms.ChatMessageTypeHuman, "Fetch, validate, and save the customer data"), }, } // 5. Execute the agent result, err := agent.Invoke(context.Background(), initialState) // 6. Access results mState := result messages := mState["messages"].([]llms.MessageContent)
Example (PlanningAgentWithVerbose) ¶
Example showing how to use the planning agent with verbose mode
package main
import (
"fmt"
)
// main shows the progress log the planning agent emits when created with
// prebuilt.WithVerbose(true).
func main() {
	// In a real application, you would define nodes and create the agent:
	// nodes := []graph.TypedNode[map[string]any]{...}
	// agent, err := prebuilt.CreatePlanningAgentMap(model, nodes, []tools.Tool{}, prebuilt.WithVerbose(true))
	verboseLog := []string{
		"With verbose mode enabled, you will see:",
		"🤔 Planning workflow...",
		"📋 Generated plan: {...}",
		"🚀 Executing planned workflow...",
		" ✓ Added node: step1",
		" ✓ Added node: step2",
		" ✓ Added edge: step1 -> step2",
		" ✓ Added edge: step2 -> END",
		"✅ Workflow execution completed",
	}
	for _, entry := range verboseLog {
		fmt.Println(entry)
	}
}
Output: With verbose mode enabled, you will see: 🤔 Planning workflow... 📋 Generated plan: {...} 🚀 Executing planned workflow... ✓ Added node: step1 ✓ Added node: step2 ✓ Added edge: step1 -> step2 ✓ Added edge: step2 -> END ✅ Workflow execution completed
Example (WorkflowPlanFormat) ¶
Example showing how the LLM generates workflow plans
package main
import (
"fmt"
)
// main prints the JSON schema the planning LLM must produce (nodes plus
// START/END-anchored edges) and the rules the plan has to satisfy.
func main() {
	schema := []string{
		"Workflow Plan JSON Format:",
		"",
		"{",
		" \"nodes\": [",
		" {\"name\": \"node_name\", \"type\": \"process\"},",
		" {\"name\": \"another_node\", \"type\": \"process\"}",
		" ],",
		" \"edges\": [",
		" {\"from\": \"START\", \"to\": \"node_name\"},",
		" {\"from\": \"node_name\", \"to\": \"another_node\"},",
		" {\"from\": \"another_node\", \"to\": \"END\"}",
		" ]",
		"}",
		"",
		"Rules:",
		"1. Workflow must start with edge from 'START'",
		"2. Workflow must end with edge to 'END'",
		"3. Only use nodes from available nodes list",
		"4. Create logical flow based on user request",
	}
	for _, line := range schema {
		fmt.Println(line)
	}
}
Output: Workflow Plan JSON Format: { "nodes": [ {"name": "node_name", "type": "process"}, {"name": "another_node", "type": "process"} ], "edges": [ {"from": "START", "to": "node_name"}, {"from": "node_name", "to": "another_node"}, {"from": "another_node", "to": "END"} ] } Rules: 1. Workflow must start with edge from 'START' 2. Workflow must end with edge to 'END' 3. Only use nodes from available nodes list 4. Create logical flow based on user request
Index ¶
- Constants
- func ApplyDefaultMaxIterations(maxIterations int) int
- func BuildToolDefinitions(inputTools []tools.Tool, getSchema func(tools.Tool) map[string]any) []llms.Tool
- func CreateAgent[S any](model llms.Model, inputTools []tools.Tool, ...) (*graph.StateRunnable[S], error)
- func CreateAgentMap(model llms.Model, inputTools []tools.Tool, maxIterations int, ...) (*graph.StateRunnable[map[string]any], error)
- func CreateManusAgent(model llms.Model, availableNodes []graph.TypedNode[map[string]any], ...) (*graph.StateRunnable[map[string]any], error)
- func CreatePEVAgent[S any](config PEVAgentConfig, getMessages func(S) []llms.MessageContent, ...) (*graph.StateRunnable[S], error)
- func CreatePEVAgentMap(config PEVAgentConfig) (*graph.StateRunnable[map[string]any], error)
- func CreatePlanningAgent[S any](model llms.Model, availableNodes []graph.TypedNode[S], ...) (*graph.StateRunnable[S], error)
- func CreatePlanningAgentMap(model llms.Model, availableNodes []graph.TypedNode[map[string]any], ...) (*graph.StateRunnable[map[string]any], error)
- func CreateReactAgent[S any](model llms.Model, inputTools []tools.Tool, ...) (*graph.StateRunnable[S], error)
- func CreateReactAgentMap(model llms.Model, inputTools []tools.Tool, maxIterations int) (*graph.StateRunnable[map[string]any], error) (deprecated)
- func CreateReflectionAgent[S any](config ReflectionAgentConfig, getMessages func(S) []llms.MessageContent, ...) (*graph.StateRunnable[S], error)
- func CreateReflectionAgentMap(config ReflectionAgentConfig) (*graph.StateRunnable[map[string]any], error)
- func CreateStandardAgentSchema() *graph.MapSchema
- func CreateSupervisor[S any](model llms.Model, members map[string]*graph.StateRunnable[S], ...) (*graph.StateRunnable[S], error)
- func CreateSupervisorMap(model llms.Model, members map[string]*graph.StateRunnable[map[string]any]) (*graph.StateRunnable[map[string]any], error)
- func CreateTreeOfThoughtsAgent[S any](config TreeOfThoughtsConfig, getActivePaths func(S) map[string]*SearchPath, ...) (*graph.StateRunnable[S], error)
- func CreateTreeOfThoughtsAgentMap(config TreeOfThoughtsConfig) (*graph.StateRunnable[map[string]any], error)
- func HasToolCallsInLastMessage(messages []llms.MessageContent) bool
- func ToolNode[S any](executor *ToolExecutor, getMessages func(S) []llms.MessageContent, ...) func(context.Context, S) (S, error)
- func ToolNodeMap(executor *ToolExecutor) func(context.Context, map[string]any) (map[string]any, error)
- type AgentState
- type ChatAgent
- func (c *ChatAgent) AddTool(tool tools.Tool)
- func (c *ChatAgent) AsyncChat(ctx context.Context, message string) (<-chan string, error)
- func (c *ChatAgent) AsyncChatWithChunks(ctx context.Context, message string) (<-chan string, error)
- func (c *ChatAgent) Chat(ctx context.Context, message string) (string, error)
- func (c *ChatAgent) ClearTools()
- func (c *ChatAgent) GetTools() []tools.Tool
- func (c *ChatAgent) PrintStream(ctx context.Context, message string, w io.Writer) error
- func (c *ChatAgent) RemoveTool(toolName string) bool
- func (c *ChatAgent) SetTools(newTools []tools.Tool)
- func (c *ChatAgent) ThreadID() string
- type ChatAgentState
- type CreateAgentOption
- func WithMaxIterations(maxIterations int) CreateAgentOption
- func WithSkillDir(skillDir string) CreateAgentOption
- func WithStateModifier(modifier func(messages []llms.MessageContent) []llms.MessageContent) CreateAgentOption
- func WithSystemMessage(message string) CreateAgentOption
- func WithVerbose(verbose bool) CreateAgentOption
- type CreateAgentOptions
- type ManusConfig
- type MockLLMEmptyContent
- type MockLLMError
- type MockToolError
- type PEVAgentConfig
- type PEVAgentState
- type Phase
- type PlanningAgentState
- type ReactAgentState
- type ReflectionAgentConfig
- type ReflectionAgentState
- type SearchPath
- type SupervisorState
- type ThoughtEvaluator
- type ThoughtGenerator
- type ThoughtState
- type ToolExecutor
- type ToolInvocation
- type ToolWithSchema
- type TreeOfThoughtsConfig
- type TreeOfThoughtsState
- type VerificationResult
- type WorkflowEdge
- type WorkflowNode
- type WorkflowPlan
Examples ¶
- Package (ManusAgent)
- Package (ManusAgentFileStructure)
- Package (ManusAgentIntegration)
- Package (ManusAgentWithErrors)
- Package (ManusVsPlanningAgent)
- Package (PlanningAgent)
- Package (PlanningAgentRealUsage)
- Package (PlanningAgentWithVerbose)
- Package (WorkflowPlanFormat)
- ChatAgent
- CreateReflectionAgent
- CreateReflectionAgent (CustomCriteria)
- CreateReflectionAgent (WithSeparateReflector)
Constants ¶
const DefaultMaxIterations = 20
DefaultMaxIterations is the default maximum number of iterations for agent execution.
Variables ¶
This section is empty.
Functions ¶
func ApplyDefaultMaxIterations ¶ added in v0.8.5
ApplyDefaultMaxIterations returns maxIterations if > 0, otherwise returns DefaultMaxIterations.
func BuildToolDefinitions ¶ added in v0.8.5
func BuildToolDefinitions(inputTools []tools.Tool, getSchema func(tools.Tool) map[string]any) []llms.Tool
BuildToolDefinitions converts a slice of tools.Tool to llms.Tool definitions. This is a common pattern used across different agent implementations.
func CreateAgent ¶ added in v0.3.1
func CreateAgent[S any]( model llms.Model, inputTools []tools.Tool, getMessages func(S) []llms.MessageContent, setMessages func(S, []llms.MessageContent) S, getExtraTools func(S) []tools.Tool, setExtraTools func(S, []tools.Tool) S, opts ...CreateAgentOption, ) (*graph.StateRunnable[S], error)
CreateAgent creates a generic agent graph
func CreateAgentMap ¶ added in v0.8.0
func CreateAgentMap(model llms.Model, inputTools []tools.Tool, maxIterations int, opts ...CreateAgentOption) (*graph.StateRunnable[map[string]any], error)
CreateAgentMap creates a new agent graph with map[string]any state
func CreateManusAgent ¶ added in v0.8.5
func CreateManusAgent( model llms.Model, availableNodes []graph.TypedNode[map[string]any], inputTools []tools.Tool, config ManusConfig, opts ...CreateAgentOption, ) (*graph.StateRunnable[map[string]any], error)
CreateManusAgent creates a Manus-style planning agent that: 1. Generates and saves plans to task_plan.md 2. Stores research findings in notes.md 3. Tracks progress with checkboxes 4. Supports human-in-the-loop intervention 5. Maintains persistent state across sessions
func CreatePEVAgent ¶ added in v0.6.0
func CreatePEVAgent[S any]( config PEVAgentConfig, getMessages func(S) []llms.MessageContent, setMessages func(S, []llms.MessageContent) S, getPlan func(S) []string, setPlan func(S, []string) S, getCurrentStep func(S) int, setCurrentStep func(S, int) S, getLastToolResult func(S) string, setLastToolResult func(S, string) S, getIntermediateSteps func(S) []string, setIntermediateSteps func(S, []string) S, getRetries func(S) int, setRetries func(S, int) S, getVerificationResult func(S) string, setVerificationResult func(S, string) S, getFinalAnswer func(S) string, setFinalAnswer func(S, string) S, ) (*graph.StateRunnable[S], error)
CreatePEVAgent creates a generic PEV Agent
func CreatePEVAgentMap ¶ added in v0.8.0
func CreatePEVAgentMap(config PEVAgentConfig) (*graph.StateRunnable[map[string]any], error)
CreatePEVAgentMap creates a new PEV Agent with map[string]any state
func CreatePlanningAgent ¶ added in v0.4.0
func CreatePlanningAgent[S any]( model llms.Model, availableNodes []graph.TypedNode[S], getMessages func(S) []llms.MessageContent, setMessages func(S, []llms.MessageContent) S, getPlan func(S) *WorkflowPlan, setPlan func(S, *WorkflowPlan) S, opts ...CreateAgentOption, ) (*graph.StateRunnable[S], error)
CreatePlanningAgent creates a generic planning agent
func CreatePlanningAgentMap ¶ added in v0.8.0
func CreatePlanningAgentMap(model llms.Model, availableNodes []graph.TypedNode[map[string]any], inputTools []tools.Tool, opts ...CreateAgentOption) (*graph.StateRunnable[map[string]any], error)
CreatePlanningAgentMap creates a planning agent with map[string]any state
func CreateReactAgent ¶
func CreateReactAgent[S any]( model llms.Model, inputTools []tools.Tool, getMessages func(S) []llms.MessageContent, setMessages func(S, []llms.MessageContent) S, getIterationCount func(S) int, setIterationCount func(S, int) S, maxIterations int, ) (*graph.StateRunnable[S], error)
CreateReactAgent creates a new typed ReAct agent graph
func CreateReactAgentMap ¶ deprecated added in v0.8.0
func CreateReactAgentMap(model llms.Model, inputTools []tools.Tool, maxIterations int) (*graph.StateRunnable[map[string]any], error)
CreateReactAgentMap creates a new ReAct agent graph with map[string]any state
Deprecated: Use CreateAgentMap instead, which now includes the same iteration limiting functionality. This function is kept for backward compatibility and will be removed in a future version.
func CreateReflectionAgent ¶ added in v0.5.0
func CreateReflectionAgent[S any]( config ReflectionAgentConfig, getMessages func(S) []llms.MessageContent, setMessages func(S, []llms.MessageContent) S, getDraft func(S) string, setDraft func(S, string) S, getIteration func(S) int, setIteration func(S, int) S, getReflection func(S) string, setReflection func(S, string) S, ) (*graph.StateRunnable[S], error)
CreateReflectionAgent creates a generic reflection agent
Example ¶
package main
import (
"context"
"fmt"
"log"
"github.com/smallnest/langgraphgo/prebuilt"
"github.com/tmc/langchaingo/llms"
"github.com/tmc/langchaingo/llms/openai"
)
func main() {
// Create LLM
model, err := openai.New()
if err != nil {
log.Fatal(err)
}
// Configure reflection agent
config := prebuilt.ReflectionAgentConfig{
Model: model,
MaxIterations: 3,
Verbose: true,
SystemMessage: "You are an expert technical writer. Create clear, accurate, and comprehensive responses.",
}
// Create agent
agent, err := prebuilt.CreateReflectionAgentMap(config)
if err != nil {
log.Fatal(err)
}
// Prepare initial state
initialState := map[string]any{
"messages": []llms.MessageContent{
{
Role: llms.ChatMessageTypeHuman,
Parts: []llms.ContentPart{llms.TextPart("Explain the CAP theorem in distributed systems")},
},
},
}
// Invoke agent
result, err := agent.Invoke(context.Background(), initialState)
if err != nil {
log.Fatal(err)
}
// Extract final response
messages := result["messages"].([]llms.MessageContent)
fmt.Println("=== Final Response ===")
for _, msg := range messages {
if msg.Role == llms.ChatMessageTypeAI {
for _, part := range msg.Parts {
if textPart, ok := part.(llms.TextContent); ok {
fmt.Println(textPart.Text)
}
}
}
}
}
Example (CustomCriteria) ¶
package main
import (
"context"
"fmt"
"log"
"github.com/smallnest/langgraphgo/prebuilt"
"github.com/tmc/langchaingo/llms"
"github.com/tmc/langchaingo/llms/openai"
)
func main() {
model, err := openai.New()
if err != nil {
log.Fatal(err)
}
// Custom reflection criteria for code quality
config := prebuilt.ReflectionAgentConfig{
Model: model,
MaxIterations: 2,
Verbose: true,
SystemMessage: "You are a senior software engineer reviewing code.",
ReflectionPrompt: `Evaluate the code review for:
1. **Security**: Are security issues identified?
2. **Performance**: Are performance concerns addressed?
3. **Maintainability**: Are code quality issues noted?
4. **Best Practices**: Are language/framework best practices mentioned?
Provide specific, actionable feedback.`,
}
agent, err := prebuilt.CreateReflectionAgentMap(config)
if err != nil {
log.Fatal(err)
}
initialState := map[string]any{
"messages": []llms.MessageContent{
{
Role: llms.ChatMessageTypeHuman,
Parts: []llms.ContentPart{llms.TextPart("Review this SQL query function for issues")},
},
},
}
result, err := agent.Invoke(context.Background(), initialState)
if err != nil {
log.Fatal(err)
}
draft := result["draft"].(string)
fmt.Printf("Code review:\n%s\n", draft)
}
Example (WithSeparateReflector) ¶
package main
import (
"context"
"fmt"
"log"
"github.com/smallnest/langgraphgo/prebuilt"
"github.com/tmc/langchaingo/llms"
"github.com/tmc/langchaingo/llms/openai"
)
func main() {
// Create generation model
generationModel, err := openai.New(openai.WithModel("gpt-4"))
if err != nil {
log.Fatal(err)
}
// Create separate reflection model (could be a different model)
reflectionModel, err := openai.New(openai.WithModel("gpt-4"))
if err != nil {
log.Fatal(err)
}
// Configure with separate models
config := prebuilt.ReflectionAgentConfig{
Model: generationModel,
ReflectionModel: reflectionModel,
MaxIterations: 2,
Verbose: true,
SystemMessage: "You are a helpful assistant providing detailed explanations.",
ReflectionPrompt: `You are a senior technical reviewer.
Evaluate the response for:
1. Technical accuracy
2. Completeness of explanation
3. Clarity for the target audience
4. Use of examples
Be specific in your feedback.`,
}
agent, err := prebuilt.CreateReflectionAgentMap(config)
if err != nil {
log.Fatal(err)
}
initialState := map[string]any{
"messages": []llms.MessageContent{
{
Role: llms.ChatMessageTypeHuman,
Parts: []llms.ContentPart{llms.TextPart("What is a Merkle tree and how is it used in blockchain?")},
},
},
}
result, err := agent.Invoke(context.Background(), initialState)
if err != nil {
log.Fatal(err)
}
draft := result["draft"].(string)
iteration := result["iteration"].(int)
fmt.Printf("Final draft (after %d iterations):\n%s\n", iteration, draft)
}
func CreateReflectionAgentMap ¶ added in v0.8.0
func CreateReflectionAgentMap(config ReflectionAgentConfig) (*graph.StateRunnable[map[string]any], error)
CreateReflectionAgentMap creates a new Reflection Agent with map[string]any state
func CreateStandardAgentSchema ¶ added in v0.8.5
CreateStandardAgentSchema creates a standard map schema for agents with messages reducer. This is the common schema setup used by most agent implementations.
func CreateSupervisor ¶
func CreateSupervisor[S any]( model llms.Model, members map[string]*graph.StateRunnable[S], getMessages func(S) []llms.MessageContent, getNext func(S) string, setNext func(S, string) S, ) (*graph.StateRunnable[S], error)
CreateSupervisor creates a generic supervisor graph
func CreateSupervisorMap ¶ added in v0.8.0
func CreateSupervisorMap(model llms.Model, members map[string]*graph.StateRunnable[map[string]any]) (*graph.StateRunnable[map[string]any], error)
CreateSupervisorMap creates a supervisor graph with map[string]any state
func CreateTreeOfThoughtsAgent ¶ added in v0.6.0
func CreateTreeOfThoughtsAgent[S any]( config TreeOfThoughtsConfig, getActivePaths func(S) map[string]*SearchPath, setActivePaths func(S, map[string]*SearchPath) S, getSolution func(S) string, setSolution func(S, string) S, getVisited func(S) map[string]bool, setVisited func(S, map[string]bool) S, getIteration func(S) int, setIteration func(S, int) S, ) (*graph.StateRunnable[S], error)
CreateTreeOfThoughtsAgent creates a generic Tree of Thoughts Agent
func CreateTreeOfThoughtsAgentMap ¶ added in v0.8.0
func CreateTreeOfThoughtsAgentMap(config TreeOfThoughtsConfig) (*graph.StateRunnable[map[string]any], error)
CreateTreeOfThoughtsAgentMap creates a ToT agent with map[string]any state
func HasToolCallsInLastMessage ¶ added in v0.8.5
func HasToolCallsInLastMessage(messages []llms.MessageContent) bool
HasToolCallsInLastMessage checks if the last message in the messages slice contains tool calls. This is used for conditional edge routing in agent graphs. Returns true if any part in the last message is a ToolCall.
func ToolNode ¶
func ToolNode[S any]( executor *ToolExecutor, getMessages func(S) []llms.MessageContent, setMessages func(S, []llms.MessageContent) S, ) func(context.Context, S) (S, error)
ToolNode creates a generic tool execution node
func ToolNodeMap ¶ added in v0.8.0
func ToolNodeMap(executor *ToolExecutor) func(context.Context, map[string]any) (map[string]any, error)
ToolNodeMap is a reusable node that executes tool calls from the last AI message for map[string]any state.
Types ¶
type AgentState ¶ added in v0.8.0
type AgentState struct {
// Messages contains the conversation history
Messages []llms.MessageContent
// ExtraTools contains additional tools available to the agent
ExtraTools []tools.Tool
}
AgentState represents the general agent state. This is the default state type for generic agents.
type ChatAgent ¶ added in v0.6.0
type ChatAgent struct {
// The underlying agent runnable
Runnable *graph.StateRunnable[map[string]any]
// contains filtered or unexported fields
}
ChatAgent represents a session with a user and can handle multi-turn conversations.
Example ¶
Example demonstrating multi-turn conversation with ChatAgent
package main
import (
"context"
"fmt"
"os"
"github.com/smallnest/langgraphgo/prebuilt"
"github.com/tmc/langchaingo/llms/openai"
)
func main() {
// Check if API key is available
apiKey := os.Getenv("OPENAI_API_KEY")
if apiKey == "" {
fmt.Println("OPENAI_API_KEY not set, skipping example")
return
}
// Create OpenAI model
model, err := openai.New()
if err != nil {
fmt.Printf("Error creating model: %v\n", err)
return
}
// Create ChatAgent with no tools
agent, err := prebuilt.NewChatAgent(model, nil)
if err != nil {
fmt.Printf("Error creating agent: %v\n", err)
return
}
ctx := context.Background()
// First turn
fmt.Println("User: Hello! My name is Alice.")
resp1, err := agent.Chat(ctx, "Hello! My name is Alice.")
if err != nil {
fmt.Printf("Error: %v\n", err)
return
}
fmt.Printf("Agent: %s\n\n", resp1)
// Second turn - agent should remember the name
fmt.Println("User: What's my name?")
resp2, err := agent.Chat(ctx, "What's my name?")
if err != nil {
fmt.Printf("Error: %v\n", err)
return
}
fmt.Printf("Agent: %s\n\n", resp2)
// Third turn - continue the conversation
fmt.Println("User: Tell me a short joke about programmers.")
resp3, err := agent.Chat(ctx, "Tell me a short joke about programmers.")
if err != nil {
fmt.Printf("Error: %v\n", err)
return
}
fmt.Printf("Agent: %s\n", resp3)
// Display the session ID
fmt.Printf("\nSession ID: %s\n", agent.ThreadID())
}
func NewChatAgent ¶ added in v0.6.0
func NewChatAgent(model llms.Model, inputTools []tools.Tool, opts ...CreateAgentOption) (*ChatAgent, error)
NewChatAgent creates a new ChatAgent. It wraps the underlying agent graph and manages conversation history automatically.
func (*ChatAgent) AddTool ¶ added in v0.6.0
AddTool adds a new tool to the dynamic tools list. If a tool with the same name already exists, it will be replaced.
func (*ChatAgent) AsyncChat ¶ added in v0.6.0
AsyncChat sends a message to the agent and returns a channel for streaming the response. This method provides TRUE streaming by using the LLM's streaming API. Chunks are sent to the channel as they're generated by the LLM in real-time. The channel will be closed when the response is complete or an error occurs.
func (*ChatAgent) AsyncChatWithChunks ¶ added in v0.6.0
AsyncChatWithChunks sends a message to the agent and returns a channel for streaming the response. Unlike AsyncChat, this streams the response in word-sized chunks for better readability. The channel will be closed when the response is complete.
func (*ChatAgent) Chat ¶ added in v0.6.0
Chat sends a message to the agent and returns the response. It maintains the conversation context by accumulating message history.
func (*ChatAgent) ClearTools ¶ added in v0.6.0
func (c *ChatAgent) ClearTools()
ClearTools removes all dynamic tools.
func (*ChatAgent) GetTools ¶ added in v0.6.0
GetTools returns a copy of the current dynamic tools list. Note: This does not include the base tools provided when creating the agent.
func (*ChatAgent) PrintStream ¶ added in v0.6.0
PrintStream prints the agent's response to the provided writer (e.g., os.Stdout). Note: This is a simplified version that uses Chat internally. For true streaming support, you would need to use a graph that supports streaming.
func (*ChatAgent) RemoveTool ¶ added in v0.6.0
RemoveTool removes a tool by name from the dynamic tools list. Returns true if the tool was found and removed, false otherwise.
type ChatAgentState ¶ added in v0.8.0
type ChatAgentState struct {
// Messages contains the conversation history
Messages []llms.MessageContent
// SystemPrompt is the system prompt for the chat agent
SystemPrompt string
// ExtraTools contains additional tools available to the agent
ExtraTools []tools.Tool
}
ChatAgentState represents the state for a chat agent. This is a conversational agent that maintains message history.
type CreateAgentOption ¶ added in v0.3.1
type CreateAgentOption func(*CreateAgentOptions)
func WithMaxIterations ¶ added in v0.8.3
func WithMaxIterations(maxIterations int) CreateAgentOption
func WithSkillDir ¶ added in v0.3.2
func WithSkillDir(skillDir string) CreateAgentOption
func WithStateModifier ¶ added in v0.3.1
func WithStateModifier(modifier func(messages []llms.MessageContent) []llms.MessageContent) CreateAgentOption
func WithSystemMessage ¶ added in v0.3.1
func WithSystemMessage(message string) CreateAgentOption
func WithVerbose ¶ added in v0.3.2
func WithVerbose(verbose bool) CreateAgentOption
type CreateAgentOptions ¶ added in v0.3.1
type CreateAgentOptions struct {
Verbose bool
SystemMessage string
StateModifier func(messages []llms.MessageContent) []llms.MessageContent
MaxIterations int
// contains filtered or unexported fields
}
CreateAgentOptions contains options for creating an agent
type ManusConfig ¶ added in v0.8.5
type ManusConfig struct {
WorkDir string
PlanPath string
NotesPath string
OutputPath string
AutoSave bool
Verbose bool
}
ManusConfig configures a Manus-style planning agent with persistent files
type MockLLMEmptyContent ¶ added in v0.6.0
type MockLLMEmptyContent struct{}
MockLLMEmptyContent for testing empty content response
func (*MockLLMEmptyContent) Call ¶ added in v0.6.0
func (m *MockLLMEmptyContent) Call(ctx context.Context, prompt string, options ...llms.CallOption) (string, error)
func (*MockLLMEmptyContent) GenerateContent ¶ added in v0.6.0
func (m *MockLLMEmptyContent) GenerateContent(ctx context.Context, messages []llms.MessageContent, options ...llms.CallOption) (*llms.ContentResponse, error)
type MockLLMError ¶ added in v0.6.0
type MockLLMError struct{}
MockLLMError for testing GenerateContent error
func (*MockLLMError) Call ¶ added in v0.6.0
func (m *MockLLMError) Call(ctx context.Context, prompt string, options ...llms.CallOption) (string, error)
func (*MockLLMError) GenerateContent ¶ added in v0.6.0
func (m *MockLLMError) GenerateContent(ctx context.Context, messages []llms.MessageContent, options ...llms.CallOption) (*llms.ContentResponse, error)
type MockToolError ¶ added in v0.6.0
type MockToolError struct {
// contains filtered or unexported fields
}
MockToolError for testing tool execution error
func (*MockToolError) Description ¶ added in v0.6.0
func (t *MockToolError) Description() string
func (*MockToolError) Name ¶ added in v0.6.0
func (t *MockToolError) Name() string
type PEVAgentConfig ¶ added in v0.6.0
type PEVAgentConfig struct {
Model llms.Model
Tools []tools.Tool
MaxRetries int
SystemMessage string
VerificationPrompt string
Verbose bool
}
PEVAgentConfig configures the PEV (Plan, Execute, Verify) agent
type PEVAgentState ¶ added in v0.8.0
type PEVAgentState struct {
// Messages contains the conversation history
Messages []llms.MessageContent
// Plan contains the list of steps to execute
Plan []string
// CurrentStep is the index of the current step being executed
CurrentStep int
// LastToolResult contains the result of the last tool execution
LastToolResult string
// IntermediateSteps contains results from intermediate steps
IntermediateSteps []string
// Retries counts the number of retries attempted
Retries int
// VerificationResult contains the verification status
VerificationResult string
// FinalAnswer contains the final answer after verification
FinalAnswer string
}
PEVAgentState represents the state for a Plan-Execute-Verify agent. This agent follows a three-step process: plan, execute, and verify.
type Phase ¶ added in v0.8.5
type Phase struct {
Name string
Description string
NodeName string
Complete bool
CompletedAt time.Time
}
Phase represents a single phase in the Manus plan
type PlanningAgentState ¶ added in v0.8.0
type PlanningAgentState struct {
// Messages contains the conversation history
Messages []llms.MessageContent
// WorkflowPlan contains the parsed workflow plan from LLM
WorkflowPlan *WorkflowPlan
}
PlanningAgentState represents the state for a planning agent. The planning agent first generates a workflow plan using LLM, then executes according to the generated plan.
type ReactAgentState ¶ added in v0.6.0
type ReactAgentState struct {
// Messages contains the conversation history
Messages []llms.MessageContent `json:"messages"`
// IterationCount counts the current iteration number
IterationCount int `json:"iteration_count"`
}
ReactAgentState represents the default state for a ReAct agent
type ReflectionAgentConfig ¶ added in v0.5.0
type ReflectionAgentConfig struct {
Model llms.Model
ReflectionModel llms.Model
MaxIterations int
SystemMessage string
ReflectionPrompt string
Verbose bool
}
ReflectionAgentConfig configures the reflection agent
type ReflectionAgentState ¶ added in v0.8.0
type ReflectionAgentState struct {
// Messages contains the conversation history
Messages []llms.MessageContent
// Iteration counts the current iteration number
Iteration int
// Reflection contains the agent's self-reflection on its draft
Reflection string
// Draft contains the current draft response being refined
Draft string
}
ReflectionAgentState represents the state for a reflection agent. The reflection agent iteratively improves its response through self-reflection and revision.
type SearchPath ¶ added in v0.6.0
type SearchPath struct {
States []ThoughtState
Score float64
}
type SupervisorState ¶ added in v0.6.0
type SupervisorState struct {
// Messages contains the conversation history
Messages []llms.MessageContent `json:"messages"`
// Next is the next worker to act
Next string `json:"next,omitempty"`
}
SupervisorState represents the state for a supervisor workflow
type ThoughtEvaluator ¶ added in v0.6.0
type ThoughtGenerator ¶ added in v0.6.0
type ThoughtGenerator interface {
Generate(ctx context.Context, current ThoughtState) ([]ThoughtState, error)
}
type ThoughtState ¶ added in v0.6.0
type ToolExecutor ¶
ToolExecutor executes tools based on invocations
func NewToolExecutor ¶
func NewToolExecutor(inputTools []tools.Tool) *ToolExecutor
NewToolExecutor creates a new ToolExecutor with the given tools
func (*ToolExecutor) Execute ¶
func (te *ToolExecutor) Execute(ctx context.Context, invocation ToolInvocation) (string, error)
Execute executes a single tool invocation
func (*ToolExecutor) ExecuteMany ¶
func (te *ToolExecutor) ExecuteMany(ctx context.Context, invocations []ToolInvocation) ([]string, error)
ExecuteMany executes multiple tool invocations. They could be run in parallel, but here they run sequentially for simplicity. In a real graph this might be a ParallelNode; this function is provided as a convenience helper.
type ToolInvocation ¶
ToolInvocation represents a request to execute a tool
type ToolWithSchema ¶ added in v0.8.3
ToolWithSchema is an optional interface that tools can implement to provide their parameter schema
type TreeOfThoughtsConfig ¶ added in v0.6.0
type TreeOfThoughtsConfig struct {
Generator ThoughtGenerator
Evaluator ThoughtEvaluator
MaxDepth int
MaxPaths int
Verbose bool
InitialState ThoughtState
}
type TreeOfThoughtsState ¶ added in v0.8.0
type TreeOfThoughtsState struct {
// ActivePaths contains all active reasoning paths being explored
ActivePaths map[string]*SearchPath
// Solution contains the best solution found so far
Solution string
// VisitedStates tracks which states have been visited to avoid cycles
VisitedStates map[string]bool
// Iteration counts the current iteration number
Iteration int
}
TreeOfThoughtsState represents the state for a tree-of-thoughts agent. This agent explores multiple reasoning paths in parallel to find the best solution.
type VerificationResult ¶ added in v0.6.0
type VerificationResult struct {
IsSuccessful bool `json:"is_successful"`
Reasoning string `json:"reasoning"`
}
VerificationResult represents the result of verification
type WorkflowEdge ¶ added in v0.4.0
type WorkflowEdge struct {
From string `json:"from"`
To string `json:"to"`
Condition string `json:"condition,omitempty"` // For conditional edges
}
WorkflowEdge represents an edge in the workflow plan
type WorkflowNode ¶ added in v0.4.0
type WorkflowNode struct {
Name string `json:"name"`
Type string `json:"type"` // "start", "process", "end", "conditional"
}
WorkflowNode represents a node in the workflow plan
type WorkflowPlan ¶ added in v0.4.0
type WorkflowPlan struct {
Nodes []WorkflowNode `json:"nodes"`
Edges []WorkflowEdge `json:"edges"`
}
WorkflowPlan represents the parsed workflow plan from LLM