prebuilt

package
v0.7.0 Latest
Warning

This package is not in the latest version of its module.

Go to latest
Published: Dec 21, 2025 License: MIT Imports: 13 Imported by: 1

Documentation

Overview

Package prebuilt provides ready-to-use agent implementations for common AI patterns.

This package offers a collection of pre-built agents that implement various reasoning and execution patterns, from simple tool-using agents to complex multi-agent systems. Each agent is implemented using the core graph package and can be easily customized or extended for specific use cases.

Available Agents

## ReAct Agent (Reason + Act) The ReAct agent combines reasoning and acting by having the model think about what to do, choose tools to use, and act on the results. It's suitable for general-purpose tasks.

import (
	"github.com/smallnest/langgraphgo/prebuilt"
	"github.com/tmc/langchaingo/llms"
	"github.com/tmc/langchaingo/tools"
)

// Create a ReAct agent with tools
agent, err := prebuilt.CreateReactAgent(
	llm,           // Language model
	[]tools.Tool{  // Available tools
		&tools.CalculatorTool{},
		weatherTool,
	},
	10, // Max iterations
)

// Execute agent
result, err := agent.Invoke(ctx, map[string]any{
	"messages": []llms.MessageContent{
		{
			Role: llms.ChatMessageTypeHuman,
			Parts: []llms.ContentPart{
				llms.TextPart("What's the weather in London and calculate 15% of 100?"),
			},
		},
	},
})

## Typed ReAct Agent A type-safe version of the ReAct agent using Go generics (NOTE: verify against the Functions section below — CreateReactAgentTyped takes (model, tools, maxIterations); for custom state types see CreateReactAgentWithCustomStateTyped):

type AgentState struct {
	Messages       []llms.MessageContent `json:"messages"`
	IterationCount int                    `json:"iteration_count"`
}

agent, err := prebuilt.CreateReactAgentTyped[AgentState](
	llm,
	tools,
	10,
	func() AgentState { return AgentState{} },
)

## Supervisor Agent Orchestrates multiple specialized agents, routing tasks to the appropriate agent:

// Create specialized agents
weatherAgent, _ := prebuilt.CreateReactAgent(llm, weatherTools, 5)
calcAgent, _ := prebuilt.CreateReactAgent(llm, calcTools, 5)
searchAgent, _ := prebuilt.CreateReactAgent(llm, searchTools, 5)

// Create supervisor
members := map[string]*graph.StateRunnable{
	"weather": weatherAgent,
	"calculator": calcAgent,
	"search": searchAgent,
}

supervisor, err := prebuilt.CreateSupervisor(
	llm,
	members,
)

// Use supervisor to route tasks
result, err := supervisor.Invoke(ctx, map[string]any{
	"messages": []llms.MessageContent{
		{
			Role: llms.ChatMessageTypeHuman,
			Parts: []llms.ContentPart{
				llms.TextPart("Calculate the distance between London and Paris"),
			},
		},
	},
})

## Planning Agent Creates and executes plans for complex tasks:

planner, err := prebuilt.CreatePlanningAgent(
	llm,
	planningTools,
	executionTools,
)

// The agent will create a plan, then execute each step
result, err := planner.Invoke(ctx, map[string]any{
	"messages": []llms.MessageContent{
		{
			Role: llms.ChatMessageTypeHuman,
			Parts: []llms.ContentPart{
				llms.TextPart("Plan and execute a research report on renewable energy"),
			},
		},
	},
})

## Reflection Agent Uses self-reflection to improve responses:

reflectionAgent, err := prebuilt.CreateReflectionAgent(
	llm,
	tools,
)

// The agent will reflect on and potentially revise its answers
result, err := reflectionAgent.Invoke(ctx, map[string]any{
	"messages": []llms.MessageContent{
		{
			Role: llms.ChatMessageTypeHuman,
			Parts: []llms.ContentPart{
				llms.TextPart("Explain quantum computing"),
			},
		},
	},
})

## Tree of Thoughts Agent Explores multiple reasoning paths before choosing the best:

totAgent, err := prebuilt.CreateTreeOfThoughtsAgent(
	llm,
	3, // Number of thoughts to generate
	5, // Maximum steps
)

// The agent will generate and evaluate multiple reasoning paths
result, err := totAgent.Invoke(ctx, map[string]any{
	"messages": []llms.MessageContent{
		{
			Role: llms.ChatMessageTypeHuman,
			Parts: []llms.ContentPart{
				llms.TextPart("Solve this complex math problem step by step"),
			},
		},
	},
})

RAG (Retrieval-Augmented Generation)

## Basic RAG Agent Combines document retrieval with generation:

rag, err := prebuilt.CreateRAGAgent(
	llm,
	documentLoader,   // Loads documents
	textSplitter,     // Splits text into chunks
	embedder,         // Creates embeddings
	vectorStore,      // Stores and searches embeddings
	5,                // Number of documents to retrieve
)

// The agent will retrieve relevant documents and generate answers
result, err := rag.Invoke(ctx, map[string]any{
	"messages": []llms.MessageContent{
		{
			Role: llms.ChatMessageTypeHuman,
			Parts: []llms.ContentPart{
				llms.TextPart("What are the benefits of renewable energy?"),
			},
		},
	},
})

## Advanced RAG with Conditional Processing

rag, err := prebuilt.CreateConditionalRAGAgent(
	llm,
	loader,
	splitter,
	embedder,
	vectorStore,
	3, // Retrieve count
	// Condition function to decide whether to use RAG
	func(ctx context.Context, query string) bool {
		return len(strings.Fields(query)) > 5
	},
)

## Chat Agent For conversational applications:

chatAgent, err := prebuilt.CreateChatAgent(
	llm,
	systemPrompt, // Optional system prompt
	memory,        // Memory for conversation history
)

// The agent maintains conversation context
result, err := chatAgent.Invoke(ctx, map[string]any{
	"messages": []llms.MessageContent{
		{
			Role: llms.ChatMessageTypeHuman,
			Parts: []llms.ContentPart{
				llms.TextPart("Hello! How are you?"),
			},
		},
	},
})

Custom Tools

Create custom tools for agents:

// WeatherTool is a minimal custom tool that reports the weather for a city.
// It satisfies the tools.Tool interface (Name, Description, Call).
type WeatherTool struct{}

// Name returns the identifier the LLM uses to select this tool.
func (w *WeatherTool) Name() string { return "get_weather" }

// Description tells the LLM what this tool does.
func (w *WeatherTool) Description() string {
	return "Get current weather for a city"
}

// Call decodes the JSON input, looks up the weather for the requested city,
// and returns a human-readable summary.
func (w *WeatherTool) Call(ctx context.Context, input string) (string, error) {
	var req struct {
		City string `json:"city"`
	}
	if err := json.Unmarshal([]byte(input), &req); err != nil {
		return "", err
	}

	// A real implementation would query a weather API here; this example
	// returns a canned response.
	return fmt.Sprintf("The weather in %s is 22°C and sunny", req.City), nil
}

// Use with any agent
weatherTool := &WeatherTool{}
agent, err := prebuilt.CreateReactAgent(llm, []tools.Tool{weatherTool}, 10)

Agent Configuration

Most agents support configuration through options:

agent, err := prebuilt.CreateReactAgent(llm, tools, 10,
	prebuilt.WithMaxTokens(4000),
	prebuilt.WithTemperature(0.7),
	prebuilt.WithStreaming(true),
	prebuilt.WithCheckpointing(checkpointer),
	prebuilt.WithMemory(memory),
)

Streaming Support

Enable real-time streaming of agent thoughts and actions:

// Create streaming agent
agent, _ := prebuilt.CreateReactAgent(llm, tools, 10)
streaming := prebuilt.NewStreamingAgent(agent)

// Stream execution
stream, _ := streaming.Stream(ctx, input)
for event := range stream.Events {
	fmt.Printf("Event: %v\n", event)
}

Memory Integration

Agents can integrate with various memory strategies:

import "github.com/smallnest/langgraphgo/memory"

// Use buffer memory
bufferMemory := memory.NewBufferMemory(100)
agent, _ := prebuilt.CreateChatAgent(llm, "", bufferMemory)

// Use summarization memory
summMemory := memory.NewSummarizationMemory(llm, 2000)
agent, _ := prebuilt.CreateChatAgent(llm, "", summMemory)

Best Practices

  1. Choose the right agent pattern for your use case
  2. Provide clear tool descriptions and examples
  3. Set appropriate iteration limits to prevent infinite loops
  4. Use memory for conversational applications
  5. Enable streaming for better user experience
  6. Use checkpointing for long-running tasks
  7. Test with various input patterns
  8. Monitor token usage and costs

Error Handling

Agents include built-in error handling:

  • Tool execution failures
  • LLM API errors
  • Timeout protection
  • Iteration limit enforcement
  • Graceful degradation strategies

Performance Considerations

  • Use typed agents for better performance
  • Cache tool results when appropriate
  • Batch tool calls when possible
  • Monitor resource usage
  • Consider parallel execution for independent tasks
Example (PlanningAgent)

Example demonstrates how to use CreatePlanningAgent to build a dynamic workflow based on user requests

package main

import (
	"context"
	"fmt"

	"github.com/smallnest/langgraphgo/graph"
	"github.com/tmc/langchaingo/llms"
)

// makeSimNode builds a demo workflow node: when executed it logs a progress
// line and appends a canned AI reply to the "messages" slice in the state map.
func makeSimNode(name, desc, progress, reply string) *graph.Node {
	return &graph.Node{
		Name:        name,
		Description: desc,
		Function: func(ctx context.Context, state any) (any, error) {
			st := state.(map[string]any)
			msgs := st["messages"].([]llms.MessageContent)

			// Simulate the work for this step.
			fmt.Println(progress)

			ai := llms.MessageContent{
				Role:  llms.ChatMessageTypeAI,
				Parts: []llms.ContentPart{llms.TextPart(reply)},
			}
			return map[string]any{
				"messages": append(msgs, ai),
			}, nil
		},
	}
}

func main() {
	// Step 1: define the custom nodes a planned workflow may use. All five
	// follow the same shape, so a small factory keeps the example compact.
	nodes := []*graph.Node{
		makeSimNode("fetch_data", "Fetch data from external API or database",
			"Fetching data from API...", "Data fetched successfully: [item1, item2, item3]"),
		makeSimNode("validate_data", "Validate the integrity and format of data",
			"Validating data...", "Data validation passed"),
		makeSimNode("transform_data", "Transform and normalize data into required format",
			"Transforming data...", "Data transformed to JSON format"),
		makeSimNode("analyze_data", "Perform statistical analysis on the data",
			"Analyzing data...", "Analysis complete: mean=42, median=40, std=5.2"),
		makeSimNode("save_results", "Save processed results to storage",
			"Saving results...", "Results saved to database"),
	}

	// Step 2: create your LLM model. In a real application you would use an
	// actual LLM (OpenAI, Anthropic, ...), e.g.:
	//   var model llms.Model = openai.New(...)
	//
	// For this example we skip the actual LLM call. The LLM would receive the
	// user request plus the available nodes and generate a workflow plan like:
	//   {
	//     "nodes": [
	//       {"name": "fetch_data", "type": "process"},
	//       {"name": "validate_data", "type": "process"},
	//       {"name": "transform_data", "type": "process"},
	//       {"name": "save_results", "type": "process"}
	//     ],
	//     "edges": [
	//       {"from": "START", "to": "fetch_data"},
	//       {"from": "fetch_data", "to": "validate_data"},
	//       {"from": "validate_data", "to": "transform_data"},
	//       {"from": "transform_data", "to": "save_results"},
	//       {"from": "save_results", "to": "END"}
	//     ]
	//   }

	fmt.Println("CreatePlanningAgent example:")
	fmt.Println("This agent dynamically creates workflows based on user requests")
	fmt.Println()
	fmt.Println("Available nodes:")
	for i, node := range nodes {
		fmt.Printf("%d. %s: %s\n", i+1, node.Name, node.Description)
	}
	for _, line := range []string{
		"",
		"User request: 'Fetch data, validate it, transform it, and save the results'",
		"",
		"The LLM will:",
		"1. Analyze the user request",
		"2. Select appropriate nodes from available nodes",
		"3. Generate a workflow plan (similar to a mermaid diagram)",
		"4. The agent will execute the planned workflow",
		"",
		"Expected workflow:",
		"START -> fetch_data -> validate_data -> transform_data -> save_results -> END",
	} {
		fmt.Println(line)
	}
}
Output:
CreatePlanningAgent example:
This agent dynamically creates workflows based on user requests

Available nodes:
1. fetch_data: Fetch data from external API or database
2. validate_data: Validate the integrity and format of data
3. transform_data: Transform and normalize data into required format
4. analyze_data: Perform statistical analysis on the data
5. save_results: Save processed results to storage

User request: 'Fetch data, validate it, transform it, and save the results'

The LLM will:
1. Analyze the user request
2. Select appropriate nodes from available nodes
3. Generate a workflow plan (similar to a mermaid diagram)
4. The agent will execute the planned workflow

Expected workflow:
START -> fetch_data -> validate_data -> transform_data -> save_results -> END
Example (PlanningAgentRealUsage)

Example showing real usage pattern

package main

import (
	"fmt"
)

// main prints a step-by-step usage sketch for CreatePlanningAgent.
func main() {
	steps := []string{
		"Real usage pattern:",
		"",
		"// 1. Define your nodes",
		"nodes := []*graph.Node{...}",
		"",
		"// 2. Initialize your LLM model",
		"model := openai.New()",
		"",
		"// 3. Create the planning agent",
		"agent, err := prebuilt.CreatePlanningAgent(",
		"    model,",
		"    nodes,",
		"    []tools.Tool{},",
		"    prebuilt.WithVerbose(true),",
		")",
		"",
		"// 4. Prepare initial state with user request",
		"initialState := map[string]any{",
		"    \"messages\": []llms.MessageContent{",
		"        llms.TextParts(llms.ChatMessageTypeHuman,",
		"            \"Fetch, validate, and save the customer data\"),",
		"    },",
		"}",
		"",
		"// 5. Execute the agent",
		"result, err := agent.Invoke(context.Background(), initialState)",
		"",
		"// 6. Access results",
		"mState := result.(map[string]any)",
		"messages := mState[\"messages\"].([]llms.MessageContent)",
	}
	for _, s := range steps {
		fmt.Println(s)
	}
}
Output:
Real usage pattern:

// 1. Define your nodes
nodes := []*graph.Node{...}

// 2. Initialize your LLM model
model := openai.New()

// 3. Create the planning agent
agent, err := prebuilt.CreatePlanningAgent(
    model,
    nodes,
    []tools.Tool{},
    prebuilt.WithVerbose(true),
)

// 4. Prepare initial state with user request
initialState := map[string]any{
    "messages": []llms.MessageContent{
        llms.TextParts(llms.ChatMessageTypeHuman,
            "Fetch, validate, and save the customer data"),
    },
}

// 5. Execute the agent
result, err := agent.Invoke(context.Background(), initialState)

// 6. Access results
mState := result.(map[string]any)
messages := mState["messages"].([]llms.MessageContent)
Example (PlanningAgentWithVerbose)

Example showing how to use the planning agent with verbose mode

package main

import (
	"fmt"
)

// main shows the console trace produced when verbose mode is enabled.
func main() {
	// In a real application you would first build the nodes and agent:
	//   nodes := []*graph.Node{...}
	//   agent, err := prebuilt.CreatePlanningAgent(model, nodes, []tools.Tool{}, prebuilt.WithVerbose(true))

	for _, line := range []string{
		"With verbose mode enabled, you will see:",
		"🤔 Planning workflow...",
		"📋 Generated plan: {...}",
		"🚀 Executing planned workflow...",
		"  ✓ Added node: step1",
		"  ✓ Added node: step2",
		"  ✓ Added edge: step1 -> step2",
		"  ✓ Added edge: step2 -> END",
		"✅ Workflow execution completed",
	} {
		fmt.Println(line)
	}
}
Output:
With verbose mode enabled, you will see:
🤔 Planning workflow...
📋 Generated plan: {...}
🚀 Executing planned workflow...
  ✓ Added node: step1
  ✓ Added node: step2
  ✓ Added edge: step1 -> step2
  ✓ Added edge: step2 -> END
✅ Workflow execution completed
Example (WorkflowPlanFormat)

Example showing how the LLM generates workflow plans

package main

import (
	"fmt"
)

// main prints the JSON format the LLM must emit for a workflow plan,
// followed by the rules the plan has to satisfy.
func main() {
	for _, line := range []string{
		"Workflow Plan JSON Format:",
		"",
		"{",
		"  \"nodes\": [",
		"    {\"name\": \"node_name\", \"type\": \"process\"},",
		"    {\"name\": \"another_node\", \"type\": \"process\"}",
		"  ],",
		"  \"edges\": [",
		"    {\"from\": \"START\", \"to\": \"node_name\"},",
		"    {\"from\": \"node_name\", \"to\": \"another_node\"},",
		"    {\"from\": \"another_node\", \"to\": \"END\"}",
		"  ]",
		"}",
		"",
		"Rules:",
		"1. Workflow must start with edge from 'START'",
		"2. Workflow must end with edge to 'END'",
		"3. Only use nodes from available nodes list",
		"4. Create logical flow based on user request",
	} {
		fmt.Println(line)
	}
}
Output:
Workflow Plan JSON Format:

{
  "nodes": [
    {"name": "node_name", "type": "process"},
    {"name": "another_node", "type": "process"}
  ],
  "edges": [
    {"from": "START", "to": "node_name"},
    {"from": "node_name", "to": "another_node"},
    {"from": "another_node", "to": "END"}
  ]
}

Rules:
1. Workflow must start with edge from 'START'
2. Workflow must end with edge to 'END'
3. Only use nodes from available nodes list
4. Create logical flow based on user request

Index

Examples

Constants

This section is empty.

Variables

This section is empty.

Functions

func CreateAgent added in v0.3.1

func CreateAgent(model llms.Model, inputTools []tools.Tool, opts ...CreateAgentOption) (*graph.StateRunnable, error)

CreateAgent creates a new agent graph with options

func CreatePEVAgent added in v0.6.0

func CreatePEVAgent(config PEVAgentConfig) (*graph.StateRunnable, error)

CreatePEVAgent creates a new PEV (Plan, Execute, Verify) Agent that implements a robust, self-correcting loop for reliable task execution.

The PEV pattern involves:

 1. Plan: Break down the user request into executable steps
 2. Execute: Run each step using available tools
 3. Verify: Check if the execution was successful
 4. Retry: If verification fails, re-plan and execute again

This pattern is particularly useful for:

  • High-stakes automation scenarios
  • Systems requiring accuracy verification
  • Situations with unreliable external tools

func CreatePlanningAgent added in v0.4.0

func CreatePlanningAgent(model llms.Model, nodes []*graph.Node, inputTools []tools.Tool, opts ...CreateAgentOption) (*graph.StateRunnable, error)

CreatePlanningAgent creates an agent that first plans the workflow using LLM, then executes according to the generated plan

Example

Example usage documentation

// Define your custom nodes
nodes := []*graph.Node{
	{
		Name:        "fetch_data",
		Description: "Fetch data from API",
		Function: func(ctx context.Context, state any) (any, error) {
			// Your implementation
			fmt.Println("Fetching data...")
			return state, nil
		},
	},
	{
		Name:        "transform_data",
		Description: "Transform the fetched data",
		Function: func(ctx context.Context, state any) (any, error) {
			// Your implementation
			fmt.Println("Transforming data...")
			return state, nil
		},
	},
}

// Create your LLM model (this is a placeholder)
var model llms.Model // = your actual LLM model

// Create the planning agent
agent, _ := CreatePlanningAgent(
	model,
	nodes,
	[]tools.Tool{},
	WithVerbose(true),
)

// Use the agent
initialState := map[string]any{
	"messages": []llms.MessageContent{
		llms.TextParts(llms.ChatMessageTypeHuman, "Fetch and transform the data"),
	},
}

result, _ := agent.Invoke(context.Background(), initialState)
fmt.Printf("Result: %v\n", result)

// Output will show the planning and execution steps

func CreateReactAgent

func CreateReactAgent(model llms.Model, inputTools []tools.Tool, maxIterations int) (*graph.StateRunnable, error)

CreateReactAgent creates a new ReAct agent graph

func CreateReactAgentTyped added in v0.6.0

func CreateReactAgentTyped(model llms.Model, inputTools []tools.Tool, maxIterations int) (*graph.StateRunnableTyped[ReactAgentState], error)

CreateReactAgentTyped creates a new typed ReAct agent graph

func CreateReactAgentWithCustomStateTyped added in v0.6.0

func CreateReactAgentWithCustomStateTyped[S any](
	model llms.Model,
	inputTools []tools.Tool,
	getMessages func(S) []llms.MessageContent,
	setMessages func(S, []llms.MessageContent) S,
	getIterationCount func(S) int,
	setIterationCount func(S, int) S,
	hasToolCalls func([]llms.MessageContent) bool,
	maxIterations int,
) (*graph.StateRunnableTyped[S], error)

CreateReactAgentWithCustomStateTyped creates a typed ReAct agent with custom state type

func CreateReflectionAgent added in v0.5.0

func CreateReflectionAgent(config ReflectionAgentConfig) (*graph.StateRunnable, error)

CreateReflectionAgent creates a new Reflection Agent that iteratively improves its responses through self-reflection

The Reflection pattern involves:

 1. Generate: Create an initial response
 2. Reflect: Critique the response and suggest improvements
 3. Revise: Generate an improved version based on reflection
 4. Repeat until satisfactory or max iterations reached

Example
package main

import (
	"context"
	"fmt"
	"log"

	"github.com/smallnest/langgraphgo/prebuilt"
	"github.com/tmc/langchaingo/llms"
	"github.com/tmc/langchaingo/llms/openai"
)

// main builds a reflection agent and prints every AI-authored message from
// the final conversation state.
func main() {
	// The same model is used for generation and reflection here.
	llm, err := openai.New()
	if err != nil {
		log.Fatal(err)
	}

	// Configure the reflection agent.
	cfg := prebuilt.ReflectionAgentConfig{
		Model:         llm,
		MaxIterations: 3,
		Verbose:       true,
		SystemMessage: "You are an expert technical writer. Create clear, accurate, and comprehensive responses.",
	}

	reflector, err := prebuilt.CreateReflectionAgent(cfg)
	if err != nil {
		log.Fatal(err)
	}

	// Seed the conversation with a single human question.
	state := map[string]any{
		"messages": []llms.MessageContent{
			{
				Role:  llms.ChatMessageTypeHuman,
				Parts: []llms.ContentPart{llms.TextPart("Explain the CAP theorem in distributed systems")},
			},
		},
	}

	out, err := reflector.Invoke(context.Background(), state)
	if err != nil {
		log.Fatal(err)
	}

	// Walk the final state and print the text of each AI message.
	final := out.(map[string]any)
	fmt.Println("=== Final Response ===")
	for _, msg := range final["messages"].([]llms.MessageContent) {
		if msg.Role != llms.ChatMessageTypeAI {
			continue
		}
		for _, part := range msg.Parts {
			if text, ok := part.(llms.TextContent); ok {
				fmt.Println(text.Text)
			}
		}
	}
}
Example (CustomCriteria)
package main

import (
	"context"
	"fmt"
	"log"

	"github.com/smallnest/langgraphgo/prebuilt"
	"github.com/tmc/langchaingo/llms"
	"github.com/tmc/langchaingo/llms/openai"
)

// main shows a reflection agent driven by custom, code-review-specific
// evaluation criteria supplied via ReflectionPrompt.
func main() {
	llm, err := openai.New()
	if err != nil {
		log.Fatal(err)
	}

	// Custom reflection criteria for code quality.
	cfg := prebuilt.ReflectionAgentConfig{
		Model:         llm,
		MaxIterations: 2,
		Verbose:       true,
		SystemMessage: "You are a senior software engineer reviewing code.",
		ReflectionPrompt: `Evaluate the code review for:
1. **Security**: Are security issues identified?
2. **Performance**: Are performance concerns addressed?
3. **Maintainability**: Are code quality issues noted?
4. **Best Practices**: Are language/framework best practices mentioned?

Provide specific, actionable feedback.`,
	}

	reviewer, err := prebuilt.CreateReflectionAgent(cfg)
	if err != nil {
		log.Fatal(err)
	}

	state := map[string]any{
		"messages": []llms.MessageContent{
			{
				Role:  llms.ChatMessageTypeHuman,
				Parts: []llms.ContentPart{llms.TextPart("Review this SQL query function for issues")},
			},
		},
	}

	out, err := reviewer.Invoke(context.Background(), state)
	if err != nil {
		log.Fatal(err)
	}

	// The agent stores its latest revision under the "draft" key.
	final := out.(map[string]any)
	fmt.Printf("Code review:\n%s\n", final["draft"].(string))
}
Example (WithSeparateReflector)
package main

import (
	"context"
	"fmt"
	"log"

	"github.com/smallnest/langgraphgo/prebuilt"
	"github.com/tmc/langchaingo/llms"
	"github.com/tmc/langchaingo/llms/openai"
)

// main demonstrates running generation and reflection on two distinct
// model instances (they could be entirely different models).
func main() {
	// Model that writes the answer.
	genModel, err := openai.New(openai.WithModel("gpt-4"))
	if err != nil {
		log.Fatal(err)
	}

	// Model that critiques the answer.
	critiqueModel, err := openai.New(openai.WithModel("gpt-4"))
	if err != nil {
		log.Fatal(err)
	}

	// Configure with separate models for generation and reflection.
	cfg := prebuilt.ReflectionAgentConfig{
		Model:           genModel,
		ReflectionModel: critiqueModel,
		MaxIterations:   2,
		Verbose:         true,
		SystemMessage:   "You are a helpful assistant providing detailed explanations.",
		ReflectionPrompt: `You are a senior technical reviewer.
Evaluate the response for:
1. Technical accuracy
2. Completeness of explanation
3. Clarity for the target audience
4. Use of examples

Be specific in your feedback.`,
	}

	reflector, err := prebuilt.CreateReflectionAgent(cfg)
	if err != nil {
		log.Fatal(err)
	}

	state := map[string]any{
		"messages": []llms.MessageContent{
			{
				Role:  llms.ChatMessageTypeHuman,
				Parts: []llms.ContentPart{llms.TextPart("What is a Merkle tree and how is it used in blockchain?")},
			},
		},
	}

	out, err := reflector.Invoke(context.Background(), state)
	if err != nil {
		log.Fatal(err)
	}

	// Report the final draft alongside how many reflect/revise rounds ran.
	final := out.(map[string]any)
	fmt.Printf("Final draft (after %d iterations):\n%s\n",
		final["iteration"].(int), final["draft"].(string))
}

func CreateSupervisor

func CreateSupervisor(model llms.Model, members map[string]*graph.StateRunnable) (*graph.StateRunnable, error)

CreateSupervisor creates a supervisor graph that orchestrates multiple agents

func CreateSupervisorTyped added in v0.6.0

func CreateSupervisorTyped(model llms.Model, members map[string]*graph.StateRunnableTyped[SupervisorState]) (*graph.StateRunnableTyped[SupervisorState], error)

CreateSupervisorTyped creates a typed supervisor graph that orchestrates multiple agents

func CreateSupervisorWithStateTyped added in v0.6.0

func CreateSupervisorWithStateTyped[S any](
	model llms.Model,
	members map[string]*graph.StateRunnableTyped[S],
	getMessages func(S) []llms.MessageContent,
	updateMessages func(S, []llms.MessageContent) S,
	getNext func(S) string,
	setNext func(S, string) S,
) (*graph.StateRunnableTyped[S], error)

CreateSupervisorWithStateTyped creates a typed supervisor with custom state type

func CreateTreeOfThoughtsAgent added in v0.6.0

func CreateTreeOfThoughtsAgent(config TreeOfThoughtsConfig) (*graph.StateRunnable, error)

CreateTreeOfThoughtsAgent creates a Tree of Thoughts search agent

Tree of Thoughts (ToT) is a search-based reasoning framework where problem-solving is modeled as a search through a tree. At each step, multiple candidate "thoughts" are generated, evaluated for feasibility, and the most promising branches are expanded while unpromising ones are pruned.

The ToT pattern involves:

 1. Decomposition: Break down the problem into steps
 2. Thought Generation: Generate multiple possible next steps (branches)
 3. State Evaluation: Evaluate each thought for validity and promise
 4. Pruning & Expansion: Remove bad branches, expand good ones
 5. Solution: Continue until a goal state is reached

This pattern is ideal for:

  • Logic puzzles with clear rules and goal states
  • Complex planning problems with constraints
  • Problems where multiple strategies should be explored

func PrintSolution added in v0.6.0

func PrintSolution(solution any)

Helper function to print solution

Types

type ChatAgent added in v0.6.0

type ChatAgent struct {
	// The underlying agent runnable
	Runnable *graph.StateRunnable
	// contains filtered or unexported fields
}

ChatAgent represents a session with a user and can handle multi-turn conversations.

Example

Example demonstrating multi-turn conversation with ChatAgent

package main

import (
	"context"
	"fmt"
	"os"

	"github.com/smallnest/langgraphgo/prebuilt"
	"github.com/tmc/langchaingo/llms/openai"
)

// main runs a three-turn conversation against a ChatAgent, demonstrating
// that conversation history is preserved across turns.
func main() {
	// Skip gracefully when no API key is configured.
	if os.Getenv("OPENAI_API_KEY") == "" {
		fmt.Println("OPENAI_API_KEY not set, skipping example")
		return
	}

	// Create OpenAI model.
	llm, err := openai.New()
	if err != nil {
		fmt.Printf("Error creating model: %v\n", err)
		return
	}

	// A ChatAgent with no tools still keeps per-session history.
	chat, err := prebuilt.NewChatAgent(llm, nil)
	if err != nil {
		fmt.Printf("Error creating agent: %v\n", err)
		return
	}

	ctx := context.Background()

	// The second turn relies on history from the first (the agent should
	// remember the user's name).
	prompts := []string{
		"Hello! My name is Alice.",
		"What's my name?",
		"Tell me a short joke about programmers.",
	}
	for i, p := range prompts {
		fmt.Printf("User: %s\n", p)
		reply, err := chat.Chat(ctx, p)
		if err != nil {
			fmt.Printf("Error: %v\n", err)
			return
		}
		if i < len(prompts)-1 {
			fmt.Printf("Agent: %s\n\n", reply)
		} else {
			fmt.Printf("Agent: %s\n", reply)
		}
	}

	// Display the session ID.
	fmt.Printf("\nSession ID: %s\n", chat.ThreadID())
}

func NewChatAgent added in v0.6.0

func NewChatAgent(model llms.Model, inputTools []tools.Tool, opts ...CreateAgentOption) (*ChatAgent, error)

NewChatAgent creates a new ChatAgent. It wraps the underlying agent graph and manages conversation history automatically.

func (*ChatAgent) AddTool added in v0.6.0

func (c *ChatAgent) AddTool(tool tools.Tool)

AddTool adds a new tool to the dynamic tools list. If a tool with the same name already exists, it will be replaced.

func (*ChatAgent) AsyncChat added in v0.6.0

func (c *ChatAgent) AsyncChat(ctx context.Context, message string) (<-chan string, error)

AsyncChat sends a message to the agent and returns a channel for streaming the response. This method provides TRUE streaming by using the LLM's streaming API. Chunks are sent to the channel as they're generated by the LLM in real-time. The channel will be closed when the response is complete or an error occurs.

func (*ChatAgent) AsyncChatWithChunks added in v0.6.0

func (c *ChatAgent) AsyncChatWithChunks(ctx context.Context, message string) (<-chan string, error)

AsyncChatWithChunks sends a message to the agent and returns a channel for streaming the response. Unlike AsyncChat, this streams the response in word-sized chunks for better readability. The channel will be closed when the response is complete.

func (*ChatAgent) Chat added in v0.6.0

func (c *ChatAgent) Chat(ctx context.Context, message string) (string, error)

Chat sends a message to the agent and returns the response. It maintains the conversation context by accumulating message history.

func (*ChatAgent) ClearTools added in v0.6.0

func (c *ChatAgent) ClearTools()

ClearTools removes all dynamic tools.

func (*ChatAgent) GetTools added in v0.6.0

func (c *ChatAgent) GetTools() []tools.Tool

GetTools returns a copy of the current dynamic tools list. Note: This does not include the base tools provided when creating the agent.

func (*ChatAgent) PrintStream added in v0.6.0

func (c *ChatAgent) PrintStream(ctx context.Context, message string, w io.Writer) error

PrintStream prints the agent's response to the provided writer (e.g., os.Stdout). Note: This is a simplified version that uses Chat internally. For true streaming support, you would need to use a graph that supports streaming.

func (*ChatAgent) RemoveTool added in v0.6.0

func (c *ChatAgent) RemoveTool(toolName string) bool

RemoveTool removes a tool by name from the dynamic tools list. Returns true if the tool was found and removed, false otherwise.

func (*ChatAgent) SetTools added in v0.6.0

func (c *ChatAgent) SetTools(newTools []tools.Tool)

SetTools replaces all dynamic tools with the provided tools. Note: This does not affect the base tools provided when creating the agent.

func (*ChatAgent) ThreadID added in v0.6.0

func (c *ChatAgent) ThreadID() string

ThreadID returns the current session ID.

type CreateAgentOption added in v0.3.1

type CreateAgentOption func(*CreateAgentOptions)

CreateAgentOption is a function that configures CreateAgentOptions.

func WithCheckpointer added in v0.3.1

func WithCheckpointer(checkpointer graph.CheckpointStore) CreateAgentOption

WithCheckpointer sets the checkpointer for the agent. Note: currently this is a placeholder and may not be fully integrated into the graph execution yet.

func WithSkillDir added in v0.3.2

func WithSkillDir(skillDir string) CreateAgentOption

WithSkillDir sets the skill directory for the agent

func WithStateModifier added in v0.3.1

func WithStateModifier(modifier func(messages []llms.MessageContent) []llms.MessageContent) CreateAgentOption

WithStateModifier sets a function to modify messages before they are sent to the model

func WithSystemMessage added in v0.3.1

func WithSystemMessage(message string) CreateAgentOption

WithSystemMessage sets the system message for the agent

func WithVerbose added in v0.3.2

func WithVerbose(verbose bool) CreateAgentOption

WithVerbose sets the verbose mode for the agent

type CreateAgentOptions added in v0.3.1

type CreateAgentOptions struct {
	Verbose       bool
	SystemMessage string
	StateModifier func(messages []llms.MessageContent) []llms.MessageContent
	Checkpointer  graph.CheckpointStore
	// contains filtered or unexported fields
}

CreateAgentOptions contains options for creating an agent

type MockLLMEmptyContent added in v0.6.0

type MockLLMEmptyContent struct{}

MockLLMEmptyContent for testing empty content response

func (*MockLLMEmptyContent) Call added in v0.6.0

func (m *MockLLMEmptyContent) Call(ctx context.Context, prompt string, options ...llms.CallOption) (string, error)

func (*MockLLMEmptyContent) GenerateContent added in v0.6.0

func (m *MockLLMEmptyContent) GenerateContent(ctx context.Context, messages []llms.MessageContent, options ...llms.CallOption) (*llms.ContentResponse, error)

type MockLLMError added in v0.6.0

type MockLLMError struct{}

MockLLMError for testing GenerateContent error

func (*MockLLMError) Call added in v0.6.0

func (m *MockLLMError) Call(ctx context.Context, prompt string, options ...llms.CallOption) (string, error)

func (*MockLLMError) GenerateContent added in v0.6.0

func (m *MockLLMError) GenerateContent(ctx context.Context, messages []llms.MessageContent, options ...llms.CallOption) (*llms.ContentResponse, error)

type MockToolError added in v0.6.0

type MockToolError struct {
	// contains filtered or unexported fields
}

MockToolError for testing tool execution error

func (*MockToolError) Call added in v0.6.0

func (t *MockToolError) Call(ctx context.Context, input string) (string, error)

func (*MockToolError) Description added in v0.6.0

func (t *MockToolError) Description() string

func (*MockToolError) Name added in v0.6.0

func (t *MockToolError) Name() string

type PEVAgentConfig added in v0.6.0

type PEVAgentConfig struct {
	// Model is the LLM to use for planning and verification
	Model llms.Model

	// Tools are the available tools that can be executed
	Tools []tools.Tool

	// MaxRetries is the maximum number of retry attempts when verification fails
	MaxRetries int

	// SystemMessage is the system message for the planner
	SystemMessage string

	// VerificationPrompt is the prompt for the verifier
	VerificationPrompt string

	// Verbose enables detailed logging
	Verbose bool
}

PEVAgentConfig configures the PEV (Plan, Execute, Verify) agent

type ReactAgentState added in v0.6.0

type ReactAgentState struct {
	Messages       []llms.MessageContent `json:"messages"`
	IterationCount int                   `json:"iteration_count"`
}

ReactAgentState represents the state for a ReAct agent

type ReflectionAgentConfig added in v0.5.0

type ReflectionAgentConfig struct {
	// Model is the LLM to use for both generation and reflection
	Model llms.Model

	// ReflectionModel is an optional separate model for reflection
	// If nil, uses the same model as generation
	ReflectionModel llms.Model

	// MaxIterations is the maximum number of generation-reflection cycles
	MaxIterations int

	// SystemMessage is the system message for the generation step
	SystemMessage string

	// ReflectionPrompt is the system message for the reflection step
	ReflectionPrompt string

	// Verbose enables detailed logging
	Verbose bool
}

ReflectionAgentConfig configures the reflection agent

type SearchPath added in v0.6.0

type SearchPath struct {
	States []ThoughtState
	Score  float64
}

SearchPath represents a path in the search tree

type SupervisorState added in v0.6.0

type SupervisorState struct {
	Messages []llms.MessageContent `json:"messages"`
	Next     string                `json:"next,omitempty"`
}

SupervisorState represents the state for a supervisor workflow

type ThoughtEvaluator added in v0.6.0

type ThoughtEvaluator interface {
	// Evaluate returns a score for the state (higher is better)
	// Returns -1 if the state should be pruned
	Evaluate(ctx context.Context, state ThoughtState, pathLength int) (float64, error)
}

ThoughtEvaluator evaluates the quality/promise of a state

type ThoughtGenerator added in v0.6.0

type ThoughtGenerator interface {
	// Generate returns all possible next states from the current state
	Generate(ctx context.Context, current ThoughtState) ([]ThoughtState, error)
}

ThoughtGenerator generates possible next states from a current state

type ThoughtState added in v0.6.0

type ThoughtState interface {
	// IsValid checks if the state is valid (no rule violations)
	IsValid() bool

	// IsGoal checks if this state represents a solution
	IsGoal() bool

	// GetDescription returns a human-readable description of the state
	GetDescription() string

	// Hash returns a unique hash for the state (for cycle detection)
	Hash() string
}

ThoughtState represents a state in the search tree

type ToolExecutor

type ToolExecutor struct {
	// contains filtered or unexported fields
}

ToolExecutor executes tools based on invocations

func NewToolExecutor

func NewToolExecutor(inputTools []tools.Tool) *ToolExecutor

NewToolExecutor creates a new ToolExecutor with the given tools

func (*ToolExecutor) Execute

func (te *ToolExecutor) Execute(ctx context.Context, invocation ToolInvocation) (string, error)

Execute executes a single tool invocation

func (*ToolExecutor) ExecuteMany

func (te *ToolExecutor) ExecuteMany(ctx context.Context, invocations []ToolInvocation) ([]string, error)

ExecuteMany executes multiple tool invocations. Despite the name, the current implementation runs them sequentially for simplicity. In a real graph this might be a ParallelNode, but a sequential helper is provided here.

func (*ToolExecutor) ToolNode

func (te *ToolExecutor) ToolNode(ctx context.Context, state any) (any, error)

ToolNode is a graph node function that executes tools. It expects the state to contain a list of ToolInvocation or a single ToolInvocation. This is a simplified version; in a real agent, it would parse messages.

type ToolInvocation

type ToolInvocation struct {
	Tool      string `json:"tool"`
	ToolInput string `json:"tool_input"`
}

ToolInvocation represents a request to execute a tool

type ToolNode

type ToolNode struct {
	Executor *ToolExecutor
}

ToolNode is a reusable node that executes tool calls from the last AI message. It expects the state to be a map[string]any with a "messages" key containing []llms.MessageContent.

func NewToolNode

func NewToolNode(inputTools []tools.Tool) *ToolNode

NewToolNode creates a new ToolNode with the given tools.

func (*ToolNode) Invoke

func (tn *ToolNode) Invoke(ctx context.Context, state any) (any, error)

Invoke executes the tool calls found in the last message.

type TreeOfThoughtsConfig added in v0.6.0

type TreeOfThoughtsConfig struct {
	// Generator creates new states
	Generator ThoughtGenerator

	// Evaluator scores states
	Evaluator ThoughtEvaluator

	// MaxDepth is the maximum search depth
	MaxDepth int

	// MaxPaths is the maximum number of active paths to maintain
	MaxPaths int

	// Verbose enables detailed logging
	Verbose bool

	// InitialState is the starting state
	InitialState ThoughtState
}

TreeOfThoughtsConfig configures the Tree of Thoughts search

type VerificationResult added in v0.6.0

type VerificationResult struct {
	IsSuccessful bool   `json:"is_successful"`
	Reasoning    string `json:"reasoning"`
}

VerificationResult represents the result of verification

type WorkflowEdge added in v0.4.0

type WorkflowEdge struct {
	From      string `json:"from"`
	To        string `json:"to"`
	Condition string `json:"condition,omitempty"` // For conditional edges
}

WorkflowEdge represents an edge in the workflow plan

type WorkflowNode added in v0.4.0

type WorkflowNode struct {
	Name string `json:"name"`
	Type string `json:"type"` // "start", "process", "end", "conditional"
}

WorkflowNode represents a node in the workflow plan

type WorkflowPlan added in v0.4.0

type WorkflowPlan struct {
	Nodes []WorkflowNode `json:"nodes"`
	Edges []WorkflowEdge `json:"edges"`
}

WorkflowPlan represents the parsed workflow plan from LLM

Jump to

Keyboard shortcuts

? : This menu
/ : Search site
f or F : Jump to
y or Y : Canonical URL