Documentation
¶
Index ¶
- Variables
- func ExtractBoolean(llm LLM, f Fragment, opts ...Option) (*structures.Boolean, error)
- func ExtractGoal(llm LLM, f Fragment, opts ...Option) (*structures.Goal, error)
- func ExtractKnowledgeGaps(llm LLM, f Fragment, opts ...Option) ([]string, error)
- func ExtractPlan(llm LLM, f Fragment, goal *structures.Goal, opts ...Option) (*structures.Plan, error)
- func ExtractTODOs(llm LLM, plan *structures.Plan, goal *structures.Goal, opts ...Option) (*structures.TODOList, error)
- func IsGoalAchieved(llm LLM, f Fragment, goal *structures.Goal, opts ...Option) (*structures.Boolean, error)
- func ReEvaluatePlan(llm LLM, f, subtaskFragment Fragment, goal *structures.Goal, ...) (*structures.Plan, error)
- func WithContext(ctx context.Context) func(o *Options)
- func WithFeedbackCallback(fn func() *Fragment) func(o *Options)
- func WithForceReasoning() func(o *Options)
- func WithForceReasoningTool() func(o *Options)
- func WithGaps(gaps ...string) func(o *Options)
- func WithGuidelines(guidelines ...Guideline) func(o *Options)
- func WithIterations(i int) func(o *Options)
- func WithLoopDetection(steps int) func(o *Options)
- func WithMCPArgs(args map[string]string) func(o *Options)
- func WithMCPs(sessions ...*mcp.ClientSession) func(o *Options)
- func WithMaxAdjustmentAttempts(attempts int) func(o *Options)
- func WithMaxAttempts(i int) func(o *Options)
- func WithMaxRetries(retries int) func(o *Options)
- func WithMessagesManipulator(fn func([]openai.ChatCompletionMessage) []openai.ChatCompletionMessage) func(o *Options)
- func WithPrompt(t prompt.PromptType, p prompt.StaticPrompt) func(o *Options)
- func WithReasoningCallback(fn func(string)) func(o *Options)
- func WithReviewerLLM(reviewerLLMs ...LLM) func(o *Options)
- func WithSinkState(tool ToolDefinitionInterface) func(o *Options)
- func WithStartWithAction(tool ...*ToolChoice) func(o *Options)
- func WithStatusCallback(fn func(string)) func(o *Options)
- func WithTODOPersistence(path string) func(o *Options)
- func WithTODOs(todoList *structures.TODOList) func(o *Options)
- func WithToolCallBack(fn func(*ToolChoice, *SessionState) ToolCallDecision) func(o *Options)
- func WithToolCallResultCallback(fn func(ToolStatus)) func(o *Options)
- func WithTools(tools ...ToolDefinitionInterface) func(o *Options)
- type Fragment
- func ContentReview(llm LLM, originalFragment Fragment, opts ...Option) (Fragment, error)
- func ExecutePlan(llm LLM, conv Fragment, plan *structures.Plan, goal *structures.Goal, ...) (Fragment, error)
- func ExecuteTools(llm LLM, f Fragment, opts ...Option) (Fragment, error)
- func NewEmptyFragment() Fragment
- func NewFragment(messages ...openai.ChatCompletionMessage) Fragment
- func ToolReasoner(llm LLM, f Fragment, opts ...Option) (Fragment, error)
- func (f Fragment) AddLastMessage(f2 Fragment) Fragment
- func (r Fragment) AddMessage(role MessageRole, content string, mm ...Multimedia) Fragment
- func (r Fragment) AddStartMessage(role MessageRole, content string, mm ...Multimedia) Fragment
- func (r Fragment) AddToolMessage(content, toolCallID string) Fragment
- func (f Fragment) AllFragmentsStrings() string
- func (r Fragment) ExtractStructure(ctx context.Context, llm LLM, s structures.Structure) error
- func (f Fragment) GetMessages() []openai.ChatCompletionMessage
- func (f Fragment) LastAssistantAndToolMessages() []openai.ChatCompletionMessage
- func (f Fragment) LastMessage() *openai.ChatCompletionMessage
- func (f Fragment) SelectTool(ctx context.Context, llm LLM, availableTools Tools, forceTool string) (Fragment, *ToolChoice, error)
- func (f Fragment) String() string
- type Guideline
- type GuidelineMetadata
- type GuidelineMetadataList
- type Guidelines
- type IntentionResponseMultiple
- type IntentionResponseSingle
- type LLM
- type MessageRole
- type Multimedia
- type OpenAIClient
- type Option
- type Options
- type PlanStatus
- type ReasoningResponse
- type SessionState
- type Status
- type Tool
- type ToolCallDecision
- type ToolChoice
- type ToolDefinition
- type ToolDefinitionInterface
- type ToolStatus
- type Tools
Constants ¶
This section is empty.
Variables ¶
var ( ErrNoToolSelected error = errors.New("no tool selected by the LLM") ErrLoopDetected error = errors.New("loop detected: same tool called repeatedly with same parameters") ErrToolCallCallbackInterrupted error = errors.New("interrupted via ToolCallCallback") )
var (
ErrGoalNotAchieved error = errors.New("goal not achieved")
)
Functions ¶
func ExtractBoolean ¶
ExtractBoolean extracts a boolean from a conversation
func ExtractGoal ¶
ExtractGoal extracts a goal from a conversation
func ExtractKnowledgeGaps ¶
func ExtractPlan ¶
func ExtractPlan(llm LLM, f Fragment, goal *structures.Goal, opts ...Option) (*structures.Plan, error)
ExtractPlan extracts a plan from a conversation. To override the prompt, define a PromptPlanType, PromptReEvaluatePlanType and PromptSubtaskExtractionType
func ExtractTODOs ¶ added in v0.8.0
func ExtractTODOs(llm LLM, plan *structures.Plan, goal *structures.Goal, opts ...Option) (*structures.TODOList, error)
ExtractTODOs generates a TODO list from plan subtasks using the LLM
func IsGoalAchieved ¶
func IsGoalAchieved(llm LLM, f Fragment, goal *structures.Goal, opts ...Option) (*structures.Boolean, error)
IsGoalAchieved checks if a goal has been achieved
func ReEvaluatePlan ¶
func ReEvaluatePlan(llm LLM, f, subtaskFragment Fragment, goal *structures.Goal, toolStatuses []ToolStatus, subtask string, opts ...Option) (*structures.Plan, error)
ReEvaluatePlan re-evaluates a plan from a conversation. To override the prompt, define a PromptReEvaluatePlanType and PromptSubtaskExtractionType
func WithContext ¶
WithContext sets the execution context for the agent
func WithFeedbackCallback ¶
WithFeedbackCallback sets a callback to get continuous feedback during execution of plans
func WithForceReasoning ¶ added in v0.4.0
func WithForceReasoning() func(o *Options)
WithForceReasoning enables forcing the LLM to reason before selecting tools
func WithForceReasoningTool ¶ added in v0.9.1
func WithForceReasoningTool() func(o *Options)
WithForceReasoningTool enables forcing the LLM to use the reasoning tool before selecting tools. This ensures structured output from the LLM instead of free text that might accidentally contain tool call JSON.
func WithGuidelines ¶
WithGuidelines adds behavioral guidelines for the agent to follow. The guidelines allow a more curated selection of the tool to use, and only relevant ones are shown to the LLM during tool selection.
func WithIterations ¶
WithIterations allows to set the number of refinement iterations
func WithLoopDetection ¶ added in v0.4.0
WithLoopDetection enables loop detection to prevent repeated tool calls If the same tool with the same parameters is called more than 'steps' times, it will be detected
func WithMCPArgs ¶ added in v0.3.0
WithMCPArgs sets the arguments for the MCP prompts
func WithMCPs ¶ added in v0.2.0
func WithMCPs(sessions ...*mcp.ClientSession) func(o *Options)
WithMCPs adds Model Context Protocol client sessions for external tool integration. When specified, the tools available in the MCPs will be available to the cogito pipelines
func WithMaxAdjustmentAttempts ¶ added in v0.7.0
WithMaxAdjustmentAttempts sets the maximum number of adjustment attempts when using tool call callbacks This prevents infinite loops when the user provides adjustment feedback Default is 5 attempts
func WithMaxAttempts ¶
WithMaxAttempts sets the maximum number of execution attempts
func WithMaxRetries ¶ added in v0.4.0
WithMaxRetries sets the maximum number of retries for LLM calls
func WithMessagesManipulator ¶ added in v0.9.0
func WithMessagesManipulator(fn func([]openai.ChatCompletionMessage) []openai.ChatCompletionMessage) func(o *Options)
WithMessagesManipulator allows to manipulate the messages before they are sent to the LLM This is useful to add additional system messages or other context to the messages that needs to change during execution
func WithPrompt ¶
func WithPrompt(t prompt.PromptType, p prompt.StaticPrompt) func(o *Options)
WithPrompt allows to set a custom prompt for a given PromptType
func WithReasoningCallback ¶ added in v0.4.1
WithReasoningCallback sets a callback function to receive reasoning updates during execution
func WithReviewerLLM ¶ added in v0.8.0
WithReviewerLLM specifies a judge LLM for Planning with TODOs. When provided along with a plan, enables Planning with TODOs where the judge LLM reviews work after each iteration and decides whether goal execution is completed or needs rework.
func WithSinkState ¶ added in v0.6.0
func WithSinkState(tool ToolDefinitionInterface) func(o *Options)
func WithStartWithAction ¶ added in v0.7.0
func WithStartWithAction(tool ...*ToolChoice) func(o *Options)
WithStartWithAction sets the initial tool choice to start with
func WithStatusCallback ¶
WithStatusCallback sets a callback function to receive status updates during execution
func WithTODOPersistence ¶ added in v0.8.0
WithTODOPersistence enables file-based TODO persistence. TODOs will be saved to and loaded from the specified file path.
func WithTODOs ¶ added in v0.8.0
func WithTODOs(todoList *structures.TODOList) func(o *Options)
WithTODOs provides an in-memory TODO list for TODO-based iterative execution. If not provided, TODOs will be automatically generated from plan subtasks.
func WithToolCallBack ¶
func WithToolCallBack(fn func(*ToolChoice, *SessionState) ToolCallDecision) func(o *Options)
WithToolCallBack allows to set a callback to intercept and modify tool calls before execution The callback receives the proposed tool choice and session state, and returns a ToolCallDecision that can approve, reject, provide adjustment feedback, or directly modify the tool choice
func WithToolCallResultCallback ¶
func WithToolCallResultCallback(fn func(ToolStatus)) func(o *Options)
WithToolCallResultCallback runs the callback on every tool result
func WithTools ¶
func WithTools(tools ...ToolDefinitionInterface) func(o *Options)
WithTools allows to set the tools available to the Agent. Pass *ToolDefinition[T] instances - they will automatically generate openai.Tool via their Tool() method. Example: WithTools(&ToolDefinition[SearchArgs]{...}, &ToolDefinition[WeatherArgs]{...})
Types ¶
type Fragment ¶
type Fragment struct {
Messages []openai.ChatCompletionMessage
ParentFragment *Fragment
Status *Status
Multimedia []Multimedia
}
func ContentReview ¶
ContentReview refines an LLM response for a fixed number of iterations, or until the LLM doesn't find any more gaps
func ExecutePlan ¶
func ExecutePlan(llm LLM, conv Fragment, plan *structures.Plan, goal *structures.Goal, opts ...Option) (Fragment, error)
ExecutePlan executes an already-defined plan with a set of options. To override its prompt, configure PromptPlanExecutionType, PromptPlanType, PromptReEvaluatePlanType and PromptSubtaskExtractionType
func ExecuteTools ¶
ExecuteTools runs a fragment through an LLM, and executes Tools. It returns a new fragment with the tool result at the end. The result is guaranteed to be callable afterwards with llm.Ask() to explain the result to the user.
func NewEmptyFragment ¶
func NewEmptyFragment() Fragment
func NewFragment ¶
func NewFragment(messages ...openai.ChatCompletionMessage) Fragment
func ToolReasoner ¶
ToolReasoner forces the LLM to reason about available tools in a fragment
func (Fragment) AddLastMessage ¶
func (Fragment) AddMessage ¶
func (r Fragment) AddMessage(role MessageRole, content string, mm ...Multimedia) Fragment
func (Fragment) AddStartMessage ¶
func (r Fragment) AddStartMessage(role MessageRole, content string, mm ...Multimedia) Fragment
func (Fragment) AddToolMessage ¶ added in v0.5.1
AddToolMessage adds a tool result message with the specified tool_call_id
func (Fragment) AllFragmentsStrings ¶
AllFragmentsStrings walks through all the fragment parents to retrieve all the conversations and represent that as a string This is particularly useful if chaining different fragments and want to still feed the conversation as a context to the LLM.
func (Fragment) ExtractStructure ¶
ExtractStructure extracts a structure from the result using the provided JSON schema definition and unmarshals it into the provided destination
func (Fragment) GetMessages ¶ added in v0.4.0
func (f Fragment) GetMessages() []openai.ChatCompletionMessage
GetMessages returns the chat completion messages from this fragment, automatically prepending a force-text-reply system message if tool calls are detected. This ensures LLMs provide natural language responses instead of JSON tool syntax when Ask() is called after ExecuteTools().
func (Fragment) LastAssistantAndToolMessages ¶ added in v0.3.0
func (f Fragment) LastAssistantAndToolMessages() []openai.ChatCompletionMessage
func (Fragment) LastMessage ¶
func (f Fragment) LastMessage() *openai.ChatCompletionMessage
type GuidelineMetadata ¶
type GuidelineMetadataList ¶
type GuidelineMetadataList []GuidelineMetadata
type Guidelines ¶
type Guidelines []Guideline
func GetRelevantGuidelines ¶
func GetRelevantGuidelines(llm LLM, guidelines Guidelines, fragment Fragment, opts ...Option) (Guidelines, error)
func (Guidelines) ToMetadata ¶
func (g Guidelines) ToMetadata() GuidelineMetadataList
type IntentionResponseMultiple ¶ added in v0.9.0
type IntentionResponseMultiple struct {
Tools []string `json:"tools"`
Reasoning string `json:"reasoning"`
}
IntentionResponseMultiple is used to extract multiple tool choices from the intention tool
type IntentionResponseSingle ¶ added in v0.9.0
type IntentionResponseSingle struct {
Tool string `json:"tool"`
Reasoning string `json:"reasoning"`
}
IntentionResponseSingle is used to extract a single tool choice from the intention tool
type MessageRole ¶ added in v0.9.0
type MessageRole string
const ( AssistantMessageRole MessageRole = "assistant" UserMessageRole MessageRole = "user" ToolMessageRole MessageRole = "tool" SystemMessageRole MessageRole = "system" )
func (MessageRole) String ¶ added in v0.9.0
func (m MessageRole) String() string
type OpenAIClient ¶
type OpenAIClient struct {
// contains filtered or unexported fields
}
func NewOpenAILLM ¶
func NewOpenAILLM(model, apiKey, baseURL string) *OpenAIClient
func (*OpenAIClient) Ask ¶
Ask prompts to the LLM with the provided messages and returns a Fragment containing the response. The Fragment.GetMessages() method automatically handles force-text-reply when tool calls are present in the conversation history.
func (*OpenAIClient) CreateChatCompletion ¶
func (llm *OpenAIClient) CreateChatCompletion(ctx context.Context, request openai.ChatCompletionRequest) (openai.ChatCompletionResponse, error)
type Option ¶
type Option func(*Options)
var ( // EnableDeepContext enables full context to the LLM when chaining conversations // It might yield to better results to the cost of bigger context use. EnableDeepContext Option = func(o *Options) { o.deepContext = true } // EnableToolReasoner enables the reasoning about the need to call other tools // before each tool call, preventing calling more tools than necessary. EnableToolReasoner Option = func(o *Options) { o.toolReasoner = true } // DisableSinkState disables the use of a sink state // when the LLM decides that no tool is needed DisableSinkState Option = func(o *Options) { o.sinkState = false } // EnableInfiniteExecution enables infinite, long-term execution on Plans EnableInfiniteExecution Option = func(o *Options) { o.infiniteExecution = true } // EnableStrictGuidelines enforces cogito to pick tools only from the guidelines EnableStrictGuidelines Option = func(o *Options) { o.strictGuidelines = true } // EnableAutoPlan enables cogito to automatically use planning if needed EnableAutoPlan Option = func(o *Options) { o.autoPlan = true } EnableAlwaysPickTools Option = func(o *Options) { o.alwaysPickTools = true } // EnableAutoPlanReEvaluator enables cogito to automatically re-evaluate the need to use planning EnableAutoPlanReEvaluator Option = func(o *Options) { o.planReEvaluator = true } // EnableMCPPrompts enables the use of MCP prompts EnableMCPPrompts Option = func(o *Options) { o.mcpPrompts = true } // EnableGuidedTools enables filtering tools through guidance using their descriptions. // When no guidelines exist, creates virtual guidelines for all tools using their descriptions. // When guidelines exist, creates virtual guidelines for tools not in any guideline. EnableGuidedTools Option = func(o *Options) { o.guidedTools = true } // EnableParallelToolExecution enables parallel execution of multiple tool calls. // When enabled, the LLM can select multiple tools and they will be executed concurrently. 
EnableParallelToolExecution Option = func(o *Options) { o.parallelToolExecution = true } )
type Options ¶
type Options struct {
// contains filtered or unexported fields
}
Options contains all configuration options for the Cogito agent It allows customization of behavior, tools, prompts, and execution parameters
type PlanStatus ¶ added in v0.3.2
type PlanStatus struct {
Plan structures.Plan
Tools []ToolStatus
}
type ReasoningResponse ¶ added in v0.9.1
type ReasoningResponse struct {
Reasoning string `json:"reasoning"`
}
ReasoningResponse is used to extract reasoning from the reasoning tool
type SessionState ¶ added in v0.7.0
type SessionState struct {
ToolChoice *ToolChoice `json:"tool_choice"`
Fragment Fragment `json:"fragment"`
}
type Status ¶
type Status struct {
Iterations int
ToolsCalled Tools
ToolResults []ToolStatus
Plans []PlanStatus
PastActions []ToolStatus // Track past actions for loop detection
ReasoningLog []string // Track reasoning for each iteration
TODOs *structures.TODOList // TODO tracking for iterative execution
TODOIteration int // Current TODO iteration
TODOPhase string // Current phase: "work" or "review"
}
type ToolCallDecision ¶ added in v0.7.0
type ToolCallDecision struct {
// Approved: true to proceed with the tool call, false to interrupt execution
Approved bool
// Adjustment: feedback string for the LLM to interpret and adjust the tool call
// Empty string means no adjustment needed. If provided, the LLM will re-evaluate
// the tool call based on this feedback.
Adjustment string
// Modified: directly modified tool choice that takes precedence over Adjustment
// If set, this tool choice is used directly without re-querying the LLM
// This allows programmatic modification of tool arguments
Modified *ToolChoice
// Skip: skip this tool call but continue execution (alternative to Approved: false)
// When true, the tool call is skipped and execution continues
Skip bool
}
ToolCallDecision represents the decision made by a tool call callback It allows the callback to approve, reject, provide adjustment feedback, or directly modify the tool choice
type ToolChoice ¶
type ToolDefinition ¶ added in v0.5.0
type ToolDefinition[T any] struct { ToolRunner Tool[T] InputArguments any Name, Description string }
func (*ToolDefinition[T]) Execute ¶ added in v0.5.0
Execute implements ToolDef.Execute by marshaling the arguments map to type T and calling ToolRunner.Run
func (ToolDefinition[T]) Tool ¶ added in v0.5.0
func (t ToolDefinition[T]) Tool() openai.Tool
type ToolDefinitionInterface ¶ added in v0.5.0
type ToolDefinitionInterface interface {
Tool() openai.Tool
// Execute runs the tool with the given arguments (as JSON map) and returns the result
Execute(args map[string]any) (string, any, error)
}
func NewToolDefinition ¶ added in v0.5.0
func NewToolDefinition[T any](toolRunner Tool[T], inputArguments any, name, description string) ToolDefinitionInterface
type ToolStatus ¶
type ToolStatus struct {
Executed bool
ToolArguments ToolChoice
Result string
Name string
ResultData any
}
type Tools ¶
type Tools []ToolDefinitionInterface
func (Tools) Definitions ¶
func (t Tools) Definitions() []*openai.FunctionDefinition
func (Tools) Find ¶
func (t Tools) Find(name string) ToolDefinitionInterface