Documentation ¶
Index ¶
- Variables
- func ExtractBoolean(llm LLM, f Fragment, opts ...Option) (*structures.Boolean, error)
- func ExtractGoal(llm LLM, f Fragment, opts ...Option) (*structures.Goal, error)
- func ExtractKnowledgeGaps(llm LLM, f Fragment, opts ...Option) ([]string, error)
- func ExtractPlan(llm LLM, f Fragment, goal *structures.Goal, opts ...Option) (*structures.Plan, error)
- func IsGoalAchieved(llm LLM, f Fragment, goal *structures.Goal, opts ...Option) (*structures.Boolean, error)
- func ReEvaluatePlan(llm LLM, f, subtaskFragment Fragment, goal *structures.Goal, ...) (*structures.Plan, error)
- func WithContext(ctx context.Context) func(o *Options)
- func WithFeedbackCallback(fn func() *Fragment) func(o *Options)
- func WithForceReasoning() func(o *Options)
- func WithGaps(gaps ...string) func(o *Options)
- func WithGuidelines(guidelines ...Guideline) func(o *Options)
- func WithIterations(i int) func(o *Options)
- func WithLoopDetection(steps int) func(o *Options)
- func WithMCPArgs(args map[string]string) func(o *Options)
- func WithMCPs(sessions ...*mcp.ClientSession) func(o *Options)
- func WithMaxAttempts(i int) func(o *Options)
- func WithMaxRetries(retries int) func(o *Options)
- func WithPrompt(t prompt.PromptType, p prompt.StaticPrompt) func(o *Options)
- func WithReasoningCallback(fn func(string)) func(o *Options)
- func WithSinkState(tool ToolDefinitionInterface) func(o *Options)
- func WithStatusCallback(fn func(string)) func(o *Options)
- func WithToolCallBack(fn func(*ToolChoice) bool) func(o *Options)
- func WithToolCallResultCallback(fn func(ToolStatus)) func(o *Options)
- func WithTools(tools ...ToolDefinitionInterface) func(o *Options)
- type Fragment
- func ContentReview(llm LLM, originalFragment Fragment, opts ...Option) (Fragment, error)
- func ExecutePlan(llm LLM, conv Fragment, plan *structures.Plan, goal *structures.Goal, ...) (Fragment, error)
- func ExecuteTools(llm LLM, f Fragment, opts ...Option) (Fragment, error)
- func NewEmptyFragment() Fragment
- func NewFragment(messages ...openai.ChatCompletionMessage) Fragment
- func ToolReasoner(llm LLM, f Fragment, opts ...Option) (Fragment, error)
- func (f Fragment) AddLastMessage(f2 Fragment) Fragment
- func (r Fragment) AddMessage(role, content string, mm ...Multimedia) Fragment
- func (r Fragment) AddStartMessage(role, content string, mm ...Multimedia) Fragment
- func (r Fragment) AddToolMessage(content, toolCallID string) Fragment
- func (f Fragment) AllFragmentsStrings() string
- func (r Fragment) ExtractStructure(ctx context.Context, llm LLM, s structures.Structure) error
- func (f Fragment) GetMessages() []openai.ChatCompletionMessage
- func (f Fragment) LastAssistantAndToolMessages() []openai.ChatCompletionMessage
- func (f Fragment) LastMessage() *openai.ChatCompletionMessage
- func (f Fragment) SelectTool(ctx context.Context, llm LLM, availableTools Tools, forceTool string) (Fragment, *ToolChoice, error)
- func (f Fragment) String() string
- type Guideline
- type GuidelineMetadata
- type GuidelineMetadataList
- type Guidelines
- type IntentionResponse
- type LLM
- type Multimedia
- type OpenAIClient
- type Option
- type Options
- type PlanStatus
- type Status
- type Tool
- type ToolChoice
- type ToolDefinition
- type ToolDefinitionInterface
- type ToolStatus
- type Tools
Constants ¶
This section is empty.
Variables ¶
var (
	ErrNoToolSelected error = errors.New("no tool selected by the LLM")
	ErrLoopDetected   error = errors.New("loop detected: same tool called repeatedly with same parameters")
)
var (
ErrGoalNotAchieved error = errors.New("goal not achieved")
)
Functions ¶
func ExtractBoolean ¶
ExtractBoolean extracts a boolean from a conversation
func ExtractGoal ¶
ExtractGoal extracts a goal from a conversation
func ExtractKnowledgeGaps ¶
func ExtractPlan ¶
func ExtractPlan(llm LLM, f Fragment, goal *structures.Goal, opts ...Option) (*structures.Plan, error)
ExtractPlan extracts a plan from a conversation. To override the prompt, define a PromptPlanType, PromptReEvaluatePlanType and PromptSubtaskExtractionType
func IsGoalAchieved ¶
func IsGoalAchieved(llm LLM, f Fragment, goal *structures.Goal, opts ...Option) (*structures.Boolean, error)
IsGoalAchieved checks if a goal has been achieved
func ReEvaluatePlan ¶
func ReEvaluatePlan(llm LLM, f, subtaskFragment Fragment, goal *structures.Goal, toolStatuses []ToolStatus, subtask string, opts ...Option) (*structures.Plan, error)
ReEvaluatePlan re-evaluates a plan from a conversation. To override the prompt, define a PromptReEvaluatePlanType and PromptSubtaskExtractionType
func WithContext ¶
WithContext sets the execution context for the agent
func WithFeedbackCallback ¶
WithFeedbackCallback sets a callback to get continuous feedback during the execution of plans
func WithForceReasoning ¶ added in v0.4.0
func WithForceReasoning() func(o *Options)
WithForceReasoning enables forcing the LLM to reason before selecting tools
func WithGuidelines ¶
WithGuidelines adds behavioral guidelines for the agent to follow. Guidelines allow a more curated selection of the tool to use; only the relevant ones are shown to the LLM during tool selection.
func WithIterations ¶
WithIterations sets the number of refinement iterations
func WithLoopDetection ¶ added in v0.4.0
WithLoopDetection enables loop detection to prevent repeated tool calls. If the same tool is called with the same parameters more than 'steps' times, the loop is detected
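A minimal sketch of combining loop detection with tool execution. The wrapping function, the user message and the searchTool placeholder are illustrative; errors.Is is used so it covers both a wrapped and a directly returned ErrLoopDetected.

func runWithLoopGuard(llm cogito.LLM, searchTool cogito.ToolDefinitionInterface) error {
	f := cogito.NewEmptyFragment().AddMessage("user", "Find the latest release notes")

	// Abort once the same tool is called with identical parameters more than 3 times.
	_, err := cogito.ExecuteTools(llm, f,
		cogito.WithTools(searchTool),
		cogito.WithLoopDetection(3),
	)
	if errors.Is(err, cogito.ErrLoopDetected) {
		return fmt.Errorf("agent looped, giving up: %w", err)
	}
	return err
}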
func WithMCPArgs ¶ added in v0.3.0
WithMCPArgs sets the arguments for the MCP prompts
func WithMCPs ¶ added in v0.2.0
func WithMCPs(sessions ...*mcp.ClientSession) func(o *Options)
WithMCPs adds Model Context Protocol client sessions for external tool integration. When specified, the tools exposed by the MCP sessions become available to the cogito pipelines
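A sketch of wiring MCP tools into a pipeline, assuming an already-connected *mcp.ClientSession (the MCP client setup itself is out of scope here); the "project" prompt argument is purely illustrative.

func withMCPTools(llm cogito.LLM, session *mcp.ClientSession, f cogito.Fragment) (cogito.Fragment, error) {
	return cogito.ExecuteTools(llm, f,
		cogito.WithMCPs(session), // tools exposed by the MCP server become available
		cogito.EnableMCPPrompts,  // optionally expose the server's prompts too
		cogito.WithMCPArgs(map[string]string{"project": "demo"}), // hypothetical prompt arguments
	)
}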
func WithMaxAttempts ¶
WithMaxAttempts sets the maximum number of execution attempts
func WithMaxRetries ¶ added in v0.4.0
WithMaxRetries sets the maximum number of retries for LLM calls
func WithPrompt ¶
func WithPrompt(t prompt.PromptType, p prompt.StaticPrompt) func(o *Options)
WithPrompt sets a custom prompt for a given PromptType
func WithReasoningCallback ¶ added in v0.4.1
WithReasoningCallback sets a callback function to receive reasoning updates during execution
func WithSinkState ¶ added in v0.6.0
func WithSinkState(tool ToolDefinitionInterface) func(o *Options)
func WithStatusCallback ¶
WithStatusCallback sets a callback function to receive status updates during execution
func WithToolCallBack ¶
func WithToolCallBack(fn func(*ToolChoice) bool) func(o *Options)
WithToolCallBack sets a callback invoked before a tool is run, letting the caller confirm whether the tool should be executed
func WithToolCallResultCallback ¶
func WithToolCallResultCallback(fn func(ToolStatus)) func(o *Options)
WithToolCallResultCallback runs the callback on every tool result
func WithTools ¶
func WithTools(tools ...ToolDefinitionInterface) func(o *Options)
WithTools sets the tools available to the Agent. Pass *ToolDefinition[T] instances - they will automatically generate openai.Tool via their Tool() method. Example: WithTools(&ToolDefinition[SearchArgs]{...}, &ToolDefinition[WeatherArgs]{...})
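A sketch of the pattern above. The SearchArgs type and searchTool runner are illustrative, and the Run signature is an assumption (Execute is documented to marshal the arguments into T and call ToolRunner.Run; check the Tool[T] interface for the exact method set).

type SearchArgs struct {
	Query string `json:"query"`
}

type searchTool struct{}

// Run is assumed to be the method required by cogito.Tool[SearchArgs].
func (searchTool) Run(a SearchArgs) (string, error) {
	return "results for: " + a.Query, nil
}

func agentTools() []cogito.ToolDefinitionInterface {
	search := &cogito.ToolDefinition[SearchArgs]{
		ToolRunner:     searchTool{},
		InputArguments: SearchArgs{}, // assumed: an instance used to derive the JSON schema
		Name:           "search",
		Description:    "Search the web for up-to-date information",
	}
	return []cogito.ToolDefinitionInterface{search}
}

// usage: cogito.ExecuteTools(llm, f, cogito.WithTools(agentTools()...))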
Types ¶
type Fragment ¶
type Fragment struct {
Messages []openai.ChatCompletionMessage
ParentFragment *Fragment
Status *Status
Multimedia []Multimedia
}
func ContentReview ¶
ContentReview refines an LLM response for a fixed number of iterations, or until the LLM no longer finds gaps
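A sketch of pairing ContentReview with WithIterations; the draft conversation is illustrative.

func reviewDraft(llm cogito.LLM) (string, error) {
	draft := cogito.NewEmptyFragment().
		AddMessage("user", "Write a short changelog entry for the new release").
		AddMessage("assistant", "This release adds sink states and loop detection.")

	// Refine the assistant's answer for at most 3 iterations,
	// stopping early once the LLM finds no more gaps.
	reviewed, err := cogito.ContentReview(llm, draft, cogito.WithIterations(3))
	if err != nil {
		return "", err
	}
	if msg := reviewed.LastMessage(); msg != nil {
		return msg.Content, nil
	}
	return "", nil
}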
func ExecutePlan ¶
func ExecutePlan(llm LLM, conv Fragment, plan *structures.Plan, goal *structures.Goal, opts ...Option) (Fragment, error)
ExecutePlan executes an already-defined plan with a set of options. To override its prompt, configure PromptPlanExecutionType, PromptPlanType, PromptReEvaluatePlanType and PromptSubtaskExtractionType
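A sketch of a full plan-and-execute round trip using the extractors above; the user request and tool set are illustrative, and goal/plan handling is deliberately minimal.

func planAndExecute(llm cogito.LLM, tools ...cogito.ToolDefinitionInterface) (cogito.Fragment, error) {
	conv := cogito.NewEmptyFragment().
		AddMessage("user", "Collect this week's CI failures and summarise them in a report")

	goal, err := cogito.ExtractGoal(llm, conv)
	if err != nil {
		return cogito.Fragment{}, err
	}

	plan, err := cogito.ExtractPlan(llm, conv, goal, cogito.WithTools(tools...))
	if err != nil {
		return cogito.Fragment{}, err
	}

	result, err := cogito.ExecutePlan(llm, conv, plan, goal, cogito.WithTools(tools...))
	if err != nil {
		return cogito.Fragment{}, err
	}

	// Optionally verify the outcome before returning it.
	if _, err := cogito.IsGoalAchieved(llm, result, goal); err != nil {
		return result, err
	}
	return result, nil
}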
func ExecuteTools ¶
ExecuteTools runs a fragment through an LLM and executes tools. It returns a new fragment with the tool result at the end. The resulting fragment is guaranteed to be usable afterwards with llm.Ask() to explain the result to the user.
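A minimal sketch; the question and tool set are illustrative.

func askWithTools(llm cogito.LLM, tools ...cogito.ToolDefinitionInterface) (cogito.Fragment, error) {
	f := cogito.NewEmptyFragment().AddMessage("user", "What's the weather in Berlin tomorrow?")

	// The returned fragment ends with the tool result and can be passed
	// back to the LLM afterwards to produce a natural-language answer.
	return cogito.ExecuteTools(llm, f,
		cogito.WithTools(tools...),
		cogito.EnableToolReasoner, // reason about whether another tool call is really needed
	)
}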
func NewEmptyFragment ¶
func NewEmptyFragment() Fragment
func NewFragment ¶
func NewFragment(messages ...openai.ChatCompletionMessage) Fragment
func ToolReasoner ¶
ToolReasoner forces the LLM to reason about available tools in a fragment
func (Fragment) AddLastMessage ¶
func (Fragment) AddMessage ¶
func (r Fragment) AddMessage(role, content string, mm ...Multimedia) Fragment
func (Fragment) AddStartMessage ¶
func (r Fragment) AddStartMessage(role, content string, mm ...Multimedia) Fragment
func (Fragment) AddToolMessage ¶ added in v0.5.1
AddToolMessage adds a tool result message with the specified tool_call_id
func (Fragment) AllFragmentsStrings ¶
AllFragmentsStrings walks through all the fragment's parents to retrieve all the conversations and represents them as a single string. This is particularly useful when chaining different fragments and you still want to feed the whole conversation as context to the LLM.
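A sketch of chaining two conversations, assuming fragments are linked through the exported ParentFragment field (the package may also set this for you when chaining).

func chainedContext() string {
	research := cogito.NewEmptyFragment().
		AddMessage("user", "Summarise the RFC").
		AddMessage("assistant", "The RFC proposes ...")

	followUp := cogito.NewEmptyFragment().AddMessage("user", "Draft a reply to the author")
	followUp.ParentFragment = &research

	// Flatten the whole parent chain into a single string, usable as LLM context.
	return followUp.AllFragmentsStrings()
}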
func (Fragment) ExtractStructure ¶
ExtractStructure extracts a structure from the result using the provided JSON schema definition and unmarshals it into the provided destination
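A sketch, assuming *structures.Goal satisfies structures.Structure (plausible, since the package's own extractors return it, but unverified here).

func goalFromFragment(ctx context.Context, llm cogito.LLM, f cogito.Fragment) (*structures.Goal, error) {
	goal := &structures.Goal{} // assumed to implement structures.Structure
	if err := f.ExtractStructure(ctx, llm, goal); err != nil {
		return nil, err
	}
	return goal, nil
}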
func (Fragment) GetMessages ¶ added in v0.4.0
func (f Fragment) GetMessages() []openai.ChatCompletionMessage
GetMessages returns the chat completion messages from this fragment, automatically prepending a force-text-reply system message if tool calls are detected. This ensures LLMs provide natural language responses instead of JSON tool syntax when Ask() is called after ExecuteTools().
func (Fragment) LastAssistantAndToolMessages ¶ added in v0.3.0
func (f Fragment) LastAssistantAndToolMessages() []openai.ChatCompletionMessage
func (Fragment) LastMessage ¶
func (f Fragment) LastMessage() *openai.ChatCompletionMessage
type GuidelineMetadata ¶
type GuidelineMetadataList ¶
type GuidelineMetadataList []GuidelineMetadata
type Guidelines ¶
type Guidelines []Guideline
func GetRelevantGuidelines ¶
func GetRelevantGuidelines(llm LLM, guidelines Guidelines, fragment Fragment, opts ...Option) (Guidelines, error)
func (Guidelines) ToMetadata ¶
func (g Guidelines) ToMetadata() GuidelineMetadataList
type IntentionResponse ¶ added in v0.4.0
IntentionResponse is used to extract the tool choice from the intention tool
type OpenAIClient ¶
type OpenAIClient struct {
// contains filtered or unexported fields
}
func NewOpenAILLM ¶
func NewOpenAILLM(model, apiKey, baseURL string) *OpenAIClient
func (*OpenAIClient) Ask ¶
Ask prompts the LLM with the provided messages and returns a Fragment containing the response. The Fragment.GetMessages() method automatically handles force-text-reply when tool calls are present in the conversation history.
func (*OpenAIClient) CreateChatCompletion ¶
func (llm *OpenAIClient) CreateChatCompletion(ctx context.Context, request openai.ChatCompletionRequest) (openai.ChatCompletionResponse, error)
type Option ¶
type Option func(*Options)
var (
	// EnableDeepContext enables full context to the LLM when chaining conversations.
	// It might yield better results at the cost of a bigger context.
	EnableDeepContext Option = func(o *Options) { o.deepContext = true }

	// EnableToolReasoner enables reasoning about the need to call other tools
	// before each tool call, preventing calling more tools than necessary.
	EnableToolReasoner Option = func(o *Options) { o.toolReasoner = true }

	// DisableToolReEvaluator disables the re-evaluation of the need to call other tools
	// after each tool call. Re-evaluation might yield better results at the cost of
	// more LLM calls.
	DisableToolReEvaluator Option = func(o *Options) { o.toolReEvaluator = false }

	// DisableSinkState disables the use of a sink state
	// when the LLM decides that no tool is needed.
	DisableSinkState Option = func(o *Options) { o.sinkState = false }

	// EnableInfiniteExecution enables infinite, long-term execution of Plans.
	EnableInfiniteExecution Option = func(o *Options) { o.infiniteExecution = true }

	// EnableStrictGuidelines enforces cogito to pick tools only from the guidelines.
	EnableStrictGuidelines Option = func(o *Options) { o.strictGuidelines = true }

	// EnableAutoPlan enables cogito to automatically use planning if needed.
	EnableAutoPlan Option = func(o *Options) { o.autoPlan = true }

	// EnableAutoPlanReEvaluator enables cogito to automatically re-evaluate the need to use planning.
	EnableAutoPlanReEvaluator Option = func(o *Options) { o.planReEvaluator = true }

	// EnableMCPPrompts enables the use of MCP prompts.
	EnableMCPPrompts Option = func(o *Options) { o.mcpPrompts = true }
)
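These are plain Option values, so they are passed alongside the With* constructors; a brief sketch, with llm, f and tools assumed to be set up elsewhere.

func run(llm cogito.LLM, f cogito.Fragment, tools ...cogito.ToolDefinitionInterface) (cogito.Fragment, error) {
	return cogito.ExecuteTools(llm, f,
		cogito.WithTools(tools...),
		cogito.EnableDeepContext,  // feed the full parent-fragment chain as context
		cogito.EnableToolReasoner, // reason about tool need before each call
		cogito.EnableAutoPlan,     // fall back to planning when needed
	)
}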
type Options ¶
type Options struct {
// contains filtered or unexported fields
}
Options contains all configuration options for the Cogito agent. It allows customization of behavior, tools, prompts, and execution parameters
type PlanStatus ¶ added in v0.3.2
type PlanStatus struct {
Plan structures.Plan
Tools []ToolStatus
}
type Status ¶
type Status struct {
Iterations int
ToolsCalled Tools
ToolResults []ToolStatus
Plans []PlanStatus
PastActions []ToolStatus // Track past actions for loop detection
ReasoningLog []string // Track reasoning for each iteration
}
type ToolChoice ¶
func ToolReEvaluator ¶ added in v0.4.0
func ToolReEvaluator(llm LLM, f Fragment, previousTool ToolStatus, tools Tools, guidelines Guidelines, opts ...Option) (*ToolChoice, string, error)
ToolReEvaluator evaluates the conversation after a tool execution and determines next steps. It calls pickAction/toolSelection with reEvaluationTemplate and the conversation that already has tool results
type ToolDefinition ¶ added in v0.5.0
type ToolDefinition[T any] struct {
	ToolRunner        Tool[T]
	InputArguments    any
	Name, Description string
}
func (*ToolDefinition[T]) Execute ¶ added in v0.5.0
func (t *ToolDefinition[T]) Execute(args map[string]any) (string, error)
Execute implements ToolDefinitionInterface.Execute by marshaling the arguments map to type T and calling ToolRunner.Run
func (ToolDefinition[T]) Tool ¶ added in v0.5.0
func (t ToolDefinition[T]) Tool() openai.Tool
type ToolDefinitionInterface ¶ added in v0.5.0
type ToolDefinitionInterface interface {
Tool() openai.Tool
// Execute runs the tool with the given arguments (as JSON map) and returns the result
Execute(args map[string]any) (string, error)
}
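Because the interface is small, a tool can also be implemented directly instead of going through the generic ToolDefinition helper; a sketch using the openai types referenced on this page (the clock tool is illustrative).

type clockTool struct{}

func (clockTool) Tool() openai.Tool {
	return openai.Tool{
		Type: openai.ToolTypeFunction,
		Function: &openai.FunctionDefinition{
			Name:        "current_time",
			Description: "Returns the current time in RFC3339 format",
			Parameters: map[string]any{
				"type":       "object",
				"properties": map[string]any{},
			},
		},
	}
}

func (clockTool) Execute(args map[string]any) (string, error) {
	return time.Now().Format(time.RFC3339), nil
}

// usage: cogito.ExecuteTools(llm, f, cogito.WithTools(clockTool{}))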
func NewToolDefinition ¶ added in v0.5.0
func NewToolDefinition[T any](toolRunner Tool[T], inputArguments any, name, description string) ToolDefinitionInterface
type ToolStatus ¶
type ToolStatus struct {
Executed bool
ToolArguments ToolChoice
Result string
Name string
}
type Tools ¶
type Tools []ToolDefinitionInterface
func (Tools) Definitions ¶
func (t Tools) Definitions() []*openai.FunctionDefinition
func (Tools) Find ¶
func (t Tools) Find(name string) ToolDefinitionInterface