Documentation
¶
Index ¶
Constants ¶
This section is empty.
Variables ¶
View Source
var ErrAPIKeyNotSet = errors.New("GEMINI_API_KEY not set")
Functions ¶
func CleanUpPromptFiles ¶ added in v0.36.0
func CleanUpPromptFiles(dir string)
CleanUpPromptFiles removes agent config files and known output files from the given directory to avoid influencing the LLM.
Types ¶
type AgenticCallOptions ¶ added in v0.38.0
type AgenticCallOptions struct {
Model string // e.g. "gemini-2.5-flash"
Provider string // "google", "anthropic", "openai"
APIKey string
// Tools selects specific exploration tools. When non-nil, takes precedence
// over ToolSet. submit_answer is always included regardless.
Tools []AgenticTool
// ToolSet selects a preset collection of tools. Used when Tools is nil.
// The zero value (DefaultTooling) includes all exploration tools.
ToolSet ToolSet
// SystemPrompt overrides the intro portion of the system prompt. The
// AVAILABLE TOOLS section is always auto-appended. When empty, a default
// intro is used.
SystemPrompt string
}
AgenticCallOptions contains configuration for the agentic LLM call.
type AgenticClient ¶ added in v0.38.0
type AgenticClient interface {
CallLLM(ctx context.Context, questions []string, repositoryPath string) ([]AnswerSchema, error)
}
AgenticClient is an interface for agentic LLM interactions.
func NewAgenticClient ¶ added in v0.38.0
func NewAgenticClient(opts *AgenticCallOptions) (AgenticClient, error)
NewAgenticClient creates a new AgenticClient with the given options.
type AgenticTool ¶ added in v0.38.0
type AgenticTool string
AgenticTool identifies an exploration tool available to the agent.
const (
	ToolReadFile      AgenticTool = "read_file"
	ToolListDirectory AgenticTool = "list_directory"
	ToolGrep          AgenticTool = "grep"
	ToolGit           AgenticTool = "git"
)
type AnswerSchema ¶ added in v0.38.0
type AnswerSchema struct {
Question string `json:"question"`
Answer string `json:"answer"`
ShortAnswer bool `json:"short_answer"`
Files []string `json:"files,omitempty"`
CodeSnippet string `json:"code_snippet,omitempty"`
// Error is set when the agent failed to answer this question
// (e.g. budget exhausted). Consumers should skip errored answers.
Error string `json:"error,omitempty"`
}
AnswerSchema represents the structured response from the agentic client.
type CallLLMOptions ¶ added in v0.36.0
type CallLLMOptions struct {
Model string // e.g. "gemini-2.5-flash", empty = CLI default
}
type GeminiClient ¶
type GeminiClient struct{}
func NewGeminiClient ¶
func NewGeminiClient() *GeminiClient
func (*GeminiClient) CallLLM ¶
func (g *GeminiClient) CallLLM(prompt, repositoryPath string, opts *CallLLMOptions) error
func (*GeminiClient) CanUseLLM ¶ added in v0.36.0
func (g *GeminiClient) CanUseLLM() error
type LLMClient ¶
type LLMClient interface {
CanUseLLM() error
CallLLM(prompt, repositoryPath string, opts *CallLLMOptions) error
}
type MockAgenticClient ¶ added in v0.38.0
type MockAgenticClient struct {
// contains filtered or unexported fields
}
MockAgenticClient implements AgenticClient for testing.
func NewMockAgenticClient ¶ added in v0.38.0
func NewMockAgenticClient(answers []AnswerSchema) *MockAgenticClient
func (*MockAgenticClient) CallLLM ¶ added in v0.38.0
func (m *MockAgenticClient) CallLLM(ctx context.Context, questions []string, repoPath string) ([]AnswerSchema, error)
type MockLLMClient ¶
type MockLLMClient struct {
// contains filtered or unexported fields
}
func NewMockLLMClient ¶
func NewMockLLMClient() *MockLLMClient
func (*MockLLMClient) CallLLM ¶
func (m *MockLLMClient) CallLLM(prompt, repositoryPath string, opts *CallLLMOptions) error
func (*MockLLMClient) CanUseLLM ¶ added in v0.36.0
func (m *MockLLMClient) CanUseLLM() error
func (*MockLLMClient) WithResponses ¶
func (m *MockLLMClient) WithResponses(responses []MockResponse) *MockLLMClient
type MockResponse ¶
Click to show internal directories.
Click to hide internal directories.