Documentation
¶
Index ¶
- Constants
- Variables
- func CleanupJSONResponse(response string) string
- func NewAnthropicProvider(apiKey, model, apiURL string) Provider
- func NewClaudeCodeProvider(model string) Provider
- func NewClientWithModel(model string, options ...middleware.Option) (Client, error)
- func NewGeminiProvider(apiKey, model, apiURL string) Provider
- func NewOpenAIProvider(apiKey, model, apiURL string) Provider
- func NewProvider(conn *Connection) (Provider, error)
- func UnmarshalWithCleanup(data string, v interface{}) error
- type JSONSchema
- type LLMAgent
- func (la *LLMAgent) Close() error
- func (la *LLMAgent) ExecuteBatch(ctx context.Context, requests []ai.PromptRequest) (map[string]*ai.PromptResponse, error)
- func (la *LLMAgent) ExecutePrompt(ctx context.Context, request ai.PromptRequest) (*ai.PromptResponse, error)
- func (la *LLMAgent) GetConfig() ai.AgentConfig
- func (la *LLMAgent) GetCosts() ai.Costs
- func (la *LLMAgent) GetType() ai.AgentType
- func (la *LLMAgent) ListModels(ctx context.Context) ([]ai.Model, error)
- func (la LLMAgent) Pretty() api.Text
- func (la LLMAgent) String() string
Constants ¶
const AgentTypeLLM ai.AgentType = "llm"
Variables ¶
var ( // ErrConnectionNotFound is returned when a named connection doesn't exist in the registry. ErrConnectionNotFound = errors.New("connection not found") // ErrMissingAPIKey is returned when a connection has no API key (password field). ErrMissingAPIKey = errors.New("connection missing API key") // ErrInvalidProvider is returned when the connection type doesn't map to a known provider. ErrInvalidProvider = errors.New("invalid or unknown provider type") // ErrMissingPrompt is returned when Execute() is called without a prompt. ErrMissingPrompt = errors.New("prompt is required") // ErrMissingConnection is returned when Execute() is called without a connection name. ErrMissingConnection = errors.New("connection name is required") // ErrTimeout is returned when a request exceeds the configured timeout. ErrTimeout = errors.New("request timeout exceeded") // ErrSchemaValidation is returned when structured output doesn't match the schema. ErrSchemaValidation = errors.New("response failed schema validation") // ErrInvalidMaxTokens is returned when max tokens is <= 0. ErrInvalidMaxTokens = errors.New("max tokens must be greater than 0") // ErrInvalidTimeout is returned when timeout is <= 0. ErrInvalidTimeout = errors.New("timeout must be greater than 0") // ErrCLINotFound is returned when the claude-code CLI executable is not found in PATH. ErrCLINotFound = errors.New("claude-code CLI not found in PATH") // ErrCLIExecutionFailed is returned when the CLI process exits with a non-zero status. ErrCLIExecutionFailed = errors.New("claude-code CLI execution failed") )
Functions ¶
func CleanupJSONResponse ¶
CleanupJSONResponse attempts to extract and clean JSON from LLM responses that may contain markdown formatting, explanatory text, or other noise.
It tries the following strategies in order: 1. Validate if already valid JSON 2. Extract JSON from markdown code blocks (```json or ```) 3. Extract the first JSON object {...} 4. Extract the first JSON array [...] 5. Return the trimmed original string
After extraction, it validates that the result is valid JSON.
func NewAnthropicProvider ¶
func NewAnthropicProvider(apiKey, model, apiURL string) Provider
NewAnthropicProvider creates a new Anthropic provider with the specified configuration.
func NewClaudeCodeProvider ¶
func NewClaudeCodeProvider(model string) Provider
NewClaudeCodeProvider creates a new Claude Code provider with the specified model.
func NewClientWithModel ¶
func NewClientWithModel(model string, options ...middleware.Option) (Client, error)
NewClientWithModel creates a new LLM client for the specified model. The client automatically infers the provider from the model name and looks up the API key from environment variables:
- OpenAI models: OPENAI_API_KEY
- Anthropic models: ANTHROPIC_API_KEY
- Gemini models: GEMINI_API_KEY or GOOGLE_API_KEY
Example usage:
client, err := llm.NewClientWithModel("gpt-4o")
resp, err := client.NewRequest().
WithPrompt("Hello world").
Execute(ctx)
func NewGeminiProvider ¶
func NewGeminiProvider(apiKey, model, apiURL string) Provider
NewGeminiProvider creates a new Gemini provider with the specified configuration.
func NewOpenAIProvider ¶
func NewOpenAIProvider(apiKey, model, apiURL string) Provider
NewOpenAIProvider creates a new OpenAI provider with the specified configuration.
func NewProvider ¶
func NewProvider(conn *Connection) (Provider, error)
NewProvider creates a provider from a connection configuration.
func UnmarshalWithCleanup ¶
UnmarshalWithCleanup attempts to unmarshal JSON with automatic cleanup of markdown formatting and surrounding noise.
Types ¶
type JSONSchema ¶
type JSONSchema struct {
Type string `json:"type,omitempty"`
Properties map[string]JSONSchema `json:"properties,omitempty"`
Required []string `json:"required,omitempty"`
Items *JSONSchema `json:"items,omitempty"`
Description string `json:"description,omitempty"`
Enum []interface{} `json:"enum,omitempty"`
AdditionalProperties bool `json:"additionalProperties,omitempty"`
}
JSONSchema represents a JSON Schema definition
type LLMAgent ¶
type LLMAgent struct {
// contains filtered or unexported fields
}
LLMAgent implements the Agent interface using the commons-db LLM client. It supports all LLM backends (OpenAI, Anthropic, Gemini, Claude Code CLI).
func NewLLMAgent ¶
func NewLLMAgent(config ai.AgentConfig) (*LLMAgent, error)
NewLLMAgent creates a new LLM agent with the specified configuration.
The agent can be configured to use any LLM backend by specifying the ai.Model name:
- OpenAI: gpt-4o, gpt-4-turbo, gpt-3.5-turbo
- Anthropic: claude-3-opus, claude-3.5-sonnet, claude-3.5-haiku
- Gemini: gemini-2.5-pro, gemini-2.0-flash, gemini-1.5-pro
- Claude Code CLI: claude-code-sonnet, claude-code-opus, claude-code-haiku
func (*LLMAgent) ExecuteBatch ¶
func (la *LLMAgent) ExecuteBatch(ctx context.Context, requests []ai.PromptRequest) (map[string]*ai.PromptResponse, error)
ExecuteBatch processes multiple prompts concurrently.
func (*LLMAgent) ExecutePrompt ¶
func (la *LLMAgent) ExecutePrompt(ctx context.Context, request ai.PromptRequest) (*ai.PromptResponse, error)
ExecutePrompt processes a single prompt using the LLM client.
func (*LLMAgent) GetConfig ¶
func (la *LLMAgent) GetConfig() ai.AgentConfig
GetConfig returns the agent configuration.
func (*LLMAgent) ListModels ¶
ListModels returns available models for all LLM backends.