interfaces

package
v0.1.7 Latest
Warning

This package is not in the latest version of its module.

Go to latest
Published: Apr 30, 2026 License: Apache-2.0 Imports: 4 Imported by: 0

Documentation

Index

Constants

This section is empty.

Variables

This section is empty.

Functions

This section is empty.

Types

type AgentToolApprovalPolicy

// AgentToolApprovalPolicy determines whether a tool execution requires
// human approval. Implement for custom behavior; built-in policies are
// provided by the agent package (RequireAllToolApprovalPolicy is the default).
type AgentToolApprovalPolicy interface {
	// RequiresApproval reports whether executing tool requires approval.
	RequiresApproval(tool Tool) bool
}

AgentToolApprovalPolicy determines if a tool execution requires approval. Implement for custom behavior. Built-in policies: agent.RequireAllToolApprovalPolicy (default), agent.AutoToolApprovalPolicy(), agent.AllowlistToolApprovalPolicy(agent.AllowlistToolApprovalConfig{...}) (may error on invalid sub-agent names).

type Conversation

// Conversation stores and retrieves per-conversation message history.
// Conversations are identified by an id supplied at runtime on each call.
type Conversation interface {
	// AddMessage adds a message to the conversation identified by id. Id is passed at runtime (e.g. from Run input, workflow).
	AddMessage(ctx context.Context, id string, msg Message) error

	// ListMessages returns messages for the conversation identified by id.
	// Behavior is tuned with ListMessagesOption values (limit, offset, role filter).
	ListMessages(ctx context.Context, id string, opts ...ListMessagesOption) ([]Message, error)

	// Clear removes all messages for the conversation identified by id. Called by the user when ending a session.
	Clear(ctx context.Context, id string) error

	// IsDistributed returns true if the implementation uses distributed storage (Redis, Postgres, etc.).
	// In-memory implementations return false. Use distributed implementations when using remote workers.
	IsDistributed() bool
}

type JSONSchema

type JSONSchema = types.JSONSchema

JSONSchema is a loose JSON Schema object for tool parameters (canonical definition in types.JSONSchema).

type LLMClient

// LLMClient is a provider-agnostic client for one LLM model.
// Implementations exist per provider (see LLMProvider).
type LLMClient interface {
	// Generate generates a response from the LLM.
	Generate(ctx context.Context, request *LLMRequest) (*LLMResponse, error)
	// GenerateStream generates a response from the LLM using streaming.
	GenerateStream(ctx context.Context, request *LLMRequest) (LLMStream, error)
	// GetModel returns the model name.
	GetModel() string
	// GetProvider returns the provider name.
	GetProvider() LLMProvider
	// IsStreamSupported returns true if the client supports streaming (e.g. OpenAI, Anthropic).
	IsStreamSupported() bool
}

type LLMProvider

// LLMProvider identifies an LLM backend (see the constants below).
type LLMProvider string

// Known providers.
const (
	LLMProviderOpenAI    LLMProvider = "openai"
	LLMProviderAnthropic LLMProvider = "anthropic"
	LLMProviderGemini    LLMProvider = "gemini"
)

type LLMReasoning added in v0.0.10

type LLMReasoning = types.LLMReasoning

LLMReasoning configures reasoning/thinking in a provider-agnostic way (canonical definition in types.LLMReasoning).

type LLMRequest

// LLMRequest is a single completion request passed to an LLMClient.
// Zero/nil fields fall back to provider defaults.
type LLMRequest struct {
	SystemMessage  string
	ResponseFormat *ResponseFormat
	Tools          []ToolSpec // Tool specs for the LLM to choose from
	// Messages is the conversation history. For first turn, use one user message.
	// For continuation after tool use: append assistant (with ToolCalls) + tool result messages.
	Messages []Message

	// Sampling (per-request; typically set from agent config). nil/0 = provider default.
	Temperature *float64 // 0-2 OpenAI, 0-1 Anthropic; also Gemini
	MaxTokens   int      // 0 = provider default
	TopP        *float64 // 0-1; OpenAI and Gemini (Anthropic client does not set TopP)
	TopK        *int     // Anthropic only

	// Reasoning configures generic reasoning/thinking when non-nil; each LLM client maps fields to its API.
	Reasoning *LLMReasoning
}

type LLMResponse

// LLMResponse is the completed output of one LLM generation,
// returned by LLMClient.Generate and LLMStream.GetResult.
type LLMResponse struct {
	Content  string         // generated text content
	Metadata map[string]any // provider-specific extras; contents vary by client
	// Usage is set when the provider returns token usage for this completion (non-stream and stream).
	Usage *LLMUsage
	// ToolCalls contains any tool invocations the LLM chose; empty when none.
	ToolCalls []*ToolCall
}

type LLMStream

// LLMStream yields partial content and optional thinking/tool-call chunks
// from a streaming LLM response. Typical use: for stream.Next() { use
// stream.Current() }, then check stream.Err() and call GetResult().
type LLMStream interface {
	// Next advances to the next chunk; it returns false when the stream is done.
	Next() bool
	// Current returns the chunk most recently produced by Next.
	Current() *LLMStreamChunk
	// Err returns the error that terminated the stream, if any.
	Err() error
	// GetResult returns the accumulated content and tool calls after streaming completes.
	// Call after the Next loop; returns nil if streaming failed or was not completed.
	GetResult() *LLMResponse
}

LLMStream yields partial content and optional thinking/tool-call chunks from a streaming LLM response.

type LLMStreamChunk

// LLMStreamChunk is a single chunk from a streaming LLM response.
type LLMStreamChunk struct {
	ContentDelta  string      // partial text content
	ThinkingDelta string      // Anthropic extended thinking (optional)
	ToolCalls     []*ToolCall // set on final chunk when tool calls are present
}

LLMStreamChunk is a single chunk from a streaming LLM response.

type LLMUsage added in v0.0.10

type LLMUsage = types.LLMUsage

LLMUsage reports token counts from the provider (canonical definition in types.LLMUsage).

type ListMessagesOption

type ListMessagesOption func(*ListMessagesOptions)

func WithLimit

func WithLimit(limit int) ListMessagesOption

WithLimit sets the maximum number of messages to retrieve.

func WithOffset

func WithOffset(offset int) ListMessagesOption

WithOffset sets the number of messages to skip.

func WithRoles

func WithRoles(roles ...MessageRole) ListMessagesOption

WithRoles filters messages by role.

type ListMessagesOptions

// ListMessagesOptions collects the knobs applied by ListMessagesOption
// values when reading conversation history.
type ListMessagesOptions struct {
	// Limit is the maximum number of messages to retrieve from recent. -1 = all.
	Limit int

	// Offset is the number of most recent messages to skip. -1 is treated as 0 (the default).
	Offset int

	// Roles filters messages by role; empty means no role filtering.
	Roles []MessageRole
}

type MCPClient added in v0.1.2

// MCPClient is a client to one MCP server: optional reachability check,
// tools, optional close. Implementations may wrap
// modelcontextprotocol/go-sdk or other transports.
type MCPClient interface {
	// Name identifies this connection for logging and tool prefixes (e.g. [github.com/agenticenv/agent-sdk-go/pkg/agent.MCPServers] key or [github.com/agenticenv/agent-sdk-go/pkg/mcp/client.NewClient] first argument).
	Name() string
	// Ping checks that the server responds (MCP ping on a short-lived session). The default
	// implementation connects, pings, and disconnects; ListTools and CallTool each open their own session.
	Ping(ctx context.Context) error
	// ListTools returns tool definitions from the server (tools/list).
	ListTools(ctx context.Context) ([]ToolSpec, error)
	// CallTool invokes a tool by name with JSON arguments (tools/call).
	CallTool(ctx context.Context, tool string, input json.RawMessage) (json.RawMessage, error)
	// Close releases the connection or session.
	Close() error
}

MCPClient is a client to one MCP server: optional reachability check, tools, optional close. Implementations may wrap modelcontextprotocol/go-sdk or other transports.

type MCPPromptClient added in v0.1.2

// MCPPromptClient extends MCPClient with MCP prompts.
// Optional: tool-only agents need only MCPClient.
type MCPPromptClient interface {
	MCPClient
	// ListPrompts returns available prompts (prompts/list).
	ListPrompts(ctx context.Context) ([]PromptSpec, error)
	// GetPrompt resolves a prompt template with arguments (prompts/get).
	GetPrompt(ctx context.Context, name string, args map[string]string) (json.RawMessage, error)
}

MCPPromptClient extends MCPClient with MCP prompts. Optional: tool-only agents need only MCPClient.

type MCPResourceClient added in v0.1.2

// MCPResourceClient extends MCPClient with MCP resources.
// Optional: tool-only agents need only MCPClient.
type MCPResourceClient interface {
	MCPClient
	// ListResources returns available resources (resources/list).
	ListResources(ctx context.Context) ([]ResourceSpec, error)
	// ReadResource returns the resource body for uri (resources/read).
	ReadResource(ctx context.Context, uri string) (json.RawMessage, error)
}

MCPResourceClient extends MCPClient with MCP resources. Optional: tool-only agents need only MCPClient.

type Message

// Message represents a conversation turn for multi-turn (including tool use).
type Message struct {
	Role    MessageRole `json:"role"`
	Content string      `json:"content"`

	// Tool-related fields; presumably populated only on tool / assistant-with-tool-call
	// turns (ToolCallID matching ToolCall.ToolCallID) — confirm against the agent runtime.
	ToolName   string      `json:"tool_name"`
	ToolCallID string      `json:"tool_call_id"`
	ToolCalls  []*ToolCall `json:"tool_calls"`

	Metadata  map[string]any `json:"metadata"`
	CreatedAt time.Time      `json:"created_at"`
}

Message represents a conversation turn for multi-turn (including tool use).

type MessageRole

// MessageRole is the author role of a Message (see the constants below).
type MessageRole string

// Message roles.
const (
	MessageRoleSystem    MessageRole = "system"
	MessageRoleUser      MessageRole = "user"
	MessageRoleAssistant MessageRole = "assistant"
	MessageRoleTool      MessageRole = "tool"
	MessageRoleReasoning MessageRole = "reasoning"
)

type PromptSpec added in v0.1.2

// PromptSpec is one entry from prompts/list (subset of MCP; fields may grow with spec versions).
type PromptSpec struct {
	Name        string `json:"name"`                  // Prompt identifier.
	Description string `json:"description,omitempty"` // Short description.
}

PromptSpec is one entry from prompts/list (subset of MCP; fields may grow with spec versions).

type ResourceSpec added in v0.1.2

// ResourceSpec is one entry from resources/list (subset of MCP; fields may grow with spec versions).
type ResourceSpec struct {
	URI         string `json:"uri"`                   // Resource URI.
	Name        string `json:"name,omitempty"`        // Human-readable name.
	Description string `json:"description,omitempty"` // Short description.
	MimeType    string `json:"mimeType,omitempty"`    // Optional MIME type hint.
}

ResourceSpec is one entry from resources/list (subset of MCP; fields may grow with spec versions).

type ResponseFormat

// ResponseFormat requests a specific output format from the LLM.
type ResponseFormat struct {
	Type   ResponseFormatType
	Name   string     // NOTE(review): presumably a schema/format label — confirm against LLM clients.
	Schema JSONSchema // NOTE(review): presumably used when Type is ResponseFormatJSON — confirm.
}

type ResponseFormatType

// ResponseFormatType selects the LLM output format (see the constants below).
type ResponseFormatType string

// Supported response formats.
const (
	ResponseFormatJSON ResponseFormatType = "json"
	ResponseFormatText ResponseFormatType = "text"
)

type Tool

// Tool is a callable capability the agent can offer to the LLM. Register
// tools via agent.WithTools. The LLM receives tool definitions and chooses
// which to call; the agent executes the chosen tool.
type Tool interface {
	// Name returns the tool identifier (e.g. "search", "calculator"). Used by the LLM in tool calls.
	Name() string

	// DisplayName returns a human-readable name for the tool (e.g. "Search", "Calculator").
	// NOTE(review): unlike Name, presumably for display rather than for LLM tool calls — confirm.
	DisplayName() string

	// Description describes when and how to use this tool. Shown to the LLM for tool selection.
	Description() string

	// Parameters returns the JSON schema for the tool's input. The LLM produces args matching this schema.
	// Use tools.Params with tools.ParamString, ParamInteger, etc. for type-safe construction.
	Parameters() JSONSchema

	// Execute runs the tool with the given args. Args match the Parameters schema.
	// Called by the agent when the LLM returns a tool call for this tool.
	Execute(ctx context.Context, args map[string]any) (any, error)
}

Tool is a callable capability the agent can offer to the LLM. Register tools via agent.WithTools. The LLM receives tool definitions and chooses which to call; the agent executes the chosen tool.

type ToolApproval

// ToolApproval is an optional interface for tools that require interactive
// human approval before execution. When implemented, the agent honors
// ApprovalRequired() when no agent-level approval policy is set;
// WithToolApprovalPolicy overrides this tool-level default when set.
type ToolApproval interface {
	// ApprovalRequired reports whether this tool defaults to requiring approval.
	ApprovalRequired() bool
}

ToolApproval is an optional interface for tools that require interactive human approval before execution. When implemented, the agent honors ApprovalRequired() when no agent-level approval policy is set. WithToolApprovalPolicy overrides this tool-level default when set.

type ToolAuthorizationDecision added in v0.1.3

// ToolAuthorizationDecision is the structured authorization outcome for one
// tool call. Reason is optional and primarily useful when Allow is false.
type ToolAuthorizationDecision struct {
	Allow  bool   `json:"allow"`
	Reason string `json:"reason,omitempty"`
}

ToolAuthorizationDecision is the structured authorization outcome for one tool call. Reason is optional and primarily useful when Allow is false.

type ToolAuthorizer added in v0.1.3

// ToolAuthorizer is an optional interface for tools that enforce programmatic
// authorization. When implemented, the agent checks Authorize before
// approval/Execute in the tool call flow.
type ToolAuthorizer interface {
	// Authorize returns an allow/deny decision (with optional reason) for args.
	Authorize(ctx context.Context, args map[string]any) (ToolAuthorizationDecision, error)
}

ToolAuthorizer is an optional interface for tools that enforce programmatic authorization. When implemented, the agent checks Authorize before approval/Execute in the tool call flow. Return a decision with Allow=true/false and optional deny metadata.

type ToolCall

// ToolCall is the LLM's decision to invoke a tool.
type ToolCall struct {
	ToolCallID string         `json:"tool_call_id"` // from API; needed to match tool results
	ToolName   string         `json:"tool_name"`
	Args       map[string]any `json:"args"`
}

ToolCall is the LLM's decision to invoke a tool.

type ToolRegistry

// ToolRegistry manages a collection of tools, registered and looked up by name.
type ToolRegistry interface {
	// Register adds a tool. Overwrites if a tool with the same name exists.
	Register(tool Tool)

	// Get returns the tool by name, or (nil, false) if not found.
	Get(name string) (Tool, bool)

	// Tools returns all registered tools in registration order.
	Tools() []Tool
}

ToolRegistry manages a collection of tools. Use for registering and looking up tools by name.

type ToolSpec

type ToolSpec = types.ToolSpec

ToolSpec is the schema sent to the LLM for tool selection (canonical definition in types.ToolSpec).

func ToolToSpec

func ToolToSpec(t Tool) ToolSpec

ToolToSpec converts a Tool to its spec for the LLM.

func ToolsToSpecs

func ToolsToSpecs(tools []Tool) []ToolSpec

ToolsToSpecs converts a slice of Tool to specs for the LLM.

Directories

Path Synopsis
Package mocks is a generated GoMock package.
Package mocks is a generated GoMock package.

Jump to

Keyboard shortcuts

? : This menu
/ : Search site
f or F : Jump to
y or Y : Canonical URL