Documentation
¶
Index ¶
- type A2AAuth
- type Agent
- type Attachment
- type Chain
- type ChainLimits
- type ChainPublish
- type ChainTarget
- type ContextInputs
- type ExposeA2A
- type Finder
- type Identity
- type Knowledge
- type Loader
- type Profile
- type Resource
- type Serve
- type ServeA2A
- type Source
- type Tool
- type ToolCallExposure
- type WhenExpect
- type WhenSpec
Constants ¶
This section is empty.
Variables ¶
This section is empty.
Functions ¶
This section is empty.
Types ¶
type A2AAuth ¶ added in v0.2.9
type A2AAuth struct {
Enabled bool `yaml:"enabled,omitempty" json:"enabled,omitempty"`
Resource string `yaml:"resource,omitempty" json:"resource,omitempty"`
Scopes []string `yaml:"scopes,omitempty" json:"scopes,omitempty"`
UseIDToken bool `yaml:"useIDToken,omitempty" json:"useIDToken,omitempty"`
ExcludePrefix string `yaml:"excludePrefix,omitempty" json:"excludePrefix,omitempty"`
}
A2AAuth configures per-agent A2A auth middleware.
type Agent ¶
type Agent struct {
Identity `yaml:",inline" json:",inline"`
Source *Source `yaml:"source,omitempty" json:"source,omitempty"` // Source of the agent
llm.ModelSelection `yaml:",inline" json:",inline"`
Temperature float64 `yaml:"temperature,omitempty" json:"temperature,omitempty"` // Temperature
Description string `yaml:"description,omitempty" json:"description,omitempty"` // Description of the agent
Prompt *prompt.Prompt `yaml:"prompt,omitempty" json:"prompt,omitempty"` // Prompt template
Knowledge []*Knowledge `yaml:"knowledge,omitempty" json:"knowledge,omitempty"`
// Resources: generic resource roots (file paths or MCP URIs)
Resources []*Resource `yaml:"resources,omitempty" json:"resources,omitempty"`
// AutoSummarize controls whether the conversation is automatically
// summarized/compacted after a turn (when supported by the runtime).
AutoSummarize *bool `yaml:"autoSummarize,omitempty" json:"autoSummarize,omitempty"`
// UI defaults: whether to show execution details and tool feed in chat
ShowExecutionDetails *bool `yaml:"showExecutionDetails,omitempty" json:"showExecutionDetails,omitempty"`
ShowToolFeed *bool `yaml:"showToolFeed,omitempty" json:"showToolFeed,omitempty"`
// RingOnFinish enables a short client-side notification sound when a turn
// completes (done or error). Consumed by the UI via metadata.AgentInfo.
RingOnFinish bool `yaml:"ringOnFinish,omitempty" json:"ringOnFinish,omitempty"`
SystemPrompt *prompt.Prompt `yaml:"systemPrompt,omitempty" json:"systemPrompt,omitempty"`
SystemKnowledge []*Knowledge `yaml:"systemKnowledge,omitempty" json:"systemKnowledge,omitempty"`
// Tool defines the serialized tool configuration block using the new
// contract: tool: { items: [], callExposure }.
// This preserves backward compatibility while enabling richer config.
Tool Tool `yaml:"tool,omitempty" json:"tool,omitempty"`
// Reasoning controls provider native reasoning behavior (e.g., effort/summary
// for OpenAI o-series). When set, EnsureGenerateOptions passes it to LLM core.
Reasoning *llm.Reasoning `yaml:"reasoning,omitempty" json:"reasoning,omitempty"`
// ParallelToolCalls requests providers that support it to execute
// multiple tool calls in parallel within a single reasoning step.
// Honored only when the selected model implements the feature.
ParallelToolCalls bool `yaml:"parallelToolCalls,omitempty" json:"parallelToolCalls,omitempty"`
// SupportsContinuationByResponseID, when set to false, disables provider
// continuation by previous_response_id even if the selected model supports
// it. When omitted (nil), the runtime decides based on model capability.
// This is parsed from YAML and propagated to llm.Options.ContinuationEnabled.
SupportsContinuationByResponseID *bool `yaml:"supportsContinuationByResponseID,omitempty" json:"supportsContinuationByResponseID,omitempty"`
// Persona defines the default conversational persona the agent uses when
// sending messages. When nil the role defaults to "assistant".
Persona *prompt.Persona `yaml:"persona,omitempty" json:"persona,omitempty"`
// Profile controls agent discoverability in the catalog/list (preferred over Directory).
Profile *Profile `yaml:"profile,omitempty" json:"profile,omitempty"`
// Serve groups serving endpoints (e.g., A2A). Preferred over legacy ExposeA2A.
Serve *Serve `yaml:"serve,omitempty" json:"serve,omitempty"`
// ExposeA2A (legacy) retained for backward compatibility; prefer Serve.A2A.
ExposeA2A *ExposeA2A `yaml:"exposeA2A,omitempty" json:"exposeA2A,omitempty"`
// Attachment groups binary-attachment behavior
Attachment *Attachment `yaml:"attachment,omitempty" json:"attachment,omitempty"`
// Chains defines post-turn follow-ups executed after a turn finishes.
Chains []*Chain `yaml:"chains,omitempty" json:"chains,omitempty"`
// ContextInputs (YAML key: elicitation) defines an optional schema-driven
// payload describing auxiliary inputs to be placed under args.context when
// calling this agent. UIs can render these ahead of, or during, execution.
// Runtime behavior remains controlled by QueryInput.elicitationMode and
// service options (router/awaiter).
ContextInputs *ContextInputs `yaml:"elicitation,omitempty" json:"elicitation,omitempty"`
}
Agent represents an agent
func (*Agent) HasAutoSummarizeDefinition ¶ added in v0.2.2
func (*Agent) Init ¶ added in v0.2.2
func (a *Agent) Init()
Init applies default values to the agent after it has been loaded from YAML. It should be invoked by the loader to ensure a single place for defaults.
func (*Agent) ShallAutoSummarize ¶ added in v0.2.2
type Attachment ¶ added in v0.2.2
type Attachment struct {
// LimitBytes caps cumulative attachments size per conversation for this agent.
// When zero, a provider default may apply or no cap if provider has none.
LimitBytes int64 `yaml:"limitBytes,omitempty" json:"limitBytes,omitempty"`
// Mode controls delivery: "ref" or "inline"
Mode string `yaml:"mode,omitempty" json:"mode,omitempty"`
// TTLSec sets TTL for attachments in seconds.
TTLSec int64 `yaml:"ttlSec,omitempty" json:"ttlSec,omitempty"`
}
Attachment configures binary attachment behavior for an agent.
type Chain ¶ added in v0.2.2
type Chain struct {
On string `yaml:"on,omitempty" json:"on,omitempty"` // succeeded|failed|canceled|*
Target ChainTarget `yaml:"target" json:"target"` // required: agent to invoke
Conversation string `yaml:"conversation,omitempty" json:"conversation,omitempty"` // reuse|link (default link)
When *WhenSpec `yaml:"when,omitempty" json:"when,omitempty"` // optional condition
Query *prompt.Prompt `yaml:"query,omitempty" json:"query,omitempty"` // templated query/payload
Publish *ChainPublish `yaml:"publish,omitempty" json:"publish,omitempty"` // optional publish settings
OnError string `yaml:"onError,omitempty" json:"onError,omitempty"` // ignore|message|propagate
Limits *ChainLimits `yaml:"limits,omitempty" json:"limits,omitempty"` // guard-rails
}
Chain defines a single post-turn follow-up.
type ChainLimits ¶ added in v0.2.2
type ChainLimits struct {
MaxDepth int `yaml:"maxDepth,omitempty" json:"maxDepth,omitempty"`
}
type ChainPublish ¶ added in v0.2.2
type ChainPublish struct {
Role string `yaml:"role,omitempty" json:"role,omitempty"` // assistant|user|system|tool|none
Name string `yaml:"name,omitempty" json:"name,omitempty"` // attribution handle
Type string `yaml:"type,omitempty" json:"type,omitempty"` // text|control
Parent string `yaml:"parent,omitempty" json:"parent,omitempty"` // same_turn|last_user|none
}
type ChainTarget ¶ added in v0.2.2
type ChainTarget struct {
AgentID string `yaml:"agentId" json:"agentId"`
}
type ContextInputs ¶ added in v0.2.10
type ContextInputs struct {
// Enabled gates whether this elicitation should be considered when exposing
// agent-derived tool schemas or metadata.
Enabled bool `yaml:"enabled,omitempty" json:"enabled,omitempty"`
// Inline MCP request parameters: Title, Message, RequestedSchema, etc.
mcpproto.ElicitRequestParams `yaml:",inline" json:",inline"`
}
ContextInputs models auxiliary inputs for an agent as a JSON-Schema based input request (YAML key: elicitation). It embeds the MCP protocol ElicitRequestParams for a consistent wire format with tool- and assistant-originated elicitations.
type ExposeA2A ¶ added in v0.2.9
type ExposeA2A struct {
Enabled bool `yaml:"enabled,omitempty" json:"enabled,omitempty"`
Port int `yaml:"port,omitempty" json:"port,omitempty"`
BasePath string `yaml:"basePath,omitempty" json:"basePath,omitempty"`
Streaming bool `yaml:"streaming,omitempty" json:"streaming,omitempty"`
Auth *A2AAuth `yaml:"auth,omitempty" json:"auth,omitempty"`
}
ExposeA2A (legacy): retained for backward compatibility; use Serve.A2A instead.
type Knowledge ¶
type Knowledge struct {
Description string `yaml:"description,omitempty" json:"description,omitempty"`
Match *option.Options `json:"match,omitempty"` // Optional matching options
URL string `yaml:"url,omitempty" json:"url,omitempty"`
InclusionMode string `yaml:"inclusionMode,omitempty" json:"inclusionMode,omitempty"` // Inclusion mode for the knowledge base
MaxFiles int `yaml:"maxFiles,omitempty" json:"maxFiles,omitempty"` // Max matched assets per knowledge (default 5)
MinScore *float64 `yaml:"minScore,omitempty" json:"minScore,omitempty"` // Force match mode when set; optional score threshold
}
func (*Knowledge) EffectiveMaxFiles ¶ added in v0.2.2
EffectiveMaxFiles returns the max files constraint with a default of 5 when unset.
type Loader ¶
type Loader interface {
// Add stores an in-memory representation of an Agent so it becomes
// available for subsequent queries.
Add(name string, agent *Agent)
// Load retrieves an Agent by its name. If the Agent does not exist, it
// returns an error.
Load(ctx context.Context, name string) (*Agent, error)
}
Loader exposes operations required by higher-level services on top of the concrete Loader implementation. The interface is intentionally minimal to keep package dependencies low – additional Loader methods should be added only when they are genuinely used by an upstream layer.
type Profile ¶ added in v0.2.9
type Profile struct {
Publish bool `yaml:"publish,omitempty" json:"publish,omitempty"`
Name string `yaml:"name,omitempty" json:"name,omitempty"`
Description string `yaml:"description,omitempty" json:"description,omitempty"`
Tags []string `yaml:"tags,omitempty" json:"tags,omitempty"`
Rank int `yaml:"rank,omitempty" json:"rank,omitempty"`
// Future-proof: extra metadata for presentation
Capabilities map[string]interface{} `yaml:"capabilities,omitempty" json:"capabilities,omitempty"`
Responsibilities []string `yaml:"responsibilities,omitempty" json:"responsibilities,omitempty"`
InScope []string `yaml:"inScope,omitempty" json:"inScope,omitempty"`
OutOfScope []string `yaml:"outOfScope,omitempty" json:"outOfScope,omitempty"`
// ConversationScope controls child conversation reuse when this agent is
// invoked as a tool via llm/agents:run. Supported values:
// - "new" → always create a new linked child conversation
// - "parent" → reuse a single child per parent conversation (agentId+parentId)
// - "parentTurn" → reuse per parent turn (agentId+parentId+parentTurnId)
// When empty, the runtime defaults to "new".
ConversationScope string `yaml:"conversationScope,omitempty" json:"conversationScope,omitempty"`
}
Profile controls discoverability in the agent catalog/list.
type Resource ¶ added in v0.2.21
type Resource struct {
URI string `yaml:"uri" json:"uri"`
Role string `yaml:"role,omitempty" json:"role,omitempty"` // system|user
Binding bool `yaml:"binding,omitempty" json:"binding,omitempty"` // include in auto top-N binding
MaxFiles int `yaml:"maxFiles,omitempty" json:"maxFiles,omitempty"`
TrimPath string `yaml:"trimPath,omitempty" json:"trimPath,omitempty"`
Match *option.Options `yaml:"match,omitempty" json:"match,omitempty"`
MinScore *float64 `yaml:"minScore,omitempty" json:"minScore,omitempty"`
}
Resource defines a single resource root with optional binding behavior.
type Serve ¶ added in v0.2.9
type Serve struct {
A2A *ServeA2A `yaml:"a2a,omitempty" json:"a2a,omitempty"`
}
Serve groups serving endpoints for this agent (e.g., A2A).
type ServeA2A ¶ added in v0.2.9
type ServeA2A struct {
Enabled bool `yaml:"enabled,omitempty" json:"enabled,omitempty"`
Port int `yaml:"port,omitempty" json:"port,omitempty"`
Streaming bool `yaml:"streaming,omitempty" json:"streaming,omitempty"`
Auth *A2AAuth `yaml:"auth,omitempty" json:"auth,omitempty"`
}
ServeA2A declares how to expose an internal agent as an A2A server.
type Tool ¶ added in v0.2.9
type Tool struct {
Items []*llm.Tool `yaml:"items,omitempty" json:"items,omitempty"`
CallExposure ToolCallExposure `yaml:"callExposure,omitempty" json:"callExposure,omitempty"`
}
type ToolCallExposure ¶ added in v0.2.0
type ToolCallExposure string
ToolCallExposure controls how tool calls are exposed back to the LLM prompt and templates. Supported modes:
- "turn": include only tool calls from the current turn
- "conversation": include tool calls from the whole conversation
- "semantic": reserved for future use (provider-native tool semantics)
type WhenExpect ¶ added in v0.2.2
type WhenExpect struct {
Kind string `yaml:"kind,omitempty" json:"kind,omitempty"`
Pattern string `yaml:"pattern,omitempty" json:"pattern,omitempty"`
Path string `yaml:"path,omitempty" json:"path,omitempty"`
}
WhenExpect describes how to extract a boolean from an LLM response. Supported kinds: boolean (default), regex, jsonpath (basic $.field).
type WhenSpec ¶ added in v0.2.2
type WhenSpec struct {
Expr string `yaml:"expr,omitempty" json:"expr,omitempty"`
Query *prompt.Prompt `yaml:"query,omitempty" json:"query,omitempty"`
Model string `yaml:"model,omitempty" json:"model,omitempty"`
Expect *WhenExpect `yaml:"expect,omitempty" json:"expect,omitempty"`
}
WhenSpec specifies a conditional gate for executing a chain. Expr is evaluated first; if Expr is empty and Query is present, an LLM prompt is run and a boolean is extracted from the response using Expect.