Documentation
¶
Index ¶
- Constants
- func Main(args []string, opts Options) error
- type ChatHandler
- type ChatOptions
- type ClientUnion
- type Message
- type MessageHistoryUnion
- type Messages
- func (messages Messages) ToAnthropic() (msgs []anthropic.MessageParam, systemPrompts []string, err error)
- func (messages Messages) ToGemini() (msgs []*genai.Content, systemPrompts []string, err error)
- func (messages Messages) ToOpenAI(keepSystemPrompts bool) (msgs []openai.ChatCompletionMessageParamUnion, systemPrompts []string, ...)
- func (messages Messages) ToOpenAI2() []openai.ChatCompletionMessageParamUnion
- type MessagesUnion
- type MsgType
- type Number
- type Options
- type ResolvedOptions
- type ResponseResultAnthropic
- type ResponseResultGemini
- type ResponseResultOpenAI
- type Role
- type TokenCost
- type TokenCostInputBreakdown
- type TokenUsage
- type TokenUsageCost
- type TokenUsageInputBreakdown
- type TokenUsageOutputBreakdown
- type ToolInfo
- type ToolInfoMapping
Constants ¶
View Source
const ( MsgType_Msg = "msg" MsgType_ToolCall = "tool_call" MsgType_ToolResult = "tool_result" MsgType_TokenUsage = "token_usage" MsgType_StopReason = "stop_reason" // anthropic specific )
View Source
const ( Role_User = "user" Role_Assistant = "assistant" Role_System = "system" )
Variables ¶
This section is empty.
Functions ¶
Types ¶
type ChatHandler ¶
func (*ChatHandler) Handle ¶
func (c *ChatHandler) Handle(model string, baseUrl string, token string, msg string, opts ChatOptions) error
type ChatOptions ¶
type ChatOptions struct {
// contains filtered or unexported fields
}
type ClientUnion ¶
type Message ¶
type Message struct {
Type MsgType `json:"type"`
Time string `json:"time"`
Role Role `json:"role"`
Model string `json:"model"`
Content string `json:"content"`
ToolUseID string `json:"tool_use_id,omitempty"`
ToolName string `json:"tool_name,omitempty"`
TokenUsage *TokenUsage `json:"token_usage,omitempty"`
}
Message represents a message in the chat record
type MessageHistoryUnion ¶
type MessageHistoryUnion struct {
FullHistory Messages
SystemPrompts []string
OpenAI []openai.ChatCompletionMessageParamUnion
Anthropic []anthropic.MessageParam
Gemini []*genai.Content
}
type Messages ¶
type Messages []Message
Messages represents a slice of unified messages with conversion methods
func (Messages) ToAnthropic ¶
func (messages Messages) ToAnthropic() (msgs []anthropic.MessageParam, systemPrompts []string, err error)
ToAnthropic converts unified messages to Anthropic format
func (Messages) ToOpenAI ¶
func (messages Messages) ToOpenAI(keepSystemPrompts bool) (msgs []openai.ChatCompletionMessageParamUnion, systemPrompts []string, err error)
ToOpenAI converts unified messages to OpenAI format
func (Messages) ToOpenAI2 ¶
func (messages Messages) ToOpenAI2() []openai.ChatCompletionMessageParamUnion
ToOpenAI2 converts unified messages to OpenAI format
type MessagesUnion ¶
type MessagesUnion struct {
OpenAI []openai.ChatCompletionMessageParamUnion
Anthropic []anthropic.MessageParam
Gemini []*genai.Content
}
type ResolvedOptions ¶ added in v0.0.8
func ResolveEnvOptions ¶ added in v0.0.8
func ResolveProviderDefaultEnvOptions ¶ added in v0.0.8
type ResponseResultAnthropic ¶
type ResponseResultAnthropic struct {
ToolUseNum int
Messages []anthropic.ContentBlockParamUnion
ToolResults []anthropic.ContentBlockParamUnion
TokenUsage TokenUsage
}
type ResponseResultGemini ¶ added in v0.0.7
type ResponseResultOpenAI ¶
type ResponseResultOpenAI struct {
ToolUseNum int
Messages []openai.ChatCompletionMessageParamUnion
ToolResults []openai.ChatCompletionMessageParamUnion
TokenUsage TokenUsage
}
type TokenCost ¶
type TokenCost struct {
// the three are available for all providers
InputUSD string
OutputUSD string
TotalUSD string
// Input breakdown
// anthropic has this detail
InputBreakdown TokenCostInputBreakdown
}
type TokenCostInputBreakdown ¶
type TokenCostInputBreakdown struct {
CacheWriteUSD string
CacheReadUSD string
NonCacheReadUSD string
}
func (TokenCostInputBreakdown) Add ¶
func (c TokenCostInputBreakdown) Add(b TokenCostInputBreakdown) TokenCostInputBreakdown
type TokenUsage ¶
type TokenUsage struct {
Input int64 `json:"input"`
Output int64 `json:"output"`
Total int64 `json:"total"`
InputBreakdown TokenUsageInputBreakdown `json:"input_breakdown"`
OutputBreakdown TokenUsageOutputBreakdown `json:"output_breakdown"`
}
Anthropic:
- how to: https://docs.anthropic.com/en/docs/build-with-claude/prompt-caching
- when: https://www.anthropic.com/news/prompt-caching
- summary: Anthropic only caches sufficiently long prompts. The minimum cacheable prompt length is 1024 tokens for Claude Opus 4, Claude Sonnet 4, Claude Sonnet 3.7, Claude Sonnet 3.5, and Claude Opus 3. The cache is invalidated after 5 minutes.
func (TokenUsage) Add ¶
func (c TokenUsage) Add(b TokenUsage) TokenUsage
type TokenUsageCost ¶
type TokenUsageCost struct {
Usage TokenUsage
Cost TokenCost
}
type TokenUsageInputBreakdown ¶
type TokenUsageInputBreakdown struct {
CacheWrite int64 `json:"cache_write"` // anthropic specific
CacheRead int64 `json:"cache_read"`
NonCacheRead int64 `json:"non_cache_read"`
}
func (TokenUsageInputBreakdown) Add ¶
func (c TokenUsageInputBreakdown) Add(b TokenUsageInputBreakdown) TokenUsageInputBreakdown
type TokenUsageOutputBreakdown ¶
type TokenUsageOutputBreakdown struct {
CacheOutput int64 `json:"cache_output"`
}
type ToolInfo ¶ added in v0.0.8
type ToolInfoMapping ¶ added in v0.0.8
Click to show internal directories.
Click to hide internal directories.