Documentation
¶
Index ¶
- func New(client *openai.Client) (types.LLMProvider, error)
- type ChatGPT
- func (gpt *ChatGPT) CalculateCost(model string, inputTokens, outputTokens, cachedTokens int64) int64
- func (gpt *ChatGPT) Chat(ctx context.Context, model string, request *types.ChatRequest) (types.ChatResponse, error)
- func (gpt *ChatGPT) CheckContextWindow(model string, totalInputTokens, compactAtPercent int) error
- func (gpt *ChatGPT) EstimateInputTokens(model string, messages []types.Message) (int, error)
- func (gpt *ChatGPT) ValidateModel(ctx context.Context, model string) error
- type TokenUsage
Constants ¶
This section is empty.
Variables ¶
This section is empty.
Functions ¶
Types ¶
type ChatGPT ¶
type ChatGPT struct {
// contains filtered or unexported fields
}
func (*ChatGPT) CalculateCost ¶
func (gpt *ChatGPT) CalculateCost(model string, inputTokens, outputTokens, cachedTokens int64) int64
CalculateCost returns total USD cents (rounded half-up) at Standard pricing. Reasoning tokens must be included in outputTokens by the caller.
func (*ChatGPT) Chat ¶
func (gpt *ChatGPT) Chat(ctx context.Context, model string, request *types.ChatRequest) (types.ChatResponse, error)
func (*ChatGPT) CheckContextWindow ¶
func (*ChatGPT) EstimateInputTokens ¶
type TokenUsage ¶
type TokenUsage struct {
InputTokens int64 `json:"input_tokens"`
OutputTokens int64 `json:"output_tokens"`
TotalTokens int64 `json:"total_tokens"`
InputTokensDetails struct {
CachedTokens int64 `json:"cached_tokens"`
} `json:"input_tokens_details"`
OutputTokensDetails struct {
ReasoningTokens int64 `json:"reasoning_tokens"`
} `json:"output_tokens_details"`
}
Click to show internal directories.
Click to hide internal directories.