Documentation
¶
Index ¶
- Constants
- Variables
- func ConvertEmbeddingsRequest(meta *meta.Meta, req *http.Request) (string, http.Header, io.Reader, error)
- func ConvertImageRequest(meta *meta.Meta, req *http.Request) (string, http.Header, io.Reader, error)
- func ConvertRequest(meta *meta.Meta, req *http.Request) (string, http.Header, io.Reader, error)
- func ConvertRerankRequest(meta *meta.Meta, req *http.Request) (string, http.Header, io.Reader, error)
- func ConvertSTTRequest(meta *meta.Meta, request *http.Request) (string, http.Header, io.Reader, error)
- func ConvertTTSRequest(meta *meta.Meta, req *http.Request, defaultVoice string) (string, http.Header, io.Reader, error)
- func ConvertTextRequest(meta *meta.Meta, req *http.Request, doNotPatchStreamOptionsIncludeUsage bool) (string, http.Header, io.Reader, error)
- func CountTokenInput(input any, model string) int
- func CountTokenMessages(messages []*model.Message, model string) int
- func CountTokenText(text string, model string) int
- func DoResponse(meta *meta.Meta, c *gin.Context, resp *http.Response) (usage *relaymodel.Usage, err *relaymodel.ErrorWithStatusCode)
- func ErrorHanlder(resp *http.Response) *model.ErrorWithStatusCode
- func ErrorWrapper(err error, code any, statusCode int) *relaymodel.ErrorWithStatusCode
- func ErrorWrapperWithMessage(message string, code any, statusCode int) *relaymodel.ErrorWithStatusCode
- func GetBalance(channel *model.Channel) (float64, error)
- func GetFullRequestURL(baseURL string, requestURL string) string
- func GetPromptTokens(meta *meta.Meta, textRequest *relaymodel.GeneralOpenAIRequest) int
- func Handler(meta *meta.Meta, c *gin.Context, resp *http.Response) (*model.Usage, *model.ErrorWithStatusCode)
- func ImageHandler(meta *meta.Meta, c *gin.Context, resp *http.Response) (*model.Usage, *model.ErrorWithStatusCode)
- func ModerationsHandler(meta *meta.Meta, c *gin.Context, resp *http.Response) (*model.Usage, *model.ErrorWithStatusCode)
- func RerankHandler(meta *meta.Meta, c *gin.Context, resp *http.Response) (*model.Usage, *model.ErrorWithStatusCode)
- func ResponseText2Usage(responseText string, modeName string, promptTokens int) *model.Usage
- func STTHandler(meta *meta.Meta, c *gin.Context, resp *http.Response) (*model.Usage, *model.ErrorWithStatusCode)
- func SplitThink(data map[string]any)
- func SplitThinkModeld(data *TextResponse)
- func StreamHandler(meta *meta.Meta, c *gin.Context, resp *http.Response) (*model.Usage, *model.ErrorWithStatusCode)
- func StreamSplitThink(data map[string]any, thinkSplitter *splitter.Splitter, ...)
- func StreamSplitThinkModeld(data *ChatCompletionsStreamResponse, thinkSplitter *splitter.Splitter, ...)
- func TTSHandler(meta *meta.Meta, c *gin.Context, resp *http.Response) (*relaymodel.Usage, *relaymodel.ErrorWithStatusCode)
- type Adaptor
- func (a *Adaptor) ConvertRequest(meta *meta.Meta, req *http.Request) (string, http.Header, io.Reader, error)
- func (a *Adaptor) DoRequest(_ *meta.Meta, _ *gin.Context, req *http.Request) (*http.Response, error)
- func (a *Adaptor) DoResponse(meta *meta.Meta, c *gin.Context, resp *http.Response) (usage *relaymodel.Usage, err *relaymodel.ErrorWithStatusCode)
- func (a *Adaptor) GetBalance(channel *model.Channel) (float64, error)
- func (a *Adaptor) GetBaseURL() string
- func (a *Adaptor) GetChannelName() string
- func (a *Adaptor) GetModelList() []*model.ModelConfig
- func (a *Adaptor) GetRequestURL(meta *meta.Meta) (string, error)
- func (a *Adaptor) SetupRequestHeader(meta *meta.Meta, _ *gin.Context, req *http.Request) error
- type ChatCompletionsStreamResponse
- type ChatCompletionsStreamResponseChoice
- type ChatRequest
- type EmbeddingResponse
- type EmbeddingResponseItem
- type ErrorResp
- type GeneralErrorResponse
- type ImageContent
- type ImageData
- type ImageRequest
- type ImageResponse
- type Segment
- type SlimRerankResponse
- type SlimTextResponse
- type SubscriptionResponse
- type TextContent
- type TextRequest
- type TextResponse
- type TextResponseChoice
- type UsageAndChoicesResponse
- type UsageOrResponseText
- type UsageResponse
- type WhisperJSONResponse
- type WhisperVerboseJSONResponse
Constants ¶
View Source
const ( ErrorTypeAIProxy = middleware.ErrorTypeAIPROXY ErrorTypeUpstream = "upstream_error" ErrorCodeBadResponse = "bad_response" )
View Source
const ( DataPrefix = "data:" Done = "[DONE]" DataPrefixLength = len(DataPrefix) )
View Source
const DoNotPatchStreamOptionsIncludeUsageMetaKey = "do_not_patch_stream_options_include_usage"
View Source
const MetaEmbeddingsPatchInputToSlices = "embeddings_input_to_slices"
View Source
const MetaResponseFormat = "response_format"
Variables ¶
View Source
var ( DataPrefixBytes = conv.StringToBytes(DataPrefix) DoneBytes = conv.StringToBytes(Done) )
View Source
var ModelList = []*model.ModelConfig{ { Model: "gpt-3.5-turbo", Type: relaymode.ChatCompletions, Owner: model.ModelOwnerOpenAI, InputPrice: 0.022, OutputPrice: 0.044, Config: model.NewModelConfig( model.WithModelConfigMaxContextTokens(4096), model.WithModelConfigToolChoice(true), ), }, { Model: "gpt-3.5-turbo-16k", Type: relaymode.ChatCompletions, Owner: model.ModelOwnerOpenAI, InputPrice: 0.022, OutputPrice: 0.044, Config: model.NewModelConfig( model.WithModelConfigMaxContextTokens(16384), model.WithModelConfigToolChoice(true), ), }, { Model: "gpt-3.5-turbo-instruct", Type: relaymode.ChatCompletions, Owner: model.ModelOwnerOpenAI, }, { Model: "gpt-4", Type: relaymode.ChatCompletions, Owner: model.ModelOwnerOpenAI, InputPrice: 0.22, OutputPrice: 0.44, Config: model.NewModelConfig( model.WithModelConfigMaxContextTokens(8192), model.WithModelConfigToolChoice(true), ), }, { Model: "gpt-4-32k", Type: relaymode.ChatCompletions, Owner: model.ModelOwnerOpenAI, InputPrice: 0.44, OutputPrice: 0.88, Config: model.NewModelConfig( model.WithModelConfigMaxContextTokens(32768), model.WithModelConfigToolChoice(true), ), }, { Model: "gpt-4-turbo", Type: relaymode.ChatCompletions, Owner: model.ModelOwnerOpenAI, InputPrice: 0.071, OutputPrice: 0.213, Config: model.NewModelConfig( model.WithModelConfigMaxContextTokens(131072), model.WithModelConfigToolChoice(true), ), }, { Model: "gpt-4o", Type: relaymode.ChatCompletions, Owner: model.ModelOwnerOpenAI, InputPrice: 0.01775, OutputPrice: 0.071, Config: model.NewModelConfig( model.WithModelConfigMaxContextTokens(131072), model.WithModelConfigVision(true), model.WithModelConfigToolChoice(true), ), }, { Model: "chatgpt-4o-latest", Type: relaymode.ChatCompletions, Owner: model.ModelOwnerOpenAI, }, { Model: "gpt-4o-mini", Type: relaymode.ChatCompletions, Owner: model.ModelOwnerOpenAI, InputPrice: 0.001065, OutputPrice: 0.00426, Config: model.NewModelConfig( model.WithModelConfigMaxContextTokens(131072), 
model.WithModelConfigToolChoice(true), ), }, { Model: "gpt-4-vision-preview", Type: relaymode.ChatCompletions, Owner: model.ModelOwnerOpenAI, }, { Model: "o1-mini", Type: relaymode.ChatCompletions, Owner: model.ModelOwnerOpenAI, InputPrice: 0.0213, OutputPrice: 0.0852, Config: model.NewModelConfig( model.WithModelConfigMaxContextTokens(131072), ), }, { Model: "o1-preview", Type: relaymode.ChatCompletions, Owner: model.ModelOwnerOpenAI, InputPrice: 0.1065, OutputPrice: 0.426, Config: model.NewModelConfig( model.WithModelConfigMaxContextTokens(131072), ), }, { Model: "text-embedding-ada-002", Type: relaymode.Embeddings, Owner: model.ModelOwnerOpenAI, }, { Model: "text-embedding-3-small", Type: relaymode.Embeddings, Owner: model.ModelOwnerOpenAI, }, { Model: "text-embedding-3-large", Type: relaymode.Embeddings, Owner: model.ModelOwnerOpenAI, }, { Model: "text-curie-001", Type: relaymode.Completions, Owner: model.ModelOwnerOpenAI, }, { Model: "text-babbage-001", Type: relaymode.Completions, Owner: model.ModelOwnerOpenAI, }, { Model: "text-ada-001", Type: relaymode.Completions, Owner: model.ModelOwnerOpenAI, }, { Model: "text-davinci-002", Type: relaymode.Completions, Owner: model.ModelOwnerOpenAI, }, { Model: "text-davinci-003", Type: relaymode.Completions, Owner: model.ModelOwnerOpenAI, }, { Model: "text-moderation-latest", Type: relaymode.Moderations, Owner: model.ModelOwnerOpenAI, }, { Model: "text-moderation-stable", Type: relaymode.Moderations, Owner: model.ModelOwnerOpenAI, }, { Model: "text-davinci-edit-001", Type: relaymode.Edits, Owner: model.ModelOwnerOpenAI, }, { Model: "davinci-002", Type: relaymode.Completions, Owner: model.ModelOwnerOpenAI, }, { Model: "babbage-002", Type: relaymode.Completions, Owner: model.ModelOwnerOpenAI, }, { Model: "dall-e-2", Type: relaymode.ImagesGenerations, Owner: model.ModelOwnerOpenAI, }, { Model: "dall-e-3", Type: relaymode.ImagesGenerations, Owner: model.ModelOwnerOpenAI, }, { Model: "whisper-1", Type: 
relaymode.AudioTranscription, Owner: model.ModelOwnerOpenAI, }, { Model: "tts-1", Type: relaymode.AudioSpeech, Owner: model.ModelOwnerOpenAI, }, { Model: "tts-1-1106", Type: relaymode.AudioSpeech, Owner: model.ModelOwnerOpenAI, }, { Model: "tts-1-hd", Type: relaymode.AudioSpeech, Owner: model.ModelOwnerOpenAI, }, { Model: "tts-1-hd-1106", Type: relaymode.AudioSpeech, Owner: model.ModelOwnerOpenAI, }, }
Functions ¶
func ConvertImageRequest ¶
func ConvertRequest ¶
func ConvertRerankRequest ¶
func ConvertSTTRequest ¶
func ConvertTTSRequest ¶
func ConvertTextRequest ¶
func CountTokenInput ¶
func CountTokenText ¶
func DoResponse ¶
func DoResponse(meta *meta.Meta, c *gin.Context, resp *http.Response) (usage *relaymodel.Usage, err *relaymodel.ErrorWithStatusCode)
func ErrorHanlder ¶
func ErrorHanlder(resp *http.Response) *model.ErrorWithStatusCode
func ErrorWrapper ¶
func ErrorWrapper(err error, code any, statusCode int) *relaymodel.ErrorWithStatusCode
func ErrorWrapperWithMessage ¶
func ErrorWrapperWithMessage(message string, code any, statusCode int) *relaymodel.ErrorWithStatusCode
func GetFullRequestURL ¶
func GetPromptTokens ¶
func GetPromptTokens(meta *meta.Meta, textRequest *relaymodel.GeneralOpenAIRequest) int
func ImageHandler ¶
func ModerationsHandler ¶
func RerankHandler ¶
func ResponseText2Usage ¶
func STTHandler ¶
func SplitThink ¶
func SplitThinkModeld ¶
func SplitThinkModeld(data *TextResponse)
func StreamHandler ¶
func StreamSplitThink ¶
func StreamSplitThink(data map[string]any, thinkSplitter *splitter.Splitter, renderCallback func(data map[string]any))
renderCallback may reuse data, so don't modify data after passing it to the callback.
func StreamSplitThinkModeld ¶
func StreamSplitThinkModeld(data *ChatCompletionsStreamResponse, thinkSplitter *splitter.Splitter, renderCallback func(data *ChatCompletionsStreamResponse))
func TTSHandler ¶
func TTSHandler(meta *meta.Meta, c *gin.Context, resp *http.Response) (*relaymodel.Usage, *relaymodel.ErrorWithStatusCode)
Types ¶
type Adaptor ¶
type Adaptor struct{}
func (*Adaptor) ConvertRequest ¶
func (*Adaptor) DoResponse ¶
func (a *Adaptor) DoResponse(meta *meta.Meta, c *gin.Context, resp *http.Response) (usage *relaymodel.Usage, err *relaymodel.ErrorWithStatusCode)
func (*Adaptor) GetBaseURL ¶
func (*Adaptor) GetChannelName ¶
func (*Adaptor) GetModelList ¶
func (a *Adaptor) GetModelList() []*model.ModelConfig
type ChatRequest ¶
type EmbeddingResponse ¶
type EmbeddingResponse struct {
Object string `json:"object"`
Model string `json:"model"`
Data []*EmbeddingResponseItem `json:"data"`
model.Usage `json:"usage"`
}
type EmbeddingResponseItem ¶
type GeneralErrorResponse ¶
type GeneralErrorResponse struct {
Error model.Error `json:"error"`
Message string `json:"message"`
Msg string `json:"msg"`
Err string `json:"err"`
ErrorMsg string `json:"error_msg"`
Header struct {
Message string `json:"message"`
} `json:"header"`
Response struct {
Error struct {
Message string `json:"message"`
} `json:"error"`
} `json:"response"`
}
func (GeneralErrorResponse) ToMessage ¶
func (e GeneralErrorResponse) ToMessage() string
type ImageContent ¶
type ImageRequest ¶
type ImageRequest struct {
Model string `json:"model"`
Prompt string `binding:"required" json:"prompt"`
Size string `json:"size,omitempty"`
Quality string `json:"quality,omitempty"`
ResponseFormat string `json:"response_format,omitempty"`
Style string `json:"style,omitempty"`
User string `json:"user,omitempty"`
N int `json:"n,omitempty"`
}
ImageRequest docs: https://platform.openai.com/docs/api-reference/images/create
type ImageResponse ¶
type Segment ¶
type Segment struct {
Text string `json:"text"`
Tokens []int `json:"tokens"`
ID int `json:"id"`
Seek int `json:"seek"`
Start float64 `json:"start"`
End float64 `json:"end"`
Temperature float64 `json:"temperature"`
AvgLogprob float64 `json:"avg_logprob"`
CompressionRatio float64 `json:"compression_ratio"`
NoSpeechProb float64 `json:"no_speech_prob"`
}
type SlimRerankResponse ¶
type SlimRerankResponse struct {
Meta model.RerankMeta `json:"meta"`
}
type SlimTextResponse ¶
type SlimTextResponse struct {
Error model.Error `json:"error"`
Choices []*TextResponseChoice `json:"choices"`
Usage model.Usage `json:"usage"`
}
func GetSlimTextResponseFromNode ¶
func GetSlimTextResponseFromNode(node *ast.Node) (*SlimTextResponse, error)
type SubscriptionResponse ¶
type SubscriptionResponse struct {
Object string `json:"object"`
HasPaymentMethod bool `json:"has_payment_method"`
SoftLimitUSD float64 `json:"soft_limit_usd"`
HardLimitUSD float64 `json:"hard_limit_usd"`
SystemHardLimitUSD float64 `json:"system_hard_limit_usd"`
AccessUntil int64 `json:"access_until"`
}
type TextContent ¶
type TextRequest ¶
type TextResponse ¶
type TextResponseChoice ¶
type UsageAndChoicesResponse ¶
type UsageAndChoicesResponse struct {
Usage *model.Usage
Choices []*ChatCompletionsStreamResponseChoice
}
func GetUsageAndChoicesResponseFromNode ¶
func GetUsageAndChoicesResponseFromNode(node *ast.Node) (*UsageAndChoicesResponse, error)
type UsageOrResponseText ¶
type UsageResponse ¶
type WhisperJSONResponse ¶
type WhisperJSONResponse struct {
Text string `json:"text,omitempty"`
}
Click to show internal directories.
Click to hide internal directories.