Documentation
¶
Index ¶
- Constants
- Variables
- type APIError
- type Base64Embedding
- type ChatCompletionChoice
- type ChatCompletionMessage
- type ChatCompletionMessageContent
- type ChatCompletionMessageContentPart
- type ChatCompletionMessageContentPartType
- type ChatCompletionRequest (deprecated)
- type ChatCompletionResponse
- type ChatCompletionResponseChoicesElemModerationHitType
- type ChatCompletionStreamChoice
- type ChatCompletionStreamChoiceDelta
- type ChatCompletionStreamResponse
- type ChatMessageImageURL
- type ChatRequest
- type CompletionTokensDetails
- type Content
- type ContentGenerationContentItemType
- type ContentGenerationError
- type ContentGenerationUsage
- type ContextChatCompletionRequest
- type ContextMode
- type CreateChatCompletionRequest
- type CreateContentGenerationContentItem
- type CreateContentGenerationTaskRequest
- type CreateContentGenerationTaskResponse
- type CreateContextRequest
- type CreateContextResponse
- type DeleteContentGenerationTaskRequest
- type Embedding
- type EmbeddingEncodingFormat
- type EmbeddingRequest
- type EmbeddingRequestConverter
- type EmbeddingRequestStrings
- type EmbeddingRequestTokens
- type EmbeddingResponse
- type EmbeddingResponseBase64
- type ErrorResponse
- type FinishReason
- type FunctionCall
- type FunctionDefine (deprecated)
- type FunctionDefinition
- type GenerateImagesError
- type GenerateImagesRequest
- type GenerateImagesUsage
- type GetContentGenerationTaskRequest
- type GetContentGenerationTaskResponse
- type HttpHeader
- type Image
- type ImageURL
- type ImageURLDetail
- type ImagesResponse
- type ImagesStreamResponse
- type ListContentGenerationTaskItem
- type ListContentGenerationTasksFilter
- type ListContentGenerationTasksRequest
- type ListContentGenerationTasksResponse
- type LogProb
- type LogProbs
- type MultiModalEmbeddingInputType
- type MultiModalEmbeddingRequest
- type MultiModalEmbeddingResponseBase64
- type MultimodalEmbedding
- type MultimodalEmbeddingImageURL
- type MultimodalEmbeddingInput
- type MultimodalEmbeddingPromptTokensDetail
- type MultimodalEmbeddingResponse
- type MultimodalEmbeddingUsage
- type PromptTokensDetail
- type RawResponse
- type RequestError
- type Response
- type ResponseFormat
- type ResponseFormatType
- type SequentialImageGeneration
- type SequentialImageGenerationOptions
- type StreamOptions
- type Tool
- type ToolCall
- type ToolChoice
- type ToolChoiceFunction
- type ToolType
- type TopLogProbs
- type TruncationStrategy
- type TruncationStrategyType
- type Usage
Constants ¶
const ( ChatMessageRoleSystem = "system" ChatMessageRoleUser = "user" ChatMessageRoleAssistant = "assistant" ChatMessageRoleTool = "tool" )
const ( ToolChoiceStringTypeAuto = "auto" ToolChoiceStringTypeNone = "none" ToolChoiceStringTypeRequired = "required" )
const ( ClientRequestHeader = "X-Client-Request-Id" RetryAfterHeader = "Retry-After" DefaultMandatoryRefreshTimeout = 10 * 60 // 10 min DefaultAdvisoryRefreshTimeout = 30 * 60 // 30 min DefaultStsTimeout = 7 * 24 * 60 * 60 // 7 days InitialRetryDelay = 0.5 MaxRetryDelay = 8.0 ErrorRetryBaseDelay = 500 * time.Millisecond ErrorRetryMaxDelay = 8 * time.Second )
const ( StatusSucceeded = "succeeded" StatusCancelled = "cancelled" StatusFailed = "failed" StatusRunning = "running" StatusQueued = "queued" )
const ( GenerateImagesResponseFormatBase64 = "b64_json" GenerateImagesResponseFormatURL = "url" GenerateImagesSizeAdaptive = "adaptive" )
Variables ¶
var ( ErrTooManyEmptyStreamMessages = errors.New("stream has sent too many empty messages") ErrChatCompletionInvalidModel = errors.New("this model is not supported with this method, please use CreateCompletion client method instead") //nolint:lll ErrChatCompletionStreamNotSupported = errors.New("streaming is not supported with this method, please use CreateChatCompletionStream") //nolint:lll ErrContentFieldsMisused = errors.New("can't use both Content and MultiContent properties simultaneously") ErrBodyWithoutEndpoint = errors.New("can't fetch endpoint sts token without endpoint") ErrBodyWithoutBot = errors.New("can't fetch bot sts token without bot id") ErrAKSKNotSupported = errors.New("ak&sk authentication is currently not supported for this method, please use api key instead") )
Functions ¶
This section is empty.
Types ¶
type APIError ¶
type Base64Embedding ¶ added in v1.0.25
type Base64Embedding struct {
Object string `json:"object"`
Embedding base64String `json:"embedding"`
Index int `json:"index"`
}
Base64Embedding is a container for base64 encoded embeddings.
type ChatCompletionChoice ¶
type ChatCompletionChoice struct {
Index int `json:"index"`
Message ChatCompletionMessage `json:"message"`
// FinishReason
// stop: API returned complete message,
// or a message terminated by one of the stop sequences provided via the stop parameter
// length: Incomplete model output due to max_tokens parameter or token limit
// function_call: The model decided to call a function
// content_filter: Omitted content due to a flag from our content filters
// null: API response still in progress or incomplete
FinishReason FinishReason `json:"finish_reason"`
// ModerationHitType
// The type of content moderation strategy hit.
// Only after selecting a moderation strategy for the endpoint that supports returning moderation hit types,
// API will return the corresponding values.
ModerationHitType *ChatCompletionResponseChoicesElemModerationHitType `json:"moderation_hit_type,omitempty" yaml:"moderation_hit_type,omitempty" mapstructure:"moderation_hit_type,omitempty"`
LogProbs *LogProbs `json:"logprobs,omitempty"`
}
type ChatCompletionMessage ¶
type ChatCompletionMessage struct {
Role string `json:"role"`
Content *ChatCompletionMessageContent `json:"content"`
ReasoningContent *string `json:"reasoning_content,omitempty"`
Name *string `json:"name"`
FunctionCall *FunctionCall `json:"function_call,omitempty"`
ToolCalls []*ToolCall `json:"tool_calls,omitempty"`
ToolCallID string `json:"tool_call_id,omitempty"`
}
type ChatCompletionMessageContent ¶
type ChatCompletionMessageContent struct {
StringValue *string
ListValue []*ChatCompletionMessageContentPart
}
func (ChatCompletionMessageContent) MarshalJSON ¶
func (j ChatCompletionMessageContent) MarshalJSON() ([]byte, error)
MarshalJSON implements json.Marshaler.
func (*ChatCompletionMessageContent) UnmarshalJSON ¶
func (j *ChatCompletionMessageContent) UnmarshalJSON(b []byte) error
type ChatCompletionMessageContentPart ¶
type ChatCompletionMessageContentPart struct {
Type ChatCompletionMessageContentPartType `json:"type,omitempty"`
Text string `json:"text,omitempty"`
ImageURL *ChatMessageImageURL `json:"image_url,omitempty"`
}
type ChatCompletionMessageContentPartType ¶
type ChatCompletionMessageContentPartType string
const ( ChatCompletionMessageContentPartTypeText ChatCompletionMessageContentPartType = "text" ChatCompletionMessageContentPartTypeImageURL ChatCompletionMessageContentPartType = "image_url" )
type ChatCompletionRequest
deprecated
type ChatCompletionRequest struct {
Model string `json:"model"`
Messages []*ChatCompletionMessage `json:"messages"`
MaxTokens int `json:"max_tokens,omitempty"`
Temperature float32 `json:"temperature,omitempty"`
TopP float32 `json:"top_p,omitempty"`
Stream bool `json:"stream,omitempty"`
Stop []string `json:"stop,omitempty"`
FrequencyPenalty float32 `json:"frequency_penalty,omitempty"`
LogitBias map[string]int `json:"logit_bias,omitempty"`
LogProbs bool `json:"logprobs,omitempty"`
TopLogProbs int `json:"top_logprobs,omitempty"`
User string `json:"user,omitempty"`
FunctionCall interface{} `json:"function_call,omitempty"`
Tools []*Tool `json:"tools,omitempty"`
ToolChoice interface{} `json:"tool_choice,omitempty"`
StreamOptions *StreamOptions `json:"stream_options,omitempty"`
PresencePenalty float32 `json:"presence_penalty,omitempty"`
RepetitionPenalty float32 `json:"repetition_penalty,omitempty"`
N int `json:"n,omitempty"`
ResponseFormat *ResponseFormat `json:"response_format,omitempty"`
ServiceTier *string `json:"service_tier,omitempty"`
}
Deprecated: use `CreateChatCompletionRequest` instead. ChatCompletionRequest - When making a request using this struct, only non-zero fields take effect. This means that if your field value is 0, an empty string (""), false, or other zero values, it will not be sent to the server. The server will handle these fields according to their default values. If you need to specify a zero value, please use CreateChatCompletionRequest.
func (ChatCompletionRequest) GetModel ¶
func (r ChatCompletionRequest) GetModel() string
func (ChatCompletionRequest) IsStream ¶
func (r ChatCompletionRequest) IsStream() bool
func (ChatCompletionRequest) MarshalJSON ¶
func (r ChatCompletionRequest) MarshalJSON() ([]byte, error)
func (ChatCompletionRequest) WithStream ¶
func (r ChatCompletionRequest) WithStream(stream bool) ChatRequest
type ChatCompletionResponse ¶
type ChatCompletionResponse struct {
ID string `json:"id"`
Object string `json:"object"`
Created int64 `json:"created"`
Model string `json:"model"`
// mark the request is scale-tier or default, only exists for scale-tier
ServiceTier string `json:"service_tier,omitempty"`
Choices []*ChatCompletionChoice `json:"choices"`
Usage Usage `json:"usage"`
HttpHeader
}
ChatCompletionResponse represents a response structure for chat completion API.
type ChatCompletionResponseChoicesElemModerationHitType ¶
type ChatCompletionResponseChoicesElemModerationHitType string
const ( ChatCompletionResponseChoicesElemModerationHitTypeViolence ChatCompletionResponseChoicesElemModerationHitType = "violence" ChatCompletionResponseChoicesElemModerationHitTypeSevereViolation ChatCompletionResponseChoicesElemModerationHitType = "severe_violation" )
type ChatCompletionStreamChoice ¶
type ChatCompletionStreamChoice struct {
Index int `json:"index"`
Delta ChatCompletionStreamChoiceDelta `json:"delta"`
LogProbs *LogProbs `json:"logprobs,omitempty"`
FinishReason FinishReason `json:"finish_reason"`
ModerationHitType *ChatCompletionResponseChoicesElemModerationHitType `json:"moderation_hit_type,omitempty" yaml:"moderation_hit_type,omitempty" mapstructure:"moderation_hit_type,omitempty"`
}
type ChatCompletionStreamChoiceDelta ¶
type ChatCompletionStreamChoiceDelta struct {
Content string `json:"content,omitempty"`
Role string `json:"role,omitempty"`
ReasoningContent *string `json:"reasoning_content,omitempty"`
FunctionCall *FunctionCall `json:"function_call,omitempty"`
ToolCalls []*ToolCall `json:"tool_calls,omitempty"`
}
type ChatCompletionStreamResponse ¶
type ChatCompletionStreamResponse struct {
ID string `json:"id"`
Object string `json:"object"`
Created int64 `json:"created"`
Model string `json:"model"`
// mark the request is scale-tier or default, only exists for scale-tier
ServiceTier string `json:"service_tier,omitempty"`
Choices []*ChatCompletionStreamChoice `json:"choices"`
// An optional field that will only be present when you set stream_options: {"include_usage": true} in your request.
// When present, it contains a null value except for the last chunk which contains the token usage statistics
// for the entire request.
Usage *Usage `json:"usage,omitempty"`
}
type ChatMessageImageURL ¶
type ChatMessageImageURL struct {
URL string `json:"url,omitempty"`
Detail ImageURLDetail `json:"detail,omitempty"`
}
type ChatRequest ¶
type CompletionTokensDetails ¶ added in v1.0.12
type CompletionTokensDetails struct {
ReasoningTokens int `json:"reasoning_tokens"`
}
type ContentGenerationContentItemType ¶ added in v1.0.18
type ContentGenerationContentItemType string
const ( ContentGenerationContentItemTypeText ContentGenerationContentItemType = "text" ContentGenerationContentItemTypeImage ContentGenerationContentItemType = "image_url" )
type ContentGenerationError ¶ added in v1.0.18
type ContentGenerationUsage ¶ added in v1.0.18
type ContentGenerationUsage struct {
CompletionTokens int `json:"completion_tokens"`
}
type ContextChatCompletionRequest ¶
type ContextChatCompletionRequest struct {
ContextID string `json:"context_id"`
Mode ContextMode `json:"mode"`
Model string `json:"model"`
Messages []*ChatCompletionMessage `json:"messages"`
MaxTokens int `json:"max_tokens,omitempty"`
Temperature float32 `json:"temperature,omitempty"`
TopP float32 `json:"top_p,omitempty"`
Stream bool `json:"stream,omitempty"`
Stop []string `json:"stop,omitempty"`
FrequencyPenalty float32 `json:"frequency_penalty,omitempty"`
LogitBias map[string]int `json:"logit_bias,omitempty"`
LogProbs bool `json:"logprobs,omitempty"`
TopLogProbs int `json:"top_logprobs,omitempty"`
User string `json:"user,omitempty"`
FunctionCall interface{} `json:"function_call,omitempty"`
Tools []*Tool `json:"tools,omitempty"`
ToolChoice interface{} `json:"tool_choice,omitempty"`
StreamOptions *StreamOptions `json:"stream_options,omitempty"`
Metadata map[string]interface{} `json:"metadata,omitempty"`
}
type ContextMode ¶
type ContextMode string
const ( ContextModeSession ContextMode = "session" ContextModeCommonPrefix ContextMode = "common_prefix" )
type CreateChatCompletionRequest ¶
type CreateChatCompletionRequest struct {
Model string `json:"model"`
Messages []*ChatCompletionMessage `json:"messages"`
MaxTokens *int `json:"max_tokens,omitempty"`
Temperature *float32 `json:"temperature,omitempty"`
TopP *float32 `json:"top_p,omitempty"`
Stream *bool `json:"stream,omitempty"`
Stop []string `json:"stop,omitempty"`
FrequencyPenalty *float32 `json:"frequency_penalty,omitempty"`
LogitBias map[string]int `json:"logit_bias,omitempty"`
LogProbs *bool `json:"logprobs,omitempty"`
TopLogProbs *int `json:"top_logprobs,omitempty"`
User *string `json:"user,omitempty"`
FunctionCall interface{} `json:"function_call,omitempty"`
Tools []*Tool `json:"tools,omitempty"`
ToolChoice interface{} `json:"tool_choice,omitempty"`
StreamOptions *StreamOptions `json:"stream_options,omitempty"`
PresencePenalty *float32 `json:"presence_penalty,omitempty"`
RepetitionPenalty *float32 `json:"repetition_penalty,omitempty"`
N *int `json:"n,omitempty"`
ResponseFormat *ResponseFormat `json:"response_format,omitempty"`
ParallelToolCalls *bool `json:"parallel_tool_calls,omitempty"`
ServiceTier *string `json:"service_tier,omitempty"`
}
CreateChatCompletionRequest - When making a request using this struct, if your field value is 0, an empty string (""), false, or other zero values, it will be sent to the server. The server will handle these fields according to the specified values.
func (CreateChatCompletionRequest) GetModel ¶
func (r CreateChatCompletionRequest) GetModel() string
func (CreateChatCompletionRequest) IsStream ¶
func (r CreateChatCompletionRequest) IsStream() bool
func (CreateChatCompletionRequest) MarshalJSON ¶
func (r CreateChatCompletionRequest) MarshalJSON() ([]byte, error)
func (CreateChatCompletionRequest) WithStream ¶
func (r CreateChatCompletionRequest) WithStream(stream bool) ChatRequest
type CreateContentGenerationContentItem ¶ added in v1.0.18
type CreateContentGenerationContentItem struct {
Type ContentGenerationContentItemType `json:"type"`
Text *string `json:"text,omitempty"`
ImageURL *ImageURL `json:"image_url,omitempty"`
Role *string `json:"role,omitempty"`
}
type CreateContentGenerationTaskRequest ¶ added in v1.0.18
type CreateContentGenerationTaskRequest struct {
Model string `json:"model"`
Content []*CreateContentGenerationContentItem `json:"content"`
CallbackUrl *string `json:"callback_url,omitempty"`
ReturnLastFrame *bool `json:"return_last_frame,omitempty"`
}
type CreateContentGenerationTaskResponse ¶ added in v1.0.18
type CreateContentGenerationTaskResponse struct {
ID string `json:"id"`
HttpHeader
}
type CreateContextRequest ¶
type CreateContextRequest struct {
Model string `json:"model"`
Mode ContextMode `json:"mode"`
Messages []*ChatCompletionMessage `json:"messages"`
TTL *int `json:"ttl,omitempty"`
TruncationStrategy *TruncationStrategy `json:"truncation_strategy,omitempty"`
}
type CreateContextResponse ¶
type CreateContextResponse struct {
ID string `json:"id"`
Mode ContextMode `json:"mode"`
Model string `json:"model"`
TTL *int `json:"ttl,omitempty"`
TruncationStrategy *TruncationStrategy `json:"truncation_strategy,omitempty"`
Usage Usage `json:"usage"`
HttpHeader
}
type DeleteContentGenerationTaskRequest ¶ added in v1.0.18
type DeleteContentGenerationTaskRequest struct {
ID string `json:"id"`
}
type Embedding ¶ added in v1.0.25
type Embedding struct {
Object string `json:"object"`
Embedding []float32 `json:"embedding"`
Index int `json:"index"`
}
Embedding is a special format of data representation that can be easily utilized by machine learning models and algorithms. The embedding is an information dense representation of the semantic meaning of a piece of text. Each embedding is a vector of floating point numbers, such that the distance between two embeddings in the vector space is correlated with semantic similarity between two inputs in the original format. For example, if two texts are similar, then their vector representations should also be similar.
type EmbeddingEncodingFormat ¶ added in v1.0.25
type EmbeddingEncodingFormat string
EmbeddingEncodingFormat is the format of the embeddings data. Currently, only "float" and "base64" are supported; however, "base64" is not officially documented. If not specified, "float" will be used.
const ( EmbeddingEncodingFormatFloat EmbeddingEncodingFormat = "float" EmbeddingEncodingFormatBase64 EmbeddingEncodingFormat = "base64" )
type EmbeddingRequest ¶ added in v1.0.25
type EmbeddingRequest struct {
Input interface{} `json:"input"`
Model string `json:"model"`
User string `json:"user"`
EncodingFormat EmbeddingEncodingFormat `json:"encoding_format,omitempty"`
// Dimensions The number of dimensions the resulting output embeddings should have.
// Only supported in text-embedding-3 and later models.
Dimensions int `json:"dimensions,omitempty"`
}
func (EmbeddingRequest) Convert ¶ added in v1.0.25
func (r EmbeddingRequest) Convert() EmbeddingRequest
type EmbeddingRequestConverter ¶ added in v1.0.25
type EmbeddingRequestConverter interface {
// Needs to be of type EmbeddingRequestStrings or EmbeddingRequestTokens
Convert() EmbeddingRequest
}
type EmbeddingRequestStrings ¶ added in v1.0.25
type EmbeddingRequestStrings struct {
// Input is a slice of strings for which you want to generate an Embedding vector.
// Each input must not exceed 8192 tokens in length.
// OpenAPI suggests replacing newlines (\n) in your input with a single space, as they
// have observed inferior results when newlines are present.
// E.g.
// "The food was delicious and the waiter..."
Input []string `json:"input"`
// ID of the model to use. You can use the List models API to see all of your available models,
// or see our Model overview for descriptions of them.
Model string `json:"model"`
// A unique identifier representing your end-user, which will help to monitor and detect abuse.
User string `json:"user"`
// EmbeddingEncodingFormat is the format of the embeddings data.
// Currently, only "float" and "base64" are supported, however, "base64" is not officially documented.
// If not specified will use "float".
EncodingFormat EmbeddingEncodingFormat `json:"encoding_format,omitempty"`
// Dimensions The number of dimensions the resulting output embeddings should have.
// Only supported in text-embedding-3 and later models.
Dimensions int `json:"dimensions,omitempty"`
}
EmbeddingRequestStrings is the input to a create embeddings request with a slice of strings.
func (EmbeddingRequestStrings) Convert ¶ added in v1.0.25
func (r EmbeddingRequestStrings) Convert() EmbeddingRequest
type EmbeddingRequestTokens ¶ added in v1.0.25
type EmbeddingRequestTokens struct {
// Input is a slice of slices of ints ([][]int) for which you want to generate an Embedding vector.
// Each input must not exceed 8192 tokens in length.
// OpenAPI suggests replacing newlines (\n) in your input with a single space, as they
// have observed inferior results when newlines are present.
// E.g.
// "The food was delicious and the waiter..."
Input [][]int `json:"input"`
// ID of the model to use. You can use the List models API to see all of your available models,
// or see our Model overview for descriptions of them.
Model string `json:"model"`
// A unique identifier representing your end-user, which will help to monitor and detect abuse.
User string `json:"user"`
// EmbeddingEncodingFormat is the format of the embeddings data.
// Currently, only "float" and "base64" are supported, however, "base64" is not officially documented.
// If not specified will use "float".
EncodingFormat EmbeddingEncodingFormat `json:"encoding_format,omitempty"`
// Dimensions The number of dimensions the resulting output embeddings should have.
// Only supported in text-embedding-3 and later models.
Dimensions int `json:"dimensions,omitempty"`
}
func (EmbeddingRequestTokens) Convert ¶ added in v1.0.25
func (r EmbeddingRequestTokens) Convert() EmbeddingRequest
type EmbeddingResponse ¶ added in v1.0.25
type EmbeddingResponse struct {
ID string `json:"id"`
Created int `json:"created"`
Object string `json:"object"`
Data []Embedding `json:"data"`
Model string `json:"model"`
Usage Usage `json:"usage"`
HttpHeader
}
EmbeddingResponse is the response from a Create embeddings request.
type EmbeddingResponseBase64 ¶ added in v1.0.25
type EmbeddingResponseBase64 struct {
Object string `json:"object"`
Data []Base64Embedding `json:"data"`
Model string `json:"model"`
Usage Usage `json:"usage"`
HttpHeader
}
EmbeddingResponseBase64 is the response from a Create embeddings request with base64 encoding format.
func (*EmbeddingResponseBase64) ToEmbeddingResponse ¶ added in v1.0.25
func (r *EmbeddingResponseBase64) ToEmbeddingResponse() (EmbeddingResponse, error)
ToEmbeddingResponse converts an EmbeddingResponseBase64 to an EmbeddingResponse.
type ErrorResponse ¶
type ErrorResponse struct {
Error *APIError `json:"error,omitempty"`
}
type FinishReason ¶
type FinishReason string
const ( FinishReasonStop FinishReason = "stop" FinishReasonLength FinishReason = "length" FinishReasonFunctionCall FinishReason = "function_call" FinishReasonToolCalls FinishReason = "tool_calls" FinishReasonContentFilter FinishReason = "content_filter" FinishReasonNull FinishReason = "null" )
func (FinishReason) MarshalJSON ¶
func (r FinishReason) MarshalJSON() ([]byte, error)
type FunctionCall ¶
type FunctionDefine
deprecated
type FunctionDefine = FunctionDefinition
Deprecated: use FunctionDefinition instead.
type FunctionDefinition ¶
type FunctionDefinition struct {
Name string `json:"name"`
Description string `json:"description,omitempty"`
// Parameters is an object describing the function.
// You can pass json.RawMessage to describe the schema,
// or you can pass in a struct which serializes to the proper JSON schema.
// The jsonschema package is provided for convenience, but you should
// consider another specialized library if you require more complex schemas.
Parameters interface{} `json:"parameters"`
}
type GenerateImagesError ¶ added in v1.0.18
type GenerateImagesRequest ¶ added in v1.0.18
type GenerateImagesRequest struct {
Model string `json:"model"`
Prompt string `json:"prompt"`
Image interface{} `json:"image,omitempty"`
ResponseFormat *string `json:"response_format,omitempty"`
Seed *int64 `json:"seed,omitempty"`
GuidanceScale *float64 `json:"guidance_scale,omitempty"`
Size *string `json:"size,omitempty"`
Watermark *bool `json:"watermark,omitempty"`
OptimizePrompt *bool `json:"optimize_prompt,omitempty"`
SequentialImageGeneration *SequentialImageGeneration `json:"sequential_image_generation,omitempty"`
SequentialImageGenerationOptions *SequentialImageGenerationOptions `json:"sequential_image_generation_options,omitempty"`
}
func (*GenerateImagesRequest) NormalizeImages ¶ added in v1.0.27
func (req *GenerateImagesRequest) NormalizeImages() error
type GenerateImagesUsage ¶ added in v1.0.18
type GetContentGenerationTaskRequest ¶ added in v1.0.18
type GetContentGenerationTaskRequest struct {
ID string `json:"id"`
}
type GetContentGenerationTaskResponse ¶ added in v1.0.18
type GetContentGenerationTaskResponse struct {
ID string `json:"id"`
Model string `json:"model"`
Status string `json:"status"`
Error *ContentGenerationError `json:"error,omitempty"`
Content Content `json:"content"`
Usage Usage `json:"usage"`
CreatedAt int64 `json:"created_at"`
UpdatedAt int64 `json:"updated_at"`
Seed *int64 `json:"seed,omitempty"`
RevisedPrompt *string `json:"revised_prompt,omitempty"`
HttpHeader
}
type HttpHeader ¶
func (*HttpHeader) GetHeader ¶
func (h *HttpHeader) GetHeader() http.Header
func (*HttpHeader) Header ¶
func (h *HttpHeader) Header() http.Header
func (*HttpHeader) SetHeader ¶
func (h *HttpHeader) SetHeader(header http.Header)
type ImageURLDetail ¶
type ImageURLDetail string
const ( ImageURLDetailHigh ImageURLDetail = "high" ImageURLDetailLow ImageURLDetail = "low" ImageURLDetailAuto ImageURLDetail = "auto" )
type ImagesResponse ¶ added in v1.0.18
type ImagesResponse struct {
Model string `json:"model"`
Created int64 `json:"created"`
Data []*Image `json:"data"`
Usage *GenerateImagesUsage `json:"usage,omitempty"`
Error *GenerateImagesError `json:"error,omitempty"`
HttpHeader
}
type ImagesStreamResponse ¶ added in v1.0.27
type ImagesStreamResponse struct {
Type string `json:"type"`
Model string `json:"model"`
Created int64 `json:"created"`
ImageIndex int64 `json:"image_index"`
Url *string `json:"url,omitempty"`
B64Json *string `json:"b64_json,omitempty"`
Size string `json:"size"`
Usage *GenerateImagesUsage `json:"usage,omitempty"`
Error *GenerateImagesError `json:"error,omitempty"`
HttpHeader
}
type ListContentGenerationTaskItem ¶ added in v1.0.18
type ListContentGenerationTaskItem struct {
ID string `json:"id"`
Model string `json:"model"`
Status string `json:"status"`
FailureReason *ContentGenerationError `json:"failure_reason,omitempty"`
Content Content `json:"content"`
Usage Usage `json:"usage"`
CreatedAt int64 `json:"created_at"`
UpdatedAt int64 `json:"updated_at"`
Seed *int64 `json:"seed,omitempty"`
RevisedPrompt *string `json:"revised_prompt,omitempty"`
}
type ListContentGenerationTasksFilter ¶ added in v1.0.18
type ListContentGenerationTasksRequest ¶ added in v1.0.18
type ListContentGenerationTasksRequest struct {
PageNum *int `json:"page_num,omitempty"`
PageSize *int `json:"page_size,omitempty"`
Filter *ListContentGenerationTasksFilter `json:"filter,omitempty"`
}
type ListContentGenerationTasksResponse ¶ added in v1.0.18
type ListContentGenerationTasksResponse struct {
Total int64 `json:"total"`
Items []ListContentGenerationTaskItem `json:"items"`
HttpHeader
}
type LogProb ¶
type LogProb struct {
Token string `json:"token"`
LogProb float64 `json:"logprob"`
Bytes []rune `json:"bytes,omitempty"` // Omitting the field if it is null
// TopLogProbs is a list of the most likely tokens and their log probability, at this token position.
// In rare cases, there may be fewer than the number of requested top_logprobs returned.
TopLogProbs []*TopLogProbs `json:"top_logprobs"`
}
LogProb represents the probability information for a token.
type LogProbs ¶
type LogProbs struct {
// Content is a list of message content tokens with log probability information.
Content []*LogProb `json:"content"`
}
LogProbs is the top-level structure containing the log probability information.
type MultiModalEmbeddingInputType ¶ added in v1.0.25
type MultiModalEmbeddingInputType string
const ( MultiModalEmbeddingInputTypeText MultiModalEmbeddingInputType = "text" MultiModalEmbeddingInputTypeImageURL MultiModalEmbeddingInputType = "image_url" )
type MultiModalEmbeddingRequest ¶ added in v1.0.25
type MultiModalEmbeddingRequest struct {
Input []MultimodalEmbeddingInput `json:"input"`
// ID of the model to use. You can use the List models API to see all of your available models,
// or see our Model overview for descriptions of them.
Model string `json:"model"`
// EmbeddingEncodingFormat is the format of the embeddings data.
// Currently, only "float" and "base64" are supported, however, "base64" is not officially documented.
// If not specified will use "float".
EncodingFormat *EmbeddingEncodingFormat `json:"encoding_format,omitempty"`
// Dimensions Value range: 1024 or 2048.
// Specifies the dimensionality of the output embedding vector.
// This parameter is only supported in doubao-embedding-vision-250615 and later versions.
Dimensions *int `json:"dimensions,omitempty"`
}
MultiModalEmbeddingRequest is the input to a create embeddings request.
type MultiModalEmbeddingResponseBase64 ¶ added in v1.0.25
type MultiModalEmbeddingResponseBase64 struct {
Id string `json:"id"`
Model string `json:"model"`
Created int64 `json:"created"`
Object string `json:"object"`
Data Base64Embedding `json:"data"`
Usage MultimodalEmbeddingUsage `json:"usage"`
HttpHeader
}
MultiModalEmbeddingResponseBase64 is the response from a Create embeddings request with base64 encoding format.
func (*MultiModalEmbeddingResponseBase64) ToMultiModalEmbeddingResponse ¶ added in v1.0.25
func (r *MultiModalEmbeddingResponseBase64) ToMultiModalEmbeddingResponse() (MultimodalEmbeddingResponse, error)
ToMultiModalEmbeddingResponse converts a MultiModalEmbeddingResponseBase64 to a MultimodalEmbeddingResponse.
type MultimodalEmbedding ¶ added in v1.0.25
type MultimodalEmbeddingImageURL ¶ added in v1.0.25
type MultimodalEmbeddingImageURL struct {
URL string `json:"url"`
}
type MultimodalEmbeddingInput ¶ added in v1.0.25
type MultimodalEmbeddingInput struct {
Type MultiModalEmbeddingInputType `json:"type"`
Text *string `json:"text,omitempty"`
ImageURL *MultimodalEmbeddingImageURL `json:"image_url,omitempty"`
}
type MultimodalEmbeddingPromptTokensDetail ¶ added in v1.0.25
type MultimodalEmbeddingResponse ¶ added in v1.0.25
type MultimodalEmbeddingResponse struct {
Id string `json:"id"`
Model string `json:"model"`
Created int64 `json:"created"`
Object string `json:"object"`
Data MultimodalEmbedding `json:"data"`
Usage MultimodalEmbeddingUsage `json:"usage"`
HttpHeader
}
type MultimodalEmbeddingUsage ¶ added in v1.0.25
type MultimodalEmbeddingUsage struct {
PromptTokens int `json:"prompt_tokens"`
TotalTokens int `json:"total_tokens"`
PromptTokensDetails MultimodalEmbeddingPromptTokensDetail `json:"prompt_tokens_details"`
}
type PromptTokensDetail ¶
type PromptTokensDetail struct {
CachedTokens int `json:"cached_tokens"`
}
type RawResponse ¶
type RawResponse struct {
io.ReadCloser
HttpHeader
}
type RequestError ¶
RequestError provides information about generic request errors.
func NewRequestError ¶
func NewRequestError(httpStatusCode int, rawErr error, requestID string) *RequestError
func (*RequestError) Error ¶
func (e *RequestError) Error() string
func (*RequestError) Unwrap ¶
func (e *RequestError) Unwrap() error
type ResponseFormat ¶
type ResponseFormat struct {
Type ResponseFormatType `json:"type"`
Schema interface{} `json:"schema,omitempty"`
}
type ResponseFormatType ¶
type ResponseFormatType string
const ( ResponseFormatJsonObject ResponseFormatType = "json_object" ResponseFormatText ResponseFormatType = "text" )
type SequentialImageGeneration ¶ added in v1.0.27
type SequentialImageGeneration string
type SequentialImageGenerationOptions ¶ added in v1.0.27
type SequentialImageGenerationOptions struct {
MaxImages *int `json:"max_images,omitempty"`
}
type StreamOptions ¶
type StreamOptions struct {
// If set, an additional chunk will be streamed before the data: [DONE] message.
// The usage field on this chunk shows the token usage statistics for the entire request,
// and the choices field will always be an empty array.
// All other chunks will also include a usage field, but with a null value.
IncludeUsage bool `json:"include_usage,omitempty"`
// if set, each data chunk will include a `usage` field
// representing the current cumulative token usage for the entire request.
ChunkIncludeUsage bool `json:"chunk_include_usage,omitempty"`
}
type Tool ¶
type Tool struct {
Type ToolType `json:"type"`
Function *FunctionDefinition `json:"function,omitempty"`
}
type ToolCall ¶
type ToolCall struct {
ID string `json:"id"`
Type ToolType `json:"type"`
Function FunctionCall `json:"function"`
Index *int `json:"index,omitempty"`
}
type ToolChoice ¶
type ToolChoice struct {
Type ToolType `json:"type"`
Function ToolChoiceFunction `json:"function,omitempty"`
}
type ToolChoiceFunction ¶
type ToolChoiceFunction struct {
Name string `json:"name"`
}
type TopLogProbs ¶
type TruncationStrategy ¶
type TruncationStrategy struct {
Type TruncationStrategyType `json:"type"`
LastHistoryTokens *int `json:"last_history_tokens,omitempty"`
RollingTokens *bool `json:"rolling_tokens,omitempty"`
}
type TruncationStrategyType ¶
type TruncationStrategyType string
const ( TruncationStrategyTypeLastHistoryTokens TruncationStrategyType = "last_history_tokens" TruncationStrategyTypeRollingTokens TruncationStrategyType = "rolling_tokens" )
type Usage ¶
type Usage struct {
PromptTokens int `json:"prompt_tokens"`
CompletionTokens int `json:"completion_tokens"`
TotalTokens int `json:"total_tokens"`
PromptTokensDetails PromptTokensDetail `json:"prompt_tokens_details"`
CompletionTokensDetails CompletionTokensDetails `json:"completion_tokens_details"`
}