model

package
v1.2.9

Published: Jan 23, 2026 License: Apache-2.0 Imports: 9 Imported by: 56

Documentation

Index

Constants

const (
	ChatMessageRoleSystem    = "system"
	ChatMessageRoleUser      = "user"
	ChatMessageRoleAssistant = "assistant"
	ChatMessageRoleTool      = "tool"
)
const (
	ToolChoiceStringTypeAuto     = "auto"
	ToolChoiceStringTypeNone     = "none"
	ToolChoiceStringTypeRequired = "required"
)
const (
	ClientRequestHeader = "X-Client-Request-Id"
	RetryAfterHeader    = "Retry-After"

	ClientSessionTokenHeader = "X-Session-Token"
	ClientEncryptInfoHeader  = "X-Encrypt-Info"
	ClientIsEncryptedHeader  = "x-is-encrypted"

	DefaultMandatoryRefreshTimeout = 10 * 60          // 10 min
	DefaultAdvisoryRefreshTimeout  = 30 * 60          // 30 min
	DefaultStsTimeout              = 7 * 24 * 60 * 60 // 7 days

	InitialRetryDelay = 0.5
	MaxRetryDelay     = 8.0

	ErrorRetryBaseDelay = 500 * time.Millisecond
	ErrorRetryMaxDelay  = 8 * time.Second
)
const (
	StatusSucceeded = "succeeded"
	StatusCancelled = "cancelled"
	StatusFailed    = "failed"
	StatusRunning   = "running"
	StatusQueued    = "queued"
)
const (
	GenerateImagesResponseFormatBase64 = "b64_json"

	GenerateImagesResponseFormatURL = "url"

	GenerateImagesSizeAdaptive = "adaptive"

	OptimizePromptThinkingAuto = "auto"

	OptimizePromptThinkingEnabled = "enabled"

	OptimizePromptThinkingDisabled = "disabled"

	OptimizePromptModeStandard = "standard"

	OptimizePromptModeFast = "fast"

	SequentialImageGenerationAuto = "auto"

	SequentialImageGenerationDisabled = "disabled"

	ImageGenerationStreamEventPartialSucceeded = "image_generation.partial_succeeded"

	ImageGenerationStreamEventPartialFailed = "image_generation.partial_failed"

	ImageGenerationStreamEventCompleted = "image_generation.completed"
)
const CipherVersionAICCv01 = "AICCv0.1"

Variables

var (
	ErrTooManyEmptyStreamMessages       = errors.New("stream has sent too many empty messages")
	ErrChatCompletionInvalidModel       = errors.New("this model is not supported with this method, please use CreateCompletion client method instead") //nolint:lll
	ErrChatCompletionStreamNotSupported = errors.New("streaming is not supported with this method, please use CreateChatCompletionStream")              //nolint:lll
	ErrContentFieldsMisused             = errors.New("can't use both Content and MultiContent properties simultaneously")
	ErrBodyWithoutEndpoint              = errors.New("can't fetch endpoint sts token without endpoint")
	ErrBodyWithoutBot                   = errors.New("can't fetch bot sts token without bot id")
	ErrAKSKNotSupported                 = errors.New("ak&sk authentication is currently not supported for this method, please use api key instead")
	ErrBodyWithoutProjectName           = errors.New("project name is required for preset endpoint")
)

Functions

This section is empty.

Types

type APIError

type APIError struct {
	Code           string  `json:"code,omitempty"`
	Message        string  `json:"message"`
	Param          *string `json:"param,omitempty"`
	Type           string  `json:"type"`
	HTTPStatusCode int     `json:"-"`
	RequestId      string  `json:"request_id"`
}

func (*APIError) Error

func (e *APIError) Error() string
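
A minimal, hedged error-handling sketch: client, ctx, and req are assumed to come from the runtime client that uses this package (they are not defined here), and whether a given call site wraps the error is up to that client; the snippet only relies on *APIError satisfying the error interface.

// Requires the standard errors and log packages.
_, err := client.CreateChatCompletion(ctx, req) // assumed client call
if err != nil {
	var apiErr *model.APIError
	if errors.As(err, &apiErr) {
		log.Printf("request %s failed: HTTP %d, code=%s: %s",
			apiErr.RequestId, apiErr.HTTPStatusCode, apiErr.Code, apiErr.Message)
	}
}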

type Base64Embedding

type Base64Embedding struct {
	Object    string       `json:"object"`
	Embedding base64String `json:"embedding"`
	Index     int          `json:"index"`
}

Base64Embedding is a container for base64 encoded embeddings.

type BotActionDetail added in v1.1.18

type BotActionDetail struct {
	Name        string           `json:"name"`                   // Plugin category name, e.g. content_plugin (content plugin)
	Count       int              `json:"count"`                  // Number of plugin calls in this category for this request
	ToolDetails []*BotToolDetail `json:"tool_details,omitempty"` // Details of the plugin calls in this category
}

type BotActionUsage added in v1.0.146

type BotActionUsage struct {
	Name             string  `json:"name"`
	PromptTokens     string  `json:"prompt_tokens,omitempty"`
	CompletionTokens int     `json:"completion_tokens,omitempty"`
	TotalTokens      int     `json:"total_tokens,omitempty"`
	SearchCount      int     `json:"search_count,omitempty"`
	ActionName       *string `json:"action_name,omitempty"`
	Count            *int    `json:"count,omitempty"`
}

type BotChatCompletionRequest added in v1.0.146

type BotChatCompletionRequest struct {
	BotId             string                   `json:"bot_id,omitempty"`
	Model             string                   `json:"model"`
	Messages          []*ChatCompletionMessage `json:"messages"`
	MaxTokens         int                      `json:"max_tokens,omitempty"`
	Temperature       float32                  `json:"temperature,omitempty"`
	TopP              float32                  `json:"top_p,omitempty"`
	Stream            bool                     `json:"stream,omitempty"`
	Stop              []string                 `json:"stop,omitempty"`
	FrequencyPenalty  float32                  `json:"frequency_penalty,omitempty"`
	LogitBias         map[string]int           `json:"logit_bias,omitempty"`
	LogProbs          bool                     `json:"logprobs,omitempty"`
	TopLogProbs       int                      `json:"top_logprobs,omitempty"`
	User              string                   `json:"user,omitempty"`
	FunctionCall      interface{}              `json:"function_call,omitempty"`
	Tools             []*Tool                  `json:"tools,omitempty"`
	ToolChoice        interface{}              `json:"tool_choice,omitempty"`
	StreamOptions     *StreamOptions           `json:"stream_options,omitempty"`
	PresencePenalty   float32                  `json:"presence_penalty,omitempty"`
	RepetitionPenalty float32                  `json:"repetition_penalty,omitempty"`
	N                 int                      `json:"n,omitempty"`
	ResponseFormat    *ResponseFormat          `json:"response_format,omitempty"`
	Thinking          *Thinking                `json:"thinking,omitempty"`
	Metadata          map[string]interface{}   `json:"metadata,omitempty"`
}

type BotChatCompletionResponse added in v1.0.146

type BotChatCompletionResponse struct {
	ChatCompletionResponse
	Metadata   map[string]interface{}    `json:"metadata,omitempty"`
	BotUsage   *BotUsage                 `json:"bot_usage,omitempty"`
	References []*BotChatResultReference `json:"references,omitempty"`
}

type BotChatCompletionStreamResponse added in v1.0.146

type BotChatCompletionStreamResponse struct {
	ChatCompletionStreamResponse
	Metadata   map[string]interface{}    `json:"metadata,omitempty"`
	BotUsage   *BotUsage                 `json:"bot_usage,omitempty"`
	References []*BotChatResultReference `json:"references,omitempty"`
}

type BotChatResultReference added in v1.0.146

type BotChatResultReference struct {
	Url                string                 `json:"url,omitempty"`
	LogoUrl            string                 `json:"logo_url,omitempty"`
	MobileUrl          string                 `json:"mobile_url,omitempty"`
	SiteName           string                 `json:"site_name,omitempty"`
	Title              string                 `json:"title,omitempty"`
	CoverImage         *BotCoverImage         `json:"cover_image,omitempty"`
	Summary            string                 `json:"summary,omitempty"`
	PublishTime        string                 `json:"publish_time,omitempty"`
	CollectionName     string                 `json:"collection_name,omitempty"`
	Project            string                 `json:"project,omitempty"`
	DocId              string                 `json:"doc_id,omitempty"`
	DocName            string                 `json:"doc_name,omitempty"`
	DocType            string                 `json:"doc_type,omitempty"`
	DocTitle           string                 `json:"doc_title,omitempty"`
	ChunkId            string                 `json:"chunk_id,omitempty"`
	ChunkTitle         string                 `json:"chunk_title,omitempty"`
	PageNums           string                 `json:"page_nums,omitempty"`
	OriginTextTokenLen int                    `json:"origin_text_token_len,omitempty"`
	FileName           string                 `json:"file_name,omitempty"`
	Extra              map[string]interface{} `json:"extra,omitempty"`
}

type BotCoverImage added in v1.0.146

type BotCoverImage struct {
	Url    string `json:"url,omitempty"`
	Width  int    `json:"width,omitempty"`
	Height int    `json:"height,omitempty"`
}

type BotModelUsage added in v1.0.146

type BotModelUsage struct {
	Usage
	Name string `json:"name"`
}

type BotToolDetail added in v1.1.18

type BotToolDetail struct {
	Name        string      `json:"name"`         // Name of the specific tool called
	Input       interface{} `json:"input"`        // Plugin input: the data structure sent when calling the plugin
	Output      interface{} `json:"output"`       // Plugin output: the data structure returned by the plugin call
	CreatedAt   int         `json:"created_at"`   // Time the plugin call started
	CompletedAt int         `json:"completed_at"` // Time the plugin call completed
}

type BotUsage added in v1.0.146

type BotUsage struct {
	ModelUsage  []*BotModelUsage  `json:"model_usage,omitempty"`
	ActionUsage []*BotActionUsage `json:"action_usage,omitempty"`

	ActionDetails []*BotActionDetail `json:"action_details,omitempty"` // Plugin call details for this request
}

type CertificateResponse added in v1.2.9

type CertificateResponse struct {
	Error       map[string]string `json:"error,omitempty"`
	Certificate string            `json:"Certificate"`
}

type ChatCompletionChoice

type ChatCompletionChoice struct {
	Index   int                   `json:"index"`
	Message ChatCompletionMessage `json:"message"`
	// FinishReason
	// stop: API returned complete message,
	// or a message terminated by one of the stop sequences provided via the stop parameter
	// length: Incomplete model output due to max_tokens parameter or token limit
	// function_call: The model decided to call a function
	// content_filter: Omitted content due to a flag from our content filters
	// null: API response still in progress or incomplete
	FinishReason FinishReason `json:"finish_reason"`
	// ModerationHitType
	// The type of content moderation strategy hit.
	// Only after selecting a moderation strategy for the endpoint that supports returning moderation hit types,
	// API will return the corresponding values.
	ModerationHitType *ChatCompletionResponseChoicesElemModerationHitType `json:"moderation_hit_type,omitempty" yaml:"moderation_hit_type,omitempty" mapstructure:"moderation_hit_type,omitempty"`
	LogProbs          *LogProbs                                           `json:"logprobs,omitempty"`
}

type ChatCompletionMessage

type ChatCompletionMessage struct {
	Role             string                        `json:"role"`
	Content          *ChatCompletionMessageContent `json:"content"`
	ReasoningContent *string                       `json:"reasoning_content,omitempty"`
	Name             *string                       `json:"name"`
	FunctionCall     *FunctionCall                 `json:"function_call,omitempty"`
	ToolCalls        []*ToolCall                   `json:"tool_calls,omitempty"`
	ToolCallID       string                        `json:"tool_call_id,omitempty"`
}

type ChatCompletionMessageContent

type ChatCompletionMessageContent struct {
	StringValue *string
	ListValue   []*ChatCompletionMessageContentPart
}

func (ChatCompletionMessageContent) MarshalJSON

func (j ChatCompletionMessageContent) MarshalJSON() ([]byte, error)

MarshalJSON implements json.Marshaler.

func (*ChatCompletionMessageContent) UnmarshalJSON

func (j *ChatCompletionMessageContent) UnmarshalJSON(b []byte) error

type ChatCompletionMessageContentPart

type ChatCompletionMessageContentPart struct {
	Type     ChatCompletionMessageContentPartType `json:"type,omitempty"`
	Text     string                               `json:"text,omitempty"`
	ImageURL *ChatMessageImageURL                 `json:"image_url,omitempty"`
	VideoURL *ChatMessageVideoURL                 `json:"video_url,omitempty"`
}

type ChatCompletionMessageContentPartType

type ChatCompletionMessageContentPartType string
const (
	ChatCompletionMessageContentPartTypeText     ChatCompletionMessageContentPartType = "text"
	ChatCompletionMessageContentPartTypeImageURL ChatCompletionMessageContentPartType = "image_url"
	ChatCompletionMessageContentPartTypeVideoURL ChatCompletionMessageContentPartType = "video_url"
)
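
As a hedged illustration of how ChatCompletionMessageContent is intended to be populated (set exactly one of StringValue or ListValue; the image URL below is a placeholder):

// Plain-text content: set StringValue only.
text := "Describe this image."
textMsg := &model.ChatCompletionMessage{
	Role:    model.ChatMessageRoleUser,
	Content: &model.ChatCompletionMessageContent{StringValue: &text},
}

// Multimodal content: set ListValue with text and image_url parts.
multiMsg := &model.ChatCompletionMessage{
	Role: model.ChatMessageRoleUser,
	Content: &model.ChatCompletionMessageContent{
		ListValue: []*model.ChatCompletionMessageContentPart{
			{Type: model.ChatCompletionMessageContentPartTypeText, Text: "What is in this picture?"},
			{
				Type:     model.ChatCompletionMessageContentPartTypeImageURL,
				ImageURL: &model.ChatMessageImageURL{URL: "https://example.com/cat.png"}, // placeholder URL
			},
		},
	},
}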

type ChatCompletionRequest deprecated

type ChatCompletionRequest struct {
	Model             string                   `json:"model"`
	Messages          []*ChatCompletionMessage `json:"messages"`
	MaxTokens         int                      `json:"max_tokens,omitempty"`
	Temperature       float32                  `json:"temperature,omitempty"`
	TopP              float32                  `json:"top_p,omitempty"`
	Stream            bool                     `json:"stream,omitempty"`
	Stop              []string                 `json:"stop,omitempty"`
	FrequencyPenalty  float32                  `json:"frequency_penalty,omitempty"`
	LogitBias         map[string]int           `json:"logit_bias,omitempty"`
	LogProbs          bool                     `json:"logprobs,omitempty"`
	TopLogProbs       int                      `json:"top_logprobs,omitempty"`
	User              string                   `json:"user,omitempty"`
	FunctionCall      interface{}              `json:"function_call,omitempty"`
	Tools             []*Tool                  `json:"tools,omitempty"`
	ToolChoice        interface{}              `json:"tool_choice,omitempty"`
	StreamOptions     *StreamOptions           `json:"stream_options,omitempty"`
	PresencePenalty   float32                  `json:"presence_penalty,omitempty"`
	RepetitionPenalty float32                  `json:"repetition_penalty,omitempty"`
	N                 int                      `json:"n,omitempty"`
	ResponseFormat    *ResponseFormat          `json:"response_format,omitempty"`
	ServiceTier       *string                  `json:"service_tier,omitempty"`
}

Deprecated: use `CreateChatCompletionRequest` instead. ChatCompletionRequest - when making a request with this struct, only non-zero fields take effect: a field set to 0, an empty string (""), false, or any other zero value is not sent to the server, and the server falls back to its default for that field. If you need to send an explicit zero value, use CreateChatCompletionRequest.
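
For example (a sketch; the endpoint ID and messages variable are placeholders), a zero-valued field such as Temperature is simply dropped from the request body:

// With the deprecated struct, zero values are omitted from the payload,
// so Temperature below is NOT sent and the server default applies.
req := model.ChatCompletionRequest{
	Model:       "<endpoint-id>", // placeholder
	Messages:    messages,        // assumed []*model.ChatCompletionMessage
	Temperature: 0,               // zero value: omitted
	MaxTokens:   1024,            // non-zero: sent
}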

func (ChatCompletionRequest) GetModel added in v1.0.159

func (r ChatCompletionRequest) GetModel() string

func (ChatCompletionRequest) IsStream added in v1.0.159

func (r ChatCompletionRequest) IsStream() bool

func (ChatCompletionRequest) MarshalJSON added in v1.0.159

func (r ChatCompletionRequest) MarshalJSON() ([]byte, error)

func (ChatCompletionRequest) WithStream added in v1.0.159

func (r ChatCompletionRequest) WithStream(stream bool) ChatRequest

type ChatCompletionResponse

type ChatCompletionResponse struct {
	ID      string `json:"id"`
	Object  string `json:"object"`
	Created int64  `json:"created"`
	Model   string `json:"model"`
	// mark the request is scale-tier or default, only exists for scale-tier
	ServiceTier string                  `json:"service_tier,omitempty"`
	Choices     []*ChatCompletionChoice `json:"choices"`
	Usage       Usage                   `json:"usage"`

	HttpHeader
}

ChatCompletionResponse represents a response structure for chat completion API.

type ChatCompletionResponseChoicesElemModerationHitType added in v1.0.177

type ChatCompletionResponseChoicesElemModerationHitType string
const (
	ChatCompletionResponseChoicesElemModerationHitTypeViolence        ChatCompletionResponseChoicesElemModerationHitType = "violence"
	ChatCompletionResponseChoicesElemModerationHitTypeSevereViolation ChatCompletionResponseChoicesElemModerationHitType = "severe_violation"
)

type ChatCompletionStreamChoice

type ChatCompletionStreamChoice struct {
	Index             int                                                 `json:"index"`
	Delta             ChatCompletionStreamChoiceDelta                     `json:"delta"`
	LogProbs          *LogProbs                                           `json:"logprobs,omitempty"`
	FinishReason      FinishReason                                        `json:"finish_reason"`
	ModerationHitType *ChatCompletionResponseChoicesElemModerationHitType `json:"moderation_hit_type,omitempty" yaml:"moderation_hit_type,omitempty" mapstructure:"moderation_hit_type,omitempty"`
}

type ChatCompletionStreamChoiceDelta

type ChatCompletionStreamChoiceDelta struct {
	Content          string        `json:"content,omitempty"`
	Role             string        `json:"role,omitempty"`
	ReasoningContent *string       `json:"reasoning_content,omitempty"`
	FunctionCall     *FunctionCall `json:"function_call,omitempty"`
	ToolCalls        []*ToolCall   `json:"tool_calls,omitempty"`
}

type ChatCompletionStreamResponse

type ChatCompletionStreamResponse struct {
	ID      string `json:"id"`
	Object  string `json:"object"`
	Created int64  `json:"created"`
	Model   string `json:"model"`
	// mark the request is scale-tier or default, only exists for scale-tier
	ServiceTier string                        `json:"service_tier,omitempty"`
	Choices     []*ChatCompletionStreamChoice `json:"choices"`
	// An optional field that will only be present when you set stream_options: {"include_usage": true} in your request.
	// When present, it contains a null value except for the last chunk which contains the token usage statistics
	// for the entire request.
	Usage *Usage `json:"usage,omitempty"`
}

type ChatMessageImageURL

type ChatMessageImageURL struct {
	URL    string         `json:"url,omitempty"`
	Detail ImageURLDetail `json:"detail,omitempty"`
}

type ChatMessageVideoURL added in v1.1.12

type ChatMessageVideoURL struct {
	URL string   `json:"url"`
	FPS *float64 `json:"fps,omitempty"`
}

type ChatRequest added in v1.0.159

type ChatRequest interface {
	json.Marshaler
	WithStream(stream bool) ChatRequest
	IsStream() bool
	GetModel() string
}

type CompletionTokensDetails added in v1.0.180

type CompletionTokensDetails struct {
	ReasoningTokens   int  `json:"reasoning_tokens"`
	ProvisionedTokens *int `json:"provisioned_tokens,omitempty"`
}

type Content added in v1.0.177

type Content struct {
	VideoURL     string `json:"video_url"`
	LastFrameURL string `json:"last_frame_url"`
	FileURL      string `json:"file_url"`
}

type ContentGenerationContentItemType added in v1.0.177

type ContentGenerationContentItemType string
const (
	ContentGenerationContentItemTypeText      ContentGenerationContentItemType = "text"
	ContentGenerationContentItemTypeImage     ContentGenerationContentItemType = "image_url"
	ContentGenerationContentItemTypeDraftTask ContentGenerationContentItemType = "draft_task"
)

type ContentGenerationError added in v1.0.179

type ContentGenerationError struct {
	Code    string `json:"code"`
	Message string `json:"message"`
}

type ContentGenerationUsage added in v1.0.177

type ContentGenerationUsage struct {
	CompletionTokens int `json:"completion_tokens"`
}

type ContextChatCompletionRequest added in v1.0.173

type ContextChatCompletionRequest struct {
	ContextID        string                   `json:"context_id"`
	Mode             ContextMode              `json:"mode"`
	Model            string                   `json:"model"`
	Messages         []*ChatCompletionMessage `json:"messages"`
	MaxTokens        int                      `json:"max_tokens,omitempty"`
	Temperature      float32                  `json:"temperature,omitempty"`
	TopP             float32                  `json:"top_p,omitempty"`
	Stream           bool                     `json:"stream,omitempty"`
	Stop             []string                 `json:"stop,omitempty"`
	FrequencyPenalty float32                  `json:"frequency_penalty,omitempty"`
	LogitBias        map[string]int           `json:"logit_bias,omitempty"`
	LogProbs         bool                     `json:"logprobs,omitempty"`
	TopLogProbs      int                      `json:"top_logprobs,omitempty"`
	User             string                   `json:"user,omitempty"`
	FunctionCall     interface{}              `json:"function_call,omitempty"`
	Tools            []*Tool                  `json:"tools,omitempty"`
	ToolChoice       interface{}              `json:"tool_choice,omitempty"`
	StreamOptions    *StreamOptions           `json:"stream_options,omitempty"`
	Metadata         map[string]interface{}   `json:"metadata,omitempty"`
}

type ContextMode added in v1.0.173

type ContextMode string
const (
	ContextModeSession      ContextMode = "session"
	ContextModeCommonPrefix ContextMode = "common_prefix"
)

type CreateChatCompletionRequest added in v1.0.159

type CreateChatCompletionRequest struct {
	Model               string                   `json:"model"`
	Messages            []*ChatCompletionMessage `json:"messages"`
	MaxTokens           *int                     `json:"max_tokens,omitempty"`
	Temperature         *float32                 `json:"temperature,omitempty"`
	TopP                *float32                 `json:"top_p,omitempty"`
	Stream              *bool                    `json:"stream,omitempty"`
	Stop                []string                 `json:"stop,omitempty"`
	FrequencyPenalty    *float32                 `json:"frequency_penalty,omitempty"`
	LogitBias           map[string]int           `json:"logit_bias,omitempty"`
	LogProbs            *bool                    `json:"logprobs,omitempty"`
	TopLogProbs         *int                     `json:"top_logprobs,omitempty"`
	User                *string                  `json:"user,omitempty"`
	FunctionCall        interface{}              `json:"function_call,omitempty"`
	Tools               []*Tool                  `json:"tools,omitempty"`
	ToolChoice          interface{}              `json:"tool_choice,omitempty"`
	StreamOptions       *StreamOptions           `json:"stream_options,omitempty"`
	PresencePenalty     *float32                 `json:"presence_penalty,omitempty"`
	RepetitionPenalty   *float32                 `json:"repetition_penalty,omitempty"`
	N                   *int                     `json:"n,omitempty"`
	ResponseFormat      *ResponseFormat          `json:"response_format,omitempty"`
	ParallelToolCalls   *bool                    `json:"parallel_tool_calls,omitempty"`
	ServiceTier         *string                  `json:"service_tier,omitempty"`
	Thinking            *Thinking                `json:"thinking,omitempty"`
	MaxCompletionTokens *int                     `json:"max_completion_tokens,omitempty"`
	ReasoningEffort     *ReasoningEffort         `json:"reasoning_effort,omitempty"`
}

CreateChatCompletionRequest - when making a request with this struct, a field set to 0, an empty string (""), false, or another zero value is still sent to the server, and the server handles it as the value you specified.
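
A minimal sketch of how the pointer fields let an explicit zero reach the server (the endpoint ID and messages variable are placeholders):

temperature := float32(0) // explicitly request temperature 0
maxTokens := 1024
streaming := false
req := model.CreateChatCompletionRequest{
	Model:       "<endpoint-id>", // placeholder
	Messages:    messages,        // assumed []*model.ChatCompletionMessage
	Temperature: &temperature,    // pointer field: the zero value is still serialized
	MaxTokens:   &maxTokens,
	Stream:      &streaming,
}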

func (CreateChatCompletionRequest) GetModel added in v1.0.159

func (r CreateChatCompletionRequest) GetModel() string

func (CreateChatCompletionRequest) IsStream added in v1.0.159

func (r CreateChatCompletionRequest) IsStream() bool

func (CreateChatCompletionRequest) MarshalJSON added in v1.0.159

func (r CreateChatCompletionRequest) MarshalJSON() ([]byte, error)

func (CreateChatCompletionRequest) WithStream added in v1.0.159

func (r CreateChatCompletionRequest) WithStream(stream bool) ChatRequest

type CreateContentGenerationContentItem added in v1.0.177

type CreateContentGenerationContentItem struct {
	Type      ContentGenerationContentItemType `json:"type"`
	Text      *string                          `json:"text,omitempty"`
	ImageURL  *ImageURL                        `json:"image_url,omitempty"`
	Role      *string                          `json:"role,omitempty"`
	DraftTask *DraftTask                       `json:"draft_task,omitempty"`
}

type CreateContentGenerationTaskRequest added in v1.0.177

type CreateContentGenerationTaskRequest struct {
	Model                 string                                `json:"model"`
	Content               []*CreateContentGenerationContentItem `json:"content"`
	CallbackUrl           *string                               `json:"callback_url,omitempty"`
	ReturnLastFrame       *bool                                 `json:"return_last_frame,omitempty"`
	ServiceTier           *string                               `json:"service_tier,omitempty"`
	ExecutionExpiresAfter *int64                                `json:"execution_expires_after,omitempty"`
	GenerateAudio         *bool                                 `json:"generate_audio,omitempty"`
	Draft                 *bool                                 `json:"draft,omitempty"`
	CameraFixed           *bool                                 `json:"camera_fixed,omitempty"`
	Watermark             *bool                                 `json:"watermark,omitempty"`
	Seed                  *int64                                `json:"seed,omitempty"`
	Resolution            *string                               `json:"resolution,omitempty"`
	Ratio                 *string                               `json:"ratio,omitempty"`
	Duration              *int64                                `json:"duration,omitempty"`
	Frames                *int64                                `json:"frames,omitempty"`
	ExtraBody             `json:"-"`
}

func (CreateContentGenerationTaskRequest) MarshalJSON added in v1.2.6

func (r CreateContentGenerationTaskRequest) MarshalJSON() ([]byte, error)

type CreateContentGenerationTaskResponse added in v1.0.177

type CreateContentGenerationTaskResponse struct {
	ID string `json:"id"`

	HttpHeader
}

type CreateContextRequest added in v1.0.173

type CreateContextRequest struct {
	Model              string                   `json:"model"`
	Mode               ContextMode              `json:"mode"`
	Messages           []*ChatCompletionMessage `json:"messages"`
	TTL                *int                     `json:"ttl,omitempty"`
	TruncationStrategy *TruncationStrategy      `json:"truncation_strategy,omitempty"`
}

type CreateContextResponse added in v1.0.173

type CreateContextResponse struct {
	ID                 string              `json:"id"`
	Mode               ContextMode         `json:"mode"`
	Model              string              `json:"model"`
	TTL                *int                `json:"ttl,omitempty"`
	TruncationStrategy *TruncationStrategy `json:"truncation_strategy,omitempty"`

	Usage Usage `json:"usage"`

	HttpHeader
}

type DeleteContentGenerationTaskRequest added in v1.0.177

type DeleteContentGenerationTaskRequest struct {
	ID string `json:"id"`
}

type DraftTask added in v1.2.6

type DraftTask struct {
	ID string `json:"id"`
}

type Embedding

type Embedding struct {
	Object    string    `json:"object"`
	Embedding []float32 `json:"embedding"`
	Index     int       `json:"index"`
}

Embedding is a special format of data representation that can be easily utilized by machine learning models and algorithms. The embedding is an information dense representation of the semantic meaning of a piece of text. Each embedding is a vector of floating point numbers, such that the distance between two embeddings in the vector space is correlated with semantic similarity between two inputs in the original format. For example, if two texts are similar, then their vector representations should also be similar.
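
For instance, cosine similarity between two Embedding.Embedding vectors is a common proximity measure; the helper below is not part of this package, just a sketch using the standard math package:

// cosineSimilarity compares two embedding vectors of equal length.
func cosineSimilarity(a, b []float32) float64 {
	var dot, na, nb float64
	for i := range a {
		dot += float64(a[i]) * float64(b[i])
		na += float64(a[i]) * float64(a[i])
		nb += float64(b[i]) * float64(b[i])
	}
	if na == 0 || nb == 0 {
		return 0
	}
	return dot / (math.Sqrt(na) * math.Sqrt(nb))
}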

type EmbeddingEncodingFormat

type EmbeddingEncodingFormat string

EmbeddingEncodingFormat is the format of the embeddings data. Currently, only "float" and "base64" are supported; however, "base64" is not officially documented. If not specified, "float" is used.

const (
	EmbeddingEncodingFormatFloat  EmbeddingEncodingFormat = "float"
	EmbeddingEncodingFormatBase64 EmbeddingEncodingFormat = "base64"
)

type EmbeddingRequest

type EmbeddingRequest struct {
	Input          interface{}             `json:"input"`
	Model          string                  `json:"model"`
	User           string                  `json:"user"`
	EncodingFormat EmbeddingEncodingFormat `json:"encoding_format,omitempty"`
	// Dimensions The number of dimensions the resulting output embeddings should have.
	// Only supported in text-embedding-3 and later models.
	Dimensions int `json:"dimensions,omitempty"`
}

func (EmbeddingRequest) Convert

func (r EmbeddingRequest) Convert() EmbeddingRequest

type EmbeddingRequestConverter

type EmbeddingRequestConverter interface {
	// Needs to be of type EmbeddingRequestStrings or EmbeddingRequestTokens
	Convert() EmbeddingRequest
}

type EmbeddingRequestStrings

type EmbeddingRequestStrings struct {
	// Input is a slice of strings for which you want to generate an Embedding vector.
	// Each input must not exceed 8192 tokens in length.
	// OpenAPI suggests replacing newlines (\n) in your input with a single space, as they
	// have observed inferior results when newlines are present.
	// E.g.
	//	"The food was delicious and the waiter..."
	Input []string `json:"input"`
	// ID of the model to use. You can use the List models API to see all of your available models,
	// or see our Model overview for descriptions of them.
	Model string `json:"model"`
	// A unique identifier representing your end-user, which will help to monitor and detect abuse.
	User string `json:"user"`
	// EmbeddingEncodingFormat is the format of the embeddings data.
	// Currently, only "float" and "base64" are supported, however, "base64" is not officially documented.
	// If not specified will use "float".
	EncodingFormat EmbeddingEncodingFormat `json:"encoding_format,omitempty"`
	// Dimensions The number of dimensions the resulting output embeddings should have.
	// Only supported in text-embedding-3 and later models.
	Dimensions int `json:"dimensions,omitempty"`
}

EmbeddingRequestStrings is the input to a create embeddings request with a slice of strings.

func (EmbeddingRequestStrings) Convert
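
A hedged usage sketch (the model ID is a placeholder): build the string-based request and normalize it with Convert.

strReq := model.EmbeddingRequestStrings{
	Input: []string{"The food was delicious and the waiter was friendly."},
	Model: "<embedding-endpoint-id>", // placeholder
}
embReq := strReq.Convert() // yields the generic EmbeddingRequest form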

type EmbeddingRequestTokens

type EmbeddingRequestTokens struct {
	// Input is a slice of slices of ints ([][]int) for which you want to generate an Embedding vector.
	// Each input must not exceed 8192 tokens in length.
	// OpenAPI suggests replacing newlines (\n) in your input with a single space, as they
	// have observed inferior results when newlines are present.
	// E.g.
	//	"The food was delicious and the waiter..."
	Input [][]int `json:"input"`
	// ID of the model to use. You can use the List models API to see all of your available models,
	// or see our Model overview for descriptions of them.
	Model string `json:"model"`
	// A unique identifier representing your end-user, which will help to monitor and detect abuse.
	User string `json:"user"`
	// EmbeddingEncodingFormat is the format of the embeddings data.
	// Currently, only "float" and "base64" are supported, however, "base64" is not officially documented.
	// If not specified will use "float".
	EncodingFormat EmbeddingEncodingFormat `json:"encoding_format,omitempty"`
	// Dimensions The number of dimensions the resulting output embeddings should have.
	// Only supported in text-embedding-3 and later models.
	Dimensions int `json:"dimensions,omitempty"`
}

func (EmbeddingRequestTokens) Convert

type EmbeddingResponse

type EmbeddingResponse struct {
	ID      string      `json:"id"`
	Created int         `json:"created"`
	Object  string      `json:"object"`
	Data    []Embedding `json:"data"`
	Model   string      `json:"model"`
	Usage   Usage       `json:"usage"`

	HttpHeader
}

EmbeddingResponse is the response from a Create embeddings request.

type EmbeddingResponseBase64

type EmbeddingResponseBase64 struct {
	Object string            `json:"object"`
	Data   []Base64Embedding `json:"data"`
	Model  string            `json:"model"`
	Usage  Usage             `json:"usage"`

	HttpHeader
}

EmbeddingResponseBase64 is the response from a Create embeddings request with base64 encoding format.

func (*EmbeddingResponseBase64) ToEmbeddingResponse

func (r *EmbeddingResponseBase64) ToEmbeddingResponse() (EmbeddingResponse, error)

ToEmbeddingResponse converts an EmbeddingResponseBase64 to an EmbeddingResponse.
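
A sketch of the conversion; how resp64 is obtained is assumed (it would come from an embeddings call issued with EncodingFormat set to EmbeddingEncodingFormatBase64):

// resp64 is an EmbeddingResponseBase64 returned elsewhere (assumption).
resp, err := resp64.ToEmbeddingResponse()
if err != nil {
	return err
}
for _, emb := range resp.Data {
	fmt.Println(emb.Index, len(emb.Embedding)) // decoded []float32 vectors
}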

type EncryptInfo added in v1.2.9

type EncryptInfo struct {
	Version    string `thrift:"Version,1,optional" header:"Version" json:"Version,omitempty"`
	RingID     string `thrift:"RingID,2,optional" header:"RingID" json:"RingID,omitempty"`
	KeyID      string `thrift:"KeyID,3,optional" header:"KeyID" json:"KeyID,omitempty"`
	ExpireTime int64  `thrift:"ExpireTime,4,optional" header:"ExpireTime" json:"ExpireTime,omitempty"`
}

type ErrorResponse

type ErrorResponse struct {
	Error *APIError `json:"error,omitempty"`
}

type ExtraBody added in v1.2.6

type ExtraBody map[string]interface{}

type FinishReason

type FinishReason string
const (
	FinishReasonStop          FinishReason = "stop"
	FinishReasonLength        FinishReason = "length"
	FinishReasonFunctionCall  FinishReason = "function_call"
	FinishReasonToolCalls     FinishReason = "tool_calls"
	FinishReasonContentFilter FinishReason = "content_filter"
	FinishReasonNull          FinishReason = "null"
)

func (FinishReason) MarshalJSON

func (r FinishReason) MarshalJSON() ([]byte, error)

type FunctionCall

type FunctionCall struct {
	Name      string `json:"name,omitempty"`
	Arguments string `json:"arguments,omitempty"`
}

type FunctionDefine deprecated

type FunctionDefine = FunctionDefinition

Deprecated: use FunctionDefinition instead.

type FunctionDefinition

type FunctionDefinition struct {
	Name        string `json:"name"`
	Description string `json:"description,omitempty"`
	// Parameters is an object describing the function.
	// You can pass json.RawMessage to describe the schema,
	// or you can pass in a struct which serializes to the proper JSON schema.
	// The jsonschema package is provided for convenience, but you should
	// consider another specialized library if you require more complex schemas.
	Parameters interface{} `json:"parameters"`
}
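
For example (a hedged sketch; the function name and schema are illustrative), Parameters can be a json.RawMessage holding the JSON Schema directly:

// Requires the standard encoding/json package.
weatherTool := &model.Tool{
	Type: model.ToolTypeFunction,
	Function: &model.FunctionDefinition{
		Name:        "get_weather", // illustrative function name
		Description: "Get the current weather for a city",
		Parameters: json.RawMessage(`{
			"type": "object",
			"properties": {"city": {"type": "string"}},
			"required": ["city"]
		}`),
	},
}

The tool would then be passed in a request's Tools slice, e.g. Tools: []*model.Tool{weatherTool}.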

type GenerateImagesError added in v1.1.8

type GenerateImagesError struct {
	Code    string `json:"code"`
	Message string `json:"message"`
}

type GenerateImagesRequest added in v1.1.8

type GenerateImagesRequest struct {
	Model                            string                            `json:"model"`
	Prompt                           string                            `json:"prompt"`
	Image                            interface{}                       `json:"image,omitempty"`
	ResponseFormat                   *string                           `json:"response_format,omitempty"`
	Seed                             *int64                            `json:"seed,omitempty"`
	GuidanceScale                    *float64                          `json:"guidance_scale,omitempty"`
	Size                             *string                           `json:"size,omitempty"`
	Watermark                        *bool                             `json:"watermark,omitempty"`
	OptimizePrompt                   *bool                             `json:"optimize_prompt,omitempty"`
	OptimizePromptOptions            *OptimizePromptOptions            `json:"optimize_prompt_options,omitempty"`
	SequentialImageGeneration        *SequentialImageGeneration        `json:"sequential_image_generation,omitempty"`
	SequentialImageGenerationOptions *SequentialImageGenerationOptions `json:"sequential_image_generation_options,omitempty"`
}

func (*GenerateImagesRequest) NormalizeImages added in v1.1.33

func (req *GenerateImagesRequest) NormalizeImages() error

type GenerateImagesUsage added in v1.1.8

type GenerateImagesUsage struct {
	GeneratedImages int64 `json:"generated_images"`
	OutputTokens    int64 `json:"output_tokens"`
	TotalTokens     int64 `json:"total_tokens"`
}

type GetContentGenerationTaskRequest added in v1.0.177

type GetContentGenerationTaskRequest struct {
	ID string `json:"id"`
}

type GetContentGenerationTaskResponse added in v1.0.177

type GetContentGenerationTaskResponse struct {
	ID                    string                  `json:"id"`
	Model                 string                  `json:"model"`
	Status                string                  `json:"status"`
	Error                 *ContentGenerationError `json:"error,omitempty"`
	Content               Content                 `json:"content"`
	Usage                 Usage                   `json:"usage"`
	SubdivisionLevel      *string                 `json:"subdivisionlevel,omitempty"`
	FileFormat            *string                 `json:"fileformat,omitempty"`
	Frames                *int64                  `json:"frames"`
	FramesPerSecond       *int64                  `json:"framespersecond"`
	Resolution            *string                 `json:"resolution,omitempty"`
	Ratio                 *string                 `json:"ratio,omitempty"`
	Duration              *int64                  `json:"duration,omitempty"`
	CreatedAt             int64                   `json:"created_at"`
	UpdatedAt             int64                   `json:"updated_at"`
	Seed                  *int64                  `json:"seed,omitempty"`
	RevisedPrompt         *string                 `json:"revised_prompt,omitempty"`
	ServiceTier           *string                 `json:"service_tier,omitempty"`
	ExecutionExpiresAfter *int64                  `json:"execution_expires_after,omitempty"`
	GenerateAudio         *bool                   `json:"generate_audio,omitempty"`
	Draft                 *bool                   `json:"draft,omitempty"`
	DraftTaskID           *string                 `json:"draft_task_id,omitempty"`

	HttpHeader
}

type HttpHeader

type HttpHeader http.Header

func (*HttpHeader) GetHeader added in v1.0.172

func (h *HttpHeader) GetHeader() http.Header

func (*HttpHeader) Header

func (h *HttpHeader) Header() http.Header

func (*HttpHeader) SetHeader

func (h *HttpHeader) SetHeader(header http.Header)

type Image added in v1.1.8

type Image struct {
	Url     *string `json:"url,omitempty"`
	B64Json *string `json:"b64_json,omitempty"`
	Size    string  `json:"size"`
}

type ImageURL added in v1.0.177

type ImageURL struct {
	URL string `json:"url"`
}

type ImageURLDetail

type ImageURLDetail string
const (
	ImageURLDetailHigh ImageURLDetail = "high"
	ImageURLDetailLow  ImageURLDetail = "low"
	ImageURLDetailAuto ImageURLDetail = "auto"
)

type ImagesResponse added in v1.1.8

type ImagesResponse struct {
	Model   string               `json:"model"`
	Created int64                `json:"created"`
	Data    []*Image             `json:"data"`
	Usage   *GenerateImagesUsage `json:"usage,omitempty"`
	Error   *GenerateImagesError `json:"error,omitempty"`

	HttpHeader
}

type ImagesStreamResponse added in v1.1.33

type ImagesStreamResponse struct {
	Type       string               `json:"type"`
	Model      string               `json:"model"`
	Created    int64                `json:"created"`
	ImageIndex int64                `json:"image_index"`
	Url        *string              `json:"url,omitempty"`
	B64Json    *string              `json:"b64_json,omitempty"`
	Size       string               `json:"size"`
	Usage      *GenerateImagesUsage `json:"usage,omitempty"`
	Error      *GenerateImagesError `json:"error,omitempty"`

	HttpHeader
}

type ListContentGenerationTaskItem added in v1.0.177

type ListContentGenerationTaskItem struct {
	ID                    string                  `json:"id"`
	Model                 string                  `json:"model"`
	Status                string                  `json:"status"`
	FailureReason         *ContentGenerationError `json:"failure_reason,omitempty"`
	Content               Content                 `json:"content"`
	Usage                 Usage                   `json:"usage"`
	SubdivisionLevel      *string                 `json:"subdivisionlevel,omitempty"`
	FileFormat            *string                 `json:"fileformat,omitempty"`
	Frames                *int64                  `json:"frames"`
	FramesPerSecond       *int64                  `json:"framespersecond"`
	CreatedAt             int64                   `json:"created_at"`
	UpdatedAt             int64                   `json:"updated_at"`
	Seed                  *int64                  `json:"seed,omitempty"`
	RevisedPrompt         *string                 `json:"revised_prompt,omitempty"`
	ServiceTier           *string                 `json:"service_tier,omitempty"`
	ExecutionExpiresAfter *int64                  `json:"execution_expires_after,omitempty"`
	GenerateAudio         *bool                   `json:"generate_audio,omitempty"`
	Draft                 *bool                   `json:"draft,omitempty"`
	DraftTaskID           *string                 `json:"draft_task_id,omitempty"`
}

type ListContentGenerationTasksFilter added in v1.0.177

type ListContentGenerationTasksFilter struct {
	Status      *string   `json:"status,omitempty"`
	TaskIDs     []*string `json:"task_ids,omitempty"`
	Model       *string   `json:"model,omitempty"`
	ServiceTier *string   `json:"service_tier,omitempty"`
}

type ListContentGenerationTasksRequest added in v1.0.177

type ListContentGenerationTasksRequest struct {
	PageNum  *int                              `json:"page_num,omitempty"`
	PageSize *int                              `json:"page_size,omitempty"`
	Filter   *ListContentGenerationTasksFilter `json:"filter,omitempty"`
}

type ListContentGenerationTasksResponse added in v1.0.177

type ListContentGenerationTasksResponse struct {
	Total int64                           `json:"total"`
	Items []ListContentGenerationTaskItem `json:"items"`
	HttpHeader
}

type LogProb

type LogProb struct {
	Token   string  `json:"token"`
	LogProb float64 `json:"logprob"`
	Bytes   []rune  `json:"bytes,omitempty"` // Omitting the field if it is null
	// TopLogProbs is a list of the most likely tokens and their log probability, at this token position.
	// In rare cases, there may be fewer than the number of requested top_logprobs returned.
	TopLogProbs []*TopLogProbs `json:"top_logprobs"`
}

LogProb represents the probability information for a token.

type LogProbs

type LogProbs struct {
	// Content is a list of message content tokens with log probability information.
	Content []*LogProb `json:"content"`
}

LogProbs is the top-level structure containing the log probability information.

type MultiModalEmbeddingInputType added in v1.0.179

type MultiModalEmbeddingInputType string
const (
	MultiModalEmbeddingInputTypeText     MultiModalEmbeddingInputType = "text"
	MultiModalEmbeddingInputTypeImageURL MultiModalEmbeddingInputType = "image_url"
	MultiModalEmbeddingInputTypeVideoURL MultiModalEmbeddingInputType = "video_url"
)

type MultiModalEmbeddingRequest added in v1.0.179

type MultiModalEmbeddingRequest struct {
	Input []MultimodalEmbeddingInput `json:"input"`
	// ID of the model to use. You can use the List models API to see all of your available models,
	// or see our Model overview for descriptions of them.
	Model string `json:"model"`
	// EmbeddingEncodingFormat is the format of the embeddings data.
	// Currently, only "float" and "base64" are supported, however, "base64" is not officially documented.
	// If not specified will use "float".
	EncodingFormat *EmbeddingEncodingFormat `json:"encoding_format,omitempty"`
	// Dimensions Value range: 1024 or 2048.
	// Specifies the dimensionality of the output embedding vector.
	// This parameter is only supported in doubao-embedding-vision-250615 and later versions.
	Dimensions *int `json:"dimensions,omitempty"`

	// SparseEmbedding stands for whether to return sparse embedding.
	SparseEmbedding *SparseEmbeddingInput `json:"sparse_embedding,omitempty"`
}

MultiModalEmbeddingRequest is the input to a create embeddings request.

type MultiModalEmbeddingResponseBase64 added in v1.0.179

type MultiModalEmbeddingResponseBase64 struct {
	Id      string                   `json:"id"`
	Model   string                   `json:"model"`
	Created int64                    `json:"created"`
	Object  string                   `json:"object"`
	Data    Base64Embedding          `json:"data"`
	Usage   MultimodalEmbeddingUsage `json:"usage"`

	HttpHeader
}

MultiModalEmbeddingResponseBase64 is the response from a Create embeddings request with base64 encoding format.

func (*MultiModalEmbeddingResponseBase64) ToMultiModalEmbeddingResponse added in v1.0.179

func (r *MultiModalEmbeddingResponseBase64) ToMultiModalEmbeddingResponse() (MultimodalEmbeddingResponse, error)

ToMultiModalEmbeddingResponse converts a MultiModalEmbeddingResponseBase64 to a MultimodalEmbeddingResponse.

type MultimodalEmbedding added in v1.0.179

type MultimodalEmbedding struct {
	Embedding       []float32          `json:"embedding"`
	SparseEmbedding *[]SparseEmbedding `json:"sparse_embedding,omitempty"`
	Object          string             `json:"object"`
}

type MultimodalEmbeddingImageURL added in v1.0.179

type MultimodalEmbeddingImageURL struct {
	URL string `json:"url"`
}

type MultimodalEmbeddingInput added in v1.0.179

type MultimodalEmbeddingInput struct {
	Type     MultiModalEmbeddingInputType `json:"type"`
	Text     *string                      `json:"text,omitempty"`
	ImageURL *MultimodalEmbeddingImageURL `json:"image_url,omitempty"`
	VideoURL *MultimodalEmbeddingVideoURL `json:"video_url,omitempty"`
}

type MultimodalEmbeddingPromptTokensDetail added in v1.0.179

type MultimodalEmbeddingPromptTokensDetail struct {
	TextTokens  int `json:"text_tokens"`
	ImageTokens int `json:"image_tokens"`
}

type MultimodalEmbeddingResponse added in v1.0.179

type MultimodalEmbeddingResponse struct {
	Id      string                   `json:"id"`
	Model   string                   `json:"model"`
	Created int64                    `json:"created"`
	Object  string                   `json:"object"`
	Data    MultimodalEmbedding      `json:"data"`
	Usage   MultimodalEmbeddingUsage `json:"usage"`

	HttpHeader
}

type MultimodalEmbeddingUsage added in v1.0.179

type MultimodalEmbeddingUsage struct {
	PromptTokens        int                                   `json:"prompt_tokens"`
	TotalTokens         int                                   `json:"total_tokens"`
	PromptTokensDetails MultimodalEmbeddingPromptTokensDetail `json:"prompt_tokens_details"`
}

type MultimodalEmbeddingVideoURL added in v1.1.28

type MultimodalEmbeddingVideoURL struct {
	URL string   `json:"url"`
	FPS *float64 `json:"fps,omitempty"`
}

type OptimizePromptMode added in v1.1.44

type OptimizePromptMode string

type OptimizePromptOptions added in v1.1.44

type OptimizePromptOptions struct {
	Thinking *OptimizePromptThinking `json:"thinking,omitempty"`
	Mode     *OptimizePromptMode     `json:"mode,omitempty"`
}

type OptimizePromptThinking added in v1.1.44

type OptimizePromptThinking string

type PromptTokensDetail added in v1.0.173

type PromptTokensDetail struct {
	CachedTokens      int  `json:"cached_tokens"`
	ProvisionedTokens *int `json:"provisioned_tokens,omitempty"`
}

type RawResponse

type RawResponse struct {
	io.ReadCloser

	HttpHeader
}

type ReasoningEffort added in v1.1.44

type ReasoningEffort string
const (
	ReasoningEffortMinimal ReasoningEffort = "minimal"
	ReasoningEffortLow     ReasoningEffort = "low"
	ReasoningEffortMedium  ReasoningEffort = "medium"
	ReasoningEffortHigh    ReasoningEffort = "high"
)

type RequestError

type RequestError struct {
	HTTPStatusCode int
	Err            error
	RequestId      string `json:"request_id"`
}

RequestError provides information about generic request errors.

func NewRequestError added in v1.0.160

func NewRequestError(httpStatusCode int, rawErr error, requestID string) *RequestError

func (*RequestError) Error

func (e *RequestError) Error() string

func (*RequestError) Unwrap

func (e *RequestError) Unwrap() error

type Response

type Response interface {
	SetHeader(http.Header)
	GetHeader() http.Header
}

type ResponseFormat added in v1.0.151

type ResponseFormat struct {
	Type       ResponseFormatType                       `json:"type"`
	JSONSchema *ResponseFormatJSONSchemaJSONSchemaParam `json:"json_schema,omitempty"`
	// Deprecated: use `JSONSchema` instead.
	Schema interface{} `json:"schema,omitempty"`
}

type ResponseFormatJSONSchemaJSONSchemaParam added in v1.1.12

type ResponseFormatJSONSchemaJSONSchemaParam struct {
	// The name of the response format. Must be a-z, A-Z, 0-9, or contain underscores
	// and dashes, with a maximum length of 64.
	Name string `json:"name"`
	// A description of what the response format is for, used by the model to determine
	// how to respond in the format.
	Description string `json:"description"`
	// The schema for the response format, described as a JSON Schema object.
	Schema interface{} `json:"schema"`
	// Whether to enable strict schema adherence when generating the output. If set to
	// true, the model will always follow the exact schema defined in the `schema`
	// field. Only a subset of JSON Schema is supported when `strict` is `true`.
	Strict bool `json:"strict"`
}
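
A hedged sketch of requesting structured output with a JSON schema (the name, description, and schema below are illustrative):

responseFormat := &model.ResponseFormat{
	Type: model.ResponseFormatJSONSchema,
	JSONSchema: &model.ResponseFormatJSONSchemaJSONSchemaParam{
		Name:        "weather_report", // illustrative
		Description: "A structured weather report",
		Schema: map[string]interface{}{
			"type": "object",
			"properties": map[string]interface{}{
				"city":   map[string]interface{}{"type": "string"},
				"temp_c": map[string]interface{}{"type": "number"},
			},
			"required": []string{"city", "temp_c"},
		},
		Strict: true,
	},
}
// Assign to a request's ResponseFormat field, e.g.
// CreateChatCompletionRequest{..., ResponseFormat: responseFormat}.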

type ResponseFormatType added in v1.0.151

type ResponseFormatType string
const (
	ResponseFormatJSONSchema ResponseFormatType = "json_schema"
	ResponseFormatJsonObject ResponseFormatType = "json_object"
	ResponseFormatText       ResponseFormatType = "text"
)

type SequentialImageGeneration added in v1.1.33

type SequentialImageGeneration string

type SequentialImageGenerationOptions added in v1.1.33

type SequentialImageGenerationOptions struct {
	MaxImages *int `json:"max_images,omitempty"`
}

type SparseEmbedding added in v1.1.28

type SparseEmbedding struct {
	Index int     `json:"index"`
	Value float64 `json:"value"`
}

type SparseEmbeddingInput added in v1.1.28

type SparseEmbeddingInput struct {
	Type SparseEmbeddingInputType `json:"type"`
}

type SparseEmbeddingInputType added in v1.1.28

type SparseEmbeddingInputType string
const (
	SparseEmbeddingInputTypeEnabled  SparseEmbeddingInputType = "enabled"
	SparseEmbeddingInputTypeDisabled SparseEmbeddingInputType = "disabled"
)

type StreamOptions

type StreamOptions struct {
	// If set, an additional chunk will be streamed before the data: [DONE] message.
	// The usage field on this chunk shows the token usage statistics for the entire request,
	// and the choices field will always be an empty array.
	// All other chunks will also include a usage field, but with a null value.
	IncludeUsage bool `json:"include_usage,omitempty"`
	// if set, each data chunk will include a `usage` field
	// representing the current cumulative token usage for the entire request.
	ChunkIncludeUsage bool `json:"chunk_include_usage,omitempty"`
}
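
A minimal sketch of enabling usage reporting on a streaming request (the endpoint ID and messages variable are placeholders):

streaming := true
req := model.CreateChatCompletionRequest{
	Model:    "<endpoint-id>", // placeholder
	Messages: messages,        // assumed []*model.ChatCompletionMessage
	Stream:   &streaming,
	StreamOptions: &model.StreamOptions{
		IncludeUsage: true, // the final chunk before data: [DONE] carries Usage for the whole request
	},
}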

type Thinking added in v1.1.8

type Thinking struct {
	Type ThinkingType `json:"type"`
}

type ThinkingType added in v1.1.8

type ThinkingType string
const (
	ThinkingTypeEnabled  ThinkingType = "enabled"
	ThinkingTypeDisabled ThinkingType = "disabled"
	ThinkingTypeAuto     ThinkingType = "auto"
)

type Tokenization added in v1.0.151

type Tokenization struct {
	Index         int     `json:"index"`
	Object        string  `json:"object"`
	TotalTokens   int     `json:"total_tokens"`
	TokenIDs      []int   `json:"token_ids"`
	OffsetMapping [][]int `json:"offset_mapping"`
}

type TokenizationRequest added in v1.0.151

type TokenizationRequest struct {
	Text  interface{} `json:"text"`
	Model string      `json:"model"`
	User  string      `json:"user"`
}

func (TokenizationRequest) Convert added in v1.0.151

type TokenizationRequestConverter added in v1.0.151

type TokenizationRequestConverter interface {
	Convert() TokenizationRequest
}

type TokenizationRequestString added in v1.0.151

type TokenizationRequestString struct {
	Text  string `json:"text"`
	Model string `json:"model"`
	User  string `json:"user"`
}

TokenizationRequestString is the input to a create tokenization request with a single string.

func (TokenizationRequestString) Convert added in v1.0.151

type TokenizationRequestStrings added in v1.0.151

type TokenizationRequestStrings struct {
	Text  []string `json:"text"`
	Model string   `json:"model"`
	User  string   `json:"user"`
}

TokenizationRequestStrings is the input to a create tokenization request with a slice of strings.

func (TokenizationRequestStrings) Convert added in v1.0.151

type TokenizationResponse added in v1.0.151

type TokenizationResponse struct {
	ID      string          `json:"id"`
	Created int             `json:"created"`
	Model   string          `json:"model"`
	Object  string          `json:"object"`
	Data    []*Tokenization `json:"data"`

	HttpHeader
}

TokenizationResponse is the response from a Create tokenization request.

type Tool

type Tool struct {
	Type     ToolType            `json:"type"`
	Function *FunctionDefinition `json:"function,omitempty"`
}

type ToolCall

type ToolCall struct {
	ID       string       `json:"id"`
	Type     ToolType     `json:"type"`
	Function FunctionCall `json:"function"`
	Index    *int         `json:"index,omitempty"`
}

type ToolChoice

type ToolChoice struct {
	Type     ToolType           `json:"type"`
	Function ToolChoiceFunction `json:"function,omitempty"`
}

type ToolChoiceFunction added in v1.0.151

type ToolChoiceFunction struct {
	Name string `json:"name"`
}

type ToolType

type ToolType string
const (
	ToolTypeFunction ToolType = "function"
)

type TopLogProbs

type TopLogProbs struct {
	Token   string  `json:"token"`
	LogProb float64 `json:"logprob"`
	Bytes   []rune  `json:"bytes,omitempty"`
}

type TruncationStrategy added in v1.0.173

type TruncationStrategy struct {
	Type                TruncationStrategyType `json:"type"`
	LastHistoryTokens   *int                   `json:"last_history_tokens,omitempty"`
	RollingTokens       *bool                  `json:"rolling_tokens,omitempty"`
	MaxWindowTokens     *int                   `json:"max_window_tokens,omitempty"`
	RollingWindowTokens *int                   `json:"rolling_window_tokens,omitempty"`
}

type TruncationStrategyType added in v1.0.173

type TruncationStrategyType string
const (
	TruncationStrategyTypeLastHistoryTokens TruncationStrategyType = "last_history_tokens"
	TruncationStrategyTypeRollingTokens     TruncationStrategyType = "rolling_tokens"
)

type Usage

type Usage struct {
	PromptTokens            int                     `json:"prompt_tokens"`
	CompletionTokens        int                     `json:"completion_tokens"`
	TotalTokens             int                     `json:"total_tokens"`
	PromptTokensDetails     PromptTokensDetail      `json:"prompt_tokens_details"`
	CompletionTokensDetails CompletionTokensDetails `json:"completion_tokens_details"`
}

Directories

Path Synopsis
Package contextmanagement is the contextmanagement service.
Package responses provides the responses model of the ark runtime service.
