openai

package
v0.3.0 Latest Latest
Warning

This package is not in the latest version of its module.

Go to latest
Published: Aug 21, 2025 License: Apache-2.0 Imports: 9 Imported by: 0

Documentation

Overview

Package openai contains the OpenAI API schema definitions. Note that we intentionally do not use code generation tools like OpenAPI Generator, not only to keep the code simple but also because OpenAI's OpenAPI definition is not compliant with the spec and the existing tools do not work well.

Index

Constants

View Source
// Chat message role values defined by the OpenAI API, used in the
// "role" field of chat messages.
const (
	ChatMessageRoleSystem    = "system"
	ChatMessageRoleDeveloper = "developer"
	ChatMessageRoleUser      = "user"
	ChatMessageRoleAssistant = "assistant"
	ChatMessageRoleFunction  = "function"
	ChatMessageRoleTool      = "tool"
)

Chat message role defined by the OpenAI API.

View Source
// Model names used for testing, chosen as the cheapest model per
// capability class.
const (
	// ModelGPT5Nano is the cheapest model usable with /chat/completions.
	ModelGPT5Nano = "gpt-5-nano"
	// ModelO3Mini is the cheapest reasoning model usable with /chat/completions.
	ModelO3Mini = "o3-mini"
	// ModelGPT4oMiniAudioPreview is the cheapest audio synthesis model usable with /chat/completions.
	ModelGPT4oMiniAudioPreview = "gpt-4o-mini-audio-preview"
	// ModelGPT4oAudioPreview is the cheapest audio transcription model usable with /chat/completions.
	// Note: gpt-4o-mini-transcribe is NOT a chat model, so cannot be used with /v1/chat/completions.
	ModelGPT4oAudioPreview = "gpt-4o-audio-preview"
	// ModelGPT4oMiniSearchPreview is the cheapest web search model usable with /chat/completions.
	// Note: gpt-5 series supports web search, but only in the /responses API.
	ModelGPT4oMiniSearchPreview = "gpt-4o-mini-search-preview"

	// ModelTextEmbedding3Small is the cheapest model usable with /embeddings.
	ModelTextEmbedding3Small = "text-embedding-3-small"
)

Model names for testing.

View Source
// JSON "type" discriminator values for the content-part union types
// declared in this package.
const (
	ChatCompletionContentPartTextTypeText             ChatCompletionContentPartTextType       = "text"
	ChatCompletionContentPartRefusalTypeRefusal       ChatCompletionContentPartRefusalType    = "refusal"
	ChatCompletionContentPartInputAudioTypeInputAudio ChatCompletionContentPartInputAudioType = "input_audio"
	ChatCompletionContentPartImageTypeImageURL        ChatCompletionContentPartImageType      = "image_url"
)

Variables

This section is empty.

Functions

This section is empty.

Types

type Annotation added in v0.3.0

// Annotation represents a URL citation produced when the web search tool
// is used; it appears in message content when the model cites web sources.
// Docs: https://platform.openai.com/docs/guides/tools-web-search?api-mode=chat
type Annotation struct {
	// Type is the type of the annotation. Always "url_citation" for web search.
	Type string `json:"type"`
	// URLCitation contains the citation details when type is "url_citation".
	URLCitation *URLCitation `json:"url_citation,omitempty"` //nolint:tagliatelle //follow openai api
}

Annotation represents a URL citation when using web search. The annotation appears in message content when the model cites web sources. Docs: https://platform.openai.com/docs/guides/tools-web-search?api-mode=chat

type AnthropicVendorFields added in v0.3.0

// AnthropicVendorFields contains Anthropic vendor-specific fields that are
// inlined into ChatCompletionRequest.
type AnthropicVendorFields struct {
	// Thinking holds Anthropic thinking configuration options.
	//
	// https://docs.anthropic.com/en/api/messages#body-thinking
	Thinking *anthropic.ThinkingConfigParamUnion `json:"thinking,omitzero"`
}

AnthropicVendorFields contains Anthropic vendor-specific fields.

type ChatCompletionAssistantMessageParam

// ChatCompletionAssistantMessageParam represents messages sent by the model
// in response to user messages.
type ChatCompletionAssistantMessageParam struct {
	// The role of the messages author, in this case `assistant`.
	Role string `json:"role"`
	// Data about a previous audio response from the model.
	// [Learn more](https://platform.openai.com/docs/guides/audio).
	Audio ChatCompletionAssistantMessageParamAudio `json:"audio,omitzero"`
	// The contents of the assistant message. Required unless `tool_calls` or
	// `function_call` is specified.
	Content StringOrAssistantRoleContentUnion `json:"content"`
	// An optional name for the participant. Provides the model information to
	// differentiate between participants of the same role.
	Name string `json:"name,omitempty"`
	// The refusal message by the assistant.
	Refusal string `json:"refusal,omitempty"`
	// The tool calls generated by the model, such as function calls.
	ToolCalls []ChatCompletionMessageToolCallParam `json:"tool_calls,omitempty"`
}

ChatCompletionAssistantMessageParam Messages sent by the model in response to user messages.

type ChatCompletionAssistantMessageParamAudio

// ChatCompletionAssistantMessageParamAudio carries data about a previous
// audio response from the model.
// [Learn more](https://platform.openai.com/docs/guides/audio).
type ChatCompletionAssistantMessageParamAudio struct {
	// Unique identifier for a previous audio response from the model.
	ID string `json:"id"`
}

ChatCompletionAssistantMessageParamAudio Data about a previous audio response from the model. [Learn more](https://platform.openai.com/docs/guides/audio).

type ChatCompletionAssistantMessageParamContent

// ChatCompletionAssistantMessageParamContent is one content part of an
// assistant message: either text or a refusal, selected by Type.
// Learn about [text inputs](https://platform.openai.com/docs/guides/text-generation).
type ChatCompletionAssistantMessageParamContent struct {
	// The type of the content part.
	Type ChatCompletionAssistantMessageParamContentType `json:"type"`
	// The refusal message generated by the model.
	Refusal *string `json:"refusal,omitempty"`
	// The text content.
	Text *string `json:"text,omitempty"`
}

ChatCompletionAssistantMessageParamContent Learn about [text inputs](https://platform.openai.com/docs/guides/text-generation).

type ChatCompletionAssistantMessageParamContentType

// ChatCompletionAssistantMessageParamContentType is the type of the content part.
type ChatCompletionAssistantMessageParamContentType string

ChatCompletionAssistantMessageParamContentType The type of the content part.

// Valid values for ChatCompletionAssistantMessageParamContentType.
const (
	ChatCompletionAssistantMessageParamContentTypeText    ChatCompletionAssistantMessageParamContentType = "text"
	ChatCompletionAssistantMessageParamContentTypeRefusal ChatCompletionAssistantMessageParamContentType = "refusal"
)

type ChatCompletionAudioFormat added in v0.3.0

// ChatCompletionAudioFormat represents the output audio format.
type ChatCompletionAudioFormat string

ChatCompletionAudioFormat represents the output audio format.

// Supported values for ChatCompletionAudioFormat.
const (
	// ChatCompletionAudioFormatWav represents WAV audio format.
	ChatCompletionAudioFormatWav ChatCompletionAudioFormat = "wav"
	// ChatCompletionAudioFormatAAC represents AAC audio format.
	ChatCompletionAudioFormatAAC ChatCompletionAudioFormat = "aac"
	// ChatCompletionAudioFormatMP3 represents MP3 audio format.
	ChatCompletionAudioFormatMP3 ChatCompletionAudioFormat = "mp3"
	// ChatCompletionAudioFormatFlac represents FLAC audio format.
	ChatCompletionAudioFormatFlac ChatCompletionAudioFormat = "flac"
	// ChatCompletionAudioFormatOpus represents Opus audio format.
	ChatCompletionAudioFormatOpus ChatCompletionAudioFormat = "opus"
	// ChatCompletionAudioFormatPCM16 represents PCM16 audio format.
	ChatCompletionAudioFormatPCM16 ChatCompletionAudioFormat = "pcm16"
)

type ChatCompletionAudioParam added in v0.3.0

// ChatCompletionAudioParam represents parameters for audio output.
// Required when audio output is requested with modalities: ["audio"].
type ChatCompletionAudioParam struct {
	// Voice specifies the voice the model uses to respond. Supported voices are alloy, ash, ballad, coral, echo, fable, nova, onyx, sage, and shimmer.
	Voice ChatCompletionAudioVoice `json:"voice"`
	// Format specifies the output audio format. This package defines wav, aac, mp3, flac, opus, and pcm16
	// (see the ChatCompletionAudioFormat constants); check the OpenAI API reference for the set currently accepted.
	Format ChatCompletionAudioFormat `json:"format"`
}

ChatCompletionAudioParam represents parameters for audio output. Required when audio output is requested with modalities: ["audio"].

type ChatCompletionAudioVoice added in v0.3.0

// ChatCompletionAudioVoice represents available voices for audio output.
type ChatCompletionAudioVoice string

ChatCompletionAudioVoice represents available voices for audio output.

// Supported values for ChatCompletionAudioVoice.
const (
	// ChatCompletionAudioVoiceAlloy represents the alloy voice.
	ChatCompletionAudioVoiceAlloy ChatCompletionAudioVoice = "alloy"
	// ChatCompletionAudioVoiceAsh represents the ash voice.
	ChatCompletionAudioVoiceAsh ChatCompletionAudioVoice = "ash"
	// ChatCompletionAudioVoiceBallad represents the ballad voice.
	ChatCompletionAudioVoiceBallad ChatCompletionAudioVoice = "ballad"
	// ChatCompletionAudioVoiceCoral represents the coral voice.
	ChatCompletionAudioVoiceCoral ChatCompletionAudioVoice = "coral"
	// ChatCompletionAudioVoiceEcho represents the echo voice.
	ChatCompletionAudioVoiceEcho ChatCompletionAudioVoice = "echo"
	// ChatCompletionAudioVoiceFable represents the fable voice.
	ChatCompletionAudioVoiceFable ChatCompletionAudioVoice = "fable"
	// ChatCompletionAudioVoiceNova represents the nova voice.
	ChatCompletionAudioVoiceNova ChatCompletionAudioVoice = "nova"
	// ChatCompletionAudioVoiceOnyx represents the onyx voice.
	ChatCompletionAudioVoiceOnyx ChatCompletionAudioVoice = "onyx"
	// ChatCompletionAudioVoiceSage represents the sage voice.
	ChatCompletionAudioVoiceSage ChatCompletionAudioVoice = "sage"
	// ChatCompletionAudioVoiceShimmer represents the shimmer voice.
	ChatCompletionAudioVoiceShimmer ChatCompletionAudioVoice = "shimmer"
)

type ChatCompletionChoicesFinishReason

// ChatCompletionChoicesFinishReason is the reason the model stopped generating
// tokens: "stop" for a natural stop point or stop sequence, "length" when the
// token limit was reached, "content_filter" when content was omitted by a
// filter, or "tool_calls" when the model called a tool.
type ChatCompletionChoicesFinishReason string

ChatCompletionChoicesFinishReason The reason the model stopped generating tokens. This will be `stop` if the model hit a natural stop point or a provided stop sequence, `length` if the maximum number of tokens specified in the request was reached, `content_filter` if content was omitted due to a flag from our content filters, `tool_calls` if the model called a tool, or `function_call` (deprecated) if the model called a function.

// Valid values for ChatCompletionChoicesFinishReason.
const (
	ChatCompletionChoicesFinishReasonStop          ChatCompletionChoicesFinishReason = "stop"
	ChatCompletionChoicesFinishReasonLength        ChatCompletionChoicesFinishReason = "length"
	ChatCompletionChoicesFinishReasonToolCalls     ChatCompletionChoicesFinishReason = "tool_calls"
	ChatCompletionChoicesFinishReasonContentFilter ChatCompletionChoicesFinishReason = "content_filter"
)

type ChatCompletionChoicesLogprobs

// ChatCompletionChoicesLogprobs holds log probability information for a choice.
type ChatCompletionChoicesLogprobs struct {
	// A list of message content tokens with log probability information.
	Content []ChatCompletionTokenLogprob `json:"content,omitempty"`
	// A list of message refusal tokens with log probability information.
	Refusal []ChatCompletionTokenLogprob `json:"refusal,omitempty"`
}

ChatCompletionChoicesLogprobs Log probability information for the choice.

type ChatCompletionContentPartImageImageURLDetail

// ChatCompletionContentPartImageImageURLDetail specifies the detail level
// used when the model processes an image input.
type ChatCompletionContentPartImageImageURLDetail string

// Valid image detail levels.
const (
	ChatCompletionContentPartImageImageURLDetailAuto ChatCompletionContentPartImageImageURLDetail = "auto"
	ChatCompletionContentPartImageImageURLDetailLow  ChatCompletionContentPartImageImageURLDetail = "low"
	ChatCompletionContentPartImageImageURLDetailHigh ChatCompletionContentPartImageImageURLDetail = "high"
)

type ChatCompletionContentPartImageImageURLParam

// ChatCompletionContentPartImageImageURLParam describes the image referenced
// by an image content part.
type ChatCompletionContentPartImageImageURLParam struct {
	// Either a URL of the image or the base64 encoded image data.
	URL string `json:"url"`
	// Specifies the detail level of the image. Learn more in the
	// [Vision guide](https://platform.openai.com/docs/guides/vision#low-or-high-fidelity-image-understanding).
	Detail ChatCompletionContentPartImageImageURLDetail `json:"detail,omitempty"`
}

type ChatCompletionContentPartImageParam

// ChatCompletionContentPartImageParam is an image content part.
// Learn about [image inputs](https://platform.openai.com/docs/guides/vision).
type ChatCompletionContentPartImageParam struct {
	// ImageURL holds the image reference and detail level.
	ImageURL ChatCompletionContentPartImageImageURLParam `json:"image_url"`
	// The type of the content part.
	Type ChatCompletionContentPartImageType `json:"type"`
}

ChatCompletionContentPartImageParam Learn about [image inputs](https://platform.openai.com/docs/guides/vision).

type ChatCompletionContentPartImageType

// ChatCompletionContentPartImageType is the type of the content part.
type ChatCompletionContentPartImageType string

ChatCompletionContentPartImageType The type of the content part.

type ChatCompletionContentPartInputAudioInputAudioFormat

// ChatCompletionContentPartInputAudioInputAudioFormat is the format of the
// encoded audio data. Currently supports "wav" and "mp3".
type ChatCompletionContentPartInputAudioInputAudioFormat string

ChatCompletionContentPartInputAudioInputAudioFormat The format of the encoded audio data. Currently supports "wav" and "mp3".

// Valid input audio formats.
const (
	ChatCompletionContentPartInputAudioInputAudioFormatWAV ChatCompletionContentPartInputAudioInputAudioFormat = "wav"
	ChatCompletionContentPartInputAudioInputAudioFormatMP3 ChatCompletionContentPartInputAudioInputAudioFormat = "mp3"
)

type ChatCompletionContentPartInputAudioInputAudioParam

// ChatCompletionContentPartInputAudioInputAudioParam carries the encoded
// audio payload of an input_audio content part.
type ChatCompletionContentPartInputAudioInputAudioParam struct {
	// Base64 encoded audio data.
	Data string `json:"data"`
	// The format of the encoded audio data. Currently supports "wav" and "mp3".
	Format ChatCompletionContentPartInputAudioInputAudioFormat `json:"format"`
}

type ChatCompletionContentPartInputAudioParam

// ChatCompletionContentPartInputAudioParam is an audio content part.
// Learn about [audio inputs](https://platform.openai.com/docs/guides/audio).
type ChatCompletionContentPartInputAudioParam struct {
	// InputAudio holds the encoded audio data and its format.
	InputAudio ChatCompletionContentPartInputAudioInputAudioParam `json:"input_audio"`
	// The type of the content part. Always `input_audio`.
	Type ChatCompletionContentPartInputAudioType `json:"type"`
}

ChatCompletionContentPartInputAudioParam Learn about [audio inputs](https://platform.openai.com/docs/guides/audio).

type ChatCompletionContentPartInputAudioType

// ChatCompletionContentPartInputAudioType is the type of the content part.
// Always `input_audio`.
type ChatCompletionContentPartInputAudioType string

ChatCompletionContentPartInputAudioType The type of the content part. Always `input_audio`.

type ChatCompletionContentPartRefusalParam

// ChatCompletionContentPartRefusalParam is a refusal content part.
type ChatCompletionContentPartRefusalParam struct {
	// The refusal message generated by the model.
	Refusal string `json:"refusal"`
	// The type of the content part.
	Type ChatCompletionContentPartRefusalType `json:"type"`
}

type ChatCompletionContentPartRefusalType

// ChatCompletionContentPartRefusalType is the type of the content part.
type ChatCompletionContentPartRefusalType string

ChatCompletionContentPartRefusalType The type of the content part.

type ChatCompletionContentPartTextParam

// ChatCompletionContentPartTextParam is a text content part.
// Learn about [text inputs](https://platform.openai.com/docs/guides/text-generation).
type ChatCompletionContentPartTextParam struct {
	// The text content.
	Text string `json:"text"`
	// The type of the content part.
	Type string `json:"type"`
}

ChatCompletionContentPartTextParam Learn about [text inputs](https://platform.openai.com/docs/guides/text-generation).

type ChatCompletionContentPartTextType

// ChatCompletionContentPartTextType is the type of the content part.
type ChatCompletionContentPartTextType string

ChatCompletionContentPartTextType The type of the content part.

type ChatCompletionContentPartUserUnionParam

// ChatCompletionContentPartUserUnionParam is a union over the content part
// types a user message may carry (text, input audio, or image); its custom
// MarshalJSON/UnmarshalJSON handle the union encoding. Presumably exactly one
// field is non-nil at a time — confirm against the marshaling implementation.
type ChatCompletionContentPartUserUnionParam struct {
	// TextContent is set when the part is text.
	TextContent       *ChatCompletionContentPartTextParam
	// InputAudioContent is set when the part is input audio.
	InputAudioContent *ChatCompletionContentPartInputAudioParam
	// ImageContent is set when the part is an image.
	ImageContent      *ChatCompletionContentPartImageParam
}

ChatCompletionContentPartUserUnionParam Learn about [text inputs](https://platform.openai.com/docs/guides/text-generation).

func (ChatCompletionContentPartUserUnionParam) MarshalJSON added in v0.3.0

func (c ChatCompletionContentPartUserUnionParam) MarshalJSON() ([]byte, error)

func (*ChatCompletionContentPartUserUnionParam) UnmarshalJSON

func (c *ChatCompletionContentPartUserUnionParam) UnmarshalJSON(data []byte) error

type ChatCompletionDeveloperMessageParam

// ChatCompletionDeveloperMessageParam represents developer-provided
// instructions that the model should follow, regardless of messages sent by
// the user. With o1 models and newer, use `developer` messages instead of
// `system` messages for this purpose.
type ChatCompletionDeveloperMessageParam struct {
	// The contents of the developer message.
	Content StringOrArray `json:"content"`
	// The role of the messages author, in this case `developer`.
	Role string `json:"role"`
	// An optional name for the participant. Provides the model information to
	// differentiate between participants of the same role.
	Name string `json:"name,omitempty"`
}

ChatCompletionDeveloperMessageParam Developer-provided instructions that the model should follow, regardless of messages sent by the user. With o1 models and newer, use `developer` messages for this purpose instead.

type ChatCompletionMessageParamUnion

// ChatCompletionMessageParamUnion is a union over the chat message parameter
// types; its custom MarshalJSON/UnmarshalJSON handle the union encoding.
type ChatCompletionMessageParamUnion struct {
	// Value holds the concrete message parameter value.
	Value interface{}
	// Type discriminates which message type Value holds — presumably one of the
	// ChatMessageRole* values; confirm against the UnmarshalJSON implementation.
	Type  string
}

func (ChatCompletionMessageParamUnion) MarshalJSON added in v0.3.0

func (c ChatCompletionMessageParamUnion) MarshalJSON() ([]byte, error)

func (*ChatCompletionMessageParamUnion) UnmarshalJSON

func (c *ChatCompletionMessageParamUnion) UnmarshalJSON(data []byte) error

type ChatCompletionMessageToolCallFunctionParam

// ChatCompletionMessageToolCallFunctionParam describes the function that the
// model called.
type ChatCompletionMessageToolCallFunctionParam struct {
	// The arguments to call the function with, as generated by the model in JSON
	// format. Note that the model does not always generate valid JSON, and may
	// hallucinate parameters not defined by your function schema. Validate the
	// arguments in your code before calling your function.
	Arguments string `json:"arguments"`
	// The name of the function to call.
	Name string `json:"name"`
}

ChatCompletionMessageToolCallFunctionParam The function that the model called.

type ChatCompletionMessageToolCallParam

// ChatCompletionMessageToolCallParam is a tool call generated by the model.
type ChatCompletionMessageToolCallParam struct {
	// Index is the position of this tool call in the message; it is required
	// when streaming so that argument deltas can be matched to a call.
	Index *int `json:"index,omitempty"`
	// The ID of the tool call.
	ID *string `json:"id"`
	// The function that the model called.
	Function ChatCompletionMessageToolCallFunctionParam `json:"function"`
	// The type of the tool. Currently, only `function` is supported.
	Type ChatCompletionMessageToolCallType `json:"type,omitempty"`
}

type ChatCompletionMessageToolCallType

// ChatCompletionMessageToolCallType is the type of the tool.
// Currently, only `function` is supported.
type ChatCompletionMessageToolCallType string

ChatCompletionMessageToolCallType The type of the tool. Currently, only `function` is supported.

// Valid values for ChatCompletionMessageToolCallType.
const (
	ChatCompletionMessageToolCallTypeFunction ChatCompletionMessageToolCallType = "function"
)

type ChatCompletionModality added in v0.3.0

// ChatCompletionModality represents the output types that the model can generate.
type ChatCompletionModality string

ChatCompletionModality represents the output types that the model can generate.

// Valid values for ChatCompletionModality.
const (
	// ChatCompletionModalityText represents text output.
	ChatCompletionModalityText ChatCompletionModality = "text"
	// ChatCompletionModalityAudio represents audio output.
	ChatCompletionModalityAudio ChatCompletionModality = "audio"
)

type ChatCompletionNamedToolChoice added in v0.3.0

// ChatCompletionNamedToolChoice specifies a tool the model should use.
// Use to force the model to call a specific function.
type ChatCompletionNamedToolChoice struct {
	// Type is the type of the tool. Currently, only `function` is supported.
	Type ToolChoiceType `json:"type"`
	// Function specifies the function to call.
	Function ChatCompletionNamedToolChoiceFunction `json:"function"`
}

ChatCompletionNamedToolChoice specifies a tool the model should use. Use to force the model to call a specific function.

type ChatCompletionNamedToolChoiceFunction added in v0.3.0

// ChatCompletionNamedToolChoiceFunction represents the function to call.
type ChatCompletionNamedToolChoiceFunction struct {
	// Name is the name of the function to call.
	Name string `json:"name"`
}

ChatCompletionNamedToolChoiceFunction represents the function to call.

type ChatCompletionRequest

// ChatCompletionRequest represents a request structure for the chat
// completion API (/v1/chat/completions).
// Docs: https://platform.openai.com/docs/api-reference/chat/create
type ChatCompletionRequest struct {
	// Messages: A list of messages comprising the conversation so far.
	// Depending on the model you use, different message types (modalities) are supported,
	// like text, images, and audio.
	// Docs: https://platform.openai.com/docs/api-reference/chat/create#chat-create-messages
	Messages []ChatCompletionMessageParamUnion `json:"messages"`

	// Model: ID of the model to use
	// Docs: https://platform.openai.com/docs/api-reference/chat/create#chat-create-model
	Model string `json:"model"`

	// FrequencyPenalty: Number between -2.0 and 2.0
	// Docs: https://platform.openai.com/docs/api-reference/chat/create#chat-create-frequency_penalty
	FrequencyPenalty *float32 `json:"frequency_penalty,omitempty"` //nolint:tagliatelle //follow openai api

	// LogitBias Modify the likelihood of specified tokens appearing in the completion.
	// It must be a token id string (specified by their token ID in the tokenizer), not a word string.
	// incorrect: `"logit_bias":{"You": 6}`, correct: `"logit_bias":{"1639": 6}`
	// Docs: https://platform.openai.com/docs/api-reference/chat/create#chat-create-logit_bias
	LogitBias map[string]int `json:"logit_bias,omitempty"` //nolint:tagliatelle //follow openai api

	// LogProbs indicates whether to return log probabilities of the output tokens or not.
	// If true, returns the log probabilities of each output token returned in the content of message.
	// Docs: https://platform.openai.com/docs/api-reference/chat/create#chat-create-logprobs
	LogProbs *bool `json:"logprobs,omitempty"`

	// TopLogProbs is an integer between 0 and 5 specifying the number of most likely tokens to return at each
	// token position, each with an associated log probability.
	// logprobs must be set to true if this parameter is used.
	// Docs: https://platform.openai.com/docs/api-reference/chat/create#chat-create-top_logprobs
	TopLogProbs *int `json:"top_logprobs,omitempty"` //nolint:tagliatelle //follow openai api

	// MaxTokens The maximum number of tokens that can be generated in the chat completion.
	// This value can be used to control costs for text generated via API.
	// This value is now deprecated in favor of max_completion_tokens, and is not compatible with o1 series models.
	// refs: https://platform.openai.com/docs/api-reference/chat/create#chat-create-max_tokens
	MaxTokens *int64 `json:"max_tokens,omitempty"` //nolint:tagliatelle //follow openai api

	// MaxCompletionTokens is an Optional integer
	// An upper bound for the number of tokens that can be generated for a completion, including visible output tokens and reasoning tokens.
	// refs: https://platform.openai.com/docs/api-reference/chat/create#chat-create-max_completion_tokens
	MaxCompletionTokens *int64 `json:"max_completion_tokens,omitempty"` //nolint:tagliatelle //follow openai api

	// N: LLM Gateway does not support multiple completions.
	// The only accepted value is 1.
	// Docs: https://platform.openai.com/docs/api-reference/chat/create#chat-create-n
	N *int `json:"n,omitempty"`

	// PresencePenalty Positive values penalize new tokens based on whether they appear in the text so far,
	// increasing the model's likelihood to talk about new topics.
	// Docs: https://platform.openai.com/docs/api-reference/chat/create#chat-create-presence_penalty
	PresencePenalty *float32 `json:"presence_penalty,omitempty"` //nolint:tagliatelle //follow openai api

	// Reasoning configures reasoning effort.
	// o-series models only
	// refs: https://platform.openai.com/docs/api-reference/responses/create#responses-create-reasoning
	Reasoning *Reasoning `json:"reasoning,omitempty"`

	// ResponseFormat is only for GPT models.
	// Docs: https://platform.openai.com/docs/api-reference/chat/create#chat-create-response_format
	ResponseFormat *ChatCompletionResponseFormat `json:"response_format,omitempty"` //nolint:tagliatelle //follow openai api

	// Seed: This feature is in Beta. If specified, our system will make a best effort to
	// sample deterministically, such that repeated requests with the same `seed` and
	// parameters should return the same result. Determinism is not guaranteed, and you
	// should refer to the `system_fingerprint` response parameter to monitor changes
	// in the backend.
	// Docs: https://platform.openai.com/docs/api-reference/chat/create#chat-create-seed
	Seed *int `json:"seed,omitempty"`

	// ServiceTier:string or null - Defaults to auto
	// Specifies the processing type used for serving the request.
	// If set to 'auto', then the request will be processed with the service tier configured in the Project settings. Unless otherwise configured, the Project will use 'default'.
	// If set to 'default', then the request will be processed with the standard pricing and performance for the selected model.
	// If set to 'flex' or 'priority', then the request will be processed with the corresponding service tier.
	// When the service_tier parameter is set, the response body will include the service_tier value based on the processing mode actually used to serve the request.
	// This response value may be different from the value set in the parameter.
	// Docs: https://platform.openai.com/docs/api-reference/chat/create#chat-create-service_tier
	ServiceTier *string `json:"service_tier,omitempty"`

	// Stop string / array / null Defaults to null
	// Up to 4 sequences where the API will stop generating further tokens.
	// Docs: https://platform.openai.com/docs/api-reference/chat/create#chat-create-stop
	Stop interface{} `json:"stop,omitempty"`

	// Stream: If set, partial message deltas will be sent, like in ChatGPT.
	// Docs: https://platform.openai.com/docs/api-reference/chat/create#chat-create-stream
	Stream bool `json:"stream,omitempty"`

	// StreamOptions for streaming response. Only set this when you set stream: true.
	// Docs: https://platform.openai.com/docs/api-reference/chat/create#chat-create-stream_options
	StreamOptions *StreamOptions `json:"stream_options,omitempty"` //nolint:tagliatelle //follow openai api

	// Temperature What sampling temperature to use, between 0 and 2.
	// Docs: https://platform.openai.com/docs/api-reference/chat/create#chat-create-temperature
	Temperature *float64 `json:"temperature,omitempty"`

	// TopP An alternative to sampling with temperature, called nucleus sampling,
	// where the model considers the results of the tokens with top_p probability mass.
	// So 0.1 means only the tokens comprising the top 10% probability mass are considered.
	// Docs: https://platform.openai.com/docs/api-reference/chat/create#chat-create-top_p
	TopP *float64 `json:"top_p,omitempty"` //nolint:tagliatelle //follow openai api

	// Tools provide a list of tool definitions to be used by the LLM.
	// Docs: https://platform.openai.com/docs/api-reference/chat/create#chat-create-tools
	Tools []Tool `json:"tools,omitempty"`

	// ToolChoice controls which (if any) tool is called by the model.
	// `none` means the model will not call any tool and instead generates a message.
	// `auto` means the model can pick between generating a message or calling one or more tools.
	// `required` means the model must call one or more tools.
	// Specifying a particular tool via `{"type": "function", "function": {"name": "my_function"}}` forces the model to call that tool.
	// `none` is the default when no tools are present. `auto` is the default if tools are present.
	// Docs: https://platform.openai.com/docs/api-reference/chat/create#chat-create-tool_choice
	ToolChoice ChatCompletionToolChoice `json:"tool_choice,omitempty"` //nolint:tagliatelle //follow openai api

	// ParallelToolCalls enables multiple tools to be returned by the model.
	// Docs: https://platform.openai.com/docs/guides/function-calling/parallel-function-calling
	ParallelToolCalls *bool `json:"parallel_tool_calls,omitempty"` //nolint:tagliatelle //follow openai api

	// User: A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse.
	// Docs: https://platform.openai.com/docs/api-reference/chat/create#chat-create-user
	User string `json:"user,omitempty"`

	// Modalities specifies the output types that you would like the model to generate. Most models are capable of generating text, which is the default. The gpt-4o-audio-preview model can also generate audio.
	Modalities []ChatCompletionModality `json:"modalities,omitempty"`

	// Audio specifies parameters for audio output. Required when audio output is requested with modalities: ["audio"].
	Audio *ChatCompletionAudioParam `json:"audio,omitempty"`

	// PredictionContent provides configuration for a Predicted Output, which can greatly improve response times when large parts of the model response are known ahead of time.
	PredictionContent *PredictionContent `json:"prediction,omitempty"`

	// WebSearchOptions configures web search tool for models that support it.
	// This tool searches the web for relevant results to use in a response.
	// Docs: https://platform.openai.com/docs/guides/tools-web-search?api-mode=chat
	WebSearchOptions *WebSearchOptions `json:"web_search_options,omitempty"` //nolint:tagliatelle //follow openai api

	// Vendor-specific extension fields, inlined into the same JSON object.
	*GCPVertexAIVendorFields `json:",inline,omitempty"`
	*AnthropicVendorFields   `json:",inline,omitempty"`
}

type ChatCompletionResponse

// ChatCompletionResponse represents a response from /v1/chat/completions.
// https://platform.openai.com/docs/api-reference/chat/object
type ChatCompletionResponse struct {
	// ID is a unique identifier for the chat completion.
	ID string `json:"id,omitempty"`
	// Choices are described in the OpenAI API documentation:
	// https://platform.openai.com/docs/api-reference/chat/object#chat/object-choices
	Choices []ChatCompletionResponseChoice `json:"choices,omitempty"`

	// Created is the Unix timestamp (in seconds) of when the chat completion was created.
	Created JSONUNIXTime `json:"created,omitzero"`

	// Model is the model used for the chat completion.
	Model string `json:"model,omitempty"`

	// ServiceTier is the service tier used for the completion.
	ServiceTier string `json:"service_tier,omitempty"`

	// SystemFingerprint represents the backend configuration that the model runs with.
	SystemFingerprint string `json:"system_fingerprint,omitempty"`

	// Object is always "chat.completion" for completions.
	// https://platform.openai.com/docs/api-reference/chat/object#chat/object-object
	Object string `json:"object,omitempty"`

	// Usage is described in the OpenAI API documentation:
	// https://platform.openai.com/docs/api-reference/chat/object#chat/object-usage
	Usage ChatCompletionResponseUsage `json:"usage,omitzero"`

	// Obfuscation are random characters that normalize payload sizes as a
	// mitigation to certain side-channel attacks.
	// https://platform.openai.com/docs/api-reference/responses/get#responses_get-include_obfuscation
	Obfuscation string `json:"obfuscation,omitempty"`
}

ChatCompletionResponse represents a response from /v1/chat/completions. https://platform.openai.com/docs/api-reference/chat/object

type ChatCompletionResponseChoice

// ChatCompletionResponseChoice is a single completion choice, as described in
// the OpenAI API documentation:
// https://platform.openai.com/docs/api-reference/chat/object#chat/object-choices
type ChatCompletionResponseChoice struct {
	// The reason the model stopped generating tokens. This will be `stop` if the model
	// hit a natural stop point or a provided stop sequence, `length` if the maximum
	// number of tokens specified in the request was reached, `content_filter` if
	// content was omitted due to a flag from our content filters, `tool_calls` if the
	// model called a tool, or `function_call` (deprecated) if the model called a
	// function.
	FinishReason ChatCompletionChoicesFinishReason `json:"finish_reason"`
	// The index of the choice in the list of choices.
	Index int64 `json:"index"`
	// Log probability information for the choice.
	Logprobs ChatCompletionChoicesLogprobs `json:"logprobs,omitzero"`
	// Message is described in the OpenAI API documentation:
	// https://platform.openai.com/docs/api-reference/chat/object#chat/object-choices
	Message ChatCompletionResponseChoiceMessage `json:"message,omitempty"`
}

ChatCompletionResponseChoice is described in the OpenAI API documentation: https://platform.openai.com/docs/api-reference/chat/object#chat/object-choices

type ChatCompletionResponseChoiceMessage

// ChatCompletionResponseChoiceMessage is the message of a completion choice,
// as described in the OpenAI API documentation:
// https://platform.openai.com/docs/api-reference/chat/object#chat/object-choices
type ChatCompletionResponseChoiceMessage struct {
	// The contents of the message.
	Content *string `json:"content,omitempty"`

	// The role of the author of this message.
	Role string `json:"role,omitempty"`

	// The tool calls generated by the model, such as function calls.
	ToolCalls []ChatCompletionMessageToolCallParam `json:"tool_calls,omitempty"`

	// Annotations for the message, when applicable, as when using the web search tool.
	Annotations *[]Annotation `json:"annotations,omitempty"`

	// Audio is the audio response generated by the model, if applicable.
	Audio *ChatCompletionResponseChoiceMessageAudio `json:"audio,omitempty"`
}

ChatCompletionResponseChoiceMessage is described in the OpenAI API documentation: https://platform.openai.com/docs/api-reference/chat/object#chat/object-choices

type ChatCompletionResponseChoiceMessageAudio added in v0.3.0

// ChatCompletionResponseChoiceMessageAudio is the audio response generated by
// the model, as described in the OpenAI API documentation.
type ChatCompletionResponseChoiceMessageAudio struct {
	// Data is the audio payload — presumably base64 encoded, per the OpenAI
	// audio output documentation; confirm against the API reference.
	Data       string `json:"data"`
	// ExpiresAt is a Unix timestamp (seconds) after which the audio is no
	// longer accessible — TODO confirm units against the API reference.
	ExpiresAt  int64  `json:"expires_at"`
	// ID is the unique identifier of this audio response.
	ID         string `json:"id"`
	// Transcript is the text transcript of the audio.
	Transcript string `json:"transcript"`
}

ChatCompletionResponseChoiceMessageAudio is described in the OpenAI API documentation.

type ChatCompletionResponseChunk

// ChatCompletionResponseChunk is a single streamed chunk of a chat
// completion, as described in the OpenAI API documentation:
// https://platform.openai.com/docs/api-reference/chat/streaming
type ChatCompletionResponseChunk struct {
	// ID is a unique identifier for the chat completion chunk.
	ID string `json:"id,omitempty"`
	// Choices are described in the OpenAI API documentation:
	// https://platform.openai.com/docs/api-reference/chat/streaming#chat/streaming-choices
	Choices []ChatCompletionResponseChunkChoice `json:"choices,omitempty"`

	// Created is the Unix timestamp (in seconds) of when the chat completion was created.
	Created JSONUNIXTime `json:"created,omitzero"`

	// Model is the model used for the chat completion.
	Model string `json:"model,omitempty"`

	// ServiceTier is the service tier used for the completion.
	ServiceTier string `json:"service_tier,omitempty"`

	// SystemFingerprint represents the backend configuration that the model runs with.
	SystemFingerprint string `json:"system_fingerprint,omitempty"`

	// Object is always "chat.completion.chunk" for completions.
	// https://platform.openai.com/docs/api-reference/chat/streaming#chat/streaming-object
	Object string `json:"object,omitempty"`

	// Usage is described in the OpenAI API documentation:
	// https://platform.openai.com/docs/api-reference/chat/streaming#chat/streaming-usage
	Usage *ChatCompletionResponseUsage `json:"usage,omitempty"`

	// Obfuscation are random characters that normalize payload sizes as a
	// mitigation to certain side-channel attacks.
	// https://platform.openai.com/docs/api-reference/responses/get#responses_get-include_obfuscation
	Obfuscation string `json:"obfuscation,omitempty"`
}

ChatCompletionResponseChunk is described in the OpenAI API documentation: https://platform.openai.com/docs/api-reference/chat/streaming#chat-create-messages

func (*ChatCompletionResponseChunk) String

func (c *ChatCompletionResponseChunk) String() string

String implements fmt.Stringer.

type ChatCompletionResponseChunkChoice

// ChatCompletionResponseChunkChoice is described in the OpenAI API documentation:
// https://platform.openai.com/docs/api-reference/chat/streaming#chat/streaming-choices
type ChatCompletionResponseChunkChoice struct {
	// Index is the index of this choice in the list of streamed choices.
	Index int64 `json:"index"`
	// Delta is the incremental message content carried by this chunk.
	Delta *ChatCompletionResponseChunkChoiceDelta `json:"delta,omitzero"`
	// Logprobs carries log probability information, when requested.
	Logprobs *ChatCompletionChoicesLogprobs `json:"logprobs,omitzero"`
	// FinishReason is set on the final chunk of a choice and omitted otherwise.
	FinishReason ChatCompletionChoicesFinishReason `json:"finish_reason,omitempty"`
}

ChatCompletionResponseChunkChoice is described in the OpenAI API documentation: https://platform.openai.com/docs/api-reference/chat/streaming#chat/streaming-choices

type ChatCompletionResponseChunkChoiceDelta

// ChatCompletionResponseChunkChoiceDelta is described in the OpenAI API documentation:
// https://platform.openai.com/docs/api-reference/chat/streaming#chat/streaming-choices
type ChatCompletionResponseChunkChoiceDelta struct {
	// Content is the incremental text content, if any.
	Content *string `json:"content,omitempty"`
	// Role is the author role of the message (typically only present on the
	// first chunk of a message, per the OpenAI streaming docs).
	Role string `json:"role,omitempty"`
	// ToolCalls are incremental tool call fragments for this chunk.
	ToolCalls []ChatCompletionMessageToolCallParam `json:"tool_calls,omitempty"`
	// Annotations are annotations (e.g. URL citations) attached to the message.
	Annotations *[]Annotation `json:"annotations,omitempty"`
}

ChatCompletionResponseChunkChoiceDelta is described in the OpenAI API documentation: https://platform.openai.com/docs/api-reference/chat/streaming#chat/streaming-choices

type ChatCompletionResponseFormat

// ChatCompletionResponseFormat specifies the format that the model must output.
type ChatCompletionResponseFormat struct {
	// Type selects the response format: "text", "json_object" or "json_schema".
	Type ChatCompletionResponseFormatType `json:"type,omitempty"`
	// JSONSchema describes the schema to conform to; set when Type is "json_schema".
	JSONSchema *ChatCompletionResponseFormatJSONSchema `json:"json_schema,omitempty"` //nolint:tagliatelle //follow openai api
}

type ChatCompletionResponseFormatJSONSchema

// ChatCompletionResponseFormatJSONSchema describes a JSON schema that the
// model's output must conform to when using structured outputs.
type ChatCompletionResponseFormatJSONSchema struct {
	// Name is the name of the response format.
	Name string `json:"name"`
	// Description tells the model when and how to use the format.
	Description string `json:"description,omitempty"`
	// Schema is the JSON schema object.
	// See detail in https://github.com/openai/openai-go/blob/28c93a9fa58bb622b5d23b3262af7d4fdd2ebde9/shared/shared.go#L519C6-L519C30
	Schema any `json:"schema"`
	// Strict enables strict adherence to the schema.
	Strict bool `json:"strict"`
}

type ChatCompletionResponseFormatType

// ChatCompletionResponseFormatType is the type of a chat completion response format.
type ChatCompletionResponseFormatType string

const (
	// ChatCompletionResponseFormatTypeJSONObject forces the model to emit valid JSON.
	ChatCompletionResponseFormatTypeJSONObject ChatCompletionResponseFormatType = "json_object"
	// ChatCompletionResponseFormatTypeJSONSchema forces output matching a supplied JSON schema.
	ChatCompletionResponseFormatTypeJSONSchema ChatCompletionResponseFormatType = "json_schema"
	// ChatCompletionResponseFormatTypeText is the default free-form text output.
	ChatCompletionResponseFormatTypeText ChatCompletionResponseFormatType = "text"
)

type ChatCompletionResponseUsage

type ChatCompletionResponseUsage struct {
	// Number of tokens in the generated completion.
	CompletionTokens int `json:"completion_tokens,omitzero"`
	// Number of tokens in the prompt.
	PromptTokens int `json:"prompt_tokens,omitzero"`
	// Total number of tokens used in the request (prompt + completion).
	TotalTokens             int                      `json:"total_tokens,omitzero"`
	CompletionTokensDetails *CompletionTokensDetails `json:"completion_tokens_details,omitzero"`
	PromptTokensDetails     *PromptTokensDetails     `json:"prompt_tokens_details,omitzero"`
}

ChatCompletionResponseUsage is described in the OpenAI API documentation: https://platform.openai.com/docs/api-reference/chat/object#chat/object-usage

type ChatCompletionSystemMessageParam

type ChatCompletionSystemMessageParam struct {
	// The contents of the system message.
	Content StringOrArray `json:"content"`
	// The role of the messages author, in this case `system`.
	Role string `json:"role"`
	// An optional name for the participant. Provides the model information to
	// differentiate between participants of the same role.
	Name string `json:"name,omitempty"`
}

ChatCompletionSystemMessageParam represents developer-provided instructions that the model should follow, regardless of messages sent by the user. With o1 models and newer, use `developer` messages for this purpose instead.

type ChatCompletionTokenLogprob

type ChatCompletionTokenLogprob struct {
	// The token.
	Token string `json:"token"`
	// A list of integers representing the UTF-8 bytes representation of the token.
	// Useful in instances where characters are represented by multiple tokens and
	// their byte representations must be combined to generate the correct text
	// representation. Can be `null` if there is no bytes representation for the token.
	Bytes []int64 `json:"bytes,omitempty"`
	// The log probability of this token, if it is within the top 20 most likely
	// tokens. Otherwise, the value `-9999.0` is used to signify that the token is very
	// unlikely.
	Logprob float64 `json:"logprob"`
	// List of the most likely tokens and their log probability, at this token
	// position. In rare cases, there may be fewer than the number of requested
	// `top_logprobs` returned.
	TopLogprobs []ChatCompletionTokenLogprobTopLogprob `json:"top_logprobs"`
}

type ChatCompletionTokenLogprobTopLogprob

type ChatCompletionTokenLogprobTopLogprob struct {
	// The token.
	Token string `json:"token"`
	// A list of integers representing the UTF-8 bytes representation of the token.
	// Useful in instances where characters are represented by multiple tokens and
	// their byte representations must be combined to generate the correct text
	// representation. Can be `null` if there is no bytes representation for the token.
	Bytes []int64 `json:"bytes,omitempty"`
	// The log probability of this token, if it is within the top 20 most likely
	// tokens. Otherwise, the value `-9999.0` is used to signify that the token is very
	// unlikely.
	Logprob float64 `json:"logprob"`
}

type ChatCompletionToolChoice added in v0.3.0

// ChatCompletionToolChoice represents the tool choice for chat completions.
// It can be either a string ("none", "auto", "required") or a
// ChatCompletionNamedToolChoice object.
type ChatCompletionToolChoice any

ChatCompletionToolChoice represents the tool choice for chat completions. It can be either a string (none, auto, required) or a ChatCompletionNamedToolChoice object.

type ChatCompletionToolMessageParam

type ChatCompletionToolMessageParam struct {
	// The contents of the tool message.
	Content StringOrArray `json:"content"`
	// The role of the messages author, in this case `tool`.
	Role string `json:"role"`
	// Tool call that this message is responding to.
	ToolCallID string `json:"tool_call_id"`
}

type ChatCompletionUserMessageParam

type ChatCompletionUserMessageParam struct {
	// The contents of the user message.
	Content StringOrUserRoleContentUnion `json:"content"`
	// The role of the messages author, in this case `user`.
	Role string `json:"role"`
	// An optional name for the participant. Provides the model information to
	// differentiate between participants of the same role.
	Name string `json:"name,omitempty"`
}

ChatCompletionUserMessageParam represents messages sent by an end user, containing prompts or additional context information.

type CompletionTokensDetails added in v0.3.0

// CompletionTokensDetails is a breakdown of tokens used in a completion.
type CompletionTokensDetails struct {
	// TextTokens is the number of text tokens in the completion.
	// NOTE(review): the original comment said "present in the prompt", which
	// appears copied from PromptTokensDetails — confirm against the API docs.
	TextTokens int `json:"text_tokens,omitzero"`

	// AcceptedPredictionTokens: when using Predicted Outputs, the number of
	// tokens in the prediction that appeared in the completion.
	AcceptedPredictionTokens int `json:"accepted_prediction_tokens,omitzero"`
	// AudioTokens is the number of audio tokens generated by the model.
	AudioTokens int `json:"audio_tokens,omitzero"`
	// ReasoningTokens is the number of tokens generated by the model for reasoning.
	ReasoningTokens int `json:"reasoning_tokens,omitzero"`
	// RejectedPredictionTokens: when using Predicted Outputs, the number of
	// tokens in the prediction that did not appear in the completion.
	// However, like reasoning tokens, these tokens are still counted in the
	// total completion tokens for purposes of billing, output, and context
	// window limits.
	RejectedPredictionTokens int `json:"rejected_prediction_tokens,omitzero"`
}

CompletionTokensDetails is a breakdown of tokens used in a completion.

type Embedding added in v0.3.0

type Embedding struct {
	// Object: The object type, which is always "embedding".
	Object string `json:"object"`

	// Embedding: The embedding vector, which can be a list of floats or a string.
	// The length of vector depends on the model as listed in the embedding guide.
	Embedding EmbeddingUnion `json:"embedding"`

	// Index: The index of the embedding in the list of embeddings.
	Index int `json:"index"`
}

Embedding represents a single embedding vector. https://platform.openai.com/docs/api-reference/embeddings/object#embeddings/object-data

type EmbeddingRequest added in v0.3.0

type EmbeddingRequest struct {
	// Input: Input text to embed, encoded as a string or array of tokens.
	// To embed multiple inputs in a single request, pass an array of strings or array of token arrays.
	// The input must not exceed the max input tokens for the model (8192 tokens for text-embedding-ada-002),
	// cannot be an empty string, and any array must be 2048 dimensions or less.
	// Docs: https://platform.openai.com/docs/api-reference/embeddings/create#embeddings-create-input
	Input StringOrArray `json:"input"`

	// Model: ID of the model to use.
	// Docs: https://platform.openai.com/docs/api-reference/embeddings/create#embeddings-create-model
	Model string `json:"model"`

	// EncodingFormat: The format to return the embeddings in. Can be either float or base64.
	// Docs: https://platform.openai.com/docs/api-reference/embeddings/create#embeddings-create-encoding_format
	EncodingFormat *string `json:"encoding_format,omitempty"` //nolint:tagliatelle //follow openai api

	// Dimensions: The number of dimensions the resulting output embeddings should have.
	// Only supported in text-embedding-3 and later models.
	// Docs: https://platform.openai.com/docs/api-reference/embeddings/create#embeddings-create-dimensions
	Dimensions *int `json:"dimensions,omitempty"`

	// User: A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse.
	// Docs: https://platform.openai.com/docs/api-reference/embeddings/create#embeddings-create-user
	User *string `json:"user,omitempty"`
}

EmbeddingRequest represents a request structure for embeddings API.

type EmbeddingResponse added in v0.3.0

type EmbeddingResponse struct {
	// Object: The object type, which is always "list".
	// https://platform.openai.com/docs/api-reference/embeddings/object#embeddings/object-object
	Object string `json:"object"`

	// Data: The list of embeddings generated by the model.
	// https://platform.openai.com/docs/api-reference/embeddings/object#embeddings/object-data
	Data []Embedding `json:"data"`

	// Model: The name of the model used to generate the embedding.
	// https://platform.openai.com/docs/api-reference/embeddings/object#embeddings/object-model
	Model string `json:"model"`

	// Usage: The usage information for the request.
	// https://platform.openai.com/docs/api-reference/embeddings/object#embeddings/object-usage
	Usage EmbeddingUsage `json:"usage"`
}

EmbeddingResponse represents a response from /v1/embeddings. https://platform.openai.com/docs/api-reference/embeddings/object

type EmbeddingUnion added in v0.3.0

// EmbeddingUnion is a union type that can handle both []float64 and string formats.
type EmbeddingUnion struct {
	// Value holds the embedding in whichever representation was decoded:
	// []float64 (float encoding) or string (base64 encoding).
	Value any
}

EmbeddingUnion is a union type that can handle both []float64 and string formats.

func (EmbeddingUnion) MarshalJSON added in v0.3.0

func (e EmbeddingUnion) MarshalJSON() ([]byte, error)

MarshalJSON implements json.Marshaler.

func (*EmbeddingUnion) UnmarshalJSON added in v0.3.0

func (e *EmbeddingUnion) UnmarshalJSON(data []byte) error

UnmarshalJSON implements json.Unmarshaler to handle both []float64 and string formats.

type EmbeddingUsage added in v0.3.0

type EmbeddingUsage struct {
	// PromptTokens: The number of tokens used by the prompt.
	PromptTokens int `json:"prompt_tokens"` //nolint:tagliatelle //follow openai api

	// TotalTokens: The total number of tokens used by the request.
	TotalTokens int `json:"total_tokens"` //nolint:tagliatelle //follow openai api
}

EmbeddingUsage represents the usage information for an embeddings request. https://platform.openai.com/docs/api-reference/embeddings/object#embeddings/object-usage

type Error

type Error struct {
	// The unique ID of the server event.
	EventID *string `json:"event_id,omitempty"`
	// The event type, must be error.
	Type string `json:"type"`
	// Details of the error.
	Error ErrorType `json:"error"`
}

Error is described in the OpenAI API documentation https://platform.openai.com/docs/api-reference/realtime-server-events/error

type ErrorType

// ErrorType contains the details of an error returned by the OpenAI API.
type ErrorType struct {
	// The type of error (e.g., "invalid_request_error", "server_error").
	Type string `json:"type"`
	// Error code, if any.
	Code *string `json:"code,omitempty"`
	// A human-readable error message.
	Message string `json:"message,omitempty"`
	// Parameter related to the error, if any.
	Param *string `json:"param,omitempty"`
	// The event_id of the client event that caused the error, if applicable.
	EventID *string `json:"event_id,omitempty"`
}

type FunctionDefine deprecated

// FunctionDefine is an alias kept for backward compatibility.
//
// Deprecated: use FunctionDefinition instead.
type FunctionDefine = FunctionDefinition

Deprecated: use FunctionDefinition instead.

type FunctionDefinition

type FunctionDefinition struct {
	// Name is the name of the function to be called. Must be a-z, A-Z, 0-9, or contain underscores and dashes, with a maximum length of 64.
	Name string `json:"name"`
	// Description is a description of what the function does, used by the model to choose when and how to call the function.
	Description string `json:"description,omitempty"`
	Strict      bool   `json:"strict,omitempty"`
	// Parameters is an object describing the function.
	// You can pass json.RawMessage to describe the schema,
	// or you can pass in a struct which serializes to the proper JSON schema.
	// The jsonschema package is provided for convenience, but you should
	// consider another specialized library if you require more complex schemas.
	Parameters any `json:"parameters"`
}

FunctionDefinition represents a function that can be called.

type GCPVertexAIGenerationConfig added in v0.3.0

type GCPVertexAIGenerationConfig struct {
	// ThinkingConfig holds Gemini thinking configuration options.
	//
	// https://cloud.google.com/vertex-ai/docs/reference/rest/v1/GenerationConfig#ThinkingConfig
	ThinkingConfig *genai.GenerationConfigThinkingConfig `json:"thinkingConfig,omitzero"`
}

GCPVertexAIGenerationConfig represents Gemini generation configuration options.

type GCPVertexAIVendorFields added in v0.3.0

type GCPVertexAIVendorFields struct {
	// GenerationConfig holds Gemini generation configuration options.
	// Currently only a subset of the options are supported.
	//
	// https://cloud.google.com/vertex-ai/docs/reference/rest/v1/GenerationConfig
	GenerationConfig *GCPVertexAIGenerationConfig `json:"generationConfig,omitzero"`
}

GCPVertexAIVendorFields contains GCP Vertex AI (Gemini) vendor-specific fields.

type JSONUNIXTime

// JSONUNIXTime is a helper type to marshal/unmarshal time.Time UNIX timestamps.
type JSONUNIXTime time.Time

JSONUNIXTime is a helper type to marshal/unmarshal time.Time UNIX timestamps.

func (JSONUNIXTime) Equal added in v0.3.0

func (t JSONUNIXTime) Equal(other JSONUNIXTime) bool

Equal compares two JSONUNIXTime values for equality. This is only for testing purposes.

func (JSONUNIXTime) MarshalJSON

func (t JSONUNIXTime) MarshalJSON() ([]byte, error)

MarshalJSON implements json.Marshaler.

func (*JSONUNIXTime) UnmarshalJSON

func (t *JSONUNIXTime) UnmarshalJSON(s []byte) error

UnmarshalJSON implements json.Unmarshaler.

type LogProb

type LogProb struct {
	Token   string  `json:"token"`
	LogProb float64 `json:"logprob"`
	Bytes   []byte  `json:"bytes,omitempty"` // Omitting the field if it is null.
	// TopLogProbs is a list of the most likely tokens and their log probability, at this token position.
	// In rare cases, there may be fewer than the number of requested top_logprobs returned.
	TopLogProbs []TopLogProbs `json:"top_logprobs"` //nolint:tagliatelle //follow openai api
}

LogProb represents the probability information for a token.

type LogProbs

type LogProbs struct {
	// Content is a list of message content tokens with log probability information.
	Content []LogProb `json:"content"`
}

LogProbs is the top-level structure containing the log probability information.

type Model

type Model struct {
	// ID is the model identifier, which can be referenced in the API endpoints.
	ID string `json:"id"`
	// Created is the Unix timestamp (in seconds) when the model was created.
	Created JSONUNIXTime `json:"created"`
	// Object is the object type, which is always "model".
	Object string `json:"object"`
	// OwnedBy is the organization that owns the model.
	OwnedBy string `json:"owned_by"`
}

Model is described in the OpenAI API documentation https://platform.openai.com/docs/api-reference/models/object

type ModelList

type ModelList struct {
	// Data is a list of models.
	Data []Model `json:"data"`
	// Object is the object type, which is always "list".
	Object string `json:"object"`
}

ModelList is described in the OpenAI API documentation https://platform.openai.com/docs/api-reference/models/list

type PredictionContent added in v0.3.0

type PredictionContent struct {
	// Type is the type of the predicted content you want to provide. This type is currently always content.
	Type PredictionContentType `json:"type"`
	// Content is the content that should be matched when generating a model response. If generated tokens would match this content, the entire model response can be returned much more quickly.
	Content StringOrArray `json:"content"`
}

PredictionContent represents static predicted output content, such as the content of a text file that is being regenerated.

type PredictionContentType added in v0.3.0

type PredictionContentType string

PredictionContentType represents the type of predicted content.

const (
	// PredictionContentTypeContent represents static content prediction.
	PredictionContentTypeContent PredictionContentType = "content"
)

type PromptTokensDetails added in v0.3.0

type PromptTokensDetails struct {
	// Text input tokens present in the prompt.
	TextTokens int `json:"text_tokens,omitzero"`

	// Audio input tokens present in the prompt.
	AudioTokens int `json:"audio_tokens,omitzero"`
	// Cached tokens present in the prompt.
	CachedTokens int `json:"cached_tokens,omitzero"`
}

PromptTokensDetails is a breakdown of tokens used in the prompt.

type Reasoning added in v0.3.0

type Reasoning struct {
	// Effort constrains effort on reasoning for reasoning models.
	// Supported values: "low", "medium", "high". Defaults to "medium".
	Effort *string `json:"effort,omitempty"`

	// GenerateSummary is deprecated. Use Summary instead.
	// Supported values: "auto", "concise", "detailed".
	GenerateSummary *string `json:"generate_summary,omitempty"`

	// Summary of the reasoning performed by the model.
	// Supported values: "auto", "concise", "detailed".
	Summary *string `json:"summary,omitempty"`
}

Reasoning represents the reasoning options for o-series models. Docs: https://platform.openai.com/docs/api-reference/responses/create#responses-create-reasoning

type StreamOptions

// StreamOptions configures streaming behavior for chat completions.
type StreamOptions struct {
	// IncludeUsage: if set, an additional chunk will be streamed before the
	// data: [DONE] message. The usage field on this chunk shows the token
	// usage statistics for the entire request, and the choices field will
	// always be an empty array. All other chunks will also include a usage
	// field, but with a null value.
	IncludeUsage bool `json:"include_usage,omitempty"` //nolint:tagliatelle //follow openai api
}

type StringOrArray

// StringOrArray is a union type that can hold either a single string or an
// array value (e.g. message content or embedding input).
type StringOrArray struct {
	// Value holds the decoded value: a string or a slice.
	Value any
}

func (StringOrArray) MarshalJSON added in v0.3.0

func (s StringOrArray) MarshalJSON() ([]byte, error)

func (*StringOrArray) UnmarshalJSON

func (s *StringOrArray) UnmarshalJSON(data []byte) error

type StringOrAssistantRoleContentUnion added in v0.1.3

// StringOrAssistantRoleContentUnion is a union type for assistant message
// content: either a plain string or structured assistant content parts.
type StringOrAssistantRoleContentUnion struct {
	// Value holds the decoded content in whichever form was provided.
	Value any
}

func (StringOrAssistantRoleContentUnion) MarshalJSON added in v0.3.0

func (s StringOrAssistantRoleContentUnion) MarshalJSON() ([]byte, error)

func (*StringOrAssistantRoleContentUnion) UnmarshalJSON added in v0.1.3

func (s *StringOrAssistantRoleContentUnion) UnmarshalJSON(data []byte) error

type StringOrUserRoleContentUnion

// StringOrUserRoleContentUnion is a union type for user message content:
// either a plain string or structured user content parts.
type StringOrUserRoleContentUnion struct {
	// Value holds the decoded content in whichever form was provided.
	Value any
}

func (StringOrUserRoleContentUnion) MarshalJSON added in v0.3.0

func (s StringOrUserRoleContentUnion) MarshalJSON() ([]byte, error)

func (*StringOrUserRoleContentUnion) UnmarshalJSON

func (s *StringOrUserRoleContentUnion) UnmarshalJSON(data []byte) error

type Tool

// Tool describes a tool the model may call.
type Tool struct {
	// Type is the tool type (see the ToolType constants).
	Type ToolType `json:"type"`
	// Function is the function definition; set when Type is "function".
	Function *FunctionDefinition `json:"function,omitempty"`
}

type ToolChoice

// ToolChoice represents the choice of tool.
type ToolChoice struct {
	// Type is the type of the selected tool.
	Type ToolType `json:"type"`
	// Function names the specific function to call.
	// NOTE(review): `omitempty` has no effect on a non-pointer struct field,
	// so an empty Function is still marshaled; confirm downstream consumers
	// tolerate this before changing the field to a pointer.
	Function ToolFunction `json:"function,omitempty"`
}

ToolChoice represents the choice of tool.

type ToolChoiceType added in v0.3.0

type ToolChoiceType string

ToolChoiceType represents the type of tool choice.

const (
	// ToolChoiceTypeNone means the model will not call any tool and instead generates a message.
	ToolChoiceTypeNone ToolChoiceType = "none"
	// ToolChoiceTypeAuto means the model can pick between generating a message or calling one or more tools.
	ToolChoiceTypeAuto ToolChoiceType = "auto"
	// ToolChoiceTypeRequired means the model must call one or more tools.
	ToolChoiceTypeRequired ToolChoiceType = "required"
	// ToolChoiceTypeFunction is used when specifying a particular function.
	ToolChoiceTypeFunction ToolChoiceType = "function"
)

type ToolFunction

// ToolFunction represents the function to call.
type ToolFunction struct {
	// Name is the name of the function to call.
	Name string `json:"name"`
}

ToolFunction represents the function to call.

type ToolType

// ToolType is the type of a tool.
type ToolType string

const (
	// ToolTypeFunction is a function-calling tool.
	ToolTypeFunction ToolType = "function"
	// ToolTypeImageGeneration is an image generation tool.
	ToolTypeImageGeneration ToolType = "image_generation"
)

type TopLogProbs

// TopLogProbs represents one of the most likely tokens at a given position.
type TopLogProbs struct {
	// Token is the token text.
	Token string `json:"token"`
	// LogProb is the log probability of this token.
	LogProb float64 `json:"logprob"`
	// Bytes is the UTF-8 byte representation of the token. Omitted if null.
	Bytes []byte `json:"bytes,omitempty"`
}

type URLCitation added in v0.3.0

type URLCitation struct {
	// EndIndex is the index of the last character of the URL citation in the message.
	EndIndex int `json:"end_index"`
	// StartIndex is the index of the first character of the URL citation in the message.
	StartIndex int `json:"start_index"`
	// URL is the URL of the web resource.
	URL string `json:"url"`
	// Title is the title of the web resource.
	Title string `json:"title"`
}

URLCitation contains citation information for web search results. Docs: https://platform.openai.com/docs/guides/tools-web-search?api-mode=chat

type WebSearchContextSize added in v0.3.0

type WebSearchContextSize string

WebSearchContextSize represents the context size for web search.

const (
	// WebSearchContextSizeLow provides minimal context from search results.
	WebSearchContextSizeLow WebSearchContextSize = "low"
	// WebSearchContextSizeMedium provides moderate context from search results.
	WebSearchContextSizeMedium WebSearchContextSize = "medium"
	// WebSearchContextSizeHigh provides maximum context from search results.
	WebSearchContextSizeHigh WebSearchContextSize = "high"
)

type WebSearchLocation added in v0.3.0

type WebSearchLocation struct {
	// City is the approximate city name.
	City string `json:"city,omitempty"`
	// Region is the approximate region or state.
	Region string `json:"region,omitempty"`
	// Country is the approximate country.
	Country string `json:"country,omitempty"`
}

WebSearchLocation contains location details for web search.

type WebSearchOptions added in v0.3.0

type WebSearchOptions struct {
	// UserLocation provides approximate location parameters for the search.
	UserLocation *WebSearchUserLocation `json:"user_location,omitempty"` //nolint:tagliatelle //follow openai api
	// SearchContextSize controls how much context to include from search results.
	SearchContextSize WebSearchContextSize `json:"search_context_size,omitempty"` //nolint:tagliatelle //follow openai api
}

WebSearchOptions configures the web search tool behavior.

type WebSearchUserLocation added in v0.3.0

type WebSearchUserLocation struct {
	// Type is the type of location approximation. Always "approximate".
	Type string `json:"type"`
	// Approximate contains the approximate location details.
	Approximate WebSearchLocation `json:"approximate"`
}

WebSearchUserLocation represents approximate location for web search.

Jump to

Keyboard shortcuts

? : This menu
/ : Search site
f or F : Jump to
y or Y : Canonical URL