Documentation
Index
- Constants
- Variables
- type DataType
- type ImageUrl
- type JSONSchema
- type OpenAI
- func (g *OpenAI) Embed(request *embed.Request) (*embed.Response, error)
- func (g *OpenAI) EmbedDocument(request *embed.DocumentRequest) (*embed.DocumentResponse, error)
- func (g *OpenAI) Generator(options ...gen.Option) *gen.Generator
- func (g *OpenAI) Provider() string
- func (g *OpenAI) SetLogger(logger *slog.Logger) *OpenAI
- type ReasoningEffort
- type ServiceTier
- type StreamOptions
Constants
const Provider = "OpenAI"
Variables
var EmbedModel_text3_large = embed.Model{ Provider: Provider, Name: "text-embedding-3-large", Description: "Increased performance over 2nd generation ada embedding Model", InputMaxTokens: 8191, OutputDimensions: 3072, }
var EmbedModel_text3_small = embed.Model{ Provider: Provider, Name: "text-embedding-3-small", Description: "Most capable embedding Model for both English and non-English tasks", InputMaxTokens: 8191, OutputDimensions: 1536, }
var EmbedModel_text_ada_002 = embed.Model{ Provider: Provider, Name: "text-embedding-ada-002", Description: "Most capable 2nd generation embedding Model, replacing 16 first generation models", InputMaxTokens: 8191, OutputDimensions: 1536, }
var EmbedModels = map[string]embed.Model{
	EmbedModel_text3_small.Name:  EmbedModel_text3_small,
	EmbedModel_text3_large.Name:  EmbedModel_text3_large,
	EmbedModel_text_ada_002.Name: EmbedModel_text_ada_002,
}
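As a quick usage sketch, a model can be pulled from this registry by its API name. The import path below is a placeholder (this documentation does not state it), so substitute the package's real path:

	package main

	import (
		"fmt"
		"log"

		openai "example.com/yourmodule/openai" // hypothetical import path; replace with the real one
	)

	func main() {
		// EmbedModels is keyed by each model's Name field.
		model, ok := openai.EmbedModels["text-embedding-3-small"]
		if !ok {
			log.Fatal("unknown embedding model")
		}
		// The same value is also available directly as openai.EmbedModel_text3_small.
		fmt.Printf("%s: max %d input tokens, %d output dimensions\n",
			model.Name, model.InputMaxTokens, model.OutputDimensions)
	}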
var GenModel_gpt4 = gen.Model{ Provider: Provider, Name: "gpt-4", Description: "GPT-4 is a large multimodal Model (accepting text or image inputs and outputting text) " + "that can solve difficult problems with greater accuracy than any of our previous models, thanks to its broader " + "general knowledge and advanced reasoning capabilities. GPT-4 is available in the OpenAI API to paying customers. " + "Like gpt-3.5-turbo, GPT-4 is optimized for chat but works well for traditional completions tasks using the Chat " + "Completions API. Learn how to use GPT-4 in our text generation guide.", InputContentTypes: nil, InputMaxToken: 0, OutputMaxToken: 0, SupportTools: false, SupportStructuredOutput: false, }
var GenModel_gpt4_0613 = gen.Model{ Provider: Provider, Name: "gpt-4-0613", Description: "GPT-4 is a large multimodal Model (accepting text or image inputs and outputting text) " + "that can solve difficult problems with greater accuracy than any of our previous models, thanks to its broader " + "general knowledge and advanced reasoning capabilities. GPT-4 is available in the OpenAI API to paying customers. " + "Like gpt-3.5-turbo, GPT-4 is optimized for chat but works well for traditional completions tasks using the Chat " + "Completions API. Learn how to use GPT-4 in our text generation guide.", InputContentTypes: nil, InputMaxToken: 0, OutputMaxToken: 0, SupportTools: false, SupportStructuredOutput: false, }
var GenModel_gpt4_1_250414 = gen.Model{ Provider: Provider, Name: "gpt-4.1-2025-04-14", InputMaxToken: 1_047_576, OutputMaxToken: 32_768, }
var GenModel_gpt4_1_latest = gen.Model{ Provider: Provider, Name: "gpt-4.1", InputMaxToken: 1_047_576, OutputMaxToken: 32_768, }
var GenModel_gpt4_1_mini_250414 = gen.Model{ Provider: Provider, Name: "gpt-4.1-mini-2025-04-14", InputMaxToken: 1_047_576, OutputMaxToken: 32_768, }
var GenModel_gpt4_1_mini_latest = gen.Model{ Provider: Provider, Name: "gpt-4.1-mini", InputMaxToken: 1_047_576, OutputMaxToken: 32_768, }
var GenModel_gpt4_1_nano_250414 = gen.Model{ Provider: Provider, Name: "gpt-4.1-nano-2025-04-14", InputMaxToken: 1_047_576, OutputMaxToken: 32_768, }
var GenModel_gpt4_1_nano_latest = gen.Model{ Provider: Provider, Name: "gpt-4.1-nano", InputMaxToken: 1_047_576, OutputMaxToken: 32_768, }
var GenModel_gpt4_preview_0125 = gen.Model{ Provider: Provider, Name: "gpt-4-0125-preview", Description: "GPT-4 is a large multimodal Model (accepting text or image inputs and outputting text) " + "that can solve difficult problems with greater accuracy than any of our previous models, thanks to its broader " + "general knowledge and advanced reasoning capabilities. GPT-4 is available in the OpenAI API to paying customers. " + "Like gpt-3.5-turbo, GPT-4 is optimized for chat but works well for traditional completions tasks using the Chat " + "Completions API. Learn how to use GPT-4 in our text generation guide.", InputContentTypes: nil, InputMaxToken: 0, OutputMaxToken: 0, SupportTools: false, SupportStructuredOutput: false, }
var GenModel_gpt4_preview_1106 = gen.Model{ Provider: Provider, Name: "gpt-4-1106-preview", Description: "GPT-4 is a large multimodal Model (accepting text or image inputs and outputting text) " + "that can solve difficult problems with greater accuracy than any of our previous models, thanks to its broader " + "general knowledge and advanced reasoning capabilities. GPT-4 is available in the OpenAI API to paying customers. " + "Like gpt-3.5-turbo, GPT-4 is optimized for chat but works well for traditional completions tasks using the Chat " + "Completions API. Learn how to use GPT-4 in our text generation guide.", InputContentTypes: nil, InputMaxToken: 0, OutputMaxToken: 0, SupportTools: false, SupportStructuredOutput: false, }
var GenModel_gpt4_turbo = gen.Model{ Provider: Provider, Name: "gpt-4-turbo", Description: "GPT-4 is a large multimodal Model (accepting text or image inputs and outputting text) " + "that can solve difficult problems with greater accuracy than any of our previous models, thanks to its broader " + "general knowledge and advanced reasoning capabilities. GPT-4 is available in the OpenAI API to paying customers. " + "Like gpt-3.5-turbo, GPT-4 is optimized for chat but works well for traditional completions tasks using the Chat " + "Completions API. Learn how to use GPT-4 in our text generation guide.", InputContentTypes: nil, InputMaxToken: 0, OutputMaxToken: 0, SupportTools: false, SupportStructuredOutput: false, }
var GenModel_gpt4_turbo_240409 = gen.Model{ Provider: Provider, Name: "gpt-4-turbo-2024-04-09", Description: "GPT-4 is a large multimodal Model (accepting text or image inputs and outputting text) " + "that can solve difficult problems with greater accuracy than any of our previous models, thanks to its broader " + "general knowledge and advanced reasoning capabilities. GPT-4 is available in the OpenAI API to paying customers. " + "Like gpt-3.5-turbo, GPT-4 is optimized for chat but works well for traditional completions tasks using the Chat " + "Completions API. Learn how to use GPT-4 in our text generation guide.", InputContentTypes: nil, InputMaxToken: 0, OutputMaxToken: 0, SupportTools: false, SupportStructuredOutput: false, }
var GenModel_gpt4_turbo_preview = gen.Model{ Provider: Provider, Name: "gpt-4-turbo-preview", Description: "GPT-4 is a large multimodal Model (accepting text or image inputs and outputting text) " + "that can solve difficult problems with greater accuracy than any of our previous models, thanks to its broader " + "general knowledge and advanced reasoning capabilities. GPT-4 is available in the OpenAI API to paying customers. " + "Like gpt-3.5-turbo, GPT-4 is optimized for chat but works well for traditional completions tasks using the Chat " + "Completions API. Learn how to use GPT-4 in our text generation guide.", InputContentTypes: nil, InputMaxToken: 0, OutputMaxToken: 0, SupportTools: false, SupportStructuredOutput: false, }
var GenModel_gpt4o = gen.Model{ Provider: Provider, Name: "gpt-4o", Description: "Our high-intelligence flagship Model for complex, multi-step tasks. GPT-4o is cheaper and " + "faster than GPT-4 Turbo.", InputContentTypes: nil, InputMaxToken: 0, OutputMaxToken: 0, SupportTools: false, SupportStructuredOutput: false, }
var GenModel_gpt4o_240513 = gen.Model{ Provider: Provider, Name: "gpt-4o-2024-05-13", Description: "Our high-intelligence flagship Model for complex, multi-step tasks. GPT-4o is cheaper and " + "faster than GPT-4 Turbo.", InputContentTypes: nil, InputMaxToken: 0, OutputMaxToken: 0, SupportTools: false, SupportStructuredOutput: false, }
var GenModel_gpt4o_240806 = gen.Model{ Provider: Provider, Name: "gpt-4o-2024-08-06", Description: "Our high-intelligence flagship Model for complex, multi-step tasks. GPT-4o is cheaper and " + "faster than GPT-4 Turbo.", InputContentTypes: nil, InputMaxToken: 0, OutputMaxToken: 0, SupportTools: false, SupportStructuredOutput: false, }
var GenModel_gpt4o_latest = gen.Model{ Provider: Provider, Name: "chatgpt-4o-latest", Description: "Our high-intelligence flagship Model for complex, multi-step tasks. GPT-4o is cheaper and " + "faster than GPT-4 Turbo.", InputContentTypes: nil, InputMaxToken: 0, OutputMaxToken: 0, SupportTools: false, SupportStructuredOutput: false, }
var GenModel_gpt4o_mini = gen.Model{ Provider: Provider, Name: "gpt-4o-mini", Description: "Our affordable and intelligent small Model for fast, lightweight tasks. GPT-4o mini is " + "cheaper and more capable than GPT-3.5 Turbo.", InputContentTypes: nil, InputMaxToken: 0, OutputMaxToken: 0, SupportTools: false, SupportStructuredOutput: false, }
var GenModel_gpt4o_mini_240718 = gen.Model{ Provider: Provider, Name: "gpt-4o-mini-2024-07-18", Description: "Our affordable and intelligent small Model for fast, lightweight tasks. GPT-4o mini is " + "cheaper and more capable than GPT-3.5 Turbo.", InputContentTypes: nil, InputMaxToken: 0, OutputMaxToken: 0, SupportTools: false, SupportStructuredOutput: false, }
var GenModel_gpt5_1_latest = gen.Model{ Provider: Provider, Name: "gpt-5.1", InputMaxToken: 400_000, OutputMaxToken: 128_000, }
var GenModel_gpt5_latest = gen.Model{ Provider: Provider, Name: "gpt-5", InputMaxToken: 400_000, OutputMaxToken: 128_000, }
var GenModel_gpt5_mini_latest = gen.Model{ Provider: Provider, Name: "gpt-5-mini", InputMaxToken: 400_000, OutputMaxToken: 128_000, }
var GenModel_gpt5_nano_latest = gen.Model{ Provider: Provider, Name: "gpt-5-nano", InputMaxToken: 400_000, OutputMaxToken: 128_000, }
var GenModel_o1_mini = gen.Model{ Provider: Provider, Name: "o1-mini", Description: "The o1 series of large language models are trained with reinforcement learning to perform " + "complex reasoning. o1 models think before they answer, producing a long internal chain of thought before " + "responding to the user.", InputContentTypes: nil, InputMaxToken: 0, OutputMaxToken: 0, SupportTools: false, SupportStructuredOutput: false, }
var GenModel_o1_mini_240912 = gen.Model{ Provider: Provider, Name: "o1-mini-2024-09-12", Description: "The o1 series of large language models are trained with reinforcement learning to perform " + "complex reasoning. o1 models think before they answer, producing a long internal chain of thought before " + "responding to the user.", InputContentTypes: nil, InputMaxToken: 0, OutputMaxToken: 0, SupportTools: false, SupportStructuredOutput: false, }
var GenModel_o1_preview = gen.Model{ Provider: Provider, Name: "o1-preview", Description: "The o1 series of large language models are trained with reinforcement learning to perform " + "complex reasoning. o1 models think before they answer, producing a long internal chain of thought before " + "responding to the user.", InputContentTypes: nil, InputMaxToken: 0, OutputMaxToken: 0, SupportTools: false, SupportStructuredOutput: false, }
var GenModel_o1_preview_240912 = gen.Model{ Provider: Provider, Name: "o1-preview-2024-09-12", Description: "The o1 series of large language models are trained with reinforcement learning to perform " + "complex reasoning. o1 models think before they answer, producing a long internal chain of thought before " + "responding to the user.", InputContentTypes: nil, InputMaxToken: 0, OutputMaxToken: 0, SupportTools: false, SupportStructuredOutput: false, }
var GenModel_o3_250416 = gen.Model{ Provider: Provider, Name: "o3-2025-04-16", }
var GenModel_o3_mini_250131 = gen.Model{ Provider: Provider, Name: "o3-mini-2025-01-31", }
var GenModel_o3_pro_250610 = gen.Model{ Provider: Provider, Name: "o3-pro-2025-06-10", }
var GenModel_o4_mini_250416 = gen.Model{ Provider: Provider, Name: "o4-mini-2025-04-16", }
var GenModels = map[string]gen.Model{
	GenModel_gpt4o_latest.Name:       GenModel_gpt4o_latest,
	GenModel_gpt4o.Name:              GenModel_gpt4o,
	GenModel_gpt4o_240806.Name:       GenModel_gpt4o_240806,
	GenModel_gpt4o_240513.Name:       GenModel_gpt4o_240513,
	GenModel_gpt4o_mini.Name:         GenModel_gpt4o_mini,
	GenModel_gpt4o_mini_240718.Name:  GenModel_gpt4o_mini_240718,
	GenModel_o1_preview.Name:         GenModel_o1_preview,
	GenModel_o1_preview_240912.Name:  GenModel_o1_preview_240912,
	GenModel_o1_mini.Name:            GenModel_o1_mini,
	GenModel_o1_mini_240912.Name:     GenModel_o1_mini_240912,
	GenModel_gpt4_turbo.Name:         GenModel_gpt4_turbo,
	GenModel_gpt4_turbo_240409.Name:  GenModel_gpt4_turbo_240409,
	GenModel_gpt4_turbo_preview.Name: GenModel_gpt4_turbo_preview,
	GenModel_gpt4_preview_0125.Name:  GenModel_gpt4_preview_0125,
	GenModel_gpt4_preview_1106.Name:  GenModel_gpt4_preview_1106,
	GenModel_gpt4.Name:               GenModel_gpt4,
	GenModel_gpt4_0613.Name:          GenModel_gpt4_0613,
}
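The generation-model registry works the same way; note that several entries leave InputMaxToken, OutputMaxToken, and the Support* flags at their zero values. A sketch, again with a placeholder import path:

	package main

	import (
		"fmt"

		openai "example.com/yourmodule/openai" // hypothetical import path; replace with the real one
	)

	func main() {
		// Access a specific model definition directly...
		m := openai.GenModel_gpt4_1_latest
		fmt.Println(m.Name, m.InputMaxToken, m.OutputMaxToken) // gpt-4.1 1047576 32768

		// ...or iterate the registry to list the models this provider declares.
		for name := range openai.GenModels {
			fmt.Println(name)
		}
	}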
Functions
This section is empty.
Types
type ImageUrl
type ImageUrl struct {
Url string `json:"url"` // data:image/jpeg;base64,......
// contains filtered or unexported fields
}
func (ImageUrl) MarshalJSON
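A sketch of building the data URI the Url field expects and serializing it through encoding/json, which invokes the MarshalJSON method above. The import path is a placeholder, and the exact JSON produced is defined by the package:

	package main

	import (
		"encoding/base64"
		"encoding/json"
		"fmt"
		"log"
		"os"

		openai "example.com/yourmodule/openai" // hypothetical import path; replace with the real one
	)

	func main() {
		raw, err := os.ReadFile("photo.jpg")
		if err != nil {
			log.Fatal(err)
		}
		// Url carries an inline data URI such as data:image/jpeg;base64,...
		img := openai.ImageUrl{
			Url: "data:image/jpeg;base64," + base64.StdEncoding.EncodeToString(raw),
		}
		b, err := json.Marshal(img) // uses ImageUrl.MarshalJSON
		if err != nil {
			log.Fatal(err)
		}
		fmt.Println(string(b))
	}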
type JSONSchema added in v0.8.0
type JSONSchema struct {
Ref string `json:"$ref,omitempty"` // #/$defs/... etc, overrides everything else
Defs map[string]*JSONSchema `json:"$defs,omitempty"` // for $ref
// Type specifies the data type of the schema. OpenAI uses []string{Type, Null} to represent nullable types.
Type any `json:"type,omitempty"`
// Description is the description of the schema.
Description string `json:"description,omitempty"`
// Enum is used to restrict a value to a fixed set of values. It must be an array with at least
// one element, where each element is unique. You will probably only use this with strings.
Enum []any `json:"enum,omitempty"`
Pattern string `json:"pattern,omitempty"` // Regular expression that the string must match.
Format string `json:"format,omitempty"` // Format of the data, e.g. "email", "date-time", etc.
// Properties describes the properties of an object, if the schema type is Object.
Properties *map[string]JSONSchema `json:"properties,omitempty"`
// Required specifies which properties are required, if the schema type is Object.
Required []string `json:"required,omitempty"`
// Items specifies which data type an array contains, if the schema type is Array.
Items *JSONSchema `json:"items,omitempty"`
// AdditionalProperties is used to control the handling of properties in an object
// that are not explicitly defined in the properties section of the schema. example:
// additionalProperties: true
// additionalProperties: false
// additionalProperties: jsonschema.JSONSchema{Type: jsonschema.String}
AdditionalProperties any `json:"additionalProperties,omitempty"`
MinLength int `json:"minLength,omitempty"` // Minimum length of the string.
MaxLength int `json:"maxLength,omitempty"` // Maximum length of the string.
Minimum float64 `json:"minimum,omitempty"` // Minimum value of the integer and number types.
Maximum float64 `json:"maximum,omitempty"` // Maximum value of the integer and number types.
MinItems int `json:"minItems,omitempty"` // Minimum number of items in an array.
MaxItems int `json:"maxItems,omitempty"` // Maximum number of items in an array.
}
func (JSONSchema) IsObjectRequired added in v0.8.0
func (s JSONSchema) IsObjectRequired() bool
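A sketch of assembling a JSONSchema for a simple object and checking it with IsObjectRequired. The plain strings passed to Type stand in for the package's DataType constants, whose names are not listed on this page; the import path is a placeholder:

	package main

	import (
		"encoding/json"
		"fmt"
		"log"

		openai "example.com/yourmodule/openai" // hypothetical import path; replace with the real one
	)

	func main() {
		props := map[string]openai.JSONSchema{
			"city": {
				Type:        "string", // stand-in for the package's DataType string constant
				Description: "Name of the city",
				MinLength:   1,
			},
		}
		schema := openai.JSONSchema{
			Type:                 "object", // stand-in for the package's DataType object constant
			Properties:           &props,   // Properties is a *map, so pass its address
			Required:             []string{"city"},
			AdditionalProperties: false, // forbid properties outside the ones declared above
		}

		fmt.Println(schema.IsObjectRequired()) // helper documented above

		b, err := json.Marshal(schema)
		if err != nil {
			log.Fatal(err)
		}
		fmt.Println(string(b))
	}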
type OpenAI
func (*OpenAI) EmbedDocument added in v1.0.0
func (g *OpenAI) EmbedDocument(request *embed.DocumentRequest) (*embed.DocumentResponse, error)
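A library-style sketch of the client methods listed in the index, using only the signatures shown there. How a *OpenAI value is constructed is not covered on this page, so the client is taken as a parameter; Embed and EmbedDocument take request types from the embed package, whose fields are likewise not reproduced here. The import path is a placeholder:

	package example

	import (
		"fmt"
		"log/slog"

		openai "example.com/yourmodule/openai" // hypothetical import path; replace with the real one
	)

	// useClient assumes the *openai.OpenAI has already been constructed elsewhere.
	func useClient(client *openai.OpenAI) {
		// SetLogger returns the client, so calls can be chained.
		client = client.SetLogger(slog.Default())

		fmt.Println(client.Provider()) // the provider name as a string

		// Generator accepts zero or more gen.Option values (defined in the gen package)
		// and returns a *gen.Generator for text generation.
		generator := client.Generator()
		_ = generator
	}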
type ReasoningEffort added in v0.11.9
type ReasoningEffort string
ReasoningEffort is a string that can be "none", "low", "medium", or "high".
const (
	ReasoningEffortNone   ReasoningEffort = "none"
	ReasoningEffortLow    ReasoningEffort = "low"
	ReasoningEffortMedium ReasoningEffort = "medium"
	ReasoningEffortHigh   ReasoningEffort = "high"
)
type ServiceTier added in v0.12.10
type ServiceTier string
const (
	ServiceTierAuto     ServiceTier = "auto"
	ServiceTierDefault  ServiceTier = "default"
	ServiceTierFlex     ServiceTier = "flex"
	ServiceTierPriority ServiceTier = "priority"
)
type StreamOptions added in v0.11.7
type StreamOptions struct {
IncludeUsage bool `json:"include_usage"`
}
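With only one tagged field, the wire form is easy to verify; a minimal sketch (placeholder import path, and how StreamOptions is attached to a streaming request is defined elsewhere in the package):

	package main

	import (
		"encoding/json"
		"fmt"

		openai "example.com/yourmodule/openai" // hypothetical import path; replace with the real one
	)

	func main() {
		b, err := json.Marshal(openai.StreamOptions{IncludeUsage: true})
		if err != nil {
			panic(err)
		}
		fmt.Println(string(b)) // prints {"include_usage":true} with the default encoder
	}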