Documentation
¶
Index ¶
- type Audio
- type Chat
- type ChatChoice
- type Choice
- type Completion
- type CreateChatCompletionRequest
- type CreateChatCompletionResult
- type CreateCompletionRequest
- type CreateCompletionResult
- type CreateEditRequest
- type CreateEditResult
- type CreateEmbeddingsRequest
- type CreateFineTuneRequest
- type CreateFineTuneResult
- type CreateImageEditRequest
- type CreateImageRequest
- type CreateImageVariationRequest
- type CreateModerationRequest
- type CreateModerationResult
- type CreateTranscriptionRequest
- type CreateTranslationRequest
- type DeleteFileResponse
- type DeleteFineTuneModelResult
- type EditChoice
- type Edits
- type EmbeddingData
- type Embeddings
- type EmbeddingsResponse
- type Error
- type ErrorDetail
- type FileInfo
- type Files
- func (f *Files) DeleteFile(fileID string) (*DeleteFileResponse, error)
- func (f *Files) ListFiles() (*ListFilesResponse, error)
- func (f *Files) RetrieveFile(fileID string) (*FileInfo, error)
- func (f *Files) RetrieveFileContent(fileID string, destination io.Writer) error
- func (f *Files) UploadFile(file *os.File, purpose string) (*FileInfo, error)
- type FineTuneEvent
- type FineTunes
- func (ft *FineTunes) CancelFineTune(fineTuneID string) (*CreateFineTuneResult, error)
- func (ft *FineTunes) CreateFineTune(req *CreateFineTuneRequest) (*CreateFineTuneResult, error)
- func (ft *FineTunes) DeleteFineTuneModel(model string) (*DeleteFineTuneModelResult, error)
- func (ft *FineTunes) ListFineTuneEvents(req *ListFineTuneEventsRequest) (*ListFineTuneEventsResult, error)
- func (ft *FineTunes) ListFineTunes() (*ListFineTunesResult, error)
- func (ft *FineTunes) RetrieveFineTune(fineTuneID string) (*CreateFineTuneResult, error)
- type ImageData
- type ImageResponse
- type Images
- type ListFilesResponse
- type ListFineTuneEventsRequest
- type ListFineTuneEventsResult
- type ListFineTunesResult
- type ListModelsResult
- type ListModelsResultData
- type Message
- type Model
- type Moderations
- type Permission
- type RetrieveModelResult
- type TrainingFile
- type TranscriptionResponse
- type Usage
- type V1
Constants ¶
This section is empty.
Variables ¶
This section is empty.
Functions ¶
This section is empty.
Types ¶
type Audio ¶
type Audio struct {
// contains filtered or unexported fields
}
func (*Audio) CreateTranscription ¶
func (a *Audio) CreateTranscription(req *CreateTranscriptionRequest) (*TranscriptionResponse, error)
func (*Audio) CreateTranslation ¶
func (a *Audio) CreateTranslation(req *CreateTranslationRequest) (*TranscriptionResponse, error)
type Chat ¶
type Chat struct {
// contains filtered or unexported fields
}
func (*Chat) CreateChatCompletion ¶
func (c *Chat) CreateChatCompletion(req *CreateChatCompletionRequest) (*CreateChatCompletionResult, error)
type ChatChoice ¶
type Completion ¶
type Completion struct {
// contains filtered or unexported fields
}
func (*Completion) CreateCompletion ¶
func (c *Completion) CreateCompletion(req *CreateCompletionRequest) (*CreateCompletionResult, error)
type CreateChatCompletionRequest ¶
// CreateChatCompletionRequest is the request body for the chat-completion
// endpoint. "model" and "messages" are required; every field tagged
// ",omitempty" is optional, with pointer types used so an unset value is
// omitted from the JSON rather than sent as a zero.
type CreateChatCompletionRequest struct {
// Model is the ID of the model to use (required).
Model string `json:"model"`
// Messages is the list of chat messages making up the conversation so far (required).
Messages []Message `json:"messages"`
// Temperature is the sampling temperature; nil leaves the API default.
Temperature *float64 `json:"temperature,omitempty"`
// TopP is the nucleus-sampling probability mass; nil leaves the API default.
TopP *float64 `json:"top_p,omitempty"`
// N is the number of completions to generate; nil leaves the API default.
N *int `json:"n,omitempty"`
// Stream, if true, requests a streamed (server-sent events) response.
Stream *bool `json:"stream,omitempty"`
// Stop holds stop sequence(s), encoded as a string or an array of strings.
Stop openaitype.StringOrArray `json:"stop,omitempty"`
// MaxTokens caps the number of tokens generated; nil leaves the API default.
MaxTokens *int `json:"max_tokens,omitempty"`
// PresencePenalty penalizes tokens that already appear in the text so far.
PresencePenalty *float64 `json:"presence_penalty,omitempty"`
// FrequencyPenalty penalizes tokens by their frequency in the text so far.
FrequencyPenalty *float64 `json:"frequency_penalty,omitempty"`
// LogitBias maps token IDs (as strings) to bias values applied to the logits.
LogitBias map[string]int `json:"logit_bias,omitempty"`
// User is a unique end-user identifier that helps OpenAI monitor and detect abuse.
User string `json:"user,omitempty"`
}
type CreateCompletionRequest ¶
// CreateCompletionRequest is the request body for the text-completion
// endpoint. Only "model" is required; optional fields use pointer types
// (plus ",omitempty") so unset values are omitted from the JSON.
type CreateCompletionRequest struct {
// Model is the ID of the model to use (required).
Model string `json:"model"`
// Prompt is the prompt text, as a string or array (of strings/token arrays).
Prompt openaitype.StringOrArray `json:"prompt,omitempty"`
// Suffix is text that comes after the inserted completion.
Suffix string `json:"suffix,omitempty"`
// MaxTokens caps the number of tokens generated; nil leaves the API default.
MaxTokens *int `json:"max_tokens,omitempty"`
// Temperature is the sampling temperature; nil leaves the API default.
// FIX: the tag previously lacked ",omitempty", so a nil pointer was
// marshalled as "temperature":null instead of being omitted — every
// other optional field in this struct (and its chat-request sibling)
// omits unset values.
Temperature *float64 `json:"temperature,omitempty"`
// TopP is the nucleus-sampling probability mass; nil leaves the API default.
TopP *float64 `json:"top_p,omitempty"`
// N is the number of completions to generate; nil leaves the API default.
N *int `json:"n,omitempty"`
// Stream, if true, requests a streamed (server-sent events) response.
Stream *bool `json:"stream,omitempty"`
// Logprobs requests log probabilities on the most likely tokens.
Logprobs *int `json:"logprobs,omitempty"`
// Echo, if true, echoes the prompt back in addition to the completion.
Echo *bool `json:"echo,omitempty"`
// Stop holds stop sequence(s), encoded as a string or an array of strings.
Stop openaitype.StringOrArray `json:"stop,omitempty"`
// PresencePenalty penalizes tokens that already appear in the text so far.
PresencePenalty *float64 `json:"presence_penalty,omitempty"`
// FrequencyPenalty penalizes tokens by their frequency in the text so far.
FrequencyPenalty *float64 `json:"frequency_penalty,omitempty"`
// BestOf generates this many completions server-side and returns the best.
BestOf *int `json:"best_of,omitempty"`
// LogitBias maps token IDs (as strings) to bias values applied to the logits.
LogitBias map[string]int `json:"logit_bias,omitempty"`
// User is a unique end-user identifier that helps OpenAI monitor and detect abuse.
User string `json:"user,omitempty"`
}
type CreateCompletionResult ¶
type CreateEditRequest ¶
type CreateEditResult ¶
// CreateEditResult is the response body of the create-edit endpoint.
type CreateEditResult struct {
Object string `json:"object"`
// Created is a Unix timestamp (seconds).
Created int64 `json:"created"`
Choices []EditChoice `json:"choices"`
Usage Usage `json:"usage"`
// Embedded *Error captures an API error payload, if the request failed.
*Error
}
type CreateEmbeddingsRequest ¶
// CreateEmbeddingsRequest is the request body for the embeddings endpoint.
type CreateEmbeddingsRequest struct {
// String (Required)
/* ID of the model to use. You can use the List models API to see all of your available models, or see our Model overview for descriptions of them.*/
Model string `json:"model"`
// StringOrArray (Required)
/* Input text to get embeddings for, encoded as a string or array of tokens. To get embeddings for multiple inputs in a single request, pass an array of strings or array of token arrays. Each input must not exceed 8192 tokens in length.*/
Input openaitype.StringOrArray `json:"input"`
// String (Optional)
/* A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse.
(The previous comment here described a "prompt ID" — a copy-paste error; this field is the end-user identifier, as the link below shows.)*/
// [Learn more](https://platform.openai.com/docs/guides/safety-best-practices/end-user-ids)
User string `json:"user,omitempty"`
}
type CreateFineTuneRequest ¶
// CreateFineTuneRequest is the request body for creating a fine-tune job.
// Only "training_file" is required; the defaults noted below are applied
// server-side when a field is omitted.
type CreateFineTuneRequest struct {
// String (Required)
// The ID of an uploaded file that contains training data; see the
// upload-file endpoint for how to upload one. The dataset must be a
// JSONL file where each training example is a JSON object with the
// keys "prompt" and "completion", uploaded with the purpose
// "fine-tune". See the fine-tuning guide for details.
TrainingFile string `json:"training_file"`
// String (Optional)
// The ID of an uploaded file that contains validation data. If
// provided, it is used to generate validation metrics periodically
// during fine-tuning (viewable in the fine-tuning results file).
// Train and validation data should be mutually exclusive; same JSONL
// format and "fine-tune" purpose as TrainingFile.
ValidationFile string `json:"validation_file,omitempty"`
// String (Optional) Defaults to curie.
// The name of the base model to fine-tune: "ada", "babbage",
// "curie", "davinci", or a fine-tuned model created after 2022-04-21.
Model string `json:"model,omitempty"`
// Integer (Optional) Defaults to 4.
// (The original comment said "String"; the field is an int.)
// The number of epochs to train for — one epoch is one full cycle
// through the training dataset.
NEpochs int `json:"n_epochs,omitempty"`
// Integer (Optional) Defaults to null.
// The batch size (training examples per forward/backward pass). By
// default it is configured dynamically to ~0.2% of the training-set
// size, capped at 256 — larger batch sizes tend to work better for
// larger datasets.
BatchSize *int `json:"batch_size,omitempty"`
// Float (Optional) Defaults to null.
// Learning-rate multiplier: the fine-tuning learning rate is the
// original pretraining learning rate times this value. Defaults to
// 0.05, 0.1 or 0.2 depending on the final batch size; values in the
// range 0.02–0.2 are worth experimenting with.
LearningRateMultiplier *float64 `json:"learning_rate_multiplier,omitempty"`
// Float (Optional) Defaults to 0.01.
// Weight for loss on the prompt tokens (completions always have
// weight 1.0); adds a stabilizing effect when completions are short.
// Reduce it when prompts are extremely long relative to completions
// to avoid over-prioritizing learning the prompt.
PromptLossWeight *float64 `json:"prompt_loss_weight,omitempty"`
// Boolean (Optional) Defaults to false.
// If set, classification metrics (accuracy, F-1 score) are computed
// on the validation set at the end of every epoch and recorded in
// the results file. Requires ValidationFile, plus
// ClassificationNClasses for multiclass or
// ClassificationPositiveClass for binary classification.
ComputeClassificationMetrics bool `json:"compute_classification_metrics,omitempty"`
// Integer (Optional) Defaults to null.
// The number of classes in a classification task. This parameter is
// required for multiclass classification.
ClassificationNClasses int `json:"classification_n_classes,omitempty"`
// String (Optional) Defaults to null.
// The positive class in binary classification; needed to generate
// precision, recall, and F-1 metrics.
ClassificationPositiveClass string `json:"classification_positive_class,omitempty"`
// Array (Optional) Defaults to null.
// Beta values at which to compute F-beta scores (binary
// classification only). The F-beta score generalizes F-1: beta 1 is
// the F-1 score; larger betas weigh recall more, smaller betas weigh
// precision more.
ClassificationBetas []float64 `json:"classification_betas,omitempty"`
// String (Optional) Defaults to null.
// Up to 40 characters appended to the fine-tuned model name, e.g. a
// suffix of "custom-model-name" produces a model name like
// ada:ft-your-org:custom-model-name-2022-02-15-04-21-04.
Suffix string `json:"suffix,omitempty"`
}
type CreateFineTuneResult ¶
// CreateFineTuneResult describes a fine-tune job. It is returned by
// CreateFineTune, RetrieveFineTune and CancelFineTune, and embedded in
// ListFineTunesResult.
type CreateFineTuneResult struct {
ID string `json:"id"`
Object string `json:"object"`
Model string `json:"model"`
// CreatedAt / UpdatedAt are Unix timestamps (seconds).
CreatedAt int64 `json:"created_at"`
Events []FineTuneEvent `json:"events"`
FineTunedModel string `json:"fine_tuned_model"`
// Hyperparams is left as a free-form map; its exact schema is not pinned down here.
Hyperparams map[string]interface{} `json:"hyperparams"`
OrganizationID string `json:"organization_id"`
// ResultFiles / ValidationFiles element types are not modelled yet — confirm against the API.
ResultFiles []interface{} `json:"result_files"`
Status string `json:"status"`
ValidationFiles []interface{} `json:"validation_files"`
TrainingFiles []TrainingFile `json:"training_files"`
UpdatedAt int64 `json:"updated_at"`
// Embedded *Error captures an API error payload, if the request failed.
*Error
}
type CreateImageEditRequest ¶
// CreateImageEditRequest is the request body for the image-edit endpoint.
// Fields have no JSON tags because this request is sent as multipart form data.
type CreateImageEditRequest struct {
// FileReader (Required)
/* The image to edit. Must be a valid PNG file, less than 4MB, and square.
(The previous comment described "the basis for the variation(s)" — copy-pasted from CreateImageVariationRequest.)*/
Image *os.File
// FileReader (Optional)
/* An additional image whose fully transparent areas (e.g. where alpha is zero) indicate where image should be
edited. Must be a valid PNG file, less than 4MB, and have the same dimensions as image.*/
Mask *os.File
// String (Required)
/* A text description of the desired image(s). The maximum length is 1000 characters.*/
Prompt string
// Integer (Optional) Defaults to 1
/* The number of images to generate. Must be between 1 and 10.*/
N *int
// String (Optional) Defaults to 1024x1024
/* The size of the generated images. Must be one of 256x256, 512x512, or 1024x1024.*/
Size string
// String (Optional) Defaults to url
/* The format in which the generated images are returned. Must be one of url or b64_json.*/
ResponseFormat string
// String (Optional)
/* A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse.*/
// [Learn more](https://platform.openai.com/docs/guides/safety-best-practices/end-user-ids)
User string
}
type CreateImageRequest ¶
// CreateImageRequest is the request body for the image-generation endpoint.
type CreateImageRequest struct {
// String (Required)
/* A text description of the desired image(s). The maximum length is 1000 characters.*/
Prompt string `json:"prompt"`
// Integer (Optional) Defaults to 1
/* The number of images to generate. Must be between 1 and 10.*/
N *int `json:"n,omitempty"`
// String (Optional) Defaults to 1024x1024
/* The size of the generated images. Must be one of 256x256, 512x512, or 1024x1024.*/
Size string `json:"size,omitempty"`
// String (Optional) Defaults to url
/* The format in which the generated images are returned. Must be one of url or b64_json.*/
ResponseFormat string `json:"response_format,omitempty"`
// String (Optional)
/* A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse.*/
// [Learn more](https://platform.openai.com/docs/guides/safety-best-practices/end-user-ids)
User string `json:"user,omitempty"`
}
type CreateImageVariationRequest ¶
// CreateImageVariationRequest is the request body for the image-variation
// endpoint. Fields have no JSON tags because this request is sent as
// multipart form data.
type CreateImageVariationRequest struct {
// FileReader (Required)
/* The image to use as the basis for the variation(s). Must be a valid PNG file, less than 4MB, and square.*/
Image *os.File
// Integer (Optional) Defaults to 1
/* The number of images to generate. Must be between 1 and 10.*/
N *int
// String (Optional) Defaults to 1024x1024
/* The size of the generated images. Must be one of 256x256, 512x512, or 1024x1024.*/
Size string
// String (Optional) Defaults to url
/* The format in which the generated images are returned. Must be one of url or b64_json.*/
ResponseFormat string
// String (Optional)
/* A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse.*/
User string
}
type CreateModerationRequest ¶
// CreateModerationRequest is the request body for the moderations
// endpoint: "input" is required, "model" is optional.
type CreateModerationRequest struct {
// Input is the text to classify, as a string or array of strings.
Input openaitype.StringOrArray `json:"input"`
// Model selects the moderation model; empty uses the API default.
Model string `json:"model,omitempty"`
}
type CreateModerationResult ¶
type CreateTranscriptionRequest ¶
// CreateTranscriptionRequest is the request body for the audio
// transcription endpoint. Fields have no JSON tags because this request
// is sent as multipart form data.
type CreateTranscriptionRequest struct {
// FileReader (Required)
/* The audio file to transcribe, in one of these formats: mp3, mp4, mpeg, mpga, m4a, wav, or webm.
(The previous comment said "to translate" — copy-pasted from CreateTranslationRequest.)*/
File *os.File
// String (Required)
/* ID of the model to use. Only whisper-1 is currently available.*/
Model string
// String (Optional)
/* An optional text to guide the model's style or continue a previous audio segment. The prompt should be in English.*/
Prompt string
// String (Optional) Defaults to json
/* The format of the transcript output, in one of these options: json, text, srt, verbose_json, or vtt.*/
ResponseFormat string
// Float (Optional) Defaults to 0
/* The sampling temperature, between 0 and 1. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. If set to 0, the model will use log probability to automatically increase the temperature until certain thresholds are hit.*/
Temperature *float64
// String (Optional)
/* The language of the input audio. Supplying the input language in ISO-639-1 format will improve accuracy and latency.*/
Language string
}
type CreateTranslationRequest ¶
// CreateTranslationRequest is the request body for the audio translation
// (to English) endpoint. Fields have no JSON tags because this request
// is sent as multipart form data.
type CreateTranslationRequest struct {
// FileReader (Required)
/* The audio file to translate, in one of these formats: mp3, mp4, mpeg, mpga, m4a, wav, or webm.*/
File *os.File
// String (Required)
/* ID of the model to use. Only whisper-1 is currently available.*/
Model string
// String (Optional)
/* An optional text to guide the model's style or continue a previous audio segment. The prompt should be in English.*/
Prompt string
// String (Optional) Defaults to json
/* The format of the transcript output, in one of these options: json, text, srt, verbose_json, or vtt.*/
ResponseFormat string
// Float (Optional) Defaults to 0 — the previous comment said "String"; the field is a float.
/* The sampling temperature, between 0 and 1. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. If set to 0, the model will use log probability to automatically increase the temperature until certain thresholds are hit.*/
// NOTE(review): unlike CreateTranscriptionRequest.Temperature (*float64),
// this is a non-pointer, so 0 cannot be distinguished from "unset" — confirm intent.
Temperature float64
}
type DeleteFileResponse ¶
type EditChoice ¶
type Edits ¶
type Edits struct {
// contains filtered or unexported fields
}
func (*Edits) CreateEdit ¶
func (e *Edits) CreateEdit(req *CreateEditRequest) (*CreateEditResult, error)
type EmbeddingData ¶
type Embeddings ¶
type Embeddings struct {
// contains filtered or unexported fields
}
func (*Embeddings) CreateEmbeddings ¶
func (e *Embeddings) CreateEmbeddings(req *CreateEmbeddingsRequest) (*EmbeddingsResponse, error)
type EmbeddingsResponse ¶
// EmbeddingsResponse is the response body of the embeddings endpoint.
type EmbeddingsResponse struct {
Object string `json:"object"`
Data []EmbeddingData `json:"data"`
Model string `json:"model"`
// Usage reports token consumption for the request.
Usage struct {
PromptTokens int `json:"prompt_tokens"`
TotalTokens int `json:"total_tokens"`
} `json:"usage"`
}
type Error ¶
// Error is the wrapper object the API returns on failure; the actual
// error payload lives under the "error" key.
type Error struct {
Detail ErrorDetail `json:"error"`
}
type ErrorDetail ¶
type Files ¶
type Files struct {
// contains filtered or unexported fields
}
func (*Files) DeleteFile ¶
func (f *Files) DeleteFile(fileID string) (*DeleteFileResponse, error)
func (*Files) ListFiles ¶
func (f *Files) ListFiles() (*ListFilesResponse, error)
func (*Files) RetrieveFileContent ¶
type FineTuneEvent ¶
type FineTunes ¶
type FineTunes struct {
// contains filtered or unexported fields
}
func (*FineTunes) CancelFineTune ¶
func (ft *FineTunes) CancelFineTune(fineTuneID string) (*CreateFineTuneResult, error)
func (*FineTunes) CreateFineTune ¶
func (ft *FineTunes) CreateFineTune(req *CreateFineTuneRequest) (*CreateFineTuneResult, error)
CreateFineTune Manage fine-tuning jobs to tailor a model to your specific training data.
Related guide: [Fine-tune models](https://platform.openai.com/docs/guides/fine-tuning)
管理微调作业以使模型适合您的特定训练数据。
func (*FineTunes) DeleteFineTuneModel ¶
func (ft *FineTunes) DeleteFineTuneModel(model string) (*DeleteFineTuneModelResult, error)
func (*FineTunes) ListFineTuneEvents ¶
func (ft *FineTunes) ListFineTuneEvents(req *ListFineTuneEventsRequest) (*ListFineTuneEventsResult, error)
func (*FineTunes) ListFineTunes ¶
func (ft *FineTunes) ListFineTunes() (*ListFineTunesResult, error)
ListFineTunes List your organization's fine-tuning jobs
列出您的组织的微调作业
func (*FineTunes) RetrieveFineTune ¶
func (ft *FineTunes) RetrieveFineTune(fineTuneID string) (*CreateFineTuneResult, error)
type ImageResponse ¶
type Images ¶
type Images struct {
// contains filtered or unexported fields
}
func (*Images) CreateImage ¶
func (i *Images) CreateImage(req *CreateImageRequest) (*ImageResponse, error)
func (*Images) CreateImageEdit ¶
func (i *Images) CreateImageEdit(req *CreateImageEditRequest) (*ImageResponse, error)
func (*Images) CreateImageVariation ¶
func (i *Images) CreateImageVariation(req *CreateImageVariationRequest) (*ImageResponse, error)
type ListFilesResponse ¶
type ListFineTuneEventsResult ¶
// ListFineTuneEventsResult is the response body of the
// list-fine-tune-events endpoint.
type ListFineTuneEventsResult struct {
Object string `json:"object"`
Data []FineTuneEvent `json:"data"`
// Embedded *Error captures an API error payload, if the request failed.
*Error
}
type ListFineTunesResult ¶
// ListFineTunesResult is the response body of the list-fine-tunes endpoint.
type ListFineTunesResult struct {
Object string `json:"object"`
Data []CreateFineTuneResult `json:"data"`
// Embedded *Error captures an API error payload, if the request failed.
*Error
}
type ListModelsResult ¶
// ListModelsResult is the response body of the list-models endpoint.
type ListModelsResult struct {
Data []ListModelsResultData `json:"data"`
Object string `json:"object"`
Root string `json:"root"`
// TODO: exact type unknown; left as interface{} until the API payload is confirmed.
Parent interface{} `json:"parent"`
// Embedded *Error captures an API error payload, if the request failed.
*Error
}
type ListModelsResultData ¶
// ListModelsResultData describes a single model entry in ListModelsResult.
type ListModelsResultData struct {
ID string `json:"id"`
Object string `json:"object"`
OwnedBy string `json:"owned_by"`
Permission []Permission `json:"permission"`
}
type Model ¶
type Model struct {
// contains filtered or unexported fields
}
func (*Model) ListModels ¶
func (m *Model) ListModels() (*ListModelsResult, error)
ListModels https://platform.openai.com/docs/api-reference/models/list
func (*Model) RetrieveModel ¶
func (m *Model) RetrieveModel(model string) (*RetrieveModelResult, error)
RetrieveModel https://platform.openai.com/docs/api-reference/models/retrieve
type Moderations ¶
type Moderations struct {
// contains filtered or unexported fields
}
func (*Moderations) CreateModeration ¶
func (m *Moderations) CreateModeration(req *CreateModerationRequest) (*CreateModerationResult, error)
type Permission ¶
// Permission is one model-permission entry, kept as a free-form map
// because its schema is not modelled here.
type Permission map[string]interface{}
type RetrieveModelResult ¶
type TrainingFile ¶
type TranscriptionResponse ¶
// TranscriptionResponse holds the text returned by the audio
// transcription and translation endpoints.
type TranscriptionResponse struct {
Text string `json:"text"`
}
type V1 ¶
// V1 bundles the per-endpoint clients of the v1 API surface; construct
// it with NewV1.
type V1 struct {
Model Model
Completion Completion
Chat Chat
Edits Edits
Images Images
Embeddings Embeddings
Audio Audio
Files Files
FineTunes FineTunes
Moderations Moderations
}
func NewV1 ¶
func NewV1(helper httphelper.Helper) *V1
Click to show internal directories.
Click to hide internal directories.