Documentation ¶
Index ¶
- func GetOpenAIClient(m interface{}) (*client.OpenAIClient, error)
- func GetOpenAIClientWithAdminKey(m interface{}) (*client.OpenAIClient, error)
- func GetOpenAIClientWithProjectKey(m interface{}) (*client.OpenAIClient, error)
- func Provider() *schema.Provider
- type AdminAPIKey
- type AdminAPIKeyResponse
- type AdminAPIKeyWithLastUsed
- type AssistantCreateRequest
- type AssistantFileCreateRequest
- type AssistantFileResponse
- type AssistantResponse
- type AssistantSummary
- type AssistantTool
- type AssistantToolFunction
- type AttachmentRequest
- type AudioTranscriptionData
- type AudioTranscriptionsResponse
- type AudioTranslationData
- type AudioTranslationsResponse
- type BatchCreateRequest
- type BatchError
- type BatchResponse
- type BatchesResponse
- type ChatCompletionChoice
- type ChatCompletionMessage
- type ChatCompletionMessageResponse
- type ChatCompletionRequest
- type ChatCompletionResponse
- type ChatCompletionUsage
- type ChatCompletionsResponse
- type ChatFunction
- type ChatFunctionCall
- type ChatFunctionDef
- type ChatToolCall
- type CheckpointPermissionRequest
- type CompletionChoice
- type CompletionLogprobs
- type CompletionRequest
- type CompletionResponse
- type CompletionUsage
- type DPO
- type DPOHyperparams
- type DataSourceChatCompletionChoice
- type DataSourceChatCompletionMessage
- type DataSourceChatCompletionResponse
- type EditChoice
- type EditRequest
- type EditResponse
- type EditUsage
- type EmbeddingData
- type EmbeddingRequest
- type EmbeddingResponse
- type EmbeddingUsage
- type ErrorResponse
- type EvalConfig
- type EvaluationCreateRequest
- type EvaluationMetric
- type EvaluationResponse
- type EvaluationTestCase
- type FileResponse
- type FineTunedModelSummary
- type FineTuningCheckpointData
- type FineTuningCheckpointsResponse
- type FineTuningError
- type FineTuningEventData
- type FineTuningEventsResponse
- type FineTuningHyperparams
- type FineTuningIntegration
- type FineTuningJobData
- type FineTuningJobRequest
- type FineTuningJobResponse
- type FineTuningJobsResponse
- type FineTuningMethod
- type FunctionCall
- type ImageEditData
- type ImageEditResponse
- type ImageGenerationData
- type ImageGenerationRequest
- type ImageGenerationResponse
- type ImageVariationData
- type ImageVariationResponse
- type ListAPIKeysResponse
- type ListAssistantsResponse
- type ListFilesResponse
- type ListRunStepsResponse
- type ListRunsResponse
- type MessageAttachment
- type MessageContent
- type MessageContentText
- type MessageCreateRequest
- type MessageResponse
- type MessagesListResponse
- type Model
- type ModelInfoResponse
- type ModelPermission
- type ModelsResponse
- type ModerationCategories
- type ModerationCategoryScores
- type ModerationRequest
- type ModerationResponse
- type ModerationResult
- type OpenAIClient
- type PlaygroundConfigRequest
- type PlaygroundConfigResponse
- type ProjectAPIKeyCreateRequest
- type ProjectAPIKeyResponse
- type ProjectAPIKeysResponse
- type ProjectResourcesResponse
- type ProjectResponse
- type ProjectsListResponse
- type ResponseFormat
- type RunError
- type RunRequest
- type RunRequiredAction
- type RunResponse
- type RunStepResponse
- type RunSubmitToolsRequest
- type RunToolCall
- type RunUsage
- type Segment
- type ServiceAccountSummary
- type SpeechToTextData
- type SpeechToTextResponse
- type SpeechToTextsResponse
- type SubmitToolOutputsRequest
- type Supervised
- type SupervisedHyperparams
- type TextToSpeechData
- type TextToSpeechRequest
- type TextToSpeechsResponse
- type ThreadCreateRequest
- type ThreadMessage
- type ThreadResponse
- type ThreadRun
- type ThreadRunCreateRequest
- type ThreadRunMessageRequest
- type ThreadRunRequest
- type ThreadRunResponse
- type ToolOutput
- type ToolRequest
- type TranscriptionResponse
- type TranslationResponse
- type TruncationStrategy
- type UploadCreateRequest
- type UploadResponse
- type UsageLimits
- type WandBIntegration
Constants ¶
This section is empty.
Variables ¶
This section is empty.
Functions ¶
func GetOpenAIClient ¶
func GetOpenAIClient(m interface{}) (*client.OpenAIClient, error)
GetOpenAIClient extracts the client from the meta interface passed to resource functions
func GetOpenAIClientWithAdminKey ¶
func GetOpenAIClientWithAdminKey(m interface{}) (*client.OpenAIClient, error)
GetOpenAIClientWithAdminKey returns a client configured with the admin API key. This is useful for resources that require admin-level API keys (such as organization management).
func GetOpenAIClientWithProjectKey ¶
func GetOpenAIClientWithProjectKey(m interface{}) (*client.OpenAIClient, error)
GetOpenAIClientWithProjectKey returns a client configured with the project API key. This is useful for resources that require project-level API keys (such as models).
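All three getters follow the same pattern. The sketch below shows how a hypothetical resource read function might pick one of them; the function name is illustrative, and the schema and diag types come from the Terraform Plugin SDK v2, not from this package:

import (
	"context"

	"github.com/hashicorp/terraform-plugin-sdk/v2/diag"
	"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
)

func resourceExampleRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
	// Use the project-scoped client for project-level resources; swap in
	// GetOpenAIClientWithAdminKey for organization-level resources instead.
	c, err := GetOpenAIClientWithProjectKey(m)
	if err != nil {
		return diag.FromErr(err)
	}
	_ = c // call the OpenAI API through c and populate d here
	return nil
}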
Types ¶
type AdminAPIKey ¶
type AdminAPIKey struct { ID string `json:"id"` Name string `json:"name"` CreatedAt int64 `json:"created_at"` ExpiresAt *int64 `json:"expires_at,omitempty"` Object string `json:"object"` Scopes []string `json:"scopes,omitempty"` }
AdminAPIKey represents the API response for getting an OpenAI admin API key
type AdminAPIKeyResponse ¶
type AdminAPIKeyResponse struct { ID string `json:"id"` Name string `json:"name"` CreatedAt int64 `json:"created_at"` ExpiresAt *int64 `json:"expires_at,omitempty"` Object string `json:"object"` Scopes []string `json:"scopes,omitempty"` Key string `json:"key"` }
AdminAPIKeyResponse represents the API response for an OpenAI admin API key
type AdminAPIKeyWithLastUsed ¶
type AdminAPIKeyWithLastUsed struct { ID string `json:"id"` Name string `json:"name"` CreatedAt int64 `json:"created_at"` ExpiresAt *int64 `json:"expires_at,omitempty"` LastUsedAt *int64 `json:"last_used_at,omitempty"` Object string `json:"object"` Scopes []string `json:"scopes,omitempty"` Key string `json:"key,omitempty"` }
AdminAPIKeyWithLastUsed extends AdminAPIKeyResponse to include the last_used_at field
type AssistantCreateRequest ¶
type AssistantCreateRequest struct { Model string `json:"model"` Name string `json:"name,omitempty"` Description string `json:"description,omitempty"` Instructions string `json:"instructions,omitempty"` Tools []AssistantTool `json:"tools,omitempty"` FileIDs []string `json:"file_ids,omitempty"` Metadata map[string]interface{} `json:"metadata,omitempty"` }
AssistantCreateRequest represents the payload for creating an assistant in the OpenAI API. It contains all the fields that can be set when creating a new assistant.
type AssistantFileCreateRequest ¶
type AssistantFileCreateRequest struct {
FileID string `json:"file_id"`
}
AssistantFileCreateRequest represents the request to create an assistant file
type AssistantFileResponse ¶
type AssistantFileResponse struct { ID string `json:"id"` Object string `json:"object"` CreatedAt int `json:"created_at"` AssistantID string `json:"assistant_id"` FileID string `json:"file_id"` }
AssistantFileResponse represents the API response for an OpenAI assistant file
type AssistantResponse ¶
type AssistantResponse struct { ID string `json:"id"` Object string `json:"object"` CreatedAt int `json:"created_at"` Name string `json:"name"` Description string `json:"description"` Model string `json:"model"` Instructions string `json:"instructions"` Tools []AssistantTool `json:"tools"` FileIDs []string `json:"file_ids"` Metadata map[string]interface{} `json:"metadata"` }
AssistantResponse represents the API response for an OpenAI assistant. It contains all the fields returned by the OpenAI API when creating or retrieving an assistant.
type AssistantSummary ¶
type AssistantSummary struct { ID string `json:"id"` Object string `json:"object"` CreatedAt int `json:"created_at"` Name string `json:"name"` Model string `json:"model"` }
AssistantSummary represents a summary of an assistant
type AssistantTool ¶
type AssistantTool struct { Type string `json:"type"` Function *AssistantToolFunction `json:"function,omitempty"` }
AssistantTool represents a tool that can be used by an assistant. Tools can be of different types such as code interpreter, retrieval, function, or file search.
type AssistantToolFunction ¶
type AssistantToolFunction struct { Name string `json:"name"` Description string `json:"description,omitempty"` Parameters json.RawMessage `json:"parameters"` }
AssistantToolFunction represents a function definition for an assistant tool. It contains the name, description, and parameters of the function in JSON Schema format.
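As a rough illustration of how these pieces fit together, the sketch below (assumed to live in this package, with encoding/json imported) builds an AssistantCreateRequest that exposes a single function tool. The model name, function name, and JSON Schema are placeholders:

func buildWeatherAssistant() ([]byte, error) {
	// JSON Schema for the function's parameters; the schema itself is illustrative.
	params := json.RawMessage(`{"type":"object","properties":{"city":{"type":"string"}},"required":["city"]}`)

	req := AssistantCreateRequest{
		Model:        "gpt-4o",
		Name:         "weather-helper",
		Instructions: "Answer questions about the weather.",
		Tools: []AssistantTool{
			{
				Type: "function",
				Function: &AssistantToolFunction{
					Name:        "get_weather",
					Description: "Look up the current weather for a city.",
					Parameters:  params,
				},
			},
		},
	}
	return json.Marshal(req) // JSON payload for the create-assistant call
}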
type AttachmentRequest ¶
type AttachmentRequest struct { FileID string `json:"file_id"` Tools []ToolRequest `json:"tools"` }
AttachmentRequest represents a file attachment in a message creation request.
type AudioTranscriptionData ¶
type AudioTranscriptionData struct { ID string `json:"id"` Object string `json:"object"` CreatedAt int `json:"created_at"` Status string `json:"status"` Model string `json:"model"` Text string `json:"text"` Duration int `json:"duration"` Language string `json:"language"` }
AudioTranscriptionData represents a single transcription in the list response
type AudioTranscriptionsResponse ¶
type AudioTranscriptionsResponse struct { Object string `json:"object"` Data []AudioTranscriptionData `json:"data"` HasMore bool `json:"has_more"` }
AudioTranscriptionsResponse represents the API response for listing OpenAI audio transcriptions
type AudioTranslationData ¶
type AudioTranslationData struct { ID string `json:"id"` Object string `json:"object"` CreatedAt int `json:"created_at"` Status string `json:"status"` Model string `json:"model"` Text string `json:"text"` Duration int `json:"duration"` }
AudioTranslationData represents a single translation in the list response
type AudioTranslationsResponse ¶
type AudioTranslationsResponse struct { Object string `json:"object"` Data []AudioTranslationData `json:"data"` HasMore bool `json:"has_more"` }
AudioTranslationsResponse represents the API response for listing OpenAI audio translations
type BatchCreateRequest ¶
type BatchCreateRequest struct { InputFileID string `json:"input_file_id"` // ID of the input file to process Endpoint string `json:"endpoint"` // API endpoint to use for processing CompletionWindow string `json:"completion_window"` // Time window for job completion Metadata map[string]interface{} `json:"metadata,omitempty"` // Optional metadata for the batch }
BatchCreateRequest represents the request payload for creating a new batch job. It specifies the input file, endpoint, and completion window for the batch operation.
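A minimal sketch of building such a request; the helper name is hypothetical, and the endpoint and completion window values follow the public Batch API documentation rather than anything defined in this package:

func newBatchRequest(inputFileID string) BatchCreateRequest {
	return BatchCreateRequest{
		InputFileID:      inputFileID,
		Endpoint:         "/v1/chat/completions", // endpoint whose requests the input file contains
		CompletionWindow: "24h",                  // completion window documented by the Batch API
		Metadata:         map[string]interface{}{"managed_by": "terraform"},
	}
}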
type BatchError ¶
type BatchError struct { Code string `json:"code"` // Error code identifying the type of error Message string `json:"message"` // Human-readable error message Param string `json:"param,omitempty"` // Parameter that caused the error (if applicable) }
BatchError represents an error that occurred during batch processing. It contains details about the error, including its code and message.
type BatchResponse ¶
type BatchResponse struct {
	ID               string                 `json:"id"`                // Unique identifier for the batch job
	Object           string                 `json:"object"`            // Type of object (e.g., "batch")
	Endpoint         string                 `json:"endpoint"`          // Endpoint used for this batch
	Status           string                 `json:"status"`            // Current status of the batch job
	InputFileID      string                 `json:"input_file_id"`     // ID of the input file
	CompletionWindow string                 `json:"completion_window"` // Time window for completion
	OutputFileID     string                 `json:"output_file_id"`    // ID of the output file (if available)
	ErrorFileID      string                 `json:"error_file_id"`     // ID of the error file (if available)
	CreatedAt        int                    `json:"created_at"`        // Unix timestamp when the job was created
	InProgressAt     *int                   `json:"in_progress_at"`    // When processing started
	ExpiresAt        int                    `json:"expires_at"`        // Unix timestamp when the job expires
	FinalizingAt     *int                   `json:"finalizing_at"`     // When finalizing started
	CompletedAt      *int                   `json:"completed_at"`      // When processing completed
	FailedAt         *int                   `json:"failed_at"`         // When processing failed
	ExpiredAt        *int                   `json:"expired_at"`        // When the job expired
	CancellingAt     *int                   `json:"cancelling_at"`     // When cancellation started
	CancelledAt      *int                   `json:"cancelled_at"`      // When the job was cancelled
	RequestCounts    map[string]int         `json:"request_counts"`    // Statistics about request processing
	Errors           interface{}            `json:"errors,omitempty"`  // Errors that occurred (if any)
	Metadata         map[string]interface{} `json:"metadata"`          // Additional custom data
}
BatchResponse represents the API response for batch operations. It contains information about the batch job, including its status, timing, and results.
type BatchesResponse ¶
type BatchesResponse struct { Object string `json:"object"` Data []BatchResponse `json:"data"` HasMore bool `json:"has_more"` }
BatchesResponse represents the API response for listing OpenAI batch jobs
type ChatCompletionChoice ¶
type ChatCompletionChoice struct { Index int `json:"index"` // Index of the choice in the list Message ChatCompletionMessage `json:"message"` // The generated message FinishReason string `json:"finish_reason"` // Reason why the completion finished }
ChatCompletionChoice represents a single completion option from the model. It contains the generated message and information about why the completion finished.
type ChatCompletionMessage ¶
type ChatCompletionMessage struct { Role string `json:"role"` // Role of the message sender (system, user, assistant) Content string `json:"content"` // Content of the message FunctionCall *ChatFunctionCall `json:"function_call,omitempty"` // Optional function call Name string `json:"name,omitempty"` // Optional name of the message sender }
ChatCompletionMessage represents a message in the chat completion. It can be either a user message, assistant message, or system message.
type ChatCompletionMessageResponse ¶
type ChatCompletionMessageResponse struct { Object string `json:"object"` Data []ChatCompletionMessage `json:"data"` FirstID string `json:"first_id"` LastID string `json:"last_id"` HasMore bool `json:"has_more"` }
ChatCompletionMessageResponse represents the API response for listing chat completion messages.
type ChatCompletionRequest ¶
type ChatCompletionRequest struct {
	Model            string                  `json:"model"`                       // ID of the model to use
	Messages         []ChatCompletionMessage `json:"messages"`                    // List of messages in the conversation
	Functions        []ChatFunction          `json:"functions,omitempty"`         // Optional list of available functions
	FunctionCall     interface{}             `json:"function_call,omitempty"`     // Optional function call configuration
	Temperature      float64                 `json:"temperature,omitempty"`       // Sampling temperature
	TopP             float64                 `json:"top_p,omitempty"`             // Nucleus sampling parameter
	N                int                     `json:"n,omitempty"`                 // Number of completions to generate
	Stream           bool                    `json:"stream,omitempty"`            // Whether to stream the response
	Stop             []string                `json:"stop,omitempty"`              // Optional stop sequences
	MaxTokens        int                     `json:"max_tokens,omitempty"`        // Maximum tokens to generate
	PresencePenalty  float64                 `json:"presence_penalty,omitempty"`  // Presence penalty parameter
	FrequencyPenalty float64                 `json:"frequency_penalty,omitempty"` // Frequency penalty parameter
	LogitBias        map[string]float64      `json:"logit_bias,omitempty"`        // Optional token bias
	User             string                  `json:"user,omitempty"`              // Optional user identifier
	Store            bool                    `json:"store,omitempty"`             // Whether to store the completion
	Metadata         map[string]string       `json:"metadata,omitempty"`          // Optional metadata for filtering
}
ChatCompletionRequest represents the request payload for creating a chat completion. It specifies the model, messages, and various parameters to control the completion.
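A minimal sketch of assembling and serializing such a request (the helper name, model, and parameter values are illustrative; encoding/json is assumed to be imported):

func buildChatRequest(prompt string) ([]byte, error) {
	req := ChatCompletionRequest{
		Model: "gpt-4o-mini", // illustrative model name
		Messages: []ChatCompletionMessage{
			{Role: "system", Content: "You are a terse assistant."},
			{Role: "user", Content: prompt},
		},
		Temperature: 0.2,
		MaxTokens:   256,
	}
	return json.Marshal(req) // JSON payload for the chat completions call
}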
type ChatCompletionResponse ¶
type ChatCompletionResponse struct { ID string `json:"id"` // Unique identifier for the completion Object string `json:"object"` // Type of object (e.g., "chat.completion") Created int `json:"created"` // Unix timestamp when the completion was created Model string `json:"model"` // Model used for the completion Choices []ChatCompletionChoice `json:"choices"` // List of possible completions Usage ChatCompletionUsage `json:"usage"` // Token usage statistics }
ChatCompletionResponse represents the API response for chat completions. It contains the generated response, model information, and usage statistics.
type ChatCompletionUsage ¶
type ChatCompletionUsage struct { PromptTokens int `json:"prompt_tokens"` // Number of tokens in the prompt CompletionTokens int `json:"completion_tokens"` // Number of tokens in the completion TotalTokens int `json:"total_tokens"` // Total number of tokens used }
ChatCompletionUsage represents token usage statistics for the completion request. It tracks the number of tokens used in the prompt and completion.
type ChatCompletionsResponse ¶
type ChatCompletionsResponse struct { Object string `json:"object"` Data []ChatCompletionResponse `json:"data"` HasMore bool `json:"has_more"` }
ChatCompletionsResponse represents the API response for listing chat completions.
type ChatFunction ¶
type ChatFunction struct { Name string `json:"name"` // Name of the function Description string `json:"description,omitempty"` // Optional function description Parameters json.RawMessage `json:"parameters"` // JSON schema for function parameters }
ChatFunction represents a function that can be called by the model. It contains the function name, description, and parameter schema.
type ChatFunctionCall ¶
type ChatFunctionCall struct { Name string `json:"name"` // Name of the function to call Arguments string `json:"arguments"` // JSON string containing function arguments }
ChatFunctionCall represents a function call generated by the model. It contains the function name and arguments to be passed to the function.
type ChatFunctionDef ¶
type ChatFunctionDef struct { Name string `json:"name"` // Name of the function to call Arguments string `json:"arguments"` // JSON string containing function arguments }
ChatFunctionDef represents a function definition in a tool call. It contains the function name and arguments.
type ChatToolCall ¶
type ChatToolCall struct { ID string `json:"id"` // The ID of the tool call Type string `json:"type"` // The type of tool call Function *ChatFunctionDef `json:"function"` // The function details }
ChatToolCall represents a tool call generated by the model. It contains the ID, type, and function details.
type CheckpointPermissionRequest ¶
type CheckpointPermissionRequest struct {
ProjectIDs []string `json:"project_ids"`
}
CheckpointPermissionRequest represents the request body for creating a checkpoint permission
type CompletionChoice ¶
type CompletionChoice struct { Text string `json:"text"` // Generated completion text Index int `json:"index"` // Position in the list of choices Logprobs *CompletionLogprobs `json:"logprobs"` // Optional probability information FinishReason string `json:"finish_reason"` // Why the completion stopped }
CompletionChoice represents a single completion option. It contains the generated text and metadata about how the completion was generated.
type CompletionLogprobs ¶
type CompletionLogprobs struct { Tokens []string `json:"tokens"` // Individual tokens in the completion TokenLogprobs []float64 `json:"token_logprobs"` // Log probabilities of tokens TopLogprobs []map[string]float64 `json:"top_logprobs"` // Top alternative tokens and their probabilities TextOffset []int `json:"text_offset"` // Character offsets for tokens }
CompletionLogprobs represents probability information for a completion. It provides detailed token-level probability data for analyzing the completion.
type CompletionRequest ¶
type CompletionRequest struct {
	Model            string             `json:"model"`                       // ID of the model to use
	Prompt           string             `json:"prompt"`                      // Input text to generate from
	MaxTokens        int                `json:"max_tokens,omitempty"`        // Maximum tokens to generate
	Temperature      float64            `json:"temperature,omitempty"`       // Sampling temperature (0-2)
	TopP             float64            `json:"top_p,omitempty"`             // Nucleus sampling parameter
	N                int                `json:"n,omitempty"`                 // Number of completions to generate
	Stream           bool               `json:"stream,omitempty"`            // Whether to stream responses
	Logprobs         *int               `json:"logprobs,omitempty"`          // Number of log probabilities to return
	Echo             bool               `json:"echo,omitempty"`              // Whether to include prompt in completion
	Stop             []string           `json:"stop,omitempty"`              // Sequences where completion should stop
	PresencePenalty  float64            `json:"presence_penalty,omitempty"`  // Penalty for new tokens
	FrequencyPenalty float64            `json:"frequency_penalty,omitempty"` // Penalty for frequent tokens
	BestOf           int                `json:"best_of,omitempty"`           // Number of completions to generate server-side
	LogitBias        map[string]float64 `json:"logit_bias,omitempty"`        // Modify likelihood of specific tokens
	User             string             `json:"user,omitempty"`              // Optional user identifier
	Suffix           string             `json:"suffix,omitempty"`            // Text to append to completion
}
CompletionRequest represents the request payload for generating completions. It specifies the parameters that control the text generation process.
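Because Logprobs is a pointer, leaving it nil omits the field from the JSON entirely. A hedged sketch (helper name and values are illustrative):

func buildCompletionRequest(prompt string) CompletionRequest {
	logprobs := 5 // ask for the top 5 log probabilities per token
	return CompletionRequest{
		Model:       "gpt-3.5-turbo-instruct", // illustrative model name
		Prompt:      prompt,
		MaxTokens:   64,
		Temperature: 0.7,
		Logprobs:    &logprobs, // pointer: leave nil to omit the field
		Stop:        []string{"\n"},
	}
}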
type CompletionResponse ¶
type CompletionResponse struct { ID string `json:"id"` // Unique identifier for the completion Object string `json:"object"` // Type of object (e.g., "text_completion") Created int `json:"created"` // Unix timestamp of creation Model string `json:"model"` // Model used for completion Choices []CompletionChoice `json:"choices"` // List of generated completions Usage CompletionUsage `json:"usage"` // Token usage statistics }
CompletionResponse represents the API response for text completions. It contains the generated completions and metadata about the generation process.
type CompletionUsage ¶
type CompletionUsage struct { PromptTokens int `json:"prompt_tokens"` // Number of tokens in the prompt CompletionTokens int `json:"completion_tokens"` // Number of tokens in the completion TotalTokens int `json:"total_tokens"` // Total tokens used }
CompletionUsage represents token usage statistics for the request. It tracks the number of tokens used in both the input and output.
type DPO ¶
type DPO struct {
Hyperparameters *DPOHyperparams `json:"hyperparameters,omitempty"`
}
DPO represents the DPO fine-tuning configuration
type DPOHyperparams ¶
type DPOHyperparams struct {
Beta *float64 `json:"beta,omitempty"`
}
DPOHyperparams represents hyperparameters for DPO fine-tuning
type DataSourceChatCompletionChoice ¶
type DataSourceChatCompletionChoice struct { Index int `json:"index"` // Index of the choice in the list Message DataSourceChatCompletionMessage `json:"message"` // The generated message FinishReason string `json:"finish_reason"` // Reason why the completion finished }
DataSourceChatCompletionChoice extends ChatCompletionChoice with our custom message type
type DataSourceChatCompletionMessage ¶
type DataSourceChatCompletionMessage struct { Role string `json:"role"` // Role of the message sender (system, user, assistant) Content string `json:"content"` // Content of the message FunctionCall *ChatFunctionCall `json:"function_call,omitempty"` // Optional function call Name string `json:"name,omitempty"` // Optional name of the message sender ToolCalls []ChatToolCall `json:"tool_calls,omitempty"` // Optional tool calls }
DataSourceChatCompletionMessage extends ChatCompletionMessage to include tool calls for use by the data source.
type DataSourceChatCompletionResponse ¶
type DataSourceChatCompletionResponse struct { ID string `json:"id"` // Unique identifier for the completion Object string `json:"object"` // Type of object (e.g., "chat.completion") Created int `json:"created"` // Unix timestamp when the completion was created Model string `json:"model"` // Model used for the completion Choices []DataSourceChatCompletionChoice `json:"choices"` // List of possible completions Usage ChatCompletionUsage `json:"usage"` // Token usage statistics }
DataSourceChatCompletionResponse extends ChatCompletionResponse with our custom choice type
type EditChoice ¶
type EditChoice struct { Text string `json:"text"` // The edited text Index int `json:"index"` // Position of this choice in the list }
EditChoice represents a single edit option from the model. It contains the edited text and its position in the list of choices.
type EditRequest ¶
type EditRequest struct { Model string `json:"model"` // ID of the model to use Input string `json:"input,omitempty"` // Text to be edited Instruction string `json:"instruction"` // Instructions for how to edit the text Temperature float64 `json:"temperature,omitempty"` // Sampling temperature TopP float64 `json:"top_p,omitempty"` // Nucleus sampling parameter N int `json:"n,omitempty"` // Number of edits to generate }
EditRequest represents the request payload for creating a text edit. It specifies the model, input text, instruction, and various parameters to control the edit.
type EditResponse ¶
type EditResponse struct { ID string `json:"id"` // Unique identifier for the edit Object string `json:"object"` // Type of object (e.g., "edit") Created int `json:"created"` // Unix timestamp when the edit was created Model string `json:"model"` // Model used for the edit Choices []EditChoice `json:"choices"` // List of possible edits Usage EditUsage `json:"usage"` // Token usage statistics }
EditResponse represents the API response for text edits. It contains the edited text, model information, and usage statistics.
type EditUsage ¶
type EditUsage struct { PromptTokens int `json:"prompt_tokens"` // Number of tokens in the input CompletionTokens int `json:"completion_tokens"` // Number of tokens in the completion TotalTokens int `json:"total_tokens"` // Total number of tokens used }
EditUsage represents token usage statistics for the edit request. It tracks the number of tokens used in the input and completion.
type EmbeddingData ¶
type EmbeddingData struct { Object string `json:"object"` // Type of object (e.g., "embedding") Index int `json:"index"` // Position of this embedding in the list Embedding json.RawMessage `json:"embedding"` // Vector representation of the text (can be float array or base64 string) }
EmbeddingData represents a single text embedding. It contains the vector representation of the input text.
type EmbeddingRequest ¶
type EmbeddingRequest struct { Model string `json:"model"` // ID of the model to use Input interface{} `json:"input"` // Text or list of texts to embed User string `json:"user,omitempty"` // Optional user identifier EncodingFormat string `json:"encoding_format,omitempty"` // Format of the embedding output Dimensions int `json:"dimensions,omitempty"` // Optional number of dimensions }
EmbeddingRequest represents the request payload for creating text embeddings. It specifies the model, input text, and various parameters to control the embedding process.
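Input is an interface{} so it can carry either a single string or a slice of strings. A small sketch of both forms (the helper name, model, and dimension count are illustrative):

func buildEmbeddingRequests(texts []string) (single, batch EmbeddingRequest) {
	// A single string as input...
	single = EmbeddingRequest{
		Model: "text-embedding-3-small", // illustrative model name
		Input: texts[0],
	}
	// ...or a slice of strings for batch embedding.
	batch = EmbeddingRequest{
		Model:      "text-embedding-3-small",
		Input:      texts,
		Dimensions: 256, // optional: request shorter vectors
	}
	return single, batch
}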
type EmbeddingResponse ¶
type EmbeddingResponse struct { Object string `json:"object"` // Type of object (e.g., "list") Data []EmbeddingData `json:"data"` // List of generated embeddings Model string `json:"model"` // Model used for the embeddings Usage EmbeddingUsage `json:"usage"` // Token usage statistics }
EmbeddingResponse represents the API response for text embeddings. It contains the generated embeddings, model information, and usage statistics.
type EmbeddingUsage ¶
type EmbeddingUsage struct { PromptTokens int `json:"prompt_tokens"` // Number of tokens in the input text TotalTokens int `json:"total_tokens"` // Total number of tokens used }
EmbeddingUsage represents token usage statistics for the embedding request. It tracks the number of tokens used in the input text.
type ErrorResponse ¶
type ErrorResponse struct {
	Error struct {
		Message string `json:"message"` // Human-readable error message
		Type    string `json:"type"`    // Type of error (e.g., "invalid_request_error")
		Code    string `json:"code"`    // Error code for programmatic handling
	} `json:"error"`
}
ErrorResponse represents an error response from the OpenAI API. It contains detailed information about any errors that occur during API operations, providing structured error information for proper error handling and debugging.
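A sketch of how a caller might decode such a body into a Go error (the helper name is hypothetical; encoding/json and fmt are assumed to be imported):

func decodeAPIError(statusCode int, body []byte) error {
	var apiErr ErrorResponse
	if err := json.Unmarshal(body, &apiErr); err != nil {
		return fmt.Errorf("HTTP %d with unparseable error body: %w", statusCode, err)
	}
	return fmt.Errorf("OpenAI API error (HTTP %d, type %q, code %q): %s",
		statusCode, apiErr.Error.Type, apiErr.Error.Code, apiErr.Error.Message)
}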
type EvalConfig ¶
type EvalConfig struct { ID string `json:"id"` Model string `json:"model"` Name string `json:"name"` Description string `json:"description,omitempty"` TestData string `json:"test_data"` Metrics []string `json:"metrics"` ProjectPath string `json:"project_path"` Status string `json:"status"` Results map[string]interface{} `json:"results,omitempty"` Metadata map[string]interface{} `json:"metadata,omitempty"` CreatedAt int `json:"created_at,omitempty"` CompletedAt int `json:"completed_at,omitempty"` }
EvalConfig represents the configuration for the evaluation API
type EvaluationCreateRequest ¶
type EvaluationCreateRequest struct { Name string `json:"name"` Description string `json:"description,omitempty"` Model string `json:"model"` TestCases []EvaluationTestCase `json:"test_cases"` Metrics []EvaluationMetric `json:"metrics"` ProjectID string `json:"project_id,omitempty"` Metadata map[string]interface{} `json:"metadata,omitempty"` }
EvaluationCreateRequest represents the payload for creating an evaluation
type EvaluationMetric ¶
type EvaluationMetric struct { Type string `json:"type"` Weight float64 `json:"weight,omitempty"` Threshold float64 `json:"threshold,omitempty"` Aggregation string `json:"aggregation,omitempty"` }
EvaluationMetric represents a metric in an evaluation
type EvaluationResponse ¶
type EvaluationResponse struct { ID string `json:"id"` Object string `json:"object"` CreatedAt int `json:"created_at"` Name string `json:"name"` Description string `json:"description,omitempty"` Model string `json:"model"` Status string `json:"status"` TestCases []EvaluationTestCase `json:"test_cases"` Metrics []EvaluationMetric `json:"metrics"` CompletedAt int `json:"completed_at,omitempty"` ProjectID string `json:"project_id,omitempty"` Metadata map[string]interface{} `json:"metadata,omitempty"` Results map[string]interface{} `json:"results,omitempty"` }
EvaluationResponse represents the API response for an evaluation
type EvaluationTestCase ¶
type EvaluationTestCase struct { ID string `json:"id"` Input json.RawMessage `json:"input"` Ideal json.RawMessage `json:"ideal,omitempty"` Metadata map[string]interface{} `json:"metadata,omitempty"` }
EvaluationTestCase represents a test case in an evaluation
type FileResponse ¶
type FileResponse struct { ID string `json:"id"` // Unique identifier for the file Object string `json:"object"` // Type of object (e.g., "file") Bytes int `json:"bytes"` // Size of the file in bytes CreatedAt int `json:"created_at"` // Unix timestamp of file creation Filename string `json:"filename"` // Original name of the uploaded file Purpose string `json:"purpose"` // Intended use of the file (e.g., "fine-tune", "assistants") }
FileResponse represents the API response for an OpenAI file. It contains all the fields returned by the OpenAI API when creating or retrieving a file. This structure provides comprehensive information about the file's status, purpose, and metadata.
type FineTunedModelSummary ¶
type FineTunedModelSummary struct { ID string `json:"id"` Object string `json:"object"` CreatedAt int `json:"created_at"` FineTunedModel string `json:"fine_tuned_model"` Status string `json:"status"` }
FineTunedModelSummary represents a summary of a fine-tuned model
type FineTuningCheckpointData ¶
type FineTuningCheckpointData struct { ID string `json:"id"` // Unique identifier for this checkpoint Object string `json:"object"` // Type of object (fine_tuning.checkpoint) FineTuningJobID string `json:"fine_tuning_job_id"` // ID of the fine-tuning job that created this checkpoint CreatedAt int `json:"created_at"` // Unix timestamp when the checkpoint was created Status string `json:"status"` // Status of the checkpoint (e.g., active, deleted) TrainedTokens int `json:"trained_tokens"` // Number of tokens processed during training until this checkpoint TrainingProgress float64 `json:"training_progress"` // Progress percentage of the fine-tuning job when this checkpoint was created }
FineTuningCheckpointData represents a single fine-tuning checkpoint
type FineTuningCheckpointsResponse ¶
type FineTuningCheckpointsResponse struct { Object string `json:"object"` // Type of object returned (list) Data []FineTuningCheckpointData `json:"data"` // List of checkpoints HasMore bool `json:"has_more"` // Whether there are more checkpoints to fetch }
FineTuningCheckpointsResponse represents the API response for fine-tuning checkpoints
type FineTuningError ¶
type FineTuningError struct { Message string `json:"message"` // Human-readable error message Type string `json:"type"` // Type of error (e.g., "validation_error") Code string `json:"code"` // Error code for programmatic handling }
FineTuningError represents an error that occurred during fine-tuning. It provides detailed information about what went wrong during the training process.
type FineTuningEventData ¶
type FineTuningEventData struct { Object string `json:"object"` // Type of object (event) ID string `json:"id"` // Unique identifier for this event CreatedAt int `json:"created_at"` // Unix timestamp when the event was created Level string `json:"level"` // Event level (info, warning, error) Message string `json:"message"` // The message describing the event Type string `json:"type"` // Event type (e.g., metrics, status_update) Data any `json:"data,omitempty"` // Additional data about the event }
FineTuningEventData represents a single fine-tuning event
type FineTuningEventsResponse ¶
type FineTuningEventsResponse struct { Object string `json:"object"` // Type of object returned (list) Data []FineTuningEventData `json:"data"` // List of events HasMore bool `json:"has_more"` // Whether there are more events to fetch }
FineTuningEventsResponse represents the API response for fine-tuning events
type FineTuningHyperparams ¶
type FineTuningHyperparams struct { NEpochs interface{} `json:"n_epochs,omitempty"` // Number of training epochs BatchSize interface{} `json:"batch_size,omitempty"` // Size of training batches LearningRateMultiplier interface{} `json:"learning_rate_multiplier,omitempty"` // Learning rate adjustment factor }
FineTuningHyperparams represents the hyperparameters used for fine-tuning. These parameters control various aspects of the training process and can be customized to achieve different training objectives.
func (*FineTuningHyperparams) MarshalJSON ¶
func (hp *FineTuningHyperparams) MarshalJSON() ([]byte, error)
MarshalJSON implements custom JSON marshaling for FineTuningHyperparams and aids in debugging the marshaled output
type FineTuningIntegration ¶
type FineTuningIntegration struct { Type string `json:"type"` WandB *WandBIntegration `json:"wandb,omitempty"` }
FineTuningIntegration represents an integration for fine-tuning
type FineTuningJobData ¶
type FineTuningJobData struct {
	ID              string                `json:"id"`                         // Unique identifier for the fine-tuning job
	Object          string                `json:"object"`                     // Type of object (e.g., "fine_tuning.job")
	Model           string                `json:"model"`                      // Base model being fine-tuned
	CreatedAt       int                   `json:"created_at"`                 // Unix timestamp of job creation
	FinishedAt      *int                  `json:"finished_at,omitempty"`      // Unix timestamp of job completion
	Status          string                `json:"status"`                     // Current status of the fine-tuning job
	TrainingFile    string                `json:"training_file"`              // ID of the training data file
	ValidationFile  *string               `json:"validation_file,omitempty"`  // Optional ID of validation data file
	Hyperparameters FineTuningHyperparams `json:"hyperparameters"`            // Training hyperparameters
	ResultFiles     []string              `json:"result_files"`               // List of result file IDs
	TrainedTokens   *int                  `json:"trained_tokens,omitempty"`   // Number of tokens processed
	FineTunedModel  *string               `json:"fine_tuned_model,omitempty"` // ID of the resulting model
	Error           *FineTuningError      `json:"error,omitempty"`            // Error information if job failed
}
FineTuningJobData represents a fine-tuning job entry in the list.
type FineTuningJobRequest ¶
type FineTuningJobRequest struct { Model string `json:"model"` // Base model to fine-tune TrainingFile string `json:"training_file"` // ID of the training data file ValidationFile string `json:"validation_file,omitempty"` // Optional validation data file Hyperparameters *FineTuningHyperparams `json:"hyperparameters,omitempty"` // Optional training parameters Suffix string `json:"suffix,omitempty"` // Optional suffix for the fine-tuned model name }
FineTuningJobRequest represents the request payload for creating a fine-tuning job. It specifies the model to fine-tune, training data, and optional parameters that control the fine-tuning process.
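A hedged sketch of a job request; note that FineTuningHyperparams fields are interface{} values, so they accept either numbers or the literal string "auto". The helper name, model, and suffix are illustrative:

func buildFineTuningJob(trainingFileID string) FineTuningJobRequest {
	return FineTuningJobRequest{
		Model:        "gpt-4o-mini-2024-07-18", // illustrative base model
		TrainingFile: trainingFileID,
		Suffix:       "tf-provider",
		Hyperparameters: &FineTuningHyperparams{
			NEpochs:                3,      // a numeric value...
			BatchSize:              "auto", // ...or the literal string "auto"
			LearningRateMultiplier: "auto",
		},
	}
}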
type FineTuningJobResponse ¶
type FineTuningJobResponse struct {
	ID              string                `json:"id"`                         // Unique identifier for the fine-tuning job
	Object          string                `json:"object"`                     // Type of object (e.g., "fine_tuning.job")
	Model           string                `json:"model"`                      // Base model being fine-tuned
	CreatedAt       int                   `json:"created_at"`                 // Unix timestamp of job creation
	FinishedAt      int                   `json:"finished_at,omitempty"`      // Unix timestamp of job completion
	Status          string                `json:"status"`                     // Current status of the fine-tuning job
	TrainingFile    string                `json:"training_file"`              // ID of the training data file
	ValidationFile  string                `json:"validation_file,omitempty"`  // Optional ID of validation data file
	Hyperparameters FineTuningHyperparams `json:"hyperparameters"`            // Training hyperparameters
	ResultFiles     []string              `json:"result_files"`               // List of result file IDs
	TrainedTokens   int                   `json:"trained_tokens,omitempty"`   // Number of tokens processed
	FineTunedModel  string                `json:"fine_tuned_model,omitempty"` // ID of the resulting model
	Error           *FineTuningError      `json:"error,omitempty"`            // Error information if job failed
}
FineTuningJobResponse represents the API response for a fine-tuning job. It contains comprehensive information about the fine-tuning process, including job status, training details, and results. This structure captures all aspects of the fine-tuning job from creation to completion.
type FineTuningJobsResponse ¶
type FineTuningJobsResponse struct { Object string `json:"object"` // Type of object, usually "list" Data []FineTuningJobData `json:"data"` // List of fine-tuning jobs HasMore bool `json:"has_more"` // Whether there are more jobs to fetch }
FineTuningJobsResponse represents the API response for listing fine-tuning jobs.
type FineTuningMethod ¶
type FineTuningMethod struct { Type string `json:"type"` Supervised *Supervised `json:"supervised,omitempty"` DPO *DPO `json:"dpo,omitempty"` }
FineTuningMethod represents the method configuration for fine-tuning
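A short sketch of building either variant; the type strings follow the OpenAI fine-tuning API's "supervised" and "dpo" method names, and the helper names are hypothetical:

func supervisedMethod(epochs int) FineTuningMethod {
	return FineTuningMethod{
		Type:       "supervised",
		Supervised: &Supervised{Hyperparameters: &SupervisedHyperparams{NEpochs: &epochs}},
	}
}

func dpoMethod(beta float64) FineTuningMethod {
	return FineTuningMethod{
		Type: "dpo",
		DPO:  &DPO{Hyperparameters: &DPOHyperparams{Beta: &beta}},
	}
}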
type FunctionCall ¶
FunctionCall represents a function call made by a tool.
type ImageEditData ¶
type ImageEditData struct { URL string `json:"url,omitempty"` // URL to the edited image B64JSON string `json:"b64_json,omitempty"` // Base64-encoded image data }
ImageEditData represents a single edited image. It contains the edited image data in either URL or base64 format.
type ImageEditResponse ¶
type ImageEditResponse struct { Created int `json:"created"` // Unix timestamp of image creation Data []ImageEditData `json:"data"` // List of edited images }
ImageEditResponse represents the API response for image editing. It contains the edited images and metadata about the editing process. This structure provides access to both URL and base64-encoded image data.
type ImageGenerationData ¶
type ImageGenerationData struct { URL string `json:"url,omitempty"` // URL to the generated image B64JSON string `json:"b64_json,omitempty"` // Base64-encoded image data RevisedPrompt string `json:"revised_prompt,omitempty"` // Modified prompt used for generation }
ImageGenerationData represents a single generated image. It contains the image data in either URL or base64 format, along with any revised prompt that was used to generate the image.
type ImageGenerationRequest ¶
type ImageGenerationRequest struct {
	Model          string `json:"model,omitempty"`           // ID of the model to use
	Prompt         string `json:"prompt"`                    // Text description of desired image
	N              int    `json:"n,omitempty"`               // Number of images to generate
	Quality        string `json:"quality,omitempty"`         // Quality level of generated images
	ResponseFormat string `json:"response_format,omitempty"` // Format of the response (url or b64_json)
	Size           string `json:"size,omitempty"`            // Dimensions of generated images
	Style          string `json:"style,omitempty"`           // Style to apply to generated images
	User           string `json:"user,omitempty"`            // Optional user identifier
}
ImageGenerationRequest represents the request payload for generating images. It specifies the parameters that control the image generation process, including model selection, prompt, and various generation options.
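A minimal sketch of a request (the helper name and the model, size, and quality values are illustrative):

func buildImageRequest(prompt string) ImageGenerationRequest {
	return ImageGenerationRequest{
		Model:          "dall-e-3", // illustrative model name
		Prompt:         prompt,
		N:              1,
		Size:           "1024x1024",
		Quality:        "standard",
		ResponseFormat: "b64_json", // or "url"
	}
}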
type ImageGenerationResponse ¶
type ImageGenerationResponse struct { Created int `json:"created"` // Unix timestamp of image creation Data []ImageGenerationData `json:"data"` // List of generated images }
ImageGenerationResponse represents the API response for image generation. It contains the generated images and metadata about the generation process. This structure provides access to both URL and base64-encoded image data.
type ImageVariationData ¶
type ImageVariationData struct { URL string `json:"url,omitempty"` // URL to the variation image B64JSON string `json:"b64_json,omitempty"` // Base64-encoded image data }
ImageVariationData represents a single image variation. It contains the variation data in either URL or base64 format.
type ImageVariationResponse ¶
type ImageVariationResponse struct { Created int `json:"created"` // Unix timestamp of variation creation Data []ImageVariationData `json:"data"` // List of generated variations }
ImageVariationResponse represents the API response for image variations. It contains the generated variations and metadata about the variation process. This structure provides access to both URL and base64-encoded image data.
type ListAPIKeysResponse ¶
type ListAPIKeysResponse struct { Data []AdminAPIKeyWithLastUsed `json:"data"` HasMore bool `json:"has_more"` Object string `json:"object"` }
ListAPIKeysResponse represents the API response for listing admin API keys
type ListAssistantsResponse ¶
type ListAssistantsResponse struct { Object string `json:"object"` // Object type, always "list" Data []client.AssistantResponse `json:"data"` // Array of assistant objects FirstID string `json:"first_id"` // ID of the first assistant in the list LastID string `json:"last_id"` // ID of the last assistant in the list HasMore bool `json:"has_more"` // Whether there are more assistants to fetch }
ListAssistantsResponse represents the API response for listing OpenAI assistants.
type ListFilesResponse ¶
type ListFilesResponse struct { Data []FileResponse `json:"data"` Object string `json:"object"` }
ListFilesResponse represents the API response for listing OpenAI files
type ListRunStepsResponse ¶
type ListRunStepsResponse struct { Object string `json:"object"` // Object type, always "list" Data []RunStepResponse `json:"data"` // Array of run steps FirstID string `json:"first_id"` // ID of the first item in the list LastID string `json:"last_id"` // ID of the last item in the list HasMore bool `json:"has_more"` // Whether there are more items to fetch }
ListRunStepsResponse represents the API response for listing run steps. It provides a paginated list of steps in a run, with metadata for navigation.
type ListRunsResponse ¶
type ListRunsResponse struct { Object string `json:"object"` // Object type, always "list" Data []RunResponse `json:"data"` // Array of runs FirstID string `json:"first_id"` // ID of the first item in the list LastID string `json:"last_id"` // ID of the last item in the list HasMore bool `json:"has_more"` // Whether there are more items to fetch }
ListRunsResponse represents the API response for listing runs in a thread. It provides details about the list of runs and pagination metadata.
type MessageAttachment ¶
type MessageAttachment struct { ID string `json:"id"` Type string `json:"type"` AssistantID string `json:"assistant_id,omitempty"` CreatedAt int `json:"created_at"` }
MessageAttachment represents an attachment in a message.
type MessageContent ¶
type MessageContent struct { Type string `json:"type"` Text *MessageContentText `json:"text,omitempty"` }
MessageContent represents the content of a message. It can contain text or other types of content with their respective annotations.
type MessageContentText ¶
type MessageContentText struct { Value string `json:"value"` Annotations []interface{} `json:"annotations,omitempty"` }
MessageContentText represents the text content of a message. It includes the text value and any associated annotations.
type MessageCreateRequest ¶
type MessageCreateRequest struct { Role string `json:"role"` Content string `json:"content"` Attachments []AttachmentRequest `json:"attachments,omitempty"` Metadata map[string]interface{} `json:"metadata,omitempty"` }
MessageCreateRequest represents the payload for creating a message in the OpenAI API. It contains all the fields that can be set when creating a new message.
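A small sketch of a user message with provider-supplied metadata (the helper name and metadata keys are illustrative):

func buildUserMessage(text string) MessageCreateRequest {
	return MessageCreateRequest{
		Role:    "user",
		Content: text,
		Metadata: map[string]interface{}{
			"managed_by": "terraform", // illustrative metadata key
		},
	}
}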
type MessageResponse ¶
type MessageResponse struct { ID string `json:"id"` Object string `json:"object"` CreatedAt int `json:"created_at"` ThreadID string `json:"thread_id"` Role string `json:"role"` Content []MessageContent `json:"content"` AssistantID string `json:"assistant_id,omitempty"` RunID string `json:"run_id,omitempty"` Metadata map[string]interface{} `json:"metadata"` Attachments []MessageAttachment `json:"attachments,omitempty"` }
MessageResponse represents the API response for an OpenAI message. It contains all the fields returned by the OpenAI API when creating or retrieving a message.
type MessagesListResponse ¶
type MessagesListResponse struct { Object string `json:"object"` Data []MessageResponse `json:"data"` FirstID string `json:"first_id"` LastID string `json:"last_id"` HasMore bool `json:"has_more"` }
MessagesListResponse represents the API response for a list of messages
type Model ¶
type Model struct { ID string `json:"id"` Object string `json:"object"` Created int `json:"created"` OwnedBy string `json:"owned_by"` }
Model represents an OpenAI model
type ModelInfoResponse ¶
type ModelInfoResponse struct { ID string `json:"id"` Object string `json:"object"` Created int `json:"created"` OwnedBy string `json:"owned_by"` Permission []ModelPermission `json:"permission"` }
ModelInfoResponse represents the API response for an OpenAI model info endpoint
type ModelPermission ¶
type ModelPermission struct {
	ID                 string      `json:"id"`
	Object             string      `json:"object"`
	Created            int         `json:"created"`
	AllowCreateEngine  bool        `json:"allow_create_engine"`
	AllowSampling      bool        `json:"allow_sampling"`
	AllowLogprobs      bool        `json:"allow_logprobs"`
	AllowSearchIndices bool        `json:"allow_search_indices"`
	AllowView          bool        `json:"allow_view"`
	AllowFineTuning    bool        `json:"allow_fine_tuning"`
	Organization       string      `json:"organization"`
	Group              interface{} `json:"group"`
	IsBlocking         bool        `json:"is_blocking"`
}
ModelPermission represents the permission details for a model
type ModelsResponse ¶
ModelsResponse represents the API response from the models endpoint
type ModerationCategories ¶
type ModerationCategories struct {
	Sexual                bool `json:"sexual"`                 // Sexual content
	Hate                  bool `json:"hate"`                   // Hate speech
	Harassment            bool `json:"harassment"`             // Harassing content
	SelfHarm              bool `json:"self-harm"`              // Self-harm content
	SexualMinors          bool `json:"sexual/minors"`          // Sexual content involving minors
	HateThreatening       bool `json:"hate/threatening"`       // Threatening hate speech
	ViolenceGraphic       bool `json:"violence/graphic"`       // Graphic violence
	SelfHarmIntent        bool `json:"self-harm/intent"`       // Intent to self-harm
	SelfHarmInstructions  bool `json:"self-harm/instructions"` // Instructions for self-harm
	HarassmentThreatening bool `json:"harassment/threatening"` // Threatening harassment
	Violence              bool `json:"violence"`               // General violence
}
ModerationCategories represents the categories of content detected during moderation. Each field indicates whether content of that category was detected in the input.
type ModerationCategoryScores ¶
type ModerationCategoryScores struct {
	Sexual                float64 `json:"sexual"`                 // Score for sexual content
	Hate                  float64 `json:"hate"`                   // Score for hate speech
	Harassment            float64 `json:"harassment"`             // Score for harassment
	SelfHarm              float64 `json:"self-harm"`              // Score for self-harm
	SexualMinors          float64 `json:"sexual/minors"`          // Score for sexual content involving minors
	HateThreatening       float64 `json:"hate/threatening"`       // Score for threatening hate speech
	ViolenceGraphic       float64 `json:"violence/graphic"`       // Score for graphic violence
	SelfHarmIntent        float64 `json:"self-harm/intent"`       // Score for intent to self-harm
	SelfHarmInstructions  float64 `json:"self-harm/instructions"` // Score for self-harm instructions
	HarassmentThreatening float64 `json:"harassment/threatening"` // Score for threatening harassment
	Violence              float64 `json:"violence"`               // Score for general violence
}
ModerationCategoryScores represents the confidence scores for each moderation category. Each field contains a float value between 0 and 1 indicating the confidence level.
type ModerationRequest ¶
type ModerationRequest struct { Input string `json:"input"` // Text content to moderate Model string `json:"model,omitempty"` // Optional model to use for moderation }
ModerationRequest represents the request payload for content moderation. It specifies the input text to moderate and optionally the model to use.
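A brief sketch of building a request and scanning the corresponding response (the helper names and the model value are illustrative):

func moderate(text string) ModerationRequest {
	return ModerationRequest{
		Input: text,
		Model: "omni-moderation-latest", // optional; illustrative model name
	}
}

func anyFlagged(resp ModerationResponse) bool {
	for _, r := range resp.Results {
		if r.Flagged {
			return true
		}
	}
	return false
}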
type ModerationResponse ¶
type ModerationResponse struct { ID string `json:"id"` // Unique identifier for the moderation request Model string `json:"model"` // Model used for moderation Results []ModerationResult `json:"results"` // List of moderation results }
ModerationResponse represents the API response for content moderation. It contains the moderation results and metadata about the moderation process. This structure provides comprehensive information about content safety and compliance.
type ModerationResult ¶
type ModerationResult struct { Flagged bool `json:"flagged"` // Whether the content was flagged Categories ModerationCategories `json:"categories"` // Categories of detected content CategoryScores ModerationCategoryScores `json:"category_scores"` // Confidence scores for each category }
ModerationResult represents the moderation analysis for a single input. It contains detailed information about flagged content and category scores.
type OpenAIClient ¶
type OpenAIClient struct { *client.OpenAIClient // Embed the client package's OpenAIClient ProjectAPIKey string // Store the project API key separately AdminAPIKey string // Store the admin API key separately }
OpenAIClient represents a client for interacting with the OpenAI API. It handles authentication and provides methods for making API requests.
type PlaygroundConfigRequest ¶
type PlaygroundConfigRequest struct { Name string `json:"name"` Settings map[string]interface{} `json:"settings"` }
PlaygroundConfigRequest represents the request to create or update a playground configuration
type PlaygroundConfigResponse ¶
type PlaygroundConfigResponse struct { ID string `json:"id"` Object string `json:"object"` CreatedAt int `json:"created_at"` UpdatedAt int `json:"updated_at"` Name string `json:"name"` Settings map[string]interface{} `json:"settings"` }
PlaygroundConfigResponse represents the API response for an OpenAI playground configuration
type ProjectAPIKeyCreateRequest ¶
type ProjectAPIKeyCreateRequest struct {
Name string `json:"name,omitempty"`
}
ProjectAPIKeyCreateRequest represents the request to create a project API key
type ProjectAPIKeyResponse ¶
type ProjectAPIKeyResponse struct { ID string `json:"id"` Object string `json:"object"` Name string `json:"name"` CreatedAt int `json:"created_at"` LastUsedAt int `json:"last_used_at,omitempty"` Value string `json:"value,omitempty"` // Only returned on creation }
ProjectAPIKeyResponse represents the API response for an OpenAI project API key
type ProjectAPIKeysResponse ¶
type ProjectAPIKeysResponse struct { Object string `json:"object"` Data []ProjectAPIKeyResponse `json:"data"` }
ProjectAPIKeysResponse represents the API response for a list of OpenAI project API keys
type ProjectResourcesResponse ¶
type ProjectResourcesResponse struct { APIKeys []ProjectAPIKeyResponse `json:"api_keys"` Assistants []AssistantSummary `json:"assistants"` Files []FileResponse `json:"files"` FineTunedModels []FineTunedModelSummary `json:"fine_tuned_models"` ServiceAccounts []ServiceAccountSummary `json:"service_accounts"` }
ProjectResourcesResponse represents the API response for OpenAI project resources
type ProjectResponse ¶
type ProjectResponse struct { ID string `json:"id"` Object string `json:"object"` Name string `json:"name"` CreatedAt int `json:"created_at"` Status string `json:"status"` UsageLimits UsageLimits `json:"usage_limits"` }
ProjectResponse represents the API response for an OpenAI project
type ProjectsListResponse ¶
type ProjectsListResponse struct { Object string `json:"object"` // Object type, always "list" Data []ProjectResponse `json:"data"` // Array of project objects FirstID string `json:"first_id"` // ID of the first project in the list LastID string `json:"last_id"` // ID of the last project in the list HasMore bool `json:"has_more"` // Whether there are more projects to fetch }
ProjectsListResponse represents the API response for a list of OpenAI projects
type ResponseFormat ¶
type ResponseFormat struct { Type string `json:"type,omitempty"` // Format type (auto, json_object, etc.) JSONSchema interface{} `json:"json_schema,omitempty"` // JSON schema for structured output }
ResponseFormat represents the format configuration for the assistant's response.
type RunRequest ¶
type RunRequest struct {
	AssistantID   string                   `json:"assistant_id"`              // ID of the assistant to use for this run
	Model         string                   `json:"model,omitempty"`           // Optional model override for this run
	Instructions  string                   `json:"instructions,omitempty"`    // Optional instructions override for this run
	Tools         []map[string]interface{} `json:"tools,omitempty"`           // Tools the assistant can use for this run
	Metadata      map[string]interface{}   `json:"metadata,omitempty"`        // Optional metadata for the run
	Temperature   *float64                 `json:"temperature,omitempty"`     // Sampling temperature (0-2)
	MaxTokens     *int                     `json:"max_tokens,omitempty"`      // Maximum number of tokens to generate
	TopP          *float64                 `json:"top_p,omitempty"`           // Nucleus sampling parameter (0-1)
	StreamForTool *bool                    `json:"stream_for_tool,omitempty"` // Whether to stream tool outputs
}
RunRequest represents the request payload for creating a run in the OpenAI API. It contains all the configuration parameters needed to execute an assistant run, including model settings, tools, and execution parameters.
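Optional numeric fields are pointers so that unset values are omitted from the JSON payload. A hedged sketch (the helper name and values are illustrative):

func buildRun(assistantID string) RunRequest {
	temperature := 0.3
	maxTokens := 512
	return RunRequest{
		AssistantID:  assistantID,
		Instructions: "Prefer concise answers.", // overrides the assistant's default instructions
		Temperature:  &temperature,              // pointer fields are omitted from JSON when nil
		MaxTokens:    &maxTokens,
		Metadata:     map[string]interface{}{"managed_by": "terraform"},
	}
}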
type RunRequiredAction ¶
type RunRequiredAction struct { Type string `json:"type"` SubmitTool *RunSubmitToolsRequest `json:"submit_tool_outputs,omitempty"` }
RunRequiredAction represents an action that is required to continue a run.
type RunResponse ¶
type RunResponse struct {
	ID           string                   `json:"id"`                     // Unique identifier for the run
	Object       string                   `json:"object"`                 // Object type, always "thread.run"
	CreatedAt    int64                    `json:"created_at"`             // Unix timestamp when the run was created
	ThreadID     string                   `json:"thread_id"`              // ID of the thread this run belongs to
	AssistantID  string                   `json:"assistant_id"`           // ID of the assistant used for this run
	Status       string                   `json:"status"`                 // Current status of the run
	StartedAt    *int64                   `json:"started_at,omitempty"`   // Unix timestamp when the run started
	CompletedAt  *int64                   `json:"completed_at,omitempty"` // Unix timestamp when the run completed
	Model        string                   `json:"model"`                  // Model used for the run
	Instructions string                   `json:"instructions"`           // Instructions used for the run
	Tools        []map[string]interface{} `json:"tools"`                  // Tools available to the assistant
	FileIDs      []string                 `json:"file_ids"`               // Files available to the assistant
	Metadata     map[string]interface{}   `json:"metadata"`               // User-provided metadata
	Usage        *RunUsage                `json:"usage,omitempty"`        // Token usage statistics
}
RunResponse represents the API response for a run. It contains comprehensive information about a run's execution and status, including timing information, configuration, and results.
type RunStepResponse ¶
type RunStepResponse struct { ID string `json:"id"` // Unique identifier for the step Object string `json:"object"` // Object type, always "thread.run.step" CreatedAt int64 `json:"created_at"` // Unix timestamp when the step was created RunID string `json:"run_id"` // ID of the run this step belongs to Type string `json:"type"` // Type of step (e.g., "message_creation", "tool_calls") Status string `json:"status"` // Current status of the step Details map[string]interface{} `json:"details"` // Additional details about the step }
RunStepResponse represents a single step in a run's execution. It provides detailed information about each action taken during the run, including timing, status, and step-specific details.
type RunSubmitToolsRequest ¶
type RunSubmitToolsRequest struct {
ToolCalls []RunToolCall `json:"tool_calls"`
}
RunSubmitToolsRequest represents a request to submit tool outputs for a run.
type RunToolCall ¶
type RunToolCall struct { ID string `json:"id"` Type string `json:"type"` Function *FunctionCall `json:"function,omitempty"` }
RunToolCall represents a tool call that was made during a run.
type RunUsage ¶
type RunUsage struct {
    PromptTokens     int `json:"prompt_tokens"`     // Number of tokens in the prompt
    CompletionTokens int `json:"completion_tokens"` // Number of tokens in the completion
    TotalTokens      int `json:"total_tokens"`      // Total tokens used in the run
}
RunUsage represents token usage statistics for a run. It tracks the number of tokens used for prompts and completions, providing detailed information about resource consumption.
type Segment ¶
type Segment struct {
    ID    int     `json:"id"`    // Unique identifier for the segment
    Start float64 `json:"start"` // Start time of the segment in seconds
    End   float64 `json:"end"`   // End time of the segment in seconds
    Text  string  `json:"text"`  // Transcribed text for this segment
}
Segment represents a single segment of the audio transcription. It contains timing information and the transcribed text for that segment.
type ServiceAccountSummary ¶
type ServiceAccountSummary struct {
    ID        string `json:"id"`
    Object    string `json:"object"`
    CreatedAt int    `json:"created_at"`
    Name      string `json:"name"`
    Email     string `json:"email"`
}
ServiceAccountSummary represents a summary of a service account.
type SpeechToTextData ¶
type SpeechToTextData struct {
    ID        string `json:"id"`
    Object    string `json:"object"`
    CreatedAt int    `json:"created_at"`
    Status    string `json:"status"`
    Model     string `json:"model"`
    Text      string `json:"text"`
    Duration  int    `json:"duration"`
}
SpeechToTextData represents a single speech-to-text result in the list response.
type SpeechToTextResponse ¶
type SpeechToTextResponse struct {
Text string `json:"text"` // The transcribed text from the audio file
}
SpeechToTextResponse represents the API response for speech-to-text transcription. It contains the transcribed text from the audio input.
type SpeechToTextsResponse ¶
type SpeechToTextsResponse struct {
    Object  string             `json:"object"`
    Data    []SpeechToTextData `json:"data"`
    HasMore bool               `json:"has_more"`
}
SpeechToTextsResponse represents the API response for listing OpenAI speech-to-text outputs.
type SubmitToolOutputsRequest ¶
type SubmitToolOutputsRequest struct {
ToolOutputs []ToolOutput `json:"tool_outputs"` // List of tool outputs to submit
}
SubmitToolOutputsRequest represents the request payload for submitting tool outputs. It contains a list of tool outputs that were generated during a run step.
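A hedged sketch of building this payload, again assuming same-package compilation; the tool call ID and output value are placeholders.

package openai // assumption: the provider package that declares these types

import "encoding/json"

// submitToolOutputsBody is a hypothetical helper that pairs each tool call
// with its output and serializes the request body.
func submitToolOutputsBody() ([]byte, error) {
    req := SubmitToolOutputsRequest{
        ToolOutputs: []ToolOutput{
            {
                ToolCallID: "call_example",          // placeholder tool call ID
                Output:     `{"temperature": 21.5}`, // tool result, typically JSON-encoded
            },
        },
    }
    return json.Marshal(req)
}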
type Supervised ¶
type Supervised struct {
Hyperparameters *SupervisedHyperparams `json:"hyperparameters,omitempty"`
}
Supervised represents the supervised fine-tuning configuration.
type SupervisedHyperparams ¶
type SupervisedHyperparams struct {
    NEpochs                *int     `json:"n_epochs,omitempty"`
    BatchSize              *int     `json:"batch_size,omitempty"`
    LearningRateMultiplier *float64 `json:"learning_rate_multiplier,omitempty"`
}
SupervisedHyperparams represents hyperparameters for supervised fine-tuning.
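The sketch below shows one way the pointer-typed hyperparameters might be set; the helper name, package clause, and numeric values are illustrative assumptions.

package openai // assumption: the provider package that declares these types

// newSupervisedConfig is a hypothetical helper; nil fields are omitted from
// the JSON so the service applies its own defaults.
func newSupervisedConfig() Supervised {
    epochs := 3
    lr := 0.1
    return Supervised{
        Hyperparameters: &SupervisedHyperparams{
            NEpochs:                &epochs, // placeholder values, not recommendations
            LearningRateMultiplier: &lr,
            // BatchSize left nil to fall back to the service default
        },
    }
}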
type TextToSpeechData ¶
type TextToSpeechData struct {
    ID        string `json:"id"`
    Object    string `json:"object"`
    CreatedAt int    `json:"created_at"`
    Status    string `json:"status"`
    Model     string `json:"model"`
    Voice     string `json:"voice"`
    Input     string `json:"input"`
    Duration  int    `json:"duration"`
}
TextToSpeechData represents a single text-to-speech result in the list response.
type TextToSpeechRequest ¶
type TextToSpeechRequest struct {
    Model          string  `json:"model"`                     // ID of the model to use (e.g., "tts-1", "tts-1-hd", "gpt-4o-mini-tts")
    Input          string  `json:"input"`                     // Text to convert to speech
    Voice          string  `json:"voice"`                     // Voice to use for synthesis
    ResponseFormat string  `json:"response_format,omitempty"` // Format of the audio output
    Speed          float64 `json:"speed,omitempty"`           // Speed of speech (0.25 to 4.0)
    Instructions   string  `json:"instructions,omitempty"`    // Instructions to guide the voice (gpt-4o-mini-tts only)
}
TextToSpeechRequest represents the request payload for text-to-speech conversion. It specifies the parameters that control the speech synthesis process, including model selection, input text, voice, and various generation options.
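As an illustrative sketch (same-package assumption as above), a request body could be assembled like this; the model, voice, and format strings are example values, not an exhaustive list.

package openai // assumption: the provider package that declares TextToSpeechRequest

import "encoding/json"

// ttsRequestBody is a hypothetical helper; the field values are placeholders.
func ttsRequestBody() ([]byte, error) {
    req := TextToSpeechRequest{
        Model:          "tts-1", // e.g. "tts-1", "tts-1-hd", "gpt-4o-mini-tts"
        Input:          "Hello from the provider documentation.",
        Voice:          "alloy", // voice name accepted by the API
        ResponseFormat: "mp3",   // optional output format
        Speed:          1.0,     // optional, 0.25 to 4.0
    }
    return json.Marshal(req)
}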
type TextToSpeechsResponse ¶
type TextToSpeechsResponse struct {
    Object  string             `json:"object"`
    Data    []TextToSpeechData `json:"data"`
    HasMore bool               `json:"has_more"`
}
TextToSpeechsResponse represents the API response for listing OpenAI text-to-speech outputs.
type ThreadCreateRequest ¶
type ThreadCreateRequest struct {
    Messages []ThreadMessage        `json:"messages,omitempty"`
    Metadata map[string]interface{} `json:"metadata,omitempty"`
}
ThreadCreateRequest represents the request payload for creating a thread in the OpenAI API. It can include initial messages and metadata for the thread.
type ThreadMessage ¶
type ThreadMessage struct {
    Role     string                 `json:"role"`
    Content  string                 `json:"content"`
    FileIDs  []string               `json:"file_ids,omitempty"`
    Metadata map[string]interface{} `json:"metadata,omitempty"`
}
ThreadMessage represents a message within a thread. Each message has a role, content, optional file attachments, and metadata.
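A minimal sketch combining ThreadCreateRequest and ThreadMessage; the package clause, helper name, and message content are assumptions made for the example.

package openai // assumption: the provider package that declares these types

import "encoding/json"

// newThreadBody is a hypothetical helper that seeds a thread with one user
// message and a metadata entry.
func newThreadBody() ([]byte, error) {
    req := ThreadCreateRequest{
        Messages: []ThreadMessage{
            {
                Role:    "user",
                Content: "Summarize the attached report.", // placeholder content
            },
        },
        Metadata: map[string]interface{}{"source": "docs-example"},
    }
    return json.Marshal(req)
}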
type ThreadResponse ¶
type ThreadResponse struct {
    ID        string                 `json:"id"`
    Object    string                 `json:"object"`
    CreatedAt int                    `json:"created_at"`
    Metadata  map[string]interface{} `json:"metadata"`
}
ThreadResponse represents the API response for an OpenAI thread. It contains the thread's identifier, creation timestamp, and associated metadata.
type ThreadRun ¶
type ThreadRun struct {
    ID                  string                   `json:"id"`
    Object              string                   `json:"object"`
    CreatedAt           int                      `json:"created_at"`
    AssistantID         string                   `json:"assistant_id"`
    ThreadID            string                   `json:"thread_id"`
    Status              string                   `json:"status"`
    StartedAt           int                      `json:"started_at"`
    CompletedAt         int                      `json:"completed_at,omitempty"`
    LastError           *RunError                `json:"last_error,omitempty"`
    Model               string                   `json:"model"`
    Instructions        string                   `json:"instructions,omitempty"`
    Tools               []map[string]interface{} `json:"tools,omitempty"`
    FileIDs             []string                 `json:"file_ids,omitempty"`
    Metadata            map[string]interface{}   `json:"metadata,omitempty"`
    Usage               *RunUsage                `json:"usage,omitempty"`
    ExpiresAt           int                      `json:"expires_at,omitempty"`
    FailedAt            int                      `json:"failed_at,omitempty"`
    CancelledAt         int                      `json:"cancelled_at,omitempty"`
    RequiredAction      *RunRequiredAction       `json:"required_action,omitempty"`
    Temperature         *float64                 `json:"temperature,omitempty"`
    TopP                *float64                 `json:"top_p,omitempty"`
    ResponseFormat      *ResponseFormat          `json:"response_format,omitempty"`
    Stream              *bool                    `json:"stream,omitempty"`
    MaxCompletionTokens *int                     `json:"max_completion_tokens,omitempty"`
    MaxPromptTokens     *int                     `json:"max_prompt_tokens,omitempty"`
    TruncationStrategy  *TruncationStrategy      `json:"truncation_strategy,omitempty"`
}
ThreadRun represents a run in the OpenAI API.
type ThreadRunCreateRequest ¶
type ThreadRunCreateRequest struct {
    AssistantID         string                   `json:"assistant_id"`
    Thread              *ThreadCreateRequest     `json:"thread,omitempty"`
    Model               string                   `json:"model,omitempty"`
    Instructions        string                   `json:"instructions,omitempty"`
    Tools               []map[string]interface{} `json:"tools,omitempty"`
    Metadata            map[string]interface{}   `json:"metadata,omitempty"`
    Stream              *bool                    `json:"stream,omitempty"`
    Temperature         *float64                 `json:"temperature,omitempty"`
    TopP                *float64                 `json:"top_p,omitempty"`
    ResponseFormat      *ResponseFormat          `json:"response_format,omitempty"`
    MaxCompletionTokens *int                     `json:"max_completion_tokens,omitempty"`
    MaxPromptTokens     *int                     `json:"max_prompt_tokens,omitempty"`
    TruncationStrategy  *TruncationStrategy      `json:"truncation_strategy,omitempty"`
}
ThreadRunCreateRequest represents a request to create a new thread and run.
type ThreadRunMessageRequest ¶
type ThreadRunMessageRequest struct {
    Role        string                 `json:"role"`
    Content     string                 `json:"content"`
    FileIDs     []string               `json:"file_ids,omitempty"`
    Attachments []AttachmentRequest    `json:"attachments,omitempty"`
    Metadata    map[string]interface{} `json:"metadata,omitempty"`
}
ThreadRunMessageRequest represents a message in a thread run request.
type ThreadRunRequest ¶
type ThreadRunRequest struct {
    AssistantID         string                   `json:"assistant_id"`                    // ID of the assistant to use for this run
    Thread              *ThreadCreateRequest     `json:"thread,omitempty"`                // Thread configuration
    Model               string                   `json:"model,omitempty"`                 // Optional model override for this run
    Instructions        string                   `json:"instructions,omitempty"`          // Optional instructions override for this run
    Tools               []map[string]interface{} `json:"tools,omitempty"`                 // Tools the assistant can use for this run
    Metadata            map[string]interface{}   `json:"metadata,omitempty"`              // Optional metadata for the run
    Temperature         *float64                 `json:"temperature,omitempty"`           // Sampling temperature (0-2)
    MaxCompletionTokens *int                     `json:"max_completion_tokens,omitempty"` // Maximum number of completion tokens to generate
    MaxPromptTokens     *int                     `json:"max_prompt_tokens,omitempty"`     // Maximum number of prompt tokens to use
    TopP                *float64                 `json:"top_p,omitempty"`                 // Nucleus sampling parameter (0-1)
    ResponseFormat      *ResponseFormat          `json:"response_format,omitempty"`       // Response format configuration
    Stream              *bool                    `json:"stream,omitempty"`                // Whether to stream the response
    ToolChoice          interface{}              `json:"tool_choice,omitempty"`           // Controls which tool is called
    TruncationStrategy  *TruncationStrategy      `json:"truncation_strategy,omitempty"`   // Controls how the thread will be truncated
}
ThreadRunRequest represents the request payload for creating a run and thread together in the OpenAI API. It contains all the configuration parameters needed to execute an assistant run, including thread configuration, model settings, tools, and execution parameters.
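The sketch below assembles a ThreadRunRequest with a nested thread and a couple of run overrides; it assumes same-package compilation, and the IDs and values are placeholders.

package openai // assumption: the provider package that declares these types

import "encoding/json"

// createThreadAndRunBody is a hypothetical helper showing how the nested
// thread configuration and run overrides fit together in a single request.
func createThreadAndRunBody() ([]byte, error) {
    temp := 0.2
    maxCompletion := 512
    req := ThreadRunRequest{
        AssistantID: "asst_example", // placeholder assistant ID
        Thread: &ThreadCreateRequest{
            Messages: []ThreadMessage{{Role: "user", Content: "Hello"}},
        },
        Temperature:         &temp,
        MaxCompletionTokens: &maxCompletion,
    }
    return json.Marshal(req)
}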
type ThreadRunResponse ¶
type ThreadRunResponse struct {
    ID           string                   `json:"id"`                     // Unique identifier for the run
    Object       string                   `json:"object"`                 // Object type, always "thread.run"
    CreatedAt    int64                    `json:"created_at"`             // Unix timestamp when the run was created
    ThreadID     string                   `json:"thread_id"`              // ID of the thread this run belongs to
    AssistantID  string                   `json:"assistant_id"`           // ID of the assistant used for this run
    Status       string                   `json:"status"`                 // Current status of the run
    StartedAt    *int64                   `json:"started_at,omitempty"`   // Unix timestamp when the run started
    CompletedAt  *int64                   `json:"completed_at,omitempty"` // Unix timestamp when the run completed
    Model        string                   `json:"model"`                  // Model used for the run
    Instructions string                   `json:"instructions"`           // Instructions used for the run
    Tools        []map[string]interface{} `json:"tools"`                  // Tools available to the assistant
    FileIDs      []string                 `json:"file_ids"`               // Files available to the assistant
    Metadata     map[string]interface{}   `json:"metadata"`               // User-provided metadata
    Usage        *RunUsage                `json:"usage,omitempty"`        // Token usage statistics
}
ThreadRunResponse represents the API response for a thread run creation. It contains all the details of the created run, including thread ID, status, and configuration.
type ToolOutput ¶
type ToolOutput struct {
    ToolCallID string `json:"tool_call_id"` // ID of the tool call that generated this output
    Output     string `json:"output"`       // Output generated by the tool
}
ToolOutput represents the output from a single tool execution. It contains the tool call ID and the output generated by the tool.
type ToolRequest ¶
type ToolRequest struct {
Type string `json:"type"`
}
ToolRequest represents a tool in an attachment.
type TranscriptionResponse ¶
type TranscriptionResponse struct {
    Text     string    `json:"text"`               // The complete transcribed text
    Duration float64   `json:"duration,omitempty"` // Duration of the audio in seconds
    Segments []Segment `json:"segments,omitempty"` // Optional segments of the transcription
}
TranscriptionResponse represents the API response for audio transcriptions. It contains the transcribed text and optional metadata about the transcription.
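For illustration, a decoded transcription might be inspected as follows; the helper is hypothetical and assumes it is compiled alongside these types.

package openai // assumption: the provider package that declares these types

import (
    "encoding/json"
    "fmt"
)

// printSegments is a hypothetical helper that decodes a transcription payload
// and prints per-segment timing when the API returned segments.
func printSegments(raw []byte) error {
    var tr TranscriptionResponse
    if err := json.Unmarshal(raw, &tr); err != nil {
        return err
    }
    for _, seg := range tr.Segments { // Segments may be empty for plain-text responses
        fmt.Printf("%.2fs to %.2fs: %s\n", seg.Start, seg.End, seg.Text)
    }
    return nil
}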
type TranslationResponse ¶
type TranslationResponse struct {
    Text     string    `json:"text"`               // The complete translated text
    Duration float64   `json:"duration,omitempty"` // Duration of the audio in seconds
    Segments []Segment `json:"segments,omitempty"` // Optional segments of the translation
}
TranslationResponse represents the API response for audio translations. It contains the translated text and optional metadata about the translation.
type TruncationStrategy ¶
type TruncationStrategy struct {
    Type          string `json:"type,omitempty"`            // Type of truncation strategy
    LastNMessages int    `json:"last_n_messages,omitempty"` // Number of messages to keep
}
TruncationStrategy represents configuration for how a thread will be truncated.
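A small sketch of the serialized form, under the same same-package assumption; the strategy name used here is an example value.

package openai // assumption: the provider package that declares TruncationStrategy

import "encoding/json"

// truncationJSON is a hypothetical helper illustrating the serialized form;
// the strategy name is an example, not an exhaustive list of valid types.
func truncationJSON() ([]byte, error) {
    ts := TruncationStrategy{
        Type:          "last_messages", // keep only the most recent messages
        LastNMessages: 20,
    }
    return json.Marshal(ts) // {"type":"last_messages","last_n_messages":20}
}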
type UploadCreateRequest ¶
type UploadCreateRequest struct {
    Purpose  string `json:"purpose"`   // Intended use of the upload (e.g., "fine-tune")
    Filename string `json:"filename"`  // Name of the file being uploaded
    Bytes    int    `json:"bytes"`     // Size of the file in bytes
    MimeType string `json:"mime_type"` // MIME type of the file
}
UploadCreateRequest represents the request payload for creating a new upload. It contains the required parameters for initiating an upload.
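A hedged sketch of preparing the request body; the purpose, file name, size, and MIME type are placeholders rather than values derived from a real file.

package openai // assumption: the provider package that declares UploadCreateRequest

import "encoding/json"

// uploadCreateBody is a hypothetical helper that serializes the parameters
// needed to initiate an upload.
func uploadCreateBody() ([]byte, error) {
    req := UploadCreateRequest{
        Purpose:  "fine-tune",         // intended use of the upload
        Filename: "training.jsonl",    // placeholder file name
        Bytes:    1048576,             // placeholder size in bytes
        MimeType: "application/jsonl", // placeholder MIME type
    }
    return json.Marshal(req)
}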
type UploadResponse ¶
type UploadResponse struct {
    ID        string `json:"id"`         // Unique identifier for the upload
    Object    string `json:"object"`     // Type of object (e.g., "upload")
    Purpose   string `json:"purpose"`    // Intended use of the upload
    Filename  string `json:"filename"`   // Name of the uploaded file
    Bytes     int    `json:"bytes"`      // Size of the file in bytes
    CreatedAt int    `json:"created_at"` // Unix timestamp of upload creation
    Status    string `json:"status"`     // Current status of the upload (e.g., "pending", "completed")
}
UploadResponse represents the API response for an OpenAI upload. It contains information about the upload status and properties.
type UsageLimits ¶
type UsageLimits struct {
    MaxMonthlyDollars   float64 `json:"max_monthly_dollars"`
    MaxParallelRequests int     `json:"max_parallel_requests"`
    MaxTokens           int     `json:"max_tokens"`
}
UsageLimits represents the usage limits for a project.
type WandBIntegration ¶
type WandBIntegration struct {
    Project string   `json:"project"`
    Name    string   `json:"name,omitempty"`
    Tags    []string `json:"tags,omitempty"`
}
WandBIntegration represents a Weights & Biases integration.
Source Files ¶
- data_source_openai_admin_api_key.go
- data_source_openai_admin_api_keys.go
- data_source_openai_assistant.go
- data_source_openai_assistants.go
- data_source_openai_audio_transcription.go
- data_source_openai_audio_transcriptions.go
- data_source_openai_audio_translation.go
- data_source_openai_audio_translations.go
- data_source_openai_batch.go
- data_source_openai_batches.go
- data_source_openai_chat_completion.go
- data_source_openai_chat_completion_messages.go
- data_source_openai_chat_completions.go
- data_source_openai_file.go
- data_source_openai_files.go
- data_source_openai_fine_tuning_checkpoint_permissions.go
- data_source_openai_fine_tuning_checkpoints.go
- data_source_openai_fine_tuning_events.go
- data_source_openai_fine_tuning_job.go
- data_source_openai_fine_tuning_jobs.go
- data_source_openai_invite.go
- data_source_openai_invites.go
- data_source_openai_message.go
- data_source_openai_messages.go
- data_source_openai_model.go
- data_source_openai_model_response.go
- data_source_openai_model_response_input_items.go
- data_source_openai_model_responses.go
- data_source_openai_models.go
- data_source_openai_organization_user.go
- data_source_openai_organization_users.go
- data_source_openai_project.go
- data_source_openai_project_api_key.go
- data_source_openai_project_api_keys.go
- data_source_openai_project_resources.go
- data_source_openai_project_service_account.go
- data_source_openai_project_service_accounts.go
- data_source_openai_project_user.go
- data_source_openai_project_users.go
- data_source_openai_projects.go
- data_source_openai_rate_limit.go
- data_source_openai_rate_limits.go
- data_source_openai_run.go
- data_source_openai_runs.go
- data_source_openai_speech_to_text.go
- data_source_openai_speech_to_texts.go
- data_source_openai_text_to_speech.go
- data_source_openai_text_to_speechs.go
- data_source_openai_thread.go
- data_source_openai_thread_run.go
- data_source_openai_user_role.go
- data_source_openai_vector_store.go
- data_source_openai_vector_store_file.go
- data_source_openai_vector_store_file_batch.go
- data_source_openai_vector_store_file_batch_files.go
- data_source_openai_vector_store_file_content.go
- data_source_openai_vector_store_files.go
- data_source_openai_vector_stores.go
- provider.go
- resource_openai_admin_api_key.go
- resource_openai_assistant.go
- resource_openai_assistant_file.go
- resource_openai_audio_transcription.go
- resource_openai_audio_translation.go
- resource_openai_batch.go
- resource_openai_chat_completion.go
- resource_openai_completion.go
- resource_openai_edit.go
- resource_openai_embedding.go
- resource_openai_evaluation.go
- resource_openai_file.go
- resource_openai_fine_tuned_model.go
- resource_openai_fine_tuning_checkpoint_permission.go
- resource_openai_fine_tuning_job.go
- resource_openai_image_edit.go
- resource_openai_image_generation.go
- resource_openai_image_variation.go
- resource_openai_invite.go
- resource_openai_message.go
- resource_openai_model.go
- resource_openai_model_response.go
- resource_openai_moderation.go
- resource_openai_organization_user.go
- resource_openai_playground_config.go
- resource_openai_project.go
- resource_openai_project_api_key.go
- resource_openai_project_service_account.go
- resource_openai_project_user.go
- resource_openai_rate_limit.go
- resource_openai_run.go
- resource_openai_run_step.go
- resource_openai_speech_to_text.go
- resource_openai_text_to_speech.go
- resource_openai_thread.go
- resource_openai_thread_run.go
- resource_openai_upload.go
- resource_openai_user_role.go
- resource_openai_vector_store.go
- resource_openai_vector_store_file.go
- resource_openai_vector_store_file_batch.go