Documentation
¶
Overview ¶
Package models defines core types for model routing and selection.
Index ¶
- Constants
- type APIKey
- type APIKeyConfig
- type APIKeyCreateRequest
- type APIKeyResponse
- type APIKeyUsage
- type AdaptiveAnthropicUsage
- type AdaptiveChatCompletionChoice
- type AdaptiveChatCompletionChunkChoice
- type AdaptiveChatCompletionChunkChoiceDelta
- type AdaptiveChatCompletionMessage
- type AdaptiveGeminiUsage
- type AdaptiveUsage
- type AddCreditsParams
- type AddOrganizationMemberRequest
- type AddProjectMemberRequest
- type Alternative
- type AnthropicMessage
- type AnthropicMessageChunk
- type AnthropicMessageRequest
- type AuthConfig
- type CacheBackendType
- type CacheConfig
- type CacheResult
- type ChatCompletion
- type ChatCompletionChunk
- type ChatCompletionRequest
- type CircuitBreakerConfig
- type ClerkAuthConfig
- type CreditPackage
- type CreditTransaction
- type CreditTransactionType
- type DatabaseAuthConfig
- type DatabaseConfig
- type DatabaseType
- type DeductCreditsParams
- type EndpointConfig
- type EndpointsConfig
- type ExecutionFunc
- type FallbackConfig
- type FallbackMode
- type FallbackResult
- type GeminiGenerateContentResponse
- type GeminiGenerateRequest
- type ModelCapability
- type ModelRouterClientConfig
- type ModelRouterConfig
- type ModelSelectionRequest
- type ModelSelectionResponse
- type Organization
- type OrganizationCreateRequest
- type OrganizationCredit
- type OrganizationMember
- type OrganizationUpdateRequest
- type PeriodStats
- type Project
- type ProjectCreateRequest
- type ProjectMember
- type ProjectMemberRole
- type ProjectResponse
- type ProjectStatus
- type ProjectUpdateRequest
- type ProviderConfig
- type ProviderConfigCreateRequest
- type ProviderConfigUpdateRequest
- type ProviderConfigWithSource
- type ProviderConfiguration
- type ProviderConfigurationHistory
- type RecordUsageParams
- type RedisConfig
- type SelectModelRequest
- type SelectModelResponse
- type ServerConfig
- type StripeConfig
- type TaskType
- type UpdateProjectMemberRoleRequest
- type UsageByPeriod
- type UsageStats
- type User
- type UserCreateRequest
- type UserUpdateRequest
Constants ¶
const (
	BudgetResetNone    = ""
	BudgetResetDaily   = "daily"
	BudgetResetWeekly  = "weekly"
	BudgetResetMonthly = "monthly"
)
const (
	CacheTierSemanticExact   = "semantic_exact"
	CacheTierSemanticSimilar = "semantic_similar"
)
Cache tier constants
Variables ¶
This section is empty.
Functions ¶
This section is empty.
Types ¶
type APIKey ¶
// APIKey is the persistence model (GORM) for a provisioned API key,
// covering identity, ownership, scoping, rate limiting, and budget tracking.
type APIKey struct {
	ID   uint `gorm:"primaryKey;autoIncrement"`
	Name string
	// KeyHash is unique per key — presumably a digest of the secret rather
	// than the plaintext; confirm against the key-creation code.
	KeyHash string `gorm:"uniqueIndex"`
	// KeyPrefix is an indexed, displayable fragment used to look keys up.
	KeyPrefix string `gorm:"index"`
	// Ownership / attribution.
	OrganizationID string `gorm:"index"`
	UserID         string `gorm:"index"`
	ProjectID      uint   `gorm:"index"`
	// Metadata and Scopes are stored as strings (serialized form not visible
	// here — likely JSON / delimited; confirm with the serialization code).
	Metadata string
	Scopes   string
	// Rate limiting and budget controls.
	RateLimitRpm    int
	BudgetLimit     float64
	BudgetUsed      float64
	BudgetCurrency  string
	BudgetResetType string // one of the BudgetReset* constants
	BudgetResetAt   time.Time
	// Lifecycle.
	IsActive   bool `gorm:"index"`
	ExpiresAt  time.Time
	LastUsedAt time.Time
	CreatedAt  time.Time `gorm:"autoCreateTime"`
	UpdatedAt  time.Time `gorm:"autoUpdateTime"`
}
type APIKeyConfig ¶
// APIKeyConfig configures API-key authentication behavior.
type APIKeyConfig struct {
	Enabled bool `yaml:"enabled" json:"enabled"`
	// HeaderNames lists the HTTP header names checked for a key
	// (presumably in order; confirm with the middleware).
	HeaderNames []string `yaml:"header_names,omitempty" json:"header_names,omitzero"`
	// RequireForAll forces key auth on every route when set.
	RequireForAll bool `yaml:"require_for_all,omitempty" json:"require_for_all,omitzero"`
	// AllowAnonymous permits requests without a key when set.
	AllowAnonymous bool `yaml:"allow_anonymous,omitempty" json:"allow_anonymous,omitzero"`
}
type APIKeyCreateRequest ¶
// APIKeyCreateRequest is the inbound payload for creating an API key.
// Pointer fields (RateLimitRpm, BudgetLimit, ExpiresAt) distinguish
// "not provided" from an explicit zero value.
type APIKeyCreateRequest struct {
	Name           string `json:"name" validate:"required,min=1,max=255"`
	OrganizationID string `json:"organization_id,omitzero"`
	UserID         string `json:"user_id,omitzero"`
	ProjectID      uint   `json:"project_id,omitzero"`
	Metadata       string `json:"metadata,omitzero"`
	// Scopes arrives as a list here, unlike APIKey.Scopes which is a single
	// string — the handler presumably serializes it; confirm.
	Scopes       []string `json:"scopes,omitzero"`
	RateLimitRpm *int     `json:"rate_limit_rpm,omitzero"`
	BudgetLimit  *float64 `json:"budget_limit,omitzero"`
	// BudgetCurrency / BudgetResetType mirror the APIKey model fields;
	// BudgetResetType should be one of the BudgetReset* constants.
	BudgetCurrency  string     `json:"budget_currency,omitzero"`
	BudgetResetType string     `json:"budget_reset_type,omitzero"`
	ExpiresAt       *time.Time `json:"expires_at,omitzero"`
}
type APIKeyResponse ¶
// APIKeyResponse is the outbound representation of an API key.
type APIKeyResponse struct {
	ID   uint   `json:"id"`
	Name string `json:"name"`
	// Key is the plaintext secret; it is omitted when empty — presumably
	// populated only in the creation response. Confirm with the handler.
	Key            string `json:"key,omitzero"`
	KeyPrefix      string `json:"key_prefix"`
	OrganizationID string `json:"organization_id,omitzero"`
	UserID         string `json:"user_id,omitzero"`
	ProjectID      uint   `json:"project_id,omitzero"`
	Metadata       string `json:"metadata,omitzero"`
	Scopes         string `json:"scopes,omitzero"`
	RateLimitRpm   int    `json:"rate_limit_rpm,omitzero"`
	// Budget state. BudgetUsed is always emitted; BudgetRemaining is derived
	// and omitted at zero.
	BudgetLimit     float64   `json:"budget_limit,omitzero"`
	BudgetUsed      float64   `json:"budget_used"`
	BudgetRemaining float64   `json:"budget_remaining,omitzero"`
	BudgetCurrency  string    `json:"budget_currency,omitzero"`
	BudgetResetType string    `json:"budget_reset_type,omitzero"`
	BudgetResetAt   time.Time `json:"budget_reset_at,omitzero"`
	// Lifecycle.
	IsActive   bool      `json:"is_active"`
	ExpiresAt  time.Time `json:"expires_at,omitzero"`
	LastUsedAt time.Time `json:"last_used_at,omitzero"`
	CreatedAt  time.Time `json:"created_at"`
	UpdatedAt  time.Time `json:"updated_at"`
}
type APIKeyUsage ¶
type APIKeyUsage struct {
ID uint `gorm:"primaryKey;autoIncrement" json:"id"`
APIKeyID uint `gorm:"index" json:"api_key_id"`
Endpoint string `gorm:"index" json:"endpoint"`
Provider string `json:"provider"`
Model string `json:"model"`
TokensInput int `json:"tokens_input"`
TokensOutput int `json:"tokens_output"`
TokensTotal int `json:"tokens_total"`
Cost float64 `json:"cost"`
Currency string `json:"currency"`
StatusCode int `json:"status_code"`
LatencyMs int `json:"latency_ms"`
Metadata string `json:"metadata,omitempty"`
RequestID string `gorm:"index" json:"request_id"`
UserAgent string `json:"user_agent"`
IPAddress string `json:"ip_address"`
ErrorMessage string `json:"error_message,omitempty"`
CreatedAt time.Time `gorm:"autoCreateTime;index" json:"timestamp"`
}
func (APIKeyUsage) TableName ¶
func (APIKeyUsage) TableName() string
type AdaptiveAnthropicUsage ¶
// AdaptiveAnthropicUsage extends Anthropic's Usage payload with the cache
// tier that served the request.
type AdaptiveAnthropicUsage struct {
	// Token accounting mirroring Anthropic's Usage fields.
	CacheCreationInputTokens int64  `json:"cache_creation_input_tokens,omitzero"`
	CacheReadInputTokens     int64  `json:"cache_read_input_tokens,omitzero"`
	InputTokens              int64  `json:"input_tokens,omitzero"`
	OutputTokens             int64  `json:"output_tokens,omitzero"`
	ServiceTier              string `json:"service_tier,omitzero"`
	// Cache tier information for adaptive system; see the CacheTier*
	// constants ("semantic_exact", "semantic_similar").
	CacheTier string `json:"cache_tier,omitzero"`
}
AdaptiveAnthropicUsage extends Anthropic's Usage with cache tier information
type AdaptiveChatCompletionChoice ¶
// AdaptiveChatCompletionChoice is a chat-completion choice using omitzero
// JSON tags and the Adaptive message type.
type AdaptiveChatCompletionChoice struct {
	FinishReason string                              `json:"finish_reason"`
	Index        int64                               `json:"index"`
	Logprobs     openai.ChatCompletionChoiceLogprobs `json:"logprobs,omitzero"`
	Message      AdaptiveChatCompletionMessage       `json:"message"`
}
AdaptiveChatCompletionChoice represents a chat completion choice with proper omitzero tags
type AdaptiveChatCompletionChunkChoice ¶
// AdaptiveChatCompletionChunkChoice is a streaming chunk choice using
// omitzero JSON tags and the Adaptive delta type.
type AdaptiveChatCompletionChunkChoice struct {
	Delta        AdaptiveChatCompletionChunkChoiceDelta   `json:"delta"`
	FinishReason string                                   `json:"finish_reason"`
	Index        int64                                    `json:"index"`
	Logprobs     openai.ChatCompletionChunkChoiceLogprobs `json:"logprobs,omitzero"`
}
AdaptiveChatCompletionChunkChoice represents a streaming chunk choice with proper omitzero tags
type AdaptiveChatCompletionChunkChoiceDelta ¶
// AdaptiveChatCompletionChunkChoiceDelta is an incremental streaming delta;
// all fields are optional and omitted when zero.
type AdaptiveChatCompletionChunkChoiceDelta struct {
	Content   string                                         `json:"content,omitzero"`
	Refusal   string                                         `json:"refusal,omitzero"`
	Role      string                                         `json:"role,omitzero"`
	ToolCalls []openai.ChatCompletionChunkChoiceDeltaToolCall `json:"tool_calls,omitzero"`
}
AdaptiveChatCompletionChunkChoiceDelta represents a streaming delta with proper omitzero tags
type AdaptiveChatCompletionMessage ¶
// AdaptiveChatCompletionMessage is a completed assistant message using
// omitzero JSON tags; Content and Role are always emitted.
type AdaptiveChatCompletionMessage struct {
	Content     string                                      `json:"content"`
	Refusal     string                                      `json:"refusal,omitzero"`
	Role        string                                      `json:"role"`
	Annotations []openai.ChatCompletionMessageAnnotation    `json:"annotations,omitzero"`
	Audio       openai.ChatCompletionAudio                  `json:"audio,omitzero"`
	ToolCalls   []openai.ChatCompletionMessageToolCallUnion `json:"tool_calls,omitzero"`
}
AdaptiveChatCompletionMessage represents a chat completion message with proper omitzero tags
type AdaptiveGeminiUsage ¶
// AdaptiveGeminiUsage mirrors genai.UsageMetadata and adds the cache tier
// that served the request.
type AdaptiveGeminiUsage struct {
	// Output only. List of modalities of the cached content in the request input.
	CacheTokensDetails []*genai.ModalityTokenCount `json:"cacheTokensDetails,omitzero"`
	// Output only. Number of tokens in the cached part in the input (the cached content).
	CachedContentTokenCount int32 `json:"cachedContentTokenCount,omitzero"`
	// Number of tokens in the response(s). This includes all the generated response candidates.
	CandidatesTokenCount int32 `json:"candidatesTokenCount,omitzero"`
	// Output only. List of modalities that were returned in the response.
	CandidatesTokensDetails []*genai.ModalityTokenCount `json:"candidatesTokensDetails,omitzero"`
	// Number of tokens in the prompt. When cached_content is set, this is still the total
	// effective prompt size meaning this includes the number of tokens in the cached content.
	PromptTokenCount int32 `json:"promptTokenCount,omitzero"`
	// Output only. List of modalities that were processed in the request input.
	PromptTokensDetails []*genai.ModalityTokenCount `json:"promptTokensDetails,omitzero"`
	// Output only. Number of tokens present in thoughts output.
	ThoughtsTokenCount int32 `json:"thoughtsTokenCount,omitzero"`
	// Output only. Number of tokens present in tool-use prompt(s).
	ToolUsePromptTokenCount int32 `json:"toolUsePromptTokenCount,omitzero"`
	// Output only. List of modalities that were processed for tool-use request inputs.
	ToolUsePromptTokensDetails []*genai.ModalityTokenCount `json:"toolUsePromptTokensDetails,omitzero"`
	// Total token count for prompt, response candidates, and tool-use prompts (if present).
	TotalTokenCount int32 `json:"totalTokenCount,omitzero"`
	// Output only. Traffic type. This shows whether a request consumes Pay-As-You-Go or
	// Provisioned Throughput quota.
	TrafficType genai.TrafficType `json:"trafficType,omitzero"`
	// CacheTier records which adaptive cache tier served the request; see the
	// CacheTier* constants ("semantic_exact", "semantic_similar").
	CacheTier string `json:"cacheTier,omitzero"`
}
AdaptiveGeminiUsage extends genai.UsageMetadata with cache tier information
type AdaptiveUsage ¶
// AdaptiveUsage mirrors OpenAI's CompletionUsage token accounting and adds
// the cache tier that served the request.
type AdaptiveUsage struct {
	PromptTokens     int64 `json:"prompt_tokens"`
	CompletionTokens int64 `json:"completion_tokens"`
	TotalTokens      int64 `json:"total_tokens"`
	// Cache tier information for adaptive system; see the CacheTier*
	// constants ("semantic_exact", "semantic_similar").
	CacheTier string `json:"cache_tier,omitzero"`
}
AdaptiveUsage extends OpenAI's CompletionUsage with cache tier information
type AddCreditsParams ¶
type AddProjectMemberRequest ¶
// AddProjectMemberRequest is the payload for adding a user to a project.
// Role is validated against the owner/admin/member set.
type AddProjectMemberRequest struct {
	UserID string            `json:"user_id" validate:"required"`
	Role   ProjectMemberRole `json:"role" validate:"required,oneof=owner admin member"`
}
type Alternative ¶
Alternative represents a provider+model fallback candidate.
type AnthropicMessage ¶
// AnthropicMessage mirrors Anthropic's Message response, substituting
// AdaptiveAnthropicUsage for usage and adding the serving provider.
type AnthropicMessage struct {
	ID           string                        `json:"id"`
	Content      []anthropic.ContentBlockUnion `json:"content,omitzero"`
	Model        string                        `json:"model"`
	Role         string                        `json:"role"`
	StopReason   string                        `json:"stop_reason,omitzero"`
	StopSequence string                        `json:"stop_sequence,omitzero"`
	Type         string                        `json:"type"`
	// Usage carries token accounting plus the adaptive cache tier.
	Usage AdaptiveAnthropicUsage `json:"usage,omitzero"`
	// Provider identifies which backend served the request.
	Provider string `json:"provider,omitzero"`
}
AnthropicMessage extends Anthropic's Message with enhanced usage and provider info
type AnthropicMessageChunk ¶
// AnthropicMessageChunk matches Anthropic's streaming event format exactly,
// with an extra Provider field. Which pointer fields are populated depends
// on the event Type.
type AnthropicMessageChunk struct {
	Type string `json:"type"`
	// Fields for different event types - only populated based on event type
	Message      *AnthropicMessage                                  `json:"message,omitzero"`       // message_start only
	Delta        *anthropic.MessageStreamEventUnionDelta            `json:"delta,omitzero"`         // content_block_delta, message_delta
	Usage        *AdaptiveAnthropicUsage                            `json:"usage,omitzero"`         // message_delta only
	ContentBlock *anthropic.ContentBlockStartEventContentBlockUnion `json:"content_block,omitzero"` // content_block_start only
	Index        *int64                                             `json:"index,omitzero"`         // content_block_start, content_block_delta, content_block_stop
	// Adaptive-specific fields
	Provider string `json:"provider,omitzero"` // Keep this for internal tracking, but it will be omitted when empty
}
AnthropicMessageChunk matches Anthropic's streaming format exactly, with our provider extension
type AnthropicMessageRequest ¶
// AnthropicMessageRequest carries the fields of anthropic.MessageNewParams
// as individual members, plus Adaptive routing extensions.
type AnthropicMessageRequest struct {
	// Core Anthropic Messages API fields (from anthropic.MessageNewParams)
	MaxTokens     int64                                 `json:"max_tokens,omitzero"`
	Messages      []anthropic.MessageParam              `json:"messages"`
	Model         anthropic.Model                       `json:"model"`
	Temperature   param.Opt[float64]                    `json:"temperature,omitzero"`
	TopK          param.Opt[int64]                      `json:"top_k,omitzero"`
	TopP          param.Opt[float64]                    `json:"top_p,omitzero"`
	Metadata      anthropic.MetadataParam               `json:"metadata,omitzero"`
	ServiceTier   anthropic.MessageNewParamsServiceTier `json:"service_tier,omitzero"`
	StopSequences []string                              `json:"stop_sequences,omitzero"`
	System        []anthropic.TextBlockParam            `json:"system,omitzero"`
	// Stream is a pointer so "unset" is distinguishable from "false".
	Stream     *bool                              `json:"stream,omitzero"`
	Thinking   anthropic.ThinkingConfigParamUnion `json:"thinking,omitzero"`
	ToolChoice anthropic.ToolChoiceUnionParam     `json:"tool_choice,omitzero"`
	Tools      []anthropic.ToolUnionParam         `json:"tools,omitzero"`
	// Custom fields for our internal processing
	ModelRouterConfig *ModelRouterConfig         `json:"model_router,omitzero"`
	Fallback          *FallbackConfig            `json:"fallback,omitzero"`         // Fallback configuration with enabled toggle
	ProviderConfigs   map[string]*ProviderConfig `json:"provider_configs,omitzero"` // Custom provider configurations by provider name
}
AnthropicMessageRequest uses individual fields from anthropic.MessageNewParams with our custom fields
type AuthConfig ¶
// AuthConfig selects and configures the authentication backend. Provider
// chooses the backend; the matching sub-config is expected to be non-nil
// (presumably "clerk" or "database" — confirm with the auth wiring).
type AuthConfig struct {
	Provider       string              `json:"provider" yaml:"provider"`
	ClerkConfig    *ClerkAuthConfig    `json:"clerk,omitempty" yaml:"clerk,omitempty"`
	DatabaseConfig *DatabaseAuthConfig `json:"database,omitempty" yaml:"database,omitempty"`
}
type CacheBackendType ¶
type CacheBackendType string
CacheBackendType represents the type of cache backend to use
const (
	CacheBackendRedis  CacheBackendType = "redis"
	CacheBackendMemory CacheBackendType = "memory"
)
type CacheConfig ¶
// CacheConfig holds configuration for model router caching, covering both
// the storage backend and semantic-lookup behavior.
type CacheConfig struct {
	// Backend configuration
	Backend  CacheBackendType `json:"backend,omitzero" yaml:"backend"`     // "redis" or "memory"
	RedisURL string           `json:"redis_url,omitzero" yaml:"redis_url"` // Required if backend is "redis"
	Capacity int              `json:"capacity,omitzero" yaml:"capacity"`   // Required if backend is "memory" (LRU cache size)
	// Cache behavior
	Enabled bool `json:"enabled,omitzero" yaml:"enabled"`
	// SemanticThreshold is the similarity cutoff for semantic cache hits.
	SemanticThreshold float64 `json:"semantic_threshold,omitzero" yaml:"semantic_threshold"`
	// OpenAIAPIKey / EmbeddingModel configure embedding generation for
	// semantic matching.
	OpenAIAPIKey   string `json:"openai_api_key,omitzero" yaml:"openai_api_key"`
	EmbeddingModel string `json:"embedding_model,omitzero" yaml:"embedding_model"`
}
CacheConfig holds configuration for model router caching
type CacheResult ¶
// CacheResult is the outcome of a cache lookup. Hit reports whether a cached
// response was found; Source identifies where it came from.
type CacheResult struct {
	Response *ModelSelectionResponse `json:"response,omitzero"`
	Source   string                  `json:"source,omitzero"`
	Hit      bool                    `json:"hit"`
}
CacheResult represents the result of a cache lookup operation
type ChatCompletion ¶
// ChatCompletion mirrors OpenAI's ChatCompletion response, substituting
// AdaptiveUsage for usage and adding the serving provider.
type ChatCompletion struct {
	ID          string                           `json:"id"`
	Choices     []AdaptiveChatCompletionChoice   `json:"choices"`
	Created     int64                            `json:"created"`
	Model       string                           `json:"model"`
	Object      string                           `json:"object"`
	ServiceTier openai.ChatCompletionServiceTier `json:"service_tier,omitzero"`
	// Usage carries token accounting plus the adaptive cache tier.
	Usage AdaptiveUsage `json:"usage"`
	// Provider identifies which backend served the request.
	Provider string `json:"provider,omitzero"`
}
ChatCompletion extends OpenAI's ChatCompletion with enhanced usage
type ChatCompletionChunk ¶
// ChatCompletionChunk mirrors OpenAI's streaming chunk, substituting
// AdaptiveUsage for usage and adding the serving provider.
type ChatCompletionChunk struct {
	ID          string                                `json:"id"`
	Choices     []AdaptiveChatCompletionChunkChoice   `json:"choices"`
	Created     int64                                 `json:"created"`
	Model       string                                `json:"model"`
	Object      string                                `json:"object"`
	ServiceTier openai.ChatCompletionChunkServiceTier `json:"service_tier,omitzero"`
	// Usage is typically only present on the final chunk — confirm with the
	// streaming handler.
	Usage AdaptiveUsage `json:"usage,omitzero"`
	// Provider identifies which backend served the request.
	Provider string `json:"provider,omitzero"`
}
ChatCompletionChunk extends OpenAI's ChatCompletionChunk with enhanced usage
type ChatCompletionRequest ¶
// ChatCompletionRequest mirrors OpenAI's chat-completion parameters and adds
// Adaptive-specific extensions (ModelRouterConfig, Fallback, ProviderConfigs).
type ChatCompletionRequest struct {
	// Messages comprising the conversation so far.
	Messages []openai.ChatCompletionMessageParamUnion `json:"messages,omitzero"`
	// Model ID used to generate the response.
	Model shared.ChatModel `json:"model,omitzero"`
	// Number between -2.0 and 2.0. Positive values penalize new tokens based on their
	// existing frequency in the text so far, decreasing the model's likelihood to
	// repeat the same line verbatim.
	FrequencyPenalty param.Opt[float64] `json:"frequency_penalty,omitzero"`
	// Whether to return log probabilities of the output tokens or not. If true,
	// returns the log probabilities of each output token returned in the `content` of
	// `message`.
	Logprobs param.Opt[bool] `json:"logprobs,omitzero"`
	// An upper bound for the number of tokens that can be generated for a completion,
	// including visible output tokens and
	// [reasoning tokens](https://platform.openai.com/docs/guides/reasoning).
	MaxCompletionTokens param.Opt[int64] `json:"max_completion_tokens,omitzero"`
	// The maximum number of [tokens](/tokenizer) that can be generated in the chat
	// completion. This value can be used to control
	// [costs](https://openai.com/api/pricing/) for text generated via API.
	//
	// This value is now deprecated in favor of `max_completion_tokens`, and is not
	// compatible with
	// [o-series models](https://platform.openai.com/docs/guides/reasoning).
	MaxTokens param.Opt[int64] `json:"max_tokens,omitzero"`
	// How many chat completion choices to generate for each input message. Note that
	// you will be charged based on the number of generated tokens across all of the
	// choices. Keep `n` as `1` to minimize costs.
	N param.Opt[int64] `json:"n,omitzero"`
	// Number between -2.0 and 2.0. Positive values penalize new tokens based on
	// whether they appear in the text so far, increasing the model's likelihood to
	// talk about new topics.
	PresencePenalty param.Opt[float64] `json:"presence_penalty,omitzero"`
	// This feature is in Beta. If specified, our system will make a best effort to
	// sample deterministically, such that repeated requests with the same `seed` and
	// parameters should return the same result. Determinism is not guaranteed, and you
	// should refer to the `system_fingerprint` response parameter to monitor changes
	// in the backend.
	Seed param.Opt[int64] `json:"seed,omitzero"`
	// Whether or not to store the output of this chat completion request for use in
	// our [model distillation](https://platform.openai.com/docs/guides/distillation)
	// or [evals](https://platform.openai.com/docs/guides/evals) products.
	Store param.Opt[bool] `json:"store,omitzero"`
	// What sampling temperature to use, between 0 and 2. Higher values like 0.8 will
	// make the output more random, while lower values like 0.2 will make it more
	// focused and deterministic. We generally recommend altering this or `top_p` but
	// not both.
	Temperature param.Opt[float64] `json:"temperature,omitzero"`
	// An integer between 0 and 20 specifying the number of most likely tokens to
	// return at each token position, each with an associated log probability.
	// `logprobs` must be set to `true` if this parameter is used.
	TopLogprobs param.Opt[int64] `json:"top_logprobs,omitzero"`
	// An alternative to sampling with temperature, called nucleus sampling, where the
	// model considers the results of the tokens with top_p probability mass. So 0.1
	// means only the tokens comprising the top 10% probability mass are considered.
	//
	// We generally recommend altering this or `temperature` but not both.
	TopP param.Opt[float64] `json:"top_p,omitzero"`
	// Whether to enable
	// [parallel function calling](https://platform.openai.com/docs/guides/function-calling#configuring-parallel-function-calling)
	// during tool use.
	ParallelToolCalls param.Opt[bool] `json:"parallel_tool_calls,omitzero"`
	// A stable identifier for your end-users. Used to boost cache hit rates by better
	// bucketing similar requests and to help OpenAI detect and prevent abuse.
	// [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#end-user-ids).
	User param.Opt[string] `json:"user,omitzero"`
	// Parameters for audio output. Required when audio output is requested with
	// `modalities: ["audio"]`.
	// [Learn more](https://platform.openai.com/docs/guides/audio).
	Audio openai.ChatCompletionAudioParam `json:"audio,omitzero"`
	// Modify the likelihood of specified tokens appearing in the completion.
	//
	// Accepts a JSON object that maps tokens (specified by their token ID in the
	// tokenizer) to an associated bias value from -100 to 100. Mathematically, the
	// bias is added to the logits generated by the model prior to sampling. The exact
	// effect will vary per model, but values between -1 and 1 should decrease or
	// increase likelihood of selection; values like -100 or 100 should result in a ban
	// or exclusive selection of the relevant token.
	LogitBias map[string]int64 `json:"logit_bias,omitzero"`
	// Set of 16 key-value pairs that can be attached to an object. This can be useful
	// for storing additional information about the object in a structured format, and
	// querying for objects via API or the dashboard.
	//
	// Keys are strings with a maximum length of 64 characters. Values are strings with
	// a maximum length of 512 characters.
	Metadata shared.Metadata `json:"metadata,omitzero"`
	// Output types that you would like the model to generate. Most models are capable
	// of generating text, which is the default:
	//
	// `["text"]`
	//
	// The `gpt-4o-audio-preview` model can also be used to
	// [generate audio](https://platform.openai.com/docs/guides/audio). To request that
	// this model generate both text and audio responses, you can use:
	//
	// `["text", "audio"]`
	//
	// Any of "text", "audio".
	Modalities []string `json:"modalities,omitzero"`
	// **o-series models only**
	//
	// Constrains effort on reasoning for
	// [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently
	// supported values are `low`, `medium`, and `high`. Reducing reasoning effort can
	// result in faster responses and fewer tokens used on reasoning in a response.
	//
	// Any of "low", "medium", "high".
	ReasoningEffort shared.ReasoningEffort `json:"reasoning_effort,omitzero"`
	// Specifies the latency tier to use for processing the request. This parameter is
	// relevant for customers subscribed to the scale tier service:
	//
	// - If set to 'auto', and the Project is Scale tier enabled, the system will
	// utilize scale tier credits until they are exhausted.
	// - If set to 'auto', and the Project is not Scale tier enabled, the request will
	// be processed using the default service tier with a lower uptime SLA and no
	// latency guarantee.
	// - If set to 'default', the request will be processed using the default service
	// tier with a lower uptime SLA and no latency guarantee.
	// - If set to 'flex', the request will be processed with the Flex Processing
	// service tier.
	// [Learn more](https://platform.openai.com/docs/guides/flex-processing).
	// - When not set, the default behavior is 'auto'.
	//
	// When this parameter is set, the response body will include the `service_tier`
	// utilized.
	//
	// Any of "auto", "default", "flex".
	ServiceTier openai.ChatCompletionNewParamsServiceTier `json:"service_tier,omitzero"`
	// Not supported with latest reasoning models `o3` and `o4-mini`.
	//
	// Up to 4 sequences where the API will stop generating further tokens. The
	// returned text will not contain the stop sequence.
	Stop openai.ChatCompletionNewParamsStopUnion `json:"stop,omitzero"`
	// Options for streaming response. Only set this when you set `stream: true`.
	StreamOptions openai.ChatCompletionStreamOptionsParam `json:"stream_options,omitzero"`
	// Deprecated in favor of `tool_choice`.
	//
	// Controls which (if any) function is called by the model.
	//
	// `none` means the model will not call a function and instead generates a message.
	//
	// `auto` means the model can pick between generating a message or calling a
	// function.
	//
	// Specifying a particular function via `{"name": "my_function"}` forces the model
	// to call that function.
	//
	// `none` is the default when no functions are present. `auto` is the default if
	// functions are present.
	//
	// NOTE(review): the two deprecation comments above and below describe the legacy
	// `function_call`/`functions` parameters, but no corresponding fields are
	// declared here — confirm whether the field declarations were dropped, or remove
	// these comments.
	// Deprecated in favor of `tools`.
	//
	// Static predicted output content, such as the content of a text file that is
	// being regenerated.
	Prediction openai.ChatCompletionPredictionContentParam `json:"prediction,omitzero"`
	// An object specifying the format that the model must output.
	//
	// Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured
	// Outputs which ensures the model will match your supplied JSON schema. Learn more
	// in the
	// [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs).
	//
	// Setting to `{ "type": "json_object" }` enables the older JSON mode, which
	// ensures the message the model generates is valid JSON. Using `json_schema` is
	// preferred for models that support it.
	ResponseFormat openai.ChatCompletionNewParamsResponseFormatUnion `json:"response_format,omitzero"`
	// Controls which (if any) tool is called by the model. `none` means the model will
	// not call any tool and instead generates a message. `auto` means the model can
	// pick between generating a message or calling one or more tools. `required` means
	// the model must call one or more tools. Specifying a particular tool via
	// `{"type": "function", "function": {"name": "my_function"}}` forces the model to
	// call that tool.
	//
	// `none` is the default when no tools are present. `auto` is the default if tools
	// are present.
	ToolChoice openai.ChatCompletionToolChoiceOptionUnionParam `json:"tool_choice,omitzero"`
	// A list of tools the model may call. Currently, only functions are supported as a
	// tool. Use this to provide a list of functions the model may generate JSON inputs
	// for. A max of 128 functions are supported.
	Tools []openai.ChatCompletionToolUnionParam `json:"tools,omitzero"`
	// This tool searches the web for relevant results to use in a response. Learn more
	// about the
	// [web search tool](https://platform.openai.com/docs/guides/tools-web-search?api-mode=chat).
	WebSearchOptions openai.ChatCompletionNewParamsWebSearchOptions `json:"web_search_options,omitzero"`
	Stream bool `json:"stream,omitzero"` // Whether to stream the response or not
	ModelRouterConfig *ModelRouterConfig `json:"model_router,omitzero"`
	Fallback *FallbackConfig `json:"fallback,omitzero"` // Fallback configuration with enabled toggle
	ProviderConfigs map[string]*ProviderConfig `json:"provider_configs,omitzero"` // Custom provider configurations by provider name
}
ChatCompletionRequest represents a request for a chat completion, including all OpenAI parameters and extensions.
type CircuitBreakerConfig ¶
// CircuitBreakerConfig holds circuit breaker configuration for provider
// fallback handling.
type CircuitBreakerConfig struct {
	FailureThreshold int `json:"failure_threshold,omitzero" yaml:"failure_threshold,omitempty"` // Number of failures before opening circuit
	SuccessThreshold int `json:"success_threshold,omitzero" yaml:"success_threshold,omitempty"` // Number of successes to close circuit
	TimeoutMs        int `json:"timeout_ms,omitzero" yaml:"timeout_ms,omitempty"`               // Timeout for circuit breaker in milliseconds
	ResetAfterMs     int `json:"reset_after_ms,omitzero" yaml:"reset_after_ms,omitempty"`       // Time to wait before trying to close circuit
}
CircuitBreakerConfig holds circuit breaker configuration
type ClerkAuthConfig ¶
type CreditPackage ¶
type CreditTransaction ¶
// CreditTransaction is the GORM ledger entry for a credit balance change
// (see CreditTransactionType: purchase, usage, refund, promotional).
type CreditTransaction struct {
	ID             uint                  `gorm:"primaryKey;autoIncrement"`
	OrganizationID string                `gorm:"index"`
	UserID         string                `gorm:"index"`
	Type           CreditTransactionType `gorm:"index"`
	// Amount is presumably the signed delta applied; BalanceAfter the
	// resulting balance — confirm with the credit service.
	Amount       float64
	BalanceAfter float64
	Description  string
	Metadata     string
	// Stripe linkage, populated for purchase transactions.
	StripePaymentIntentID string `gorm:"index"`
	StripeSessionID       string
	// Attribution to the API key / usage record behind a usage deduction.
	APIKeyID   uint      `gorm:"index"`
	APIUsageID uint      `gorm:"index"`
	CreatedAt  time.Time `gorm:"autoCreateTime;index"`
}
type CreditTransactionType ¶
type CreditTransactionType string
const (
	CreditTransactionPurchase    CreditTransactionType = "purchase"
	CreditTransactionUsage       CreditTransactionType = "usage"
	CreditTransactionRefund      CreditTransactionType = "refund"
	CreditTransactionPromotional CreditTransactionType = "promotional"
)
type DatabaseAuthConfig ¶
// DatabaseAuthConfig configures database-backed authentication; only a
// connection URL is required.
type DatabaseAuthConfig struct {
	DatabaseURL string `json:"database_url,omitempty" yaml:"database_url,omitempty"`
}
type DatabaseConfig ¶
// DatabaseConfig describes a database connection. Either DSN or the discrete
// host/port/credential fields may be supplied (precedence not visible here —
// confirm with the connection code); FilePath applies to file-backed engines
// such as SQLite.
type DatabaseConfig struct {
	Type     DatabaseType `yaml:"type" json:"type"`
	DSN      string       `yaml:"dsn,omitempty" json:"dsn,omitzero"`
	Host     string       `yaml:"host,omitempty" json:"host,omitzero"`
	Port     int          `yaml:"port,omitempty" json:"port,omitzero"`
	Username string       `yaml:"username,omitempty" json:"username,omitzero"`
	Password string       `yaml:"password,omitempty" json:"password,omitzero"`
	Database string       `yaml:"database" json:"database"`
	SSLMode  string       `yaml:"ssl_mode,omitempty" json:"ssl_mode,omitzero"`
	FilePath string       `yaml:"file_path,omitempty" json:"file_path,omitzero"`
	// Connection pool tuning.
	MaxOpenConns    int `yaml:"max_open_conns,omitempty" json:"max_open_conns,omitzero"`
	MaxIdleConns    int `yaml:"max_idle_conns,omitempty" json:"max_idle_conns,omitzero"`
	ConnMaxLifetime int `yaml:"conn_max_lifetime,omitempty" json:"conn_max_lifetime,omitzero"`
}
type DatabaseType ¶
type DatabaseType string
const (
	PostgreSQL DatabaseType = "postgresql"
	MySQL      DatabaseType = "mysql"
	SQLite     DatabaseType = "sqlite"
	ClickHouse DatabaseType = "clickhouse"
)
type DeductCreditsParams ¶
type EndpointConfig ¶
// EndpointConfig holds the per-provider configurations for one endpoint,
// keyed by provider name.
type EndpointConfig struct {
	Providers map[string]ProviderConfig `yaml:"providers"`
}
EndpointConfig holds endpoint-specific provider configurations
type EndpointsConfig ¶
// EndpointsConfig groups the provider configuration for every supported
// endpoint.
type EndpointsConfig struct {
	ChatCompletions EndpointConfig `yaml:"chat_completions"`
	Messages        EndpointConfig `yaml:"messages"`
	SelectModel     EndpointConfig `yaml:"select_model"`
	Generate        EndpointConfig `yaml:"generate"`
	CountTokens     EndpointConfig `yaml:"count_tokens"`
}
EndpointsConfig holds all endpoint configurations
type ExecutionFunc ¶
type ExecutionFunc func(c *fiber.Ctx, provider Alternative, requestID string) error
ExecutionFunc is the function signature for executing a completion with a specific provider
type FallbackConfig ¶
// FallbackConfig holds the fallback configuration. Fallback is enabled when
// Mode is non-empty ("sequential" or "race") and disabled when Mode is empty.
type FallbackConfig struct {
	Mode           FallbackMode          `json:"mode,omitzero" yaml:"mode,omitempty"`                       // Fallback mode (sequential/race). Empty = disabled, non-empty = enabled
	TimeoutMs      int                   `json:"timeout_ms,omitzero" yaml:"timeout_ms,omitempty"`           // Timeout in milliseconds
	MaxRetries     int                   `json:"max_retries,omitzero" yaml:"max_retries,omitempty"`         // Maximum number of retries
	CircuitBreaker *CircuitBreakerConfig `json:"circuit_breaker,omitzero" yaml:"circuit_breaker,omitempty"` // Circuit breaker configuration
}
FallbackConfig holds the fallback configuration Fallback is enabled when Mode is non-empty, disabled when Mode is empty
type FallbackMode ¶
type FallbackMode string
FallbackMode defines the strategy for handling provider failures
const (
	FallbackModeSequential FallbackMode = "sequential"
	FallbackModeRace       FallbackMode = "race"
)
type FallbackResult ¶
// FallbackResult records one provider execution attempt: which alternative
// ran, whether it succeeded, its error (if any), and how long it took.
type FallbackResult struct {
	Success  bool
	Provider Alternative
	Error    error
	Duration time.Duration
}
FallbackResult represents the result of a provider execution attempt
type GeminiGenerateContentResponse ¶
// GeminiGenerateContentResponse mirrors genai's GenerateContentResponse,
// substituting AdaptiveGeminiUsage for usage metadata and adding the serving
// provider.
type GeminiGenerateContentResponse struct {
	// Optional. Used to retain the full HTTP response.
	SDKHTTPResponse *genai.HTTPResponse `json:"sdkHttpResponse,omitzero"`
	// Response variations returned by the model.
	Candidates []*genai.Candidate `json:"candidates,omitzero"`
	// Timestamp when the request is made to the server.
	CreateTime time.Time `json:"createTime,omitzero"`
	// Output only. The model version used to generate the response.
	ModelVersion string `json:"modelVersion,omitzero"`
	// Output only. Content filter results for a prompt sent in the request. Note: Sent
	// only in the first stream chunk. Only happens when no candidates were generated due
	// to content violations.
	PromptFeedback *genai.GenerateContentResponsePromptFeedback `json:"promptFeedback,omitzero"`
	// Output only. response_id is used to identify each response. It is the encoding of
	// the event_id.
	ResponseID string `json:"responseId,omitzero"`
	// Usage metadata about the response(s), including the adaptive cache tier.
	UsageMetadata *AdaptiveGeminiUsage `json:"usageMetadata,omitzero"`
	// Provider identifies which backend served the request.
	Provider string `json:"provider,omitzero"`
}
type GeminiGenerateRequest ¶
type GeminiGenerateRequest struct {
	// Core Gemini API fields - use SDK types directly
	Model string `json:"model,omitzero"` // Requested model name
	Contents []*genai.Content `json:"contents,omitzero"` // Conversation/prompt contents
	Tools []*genai.Tool `json:"tools,omitzero"` // Available tool definitions
	ToolConfig *genai.ToolConfig `json:"tool_config,omitzero"` // Tool-calling configuration
	SafetySettings []*genai.SafetySetting `json:"safety_settings,omitzero"` // Content safety settings
	SystemInstruction *genai.Content `json:"system_instruction,omitzero"` // System-level instruction
	GenerationConfig *genai.GenerateContentConfig `json:"generation_config,omitzero"` // Generation parameters
	// Custom fields for our internal processing
	ModelRouterConfig *ModelRouterConfig `json:"model_router,omitzero"` // Per-request model router overrides
	Fallback *FallbackConfig `json:"fallback,omitzero"` // Per-request fallback configuration
	ProviderConfigs map[string]*ProviderConfig `json:"provider_configs,omitzero"` // Per-provider overrides keyed by provider name
}
GeminiGenerateRequest represents a request for Gemini's GenerateContent API. It uses the actual genai SDK types with our custom extensions.
type ModelCapability ¶
type ModelCapability struct {
	Description string `json:"description,omitzero"` // Human-readable description of the model
	Provider string `json:"provider,omitzero"` // Provider that hosts the model
	ModelName string `json:"model_name,omitzero"` // Provider-specific model identifier
	CostPer1MInputTokens float64 `json:"cost_per_1m_input_tokens,omitzero"` // Input cost per one million tokens
	CostPer1MOutputTokens float64 `json:"cost_per_1m_output_tokens,omitzero"` // Output cost per one million tokens
	MaxContextTokens int `json:"max_context_tokens,omitzero"` // Maximum context window size in tokens
	MaxOutputTokens int `json:"max_output_tokens,omitzero"` // Maximum output length in tokens
	SupportsToolCalling bool `json:"supports_tool_calling,omitzero"` // Whether the model supports tool/function calling
	LanguagesSupported []string `json:"languages_supported,omitzero"` // Languages the model supports
	ModelSizeParams string `json:"model_size_params,omitzero"` // Model size description (e.g. parameter count) — free-form string
	LatencyTier string `json:"latency_tier,omitzero"` // Latency classification tier
	TaskType string `json:"task_type,omitzero"` // Task type the model is suited for; presumably one of the TaskType constants — TODO confirm
	Complexity string `json:"complexity,omitzero"` // Complexity classification
}
ModelCapability represents a model with its capabilities and constraints
type ModelRouterClientConfig ¶
type ModelRouterClientConfig struct {
	AdaptiveRouterURL string `json:"adaptive_router_url,omitzero" yaml:"adaptive_router_url"` // Base URL of the adaptive router service
	JWTSecret string `json:"jwt_secret,omitzero" yaml:"jwt_secret"` // Secret used for JWT auth with the router
	TimeoutMs int `json:"timeout_ms,omitzero" yaml:"timeout_ms"` // Request timeout in milliseconds
	CircuitBreaker *CircuitBreakerConfig `json:"circuit_breaker,omitzero" yaml:"circuit_breaker,omitempty"` // Optional circuit breaker configuration
}
ModelRouterClientConfig holds client configuration for model router
type ModelRouterConfig ¶
type ModelRouterConfig struct {
	Cache CacheConfig `json:"cache" yaml:"cache"` // Router cache configuration
	Client ModelRouterClientConfig `json:"client" yaml:"client"` // Client settings for reaching the router service
	CostBias float32 `json:"cost_bias,omitzero" yaml:"cost_bias"` // Cost/performance trade-off; see SelectModelRequest.CostBias
	Models []ModelCapability `json:"models,omitzero"` // Candidate models available for routing
}
ModelRouterConfig holds configuration for the model router
type ModelSelectionRequest ¶
type ModelSelectionRequest struct {
	// The user prompt to analyze
	Prompt string `json:"prompt"`
	// Tool-related fields for function calling detection
	ToolCall any `json:"tool_call,omitzero"` // Current tool call being made
	Tools any `json:"tools,omitzero"` // Available tool definitions
	// Our custom parameters for model selection (flattened for Python service)
	UserID string `json:"user_id,omitzero"` // Optional user identifier for tracking
	Models []ModelCapability `json:"models,omitzero"` // Candidate models to choose from
	CostBias *float32 `json:"cost_bias,omitzero"` // Cost/performance trade-off; nil = service default
}
ModelSelectionRequest represents a request for model selection. This matches the Python AI service expected format.
type ModelSelectionResponse ¶
type ModelSelectionResponse struct {
	Provider string `json:"provider"` // Selected provider
	Model string `json:"model"` // Selected model
	Alternatives []Alternative `json:"alternatives,omitzero"` // Fallback provider/model combinations
}
ModelSelectionResponse represents the simplified response from model selection.
func (*ModelSelectionResponse) IsValid ¶
func (m *ModelSelectionResponse) IsValid() bool
IsValid validates that the ModelSelectionResponse has required fields
type Organization ¶
// Organization is the GORM model for an organization record.
type Organization struct {
	ID string `gorm:"primaryKey;type:varchar(255)" json:"id"` // Primary key (string ID)
	Name string `gorm:"not null;type:varchar(255)" json:"name"` // Display name
	OwnerID string `gorm:"not null;index;type:varchar(255)" json:"owner_id"` // ID of the owning user (indexed)
	CreatedAt time.Time `gorm:"not null;autoCreateTime" json:"created_at"` // Set automatically on insert
	UpdatedAt time.Time `gorm:"not null;autoUpdateTime" json:"updated_at"` // Set automatically on update
}
func (Organization) TableName ¶
func (Organization) TableName() string
type OrganizationCredit ¶
// OrganizationCredit is the GORM model tracking an organization's credit balance.
type OrganizationCredit struct {
	ID uint `gorm:"primaryKey;autoIncrement" json:"id"` // Auto-incrementing primary key
	OrganizationID string `gorm:"uniqueIndex;not null" json:"organization_id"` // One credit row per organization (unique index)
	Balance float64 `gorm:"not null;default:0" json:"balance"` // Current credit balance
	TotalPurchased float64 `gorm:"not null;default:0" json:"total_purchased"` // Lifetime credits purchased
	TotalUsed float64 `gorm:"not null;default:0" json:"total_used"` // Lifetime credits consumed
	CreatedAt time.Time `gorm:"not null;autoCreateTime" json:"created_at"` // Set automatically on insert
	UpdatedAt time.Time `gorm:"not null;autoUpdateTime" json:"updated_at"` // Set automatically on update
}
type OrganizationMember ¶
// OrganizationMember is the GORM model linking a user to an organization with a role.
type OrganizationMember struct {
	ID uint `gorm:"primaryKey;autoIncrement" json:"id"` // Auto-incrementing primary key
	UserID string `gorm:"not null;index;type:varchar(255)" json:"user_id"` // Member's user ID (indexed)
	OrganizationID string `gorm:"not null;index;type:varchar(255)" json:"organization_id"` // Organization the user belongs to (indexed)
	Role string `gorm:"not null;type:varchar(50)" json:"role"` // Member role within the organization
	CreatedAt time.Time `gorm:"not null;autoCreateTime" json:"created_at"` // Set automatically on insert
	UpdatedAt time.Time `gorm:"not null;autoUpdateTime" json:"updated_at"` // Set automatically on update
}
func (OrganizationMember) TableName ¶
func (OrganizationMember) TableName() string
type OrganizationUpdateRequest ¶
// OrganizationUpdateRequest is the request body for updating an organization.
type OrganizationUpdateRequest struct {
	Name string `json:"name,omitempty" validate:"omitempty,min=1,max=255"` // New name; optional, 1-255 chars when present
}
type PeriodStats ¶
type Project ¶
// Project is the GORM model for a project within an organization.
type Project struct {
	ID uint `gorm:"primaryKey;autoIncrement" json:"id"` // Auto-incrementing primary key
	Name string `gorm:"not null;type:varchar(255)" json:"name"` // Display name
	Description string `gorm:"type:text" json:"description"` // Free-form description
	Status ProjectStatus `gorm:"not null;default:'active';type:varchar(50)" json:"status"` // Lifecycle status; defaults to "active"
	Progress int `gorm:"not null;default:0" json:"progress"` // Progress value; presumably 0-100 percent (see ProjectUpdateRequest validation) — TODO confirm
	OrganizationID string `gorm:"not null;index;type:varchar(255)" json:"organization_id"` // Owning organization (indexed)
	CreatedAt time.Time `gorm:"not null;autoCreateTime" json:"created_at"` // Set automatically on insert
	UpdatedAt time.Time `gorm:"not null;autoUpdateTime" json:"updated_at"` // Set automatically on update
	Members []ProjectMember `gorm:"foreignKey:ProjectID" json:"members"` // Associated members (has-many via ProjectID)
}
func (*Project) ToResponse ¶
func (p *Project) ToResponse() *ProjectResponse
type ProjectCreateRequest ¶
// ProjectCreateRequest is the request body for creating a project.
type ProjectCreateRequest struct {
	Name string `json:"name" validate:"required,min=1,max=255"` // Required; 1-255 chars
	Description string `json:"description,omitempty"` // Optional description
	OrganizationID string `json:"organization_id" validate:"required"` // Required owning organization
	Status ProjectStatus `json:"status,omitempty"` // Optional initial status
}
type ProjectMember ¶
// ProjectMember is the GORM model linking a user to a project with a role.
// The (UserID, ProjectID) pair is unique via the idx_user_project composite index.
type ProjectMember struct {
	ID uint `gorm:"primaryKey;autoIncrement" json:"id"` // Auto-incrementing primary key
	UserID string `gorm:"not null;index;type:varchar(255);uniqueIndex:idx_user_project" json:"user_id"` // Member's user ID
	ProjectID uint `gorm:"not null;index;uniqueIndex:idx_user_project" json:"project_id"` // Project the user belongs to
	Role ProjectMemberRole `gorm:"not null;type:varchar(50)" json:"role"` // Member role within the project
	CreatedAt time.Time `gorm:"not null;autoCreateTime" json:"created_at"` // Set automatically on insert
	UpdatedAt time.Time `gorm:"not null;autoUpdateTime" json:"updated_at"` // Set automatically on update
	Project *Project `gorm:"foreignKey:ProjectID" json:"-"` // Back-reference to the project; excluded from JSON
}
func (ProjectMember) TableName ¶
func (ProjectMember) TableName() string
type ProjectMemberRole ¶
// ProjectMemberRole is a member's role within a project (owner/admin/member).
type ProjectMemberRole string
// Valid project member roles.
const (
	ProjectMemberRoleOwner ProjectMemberRole = "owner"
	ProjectMemberRoleAdmin ProjectMemberRole = "admin"
	ProjectMemberRoleMember ProjectMemberRole = "member"
)
type ProjectResponse ¶
// ProjectResponse is the API representation of a Project,
// produced by Project.ToResponse.
type ProjectResponse struct {
	ID uint `json:"id"` // Project primary key
	Name string `json:"name"` // Display name
	Description string `json:"description"` // Free-form description
	Status ProjectStatus `json:"status"` // Lifecycle status
	Progress int `json:"progress"` // Progress value
	OrganizationID string `json:"organization_id"` // Owning organization
	Members []ProjectMember `json:"members"` // Project members
	CreatedAt time.Time `json:"created_at"` // Creation timestamp
	UpdatedAt time.Time `json:"updated_at"` // Last-update timestamp
}
type ProjectStatus ¶
// ProjectStatus is a project's lifecycle status (active/inactive/paused).
type ProjectStatus string
// Valid project statuses.
const (
	ProjectStatusActive ProjectStatus = "active"
	ProjectStatusInactive ProjectStatus = "inactive"
	ProjectStatusPaused ProjectStatus = "paused"
)
type ProjectUpdateRequest ¶
// ProjectUpdateRequest is the request body for updating a project.
// Pointer fields let an explicit zero value be distinguished from an
// omitted field — presumably nil means "leave unchanged"; verify against handler.
type ProjectUpdateRequest struct {
	Name string `json:"name,omitempty" validate:"omitempty,min=1,max=255"` // Optional; 1-255 chars when present
	Description *string `json:"description,omitempty"` // Optional new description
	Status ProjectStatus `json:"status,omitempty" validate:"omitempty,oneof=active inactive paused"` // Optional; must be a valid ProjectStatus
	Progress *int `json:"progress,omitempty" validate:"omitempty,min=0,max=100"` // Optional; 0-100 when present
}
type ProviderConfig ¶
type ProviderConfig struct {
	APIKey string `yaml:"api_key" json:"api_key,omitzero"` // Provider API key
	BaseURL string `yaml:"base_url" json:"base_url,omitzero"` // Optional custom base URL
	AuthorizationHeader string `yaml:"authorization_header" json:"authorization_header,omitzero"` // Optional custom Authorization header value
}
ProviderConfig holds configuration for LLM providers (unified for both YAML config and request overrides)
type ProviderConfigCreateRequest ¶
type ProviderConfigCreateRequest struct {
	ProviderName string `json:"provider_name" validate:"required,min=1,max=100"` // Required; 1-100 chars
	APIKey string `json:"api_key" validate:"required"` // Required provider API key
	BaseURL string `json:"base_url,omitempty"` // Optional custom base URL
	AuthorizationHeader string `json:"authorization_header,omitempty"` // Optional custom Authorization header value
}
ProviderConfigCreateRequest is the request body for creating a provider config
type ProviderConfigUpdateRequest ¶
// All fields are pointers so an omitted field can be distinguished from an
// explicit zero value — presumably nil means "leave unchanged"; verify against handler.
type ProviderConfigUpdateRequest struct {
	APIKey *string `json:"api_key,omitempty"` // New API key, if set
	BaseURL *string `json:"base_url,omitempty"` // New base URL, if set
	AuthorizationHeader *string `json:"authorization_header,omitempty"` // New Authorization header value, if set
	Enabled *bool `json:"enabled,omitempty"` // Enable/disable flag, if set
}
ProviderConfigUpdateRequest is the request body for updating a provider config
type ProviderConfigWithSource ¶
type ProviderConfigWithSource struct {
	ID uint `json:"id"` // Config primary key
	ProviderName string `json:"provider_name"` // Provider this config applies to
	BaseURL string `json:"base_url"` // Custom base URL, if any
	HasAPIKey bool `json:"has_api_key"` // True when an API key is stored (key itself is not exposed)
	HasAuthHeader bool `json:"has_authorization_header"` // True when a custom Authorization header is stored
	Enabled bool `json:"enabled"` // Whether the config is active
	Source string `json:"source"` // Where the config came from; exact values set by the service — TODO confirm
	CreatedAt string `json:"created_at"` // Creation timestamp (string-formatted)
	UpdatedAt string `json:"updated_at"` // Last-update timestamp (string-formatted)
	CreatedBy string `json:"created_by"` // User who created the config
	UpdatedBy string `json:"updated_by"` // User who last updated the config
}
ProviderConfigWithSource represents a provider config with its source information
type ProviderConfiguration ¶
type ProviderConfiguration struct {
	ID uint `gorm:"primaryKey;autoIncrement" json:"id"` // Auto-incrementing primary key
	ProjectID *uint `gorm:"index" json:"project_id"` // Nullable for org-level configs
	OrganizationID string `gorm:"not null;index;type:varchar(255)" json:"organization_id"` // Owning organization (indexed)
	ProviderName string `gorm:"not null;type:varchar(100)" json:"provider_name"` // Provider this config applies to
	APIKey string `gorm:"not null;type:text" json:"api_key"` // Encrypted in production
	BaseURL string `gorm:"type:varchar(500)" json:"base_url"` // Optional custom base URL
	AuthorizationHeader string `gorm:"type:text" json:"authorization_header"` // Optional custom Authorization header value
	Enabled bool `gorm:"not null;default:true" json:"enabled"` // Active flag; defaults to true
	CreatedAt time.Time `gorm:"not null;autoCreateTime" json:"created_at"` // Set automatically on insert
	UpdatedAt time.Time `gorm:"not null;autoUpdateTime" json:"updated_at"` // Set automatically on update
	CreatedBy string `gorm:"not null;type:varchar(255)" json:"created_by"` // User who created the config
	UpdatedBy string `gorm:"not null;type:varchar(255)" json:"updated_by"` // User who last updated the config
	Project *Project `gorm:"foreignKey:ProjectID" json:"-"` // Back-reference to the project; excluded from JSON
}
ProviderConfiguration represents a provider config stored in the database. Note: The unique constraints are manually managed via migration SQL because GORM doesn't support partial unique indexes with WHERE clauses. We need: - idx_project_provider: UNIQUE(project_id, organization_id, provider_name) WHERE project_id IS NOT NULL - idx_org_provider: UNIQUE(organization_id, provider_name) WHERE project_id IS NULL
func (ProviderConfiguration) TableName ¶
func (ProviderConfiguration) TableName() string
func (*ProviderConfiguration) ToProviderConfig ¶
func (pc *ProviderConfiguration) ToProviderConfig() ProviderConfig
ToProviderConfig converts database model to runtime config
type ProviderConfigurationHistory ¶
type ProviderConfigurationHistory struct {
	ID uint `gorm:"primaryKey;autoIncrement" json:"id"` // Auto-incrementing primary key
	ConfigID uint `gorm:"not null;index" json:"config_id"` // ProviderConfiguration this entry belongs to (indexed)
	Action string `gorm:"not null;type:varchar(50)" json:"action"` // "created", "updated", "deleted"
	Changes string `gorm:"type:text" json:"changes"` // JSON of what changed
	ChangedBy string `gorm:"not null;type:varchar(255)" json:"changed_by"` // User who made the change
	ChangedAt time.Time `gorm:"not null;autoCreateTime" json:"changed_at"` // Set automatically on insert
}
ProviderConfigurationHistory tracks changes to provider configurations for audit purposes
func (ProviderConfigurationHistory) TableName ¶
func (ProviderConfigurationHistory) TableName() string
type RecordUsageParams ¶
// RecordUsageParams carries the parameters for recording a single API usage event.
type RecordUsageParams struct {
	APIKeyID uint // API key used for the request
	OrganizationID string // Organization the usage is billed to
	UserID string // User who made the request
	Endpoint string // API endpoint that was called
	Provider string // LLM provider that served the request
	Model string // Model that served the request
	TokensInput int // Input token count
	TokensOutput int // Output token count
	Cost float64 // Cost of the request
	Currency string // Currency of Cost
	StatusCode int // HTTP status code returned
	LatencyMs int // Request latency in milliseconds
	Metadata string // Additional metadata; presumably JSON-encoded — TODO confirm
	RequestID string // Request identifier for correlation
	UserAgent string // Client User-Agent header
	IPAddress string // Client IP address
	ErrorMessage string // Error message, if the request failed
	IsCustomProviderConfig bool // Whether a custom provider config was used
}
type RedisConfig ¶
type RedisConfig struct {
	URL string `json:"url,omitzero" yaml:"url"` // Redis connection URL
}
RedisConfig holds configuration for Redis
type SelectModelRequest ¶
type SelectModelRequest struct {
	// Available models with their capabilities and constraints
	Models []ModelCapability `json:"models"`
	// The prompt text to analyze for optimal model selection
	Prompt string `json:"prompt"`
	// Optional user identifier for tracking and personalization
	User *string `json:"user,omitzero"`
	// Cost bias for model selection (0.0 = cheapest, 1.0 = best performance)
	CostBias *float32 `json:"cost_bias,omitzero"`
	// Model router cache configuration
	ModelRouterCache *CacheConfig `json:"model_router_cache,omitzero"`
	// Tool-related fields for function calling detection
	ToolCall any `json:"tool_call,omitzero"` // Current tool call being made
	Tools any `json:"tools,omitzero"` // Available tool definitions
}
SelectModelRequest represents a provider-agnostic request for model selection
type SelectModelResponse ¶
type SelectModelResponse struct {
	// Selected provider
	Provider string `json:"provider"`
	// Selected model
	Model string `json:"model"`
	// Alternative provider/model combinations
	Alternatives []Alternative `json:"alternatives,omitzero"`
	// Cache tier that served the selection, if any
	CacheTier string `json:"cache_tier,omitzero"`
}
SelectModelResponse represents the response for model selection
type ServerConfig ¶
type StripeConfig ¶
type TaskType ¶
// See the Task* constants for the known values.
type TaskType string
TaskType represents different types of tasks.
// Known task types used to classify prompts.
const (
	TaskOpenQA TaskType = "Open QA"
	TaskClosedQA TaskType = "Closed QA"
	TaskSummarization TaskType = "Summarization"
	TaskTextGeneration TaskType = "Text Generation"
	TaskCodeGeneration TaskType = "Code Generation"
	TaskChatbot TaskType = "Chatbot"
	TaskClassification TaskType = "Classification"
	TaskRewrite TaskType = "Rewrite"
	TaskBrainstorming TaskType = "Brainstorming"
	TaskExtraction TaskType = "Extraction"
	TaskOther TaskType = "Other"
)
type UpdateProjectMemberRoleRequest ¶
// UpdateProjectMemberRoleRequest is the request body for changing a project member's role.
type UpdateProjectMemberRoleRequest struct {
	Role string `json:"role" validate:"required,oneof=admin member"` // Required; "admin" or "member" (owner cannot be assigned here)
}
type UsageByPeriod ¶
// UsageByPeriod pairs a time period with its aggregated usage statistics.
type UsageByPeriod struct {
	Period string `json:"period"` // Period label; exact format set by the producer — TODO confirm
	Stats UsageStats `json:"stats"` // Aggregated stats for the period
}
type UsageStats ¶
type User ¶
// User is the GORM model for a user record.
type User struct {
	ID string `gorm:"primaryKey;type:varchar(255)" json:"id"` // Primary key (string ID)
	Email string `gorm:"uniqueIndex;not null;type:varchar(255)" json:"email"` // Unique email address
	Name string `gorm:"not null;type:varchar(255)" json:"name"` // Display name
	CreatedAt time.Time `gorm:"not null;autoCreateTime" json:"created_at"` // Set automatically on insert
	UpdatedAt time.Time `gorm:"not null;autoUpdateTime" json:"updated_at"` // Set automatically on update
}