Documentation
¶
Index ¶
- Constants
- func BuildErrorEvent(message, errorType, code string) map[string]interface{}
- func CheckSSESupport(c *gin.Context) bool
- func GenerateCurlCommand(apiBase, apiStyle, token, model string) string
- func GetInputValue(input responses.ResponseNewParamsInputUnion) any
- func GetShutdownChannel() <-chan struct{}
- func MarshalAndSendErrorEvent(c *gin.Context, message, errorType, code string)
- func NewGinHandlerWrapper(h gin.HandlerFunc) swagger.Handler
- func ParseAndSendStreamError(c *gin.Context, err error)
- func SendAdapterDisabledError(c *gin.Context, providerName string)
- func SendFinishEvent(c *gin.Context)
- func SendForwardingError(c *gin.Context, err error)
- func SendInternalError(c *gin.Context, errMsg string)
- func SendInvalidRequestBodyError(c *gin.Context, err error)
- func SendSSEEvent(c *gin.Context, eventType string, data interface{}) error
- func SendSSErrorEvent(c *gin.Context, message, errorType string)
- func SendSSErrorEventJSON(c *gin.Context, errorJSON []byte)
- func SendStreamingError(c *gin.Context, err error)
- func SetGlobalServer(server *Server)
- func SetupSSEHeaders(c *gin.Context)
- func StreamRecoveryHandler(c *gin.Context, stream StreamClosable)
- type AdaptiveProbe
- func (ap *AdaptiveProbe) GetModelCapability(providerUUID, modelID string) (*ModelEndpointCapability, error)
- func (ap *AdaptiveProbe) GetPreferredEndpoint(provider *typ.Provider, modelID string) string
- func (ap *AdaptiveProbe) InvalidateProviderCache(providerUUID string)
- func (ap *AdaptiveProbe) ProbeModelEndpoints(ctx context.Context, req ModelProbeRequest) (*ProbeResult, error)
- func (ap *AdaptiveProbe) ProbeProviderModels(ctx context.Context, provider *typ.Provider, models []string) map[string]*ProbeResult
- type AddSkillLocationRequest
- type AddSkillLocationResponse
- type AggregatedStat
- type AllStatsResponse
- type AnthropicModel
- type AnthropicModelsResponse
- type ApplyConfigResponse
- type ApplyOpenCodeConfigResponse
- type CachedModelCapability
- type ClearStatsResponse
- type ConfigInfo
- type ConfigInfoResponse
- type CreateProviderRequest
- type CreateProviderResponse
- type CreateRuleRequest
- type CurrentServiceResponse
- type DeleteOldRecordsRequest
- type DeleteOldRecordsResponse
- type DeleteProviderResponse
- type DeleteRuleResponse
- type DiscoverIdesResponse
- type EndpointProbeStatus
- type EndpointStatus
- type ErrorDetail
- type ErrorResponse
- type FetchProviderModelsResponse
- type GenerateTokenRequest
- type GitHubRelease
- type HealthInfoResponse
- type HistoryResponse
- type ImportSkillLocationsRequest
- type ImportSkillLocationsResponse
- type LatestVersionInfo
- type LatestVersionResponse
- type LoadBalancer
- func (lb *LoadBalancer) ClearAllStats()
- func (lb *LoadBalancer) ClearServiceStats(provider, model string)
- func (lb *LoadBalancer) GetAllServiceStats() map[string]*loadbalance.ServiceStats
- func (lb *LoadBalancer) GetRuleSummary(rule *typ.Rule) map[string]interface{}
- func (lb *LoadBalancer) GetServiceStats(provider, model string) *loadbalance.ServiceStats
- func (lb *LoadBalancer) RecordUsage(provider, model string, inputTokens, outputTokens int)
- func (lb *LoadBalancer) RegisterTactic(tacticType loadbalance.TacticType, tactic typ.LoadBalancingTactic)
- func (lb *LoadBalancer) SelectService(rule *typ.Rule) (*loadbalance.Service, error)
- func (lb *LoadBalancer) Stop()
- func (lb *LoadBalancer) UpdateServiceIndex(rule *typ.Rule, selectedService *loadbalance.Service)
- func (lb *LoadBalancer) ValidateRule(rule *typ.Rule) error
- type LoadBalancerAPI
- func (api *LoadBalancerAPI) ClearAllStats(c *gin.Context)
- func (api *LoadBalancerAPI) ClearRuleStats(c *gin.Context)
- func (api *LoadBalancerAPI) ClearServiceStats(c *gin.Context)
- func (api *LoadBalancerAPI) GetAllStats(c *gin.Context)
- func (api *LoadBalancerAPI) GetCurrentService(c *gin.Context)
- func (api *LoadBalancerAPI) GetMetrics(c *gin.Context)
- func (api *LoadBalancerAPI) GetRule(c *gin.Context)
- func (api *LoadBalancerAPI) GetRuleStats(c *gin.Context)
- func (api *LoadBalancerAPI) GetRuleSummary(c *gin.Context)
- func (api *LoadBalancerAPI) GetServiceHealth(c *gin.Context)
- func (api *LoadBalancerAPI) GetServiceStats(c *gin.Context)
- func (api *LoadBalancerAPI) RegisterRoutes(loadBalancer *gin.RouterGroup)
- func (api *LoadBalancerAPI) UpdateRuleTactic(c *gin.Context)
- type LogEntry
- type LogsResponse
- type MetricsResponse
- type ModelEndpointCapability
- type ModelProbeData
- type ModelProbeRequest
- type ModelProbeResponse
- type NpmPackage
- type OAuthAuthorizeRequest
- type OAuthAuthorizeResponse
- type OAuthCallbackDataResponse
- type OAuthCancelRequest
- type OAuthDeviceCodeResponse
- type OAuthErrorResponse
- type OAuthMessageResponse
- type OAuthProviderDataResponse
- type OAuthProviderInfo
- type OAuthProvidersResponse
- type OAuthRefreshTokenRequest
- type OAuthRefreshTokenResponse
- type OAuthSessionStatusResponse
- type OAuthTokenResponse
- type OAuthTokensResponse
- type OAuthUpdateProviderRequest
- type OAuthUpdateProviderResponse
- type OpenAIChatCompletionResponse
- type OpenAIModel
- type OpenAIModelsResponse
- type OpenCodeConfigPreviewResponse
- type PassthroughHandler
- type ProbeCache
- func (pc *ProbeCache) CleanupExpired()
- func (pc *ProbeCache) Clear()
- func (pc *ProbeCache) Get(providerUUID, modelID string) *ModelEndpointCapability
- func (pc *ProbeCache) Invalidate(providerUUID, modelID string)
- func (pc *ProbeCache) InvalidateProvider(providerUUID string)
- func (pc *ProbeCache) Set(providerUUID, modelID string, capability *ModelEndpointCapability)
- func (pc *ProbeCache) SetFromProbeResult(result *ProbeResult)
- func (pc *ProbeCache) StartCleanupTask(interval time.Duration)
- type ProbeCacheRequest
- type ProbeProviderRequest
- type ProbeProviderResponse
- type ProbeProviderResponseData
- type ProbeRequest
- type ProbeRequestDetail
- type ProbeResponse
- type ProbeResponseData
- type ProbeResponseDetail
- type ProbeResult
- type ProbeUsage
- type ProviderModelInfo
- type ProviderModelsResponse
- type ProviderResponse
- type ProvidersResponse
- type RefreshSkillLocationResponse
- type RemoveSkillLocationResponse
- type RequestConfig
- type Response
- type ResponseCreateRequest
- type ResponseInputItemUnionParam
- type ResponseNewParams
- type ResponseNewParamsInputUnion
- type RuleResponse
- type RuleStatsResponse
- type RuleSummaryResponse
- type RulesResponse
- type SSEEventWriter
- type ScanIdesResponse
- type ScenarioFlagResponse
- type ScenarioFlagUpdateRequest
- type ScenarioRecorder
- func (sr *ScenarioRecorder) EnableStreaming()
- func (sr *ScenarioRecorder) GetStreamChunks() []map[string]interface{}
- func (sr *ScenarioRecorder) RecordError(err error)
- func (sr *ScenarioRecorder) RecordResponse(provider *typ.Provider, model string)
- func (sr *ScenarioRecorder) RecordStreamChunk(eventType string, chunk interface{})
- func (sr *ScenarioRecorder) SetAssembledResponse(response any)
- type ScenarioResponse
- type ScenarioUpdateRequest
- type ScenarioUpdateResponse
- type ScenariosResponse
- type Server
- func (s *Server) AddSkillLocation(c *gin.Context)
- func (s *Server) AnthropicCountTokens(c *gin.Context)
- func (s *Server) AnthropicListModels(c *gin.Context)
- func (s *Server) AnthropicMessages(c *gin.Context)
- func (s *Server) ApplyClaudeConfig(c *gin.Context)
- func (s *Server) ApplyOpenCodeConfigFromState(c *gin.Context)
- func (s *Server) ApplyRecording(scenario typ.RuleScenario) bool
- func (s *Server) ApplySmartCompact(scenario typ.RuleScenario) bool
- func (s *Server) AuthorizeOAuth(c *gin.Context)
- func (s *Server) CancelOAuthSession(c *gin.Context)
- func (s *Server) ClearLogs(c *gin.Context)
- func (s *Server) CreateProvider(c *gin.Context)
- func (s *Server) CreateRule(c *gin.Context)
- func (s *Server) DeleteOAuthProvider(c *gin.Context)
- func (s *Server) DeleteProvider(c *gin.Context)
- func (s *Server) DeleteRule(c *gin.Context)
- func (s *Server) DetermineProviderAndModel(modelName string) (*typ.Provider, *loadbalance.Service, *typ.Rule, error)
- func (s *Server) DetermineProviderAndModelWithScenario(scenario typ.RuleScenario, modelName string, req interface{}) (*typ.Provider, *loadbalance.Service, *typ.Rule, error)
- func (s *Server) DiscoverIdes(c *gin.Context)
- func (s *Server) ExtractRequestContext(req interface{}) (*smartrouting.RequestContext, error)
- func (s *Server) GenerateToken(c *gin.Context)
- func (s *Server) GetHealthInfo(c *gin.Context)
- func (s *Server) GetHistory(c *gin.Context)
- func (s *Server) GetInfoConfig(c *gin.Context)
- func (s *Server) GetInfoVersion(c *gin.Context)
- func (s *Server) GetLatestVersion(c *gin.Context)
- func (s *Server) GetLoadBalancer() *LoadBalancer
- func (s *Server) GetLogStats(c *gin.Context)
- func (s *Server) GetLogs(c *gin.Context)
- func (s *Server) GetOAuthProvider(c *gin.Context)
- func (s *Server) GetOAuthSessionStatus(c *gin.Context)
- func (s *Server) GetOAuthToken(c *gin.Context)
- func (s *Server) GetOpenCodeConfigPreview(c *gin.Context)
- func (s *Server) GetOrCreateScenarioSink(scenario typ.RuleScenario) *obs.Sink
- func (s *Server) GetPreferredEndpointForModel(provider *typ.Provider, modelID string) string
- func (s *Server) GetProvider(c *gin.Context)
- func (s *Server) GetProviderModelsByUUID(c *gin.Context)
- func (s *Server) GetProviderTemplate(c *gin.Context)
- func (s *Server) GetProviderTemplateVersion(c *gin.Context)
- func (s *Server) GetProviderTemplates(c *gin.Context)
- func (s *Server) GetProviders(c *gin.Context)
- func (s *Server) GetRouter() *gin.Engine
- func (s *Server) GetRule(c *gin.Context)
- func (s *Server) GetRules(c *gin.Context)
- func (s *Server) GetScenarioConfig(c *gin.Context)
- func (s *Server) GetScenarioFlag(c *gin.Context)
- func (s *Server) GetScenarios(c *gin.Context)
- func (s *Server) GetSkillContent(c *gin.Context)
- func (s *Server) GetSkillLocation(c *gin.Context)
- func (s *Server) GetSkillLocations(c *gin.Context)
- func (s *Server) GetStatus(c *gin.Context)
- func (s *Server) GetToken(c *gin.Context)
- func (s *Server) HandleProbeModel(c *gin.Context)
- func (s *Server) HandleProbeModelEndpoints(c *gin.Context)
- func (s *Server) HandleProbeProvider(c *gin.Context)
- func (s *Server) ImportSkillLocations(c *gin.Context)
- func (s *Server) InvalidateProviderCache(providerUUID string)
- func (s *Server) IsExperimentalFeatureEnabled(scenario typ.RuleScenario, feature string) bool
- func (s *Server) IsFeatureEnabled(feature string) bool
- func (s *Server) ListModelsByScenario(c *gin.Context)
- func (s *Server) ListOAuthProviders(c *gin.Context)
- func (s *Server) ListOAuthTokens(c *gin.Context)
- func (s *Server) NewUsageTracker() *UsageTracker
- func (s *Server) OAuthCallback(c *gin.Context)
- func (s *Server) OpenAIChatCompletions(c *gin.Context)
- func (s *Server) OpenAIListModels(c *gin.Context)
- func (s *Server) PassthroughAnthropic(c *gin.Context)
- func (s *Server) PassthroughOpenAI(c *gin.Context)
- func (s *Server) RecordScenarioRequest(c *gin.Context, scenario string) *ScenarioRecorder
- func (s *Server) RefreshOAuthToken(c *gin.Context)
- func (s *Server) RefreshProviderTemplates(c *gin.Context)
- func (s *Server) RefreshSkillLocation(c *gin.Context)
- func (s *Server) RegisterConfigApplyRoutes(manager *swagger.RouteManager)
- func (s *Server) RegisterUsageRoutes(manager *swagger.RouteManager)
- func (s *Server) RemoveSkillLocation(c *gin.Context)
- func (s *Server) ResponsesCreate(c *gin.Context)
- func (s *Server) ResponsesGet(c *gin.Context)
- func (s *Server) RestartServer(c *gin.Context)
- func (s *Server) RevokeOAuthToken(c *gin.Context)
- func (s *Server) ScanIdes(c *gin.Context)
- func (s *Server) SelectServiceFromSmartRouting(matchedServices []*loadbalance.Service, rule *typ.Rule) (*loadbalance.Service, error)
- func (s *Server) SetScenarioConfig(c *gin.Context)
- func (s *Server) SetScenarioFlag(c *gin.Context)
- func (s *Server) SetupAnthropicEndpoints(group *gin.RouterGroup)
- func (s *Server) SetupMixinEndpoints(group *gin.RouterGroup)
- func (s *Server) SetupOpenAIEndpoints(group *gin.RouterGroup)
- func (s *Server) SetupPassthroughAnthropicEndpoints(group *gin.RouterGroup)
- func (s *Server) SetupPassthroughOpenAIEndpoints(group *gin.RouterGroup)
- func (s *Server) Start(port int) error
- func (s *Server) StartServer(c *gin.Context)
- func (s *Server) Stop(ctx context.Context) error
- func (s *Server) StopServer(c *gin.Context)
- func (s *Server) ToggleProvider(c *gin.Context)
- func (s *Server) UpdateOAuthProvider(c *gin.Context)
- func (s *Server) UpdateProvider(c *gin.Context)
- func (s *Server) UpdateProviderModelsByUUID(c *gin.Context)
- func (s *Server) UpdateRule(c *gin.Context)
- func (s *Server) UseAIEndpoints()
- func (s *Server) UseIndexHTML(c *gin.Context)
- func (s *Server) UseLoadBalanceEndpoints()
- func (s *Server) UseUIEndpoints()
- type ServerActionResponse
- type ServerOption
- func WithAdaptor(enabled bool) ServerOption
- func WithDebug(enabled bool) ServerOption
- func WithDefault() ServerOption
- func WithExperimentalFeatures(features map[string]bool) ServerOption
- func WithHTTPSCertDir(certDir string) ServerOption
- func WithHTTPSEnabled(enabled bool) ServerOption
- func WithHTTPSRegenerate(regenerate bool) ServerOption
- func WithHost(host string) ServerOption
- func WithOpenBrowser(enabled bool) ServerOption
- func WithRecordDir(dir string) ServerOption
- func WithRecordMode(mode obs.RecordMode) ServerOption
- func WithUI(enabled bool) ServerOption
- func WithVersion(version string) ServerOption
- type ServiceHealthResponse
- type ServiceMetric
- type ServiceStatsResponse
- type SingleTemplateResponse
- type SkillContentResponse
- type SkillLocationResponse
- type SkillLocationsResponse
- type StatusResponse
- type StreamClosable
- type TemplateResponse
- type TimeSeriesData
- type TimeSeriesMeta
- type TimeSeriesQuery
- type TimeSeriesResponse
- type ToggleProviderResponse
- type TokenInfo
- type TokenResponse
- type UpdateProviderRequest
- type UpdateProviderResponse
- type UpdateRuleRequest
- type UpdateRuleResponse
- type UpdateRuleTacticRequest
- type UpdateRuleTacticResponse
- type UsageAPI
- type UsageRecordResponse
- type UsageRecordsMeta
- type UsageRecordsQuery
- type UsageRecordsResponse
- type UsageStatsMeta
- type UsageStatsQuery
- type UsageStatsResponse
- type UsageTracker
- type VersionChecker
- type VersionInfo
- type VersionInfoResponse
Constants ¶
const ( // DefaultProbeTimeout is the default timeout for each endpoint probe DefaultProbeTimeout = 10 * time.Second // DefaultCacheTTL is the default time-to-live for cached probe results DefaultCacheTTL = 24 * time.Hour )
const ( ExperimentalFeatureSmartCompact = "smart_compact" ExperimentalFeatureRecording = "recording" )
Experimental feature flag names
Variables ¶
This section is empty.
Functions ¶
func BuildErrorEvent ¶ added in v0.260124.900
BuildErrorEvent builds a standard error event map
func CheckSSESupport ¶ added in v0.260124.900
CheckSSESupport reports whether the connection supports SSE
func GenerateCurlCommand ¶
GenerateCurlCommand generates a curl command for testing the provider
func GetInputValue ¶ added in v0.260124.900
func GetInputValue(input responses.ResponseNewParamsInputUnion) any
GetInputValue extracts the raw input value from ResponseNewParamsInputUnion. Returns the underlying string, array, or nil.
func GetShutdownChannel ¶
func GetShutdownChannel() <-chan struct{}
GetShutdownChannel returns the shutdown channel for the main process to listen on
func MarshalAndSendErrorEvent ¶ added in v0.260124.900
MarshalAndSendErrorEvent marshals and sends an error event
func NewGinHandlerWrapper ¶
func NewGinHandlerWrapper(h gin.HandlerFunc) swagger.Handler
NewGinHandlerWrapper converts gin.HandlerFunc to swagger.Handler
func ParseAndSendStreamError ¶ added in v0.260124.900
ParseAndSendStreamError handles stream errors and sends appropriate error events
func SendAdapterDisabledError ¶ added in v0.260124.900
SendAdapterDisabledError sends an error response when the adapter is disabled
func SendFinishEvent ¶ added in v0.260124.900
SendFinishEvent sends a message_stop event to indicate completion
func SendForwardingError ¶ added in v0.260124.900
SendForwardingError sends an error response for request forwarding failures
func SendInternalError ¶ added in v0.260124.900
SendInternalError sends an error response for internal errors
func SendInvalidRequestBodyError ¶ added in v0.260124.900
SendInvalidRequestBodyError sends an error response for invalid request body
func SendSSEEvent ¶ added in v0.260124.900
SendSSEEvent sends a generic SSE event with JSON data
func SendSSErrorEvent ¶ added in v0.260124.900
SendSSErrorEvent sends an error event through SSE
func SendSSErrorEventJSON ¶ added in v0.260124.900
SendSSErrorEventJSON sends a JSON error event through SSE
func SendStreamingError ¶ added in v0.260124.900
SendStreamingError sends an error response for streaming request failures
func SetGlobalServer ¶
func SetGlobalServer(server *Server)
SetGlobalServer sets the global server instance for web UI control
func SetupSSEHeaders ¶ added in v0.260124.900
SetupSSEHeaders sets up the required headers for Server-Sent Events
func StreamRecoveryHandler ¶ added in v0.260124.900
func StreamRecoveryHandler(c *gin.Context, stream StreamClosable)
StreamRecoveryHandler provides panic recovery for streaming handlers
Types ¶
type AdaptiveProbe ¶
type AdaptiveProbe struct {
// contains filtered or unexported fields
}
AdaptiveProbe handles concurrent endpoint probing for model capabilities
func NewAdaptiveProbe ¶
func NewAdaptiveProbe(s *Server) *AdaptiveProbe
NewAdaptiveProbe creates a new adaptive probe instance
func (*AdaptiveProbe) GetModelCapability ¶
func (ap *AdaptiveProbe) GetModelCapability(providerUUID, modelID string) (*ModelEndpointCapability, error)
GetModelCapability retrieves the cached capability for a model, triggering a probe if no cached entry exists
func (*AdaptiveProbe) GetPreferredEndpoint ¶
func (ap *AdaptiveProbe) GetPreferredEndpoint(provider *typ.Provider, modelID string) string
GetPreferredEndpoint returns the preferred endpoint for a model
func (*AdaptiveProbe) InvalidateProviderCache ¶
func (ap *AdaptiveProbe) InvalidateProviderCache(providerUUID string)
InvalidateProviderCache invalidates all cached capabilities for a provider
func (*AdaptiveProbe) ProbeModelEndpoints ¶
func (ap *AdaptiveProbe) ProbeModelEndpoints(ctx context.Context, req ModelProbeRequest) (*ProbeResult, error)
ProbeModelEndpoints probes both chat and responses endpoints concurrently for a model
func (*AdaptiveProbe) ProbeProviderModels ¶
func (ap *AdaptiveProbe) ProbeProviderModels(ctx context.Context, provider *typ.Provider, models []string) map[string]*ProbeResult
ProbeProviderModels probes all models for a provider concurrently
type AddSkillLocationRequest ¶ added in v0.260204.1200
type AddSkillLocationRequest struct {
Name string `json:"name" binding:"required" description:"Display name for the location" example:"Claude Code Skills"`
Path string `` /* 127-byte string literal not displayed */
IDESource typ.IDESource `json:"ide_source" binding:"required" description:"IDE/source type" example:"claude_code"`
}
AddSkillLocationRequest represents the request to add a skill location
type AddSkillLocationResponse ¶ added in v0.260204.1200
type AddSkillLocationResponse struct {
Success bool `json:"success" example:"true"`
Message string `json:"message" example:"Skill location added successfully"`
Data *typ.SkillLocation `json:"data,omitempty"`
}
AddSkillLocationResponse represents the response for adding a skill location
type AggregatedStat ¶ added in v0.260124.900
type AggregatedStat struct {
Key string `json:"key" example:"gpt-4"`
ProviderUUID string `json:"provider_uuid,omitempty" example:"uuid-123"`
ProviderName string `json:"provider_name,omitempty" example:"openai"`
Model string `json:"model,omitempty" example:"gpt-4"`
Scenario string `json:"scenario,omitempty" example:"openai"`
RequestCount int64 `json:"request_count" example:"5420"`
TotalTokens int64 `json:"total_tokens" example:"2140000"`
InputTokens int64 `json:"total_input_tokens" example:"1250000"`
OutputTokens int64 `json:"total_output_tokens" example:"890000"`
AvgInputTokens float64 `json:"avg_input_tokens" example:"230.6"`
AvgOutputTokens float64 `json:"avg_output_tokens" example:"164.2"`
AvgLatencyMs float64 `json:"avg_latency_ms" example:"1250"`
ErrorCount int64 `json:"error_count" example:"12"`
ErrorRate float64 `json:"error_rate" example:"0.0022"`
StreamedCount int64 `json:"streamed_count" example:"4800"`
StreamedRate float64 `json:"streamed_rate" example:"0.885"`
}
AggregatedStat represents aggregated usage statistics
type AllStatsResponse ¶
type AllStatsResponse struct {
Stats map[string]interface{} `json:"stats"`
}
AllStatsResponse represents the response for all statistics
type AnthropicModel ¶
type AnthropicModel struct {
ID string `json:"id"`
CreatedAt string `json:"created_at"`
DisplayName string `json:"display_name"`
Type string `json:"type"`
}
Model types - based on Anthropic's official models API format
type AnthropicModelsResponse ¶
type AnthropicModelsResponse struct {
Data []AnthropicModel `json:"data"`
FirstID string `json:"first_id"`
HasMore bool `json:"has_more"`
LastID string `json:"last_id"`
}
type ApplyConfigResponse ¶ added in v0.260204.1200
type ApplyConfigResponse struct {
Success bool `json:"success"`
SettingsResult config.ApplyResult `json:"settingsResult"`
OnboardingResult config.ApplyResult `json:"onboardingResult"`
CreatedFiles []string `json:"createdFiles"`
UpdatedFiles []string `json:"updatedFiles"`
BackupPaths []string `json:"backupPaths"`
}
ApplyConfigResponse is the response for ApplyClaudeConfig
type ApplyOpenCodeConfigResponse ¶ added in v0.260204.1200
type ApplyOpenCodeConfigResponse struct {
config.ApplyResult
}
ApplyOpenCodeConfigResponse is the response for ApplyOpenCodeConfigFromState
type CachedModelCapability ¶
type CachedModelCapability struct {
Capability ModelEndpointCapability
ExpiresAt time.Time
}
CachedModelCapability represents a cached model endpoint capability with expiration
type ClearStatsResponse ¶
type ClearStatsResponse struct {
Message string `json:"message" example:"Statistics cleared for rule: gpt-4"`
}
ClearStatsResponse represents the response for clearing statistics
type ConfigInfo ¶
type ConfigInfo struct {
ConfigPath string `json:"config_path" example:"/Users/user/.tingly-box/config.json"`
ConfigDir string `json:"config_dir" example:"/Users/user/.tingly-box"`
}
ConfigInfo represents configuration information
type ConfigInfoResponse ¶
type ConfigInfoResponse struct {
Success bool `json:"success" example:"true"`
Data ConfigInfo `json:"data"`
}
ConfigInfoResponse represents the response for config info endpoint
type CreateProviderRequest ¶
type CreateProviderRequest struct {
Name string `json:"name" binding:"required" description:"Provider name" example:"openai"`
APIBase string `json:"api_base" binding:"required" description:"API base URL" example:"https://api.openai.com/v1"`
APIStyle string `json:"api_style" description:"API style" example:"openai"`
Token string `json:"token" description:"API token" example:"sk-..."`
NoKeyRequired bool `json:"no_key_required" description:"Whether provider requires no API key" example:"false"`
Enabled bool `json:"enabled" description:"Whether provider is enabled" example:"true"`
ProxyURL string `` /* 153-byte string literal not displayed */
}
CreateProviderRequest represents the request to add a new provider
type CreateProviderResponse ¶
type CreateProviderResponse struct {
Success bool `json:"success" example:"true"`
Message string `json:"message" example:"Provider added successfully"`
Data interface{} `json:"data"`
}
CreateProviderResponse represents the response for adding a provider
type CreateRuleRequest ¶ added in v0.260124.900
type CurrentServiceResponse ¶
type CurrentServiceResponse struct {
Rule string `json:"rule" example:"gpt-4"`
Service interface{} `json:"service"`
ServiceID string `json:"service_id" example:"openai:gpt-4"`
Tactic string `json:"tactic" example:"round_robin"`
Stats map[string]interface{} `json:"stats,omitempty"`
}
CurrentServiceResponse represents the current service response
type DeleteOldRecordsRequest ¶ added in v0.260124.900
type DeleteOldRecordsRequest struct {
OlderThanDays int `json:"older_than_days" binding:"required,min=1" description:"Delete records older than this many days" example:"90"`
}
DeleteOldRecordsRequest represents the request to delete old usage records
type DeleteOldRecordsResponse ¶ added in v0.260124.900
type DeleteOldRecordsResponse struct {
Message string `json:"message" example:"Records deleted successfully"`
DeletedCount int64 `json:"deleted_count" example:"1500"`
CutoffDate string `json:"cutoff_date" example:"2024-10-13T00:00:00Z"`
}
DeleteOldRecordsResponse represents the response for deleting old records
type DeleteProviderResponse ¶
type DeleteProviderResponse struct {
Success bool `json:"success" example:"true"`
Message string `json:"message" example:"Provider deleted successfully"`
}
DeleteProviderResponse represents the response for deleting a provider
type DeleteRuleResponse ¶ added in v0.260124.900
type DeleteRuleResponse struct {
Success bool `json:"success" example:"true"`
Message string `json:"message" example:"Rule deleted successfully"`
}
DeleteRuleResponse represents the response for deleting a rule
type DiscoverIdesResponse ¶ added in v0.260204.1200
type DiscoverIdesResponse struct {
Success bool `json:"success" example:"true"`
Data *typ.DiscoveryResult `json:"data,omitempty"`
}
DiscoverIdesResponse represents the response for discovering IDEs
type EndpointProbeStatus ¶
type EndpointProbeStatus struct {
Available bool `json:"available" example:"true"`
LatencyMs int `json:"latency_ms" example:"234"`
ErrorMessage string `json:"error_message,omitempty" example:""`
LastChecked string `json:"last_checked" example:"2026-01-23T10:30:00Z"`
}
EndpointProbeStatus represents the status of an endpoint probe
type EndpointStatus ¶
type EndpointStatus struct {
Available bool
LatencyMs int
ErrorMessage string
LastChecked time.Time
}
EndpointStatus represents the status of a single endpoint
type ErrorDetail ¶
type ErrorDetail struct {
Message string `json:"message"`
Type string `json:"type"`
Code string `json:"code,omitempty"`
}
ErrorDetail represents error details
type ErrorResponse ¶
type ErrorResponse struct {
Error ErrorDetail `json:"error"`
}
ErrorResponse represents an error response
type FetchProviderModelsResponse ¶
type FetchProviderModelsResponse struct {
Success bool `json:"success" example:"true"`
Message string `json:"message" example:"Successfully fetched 150 models for provider openai"`
Data interface{} `json:"data"`
}
FetchProviderModelsResponse represents the response for fetching provider models
type GenerateTokenRequest ¶
type GenerateTokenRequest struct {
ClientID string `json:"client_id" binding:"required" description:"Client ID for token generation" example:"user123"`
}
GenerateTokenRequest represents the request to generate a token
type GitHubRelease ¶ added in v0.260127.1200
type GitHubRelease struct {
TagName string `json:"tag_name"`
HTMLURL string `json:"html_url"`
Name string `json:"name"`
Body string `json:"body"`
}
GitHubRelease represents a GitHub release response
type HealthInfoResponse ¶
type HealthInfoResponse struct {
Status string `json:"status" example:"healthy"`
Service string `json:"service" example:"tingly-box"`
Health bool `json:"health" example:"healthy"`
}
HealthInfoResponse represents the health check response
type HistoryResponse ¶
type HistoryResponse struct {
Success bool `json:"success" example:"true"`
Data interface{} `json:"data"`
}
HistoryResponse represents the response for request history
type ImportSkillLocationsRequest ¶ added in v0.260204.1200
type ImportSkillLocationsRequest struct {
Locations []typ.SkillLocation `json:"locations" binding:"required" description:"Array of skill locations to import"`
}
ImportSkillLocationsRequest represents the request to import skill locations
type ImportSkillLocationsResponse ¶ added in v0.260204.1200
type ImportSkillLocationsResponse struct {
Success bool `json:"success" example:"true"`
Message string `json:"message" example:"Imported 5 skill locations"`
Data []typ.SkillLocation `json:"data,omitempty"`
}
ImportSkillLocationsResponse represents the response for importing skill locations
type LatestVersionInfo ¶
type LatestVersionInfo struct {
CurrentVersion string `json:"current_version" example:"0.260124.1430"`
LatestVersion string `json:"latest_version" example:"0.260130.1200"`
HasUpdate bool `json:"has_update" example:"true"`
ReleaseURL string `json:"release_url" example:"https://github.com/tingly-dev/tingly-box/releases/tag/v0.260130.1200"`
ShouldNotify bool `json:"should_notify" example:"true"`
}
LatestVersionInfo contains version comparison information
type LatestVersionResponse ¶
type LatestVersionResponse struct {
Success bool `json:"success"`
Error string `json:"error,omitempty"`
Data LatestVersionInfo `json:"data,omitempty"`
}
LatestVersionResponse represents the response for version check endpoint
type LoadBalancer ¶
type LoadBalancer struct {
// contains filtered or unexported fields
}
LoadBalancer manages load balancing across multiple services
func NewLoadBalancer ¶
func NewLoadBalancer(statsMW *middleware.StatsMiddleware, cfg *config.Config) *LoadBalancer
NewLoadBalancer creates a new load balancer
func (*LoadBalancer) ClearAllStats ¶
func (lb *LoadBalancer) ClearAllStats()
ClearAllStats clears all statistics (both in-memory and persisted in config)
func (*LoadBalancer) ClearServiceStats ¶
func (lb *LoadBalancer) ClearServiceStats(provider, model string)
ClearServiceStats clears statistics for a specific service
func (*LoadBalancer) GetAllServiceStats ¶
func (lb *LoadBalancer) GetAllServiceStats() map[string]*loadbalance.ServiceStats
GetAllServiceStats returns all service statistics from all active rules. Stats are keyed by provider:model since stats are global (shared across rules).
func (*LoadBalancer) GetRuleSummary ¶
func (lb *LoadBalancer) GetRuleSummary(rule *typ.Rule) map[string]interface{}
GetRuleSummary returns a summary of rule configuration and statistics
func (*LoadBalancer) GetServiceStats ¶
func (lb *LoadBalancer) GetServiceStats(provider, model string) *loadbalance.ServiceStats
GetServiceStats returns statistics for a specific service
func (*LoadBalancer) RecordUsage ¶
func (lb *LoadBalancer) RecordUsage(provider, model string, inputTokens, outputTokens int)
RecordUsage records usage for a service
func (*LoadBalancer) RegisterTactic ¶
func (lb *LoadBalancer) RegisterTactic(tacticType loadbalance.TacticType, tactic typ.LoadBalancingTactic)
RegisterTactic registers a custom tactic
func (*LoadBalancer) SelectService ¶
func (lb *LoadBalancer) SelectService(rule *typ.Rule) (*loadbalance.Service, error)
SelectService selects the best service for a rule based on the configured tactic
func (*LoadBalancer) Stop ¶
func (lb *LoadBalancer) Stop()
Stop stops the load balancer and cleanup resources
func (*LoadBalancer) UpdateServiceIndex ¶
func (lb *LoadBalancer) UpdateServiceIndex(rule *typ.Rule, selectedService *loadbalance.Service)
UpdateServiceIndex updates the current service index for a rule
func (*LoadBalancer) ValidateRule ¶
func (lb *LoadBalancer) ValidateRule(rule *typ.Rule) error
ValidateRule validates a rule configuration
type LoadBalancerAPI ¶
type LoadBalancerAPI struct {
// contains filtered or unexported fields
}
LoadBalancerAPI provides REST endpoints for load balancer management
func NewLoadBalancerAPI ¶
func NewLoadBalancerAPI(loadBalancer *LoadBalancer, cfg *config.Config) *LoadBalancerAPI
NewLoadBalancerAPI creates a new load balancer API
func (*LoadBalancerAPI) ClearAllStats ¶
func (api *LoadBalancerAPI) ClearAllStats(c *gin.Context)
ClearAllStats clears all statistics
func (*LoadBalancerAPI) ClearRuleStats ¶
func (api *LoadBalancerAPI) ClearRuleStats(c *gin.Context)
ClearRuleStats clears statistics for all services in a rule
func (*LoadBalancerAPI) ClearServiceStats ¶
func (api *LoadBalancerAPI) ClearServiceStats(c *gin.Context)
ClearServiceStats clears statistics for a specific service
func (*LoadBalancerAPI) GetAllStats ¶
func (api *LoadBalancerAPI) GetAllStats(c *gin.Context)
GetAllStats returns statistics for all services
func (*LoadBalancerAPI) GetCurrentService ¶
func (api *LoadBalancerAPI) GetCurrentService(c *gin.Context)
GetCurrentService returns the currently active service for a rule
func (*LoadBalancerAPI) GetMetrics ¶
func (api *LoadBalancerAPI) GetMetrics(c *gin.Context)
GetMetrics returns load balancing metrics
func (*LoadBalancerAPI) GetRule ¶
func (api *LoadBalancerAPI) GetRule(c *gin.Context)
GetRule returns a specific rule configuration
func (*LoadBalancerAPI) GetRuleStats ¶
func (api *LoadBalancerAPI) GetRuleStats(c *gin.Context)
GetRuleStats returns statistics for all services in a rule
func (*LoadBalancerAPI) GetRuleSummary ¶
func (api *LoadBalancerAPI) GetRuleSummary(c *gin.Context)
GetRuleSummary returns a comprehensive summary of a rule including statistics
func (*LoadBalancerAPI) GetServiceHealth ¶
func (api *LoadBalancerAPI) GetServiceHealth(c *gin.Context)
GetServiceHealth checks the health of all services in a rule
func (*LoadBalancerAPI) GetServiceStats ¶
func (api *LoadBalancerAPI) GetServiceStats(c *gin.Context)
GetServiceStats returns statistics for a specific service
func (*LoadBalancerAPI) RegisterRoutes ¶
func (api *LoadBalancerAPI) RegisterRoutes(loadBalancer *gin.RouterGroup)
RegisterRoutes registers the load balancer API routes
func (*LoadBalancerAPI) UpdateRuleTactic ¶
func (api *LoadBalancerAPI) UpdateRuleTactic(c *gin.Context)
UpdateRuleTactic updates the load balancing tactic for a rule
type LogEntry ¶
type LogEntry struct {
Time time.Time `json:"time"`
Level string `json:"level"`
Message string `json:"message"`
Data map[string]interface{} `json:"data,omitempty"`
Fields map[string]interface{} `json:"fields,omitempty"`
}
LogEntry represents a log entry for API response
type LogsResponse ¶
LogsResponse represents the API response for logs
type MetricsResponse ¶
type MetricsResponse struct {
Metrics []ServiceMetric `json:"metrics"`
TotalServices int `json:"total_services" example:"5"`
}
MetricsResponse represents the metrics response
type ModelEndpointCapability ¶
type ModelEndpointCapability struct {
ProviderUUID string
ModelID string
SupportsChat bool
ChatLatencyMs int
ChatError string
SupportsResponses bool
ResponsesLatencyMs int
ResponsesError string
PreferredEndpoint string // "chat", "responses", or ""
LastVerified time.Time
}
ModelEndpointCapability represents the endpoint capability information for a model
type ModelProbeData ¶
type ModelProbeData struct {
ProviderUUID string `json:"provider_uuid" example:"uuid-123"`
ModelID string `json:"model_id" example:"gpt-4"`
ChatEndpoint EndpointProbeStatus `json:"chat_endpoint"`
ResponsesEndpoint EndpointProbeStatus `json:"responses_endpoint"`
PreferredEndpoint string `json:"preferred_endpoint" example:"responses"`
LastUpdated string `json:"last_updated" example:"2026-01-23T10:30:00Z"`
}
ModelProbeData represents the probe result data
type ModelProbeRequest ¶
type ModelProbeRequest struct {
ProviderUUID string `json:"provider_uuid" binding:"required" description:"Provider UUID to probe" example:"uuid-123"`
ModelID string `json:"model_id" binding:"required" description:"Model ID to probe" example:"gpt-4"`
ForceRefresh bool `json:"force_refresh" description:"Force new probe even if cached" example:"false"`
}
ModelProbeRequest represents the request to probe a specific model
type ModelProbeResponse ¶
type ModelProbeResponse struct {
Success bool `json:"success" example:"true"`
Error *ErrorDetail `json:"error,omitempty"`
Data *ModelProbeData `json:"data,omitempty"`
}
ModelProbeResponse represents the response from model endpoint probing
type NpmPackage ¶
type NpmPackage struct {
Name string `json:"name"`
DistTags struct {
Latest string `json:"latest"`
} `json:"dist-tags"`
}
NpmPackage represents an npm registry package response
type OAuthAuthorizeRequest ¶
type OAuthAuthorizeRequest struct {
Provider string `json:"provider" binding:"required" description:"OAuth provider type" example:"anthropic"`
UserID string `json:"user_id" description:"User ID for the OAuth flow" example:"user123"`
Redirect string `json:"redirect" description:"URL to redirect after OAuth completion" example:"http://localhost:3000/callback"`
ResponseType string `json:"response_type" description:"Response type: 'redirect' or 'json'" example:"json"`
Name string `json:"name" description:"Custom name for the provider (optional, auto-generated if empty)" example:"my-claude-account"`
ProxyURL string `` /* 158-byte string literal not displayed */
}
OAuthAuthorizeRequest represents the request to initiate OAuth flow
type OAuthAuthorizeResponse ¶
type OAuthAuthorizeResponse struct {
Success bool `json:"success" example:"true"`
Message string `json:"message,omitempty" example:"Authorization initiated"`
Data struct {
AuthURL string `json:"auth_url,omitempty" example:"https://claude.ai/oauth/authorize?..."`
State string `json:"state,omitempty" example:"random_state_string"`
SessionID string `json:"session_id,omitempty" example:"abc123def456"` // For status tracking
// Device code flow fields
DeviceCode string `json:"device_code,omitempty" example:"MN-12345678-abcdef"`
UserCode string `json:"user_code,omitempty" example:"ABCD-EFGH"`
VerificationURI string `json:"verification_uri,omitempty" example:"https://chat.qwen.ai/activate"`
VerificationURIComplete string `json:"verification_uri_complete,omitempty" example:"https://chat.qwen.ai/activate?user_code=ABCD-EFGH"`
ExpiresIn int64 `json:"expires_in,omitempty" example:"1800"`
Interval int64 `json:"interval,omitempty" example:"5"`
Provider string `json:"provider,omitempty" example:"qwen_code"`
} `json:"data"`
}
OAuthAuthorizeResponse represents the response for OAuth authorization initiation
type OAuthCallbackDataResponse ¶
type OAuthCallbackDataResponse struct {
Success bool `json:"success" example:"true"`
AccessToken string `json:"access_token,omitempty" example:"sk-ant-..."`
RefreshToken string `json:"refresh_token,omitempty" example:"refresh_..."`
TokenType string `json:"token_type,omitempty" example:"Bearer"`
ExpiresAt string `json:"expires_at,omitempty" example:"2024-01-01T12:00:00Z"`
Provider string `json:"provider,omitempty" example:"anthropic"`
}
OAuthCallbackDataResponse represents the OAuth callback response with token data
type OAuthCancelRequest ¶
type OAuthCancelRequest struct {
SessionID string `json:"session_id" binding:"required" description:"Session ID to cancel" example:"abc123def456"`
}
OAuthCancelRequest represents the request to cancel an OAuth session
type OAuthDeviceCodeResponse ¶
type OAuthDeviceCodeResponse struct {
Success bool `json:"success" example:"true"`
Message string `json:"message,omitempty" example:"Device code flow initiated"`
Data struct {
DeviceCode string `json:"device_code" example:"MN-12345678-abcdef"`
UserCode string `json:"user_code" example:"ABCD-EFGH"`
VerificationURI string `json:"verification_uri" example:"https://chat.qwen.ai/activate"`
VerificationURIComplete string `json:"verification_uri_complete,omitempty" example:"https://chat.qwen.ai/activate?user_code=ABCD-EFGH"`
ExpiresIn int64 `json:"expires_in" example:"1800"`
Interval int64 `json:"interval" example:"5"`
Provider string `json:"provider" example:"qwen_code"`
} `json:"data"`
}
OAuthDeviceCodeResponse represents the response for device code flow initiation
type OAuthErrorResponse ¶
type OAuthErrorResponse struct {
Success bool `json:"success" example:"false"`
Error string `json:"error" example:"Error message"`
}
OAuthErrorResponse represents a standard error response
type OAuthMessageResponse ¶
type OAuthMessageResponse struct {
Success bool `json:"success" example:"true"`
Message string `json:"message" example:"Operation successful"`
}
OAuthMessageResponse represents a simple success message response
type OAuthProviderDataResponse ¶
type OAuthProviderDataResponse struct {
Success bool `json:"success" example:"true"`
Data OAuthProviderInfo `json:"data"`
}
OAuthProviderDataResponse represents a single provider data response
type OAuthProviderInfo ¶
type OAuthProviderInfo struct {
Type string `json:"type" example:"anthropic"`
DisplayName string `json:"display_name" example:"Anthropic Claude"`
AuthURL string `json:"auth_url,omitempty" example:"https://claude.ai/oauth/authorize"`
Scopes []string `json:"scopes,omitempty" example:"api"`
Configured bool `json:"configured" example:"true"`
}
OAuthProviderInfo represents OAuth provider information
type OAuthProvidersResponse ¶
type OAuthProvidersResponse struct {
Success bool `json:"success" example:"true"`
Data []OAuthProviderInfo `json:"data"`
}
OAuthProvidersResponse represents the response for listing OAuth providers
type OAuthRefreshTokenRequest ¶
type OAuthRefreshTokenRequest struct {
ProviderUUID string `` /* 135-byte string literal not displayed */
}
OAuthRefreshTokenRequest represents the request to refresh an OAuth token
type OAuthRefreshTokenResponse ¶
type OAuthRefreshTokenResponse struct {
Success bool `json:"success" example:"true"`
Message string `json:"message,omitempty" example:"Token refreshed successfully"`
Data struct {
ProviderUUID string `json:"provider_uuid" example:"550e8400-e29b-41d4-a716-446655440000"`
AccessToken string `json:"access_token" example:"sk-ant-..."`
RefreshToken string `json:"refresh_token,omitempty" example:"refresh_..."`
TokenType string `json:"token_type" example:"Bearer"`
ExpiresAt string `json:"expires_at,omitempty" example:"2024-01-01T12:00:00Z"`
ProviderType string `json:"provider_type" example:"claude_code"`
} `json:"data"`
}
OAuthRefreshTokenResponse represents the response for refreshing an OAuth token
type OAuthSessionStatusResponse ¶
type OAuthSessionStatusResponse struct {
Success bool `json:"success" example:"true"`
Data struct {
SessionID string `json:"session_id" example:"abc123def456"`
Status string `json:"status" example:"success"`
ProviderUUID string `json:"provider_uuid,omitempty" example:"550e8400-e29b-41d4-a716-446655440000"`
Error string `json:"error,omitempty" example:"Authorization failed"`
} `json:"data"`
}
OAuthSessionStatusResponse represents the session status check response
type OAuthTokenResponse ¶
type OAuthTokenResponse struct {
Success bool `json:"success" example:"true"`
Data struct {
AccessToken string `json:"access_token" example:"sk-ant-..."`
RefreshToken string `json:"refresh_token,omitempty" example:"refresh_..."`
TokenType string `json:"token_type" example:"Bearer"`
ExpiresAt string `json:"expires_at,omitempty" example:"2024-01-01T12:00:00Z"`
Provider string `json:"provider" example:"anthropic"`
Valid bool `json:"valid" example:"true"`
} `json:"data"`
}
OAuthTokenResponse represents the OAuth token response
type OAuthTokensResponse ¶
type OAuthTokensResponse struct {
Success bool `json:"success" example:"true"`
Data []TokenInfo `json:"data"`
}
OAuthTokensResponse represents the response for listing all user tokens
type OAuthUpdateProviderRequest ¶
type OAuthUpdateProviderRequest struct {
ClientID string `json:"client_id" binding:"required" description:"OAuth client ID" example:"your_client_id"`
ClientSecret string `json:"client_secret" description:"OAuth client secret" example:"your_client_secret"`
RedirectURL string `json:"redirect_url" description:"OAuth redirect URI" example:"http://localhost:12580/oauth/callback"`
}
OAuthUpdateProviderRequest represents the request to update OAuth provider config
type OAuthUpdateProviderResponse ¶
type OAuthUpdateProviderResponse struct {
Success bool `json:"success" example:"true"`
Message string `json:"message" example:"Provider configuration updated"`
Type string `json:"type,omitempty" example:"anthropic"`
}
OAuthUpdateProviderResponse represents the response for updating provider config
type OpenAIChatCompletionResponse ¶
type OpenAIChatCompletionResponse struct {
ID string `json:"id" example:"chatcmpl-123"`
Object string `json:"object" example:"chat.completion"`
Created int64 `json:"created" example:"1677652288"`
Model string `json:"model" example:"gpt-3.5-turbo"`
Choices []struct {
Index int `json:"index" example:"0"`
Message struct {
Role string `json:"role" example:"assistant"`
Content string `json:"content" example:"Hello! How can I help you?"`
} `json:"message"`
FinishReason string `json:"finish_reason" example:"stop"`
} `json:"choices"`
Usage struct {
PromptTokens int `json:"prompt_tokens" example:"10"`
CompletionTokens int `json:"completion_tokens" example:"20"`
TotalTokens int `json:"total_tokens" example:"30"`
} `json:"usage"`
}
OpenAIChatCompletionResponse represents the OpenAI chat completion response
type OpenAIModel ¶
type OpenAIModel struct {
ID string `json:"id"`
Object string `json:"object"`
Created int64 `json:"created"`
OwnedBy string `json:"owned_by"`
}
OpenAIModel represents a model in OpenAI's models API format
type OpenAIModelsResponse ¶
type OpenAIModelsResponse struct {
Object string `json:"object"`
Data []OpenAIModel `json:"data"`
}
OpenAIModelsResponse represents OpenAI's models API response format
type OpenCodeConfigPreviewResponse ¶ added in v0.260204.1200
type OpenCodeConfigPreviewResponse struct {
Success bool `json:"success"`
ConfigJSON string `json:"configJson"`
ScriptWin string `json:"scriptWindows"`
ScriptUnix string `json:"scriptUnix"`
Message string `json:"message,omitempty"`
}
OpenCodeConfigPreviewResponse is the response for GetOpenCodeConfigPreview
type PassthroughHandler ¶
type PassthroughHandler struct {
// contains filtered or unexported fields
}
PassthroughHandler handles pass-through requests. It replaces the model name and proxies the request without any transformations.
func NewPassthroughHandler ¶
func NewPassthroughHandler(server *Server) *PassthroughHandler
NewPassthroughHandler creates a new pass-through handler
type ProbeCache ¶
type ProbeCache struct {
// contains filtered or unexported fields
}
ProbeCache provides in-memory caching for model endpoint capability probe results
func NewProbeCache ¶
func NewProbeCache(ttl time.Duration) *ProbeCache
NewProbeCache creates a new probe cache with the specified TTL
func (*ProbeCache) CleanupExpired ¶
func (pc *ProbeCache) CleanupExpired()
CleanupExpired removes expired entries from cache
func (*ProbeCache) Get ¶
func (pc *ProbeCache) Get(providerUUID, modelID string) *ModelEndpointCapability
Get retrieves cached capability for a model
func (*ProbeCache) Invalidate ¶
func (pc *ProbeCache) Invalidate(providerUUID, modelID string)
Invalidate removes cached capability for a specific model
func (*ProbeCache) InvalidateProvider ¶
func (pc *ProbeCache) InvalidateProvider(providerUUID string)
InvalidateProvider removes all cached capabilities for a provider
func (*ProbeCache) Set ¶
func (pc *ProbeCache) Set(providerUUID, modelID string, capability *ModelEndpointCapability)
Set stores capability in cache
func (*ProbeCache) SetFromProbeResult ¶
func (pc *ProbeCache) SetFromProbeResult(result *ProbeResult)
SetFromProbeResult stores probe result in cache
func (*ProbeCache) StartCleanupTask ¶
func (pc *ProbeCache) StartCleanupTask(interval time.Duration)
StartCleanupTask starts a background task to periodically clean up expired entries
type ProbeCacheRequest ¶
type ProbeCacheRequest struct {
ProviderUUID string
ModelID string
ForceRefresh bool // Force new probe even if cached
}
ProbeCacheRequest represents a request to probe a model
type ProbeProviderRequest ¶
type ProbeProviderRequest struct {
Name string `json:"name" binding:"required" description:"Provider name" example:"openai"`
APIBase string `json:"api_base" binding:"required" description:"API base URL" example:"https://api.openai.com/v1"`
APIStyle string `json:"api_style" binding:"required,oneof=openai anthropic" description:"API style" example:"openai"`
Token string `json:"token" binding:"required" description:"API token to test" example:"sk-..."`
}
ProbeProviderRequest represents the request to probe/test a provider's API key and connectivity
type ProbeProviderResponse ¶
type ProbeProviderResponse struct {
Success bool `json:"success" example:"true"`
Error *ErrorDetail `json:"error,omitempty"`
Data *ProbeProviderResponseData `json:"data,omitempty"`
}
ProbeProviderResponse represents the response from provider probing
type ProbeProviderResponseData ¶
type ProbeProviderResponseData struct {
Provider string `json:"provider" example:"openai"`
APIBase string `json:"api_base" example:"https://api.openai.com/v1"`
APIStyle string `json:"api_style" example:"openai"`
Valid bool `json:"valid" example:"true"`
Message string `json:"message" example:"API key is valid and accessible"`
TestResult string `json:"test_result" example:"models_endpoint_success"`
ResponseTime int64 `json:"response_time_ms" example:"250"`
ModelsCount int `json:"models_count,omitempty" example:"150"`
}
ProbeProviderResponseData represents the data returned from provider probing
type ProbeRequest ¶
type ProbeRequest struct {
Provider string `json:"provider" binding:"required" description:"Provider UUID to test against" example:"550e8400-e29b-41d4-a716-446655440000"`
Model string `json:"model" binding:"required" description:"Model name to test against" example:"gpt-4-latest"`
}
ProbeRequest represents the request to probe/test a provider and model
type ProbeRequestDetail ¶
type ProbeRequestDetail struct {
Messages []map[string]interface{} `json:"messages"`
Model string `json:"model"`
MaxTokens int `json:"max_tokens"`
Temperature float64 `json:"temperature"`
Provider string `json:"provider"`
Timestamp string `json:"timestamp"`
}
ProbeRequestDetail represents the mock request data for probing
func NewMockRequest ¶
func NewMockRequest(provider, model string) ProbeRequestDetail
NewMockRequest creates a new mock request with default values
type ProbeResponse ¶
type ProbeResponse struct {
Success bool `json:"success"`
Error *ErrorDetail `json:"error,omitempty"`
Data *ProbeResponseData `json:"data,omitempty"`
}
ProbeResponse represents the overall probe response
type ProbeResponseData ¶
type ProbeResponseData struct {
Request ProbeRequestDetail `json:"request"`
Response ProbeResponseDetail `json:"response"`
Usage ProbeUsage `json:"usage"`
CurlCommand string `json:"curl_command,omitempty"`
}
ProbeResponseData represents the response data structure
type ProbeResponseDetail ¶
type ProbeResponseDetail struct {
Content string `json:"content"`
Model string `json:"model"`
Provider string `json:"provider"`
FinishReason string `json:"finish_reason"`
Error string `json:"error,omitempty"`
}
ProbeResponseDetail represents the API response
type ProbeResult ¶
type ProbeResult struct {
ProviderUUID string
ModelID string
ChatEndpoint EndpointStatus
ResponsesEndpoint EndpointStatus
PreferredEndpoint string
LastUpdated time.Time
}
ProbeResult represents the complete probe result for a model
type ProbeUsage ¶
type ProbeUsage struct {
PromptTokens int `json:"prompt_tokens"`
CompletionTokens int `json:"completion_tokens"`
TotalTokens int `json:"total_tokens"`
TimeCost int `json:"time_cost"`
}
ProbeUsage represents token usage information
type ProviderModelInfo ¶
type ProviderModelInfo struct {
Models []string `json:"models" example:"gpt-3.5-turbo,gpt-4"`
StarModels []string `json:"star_models" example:"gpt-4"`
CustomModel []string `json:"custom_model" example:"custom-gpt-model"`
APIBase string `json:"api_base" example:"https://api.openai.com/v1"`
LastUpdated string `json:"last_updated,omitempty" example:"2024-01-15 10:30:00"`
}
ProviderModelInfo represents model information for a specific provider
type ProviderModelsResponse ¶
type ProviderModelsResponse struct {
Success bool `json:"success" example:"true"`
Message string `json:"message" example:"Provider models successfully"`
Data ProviderModelInfo `json:"data"`
}
ProviderModelsResponse represents the response for getting provider models
type ProviderResponse ¶
type ProviderResponse struct {
UUID string `json:"uuid" example:"0123456789ABCDEF"`
Name string `json:"name" example:"openai"`
APIBase string `json:"api_base" example:"https://api.openai.com/v1"`
APIStyle string `json:"api_style" example:"openai"`
Token string `json:"token" example:"sk-***...***"` // Only populated for api_key auth type
NoKeyRequired bool `json:"no_key_required" example:"false"`
Enabled bool `json:"enabled" example:"true"`
ProxyURL string `json:"proxy_url,omitempty" example:"http://127.0.0.1:7890"`
AuthType string `json:"auth_type,omitempty" example:"api_key"` // api_key or oauth
OAuthDetail *typ.OAuthDetail `json:"oauth_detail,omitempty"` // OAuth credentials (only for oauth auth type)
}
ProviderResponse represents a provider configuration with masked token
type ProvidersResponse ¶
type ProvidersResponse struct {
Success bool `json:"success" example:"true"`
Data []ProviderResponse `json:"data"`
}
ProvidersResponse represents the response for listing providers
type RefreshSkillLocationResponse ¶ added in v0.260204.1200
type RefreshSkillLocationResponse struct {
Success bool `json:"success" example:"true"`
Message string `json:"message" example:"Skill location refreshed successfully"`
Data *typ.ScanResult `json:"data,omitempty"`
}
RefreshSkillLocationResponse represents the response for refreshing a skill location
type RemoveSkillLocationResponse ¶ added in v0.260204.1200
type RemoveSkillLocationResponse struct {
Success bool `json:"success" example:"true"`
Message string `json:"message" example:"Skill location removed successfully"`
}
RemoveSkillLocationResponse represents the response for removing a skill location
type RequestConfig ¶
type RequestConfig struct {
RequestModel string `json:"request_model" example:"gpt-3.5-turbo"`
ResponseModel string `json:"response_model" example:"gpt-3.5-turbo"`
Provider string `json:"provider" example:"openai"`
DefaultModel string `json:"default_model" example:"gpt-3.5-turbo"`
}
RequestConfig represents a request configuration in defaults response
type ResponseCreateRequest ¶ added in v0.260124.900
type ResponseCreateRequest struct {
// Stream indicates whether to stream the response
// This is not part of ResponseNewParams as streaming is controlled
// by using NewStreaming() method on the SDK client
Stream bool `json:"stream"`
// Embed the native SDK type for all other fields
responses.ResponseNewParams
}
ResponseCreateRequest wraps the native ResponseNewParams with additional fields for proxy-specific handling like the `stream` parameter.
func (*ResponseCreateRequest) UnmarshalJSON ¶ added in v0.260124.900
func (r *ResponseCreateRequest) UnmarshalJSON(data []byte) error
UnmarshalJSON implements custom JSON unmarshaling for ResponseCreateRequest. It handles both the custom Stream field and the embedded ResponseNewParams.
type ResponseInputItemUnionParam ¶ added in v0.260124.900
type ResponseInputItemUnionParam = responses.ResponseInputItemUnionParam
ResponseInputItemUnionParam is an alias to the native OpenAI SDK type
type ResponseNewParams ¶ added in v0.260124.900
type ResponseNewParams = responses.ResponseNewParams
ResponseNewParams is an alias to the native OpenAI SDK type
type ResponseNewParamsInputUnion ¶ added in v0.260124.900
type ResponseNewParamsInputUnion = responses.ResponseNewParamsInputUnion
ResponseNewParamsInputUnion is an alias to the native OpenAI SDK type
type RuleResponse ¶ added in v0.260124.900
type RuleResponse struct {
Success bool `json:"success" example:"true"`
Data *typ.Rule `json:"data"`
}
RuleResponse represents a rule configuration response
type RuleStatsResponse ¶
type RuleStatsResponse struct {
Rule string `json:"rule" example:"gpt-4"`
Stats map[string]interface{} `json:"stats"`
}
RuleStatsResponse represents the statistics response for a rule
type RuleSummaryResponse ¶
type RuleSummaryResponse struct {
Summary interface{} `json:"summary"`
}
RuleSummaryResponse represents a rule summary response
type RulesResponse ¶ added in v0.260124.900
type RulesResponse struct {
Success bool `json:"success" example:"true"`
Data interface{} `json:"data"`
}
RulesResponse represents the response for getting all rules
type SSEEventWriter ¶ added in v0.260124.900
SSEEventWriter is an interface for writing SSE events
type ScanIdesResponse ¶ added in v0.260204.1200
type ScanIdesResponse struct {
Success bool `json:"success" example:"true"`
Data *typ.DiscoveryResult `json:"data,omitempty"`
}
ScanIdesResponse represents the response for scanning all IDEs
type ScenarioFlagResponse ¶ added in v0.260124.900
type ScenarioFlagResponse struct {
Success bool `json:"success" example:"true"`
Data struct {
Scenario typ.RuleScenario `json:"scenario" example:"claude_code"`
Flag string `json:"flag" example:"unified"`
Value bool `json:"value" example:"true"`
} `json:"data"`
}
ScenarioFlagResponse represents the response for a scenario flag
type ScenarioFlagUpdateRequest ¶ added in v0.260204.1200
type ScenarioFlagUpdateRequest struct {
Value bool `json:"value"`
}
type ScenarioRecorder ¶
type ScenarioRecorder struct {
// contains filtered or unexported fields
}
ScenarioRecorder captures scenario-level request/response recording
func (*ScenarioRecorder) EnableStreaming ¶
func (sr *ScenarioRecorder) EnableStreaming()
EnableStreaming enables streaming mode for this recorder
func (*ScenarioRecorder) GetStreamChunks ¶
func (sr *ScenarioRecorder) GetStreamChunks() []map[string]interface{}
GetStreamChunks returns the collected stream chunks
func (*ScenarioRecorder) RecordError ¶
func (sr *ScenarioRecorder) RecordError(err error)
RecordError records an error for the scenario-level request
func (*ScenarioRecorder) RecordResponse ¶
func (sr *ScenarioRecorder) RecordResponse(provider *typ.Provider, model string)
RecordResponse records the scenario-level response (tingly-box -> client). This captures the response sent back to the client.
func (*ScenarioRecorder) RecordStreamChunk ¶
func (sr *ScenarioRecorder) RecordStreamChunk(eventType string, chunk interface{})
RecordStreamChunk records a single stream chunk
func (*ScenarioRecorder) SetAssembledResponse ¶
func (sr *ScenarioRecorder) SetAssembledResponse(response any)
SetAssembledResponse sets the assembled response for streaming. Accepts any type (e.g., anthropic.Message) and converts it to a map for storage.
type ScenarioResponse ¶ added in v0.260124.900
type ScenarioResponse struct {
Success bool `json:"success" example:"true"`
Data typ.ScenarioConfig `json:"data"`
}
ScenarioResponse represents the response for a single scenario
type ScenarioUpdateRequest ¶ added in v0.260124.900
type ScenarioUpdateRequest struct {
Scenario typ.RuleScenario `json:"scenario" binding:"required" example:"claude_code"`
Flags typ.ScenarioFlags `json:"flags" binding:"required"`
}
ScenarioUpdateRequest represents the request to update a scenario
type ScenarioUpdateResponse ¶ added in v0.260124.900
type ScenarioUpdateResponse struct {
Success bool `json:"success" example:"true"`
Message string `json:"message" example:"Scenario config saved successfully"`
Data typ.ScenarioConfig `json:"data"`
}
ScenarioUpdateResponse represents the response for updating scenario
type ScenariosResponse ¶ added in v0.260124.900
type ScenariosResponse struct {
Success bool `json:"success" example:"true"`
Data []typ.ScenarioConfig `json:"data"`
}
ScenariosResponse represents the response for getting all scenarios
type Server ¶
type Server struct {
// contains filtered or unexported fields
}
Server represents the HTTP server
func GetGlobalServer ¶
func GetGlobalServer() *Server
GetGlobalServer gets the global server instance
func NewServer ¶
func NewServer(cfg *config.Config, opts ...ServerOption) *Server
NewServer creates a new HTTP server instance with functional options
func (*Server) AddSkillLocation ¶ added in v0.260204.1200
AddSkillLocation adds a new skill location
func (*Server) AnthropicCountTokens ¶
AnthropicCountTokens handles the Anthropic v1 count_tokens endpoint. This is the entry point that delegates to the appropriate implementation (v1 or beta).
func (*Server) AnthropicListModels ¶
AnthropicListModels handles Anthropic v1 models endpoint
func (*Server) AnthropicMessages ¶ added in v0.260124.900
AnthropicMessages handles Anthropic v1 messages API requests. This is the entry point that delegates to the appropriate implementation (v1 or beta).
func (*Server) ApplyClaudeConfig ¶ added in v0.260204.1200
ApplyClaudeConfig generates and applies Claude Code configuration from system state
func (*Server) ApplyOpenCodeConfigFromState ¶ added in v0.260204.1200
ApplyOpenCodeConfigFromState generates and applies OpenCode configuration from system state
func (*Server) ApplyRecording ¶
func (s *Server) ApplyRecording(scenario typ.RuleScenario) bool
ApplyRecording checks if recording should be applied for a scenario
func (*Server) ApplySmartCompact ¶
func (s *Server) ApplySmartCompact(scenario typ.RuleScenario) bool
ApplySmartCompact checks if smart_compact should be applied for a scenario
func (*Server) AuthorizeOAuth ¶ added in v0.260124.900
AuthorizeOAuth initiates the OAuth flow POST /api/v1/oauth/authorize
func (*Server) CancelOAuthSession ¶ added in v0.260127.1200
CancelOAuthSession cancels an in-progress OAuth session and cleans up resources POST /api/v1/oauth/cancel
func (*Server) CreateProvider ¶
CreateProvider adds a new provider
func (*Server) CreateRule ¶ added in v0.260124.900
func (*Server) DeleteOAuthProvider ¶ added in v0.260124.900
DeleteOAuthProvider removes an OAuth provider configuration (clears credentials) DELETE /api/v1/oauth/providers/:type
func (*Server) DeleteProvider ¶
DeleteProvider removes a provider
func (*Server) DeleteRule ¶ added in v0.260124.900
func (*Server) DetermineProviderAndModel ¶
func (s *Server) DetermineProviderAndModel(modelName string) (*typ.Provider, *loadbalance.Service, *typ.Rule, error)
DetermineProviderAndModel resolves the model name and finds the appropriate provider using load balancing
func (*Server) DetermineProviderAndModelWithScenario ¶
func (s *Server) DetermineProviderAndModelWithScenario(scenario typ.RuleScenario, modelName string, req interface{}) (*typ.Provider, *loadbalance.Service, *typ.Rule, error)
DetermineProviderAndModelWithScenario resolves the provider and model for the given scenario, model name, and request using load balancing
func (*Server) DiscoverIdes ¶ added in v0.260204.1200
DiscoverIdes scans the home directory for installed IDEs with skills
func (*Server) ExtractRequestContext ¶
func (s *Server) ExtractRequestContext(req interface{}) (*smartrouting.RequestContext, error)
ExtractRequestContext extracts RequestContext from request based on type
func (*Server) GenerateToken ¶
GenerateToken handles token generation requests
func (*Server) GetHealthInfo ¶
GetHealthInfo handles health check requests. This is a lightweight health check that can be called frequently.
func (*Server) GetHistory ¶
func (*Server) GetInfoConfig ¶
func (*Server) GetInfoVersion ¶
func (*Server) GetLatestVersion ¶
GetLatestVersion checks GitHub releases for the latest version
func (*Server) GetLoadBalancer ¶
func (s *Server) GetLoadBalancer() *LoadBalancer
GetLoadBalancer returns the load balancer instance
func (*Server) GetLogStats ¶
GetLogStats returns statistics about the logs
func (*Server) GetLogs ¶
GetLogs retrieves logs with optional filtering. Query parameters:
- limit: maximum number of entries to return (default: 100)
- level: filter by log level (debug, info, warn, error)
- since: RFC3339 timestamp to filter entries after this time
func (*Server) GetOAuthProvider ¶ added in v0.260124.900
GetOAuthProvider returns a specific OAuth provider configuration GET /api/v1/oauth/providers/:type
func (*Server) GetOAuthSessionStatus ¶ added in v0.260124.900
GetOAuthSessionStatus returns the status of an OAuth session GET /api/v1/oauth/status?session_id=xxx
func (*Server) GetOAuthToken ¶ added in v0.260124.900
GetOAuthToken returns the OAuth token for a user and provider GET /api/v1/oauth/token?provider=anthropic&user_id=xxx
func (*Server) GetOpenCodeConfigPreview ¶ added in v0.260204.1200
GetOpenCodeConfigPreview generates OpenCode configuration preview from system state This endpoint returns the config JSON for display purposes without applying it
func (*Server) GetOrCreateScenarioSink ¶
func (s *Server) GetOrCreateScenarioSink(scenario typ.RuleScenario) *obs.Sink
GetOrCreateScenarioSink gets or creates a recording sink for the specified scenario The sink is created on-demand and cached for subsequent use
func (*Server) GetPreferredEndpointForModel ¶
GetPreferredEndpointForModel returns the preferred endpoint (chat or responses) for a model Returns "responses" if the model supports the Responses API, otherwise returns "chat"
func (*Server) GetProvider ¶
GetProvider returns details for a specific provider (with masked token)
func (*Server) GetProviderModelsByUUID ¶
func (*Server) GetProviderTemplate ¶ added in v0.260124.900
GetProviderTemplate returns a single provider template by ID
func (*Server) GetProviderTemplateVersion ¶ added in v0.260124.900
GetProviderTemplateVersion returns the current template registry version
func (*Server) GetProviderTemplates ¶ added in v0.260124.900
GetProviderTemplates returns all provider templates
func (*Server) GetProviders ¶
func (*Server) GetRules ¶ added in v0.260124.900
GetRules returns all rules, which must be filtered by scenario.
func (*Server) GetScenarioConfig ¶ added in v0.260124.900
GetScenarioConfig returns configuration for a specific scenario
func (*Server) GetScenarioFlag ¶ added in v0.260124.900
GetScenarioFlag returns a specific flag value for a scenario
func (*Server) GetScenarios ¶ added in v0.260124.900
GetScenarios returns all scenario configurations
func (*Server) GetSkillContent ¶ added in v0.260204.1200
GetSkillContent returns the content of a skill file
func (*Server) GetSkillLocation ¶ added in v0.260204.1200
GetSkillLocation retrieves a specific skill location
func (*Server) GetSkillLocations ¶ added in v0.260204.1200
GetSkillLocations returns all skill locations
func (*Server) GetToken ¶
GetToken handles token retrieval requests - generates a token if it doesn't exist
func (*Server) HandleProbeModel ¶
HandleProbeModel tests a rule configuration by sending a sample request to the configured provider
func (*Server) HandleProbeModelEndpoints ¶
HandleProbeModelEndpoints handles adaptive probe for model endpoints (chat and responses)
func (*Server) HandleProbeProvider ¶
HandleProbeProvider tests a provider's API key and connectivity
func (*Server) ImportSkillLocations ¶ added in v0.260204.1200
ImportSkillLocations imports discovered skill locations
func (*Server) InvalidateProviderCache ¶
InvalidateProviderCache invalidates cached capabilities for a provider
func (*Server) IsExperimentalFeatureEnabled ¶
func (s *Server) IsExperimentalFeatureEnabled(scenario typ.RuleScenario, feature string) bool
IsExperimentalFeatureEnabled checks if an experimental feature is enabled for a scenario
func (*Server) IsFeatureEnabled ¶
IsFeatureEnabled checks if a specific feature is enabled
func (*Server) ListModelsByScenario ¶
ListModelsByScenario handles the /v1/models endpoint for scenario-based routing
func (*Server) ListOAuthProviders ¶ added in v0.260124.900
ListOAuthProviders returns all available OAuth providers GET /api/v1/oauth/providers
func (*Server) ListOAuthTokens ¶ added in v0.260124.900
ListOAuthTokens returns all tokens for a user GET /api/v1/oauth/tokens?user_id=xxx
func (*Server) NewUsageTracker ¶
func (s *Server) NewUsageTracker() *UsageTracker
NewUsageTracker creates a new UsageTracker
func (*Server) OAuthCallback ¶ added in v0.260124.900
OAuthCallback handles the OAuth callback from the provider. This is typically called by the OAuth provider redirect. GET /oauth/callback?code=xxx&state=xxx
func (*Server) OpenAIChatCompletions ¶ added in v0.260124.900
OpenAIChatCompletions handles OpenAI v1 chat completion requests
func (*Server) OpenAIListModels ¶
OpenAIListModels handles the /v1/models endpoint (OpenAI compatible)
func (*Server) PassthroughAnthropic ¶
PassthroughAnthropic handles Anthropic-style pass-through requests Consolidates: PassthroughAnthropicMessages, PassthroughAnthropicCountTokens
func (*Server) PassthroughOpenAI ¶
PassthroughOpenAI handles OpenAI-style pass-through requests Consolidates: PassthroughOpenAIChatCompletions, PassthroughOpenAIResponsesCreate, PassthroughOpenAIResponsesGet
func (*Server) RecordScenarioRequest ¶
func (s *Server) RecordScenarioRequest(c *gin.Context, scenario string) *ScenarioRecorder
RecordScenarioRequest records the scenario-level request (client -> tingly-box) This captures the original request from the client before any transformation
func (*Server) RefreshOAuthToken ¶ added in v0.260124.900
RefreshOAuthToken refreshes an OAuth token using a refresh token POST /api/v1/oauth/refresh
func (*Server) RefreshProviderTemplates ¶ added in v0.260124.900
RefreshProviderTemplates fetches the latest templates from GitHub
func (*Server) RefreshSkillLocation ¶ added in v0.260204.1200
RefreshSkillLocation scans a location for updated skill list
func (*Server) RegisterConfigApplyRoutes ¶ added in v0.260204.1200
func (s *Server) RegisterConfigApplyRoutes(manager *swagger.RouteManager)
RegisterConfigApplyRoutes registers the config apply API routes
func (*Server) RegisterUsageRoutes ¶ added in v0.260124.900
func (s *Server) RegisterUsageRoutes(manager *swagger.RouteManager)
RegisterUsageRoutes registers the usage API routes with swagger documentation
func (*Server) RemoveSkillLocation ¶ added in v0.260204.1200
RemoveSkillLocation removes a skill location
func (*Server) ResponsesCreate ¶
ResponsesCreate handles POST /v1/responses
func (*Server) ResponsesGet ¶
ResponsesGet handles GET /v1/responses/{id}
func (*Server) RestartServer ¶
func (*Server) RevokeOAuthToken ¶ added in v0.260124.900
RevokeOAuthToken removes the OAuth token for a user and provider DELETE /api/v1/oauth/token?provider=anthropic&user_id=xxx
func (*Server) ScanIdes ¶ added in v0.260204.1200
ScanIdes scans all IDE locations and returns discovered skills. This is a comprehensive scan that checks all default IDE locations.
func (*Server) SelectServiceFromSmartRouting ¶
func (s *Server) SelectServiceFromSmartRouting(matchedServices []*loadbalance.Service, rule *typ.Rule) (*loadbalance.Service, error)
SelectServiceFromSmartRouting selects a service from matched smart routing services Creates a temporary rule with the matched services and uses the configured load balancing tactic
func (*Server) SetScenarioConfig ¶ added in v0.260124.900
SetScenarioConfig creates or updates scenario configuration
func (*Server) SetScenarioFlag ¶ added in v0.260124.900
SetScenarioFlag sets a specific flag value for a scenario
func (*Server) SetupAnthropicEndpoints ¶
func (s *Server) SetupAnthropicEndpoints(group *gin.RouterGroup)
func (*Server) SetupMixinEndpoints ¶
func (s *Server) SetupMixinEndpoints(group *gin.RouterGroup)
func (*Server) SetupOpenAIEndpoints ¶
func (s *Server) SetupOpenAIEndpoints(group *gin.RouterGroup)
func (*Server) SetupPassthroughAnthropicEndpoints ¶
func (s *Server) SetupPassthroughAnthropicEndpoints(group *gin.RouterGroup)
SetupPassthroughAnthropicEndpoints sets up pass-through endpoints for Anthropic-style requests These endpoints bypass request/response transformations and only replace the model name
func (*Server) SetupPassthroughOpenAIEndpoints ¶
func (s *Server) SetupPassthroughOpenAIEndpoints(group *gin.RouterGroup)
SetupPassthroughOpenAIEndpoints sets up pass-through endpoints for OpenAI-style requests These endpoints bypass request/response transformations and only replace the model name
func (*Server) StartServer ¶
func (*Server) StopServer ¶
func (*Server) ToggleProvider ¶
ToggleProvider enables/disables a provider
func (*Server) UpdateOAuthProvider ¶ added in v0.260124.900
UpdateOAuthProvider updates an OAuth provider configuration PUT /api/v1/oauth/providers/:type
func (*Server) UpdateProvider ¶
UpdateProvider updates an existing provider
func (*Server) UpdateProviderModelsByUUID ¶
func (*Server) UpdateRule ¶ added in v0.260124.900
UpdateRule creates or updates a rule
func (*Server) UseAIEndpoints ¶
func (s *Server) UseAIEndpoints()
func (*Server) UseIndexHTML ¶
func (*Server) UseLoadBalanceEndpoints ¶
func (s *Server) UseLoadBalanceEndpoints()
func (*Server) UseUIEndpoints ¶
func (s *Server) UseUIEndpoints()
Init sets up Server routes and templates on the main server engine
type ServerActionResponse ¶
type ServerActionResponse struct {
Success bool `json:"success" example:"true"`
Message string `json:"message" example:"Server stopped successfully"`
}
ServerActionResponse represents the response for server actions (start/stop/restart)
type ServerOption ¶
type ServerOption func(*Server)
ServerOption defines a functional option for Server configuration
func WithAdaptor ¶
func WithAdaptor(enabled bool) ServerOption
WithAdaptor enables or disables the adaptor for the server
func WithDebug ¶
func WithDebug(enabled bool) ServerOption
WithDebug enables or disables debug mode for the server
func WithExperimentalFeatures ¶
func WithExperimentalFeatures(features map[string]bool) ServerOption
WithExperimentalFeatures sets the experimental features for the server
func WithHTTPSCertDir ¶
func WithHTTPSCertDir(certDir string) ServerOption
WithHTTPSCertDir sets the HTTPS certificate directory
func WithHTTPSEnabled ¶
func WithHTTPSEnabled(enabled bool) ServerOption
WithHTTPSEnabled enables or disables HTTPS
func WithHTTPSRegenerate ¶
func WithHTTPSRegenerate(regenerate bool) ServerOption
WithHTTPSRegenerate sets the HTTPS certificate regenerate flag
func WithHost ¶
func WithHost(host string) ServerOption
func WithOpenBrowser ¶
func WithOpenBrowser(enabled bool) ServerOption
WithOpenBrowser enables or disables automatic browser opening
func WithRecordDir ¶
func WithRecordDir(dir string) ServerOption
WithRecordDir sets the scenario-level record directory
func WithRecordMode ¶
func WithRecordMode(mode obs.RecordMode) ServerOption
WithRecordMode sets the record mode for request/response recording mode: empty string = disabled, "all" = record all, "response" = response only, "scenario" = record scenario only
func WithUI ¶
func WithUI(enabled bool) ServerOption
WithUI enables or disables the UI for the server
func WithVersion ¶
func WithVersion(version string) ServerOption
type ServiceHealthResponse ¶
type ServiceHealthResponse struct {
Rule string `json:"rule" example:"gpt-4"`
Health map[string]interface{} `json:"health"`
}
ServiceHealthResponse represents the health check response for services
type ServiceMetric ¶
type ServiceMetric struct {
ServiceID string `json:"service_id" example:"openai:gpt-4"`
RequestCount int64 `json:"request_count" example:"100"`
WindowRequestCount int64 `json:"window_request_count" example:"50"`
WindowTokensConsumed int64 `json:"window_tokens_consumed" example:"25000"`
WindowInputTokens int64 `json:"window_input_tokens" example:"15000"`
WindowOutputTokens int64 `json:"window_output_tokens" example:"10000"`
LastUsed string `json:"last_used" example:"2024-01-01T12:00:00Z"`
}
ServiceMetric represents a service metric entry
type ServiceStatsResponse ¶
type ServiceStatsResponse struct {
ServiceID string `json:"service_id" example:"openai:gpt-4"`
Stats map[string]interface{} `json:"stats,omitempty"`
}
ServiceStatsResponse represents the statistics response for a service
type SingleTemplateResponse ¶ added in v0.260124.900
type SingleTemplateResponse struct {
Success bool `json:"success"`
Data *data.ProviderTemplate `json:"data,omitempty"`
Message string `json:"message,omitempty"`
}
SingleTemplateResponse represents the response for a single template
type SkillContentResponse ¶ added in v0.260204.1200
type SkillContentResponse struct {
Success bool `json:"success" example:"true"`
Data *typ.Skill `json:"data,omitempty"`
}
SkillContentResponse represents the response for getting skill content
type SkillLocationResponse ¶ added in v0.260204.1200
type SkillLocationResponse struct {
Success bool `json:"success" example:"true"`
Data *typ.SkillLocation `json:"data,omitempty"`
}
SkillLocationResponse represents the response for getting a skill location
type SkillLocationsResponse ¶ added in v0.260204.1200
type SkillLocationsResponse struct {
Success bool `json:"success" example:"true"`
Data []typ.SkillLocation `json:"data"`
}
SkillLocationsResponse represents the response for listing skill locations
type StatusResponse ¶
type StatusResponse struct {
Success bool `json:"success" example:"true"`
Data struct {
ServerRunning bool `json:"server_running" example:"true"`
Port int `json:"port" example:"12580"`
ProvidersTotal int `json:"providers_total" example:"3"`
ProvidersEnabled int `json:"providers_enabled" example:"2"`
RequestCount int `json:"request_count" example:"100"`
} `json:"data"`
}
StatusResponse represents the server status response
type StreamClosable ¶ added in v0.260124.900
type StreamClosable interface {
Close() error
}
StreamClosable is an interface for types that can be closed
type TemplateResponse ¶ added in v0.260124.900
type TemplateResponse struct {
Success bool `json:"success"`
Data map[string]*data.ProviderTemplate `json:"data,omitempty"`
Message string `json:"message,omitempty"`
Version string `json:"version,omitempty"`
}
TemplateResponse represents the response for provider template endpoints
type TimeSeriesData ¶ added in v0.260124.900
type TimeSeriesData struct {
Timestamp string `json:"timestamp" example:"2025-01-10T00:00:00Z"`
RequestCount int64 `json:"request_count" example:"245"`
TotalTokens int64 `json:"total_tokens" example:"52000"`
InputTokens int64 `json:"input_tokens" example:"32000"`
OutputTokens int64 `json:"output_tokens" example:"20000"`
ErrorCount int64 `json:"error_count" example:"0"`
AvgLatencyMs float64 `json:"avg_latency_ms" example:"1100"`
}
TimeSeriesData represents a single time bucket in time series data
type TimeSeriesMeta ¶ added in v0.260124.900
type TimeSeriesMeta struct {
Interval string `json:"interval" example:"hour"`
StartTime string `json:"start_time" example:"2025-01-10T00:00:00Z"`
EndTime string `json:"end_time" example:"2025-01-11T00:00:00Z"`
}
TimeSeriesMeta represents metadata for time-series response
type TimeSeriesQuery ¶ added in v0.260124.900
type TimeSeriesQuery struct {
Interval string `json:"interval" form:"interval" description:"Time bucket: minute, hour, day, week" example:"hour"`
StartTime string `json:"start_time" form:"start_time" description:"ISO 8601 start time" example:"2025-01-10T00:00:00Z"`
EndTime string `json:"end_time" form:"end_time" description:"ISO 8601 end time" example:"2025-01-11T00:00:00Z"`
Provider string `json:"provider" form:"provider" description:"Filter by provider UUID"`
Model string `json:"model" form:"model" description:"Filter by model name"`
Scenario string `json:"scenario" form:"scenario" description:"Filter by scenario"`
}
TimeSeriesQuery represents query parameters for time-series data
type TimeSeriesResponse ¶ added in v0.260124.900
type TimeSeriesResponse struct {
Meta TimeSeriesMeta `json:"meta"`
Data []TimeSeriesData `json:"data"`
}
TimeSeriesResponse represents the response for time-series data
type ToggleProviderResponse ¶
type ToggleProviderResponse struct {
Success bool `json:"success" example:"true"`
Message string `json:"message" example:"Provider openai enabled successfully"`
Data struct {
Enabled bool `json:"enabled" example:"true"`
} `json:"data"`
}
ToggleProviderResponse represents the response for toggling a provider
type TokenInfo ¶
type TokenInfo struct {
Provider string `json:"provider" example:"anthropic"`
Valid bool `json:"valid" example:"true"`
ExpiresAt string `json:"expires_at,omitempty" example:"2024-01-01T12:00:00Z"`
}
TokenInfo represents OAuth token information
type TokenResponse ¶
type TokenResponse struct {
Token string `json:"token" example:"eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9..."`
Type string `json:"type" example:"Bearer"`
}
TokenResponse represents the token response
type UpdateProviderRequest ¶
type UpdateProviderRequest struct {
Name *string `json:"name,omitempty" description:"New provider name"`
APIBase *string `json:"api_base,omitempty" description:"New API base URL"`
APIStyle *string `json:"api_style,omitempty" description:"New API style"`
Token *string `json:"token,omitempty" description:"New API token"`
NoKeyRequired *bool `json:"no_key_required,omitempty" description:"Whether provider requires no API key"`
Enabled *bool `json:"enabled,omitempty" description:"New enabled status"`
ProxyURL *string `json:"proxy_url,omitempty" description:"HTTP or SOCKS proxy URL"`
}
UpdateProviderRequest represents the request to update a provider
type UpdateProviderResponse ¶
type UpdateProviderResponse struct {
Success bool `json:"success" example:"true"`
Message string `json:"message" example:"Provider updated successfully"`
Data ProviderResponse `json:"data"`
}
UpdateProviderResponse represents the response for updating a provider
type UpdateRuleRequest ¶ added in v0.260124.900
UpdateRuleRequest represents the request to set/update a rule
type UpdateRuleResponse ¶ added in v0.260124.900
type UpdateRuleResponse struct {
Success bool `json:"success" example:"true"`
Message string `json:"message" example:"Rule saved successfully"`
Data struct {
UUID string `json:"uuid"`
RequestModel string `json:"request_model" example:"gpt-3.5-turbo"`
ResponseModel string `json:"response_model" example:"gpt-3.5-turbo"`
Description string `json:"description" example:"My rule description"`
Provider string `json:"provider" example:"openai"`
DefaultModel string `json:"default_model" example:"gpt-3.5-turbo"`
Active bool `json:"active" example:"true"`
SmartEnabled bool `json:"smart_enabled" example:"false"`
SmartRouting []smartrouting.SmartRouting `json:"smart_routing,omitempty"`
} `json:"data"`
}
UpdateRuleResponse represents the response for setting/updating a rule
type UpdateRuleTacticRequest ¶
type UpdateRuleTacticRequest struct {
Tactic string `` /* 152-byte string literal not displayed */
}
UpdateRuleTacticRequest represents the request to update rule tactic
type UpdateRuleTacticResponse ¶
type UpdateRuleTacticResponse struct {
Message string `json:"message" example:"Tactic updated successfully"`
Tactic string `json:"tactic" example:"round_robin"`
}
UpdateRuleTacticResponse represents the response for updating rule tactic
type UsageAPI ¶ added in v0.260124.900
type UsageAPI struct {
// contains filtered or unexported fields
}
UsageAPI provides REST endpoints for usage statistics
func NewUsageAPI ¶ added in v0.260124.900
NewUsageAPI creates a new usage API
func (*UsageAPI) DeleteOldRecords ¶ added in v0.260124.900
DeleteOldRecords deletes usage records older than the specified date
func (*UsageAPI) GetRecords ¶ added in v0.260124.900
GetRecords returns individual usage records
func (*UsageAPI) GetTimeSeries ¶ added in v0.260124.900
GetTimeSeries returns time-series data for usage
type UsageRecordResponse ¶ added in v0.260124.900
type UsageRecordResponse struct {
ID uint `json:"id" example:"1"`
ProviderUUID string `json:"provider_uuid" example:"uuid-123"`
ProviderName string `json:"provider_name" example:"openai"`
Model string `json:"model" example:"gpt-4"`
Scenario string `json:"scenario" example:"openai"`
RuleUUID string `json:"rule_uuid,omitempty" example:"rule-uuid"`
RequestModel string `json:"request_model,omitempty" example:"gpt-4"`
Timestamp string `json:"timestamp" example:"2025-01-10T12:00:00Z"`
InputTokens int `json:"input_tokens" example:"1000"`
OutputTokens int `json:"output_tokens" example:"500"`
TotalTokens int `json:"total_tokens" example:"1500"`
Status string `json:"status" example:"success"`
ErrorCode string `json:"error_code,omitempty"`
LatencyMs int `json:"latency_ms" example:"1200"`
Streamed bool `json:"streamed" example:"true"`
}
UsageRecordResponse represents a single usage record
type UsageRecordsMeta ¶ added in v0.260124.900
type UsageRecordsMeta struct {
Total int `json:"total" example:"1000"`
Limit int `json:"limit" example:"50"`
Offset int `json:"offset" example:"0"`
}
UsageRecordsMeta represents metadata for usage records response
type UsageRecordsQuery ¶ added in v0.260124.900
type UsageRecordsQuery struct {
StartTime string `json:"start_time" form:"start_time" description:"ISO 8601 start time" example:"2025-01-10T00:00:00Z"`
EndTime string `json:"end_time" form:"end_time" description:"ISO 8601 end time" example:"2025-01-11T00:00:00Z"`
Provider string `json:"provider" form:"provider" description:"Filter by provider UUID"`
Model string `json:"model" form:"model" description:"Filter by model name"`
Scenario string `json:"scenario" form:"scenario" description:"Filter by scenario"`
Status string `json:"status" form:"status" description:"Filter by status"`
Limit int `json:"limit" form:"limit" description:"Max results (max 1000)" example:"50"`
Offset int `json:"offset" form:"offset" description:"Pagination offset" example:"0"`
}
UsageRecordsQuery represents query parameters for usage records
type UsageRecordsResponse ¶ added in v0.260124.900
type UsageRecordsResponse struct {
Meta UsageRecordsMeta `json:"meta"`
Data []UsageRecordResponse `json:"data"`
}
UsageRecordsResponse represents the response for usage records
type UsageStatsMeta ¶ added in v0.260124.900
type UsageStatsMeta struct {
StartTime string `json:"start_time" example:"2025-01-10T00:00:00Z"`
EndTime string `json:"end_time" example:"2025-01-11T00:00:00Z"`
GroupBy string `json:"group_by" example:"model"`
TotalCount int `json:"total_count" example:"10"`
}
UsageStatsMeta represents metadata for usage statistics response
type UsageStatsQuery ¶ added in v0.260124.900
type UsageStatsQuery struct {
GroupBy string `` /* 127-byte string literal not displayed */
StartTime string `json:"start_time" form:"start_time" description:"ISO 8601 start time" example:"2025-01-10T00:00:00Z"`
EndTime string `json:"end_time" form:"end_time" description:"ISO 8601 end time" example:"2025-01-11T00:00:00Z"`
Provider string `json:"provider" form:"provider" description:"Filter by provider UUID"`
Model string `json:"model" form:"model" description:"Filter by model name"`
Scenario string `json:"scenario" form:"scenario" description:"Filter by scenario"`
RuleUUID string `json:"rule_uuid" form:"rule_uuid" description:"Filter by rule UUID"`
Status string `json:"status" form:"status" description:"Filter by status: success, error, partial" example:"success"`
Limit int `json:"limit" form:"limit" description:"Max results to return" example:"100"`
SortBy string `json:"sort_by" form:"sort_by" description:"Sort field: total_tokens, request_count, avg_latency" example:"total_tokens"`
SortOrder string `json:"sort_order" form:"sort_order" description:"asc or desc" example:"desc"`
}
UsageStatsQuery represents query parameters for usage statistics
type UsageStatsResponse ¶ added in v0.260124.900
type UsageStatsResponse struct {
Meta UsageStatsMeta `json:"meta"`
Data []AggregatedStat `json:"data"`
}
UsageStatsResponse represents the response for usage statistics
type UsageTracker ¶
type UsageTracker struct {
// contains filtered or unexported fields
}
UsageTracker provides usage tracking methods for handlers. It encapsulates the logic for recording token usage to both service stats and detailed usage records.
func (*UsageTracker) RecordUsage ¶
func (t *UsageTracker) RecordUsage( c *gin.Context, rule *typ.Rule, provider *typ.Provider, model, requestModel string, inputTokens, outputTokens int, streamed bool, status, errorCode string, )
RecordUsage records token usage from a handler. It updates both the service-level stats and the detailed usage records.
Parameters:
- c: Gin context for accessing request metadata
- rule: The load balancer rule that was used
- provider: The provider that handled the request
- model: The actual model name used (not the requested model)
- requestModel: The original model name requested by the user
- inputTokens: Number of input/prompt tokens consumed
- outputTokens: Number of output/completion tokens consumed
- streamed: Whether this was a streaming request
- status: Request status - "success", "error", or "partial"
- errorCode: Error code if status is not "success"
type VersionChecker ¶
type VersionChecker struct {
// contains filtered or unexported fields
}
VersionChecker handles version-related operations
func (*VersionChecker) CheckLatestVersion ¶
func (vc *VersionChecker) CheckLatestVersion() (version, releaseURL string, err error)
CheckLatestVersion checks for the latest version with multiple fallbacks: 1. GitHub releases API (preferred) 2. npm registry (fallback) 3. npmmirror (China mirror, last resort)
type VersionInfo ¶
type VersionInfo struct {
Version string `json:"version" example:"1.0.0"`
}
type VersionInfoResponse ¶
type VersionInfoResponse struct {
Success bool `json:"success" example:"true"`
Message string `json:"message" example:"Provider models successfully"`
Data VersionInfo `json:"data"`
}
Source Files
¶
- adaptive_probe.go
- anthropic.go
- anthropic_error.go
- anthropic_token.go
- anthropic_v1.go
- anthropic_v1_beta.go
- codex_responses.go
- config_apply_handler.go
- config_apply_routes.go
- experimental.go
- handlers.go
- info_handler.go
- load_balancer.go
- load_balancer_api.go
- log_handler.go
- oauth_handler.go
- openai.go
- openai_responses.go
- openai_responses_models.go
- passthrough.go
- probe_cache.go
- probe_handler.go
- provider_handler.go
- rule_handler.go
- scenario_handler.go
- scenario_recording.go
- server.go
- server_models.go
- skill_handler.go
- smart_routing_helper.go
- template_handler.go
- tracking.go
- usage_api.go
- version_check.go
- webui.go