models

package
v1.2.0 Latest
Warning

This package is not in the latest version of its module.

Go to latest
Published: Feb 18, 2026 License: MIT Imports: 12 Imported by: 0

Documentation

Index

Constants

View Source
const (
	// ProviderAnthropic identifies Anthropic's first-party API.
	ProviderAnthropic ModelProvider = "anthropic"

	// Models
	// Internal model IDs for Anthropic-hosted models.
	// NOTE(review): the ID strings mix separator styles — "claude-4-5-sonnet[1m]"
	// vs "claude-4.5-opus" — confirm the inconsistency is intentional before
	// anything parses these IDs.
	Claude35Sonnet   ModelID = "claude-3.5-sonnet"
	Claude3Haiku     ModelID = "claude-3-haiku"
	Claude37Sonnet   ModelID = "claude-3.7-sonnet"
	Claude35Haiku    ModelID = "claude-3.5-haiku"
	Claude3Opus      ModelID = "claude-3-opus"
	Claude4Opus      ModelID = "claude-4-opus"
	Claude4Sonnet    ModelID = "claude-4-sonnet"
	Claude45Sonnet1M ModelID = "claude-4-5-sonnet[1m]" // 1M-token-context variant
	Claude45Opus     ModelID = "claude-4.5-opus"
	Claude46Opus     ModelID = "claude-4.6-opus"
)
View Source
const (
	// ProviderDeepSeek identifies the DeepSeek API.
	ProviderDeepSeek ModelProvider = "deepseek"

	// Internal model IDs for DeepSeek-hosted models.
	DeepSeekChat     ModelID = "deepseek-chat"
	DeepSeekReasoner ModelID = "deepseek-reasoner"
)
View Source
const (
	// ProviderGemini identifies the Google Gemini API.
	ProviderGemini ModelProvider = "gemini"

	// Models
	// Internal model IDs for Gemini models.
	Gemini20Flash     ModelID = "gemini-2.0-flash"
	Gemini20FlashLite ModelID = "gemini-2.0-flash-lite"
	Gemini25Flash     ModelID = "gemini-2.5-flash"
	Gemini25          ModelID = "gemini-2.5" // Gemini 2.5 Pro (see GeminiModels entry)
	Gemini30Pro       ModelID = "gemini-3.0-pro"
	Gemini30Flash     ModelID = "gemini-3.0-flash"
)
View Source
const (
	// ProviderGrok identifies the Groq inference API (value "groq").
	// NOTE(review): the constant name says "Grok" but the value and the
	// models below are Groq's; xAI's Grok models live under ProviderXAI.
	// Consider renaming to ProviderGroq in a breaking release.
	ProviderGrok ModelProvider = "groq"

	// GROQ
	QWENQwq ModelID = "qwen-qwq"

	// GROQ preview models
	// NOTE(review): this comment may be stale — confirm all four below are
	// still "preview" on Groq's side.
	Llama4Scout               ModelID = "meta-llama/llama-4-scout-17b-16e-instruct"
	Llama4Maverick            ModelID = "meta-llama/llama-4-maverick-17b-128e-instruct"
	Llama3_3_70BVersatile     ModelID = "llama-3.3-70b-versatile"
	DeepseekR1DistillLlama70b ModelID = "deepseek-r1-distill-llama-70b"
	Kimi_K2_0905              ModelID = "moonshotai/kimi-k2-instruct-0905"
)
View Source
const (
	// Provider Constants
	// ProviderBedrock identifies AWS Bedrock.
	ProviderBedrock ModelProvider = "bedrock"

	// ForTests
	// ProviderMock is a sentinel provider used only by tests.
	// NOTE(review): BedrockClaude45Sonnet sits under the "ForTests" comment
	// but looks like a production model ID — confirm its placement.
	ProviderMock          ModelProvider = "__mock"
	BedrockClaude45Sonnet ModelID       = "bedrock.claude-4.5-sonnet"
)
View Source
const (
	// ProviderOpenAI identifies the OpenAI API.
	ProviderOpenAI ModelProvider = "openai"

	// Internal model IDs for OpenAI models.
	GPT41        ModelID = "gpt-4.1"
	GPT41Mini    ModelID = "gpt-4.1-mini"
	GPT41Nano    ModelID = "gpt-4.1-nano"
	GPT45Preview ModelID = "gpt-4.5-preview"
	GPT4o        ModelID = "gpt-4o"
	GPT4oMini    ModelID = "gpt-4o-mini"
	O1           ModelID = "o1"
	O1Pro        ModelID = "o1-pro"
	O1Mini       ModelID = "o1-mini"
	O3           ModelID = "o3"
	O3Mini       ModelID = "o3-mini"
	O4Mini       ModelID = "o4-mini"
	GPT5         ModelID = "gpt-5"
)
View Source
const (
	// ProviderOpenRouter identifies the OpenRouter aggregation API.
	ProviderOpenRouter ModelProvider = "openrouter"

	// Internal model IDs for models reached through OpenRouter. The
	// "openrouter." prefix keeps them distinct from the first-party IDs.
	// NOTE(review): free variants use two different suffix conventions —
	// "-free" (OpenRouterDeepSeekR1Free) vs ":free" (OpenRouterGrok4FastFree,
	// OpenRouterTrinityLarge) — confirm which one downstream code expects.
	OpenRouterFree           ModelID = "openrouter.free"
	OpenRouterGPT41          ModelID = "openrouter.gpt-4.1"
	OpenRouterGPT41Mini      ModelID = "openrouter.gpt-4.1-mini"
	OpenRouterGPT41Nano      ModelID = "openrouter.gpt-4.1-nano"
	OpenRouterGPT45Preview   ModelID = "openrouter.gpt-4.5-preview"
	OpenRouterGPT4o          ModelID = "openrouter.gpt-4o"
	OpenRouterGPT4oMini      ModelID = "openrouter.gpt-4o-mini"
	OpenRouterO1             ModelID = "openrouter.o1"
	OpenRouterO1Pro          ModelID = "openrouter.o1-pro"
	OpenRouterO1Mini         ModelID = "openrouter.o1-mini"
	OpenRouterO3             ModelID = "openrouter.o3"
	OpenRouterO3Mini         ModelID = "openrouter.o3-mini"
	OpenRouterO4Mini         ModelID = "openrouter.o4-mini"
	OpenRouterGemini25Flash  ModelID = "openrouter.gemini-2.5-flash"
	OpenRouterGemini25       ModelID = "openrouter.gemini-2.5"
	OpenRouterGemini3Flash   ModelID = "openrouter.gemini-3-flash-preview"
	OpenRouterGemini3        ModelID = "openrouter.gemini-3-pro-preview"
	OpenRouterClaude35Sonnet ModelID = "openrouter.claude-3.5-sonnet"
	OpenRouterClaude3Haiku   ModelID = "openrouter.claude-3-haiku"
	OpenRouterClaude37Sonnet ModelID = "openrouter.claude-3.7-sonnet"
	OpenRouterClaude35Haiku  ModelID = "openrouter.claude-3.5-haiku"
	OpenRouterClaude3Opus    ModelID = "openrouter.claude-3-opus"
	OpenRouterKimiK2         ModelID = "openrouter.kimi-k2"
	OpenRouterNemotron3Nano  ModelID = "openrouter.nemotron-3-nano"
	OpenRouterGLM47Flash     ModelID = "openrouter.glm-4.7-flash"
	OpenRouterGPT52          ModelID = "openrouter.gpt-5.2"
	OpenRouterGPT52Codex     ModelID = "openrouter.gpt-5.2-codex"
	OpenRouterDeepSeekR1Free ModelID = "openrouter.deepseek-r1-free"
	OpenRouterDeepSeekV32    ModelID = "openrouter.deepseek-v3.2"
	OpenRouterDevstral2      ModelID = "openrouter.devstral-2"
	OpenRouterMiMoV2         ModelID = "openrouter.mimo-v2"
	OpenRouterMiMoV2Flash    ModelID = "openrouter.mimo-v2-flash"
	OpenRouterGrok4Fast      ModelID = "openrouter.grok-4-fast"
	OpenRouterGrok4FastFree  ModelID = "openrouter.grok-4-fast:free"
	OpenRouterGrok41Fast     ModelID = "openrouter.grok-4.1-fast"
	OpenRouterMiniMax01      ModelID = "openrouter.minimax-01"
	OpenRouterMiniMaxM1      ModelID = "openrouter.minimax-m1"
	OpenRouterMiniMaxM2      ModelID = "openrouter.minimax-m2"
	OpenRouterMiniMaxM21     ModelID = "openrouter.minimax-m2.1"
	OpenRouterMiniMaxM25     ModelID = "openrouter.minimax-m2.5"
	OpenRouterTrinityLarge   ModelID = "openrouter.trinity-large-preview:free"
)
View Source
const (
	// ProviderVertexAI identifies Google Cloud Vertex AI.
	ProviderVertexAI ModelProvider = "vertexai"

	// Models
	// Internal model IDs for Vertex AI-hosted models.
	// NOTE(review): the "-m" suffix on VertexAISonnet45M presumably marks a
	// long-context variant — confirm and document the intended meaning.
	VertexAIGemini30Flash ModelID = "vertexai.gemini-3.0-flash"
	VertexAIGemini30Pro   ModelID = "vertexai.gemini-3.0-pro"
	VertexAISonnet45M     ModelID = "vertexai.claude-sonnet-4-5-m"
	VertexAIOpus45        ModelID = "vertexai.claude-opus-4-5"
	VertexAIOpus46        ModelID = "vertexai.claude-opus-4-6"
)
View Source
const (
	// ProviderXAI identifies the xAI (Grok) API.
	ProviderXAI ModelProvider = "xai"

	// Internal model IDs for xAI models. Unlike most providers above, these
	// carry no package-specific prefix and match the upstream API names.
	XAIGrok41FastReasoning    ModelID = "grok-4-1-fast-reasoning"
	XAIGrok41FastNonReasoning ModelID = "grok-4-1-fast-non-reasoning"
	XAIGrokCodeFast1          ModelID = "grok-code-fast-1"
	XAIGrok4FastReasoning     ModelID = "grok-4-fast-reasoning"
	XAIGrok4FastNonReasoning  ModelID = "grok-4-fast-non-reasoning"
	XAIGrok40709              ModelID = "grok-4-0709"
)

Variables

View Source
// AnthropicModels describes the Anthropic models known to this package,
// keyed by internal ModelID. Costs are USD per million tokens; in this table
// the cache-write price (CostPer1MInCached) is 1.25x the base input price for
// every entry, per Anthropic's prompt-caching pricing.
//
// https://docs.anthropic.com/en/docs/about-claude/models/all-models
var AnthropicModels = map[ModelID]Model{
	Claude35Sonnet: {
		ID:                  Claude35Sonnet,
		Name:                "Claude 3.5 Sonnet",
		Provider:            ProviderAnthropic,
		APIModel:            "claude-3-5-sonnet-latest",
		CostPer1MIn:         3.0,
		CostPer1MInCached:   3.75,
		CostPer1MOutCached:  0.30,
		CostPer1MOut:        15.0,
		ContextWindow:       200000,
		DefaultMaxTokens:    5000,
		SupportsAttachments: true,
	},
	Claude3Haiku: {
		ID:                  Claude3Haiku,
		Name:                "Claude 3 Haiku",
		Provider:            ProviderAnthropic,
		APIModel:            "claude-3-haiku-20240307",
		CostPer1MIn:         0.25,
		CostPer1MInCached:   0.30,
		CostPer1MOutCached:  0.03,
		CostPer1MOut:        1.25,
		ContextWindow:       200000,
		DefaultMaxTokens:    4096,
		SupportsAttachments: true,
	},
	Claude37Sonnet: {
		ID:                  Claude37Sonnet,
		Name:                "Claude 3.7 Sonnet",
		Provider:            ProviderAnthropic,
		APIModel:            "claude-3-7-sonnet-latest",
		CostPer1MIn:         3.0,
		CostPer1MInCached:   3.75,
		CostPer1MOutCached:  0.30,
		CostPer1MOut:        15.0,
		ContextWindow:       200000,
		DefaultMaxTokens:    50000,
		CanReason:           true,
		SupportsAttachments: true,
	},
	Claude35Haiku: {
		ID:                  Claude35Haiku,
		Name:                "Claude 3.5 Haiku",
		Provider:            ProviderAnthropic,
		APIModel:            "claude-3-5-haiku-latest",
		CostPer1MIn:         0.80,
		CostPer1MInCached:   1.0,
		CostPer1MOutCached:  0.08,
		CostPer1MOut:        4.0,
		ContextWindow:       200000,
		DefaultMaxTokens:    4096,
		SupportsAttachments: true,
	},
	Claude3Opus: {
		ID:                  Claude3Opus,
		Name:                "Claude 3 Opus",
		Provider:            ProviderAnthropic,
		APIModel:            "claude-3-opus-latest",
		CostPer1MIn:         15.0,
		CostPer1MInCached:   18.75,
		CostPer1MOutCached:  1.50,
		CostPer1MOut:        75.0,
		ContextWindow:       200000,
		DefaultMaxTokens:    4096,
		SupportsAttachments: true,
	},
	Claude4Sonnet: {
		ID:                  Claude4Sonnet,
		Name:                "Claude 4 Sonnet",
		Provider:            ProviderAnthropic,
		APIModel:            "claude-sonnet-4-20250514",
		CostPer1MIn:         3.0,
		CostPer1MInCached:   3.75,
		CostPer1MOutCached:  0.30,
		CostPer1MOut:        15.0,
		ContextWindow:       200000,
		DefaultMaxTokens:    50000,
		CanReason:           true,
		SupportsAttachments: true,
	},
	Claude4Opus: {
		ID:                  Claude4Opus,
		Name:                "Claude 4 Opus",
		Provider:            ProviderAnthropic,
		APIModel:            "claude-opus-4-20250514",
		CostPer1MIn:         15.0,
		CostPer1MInCached:   18.75,
		CostPer1MOutCached:  1.50,
		CostPer1MOut:        75.0,
		ContextWindow:       200000,
		DefaultMaxTokens:    4096,
		SupportsAttachments: true,
	},
	Claude45Sonnet1M: {
		ID:                  Claude45Sonnet1M,
		Name:                "Claude 4.5 Sonnet",
		Provider:            ProviderAnthropic,
		APIModel:            "claude-sonnet-4-5",
		CostPer1MIn:         3.0,
		CostPer1MInCached:   3.75,
		CostPer1MOutCached:  0.30,
		CostPer1MOut:        15.0,
		ContextWindow:       1000000,
		DefaultMaxTokens:    64000,
		CanReason:           true,
		SupportsAttachments: true,
	},
	Claude45Opus: {
		ID:       Claude45Opus,
		Name:     "Claude 4.5 Opus",
		Provider: ProviderAnthropic,
		APIModel: "claude-opus-4-5-20251101",
		CostPer1MIn: 5.0,
		// Fixed: was 6.75, which breaks the 1.25x cache-write rule every
		// other entry follows (5.0 * 1.25 = 6.25, matching Anthropic's
		// published Opus 4.5 pricing and the Claude46Opus entry below).
		CostPer1MInCached:   6.25,
		CostPer1MOutCached:  0.50,
		CostPer1MOut:        25.0,
		ContextWindow:       200000,
		DefaultMaxTokens:    32000,
		CanReason:           true,
		SupportsAttachments: true,
	},
	Claude46Opus: {
		ID:                       Claude46Opus,
		Name:                     "Claude 4.6 Opus",
		Provider:                 ProviderAnthropic,
		APIModel:                 "claude-opus-4-6",
		CostPer1MIn:              5.0,
		CostPer1MInCached:        6.25,
		CostPer1MOutCached:       0.50,
		CostPer1MOut:             25.0,
		ContextWindow:            1000000,
		DefaultMaxTokens:         128000,
		CanReason:                true,
		SupportsAdaptiveThinking: true,
		SupportsMaximumThinking:  true,
		SupportsAttachments:      true,
	},
}

https://docs.anthropic.com/en/docs/about-claude/models/all-models

View Source
// DeepSeekModels describes the models served by the DeepSeek API, keyed by
// internal ModelID. Costs are USD per million tokens; CostPer1MOutCached is
// deliberately left at its zero value for both entries.
var DeepSeekModels = map[ModelID]Model{
	DeepSeekChat: {
		ID:       DeepSeekChat,
		Name:     "DeepSeek Chat",
		Provider: ProviderDeepSeek,
		APIModel: "deepseek-chat",

		CostPer1MIn:       0.28,
		CostPer1MInCached: 0.028,
		CostPer1MOut:      0.42,

		ContextWindow:    131_072,
		DefaultMaxTokens: 8192,
		// NOTE(review): attachments enabled for a chat model — confirm the
		// DeepSeek endpoint actually accepts them.
		SupportsAttachments: true,
	},
	DeepSeekReasoner: {
		ID:       DeepSeekReasoner,
		Name:     "DeepSeek Reasoner",
		Provider: ProviderDeepSeek,
		APIModel: "deepseek-reasoner",

		CostPer1MIn:       0.28,
		CostPer1MInCached: 0.028,
		CostPer1MOut:      0.42,

		ContextWindow:       131_072,
		DefaultMaxTokens:    65536,
		CanReason:           true,
		SupportsAttachments: true,
	},
}
View Source
// GeminiModels describes Google Gemini models, keyed by internal ModelID.
// Costs are USD per million tokens.
var GeminiModels = map[ModelID]Model{
	Gemini20Flash: {
		ID:                  Gemini20Flash,
		Name:                "Gemini 2.0 Flash",
		Provider:            ProviderGemini,
		APIModel:            "gemini-2.0-flash",
		CostPer1MIn:         0.10,
		CostPer1MInCached:   0,
		CostPer1MOutCached:  0,
		CostPer1MOut:        0.40,
		ContextWindow:       1000000,
		DefaultMaxTokens:    6000,
		SupportsAttachments: true,
	},
	Gemini20FlashLite: {
		ID:                  Gemini20FlashLite,
		Name:                "Gemini 2.0 Flash Lite",
		Provider:            ProviderGemini,
		APIModel:            "gemini-2.0-flash-lite",
		CostPer1MIn:         0.05,
		CostPer1MInCached:   0,
		CostPer1MOutCached:  0,
		CostPer1MOut:        0.30,
		ContextWindow:       1000000,
		DefaultMaxTokens:    6000,
		SupportsAttachments: true,
	},
	Gemini25Flash: {
		ID:       Gemini25Flash,
		Name:     "Gemini 2.5 Flash",
		Provider: ProviderGemini,
		// NOTE(review): pinned to a dated preview release — confirm this is
		// still served, or move to the GA model name.
		APIModel:            "gemini-2.5-flash-preview-04-17",
		CostPer1MIn:         0.15,
		CostPer1MInCached:   0,
		CostPer1MOutCached:  0,
		CostPer1MOut:        0.60,
		ContextWindow:       1000000,
		DefaultMaxTokens:    50000,
		SupportsAttachments: true,
	},
	Gemini25: {
		ID:       Gemini25,
		Name:     "Gemini 2.5 Pro",
		Provider: ProviderGemini,
		// NOTE(review): dated preview release — see note on Gemini25Flash.
		APIModel:            "gemini-2.5-pro-preview-05-06",
		CostPer1MIn:         1.25,
		CostPer1MInCached:   0,
		CostPer1MOutCached:  0,
		CostPer1MOut:        10,
		ContextWindow:       1000000,
		DefaultMaxTokens:    50000,
		SupportsAttachments: true,
	},
	Gemini30Pro: {
		ID:                       Gemini30Pro,
		Name:                     "Gemini 3.0 Pro",
		Provider:                 ProviderGemini,
		APIModel:                 "gemini-3-pro-preview",
		CostPer1MIn:              2,
		CostPer1MInCached:        0.2,
		CostPer1MOutCached:       0.3833,
		CostPer1MOut:             12,
		ContextWindow:            1048576,
		DefaultMaxTokens:         65535,
		SupportsAttachments:      true,
		SupportsAdaptiveThinking: true,
		CanReason:                true,
	},
	Gemini30Flash: {
		ID:                Gemini30Flash,
		Name:              "Gemini 3.0 Flash",
		Provider:          ProviderGemini,
		APIModel:          "gemini-3-flash-preview",
		CostPer1MIn:       0.5,
		CostPer1MInCached: 0.05,
		// NOTE(review): identical to Gemini 3.0 Pro's 0.3833 even though
		// every other cost here is ~4-6x cheaper — possible copy-paste;
		// verify against current Gemini pricing.
		CostPer1MOutCached:       0.3833,
		CostPer1MOut:             3,
		ContextWindow:            1048576,
		DefaultMaxTokens:         65535,
		SupportsAttachments:      true,
		SupportsAdaptiveThinking: true,
		CanReason:                true,
	},
}
View Source
// GroqModels describes models served by the Groq inference API, keyed by
// internal ModelID. Costs are USD per million tokens.
// NOTE(review): the provider constant is spelled ProviderGrok but its value
// is "groq" — see the note on the constant itself.
var GroqModels = map[ModelID]Model{

	QWENQwq: {
		ID:                 QWENQwq,
		Name:               "Qwen Qwq",
		Provider:           ProviderGrok,
		APIModel:           "qwen-qwq-32b",
		CostPer1MIn:        0.29,
		CostPer1MInCached:  0.275,
		CostPer1MOutCached: 0.0,
		CostPer1MOut:       0.39,
		ContextWindow:      128_000,
		DefaultMaxTokens:   50000,

		CanReason:           false,
		SupportsAttachments: false,
	},

	// NOTE(review): the four entries below set no DefaultMaxTokens (zero
	// value) and use un-spaced internal names ("Llama4Scout") as the
	// display Name, unlike the rest of the file — confirm both are intended.
	Llama4Scout: {
		ID:                  Llama4Scout,
		Name:                "Llama4Scout",
		Provider:            ProviderGrok,
		APIModel:            "meta-llama/llama-4-scout-17b-16e-instruct",
		CostPer1MIn:         0.11,
		CostPer1MInCached:   0,
		CostPer1MOutCached:  0,
		CostPer1MOut:        0.34,
		ContextWindow:       128_000,
		SupportsAttachments: true,
	},

	Llama4Maverick: {
		ID:                  Llama4Maverick,
		Name:                "Llama4Maverick",
		Provider:            ProviderGrok,
		APIModel:            "meta-llama/llama-4-maverick-17b-128e-instruct",
		CostPer1MIn:         0.20,
		CostPer1MInCached:   0,
		CostPer1MOutCached:  0,
		CostPer1MOut:        0.20,
		ContextWindow:       128_000,
		SupportsAttachments: true,
	},

	Llama3_3_70BVersatile: {
		ID:                  Llama3_3_70BVersatile,
		Name:                "Llama3_3_70BVersatile",
		Provider:            ProviderGrok,
		APIModel:            "llama-3.3-70b-versatile",
		CostPer1MIn:         0.59,
		CostPer1MInCached:   0,
		CostPer1MOutCached:  0,
		CostPer1MOut:        0.79,
		ContextWindow:       128_000,
		SupportsAttachments: false,
	},

	DeepseekR1DistillLlama70b: {
		ID:                  DeepseekR1DistillLlama70b,
		Name:                "DeepseekR1DistillLlama70b",
		Provider:            ProviderGrok,
		APIModel:            "deepseek-r1-distill-llama-70b",
		CostPer1MIn:         0.75,
		CostPer1MInCached:   0,
		CostPer1MOutCached:  0,
		CostPer1MOut:        0.99,
		ContextWindow:       128_000,
		CanReason:           true,
		SupportsAttachments: false,
	},
	Kimi_K2_0905: {
		ID:                  Kimi_K2_0905,
		Name:                "Kimi K2 0905",
		Provider:            ProviderGrok,
		APIModel:            "moonshotai/kimi-k2-instruct-0905",
		CostPer1MIn:         1,
		CostPer1MInCached:   0,
		CostPer1MOutCached:  0,
		CostPer1MOut:        3,
		ContextWindow:       131_072,
		DefaultMaxTokens:    16_384,
		SupportsAttachments: true,
	},
}
View Source
// OpenAIModels describes OpenAI models, keyed by internal ModelID.
// Costs are USD per million tokens.
var OpenAIModels = map[ModelID]Model{
	GPT41: {
		ID:                  GPT41,
		Name:                "GPT 4.1",
		Provider:            ProviderOpenAI,
		APIModel:            "gpt-4.1",
		CostPer1MIn:         2.00,
		CostPer1MInCached:   0.50,
		CostPer1MOutCached:  0.0,
		CostPer1MOut:        8.00,
		ContextWindow:       1_047_576,
		DefaultMaxTokens:    20000,
		SupportsAttachments: true,
	},
	GPT41Mini: {
		ID:       GPT41Mini,
		Name:     "GPT 4.1 mini",
		Provider: ProviderOpenAI,
		// Fixed: was "gpt-4.1" (copy-paste from the entry above), which
		// routed — and billed — GPT 4.1 mini requests to full GPT 4.1.
		// The OpenRouter mirror already used "openai/gpt-4.1-mini".
		APIModel:           "gpt-4.1-mini",
		CostPer1MIn:        0.40,
		CostPer1MInCached:  0.10,
		CostPer1MOutCached: 0.0,
		CostPer1MOut:       1.60,
		// NOTE(review): OpenAI documents 1,047,576 tokens for gpt-4.1-mini,
		// same as gpt-4.1/nano — confirm whether 200_000 is deliberate.
		ContextWindow:       200_000,
		DefaultMaxTokens:    20000,
		SupportsAttachments: true,
	},
	GPT41Nano: {
		ID:                  GPT41Nano,
		Name:                "GPT 4.1 nano",
		Provider:            ProviderOpenAI,
		APIModel:            "gpt-4.1-nano",
		CostPer1MIn:         0.10,
		CostPer1MInCached:   0.025,
		CostPer1MOutCached:  0.0,
		CostPer1MOut:        0.40,
		ContextWindow:       1_047_576,
		DefaultMaxTokens:    20000,
		SupportsAttachments: true,
	},
	GPT45Preview: {
		ID:                  GPT45Preview,
		Name:                "GPT 4.5 preview",
		Provider:            ProviderOpenAI,
		APIModel:            "gpt-4.5-preview",
		CostPer1MIn:         75.00,
		CostPer1MInCached:   37.50,
		CostPer1MOutCached:  0.0,
		CostPer1MOut:        150.00,
		ContextWindow:       128_000,
		DefaultMaxTokens:    15000,
		SupportsAttachments: true,
	},
	GPT4o: {
		ID:                  GPT4o,
		Name:                "GPT 4o",
		Provider:            ProviderOpenAI,
		APIModel:            "gpt-4o",
		CostPer1MIn:         2.50,
		CostPer1MInCached:   1.25,
		CostPer1MOutCached:  0.0,
		CostPer1MOut:        10.00,
		ContextWindow:       128_000,
		DefaultMaxTokens:    4096,
		SupportsAttachments: true,
	},
	GPT4oMini: {
		ID:                 GPT4oMini,
		Name:               "GPT 4o mini",
		Provider:           ProviderOpenAI,
		APIModel:           "gpt-4o-mini",
		CostPer1MIn:        0.15,
		CostPer1MInCached:  0.075,
		CostPer1MOutCached: 0.0,
		CostPer1MOut:       0.60,
		ContextWindow:      128_000,
		// NOTE(review): no DefaultMaxTokens here (zero value), unlike the
		// sibling entries — confirm downstream handles 0 as "unset".
		SupportsAttachments: true,
	},
	O1: {
		ID:                  O1,
		Name:                "O1",
		Provider:            ProviderOpenAI,
		APIModel:            "o1",
		CostPer1MIn:         15.00,
		CostPer1MInCached:   7.50,
		CostPer1MOutCached:  0.0,
		CostPer1MOut:        60.00,
		ContextWindow:       200_000,
		DefaultMaxTokens:    50000,
		CanReason:           true,
		SupportsAttachments: true,
	},
	O1Pro: {
		ID:                  O1Pro,
		Name:                "o1 pro",
		Provider:            ProviderOpenAI,
		APIModel:            "o1-pro",
		CostPer1MIn:         150.00,
		CostPer1MInCached:   0.0,
		CostPer1MOutCached:  0.0,
		CostPer1MOut:        600.00,
		ContextWindow:       200_000,
		DefaultMaxTokens:    50000,
		CanReason:           true,
		SupportsAttachments: true,
	},
	O1Mini: {
		ID:                  O1Mini,
		Name:                "o1 mini",
		Provider:            ProviderOpenAI,
		APIModel:            "o1-mini",
		CostPer1MIn:         1.10,
		CostPer1MInCached:   0.55,
		CostPer1MOutCached:  0.0,
		CostPer1MOut:        4.40,
		ContextWindow:       128_000,
		DefaultMaxTokens:    50000,
		CanReason:           true,
		SupportsAttachments: true,
	},
	O3: {
		ID:                  O3,
		Name:                "o3",
		Provider:            ProviderOpenAI,
		APIModel:            "o3",
		CostPer1MIn:         10.00,
		CostPer1MInCached:   2.50,
		CostPer1MOutCached:  0.0,
		CostPer1MOut:        40.00,
		ContextWindow:       200_000,
		CanReason:           true,
		SupportsAttachments: true,
	},
	O3Mini: {
		ID:                  O3Mini,
		Name:                "o3 mini",
		Provider:            ProviderOpenAI,
		APIModel:            "o3-mini",
		CostPer1MIn:         1.10,
		CostPer1MInCached:   0.55,
		CostPer1MOutCached:  0.0,
		CostPer1MOut:        4.40,
		ContextWindow:       200_000,
		DefaultMaxTokens:    50000,
		CanReason:           true,
		SupportsAttachments: false,
	},
	O4Mini: {
		ID:                  O4Mini,
		Name:                "o4 mini",
		Provider:            ProviderOpenAI,
		APIModel:            "o4-mini",
		CostPer1MIn:         1.10,
		CostPer1MInCached:   0.275,
		CostPer1MOutCached:  0.0,
		CostPer1MOut:        4.40,
		ContextWindow:       128_000,
		DefaultMaxTokens:    50000,
		CanReason:           true,
		SupportsAttachments: true,
	},
	GPT5: {
		ID:                  GPT5,
		Name:                "GPT 5",
		Provider:            ProviderOpenAI,
		APIModel:            "gpt-5",
		CostPer1MIn:         1.25,
		CostPer1MInCached:   0.125,
		CostPer1MOutCached:  0.0,
		CostPer1MOut:        10,
		ContextWindow:       400_000,
		DefaultMaxTokens:    128_000,
		CanReason:           true,
		SupportsAttachments: true,
	},
}
View Source
var OpenRouterModels = map[ModelID]Model{
	OpenRouterFree: {
		ID:                  OpenRouterFree,
		Name:                "OpenRouter - Free Models Router",
		Provider:            ProviderOpenRouter,
		APIModel:            "openrouter/free",
		CostPer1MIn:         0,
		CostPer1MInCached:   0,
		CostPer1MOut:        0,
		CostPer1MOutCached:  0,
		ContextWindow:       200_000,
		DefaultMaxTokens:    50000,
		SupportsAttachments: true,
		CanReason:           true,
	},
	OpenRouterGPT41: {
		ID:                 OpenRouterGPT41,
		Name:               "OpenRouter - GPT 4.1",
		Provider:           ProviderOpenRouter,
		APIModel:           "openai/gpt-4.1",
		CostPer1MIn:        OpenAIModels[GPT41].CostPer1MIn,
		CostPer1MInCached:  OpenAIModels[GPT41].CostPer1MInCached,
		CostPer1MOut:       OpenAIModels[GPT41].CostPer1MOut,
		CostPer1MOutCached: OpenAIModels[GPT41].CostPer1MOutCached,
		ContextWindow:      OpenAIModels[GPT41].ContextWindow,
		DefaultMaxTokens:   OpenAIModels[GPT41].DefaultMaxTokens,
	},
	OpenRouterGPT41Mini: {
		ID:                 OpenRouterGPT41Mini,
		Name:               "OpenRouter - GPT 4.1 mini",
		Provider:           ProviderOpenRouter,
		APIModel:           "openai/gpt-4.1-mini",
		CostPer1MIn:        OpenAIModels[GPT41Mini].CostPer1MIn,
		CostPer1MInCached:  OpenAIModels[GPT41Mini].CostPer1MInCached,
		CostPer1MOut:       OpenAIModels[GPT41Mini].CostPer1MOut,
		CostPer1MOutCached: OpenAIModels[GPT41Mini].CostPer1MOutCached,
		ContextWindow:      OpenAIModels[GPT41Mini].ContextWindow,
		DefaultMaxTokens:   OpenAIModels[GPT41Mini].DefaultMaxTokens,
	},
	OpenRouterGPT41Nano: {
		ID:                 OpenRouterGPT41Nano,
		Name:               "OpenRouter - GPT 4.1 nano",
		Provider:           ProviderOpenRouter,
		APIModel:           "openai/gpt-4.1-nano",
		CostPer1MIn:        OpenAIModels[GPT41Nano].CostPer1MIn,
		CostPer1MInCached:  OpenAIModels[GPT41Nano].CostPer1MInCached,
		CostPer1MOut:       OpenAIModels[GPT41Nano].CostPer1MOut,
		CostPer1MOutCached: OpenAIModels[GPT41Nano].CostPer1MOutCached,
		ContextWindow:      OpenAIModels[GPT41Nano].ContextWindow,
		DefaultMaxTokens:   OpenAIModels[GPT41Nano].DefaultMaxTokens,
	},
	OpenRouterGPT45Preview: {
		ID:                 OpenRouterGPT45Preview,
		Name:               "OpenRouter - GPT 4.5 preview",
		Provider:           ProviderOpenRouter,
		APIModel:           "openai/gpt-4.5-preview",
		CostPer1MIn:        OpenAIModels[GPT45Preview].CostPer1MIn,
		CostPer1MInCached:  OpenAIModels[GPT45Preview].CostPer1MInCached,
		CostPer1MOut:       OpenAIModels[GPT45Preview].CostPer1MOut,
		CostPer1MOutCached: OpenAIModels[GPT45Preview].CostPer1MOutCached,
		ContextWindow:      OpenAIModels[GPT45Preview].ContextWindow,
		DefaultMaxTokens:   OpenAIModels[GPT45Preview].DefaultMaxTokens,
	},
	OpenRouterGPT4o: {
		ID:                 OpenRouterGPT4o,
		Name:               "OpenRouter - GPT 4o",
		Provider:           ProviderOpenRouter,
		APIModel:           "openai/gpt-4o",
		CostPer1MIn:        OpenAIModels[GPT4o].CostPer1MIn,
		CostPer1MInCached:  OpenAIModels[GPT4o].CostPer1MInCached,
		CostPer1MOut:       OpenAIModels[GPT4o].CostPer1MOut,
		CostPer1MOutCached: OpenAIModels[GPT4o].CostPer1MOutCached,
		ContextWindow:      OpenAIModels[GPT4o].ContextWindow,
		DefaultMaxTokens:   OpenAIModels[GPT4o].DefaultMaxTokens,
	},
	OpenRouterGPT4oMini: {
		ID:                 OpenRouterGPT4oMini,
		Name:               "OpenRouter - GPT 4o mini",
		Provider:           ProviderOpenRouter,
		APIModel:           "openai/gpt-4o-mini",
		CostPer1MIn:        OpenAIModels[GPT4oMini].CostPer1MIn,
		CostPer1MInCached:  OpenAIModels[GPT4oMini].CostPer1MInCached,
		CostPer1MOut:       OpenAIModels[GPT4oMini].CostPer1MOut,
		CostPer1MOutCached: OpenAIModels[GPT4oMini].CostPer1MOutCached,
		ContextWindow:      OpenAIModels[GPT4oMini].ContextWindow,
	},
	OpenRouterO1: {
		ID:                 OpenRouterO1,
		Name:               "OpenRouter - O1",
		Provider:           ProviderOpenRouter,
		APIModel:           "openai/o1",
		CostPer1MIn:        OpenAIModels[O1].CostPer1MIn,
		CostPer1MInCached:  OpenAIModels[O1].CostPer1MInCached,
		CostPer1MOut:       OpenAIModels[O1].CostPer1MOut,
		CostPer1MOutCached: OpenAIModels[O1].CostPer1MOutCached,
		ContextWindow:      OpenAIModels[O1].ContextWindow,
		DefaultMaxTokens:   OpenAIModels[O1].DefaultMaxTokens,
		CanReason:          OpenAIModels[O1].CanReason,
	},
	OpenRouterO1Pro: {
		ID:                 OpenRouterO1Pro,
		Name:               "OpenRouter - o1 pro",
		Provider:           ProviderOpenRouter,
		APIModel:           "openai/o1-pro",
		CostPer1MIn:        OpenAIModels[O1Pro].CostPer1MIn,
		CostPer1MInCached:  OpenAIModels[O1Pro].CostPer1MInCached,
		CostPer1MOut:       OpenAIModels[O1Pro].CostPer1MOut,
		CostPer1MOutCached: OpenAIModels[O1Pro].CostPer1MOutCached,
		ContextWindow:      OpenAIModels[O1Pro].ContextWindow,
		DefaultMaxTokens:   OpenAIModels[O1Pro].DefaultMaxTokens,
		CanReason:          OpenAIModels[O1Pro].CanReason,
	},
	OpenRouterO1Mini: {
		ID:                 OpenRouterO1Mini,
		Name:               "OpenRouter - o1 mini",
		Provider:           ProviderOpenRouter,
		APIModel:           "openai/o1-mini",
		CostPer1MIn:        OpenAIModels[O1Mini].CostPer1MIn,
		CostPer1MInCached:  OpenAIModels[O1Mini].CostPer1MInCached,
		CostPer1MOut:       OpenAIModels[O1Mini].CostPer1MOut,
		CostPer1MOutCached: OpenAIModels[O1Mini].CostPer1MOutCached,
		ContextWindow:      OpenAIModels[O1Mini].ContextWindow,
		DefaultMaxTokens:   OpenAIModels[O1Mini].DefaultMaxTokens,
		CanReason:          OpenAIModels[O1Mini].CanReason,
	},
	OpenRouterO3: {
		ID:                 OpenRouterO3,
		Name:               "OpenRouter - o3",
		Provider:           ProviderOpenRouter,
		APIModel:           "openai/o3",
		CostPer1MIn:        OpenAIModels[O3].CostPer1MIn,
		CostPer1MInCached:  OpenAIModels[O3].CostPer1MInCached,
		CostPer1MOut:       OpenAIModels[O3].CostPer1MOut,
		CostPer1MOutCached: OpenAIModels[O3].CostPer1MOutCached,
		ContextWindow:      OpenAIModels[O3].ContextWindow,
		DefaultMaxTokens:   OpenAIModels[O3].DefaultMaxTokens,
		CanReason:          OpenAIModels[O3].CanReason,
	},
	OpenRouterO3Mini: {
		ID:                 OpenRouterO3Mini,
		Name:               "OpenRouter - o3 mini",
		Provider:           ProviderOpenRouter,
		APIModel:           "openai/o3-mini-high",
		CostPer1MIn:        OpenAIModels[O3Mini].CostPer1MIn,
		CostPer1MInCached:  OpenAIModels[O3Mini].CostPer1MInCached,
		CostPer1MOut:       OpenAIModels[O3Mini].CostPer1MOut,
		CostPer1MOutCached: OpenAIModels[O3Mini].CostPer1MOutCached,
		ContextWindow:      OpenAIModels[O3Mini].ContextWindow,
		DefaultMaxTokens:   OpenAIModels[O3Mini].DefaultMaxTokens,
		CanReason:          OpenAIModels[O3Mini].CanReason,
	},
	OpenRouterO4Mini: {
		ID:                 OpenRouterO4Mini,
		Name:               "OpenRouter - o4 mini",
		Provider:           ProviderOpenRouter,
		APIModel:           "openai/o4-mini-high",
		CostPer1MIn:        OpenAIModels[O4Mini].CostPer1MIn,
		CostPer1MInCached:  OpenAIModels[O4Mini].CostPer1MInCached,
		CostPer1MOut:       OpenAIModels[O4Mini].CostPer1MOut,
		CostPer1MOutCached: OpenAIModels[O4Mini].CostPer1MOutCached,
		ContextWindow:      OpenAIModels[O4Mini].ContextWindow,
		DefaultMaxTokens:   OpenAIModels[O4Mini].DefaultMaxTokens,
		CanReason:          OpenAIModels[O4Mini].CanReason,
	},
	OpenRouterGemini25Flash: {
		ID:                       OpenRouterGemini25Flash,
		Name:                     "OpenRouter - Gemini 2.5 Flash",
		Provider:                 ProviderOpenRouter,
		APIModel:                 "google/gemini-2.5-flash-preview:thinking",
		CostPer1MIn:              GeminiModels[Gemini25Flash].CostPer1MIn,
		CostPer1MInCached:        GeminiModels[Gemini25Flash].CostPer1MInCached,
		CostPer1MOut:             GeminiModels[Gemini25Flash].CostPer1MOut,
		CostPer1MOutCached:       GeminiModels[Gemini25Flash].CostPer1MOutCached,
		ContextWindow:            GeminiModels[Gemini25Flash].ContextWindow,
		DefaultMaxTokens:         GeminiModels[Gemini25Flash].DefaultMaxTokens,
		SupportsAttachments:      GeminiModels[Gemini25Flash].SupportsAttachments,
		SupportsAdaptiveThinking: GeminiModels[Gemini25Flash].SupportsAdaptiveThinking,
		CanReason:                GeminiModels[Gemini25Flash].CanReason,
	},
	OpenRouterGemini25: {
		ID:                       OpenRouterGemini25,
		Name:                     "OpenRouter - Gemini 2.5 Pro",
		Provider:                 ProviderOpenRouter,
		APIModel:                 "google/gemini-2.5-pro-preview-03-25",
		CostPer1MIn:              GeminiModels[Gemini25].CostPer1MIn,
		CostPer1MInCached:        GeminiModels[Gemini25].CostPer1MInCached,
		CostPer1MOut:             GeminiModels[Gemini25].CostPer1MOut,
		CostPer1MOutCached:       GeminiModels[Gemini25].CostPer1MOutCached,
		ContextWindow:            GeminiModels[Gemini25].ContextWindow,
		DefaultMaxTokens:         GeminiModels[Gemini25].DefaultMaxTokens,
		SupportsAttachments:      GeminiModels[Gemini25].SupportsAttachments,
		SupportsAdaptiveThinking: GeminiModels[Gemini25].SupportsAdaptiveThinking,
		CanReason:                GeminiModels[Gemini25].CanReason,
	},
	OpenRouterGemini3Flash: {
		// Fixed: ID previously said OpenRouterGemini3 (copy-paste), which
		// both mismatched this entry's map key and collided with the
		// Gemini 3 Pro entry's ID.
		ID:                       OpenRouterGemini3Flash,
		Name:                     "OpenRouter - Gemini 3 Flash Preview",
		Provider:                 ProviderOpenRouter,
		APIModel:                 "google/gemini-3-flash-preview",
		CostPer1MIn:              GeminiModels[Gemini30Flash].CostPer1MIn,
		CostPer1MInCached:        GeminiModels[Gemini30Flash].CostPer1MInCached,
		CostPer1MOut:             GeminiModels[Gemini30Flash].CostPer1MOut,
		CostPer1MOutCached:       GeminiModels[Gemini30Flash].CostPer1MOutCached,
		ContextWindow:            GeminiModels[Gemini30Flash].ContextWindow,
		DefaultMaxTokens:         GeminiModels[Gemini30Flash].DefaultMaxTokens,
		SupportsAttachments:      GeminiModels[Gemini30Flash].SupportsAttachments,
		SupportsAdaptiveThinking: GeminiModels[Gemini30Flash].SupportsAdaptiveThinking,
		CanReason:                GeminiModels[Gemini30Flash].CanReason,
	},
	OpenRouterGemini3: {
		ID:                       OpenRouterGemini3,
		Name:                     "OpenRouter - Gemini 3 Pro Preview",
		Provider:                 ProviderOpenRouter,
		APIModel:                 "google/gemini-3-pro-preview",
		CostPer1MIn:              GeminiModels[Gemini30Pro].CostPer1MIn,
		CostPer1MInCached:        GeminiModels[Gemini30Pro].CostPer1MInCached,
		CostPer1MOut:             GeminiModels[Gemini30Pro].CostPer1MOut,
		CostPer1MOutCached:       GeminiModels[Gemini30Pro].CostPer1MOutCached,
		ContextWindow:            GeminiModels[Gemini30Pro].ContextWindow,
		DefaultMaxTokens:         GeminiModels[Gemini30Pro].DefaultMaxTokens,
		SupportsAttachments:      GeminiModels[Gemini30Pro].SupportsAttachments,
		SupportsAdaptiveThinking: GeminiModels[Gemini30Pro].SupportsAdaptiveThinking,
		CanReason:                GeminiModels[Gemini30Pro].CanReason,
	},
	OpenRouterClaude35Sonnet: {
		ID:                 OpenRouterClaude35Sonnet,
		Name:               "OpenRouter - Claude 3.5 Sonnet",
		Provider:           ProviderOpenRouter,
		APIModel:           "anthropic/claude-3.5-sonnet",
		CostPer1MIn:        AnthropicModels[Claude35Sonnet].CostPer1MIn,
		CostPer1MInCached:  AnthropicModels[Claude35Sonnet].CostPer1MInCached,
		CostPer1MOut:       AnthropicModels[Claude35Sonnet].CostPer1MOut,
		CostPer1MOutCached: AnthropicModels[Claude35Sonnet].CostPer1MOutCached,
		ContextWindow:      AnthropicModels[Claude35Sonnet].ContextWindow,
		DefaultMaxTokens:   AnthropicModels[Claude35Sonnet].DefaultMaxTokens,
	},
	OpenRouterClaude3Haiku: {
		ID:                 OpenRouterClaude3Haiku,
		Name:               "OpenRouter - Claude 3 Haiku",
		Provider:           ProviderOpenRouter,
		APIModel:           "anthropic/claude-3-haiku",
		CostPer1MIn:        AnthropicModels[Claude3Haiku].CostPer1MIn,
		CostPer1MInCached:  AnthropicModels[Claude3Haiku].CostPer1MInCached,
		CostPer1MOut:       AnthropicModels[Claude3Haiku].CostPer1MOut,
		CostPer1MOutCached: AnthropicModels[Claude3Haiku].CostPer1MOutCached,
		ContextWindow:      AnthropicModels[Claude3Haiku].ContextWindow,
		DefaultMaxTokens:   AnthropicModels[Claude3Haiku].DefaultMaxTokens,
	},
	OpenRouterClaude37Sonnet: {
		ID:                 OpenRouterClaude37Sonnet,
		Name:               "OpenRouter - Claude 3.7 Sonnet",
		Provider:           ProviderOpenRouter,
		APIModel:           "anthropic/claude-3.7-sonnet",
		CostPer1MIn:        AnthropicModels[Claude37Sonnet].CostPer1MIn,
		CostPer1MInCached:  AnthropicModels[Claude37Sonnet].CostPer1MInCached,
		CostPer1MOut:       AnthropicModels[Claude37Sonnet].CostPer1MOut,
		CostPer1MOutCached: AnthropicModels[Claude37Sonnet].CostPer1MOutCached,
		ContextWindow:      AnthropicModels[Claude37Sonnet].ContextWindow,
		DefaultMaxTokens:   AnthropicModels[Claude37Sonnet].DefaultMaxTokens,
		CanReason:          AnthropicModels[Claude37Sonnet].CanReason,
	},
	OpenRouterClaude35Haiku: {
		ID:                 OpenRouterClaude35Haiku,
		Name:               "OpenRouter - Claude 3.5 Haiku",
		Provider:           ProviderOpenRouter,
		APIModel:           "anthropic/claude-3.5-haiku",
		CostPer1MIn:        AnthropicModels[Claude35Haiku].CostPer1MIn,
		CostPer1MInCached:  AnthropicModels[Claude35Haiku].CostPer1MInCached,
		CostPer1MOut:       AnthropicModels[Claude35Haiku].CostPer1MOut,
		CostPer1MOutCached: AnthropicModels[Claude35Haiku].CostPer1MOutCached,
		ContextWindow:      AnthropicModels[Claude35Haiku].ContextWindow,
		DefaultMaxTokens:   AnthropicModels[Claude35Haiku].DefaultMaxTokens,
	},
	OpenRouterClaude3Opus: {
		ID:                 OpenRouterClaude3Opus,
		Name:               "OpenRouter - Claude 3 Opus",
		Provider:           ProviderOpenRouter,
		APIModel:           "anthropic/claude-3-opus",
		CostPer1MIn:        AnthropicModels[Claude3Opus].CostPer1MIn,
		CostPer1MInCached:  AnthropicModels[Claude3Opus].CostPer1MInCached,
		CostPer1MOut:       AnthropicModels[Claude3Opus].CostPer1MOut,
		CostPer1MOutCached: AnthropicModels[Claude3Opus].CostPer1MOutCached,
		ContextWindow:      AnthropicModels[Claude3Opus].ContextWindow,
		DefaultMaxTokens:   AnthropicModels[Claude3Opus].DefaultMaxTokens,
	},
	OpenRouterKimiK2: {
		ID:                 OpenRouterKimiK2,
		Name:               "OpenRouter - Kimi K2",
		Provider:           ProviderOpenRouter,
		APIModel:           "moonshotai/kimi-k2",
		CostPer1MIn:        0,
		CostPer1MInCached:  0,
		CostPer1MOut:       0,
		CostPer1MOutCached: 0,
		ContextWindow:      200000,
		DefaultMaxTokens:   4096,
	},
	OpenRouterNemotron3Nano: {
		ID:                 OpenRouterNemotron3Nano,
		Name:               "OpenRouter - Nemotron 3 Nano 30B A3B",
		Provider:           ProviderOpenRouter,
		APIModel:           "nvidia/nemotron-3-nano-30b-a3b",
		CostPer1MIn:        0.06,
		CostPer1MInCached:  0,
		CostPer1MOut:       0.24,
		CostPer1MOutCached: 0,
		ContextWindow:      262144,
		DefaultMaxTokens:   128000,
	},
	OpenRouterGLM47Flash: {
		ID:                 OpenRouterGLM47Flash,
		Name:               "OpenRouter - Z.AI: GLM 4.7 Flash",
		Provider:           ProviderOpenRouter,
		APIModel:           "z-ai/glm-4.7-flash",
		CostPer1MIn:        0.07,
		CostPer1MInCached:  0,
		CostPer1MOut:       0.40,
		CostPer1MOutCached: 0,
		ContextWindow:      200000,
		DefaultMaxTokens:   131000,
	},
	OpenRouterGPT52: {
		ID:                 OpenRouterGPT52,
		Name:               "OpenRouter - GPT 5.2",
		Provider:           ProviderOpenRouter,
		APIModel:           "openai/gpt-5.2",
		CostPer1MIn:        10.0,
		CostPer1MInCached:  5.0,
		CostPer1MOut:       40.0,
		CostPer1MOutCached: 0,
		ContextWindow:      400000,
		DefaultMaxTokens:   20000,
	},
	OpenRouterGPT52Codex: {
		ID:                 OpenRouterGPT52Codex,
		Name:               "OpenRouter - GPT 5.2 Codex",
		Provider:           ProviderOpenRouter,
		APIModel:           "openai/gpt-5.2-codex",
		CostPer1MIn:        8.0,
		CostPer1MInCached:  4.0,
		CostPer1MOut:       32.0,
		CostPer1MOutCached: 0,
		ContextWindow:      256000,
		DefaultMaxTokens:   16384,
	},
	OpenRouterDeepSeekR1Free: {
		ID:                 OpenRouterDeepSeekR1Free,
		Name:               "OpenRouter - DeepSeek R1 0528 (free)",
		Provider:           ProviderOpenRouter,
		APIModel:           "deepseek/deepseek-r1-0528:free",
		CostPer1MIn:        0,
		CostPer1MInCached:  0,
		CostPer1MOut:       0,
		CostPer1MOutCached: 0,
		ContextWindow:      163840,
		DefaultMaxTokens:   10000,
	},
	OpenRouterDeepSeekV32: {
		ID:                 OpenRouterDeepSeekV32,
		Name:               "OpenRouter - DeepSeek V3.2",
		Provider:           ProviderOpenRouter,
		APIModel:           "deepseek/deepseek-v3.2",
		CostPer1MIn:        0.14,
		CostPer1MInCached:  0.07,
		CostPer1MOut:       0.28,
		CostPer1MOutCached: 0,
		ContextWindow:      128000,
		DefaultMaxTokens:   8192,
	},
	OpenRouterDevstral2: {
		ID:                 OpenRouterDevstral2,
		Name:               "OpenRouter - Devstral 2 2512",
		Provider:           ProviderOpenRouter,
		APIModel:           "mistralai/devstral-2-2512",
		CostPer1MIn:        0.30,
		CostPer1MInCached:  0.15,
		CostPer1MOut:       0.90,
		CostPer1MOutCached: 0,
		ContextWindow:      256000,
		DefaultMaxTokens:   32768,
	},
	OpenRouterMiMoV2: {
		// Xiaomi MiMo-V2 via OpenRouter; costs are USD per 1M tokens.
		// BUG FIX: ID was OpenRouterMiMoV2Flash, duplicating the Flash
		// entry's ID; it must match this entry's map key.
		ID:                 OpenRouterMiMoV2,
		Name:               "OpenRouter - MiMo-V2",
		Provider:           ProviderOpenRouter,
		APIModel:           "xiaomi/mimo-v2",
		CostPer1MIn:        0.05,
		CostPer1MInCached:  0,
		CostPer1MOut:       0.20,
		CostPer1MOutCached: 0,
		ContextWindow:      256000,
		DefaultMaxTokens:   16384,
	},
	OpenRouterMiMoV2Flash: {
		ID:                 OpenRouterMiMoV2Flash,
		Name:               "OpenRouter - MiMo-V2-Flash",
		Provider:           ProviderOpenRouter,
		APIModel:           "xiaomi/mimo-v2-flash",
		CostPer1MIn:        0.05,
		CostPer1MInCached:  0,
		CostPer1MOut:       0.20,
		CostPer1MOutCached: 0,
		ContextWindow:      256000,
		DefaultMaxTokens:   16384,
	},
	OpenRouterGrok4Fast: {
		ID:                 OpenRouterGrok4Fast,
		Name:               "OpenRouter - Grok 4 Fast",
		Provider:           ProviderOpenRouter,
		APIModel:           "x-ai/grok-4-fast",
		CostPer1MIn:        0.20,
		CostPer1MInCached:  0,
		CostPer1MOut:       0.50,
		CostPer1MOutCached: 0.05,
		ContextWindow:      2048000,
		DefaultMaxTokens:   32768,
		CanReason:          true,
	},
	OpenRouterGrok4FastFree: {
		ID:                 OpenRouterGrok4FastFree,
		Name:               "OpenRouter - Grok 4 Fast (Free)",
		Provider:           ProviderOpenRouter,
		APIModel:           "x-ai/grok-4-fast:free",
		CostPer1MIn:        0.20,
		CostPer1MInCached:  0,
		CostPer1MOut:       0.50,
		CostPer1MOutCached: 0.05,
		ContextWindow:      2048000,
		DefaultMaxTokens:   32768,
		CanReason:          true,
	},
	OpenRouterGrok41Fast: {
		ID:                 OpenRouterGrok41Fast,
		Name:               "OpenRouter - Grok 4.1 Fast",
		Provider:           ProviderOpenRouter,
		APIModel:           "x-ai/grok-4.1-fast",
		CostPer1MIn:        0.20,
		CostPer1MInCached:  0,
		CostPer1MOut:       0.50,
		CostPer1MOutCached: 0.05,
		ContextWindow:      2048000,
		DefaultMaxTokens:   32768,
		CanReason:          true,
	},
	OpenRouterMiniMax01: {
		ID:                 OpenRouterMiniMax01,
		Name:               "OpenRouter - MiniMax 01",
		Provider:           ProviderOpenRouter,
		APIModel:           "minimax/minimax-01",
		CostPer1MIn:        0.20,
		CostPer1MInCached:  0,
		CostPer1MOut:       1.10,
		CostPer1MOutCached: 0,
		ContextWindow:      1024000,
		DefaultMaxTokens:   1024000,
		CanReason:          false,
	},
	OpenRouterMiniMaxM1: {
		ID:                 OpenRouterMiniMaxM1,
		Name:               "OpenRouter - MiniMax M1",
		Provider:           ProviderOpenRouter,
		APIModel:           "minimax/minimax-m1",
		CostPer1MIn:        0.40,
		CostPer1MInCached:  0,
		CostPer1MOut:       2.20,
		CostPer1MOutCached: 0,
		ContextWindow:      1024000,
		DefaultMaxTokens:   40000,
		CanReason:          false,
	},
	OpenRouterMiniMaxM2: {
		ID:                 OpenRouterMiniMaxM2,
		Name:               "OpenRouter - MiniMax M2",
		Provider:           ProviderOpenRouter,
		APIModel:           "minimax/minimax-m2",
		CostPer1MIn:        0.225,
		CostPer1MInCached:  0,
		CostPer1MOut:       1.02,
		CostPer1MOutCached: 0,
		ContextWindow:      204800,
		DefaultMaxTokens:   131100,
		CanReason:          false,
	},
	OpenRouterMiniMaxM21: {
		// MiniMax M2.1 via OpenRouter; costs are USD per 1M tokens.
		// BUG FIX: CostPer1MOut and CostPer1MOutCached were swapped
		// (output cost was 0 while "cached output" was 1.20), inverted
		// relative to the sibling M2/M2.5 entries. Values swapped back;
		// TODO confirm against OpenRouter's published M2.1 pricing.
		ID:                 OpenRouterMiniMaxM21,
		Name:               "OpenRouter - MiniMax M2.1",
		Provider:           ProviderOpenRouter,
		APIModel:           "minimax/minimax-m2.1",
		CostPer1MIn:        0.30,
		CostPer1MInCached:  0.03,
		CostPer1MOut:       1.20,
		CostPer1MOutCached: 0,
		ContextWindow:      204800,
		DefaultMaxTokens:   131100,
		CanReason:          false,
	},
	OpenRouterMiniMaxM25: {
		ID:                 OpenRouterMiniMaxM25,
		Name:               "OpenRouter - MiniMax M2.5",
		Provider:           ProviderOpenRouter,
		APIModel:           "minimax/minimax-m2.5",
		CostPer1MIn:        0.30,
		CostPer1MInCached:  0,
		CostPer1MOut:       1.20,
		CostPer1MOutCached: 0.03,
		ContextWindow:      196600,
		DefaultMaxTokens:   204800,
		CanReason:          false,
	},
	OpenRouterTrinityLarge: {
		ID:                 OpenRouterTrinityLarge,
		Name:               "OpenRouter - Trinity Large Preview (free)",
		Provider:           ProviderOpenRouter,
		APIModel:           "arcee-ai/trinity-large-preview:free",
		CostPer1MIn:        0,
		CostPer1MInCached:  0,
		CostPer1MOut:       0,
		CostPer1MOutCached: 0,
		ContextWindow:      131000,
		DefaultMaxTokens:   131000,
		CanReason:          false,
	},
}
View Source
// ProviderPopularity orders providers for presentation: lower numbers
// rank higher (more popular).
var ProviderPopularity = map[ModelProvider]int{
	ProviderVertexAI:   1,
	ProviderAnthropic:  2,
	ProviderOpenAI:     3,
	ProviderGemini:     4,
	ProviderGrok:       5, // NOTE(review): ProviderGrok's value is "groq" (Groq), distinct from ProviderXAI's Grok models — confirm the name is intentional
	ProviderXAI:        6,
	ProviderOpenRouter: 7,
	ProviderDeepSeek:   8,
	ProviderBedrock:    9,
	ProviderLocal:      10,
}

ProviderPopularity lists the providers in order of popularity, from most popular (1) to least popular (10).

View Source
// SupportedModels is a registry of models keyed by ModelID.
// Costs are USD per one million tokens.
var SupportedModels = map[ModelID]Model{
	BedrockClaude45Sonnet: {
		ID:                 BedrockClaude45Sonnet,
		Name:               "Bedrock: Claude 4.5 Sonnet",
		Provider:           ProviderBedrock,
		APIModel:           "eu.anthropic.claude-sonnet-4-5-20250929-v1:0",
		CostPer1MIn:        3.0,
		CostPer1MInCached:  3.75, // cache-write rate
		CostPer1MOut:       15.0, // reordered before OutCached for consistency with the other model tables
		CostPer1MOutCached: 0.30, // cache-read rate
		// BUG FIX: ContextWindow and DefaultMaxTokens were omitted, so
		// both defaulted to 0 — unusable by any token-budget logic.
		// Values per Anthropic's published Claude Sonnet 4.5 specs
		// (200K context, 64K max output); TODO confirm the project's
		// preferred DefaultMaxTokens against the Anthropic model table.
		ContextWindow:    200_000,
		DefaultMaxTokens: 64_000,
	},
}
View Source
// VertexAIAnthropicModels lists Anthropic Claude models served through
// Google Vertex AI. Pricing, limits, and capability flags are copied
// from the corresponding AnthropicModels entries; only the provider,
// display name, and Vertex-specific APIModel string differ.
var VertexAIAnthropicModels = map[ModelID]Model{
	VertexAISonnet45M: {
		ID:                  VertexAISonnet45M,
		Name:                "VertexAI: Claude Sonnet 4.5 [1m]",
		Provider:            ProviderVertexAI,
		APIModel:            "claude-sonnet-4-5@20250929",
		CostPer1MIn:         AnthropicModels[Claude45Sonnet1M].CostPer1MIn,
		CostPer1MInCached:   AnthropicModels[Claude45Sonnet1M].CostPer1MInCached,
		CostPer1MOut:        AnthropicModels[Claude45Sonnet1M].CostPer1MOut,
		CostPer1MOutCached:  AnthropicModels[Claude45Sonnet1M].CostPer1MOutCached,
		ContextWindow:       AnthropicModels[Claude45Sonnet1M].ContextWindow,
		DefaultMaxTokens:    AnthropicModels[Claude45Sonnet1M].DefaultMaxTokens,
		SupportsAttachments: AnthropicModels[Claude45Sonnet1M].SupportsAttachments,
		// NOTE(review): unlike the Opus 4.6 entry below, this entry does
		// not copy SupportsAdaptiveThinking/SupportsMaximumThinking —
		// confirm the omission is intentional.
		CanReason: AnthropicModels[Claude45Sonnet1M].CanReason,
	},
	VertexAIOpus45: {
		ID:                  VertexAIOpus45,
		Name:                "VertexAI: Claude Opus 4.5",
		Provider:            ProviderVertexAI,
		APIModel:            "claude-opus-4-5@20251101",
		CostPer1MIn:         AnthropicModels[Claude45Opus].CostPer1MIn,
		CostPer1MInCached:   AnthropicModels[Claude45Opus].CostPer1MInCached,
		CostPer1MOut:        AnthropicModels[Claude45Opus].CostPer1MOut,
		CostPer1MOutCached:  AnthropicModels[Claude45Opus].CostPer1MOutCached,
		ContextWindow:       AnthropicModels[Claude45Opus].ContextWindow,
		DefaultMaxTokens:    AnthropicModels[Claude45Opus].DefaultMaxTokens,
		SupportsAttachments: AnthropicModels[Claude45Opus].SupportsAttachments,
		CanReason:           AnthropicModels[Claude45Opus].CanReason,
	},
	VertexAIOpus46: {
		ID:                       VertexAIOpus46,
		Name:                     "VertexAI: Claude Opus 4.6",
		Provider:                 ProviderVertexAI,
		APIModel:                 "claude-opus-4-6", // NOTE(review): no @date pin, unlike the two entries above — confirm intentional
		CostPer1MIn:              AnthropicModels[Claude46Opus].CostPer1MIn,
		CostPer1MInCached:        AnthropicModels[Claude46Opus].CostPer1MInCached,
		CostPer1MOut:             AnthropicModels[Claude46Opus].CostPer1MOut,
		CostPer1MOutCached:       AnthropicModels[Claude46Opus].CostPer1MOutCached,
		ContextWindow:            AnthropicModels[Claude46Opus].ContextWindow,
		DefaultMaxTokens:         AnthropicModels[Claude46Opus].DefaultMaxTokens,
		SupportsAttachments:      AnthropicModels[Claude46Opus].SupportsAttachments,
		CanReason:                AnthropicModels[Claude46Opus].CanReason,
		SupportsAdaptiveThinking: AnthropicModels[Claude46Opus].SupportsAdaptiveThinking,
		SupportsMaximumThinking:  AnthropicModels[Claude46Opus].SupportsMaximumThinking,
	},
}
View Source
// VertexAIGeminiModels lists Gemini models served through Vertex AI.
// Pricing and token limits are copied from the GeminiModels entries;
// SupportsAttachments/CanReason are hardcoded true here rather than
// copied — presumably equivalent, but confirm against GeminiModels.
var VertexAIGeminiModels = map[ModelID]Model{
	VertexAIGemini30Pro: {
		ID:                  VertexAIGemini30Pro,
		Name:                "VertexAI: Gemini 3.0 Pro",
		Provider:            ProviderVertexAI,
		APIModel:            "gemini-3-pro-preview",
		CostPer1MIn:         GeminiModels[Gemini30Pro].CostPer1MIn,
		CostPer1MInCached:   GeminiModels[Gemini30Pro].CostPer1MInCached,
		CostPer1MOut:        GeminiModels[Gemini30Pro].CostPer1MOut,
		CostPer1MOutCached:  GeminiModels[Gemini30Pro].CostPer1MOutCached,
		ContextWindow:       GeminiModels[Gemini30Pro].ContextWindow,
		DefaultMaxTokens:    GeminiModels[Gemini30Pro].DefaultMaxTokens,
		SupportsAttachments: true,
		CanReason:           true,
	},
	VertexAIGemini30Flash: {
		ID:                  VertexAIGemini30Flash,
		Name:                "VertexAI: Gemini 3.0 Flash",
		Provider:            ProviderVertexAI,
		APIModel:            "gemini-3-flash-preview",
		CostPer1MIn:         GeminiModels[Gemini30Flash].CostPer1MIn,
		CostPer1MInCached:   GeminiModels[Gemini30Flash].CostPer1MInCached,
		CostPer1MOut:        GeminiModels[Gemini30Flash].CostPer1MOut,
		CostPer1MOutCached:  GeminiModels[Gemini30Flash].CostPer1MOutCached,
		ContextWindow:       GeminiModels[Gemini30Flash].ContextWindow,
		DefaultMaxTokens:    GeminiModels[Gemini30Flash].DefaultMaxTokens,
		SupportsAttachments: true,
		CanReason:           true,
	},
}
View Source
// XAIModels lists models served directly by the xAI API.
// Costs are USD per one million tokens.
//
// NOTE(review): no entry here sets CanReason — not even the
// "Reasoning" variants — while the OpenRouter Grok entries do set
// CanReason: true. Cannot tell from this file how the flag is consumed
// for the native xAI provider; confirm the omission is intentional.
var XAIModels = map[ModelID]Model{
	XAIGrok41FastReasoning: {
		ID:                 XAIGrok41FastReasoning,
		Name:               "Grok 4.1 Fast Reasoning",
		Provider:           ProviderXAI,
		APIModel:           "grok-4-1-fast-reasoning",
		CostPer1MIn:        0.20,
		CostPer1MInCached:  0.05,
		CostPer1MOut:       0.50,
		CostPer1MOutCached: 0,
		ContextWindow:      2_000_000,
		DefaultMaxTokens:   64_000,
	},
	XAIGrok41FastNonReasoning: {
		ID:                 XAIGrok41FastNonReasoning,
		Name:               "Grok 4.1 Fast Non-Reasoning",
		Provider:           ProviderXAI,
		APIModel:           "grok-4-1-fast-non-reasoning",
		CostPer1MIn:        0.20,
		CostPer1MInCached:  0.05,
		CostPer1MOut:       0.50,
		CostPer1MOutCached: 0,
		ContextWindow:      2_000_000,
		DefaultMaxTokens:   16_000,
	},
	XAIGrokCodeFast1: {
		ID:                 XAIGrokCodeFast1,
		Name:               "Grok Code Fast 1",
		Provider:           ProviderXAI,
		APIModel:           "grok-code-fast-1",
		CostPer1MIn:        0.20,
		CostPer1MInCached:  0.02,
		CostPer1MOut:       1.50,
		CostPer1MOutCached: 0,
		ContextWindow:      256_000,
		DefaultMaxTokens:   32_000,
	},
	XAIGrok4FastReasoning: {
		ID:                 XAIGrok4FastReasoning,
		Name:               "Grok 4 Fast Reasoning",
		Provider:           ProviderXAI,
		APIModel:           "grok-4-fast-reasoning",
		CostPer1MIn:        0.20,
		CostPer1MInCached:  0.05,
		CostPer1MOut:       0.50,
		CostPer1MOutCached: 0,
		ContextWindow:      2_000_000,
		DefaultMaxTokens:   64_000,
	},
	XAIGrok4FastNonReasoning: {
		ID:                 XAIGrok4FastNonReasoning,
		Name:               "Grok 4 Fast Non-Reasoning",
		Provider:           ProviderXAI,
		APIModel:           "grok-4-fast-non-reasoning",
		CostPer1MIn:        0.20,
		CostPer1MInCached:  0.05,
		CostPer1MOut:       0.50,
		CostPer1MOutCached: 0,
		ContextWindow:      2_000_000,
		DefaultMaxTokens:   16_000,
	},
	XAIGrok40709: {
		ID:                 XAIGrok40709,
		Name:               "Grok 4 0709",
		Provider:           ProviderXAI,
		APIModel:           "grok-4-0709",
		CostPer1MIn:        3.0,
		CostPer1MInCached:  0.75,
		CostPer1MOut:       15.0,
		CostPer1MOutCached: 0.75,
		ContextWindow:      256_000,
		DefaultMaxTokens:   20_000,
	},
}

Functions

This section is empty.

Types

type Model

// Model describes one LLM offering: identity, provider routing string,
// per-million-token pricing, token limits, and capability flags.
type Model struct {
	ID                       ModelID       `json:"id"`                         // key used in the model maps; must match the map key of each entry
	Name                     string        `json:"name"`                       // human-readable display name
	Provider                 ModelProvider `json:"provider"`                   // owning provider constant
	APIModel                 string        `json:"api_model"`                  // identifier sent to the provider's API
	CostPer1MIn              float64       `json:"cost_per_1m_in"`             // USD per 1M input tokens
	CostPer1MOut             float64       `json:"cost_per_1m_out"`            // USD per 1M output tokens
	CostPer1MInCached        float64       `json:"cost_per_1m_in_cached"`      // USD per 1M cached input tokens
	CostPer1MOutCached       float64       `json:"cost_per_1m_out_cached"`     // USD per 1M cached output tokens
	ContextWindow            int64         `json:"context_window"`             // maximum total tokens the model accepts
	DefaultMaxTokens         int64         `json:"default_max_tokens"`         // default generation cap when the caller sets none
	CanReason                bool          `json:"can_reason"`                 // model supports extended reasoning/thinking
	SupportsAdaptiveThinking bool          `json:"supports_adaptive_thinking"` // model can vary its thinking budget
	SupportsMaximumThinking  bool          `json:"supports_maximum_thinking"`  // model accepts a maximum-thinking mode
	SupportsAttachments      bool          `json:"supports_attachments"`       // model accepts file/image attachments
}

type ModelID

// ModelID uniquely identifies a model entry across all provider tables.
type ModelID string

type ModelProvider

// ModelProvider names the backing inference provider for a Model.
type ModelProvider string

const (
	// ProviderLocal identifies models served from a local endpoint.
	ProviderLocal ModelProvider = "local"
)

Jump to

Keyboard shortcuts

? : This menu
/ : Search site
f or F : Jump to
y or Y : Canonical URL