llm

package
v0.0.0-...-0714061
Published: Nov 18, 2025 License: MIT Imports: 10 Imported by: 0

Documentation

Index

Constants

This section is empty.

Variables

var GlobalRegistry = NewProviderRegistry()

GlobalRegistry is the global provider registry.
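
A minimal sketch of obtaining a provider through the global registry (the built-in providers are assumed to register themselves under names such as "openai"; from outside the package, qualify names with llm.):

provider := llm.GlobalRegistry.Create("openai")
if provider == nil { // Create is assumed to return nil for unknown names
	log.Fatal("provider not registered")
}
fmt.Println("registered providers:", llm.GlobalRegistry.List())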

var ModelContextSizes = map[string]int{
	"gpt-3.5-turbo":       16385,
	"gpt-3.5-turbo-16k":   16385,
	"gpt-4":               8192,
	"gpt-4-32k":           32768,
	"gpt-4-turbo":         128000,
	"gpt-4-turbo-preview": 128000,
	"gpt-4o":              128000,
	"gpt-4o-mini":         128000,
	"o1":                  200000,
	"o1-mini":             128000,
	"o1-preview":          128000,

	"claude-3-5-sonnet-20241022": 200000,
	"claude-3-5-sonnet":          200000,
	"claude-3-opus":              200000,
	"claude-3-sonnet":            200000,
	"claude-3-haiku":             200000,
	"claude-2.1":                 200000,
	"claude-2":                   100000,
	"claude-instant":             100000,

	"deepseek-chat":  64000,
	"deepseek-coder": 16000,

	"llama2":    4096,
	"llama3":    8192,
	"mistral":   8192,
	"mixtral":   32768,
	"codellama": 16384,

	"yandexgpt":      8000,
	"yandexgpt-lite": 8000,
}

ModelContextSizes maps model names to their context window sizes.

Functions

func GetModelContextSize

func GetModelContextSize(model string) int

GetModelContextSize returns the context window size for a given model. If the model is not found, it tries to match by prefix; otherwise it returns a conservative default.
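
A hedged sketch of the lookup behavior (values for exact matches come from ModelContextSizes; the prefix match and the exact default value are as documented above, not verified here):

fmt.Println(llm.GetModelContextSize("gpt-4o"))            // 128000: exact match
fmt.Println(llm.GetModelContextSize("gpt-4o-2024-05-13")) // no exact entry, matched by the "gpt-4o" prefix
fmt.Println(llm.GetModelContextSize("unknown-model"))     // conservative default (value unspecified in this doc)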

Types

type AnthropicProvider

type AnthropicProvider struct {
	*BaseProvider // Embed base functionality
	// contains filtered or unexported fields
}

AnthropicProvider implements the Provider interface for Anthropic's Claude API

func (*AnthropicProvider) Chat

func (p *AnthropicProvider) Chat(ctx context.Context, messages []Message) (*ChatResponse, error)

Chat sends a chat request to Anthropic

func (*AnthropicProvider) ChatWithTools

func (p *AnthropicProvider) ChatWithTools(ctx context.Context, messages []Message, tools []Tool) (*ChatResponse, error)

ChatWithTools sends a chat request to Anthropic with tool support

func (*AnthropicProvider) Close

func (p *AnthropicProvider) Close() error

Close cleans up resources

func (*AnthropicProvider) Configure

func (p *AnthropicProvider) Configure(config Config) error

Configure configures the Anthropic provider

func (*AnthropicProvider) GetCapabilities

func (p *AnthropicProvider) GetCapabilities() Capabilities

GetCapabilities returns the provider's capabilities

func (*AnthropicProvider) GetModel

func (p *AnthropicProvider) GetModel() string

GetModel returns the current model name

func (*AnthropicProvider) IsAvailable

func (p *AnthropicProvider) IsAvailable() bool

IsAvailable checks if the provider is configured and available

func (*AnthropicProvider) ListModels

func (p *AnthropicProvider) ListModels(ctx context.Context) ([]string, error)

ListModels returns available models by fetching from Anthropic documentation

func (*AnthropicProvider) StreamChat

func (p *AnthropicProvider) StreamChat(ctx context.Context, messages []Message, callback StreamCallback) (*ChatResponse, error)

StreamChat streams a chat response from Anthropic

func (*AnthropicProvider) StreamChatWithTools

func (p *AnthropicProvider) StreamChatWithTools(ctx context.Context, messages []Message, tools []Tool, callback StreamCallback) (*ChatResponse, error)

StreamChatWithTools sends messages to Anthropic with tool support and streams the response via callback. NOTE: langchaingo v0.1.13 has a bug with Anthropic streaming+tools: it fails with "invalid delta text field type" when processing tool_use deltas because it expects a "text" field that doesn't exist in tool_use events. We fall back to non-streaming instead. See: https://github.com/tmc/langchaingo/blob/v0.1.13/llms/anthropic/internal/anthropicclient/messages.go#L232-238

type BaseProvider

type BaseProvider struct {
	// contains filtered or unexported fields
}

BaseProvider provides common functionality for all LLM providers. This reduces code duplication and provides consistent behavior.

func NewBaseProvider

func NewBaseProvider(name string) *BaseProvider

NewBaseProvider creates a new base provider

func (*BaseProvider) BuildChatResponse

func (b *BaseProvider) BuildChatResponse(content, model, finishReason string, usage TokenUsage, toolCalls []ToolCall, metadata map[string]string) *ChatResponse

BuildChatResponse creates a standardized ChatResponse. This eliminates duplicated response-construction code across providers.

func (*BaseProvider) CalculateUsageWithCost

func (b *BaseProvider) CalculateUsageWithCost(promptTokens, completionTokens int, costCalculator func(string, int) float64, model string) TokenUsage

CalculateUsageWithCost calculates token usage with cost

func (*BaseProvider) ConvertMessages

func (b *BaseProvider) ConvertMessages(messages []Message) []map[string]interface{}

ConvertMessages converts our Message format to a generic format. Providers can override this if they need specific conversion logic.

func (*BaseProvider) ConvertMessagesToLangChainGo

func (b *BaseProvider) ConvertMessagesToLangChainGo(messages []Message) []llms.MessageContent

ConvertMessagesToLangChainGo converts our Message format to langchaingo MessageContent. This eliminates the 15+ line duplication across the OpenAI, DeepSeek, Ollama, and Yandex providers.

func (*BaseProvider) ConvertTools

func (b *BaseProvider) ConvertTools(tools []Tool) []map[string]interface{}

ConvertTools converts our Tool format to a generic format. Providers can override this if they need specific conversion logic.

func (*BaseProvider) ConvertToolsToLangChainGo

func (b *BaseProvider) ConvertToolsToLangChainGo(tools []Tool) []llms.Tool

ConvertToolsToLangChainGo converts our Tool format to the langchaingo Tool format. This eliminates tool-conversion duplication across providers.

func (*BaseProvider) CreateOpenAICompatibleClient

func (b *BaseProvider) CreateOpenAICompatibleClient(config Config, baseURL string, requiresAPIKey bool) (*openai.LLM, error)

CreateOpenAICompatibleClient creates an OpenAI-compatible client with standard options. This eliminates duplication across the DeepSeek, Ollama, and Yandex providers.

func (*BaseProvider) CreateStandardHTTPClient

func (b *BaseProvider) CreateStandardHTTPClient() *http.Client

CreateStandardHTTPClient creates an HTTP client with a standard timeout. This eliminates duplication across all providers that make HTTP calls.

func (*BaseProvider) CreateStreamingCallback

func (b *BaseProvider) CreateStreamingCallback(callback StreamCallback, fullContent *strings.Builder) func(string) error

CreateStreamingCallback creates a streaming callback with tool call filtering

func (*BaseProvider) DefaultStreamChatWithTools

func (b *BaseProvider) DefaultStreamChatWithTools(
	ctx context.Context,
	messages []Message,
	tools []Tool,
	callback StreamCallback,
	chatWithToolsFunc func(context.Context, []Message, []Tool) (*ChatResponse, error),
	streamChatFunc func(context.Context, []Message, StreamCallback) (*ChatResponse, error),
) (*ChatResponse, error)

DefaultStreamChatWithTools provides a standard implementation for providers that lack native streaming+tools support. This eliminates the duplicate fallback pattern across the Anthropic, DeepSeek, Ollama, and Yandex providers.
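
As a sketch, a provider without native streaming+tools support could delegate to this helper (MyProvider is hypothetical and assumed to embed *BaseProvider):

func (p *MyProvider) StreamChatWithTools(ctx context.Context, messages []llm.Message, tools []llm.Tool, callback llm.StreamCallback) (*llm.ChatResponse, error) {
	// Hand over both code paths: ChatWithTools covers the tool case,
	// StreamChat covers plain streaming.
	return p.DefaultStreamChatWithTools(ctx, messages, tools, callback, p.ChatWithTools, p.StreamChat)
}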

func (*BaseProvider) EstimateTokens

func (b *BaseProvider) EstimateTokens(text string) int

EstimateTokens provides a basic token estimation. Providers can override it with more accurate estimation.

func (*BaseProvider) ExtractToolCallsFromLangChainResponse

func (b *BaseProvider) ExtractToolCallsFromLangChainResponse(response *llms.ContentResponse) []ToolCall

ExtractToolCallsFromLangChainResponse extracts tool calls from a langchaingo response. This eliminates the duplicate tool-call extraction logic across OpenAI, Anthropic, DeepSeek, and Ollama.

func (*BaseProvider) FallbackToNonStreaming

func (b *BaseProvider) FallbackToNonStreaming(ctx context.Context, messages []Message, tools []Tool, callback StreamCallback, chatWithTools func(context.Context, []Message, []Tool) (*ChatResponse, error)) (*ChatResponse, error)

FallbackToNonStreaming provides a standard fallback for providers that don't support streaming with tools. It simulates streaming by breaking the response into chunks and sending them with realistic timing.

func (*BaseProvider) SetConfigured

func (b *BaseProvider) SetConfigured(configured bool)

SetConfigured marks the provider as configured

func (*BaseProvider) ValidateConfiguration

func (b *BaseProvider) ValidateConfiguration() error

ValidateConfiguration checks if the provider is properly configured

type Capabilities

type Capabilities struct {
	Name              string   `json:"name"`               // Provider name
	Models            []string `json:"models"`             // Available models
	MaxTokens         int      `json:"max_tokens"`         // Maximum tokens per request
	SupportsStreaming bool     `json:"supports_streaming"` // Whether streaming is supported
	SupportsFunctions bool     `json:"supports_functions"` // Whether function calling is supported
	CostPerToken      float64  `json:"cost_per_token"`     // Cost per token (if known)
	RequiresAuth      bool     `json:"requires_auth"`      // Whether authentication is required
}

Capabilities describes what a provider can do

type ChatResponse

type ChatResponse struct {
	Content      string            `json:"content"`       // Response content
	Usage        TokenUsage        `json:"usage"`         // Token usage information
	Model        string            `json:"model"`         // Model used
	FinishReason string            `json:"finish_reason"` // Why the response ended
	Metadata     map[string]string `json:"metadata"`      // Additional metadata
	GeneratedAt  time.Time         `json:"generated_at"`  // When response was generated
	ToolCalls    []ToolCall        `json:"tool_calls"`    // Tool/function calls requested by LLM
}

ChatResponse represents a response from the LLM

type Config

type Config struct {
	Provider    string            `json:"provider"`    // Provider name (openai, local, etc.)
	Model       string            `json:"model"`       // Model to use
	APIKey      string            `json:"api_key"`     // API key (if required)
	BaseURL     string            `json:"base_url"`    // Base URL for API calls
	MaxTokens   int               `json:"max_tokens"`  // Maximum tokens per request
	Temperature float32           `json:"temperature"` // Temperature setting (0.0-1.0)
	TopP        float32           `json:"top_p"`       // Top-p sampling parameter
	Timeout     time.Duration     `json:"timeout"`     // Request timeout
	Metadata    map[string]string `json:"metadata"`    // Additional configuration
}

Config holds configuration for LLM providers

func DefaultConfig

func DefaultConfig() Config

DefaultConfig returns default LLM configuration
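
A typical configuration flow might look like the following sketch (the default values returned by DefaultConfig are not specified here; "openai" and the model name are illustrative):

cfg := llm.DefaultConfig()
cfg.Provider = "openai"
cfg.Model = "gpt-4o-mini"
cfg.APIKey = os.Getenv("OPENAI_API_KEY")
cfg.Temperature = 0.2
cfg.Timeout = 30 * time.Second

provider := llm.GlobalRegistry.Create(cfg.Provider)
if err := provider.Configure(cfg); err != nil {
	log.Fatal(err)
}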

type DeepSeekProvider

type DeepSeekProvider struct {
	*BaseProvider // Embed base functionality
	// contains filtered or unexported fields
}

DeepSeekProvider implements Provider for DeepSeek (OpenAI-compatible API)

func (*DeepSeekProvider) Chat

func (p *DeepSeekProvider) Chat(ctx context.Context, messages []Message) (*ChatResponse, error)

Chat sends messages to DeepSeek and returns a response

func (*DeepSeekProvider) ChatWithTools

func (p *DeepSeekProvider) ChatWithTools(ctx context.Context, messages []Message, tools []Tool) (*ChatResponse, error)

ChatWithTools sends messages to DeepSeek with tool support and returns a response

func (*DeepSeekProvider) Close

func (p *DeepSeekProvider) Close() error

Close cleans up resources

func (*DeepSeekProvider) Configure

func (p *DeepSeekProvider) Configure(config Config) error

Configure configures the DeepSeek provider

func (*DeepSeekProvider) GetCapabilities

func (p *DeepSeekProvider) GetCapabilities() Capabilities

GetCapabilities returns DeepSeek capabilities

func (*DeepSeekProvider) GetModel

func (p *DeepSeekProvider) GetModel() string

GetModel returns the current model

func (*DeepSeekProvider) IsAvailable

func (p *DeepSeekProvider) IsAvailable() bool

IsAvailable checks if the provider is available

func (*DeepSeekProvider) ListModels

func (p *DeepSeekProvider) ListModels(ctx context.Context) ([]string, error)

ListModels returns available models from DeepSeek API

func (*DeepSeekProvider) StreamChat

func (p *DeepSeekProvider) StreamChat(ctx context.Context, messages []Message, callback StreamCallback) (*ChatResponse, error)

StreamChat sends messages to DeepSeek and streams the response via callback

func (*DeepSeekProvider) StreamChatWithTools

func (p *DeepSeekProvider) StreamChatWithTools(ctx context.Context, messages []Message, tools []Tool, callback StreamCallback) (*ChatResponse, error)

StreamChatWithTools sends messages to DeepSeek with tool support and streams the response via callback. NOTE: For reliability, we fall back to non-streaming when tools are present; this ensures tool calls are properly extracted and processed.

type FunctionCall

type FunctionCall struct {
	Name      string                 `json:"name"`      // Function name
	Arguments map[string]interface{} `json:"arguments"` // Function arguments
}

FunctionCall represents the function details in a tool call

type FunctionDef

type FunctionDef struct {
	Name        string                 `json:"name"`        // Function name
	Description string                 `json:"description"` // Function description
	Parameters  map[string]interface{} `json:"parameters"`  // JSON schema for parameters
}

FunctionDef defines a function that can be called by the LLM

type Message

type Message struct {
	Role      string                 `json:"role"`      // "user", "assistant", "system"
	Content   string                 `json:"content"`   // Message content
	Timestamp time.Time              `json:"timestamp"` // When message was created
	Metadata  map[string]interface{} `json:"metadata"`  // Additional context
}

Message represents a single message in a conversation

func TrimMessagesToContextSize

func TrimMessagesToContextSize(messages []Message, model string, reserveTokens int) []Message

TrimMessagesToContextSize intelligently trims message history using a priority-based sliding window.

Priority order (highest to lowest):

1. System prompt - always included in full (critical for model behavior)
2. Last user message - always included in full (API requirement)
3. Messages with tool calls - high priority for context
4. Recent history - a sliding window from newest to oldest

Strategy:

- Reserve space for the system prompt and the last message first
- Fill the remaining space with prioritized history
- This ensures optimal context usage while maintaining coherence
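
For example, a caller might trim a long conversation before each request, reserving headroom for the model's reply (the 1024-token reserve is an illustrative choice, not a recommendation from this package):

// history is the accumulated []llm.Message for the conversation.
trimmed := llm.TrimMessagesToContextSize(history, "gpt-4o", 1024)
resp, err := provider.Chat(ctx, trimmed)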

type OllamaProvider

type OllamaProvider struct {
	*BaseProvider // Embed base functionality
	// contains filtered or unexported fields
}

OllamaProvider implements Provider for Ollama (OpenAI-compatible API)

func (*OllamaProvider) Chat

func (p *OllamaProvider) Chat(ctx context.Context, messages []Message) (*ChatResponse, error)

Chat sends messages to Ollama and returns a response

func (*OllamaProvider) ChatWithTools

func (p *OllamaProvider) ChatWithTools(ctx context.Context, messages []Message, tools []Tool) (*ChatResponse, error)

ChatWithTools sends messages to Ollama with tool support and returns a response

func (*OllamaProvider) Close

func (p *OllamaProvider) Close() error

Close cleans up resources

func (*OllamaProvider) Configure

func (p *OllamaProvider) Configure(config Config) error

Configure configures the Ollama provider

func (*OllamaProvider) GetCapabilities

func (p *OllamaProvider) GetCapabilities() Capabilities

GetCapabilities returns Ollama capabilities

func (*OllamaProvider) GetModel

func (p *OllamaProvider) GetModel() string

GetModel returns the current model

func (*OllamaProvider) IsAvailable

func (p *OllamaProvider) IsAvailable() bool

IsAvailable checks if the provider is available

func (*OllamaProvider) ListModels

func (p *OllamaProvider) ListModels(ctx context.Context) ([]string, error)

ListModels returns available models from Ollama API

func (*OllamaProvider) StreamChat

func (p *OllamaProvider) StreamChat(ctx context.Context, messages []Message, callback StreamCallback) (*ChatResponse, error)

StreamChat sends messages to Ollama and streams the response via callback

func (*OllamaProvider) StreamChatWithTools

func (p *OllamaProvider) StreamChatWithTools(ctx context.Context, messages []Message, tools []Tool, callback StreamCallback) (*ChatResponse, error)

StreamChatWithTools sends messages to Ollama with tool support and streams the response via callback. NOTE: For reliability, we fall back to non-streaming when tools are present; this ensures tool calls are properly extracted and processed.

type OpenAIProvider

type OpenAIProvider struct {
	*BaseProvider // Embed base functionality
	// contains filtered or unexported fields
}

OpenAIProvider implements Provider for OpenAI's GPT models

func (*OpenAIProvider) Chat

func (p *OpenAIProvider) Chat(ctx context.Context, messages []Message) (*ChatResponse, error)

Chat sends messages to OpenAI and returns a response

func (*OpenAIProvider) ChatWithTools

func (p *OpenAIProvider) ChatWithTools(ctx context.Context, messages []Message, tools []Tool) (*ChatResponse, error)

ChatWithTools sends messages to OpenAI with tool support and returns a response

func (*OpenAIProvider) Close

func (p *OpenAIProvider) Close() error

Close cleans up resources

func (*OpenAIProvider) Configure

func (p *OpenAIProvider) Configure(config Config) error

Configure configures the OpenAI provider

func (*OpenAIProvider) GetCapabilities

func (p *OpenAIProvider) GetCapabilities() Capabilities

GetCapabilities returns OpenAI capabilities

func (*OpenAIProvider) GetModel

func (p *OpenAIProvider) GetModel() string

GetModel returns the current model

func (*OpenAIProvider) IsAvailable

func (p *OpenAIProvider) IsAvailable() bool

IsAvailable checks if the provider is available

func (*OpenAIProvider) ListModels

func (p *OpenAIProvider) ListModels(ctx context.Context) ([]string, error)

ListModels returns available models from OpenAI API

func (*OpenAIProvider) StreamChat

func (p *OpenAIProvider) StreamChat(ctx context.Context, messages []Message, callback StreamCallback) (*ChatResponse, error)

StreamChat sends messages to OpenAI and streams the response via callback. It uses direct streaming (not tool-enabled) for optimal continuation performance.

func (*OpenAIProvider) StreamChatWithTools

func (p *OpenAIProvider) StreamChatWithTools(ctx context.Context, messages []Message, tools []Tool, callback StreamCallback) (*ChatResponse, error)

StreamChatWithTools sends messages to OpenAI with tool support and streams the response via callback

type Provider

type Provider interface {
	// Chat sends messages to the LLM and returns a response
	Chat(ctx context.Context, messages []Message) (*ChatResponse, error)

	// ChatWithTools sends messages to the LLM with available tools and returns a response
	ChatWithTools(ctx context.Context, messages []Message, tools []Tool) (*ChatResponse, error)

	// StreamChat sends messages to the LLM and streams the response via callback
	StreamChat(ctx context.Context, messages []Message, callback StreamCallback) (*ChatResponse, error)

	// StreamChatWithTools sends messages to the LLM with available tools and streams the response via callback
	StreamChatWithTools(ctx context.Context, messages []Message, tools []Tool, callback StreamCallback) (*ChatResponse, error)

	// GetCapabilities returns the provider's capabilities
	GetCapabilities() Capabilities

	// Configure configures the provider with given settings
	Configure(config Config) error

	// GetModel returns the model name being used
	GetModel() string

	// ListModels returns available models from the provider (via API if possible)
	ListModels(ctx context.Context) ([]string, error)

	// IsAvailable checks if the provider is available and configured
	IsAvailable() bool

	// Close cleans up resources
	Close() error
}

Provider defines the interface for LLM providers
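
Because every provider satisfies this interface, calling code can stay provider-agnostic. A minimal sketch (the error text and message fields are illustrative):

func ask(ctx context.Context, p llm.Provider, question string) (string, error) {
	if !p.IsAvailable() {
		return "", fmt.Errorf("provider not configured")
	}
	resp, err := p.Chat(ctx, []llm.Message{{
		Role:      "user",
		Content:   question,
		Timestamp: time.Now(),
	}})
	if err != nil {
		return "", err
	}
	return resp.Content, nil
}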

func NewAnthropicProvider

func NewAnthropicProvider() Provider

NewAnthropicProvider creates a new Anthropic provider

func NewDeepSeekProvider

func NewDeepSeekProvider() Provider

NewDeepSeekProvider creates a new DeepSeek provider

func NewOllamaProvider

func NewOllamaProvider() Provider

NewOllamaProvider creates a new Ollama provider

func NewOpenAIProvider

func NewOpenAIProvider() Provider

NewOpenAIProvider creates a new OpenAI provider

func NewYandexProvider

func NewYandexProvider() Provider

NewYandexProvider creates a new Yandex provider

type ProviderRegistry

type ProviderRegistry struct {
	// contains filtered or unexported fields
}

ProviderRegistry manages available LLM providers

func NewProviderRegistry

func NewProviderRegistry() *ProviderRegistry

NewProviderRegistry creates a new provider registry

func (*ProviderRegistry) Create

func (r *ProviderRegistry) Create(name string) Provider

Create creates a provider instance by name

func (*ProviderRegistry) List

func (r *ProviderRegistry) List() []string

List returns available provider names

func (*ProviderRegistry) Register

func (r *ProviderRegistry) Register(name string, factory func() Provider)

Register registers a new provider factory
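
A sketch of registering a custom factory, for example a test double (newMockProvider is a hypothetical constructor, not part of this package):

llm.GlobalRegistry.Register("mock", func() llm.Provider {
	return newMockProvider() // hypothetical
})
p := llm.GlobalRegistry.Create("mock")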

type StreamCallback

type StreamCallback func(chunk StreamChunk) error

StreamCallback is called for each chunk of streaming response

type StreamChunk

type StreamChunk struct {
	Content     string            `json:"content"`      // Partial content
	Delta       string            `json:"delta"`        // New content since last chunk
	IsComplete  bool              `json:"is_complete"`  // Whether this is the final chunk
	Usage       *TokenUsage       `json:"usage"`        // Token usage (only on final chunk)
	Metadata    map[string]string `json:"metadata"`     // Additional metadata
	GeneratedAt time.Time         `json:"generated_at"` // When chunk was generated
}

StreamChunk represents a chunk of streaming response
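
A sketch of a streaming call that prints deltas as they arrive (whether a non-nil callback error aborts the stream is assumed, not documented here):

resp, err := provider.StreamChat(ctx, messages, func(chunk llm.StreamChunk) error {
	fmt.Print(chunk.Delta) // Delta holds only the new text since the last chunk
	if chunk.IsComplete && chunk.Usage != nil {
		fmt.Printf("\n[total tokens: %d]\n", chunk.Usage.TotalTokens)
	}
	return nil
})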

type TokenUsage

type TokenUsage struct {
	PromptTokens     int     `json:"prompt_tokens"`     // Tokens in the prompt
	CompletionTokens int     `json:"completion_tokens"` // Tokens in the completion
	TotalTokens      int     `json:"total_tokens"`      // Total tokens used
	Cost             float64 `json:"cost"`              // Estimated cost (if available)
}

TokenUsage tracks token consumption

type Tool

type Tool struct {
	Type     string      `json:"type"`     // Tool type (usually "function")
	Function FunctionDef `json:"function"` // Function definition
}

Tool defines a tool/function that can be called by the LLM
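
A sketch of defining a tool; the get_weather function and its schema are illustrative, with Parameters following the JSON Schema convention noted in FunctionDef:

weather := llm.Tool{
	Type: "function",
	Function: llm.FunctionDef{
		Name:        "get_weather",
		Description: "Return the current weather for a city",
		Parameters: map[string]interface{}{
			"type": "object",
			"properties": map[string]interface{}{
				"city": map[string]interface{}{"type": "string"},
			},
			"required": []string{"city"},
		},
	},
}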

type ToolCall

type ToolCall struct {
	ID       string       `json:"id"`       // Unique ID for this tool call
	Type     string       `json:"type"`     // Type of call (usually "function")
	Function FunctionCall `json:"function"` // Function call details
}

ToolCall represents a function/tool call requested by the LLM
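
Continuing the weather sketch above, a caller would inspect ToolCalls on the response and dispatch by function name (the dispatch logic is illustrative):

resp, err := provider.ChatWithTools(ctx, messages, []llm.Tool{weather})
if err != nil {
	log.Fatal(err)
}
for _, call := range resp.ToolCalls {
	if call.Function.Name == "get_weather" {
		city, _ := call.Function.Arguments["city"].(string)
		fmt.Println("model requested weather for", city)
	}
}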

type ToolCallFilter

type ToolCallFilter struct {
	// contains filtered or unexported fields
}

ToolCallFilter helps providers filter out tool-call JSON from streaming content. This provides a provider-agnostic way to handle the common issue where LLMs stream raw tool-call JSON that should not be displayed to users.

func NewToolCallFilter

func NewToolCallFilter() *ToolCallFilter

NewToolCallFilter creates a new tool call filter for streaming content

func (*ToolCallFilter) Reset

func (f *ToolCallFilter) Reset()

Reset resets the filter state (useful when starting a new request)

func (*ToolCallFilter) ShouldFilterChunk

func (f *ToolCallFilter) ShouldFilterChunk(chunkStr string) bool

ShouldFilterChunk determines if a streaming chunk should be filtered out because it's part of tool call JSON that shouldn't be shown to users. Returns true if the chunk should be filtered (not shown), false otherwise.
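
A sketch of wiring the filter into a streaming callback (filtering on chunk.Delta is an assumption; a provider may filter a different field):

filter := llm.NewToolCallFilter()
resp, err := provider.StreamChatWithTools(ctx, messages, tools, func(chunk llm.StreamChunk) error {
	if filter.ShouldFilterChunk(chunk.Delta) {
		return nil // part of tool-call JSON; suppress it
	}
	fmt.Print(chunk.Delta)
	return nil
})
// Call filter.Reset() before reusing the filter for another request.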

type YandexProvider

type YandexProvider struct {
	*BaseProvider // Embed base functionality
	// contains filtered or unexported fields
}

YandexProvider implements Provider for YandexGPT (OpenAI-compatible API)

func (*YandexProvider) Chat

func (p *YandexProvider) Chat(ctx context.Context, messages []Message) (*ChatResponse, error)

Chat sends messages to Yandex and returns a response

func (*YandexProvider) ChatWithTools

func (p *YandexProvider) ChatWithTools(ctx context.Context, messages []Message, tools []Tool) (*ChatResponse, error)

ChatWithTools sends messages to Yandex with tools (not supported)

func (*YandexProvider) Close

func (p *YandexProvider) Close() error

Close cleans up resources

func (*YandexProvider) Configure

func (p *YandexProvider) Configure(config Config) error

Configure configures the Yandex provider

func (*YandexProvider) GetCapabilities

func (p *YandexProvider) GetCapabilities() Capabilities

GetCapabilities returns Yandex capabilities

func (*YandexProvider) GetModel

func (p *YandexProvider) GetModel() string

GetModel returns the current model

func (*YandexProvider) IsAvailable

func (p *YandexProvider) IsAvailable() bool

IsAvailable checks if the provider is available

func (*YandexProvider) ListModels

func (p *YandexProvider) ListModels(ctx context.Context) ([]string, error)

ListModels returns available models from Yandex API

func (*YandexProvider) StreamChat

func (p *YandexProvider) StreamChat(ctx context.Context, messages []Message, callback StreamCallback) (*ChatResponse, error)

StreamChat sends messages to Yandex and streams the response via callback

func (*YandexProvider) StreamChatWithTools

func (p *YandexProvider) StreamChatWithTools(ctx context.Context, messages []Message, tools []Tool, callback StreamCallback) (*ChatResponse, error)

StreamChatWithTools sends messages to Yandex with tool support and streams the response via callback. NOTE: For reliability, we fall back to non-streaming when tools are present; this ensures tool calls are properly extracted and processed.

