Documentation
Index
type EmbeddingConfig
type LLMConfig
type LoggingConfig
type ModelConfig
type PromptConfig
type Provider
type ProviderConfig
Constants
This section is empty.
Variables
This section is empty.
Functions
This section is empty.
Types
type EmbeddingConfig
type EmbeddingConfig struct {
	Model     string `json:"embedding_model,omitempty" toml:"embedding_model" comment:"Model used for embeddings"`
	ChunkSize int    `json:"chunk_size,omitempty" toml:"chunk_size,commented" comment:"Number of characters per chunk"`
	Overlap   int    `` /* 148-byte string literal not displayed */
	TopK      int    `json:"top_k,omitempty" toml:"top_k,commented" comment:"Number of chunks to retrieve during RAG"`
}
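
As a rough sketch, the struct can be populated and serialized like this. The type is mirrored locally so the snippet compiles on its own; the Overlap tag is guessed (the real tag is not displayed above) and every value is illustrative, not a package default:

package main

import (
	"encoding/json"
	"fmt"
)

// Local mirror of EmbeddingConfig; in real use, import the package
// that defines it instead of redeclaring it.
type EmbeddingConfig struct {
	Model     string `json:"embedding_model,omitempty"`
	ChunkSize int    `json:"chunk_size,omitempty"`
	Overlap   int    `json:"overlap,omitempty"` // assumed tag; not displayed in the docs above
	TopK      int    `json:"top_k,omitempty"`
}

func main() {
	cfg := EmbeddingConfig{
		Model:     "nomic-embed-text", // illustrative model name
		ChunkSize: 1000,
		Overlap:   200,
		TopK:      5,
	}
	out, err := json.MarshalIndent(cfg, "", "  ")
	if err != nil {
		panic(err)
	}
	fmt.Println(string(out))
}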
type LLMConfig
type LLMConfig struct {
	DefaultModel string           `json:"default_model,omitempty" toml:"default_model" comment:"Default model to use"`
	Providers    []ProviderConfig `` /* 258-byte string literal not displayed */
	Models       []ModelConfig    `` /* 315-byte string literal not displayed */
}
type LoggingConfig
type ModelConfig
type ModelConfig struct {
	ID          string   `json:"id,omitempty" toml:"id,commented" comment:"Model identifier"`
	Context     int      `json:"context,omitempty" toml:"context,commented" comment:"Maximum context length in tokens"`
	Temperature *float64 `json:"temperature,omitempty" toml:"temperature,commented" comment:"Optional model-level temperature override"`
}
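
Temperature is a *float64 so that nil ("no override") can be distinguished from an explicit 0. A minimal sketch of that pattern, with the type mirrored locally (tags omitted) and the helper effectiveTemperature purely hypothetical:

package main

import "fmt"

// Local mirror of ModelConfig so the sketch compiles standalone.
type ModelConfig struct {
	ID          string
	Context     int
	Temperature *float64
}

// effectiveTemperature is a hypothetical helper: a nil Temperature means
// "unset", so the provider default applies; a non-nil value, even 0, wins.
func effectiveTemperature(m ModelConfig, providerDefault float64) float64 {
	if m.Temperature != nil {
		return *m.Temperature
	}
	return providerDefault
}

func main() {
	t := 0.0 // explicit zero, distinct from unset
	withOverride := ModelConfig{ID: "llama3", Context: 8192, Temperature: &t}
	noOverride := ModelConfig{ID: "llama3", Context: 8192}

	fmt.Println(effectiveTemperature(withOverride, 0.7)) // prints 0
	fmt.Println(effectiveTemperature(noOverride, 0.7))   // prints 0.7
}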
type PromptConfig
type Provider
type Provider struct {
	Client          *llm.Client
	Session         *llm.ChatSession
	AvailableModels []string
}
type ProviderConfig
type ProviderConfig struct {
	BaseURL     string   `` /* 133-byte string literal not displayed */
	APIKey      string   `json:"api_key,omitempty" toml:"api_key,commented" comment:"Optional API key if required"`
	Temperature *float64 `json:"temperature,omitempty" toml:"temperature,commented" comment:"Default temperature for this provider (optional)"`
}
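
Pulling the pieces together, a hedged sketch of assembling an LLMConfig from ProviderConfig and ModelConfig values. The types are mirrored here without their struct tags, and the endpoint and model names are placeholders rather than values documented by the package:

package main

import "fmt"

// Local mirrors of the config structs above; tags omitted for brevity.
type ProviderConfig struct {
	BaseURL     string
	APIKey      string
	Temperature *float64
}

type ModelConfig struct {
	ID          string
	Context     int
	Temperature *float64
}

type LLMConfig struct {
	DefaultModel string
	Providers    []ProviderConfig
	Models       []ModelConfig
}

func main() {
	temp := 0.7
	cfg := LLMConfig{
		DefaultModel: "llama3", // placeholder model name
		Providers: []ProviderConfig{{
			BaseURL:     "http://localhost:11434", // placeholder endpoint
			Temperature: &temp,                    // provider-wide default
		}},
		Models: []ModelConfig{{
			ID:      "llama3",
			Context: 8192,
		}},
	}
	fmt.Printf("%+v\n", cfg)
}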