Documentation
¶
Index ¶
- Constants
- Variables
- type DataType
- type JSONSchema
- type Ollama
- func (g *Ollama) Embed(request *embed.Request) (*embed.Response, error)
- func (g *Ollama) EmbedDocument(request *embed.DocumentRequest) (*embed.DocumentResponse, error)
- func (g *Ollama) Generator(options ...gen.Option) *gen.Generator
- func (g *Ollama) Provider() string
- func (g *Ollama) SetLogger(logger *slog.Logger) *Ollama
Constants ¶
View Source
// Provider names this backend; it is stamped into the Provider field of
// every embed.Model and gen.Model declared in this package.
const Provider = "Ollama"
Variables ¶
View Source
var EmbedModel_bge_large = embed.Model{ Provider: Provider, Name: "bge-large", Description: "Embedding model from BAAI mapping texts to vectors, https://ollama.com/library/bge-large", InputMaxTokens: 512, OutputDimensions: 1024, }
View Source
var EmbedModel_bge_m3 = embed.Model{ Provider: Provider, Name: "bge-m3", Description: "GE-M3 is a new model from BAAI distinguished for its versatility in Multi-Functionality, Multi-Linguality, and Multi-Granularity. https://ollama.com/library/bge-m3", InputMaxTokens: 8192, OutputDimensions: 1024, }
View Source
var EmbedModel_mxbai_embed_large = embed.Model{ Provider: Provider, Name: "mxbai-embed-large", Description: "State-of-the-art large embedding model from mixedbread.ai, https://huggingface.co/mixedbread-ai/mxbai-embed-large-v1", InputMaxTokens: 512, OutputDimensions: 1024, }
View Source
var EmbedModel_nomic_embed_text = embed.Model{ Provider: Provider, Name: "nomic-embed-text", Description: "Most capable embedding Model for both english and non-english tasks, https://huggingface.co/nomic-ai/nomic-embed-text-v1.5", InputMaxTokens: 2048, OutputDimensions: 768, }
View Source
var EmbedModel_paraphrase_multilingual = embed.Model{ Provider: Provider, Name: "paraphrase-multilingual", Description: "Sentence-transformers model that can be used for tasks like clustering or semantic search., https://ollama.com/library/paraphrase-multilingual", InputMaxTokens: 512, OutputDimensions: 768, }
View Source
var EmbedModel_qwen_3_06b = embed.Model{ Provider: Provider, Name: "qwen3-embedding:0.6b", InputMaxTokens: 32_768, OutputDimensions: 4096, }
View Source
var EmbedModel_qwen_3_8b = embed.Model{ Provider: Provider, Name: "qwen3-embedding:8b", InputMaxTokens: 32_768, OutputDimensions: 4096, }
View Source
var EmbedModels = map[string]embed.Model{ EmbedModel_nomic_embed_text.Name: EmbedModel_nomic_embed_text, EmbedModel_mxbai_embed_large.Name: EmbedModel_mxbai_embed_large, EmbedModel_paraphrase_multilingual.Name: EmbedModel_paraphrase_multilingual, EmbedModel_bge_large.Name: EmbedModel_bge_large, EmbedModel_bge_m3.Name: EmbedModel_bge_m3, }
View Source
var GenModel_gemma2 = gen.Model{ Provider: Provider, Name: "gemma2", Description: "Google Gemma 2 is a high-performing and efficient model available in three sizes: 9B", }
View Source
var GenModel_gemma2_27b = gen.Model{ Provider: Provider, Name: "gemma2:27b", Description: "Google Gemma 2 is a high-performing and efficient model available in three sizes: 2B, 9B, and 27B.", }
View Source
var GenModel_gemma2_2b = gen.Model{ Provider: Provider, Name: "gemma2:2b", Description: "Google Gemma 2 is a high-performing and efficient model available in three sizes: 2B, 9B, and 27B.", }
View Source
var GenModel_gemma2_9b = gen.Model{ Provider: Provider, Name: "gemma2:9b", Description: "Google Gemma 2 is a high-performing and efficient model available in three sizes: 2B, 9B, and 27B.", }
View Source
var GenModel_glm_4_7_flash_q4 = gen.Model{ Provider: Provider, Name: "glm-4.7-flash:q4_K_M", }
View Source
var GenModel_llama_3_1_405b = gen.Model{ Provider: Provider, Name: "llama3.1:405b", Description: "Llama 3.1 is a new state-of-the-art model from Meta available in 405B parameter sizes.", }
View Source
var GenModel_llama_3_1_70b = gen.Model{ Provider: Provider, Name: "llama3.1:70b", Description: "Llama 3.1 is a new state-of-the-art model from Meta available in 70B parameter sizes.", }
View Source
var GenModel_llama_3_1_8b = gen.Model{ Provider: Provider, Name: "llama3.1:8b", Description: "Llama 3.1 is a new state-of-the-art model from Meta available in 8Bparameter sizes.", }
View Source
var GenModel_llama_3_2 = gen.Model{ Provider: Provider, Name: "llama3.2", Description: "Meta's Llama 3.2 goes small with 3B models. alias for llama3.2:3b", }
View Source
var GenModel_llama_3_2_1b = gen.Model{ Provider: Provider, Name: "llama3.2:1b", Description: "Meta's Llama 3.2 goes small with 1B models.", }
View Source
var GenModel_llama_3_2_3b = gen.Model{ Provider: Provider, Name: "llama3.2:3b", Description: "Meta's Llama 3.2 goes small with 3B models.", }
View Source
var GenModel_llama_3_2_vision_11b = gen.Model{ Provider: Provider, Name: "llama3.2-vision:11b", Description: "Llama 3.2 Vision is a collection of instruction-tuned image reasoning generative models in 11B sizes.", }
View Source
var GenModel_llama_3_2_vision_90b = gen.Model{ Provider: Provider, Name: "llama3.2-vision:90b", Description: "Llama 3.2 Vision is a collection of instruction-tuned image reasoning generative models in 90B sizes.", }
View Source
var GenModel_llama_3_3 = gen.Model{ Provider: Provider, Name: "llama3.3", Description: "New state of the art 70B model. Llama 3.3 70B offers similar performance compared to Llama 3.1 405B model.", }
View Source
var GenModels = map[string]gen.Model{ GenModel_llama_3_3.Name: GenModel_llama_3_3, GenModel_llama_3_2_vision_11b.Name: GenModel_llama_3_2_vision_11b, GenModel_llama_3_2_vision_90b.Name: GenModel_llama_3_2_vision_90b, GenModel_llama_3_2_3b.Name: GenModel_llama_3_2_3b, GenModel_llama_3_2_1b.Name: GenModel_llama_3_2_1b, GenModel_llama_3_1_8b.Name: GenModel_llama_3_1_8b, GenModel_llama_3_1_70b.Name: GenModel_llama_3_1_70b, GenModel_llama_3_1_405b.Name: GenModel_llama_3_1_405b, }
Functions ¶
This section is empty.
Types ¶
type JSONSchema ¶ added in v0.8.0
// JSONSchema models the subset of JSON Schema keywords this package
// needs to describe structured data. Nested schemas are expressed
// recursively through Properties, Items, Defs, and AdditionalProperties.
type JSONSchema struct {
	// Ref is a JSON Schema reference such as "#/$defs/..."; when set it
	// overrides everything else in this schema node.
	Ref string `json:"$ref,omitempty"` // #/$defs/... etc, overrides everything else
	// Defs holds named sub-schemas that Ref values can point to.
	Defs map[string]*JSONSchema `json:"$defs,omitempty"` // for $ref
	// Type specifies the data type of the schema. OpenAI uses []string{Type, Null} to represent nullable types.
	// Declared as `any` so it can carry either a single string or a []string.
	Type any `json:"type,omitempty"`
	// Description is the description of the schema.
	Description string `json:"description,omitempty"`
	Format string `json:"format,omitempty"` // Format of the data, e.g. "email", "date-time", etc.
	// Enum is used to restrict a value to a fixed set of values. It must be an array with at least
	// one element, where each element is unique. You will probably only use this with strings.
	Enum []any `json:"enum,omitempty"`
	// Properties describes the properties of an object, if the schema type is Object.
	Properties map[string]JSONSchema `json:"properties,omitempty"`
	// Required specifies which properties are required, if the schema type is Object.
	Required []string `json:"required,omitempty"`
	// Items specifies which data type an array contains, if the schema type is Array.
	Items *JSONSchema `json:"items,omitempty"`
	// AdditionalProperties is used to control the handling of properties in an object
	// that are not explicitly defined in the properties section of the schema. example:
	// additionalProperties: true
	// additionalProperties: false
	// additionalProperties: jsonschema.JSONSchema{Type: jsonschema.String}
	// Declared as `any` so it can carry a bool or a schema value.
	AdditionalProperties any `json:"additionalProperties,omitempty"`
}
type Ollama ¶
func (*Ollama) EmbedDocument ¶ added in v1.0.0
func (g *Ollama) EmbedDocument(request *embed.DocumentRequest) (*embed.DocumentResponse, error)
Click to show internal directories.
Click to hide internal directories.