Documentation
¶
Index ¶
Constants ¶
View Source
const EmbedDimensions = 768
Variables ¶
View Source
var EmbedMode_multilang_002 = bellman.EmbedModel{
Name: "text-multilingual-embedding-002",
Description: "see https://cloud.google.com/vertex-ai/generative-ai/docs/model-reference/text-embeddings-api",
InputMaxTokens: 2048,
OutputDimensions: 768,
}
View Source
var EmbedModel_text_004 = bellman.EmbedModel{
Name: "text-embedding-004",
Description: "see https://cloud.google.com/vertex-ai/generative-ai/docs/model-reference/text-embeddings-api",
InputMaxTokens: 2048,
OutputDimensions: 768,
}
View Source
var EmbedModel_text_005 = bellman.EmbedModel{
Name: "text-embedding-005",
Description: "see https://cloud.google.com/vertex-ai/generative-ai/docs/model-reference/text-embeddings-api",
InputMaxTokens: 2048,
OutputDimensions: 768,
}
View Source
var EmbedModel_text_gecko_001 = bellman.EmbedModel{
Name: "textembedding-gecko@001",
Description: "see https://cloud.google.com/vertex-ai/generative-ai/docs/model-reference/text-embeddings-api",
InputMaxTokens: 2048,
OutputDimensions: 768,
}
View Source
var EmbedModel_text_gecko_003 = bellman.EmbedModel{
Name: "textembedding-gecko@003",
Description: "see https://cloud.google.com/vertex-ai/generative-ai/docs/model-reference/text-embeddings-api",
InputMaxTokens: 2048,
OutputDimensions: 768,
}
View Source
var EmbedModel_text_gecko_multilang_001 = bellman.EmbedModel{
Name: "textembedding-gecko-multilingual@001",
Description: "see https://cloud.google.com/vertex-ai/generative-ai/docs/model-reference/text-embeddings-api",
InputMaxTokens: 2048,
OutputDimensions: 768,
}
View Source
var EmbedModels = map[string]bellman.EmbedModel{ EmbedModel_text_005.Name: EmbedModel_text_005, EmbedModel_text_004.Name: EmbedModel_text_004, EmbedMode_multilang_002.Name: EmbedMode_multilang_002, EmbedModel_text_gecko_001.Name: EmbedModel_text_gecko_001, EmbedModel_text_gecko_003.Name: EmbedModel_text_gecko_003, EmbedModel_text_gecko_multilang_001.Name: EmbedModel_text_gecko_multilang_001, }
View Source
var GenModel_gemini_1_5_flash = bellman.GenModel{ Name: "gemini-1.5-flash", Description: "Fast and versatile performance across a diverse variety of tasks", InputContentTypes: nil, InputMaxToken: 0, OutputMaxToken: 0, SupportTools: false, SupportStructuredOutput: false, }
View Source
var GenModel_gemini_1_5_flash_001 = bellman.GenModel{ Name: "gemini-1.5-flash-001", Description: "Fast and versatile performance across a diverse variety of tasks", InputContentTypes: nil, InputMaxToken: 0, OutputMaxToken: 0, SupportTools: false, SupportStructuredOutput: false, }
View Source
var GenModel_gemini_1_5_flash_002 = bellman.GenModel{ Name: "gemini-1.5-flash-002", Description: "Fast and versatile performance across a diverse variety of tasks", InputContentTypes: nil, InputMaxToken: 0, OutputMaxToken: 0, SupportTools: false, SupportStructuredOutput: false, }
View Source
var GenModel_gemini_1_5_flash_8b = bellman.GenModel{ Name: "gemini-1.5-flash-8b", Description: "High volume and lower intelligence tasks", InputContentTypes: nil, InputMaxToken: 0, OutputMaxToken: 0, SupportTools: false, SupportStructuredOutput: false, }
View Source
var GenModel_gemini_1_5_flash_8b_001 = bellman.GenModel{ Name: "gemini-1.5-flash-8b-001", Description: "High volume and lower intelligence tasks", InputContentTypes: nil, InputMaxToken: 0, OutputMaxToken: 0, SupportTools: false, SupportStructuredOutput: false, }
View Source
var GenModel_gemini_1_5_pro = bellman.GenModel{ Name: "gemini-1.5-pro", Description: "Complex reasoning tasks requiring more intelligence", InputContentTypes: nil, InputMaxToken: 0, OutputMaxToken: 0, SupportTools: false, SupportStructuredOutput: false, }
View Source
var GenModel_gemini_1_5_pro_001 = bellman.GenModel{ Name: "gemini-1.5-pro-001", Description: "Complex reasoning tasks requiring more intelligence", InputContentTypes: nil, InputMaxToken: 0, OutputMaxToken: 0, SupportTools: false, SupportStructuredOutput: false, }
View Source
var GenModel_gemini_1_5_pro_002 = bellman.GenModel{ Name: "gemini-1.5-pro-002", Description: "Complex reasoning tasks requiring more intelligence", InputContentTypes: nil, InputMaxToken: 0, OutputMaxToken: 0, SupportTools: false, SupportStructuredOutput: false, }
View Source
var GenModel_gemini_Experiment_114 = bellman.GenModel{ Name: "gemini-exp-1114", Description: "", InputContentTypes: nil, InputMaxToken: 0, OutputMaxToken: 0, SupportTools: false, SupportStructuredOutput: false, }
View Source
var GenModels = map[string]bellman.GenModel{ GenModel_gemini_Experiment_114.Name: GenModel_gemini_Experiment_114, GenModel_gemini_1_5_flash.Name: GenModel_gemini_1_5_flash, GenModel_gemini_1_5_flash_001.Name: GenModel_gemini_1_5_flash_001, GenModel_gemini_1_5_flash_002.Name: GenModel_gemini_1_5_flash_002, GenModel_gemini_1_5_flash_8b.Name: GenModel_gemini_1_5_flash_8b, GenModel_gemini_1_5_flash_8b_001.Name: GenModel_gemini_1_5_flash_8b_001, GenModel_gemini_1_5_pro.Name: GenModel_gemini_1_5_pro, GenModel_gemini_1_5_pro_002.Name: GenModel_gemini_1_5_pro_002, GenModel_gemini_1_5_pro_001.Name: GenModel_gemini_1_5_pro_001, }
Functions ¶
This section is empty.
Types ¶
type EmbedType ¶
type EmbedType string
const EmbedTypeClassification EmbedType = "CLASSIFICATION"
const EmbedTypeClustring EmbedType = "CLUSTERING"
const EmbedTypeCode EmbedType = "CODE_RETRIEVAL_QUERY"
const EmbedTypeDocument EmbedType = "RETRIEVAL_DOCUMENT"
const EmbedTypeQA EmbedType = "QUESTION_ANSWERING"
const EmbedTypeQuery EmbedType = "RETRIEVAL_QUERY"
const EmbedTypeSimilarity EmbedType = "SEMANTIC_SIMILARITY"
const EmbedTypeVerification EmbedType = "FACT_VERIFICATION"
type Google ¶
type Google struct {
// contains filtered or unexported fields
}
func New ¶
func New(config GoogleConfig) (*Google, error)
type GoogleConfig ¶
type GoogleEmbedRequest ¶
type GoogleEmbedResponse ¶
Click to show internal directories.
Click to hide internal directories.