gemini

package
v0.29.0 Latest
Warning

This package is not in the latest version of its module.

Go to latest
Published: Aug 11, 2025 License: MIT Imports: 11 Imported by: 0

Documentation

Index

Constants

This section is empty.

Variables

This section is empty.

Functions

This section is empty.

Types

type ChatModel

type ChatModel struct {
	// contains filtered or unexported fields
}

func NewChatModel

func NewChatModel(_ context.Context, cfg *Config) (*ChatModel, error)

NewChatModel creates a new Gemini chat model instance.

Parameters:

  • ctx: The context for the operation (note: per the signature above, the context argument is currently ignored by this constructor)
  • cfg: Configuration for the Gemini model

Returns:

  • *ChatModel: A concrete chat model instance implementing the model.ChatModel interface
  • error: Any error that occurred during creation

Example:

model, err := gemini.NewChatModel(ctx, &gemini.Config{
    Client: client,
    Model: "gemini-pro",
})

func (*ChatModel) BindForcedTools

func (cm *ChatModel) BindForcedTools(tools []*schema.ToolInfo) error

func (*ChatModel) BindTools

func (cm *ChatModel) BindTools(tools []*schema.ToolInfo) error

func (*ChatModel) Generate

func (cm *ChatModel) Generate(ctx context.Context, input []*schema.Message, opts ...model.Option) (message *schema.Message, err error)

func (*ChatModel) GetType

func (cm *ChatModel) GetType() string

func (*ChatModel) IsCallbacksEnabled

func (cm *ChatModel) IsCallbacksEnabled() bool

func (*ChatModel) Stream

func (cm *ChatModel) Stream(ctx context.Context, input []*schema.Message, opts ...model.Option) (result *schema.StreamReader[*schema.Message], err error)

func (*ChatModel) WithTools

func (cm *ChatModel) WithTools(tools []*schema.ToolInfo) (model.ToolCallingChatModel, error)

type Config

type Config struct {
	// Client is the Gemini API client instance
	// Required for making API calls to Gemini
	Client *genai.Client

	// Model specifies which Gemini model to use
	// Examples: "gemini-pro", "gemini-pro-vision", "gemini-1.5-flash"
	Model string

	// MaxTokens limits the maximum number of tokens in the response
	// Optional. Example: maxTokens := 100
	MaxTokens *int

	// Temperature controls randomness in responses
	// Range: [0.0, 1.0], where 0.0 is more focused and 1.0 is more creative
	// Optional. Example: temperature := float32(0.7)
	Temperature *float32

	// TopP controls diversity via nucleus sampling
	// Range: [0.0, 1.0], where 1.0 disables nucleus sampling
	// Optional. Example: topP := float32(0.95)
	TopP *float32

	// TopK controls diversity by limiting the top K tokens to sample from
	// Optional. Example: topK := int32(40)
	TopK *int32

	// ResponseSchema defines the structure for JSON responses
	// Optional. Used when you want structured output in JSON format
	ResponseSchema *openapi3.Schema

	// EnableCodeExecution allows the model to execute code
	// Warning: Be cautious with code execution in production
	// Optional. Default: false
	EnableCodeExecution bool

	// SafetySettings configures content filtering for different harm categories
	// Controls the model's filtering behavior for potentially harmful content
	// Optional.
	SafetySettings []*genai.SafetySetting
}

Config contains the configuration options for the Gemini model

Jump to

Keyboard shortcuts

? : This menu
/ : Search site
f or F : Jump to
y or Y : Canonical URL