Documentation
¶
Overview ¶
mistral implements an API client for the Mistral AI API. https://docs.mistral.ai/api/
Index ¶
- func GenerateRequest(model string, session *schema.Conversation, opts ...opt.Opt) (any, error)
- func WithFrequencyPenalty(value float64) opt.Opt
- func WithJSONOutput(schema *jsonschema.Schema) opt.Opt
- func WithMaxTokens(value uint) opt.Opt
- func WithPresencePenalty(value float64) opt.Opt
- func WithSafePrompt() opt.Opt
- func WithSeed(value uint) opt.Opt
- func WithStopSequences(values ...string) opt.Opt
- func WithSystemPrompt(value string) opt.Opt
- func WithTemperature(value float64) opt.Opt
- func WithToolChoiceAny() opt.Opt
- func WithToolChoiceAuto() opt.Opt
- func WithToolChoiceNone() opt.Opt
- func WithToolChoiceRequired() opt.Opt
- func WithTopP(value float64) opt.Opt
- type Client
- func (c *Client) BatchEmbedding(ctx context.Context, model schema.Model, texts []string, _ ...opt.Opt) ([][]float64, error)
- func (c *Client) Embedding(ctx context.Context, model schema.Model, text string, opts ...opt.Opt) ([]float64, error)
- func (c *Client) GetModel(ctx context.Context, name string, opts ...opt.Opt) (*schema.Model, error)
- func (c *Client) ListModels(ctx context.Context, opts ...opt.Opt) ([]schema.Model, error)
- func (*Client) Name() string
- func (c *Client) WithSession(ctx context.Context, model schema.Model, session *schema.Conversation, ...) (*schema.Message, *schema.Usage, error)
- func (c *Client) WithoutSession(ctx context.Context, model schema.Model, message *schema.Message, ...) (*schema.Message, *schema.Usage, error)
Constants ¶
This section is empty.
Variables ¶
This section is empty.
Functions ¶
func GenerateRequest ¶
GenerateRequest builds a generate request from options without sending it. Useful for testing and debugging.
func WithFrequencyPenalty ¶
WithFrequencyPenalty sets the frequency penalty (-2.0 to 2.0). Positive values penalise tokens proportionally to how often they have appeared so far, reducing repetition.
func WithJSONOutput ¶
func WithJSONOutput(schema *jsonschema.Schema) opt.Opt
WithJSONOutput constrains the model to produce JSON conforming to the given schema.
func WithMaxTokens ¶
WithMaxTokens sets the maximum number of tokens to generate (minimum 1).
func WithPresencePenalty ¶
WithPresencePenalty sets the presence penalty (-2.0 to 2.0). Positive values penalise tokens that have already appeared, encouraging the model to talk about new topics.
func WithSafePrompt ¶
WithSafePrompt enables injection of the safety prompt before the conversation.
func WithSeed ¶
WithSeed sets the random seed for deterministic sampling.
func WithStopSequences ¶
WithStopSequences sets custom stop sequences for the request. Generation stops when any of the specified sequences is encountered.
func WithSystemPrompt ¶
WithSystemPrompt sets the system prompt for the request.
func WithTemperature ¶
WithTemperature sets the temperature for the request (0.0 to 1.5). Higher values produce more random output, lower values more deterministic.
func WithToolChoiceAny ¶
WithToolChoiceAny forces the model to use one of the available tools.
func WithToolChoiceAuto ¶
WithToolChoiceAuto lets the model decide whether to use tools.
func WithToolChoiceNone ¶
WithToolChoiceNone prevents the model from using any tools.
func WithToolChoiceRequired ¶
WithToolChoiceRequired forces the model to use a tool (alias for "required").
func WithTopP ¶
WithTopP sets the nucleus sampling probability for the request (0.0 to 1.0).
Types ¶
type Client ¶
type Client struct {
*client.Client
*modelcache.ModelCache
}
func (*Client) BatchEmbedding ¶
func (c *Client) BatchEmbedding(ctx context.Context, model schema.Model, texts []string, _ ...opt.Opt) ([][]float64, error)
BatchEmbedding generates embedding vectors for multiple texts using the specified model.
func (*Client) Embedding ¶
func (c *Client) Embedding(ctx context.Context, model schema.Model, text string, opts ...opt.Opt) ([]float64, error)
Embedding generates an embedding vector for a single text using the specified model.
func (*Client) ListModels ¶
ListModels returns all available models from the Mistral API.