Documentation
¶
Overview ¶
Package providers implements various AI model service provider connectors supported by MindTrial.
Index ¶
Constants ¶
This section is empty.
Variables ¶
var (
	// ErrUnknownProviderName is returned when provider name is not recognized.
	ErrUnknownProviderName = errors.New("unknown provider name")
	// ErrCreateClient is returned when provider client initialization fails.
	ErrCreateClient = errors.New("failed to create client")
	// ErrInvalidModelParams is returned when model parameters are invalid.
	ErrInvalidModelParams = errors.New("invalid model parameters for run")
	// ErrCompileSchema is returned when response schema compilation fails.
	ErrCompileSchema = errors.New("failed to compile response schema")
	// ErrGenerateResponse is returned when response generation fails.
	ErrGenerateResponse = errors.New("failed to generate response")
	// ErrCreatePromptRequest is returned when request generation fails.
	ErrCreatePromptRequest = errors.New("failed to create prompt request")
	// ErrFeatureNotSupported is returned when a requested feature is not supported by the provider.
	ErrFeatureNotSupported = errors.New("feature not supported by provider")
	// ErrFileNotSupported is returned when a task context file is not supported by the provider.
	ErrFileNotSupported = fmt.Errorf("%w: file type", ErrFeatureNotSupported)
)
var DefaultResponseFormatInstruction = sync.OnceValue(func() string {
	schema, err := json.Marshal(ResultJSONSchema())
	if err != nil {
		panic(fmt.Errorf("%w: %v", ErrCompileSchema, err))
	}
	return fmt.Sprintf("Structure the response according to this JSON schema: %s", schema)
})
DefaultResponseFormatInstruction generates the default response-formatting instruction to be passed to AI models that require it.
var ResultJSONSchema = sync.OnceValue(func() *jsonschema.Schema {
	reflector := jsonschema.Reflector{
		AllowAdditionalProperties: false,
		DoNotReference:            true,
	}
	return reflector.Reflect(Result{})
})
ResultJSONSchema is a lazily initialized JSON schema for the Result type.
Functions ¶
func DefaultAnswerFormatInstruction ¶
DefaultAnswerFormatInstruction generates the default answer-formatting instruction for a given task to be passed to the AI model.
func DefaultTaskFileNameInstruction ¶
DefaultTaskFileNameInstruction generates the default task file name instruction to be passed to AI models that require it.
Types ¶
type Anthropic ¶
type Anthropic struct {
// contains filtered or unexported fields
}
Anthropic implements the Provider interface for Anthropic generative models.
func NewAnthropic ¶
func NewAnthropic(cfg config.AnthropicClientConfig) *Anthropic
NewAnthropic creates a new Anthropic provider instance with the given configuration.
type Deepseek ¶
type Deepseek struct {
// contains filtered or unexported fields
}
Deepseek implements the Provider interface for Deepseek generative models.
func NewDeepseek ¶
func NewDeepseek(cfg config.DeepseekClientConfig) (*Deepseek, error)
NewDeepseek creates a new Deepseek provider instance with the given configuration.
type ErrUnmarshalResponse ¶
type ErrUnmarshalResponse struct {
// Cause is the underlying error that caused the unmarshaling to fail.
Cause error
// RawMessage is the raw message that failed to be unmarshaled.
RawMessage []byte
// StopReason contains the reason why the AI model stopped generating the response.
StopReason []byte
}
ErrUnmarshalResponse is returned when response unmarshaling fails.
func NewErrUnmarshalResponse ¶
func NewErrUnmarshalResponse(cause error, rawMessage []byte, stopReason []byte) *ErrUnmarshalResponse
NewErrUnmarshalResponse creates a new ErrUnmarshalResponse instance.
func (*ErrUnmarshalResponse) Details ¶
func (e *ErrUnmarshalResponse) Details() string
Details returns a formatted string containing the stop reason and raw message from the ErrUnmarshalResponse error. This provides additional context for debugging and understanding the error.
func (*ErrUnmarshalResponse) Error ¶
func (e *ErrUnmarshalResponse) Error() string
type GoogleAI ¶
type GoogleAI struct {
// contains filtered or unexported fields
}
GoogleAI implements the Provider interface for Google AI generative models.
func NewGoogleAI ¶
NewGoogleAI creates a new GoogleAI provider instance with the given configuration. It returns an error if client initialization fails.
type OpenAI ¶
type OpenAI struct {
// contains filtered or unexported fields
}
OpenAI implements the Provider interface for OpenAI generative models.
func NewOpenAI ¶
func NewOpenAI(cfg config.OpenAIClientConfig) *OpenAI
NewOpenAI creates a new OpenAI provider instance with the given configuration.
type Provider ¶
type Provider interface {
// Name returns the provider's unique identifier.
Name() string
// Validator creates a validator for checking response correctness.
Validator(expected utils.StringSet) Validator
// Run executes a task using specified configuration and returns the result.
Run(ctx context.Context, cfg config.RunConfig, task config.Task) (result Result, err error)
// Close releases resources when the provider is no longer needed.
Close(ctx context.Context) error
}
Provider interacts with AI model services.
func NewProvider ¶
NewProvider creates a new AI model provider based on the given configuration. It returns an error if the provider name is unknown or initialization fails.
type Result ¶
type Result struct {
// Title is a brief summary of the response.
Title string `json:"title" validate:"required"`
// Explanation is a detailed explanation of the answer.
Explanation string `json:"explanation" validate:"required"`
// FinalAnswer is the final answer to the task's query.
FinalAnswer string `json:"final_answer" validate:"required"`
// contains filtered or unexported fields
}
Result represents the structured response received from an AI model.
func (Result) Explain ¶
Explain returns a formatted explanation of the result as generated by the AI model.
func (Result) GetDuration ¶
GetDuration returns the time duration it took to generate this result.
func (Result) GetPrompts ¶
GetPrompts returns the prompts used to generate this result.
type Usage ¶
type Usage struct {
InputTokens *int64 `json:"-"` // Tokens used by the input if available.
OutputTokens *int64 `json:"-"` // Tokens used by the output if available.
}
Usage represents the token usage statistics for a response.
type Validator ¶
type Validator interface {
// IsCorrect checks if result matches expected value.
IsCorrect(ctx context.Context, actual Result) bool
// ToCanonical normalizes string for validation.
ToCanonical(value string) string
}
Validator verifies AI model responses.
func NewDefaultValidator ¶
NewDefaultValidator returns a new Validator to check results against the provided expected value(s).