Documentation
¶
Overview ¶
Package backend defines interfaces for workflow step execution backends. The interface-based design allows easy extension with new backend types (e.g., Anthropic, Ollama) and facilitates testing through mock implementations.
Index ¶
- type LLMBackend
- type LlamaBackend
- func NewLlamaBackend(cfg LlamaConfig) *LlamaBackend
- func (b *LlamaBackend) Close() error
- func (b *LlamaBackend) Generate(ctx context.Context, prompt string, model string, maxTokens int) (string, error)
- func (b *LlamaBackend) LoadModel(modelPath string) (*llama.Model, error)
- func (b *LlamaBackend) Name() string
- func (b *LlamaBackend) SetWorker(w WorkerClient)
- type LlamaConfig
- type OpenAIBackend
- func NewOpenAIBackend(cfg OpenAIConfig) (*OpenAIBackend, error)
- type OpenAIConfig
- type Registry
- func (r *Registry) Close() error
- func (r *Registry) GetLLM(name string) (LLMBackend, bool)
- func (r *Registry) GetShell() ShellBackend
- func (r *Registry) ListLLMBackends() []string
- func (r *Registry) RegisterLLM(name string, backend LLMBackend)
- func (r *Registry) RegisterShell(backend ShellBackend)
- func (r *Registry) SetDefaultLLM(name string)
- type ShellBackend
- type ShellBackendImpl
- func NewShellBackend(cfg ShellConfig) *ShellBackendImpl
- func (s *ShellBackendImpl) RunWithEnv(ctx context.Context, command string, env map[string]string) (string, error)
- type ShellConfig
- type WorkerClient
Constants ¶
This section is empty.
Variables ¶
This section is empty.
Functions ¶
This section is empty.
Types ¶
type LLMBackend ¶
type LLMBackend interface {
// Generate produces a completion for the given prompt.
// model specifies which model to use (interpretation is backend-specific).
// maxTokens limits the response length (0 means use backend default).
Generate(ctx context.Context, prompt string, model string, maxTokens int) (string, error)
// Name returns a human-readable name for the backend.
Name() string
// Close releases any resources held by the backend.
Close() error
}
LLMBackend is the interface for language model backends. Implementations include OpenAI API, local llama.cpp, and potentially other providers like Anthropic, Ollama, etc.
type LlamaBackend ¶
type LlamaBackend struct {
// contains filtered or unexported fields
}
LlamaBackend implements LLMBackend using local llama.cpp inference.
func NewLlamaBackend ¶
func NewLlamaBackend(cfg LlamaConfig) *LlamaBackend
NewLlamaBackend creates a new local llama backend.
func (*LlamaBackend) Generate ¶
func (b *LlamaBackend) Generate(ctx context.Context, prompt string, model string, maxTokens int) (string, error)
Generate implements LLMBackend.
func (*LlamaBackend) LoadModel ¶
func (b *LlamaBackend) LoadModel(modelPath string) (*llama.Model, error)
LoadModel loads a GGUF model from the given path.
func (*LlamaBackend) SetWorker ¶
func (b *LlamaBackend) SetWorker(w WorkerClient)
SetWorker sets the worker client for subprocess-based inference.
type LlamaConfig ¶
type LlamaConfig struct {
// UseSubprocess enables subprocess-based inference for concurrency safety.
UseSubprocess bool
// WorkerClient is an optional pre-configured worker client.
// If nil and UseSubprocess is true, a new worker will be created.
WorkerClient WorkerClient
// Default generation parameters
MaxTokens int
TopK int
TopP float64
Temp float64
}
LlamaConfig holds configuration for the Llama backend.
type OpenAIBackend ¶
type OpenAIBackend struct {
// contains filtered or unexported fields
}
OpenAIBackend implements LLMBackend using the OpenAI API.
func NewOpenAIBackend ¶
func NewOpenAIBackend(cfg OpenAIConfig) (*OpenAIBackend, error)
NewOpenAIBackend creates a new OpenAI backend.
type OpenAIConfig ¶
type OpenAIConfig struct {
APIKey string
BaseURL string // Optional: for Azure or compatible APIs
DefaultModel string
}
OpenAIConfig holds configuration for the OpenAI backend.
type Registry ¶
type Registry struct {
// contains filtered or unexported fields
}
Registry manages available backends and allows lookup by name.
func (*Registry) GetLLM ¶
func (r *Registry) GetLLM(name string) (LLMBackend, bool)
GetLLM returns an LLM backend by name, or the default if name is empty.
func (*Registry) GetShell ¶
func (r *Registry) GetShell() ShellBackend
GetShell returns the shell backend.
func (*Registry) ListLLMBackends ¶
func (r *Registry) ListLLMBackends() []string
ListLLMBackends returns names of all registered LLM backends.
func (*Registry) RegisterLLM ¶
func (r *Registry) RegisterLLM(name string, backend LLMBackend)
RegisterLLM adds an LLM backend to the registry.
func (*Registry) RegisterShell ¶
func (r *Registry) RegisterShell(backend ShellBackend)
RegisterShell sets the shell backend.
func (*Registry) SetDefaultLLM ¶
func (r *Registry) SetDefaultLLM(name string)
SetDefaultLLM sets which LLM backend to use when none is specified.
type ShellBackend ¶
type ShellBackend interface {
// Run executes a shell command and returns combined stdout/stderr.
Run(ctx context.Context, command string) (string, error)
// RunWithEnv executes with additional environment variables.
RunWithEnv(ctx context.Context, command string, env map[string]string) (string, error)
}
ShellBackend executes shell commands.
type ShellBackendImpl ¶
type ShellBackendImpl struct {
// contains filtered or unexported fields
}
ShellBackendImpl implements ShellBackend using os/exec.
func NewShellBackend ¶
func NewShellBackend(cfg ShellConfig) *ShellBackendImpl
NewShellBackend creates a new shell backend.
func (*ShellBackendImpl) RunWithEnv ¶
func (s *ShellBackendImpl) RunWithEnv(ctx context.Context, command string, env map[string]string) (string, error)
RunWithEnv implements ShellBackend.
type ShellConfig ¶
type ShellConfig struct {
// Shell is the shell to use (default: "sh")
Shell string
}
ShellConfig holds configuration for the shell backend.