Documentation
¶
Index ¶
- Variables
- func ByLibrary(l []DeviceInfo) [][]DeviceInfo
- func ByPerformance(l []DeviceInfo) [][]DeviceInfo
- func Dump(ctx Context, t Tensor, optsFuncs ...DumpOptions) string
- func FlashAttentionSupported(l []DeviceInfo) bool
- func GetVisibleDevicesEnv(l []DeviceInfo, mustFilter bool) map[string]string
- func LibraryPaths(l []DeviceInfo) []string
- func RegisterBackend(name string, f func(string, BackendParams) (Backend, error))
- type Backend
- type BackendCacheConfig
- type BackendMemory
- type BackendParams
- type BaseRunner
- type ByFreeMemory
- type CacheConfig
- type Context
- type DType
- type DeviceComparison
- type DeviceID
- type DeviceInfo
- func (d DeviceInfo) AddInitValidation(env map[string]string)
- func (a DeviceInfo) Compare(b DeviceInfo) DeviceComparison
- func (d DeviceInfo) Compute() string
- func (d DeviceInfo) Driver() string
- func (a DeviceInfo) IsBetter(b DeviceInfo) bool
- func (d DeviceInfo) MinimumMemory() uint64
- func (d DeviceInfo) NeedsInitValidation() bool
- func (d DeviceInfo) PreferredLibrary(other DeviceInfo) bool
- type DeviceMemory
- type DumpOptions
- type ErrNoMem
- type FilteredRunnerDiscovery
- type FlashAttentionType
- type GPULayers
- type GPULayersList
- type RunnerDiscovery
- type SamplingMode
- type ScaledDotProductAttention
- type SystemInfo
- type Tensor
Constants ¶
This section is empty.
Variables ¶
var LibOllamaPath string = func() string {
	exe, err := os.Executable()
	if err != nil {
		return ""
	}
	if eval, err := filepath.EvalSymlinks(exe); err == nil {
		exe = eval
	}
	var libPath string
	switch runtime.GOOS {
	case "windows":
		libPath = filepath.Join(filepath.Dir(exe), "lib", "ollama")
	case "linux":
		libPath = filepath.Join(filepath.Dir(exe), "..", "lib", "ollama")
	case "darwin":
		libPath = filepath.Dir(exe)
	}
	cwd, err := os.Getwd()
	if err != nil {
		return ""
	}
	paths := []string{
		libPath,
		filepath.Join(filepath.Dir(exe), "build", "lib", "ollama"),
		filepath.Join(cwd, "build", "lib", "ollama"),
	}
	for _, p := range paths {
		if _, err := os.Stat(p); err == nil {
			return p
		}
	}
	return filepath.Dir(exe)
}()
LibOllamaPath is a path used to look up dynamic libraries. In development it's usually 'build/lib/ollama'; in distribution builds it's 'lib/ollama' on Windows, '../lib/ollama' on Linux, and the executable's directory on macOS. Note: in distribution builds, additional GPU-specific libraries are found in subdirectories of the returned path, such as 'cuda_v12', 'rocm', etc.
Functions ¶
func ByLibrary ¶ added in v0.12.7
func ByLibrary(l []DeviceInfo) [][]DeviceInfo
func ByPerformance ¶ added in v0.12.11
func ByPerformance(l []DeviceInfo) [][]DeviceInfo
ByPerformance groups devices by similar speed
func FlashAttentionSupported ¶ added in v0.12.7
func FlashAttentionSupported(l []DeviceInfo) bool
For each GPU, check if it does NOT support flash attention
func GetVisibleDevicesEnv ¶ added in v0.12.7
func GetVisibleDevicesEnv(l []DeviceInfo, mustFilter bool) map[string]string
Given the list of GPUs this instantiation is targeted for, figure out the visible devices environment variables. Set mustFilter to true to enable filtering of CUDA devices.
func LibraryPaths ¶ added in v0.12.7
func LibraryPaths(l []DeviceInfo) []string
func RegisterBackend ¶
func RegisterBackend(name string, f func(string, BackendParams) (Backend, error))
Types ¶
type Backend ¶
type Backend interface {
// Close frees all memory associated with this backend
Close()
Load(ctx context.Context, progress func(float32)) error
// BackendMemory returns the memory allocations that were made for this model
BackendMemory() BackendMemory
Config() fs.Config
Get(name string) Tensor
NewContext() Context
NewContextSize(size int) Context
// Enumerate the devices available for inference via this backend
BackendDevices() []DeviceInfo
}
func NewBackend ¶
func NewBackend(modelPath string, params BackendParams) (Backend, error)
type BackendCacheConfig ¶ added in v0.5.13
type BackendCacheConfig interface {
CacheConfig() CacheConfig
}
BackendCacheConfig should be implemented by backends that need special output from the cache to meet specific requirements. It is frequently implemented in conjunction with ScaledDotProductAttention.
type BackendMemory ¶ added in v0.7.1
type BackendMemory struct {
// InputWeights are always located on the CPU and cannot be moved
InputWeights uint64
// CPU model components are located in system memory. This does not
// include unified memory allocated through the GPU.
CPU DeviceMemory
// GPU model components are located on one or more GPUs.
GPUs []DeviceMemory
}
BackendMemory provides the amount of memory required to load the model per device based on the BackendParams. In some cases, not all required allocations will be known at this point. However, the size of the most recent allocation is guaranteed to be provided so that if it failed, the caller can accommodate that to make forward progress.
func (BackendMemory) Log ¶ added in v0.11.5
func (m BackendMemory) Log(level slog.Level)
Log prints a high level summary of the memory
func (BackendMemory) LogValue ¶ added in v0.8.0
func (m BackendMemory) LogValue() slog.Value
type BackendParams ¶
type BackendParams struct {
// AllocMemory causes the backend to allocate memory for the model. If
// false, this is only being used for discovering the required amount of
// memory and cannot load the model for running.
AllocMemory bool
// NumThreads sets the number of threads to use if running on the CPU
NumThreads int
// GPULayers is the set of layers to offload to GPUs
GPULayers GPULayersList
// FlashAttention indicates that we should use a fused flash attention kernel
FlashAttention FlashAttentionType
}
BackendParams controls how the backend loads and executes models
type BaseRunner ¶ added in v0.12.7
type BaseRunner interface {
// GetPort returns the localhost port number the runner is running on
GetPort() int
// HasExited indicates if the runner is no longer running. This can be used during
// bootstrap to detect if a given filtered device is incompatible and triggered an assert
HasExited() bool
}
type ByFreeMemory ¶ added in v0.12.7
type ByFreeMemory []DeviceInfo
Sort by free memory. iGPUs are reported first, thus Reverse() yields the largest discrete GPU first.
func (ByFreeMemory) Len ¶ added in v0.12.7
func (a ByFreeMemory) Len() int
func (ByFreeMemory) Less ¶ added in v0.12.7
func (a ByFreeMemory) Less(i, j int) bool
func (ByFreeMemory) Swap ¶ added in v0.12.7
func (a ByFreeMemory) Swap(i, j int)
type CacheConfig ¶ added in v0.5.13
type CacheConfig struct {
// CachePadding specifies the multiple for the number of tokens of cache history
// that will be returned from cache Get for k, v and mask. The capacity of the
// cache itself will also be increased to a multiple of this size if needed.
CachePadding int
// PermutedV performs Permute(ctx, 1, 2, 0, 3) on v tensors stored via Put
// and return the permuted version via Get. This uses the cache copy operation
// to avoid a Contiguous call on the permuted tensor.
PermutedV bool
// MaskDType specifies the data type for generating the mask. If unset it will
// default to DTypeF32.
MaskDType DType
}
CacheConfig controls optimizations (mostly backend-specific) that may transform the output of the cache to work better with specific kernels.
type Context ¶
type Context interface {
Empty(dtype DType, shape ...int) Tensor
Zeros(dtype DType, shape ...int) Tensor
FromBytes(dtype DType, s []byte, shape ...int) Tensor
FromFloats(s []float32, shape ...int) Tensor
FromInts(s []int32, shape ...int) Tensor
// Arange creates a 1D tensor with values in the half-open interval [start, stop), increased by step.
Arange(start, stop, step float32, dtype DType) Tensor
Forward(...Tensor) Context
// SetBatchSize provides a hint on the batch size to optimize processing
// Uses heuristics if not set
SetBatchSize(int)
Compute(...Tensor)
ComputeWithNotify(func(), ...Tensor) // notify callback once compute has begun
// Reserve is analogous to Compute but rather than executing a
// graph, simply preallocates memory. Typically called with a
// worst case graph to ensure all resources are available for
// for future inference.
Reserve()
MaxGraphNodes() int
Close()
// Input returns a context appropriate for creating tensors that are
// inputs to the model (which includes things like output locations)
Input() Context
// Layer returns a context appropriate for creating intermediate tensors
Layer(int) Context
}
type DeviceComparison ¶ added in v0.12.4
type DeviceComparison int
const (
	UniqueDevice      DeviceComparison = iota
	SameBackendDevice // The device is the same, and the library/backend is the same
	DuplicateDevice   // The same physical device but different library/backend (overlapping device)
)
type DeviceID ¶ added in v0.12.4
type DeviceID struct {
// ID is an identifier for the device for matching with system
// management libraries. The ID is only unique for other devices
// using the same Library.
// This ID represents a "post filtered" view of the enumerated devices
// if the ID is numeric
ID string `json:"id"`
// Library identifies which library is used for the device (e.g. CUDA, ROCm, etc.)
Library string `json:"backend,omitempty"`
}
Minimal unique device identification
type DeviceInfo ¶ added in v0.12.4
type DeviceInfo struct {
DeviceID
// Name is the name of the device as labeled by the backend. It
// may not be persistent across instances of the runner.
Name string `json:"name"`
// Description is the longer user-friendly identification of the device
Description string `json:"description"`
// FilterID is populated with the unfiltered device ID if a numeric ID is used
// so the device can be included.
FilterID string `json:"filter_id,omitempty"`
// Integrated is set true for integrated GPUs, false for Discrete GPUs
Integrated bool `json:"integration,omitempty"`
// PCIID is the bus, device and domain ID of the device for deduplication
// when discovered by multiple backends
PCIID string `json:"pci_id,omitempty"`
// TotalMemory is the total amount of memory the device can use for loading models
TotalMemory uint64 `json:"total_memory"`
// FreeMemory is the amount of memory currently available on the device for loading models
FreeMemory uint64 `json:"free_memory,omitempty"`
// ComputeMajor is the major version of capabilities of the device
// if unsupported by the backend, -1 will be returned
ComputeMajor int
// ComputeMinor is the minor version of capabilities of the device
// if unsupported by the backend, -1 will be returned
ComputeMinor int
// Driver Information
DriverMajor int `json:"driver_major,omitempty"`
DriverMinor int `json:"driver_minor,omitempty"`
// Where backends were loaded from
LibraryPath []string
}
func GetDevicesFromRunner ¶ added in v0.12.7
func GetDevicesFromRunner(ctx context.Context, runner BaseRunner) ([]DeviceInfo, error)
func (DeviceInfo) AddInitValidation ¶ added in v0.12.10
func (d DeviceInfo) AddInitValidation(env map[string]string)
Set the init validation environment variable
func (DeviceInfo) Compare ¶ added in v0.12.4
func (a DeviceInfo) Compare(b DeviceInfo) DeviceComparison
func (DeviceInfo) Compute ¶ added in v0.12.4
func (d DeviceInfo) Compute() string
func (DeviceInfo) Driver ¶ added in v0.12.4
func (d DeviceInfo) Driver() string
func (DeviceInfo) IsBetter ¶ added in v0.12.4
func (a DeviceInfo) IsBetter(b DeviceInfo) bool
For a SameBackendDevice, return true if b is better than a, e.g. a newer GPU library version.
func (DeviceInfo) MinimumMemory ¶ added in v0.12.7
func (d DeviceInfo) MinimumMemory() uint64
MinimumMemory reports the amount of memory that should be set aside on the device for overhead (e.g. VRAM consumed by context structures independent of model allocations)
func (DeviceInfo) NeedsInitValidation ¶ added in v0.12.10
func (d DeviceInfo) NeedsInitValidation() bool
NeedsInitValidation returns true if the device in question has the potential to crash at inference time and requires deeper validation before we include it in the supported devices list.
func (DeviceInfo) PreferredLibrary ¶ added in v0.12.10
func (d DeviceInfo) PreferredLibrary(other DeviceInfo) bool
PreferredLibrary returns true if this library is preferred over the other input library. Used to filter out Vulkan in favor of CUDA or ROCm.
type DeviceMemory ¶ added in v0.7.1
type DeviceMemory struct {
DeviceID
// Name is the name of the device as labeled by the backend. It
// may not be persistent across instances of the runner.
Name string
// Weights is the per-layer memory needed for the model weights.
Weights []uint64
// Cache is the per-layer memory needed for the KV cache.
Cache []uint64
// Graph is the size of the compute graph. It is not per-layer.
Graph uint64
}
DeviceMemory provides a breakdown of the memory needed per device, such as a CPU or GPU.
func (DeviceMemory) LogValue ¶ added in v0.8.0
func (m DeviceMemory) LogValue() slog.Value
func (DeviceMemory) Size ¶ added in v0.12.4
func (m DeviceMemory) Size() uint64
Size returns the total size of the memory required by this device
type DumpOptions ¶
type DumpOptions func(*dumpOptions)
func DumpWithEdgeItems ¶ added in v0.7.0
func DumpWithEdgeItems(n int) DumpOptions
DumpWithEdgeItems sets the number of elements to print at the beginning and end of each dimension.
func DumpWithPrecision ¶ added in v0.7.0
func DumpWithPrecision(n int) DumpOptions
DumpWithPrecision sets the number of decimal places to print. Applies to float32 and float64.
func DumpWithThreshold ¶ added in v0.7.0
func DumpWithThreshold(n int) DumpOptions
DumpWithThreshold sets the threshold for printing the entire tensor. If the number of elements is less than or equal to this value, the entire tensor will be printed. Otherwise, only the beginning and end of each dimension will be printed.
type ErrNoMem ¶ added in v0.7.1
type ErrNoMem struct {
BackendMemory
}
ErrNoMem is returned when panicking due to insufficient memory. It includes the attempted memory allocation.
type FilteredRunnerDiscovery ¶ added in v0.12.7
type FilteredRunnerDiscovery interface {
RunnerDiscovery
// GetActiveDeviceIDs returns the filtered set of devices actively in
// use by this runner for running models. If the runner is a bootstrap runner, no devices
// will be active yet so no device IDs are returned.
// This routine will not query the underlying device and will return immediately
GetActiveDeviceIDs() []DeviceID
}
type FlashAttentionType ¶ added in v0.13.4
type FlashAttentionType int32
const (
	// Aligned with llama_flash_attn_type
	FlashAttentionAuto     FlashAttentionType = -1
	FlashAttentionDisabled FlashAttentionType = 0
	FlashAttentionEnabled  FlashAttentionType = 1
)
func (FlashAttentionType) LogValue ¶ added in v0.13.4
func (f FlashAttentionType) LogValue() slog.Value
func (FlashAttentionType) String ¶ added in v0.13.4
func (f FlashAttentionType) String() string
type GPULayers ¶ added in v0.11.5
GPULayers is a set of layers to be allocated on a single GPU
func (GPULayers) FirstLayer ¶ added in v0.12.11
FirstLayer returns the smallest layer index scheduled on this GPU, or MaxInt when empty.
type GPULayersList ¶ added in v0.11.5
type GPULayersList []GPULayers
GPULayersList is a set of layer allocations across multiple GPUs
func (GPULayersList) Hash ¶ added in v0.11.5
func (l GPULayersList) Hash() uint64
Hash is an identifier of this layer assignment
func (GPULayersList) Len ¶ added in v0.12.11
func (l GPULayersList) Len() int
func (GPULayersList) Less ¶ added in v0.12.11
func (l GPULayersList) Less(i, j int) bool
Sort by the ordering of the layers offloaded
func (GPULayersList) String ¶ added in v0.11.5
func (l GPULayersList) String() string
func (GPULayersList) Sum ¶ added in v0.11.5
func (l GPULayersList) Sum() int
Sum is the total number of layers assigned across all GPUs
func (GPULayersList) Swap ¶ added in v0.12.11
func (l GPULayersList) Swap(i, j int)
type RunnerDiscovery ¶ added in v0.12.7
type RunnerDiscovery interface {
BaseRunner
// GetDeviceInfos will perform a query of the underlying device libraries
// for device identification and free VRAM information
// During bootstrap scenarios, this routine may take seconds to complete
GetDeviceInfos(ctx context.Context) []DeviceInfo
}
type SamplingMode ¶ added in v0.13.0
type SamplingMode int
const (
	SamplingModeNearest SamplingMode = iota
	SamplingModeBilinear
)
type ScaledDotProductAttention ¶
type ScaledDotProductAttention interface {
ScaledDotProductAttention(ctx Context, key, value, mask, sinks Tensor, vmla Tensor, scale float64, cacheConfigApplied bool) Tensor
}
ScaledDotProductAttention implements a fused attention operation equivalent to following code on a tensor named query:
query = query.Permute(ctx, 0, 2, 1, 3)
key = key.Permute(ctx, 0, 2, 1, 3)
value = value.Permute(ctx, 1, 2, 0, 3).Contiguous(ctx)
kq := key.MulmatFullPrec(ctx, query)
kq = kq.Scale(ctx, scale)
if mask != nil {
kq = kq.Add(ctx, mask)
}
kq = kq.Softmax(ctx)
kqv := value.Mulmat(ctx, kq)
return kqv.Permute(ctx, 0, 2, 1, 3).Contiguous(ctx)
cacheConfigApplied indicates whether the optimizations requested through CacheConfig have been performed
type SystemInfo ¶ added in v0.12.7
type SystemInfo struct {
// ThreadCount is the optimal number of threads to use for inference
ThreadCount int `json:"threads,omitempty"`
// TotalMemory is the total amount of system memory
TotalMemory uint64 `json:"total_memory,omitempty"`
// FreeMemory is the amount of memory currently available on the system for loading models
FreeMemory uint64 `json:"free_memory,omitempty"`
// FreeSwap is the amount of system swap space reported as available
FreeSwap uint64 `json:"free_swap,omitempty"`
}
type Tensor ¶
type Tensor interface {
Dim(n int) int
Stride(n int) int
Shape() []int
DType() DType
Cast(ctx Context, dtype DType) Tensor
Bytes() []byte
Floats() []float32
FromBytes([]byte)
FromFloats([]float32)
FromInts([]int32)
Add(ctx Context, t2 Tensor) Tensor
Sub(ctx Context, t2 Tensor) Tensor
Mul(ctx Context, t2 Tensor) Tensor
Div(ctx Context, t2 Tensor) Tensor
Mulmat(ctx Context, t2 Tensor) Tensor
MulmatFullPrec(ctx Context, t2 Tensor) Tensor
MulmatID(ctx Context, t2, ids Tensor) Tensor
AddID(ctx Context, t2, ids Tensor) Tensor
Softmax(ctx Context) Tensor
L2Norm(ctx Context, eps float32) Tensor
LayerNorm(ctx Context, weight, bias Tensor, eps float32) Tensor
RMSNorm(ctx Context, weight Tensor, eps float32) Tensor
Scale(ctx Context, s float64) Tensor
SumRows(ctx Context) Tensor
AvgPool2D(ctx Context, k, s int, p float32) Tensor
Conv2D(ctx Context, weight Tensor, s0, s1, p0, p1, d0, d1 int) Tensor
Conv3D(ctx Context, weight Tensor, c, s0, s1, s2, p0, p1, p2, d0, d1, d2 int) Tensor
SSMConv(ctx Context, kernel Tensor) Tensor
SSMScan(ctx Context, x, dt, A, B, C, ids Tensor) Tensor
IM2Col(ctx Context, weight Tensor, s0, s1, p0, p1, d0, d1 int) Tensor
Sin(ctx Context) Tensor
Cos(ctx Context) Tensor
Tanh(ctx Context) Tensor
GELU(ctx Context, up ...Tensor) Tensor
GELU_ERF(ctx Context) Tensor
QuickGELU(ctx Context, up ...Tensor) Tensor
SILU(ctx Context, up ...Tensor) Tensor
RELU(ctx Context, up ...Tensor) Tensor
Sigmoid(ctx Context) Tensor
SigmoidOut(ctx Context) Tensor
// SILUAlphaLimit is a variant of SILU that clamps the input to the range [-limit, limit]
SILUAlphaLimit(ctx Context, up Tensor, alpha, limit float32) Tensor
Reshape(ctx Context, shape ...int) Tensor
View(ctx Context, offset int, shape ...int) Tensor
Permute(ctx Context, shape ...int) Tensor
Contiguous(ctx Context, shape ...int) Tensor
Pad(ctx Context, shape ...int) Tensor
Stack(ctx Context, dim int, s ...Tensor) Tensor
// Repeat repeats the tensor n times along dimension dim
Repeat(ctx Context, dim, n int) Tensor
Concat(ctx Context, t2 Tensor, dim int) Tensor
Rows(ctx Context, t2 Tensor) Tensor
SetRows(ctx Context, src Tensor, idxs Tensor) Tensor
SetInplace(ctx Context, src Tensor, nb1, nb2, nb3, offset int) Tensor
Copy(ctx Context, t2 Tensor) Tensor
Duplicate(ctx Context) Tensor
Slice(ctx Context, dim, low, high, step int) Tensor
Chunk(ctx Context, dim int, size int) []Tensor
ChunkSections(ctx Context, dim int, sections ...int) []Tensor
TopK(ctx Context, k int) Tensor
Argsort(ctx Context) Tensor
Mean(ctx Context) Tensor
Variance(ctx Context) Tensor
Stddev(ctx Context) Tensor
Sqr(ctx Context) Tensor
Sqrt(ctx Context) Tensor
Exp(ctx Context) Tensor
Neg(ctx Context) Tensor
// Clamp clamps values to [min, max] range
Clamp(ctx Context, min, max float32) Tensor
// Softplus computes ln(1 + exp(x))
Softplus(ctx Context) Tensor
// CumSum computes cumulative sum along dimension 0
CumSum(ctx Context) Tensor
// Diag creates a diagonal matrix from a 1D tensor
Diag(ctx Context) Tensor
// Tri converts a matrix to triangular form (0=upper+diag, 1=upper, 2=lower+diag, 3=lower)
Tri(ctx Context, triType int) Tensor
// Fill fills a tensor with a constant value (in-place)
Fill(ctx Context, value float32) Tensor
// Repeat4D repeats tensor to match target shape
Repeat4D(ctx Context, dim0, dim1, dim2, dim3 int) Tensor
// SolveTri solves a triangular system Ax = B
SolveTri(ctx Context, b Tensor, lower, left, unitDiag bool) Tensor
Interpolate(ctx Context, dims [4]int, samplingMode SamplingMode) Tensor
}