ml

package
v0.12.2 Latest
Warning

This package is not in the latest version of its module.

Go to latest
Published: Sep 24, 2025 License: MIT Imports: 12 Imported by: 19

Documentation

Index

Constants

This section is empty.

Variables

This section is empty.

Functions

func Dump

func Dump(ctx Context, t Tensor, optsFuncs ...DumpOptions) string

func RegisterBackend

func RegisterBackend(name string, f func(string, BackendParams) (Backend, error))

Types

type AllocationStatus added in v0.7.1

type AllocationStatus int
const (
	// Unallocated memory - have not yet attempted to allocate
	Unallocated AllocationStatus = iota

	// Failed memory - tried to allocate the memory and did not succeed
	Failed

	// Allocated memory - tried and succeeded to allocate memory
	Allocated
)

type Backend

type Backend interface {
	// Close frees all memory associated with this backend
	Close()

	Load(ctx context.Context, progress func(float32)) error

	// BackendMemory returns the memory allocations that were made for this model
	BackendMemory() BackendMemory

	Config() fs.Config
	Get(name string) Tensor
	NewContext() Context
	NewContextSize(size int) Context
}

func NewBackend

func NewBackend(modelPath string, params BackendParams) (Backend, error)

type BackendCacheConfig added in v0.5.13

type BackendCacheConfig interface {
	CacheConfig() CacheConfig
}

BackendCacheConfig should be implemented by backends that need special output from the cache to meet specific requirements. It is frequently implemented in conjunction with ScaledDotProductAttention.

type BackendMemory added in v0.7.1

type BackendMemory struct {
	// InputWeights are always located on the CPU and cannot be moved
	InputWeights Memory

	// CPU model components are located in system memory. This does not
	// include unified memory allocated through the GPU.
	CPU DeviceMemory

	// GPU model components are located on one or more GPUs.
	GPUs []DeviceMemory
}

BackendMemory provides the amount of memory required to load the model per device based on the BackendParams. In some cases, not all required allocations will be known at this point. However, the size of the most recent allocation is guaranteed to be provided so that if it failed, the caller can accommodate that to make forward progress.

func (BackendMemory) Log added in v0.11.5

func (m BackendMemory) Log(level slog.Level)

Log prints a high level summary of the memory (allocated or not)

func (BackendMemory) LogValue added in v0.8.0

func (m BackendMemory) LogValue() slog.Value

type BackendParams

type BackendParams struct {
	// AllocMemory causes the backend to allocate memory for the model. If
	// false, this is only being used for discovering the required amount of
	// memory and cannot load the model for running.
	AllocMemory bool

	// NumThreads sets the number of threads to use if running on the CPU
	NumThreads int

	// GPULayers is the set of layers to offload to GPUs
	GPULayers GPULayersList

	// FlashAttention indicates that we should use a fused flash attention kernel
	FlashAttention bool
}

BackendParams controls how the backend loads and executes models

type CacheConfig added in v0.5.13

type CacheConfig struct {
	// CachePadding specifies the multiple for the number of tokens of cache history
	// that will be returned from cache Get for k, v and mask. The capacity of the
	// cache itself will also be increased to a multiple of this size if needed.
	CachePadding int

	// PermutedV performs Permute(ctx, 1, 2, 0, 3) on v tensors stored via Put
	// and return the permuted version via Get. This uses the cache copy operation
	// to avoid a Contiguous call on the permuted tensor.
	PermutedV bool

	// MaskDType specifies the data type for generating the mask. If unset it will
	// default to DTypeF32.
	MaskDType DType

	// MaskBatchPadding specifies the multiple for the batch size dimension in the mask.
	// Any position that does not correspond to an actual token will be filled with -Inf.
	MaskBatchPadding int
}

CacheConfig controls optimizations (mostly backend-specific) that may transform the output of the cache to work better with specific kernels.

type Context

type Context interface {
	Empty(dtype DType, shape ...int) Tensor
	Zeros(dtype DType, shape ...int) Tensor
	FromFloatSlice(s []float32, shape ...int) Tensor
	FromIntSlice(s []int32, shape ...int) Tensor

	// Arange creates a 1D tensor with values within an interval (start, stop] increased by step.
	Arange(start, stop, step float32, dtype DType) Tensor

	Forward(...Tensor) Context
	Compute(...Tensor)
	ComputeWithNotify(func(), ...Tensor) // notify callback once compute has begun

	// Reserve is analogous to Compute but rather than executing a
	// graph, simply preallocates memory. Typically called with a
	// worst case graph to ensure all resources are available
	// for future inference.
	Reserve()

	MaxGraphNodes() int
	Close()

	// Input returns a context appropriate for creating tensors that are
	// inputs to the model (which includes things like output locations)
	Input() Context

	// Layer returns a context appropriate for creating intermediate tensors
	Layer(int) Context
}

type DType

type DType int
const (
	DTypeOther DType = iota
	DTypeF32
	DTypeF16
	DTypeQ80
	DTypeQ40
	DTypeI32
	DTypeMXFP4
)

type DeviceMemory added in v0.7.1

type DeviceMemory struct {
	// Name is the name of the device as labeled by the backend. It
	// may not be persistent across instances of the runner.
	Name string

	// ID is an identifier for the device for matching with system
	// management libraries.
	ID string

	// Weights is the per-layer memory needed for the model weights.
	Weights []Memory

	// Cache is the per-layer memory needed for the KV cache.
	Cache []Memory

	// Graph is the size of the compute graph. It is not per-layer.
	Graph Memory
}

DeviceMemory provides a breakdown of the memory needed per device, such as a CPU or GPU.

func (DeviceMemory) Allocated added in v0.11.5

func (m DeviceMemory) Allocated() uint64

Allocated returns the total size of the memory that has been successfully allocated on this device

func (DeviceMemory) LogValue added in v0.8.0

func (m DeviceMemory) LogValue() slog.Value

type DumpOptions

type DumpOptions func(*dumpOptions)

func DumpWithEdgeItems added in v0.7.0

func DumpWithEdgeItems(n int) DumpOptions

DumpWithEdgeItems sets the number of elements to print at the beginning and end of each dimension.

func DumpWithPrecision added in v0.7.0

func DumpWithPrecision(n int) DumpOptions

DumpWithPrecision sets the number of decimal places to print. Applies to float32 and float64.

func DumpWithThreshold added in v0.7.0

func DumpWithThreshold(n int) DumpOptions

DumpWithThreshold sets the threshold for printing the entire tensor. If the number of elements is less than or equal to this value, the entire tensor will be printed. Otherwise, only the beginning and end of each dimension will be printed.

type ErrNoMem added in v0.7.1

type ErrNoMem struct {
	BackendMemory
}

ErrNoMem is returned when panicking due to insufficient memory. It includes the attempted memory allocation.

func (ErrNoMem) Error added in v0.7.1

func (e ErrNoMem) Error() string

type GPULayers added in v0.11.5

type GPULayers struct {
	// ID is the identifier of the GPU, as reported in DeviceMemory
	ID string

	// Layers is a set of layer indices to load
	Layers []int
}

GPULayers is a set of layers to be allocated on a single GPU

func (GPULayers) String added in v0.11.5

func (g GPULayers) String() string

type GPULayersList added in v0.11.5

type GPULayersList []GPULayers

GPULayersList is a set of layer allocations across multiple GPUs

func (GPULayersList) Hash added in v0.11.5

func (l GPULayersList) Hash() uint64

Hash is an identifier of this layer assignment

func (GPULayersList) String added in v0.11.5

func (l GPULayersList) String() string

func (GPULayersList) Sum added in v0.11.5

func (l GPULayersList) Sum() int

Sum is the total number of layers assigned across all GPUs

type Memory added in v0.7.1

type Memory struct {
	Size   uint64
	Status AllocationStatus
}

Memory is the size of an allocation and whether it was successful.

func (Memory) String added in v0.7.1

func (m Memory) String() string

type ScaledDotProductAttention

type ScaledDotProductAttention interface {
	ScaledDotProductAttention(ctx Context, key, value, mask, sinks Tensor, scale float64) Tensor
}

ScaledDotProductAttention implements a fused attention operation equivalent to following code on a tensor named query:

query = query.Permute(ctx, 0, 2, 1, 3)
key = key.Permute(ctx, 0, 2, 1, 3)
value = value.Permute(ctx, 1, 2, 0, 3).Contiguous(ctx)

kq := key.MulmatFullPrec(ctx, query)

kq = kq.Scale(ctx, scale)

if mask != nil {
	kq = kq.Add(ctx, mask)
}

kq = kq.Softmax(ctx)

kqv := value.Mulmat(ctx, kq)

return kqv.Permute(ctx, 0, 2, 1, 3).Contiguous(ctx)

type Tensor

type Tensor interface {
	Dim(n int) int
	Stride(n int) int

	Shape() []int
	DType() DType
	Cast(ctx Context, dtype DType) Tensor

	Bytes() []byte
	Floats() []float32

	SetValueFromIntSlice(s []int32)

	Neg(ctx Context) Tensor
	Add(ctx Context, t2 Tensor) Tensor
	Sub(ctx Context, t2 Tensor) Tensor
	Mul(ctx Context, t2 Tensor) Tensor
	Div(ctx Context, t2 Tensor) Tensor

	Mulmat(ctx Context, t2 Tensor) Tensor
	MulmatFullPrec(ctx Context, t2 Tensor) Tensor
	MulmatID(ctx Context, t2, ids Tensor) Tensor
	AddID(ctx Context, t2, ids Tensor) Tensor

	Softmax(ctx Context) Tensor
	L2Norm(ctx Context, eps float32) Tensor
	LayerNorm(ctx Context, weight, bias Tensor, eps float32) Tensor
	RMSNorm(ctx Context, weight Tensor, eps float32) Tensor
	Scale(ctx Context, s float64) Tensor
	SumRows(ctx Context) Tensor

	AvgPool2D(ctx Context, k, s int, p float32) Tensor
	Conv2D(ctx Context, weight Tensor, s0, s1, p0, p1, d0, d1 int) Tensor

	IM2Col(ctx Context, weight Tensor, s0, s1, p0, p1, d0, d1 int) Tensor

	Sin(ctx Context) Tensor
	Cos(ctx Context) Tensor
	Tanh(ctx Context) Tensor
	GELU(ctx Context, up ...Tensor) Tensor
	SILU(ctx Context, up ...Tensor) Tensor
	RELU(ctx Context, up ...Tensor) Tensor
	Sigmoid(ctx Context) Tensor

	// SILUAlphaLimit is a variant of SILU that clamps the input to the range [-limit, limit]
	SILUAlphaLimit(ctx Context, up Tensor, alpha, limit float32) Tensor

	Reshape(ctx Context, shape ...int) Tensor
	View(ctx Context, offset int, shape ...int) Tensor
	Permute(ctx Context, shape ...int) Tensor
	Contiguous(ctx Context, shape ...int) Tensor
	Set(ctx Context, t2 Tensor, offset int, strides ...int) Tensor

	Pad(ctx Context, shape ...int) Tensor

	Stack(ctx Context, dim int, s ...Tensor) Tensor

	// Repeat repeats the tensor n times along dimension dim
	Repeat(ctx Context, dim, n int) Tensor
	Concat(ctx Context, t2 Tensor, dim int) Tensor
	Rows(ctx Context, t2 Tensor) Tensor
	Copy(ctx Context, t2 Tensor) Tensor
	Duplicate(ctx Context) Tensor

	TopK(ctx Context, k int) Tensor
	Argsort(ctx Context) Tensor
	Mean(ctx Context) Tensor
	Variance(ctx Context) Tensor
	Stddev(ctx Context) Tensor
	Sqr(ctx Context) Tensor
	Sqrt(ctx Context) Tensor
	Clamp(ctx Context, min, max float32) Tensor
}

Directories

Path Synopsis
nn
fast
fast provides implementations of fast (fused) operations for increased performance.

Jump to

Keyboard shortcuts

? : This menu
/ : Search site
f or F : Jump to
y or Y : Canonical URL