nn

package
v0.2.0 Latest
Warning

This package is not in the latest version of its module.

Go to latest
Published: Nov 28, 2025 License: Apache-2.0 Imports: 2 Imported by: 0

Documentation

Overview

Package nn provides neural network layers and building blocks.

Overview

This package contains:

  • Layers: Linear, Conv2D, MaxPool2D
  • Activations: ReLU, Sigmoid, Tanh
  • Loss functions: CrossEntropyLoss, MSELoss
  • Utilities: Sequential, Module interface, Parameter
  • Initialization: Xavier, Zeros, Ones, Randn

Basic Usage

import (
    "github.com/born-ml/born/nn"
    "github.com/born-ml/born/backend/cpu"
)

func main() {
    backend := cpu.New()

    // Build a simple MLP
    model := nn.NewSequential(
        nn.NewLinear(784, 128, backend),
        nn.NewReLU(),
        nn.NewLinear(128, 10, backend),
    )

    // Forward pass
    output := model.Forward(input)
}

Layers

Linear: Fully connected layer with Xavier initialization

layer := nn.NewLinear(inFeatures, outFeatures, backend)

Conv2D: 2D convolutional layer with im2col algorithm

conv := nn.NewConv2D(inChannels, outChannels, kernelH, kernelW, stride, padding, useBias, backend)

MaxPool2D: 2D max pooling layer

pool := nn.NewMaxPool2D(kernelSize, stride, backend)

Activations

Common activation functions:

relu := nn.NewReLU()
sigmoid := nn.NewSigmoid()
tanh := nn.NewTanh()

Loss Functions

CrossEntropyLoss: For classification tasks (numerically stable)

criterion := nn.NewCrossEntropyLoss(backend)
loss := criterion.Forward(logits, labels)

MSELoss: For regression tasks

criterion := nn.NewMSELoss(backend)
loss := criterion.Forward(predictions, targets)

Sequential Models

Build models by composing layers:

model := nn.NewSequential(
    nn.NewLinear(784, 256, backend),
    nn.NewReLU(),
    nn.NewLinear(256, 128, backend),
    nn.NewReLU(),
    nn.NewLinear(128, 10, backend),
)

Parameter Management

Access model parameters for optimization:

params := model.Parameters()
for _, param := range params {
    fmt.Println(param.Name(), param.Tensor().Shape())
}

Index

Constants

This section is empty.

Variables

This section is empty.

Functions

func Accuracy

func Accuracy[B tensor.Backend](
	logits *tensor.Tensor[float32, B],
	targets *tensor.Tensor[int32, B],
) float32

Accuracy computes the classification accuracy.

Example:

acc := nn.Accuracy(predictions, labels)
fmt.Printf("Accuracy: %.2f%%\n", acc*100)

func CrossEntropyBackward

func CrossEntropyBackward[B tensor.Backend](
	logits *tensor.Tensor[float32, B],
	targets *tensor.Tensor[int32, B],
	backend B,
) *tensor.Tensor[float32, B]

CrossEntropyBackward computes the backward pass for cross-entropy loss.

func Ones

func Ones[B tensor.Backend](shape tensor.Shape, backend B) *tensor.Tensor[float32, B]

Ones initializes a tensor with ones.

Example:

backend := cpu.New()
weights := nn.Ones(tensor.Shape{128, 784}, backend)

func Randn

func Randn[B tensor.Backend](shape tensor.Shape, backend B) *tensor.Tensor[float32, B]

Randn initializes a tensor with random values from N(0, 1).

Example:

backend := cpu.New()
weights := nn.Randn(tensor.Shape{128, 784}, backend)

func Xavier

func Xavier[B tensor.Backend](fanIn, fanOut int, shape tensor.Shape, backend B) *tensor.Tensor[float32, B]

Xavier initializes a tensor using Xavier/Glorot initialization.

Example:

backend := cpu.New()
weights := nn.Xavier(784, 128, tensor.Shape{128, 784}, backend)

func Zeros

func Zeros[B tensor.Backend](shape tensor.Shape, backend B) *tensor.Tensor[float32, B]

Zeros initializes a tensor with zeros (for biases).

Example:

backend := cpu.New()
bias := nn.Zeros(tensor.Shape{128}, backend)

Types

type Conv2D

type Conv2D[B tensor.Backend] = nn.Conv2D[B]

Conv2D represents a 2D convolutional layer.

func NewConv2D

func NewConv2D[B tensor.Backend](
	inChannels, outChannels int,
	kernelH, kernelW int,
	stride, padding int,
	useBias bool,
	backend B,
) *Conv2D[B]

NewConv2D creates a new 2D convolutional layer.

Example:

backend := cpu.New()
conv := nn.NewConv2D(1, 32, 3, 3, 1, 1, true, backend)  // in_channels=1, out_channels=32, kernel=3x3, stride=1, padding=1, useBias=true

type CrossEntropyLoss

type CrossEntropyLoss[B tensor.Backend] = nn.CrossEntropyLoss[B]

CrossEntropyLoss represents the cross-entropy loss for classification.

func NewCrossEntropyLoss

func NewCrossEntropyLoss[B tensor.Backend](backend B) *CrossEntropyLoss[B]

NewCrossEntropyLoss creates a new cross-entropy loss function.

Example:

backend := cpu.New()
criterion := nn.NewCrossEntropyLoss(backend)
loss := criterion.Forward(logits, labels)

type Linear

type Linear[B tensor.Backend] = nn.Linear[B]

Linear represents a fully connected (dense) layer.

func NewLinear

func NewLinear[B tensor.Backend](inFeatures, outFeatures int, backend B) *Linear[B]

NewLinear creates a new linear layer with Xavier initialization.

Example:

backend := cpu.New()
layer := nn.NewLinear(784, 128, backend)

type MSELoss

type MSELoss[B tensor.Backend] = nn.MSELoss[B]

MSELoss represents the mean squared error loss for regression.

func NewMSELoss

func NewMSELoss[B tensor.Backend](backend B) *MSELoss[B]

NewMSELoss creates a new MSE loss function.

Example:

backend := cpu.New()
criterion := nn.NewMSELoss(backend)
loss := criterion.Forward(predictions, targets)

type MaxPool2D

type MaxPool2D[B tensor.Backend] = nn.MaxPool2D[B]

MaxPool2D represents a 2D max pooling layer.

func NewMaxPool2D

func NewMaxPool2D[B tensor.Backend](kernelSize, stride int, backend B) *MaxPool2D[B]

NewMaxPool2D creates a new 2D max pooling layer.

Example:

backend := cpu.New()
pool := nn.NewMaxPool2D(2, 2, backend)  // kernel=2, stride=2

type Module

type Module[B tensor.Backend] = nn.Module[B]

Module interface defines the common interface for all neural network modules.

type Parameter

type Parameter[B tensor.Backend] = nn.Parameter[B]

Parameter represents a trainable parameter in a neural network.

func NewParameter

func NewParameter[B tensor.Backend](name string, t *tensor.Tensor[float32, B]) *Parameter[B]

NewParameter creates a new parameter with the given name and tensor.

type ReLU

type ReLU[B tensor.Backend] = nn.ReLU[B]

ReLU represents the Rectified Linear Unit activation function.

func NewReLU

func NewReLU[B tensor.Backend]() *ReLU[B]

NewReLU creates a new ReLU activation layer.

Example:

relu := nn.NewReLU()

type Sequential

type Sequential[B tensor.Backend] = nn.Sequential[B]

Sequential represents a sequential container of modules.

func NewSequential

func NewSequential[B tensor.Backend](modules ...Module[B]) *Sequential[B]

NewSequential creates a new sequential model.

Example:

backend := cpu.New()
model := nn.NewSequential(
    nn.NewLinear(784, 128, backend),
    nn.NewReLU(),
    nn.NewLinear(128, 10, backend),
)

type Sigmoid

type Sigmoid[B tensor.Backend] = nn.Sigmoid[B]

Sigmoid represents the Sigmoid activation function.

func NewSigmoid

func NewSigmoid[B tensor.Backend]() *Sigmoid[B]

NewSigmoid creates a new Sigmoid activation layer.

Example:

sigmoid := nn.NewSigmoid()

type Tanh

type Tanh[B tensor.Backend] = nn.Tanh[B]

Tanh represents the Tanh activation function.

func NewTanh

func NewTanh[B tensor.Backend]() *Tanh[B]

NewTanh creates a new Tanh activation layer.

Example:

tanh := nn.NewTanh()

Jump to

Keyboard shortcuts

? : This menu
/ : Search site
f or F : Jump to
y or Y : Canonical URL