viva_tensor
viva_tensor - Pure Gleam tensor library
N-dimensional arrays with named axes, broadcasting, and zero-copy views.
Features
- NumPy-inspired API
- Named tensors (Batch, Seq, Feature axes)
- Broadcasting
- Zero-copy transpose/reshape via strides
- O(1) random access with Erlang arrays
Quick Start
import viva_tensor as t
import viva_tensor/axis
// Module path assumed from the `named` namespace used in these docs
import viva_tensor/named as nt

// Create tensors
let a = t.zeros([2, 3])
let b = t.ones([2, 3])

// Operations (fallible ops return Result, so unwrap with `let assert`)
let assert Ok(c) = t.add(a, b)
let assert Ok(bt) = t.transpose(b)
let assert Ok(d) = t.matmul(a, bt)

// Named tensors
let named = nt.zeros([axis.batch(32), axis.feature(128)])
let summed = nt.sum_along(named, axis.Batch)
Types
Conv2d configuration type
pub type Conv2dConfig =
tensor.Conv2dConfig
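A minimal sketch of how the two documented constructors produce this config type (these snippets reuse the `import viva_tensor as t` alias from the Quick Start):
// Default config: 3x3 kernel, stride 1, no padding
let default_cfg = t.conv2d_config()
// "Same" padding: output spatial size matches the input
let same_cfg = t.conv2d_same(3, 3)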
Values
pub fn add(
a: tensor.Tensor,
b: tensor.Tensor,
) -> Result(tensor.Tensor, tensor.TensorError)
Element-wise addition
pub fn add_broadcast(
a: tensor.Tensor,
b: tensor.Tensor,
) -> Result(tensor.Tensor, tensor.TensorError)
Add with broadcasting
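A sketch of broadcasting, assuming NumPy-style trailing-dimension rules (the API is described as NumPy-inspired):
let m = t.fill([2, 3], 1.0)                    // shape [2, 3]
let row = t.fill([3], 10.0)                    // shape [3]
// can_broadcast([2, 3], [3]) should be True under these rules
let assert Ok(sum) = t.add_broadcast(m, row)   // every element becomes 11.0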
pub fn avg_pool2d(
input: tensor.Tensor,
pool_h: Int,
pool_w: Int,
stride_h: Int,
stride_w: Int,
) -> Result(tensor.Tensor, tensor.TensorError)
2D average pooling
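A usage sketch, assuming the usual output size of (H - pool_h) / stride_h + 1:
let x = t.random_uniform([8, 8])
let assert Ok(pooled) = t.avg_pool2d(x, 2, 2, 2, 2)   // expected shape [4, 4]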
pub fn can_broadcast(a: List(Int), b: List(Int)) -> Bool
Check if shapes can broadcast
pub fn clamp(
t: tensor.Tensor,
min_val: Float,
max_val: Float,
) -> tensor.Tensor
Clamp values
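For example, clipping noisy values into [0.0, 1.0]:
let x = t.random_normal([4, 4], 0.5, 1.0)
let clipped = t.clamp(x, 0.0, 1.0)   // all values now within [0.0, 1.0]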
pub fn conv2d(
input: tensor.Tensor,
kernel: tensor.Tensor,
config: tensor.Conv2dConfig,
) -> Result(tensor.Tensor, tensor.TensorError)
2D convolution. Input: [H, W], [C, H, W], or [N, C, H, W]. Kernel: [KH, KW], [C, KH, KW], or [C_out, C_in, KH, KW].
pub fn conv2d_config() -> tensor.Conv2dConfig
Default conv2d config (3x3 kernel, stride 1, no padding)
pub fn conv2d_same(
kernel_h: Int,
kernel_w: Int,
) -> tensor.Conv2dConfig
Conv2d config with “same” padding (output same size as input)
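Putting the pieces together, a sketch of a single-channel convolution with "same" padding (shapes follow the conventions listed for conv2d above):
let image = t.random_uniform([28, 28])      // [H, W]
let kernel = t.fill([3, 3], 1.0 /. 9.0)     // [KH, KW] box filter
let assert Ok(blurred) = t.conv2d(image, kernel, t.conv2d_same(3, 3))
// "same" padding, so blurred is expected to keep shape [28, 28]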
pub fn div(
a: tensor.Tensor,
b: tensor.Tensor,
) -> Result(tensor.Tensor, tensor.TensorError)
Element-wise division
pub fn dot(
a: tensor.Tensor,
b: tensor.Tensor,
) -> Result(Float, tensor.TensorError)
Dot product of two vectors
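For example:
let u = t.fill([3], 2.0)
let v = t.fill([3], 3.0)
let assert Ok(d) = t.dot(u, v)   // 2.0 *. 3.0 summed over 3 elements = 18.0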
pub fn fill(shape: List(Int), value: Float) -> tensor.Tensor
Create tensor filled with value
pub fn from_list2d(
rows: List(List(Float)),
) -> Result(tensor.Tensor, tensor.TensorError)
Create 2D tensor from list of lists
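For example, building a 2x2 matrix row by row:
let assert Ok(m) = t.from_list2d([[1.0, 2.0], [3.0, 4.0]])   // shape [2, 2]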
pub fn global_avg_pool2d(
input: tensor.Tensor,
) -> Result(tensor.Tensor, tensor.TensorError)
Global average pooling - reduces [N, C, H, W] to [N, C, 1, 1]
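A sketch using the shape stated above:
let features = t.random_uniform([2, 3, 8, 8])            // [N, C, H, W]
let assert Ok(pooled) = t.global_avg_pool2d(features)     // shape [2, 3, 1, 1]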
pub fn he_init(fan_in: Int, fan_out: Int) -> tensor.Tensor
He initialization (for ReLU networks)
pub fn map(
t: tensor.Tensor,
f: fn(Float) -> Float,
) -> tensor.Tensor
Apply function to each element
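For example, squaring every element:
let x = t.fill([2, 2], 3.0)
let squared = t.map(x, fn(v) { v *. v })   // every element becomes 9.0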
pub fn matmul(
a: tensor.Tensor,
b: tensor.Tensor,
) -> Result(tensor.Tensor, tensor.TensorError)
Matrix-matrix multiplication
pub fn matmul_vec(
mat: tensor.Tensor,
vec: tensor.Tensor,
) -> Result(tensor.Tensor, tensor.TensorError)
Matrix-vector multiplication
pub fn matrix(
rows: Int,
cols: Int,
data: List(Float),
) -> Result(tensor.Tensor, tensor.TensorError)
Create matrix (2D tensor)
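A sketch combining matrix, matmul, and matmul_vec; it assumes matrix takes its data in row-major order:
let assert Ok(a) = t.matrix(2, 2, [1.0, 2.0, 3.0, 4.0])   // [[1.0, 2.0], [3.0, 4.0]]
let assert Ok(b) = t.matrix(2, 2, [5.0, 6.0, 7.0, 8.0])
let assert Ok(c) = t.matmul(a, b)        // expected [[19.0, 22.0], [43.0, 50.0]]
let v = t.fill([2], 1.0)
let assert Ok(av) = t.matmul_vec(a, v)   // row sums of a: [3.0, 7.0]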
pub fn max_pool2d(
input: tensor.Tensor,
pool_h: Int,
pool_w: Int,
stride_h: Int,
stride_w: Int,
) -> Result(tensor.Tensor, tensor.TensorError)
2D max pooling. Input: [H, W] or [N, C, H, W].
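For example, 2x2 max pooling with stride 2 (same output-size assumption as avg_pool2d above):
let x = t.random_uniform([8, 8])
let assert Ok(pooled) = t.max_pool2d(x, 2, 2, 2, 2)   // expected shape [4, 4]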
pub fn mul(
a: tensor.Tensor,
b: tensor.Tensor,
) -> Result(tensor.Tensor, tensor.TensorError)
Element-wise multiplication
pub fn mul_broadcast(
a: tensor.Tensor,
b: tensor.Tensor,
) -> Result(tensor.Tensor, tensor.TensorError)
Multiply with broadcasting
pub fn outer(
a: tensor.Tensor,
b: tensor.Tensor,
) -> Result(tensor.Tensor, tensor.TensorError)
Outer product
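For example:
let u = t.fill([2], 2.0)
let v = t.fill([3], 3.0)
let assert Ok(m) = t.outer(u, v)   // shape [2, 3], every element 6.0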
pub fn pad2d(
t: tensor.Tensor,
pad_h: Int,
pad_w: Int,
) -> Result(tensor.Tensor, tensor.TensorError)
Pad 2D tensor with zeros
pub fn pad4d(
t: tensor.Tensor,
pad_h: Int,
pad_w: Int,
) -> Result(tensor.Tensor, tensor.TensorError)
Pad 4D tensor (batch) with zeros
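A sketch, assuming pad_h and pad_w are applied symmetrically to both sides of each spatial dimension:
let x = t.ones([4, 4])
let assert Ok(padded) = t.pad2d(x, 1, 1)   // expected shape [6, 6], zero border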
pub fn random_normal(
shape: List(Int),
mean: Float,
std: Float,
) -> tensor.Tensor
Tensor with normal random values
pub fn random_uniform(shape: List(Int)) -> tensor.Tensor
Tensor with uniform random values in [0, 1)
pub fn reshape(
t: tensor.Tensor,
new_shape: List(Int),
) -> Result(tensor.Tensor, tensor.TensorError)
Reshape tensor
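Reshaping keeps the element count, so a [2, 3] tensor can become [6] or [3, 2]:
let x = t.ones([2, 3])
let assert Ok(flat) = t.reshape(x, [6])
let assert Ok(r) = t.reshape(x, [3, 2])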
pub fn sub(
a: tensor.Tensor,
b: tensor.Tensor,
) -> Result(tensor.Tensor, tensor.TensorError)
Element-wise subtraction
pub fn to_contiguous(t: tensor.Tensor) -> tensor.Tensor
Convert to contiguous tensor
pub fn to_strided(t: tensor.Tensor) -> tensor.Tensor
Convert to strided tensor (O(1) access)
pub fn transpose(
t: tensor.Tensor,
) -> Result(tensor.Tensor, tensor.TensorError)
Transpose matrix
pub fn transpose_strided(
t: tensor.Tensor,
) -> Result(tensor.Tensor, tensor.TensorError)
Zero-copy transpose
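A sketch of the zero-copy path: transpose by swapping strides, then materialize only when a contiguous layout is needed:
let x = t.random_uniform([2, 3])
let assert Ok(xt) = t.transpose_strided(x)   // view over the same data, shape [3, 2]
let dense = t.to_contiguous(xt)              // copies into contiguous layout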
pub fn unsqueeze(
t: tensor.Tensor,
axis_idx: Int,
) -> tensor.Tensor
Add a dimension of size 1 at the given axis
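For example, adding a leading batch dimension:
let x = t.ones([3, 3])
let batched = t.unsqueeze(x, 0)   // shape [1, 3, 3]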
pub fn xavier_init(fan_in: Int, fan_out: Int) -> tensor.Tensor
Xavier initialization for neural network weights
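A final sketch covering both initializers. These docs do not state whether the returned matrix is [fan_out, fan_in] or [fan_in, fan_out], so check the shape before multiplying:
// Weight layout is an assumption; inspect the shape before use
let w_relu = t.he_init(784, 128)      // for ReLU layers
let w_tanh = t.xavier_init(784, 128)  // for tanh/sigmoid layers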