viva_tensor

viva_tensor - NumPy for the BEAM.

Born from the frustration of “why can’t I do tensor math in Erlang/Elixir without calling Python?” Now you can.

The name: “viva” = alive in Portuguese/Spanish. Tensors that live on the BEAM. Also, “viva” sounds better than “gleam_tensor” (sorry, marketing decision).

Performance tip: for matrices > 100x100, make sure the NIF is compiled. The difference is ~100-1000x. No, that’s not a typo.

import viva_tensor as t

let a = t.zeros([2, 3])
let b = t.ones([2, 3])
let assert Ok(c) = t.add(a, b)  // [1.0, 1.0, 1.0, 1.0, 1.0, 1.0]

Types

pub type Tensor =
  tensor.Tensor
pub type TflopsBackend =
  tflops.Backend

Values

pub fn add(
  a: tensor.Tensor,
  b: tensor.Tensor,
) -> Result(tensor.Tensor, error.TensorError)

Element-wise addition

pub fn add_broadcast(
  a: tensor.Tensor,
  b: tensor.Tensor,
) -> Result(tensor.Tensor, error.TensorError)

Add with broadcasting
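
A minimal sketch of broadcasting a vector across a matrix. The repetition of the [3] vector over each row and the row-major order of the result are assumptions based on NumPy-style broadcasting, not guarantees from this page.

import viva_tensor as t

pub fn broadcast_example() {
  let assert Ok(m) = t.from_list2d([[10.0, 20.0, 30.0], [40.0, 50.0, 60.0]])
  let row = t.vector([1.0, 2.0, 3.0])
  // Under NumPy-style rules the [3] vector is added to each row of the
  // [2, 3] matrix: [[11.0, 22.0, 33.0], [41.0, 52.0, 63.0]]
  let assert Ok(total) = t.add_broadcast(m, row)
  t.to_list(total)
}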

pub fn argmax(t: tensor.Tensor) -> Int

Index of maximum value

pub fn argmin(t: tensor.Tensor) -> Int

Index of minimum value
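
A small sketch of argmax/argmin; 0-based, flattened indexing is an assumption.

import viva_tensor as t

pub fn argmax_example() {
  let x = t.vector([3.0, 1.0, 7.0, 2.0])
  let hi = t.argmax(x)  // 2: index of 7.0 (0-based flat indexing assumed)
  let lo = t.argmin(x)  // 1: index of 1.0
  #(hi, lo)
}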

pub fn avg_pool2d(
  input: tensor.Tensor,
  pool_h: Int,
  pool_w: Int,
  stride_h: Int,
  stride_w: Int,
) -> Result(tensor.Tensor, error.TensorError)

Average pooling 2D

pub fn can_broadcast(a: List(Int), b: List(Int)) -> Bool

Can these shapes broadcast together?
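
A quick way to check shapes before calling a *_broadcast function. The specific True/False outcomes assume NumPy-style rules (dimensions are compatible when they are equal or one of them is 1).

import viva_tensor as t

pub fn broadcast_check_example() -> Bool {
  let _ = t.can_broadcast([2, 3], [3])     // True: trailing dims match
  let _ = t.can_broadcast([2, 3], [2, 1])  // True: a size-1 dim can stretch
  t.can_broadcast([2, 3], [4])             // False: 3 and 4 are incompatible
}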

pub fn clamp(
  t: tensor.Tensor,
  min_val: Float,
  max_val: Float,
) -> tensor.Tensor

Clamp values

pub fn conv2d(
  input: tensor.Tensor,
  kernel: tensor.Tensor,
  config: tensor.Conv2dConfig,
) -> Result(tensor.Tensor, error.TensorError)

2D Convolution

pub fn conv2d_config() -> tensor.Conv2dConfig

Default conv2d config (3x3 kernel, stride 1, no padding)

pub fn conv2d_same(
  kernel_h: Int,
  kernel_w: Int,
) -> tensor.Conv2dConfig

Conv2d config with “same” padding
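
A sketch of the conv2d workflow using the “same”-padding config. The 4D input/kernel layout shown here (batch, channels, height, width) is an assumption; check the tensor module for the layout viva_tensor actually expects.

import viva_tensor as t

pub fn conv_example() {
  // Input/kernel layout is an assumption (e.g. NCHW input, OIHW kernel)
  let input = t.random_uniform([1, 1, 8, 8])
  let kernel = t.random_normal([1, 1, 3, 3], 0.0, 0.1)
  let config = t.conv2d_same(3, 3)  // "same" padding keeps the 8x8 spatial size
  let assert Ok(out) = t.conv2d(input, kernel, config)
  t.shape(out)  // expected [1, 1, 8, 8] under the assumptions above
}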

pub fn detect_backends() -> List(tflops.Backend)

Detect available compute backends

pub fn div(
  a: tensor.Tensor,
  b: tensor.Tensor,
) -> Result(tensor.Tensor, error.TensorError)

Element-wise division

pub fn dot(
  a: tensor.Tensor,
  b: tensor.Tensor,
) -> Result(Float, error.TensorError)

Dot product (vectors only)
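
A minimal example; both arguments must be 1D.

import viva_tensor as t

pub fn dot_example() {
  let a = t.vector([1.0, 2.0, 3.0])
  let b = t.vector([4.0, 5.0, 6.0])
  let assert Ok(d) = t.dot(a, b)  // 1*4 + 2*5 + 3*6 = 32.0
  d
}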

pub fn fill(shape: List(Int), value: Float) -> tensor.Tensor

Create tensor filled with value

pub fn flatten(t: tensor.Tensor) -> tensor.Tensor

Flatten to 1D

pub fn from_list(data: List(Float)) -> tensor.Tensor

Create tensor from list (1D)

pub fn from_list2d(
  rows: List(List(Float)),
) -> Result(tensor.Tensor, error.TensorError)

Create 2D tensor from list of lists

pub fn global_avg_pool2d(
  input: tensor.Tensor,
) -> Result(tensor.Tensor, error.TensorError)

Global average pooling

pub fn he_init(fan_in: Int, fan_out: Int) -> tensor.Tensor

He initialization (for ReLU networks)

pub fn is_contiguous(t: tensor.Tensor) -> Bool

Check if contiguous

pub fn map(
  t: tensor.Tensor,
  f: fn(Float) -> Float,
) -> tensor.Tensor

Apply function to each element
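
For example, ReLU can be written as a plain Gleam closure and applied with map:

import viva_tensor as t

pub fn relu_example() {
  let x = t.vector([-2.0, -0.5, 0.0, 1.5])
  // ReLU applied element-wise
  let y =
    t.map(x, fn(v) {
      case v >. 0.0 {
        True -> v
        False -> 0.0
      }
    })
  t.to_list(y)  // [0.0, 0.0, 0.0, 1.5]
}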

pub fn matmul(
  a: tensor.Tensor,
  b: tensor.Tensor,
) -> Result(tensor.Tensor, error.TensorError)

Matrix-matrix multiplication

pub fn matmul_vec(
  mat: tensor.Tensor,
  vec: tensor.Tensor,
) -> Result(tensor.Tensor, error.TensorError)

Matrix-vector multiplication
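
A sketch combining matmul and matmul_vec. Shapes follow standard matrix algebra; the concrete values in the final comment assume the data passed to matrix and returned by to_list is row-major.

import viva_tensor as t

pub fn matmul_example() {
  let assert Ok(a) = t.matrix(2, 3, [1.0, 2.0, 3.0, 4.0, 5.0, 6.0])
  let assert Ok(b) = t.matrix(3, 2, [7.0, 8.0, 9.0, 10.0, 11.0, 12.0])
  // [2, 3] x [3, 2] -> [2, 2]
  let assert Ok(c) = t.matmul(a, b)
  // Matrix-vector: [2, 3] x [3] -> [2]
  let v = t.vector([1.0, 0.0, 1.0])
  let assert Ok(av) = t.matmul_vec(a, v)
  #(t.shape(c), t.to_list(av))  // #([2, 2], [4.0, 10.0]) assuming row-major data
}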

pub fn matrix(
  rows: Int,
  cols: Int,
  data: List(Float),
) -> Result(tensor.Tensor, error.TensorError)

Create matrix (2D tensor)

pub fn max(t: tensor.Tensor) -> Float

Maximum value

pub fn max_pool2d(
  input: tensor.Tensor,
  pool_h: Int,
  pool_w: Int,
  stride_h: Int,
  stride_w: Int,
) -> Result(tensor.Tensor, error.TensorError)

Max pooling 2D

pub fn mean(t: tensor.Tensor) -> Float

Mean of all elements

pub fn measure_tflops(
  backend: tflops.Backend,
  m: Int,
  n: Int,
  k: Int,
) -> tflops.TflopsResult

Measure TFLOPS for a single matmul operation

pub fn measure_tflops_averaged(
  backend: tflops.Backend,
  m: Int,
  n: Int,
  k: Int,
  iterations: Int,
) -> tflops.TflopsResult

Measure averaged TFLOPS (warmup + iterations)
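
A sketch of a benchmark run, assuming you take whichever backend detect_backends reports first. The matrix sizes and iteration count are arbitrary, and the fields of TflopsResult are not shown on this page, so the result is returned as-is.

import gleam/list
import viva_tensor as t

pub fn benchmark_example() {
  case list.first(t.detect_backends()) {
    Ok(backend) ->
      // 512x512x512 matmul, averaged over 3 iterations after warmup
      Ok(t.measure_tflops_averaged(backend, 512, 512, 512, 3))
    Error(_) -> Error(Nil)
  }
}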

pub fn min(t: tensor.Tensor) -> Float

Minimum value

pub fn mul(
  a: tensor.Tensor,
  b: tensor.Tensor,
) -> Result(tensor.Tensor, error.TensorError)

Element-wise multiplication

pub fn mul_broadcast(
  a: tensor.Tensor,
  b: tensor.Tensor,
) -> Result(tensor.Tensor, error.TensorError)

Multiply with broadcasting

pub fn norm(t: tensor.Tensor) -> Float

L2 norm (Euclidean length)

pub fn normalize(t: tensor.Tensor) -> tensor.Tensor

Normalize to unit length
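
For example, with the classic 3-4-5 vector:

import viva_tensor as t

pub fn normalize_example() {
  let v = t.vector([3.0, 4.0])
  let len = t.norm(v)        // sqrt(3.0^2 + 4.0^2) = 5.0
  let unit = t.normalize(v)  // [0.6, 0.8]
  #(len, t.norm(unit))       // #(5.0, 1.0)
}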

pub fn ones(shape: List(Int)) -> tensor.Tensor

Create tensor of ones

pub fn outer(
  a: tensor.Tensor,
  b: tensor.Tensor,
) -> Result(tensor.Tensor, error.TensorError)

Outer product

pub fn pad2d(
  t: tensor.Tensor,
  pad_h: Int,
  pad_w: Int,
) -> Result(tensor.Tensor, error.TensorError)

Pad 2D tensor with zeros

pub fn pad4d(
  t: tensor.Tensor,
  pad_h: Int,
  pad_w: Int,
) -> Result(tensor.Tensor, error.TensorError)

Pad 4D tensor with zeros

pub fn random_normal(
  shape: List(Int),
  mean: Float,
  std: Float,
) -> tensor.Tensor

Tensor with normal random values

pub fn random_uniform(shape: List(Int)) -> tensor.Tensor

Random uniform [0, 1)

pub fn rank(t: tensor.Tensor) -> Int

Get rank (number of dimensions)

pub fn reshape(
  t: tensor.Tensor,
  new_shape: List(Int),
) -> Result(tensor.Tensor, error.TensorError)

Reshape (total size must match)
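
A small sketch; the only documented constraint is that the total element count must match.

import viva_tensor as t

pub fn reshape_example() {
  let x = t.from_list([1.0, 2.0, 3.0, 4.0, 5.0, 6.0])
  // 6 elements can become [2, 3] or [3, 2]...
  let assert Ok(m) = t.reshape(x, [2, 3])
  // ...but not [4, 2]: total size 8 does not match 6, so this is an Error
  let bad = t.reshape(x, [4, 2])
  #(t.shape(m), bad)
}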

pub fn scale(t: tensor.Tensor, s: Float) -> tensor.Tensor

Scale by constant

pub fn shape(t: tensor.Tensor) -> List(Int)

Shape as list of dimensions

pub fn size(t: tensor.Tensor) -> Int

Total number of elements

pub fn squeeze(t: tensor.Tensor) -> tensor.Tensor

Remove dimensions of size 1

pub fn std(t: tensor.Tensor) -> Float

Standard deviation

pub fn sub(
  a: tensor.Tensor,
  b: tensor.Tensor,
) -> Result(tensor.Tensor, error.TensorError)

Element-wise subtraction

pub fn sum(t: tensor.Tensor) -> Float

Sum everything

pub fn to_contiguous(t: tensor.Tensor) -> tensor.Tensor

Convert to contiguous tensor

pub fn to_list(t: tensor.Tensor) -> List(Float)

Convert to list

pub fn to_strided(t: tensor.Tensor) -> tensor.Tensor

Convert to strided representation for O(1) element access

pub fn transpose(
  t: tensor.Tensor,
) -> Result(tensor.Tensor, error.TensorError)

Matrix transpose

pub fn transpose_strided(
  t: tensor.Tensor,
) -> Result(tensor.Tensor, error.TensorError)

Zero-copy transpose
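
A sketch contrasting the copying and zero-copy transposes. That transpose_strided may return a non-contiguous tensor which to_contiguous packs again is an assumption drawn from the strided/contiguous helpers above.

import viva_tensor as t

pub fn transpose_example() {
  let assert Ok(m) = t.matrix(2, 3, [1.0, 2.0, 3.0, 4.0, 5.0, 6.0])
  // Copying transpose: data is rearranged into a new [3, 2] tensor
  let assert Ok(mt) = t.transpose(m)
  // Zero-copy transpose: presumably only the strides change, so the result
  // may be non-contiguous until it is packed again
  let assert Ok(mts) = t.transpose_strided(m)
  let packed = case t.is_contiguous(mts) {
    True -> mts
    False -> t.to_contiguous(mts)
  }
  #(t.shape(mt), t.shape(packed))  // #([3, 2], [3, 2])
}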

pub fn unsqueeze(t: tensor.Tensor, axis: Int) -> tensor.Tensor

Add dimension of size 1

pub fn variance(t: tensor.Tensor) -> Float

Variance
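
A worked example tying mean, variance, and std together. Whether variance divides by n (population) or n - 1 (sample) is not stated here; the values in the comments assume the population form.

import viva_tensor as t

pub fn stats_example() {
  let x = t.vector([2.0, 4.0, 4.0, 4.0, 5.0, 5.0, 7.0, 9.0])
  let m = t.mean(x)      // 5.0
  let v = t.variance(x)  // 4.0 if this is the population variance (divide by n)
  let s = t.std(x)       // 2.0 under the same assumption
  #(m, v, s)
}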

pub fn vector(data: List(Float)) -> tensor.Tensor

Create vector (1D tensor)

pub fn xavier_init(fan_in: Int, fan_out: Int) -> tensor.Tensor

Xavier initialization for neural network weights
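
A sketch of initializing weights for a dense layer. Whether the returned shape is [fan_in, fan_out] or its transpose is not stated on this page, so only the calls are shown.

import viva_tensor as t

pub fn init_example() {
  // Weights for a 784 -> 128 dense layer. Values are random; fan_in/fan_out
  // only set the scale of the distribution.
  let w_relu = t.he_init(784, 128)      // suited to ReLU activations
  let w_tanh = t.xavier_init(784, 128)  // suited to tanh/sigmoid activations
  #(t.shape(w_relu), t.shape(w_tanh))
}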

pub fn zeros(shape: List(Int)) -> tensor.Tensor

All zeros. The tensor equivalent of a blank canvas.
