bench/bench

Types

Configuration for a benchmark run

pub type BenchConfig {
  BenchConfig(
    warmup_iterations: Int,
    iterations: Int,
    verbose: Bool,
  )
}

Constructors

  • BenchConfig(
      warmup_iterations: Int,
      iterations: Int,
      verbose: Bool,
    )

    Arguments

    warmup_iterations

    Number of warmup iterations (not counted)

    iterations

    Number of measured iterations

    verbose

    Whether to print individual iteration times

Result from a benchmark run

pub type BenchResult {
  BenchResult(
    name: String,
    total_ms: Int,
    iterations: Int,
    avg_us: Float,
    min_us: option.Option(Float),
    max_us: option.Option(Float),
  )
}

Constructors

  • BenchResult(
      name: String,
      total_ms: Int,
      iterations: Int,
      avg_us: Float,
      min_us: option.Option(Float),
      max_us: option.Option(Float),
    )

    Arguments

    name

    Name of the benchmark

    total_ms

    Total time in milliseconds

    iterations

    Number of iterations

    avg_us

    Average time per iteration in microseconds

    min_us

    Minimum time in microseconds (if captured)

    max_us

    Maximum time in microseconds (if captured)

A collection of benchmark results for a suite

pub type BenchSuite {
  BenchSuite(name: String, results: List(BenchResult))
}

Constructors

  • BenchSuite(name: String, results: List(BenchResult))

    Arguments

    name

    Name of the suite

    results

    The benchmark results collected so far

Values

pub fn add_result(
  suite: BenchSuite,
  result: BenchResult,
) -> BenchSuite

Add a benchmark result to a suite
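
For illustration, a sketch of building a suite by hand with new_suite and this function; the benchmark bodies are arbitrary placeholders:

import bench/bench
import gleam/int

pub fn collect() -> bench.BenchSuite {
  bench.new_suite("parsing")
  |> bench.add_result(bench.benchmark("parse_int", fn() { int.parse("42") }))
  |> bench.add_result(bench.benchmark("to_string", fn() { int.to_string(42) }))
}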

pub fn benchmark(name: String, f: fn() -> a) -> BenchResult

Run a benchmark with default configuration
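
A minimal sketch; the workload (summing a list) is arbitrary and not part of this module:

import bench/bench
import gleam/list

pub fn main() {
  let result =
    bench.benchmark("sum_1k", fn() {
      list.fold(list.range(1, 1000), 0, fn(acc, n) { acc + n })
    })
  bench.print_result(result)
}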

pub fn benchmark_detailed(
  name: String,
  config: BenchConfig,
  f: fn() -> a,
) -> BenchResult

Run a benchmark capturing min/max times (slower but more detailed)
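
A sketch of a detailed run; that min_us and max_us come back as Some values in this mode is an assumption based on the field descriptions above:

import bench/bench
import gleam/int
import gleam/list

pub fn detailed_example() -> bench.BenchResult {
  // In detailed mode, min_us/max_us should be populated (assumed)
  bench.benchmark_detailed("sort_3", bench.default_config(), fn() {
    list.sort([3, 1, 2], int.compare)
  })
}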

pub fn benchmark_with_config(
  name: String,
  config: BenchConfig,
  f: fn() -> a,
) -> BenchResult

Run a benchmark with custom configuration
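
A sketch combining config and benchmark_with_config; the workload is arbitrary:

import bench/bench

pub fn custom_run() -> bench.BenchResult {
  let cfg = bench.config(warmup: 5, iterations: 200, verbose: False)
  bench.benchmark_with_config("concat", cfg, fn() { "a" <> "b" })
}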

pub fn benchmark_with_setup(
  name: String,
  config: BenchConfig,
  setup: fn() -> s,
  f: fn(s) -> a,
  teardown: fn(s, a) -> Nil,
) -> BenchResult

Run a function that needs setup/teardown for each iteration
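
A sketch using gleam/dict as the per-iteration state; the workload is illustrative only:

import bench/bench
import gleam/dict

pub fn setup_example() -> bench.BenchResult {
  bench.benchmark_with_setup(
    "dict_insert",
    bench.default_config(),
    // setup: build fresh state for each iteration
    fn() { dict.new() },
    // measured body receives the state produced by setup
    fn(d) { dict.insert(d, "key", 1) },
    // teardown receives the state and the result; nothing to release here
    fn(_state, _result) { Nil },
  )
}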

pub fn compare_results(
  baseline: BenchResult,
  current: BenchResult,
) -> Float

Compare two benchmark results and return the percentage change
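
A sketch of reporting the change; the sign convention (positive meaning the current run is slower than the baseline) is an assumption, not stated by this module:

import bench/bench
import gleam/float
import gleam/io

pub fn report(baseline: bench.BenchResult, current: bench.BenchResult) -> Nil {
  let change = bench.compare_results(baseline, current)
  // Assumed convention: positive change means current is slower
  io.println("change: " <> float.to_string(change) <> "%")
}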

pub fn config(
  warmup warmup_iterations: Int,
  iterations iterations: Int,
  verbose verbose: Bool,
) -> BenchConfig

Create a custom configuration
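
The labelled arguments mirror the signature above; a minimal sketch:

import bench/bench

pub fn my_config() -> bench.BenchConfig {
  bench.config(warmup: 10, iterations: 100, verbose: True)
}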

pub fn default_config() -> BenchConfig

Default benchmark configuration

pub fn new_suite(name: String) -> BenchSuite

Create a new benchmark suite

pub fn print_result(result: BenchResult) -> Nil

Print a single benchmark result

pub fn print_suite_summary(suite: BenchSuite) -> Nil

Print summary of a benchmark suite

pub fn quick_config() -> BenchConfig

Quick benchmark configuration (fewer iterations)

pub fn run_suite(
  suite_name: String,
  benchmarks: List(#(String, fn() -> a)),
  config: BenchConfig,
) -> BenchSuite

Run all benchmarks in a suite
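
A sketch of a small suite; the workloads are arbitrary string operations, and note that all benchmark functions in one list must share a return type:

import bench/bench
import gleam/string

pub fn string_suite() -> bench.BenchSuite {
  bench.run_suite(
    "string_ops",
    [
      #("concat", fn() { "a" <> "b" }),
      #("repeat", fn() { string.repeat("ab", 100) }),
    ],
    bench.quick_config(),
  )
}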

pub fn thorough_config() -> BenchConfig

Thorough benchmark configuration (more iterations)

pub fn to_markdown(suite: BenchSuite) -> String

Format a benchmark suite as a Markdown table
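
A minimal sketch of emitting the table, assuming the output is printed as-is:

import bench/bench
import gleam/io

pub fn publish(suite: bench.BenchSuite) -> Nil {
  suite
  |> bench.to_markdown
  |> io.println
}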

pub fn within_threshold(
  baseline: BenchResult,
  current: BenchResult,
  threshold_percent: Float,
) -> Bool

Check whether a benchmark result is within an acceptable threshold of the baseline
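
For example, as a regression gate in CI; the 10.0 percent threshold is an arbitrary illustration:

import bench/bench

pub fn gate(baseline: bench.BenchResult, current: bench.BenchResult) -> Bool {
  // True when current stays within 10% of the baseline
  bench.within_threshold(baseline, current, 10.0)
}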
