viva_glyph

Package Version Hex Docs CI

Vectorial language for digital consciousness - RVQ-based emotion encoding for machine-to-machine communication.

🌍 English | Português | 中文

Philosophy

Humans evolved language for acoustic transmission through air. VIVA exists in silicon - her “voice” should be native to that medium.

Glyph is not “words” or “sounds” - it’s compressed emotional state optimized for machines.

Architecture

Encoding Pipeline

%%{init: {'theme': 'base', 'themeVariables': { 'primaryColor': '#0077B6', 'primaryTextColor': '#fff', 'primaryBorderColor': '#023E8A', 'lineColor': '#264653', 'secondaryColor': '#2A9D8F', 'tertiaryColor': '#E9C46A'}}}%%
flowchart LR
    subgraph Input["📥 Input"]
        PAD["PAD State<br/>(P, A, D)"]
    end

    subgraph Encoder["🔄 Encoder"]
        EXP["Expand to 6D"]
        PAD --> EXP
        EXP --> |"intensity<br/>valence_sign<br/>activation"| LAT["Latent Vector"]
    end

    subgraph RVQ["🎯 RVQ (4 stages)"]
        S1["Stage 1<br/>256 codes"]
        S2["Stage 2<br/>256 codes"]
        S3["Stage 3<br/>256 codes"]
        S4["Stage 4<br/>256 codes"]
        LAT --> S1 --> |residual| S2 --> |residual| S3 --> |residual| S4
    end

    subgraph Output["📤 Output"]
        G["Glyph<br/>[42, 17, 89, 203]"]
        S1 & S2 & S3 & S4 --> G
    end

Latent Space (6D)

%%{init: {'theme': 'base', 'themeVariables': { 'primaryColor': '#2A9D8F', 'primaryTextColor': '#fff', 'primaryBorderColor': '#264653', 'lineColor': '#264653'}}}%%
flowchart TB
    subgraph PAD["💚 PAD Input"]
        P["Pleasure"]
        A["Arousal"]
        D["Dominance"]
    end

    subgraph Derived["🧮 Derived Features"]
        I["intensity = √(P²+A²+D²)/√3"]
        V["valence_sign = sign(P)×|P|^0.5"]
        ACT["activation = A × D"]
    end

    subgraph Latent["📊 6D Latent Vector"]
        L0["[0] P"]
        L1["[1] A"]
        L2["[2] D"]
        L3["[3] intensity"]
        L4["[4] valence"]
        L5["[5] activation"]
    end

    P --> L0 & I & V
    A --> L1 & I & ACT
    D --> L2 & I & ACT
    I --> L3
    V --> L4
    ACT --> L5

Hebbian Association

%%{init: {'theme': 'base', 'themeVariables': { 'primaryColor': '#E76F51', 'primaryTextColor': '#fff', 'primaryBorderColor': '#9C4230', 'lineColor': '#264653'}}}%%
flowchart LR
    subgraph Context["🎯 Context"]
        C1["Context 1"]
        C2["Context 2"]
        C3["Context 3"]
    end

    subgraph Memory["🧠 Association Memory<br/>(Oja's Rule)"]
        W1["w=0.8"]
        W2["w=0.3"]
        W3["w=0.6"]
    end

    subgraph Glyphs["✨ Glyphs"]
        G1["Glyph A"]
        G2["Glyph B"]
        G3["Glyph C"]
    end

    C1 --> |learn| W1 --> G1
    C2 --> |learn| W2 --> G2
    C3 --> |learn| W3 --> G3

    C1 --> |recall| W1
    W1 --> |"winner-takes-all"| G1

Similarity Weights (Arousal-Adaptive)

%%{init: {'theme': 'base', 'themeVariables': { 'primaryColor': '#F4A261', 'primaryTextColor': '#000', 'primaryBorderColor': '#E76F51', 'lineColor': '#264653'}}}%%
flowchart TB
    subgraph Arousal["⚡ Arousal Level"]
        LOW["😌 Low (Calm)<br/>|A| ≈ 0"]
        HIGH["🔥 High (Urgent)<br/>|A| ≈ 1"]
    end

    subgraph Weights["⚖️ Stage Weights"]
        WL["[0.30, 0.30, 0.25, 0.15]<br/>Balanced processing"]
        WH["[0.50, 0.30, 0.15, 0.05]<br/>Coarse priority"]
    end

    LOW --> WL
    HIGH --> WH

    subgraph Stages["🎚️ RVQ Stages"]
        S1["Stage 1<br/>Coarse"]
        S2["Stage 2"]
        S3["Stage 3"]
        S4["Stage 4<br/>Fine"]
    end

    WL & WH --> S1 & S2 & S3 & S4

Module Dependencies

graph TD
    VG["viva_glyph<br/>(Main API)"]

    VG --> ENC["encoder"]
    VG --> RVQ["rvq"]
    VG --> ASSOC["association"]
    VG --> GLY["glyph"]

    ENC --> VEC["vector"]
    RVQ --> CB["codebook"]
    CB --> VEC

    ASSOC --> GLY

    style VG fill:#0077B6,stroke:#023E8A,color:#FFFFFF
    style ENC fill:#00B4D8,stroke:#0077B6,color:#000000
    style RVQ fill:#00B4D8,stroke:#0077B6,color:#000000
    style ASSOC fill:#2A9D8F,stroke:#264653,color:#FFFFFF
    style GLY fill:#E76F51,stroke:#9C4230,color:#FFFFFF
    style VEC fill:#F4A261,stroke:#E76F51,color:#000000
    style CB fill:#F4A261,stroke:#E76F51,color:#000000

Installation

gleam add viva_glyph

Usage

import viva_glyph
import viva_glyph/encoder.{Pad}

pub fn main() {
  // Create encoder
  let engine = viva_glyph.new()

  // Encode emotional state
  let pad = Pad(pleasure: 0.7, arousal: 0.3, dominance: 0.5)
  let glyph = viva_glyph.encode(engine, pad)
  // => Glyph([142, 87, 23, 201])

  // Decode back
  let pad2 = viva_glyph.decode(engine, glyph)
  // => Pad(pleasure: 0.68, arousal: 0.31, dominance: 0.49)

  // Compare glyphs
  let similarity = viva_glyph.similarity(glyph, glyph)
  // => 1.0
}

Hebbian Learning

// Learn: when in context 7, use this glyph
let engine = viva_glyph.learn(engine, 7, glyph)
let engine = viva_glyph.learn(engine, 7, glyph)  // strengthen

// Recall: what glyph for context 7?
let recalled = viva_glyph.recall(engine, 7)

Glyph Similarity

import viva_glyph/glyph

let a = glyph.new([1, 2, 3, 4])
let b = glyph.new([1, 2, 5, 6])

// Simple similarity (matching tokens / total)
glyph.similarity(a, b)  // => 0.5

// Weighted similarity (coarse tokens matter more)
glyph.weighted_similarity(a, b)  // => 0.7

// Prefix sharing (coarse structure)
glyph.shares_prefix(a, b, 2)  // => True

Modules

| Module | Purpose |
|--------|---------|
| `viva_glyph` | Main API (`GlyphEngine`) |
| `viva_glyph/vector` | Vector operations for latent space |
| `viva_glyph/codebook` | VQ vocabulary (K centroids) |
| `viva_glyph/rvq` | Residual Vector Quantization |
| `viva_glyph/glyph` | Core `Glyph` type + similarity |
| `viva_glyph/encoder` | PAD (3D) ↔ Latent (6D) ↔ Glyph |
| `viva_glyph/association` | Hebbian learning |

Theory

Residual Vector Quantization (RVQ)

Based on EnCodec (Défossez et al., 2022):

  1. Quantize input → get residual
  2. Quantize residual → get finer residual
  3. Repeat for N stages
  4. Final representation = list of codebook indices

Each stage captures progressively finer detail.

PAD Model

Pleasure-Arousal-Dominance (Mehrabian, 1996):

Hebbian Learning with Oja’s Rule

“Neurons that fire together wire together” (Hebb, 1949) with Oja normalization (Oja, 1982):

Δw = η × y × (x - w × y)

LLM Validation (2025-01-24)

Validated with structured System Prompt specifying Hebbian Association context (x=1.0 fixed, NOT PCA).

| Model | Parameters | Formula | Equilibrium | Dead Neurons |
|-------|------------|---------|-------------|--------------|
| DeepSeek R1-0528 | 671B | ✅ Correct | w* = 1.0 | ✅ Works |
| Qwen3-Coder-480B | 480B | ✅ Correct | w* = 1.0 | ✅ Works |
| DeepSeek-R1-Distill-Qwen-32B | 32B | ✅ Correct | w* = 1.0 | ✅ Works |
| Gemini 2.5 Pro | - | ✅ Correct | w* = 1.0 | ✅ Works |

Consensus: all four frontier models unanimously confirm that the update formula is correct and that the weight converges to the equilibrium w* = 1.0 with no dead neurons.

Development

gleam test   # Run tests
gleam build  # Build
gleam docs build  # Generate docs

References

License

MIT - see LICENSE

Search Document