SubAgent Examples
View Source

repo_root = Path.expand("..", __DIR__)
# Choose dependencies: use local path deps when this notebook runs inside
# the repo checkout (detected via mix.exs), otherwise pull the Hex release.
deps =
  case File.exists?(Path.join(repo_root, "mix.exs")) do
    true ->
      [
        {:ptc_runner, path: repo_root},
        {:llm_client, path: Path.join(repo_root, "llm_client")}
      ]

    false ->
      [{:ptc_runner, "~> 0.5.0"}]
  end
Mix.install(deps ++ [{:req_llm, "~> 1.0"}, {:kino, "~> 0.14"}], consolidate_protocols: false)

Setup
Add your API key in the Secrets panel (ss) for cloud models. Ollama works without a key.
# For testing locally and reloading the library
# IEx.Helpers.recompile()

api_key = System.get_env("LB_OPENROUTER_API_KEY") || System.get_env("OPENROUTER_API_KEY")
# Re-export the key under the canonical OPENROUTER_API_KEY name when one was found.
if api_key do
  System.put_env("OPENROUTER_API_KEY", api_key)
end
if(api_key, do: "API key configured", else: "No API key - Ollama only")

model_options =
if Code.ensure_loaded?(LLMClient) do
LLMClient.list_models()
|> Enum.filter(& &1.available)
|> Enum.map(&{&1.model_id, "#{&1.alias} - #{&1.description}"})
|> Enum.sort_by(&elem(&1, 1))
else
[
{"openrouter:anthropic/claude-haiku-4.5", "haiku - Claude Haiku 4.5"},
{"openrouter:google/gemini-2.5-flash", "gemini - Gemini 2.5 Flash"},
{"openrouter:deepseek/deepseek-chat-v3-0324", "deepseek - DeepSeek V3"}
]
end
model_input = Kino.Input.select("Model", model_options)

model = Kino.Input.read(model_input)
# Build the `llm` callback handed to SubAgent: a fn taking %{system, messages}
# and returning {:ok, response} | {:error, reason}.
my_llm =
  if Code.ensure_loaded?(LLMClient) do
    # LLMClient.generate_text/3 already returns the tagged tuple we need, so
    # pass its result straight through. (The original wrapped it in a
    # redundant identity `case`: {:ok, r} -> {:ok, r}; error -> error.)
    fn %{system: system, messages: messages} ->
      LLMClient.generate_text(
        model,
        [%{role: :system, content: system} | messages],
        receive_timeout: 60_000
      )
    end
  else
    fn %{system: system, messages: messages} ->
      case ReqLLM.generate_text(model, [%{role: :system, content: system} | messages], receive_timeout: 30_000) do
        # Normalize the ReqLLM response into a %{content, tokens} map.
        {:ok, r} -> {:ok, %{content: ReqLLM.Response.text(r), tokens: ReqLLM.Response.usage(r)}}
        error -> error
      end
    end
  end
"Ready: #{model}"

Output Modes
SubAgents support two output modes:
| Mode | Use When | Output |
|---|---|---|
| :json | Classification, extraction, summarization | Structured JSON |
| :ptc_lisp (default) | Computation, tool orchestration, multi-step reasoning | PTC-Lisp program result |
JSON Mode - Direct LLM Tasks
Use output: :json when the LLM can answer directly without computation:
# Aliases for the SubAgent API and its debug helpers.
alias PtcRunner.SubAgent
alias PtcRunner.SubAgent.Debug

review = "Great product, fast shipping! Would buy again."

# JSON mode: the LLM answers directly with structured JSON, constrained by
# the signature; {{review}} is interpolated from the context map.
run_opts = [
  output: :json,
  signature: "(review :string) -> {sentiment :string, confidence :float}",
  context: %{review: review},
  llm: my_llm
]

{:ok, step} =
  SubAgent.run(
    "Classify as positive/negative/neutral with confidence 0.0-1.0: {{review}}",
    run_opts
  )

Debug.print_trace(step, raw: true)
step.return

PTC-Lisp Mode - Computational Tasks
The default mode. The LLM writes a program to solve tasks that need accurate computation:
# Default (PTC-Lisp) mode: the LLM writes a small program to count letters,
# so the answer is computed rather than guessed. max_turns: 1 forces
# single-shot execution - one LLM call, no iteration.
letter_count_prompt = "How many r's are in raspberry?"

{:ok, step} = SubAgent.run(letter_count_prompt, llm: my_llm, max_turns: 1)

Debug.print_trace(step, raw: true)
step.return

Execution Modes
| max_turns | Mode | Behavior |
|---|---|---|
| 1 | Single-shot | One LLM call, answer immediately |
| > 1 (default: 10) | Multi-turn | Can iterate, fix errors, explore data |
Single-shot is faster and cheaper - use when the task is straightforward.
Multi-turn allows the LLM to inspect results with println, retry on errors, and call return when confident.
Signatures
Signatures define input/output types. They work with both output modes.
Format: (input1 :type, input2 :type) -> output_type
| Type | Examples |
|---|---|
| :string, :int, :float, :bool | Primitives |
| {field :type, ...} | Object with named fields |
| [element_type] | List of elements |
| {:optional, :type} | Optional field |
# Example signatures for reference - these strings are never executed here.
# Two string inputs -> object with a float score and a string explanation.
sig1 = "(text1 :string, text2 :string) -> {similarity :float, explanation :string}"
# Input: a list of objects with name/price; output: two categorized lists.
sig2 = "(items [{name :string, price :float}]) -> {expensive [{name :string}], cheap [{name :string}]}"
# Output-only signature: no inputs are taken from the context map.
sig3 = "{count :int, items [:string]}"
:ok

Compiled SubAgents
Compile an agent once to derive reusable PTC-Lisp logic. Runs without further LLM calls:
# Describe the agent once; compile/2 then uses the LLM a single time to
# derive reusable PTC-Lisp logic that can execute with no further LLM calls.
counting_spec = [
  prompt: "Count r's in {{word}}",
  signature: "(word :string) -> :int"
]

agent = SubAgent.new(counting_spec)
{:ok, compiled} = SubAgent.compile(agent, llm: my_llm)
IO.puts("Compiled source:\n#{compiled.source}")

# Execute on multiple inputs - no LLM calls
# Run the compiled program against several inputs; `compiled.execute` is a
# plain function, so no LLM calls happen in this loop.
words = ["strawberry", "raspberry", "program", "error"]
for word <- words do
step = compiled.execute.(%{"word" => word})
# step.return carries the program's :int result for this word.
"#{word}: #{step.return}"
end

Working with Tools
Tools let agents fetch external data or perform actions:
# Sample expense records used by the tool-calling examples below.
expenses = [
%{"id" => 1, "category" => "travel", "amount" => 450.00, "vendor" => "Airlines Inc"},
%{"id" => 2, "category" => "food", "amount" => 32.50, "vendor" => "Cafe Luna"},
%{"id" => 3, "category" => "travel", "amount" => 189.00, "vendor" => "Hotel Central"},
%{"id" => 4, "category" => "office", "amount" => 299.99, "vendor" => "Tech Store"},
%{"id" => 5, "category" => "food", "amount" => 28.00, "vendor" => "Deli Express"}
]
# Tool registry: tool name => {function, metadata}. The signature and
# description are shown to the LLM; the function here ignores its argument
# and returns the full dataset.
tools = %{
"list-expenses" => {fn _ -> expenses end,
signature: "() -> [{id :int, category :string, amount :float, vendor :string}]",
description: "Returns all expense records"
}
}
Kino.DataTable.new(expenses)

{:ok, step} = SubAgent.run(
# Continuation of the SubAgent.run/2 call opened on the previous line: the
# agent is expected to call the list-expenses tool and sum travel amounts.
"What is the total travel expense?",
tools: tools,
# Output contract: a single float field named `total`.
signature: "{total :float}",
llm: my_llm
)
Debug.print_trace(step, raw: true)
step.return

Interactive Query
question_input = Kino.Input.textarea("Question", default: "Show spending by category")

question = Kino.Input.read(question_input)
# Run the user's free-form question against the expense tools and branch
# on the tagged result.
case SubAgent.run(question, tools: tools, llm: my_llm) do
# Success: print the trace, then surface the agent's result.
{:ok, step} ->
Debug.print_trace(step)
step.return
# Failure: the failed step carries details in `step.fail`.
{:error, step} ->
Debug.print_trace(step)
"Failed: #{step.fail.message}"
end

Debug Options
# Build an agent only so its generated system prompt can be inspected
# (via preview_prompt) before any real run.
agent = SubAgent.new(prompt: "What is 2 + 2?")
SubAgent.preview_prompt(agent).system |> IO.puts()

print_trace options:
| Option | Description |
|---|---|
| raw: true | Show raw LLM input/output |
| messages: true | Show all messages including system prompt |
| usage: true | Show token usage |
| view: :compressed | Show what LLM sees (compressed format) |
Learn More
- Playground - PTC-Lisp basics
- SubAgent Guide
- PTC-Lisp Spec