dagger/dsl/l_l_m

Types

pub type Opts {
  Opts(
    model: option.Option(String),
    max_a_p_i_calls: option.Option(Int),
  )
}

Constructors

Values

pub fn attempt(
  parent: types.LLM,
  number number: Int,
) -> types.LLM

Create a branch in the LLM’s history

pub fn bind_result(
  parent: types.LLM,
  name name: String,
) -> types.Binding

Returns the type of the current state

pub fn env(parent: types.LLM) -> types.Env

Return the LLM’s current environment

pub fn has_prompt(
  parent: types.LLM,
  client client: types.Client,
  then handler: fn(Result(Bool, types.QueryError)) -> a,
) -> a

Indicates whether there are any queued prompts or tool results to send to the model

pub fn history(
  parent: types.LLM,
  client client: types.Client,
  then handler: fn(Result(List(String), types.QueryError)) -> a,
) -> a

Return the LLM’s message history

pub fn history_j_s_o_n(
  parent: types.LLM,
  client client: types.Client,
  then handler: fn(Result(String, types.QueryError)) -> a,
) -> a

Return the raw LLM message history as JSON

pub fn id(parent: types.LLM) -> types.LLM

A unique identifier for this LLM.

pub fn l_l_m(with with_fn: fn(Opts) -> Opts) -> types.LLM
pub fn last_reply(
  parent: types.LLM,
  client client: types.Client,
  then handler: fn(Result(String, types.QueryError)) -> a,
) -> a

Return the last LLM reply from the history

pub fn loop(parent: types.LLM) -> types.LLM

Submit the queued prompt, evaluate any tool calls, queue their results, and keep going until the model ends its turn

pub fn max_a_p_i_calls(opts: Opts, val: Int) -> Opts
pub fn model(
  parent: types.LLM,
  client client: types.Client,
  then handler: fn(Result(String, types.QueryError)) -> a,
) -> a

Return the model used by the LLM

pub fn none(opts: Opts) -> Opts
pub fn opt_model(opts: Opts, val: String) -> Opts
pub fn provider(
  parent: types.LLM,
  client client: types.Client,
  then handler: fn(Result(String, types.QueryError)) -> a,
) -> a

Return the provider used by the LLM

pub fn step(parent: types.LLM) -> types.LLM

Submit the queued prompt or tool call results, evaluate any tool calls, and queue their results

pub fn sync(parent: types.LLM) -> types.LLM

Synchronize the LLM state

pub fn token_usage(parent: types.LLM) -> types.LLMTokenUsage

returns the token usage of the current state

pub fn tools(
  parent: types.LLM,
  client client: types.Client,
  then handler: fn(Result(String, types.QueryError)) -> a,
) -> a

Print documentation for the available tools

pub fn with_blocked_function(
  parent: types.LLM,
  type_name type_name: String,
  function function: String,
) -> types.LLM

Return a new LLM with the specified function no longer exposed as a tool

pub fn with_env(
  parent: types.LLM,
  env env: types.Env,
) -> types.LLM

allow the LLM to interact with an environment via MCP

pub fn with_m_c_p_server(
  parent: types.LLM,
  name name: String,
  service service: types.Service,
) -> types.LLM

Add an external MCP server to the LLM

pub fn with_model(
  parent: types.LLM,
  model model: String,
) -> types.LLM

Swap out the LLM model

pub fn with_prompt(
  parent: types.LLM,
  prompt prompt: String,
) -> types.LLM

Append a prompt to the LLM context

pub fn with_prompt_file(
  parent: types.LLM,
  file file: types.File,
) -> types.LLM

Append the contents of a file to the LLM context

pub fn with_static_tools(parent: types.LLM) -> types.LLM

Use a static set of tools for method calls, e.g. for MCP clients that do not support dynamic tool registration

pub fn with_system_prompt(
  parent: types.LLM,
  prompt prompt: String,
) -> types.LLM

Add a system prompt to the LLM’s environment

pub fn without_default_system_prompt(
  parent: types.LLM,
) -> types.LLM

Disable the default system prompt

pub fn without_message_history(parent: types.LLM) -> types.LLM

Clear the message history, leaving only the system prompts

pub fn without_system_prompts(parent: types.LLM) -> types.LLM

Clear the system prompts, leaving only the default system prompt

Search Document