# HyperLLM
Call all LLM APIs using the OpenAI format.
## Installation
Add `hyper_llm` to your list of dependencies in `mix.exs`:
```elixir
def deps do
  [
    {:hyper_llm, "~> 0.0.1"}
  ]
end
```
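Then run `mix deps.get` to fetch the dependency.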
## Configuration

Add API keys for the providers you want to use to your application config:

```elixir
config :hyper_llm,
  openai: [
    api_key: "sk-..."
  ],
  anthropic: [
    api_key: "sk-..."
  ]
```
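Hard-coding keys is fine for local experiments, but in a release you would typically read them from the environment at boot. A minimal `config/runtime.exs` sketch (standard Elixir runtime configuration, not specific to this library; the env var names are illustrative):

```elixir
import Config

# Resolve provider keys at boot time instead of compile time.
config :hyper_llm,
  openai: [api_key: System.get_env("OPENAI_API_KEY")],
  anthropic: [api_key: System.get_env("ANTHROPIC_API_KEY")]
```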
## Usage

```elixir
HyperLLM.Chat.start(model: "openai/gpt-4o-mini")
|> HyperLLM.Chat.append(:developer, "You are a helpful assistant.")
|> HyperLLM.Chat.append(:user, "Spell \"strawberry\"")
|> HyperLLM.Chat.completion()
#=> {:ok, "Strawberry. 🍓"}
```
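Models are addressed as `provider/model`, so switching providers is just a different model string. A sketch assuming the Anthropic provider is configured (the model name below is illustrative):

```elixir
HyperLLM.Chat.start(model: "anthropic/claude-3-5-sonnet")
|> HyperLLM.Chat.append(:user, "Spell \"strawberry\"")
|> HyperLLM.Chat.completion()
```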
If you are using Phoenix, you can use the `HyperLLM.Chat` module in your LiveView:

```elixir
defmodule ChatLive do
  use Phoenix.LiveView

  def render(assigns) do
    ~H"""
    <div>
      <dl>
        <%= for message <- @chat.messages do %>
          <dt><%= message.role %></dt>
          <dd><%= message.content %></dd>
        <% end %>
      </dl>
    </div>
    """
  end

  def mount(_params, _session, socket) do
    {:ok, assign(socket, chat: HyperLLM.Chat.start(model: "openai/gpt-4o-mini"))}
  end

  def handle_event("send_message", %{"message" => message}, socket) do
    chat = HyperLLM.Chat.append(socket.assigns.chat, :user, message)
    # Run the completion asynchronously so the user's message renders immediately.
    send(self(), :chat_completion)
    {:noreply, assign(socket, chat: chat)}
  end

  def handle_info(:chat_completion, socket) do
    case HyperLLM.Chat.completion(socket.assigns.chat) do
      {:ok, response} ->
        chat = HyperLLM.Chat.append(socket.assigns.chat, :assistant, response)
        {:noreply, assign(socket, chat: chat)}

      {:error, _reason} ->
        {:noreply, socket}
    end
  end
end
```
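The `"send_message"` event has to come from the markup, which the example above omits. A minimal form fragment to drop into `render/1` (a sketch; the input's `name="message"` matches the `%{"message" => message}` pattern in the handler):

```elixir
# Fragment for the ~H template in render/1, below the <dl>.
~H"""
<form phx-submit="send_message">
  <input type="text" name="message" placeholder="Ask something..." autocomplete="off" />
  <button type="submit">Send</button>
</form>
"""
```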
## Providers

| Provider | Completion | Streaming |
|---|---|---|
| Anthropic | ✅ | ❌ |
| Cloudflare | ✅ | ❌ |
| Groq | ✅ | ❌ |
| Ollama | ✅ | ❌ |
| OpenAI | ✅ | ❌ |
| Azure | ❌ | ❌ |
| AWS SageMaker | ❌ | ❌ |
| AWS Bedrock | ❌ | ❌ |
| Cohere | ❌ | ❌ |
| DeepSeek | ❌ | ❌ |
| Empower | ❌ | ❌ |
| Google - Vertex AI | ❌ | ❌ |
| Google - Palm | ❌ | ❌ |
| Google AI Studio | ❌ | ❌ |
| Hugging Face | ❌ | ❌ |
| Mistral AI | ❌ | ❌ |
| Perplexity | ❌ | ❌ |
| Replicate | ❌ | ❌ |
| TogetherAI | ❌ | ❌ |
| Vertex AI | ❌ | ❌ |