hello_ai
Demonstrates AI agent integration with function calling capabilities.
Source Code
Path: examples/hello_ai/
// llama-server -hf Qwen/Qwen3-8B-GGUF:Q8_0 --jinja --reasoning-format deepseek -ngl 99 -fa on --temp 0.6 --top-k 20 --top-p 0.95 --min-p 0
// TODO: llama-server --jinja -hf unsloth/gemma-3-4b-it-GGUF:Q4_K_XL

const std = @import("std");
const tk = @import("tokamak");

const Config = struct {
    sendmail: tk.sendmail.Config = .{},
    ai_client: tk.ai.ClientConfig = .{
        .base_url = "http://localhost:8080/v1/",
    },
};

const MathService = struct {
    n_used: i32 = 0,

    // TODO: auto-translate from tuple to object (at least for openai)
    pub fn add(self: *MathService, params: struct { a: i32, b: i32 }) i32 {
        defer self.n_used += 1;
        return params.a + params.b;
    }

    pub fn mul(self: *MathService, params: struct { a: i32, b: i32 }) i32 {
        defer self.n_used += 1;
        return params.a * params.b;
    }
};

const MailMessage = struct {
    from: []const u8,
    title: []const u8,
    date: []const u8,
    status: enum { read, unread },

    fn init(from: []const u8, title: []const u8, date: []const u8, read: bool) MailMessage {
        return .{ .from = from, .title = title, .date = date, .status = if (read) .read else .unread };
    }
};

const MailService = struct {
    const items: []const MailMessage = &.{
        .init("Sarah Chen", "Project Alpha Kick-off Meeting Notes", "2025-05-25", false),
        .init("Marketing Team", "Your Monthly Newsletter - May 2025", "2025-05-25", true),
        .init("DevOps Alerts", "High CPU Usage on Server 1", "2025-05-25", false),
        .init("Finance Department", "Important: Upcoming Payroll Changes", "2025-05-25", false),
        .init("Jessica", "Love you", "2025-05-24", true),
        .init("John Wick", "We need to talk.", "2025-05-24", false),
    };

    pub fn listMessages(_: *MailService, params: struct { limit: u32 = 10 }) []const MailMessage {
        return items[0..@min(params.limit, items.len)];
    }
};

const App = struct {
    math: MathService,
    mail: MailService,
    sendmail: tk.sendmail.Sendmail,
    http_client: tk.http.StdClient,
    ai_client: tk.ai.Client,
    agent_toolbox: tk.ai.AgentToolbox,
    agent_runtime: tk.ai.AgentRuntime,

    pub fn configure(bundle: *tk.Bundle) void {
        bundle.addInitHook(initTools);
    }

    fn initTools(tbox: *tk.ai.AgentToolbox) !void {
        try tbox.addTool("add", "Add two numbers", MathService.add);
        try tbox.addTool("mul", "Multiply two numbers", MathService.mul);
        try tbox.addTool("checkMailbox", "List email messages (limit = 10)", MailService.listMessages);
        try tbox.addTool("sendMail", "Send email (from can be null)", tk.sendmail.Sendmail.sendMail);
    }

    fn hello_ai(gpa: std.mem.Allocator, agr: *tk.ai.AgentRuntime, math: *MathService) !void {
        try runAgent(gpa, agr, "Can you tell how much is 12 * (32 + 4) and send the answer to foo@bar.com?", &.{ "add", "mul", "sendMail" });
        try runAgent(gpa, agr, "Is there anything important in my mailbox? Show me table, sorted on priority", &.{"checkMailbox"});

        try std.testing.expectEqual(2, math.n_used);
    }

    fn runAgent(gpa: std.mem.Allocator, agr: *tk.ai.AgentRuntime, prompt: []const u8, tools: []const []const u8) !void {
        var agent = try agr.createAgent(gpa, .{ .model = "", .tools = tools });
        defer agent.deinit();

        try agent.addMessage(.{ .role = .system, .content = .{ .text = "You are a helpful assistant./no_think" } });
        try agent.addMessage(.{ .role = .user, .content = .{ .text = prompt } });

        const res = try agent.run();
        std.debug.print("{s}\n", .{res});
    }
};

pub fn main() !void {
    try tk.app.run(App.hello_ai, &.{ Config, App });
}
Features Demonstrated
- AI client configuration
- Agent runtime and toolbox
- Function/tool registration
- Multi-step agent workflows
- Service dependencies and state management
Prerequisites
This example requires a local LLM server. Example commands for starting one:
# Using llama-server with Qwen
llama-server -hf Qwen/Qwen3-8B-GGUF:Q8_0 --jinja --reasoning-format deepseek -ngl 99 -fa on --temp 0.6 --top-k 20 --top-p 0.95 --min-p 0
# Or with Gemma
llama-server --jinja -hf unsloth/gemma-3-4b-it-GGUF:Q4_K_XL
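Before running the example, you can check that the server is reachable by querying its OpenAI-compatible endpoint (this assumes the default port from the Configuration section below):

# Should return a JSON list of loaded models if the server is up
curl http://localhost:8080/v1/models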
Architecture
Configuration
const Config = struct {
    sendmail: tk.sendmail.Config = .{},
    ai_client: tk.ai.ClientConfig = .{
        .base_url = "http://localhost:8080/v1/",
    },
};
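The ai_client section can point at any OpenAI-compatible endpoint, so switching to a different local server is just a URL change. A sketch (not part of the example) targeting Ollama's compatibility endpoint instead of llama-server:

ai_client: tk.ai.ClientConfig = .{
    // Ollama exposes an OpenAI-compatible API under /v1; any such server should work the same way
    .base_url = "http://localhost:11434/v1/",
},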
Services
MathService - Basic arithmetic operations with usage tracking:
- add(a, b) - Add two numbers
- mul(a, b) - Multiply two numbers
MailService - Email message management:
- listMessages(limit) - List email messages (limit defaults to 10)
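Tool functions are ordinary Zig methods: they receive a pointer to their service plus a params struct, and return a value the framework can serialize. A hypothetical extra service following the same shape as MathService (the names and fields here are illustrative, not part of the example):

const WeatherService = struct {
    const Forecast = struct { city: []const u8, days: u32, summary: []const u8 };

    // Same pattern as MathService.add: params struct in, serializable value out.
    pub fn forecast(_: *WeatherService, params: struct { city: []const u8, days: u32 = 3 }) Forecast {
        return .{ .city = params.city, .days = params.days, .summary = "sunny" };
    }
};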
Tool Registration
Tools are registered in an init hook:
fn initTools(tbox: *tk.ai.AgentToolbox) !void {
    try tbox.addTool("add", "Add two numbers", MathService.add);
    try tbox.addTool("mul", "Multiply two numbers", MathService.mul);
    try tbox.addTool("checkMailbox", "List email messages (limit = 10)", MailService.listMessages);
    try tbox.addTool("sendMail", "Send email (from can be null)", tk.sendmail.Sendmail.sendMail);
}
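Registering an additional tool follows the same call shape. For the hypothetical WeatherService sketched in the Services section, it would look like this (illustrative only):

try tbox.addTool("forecast", "Get a short weather forecast", WeatherService.forecast);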
Example Tasks
The example runs two agent tasks:
Task 1: Math Calculation and Email
"Can you tell how much is 12 * (32 + 4) and send the answer to foo@bar.com?"
The agent will:
- Use add to calculate 32 + 4 = 36
- Use mul to calculate 12 * 36 = 432
- Use sendMail to send the result
Task 2: Email Analysis
"Is there anything important in my mailbox? Show me table, sorted on priority"
The agent will:
- Use checkMailbox to retrieve messages
- Analyze and format them as a prioritized table
Running
cd examples/hello_ai
zig build run
Make sure your LLM server is running first!
How It Works
- Agent Creation: Create an agent with specific tools
- Message Addition: Add system and user messages
- Execution: Call agent.run(), which handles the tool-calling loop
- Tool Calls: The LLM decides which tools to call and when
- Results: Final response is returned
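Condensed into a minimal sketch, the same flow looks like this. It reuses only the calls already shown in runAgent, and assumes gpa and agr are in scope; the prompt and tool list are illustrative:

var agent = try agr.createAgent(gpa, .{ .model = "", .tools = &.{ "add", "mul" } });
defer agent.deinit();

try agent.addMessage(.{ .role = .system, .content = .{ .text = "You are a helpful assistant." } });
try agent.addMessage(.{ .role = .user, .content = .{ .text = "What is 7 * 6?" } });

// run() drives the loop: the model may request tool calls, the runtime executes them,
// and the conversation continues until the model produces a final answer.
const answer = try agent.run();
std.debug.print("{s}\n", .{answer});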
Key Concepts
- AgentToolbox: Registry of available tools
- AgentRuntime: Manages agent lifecycle and execution
- Tool Functions: Regular Zig functions exposed to the LLM
- Automatic Serialization: Parameters and results are automatically JSON-serialized
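To illustrate the last point: a params struct like the one add takes maps to a plain JSON object. A standalone sketch of that mapping (the framework does this internally; the exact wire format may differ):

const std = @import("std");

pub fn main() !void {
    // The same shape as MathService.add's params struct.
    const params: struct { a: i32, b: i32 } = .{ .a = 32, .b = 4 };
    // Prints {"a":32,"b":4} - roughly what the LLM sends as tool-call arguments.
    try std.json.stringify(params, .{}, std.io.getStdOut().writer());
}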
Next Steps
- See the blog example for service layer patterns
- Check out the AI client documentation in the Reference section