Skip to content

Instantly share code, notes, and snippets.

@sammyjoyce
Last active April 11, 2025 02:31
Show Gist options
  • Select an option

  • Save sammyjoyce/e12f238d944cf81ccead78a6dffba828 to your computer and use it in GitHub Desktop.

Select an option

Save sammyjoyce/e12f238d944cf81ccead78a6dffba828 to your computer and use it in GitHub Desktop.
An example of a Horsehead plugin that sets up a multi-agent ReAct workflow with native tool use.
--------------------------------------------------------------------------------
-- init.lua for "react_tool_use_multi_model_example" Plugin
--------------------------------------------------------------------------------
-- This plugin demonstrates:
-- 1) Multiple role-specific agents using different models:
-- • Architect uses a cloud-based model (cloud_llm)
-- • Reviewer uses a thinking model (thinking_llm)
-- • Coder uses a local model (local_llm_http)
-- 2) A ReAct-style workflow where agents can natively call tools.
-- 3) A search tool that uses ripgrep ("rg") on the local machine.
-- 4) A code editor tool that applies a code patch.
--------------------------------------------------------------------------------
local log = Horsehead.log
local spawn = Horsehead.async.spawn
local await = Horsehead.async.await
--------------------------------------------------------------------------------
-- 1. Model Registrations
--------------------------------------------------------------------------------
-- Local LLM for coding tasks
Horsehead.register.model({
  name = "local_llm_http",
  provider = "Local LLM Demo",
  -- Sends the prompt to a locally hosted LLM over HTTP and returns its text.
  -- Raises on transport failure, missing body, or a malformed JSON reply.
  generate = function(model_def, agent_ref, prompt)
    -- Endpoint is overridable via environment; default targets localhost.
    local url = Horsehead.os.getenv("LOCAL_LLM_ENDPOINT")
      or "http://localhost:1234/api/generate"
    local request_body = Horsehead.json.encode({
      prompt = prompt.content,
      stream = false,
    })
    local request_err, response = await(Horsehead.http.request_async("POST", url, {
      headers = { ["Content-Type"] = "application/json" },
      body = request_body,
    }))
    if request_err then
      error("E_LOCAL_LLM_FAILED: " .. request_err.message)
    end
    if not (response and response.body) then
      error("E_LOCAL_LLM_NO_RESPONSE: No valid response from Local LLM.")
    end
    local decoded = Horsehead.json.decode(response.body)
    if not (decoded and decoded.response) then
      error("E_LOCAL_LLM_BAD_RESPONSE: Missing 'response'.")
    end
    return { content = decoded.response }
  end,
})
-- Cloud LLM for architectural planning
Horsehead.register.model({
  name = "cloud_llm",
  provider = "Cloud LLM Service",
  -- Forwards the prompt to the configured cloud endpoint and returns the reply.
  -- Raises on transport failure, missing body, or a malformed JSON reply.
  generate = function(model_def, agent_ref, prompt)
    local endpoint = Horsehead.os.getenv("CLOUD_LLM_ENDPOINT")
      or "https://cloud.example.com/api/generate"
    local http_err, http_resp = await(Horsehead.http.request_async("POST", endpoint, {
      headers = { ["Content-Type"] = "application/json" },
      -- Encode inline; the request payload is small and used nowhere else.
      body = Horsehead.json.encode({ prompt = prompt.content, stream = false }),
    }))
    if http_err then
      error("E_CLOUD_LLM_FAILED: " .. http_err.message)
    end
    if not (http_resp and http_resp.body) then
      error("E_CLOUD_LLM_NO_RESPONSE: No valid response from Cloud LLM.")
    end
    local parsed = Horsehead.json.decode(http_resp.body)
    if not (parsed and parsed.response) then
      error("E_CLOUD_LLM_BAD_RESPONSE: Missing 'response'.")
    end
    return { content = parsed.response }
  end,
})
-- Thinking LLM for reviewing tasks (simulating extra "pondering")
Horsehead.register.model({
  name = "thinking_llm",
  provider = "Thinking LLM",
  -- Emulates a deliberative reviewer: pause briefly to "think", then return a
  -- canned approval. The prompt content is intentionally ignored in this demo.
  generate = function(model_def, agent_ref, prompt)
    local sleep_err = await(Horsehead.os.sleep_async(600))
    if sleep_err then
      error("E_THINKING_SLEEP_FAILED: " .. sleep_err.message)
    end
    return { content = "REVIEW: After careful thought, the plan looks APPROVED. #END_REVIEW" }
  end,
})
--------------------------------------------------------------------------------
-- 2. Agents Registration with Different Models
--------------------------------------------------------------------------------
-- Bind each role-specific agent to the model best suited to it.
local agent_specs = {
  { id = "architect", model = "cloud_llm",
    description = "Drafts high-level architectural plans using a cloud-based LLM." },
  { id = "reviewer", model = "thinking_llm",
    description = "Reviews plans with deliberation using a thinking model." },
  { id = "coder", model = "local_llm_http",
    description = "Generates code patches using a local LLM." },
}
for _, spec in ipairs(agent_specs) do
  Horsehead.agent.register(spec.id, {
    name = spec.id,
    default_model = spec.model,
    description = spec.description,
  })
end
--------------------------------------------------------------------------------
-- 3. Tools
-- (A) search_codebase_tool using ripgrep ("rg")
--------------------------------------------------------------------------------
-- Tool runner: search the src/ directory with ripgrep ("rg").
-- Returns { status = "ok", content = <rg output>, content_type = ... } on
-- success, or { status = "error", message = ... } on bad input / spawn failure.
local function search_codebase_tool_run(tool_def, session_ref, args_table)
  local query = args_table.query
  if not (type(query) == "string" and query ~= "") then
    return { status = "error", message = "Query parameter is missing or invalid." }
  end
  Horsehead.ui.print("[search_codebase_tool] Running ripgrep for query: " .. query)
  -- Invoke ripgrep in JSON mode. Arguments are passed as a list, so the query
  -- is never interpreted by a shell.
  local spawn_err, rg_output = await(Horsehead.os.spawn_async("rg", { "--json", query, "src/" }))
  if spawn_err then
    return { status = "error", message = "E_RG_FAILED: " .. spawn_err.message }
  end
  -- rg --json emits a stream of JSON objects (one per line), not a single JSON
  -- document; downstream consumers may need to parse it line by line. Fall
  -- back to an empty array when there is no output at all.
  return {
    status = "ok",
    content = rg_output or "[]",
    content_type = "application/json",
  }
end
-- (B) edit_code_tool: Append a patch to a file.
-- Tool runner: append a patch block to the end of an existing file.
-- The file must already exist (read-then-write); returns a status table.
local function edit_code_tool_run(tool_def, session_ref, args_table)
  local target_file = args_table.file
  local patch_text = args_table.patch
  if type(target_file) ~= "string" or target_file == "" then
    return { status = "error", message = "Missing or invalid file parameter." }
  end
  if type(patch_text) ~= "string" or patch_text == "" then
    return { status = "error", message = "Missing or invalid patch parameter." }
  end
  Horsehead.ui.print("[edit_code_tool] Appending patch to file: " .. target_file)
  local read_err, original_content = await(Horsehead.fs.read_file_async(target_file))
  if read_err then
    return { status = "error", message = "E_READ_FAILED: " .. read_err.message }
  end
  -- Append the patch under a marker comment rather than replacing content.
  local updated_content = table.concat({ original_content, "\n-- PATCH APPLIED:\n", patch_text })
  local write_err = await(Horsehead.fs.write_file_async(target_file, updated_content))
  if write_err then
    return { status = "error", message = "E_WRITE_FAILED: " .. write_err.message }
  end
  return { status = "ok", content = "Patch applied to " .. target_file }
end
-- Register both tools with the host. The search tool is read-only; the edit
-- tool mutates files on disk and is therefore flagged dangerous.
local function register_tools()
  local always_available = function() return true end
  local tool_defs = {
    {
      name = "search_codebase_tool",
      description = "Uses ripgrep to search the src/ directory for a query.",
      parameters = { { name = "query", type = "string", description = "Search query." } },
      run = search_codebase_tool_run,
      isAvailableFn = always_available,
      isDangerous = false,
    },
    {
      name = "edit_code_tool",
      description = "Appends a patch to a given file.",
      parameters = {
        { name = "file", type = "string", description = "Relative file path to edit." },
        { name = "patch", type = "string", description = "The patch text to append." },
      },
      run = edit_code_tool_run,
      isAvailableFn = always_available,
      isDangerous = true,
    },
  }
  for _, def in ipairs(tool_defs) do
    Horsehead.register.tool(def)
  end
end
--------------------------------------------------------------------------------
-- 4. ReAct Workflow
-- States:
-- init -> ask_architect -> ask_reviewer -> [loop if revision needed] -> ask_coder -> implement -> final
--------------------------------------------------------------------------------
-- Register the multi-agent ReAct workflow. State graph:
--   init -> ask_architect -> ask_reviewer -> (back to ask_architect on
--   NEEDS_REVISION) -> ask_coder -> implement -> final
-- All data shared between states travels in `context`.
local function register_workflow()
  Horsehead.agent.register_workflow("multi_agent_react_tooluse_workflow", {
    description = "Multi-agent ReAct workflow that uses different models and ripgrep search.",
    states = {
      -- Wait for the user to describe the desired change.
      ["init"] = {
        on_enter = function(context)
          Horsehead.ui.print("[Workflow] (init) Please describe the code change you want to implement.")
        end,
        transitions = {
          on_message = function(context, message)
            -- Only a non-empty user message advances the workflow; anything
            -- else keeps us in "init" (no return value = no transition).
            if message.sender == "User" and message.content and message.content ~= "" then
              context.user_request = message.content
              return "ask_architect"
            end
          end
        }
      },
      -- Architect (cloud_llm) drafts a high-level plan; may call the search tool.
      ["ask_architect"] = {
        on_enter = function(context)
          Horsehead.ui.print("[Workflow] (ask_architect) The Architect (cloud_llm) is drafting a plan...")
          local prompt_content = table.concat({
            "User request: ", context.user_request, "\n\n",
            "As the Architect, draft a high-level plan to implement this change. ",
            "You may call `search_codebase_tool` if needed. End your response with \"#END_PLAN\"."
          })
          local err, final_response = await(Horsehead.agent.send_react_prompt_async(
            "architect",
            { role = "system", content = prompt_content },
            {
              tools = { { name = "search_codebase_tool", parameters = { "query" } } },
              auto_loop = true
            }
          ))
          if err then
            -- NOTE(review): on error we still fall through to the default
            -- transition with a stale/nil plan — confirm desired behavior.
            Horsehead.ui.print("[ask_architect] Error: " .. err.message)
            return
          end
          context.architect_plan = final_response.content
        end,
        transitions = { default = "ask_reviewer" }
      },
      -- Reviewer (thinking_llm) approves the plan or sends it back for revision.
      ["ask_reviewer"] = {
        on_enter = function(context)
          Horsehead.ui.print("[Workflow] (ask_reviewer) The Reviewer (thinking_llm) is evaluating the plan...")
          local prompt_content = table.concat({
            "Architect's plan:\n", context.architect_plan or "(none)", "\n\n",
            "As the Reviewer, evaluate the plan. Respond with either \"APPROVED\" or \"NEEDS_REVISION\". ",
            "End your response with \"#END_REVIEW\"."
          })
          local err, final_response = await(Horsehead.agent.send_react_prompt_async(
            "reviewer",
            { role = "system", content = prompt_content },
            {
              tools = { { name = "search_codebase_tool", parameters = { "query" } } },
              auto_loop = true
            }
          ))
          if err then
            Horsehead.ui.print("[ask_reviewer] Error: " .. err.message)
            return
          end
          local review_text = final_response.content or ""
          context.reviewer_feedback = review_text
          -- Plain substring check: any occurrence of "APPROVED" (case-insensitive)
          -- counts as approval. NOTE(review): a reply like "NOT APPROVED" would
          -- also match — acceptable for this demo.
          context.review_approved = review_text:upper():find("APPROVED", 1, true) ~= nil
        end,
        transitions = {
          default = function(context)
            if context.review_approved then
              return "ask_coder"
            else
              Horsehead.ui.print("[Workflow] Reviewer requests revision. Looping back to Architect.")
              return "ask_architect"
            end
          end
        }
      },
      -- Coder (local_llm_http) produces a JSON {file, patch} object.
      ["ask_coder"] = {
        on_enter = function(context)
          Horsehead.ui.print("[Workflow] (ask_coder) The Coder (local_llm) is generating a patch...")
          local prompt_content = table.concat({
            "User request: ", context.user_request, "\n\n",
            "Architect's plan:\n", context.architect_plan or "", "\n\n",
            "Reviewer Feedback:\n", context.reviewer_feedback or "", "\n\n",
            "As the Coder, produce a JSON object in the format {\"file\": \"<relative path>\", \"patch\": \"<patch text>\"}. ",
            "You may call `search_codebase_tool` if necessary. End your response with \"#END_PATCH\"."
          })
          local err, final_response = await(Horsehead.agent.send_react_prompt_async(
            "coder",
            { role = "system", content = prompt_content },
            {
              tools = { { name = "search_codebase_tool", parameters = { "query" } } },
              auto_loop = true
            }
          ))
          if err then
            Horsehead.ui.print("[ask_coder] Error: " .. err.message)
            return
          end
          context.coder_patch_output = final_response.content
        end,
        transitions = { default = "implement" }
      },
      -- Parse the coder's JSON and apply the patch via edit_code_tool.
      ["implement"] = {
        on_enter = function(context)
          Horsehead.ui.print("[Workflow] (implement) Applying the patch...")
          -- FIX: the coder is instructed to end its reply with "#END_PATCH",
          -- so the raw output is not valid JSON on its own. Extract the first
          -- balanced {...} object before decoding (falls back to the raw text
          -- if no balanced braces are found).
          local raw_output = context.coder_patch_output or ""
          local json_text = raw_output:match("%b{}") or raw_output
          local ok, parsed = pcall(Horsehead.json.decode, json_text)
          if not ok or type(parsed) ~= "table" or not parsed.file or not parsed.patch then
            Horsehead.ui.print("[Workflow] Coder patch output invalid: " .. tostring(context.coder_patch_output))
            return
          end
          local err_tool, result_tool = await(Horsehead.tool.run(
            "edit_code_tool",
            { file = parsed.file, patch = parsed.patch },
            Horsehead.session.get_current()
          ))
          if err_tool then
            Horsehead.ui.print("[implement] edit_code_tool error: " .. err_tool.message)
          elseif result_tool and result_tool.status == "ok" then
            Horsehead.ui.print("[implement] " .. result_tool.content)
          else
            Horsehead.ui.print("[implement] edit_code_tool failed.")
          end
        end,
        transitions = { default = "final" }
      },
      -- Terminal state.
      ["final"] = {
        on_enter = function(context)
          Horsehead.ui.print("[Workflow] (final) Workflow complete. Code changes have been applied.")
        end
      }
    }
  })
end
--------------------------------------------------------------------------------
-- 5. Command Registration: Start Workflow
--------------------------------------------------------------------------------
-- Expose the "react_tool_task" command, which launches the workflow on the
-- architect agent inside an async task.
local function register_commands()
  -- Runs inside spawn(): starts the workflow and reports the outcome.
  local function start_workflow_task()
    Horsehead.ui.print("[ReAct Multi-Model] Starting workflow for code change request.")
    local start_err = await(Horsehead.agent.start_workflow_async(
      "architect",
      "multi_agent_react_tooluse_workflow",
      {} -- fresh, empty workflow context
    ))
    if start_err then
      Horsehead.ui.print("[ReAct Multi-Model] Workflow failed to start: " .. start_err.message)
    else
      Horsehead.ui.print("[ReAct Multi-Model] Workflow started. Please type your code change request.")
    end
  end
  Horsehead.register.command("react_tool_task", function(args_table)
    spawn(start_workflow_task)
  end, {
    description = "Starts the multi-agent ReAct workflow using different models and ripgrep search."
  })
end
--------------------------------------------------------------------------------
-- 6. Startup: Register Everything
--------------------------------------------------------------------------------
-- Wire up tools, workflow, and commands once the host signals startup.
Horsehead.events.on("startup", function()
  log("info", "[react_tool_use_multi_model_example] Plugin starting up...")
  for _, register_step in ipairs({ register_tools, register_workflow, register_commands }) do
    register_step()
  end
  log("info", "[react_tool_use_multi_model_example] Startup complete. Agents, tools, and workflow registered.")
end)
--------------------------------------------------------------------------------
-- 7. Standard Message & Job-Complete Handlers
--------------------------------------------------------------------------------
-- Message hook: this plugin does no per-message filtering; returning true
-- lets every message pass through unchanged.
Horsehead.events.on("message", function(session_ref, message)
return true
end)
-- Log a one-line summary whenever a background job finishes.
-- Fields read from job_info: type, status, id, and metadata.tool_name
-- (tool jobs only).
Horsehead.events.on("job_complete", function(job_info)
  if job_info.type == "tool_invocation" and job_info.metadata then
    -- FIX: wrap status in tostring() — the original concatenated it raw, so a
    -- nil status would raise "attempt to concatenate a nil value" here.
    log("info", "[react_tool_use_multi_model_example] Tool finished: " ..
      tostring(job_info.metadata.tool_name) .. " status=" .. tostring(job_info.status))
  elseif job_info.type == "model_generation" then
    -- Same guard for id and status on model-generation jobs.
    log("info", "[react_tool_use_multi_model_example] Model job finished. ID=" ..
      tostring(job_info.id) .. " status=" .. tostring(job_info.status))
  end
  return true
end)
Sign up for free to join this conversation on GitHub. Already have an account? Sign in to comment