Implement specialized agent roles and system prompts inspired by models.yaml
@@ -8,6 +8,9 @@ ucxl = { path = "../UCXL" }
chrs-mail = { path = "../chrs-mail" }
chrs-graph = { path = "../chrs-graph" }
chrs-council = { path = "../chrs-council" }
chrs-backbeat = { path = "../chrs-backbeat" }
chrs-exec = { path = "../chrs-exec" }
chrs-prompts = { path = "../chrs-prompts" }
tokio = { version = "1", features = ["full"] }
serde = { version = "1", features = ["derive"] }
serde_json = "1"

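For orientation, a minimal self-contained sketch of the kind of role-to-prompt mapping the new chrs-prompts dependency suggests. Only the get_system_prompt(role) call site appears in this diff, so the Role variants and prompt strings below are illustrative assumptions, not the crate's actual contents:

// Hypothetical sketch only; the real chrs-prompts implementation is not shown in this diff.
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
pub enum Role {
    Architect,
    SeniorSoftwareArchitect,
    Developer,       // assumed worker role
    SecurityAuditor, // assumed auditor role
}

/// Maps an agent role to its system prompt (placeholder text for illustration).
pub fn get_system_prompt(role: Role) -> &'static str {
    match role {
        Role::Architect | Role::SeniorSoftwareArchitect =>
            "You are a senior software architect. Decompose incoming tasks and delegate sub-tasks to peers.",
        Role::Developer =>
            "You are a developer. Implement delegated tasks in the sandboxed executor and record the results.",
        Role::SecurityAuditor =>
            "You are a security auditor. Review the latest implementation for vulnerabilities.",
    }
}

fn main() {
    println!("{}", get_system_prompt(Role::Developer));
}
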
@@ -3,11 +3,15 @@
use chrs_graph::DoltGraph;
use chrs_mail::{Mailbox, Message};
use chrs_council::{CouncilManager, Peer, Role};
use chrs_backbeat::{BeatFrame, StatusClaim};
use chrs_exec::{DockerExecutor, TaskRequest};
use chrs_prompts::get_system_prompt;
use chrono::{Utc, DateTime};
use std::path::Path;
use std::time::Duration;
use tokio::time::sleep;
use std::collections::HashMap;
use uuid::Uuid;

/// Represents a CHORUS agent with its mailbox, graph, and council management.
pub struct CHORUSAgent {
@@ -16,8 +20,11 @@ pub struct CHORUSAgent {
    pub mailbox: Mailbox,
    pub graph: DoltGraph,
    pub council: CouncilManager,
    pub executor: DockerExecutor,
    pub peers: HashMap<String, Peer>,
    pub last_heartbeat_check: DateTime<Utc>,
    pub current_beat: u32,
    pub system_prompt: String,
}

impl CHORUSAgent {
@@ -33,13 +40,16 @@ impl CHORUSAgent {

        // Ensure tables exist
        let _ = graph.create_table("task_log", "id VARCHAR(255) PRIMARY KEY, topic TEXT, payload TEXT, received_at TEXT");

        let _ = graph.create_table("execution_results", "id VARCHAR(255) PRIMARY KEY, task_id TEXT, stdout TEXT, stderr TEXT, duration_ms INT");

        let local_peer = Peer {
            id: id.to_string(),
            role,
            resource_score: 0.9, // Hardcoded for POC
            resource_score: 0.9,
        };
        let council = CouncilManager::new(local_peer, mailbox.clone());
        let executor = DockerExecutor::new()?;
        let system_prompt = get_system_prompt(role).to_string();

        Ok(Self {
            id: id.to_string(),
@@ -47,22 +57,45 @@ impl CHORUSAgent {
            mailbox,
            graph,
            council,
            executor,
            peers: HashMap::new(),
            last_heartbeat_check: Utc::now(),
            current_beat: 0,
            system_prompt,
        })
    }

    /// Agent's 'thinking' phase where it processes a message using its specialized prompt.
    ///
    /// **Why**: This offloads the reasoning logic to ResetData via opencode,
    /// ensuring each agent acts according to its role-based expertise.
    pub async fn think(&self, message: &str) -> String {
        println!("[AGENT {}] Thinking as {:?}...", self.id, self.role);
        // This is where we will call `opencode run` or a direct API in the future.
        // For now, we return a confirmation of the role-based prompt being active.
        format!("Reasoning based on: {}", self.system_prompt)
    }

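As the comment inside think() notes, the reasoning call is still stubbed out. One possible shape for the future integration, sketched with a placeholder "reasoner" binary because neither the `opencode run` invocation nor a direct API call is specified in this diff:

// Hypothetical sketch: invoke an external reasoning CLI with the role prompt.
// "reasoner" is a placeholder command name, not a real tool from this repository.
use tokio::process::Command;

async fn think_external(system_prompt: &str, task: &str) -> std::io::Result<String> {
    // Combine the role-specific system prompt with the incoming task description.
    let full_prompt = format!("{system_prompt}\n\nTask:\n{task}");

    // Run the placeholder command and capture its stdout as the agent's reasoning.
    let output = Command::new("reasoner").arg(full_prompt).output().await?;
    Ok(String::from_utf8_lossy(&output.stdout).into_owned())
}
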
    /// Main execution loop for the agent.
    pub async fn run_loop(&mut self) {
        println!("[AGENT {}] Role: {:?} starting...", self.id, self.role);
        loop {
            // 1. Broadcast presence
            if let Err(e) = self.council.broadcast_heartbeat("heartbeat") {
                eprintln!("[AGENT {}] Heartbeat fail: {}", self.id, e);
            // 1. Broadcast peer presence
            let _ = self.council.broadcast_heartbeat("heartbeat");

            // 2. Check for Pulse
            match self.mailbox.receive_broadcasts("beat_frame", self.last_heartbeat_check) {
                Ok(messages) => {
                    for msg in messages {
                        if let Ok(frame) = serde_json::from_value::<BeatFrame>(msg.payload) {
                            self.handle_beat(frame).await;
                        }
                    }
                }
                Err(e) => eprintln!("Mailbox beat error: {}", e),
            }

            // 2. Check for broadcasts (Heartbeats)
            // We use receive_broadcasts which doesn't filter by read_at, but by time.
            // 3. Check for Peer Discovery
            match self.mailbox.receive_broadcasts("heartbeat", self.last_heartbeat_check) {
                Ok(messages) => {
                    for msg in messages {
@@ -75,74 +108,62 @@ impl CHORUSAgent {
                        }
                    }
                    }
                    // Update check time
                    self.last_heartbeat_check = Utc::now();
                }
                Err(e) => eprintln!("Mailbox broadcast error: {}", e),
                Err(e) => eprintln!("Mailbox discovery error: {}", e),
            }

            // 3. Check for direct messages (Tasks)
            // 4. Check for direct messages
            match self.mailbox.receive_pending(&self.id) {
                Ok(messages) => {
                    for msg in messages {
                        self.handle_message(msg).await;
                    }
                }
                Err(e) => eprintln!("Mailbox error: {}", e),
                Err(e) => eprintln!("Mailbox task error: {}", e),
            }

            // 4. Elect Leader
            let mut all_peers: Vec<Peer> = self.peers.values().cloned().collect();
            all_peers.push(self.council.local_peer.clone());
            let _leader_id = self.council.elect_leader(&all_peers);

            sleep(Duration::from_secs(2)).await;
            sleep(Duration::from_secs(1)).await;
        }
    }

    /// Processes an individual incoming message.
    async fn handle_beat(&mut self, frame: BeatFrame) {
        self.current_beat = frame.beat_index;
        let claim = StatusClaim {
            agent_id: self.id.clone(),
            task_id: None,
            beat_index: self.current_beat,
            state: "idle".into(),
            progress: 1.0,
        };
        let msg = Message {
            id: Uuid::new_v4(),
            from_peer: self.id.clone(),
            to_peer: "council".into(),
            topic: "status_claim".into(),
            payload: serde_json::to_value(&claim).unwrap(),
            sent_at: Utc::now(),
            read_at: None,
        };
        let _ = self.mailbox.send(&msg);
    }

    pub async fn handle_message(&mut self, msg: Message) {
        println!("[AGENT {}] Handling message: {}", self.id, msg.topic);

        // Log to graph
        let log_entry = serde_json::json!({
            "id": msg.id.to_string(),
            "topic": msg.topic,
            "payload": msg.payload.to_string(),
            "received_at": Utc::now().to_rfc3339()
        });

        if let Err(e) = self.graph.insert_node("task_log", log_entry) {
            eprintln!("Failed to log task: {}", e);
        } else {
            let _ = self.graph.commit(&format!("Logged task: {}", msg.topic));
        }
        let _ = self.graph.insert_node("task_log", log_entry);
        let _ = self.graph.commit(&format!("Logged task: {}", msg.topic));

        // Delegate if high-level task and I am leader
        if msg.topic == "task" && self.role == Role::Architect {
            let mut peers_vec: Vec<Peer> = self.peers.values().cloned().collect();
            // Retry loop for peer discovery
            for _ in 0..5 {
                if !peers_vec.is_empty() { break; }
                println!("[AGENT {}] No peers yet, waiting for heartbeats...", self.id);
                sleep(Duration::from_secs(2)).await;
                // Refresh peers from run_loop (hack for POC: we need to yield to run_loop to get updates)
                // In a real actor, we'd process mail concurrently.
                // For this POC, we'll just check mail manually here.
                if let Ok(messages) = self.mailbox.receive_broadcasts("heartbeat", self.last_heartbeat_check) {
                    for m in messages {
                        if let Ok(peer) = serde_json::from_value::<Peer>(m.payload) {
                            if peer.id != self.id { self.peers.insert(peer.id.clone(), peer); }
                        }
                    }
                    self.last_heartbeat_check = Utc::now();
                }
                peers_vec = self.peers.values().cloned().collect();
            }

            if peers_vec.is_empty() {
                println!("[AGENT {}] TIMEOUT: No peers to delegate to.", self.id);
            } else {
        // 1. Delegation logic (Architect)
        if msg.topic == "task" && (self.role == Role::Architect || self.role == Role::SeniorSoftwareArchitect) {
            let peers_vec: Vec<Peer> = self.peers.values().cloned().collect();
            if !peers_vec.is_empty() {
                let sub_tasks = self.council.delegate_work(msg.id, "System implementation", &peers_vec);
                for st in sub_tasks {
                    println!("[AGENT {}] Delegating {} to {}", self.id, st.topic, st.to_peer);
@@ -151,12 +172,37 @@ impl CHORUSAgent {
            }
        }

        // Handle specialized tasks
        if msg.topic == "implementation_task" {
            println!("[AGENT {}] Working on implementation...", self.id);
        // 2. Execution logic (Coder/Developer)
        if msg.topic == "implementation_task" || msg.topic == "execution_task" {
            println!("[AGENT {}] Executing task in sandbox...", self.id);
            // Before executing, the agent 'thinks' about the task
            let _reasoning = self.think(&format!("Task: {:?}", msg.payload)).await;

            let req = TaskRequest {
                language: "base".into(),
                code: "echo 'CHORUS TASK EXECUTION SUCCESS' && ls -la".into(),
                timeout_secs: 30,
            };
            match self.executor.execute(req).await {
                Ok(res) => {
                    println!("[AGENT {}] Execution successful.", self.id);
                    let result_entry = serde_json::json!({
                        "id": Uuid::new_v4().to_string(),
                        "task_id": msg.id.to_string(),
                        "stdout": res.stdout,
                        "stderr": res.stderr,
                        "duration_ms": res.duration_ms
                    });
                    let _ = self.graph.insert_node("execution_results", result_entry);
                    let _ = self.graph.commit(&format!("Recorded result for task: {}", msg.id));
                }
                Err(e) => eprintln!("[AGENT {}] Execution error: {}", self.id, e),
            }
        }

if msg.topic == "security_audit_task" {
|
||||
println!("[AGENT {}] Performing security audit...", self.id);
|
||||
let _reasoning = self.think("Perform security audit on latest implementation").await;
|
||||
}
|
||||
|
||||
let _ = self.mailbox.mark_read(msg.id);
|
||||
|
||||
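
For completeness, a rough usage sketch of how two role-specialized agents might be started. The CHORUSAgent::new signature is not visible in this diff, so the (id, role) constructor arguments are an assumption, and only Role variants named in the diff are used here; real deployments would presumably mix in the other roles from models.yaml:

// Hypothetical usage sketch; constructor arguments and error handling are assumptions.
use chrs_council::Role;

#[tokio::main]
async fn main() {
    let mut architect = CHORUSAgent::new("agent-1", Role::SeniorSoftwareArchitect)
        .expect("failed to start architect agent");
    let mut worker = CHORUSAgent::new("agent-2", Role::Architect)
        .expect("failed to start second agent");

    // Each agent runs its heartbeat / pulse / task loop until the process is stopped.
    tokio::join!(architect.run_loop(), worker.run_loop());
}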