Implement chrs-council: Governance layer with weighted leader election and task delegation

This commit is contained in:
anthonyrawlins
2026-03-04 02:55:47 +11:00
parent 0f28e4b669
commit ffe37a4292
9 changed files with 475 additions and 247 deletions

View File

@@ -7,9 +7,17 @@ edition = "2021"
# Workspace-local CHORUS crates.
ucxl = { path = "../UCXL" }
chrs-mail = { path = "../chrs-mail" }
chrs-graph = { path = "../chrs-graph" }
# Governance layer: weighted leader election and task delegation.
chrs-council = { path = "../chrs-council" }
# Async runtime; "full" enables all tokio features (time, macros, rt, ...).
tokio = { version = "1", features = ["full"] }
serde = { version = "1", features = ["derive"] }
serde_json = "1"
thiserror = "1"
# v4 = random UUIDs for message/task ids.
uuid = { version = "1", features = ["v4"] }
# "serde" feature lets DateTime<Utc> fields (de)serialize in messages.
chrono = { version = "0.4", features = ["serde"] }
# The crate is both a library (agent runtime) and a binary (agent launcher).
[lib]
path = "src/lib.rs"
[[bin]]
name = "chrs-agent"
path = "src/bin/main.rs"

View File

@@ -0,0 +1,15 @@
use chrs_agent::CHORUSAgent;
use chrs_council::Role;
use std::path::Path;
/// Entry point for the architect agent binary.
///
/// Builds an `agent-architect` instance with the `Architect` role and hands
/// control to its run loop (which never returns in normal operation).
///
/// The data directory defaults to the original POC path but can be overridden
/// with the `CHORUS_DATA_DIR` environment variable, so the binary is runnable
/// on machines other than the author's without recompiling.
#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error>> {
    let agent_id = "agent-architect";
    let role = Role::Architect;
    // Prefer an explicit override; fall back to the hard-coded POC default.
    let data_dir = std::env::var("CHORUS_DATA_DIR")
        .unwrap_or_else(|_| "/home/tony/rust/projects/reset/CHORUS/data/architect".to_string());
    let base_path = Path::new(&data_dir);
    let mut agent = CHORUSAgent::init(agent_id, role, base_path).await?;
    agent.run_loop().await;
    Ok(())
}

164
chrs-agent/src/lib.rs Normal file
View File

@@ -0,0 +1,164 @@
//! chrs-agent: The main coordinator for CHORUS agents.
use chrs_graph::DoltGraph;
use chrs_mail::{Mailbox, Message};
use chrs_council::{CouncilManager, Peer, Role};
use chrono::{Utc, DateTime};
use std::path::Path;
use std::time::Duration;
use tokio::time::sleep;
use std::collections::HashMap;
/// Represents a CHORUS agent with its mailbox, graph, and council management.
pub struct CHORUSAgent {
    /// Logical identifier for this agent instance (e.g. "agent-architect").
    pub id: String,
    /// Role this agent plays in the council; `Architect` agents delegate
    /// incoming "task" messages to peers (see `handle_message`).
    pub role: Role,
    /// Inter-agent message channel, backed by a `mail.sqlite` file under the
    /// agent's base path (see `init`).
    pub mailbox: Mailbox,
    /// Persistent state graph where every received task is logged and committed.
    pub graph: DoltGraph,
    /// Governance layer handle: heartbeat broadcasting, leader election,
    /// and work delegation.
    pub council: CouncilManager,
    /// Peers discovered via "heartbeat" broadcasts, keyed by peer id.
    /// NOTE(review): entries are never expired, so a crashed peer lingers
    /// here indefinitely — acceptable for the POC, revisit for production.
    pub peers: HashMap<String, Peer>,
    /// Cutoff timestamp for the heartbeat sweep: only broadcasts newer than
    /// this are fetched on the next `receive_broadcasts` call.
    pub last_heartbeat_check: DateTime<Utc>,
}
impl CHORUSAgent {
    /// Initialize a new `CHORUSAgent` with its own identity and storage paths.
    ///
    /// Creates the on-disk layout under `base_path` (a SQLite mailbox at
    /// `mail.sqlite` and a Dolt state graph under `state_graph/`), ensures the
    /// `task_log` table exists, and registers the local peer with the council.
    ///
    /// # Errors
    /// Returns any error from directory creation, mailbox opening, or graph
    /// initialization. Table creation errors are deliberately ignored because
    /// the table usually already exists from a previous run.
    pub async fn init(id: &str, role: Role, base_path: &Path) -> Result<Self, Box<dyn std::error::Error>> {
        let mail_path = base_path.join("mail.sqlite");
        let graph_path = base_path.join("state_graph");
        std::fs::create_dir_all(&graph_path)?;
        let mailbox = Mailbox::open(mail_path)?;
        let graph = DoltGraph::init(&graph_path)?;
        // Ensure table exists; best-effort since it may already be there.
        let _ = graph.create_table("task_log", "id VARCHAR(255) PRIMARY KEY, topic TEXT, payload TEXT, received_at TEXT");
        let local_peer = Peer {
            id: id.to_string(),
            role,
            resource_score: 0.9, // Hardcoded for POC
        };
        let council = CouncilManager::new(local_peer, mailbox.clone());
        Ok(Self {
            id: id.to_string(),
            role,
            mailbox,
            graph,
            council,
            peers: HashMap::new(),
            last_heartbeat_check: Utc::now(),
        })
    }

    /// Main execution loop for the agent.
    ///
    /// Each 2-second tick: broadcast a heartbeat, ingest peers from other
    /// agents' heartbeats, handle any direct messages, then run a leader
    /// election over the known peer set (including ourselves).
    pub async fn run_loop(&mut self) {
        println!("[AGENT {}] Role: {:?} starting...", self.id, self.role);
        loop {
            // 1. Broadcast presence
            if let Err(e) = self.council.broadcast_heartbeat("heartbeat") {
                eprintln!("[AGENT {}] Heartbeat fail: {}", self.id, e);
            }
            // 2. Discover peers from heartbeat broadcasts.
            self.ingest_heartbeats();
            // 3. Check for direct messages (Tasks)
            match self.mailbox.receive_pending(&self.id) {
                Ok(messages) => {
                    for msg in messages {
                        self.handle_message(msg).await;
                    }
                }
                Err(e) => eprintln!("Mailbox error: {}", e),
            }
            // 4. Elect Leader over everyone we know about, ourselves included.
            let mut all_peers: Vec<Peer> = self.peers.values().cloned().collect();
            all_peers.push(self.council.local_peer.clone());
            let _leader_id = self.council.elect_leader(&all_peers);
            sleep(Duration::from_secs(2)).await;
        }
    }

    /// Drain "heartbeat" broadcasts newer than `last_heartbeat_check` and
    /// record any peers other than ourselves, announcing new discoveries.
    ///
    /// BUGFIX: the sweep cutoff is captured *before* the mailbox read.
    /// Previously `last_heartbeat_check` was set to `Utc::now()` only after
    /// the batch had been processed, so heartbeats arriving while the batch
    /// was being handled landed in a blind window and were silently skipped
    /// on the next sweep. This helper also replaces two previously duplicated
    /// copies of this logic (in `run_loop` and `handle_message`), one of
    /// which silently discarded mailbox errors.
    fn ingest_heartbeats(&mut self) {
        let sweep_started = Utc::now();
        match self.mailbox.receive_broadcasts("heartbeat", self.last_heartbeat_check) {
            Ok(messages) => {
                for msg in messages {
                    if let Ok(peer) = serde_json::from_value::<Peer>(msg.payload) {
                        if peer.id != self.id {
                            if !self.peers.contains_key(&peer.id) {
                                println!("[AGENT {}] Discovered peer: {} ({:?})", self.id, peer.id, peer.role);
                            }
                            self.peers.insert(peer.id.clone(), peer);
                        }
                    }
                }
                // Advance the cutoff only on a successful read so a failed
                // sweep retries the same window next tick.
                self.last_heartbeat_check = sweep_started;
            }
            Err(e) => eprintln!("Mailbox broadcast error: {}", e),
        }
    }

    /// Processes an individual incoming message.
    ///
    /// Every message is logged to the `task_log` table and committed to the
    /// graph. "task" messages received by an `Architect` are split and
    /// delegated to peers; `implementation_task` / `security_audit_task`
    /// are POC stubs. The message is acknowledged regardless of outcome.
    pub async fn handle_message(&mut self, msg: Message) {
        println!("[AGENT {}] Handling message: {}", self.id, msg.topic);
        // Log to graph so the execution history is persisted and provable.
        let log_entry = serde_json::json!({
            "id": msg.id.to_string(),
            "topic": msg.topic,
            "payload": msg.payload.to_string(),
            "received_at": Utc::now().to_rfc3339()
        });
        if let Err(e) = self.graph.insert_node("task_log", log_entry) {
            eprintln!("Failed to log task: {}", e);
        } else {
            let _ = self.graph.commit(&format!("Logged task: {}", msg.topic));
        }
        // Delegate if high-level task and I am leader
        if msg.topic == "task" && self.role == Role::Architect {
            let mut peers_vec: Vec<Peer> = self.peers.values().cloned().collect();
            // Retry loop for peer discovery: a task may arrive before any
            // heartbeats have. POC hack — a real actor would process mail
            // concurrently instead of polling inline here.
            for _ in 0..5 {
                if !peers_vec.is_empty() {
                    break;
                }
                println!("[AGENT {}] No peers yet, waiting for heartbeats...", self.id);
                sleep(Duration::from_secs(2)).await;
                self.ingest_heartbeats();
                peers_vec = self.peers.values().cloned().collect();
            }
            if peers_vec.is_empty() {
                println!("[AGENT {}] TIMEOUT: No peers to delegate to.", self.id);
            } else {
                let sub_tasks = self.council.delegate_work(msg.id, "System implementation", &peers_vec);
                for st in sub_tasks {
                    println!("[AGENT {}] Delegating {} to {}", self.id, st.topic, st.to_peer);
                    let _ = self.mailbox.send(&st);
                }
            }
        }
        // Handle specialized tasks (POC stubs).
        if msg.topic == "implementation_task" {
            println!("[AGENT {}] Working on implementation...", self.id);
        }
        if msg.topic == "security_audit_task" {
            println!("[AGENT {}] Performing security audit...", self.id);
        }
        // Ack unconditionally so the message is never reprocessed forever.
        let _ = self.mailbox.mark_read(msg.id);
    }
}

View File

@@ -1,115 +0,0 @@
/// chrs-agent crate implements the core CHORUS agent runtime.
///
/// An agent runs a message loop that receives tasks from a `Mailbox`, logs them to a
/// `DoltGraph` (the persistent state graph), and marks them as read. The design
/// follows the CHORUS architectural pattern where agents are autonomous workers
/// that interact through the `chrs_mail` messaging layer and maintain a provable
/// execution history in the graph.
use chrs_graph::DoltGraph;
use chrs_mail::{Mailbox, Message};
use chrono::Utc;
use std::path::Path;
use std::time::Duration;
use tokio::time::sleep;
use uuid::Uuid;
/// Represents a running CHORUS agent.
///
/// # Fields
/// * `id` Logical identifier for the agent (e.g., "agent-001").
/// * `mailbox` The `Mailbox` used for interagent communication.
/// * `graph` Persistence layer (`DoltGraph`) where task logs are stored.
///
/// # Rationale
/// Agents are isolated units of work. By keeping a dedicated mailbox and a graph
/// per agent we guarantee that each agent can be started, stopped, and reasoned
/// about independently while still contributing to the global CHORUS state.
pub struct CHORUSAgent {
    /// Logical identifier for the agent (e.g., "agent-001").
    id: String,
    /// Inter-agent communication channel, opened from `mail.sqlite` in `init`.
    mailbox: Mailbox,
    /// Persistence layer where task logs are inserted and committed.
    graph: DoltGraph,
}
impl CHORUSAgent {
    /// Initializes a new `CHORUSAgent`.
    ///
    /// Creates the filesystem layout under `base_path`, opens or creates the
    /// SQLite mailbox, and initialises a `DoltGraph` for state persistence.
    /// Also ensures a `task_log` table exists for recording incoming messages.
    ///
    /// # Errors
    /// Propagates failures from directory creation, mailbox opening, and
    /// graph initialization; table creation is best-effort (it may already
    /// exist from a previous run).
    async fn init(id: &str, base_path: &Path) -> Result<Self, Box<dyn std::error::Error>> {
        let mail_path = base_path.join("mail.sqlite");
        let graph_path = base_path.join("state_graph");
        std::fs::create_dir_all(&graph_path)?;
        let mailbox = Mailbox::open(mail_path)?;
        let graph = DoltGraph::init(&graph_path)?;
        // Ensure table exists
        let _ = graph.create_table("task_log", "id TEXT PRIMARY KEY, topic TEXT, payload TEXT, received_at TEXT");
        Ok(Self {
            id: id.to_string(),
            mailbox,
            graph,
        })
    }

    /// Main event loop of the agent.
    ///
    /// Polls the mailbox for pending messages addressed to this agent, logs
    /// each message into the `task_log` table, commits the graph, and
    /// acknowledges the message, sleeping 5 s between polls.
    async fn run_loop(&self) {
        println!("Agent {} starting run loop...", self.id);
        loop {
            match self.mailbox.receive_pending(&self.id) {
                Ok(messages) => {
                    for msg in messages {
                        println!("Received message: {:?}", msg.topic);
                        let log_entry = serde_json::json!({
                            "id": msg.id.to_string(),
                            "topic": msg.topic,
                            "payload": msg.payload.to_string(),
                            "received_at": Utc::now().to_rfc3339()
                        });
                        if let Err(e) = self.graph.insert_node("task_log", log_entry) {
                            eprintln!("Failed to log task to graph: {}", e);
                        } else {
                            let _ = self.graph.commit(&format!("Logged task: {}", msg.id));
                        }
                        // BUGFIX: acknowledge unconditionally. Previously
                        // `mark_read` ran only when logging succeeded, so a
                        // message whose insert persistently failed (e.g. a
                        // schema mismatch) was re-delivered and re-failed on
                        // every 5 s poll forever — a poison-message loop.
                        let _ = self.mailbox.mark_read(msg.id);
                    }
                }
                Err(e) => eprintln!("Mailbox error: {}", e),
            }
            sleep(Duration::from_secs(5)).await;
        }
    }
}
/// Binary entry point for the CHORUS agent.
///
/// Ensures the agent's data directory exists (the capitalised `Tony` in the
/// path is intentional — it mirrors the original filesystem layout), builds
/// the `agent-001` instance, and hands control to its run loop.
#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error>> {
    let data_dir = Path::new("/home/Tony/rust/projects/reset/CHORUS/data/agent-001");
    std::fs::create_dir_all(data_dir)?;
    let agent = CHORUSAgent::init("agent-001", data_dir).await?;
    agent.run_loop().await;
    Ok(())
}