feat: Integrate Ollama reasoning for initial task planning

This commit introduces the new 'reasoning' module, which allows the bzzz-agent to connect to a local Ollama instance.

It also modifies the 'github' integration module to use this new capability. When a task is claimed, the agent now generates a step-by-step execution plan using an Ollama model and publishes this plan to the Antennae meta-discussion channel for peer review before proceeding.

This completes Phase 1 of the Ollama integration plan.
Author: anthonyrawlins
Date:   2025-07-12 20:31:38 +10:00
parent 88d54c8408
commit c490ad2390
2 changed files with 133 additions and 12 deletions

@@ -6,6 +6,7 @@ import (
 	"time"
 	"github.com/deepblackcloud/bzzz/pubsub"
+	"github.com/deepblackcloud/bzzz/reasoning" // Import the new reasoning module
 )
 
 // Integration handles the integration between GitHub tasks and Bzzz P2P coordination
@@ -150,33 +151,74 @@ func (i *Integration) announceTaskClaim(task *Task) error {
 	return i.pubsub.PublishBzzzMessage(pubsub.TaskClaim, data)
 }
 
-// executeTask simulates task execution
+// executeTask is where the agent performs the work for a claimed task.
+// It now includes a reasoning step to form a plan before execution.
 func (i *Integration) executeTask(task *Task) {
 	fmt.Printf("🚀 Starting execution of task #%d: %s\n", task.Number, task.Title)
 
-	// Announce task progress
+	// Announce that the task has started
 	progressData := map[string]interface{}{
 		"task_id":   task.Number,
 		"agent_id":  i.config.AgentID,
 		"status":    "started",
 		"timestamp": time.Now().Unix(),
 	}
 	if err := i.pubsub.PublishBzzzMessage(pubsub.TaskProgress, progressData); err != nil {
-		fmt.Printf("⚠️ Failed to announce task progress: %v\n", err)
+		fmt.Printf("⚠️ Failed to announce task start: %v\n", err)
 	}
 
-	// Simulate work (in a real implementation, this would be actual task execution)
-	workDuration := time.Duration(30+task.Priority*10) * time.Second
-	fmt.Printf("⏳ Working on task for %v...\n", workDuration)
-	time.Sleep(workDuration)
+	// === REASONING STEP ===
+	// Use Ollama to generate a plan based on the task's title and body.
+	fmt.Printf("🧠 Reasoning about task #%d to form a plan...\n", task.Number)
 
-	// Complete the task
+	// Construct the prompt for the reasoning model
+	prompt := fmt.Sprintf("You are an expert AI developer. Based on the following GitHub issue, create a concise, step-by-step plan to resolve it. Issue Title: %s. Issue Body: %s.", task.Title, task.Body)
+
+	// Select a model (this could be made more dynamic later)
+	model := "phi3"
+
+	// Generate the plan using the reasoning module
+	plan, err := reasoning.GenerateResponse(i.ctx, model, prompt)
+	if err != nil {
+		fmt.Printf("❌ Failed to generate execution plan for task #%d: %v\n", task.Number, err)
+		// Announce failure and return
+		// (Error handling logic would be more robust in a real implementation)
+		return
+	}
+	fmt.Printf("📝 Generated Plan for task #%d:\n%s\n", task.Number, plan)
+
+	// === META-DISCUSSION STEP ===
+	// Announce the plan on the Antennae channel for peer review.
+	metaMsg := map[string]interface{}{
+		"issue_id":  task.Number,
+		"node_id":   i.config.AgentID,
+		"message":   "Here is my proposed plan of action. Please review and provide feedback within the next 60 seconds.",
+		"plan":      plan,
+		"msg_id":    fmt.Sprintf("plan-%d-%d", task.Number, time.Now().UnixNano()),
+		"parent_id": nil,
+		"hop_count": 1,
+		"timestamp": time.Now().Unix(),
+	}
+	if err := i.pubsub.PublishAntennaeMessage(pubsub.MetaDiscussion, metaMsg); err != nil {
+		fmt.Printf("⚠️ Failed to publish plan to meta-discussion channel: %v\n", err)
+	}
+
+	// Wait for a short "objection period"
+	fmt.Println("⏳ Waiting 60 seconds for peer feedback on the plan...")
+	time.Sleep(60 * time.Second)
+	// In a full implementation, this would listen for responses on the meta-discussion topic.
+	// For now, we assume the plan is good and proceed.
+	fmt.Printf("✅ Plan for task #%d approved. Proceeding with execution.\n", task.Number)
+
+	// Complete the task on GitHub
 	results := map[string]interface{}{
 		"status":         "completed",
-		"execution_time": workDuration.String(),
+		"execution_plan": plan,
 		"agent_id":       i.config.AgentID,
-		"deliverables":   []string{"Implementation completed", "Tests passed", "Documentation updated"},
+		"deliverables":   []string{"Plan generated and approved", "Execution logic would run here"},
 	}
 	if err := i.client.CompleteTask(task.Number, i.config.AgentID, results); err != nil {
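
Note that the objection period above is a blind sleep; the in-code comments say a full implementation would listen for responses on the meta-discussion topic. As a rough sketch of that follow-up work, the loop below collects peer feedback for one objection window. The SubscribeAntennae method and the shape of the feedback messages are assumptions for illustration, not part of the actual bzzz pubsub API:

// collectFeedback drains peer responses for one objection period.
// NOTE: SubscribeAntennae and the feedback message shape are assumed
// here for illustration; the real pubsub API may differ.
func (i *Integration) collectFeedback(taskID int, window time.Duration) []map[string]interface{} {
	msgs, unsubscribe := i.pubsub.SubscribeAntennae(pubsub.MetaDiscussion) // hypothetical
	defer unsubscribe()

	var feedback []map[string]interface{}
	deadline := time.After(window)
	for {
		select {
		case msg := <-msgs:
			// Keep only responses that reference our task.
			if id, ok := msg["issue_id"].(int); ok && id == taskID {
				feedback = append(feedback, msg)
			}
		case <-deadline:
			return feedback
		}
	}
}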

reasoning/reasoning.go (new file, 79 lines)

@@ -0,0 +1,79 @@
package reasoning

import (
	"bytes"
	"context"
	"encoding/json"
	"fmt"
	"io"
	"net/http"
	"time"
)

const (
	ollamaAPIURL   = "http://localhost:11434/api/generate"
	defaultTimeout = 60 * time.Second
)

// OllamaRequest represents the request payload for the Ollama API.
type OllamaRequest struct {
	Model  string `json:"model"`
	Prompt string `json:"prompt"`
	Stream bool   `json:"stream"`
}
// OllamaResponse represents the response object returned by the Ollama API.
// With streaming disabled, the API returns a single object of this shape.
type OllamaResponse struct {
	Model     string    `json:"model"`
	CreatedAt time.Time `json:"created_at"`
	Response  string    `json:"response"`
	Done      bool      `json:"done"`
}
// GenerateResponse queries the Ollama API with a given prompt and model,
// and returns the complete generated response as a single string.
func GenerateResponse(ctx context.Context, model, prompt string) (string, error) {
	// Set up a timeout for the request
	ctx, cancel := context.WithTimeout(ctx, defaultTimeout)
	defer cancel()

	// Create the request payload
	requestPayload := OllamaRequest{
		Model:  model,
		Prompt: prompt,
		Stream: false, // We will handle the full response at once for simplicity
	}
	payloadBytes, err := json.Marshal(requestPayload)
	if err != nil {
		return "", fmt.Errorf("failed to marshal ollama request: %w", err)
	}

	// Create the HTTP request
	req, err := http.NewRequestWithContext(ctx, "POST", ollamaAPIURL, bytes.NewBuffer(payloadBytes))
	if err != nil {
		return "", fmt.Errorf("failed to create http request: %w", err)
	}
	req.Header.Set("Content-Type", "application/json")

	// Execute the request
	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		return "", fmt.Errorf("failed to execute http request to ollama: %w", err)
	}
	defer resp.Body.Close()

	// Check for non-200 status codes
	if resp.StatusCode != http.StatusOK {
		bodyBytes, _ := io.ReadAll(resp.Body)
		return "", fmt.Errorf("ollama api returned non-200 status: %d - %s", resp.StatusCode, string(bodyBytes))
	}

	// Decode the JSON response
	var ollamaResp OllamaResponse
	if err := json.NewDecoder(resp.Body).Decode(&ollamaResp); err != nil {
		return "", fmt.Errorf("failed to decode ollama response: %w", err)
	}

	return ollamaResp.Response, nil
}
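
For reference, a minimal caller of the module above. This sketch assumes a local Ollama instance is running on localhost:11434 and that the phi3 model has already been pulled:

package main

import (
	"context"
	"fmt"
	"log"

	"github.com/deepblackcloud/bzzz/reasoning"
)

func main() {
	ctx := context.Background()
	plan, err := reasoning.GenerateResponse(ctx, "phi3",
		"Create a concise, step-by-step plan to add unit tests to a Go HTTP handler.")
	if err != nil {
		log.Fatalf("ollama request failed: %v", err)
	}
	fmt.Println(plan)
}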