Complete BZZZ functionality port to CHORUS

🎭 CHORUS now contains full BZZZ functionality adapted for containers

Core systems ported:
- P2P networking (libp2p with DHT and PubSub)
- Task coordination (COOEE protocol)
- HMMM collaborative reasoning
- SHHH encryption and security
- SLURP admin election system
- UCXL content addressing
- UCXI server integration
- Hypercore logging system
- Health monitoring and graceful shutdown
- License validation with KACHING

Container adaptations:
- Environment variable configuration (no YAML files)
- Container-optimized logging to stdout/stderr
- Auto-generated agent IDs for container deployments
- Docker-first architecture

All proven BZZZ P2P protocols, AI integration, and collaboration
features are now available in containerized form.

Next: Build and test container deployment.

🤖 Generated with [Claude Code](https://claude.ai/code)

Co-Authored-By: Claude <noreply@anthropic.com>
This commit is contained in:
anthonyrawlins
2025-09-02 20:02:37 +10:00
parent 7c6cbd562a
commit 543ab216f9
224 changed files with 86331 additions and 186 deletions

157
reasoning/reasoning.go Normal file
View File

@@ -0,0 +1,157 @@
package reasoning
import (
"bytes"
"context"
"encoding/json"
"fmt"
"io"
"net/http"
"time"
)
const (
	// defaultTimeout bounds each call to the Ollama generate endpoint
	// (applied on top of any deadline already carried by the caller's ctx).
	defaultTimeout = 60 * time.Second
)

var (
	// availableModels is the list of models offered to the selection
	// webhook by GenerateResponseSmart; set via SetModelConfig.
	availableModels []string
	// modelWebhookURL is the endpoint selectBestModel consults to pick a
	// model; empty string disables webhook selection. Set via SetModelConfig.
	modelWebhookURL string
	// defaultModel is the last-resort model used when no models are
	// available at all. Set via SetModelConfig.
	defaultModel string
	// ollamaEndpoint is the base URL of the Ollama API; overridable via
	// SetOllamaEndpoint.
	ollamaEndpoint string = "http://localhost:11434" // Default fallback
)
// OllamaRequest represents the request payload for the Ollama API.
type OllamaRequest struct {
	// Model is the name of the model to run.
	Model string `json:"model"`
	// Prompt is the text sent to the model.
	Prompt string `json:"prompt"`
	// Stream selects chunked streaming; this package always sends false
	// so the API replies with a single JSON object.
	Stream bool `json:"stream"`
}

// OllamaResponse represents a single streamed response object from the Ollama API.
type OllamaResponse struct {
	Model     string    `json:"model"`
	CreatedAt time.Time `json:"created_at"`
	// Response holds the generated text.
	Response string `json:"response"`
	// Done reports whether generation has finished.
	Done bool `json:"done"`
}
// GenerateResponse queries the Ollama API with a given prompt and model,
// and returns the complete generated response as a single string.
//
// The call is bounded by defaultTimeout on top of any deadline already
// carried by ctx. A non-200 reply is returned as an error containing the
// status code and a bounded prefix of the response body.
func GenerateResponse(ctx context.Context, model, prompt string) (string, error) {
	// Set up a timeout for the request.
	ctx, cancel := context.WithTimeout(ctx, defaultTimeout)
	defer cancel()

	// Stream=false asks Ollama for one JSON object instead of a stream of
	// chunks, so a single Decode consumes the whole reply.
	requestPayload := OllamaRequest{
		Model:  model,
		Prompt: prompt,
		Stream: false,
	}
	payloadBytes, err := json.Marshal(requestPayload)
	if err != nil {
		return "", fmt.Errorf("marshaling ollama request: %w", err)
	}

	apiURL := ollamaEndpoint + "/api/generate"
	req, err := http.NewRequestWithContext(ctx, http.MethodPost, apiURL, bytes.NewReader(payloadBytes))
	if err != nil {
		return "", fmt.Errorf("creating http request: %w", err)
	}
	req.Header.Set("Content-Type", "application/json")

	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		return "", fmt.Errorf("executing http request to ollama: %w", err)
	}
	defer resp.Body.Close()

	if resp.StatusCode != http.StatusOK {
		// Bound the error-body read so a misbehaving server cannot force
		// us to buffer an arbitrarily large payload just to report an error.
		bodyBytes, _ := io.ReadAll(io.LimitReader(resp.Body, 8192))
		return "", fmt.Errorf("ollama api returned non-200 status: %d - %s", resp.StatusCode, string(bodyBytes))
	}

	var ollamaResp OllamaResponse
	if err := json.NewDecoder(resp.Body).Decode(&ollamaResp); err != nil {
		return "", fmt.Errorf("decoding ollama response: %w", err)
	}
	return ollamaResp.Response, nil
}
// SetModelConfig configures the available models and webhook URL for smart model selection
func SetModelConfig(models []string, webhookURL, defaultReasoningModel string) {
availableModels = models
modelWebhookURL = webhookURL
defaultModel = defaultReasoningModel
}
// SetOllamaEndpoint configures the base URL of the Ollama API, replacing
// the default "http://localhost:11434" used by GenerateResponse.
func SetOllamaEndpoint(endpoint string) {
	ollamaEndpoint = endpoint
}
// selectBestModel calls the model selection webhook to choose the best model
// for a prompt. On any failure — no webhook configured, marshal error,
// transport error, non-200 reply, undecodable body, or the webhook naming a
// model we did not offer — it falls back to the first entry of
// availableModels, and finally to defaultModel when no models are available
// at all.
//
// NOTE: the availableModels parameter shadows the package-level variable of
// the same name; within this function only the argument is consulted.
func selectBestModel(availableModels []string, prompt string) string {
	if modelWebhookURL == "" || len(availableModels) == 0 {
		// No webhook (or nothing to choose from): fall back directly.
		if len(availableModels) > 0 {
			return availableModels[0]
		}
		return defaultModel // last-resort fallback
	}

	requestPayload := map[string]interface{}{
		"models": availableModels,
		"prompt": prompt,
	}
	payloadBytes, err := json.Marshal(requestPayload)
	if err != nil {
		return availableModels[0]
	}

	// Use a client with an explicit timeout: the previous bare http.Post
	// went through http.DefaultClient, which has none, so a hung webhook
	// would block model selection indefinitely.
	client := &http.Client{Timeout: 10 * time.Second}
	resp, err := client.Post(modelWebhookURL, "application/json", bytes.NewReader(payloadBytes))
	if err != nil {
		return availableModels[0]
	}
	defer resp.Body.Close()

	if resp.StatusCode != http.StatusOK {
		return availableModels[0]
	}

	var response struct {
		Model string `json:"model"`
	}
	if err := json.NewDecoder(resp.Body).Decode(&response); err != nil {
		return availableModels[0]
	}

	// Only accept a model we actually offered; otherwise fall back.
	for _, model := range availableModels {
		if model == response.Model {
			return response.Model
		}
	}
	return availableModels[0]
}
// GenerateResponseSmart automatically selects the best model for the prompt
// (via the configured selection webhook, with fallbacks) before delegating
// to GenerateResponse.
func GenerateResponseSmart(ctx context.Context, prompt string) (string, error) {
	model := selectBestModel(availableModels, prompt)
	return GenerateResponse(ctx, model, prompt)
}