Initial commit: Complete Hive distributed AI orchestration platform

This comprehensive implementation includes:
- FastAPI backend with MCP server integration
- React/TypeScript frontend with Vite
- PostgreSQL database with Redis caching
- Grafana/Prometheus monitoring stack
- Docker Compose orchestration
- Full MCP protocol support for Claude Code integration

Features:
- Agent discovery and management across the network
- Visual workflow editor and execution engine
- Real-time task coordination and monitoring (see the status-check sketch below)
- Multi-model support with specialized agents
- Distributed development task allocation
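
As a quick orientation to the coordinator API that the scripts in this commit talk to, here is a minimal smoke-test sketch. It assumes the stack is up and the Hive API is listening on localhost:8087 (the default used throughout the scripts below); the response shape of /api/agents is not pinned down here, so it simply prints the raw JSON.

#!/usr/bin/env python3
"""Minimal Hive API smoke test (illustrative sketch only)."""
import requests

HIVE_API_URL = "http://localhost:8087"  # default coordinator address used by the scripts below

# Liveness check, mirroring the /health probe used by the registration scripts.
requests.get(f"{HIVE_API_URL}/health", timeout=5).raise_for_status()

# List currently registered agents (the same endpoint the discovery script POSTs to).
print(requests.get(f"{HIVE_API_URL}/api/agents", timeout=5).json())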

🤖 Generated with [Claude Code](https://claude.ai/code)

Co-Authored-By: Claude <noreply@anthropic.com>
Author: anthonyrawlins
Date: 2025-07-07 21:44:31 +10:00
Commit: d7ad321176
2631 changed files with 870175 additions and 0 deletions

scripts/auto_discover_agents.py Executable file

@@ -0,0 +1,342 @@
#!/usr/bin/env python3
"""
Auto-Discovery Agent Registration Script for Hive
Automatically discovers Ollama endpoints on the subnet and registers them as agents
"""
import asyncio
import aiohttp
import json
import socket
import subprocess
import sys
from concurrent.futures import ThreadPoolExecutor, as_completed
from typing import Dict, List, Optional, Tuple
import time
# Configuration
HIVE_API_URL = "http://localhost:8087"
SUBNET_BASE = "192.168.1"
OLLAMA_PORT = 11434
DISCOVERY_TIMEOUT = 3
class AgentDiscovery:
def __init__(self):
self.session = None
self.discovered_agents = []
async def __aenter__(self):
self.session = aiohttp.ClientSession(
timeout=aiohttp.ClientTimeout(total=DISCOVERY_TIMEOUT)
)
return self
async def __aexit__(self, exc_type, exc_val, exc_tb):
if self.session:
await self.session.close()
def get_subnet_hosts(self) -> List[str]:
"""Get list of potential hosts in subnet"""
# Get network info from cluster docs
known_hosts = [
"192.168.1.27", # WALNUT
"192.168.1.72", # ACACIA
"192.168.1.113", # IRONWOOD
"192.168.1.106", # FORSTEINET
"192.168.1.132", # ROSEWOOD
]
# Also scan common IP ranges
additional_hosts = [f"{SUBNET_BASE}.{i}" for i in range(1, 255)]
# Combine and deduplicate
all_hosts = list(set(known_hosts + additional_hosts))
return all_hosts
def is_port_open(self, host: str, port: int) -> bool:
"""Check if port is open on host"""
try:
with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as sock:
sock.settimeout(1)
result = sock.connect_ex((host, port))
return result == 0
except OSError:
return False
async def get_ollama_info(self, host: str) -> Optional[Dict]:
"""Get Ollama instance information"""
try:
endpoint = f"http://{host}:{OLLAMA_PORT}"
# Test basic connectivity
async with self.session.get(f"{endpoint}/api/tags") as response:
if response.status != 200:
return None
models_data = await response.json()
models = [m["name"] for m in models_data.get("models", [])]
if not models:
return None
# Get system info if available
system_info = await self.get_system_info(host)
return {
"host": host,
"endpoint": endpoint,
"models": models,
"model_count": len(models),
"primary_model": models[0], # Use first model as primary
"system_info": system_info
}
except Exception as e:
print(f" ❌ Error checking {host}: {e}")
return None
async def get_system_info(self, host: str) -> Dict:
"""Get system information (if available)"""
try:
# Try to get hostname via reverse DNS
try:
hostname = socket.gethostbyaddr(host)[0]
# Clean up .local suffix and use short name
if hostname.endswith('.local'):
hostname = hostname[:-6]
except OSError:
hostname = host
# Special mapping for known hosts
hostname_map = {
"192.168.1.135": "oak",
"192.168.1.27": "walnut",
"192.168.1.72": "acacia",
"192.168.1.113": "ironwood",
"192.168.1.132": "rosewood",
"192.168.1.106": "forsteinet"
}
if host in hostname_map:
hostname = hostname_map[host]
return {
"hostname": hostname,
"ip": host
}
except Exception:
return {"hostname": host, "ip": host}
async def discover_agents(self) -> List[Dict]:
"""Discover all Ollama agents on the network"""
print("🔍 Scanning subnet for Ollama endpoints...")
hosts = self.get_subnet_hosts()
# Filter hosts with open Ollama port
print(f" 📡 Checking {len(hosts)} potential hosts...")
open_hosts = []
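# Probe candidate hosts concurrently: with a 1-second connect timeout per
# host, 50 workers keep a full /24 sweep down to a few seconds.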
with ThreadPoolExecutor(max_workers=50) as executor:
futures = {executor.submit(self.is_port_open, host, OLLAMA_PORT): host
for host in hosts}
for future in as_completed(futures):
host = futures[future]
try:
if future.result():
open_hosts.append(host)
print(f" ✅ Found open port: {host}:{OLLAMA_PORT}")
except Exception:
pass
print(f" 📊 Found {len(open_hosts)} hosts with open Ollama ports")
# Get detailed info for each host
print(" 📋 Gathering agent information...")
discovered = []
for host in open_hosts:
print(f" 🔍 Checking {host}...")
info = await self.get_ollama_info(host)
if info:
discovered.append(info)
print(f"{host}: {info['model_count']} models")
else:
print(f"{host}: No response")
return discovered
def determine_agent_specialty(self, models: List[str], hostname: str) -> str:
"""Determine agent specialty based on models and hostname"""
model_str = " ".join(models).lower()
hostname_lower = hostname.lower()
# Check hostname patterns
if "walnut" in hostname_lower:
return "Senior Full-Stack Development & Architecture"
elif "acacia" in hostname_lower:
return "Infrastructure, DevOps & System Architecture"
elif "ironwood" in hostname_lower:
return "Backend Development & Code Analysis"
elif "forsteinet" in hostname_lower:
return "AI Compute & Processing"
elif "rosewood" in hostname_lower:
return "Quality Assurance, Testing & Code Review"
elif "oak" in hostname_lower:
return "iOS/macOS Development & Apple Ecosystem"
# Check model patterns
if "starcoder" in model_str:
return "Full-Stack Development & Code Generation"
elif "deepseek-coder" in model_str:
return "Backend Development & Code Analysis"
elif "deepseek-r1" in model_str:
return "Infrastructure & System Architecture"
elif "devstral" in model_str:
return "Development & Code Review"
elif "llava" in model_str:
return "Vision & Multimodal Analysis"
else:
return "General AI Development"
def determine_capabilities(self, specialty: str) -> List[str]:
"""Determine capabilities based on specialty"""
capability_map = {
"Senior Full-Stack Development & Architecture": [
"full_stack_development", "frontend_frameworks", "backend_apis",
"database_integration", "performance_optimization", "code_architecture"
],
"Infrastructure, DevOps & System Architecture": [
"infrastructure_design", "devops_automation", "system_architecture",
"database_design", "security_implementation", "container_orchestration"
],
"Backend Development & Code Analysis": [
"backend_development", "api_design", "code_analysis", "debugging",
"testing_frameworks", "database_optimization"
],
"AI Compute & Processing": [
"ai_model_inference", "gpu_computing", "distributed_processing",
"model_optimization", "performance_tuning"
],
"Quality Assurance, Testing & Code Review": [
"quality_assurance", "automated_testing", "code_review",
"test_automation", "performance_testing"
],
"iOS/macOS Development & Apple Ecosystem": [
"ios_development", "macos_development", "swift_programming",
"objective_c_development", "xcode_automation", "app_store_deployment",
"swiftui_development", "uikit_development", "apple_framework_integration"
]
}
return capability_map.get(specialty, ["general_development", "code_assistance"])
async def register_agent(self, agent_info: Dict) -> bool:
"""Register a discovered agent with Hive"""
try:
hostname = agent_info["system_info"]["hostname"]
specialty = self.determine_agent_specialty(agent_info["models"], hostname)
capabilities = self.determine_capabilities(specialty)
agent_data = {
"id": hostname.lower().replace(".", "_"),
"endpoint": agent_info["endpoint"],
"model": agent_info["primary_model"],
"specialty": specialty,
"capabilities": capabilities,
"available_models": agent_info["models"],
"model_count": agent_info["model_count"],
"hostname": hostname,
"ip_address": agent_info["host"],
"status": "available",
"current_tasks": 0,
"max_concurrent": 3,
"discovered_at": time.time()
}
async with self.session.post(
f"{HIVE_API_URL}/api/agents",
json=agent_data,
headers={"Content-Type": "application/json"}
) as response:
if response.status == 200:
result = await response.json()
print(f" ✅ Registered {hostname} as {specialty}")
return True
else:
text = await response.text()
print(f" ❌ Failed to register {hostname}: {response.status} - {text}")
return False
except Exception as e:
print(f" ❌ Error registering {agent_info['host']}: {e}")
return False
async def test_hive_connection(self) -> bool:
"""Test connection to Hive API"""
try:
async with self.session.get(f"{HIVE_API_URL}/health") as response:
if response.status == 200:
print("✅ Connected to Hive API")
return True
else:
print(f"❌ Hive API returned status {response.status}")
return False
except Exception as e:
print(f"❌ Failed to connect to Hive API: {e}")
return False
async def main():
"""Main discovery and registration process"""
print("🐝 Hive Agent Auto-Discovery Script")
print("=" * 50)
async with AgentDiscovery() as discovery:
# Test Hive connection
if not await discovery.test_hive_connection():
print("❌ Cannot connect to Hive API. Make sure Hive is running.")
sys.exit(1)
# Discover agents
discovered_agents = await discovery.discover_agents()
if not discovered_agents:
print("❌ No Ollama agents discovered on the network")
sys.exit(1)
print(f"\n📊 Discovered {len(discovered_agents)} agents:")
for agent in discovered_agents:
hostname = agent["system_info"]["hostname"]
print(f"{hostname} ({agent['host']}) - {agent['model_count']} models")
# Register agents
print("\n🔄 Registering discovered agents...")
successful = 0
failed = 0
for agent_info in discovered_agents:
hostname = agent_info["system_info"]["hostname"]
print(f"\n📡 Registering {hostname}...")
if await discovery.register_agent(agent_info):
successful += 1
else:
failed += 1
# Summary
print("\n" + "=" * 50)
print(f"📊 Discovery & Registration Summary:")
print(f" 🔍 Discovered: {len(discovered_agents)}")
print(f" ✅ Registered: {successful}")
print(f" ❌ Failed: {failed}")
if successful > 0:
print(f"\n🎉 Successfully registered {successful} agents!")
print("🔗 Check status: curl http://localhost:8087/api/status")
else:
print("\n💔 No agents were successfully registered.")
sys.exit(1)
if __name__ == "__main__":
asyncio.run(main())


@@ -0,0 +1,322 @@
#!/usr/bin/env python3
"""
Hive Cluster Coordination for n8n Workflow Development
Coordinates distributed development of intelligent task allocation workflows
"""
import asyncio
import aiohttp
import json
import time
from typing import Dict, List
from datetime import datetime
# Cluster configuration
AGENTS = {
"walnut": {
"endpoint": "http://192.168.1.27:11434",
"model": "starcoder2:15b",
"specialty": "Senior Full-Stack Development & Architecture",
"capabilities": ["workflow_design", "frontend_architecture", "complex_coordination"]
},
"ironwood": {
"endpoint": "http://192.168.1.113:11434",
"model": "deepseek-coder-v2",
"specialty": "Backend Development & Code Analysis",
"capabilities": ["api_design", "database_schema", "backend_logic"]
},
"acacia": {
"endpoint": "http://192.168.1.72:11434",
"model": "deepseek-r1:7b",
"specialty": "Infrastructure, DevOps & System Architecture",
"capabilities": ["deployment", "n8n_integration", "system_architecture"]
},
"rosewood": {
"endpoint": "http://192.168.1.132:11434",
"model": "deepseek-r1:8b",
"specialty": "Quality Assurance, Testing & Code Review",
"capabilities": ["testing_workflows", "quality_validation", "performance_testing"]
},
"oak": {
"endpoint": "http://192.168.1.135:11434",
"model": "mistral:7b-instruct",
"specialty": "iOS/macOS Development & Apple Ecosystem",
"capabilities": ["mobile_integration", "apple_ecosystem", "native_apps"]
}
}
class HiveN8NCoordinator:
def __init__(self):
self.session = None
self.results = {}
async def __aenter__(self):
self.session = aiohttp.ClientSession(timeout=aiohttp.ClientTimeout(total=300))
return self
async def __aexit__(self, exc_type, exc_val, exc_tb):
if self.session:
await self.session.close()
async def execute_agent_task(self, agent_id: str, task: Dict) -> Dict:
"""Execute a task on a specific agent"""
agent = AGENTS[agent_id]
print(f"🎯 Assigning to {agent_id.upper()}: {task['title']}")
prompt = f"""You are a {agent['specialty']} specialist working as part of a distributed AI development cluster.
TASK: {task['title']}
CONTEXT: We are building intelligent n8n workflows for automatic task allocation based on a 25-person software company model. The existing framework includes role-based AI agent workflows with:
- Executive Leadership Roles (CEO, CTO, Product Manager)
- Engineering Roles (Frontend, Backend, DevOps, Security, QA)
- Support & Business Roles (Technical Writer, Developer Advocate, Marketing, Customer Success)
- Coordination & Management Roles (Agent Coordinator, Knowledge Manager, Scrum Master)
Your specific assignment: {task['description']}
REQUIREMENTS:
{chr(10).join(f"- {req}" for req in task['requirements'])}
DELIVERABLES:
{chr(10).join(f"- {deliverable}" for deliverable in task['deliverables'])}
Please provide a comprehensive solution that integrates with the existing framework and enhances the automatic task allocation capabilities. Focus on your area of expertise while considering the broader system architecture.
Respond with detailed technical solutions, code examples, and implementation guidance."""
try:
async with self.session.post(
f"{agent['endpoint']}/api/generate",
json={
"model": agent['model'],
"prompt": prompt,
"stream": False,
"options": {
"num_predict": 4000,
"temperature": 0.7
}
}
) as response:
if response.status == 200:
result = await response.json()
return {
"agent": agent_id,
"task": task['title'],
"status": "completed",
"response": result.get('response', ''),
"model": agent['model'],
"tokens_generated": result.get('eval_count', 0),
"generation_time": result.get('eval_duration', 0) / 1000000000, # Convert to seconds
"timestamp": datetime.now().isoformat()
}
else:
return {
"agent": agent_id,
"task": task['title'],
"status": "failed",
"error": f"HTTP {response.status}",
"timestamp": datetime.now().isoformat()
}
except Exception as e:
return {
"agent": agent_id,
"task": task['title'],
"status": "failed",
"error": str(e),
"timestamp": datetime.now().isoformat()
}
async def coordinate_development(self):
"""Coordinate the distributed development of n8n workflows"""
print("🐝 HIVE CLUSTER N8N WORKFLOW DEVELOPMENT COORDINATION")
print("=" * 70)
print(f"🚀 Coordinating {len(AGENTS)} specialized AI agents")
print(f"🎯 Target: Intelligent task allocation workflows for 25-person software company")
print()
# Define tasks for each agent
tasks = {
"walnut": {
"title": "Intelligent Task Allocation Algorithm Design",
"description": "Design the core intelligent task allocation algorithms and workflow architecture for n8n",
"requirements": [
"Analyze agent capabilities and performance characteristics",
"Design dynamic task routing based on complexity and specialty",
"Create load balancing algorithms for optimal resource utilization",
"Design failure handling and fallback mechanisms",
"Plan integration with existing role-based workflow system"
],
"deliverables": [
"Task allocation algorithm specifications",
"Workflow architecture diagrams and documentation",
"Agent capability mapping and scoring system",
"Dynamic routing logic and decision trees",
"Integration plan with existing n8n workflows"
]
},
"ironwood": {
"title": "Backend APIs and Database Schema for Task Routing",
"description": "Implement robust backend APIs and database schema for intelligent task routing and monitoring",
"requirements": [
"Design REST APIs for task submission and agent management",
"Create database schema for task tracking and agent performance",
"Implement real-time task queue management system",
"Build agent health monitoring and performance metrics",
"Design webhook endpoints for n8n integration"
],
"deliverables": [
"Complete REST API specification and implementation",
"Database schema with indexes and performance optimization",
"Task queue management system with priority handling",
"Real-time monitoring APIs with metrics collection",
"Webhook endpoints for seamless n8n integration"
]
},
"acacia": {
"title": "n8n Workflow Deployment and Cluster Integration",
"description": "Set up production-ready n8n workflow deployment with full cluster integration",
"requirements": [
"Deploy enhanced n8n workflows to production environment",
"Configure cluster integration with all 6 agents",
"Set up monitoring and alerting for workflow performance",
"Implement backup and recovery procedures",
"Configure security and access controls"
],
"deliverables": [
"Production deployment scripts and configurations",
"Complete cluster integration with agent discovery",
"Monitoring dashboard and alerting system",
"Backup and recovery documentation and scripts",
"Security configuration and access control setup"
]
},
"rosewood": {
"title": "Comprehensive Testing and Quality Assurance Workflows",
"description": "Develop comprehensive testing strategies and quality assurance workflows for the task allocation system",
"requirements": [
"Create automated testing suites for all workflow components",
"Design performance testing and load testing strategies",
"Implement quality metrics and success criteria",
"Create integration testing for agent coordination",
"Design monitoring and alerting for system health"
],
"deliverables": [
"Automated test suites for n8n workflows and APIs",
"Performance testing framework and benchmarks",
"Quality metrics dashboard and reporting",
"Integration testing scenarios and validation",
"System health monitoring and alerting configuration"
]
},
"oak": {
"title": "iOS/macOS Integration and Mobile Task Management",
"description": "Create iOS/macOS integration components for mobile task management and monitoring",
"requirements": [
"Design native iOS/macOS app for task monitoring",
"Create API integration for real-time cluster status",
"Implement push notifications for task completion",
"Design mobile-friendly task submission interface",
"Plan Apple ecosystem integration features"
],
"deliverables": [
"iOS/macOS app design and architecture",
"API integration specifications and implementation",
"Push notification system design",
"Mobile task submission interface mockups",
"Apple ecosystem integration roadmap"
]
}
}
# Execute all tasks in parallel
print("🔄 Executing tasks across the cluster...")
print()
task_coroutines = []
for agent_id, task in tasks.items():
task_coroutines.append(self.execute_agent_task(agent_id, task))
# Wait for all tasks to complete
results = await asyncio.gather(*task_coroutines, return_exceptions=True)
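# return_exceptions=True keeps a single failing agent from cancelling the
# whole gather; exceptions are surfaced per-result in the loop below.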
# Process results
successful_tasks = 0
failed_tasks = 0
print("\n" + "=" * 70)
print("📊 DEVELOPMENT COORDINATION RESULTS")
print("=" * 70)
for result in results:
if isinstance(result, Exception):
print(f"❌ Task failed with exception: {result}")
failed_tasks += 1
continue
if result['status'] == 'completed':
print(f"{result['agent'].upper()}: {result['task']}")
print(f" 📝 Response: {len(result['response'])} characters")
print(f" 🎯 Tokens: {result['tokens_generated']}")
print(f" ⏱️ Time: {result['generation_time']:.1f}s")
successful_tasks += 1
else:
print(f"{result['agent'].upper()}: {result['task']} - {result.get('error', 'Unknown error')}")
failed_tasks += 1
print()
# Save detailed results
timestamp = int(time.time())
results_file = f"/home/tony/AI/projects/hive/results/n8n_coordination_{timestamp}.json"
with open(results_file, 'w') as f:
json.dump({
"coordination_summary": {
"total_agents": len(AGENTS),
"successful_tasks": successful_tasks,
"failed_tasks": failed_tasks,
"coordination_time": datetime.now().isoformat(),
"target": "n8n intelligent task allocation workflows"
},
"task_results": [r for r in results if not isinstance(r, Exception)],
"agent_configuration": AGENTS
}, f, indent=2)
print("🎉 COORDINATION SUMMARY")
print(f" 📊 Total Agents: {len(AGENTS)}")
print(f" ✅ Successful: {successful_tasks}")
print(f" ❌ Failed: {failed_tasks}")
print(f" 📁 Results: {results_file}")
print()
if successful_tasks > 0:
print("🚀 Next Steps:")
print(" 1. Review detailed agent responses for implementation details")
print(" 2. Integrate solutions from each agent into cohesive system")
print(" 3. Deploy enhanced workflows to n8n production environment")
print(" 4. Test intelligent task allocation with real workloads")
print(" 5. Monitor performance and optimize based on metrics")
return results
async def main():
"""Main coordination function"""
# Ensure results directory exists
import os
os.makedirs("/home/tony/AI/projects/hive/results", exist_ok=True)
async with HiveN8NCoordinator() as coordinator:
await coordinator.coordinate_development()
if __name__ == "__main__":
asyncio.run(main())


@@ -0,0 +1,703 @@
#!/usr/bin/env python3
"""
Migration script to consolidate existing distributed AI projects into Hive
"""
import os
import sys
import json
import yaml
import shutil
import asyncio
import sqlite3
from pathlib import Path
from typing import Dict, List, Any, Optional
from datetime import datetime
# Add paths for importing from existing projects
sys.path.append('/home/tony/AI/projects/distributed-ai-dev')
sys.path.append('/home/tony/AI/projects/McPlan/mcplan-web/backend')
class HiveMigrator:
"""
Migrates and consolidates data from existing distributed AI projects
"""
def __init__(self):
self.hive_root = Path("/home/tony/AI/projects/hive")
self.projects = {
'distributed-ai-dev': Path("/home/tony/AI/projects/distributed-ai-dev"),
'mcplan': Path("/home/tony/AI/projects/McPlan"),
'cluster': Path("/home/tony/AI/projects/cluster"),
'n8n-integration': Path("/home/tony/AI/projects/n8n-integration")
}
# Migration results
self.migration_log = []
self.errors = []
def log(self, message: str, level: str = "INFO"):
"""Log migration step"""
timestamp = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
log_entry = f"[{timestamp}] {level}: {message}"
print(log_entry)
self.migration_log.append(log_entry)
def error(self, message: str):
"""Log error"""
self.log(message, "ERROR")
self.errors.append(message)
async def migrate_all(self):
"""Execute complete migration process"""
self.log("🚀 Starting Hive migration from existing projects")
try:
# Phase 1: Setup and validation
await self.setup_hive_structure()
await self.validate_source_projects()
# Phase 2: Configuration migration
await self.migrate_agent_configurations()
await self.migrate_monitoring_configs()
# Phase 3: Code integration
await self.extract_core_components()
await self.migrate_database_schemas()
# Phase 4: Data migration
await self.migrate_workflows()
await self.migrate_execution_history()
# Phase 5: Documentation and cleanup
await self.generate_migration_report()
self.log("✅ Migration completed successfully!")
except Exception as e:
self.error(f"Migration failed: {str(e)}")
raise
async def setup_hive_structure(self):
"""Create Hive project directory structure"""
self.log("📁 Setting up Hive project structure")
directories = [
"backend/app/core",
"backend/app/api",
"backend/app/models",
"backend/app/services",
"backend/app/utils",
"backend/migrations",
"backend/tests",
"frontend/src/components/dashboard",
"frontend/src/components/workflows",
"frontend/src/components/agents",
"frontend/src/components/projects",
"frontend/src/stores",
"frontend/src/services",
"frontend/src/types",
"frontend/src/hooks",
"frontend/src/utils",
"config/agents",
"config/workflows",
"config/monitoring",
"scripts",
"docker",
"docs/api",
"docs/user-guide",
"docs/admin-guide",
"docs/developer-guide",
"tests/e2e",
"tests/integration",
"tests/performance"
]
for directory in directories:
(self.hive_root / directory).mkdir(parents=True, exist_ok=True)
self.log(f"Created {len(directories)} directories")
async def validate_source_projects(self):
"""Validate that source projects exist and are accessible"""
self.log("🔍 Validating source projects")
for name, path in self.projects.items():
if path.exists():
self.log(f"✅ Found {name} at {path}")
else:
self.error(f"❌ Missing {name} at {path}")
async def migrate_agent_configurations(self):
"""Migrate agent configurations from distributed-ai-dev"""
self.log("🤖 Migrating agent configurations")
source_config = self.projects['distributed-ai-dev'] / 'config' / 'agents.yaml'
if not source_config.exists():
self.log("⚠️ No agent configuration found in distributed-ai-dev")
return
# Load existing configuration
with open(source_config, 'r') as f:
agents_config = yaml.safe_load(f)
# Create enhanced Hive configuration
hive_config = {
'hive': {
'cluster': {
'name': 'Development Cluster',
'region': 'home.deepblack.cloud'
},
'agents': {},
'monitoring': {
'metrics_retention_days': 30,
'alert_thresholds': {
'cpu_usage': 85,
'memory_usage': 90,
'gpu_usage': 95,
'response_time': 60
},
'health_check_interval': 30
},
'workflows': {
'templates': {
'web_development': {
'agents': ['walnut', 'ironwood'],
'stages': ['planning', 'frontend', 'backend', 'integration', 'testing']
},
'infrastructure': {
'agents': ['acacia', 'ironwood'],
'stages': ['design', 'provisioning', 'deployment', 'monitoring']
}
}
},
'mcp_servers': {
'registry': {
'comfyui': 'ws://localhost:8188/api/mcp',
'code_review': 'http://localhost:8000/mcp'
}
},
'security': {
'require_approval': True,
'api_rate_limit': 100,
'session_timeout': 3600
}
}
}
# Migrate agent configurations with enhancements
if 'agents' in agents_config:
for agent_id, agent_config in agents_config['agents'].items():
enhanced_config = {
'name': agent_config.get('name', f'{agent_id.upper()} Agent'),
'endpoint': agent_config.get('endpoint', f'http://localhost:11434'),
'model': agent_config.get('model', 'llama2'),
'specialization': agent_config.get('specialization', 'general'),
'capabilities': agent_config.get('capabilities', []),
'hardware': agent_config.get('hardware', {}),
'performance_targets': agent_config.get('performance_targets', {
'min_tps': 10,
'max_response_time': 30
})
}
hive_config['hive']['agents'][agent_id] = enhanced_config
# Add default agents if none exist
if not hive_config['hive']['agents']:
hive_config['hive']['agents'] = {
'acacia': {
'name': 'ACACIA Infrastructure Specialist',
'endpoint': 'http://192.168.1.72:11434',
'model': 'deepseek-r1:7b',
'specialization': 'infrastructure',
'capabilities': ['devops', 'architecture', 'deployment'],
'hardware': {
'gpu_type': 'AMD Radeon RX 7900 XTX',
'vram_gb': 24,
'cpu_cores': 16
},
'performance_targets': {
'min_tps': 15,
'max_response_time': 30
}
},
'walnut': {
'name': 'WALNUT Full-Stack Developer',
'endpoint': 'http://192.168.1.27:11434',
'model': 'starcoder2:15b',
'specialization': 'full-stack',
'capabilities': ['frontend', 'backend', 'ui-design'],
'hardware': {
'gpu_type': 'NVIDIA RTX 4090',
'vram_gb': 24,
'cpu_cores': 12
},
'performance_targets': {
'min_tps': 20,
'max_response_time': 25
}
},
'ironwood': {
'name': 'IRONWOOD Backend Specialist',
'endpoint': 'http://192.168.1.113:11434',
'model': 'deepseek-coder-v2',
'specialization': 'backend',
'capabilities': ['optimization', 'databases', 'apis'],
'hardware': {
'gpu_type': 'NVIDIA RTX 4080',
'vram_gb': 16,
'cpu_cores': 8
},
'performance_targets': {
'min_tps': 18,
'max_response_time': 35
}
}
}
# Save unified configuration
config_path = self.hive_root / 'config' / 'hive.yaml'
with open(config_path, 'w') as f:
yaml.dump(hive_config, f, default_flow_style=False, sort_keys=False)
self.log(f"✅ Migrated {len(hive_config['hive']['agents'])} agent configurations")
async def migrate_monitoring_configs(self):
"""Migrate monitoring configurations from cluster project"""
self.log("📊 Migrating monitoring configurations")
# Create Prometheus configuration
prometheus_config = {
'global': {
'scrape_interval': '30s',
'evaluation_interval': '30s'
},
'rule_files': ['hive_alerts.yml'],
'scrape_configs': [
{
'job_name': 'hive-backend',
'static_configs': [{'targets': ['hive-coordinator:8000']}],
'metrics_path': '/api/metrics'
},
{
'job_name': 'hive-agents',
'static_configs': [
{'targets': ['192.168.1.72:11434']},
{'targets': ['192.168.1.27:11434']},
{'targets': ['192.168.1.113:11434']}
]
}
]
}
prometheus_path = self.hive_root / 'config' / 'monitoring' / 'prometheus.yml'
with open(prometheus_path, 'w') as f:
yaml.dump(prometheus_config, f)
# Create Grafana dashboard configurations
grafana_config = {
'dashboards': {
'hive_overview': {
'title': 'Hive Cluster Overview',
'panels': [
'Agent Status',
'Task Queue Length',
'Execution Success Rate',
'Response Times',
'Resource Utilization'
]
},
'agent_performance': {
'title': 'Agent Performance Details',
'panels': [
'Tokens per Second',
'GPU Utilization',
'Memory Usage',
'Active Tasks'
]
}
}
}
grafana_path = self.hive_root / 'config' / 'monitoring' / 'grafana.yml'
with open(grafana_path, 'w') as f:
yaml.dump(grafana_config, f)
self.log("✅ Created monitoring configurations")
async def extract_core_components(self):
"""Extract and adapt core components from existing projects"""
self.log("🔧 Extracting core components")
# Map of source files to destination files
component_mapping = {
# From distributed-ai-dev
'distributed-ai-dev/src/core/ai_dev_coordinator.py': 'backend/app/core/hive_coordinator.py',
'distributed-ai-dev/src/monitoring/performance_monitor.py': 'backend/app/core/performance_monitor.py',
'distributed-ai-dev/src/config/agent_manager.py': 'backend/app/core/agent_manager.py',
# From McPlan
'McPlan/mcplan-web/backend/app/core/mcplan_engine.py': 'backend/app/core/workflow_engine.py',
'McPlan/mcplan-web/backend/app/api/workflows.py': 'backend/app/api/workflows.py',
'McPlan/mcplan-web/backend/app/api/execution.py': 'backend/app/api/executions.py',
'McPlan/mcplan-web/backend/app/models/workflow.py': 'backend/app/models/workflow.py',
# Frontend components
'McPlan/mcplan-web/frontend/src/components/WorkflowEditor/': 'frontend/src/components/workflows/',
'McPlan/mcplan-web/frontend/src/components/ExecutionPanel/': 'frontend/src/components/executions/',
'McPlan/mcplan-web/frontend/src/stores/': 'frontend/src/stores/'
}
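# For each mapped path, strip the leading project directory and look for the
# remainder under every known project root; the first existing match wins.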
for source_rel, dest_rel in component_mapping.items():
source_path = None
for project_name, project_path in self.projects.items():
potential_source = project_path / source_rel.split('/', 1)[1]
if potential_source.exists():
source_path = potential_source
break
if source_path:
dest_path = self.hive_root / dest_rel
if source_path.is_file():
dest_path.parent.mkdir(parents=True, exist_ok=True)
shutil.copy2(source_path, dest_path)
self.log(f"📄 Copied {source_path.name}")
elif source_path.is_dir():
dest_path.mkdir(parents=True, exist_ok=True)
shutil.copytree(source_path, dest_path, dirs_exist_ok=True)
self.log(f"📁 Copied directory {source_path.name}")
else:
self.log(f"⚠️ Could not find source: {source_rel}")
self.log("✅ Core components extracted")
async def migrate_database_schemas(self):
"""Create unified database schema"""
self.log("🗄️ Creating unified database schema")
schema_sql = """
-- Hive Unified Database Schema
-- User Management
CREATE TABLE users (
id UUID PRIMARY KEY DEFAULT gen_random_uuid(),
email VARCHAR(255) UNIQUE NOT NULL,
hashed_password VARCHAR(255) NOT NULL,
is_active BOOLEAN DEFAULT true,
role VARCHAR(50) DEFAULT 'developer',
created_at TIMESTAMP DEFAULT NOW(),
last_login TIMESTAMP
);
-- Agent Management
CREATE TABLE agents (
id UUID PRIMARY KEY DEFAULT gen_random_uuid(),
name VARCHAR(255) NOT NULL,
endpoint VARCHAR(512) NOT NULL,
model VARCHAR(255),
specialization VARCHAR(100),
capabilities JSONB,
hardware_config JSONB,
status VARCHAR(50) DEFAULT 'offline',
performance_targets JSONB,
created_at TIMESTAMP DEFAULT NOW(),
last_seen TIMESTAMP
);
-- Workflow Management
CREATE TABLE workflows (
id UUID PRIMARY KEY DEFAULT gen_random_uuid(),
name VARCHAR(255) NOT NULL,
description TEXT,
n8n_data JSONB NOT NULL,
mcp_tools JSONB,
created_by UUID REFERENCES users(id),
version INTEGER DEFAULT 1,
active BOOLEAN DEFAULT true,
created_at TIMESTAMP DEFAULT NOW(),
updated_at TIMESTAMP DEFAULT NOW()
);
-- Execution Tracking
CREATE TABLE executions (
id UUID PRIMARY KEY DEFAULT gen_random_uuid(),
workflow_id UUID REFERENCES workflows(id),
status VARCHAR(50) DEFAULT 'pending',
input_data JSONB,
output_data JSONB,
error_message TEXT,
progress INTEGER DEFAULT 0,
started_at TIMESTAMP,
completed_at TIMESTAMP,
created_at TIMESTAMP DEFAULT NOW()
);
-- Task Management
CREATE TABLE tasks (
id UUID PRIMARY KEY DEFAULT gen_random_uuid(),
title VARCHAR(255) NOT NULL,
description TEXT,
priority INTEGER DEFAULT 5,
status VARCHAR(50) DEFAULT 'pending',
assigned_agent_id UUID REFERENCES agents(id),
workflow_id UUID REFERENCES workflows(id),
execution_id UUID REFERENCES executions(id),
metadata JSONB,
created_at TIMESTAMP DEFAULT NOW(),
started_at TIMESTAMP,
completed_at TIMESTAMP
);
-- Performance Metrics (Time Series)
CREATE TABLE agent_metrics (
agent_id UUID REFERENCES agents(id),
timestamp TIMESTAMP NOT NULL,
cpu_usage FLOAT,
memory_usage FLOAT,
gpu_usage FLOAT,
tokens_per_second FLOAT,
response_time FLOAT,
active_tasks INTEGER,
status VARCHAR(50),
PRIMARY KEY (agent_id, timestamp)
);
-- System Alerts
CREATE TABLE alerts (
id UUID PRIMARY KEY DEFAULT gen_random_uuid(),
type VARCHAR(100) NOT NULL,
severity VARCHAR(20) NOT NULL,
message TEXT NOT NULL,
agent_id UUID REFERENCES agents(id),
resolved BOOLEAN DEFAULT false,
created_at TIMESTAMP DEFAULT NOW(),
resolved_at TIMESTAMP
);
-- API Keys
CREATE TABLE api_keys (
id UUID PRIMARY KEY DEFAULT gen_random_uuid(),
user_id UUID REFERENCES users(id),
name VARCHAR(255) NOT NULL,
key_hash VARCHAR(255) NOT NULL,
is_active BOOLEAN DEFAULT true,
expires_at TIMESTAMP,
created_at TIMESTAMP DEFAULT NOW()
);
-- Indexes for performance
CREATE INDEX idx_agents_status ON agents(status);
CREATE INDEX idx_workflows_active ON workflows(active, created_at);
CREATE INDEX idx_executions_status ON executions(status, created_at);
CREATE INDEX idx_tasks_status_priority ON tasks(status, priority DESC, created_at);
CREATE INDEX idx_agent_metrics_timestamp ON agent_metrics(timestamp);
CREATE INDEX idx_agent_metrics_agent_time ON agent_metrics(agent_id, timestamp);
CREATE INDEX idx_alerts_unresolved ON alerts(resolved, created_at) WHERE resolved = false;
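-- idx_alerts_unresolved is a partial index: only rows with resolved = false are
-- indexed, which keeps lookups for the active-alert view small as history grows.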
-- Sample data
INSERT INTO users (email, hashed_password, role) VALUES
('admin@hive.local', '$2b$12$LQv3c1yqBWVHxkd0LHAkCOYz6TtxMQJqhN8/lewohT6ZErjH.2T.2', 'admin'),
('developer@hive.local', '$2b$12$LQv3c1yqBWVHxkd0LHAkCOYz6TtxMQJqhN8/lewohT6ZErjH.2T.2', 'developer');
"""
schema_path = self.hive_root / 'backend' / 'migrations' / '001_initial_schema.sql'
with open(schema_path, 'w') as f:
f.write(schema_sql)
self.log("✅ Database schema created")
async def migrate_workflows(self):
"""Migrate existing workflows from McPlan"""
self.log("🔄 Migrating workflows")
workflows_migrated = 0
# Look for workflow files in McPlan
mcplan_workflows = self.projects['mcplan'] / 'mcplan-web'
workflow_files = []
# Find JSON workflow files
for json_file in mcplan_workflows.rglob('*.json'):
if 'workflow' in json_file.name.lower() or 'n8n' in json_file.name.lower():
workflow_files.append(json_file)
# Migrate each workflow file
for workflow_file in workflow_files:
try:
with open(workflow_file, 'r') as f:
workflow_data = json.load(f)
# Create workflow migration record
migration_record = {
'source_file': str(workflow_file),
'name': workflow_data.get('name', workflow_file.stem),
'description': f'Migrated from {workflow_file.name}',
'n8n_data': workflow_data,
'migrated_at': datetime.now().isoformat()
}
# Save to Hive workflows directory
dest_file = self.hive_root / 'config' / 'workflows' / f'{workflow_file.stem}.json'
with open(dest_file, 'w') as f:
json.dump(migration_record, f, indent=2)
workflows_migrated += 1
self.log(f"📄 Migrated workflow: {workflow_file.name}")
except Exception as e:
self.error(f"Failed to migrate {workflow_file.name}: {str(e)}")
self.log(f"✅ Migrated {workflows_migrated} workflows")
async def migrate_execution_history(self):
"""Migrate execution history from McPlan database"""
self.log("📊 Migrating execution history")
# Look for McPlan database
mcplan_db = self.projects['mcplan'] / 'mcplan-web' / 'mcplan.db'
if not mcplan_db.exists():
self.log("⚠️ No McPlan database found, skipping execution history")
return
executions_migrated = 0
try:
# Connect to McPlan SQLite database
conn = sqlite3.connect(mcplan_db)
cursor = conn.cursor()
# Export execution data
cursor.execute("SELECT name FROM sqlite_master WHERE type='table';")
tables = cursor.fetchall()
migration_data = {}
for (table_name,) in tables:
cursor.execute(f"SELECT * FROM {table_name}")
rows = cursor.fetchall()
# Get column names
cursor.execute(f"PRAGMA table_info({table_name})")
columns = [row[1] for row in cursor.fetchall()]
# Convert to dictionaries
table_data = []
for row in rows:
table_data.append(dict(zip(columns, row)))
migration_data[table_name] = table_data
conn.close()
# Save migration data
migration_file = self.hive_root / 'scripts' / 'mcplan_data_export.json'
with open(migration_file, 'w') as f:
json.dump(migration_data, f, indent=2, default=str)
executions_migrated = len(migration_data.get('executions', []))
self.log(f"✅ Exported {executions_migrated} execution records")
except Exception as e:
self.error(f"Failed to migrate execution history: {str(e)}")
async def generate_migration_report(self):
"""Generate comprehensive migration report"""
self.log("📋 Generating migration report")
report = {
'migration_summary': {
'timestamp': datetime.now().isoformat(),
'source_projects': list(self.projects.keys()),
'hive_version': '1.0.0',
'migration_status': 'completed' if not self.errors else 'completed_with_errors'
},
'components_migrated': {
'agent_configurations': 'config/hive.yaml',
'monitoring_configs': 'config/monitoring/',
'database_schema': 'backend/migrations/001_initial_schema.sql',
'core_components': 'backend/app/core/',
'api_endpoints': 'backend/app/api/',
'frontend_components': 'frontend/src/components/',
'workflows': 'config/workflows/'
},
'next_steps': [
'Review and update imported configurations',
'Set up development environment with docker-compose up',
'Run database migrations',
'Test agent connectivity',
'Verify workflow execution',
'Configure monitoring and alerting',
'Update documentation'
],
'migration_log': self.migration_log,
'errors': self.errors
}
report_path = self.hive_root / 'MIGRATION_REPORT.json'
with open(report_path, 'w') as f:
json.dump(report, f, indent=2)
# Also create a markdown summary
md_report = f"""# Hive Migration Report
## Summary
- **Migration Date**: {report['migration_summary']['timestamp']}
- **Status**: {report['migration_summary']['migration_status']}
- **Source Projects**: {', '.join(report['migration_summary']['source_projects'])}
- **Errors**: {len(self.errors)}
## Components Migrated
"""
for component, location in report['components_migrated'].items():
md_report += f"- **{component.replace('_', ' ').title()}**: `{location}`\n"
md_report += f"""
## Next Steps
"""
for i, step in enumerate(report['next_steps'], 1):
md_report += f"{i}. {step}\n"
if self.errors:
md_report += f"""
## Errors Encountered
"""
for error in self.errors:
md_report += f"- {error}\n"
md_report_path = self.hive_root / 'MIGRATION_REPORT.md'
with open(md_report_path, 'w') as f:
f.write(md_report)
self.log("✅ Migration report generated")
self.log(f"📄 Report saved to: {report_path}")
self.log(f"📄 Summary saved to: {md_report_path}")
async def main():
"""Main migration function"""
migrator = HiveMigrator()
try:
await migrator.migrate_all()
print("\n" + "="*60)
print("🎉 HIVE MIGRATION COMPLETED!")
print("="*60)
print(f"✅ Migration successful with {len(migrator.errors)} errors")
print(f"📁 Hive project created at: {migrator.hive_root}")
print(f"📋 Check MIGRATION_REPORT.md for detailed results")
print("\nNext steps:")
print("1. cd /home/tony/AI/projects/hive")
print("2. Review config/hive.yaml")
print("3. docker-compose up -d")
print("4. Visit http://localhost:3000")
print("="*60)
except Exception as e:
print(f"\n❌ Migration failed: {str(e)}")
print(f"📋 Check logs for details")
sys.exit(1)
if __name__ == "__main__":
asyncio.run(main())

scripts/register_agents.py Executable file

@@ -0,0 +1,145 @@
#!/usr/bin/env python3
"""
Agent Registration Script for Hive
Registers cluster agents with the Hive orchestration system
"""
import json
import requests
import yaml
import sys
import time
from pathlib import Path
# Configuration
HIVE_API_URL = "http://localhost:8087"
CONFIG_FILE = "/home/tony/AI/projects/hive/config/hive.yaml"
def load_config():
"""Load the hive.yaml configuration file"""
try:
with open(CONFIG_FILE, 'r') as f:
return yaml.safe_load(f)
except Exception as e:
print(f"❌ Error loading config: {e}")
sys.exit(1)
def test_hive_connection():
"""Test connection to Hive API"""
try:
response = requests.get(f"{HIVE_API_URL}/health", timeout=5)
if response.status_code == 200:
print("✅ Connected to Hive API")
return True
else:
print(f"❌ Hive API returned status {response.status_code}")
return False
except Exception as e:
print(f"❌ Failed to connect to Hive API: {e}")
return False
def test_agent_connectivity(endpoint):
"""Test if an agent endpoint is responsive"""
try:
response = requests.get(f"{endpoint}/api/tags", timeout=5)
return response.status_code == 200
except requests.RequestException:
return False
def register_agent(agent_id, agent_config):
"""Register a single agent with Hive"""
# Check if agent is responsive
if not test_agent_connectivity(agent_config['endpoint']):
print(f"⚠️ {agent_id.upper()} is not responsive at {agent_config['endpoint']}")
return False
# Prepare agent registration data
agent_data = {
"id": agent_id,
"endpoint": agent_config['endpoint'],
"model": agent_config['model'],
"specialty": agent_config['specialization'],
"capabilities": agent_config['capabilities'],
"hardware": agent_config['hardware'],
"performance_targets": agent_config['performance_targets'],
"status": "available",
"current_tasks": 0,
"max_concurrent": 3 # Default concurrent task limit
}
try:
# Register the agent
response = requests.post(
f"{HIVE_API_URL}/api/agents",
json=agent_data,
headers={"Content-Type": "application/json"},
timeout=10
)
if response.status_code == 200:
result = response.json()
print(f"✅ Registered {agent_id.upper()} - Agent ID: {result.get('agent_id', 'Unknown')}")
return True
else:
print(f"❌ Failed to register {agent_id.upper()}: {response.status_code} - {response.text}")
return False
except Exception as e:
print(f"❌ Error registering {agent_id.upper()}: {e}")
return False
def main():
"""Main registration process"""
print("🐝 Hive Agent Registration Script")
print("=" * 50)
# Test Hive connection
if not test_hive_connection():
print("❌ Cannot connect to Hive API. Make sure Hive is running.")
sys.exit(1)
# Load configuration
config = load_config()
agents = config.get('hive', {}).get('agents', {})
if not agents:
print("❌ No agents found in configuration")
sys.exit(1)
print(f"📋 Found {len(agents)} agents to register:")
for agent_id in agents.keys():
print(f"{agent_id.upper()}")
print("\n🔄 Starting registration process...")
# Register each agent
successful_registrations = 0
failed_registrations = 0
for agent_id, agent_config in agents.items():
print(f"\n📡 Registering {agent_id.upper()}...")
if register_agent(agent_id, agent_config):
successful_registrations += 1
else:
failed_registrations += 1
time.sleep(1) # Brief pause between registrations
# Summary
print("\n" + "=" * 50)
print(f"📊 Registration Summary:")
print(f" ✅ Successful: {successful_registrations}")
print(f" ❌ Failed: {failed_registrations}")
print(f" 📈 Total: {successful_registrations + failed_registrations}")
if successful_registrations > 0:
print(f"\n🎉 Successfully registered {successful_registrations} agents!")
print("🔗 Check agent status: curl http://localhost:8087/api/agents")
else:
print("\n💔 No agents were successfully registered.")
sys.exit(1)
if __name__ == "__main__":
main()


@@ -0,0 +1,97 @@
#!/bin/bash
# 🐝 Hive Claude Integration Setup Script
# Sets up MCP server configuration for Claude Desktop
set -e
echo "🐝 Setting up Hive MCP Server for Claude Integration..."
# Get the absolute path to the Hive project
HIVE_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")/.." && pwd)"
MCP_SERVER_PATH="$HIVE_DIR/mcp-server/dist/index.js"
echo "📁 Hive directory: $HIVE_DIR"
echo "🔧 MCP server path: $MCP_SERVER_PATH"
# Check if MCP server is built
if [ ! -f "$MCP_SERVER_PATH" ]; then
echo "❌ MCP server not found. Building..."
cd "$HIVE_DIR/mcp-server"
npm install
npm run build
echo "✅ MCP server built successfully"
fi
# Detect Claude Desktop config location
if [[ "$OSTYPE" == "darwin"* ]]; then
# macOS
CLAUDE_CONFIG_DIR="$HOME/Library/Application Support/Claude"
elif [[ "$OSTYPE" == "linux-gnu"* ]]; then
# Linux
CLAUDE_CONFIG_DIR="$HOME/.config/claude-desktop"
elif [[ "$OSTYPE" == "msys" || "$OSTYPE" == "win32" ]]; then
# Windows
CLAUDE_CONFIG_DIR="$APPDATA/Claude"
else
echo "❓ Unknown OS type: $OSTYPE"
echo "Please manually configure Claude Desktop using the example config."
exit 1
fi
CLAUDE_CONFIG_FILE="$CLAUDE_CONFIG_DIR/claude_desktop_config.json"
echo "🔍 Claude config directory: $CLAUDE_CONFIG_DIR"
echo "📝 Claude config file: $CLAUDE_CONFIG_FILE"
# Create Claude config directory if it doesn't exist
mkdir -p "$CLAUDE_CONFIG_DIR"
# Check if config file exists
if [ -f "$CLAUDE_CONFIG_FILE" ]; then
echo "⚠️ Claude Desktop config already exists"
echo "📋 Backing up existing config..."
cp "$CLAUDE_CONFIG_FILE" "$CLAUDE_CONFIG_FILE.backup.$(date +%Y%m%d_%H%M%S)"
echo "✅ Backup created: $CLAUDE_CONFIG_FILE.backup.*"
fi
# Generate Claude Desktop config
echo "📝 Creating Claude Desktop configuration..."
cat > "$CLAUDE_CONFIG_FILE" << EOF
{
"mcpServers": {
"hive": {
"command": "node",
"args": ["$MCP_SERVER_PATH"],
"env": {
"HIVE_API_URL": "http://localhost:8087",
"HIVE_WS_URL": "ws://localhost:8087"
}
}
}
}
EOF
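# The generated config tells Claude Desktop to launch the Hive MCP server with
# node and points it at the local coordinator's HTTP and WebSocket endpoints.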
echo "✅ Claude Desktop configuration created!"
echo ""
echo "🎯 Next Steps:"
echo "1. Ensure your Hive cluster is running:"
echo " cd $HIVE_DIR && docker compose ps"
echo ""
echo "2. Restart Claude Desktop to load the MCP server"
echo ""
echo "3. In Claude, you can now use commands like:"
echo " • 'Show me my Hive cluster status'"
echo " • 'Register a new agent at http://walnut.local:11434'"
echo " • 'Create a kernel development task for FlashAttention optimization'"
echo " • 'Coordinate development across my distributed team'"
echo ""
echo "🐝 Hive MCP integration is ready!"
echo ""
echo "📋 Configuration Details:"
echo " • MCP Server: $MCP_SERVER_PATH"
echo " • Hive API: http://localhost:8087"
echo " • Claude Config: $CLAUDE_CONFIG_FILE"
echo ""
echo "🔧 To modify the configuration later, edit: $CLAUDE_CONFIG_FILE"

scripts/start_hive.sh Executable file

@@ -0,0 +1,183 @@
#!/bin/bash
# Hive Startup Script
# Unified Distributed AI Orchestration Platform
set -e
HIVE_ROOT="/home/tony/AI/projects/hive"
LOG_FILE="$HIVE_ROOT/logs/startup.log"
# Colors for output
RED='\033[0;31m'
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
BLUE='\033[0;34m'
PURPLE='\033[0;35m'
CYAN='\033[0;36m'
NC='\033[0m' # No Color
# Create logs directory
mkdir -p "$HIVE_ROOT/logs"
log() {
echo -e "$1" | tee -a "$LOG_FILE"
}
log_info() {
log "${BLUE}[INFO]${NC} $1"
}
log_success() {
log "${GREEN}[SUCCESS]${NC} $1"
}
log_warning() {
log "${YELLOW}[WARNING]${NC} $1"
}
log_error() {
log "${RED}[ERROR]${NC} $1"
}
# Header
echo -e "${PURPLE}"
cat << "EOF"
🐝 =============================================== 🐝
HIVE - Distributed AI Orchestration Platform
Consolidating the power of:
• McPlan (n8n → MCP Bridge)
• Distributed AI Development
• Multi-Agent Coordination
• Real-time Monitoring
🐝 =============================================== 🐝
EOF
echo -e "${NC}"
# Change to Hive directory
cd "$HIVE_ROOT"
log_info "Starting Hive initialization..."
log_info "Working directory: $(pwd)"
log_info "Timestamp: $(date)"
# Check if Docker is running
if ! docker info &> /dev/null; then
log_error "Docker is not running. Please start Docker first."
exit 1
fi
log_success "Docker is running"
# Check if docker compose is available
if ! docker compose version &> /dev/null; then
log_error "docker compose is not available. Please install Docker with Compose plugin."
exit 1
fi
log_success "docker compose is available"
# Pull latest images
log_info "Pulling latest base images..."
docker compose pull postgres redis prometheus grafana
# Build Hive services
log_info "Building Hive services..."
if docker compose build; then
log_success "Hive services built successfully"
else
log_error "Failed to build Hive services"
exit 1
fi
# Start services
log_info "Starting Hive services..."
if docker compose up -d; then
log_success "Hive services started successfully"
else
log_error "Failed to start Hive services"
exit 1
fi
# Wait for services to be ready
log_info "Waiting for services to be ready..."
sleep 10
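# The fixed delay is only a coarse readiness gate; the per-service checks
# below verify that each container actually reports as "Up".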
# Check service health
log_info "Checking service health..."
services=("postgres" "redis" "hive-backend" "hive-frontend" "prometheus" "grafana")
healthy_services=0
for service in "${services[@]}"; do
if docker compose ps "$service" | grep -q "Up"; then
log_success "$service is running"
healthy_services=$((healthy_services + 1))
else
log_warning "$service is not running properly"
fi
done
if [ $healthy_services -eq ${#services[@]} ]; then
log_success "All services are healthy!"
else
log_warning "$healthy_services/${#services[@]} services are healthy"
fi
# Display service URLs
echo -e "\n${CYAN}🔗 Service URLs:${NC}"
echo -e "${GREEN} • Hive Frontend:${NC} http://localhost:3000"
echo -e "${GREEN} • Hive API:${NC} http://localhost:8000"
echo -e "${GREEN} • API Documentation:${NC} http://localhost:8000/docs"
echo -e "${GREEN} • Grafana Dashboard:${NC} http://localhost:3001 (admin/hiveadmin)"
echo -e "${GREEN} • Prometheus:${NC} http://localhost:9090"
echo -e "${GREEN} • PostgreSQL:${NC} localhost:5432 (hive/hivepass)"
echo -e "${GREEN} • Redis:${NC} localhost:6379"
# Display agent status
echo -e "\n${CYAN}🤖 Configured Agents:${NC}"
echo -e "${GREEN} • ACACIA:${NC} http://192.168.1.72:11434 (Infrastructure)"
echo -e "${GREEN} • WALNUT:${NC} http://192.168.1.27:11434 (Full-Stack)"
echo -e "${GREEN} • IRONWOOD:${NC} http://192.168.1.113:11434 (Backend)"
echo -e "${GREEN} • ROSEWOOD:${NC} http://192.168.1.132:11434 (QA/Testing)"
echo -e "${GREEN} • OAK:${NC} http://oak.local:11434 (iOS/macOS)"
echo -e "${GREEN} • TULLY:${NC} http://Tullys-MacBook-Air.local:11434 (Mobile)"
# Display next steps
echo -e "\n${PURPLE}📋 Next Steps:${NC}"
echo -e "${YELLOW} 1.${NC} Open Hive Dashboard: ${BLUE}http://localhost:3000${NC}"
echo -e "${YELLOW} 2.${NC} Check agent connectivity in the dashboard"
echo -e "${YELLOW} 3.${NC} Import or create your first workflow"
echo -e "${YELLOW} 4.${NC} Monitor execution in real-time"
echo -e "${YELLOW} 5.${NC} View metrics in Grafana: ${BLUE}http://localhost:3001${NC}"
# Display management commands
echo -e "\n${PURPLE}🛠️ Management Commands:${NC}"
echo -e "${YELLOW} • View logs:${NC} docker compose logs -f"
echo -e "${YELLOW} • Stop services:${NC} docker compose down"
echo -e "${YELLOW} • Restart:${NC} docker compose restart"
echo -e "${YELLOW} • Shell access:${NC} docker compose exec hive-backend bash"
# Check agent connectivity
echo -e "\n${CYAN}🔍 Testing Agent Connectivity:${NC}"
agents=(
"ACACIA:192.168.1.72:11434"
"WALNUT:192.168.1.27:11434"
"IRONWOOD:192.168.1.113:11434"
)
for agent_info in "${agents[@]}"; do
IFS=':' read -r name host port <<< "$agent_info"
if timeout 5 curl -s "http://$host:$port/api/tags" &> /dev/null; then
log_success "$name agent is responsive"
else
log_warning "$name agent is not responsive (http://$host:$port)"
fi
done
echo -e "\n${GREEN}🎉 Hive startup complete!${NC}"
echo -e "${CYAN}🐝 Welcome to the distributed AI future!${NC}"
log_info "Hive startup completed at $(date)"