This comprehensive refactoring addresses critical architectural issues: IMPORT CYCLE RESOLUTION: • pkg/crypto ↔ pkg/slurp/roles: Created pkg/security/access_levels.go • pkg/ucxl → pkg/dht: Created pkg/storage/interfaces.go • pkg/slurp/leader → pkg/election → pkg/slurp/storage: Moved types to pkg/election/interfaces.go MODULE PATH MIGRATION: • Changed from github.com/anthonyrawlins/bzzz to chorus.services/bzzz • Updated all import statements across 115+ files • Maintains compatibility while removing personal GitHub account dependency TYPE SYSTEM IMPROVEMENTS: • Resolved duplicate type declarations in crypto package • Added missing type definitions (RoleStatus, TimeRestrictions, KeyStatus, KeyRotationResult) • Proper interface segregation to prevent future cycles ARCHITECTURAL BENEFITS: • Build now progresses past structural issues to normal dependency resolution • Cleaner separation of concerns between packages • Eliminates circular dependencies that prevented compilation • Establishes foundation for scalable codebase growth 🤖 Generated with [Claude Code](https://claude.ai/code) Co-Authored-By: Claude <noreply@anthropic.com>
514 lines
15 KiB
Bash
Executable File
514 lines
15 KiB
Bash
Executable File
#!/bin/bash
set -euo pipefail

# BZZZ v1 to v2 Migration Script
# This script handles the complete migration from BZZZ v1 (SystemD) to v2 (Docker Swarm)
#
# Usage: migrate-v1-to-v2.sh [--dry-run|--rollback|--help]
# Env:   DRY_RUN=true  - preview every step without changing anything

# Directory this script lives in; sibling helpers (e.g. the hypercore
# converter) are resolved relative to it.
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
# Timestamped per-run log file.
# NOTE(review): /var/log normally requires root to write, yet the script
# refuses to run as root — confirm the invoking user can write here.
LOG_FILE="/var/log/bzzz-migration-$(date +%Y%m%d-%H%M%S).log"
# Timestamped backup root; a fresh directory is created on every invocation.
BACKUP_DIR="/rust/bzzz-v2/backup/$(date +%Y%m%d-%H%M%S)"
# Dry-run toggle; may be preset in the environment or forced by --dry-run.
DRY_RUN=${DRY_RUN:-false}

# Colors for output
RED='\033[0;31m'
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
BLUE='\033[0;34m'
NC='\033[0m' # No Color
|
|
|
|
# Timestamped info line, echoed to the terminal and mirrored into LOG_FILE.
log() {
  local stamp
  stamp=$(date +'%Y-%m-%d %H:%M:%S')
  echo -e "${BLUE}[${stamp}]${NC} $1" | tee -a "$LOG_FILE"
}
|
|
|
|
# Fatal error: print a red [ERROR] line to stderr (diagnostics must not
# pollute stdout), mirror it into LOG_FILE, and abort the script.
error() {
  echo -e "${RED}[ERROR]${NC} $1" | tee -a "$LOG_FILE" >&2
  exit 1
}
|
|
|
|
# Non-fatal warning: yellow [WARN] line on stderr (keeps stdout clean for
# consumers that capture it), mirrored into LOG_FILE.
warn() {
  echo -e "${YELLOW}[WARN]${NC} $1" | tee -a "$LOG_FILE" >&2
}
|
|
|
|
# Green [SUCCESS] line, echoed to the terminal and mirrored into LOG_FILE.
success() {
  local tag="${GREEN}[SUCCESS]${NC}"
  echo -e "${tag} $1" | tee -a "$LOG_FILE"
}
|
|
|
|
# Validate the environment before touching anything: non-root invocation,
# required CLI tools, an active Docker Swarm, and >=10GB free on /rust.
# Aborts via error() on the first unmet requirement.
check_prerequisites() {
  log "Checking prerequisites..."

  # Refuse root; sudo is used selectively for the few steps that need it.
  if [[ $EUID -eq 0 ]]; then
    error "This script should not be run as root. Run as tony user with sudo access."
  fi

  # Check required commands
  local commands=("docker" "systemctl" "pg_dump" "rsync" "curl")
  local cmd
  for cmd in "${commands[@]}"; do
    if ! command -v "$cmd" &> /dev/null; then
      error "Required command '$cmd' not found"
    fi
  done

  # Check Docker Swarm status
  if ! docker info | grep -q "Swarm: active"; then
    error "Docker Swarm is not active. Please initialize swarm first."
  fi

  # Check available disk space.
  # Declare and assign separately so a df/awk failure is not masked by
  # `local` returning 0 (ShellCheck SC2155).
  local available
  available=$(df /rust | awk 'NR==2 {print $4}') || error "Unable to determine free space on /rust"
  local -r required=10485760 # 10GB in KB
  if (( available < required )); then
    error "Insufficient disk space. Need at least 10GB available in /rust"
  fi

  success "Prerequisites check passed"
}
|
|
|
|
# Preserve every piece of v1 state (source tree, systemd unit, user config,
# per-node data directories) under the timestamped BACKUP_DIR before any
# destructive step runs. Honors DRY_RUN.
backup_v1_data() {
  log "Creating backup of v1 data..."

  if [[ "$DRY_RUN" == "true" ]]; then
    log "[DRY RUN] Would create backup at: $BACKUP_DIR"
    return 0
  fi

  mkdir -p "$BACKUP_DIR"

  # Optional trees: copy each "source|dest-subdir" pair only when present.
  local mapping src dest
  for mapping in \
      "/home/tony/chorus/project-queues/active/BZZZ|v1-source" \
      "/home/tony/.config/bzzz|config"; do
    src="${mapping%|*}"
    dest="${mapping#*|}"
    if [[ -d "$src" ]]; then
      rsync -av "$src/" "$BACKUP_DIR/$dest/"
    fi
  done

  # Systemd unit file may not exist; ignore a failed copy.
  sudo cp /etc/systemd/system/bzzz.service "$BACKUP_DIR/" 2>/dev/null || true

  # Per-node data directories.
  local host
  for host in walnut ironwood acacia; do
    if [[ -d "/rust/bzzz/$host" ]]; then
      rsync -av "/rust/bzzz/$host/" "$BACKUP_DIR/data/$host/"
    fi
  done

  success "Backup completed at: $BACKUP_DIR"
}
|
|
|
|
# Stop and disable the v1 systemd services on every node, tolerating either
# the templated (bzzz@node) or plain (bzzz) unit name. Honors DRY_RUN.
stop_v1_services() {
  log "Stopping BZZZ v1 services..."

  if [[ "$DRY_RUN" == "true" ]]; then
    log "[DRY RUN] Would stop v1 systemd services"
    return 0
  fi

  local host
  for host in walnut ironwood acacia; do
    if sudo systemctl is-active --quiet "bzzz@$host" 2>/dev/null \
        || sudo systemctl is-active --quiet bzzz 2>/dev/null; then
      log "Stopping BZZZ service on $host..."
      sudo systemctl stop "bzzz@$host" 2>/dev/null || sudo systemctl stop bzzz 2>/dev/null || true
      sudo systemctl disable "bzzz@$host" 2>/dev/null || sudo systemctl disable bzzz 2>/dev/null || true
    fi
  done

  # Give the units time to shut down fully before data is touched.
  sleep 10

  success "v1 services stopped"
}
|
|
|
|
# Build the /rust/bzzz-v2 directory tree and write a placeholder agent
# config. Idempotent: mkdir -p and rewriting the YAML are safe to re-run.
# Honors DRY_RUN.
setup_v2_infrastructure() {
  log "Setting up v2 infrastructure..."

  if [[ "$DRY_RUN" == "true" ]]; then
    log "[DRY RUN] Would create v2 directory structure"
    return 0
  fi

  # Create directory structure
  mkdir -p /rust/bzzz-v2/{config,data,logs}
  mkdir -p /rust/bzzz-v2/data/{blobs,conversations,dht,postgres,redis}
  mkdir -p /rust/bzzz-v2/data/blobs/{data,index,temp}
  mkdir -p /rust/bzzz-v2/data/dht/{walnut,ironwood,acacia}
  mkdir -p /rust/bzzz-v2/config/{swarm,systemd,secrets}
  mkdir -p /rust/bzzz-v2/logs/{application,p2p,monitoring}

  # Set permissions
  # NOTE(review): only chown uses sudo — assumes the mkdir calls above
  # succeeded as the invoking user; confirm /rust is writable without sudo.
  sudo chown -R tony:tony /rust/bzzz-v2
  chmod -R 755 /rust/bzzz-v2

  # Create placeholder configuration files.
  # Quoted 'EOF' delimiter: the YAML below is written verbatim, with no
  # shell expansion; id/api_key are intentionally blank placeholders.
  cat > /rust/bzzz-v2/config/bzzz-config.yaml << 'EOF'
agent:
  id: ""
  specialization: "advanced_reasoning"
  capabilities: ["code_generation", "debugging", "analysis"]
  models: ["llama3.2:70b", "qwen2.5:72b"]
  max_tasks: 3

whoosh_api:
  base_url: "http://whoosh.deepblack.cloud"
  api_key: ""

dht:
  bootstrap_nodes:
    - "walnut:9101"
    - "ironwood:9102"
    - "acacia:9103"

content_store:
  path: "/app/data/blobs"
  replication_factor: 3
  shard_depth: 2

openai:
  rate_limit_rpm: 1000
  rate_limit_tpm: 100000
  cost_tracking: true
EOF

  success "v2 infrastructure setup completed"
}
|
|
|
|
# Convert v1 hypercore logs into content-addressed blobs via the sibling
# Python converter. Missing logs or a missing converter are warnings, not
# errors — migration proceeds without conversation history. Honors DRY_RUN.
migrate_conversation_data() {
  log "Migrating conversation data..."

  if [[ "$DRY_RUN" == "true" ]]; then
    log "[DRY RUN] Would migrate hypercore logs to content-addressed storage"
    return 0
  fi

  # Gather whichever per-node hypercore logs actually exist.
  local candidates=()
  local host candidate
  for host in walnut ironwood acacia; do
    candidate="/home/tony/.config/bzzz/hypercore-$host.log"
    if [[ -f "$candidate" ]]; then
      candidates+=("$candidate")
    fi
  done

  if [[ ${#candidates[@]} -eq 0 ]]; then
    warn "No hypercore logs found for migration"
    return 0
  fi

  local converter="$SCRIPT_DIR/convert-hypercore-to-cas.py"
  if [[ -f "$converter" ]]; then
    python3 "$converter" "${candidates[@]}" --output-dir "/rust/bzzz-v2/data/blobs/data"
    success "Conversation data migrated to content-addressed storage"
  else
    warn "Migration script not found, skipping conversation data migration"
  fi
}
|
|
|
|
# Register Swarm secrets for the stack. `docker secret create` fails when
# the secret already exists, so each call tolerates failure (|| true) to
# keep re-runs idempotent. Honors DRY_RUN.
setup_docker_secrets() {
  log "Setting up Docker secrets..."

  if [[ "$DRY_RUN" == "true" ]]; then
    log "[DRY RUN] Would create Docker secrets"
    return 0
  fi

  local secrets_dir="/home/tony/chorus/business/secrets"

  # PostgreSQL password: reuse the stored secret, otherwise mint a random one.
  if [[ -f "$secrets_dir/postgres-bzzz-password" ]]; then
    docker secret create bzzz_postgres_password "$secrets_dir/postgres-bzzz-password" 2>/dev/null || true
  else
    openssl rand -base64 32 | docker secret create bzzz_postgres_password - 2>/dev/null || true
  fi

  # OpenAI API key: optional at migration time, so only warn when absent.
  if [[ -f "$secrets_dir/openai-api-key" ]]; then
    docker secret create bzzz_openai_api_key "$secrets_dir/openai-api-key" 2>/dev/null || true
  else
    warn "OpenAI API key not found in secrets directory"
  fi

  success "Docker secrets configured"
}
|
|
|
|
# Register the stack's Swarm config objects (agent, MCP server, OpenAI
# proxy, Redis). `docker config create` fails if the object already exists,
# so each call tolerates failure (|| true) to keep re-runs idempotent.
# Honors DRY_RUN.
setup_docker_configs() {
  log "Setting up Docker configs..."

  if [[ "$DRY_RUN" == "true" ]]; then
    log "[DRY RUN] Would create Docker configs"
    return 0
  fi

  # Main BZZZ config (written earlier by setup_v2_infrastructure).
  docker config create bzzz_v2_config /rust/bzzz-v2/config/bzzz-config.yaml 2>/dev/null || true

  # Stage the inline configs through one unpredictable temp file instead of
  # fixed names like /tmp/mcp-config.yaml — fixed names in shared /tmp are
  # open to clobbering and symlink races.
  local tmp_cfg
  tmp_cfg=$(mktemp) || error "mktemp failed"

  # MCP server config (quoted 'EOF': written verbatim, no expansion).
  cat > "$tmp_cfg" << 'EOF'
server:
  port: 3001
  max_connections: 1000
  timeout_seconds: 30

tools:
  enabled: true
  max_execution_time: 300

logging:
  level: info
  format: json
EOF
  docker config create bzzz_mcp_config "$tmp_cfg" 2>/dev/null || true

  # OpenAI proxy config.
  cat > "$tmp_cfg" << 'EOF'
openai:
  rate_limit:
    requests_per_minute: 1000
    tokens_per_minute: 100000
  cost_tracking:
    enabled: true
    log_requests: true
  models:
    - "gpt-4"
    - "gpt-4-turbo"
    - "gpt-3.5-turbo"

server:
  port: 3002
  timeout: 30s
EOF
  docker config create bzzz_proxy_config "$tmp_cfg" 2>/dev/null || true

  # Redis config.
  # NOTE(review): Redis's directive is "tcp-keepalive"; plain "keepalive"
  # below may be ignored or rejected — confirm against the Redis version used.
  cat > "$tmp_cfg" << 'EOF'
bind 0.0.0.0
port 6379
timeout 0
keepalive 300
maxclients 10000
maxmemory 1gb
maxmemory-policy allkeys-lru
save 900 1
save 300 10
save 60 10000
EOF
  docker config create bzzz_redis_config "$tmp_cfg" 2>/dev/null || true

  rm -f -- "$tmp_cfg"

  success "Docker configs created"
}
|
|
|
|
# Validate the compose file, deploy the bzzz-v2 stack, and poll up to
# max_wait seconds until every service reports full replica counts.
# Aborts via error() on validation failure or readiness timeout.
# Honors DRY_RUN.
deploy_v2_stack() {
  log "Deploying BZZZ v2 Docker stack..."

  if [[ "$DRY_RUN" == "true" ]]; then
    log "[DRY RUN] Would deploy Docker stack with: docker stack deploy -c docker-compose.swarm.yml bzzz-v2"
    return 0
  fi

  cd "$SCRIPT_DIR/.." || error "Cannot change to repository root"

  # Verify compose file before deploying anything.
  if ! docker-compose -f infrastructure/docker-compose.swarm.yml config > /dev/null; then
    error "Docker compose file validation failed"
  fi

  # Deploy the stack
  docker stack deploy -c infrastructure/docker-compose.swarm.yml bzzz-v2

  # Wait for services to start
  log "Waiting for services to become ready..."
  local max_wait=300 # 5 minutes
  local wait_time=0
  local ready_services total_services

  while [[ $wait_time -lt $max_wait ]]; do
    # Parse the Replicas column ("x/y") numerically: a service is ready when
    # x == y and y > 0. The previous substring test `grep -v "0/"` wrongly
    # rejected healthy counts like "10/10" (which contain "0/"), so the loop
    # could never succeed once a service had >=10 replicas. Dropping the
    # "table" format prefix also removes the header line that skewed counts.
    ready_services=$(docker service ls --filter label=com.docker.stack.namespace=bzzz-v2 \
      --format '{{.Replicas}}' | awk -F/ '$1+0 == $2+0 && $2+0 > 0' | wc -l)
    total_services=$(docker service ls --filter label=com.docker.stack.namespace=bzzz-v2 \
      --format '{{.Name}}' | wc -l)

    # Require at least one service so an empty stack doesn't pass as ready.
    if [[ $total_services -gt 0 && $ready_services -eq $total_services ]]; then
      success "All services are ready"
      break
    fi

    log "Waiting for services... ($ready_services/$total_services ready)"
    sleep 10
    wait_time=$((wait_time + 10))
  done

  if [[ $wait_time -ge $max_wait ]]; then
    error "Timeout waiting for services to become ready"
  fi
}
|
|
|
|
# Post-deploy sanity checks: core services must show a Running task, and the
# DHT bootstrap / MCP server / content resolver must answer HTTP health
# probes. Service failures abort; failed health probes only warn.
verify_v2_deployment() {
  log "Verifying v2 deployment..."

  # Core services must have at least one Running task.
  local svc
  for svc in bzzz-v2_bzzz-agent bzzz-v2_postgres bzzz-v2_redis bzzz-v2_mcp-server; do
    if ! docker service ps "$svc" | grep -q "Running"; then
      error "Service $svc is not running properly"
    fi
  done

  # Probe the walnut DHT bootstrap node from inside its container.
  log "Testing DHT connectivity..."
  if ! timeout 30 docker exec "$(docker ps -q -f label=com.docker.swarm.service.name=bzzz-v2_dht-bootstrap-walnut)" \
      curl -f http://localhost:9101/health > /dev/null 2>&1; then
    warn "DHT bootstrap node (walnut) health check failed"
  fi

  # MCP server health endpoint.
  log "Testing MCP server..."
  if ! timeout 10 curl -f http://localhost:3001/health > /dev/null 2>&1; then
    warn "MCP server health check failed"
  fi

  # Content resolver health endpoint.
  log "Testing content resolver..."
  if ! timeout 10 curl -f http://localhost:3003/health > /dev/null 2>&1; then
    warn "Content resolver health check failed"
  fi

  success "v2 deployment verification completed"
}
|
|
|
|
# Label every swarm node bzzz.role=agent so the stack's placement
# constraints can schedule agent workloads anywhere. Honors DRY_RUN.
update_node_labels() {
  log "Updating Docker node labels for service placement..."

  if [[ "$DRY_RUN" == "true" ]]; then
    log "[DRY RUN] Would update node labels"
    return 0
  fi

  local swarm_node
  for swarm_node in walnut ironwood acacia; do
    docker node update --label-add bzzz.role=agent "$swarm_node" 2>/dev/null || true
  done

  success "Node labels updated"
}
|
|
|
|
# Retire v1: delete its systemd unit files (copies remain in BACKUP_DIR)
# and move the old binary into the backup. Honors DRY_RUN.
cleanup_v1_artifacts() {
  log "Cleaning up v1 artifacts..."

  if [[ "$DRY_RUN" == "true" ]]; then
    log "[DRY RUN] Would clean up v1 systemd files and binaries"
    return 0
  fi

  # Remove unit files; backup_v1_data already preserved a copy.
  local unit
  for unit in bzzz.service bzzz@.service; do
    sudo rm -f "/etc/systemd/system/$unit"
  done
  sudo systemctl daemon-reload

  # Park the v1 binary in the backup rather than deleting it.
  local v1_binary="/home/tony/chorus/project-queues/active/BZZZ/bzzz"
  if [[ -f "$v1_binary" ]]; then
    mv "$v1_binary" "$BACKUP_DIR/bzzz-v1-binary"
  fi

  success "v1 cleanup completed"
}
|
|
|
|
# Emit a human-readable recap of the migration: what was done, where the
# services are reachable, how to monitor them, and where the backup lives.
print_migration_summary() {
  local summary=(
    "Migration Summary:"
    "=================="
    "✅ v1 services stopped and disabled"
    "✅ v2 infrastructure deployed to Docker Swarm"
    "✅ Data migrated to content-addressed storage"
    "✅ DHT network established across 3 nodes"
    "✅ MCP server and OpenAI proxy deployed"
    "✅ Monitoring and health checks configured"
    ""
    "Access Points:"
    "- BZZZ Agent API: https://bzzz.deepblack.cloud"
    "- MCP Server: https://mcp.deepblack.cloud"
    "- Content Resolver: https://resolve.deepblack.cloud"
    "- OpenAI Proxy: https://openai.deepblack.cloud"
    ""
    "Monitoring:"
    "- docker service ls --filter label=com.docker.stack.namespace=bzzz-v2"
    "- docker stack ps bzzz-v2"
    "- docker service logs bzzz-v2_bzzz-agent"
    ""
    "Backup Location: $BACKUP_DIR"
    "Migration Log: $LOG_FILE"
  )
  local line
  for line in "${summary[@]}"; do
    log "$line"
  done
}
|
|
|
|
# Emergency rollback: remove the v2 stack and restore the v1 systemd unit
# and binary from backup.
rollback_to_v1() {
  log "Rolling back to v1..."

  # BACKUP_DIR is re-timestamped on every invocation, so a standalone
  # `--rollback` run would point at a brand-new empty directory and restore
  # nothing. When the current BACKUP_DIR holds no artifacts, fall back to
  # the most recent existing backup.
  local restore_dir="$BACKUP_DIR"
  if [[ ! -f "$restore_dir/bzzz.service" && ! -f "$restore_dir/bzzz-v1-binary" ]]; then
    local latest
    latest=$(ls -td /rust/bzzz-v2/backup/*/ 2>/dev/null | head -1 || true)
    if [[ -n "$latest" ]]; then
      restore_dir="${latest%/}"
      warn "No artifacts in $BACKUP_DIR; restoring from $restore_dir instead"
    fi
  fi

  # Stop v2 services and give swarm time to tear tasks/networks down.
  docker stack rm bzzz-v2 2>/dev/null || true
  sleep 30

  # Restore v1 systemd service
  if [[ -f "$restore_dir/bzzz.service" ]]; then
    sudo cp "$restore_dir/bzzz.service" /etc/systemd/system/
    sudo systemctl daemon-reload
    sudo systemctl enable bzzz
    sudo systemctl start bzzz
  fi

  # Restore v1 binary
  if [[ -f "$restore_dir/bzzz-v1-binary" ]]; then
    cp "$restore_dir/bzzz-v1-binary" "/home/tony/chorus/project-queues/active/BZZZ/bzzz"
    chmod +x "/home/tony/chorus/project-queues/active/BZZZ/bzzz"
  fi

  success "Rollback to v1 completed"
}
|
|
|
|
# Orchestrates the full v1 -> v2 migration. The step order is load-bearing:
# backup happens before services stop, secrets/configs/labels exist before
# the stack deploys, and v1 cleanup runs only after verification passes.
main() {
  log "Starting BZZZ v1 to v2 migration..."
  log "DRY_RUN mode: $DRY_RUN"

  # Handle rollback if requested
  if [[ "${1:-}" == "--rollback" ]]; then
    rollback_to_v1
    return 0
  fi

  # Trap to handle errors: any command failing under set -e reports the
  # failing line number, then error() exits 1.
  trap 'error "Migration failed at line $LINENO"' ERR

  check_prerequisites
  backup_v1_data
  stop_v1_services
  setup_v2_infrastructure
  migrate_conversation_data
  setup_docker_secrets
  setup_docker_configs
  update_node_labels
  deploy_v2_stack
  verify_v2_deployment
  cleanup_v1_artifacts
  print_migration_summary

  success "BZZZ v2 migration completed successfully!"
  log "Run with --rollback to revert to v1 if needed"
}
|
|
|
|
# Handle script arguments.
# DRY_RUN may also be preset via the environment; --dry-run forces it on.
# Any unrecognized (or absent) argument runs the full migration.
case "${1:-}" in
  --dry-run)
    DRY_RUN=true
    main
    ;;
  --rollback)
    main --rollback
    ;;
  --help|-h)
    echo "Usage: $0 [--dry-run|--rollback|--help]"
    echo ""
    echo "Options:"
    echo "  --dry-run   Preview migration steps without making changes"
    echo "  --rollback  Rollback to v1 (emergency use only)"
    echo "  --help      Show this help message"
    exit 0
    ;;
  *)
    main
    ;;
esac