Major WHOOSH system refactoring and feature enhancements
- Migrated from HIVE branding to WHOOSH across all components
- Enhanced backend API with new services: AI models, BZZZ integration, templates, members
- Added comprehensive testing suite with security, performance, and integration tests
- Improved frontend with new components for project setup, AI models, and team management
- Updated MCP server implementation with WHOOSH-specific tools and resources
- Enhanced deployment configurations with production-ready Docker setups
- Added comprehensive documentation and setup guides
- Implemented age encryption service and UCXL integration

🤖 Generated with [Claude Code](https://claude.ai/code)

Co-Authored-By: Claude <noreply@anthropic.com>
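The diff below renames one of the deployment scripts as part of the HIVE→WHOOSH rebrand. A quick way to confirm no stray references to the old branding remain is to grep the working tree; a minimal sketch, assuming the project path used in the script (the excluded directories are illustrative, not part of this commit):

```bash
# Sketch: look for leftover "hive" references after the rebrand.
# Path and excluded directories are assumptions, not part of this commit.
grep -rni --exclude-dir=venv --exclude-dir=node_modules "hive" \
    /home/tony/AI/projects/whoosh || echo "No leftover references found"
```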
@@ -1,6 +1,6 @@
 #!/bin/bash
 
-# Distributed Hive Workflow Deployment Script
+# Distributed WHOOSH Workflow Deployment Script
 # Deploys the enhanced distributed development workflow system across the cluster
 
 set -e
@@ -13,7 +13,7 @@ BLUE='\033[0;34m'
 NC='\033[0m' # No Color
 
 # Configuration
-PROJECT_ROOT="/home/tony/AI/projects/hive"
+PROJECT_ROOT="/home/tony/AI/projects/whoosh"
 CLUSTER_NODES=("192.168.1.72" "192.168.1.27" "192.168.1.113" "192.168.1.132" "192.168.1.106")
 CLUSTER_NAMES=("ACACIA" "WALNUT" "IRONWOOD" "ROSEWOOD" "FORSTEINET")
 SSH_USER="tony"
@@ -98,8 +98,8 @@ setup_redis() {
     sudo systemctl enable redis-server
 
     # Configure Redis for cluster coordination
-    sudo tee /etc/redis/redis.conf.d/hive-distributed.conf > /dev/null <<EOF
-# Hive Distributed Workflow Configuration
+    sudo tee /etc/redis/redis.conf.d/whoosh-distributed.conf > /dev/null <<EOF
+# WHOOSH Distributed Workflow Configuration
 maxmemory 512mb
 maxmemory-policy allkeys-lru
 save 900 1
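A note on the Redis drop-in above: the settings only take effect if the main redis.conf actually includes the `/etc/redis/redis.conf.d/` directory, which not every distribution does by default. A minimal verification sketch, assuming `redis-cli` is available on the node (not part of this commit):

```bash
# Sketch: confirm the cluster-coordination settings were loaded after a restart.
sudo systemctl restart redis-server
redis-cli CONFIG GET maxmemory          # expect 536870912 (512mb)
redis-cli CONFIG GET maxmemory-policy   # expect allkeys-lru
```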
@@ -143,7 +143,7 @@ deploy_cluster_config() {
 
     # Create configuration package
     cd "$PROJECT_ROOT"
-    tar -czf /tmp/hive-distributed-config.tar.gz config/distributed_config.yaml
+    tar -czf /tmp/whoosh-distributed-config.tar.gz config/distributed_config.yaml
 
     for i in "${!CLUSTER_NODES[@]}"; do
         node="${CLUSTER_NODES[$i]}"
@@ -152,13 +152,13 @@ deploy_cluster_config() {
         log "Deploying to $name ($node)..."
 
         # Copy configuration
-        sshpass -p "$SSH_PASS" scp -o StrictHostKeyChecking=no /tmp/hive-distributed-config.tar.gz "$SSH_USER@$node:/tmp/"
+        sshpass -p "$SSH_PASS" scp -o StrictHostKeyChecking=no /tmp/whoosh-distributed-config.tar.gz "$SSH_USER@$node:/tmp/"
 
         # Extract and setup configuration
         sshpass -p "$SSH_PASS" ssh -o StrictHostKeyChecking=no "$SSH_USER@$node" "
-            mkdir -p /home/$SSH_USER/AI/projects/hive/config
-            cd /home/$SSH_USER/AI/projects/hive/config
-            tar -xzf /tmp/hive-distributed-config.tar.gz
+            mkdir -p /home/$SSH_USER/AI/projects/whoosh/config
+            cd /home/$SSH_USER/AI/projects/whoosh/config
+            tar -xzf /tmp/whoosh-distributed-config.tar.gz
             chmod 644 distributed_config.yaml
         "
 
@@ -166,7 +166,7 @@ deploy_cluster_config() {
     done
 
     # Clean up
-    rm -f /tmp/hive-distributed-config.tar.gz
+    rm -f /tmp/whoosh-distributed-config.tar.gz
 }
 
 # Update Ollama configurations for distributed workflows
@@ -209,9 +209,9 @@ start_distributed_system() {
     cd "$PROJECT_ROOT/backend"
     source venv/bin/activate
 
-    # Start the main Hive application with distributed workflows
+    # Start the main WHOOSH application with distributed workflows
     export PYTHONPATH="$PROJECT_ROOT/backend:$PYTHONPATH"
-    export HIVE_CONFIG_PATH="$PROJECT_ROOT/config/distributed_config.yaml"
+    export WHOOSH_CONFIG_PATH="$PROJECT_ROOT/config/distributed_config.yaml"
 
     # Run database migrations
     log "Running database migrations..."
@@ -222,23 +222,23 @@ print('Database initialized')
 "
 
     # Start the application in the background
-    log "Starting Hive with distributed workflows..."
+    log "Starting WHOOSH with distributed workflows..."
     nohup python -m uvicorn app.main:app \
         --host 0.0.0.0 \
         --port 8000 \
         --reload \
-        --log-level info > /tmp/hive-distributed.log 2>&1 &
+        --log-level info > /tmp/whoosh-distributed.log 2>&1 &
 
-    HIVE_PID=$!
-    echo $HIVE_PID > /tmp/hive-distributed.pid
+    WHOOSH_PID=$!
+    echo $WHOOSH_PID > /tmp/whoosh-distributed.pid
 
     # Wait for startup
     sleep 10
 
     # Check if the service is running
-    if kill -0 $HIVE_PID 2>/dev/null; then
-        success "Distributed workflow system started (PID: $HIVE_PID)"
-        log "Application logs: tail -f /tmp/hive-distributed.log"
+    if kill -0 $WHOOSH_PID 2>/dev/null; then
+        success "Distributed workflow system started (PID: $WHOOSH_PID)"
+        log "Application logs: tail -f /tmp/whoosh-distributed.log"
         log "Health check: curl http://localhost:8000/health"
         log "Distributed API: curl http://localhost:8000/api/distributed/cluster/status"
     else
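The startup check above sleeps a fixed 10 seconds and then probes the PID; polling the health endpoint the script already advertises gives a clearer readiness signal. A minimal sketch using the same port and endpoint, not part of this commit:

```bash
# Sketch: wait for the API to answer on /health instead of a fixed sleep.
for attempt in $(seq 1 30); do
    if curl -sf http://localhost:8000/health > /dev/null; then
        echo "WHOOSH API healthy after ${attempt} attempt(s)"
        break
    fi
    sleep 2
done
```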
@@ -297,9 +297,9 @@ except:
 create_systemd_service() {
     log "Creating systemd service for production deployment..."
 
-    sudo tee /etc/systemd/system/hive-distributed.service > /dev/null <<EOF
+    sudo tee /etc/systemd/system/whoosh-distributed.service > /dev/null <<EOF
 [Unit]
-Description=Hive Distributed Workflow System
+Description=WHOOSH Distributed Workflow System
 After=network.target redis.service
 Wants=redis.service
 
@@ -309,7 +309,7 @@ User=$USER
 Group=$USER
 WorkingDirectory=$PROJECT_ROOT/backend
 Environment=PYTHONPATH=$PROJECT_ROOT/backend
-Environment=HIVE_CONFIG_PATH=$PROJECT_ROOT/config/distributed_config.yaml
+Environment=WHOOSH_CONFIG_PATH=$PROJECT_ROOT/config/distributed_config.yaml
 ExecStart=$PROJECT_ROOT/backend/venv/bin/python -m uvicorn app.main:app --host 0.0.0.0 --port 8000
 ExecReload=/bin/kill -HUP \$MAINPID
 Restart=always
@@ -323,28 +323,28 @@ EOF
 
     # Enable the service
     sudo systemctl daemon-reload
-    sudo systemctl enable hive-distributed.service
+    sudo systemctl enable whoosh-distributed.service
 
     success "Systemd service created and enabled"
-    log "Use 'sudo systemctl start hive-distributed' to start the service"
-    log "Use 'sudo systemctl status hive-distributed' to check status"
+    log "Use 'sudo systemctl start whoosh-distributed' to start the service"
+    log "Use 'sudo systemctl status whoosh-distributed' to check status"
 }
 
 # Generate deployment report
 generate_report() {
     log "Generating deployment report..."
 
-    report_file="/tmp/hive-distributed-deployment-report.txt"
+    report_file="/tmp/whoosh-distributed-deployment-report.txt"
 
     cat > "$report_file" <<EOF
-# Hive Distributed Workflow System - Deployment Report
+# WHOOSH Distributed Workflow System - Deployment Report
 Generated: $(date)
 
 ## Deployment Summary
 - Project Directory: $PROJECT_ROOT
 - Configuration: $PROJECT_ROOT/config/distributed_config.yaml
-- Log File: /tmp/hive-distributed.log
-- PID File: /tmp/hive-distributed.pid
+- Log File: /tmp/whoosh-distributed.log
+- PID File: /tmp/whoosh-distributed.pid
 
 ## Cluster Configuration
 EOF
@@ -366,11 +366,11 @@ EOF
 - Performance Metrics: http://localhost:8000/api/distributed/performance/metrics
 
 ## Management Commands
-- Start Service: sudo systemctl start hive-distributed
-- Stop Service: sudo systemctl stop hive-distributed
-- Restart Service: sudo systemctl restart hive-distributed
-- View Logs: sudo journalctl -u hive-distributed -f
-- View Application Logs: tail -f /tmp/hive-distributed.log
+- Start Service: sudo systemctl start whoosh-distributed
+- Stop Service: sudo systemctl stop whoosh-distributed
+- Restart Service: sudo systemctl restart whoosh-distributed
+- View Logs: sudo journalctl -u whoosh-distributed -f
+- View Application Logs: tail -f /tmp/whoosh-distributed.log
 
 ## Cluster Operations
 - Check Cluster Status: curl http://localhost:8000/api/distributed/cluster/status
@@ -400,13 +400,13 @@ EOF
 main() {
     echo -e "${GREEN}"
     echo "╔══════════════════════════════════════════════════════════════╗"
-    echo "║ Hive Distributed Workflow Deployment ║"
+    echo "║ WHOOSH Distributed Workflow Deployment ║"
     echo "║ ║"
     echo "║ Deploying cluster-wide development workflow orchestration ║"
    echo "╚══════════════════════════════════════════════════════════════╝"
     echo -e "${NC}"
 
-    log "Starting deployment of Hive Distributed Workflow System..."
+    log "Starting deployment of WHOOSH Distributed Workflow System..."
 
     # Run deployment steps
     check_prerequisites
@@ -424,7 +424,7 @@ main() {
     echo "╔══════════════════════════════════════════════════════════════╗"
     echo "║ Deployment Completed! ║"
     echo "║ ║"
-    echo "║ 🚀 Hive Distributed Workflow System is now running ║"
+    echo "║ 🚀 WHOOSH Distributed Workflow System is now running ║"
     echo "║ 📊 Visit http://localhost:8000/docs for API documentation ║"
     echo "║ 🌐 Cluster status: http://localhost:8000/api/distributed/ ║"
     echo "║ cluster/status ║"
@@ -438,24 +438,24 @@ case "${1:-deploy}" in
         main
         ;;
     "start")
-        log "Starting Hive Distributed Workflow System..."
-        sudo systemctl start hive-distributed
+        log "Starting WHOOSH Distributed Workflow System..."
+        sudo systemctl start whoosh-distributed
         ;;
     "stop")
-        log "Stopping Hive Distributed Workflow System..."
-        sudo systemctl stop hive-distributed
-        if [ -f /tmp/hive-distributed.pid ]; then
-            kill $(cat /tmp/hive-distributed.pid) 2>/dev/null || true
-            rm -f /tmp/hive-distributed.pid
+        log "Stopping WHOOSH Distributed Workflow System..."
+        sudo systemctl stop whoosh-distributed
+        if [ -f /tmp/whoosh-distributed.pid ]; then
+            kill $(cat /tmp/whoosh-distributed.pid) 2>/dev/null || true
+            rm -f /tmp/whoosh-distributed.pid
         fi
         ;;
     "status")
         log "Checking system status..."
-        sudo systemctl status hive-distributed
+        sudo systemctl status whoosh-distributed
         ;;
     "logs")
         log "Showing application logs..."
-        tail -f /tmp/hive-distributed.log
+        tail -f /tmp/whoosh-distributed.log
         ;;
     "health")
         log "Running health checks..."
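For reference, the script's entry point dispatches on its first argument and defaults to `deploy`, so day-to-day use looks roughly like the sketch below. The file name and path are placeholders, since the diff view does not show them:

```bash
# Sketch: typical invocations (script name and path are assumed, not shown in the diff).
cd /home/tony/AI/projects/whoosh
./deploy-distributed-workflows.sh deploy   # full deployment (default action)
./deploy-distributed-workflows.sh status   # sudo systemctl status whoosh-distributed
./deploy-distributed-workflows.sh logs     # tail -f /tmp/whoosh-distributed.log
./deploy-distributed-workflows.sh health   # run cluster health checks
```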