Enhance deployment system with retry functionality and improved UX

Major Improvements:
- Added retry deployment buttons in machine list for failed deployments
- Added retry button in SSH console modal footer for enhanced UX
- Enhanced deployment process with comprehensive cleanup of existing services
- Improved binary installation with password-based sudo authentication
- Updated configuration generation to include all required sections (agent, ai, network, security); a sketch of the layout follows this list
- Fixed deployment verification and error handling
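
The generated configuration itself is not shown in this commit, so the following Go sketch is purely illustrative: it names the four required sections from the bullet above, but every field name, tag, and the choice of YAML encoding are assumptions rather than the project's actual schema.

```go
// Package config: hypothetical layout of the generated configuration.
// Section names come from the commit message; all fields are illustrative.
package config

type Config struct {
	Agent    AgentConfig    `yaml:"agent"`    // identity and runtime settings for the deployed agent
	AI       AIConfig       `yaml:"ai"`       // model and endpoint settings (e.g. the Ollama endpoint in the diff below)
	Network  NetworkConfig  `yaml:"network"`  // listen addresses and peer discovery
	Security SecurityConfig `yaml:"security"` // keys and trust policy
}

type AgentConfig struct {
	ID   string `yaml:"id"`
	Role string `yaml:"role"`
}

type AIConfig struct {
	OllamaEndpoint string `yaml:"ollama_endpoint"` // consumed by SetOllamaEndpoint in the diff below
	DefaultModel   string `yaml:"default_model"`
}

type NetworkConfig struct {
	ListenAddr string `yaml:"listen_addr"`
}

type SecurityConfig struct {
	KeyPath string `yaml:"key_path"`
}
```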

Security Enhancements:
- Enhanced verifiedStopExistingServices with a thorough cleanup process
- Improved binary copying with proper sudo authentication (see the sketch after this list)
- Added comprehensive configuration validation
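
The actual helper in api/setup_manager.go is not shown in this excerpt; as a hedged sketch of password-based sudo installation over SSH (using golang.org/x/crypto/ssh, with hypothetical function, path, and credential names), the pattern is to pipe the password to `sudo -S` rather than depend on a passwordless sudoers entry:

```go
// Package deploy: illustrative sketch only; not the repository's real code.
package deploy

import (
	"fmt"
	"strings"

	"golang.org/x/crypto/ssh"
)

// installBinary moves a staged binary into place with sudo, feeding the
// password on stdin. The client is assumed to be an already-connected SSH client.
func installBinary(client *ssh.Client, password, stagedPath, destPath string) error {
	session, err := client.NewSession()
	if err != nil {
		return fmt.Errorf("new session: %w", err)
	}
	defer session.Close()

	// -S makes sudo read the password from stdin; -p '' suppresses the prompt.
	cmd := fmt.Sprintf("sudo -S -p '' install -m 0755 %s %s", stagedPath, destPath)
	session.Stdin = strings.NewReader(password + "\n")

	if out, err := session.CombinedOutput(cmd); err != nil {
		return fmt.Errorf("install failed: %w: %s", err, out)
	}
	return nil
}
```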

UX Improvements:
- Users can retry deployments without re-running machine discovery
- Retry buttons available from both machine list and console modal
- Real-time deployment progress with detailed console output
- Clear error states with actionable retry options

Technical Changes:
- Modified ServiceDeployment.tsx with retry button components
- Enhanced api/setup_manager.go with improved deployment functions
- Updated main.go with command-line argument support (--config, --setup); see the flag-parsing sketch after this list
- Added comprehensive zero-trust security validation system
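
Go's standard flag package accepts both single- and double-dash forms, so --config and --setup can be wired up without extra dependencies. A minimal sketch follows; the flag names come from the bullet above, while the default config path and the runSetup/runAgent entry points are hypothetical.

```go
package main

import (
	"flag"
	"log"
)

func main() {
	// flag treats -config and --config identically.
	configPath := flag.String("config", "config.yaml", "path to the configuration file")
	setupMode := flag.Bool("setup", false, "run the setup manager instead of the agent")
	flag.Parse()

	if *setupMode {
		log.Printf("starting setup using %s", *configPath)
		// runSetup(*configPath) — hypothetical entry point
		return
	}
	log.Printf("starting agent with config %s", *configPath)
	// runAgent(*configPath) — hypothetical entry point
}
```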

🤖 Generated with [Claude Code](https://claude.ai/code)

Co-Authored-By: Claude <noreply@anthropic.com>
Author: anthonyrawlins
Date:   2025-08-31 10:23:27 +10:00
Parent: df4d98bf30
Commit: be761cfe20
234 changed files with 7508 additions and 38528 deletions

@@ -11,7 +11,6 @@ import (
 )
 
 const (
-	ollamaAPIURL   = "http://localhost:11434/api/generate"
 	defaultTimeout = 60 * time.Second
 )
 
@@ -19,6 +18,7 @@ var (
 	availableModels []string
 	modelWebhookURL string
 	defaultModel    string
+	ollamaEndpoint  string = "http://localhost:11434" // Default fallback
 )
 
 // OllamaRequest represents the request payload for the Ollama API.
@@ -56,7 +56,8 @@ func GenerateResponse(ctx context.Context, model, prompt string) (string, error)
 	}
 
 	// Create the HTTP request
-	req, err := http.NewRequestWithContext(ctx, "POST", ollamaAPIURL, bytes.NewBuffer(payloadBytes))
+	apiURL := ollamaEndpoint + "/api/generate"
+	req, err := http.NewRequestWithContext(ctx, "POST", apiURL, bytes.NewBuffer(payloadBytes))
 	if err != nil {
 		return "", fmt.Errorf("failed to create http request: %w", err)
 	}
@@ -91,6 +92,11 @@ func SetModelConfig(models []string, webhookURL, defaultReasoningModel string) {
 	defaultModel = defaultReasoningModel
 }
 
+// SetOllamaEndpoint configures the Ollama API endpoint
+func SetOllamaEndpoint(endpoint string) {
+	ollamaEndpoint = endpoint
+}
+
 // selectBestModel calls the model selection webhook to choose the best model for a prompt
 func selectBestModel(availableModels []string, prompt string) string {
 	if modelWebhookURL == "" || len(availableModels) == 0 {
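
Taken together, these hunks replace the hard-coded ollamaAPIURL constant with a configurable ollamaEndpoint that callers set once during startup. A sketch of how a caller might wire this in is below; the import path, endpoint value, and model name are hypothetical, while SetOllamaEndpoint and GenerateResponse match the signatures in the diff.

```go
package main

import (
	"context"
	"log"

	"example.com/project/reasoning" // hypothetical import path for the package in the diff
)

func main() {
	endpoint := "http://192.168.1.50:11434" // e.g. read from the ai section of the generated config

	// Falls back to http://localhost:11434 if never called, per the default in the diff.
	reasoning.SetOllamaEndpoint(endpoint)

	reply, err := reasoning.GenerateResponse(context.Background(), "llama3", "ping")
	if err != nil {
		log.Fatalf("generate: %v", err)
	}
	log.Println(reply)
}
```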