Files
bzzz/pkg/election/slurp_scoring.go
anthonyrawlins d96c931a29 Resolve import cycles and migrate to chorus.services module path
This comprehensive refactoring addresses critical architectural issues:

IMPORT CYCLE RESOLUTION:
• pkg/crypto ↔ pkg/slurp/roles: Created pkg/security/access_levels.go
• pkg/ucxl → pkg/dht: Created pkg/storage/interfaces.go
• pkg/slurp/leader → pkg/election → pkg/slurp/storage: Moved types to pkg/election/interfaces.go

MODULE PATH MIGRATION:
• Changed from github.com/anthonyrawlins/bzzz to chorus.services/bzzz
• Updated all import statements across 115+ files
• Maintains compatibility while removing personal GitHub account dependency

TYPE SYSTEM IMPROVEMENTS:
• Resolved duplicate type declarations in crypto package
• Added missing type definitions (RoleStatus, TimeRestrictions, KeyStatus, KeyRotationResult)
• Proper interface segregation to prevent future cycles

ARCHITECTURAL BENEFITS:
• Build now progresses past structural issues to normal dependency resolution
• Cleaner separation of concerns between packages
• Eliminates circular dependencies that prevented compilation
• Establishes foundation for scalable codebase growth

🤖 Generated with [Claude Code](https://claude.ai/code)

Co-Authored-By: Claude <noreply@anthropic.com>
2025-08-17 10:04:25 +10:00

559 lines
24 KiB
Go

package election
import (
"fmt"
"log"
"time"
"chorus.services/bzzz/pkg/config"
)
// SLURPCandidateCapabilities represents SLURP-specific capabilities for election candidates.
//
// All *Score fields and UptimePercentage are expected to be normalized to the
// [0,1] range; the scorer multiplies and averages them without rescaling.
type SLURPCandidateCapabilities struct {
	// Context generation capabilities
	ContextGeneration   bool `json:"context_generation"`   // Can generate context
	ContextCuration     bool `json:"context_curation"`     // Can curate context
	ContextDistribution bool `json:"context_distribution"` // Can distribute context
	ContextStorage      bool `json:"context_storage"`      // Has context storage

	// Intelligence capabilities
	SemanticAnalysis bool `json:"semantic_analysis"` // Can perform semantic analysis
	RAGIntegration   bool `json:"rag_integration"`   // Has RAG integration
	TemporalAnalysis bool `json:"temporal_analysis"` // Can do temporal analysis
	DecisionTracking bool `json:"decision_tracking"` // Can track decisions

	// Coordination capabilities
	ClusterCoordination bool `json:"cluster_coordination"` // Can coordinate cluster
	LoadBalancing       bool `json:"load_balancing"`       // Can balance load
	HealthMonitoring    bool `json:"health_monitoring"`    // Can monitor health
	ResourceManagement  bool `json:"resource_management"`  // Can manage resources

	// Quality and performance metrics
	GenerationQuality float64 `json:"generation_quality"` // Context generation quality (0-1)
	ProcessingSpeed   float64 `json:"processing_speed"`   // Processing speed score (0-1)
	AccuracyScore     float64 `json:"accuracy_score"`     // Accuracy score (0-1)
	ReliabilityScore  float64 `json:"reliability_score"`  // Reliability score (0-1)

	// Historical performance
	SuccessfulOperations int64         `json:"successful_operations"` // Number of successful operations
	FailedOperations     int64         `json:"failed_operations"`     // Number of failed operations
	AverageResponseTime  time.Duration `json:"average_response_time"` // Average response time
	UptimePercentage     float64       `json:"uptime_percentage"`     // Uptime percentage, expressed as a 0-1 fraction

	// Specialized capabilities
	Languages       []string `json:"languages"`        // Programming languages supported
	Frameworks      []string `json:"frameworks"`       // Frameworks supported
	Technologies    []string `json:"technologies"`     // Technologies supported
	DomainExpertise []string `json:"domain_expertise"` // Domain expertise areas

	// Resource availability
	AvailableCPU     float64 `json:"available_cpu"`     // Available CPU cores
	AvailableMemory  int64   `json:"available_memory"`  // Available memory in bytes
	AvailableStorage int64   `json:"available_storage"` // Available storage in bytes
	NetworkBandwidth int64   `json:"network_bandwidth"` // Network bandwidth (scoring treats ~1 GiB/s as excellent; presumably bytes/sec — confirm units)

	// Configuration and preferences
	MaxConcurrentTasks     int      `json:"max_concurrent_tasks"`     // Maximum concurrent tasks
	PreferredTaskTypes     []string `json:"preferred_task_types"`     // Preferred task types
	SpecializationScore    float64  `json:"specialization_score"`     // Specialization score (0-1)
	GeneralCapabilityScore float64  `json:"general_capability_score"` // General capability score (0-1)
}
// SLURPScoringWeights defines weights for SLURP-specific candidate scoring.
//
// The final score divides the weighted sum by the sum of all weights, so the
// weights need not sum to 1.0 — only their relative magnitudes matter.
type SLURPScoringWeights struct {
	// Base election weights (from existing system)
	UptimeWeight     float64 `json:"uptime_weight"`     // Weight for uptime
	CapabilityWeight float64 `json:"capability_weight"` // Weight for capabilities
	ResourceWeight   float64 `json:"resource_weight"`   // Weight for resources
	NetworkWeight    float64 `json:"network_weight"`    // Weight for network quality
	ExperienceWeight float64 `json:"experience_weight"` // Weight for experience

	// SLURP-specific weights
	ContextCapabilityWeight float64 `json:"context_capability_weight"` // Weight for context capabilities
	IntelligenceWeight      float64 `json:"intelligence_weight"`       // Weight for intelligence capabilities
	CoordinationWeight      float64 `json:"coordination_weight"`       // Weight for coordination capabilities
	QualityWeight           float64 `json:"quality_weight"`            // Weight for quality metrics
	PerformanceWeight       float64 `json:"performance_weight"`        // Weight for performance history
	SpecializationWeight    float64 `json:"specialization_weight"`     // Weight for specialization
	AvailabilityWeight      float64 `json:"availability_weight"`       // Weight for resource availability
	ReliabilityWeight       float64 `json:"reliability_weight"`        // Weight for reliability
}
// SLURPCandidateScorer handles SLURP-specific candidate scoring.
//
// Construct via NewSLURPCandidateScorer; the zero value has nil weights and
// requirements and is not usable.
type SLURPCandidateScorer struct {
	weights *SLURPScoringWeights // scoring weights (defaults, optionally overridden from config)
	config  *config.Config       // originating configuration, retained for reference

	// Capability requirements
	requirements *SLURPLeadershipRequirements // minimum bar a candidate must clear

	// Performance thresholds
	// NOTE(review): these thresholds are set in NewSLURPCandidateScorer but are
	// not read by any method visible in this file — requirement checks use the
	// values in requirements instead. Confirm whether they are used elsewhere.
	minQualityScore     float64
	minReliabilityScore float64
	minUptimeThreshold  float64
}
// SLURPLeadershipRequirements defines the minimum requirements a candidate
// must satisfy to be eligible for SLURP leadership. Violations disqualify the
// candidate (final score 0).
type SLURPLeadershipRequirements struct {
	// Required capabilities
	RequiredCapabilities  []string `json:"required_capabilities"`  // Must-have capabilities (by name)
	PreferredCapabilities []string `json:"preferred_capabilities"` // Nice-to-have capabilities (by name)
	MinQualityScore       float64  `json:"min_quality_score"`      // Minimum quality score (0-1)
	MinReliabilityScore   float64  `json:"min_reliability_score"`  // Minimum reliability score (0-1)
	MinUptimePercentage   float64  `json:"min_uptime_percentage"`  // Minimum uptime, as a 0-1 fraction

	// Resource requirements
	MinCPU              float64 `json:"min_cpu"`               // Minimum CPU cores
	MinMemory           int64   `json:"min_memory"`            // Minimum memory in bytes
	MinStorage          int64   `json:"min_storage"`           // Minimum storage in bytes
	MinNetworkBandwidth int64   `json:"min_network_bandwidth"` // Minimum network bandwidth (same units as capabilities' NetworkBandwidth)

	// Experience requirements
	MinSuccessfulOperations int64         `json:"min_successful_operations"` // Minimum successful operations
	MaxFailureRate          float64       `json:"max_failure_rate"`          // Maximum failure rate, as a 0-1 fraction
	MaxResponseTime         time.Duration `json:"max_response_time"`         // Maximum average response time
}
// NewSLURPCandidateScorer creates a new SLURP candidate scorer initialized
// with the default weights and leadership requirements. When the provided
// configuration carries an election scoring section, its base election
// weights override the defaults.
//
// cfg may be nil; in that case the defaults are used unchanged. (The previous
// implementation dereferenced cfg.Security without a nil check and panicked
// on a nil config.)
func NewSLURPCandidateScorer(cfg *config.Config) *SLURPCandidateScorer {
	weights := DefaultSLURPScoringWeights()
	requirements := DefaultSLURPLeadershipRequirements()

	// Map existing election config weights onto the SLURP base weights when present.
	if cfg != nil && cfg.Security != nil && cfg.Security.ElectionConfig != nil {
		if scoring := cfg.Security.ElectionConfig.LeadershipScoring; scoring != nil {
			weights.UptimeWeight = scoring.UptimeWeight
			weights.CapabilityWeight = scoring.CapabilityWeight
			weights.ResourceWeight = scoring.ResourceWeight
			weights.NetworkWeight = scoring.NetworkWeight
			weights.ExperienceWeight = scoring.ExperienceWeight
		}
	}

	return &SLURPCandidateScorer{
		weights:             weights,
		config:              cfg,
		requirements:        requirements,
		minQualityScore:     0.7,
		minReliabilityScore: 0.8,
		minUptimeThreshold:  0.9,
	}
}
// CalculateSLURPCandidateScore calculates a comprehensive SLURP-aware score
// for an election candidate, returning the final score in [0,1] and a
// detailed per-component breakdown.
//
// A nil candidate is an error. A nil slurpCapabilities is replaced with a
// conservative default so candidates without SLURP metadata can still be
// ranked. Candidates failing the minimum leadership requirements are
// disqualified with a score of 0 (and a populated breakdown, no error).
func (scs *SLURPCandidateScorer) CalculateSLURPCandidateScore(
	candidate *AdminCandidate,
	slurpCapabilities *SLURPCandidateCapabilities,
) (float64, *SLURPScoringBreakdown, error) {
	if candidate == nil {
		return 0.0, nil, fmt.Errorf("candidate is nil")
	}
	if slurpCapabilities == nil {
		// Use default/minimal capabilities if none provided.
		slurpCapabilities = &SLURPCandidateCapabilities{
			GeneralCapabilityScore: 0.5,
			ReliabilityScore:       0.7,
			UptimePercentage:       0.9,
		}
	}

	breakdown := &SLURPScoringBreakdown{
		CandidateID: candidate.NodeID,
		Timestamp:   time.Now(),
	}

	// Base election score: already a weighted sum over the base election
	// weights (uptime, capability, resource, network, experience).
	baseScore := scs.calculateBaseElectionScore(candidate, breakdown)

	// SLURP-specific component scores, each normalized to [0,1].
	contextScore := scs.calculateContextCapabilityScore(slurpCapabilities, breakdown)
	intelligenceScore := scs.calculateIntelligenceScore(slurpCapabilities, breakdown)
	coordinationScore := scs.calculateCoordinationScore(slurpCapabilities, breakdown)
	qualityScore := scs.calculateQualityScore(slurpCapabilities, breakdown)
	performanceScore := scs.calculatePerformanceScore(slurpCapabilities, breakdown)
	specializationScore := scs.calculateSpecializationScore(slurpCapabilities, breakdown)
	availabilityScore := scs.calculateAvailabilityScore(slurpCapabilities, breakdown)
	reliabilityScore := scs.calculateReliabilityScore(slurpCapabilities, breakdown)

	// Disqualify candidates that do not meet the minimum requirements.
	if !scs.meetsRequirements(candidate, slurpCapabilities, breakdown) {
		breakdown.MeetsRequirements = false
		breakdown.DisqualificationReasons = append(breakdown.DisqualificationReasons,
			"Does not meet minimum SLURP leadership requirements")
		return 0.0, breakdown, nil
	}
	breakdown.MeetsRequirements = true

	weights := scs.weights

	// BUG FIX: baseScore is already weighted (it is a sum of component*weight
	// terms — see calculateBaseElectionScore), so it is added directly here.
	// The previous code multiplied it by the sum of the base weights a second
	// time, double-applying those weights; with the default weights a perfect
	// candidate normalized to ~0.76 instead of 1.0.
	finalScore := baseScore +
		contextScore*weights.ContextCapabilityWeight +
		intelligenceScore*weights.IntelligenceWeight +
		coordinationScore*weights.CoordinationWeight +
		qualityScore*weights.QualityWeight +
		performanceScore*weights.PerformanceWeight +
		specializationScore*weights.SpecializationWeight +
		availabilityScore*weights.AvailabilityWeight +
		reliabilityScore*weights.ReliabilityWeight

	// Normalize by the total weight so the score lands in [0,1] regardless of
	// how the configured weights sum.
	totalWeight := weights.UptimeWeight + weights.CapabilityWeight + weights.ResourceWeight +
		weights.NetworkWeight + weights.ExperienceWeight + weights.ContextCapabilityWeight +
		weights.IntelligenceWeight + weights.CoordinationWeight + weights.QualityWeight +
		weights.PerformanceWeight + weights.SpecializationWeight + weights.AvailabilityWeight +
		weights.ReliabilityWeight
	if totalWeight > 0 {
		finalScore = finalScore / totalWeight
	}

	// Apply bonus/penalty adjustments, then clamp to the valid range.
	finalScore = scs.applyAdjustments(candidate, slurpCapabilities, finalScore, breakdown)
	if finalScore < 0 {
		finalScore = 0
	}
	if finalScore > 1 {
		finalScore = 1
	}

	breakdown.FinalScore = finalScore

	log.Printf("📊 SLURP candidate score for %s: %.3f (base: %.3f, context: %.3f, intelligence: %.3f)",
		candidate.NodeID, finalScore, baseScore, contextScore, intelligenceScore)

	return finalScore, breakdown, nil
}
// calculateBaseElectionScore reproduces the legacy election scoring for a
// candidate: a weighted sum of uptime, capability, resource, network, and
// experience components. It also records the per-component values in the
// breakdown's BaseScores.
func (scs *SLURPCandidateScorer) calculateBaseElectionScore(candidate *AdminCandidate, breakdown *SLURPScoringBreakdown) float64 {
	w := scs.weights

	// Uptime: 24 hours of uptime earns the full score.
	uptime := min(1.0, candidate.Uptime.Hours()/24.0)

	// Capabilities: each admin/coordination capability contributes 25%, capped at 1.0.
	adminCaps := map[string]bool{
		"admin_election":     true,
		"context_curation":   true,
		"key_reconstruction": true,
		"semantic_analysis":  true,
	}
	capability := 0.0
	for _, c := range candidate.Capabilities {
		if adminCaps[c] {
			capability += 0.25
		}
	}
	capability = min(1.0, capability)

	// Resources: lower utilization and better network quality score higher.
	res := candidate.Resources
	resource := (1.0-res.CPUUsage)*0.3 +
		(1.0-res.MemoryUsage)*0.3 +
		(1.0-res.DiskUsage)*0.2 +
		res.NetworkQuality*0.2

	// Experience: one week (168h) earns the full score.
	experience := min(1.0, candidate.Experience.Hours()/168.0)

	breakdown.BaseScores = &BaseElectionScores{
		UptimeScore:     uptime,
		CapabilityScore: capability,
		ResourceScore:   resource,
		NetworkScore:    res.NetworkQuality,
		ExperienceScore: experience,
	}

	// Weighted base score.
	return uptime*w.UptimeWeight +
		capability*w.CapabilityWeight +
		resource*w.ResourceWeight +
		res.NetworkQuality*w.NetworkWeight +
		experience*w.ExperienceWeight
}
// calculateContextCapabilityScore scores context-related capabilities. Core
// capabilities (generation, curation, distribution, storage) carry most of
// the weight; semantic analysis and RAG integration add a bonus. The result
// is capped at 1.0 and recorded in the breakdown.
func (scs *SLURPCandidateScorer) calculateContextCapabilityScore(caps *SLURPCandidateCapabilities, breakdown *SLURPScoringBreakdown) float64 {
	contributions := []struct {
		present bool
		weight  float64
	}{
		{caps.ContextGeneration, 0.3},   // core: required for leadership
		{caps.ContextCuration, 0.2},     // core
		{caps.ContextDistribution, 0.2}, // core
		{caps.ContextStorage, 0.1},      // core
		{caps.SemanticAnalysis, 0.1},    // advanced bonus
		{caps.RAGIntegration, 0.1},      // advanced bonus
	}

	total := 0.0
	for _, c := range contributions {
		if c.present {
			total += c.weight
		}
	}

	breakdown.ContextCapabilityScore = min(1.0, total)
	return breakdown.ContextCapabilityScore
}
// calculateIntelligenceScore scores the four intelligence capabilities
// equally (0.25 each), then scales by GenerationQuality so that breadth of
// capability cannot mask poor output quality. Recorded in the breakdown.
func (scs *SLURPCandidateScorer) calculateIntelligenceScore(caps *SLURPCandidateCapabilities, breakdown *SLURPScoringBreakdown) float64 {
	flags := []bool{
		caps.SemanticAnalysis,
		caps.RAGIntegration,
		caps.TemporalAnalysis,
		caps.DecisionTracking,
	}

	raw := 0.0
	for _, enabled := range flags {
		if enabled {
			raw += 0.25
		}
	}

	// Quality multiplier: a zero-quality candidate scores zero here.
	result := raw * caps.GenerationQuality
	breakdown.IntelligenceScore = result
	return result
}
// calculateCoordinationScore scores cluster-coordination capabilities, capped
// at 1.0, and records the result in the breakdown.
func (scs *SLURPCandidateScorer) calculateCoordinationScore(caps *SLURPCandidateCapabilities, breakdown *SLURPScoringBreakdown) float64 {
	total := 0.0
	if caps.ClusterCoordination {
		total += 0.3
	}
	if caps.LoadBalancing {
		total += 0.25
	}
	if caps.HealthMonitoring {
		total += 0.2
	}
	if caps.ResourceManagement {
		total += 0.25
	}

	breakdown.CoordinationScore = min(1.0, total)
	return breakdown.CoordinationScore
}
// calculateQualityScore returns the arithmetic mean of the three quality
// metrics (generation quality, processing speed, accuracy) and records it in
// the breakdown. ReliabilityScore is handled separately.
func (scs *SLURPCandidateScorer) calculateQualityScore(caps *SLURPCandidateCapabilities, breakdown *SLURPScoringBreakdown) float64 {
	mean := (caps.GenerationQuality + caps.ProcessingSpeed + caps.AccuracyScore) / 3.0
	breakdown.QualityScore = mean
	return mean
}
// calculatePerformanceScore scores historical performance as a blend of
// success rate (70%) and average response time (30%). Candidates with no
// operation history receive a neutral 0.5. Recorded in the breakdown.
func (scs *SLURPCandidateScorer) calculatePerformanceScore(caps *SLURPCandidateCapabilities, breakdown *SLURPScoringBreakdown) float64 {
	total := caps.SuccessfulOperations + caps.FailedOperations
	if total == 0 {
		// No history: neither reward nor punish.
		breakdown.PerformanceScore = 0.5
		return 0.5
	}

	successRate := float64(caps.SuccessfulOperations) / float64(total)

	// Response-time component: <=1s is ideal (1.0), >=10s is poor (0.1),
	// linearly interpolated in between. Zero (no measurement) counts as ideal.
	rtScore := 1.0
	if rt := caps.AverageResponseTime; rt > 0 {
		const worst = 10 * time.Second
		switch {
		case rt <= time.Second:
			rtScore = 1.0
		case rt >= worst:
			rtScore = 0.1
		default:
			rtScore = 1.0 - (float64(rt-time.Second)/float64(worst-time.Second))*0.9
		}
	}

	combined := (successRate * 0.7) + (rtScore * 0.3)
	breakdown.PerformanceScore = combined
	return combined
}
// calculateSpecializationScore blends the self-reported specialization score
// (60%) with domain coverage (40%), where ten distinct expertise areas count
// as full coverage. Recorded in the breakdown.
func (scs *SLURPCandidateScorer) calculateSpecializationScore(caps *SLURPCandidateCapabilities, breakdown *SLURPScoringBreakdown) float64 {
	coverage := min(1.0, float64(len(caps.DomainExpertise))/10.0)
	blended := (caps.SpecializationScore * 0.6) + (coverage * 0.4)
	breakdown.SpecializationScore = blended
	return blended
}
// calculateAvailabilityScore scores resource availability. Each resource is
// normalized against a reference amount treated as "excellent" and capped at
// 1.0, then combined as CPU 30%, memory 30%, storage 20%, network 20%.
// Recorded in the breakdown.
func (scs *SLURPCandidateScorer) calculateAvailabilityScore(caps *SLURPCandidateCapabilities, breakdown *SLURPScoringBreakdown) float64 {
	const (
		refCPU     = 8.0                       // 8 cores is excellent
		refMemory  = 16 * 1024 * 1024 * 1024   // 16 GiB is excellent
		refStorage = 1024 * 1024 * 1024 * 1024 // 1 TiB is excellent
		refNetwork = 1024 * 1024 * 1024        // ~1 Gbps is excellent
	)

	cpu := min(1.0, caps.AvailableCPU/refCPU)
	mem := min(1.0, float64(caps.AvailableMemory)/refMemory)
	disk := min(1.0, float64(caps.AvailableStorage)/refStorage)
	net := min(1.0, float64(caps.NetworkBandwidth)/refNetwork)

	weighted := (cpu * 0.3) + (mem * 0.3) + (disk * 0.2) + (net * 0.2)
	breakdown.AvailabilityScore = weighted
	return weighted
}
// calculateReliabilityScore blends the self-reported reliability metric (60%)
// with observed uptime (40%) and records the result in the breakdown.
func (scs *SLURPCandidateScorer) calculateReliabilityScore(caps *SLURPCandidateCapabilities, breakdown *SLURPScoringBreakdown) float64 {
	blended := (caps.ReliabilityScore * 0.6) + (caps.UptimePercentage * 0.4)
	breakdown.ReliabilityScore = blended
	return blended
}
// meetsRequirements checks whether the candidate satisfies the minimum SLURP
// leadership requirements, recording every violation in
// breakdown.RequirementIssues. Returns true only when no requirement is
// violated.
//
// NOTE(review): req.RequiredCapabilities / req.PreferredCapabilities are not
// evaluated here because SLURPCandidateCapabilities exposes capabilities as
// typed booleans rather than by name — confirm whether string-name gating is
// needed and map the names onto the boolean fields if so. MinSuccessfulOperations
// is also deliberately not enforced, as it would disqualify every candidate
// with no operation history; confirm the intended policy.
func (scs *SLURPCandidateScorer) meetsRequirements(candidate *AdminCandidate, caps *SLURPCandidateCapabilities, breakdown *SLURPScoringBreakdown) bool {
	req := scs.requirements
	issues := []string{}

	// Quality thresholds.
	if caps.GenerationQuality < req.MinQualityScore {
		issues = append(issues, fmt.Sprintf("Quality score %.2f below minimum %.2f", caps.GenerationQuality, req.MinQualityScore))
	}
	if caps.ReliabilityScore < req.MinReliabilityScore {
		issues = append(issues, fmt.Sprintf("Reliability score %.2f below minimum %.2f", caps.ReliabilityScore, req.MinReliabilityScore))
	}
	if caps.UptimePercentage < req.MinUptimePercentage {
		issues = append(issues, fmt.Sprintf("Uptime %.2f%% below minimum %.2f%%", caps.UptimePercentage*100, req.MinUptimePercentage*100))
	}

	// Resource requirements. Storage and network bandwidth were declared in
	// SLURPLeadershipRequirements but previously never checked — fixed here.
	if caps.AvailableCPU < req.MinCPU {
		issues = append(issues, fmt.Sprintf("Available CPU %.1f below minimum %.1f", caps.AvailableCPU, req.MinCPU))
	}
	if caps.AvailableMemory < req.MinMemory {
		issues = append(issues, fmt.Sprintf("Available memory %d below minimum %d", caps.AvailableMemory, req.MinMemory))
	}
	if caps.AvailableStorage < req.MinStorage {
		issues = append(issues, fmt.Sprintf("Available storage %d below minimum %d", caps.AvailableStorage, req.MinStorage))
	}
	if caps.NetworkBandwidth < req.MinNetworkBandwidth {
		issues = append(issues, fmt.Sprintf("Network bandwidth %d below minimum %d", caps.NetworkBandwidth, req.MinNetworkBandwidth))
	}

	// Experience requirements.
	if total := caps.SuccessfulOperations + caps.FailedOperations; total > 0 {
		failureRate := float64(caps.FailedOperations) / float64(total)
		if failureRate > req.MaxFailureRate {
			issues = append(issues, fmt.Sprintf("Failure rate %.2f%% above maximum %.2f%%", failureRate*100, req.MaxFailureRate*100))
		}
	}
	// MaxResponseTime was also declared but never enforced; a zero response
	// time (no measurement) passes.
	if caps.AverageResponseTime > req.MaxResponseTime {
		issues = append(issues, fmt.Sprintf("Average response time %v above maximum %v", caps.AverageResponseTime, req.MaxResponseTime))
	}

	breakdown.RequirementIssues = issues
	return len(issues) == 0
}
// applyAdjustments applies small bonus/penalty adjustments on top of the
// weighted score, recording a human-readable note for each adjustment in
// breakdown.ScoreAdjustments. The caller is responsible for clamping the
// result to [0,1].
func (scs *SLURPCandidateScorer) applyAdjustments(candidate *AdminCandidate, caps *SLURPCandidateCapabilities, baseScore float64, breakdown *SLURPScoringBreakdown) float64 {
	notes := []string{}
	adjusted := baseScore

	// Bonuses for exceptional metrics.
	if caps.GenerationQuality > 0.95 {
		adjusted += 0.05
		notes = append(notes, "Exceptional generation quality bonus (+0.05)")
	}
	if caps.UptimePercentage > 0.99 {
		adjusted += 0.03
		notes = append(notes, "Exceptional uptime bonus (+0.03)")
	}

	// Bonus when the candidate covers generation, curation, analysis and coordination.
	fullCoverage := caps.ContextGeneration &&
		caps.ContextCuration &&
		caps.SemanticAnalysis &&
		caps.ClusterCoordination
	if fullCoverage {
		adjusted += 0.02
		notes = append(notes, "Full capability coverage bonus (+0.02)")
	}

	// Penalties for concerning metrics.
	if caps.GenerationQuality < 0.5 {
		adjusted -= 0.1
		notes = append(notes, "Low generation quality penalty (-0.1)")
	}
	if caps.FailedOperations > caps.SuccessfulOperations {
		adjusted -= 0.15
		notes = append(notes, "High failure rate penalty (-0.15)")
	}

	breakdown.ScoreAdjustments = notes
	return adjusted
}
// Supporting types and defaults
// SLURPScoringBreakdown provides a detailed breakdown of SLURP candidate
// scoring: the final score, each component score, and any requirement
// violations or adjustments applied. Populated incrementally by the
// individual calculate* methods during CalculateSLURPCandidateScore.
type SLURPScoringBreakdown struct {
	CandidateID       string    `json:"candidate_id"`       // NodeID of the scored candidate
	Timestamp         time.Time `json:"timestamp"`          // When the score was computed
	FinalScore        float64   `json:"final_score"`        // Final normalized score in [0,1]
	MeetsRequirements bool      `json:"meets_requirements"` // False when the candidate was disqualified

	// Score components
	BaseScores             *BaseElectionScores `json:"base_scores"`              // Legacy election component scores
	ContextCapabilityScore float64             `json:"context_capability_score"` // Context capability component (0-1)
	IntelligenceScore      float64             `json:"intelligence_score"`       // Intelligence component (0-1)
	CoordinationScore      float64             `json:"coordination_score"`       // Coordination component (0-1)
	QualityScore           float64             `json:"quality_score"`            // Quality component (0-1)
	PerformanceScore       float64             `json:"performance_score"`        // Historical performance component (0-1)
	SpecializationScore    float64             `json:"specialization_score"`     // Specialization component (0-1)
	AvailabilityScore      float64             `json:"availability_score"`       // Resource availability component (0-1)
	ReliabilityScore       float64             `json:"reliability_score"`        // Reliability component (0-1)

	// Requirements and adjustments
	RequirementIssues       []string `json:"requirement_issues,omitempty"`       // Individual requirement violations
	DisqualificationReasons []string `json:"disqualification_reasons,omitempty"` // High-level reasons the candidate was rejected
	ScoreAdjustments        []string `json:"score_adjustments,omitempty"`        // Bonus/penalty notes applied to the final score
}
// BaseElectionScores contains the base (legacy) election scoring breakdown,
// with each component normalized to [0,1] before weighting.
type BaseElectionScores struct {
	UptimeScore     float64 `json:"uptime_score"`     // Uptime component (24h of uptime = 1.0)
	CapabilityScore float64 `json:"capability_score"` // Admin capability component
	ResourceScore   float64 `json:"resource_score"`   // Resource utilization component (lower usage = higher)
	NetworkScore    float64 `json:"network_score"`    // Network quality, taken directly from candidate resources
	ExperienceScore float64 `json:"experience_score"` // Experience component (168h = 1.0)
}
// DefaultSLURPScoringWeights returns the default SLURP scoring weights. The
// base election weights sum to 0.4 and the SLURP-specific weights to 0.6, so
// the full set sums to 1.0.
func DefaultSLURPScoringWeights() *SLURPScoringWeights {
	w := &SLURPScoringWeights{}

	// Base election weights (total: 0.4).
	w.UptimeWeight = 0.08
	w.CapabilityWeight = 0.10
	w.ResourceWeight = 0.08
	w.NetworkWeight = 0.06
	w.ExperienceWeight = 0.08

	// SLURP-specific weights (total: 0.6). Context capability carries the
	// largest weight because it is the most important trait for context
	// leadership.
	w.ContextCapabilityWeight = 0.15
	w.IntelligenceWeight = 0.12
	w.CoordinationWeight = 0.10
	w.QualityWeight = 0.08
	w.PerformanceWeight = 0.06
	w.SpecializationWeight = 0.04
	w.AvailabilityWeight = 0.03
	w.ReliabilityWeight = 0.02

	return w
}
// DefaultSLURPLeadershipRequirements returns the default minimum requirements
// for SLURP leadership eligibility.
func DefaultSLURPLeadershipRequirements() *SLURPLeadershipRequirements {
	const (
		mib = int64(1024 * 1024) // one mebibyte
		gib = 1024 * mib         // one gibibyte
	)

	return &SLURPLeadershipRequirements{
		RequiredCapabilities:    []string{"context_generation", "context_curation"},
		PreferredCapabilities:   []string{"semantic_analysis", "cluster_coordination", "rag_integration"},
		MinQualityScore:         0.6,
		MinReliabilityScore:     0.7,
		MinUptimePercentage:     0.8,
		MinCPU:                  2.0,       // at least two CPU cores
		MinMemory:               4 * gib,   // 4 GiB minimum
		MinStorage:              100 * gib, // 100 GiB minimum
		MinNetworkBandwidth:     100 * mib, // ~100 Mbps minimum
		MinSuccessfulOperations: 10,
		MaxFailureRate:          0.1,             // at most 10% of operations may fail
		MaxResponseTime:         5 * time.Second, // average response must stay under 5s
	}
}