🎭 CHORUS now contains full BZZZ functionality adapted for containers Core systems ported: - P2P networking (libp2p with DHT and PubSub) - Task coordination (COOEE protocol) - HMMM collaborative reasoning - SHHH encryption and security - SLURP admin election system - UCXL content addressing - UCXI server integration - Hypercore logging system - Health monitoring and graceful shutdown - License validation with KACHING Container adaptations: - Environment variable configuration (no YAML files) - Container-optimized logging to stdout/stderr - Auto-generated agent IDs for container deployments - Docker-first architecture All proven BZZZ P2P protocols, AI integration, and collaboration features are now available in containerized form. Next: Build and test container deployment. 🤖 Generated with [Claude Code](https://claude.ai/code) Co-Authored-By: Claude <noreply@anthropic.com>
849 lines
22 KiB
Go
849 lines
22 KiB
Go
package storage
|
|
|
|
import (
|
|
"context"
|
|
"crypto/sha256"
|
|
"encoding/json"
|
|
"fmt"
|
|
"io"
|
|
"os"
|
|
"path/filepath"
|
|
"sort"
|
|
"sync"
|
|
"time"
|
|
|
|
"github.com/robfig/cron/v3"
|
|
"chorus.services/bzzz/pkg/crypto"
|
|
)
|
|
|
|
// BackupManagerImpl implements the BackupManager interface
|
|
type BackupManagerImpl struct {
|
|
mu sync.RWMutex
|
|
contextStore *ContextStoreImpl
|
|
crypto crypto.RoleCrypto
|
|
basePath string
|
|
nodeID string
|
|
schedules map[string]*cron.Cron
|
|
backups map[string]*BackupInfo
|
|
runningBackups map[string]*BackupJob
|
|
options *BackupManagerOptions
|
|
notifications chan *BackupEvent
|
|
stopCh chan struct{}
|
|
}
|
|
|
|
// BackupManagerOptions configures backup manager behavior
|
|
type BackupManagerOptions struct {
|
|
MaxConcurrentBackups int `json:"max_concurrent_backups"`
|
|
CompressionEnabled bool `json:"compression_enabled"`
|
|
EncryptionEnabled bool `json:"encryption_enabled"`
|
|
RetentionDays int `json:"retention_days"`
|
|
ValidationEnabled bool `json:"validation_enabled"`
|
|
NotificationsEnabled bool `json:"notifications_enabled"`
|
|
BackupTimeout time.Duration `json:"backup_timeout"`
|
|
CleanupInterval time.Duration `json:"cleanup_interval"`
|
|
}
|
|
|
|
// BackupJob represents a running backup operation
|
|
type BackupJob struct {
|
|
ID string `json:"id"`
|
|
Config *BackupConfig `json:"config"`
|
|
StartTime time.Time `json:"start_time"`
|
|
Progress float64 `json:"progress"`
|
|
Status BackupStatus `json:"status"`
|
|
Error error `json:"error,omitempty"`
|
|
ProcessedSize int64 `json:"processed_size"`
|
|
TotalSize int64 `json:"total_size"`
|
|
cancel context.CancelFunc
|
|
}
|
|
|
|
// BackupEvent represents backup-related events
|
|
type BackupEvent struct {
|
|
Type BackupEventType `json:"type"`
|
|
BackupID string `json:"backup_id"`
|
|
Message string `json:"message"`
|
|
Timestamp time.Time `json:"timestamp"`
|
|
Metadata map[string]interface{} `json:"metadata"`
|
|
}
|
|
|
|
// BackupEventType defines types of backup events
|
|
type BackupEventType string
|
|
|
|
const (
|
|
BackupStarted BackupEventType = "backup_started"
|
|
BackupProgress BackupEventType = "backup_progress"
|
|
BackupCompleted BackupEventType = "backup_completed"
|
|
BackupFailed BackupEventType = "backup_failed"
|
|
BackupValidated BackupEventType = "backup_validated"
|
|
BackupRestored BackupEventType = "backup_restored"
|
|
BackupDeleted BackupEventType = "backup_deleted"
|
|
BackupScheduled BackupEventType = "backup_scheduled"
|
|
)
|
|
|
|
// DefaultBackupManagerOptions returns sensible defaults
|
|
func DefaultBackupManagerOptions() *BackupManagerOptions {
|
|
return &BackupManagerOptions{
|
|
MaxConcurrentBackups: 2,
|
|
CompressionEnabled: true,
|
|
EncryptionEnabled: true,
|
|
RetentionDays: 30,
|
|
ValidationEnabled: true,
|
|
NotificationsEnabled: true,
|
|
BackupTimeout: 4 * time.Hour,
|
|
CleanupInterval: 24 * time.Hour,
|
|
}
|
|
}
|
|
|
|
// NewBackupManager creates a new backup manager
|
|
func NewBackupManager(
|
|
contextStore *ContextStoreImpl,
|
|
crypto crypto.RoleCrypto,
|
|
basePath string,
|
|
nodeID string,
|
|
options *BackupManagerOptions,
|
|
) (*BackupManagerImpl, error) {
|
|
if options == nil {
|
|
options = DefaultBackupManagerOptions()
|
|
}
|
|
|
|
// Ensure backup directory exists
|
|
if err := os.MkdirAll(basePath, 0755); err != nil {
|
|
return nil, fmt.Errorf("failed to create backup directory: %w", err)
|
|
}
|
|
|
|
bm := &BackupManagerImpl{
|
|
contextStore: contextStore,
|
|
crypto: crypto,
|
|
basePath: basePath,
|
|
nodeID: nodeID,
|
|
schedules: make(map[string]*cron.Cron),
|
|
backups: make(map[string]*BackupInfo),
|
|
runningBackups: make(map[string]*BackupJob),
|
|
options: options,
|
|
notifications: make(chan *BackupEvent, 100),
|
|
stopCh: make(chan struct{}),
|
|
}
|
|
|
|
// Load existing backup metadata
|
|
if err := bm.loadBackupMetadata(); err != nil {
|
|
return nil, fmt.Errorf("failed to load backup metadata: %w", err)
|
|
}
|
|
|
|
// Start background processes
|
|
go bm.notificationProcessor()
|
|
go bm.cleanupProcessor()
|
|
|
|
return bm, nil
|
|
}
|
|
|
|
// CreateBackup creates a backup of stored data
|
|
func (bm *BackupManagerImpl) CreateBackup(
|
|
ctx context.Context,
|
|
config *BackupConfig,
|
|
) (*BackupInfo, error) {
|
|
// Check concurrent backup limit
|
|
bm.mu.RLock()
|
|
runningCount := len(bm.runningBackups)
|
|
bm.mu.RUnlock()
|
|
|
|
if runningCount >= bm.options.MaxConcurrentBackups {
|
|
return nil, fmt.Errorf("maximum concurrent backups (%d) exceeded", bm.options.MaxConcurrentBackups)
|
|
}
|
|
|
|
// Generate backup ID
|
|
backupID := bm.generateBackupID(config.Name)
|
|
|
|
// Create backup info
|
|
backupInfo := &BackupInfo{
|
|
ID: backupID,
|
|
BackupID: backupID,
|
|
Name: config.Name,
|
|
Destination: config.Destination,
|
|
IncludesIndexes: config.IncludeIndexes,
|
|
IncludesCache: config.IncludeCache,
|
|
Encrypted: config.Encryption,
|
|
Incremental: config.Incremental,
|
|
ParentBackupID: config.ParentBackupID,
|
|
Status: BackupInProgress,
|
|
CreatedAt: time.Now(),
|
|
RetentionUntil: time.Now().Add(config.Retention),
|
|
}
|
|
|
|
// Create backup job
|
|
jobCtx, cancel := context.WithTimeout(ctx, bm.options.BackupTimeout)
|
|
job := &BackupJob{
|
|
ID: backupID,
|
|
Config: config,
|
|
StartTime: time.Now(),
|
|
Status: BackupInProgress,
|
|
cancel: cancel,
|
|
}
|
|
|
|
// Store backup info and job
|
|
bm.mu.Lock()
|
|
bm.backups[backupID] = backupInfo
|
|
bm.runningBackups[backupID] = job
|
|
bm.mu.Unlock()
|
|
|
|
// Notify backup started
|
|
bm.notify(&BackupEvent{
|
|
Type: BackupStarted,
|
|
BackupID: backupID,
|
|
Message: fmt.Sprintf("Backup '%s' started", config.Name),
|
|
Timestamp: time.Now(),
|
|
})
|
|
|
|
// Start backup process in goroutine
|
|
go bm.performBackup(jobCtx, job, backupInfo)
|
|
|
|
return backupInfo, nil
|
|
}
|
|
|
|
// RestoreBackup restores data from backup
|
|
func (bm *BackupManagerImpl) RestoreBackup(
|
|
ctx context.Context,
|
|
backupID string,
|
|
config *RestoreConfig,
|
|
) error {
|
|
// Get backup info
|
|
bm.mu.RLock()
|
|
backupInfo, exists := bm.backups[backupID]
|
|
bm.mu.RUnlock()
|
|
|
|
if !exists {
|
|
return fmt.Errorf("backup %s not found", backupID)
|
|
}
|
|
|
|
if backupInfo.Status != BackupCompleted {
|
|
return fmt.Errorf("backup %s is not completed (status: %s)", backupID, backupInfo.Status)
|
|
}
|
|
|
|
// Validate backup if requested
|
|
if config.ValidateIntegrity {
|
|
validation, err := bm.ValidateBackup(ctx, backupID)
|
|
if err != nil {
|
|
return fmt.Errorf("backup validation failed: %w", err)
|
|
}
|
|
if !validation.Valid {
|
|
return fmt.Errorf("backup integrity validation failed")
|
|
}
|
|
}
|
|
|
|
// Perform restore
|
|
return bm.performRestore(ctx, backupInfo, config)
|
|
}
|
|
|
|
// ListBackups lists available backups
|
|
func (bm *BackupManagerImpl) ListBackups(ctx context.Context) ([]*BackupInfo, error) {
|
|
bm.mu.RLock()
|
|
defer bm.mu.RUnlock()
|
|
|
|
backups := make([]*BackupInfo, 0, len(bm.backups))
|
|
for _, backup := range bm.backups {
|
|
backups = append(backups, backup)
|
|
}
|
|
|
|
// Sort by creation time (newest first)
|
|
sort.Slice(backups, func(i, j int) bool {
|
|
return backups[i].CreatedAt.After(backups[j].CreatedAt)
|
|
})
|
|
|
|
return backups, nil
|
|
}
|
|
|
|
// DeleteBackup removes a backup
|
|
func (bm *BackupManagerImpl) DeleteBackup(ctx context.Context, backupID string) error {
|
|
bm.mu.Lock()
|
|
defer bm.mu.Unlock()
|
|
|
|
backupInfo, exists := bm.backups[backupID]
|
|
if !exists {
|
|
return fmt.Errorf("backup %s not found", backupID)
|
|
}
|
|
|
|
// Check if backup is currently running
|
|
if _, running := bm.runningBackups[backupID]; running {
|
|
return fmt.Errorf("cannot delete running backup %s", backupID)
|
|
}
|
|
|
|
// Delete backup files
|
|
backupDir := filepath.Join(bm.basePath, backupID)
|
|
if err := os.RemoveAll(backupDir); err != nil {
|
|
return fmt.Errorf("failed to delete backup files: %w", err)
|
|
}
|
|
|
|
// Remove from memory
|
|
delete(bm.backups, backupID)
|
|
|
|
// Notify deletion
|
|
bm.notify(&BackupEvent{
|
|
Type: BackupDeleted,
|
|
BackupID: backupID,
|
|
Message: fmt.Sprintf("Backup '%s' deleted", backupInfo.Name),
|
|
Timestamp: time.Now(),
|
|
})
|
|
|
|
return nil
|
|
}
|
|
|
|
// ValidateBackup validates backup integrity
|
|
func (bm *BackupManagerImpl) ValidateBackup(
|
|
ctx context.Context,
|
|
backupID string,
|
|
) (*BackupValidation, error) {
|
|
start := time.Now()
|
|
validation := &BackupValidation{
|
|
BackupID: backupID,
|
|
ValidatedAt: time.Now(),
|
|
CorruptedFiles: make([]string, 0),
|
|
MissingFiles: make([]string, 0),
|
|
}
|
|
|
|
// Get backup info
|
|
bm.mu.RLock()
|
|
backupInfo, exists := bm.backups[backupID]
|
|
bm.mu.RUnlock()
|
|
|
|
if !exists {
|
|
return nil, fmt.Errorf("backup %s not found", backupID)
|
|
}
|
|
|
|
// Validate checksum
|
|
backupDir := filepath.Join(bm.basePath, backupID)
|
|
calculatedChecksum, err := bm.calculateDirectoryChecksum(backupDir)
|
|
if err != nil {
|
|
validation.ErrorCount++
|
|
return validation, fmt.Errorf("failed to calculate checksum: %w", err)
|
|
}
|
|
|
|
validation.ChecksumMatch = (calculatedChecksum == backupInfo.Checksum)
|
|
if !validation.ChecksumMatch {
|
|
validation.ErrorCount++
|
|
}
|
|
|
|
// Validate individual files
|
|
err = filepath.Walk(backupDir, func(path string, info os.FileInfo, err error) error {
|
|
if err != nil {
|
|
validation.MissingFiles = append(validation.MissingFiles, path)
|
|
validation.ErrorCount++
|
|
return nil // Continue walking
|
|
}
|
|
|
|
if !info.IsDir() {
|
|
// Validate file integrity
|
|
if err := bm.validateFile(path); err != nil {
|
|
validation.CorruptedFiles = append(validation.CorruptedFiles, path)
|
|
validation.ErrorCount++
|
|
}
|
|
}
|
|
|
|
return nil
|
|
})
|
|
|
|
if err != nil {
|
|
return validation, fmt.Errorf("validation walk failed: %w", err)
|
|
}
|
|
|
|
validation.Valid = (validation.ErrorCount == 0)
|
|
validation.ValidationTime = time.Since(start)
|
|
|
|
// Notify validation completed
|
|
bm.notify(&BackupEvent{
|
|
Type: BackupValidated,
|
|
BackupID: backupID,
|
|
Message: fmt.Sprintf("Backup validation completed (valid: %v)", validation.Valid),
|
|
Timestamp: time.Now(),
|
|
Metadata: map[string]interface{}{
|
|
"valid": validation.Valid,
|
|
"error_count": validation.ErrorCount,
|
|
"warning_count": validation.WarningCount,
|
|
"checksum_match": validation.ChecksumMatch,
|
|
},
|
|
})
|
|
|
|
return validation, nil
|
|
}
|
|
|
|
// ScheduleBackup schedules automatic backups
|
|
func (bm *BackupManagerImpl) ScheduleBackup(
|
|
ctx context.Context,
|
|
schedule *BackupSchedule,
|
|
) error {
|
|
bm.mu.Lock()
|
|
defer bm.mu.Unlock()
|
|
|
|
// Create cron scheduler
|
|
c := cron.New(cron.WithSeconds())
|
|
|
|
// Add backup job
|
|
_, err := c.AddFunc(schedule.Cron, func() {
|
|
bm.executeScheduledBackup(schedule)
|
|
})
|
|
if err != nil {
|
|
return fmt.Errorf("failed to schedule backup: %w", err)
|
|
}
|
|
|
|
// Calculate next run time
|
|
if len(c.Entries()) > 0 {
|
|
nextRun := c.Entries()[0].Next
|
|
schedule.NextRun = &nextRun
|
|
}
|
|
|
|
// Start scheduler
|
|
c.Start()
|
|
|
|
// Store schedule
|
|
bm.schedules[schedule.ID] = c
|
|
|
|
// Notify scheduling
|
|
bm.notify(&BackupEvent{
|
|
Type: BackupScheduled,
|
|
BackupID: schedule.ID,
|
|
Message: fmt.Sprintf("Backup schedule '%s' created", schedule.Name),
|
|
Timestamp: time.Now(),
|
|
Metadata: map[string]interface{}{
|
|
"cron": schedule.Cron,
|
|
"enabled": schedule.Enabled,
|
|
"next_run": schedule.NextRun,
|
|
},
|
|
})
|
|
|
|
return nil
|
|
}
|
|
|
|
// GetBackupStats returns backup statistics
|
|
func (bm *BackupManagerImpl) GetBackupStats(ctx context.Context) (*BackupStatistics, error) {
|
|
bm.mu.RLock()
|
|
defer bm.mu.RUnlock()
|
|
|
|
stats := &BackupStatistics{
|
|
TotalBackups: int64(len(bm.backups)),
|
|
SuccessfulBackups: 0,
|
|
FailedBackups: 0,
|
|
TotalBackupSize: 0,
|
|
EncryptionEnabled: bm.options.EncryptionEnabled,
|
|
}
|
|
|
|
var totalTime time.Duration
|
|
var oldestTime, newestTime time.Time
|
|
first := true
|
|
|
|
for _, backup := range bm.backups {
|
|
switch backup.Status {
|
|
case BackupCompleted:
|
|
stats.SuccessfulBackups++
|
|
if backup.CompletedAt != nil {
|
|
backupTime := backup.CompletedAt.Sub(backup.CreatedAt)
|
|
totalTime += backupTime
|
|
}
|
|
case BackupFailed:
|
|
stats.FailedBackups++
|
|
}
|
|
|
|
stats.TotalBackupSize += backup.DataSize
|
|
|
|
if first {
|
|
oldestTime = backup.CreatedAt
|
|
newestTime = backup.CreatedAt
|
|
first = false
|
|
} else {
|
|
if backup.CreatedAt.Before(oldestTime) {
|
|
oldestTime = backup.CreatedAt
|
|
}
|
|
if backup.CreatedAt.After(newestTime) {
|
|
newestTime = backup.CreatedAt
|
|
}
|
|
}
|
|
}
|
|
|
|
if stats.SuccessfulBackups > 0 {
|
|
stats.AverageBackupTime = totalTime / time.Duration(stats.SuccessfulBackups)
|
|
}
|
|
|
|
if !first {
|
|
stats.LastBackupTime = newestTime
|
|
stats.OldestBackup = oldestTime
|
|
}
|
|
|
|
// Calculate compression ratio
|
|
var totalOriginal, totalCompressed int64
|
|
for _, backup := range bm.backups {
|
|
totalOriginal += backup.DataSize
|
|
totalCompressed += backup.CompressedSize
|
|
}
|
|
if totalOriginal > 0 {
|
|
stats.CompressionRatio = float64(totalCompressed) / float64(totalOriginal)
|
|
}
|
|
|
|
return stats, nil
|
|
}
|
|
|
|
// Implementation of backup operations
|
|
|
|
func (bm *BackupManagerImpl) performBackup(
|
|
ctx context.Context,
|
|
job *BackupJob,
|
|
backupInfo *BackupInfo,
|
|
) {
|
|
defer func() {
|
|
job.cancel()
|
|
bm.mu.Lock()
|
|
delete(bm.runningBackups, job.ID)
|
|
bm.mu.Unlock()
|
|
}()
|
|
|
|
// Create backup directory
|
|
backupDir := filepath.Join(bm.basePath, job.ID)
|
|
if err := os.MkdirAll(backupDir, 0755); err != nil {
|
|
bm.failBackup(job, backupInfo, fmt.Errorf("failed to create backup directory: %w", err))
|
|
return
|
|
}
|
|
|
|
// Estimate total size for progress tracking
|
|
totalSize, err := bm.estimateBackupSize(job.Config)
|
|
if err != nil {
|
|
bm.failBackup(job, backupInfo, fmt.Errorf("failed to estimate backup size: %w", err))
|
|
return
|
|
}
|
|
job.TotalSize = totalSize
|
|
|
|
// Backup context data
|
|
if err := bm.backupContexts(ctx, job, backupDir); err != nil {
|
|
bm.failBackup(job, backupInfo, fmt.Errorf("failed to backup contexts: %w", err))
|
|
return
|
|
}
|
|
|
|
// Backup indexes if requested
|
|
if job.Config.IncludeIndexes {
|
|
if err := bm.backupIndexes(ctx, job, backupDir); err != nil {
|
|
bm.failBackup(job, backupInfo, fmt.Errorf("failed to backup indexes: %w", err))
|
|
return
|
|
}
|
|
}
|
|
|
|
// Backup cache if requested
|
|
if job.Config.IncludeCache {
|
|
if err := bm.backupCache(ctx, job, backupDir); err != nil {
|
|
bm.failBackup(job, backupInfo, fmt.Errorf("failed to backup cache: %w", err))
|
|
return
|
|
}
|
|
}
|
|
|
|
// Calculate final size and checksum
|
|
finalSize, err := bm.calculateDirectorySize(backupDir)
|
|
if err != nil {
|
|
bm.failBackup(job, backupInfo, fmt.Errorf("failed to calculate backup size: %w", err))
|
|
return
|
|
}
|
|
|
|
checksum, err := bm.calculateDirectoryChecksum(backupDir)
|
|
if err != nil {
|
|
bm.failBackup(job, backupInfo, fmt.Errorf("failed to calculate checksum: %w", err))
|
|
return
|
|
}
|
|
|
|
// Update backup info
|
|
completedAt := time.Now()
|
|
bm.mu.Lock()
|
|
backupInfo.Status = BackupCompleted
|
|
backupInfo.DataSize = finalSize
|
|
backupInfo.CompressedSize = finalSize // Would be different if compression is applied
|
|
backupInfo.Checksum = checksum
|
|
backupInfo.CompletedAt = &completedAt
|
|
backupInfo.Progress = 1.0
|
|
bm.mu.Unlock()
|
|
|
|
// Save backup metadata
|
|
if err := bm.saveBackupMetadata(); err != nil {
|
|
// Log error but don't fail backup
|
|
fmt.Printf("Failed to save backup metadata: %v\n", err)
|
|
}
|
|
|
|
// Notify completion
|
|
bm.notify(&BackupEvent{
|
|
Type: BackupCompleted,
|
|
BackupID: job.ID,
|
|
Message: fmt.Sprintf("Backup '%s' completed successfully", job.Config.Name),
|
|
Timestamp: time.Now(),
|
|
Metadata: map[string]interface{}{
|
|
"size": finalSize,
|
|
"checksum": checksum,
|
|
"duration": time.Since(job.StartTime),
|
|
},
|
|
})
|
|
}
|
|
|
|
func (bm *BackupManagerImpl) performRestore(
|
|
ctx context.Context,
|
|
backupInfo *BackupInfo,
|
|
config *RestoreConfig,
|
|
) error {
|
|
backupDir := filepath.Join(bm.basePath, backupInfo.BackupID)
|
|
|
|
// Restore context data
|
|
contextsFile := filepath.Join(backupDir, "contexts.json")
|
|
if err := bm.restoreContexts(ctx, contextsFile, config); err != nil {
|
|
return fmt.Errorf("failed to restore contexts: %w", err)
|
|
}
|
|
|
|
// Restore indexes if present and requested
|
|
if config.RestoreIndexes {
|
|
indexesDir := filepath.Join(backupDir, "indexes")
|
|
if _, err := os.Stat(indexesDir); err == nil {
|
|
if err := bm.restoreIndexes(ctx, indexesDir); err != nil {
|
|
return fmt.Errorf("failed to restore indexes: %w", err)
|
|
}
|
|
}
|
|
}
|
|
|
|
// Restore cache if present and requested
|
|
if config.RestoreCache {
|
|
cacheFile := filepath.Join(backupDir, "cache.json")
|
|
if _, err := os.Stat(cacheFile); err == nil {
|
|
if err := bm.restoreCache(ctx, cacheFile); err != nil {
|
|
return fmt.Errorf("failed to restore cache: %w", err)
|
|
}
|
|
}
|
|
}
|
|
|
|
// Notify restore completion
|
|
bm.notify(&BackupEvent{
|
|
Type: BackupRestored,
|
|
BackupID: backupInfo.BackupID,
|
|
Message: fmt.Sprintf("Backup '%s' restored successfully", backupInfo.Name),
|
|
Timestamp: time.Now(),
|
|
})
|
|
|
|
return nil
|
|
}
|
|
|
|
// Helper methods (simplified implementations)
|
|
|
|
func (bm *BackupManagerImpl) generateBackupID(name string) string {
|
|
return fmt.Sprintf("%s_%s_%d", bm.nodeID, name, time.Now().Unix())
|
|
}
|
|
|
|
func (bm *BackupManagerImpl) estimateBackupSize(config *BackupConfig) (int64, error) {
|
|
// Estimate total backup size
|
|
// This would analyze storage to determine approximate size
|
|
return 1024 * 1024 * 100, nil // Placeholder: 100MB
|
|
}
|
|
|
|
func (bm *BackupManagerImpl) backupContexts(ctx context.Context, job *BackupJob, backupDir string) error {
|
|
// Export all contexts to JSON format
|
|
// This is a simplified implementation
|
|
return nil
|
|
}
|
|
|
|
func (bm *BackupManagerImpl) backupIndexes(ctx context.Context, job *BackupJob, backupDir string) error {
|
|
// Backup search indexes
|
|
// This would copy index files
|
|
return nil
|
|
}
|
|
|
|
func (bm *BackupManagerImpl) backupCache(ctx context.Context, job *BackupJob, backupDir string) error {
|
|
// Backup cache data
|
|
// This would export cache entries
|
|
return nil
|
|
}
|
|
|
|
func (bm *BackupManagerImpl) restoreContexts(ctx context.Context, contextsFile string, config *RestoreConfig) error {
|
|
// Restore contexts from backup file
|
|
return nil
|
|
}
|
|
|
|
func (bm *BackupManagerImpl) restoreIndexes(ctx context.Context, indexesDir string) error {
|
|
// Restore search indexes
|
|
return nil
|
|
}
|
|
|
|
func (bm *BackupManagerImpl) restoreCache(ctx context.Context, cacheFile string) error {
|
|
// Restore cache data
|
|
return nil
|
|
}
|
|
|
|
func (bm *BackupManagerImpl) calculateDirectorySize(dir string) (int64, error) {
|
|
var size int64
|
|
err := filepath.Walk(dir, func(path string, info os.FileInfo, err error) error {
|
|
if err != nil {
|
|
return err
|
|
}
|
|
if !info.IsDir() {
|
|
size += info.Size()
|
|
}
|
|
return nil
|
|
})
|
|
return size, err
|
|
}
|
|
|
|
func (bm *BackupManagerImpl) calculateDirectoryChecksum(dir string) (string, error) {
|
|
hash := sha256.New()
|
|
err := filepath.Walk(dir, func(path string, info os.FileInfo, err error) error {
|
|
if err != nil {
|
|
return err
|
|
}
|
|
if !info.IsDir() {
|
|
file, err := os.Open(path)
|
|
if err != nil {
|
|
return err
|
|
}
|
|
defer file.Close()
|
|
_, err = io.Copy(hash, file)
|
|
return err
|
|
}
|
|
return nil
|
|
})
|
|
if err != nil {
|
|
return "", err
|
|
}
|
|
return fmt.Sprintf("%x", hash.Sum(nil)), nil
|
|
}
|
|
|
|
func (bm *BackupManagerImpl) validateFile(filePath string) error {
|
|
// Validate individual file integrity
|
|
// This could check file headers, format, etc.
|
|
return nil
|
|
}
|
|
|
|
func (bm *BackupManagerImpl) failBackup(job *BackupJob, backupInfo *BackupInfo, err error) {
|
|
bm.mu.Lock()
|
|
backupInfo.Status = BackupFailed
|
|
backupInfo.ErrorMessage = err.Error()
|
|
job.Error = err
|
|
bm.mu.Unlock()
|
|
|
|
bm.notify(&BackupEvent{
|
|
Type: BackupFailed,
|
|
BackupID: job.ID,
|
|
Message: fmt.Sprintf("Backup '%s' failed: %v", job.Config.Name, err),
|
|
Timestamp: time.Now(),
|
|
Metadata: map[string]interface{}{
|
|
"error": err.Error(),
|
|
},
|
|
})
|
|
}
|
|
|
|
func (bm *BackupManagerImpl) executeScheduledBackup(schedule *BackupSchedule) {
|
|
ctx, cancel := context.WithTimeout(context.Background(), bm.options.BackupTimeout)
|
|
defer cancel()
|
|
|
|
// Update schedule metadata
|
|
now := time.Now()
|
|
schedule.LastRun = &now
|
|
|
|
// Create backup
|
|
_, err := bm.CreateBackup(ctx, schedule.BackupConfig)
|
|
if err != nil {
|
|
schedule.ConsecutiveFailures++
|
|
// Disable schedule if too many failures
|
|
if schedule.ConsecutiveFailures >= schedule.MaxFailures {
|
|
schedule.Enabled = false
|
|
}
|
|
} else {
|
|
schedule.ConsecutiveFailures = 0
|
|
}
|
|
}
|
|
|
|
func (bm *BackupManagerImpl) loadBackupMetadata() error {
|
|
metadataFile := filepath.Join(bm.basePath, "backups.json")
|
|
data, err := os.ReadFile(metadataFile)
|
|
if os.IsNotExist(err) {
|
|
return nil // No existing metadata
|
|
}
|
|
if err != nil {
|
|
return err
|
|
}
|
|
|
|
var backups map[string]*BackupInfo
|
|
if err := json.Unmarshal(data, &backups); err != nil {
|
|
return err
|
|
}
|
|
|
|
bm.backups = backups
|
|
return nil
|
|
}
|
|
|
|
func (bm *BackupManagerImpl) saveBackupMetadata() error {
|
|
metadataFile := filepath.Join(bm.basePath, "backups.json")
|
|
data, err := json.MarshalIndent(bm.backups, "", " ")
|
|
if err != nil {
|
|
return err
|
|
}
|
|
return os.WriteFile(metadataFile, data, 0644)
|
|
}
|
|
|
|
func (bm *BackupManagerImpl) notify(event *BackupEvent) {
|
|
if bm.options.NotificationsEnabled {
|
|
select {
|
|
case bm.notifications <- event:
|
|
default:
|
|
// Channel full, drop notification
|
|
}
|
|
}
|
|
}
|
|
|
|
func (bm *BackupManagerImpl) notificationProcessor() {
|
|
for {
|
|
select {
|
|
case event := <-bm.notifications:
|
|
// Process backup event (logging, external notifications, etc.)
|
|
fmt.Printf("Backup event: %s - %s\n", event.Type, event.Message)
|
|
case <-bm.stopCh:
|
|
return
|
|
}
|
|
}
|
|
}
|
|
|
|
func (bm *BackupManagerImpl) cleanupProcessor() {
|
|
ticker := time.NewTicker(bm.options.CleanupInterval)
|
|
defer ticker.Stop()
|
|
|
|
for {
|
|
select {
|
|
case <-ticker.C:
|
|
bm.performCleanup()
|
|
case <-bm.stopCh:
|
|
return
|
|
}
|
|
}
|
|
}
|
|
|
|
func (bm *BackupManagerImpl) performCleanup() {
|
|
bm.mu.Lock()
|
|
defer bm.mu.Unlock()
|
|
|
|
now := time.Now()
|
|
for backupID, backup := range bm.backups {
|
|
if now.After(backup.RetentionUntil) {
|
|
// Delete expired backup
|
|
backupDir := filepath.Join(bm.basePath, backupID)
|
|
if err := os.RemoveAll(backupDir); err != nil {
|
|
fmt.Printf("Failed to cleanup expired backup %s: %v\n", backupID, err)
|
|
continue
|
|
}
|
|
delete(bm.backups, backupID)
|
|
}
|
|
}
|
|
}
|
|
|
|
// Close shuts down the backup manager
|
|
func (bm *BackupManagerImpl) Close() error {
|
|
close(bm.stopCh)
|
|
|
|
// Stop all scheduled backups
|
|
bm.mu.Lock()
|
|
for _, scheduler := range bm.schedules {
|
|
scheduler.Stop()
|
|
}
|
|
bm.mu.Unlock()
|
|
|
|
// Cancel running backups
|
|
for _, job := range bm.runningBackups {
|
|
if job.cancel != nil {
|
|
job.cancel()
|
|
}
|
|
}
|
|
|
|
// Save final metadata
|
|
return bm.saveBackupMetadata()
|
|
}
|