Disambiguate backup status constants for SLURP storage

This commit is contained in:
anthonyrawlins
2025-09-27 15:47:18 +10:00
parent a99469f346
commit acc4361463
2 changed files with 274 additions and 271 deletions

View File

@@ -12,35 +12,35 @@ import (
"sync" "sync"
"time" "time"
"github.com/robfig/cron/v3"
"chorus/pkg/crypto" "chorus/pkg/crypto"
"github.com/robfig/cron/v3"
) )
// BackupManagerImpl implements the BackupManager interface // BackupManagerImpl implements the BackupManager interface
type BackupManagerImpl struct { type BackupManagerImpl struct {
mu sync.RWMutex mu sync.RWMutex
contextStore *ContextStoreImpl contextStore *ContextStoreImpl
crypto crypto.RoleCrypto crypto crypto.RoleCrypto
basePath string basePath string
nodeID string nodeID string
schedules map[string]*cron.Cron schedules map[string]*cron.Cron
backups map[string]*BackupInfo backups map[string]*BackupInfo
runningBackups map[string]*BackupJob runningBackups map[string]*BackupJob
options *BackupManagerOptions options *BackupManagerOptions
notifications chan *BackupEvent notifications chan *BackupEvent
stopCh chan struct{} stopCh chan struct{}
} }
// BackupManagerOptions configures backup manager behavior // BackupManagerOptions configures backup manager behavior
type BackupManagerOptions struct { type BackupManagerOptions struct {
MaxConcurrentBackups int `json:"max_concurrent_backups"` MaxConcurrentBackups int `json:"max_concurrent_backups"`
CompressionEnabled bool `json:"compression_enabled"` CompressionEnabled bool `json:"compression_enabled"`
EncryptionEnabled bool `json:"encryption_enabled"` EncryptionEnabled bool `json:"encryption_enabled"`
RetentionDays int `json:"retention_days"` RetentionDays int `json:"retention_days"`
ValidationEnabled bool `json:"validation_enabled"` ValidationEnabled bool `json:"validation_enabled"`
NotificationsEnabled bool `json:"notifications_enabled"` NotificationsEnabled bool `json:"notifications_enabled"`
BackupTimeout time.Duration `json:"backup_timeout"` BackupTimeout time.Duration `json:"backup_timeout"`
CleanupInterval time.Duration `json:"cleanup_interval"` CleanupInterval time.Duration `json:"cleanup_interval"`
} }
// BackupJob represents a running backup operation // BackupJob represents a running backup operation
@@ -69,14 +69,14 @@ type BackupEvent struct {
type BackupEventType string type BackupEventType string
const ( const (
BackupStarted BackupEventType = "backup_started" BackupEventStarted BackupEventType = "backup_started"
BackupProgress BackupEventType = "backup_progress" BackupEventProgress BackupEventType = "backup_progress"
BackupCompleted BackupEventType = "backup_completed" BackupEventCompleted BackupEventType = "backup_completed"
BackupFailed BackupEventType = "backup_failed" BackupEventFailed BackupEventType = "backup_failed"
BackupValidated BackupEventType = "backup_validated" BackupEventValidated BackupEventType = "backup_validated"
BackupRestored BackupEventType = "backup_restored" BackupEventRestored BackupEventType = "backup_restored"
BackupDeleted BackupEventType = "backup_deleted" BackupEventDeleted BackupEventType = "backup_deleted"
BackupScheduled BackupEventType = "backup_scheduled" BackupEventScheduled BackupEventType = "backup_scheduled"
) )
// DefaultBackupManagerOptions returns sensible defaults // DefaultBackupManagerOptions returns sensible defaults
@@ -112,15 +112,15 @@ func NewBackupManager(
bm := &BackupManagerImpl{ bm := &BackupManagerImpl{
contextStore: contextStore, contextStore: contextStore,
crypto: crypto, crypto: crypto,
basePath: basePath, basePath: basePath,
nodeID: nodeID, nodeID: nodeID,
schedules: make(map[string]*cron.Cron), schedules: make(map[string]*cron.Cron),
backups: make(map[string]*BackupInfo), backups: make(map[string]*BackupInfo),
runningBackups: make(map[string]*BackupJob), runningBackups: make(map[string]*BackupJob),
options: options, options: options,
notifications: make(chan *BackupEvent, 100), notifications: make(chan *BackupEvent, 100),
stopCh: make(chan struct{}), stopCh: make(chan struct{}),
} }
// Load existing backup metadata // Load existing backup metadata
@@ -154,16 +154,16 @@ func (bm *BackupManagerImpl) CreateBackup(
// Create backup info // Create backup info
backupInfo := &BackupInfo{ backupInfo := &BackupInfo{
ID: backupID, ID: backupID,
BackupID: backupID, BackupID: backupID,
Name: config.Name, Name: config.Name,
Destination: config.Destination, Destination: config.Destination,
IncludesIndexes: config.IncludeIndexes, IncludesIndexes: config.IncludeIndexes,
IncludesCache: config.IncludeCache, IncludesCache: config.IncludeCache,
Encrypted: config.Encryption, Encrypted: config.Encryption,
Incremental: config.Incremental, Incremental: config.Incremental,
ParentBackupID: config.ParentBackupID, ParentBackupID: config.ParentBackupID,
Status: BackupInProgress, Status: BackupStatusInProgress,
CreatedAt: time.Now(), CreatedAt: time.Now(),
RetentionUntil: time.Now().Add(config.Retention), RetentionUntil: time.Now().Add(config.Retention),
} }
@@ -174,7 +174,7 @@ func (bm *BackupManagerImpl) CreateBackup(
ID: backupID, ID: backupID,
Config: config, Config: config,
StartTime: time.Now(), StartTime: time.Now(),
Status: BackupInProgress, Status: BackupStatusInProgress,
cancel: cancel, cancel: cancel,
} }
@@ -186,7 +186,7 @@ func (bm *BackupManagerImpl) CreateBackup(
// Notify backup started // Notify backup started
bm.notify(&BackupEvent{ bm.notify(&BackupEvent{
Type: BackupStarted, Type: BackupEventStarted,
BackupID: backupID, BackupID: backupID,
Message: fmt.Sprintf("Backup '%s' started", config.Name), Message: fmt.Sprintf("Backup '%s' started", config.Name),
Timestamp: time.Now(), Timestamp: time.Now(),
@@ -213,7 +213,7 @@ func (bm *BackupManagerImpl) RestoreBackup(
return fmt.Errorf("backup %s not found", backupID) return fmt.Errorf("backup %s not found", backupID)
} }
if backupInfo.Status != BackupCompleted { if backupInfo.Status != BackupStatusCompleted {
return fmt.Errorf("backup %s is not completed (status: %s)", backupID, backupInfo.Status) return fmt.Errorf("backup %s is not completed (status: %s)", backupID, backupInfo.Status)
} }
@@ -276,7 +276,7 @@ func (bm *BackupManagerImpl) DeleteBackup(ctx context.Context, backupID string)
// Notify deletion // Notify deletion
bm.notify(&BackupEvent{ bm.notify(&BackupEvent{
Type: BackupDeleted, Type: BackupEventDeleted,
BackupID: backupID, BackupID: backupID,
Message: fmt.Sprintf("Backup '%s' deleted", backupInfo.Name), Message: fmt.Sprintf("Backup '%s' deleted", backupInfo.Name),
Timestamp: time.Now(), Timestamp: time.Now(),
@@ -348,7 +348,7 @@ func (bm *BackupManagerImpl) ValidateBackup(
// Notify validation completed // Notify validation completed
bm.notify(&BackupEvent{ bm.notify(&BackupEvent{
Type: BackupValidated, Type: BackupEventValidated,
BackupID: backupID, BackupID: backupID,
Message: fmt.Sprintf("Backup validation completed (valid: %v)", validation.Valid), Message: fmt.Sprintf("Backup validation completed (valid: %v)", validation.Valid),
Timestamp: time.Now(), Timestamp: time.Now(),
@@ -396,7 +396,7 @@ func (bm *BackupManagerImpl) ScheduleBackup(
// Notify scheduling // Notify scheduling
bm.notify(&BackupEvent{ bm.notify(&BackupEvent{
Type: BackupScheduled, Type: BackupEventScheduled,
BackupID: schedule.ID, BackupID: schedule.ID,
Message: fmt.Sprintf("Backup schedule '%s' created", schedule.Name), Message: fmt.Sprintf("Backup schedule '%s' created", schedule.Name),
Timestamp: time.Now(), Timestamp: time.Now(),
@@ -429,13 +429,13 @@ func (bm *BackupManagerImpl) GetBackupStats(ctx context.Context) (*BackupStatist
for _, backup := range bm.backups { for _, backup := range bm.backups {
switch backup.Status { switch backup.Status {
case BackupCompleted: case BackupStatusCompleted:
stats.SuccessfulBackups++ stats.SuccessfulBackups++
if backup.CompletedAt != nil { if backup.CompletedAt != nil {
backupTime := backup.CompletedAt.Sub(backup.CreatedAt) backupTime := backup.CompletedAt.Sub(backup.CreatedAt)
totalTime += backupTime totalTime += backupTime
} }
case BackupFailed: case BackupStatusFailed:
stats.FailedBackups++ stats.FailedBackups++
} }
@@ -544,7 +544,7 @@ func (bm *BackupManagerImpl) performBackup(
// Update backup info // Update backup info
completedAt := time.Now() completedAt := time.Now()
bm.mu.Lock() bm.mu.Lock()
backupInfo.Status = BackupCompleted backupInfo.Status = BackupStatusCompleted
backupInfo.DataSize = finalSize backupInfo.DataSize = finalSize
backupInfo.CompressedSize = finalSize // Would be different if compression is applied backupInfo.CompressedSize = finalSize // Would be different if compression is applied
backupInfo.Checksum = checksum backupInfo.Checksum = checksum
@@ -560,7 +560,7 @@ func (bm *BackupManagerImpl) performBackup(
// Notify completion // Notify completion
bm.notify(&BackupEvent{ bm.notify(&BackupEvent{
Type: BackupCompleted, Type: BackupEventCompleted,
BackupID: job.ID, BackupID: job.ID,
Message: fmt.Sprintf("Backup '%s' completed successfully", job.Config.Name), Message: fmt.Sprintf("Backup '%s' completed successfully", job.Config.Name),
Timestamp: time.Now(), Timestamp: time.Now(),
@@ -607,7 +607,7 @@ func (bm *BackupManagerImpl) performRestore(
// Notify restore completion // Notify restore completion
bm.notify(&BackupEvent{ bm.notify(&BackupEvent{
Type: BackupRestored, Type: BackupEventRestored,
BackupID: backupInfo.BackupID, BackupID: backupInfo.BackupID,
Message: fmt.Sprintf("Backup '%s' restored successfully", backupInfo.Name), Message: fmt.Sprintf("Backup '%s' restored successfully", backupInfo.Name),
Timestamp: time.Now(), Timestamp: time.Now(),
@@ -706,13 +706,13 @@ func (bm *BackupManagerImpl) validateFile(filePath string) error {
func (bm *BackupManagerImpl) failBackup(job *BackupJob, backupInfo *BackupInfo, err error) { func (bm *BackupManagerImpl) failBackup(job *BackupJob, backupInfo *BackupInfo, err error) {
bm.mu.Lock() bm.mu.Lock()
backupInfo.Status = BackupFailed backupInfo.Status = BackupStatusFailed
backupInfo.ErrorMessage = err.Error() backupInfo.ErrorMessage = err.Error()
job.Error = err job.Error = err
bm.mu.Unlock() bm.mu.Unlock()
bm.notify(&BackupEvent{ bm.notify(&BackupEvent{
Type: BackupFailed, Type: BackupEventFailed,
BackupID: job.ID, BackupID: job.ID,
Message: fmt.Sprintf("Backup '%s' failed: %v", job.Config.Name, err), Message: fmt.Sprintf("Backup '%s' failed: %v", job.Config.Name, err),
Timestamp: time.Now(), Timestamp: time.Now(),

View File

@@ -3,83 +3,83 @@ package storage
import ( import (
"time" "time"
"chorus/pkg/ucxl"
"chorus/pkg/crypto" "chorus/pkg/crypto"
slurpContext "chorus/pkg/slurp/context" slurpContext "chorus/pkg/slurp/context"
"chorus/pkg/ucxl"
) )
// ListCriteria represents criteria for listing contexts // ListCriteria represents criteria for listing contexts
type ListCriteria struct { type ListCriteria struct {
// Filter criteria // Filter criteria
Tags []string `json:"tags"` // Required tags Tags []string `json:"tags"` // Required tags
Technologies []string `json:"technologies"` // Required technologies Technologies []string `json:"technologies"` // Required technologies
Roles []string `json:"roles"` // Accessible roles Roles []string `json:"roles"` // Accessible roles
PathPattern string `json:"path_pattern"` // Path pattern to match PathPattern string `json:"path_pattern"` // Path pattern to match
// Date filters // Date filters
CreatedAfter *time.Time `json:"created_after,omitempty"` // Created after date CreatedAfter *time.Time `json:"created_after,omitempty"` // Created after date
CreatedBefore *time.Time `json:"created_before,omitempty"` // Created before date CreatedBefore *time.Time `json:"created_before,omitempty"` // Created before date
UpdatedAfter *time.Time `json:"updated_after,omitempty"` // Updated after date UpdatedAfter *time.Time `json:"updated_after,omitempty"` // Updated after date
UpdatedBefore *time.Time `json:"updated_before,omitempty"` // Updated before date UpdatedBefore *time.Time `json:"updated_before,omitempty"` // Updated before date
// Quality filters // Quality filters
MinConfidence float64 `json:"min_confidence"` // Minimum confidence score MinConfidence float64 `json:"min_confidence"` // Minimum confidence score
MaxAge *time.Duration `json:"max_age,omitempty"` // Maximum age MaxAge *time.Duration `json:"max_age,omitempty"` // Maximum age
// Pagination // Pagination
Offset int `json:"offset"` // Result offset Offset int `json:"offset"` // Result offset
Limit int `json:"limit"` // Maximum results Limit int `json:"limit"` // Maximum results
// Sorting // Sorting
SortBy string `json:"sort_by"` // Sort field SortBy string `json:"sort_by"` // Sort field
SortOrder string `json:"sort_order"` // Sort order (asc, desc) SortOrder string `json:"sort_order"` // Sort order (asc, desc)
// Options // Options
IncludeStale bool `json:"include_stale"` // Include stale contexts IncludeStale bool `json:"include_stale"` // Include stale contexts
} }
// SearchQuery represents a search query for contexts // SearchQuery represents a search query for contexts
type SearchQuery struct { type SearchQuery struct {
// Query terms // Query terms
Query string `json:"query"` // Main search query Query string `json:"query"` // Main search query
Tags []string `json:"tags"` // Required tags Tags []string `json:"tags"` // Required tags
Technologies []string `json:"technologies"` // Required technologies Technologies []string `json:"technologies"` // Required technologies
FileTypes []string `json:"file_types"` // File types to include FileTypes []string `json:"file_types"` // File types to include
// Filters // Filters
MinConfidence float64 `json:"min_confidence"` // Minimum confidence MinConfidence float64 `json:"min_confidence"` // Minimum confidence
MaxAge *time.Duration `json:"max_age"` // Maximum age MaxAge *time.Duration `json:"max_age"` // Maximum age
Roles []string `json:"roles"` // Required access roles Roles []string `json:"roles"` // Required access roles
// Scope // Scope
Scope []string `json:"scope"` // Paths to search within Scope []string `json:"scope"` // Paths to search within
ExcludeScope []string `json:"exclude_scope"` // Paths to exclude ExcludeScope []string `json:"exclude_scope"` // Paths to exclude
// Result options // Result options
Limit int `json:"limit"` // Maximum results Limit int `json:"limit"` // Maximum results
Offset int `json:"offset"` // Result offset Offset int `json:"offset"` // Result offset
SortBy string `json:"sort_by"` // Sort field SortBy string `json:"sort_by"` // Sort field
SortOrder string `json:"sort_order"` // asc, desc SortOrder string `json:"sort_order"` // asc, desc
// Advanced options // Advanced options
FuzzyMatch bool `json:"fuzzy_match"` // Enable fuzzy matching FuzzyMatch bool `json:"fuzzy_match"` // Enable fuzzy matching
IncludeStale bool `json:"include_stale"` // Include stale contexts IncludeStale bool `json:"include_stale"` // Include stale contexts
HighlightTerms bool `json:"highlight_terms"` // Highlight search terms HighlightTerms bool `json:"highlight_terms"` // Highlight search terms
// Faceted search // Faceted search
Facets []string `json:"facets"` // Facets to include Facets []string `json:"facets"` // Facets to include
FacetFilters map[string][]string `json:"facet_filters"` // Facet filters FacetFilters map[string][]string `json:"facet_filters"` // Facet filters
} }
// SearchResults represents search query results // SearchResults represents search query results
type SearchResults struct { type SearchResults struct {
Query *SearchQuery `json:"query"` // Original query Query *SearchQuery `json:"query"` // Original query
Results []*SearchResult `json:"results"` // Search results Results []*SearchResult `json:"results"` // Search results
TotalResults int64 `json:"total_results"` // Total matching results TotalResults int64 `json:"total_results"` // Total matching results
ProcessingTime time.Duration `json:"processing_time"` // Query processing time ProcessingTime time.Duration `json:"processing_time"` // Query processing time
Facets map[string]map[string]int `json:"facets"` // Faceted results Facets map[string]map[string]int `json:"facets"` // Faceted results
Suggestions []string `json:"suggestions"` // Query suggestions Suggestions []string `json:"suggestions"` // Query suggestions
ProcessedAt time.Time `json:"processed_at"` // When query was processed ProcessedAt time.Time `json:"processed_at"` // When query was processed
} }
// SearchResult represents a single search result // SearchResult represents a single search result
@@ -94,76 +94,76 @@ type SearchResult struct {
// BatchStoreRequest represents a batch store operation // BatchStoreRequest represents a batch store operation
type BatchStoreRequest struct { type BatchStoreRequest struct {
Contexts []*ContextStoreItem `json:"contexts"` // Contexts to store Contexts []*ContextStoreItem `json:"contexts"` // Contexts to store
Roles []string `json:"roles"` // Default roles for all contexts Roles []string `json:"roles"` // Default roles for all contexts
Options *StoreOptions `json:"options"` // Store options Options *StoreOptions `json:"options"` // Store options
Transaction bool `json:"transaction"` // Use transaction Transaction bool `json:"transaction"` // Use transaction
FailOnError bool `json:"fail_on_error"` // Fail entire batch on error FailOnError bool `json:"fail_on_error"` // Fail entire batch on error
} }
// ContextStoreItem represents a single item in batch store // ContextStoreItem represents a single item in batch store
type ContextStoreItem struct { type ContextStoreItem struct {
Context *slurpContext.ContextNode `json:"context"` // Context to store Context *slurpContext.ContextNode `json:"context"` // Context to store
Roles []string `json:"roles"` // Specific roles (overrides default) Roles []string `json:"roles"` // Specific roles (overrides default)
Options *StoreOptions `json:"options"` // Item-specific options Options *StoreOptions `json:"options"` // Item-specific options
} }
// BatchStoreResult represents the result of batch store operation // BatchStoreResult represents the result of batch store operation
type BatchStoreResult struct { type BatchStoreResult struct {
SuccessCount int `json:"success_count"` // Number of successful stores SuccessCount int `json:"success_count"` // Number of successful stores
ErrorCount int `json:"error_count"` // Number of failed stores ErrorCount int `json:"error_count"` // Number of failed stores
Errors map[string]error `json:"errors"` // Errors by context path Errors map[string]error `json:"errors"` // Errors by context path
ProcessingTime time.Duration `json:"processing_time"` // Total processing time ProcessingTime time.Duration `json:"processing_time"` // Total processing time
ProcessedAt time.Time `json:"processed_at"` // When batch was processed ProcessedAt time.Time `json:"processed_at"` // When batch was processed
} }
// BatchRetrieveRequest represents a batch retrieve operation // BatchRetrieveRequest represents a batch retrieve operation
type BatchRetrieveRequest struct { type BatchRetrieveRequest struct {
Addresses []ucxl.Address `json:"addresses"` // Addresses to retrieve Addresses []ucxl.Address `json:"addresses"` // Addresses to retrieve
Role string `json:"role"` // Role for access control Role string `json:"role"` // Role for access control
Options *RetrieveOptions `json:"options"` // Retrieve options Options *RetrieveOptions `json:"options"` // Retrieve options
FailOnError bool `json:"fail_on_error"` // Fail entire batch on error FailOnError bool `json:"fail_on_error"` // Fail entire batch on error
} }
// BatchRetrieveResult represents the result of batch retrieve operation // BatchRetrieveResult represents the result of batch retrieve operation
type BatchRetrieveResult struct { type BatchRetrieveResult struct {
Contexts map[string]*slurpContext.ContextNode `json:"contexts"` // Retrieved contexts by address Contexts map[string]*slurpContext.ContextNode `json:"contexts"` // Retrieved contexts by address
SuccessCount int `json:"success_count"` // Number of successful retrieves SuccessCount int `json:"success_count"` // Number of successful retrieves
ErrorCount int `json:"error_count"` // Number of failed retrieves ErrorCount int `json:"error_count"` // Number of failed retrieves
Errors map[string]error `json:"errors"` // Errors by address Errors map[string]error `json:"errors"` // Errors by address
ProcessingTime time.Duration `json:"processing_time"` // Total processing time ProcessingTime time.Duration `json:"processing_time"` // Total processing time
ProcessedAt time.Time `json:"processed_at"` // When batch was processed ProcessedAt time.Time `json:"processed_at"` // When batch was processed
} }
// StoreOptions represents options for storing contexts // StoreOptions represents options for storing contexts
type StoreOptions struct { type StoreOptions struct {
Encrypt bool `json:"encrypt"` // Whether to encrypt data Encrypt bool `json:"encrypt"` // Whether to encrypt data
Replicate bool `json:"replicate"` // Whether to replicate across nodes Replicate bool `json:"replicate"` // Whether to replicate across nodes
Index bool `json:"index"` // Whether to add to search index Index bool `json:"index"` // Whether to add to search index
Cache bool `json:"cache"` // Whether to cache locally Cache bool `json:"cache"` // Whether to cache locally
Compress bool `json:"compress"` // Whether to compress data Compress bool `json:"compress"` // Whether to compress data
TTL *time.Duration `json:"ttl,omitempty"` // Time to live TTL *time.Duration `json:"ttl,omitempty"` // Time to live
AccessLevel crypto.AccessLevel `json:"access_level"` // Required access level AccessLevel crypto.AccessLevel `json:"access_level"` // Required access level
Metadata map[string]interface{} `json:"metadata"` // Additional metadata Metadata map[string]interface{} `json:"metadata"` // Additional metadata
} }
// RetrieveOptions represents options for retrieving contexts // RetrieveOptions represents options for retrieving contexts
type RetrieveOptions struct { type RetrieveOptions struct {
UseCache bool `json:"use_cache"` // Whether to use cache UseCache bool `json:"use_cache"` // Whether to use cache
RefreshCache bool `json:"refresh_cache"` // Whether to refresh cache RefreshCache bool `json:"refresh_cache"` // Whether to refresh cache
IncludeStale bool `json:"include_stale"` // Include stale contexts IncludeStale bool `json:"include_stale"` // Include stale contexts
MaxAge *time.Duration `json:"max_age,omitempty"` // Maximum acceptable age MaxAge *time.Duration `json:"max_age,omitempty"` // Maximum acceptable age
Decompress bool `json:"decompress"` // Whether to decompress data Decompress bool `json:"decompress"` // Whether to decompress data
ValidateIntegrity bool `json:"validate_integrity"` // Validate data integrity ValidateIntegrity bool `json:"validate_integrity"` // Validate data integrity
} }
// DistributedStoreOptions represents options for distributed storage // DistributedStoreOptions represents options for distributed storage
type DistributedStoreOptions struct { type DistributedStoreOptions struct {
ReplicationFactor int `json:"replication_factor"` // Number of replicas ReplicationFactor int `json:"replication_factor"` // Number of replicas
ConsistencyLevel ConsistencyLevel `json:"consistency_level"` // Consistency requirements ConsistencyLevel ConsistencyLevel `json:"consistency_level"` // Consistency requirements
Timeout time.Duration `json:"timeout"` // Operation timeout Timeout time.Duration `json:"timeout"` // Operation timeout
PreferLocal bool `json:"prefer_local"` // Prefer local storage PreferLocal bool `json:"prefer_local"` // Prefer local storage
SyncMode SyncMode `json:"sync_mode"` // Synchronization mode SyncMode SyncMode `json:"sync_mode"` // Synchronization mode
} }
// ConsistencyLevel represents consistency requirements // ConsistencyLevel represents consistency requirements
@@ -179,184 +179,187 @@ const (
type SyncMode string type SyncMode string
const ( const (
SyncAsync SyncMode = "async" // Asynchronous synchronization SyncAsync SyncMode = "async" // Asynchronous synchronization
SyncSync SyncMode = "sync" // Synchronous synchronization SyncSync SyncMode = "sync" // Synchronous synchronization
SyncLazy SyncMode = "lazy" // Lazy synchronization SyncLazy SyncMode = "lazy" // Lazy synchronization
) )
// StorageStatistics represents overall storage statistics // StorageStatistics represents overall storage statistics
type StorageStatistics struct { type StorageStatistics struct {
TotalContexts int64 `json:"total_contexts"` // Total stored contexts TotalContexts int64 `json:"total_contexts"` // Total stored contexts
LocalContexts int64 `json:"local_contexts"` // Locally stored contexts LocalContexts int64 `json:"local_contexts"` // Locally stored contexts
DistributedContexts int64 `json:"distributed_contexts"` // Distributed contexts DistributedContexts int64 `json:"distributed_contexts"` // Distributed contexts
TotalSize int64 `json:"total_size"` // Total storage size TotalSize int64 `json:"total_size"` // Total storage size
CompressedSize int64 `json:"compressed_size"` // Compressed storage size CompressedSize int64 `json:"compressed_size"` // Compressed storage size
IndexSize int64 `json:"index_size"` // Search index size IndexSize int64 `json:"index_size"` // Search index size
CacheSize int64 `json:"cache_size"` // Cache size CacheSize int64 `json:"cache_size"` // Cache size
ReplicationFactor float64 `json:"replication_factor"` // Average replication factor ReplicationFactor float64 `json:"replication_factor"` // Average replication factor
AvailableSpace int64 `json:"available_space"` // Available storage space AvailableSpace int64 `json:"available_space"` // Available storage space
LastSyncTime time.Time `json:"last_sync_time"` // Last synchronization LastSyncTime time.Time `json:"last_sync_time"` // Last synchronization
SyncErrors int64 `json:"sync_errors"` // Synchronization errors SyncErrors int64 `json:"sync_errors"` // Synchronization errors
OperationsPerSecond float64 `json:"operations_per_second"` // Operations per second OperationsPerSecond float64 `json:"operations_per_second"` // Operations per second
AverageLatency time.Duration `json:"average_latency"` // Average operation latency AverageLatency time.Duration `json:"average_latency"` // Average operation latency
} }
// LocalStorageStats represents local storage statistics // LocalStorageStats represents local storage statistics
type LocalStorageStats struct { type LocalStorageStats struct {
TotalFiles int64 `json:"total_files"` // Total stored files TotalFiles int64 `json:"total_files"` // Total stored files
TotalSize int64 `json:"total_size"` // Total storage size TotalSize int64 `json:"total_size"` // Total storage size
CompressedSize int64 `json:"compressed_size"` // Compressed size CompressedSize int64 `json:"compressed_size"` // Compressed size
AvailableSpace int64 `json:"available_space"` // Available disk space AvailableSpace int64 `json:"available_space"` // Available disk space
FragmentationRatio float64 `json:"fragmentation_ratio"` // Storage fragmentation FragmentationRatio float64 `json:"fragmentation_ratio"` // Storage fragmentation
LastCompaction time.Time `json:"last_compaction"` // Last compaction time LastCompaction time.Time `json:"last_compaction"` // Last compaction time
ReadOperations int64 `json:"read_operations"` // Read operations count ReadOperations int64 `json:"read_operations"` // Read operations count
WriteOperations int64 `json:"write_operations"` // Write operations count WriteOperations int64 `json:"write_operations"` // Write operations count
AverageReadTime time.Duration `json:"average_read_time"` // Average read time AverageReadTime time.Duration `json:"average_read_time"` // Average read time
AverageWriteTime time.Duration `json:"average_write_time"` // Average write time AverageWriteTime time.Duration `json:"average_write_time"` // Average write time
} }
// DistributedStorageStats represents distributed storage statistics // DistributedStorageStats represents distributed storage statistics
type DistributedStorageStats struct { type DistributedStorageStats struct {
TotalNodes int `json:"total_nodes"` // Total nodes in cluster TotalNodes int `json:"total_nodes"` // Total nodes in cluster
ActiveNodes int `json:"active_nodes"` // Active nodes ActiveNodes int `json:"active_nodes"` // Active nodes
FailedNodes int `json:"failed_nodes"` // Failed nodes FailedNodes int `json:"failed_nodes"` // Failed nodes
TotalReplicas int64 `json:"total_replicas"` // Total replicas TotalReplicas int64 `json:"total_replicas"` // Total replicas
HealthyReplicas int64 `json:"healthy_replicas"` // Healthy replicas HealthyReplicas int64 `json:"healthy_replicas"` // Healthy replicas
UnderReplicated int64 `json:"under_replicated"` // Under-replicated data UnderReplicated int64 `json:"under_replicated"` // Under-replicated data
NetworkLatency time.Duration `json:"network_latency"` // Average network latency NetworkLatency time.Duration `json:"network_latency"` // Average network latency
ReplicationLatency time.Duration `json:"replication_latency"` // Average replication latency ReplicationLatency time.Duration `json:"replication_latency"` // Average replication latency
ConsensusTime time.Duration `json:"consensus_time"` // Average consensus time ConsensusTime time.Duration `json:"consensus_time"` // Average consensus time
LastRebalance time.Time `json:"last_rebalance"` // Last rebalance operation LastRebalance time.Time `json:"last_rebalance"` // Last rebalance operation
} }
// CacheStatistics represents cache performance statistics // CacheStatistics represents cache performance statistics
type CacheStatistics struct { type CacheStatistics struct {
HitRate float64 `json:"hit_rate"` // Cache hit rate HitRate float64 `json:"hit_rate"` // Cache hit rate
MissRate float64 `json:"miss_rate"` // Cache miss rate MissRate float64 `json:"miss_rate"` // Cache miss rate
TotalHits int64 `json:"total_hits"` // Total cache hits TotalHits int64 `json:"total_hits"` // Total cache hits
TotalMisses int64 `json:"total_misses"` // Total cache misses TotalMisses int64 `json:"total_misses"` // Total cache misses
CurrentSize int64 `json:"current_size"` // Current cache size CurrentSize int64 `json:"current_size"` // Current cache size
MaxSize int64 `json:"max_size"` // Maximum cache size MaxSize int64 `json:"max_size"` // Maximum cache size
EvictionCount int64 `json:"eviction_count"` // Number of evictions EvictionCount int64 `json:"eviction_count"` // Number of evictions
AverageLoadTime time.Duration `json:"average_load_time"` // Average cache load time AverageLoadTime time.Duration `json:"average_load_time"` // Average cache load time
LastEviction time.Time `json:"last_eviction"` // Last eviction time LastEviction time.Time `json:"last_eviction"` // Last eviction time
MemoryUsage int64 `json:"memory_usage"` // Memory usage in bytes MemoryUsage int64 `json:"memory_usage"` // Memory usage in bytes
} }
// CachePolicy represents caching policy configuration // CachePolicy represents caching policy configuration
type CachePolicy struct { type CachePolicy struct {
TTL time.Duration `json:"ttl"` // Default TTL TTL time.Duration `json:"ttl"` // Default TTL
MaxSize int64 `json:"max_size"` // Maximum cache size MaxSize int64 `json:"max_size"` // Maximum cache size
EvictionPolicy string `json:"eviction_policy"` // Eviction policy (LRU, LFU, etc.) EvictionPolicy string `json:"eviction_policy"` // Eviction policy (LRU, LFU, etc.)
RefreshThreshold float64 `json:"refresh_threshold"` // Refresh threshold RefreshThreshold float64 `json:"refresh_threshold"` // Refresh threshold
WarmupEnabled bool `json:"warmup_enabled"` // Enable cache warmup WarmupEnabled bool `json:"warmup_enabled"` // Enable cache warmup
CompressEntries bool `json:"compress_entries"` // Compress cache entries CompressEntries bool `json:"compress_entries"` // Compress cache entries
MaxEntrySize int64 `json:"max_entry_size"` // Maximum entry size MaxEntrySize int64 `json:"max_entry_size"` // Maximum entry size
} }
// IndexConfig represents search index configuration.
type IndexConfig struct {
	Name            string              `json:"name"`              // Index name
	Fields          []string            `json:"fields"`            // Indexed fields
	Analyzer        string              `json:"analyzer"`          // Text analyzer
	Language        string              `json:"language"`          // Index language
	CaseSensitive   bool                `json:"case_sensitive"`    // Case sensitivity for matching
	Stemming        bool                `json:"stemming"`          // Enable stemming
	StopWords       []string            `json:"stop_words"`        // Stop words list
	Synonyms        map[string][]string `json:"synonyms"`          // Synonym mappings (term -> equivalents)
	MaxDocumentSize int64               `json:"max_document_size"` // Maximum document size
	RefreshInterval time.Duration       `json:"refresh_interval"`  // Index refresh interval
}
// IndexStatistics represents runtime statistics for a search index.
type IndexStatistics struct {
	Name               string        `json:"name"`                // Index name
	DocumentCount      int64         `json:"document_count"`      // Total documents in the index
	IndexSize          int64         `json:"index_size"`          // Index size in bytes
	LastUpdate         time.Time     `json:"last_update"`         // Last update time
	QueryCount         int64         `json:"query_count"`         // Total queries served
	AverageQueryTime   time.Duration `json:"average_query_time"`  // Average query time
	SuccessRate        float64       `json:"success_rate"`        // Query success rate
	FragmentationRatio float64       `json:"fragmentation_ratio"` // Index fragmentation ratio
	LastOptimization   time.Time     `json:"last_optimization"`   // Last optimization time
}
// BackupConfig represents the configuration for a single backup run.
type BackupConfig struct {
	Name           string                 `json:"name"`            // Backup name
	Destination    string                 `json:"destination"`     // Backup destination
	IncludeIndexes bool                   `json:"include_indexes"` // Include search indexes
	IncludeCache   bool                   `json:"include_cache"`   // Include cache data
	Compression    bool                   `json:"compression"`     // Enable compression
	Encryption     bool                   `json:"encryption"`      // Enable encryption
	// NOTE(review): the key is serialized with the rest of the config; confirm
	// this struct is never persisted or logged in plaintext.
	EncryptionKey string                 `json:"encryption_key"` // Encryption key
	Incremental   bool                   `json:"incremental"`    // Incremental backup
	Retention     time.Duration          `json:"retention"`      // Backup retention period
	Metadata      map[string]interface{} `json:"metadata"`       // Additional metadata
}
// BackupInfo represents information about a backup // BackupInfo represents information about a backup
type BackupInfo struct { type BackupInfo struct {
ID string `json:"id"` // Backup ID ID string `json:"id"` // Backup ID
Name string `json:"name"` // Backup name Name string `json:"name"` // Backup name
CreatedAt time.Time `json:"created_at"` // Creation time CreatedAt time.Time `json:"created_at"` // Creation time
Size int64 `json:"size"` // Backup size Size int64 `json:"size"` // Backup size
CompressedSize int64 `json:"compressed_size"` // Compressed size CompressedSize int64 `json:"compressed_size"` // Compressed size
ContextCount int64 `json:"context_count"` // Number of contexts ContextCount int64 `json:"context_count"` // Number of contexts
Encrypted bool `json:"encrypted"` // Whether encrypted Encrypted bool `json:"encrypted"` // Whether encrypted
Incremental bool `json:"incremental"` // Whether incremental Incremental bool `json:"incremental"` // Whether incremental
ParentBackupID string `json:"parent_backup_id"` // Parent backup for incremental ParentBackupID string `json:"parent_backup_id"` // Parent backup for incremental
Checksum string `json:"checksum"` // Backup checksum Checksum string `json:"checksum"` // Backup checksum
Status BackupStatus `json:"status"` // Backup status Status BackupStatus `json:"status"` // Backup status
Metadata map[string]interface{} `json:"metadata"` // Additional metadata Metadata map[string]interface{} `json:"metadata"` // Additional metadata
} }
// BackupStatus represents the lifecycle state of a backup.
type BackupStatus string

// Backup status values. The BackupStatus* prefix keeps these constants
// unambiguous alongside other Backup* identifiers in this package.
const (
	BackupStatusInProgress BackupStatus = "in_progress" // Backup is currently running
	BackupStatusCompleted  BackupStatus = "completed"   // Backup finished successfully
	BackupStatusFailed     BackupStatus = "failed"      // Backup terminated with an error
	BackupStatusCorrupted  BackupStatus = "corrupted"   // Backup data failed validation
)
// DistributedStorageOptions aliases DistributedStoreOptions for backwards compatibility.
//
// Deprecated: use DistributedStoreOptions directly in new code.
type DistributedStorageOptions = DistributedStoreOptions
// RestoreConfig represents the configuration for restoring from a backup.
type RestoreConfig struct {
	BackupID          string                 `json:"backup_id"`          // Backup to restore from
	Destination       string                 `json:"destination"`        // Restore destination
	OverwriteExisting bool                   `json:"overwrite_existing"` // Overwrite existing data
	RestoreIndexes    bool                   `json:"restore_indexes"`    // Restore search indexes
	RestoreCache      bool                   `json:"restore_cache"`      // Restore cache data
	ValidateIntegrity bool                   `json:"validate_integrity"` // Validate data integrity after restore
	DecryptionKey     string                 `json:"decryption_key"`     // Decryption key for encrypted backups
	Metadata          map[string]interface{} `json:"metadata"`           // Additional metadata
}
// BackupValidation represents the results of validating a backup's integrity.
type BackupValidation struct {
	BackupID       string        `json:"backup_id"`       // Backup ID that was validated
	Valid          bool          `json:"valid"`           // Whether the backup is valid overall
	ChecksumMatch  bool          `json:"checksum_match"`  // Whether the stored checksum matches
	CorruptedFiles []string      `json:"corrupted_files"` // List of corrupted files
	MissingFiles   []string      `json:"missing_files"`   // List of missing files
	ValidationTime time.Duration `json:"validation_time"` // Validation duration
	ValidatedAt    time.Time     `json:"validated_at"`    // When the validation ran
	ErrorCount     int           `json:"error_count"`     // Number of errors found
	WarningCount   int           `json:"warning_count"`   // Number of warnings found
}
// BackupSchedule represents automatic backup scheduling // BackupSchedule represents automatic backup scheduling
type BackupSchedule struct { type BackupSchedule struct {
ID string `json:"id"` // Schedule ID ID string `json:"id"` // Schedule ID
Name string `json:"name"` // Schedule name Name string `json:"name"` // Schedule name
Cron string `json:"cron"` // Cron expression Cron string `json:"cron"` // Cron expression
BackupConfig *BackupConfig `json:"backup_config"` // Backup configuration BackupConfig *BackupConfig `json:"backup_config"` // Backup configuration
Enabled bool `json:"enabled"` // Whether schedule is enabled Enabled bool `json:"enabled"` // Whether schedule is enabled
LastRun *time.Time `json:"last_run,omitempty"` // Last execution time LastRun *time.Time `json:"last_run,omitempty"` // Last execution time
NextRun *time.Time `json:"next_run,omitempty"` // Next scheduled execution NextRun *time.Time `json:"next_run,omitempty"` // Next scheduled execution
ConsecutiveFailures int `json:"consecutive_failures"` // Consecutive failure count ConsecutiveFailures int `json:"consecutive_failures"` // Consecutive failure count
MaxFailures int `json:"max_failures"` // Max allowed failures MaxFailures int `json:"max_failures"` // Max allowed failures
} }
// BackupStatistics represents backup statistics // BackupStatistics represents backup statistics
@@ -370,4 +373,4 @@ type BackupStatistics struct {
OldestBackup time.Time `json:"oldest_backup"` // Oldest backup time OldestBackup time.Time `json:"oldest_backup"` // Oldest backup time
CompressionRatio float64 `json:"compression_ratio"` // Average compression ratio CompressionRatio float64 `json:"compression_ratio"` // Average compression ratio
EncryptionEnabled bool `json:"encryption_enabled"` // Whether encryption is enabled EncryptionEnabled bool `json:"encryption_enabled"` // Whether encryption is enabled
} }