chore: align slurp config and scaffolding
@@ -164,6 +164,8 @@ func (bm *BackupManagerImpl) CreateBackup(
        Incremental:    config.Incremental,
        ParentBackupID: config.ParentBackupID,
        Status:         BackupStatusInProgress,
        Progress:       0,
        ErrorMessage:   "",
        CreatedAt:      time.Now(),
        RetentionUntil: time.Now().Add(config.Retention),
    }

@@ -707,6 +709,7 @@ func (bm *BackupManagerImpl) validateFile(filePath string) error {
func (bm *BackupManagerImpl) failBackup(job *BackupJob, backupInfo *BackupInfo, err error) {
    bm.mu.Lock()
    backupInfo.Status = BackupStatusFailed
    backupInfo.Progress = 0
    backupInfo.ErrorMessage = err.Error()
    job.Error = err
    bm.mu.Unlock()

@@ -3,18 +3,19 @@ package storage

import (
    "context"
    "fmt"
    "strings"
    "sync"
    "time"

    slurpContext "chorus/pkg/slurp/context"
    "chorus/pkg/ucxl"
)

// BatchOperationsImpl provides efficient batch operations for context storage
type BatchOperationsImpl struct {
    contextStore     *ContextStoreImpl
    batchSize        int
    maxConcurrency   int
    operationTimeout time.Duration
}

@@ -22,8 +23,8 @@ type BatchOperationsImpl struct {
func NewBatchOperations(contextStore *ContextStoreImpl, batchSize, maxConcurrency int, timeout time.Duration) *BatchOperationsImpl {
    return &BatchOperationsImpl{
        contextStore:     contextStore,
        batchSize:        batchSize,
        maxConcurrency:   maxConcurrency,
        operationTimeout: timeout,
    }
}
@@ -89,7 +90,7 @@ func (cs *ContextStoreImpl) BatchStore(
            result.ErrorCount++
            key := workResult.Item.Context.UCXLAddress.String()
            result.Errors[key] = workResult.Error

            if batch.FailOnError {
                // Cancel remaining operations
                result.ProcessingTime = time.Since(start)

@@ -164,11 +165,11 @@ func (cs *ContextStoreImpl) BatchRetrieve(
    // Process results
    for workResult := range resultsCh {
        addressStr := workResult.Address.String()

        if workResult.Error != nil {
            result.ErrorCount++
            result.Errors[addressStr] = workResult.Error

            if batch.FailOnError {
                // Cancel remaining operations
                result.ProcessingTime = time.Since(start)

@@ -4,7 +4,6 @@ import (
    "context"
    "encoding/json"
    "fmt"
    "regexp"
    "sync"
    "time"

@@ -13,13 +12,13 @@ import (

// CacheManagerImpl implements the CacheManager interface using Redis
type CacheManagerImpl struct {
    mu         sync.RWMutex
    client     *redis.Client
    stats      *CacheStatistics
    policy     *CachePolicy
    prefix     string
    nodeID     string
    warmupKeys map[string]bool
}

// NewCacheManager creates a new cache manager with Redis backend
@@ -43,7 +42,7 @@ func NewCacheManager(redisAddr, nodeID string, policy *CachePolicy) (*CacheManag
    // Test connection
    ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
    defer cancel()

    if err := client.Ping(ctx).Err(); err != nil {
        return nil, fmt.Errorf("failed to connect to Redis: %w", err)
    }

@@ -68,13 +67,13 @@ func NewCacheManager(redisAddr, nodeID string, policy *CachePolicy) (*CacheManag
// DefaultCachePolicy returns default caching policy
func DefaultCachePolicy() *CachePolicy {
    return &CachePolicy{
        TTL:              24 * time.Hour,
        MaxSize:          1024 * 1024 * 1024, // 1GB
        EvictionPolicy:   "LRU",
        RefreshThreshold: 0.8, // Refresh when 80% of TTL elapsed
        WarmupEnabled:    true,
        CompressEntries:  true,
        MaxEntrySize:     10 * 1024 * 1024, // 10MB
    }
}

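Reviewer note (not part of the diff): a minimal sketch of how a caller might wire the cache manager with this default policy. It relies only on the NewCacheManager and DefaultCachePolicy signatures shown above; the Redis address, node ID, and the shortened TTL are illustrative placeholders.

func newDefaultCache() (*CacheManagerImpl, error) {
    policy := DefaultCachePolicy()
    policy.TTL = 6 * time.Hour // example override: shorter TTL for hot data
    return NewCacheManager("localhost:6379", "node-1", policy)
}
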
@@ -203,7 +202,7 @@ func (cm *CacheManagerImpl) Set(
// Delete removes data from cache
func (cm *CacheManagerImpl) Delete(ctx context.Context, key string) error {
    cacheKey := cm.buildCacheKey(key)

    if err := cm.client.Del(ctx, cacheKey).Err(); err != nil {
        return fmt.Errorf("cache delete error: %w", err)
    }

@@ -215,37 +214,37 @@ func (cm *CacheManagerImpl) Delete(ctx context.Context, key string) error {
func (cm *CacheManagerImpl) DeletePattern(ctx context.Context, pattern string) error {
    // Build full pattern with prefix
    fullPattern := cm.buildCacheKey(pattern)

    // Use Redis SCAN to find matching keys
    var cursor uint64
    var keys []string

    for {
        result, nextCursor, err := cm.client.Scan(ctx, cursor, fullPattern, 100).Result()
        if err != nil {
            return fmt.Errorf("cache scan error: %w", err)
        }

        keys = append(keys, result...)
        cursor = nextCursor

        if cursor == 0 {
            break
        }
    }

    // Delete found keys in batches
    if len(keys) > 0 {
        pipeline := cm.client.Pipeline()
        for _, key := range keys {
            pipeline.Del(ctx, key)
        }

        if _, err := pipeline.Exec(ctx); err != nil {
            return fmt.Errorf("cache batch delete error: %w", err)
        }
    }

    return nil
}

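Reviewer note (not part of the diff): a sketch of pattern-based invalidation built on DeletePattern. The "context:" key prefix is a hypothetical naming convention used only for illustration; the only API relied upon is the DeletePattern signature above.

// invalidateAddress drops every cached entry whose key starts with the given address.
func (cm *CacheManagerImpl) invalidateAddress(ctx context.Context, addr string) error {
    return cm.DeletePattern(ctx, "context:"+addr+":*")
}
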
@@ -282,7 +281,7 @@ func (cm *CacheManagerImpl) GetCacheStats() (*CacheStatistics, error) {
    // Update Redis memory usage
    ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
    defer cancel()

    info, err := cm.client.Info(ctx, "memory").Result()
    if err == nil {
        // Parse memory info to get actual usage

@@ -314,17 +313,17 @@ func (cm *CacheManagerImpl) SetCachePolicy(policy *CachePolicy) error {

// CacheEntry represents a cached data entry with metadata
type CacheEntry struct {
    Key            string        `json:"key"`
    Data           []byte        `json:"data"`
    CreatedAt      time.Time     `json:"created_at"`
    ExpiresAt      time.Time     `json:"expires_at"`
    TTL            time.Duration `json:"ttl"`
    AccessCount    int64         `json:"access_count"`
    LastAccessedAt time.Time     `json:"last_accessed_at"`
    Compressed     bool          `json:"compressed"`
    OriginalSize   int64         `json:"original_size"`
    CompressedSize int64         `json:"compressed_size"`
    NodeID         string        `json:"node_id"`
}

// Helper methods
@@ -361,7 +360,7 @@ func (cm *CacheManagerImpl) recordMiss() {
func (cm *CacheManagerImpl) updateAccessStats(duration time.Duration) {
    cm.mu.Lock()
    defer cm.mu.Unlock()

    if cm.stats.AverageLoadTime == 0 {
        cm.stats.AverageLoadTime = duration
    } else {

@@ -3,20 +3,18 @@ package storage

import (
    "bytes"
    "context"
    "os"
    "strings"
    "testing"
    "time"
)

func TestLocalStorageCompression(t *testing.T) {
    // Create temporary directory for test
    tempDir := t.TempDir()

    // Create storage with compression enabled
    options := DefaultLocalStorageOptions()
    options.Compression = true

    storage, err := NewLocalStorage(tempDir, options)
    if err != nil {
        t.Fatalf("Failed to create storage: %v", err)

@@ -25,24 +23,24 @@ func TestLocalStorageCompression(t *testing.T) {

    // Test data that should compress well
    largeData := strings.Repeat("This is a test string that should compress well! ", 100)

    // Store with compression enabled
    storeOptions := &StoreOptions{
        Compress: true,
    }

    ctx := context.Background()
    err = storage.Store(ctx, "test-compress", largeData, storeOptions)
    if err != nil {
        t.Fatalf("Failed to store compressed data: %v", err)
    }

    // Retrieve and verify
    retrieved, err := storage.Retrieve(ctx, "test-compress")
    if err != nil {
        t.Fatalf("Failed to retrieve compressed data: %v", err)
    }

    // Verify data integrity
    if retrievedStr, ok := retrieved.(string); ok {
        if retrievedStr != largeData {

@@ -51,21 +49,21 @@ func TestLocalStorageCompression(t *testing.T) {
    } else {
        t.Error("Retrieved data is not a string")
    }

    // Check compression stats
    stats, err := storage.GetCompressionStats()
    if err != nil {
        t.Fatalf("Failed to get compression stats: %v", err)
    }

    if stats.CompressedEntries == 0 {
        t.Error("Expected at least one compressed entry")
    }

    if stats.CompressionRatio == 0 {
        t.Error("Expected non-zero compression ratio")
    }

    t.Logf("Compression stats: %d/%d entries compressed, ratio: %.2f",
        stats.CompressedEntries, stats.TotalEntries, stats.CompressionRatio)
}
@@ -81,27 +79,27 @@ func TestCompressionMethods(t *testing.T) {

    // Test data
    originalData := []byte(strings.Repeat("Hello, World! ", 1000))

    // Test compression
    compressed, err := storage.compress(originalData)
    if err != nil {
        t.Fatalf("Compression failed: %v", err)
    }

    t.Logf("Original size: %d bytes", len(originalData))
    t.Logf("Compressed size: %d bytes", len(compressed))

    // Compressed data should be smaller for repetitive data
    if len(compressed) >= len(originalData) {
        t.Log("Compression didn't reduce size (may be expected for small or non-repetitive data)")
    }

    // Test decompression
    decompressed, err := storage.decompress(compressed)
    if err != nil {
        t.Fatalf("Decompression failed: %v", err)
    }

    // Verify data integrity
    if !bytes.Equal(originalData, decompressed) {
        t.Error("Decompressed data doesn't match original")

@@ -111,7 +109,7 @@ func TestCompressionMethods(t *testing.T) {
func TestStorageOptimization(t *testing.T) {
    // Create temporary directory for test
    tempDir := t.TempDir()

    storage, err := NewLocalStorage(tempDir, nil)
    if err != nil {
        t.Fatalf("Failed to create storage: %v", err)

@@ -119,7 +117,7 @@ func TestStorageOptimization(t *testing.T) {
    defer storage.Close()

    ctx := context.Background()

    // Store multiple entries without compression
    testData := []struct {
        key string

@@ -130,50 +128,50 @@ func TestStorageOptimization(t *testing.T) {
        {"large2", strings.Repeat("Another large repetitive dataset ", 100)},
        {"medium", strings.Repeat("Medium data ", 50)},
    }

    for _, item := range testData {
        err = storage.Store(ctx, item.key, item.data, &StoreOptions{Compress: false})
        if err != nil {
            t.Fatalf("Failed to store %s: %v", item.key, err)
        }
    }

    // Check initial stats
    initialStats, err := storage.GetCompressionStats()
    if err != nil {
        t.Fatalf("Failed to get initial stats: %v", err)
    }

    t.Logf("Initial: %d entries, %d compressed",
        initialStats.TotalEntries, initialStats.CompressedEntries)

    // Optimize storage with threshold (only compress entries larger than 100 bytes)
    err = storage.OptimizeStorage(ctx, 100)
    if err != nil {
        t.Fatalf("Storage optimization failed: %v", err)
    }

    // Check final stats
    finalStats, err := storage.GetCompressionStats()
    if err != nil {
        t.Fatalf("Failed to get final stats: %v", err)
    }

    t.Logf("Final: %d entries, %d compressed",
        finalStats.TotalEntries, finalStats.CompressedEntries)

    // Should have more compressed entries after optimization
    if finalStats.CompressedEntries <= initialStats.CompressedEntries {
        t.Log("Note: Optimization didn't increase compressed entries (may be expected)")
    }

    // Verify all data is still retrievable
    for _, item := range testData {
        retrieved, err := storage.Retrieve(ctx, item.key)
        if err != nil {
            t.Fatalf("Failed to retrieve %s after optimization: %v", item.key, err)
        }

        if retrievedStr, ok := retrieved.(string); ok {
            if retrievedStr != item.data {
                t.Errorf("Data mismatch for %s after optimization", item.key)

@@ -193,26 +191,26 @@ func TestCompressionFallback(t *testing.T) {

    // Random-like data that won't compress well
    randomData := []byte("a1b2c3d4e5f6g7h8i9j0k1l2m3n4o5p6q7r8s9t0u1v2w3x4y5z6")

    // Test compression
    compressed, err := storage.compress(randomData)
    if err != nil {
        t.Fatalf("Compression failed: %v", err)
    }

    // Should return original data if compression doesn't help
    if len(compressed) >= len(randomData) {
        t.Log("Compression correctly returned original data for incompressible input")
    }

    // Test decompression of uncompressed data
    decompressed, err := storage.decompress(randomData)
    if err != nil {
        t.Fatalf("Decompression fallback failed: %v", err)
    }

    // Should return original data unchanged
    if !bytes.Equal(randomData, decompressed) {
        t.Error("Decompression fallback changed data")
    }
    }
}

@@ -2,71 +2,68 @@ package storage

import (
    "context"
    "encoding/json"
    "fmt"
    "sync"
    "time"

    "chorus/pkg/crypto"
    "chorus/pkg/dht"
    slurpContext "chorus/pkg/slurp/context"
    "chorus/pkg/ucxl"
)

// ContextStoreImpl is the main implementation of the ContextStore interface
// It coordinates between local storage, distributed storage, encryption, caching, and indexing
type ContextStoreImpl struct {
    mu                 sync.RWMutex
    localStorage       LocalStorage
    distributedStorage DistributedStorage
    encryptedStorage   EncryptedStorage
    cacheManager       CacheManager
    indexManager       IndexManager
    backupManager      BackupManager
    eventNotifier      EventNotifier

    // Configuration
    nodeID  string
    options *ContextStoreOptions

    // Statistics and monitoring
    statistics       *StorageStatistics
    metricsCollector *MetricsCollector

    // Background processes
    stopCh           chan struct{}
    syncTicker       *time.Ticker
    compactionTicker *time.Ticker
    cleanupTicker    *time.Ticker
}

// ContextStoreOptions configures the context store behavior
type ContextStoreOptions struct {
    // Storage configuration
    PreferLocal        bool `json:"prefer_local"`
    AutoReplicate      bool `json:"auto_replicate"`
    DefaultReplicas    int  `json:"default_replicas"`
    EncryptionEnabled  bool `json:"encryption_enabled"`
    CompressionEnabled bool `json:"compression_enabled"`

    // Caching configuration
    CachingEnabled bool          `json:"caching_enabled"`
    CacheTTL       time.Duration `json:"cache_ttl"`
    CacheSize      int64         `json:"cache_size"`

    // Indexing configuration
    IndexingEnabled      bool          `json:"indexing_enabled"`
    IndexRefreshInterval time.Duration `json:"index_refresh_interval"`

    // Background processes
    SyncInterval       time.Duration `json:"sync_interval"`
    CompactionInterval time.Duration `json:"compaction_interval"`
    CleanupInterval    time.Duration `json:"cleanup_interval"`

    // Performance tuning
    BatchSize        int           `json:"batch_size"`
    MaxConcurrentOps int           `json:"max_concurrent_ops"`
    OperationTimeout time.Duration `json:"operation_timeout"`
}

// MetricsCollector collects and aggregates storage metrics
@@ -87,16 +84,16 @@ func DefaultContextStoreOptions() *ContextStoreOptions {
        EncryptionEnabled:    true,
        CompressionEnabled:   true,
        CachingEnabled:       true,
        CacheTTL:             24 * time.Hour,
        CacheSize:            1024 * 1024 * 1024, // 1GB
        IndexingEnabled:      true,
        IndexRefreshInterval: 5 * time.Minute,
        SyncInterval:         10 * time.Minute,
        CompactionInterval:   24 * time.Hour,
        CleanupInterval:      1 * time.Hour,
        BatchSize:            100,
        MaxConcurrentOps:     10,
        OperationTimeout:     30 * time.Second,
    }
}

@@ -124,8 +121,8 @@ func NewContextStore(
        indexManager:  indexManager,
        backupManager: backupManager,
        eventNotifier: eventNotifier,
        nodeID:        nodeID,
        options:       options,
        statistics: &StorageStatistics{
            LastSyncTime: time.Now(),
        },

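Reviewer note (not part of the diff): a sketch showing how the defaults above can be tightened for a smaller node. Field names come from ContextStoreOptions; the values are illustrative only.

func smallNodeOptions() *ContextStoreOptions {
    opts := DefaultContextStoreOptions()
    opts.DefaultReplicas = 3
    opts.OperationTimeout = 10 * time.Second
    opts.CacheSize = 256 * 1024 * 1024 // 256MB instead of the 1GB default
    return opts
}
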
@@ -174,11 +171,11 @@ func (cs *ContextStoreImpl) StoreContext(
    } else {
        // Store unencrypted
        storeOptions := &StoreOptions{
            Encrypt:   false,
            Replicate: cs.options.AutoReplicate,
            Index:     cs.options.IndexingEnabled,
            Cache:     cs.options.CachingEnabled,
            Compress:  cs.options.CompressionEnabled,
        }
        storeErr = cs.localStorage.Store(ctx, storageKey, node, storeOptions)
    }

@@ -212,14 +209,14 @@ func (cs *ContextStoreImpl) StoreContext(
    go func() {
        replicateCtx, cancel := context.WithTimeout(context.Background(), cs.options.OperationTimeout)
        defer cancel()

        distOptions := &DistributedStoreOptions{
            ReplicationFactor: cs.options.DefaultReplicas,
            ConsistencyLevel:  ConsistencyQuorum,
            Timeout:           cs.options.OperationTimeout,
            SyncMode:          SyncAsync,
        }

        if err := cs.distributedStorage.Store(replicateCtx, storageKey, node, distOptions); err != nil {
            cs.recordError("replicate", err)
        }

@@ -523,11 +520,11 @@ func (cs *ContextStoreImpl) recordOperation(operation string) {
func (cs *ContextStoreImpl) recordLatency(operation string, latency time.Duration) {
    cs.metricsCollector.mu.Lock()
    defer cs.metricsCollector.mu.Unlock()

    if cs.metricsCollector.latencyHistogram[operation] == nil {
        cs.metricsCollector.latencyHistogram[operation] = make([]time.Duration, 0, 100)
    }

    // Keep only last 100 samples
    histogram := cs.metricsCollector.latencyHistogram[operation]
    if len(histogram) >= 100 {

@@ -541,7 +538,7 @@ func (cs *ContextStoreImpl) recordError(operation string, err error) {
    cs.metricsCollector.mu.Lock()
    defer cs.metricsCollector.mu.Unlock()
    cs.metricsCollector.errorCount[operation]++

    // Log the error (in production, use proper logging)
    fmt.Printf("Storage error in %s: %v\n", operation, err)
}

@@ -614,7 +611,7 @@ func (cs *ContextStoreImpl) performCleanup(ctx context.Context) {
    if err := cs.cacheManager.Clear(ctx); err != nil {
        cs.recordError("cache_cleanup", err)
    }

    // Clean old metrics
    cs.cleanupMetrics()
}

@@ -622,7 +619,7 @@ func (cs *ContextStoreImpl) performCleanup(ctx context.Context) {
func (cs *ContextStoreImpl) cleanupMetrics() {
    cs.metricsCollector.mu.Lock()
    defer cs.metricsCollector.mu.Unlock()

    // Reset histograms that are too large
    for operation, histogram := range cs.metricsCollector.latencyHistogram {
        if len(histogram) > 1000 {

@@ -729,7 +726,7 @@ func (cs *ContextStoreImpl) Sync(ctx context.Context) error {
        Type:      EventSynced,
        Timestamp: time.Now(),
        Metadata: map[string]interface{}{
            "node_id":   cs.nodeID,
            "sync_time": time.Since(start),
        },
    }

@@ -8,69 +8,68 @@ import (
    "time"

    "chorus/pkg/dht"
    "chorus/pkg/types"
)

// DistributedStorageImpl implements the DistributedStorage interface
type DistributedStorageImpl struct {
    mu        sync.RWMutex
    dht       dht.DHT
    nodeID    string
    metrics   *DistributedStorageStats
    replicas  map[string][]string // key -> replica node IDs
    heartbeat *HeartbeatManager
    consensus *ConsensusManager
    options   *DistributedStorageOptions
}

// HeartbeatManager manages node heartbeats and health
type HeartbeatManager struct {
    mu                sync.RWMutex
    nodes             map[string]*NodeHealth
    heartbeatInterval time.Duration
    timeoutThreshold  time.Duration
    stopCh            chan struct{}
}

// NodeHealth tracks the health of a distributed storage node
type NodeHealth struct {
    NodeID       string        `json:"node_id"`
    LastSeen     time.Time     `json:"last_seen"`
    Latency      time.Duration `json:"latency"`
    IsActive     bool          `json:"is_active"`
    FailureCount int           `json:"failure_count"`
    Load         float64       `json:"load"`
}

// ConsensusManager handles consensus operations for distributed storage
type ConsensusManager struct {
    mu            sync.RWMutex
    pendingOps    map[string]*ConsensusOperation
    votingTimeout time.Duration
    quorumSize    int
}

// ConsensusOperation represents a distributed operation requiring consensus
type ConsensusOperation struct {
    ID        string             `json:"id"`
    Type      string             `json:"type"`
    Key       string             `json:"key"`
    Data      interface{}        `json:"data"`
    Initiator string             `json:"initiator"`
    Votes     map[string]bool    `json:"votes"`
    CreatedAt time.Time          `json:"created_at"`
    Status    ConsensusStatus    `json:"status"`
    Callback  func(bool, error)  `json:"-"`
}

// ConsensusStatus represents the status of a consensus operation
type ConsensusStatus string

const (
    ConsensusPending  ConsensusStatus = "pending"
    ConsensusApproved ConsensusStatus = "approved"
    ConsensusRejected ConsensusStatus = "rejected"
    ConsensusTimeout  ConsensusStatus = "timeout"
)

// NewDistributedStorage creates a new distributed storage implementation
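Reviewer note (not part of the diff): a sketch of how a vote tally over ConsensusOperation.Votes could resolve to one of the ConsensusStatus values above, assuming quorumSize means the minimum number of approving nodes.

func (cm *ConsensusManager) tally(op *ConsensusOperation) ConsensusStatus {
    cm.mu.RLock()
    defer cm.mu.RUnlock()

    approvals := 0
    for _, approved := range op.Votes {
        if approved {
            approvals++
        }
    }
    switch {
    case approvals >= cm.quorumSize:
        return ConsensusApproved
    case time.Since(op.CreatedAt) > cm.votingTimeout:
        return ConsensusTimeout
    default:
        return ConsensusPending
    }
}
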
@@ -83,9 +82,9 @@ func NewDistributedStorage(
        options = &DistributedStoreOptions{
            ReplicationFactor: 3,
            ConsistencyLevel:  ConsistencyQuorum,
            Timeout:           30 * time.Second,
            PreferLocal:       true,
            SyncMode:          SyncAsync,
        }
    }

@@ -98,10 +97,10 @@ func NewDistributedStorage(
            LastRebalance: time.Now(),
        },
        heartbeat: &HeartbeatManager{
            nodes:             make(map[string]*NodeHealth),
            heartbeatInterval: 30 * time.Second,
            timeoutThreshold:  90 * time.Second,
            stopCh:            make(chan struct{}),
        },
        consensus: &ConsensusManager{
            pendingOps: make(map[string]*ConsensusOperation),

@@ -125,8 +124,6 @@ func (ds *DistributedStorageImpl) Store(
    data interface{},
    options *DistributedStoreOptions,
) error {
    start := time.Now()

    if options == nil {
        options = ds.options
    }

@@ -179,7 +176,7 @@ func (ds *DistributedStorageImpl) Retrieve(
    // Try local first if prefer local is enabled
    if ds.options.PreferLocal {
-       if localData, err := ds.dht.Get(key); err == nil {
+       if localData, err := ds.dht.GetValue(ctx, key); err == nil {
            return ds.deserializeEntry(localData)
        }
    }

@@ -226,25 +223,9 @@ func (ds *DistributedStorageImpl) Exists(
    ctx context.Context,
    key string,
) (bool, error) {
    // Try local first
    if ds.options.PreferLocal {
-       if exists, err := ds.dht.Exists(key); err == nil {
-           return exists, nil
-       }
+   if _, err := ds.dht.GetValue(ctx, key); err == nil {
+       return true, nil
    }

    // Check replicas
    replicas, err := ds.getReplicationNodes(key)
    if err != nil {
        return false, fmt.Errorf("failed to get replication nodes: %w", err)
    }

    for _, nodeID := range replicas {
        if exists, err := ds.checkExistsOnNode(ctx, nodeID, key); err == nil && exists {
            return true, nil
        }
    }

    return false, nil
}

@@ -306,10 +287,7 @@ func (ds *DistributedStorageImpl) FindReplicas(

// Sync synchronizes with other DHT nodes
func (ds *DistributedStorageImpl) Sync(ctx context.Context) error {
-   start := time.Now()
-   defer func() {
-       ds.metrics.LastRebalance = time.Now()
-   }()
+   ds.metrics.LastRebalance = time.Now()

    // Get list of active nodes
    activeNodes := ds.heartbeat.getActiveNodes()

@@ -346,7 +324,7 @@ func (ds *DistributedStorageImpl) GetDistributedStats() (*DistributedStorageStat
    healthyReplicas := int64(0)
    underReplicated := int64(0)

-   for key, replicas := range ds.replicas {
+   for _, replicas := range ds.replicas {
        totalReplicas += int64(len(replicas))
        healthy := 0
        for _, nodeID := range replicas {

@@ -371,14 +349,14 @@ func (ds *DistributedStorageImpl) GetDistributedStats() (*DistributedStorageStat

// DistributedEntry represents a distributed storage entry
type DistributedEntry struct {
    Key               string           `json:"key"`
    Data              []byte           `json:"data"`
    ReplicationFactor int              `json:"replication_factor"`
    ConsistencyLevel  ConsistencyLevel `json:"consistency_level"`
    CreatedAt         time.Time        `json:"created_at"`
    UpdatedAt         time.Time        `json:"updated_at"`
    Version           int64            `json:"version"`
    Checksum          string           `json:"checksum"`
}

// Helper methods implementation
@@ -394,7 +372,7 @@ func (ds *DistributedStorageImpl) selectReplicationNodes(key string, replication
    // This is a simplified version - production would use proper consistent hashing
    nodes := make([]string, 0, replicationFactor)
    hash := ds.calculateKeyHash(key)

    // Select nodes in a deterministic way based on key hash
    for i := 0; i < replicationFactor && i < len(activeNodes); i++ {
        nodeIndex := (int(hash) + i) % len(activeNodes)

@@ -405,13 +383,13 @@ func (ds *DistributedStorageImpl) selectReplicationNodes(key string, replication
}

func (ds *DistributedStorageImpl) storeEventual(ctx context.Context, entry *DistributedEntry, nodes []string) error {
-   // Store asynchronously on all nodes
+   // Store asynchronously on all nodes for SEC-SLURP-1.1a replication policy
    errCh := make(chan error, len(nodes))

    for _, nodeID := range nodes {
        go func(node string) {
            err := ds.storeOnNode(ctx, node, entry)
-           errorCh <- err
+           errCh <- err
        }(nodeID)
    }

@@ -429,7 +407,7 @@ func (ds *DistributedStorageImpl) storeEventual(ctx context.Context, entry *Dist
    // If first failed, try to get at least one success
    timer := time.NewTimer(10 * time.Second)
    defer timer.Stop()

    for i := 1; i < len(nodes); i++ {
        select {
        case err := <-errCh:

@@ -445,13 +423,13 @@ func (ds *DistributedStorageImpl) storeEventual(ctx context.Context, entry *Dist
}

func (ds *DistributedStorageImpl) storeStrong(ctx context.Context, entry *DistributedEntry, nodes []string) error {
-   // Store synchronously on all nodes
+   // Store synchronously on all nodes per SEC-SLURP-1.1a durability target
    errCh := make(chan error, len(nodes))

    for _, nodeID := range nodes {
        go func(node string) {
            err := ds.storeOnNode(ctx, node, entry)
-           errorCh <- err
+           errCh <- err
        }(nodeID)
    }

@@ -476,21 +454,21 @@ func (ds *DistributedStorageImpl) storeStrong(ctx context.Context, entry *Distri
}

func (ds *DistributedStorageImpl) storeQuorum(ctx context.Context, entry *DistributedEntry, nodes []string) error {
-   // Store on quorum of nodes
+   // Store on quorum of nodes per SEC-SLURP-1.1a availability guardrail
    quorumSize := (len(nodes) / 2) + 1
    errCh := make(chan error, len(nodes))

    for _, nodeID := range nodes {
        go func(node string) {
            err := ds.storeOnNode(ctx, node, entry)
-           errorCh <- err
+           errCh <- err
        }(nodeID)
    }

    // Wait for quorum
    successCount := 0
    errorCount := 0

    for i := 0; i < len(nodes); i++ {
        select {
        case err := <-errCh:

@@ -537,7 +515,7 @@ func (ds *DistributedStorageImpl) generateOperationID() string {
func (ds *DistributedStorageImpl) updateLatencyMetrics(latency time.Duration) {
    ds.mu.Lock()
    defer ds.mu.Unlock()

    if ds.metrics.NetworkLatency == 0 {
        ds.metrics.NetworkLatency = latency
    } else {

@@ -553,11 +531,11 @@ func (ds *DistributedStorageImpl) updateLatencyMetrics(latency time.Duration) {
func (ds *DistributedStorageImpl) getReplicationNodes(key string) ([]string, error) {
    ds.mu.RLock()
    defer ds.mu.RUnlock()

    if replicas, exists := ds.replicas[key]; exists {
        return replicas, nil
    }

    // Fall back to consistent hashing
    return ds.selectReplicationNodes(key, ds.options.ReplicationFactor)
}

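Reviewer note (not part of the diff): the quorum used by storeQuorum is (len(nodes) / 2) + 1, i.e. a strict majority of the replica set, so 3 replicas need 2 acks, 4 need 3, and 5 need 3. A tiny helper makes that arithmetic explicit.

func writeQuorum(replicas int) int {
    return replicas/2 + 1 // strict majority, matching storeQuorum above
}
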
@@ -9,7 +9,6 @@ import (
    "time"

    "chorus/pkg/crypto"
    slurpContext "chorus/pkg/slurp/context"
    "chorus/pkg/ucxl"
)

@@ -19,25 +18,25 @@ type EncryptedStorageImpl struct {
    crypto        crypto.RoleCrypto
    localStorage  LocalStorage
    keyManager    crypto.KeyManager
-   accessControl crypto.AccessController
-   auditLogger   crypto.AuditLogger
+   accessControl crypto.StorageAccessController
+   auditLogger   crypto.StorageAuditLogger
    metrics       *EncryptionMetrics
}

// EncryptionMetrics tracks encryption-related metrics
type EncryptionMetrics struct {
    mu                   sync.RWMutex
    EncryptOperations    int64
    DecryptOperations    int64
    KeyRotations         int64
    AccessDenials        int64
    EncryptionErrors     int64
    DecryptionErrors     int64
    LastKeyRotation      time.Time
    AverageEncryptTime   time.Duration
    AverageDecryptTime   time.Duration
    ActiveEncryptionKeys int
    ExpiredKeys          int
}

// NewEncryptedStorage creates a new encrypted storage implementation
@@ -45,8 +44,8 @@ func NewEncryptedStorage(
    crypto crypto.RoleCrypto,
    localStorage LocalStorage,
    keyManager crypto.KeyManager,
-   accessControl crypto.AccessController,
-   auditLogger crypto.AuditLogger,
+   accessControl crypto.StorageAccessController,
+   auditLogger crypto.StorageAuditLogger,
) *EncryptedStorageImpl {
    return &EncryptedStorageImpl{
        crypto: crypto,

@@ -286,12 +285,11 @@ func (es *EncryptedStorageImpl) GetAccessRoles(
    return roles, nil
}

-// RotateKeys rotates encryption keys
+// RotateKeys rotates encryption keys in line with SEC-SLURP-1.1 retention constraints
func (es *EncryptedStorageImpl) RotateKeys(
    ctx context.Context,
    maxAge time.Duration,
) error {
    start := time.Now()
    defer func() {
        es.metrics.mu.Lock()
        es.metrics.KeyRotations++

@@ -334,7 +332,7 @@ func (es *EncryptedStorageImpl) ValidateEncryption(
    // Validate each encrypted version
    for _, role := range roles {
        roleKey := es.generateRoleKey(key, role)

        // Retrieve encrypted context
        encryptedData, err := es.localStorage.Retrieve(ctx, roleKey)
        if err != nil {

@@ -9,22 +9,23 @@ import (
    "sync"
    "time"

    slurpContext "chorus/pkg/slurp/context"
    "chorus/pkg/ucxl"
    "github.com/blevesearch/bleve/v2"
    "github.com/blevesearch/bleve/v2/analysis/analyzer/standard"
    "github.com/blevesearch/bleve/v2/analysis/lang/en"
    "github.com/blevesearch/bleve/v2/mapping"
    "github.com/blevesearch/bleve/v2/search/query"
)

// IndexManagerImpl implements the IndexManager interface using Bleve
type IndexManagerImpl struct {
    mu       sync.RWMutex
    indexes  map[string]bleve.Index
    stats    map[string]*IndexStatistics
    basePath string
    nodeID   string
    options  *IndexManagerOptions
}

// IndexManagerOptions configures index manager behavior
@@ -60,11 +61,11 @@ func NewIndexManager(basePath, nodeID string, options *IndexManagerOptions) (*In
    }

    im := &IndexManagerImpl{
        indexes:  make(map[string]bleve.Index),
        stats:    make(map[string]*IndexStatistics),
        basePath: basePath,
        nodeID:   nodeID,
        options:  options,
    }

    // Start background optimization if enabled

@@ -356,11 +357,11 @@ func (im *IndexManagerImpl) createIndexMapping(config *IndexConfig) (mapping.Ind
        fieldMapping.Analyzer = analyzer
        fieldMapping.Store = true
        fieldMapping.Index = true

        if im.options.EnableHighlighting {
            fieldMapping.IncludeTermVectors = true
        }

        docMapping.AddFieldMappingsAt(field, fieldMapping)
    }

@@ -432,31 +433,31 @@ func (im *IndexManagerImpl) createIndexDocument(data interface{}) (map[string]in
    return doc, nil
}

-func (im *IndexManagerImpl) buildSearchRequest(query *SearchQuery) (*bleve.SearchRequest, error) {
-   // Build Bleve search request from our search query
-   var bleveQuery bleve.Query
+func (im *IndexManagerImpl) buildSearchRequest(searchQuery *SearchQuery) (*bleve.SearchRequest, error) {
+   // Build Bleve search request from our search query (SEC-SLURP-1.1 search path)
+   var bleveQuery query.Query

-   if query.Query == "" {
+   if searchQuery.Query == "" {
        // Match all query
        bleveQuery = bleve.NewMatchAllQuery()
    } else {
        // Text search query
-       if query.FuzzyMatch {
+       if searchQuery.FuzzyMatch {
            // Use fuzzy query
-           bleveQuery = bleve.NewFuzzyQuery(query.Query)
+           bleveQuery = bleve.NewFuzzyQuery(searchQuery.Query)
        } else {
            // Use match query for better scoring
-           bleveQuery = bleve.NewMatchQuery(query.Query)
+           bleveQuery = bleve.NewMatchQuery(searchQuery.Query)
        }
    }

    // Add filters
-   var conjuncts []bleve.Query
+   var conjuncts []query.Query
    conjuncts = append(conjuncts, bleveQuery)

    // Technology filters
-   if len(query.Technologies) > 0 {
-       for _, tech := range query.Technologies {
+   if len(searchQuery.Technologies) > 0 {
+       for _, tech := range searchQuery.Technologies {
            techQuery := bleve.NewTermQuery(tech)
            techQuery.SetField("technologies_facet")
            conjuncts = append(conjuncts, techQuery)

@@ -464,8 +465,8 @@ func (im *IndexManagerImpl) buildSearchRequest(query *SearchQuery) (*bleve.Searc
    }

    // Tag filters
-   if len(query.Tags) > 0 {
-       for _, tag := range query.Tags {
+   if len(searchQuery.Tags) > 0 {
+       for _, tag := range searchQuery.Tags {
            tagQuery := bleve.NewTermQuery(tag)
            tagQuery.SetField("tags_facet")
            conjuncts = append(conjuncts, tagQuery)

@@ -479,20 +480,20 @@ func (im *IndexManagerImpl) buildSearchRequest(query *SearchQuery) (*bleve.Searc
    // Create search request
    searchRequest := bleve.NewSearchRequest(bleveQuery)

    // Set result options
-   if query.Limit > 0 && query.Limit <= im.options.MaxResults {
-       searchRequest.Size = query.Limit
+   if searchQuery.Limit > 0 && searchQuery.Limit <= im.options.MaxResults {
+       searchRequest.Size = searchQuery.Limit
    } else {
        searchRequest.Size = im.options.MaxResults
    }

-   if query.Offset > 0 {
-       searchRequest.From = query.Offset
+   if searchQuery.Offset > 0 {
+       searchRequest.From = searchQuery.Offset
    }

    // Enable highlighting if requested
-   if query.HighlightTerms && im.options.EnableHighlighting {
+   if searchQuery.HighlightTerms && im.options.EnableHighlighting {
        searchRequest.Highlight = bleve.NewHighlight()
        searchRequest.Highlight.AddField("content")
        searchRequest.Highlight.AddField("summary")

@@ -500,9 +501,9 @@ func (im *IndexManagerImpl) buildSearchRequest(query *SearchQuery) (*bleve.Searc
    }

    // Add facets if requested
-   if len(query.Facets) > 0 && im.options.EnableFaceting {
+   if len(searchQuery.Facets) > 0 && im.options.EnableFaceting {
        searchRequest.Facets = make(bleve.FacetsRequest)
-       for _, facet := range query.Facets {
+       for _, facet := range searchQuery.Facets {
            switch facet {
            case "technologies":
                searchRequest.Facets["technologies"] = bleve.NewFacetRequest("technologies_facet", 10)

@@ -535,7 +536,7 @@ func (im *IndexManagerImpl) convertSearchResults(
        searchHit := &SearchResult{
            MatchScore:    hit.Score,
            MatchedFields: make([]string, 0),
            Highlights:    make(map[string][]string),
            Rank:          i + 1,
        }

@@ -558,8 +559,8 @@ func (im *IndexManagerImpl) convertSearchResults(
        // Parse UCXL address
        if ucxlStr, ok := hit.Fields["ucxl_address"].(string); ok {
-           if addr, err := ucxl.ParseAddress(ucxlStr); err == nil {
-               contextNode.UCXLAddress = addr
+           if addr, err := ucxl.Parse(ucxlStr); err == nil {
+               contextNode.UCXLAddress = *addr
            }
        }

@@ -572,8 +573,10 @@ func (im *IndexManagerImpl) convertSearchResults(
    results.Facets = make(map[string]map[string]int)
    for facetName, facetResult := range searchResult.Facets {
        facetCounts := make(map[string]int)
-       for _, term := range facetResult.Terms {
-           facetCounts[term.Term] = term.Count
+       if facetResult.Terms != nil {
+           for _, term := range facetResult.Terms.Terms() {
+               facetCounts[term.Term] = term.Count
+           }
        }
        results.Facets[facetName] = facetCounts
    }

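Reviewer note (not part of the diff): a sketch of driving the renamed buildSearchRequest path through the IndexManager interface. The index name "contexts" and the filter values are assumptions; Query, FuzzyMatch, Technologies and Limit are the SearchQuery fields referenced above.

func findFuzzyContexts(ctx context.Context, im IndexManager) (*SearchResults, error) {
    q := &SearchQuery{
        Query:        "actor model",
        FuzzyMatch:   true,
        Technologies: []string{"go"},
        Limit:        20,
    }
    return im.Search(ctx, "contexts", q)
}
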
@@ -4,9 +4,8 @@ import (
    "context"
    "time"

    "chorus/pkg/crypto"
    slurpContext "chorus/pkg/slurp/context"
    "chorus/pkg/ucxl"
)

// ContextStore provides the main interface for context storage and retrieval
@@ -17,40 +16,40 @@ import (
type ContextStore interface {
    // StoreContext stores a context node with role-based encryption
    StoreContext(ctx context.Context, node *slurpContext.ContextNode, roles []string) error

    // RetrieveContext retrieves context for a UCXL address and role
    RetrieveContext(ctx context.Context, address ucxl.Address, role string) (*slurpContext.ContextNode, error)

    // UpdateContext updates an existing context node
    UpdateContext(ctx context.Context, node *slurpContext.ContextNode, roles []string) error

    // DeleteContext removes a context node from storage
    DeleteContext(ctx context.Context, address ucxl.Address) error

    // ExistsContext checks if context exists for an address
    ExistsContext(ctx context.Context, address ucxl.Address) (bool, error)

    // ListContexts lists contexts matching criteria
    ListContexts(ctx context.Context, criteria *ListCriteria) ([]*slurpContext.ContextNode, error)

    // SearchContexts searches contexts using query criteria
    SearchContexts(ctx context.Context, query *SearchQuery) (*SearchResults, error)

    // BatchStore stores multiple contexts efficiently
    BatchStore(ctx context.Context, batch *BatchStoreRequest) (*BatchStoreResult, error)

    // BatchRetrieve retrieves multiple contexts efficiently
    BatchRetrieve(ctx context.Context, batch *BatchRetrieveRequest) (*BatchRetrieveResult, error)

    // GetStorageStats returns storage statistics and health information
    GetStorageStats(ctx context.Context) (*StorageStatistics, error)

    // Sync synchronizes with distributed storage
    Sync(ctx context.Context) error

    // Backup creates a backup of stored contexts
    Backup(ctx context.Context, destination string) error

    // Restore restores contexts from backup
    Restore(ctx context.Context, source string) error
}
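Reviewer note (not part of the diff): a store-then-read round trip against the ContextStore interface, assuming the caller already has a populated ContextNode; the "architect" role name is an illustrative placeholder.

func roundTrip(ctx context.Context, store ContextStore, node *slurpContext.ContextNode) (*slurpContext.ContextNode, error) {
    if err := store.StoreContext(ctx, node, []string{"architect"}); err != nil {
        return nil, err
    }
    return store.RetrieveContext(ctx, node.UCXLAddress, "architect")
}
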
@@ -59,25 +58,25 @@ type ContextStore interface {
type LocalStorage interface {
    // Store stores context data locally with optional encryption
    Store(ctx context.Context, key string, data interface{}, options *StoreOptions) error

    // Retrieve retrieves context data from local storage
    Retrieve(ctx context.Context, key string) (interface{}, error)

    // Delete removes data from local storage
    Delete(ctx context.Context, key string) error

    // Exists checks if data exists locally
    Exists(ctx context.Context, key string) (bool, error)

    // List lists all keys matching a pattern
    List(ctx context.Context, pattern string) ([]string, error)

    // Size returns the size of stored data
    Size(ctx context.Context, key string) (int64, error)

    // Compact compacts local storage to reclaim space
    Compact(ctx context.Context) error

    // GetLocalStats returns local storage statistics
    GetLocalStats() (*LocalStorageStats, error)
}

@@ -86,25 +85,25 @@ type LocalStorage interface {
type DistributedStorage interface {
    // Store stores data in the distributed DHT with replication
    Store(ctx context.Context, key string, data interface{}, options *DistributedStoreOptions) error

    // Retrieve retrieves data from the distributed DHT
    Retrieve(ctx context.Context, key string) (interface{}, error)

    // Delete removes data from the distributed DHT
    Delete(ctx context.Context, key string) error

    // Exists checks if data exists in the DHT
    Exists(ctx context.Context, key string) (bool, error)

    // Replicate ensures data is replicated across nodes
    Replicate(ctx context.Context, key string, replicationFactor int) error

    // FindReplicas finds all replicas of data
    FindReplicas(ctx context.Context, key string) ([]string, error)

    // Sync synchronizes with other DHT nodes
    Sync(ctx context.Context) error

    // GetDistributedStats returns distributed storage statistics
    GetDistributedStats() (*DistributedStorageStats, error)
}

@@ -113,25 +112,25 @@ type DistributedStorage interface {
type EncryptedStorage interface {
    // StoreEncrypted stores data encrypted for specific roles
    StoreEncrypted(ctx context.Context, key string, data interface{}, roles []string) error

    // RetrieveDecrypted retrieves and decrypts data for current role
    RetrieveDecrypted(ctx context.Context, key string, role string) (interface{}, error)

    // CanAccess checks if a role can access specific data
    CanAccess(ctx context.Context, key string, role string) (bool, error)

    // ListAccessibleKeys lists keys accessible to a role
    ListAccessibleKeys(ctx context.Context, role string) ([]string, error)

    // ReEncryptForRoles re-encrypts data for different roles
    ReEncryptForRoles(ctx context.Context, key string, newRoles []string) error

    // GetAccessRoles gets roles that can access specific data
    GetAccessRoles(ctx context.Context, key string) ([]string, error)

    // RotateKeys rotates encryption keys
    RotateKeys(ctx context.Context, maxAge time.Duration) error

    // ValidateEncryption validates encryption integrity
    ValidateEncryption(ctx context.Context, key string) error
}

@@ -140,25 +139,25 @@ type EncryptedStorage interface {
type CacheManager interface {
    // Get retrieves data from cache
    Get(ctx context.Context, key string) (interface{}, bool, error)

    // Set stores data in cache with TTL
    Set(ctx context.Context, key string, data interface{}, ttl time.Duration) error

    // Delete removes data from cache
    Delete(ctx context.Context, key string) error

    // DeletePattern removes cache entries matching pattern
    DeletePattern(ctx context.Context, pattern string) error

    // Clear clears all cache entries
    Clear(ctx context.Context) error

    // Warm pre-loads cache with frequently accessed data
    Warm(ctx context.Context, keys []string) error

    // GetCacheStats returns cache performance statistics
    GetCacheStats() (*CacheStatistics, error)

    // SetCachePolicy sets caching policy
    SetCachePolicy(policy *CachePolicy) error
}
@@ -167,25 +166,25 @@ type CacheManager interface {
type IndexManager interface {
    // CreateIndex creates a search index for contexts
    CreateIndex(ctx context.Context, indexName string, config *IndexConfig) error

    // UpdateIndex updates search index with new data
    UpdateIndex(ctx context.Context, indexName string, key string, data interface{}) error

    // DeleteFromIndex removes data from search index
    DeleteFromIndex(ctx context.Context, indexName string, key string) error

    // Search searches indexed data using query
    Search(ctx context.Context, indexName string, query *SearchQuery) (*SearchResults, error)

    // RebuildIndex rebuilds search index from stored data
    RebuildIndex(ctx context.Context, indexName string) error

    // OptimizeIndex optimizes search index for performance
    OptimizeIndex(ctx context.Context, indexName string) error

    // GetIndexStats returns index statistics
    GetIndexStats(ctx context.Context, indexName string) (*IndexStatistics, error)

    // ListIndexes lists all available indexes
    ListIndexes(ctx context.Context) ([]string, error)
}

@@ -194,22 +193,22 @@ type IndexManager interface {
type BackupManager interface {
    // CreateBackup creates a backup of stored data
    CreateBackup(ctx context.Context, config *BackupConfig) (*BackupInfo, error)

    // RestoreBackup restores data from backup
    RestoreBackup(ctx context.Context, backupID string, config *RestoreConfig) error

    // ListBackups lists available backups
    ListBackups(ctx context.Context) ([]*BackupInfo, error)

    // DeleteBackup removes a backup
    DeleteBackup(ctx context.Context, backupID string) error

    // ValidateBackup validates backup integrity
    ValidateBackup(ctx context.Context, backupID string) (*BackupValidation, error)

    // ScheduleBackup schedules automatic backups
    ScheduleBackup(ctx context.Context, schedule *BackupSchedule) error

    // GetBackupStats returns backup statistics
    GetBackupStats(ctx context.Context) (*BackupStatistics, error)
}

@@ -218,13 +217,13 @@ type BackupManager interface {
type TransactionManager interface {
    // BeginTransaction starts a new transaction
    BeginTransaction(ctx context.Context) (*Transaction, error)

    // CommitTransaction commits a transaction
    CommitTransaction(ctx context.Context, tx *Transaction) error

    // RollbackTransaction rolls back a transaction
    RollbackTransaction(ctx context.Context, tx *Transaction) error

    // GetActiveTransactions returns list of active transactions
    GetActiveTransactions(ctx context.Context) ([]*Transaction, error)
}

@@ -233,19 +232,19 @@ type TransactionManager interface {
type EventNotifier interface {
    // NotifyStored notifies when data is stored
    NotifyStored(ctx context.Context, event *StorageEvent) error

    // NotifyRetrieved notifies when data is retrieved
    NotifyRetrieved(ctx context.Context, event *StorageEvent) error

    // NotifyUpdated notifies when data is updated
    NotifyUpdated(ctx context.Context, event *StorageEvent) error

    // NotifyDeleted notifies when data is deleted
    NotifyDeleted(ctx context.Context, event *StorageEvent) error

    // Subscribe subscribes to storage events
    Subscribe(ctx context.Context, eventType EventType, handler EventHandler) error

    // Unsubscribe unsubscribes from storage events
    Unsubscribe(ctx context.Context, eventType EventType, handler EventHandler) error
}
@@ -270,35 +269,35 @@ type EventHandler func(event *StorageEvent) error

// StorageEvent represents a storage operation event
type StorageEvent struct {
    Type      EventType              `json:"type"`      // Event type
    Key       string                 `json:"key"`       // Storage key
    Data      interface{}            `json:"data"`      // Event data
    Timestamp time.Time              `json:"timestamp"` // When event occurred
    Metadata  map[string]interface{} `json:"metadata"`  // Additional metadata
}

// Transaction represents a storage transaction
type Transaction struct {
    ID         string                  `json:"id"`         // Transaction ID
    StartTime  time.Time               `json:"start_time"` // When transaction started
    Operations []*TransactionOperation `json:"operations"` // Transaction operations
    Status     TransactionStatus       `json:"status"`     // Transaction status
}

// TransactionOperation represents a single operation in a transaction
type TransactionOperation struct {
    Type     string                 `json:"type"`     // Operation type
    Key      string                 `json:"key"`      // Storage key
    Data     interface{}            `json:"data"`     // Operation data
    Metadata map[string]interface{} `json:"metadata"` // Operation metadata
}

// TransactionStatus represents transaction status
type TransactionStatus string

const (
    TransactionActive     TransactionStatus = "active"
    TransactionCommitted  TransactionStatus = "committed"
    TransactionRolledBack TransactionStatus = "rolled_back"
    TransactionFailed     TransactionStatus = "failed"
)

@@ -33,12 +33,12 @@ type LocalStorageImpl struct {

// LocalStorageOptions configures local storage behavior
type LocalStorageOptions struct {
	Compression        bool          `json:"compression"`         // Enable compression
	CacheSize          int           `json:"cache_size"`          // Cache size in MB
	WriteBuffer        int           `json:"write_buffer"`        // Write buffer size in MB
	MaxOpenFiles       int           `json:"max_open_files"`      // Maximum open files
	BlockSize          int           `json:"block_size"`          // Block size in KB
	SyncWrites         bool          `json:"sync_writes"`         // Synchronous writes
	CompactionInterval time.Duration `json:"compaction_interval"` // Auto-compaction interval
}

@@ -46,11 +46,11 @@ type LocalStorageOptions struct {
func DefaultLocalStorageOptions() *LocalStorageOptions {
	return &LocalStorageOptions{
		Compression:        true,
		CacheSize:          64, // 64MB cache
		WriteBuffer:        16, // 16MB write buffer
		MaxOpenFiles:       1000,
		BlockSize:          4, // 4KB blocks
		SyncWrites:         false,
		CompactionInterval: 24 * time.Hour,
	}
}
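// Illustrative sketch (not part of the diff): overriding the defaults above for
// a durability-focused node. The values are examples, not recommendations.
//
//	opts := DefaultLocalStorageOptions()
//	opts.SyncWrites = true                  // fsync on every write
//	opts.WriteBuffer = 32                   // 32MB write buffer
//	opts.CompactionInterval = 6 * time.Hour // compact more aggressively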
@@ -135,13 +135,14 @@ func (ls *LocalStorageImpl) Store(
		UpdatedAt: time.Now(),
		Metadata:  make(map[string]interface{}),
	}
	entry.Checksum = ls.computeChecksum(dataBytes)

	// Apply options
	if options != nil {
		entry.TTL = options.TTL
		entry.Compressed = options.Compress
		entry.AccessLevel = string(options.AccessLevel)

		// Copy metadata
		for k, v := range options.Metadata {
			entry.Metadata[k] = v
@@ -179,6 +180,7 @@ func (ls *LocalStorageImpl) Store(
	if entry.Compressed {
		ls.metrics.CompressedSize += entry.CompressedSize
	}
	ls.updateFileMetricsLocked()

	return nil
}
@@ -231,6 +233,14 @@ func (ls *LocalStorageImpl) Retrieve(ctx context.Context, key string) (interface
		dataBytes = decompressedData
	}

	// Verify integrity against stored checksum (SEC-SLURP-1.1a requirement)
	if entry.Checksum != "" {
		computed := ls.computeChecksum(dataBytes)
		if computed != entry.Checksum {
			return nil, fmt.Errorf("data integrity check failed for key %s", key)
		}
	}

	// Deserialize data
	var result interface{}
	if err := json.Unmarshal(dataBytes, &result); err != nil {
@@ -260,6 +270,7 @@ func (ls *LocalStorageImpl) Delete(ctx context.Context, key string) error {
	if entryBytes != nil {
		ls.metrics.TotalSize -= int64(len(entryBytes))
	}
	ls.updateFileMetricsLocked()

	return nil
}
@@ -350,7 +361,7 @@ func (ls *LocalStorageImpl) Compact(ctx context.Context) error {
	// Update metrics
	ls.metrics.LastCompaction = time.Now()
	compactionTime := time.Since(start)

	// Calculate new fragmentation ratio
	ls.updateFragmentationRatio()

@@ -397,6 +408,7 @@ type StorageEntry struct {
	Compressed     bool                   `json:"compressed"`
	OriginalSize   int64                  `json:"original_size"`
	CompressedSize int64                  `json:"compressed_size"`
	Checksum       string                 `json:"checksum"`
	AccessLevel    string                 `json:"access_level"`
	Metadata       map[string]interface{} `json:"metadata"`
}
@@ -406,34 +418,70 @@ type StorageEntry struct {
func (ls *LocalStorageImpl) compress(data []byte) ([]byte, error) {
	// Use gzip compression for efficient data storage
	var buf bytes.Buffer

	// Create gzip writer and tag the stream header
	writer := gzip.NewWriter(&buf)
	writer.Header.Name = "storage_data"
	writer.Header.Comment = "CHORUS SLURP local storage compressed data"

	// Write data to gzip writer
	if _, err := writer.Write(data); err != nil {
		writer.Close()
		return nil, fmt.Errorf("failed to write compressed data: %w", err)
	}

	// Close writer to flush data
	if err := writer.Close(); err != nil {
		return nil, fmt.Errorf("failed to close gzip writer: %w", err)
	}

	compressed := buf.Bytes()

	// Only return compressed data if it's actually smaller
	if len(compressed) >= len(data) {
		// Compression didn't help, return original data
		return data, nil
	}

	return compressed, nil
}

func (ls *LocalStorageImpl) computeChecksum(data []byte) string {
	// Compute SHA-256 checksum to satisfy SEC-SLURP-1.1a integrity tracking
	digest := sha256.Sum256(data)
	return fmt.Sprintf("%x", digest)
}

func (ls *LocalStorageImpl) updateFileMetricsLocked() {
	// Refresh filesystem metrics using io/fs traversal (SEC-SLURP-1.1a durability telemetry)
	var fileCount int64
	var aggregateSize int64

	walkErr := fs.WalkDir(os.DirFS(ls.basePath), ".", func(path string, d fs.DirEntry, err error) error {
		if err != nil {
			return err
		}
		if d.IsDir() {
			return nil
		}
		fileCount++
		if info, infoErr := d.Info(); infoErr == nil {
			aggregateSize += info.Size()
		}
		return nil
	})

	if walkErr != nil {
		fmt.Printf("filesystem metrics refresh failed: %v\n", walkErr)
		return
	}

	ls.metrics.TotalFiles = fileCount
	if aggregateSize > 0 {
		ls.metrics.TotalSize = aggregateSize
	}
}
func (ls *LocalStorageImpl) decompress(data []byte) ([]byte, error) {
	// Create gzip reader
	reader, err := gzip.NewReader(bytes.NewReader(data))
@@ -442,13 +490,13 @@ func (ls *LocalStorageImpl) decompress(data []byte) ([]byte, error) {
		return data, nil
	}
	defer reader.Close()

	// Read decompressed data
	var buf bytes.Buffer
	if _, err := io.Copy(&buf, reader); err != nil {
		return nil, fmt.Errorf("failed to decompress data: %w", err)
	}

	return buf.Bytes(), nil
}

@@ -462,7 +510,7 @@ func (ls *LocalStorageImpl) getAvailableSpace() (int64, error) {
	// Calculate available space in bytes
	// Available blocks * block size
	availableBytes := int64(stat.Bavail) * int64(stat.Bsize)

	return availableBytes, nil
}
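// Illustrative sketch (not part of the diff): the compress/checksum round trip
// that the helpers above implement, shown with the standard library only. Note
// the checksum is taken over the *uncompressed* bytes, matching Store/Retrieve.
//
//	var buf bytes.Buffer
//	zw := gzip.NewWriter(&buf)
//	if _, err := zw.Write(payload); err != nil { /* handle */ }
//	if err := zw.Close(); err != nil { /* handle */ }
//
//	checksum := fmt.Sprintf("%x", sha256.Sum256(payload))
//
//	zr, _ := gzip.NewReader(bytes.NewReader(buf.Bytes()))
//	restored, _ := io.ReadAll(zr)
//	if fmt.Sprintf("%x", sha256.Sum256(restored)) != checksum {
//		// integrity failure; mirrors Retrieve()'s error path
//	}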
@@ -498,11 +546,11 @@ func (ls *LocalStorageImpl) GetCompressionStats() (*CompressionStats, error) {
|
||||
defer ls.mu.RUnlock()
|
||||
|
||||
stats := &CompressionStats{
|
||||
TotalEntries: 0,
|
||||
TotalEntries: 0,
|
||||
CompressedEntries: 0,
|
||||
TotalSize: ls.metrics.TotalSize,
|
||||
CompressedSize: ls.metrics.CompressedSize,
|
||||
CompressionRatio: 0.0,
|
||||
TotalSize: ls.metrics.TotalSize,
|
||||
CompressedSize: ls.metrics.CompressedSize,
|
||||
CompressionRatio: 0.0,
|
||||
}
|
||||
|
||||
// Iterate through all entries to get accurate stats
|
||||
@@ -511,7 +559,7 @@ func (ls *LocalStorageImpl) GetCompressionStats() (*CompressionStats, error) {
|
||||
|
||||
for iter.Next() {
|
||||
stats.TotalEntries++
|
||||
|
||||
|
||||
// Try to parse entry to check if compressed
|
||||
var entry StorageEntry
|
||||
if err := json.Unmarshal(iter.Value(), &entry); err == nil {
|
||||
@@ -549,7 +597,7 @@ func (ls *LocalStorageImpl) OptimizeStorage(ctx context.Context, compressThresho
|
||||
}
|
||||
|
||||
key := string(iter.Key())
|
||||
|
||||
|
||||
// Parse existing entry
|
||||
var entry StorageEntry
|
||||
if err := json.Unmarshal(iter.Value(), &entry); err != nil {
|
||||
@@ -599,11 +647,11 @@ func (ls *LocalStorageImpl) OptimizeStorage(ctx context.Context, compressThresho
|
||||
|
||||
// CompressionStats holds compression statistics
|
||||
type CompressionStats struct {
|
||||
TotalEntries int64 `json:"total_entries"`
|
||||
TotalEntries int64 `json:"total_entries"`
|
||||
CompressedEntries int64 `json:"compressed_entries"`
|
||||
TotalSize int64 `json:"total_size"`
|
||||
CompressedSize int64 `json:"compressed_size"`
|
||||
CompressionRatio float64 `json:"compression_ratio"`
|
||||
TotalSize int64 `json:"total_size"`
|
||||
CompressedSize int64 `json:"compressed_size"`
|
||||
CompressionRatio float64 `json:"compression_ratio"`
|
||||
}
|
||||
|
||||
// Close closes the local storage
|
||||
|
||||
@@ -14,77 +14,77 @@ import (
|
||||
|
||||
// MonitoringSystem provides comprehensive monitoring for the storage system
|
||||
type MonitoringSystem struct {
|
||||
mu sync.RWMutex
|
||||
nodeID string
|
||||
metrics *StorageMetrics
|
||||
alerts *AlertManager
|
||||
healthChecker *HealthChecker
|
||||
mu sync.RWMutex
|
||||
nodeID string
|
||||
metrics *StorageMetrics
|
||||
alerts *AlertManager
|
||||
healthChecker *HealthChecker
|
||||
performanceProfiler *PerformanceProfiler
|
||||
logger *StructuredLogger
|
||||
notifications chan *MonitoringEvent
|
||||
stopCh chan struct{}
|
||||
logger *StructuredLogger
|
||||
notifications chan *MonitoringEvent
|
||||
stopCh chan struct{}
|
||||
}
|
||||
|
||||
// StorageMetrics contains all Prometheus metrics for storage operations
|
||||
type StorageMetrics struct {
|
||||
// Operation counters
|
||||
StoreOperations prometheus.Counter
|
||||
RetrieveOperations prometheus.Counter
|
||||
DeleteOperations prometheus.Counter
|
||||
UpdateOperations prometheus.Counter
|
||||
SearchOperations prometheus.Counter
|
||||
BatchOperations prometheus.Counter
|
||||
StoreOperations prometheus.Counter
|
||||
RetrieveOperations prometheus.Counter
|
||||
DeleteOperations prometheus.Counter
|
||||
UpdateOperations prometheus.Counter
|
||||
SearchOperations prometheus.Counter
|
||||
BatchOperations prometheus.Counter
|
||||
|
||||
// Error counters
|
||||
StoreErrors prometheus.Counter
|
||||
RetrieveErrors prometheus.Counter
|
||||
EncryptionErrors prometheus.Counter
|
||||
DecryptionErrors prometheus.Counter
|
||||
ReplicationErrors prometheus.Counter
|
||||
CacheErrors prometheus.Counter
|
||||
IndexErrors prometheus.Counter
|
||||
StoreErrors prometheus.Counter
|
||||
RetrieveErrors prometheus.Counter
|
||||
EncryptionErrors prometheus.Counter
|
||||
DecryptionErrors prometheus.Counter
|
||||
ReplicationErrors prometheus.Counter
|
||||
CacheErrors prometheus.Counter
|
||||
IndexErrors prometheus.Counter
|
||||
|
||||
// Latency histograms
|
||||
StoreLatency prometheus.Histogram
|
||||
RetrieveLatency prometheus.Histogram
|
||||
EncryptionLatency prometheus.Histogram
|
||||
DecryptionLatency prometheus.Histogram
|
||||
ReplicationLatency prometheus.Histogram
|
||||
SearchLatency prometheus.Histogram
|
||||
StoreLatency prometheus.Histogram
|
||||
RetrieveLatency prometheus.Histogram
|
||||
EncryptionLatency prometheus.Histogram
|
||||
DecryptionLatency prometheus.Histogram
|
||||
ReplicationLatency prometheus.Histogram
|
||||
SearchLatency prometheus.Histogram
|
||||
|
||||
// Cache metrics
|
||||
CacheHits prometheus.Counter
|
||||
CacheMisses prometheus.Counter
|
||||
CacheEvictions prometheus.Counter
|
||||
CacheSize prometheus.Gauge
|
||||
CacheHits prometheus.Counter
|
||||
CacheMisses prometheus.Counter
|
||||
CacheEvictions prometheus.Counter
|
||||
CacheSize prometheus.Gauge
|
||||
|
||||
// Storage size metrics
|
||||
LocalStorageSize prometheus.Gauge
|
||||
LocalStorageSize prometheus.Gauge
|
||||
DistributedStorageSize prometheus.Gauge
|
||||
CompressedStorageSize prometheus.Gauge
|
||||
IndexStorageSize prometheus.Gauge
|
||||
|
||||
// Replication metrics
|
||||
ReplicationFactor prometheus.Gauge
|
||||
HealthyReplicas prometheus.Gauge
|
||||
UnderReplicated prometheus.Gauge
|
||||
ReplicationLag prometheus.Histogram
|
||||
ReplicationFactor prometheus.Gauge
|
||||
HealthyReplicas prometheus.Gauge
|
||||
UnderReplicated prometheus.Gauge
|
||||
ReplicationLag prometheus.Histogram
|
||||
|
||||
// Encryption metrics
|
||||
EncryptedContexts prometheus.Gauge
|
||||
KeyRotations prometheus.Counter
|
||||
AccessDenials prometheus.Counter
|
||||
ActiveKeys prometheus.Gauge
|
||||
EncryptedContexts prometheus.Gauge
|
||||
KeyRotations prometheus.Counter
|
||||
AccessDenials prometheus.Counter
|
||||
ActiveKeys prometheus.Gauge
|
||||
|
||||
// Performance metrics
|
||||
Throughput prometheus.Gauge
|
||||
Throughput prometheus.Gauge
|
||||
ConcurrentOperations prometheus.Gauge
|
||||
QueueDepth prometheus.Gauge
|
||||
QueueDepth prometheus.Gauge
|
||||
|
||||
// Health metrics
|
||||
StorageHealth prometheus.Gauge
|
||||
NodeConnectivity prometheus.Gauge
|
||||
SyncLatency prometheus.Histogram
|
||||
StorageHealth prometheus.Gauge
|
||||
NodeConnectivity prometheus.Gauge
|
||||
SyncLatency prometheus.Histogram
|
||||
}
|
||||
|
||||
// AlertManager handles storage-related alerts and notifications
|
||||
@@ -97,18 +97,96 @@ type AlertManager struct {
|
||||
maxHistory int
|
||||
}
|
||||
|
||||
func (am *AlertManager) severityRank(severity AlertSeverity) int {
|
||||
switch severity {
|
||||
case SeverityCritical:
|
||||
return 4
|
||||
case SeverityError:
|
||||
return 3
|
||||
case SeverityWarning:
|
||||
return 2
|
||||
case SeverityInfo:
|
||||
return 1
|
||||
default:
|
||||
return 0
|
||||
}
|
||||
}
|
||||
|
||||
// GetActiveAlerts returns sorted active alerts (SEC-SLURP-1.1 monitoring path)
|
||||
func (am *AlertManager) GetActiveAlerts() []*Alert {
|
||||
am.mu.RLock()
|
||||
defer am.mu.RUnlock()
|
||||
|
||||
if len(am.activealerts) == 0 {
|
||||
return nil
|
||||
}
|
||||
|
||||
alerts := make([]*Alert, 0, len(am.activealerts))
|
||||
for _, alert := range am.activealerts {
|
||||
alerts = append(alerts, alert)
|
||||
}
|
||||
|
||||
sort.Slice(alerts, func(i, j int) bool {
|
||||
iRank := am.severityRank(alerts[i].Severity)
|
||||
jRank := am.severityRank(alerts[j].Severity)
|
||||
if iRank == jRank {
|
||||
return alerts[i].StartTime.After(alerts[j].StartTime)
|
||||
}
|
||||
return iRank > jRank
|
||||
})
|
||||
|
||||
return alerts
|
||||
}
|
||||
|
||||
// Snapshot marshals monitoring state for UCXL persistence (SEC-SLURP-1.1a telemetry)
|
||||
func (ms *MonitoringSystem) Snapshot(ctx context.Context) (string, error) {
|
||||
ms.mu.RLock()
|
||||
defer ms.mu.RUnlock()
|
||||
|
||||
if ms.alerts == nil {
|
||||
return "", fmt.Errorf("alert manager not initialised")
|
||||
}
|
||||
|
||||
active := ms.alerts.GetActiveAlerts()
|
||||
alertPayload := make([]map[string]interface{}, 0, len(active))
|
||||
for _, alert := range active {
|
||||
alertPayload = append(alertPayload, map[string]interface{}{
|
||||
"id": alert.ID,
|
||||
"name": alert.Name,
|
||||
"severity": alert.Severity,
|
||||
"message": fmt.Sprintf("%s (threshold %.2f)", alert.Description, alert.Threshold),
|
||||
"labels": alert.Labels,
|
||||
"started_at": alert.StartTime,
|
||||
})
|
||||
}
|
||||
|
||||
snapshot := map[string]interface{}{
|
||||
"node_id": ms.nodeID,
|
||||
"generated_at": time.Now().UTC(),
|
||||
"alert_count": len(active),
|
||||
"alerts": alertPayload,
|
||||
}
|
||||
|
||||
encoded, err := json.MarshalIndent(snapshot, "", " ")
|
||||
if err != nil {
|
||||
return "", fmt.Errorf("failed to marshal monitoring snapshot: %w", err)
|
||||
}
|
||||
|
||||
return string(encoded), nil
|
||||
}
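// Illustrative sketch (not part of the diff): one way to persist the snapshot
// produced above. The output path is an example only; NewMonitoringSystem
// appears later in this diff.
//
//	ms := NewMonitoringSystem("node-1")
//	payload, err := ms.Snapshot(ctx)
//	if err != nil {
//		return fmt.Errorf("monitoring snapshot: %w", err)
//	}
//	if err := os.WriteFile("/var/lib/slurp/monitoring-snapshot.json", []byte(payload), 0o600); err != nil {
//		return fmt.Errorf("persist snapshot: %w", err)
//	}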
|
||||
|
||||
// AlertRule defines conditions for triggering alerts
|
||||
type AlertRule struct {
|
||||
ID string `json:"id"`
|
||||
Name string `json:"name"`
|
||||
Description string `json:"description"`
|
||||
Metric string `json:"metric"`
|
||||
Condition string `json:"condition"` // >, <, ==, !=, etc.
|
||||
Threshold float64 `json:"threshold"`
|
||||
Duration time.Duration `json:"duration"`
|
||||
Severity AlertSeverity `json:"severity"`
|
||||
Labels map[string]string `json:"labels"`
|
||||
Enabled bool `json:"enabled"`
|
||||
ID string `json:"id"`
|
||||
Name string `json:"name"`
|
||||
Description string `json:"description"`
|
||||
Metric string `json:"metric"`
|
||||
Condition string `json:"condition"` // >, <, ==, !=, etc.
|
||||
Threshold float64 `json:"threshold"`
|
||||
Duration time.Duration `json:"duration"`
|
||||
Severity AlertSeverity `json:"severity"`
|
||||
Labels map[string]string `json:"labels"`
|
||||
Enabled bool `json:"enabled"`
|
||||
}
|
||||
|
||||
// Alert represents an active or resolved alert
|
||||
@@ -163,30 +241,30 @@ type HealthChecker struct {
|
||||
|
||||
// HealthCheck defines a single health check
|
||||
type HealthCheck struct {
|
||||
Name string `json:"name"`
|
||||
Description string `json:"description"`
|
||||
Name string `json:"name"`
|
||||
Description string `json:"description"`
|
||||
Checker func(ctx context.Context) HealthResult `json:"-"`
|
||||
Interval time.Duration `json:"interval"`
|
||||
Timeout time.Duration `json:"timeout"`
|
||||
Enabled bool `json:"enabled"`
|
||||
Interval time.Duration `json:"interval"`
|
||||
Timeout time.Duration `json:"timeout"`
|
||||
Enabled bool `json:"enabled"`
|
||||
}
|
||||
|
||||
// HealthResult represents the result of a health check
|
||||
type HealthResult struct {
|
||||
Healthy bool `json:"healthy"`
|
||||
Message string `json:"message"`
|
||||
Latency time.Duration `json:"latency"`
|
||||
Healthy bool `json:"healthy"`
|
||||
Message string `json:"message"`
|
||||
Latency time.Duration `json:"latency"`
|
||||
Metadata map[string]interface{} `json:"metadata"`
|
||||
Timestamp time.Time `json:"timestamp"`
|
||||
Timestamp time.Time `json:"timestamp"`
|
||||
}
|
||||
|
||||
// SystemHealth represents the overall health of the storage system
|
||||
type SystemHealth struct {
|
||||
OverallStatus HealthStatus `json:"overall_status"`
|
||||
Components map[string]HealthResult `json:"components"`
|
||||
LastUpdate time.Time `json:"last_update"`
|
||||
Uptime time.Duration `json:"uptime"`
|
||||
StartTime time.Time `json:"start_time"`
|
||||
OverallStatus HealthStatus `json:"overall_status"`
|
||||
Components map[string]HealthResult `json:"components"`
|
||||
LastUpdate time.Time `json:"last_update"`
|
||||
Uptime time.Duration `json:"uptime"`
|
||||
StartTime time.Time `json:"start_time"`
|
||||
}
|
||||
|
||||
// HealthStatus represents system health status
|
||||
@@ -200,82 +278,82 @@ const (
|
||||
|
||||
// PerformanceProfiler analyzes storage performance patterns
|
||||
type PerformanceProfiler struct {
|
||||
mu sync.RWMutex
|
||||
mu sync.RWMutex
|
||||
operationProfiles map[string]*OperationProfile
|
||||
resourceUsage *ResourceUsage
|
||||
bottlenecks []*Bottleneck
|
||||
recommendations []*PerformanceRecommendation
|
||||
resourceUsage *ResourceUsage
|
||||
bottlenecks []*Bottleneck
|
||||
recommendations []*PerformanceRecommendation
|
||||
}
|
||||
|
||||
// OperationProfile contains performance analysis for a specific operation type
|
||||
type OperationProfile struct {
|
||||
Operation string `json:"operation"`
|
||||
TotalOperations int64 `json:"total_operations"`
|
||||
AverageLatency time.Duration `json:"average_latency"`
|
||||
P50Latency time.Duration `json:"p50_latency"`
|
||||
P95Latency time.Duration `json:"p95_latency"`
|
||||
P99Latency time.Duration `json:"p99_latency"`
|
||||
Throughput float64 `json:"throughput"`
|
||||
ErrorRate float64 `json:"error_rate"`
|
||||
LatencyHistory []time.Duration `json:"-"`
|
||||
LastUpdated time.Time `json:"last_updated"`
|
||||
Operation string `json:"operation"`
|
||||
TotalOperations int64 `json:"total_operations"`
|
||||
AverageLatency time.Duration `json:"average_latency"`
|
||||
P50Latency time.Duration `json:"p50_latency"`
|
||||
P95Latency time.Duration `json:"p95_latency"`
|
||||
P99Latency time.Duration `json:"p99_latency"`
|
||||
Throughput float64 `json:"throughput"`
|
||||
ErrorRate float64 `json:"error_rate"`
|
||||
LatencyHistory []time.Duration `json:"-"`
|
||||
LastUpdated time.Time `json:"last_updated"`
|
||||
}
|
||||
|
||||
// ResourceUsage tracks resource consumption
|
||||
type ResourceUsage struct {
|
||||
CPUUsage float64 `json:"cpu_usage"`
|
||||
MemoryUsage int64 `json:"memory_usage"`
|
||||
DiskUsage int64 `json:"disk_usage"`
|
||||
NetworkIn int64 `json:"network_in"`
|
||||
NetworkOut int64 `json:"network_out"`
|
||||
OpenFiles int `json:"open_files"`
|
||||
Goroutines int `json:"goroutines"`
|
||||
LastUpdated time.Time `json:"last_updated"`
|
||||
CPUUsage float64 `json:"cpu_usage"`
|
||||
MemoryUsage int64 `json:"memory_usage"`
|
||||
DiskUsage int64 `json:"disk_usage"`
|
||||
NetworkIn int64 `json:"network_in"`
|
||||
NetworkOut int64 `json:"network_out"`
|
||||
OpenFiles int `json:"open_files"`
|
||||
Goroutines int `json:"goroutines"`
|
||||
LastUpdated time.Time `json:"last_updated"`
|
||||
}
|
||||
|
||||
// Bottleneck represents a performance bottleneck
|
||||
type Bottleneck struct {
|
||||
ID string `json:"id"`
|
||||
Type string `json:"type"` // cpu, memory, disk, network, etc.
|
||||
Component string `json:"component"`
|
||||
Description string `json:"description"`
|
||||
Severity AlertSeverity `json:"severity"`
|
||||
Impact float64 `json:"impact"`
|
||||
DetectedAt time.Time `json:"detected_at"`
|
||||
ID string `json:"id"`
|
||||
Type string `json:"type"` // cpu, memory, disk, network, etc.
|
||||
Component string `json:"component"`
|
||||
Description string `json:"description"`
|
||||
Severity AlertSeverity `json:"severity"`
|
||||
Impact float64 `json:"impact"`
|
||||
DetectedAt time.Time `json:"detected_at"`
|
||||
Metadata map[string]interface{} `json:"metadata"`
|
||||
}
|
||||
|
||||
// PerformanceRecommendation suggests optimizations
|
||||
type PerformanceRecommendation struct {
|
||||
ID string `json:"id"`
|
||||
Type string `json:"type"`
|
||||
Title string `json:"title"`
|
||||
Description string `json:"description"`
|
||||
Priority int `json:"priority"`
|
||||
Impact string `json:"impact"`
|
||||
Effort string `json:"effort"`
|
||||
GeneratedAt time.Time `json:"generated_at"`
|
||||
ID string `json:"id"`
|
||||
Type string `json:"type"`
|
||||
Title string `json:"title"`
|
||||
Description string `json:"description"`
|
||||
Priority int `json:"priority"`
|
||||
Impact string `json:"impact"`
|
||||
Effort string `json:"effort"`
|
||||
GeneratedAt time.Time `json:"generated_at"`
|
||||
Metadata map[string]interface{} `json:"metadata"`
|
||||
}
|
||||
|
||||
// MonitoringEvent represents a monitoring system event
|
||||
type MonitoringEvent struct {
|
||||
Type string `json:"type"`
|
||||
Level string `json:"level"`
|
||||
Message string `json:"message"`
|
||||
Component string `json:"component"`
|
||||
NodeID string `json:"node_id"`
|
||||
Timestamp time.Time `json:"timestamp"`
|
||||
Metadata map[string]interface{} `json:"metadata"`
|
||||
Type string `json:"type"`
|
||||
Level string `json:"level"`
|
||||
Message string `json:"message"`
|
||||
Component string `json:"component"`
|
||||
NodeID string `json:"node_id"`
|
||||
Timestamp time.Time `json:"timestamp"`
|
||||
Metadata map[string]interface{} `json:"metadata"`
|
||||
}
|
||||
|
||||
// StructuredLogger provides structured logging for storage operations
|
||||
type StructuredLogger struct {
|
||||
mu sync.RWMutex
|
||||
level LogLevel
|
||||
output LogOutput
|
||||
mu sync.RWMutex
|
||||
level LogLevel
|
||||
output LogOutput
|
||||
formatter LogFormatter
|
||||
buffer []*LogEntry
|
||||
buffer []*LogEntry
|
||||
maxBuffer int
|
||||
}
|
||||
|
||||
@@ -303,27 +381,27 @@ type LogFormatter interface {
|
||||
|
||||
// LogEntry represents a single log entry
|
||||
type LogEntry struct {
|
||||
Level LogLevel `json:"level"`
|
||||
Message string `json:"message"`
|
||||
Component string `json:"component"`
|
||||
Operation string `json:"operation"`
|
||||
NodeID string `json:"node_id"`
|
||||
Timestamp time.Time `json:"timestamp"`
|
||||
Level LogLevel `json:"level"`
|
||||
Message string `json:"message"`
|
||||
Component string `json:"component"`
|
||||
Operation string `json:"operation"`
|
||||
NodeID string `json:"node_id"`
|
||||
Timestamp time.Time `json:"timestamp"`
|
||||
Fields map[string]interface{} `json:"fields"`
|
||||
Error error `json:"error,omitempty"`
|
||||
Error error `json:"error,omitempty"`
|
||||
}
|
||||
|
||||
// NewMonitoringSystem creates a new monitoring system
|
||||
func NewMonitoringSystem(nodeID string) *MonitoringSystem {
|
||||
ms := &MonitoringSystem{
|
||||
nodeID: nodeID,
|
||||
metrics: initializeMetrics(nodeID),
|
||||
alerts: newAlertManager(),
|
||||
healthChecker: newHealthChecker(),
|
||||
nodeID: nodeID,
|
||||
metrics: initializeMetrics(nodeID),
|
||||
alerts: newAlertManager(),
|
||||
healthChecker: newHealthChecker(),
|
||||
performanceProfiler: newPerformanceProfiler(),
|
||||
logger: newStructuredLogger(),
|
||||
notifications: make(chan *MonitoringEvent, 1000),
|
||||
stopCh: make(chan struct{}),
|
||||
logger: newStructuredLogger(),
|
||||
notifications: make(chan *MonitoringEvent, 1000),
|
||||
stopCh: make(chan struct{}),
|
||||
}
|
||||
|
||||
// Start monitoring goroutines
|
||||
@@ -571,7 +649,7 @@ func (ms *MonitoringSystem) executeHealthCheck(check HealthCheck) {
|
||||
defer cancel()
|
||||
|
||||
result := check.Checker(ctx)
|
||||
|
||||
|
||||
ms.healthChecker.mu.Lock()
|
||||
ms.healthChecker.status.Components[check.Name] = result
|
||||
ms.healthChecker.mu.Unlock()
|
||||
@@ -592,21 +670,21 @@ func (ms *MonitoringSystem) analyzePerformance() {
|
||||
|
||||
func newAlertManager() *AlertManager {
|
||||
return &AlertManager{
|
||||
rules: make([]*AlertRule, 0),
|
||||
rules: make([]*AlertRule, 0),
|
||||
activealerts: make(map[string]*Alert),
|
||||
notifiers: make([]AlertNotifier, 0),
|
||||
history: make([]*Alert, 0),
|
||||
maxHistory: 1000,
|
||||
history: make([]*Alert, 0),
|
||||
maxHistory: 1000,
|
||||
}
|
||||
}
|
||||
|
||||
func newHealthChecker() *HealthChecker {
|
||||
return &HealthChecker{
|
||||
checks: make(map[string]HealthCheck),
|
||||
status: &SystemHealth{
|
||||
checks: make(map[string]HealthCheck),
|
||||
status: &SystemHealth{
|
||||
OverallStatus: HealthHealthy,
|
||||
Components: make(map[string]HealthResult),
|
||||
StartTime: time.Now(),
|
||||
Components: make(map[string]HealthResult),
|
||||
StartTime: time.Now(),
|
||||
},
|
||||
checkInterval: 1 * time.Minute,
|
||||
timeout: 30 * time.Second,
|
||||
@@ -664,8 +742,8 @@ func (ms *MonitoringSystem) GetMonitoringStats() (*MonitoringStats, error) {
|
||||
defer ms.mu.RUnlock()
|
||||
|
||||
stats := &MonitoringStats{
|
||||
NodeID: ms.nodeID,
|
||||
Timestamp: time.Now(),
|
||||
NodeID: ms.nodeID,
|
||||
Timestamp: time.Now(),
|
||||
HealthStatus: ms.healthChecker.status.OverallStatus,
|
||||
ActiveAlerts: len(ms.alerts.activealerts),
|
||||
Bottlenecks: len(ms.performanceProfiler.bottlenecks),
|
||||
|
||||
@@ -3,9 +3,8 @@ package storage
|
||||
import (
|
||||
"time"
|
||||
|
||||
"chorus/pkg/ucxl"
|
||||
"chorus/pkg/crypto"
|
||||
slurpContext "chorus/pkg/slurp/context"
|
||||
"chorus/pkg/ucxl"
|
||||
)
|
||||
|
||||
// DatabaseSchema defines the complete schema for encrypted context storage
|
||||
@@ -14,325 +13,325 @@ import (
|
||||
// ContextRecord represents the main context storage record
|
||||
type ContextRecord struct {
|
||||
// Primary identification
|
||||
ID string `json:"id" db:"id"` // Unique record ID
|
||||
UCXLAddress ucxl.Address `json:"ucxl_address" db:"ucxl_address"` // UCXL address
|
||||
Path string `json:"path" db:"path"` // File system path
|
||||
PathHash string `json:"path_hash" db:"path_hash"` // Hash of path for indexing
|
||||
|
||||
ID string `json:"id" db:"id"` // Unique record ID
|
||||
UCXLAddress ucxl.Address `json:"ucxl_address" db:"ucxl_address"` // UCXL address
|
||||
Path string `json:"path" db:"path"` // File system path
|
||||
PathHash string `json:"path_hash" db:"path_hash"` // Hash of path for indexing
|
||||
|
||||
// Core context data
|
||||
Summary string `json:"summary" db:"summary"`
|
||||
Purpose string `json:"purpose" db:"purpose"`
|
||||
Technologies []byte `json:"technologies" db:"technologies"` // JSON array
|
||||
Tags []byte `json:"tags" db:"tags"` // JSON array
|
||||
Insights []byte `json:"insights" db:"insights"` // JSON array
|
||||
|
||||
Summary string `json:"summary" db:"summary"`
|
||||
Purpose string `json:"purpose" db:"purpose"`
|
||||
Technologies []byte `json:"technologies" db:"technologies"` // JSON array
|
||||
Tags []byte `json:"tags" db:"tags"` // JSON array
|
||||
Insights []byte `json:"insights" db:"insights"` // JSON array
|
||||
|
||||
// Hierarchy control
|
||||
OverridesParent bool `json:"overrides_parent" db:"overrides_parent"`
|
||||
ContextSpecificity int `json:"context_specificity" db:"context_specificity"`
|
||||
AppliesToChildren bool `json:"applies_to_children" db:"applies_to_children"`
|
||||
|
||||
OverridesParent bool `json:"overrides_parent" db:"overrides_parent"`
|
||||
ContextSpecificity int `json:"context_specificity" db:"context_specificity"`
|
||||
AppliesToChildren bool `json:"applies_to_children" db:"applies_to_children"`
|
||||
|
||||
// Quality metrics
|
||||
RAGConfidence float64 `json:"rag_confidence" db:"rag_confidence"`
|
||||
StalenessScore float64 `json:"staleness_score" db:"staleness_score"`
|
||||
ValidationScore float64 `json:"validation_score" db:"validation_score"`
|
||||
|
||||
RAGConfidence float64 `json:"rag_confidence" db:"rag_confidence"`
|
||||
StalenessScore float64 `json:"staleness_score" db:"staleness_score"`
|
||||
ValidationScore float64 `json:"validation_score" db:"validation_score"`
|
||||
|
||||
// Versioning
|
||||
Version int64 `json:"version" db:"version"`
|
||||
ParentVersion *int64 `json:"parent_version" db:"parent_version"`
|
||||
ContextHash string `json:"context_hash" db:"context_hash"`
|
||||
|
||||
Version int64 `json:"version" db:"version"`
|
||||
ParentVersion *int64 `json:"parent_version" db:"parent_version"`
|
||||
ContextHash string `json:"context_hash" db:"context_hash"`
|
||||
|
||||
// Temporal metadata
|
||||
CreatedAt time.Time `json:"created_at" db:"created_at"`
|
||||
UpdatedAt time.Time `json:"updated_at" db:"updated_at"`
|
||||
GeneratedAt time.Time `json:"generated_at" db:"generated_at"`
|
||||
LastAccessedAt *time.Time `json:"last_accessed_at" db:"last_accessed_at"`
|
||||
ExpiresAt *time.Time `json:"expires_at" db:"expires_at"`
|
||||
|
||||
CreatedAt time.Time `json:"created_at" db:"created_at"`
|
||||
UpdatedAt time.Time `json:"updated_at" db:"updated_at"`
|
||||
GeneratedAt time.Time `json:"generated_at" db:"generated_at"`
|
||||
LastAccessedAt *time.Time `json:"last_accessed_at" db:"last_accessed_at"`
|
||||
ExpiresAt *time.Time `json:"expires_at" db:"expires_at"`
|
||||
|
||||
// Storage metadata
|
||||
StorageType string `json:"storage_type" db:"storage_type"` // local, distributed, hybrid
|
||||
CompressionType string `json:"compression_type" db:"compression_type"`
|
||||
EncryptionLevel int `json:"encryption_level" db:"encryption_level"`
|
||||
ReplicationFactor int `json:"replication_factor" db:"replication_factor"`
|
||||
Checksum string `json:"checksum" db:"checksum"`
|
||||
DataSize int64 `json:"data_size" db:"data_size"`
|
||||
CompressedSize int64 `json:"compressed_size" db:"compressed_size"`
|
||||
StorageType string `json:"storage_type" db:"storage_type"` // local, distributed, hybrid
|
||||
CompressionType string `json:"compression_type" db:"compression_type"`
|
||||
EncryptionLevel int `json:"encryption_level" db:"encryption_level"`
|
||||
ReplicationFactor int `json:"replication_factor" db:"replication_factor"`
|
||||
Checksum string `json:"checksum" db:"checksum"`
|
||||
DataSize int64 `json:"data_size" db:"data_size"`
|
||||
CompressedSize int64 `json:"compressed_size" db:"compressed_size"`
|
||||
}
|
||||
|
||||
// EncryptedContextRecord represents role-based encrypted context storage
|
||||
type EncryptedContextRecord struct {
|
||||
// Primary keys
|
||||
ID string `json:"id" db:"id"`
|
||||
ContextID string `json:"context_id" db:"context_id"` // FK to ContextRecord
|
||||
Role string `json:"role" db:"role"`
|
||||
UCXLAddress ucxl.Address `json:"ucxl_address" db:"ucxl_address"`
|
||||
|
||||
ID string `json:"id" db:"id"`
|
||||
ContextID string `json:"context_id" db:"context_id"` // FK to ContextRecord
|
||||
Role string `json:"role" db:"role"`
|
||||
UCXLAddress ucxl.Address `json:"ucxl_address" db:"ucxl_address"`
|
||||
|
||||
// Encryption details
|
||||
AccessLevel slurpContext.RoleAccessLevel `json:"access_level" db:"access_level"`
|
||||
EncryptedData []byte `json:"encrypted_data" db:"encrypted_data"`
|
||||
KeyFingerprint string `json:"key_fingerprint" db:"key_fingerprint"`
|
||||
EncryptionAlgo string `json:"encryption_algo" db:"encryption_algo"`
|
||||
KeyVersion int `json:"key_version" db:"key_version"`
|
||||
|
||||
AccessLevel slurpContext.RoleAccessLevel `json:"access_level" db:"access_level"`
|
||||
EncryptedData []byte `json:"encrypted_data" db:"encrypted_data"`
|
||||
KeyFingerprint string `json:"key_fingerprint" db:"key_fingerprint"`
|
||||
EncryptionAlgo string `json:"encryption_algo" db:"encryption_algo"`
|
||||
KeyVersion int `json:"key_version" db:"key_version"`
|
||||
|
||||
// Data integrity
|
||||
DataChecksum string `json:"data_checksum" db:"data_checksum"`
|
||||
EncryptionHash string `json:"encryption_hash" db:"encryption_hash"`
|
||||
|
||||
DataChecksum string `json:"data_checksum" db:"data_checksum"`
|
||||
EncryptionHash string `json:"encryption_hash" db:"encryption_hash"`
|
||||
|
||||
// Temporal data
|
||||
CreatedAt time.Time `json:"created_at" db:"created_at"`
|
||||
UpdatedAt time.Time `json:"updated_at" db:"updated_at"`
|
||||
LastDecryptedAt *time.Time `json:"last_decrypted_at" db:"last_decrypted_at"`
|
||||
ExpiresAt *time.Time `json:"expires_at" db:"expires_at"`
|
||||
|
||||
CreatedAt time.Time `json:"created_at" db:"created_at"`
|
||||
UpdatedAt time.Time `json:"updated_at" db:"updated_at"`
|
||||
LastDecryptedAt *time.Time `json:"last_decrypted_at" db:"last_decrypted_at"`
|
||||
ExpiresAt *time.Time `json:"expires_at" db:"expires_at"`
|
||||
|
||||
// Access tracking
|
||||
AccessCount int64 `json:"access_count" db:"access_count"`
|
||||
LastAccessedBy string `json:"last_accessed_by" db:"last_accessed_by"`
|
||||
AccessHistory []byte `json:"access_history" db:"access_history"` // JSON access log
|
||||
AccessCount int64 `json:"access_count" db:"access_count"`
|
||||
LastAccessedBy string `json:"last_accessed_by" db:"last_accessed_by"`
|
||||
AccessHistory []byte `json:"access_history" db:"access_history"` // JSON access log
|
||||
}
|
||||
|
||||
// ContextHierarchyRecord represents hierarchical relationships between contexts
|
||||
type ContextHierarchyRecord struct {
|
||||
ID string `json:"id" db:"id"`
|
||||
ParentAddress ucxl.Address `json:"parent_address" db:"parent_address"`
|
||||
ChildAddress ucxl.Address `json:"child_address" db:"child_address"`
|
||||
ParentPath string `json:"parent_path" db:"parent_path"`
|
||||
ChildPath string `json:"child_path" db:"child_path"`
|
||||
|
||||
ID string `json:"id" db:"id"`
|
||||
ParentAddress ucxl.Address `json:"parent_address" db:"parent_address"`
|
||||
ChildAddress ucxl.Address `json:"child_address" db:"child_address"`
|
||||
ParentPath string `json:"parent_path" db:"parent_path"`
|
||||
ChildPath string `json:"child_path" db:"child_path"`
|
||||
|
||||
// Relationship metadata
|
||||
RelationshipType string `json:"relationship_type" db:"relationship_type"` // parent, sibling, dependency
|
||||
InheritanceWeight float64 `json:"inheritance_weight" db:"inheritance_weight"`
|
||||
OverrideStrength int `json:"override_strength" db:"override_strength"`
|
||||
Distance int `json:"distance" db:"distance"` // Hierarchy depth distance
|
||||
|
||||
RelationshipType string `json:"relationship_type" db:"relationship_type"` // parent, sibling, dependency
|
||||
InheritanceWeight float64 `json:"inheritance_weight" db:"inheritance_weight"`
|
||||
OverrideStrength int `json:"override_strength" db:"override_strength"`
|
||||
Distance int `json:"distance" db:"distance"` // Hierarchy depth distance
|
||||
|
||||
// Temporal tracking
|
||||
CreatedAt time.Time `json:"created_at" db:"created_at"`
|
||||
ValidatedAt time.Time `json:"validated_at" db:"validated_at"`
|
||||
LastResolvedAt *time.Time `json:"last_resolved_at" db:"last_resolved_at"`
|
||||
|
||||
CreatedAt time.Time `json:"created_at" db:"created_at"`
|
||||
ValidatedAt time.Time `json:"validated_at" db:"validated_at"`
|
||||
LastResolvedAt *time.Time `json:"last_resolved_at" db:"last_resolved_at"`
|
||||
|
||||
// Resolution statistics
|
||||
ResolutionCount int64 `json:"resolution_count" db:"resolution_count"`
|
||||
ResolutionTime float64 `json:"resolution_time" db:"resolution_time"` // Average ms
|
||||
ResolutionCount int64 `json:"resolution_count" db:"resolution_count"`
|
||||
ResolutionTime float64 `json:"resolution_time" db:"resolution_time"` // Average ms
|
||||
}
|
||||
|
||||
// DecisionHopRecord represents temporal decision analysis storage
|
||||
type DecisionHopRecord struct {
|
||||
// Primary identification
|
||||
ID string `json:"id" db:"id"`
|
||||
DecisionID string `json:"decision_id" db:"decision_id"`
|
||||
UCXLAddress ucxl.Address `json:"ucxl_address" db:"ucxl_address"`
|
||||
ContextVersion int64 `json:"context_version" db:"context_version"`
|
||||
|
||||
ID string `json:"id" db:"id"`
|
||||
DecisionID string `json:"decision_id" db:"decision_id"`
|
||||
UCXLAddress ucxl.Address `json:"ucxl_address" db:"ucxl_address"`
|
||||
ContextVersion int64 `json:"context_version" db:"context_version"`
|
||||
|
||||
// Decision metadata
|
||||
ChangeReason string `json:"change_reason" db:"change_reason"`
|
||||
DecisionMaker string `json:"decision_maker" db:"decision_maker"`
|
||||
DecisionRationale string `json:"decision_rationale" db:"decision_rationale"`
|
||||
ImpactScope string `json:"impact_scope" db:"impact_scope"`
|
||||
ConfidenceLevel float64 `json:"confidence_level" db:"confidence_level"`
|
||||
|
||||
ChangeReason string `json:"change_reason" db:"change_reason"`
|
||||
DecisionMaker string `json:"decision_maker" db:"decision_maker"`
|
||||
DecisionRationale string `json:"decision_rationale" db:"decision_rationale"`
|
||||
ImpactScope string `json:"impact_scope" db:"impact_scope"`
|
||||
ConfidenceLevel float64 `json:"confidence_level" db:"confidence_level"`
|
||||
|
||||
// Context evolution
|
||||
PreviousHash string `json:"previous_hash" db:"previous_hash"`
|
||||
CurrentHash string `json:"current_hash" db:"current_hash"`
|
||||
ContextDelta []byte `json:"context_delta" db:"context_delta"` // JSON diff
|
||||
StalenessScore float64 `json:"staleness_score" db:"staleness_score"`
|
||||
|
||||
PreviousHash string `json:"previous_hash" db:"previous_hash"`
|
||||
CurrentHash string `json:"current_hash" db:"current_hash"`
|
||||
ContextDelta []byte `json:"context_delta" db:"context_delta"` // JSON diff
|
||||
StalenessScore float64 `json:"staleness_score" db:"staleness_score"`
|
||||
|
||||
// Temporal data
|
||||
Timestamp time.Time `json:"timestamp" db:"timestamp"`
|
||||
PreviousDecisionTime *time.Time `json:"previous_decision_time" db:"previous_decision_time"`
|
||||
ProcessingTime float64 `json:"processing_time" db:"processing_time"` // ms
|
||||
|
||||
Timestamp time.Time `json:"timestamp" db:"timestamp"`
|
||||
PreviousDecisionTime *time.Time `json:"previous_decision_time" db:"previous_decision_time"`
|
||||
ProcessingTime float64 `json:"processing_time" db:"processing_time"` // ms
|
||||
|
||||
// External references
|
||||
ExternalRefs []byte `json:"external_refs" db:"external_refs"` // JSON array
|
||||
CommitHash string `json:"commit_hash" db:"commit_hash"`
|
||||
TicketID string `json:"ticket_id" db:"ticket_id"`
|
||||
ExternalRefs []byte `json:"external_refs" db:"external_refs"` // JSON array
|
||||
CommitHash string `json:"commit_hash" db:"commit_hash"`
|
||||
TicketID string `json:"ticket_id" db:"ticket_id"`
|
||||
}
|
||||
|
||||
// DecisionInfluenceRecord represents decision influence relationships
|
||||
type DecisionInfluenceRecord struct {
|
||||
ID string `json:"id" db:"id"`
|
||||
SourceDecisionID string `json:"source_decision_id" db:"source_decision_id"`
|
||||
TargetDecisionID string `json:"target_decision_id" db:"target_decision_id"`
|
||||
SourceAddress ucxl.Address `json:"source_address" db:"source_address"`
|
||||
TargetAddress ucxl.Address `json:"target_address" db:"target_address"`
|
||||
|
||||
ID string `json:"id" db:"id"`
|
||||
SourceDecisionID string `json:"source_decision_id" db:"source_decision_id"`
|
||||
TargetDecisionID string `json:"target_decision_id" db:"target_decision_id"`
|
||||
SourceAddress ucxl.Address `json:"source_address" db:"source_address"`
|
||||
TargetAddress ucxl.Address `json:"target_address" db:"target_address"`
|
||||
|
||||
// Influence metrics
|
||||
InfluenceStrength float64 `json:"influence_strength" db:"influence_strength"`
|
||||
InfluenceType string `json:"influence_type" db:"influence_type"` // direct, indirect, cascading
|
||||
PropagationDelay float64 `json:"propagation_delay" db:"propagation_delay"` // hours
|
||||
HopDistance int `json:"hop_distance" db:"hop_distance"`
|
||||
|
||||
InfluenceStrength float64 `json:"influence_strength" db:"influence_strength"`
|
||||
InfluenceType string `json:"influence_type" db:"influence_type"` // direct, indirect, cascading
|
||||
PropagationDelay float64 `json:"propagation_delay" db:"propagation_delay"` // hours
|
||||
HopDistance int `json:"hop_distance" db:"hop_distance"`
|
||||
|
||||
// Path analysis
|
||||
ShortestPath []byte `json:"shortest_path" db:"shortest_path"` // JSON path array
|
||||
AlternatePaths []byte `json:"alternate_paths" db:"alternate_paths"` // JSON paths
|
||||
PathConfidence float64 `json:"path_confidence" db:"path_confidence"`
|
||||
|
||||
ShortestPath []byte `json:"shortest_path" db:"shortest_path"` // JSON path array
|
||||
AlternatePaths []byte `json:"alternate_paths" db:"alternate_paths"` // JSON paths
|
||||
PathConfidence float64 `json:"path_confidence" db:"path_confidence"`
|
||||
|
||||
// Temporal tracking
|
||||
CreatedAt time.Time `json:"created_at" db:"created_at"`
|
||||
LastAnalyzedAt time.Time `json:"last_analyzed_at" db:"last_analyzed_at"`
|
||||
ValidatedAt *time.Time `json:"validated_at" db:"validated_at"`
|
||||
CreatedAt time.Time `json:"created_at" db:"created_at"`
|
||||
LastAnalyzedAt time.Time `json:"last_analyzed_at" db:"last_analyzed_at"`
|
||||
ValidatedAt *time.Time `json:"validated_at" db:"validated_at"`
|
||||
}
|
||||
|
||||
// AccessControlRecord represents role-based access control metadata
|
||||
type AccessControlRecord struct {
|
||||
ID string `json:"id" db:"id"`
|
||||
UCXLAddress ucxl.Address `json:"ucxl_address" db:"ucxl_address"`
|
||||
Role string `json:"role" db:"role"`
|
||||
Permissions []byte `json:"permissions" db:"permissions"` // JSON permissions array
|
||||
|
||||
ID string `json:"id" db:"id"`
|
||||
UCXLAddress ucxl.Address `json:"ucxl_address" db:"ucxl_address"`
|
||||
Role string `json:"role" db:"role"`
|
||||
Permissions []byte `json:"permissions" db:"permissions"` // JSON permissions array
|
||||
|
||||
// Access levels
|
||||
ReadAccess bool `json:"read_access" db:"read_access"`
|
||||
WriteAccess bool `json:"write_access" db:"write_access"`
|
||||
DeleteAccess bool `json:"delete_access" db:"delete_access"`
|
||||
AdminAccess bool `json:"admin_access" db:"admin_access"`
|
||||
AccessLevel slurpContext.RoleAccessLevel `json:"access_level" db:"access_level"`
|
||||
|
||||
ReadAccess bool `json:"read_access" db:"read_access"`
|
||||
WriteAccess bool `json:"write_access" db:"write_access"`
|
||||
DeleteAccess bool `json:"delete_access" db:"delete_access"`
|
||||
AdminAccess bool `json:"admin_access" db:"admin_access"`
|
||||
AccessLevel slurpContext.RoleAccessLevel `json:"access_level" db:"access_level"`
|
||||
|
||||
// Constraints
|
||||
TimeConstraints []byte `json:"time_constraints" db:"time_constraints"` // JSON time rules
|
||||
IPConstraints []byte `json:"ip_constraints" db:"ip_constraints"` // JSON IP rules
|
||||
ContextFilters []byte `json:"context_filters" db:"context_filters"` // JSON filter rules
|
||||
|
||||
TimeConstraints []byte `json:"time_constraints" db:"time_constraints"` // JSON time rules
|
||||
IPConstraints []byte `json:"ip_constraints" db:"ip_constraints"` // JSON IP rules
|
||||
ContextFilters []byte `json:"context_filters" db:"context_filters"` // JSON filter rules
|
||||
|
||||
// Audit trail
|
||||
CreatedAt time.Time `json:"created_at" db:"created_at"`
|
||||
CreatedBy string `json:"created_by" db:"created_by"`
|
||||
UpdatedAt time.Time `json:"updated_at" db:"updated_at"`
|
||||
UpdatedBy string `json:"updated_by" db:"updated_by"`
|
||||
ExpiresAt *time.Time `json:"expires_at" db:"expires_at"`
|
||||
CreatedAt time.Time `json:"created_at" db:"created_at"`
|
||||
CreatedBy string `json:"created_by" db:"created_by"`
|
||||
UpdatedAt time.Time `json:"updated_at" db:"updated_at"`
|
||||
UpdatedBy string `json:"updated_by" db:"updated_by"`
|
||||
ExpiresAt *time.Time `json:"expires_at" db:"expires_at"`
|
||||
}
|
||||
|
||||
// ContextIndexRecord represents search index entries for contexts
|
||||
type ContextIndexRecord struct {
|
||||
ID string `json:"id" db:"id"`
|
||||
UCXLAddress ucxl.Address `json:"ucxl_address" db:"ucxl_address"`
|
||||
IndexName string `json:"index_name" db:"index_name"`
|
||||
|
||||
ID string `json:"id" db:"id"`
|
||||
UCXLAddress ucxl.Address `json:"ucxl_address" db:"ucxl_address"`
|
||||
IndexName string `json:"index_name" db:"index_name"`
|
||||
|
||||
// Indexed content
|
||||
Tokens []byte `json:"tokens" db:"tokens"` // JSON token array
|
||||
NGrams []byte `json:"ngrams" db:"ngrams"` // JSON n-gram array
|
||||
SemanticVector []byte `json:"semantic_vector" db:"semantic_vector"` // Embedding vector
|
||||
|
||||
Tokens []byte `json:"tokens" db:"tokens"` // JSON token array
|
||||
NGrams []byte `json:"ngrams" db:"ngrams"` // JSON n-gram array
|
||||
SemanticVector []byte `json:"semantic_vector" db:"semantic_vector"` // Embedding vector
|
||||
|
||||
// Search metadata
|
||||
IndexWeight float64 `json:"index_weight" db:"index_weight"`
|
||||
BoostFactor float64 `json:"boost_factor" db:"boost_factor"`
|
||||
Language string `json:"language" db:"language"`
|
||||
ContentType string `json:"content_type" db:"content_type"`
|
||||
|
||||
IndexWeight float64 `json:"index_weight" db:"index_weight"`
|
||||
BoostFactor float64 `json:"boost_factor" db:"boost_factor"`
|
||||
Language string `json:"language" db:"language"`
|
||||
ContentType string `json:"content_type" db:"content_type"`
|
||||
|
||||
// Quality metrics
|
||||
RelevanceScore float64 `json:"relevance_score" db:"relevance_score"`
|
||||
FreshnessScore float64 `json:"freshness_score" db:"freshness_score"`
|
||||
PopularityScore float64 `json:"popularity_score" db:"popularity_score"`
|
||||
|
||||
RelevanceScore float64 `json:"relevance_score" db:"relevance_score"`
|
||||
FreshnessScore float64 `json:"freshness_score" db:"freshness_score"`
|
||||
PopularityScore float64 `json:"popularity_score" db:"popularity_score"`
|
||||
|
||||
// Temporal tracking
|
||||
CreatedAt time.Time `json:"created_at" db:"created_at"`
|
||||
UpdatedAt time.Time `json:"updated_at" db:"updated_at"`
|
||||
LastReindexed time.Time `json:"last_reindexed" db:"last_reindexed"`
|
||||
CreatedAt time.Time `json:"created_at" db:"created_at"`
|
||||
UpdatedAt time.Time `json:"updated_at" db:"updated_at"`
|
||||
LastReindexed time.Time `json:"last_reindexed" db:"last_reindexed"`
|
||||
}
|
||||
|
||||
// CacheEntryRecord represents cached context data
|
||||
type CacheEntryRecord struct {
|
||||
ID string `json:"id" db:"id"`
|
||||
CacheKey string `json:"cache_key" db:"cache_key"`
|
||||
UCXLAddress ucxl.Address `json:"ucxl_address" db:"ucxl_address"`
|
||||
Role string `json:"role" db:"role"`
|
||||
|
||||
ID string `json:"id" db:"id"`
|
||||
CacheKey string `json:"cache_key" db:"cache_key"`
|
||||
UCXLAddress ucxl.Address `json:"ucxl_address" db:"ucxl_address"`
|
||||
Role string `json:"role" db:"role"`
|
||||
|
||||
// Cached data
|
||||
CachedData []byte `json:"cached_data" db:"cached_data"`
|
||||
DataHash string `json:"data_hash" db:"data_hash"`
|
||||
Compressed bool `json:"compressed" db:"compressed"`
|
||||
OriginalSize int64 `json:"original_size" db:"original_size"`
|
||||
CompressedSize int64 `json:"compressed_size" db:"compressed_size"`
|
||||
|
||||
CachedData []byte `json:"cached_data" db:"cached_data"`
|
||||
DataHash string `json:"data_hash" db:"data_hash"`
|
||||
Compressed bool `json:"compressed" db:"compressed"`
|
||||
OriginalSize int64 `json:"original_size" db:"original_size"`
|
||||
CompressedSize int64 `json:"compressed_size" db:"compressed_size"`
|
||||
|
||||
// Cache metadata
|
||||
TTL int64 `json:"ttl" db:"ttl"` // seconds
|
||||
Priority int `json:"priority" db:"priority"`
|
||||
AccessCount int64 `json:"access_count" db:"access_count"`
|
||||
HitCount int64 `json:"hit_count" db:"hit_count"`
|
||||
|
||||
TTL int64 `json:"ttl" db:"ttl"` // seconds
|
||||
Priority int `json:"priority" db:"priority"`
|
||||
AccessCount int64 `json:"access_count" db:"access_count"`
|
||||
HitCount int64 `json:"hit_count" db:"hit_count"`
|
||||
|
||||
// Temporal data
|
||||
CreatedAt time.Time `json:"created_at" db:"created_at"`
|
||||
LastAccessedAt time.Time `json:"last_accessed_at" db:"last_accessed_at"`
|
||||
LastHitAt *time.Time `json:"last_hit_at" db:"last_hit_at"`
|
||||
ExpiresAt time.Time `json:"expires_at" db:"expires_at"`
|
||||
CreatedAt time.Time `json:"created_at" db:"created_at"`
|
||||
LastAccessedAt time.Time `json:"last_accessed_at" db:"last_accessed_at"`
|
||||
LastHitAt *time.Time `json:"last_hit_at" db:"last_hit_at"`
|
||||
ExpiresAt time.Time `json:"expires_at" db:"expires_at"`
|
||||
}
|
||||
|
||||
// BackupRecord represents backup metadata
|
||||
type BackupRecord struct {
|
||||
ID string `json:"id" db:"id"`
|
||||
BackupID string `json:"backup_id" db:"backup_id"`
|
||||
Name string `json:"name" db:"name"`
|
||||
Destination string `json:"destination" db:"destination"`
|
||||
|
||||
ID string `json:"id" db:"id"`
|
||||
BackupID string `json:"backup_id" db:"backup_id"`
|
||||
Name string `json:"name" db:"name"`
|
||||
Destination string `json:"destination" db:"destination"`
|
||||
|
||||
// Backup content
|
||||
ContextCount int64 `json:"context_count" db:"context_count"`
|
||||
DataSize int64 `json:"data_size" db:"data_size"`
|
||||
CompressedSize int64 `json:"compressed_size" db:"compressed_size"`
|
||||
Checksum string `json:"checksum" db:"checksum"`
|
||||
|
||||
ContextCount int64 `json:"context_count" db:"context_count"`
|
||||
DataSize int64 `json:"data_size" db:"data_size"`
|
||||
CompressedSize int64 `json:"compressed_size" db:"compressed_size"`
|
||||
Checksum string `json:"checksum" db:"checksum"`
|
||||
|
||||
// Backup metadata
|
||||
IncludesIndexes bool `json:"includes_indexes" db:"includes_indexes"`
|
||||
IncludesCache bool `json:"includes_cache" db:"includes_cache"`
|
||||
Encrypted bool `json:"encrypted" db:"encrypted"`
|
||||
Incremental bool `json:"incremental" db:"incremental"`
|
||||
ParentBackupID string `json:"parent_backup_id" db:"parent_backup_id"`
|
||||
|
||||
IncludesIndexes bool `json:"includes_indexes" db:"includes_indexes"`
|
||||
IncludesCache bool `json:"includes_cache" db:"includes_cache"`
|
||||
Encrypted bool `json:"encrypted" db:"encrypted"`
|
||||
Incremental bool `json:"incremental" db:"incremental"`
|
||||
ParentBackupID string `json:"parent_backup_id" db:"parent_backup_id"`
|
||||
|
||||
// Status tracking
|
||||
Status BackupStatus `json:"status" db:"status"`
|
||||
Progress float64 `json:"progress" db:"progress"`
|
||||
ErrorMessage string `json:"error_message" db:"error_message"`
|
||||
|
||||
Status BackupStatus `json:"status" db:"status"`
|
||||
Progress float64 `json:"progress" db:"progress"`
|
||||
ErrorMessage string `json:"error_message" db:"error_message"`
|
||||
|
||||
// Temporal data
|
||||
CreatedAt time.Time `json:"created_at" db:"created_at"`
|
||||
StartedAt *time.Time `json:"started_at" db:"started_at"`
|
||||
CompletedAt *time.Time `json:"completed_at" db:"completed_at"`
|
||||
RetentionUntil time.Time `json:"retention_until" db:"retention_until"`
|
||||
CreatedAt time.Time `json:"created_at" db:"created_at"`
|
||||
StartedAt *time.Time `json:"started_at" db:"started_at"`
|
||||
CompletedAt *time.Time `json:"completed_at" db:"completed_at"`
|
||||
RetentionUntil time.Time `json:"retention_until" db:"retention_until"`
|
||||
}
|
||||
|
||||
// MetricsRecord represents storage performance metrics
|
||||
type MetricsRecord struct {
|
||||
ID string `json:"id" db:"id"`
|
||||
MetricType string `json:"metric_type" db:"metric_type"` // storage, encryption, cache, etc.
|
||||
NodeID string `json:"node_id" db:"node_id"`
|
||||
|
||||
ID string `json:"id" db:"id"`
|
||||
MetricType string `json:"metric_type" db:"metric_type"` // storage, encryption, cache, etc.
|
||||
NodeID string `json:"node_id" db:"node_id"`
|
||||
|
||||
// Metric data
|
||||
MetricName string `json:"metric_name" db:"metric_name"`
|
||||
MetricValue float64 `json:"metric_value" db:"metric_value"`
|
||||
MetricUnit string `json:"metric_unit" db:"metric_unit"`
|
||||
Tags []byte `json:"tags" db:"tags"` // JSON tag object
|
||||
|
||||
MetricName string `json:"metric_name" db:"metric_name"`
|
||||
MetricValue float64 `json:"metric_value" db:"metric_value"`
|
||||
MetricUnit string `json:"metric_unit" db:"metric_unit"`
|
||||
Tags []byte `json:"tags" db:"tags"` // JSON tag object
|
||||
|
||||
// Aggregation data
|
||||
AggregationType string `json:"aggregation_type" db:"aggregation_type"` // avg, sum, count, etc.
|
||||
TimeWindow int64 `json:"time_window" db:"time_window"` // seconds
|
||||
SampleCount int64 `json:"sample_count" db:"sample_count"`
|
||||
|
||||
AggregationType string `json:"aggregation_type" db:"aggregation_type"` // avg, sum, count, etc.
|
||||
TimeWindow int64 `json:"time_window" db:"time_window"` // seconds
|
||||
SampleCount int64 `json:"sample_count" db:"sample_count"`
|
||||
|
||||
// Temporal tracking
|
||||
Timestamp time.Time `json:"timestamp" db:"timestamp"`
|
||||
CreatedAt time.Time `json:"created_at" db:"created_at"`
|
||||
Timestamp time.Time `json:"timestamp" db:"timestamp"`
|
||||
CreatedAt time.Time `json:"created_at" db:"created_at"`
|
||||
}

// ContextEvolutionRecord tracks how contexts evolve over time
type ContextEvolutionRecord struct {
	ID          string       `json:"id" db:"id"`
	UCXLAddress ucxl.Address `json:"ucxl_address" db:"ucxl_address"`
	FromVersion int64        `json:"from_version" db:"from_version"`
	ToVersion   int64        `json:"to_version" db:"to_version"`

	// Evolution analysis
	EvolutionType    string  `json:"evolution_type" db:"evolution_type"` // enhancement, refactor, fix, etc.
	SimilarityScore  float64 `json:"similarity_score" db:"similarity_score"`
	ChangesMagnitude float64 `json:"changes_magnitude" db:"changes_magnitude"`
	SemanticDrift    float64 `json:"semantic_drift" db:"semantic_drift"`

	// Change details
	ChangedFields  []byte `json:"changed_fields" db:"changed_fields"` // JSON array
	FieldDeltas    []byte `json:"field_deltas" db:"field_deltas"` // JSON delta object
	ImpactAnalysis []byte `json:"impact_analysis" db:"impact_analysis"` // JSON analysis

	// Quality assessment
	QualityImprovement float64 `json:"quality_improvement" db:"quality_improvement"`
	ConfidenceChange   float64 `json:"confidence_change" db:"confidence_change"`
	ValidationPassed   bool    `json:"validation_passed" db:"validation_passed"`

	// Temporal tracking
	EvolutionTime  time.Time `json:"evolution_time" db:"evolution_time"`
	AnalyzedAt     time.Time `json:"analyzed_at" db:"analyzed_at"`
	ProcessingTime float64   `json:"processing_time" db:"processing_time"` // ms
}
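
A minimal sketch of how the JSON-encoded columns above might be filled; the newEvolutionRecord helper and its ID format are hypothetical, while the field names and types come from ContextEvolutionRecord.

// Hypothetical constructor: records which fields changed between two context
// versions; scoring (similarity, drift, impact) is left to the caller.
func newEvolutionRecord(addr ucxl.Address, from, to int64, changed []string) (*ContextEvolutionRecord, error) {
	fields, err := json.Marshal(changed) // ChangedFields is stored as a JSON array
	if err != nil {
		return nil, fmt.Errorf("encode changed fields: %w", err)
	}
	now := time.Now()
	return &ContextEvolutionRecord{
		ID:            fmt.Sprintf("%s@%d->%d", addr.String(), from, to), // made-up ID scheme
		UCXLAddress:   addr,
		FromVersion:   from,
		ToVersion:     to,
		EvolutionType: "enhancement",
		ChangedFields: fields,
		EvolutionTime: now,
		AnalyzedAt:    now,
	}, nil
}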

// Schema validation and creation functions

@@ -365,44 +364,44 @@ func CreateIndexStatements() []string {
		"CREATE INDEX IF NOT EXISTS idx_context_version ON contexts(version)",
		"CREATE INDEX IF NOT EXISTS idx_context_staleness ON contexts(staleness_score)",
		"CREATE INDEX IF NOT EXISTS idx_context_confidence ON contexts(rag_confidence)",

		// Encrypted context indexes
		"CREATE INDEX IF NOT EXISTS idx_encrypted_context_role ON encrypted_contexts(role)",
		"CREATE INDEX IF NOT EXISTS idx_encrypted_context_ucxl ON encrypted_contexts(ucxl_address)",
		"CREATE INDEX IF NOT EXISTS idx_encrypted_context_access_level ON encrypted_contexts(access_level)",
		"CREATE INDEX IF NOT EXISTS idx_encrypted_context_key_fp ON encrypted_contexts(key_fingerprint)",

		// Hierarchy indexes
		"CREATE INDEX IF NOT EXISTS idx_hierarchy_parent ON context_hierarchy(parent_address)",
		"CREATE INDEX IF NOT EXISTS idx_hierarchy_child ON context_hierarchy(child_address)",
		"CREATE INDEX IF NOT EXISTS idx_hierarchy_distance ON context_hierarchy(distance)",
		"CREATE INDEX IF NOT EXISTS idx_hierarchy_weight ON context_hierarchy(inheritance_weight)",

		// Decision hop indexes
		"CREATE INDEX IF NOT EXISTS idx_decision_ucxl ON decision_hops(ucxl_address)",
		"CREATE INDEX IF NOT EXISTS idx_decision_timestamp ON decision_hops(timestamp)",
		"CREATE INDEX IF NOT EXISTS idx_decision_reason ON decision_hops(change_reason)",
		"CREATE INDEX IF NOT EXISTS idx_decision_maker ON decision_hops(decision_maker)",
		"CREATE INDEX IF NOT EXISTS idx_decision_version ON decision_hops(context_version)",

		// Decision influence indexes
		"CREATE INDEX IF NOT EXISTS idx_influence_source ON decision_influence(source_decision_id)",
		"CREATE INDEX IF NOT EXISTS idx_influence_target ON decision_influence(target_decision_id)",
		"CREATE INDEX IF NOT EXISTS idx_influence_strength ON decision_influence(influence_strength)",
		"CREATE INDEX IF NOT EXISTS idx_influence_hop_distance ON decision_influence(hop_distance)",

		// Access control indexes
		"CREATE INDEX IF NOT EXISTS idx_access_role ON access_control(role)",
		"CREATE INDEX IF NOT EXISTS idx_access_ucxl ON access_control(ucxl_address)",
		"CREATE INDEX IF NOT EXISTS idx_access_level ON access_control(access_level)",
		"CREATE INDEX IF NOT EXISTS idx_access_expires ON access_control(expires_at)",

		// Search index indexes
		"CREATE INDEX IF NOT EXISTS idx_context_index_name ON context_indexes(index_name)",
		"CREATE INDEX IF NOT EXISTS idx_context_index_ucxl ON context_indexes(ucxl_address)",
		"CREATE INDEX IF NOT EXISTS idx_context_index_relevance ON context_indexes(relevance_score)",
		"CREATE INDEX IF NOT EXISTS idx_context_index_freshness ON context_indexes(freshness_score)",

		// Cache indexes
		"CREATE INDEX IF NOT EXISTS idx_cache_key ON cache_entries(cache_key)",
		"CREATE INDEX IF NOT EXISTS idx_cache_ucxl ON cache_entries(ucxl_address)",
@@ -410,13 +409,13 @@ func CreateIndexStatements() []string {
		"CREATE INDEX IF NOT EXISTS idx_cache_expires ON cache_entries(expires_at)",
		"CREATE INDEX IF NOT EXISTS idx_cache_priority ON cache_entries(priority)",
		"CREATE INDEX IF NOT EXISTS idx_cache_access_count ON cache_entries(access_count)",

		// Metrics indexes
		"CREATE INDEX IF NOT EXISTS idx_metrics_type ON metrics(metric_type)",
		"CREATE INDEX IF NOT EXISTS idx_metrics_name ON metrics(metric_name)",
		"CREATE INDEX IF NOT EXISTS idx_metrics_node ON metrics(node_id)",
		"CREATE INDEX IF NOT EXISTS idx_metrics_timestamp ON metrics(timestamp)",

		// Evolution indexes
		"CREATE INDEX IF NOT EXISTS idx_evolution_ucxl ON context_evolution(ucxl_address)",
		"CREATE INDEX IF NOT EXISTS idx_evolution_from_version ON context_evolution(from_version)",

@@ -283,32 +283,42 @@ type IndexStatistics struct {

// BackupConfig represents backup configuration
type BackupConfig struct {
	Name string `json:"name"` // Backup name
	Destination string `json:"destination"` // Backup destination
	IncludeIndexes bool `json:"include_indexes"` // Include search indexes
	IncludeCache bool `json:"include_cache"` // Include cache data
	Compression bool `json:"compression"` // Enable compression
	Encryption bool `json:"encryption"` // Enable encryption
	EncryptionKey string `json:"encryption_key"` // Encryption key
	Incremental bool `json:"incremental"` // Incremental backup
	Retention time.Duration `json:"retention"` // Backup retention period
	Metadata map[string]interface{} `json:"metadata"` // Additional metadata
	Name           string                 `json:"name"`             // Backup name
	Destination    string                 `json:"destination"`      // Backup destination
	IncludeIndexes bool                   `json:"include_indexes"`  // Include search indexes
	IncludeCache   bool                   `json:"include_cache"`    // Include cache data
	Compression    bool                   `json:"compression"`      // Enable compression
	Encryption     bool                   `json:"encryption"`       // Enable encryption
	EncryptionKey  string                 `json:"encryption_key"`   // Encryption key
	Incremental    bool                   `json:"incremental"`      // Incremental backup
	ParentBackupID string                 `json:"parent_backup_id"` // Parent backup reference
	Retention      time.Duration          `json:"retention"`        // Backup retention period
	Metadata       map[string]interface{} `json:"metadata"`         // Additional metadata
}
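
A usage sketch for the new ParentBackupID field; the backup manager handle and the exact CreateBackup signature are assumptions based on this package's BackupManagerImpl and are not confirmed by this diff.

// Hypothetical call site: chain a compressed incremental backup to a prior
// full backup and retain it for 30 days.
func requestIncrementalBackup(ctx context.Context, bm *BackupManagerImpl, parentID string) (*BackupInfo, error) {
	cfg := &BackupConfig{
		Name:           "nightly-incremental",
		Destination:    "/var/backups/slurp", // hypothetical path
		IncludeIndexes: true,
		Compression:    true,
		Incremental:    true,
		ParentBackupID: parentID,
		Retention:      30 * 24 * time.Hour,
	}
	return bm.CreateBackup(ctx, cfg) // assumed (ctx, *BackupConfig) (*BackupInfo, error)
}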

// BackupInfo represents information about a backup
type BackupInfo struct {
	ID string `json:"id"` // Backup ID
	Name string `json:"name"` // Backup name
	CreatedAt time.Time `json:"created_at"` // Creation time
	Size int64 `json:"size"` // Backup size
	CompressedSize int64 `json:"compressed_size"` // Compressed size
	ContextCount int64 `json:"context_count"` // Number of contexts
	Encrypted bool `json:"encrypted"` // Whether encrypted
	Incremental bool `json:"incremental"` // Whether incremental
	ParentBackupID string `json:"parent_backup_id"` // Parent backup for incremental
	Checksum string `json:"checksum"` // Backup checksum
	Status BackupStatus `json:"status"` // Backup status
	Metadata map[string]interface{} `json:"metadata"` // Additional metadata
	ID              string                 `json:"id"`               // Backup ID
	BackupID        string                 `json:"backup_id"`        // Legacy identifier
	Name            string                 `json:"name"`             // Backup name
	Destination     string                 `json:"destination"`      // Destination path
	CreatedAt       time.Time              `json:"created_at"`       // Creation time
	Size            int64                  `json:"size"`             // Backup size
	CompressedSize  int64                  `json:"compressed_size"`  // Compressed size
	DataSize        int64                  `json:"data_size"`        // Total data size
	ContextCount    int64                  `json:"context_count"`    // Number of contexts
	Encrypted       bool                   `json:"encrypted"`        // Whether encrypted
	Incremental     bool                   `json:"incremental"`      // Whether incremental
	ParentBackupID  string                 `json:"parent_backup_id"` // Parent backup for incremental
	IncludesIndexes bool                   `json:"includes_indexes"` // Include indexes
	IncludesCache   bool                   `json:"includes_cache"`   // Include cache data
	Checksum        string                 `json:"checksum"`         // Backup checksum
	Status          BackupStatus           `json:"status"`           // Backup status
	Progress        float64                `json:"progress"`         // Completion progress 0-1
	ErrorMessage    string                 `json:"error_message"`    // Last error message
	RetentionUntil  time.Time              `json:"retention_until"`  // Retention deadline
	CompletedAt     *time.Time             `json:"completed_at"`     // Completion time
	Metadata        map[string]interface{} `json:"metadata"`         // Additional metadata
}
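
And a small sketch of reading the new progress and error fields; summarizeBackup is a made-up helper, while BackupStatusFailed and BackupStatusInProgress are the package's existing status constants.

// Hypothetical helper: one-line status summary built from the new BackupInfo fields.
func summarizeBackup(info *BackupInfo) string {
	switch info.Status {
	case BackupStatusFailed:
		return fmt.Sprintf("backup %s failed: %s", info.ID, info.ErrorMessage)
	case BackupStatusInProgress:
		return fmt.Sprintf("backup %s is %.0f%% complete", info.ID, info.Progress*100)
	default:
		return fmt.Sprintf("backup %s (%v), retained until %s",
			info.ID, info.Status, info.RetentionUntil.Format(time.RFC3339))
	}
}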

// BackupStatus represents backup status