chore: align slurp config and scaffolding

This commit is contained in:
anthonyrawlins
2025-09-27 21:03:12 +10:00
parent acc4361463
commit 4a77862289
47 changed files with 5133 additions and 4274 deletions

View File

@@ -2,71 +2,68 @@ package storage
import (
	"context"
	"encoding/json"
	"fmt"
	"sync"
	"time"

	"chorus/pkg/crypto"
	"chorus/pkg/dht"
	slurpContext "chorus/pkg/slurp/context"
	"chorus/pkg/ucxl"
)
// ContextStoreImpl is the main implementation of the ContextStore interface.
// It coordinates between local storage, distributed storage, encryption,
// caching, and indexing. All mutable state is guarded by mu; background
// maintenance loops are driven by the tickers and stopped via stopCh.
type ContextStoreImpl struct {
	mu                 sync.RWMutex
	localStorage       LocalStorage
	distributedStorage DistributedStorage
	encryptedStorage   EncryptedStorage
	cacheManager       CacheManager
	indexManager       IndexManager
	backupManager      BackupManager
	eventNotifier      EventNotifier

	// Configuration
	nodeID  string
	options *ContextStoreOptions

	// Statistics and monitoring
	statistics       *StorageStatistics
	metricsCollector *MetricsCollector

	// Background processes
	stopCh           chan struct{}
	syncTicker       *time.Ticker
	compactionTicker *time.Ticker
	cleanupTicker    *time.Ticker
}
// ContextStoreOptions configures the context store behavior
type ContextStoreOptions struct {
// Storage configuration
PreferLocal bool `json:"prefer_local"`
AutoReplicate bool `json:"auto_replicate"`
DefaultReplicas int `json:"default_replicas"`
EncryptionEnabled bool `json:"encryption_enabled"`
CompressionEnabled bool `json:"compression_enabled"`
PreferLocal bool `json:"prefer_local"`
AutoReplicate bool `json:"auto_replicate"`
DefaultReplicas int `json:"default_replicas"`
EncryptionEnabled bool `json:"encryption_enabled"`
CompressionEnabled bool `json:"compression_enabled"`
// Caching configuration
CachingEnabled bool `json:"caching_enabled"`
CacheTTL time.Duration `json:"cache_ttl"`
CacheSize int64 `json:"cache_size"`
CachingEnabled bool `json:"caching_enabled"`
CacheTTL time.Duration `json:"cache_ttl"`
CacheSize int64 `json:"cache_size"`
// Indexing configuration
IndexingEnabled bool `json:"indexing_enabled"`
IndexingEnabled bool `json:"indexing_enabled"`
IndexRefreshInterval time.Duration `json:"index_refresh_interval"`
// Background processes
SyncInterval time.Duration `json:"sync_interval"`
CompactionInterval time.Duration `json:"compaction_interval"`
CleanupInterval time.Duration `json:"cleanup_interval"`
SyncInterval time.Duration `json:"sync_interval"`
CompactionInterval time.Duration `json:"compaction_interval"`
CleanupInterval time.Duration `json:"cleanup_interval"`
// Performance tuning
BatchSize int `json:"batch_size"`
MaxConcurrentOps int `json:"max_concurrent_ops"`
OperationTimeout time.Duration `json:"operation_timeout"`
BatchSize int `json:"batch_size"`
MaxConcurrentOps int `json:"max_concurrent_ops"`
OperationTimeout time.Duration `json:"operation_timeout"`
}
// MetricsCollector collects and aggregates storage metrics
@@ -87,16 +84,16 @@ func DefaultContextStoreOptions() *ContextStoreOptions {
EncryptionEnabled: true,
CompressionEnabled: true,
CachingEnabled: true,
CacheTTL: 24 * time.Hour,
CacheSize: 1024 * 1024 * 1024, // 1GB
IndexingEnabled: true,
CacheTTL: 24 * time.Hour,
CacheSize: 1024 * 1024 * 1024, // 1GB
IndexingEnabled: true,
IndexRefreshInterval: 5 * time.Minute,
SyncInterval: 10 * time.Minute,
CompactionInterval: 24 * time.Hour,
CleanupInterval: 1 * time.Hour,
BatchSize: 100,
MaxConcurrentOps: 10,
OperationTimeout: 30 * time.Second,
SyncInterval: 10 * time.Minute,
CompactionInterval: 24 * time.Hour,
CleanupInterval: 1 * time.Hour,
BatchSize: 100,
MaxConcurrentOps: 10,
OperationTimeout: 30 * time.Second,
}
}
@@ -124,8 +121,8 @@ func NewContextStore(
indexManager: indexManager,
backupManager: backupManager,
eventNotifier: eventNotifier,
nodeID: nodeID,
options: options,
nodeID: nodeID,
options: options,
statistics: &StorageStatistics{
LastSyncTime: time.Now(),
},
@@ -174,11 +171,11 @@ func (cs *ContextStoreImpl) StoreContext(
} else {
// Store unencrypted
storeOptions := &StoreOptions{
Encrypt: false,
Replicate: cs.options.AutoReplicate,
Index: cs.options.IndexingEnabled,
Cache: cs.options.CachingEnabled,
Compress: cs.options.CompressionEnabled,
Encrypt: false,
Replicate: cs.options.AutoReplicate,
Index: cs.options.IndexingEnabled,
Cache: cs.options.CachingEnabled,
Compress: cs.options.CompressionEnabled,
}
storeErr = cs.localStorage.Store(ctx, storageKey, node, storeOptions)
}
@@ -212,14 +209,14 @@ func (cs *ContextStoreImpl) StoreContext(
go func() {
replicateCtx, cancel := context.WithTimeout(context.Background(), cs.options.OperationTimeout)
defer cancel()
distOptions := &DistributedStoreOptions{
ReplicationFactor: cs.options.DefaultReplicas,
ConsistencyLevel: ConsistencyQuorum,
Timeout: cs.options.OperationTimeout,
SyncMode: SyncAsync,
Timeout: cs.options.OperationTimeout,
SyncMode: SyncAsync,
}
if err := cs.distributedStorage.Store(replicateCtx, storageKey, node, distOptions); err != nil {
cs.recordError("replicate", err)
}
@@ -523,11 +520,11 @@ func (cs *ContextStoreImpl) recordOperation(operation string) {
func (cs *ContextStoreImpl) recordLatency(operation string, latency time.Duration) {
cs.metricsCollector.mu.Lock()
defer cs.metricsCollector.mu.Unlock()
if cs.metricsCollector.latencyHistogram[operation] == nil {
cs.metricsCollector.latencyHistogram[operation] = make([]time.Duration, 0, 100)
}
// Keep only last 100 samples
histogram := cs.metricsCollector.latencyHistogram[operation]
if len(histogram) >= 100 {
@@ -541,7 +538,7 @@ func (cs *ContextStoreImpl) recordError(operation string, err error) {
cs.metricsCollector.mu.Lock()
defer cs.metricsCollector.mu.Unlock()
cs.metricsCollector.errorCount[operation]++
// Log the error (in production, use proper logging)
fmt.Printf("Storage error in %s: %v\n", operation, err)
}
@@ -614,7 +611,7 @@ func (cs *ContextStoreImpl) performCleanup(ctx context.Context) {
if err := cs.cacheManager.Clear(ctx); err != nil {
cs.recordError("cache_cleanup", err)
}
// Clean old metrics
cs.cleanupMetrics()
}
@@ -622,7 +619,7 @@ func (cs *ContextStoreImpl) performCleanup(ctx context.Context) {
func (cs *ContextStoreImpl) cleanupMetrics() {
cs.metricsCollector.mu.Lock()
defer cs.metricsCollector.mu.Unlock()
// Reset histograms that are too large
for operation, histogram := range cs.metricsCollector.latencyHistogram {
if len(histogram) > 1000 {
@@ -729,7 +726,7 @@ func (cs *ContextStoreImpl) Sync(ctx context.Context) error {
Type: EventSynced,
Timestamp: time.Now(),
Metadata: map[string]interface{}{
"node_id": cs.nodeID,
"node_id": cs.nodeID,
"sync_time": time.Since(start),
},
}