chore: align slurp config and scaffolding

This commit is contained in:
anthonyrawlins
2025-09-27 21:03:12 +10:00
parent acc4361463
commit 4a77862289
47 changed files with 5133 additions and 4274 deletions

View File

@@ -4,7 +4,6 @@ import (
"context"
"encoding/json"
"fmt"
"regexp"
"sync"
"time"
@@ -13,13 +12,13 @@ import (
// CacheManagerImpl implements the CacheManager interface using Redis
// as the backing store. All mutable state (stats, policy, warmupKeys)
// is guarded by mu.
type CacheManagerImpl struct {
	mu         sync.RWMutex
	client     *redis.Client    // shared Redis connection for all cache operations
	stats      *CacheStatistics // runtime counters (e.g. AverageLoadTime; see updateAccessStats)
	policy     *CachePolicy     // TTL, size, eviction and compression settings
	prefix     string           // namespace prefix prepended to every cache key (see buildCacheKey)
	nodeID     string           // identity of this node
	warmupKeys map[string]bool  // presumably keys slated for warmup — confirm against warmup logic
}
// NewCacheManager creates a new cache manager with Redis backend
@@ -43,7 +42,7 @@ func NewCacheManager(redisAddr, nodeID string, policy *CachePolicy) (*CacheManag
// Test connection
ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
defer cancel()
if err := client.Ping(ctx).Err(); err != nil {
return nil, fmt.Errorf("failed to connect to Redis: %w", err)
}
@@ -68,13 +67,13 @@ func NewCacheManager(redisAddr, nodeID string, policy *CachePolicy) (*CacheManag
// DefaultCachePolicy returns the default caching policy: 24h TTL,
// 1GB total cache size, LRU eviction, refresh at 80% of TTL, with
// warmup and per-entry compression enabled.
func DefaultCachePolicy() *CachePolicy {
	return &CachePolicy{
		TTL:              24 * time.Hour,
		MaxSize:          1024 * 1024 * 1024, // 1GB
		EvictionPolicy:   "LRU",
		RefreshThreshold: 0.8, // Refresh when 80% of TTL elapsed
		WarmupEnabled:    true,
		CompressEntries:  true,
		MaxEntrySize:     10 * 1024 * 1024, // 10MB
	}
}
@@ -203,7 +202,7 @@ func (cm *CacheManagerImpl) Set(
// Delete removes data from cache
func (cm *CacheManagerImpl) Delete(ctx context.Context, key string) error {
cacheKey := cm.buildCacheKey(key)
if err := cm.client.Del(ctx, cacheKey).Err(); err != nil {
return fmt.Errorf("cache delete error: %w", err)
}
@@ -215,37 +214,37 @@ func (cm *CacheManagerImpl) Delete(ctx context.Context, key string) error {
// DeletePattern removes every cached entry whose key matches the given
// glob-style pattern. It walks the keyspace incrementally with SCAN
// (non-blocking, unlike KEYS) and deletes all matches in one pipelined
// round trip.
func (cm *CacheManagerImpl) DeletePattern(ctx context.Context, pattern string) error {
	scanPattern := cm.buildCacheKey(pattern)

	// Collect matching keys via cursor-based SCAN, 100 keys per page.
	var matched []string
	cursor := uint64(0)
	for {
		batch, next, err := cm.client.Scan(ctx, cursor, scanPattern, 100).Result()
		if err != nil {
			return fmt.Errorf("cache scan error: %w", err)
		}
		matched = append(matched, batch...)
		if next == 0 {
			break
		}
		cursor = next
	}

	if len(matched) == 0 {
		return nil
	}

	// Issue all deletions through a single pipeline to avoid one
	// network round trip per key.
	pipe := cm.client.Pipeline()
	for _, k := range matched {
		pipe.Del(ctx, k)
	}
	if _, err := pipe.Exec(ctx); err != nil {
		return fmt.Errorf("cache batch delete error: %w", err)
	}
	return nil
}
@@ -282,7 +281,7 @@ func (cm *CacheManagerImpl) GetCacheStats() (*CacheStatistics, error) {
// Update Redis memory usage
ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
defer cancel()
info, err := cm.client.Info(ctx, "memory").Result()
if err == nil {
// Parse memory info to get actual usage
@@ -314,17 +313,17 @@ func (cm *CacheManagerImpl) SetCachePolicy(policy *CachePolicy) error {
// CacheEntry represents a cached data entry with metadata
type CacheEntry struct {
Key string `json:"key"`
Data []byte `json:"data"`
CreatedAt time.Time `json:"created_at"`
ExpiresAt time.Time `json:"expires_at"`
Key string `json:"key"`
Data []byte `json:"data"`
CreatedAt time.Time `json:"created_at"`
ExpiresAt time.Time `json:"expires_at"`
TTL time.Duration `json:"ttl"`
AccessCount int64 `json:"access_count"`
LastAccessedAt time.Time `json:"last_accessed_at"`
Compressed bool `json:"compressed"`
OriginalSize int64 `json:"original_size"`
CompressedSize int64 `json:"compressed_size"`
NodeID string `json:"node_id"`
AccessCount int64 `json:"access_count"`
LastAccessedAt time.Time `json:"last_accessed_at"`
Compressed bool `json:"compressed"`
OriginalSize int64 `json:"original_size"`
CompressedSize int64 `json:"compressed_size"`
NodeID string `json:"node_id"`
}
// Helper methods
@@ -361,7 +360,7 @@ func (cm *CacheManagerImpl) recordMiss() {
func (cm *CacheManagerImpl) updateAccessStats(duration time.Duration) {
cm.mu.Lock()
defer cm.mu.Unlock()
if cm.stats.AverageLoadTime == 0 {
cm.stats.AverageLoadTime = duration
} else {