chore: align slurp config and scaffolding
@@ -33,12 +33,12 @@ type LocalStorageImpl struct {
// LocalStorageOptions configures local storage behavior
type LocalStorageOptions struct {
	Compression        bool          `json:"compression"`         // Enable compression
	CacheSize          int           `json:"cache_size"`          // Cache size in MB
	WriteBuffer        int           `json:"write_buffer"`        // Write buffer size in MB
	MaxOpenFiles       int           `json:"max_open_files"`      // Maximum open files
	BlockSize          int           `json:"block_size"`          // Block size in KB
	SyncWrites         bool          `json:"sync_writes"`         // Synchronous writes
	CompactionInterval time.Duration `json:"compaction_interval"` // Auto-compaction interval
}
@@ -46,11 +46,11 @@ type LocalStorageOptions struct {
func DefaultLocalStorageOptions() *LocalStorageOptions {
	return &LocalStorageOptions{
		Compression:        true,
		CacheSize:          64, // 64MB cache
		WriteBuffer:        16, // 16MB write buffer
		MaxOpenFiles:       1000,
		BlockSize:          4, // 4KB blocks
		SyncWrites:         false,
		CompactionInterval: 24 * time.Hour,
	}
}
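As a usage sketch (only DefaultLocalStorageOptions and the option fields come from this diff; the constructor name below is assumed for illustration), a caller would typically start from the defaults and override individual knobs:

package main

import (
	"log"
	"time"
)

func main() {
	// Start from the defaults and override per deployment.
	opts := DefaultLocalStorageOptions()
	opts.SyncWrites = true                  // favour durability over write throughput
	opts.CompactionInterval = 6 * time.Hour // compact more often on busy nodes

	// NewLocalStorage is a hypothetical constructor name used only for this sketch.
	store, err := NewLocalStorage("/var/lib/chorus/slurp", opts)
	if err != nil {
		log.Fatalf("open local storage: %v", err)
	}
	defer store.Close()
}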
@@ -135,13 +135,14 @@ func (ls *LocalStorageImpl) Store(
		UpdatedAt: time.Now(),
		Metadata:  make(map[string]interface{}),
	}
	entry.Checksum = ls.computeChecksum(dataBytes)

	// Apply options
	if options != nil {
		entry.TTL = options.TTL
		entry.Compressed = options.Compress
		entry.AccessLevel = string(options.AccessLevel)

		// Copy metadata
		for k, v := range options.Metadata {
			entry.Metadata[k] = v
@@ -179,6 +180,7 @@ func (ls *LocalStorageImpl) Store(
	if entry.Compressed {
		ls.metrics.CompressedSize += entry.CompressedSize
	}
	ls.updateFileMetricsLocked()

	return nil
}
@@ -231,6 +233,14 @@ func (ls *LocalStorageImpl) Retrieve(ctx context.Context, key string) (interface
		dataBytes = decompressedData
	}

	// Verify integrity against stored checksum (SEC-SLURP-1.1a requirement)
	if entry.Checksum != "" {
		computed := ls.computeChecksum(dataBytes)
		if computed != entry.Checksum {
			return nil, fmt.Errorf("data integrity check failed for key %s", key)
		}
	}

	// Deserialize data
	var result interface{}
	if err := json.Unmarshal(dataBytes, &result); err != nil {
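A standalone sketch of the integrity check used above: the checksum is a SHA-256 hex digest over the exact serialized bytes, recorded at Store time and recomputed at Retrieve time (standard library only; the names here are local to the example):

package main

import (
	"crypto/sha256"
	"encoding/json"
	"fmt"
)

// checksum mirrors computeChecksum: SHA-256 digest rendered as lowercase hex.
func checksum(data []byte) string {
	digest := sha256.Sum256(data)
	return fmt.Sprintf("%x", digest)
}

func main() {
	payload, _ := json.Marshal(map[string]string{"role": "admin"})
	stored := checksum(payload) // recorded when the entry is stored

	// At retrieval the same bytes must hash to the same value.
	if checksum(payload) != stored {
		fmt.Println("data integrity check failed")
		return
	}
	fmt.Println("integrity OK:", stored)
}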
@@ -260,6 +270,7 @@ func (ls *LocalStorageImpl) Delete(ctx context.Context, key string) error {
	if entryBytes != nil {
		ls.metrics.TotalSize -= int64(len(entryBytes))
	}
	ls.updateFileMetricsLocked()

	return nil
}
@@ -350,7 +361,7 @@ func (ls *LocalStorageImpl) Compact(ctx context.Context) error {
	// Update metrics
	ls.metrics.LastCompaction = time.Now()
	compactionTime := time.Since(start)

	// Calculate new fragmentation ratio
	ls.updateFragmentationRatio()
@@ -397,6 +408,7 @@ type StorageEntry struct {
	Compressed     bool                   `json:"compressed"`
	OriginalSize   int64                  `json:"original_size"`
	CompressedSize int64                  `json:"compressed_size"`
	Checksum       string                 `json:"checksum"`
	AccessLevel    string                 `json:"access_level"`
	Metadata       map[string]interface{} `json:"metadata"`
}
@@ -406,34 +418,70 @@ type StorageEntry struct {
func (ls *LocalStorageImpl) compress(data []byte) ([]byte, error) {
	// Use gzip compression for efficient data storage
	var buf bytes.Buffer

	// Create gzip writer with best compression
	writer := gzip.NewWriter(&buf)
	writer.Header.Name = "storage_data"
	writer.Header.Comment = "CHORUS SLURP local storage compressed data"

	// Write data to gzip writer
	if _, err := writer.Write(data); err != nil {
		writer.Close()
		return nil, fmt.Errorf("failed to write compressed data: %w", err)
	}

	// Close writer to flush data
	if err := writer.Close(); err != nil {
		return nil, fmt.Errorf("failed to close gzip writer: %w", err)
	}

	compressed := buf.Bytes()

	// Only return compressed data if it's actually smaller
	if len(compressed) >= len(data) {
		// Compression didn't help, return original data
		return data, nil
	}

	return compressed, nil
}

func (ls *LocalStorageImpl) computeChecksum(data []byte) string {
	// Compute SHA-256 checksum to satisfy SEC-SLURP-1.1a integrity tracking
	digest := sha256.Sum256(data)
	return fmt.Sprintf("%x", digest)
}

func (ls *LocalStorageImpl) updateFileMetricsLocked() {
	// Refresh filesystem metrics using io/fs traversal (SEC-SLURP-1.1a durability telemetry)
	var fileCount int64
	var aggregateSize int64

	walkErr := fs.WalkDir(os.DirFS(ls.basePath), ".", func(path string, d fs.DirEntry, err error) error {
		if err != nil {
			return err
		}
		if d.IsDir() {
			return nil
		}
		fileCount++
		if info, infoErr := d.Info(); infoErr == nil {
			aggregateSize += info.Size()
		}
		return nil
	})

	if walkErr != nil {
		fmt.Printf("filesystem metrics refresh failed: %v\n", walkErr)
		return
	}

	ls.metrics.TotalFiles = fileCount
	if aggregateSize > 0 {
		ls.metrics.TotalSize = aggregateSize
	}
}

func (ls *LocalStorageImpl) decompress(data []byte) ([]byte, error) {
	// Create gzip reader
	reader, err := gzip.NewReader(bytes.NewReader(data))
@@ -442,13 +490,13 @@ func (ls *LocalStorageImpl) decompress(data []byte) ([]byte, error) {
		return data, nil
	}
	defer reader.Close()

	// Read decompressed data
	var buf bytes.Buffer
	if _, err := io.Copy(&buf, reader); err != nil {
		return nil, fmt.Errorf("failed to decompress data: %w", err)
	}

	return buf.Bytes(), nil
}
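The compress/decompress pair above is plain compress/gzip, returning the original bytes when compression does not shrink the payload and apparently falling back to the raw bytes when the data is not gzip-framed. A self-contained round-trip using the same standard-library calls (independent of the types in this diff):

package main

import (
	"bytes"
	"compress/gzip"
	"fmt"
	"io"
)

func main() {
	original := bytes.Repeat([]byte("slurp context payload "), 64)

	// Compress into an in-memory buffer.
	var buf bytes.Buffer
	zw := gzip.NewWriter(&buf)
	if _, err := zw.Write(original); err != nil {
		panic(err)
	}
	if err := zw.Close(); err != nil {
		panic(err)
	}
	compressed := buf.Bytes()

	// Decompress and verify the round trip.
	zr, err := gzip.NewReader(bytes.NewReader(compressed))
	if err != nil {
		panic(err)
	}
	defer zr.Close()
	restored, err := io.ReadAll(zr)
	if err != nil {
		panic(err)
	}

	fmt.Printf("original=%d compressed=%d match=%v\n",
		len(original), len(compressed), bytes.Equal(original, restored))
}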
@@ -462,7 +510,7 @@ func (ls *LocalStorageImpl) getAvailableSpace() (int64, error) {
	// Calculate available space in bytes
	// Available blocks * block size
	availableBytes := int64(stat.Bavail) * int64(stat.Bsize)

	return availableBytes, nil
}
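getAvailableSpace relies on a Unix statfs call; a minimal sketch of the same calculation (assuming Linux or macOS, where syscall.Statfs and the Bavail/Bsize fields are available):

package main

import (
	"fmt"
	"syscall"
)

// availableSpace reports free bytes usable by unprivileged processes:
// available blocks multiplied by the filesystem block size.
func availableSpace(path string) (int64, error) {
	var stat syscall.Statfs_t
	if err := syscall.Statfs(path, &stat); err != nil {
		return 0, fmt.Errorf("statfs %s: %w", path, err)
	}
	return int64(stat.Bavail) * int64(stat.Bsize), nil
}

func main() {
	free, err := availableSpace("/tmp")
	if err != nil {
		fmt.Println("error:", err)
		return
	}
	fmt.Printf("available: %d bytes\n", free)
}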
@@ -498,11 +546,11 @@ func (ls *LocalStorageImpl) GetCompressionStats() (*CompressionStats, error) {
	defer ls.mu.RUnlock()

	stats := &CompressionStats{
		TotalEntries:      0,
		CompressedEntries: 0,
		TotalSize:         ls.metrics.TotalSize,
		CompressedSize:    ls.metrics.CompressedSize,
		CompressionRatio:  0.0,
	}

	// Iterate through all entries to get accurate stats
@@ -511,7 +559,7 @@ func (ls *LocalStorageImpl) GetCompressionStats() (*CompressionStats, error) {
	for iter.Next() {
		stats.TotalEntries++

		// Try to parse entry to check if compressed
		var entry StorageEntry
		if err := json.Unmarshal(iter.Value(), &entry); err == nil {
@@ -549,7 +597,7 @@ func (ls *LocalStorageImpl) OptimizeStorage(ctx context.Context, compressThresho
		}

		key := string(iter.Key())

		// Parse existing entry
		var entry StorageEntry
		if err := json.Unmarshal(iter.Value(), &entry); err != nil {
@@ -599,11 +647,11 @@ func (ls *LocalStorageImpl) OptimizeStorage(ctx context.Context, compressThresho
// CompressionStats holds compression statistics
type CompressionStats struct {
	TotalEntries      int64   `json:"total_entries"`
	CompressedEntries int64   `json:"compressed_entries"`
	TotalSize         int64   `json:"total_size"`
	CompressedSize    int64   `json:"compressed_size"`
	CompressionRatio  float64 `json:"compression_ratio"`
}
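GetCompressionStats initialises CompressionRatio to 0.0 and fills it later; the formula itself is not shown in these hunks, so the helper below is only one plausible (assumed) definition, treating the ratio as compressed bytes over total bytes:

// compressionRatio is a hypothetical helper; the actual derivation in the
// codebase may differ. It assumes CompressedSize and TotalSize are both bytes.
func compressionRatio(stats *CompressionStats) float64 {
	if stats.TotalSize == 0 {
		return 0
	}
	return float64(stats.CompressedSize) / float64(stats.TotalSize)
}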
// Close closes the local storage