Fix temporal persistence wiring and restore slurp_full suite

anthonyrawlins
2025-09-28 11:39:03 +10:00
parent 9c32755632
commit 2ff408729c
16 changed files with 1195 additions and 802 deletions


@@ -8,7 +8,6 @@ import (
	"time"

	"chorus/pkg/slurp/storage"
	"chorus/pkg/ucxl"
)

// persistenceManagerImpl handles persistence and synchronization of temporal graph data
@@ -151,6 +150,8 @@ func NewPersistenceManager(
	config *PersistenceConfig,
) *persistenceManagerImpl {
+	cfg := normalizePersistenceConfig(config)
+
	pm := &persistenceManagerImpl{
		contextStore: contextStore,
		localStorage: localStorage,
@@ -158,30 +159,96 @@ func NewPersistenceManager(
		encryptedStore:   encryptedStore,
		backupManager:    backupManager,
		graph:            graph,
-		config:           config,
+		config:           cfg,
		pendingChanges:   make(map[string]*PendingChange),
		conflictResolver: NewDefaultConflictResolver(),
-		batchSize:        config.BatchSize,
-		writeBuffer:      make([]*TemporalNode, 0, config.BatchSize),
-		flushInterval:    config.FlushInterval,
+		batchSize:        cfg.BatchSize,
+		writeBuffer:      make([]*TemporalNode, 0, cfg.BatchSize),
+		flushInterval:    cfg.FlushInterval,
	}

+	if graph != nil {
+		graph.persistence = pm
+	}
+
	// Start background processes
-	if config.EnableAutoSync {
+	if cfg.EnableAutoSync {
		go pm.syncWorker()
	}

-	if config.EnableWriteBuffer {
+	if cfg.EnableWriteBuffer {
		go pm.flushWorker()
	}

-	if config.EnableAutoBackup {
+	if cfg.EnableAutoBackup {
		go pm.backupWorker()
	}

	return pm
}
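
The constructor now also wires the manager back into the graph (graph.persistence = pm) and starts only the background workers that the normalized config enables. The commit does not show the workers themselves; a minimal sketch of the usual ticker-driven shape for flushWorker, assuming flushWriteBuffer is safe to call repeatedly:

func (pm *persistenceManagerImpl) flushWorker() {
	// Hypothetical sketch; the actual implementation is not part of this diff.
	ticker := time.NewTicker(pm.flushInterval)
	defer ticker.Stop()
	for range ticker.C {
		// A failed flush is simply retried on the next tick rather than
		// killing the worker goroutine.
		_ = pm.flushWriteBuffer()
	}
}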
+func normalizePersistenceConfig(config *PersistenceConfig) *PersistenceConfig {
+	if config == nil {
+		return defaultPersistenceConfig()
+	}
+
+	cloned := *config
+	if cloned.BatchSize <= 0 {
+		cloned.BatchSize = 1
+	}
+	if cloned.FlushInterval <= 0 {
+		cloned.FlushInterval = 30 * time.Second
+	}
+	if cloned.SyncInterval <= 0 {
+		cloned.SyncInterval = 15 * time.Minute
+	}
+	if cloned.MaxSyncRetries <= 0 {
+		cloned.MaxSyncRetries = 3
+	}
+	if len(cloned.EncryptionRoles) == 0 {
+		cloned.EncryptionRoles = []string{"default"}
+	} else {
+		cloned.EncryptionRoles = append([]string(nil), cloned.EncryptionRoles...)
+	}
+	if cloned.KeyPrefix == "" {
+		cloned.KeyPrefix = "temporal_graph"
+	}
+	if cloned.NodeKeyPattern == "" {
+		cloned.NodeKeyPattern = "temporal_graph/nodes/%s"
+	}
+	if cloned.GraphKeyPattern == "" {
+		cloned.GraphKeyPattern = "temporal_graph/graph/%s"
+	}
+	if cloned.MetadataKeyPattern == "" {
+		cloned.MetadataKeyPattern = "temporal_graph/metadata/%s"
+	}
+
+	return &cloned
+}
+func defaultPersistenceConfig() *PersistenceConfig {
+	return &PersistenceConfig{
+		EnableLocalStorage:         true,
+		EnableDistributedStorage:   false,
+		EnableEncryption:           false,
+		EncryptionRoles:            []string{"default"},
+		SyncInterval:               15 * time.Minute,
+		ConflictResolutionStrategy: "latest_wins",
+		EnableAutoSync:             false,
+		MaxSyncRetries:             3,
+		BatchSize:                  1,
+		FlushInterval:              30 * time.Second,
+		EnableWriteBuffer:          false,
+		EnableAutoBackup:           false,
+		BackupInterval:             24 * time.Hour,
+		RetainBackupCount:          3,
+		KeyPrefix:                  "temporal_graph",
+		NodeKeyPattern:             "temporal_graph/nodes/%s",
+		GraphKeyPattern:            "temporal_graph/graph/%s",
+		MetadataKeyPattern:         "temporal_graph/metadata/%s",
+	}
+}

// PersistTemporalNode persists a temporal node to storage
func (pm *persistenceManagerImpl) PersistTemporalNode(ctx context.Context, node *TemporalNode) error {
	pm.mu.Lock()
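
normalizePersistenceConfig clones the caller's config and back-fills zero values, so a nil or partially populated PersistenceConfig can no longer crash the constructor or produce a zero-capacity write buffer. A sketch of the expected behaviour as a hypothetical test (not part of this commit):

func TestNormalizePersistenceConfig(t *testing.T) {
	// nil falls back to the package defaults.
	cfg := normalizePersistenceConfig(nil)
	if cfg.BatchSize != 1 || cfg.FlushInterval != 30*time.Second {
		t.Fatalf("unexpected defaults: %+v", cfg)
	}

	// A partial config keeps its values, gains defaults, and is cloned.
	in := &PersistenceConfig{BatchSize: 64}
	out := normalizePersistenceConfig(in)
	if out.BatchSize != 64 || out.SyncInterval != 15*time.Minute {
		t.Fatalf("unexpected normalization: %+v", out)
	}
	if out == in {
		t.Fatal("expected a clone, got the original pointer")
	}
}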
@@ -355,7 +422,7 @@ func (pm *persistenceManagerImpl) flushWriteBuffer() error {
	for i, node := range pm.writeBuffer {
		batch.Contexts[i] = &storage.ContextStoreItem{
-			Context: node,
+			Context: node.Context,
			Roles:   pm.config.EncryptionRoles,
		}
	}
@@ -419,8 +486,13 @@ func (pm *persistenceManagerImpl) loadFromLocalStorage(ctx context.Context) error {
		return fmt.Errorf("failed to load metadata: %w", err)
	}

-	var metadata *GraphMetadata
-	if err := json.Unmarshal(metadataData.([]byte), &metadata); err != nil {
+	metadataBytes, err := json.Marshal(metadataData)
+	if err != nil {
+		return fmt.Errorf("failed to marshal metadata: %w", err)
+	}
+
+	var metadata GraphMetadata
+	if err := json.Unmarshal(metadataBytes, &metadata); err != nil {
		return fmt.Errorf("failed to unmarshal metadata: %w", err)
	}
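
Both storage fixes in this file follow the same pattern: the store hands back a decoded interface{} (typically a map[string]interface{}), so the old direct .([]byte) assertion could never succeed; re-marshalling to JSON and unmarshalling into the typed struct converts the value safely. The commit inlines this, but as a generic helper the pattern looks like (hypothetical, requires Go 1.18+):

func decodeStored[T any](raw interface{}) (*T, error) {
	// Round-trip through JSON to turn an untyped value
	// (e.g. map[string]interface{}) into the typed struct T.
	b, err := json.Marshal(raw)
	if err != nil {
		return nil, err
	}
	var out T
	if err := json.Unmarshal(b, &out); err != nil {
		return nil, err
	}
	return &out, nil
}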
@@ -431,17 +503,6 @@ func (pm *persistenceManagerImpl) loadFromLocalStorage(ctx context.Context) error {
		return fmt.Errorf("failed to list nodes: %w", err)
	}

-	// Load nodes in batches
-	batchReq := &storage.BatchRetrieveRequest{
-		Keys: nodeKeys,
-	}
-
-	batchResult, err := pm.contextStore.BatchRetrieve(ctx, batchReq)
-	if err != nil {
-		return fmt.Errorf("failed to batch retrieve nodes: %w", err)
-	}
-
	// Reconstruct graph
	pm.graph.mu.Lock()
	defer pm.graph.mu.Unlock()
@@ -450,17 +511,23 @@ func (pm *persistenceManagerImpl) loadFromLocalStorage(ctx context.Context) error {
	pm.graph.influences = make(map[string][]string)
	pm.graph.influencedBy = make(map[string][]string)

-	for key, result := range batchResult.Results {
-		if result.Error != nil {
-			continue // Skip failed retrievals
-		}
-
-		var node *TemporalNode
-		if err := json.Unmarshal(result.Data.([]byte), &node); err != nil {
-			continue // Skip invalid nodes
-		}
-
-		pm.reconstructGraphNode(node)
+	for _, key := range nodeKeys {
+		data, err := pm.localStorage.Retrieve(ctx, key)
+		if err != nil {
+			continue
+		}
+
+		nodeBytes, err := json.Marshal(data)
+		if err != nil {
+			continue
+		}
+
+		var node TemporalNode
+		if err := json.Unmarshal(nodeBytes, &node); err != nil {
+			continue
+		}
+
+		pm.reconstructGraphNode(&node)
	}

	return nil
@@ -695,7 +762,7 @@ func (pm *persistenceManagerImpl) identifyConflicts(local, remote *GraphSnapshot
		if remoteNode, exists := remote.Nodes[nodeID]; exists {
			if pm.hasNodeConflict(localNode, remoteNode) {
				conflict := &SyncConflict{
-					Type:       ConflictTypeNodeMismatch,
+					Type:       ConflictVersionMismatch,
					NodeID:     nodeID,
					LocalData:  localNode,
					RemoteData: remoteNode,
@@ -724,16 +791,19 @@ func (pm *persistenceManagerImpl) resolveConflict(ctx context.Context, conflict
	}

	return &ConflictResolution{
-		ConflictID:   conflict.NodeID,
-		Resolution:   "merged",
-		ResolvedData: resolvedNode,
-		ResolvedAt:   time.Now(),
+		ConflictID:       conflict.NodeID,
+		ResolutionMethod: "merged",
+		ResolvedAt:       time.Now(),
+		ResolvedBy:       "persistence_manager",
+		ResultingNode:    resolvedNode,
+		Confidence:       1.0,
+		Changes:          []string{"merged local and remote node"},
	}, nil
}

func (pm *persistenceManagerImpl) applyConflictResolution(ctx context.Context, resolution *ConflictResolution) error {
	// Apply the resolved node back to the graph
-	resolvedNode := resolution.ResolvedData.(*TemporalNode)
+	resolvedNode := resolution.ResultingNode

	pm.graph.mu.Lock()
	pm.graph.nodes[resolvedNode.ID] = resolvedNode
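
Swapping the untyped ResolvedData interface{} field for the typed ResultingNode removes a bare type assertion that would panic at runtime if a resolver ever stored a different payload. Where an interface{} payload is unavoidable, the comma-ok form is the safe equivalent (illustrative only; the typed field makes it unnecessary here):

resolvedNode, ok := resolution.ResolvedData.(*TemporalNode)
if !ok {
	return fmt.Errorf("unexpected resolution payload %T", resolution.ResolvedData)
}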