chore: align slurp config and scaffolding

This commit is contained in:
anthonyrawlins
2025-09-27 21:03:12 +10:00
parent acc4361463
commit 4a77862289
47 changed files with 5133 additions and 4274 deletions

View File

@@ -5,7 +5,9 @@ import (
"fmt"
"time"
slurpContext "chorus/pkg/slurp/context"
"chorus/pkg/slurp/storage"
"chorus/pkg/ucxl"
)
// TemporalGraphFactory creates and configures temporal graph components
@@ -17,44 +19,44 @@ type TemporalGraphFactory struct {
// TemporalConfig represents configuration for the temporal graph system
type TemporalConfig struct {
// Core graph settings
MaxDepth int `json:"max_depth"`
StalenessWeights *StalenessWeights `json:"staleness_weights"`
CacheTimeout time.Duration `json:"cache_timeout"`
MaxDepth int `json:"max_depth"`
StalenessWeights *StalenessWeights `json:"staleness_weights"`
CacheTimeout time.Duration `json:"cache_timeout"`
// Analysis settings
InfluenceAnalysisConfig *InfluenceAnalysisConfig `json:"influence_analysis_config"`
NavigationConfig *NavigationConfig `json:"navigation_config"`
QueryConfig *QueryConfig `json:"query_config"`
// Persistence settings
PersistenceConfig *PersistenceConfig `json:"persistence_config"`
PersistenceConfig *PersistenceConfig `json:"persistence_config"`
// Performance settings
EnableCaching bool `json:"enable_caching"`
EnableCompression bool `json:"enable_compression"`
EnableMetrics bool `json:"enable_metrics"`
EnableCaching bool `json:"enable_caching"`
EnableCompression bool `json:"enable_compression"`
EnableMetrics bool `json:"enable_metrics"`
// Debug settings
EnableDebugLogging bool `json:"enable_debug_logging"`
EnableValidation bool `json:"enable_validation"`
EnableDebugLogging bool `json:"enable_debug_logging"`
EnableValidation bool `json:"enable_validation"`
}
// InfluenceAnalysisConfig represents configuration for influence analysis
type InfluenceAnalysisConfig struct {
DampingFactor float64 `json:"damping_factor"`
MaxIterations int `json:"max_iterations"`
ConvergenceThreshold float64 `json:"convergence_threshold"`
CacheValidDuration time.Duration `json:"cache_valid_duration"`
EnableCentralityMetrics bool `json:"enable_centrality_metrics"`
EnableCommunityDetection bool `json:"enable_community_detection"`
DampingFactor float64 `json:"damping_factor"`
MaxIterations int `json:"max_iterations"`
ConvergenceThreshold float64 `json:"convergence_threshold"`
CacheValidDuration time.Duration `json:"cache_valid_duration"`
EnableCentralityMetrics bool `json:"enable_centrality_metrics"`
EnableCommunityDetection bool `json:"enable_community_detection"`
}
// NavigationConfig represents configuration for decision navigation
type NavigationConfig struct {
MaxNavigationHistory int `json:"max_navigation_history"`
BookmarkRetention time.Duration `json:"bookmark_retention"`
SessionTimeout time.Duration `json:"session_timeout"`
EnablePathCaching bool `json:"enable_path_caching"`
MaxNavigationHistory int `json:"max_navigation_history"`
BookmarkRetention time.Duration `json:"bookmark_retention"`
SessionTimeout time.Duration `json:"session_timeout"`
EnablePathCaching bool `json:"enable_path_caching"`
}
// QueryConfig represents configuration for decision-hop queries
@@ -68,17 +70,17 @@ type QueryConfig struct {
// TemporalGraphSystem represents the complete temporal graph system
type TemporalGraphSystem struct {
Graph TemporalGraph
Navigator DecisionNavigator
InfluenceAnalyzer InfluenceAnalyzer
StalenessDetector StalenessDetector
ConflictDetector ConflictDetector
PatternAnalyzer PatternAnalyzer
VersionManager VersionManager
HistoryManager HistoryManager
MetricsCollector MetricsCollector
QuerySystem *querySystemImpl
PersistenceManager *persistenceManagerImpl
Graph TemporalGraph
Navigator DecisionNavigator
InfluenceAnalyzer InfluenceAnalyzer
StalenessDetector StalenessDetector
ConflictDetector ConflictDetector
PatternAnalyzer PatternAnalyzer
VersionManager VersionManager
HistoryManager HistoryManager
MetricsCollector MetricsCollector
QuerySystem *querySystemImpl
PersistenceManager *persistenceManagerImpl
}
// NewTemporalGraphFactory creates a new temporal graph factory
@@ -86,7 +88,7 @@ func NewTemporalGraphFactory(storage storage.ContextStore, config *TemporalConfi
if config == nil {
config = DefaultTemporalConfig()
}
return &TemporalGraphFactory{
storage: storage,
config: config,
@@ -100,22 +102,22 @@ func (tgf *TemporalGraphFactory) CreateTemporalGraphSystem(
encryptedStorage storage.EncryptedStorage,
backupManager storage.BackupManager,
) (*TemporalGraphSystem, error) {
// Create core temporal graph
graph := NewTemporalGraph(tgf.storage).(*temporalGraphImpl)
// Create navigator
navigator := NewDecisionNavigator(graph)
// Create influence analyzer
analyzer := NewInfluenceAnalyzer(graph)
// Create staleness detector
detector := NewStalenessDetector(graph)
// Create query system
querySystem := NewQuerySystem(graph, navigator, analyzer, detector)
// Create persistence manager
persistenceManager := NewPersistenceManager(
tgf.storage,
@@ -126,28 +128,28 @@ func (tgf *TemporalGraphFactory) CreateTemporalGraphSystem(
graph,
tgf.config.PersistenceConfig,
)
// Create additional components
conflictDetector := NewConflictDetector(graph)
patternAnalyzer := NewPatternAnalyzer(graph)
versionManager := NewVersionManager(graph, persistenceManager)
historyManager := NewHistoryManager(graph, persistenceManager)
metricsCollector := NewMetricsCollector(graph)
system := &TemporalGraphSystem{
Graph: graph,
Navigator: navigator,
InfluenceAnalyzer: analyzer,
StalenessDetector: detector,
ConflictDetector: conflictDetector,
PatternAnalyzer: patternAnalyzer,
VersionManager: versionManager,
HistoryManager: historyManager,
MetricsCollector: metricsCollector,
QuerySystem: querySystem,
PersistenceManager: persistenceManager,
Graph: graph,
Navigator: navigator,
InfluenceAnalyzer: analyzer,
StalenessDetector: detector,
ConflictDetector: conflictDetector,
PatternAnalyzer: patternAnalyzer,
VersionManager: versionManager,
HistoryManager: historyManager,
MetricsCollector: metricsCollector,
QuerySystem: querySystem,
PersistenceManager: persistenceManager,
}
return system, nil
}
@@ -159,19 +161,19 @@ func (tgf *TemporalGraphFactory) LoadExistingSystem(
encryptedStorage storage.EncryptedStorage,
backupManager storage.BackupManager,
) (*TemporalGraphSystem, error) {
// Create system
system, err := tgf.CreateTemporalGraphSystem(localStorage, distributedStorage, encryptedStorage, backupManager)
if err != nil {
return nil, fmt.Errorf("failed to create system: %w", err)
}
// Load graph data
err = system.PersistenceManager.LoadTemporalGraph(ctx)
if err != nil {
return nil, fmt.Errorf("failed to load temporal graph: %w", err)
}
return system, nil
}
@@ -188,23 +190,23 @@ func DefaultTemporalConfig() *TemporalConfig {
DependencyWeight: 0.3,
},
CacheTimeout: time.Minute * 15,
InfluenceAnalysisConfig: &InfluenceAnalysisConfig{
DampingFactor: 0.85,
MaxIterations: 100,
ConvergenceThreshold: 1e-6,
CacheValidDuration: time.Minute * 30,
EnableCentralityMetrics: true,
DampingFactor: 0.85,
MaxIterations: 100,
ConvergenceThreshold: 1e-6,
CacheValidDuration: time.Minute * 30,
EnableCentralityMetrics: true,
EnableCommunityDetection: true,
},
NavigationConfig: &NavigationConfig{
MaxNavigationHistory: 100,
BookmarkRetention: time.Hour * 24 * 30, // 30 days
SessionTimeout: time.Hour * 2,
EnablePathCaching: true,
},
QueryConfig: &QueryConfig{
DefaultMaxHops: 10,
MaxQueryResults: 1000,
@@ -212,28 +214,28 @@ func DefaultTemporalConfig() *TemporalConfig {
CacheQueryResults: true,
EnableQueryOptimization: true,
},
PersistenceConfig: &PersistenceConfig{
EnableLocalStorage: true,
EnableDistributedStorage: true,
EnableEncryption: true,
EncryptionRoles: []string{"analyst", "architect", "developer"},
SyncInterval: time.Minute * 15,
EnableLocalStorage: true,
EnableDistributedStorage: true,
EnableEncryption: true,
EncryptionRoles: []string{"analyst", "architect", "developer"},
SyncInterval: time.Minute * 15,
ConflictResolutionStrategy: "latest_wins",
EnableAutoSync: true,
MaxSyncRetries: 3,
BatchSize: 50,
FlushInterval: time.Second * 30,
EnableWriteBuffer: true,
EnableAutoBackup: true,
BackupInterval: time.Hour * 6,
RetainBackupCount: 10,
KeyPrefix: "temporal_graph",
NodeKeyPattern: "temporal_graph/nodes/%s",
GraphKeyPattern: "temporal_graph/graph/%s",
MetadataKeyPattern: "temporal_graph/metadata/%s",
EnableAutoSync: true,
MaxSyncRetries: 3,
BatchSize: 50,
FlushInterval: time.Second * 30,
EnableWriteBuffer: true,
EnableAutoBackup: true,
BackupInterval: time.Hour * 6,
RetainBackupCount: 10,
KeyPrefix: "temporal_graph",
NodeKeyPattern: "temporal_graph/nodes/%s",
GraphKeyPattern: "temporal_graph/graph/%s",
MetadataKeyPattern: "temporal_graph/metadata/%s",
},
EnableCaching: true,
EnableCompression: false,
EnableMetrics: true,
@@ -308,11 +310,11 @@ func (cd *conflictDetectorImpl) ValidateDecisionSequence(ctx context.Context, ad
func (cd *conflictDetectorImpl) ResolveTemporalConflict(ctx context.Context, conflict *TemporalConflict) (*ConflictResolution, error) {
// Implementation would resolve specific temporal conflicts
return &ConflictResolution{
ConflictID: conflict.ID,
Resolution: "auto_resolved",
ResolvedAt: time.Now(),
ResolvedBy: "system",
Confidence: 0.8,
ConflictID: conflict.ID,
ResolutionMethod: "auto_resolved",
ResolvedAt: time.Now(),
ResolvedBy: "system",
Confidence: 0.8,
}, nil
}
@@ -373,7 +375,7 @@ type versionManagerImpl struct {
persistence *persistenceManagerImpl
}
func (vm *versionManagerImpl) CreateVersion(ctx context.Context, address ucxl.Address,
func (vm *versionManagerImpl) CreateVersion(ctx context.Context, address ucxl.Address,
contextNode *slurpContext.ContextNode, metadata *VersionMetadata) (*TemporalNode, error) {
// Implementation would create a new temporal version
return vm.graph.EvolveContext(ctx, address, contextNode, metadata.Reason, metadata.Decision)
@@ -390,7 +392,7 @@ func (vm *versionManagerImpl) ListVersions(ctx context.Context, address ucxl.Add
if err != nil {
return nil, err
}
versions := make([]*VersionInfo, len(history))
for i, node := range history {
versions[i] = &VersionInfo{
@@ -402,11 +404,11 @@ func (vm *versionManagerImpl) ListVersions(ctx context.Context, address ucxl.Add
DecisionID: node.DecisionID,
}
}
return versions, nil
}
func (vm *versionManagerImpl) CompareVersions(ctx context.Context, address ucxl.Address,
func (vm *versionManagerImpl) CompareVersions(ctx context.Context, address ucxl.Address,
version1, version2 int) (*VersionComparison, error) {
// Implementation would compare two temporal versions
return &VersionComparison{
@@ -420,7 +422,7 @@ func (vm *versionManagerImpl) CompareVersions(ctx context.Context, address ucxl.
}, nil
}
func (vm *versionManagerImpl) MergeVersions(ctx context.Context, address ucxl.Address,
func (vm *versionManagerImpl) MergeVersions(ctx context.Context, address ucxl.Address,
versions []int, strategy MergeStrategy) (*TemporalNode, error) {
// Implementation would merge multiple versions
return vm.graph.GetLatestVersion(ctx, address)
@@ -447,7 +449,7 @@ func (hm *historyManagerImpl) GetFullHistory(ctx context.Context, address ucxl.A
if err != nil {
return nil, err
}
return &ContextHistory{
Address: address,
Versions: history,
@@ -455,7 +457,7 @@ func (hm *historyManagerImpl) GetFullHistory(ctx context.Context, address ucxl.A
}, nil
}
func (hm *historyManagerImpl) GetHistoryRange(ctx context.Context, address ucxl.Address,
func (hm *historyManagerImpl) GetHistoryRange(ctx context.Context, address ucxl.Address,
startHop, endHop int) (*ContextHistory, error) {
// Implementation would get history within a specific range
return hm.GetFullHistory(ctx, address)
@@ -539,13 +541,13 @@ func (mc *metricsCollectorImpl) GetInfluenceMetrics(ctx context.Context) (*Influ
func (mc *metricsCollectorImpl) GetQualityMetrics(ctx context.Context) (*QualityMetrics, error) {
// Implementation would get temporal data quality metrics
return &QualityMetrics{
DataCompleteness: 1.0,
DataConsistency: 1.0,
DataAccuracy: 1.0,
AverageConfidence: 0.8,
ConflictsDetected: 0,
ConflictsResolved: 0,
LastQualityCheck: time.Now(),
DataCompleteness: 1.0,
DataConsistency: 1.0,
DataAccuracy: 1.0,
AverageConfidence: 0.8,
ConflictsDetected: 0,
ConflictsResolved: 0,
LastQualityCheck: time.Now(),
}, nil
}
@@ -560,4 +562,4 @@ func (mc *metricsCollectorImpl) calculateInfluenceConnections() int {
total += len(influences)
}
return total
}
}

View File

@@ -9,36 +9,36 @@ import (
"sync"
"time"
"chorus/pkg/ucxl"
slurpContext "chorus/pkg/slurp/context"
"chorus/pkg/slurp/storage"
"chorus/pkg/ucxl"
)
// temporalGraphImpl implements the TemporalGraph interface
type temporalGraphImpl struct {
mu sync.RWMutex
// Core storage
storage storage.ContextStore
// In-memory graph structures for fast access
nodes map[string]*TemporalNode // nodeID -> TemporalNode
addressToNodes map[string][]*TemporalNode // address -> list of temporal nodes
influences map[string][]string // nodeID -> list of influenced nodeIDs
influencedBy map[string][]string // nodeID -> list of influencer nodeIDs
nodes map[string]*TemporalNode // nodeID -> TemporalNode
addressToNodes map[string][]*TemporalNode // address -> list of temporal nodes
influences map[string][]string // nodeID -> list of influenced nodeIDs
influencedBy map[string][]string // nodeID -> list of influencer nodeIDs
// Decision tracking
decisions map[string]*DecisionMetadata // decisionID -> DecisionMetadata
decisionToNodes map[string][]*TemporalNode // decisionID -> list of affected nodes
decisions map[string]*DecisionMetadata // decisionID -> DecisionMetadata
decisionToNodes map[string][]*TemporalNode // decisionID -> list of affected nodes
// Performance optimization
pathCache map[string][]*DecisionStep // cache for decision paths
metricsCache map[string]interface{} // cache for expensive metrics
cacheTimeout time.Duration
lastCacheClean time.Time
pathCache map[string][]*DecisionStep // cache for decision paths
metricsCache map[string]interface{} // cache for expensive metrics
cacheTimeout time.Duration
lastCacheClean time.Time
// Configuration
maxDepth int // Maximum depth for path finding
maxDepth int // Maximum depth for path finding
stalenessWeight *StalenessWeights
}
@@ -69,113 +69,113 @@ func NewTemporalGraph(storage storage.ContextStore) TemporalGraph {
}
// CreateInitialContext creates the first temporal version of context
func (tg *temporalGraphImpl) CreateInitialContext(ctx context.Context, address ucxl.Address,
func (tg *temporalGraphImpl) CreateInitialContext(ctx context.Context, address ucxl.Address,
contextData *slurpContext.ContextNode, creator string) (*TemporalNode, error) {
tg.mu.Lock()
defer tg.mu.Unlock()
// Generate node ID
nodeID := tg.generateNodeID(address, 1)
// Create temporal node
temporalNode := &TemporalNode{
ID: nodeID,
UCXLAddress: address,
Version: 1,
Context: contextData,
Timestamp: time.Now(),
DecisionID: fmt.Sprintf("initial-%s", creator),
ChangeReason: ReasonInitialCreation,
ParentNode: nil,
ContextHash: tg.calculateContextHash(contextData),
Confidence: contextData.RAGConfidence,
Staleness: 0.0,
Influences: make([]ucxl.Address, 0),
InfluencedBy: make([]ucxl.Address, 0),
ValidatedBy: []string{creator},
ID: nodeID,
UCXLAddress: address,
Version: 1,
Context: contextData,
Timestamp: time.Now(),
DecisionID: fmt.Sprintf("initial-%s", creator),
ChangeReason: ReasonInitialCreation,
ParentNode: nil,
ContextHash: tg.calculateContextHash(contextData),
Confidence: contextData.RAGConfidence,
Staleness: 0.0,
Influences: make([]ucxl.Address, 0),
InfluencedBy: make([]ucxl.Address, 0),
ValidatedBy: []string{creator},
LastValidated: time.Now(),
ImpactScope: ImpactLocal,
PropagatedTo: make([]ucxl.Address, 0),
Metadata: make(map[string]interface{}),
ImpactScope: ImpactLocal,
PropagatedTo: make([]ucxl.Address, 0),
Metadata: make(map[string]interface{}),
}
// Store in memory structures
tg.nodes[nodeID] = temporalNode
addressKey := address.String()
tg.addressToNodes[addressKey] = []*TemporalNode{temporalNode}
// Initialize influence maps
tg.influences[nodeID] = make([]string, 0)
tg.influencedBy[nodeID] = make([]string, 0)
// Store decision metadata
decisionMeta := &DecisionMetadata{
ID: temporalNode.DecisionID,
Maker: creator,
Rationale: "Initial context creation",
Scope: ImpactLocal,
ConfidenceLevel: contextData.RAGConfidence,
ExternalRefs: make([]string, 0),
CreatedAt: time.Now(),
ID: temporalNode.DecisionID,
Maker: creator,
Rationale: "Initial context creation",
Scope: ImpactLocal,
ConfidenceLevel: contextData.RAGConfidence,
ExternalRefs: make([]string, 0),
CreatedAt: time.Now(),
ImplementationStatus: "complete",
Metadata: make(map[string]interface{}),
Metadata: make(map[string]interface{}),
}
tg.decisions[temporalNode.DecisionID] = decisionMeta
tg.decisionToNodes[temporalNode.DecisionID] = []*TemporalNode{temporalNode}
// Persist to storage
if err := tg.persistTemporalNode(ctx, temporalNode); err != nil {
return nil, fmt.Errorf("failed to persist initial temporal node: %w", err)
}
return temporalNode, nil
}
// EvolveContext creates a new temporal version due to a decision
func (tg *temporalGraphImpl) EvolveContext(ctx context.Context, address ucxl.Address,
newContext *slurpContext.ContextNode, reason ChangeReason,
func (tg *temporalGraphImpl) EvolveContext(ctx context.Context, address ucxl.Address,
newContext *slurpContext.ContextNode, reason ChangeReason,
decision *DecisionMetadata) (*TemporalNode, error) {
tg.mu.Lock()
defer tg.mu.Unlock()
// Get latest version
addressKey := address.String()
nodes, exists := tg.addressToNodes[addressKey]
if !exists || len(nodes) == 0 {
return nil, fmt.Errorf("no existing context found for address %s", address.String())
}
// Find latest version
latestNode := nodes[len(nodes)-1]
newVersion := latestNode.Version + 1
// Generate new node ID
nodeID := tg.generateNodeID(address, newVersion)
// Create new temporal node
temporalNode := &TemporalNode{
ID: nodeID,
UCXLAddress: address,
Version: newVersion,
Context: newContext,
Timestamp: time.Now(),
DecisionID: decision.ID,
ChangeReason: reason,
ParentNode: &latestNode.ID,
ContextHash: tg.calculateContextHash(newContext),
Confidence: newContext.RAGConfidence,
Staleness: 0.0, // New version, not stale
Influences: make([]ucxl.Address, 0),
InfluencedBy: make([]ucxl.Address, 0),
ValidatedBy: []string{decision.Maker},
ID: nodeID,
UCXLAddress: address,
Version: newVersion,
Context: newContext,
Timestamp: time.Now(),
DecisionID: decision.ID,
ChangeReason: reason,
ParentNode: &latestNode.ID,
ContextHash: tg.calculateContextHash(newContext),
Confidence: newContext.RAGConfidence,
Staleness: 0.0, // New version, not stale
Influences: make([]ucxl.Address, 0),
InfluencedBy: make([]ucxl.Address, 0),
ValidatedBy: []string{decision.Maker},
LastValidated: time.Now(),
ImpactScope: decision.Scope,
PropagatedTo: make([]ucxl.Address, 0),
Metadata: make(map[string]interface{}),
ImpactScope: decision.Scope,
PropagatedTo: make([]ucxl.Address, 0),
Metadata: make(map[string]interface{}),
}
// Copy influence relationships from parent
if latestNodeInfluences, exists := tg.influences[latestNode.ID]; exists {
tg.influences[nodeID] = make([]string, len(latestNodeInfluences))
@@ -183,18 +183,18 @@ func (tg *temporalGraphImpl) EvolveContext(ctx context.Context, address ucxl.Add
} else {
tg.influences[nodeID] = make([]string, 0)
}
if latestNodeInfluencedBy, exists := tg.influencedBy[latestNode.ID]; exists {
tg.influencedBy[nodeID] = make([]string, len(latestNodeInfluencedBy))
copy(tg.influencedBy[nodeID], latestNodeInfluencedBy)
} else {
tg.influencedBy[nodeID] = make([]string, 0)
}
// Store in memory structures
tg.nodes[nodeID] = temporalNode
tg.addressToNodes[addressKey] = append(tg.addressToNodes[addressKey], temporalNode)
// Store decision metadata
tg.decisions[decision.ID] = decision
if existing, exists := tg.decisionToNodes[decision.ID]; exists {
@@ -202,18 +202,18 @@ func (tg *temporalGraphImpl) EvolveContext(ctx context.Context, address ucxl.Add
} else {
tg.decisionToNodes[decision.ID] = []*TemporalNode{temporalNode}
}
// Update staleness for related contexts
tg.updateStalenessAfterChange(temporalNode)
// Clear relevant caches
tg.clearCacheForAddress(address)
// Persist to storage
if err := tg.persistTemporalNode(ctx, temporalNode); err != nil {
return nil, fmt.Errorf("failed to persist evolved temporal node: %w", err)
}
return temporalNode, nil
}
@@ -221,38 +221,38 @@ func (tg *temporalGraphImpl) EvolveContext(ctx context.Context, address ucxl.Add
func (tg *temporalGraphImpl) GetLatestVersion(ctx context.Context, address ucxl.Address) (*TemporalNode, error) {
tg.mu.RLock()
defer tg.mu.RUnlock()
addressKey := address.String()
nodes, exists := tg.addressToNodes[addressKey]
if !exists || len(nodes) == 0 {
return nil, fmt.Errorf("no temporal nodes found for address %s", address.String())
}
// Return the latest version (last in slice)
return nodes[len(nodes)-1], nil
}
// GetVersionAtDecision gets context as it was at a specific decision hop
func (tg *temporalGraphImpl) GetVersionAtDecision(ctx context.Context, address ucxl.Address,
func (tg *temporalGraphImpl) GetVersionAtDecision(ctx context.Context, address ucxl.Address,
decisionHop int) (*TemporalNode, error) {
tg.mu.RLock()
defer tg.mu.RUnlock()
addressKey := address.String()
nodes, exists := tg.addressToNodes[addressKey]
if !exists || len(nodes) == 0 {
return nil, fmt.Errorf("no temporal nodes found for address %s", address.String())
}
// Find node at specific decision hop (version)
for _, node := range nodes {
if node.Version == decisionHop {
return node, nil
}
}
return nil, fmt.Errorf("no temporal node found at decision hop %d for address %s",
return nil, fmt.Errorf("no temporal node found at decision hop %d for address %s",
decisionHop, address.String())
}
@@ -260,20 +260,20 @@ func (tg *temporalGraphImpl) GetVersionAtDecision(ctx context.Context, address u
func (tg *temporalGraphImpl) GetEvolutionHistory(ctx context.Context, address ucxl.Address) ([]*TemporalNode, error) {
tg.mu.RLock()
defer tg.mu.RUnlock()
addressKey := address.String()
nodes, exists := tg.addressToNodes[addressKey]
if !exists || len(nodes) == 0 {
return []*TemporalNode{}, nil
}
// Sort by version to ensure proper order
sortedNodes := make([]*TemporalNode, len(nodes))
copy(sortedNodes, nodes)
sort.Slice(sortedNodes, func(i, j int) bool {
return sortedNodes[i].Version < sortedNodes[j].Version
})
return sortedNodes, nil
}
@@ -281,22 +281,22 @@ func (tg *temporalGraphImpl) GetEvolutionHistory(ctx context.Context, address uc
func (tg *temporalGraphImpl) AddInfluenceRelationship(ctx context.Context, influencer, influenced ucxl.Address) error {
tg.mu.Lock()
defer tg.mu.Unlock()
// Get latest nodes for both addresses
influencerNode, err := tg.getLatestNodeUnsafe(influencer)
if err != nil {
return fmt.Errorf("influencer node not found: %w", err)
}
influencedNode, err := tg.getLatestNodeUnsafe(influenced)
if err != nil {
return fmt.Errorf("influenced node not found: %w", err)
}
// Add to influence mappings
influencerNodeID := influencerNode.ID
influencedNodeID := influencedNode.ID
// Add to influences map (influencer -> influenced)
if influences, exists := tg.influences[influencerNodeID]; exists {
// Check if relationship already exists
@@ -309,7 +309,7 @@ func (tg *temporalGraphImpl) AddInfluenceRelationship(ctx context.Context, influ
} else {
tg.influences[influencerNodeID] = []string{influencedNodeID}
}
// Add to influencedBy map (influenced <- influencer)
if influencedBy, exists := tg.influencedBy[influencedNodeID]; exists {
// Check if relationship already exists
@@ -322,14 +322,14 @@ func (tg *temporalGraphImpl) AddInfluenceRelationship(ctx context.Context, influ
} else {
tg.influencedBy[influencedNodeID] = []string{influencerNodeID}
}
// Update temporal nodes with the influence relationship
influencerNode.Influences = append(influencerNode.Influences, influenced)
influencedNode.InfluencedBy = append(influencedNode.InfluencedBy, influencer)
// Clear path cache as influence graph has changed
tg.pathCache = make(map[string][]*DecisionStep)
// Persist changes
if err := tg.persistTemporalNode(ctx, influencerNode); err != nil {
return fmt.Errorf("failed to persist influencer node: %w", err)
@@ -337,7 +337,7 @@ func (tg *temporalGraphImpl) AddInfluenceRelationship(ctx context.Context, influ
if err := tg.persistTemporalNode(ctx, influencedNode); err != nil {
return fmt.Errorf("failed to persist influenced node: %w", err)
}
return nil
}
@@ -345,39 +345,39 @@ func (tg *temporalGraphImpl) AddInfluenceRelationship(ctx context.Context, influ
func (tg *temporalGraphImpl) RemoveInfluenceRelationship(ctx context.Context, influencer, influenced ucxl.Address) error {
tg.mu.Lock()
defer tg.mu.Unlock()
// Get latest nodes for both addresses
influencerNode, err := tg.getLatestNodeUnsafe(influencer)
if err != nil {
return fmt.Errorf("influencer node not found: %w", err)
}
influencedNode, err := tg.getLatestNodeUnsafe(influenced)
if err != nil {
return fmt.Errorf("influenced node not found: %w", err)
}
// Remove from influence mappings
influencerNodeID := influencerNode.ID
influencedNodeID := influencedNode.ID
// Remove from influences map
if influences, exists := tg.influences[influencerNodeID]; exists {
tg.influences[influencerNodeID] = tg.removeFromSlice(influences, influencedNodeID)
}
// Remove from influencedBy map
if influencedBy, exists := tg.influencedBy[influencedNodeID]; exists {
tg.influencedBy[influencedNodeID] = tg.removeFromSlice(influencedBy, influencerNodeID)
}
// Update temporal nodes
influencerNode.Influences = tg.removeAddressFromSlice(influencerNode.Influences, influenced)
influencedNode.InfluencedBy = tg.removeAddressFromSlice(influencedNode.InfluencedBy, influencer)
// Clear path cache
tg.pathCache = make(map[string][]*DecisionStep)
// Persist changes
if err := tg.persistTemporalNode(ctx, influencerNode); err != nil {
return fmt.Errorf("failed to persist influencer node: %w", err)
@@ -385,7 +385,7 @@ func (tg *temporalGraphImpl) RemoveInfluenceRelationship(ctx context.Context, in
if err := tg.persistTemporalNode(ctx, influencedNode); err != nil {
return fmt.Errorf("failed to persist influenced node: %w", err)
}
return nil
}
@@ -393,28 +393,28 @@ func (tg *temporalGraphImpl) RemoveInfluenceRelationship(ctx context.Context, in
func (tg *temporalGraphImpl) GetInfluenceRelationships(ctx context.Context, address ucxl.Address) ([]ucxl.Address, []ucxl.Address, error) {
tg.mu.RLock()
defer tg.mu.RUnlock()
node, err := tg.getLatestNodeUnsafe(address)
if err != nil {
return nil, nil, fmt.Errorf("node not found: %w", err)
}
influences := make([]ucxl.Address, len(node.Influences))
copy(influences, node.Influences)
influencedBy := make([]ucxl.Address, len(node.InfluencedBy))
copy(influencedBy, node.InfluencedBy)
return influences, influencedBy, nil
}
// FindRelatedDecisions finds decisions within N decision hops
func (tg *temporalGraphImpl) FindRelatedDecisions(ctx context.Context, address ucxl.Address,
func (tg *temporalGraphImpl) FindRelatedDecisions(ctx context.Context, address ucxl.Address,
maxHops int) ([]*DecisionPath, error) {
tg.mu.RLock()
defer tg.mu.RUnlock()
// Check cache first
cacheKey := fmt.Sprintf("related-%s-%d", address.String(), maxHops)
if cached, exists := tg.pathCache[cacheKey]; exists {
@@ -430,27 +430,27 @@ func (tg *temporalGraphImpl) FindRelatedDecisions(ctx context.Context, address u
}
return paths, nil
}
startNode, err := tg.getLatestNodeUnsafe(address)
if err != nil {
return nil, fmt.Errorf("start node not found: %w", err)
}
// Use BFS to find all nodes within maxHops
visited := make(map[string]bool)
queue := []*bfsItem{{node: startNode, distance: 0, path: []*DecisionStep{}}}
relatedPaths := make([]*DecisionPath, 0)
for len(queue) > 0 {
current := queue[0]
queue = queue[1:]
nodeID := current.node.ID
if visited[nodeID] || current.distance > maxHops {
continue
}
visited[nodeID] = true
// If this is not the starting node, add it to results
if current.distance > 0 {
step := &DecisionStep{
@@ -459,7 +459,7 @@ func (tg *temporalGraphImpl) FindRelatedDecisions(ctx context.Context, address u
HopDistance: current.distance,
Relationship: "influence",
}
path := &DecisionPath{
From: address,
To: current.node.UCXLAddress,
@@ -469,7 +469,7 @@ func (tg *temporalGraphImpl) FindRelatedDecisions(ctx context.Context, address u
}
relatedPaths = append(relatedPaths, path)
}
// Add influenced nodes to queue
if influences, exists := tg.influences[nodeID]; exists {
for _, influencedID := range influences {
@@ -491,7 +491,7 @@ func (tg *temporalGraphImpl) FindRelatedDecisions(ctx context.Context, address u
}
}
}
// Add influencer nodes to queue
if influencedBy, exists := tg.influencedBy[nodeID]; exists {
for _, influencerID := range influencedBy {
@@ -514,7 +514,7 @@ func (tg *temporalGraphImpl) FindRelatedDecisions(ctx context.Context, address u
}
}
}
return relatedPaths, nil
}
@@ -522,44 +522,44 @@ func (tg *temporalGraphImpl) FindRelatedDecisions(ctx context.Context, address u
func (tg *temporalGraphImpl) FindDecisionPath(ctx context.Context, from, to ucxl.Address) ([]*DecisionStep, error) {
tg.mu.RLock()
defer tg.mu.RUnlock()
// Check cache first
cacheKey := fmt.Sprintf("path-%s-%s", from.String(), to.String())
if cached, exists := tg.pathCache[cacheKey]; exists {
return cached, nil
}
fromNode, err := tg.getLatestNodeUnsafe(from)
if err != nil {
return nil, fmt.Errorf("from node not found: %w", err)
}
toNode, err := tg.getLatestNodeUnsafe(to)
_, err := tg.getLatestNodeUnsafe(to)
if err != nil {
return nil, fmt.Errorf("to node not found: %w", err)
}
// Use BFS to find shortest path
visited := make(map[string]bool)
queue := []*pathItem{{node: fromNode, path: []*DecisionStep{}}}
for len(queue) > 0 {
current := queue[0]
queue = queue[1:]
nodeID := current.node.ID
if visited[nodeID] {
continue
}
visited[nodeID] = true
// Check if we reached the target
if current.node.UCXLAddress.String() == to.String() {
// Cache the result
tg.pathCache[cacheKey] = current.path
return current.path, nil
}
// Explore influenced nodes
if influences, exists := tg.influences[nodeID]; exists {
for _, influencedID := range influences {
@@ -580,7 +580,7 @@ func (tg *temporalGraphImpl) FindDecisionPath(ctx context.Context, from, to ucxl
}
}
}
// Explore influencer nodes
if influencedBy, exists := tg.influencedBy[nodeID]; exists {
for _, influencerID := range influencedBy {
@@ -602,7 +602,7 @@ func (tg *temporalGraphImpl) FindDecisionPath(ctx context.Context, from, to ucxl
}
}
}
return nil, fmt.Errorf("no path found from %s to %s", from.String(), to.String())
}
@@ -610,7 +610,7 @@ func (tg *temporalGraphImpl) FindDecisionPath(ctx context.Context, from, to ucxl
func (tg *temporalGraphImpl) AnalyzeDecisionPatterns(ctx context.Context) (*DecisionAnalysis, error) {
tg.mu.RLock()
defer tg.mu.RUnlock()
analysis := &DecisionAnalysis{
TimeRange: 24 * time.Hour, // Analyze last 24 hours by default
TotalDecisions: len(tg.decisions),
@@ -620,10 +620,10 @@ func (tg *temporalGraphImpl) AnalyzeDecisionPatterns(ctx context.Context) (*Deci
MostInfluentialDecisions: make([]*InfluentialDecision, 0),
DecisionClusters: make([]*DecisionCluster, 0),
Patterns: make([]*DecisionPattern, 0),
Anomalies: make([]*AnomalousDecision, 0),
AnalyzedAt: time.Now(),
Anomalies: make([]*AnomalousDecision, 0),
AnalyzedAt: time.Now(),
}
// Calculate decision velocity
cutoff := time.Now().Add(-analysis.TimeRange)
recentDecisions := 0
@@ -633,7 +633,7 @@ func (tg *temporalGraphImpl) AnalyzeDecisionPatterns(ctx context.Context) (*Deci
}
}
analysis.DecisionVelocity = float64(recentDecisions) / analysis.TimeRange.Hours()
// Calculate average influence distance
totalDistance := 0.0
connections := 0
@@ -648,37 +648,37 @@ func (tg *temporalGraphImpl) AnalyzeDecisionPatterns(ctx context.Context) (*Deci
if connections > 0 {
analysis.AverageInfluenceDistance = totalDistance / float64(connections)
}
// Find most influential decisions (simplified)
influenceScores := make(map[string]float64)
for nodeID, node := range tg.nodes {
score := float64(len(tg.influences[nodeID])) * 1.0 // Direct influences
score := float64(len(tg.influences[nodeID])) * 1.0 // Direct influences
score += float64(len(tg.influencedBy[nodeID])) * 0.5 // Being influenced
influenceScores[nodeID] = score
if score > 3.0 { // Threshold for "influential"
influential := &InfluentialDecision{
Address: node.UCXLAddress,
DecisionHop: node.Version,
InfluenceScore: score,
AffectedContexts: node.Influences,
DecisionMetadata: tg.decisions[node.DecisionID],
InfluenceReasons: []string{"high_connectivity", "multiple_influences"},
Address: node.UCXLAddress,
DecisionHop: node.Version,
InfluenceScore: score,
AffectedContexts: node.Influences,
DecisionMetadata: tg.decisions[node.DecisionID],
InfluenceReasons: []string{"high_connectivity", "multiple_influences"},
}
analysis.MostInfluentialDecisions = append(analysis.MostInfluentialDecisions, influential)
}
}
// Sort influential decisions by score
sort.Slice(analysis.MostInfluentialDecisions, func(i, j int) bool {
return analysis.MostInfluentialDecisions[i].InfluenceScore > analysis.MostInfluentialDecisions[j].InfluenceScore
})
// Limit to top 10
if len(analysis.MostInfluentialDecisions) > 10 {
analysis.MostInfluentialDecisions = analysis.MostInfluentialDecisions[:10]
}
return analysis, nil
}
@@ -686,19 +686,19 @@ func (tg *temporalGraphImpl) AnalyzeDecisionPatterns(ctx context.Context) (*Deci
func (tg *temporalGraphImpl) ValidateTemporalIntegrity(ctx context.Context) error {
tg.mu.RLock()
defer tg.mu.RUnlock()
errors := make([]string, 0)
// Check for orphaned nodes
for nodeID, node := range tg.nodes {
if node.ParentNode != nil {
if _, exists := tg.nodes[*node.ParentNode]; !exists {
errors = append(errors, fmt.Sprintf("orphaned node %s has non-existent parent %s",
errors = append(errors, fmt.Sprintf("orphaned node %s has non-existent parent %s",
nodeID, *node.ParentNode))
}
}
}
// Check influence consistency
for nodeID := range tg.influences {
if influences, exists := tg.influences[nodeID]; exists {
@@ -713,33 +713,33 @@ func (tg *temporalGraphImpl) ValidateTemporalIntegrity(ctx context.Context) erro
}
}
if !found {
errors = append(errors, fmt.Sprintf("influence inconsistency: %s -> %s not reflected in influencedBy",
errors = append(errors, fmt.Sprintf("influence inconsistency: %s -> %s not reflected in influencedBy",
nodeID, influencedID))
}
}
}
}
}
// Check version sequence integrity
for address, nodes := range tg.addressToNodes {
sort.Slice(nodes, func(i, j int) bool {
return nodes[i].Version < nodes[j].Version
})
for i, node := range nodes {
expectedVersion := i + 1
if node.Version != expectedVersion {
errors = append(errors, fmt.Sprintf("version sequence error for address %s: expected %d, got %d",
errors = append(errors, fmt.Sprintf("version sequence error for address %s: expected %d, got %d",
address, expectedVersion, node.Version))
}
}
}
if len(errors) > 0 {
return fmt.Errorf("temporal integrity violations: %v", errors)
}
return nil
}
@@ -747,21 +747,21 @@ func (tg *temporalGraphImpl) ValidateTemporalIntegrity(ctx context.Context) erro
func (tg *temporalGraphImpl) CompactHistory(ctx context.Context, beforeTime time.Time) error {
tg.mu.Lock()
defer tg.mu.Unlock()
compacted := 0
// For each address, keep only the latest version and major milestones before the cutoff
for address, nodes := range tg.addressToNodes {
toKeep := make([]*TemporalNode, 0)
toRemove := make([]*TemporalNode, 0)
for _, node := range nodes {
// Always keep nodes after the cutoff time
if node.Timestamp.After(beforeTime) {
toKeep = append(toKeep, node)
continue
}
// Keep major changes and influential decisions
if tg.isMajorChange(node) || tg.isInfluentialDecision(node) {
toKeep = append(toKeep, node)
@@ -769,10 +769,10 @@ func (tg *temporalGraphImpl) CompactHistory(ctx context.Context, beforeTime time
toRemove = append(toRemove, node)
}
}
// Update the address mapping
tg.addressToNodes[address] = toKeep
// Remove old nodes from main maps
for _, node := range toRemove {
delete(tg.nodes, node.ID)
@@ -781,11 +781,11 @@ func (tg *temporalGraphImpl) CompactHistory(ctx context.Context, beforeTime time
compacted++
}
}
// Clear caches after compaction
tg.pathCache = make(map[string][]*DecisionStep)
tg.metricsCache = make(map[string]interface{})
return nil
}
@@ -847,13 +847,13 @@ func (tg *temporalGraphImpl) calculateStaleness(node *TemporalNode, changedNode
// Simple staleness calculation based on time since last update and influence strength
timeSinceUpdate := time.Since(node.Timestamp)
timeWeight := math.Min(timeSinceUpdate.Hours()/168.0, 1.0) // Max staleness from time: 1 week
// Influence weight based on connection strength
influenceWeight := 0.0
if len(node.InfluencedBy) > 0 {
influenceWeight = 1.0 / float64(len(node.InfluencedBy)) // Stronger if fewer influencers
}
// Impact scope weight
impactWeight := 0.0
switch changedNode.ImpactScope {
@@ -866,23 +866,23 @@ func (tg *temporalGraphImpl) calculateStaleness(node *TemporalNode, changedNode
case ImpactLocal:
impactWeight = 0.4
}
return math.Min(
tg.stalenessWeight.TimeWeight*timeWeight+
tg.stalenessWeight.InfluenceWeight*influenceWeight+
tg.stalenessWeight.ImportanceWeight*impactWeight, 1.0)
tg.stalenessWeight.InfluenceWeight*influenceWeight+
tg.stalenessWeight.ImportanceWeight*impactWeight, 1.0)
}
func (tg *temporalGraphImpl) clearCacheForAddress(address ucxl.Address) {
addressStr := address.String()
keysToDelete := make([]string, 0)
for key := range tg.pathCache {
if contains(key, addressStr) {
keysToDelete = append(keysToDelete, key)
}
}
for _, key := range keysToDelete {
delete(tg.pathCache, key)
}
@@ -908,7 +908,7 @@ func (tg *temporalGraphImpl) persistTemporalNode(ctx context.Context, node *Temp
}
func contains(s, substr string) bool {
return len(s) >= len(substr) && (s == substr ||
return len(s) >= len(substr) && (s == substr ||
(len(s) > len(substr) && (s[:len(substr)] == substr || s[len(s)-len(substr):] == substr)))
}
@@ -923,4 +923,4 @@ type bfsItem struct {
type pathItem struct {
node *TemporalNode
path []*DecisionStep
}
}

File diff suppressed because it is too large Load Diff

View File

@@ -13,36 +13,36 @@ import (
// decisionNavigatorImpl implements the DecisionNavigator interface
type decisionNavigatorImpl struct {
mu sync.RWMutex
// Reference to the temporal graph
graph *temporalGraphImpl
// Navigation state
navigationSessions map[string]*NavigationSession
bookmarks map[string]*DecisionBookmark
// Configuration
maxNavigationHistory int
}
// NavigationSession represents a navigation session
type NavigationSession struct {
ID string `json:"id"`
UserID string `json:"user_id"`
StartedAt time.Time `json:"started_at"`
LastActivity time.Time `json:"last_activity"`
CurrentPosition ucxl.Address `json:"current_position"`
History []*DecisionStep `json:"history"`
Bookmarks []string `json:"bookmarks"`
Preferences *NavPreferences `json:"preferences"`
ID string `json:"id"`
UserID string `json:"user_id"`
StartedAt time.Time `json:"started_at"`
LastActivity time.Time `json:"last_activity"`
CurrentPosition ucxl.Address `json:"current_position"`
History []*DecisionStep `json:"history"`
Bookmarks []string `json:"bookmarks"`
Preferences *NavPreferences `json:"preferences"`
}
// NavPreferences represents navigation preferences
type NavPreferences struct {
MaxHops int `json:"max_hops"`
MaxHops int `json:"max_hops"`
PreferRecentDecisions bool `json:"prefer_recent_decisions"`
FilterByConfidence float64 `json:"filter_by_confidence"`
IncludeStaleContexts bool `json:"include_stale_contexts"`
FilterByConfidence float64 `json:"filter_by_confidence"`
IncludeStaleContexts bool `json:"include_stale_contexts"`
}
// NewDecisionNavigator creates a new decision navigator
@@ -50,24 +50,24 @@ func NewDecisionNavigator(graph *temporalGraphImpl) DecisionNavigator {
return &decisionNavigatorImpl{
graph: graph,
navigationSessions: make(map[string]*NavigationSession),
bookmarks: make(map[string]*DecisionBookmark),
bookmarks: make(map[string]*DecisionBookmark),
maxNavigationHistory: 100,
}
}
// NavigateDecisionHops navigates by decision distance, not time
func (dn *decisionNavigatorImpl) NavigateDecisionHops(ctx context.Context, address ucxl.Address,
func (dn *decisionNavigatorImpl) NavigateDecisionHops(ctx context.Context, address ucxl.Address,
hops int, direction NavigationDirection) (*TemporalNode, error) {
dn.mu.RLock()
defer dn.mu.RUnlock()
// Get starting node
startNode, err := dn.graph.getLatestNodeUnsafe(address)
if err != nil {
return nil, fmt.Errorf("failed to get starting node: %w", err)
}
// Navigate by hops
currentNode := startNode
for i := 0; i < hops; i++ {
@@ -77,23 +77,23 @@ func (dn *decisionNavigatorImpl) NavigateDecisionHops(ctx context.Context, addre
}
currentNode = nextNode
}
return currentNode, nil
}
// GetDecisionTimeline gets timeline ordered by decision sequence
func (dn *decisionNavigatorImpl) GetDecisionTimeline(ctx context.Context, address ucxl.Address,
func (dn *decisionNavigatorImpl) GetDecisionTimeline(ctx context.Context, address ucxl.Address,
includeRelated bool, maxHops int) (*DecisionTimeline, error) {
dn.mu.RLock()
defer dn.mu.RUnlock()
// Get evolution history for the primary address
history, err := dn.graph.GetEvolutionHistory(ctx, address)
if err != nil {
return nil, fmt.Errorf("failed to get evolution history: %w", err)
}
// Build decision timeline entries
decisionSequence := make([]*DecisionTimelineEntry, len(history))
for i, node := range history {
@@ -112,7 +112,7 @@ func (dn *decisionNavigatorImpl) GetDecisionTimeline(ctx context.Context, addres
}
decisionSequence[i] = entry
}
// Get related decisions if requested
relatedDecisions := make([]*RelatedDecision, 0)
if includeRelated && maxHops > 0 {
@@ -136,16 +136,16 @@ func (dn *decisionNavigatorImpl) GetDecisionTimeline(ctx context.Context, addres
}
}
}
// Calculate timeline analysis
analysis := dn.analyzeTimeline(decisionSequence, relatedDecisions)
// Calculate time span
var timeSpan time.Duration
if len(history) > 1 {
timeSpan = history[len(history)-1].Timestamp.Sub(history[0].Timestamp)
}
timeline := &DecisionTimeline{
PrimaryAddress: address,
DecisionSequence: decisionSequence,
@@ -154,7 +154,7 @@ func (dn *decisionNavigatorImpl) GetDecisionTimeline(ctx context.Context, addres
TimeSpan: timeSpan,
AnalysisMetadata: analysis,
}
return timeline, nil
}
@@ -162,31 +162,31 @@ func (dn *decisionNavigatorImpl) GetDecisionTimeline(ctx context.Context, addres
func (dn *decisionNavigatorImpl) FindStaleContexts(ctx context.Context, stalenessThreshold float64) ([]*StaleContext, error) {
dn.mu.RLock()
defer dn.mu.RUnlock()
staleContexts := make([]*StaleContext, 0)
// Check all nodes for staleness
for _, node := range dn.graph.nodes {
if node.Staleness >= stalenessThreshold {
staleness := &StaleContext{
UCXLAddress: node.UCXLAddress,
TemporalNode: node,
StalenessScore: node.Staleness,
LastUpdated: node.Timestamp,
Reasons: dn.getStalenessReasons(node),
UCXLAddress: node.UCXLAddress,
TemporalNode: node,
StalenessScore: node.Staleness,
LastUpdated: node.Timestamp,
Reasons: dn.getStalenessReasons(node),
SuggestedActions: dn.getSuggestedActions(node),
RelatedChanges: dn.getRelatedChanges(node),
Priority: dn.calculateStalePriority(node),
RelatedChanges: dn.getRelatedChanges(node),
Priority: dn.calculateStalePriority(node),
}
staleContexts = append(staleContexts, staleness)
}
}
// Sort by staleness score (highest first)
sort.Slice(staleContexts, func(i, j int) bool {
return staleContexts[i].StalenessScore > staleContexts[j].StalenessScore
})
return staleContexts, nil
}
@@ -195,28 +195,28 @@ func (dn *decisionNavigatorImpl) ValidateDecisionPath(ctx context.Context, path
if len(path) == 0 {
return fmt.Errorf("empty decision path")
}
dn.mu.RLock()
defer dn.mu.RUnlock()
// Validate each step in the path
for i, step := range path {
// Check if the temporal node exists
if step.TemporalNode == nil {
return fmt.Errorf("step %d has nil temporal node", i)
}
nodeID := step.TemporalNode.ID
if _, exists := dn.graph.nodes[nodeID]; !exists {
return fmt.Errorf("step %d references non-existent node %s", i, nodeID)
}
// Validate hop distance
if step.HopDistance != i {
return fmt.Errorf("step %d has incorrect hop distance: expected %d, got %d",
return fmt.Errorf("step %d has incorrect hop distance: expected %d, got %d",
i, i, step.HopDistance)
}
// Validate relationship to next step
if i < len(path)-1 {
nextStep := path[i+1]
@@ -225,7 +225,7 @@ func (dn *decisionNavigatorImpl) ValidateDecisionPath(ctx context.Context, path
}
}
}
return nil
}
@@ -233,16 +233,16 @@ func (dn *decisionNavigatorImpl) ValidateDecisionPath(ctx context.Context, path
func (dn *decisionNavigatorImpl) GetNavigationHistory(ctx context.Context, sessionID string) ([]*DecisionStep, error) {
dn.mu.RLock()
defer dn.mu.RUnlock()
session, exists := dn.navigationSessions[sessionID]
if !exists {
return nil, fmt.Errorf("navigation session %s not found", sessionID)
}
// Return a copy of the history
history := make([]*DecisionStep, len(session.History))
copy(history, session.History)
return history, nil
}
@@ -250,22 +250,22 @@ func (dn *decisionNavigatorImpl) GetNavigationHistory(ctx context.Context, sessi
func (dn *decisionNavigatorImpl) ResetNavigation(ctx context.Context, address ucxl.Address) error {
dn.mu.Lock()
defer dn.mu.Unlock()
// Clear any navigation sessions for this address
for sessionID, session := range dn.navigationSessions {
for _, session := range dn.navigationSessions {
if session.CurrentPosition.String() == address.String() {
// Reset to latest version
latestNode, err := dn.graph.getLatestNodeUnsafe(address)
if err != nil {
return fmt.Errorf("failed to get latest node: %w", err)
}
session.CurrentPosition = address
session.History = []*DecisionStep{}
session.LastActivity = time.Now()
}
}
return nil
}
@@ -273,13 +273,13 @@ func (dn *decisionNavigatorImpl) ResetNavigation(ctx context.Context, address uc
func (dn *decisionNavigatorImpl) BookmarkDecision(ctx context.Context, address ucxl.Address, hop int, name string) error {
dn.mu.Lock()
defer dn.mu.Unlock()
// Validate the decision point exists
node, err := dn.graph.GetVersionAtDecision(ctx, address, hop)
if err != nil {
return fmt.Errorf("decision point not found: %w", err)
}
// Create bookmark
bookmarkID := fmt.Sprintf("%s-%d-%d", address.String(), hop, time.Now().Unix())
bookmark := &DecisionBookmark{
@@ -293,14 +293,14 @@ func (dn *decisionNavigatorImpl) BookmarkDecision(ctx context.Context, address u
Tags: []string{},
Metadata: make(map[string]interface{}),
}
// Add context information to metadata
bookmark.Metadata["change_reason"] = node.ChangeReason
bookmark.Metadata["decision_id"] = node.DecisionID
bookmark.Metadata["confidence"] = node.Confidence
dn.bookmarks[bookmarkID] = bookmark
return nil
}
@@ -308,17 +308,17 @@ func (dn *decisionNavigatorImpl) BookmarkDecision(ctx context.Context, address u
func (dn *decisionNavigatorImpl) ListBookmarks(ctx context.Context) ([]*DecisionBookmark, error) {
dn.mu.RLock()
defer dn.mu.RUnlock()
bookmarks := make([]*DecisionBookmark, 0, len(dn.bookmarks))
for _, bookmark := range dn.bookmarks {
bookmarks = append(bookmarks, bookmark)
}
// Sort by creation time (newest first)
sort.Slice(bookmarks, func(i, j int) bool {
return bookmarks[i].CreatedAt.After(bookmarks[j].CreatedAt)
})
return bookmarks, nil
}
@@ -342,14 +342,14 @@ func (dn *decisionNavigatorImpl) navigateForward(currentNode *TemporalNode) (*Te
if !exists {
return nil, fmt.Errorf("no nodes found for address")
}
// Find current node in the list and get the next one
for i, node := range nodes {
if node.ID == currentNode.ID && i < len(nodes)-1 {
return nodes[i+1], nil
}
}
return nil, fmt.Errorf("no forward navigation possible")
}
@@ -358,12 +358,12 @@ func (dn *decisionNavigatorImpl) navigateBackward(currentNode *TemporalNode) (*T
if currentNode.ParentNode == nil {
return nil, fmt.Errorf("no backward navigation possible: no parent node")
}
parentNode, exists := dn.graph.nodes[*currentNode.ParentNode]
if !exists {
return nil, fmt.Errorf("parent node not found: %s", *currentNode.ParentNode)
}
return parentNode, nil
}
@@ -387,7 +387,7 @@ func (dn *decisionNavigatorImpl) analyzeTimeline(sequence []*DecisionTimelineEnt
AnalyzedAt: time.Now(),
}
}
// Calculate change velocity
var changeVelocity float64
if len(sequence) > 1 {
@@ -398,27 +398,27 @@ func (dn *decisionNavigatorImpl) analyzeTimeline(sequence []*DecisionTimelineEnt
changeVelocity = float64(len(sequence)-1) / duration.Hours()
}
}
// Analyze confidence trend
confidenceTrend := "stable"
if len(sequence) > 1 {
firstConfidence := sequence[0].ConfidenceEvolution
lastConfidence := sequence[len(sequence)-1].ConfidenceEvolution
diff := lastConfidence - firstConfidence
if diff > 0.1 {
confidenceTrend = "increasing"
} else if diff < -0.1 {
confidenceTrend = "decreasing"
}
}
// Count change reasons
reasonCounts := make(map[ChangeReason]int)
for _, entry := range sequence {
reasonCounts[entry.ChangeReason]++
}
// Find dominant reasons
dominantReasons := make([]ChangeReason, 0)
maxCount := 0
@@ -430,19 +430,19 @@ func (dn *decisionNavigatorImpl) analyzeTimeline(sequence []*DecisionTimelineEnt
dominantReasons = append(dominantReasons, reason)
}
}
// Count decision makers
makerCounts := make(map[string]int)
for _, entry := range sequence {
makerCounts[entry.DecisionMaker]++
}
// Count impact scope distribution
scopeCounts := make(map[ImpactScope]int)
for _, entry := range sequence {
scopeCounts[entry.ImpactScope]++
}
return &TimelineAnalysis{
ChangeVelocity: changeVelocity,
ConfidenceTrend: confidenceTrend,
@@ -456,47 +456,47 @@ func (dn *decisionNavigatorImpl) analyzeTimeline(sequence []*DecisionTimelineEnt
func (dn *decisionNavigatorImpl) getStalenessReasons(node *TemporalNode) []string {
reasons := make([]string, 0)
// Time-based staleness
timeSinceUpdate := time.Since(node.Timestamp)
if timeSinceUpdate > 7*24*time.Hour {
reasons = append(reasons, "not updated in over a week")
}
// Influence-based staleness
if len(node.InfluencedBy) > 0 {
reasons = append(reasons, "influenced by other contexts that may have changed")
}
// Confidence-based staleness
if node.Confidence < 0.7 {
reasons = append(reasons, "low confidence score")
}
return reasons
}
func (dn *decisionNavigatorImpl) getSuggestedActions(node *TemporalNode) []string {
actions := make([]string, 0)
actions = append(actions, "review context for accuracy")
actions = append(actions, "check related decisions for impact")
if node.Confidence < 0.7 {
actions = append(actions, "improve context confidence through additional analysis")
}
if len(node.InfluencedBy) > 3 {
actions = append(actions, "validate dependencies are still accurate")
}
return actions
}
func (dn *decisionNavigatorImpl) getRelatedChanges(node *TemporalNode) []ucxl.Address {
// Find contexts that have changed recently and might affect this one
relatedChanges := make([]ucxl.Address, 0)
cutoff := time.Now().Add(-24 * time.Hour)
for _, otherNode := range dn.graph.nodes {
if otherNode.Timestamp.After(cutoff) && otherNode.ID != node.ID {
@@ -509,18 +509,18 @@ func (dn *decisionNavigatorImpl) getRelatedChanges(node *TemporalNode) []ucxl.Ad
}
}
}
return relatedChanges
}
func (dn *decisionNavigatorImpl) calculateStalePriority(node *TemporalNode) StalePriority {
score := node.Staleness
// Adjust based on influence
if len(node.Influences) > 5 {
score += 0.2 // Higher priority if it influences many others
}
// Adjust based on impact scope
switch node.ImpactScope {
case ImpactSystem:
@@ -530,7 +530,7 @@ func (dn *decisionNavigatorImpl) calculateStalePriority(node *TemporalNode) Stal
case ImpactModule:
score += 0.1
}
if score >= 0.9 {
return PriorityCritical
} else if score >= 0.7 {
@@ -545,7 +545,7 @@ func (dn *decisionNavigatorImpl) validateStepRelationship(step, nextStep *Decisi
// Check if there's a valid relationship between the steps
currentNodeID := step.TemporalNode.ID
nextNodeID := nextStep.TemporalNode.ID
switch step.Relationship {
case "influences":
if influences, exists := dn.graph.influences[currentNodeID]; exists {
@@ -564,6 +564,6 @@ func (dn *decisionNavigatorImpl) validateStepRelationship(step, nextStep *Decisi
}
}
}
return false
}
}

View File

@@ -7,93 +7,93 @@ import (
"sync"
"time"
"chorus/pkg/ucxl"
"chorus/pkg/slurp/storage"
"chorus/pkg/ucxl"
)
// persistenceManagerImpl handles persistence and synchronization of temporal graph data
type persistenceManagerImpl struct {
mu sync.RWMutex
// Storage interfaces
contextStore storage.ContextStore
localStorage storage.LocalStorage
distributedStore storage.DistributedStorage
encryptedStore storage.EncryptedStorage
backupManager storage.BackupManager
// Reference to temporal graph
graph *temporalGraphImpl
// Persistence configuration
config *PersistenceConfig
// Synchronization state
lastSyncTime time.Time
syncInProgress bool
pendingChanges map[string]*PendingChange
conflictResolver ConflictResolver
// Performance optimization
batchSize int
writeBuffer []*TemporalNode
bufferMutex sync.Mutex
flushInterval time.Duration
lastFlush time.Time
batchSize int
writeBuffer []*TemporalNode
bufferMutex sync.Mutex
flushInterval time.Duration
lastFlush time.Time
}
// PersistenceConfig represents configuration for temporal graph persistence
type PersistenceConfig struct {
// Storage settings
EnableLocalStorage bool `json:"enable_local_storage"`
EnableDistributedStorage bool `json:"enable_distributed_storage"`
EnableEncryption bool `json:"enable_encryption"`
EncryptionRoles []string `json:"encryption_roles"`
EnableLocalStorage bool `json:"enable_local_storage"`
EnableDistributedStorage bool `json:"enable_distributed_storage"`
EnableEncryption bool `json:"enable_encryption"`
EncryptionRoles []string `json:"encryption_roles"`
// Synchronization settings
SyncInterval time.Duration `json:"sync_interval"`
ConflictResolutionStrategy string `json:"conflict_resolution_strategy"`
EnableAutoSync bool `json:"enable_auto_sync"`
MaxSyncRetries int `json:"max_sync_retries"`
SyncInterval time.Duration `json:"sync_interval"`
ConflictResolutionStrategy string `json:"conflict_resolution_strategy"`
EnableAutoSync bool `json:"enable_auto_sync"`
MaxSyncRetries int `json:"max_sync_retries"`
// Performance settings
BatchSize int `json:"batch_size"`
FlushInterval time.Duration `json:"flush_interval"`
EnableWriteBuffer bool `json:"enable_write_buffer"`
BatchSize int `json:"batch_size"`
FlushInterval time.Duration `json:"flush_interval"`
EnableWriteBuffer bool `json:"enable_write_buffer"`
// Backup settings
EnableAutoBackup bool `json:"enable_auto_backup"`
BackupInterval time.Duration `json:"backup_interval"`
RetainBackupCount int `json:"retain_backup_count"`
EnableAutoBackup bool `json:"enable_auto_backup"`
BackupInterval time.Duration `json:"backup_interval"`
RetainBackupCount int `json:"retain_backup_count"`
// Storage keys and patterns
KeyPrefix string `json:"key_prefix"`
NodeKeyPattern string `json:"node_key_pattern"`
GraphKeyPattern string `json:"graph_key_pattern"`
MetadataKeyPattern string `json:"metadata_key_pattern"`
KeyPrefix string `json:"key_prefix"`
NodeKeyPattern string `json:"node_key_pattern"`
GraphKeyPattern string `json:"graph_key_pattern"`
MetadataKeyPattern string `json:"metadata_key_pattern"`
}
// PendingChange represents a change waiting to be synchronized
type PendingChange struct {
ID string `json:"id"`
Type ChangeType `json:"type"`
NodeID string `json:"node_id"`
Data interface{} `json:"data"`
Timestamp time.Time `json:"timestamp"`
Retries int `json:"retries"`
LastError string `json:"last_error"`
Metadata map[string]interface{} `json:"metadata"`
ID string `json:"id"`
Type ChangeType `json:"type"`
NodeID string `json:"node_id"`
Data interface{} `json:"data"`
Timestamp time.Time `json:"timestamp"`
Retries int `json:"retries"`
LastError string `json:"last_error"`
Metadata map[string]interface{} `json:"metadata"`
}
// ChangeType represents the type of change to be synchronized
type ChangeType string
const (
ChangeTypeNodeCreated ChangeType = "node_created"
ChangeTypeNodeUpdated ChangeType = "node_updated"
ChangeTypeNodeDeleted ChangeType = "node_deleted"
ChangeTypeGraphUpdated ChangeType = "graph_updated"
ChangeTypeInfluenceAdded ChangeType = "influence_added"
ChangeTypeNodeCreated ChangeType = "node_created"
ChangeTypeNodeUpdated ChangeType = "node_updated"
ChangeTypeNodeDeleted ChangeType = "node_deleted"
ChangeTypeGraphUpdated ChangeType = "graph_updated"
ChangeTypeInfluenceAdded ChangeType = "influence_added"
ChangeTypeInfluenceRemoved ChangeType = "influence_removed"
)
@@ -105,39 +105,39 @@ type ConflictResolver interface {
// GraphSnapshot represents a snapshot of the temporal graph for synchronization
type GraphSnapshot struct {
Timestamp time.Time `json:"timestamp"`
Nodes map[string]*TemporalNode `json:"nodes"`
Influences map[string][]string `json:"influences"`
InfluencedBy map[string][]string `json:"influenced_by"`
Decisions map[string]*DecisionMetadata `json:"decisions"`
Metadata *GraphMetadata `json:"metadata"`
Checksum string `json:"checksum"`
Timestamp time.Time `json:"timestamp"`
Nodes map[string]*TemporalNode `json:"nodes"`
Influences map[string][]string `json:"influences"`
InfluencedBy map[string][]string `json:"influenced_by"`
Decisions map[string]*DecisionMetadata `json:"decisions"`
Metadata *GraphMetadata `json:"metadata"`
Checksum string `json:"checksum"`
}
// GraphMetadata represents metadata about the temporal graph
type GraphMetadata struct {
Version int `json:"version"`
LastModified time.Time `json:"last_modified"`
NodeCount int `json:"node_count"`
EdgeCount int `json:"edge_count"`
DecisionCount int `json:"decision_count"`
CreatedBy string `json:"created_by"`
CreatedAt time.Time `json:"created_at"`
Version int `json:"version"`
LastModified time.Time `json:"last_modified"`
NodeCount int `json:"node_count"`
EdgeCount int `json:"edge_count"`
DecisionCount int `json:"decision_count"`
CreatedBy string `json:"created_by"`
CreatedAt time.Time `json:"created_at"`
}
// SyncResult represents the result of a synchronization operation
type SyncResult struct {
StartTime time.Time `json:"start_time"`
EndTime time.Time `json:"end_time"`
Duration time.Duration `json:"duration"`
NodesProcessed int `json:"nodes_processed"`
NodesCreated int `json:"nodes_created"`
NodesUpdated int `json:"nodes_updated"`
NodesDeleted int `json:"nodes_deleted"`
ConflictsFound int `json:"conflicts_found"`
ConflictsResolved int `json:"conflicts_resolved"`
Errors []string `json:"errors"`
Success bool `json:"success"`
StartTime time.Time `json:"start_time"`
EndTime time.Time `json:"end_time"`
Duration time.Duration `json:"duration"`
NodesProcessed int `json:"nodes_processed"`
NodesCreated int `json:"nodes_created"`
NodesUpdated int `json:"nodes_updated"`
NodesDeleted int `json:"nodes_deleted"`
ConflictsFound int `json:"conflicts_found"`
ConflictsResolved int `json:"conflicts_resolved"`
Errors []string `json:"errors"`
Success bool `json:"success"`
}
// NewPersistenceManager creates a new persistence manager
@@ -150,7 +150,7 @@ func NewPersistenceManager(
graph *temporalGraphImpl,
config *PersistenceConfig,
) *persistenceManagerImpl {
pm := &persistenceManagerImpl{
contextStore: contextStore,
localStorage: localStorage,
@@ -165,20 +165,20 @@ func NewPersistenceManager(
writeBuffer: make([]*TemporalNode, 0, config.BatchSize),
flushInterval: config.FlushInterval,
}
// Start background processes
if config.EnableAutoSync {
go pm.syncWorker()
}
if config.EnableWriteBuffer {
go pm.flushWorker()
}
if config.EnableAutoBackup {
go pm.backupWorker()
}
return pm
}
@@ -186,12 +186,12 @@ func NewPersistenceManager(
func (pm *persistenceManagerImpl) PersistTemporalNode(ctx context.Context, node *TemporalNode) error {
pm.mu.Lock()
defer pm.mu.Unlock()
// Add to write buffer if enabled
if pm.config.EnableWriteBuffer {
return pm.addToWriteBuffer(node)
}
// Direct persistence
return pm.persistNodeDirect(ctx, node)
}
@@ -200,20 +200,20 @@ func (pm *persistenceManagerImpl) PersistTemporalNode(ctx context.Context, node
func (pm *persistenceManagerImpl) LoadTemporalGraph(ctx context.Context) error {
pm.mu.Lock()
defer pm.mu.Unlock()
// Load from different storage layers
if pm.config.EnableLocalStorage {
if err := pm.loadFromLocalStorage(ctx); err != nil {
return fmt.Errorf("failed to load from local storage: %w", err)
}
}
if pm.config.EnableDistributedStorage {
if err := pm.loadFromDistributedStorage(ctx); err != nil {
return fmt.Errorf("failed to load from distributed storage: %w", err)
}
}
return nil
}
@@ -226,19 +226,19 @@ func (pm *persistenceManagerImpl) SynchronizeGraph(ctx context.Context) (*SyncRe
}
pm.syncInProgress = true
pm.mu.Unlock()
defer func() {
pm.mu.Lock()
pm.syncInProgress = false
pm.lastSyncTime = time.Now()
pm.mu.Unlock()
}()
result := &SyncResult{
StartTime: time.Now(),
Errors: make([]string, 0),
}
// Create local snapshot
localSnapshot, err := pm.createGraphSnapshot()
if err != nil {
@@ -246,31 +246,31 @@ func (pm *persistenceManagerImpl) SynchronizeGraph(ctx context.Context) (*SyncRe
result.Success = false
return result, err
}
// Get remote snapshot
remoteSnapshot, err := pm.getRemoteSnapshot(ctx)
if err != nil {
// Remote might not exist yet, continue with local
remoteSnapshot = nil
}
// Perform synchronization
if remoteSnapshot != nil {
err = pm.performBidirectionalSync(ctx, localSnapshot, remoteSnapshot, result)
} else {
err = pm.performInitialSync(ctx, localSnapshot, result)
}
if err != nil {
result.Errors = append(result.Errors, fmt.Sprintf("sync failed: %v", err))
result.Success = false
} else {
result.Success = true
}
result.EndTime = time.Now()
result.Duration = result.EndTime.Sub(result.StartTime)
return result, err
}
@@ -278,35 +278,27 @@ func (pm *persistenceManagerImpl) SynchronizeGraph(ctx context.Context) (*SyncRe
func (pm *persistenceManagerImpl) BackupGraph(ctx context.Context) error {
pm.mu.RLock()
defer pm.mu.RUnlock()
if !pm.config.EnableAutoBackup {
return fmt.Errorf("backup not enabled")
}
// Create graph snapshot
snapshot, err := pm.createGraphSnapshot()
if err != nil {
return fmt.Errorf("failed to create snapshot: %w", err)
}
// Serialize snapshot
data, err := json.Marshal(snapshot)
if err != nil {
return fmt.Errorf("failed to serialize snapshot: %w", err)
}
// Create backup configuration
backupConfig := &storage.BackupConfig{
Type: "temporal_graph",
Description: "Temporal graph backup",
Tags: []string{"temporal", "graph", "decision"},
Name: "temporal_graph",
Metadata: map[string]interface{}{
"node_count": snapshot.Metadata.NodeCount,
"edge_count": snapshot.Metadata.EdgeCount,
"decision_count": snapshot.Metadata.DecisionCount,
},
}
// Create backup
_, err = pm.backupManager.CreateBackup(ctx, backupConfig)
return err
@@ -316,19 +308,19 @@ func (pm *persistenceManagerImpl) BackupGraph(ctx context.Context) error {
func (pm *persistenceManagerImpl) RestoreGraph(ctx context.Context, backupID string) error {
pm.mu.Lock()
defer pm.mu.Unlock()
// Create restore configuration
restoreConfig := &storage.RestoreConfig{
OverwriteExisting: true,
ValidateIntegrity: true,
}
// Restore from backup
err := pm.backupManager.RestoreBackup(ctx, backupID, restoreConfig)
if err != nil {
return fmt.Errorf("failed to restore backup: %w", err)
}
// Reload graph from storage
return pm.LoadTemporalGraph(ctx)
}
@@ -338,14 +330,14 @@ func (pm *persistenceManagerImpl) RestoreGraph(ctx context.Context, backupID str
func (pm *persistenceManagerImpl) addToWriteBuffer(node *TemporalNode) error {
pm.bufferMutex.Lock()
defer pm.bufferMutex.Unlock()
pm.writeBuffer = append(pm.writeBuffer, node)
// Check if buffer is full
if len(pm.writeBuffer) >= pm.batchSize {
return pm.flushWriteBuffer()
}
return nil
}
@@ -353,59 +345,57 @@ func (pm *persistenceManagerImpl) flushWriteBuffer() error {
if len(pm.writeBuffer) == 0 {
return nil
}
// Create batch store request
batch := &storage.BatchStoreRequest{
Operations: make([]*storage.BatchStoreOperation, len(pm.writeBuffer)),
Contexts: make([]*storage.ContextStoreItem, len(pm.writeBuffer)),
Roles: pm.config.EncryptionRoles,
FailOnError: true,
}
for i, node := range pm.writeBuffer {
key := pm.generateNodeKey(node)
batch.Operations[i] = &storage.BatchStoreOperation{
Type: "store",
Key: key,
Data: node,
Roles: pm.config.EncryptionRoles,
batch.Contexts[i] = &storage.ContextStoreItem{
Context: node,
Roles: pm.config.EncryptionRoles,
}
}
// Execute batch store
ctx := context.Background()
_, err := pm.contextStore.BatchStore(ctx, batch)
if err != nil {
return fmt.Errorf("failed to flush write buffer: %w", err)
}
// Clear buffer
pm.writeBuffer = pm.writeBuffer[:0]
pm.lastFlush = time.Now()
return nil
}
func (pm *persistenceManagerImpl) persistNodeDirect(ctx context.Context, node *TemporalNode) error {
key := pm.generateNodeKey(node)
// Store in different layers
if pm.config.EnableLocalStorage {
if err := pm.localStorage.Store(ctx, key, node, nil); err != nil {
return fmt.Errorf("failed to store in local storage: %w", err)
}
}
if pm.config.EnableDistributedStorage {
if err := pm.distributedStore.Store(ctx, key, node, nil); err != nil {
return fmt.Errorf("failed to store in distributed storage: %w", err)
}
}
if pm.config.EnableEncryption {
if err := pm.encryptedStore.StoreEncrypted(ctx, key, node, pm.config.EncryptionRoles); err != nil {
return fmt.Errorf("failed to store encrypted: %w", err)
}
}
// Add to pending changes for synchronization
change := &PendingChange{
ID: fmt.Sprintf("%s-%d", node.ID, time.Now().UnixNano()),
@@ -415,9 +405,9 @@ func (pm *persistenceManagerImpl) persistNodeDirect(ctx context.Context, node *T
Timestamp: time.Now(),
Metadata: make(map[string]interface{}),
}
pm.pendingChanges[change.ID] = change
return nil
}
@@ -428,51 +418,51 @@ func (pm *persistenceManagerImpl) loadFromLocalStorage(ctx context.Context) erro
if err != nil {
return fmt.Errorf("failed to load metadata: %w", err)
}
var metadata *GraphMetadata
if err := json.Unmarshal(metadataData.([]byte), &metadata); err != nil {
return fmt.Errorf("failed to unmarshal metadata: %w", err)
}
// Load all nodes
pattern := pm.generateNodeKeyPattern()
nodeKeys, err := pm.localStorage.List(ctx, pattern)
if err != nil {
return fmt.Errorf("failed to list nodes: %w", err)
}
// Load nodes in batches
batchReq := &storage.BatchRetrieveRequest{
Keys: nodeKeys,
}
batchResult, err := pm.contextStore.BatchRetrieve(ctx, batchReq)
if err != nil {
return fmt.Errorf("failed to batch retrieve nodes: %w", err)
}
// Reconstruct graph
pm.graph.mu.Lock()
defer pm.graph.mu.Unlock()
pm.graph.nodes = make(map[string]*TemporalNode)
pm.graph.addressToNodes = make(map[string][]*TemporalNode)
pm.graph.influences = make(map[string][]string)
pm.graph.influencedBy = make(map[string][]string)
for key, result := range batchResult.Results {
if result.Error != nil {
continue // Skip failed retrievals
}
var node *TemporalNode
if err := json.Unmarshal(result.Data.([]byte), &node); err != nil {
continue // Skip invalid nodes
}
pm.reconstructGraphNode(node)
}
return nil
}
@@ -485,7 +475,7 @@ func (pm *persistenceManagerImpl) loadFromDistributedStorage(ctx context.Context
func (pm *persistenceManagerImpl) createGraphSnapshot() (*GraphSnapshot, error) {
pm.graph.mu.RLock()
defer pm.graph.mu.RUnlock()
snapshot := &GraphSnapshot{
Timestamp: time.Now(),
Nodes: make(map[string]*TemporalNode),
@@ -502,48 +492,48 @@ func (pm *persistenceManagerImpl) createGraphSnapshot() (*GraphSnapshot, error)
CreatedAt: time.Now(),
},
}
// Copy nodes
for id, node := range pm.graph.nodes {
snapshot.Nodes[id] = node
}
// Copy influences
for id, influences := range pm.graph.influences {
snapshot.Influences[id] = make([]string, len(influences))
copy(snapshot.Influences[id], influences)
}
// Copy influenced by
for id, influencedBy := range pm.graph.influencedBy {
snapshot.InfluencedBy[id] = make([]string, len(influencedBy))
copy(snapshot.InfluencedBy[id], influencedBy)
}
// Copy decisions
for id, decision := range pm.graph.decisions {
snapshot.Decisions[id] = decision
}
// Calculate checksum
snapshot.Checksum = pm.calculateSnapshotChecksum(snapshot)
return snapshot, nil
}
// getRemoteSnapshot fetches and decodes the graph snapshot stored in the
// distributed store under the canonical graph key. Returns the retrieval
// error unwrapped so callers can detect "not found" and fall back to an
// initial sync.
func (pm *persistenceManagerImpl) getRemoteSnapshot(ctx context.Context) (*GraphSnapshot, error) {
	key := pm.generateGraphKey()
	data, err := pm.distributedStore.Retrieve(ctx, key)
	if err != nil {
		return nil, err
	}
	// Guard the type assertion: an unchecked data.([]byte) would panic and
	// take down the sync worker goroutine if the store returns another type.
	raw, ok := data.([]byte)
	if !ok {
		return nil, fmt.Errorf("unexpected remote snapshot payload type %T", data)
	}
	var snapshot *GraphSnapshot
	if err := json.Unmarshal(raw, &snapshot); err != nil {
		return nil, fmt.Errorf("failed to unmarshal remote snapshot: %w", err)
	}
	return snapshot, nil
}
@@ -551,7 +541,7 @@ func (pm *persistenceManagerImpl) performBidirectionalSync(ctx context.Context,
// Compare snapshots and identify differences
conflicts := pm.identifyConflicts(local, remote)
result.ConflictsFound = len(conflicts)
// Resolve conflicts
for _, conflict := range conflicts {
resolved, err := pm.resolveConflict(ctx, conflict)
@@ -559,48 +549,48 @@ func (pm *persistenceManagerImpl) performBidirectionalSync(ctx context.Context,
result.Errors = append(result.Errors, fmt.Sprintf("failed to resolve conflict %s: %v", conflict.NodeID, err))
continue
}
// Apply resolution
if err := pm.applyConflictResolution(ctx, resolved); err != nil {
result.Errors = append(result.Errors, fmt.Sprintf("failed to apply resolution for %s: %v", conflict.NodeID, err))
continue
}
result.ConflictsResolved++
}
// Sync local changes to remote
err := pm.syncLocalToRemote(ctx, local, remote, result)
if err != nil {
return fmt.Errorf("failed to sync local to remote: %w", err)
}
// Sync remote changes to local
err = pm.syncRemoteToLocal(ctx, remote, local, result)
if err != nil {
return fmt.Errorf("failed to sync remote to local: %w", err)
}
return nil
}
// performInitialSync seeds the remote store with the complete local snapshot.
// Used when no remote snapshot exists yet; every local node is counted as
// both processed and created in the sync result.
func (pm *persistenceManagerImpl) performInitialSync(ctx context.Context, local *GraphSnapshot, result *SyncResult) error {
	payload, err := json.Marshal(local)
	if err != nil {
		return fmt.Errorf("failed to marshal snapshot: %w", err)
	}

	if err := pm.distributedStore.Store(ctx, pm.generateGraphKey(), payload, nil); err != nil {
		return fmt.Errorf("failed to store snapshot: %w", err)
	}

	total := len(local.Nodes)
	result.NodesProcessed = total
	result.NodesCreated = total
	return nil
}
@@ -609,7 +599,7 @@ func (pm *persistenceManagerImpl) performInitialSync(ctx context.Context, local
func (pm *persistenceManagerImpl) syncWorker() {
ticker := time.NewTicker(pm.config.SyncInterval)
defer ticker.Stop()
for range ticker.C {
ctx := context.Background()
if _, err := pm.SynchronizeGraph(ctx); err != nil {
@@ -622,7 +612,7 @@ func (pm *persistenceManagerImpl) syncWorker() {
func (pm *persistenceManagerImpl) flushWorker() {
ticker := time.NewTicker(pm.flushInterval)
defer ticker.Stop()
for range ticker.C {
pm.bufferMutex.Lock()
if time.Since(pm.lastFlush) >= pm.flushInterval && len(pm.writeBuffer) > 0 {
@@ -637,7 +627,7 @@ func (pm *persistenceManagerImpl) flushWorker() {
func (pm *persistenceManagerImpl) backupWorker() {
ticker := time.NewTicker(pm.config.BackupInterval)
defer ticker.Stop()
for range ticker.C {
ctx := context.Background()
if err := pm.BackupGraph(ctx); err != nil {
@@ -681,7 +671,7 @@ func (pm *persistenceManagerImpl) calculateSnapshotChecksum(snapshot *GraphSnaps
func (pm *persistenceManagerImpl) reconstructGraphNode(node *TemporalNode) {
// Add node to graph
pm.graph.nodes[node.ID] = node
// Update address mapping
addressKey := node.UCXLAddress.String()
if existing, exists := pm.graph.addressToNodes[addressKey]; exists {
@@ -689,17 +679,17 @@ func (pm *persistenceManagerImpl) reconstructGraphNode(node *TemporalNode) {
} else {
pm.graph.addressToNodes[addressKey] = []*TemporalNode{node}
}
// Reconstruct influence relationships
pm.graph.influences[node.ID] = make([]string, 0)
pm.graph.influencedBy[node.ID] = make([]string, 0)
// These would be rebuilt from the influence data in the snapshot
}
func (pm *persistenceManagerImpl) identifyConflicts(local, remote *GraphSnapshot) []*SyncConflict {
conflicts := make([]*SyncConflict, 0)
// Compare nodes
for nodeID, localNode := range local.Nodes {
if remoteNode, exists := remote.Nodes[nodeID]; exists {
@@ -714,7 +704,7 @@ func (pm *persistenceManagerImpl) identifyConflicts(local, remote *GraphSnapshot
}
}
}
return conflicts
}
@@ -727,28 +717,28 @@ func (pm *persistenceManagerImpl) resolveConflict(ctx context.Context, conflict
// Use conflict resolver to resolve the conflict
localNode := conflict.LocalData.(*TemporalNode)
remoteNode := conflict.RemoteData.(*TemporalNode)
resolvedNode, err := pm.conflictResolver.ResolveConflict(ctx, localNode, remoteNode)
if err != nil {
return nil, err
}
return &ConflictResolution{
ConflictID: conflict.NodeID,
Resolution: "merged",
ResolvedData: resolvedNode,
ResolvedAt: time.Now(),
ConflictID: conflict.NodeID,
Resolution: "merged",
ResolvedData: resolvedNode,
ResolvedAt: time.Now(),
}, nil
}
// applyConflictResolution writes the resolved node back into the in-memory
// graph and persists it directly to storage.
func (pm *persistenceManagerImpl) applyConflictResolution(ctx context.Context, resolution *ConflictResolution) error {
	// Guard the type assertion: an unchecked cast would panic the sync path
	// if a resolver ever returns something other than a *TemporalNode.
	resolvedNode, ok := resolution.ResolvedData.(*TemporalNode)
	if !ok {
		return fmt.Errorf("unexpected resolved data type %T for conflict %s",
			resolution.ResolvedData, resolution.ConflictID)
	}

	// Apply the resolved node back to the graph under the write lock.
	pm.graph.mu.Lock()
	pm.graph.nodes[resolvedNode.ID] = resolvedNode
	pm.graph.mu.Unlock()

	// Persist the resolved node.
	return pm.persistNodeDirect(ctx, resolvedNode)
}
@@ -757,7 +747,7 @@ func (pm *persistenceManagerImpl) syncLocalToRemote(ctx context.Context, local,
// Sync nodes that exist locally but not remotely, or are newer locally
for nodeID, localNode := range local.Nodes {
shouldSync := false
if remoteNode, exists := remote.Nodes[nodeID]; exists {
// Check if local is newer
if localNode.Timestamp.After(remoteNode.Timestamp) {
@@ -768,7 +758,7 @@ func (pm *persistenceManagerImpl) syncLocalToRemote(ctx context.Context, local,
shouldSync = true
result.NodesCreated++
}
if shouldSync {
key := pm.generateNodeKey(localNode)
data, err := json.Marshal(localNode)
@@ -776,19 +766,19 @@ func (pm *persistenceManagerImpl) syncLocalToRemote(ctx context.Context, local,
result.Errors = append(result.Errors, fmt.Sprintf("failed to marshal node %s: %v", nodeID, err))
continue
}
err = pm.distributedStore.Store(ctx, key, data, nil)
if err != nil {
result.Errors = append(result.Errors, fmt.Sprintf("failed to sync node %s to remote: %v", nodeID, err))
continue
}
if remoteNode, exists := remote.Nodes[nodeID]; exists && localNode.Timestamp.After(remoteNode.Timestamp) {
result.NodesUpdated++
}
}
}
return nil
}
@@ -796,7 +786,7 @@ func (pm *persistenceManagerImpl) syncRemoteToLocal(ctx context.Context, remote,
// Sync nodes that exist remotely but not locally, or are newer remotely
for nodeID, remoteNode := range remote.Nodes {
shouldSync := false
if localNode, exists := local.Nodes[nodeID]; exists {
// Check if remote is newer
if remoteNode.Timestamp.After(localNode.Timestamp) {
@@ -807,55 +797,41 @@ func (pm *persistenceManagerImpl) syncRemoteToLocal(ctx context.Context, remote,
shouldSync = true
result.NodesCreated++
}
if shouldSync {
// Add to local graph
pm.graph.mu.Lock()
pm.graph.nodes[remoteNode.ID] = remoteNode
pm.reconstructGraphNode(remoteNode)
pm.graph.mu.Unlock()
// Persist locally
err := pm.persistNodeDirect(ctx, remoteNode)
if err != nil {
result.Errors = append(result.Errors, fmt.Sprintf("failed to sync node %s to local: %v", nodeID, err))
continue
}
if localNode, exists := local.Nodes[nodeID]; exists && remoteNode.Timestamp.After(localNode.Timestamp) {
result.NodesUpdated++
}
}
}
return nil
}
// Supporting types for conflict resolution
// SyncConflict describes a divergence between the local and remote graph
// snapshots found during synchronization.
// BUG FIX: every field was previously declared twice (a merge artifact),
// which does not compile; the duplicates are removed.
type SyncConflict struct {
	Type       ConflictType `json:"type"`        // category of divergence
	NodeID     string       `json:"node_id"`     // temporal node the conflict concerns
	LocalData  interface{}  `json:"local_data"`  // local side of the conflict (typically *TemporalNode)
	RemoteData interface{}  `json:"remote_data"` // remote side of the conflict (typically *TemporalNode)
	Severity   string       `json:"severity"`    // free-form severity label
}
// ConflictType categorizes the kind of divergence detected between the
// local and remote graph snapshots during synchronization.
type ConflictType string

// Conflict categories produced by conflict identification.
const (
	ConflictTypeNodeMismatch ConflictType = "node_mismatch"           // same node differs between snapshots
	ConflictTypeInfluenceMismatch ConflictType = "influence_mismatch" // influence edges differ
	ConflictTypeMetadataMismatch ConflictType = "metadata_mismatch"   // snapshot metadata differs
)
// ConflictResolution records how a synchronization conflict was settled and
// carries the winning data to apply back to the graph.
type ConflictResolution struct {
	// ConflictID matches the NodeID of the SyncConflict that was resolved.
	ConflictID string `json:"conflict_id"`
	// Resolution names the strategy applied (e.g. "merged").
	Resolution string `json:"resolution"`
	// ResolvedData holds the winning payload; presumably a *TemporalNode — verify against resolvers.
	ResolvedData interface{} `json:"resolved_data"`
	// ResolvedAt is when the resolution was produced.
	ResolvedAt time.Time `json:"resolved_at"`
	// ResolvedBy optionally identifies the resolver; not populated in the visible code.
	ResolvedBy string `json:"resolved_by"`
}
// Default conflict resolver implementation
@@ -886,4 +862,4 @@ func (dcr *defaultConflictResolver) ResolveGraphConflict(ctx context.Context, lo
return localGraph, nil
}
return remoteGraph, nil
}
}