feat: wire context store scaffolding and dht test skeleton
@@ -52,3 +52,11 @@ WHOOSH currently loads a curated library of role prompts at startup. These promp
 - Existing prompt catalog: `project-queues/active/WHOOSH/prompts/`
 - Temporal wiring roadmap: `project-queues/active/CHORUS/docs/development/sec-slurp-ucxl-beacon-pin-steward.md`
 - Prior policy discussions (for context): `project-queues/active/CHORUS/docs/progress/report-SEC-SLURP-1.1.md`
+
+## Integration Plan
+
+1. **Mapper Service Stub** — add a `policy.NewPromptDerivedMapper` module under `pkg/whoosh/policy` that consumes the runtime prompt bundle, emits the JSON/YAML policy envelope, and persists it via SLURP's context store (tagged under `whoosh:policy`).
+2. **SLURP Startup Hook** — extend `pkg/slurp/slurp.go` to request the mapper output during initialisation; cache parsed ACLs and expose them to the temporal persistence manager and SHHH envelope writer.
+3. **SHHH Enforcement** — update `pkg/crypto/role_crypto_stub.go` (and the eventual production implementation) to honour the generated ACL templates when issuing wrapped keys or verifying access.
+4. **WHOOSH Overrides UI** — surface the optional override editor in the WHOOSH UI, writing deltas back to UCXL as described in this brief; ensure SLURP refreshes policies on UCXL change events.
+5. **Testing** — create end-to-end tests that mutate prompt definitions, run the mapper, and assert the resulting policies gate SLURP context retrieval and DHT envelope sealing correctly.
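The mapper described in step 1 of the plan above does not exist yet. The following is a minimal sketch of the shape it could take; apart from the `policy.NewPromptDerivedMapper` name, the `pkg/whoosh/policy` location, and the `whoosh:policy` tag (all taken from the plan), every type and method here (`PromptBundle`, `PolicyEnvelope`, `ContextWriter`, `Derive`) is a hypothetical stand-in.

```go
// Package policy sketch: hypothetical types; only the constructor name comes
// from the integration plan above.
package policy

import (
	"context"
	"encoding/json"
	"fmt"
)

// PromptBundle is an assumed view of the runtime prompt catalog.
type PromptBundle struct {
	Roles map[string][]string // role name -> prompt identifiers (assumed shape)
}

// PolicyEnvelope is the JSON/YAML envelope the mapper emits (assumed shape).
type PolicyEnvelope struct {
	Version int                 `json:"version"`
	ACLs    map[string][]string `json:"acls"` // role -> allowed scopes
}

// ContextWriter is a stand-in for SLURP's context store API.
type ContextWriter interface {
	Put(ctx context.Context, tag string, payload []byte) error
}

type PromptDerivedMapper struct {
	store ContextWriter
}

func NewPromptDerivedMapper(store ContextWriter) *PromptDerivedMapper {
	return &PromptDerivedMapper{store: store}
}

// Derive maps each role's prompts to an ACL entry and persists the envelope
// under the whoosh:policy tag, as described in step 1 of the plan.
func (m *PromptDerivedMapper) Derive(ctx context.Context, bundle PromptBundle) (*PolicyEnvelope, error) {
	env := &PolicyEnvelope{Version: 1, ACLs: make(map[string][]string)}
	for role, prompts := range bundle.Roles {
		env.ACLs[role] = append(env.ACLs[role], prompts...)
	}
	raw, err := json.Marshal(env)
	if err != nil {
		return nil, fmt.Errorf("marshal policy envelope: %w", err)
	}
	if err := m.store.Put(ctx, "whoosh:policy", raw); err != nil {
		return nil, fmt.Errorf("persist policy envelope: %w", err)
	}
	return env, nil
}
```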
pkg/slurp/slurp.go
@@ -64,6 +64,7 @@ type SLURP struct {
 	dht      dht.DHT
 	crypto   *crypto.AgeCrypto
 	election *election.ElectionManager
+	nodeID   string

 	// Roadmap: SEC-SLURP 1.1 persistent storage wiring
 	storagePath string
@@ -85,9 +86,15 @@ type SLURP struct {
 	currentAdmin string

 	// SEC-SLURP 1.1: lightweight in-memory context persistence
-	contextsMu    sync.RWMutex
-	contextStore  map[string]*slurpContext.ContextNode
-	resolvedCache map[string]*slurpContext.ResolvedContext
+	contextsMu         sync.RWMutex
+	contextCache       map[string]*slurpContext.ContextNode
+	resolvedCache      map[string]*slurpContext.ResolvedContext
+	contextBackend     storage.ContextStore
+	distributedStorage storage.DistributedStorage
+	cacheManager       storage.CacheManager
+	indexManager       storage.IndexManager
+	backupManager      storage.BackupManager
+	eventNotifier      storage.EventNotifier

 	// Background processing
 	ctx context.Context
@@ -382,16 +389,22 @@ func NewSLURP(

 	storagePath := defaultStoragePath(config)

+	nodeID := config.Agent.ID
+	if nodeID == "" {
+		nodeID = fmt.Sprintf("slurp-node-%d", time.Now().UnixNano())
+	}
+
 	slurp := &SLURP{
 		config:        config,
 		dht:           dhtInstance,
 		crypto:        cryptoInstance,
 		election:      electionManager,
+		nodeID:        nodeID,
 		ctx:           ctx,
 		cancel:        cancel,
 		metrics:       &SLURPMetrics{LastUpdated: time.Now()},
 		eventHandlers: make(map[EventType][]EventHandler),
-		contextStore:  make(map[string]*slurpContext.ContextNode),
+		contextCache:  make(map[string]*slurpContext.ContextNode),
 		resolvedCache: make(map[string]*slurpContext.ResolvedContext),
 		storagePath:   storagePath,
 	}
@@ -443,8 +456,8 @@ func (s *SLURP) Initialize(ctx context.Context) error {

 	// Initialize in-memory persistence (SEC-SLURP 1.1 bootstrap)
 	s.contextsMu.Lock()
-	if s.contextStore == nil {
-		s.contextStore = make(map[string]*slurpContext.ContextNode)
+	if s.contextCache == nil {
+		s.contextCache = make(map[string]*slurpContext.ContextNode)
 	}
 	if s.resolvedCache == nil {
 		s.resolvedCache = make(map[string]*slurpContext.ResolvedContext)
@@ -561,7 +574,7 @@ func (s *SLURP) Resolve(ctx context.Context, ucxlAddress string) (*ResolvedConte

 	built := buildResolvedContext(node)
 	s.contextsMu.Lock()
-	s.contextStore[key] = node
+	s.contextCache[key] = node
 	s.resolvedCache[key] = built
 	s.contextsMu.Unlock()

@@ -726,7 +739,7 @@ func (s *SLURP) UpsertContext(ctx context.Context, node *slurpContext.ContextNod
 	key := clone.UCXLAddress.String()

 	s.contextsMu.Lock()
-	s.contextStore[key] = clone
+	s.contextCache[key] = clone
 	s.resolvedCache[key] = resolved
 	s.contextsMu.Unlock()

@@ -1136,7 +1149,7 @@ func (s *SLURP) getContextNode(key string) *slurpContext.ContextNode {
 	s.contextsMu.RLock()
 	defer s.contextsMu.RUnlock()

-	if node, ok := s.contextStore[key]; ok {
+	if node, ok := s.contextCache[key]; ok {
 		return node
 	}
 	return nil
@@ -1186,6 +1199,59 @@ func (s *SLURP) setupPersistentStorage() error {
 	return nil
 }

+// initializeContextStore constructs the multi-tier context store facade.
+func (s *SLURP) initializeContextStore(ctx context.Context) error {
+	if s.contextBackend != nil {
+		return nil
+	}
+
+	if s.localStorage == nil {
+		return fmt.Errorf("context store requires local storage")
+	}
+
+	if s.cacheManager == nil {
+		s.cacheManager = storage.NewNoopCacheManager()
+	}
+	if s.indexManager == nil {
+		s.indexManager = storage.NewNoopIndexManager()
+	}
+	if s.backupManager == nil {
+		s.backupManager = storage.NewNoopBackupManager()
+	}
+	if s.eventNotifier == nil {
+		s.eventNotifier = storage.NewNoopEventNotifier()
+	}
+
+	var distributed storage.DistributedStorage
+	if s.dht != nil {
+		if s.distributedStorage == nil {
+			s.distributedStorage = storage.NewDistributedStorage(s.dht, s.nodeID, nil)
+		}
+		distributed = s.distributedStorage
+	}
+
+	options := storage.DefaultContextStoreOptions()
+	options.CachingEnabled = false
+	options.IndexingEnabled = false
+	options.EncryptionEnabled = false
+	options.AutoReplicate = distributed != nil
+
+	s.contextBackend = storage.NewContextStore(
+		s.nodeID,
+		s.localStorage,
+		distributed,
+		nil,
+		s.cacheManager,
+		s.indexManager,
+		s.backupManager,
+		s.eventNotifier,
+		options,
+	)
+	s.temporalStore = s.contextBackend
+
+	return nil
+}
+
 // initializeTemporalSystem wires the temporal graph to the DHT-backed persistence layer.
 func (s *SLURP) initializeTemporalSystem(ctx context.Context) error {
 	if s.temporalGraph != nil {
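The guard at the top of initializeContextStore is easy to lock in with a unit test. A minimal sketch, assuming it lives in package slurp next to slurp.go (so the unexported method and fields are reachable) and that a bare &SLURP{} literal is a valid receiver for this call:

```go
package slurp

import (
	"context"
	"testing"
)

// TestInitializeContextStoreRequiresLocalStorage exercises the guard in
// initializeContextStore: with no contextBackend and no localStorage
// configured, the method should refuse to build the store facade.
func TestInitializeContextStoreRequiresLocalStorage(t *testing.T) {
	s := &SLURP{}
	if err := s.initializeContextStore(context.Background()); err == nil {
		t.Fatal("expected an error when local storage is not configured")
	}
}
```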
@@ -1196,8 +1262,8 @@ func (s *SLURP) initializeTemporalSystem(ctx context.Context) error {
 		return fmt.Errorf("temporal persistence requires local storage")
 	}

-	if s.temporalStore == nil {
-		s.temporalStore = storage.NewInMemoryContextStore()
+	if err := s.initializeContextStore(ctx); err != nil {
+		return err
 	}

 	cfg := temporal.DefaultTemporalConfig()
@@ -1285,7 +1351,7 @@ func (s *SLURP) loadPersistedContexts(ctx context.Context) error {

 		address := strings.TrimPrefix(key, contextStoragePrefix)
 		nodeClone := node.Clone()
-		s.contextStore[address] = nodeClone
+		s.contextCache[address] = nodeClone
 		s.resolvedCache[address] = buildResolvedContext(nodeClone)
 		loaded++
 	}
pkg/slurp/storage/backup_manager_noop.go (new file)
@@ -0,0 +1,39 @@
+package storage
+
+import "context"
+
+// noopBackupManager provides a BackupManager that performs no operations.
+type noopBackupManager struct{}
+
+// NewNoopBackupManager returns a no-op backup manager.
+func NewNoopBackupManager() BackupManager {
+	return &noopBackupManager{}
+}
+
+func (n *noopBackupManager) CreateBackup(ctx context.Context, config *BackupConfig) (*BackupInfo, error) {
+	return &BackupInfo{Status: BackupStatusCompleted}, nil
+}
+
+func (n *noopBackupManager) RestoreBackup(ctx context.Context, backupID string, config *RestoreConfig) error {
+	return nil
+}
+
+func (n *noopBackupManager) ListBackups(ctx context.Context) ([]*BackupInfo, error) {
+	return []*BackupInfo{}, nil
+}
+
+func (n *noopBackupManager) DeleteBackup(ctx context.Context, backupID string) error {
+	return nil
+}
+
+func (n *noopBackupManager) ValidateBackup(ctx context.Context, backupID string) (*BackupValidation, error) {
+	return &BackupValidation{BackupID: backupID, Valid: true}, nil
+}
+
+func (n *noopBackupManager) ScheduleBackup(ctx context.Context, schedule *BackupSchedule) error {
+	return nil
+}
+
+func (n *noopBackupManager) GetBackupStats(ctx context.Context) (*BackupStatistics, error) {
+	return &BackupStatistics{}, nil
+}
pkg/slurp/storage/cache_manager_noop.go (new file)
@@ -0,0 +1,46 @@
+package storage
+
+import (
+	"context"
+	"time"
+)
+
+// noopCacheManager satisfies CacheManager when external cache infrastructure is unavailable.
+type noopCacheManager struct{}
+
+// NewNoopCacheManager returns a cache manager that always misses and performs no persistence.
+func NewNoopCacheManager() CacheManager {
+	return &noopCacheManager{}
+}
+
+func (n *noopCacheManager) Get(ctx context.Context, key string) (interface{}, bool, error) {
+	return nil, false, nil
+}
+
+func (n *noopCacheManager) Set(ctx context.Context, key string, data interface{}, ttl time.Duration) error {
+	return nil
+}
+
+func (n *noopCacheManager) Delete(ctx context.Context, key string) error {
+	return nil
+}
+
+func (n *noopCacheManager) DeletePattern(ctx context.Context, pattern string) error {
+	return nil
+}
+
+func (n *noopCacheManager) Clear(ctx context.Context) error {
+	return nil
+}
+
+func (n *noopCacheManager) Warm(ctx context.Context, keys []string) error {
+	return nil
+}
+
+func (n *noopCacheManager) GetCacheStats() (*CacheStatistics, error) {
+	return &CacheStatistics{}, nil
+}
+
+func (n *noopCacheManager) SetCachePolicy(policy *CachePolicy) error {
+	return nil
+}
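The no-op cache manager above always misses. If an in-process cache is wanted before real cache infrastructure lands, the same method set can be backed by a TTL map. A minimal sketch, assuming glob-style path.Match semantics for DeletePattern and leaving CacheStatistics and CachePolicy untouched because their fields are not shown in this diff:

```go
package storage

import (
	"context"
	"path"
	"sync"
	"time"
)

// ttlCacheManager is an illustrative in-memory CacheManager with per-entry TTLs.
type ttlCacheManager struct {
	mu      sync.RWMutex
	entries map[string]ttlEntry
	policy  *CachePolicy
}

type ttlEntry struct {
	data      interface{}
	expiresAt time.Time // zero value means no expiry
}

func NewTTLCacheManager() CacheManager {
	return &ttlCacheManager{entries: make(map[string]ttlEntry)}
}

func (c *ttlCacheManager) Get(ctx context.Context, key string) (interface{}, bool, error) {
	c.mu.RLock()
	entry, ok := c.entries[key]
	c.mu.RUnlock()
	if !ok || (!entry.expiresAt.IsZero() && time.Now().After(entry.expiresAt)) {
		return nil, false, nil
	}
	return entry.data, true, nil
}

func (c *ttlCacheManager) Set(ctx context.Context, key string, data interface{}, ttl time.Duration) error {
	entry := ttlEntry{data: data}
	if ttl > 0 {
		entry.expiresAt = time.Now().Add(ttl)
	}
	c.mu.Lock()
	c.entries[key] = entry
	c.mu.Unlock()
	return nil
}

func (c *ttlCacheManager) Delete(ctx context.Context, key string) error {
	c.mu.Lock()
	delete(c.entries, key)
	c.mu.Unlock()
	return nil
}

func (c *ttlCacheManager) DeletePattern(ctx context.Context, pattern string) error {
	c.mu.Lock()
	defer c.mu.Unlock()
	for key := range c.entries {
		if ok, err := path.Match(pattern, key); err == nil && ok {
			delete(c.entries, key)
		}
	}
	return nil
}

func (c *ttlCacheManager) Clear(ctx context.Context) error {
	c.mu.Lock()
	c.entries = make(map[string]ttlEntry)
	c.mu.Unlock()
	return nil
}

func (c *ttlCacheManager) Warm(ctx context.Context, keys []string) error {
	return nil // nothing to preload in a purely in-process cache
}

func (c *ttlCacheManager) GetCacheStats() (*CacheStatistics, error) {
	return &CacheStatistics{}, nil
}

func (c *ttlCacheManager) SetCachePolicy(policy *CachePolicy) error {
	c.mu.Lock()
	c.policy = policy
	c.mu.Unlock()
	return nil
}
```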
pkg/slurp/storage/event_notifier_noop.go (new file)
@@ -0,0 +1,24 @@
+package storage
+
+import "context"
+
+// noopEventNotifier implements EventNotifier with no side effects.
+type noopEventNotifier struct{}
+
+// NewNoopEventNotifier returns a no-op event notifier implementation.
+func NewNoopEventNotifier() EventNotifier {
+	return &noopEventNotifier{}
+}
+
+func (n *noopEventNotifier) NotifyStored(ctx context.Context, event *StorageEvent) error { return nil }
+func (n *noopEventNotifier) NotifyRetrieved(ctx context.Context, event *StorageEvent) error {
+	return nil
+}
+func (n *noopEventNotifier) NotifyUpdated(ctx context.Context, event *StorageEvent) error { return nil }
+func (n *noopEventNotifier) NotifyDeleted(ctx context.Context, event *StorageEvent) error { return nil }
+func (n *noopEventNotifier) Subscribe(ctx context.Context, eventType EventType, handler EventHandler) error {
+	return nil
+}
+func (n *noopEventNotifier) Unsubscribe(ctx context.Context, eventType EventType, handler EventHandler) error {
+	return nil
+}
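While the storage wiring is being debugged, a notifier that logs each event is a handy drop-in for the no-op above. A minimal sketch; it prints events with %+v because StorageEvent's fields are not part of this diff, and it does not track subscriptions:

```go
package storage

import (
	"context"
	"log"
)

// loggingEventNotifier prints each storage event; useful while the real
// notification pipeline is still being wired up.
type loggingEventNotifier struct{}

func NewLoggingEventNotifier() EventNotifier {
	return &loggingEventNotifier{}
}

func (l *loggingEventNotifier) NotifyStored(ctx context.Context, event *StorageEvent) error {
	log.Printf("storage event (stored): %+v", event)
	return nil
}

func (l *loggingEventNotifier) NotifyRetrieved(ctx context.Context, event *StorageEvent) error {
	log.Printf("storage event (retrieved): %+v", event)
	return nil
}

func (l *loggingEventNotifier) NotifyUpdated(ctx context.Context, event *StorageEvent) error {
	log.Printf("storage event (updated): %+v", event)
	return nil
}

func (l *loggingEventNotifier) NotifyDeleted(ctx context.Context, event *StorageEvent) error {
	log.Printf("storage event (deleted): %+v", event)
	return nil
}

func (l *loggingEventNotifier) Subscribe(ctx context.Context, eventType EventType, handler EventHandler) error {
	return nil // subscriptions are not tracked in this sketch
}

func (l *loggingEventNotifier) Unsubscribe(ctx context.Context, eventType EventType, handler EventHandler) error {
	return nil
}
```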
pkg/slurp/storage/index_manager_noop.go (new file)
@@ -0,0 +1,43 @@
+package storage
+
+import "context"
+
+// noopIndexManager satisfies the IndexManager interface without maintaining indexes.
+type noopIndexManager struct{}
+
+// NewNoopIndexManager returns a no-op index manager implementation.
+func NewNoopIndexManager() IndexManager {
+	return &noopIndexManager{}
+}
+
+func (n *noopIndexManager) CreateIndex(ctx context.Context, indexName string, config *IndexConfig) error {
+	return nil
+}
+
+func (n *noopIndexManager) UpdateIndex(ctx context.Context, indexName string, key string, data interface{}) error {
+	return nil
+}
+
+func (n *noopIndexManager) DeleteFromIndex(ctx context.Context, indexName string, key string) error {
+	return nil
+}
+
+func (n *noopIndexManager) Search(ctx context.Context, indexName string, query *SearchQuery) (*SearchResults, error) {
+	return &SearchResults{Query: query, Results: []*SearchResult{}}, nil
+}
+
+func (n *noopIndexManager) RebuildIndex(ctx context.Context, indexName string) error {
+	return nil
+}
+
+func (n *noopIndexManager) OptimizeIndex(ctx context.Context, indexName string) error {
+	return nil
+}
+
+func (n *noopIndexManager) GetIndexStats(ctx context.Context, indexName string) (*IndexStatistics, error) {
+	return &IndexStatistics{Name: indexName}, nil
+}
+
+func (n *noopIndexManager) ListIndexes(ctx context.Context) ([]string, error) {
+	return []string{}, nil
+}
pkg/slurp/temporal/dht_integration_test.go (new file)
@@ -0,0 +1,28 @@
+//go:build slurp_full
+
+package temporal
+
+import (
+	"context"
+	"testing"
+
+	"chorus/pkg/dht"
+	slurpStorage "chorus/pkg/slurp/storage"
+)
+
+// TestDHTBackedTemporalSync exercises the temporal persistence manager against the mock DHT.
+// The body is TBD; it establishes the scaffolding for a full integration test once the
+// storage wiring and replication hooks are stabilised.
+func TestDHTBackedTemporalSync(t *testing.T) {
+	t.Skip("TODO: implement DHT-backed temporal sync integration test")
+
+	ctx := context.Background()
+	mockDHT := dht.NewMockDHTInterface()
+	defer mockDHT.Close()
+
+	contextStore := slurpStorage.NewInMemoryContextStore()
+
+	_ = ctx
+	_ = mockDHT
+	_ = contextStore
+}
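A possible next step for the skeleton above is a helper that stands up the distributed storage layer on top of the mock DHT, mirroring the wiring added to slurp.go in this commit. This is a sketch only: it assumes the value returned by dht.NewMockDHTInterface satisfies dht.DHT, that a plain test node ID and nil options are acceptable for NewDistributedStorage, and that the helper would sit in the same slurp_full-tagged file.

```go
// buildTestDistributedStorage wires the mock DHT into the distributed storage
// layer so future sync assertions have something concrete to talk to.
func buildTestDistributedStorage(t *testing.T) slurpStorage.DistributedStorage {
	t.Helper()

	mockDHT := dht.NewMockDHTInterface()
	t.Cleanup(func() { mockDHT.Close() })

	// nil options mirrors initializeContextStore in slurp.go, which also passes
	// nil when constructing the production DistributedStorage.
	return slurpStorage.NewDistributedStorage(mockDHT, "slurp-test-node", nil)
}
```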