Initial commit - BUBBLE decision tracking system
- Added core BUBBLE architecture with decision envelopes and policy store - Implemented bundle API with FastAPI skeleton and OpenAPI specification - Added Go-based storage implementation with SQLite and RocksDB support - Created integrations for peer sync, vector search, and N8N workflows - Added comprehensive testing framework and documentation - Implemented provenance walking and decision checking algorithms 🤖 Generated with [Claude Code](https://claude.ai/code) Co-Authored-By: Claude <noreply@anthropic.com>
This commit is contained in:
7
src/storage/04-forward-index.jsonc
Normal file
7
src/storage/04-forward-index.jsonc
Normal file
@@ -0,0 +1,7 @@
|
||||
// forward_index.json
// Forward provenance index: maps a source decision record ID to its
// outgoing edges (each edge names the relation kind and the target ID).
{
  "dr:sha256:prev1": [
    { "relation": "influenced", "target": "dr:sha256:curr1" },
    { "relation": "superseded", "target": "dr:sha256:curr2" }
  ]
}
|
||||
7
src/storage/05-reverse-index.jsonc
Normal file
7
src/storage/05-reverse-index.jsonc
Normal file
@@ -0,0 +1,7 @@
|
||||
// reverse_index.json
// Reverse provenance index: maps a target decision record ID to its
// incoming edges (each edge names the relation kind and the source ID).
{
  "dr:sha256:curr1": [
    { "relation": "influenced_by", "source": "dr:sha256:prev1" },
    { "relation": "derived_from", "source": "dr:sha256:prev2" }
  ]
}
|
||||
7
src/storage/07-metadata-cache.jsonc
Normal file
7
src/storage/07-metadata-cache.jsonc
Normal file
@@ -0,0 +1,7 @@
|
||||
// Example metadata-cache entry for a single decision record.
{
  // Content-addressed decision record ID.
  "id": "dr:sha256:prev1",
  "statement": "We rejected library Z due to licensing issues",
  // ISO-8601 UTC timestamp (note the trailing "Z").
  "timestamp": "2025-06-10T09:15:00Z",
  "lifecycle_state": "rejected",
  // Per-role visibility flags for this record.
  "role_exposure": {"engineer": true, "pm": true, "research": false}
}
|
||||
7
src/storage/16-metadata-cache-layout.jsonc
Normal file
7
src/storage/16-metadata-cache-layout.jsonc
Normal file
@@ -0,0 +1,7 @@
|
||||
// Layout (shape) of a metadata-cache entry; "..." marks elided values.
{
  "statement": "...",
  "timestamp": "...",
  "lifecycle_state": "rejected",
  // Per-role visibility flags.
  "role_exposure": {"engineer": true, "pm": true},
  // Free-form tags used for filtering/search.
  "tags": ["cloud", "licensing"]
}
|
||||
21
src/storage/18-rocksdb-sqlite-schema.sql
Normal file
21
src/storage/18-rocksdb-sqlite-schema.sql
Normal file
@@ -0,0 +1,21 @@
|
||||
-- Core decision records. JSON-valued columns are stored as serialized TEXT
-- because SQLite has no native JSON column type.
CREATE TABLE decisions (
    id TEXT PRIMARY KEY,
    statement TEXT,
    lifecycle_state TEXT,
    role_exposure TEXT, -- JSON
    tags TEXT, -- JSON array
    timestamp DATETIME
);

-- Provenance edges between decisions. The relation kind is part of the key:
-- the forward/reverse index files use several relations (influenced,
-- superseded, derived_from, ...), and a PK of only (source_id, target_id)
-- would make INSERT OR IGNORE silently drop a second relation for the
-- same pair of decisions.
CREATE TABLE edges (
    source_id TEXT,
    target_id TEXT,
    relation TEXT,
    PRIMARY KEY (source_id, target_id, relation)
);

-- Ancestor lookups query edges by target_id; the implicit PK index only
-- serves source_id-prefixed scans, so target_id needs its own index.
CREATE INDEX idx_edges_target ON edges (target_id);

CREATE TABLE constraints (
    id TEXT PRIMARY KEY,
    scope TEXT, -- "global" or "role:<role>"
    description TEXT
);
|
||||
58
src/storage/19-write-sync-pseudocode.py
Normal file
58
src/storage/19-write-sync-pseudocode.py
Normal file
@@ -0,0 +1,58 @@
|
||||
def store_decision(dr_id, metadata, ancestors, descendants):
    """Persist a decision record to RocksDB and append it to the WAL.

    Writes the metadata blob plus both provenance indexes (reverse index
    for this record, forward index entries on its neighbors) so reads
    never have to derive one direction from the other.
    """
    # RocksDB writes: metadata blob keyed by "meta:<id>".
    rocks.put(f"meta:{dr_id}", serialize(metadata))

    # Ancestor edges: this record's reverse index, and each ancestor's
    # forward index.
    for anc in ancestors:
        rocks.append_list(f"rev:{dr_id}", anc)
        rocks.append_list(f"fwd:{anc}", dr_id)

    # Descendant edges: mirror of the above. The original only shipped
    # descendants to the WAL, so sync_to_sqlite() materialized them as
    # SQLite edges while the RocksDB fwd/rev indexes never saw them,
    # leaving the two stores inconsistent.
    for desc in descendants:
        rocks.append_list(f"fwd:{dr_id}", desc)
        rocks.append_list(f"rev:{desc}", dr_id)

    # WAL append so sync_to_sqlite() can replay this write into SQLite.
    wal.write({
        "type": "decision",
        "id": dr_id,
        "metadata": metadata,
        "ancestors": ancestors,
        "descendants": descendants
    })
|
||||
|
||||
|
||||
def sync_to_sqlite():
    """Drain the write-ahead log in batches and mirror each decision
    (plus its provenance edges) into SQLite.

    Each batch is applied inside one transaction; the WAL batch is only
    marked complete after the transaction commits, so a crash mid-batch
    replays the whole batch (the upsert/INSERT OR IGNORE statements make
    replay idempotent).
    """
    # This file has no module-level imports, so without this line the
    # json.dumps calls below raise NameError at runtime.
    import json

    while True:
        batch = wal.read_batch(limit=100)
        if not batch:
            break
        with sqlite_conn:  # one transaction per batch
            for entry in batch:
                if entry["type"] == "decision":
                    # Upsert into decisions table
                    sqlite_conn.execute("""
                        INSERT INTO decisions (id, statement, lifecycle_state, role_exposure, tags, timestamp)
                        VALUES (?, ?, ?, ?, ?, ?)
                        ON CONFLICT(id) DO UPDATE SET
                            statement=excluded.statement,
                            lifecycle_state=excluded.lifecycle_state,
                            role_exposure=excluded.role_exposure,
                            tags=excluded.tags,
                            timestamp=excluded.timestamp
                    """, (
                        entry["id"],
                        entry["metadata"]["statement"],
                        entry["metadata"]["lifecycle_state"],
                        json.dumps(entry["metadata"]["role_exposure"]),
                        json.dumps(entry["metadata"]["tags"]),
                        entry["metadata"]["timestamp"]
                    ))

                    # Edges: each ancestor points at this decision ...
                    for anc in entry["ancestors"]:
                        sqlite_conn.execute("""
                            INSERT OR IGNORE INTO edges (source_id, target_id, relation)
                            VALUES (?, ?, ?)
                        """, (anc, entry["id"], "influences"))

                    # ... and this decision points at each descendant.
                    for desc in entry["descendants"]:
                        sqlite_conn.execute("""
                            INSERT OR IGNORE INTO edges (source_id, target_id, relation)
                            VALUES (?, ?, ?)
                        """, (entry["id"], desc, "influences"))
        wal.mark_batch_complete(batch)
|
||||
78
src/storage/rocksdb.go
Normal file
78
src/storage/rocksdb.go
Normal file
@@ -0,0 +1,78 @@
|
||||
package storage
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"gitea.deepblack.cloud/chorus/bubble/models"
|
||||
"github.com/tecbot/gorocksdb"
|
||||
)
|
||||
|
||||
// RocksDBStore is an implementation of the Storage interface using RocksDB.
type RocksDBStore struct {
	// DB is the underlying gorocksdb handle; callers must Close() it
	// (via RocksDBStore.Close) when done.
	DB *gorocksdb.DB
}
|
||||
|
||||
// NewRocksDBStore creates and initializes a new RocksDB database.
|
||||
func NewRocksDBStore(dbPath string) (*RocksDBStore, error) {
|
||||
opts := gorocksdb.NewDefaultOptions()
|
||||
opts.SetCreateIfMissing(true)
|
||||
db, err := gorocksdb.OpenDb(opts, dbPath)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return &RocksDBStore{DB: db}, nil
|
||||
}
|
||||
|
||||
// GetDecisionMetadata retrieves a decision's metadata from RocksDB.
|
||||
func (r *RocksDBStore) GetDecisionMetadata(drID string) (*models.DecisionRecordSummary, error) {
|
||||
ro := gorocksdb.NewDefaultReadOptions()
|
||||
// Keys are stored as "meta:<id>"
|
||||
key := []byte("meta:" + drID)
|
||||
|
||||
slice, err := r.DB.Get(ro, key)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer slice.Free()
|
||||
|
||||
if !slice.Exists() {
|
||||
return nil, nil // Not found
|
||||
}
|
||||
|
||||
var summary models.DecisionRecordSummary
|
||||
if err := json.Unmarshal(slice.Data(), &summary); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return &summary, nil
|
||||
}
|
||||
|
||||
// GetAncestors retrieves a decision's ancestor IDs from RocksDB.
|
||||
func (r *RocksDBStore) GetAncestors(drID string) ([]string, error) {
|
||||
ro := gorocksdb.NewDefaultReadOptions()
|
||||
// Keys are stored as "rev:<id>"
|
||||
key := []byte("rev:" + drID)
|
||||
|
||||
slice, err := r.DB.Get(ro, key)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer slice.Free()
|
||||
|
||||
if !slice.Exists() {
|
||||
return nil, nil // Not found, no ancestors
|
||||
}
|
||||
|
||||
var ancestorIDs []string
|
||||
if err := json.Unmarshal(slice.Data(), &ancestorIDs); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return ancestorIDs, nil
|
||||
}
|
||||
|
||||
// Close closes the RocksDB database connection.
|
||||
func (r *RocksDBStore) Close() {
|
||||
if r.DB != nil {
|
||||
r.DB.Close()
|
||||
}
|
||||
}
|
||||
90
src/storage/sqlite.go
Normal file
90
src/storage/sqlite.go
Normal file
@@ -0,0 +1,90 @@
|
||||
package storage
|
||||
|
||||
import (
	"database/sql"
	"encoding/json"
	"io/ioutil"
	"path/filepath"

	"gitea.deepblack.cloud/chorus/bubble/models"
	_ "github.com/mattn/go-sqlite3"
)
|
||||
|
||||
// SQLiteStore is an implementation of the Storage interface using SQLite.
type SQLiteStore struct {
	// DB is the pooled database handle created by NewSQLiteStore.
	DB *sql.DB
}
|
||||
|
||||
// NewSQLiteStore connects to the SQLite database and returns a new SQLiteStore.
|
||||
func NewSQLiteStore(dbPath string) (*SQLiteStore, error) {
|
||||
db, err := sql.Open("sqlite3", dbPath)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if err = db.Ping(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return &SQLiteStore{DB: db}, nil
|
||||
}
|
||||
|
||||
// Setup reads the schema file and executes it to create the database tables.
|
||||
func (s *SQLiteStore) Setup(schemaPath string) error {
|
||||
schema, err := ioutil.ReadFile(filepath.Clean(schemaPath))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
_, err = s.DB.Exec(string(schema))
|
||||
return err
|
||||
}
|
||||
|
||||
// GetDecisionMetadata retrieves a single decision record's metadata from the database.
|
||||
func (s *SQLiteStore) GetDecisionMetadata(drID string) (*models.DecisionRecordSummary, error) {
|
||||
row := s.DB.QueryRow("SELECT id, statement, lifecycle_state, role_exposure, tags, timestamp FROM decisions WHERE id = ?", drID)
|
||||
|
||||
var summary models.DecisionRecordSummary
|
||||
var roleExposureJSON, tagsJSON string
|
||||
|
||||
err := row.Scan(
|
||||
&summary.ID,
|
||||
&summary.Statement,
|
||||
&summary.LifecycleState,
|
||||
&roleExposureJSON,
|
||||
&tagsJSON,
|
||||
&summary.Timestamp,
|
||||
)
|
||||
if err != nil {
|
||||
if err == sql.ErrNoRows {
|
||||
return nil, nil // Return nil, nil if not found
|
||||
}
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Unmarshal JSON fields
|
||||
if err := json.Unmarshal([]byte(roleExposureJSON), &summary.RoleExposure); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if err := json.Unmarshal([]byte(tagsJSON), &summary.Tags); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return &summary, nil
|
||||
}
|
||||
|
||||
// GetAncestors retrieves the IDs of all direct ancestors for a given decision record.
|
||||
func (s *SQLiteStore) GetAncestors(drID string) ([]string, error) {
|
||||
rows, err := s.DB.Query("SELECT source_id FROM edges WHERE target_id = ?", drID)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer rows.Close()
|
||||
|
||||
var ancestorIDs []string
|
||||
for rows.Next() {
|
||||
var id string
|
||||
if err := rows.Scan(&id); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
ancestorIDs = append(ancestorIDs, id)
|
||||
}
|
||||
|
||||
return ancestorIDs, nil
|
||||
}
|
||||
33
src/storage/storage.go
Normal file
33
src/storage/storage.go
Normal file
@@ -0,0 +1,33 @@
|
||||
package storage
|
||||
|
||||
import "gitea.deepblack.cloud/chorus/bubble/models"
|
||||
|
||||
// Storage defines the interface for accessing the decision provenance data.
// This allows for swapping the underlying database implementation.
type Storage interface {
	// GetDecisionMetadata returns the summary for drID. Both
	// implementations in this package return (nil, nil) when the
	// record does not exist, so callers must check for a nil summary.
	GetDecisionMetadata(drID string) (*models.DecisionRecordSummary, error)
	// GetAncestors returns the IDs of the direct ancestors of drID.
	GetAncestors(drID string) ([]string, error)
	// Add more methods as needed, e.g., for writing, caching, etc.
}
|
||||
|
||||
// RocksDBStore is an implementation of the Storage interface using RocksDB.
//
// NOTE(review): rocksdb.go in this same package also declares a type named
// RocksDBStore (with a real gorocksdb.DB field). Two declarations of the
// same name in one package will not compile; this placeholder should be
// deleted or renamed.
type RocksDBStore struct {
	// DB *gorocksdb.DB // Placeholder for the actual RocksDB client
}
|
||||
|
||||
// NewRocksDBStore creates a new RocksDBStore.
//
// NOTE(review): duplicates the constructor in rocksdb.go (same package,
// same name, different signature) — Go forbids this; one of the two
// declarations must be removed.
func NewRocksDBStore() (*RocksDBStore, error) {
	// Placeholder for RocksDB initialization logic
	return &RocksDBStore{}, nil
}
|
||||
|
||||
// GetDecisionMetadata is an unimplemented stub; it currently reports every
// record as not found by always returning (nil, nil).
//
// NOTE(review): the real implementation lives in rocksdb.go, which declares
// the same method on its own RocksDBStore type — the duplicate type makes
// the package uncompilable until one copy is removed.
func (r *RocksDBStore) GetDecisionMetadata(drID string) (*models.DecisionRecordSummary, error) {
	// Placeholder: Implement logic to fetch metadata from RocksDB.
	// This will involve deserializing the data into the models.DecisionRecordSummary struct.
	return nil, nil
}
|
||||
|
||||
// GetAncestors is an unimplemented stub; it currently reports no ancestors
// for any decision by always returning (nil, nil).
//
// NOTE(review): duplicated by the real implementation in rocksdb.go; see
// the type-level note above about the conflicting RocksDBStore declaration.
func (r *RocksDBStore) GetAncestors(drID string) ([]string, error) {
	// Placeholder: Implement logic to fetch ancestor IDs from the reverse index in RocksDB.
	return nil, nil
}
|
||||
Reference in New Issue
Block a user