Initial commit - BUBBLE decision tracking system
- Added core BUBBLE architecture with decision envelopes and policy store
- Implemented bundle API with FastAPI skeleton and OpenAPI specification
- Added Go-based storage implementation with SQLite and RocksDB support
- Created integrations for peer sync, vector search, and N8N workflows
- Added comprehensive testing framework and documentation
- Implemented provenance walking and decision checking algorithms

🤖 Generated with [Claude Code](https://claude.ai/code)

Co-Authored-By: Claude <noreply@anthropic.com>
src/MANIFEST.md (new file, 41 lines)
@@ -0,0 +1,41 @@
# BUBBLE Project Snippet Manifest

This file maps the reference keys in `BUBBLE-PROJECT-CONVERSATION.md` to the extracted source files.

| Reference Key | Source File |
|---|---|
| `BUBBLE-SNIPPET-01` | `src/01-dr-envelope.jsonc` |
| `BUBBLE-SNIPPET-02` | `src/02-dr-payload.jsonc` |
| `BUBBLE-SNIPPET-03` | `src/03-policy-store-example.jsonc` |
| `BUBBLE-SNIPPET-04` | `src/04-forward-index.jsonc` |
| `BUBBLE-SNIPPET-05` | `src/05-reverse-index.jsonc` |
| `BUBBLE-SNIPPET-06` | `src/06-provenance-walk.py` |
| `BUBBLE-SNIPPET-07` | `src/07-metadata-cache.jsonc` |
| `BUBBLE-SNIPPET-08` | `src/08-fast-check.py` |
| `BUBBLE-SNIPPET-09` | `src/09-decision-walk-request.jsonc` |
| `BUBBLE-SNIPPET-10` | `src/10-decision-walk-response.jsonc` |
| `BUBBLE-SNIPPET-11` | `src/11-decision-check-request.jsonc` |
| `BUBBLE-SNIPPET-12` | `src/12-decision-check-response.jsonc` |
| `BUBBLE-SNIPPET-13` | `src/13-decision-bundle-request.jsonc` |
| `BUBBLE-SNIPPET-14` | `src/14-decision-bundle-response.jsonc` |
| `BUBBLE-SNIPPET-15` | `src/15-walk-back-pseudocode.py` |
| `BUBBLE-SNIPPET-16` | `src/16-metadata-cache-layout.jsonc` |
| `BUBBLE-SNIPPET-17` | `src/17-peer-sync.json` |
| `BUBBLE-SNIPPET-18` | `src/18-rocksdb-sqlite-schema.sql` |
| `BUBBLE-SNIPPET-19` | `src/19-write-sync-pseudocode.py` |
| `BUBBLE-SNIPPET-20` | `src/20-walk-back-hybrid.py` |
| `BUBBLE-SNIPPET-21` | `src/21-bundle-api-request.json` |
| `BUBBLE-SNIPPET-22` | `src/22-bundle-api-response.json` |
| `BUBBLE-SNIPPET-23` | `src/23-bundle-timeline-element.json` |
| `BUBBLE-SNIPPET-24` | `src/24-assemble-bundle-pseudocode.py` |
| `BUBBLE-SNIPPET-25` | `src/25-minimal-bundle-example.json` |
| `BUBBLE-SNIPPET-26` | `src/26-fastapi-skeleton.py` |
| `BUBBLE-SNIPPET-27` | `src/27-vector-search-integration.py` |
| `BUBBLE-SNIPPET-28` | `src/28-decryption-stub.py` |
| `BUBBLE-SNIPPET-29` | `src/29-llm-prompt-template.py` |
| `BUBBLE-SNIPPET-30` | `src/30-unit-tests.py` |
| `BUBBLE-SNIPPET-31` | `src/31-llm-prompt-template.txt` |
| `BUBBLE-SNIPPET-32` | `src/32-n8n-webhook-send.py` |
| `BUBBLE-SNIPPET-33` | `src/33-flask-handler.py` |
| `BUBBLE-SNIPPET-34` | `src/34-n8n-workflow.json` |
| `BUBBLE-SNIPPET-35` | `src/35-openapi-spec.yaml` |
src/api/09-decision-walk-request.jsonc (new file, 8 lines)
@@ -0,0 +1,8 @@
// POST body
{
  "start_id": "dr:sha256:abc123",
  "direction": "upstream", // or "downstream"
  "max_hops": 3,
  "role": "engineer",
  "filter_state": ["rejected", "approved"]
}
src/api/10-decision-walk-response.jsonc (new file, 13 lines)
@@ -0,0 +1,13 @@
// response
{
  "start_id": "dr:sha256:abc123",
  "visited": [
    {
      "id": "dr:sha256:prev1",
      "relation": "influenced_by",
      "statement": "Rejected libZ due to licensing",
      "timestamp": "2025-06-10T09:15:00Z"
    },
    ...
  ]
}
src/api/11-decision-check-request.jsonc (new file, 5 lines)
@@ -0,0 +1,5 @@
// POST body
{
  "query": "use library Z for cloud deployment",
  "role": "engineer"
}
src/api/12-decision-check-response.jsonc (new file, 6 lines)
@@ -0,0 +1,6 @@
// response
{
  "tried_before": true,
  "matched_id": "dr:sha256:prev1",
  "reason": "Rejected libZ due to incompatible licensing with SaaS model"
}
src/api/13-decision-bundle-request.jsonc (new file, 6 lines)
@@ -0,0 +1,6 @@
// POST body
{
  "start_id": "dr:sha256:abc123",
  "max_hops": 3,
  "role": "pm"
}
src/api/14-decision-bundle-response.jsonc (new file, 20 lines)
@@ -0,0 +1,20 @@
// response
{
  "timeline": [
    {
      "id": "dr:sha256:prev3",
      "timestamp": "...",
      "statement": "...",
      "constraints": ["budget < $10k"]
    },
    ...
  ],
  "constraints_summary": [
    "Budgetary constraint prevents line of reasoning",
    "Cloud infra incompatible with library Z"
  ],
  "goal_alignment": {
    "project_goal": "...",
    "alignment_score": 0.87
  }
}
src/api/21-bundle-api-request.json (new file, 9 lines)
@@ -0,0 +1,9 @@
{
  "start_id": "dr:sha256:abc123", // or null if query-based anchor
  "query": "evaluate using lib Z for streaming",
  "role": "engineer",
  "max_hops": 3,
  "top_k": 10,
  "include_full_dr": false, // whether to include decrypted full payloads (auth required)
  "redaction": true // apply role-based redaction rules
}
src/api/22-bundle-api-response.json (new file, 14 lines)
@@ -0,0 +1,14 @@
{
  "bundle_id": "bundle:sha256:...",
  "start_id": "dr:sha256:abc123",
  "generated_at": "2025-08-11T...",
  "summary": "Short human-readable summary of provenance & decision state",
  "timeline": [ ... ], // ordered list of ancestor DR summaries
  "constraints_summary": [...],
  "key_evidence_refs": [...], // UCXL addresses
  "goal_alignment": { "score": 0.82, "reasons": [...] },
  "suggested_actions": [...],
  "escalation": { "required": false, "who": [] },
  "signatures": [...], // leader + assembler sign
  "cache_hit": true|false
}
src/api/26-fastapi-skeleton.py (new file, 241 lines)
@@ -0,0 +1,241 @@
from fastapi import FastAPI, HTTPException
from pydantic import BaseModel, Field
from typing import Optional, List
import json
import hashlib
import itertools
import time
import rocksdb
import sqlite3

app = FastAPI()

# === RocksDB setup ===
rocks = rocksdb.DB("rocksdb_data", rocksdb.Options(create_if_missing=True))

# === SQLite setup ===
sqlite_conn = sqlite3.connect("decisions.db")
sqlite_conn.row_factory = sqlite3.Row


# --- Models ---
class BundleRequest(BaseModel):
    start_id: Optional[str] = None
    query: Optional[str] = None
    role: str = Field(..., example="engineer")
    max_hops: int = Field(3, ge=1, le=10)
    top_k: int = Field(10, ge=1, le=50)
    include_full_dr: bool = False
    redaction: bool = True


class DRMetadata(BaseModel):
    id: str
    statement: str
    lifecycle_state: Optional[str] = None
    role_exposure: dict
    tags: List[str] = []
    timestamp: str
    relation: Optional[str] = None
    score: Optional[float] = None


# --- Helpers ---
def serialize(obj) -> bytes:
    return json.dumps(obj).encode("utf-8")


def deserialize(data: bytes):
    return json.loads(data.decode("utf-8"))


def sha256_of_content(content: str) -> str:
    return hashlib.sha256(content.encode("utf-8")).hexdigest()


def now_iso() -> str:
    return time.strftime("%Y-%m-%dT%H:%M:%SZ", time.gmtime())


def get_metadata(dr_id: str) -> Optional[dict]:
    # Try RocksDB first
    val = rocks.get(f"meta:{dr_id}".encode())
    if val:
        return deserialize(val)
    # Fall back to SQLite
    row = sqlite_conn.execute(
        "SELECT statement, lifecycle_state, role_exposure, tags, timestamp FROM decisions WHERE id=?",
        (dr_id,),
    ).fetchone()
    if row:
        return {
            "id": dr_id,
            "statement": row["statement"],
            "lifecycle_state": row["lifecycle_state"],
            "role_exposure": json.loads(row["role_exposure"]),
            "tags": json.loads(row["tags"]),
            "timestamp": row["timestamp"],
        }
    return None


def get_ancestors(dr_id: str) -> List[str]:
    # RocksDB reverse index, falling back to SQLite edges
    val = rocks.get(f"rev:{dr_id}".encode())
    if val:
        return deserialize(val)
    rows = sqlite_conn.execute(
        "SELECT source_id FROM edges WHERE target_id=? AND relation='influences'",
        (dr_id,),
    ).fetchall()
    return [r[0] for r in rows]


def walk_back(start_id: str, n: int, role: str, top_k: int) -> List[DRMetadata]:
    from collections import deque
    import heapq

    visited = set()
    results = []
    queue = deque([(start_id, 0)])
    tiebreak = itertools.count()  # keeps heap tuples comparable when scores tie

    # Dummy embed/sim functions — replace with your actual implementations
    def embed(text): return text

    def cosine_similarity(a, b): return 1.0 if a == b else 0.5

    query_vec = embed("")  # empty here — extend as needed

    while queue:
        dr_id, depth = queue.popleft()
        if dr_id in visited or depth > n:
            continue
        visited.add(dr_id)

        meta = get_metadata(dr_id)
        if not meta:
            continue

        # Role exposure filter
        if role and not meta["role_exposure"].get(role, False):
            continue

        # Score heuristic: favor close ancestors and "approved" states
        dist_score = max(0, (n - depth) / n)
        state_bonus = 1.1 if meta.get("lifecycle_state") == "approved" else 1.0
        score = dist_score * state_bonus

        # The counter prevents heapq from comparing DRMetadata objects on ties
        heapq.heappush(results, (-score, next(tiebreak), DRMetadata(**meta, score=score)))

        for anc in get_ancestors(dr_id):
            queue.append((anc, depth + 1))

    # Return top_k sorted descending by score
    sorted_results = [md for _, _, md in heapq.nsmallest(top_k, results)]
    return sorted_results


def assemble_bundle(
    start_id: str,
    role: str,
    max_hops: int,
    top_k: int,
    include_full_dr: bool,
    redact: bool,
    query: Optional[str] = None,
) -> dict:
    # TODO: integrate query embedding and refined scoring later

    timeline = []
    evidence_refs = set()

    ancestors = walk_back(start_id, max_hops, role, top_k)

    for meta in ancestors:
        item = {
            "id": meta.id,
            "timestamp": meta.timestamp,
            "statement": meta.statement if not redact else redact_field(meta.statement, role),
            "lifecycle_state": meta.lifecycle_state,
            "score": meta.score,
            "role_exposure": meta.role_exposure,
            "tags": meta.tags,
            "relation_to_start": meta.relation,
            # "full_payload": ... fetch + decrypt if include_full_dr and authorized
            # For now omitted for brevity
        }
        timeline.append(item)
        # Collect evidence refs from meta if available (stub)
        # evidence_refs.update(meta.get("evidence_refs", []))

    summary = f"Decision bundle starting at {start_id} with {len(timeline)} items."

    bundle_content = {
        "bundle_id": f"bundle:sha256:{sha256_of_content(summary)}",
        "start_id": start_id,
        "generated_at": now_iso(),
        "summary": summary,
        "timeline": timeline,
        "constraints_summary": [],
        "key_evidence_refs": list(evidence_refs),
        "goal_alignment": {},
        "suggested_actions": [],
        "escalation": {"required": False, "who": []},
        "signatures": [],
        "cache_hit": False,
    }

    return bundle_content


def redact_field(text: str, role: str) -> str:
    # Stub: redact sensitive info based on role
    # Replace with your own policy
    if role == "pm":
        return text
    else:
        return text.replace("SECRET", "[REDACTED]")


def cache_get(key: str):
    val = rocks.get(key.encode())
    if val:
        return deserialize(val)
    return None


def cache_set(key: str, value: dict):
    rocks.put(key.encode(), serialize(value))


# --- Endpoint ---
@app.post("/decision/bundle")
async def decision_bundle(req: BundleRequest):
    # Validate inputs
    if not req.start_id and not req.query:
        raise HTTPException(status_code=400, detail="start_id or query required")

    # Resolve anchor if query only
    start_id = req.start_id
    if not start_id:
        # Placeholder: your vector search to find anchor DR by query
        # For now raise error
        raise HTTPException(status_code=400, detail="Query-based anchor resolution not implemented")

    cache_key = f"bundle:{start_id}:{req.role}:{req.max_hops}:{hash(req.query or '')}:{req.top_k}"
    cached_bundle = cache_get(cache_key)
    if cached_bundle:
        cached_bundle["cache_hit"] = True
        return cached_bundle

    bundle = assemble_bundle(
        start_id=start_id,
        role=req.role,
        max_hops=req.max_hops,
        top_k=req.top_k,
        include_full_dr=req.include_full_dr,
        redact=req.redaction,
        query=req.query,
    )

    cache_set(cache_key, bundle)
    return bundle
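A quick way to exercise the endpoint above, as a sketch: save the skeleton as fastapi_skeleton.py (the numeric filename cannot be imported directly) and drive it with FastAPI's TestClient. The DR ID is illustrative and must exist in your store.

from fastapi.testclient import TestClient

from fastapi_skeleton import app  # assumed module name for the skeleton above

client = TestClient(app)
resp = client.post(
    "/decision/bundle",
    json={"start_id": "dr:sha256:abc123", "role": "engineer",
          "max_hops": 3, "top_k": 10},
)
print(resp.status_code, resp.json())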
src/api/35-openapi-spec.yaml (new file, 207 lines)
@@ -0,0 +1,207 @@
paths:
  /decision/bundle:
    post:
      summary: Generate a decision provenance bundle tailored to a role
      requestBody:
        required: true
        content:
          application/json:
            schema:
              $ref: '#/components/schemas/DecisionBundleRequest'
      responses:
        '200':
          description: Decision bundle assembled successfully
          content:
            application/json:
              schema:
                $ref: '#/components/schemas/DecisionBundleResponse'
        '400':
          description: Invalid request parameters
          content:
            application/json:
              schema:
                $ref: '#/components/schemas/ErrorResponse'
        '500':
          description: Internal server error
          content:
            application/json:
              schema:
                $ref: '#/components/schemas/ErrorResponse'

components:
  schemas:
    DecisionBundleRequest:
      type: object
      properties:
        start_id:
          type: string
          description: >
            The starting decision record ID to anchor the bundle.
            Optional if 'query' is provided.
        query:
          type: string
          description: >
            Search query to find a decision anchor if start_id not provided.
        role:
          type: string
          description: Role requesting the bundle (e.g. engineer, pm)
        max_hops:
          type: integer
          description: Max ancestry depth to walk back
          minimum: 1
          maximum: 10
          default: 3
        top_k:
          type: integer
          description: Max number of decision records to include
          minimum: 1
          maximum: 50
          default: 10
        include_full_dr:
          type: boolean
          description: Whether to include decrypted full decision record payloads (auth required)
          default: false
        redaction:
          type: boolean
          description: Whether to apply role-based redaction to sensitive fields
          default: true
      oneOf:
        - required: ["start_id"]
        - required: ["query"]
      additionalProperties: false

    DecisionRecordSummary:
      type: object
      properties:
        id:
          type: string
          description: Unique decision record ID (e.g. dr:sha256:...)
        timestamp:
          type: string
          format: date-time
        type:
          type: string
          enum: [experiment, decision, rejection, escalation]
          description: Type of decision record
        statement:
          type: string
          description: Brief statement of the decision or rationale
        rationale:
          type: string
          description: Explanation or reasoning (may be redacted)
        lifecycle_state:
          type: string
          enum: [proposed, approved, rejected, deprecated]
        relation_to_start:
          type: string
          enum: [influenced_by, derived_from, unrelated]
        score:
          type: number
          format: float
          description: Relevance or confidence score (0.0 - 1.0)
        tags:
          type: array
          items:
            type: string
        role_exposure:
          type: object
          additionalProperties:
            type: boolean
          description: Role access flags for redaction control
        evidence_refs:
          type: array
          items:
            type: string
          description: UCXL address or DHT reference
        full_payload_ref:
          type: string
          description: Reference to full payload if included (optional)

    GoalAlignment:
      type: object
      properties:
        score:
          type: number
          format: float
          description: Alignment score with project goals (0.0 - 1.0)
        reasons:
          type: array
          items:
            type: string
          description: Brief reasons explaining the alignment score

    SuggestedAction:
      type: object
      properties:
        type:
          type: string
          enum: [experiment, research, escalate, review, defer]
        description:
          type: string
        assignee:
          type: string
        confidence:
          type: number
          format: float
          minimum: 0
          maximum: 1

    Escalation:
      type: object
      properties:
        required:
          type: boolean
        who:
          type: array
          items:
            type: string
          description: Roles or teams to escalate to

    DecisionBundleResponse:
      type: object
      properties:
        bundle_id:
          type: string
          description: Unique ID for the bundle
        start_id:
          type: string
          description: The starting decision record ID
        generated_at:
          type: string
          format: date-time
        summary:
          type: string
          description: Human-readable summary of the bundle content
        timeline:
          type: array
          items:
            $ref: '#/components/schemas/DecisionRecordSummary'
        constraints_summary:
          type: array
          items:
            type: string
        key_evidence_refs:
          type: array
          items:
            type: string
        goal_alignment:
          $ref: '#/components/schemas/GoalAlignment'
        suggested_actions:
          type: array
          items:
            $ref: '#/components/schemas/SuggestedAction'
        escalation:
          $ref: '#/components/schemas/Escalation'
        signatures:
          type: array
          items:
            type: string
        cache_hit:
          type: boolean
          description: Whether the bundle was served from cache

    ErrorResponse:
      type: object
      properties:
        error:
          type: string
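The request schema encodes "start_id or query, but not both" via oneOf plus additionalProperties: false. A minimal sketch of checking that rule outside a full OpenAPI validator, assuming PyYAML is installed and the fragment is saved as 35-openapi-spec.yaml:

# Informal check of the DecisionBundleRequest rules; not a full validator.
import yaml

with open("35-openapi-spec.yaml") as f:
    spec = yaml.safe_load(f)

schema = spec["components"]["schemas"]["DecisionBundleRequest"]
allowed = set(schema["properties"])

def roughly_valid(req: dict) -> bool:
    if set(req) - allowed:          # additionalProperties: false
        return False
    # oneOf: exactly one of start_id / query may be present
    return ("start_id" in req) != ("query" in req)

print(roughly_valid({"role": "engineer", "start_id": "dr:sha256:abc123"}))  # True
print(roughly_valid({"role": "engineer"}))  # False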
src/api/api.go (new file, 87 lines)
@@ -0,0 +1,87 @@
package api

import (
    "encoding/json"
    "fmt"
    "net/http"
    "time"

    "gitea.deepblack.cloud/chorus/bubble/core"
    "gitea.deepblack.cloud/chorus/bubble/models"
    "gitea.deepblack.cloud/chorus/bubble/storage"
)

// Server holds the dependencies for the API server.
type Server struct {
    Store storage.Storage
}

// NewServer creates a new API server.
func NewServer(store storage.Storage) *Server {
    return &Server{Store: store}
}

// Start begins listening for HTTP requests.
func (s *Server) Start(addr string) error {
    http.HandleFunc("/decision/bundle", s.handleDecisionBundle)
    return http.ListenAndServe(addr, nil)
}

// handleDecisionBundle is the handler for the /decision/bundle endpoint.
func (s *Server) handleDecisionBundle(w http.ResponseWriter, r *http.Request) {
    if r.Method != http.MethodPost {
        http.Error(w, "Only POST method is allowed", http.StatusMethodNotAllowed)
        return
    }

    var req models.DecisionBundleRequest
    if err := json.NewDecoder(r.Body).Decode(&req); err != nil {
        http.Error(w, err.Error(), http.StatusBadRequest)
        return
    }

    if req.StartID == "" && req.Query == "" {
        http.Error(w, "start_id or query is required", http.StatusBadRequest)
        return
    }

    // --- Core Logic ---
    // Use StartID for now. Query-based anchor resolution will be added later.
    startNode, err := s.Store.GetDecisionMetadata(req.StartID)
    if err != nil {
        http.Error(w, fmt.Sprintf("Failed to get start node: %v", err), http.StatusInternalServerError)
        return
    }
    if startNode == nil {
        http.Error(w, "Start node not found", http.StatusNotFound)
        return
    }

    // Perform the provenance walk. core.WalkBack also takes the query string,
    // so pass it through to match the signature in core/bundle.go.
    timeline, err := core.WalkBack(s.Store, req.StartID, req.Query, req.Role, req.MaxHops, req.TopK)
    if err != nil {
        http.Error(w, fmt.Sprintf("Failed to walk provenance graph: %v", err), http.StatusInternalServerError)
        return
    }

    // Assemble the response bundle.
    // This is a simplified version of the logic in the blueprint.
    response := models.DecisionBundleResponse{
        BundleID:           fmt.Sprintf("bundle:%s", req.StartID), // Simplified ID
        StartID:            req.StartID,
        GeneratedAt:        time.Now().UTC().Format(time.RFC3339),
        Summary:            fmt.Sprintf("Decision bundle for %s, found %d ancestors.", req.StartID, len(timeline)),
        Timeline:           timeline,
        ConstraintsSummary: []string{},                 // Placeholder
        KeyEvidenceRefs:    []string{},                 // Placeholder
        GoalAlignment:      models.GoalAlignment{},     // Placeholder
        SuggestedActions:   []models.SuggestedAction{}, // Placeholder
        Escalation:         models.Escalation{},        // Placeholder
        CacheHit:           false,                      // Caching not yet implemented
    }

    w.Header().Set("Content-Type", "application/json")
    if err := json.NewEncoder(w).Encode(response); err != nil {
        http.Error(w, err.Error(), http.StatusInternalServerError)
    }
}
src/bubble.db (new binary file, not shown)
src/bubble_server (new executable file, binary not shown)
src/core/01-dr-envelope.jsonc (new file, 18 lines)
@@ -0,0 +1,18 @@
{
  "envelope_version": "1.0",
  "payload": { /* decision_record */ },
  "signatures": [
    {
      "actor_id": "user:alice",
      "role": "sys-arch",
      "signature": "BASE64-SIG-OF-PAYLOAD"
    }
  ],
  "encryption": {
    "alg": "age",
    "recipients": [
      "age1...",
      "age1..."
    ]
  }
}
src/core/02-dr-payload.jsonc (new file, 59 lines)
@@ -0,0 +1,59 @@
{
  "id": "dr:sha256:...", // immutable unique ID
  "ucxl_path": "ucxl://project:foo@repo:main/path/to/doc.md",

  "type": "decision", // decision|proposal|experiment|rejection
  "lifecycle_state": "active", // active|superseded|rejected|archived

  "timestamp": "2025-08-11T07:30:00Z",
  "actor": {
    "id": "user:alice",
    "role": "sys-arch",
    "org_unit": "infrastructure"
  },

  "statement": "We will adopt crate X for parsing JSON streams.",
  "rationale": "Performance improvement and lower memory footprint.",

  "alternatives": [
    {
      "id": "dr:sha256:alt1...",
      "statement": "Use crate Y",
      "reason": "Legacy support",
      "rejected": true
    }
  ],

  "evidence": [
    "ucxl://project:foo@repo:main/docs/benchmarks.md",
    "ucxl://project:foo@repo:main/reports/compatibility.json"
  ],

  "metrics": {
    "cost_estimate": { "hours": 40, "budget": 0 },
    "confidence": 0.67
  },

  "constraints": [
    "policy:budget-l1",
    "policy:cloud-aws-only"
  ],

  "provenance": {
    "influenced_by": [
      "dr:sha256:prev1...",
      "dr:sha256:prev2..."
    ],
    "derived_from": [],
    "supersedes": []
  },

  "embeddings_id": "vec:abc123",
  "tags": ["infra", "parser", "performance"],

  "role_exposure": {
    "engineer": true,
    "pm": true,
    "research": false
  }
}
src/core/03-policy-store-example.jsonc (new file, 9 lines)
@@ -0,0 +1,9 @@
{
  "id": "policy:budget-l1",
  "description": "Budget limit for experimental features in project Foo",
  "applies_to": ["project:foo"],
  "rules": {
    "max_budget": 10000,
    "currency": "USD"
  }
}
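The bundle assembler later in this commit consults policies like the one above through a violates_policy helper. A hedged sketch of such a check, with the signature adapted to take the policy document and a DR payload (shaped like 02-dr-payload.jsonc) so it runs standalone:

# Minimal budget check against the policy document above. The signature is
# adapted for a self-contained demo; the pseudocode in
# 24-assemble-bundle-pseudocode.py calls violates_policy with a constraint id.
def violates_policy(policy: dict, dr_payload: dict) -> bool:
    if policy["id"].startswith("policy:budget"):
        budget = dr_payload.get("metrics", {}).get("cost_estimate", {}).get("budget", 0)
        return budget > policy["rules"]["max_budget"]
    return False  # other policy kinds would be handled here

policy = {"id": "policy:budget-l1", "rules": {"max_budget": 10000, "currency": "USD"}}
dr = {"metrics": {"cost_estimate": {"hours": 40, "budget": 12000}}}
print(violates_policy(policy, dr))  # True: exceeds the $10k cap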
src/core/06-provenance-walk.py (new file, 43 lines)
@@ -0,0 +1,43 @@
from collections import deque


def walk_back(dr_id, max_hops=3, role=None, filter_state=None):
    """
    Walks upstream in the decision space, returning relevant ancestors.
    """
    visited = set()
    results = []

    # BFS queue (deque gives O(1) pops from the left)
    queue = deque([(dr_id, 0)])

    while queue:
        current_id, depth = queue.popleft()

        if current_id in visited or depth > max_hops:
            continue
        visited.add(current_id)

        # fetch upstream edges from reverse index
        edges = reverse_index.get(current_id, [])
        for edge in edges:
            ancestor_id = edge["source"]

            # fetch DR metadata (not full payload)
            meta = get_dr_metadata(ancestor_id)

            # role-based filter
            if role and not meta["role_exposure"].get(role, False):
                continue

            # state filter (filter_state is a list of allowed states,
            # matching the walk request in 09-decision-walk-request.jsonc)
            if filter_state and meta["lifecycle_state"] not in filter_state:
                continue

            results.append({
                "id": ancestor_id,
                "relation": edge["relation"],
                "statement": meta["statement"],
                "timestamp": meta["timestamp"]
            })

            queue.append((ancestor_id, depth + 1))

    return results
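A self-contained run of walk_back, using in-memory stand-ins shaped like 05-reverse-index.jsonc and 07-metadata-cache.jsonc; the IDs are the ones from those example files:

# Demo stand-ins for the reverse index and metadata store assumed above.
reverse_index = {
    "dr:sha256:curr1": [{"relation": "influenced_by", "source": "dr:sha256:prev1"}],
}

_metadata = {
    "dr:sha256:prev1": {
        "statement": "We rejected library Z due to licensing issues",
        "timestamp": "2025-06-10T09:15:00Z",
        "lifecycle_state": "rejected",
        "role_exposure": {"engineer": True, "pm": True, "research": False},
    },
}

def get_dr_metadata(dr_id):
    return _metadata[dr_id]

print(walk_back("dr:sha256:curr1", max_hops=2, role="engineer",
                filter_state=["rejected", "approved"]))
# -> one ancestor: dr:sha256:prev1, relation "influenced_by"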
src/core/08-fast-check.py (new file, 10 lines)
@@ -0,0 +1,10 @@
def tried_before(query_vec, tags=None, role=None):
    candidates = vector_search(query_vec, top_k=10)

    for dr in candidates:
        meta = get_dr_metadata(dr["id"])
        if role and not meta["role_exposure"].get(role, False):
            continue
        if meta["lifecycle_state"] in ("rejected", "superseded"):
            return True, dr["id"], meta["statement"]
    return False, None, None
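This fast check is what the /decision/check endpoint (snippets 11 and 12) would sit on. A sketch of the glue, assuming an embed() function is in scope; the response keys mirror 12-decision-check-response.jsonc:

# Hypothetical glue for /decision/check over tried_before; embed() is assumed.
def decision_check(query: str, role: str) -> dict:
    hit, matched_id, reason = tried_before(embed(query), role=role)
    return {"tried_before": hit, "matched_id": matched_id, "reason": reason}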
src/core/15-walk-back-pseudocode.py (new file, 53 lines)
@@ -0,0 +1,53 @@
def walk_back(start_id, n_hops=3, role=None, filter_state=None):
    visited = set()
    results = []
    queue = [(start_id, 0)]

    while queue:
        current_id, depth = queue.pop(0)

        if current_id in visited or depth > n_hops:
            continue
        visited.add(current_id)

        edges = reverse_index.get(current_id, [])
        for edge in edges:
            ancestor_id = edge["source"]

            meta = metadata_cache.get(ancestor_id)
            if not meta:
                meta = fetch_metadata_from_dht(ancestor_id)
                metadata_cache[ancestor_id] = meta  # Cache store

            # Role filter
            if role and not meta["role_exposure"].get(role, False):
                continue

            # State filter
            if filter_state and meta["lifecycle_state"] not in filter_state:
                continue

            score = provenance_score(meta, depth)
            results.append({
                "id": ancestor_id,
                "relation": edge["relation"],
                "statement": meta["statement"],
                "timestamp": meta["timestamp"],
                "score": score
            })

            queue.append((ancestor_id, depth + 1))

    # Sort results by score descending
    results.sort(key=lambda r: r["score"], reverse=True)
    return results


def provenance_score(meta, depth):
    """Heuristic scoring for relevance."""
    score = 1.0 / (depth + 1)  # closer ancestors score higher
    if meta["lifecycle_state"] == "rejected":
        score *= 0.9
    if "critical" in meta.get("tags", []):
        score *= 1.2
    return score
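A quick worked example of the heuristic: a rejected, critical-tagged ancestor two hops away scores (1/3) * 0.9 * 1.2 = 0.36, so the criticality bonus nearly cancels the rejection penalty at that depth.

meta = {"lifecycle_state": "rejected", "tags": ["critical"]}
print(provenance_score(meta, depth=2))  # (1/3) * 0.9 * 1.2 = 0.36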
src/core/20-walk-back-hybrid.py (new file, 77 lines)
@@ -0,0 +1,77 @@
import json
from collections import deque
import heapq
import itertools


def walk_back(start_id, n, query):
    visited = set()
    results = []
    queue = deque([(start_id, 0)])  # (dr_id, hops)
    tiebreak = itertools.count()  # keeps heap tuples comparable on equal scores

    query_vec = embed(query)  # Precompute embedding for semantic scoring

    while queue:
        dr_id, depth = queue.popleft()
        if dr_id in visited or depth > n:
            continue
        visited.add(dr_id)

        # Get metadata from RocksDB first, fallback to SQLite
        metadata = get_metadata(dr_id)
        if not metadata:
            continue

        # Score
        sim_score = cosine_similarity(query_vec, embed(metadata['statement']))
        dist_score = max(0, (n - depth) / n)  # Closer ancestors score higher
        constraint_penalty = -0.2 if metadata.get("blocked") else 0

        total_score = (0.6 * sim_score) + (0.3 * dist_score) + constraint_penalty
        heapq.heappush(results, (-total_score, next(tiebreak), metadata))  # Max-heap

        # Traverse ancestors
        for anc_id in get_ancestors(dr_id):
            queue.append((anc_id, depth + 1))

    # Return top results sorted
    sorted_results = [md for _, _, md in sorted(results)]
    cache_walk_results(start_id, n, query, sorted_results)
    return sorted_results


def get_metadata(dr_id):
    # RocksDB keys are bytes, matching the convention in 26-fastapi-skeleton.py
    val = rocks.get(f"meta:{dr_id}".encode())
    if val:
        return deserialize(val)

    # Fallback to SQLite
    row = sqlite_conn.execute("""
        SELECT statement, lifecycle_state, role_exposure, tags, timestamp
        FROM decisions WHERE id=?
    """, (dr_id,)).fetchone()
    if row:
        return {
            "id": dr_id,
            "statement": row[0],
            "lifecycle_state": row[1],
            "role_exposure": json.loads(row[2]),
            "tags": json.loads(row[3]),
            "timestamp": row[4]
        }
    return None


def get_ancestors(dr_id):
    val = rocks.get(f"rev:{dr_id}".encode())
    if val:
        return deserialize(val)

    # Fallback to SQLite edges
    rows = sqlite_conn.execute("""
        SELECT source_id FROM edges
        WHERE target_id=? AND relation='influences'
    """, (dr_id,)).fetchall()
    return [r[0] for r in rows]


def cache_walk_results(start_id, n, query, results):
    cache_key = f"walkcache:{start_id}:{n}:{hash(query)}"
    rocks.put(cache_key.encode(), serialize(results))
src/core/23-bundle-timeline-element.json (new file, 14 lines)
@@ -0,0 +1,14 @@
{
  "id": "dr:sha256:prev1",
  "timestamp": "...",
  "type": "experiment|decision|rejection",
  "statement": "Rejected lib Z due to license",
  "rationale": "Incompatible with SaaS licensing",
  "lifecycle_state": "rejected",
  "relation_to_start": "influenced_by|derived_from",
  "score": 0.93,
  "tags": ["licensing","cloud"],
  "role_exposure": {"engineer": true, "pm": true},
  "evidence_refs": ["ucxl://..."],
  "full_payload_ref": "ucxl://.../dr:sha256:prev1" // only if include_full_dr=true and authorized
}
src/core/24-assemble-bundle-pseudocode.py (new file, 97 lines)
@@ -0,0 +1,97 @@
def assemble_bundle(start_id=None, query=None, role="engineer",
                    max_hops=3, top_k=10, include_full_dr=False, redact=True):

    # 0. anchor resolution
    if not start_id:
        anchors = vector_search(query, top_k=3)  # maybe return DR ids
        if not anchors:
            return {"error": "no anchors found"}
        start_id = anchors[0]["id"]

    # 1. try cache
    cache_key = f"bundle:{start_id}:{role}:{max_hops}:{hash(query)}:{top_k}"
    cached = rocks.get(cache_key)
    if cached:
        return cached  # signed object

    # 2. provenance walk
    ancestors = walk_back(start_id, n=max_hops, query=query, role=role, top_k=top_k)
    # walk_back returns sorted list of metadata objects with scores

    # 3. fetch required metadata and optionally full payloads
    timeline = []
    evidence_refs = set()
    constraints_violations = set()
    for meta in ancestors[:top_k]:
        # meta is fetched from RocksDB or SQLite (fast)
        item = {
            "id": meta["id"],
            "timestamp": meta["timestamp"],
            "type": meta.get("type"),
            "statement": redact_field(meta["statement"], role) if redact else meta["statement"],
            "rationale": redact_field(meta.get("rationale", ""), role) if redact else meta.get("rationale", ""),
            "lifecycle_state": meta.get("lifecycle_state"),
            "relation_to_start": meta.get("relation"),
            "score": meta.get("score"),
            "tags": meta.get("tags", []),
            "role_exposure": meta.get("role_exposure", {})
        }

        if include_full_dr and is_authorized(role, meta["id"]):
            item["full_payload"] = fetch_and_decrypt_dr(meta["id"])  # heavy, audited
        else:
            item["full_payload_ref"] = meta.get("ucxl_path")

        timeline.append(item)
        for c in meta.get("constraints", []):
            if violates_policy(c):
                constraints_violations.add(c)
        evidence_refs.update(meta.get("evidence", []))

    # 4. constraints summary (human readable)
    constraints_summary = [render_policy_summary(c) for c in constraints_violations]

    # 5. goal alignment (heuristic)
    project_goal_vec = get_project_goal_embedding()  # from project metadata
    alignment_scores = []
    for t in timeline:
        alignment_scores.append(cosine_similarity(project_goal_vec, embed(t["statement"])))
    goal_alignment_score = mean(alignment_scores)
    goal_reasons = top_reasons_from_timeline(timeline, project_goal_vec, n=3)

    # 6. suggested actions (LLM-assisted)
    # Provide LLM with:
    # - start statement
    # - concise timeline (n items)
    # - constraints summary
    # Ask: "Given constraints, what next steps? Who to ask? What to avoid?"
    llm_prompt = build_prompt_for_actions(start_statement=timeline[0]["statement"],
                                          timeline=timeline[:5],
                                          constraints=constraints_summary,
                                          role=role)
    suggested_actions = llm_call("action_suggester", llm_prompt, max_tokens=400)
    # LLM output should be parsed into structured actions {type, desc, assignee, confidence}

    # 7. escalation check
    escalation = {"required": False, "who": []}
    if any("budget" in c for c in constraints_violations) and goal_alignment_score > 0.8:
        escalation = {"required": True, "who": ["pm", "finance"]}

    # 8. assemble, sign, cache
    bundle = {
        "bundle_id": "bundle:sha256:" + sha256_of_content(...),
        "start_id": start_id,
        "generated_at": now_iso(),
        "summary": auto_summary_from_timeline(timeline[:5]),
        "timeline": timeline,
        "constraints_summary": constraints_summary,
        "key_evidence_refs": list(evidence_refs)[:10],
        "goal_alignment": {"score": goal_alignment_score, "reasons": goal_reasons},
        "suggested_actions": suggested_actions,
        "escalation": escalation,
        "signatures": [signer_sig("leader"), signer_sig("assembler")],
        "cache_hit": False
    }

    rocks.put(cache_key, serialize(bundle))
    return bundle
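One of the helpers the pseudocode assumes is render_policy_summary. A hedged stub that maps policy IDs to the human-readable strings seen in 25-minimal-bundle-example.json; the lookup table is illustrative:

# Hypothetical stub for render_policy_summary used in step 4 above.
_POLICY_TEXT = {
    "policy:budget-l1": "policy:budget-l1 (max $10k)",
    "policy:cloud-aws-only": "policy:cloud-aws-only",
}

def render_policy_summary(policy_id: str) -> str:
    return _POLICY_TEXT.get(policy_id, policy_id)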
src/core/25-minimal-bundle-example.json (new file, 26 lines)
@@ -0,0 +1,26 @@
{
  "bundle_id": "bundle:sha256:deadbeef",
  "start_id": "dr:sha256:abc123",
  "generated_at": "2025-08-11T09:00:00Z",
  "summary": "Library Z was previously rejected (licensing). Budget constraints apply. Engineering suggests an alternative X.",
  "timeline": [
    {
      "id": "dr:sha256:prev1",
      "timestamp": "2025-06-10T09:15:00Z",
      "statement": "Rejected lib Z due to restrictive SaaS licensing",
      "lifecycle_state": "rejected",
      "relation_to_start": "influenced_by",
      "score": 0.93,
      "evidence_refs": ["ucxl://.../license_report.md"]
    }
  ],
  "constraints_summary": ["policy:budget-l1 (max $10k)", "policy:cloud-aws-only"],
  "key_evidence_refs": ["ucxl://.../license_report.md"],
  "goal_alignment": {"score": 0.72, "reasons": ["performance vs cost tradeoff"]},
  "suggested_actions": [
    {"type": "experiment", "desc": "Run compatibility shim test with lib X", "assignee": "eng-team", "confidence": 0.6}
  ],
  "escalation": {"required": true, "who": ["pm", "legal"]},
  "signatures": ["leader:SIG...", "assembler:SIG..."],
  "cache_hit": false
}
src/core/bundle.go (new file, 129 lines)
@@ -0,0 +1,129 @@
package core

import (
    "container/heap"

    "gitea.deepblack.cloud/chorus/bubble/models"
    "gitea.deepblack.cloud/chorus/bubble/storage"
)

// --- Priority Queue for Scoring ---

// A scoredItem is something we manage in a priority queue.
type scoredItem struct {
    summary  models.DecisionRecordSummary
    priority float64 // The priority of the item in the queue.
    index    int     // The index of the item in the heap.
}

// A PriorityQueue implements heap.Interface and holds scoredItems.
type PriorityQueue []*scoredItem

func (pq PriorityQueue) Len() int { return len(pq) }

func (pq PriorityQueue) Less(i, j int) bool {
    // We want Pop to give us the highest, not lowest, priority so we use greater than here.
    return pq[i].priority > pq[j].priority
}

func (pq PriorityQueue) Swap(i, j int) {
    pq[i], pq[j] = pq[j], pq[i]
    pq[i].index = i
    pq[j].index = j
}

func (pq *PriorityQueue) Push(x interface{}) {
    n := len(*pq)
    item := x.(*scoredItem)
    item.index = n
    *pq = append(*pq, item)
}

func (pq *PriorityQueue) Pop() interface{} {
    old := *pq
    n := len(old)
    item := old[n-1]
    old[n-1] = nil  // avoid memory leak
    item.index = -1 // for safety
    *pq = old[0 : n-1]
    return item
}

// --- Core Logic ---

// WalkBack performs a scored, role-aware walk of the provenance graph.
func WalkBack(store storage.Storage, startID, query, role string, maxHops, topK int) ([]models.DecisionRecordSummary, error) {
    visited := make(map[string]bool)
    pq := make(PriorityQueue, 0)
    heap.Init(&pq)

    queue := []struct {
        id    string
        depth int
    }{{startID, 0}}
    visited[startID] = true

    // Placeholder for query embedding
    // queryVec := embed(query)

    for len(queue) > 0 {
        current := queue[0]
        queue = queue[1:]

        if current.depth > maxHops {
            continue
        }

        // Fetch metadata for the current node
        meta, err := store.GetDecisionMetadata(current.id)
        if err != nil {
            // Log or handle error, maybe continue
            continue
        }
        if meta == nil {
            continue
        }

        // Role-based filtering: if the role is not in the map or is false, skip.
        // (A global admin bypass or similar logic could be added here.)
        if exposure, ok := meta.RoleExposure[role]; !ok || !exposure {
            continue
        }

        // --- Scoring ---
        // simScore := cosineSimilarity(queryVec, embed(meta.Statement))
        simScore := 0.5 // Placeholder value
        distScore := float64(maxHops-current.depth) / float64(maxHops)
        constraintPenalty := 0.0 // Placeholder

        totalScore := (0.6 * simScore) + (0.3 * distScore) + constraintPenalty
        meta.Score = totalScore

        heap.Push(&pq, &scoredItem{summary: *meta, priority: totalScore})

        // --- Traverse Ancestors ---
        ancestors, err := store.GetAncestors(current.id)
        if err != nil {
            // Log or handle error
            continue
        }

        for _, ancID := range ancestors {
            if !visited[ancID] {
                visited[ancID] = true
                queue = append(queue, struct {
                    id    string
                    depth int
                }{ancID, current.depth + 1})
            }
        }
    }

    // Extract top K results from the priority queue
    var results []models.DecisionRecordSummary
    for i := 0; i < topK && pq.Len() > 0; i++ {
        item := heap.Pop(&pq).(*scoredItem)
        results = append(results, item.summary)
    }

    return results, nil
}
src/go.mod (new file, 8 lines)
@@ -0,0 +1,8 @@
module gitea.deepblack.cloud/chorus/bubble

go 1.24.5

require (
    github.com/mattn/go-sqlite3 v1.14.31 // indirect
    github.com/tecbot/gorocksdb v0.0.0-20191217155057-f0fad39f321c // indirect
)
src/go.sum (new file, 4 lines)
@@ -0,0 +1,4 @@
github.com/mattn/go-sqlite3 v1.14.31 h1:ldt6ghyPJsokUIlksH63gWZkG6qVGeEAu4zLeS4aVZM=
github.com/mattn/go-sqlite3 v1.14.31/go.mod h1:Uh1q+B4BYcTPb+yiD3kU8Ct7aC0hY9fxUwlHK0RXw+Y=
github.com/tecbot/gorocksdb v0.0.0-20191217155057-f0fad39f321c h1:g+WoO5jjkqGAzHWCjJB1zZfXPIAaDpzXIEJ0eS6B5Ok=
github.com/tecbot/gorocksdb v0.0.0-20191217155057-f0fad39f321c/go.mod h1:ahpPrc7HpcfEWDQRZEmnXMzHY03mLDYMCxeDzy46i+8=
src/integrations/17-peer-sync.json (new file, 7 lines)
@@ -0,0 +1,7 @@
{
  "id": "dr:sha256:new1",
  "edges": [
    {"relation": "influenced_by", "source": "dr:sha256:old1"}
  ],
  "metadata": {...}
}
src/integrations/27-vector-search-integration.py (new file, 22 lines)
@@ -0,0 +1,22 @@
from sentence_transformers import SentenceTransformer
import faiss
import numpy as np

# Load your embedding model once
embedding_model = SentenceTransformer("all-MiniLM-L6-v2")

# Load FAISS index from disk (or create it on startup)
faiss_index = faiss.read_index("dr_faiss.index")

# Map of vector index to DR IDs
dr_id_map = {}  # Load from persistent storage


def vector_search(query: str, top_k=3) -> list:
    vec = embedding_model.encode([query], convert_to_numpy=True)
    D, I = faiss_index.search(vec, top_k)
    results = []
    for dist, idx in zip(D[0], I[0]):
        dr_id = dr_id_map.get(idx)
        if dr_id:
            results.append({"id": dr_id, "distance": dist})
    return results
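A hedged sketch of building the index this module expects: encode each DR statement, add the vectors to a flat L2 index, persist it, and record the row-to-ID map. The statements dict is illustrative, and persisting dr_id_map is left to your storage layer.

# One-time index build for the module above; inputs are illustrative.
statements = {
    "dr:sha256:prev1": "We rejected library Z due to licensing issues",
    "dr:sha256:abc123": "Evaluate using lib Z for streaming",
}

vecs = embedding_model.encode(list(statements.values()), convert_to_numpy=True)
index = faiss.IndexFlatL2(vecs.shape[1])  # exact L2 search over the embeddings
index.add(vecs)
faiss.write_index(index, "dr_faiss.index")
dr_id_map = {i: dr_id for i, dr_id in enumerate(statements)}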
src/integrations/28-decryption-stub.py (new file, 16 lines)
@@ -0,0 +1,16 @@
import json


def fetch_and_decrypt_dr(dr_id: str) -> dict:
    # This should pull the encrypted markdown+metadata envelope from the DHT (UCXL address)
    # Then decrypt with your key management system based on role/ACL
    # Return the decrypted DR content as dict

    # Placeholder:
    encrypted_blob = dht_get(dr_id)  # Implement your DHT fetch
    if not encrypted_blob:
        return {}

    try:
        decrypted = decrypt_blob(encrypted_blob, role=...)  # Your crypto module
        return json.loads(decrypted)
    except Exception:
        # Log error
        return {}
src/integrations/29-llm-prompt-template.py (new file, 47 lines)
@@ -0,0 +1,47 @@
import json


def build_prompt_for_actions(start_statement, timeline, constraints, role):
    # The outer quotes are single triple quotes so the literal """ fences
    # around the statement survive inside the f-string.
    prompt = f'''
You are an expert {role} advisor in a software project.

Given the starting decision statement:
"""{start_statement}"""

And the following timeline of relevant prior decisions (summarized):

{chr(10).join(['- ' + t['statement'] for t in timeline])}

Current known constraints and policies:

{chr(10).join(['- ' + c for c in constraints])}

Provide a **structured** list of next recommended actions, including:

- Type (e.g. experiment, research, escalate)
- Description
- Assignee (role or team)
- Confidence (0.0 - 1.0)

Respond ONLY in JSON format as a list of objects with those fields. Do not include any additional commentary.

Example:
[
  {{"type": "experiment", "desc": "Run compatibility test with lib X", "assignee": "engineers", "confidence": 0.75}},
  {{"type": "escalate", "desc": "Review licensing risk", "assignee": "legal", "confidence": 0.9}}
]

Begin now.
'''
    return prompt


def llm_call(model_name: str, prompt: str, max_tokens=400) -> list:
    # Wrap your favorite LLM call here, e.g. OpenAI, Ollama, Claude
    # Return parsed JSON list or empty list on failure

    raw_response = call_llm_api(model_name, prompt, max_tokens=max_tokens)

    try:
        parsed = json.loads(raw_response)
        return parsed
    except Exception:
        # Log parse failure, return empty
        return []
src/integrations/32-n8n-webhook-send.py (new file, 44 lines)
@@ -0,0 +1,44 @@
import requests
import json
from typing import List, Dict, Any


def send_to_n8n_webhook(
    webhook_url: str,
    role: str,
    start_statement: str,
    timeline: List[str],
    constraints: List[str],
    timeout: int = 10,
) -> Dict[str, Any]:
    """
    Sends action suggestion request to n8n webhook.

    Args:
        webhook_url: Full URL to your n8n webhook endpoint.
        role: Role requesting advice (e.g. 'engineer', 'pm').
        start_statement: Main decision statement string.
        timeline: List of up to 5 concise prior decision summaries.
        constraints: List of active constraints/policies strings.
        timeout: HTTP timeout in seconds.

    Returns:
        Parsed JSON response from n8n webhook.

    Raises:
        requests.RequestException for HTTP/network errors.
        json.JSONDecodeError if response is not valid JSON.
        ValueError if response fails schema validation.
    """
    payload = {
        "role": role,
        "start_statement": start_statement,
        "timeline": timeline[:5],  # cap to 5
        "constraints": constraints,
    }

    headers = {"Content-Type": "application/json"}

    response = requests.post(webhook_url, json=payload, headers=headers, timeout=timeout)
    response.raise_for_status()

    data = response.json()
    # Schema validation per the docstring would go here
    return data
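A minimal usage sketch; the URL assumes n8n's default port and the webhook path defined in 34-n8n-workflow.json, and the inputs are illustrative:

actions = send_to_n8n_webhook(
    webhook_url="http://localhost:5678/webhook/action-suggester",
    role="engineer",
    start_statement="Evaluate using lib Z for streaming",
    timeline=["Rejected lib Z due to restrictive SaaS licensing"],
    constraints=["policy:budget-l1 (max $10k)"],
)
print(actions)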
src/integrations/33-flask-handler.py (new file, 32 lines)
@@ -0,0 +1,32 @@
from flask import Flask, request, jsonify

app = Flask(__name__)


@app.route("/n8n-action-suggester", methods=["POST"])
def n8n_action_suggester():
    data = request.get_json(force=True)

    role = data.get("role")
    start_statement = data.get("start_statement")
    timeline = data.get("timeline", [])
    constraints = data.get("constraints", [])

    # Here you'd run your prompt builder + LLM call logic
    # For demo, return a static sample

    response = [
        {
            "type": "experiment",
            "description": "Run compatibility tests for Library X",
            "assignee": "engineering",
            "confidence": 0.85,
        },
        {
            "type": "escalate",
            "description": "Review licensing risk with legal team",
            "assignee": "legal",
            "confidence": 0.9,
        },
    ]

    return jsonify(response)
src/integrations/34-n8n-workflow.json (new file, 76 lines)
@@ -0,0 +1,76 @@
{
  "name": "Action Suggester Workflow",
  "nodes": [
    {
      "parameters": {},
      "id": "1",
      "name": "Webhook Trigger",
      "type": "n8n-nodes-base.webhook",
      "typeVersion": 1,
      "position": [250, 300],
      "webhookId": "your-webhook-id",
      "path": "action-suggester"
    },
    {
      "parameters": {
        "requestMethod": "POST",
        "url": "http://localhost:5000/n8n-action-suggester",
        "jsonParameters": true,
        "options": {},
        "bodyParametersJson": "={ \"role\": $json[\"role\"], \"start_statement\": $json[\"start_statement\"], \"timeline\": $json[\"timeline\"].slice(0,5), \"constraints\": $json[\"constraints\"] }"
      },
      "id": "2",
      "name": "Call Action Suggester",
      "type": "n8n-nodes-base.httpRequest",
      "typeVersion": 1,
      "position": [550, 300]
    },
    {
      "parameters": {
        "values": {
          "string": [
            {
              "name": "suggested_actions",
              "value": "={{JSON.stringify($node[\"Call Action Suggester\"].json)}}"
            }
          ]
        },
        "options": {}
      },
      "id": "3",
      "name": "Set Output",
      "type": "n8n-nodes-base.set",
      "typeVersion": 1,
      "position": [750, 300]
    }
  ],
  "connections": {
    "Webhook Trigger": {
      "main": [
        [
          {
            "node": "Call Action Suggester",
            "type": "main",
            "index": 0
          }
        ]
      ]
    },
    "Call Action Suggester": {
      "main": [
        [
          {
            "node": "Set Output",
            "type": "main",
            "index": 0
          }
        ]
      ]
    }
  }
}
src/main.go (new file, 32 lines)
@@ -0,0 +1,32 @@
package main

import (
    "fmt"
    "log"

    "gitea.deepblack.cloud/chorus/bubble/api"
    "gitea.deepblack.cloud/chorus/bubble/storage"
)

func main() {
    // --- Storage Initialization ---
    dbPath := "./bubble_rocksdb"

    // Initialize the RocksDB store.
    store, err := storage.NewRocksDBStore(dbPath)
    if err != nil {
        log.Fatalf("Failed to initialize rocksdb store: %v", err)
    }
    // defer store.Close() // Close the DB when the application exits

    fmt.Println("RocksDB store initialized successfully.")

    // --- API Server Initialization ---
    server := api.NewServer(store)

    // Start the server.
    port := "8080"
    fmt.Printf("Starting BUBBLE Decision Agent on port %s...\n", port)
    if err := server.Start(":" + port); err != nil {
        log.Fatalf("Failed to start server: %v", err)
    }
}
src/models/models.go (new file, 64 lines)
@@ -0,0 +1,64 @@
package models

// DecisionBundleRequest defines the structure for a request to the /decision/bundle endpoint.
type DecisionBundleRequest struct {
    StartID       string `json:"start_id,omitempty"`
    Query         string `json:"query,omitempty"`
    Role          string `json:"role"`
    MaxHops       int    `json:"max_hops,omitempty"`
    TopK          int    `json:"top_k,omitempty"`
    IncludeFullDR bool   `json:"include_full_dr,omitempty"`
    Redaction     bool   `json:"redaction,omitempty"`
}

// DecisionRecordSummary represents a concise summary of a Decision Record.
type DecisionRecordSummary struct {
    ID              string          `json:"id"`
    Timestamp       string          `json:"timestamp"`
    Type            string          `json:"type"`
    Statement       string          `json:"statement"`
    Rationale       string          `json:"rationale,omitempty"`
    LifecycleState  string          `json:"lifecycle_state"`
    RelationToStart string          `json:"relation_to_start,omitempty"`
    Score           float64         `json:"score,omitempty"`
    Tags            []string        `json:"tags,omitempty"`
    RoleExposure    map[string]bool `json:"role_exposure,omitempty"`
    EvidenceRefs    []string        `json:"evidence_refs,omitempty"`
    FullPayloadRef  string          `json:"full_payload_ref,omitempty"`
}

// GoalAlignment indicates how a decision aligns with project goals.
type GoalAlignment struct {
    Score   float64  `json:"score"`
    Reasons []string `json:"reasons,omitempty"`
}

// SuggestedAction defines a recommended next step.
type SuggestedAction struct {
    Type        string  `json:"type"`
    Description string  `json:"description"`
    Assignee    string  `json:"assignee"`
    Confidence  float64 `json:"confidence"`
}

// Escalation indicates if a decision requires human attention.
type Escalation struct {
    Required bool     `json:"required"`
    Who      []string `json:"who,omitempty"`
}

// DecisionBundleResponse is the structure of the dossier returned by the /decision/bundle endpoint.
type DecisionBundleResponse struct {
    BundleID           string                  `json:"bundle_id"`
    StartID            string                  `json:"start_id"`
    GeneratedAt        string                  `json:"generated_at"`
    Summary            string                  `json:"summary"`
    Timeline           []DecisionRecordSummary `json:"timeline"`
    ConstraintsSummary []string                `json:"constraints_summary"`
    KeyEvidenceRefs    []string                `json:"key_evidence_refs"`
    GoalAlignment      GoalAlignment           `json:"goal_alignment"`
    SuggestedActions   []SuggestedAction       `json:"suggested_actions"`
    Escalation         Escalation              `json:"escalation"`
    Signatures         []string                `json:"signatures,omitempty"`
    CacheHit           bool                    `json:"cache_hit"`
}
src/prompts/31-llm-prompt-template.txt (new file, 42 lines)
@@ -0,0 +1,42 @@
You are an expert {role} advisor in a software project.

Given the starting decision statement:
"""
{start_statement}
"""

And the following recent timeline of relevant prior decisions (each a concise summary):
{timeline}

Current known constraints and policies affecting this decision:
{constraints}

Your task:
Provide a structured list of **recommended next actions** to move the project forward, considering constraints and project goals.

Output **only JSON** as a list of objects, each with:
- "type": one of ["experiment", "research", "escalate", "review", "defer"]
- "description": brief action description
- "assignee": role or team responsible
- "confidence": decimal from 0.0 to 1.0 indicating your confidence

Example output:
[
  {{
    "type": "experiment",
    "description": "Run compatibility tests for Library X",
    "assignee": "engineering",
    "confidence": 0.85
  }},
  {{
    "type": "escalate",
    "description": "Review licensing risk with legal team",
    "assignee": "legal",
    "confidence": 0.9
  }}
]

Do not include any explanation, commentary, or additional text.
If unsure, provide lower confidence scores rather than fabricate details.

Begin output now:
src/seed.go (new file, 72 lines)
@@ -0,0 +1,72 @@
|
||||
package main

import (
	"database/sql"
	"log"

	_ "github.com/mattn/go-sqlite3"
)

func main() {
	db, err := sql.Open("sqlite3", "./bubble.db")
	if err != nil {
		log.Fatalf("Failed to open database: %v", err)
	}
	defer db.Close()

	// Sample decisions
	decisions := []struct {
		ID             string
		Statement      string
		LifecycleState string
		RoleExposure   string
		Tags           string
		Timestamp      string
	}{
		{"dr:1", "Adopt Go for new microservices", "active", `{"engineer": true, "pm": true}`, `["language", "backend"]`, "2025-08-12T10:00:00Z"},
		{"dr:2", "Use FastAPI for Python services", "superseded", `{"engineer": true}`, `["python", "api"]`, "2025-08-10T11:00:00Z"},
		{"dr:3", "Evaluate RocksDB for storage", "active", `{"engineer": true, "research": true}`, `["database", "storage"]`, "2025-08-11T15:00:00Z"},
		{"dr:4", "Decision to use Go was influenced by performance needs", "active", `{"pm": true}`, `["performance"]`, "2025-08-12T09:00:00Z"},
	}

	// Sample edges (provenance):
	//   dr:4 -> dr:1 (dr:4 influenced dr:1)
	//   dr:2 -> dr:1 (dr:2 was superseded by dr:1)
	edges := []struct {
		SourceID string
		TargetID string
		Relation string
	}{
		{"dr:4", "dr:1", "influences"},
		{"dr:2", "dr:1", "supersedes"},
		{"dr:3", "dr:4", "influences"},
	}

	log.Println("Seeding database...")

	// Insert decisions
	for _, d := range decisions {
		_, err := db.Exec(`
			INSERT INTO decisions (id, statement, lifecycle_state, role_exposure, tags, timestamp)
			VALUES (?, ?, ?, ?, ?, ?)
			ON CONFLICT(id) DO NOTHING;
		`, d.ID, d.Statement, d.LifecycleState, d.RoleExposure, d.Tags, d.Timestamp)
		if err != nil {
			log.Fatalf("Failed to insert decision %s: %v", d.ID, err)
		}
	}

	// Insert edges
	for _, e := range edges {
		_, err := db.Exec(`
			INSERT INTO edges (source_id, target_id, relation)
			VALUES (?, ?, ?)
			ON CONFLICT(source_id, target_id) DO NOTHING;
		`, e.SourceID, e.TargetID, e.Relation)
		if err != nil {
			log.Fatalf("Failed to insert edge %s -> %s: %v", e.SourceID, e.TargetID, err)
		}
	}

	log.Println("Database seeded successfully.")
}
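A quick sanity check after seeding is to walk one hop upstream from dr:1, which should return dr:4 (influences) and dr:2 (supersedes). A throwaway sketch, assuming the ./bubble.db path above and an already-applied schema:

```go
// Illustrative only; run separately from seed.go after seeding.
package main

import (
	"database/sql"
	"fmt"
	"log"

	_ "github.com/mattn/go-sqlite3"
)

func main() {
	db, err := sql.Open("sqlite3", "./bubble.db")
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	rows, err := db.Query(
		"SELECT source_id, relation FROM edges WHERE target_id = ?", "dr:1")
	if err != nil {
		log.Fatal(err)
	}
	defer rows.Close()

	for rows.Next() {
		var src, rel string
		if err := rows.Scan(&src, &rel); err != nil {
			log.Fatal(err)
		}
		fmt.Printf("%s --%s--> dr:1\n", src, rel)
	}
}
```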
7
src/storage/04-forward-index.jsonc
Normal file
@@ -0,0 +1,7 @@
// forward_index.json
{
  "dr:sha256:prev1": [
    { "relation": "influenced", "target": "dr:sha256:curr1" },
    { "relation": "superseded", "target": "dr:sha256:curr2" }
  ]
}
7
src/storage/05-reverse-index.jsonc
Normal file
@@ -0,0 +1,7 @@
// reverse_index.json
{
  "dr:sha256:curr1": [
    { "relation": "influenced_by", "source": "dr:sha256:prev1" },
    { "relation": "derived_from", "source": "dr:sha256:prev2" }
  ]
}
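Both index files share one shape, a decision ID mapped to a list of typed links, so a single pair of Go types covers them; note the leading // lines are JSONC comments and would need stripping before strict JSON parsing. A sketch with assumed type names, not names from this repo:

```go
package storage

import "encoding/json"

// ForwardLink and ReverseLink mirror the JSONC examples above.
// Type and field names here are illustrative, not from the codebase.
type ForwardLink struct {
	Relation string `json:"relation"`
	Target   string `json:"target"`
}

type ReverseLink struct {
	Relation string `json:"relation"`
	Source   string `json:"source"`
}

// parseForwardIndex decodes a forward_index.json document
// (comment lines already stripped).
func parseForwardIndex(data []byte) (map[string][]ForwardLink, error) {
	idx := map[string][]ForwardLink{}
	if err := json.Unmarshal(data, &idx); err != nil {
		return nil, err
	}
	return idx, nil
}
```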
7
src/storage/07-metadata-cache.jsonc
Normal file
@@ -0,0 +1,7 @@
{
  "id": "dr:sha256:prev1",
  "statement": "We rejected library Z due to licensing issues",
  "timestamp": "2025-06-10T09:15:00Z",
  "lifecycle_state": "rejected",
  "role_exposure": {"engineer": true, "pm": true, "research": false}
}
7
src/storage/16-metadata-cache-layout.jsonc
Normal file
@@ -0,0 +1,7 @@
{
  "statement": "...",
  "timestamp": "...",
  "lifecycle_state": "rejected",
  "role_exposure": {"engineer": true, "pm": true},
  "tags": ["cloud", "licensing"]
}
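The role_exposure map is what redaction keys off: an entry is exposed to a role only when its flag is explicitly true, and a missing role reads as hidden (consistent with the explicit "research": false in the cache example above). A minimal sketch of that check against the models type:

```go
package storage

import "gitea.deepblack.cloud/chorus/bubble/models"

// visibleTo reports whether a cached summary may be exposed to a role.
// Sketch only: a missing role key is treated as "not exposed", matching
// the explicit "research": false entry in the cache example above.
func visibleTo(s *models.DecisionRecordSummary, role string) bool {
	if s == nil || s.RoleExposure == nil {
		return false
	}
	return s.RoleExposure[role]
}
```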
21
src/storage/18-rocksdb-sqlite-schema.sql
Normal file
@@ -0,0 +1,21 @@
CREATE TABLE decisions (
    id              TEXT PRIMARY KEY,
    statement       TEXT,
    lifecycle_state TEXT,
    role_exposure   TEXT,     -- JSON object
    tags            TEXT,     -- JSON array
    timestamp       DATETIME
);

CREATE TABLE edges (
    source_id TEXT,
    target_id TEXT,
    relation  TEXT,
    PRIMARY KEY (source_id, target_id)
);

CREATE TABLE constraints (
    id          TEXT PRIMARY KEY,
    scope       TEXT,  -- "global" or "role:<role>"
    description TEXT
);
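Because edges carries both endpoints, a bounded upstream walk need not issue one query per hop; SQLite's recursive CTEs can apply the max_hops limit in a single query. A sketch over the schema above; the query text is an assumption, not repo code:

```go
package storage

import "database/sql"

// walkUpstream returns ancestor IDs of startID up to maxHops away,
// using a recursive CTE over the edges table. Illustrative sketch.
func walkUpstream(db *sql.DB, startID string, maxHops int) ([]string, error) {
	const q = `
		WITH RECURSIVE walk(id, hops) AS (
			SELECT source_id, 1 FROM edges WHERE target_id = ?
			UNION
			SELECT e.source_id, w.hops + 1
			FROM edges e JOIN walk w ON e.target_id = w.id
			WHERE w.hops < ?
		)
		SELECT DISTINCT id FROM walk;`
	rows, err := db.Query(q, startID, maxHops)
	if err != nil {
		return nil, err
	}
	defer rows.Close()

	var ids []string
	for rows.Next() {
		var id string
		if err := rows.Scan(&id); err != nil {
			return nil, err
		}
		ids = append(ids, id)
	}
	return ids, rows.Err()
}
```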
58
src/storage/19-write-sync-pseudocode.py
Normal file
@@ -0,0 +1,58 @@
import json

# Pseudocode: `rocks`, `wal`, and `sqlite_conn` are assumed pre-initialized
# handles for RocksDB, the write-ahead log, and SQLite; `serialize` stands
# in for any metadata encoder.

def store_decision(dr_id, metadata, ancestors, descendants):
    # RocksDB writes
    rocks.put(f"meta:{dr_id}", serialize(metadata))

    for anc in ancestors:
        rocks.append_list(f"rev:{dr_id}", anc)
        rocks.append_list(f"fwd:{anc}", dr_id)

    # WAL append for sync
    wal.write({
        "type": "decision",
        "id": dr_id,
        "metadata": metadata,
        "ancestors": ancestors,
        "descendants": descendants,
    })


def sync_to_sqlite():
    while True:
        batch = wal.read_batch(limit=100)
        if not batch:
            break
        with sqlite_conn:
            for entry in batch:
                if entry["type"] == "decision":
                    # Upsert into decisions table
                    sqlite_conn.execute("""
                        INSERT INTO decisions (id, statement, lifecycle_state, role_exposure, tags, timestamp)
                        VALUES (?, ?, ?, ?, ?, ?)
                        ON CONFLICT(id) DO UPDATE SET
                            statement=excluded.statement,
                            lifecycle_state=excluded.lifecycle_state,
                            role_exposure=excluded.role_exposure,
                            tags=excluded.tags,
                            timestamp=excluded.timestamp
                    """, (
                        entry["id"],
                        entry["metadata"]["statement"],
                        entry["metadata"]["lifecycle_state"],
                        json.dumps(entry["metadata"]["role_exposure"]),
                        json.dumps(entry["metadata"]["tags"]),
                        entry["metadata"]["timestamp"],
                    ))

                    # Edges
                    for anc in entry["ancestors"]:
                        sqlite_conn.execute("""
                            INSERT OR IGNORE INTO edges (source_id, target_id, relation)
                            VALUES (?, ?, ?)
                        """, (anc, entry["id"], "influences"))

                    for desc in entry["descendants"]:
                        sqlite_conn.execute("""
                            INSERT OR IGNORE INTO edges (source_id, target_id, relation)
                            VALUES (?, ?, ?)
                        """, (entry["id"], desc, "influences"))
        wal.mark_batch_complete(batch)
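The dict appended by store_decision doubles as the sync contract between the RocksDB hot path and the SQLite mirror. A Go type mirroring that shape might look like this (the type name is assumed, not from this commit):

```go
package storage

// WALDecisionEntry mirrors the dict appended by store_decision above.
// Field names follow the pseudocode; the type itself is an assumption.
type WALDecisionEntry struct {
	Type        string                 `json:"type"` // always "decision" here
	ID          string                 `json:"id"`
	Metadata    map[string]interface{} `json:"metadata"`
	Ancestors   []string               `json:"ancestors"`
	Descendants []string               `json:"descendants"`
}
```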
78
src/storage/rocksdb.go
Normal file
@@ -0,0 +1,78 @@
package storage

import (
	"encoding/json"

	"gitea.deepblack.cloud/chorus/bubble/models"
	"github.com/tecbot/gorocksdb"
)

// RocksDBStore is an implementation of the Storage interface using RocksDB.
type RocksDBStore struct {
	DB *gorocksdb.DB
}

// NewRocksDBStore creates and initializes a new RocksDB database.
func NewRocksDBStore(dbPath string) (*RocksDBStore, error) {
	opts := gorocksdb.NewDefaultOptions()
	opts.SetCreateIfMissing(true)
	db, err := gorocksdb.OpenDb(opts, dbPath)
	if err != nil {
		return nil, err
	}
	return &RocksDBStore{DB: db}, nil
}

// GetDecisionMetadata retrieves a decision's metadata from RocksDB.
func (r *RocksDBStore) GetDecisionMetadata(drID string) (*models.DecisionRecordSummary, error) {
	ro := gorocksdb.NewDefaultReadOptions()
	defer ro.Destroy() // release the underlying C object

	// Keys are stored as "meta:<id>"
	key := []byte("meta:" + drID)

	slice, err := r.DB.Get(ro, key)
	if err != nil {
		return nil, err
	}
	defer slice.Free()

	if !slice.Exists() {
		return nil, nil // Not found
	}

	var summary models.DecisionRecordSummary
	if err := json.Unmarshal(slice.Data(), &summary); err != nil {
		return nil, err
	}

	return &summary, nil
}

// GetAncestors retrieves a decision's ancestor IDs from RocksDB.
func (r *RocksDBStore) GetAncestors(drID string) ([]string, error) {
	ro := gorocksdb.NewDefaultReadOptions()
	defer ro.Destroy()

	// Keys are stored as "rev:<id>"
	key := []byte("rev:" + drID)

	slice, err := r.DB.Get(ro, key)
	if err != nil {
		return nil, err
	}
	defer slice.Free()

	if !slice.Exists() {
		return nil, nil // Not found: no ancestors
	}

	var ancestorIDs []string
	if err := json.Unmarshal(slice.Data(), &ancestorIDs); err != nil {
		return nil, err
	}

	return ancestorIDs, nil
}

// Close closes the RocksDB database connection.
func (r *RocksDBStore) Close() {
	if r.DB != nil {
		r.DB.Close()
	}
}
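Usage is a single constructor call; a hypothetical main, with the ./bubble-rocks path invented for illustration:

```go
package main

import (
	"fmt"
	"log"

	"gitea.deepblack.cloud/chorus/bubble/storage"
)

func main() {
	store, err := storage.NewRocksDBStore("./bubble-rocks")
	if err != nil {
		log.Fatal(err)
	}
	defer store.Close()

	summary, err := store.GetDecisionMetadata("dr:sha256:prev1")
	if err != nil {
		log.Fatal(err)
	}
	if summary == nil {
		fmt.Println("no such decision")
		return
	}
	fmt.Println(summary.Statement)
}
```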
90
src/storage/sqlite.go
Normal file
@@ -0,0 +1,90 @@
package storage

import (
	"database/sql"
	"encoding/json"
	"os"
	"path/filepath"

	"gitea.deepblack.cloud/chorus/bubble/models"
	_ "github.com/mattn/go-sqlite3"
)

// SQLiteStore is an implementation of the Storage interface using SQLite.
type SQLiteStore struct {
	DB *sql.DB
}

// NewSQLiteStore connects to the SQLite database and returns a new SQLiteStore.
func NewSQLiteStore(dbPath string) (*SQLiteStore, error) {
	db, err := sql.Open("sqlite3", dbPath)
	if err != nil {
		return nil, err
	}
	if err = db.Ping(); err != nil {
		return nil, err
	}
	return &SQLiteStore{DB: db}, nil
}

// Setup reads the schema file and executes it to create the database tables.
func (s *SQLiteStore) Setup(schemaPath string) error {
	schema, err := os.ReadFile(filepath.Clean(schemaPath))
	if err != nil {
		return err
	}
	_, err = s.DB.Exec(string(schema))
	return err
}

// GetDecisionMetadata retrieves a single decision record's metadata from the database.
func (s *SQLiteStore) GetDecisionMetadata(drID string) (*models.DecisionRecordSummary, error) {
	row := s.DB.QueryRow("SELECT id, statement, lifecycle_state, role_exposure, tags, timestamp FROM decisions WHERE id = ?", drID)

	var summary models.DecisionRecordSummary
	var roleExposureJSON, tagsJSON string

	err := row.Scan(
		&summary.ID,
		&summary.Statement,
		&summary.LifecycleState,
		&roleExposureJSON,
		&tagsJSON,
		&summary.Timestamp,
	)
	if err != nil {
		if err == sql.ErrNoRows {
			return nil, nil // Return nil, nil if not found
		}
		return nil, err
	}

	// Unmarshal JSON fields
	if err := json.Unmarshal([]byte(roleExposureJSON), &summary.RoleExposure); err != nil {
		return nil, err
	}
	if err := json.Unmarshal([]byte(tagsJSON), &summary.Tags); err != nil {
		return nil, err
	}

	return &summary, nil
}

// GetAncestors retrieves the IDs of all direct ancestors for a given decision record.
func (s *SQLiteStore) GetAncestors(drID string) ([]string, error) {
	rows, err := s.DB.Query("SELECT source_id FROM edges WHERE target_id = ?", drID)
	if err != nil {
		return nil, err
	}
	defer rows.Close()

	var ancestorIDs []string
	for rows.Next() {
		var id string
		if err := rows.Scan(&id); err != nil {
			return nil, err
		}
		ancestorIDs = append(ancestorIDs, id)
	}

	return ancestorIDs, rows.Err() // surface any iteration error
}
33
src/storage/storage.go
Normal file
@@ -0,0 +1,33 @@
package storage

import "gitea.deepblack.cloud/chorus/bubble/models"

// Storage defines the interface for accessing the decision provenance data.
// This allows for swapping the underlying database implementation.
// RocksDBStore (rocksdb.go) and SQLiteStore (sqlite.go) both satisfy it;
// the placeholder RocksDBStore stub that previously lived here duplicated
// the declarations in rocksdb.go and would not compile alongside them.
type Storage interface {
	GetDecisionMetadata(drID string) (*models.DecisionRecordSummary, error)
	GetAncestors(drID string) ([]string, error)
	// Add more methods as needed, e.g., for writing, caching, etc.
}
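Since both concrete stores satisfy Storage, the backend can be chosen at startup behind the interface. A hypothetical selector; the BUBBLE_BACKEND variable is invented for illustration:

```go
package main

import (
	"log"
	"os"

	"gitea.deepblack.cloud/chorus/bubble/storage"
)

// newStore picks a backend from an environment variable.
// Sketch only: BUBBLE_BACKEND and both paths are assumptions.
func newStore() (storage.Storage, error) {
	if os.Getenv("BUBBLE_BACKEND") == "rocksdb" {
		return storage.NewRocksDBStore("./bubble-rocks")
	}
	return storage.NewSQLiteStore("./bubble.db")
}

func main() {
	store, err := newStore()
	if err != nil {
		log.Fatal(err)
	}
	ancestors, err := store.GetAncestors("dr:1")
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("dr:1 has %d direct ancestors", len(ancestors))
}
```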
39
src/tests/30-unit-tests.py
Normal file
@@ -0,0 +1,39 @@
from fastapi.testclient import TestClient

# Assumes the FastAPI app from the bundle-API skeleton is importable;
# the module name `bundle_api` is a placeholder for wherever it lives.
from bundle_api import app

client = TestClient(app)


def test_bundle_requires_start_id_or_query():
    response = client.post("/decision/bundle", json={"role": "engineer"})
    assert response.status_code == 400


def test_bundle_with_fake_start_id():
    req = {
        "start_id": "dr:nonexistent",
        "role": "engineer",
        "max_hops": 1,
        "top_k": 1,
        "include_full_dr": False,
        "redaction": True,
    }
    response = client.post("/decision/bundle", json=req)
    assert response.status_code == 200
    data = response.json()
    assert data["start_id"] == "dr:nonexistent"
    assert "timeline" in data


def test_bundle_cache_hit():
    req = {
        "start_id": "dr:nonexistent",
        "role": "engineer",
        "max_hops": 1,
        "top_k": 1,
        "include_full_dr": False,
        "redaction": True,
    }
    # First call to populate cache
    client.post("/decision/bundle", json=req)
    # Second call to hit cache
    response = client.post("/decision/bundle", json=req)
    data = response.json()
    assert data.get("cache_hit") is True
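The same endpoint these tests exercise can be driven from Go; a minimal client sketch, assuming the service listens on uvicorn's default localhost:8000:

```go
package main

import (
	"bytes"
	"encoding/json"
	"fmt"
	"log"
	"net/http"
)

func main() {
	// Request body mirrors the bundle request shape used by the tests.
	req := map[string]interface{}{
		"start_id":        "dr:1",
		"role":            "engineer",
		"max_hops":        3,
		"top_k":           5,
		"include_full_dr": false,
		"redaction":       true,
	}
	body, err := json.Marshal(req)
	if err != nil {
		log.Fatal(err)
	}

	resp, err := http.Post("http://localhost:8000/decision/bundle",
		"application/json", bytes.NewReader(body))
	if err != nil {
		log.Fatal(err)
	}
	defer resp.Body.Close()

	var bundle map[string]interface{}
	if err := json.NewDecoder(resp.Body).Decode(&bundle); err != nil {
		log.Fatal(err)
	}
	fmt.Println("summary:", bundle["summary"])
}
```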