Integrate BACKBEAT SDK and resolve KACHING license validation

Major integrations and fixes:
- Added BACKBEAT SDK integration for P2P operation timing
- Implemented beat-aware status tracking for distributed operations
- Added Docker secrets support for secure license management
- Resolved KACHING license validation via HTTPS/TLS
- Updated docker-compose configuration for clean stack deployment
- Disabled rollback policies to prevent deployment failures
- Added license credential storage (CHORUS-DEV-MULTI-001)

Technical improvements:
- BACKBEAT P2P operation tracking with phase management
- Enhanced configuration system with file-based secrets (see the sketch after this list)
- Improved error handling for license validation
- Clean separation of KACHING and CHORUS deployment stacks
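
The file-based secrets mechanism itself is not shown in this excerpt. As a minimal sketch of the idea, assuming a hypothetical loadSecret helper and an illustrative KACHING_LICENSE_ID variable name, the configuration layer would prefer a *_FILE path (the usual Docker secrets convention, with files mounted under /run/secrets) over a plain environment variable:

// Hypothetical sketch of file-based secret resolution for the KACHING
// license credential. The helper name and variable name are illustrative,
// not taken from this commit.
package main

import (
	"fmt"
	"os"
	"strings"
)

// loadSecret returns the value of <name>, preferring a file path supplied in
// <name>_FILE over the plain environment variable.
func loadSecret(name string) (string, error) {
	if path := os.Getenv(name + "_FILE"); path != "" {
		b, err := os.ReadFile(path)
		if err != nil {
			return "", fmt.Errorf("read secret file %s: %w", path, err)
		}
		return strings.TrimSpace(string(b)), nil
	}
	if v := os.Getenv(name); v != "" {
		return v, nil
	}
	return "", fmt.Errorf("secret %s not set", name)
}

func main() {
	license, err := loadSecret("KACHING_LICENSE_ID")
	if err != nil {
		// Fail fast: license validation over HTTPS/TLS needs this credential.
		panic(err)
	}
	fmt.Println("license id loaded, length:", len(license))
}

With docker-compose secrets, KACHING_LICENSE_ID_FILE would point at the mounted secret file so the credential never appears directly in the environment.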

🤖 Generated with [Claude Code](https://claude.ai/code)

Co-Authored-By: Claude <noreply@anthropic.com>
Author: anthonyrawlins
Date: 2025-09-06 07:56:26 +10:00
Parent: 543ab216f9
Commit: 9bdcbe0447
4,730 changed files with 1,480,093 additions and 1,916 deletions

vendor/go.etcd.io/bbolt/internal/freelist/array.go (generated, vendored, new file, 108 lines)

@@ -0,0 +1,108 @@
package freelist
import (
"fmt"
"sort"
"go.etcd.io/bbolt/internal/common"
)
type array struct {
*shared
ids []common.Pgid // all free and available free page ids.
}
func (f *array) Init(ids common.Pgids) {
f.ids = ids
f.reindex()
}
func (f *array) Allocate(txid common.Txid, n int) common.Pgid {
if len(f.ids) == 0 {
return 0
}
var initial, previd common.Pgid
for i, id := range f.ids {
if id <= 1 {
panic(fmt.Sprintf("invalid page allocation: %d", id))
}
// Reset initial page if this is not contiguous.
if previd == 0 || id-previd != 1 {
initial = id
}
// If we found a contiguous block then remove it and return it.
if (id-initial)+1 == common.Pgid(n) {
// If we're allocating off the beginning then take the fast path
// and just adjust the existing slice. This will use extra memory
// temporarily but the append() in free() will realloc the slice
// as is necessary.
if (i + 1) == n {
f.ids = f.ids[i+1:]
} else {
copy(f.ids[i-n+1:], f.ids[i+1:])
f.ids = f.ids[:len(f.ids)-n]
}
// Remove from the free cache.
for i := common.Pgid(0); i < common.Pgid(n); i++ {
delete(f.cache, initial+i)
}
f.allocs[initial] = txid
return initial
}
previd = id
}
return 0
}
func (f *array) FreeCount() int {
return len(f.ids)
}
func (f *array) freePageIds() common.Pgids {
return f.ids
}
func (f *array) mergeSpans(ids common.Pgids) {
sort.Sort(ids)
common.Verify(func() {
idsIdx := make(map[common.Pgid]struct{})
for _, id := range f.ids {
// The existing f.ids shouldn't have duplicated free ID.
if _, ok := idsIdx[id]; ok {
panic(fmt.Sprintf("detected duplicated free page ID: %d in existing f.ids: %v", id, f.ids))
}
idsIdx[id] = struct{}{}
}
prev := common.Pgid(0)
for _, id := range ids {
// The ids shouldn't have duplicated free ID. Note page 0 and 1
// are reserved for meta pages, so they can never be free page IDs.
if prev == id {
panic(fmt.Sprintf("detected duplicated free ID: %d in ids: %v", id, ids))
}
prev = id
// The ids shouldn't have any overlap with the existing f.ids.
if _, ok := idsIdx[id]; ok {
panic(fmt.Sprintf("detected overlapped free page ID: %d between ids: %v and existing f.ids: %v", id, ids, f.ids))
}
}
})
f.ids = common.Pgids(f.ids).Merge(ids)
}
func NewArrayFreelist() Interface {
a := &array{
shared: newShared(),
ids: []common.Pgid{},
}
a.Interface = a
return a
}
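
As a standalone illustration of the contiguous-run scan in Allocate above, simplified to plain uint64 page ids and written outside the internal package (illustrative only, not the vendored implementation):

// Standalone sketch of the contiguous-run scan used by the array freelist.
package main

import "fmt"

// allocate returns the first page of a run of n contiguous IDs from the sorted
// slice ids, along with the remaining IDs, or 0 if no such run exists.
func allocate(ids []uint64, n int) (uint64, []uint64) {
	var initial, previd uint64
	for i, id := range ids {
		// Reset the run start whenever the sequence breaks.
		if previd == 0 || id-previd != 1 {
			initial = id
		}
		// A run of length n ending at index i starts at initial.
		if int(id-initial)+1 == n {
			out := append(append([]uint64{}, ids[:i-n+1]...), ids[i+1:]...)
			return initial, out
		}
		previd = id
	}
	return 0, ids
}

func main() {
	start, rest := allocate([]uint64{3, 4, 5, 9, 10, 11, 12}, 3)
	fmt.Println(start, rest) // 3 [9 10 11 12]
}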

vendor/go.etcd.io/bbolt/internal/freelist/freelist.go (generated, vendored, new file, 82 lines)

@@ -0,0 +1,82 @@
package freelist
import (
"go.etcd.io/bbolt/internal/common"
)
type ReadWriter interface {
// Read calls Init with the page ids stored in the given page.
Read(page *common.Page)
// Write writes the freelist into the given page.
Write(page *common.Page)
// EstimatedWritePageSize returns the size in bytes of the freelist after serialization in Write.
// This should never underestimate the size.
EstimatedWritePageSize() int
}
type Interface interface {
ReadWriter
// Init initializes this freelist with the given list of pages.
Init(ids common.Pgids)
// Allocate tries to allocate the given number of contiguous pages
// from the free list pages. It returns the starting page ID if
// available; otherwise, it returns 0.
Allocate(txid common.Txid, numPages int) common.Pgid
// Count returns the number of free and pending pages.
Count() int
// FreeCount returns the number of free pages.
FreeCount() int
// PendingCount returns the number of pending pages.
PendingCount() int
// AddReadonlyTXID adds a given read-only transaction id for pending page tracking.
AddReadonlyTXID(txid common.Txid)
// RemoveReadonlyTXID removes a given read-only transaction id for pending page tracking.
RemoveReadonlyTXID(txid common.Txid)
// ReleasePendingPages releases any pages associated with closed read-only transactions.
ReleasePendingPages()
// Free releases a page and its overflow for a given transaction id.
// If the page is already free or is one of the meta pages, then a panic will occur.
Free(txId common.Txid, p *common.Page)
// Freed returns whether a given page is in the free list.
Freed(pgId common.Pgid) bool
// Rollback removes the pages from a given pending tx.
Rollback(txId common.Txid)
// Copyall copies a list of all free ids and all pending ids in one sorted list.
// f.count returns the minimum length required for dst.
Copyall(dst []common.Pgid)
// Reload reads the freelist from a page and filters out pending items.
Reload(p *common.Page)
// NoSyncReload reads the freelist from Pgids and filters out pending items.
NoSyncReload(pgIds common.Pgids)
// freePageIds returns the IDs of all free pages. Returns an empty slice if no free pages are available.
freePageIds() common.Pgids
// pendingPageIds returns all pending pages by transaction id.
pendingPageIds() map[common.Txid]*txPending
// release moves all page ids for a transaction id (or older) to the freelist.
release(txId common.Txid)
// releaseRange moves pending pages allocated within an extent [begin,end] to the free list.
releaseRange(begin, end common.Txid)
// mergeSpans merges the given page ids into the freelist.
mergeSpans(ids common.Pgids)
}
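
For library users, the choice between these internal implementations is exposed through bbolt's public Options.FreelistType; a minimal sketch (the database path is illustrative):

// Choosing between the array and hashmap freelist implementations via
// bbolt's public API.
package main

import (
	"log"

	bolt "go.etcd.io/bbolt"
)

func main() {
	opts := *bolt.DefaultOptions
	opts.FreelistType = bolt.FreelistMapType // or bolt.FreelistArrayType

	db, err := bolt.Open("/tmp/example.db", 0o600, &opts)
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()
}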

vendor/go.etcd.io/bbolt/internal/freelist/hashmap.go (generated, vendored, new file, 292 lines)

@@ -0,0 +1,292 @@
package freelist
import (
"fmt"
"reflect"
"sort"
"go.etcd.io/bbolt/internal/common"
)
// pidSet holds the set of starting pgids which have the same span size
type pidSet map[common.Pgid]struct{}
type hashMap struct {
*shared
freePagesCount uint64 // count of free pages(hashmap version)
freemaps map[uint64]pidSet // key is the size of continuous pages(span), value is a set which contains the starting pgids of same size
forwardMap map[common.Pgid]uint64 // key is start pgid, value is its span size
backwardMap map[common.Pgid]uint64 // key is end pgid, value is its span size
}
func (f *hashMap) Init(pgids common.Pgids) {
// reset the counter when the freelist is initialized
f.freePagesCount = 0
f.freemaps = make(map[uint64]pidSet)
f.forwardMap = make(map[common.Pgid]uint64)
f.backwardMap = make(map[common.Pgid]uint64)
if len(pgids) == 0 {
return
}
if !sort.SliceIsSorted([]common.Pgid(pgids), func(i, j int) bool { return pgids[i] < pgids[j] }) {
panic("pgids not sorted")
}
size := uint64(1)
start := pgids[0]
for i := 1; i < len(pgids); i++ {
// continuous page
if pgids[i] == pgids[i-1]+1 {
size++
} else {
f.addSpan(start, size)
size = 1
start = pgids[i]
}
}
// init the tail
if size != 0 && start != 0 {
f.addSpan(start, size)
}
f.reindex()
}
func (f *hashMap) Allocate(txid common.Txid, n int) common.Pgid {
if n == 0 {
return 0
}
// if we have an exact size match, take the short path
if bm, ok := f.freemaps[uint64(n)]; ok {
for pid := range bm {
// remove the span
f.delSpan(pid, uint64(n))
f.allocs[pid] = txid
for i := common.Pgid(0); i < common.Pgid(n); i++ {
delete(f.cache, pid+i)
}
return pid
}
}
// lookup the map to find larger span
for size, bm := range f.freemaps {
if size < uint64(n) {
continue
}
for pid := range bm {
// remove the initial
f.delSpan(pid, size)
f.allocs[pid] = txid
remain := size - uint64(n)
// add remain span
f.addSpan(pid+common.Pgid(n), remain)
for i := common.Pgid(0); i < common.Pgid(n); i++ {
delete(f.cache, pid+i)
}
return pid
}
}
return 0
}
func (f *hashMap) FreeCount() int {
common.Verify(func() {
expectedFreePageCount := f.hashmapFreeCountSlow()
common.Assert(int(f.freePagesCount) == expectedFreePageCount,
"freePagesCount (%d) is out of sync with free pages map (%d)", f.freePagesCount, expectedFreePageCount)
})
return int(f.freePagesCount)
}
func (f *hashMap) freePageIds() common.Pgids {
count := f.FreeCount()
if count == 0 {
return common.Pgids{}
}
m := make([]common.Pgid, 0, count)
startPageIds := make([]common.Pgid, 0, len(f.forwardMap))
for k := range f.forwardMap {
startPageIds = append(startPageIds, k)
}
sort.Sort(common.Pgids(startPageIds))
for _, start := range startPageIds {
if size, ok := f.forwardMap[start]; ok {
for i := 0; i < int(size); i++ {
m = append(m, start+common.Pgid(i))
}
}
}
return m
}
func (f *hashMap) hashmapFreeCountSlow() int {
count := 0
for _, size := range f.forwardMap {
count += int(size)
}
return count
}
func (f *hashMap) addSpan(start common.Pgid, size uint64) {
f.backwardMap[start-1+common.Pgid(size)] = size
f.forwardMap[start] = size
if _, ok := f.freemaps[size]; !ok {
f.freemaps[size] = make(map[common.Pgid]struct{})
}
f.freemaps[size][start] = struct{}{}
f.freePagesCount += size
}
func (f *hashMap) delSpan(start common.Pgid, size uint64) {
delete(f.forwardMap, start)
delete(f.backwardMap, start+common.Pgid(size-1))
delete(f.freemaps[size], start)
if len(f.freemaps[size]) == 0 {
delete(f.freemaps, size)
}
f.freePagesCount -= size
}
func (f *hashMap) mergeSpans(ids common.Pgids) {
common.Verify(func() {
ids1Freemap := f.idsFromFreemaps()
ids2Forward := f.idsFromForwardMap()
ids3Backward := f.idsFromBackwardMap()
if !reflect.DeepEqual(ids1Freemap, ids2Forward) {
panic(fmt.Sprintf("Detected mismatch, f.freemaps: %v, f.forwardMap: %v", f.freemaps, f.forwardMap))
}
if !reflect.DeepEqual(ids1Freemap, ids3Backward) {
panic(fmt.Sprintf("Detected mismatch, f.freemaps: %v, f.backwardMap: %v", f.freemaps, f.backwardMap))
}
sort.Sort(ids)
prev := common.Pgid(0)
for _, id := range ids {
// The ids shouldn't have duplicated free ID.
if prev == id {
panic(fmt.Sprintf("detected duplicated free ID: %d in ids: %v", id, ids))
}
prev = id
// The ids shouldn't have any overlap with the existing f.freemaps.
if _, ok := ids1Freemap[id]; ok {
panic(fmt.Sprintf("detected overlapped free page ID: %d between ids: %v and existing f.freemaps: %v", id, ids, f.freemaps))
}
}
})
for _, id := range ids {
// try to see if we can merge and update
f.mergeWithExistingSpan(id)
}
}
// mergeWithExistingSpan merges pid into the existing free spans, trying to merge both backward and forward
func (f *hashMap) mergeWithExistingSpan(pid common.Pgid) {
prev := pid - 1
next := pid + 1
preSize, mergeWithPrev := f.backwardMap[prev]
nextSize, mergeWithNext := f.forwardMap[next]
newStart := pid
newSize := uint64(1)
if mergeWithPrev {
//merge with previous span
start := prev + 1 - common.Pgid(preSize)
f.delSpan(start, preSize)
newStart -= common.Pgid(preSize)
newSize += preSize
}
if mergeWithNext {
// merge with next span
f.delSpan(next, nextSize)
newSize += nextSize
}
f.addSpan(newStart, newSize)
}
// idsFromFreemaps gets all free page IDs from f.freemaps.
// Used by tests only.
func (f *hashMap) idsFromFreemaps() map[common.Pgid]struct{} {
ids := make(map[common.Pgid]struct{})
for size, idSet := range f.freemaps {
for start := range idSet {
for i := 0; i < int(size); i++ {
id := start + common.Pgid(i)
if _, ok := ids[id]; ok {
panic(fmt.Sprintf("detected duplicated free page ID: %d in f.freemaps: %v", id, f.freemaps))
}
ids[id] = struct{}{}
}
}
}
return ids
}
// idsFromForwardMap gets all free page IDs from f.forwardMap.
// Used by tests only.
func (f *hashMap) idsFromForwardMap() map[common.Pgid]struct{} {
ids := make(map[common.Pgid]struct{})
for start, size := range f.forwardMap {
for i := 0; i < int(size); i++ {
id := start + common.Pgid(i)
if _, ok := ids[id]; ok {
panic(fmt.Sprintf("detected duplicated free page ID: %d in f.forwardMap: %v", id, f.forwardMap))
}
ids[id] = struct{}{}
}
}
return ids
}
// idsFromBackwardMap gets all free page IDs from f.backwardMap.
// Used by tests only.
func (f *hashMap) idsFromBackwardMap() map[common.Pgid]struct{} {
ids := make(map[common.Pgid]struct{})
for end, size := range f.backwardMap {
for i := 0; i < int(size); i++ {
id := end - common.Pgid(i)
if _, ok := ids[id]; ok {
panic(fmt.Sprintf("detected duplicated free page ID: %d in f.backwardMap: %v", id, f.backwardMap))
}
ids[id] = struct{}{}
}
}
return ids
}
func NewHashMapFreelist() Interface {
hm := &hashMap{
shared: newShared(),
freemaps: make(map[uint64]pidSet),
forwardMap: make(map[common.Pgid]uint64),
backwardMap: make(map[common.Pgid]uint64),
}
hm.Interface = hm
return hm
}
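
A simplified, standalone sketch of the mergeWithExistingSpan bookkeeping above (plain uint64 maps, no freemaps index, illustrative only):

// A freed page is merged with the span ending just before it and/or the span
// starting just after it. forward maps span start -> size; backward maps span end -> size.
package main

import "fmt"

func merge(forward, backward map[uint64]uint64, pid uint64) {
	newStart, newSize := pid, uint64(1)

	if preSize, ok := backward[pid-1]; ok {
		// Merge with the span that ends at pid-1.
		start := pid - preSize
		delete(forward, start)
		delete(backward, pid-1)
		newStart, newSize = start, newSize+preSize
	}
	if nextSize, ok := forward[pid+1]; ok {
		// Merge with the span that starts at pid+1.
		delete(forward, pid+1)
		delete(backward, pid+nextSize)
		newSize += nextSize
	}
	forward[newStart] = newSize
	backward[newStart+newSize-1] = newSize
}

func main() {
	forward := map[uint64]uint64{3: 2, 7: 2} // spans [3,4] and [7,8]
	backward := map[uint64]uint64{4: 2, 8: 2}
	merge(forward, backward, 5)    // freeing page 5 extends [3,4] into [3,5]
	fmt.Println(forward, backward) // map[3:3 7:2] map[5:3 8:2]
}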

vendor/go.etcd.io/bbolt/internal/freelist/shared.go (generated, vendored, new file, 310 lines)

@@ -0,0 +1,310 @@
package freelist
import (
"fmt"
"math"
"sort"
"unsafe"
"go.etcd.io/bbolt/internal/common"
)
type txPending struct {
ids []common.Pgid
alloctx []common.Txid // txids allocating the ids
lastReleaseBegin common.Txid // beginning txid of last matching releaseRange
}
type shared struct {
Interface
readonlyTXIDs []common.Txid // all readonly transaction IDs.
allocs map[common.Pgid]common.Txid // mapping of Txid that allocated a pgid.
cache map[common.Pgid]struct{} // fast lookup of all free and pending page ids.
pending map[common.Txid]*txPending // mapping of soon-to-be free page ids by tx.
}
func newShared() *shared {
return &shared{
pending: make(map[common.Txid]*txPending),
allocs: make(map[common.Pgid]common.Txid),
cache: make(map[common.Pgid]struct{}),
}
}
func (t *shared) pendingPageIds() map[common.Txid]*txPending {
return t.pending
}
func (t *shared) PendingCount() int {
var count int
for _, txp := range t.pending {
count += len(txp.ids)
}
return count
}
func (t *shared) Count() int {
return t.FreeCount() + t.PendingCount()
}
func (t *shared) Freed(pgId common.Pgid) bool {
_, ok := t.cache[pgId]
return ok
}
func (t *shared) Free(txid common.Txid, p *common.Page) {
if p.Id() <= 1 {
panic(fmt.Sprintf("cannot free page 0 or 1: %d", p.Id()))
}
// Free page and all its overflow pages.
txp := t.pending[txid]
if txp == nil {
txp = &txPending{}
t.pending[txid] = txp
}
allocTxid, ok := t.allocs[p.Id()]
common.Verify(func() {
if allocTxid == txid {
panic(fmt.Sprintf("free: freed page (%d) was allocated by the same transaction (%d)", p.Id(), txid))
}
})
if ok {
delete(t.allocs, p.Id())
}
for id := p.Id(); id <= p.Id()+common.Pgid(p.Overflow()); id++ {
// Verify that page is not already free.
if _, ok := t.cache[id]; ok {
panic(fmt.Sprintf("page %d already freed", id))
}
// Add to the freelist and cache.
txp.ids = append(txp.ids, id)
txp.alloctx = append(txp.alloctx, allocTxid)
t.cache[id] = struct{}{}
}
}
func (t *shared) Rollback(txid common.Txid) {
// Remove page ids from cache.
txp := t.pending[txid]
if txp == nil {
return
}
for i, pgid := range txp.ids {
delete(t.cache, pgid)
tx := txp.alloctx[i]
if tx == 0 {
continue
}
if tx != txid {
// Pending free aborted; restore page back to alloc list.
t.allocs[pgid] = tx
} else {
// A writing TXN should never free a page which was allocated by itself.
panic(fmt.Sprintf("rollback: freed page (%d) was allocated by the same transaction (%d)", pgid, txid))
}
}
// Remove pages from pending list and mark as free if allocated by txid.
delete(t.pending, txid)
// Remove pgids which are allocated by this txid
for pgid, tid := range t.allocs {
if tid == txid {
delete(t.allocs, pgid)
}
}
}
func (t *shared) AddReadonlyTXID(tid common.Txid) {
t.readonlyTXIDs = append(t.readonlyTXIDs, tid)
}
func (t *shared) RemoveReadonlyTXID(tid common.Txid) {
for i := range t.readonlyTXIDs {
if t.readonlyTXIDs[i] == tid {
last := len(t.readonlyTXIDs) - 1
t.readonlyTXIDs[i] = t.readonlyTXIDs[last]
t.readonlyTXIDs = t.readonlyTXIDs[:last]
break
}
}
}
type txIDx []common.Txid
func (t txIDx) Len() int { return len(t) }
func (t txIDx) Swap(i, j int) { t[i], t[j] = t[j], t[i] }
func (t txIDx) Less(i, j int) bool { return t[i] < t[j] }
func (t *shared) ReleasePendingPages() {
// Free all pending pages prior to the earliest open transaction.
sort.Sort(txIDx(t.readonlyTXIDs))
minid := common.Txid(math.MaxUint64)
if len(t.readonlyTXIDs) > 0 {
minid = t.readonlyTXIDs[0]
}
if minid > 0 {
t.release(minid - 1)
}
// Release unused txid extents.
for _, tid := range t.readonlyTXIDs {
t.releaseRange(minid, tid-1)
minid = tid + 1
}
t.releaseRange(minid, common.Txid(math.MaxUint64))
// Any page both allocated and freed in an extent is safe to release.
}
func (t *shared) release(txid common.Txid) {
m := make(common.Pgids, 0)
for tid, txp := range t.pending {
if tid <= txid {
// Move transaction's pending pages to the available freelist.
// Don't remove from the cache since the page is still free.
m = append(m, txp.ids...)
delete(t.pending, tid)
}
}
t.mergeSpans(m)
}
func (t *shared) releaseRange(begin, end common.Txid) {
if begin > end {
return
}
m := common.Pgids{}
for tid, txp := range t.pending {
if tid < begin || tid > end {
continue
}
// Don't recompute freed pages if ranges haven't updated.
if txp.lastReleaseBegin == begin {
continue
}
for i := 0; i < len(txp.ids); i++ {
if atx := txp.alloctx[i]; atx < begin || atx > end {
continue
}
m = append(m, txp.ids[i])
txp.ids[i] = txp.ids[len(txp.ids)-1]
txp.ids = txp.ids[:len(txp.ids)-1]
txp.alloctx[i] = txp.alloctx[len(txp.alloctx)-1]
txp.alloctx = txp.alloctx[:len(txp.alloctx)-1]
i--
}
txp.lastReleaseBegin = begin
if len(txp.ids) == 0 {
delete(t.pending, tid)
}
}
t.mergeSpans(m)
}
// Copyall copies a list of all free ids and all pending ids in one sorted list.
// f.count returns the minimum length required for dst.
func (t *shared) Copyall(dst []common.Pgid) {
m := make(common.Pgids, 0, t.PendingCount())
for _, txp := range t.pendingPageIds() {
m = append(m, txp.ids...)
}
sort.Sort(m)
common.Mergepgids(dst, t.freePageIds(), m)
}
func (t *shared) Reload(p *common.Page) {
t.Read(p)
t.NoSyncReload(t.freePageIds())
}
func (t *shared) NoSyncReload(pgIds common.Pgids) {
// Build a cache of only pending pages.
pcache := make(map[common.Pgid]bool)
for _, txp := range t.pending {
for _, pendingID := range txp.ids {
pcache[pendingID] = true
}
}
// Check each page in the freelist and build a new available freelist
// with any pages not in the pending lists.
a := []common.Pgid{}
for _, id := range pgIds {
if !pcache[id] {
a = append(a, id)
}
}
t.Init(a)
}
// reindex rebuilds the free cache based on available and pending free lists.
func (t *shared) reindex() {
free := t.freePageIds()
pending := t.pendingPageIds()
t.cache = make(map[common.Pgid]struct{}, len(free))
for _, id := range free {
t.cache[id] = struct{}{}
}
for _, txp := range pending {
for _, pendingID := range txp.ids {
t.cache[pendingID] = struct{}{}
}
}
}
func (t *shared) Read(p *common.Page) {
if !p.IsFreelistPage() {
panic(fmt.Sprintf("invalid freelist page: %d, page type is %s", p.Id(), p.Typ()))
}
ids := p.FreelistPageIds()
// Copy the list of page ids from the freelist.
if len(ids) == 0 {
t.Init([]common.Pgid{})
} else {
// copy the ids, so we don't modify on the freelist page directly
idsCopy := make([]common.Pgid, len(ids))
copy(idsCopy, ids)
// Make sure they're sorted.
sort.Sort(common.Pgids(idsCopy))
t.Init(idsCopy)
}
}
func (t *shared) EstimatedWritePageSize() int {
n := t.Count()
if n >= 0xFFFF {
// The first element will be used to store the count. See freelist.write.
n++
}
return int(common.PageHeaderSize) + (int(unsafe.Sizeof(common.Pgid(0))) * n)
}
func (t *shared) Write(p *common.Page) {
// Combine the old free pgids and pgids waiting on an open transaction.
// Update the header flag.
p.SetFlags(common.FreelistPageFlag)
// The page.count can only hold up to 64k elements so if we overflow that
// number then we handle it by putting the size in the first element.
l := t.Count()
if l == 0 {
p.SetCount(uint16(l))
} else if l < 0xFFFF {
p.SetCount(uint16(l))
data := common.UnsafeAdd(unsafe.Pointer(p), unsafe.Sizeof(*p))
ids := unsafe.Slice((*common.Pgid)(data), l)
t.Copyall(ids)
} else {
p.SetCount(0xFFFF)
data := common.UnsafeAdd(unsafe.Pointer(p), unsafe.Sizeof(*p))
ids := unsafe.Slice((*common.Pgid)(data), l+1)
ids[0] = common.Pgid(l)
t.Copyall(ids[1:])
}
}
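
A small standalone sketch of the count encoding that Write and EstimatedWritePageSize above agree on: counts below 0xFFFF fit in the 16-bit page header count, while larger counts set the header to 0xFFFF and move the real count into the first pgid slot (illustrative, simplified to plain ints):

// Sketch of the freelist page count encoding used in Write.
package main

import "fmt"

// encodeCount returns the header count and the number of pgid slots needed
// for l freelist entries.
func encodeCount(l int) (headerCount uint16, slots int) {
	if l < 0xFFFF {
		return uint16(l), l
	}
	return 0xFFFF, l + 1 // first slot holds the real count
}

func main() {
	fmt.Println(encodeCount(10))    // 10 10
	fmt.Println(encodeCount(70000)) // 65535 70001
}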