Integrate BACKBEAT SDK and resolve KACHING license validation

Major integrations and fixes:
- Added BACKBEAT SDK integration for P2P operation timing
- Implemented beat-aware status tracking for distributed operations
- Added Docker secrets support for secure license management
- Resolved KACHING license validation via HTTPS/TLS (see the sketch after this list)
- Updated docker-compose configuration for clean stack deployment
- Disabled rollback policies to prevent deployment failures
- Added license credential storage (CHORUS-DEV-MULTI-001)
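
A minimal sketch of what the HTTPS/TLS validation call can look like, for reviewers. The endpoint URL, path, and response handling are assumptions for illustration only, not the actual KACHING API:

```go
// license_check.go — illustrative only; the KACHING endpoint and
// response shape below are assumptions, not the real API.
package main

import (
	"crypto/tls"
	"fmt"
	"net/http"
	"time"
)

func validateLicense(licenseID string) error {
	client := &http.Client{
		Timeout: 10 * time.Second,
		Transport: &http.Transport{
			// Verify the server certificate normally; the fix was to use
			// proper TLS, not to skip verification.
			TLSClientConfig: &tls.Config{MinVersion: tls.VersionTLS12},
		},
	}
	// Hypothetical validation endpoint.
	url := "https://kaching.example.internal/v1/licenses/" + licenseID
	resp, err := client.Get(url)
	if err != nil {
		return fmt.Errorf("license validation request failed: %w", err)
	}
	defer resp.Body.Close()
	if resp.StatusCode != http.StatusOK {
		return fmt.Errorf("license %s rejected: HTTP %d", licenseID, resp.StatusCode)
	}
	return nil
}
```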

Technical improvements:
- BACKBEAT P2P operation tracking with phase management
- Enhanced configuration system with file-based secrets (see the sketch after this list)
- Improved error handling for license validation
- Clean separation of KACHING and CHORUS deployment stacks
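
A minimal sketch of the file-based secrets pattern referenced above, following the standard Docker convention of mounting secret values under /run/secrets. The helper and variable names are assumptions, not the actual CHORUS configuration code:

```go
// Illustrative config helper: prefer a *_FILE path (Docker secret),
// fall back to a plain environment variable. Names are hypothetical.
package config

import (
	"os"
	"strings"
)

// secretValue returns the secret named by key, reading the file named by
// KEY_FILE if set (e.g. KACHING_LICENSE_KEY_FILE=/run/secrets/kaching_license),
// otherwise falling back to the KEY environment variable.
func secretValue(key string) (string, error) {
	if path := os.Getenv(key + "_FILE"); path != "" {
		b, err := os.ReadFile(path)
		if err != nil {
			return "", err
		}
		// Trim the trailing newline that secret files typically carry.
		return strings.TrimSpace(string(b)), nil
	}
	return os.Getenv(key), nil
}
```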

🤖 Generated with [Claude Code](https://claude.ai/code)

Co-Authored-By: Claude <noreply@anthropic.com>
Author: anthonyrawlins
Date:   2025-09-06 07:56:26 +10:00
Parent: 543ab216f9
Commit: 9bdcbe0447
4730 changed files with 1,480,093 additions and 1,916 deletions

vendor/lukechampine.com/blake3/compress_noasm.go (generated, vendored; new file, 93 lines)

@@ -0,0 +1,93 @@
//go:build !amd64
// +build !amd64

package blake3

import "encoding/binary"

// Pure-Go fallbacks for the assembly routines used on amd64 builds.

func compressNode(n node) (out [16]uint32) {
	compressNodeGeneric(&out, n)
	return
}

func compressBuffer(buf *[maxSIMD * chunkSize]byte, buflen int, key *[8]uint32, counter uint64, flags uint32) node {
	return compressBufferGeneric(buf, buflen, key, counter, flags)
}

// compressChunk compresses a chunk of up to chunkSize bytes into a single
// node, chaining each full block's compression into the next and marking
// the final (possibly short) block with flagChunkEnd.
func compressChunk(chunk []byte, key *[8]uint32, counter uint64, flags uint32) node {
	n := node{
		cv:       *key,
		counter:  counter,
		blockLen: blockSize,
		flags:    flags | flagChunkStart,
	}
	var block [blockSize]byte
	for len(chunk) > blockSize {
		copy(block[:], chunk)
		chunk = chunk[blockSize:]
		bytesToWords(block, &n.block)
		n.cv = chainingValue(n)
		n.flags &^= flagChunkStart
	}
	// pad last block with zeros
	block = [blockSize]byte{}
	n.blockLen = uint32(len(chunk))
	copy(block[:], chunk)
	bytesToWords(block, &n.block)
	n.flags |= flagChunkEnd
	return n
}

// hashBlock hashes a single input of at most 64 bytes as a root node,
// writing the 64-byte digest to out.
func hashBlock(out *[64]byte, buf []byte) {
	var block [64]byte
	var words [16]uint32
	copy(block[:], buf)
	bytesToWords(block, &words)
	compressNodeGeneric(&words, node{
		cv:       iv,
		block:    words,
		blockLen: uint32(len(buf)),
		flags:    flagChunkStart | flagChunkEnd | flagRoot,
	})
	wordsToBytes(words, out)
}

func compressBlocks(out *[maxSIMD * blockSize]byte, n node) {
	var outs [maxSIMD][64]byte
	compressBlocksGeneric(&outs, n)
	for i := range outs {
		copy(out[i*64:], outs[i][:])
	}
}

func mergeSubtrees(cvs *[maxSIMD][8]uint32, numCVs uint64, key *[8]uint32, flags uint32) node {
	return mergeSubtreesGeneric(cvs, numCVs, key, flags)
}

// bytesToWords converts a 64-byte block to sixteen little-endian words.
func bytesToWords(bytes [64]byte, words *[16]uint32) {
	for i := range words {
		words[i] = binary.LittleEndian.Uint32(bytes[4*i:])
	}
}

// wordsToBytes converts sixteen words back to a 64-byte little-endian block.
func wordsToBytes(words [16]uint32, block *[64]byte) {
	for i, w := range words {
		binary.LittleEndian.PutUint32(block[4*i:], w)
	}
}

func bytesToCV(b []byte) [8]uint32 {
	var cv [8]uint32
	for i := range cv {
		cv[i] = binary.LittleEndian.Uint32(b[4*i:])
	}
	return cv
}

// cvToBytes serializes an 8-word chaining value to 32 little-endian bytes.
func cvToBytes(cv *[8]uint32) *[32]byte {
	var b [32]byte
	for i, w := range cv {
		binary.LittleEndian.PutUint32(b[4*i:], w)
	}
	return &b
}
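
For context, these are internal fallbacks; callers reach them through the package's exported API. A typical use of lukechampine.com/blake3 looks like this (illustrative usage, not part of the diff):

```go
package main

import (
	"fmt"

	"lukechampine.com/blake3"
)

func main() {
	// On non-amd64 builds, Sum256 is backed by the generic compression
	// routines in compress_noasm.go above.
	sum := blake3.Sum256([]byte("hello, blake3"))
	fmt.Printf("%x\n", sum)
}
```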