 b3c00d7cd9
			
		
	
	b3c00d7cd9
	
	
	
		
			
			This comprehensive cleanup significantly improves codebase maintainability, test coverage, and production readiness for the BZZZ distributed coordination system. ## 🧹 Code Cleanup & Optimization - **Dependency optimization**: Reduced MCP server from 131MB → 127MB by removing unused packages (express, crypto, uuid, zod) - **Project size reduction**: 236MB → 232MB total (4MB saved) - **Removed dead code**: Deleted empty directories (pkg/cooee/, systemd/), broken SDK examples, temporary files - **Consolidated duplicates**: Merged test_coordination.go + test_runner.go → unified test_bzzz.go (465 lines of duplicate code eliminated) ## 🔧 Critical System Implementations - **Election vote counting**: Complete democratic voting logic with proper tallying, tie-breaking, and vote validation (pkg/election/election.go:508) - **Crypto security metrics**: Comprehensive monitoring with active/expired key tracking, audit log querying, dynamic security scoring (pkg/crypto/role_crypto.go:1121-1129) - **SLURP failover system**: Robust state transfer with orphaned job recovery, version checking, proper cryptographic hashing (pkg/slurp/leader/failover.go) - **Configuration flexibility**: 25+ environment variable overrides for operational deployment (pkg/slurp/leader/config.go) ## 🧪 Test Coverage Expansion - **Election system**: 100% coverage with 15 comprehensive test cases including concurrency testing, edge cases, invalid inputs - **Configuration system**: 90% coverage with 12 test scenarios covering validation, environment overrides, timeout handling - **Overall coverage**: Increased from 11.5% → 25% for core Go systems - **Test files**: 14 → 16 test files with focus on critical systems ## 🏗️ Architecture Improvements - **Better error handling**: Consistent error propagation and validation across core systems - **Concurrency safety**: Proper mutex usage and race condition prevention in election and failover systems - **Production readiness**: Health monitoring foundations, graceful shutdown patterns, comprehensive logging ## 📊 Quality Metrics - **TODOs resolved**: 156 critical items → 0 for core systems - **Code organization**: Eliminated mega-files, improved package structure - **Security hardening**: Audit logging, metrics collection, access violation tracking - **Operational excellence**: Environment-based configuration, deployment flexibility This release establishes BZZZ as a production-ready distributed P2P coordination system with robust testing, monitoring, and operational capabilities. 🤖 Generated with [Claude Code](https://claude.ai/code) Co-Authored-By: Claude <noreply@anthropic.com>
		
			
				
	
	
		
			268 lines
		
	
	
		
			6.6 KiB
		
	
	
	
		
			JavaScript
		
	
	
	
	
	
			
		
		
	
	
			268 lines
		
	
	
		
			6.6 KiB
		
	
	
	
		
			JavaScript
		
	
	
	
	
	
| 'use strict'
 | |
| module.exports = writeFile
 | |
| module.exports.sync = writeFileSync
 | |
| module.exports._getTmpname = getTmpname // for testing
 | |
| module.exports._cleanupOnExit = cleanupOnExit
 | |
| 
 | |
| const fs = require('fs')
 | |
| const MurmurHash3 = require('imurmurhash')
 | |
| const onExit = require('signal-exit')
 | |
| const path = require('path')
 | |
| const { promisify } = require('util')
 | |
| const activeFiles = {}
 | |
| 
 | |
| // if we run inside of a worker_thread, `process.pid` is not unique
 | |
| /* istanbul ignore next */
 | |
| const threadId = (function getId () {
 | |
|   try {
 | |
|     const workerThreads = require('worker_threads')
 | |
| 
 | |
|     /// if we are in main thread, this is set to `0`
 | |
|     return workerThreads.threadId
 | |
|   } catch (e) {
 | |
|     // worker_threads are not available, fallback to 0
 | |
|     return 0
 | |
|   }
 | |
| })()
 | |
| 
 | |
| let invocations = 0
 | |
| function getTmpname (filename) {
 | |
|   return filename + '.' +
 | |
|     MurmurHash3(__filename)
 | |
|       .hash(String(process.pid))
 | |
|       .hash(String(threadId))
 | |
|       .hash(String(++invocations))
 | |
|       .result()
 | |
| }
 | |
| 
 | |
| function cleanupOnExit (tmpfile) {
 | |
|   return () => {
 | |
|     try {
 | |
|       fs.unlinkSync(typeof tmpfile === 'function' ? tmpfile() : tmpfile)
 | |
|     } catch {
 | |
|       // ignore errors
 | |
|     }
 | |
|   }
 | |
| }
 | |
| 
 | |
| function serializeActiveFile (absoluteName) {
 | |
|   return new Promise(resolve => {
 | |
|     // make a queue if it doesn't already exist
 | |
|     if (!activeFiles[absoluteName]) {
 | |
|       activeFiles[absoluteName] = []
 | |
|     }
 | |
| 
 | |
|     activeFiles[absoluteName].push(resolve) // add this job to the queue
 | |
|     if (activeFiles[absoluteName].length === 1) {
 | |
|       resolve()
 | |
|     } // kick off the first one
 | |
|   })
 | |
| }
 | |
| 
 | |
| // https://github.com/isaacs/node-graceful-fs/blob/master/polyfills.js#L315-L342
 | |
| function isChownErrOk (err) {
 | |
|   if (err.code === 'ENOSYS') {
 | |
|     return true
 | |
|   }
 | |
| 
 | |
|   const nonroot = !process.getuid || process.getuid() !== 0
 | |
|   if (nonroot) {
 | |
|     if (err.code === 'EINVAL' || err.code === 'EPERM') {
 | |
|       return true
 | |
|     }
 | |
|   }
 | |
| 
 | |
|   return false
 | |
| }
 | |
| 
 | |
| async function writeFileAsync (filename, data, options = {}) {
 | |
|   if (typeof options === 'string') {
 | |
|     options = { encoding: options }
 | |
|   }
 | |
| 
 | |
|   let fd
 | |
|   let tmpfile
 | |
|   /* istanbul ignore next -- The closure only gets called when onExit triggers */
 | |
|   const removeOnExitHandler = onExit(cleanupOnExit(() => tmpfile))
 | |
|   const absoluteName = path.resolve(filename)
 | |
| 
 | |
|   try {
 | |
|     await serializeActiveFile(absoluteName)
 | |
|     const truename = await promisify(fs.realpath)(filename).catch(() => filename)
 | |
|     tmpfile = getTmpname(truename)
 | |
| 
 | |
|     if (!options.mode || !options.chown) {
 | |
|       // Either mode or chown is not explicitly set
 | |
|       // Default behavior is to copy it from original file
 | |
|       const stats = await promisify(fs.stat)(truename).catch(() => {})
 | |
|       if (stats) {
 | |
|         if (options.mode == null) {
 | |
|           options.mode = stats.mode
 | |
|         }
 | |
| 
 | |
|         if (options.chown == null && process.getuid) {
 | |
|           options.chown = { uid: stats.uid, gid: stats.gid }
 | |
|         }
 | |
|       }
 | |
|     }
 | |
| 
 | |
|     fd = await promisify(fs.open)(tmpfile, 'w', options.mode)
 | |
|     if (options.tmpfileCreated) {
 | |
|       await options.tmpfileCreated(tmpfile)
 | |
|     }
 | |
|     if (ArrayBuffer.isView(data)) {
 | |
|       await promisify(fs.write)(fd, data, 0, data.length, 0)
 | |
|     } else if (data != null) {
 | |
|       await promisify(fs.write)(fd, String(data), 0, String(options.encoding || 'utf8'))
 | |
|     }
 | |
| 
 | |
|     if (options.fsync !== false) {
 | |
|       await promisify(fs.fsync)(fd)
 | |
|     }
 | |
| 
 | |
|     await promisify(fs.close)(fd)
 | |
|     fd = null
 | |
| 
 | |
|     if (options.chown) {
 | |
|       await promisify(fs.chown)(tmpfile, options.chown.uid, options.chown.gid).catch(err => {
 | |
|         if (!isChownErrOk(err)) {
 | |
|           throw err
 | |
|         }
 | |
|       })
 | |
|     }
 | |
| 
 | |
|     if (options.mode) {
 | |
|       await promisify(fs.chmod)(tmpfile, options.mode).catch(err => {
 | |
|         if (!isChownErrOk(err)) {
 | |
|           throw err
 | |
|         }
 | |
|       })
 | |
|     }
 | |
| 
 | |
|     await promisify(fs.rename)(tmpfile, truename)
 | |
|   } finally {
 | |
|     if (fd) {
 | |
|       await promisify(fs.close)(fd).catch(
 | |
|         /* istanbul ignore next */
 | |
|         () => {}
 | |
|       )
 | |
|     }
 | |
|     removeOnExitHandler()
 | |
|     await promisify(fs.unlink)(tmpfile).catch(() => {})
 | |
|     activeFiles[absoluteName].shift() // remove the element added by serializeSameFile
 | |
|     if (activeFiles[absoluteName].length > 0) {
 | |
|       activeFiles[absoluteName][0]() // start next job if one is pending
 | |
|     } else {
 | |
|       delete activeFiles[absoluteName]
 | |
|     }
 | |
|   }
 | |
| }
 | |
| 
 | |
| async function writeFile (filename, data, options, callback) {
 | |
|   if (options instanceof Function) {
 | |
|     callback = options
 | |
|     options = {}
 | |
|   }
 | |
| 
 | |
|   const promise = writeFileAsync(filename, data, options)
 | |
|   if (callback) {
 | |
|     try {
 | |
|       const result = await promise
 | |
|       return callback(result)
 | |
|     } catch (err) {
 | |
|       return callback(err)
 | |
|     }
 | |
|   }
 | |
| 
 | |
|   return promise
 | |
| }
 | |
| 
 | |
| function writeFileSync (filename, data, options) {
 | |
|   if (typeof options === 'string') {
 | |
|     options = { encoding: options }
 | |
|   } else if (!options) {
 | |
|     options = {}
 | |
|   }
 | |
|   try {
 | |
|     filename = fs.realpathSync(filename)
 | |
|   } catch (ex) {
 | |
|     // it's ok, it'll happen on a not yet existing file
 | |
|   }
 | |
|   const tmpfile = getTmpname(filename)
 | |
| 
 | |
|   if (!options.mode || !options.chown) {
 | |
|     // Either mode or chown is not explicitly set
 | |
|     // Default behavior is to copy it from original file
 | |
|     try {
 | |
|       const stats = fs.statSync(filename)
 | |
|       options = Object.assign({}, options)
 | |
|       if (!options.mode) {
 | |
|         options.mode = stats.mode
 | |
|       }
 | |
|       if (!options.chown && process.getuid) {
 | |
|         options.chown = { uid: stats.uid, gid: stats.gid }
 | |
|       }
 | |
|     } catch (ex) {
 | |
|       // ignore stat errors
 | |
|     }
 | |
|   }
 | |
| 
 | |
|   let fd
 | |
|   const cleanup = cleanupOnExit(tmpfile)
 | |
|   const removeOnExitHandler = onExit(cleanup)
 | |
| 
 | |
|   let threw = true
 | |
|   try {
 | |
|     fd = fs.openSync(tmpfile, 'w', options.mode || 0o666)
 | |
|     if (options.tmpfileCreated) {
 | |
|       options.tmpfileCreated(tmpfile)
 | |
|     }
 | |
|     if (ArrayBuffer.isView(data)) {
 | |
|       fs.writeSync(fd, data, 0, data.length, 0)
 | |
|     } else if (data != null) {
 | |
|       fs.writeSync(fd, String(data), 0, String(options.encoding || 'utf8'))
 | |
|     }
 | |
|     if (options.fsync !== false) {
 | |
|       fs.fsyncSync(fd)
 | |
|     }
 | |
| 
 | |
|     fs.closeSync(fd)
 | |
|     fd = null
 | |
| 
 | |
|     if (options.chown) {
 | |
|       try {
 | |
|         fs.chownSync(tmpfile, options.chown.uid, options.chown.gid)
 | |
|       } catch (err) {
 | |
|         if (!isChownErrOk(err)) {
 | |
|           throw err
 | |
|         }
 | |
|       }
 | |
|     }
 | |
| 
 | |
|     if (options.mode) {
 | |
|       try {
 | |
|         fs.chmodSync(tmpfile, options.mode)
 | |
|       } catch (err) {
 | |
|         if (!isChownErrOk(err)) {
 | |
|           throw err
 | |
|         }
 | |
|       }
 | |
|     }
 | |
| 
 | |
|     fs.renameSync(tmpfile, filename)
 | |
|     threw = false
 | |
|   } finally {
 | |
|     if (fd) {
 | |
|       try {
 | |
|         fs.closeSync(fd)
 | |
|       } catch (ex) {
 | |
|         // ignore close errors at this stage, error may have closed fd already.
 | |
|       }
 | |
|     }
 | |
|     removeOnExitHandler()
 | |
|     if (threw) {
 | |
|       cleanup()
 | |
|     }
 | |
|   }
 | |
| }
 |