Harden CHORUS security and messaging stack

This commit is contained in:
anthonyrawlins
2025-09-20 23:21:35 +10:00
parent 57751f277a
commit 1bb736c09a
25 changed files with 2793 additions and 2474 deletions

View File

@@ -2,27 +2,26 @@ package metrics
import (
"context"
"fmt"
"log"
"net/http"
"sync"
"time"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/client_golang/prometheus/promhttp"
"github.com/prometheus/client_golang/prometheus/promauto"
"github.com/prometheus/client_golang/prometheus/promhttp"
)
// CHORUSMetrics provides comprehensive Prometheus metrics for the CHORUS system
type CHORUSMetrics struct {
registry *prometheus.Registry
httpServer *http.Server
registry *prometheus.Registry
httpServer *http.Server
// System metrics
systemInfo *prometheus.GaugeVec
uptime prometheus.Gauge
buildInfo *prometheus.GaugeVec
systemInfo *prometheus.GaugeVec
uptime prometheus.Gauge
buildInfo *prometheus.GaugeVec
// P2P metrics
p2pConnectedPeers prometheus.Gauge
p2pMessagesSent *prometheus.CounterVec
@@ -30,95 +29,98 @@ type CHORUSMetrics struct {
p2pMessageLatency *prometheus.HistogramVec
p2pConnectionDuration *prometheus.HistogramVec
p2pPeerScore *prometheus.GaugeVec
// DHT metrics
dhtPutOperations *prometheus.CounterVec
dhtGetOperations *prometheus.CounterVec
dhtOperationLatency *prometheus.HistogramVec
dhtProviderRecords prometheus.Gauge
dhtReplicationFactor *prometheus.GaugeVec
dhtContentKeys prometheus.Gauge
dhtCacheHits *prometheus.CounterVec
dhtCacheMisses *prometheus.CounterVec
dhtPutOperations *prometheus.CounterVec
dhtGetOperations *prometheus.CounterVec
dhtOperationLatency *prometheus.HistogramVec
dhtProviderRecords prometheus.Gauge
dhtReplicationFactor *prometheus.GaugeVec
dhtContentKeys prometheus.Gauge
dhtCacheHits *prometheus.CounterVec
dhtCacheMisses *prometheus.CounterVec
// PubSub metrics
pubsubTopics prometheus.Gauge
pubsubSubscribers *prometheus.GaugeVec
pubsubMessages *prometheus.CounterVec
pubsubMessageLatency *prometheus.HistogramVec
pubsubMessageSize *prometheus.HistogramVec
pubsubTopics prometheus.Gauge
pubsubSubscribers *prometheus.GaugeVec
pubsubMessages *prometheus.CounterVec
pubsubMessageLatency *prometheus.HistogramVec
pubsubMessageSize *prometheus.HistogramVec
// Election metrics
electionTerm prometheus.Gauge
electionState *prometheus.GaugeVec
heartbeatsSent prometheus.Counter
heartbeatsReceived prometheus.Counter
leadershipChanges prometheus.Counter
leaderUptime prometheus.Gauge
electionLatency prometheus.Histogram
electionTerm prometheus.Gauge
electionState *prometheus.GaugeVec
heartbeatsSent prometheus.Counter
heartbeatsReceived prometheus.Counter
leadershipChanges prometheus.Counter
leaderUptime prometheus.Gauge
electionLatency prometheus.Histogram
// Health metrics
healthChecksPassed *prometheus.CounterVec
healthChecksFailed *prometheus.CounterVec
healthCheckDuration *prometheus.HistogramVec
systemHealthScore prometheus.Gauge
componentHealthScore *prometheus.GaugeVec
healthChecksPassed *prometheus.CounterVec
healthChecksFailed *prometheus.CounterVec
healthCheckDuration *prometheus.HistogramVec
systemHealthScore prometheus.Gauge
componentHealthScore *prometheus.GaugeVec
// Task metrics
tasksActive prometheus.Gauge
tasksQueued prometheus.Gauge
tasksCompleted *prometheus.CounterVec
taskDuration *prometheus.HistogramVec
taskQueueWaitTime prometheus.Histogram
tasksActive prometheus.Gauge
tasksQueued prometheus.Gauge
tasksCompleted *prometheus.CounterVec
taskDuration *prometheus.HistogramVec
taskQueueWaitTime prometheus.Histogram
// SLURP metrics (context generation)
slurpGenerated *prometheus.CounterVec
slurpGenerationTime prometheus.Histogram
slurpQueueLength prometheus.Gauge
slurpActiveJobs prometheus.Gauge
slurpLeadershipEvents prometheus.Counter
// SHHH sentinel metrics
shhhFindings *prometheus.CounterVec
// UCXI metrics (protocol resolution)
ucxiRequests *prometheus.CounterVec
ucxiResolutionLatency prometheus.Histogram
ucxiCacheHits prometheus.Counter
ucxiCacheMisses prometheus.Counter
ucxiContentSize prometheus.Histogram
// Resource metrics
cpuUsage prometheus.Gauge
memoryUsage prometheus.Gauge
diskUsage *prometheus.GaugeVec
networkBytesIn prometheus.Counter
networkBytesOut prometheus.Counter
goroutines prometheus.Gauge
cpuUsage prometheus.Gauge
memoryUsage prometheus.Gauge
diskUsage *prometheus.GaugeVec
networkBytesIn prometheus.Counter
networkBytesOut prometheus.Counter
goroutines prometheus.Gauge
// Error metrics
errors *prometheus.CounterVec
panics prometheus.Counter
startTime time.Time
mu sync.RWMutex
errors *prometheus.CounterVec
panics prometheus.Counter
startTime time.Time
mu sync.RWMutex
}
// MetricsConfig configures the metrics system
type MetricsConfig struct {
// HTTP server config
ListenAddr string
MetricsPath string
ListenAddr string
MetricsPath string
// Histogram buckets
LatencyBuckets []float64
SizeBuckets []float64
// Labels
NodeID string
Version string
Environment string
Cluster string
NodeID string
Version string
Environment string
Cluster string
// Collection intervals
SystemMetricsInterval time.Duration
SystemMetricsInterval time.Duration
ResourceMetricsInterval time.Duration
}
@@ -143,20 +145,20 @@ func NewCHORUSMetrics(config *MetricsConfig) *CHORUSMetrics {
if config == nil {
config = DefaultMetricsConfig()
}
registry := prometheus.NewRegistry()
metrics := &CHORUSMetrics{
registry: registry,
startTime: time.Now(),
}
// Initialize all metrics
metrics.initializeMetrics(config)
// Register with custom registry
metrics.registerMetrics()
return metrics
}
@@ -170,14 +172,14 @@ func (m *CHORUSMetrics) initializeMetrics(config *MetricsConfig) {
},
[]string{"node_id", "version", "go_version", "cluster", "environment"},
)
m.uptime = promauto.NewGauge(
prometheus.GaugeOpts{
Name: "chorus_uptime_seconds",
Help: "System uptime in seconds",
},
)
// P2P metrics
m.p2pConnectedPeers = promauto.NewGauge(
prometheus.GaugeOpts{
@@ -185,7 +187,7 @@ func (m *CHORUSMetrics) initializeMetrics(config *MetricsConfig) {
Help: "Number of connected P2P peers",
},
)
m.p2pMessagesSent = promauto.NewCounterVec(
prometheus.CounterOpts{
Name: "chorus_p2p_messages_sent_total",
@@ -193,7 +195,7 @@ func (m *CHORUSMetrics) initializeMetrics(config *MetricsConfig) {
},
[]string{"message_type", "peer_id"},
)
m.p2pMessagesReceived = promauto.NewCounterVec(
prometheus.CounterOpts{
Name: "chorus_p2p_messages_received_total",
@@ -201,7 +203,7 @@ func (m *CHORUSMetrics) initializeMetrics(config *MetricsConfig) {
},
[]string{"message_type", "peer_id"},
)
m.p2pMessageLatency = promauto.NewHistogramVec(
prometheus.HistogramOpts{
Name: "chorus_p2p_message_latency_seconds",
@@ -210,7 +212,7 @@ func (m *CHORUSMetrics) initializeMetrics(config *MetricsConfig) {
},
[]string{"message_type"},
)
// DHT metrics
m.dhtPutOperations = promauto.NewCounterVec(
prometheus.CounterOpts{
@@ -219,7 +221,7 @@ func (m *CHORUSMetrics) initializeMetrics(config *MetricsConfig) {
},
[]string{"status"},
)
m.dhtGetOperations = promauto.NewCounterVec(
prometheus.CounterOpts{
Name: "chorus_dht_get_operations_total",
@@ -227,7 +229,7 @@ func (m *CHORUSMetrics) initializeMetrics(config *MetricsConfig) {
},
[]string{"status"},
)
m.dhtOperationLatency = promauto.NewHistogramVec(
prometheus.HistogramOpts{
Name: "chorus_dht_operation_latency_seconds",
@@ -236,21 +238,21 @@ func (m *CHORUSMetrics) initializeMetrics(config *MetricsConfig) {
},
[]string{"operation", "status"},
)
m.dhtProviderRecords = promauto.NewGauge(
prometheus.GaugeOpts{
Name: "chorus_dht_provider_records",
Help: "Number of DHT provider records",
},
)
m.dhtContentKeys = promauto.NewGauge(
prometheus.GaugeOpts{
Name: "chorus_dht_content_keys",
Help: "Number of DHT content keys",
},
)
m.dhtReplicationFactor = promauto.NewGaugeVec(
prometheus.GaugeOpts{
Name: "chorus_dht_replication_factor",
@@ -258,7 +260,7 @@ func (m *CHORUSMetrics) initializeMetrics(config *MetricsConfig) {
},
[]string{"key_hash"},
)
// PubSub metrics
m.pubsubTopics = promauto.NewGauge(
prometheus.GaugeOpts{
@@ -266,7 +268,7 @@ func (m *CHORUSMetrics) initializeMetrics(config *MetricsConfig) {
Help: "Number of active PubSub topics",
},
)
m.pubsubMessages = promauto.NewCounterVec(
prometheus.CounterOpts{
Name: "chorus_pubsub_messages_total",
@@ -274,7 +276,7 @@ func (m *CHORUSMetrics) initializeMetrics(config *MetricsConfig) {
},
[]string{"topic", "direction", "message_type"},
)
m.pubsubMessageLatency = promauto.NewHistogramVec(
prometheus.HistogramOpts{
Name: "chorus_pubsub_message_latency_seconds",
@@ -283,7 +285,7 @@ func (m *CHORUSMetrics) initializeMetrics(config *MetricsConfig) {
},
[]string{"topic"},
)
// Election metrics
m.electionTerm = promauto.NewGauge(
prometheus.GaugeOpts{
@@ -291,7 +293,7 @@ func (m *CHORUSMetrics) initializeMetrics(config *MetricsConfig) {
Help: "Current election term",
},
)
m.electionState = promauto.NewGaugeVec(
prometheus.GaugeOpts{
Name: "chorus_election_state",
@@ -299,28 +301,28 @@ func (m *CHORUSMetrics) initializeMetrics(config *MetricsConfig) {
},
[]string{"state"},
)
m.heartbeatsSent = promauto.NewCounter(
prometheus.CounterOpts{
Name: "chorus_heartbeats_sent_total",
Help: "Total number of heartbeats sent",
},
)
m.heartbeatsReceived = promauto.NewCounter(
prometheus.CounterOpts{
Name: "chorus_heartbeats_received_total",
Help: "Total number of heartbeats received",
},
)
m.leadershipChanges = promauto.NewCounter(
prometheus.CounterOpts{
Name: "chorus_leadership_changes_total",
Help: "Total number of leadership changes",
},
)
// Health metrics
m.healthChecksPassed = promauto.NewCounterVec(
prometheus.CounterOpts{
@@ -329,7 +331,7 @@ func (m *CHORUSMetrics) initializeMetrics(config *MetricsConfig) {
},
[]string{"check_name"},
)
m.healthChecksFailed = promauto.NewCounterVec(
prometheus.CounterOpts{
Name: "chorus_health_checks_failed_total",
@@ -337,14 +339,14 @@ func (m *CHORUSMetrics) initializeMetrics(config *MetricsConfig) {
},
[]string{"check_name", "reason"},
)
m.systemHealthScore = promauto.NewGauge(
prometheus.GaugeOpts{
Name: "chorus_system_health_score",
Help: "Overall system health score (0-1)",
},
)
m.componentHealthScore = promauto.NewGaugeVec(
prometheus.GaugeOpts{
Name: "chorus_component_health_score",
@@ -352,7 +354,7 @@ func (m *CHORUSMetrics) initializeMetrics(config *MetricsConfig) {
},
[]string{"component"},
)
// Task metrics
m.tasksActive = promauto.NewGauge(
prometheus.GaugeOpts{
@@ -360,14 +362,14 @@ func (m *CHORUSMetrics) initializeMetrics(config *MetricsConfig) {
Help: "Number of active tasks",
},
)
m.tasksQueued = promauto.NewGauge(
prometheus.GaugeOpts{
Name: "chorus_tasks_queued",
Help: "Number of queued tasks",
},
)
m.tasksCompleted = promauto.NewCounterVec(
prometheus.CounterOpts{
Name: "chorus_tasks_completed_total",
@@ -375,7 +377,7 @@ func (m *CHORUSMetrics) initializeMetrics(config *MetricsConfig) {
},
[]string{"status", "task_type"},
)
m.taskDuration = promauto.NewHistogramVec(
prometheus.HistogramOpts{
Name: "chorus_task_duration_seconds",
@@ -384,7 +386,7 @@ func (m *CHORUSMetrics) initializeMetrics(config *MetricsConfig) {
},
[]string{"task_type", "status"},
)
// SLURP metrics
m.slurpGenerated = promauto.NewCounterVec(
prometheus.CounterOpts{
@@ -393,7 +395,7 @@ func (m *CHORUSMetrics) initializeMetrics(config *MetricsConfig) {
},
[]string{"role", "status"},
)
m.slurpGenerationTime = promauto.NewHistogram(
prometheus.HistogramOpts{
Name: "chorus_slurp_generation_time_seconds",
@@ -401,14 +403,23 @@ func (m *CHORUSMetrics) initializeMetrics(config *MetricsConfig) {
Buckets: []float64{0.1, 0.5, 1.0, 2.0, 5.0, 10.0, 30.0, 60.0, 120.0},
},
)
m.slurpQueueLength = promauto.NewGauge(
prometheus.GaugeOpts{
Name: "chorus_slurp_queue_length",
Help: "Length of SLURP generation queue",
},
)
// SHHH metrics
m.shhhFindings = promauto.NewCounterVec(
prometheus.CounterOpts{
Name: "chorus_shhh_findings_total",
Help: "Total number of SHHH redaction findings",
},
[]string{"rule", "severity"},
)
// UCXI metrics
m.ucxiRequests = promauto.NewCounterVec(
prometheus.CounterOpts{
@@ -417,7 +428,7 @@ func (m *CHORUSMetrics) initializeMetrics(config *MetricsConfig) {
},
[]string{"method", "status"},
)
m.ucxiResolutionLatency = promauto.NewHistogram(
prometheus.HistogramOpts{
Name: "chorus_ucxi_resolution_latency_seconds",
@@ -425,7 +436,7 @@ func (m *CHORUSMetrics) initializeMetrics(config *MetricsConfig) {
Buckets: config.LatencyBuckets,
},
)
// Resource metrics
m.cpuUsage = promauto.NewGauge(
prometheus.GaugeOpts{
@@ -433,14 +444,14 @@ func (m *CHORUSMetrics) initializeMetrics(config *MetricsConfig) {
Help: "CPU usage ratio (0-1)",
},
)
m.memoryUsage = promauto.NewGauge(
prometheus.GaugeOpts{
Name: "chorus_memory_usage_bytes",
Help: "Memory usage in bytes",
},
)
m.diskUsage = promauto.NewGaugeVec(
prometheus.GaugeOpts{
Name: "chorus_disk_usage_ratio",
@@ -448,14 +459,14 @@ func (m *CHORUSMetrics) initializeMetrics(config *MetricsConfig) {
},
[]string{"mount_point"},
)
m.goroutines = promauto.NewGauge(
prometheus.GaugeOpts{
Name: "chorus_goroutines",
Help: "Number of goroutines",
},
)
// Error metrics
m.errors = promauto.NewCounterVec(
prometheus.CounterOpts{
@@ -464,7 +475,7 @@ func (m *CHORUSMetrics) initializeMetrics(config *MetricsConfig) {
},
[]string{"component", "error_type"},
)
m.panics = promauto.NewCounter(
prometheus.CounterOpts{
Name: "chorus_panics_total",
@@ -482,31 +493,31 @@ func (m *CHORUSMetrics) registerMetrics() {
// StartServer starts the Prometheus metrics HTTP server
// StartServer starts the Prometheus metrics HTTP server on
// config.ListenAddr, serving the metrics endpoint at config.MetricsPath
// plus a lightweight /health liveness endpoint. The server runs in a
// background goroutine; ErrServerClosed is treated as a normal shutdown.
// Always returns nil (bind errors surface asynchronously in the log).
func (m *CHORUSMetrics) StartServer(config *MetricsConfig) error {
	mux := http.NewServeMux()

	// Serve from the custom registry so only CHORUS metrics are exposed,
	// not whatever happens to be in the global default registry.
	handler := promhttp.HandlerFor(m.registry, promhttp.HandlerOpts{
		EnableOpenMetrics: true,
	})
	mux.Handle(config.MetricsPath, handler)

	// Minimal liveness probe for load balancers / orchestrators.
	mux.HandleFunc("/health", func(w http.ResponseWriter, r *http.Request) {
		w.WriteHeader(http.StatusOK)
		_, _ = w.Write([]byte("OK")) // best-effort; nothing useful to do on error
	})

	m.httpServer = &http.Server{
		Addr:    config.ListenAddr,
		Handler: mux,
		// Never run an http.Server without timeouts: unset timeouts let
		// slow or stalled clients hold connections (and goroutines) open
		// indefinitely (slowloris-style resource exhaustion).
		ReadHeaderTimeout: 10 * time.Second,
		ReadTimeout:       30 * time.Second,
		WriteTimeout:      30 * time.Second,
		IdleTimeout:       60 * time.Second,
	}

	go func() {
		log.Printf("Starting metrics server on %s%s", config.ListenAddr, config.MetricsPath)
		if err := m.httpServer.ListenAndServe(); err != nil && err != http.ErrServerClosed {
			log.Printf("Metrics server error: %v", err)
		}
	}()

	return nil
}
@@ -656,6 +667,15 @@ func (m *CHORUSMetrics) SetSLURPQueueLength(length int) {
m.slurpQueueLength.Set(float64(length))
}
// SHHH Metrics Methods
// IncrementSHHHFindings adds count to the SHHH redaction-findings counter
// for the given rule/severity label pair. It is a safe no-op on a nil
// receiver, an uninitialized counter vector, or a non-positive count.
func (m *CHORUSMetrics) IncrementSHHHFindings(rule, severity string, count int) {
	if m == nil || m.shhhFindings == nil {
		return
	}
	if count <= 0 {
		return
	}
	counter := m.shhhFindings.WithLabelValues(rule, severity)
	counter.Add(float64(count))
}
// UCXI Metrics Methods
func (m *CHORUSMetrics) IncrementUCXIRequests(method, status string) {
@@ -708,21 +728,21 @@ func (m *CHORUSMetrics) UpdateUptime() {
// CollectMetrics starts a background loop that periodically refreshes
// system metrics (currently just the uptime gauge) and resource metrics
// at the intervals given in config.
//
// NOTE(review): the spawned goroutine has no context or stop channel, so
// it — and both tickers — live for the remainder of the process. Confirm
// this is intentional, or thread a context through for clean shutdown.
func (m *CHORUSMetrics) CollectMetrics(config *MetricsConfig) {
systemTicker := time.NewTicker(config.SystemMetricsInterval)
resourceTicker := time.NewTicker(config.ResourceMetricsInterval)
go func() {
// Tickers are only released if the loop returns; currently it never does.
defer systemTicker.Stop()
defer resourceTicker.Stop()
for {
select {
case <-systemTicker.C:
m.UpdateUptime()
// Collect other system metrics
case <-resourceTicker.C:
// Collect resource metrics (would integrate with actual system monitoring)
// m.collectResourceMetrics()
}
}
}()
}
}