feat: Production readiness improvements for WHOOSH council formation
Major security, observability, and configuration improvements:
## Security Hardening
- Implemented configurable CORS (no more wildcards)
- Added comprehensive auth middleware for admin endpoints
- Enhanced webhook HMAC validation (see the sketch after this list)
- Added input validation and rate limiting
- Security headers and CSP policies
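
The webhook HMAC check above follows the usual pattern: recompute an HMAC-SHA256 over the raw request body and compare it in constant time against the signature header. A minimal sketch, assuming a hex-encoded signature header such as Gitea's `X-Gitea-Signature`; the header name, secret source, and exact wiring in WHOOSH may differ:

```go
package middleware

import (
	"bytes"
	"crypto/hmac"
	"crypto/sha256"
	"encoding/hex"
	"io"
	"net/http"
)

// VerifyWebhookHMAC rejects requests whose HMAC-SHA256 signature (hex-encoded
// in sigHeader) does not match the request body under the shared secret.
func VerifyWebhookHMAC(secret []byte, sigHeader string, next http.Handler) http.Handler {
	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		body, err := io.ReadAll(r.Body)
		if err != nil {
			http.Error(w, "cannot read body", http.StatusBadRequest)
			return
		}
		// Restore the body so the downstream handler can still read it.
		r.Body = io.NopCloser(bytes.NewReader(body))

		mac := hmac.New(sha256.New, secret)
		mac.Write(body)
		expected := hex.EncodeToString(mac.Sum(nil))

		// hmac.Equal compares in constant time, avoiding timing side channels.
		if !hmac.Equal([]byte(expected), []byte(r.Header.Get(sigHeader))) {
			http.Error(w, "invalid signature", http.StatusUnauthorized)
			return
		}
		next.ServeHTTP(w, r)
	})
}
```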
## Configuration Management
- Made the N8N webhook URL configurable via WHOOSH_N8N_BASE_URL (sketch below)
- Replaced all hardcoded endpoints with environment variables
- Added feature flags for LLM vs heuristic composition
- Hardened Gitea fetching with EAGER_FILTER and FULL_RESCAN options
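
A minimal sketch of how this environment-driven configuration might be loaded. Only `WHOOSH_N8N_BASE_URL` is named in this commit; the `WHOOSH_USE_LLM_COMPOSITION` flag and the struct layout are hypothetical placeholders for the feature-flag idea:

```go
package config

import (
	"fmt"
	"os"
	"strconv"
)

// Config collects the environment-driven settings described above.
type Config struct {
	N8NBaseURL        string // from WHOOSH_N8N_BASE_URL
	UseLLMComposition bool   // hypothetical flag: LLM-based vs. heuristic composition
}

// Load fails fast when the webhook base URL is missing, since the hardcoded
// fallback endpoints were removed.
func Load() (*Config, error) {
	base := os.Getenv("WHOOSH_N8N_BASE_URL")
	if base == "" {
		return nil, fmt.Errorf("WHOOSH_N8N_BASE_URL must be set")
	}
	// A ParseBool failure leaves the flag false, i.e. the heuristic path.
	useLLM, _ := strconv.ParseBool(os.Getenv("WHOOSH_USE_LLM_COMPOSITION"))
	return &Config{N8NBaseURL: base, UseLLMComposition: useLLM}, nil
}
```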
## API Completeness
- Implemented GetCouncilComposition function
- Added GET /api/v1/councils/{id} endpoint (routing sketch below)
- Council artifacts API (POST/GET /api/v1/councils/{id}/artifacts)
- Added /admin/health/details endpoint with per-component status
- Database lookup for repository URLs (no hardcoded fallbacks)
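
A sketch of how these routes could be mounted with chi (which this commit vendors). The handler bodies are placeholders; the real handlers call `GetCouncilComposition` and read artifacts from the database:

```go
package api

import (
	"encoding/json"
	"net/http"

	"github.com/go-chi/chi/v5"
)

// RegisterCouncilRoutes wires the council endpoints listed above.
func RegisterCouncilRoutes(r chi.Router) {
	r.Get("/api/v1/councils/{id}", func(w http.ResponseWriter, req *http.Request) {
		id := chi.URLParam(req, "id")
		// The real handler would return GetCouncilComposition(id).
		json.NewEncoder(w).Encode(map[string]string{"id": id})
	})
	r.Post("/api/v1/councils/{id}/artifacts", func(w http.ResponseWriter, req *http.Request) {
		w.WriteHeader(http.StatusCreated)
	})
	r.Get("/api/v1/councils/{id}/artifacts", func(w http.ResponseWriter, req *http.Request) {
		json.NewEncoder(w).Encode([]string{})
	})
}
```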
## Observability & Performance
- Added OpenTelemetry distributed tracing with goal/pulse correlation (sketch below)
- Added database indexes for performance optimization
- Comprehensive health monitoring
- Enhanced logging and error handling
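
Goal/pulse correlation presumably means attaching those identifiers to spans so traces can be joined downstream. A minimal sketch with the OpenTelemetry Go API; the tracer name and attribute keys are illustrative, not WHOOSH's actual schema:

```go
package council

import (
	"context"

	"go.opentelemetry.io/otel"
	"go.opentelemetry.io/otel/attribute"
)

// composeCouncil opens a span per composition and tags it with the goal and
// pulse identifiers so traces can be correlated across services.
func composeCouncil(ctx context.Context, goalID, pulseID string) error {
	ctx, span := otel.Tracer("whoosh/council").Start(ctx, "council.compose")
	defer span.End()

	span.SetAttributes(
		attribute.String("goal.id", goalID),
		attribute.String("pulse.id", pulseID),
	)

	// Composition logic runs with ctx so any child spans inherit the trace.
	_ = ctx
	return nil
}
```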
## Infrastructure
- Production-ready P2P discovery (replaces mock implementation)
- Removed unused Redis configuration
- Enhanced Docker Swarm integration
- Added migration files for performance indexes
## Code Quality
- Comprehensive input validation (see the sketch after this list)
- Graceful error handling and failsafe fallbacks
- Backwards compatibility maintained
- Followed security best practices
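
As one illustration of validating input at the boundary before it reaches the database, a sketch of an identifier check; the allowed character set is an assumption, not the service's actual rule:

```go
package validate

import (
	"fmt"
	"regexp"
)

// councilIDPattern is an illustrative constraint; WHOOSH may use UUIDs or
// another identifier scheme.
var councilIDPattern = regexp.MustCompile(`^[A-Za-z0-9_-]{1,64}$`)

// CouncilID rejects malformed identifiers before any lookup or logging.
func CouncilID(id string) error {
	if !councilIDPattern.MatchString(id) {
		return fmt.Errorf("invalid council id %q", id)
	}
	return nil
}
```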
🤖 Generated with [Claude Code](https://claude.ai/code)
Co-Authored-By: Claude <noreply@anthropic.com>
vendor/github.com/go-chi/chi/v5/middleware/throttle.go · 132 lines added (generated, vendored, new file)
@@ -0,0 +1,132 @@
```go
package middleware

import (
	"net/http"
	"strconv"
	"time"
)

const (
	errCapacityExceeded = "Server capacity exceeded."
	errTimedOut         = "Timed out while waiting for a pending request to complete."
	errContextCanceled  = "Context was canceled."
)

var (
	defaultBacklogTimeout = time.Second * 60
)

// ThrottleOpts represents a set of throttling options.
type ThrottleOpts struct {
	RetryAfterFn   func(ctxDone bool) time.Duration
	Limit          int
	BacklogLimit   int
	BacklogTimeout time.Duration
}

// Throttle is a middleware that limits number of currently processed requests
// at a time across all users. Note: Throttle is not a rate-limiter per user,
// instead it just puts a ceiling on the number of currently in-flight requests
// being processed from the point from where the Throttle middleware is mounted.
func Throttle(limit int) func(http.Handler) http.Handler {
	return ThrottleWithOpts(ThrottleOpts{Limit: limit, BacklogTimeout: defaultBacklogTimeout})
}

// ThrottleBacklog is a middleware that limits number of currently processed
// requests at a time and provides a backlog for holding a finite number of
// pending requests.
func ThrottleBacklog(limit, backlogLimit int, backlogTimeout time.Duration) func(http.Handler) http.Handler {
	return ThrottleWithOpts(ThrottleOpts{Limit: limit, BacklogLimit: backlogLimit, BacklogTimeout: backlogTimeout})
}

// ThrottleWithOpts is a middleware that limits number of currently processed requests using passed ThrottleOpts.
func ThrottleWithOpts(opts ThrottleOpts) func(http.Handler) http.Handler {
	if opts.Limit < 1 {
		panic("chi/middleware: Throttle expects limit > 0")
	}

	if opts.BacklogLimit < 0 {
		panic("chi/middleware: Throttle expects backlogLimit to be positive")
	}

	t := throttler{
		tokens:         make(chan token, opts.Limit),
		backlogTokens:  make(chan token, opts.Limit+opts.BacklogLimit),
		backlogTimeout: opts.BacklogTimeout,
		retryAfterFn:   opts.RetryAfterFn,
	}

	// Filling tokens.
	for i := 0; i < opts.Limit+opts.BacklogLimit; i++ {
		if i < opts.Limit {
			t.tokens <- token{}
		}
		t.backlogTokens <- token{}
	}

	return func(next http.Handler) http.Handler {
		fn := func(w http.ResponseWriter, r *http.Request) {
			ctx := r.Context()

			select {

			case <-ctx.Done():
				t.setRetryAfterHeaderIfNeeded(w, true)
				http.Error(w, errContextCanceled, http.StatusTooManyRequests)
				return

			case btok := <-t.backlogTokens:
				timer := time.NewTimer(t.backlogTimeout)

				defer func() {
					t.backlogTokens <- btok
				}()

				select {
				case <-timer.C:
					t.setRetryAfterHeaderIfNeeded(w, false)
					http.Error(w, errTimedOut, http.StatusTooManyRequests)
					return
				case <-ctx.Done():
					timer.Stop()
					t.setRetryAfterHeaderIfNeeded(w, true)
					http.Error(w, errContextCanceled, http.StatusTooManyRequests)
					return
				case tok := <-t.tokens:
					defer func() {
						timer.Stop()
						t.tokens <- tok
					}()
					next.ServeHTTP(w, r)
				}
				return

			default:
				t.setRetryAfterHeaderIfNeeded(w, false)
				http.Error(w, errCapacityExceeded, http.StatusTooManyRequests)
				return
			}
		}

		return http.HandlerFunc(fn)
	}
}

// token represents a request that is being processed.
type token struct{}

// throttler limits number of currently processed requests at a time.
type throttler struct {
	tokens         chan token
	backlogTokens  chan token
	retryAfterFn   func(ctxDone bool) time.Duration
	backlogTimeout time.Duration
}

// setRetryAfterHeaderIfNeeded sets Retry-After HTTP header if corresponding retryAfterFn option of throttler is initialized.
func (t throttler) setRetryAfterHeaderIfNeeded(w http.ResponseWriter, ctxDone bool) {
	if t.retryAfterFn == nil {
		return
	}
	w.Header().Set("Retry-After", strconv.Itoa(int(t.retryAfterFn(ctxDone).Seconds())))
}
```
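
The only file shown in this excerpt is chi's vendored Throttle middleware, which caps in-flight requests rather than rate-limiting individual users. Vendoring it is consistent with the rate-limiting bullet above, though the excerpt does not show where WHOOSH mounts it; typical usage looks like this:

```go
package main

import (
	"net/http"
	"time"

	"github.com/go-chi/chi/v5"
	"github.com/go-chi/chi/v5/middleware"
)

func main() {
	r := chi.NewRouter()
	// Allow 50 concurrent requests and queue up to 100 more for at most 30s;
	// anything beyond that is rejected with 429 Too Many Requests.
	r.Use(middleware.ThrottleBacklog(50, 100, 30*time.Second))
	r.Get("/healthz", func(w http.ResponseWriter, _ *http.Request) {
		w.Write([]byte("ok"))
	})
	http.ListenAndServe(":8080", r)
}
```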