This commit captures both the prior accumulated work-in-progress
(framework migration web/→svelte/, postgres storage, conversation
viewer, dashboard auth, OpenAPI spec, integration tests) AND today's
operational improvements layered on top. History wasn't checkpointed
incrementally; happy to split it via interactive rebase if a reviewer
wants smaller commits.
Today's changes (in addition to the older WIP):
1. Configurable upstream response-header timeout
- ANTHROPIC_RESPONSE_HEADER_TIMEOUT env (default 300s)
- Replaces hardcoded 300s in provider/anthropic.go that was firing
on opus + 1M-context + extended thinking non-streaming requests
- Files: internal/config/config.go, internal/provider/anthropic.go
2. Structured forward-error diagnostic logging
- When a forward to Anthropic fails, log a single key=value line
with request_id, model, stream, body_bytes, has_thinking,
anthropic_beta, query, elapsed, ctx_err — alongside the existing
human-readable error line for back-compat
- Files: internal/handler/handlers.go (logForwardFailure)
3. Full SSE protocol passthrough + Flusher fix
- handler/handlers.go: forward all SSE lines verbatim (event:, id:,
retry:, : comments, blank-line terminators), not only data:.
Previous code produced malformed SSE for strict parsers.
- middleware/logging.go: explicit Flush() method on responseWriter.
Embedding http.ResponseWriter (interface) does not auto-promote
Flush(), so every w.(http.Flusher) check in the streaming
handler was returning ok=false and SSE writes buffered in net/http
until the body closed.
4. Non-streaming → streaming demotion (feature-flagged)
- ANTHROPIC_DEMOTE_NONSTREAMING env (default false)
- When enabled and the routed provider is anthropic, force stream=true
upstream for clients that asked for stream=false. Receive SSE,
accumulate via accumulateSSEToMessage (handles text, tool_use with
partial_json reassembly, thinking, signature, citations_delta,
usage merge), and synthesize a single non-streaming JSON response.
- Eliminates the ResponseHeaderTimeout class of failure entirely.
- Body rewrite uses json.Decoder + UseNumber() to preserve integer
precision in unknown nested fields (tool inputs from prior turns).
- Files: internal/config/config.go, internal/handler/handlers.go,
cmd/proxy/main.go, cmd/proxy/main_test.go
5. Live operational state: /livez gauge + graceful drain
- New internal/runtime package: atomic in-flight counter + draining flag
- New middleware/inflight.go: increments runtime gauge, applied to
/v1/* subrouter so Messages, ChatCompletions, and ProxyPassthrough
are all counted
- /v1/* moved to a gorilla/mux subrouter so the InFlight middleware
applies surgically; /health, /livez, /openapi.* remain on parent
router (unauthenticated, uncounted)
- Health handler returns 503 draining when runtime.IsDraining() is
true, so Traefik stops routing to a slot before drain begins
- New /livez handler returns {status, in_flight, draining, timestamp}
- SIGTERM handler in main.go: SetDraining(true), poll for in_flight==0
with 32-min ceiling and 1s tick (logs every 10s), then srv.Shutdown
- Auth bypass list extended with /livez
- Files: internal/runtime/runtime.go (new),
internal/middleware/inflight.go (new),
internal/middleware/auth.go,
internal/handler/handlers.go (Health, Livez, runtime import),
cmd/proxy/main.go (subrouter, drain loop)
6. OpenAPI spec updates
- Document Health 503 response and new DrainingResponse schema
- Add /livez path with LivezResponse schema
- Files: internal/handler/openapi.go
Verified: go build ./... clean, go test ./... all pass, go vet clean.
Three rounds of codex peer review across changes 1-5; all feedback
addressed (citations_delta, json.Number precision, drain-loop logging
via lastLog timestamp, PathPrefix tightened to "/v1/").
216 lines
5.6 KiB
Go
package handler
|
|
|
|
import (
|
|
"crypto/sha256"
|
|
"fmt"
|
|
"net/http"
|
|
"strconv"
|
|
"strings"
|
|
|
|
"github.com/seifghazi/claude-code-monitor/internal/model"
|
|
)
|
|
|
|
// hopByHopHeaders names (lowercased) the response headers this proxy must
// strip before forwarding: the hop-by-hop set from RFC 7230 §6.1 plus two
// entity headers the proxy rewrites itself.
var hopByHopHeaders = func() map[string]bool {
	names := []string{
		"connection",
		"keep-alive",
		"proxy-authenticate",
		"proxy-authorization",
		"te",
		"trailers",
		"transfer-encoding",
		"upgrade",
		"content-encoding", // We handle decompression ourselves
		"content-length",   // May change after decompression
	}
	set := make(map[string]bool, len(names))
	for _, n := range names {
		set[n] = true
	}
	return set
}()
// ApplyHeaderRules applies block/set/replace rules to an http.Header in-place.
|
|
func ApplyHeaderRules(headers http.Header, rules []model.HeaderRule) {
|
|
for _, rule := range rules {
|
|
if !rule.Enabled {
|
|
continue
|
|
}
|
|
key := http.CanonicalHeaderKey(rule.Header)
|
|
switch rule.Action {
|
|
case "block":
|
|
headers.Del(key)
|
|
case "set":
|
|
headers.Set(key, rule.Value)
|
|
case "replace":
|
|
if rule.Find == "" {
|
|
continue
|
|
}
|
|
for i, v := range headers.Values(key) {
|
|
if strings.Contains(v, rule.Find) {
|
|
replaced := strings.ReplaceAll(v, rule.Find, rule.Value)
|
|
if i == 0 {
|
|
headers.Set(key, replaced)
|
|
} else {
|
|
headers.Add(key, replaced)
|
|
}
|
|
}
|
|
}
|
|
}
|
|
}
|
|
}
|
|
|
|
// CopyAllResponseHeaders forwards all upstream response headers to the client,
|
|
// stripping only hop-by-hop headers that must not be forwarded by a proxy.
|
|
func CopyAllResponseHeaders(w http.ResponseWriter, resp *http.Response) {
|
|
for key, values := range resp.Header {
|
|
if hopByHopHeaders[strings.ToLower(key)] {
|
|
continue
|
|
}
|
|
for _, value := range values {
|
|
w.Header().Add(key, value)
|
|
}
|
|
}
|
|
}
|
|
|
|
// SanitizeResponseHeaders strips hop-by-hop proxy headers before applying the
|
|
// generic sensitive-header sanitization used for stored metadata.
|
|
func SanitizeResponseHeaders(headers http.Header) http.Header {
|
|
filtered := make(http.Header)
|
|
|
|
for key, values := range headers {
|
|
if hopByHopHeaders[strings.ToLower(key)] {
|
|
continue
|
|
}
|
|
copiedValues := append([]string(nil), values...)
|
|
filtered[key] = copiedValues
|
|
}
|
|
|
|
return SanitizeHeaders(filtered)
|
|
}
|
|
|
|
// ExtractRateLimitInfo parses rate limit headers from the upstream response
|
|
func ExtractRateLimitInfo(headers http.Header) *model.RateLimitInfo {
|
|
info := &model.RateLimitInfo{}
|
|
found := false
|
|
|
|
// Organization ID
|
|
if v := headers.Get("anthropic-organization-id"); v != "" {
|
|
info.OrganizationID = v
|
|
found = true
|
|
}
|
|
|
|
// Unified quota system (current Anthropic model)
|
|
if v := headers.Get("anthropic-ratelimit-unified-status"); v != "" {
|
|
info.UnifiedStatus = v
|
|
found = true
|
|
}
|
|
if v := headers.Get("anthropic-ratelimit-unified-5h-utilization"); v != "" {
|
|
info.UnifiedUtilization5h, _ = strconv.ParseFloat(v, 64)
|
|
found = true
|
|
}
|
|
if v := headers.Get("anthropic-ratelimit-unified-5h-reset"); v != "" {
|
|
info.UnifiedReset5h = v
|
|
found = true
|
|
}
|
|
if v := headers.Get("anthropic-ratelimit-unified-7d-utilization"); v != "" {
|
|
info.UnifiedUtilization7d, _ = strconv.ParseFloat(v, 64)
|
|
found = true
|
|
}
|
|
if v := headers.Get("anthropic-ratelimit-unified-7d-reset"); v != "" {
|
|
info.UnifiedReset7d = v
|
|
found = true
|
|
}
|
|
if v := headers.Get("anthropic-ratelimit-unified-fallback-percentage"); v != "" {
|
|
info.UnifiedFallbackPercentage, _ = strconv.ParseFloat(v, 64)
|
|
found = true
|
|
}
|
|
if v := headers.Get("anthropic-ratelimit-unified-overage-status"); v != "" {
|
|
info.UnifiedOverageStatus = v
|
|
found = true
|
|
}
|
|
if v := headers.Get("anthropic-ratelimit-unified-representative-claim"); v != "" {
|
|
info.UnifiedRepresentativeClaim = v
|
|
found = true
|
|
}
|
|
|
|
// Legacy per-resource rate limits
|
|
if v := headers.Get("anthropic-ratelimit-requests-limit"); v != "" {
|
|
info.RequestsLimit, _ = strconv.Atoi(v)
|
|
found = true
|
|
}
|
|
if v := headers.Get("anthropic-ratelimit-requests-remaining"); v != "" {
|
|
info.RequestsRemaining, _ = strconv.Atoi(v)
|
|
found = true
|
|
}
|
|
if v := headers.Get("anthropic-ratelimit-requests-reset"); v != "" {
|
|
info.RequestsReset = v
|
|
found = true
|
|
}
|
|
if v := headers.Get("anthropic-ratelimit-tokens-limit"); v != "" {
|
|
info.TokensLimit, _ = strconv.Atoi(v)
|
|
found = true
|
|
}
|
|
if v := headers.Get("anthropic-ratelimit-tokens-remaining"); v != "" {
|
|
info.TokensRemaining, _ = strconv.Atoi(v)
|
|
found = true
|
|
}
|
|
if v := headers.Get("anthropic-ratelimit-tokens-reset"); v != "" {
|
|
info.TokensReset = v
|
|
found = true
|
|
}
|
|
|
|
// Fall back to standard rate limit headers
|
|
if !found {
|
|
if v := headers.Get("x-ratelimit-limit"); v != "" {
|
|
info.RequestsLimit, _ = strconv.Atoi(v)
|
|
found = true
|
|
}
|
|
if v := headers.Get("x-ratelimit-remaining"); v != "" {
|
|
info.RequestsRemaining, _ = strconv.Atoi(v)
|
|
found = true
|
|
}
|
|
if v := headers.Get("x-ratelimit-reset"); v != "" {
|
|
info.RequestsReset = v
|
|
found = true
|
|
}
|
|
}
|
|
|
|
if !found {
|
|
return nil
|
|
}
|
|
return info
|
|
}
|
|
|
|
// SanitizeHeaders removes sensitive headers before logging/storage
|
|
func SanitizeHeaders(headers http.Header) http.Header {
|
|
sanitized := make(http.Header)
|
|
|
|
sensitiveHeaders := []string{
|
|
"x-api-key",
|
|
"api-key",
|
|
"authorization",
|
|
"anthropic-api-key",
|
|
"openai-api-key",
|
|
"bearer",
|
|
}
|
|
|
|
for key, values := range headers {
|
|
lowerKey := strings.ToLower(key)
|
|
isSensitive := false
|
|
|
|
for _, sensitive := range sensitiveHeaders {
|
|
if strings.Contains(lowerKey, sensitive) {
|
|
isSensitive = true
|
|
break
|
|
}
|
|
}
|
|
|
|
if isSensitive {
|
|
// Calculate SHA256 hash for each sensitive header value
|
|
hashedValues := make([]string, len(values))
|
|
for i, value := range values {
|
|
hash := sha256.Sum256([]byte(value))
|
|
hashedValues[i] = fmt.Sprintf("sha256:%x", hash)
|
|
}
|
|
sanitized[key] = hashedValues
|
|
} else {
|
|
sanitized[key] = values
|
|
}
|
|
}
|
|
|
|
return sanitized
|
|
}
|