// Package handler provides HTTP proxy helpers for applying header rules,
// forwarding upstream response headers, extracting rate-limit metadata,
// and sanitizing sensitive headers before storage/logging.
package handler

import (
	"crypto/sha256"
	"fmt"
	"net/http"
	"strconv"
	"strings"

	"github.com/seifghazi/claude-code-monitor/internal/model"
)

// hopByHopHeaders lists (lowercased) headers a proxy must not forward,
// plus content-encoding/content-length, which this proxy rewrites itself.
var hopByHopHeaders = map[string]bool{
	"connection":          true,
	"keep-alive":          true,
	"proxy-authenticate":  true,
	"proxy-authorization": true,
	"te":                  true,
	"trailers":            true,
	"transfer-encoding":   true,
	"upgrade":             true,
	"content-encoding":    true, // We handle decompression ourselves
	"content-length":      true, // May change after decompression
}

// ApplyHeaderRules applies block/set/replace rules to an http.Header in-place.
// Disabled rules are skipped; unknown actions are ignored. For "replace",
// every value of the header is rewritten with strings.ReplaceAll, so values
// without a match are preserved unchanged.
func ApplyHeaderRules(headers http.Header, rules []model.HeaderRule) {
	for _, rule := range rules {
		if !rule.Enabled {
			continue
		}
		key := http.CanonicalHeaderKey(rule.Header)
		switch rule.Action {
		case "block":
			headers.Del(key)
		case "set":
			headers.Set(key, rule.Value)
		case "replace":
			if rule.Find == "" {
				continue
			}
			values := headers.Values(key)
			if len(values) == 0 {
				continue
			}
			// BUG FIX: the previous implementation mutated the header while
			// iterating it — dropping non-matching values when the first value
			// matched (Set cleared them), and duplicating values when a later
			// value matched (Add kept the original alongside the replacement).
			// Rewrite all values first, then reinstall the complete list.
			replaced := make([]string, len(values))
			for i, v := range values {
				replaced[i] = strings.ReplaceAll(v, rule.Find, rule.Value)
			}
			headers.Del(key)
			for _, v := range replaced {
				headers.Add(key, v)
			}
		}
	}
}

// CopyAllResponseHeaders forwards all upstream response headers to the client,
// stripping only hop-by-hop headers that must not be forwarded by a proxy.
func CopyAllResponseHeaders(w http.ResponseWriter, resp *http.Response) {
	for key, values := range resp.Header {
		if hopByHopHeaders[strings.ToLower(key)] {
			continue
		}
		for _, value := range values {
			w.Header().Add(key, value)
		}
	}
}

// SanitizeResponseHeaders strips hop-by-hop proxy headers before applying the
// generic sensitive-header sanitization used for stored metadata. Value slices
// are copied so the result does not alias the caller's header map.
func SanitizeResponseHeaders(headers http.Header) http.Header {
	filtered := make(http.Header, len(headers))
	for key, values := range headers {
		if hopByHopHeaders[strings.ToLower(key)] {
			continue
		}
		filtered[key] = append([]string(nil), values...)
	}
	return SanitizeHeaders(filtered)
}

// ExtractRateLimitInfo parses rate limit headers from the upstream response.
// It checks, in order: Anthropic's unified quota headers, Anthropic's legacy
// per-resource headers, and finally the generic x-ratelimit-* headers (only
// when none of the Anthropic headers were present). Returns nil when no
// rate-limit header of any kind is found. Parse errors on numeric headers are
// deliberately ignored, leaving the corresponding field at its zero value.
func ExtractRateLimitInfo(headers http.Header) *model.RateLimitInfo {
	info := &model.RateLimitInfo{}
	found := false

	// Organization ID
	if v := headers.Get("anthropic-organization-id"); v != "" {
		info.OrganizationID = v
		found = true
	}

	// Unified quota system (current Anthropic model)
	if v := headers.Get("anthropic-ratelimit-unified-status"); v != "" {
		info.UnifiedStatus = v
		found = true
	}
	if v := headers.Get("anthropic-ratelimit-unified-5h-utilization"); v != "" {
		info.UnifiedUtilization5h, _ = strconv.ParseFloat(v, 64)
		found = true
	}
	if v := headers.Get("anthropic-ratelimit-unified-5h-reset"); v != "" {
		info.UnifiedReset5h = v
		found = true
	}
	if v := headers.Get("anthropic-ratelimit-unified-7d-utilization"); v != "" {
		info.UnifiedUtilization7d, _ = strconv.ParseFloat(v, 64)
		found = true
	}
	if v := headers.Get("anthropic-ratelimit-unified-7d-reset"); v != "" {
		info.UnifiedReset7d = v
		found = true
	}
	if v := headers.Get("anthropic-ratelimit-unified-fallback-percentage"); v != "" {
		info.UnifiedFallbackPercentage, _ = strconv.ParseFloat(v, 64)
		found = true
	}
	if v := headers.Get("anthropic-ratelimit-unified-overage-status"); v != "" {
		info.UnifiedOverageStatus = v
		found = true
	}
	if v := headers.Get("anthropic-ratelimit-unified-representative-claim"); v != "" {
		info.UnifiedRepresentativeClaim = v
		found = true
	}

	// Legacy per-resource rate limits
	if v := headers.Get("anthropic-ratelimit-requests-limit"); v != "" {
		info.RequestsLimit, _ = strconv.Atoi(v)
		found = true
	}
	if v := headers.Get("anthropic-ratelimit-requests-remaining"); v != "" {
		info.RequestsRemaining, _ = strconv.Atoi(v)
		found = true
	}
	if v := headers.Get("anthropic-ratelimit-requests-reset"); v != "" {
		info.RequestsReset = v
		found = true
	}
	if v := headers.Get("anthropic-ratelimit-tokens-limit"); v != "" {
		info.TokensLimit, _ = strconv.Atoi(v)
		found = true
	}
	if v := headers.Get("anthropic-ratelimit-tokens-remaining"); v != "" {
		info.TokensRemaining, _ = strconv.Atoi(v)
		found = true
	}
	if v := headers.Get("anthropic-ratelimit-tokens-reset"); v != "" {
		info.TokensReset = v
		found = true
	}

	// Fall back to standard rate limit headers
	if !found {
		if v := headers.Get("x-ratelimit-limit"); v != "" {
			info.RequestsLimit, _ = strconv.Atoi(v)
			found = true
		}
		if v := headers.Get("x-ratelimit-remaining"); v != "" {
			info.RequestsRemaining, _ = strconv.Atoi(v)
			found = true
		}
		if v := headers.Get("x-ratelimit-reset"); v != "" {
			info.RequestsReset = v
			found = true
		}
	}

	if !found {
		return nil
	}
	return info
}

// SanitizeHeaders removes sensitive headers before logging/storage. Headers
// whose (lowercased) name contains any sensitive substring have each value
// replaced with its SHA-256 digest ("sha256:<hex>"), so identical credentials
// remain correlatable without being recoverable. All value slices are copied
// so the result does not alias the input header map.
func SanitizeHeaders(headers http.Header) http.Header {
	sanitized := make(http.Header, len(headers))
	sensitiveHeaders := []string{
		"x-api-key",
		"api-key",
		"authorization",
		"anthropic-api-key",
		"openai-api-key",
		"bearer",
	}

	for key, values := range headers {
		lowerKey := strings.ToLower(key)
		isSensitive := false
		for _, sensitive := range sensitiveHeaders {
			if strings.Contains(lowerKey, sensitive) {
				isSensitive = true
				break
			}
		}

		if isSensitive {
			// Calculate SHA256 hash for each sensitive header value
			hashedValues := make([]string, len(values))
			for i, value := range values {
				hash := sha256.Sum256([]byte(value))
				hashedValues[i] = fmt.Sprintf("sha256:%x", hash)
			}
			sanitized[key] = hashedValues
		} else {
			// Clone so callers mutating the input header later cannot
			// retroactively change the stored/sanitized copy.
			sanitized[key] = append([]string(nil), values...)
		}
	}
	return sanitized
}