claude-code-proxy/proxy/internal/service/storage_payload.go

204 lines
5.1 KiB
Go
Raw Normal View History

Local fork: hardening + ops improvements (timeout knob, demotion, /livez, drain) This commit captures both the prior accumulated work-in-progress (framework migration web/→svelte/, postgres storage, conversation viewer, dashboard auth, OpenAPI spec, integration tests) AND today's operational improvements layered on top. History wasn't checkpointed incrementally; happy to split it via interactive rebase if a reviewer wants smaller commits. Today's changes (in addition to the older WIP): 1. Configurable upstream response-header timeout - ANTHROPIC_RESPONSE_HEADER_TIMEOUT env (default 300s) - Replaces hardcoded 300s in provider/anthropic.go that was firing on opus + 1M-context + extended thinking non-streaming requests - Files: internal/config/config.go, internal/provider/anthropic.go 2. Structured forward-error diagnostic logging - When a forward to Anthropic fails, log a single key=value line with request_id, model, stream, body_bytes, has_thinking, anthropic_beta, query, elapsed, ctx_err — alongside the existing human-readable error line for back-compat - Files: internal/handler/handlers.go (logForwardFailure) 3. Full SSE protocol passthrough + Flusher fix - handler/handlers.go: forward all SSE lines verbatim (event:, id:, retry:, : comments, blank-line terminators), not only data:. Previous code produced malformed SSE for strict parsers. - middleware/logging.go: explicit Flush() method on responseWriter. Embedding http.ResponseWriter (interface) does not auto-promote Flush(), so every w.(http.Flusher) check in the streaming handler was returning ok=false and SSE writes buffered in net/http until the body closed. 4. Non-streaming → streaming demotion (feature-flagged) - ANTHROPIC_DEMOTE_NONSTREAMING env (default false) - When enabled and the routed provider is anthropic, force stream=true upstream for clients that asked for stream=false. 
Receive SSE, accumulate via accumulateSSEToMessage (handles text, tool_use with partial_json reassembly, thinking, signature, citations_delta, usage merge), and synthesize a single non-streaming JSON response. - Eliminates the ResponseHeaderTimeout class of failure entirely. - Body rewrite uses json.Decoder + UseNumber() to preserve integer precision in unknown nested fields (tool inputs from prior turns). - Files: internal/config/config.go, internal/handler/handlers.go, cmd/proxy/main.go, cmd/proxy/main_test.go 5. Live operational state: /livez gauge + graceful drain - New internal/runtime package: atomic in-flight counter + draining flag - New middleware/inflight.go: increments runtime gauge, applied to /v1/* subrouter so Messages, ChatCompletions, and ProxyPassthrough are all counted - /v1/* moved to a gorilla/mux subrouter so the InFlight middleware applies surgically; /health, /livez, /openapi.* remain on parent router (unauthenticated, uncounted) - Health handler returns 503 draining when runtime.IsDraining() is true, so Traefik stops routing to a slot before drain begins - New /livez handler returns {status, in_flight, draining, timestamp} - SIGTERM handler in main.go: SetDraining(true), poll for in_flight==0 with 32-min ceiling and 1s tick (logs every 10s), then srv.Shutdown - Auth bypass list extended with /livez - Files: internal/runtime/runtime.go (new), internal/middleware/inflight.go (new), internal/middleware/auth.go, internal/handler/handlers.go (Health, Livez, runtime import), cmd/proxy/main.go (subrouter, drain loop) 6. OpenAPI spec updates - Document Health 503 response and new DrainingResponse schema - Add /livez path with LivezResponse schema - Files: internal/handler/openapi.go Verified: go build ./... clean, go test ./... all pass, go vet clean. Three rounds of codex peer review across changes 1-5; all feedback addressed (citations_delta, json.Number precision, drain-loop logging via lastLog timestamp, PathPrefix tightened to "/v1/").
2026-05-02 15:15:58 -06:00
package service
import (
	"encoding/json"
	"log"
	"strconv"
	"strings"

	"github.com/seifghazi/claude-code-monitor/internal/config"
	"github.com/seifghazi/claude-code-monitor/internal/model"
)
// redactionPlaceholder replaces the value of any JSON field whose name appears
// in the storage config's redacted-fields list.
const redactionPlaceholder = "[REDACTED]"
// maxStoredBodyBytes is the maximum serialized size of a request body stored in the DB.
// Bodies larger than this (e.g. 1M context payloads) are replaced with a metadata summary.
const maxStoredBodyBytes = 512 * 1024 // 512 KB
// prepareRequestBodyForStorage produces the value to persist for a request
// body, honoring the storage config: metadata-only mode and disabled request
// capture both yield a placeholder, configured fields are redacted, and bodies
// whose serialized form exceeds maxStoredBodyBytes are replaced by a compact
// metadata summary.
func prepareRequestBodyForStorage(cfg *config.StorageConfig, body interface{}) (interface{}, error) {
	switch {
	case shouldSuppressBodies(cfg):
		return storageBodyPlaceholder("metadata_only"), nil
	case cfg != nil && !cfg.CaptureRequestBody:
		return storageBodyPlaceholder("request_body_disabled"), nil
	}

	normalized, err := normalizeJSONValue(body)
	if err != nil {
		return nil, err
	}
	redacted := redactJSONValue(normalized, redactedFieldSet(redactedFields(cfg)))

	// Size guard: oversized bodies (e.g. 1M-context payloads) become a summary.
	// A marshal failure here is treated as "size unknown" and the redacted
	// value is stored as-is (best-effort semantics).
	serialized, marshalErr := json.Marshal(redacted)
	if marshalErr == nil && len(serialized) > maxStoredBodyBytes {
		return truncatedBodySummary(redacted, len(serialized)), nil
	}
	return redacted, nil
}
// truncatedBodySummary builds a small metadata map describing an oversized
// request body so the DB row stays compact while retaining diagnostic info:
// a truncation marker, the original serialized size, a few scalar fields
// copied verbatim, and element counts for the large list-valued fields.
func truncatedBodySummary(body interface{}, originalBytes int) map[string]interface{} {
	out := map[string]interface{}{
		"_truncated":      true,
		"_original_bytes": originalBytes,
	}

	obj, isMap := body.(map[string]interface{})
	if !isMap {
		return out
	}

	// Scalar fields are cheap to keep verbatim.
	for _, key := range []string{"model", "stream", "max_tokens"} {
		if v, present := obj[key]; present {
			out[key] = v
		}
	}

	// List-valued fields can be huge; record only their lengths.
	for field, label := range map[string]string{
		"messages": "message_count",
		"system":   "system_count",
		"tools":    "tool_count",
	} {
		if list, isList := obj[field].([]interface{}); isList {
			out[label] = len(list)
		}
	}
	return out
}
// prepareResponseForStorage returns a sanitized copy of response suitable for
// persistence. Depending on cfg it may drop the body and chunks entirely
// (metadata-only mode or response capture disabled), redact configured fields,
// and cap both the body size and the number of stored streaming chunks. The
// input is never mutated; a nil response yields (nil, nil).
func prepareResponseForStorage(cfg *config.StorageConfig, logger *log.Logger, response *model.ResponseLog) (*model.ResponseLog, error) {
	if response == nil {
		return nil, nil
	}
	clone := *response
	if shouldSuppressBodies(cfg) || (cfg != nil && !cfg.CaptureResponseBody) {
		// Body capture is off: strip every payload field, keep metadata.
		clone.Body = nil
		clone.BodyText = ""
		clone.StreamingChunks = nil
		clone.ChunkTimings = nil
		return &clone, nil
	}
	if len(clone.Body) > 0 {
		sanitizedBody, err := sanitizeRawJSON(clone.Body, redactedFieldSet(redactedFields(cfg)))
		if err != nil {
			// Best-effort redaction: keep the original body rather than fail
			// the whole storage operation.
			if logger != nil {
				logger.Printf("Warning: failed to redact response body: %v", err)
			}
		} else {
			clone.Body = sanitizedBody
		}
		// Cap stored response body size. Record the original size in the stub,
		// mirroring the _original_bytes field truncatedBodySummary stores on
		// the request path.
		if len(clone.Body) > maxStoredBodyBytes {
			clone.Body = json.RawMessage(`{"_truncated":true,"_original_bytes":` + strconv.Itoa(len(clone.Body)) + `}`)
		}
	}
	// Cap stored streaming chunks to avoid huge DB rows on long streams.
	const maxStoredChunks = 500
	if len(clone.StreamingChunks) > maxStoredChunks {
		clone.StreamingChunks = clone.StreamingChunks[:maxStoredChunks]
	}
	if len(clone.ChunkTimings) > maxStoredChunks {
		clone.ChunkTimings = clone.ChunkTimings[:maxStoredChunks]
	}
	return &clone, nil
}
// shouldSuppressBodies reports whether the storage config requests
// metadata-only persistence (no request/response bodies at all).
func shouldSuppressBodies(cfg *config.StorageConfig) bool {
	if cfg == nil {
		return false
	}
	return cfg.MetadataOnly
}
// redactedFields returns the configured list of field names to redact,
// or nil when no config is present.
func redactedFields(cfg *config.StorageConfig) []string {
	var fields []string
	if cfg != nil {
		fields = cfg.RedactedFields
	}
	return fields
}
// normalizeJSONValue round-trips value through JSON so it is expressed purely
// in generic JSON types (map[string]interface{}, []interface{}, float64,
// string, bool, nil), making it safe to walk for field-level redaction.
// A nil input stays nil.
func normalizeJSONValue(value interface{}) (interface{}, error) {
	if value == nil {
		return nil, nil
	}
	encoded, err := json.Marshal(value)
	if err != nil {
		return nil, err
	}
	var generic interface{}
	if err = json.Unmarshal(encoded, &generic); err != nil {
		return nil, err
	}
	return generic, nil
}
// sanitizeRawJSON parses raw JSON, masks any fields listed in redacted, and
// re-serializes the result. On parse or re-marshal failure the original raw
// bytes are returned together with the error so callers can decide whether to
// keep them. Empty input passes through untouched.
func sanitizeRawJSON(raw json.RawMessage, redacted map[string]struct{}) (json.RawMessage, error) {
	if len(raw) == 0 {
		return raw, nil
	}
	var parsed interface{}
	if err := json.Unmarshal(raw, &parsed); err != nil {
		return raw, err
	}
	cleaned, err := json.Marshal(redactJSONValue(parsed, redacted))
	if err != nil {
		return raw, err
	}
	return json.RawMessage(cleaned), nil
}
// redactJSONValue walks a decoded JSON value and replaces the value of every
// map key whose lowercase form appears in redacted with redactionPlaceholder.
// Maps and slices are rebuilt rather than mutated in place; scalars pass
// through unchanged.
func redactJSONValue(value interface{}, redacted map[string]struct{}) interface{} {
	if obj, isMap := value.(map[string]interface{}); isMap {
		out := make(map[string]interface{}, len(obj))
		for key, child := range obj {
			if _, hit := redacted[strings.ToLower(key)]; hit {
				out[key] = redactionPlaceholder
			} else {
				out[key] = redactJSONValue(child, redacted)
			}
		}
		return out
	}
	if list, isSlice := value.([]interface{}); isSlice {
		out := make([]interface{}, len(list))
		for i, child := range list {
			out[i] = redactJSONValue(child, redacted)
		}
		return out
	}
	return value
}
// storageBodyPlaceholder returns the stub stored in place of a body when
// capture is disabled; mode records why the body was suppressed.
func storageBodyPlaceholder(mode string) map[string]interface{} {
	placeholder := make(map[string]interface{}, 1)
	placeholder["_storage_mode"] = mode
	return placeholder
}
// redactedFieldSet converts a list of field names into a lookup set of
// lowercase, whitespace-trimmed names; blank entries are dropped.
func redactedFieldSet(fields []string) map[string]struct{} {
	out := make(map[string]struct{}, len(fields))
	for _, raw := range fields {
		name := strings.ToLower(strings.TrimSpace(raw))
		if name == "" {
			continue
		}
		out[name] = struct{}{}
	}
	return out
}