claude-code-proxy/proxy/internal/service/storage_payload_test.go

164 lines
4.5 KiB
Go
Raw Normal View History

Local fork: hardening + ops improvements (timeout knob, demotion, /livez, drain) This commit captures both the prior accumulated work-in-progress (framework migration web/→svelte/, postgres storage, conversation viewer, dashboard auth, OpenAPI spec, integration tests) AND today's operational improvements layered on top. History wasn't checkpointed incrementally; happy to split it via interactive rebase if a reviewer wants smaller commits. Today's changes (in addition to the older WIP): 1. Configurable upstream response-header timeout - ANTHROPIC_RESPONSE_HEADER_TIMEOUT env (default 300s) - Replaces hardcoded 300s in provider/anthropic.go that was firing on opus + 1M-context + extended thinking non-streaming requests - Files: internal/config/config.go, internal/provider/anthropic.go 2. Structured forward-error diagnostic logging - When a forward to Anthropic fails, log a single key=value line with request_id, model, stream, body_bytes, has_thinking, anthropic_beta, query, elapsed, ctx_err — alongside the existing human-readable error line for back-compat - Files: internal/handler/handlers.go (logForwardFailure) 3. Full SSE protocol passthrough + Flusher fix - handler/handlers.go: forward all SSE lines verbatim (event:, id:, retry:, : comments, blank-line terminators), not only data:. Previous code produced malformed SSE for strict parsers. - middleware/logging.go: explicit Flush() method on responseWriter. Embedding http.ResponseWriter (interface) does not auto-promote Flush(), so every w.(http.Flusher) check in the streaming handler was returning ok=false and SSE writes buffered in net/http until the body closed. 4. Non-streaming → streaming demotion (feature-flagged) - ANTHROPIC_DEMOTE_NONSTREAMING env (default false) - When enabled and the routed provider is anthropic, force stream=true upstream for clients that asked for stream=false. 
Receive SSE, accumulate via accumulateSSEToMessage (handles text, tool_use with partial_json reassembly, thinking, signature, citations_delta, usage merge), and synthesize a single non-streaming JSON response. - Eliminates the ResponseHeaderTimeout class of failure entirely. - Body rewrite uses json.Decoder + UseNumber() to preserve integer precision in unknown nested fields (tool inputs from prior turns). - Files: internal/config/config.go, internal/handler/handlers.go, cmd/proxy/main.go, cmd/proxy/main_test.go 5. Live operational state: /livez gauge + graceful drain - New internal/runtime package: atomic in-flight counter + draining flag - New middleware/inflight.go: increments runtime gauge, applied to /v1/* subrouter so Messages, ChatCompletions, and ProxyPassthrough are all counted - /v1/* moved to a gorilla/mux subrouter so the InFlight middleware applies surgically; /health, /livez, /openapi.* remain on parent router (unauthenticated, uncounted) - Health handler returns 503 draining when runtime.IsDraining() is true, so Traefik stops routing to a slot before drain begins - New /livez handler returns {status, in_flight, draining, timestamp} - SIGTERM handler in main.go: SetDraining(true), poll for in_flight==0 with 32-min ceiling and 1s tick (logs every 10s), then srv.Shutdown - Auth bypass list extended with /livez - Files: internal/runtime/runtime.go (new), internal/middleware/inflight.go (new), internal/middleware/auth.go, internal/handler/handlers.go (Health, Livez, runtime import), cmd/proxy/main.go (subrouter, drain loop) 6. OpenAPI spec updates - Document Health 503 response and new DrainingResponse schema - Add /livez path with LivezResponse schema - Files: internal/handler/openapi.go Verified: go build ./... clean, go test ./... all pass, go vet clean. Three rounds of codex peer review across changes 1-5; all feedback addressed (citations_delta, json.Number precision, drain-loop logging via lastLog timestamp, PathPrefix tightened to "/v1/").
2026-05-02 15:15:58 -06:00
package service
import (
"encoding/json"
"io"
"log"
"testing"
"github.com/seifghazi/claude-code-monitor/internal/config"
"github.com/seifghazi/claude-code-monitor/internal/model"
)
// TestPrepareRequestBodyForStorage exercises the storage-mode gating and
// field redaction applied to request bodies before they are persisted:
// metadata-only mode and disabled capture both yield a placeholder map,
// while enabled capture redacts configured fields recursively.
func TestPrepareRequestBodyForStorage(t *testing.T) {
	t.Parallel()

	cases := []struct {
		name   string
		cfg    *config.StorageConfig
		body   interface{}
		verify func(t *testing.T, got interface{}) // per-case assertions on the prepared body
	}{
		{
			name: "metadata only returns placeholder",
			cfg: &config.StorageConfig{
				MetadataOnly: true,
			},
			body: map[string]interface{}{"secret": "value"},
			verify: func(t *testing.T, got interface{}) {
				t.Helper()
				m, ok := got.(map[string]interface{})
				if !ok || m["_storage_mode"] != "metadata_only" {
					t.Fatalf("expected metadata_only placeholder, got %#v", got)
				}
			},
		},
		{
			name: "request capture disabled returns placeholder",
			cfg: &config.StorageConfig{
				CaptureRequestBody: false,
			},
			body: map[string]interface{}{"secret": "value"},
			verify: func(t *testing.T, got interface{}) {
				t.Helper()
				m, ok := got.(map[string]interface{})
				if !ok || m["_storage_mode"] != "request_body_disabled" {
					t.Fatalf("expected request_body_disabled placeholder, got %#v", got)
				}
			},
		},
		{
			name: "redacts nested fields",
			cfg: &config.StorageConfig{
				CaptureRequestBody: true,
				RedactedFields:     []string{"authorization", "password"},
			},
			body: map[string]interface{}{
				"authorization": "top-secret",
				"nested": map[string]interface{}{
					"password": "hide-me",
					"keep":     "visible",
				},
			},
			verify: func(t *testing.T, got interface{}) {
				t.Helper()
				m := got.(map[string]interface{})
				if m["authorization"] != redactionPlaceholder {
					t.Fatalf("expected top-level field redacted, got %#v", m["authorization"])
				}
				inner := m["nested"].(map[string]interface{})
				if inner["password"] != redactionPlaceholder {
					t.Fatalf("expected nested field redacted, got %#v", inner["password"])
				}
				if inner["keep"] != "visible" {
					t.Fatalf("expected keep field preserved, got %#v", inner["keep"])
				}
			},
		},
	}

	for _, tc := range cases {
		tc := tc // shadow for parallel subtests on pre-1.22 toolchains
		t.Run(tc.name, func(t *testing.T) {
			t.Parallel()
			got, err := prepareRequestBodyForStorage(tc.cfg, tc.body)
			if err != nil {
				t.Fatalf("prepareRequestBodyForStorage() error = %v", err)
			}
			tc.verify(t, got)
		})
	}
}
func TestPrepareResponseForStorage(t *testing.T) {
t.Parallel()
logger := log.New(io.Discard, "", 0)
t.Run("metadata only strips body fields", func(t *testing.T) {
t.Parallel()
got, err := prepareResponseForStorage(&config.StorageConfig{MetadataOnly: true}, logger, &model.ResponseLog{
Body: json.RawMessage(`{"secret":"value"}`),
BodyText: "raw",
StreamingChunks: []string{"chunk"},
ChunkTimings: []model.ChunkTiming{{Index: 0}},
})
if err != nil {
t.Fatalf("prepareResponseForStorage() error = %v", err)
}
if got == nil {
t.Fatal("expected response clone")
}
if got.Body != nil || got.BodyText != "" || got.StreamingChunks != nil || got.ChunkTimings != nil {
t.Fatalf("expected body fields stripped, got %#v", got)
}
})
t.Run("redacts json response bodies", func(t *testing.T) {
t.Parallel()
got, err := prepareResponseForStorage(&config.StorageConfig{
CaptureResponseBody: true,
RedactedFields: []string{"api_key"},
}, logger, &model.ResponseLog{
Body: json.RawMessage(`{"api_key":"secret","nested":{"keep":"ok"}}`),
})
if err != nil {
t.Fatalf("prepareResponseForStorage() error = %v", err)
}
var body map[string]interface{}
if err := json.Unmarshal(got.Body, &body); err != nil {
t.Fatalf("unmarshal redacted body: %v", err)
}
if body["api_key"] != redactionPlaceholder {
t.Fatalf("expected api_key redacted, got %#v", body["api_key"])
}
nested := body["nested"].(map[string]interface{})
if nested["keep"] != "ok" {
t.Fatalf("expected nested field preserved, got %#v", nested["keep"])
}
})
t.Run("preserves non json body bytes", func(t *testing.T) {
t.Parallel()
original := json.RawMessage(`not-json`)
got, err := prepareResponseForStorage(&config.StorageConfig{
CaptureResponseBody: true,
RedactedFields: []string{"token"},
}, logger, &model.ResponseLog{
Body: original,
})
if err != nil {
t.Fatalf("prepareResponseForStorage() error = %v", err)
}
if string(got.Body) != string(original) {
t.Fatalf("expected original non-json body preserved, got %q", string(got.Body))
}
})
}