claude-code-proxy/proxy/internal/service/storage_contract_test.go
sid 8e550b9785 Local fork: hardening + ops improvements (timeout knob, demotion, /livez, drain)
This commit captures both the prior accumulated work-in-progress
(framework migration web/→svelte/, postgres storage, conversation
viewer, dashboard auth, OpenAPI spec, integration tests) AND today's
operational improvements layered on top. History wasn't checkpointed
incrementally; happy to split it via interactive rebase if a reviewer
wants smaller commits.

Today's changes (in addition to the older WIP):

1. Configurable upstream response-header timeout
   - ANTHROPIC_RESPONSE_HEADER_TIMEOUT env (default 300s)
   - Replaces hardcoded 300s in provider/anthropic.go that was firing
     on opus + 1M-context + extended thinking non-streaming requests
   - Files: internal/config/config.go, internal/provider/anthropic.go

2. Structured forward-error diagnostic logging
   - When a forward to Anthropic fails, log a single key=value line
     with request_id, model, stream, body_bytes, has_thinking,
     anthropic_beta, query, elapsed, ctx_err — alongside the existing
     human-readable error line for back-compat
   - Files: internal/handler/handlers.go (logForwardFailure)

3. Full SSE protocol passthrough + Flusher fix
   - handler/handlers.go: forward all SSE lines verbatim (event:, id:,
     retry:, : comments, blank-line terminators), not only data:.
     Previous code produced malformed SSE for strict parsers.
   - middleware/logging.go: explicit Flush() method on responseWriter.
     Embedding http.ResponseWriter (interface) does not auto-promote
     Flush(), so every w.(http.Flusher) check in the streaming
     handler was returning ok=false and SSE writes buffered in net/http
     until the body closed.

4. Non-streaming → streaming demotion (feature-flagged)
   - ANTHROPIC_DEMOTE_NONSTREAMING env (default false)
   - When enabled and the routed provider is anthropic, force stream=true
     upstream for clients that asked for stream=false. Receive SSE,
     accumulate via accumulateSSEToMessage (handles text, tool_use with
     partial_json reassembly, thinking, signature, citations_delta,
     usage merge), and synthesize a single non-streaming JSON response.
   - Eliminates the ResponseHeaderTimeout class of failure entirely.
   - Body rewrite uses json.Decoder + UseNumber() to preserve integer
     precision in unknown nested fields (tool inputs from prior turns).
   - Files: internal/config/config.go, internal/handler/handlers.go,
     cmd/proxy/main.go, cmd/proxy/main_test.go

5. Live operational state: /livez gauge + graceful drain
   - New internal/runtime package: atomic in-flight counter + draining flag
   - New middleware/inflight.go: increments runtime gauge, applied to
     /v1/* subrouter so Messages, ChatCompletions, and ProxyPassthrough
     are all counted
   - /v1/* moved to a gorilla/mux subrouter so the InFlight middleware
     applies surgically; /health, /livez, /openapi.* remain on parent
     router (unauthenticated, uncounted)
   - Health handler returns 503 draining when runtime.IsDraining() is
     true, so Traefik stops routing to a slot before drain begins
   - New /livez handler returns {status, in_flight, draining, timestamp}
   - SIGTERM handler in main.go: SetDraining(true), poll for in_flight==0
     with 32-min ceiling and 1s tick (logs every 10s), then srv.Shutdown
   - Auth bypass list extended with /livez
   - Files: internal/runtime/runtime.go (new),
     internal/middleware/inflight.go (new),
     internal/middleware/auth.go,
     internal/handler/handlers.go (Health, Livez, runtime import),
     cmd/proxy/main.go (subrouter, drain loop)

6. OpenAPI spec updates
   - Document Health 503 response and new DrainingResponse schema
   - Add /livez path with LivezResponse schema
   - Files: internal/handler/openapi.go

Verified: go build ./... clean, go test ./... all pass, go vet clean.
Three rounds of codex peer review across changes 1-5; all feedback
addressed (citations_delta, json.Number precision, drain-loop logging
via lastLog timestamp, PathPrefix tightened to "/v1/").
2026-05-02 15:15:58 -06:00

217 lines
6.8 KiB
Go

package service
import (
"encoding/json"
"path/filepath"
"testing"
"time"
"github.com/seifghazi/claude-code-monitor/internal/config"
"github.com/seifghazi/claude-code-monitor/internal/model"
)
// storageFactory describes one StorageService backend under contract test:
// a short identifying name (also embedded in the temp DB filename) and a
// constructor that builds a fresh, isolated instance for a single test case.
type storageFactory struct {
// name identifies the backend implementation (e.g. "sqlite", "postgres").
name string
// new constructs a ready-to-use StorageService from cfg; implementations
// are expected to register any cleanup via t (e.g. t.Cleanup).
new func(t *testing.T, cfg config.StorageConfig) StorageService
}
// runStorageContractTests runs the behavioral contract that every
// StorageService implementation must satisfy. Each subtest builds a fresh
// store through factory so all backends are exercised identically: save/fetch
// by short id, response persistence, field redaction, body-capture
// suppression, retention cleanup, and full clearing.
func runStorageContractTests(t *testing.T, factory storageFactory) {
	t.Helper()

	// newStore creates an isolated store for one subtest. The temp DB path is
	// common to every configuration, so it is filled in here rather than
	// repeated at each call site.
	newStore := func(t *testing.T, cfg config.StorageConfig) StorageService {
		t.Helper()
		cfg.DBPath = filepath.Join(t.TempDir(), factory.name+".db")
		return factory.new(t, cfg)
	}

	t.Run("save and fetch by short id", func(t *testing.T) {
		storage := newStore(t, config.StorageConfig{})
		req := newContractRequest("fetch-123")
		mustSaveRequest(t, storage, req)
		got := mustGetByShortID(t, storage, "123")
		if got.RequestID != req.RequestID {
			t.Fatalf("expected request id %q, got %q", req.RequestID, got.RequestID)
		}
		if got.Method != req.Method || got.Endpoint != req.Endpoint || got.Model != req.Model {
			t.Fatalf("unexpected fetched request: %#v", got)
		}
	})

	t.Run("update response persists status and usage metadata", func(t *testing.T) {
		storage := newStore(t, config.StorageConfig{})
		req := newContractRequest("response-123")
		mustSaveRequest(t, storage, req)
		req.Response = newContractResponse()
		if err := storage.UpdateRequestWithResponse(req); err != nil {
			t.Fatalf("UpdateRequestWithResponse() error = %v", err)
		}
		got := mustGetByShortID(t, storage, "123")
		if got.Response == nil || got.Response.StatusCode != 200 {
			t.Fatalf("expected stored response, got %#v", got.Response)
		}
	})

	t.Run("redaction survives round trip", func(t *testing.T) {
		storage := newStore(t, config.StorageConfig{
			CaptureRequestBody:  true,
			CaptureResponseBody: true,
			RedactedFields:      []string{"api_key", "secret"},
		})
		req := newContractRequest("redact-123")
		req.Body = map[string]interface{}{
			"api_key": "abc123",
			"nested": map[string]interface{}{
				"secret": "top-secret",
				"keep":   "ok",
			},
		}
		req.Response = &model.ResponseLog{
			StatusCode:   httpStatusOK,
			Headers:      map[string][]string{"Content-Type": {"application/json"}},
			Body:         json.RawMessage(`{"secret":"response-secret","visible":"yes"}`),
			ResponseTime: 12,
			CompletedAt:  time.Now().UTC().Format(time.RFC3339),
		}
		mustSaveRequest(t, storage, req)
		if err := storage.UpdateRequestWithResponse(req); err != nil {
			t.Fatalf("UpdateRequestWithResponse() error = %v", err)
		}
		got := mustGetByShortID(t, storage, "123")
		// Use checked assertions so a schema regression fails the test with a
		// message instead of panicking the whole test binary.
		body := contractBodyMap(t, got.Body)
		if body["api_key"] != redactionPlaceholder {
			t.Fatalf("expected api_key redacted, got %#v", body["api_key"])
		}
		nested := contractBodyMap(t, body["nested"])
		if nested["secret"] != redactionPlaceholder || nested["keep"] != "ok" {
			t.Fatalf("unexpected nested redaction result: %#v", nested)
		}
	})

	t.Run("body suppression semantics", func(t *testing.T) {
		storage := newStore(t, config.StorageConfig{
			CaptureRequestBody:  false,
			CaptureResponseBody: false,
		})
		req := newContractRequest("suppress-123")
		req.Body = map[string]interface{}{"message": "do not store me"}
		req.Response = &model.ResponseLog{
			StatusCode:      httpStatusOK,
			Headers:         map[string][]string{"Content-Type": {"application/json"}},
			Body:            json.RawMessage(`{"answer":"do not store me"}`),
			BodyText:        "sensitive text",
			StreamingChunks: []string{"data: chunk-1"},
			ResponseTime:    10,
			CompletedAt:     time.Now().UTC().Format(time.RFC3339),
		}
		mustSaveRequest(t, storage, req)
		if err := storage.UpdateRequestWithResponse(req); err != nil {
			t.Fatalf("UpdateRequestWithResponse() error = %v", err)
		}
		got := mustGetByShortID(t, storage, "123")
		body := contractBodyMap(t, got.Body)
		if body["_storage_mode"] != "request_body_disabled" {
			t.Fatalf("expected request body placeholder, got %#v", body)
		}
		if got.Response == nil || len(got.Response.Body) != 0 || got.Response.BodyText != "" || len(got.Response.StreamingChunks) != 0 {
			t.Fatalf("expected suppressed response body fields, got %#v", got.Response)
		}
	})

	t.Run("retention cleanup on write", func(t *testing.T) {
		storage := newStore(t, config.StorageConfig{
			RetentionDays: 1,
		})
		// A request older than the retention window must be purged by a
		// subsequent write; the recent one must survive.
		oldReq := newContractRequest("old-123")
		oldReq.Timestamp = time.Now().Add(-48 * time.Hour).UTC().Format(time.RFC3339)
		mustSaveRequest(t, storage, oldReq)
		recentReq := newContractRequest("recent-123")
		mustSaveRequest(t, storage, recentReq)
		got, err := storage.GetAllRequests("all")
		if err != nil {
			t.Fatalf("GetAllRequests() error = %v", err)
		}
		if len(got) != 1 || got[0].RequestID != "recent-123" {
			t.Fatalf("expected only recent request to remain, got %#v", got)
		}
	})

	t.Run("clear requests removes all rows", func(t *testing.T) {
		storage := newStore(t, config.StorageConfig{})
		mustSaveRequest(t, storage, newContractRequest("clear-123"))
		mustSaveRequest(t, storage, newContractRequest("clear-456"))
		deleted, err := storage.ClearRequests()
		if err != nil {
			t.Fatalf("ClearRequests() error = %v", err)
		}
		if deleted != 2 {
			t.Fatalf("expected 2 deleted rows, got %d", deleted)
		}
		got, err := storage.GetAllRequests("all")
		if err != nil {
			t.Fatalf("GetAllRequests() error = %v", err)
		}
		if len(got) != 0 {
			t.Fatalf("expected no remaining requests, got %d", len(got))
		}
	})
}

// contractBodyMap asserts that v is the map form a stored body deserializes
// to, failing the calling test (rather than panicking) on a type mismatch.
func contractBodyMap(t *testing.T, v interface{}) map[string]interface{} {
	t.Helper()
	m, ok := v.(map[string]interface{})
	if !ok {
		t.Fatalf("expected map[string]interface{} body, got %T", v)
	}
	return m
}
// newContractRequest builds a minimal request-log fixture whose RequestID is
// the supplied id; every other field carries a fixed, representative value.
func newContractRequest(id string) *model.RequestLog {
	now := time.Now().UTC().Format(time.RFC3339)
	req := &model.RequestLog{
		RequestID:   id,
		Timestamp:   now,
		Method:      "POST",
		Endpoint:    "/v1/messages",
		Model:       "claude-3-5-sonnet",
		UserAgent:   "test",
		ContentType: "application/json",
	}
	req.Headers = map[string][]string{"Content-Type": {"application/json"}}
	req.Body = map[string]interface{}{"message": "hello"}
	return req
}
// newContractResponse builds a fixed 200 response-log fixture with a small
// usage payload, suitable for exercising UpdateRequestWithResponse.
func newContractResponse() *model.ResponseLog {
	resp := &model.ResponseLog{
		StatusCode:   httpStatusOK,
		ResponseTime: 17,
	}
	resp.Headers = map[string][]string{"Content-Type": {"application/json"}}
	resp.Body = json.RawMessage(`{"usage":{"input_tokens":11,"output_tokens":22},"stop_reason":"end_turn"}`)
	resp.CompletedAt = time.Now().UTC().Format(time.RFC3339)
	return resp
}
// mustSaveRequest persists req into storage and fails the calling test
// immediately if the save returns an error.
func mustSaveRequest(t *testing.T, storage StorageService, req *model.RequestLog) {
	t.Helper()
	_, err := storage.SaveRequest(req)
	if err != nil {
		t.Fatalf("SaveRequest() error = %v", err)
	}
}
// mustGetByShortID looks up a stored request by its short id and fails the
// calling test immediately on error; the request log is returned otherwise.
func mustGetByShortID(t *testing.T, storage StorageService, shortID string) *model.RequestLog {
	t.Helper()
	req, _, err := storage.GetRequestByShortID(shortID)
	if err != nil {
		t.Fatalf("GetRequestByShortID(%q) error = %v", shortID, err)
	}
	return req
}