claude-code-proxy/proxy/internal/service/storage_analytics.go

164 lines
3.8 KiB
Go
Raw Normal View History

Local fork: hardening + ops improvements (timeout knob, demotion, /livez, drain) This commit captures both the prior accumulated work-in-progress (framework migration web/→svelte/, postgres storage, conversation viewer, dashboard auth, OpenAPI spec, integration tests) AND today's operational improvements layered on top. History wasn't checkpointed incrementally; happy to split it via interactive rebase if a reviewer wants smaller commits. Today's changes (in addition to the older WIP): 1. Configurable upstream response-header timeout - ANTHROPIC_RESPONSE_HEADER_TIMEOUT env (default 300s) - Replaces hardcoded 300s in provider/anthropic.go that was firing on opus + 1M-context + extended thinking non-streaming requests - Files: internal/config/config.go, internal/provider/anthropic.go 2. Structured forward-error diagnostic logging - When a forward to Anthropic fails, log a single key=value line with request_id, model, stream, body_bytes, has_thinking, anthropic_beta, query, elapsed, ctx_err — alongside the existing human-readable error line for back-compat - Files: internal/handler/handlers.go (logForwardFailure) 3. Full SSE protocol passthrough + Flusher fix - handler/handlers.go: forward all SSE lines verbatim (event:, id:, retry:, : comments, blank-line terminators), not only data:. Previous code produced malformed SSE for strict parsers. - middleware/logging.go: explicit Flush() method on responseWriter. Embedding http.ResponseWriter (interface) does not auto-promote Flush(), so every w.(http.Flusher) check in the streaming handler was returning ok=false and SSE writes buffered in net/http until the body closed. 4. Non-streaming → streaming demotion (feature-flagged) - ANTHROPIC_DEMOTE_NONSTREAMING env (default false) - When enabled and the routed provider is anthropic, force stream=true upstream for clients that asked for stream=false. 
Receive SSE, accumulate via accumulateSSEToMessage (handles text, tool_use with partial_json reassembly, thinking, signature, citations_delta, usage merge), and synthesize a single non-streaming JSON response. - Eliminates the ResponseHeaderTimeout class of failure entirely. - Body rewrite uses json.Decoder + UseNumber() to preserve integer precision in unknown nested fields (tool inputs from prior turns). - Files: internal/config/config.go, internal/handler/handlers.go, cmd/proxy/main.go, cmd/proxy/main_test.go 5. Live operational state: /livez gauge + graceful drain - New internal/runtime package: atomic in-flight counter + draining flag - New middleware/inflight.go: increments runtime gauge, applied to /v1/* subrouter so Messages, ChatCompletions, and ProxyPassthrough are all counted - /v1/* moved to a gorilla/mux subrouter so the InFlight middleware applies surgically; /health, /livez, /openapi.* remain on parent router (unauthenticated, uncounted) - Health handler returns 503 draining when runtime.IsDraining() is true, so Traefik stops routing to a slot before drain begins - New /livez handler returns {status, in_flight, draining, timestamp} - SIGTERM handler in main.go: SetDraining(true), poll for in_flight==0 with 32-min ceiling and 1s tick (logs every 10s), then srv.Shutdown - Auth bypass list extended with /livez - Files: internal/runtime/runtime.go (new), internal/middleware/inflight.go (new), internal/middleware/auth.go, internal/handler/handlers.go (Health, Livez, runtime import), cmd/proxy/main.go (subrouter, drain loop) 6. OpenAPI spec updates - Document Health 503 response and new DrainingResponse schema - Add /livez path with LivezResponse schema - Files: internal/handler/openapi.go Verified: go build ./... clean, go test ./... all pass, go vet clean. Three rounds of codex peer review across changes 1-5; all feedback addressed (citations_delta, json.Number precision, drain-loop logging via lastLog timestamp, PathPrefix tightened to "/v1/").
2026-05-02 15:15:58 -06:00
package service
import (
"database/sql"
"encoding/json"
"github.com/seifghazi/claude-code-monitor/internal/model"
)
// responseBodySummary is the minimal projection of a stored Anthropic
// response body: only the usage block and stop reason are decoded, the
// rest of the JSON payload is ignored by json.Unmarshal.
type responseBodySummary struct {
Usage *model.AnthropicUsage `json:"usage"`
StopReason string `json:"stop_reason"`
}
// decodeStoredResponse parses the JSON held in a nullable database column
// into a model.ResponseLog. The second result is false when the column is
// NULL, empty, or does not contain valid JSON for the target type.
func decodeStoredResponse(responseJSON sql.NullString) (*model.ResponseLog, bool) {
	if !responseJSON.Valid || responseJSON.String == "" {
		return nil, false
	}
	decoded := &model.ResponseLog{}
	if err := json.Unmarshal([]byte(responseJSON.String), decoded); err != nil {
		return nil, false
	}
	return decoded, true
}
// decodeResponseBodySummary extracts the usage/stop_reason projection from a
// raw response body. Returns false for an empty body or malformed JSON.
func decodeResponseBodySummary(body json.RawMessage) (*responseBodySummary, bool) {
	if len(body) == 0 {
		return nil, false
	}
	parsed := &responseBodySummary{}
	if err := json.Unmarshal(body, parsed); err != nil {
		return nil, false
	}
	return parsed, true
}
// totalTokensFromUsage sums every token category (input, output, cache read,
// cache creation) into a single count. A nil usage counts as zero.
//
// Each field is widened to int64 before the addition: the original summed in
// the fields' native int type and converted afterwards, which could overflow
// on 32-bit builds before the widening took effect.
func totalTokensFromUsage(usage *model.AnthropicUsage) int64 {
	if usage == nil {
		return 0
	}
	return int64(usage.InputTokens) +
		int64(usage.OutputTokens) +
		int64(usage.CacheReadInputTokens) +
		int64(usage.CacheCreationInputTokens)
}
// totalTokensFromStoredResponse returns the combined input+output+cache
// token count recorded in a stored response, or 0 when the row has no
// decodable usage information.
func totalTokensFromStoredResponse(responseJSON sql.NullString) int64 {
	if in, out, cached, ok := usageCountsFromStoredResponse(responseJSON); ok {
		return in + out + cached
	}
	return 0
}
// responseTimeFromStoredResponse returns the recorded response time for a
// stored response, or 0 when the stored JSON cannot be decoded.
func responseTimeFromStoredResponse(responseJSON sql.NullString) int64 {
	if stored, ok := decodeStoredResponse(responseJSON); ok {
		return stored.ResponseTime
	}
	return 0
}
// applyStoredResponseToSummary copies status code and response time from a
// stored response onto summary, and — when the body can also be decoded —
// fills in usage and stop reason. A row that fails either decode step leaves
// the corresponding summary fields untouched.
func applyStoredResponseToSummary(summary *model.RequestSummary, responseJSON sql.NullString) {
	stored, decoded := decodeStoredResponse(responseJSON)
	if !decoded {
		return
	}
	summary.StatusCode = stored.StatusCode
	summary.ResponseTime = stored.ResponseTime
	if body, parsed := decodeResponseBodySummary(stored.Body); parsed {
		summary.Usage = body.Usage
		summary.StopReason = body.StopReason
	}
}
// usageCountsFromStoredResponse decodes a stored response and returns its
// input, output, and combined cache (creation + read) token counts. ok is
// false when the row, its body, or its usage block cannot be decoded.
//
// The cache terms are widened to int64 individually before being added: the
// original added them in the fields' native int type and converted the sum,
// which could overflow on 32-bit builds before the widening took effect.
func usageCountsFromStoredResponse(responseJSON sql.NullString) (input, output, cache int64, ok bool) {
	resp, ok := decodeStoredResponse(responseJSON)
	if !ok {
		return 0, 0, 0, false
	}
	bodySummary, ok := decodeResponseBodySummary(resp.Body)
	if !ok || bodySummary.Usage == nil {
		return 0, 0, 0, false
	}
	u := bodySummary.Usage
	return int64(u.InputTokens),
		int64(u.OutputTokens),
		int64(u.CacheCreationInputTokens) + int64(u.CacheReadInputTokens),
		true
}
// addDailyTokens folds one request's token count into the per-day aggregate
// map, creating the day's entry on first sight and updating the nested
// per-model breakdown either way.
func addDailyTokens(dailyMap map[string]*model.DailyTokens, date, modelName string, tokens int64) {
	entry, exists := dailyMap[date]
	if !exists {
		dailyMap[date] = &model.DailyTokens{
			Date:     date,
			Tokens:   tokens,
			Requests: 1,
			Models:   addDailyModelStat(nil, modelName, tokens),
		}
		return
	}
	entry.Tokens += tokens
	entry.Requests++
	entry.Models = addDailyModelStat(entry.Models, modelName, tokens)
}
// addHourlyTokens folds one request's token count into the hourly bucket
// keyed by bucketKey, creating the bucket on first sight. Hour is left at
// its zero value — bucketLabel appears to carry the display identity here;
// NOTE(review): confirm nothing downstream reads Hour from these buckets.
func addHourlyTokens(bucketMap map[string]*model.HourlyTokens, bucketKey, bucketLabel, modelName string, tokens int64) {
	entry, exists := bucketMap[bucketKey]
	if !exists {
		bucketMap[bucketKey] = &model.HourlyTokens{
			Hour:     0,
			Label:    bucketLabel,
			Tokens:   tokens,
			Requests: 1,
			Models:   addDailyModelStat(nil, modelName, tokens),
		}
		return
	}
	entry.Tokens += tokens
	entry.Requests++
	entry.Models = addDailyModelStat(entry.Models, modelName, tokens)
}
// addModelTokens folds one request's token count into the per-model
// aggregate map, creating the model's entry on first sight.
func addModelTokens(modelMap map[string]*model.ModelTokens, modelName string, tokens int64) {
	stat, exists := modelMap[modelName]
	if !exists {
		stat = &model.ModelTokens{Model: modelName}
		modelMap[modelName] = stat
	}
	stat.Tokens += tokens
	stat.Requests++
}
// addDailyModelStat records tokens and one request against modelName in a
// per-model stat map, allocating the map lazily. The (possibly new) map is
// returned so callers can assign it back; DailyModelStat is a value type,
// so the entry is read, mutated, and written back.
func addDailyModelStat(models map[string]model.DailyModelStat, modelName string, tokens int64) map[string]model.DailyModelStat {
	updated := models
	if updated == nil {
		updated = make(map[string]model.DailyModelStat, 1)
	}
	entry := updated[modelName]
	entry.Tokens += tokens
	entry.Requests++
	updated[modelName] = entry
	return updated
}