claude-code-proxy/proxy/internal/service/storage_postgres.go
sid 8e550b9785 Local fork: hardening + ops improvements (timeout knob, demotion, /livez, drain)
This commit captures both the prior accumulated work-in-progress
(framework migration web/→svelte/, postgres storage, conversation
viewer, dashboard auth, OpenAPI spec, integration tests) AND today's
operational improvements layered on top. History wasn't checkpointed
incrementally; happy to split it via interactive rebase if a reviewer
wants smaller commits.

Today's changes (in addition to the older WIP):

1. Configurable upstream response-header timeout
   - ANTHROPIC_RESPONSE_HEADER_TIMEOUT env (default 300s)
   - Replaces hardcoded 300s in provider/anthropic.go that was firing
     on opus + 1M-context + extended thinking non-streaming requests
   - Files: internal/config/config.go, internal/provider/anthropic.go

2. Structured forward-error diagnostic logging
   - When a forward to Anthropic fails, log a single key=value line
     with request_id, model, stream, body_bytes, has_thinking,
     anthropic_beta, query, elapsed, ctx_err — alongside the existing
     human-readable error line for back-compat
   - Files: internal/handler/handlers.go (logForwardFailure)

3. Full SSE protocol passthrough + Flusher fix
   - handler/handlers.go: forward all SSE lines verbatim (event:, id:,
     retry:, : comments, blank-line terminators), not only data:.
     Previous code produced malformed SSE for strict parsers.
   - middleware/logging.go: explicit Flush() method on responseWriter.
     Embedding http.ResponseWriter (interface) does not auto-promote
     Flush(), so every w.(http.Flusher) check in the streaming
     handler was returning ok=false and SSE writes buffered in net/http
     until the body closed.

4. Non-streaming → streaming demotion (feature-flagged)
   - ANTHROPIC_DEMOTE_NONSTREAMING env (default false)
   - When enabled and the routed provider is anthropic, force stream=true
     upstream for clients that asked for stream=false. Receive SSE,
     accumulate via accumulateSSEToMessage (handles text, tool_use with
     partial_json reassembly, thinking, signature, citations_delta,
     usage merge), and synthesize a single non-streaming JSON response.
   - Eliminates the ResponseHeaderTimeout class of failure entirely.
   - Body rewrite uses json.Decoder + UseNumber() to preserve integer
     precision in unknown nested fields (tool inputs from prior turns).
   - Files: internal/config/config.go, internal/handler/handlers.go,
     cmd/proxy/main.go, cmd/proxy/main_test.go

5. Live operational state: /livez gauge + graceful drain
   - New internal/runtime package: atomic in-flight counter + draining flag
   - New middleware/inflight.go: increments runtime gauge, applied to
     /v1/* subrouter so Messages, ChatCompletions, and ProxyPassthrough
     are all counted
   - /v1/* moved to a gorilla/mux subrouter so the InFlight middleware
     applies surgically; /health, /livez, /openapi.* remain on parent
     router (unauthenticated, uncounted)
   - Health handler returns 503 draining when runtime.IsDraining() is
     true, so Traefik stops routing to a slot before drain begins
   - New /livez handler returns {status, in_flight, draining, timestamp}
   - SIGTERM handler in main.go: SetDraining(true), poll for in_flight==0
     with 32-min ceiling and 1s tick (logs every 10s), then srv.Shutdown
   - Auth bypass list extended with /livez
   - Files: internal/runtime/runtime.go (new),
     internal/middleware/inflight.go (new),
     internal/middleware/auth.go,
     internal/handler/handlers.go (Health, Livez, runtime import),
     cmd/proxy/main.go (subrouter, drain loop)

6. OpenAPI spec updates
   - Document Health 503 response and new DrainingResponse schema
   - Add /livez path with LivezResponse schema
   - Files: internal/handler/openapi.go

Verified: go build ./... clean, go test ./... all pass, go vet clean.
Three rounds of codex peer review across changes 1-5; all feedback
addressed (citations_delta, json.Number precision, drain-loop logging
via lastLog timestamp, PathPrefix tightened to "/v1/").
2026-05-02 15:15:58 -06:00

1074 lines
29 KiB
Go

package service
import (
"database/sql"
"encoding/json"
"fmt"
"log"
"sort"
"strings"
"time"
_ "github.com/lib/pq"
"github.com/seifghazi/claude-code-monitor/internal/config"
"github.com/seifghazi/claude-code-monitor/internal/model"
)
type postgresStorageService struct {
db *sql.DB
config *config.StorageConfig
logger *log.Logger
// Prepared statements for frequently used queries
stmtInsertRequest *sql.Stmt
stmtUpdateResponse *sql.Stmt
stmtUpdateGrading *sql.Stmt
stmtGetRequestByID *sql.Stmt
stmtGetRequestsPage *sql.Stmt
stmtGetRequestsCount *sql.Stmt
stmtDeleteOldRequests *sql.Stmt
}
// NewPostgresStorageService constructs a PostgreSQL-backed StorageService
// using the process-wide default logger.
func NewPostgresStorageService(cfg *config.StorageConfig) (StorageService, error) {
	return NewPostgresStorageServiceWithLogger(cfg, log.Default())
}
// NewPostgresStorageServiceWithLogger opens a connection pool to
// cfg.DatabaseURL, verifies connectivity, creates/migrates the schema,
// prepares the hot-path statements, and applies the retention policy once at
// startup. On any fatal initialization step the pool is closed before the
// error is returned; a retention failure is logged and tolerated.
func NewPostgresStorageServiceWithLogger(cfg *config.StorageConfig, logger *log.Logger) (StorageService, error) {
	db, err := sql.Open("postgres", cfg.DatabaseURL)
	if err != nil {
		return nil, fmt.Errorf("failed to open postgres database: %w", err)
	}
	// Configure connection pool — PostgreSQL handles concurrency well.
	db.SetMaxOpenConns(25)
	db.SetMaxIdleConns(5)
	db.SetConnMaxLifetime(5 * time.Minute)
	// Verify connection eagerly; sql.Open alone does not dial the server.
	if err := db.Ping(); err != nil {
		db.Close()
		return nil, fmt.Errorf("failed to ping postgres database: %w", err)
	}
	service := &postgresStorageService{
		db:     db,
		config: cfg,
		logger: logger,
	}
	if err := service.createTables(); err != nil {
		db.Close()
		return nil, fmt.Errorf("failed to create tables: %w", err)
	}
	if err := service.prepareStatements(); err != nil {
		db.Close()
		return nil, fmt.Errorf("failed to prepare statements: %w", err)
	}
	// Retention failures at startup are non-fatal: log and keep serving.
	if err := service.cleanupExpiredRequests(); err != nil {
		logger.Printf("Warning: failed to apply retention policy during startup: %v", err)
	}
	return service, nil
}
// createTables creates the base requests table and its indexes if absent,
// then applies incremental migrations (later-added columns, indexes, and the
// settings table) via runMigrations. All DDL uses IF NOT EXISTS /
// ADD COLUMN IF NOT EXISTS, so the whole call is idempotent.
func (s *postgresStorageService) createTables() error {
	schema := `
CREATE TABLE IF NOT EXISTS requests (
id TEXT PRIMARY KEY,
timestamp TIMESTAMPTZ NOT NULL,
method TEXT NOT NULL,
endpoint TEXT NOT NULL,
headers TEXT NOT NULL,
body TEXT NOT NULL,
user_agent TEXT,
content_type TEXT,
prompt_grade TEXT,
response TEXT,
model TEXT,
original_model TEXT,
routed_model TEXT
);
CREATE INDEX IF NOT EXISTS idx_requests_timestamp ON requests(timestamp DESC);
CREATE INDEX IF NOT EXISTS idx_requests_model ON requests(model);
CREATE INDEX IF NOT EXISTS idx_requests_endpoint ON requests(endpoint);
`
	_, err := s.db.Exec(schema)
	if err != nil {
		return err
	}
	// Incremental migrations for columns/indexes added after the initial
	// schema shipped; each statement is individually idempotent.
	return runMigrations(s.db, []string{
		"ALTER TABLE requests ADD COLUMN IF NOT EXISTS conversation_hash TEXT",
		"ALTER TABLE requests ADD COLUMN IF NOT EXISTS message_count INTEGER DEFAULT 0",
		"CREATE INDEX IF NOT EXISTS idx_requests_conversation_hash ON requests(conversation_hash)",
		"ALTER TABLE requests ADD COLUMN IF NOT EXISTS organization_id TEXT",
		"CREATE INDEX IF NOT EXISTS idx_requests_organization_id ON requests(organization_id)",
		`CREATE TABLE IF NOT EXISTS settings (key TEXT PRIMARY KEY, value TEXT NOT NULL)`,
	}, nil)
}
// prepareStatements prepares the SQL statements stored on the service and
// returns on the first preparation failure.
//
// NOTE(review): stmtGetRequestByID and stmtGetRequestsPage are prepared here
// but never executed in this file — GetRequests and GetRequestByShortID
// build ad-hoc queries instead. Confirm no external caller uses them before
// removing.
func (s *postgresStorageService) prepareStatements() error {
	var err error
	s.stmtInsertRequest, err = s.db.Prepare(`
INSERT INTO requests (id, timestamp, method, endpoint, headers, body, user_agent, content_type, model, original_model, routed_model, conversation_hash, message_count)
VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9, $10, $11, $12, $13)
`)
	if err != nil {
		return fmt.Errorf("failed to prepare insert statement: %w", err)
	}
	// $3 (organization_id) only overwrites the stored value when non-empty.
	s.stmtUpdateResponse, err = s.db.Prepare(`
UPDATE requests SET response = $1, organization_id = COALESCE(NULLIF($3, ''), organization_id) WHERE id = $2
`)
	if err != nil {
		return fmt.Errorf("failed to prepare update response statement: %w", err)
	}
	s.stmtUpdateGrading, err = s.db.Prepare(`
UPDATE requests SET prompt_grade = $1 WHERE id = $2
`)
	if err != nil {
		return fmt.Errorf("failed to prepare update grading statement: %w", err)
	}
	s.stmtGetRequestByID, err = s.db.Prepare(`
SELECT id, timestamp, method, endpoint, headers, body, model, user_agent, content_type, prompt_grade, response, original_model, routed_model
FROM requests
WHERE id = $1
`)
	if err != nil {
		return fmt.Errorf("failed to prepare get by ID statement: %w", err)
	}
	s.stmtGetRequestsPage, err = s.db.Prepare(`
SELECT id, timestamp, method, endpoint, headers, body, model, user_agent, content_type, prompt_grade, response, original_model, routed_model
FROM requests
ORDER BY timestamp DESC
LIMIT $1 OFFSET $2
`)
	if err != nil {
		return fmt.Errorf("failed to prepare get requests page statement: %w", err)
	}
	s.stmtGetRequestsCount, err = s.db.Prepare(`
SELECT COUNT(*) FROM requests
`)
	if err != nil {
		return fmt.Errorf("failed to prepare count statement: %w", err)
	}
	s.stmtDeleteOldRequests, err = s.db.Prepare(`
DELETE FROM requests WHERE timestamp < $1
`)
	if err != nil {
		return fmt.Errorf("failed to prepare delete old requests statement: %w", err)
	}
	return nil
}
// SaveRequest persists a single request row and returns the request ID that
// was written. Headers and the (storage-prepared, possibly truncated or
// redacted) body are stored as JSON text columns.
//
// The retention policy is applied opportunistically after every insert; a
// retention failure is logged, not returned. Note this issues an extra
// DELETE per save when retention is enabled.
func (s *postgresStorageService) SaveRequest(request *model.RequestLog) (string, error) {
	headersJSON, err := json.Marshal(request.Headers)
	if err != nil {
		return "", fmt.Errorf("failed to marshal headers: %w", err)
	}
	bodyForStorage, err := prepareRequestBodyForStorage(s.config, request.Body)
	if err != nil {
		return "", fmt.Errorf("failed to prepare body for storage: %w", err)
	}
	bodyJSON, err := json.Marshal(bodyForStorage)
	if err != nil {
		return "", fmt.Errorf("failed to marshal body: %w", err)
	}
	_, err = s.stmtInsertRequest.Exec(
		request.RequestID,
		request.Timestamp,
		request.Method,
		request.Endpoint,
		string(headersJSON),
		string(bodyJSON),
		request.UserAgent,
		request.ContentType,
		request.Model,
		request.OriginalModel,
		request.RoutedModel,
		request.ConversationHash,
		request.MessageCount,
	)
	if err != nil {
		return "", fmt.Errorf("failed to insert request: %w", err)
	}
	if err := s.cleanupExpiredRequests(); err != nil {
		s.logger.Printf("Warning: failed to apply retention policy: %v", err)
	}
	return request.RequestID, nil
}
// GetRequests returns one page of requests (newest first) together with the
// total number of rows matching the optional model filter. page is 1-based;
// limit is the page size. The model filter is applied as a case-insensitive
// LIKE pattern built by modelFilterPattern.
func (s *postgresStorageService) GetRequests(page, limit int, modelFilter string) ([]model.RequestLog, int, error) {
	whereClause := ""
	countArgs := []interface{}{}
	queryArgs := []interface{}{}
	argIdx := 1
	if filterValue, ok := modelFilterPattern(modelFilter, escapePostgresLikePattern); ok {
		whereClause = fmt.Sprintf(" WHERE LOWER(model) LIKE $%d", argIdx)
		countArgs = append(countArgs, filterValue)
		queryArgs = append(queryArgs, filterValue)
		argIdx++
	}
	// Get total count (same WHERE clause, without pagination args).
	var total int
	countQuery := "SELECT COUNT(*) FROM requests" + whereClause
	err := s.db.QueryRow(countQuery, countArgs...).Scan(&total)
	if err != nil {
		return nil, 0, fmt.Errorf("failed to get total count: %w", err)
	}
	// Get paginated results; LIMIT/OFFSET placeholders follow the filter arg.
	offset := (page - 1) * limit
	query := fmt.Sprintf(`
SELECT id, timestamp, method, endpoint, headers, body, model, user_agent, content_type, prompt_grade, response, original_model, routed_model
FROM requests%s
ORDER BY timestamp DESC
LIMIT $%d OFFSET $%d
`, whereClause, argIdx, argIdx+1)
	queryArgs = append(queryArgs, limit, offset)
	rows, err := s.db.Query(query, queryArgs...)
	if err != nil {
		return nil, 0, fmt.Errorf("failed to query requests: %w", err)
	}
	defer rows.Close()
	requests, err := s.scanRequestRows(rows)
	if err != nil {
		return nil, 0, err
	}
	return requests, total, nil
}
// ClearRequests deletes every stored request and reports how many rows were
// removed.
func (s *postgresStorageService) ClearRequests() (int, error) {
	res, execErr := s.db.Exec("DELETE FROM requests")
	if execErr != nil {
		return 0, fmt.Errorf("failed to clear requests: %w", execErr)
	}
	deleted, countErr := res.RowsAffected()
	if countErr != nil {
		return 0, fmt.Errorf("failed to get rows affected: %w", countErr)
	}
	return int(deleted), nil
}
// UpdateRequestWithGrading stores the JSON-serialized grade on the request
// row identified by requestID. Returns an error when the row does not exist.
// The retention policy is applied afterwards; failures there are logged only.
func (s *postgresStorageService) UpdateRequestWithGrading(requestID string, grade *model.PromptGrade) error {
	gradeJSON, err := json.Marshal(grade)
	if err != nil {
		return fmt.Errorf("failed to marshal grade: %w", err)
	}
	result, err := s.stmtUpdateGrading.Exec(string(gradeJSON), requestID)
	if err != nil {
		return fmt.Errorf("failed to update request with grading: %w", err)
	}
	// RowsAffected error deliberately ignored: lib/pq supports it, and a zero
	// count is only used to detect a missing row.
	rowsAffected, _ := result.RowsAffected()
	if rowsAffected == 0 {
		return fmt.Errorf("request %s not found", requestID)
	}
	if err := s.cleanupExpiredRequests(); err != nil {
		s.logger.Printf("Warning: failed to apply retention policy: %v", err)
	}
	return nil
}
// UpdateRequestWithResponse stores the JSON-serialized (storage-prepared)
// response on the matching request row. The organization ID is only
// overwritten when non-empty (COALESCE/NULLIF in the prepared statement).
// Returns an error when the row does not exist.
func (s *postgresStorageService) UpdateRequestWithResponse(request *model.RequestLog) error {
	responseForStorage, err := prepareResponseForStorage(s.config, s.logger, request.Response)
	if err != nil {
		return fmt.Errorf("failed to prepare response for storage: %w", err)
	}
	responseJSON, err := json.Marshal(responseForStorage)
	if err != nil {
		return fmt.Errorf("failed to marshal response: %w", err)
	}
	orgID := request.OrganizationID
	// Parameter order matches the prepared statement: $1=response, $2=id, $3=org.
	result, err := s.stmtUpdateResponse.Exec(string(responseJSON), request.RequestID, orgID)
	if err != nil {
		return fmt.Errorf("failed to update request with response: %w", err)
	}
	// RowsAffected error deliberately ignored; zero rows means missing request.
	rowsAffected, _ := result.RowsAffected()
	if rowsAffected == 0 {
		return fmt.Errorf("request %s not found", request.RequestID)
	}
	return nil
}
// EnsureDirectoryExists is a no-op for the PostgreSQL backend — there is no
// on-disk directory to create. It exists to satisfy the StorageService
// interface.
func (s *postgresStorageService) EnsureDirectoryExists() error {
	return nil
}
// GetRequestByShortID resolves a possibly-truncated request ID via a LIKE
// suffix match ("%<id>", so the full ID must end with shortID) and returns
// the newest matching request along with its full ID. shortID is escaped so
// LIKE metacharacters in it are treated literally.
func (s *postgresStorageService) GetRequestByShortID(shortID string) (*model.RequestLog, string, error) {
	escapedID := escapePostgresLikePattern(shortID)
	query := `
SELECT id, timestamp, method, endpoint, headers, body, model, user_agent, content_type, prompt_grade, response, original_model, routed_model
FROM requests
WHERE id LIKE $1
ORDER BY timestamp DESC
LIMIT 1
`
	var req model.RequestLog
	var headersJSON, bodyJSON string
	var promptGradeJSON, responseJSON sql.NullString
	var timestamp time.Time
	err := s.db.QueryRow(query, "%"+escapedID).Scan(
		&req.RequestID,
		&timestamp,
		&req.Method,
		&req.Endpoint,
		&headersJSON,
		&bodyJSON,
		&req.Model,
		&req.UserAgent,
		&req.ContentType,
		&promptGradeJSON,
		&responseJSON,
		&req.OriginalModel,
		&req.RoutedModel,
	)
	if err == sql.ErrNoRows {
		return nil, "", fmt.Errorf("request with ID %s not found", shortID)
	}
	if err != nil {
		return nil, "", fmt.Errorf("failed to query request: %w", err)
	}
	req.Timestamp = timestamp.Format(time.RFC3339)
	if err := unmarshalStoredRequestFields(s.logger, &req, headersJSON, bodyJSON, promptGradeJSON, responseJSON); err != nil {
		return nil, "", err
	}
	return &req, req.RequestID, nil
}
// GetConfig returns the storage configuration this service was created with.
func (s *postgresStorageService) GetConfig() *config.StorageConfig {
	return s.config
}
// GetAllRequests returns every stored request (optionally filtered by
// model), newest first, with no row limit.
func (s *postgresStorageService) GetAllRequests(modelFilter string) ([]*model.RequestLog, error) {
	return s.getAllRequestsWithLimit(modelFilter, 0)
}
// getAllRequestsWithLimit returns requests newest first, optionally filtered
// by model and capped at limit rows (limit <= 0 means unlimited). Rows that
// fail to scan are logged and skipped; an iteration error aborts the call.
func (s *postgresStorageService) getAllRequestsWithLimit(modelFilter string, limit int) ([]*model.RequestLog, error) {
	var query string
	args := []interface{}{}
	argIdx := 1
	if filterValue, ok := modelFilterPattern(modelFilter, escapePostgresLikePattern); ok {
		query = fmt.Sprintf(`
SELECT id, timestamp, method, endpoint, headers, body, model, user_agent, content_type, prompt_grade, response, original_model, routed_model
FROM requests
WHERE LOWER(model) LIKE $%d
ORDER BY timestamp DESC
`, argIdx)
		args = append(args, filterValue)
		argIdx++
	} else {
		query = `
SELECT id, timestamp, method, endpoint, headers, body, model, user_agent, content_type, prompt_grade, response, original_model, routed_model
FROM requests
ORDER BY timestamp DESC
`
	}
	if limit > 0 {
		query += fmt.Sprintf(" LIMIT $%d", argIdx)
		args = append(args, limit)
	}
	rows, err := s.db.Query(query, args...)
	if err != nil {
		return nil, fmt.Errorf("failed to query requests: %w", err)
	}
	defer rows.Close()
	var requests []*model.RequestLog
	for rows.Next() {
		req, err := s.scanSingleRow(rows)
		if err != nil {
			// Best-effort: a single bad row should not sink the whole listing.
			s.logger.Printf("Warning: failed to scan request row: %v", err)
			continue
		}
		requests = append(requests, req)
	}
	if err := rows.Err(); err != nil {
		return nil, fmt.Errorf("error iterating rows: %w", err)
	}
	return requests, nil
}
// DeleteRequestsOlderThan removes every request whose timestamp is older
// than now minus age and returns the number of rows deleted.
//
// Fix: bind the cutoff as a time.Time instead of an RFC3339 string. The
// timestamp column is TIMESTAMPTZ and lib/pq binds time.Time natively; the
// previous RFC3339 formatting truncated sub-second precision and relied on
// server-side string parsing.
func (s *postgresStorageService) DeleteRequestsOlderThan(age time.Duration) (int, error) {
	cutoff := time.Now().Add(-age)
	result, err := s.stmtDeleteOldRequests.Exec(cutoff)
	if err != nil {
		return 0, fmt.Errorf("failed to delete old requests: %w", err)
	}
	rowsAffected, err := result.RowsAffected()
	if err != nil {
		return 0, fmt.Errorf("failed to get rows affected: %w", err)
	}
	return int(rowsAffected), nil
}
// GetDatabaseStats returns a map with the total request count plus
// best-effort extras: database size in bytes and oldest/newest request
// timestamps. Only the count query is fatal; errors on the extras are
// ignored and the corresponding keys simply omitted.
func (s *postgresStorageService) GetDatabaseStats() (map[string]interface{}, error) {
	stats := make(map[string]interface{})
	// Get row count (the only mandatory stat).
	var count int
	err := s.stmtGetRequestsCount.QueryRow().Scan(&count)
	if err != nil {
		return nil, fmt.Errorf("failed to get count: %w", err)
	}
	stats["total_requests"] = count
	// Get database size (best-effort).
	var dbSize int64
	err = s.db.QueryRow("SELECT pg_database_size(current_database())").Scan(&dbSize)
	if err == nil {
		stats["database_size_bytes"] = dbSize
	}
	// Get oldest and newest timestamps (best-effort; NULL when table empty).
	var oldest, newest sql.NullTime
	err = s.db.QueryRow("SELECT MIN(timestamp), MAX(timestamp) FROM requests").Scan(&oldest, &newest)
	if err == nil {
		if oldest.Valid {
			stats["oldest_request"] = oldest.Time.Format(time.RFC3339)
		}
		if newest.Valid {
			stats["newest_request"] = newest.Time.Format(time.RFC3339)
		}
	}
	return stats, nil
}
// Close releases every prepared statement (errors from statement Close are
// discarded, as before) and then closes the underlying connection pool,
// returning the pool's close error.
func (s *postgresStorageService) Close() error {
	stmts := []*sql.Stmt{
		s.stmtInsertRequest,
		s.stmtUpdateResponse,
		s.stmtUpdateGrading,
		s.stmtGetRequestByID,
		s.stmtGetRequestsPage,
		s.stmtGetRequestsCount,
		s.stmtDeleteOldRequests,
	}
	for _, stmt := range stmts {
		if stmt != nil {
			stmt.Close()
		}
	}
	return s.db.Close()
}
// Helper functions
// escapePostgresLikePattern returns s with the LIKE metacharacters —
// backslash, percent, and underscore — each prefixed with a backslash,
// PostgreSQL's default LIKE escape character, so they match literally.
func escapePostgresLikePattern(s string) string {
	replacer := strings.NewReplacer(
		`\`, `\\`,
		`%`, `\%`,
		`_`, `\_`,
	)
	return replacer.Replace(s)
}
// scanRequestRows converts a result set (in the 13-column request order)
// into RequestLog values. Rows that fail to scan or unmarshal are logged and
// skipped; an iteration error aborts with an error.
func (s *postgresStorageService) scanRequestRows(rows *sql.Rows) ([]model.RequestLog, error) {
	var requests []model.RequestLog
	for rows.Next() {
		var req model.RequestLog
		var headersJSON, bodyJSON string
		var promptGradeJSON, responseJSON sql.NullString
		var timestamp time.Time
		err := rows.Scan(
			&req.RequestID,
			&timestamp,
			&req.Method,
			&req.Endpoint,
			&headersJSON,
			&bodyJSON,
			&req.Model,
			&req.UserAgent,
			&req.ContentType,
			&promptGradeJSON,
			&responseJSON,
			&req.OriginalModel,
			&req.RoutedModel,
		)
		if err != nil {
			s.logger.Printf("Warning: failed to scan row: %v", err)
			continue
		}
		req.Timestamp = timestamp.Format(time.RFC3339)
		if err := unmarshalStoredRequestFields(s.logger, &req, headersJSON, bodyJSON, promptGradeJSON, responseJSON); err != nil {
			s.logger.Printf("Warning: failed to unmarshal request fields: %v", err)
			continue
		}
		requests = append(requests, req)
	}
	if err := rows.Err(); err != nil {
		return nil, fmt.Errorf("error iterating rows: %w", err)
	}
	return requests, nil
}
// scanSingleRow scans the current row (the caller must already have called
// rows.Next) into a RequestLog, decoding the stored JSON columns. Unlike
// scanRequestRows, any failure is returned rather than skipped.
func (s *postgresStorageService) scanSingleRow(rows *sql.Rows) (*model.RequestLog, error) {
	var req model.RequestLog
	var headersJSON, bodyJSON string
	var promptGradeJSON, responseJSON sql.NullString
	var timestamp time.Time
	err := rows.Scan(
		&req.RequestID,
		&timestamp,
		&req.Method,
		&req.Endpoint,
		&headersJSON,
		&bodyJSON,
		&req.Model,
		&req.UserAgent,
		&req.ContentType,
		&promptGradeJSON,
		&responseJSON,
		&req.OriginalModel,
		&req.RoutedModel,
	)
	if err != nil {
		return nil, fmt.Errorf("failed to scan row: %w", err)
	}
	req.Timestamp = timestamp.Format(time.RFC3339)
	if err := unmarshalStoredRequestFields(s.logger, &req, headersJSON, bodyJSON, promptGradeJSON, responseJSON); err != nil {
		return nil, err
	}
	return &req, nil
}
// cleanupExpiredRequests enforces the retention policy by deleting requests
// older than RetentionDays. It is a no-op when no config is set or retention
// is disabled (RetentionDays <= 0).
func (s *postgresStorageService) cleanupExpiredRequests() error {
	if s.config == nil || s.config.RetentionDays <= 0 {
		return nil
	}
	_, err := s.DeleteRequestsOlderThan(time.Duration(s.config.RetentionDays) * 24 * time.Hour)
	return err
}
// GetUsageStats returns aggregated token usage statistics for requests that
// have a stored response. Optional filters: start/end date (inclusive),
// model (case-insensitive LIKE pattern), and organization ID. Aggregation
// happens in Go by decoding each row's stored response JSON.
func (s *postgresStorageService) GetUsageStats(startDate, endDate, modelFilter, orgFilter string) (*model.UsageStats, error) {
	stats := &model.UsageStats{
		RequestsByModel: make(map[string]model.ModelStats),
	}
	// Build the WHERE clause incrementally with positional placeholders.
	whereClause := "WHERE response IS NOT NULL"
	args := []interface{}{}
	argIdx := 1
	if startDate != "" {
		whereClause += fmt.Sprintf(" AND timestamp >= $%d", argIdx)
		args = append(args, startDate)
		argIdx++
		stats.StartDate = startDate
	}
	if endDate != "" {
		whereClause += fmt.Sprintf(" AND timestamp <= $%d", argIdx)
		args = append(args, endDate)
		argIdx++
		stats.EndDate = endDate
	}
	if filterValue, ok := modelFilterPattern(modelFilter, escapePostgresLikePattern); ok {
		whereClause += fmt.Sprintf(" AND LOWER(model) LIKE $%d", argIdx)
		args = append(args, filterValue)
		argIdx++
	}
	if orgFilter != "" {
		whereClause += fmt.Sprintf(" AND organization_id = $%d", argIdx)
		args = append(args, orgFilter)
		argIdx++
	}
	query := `
SELECT model, response
FROM requests
` + whereClause
	rows, err := s.db.Query(query, args...)
	if err != nil {
		return nil, fmt.Errorf("failed to query usage stats: %w", err)
	}
	defer rows.Close()
	for rows.Next() {
		var modelName string
		var responseJSON sql.NullString
		if err := rows.Scan(&modelName, &responseJSON); err != nil {
			s.logger.Printf("Warning: failed to scan usage row: %v", err)
			continue
		}
		// Rows whose stored response cannot be decoded, or that carry no
		// usage block, are skipped rather than counted as zero.
		resp, ok := decodeStoredResponse(responseJSON)
		if !ok {
			continue
		}
		bodySummary, ok := decodeResponseBodySummary(resp.Body)
		if !ok || bodySummary.Usage == nil {
			continue
		}
		addUsageStats(stats, modelName, bodySummary.Usage)
	}
	if err := rows.Err(); err != nil {
		return nil, fmt.Errorf("error iterating usage rows: %w", err)
	}
	// When the caller gave no explicit range, fall back to the actual min/max
	// timestamps of responded requests (best-effort; errors ignored here).
	if stats.StartDate == "" || stats.EndDate == "" {
		var oldest, newest sql.NullTime
		err := s.db.QueryRow("SELECT MIN(timestamp), MAX(timestamp) FROM requests WHERE response IS NOT NULL").Scan(&oldest, &newest)
		if err == nil {
			if stats.StartDate == "" && oldest.Valid {
				stats.StartDate = oldest.Time.Format(time.RFC3339)
			}
			if stats.EndDate == "" && newest.Valid {
				stats.EndDate = newest.Time.Format(time.RFC3339)
			}
		}
	}
	return stats, nil
}
// GetRequestsSummary returns minimal per-request data for the list view
// (no headers/body), newest first, optionally filtered by model. All
// matching rows are returned; see GetRequestsSummaryPaginated for paging.
func (s *postgresStorageService) GetRequestsSummary(modelFilter string) ([]*model.RequestSummary, error) {
	query := `
SELECT id, timestamp, method, endpoint, model, original_model, routed_model, response, COALESCE(conversation_hash, ''), COALESCE(message_count, 0)
FROM requests
`
	args := []interface{}{}
	if filterValue, ok := modelFilterPattern(modelFilter, escapePostgresLikePattern); ok {
		query += " WHERE LOWER(model) LIKE $1"
		args = append(args, filterValue)
	}
	query += " ORDER BY timestamp DESC"
	rows, err := s.db.Query(query, args...)
	if err != nil {
		return nil, fmt.Errorf("failed to query requests: %w", err)
	}
	defer rows.Close()
	return s.scanSummaryRows(rows)
}
// GetRequestsSummaryPaginated returns minimal per-request data for the list
// view with offset/limit pagination, plus the total count matching the
// filters. Filters: model (case-insensitive LIKE) and an inclusive
// startTime/endTime window (only applied when both are non-empty).
// limit <= 0 means no LIMIT; offset is then applied on its own if positive.
func (s *postgresStorageService) GetRequestsSummaryPaginated(modelFilter, startTime, endTime string, offset, limit int) ([]*model.RequestSummary, int, error) {
	whereClauses := []string{}
	args := []interface{}{}
	argIdx := 1
	if filterValue, ok := modelFilterPattern(modelFilter, escapePostgresLikePattern); ok {
		whereClauses = append(whereClauses, fmt.Sprintf("LOWER(model) LIKE $%d", argIdx))
		args = append(args, filterValue)
		argIdx++
	}
	if startTime != "" && endTime != "" {
		whereClauses = append(whereClauses, fmt.Sprintf("timestamp >= $%d AND timestamp <= $%d", argIdx, argIdx+1))
		args = append(args, startTime, endTime)
		argIdx += 2
	}
	whereClause := ""
	if len(whereClauses) > 0 {
		whereClause = " WHERE " + strings.Join(whereClauses, " AND ")
	}
	// Get total count. Snapshot args before pagination values are appended.
	var total int
	countQuery := "SELECT COUNT(*) FROM requests" + whereClause
	countArgs := make([]interface{}, len(args))
	copy(countArgs, args)
	if err := s.db.QueryRow(countQuery, countArgs...).Scan(&total); err != nil {
		return nil, 0, fmt.Errorf("failed to get total count: %w", err)
	}
	// Get the requested page, newest first.
	query := `
SELECT id, timestamp, method, endpoint, model, original_model, routed_model, response, COALESCE(conversation_hash, ''), COALESCE(message_count, 0)
FROM requests
` + whereClause + " ORDER BY timestamp DESC"
	if limit > 0 {
		query += fmt.Sprintf(" LIMIT $%d OFFSET $%d", argIdx, argIdx+1)
		args = append(args, limit, offset)
	} else if offset > 0 {
		query += fmt.Sprintf(" OFFSET $%d", argIdx)
		args = append(args, offset)
	}
	rows, err := s.db.Query(query, args...)
	if err != nil {
		return nil, 0, fmt.Errorf("failed to query requests: %w", err)
	}
	defer rows.Close()
	summaries, err := s.scanSummaryRows(rows)
	if err != nil {
		return nil, 0, err
	}
	s.logger.Printf("GetRequestsSummaryPaginated: returned %d requests (total: %d, limit: %d, offset: %d)", len(summaries), total, limit, offset)
	return summaries, total, nil
}
// scanSummaryRows converts a result set (in the 10-column summary order)
// into RequestSummary values. Rows that fail to scan are logged and
// skipped; response-derived fields are filled in best-effort by
// applyStoredResponseToSummary.
//
// Fix: check rows.Err() after the loop (as scanRequestRows already does)
// instead of silently returning a partial result set when iteration fails.
func (s *postgresStorageService) scanSummaryRows(rows *sql.Rows) ([]*model.RequestSummary, error) {
	var summaries []*model.RequestSummary
	for rows.Next() {
		var summary model.RequestSummary
		var responseJSON sql.NullString
		var timestamp time.Time
		err := rows.Scan(
			&summary.RequestID,
			&timestamp,
			&summary.Method,
			&summary.Endpoint,
			&summary.Model,
			&summary.OriginalModel,
			&summary.RoutedModel,
			&responseJSON,
			&summary.ConversationHash,
			&summary.MessageCount,
		)
		if err != nil {
			s.logger.Printf("Warning: failed to scan summary row: %v", err)
			continue
		}
		summary.Timestamp = timestamp.Format(time.RFC3339)
		applyStoredResponseToSummary(&summary, responseJSON)
		summaries = append(summaries, &summary)
	}
	if err := rows.Err(); err != nil {
		return nil, fmt.Errorf("error iterating rows: %w", err)
	}
	return summaries, nil
}
// GetStats returns per-day token totals for the dashboard within
// [startDate, endDate], optionally restricted to one organization. Token
// counts are extracted in Go from each row's stored response JSON; rows
// without a decodable usage block still register their day/model bucket
// with zero tokens.
//
// Fixes: scan failures are now logged (previously silently dropped), and
// rows.Err() is checked after iteration so a mid-stream failure no longer
// yields a silently partial result.
func (s *postgresStorageService) GetStats(startDate, endDate, orgFilter string) (*model.DashboardStats, error) {
	stats := &model.DashboardStats{
		DailyStats: make([]model.DailyTokens, 0),
	}
	query := `
SELECT timestamp, COALESCE(model, 'unknown') as model, response
FROM requests
WHERE timestamp >= $1 AND timestamp <= $2
`
	args := []interface{}{startDate, endDate}
	if orgFilter != "" {
		query += ` AND organization_id = $3`
		args = append(args, orgFilter)
	}
	query += ` ORDER BY timestamp`
	rows, err := s.db.Query(query, args...)
	if err != nil {
		return nil, fmt.Errorf("failed to query stats: %w", err)
	}
	defer rows.Close()
	dailyMap := make(map[string]*model.DailyTokens)
	for rows.Next() {
		var timestamp time.Time
		var modelName string
		var responseJSON sql.NullString
		if err := rows.Scan(&timestamp, &modelName, &responseJSON); err != nil {
			s.logger.Printf("Warning: failed to scan stats row: %v", err)
			continue
		}
		date := timestamp.Format("2006-01-02")
		tokens := int64(0)
		if resp, ok := decodeStoredResponse(responseJSON); ok {
			if bodySummary, ok := decodeResponseBodySummary(resp.Body); ok {
				tokens = totalTokensFromUsage(bodySummary.Usage)
			}
		}
		addDailyTokens(dailyMap, date, modelName, tokens)
	}
	if err := rows.Err(); err != nil {
		return nil, fmt.Errorf("error iterating stats rows: %w", err)
	}
	// NOTE: map iteration order is random, so DailyStats is unsorted here.
	for _, v := range dailyMap {
		stats.DailyStats = append(stats.DailyStats, *v)
	}
	return stats, nil
}
// GetHourlyStats returns time-bucketed breakdown for a specific time range.
// bucketMinutes controls the granularity (e.g. 5, 15, 30, 60); values <= 0
// fall back to 60. Buckets are absolute (date + time-of-day) so multi-day
// ranges show per-slot data; results are sorted by bucket key. Token counts
// and response times are decoded in Go from each row's stored response JSON.
//
// NOTE(review): rows.Err() is not checked after the loop, and scan failures
// are silently skipped, so an iteration error yields a partial result —
// consider aligning with scanRequestRows' error handling.
func (s *postgresStorageService) GetHourlyStats(startTime, endTime string, bucketMinutes int, orgFilter string) (*model.HourlyStatsResponse, error) {
	if bucketMinutes <= 0 {
		bucketMinutes = 60
	}
	query := `
SELECT timestamp, COALESCE(model, 'unknown') as model, response
FROM requests
WHERE timestamp >= $1 AND timestamp <= $2
`
	args := []interface{}{startTime, endTime}
	if orgFilter != "" {
		query += ` AND organization_id = $3`
		args = append(args, orgFilter)
	}
	query += ` ORDER BY timestamp`
	rows, err := s.db.Query(query, args...)
	if err != nil {
		return nil, fmt.Errorf("failed to query hourly stats: %w", err)
	}
	defer rows.Close()
	bucketMap := make(map[string]*model.HourlyTokens)
	var totalTokens int64
	var totalRequests int
	var totalResponseTime int64
	var responseCount int
	for rows.Next() {
		var timestamp time.Time
		var modelName string
		var responseJSON sql.NullString
		if err := rows.Scan(&timestamp, &modelName, &responseJSON); err != nil {
			continue
		}
		// Always use absolute time buckets so multi-day ranges show per-slot
		// data: round the minute-of-day down to the bucket boundary, keeping
		// the row's own date and location.
		var bucketKey, bucketLabel string
		minuteOfDay := timestamp.Hour()*60 + timestamp.Minute()
		bucketStart := (minuteOfDay / bucketMinutes) * bucketMinutes
		bucketTime := time.Date(timestamp.Year(), timestamp.Month(), timestamp.Day(), bucketStart/60, bucketStart%60, 0, 0, timestamp.Location())
		bucketKey = bucketTime.Format("2006-01-02T15:04")
		bucketLabel = bucketTime.Format("Jan 2 15:04")
		tokens := int64(0)
		responseTime := int64(0)
		if resp, ok := decodeStoredResponse(responseJSON); ok {
			responseTime = resp.ResponseTime
			if bodySummary, ok := decodeResponseBodySummary(resp.Body); ok {
				tokens = totalTokensFromUsage(bodySummary.Usage)
			}
		}
		totalTokens += tokens
		totalRequests++
		// Only rows with a positive response time contribute to the average.
		if responseTime > 0 {
			totalResponseTime += responseTime
			responseCount++
		}
		addHourlyTokens(bucketMap, bucketKey, bucketLabel, modelName, tokens)
	}
	// Convert map to slice sorted by bucket key (lexicographic == chronological
	// for the "2006-01-02T15:04" key format).
	keys := make([]string, 0, len(bucketMap))
	for k := range bucketMap {
		keys = append(keys, k)
	}
	sort.Strings(keys)
	hourlyStats := make([]model.HourlyTokens, 0, len(keys))
	for _, k := range keys {
		hourlyStats = append(hourlyStats, *bucketMap[k])
	}
	avgResponseTime := int64(0)
	if responseCount > 0 {
		avgResponseTime = totalResponseTime / int64(responseCount)
	}
	return &model.HourlyStatsResponse{
		HourlyStats:     hourlyStats,
		TodayTokens:     totalTokens,
		TodayRequests:   totalRequests,
		AvgResponseTime: avgResponseTime,
	}, nil
}
// GetModelStats returns per-model token totals for [startTime, endTime],
// optionally restricted to one organization. Token counts are decoded in Go
// from each row's stored response JSON; rows without decodable usage still
// register their model with zero tokens.
//
// Fixes: scan failures are now logged (previously silently dropped), and
// rows.Err() is checked after iteration so a mid-stream failure no longer
// yields a silently partial map.
func (s *postgresStorageService) GetModelStats(startTime, endTime, orgFilter string) (*model.ModelStatsResponse, error) {
	query := `
SELECT COALESCE(model, 'unknown') as model, response
FROM requests
WHERE timestamp >= $1 AND timestamp <= $2
`
	args := []interface{}{startTime, endTime}
	if orgFilter != "" {
		query += ` AND organization_id = $3`
		args = append(args, orgFilter)
	}
	rows, err := s.db.Query(query, args...)
	if err != nil {
		return nil, fmt.Errorf("failed to query model stats: %w", err)
	}
	defer rows.Close()
	modelMap := make(map[string]*model.ModelTokens)
	for rows.Next() {
		var modelName string
		var responseJSON sql.NullString
		if err := rows.Scan(&modelName, &responseJSON); err != nil {
			s.logger.Printf("Warning: failed to scan model stats row: %v", err)
			continue
		}
		tokens := int64(0)
		if resp, ok := decodeStoredResponse(responseJSON); ok {
			if bodySummary, ok := decodeResponseBodySummary(resp.Body); ok {
				tokens = totalTokensFromUsage(bodySummary.Usage)
			}
		}
		addModelTokens(modelMap, modelName, tokens)
	}
	if err := rows.Err(); err != nil {
		return nil, fmt.Errorf("error iterating model stats rows: %w", err)
	}
	modelStats := make([]model.ModelTokens, 0, len(modelMap))
	for _, v := range modelMap {
		modelStats = append(modelStats, *v)
	}
	return &model.ModelStatsResponse{
		ModelStats: modelStats,
	}, nil
}
// GetLatestRequestDate returns the timestamp of the most recent request, or
// (nil, nil) when the table is empty or the latest timestamp is NULL.
//
// Fix: the original checked `err == sql.ErrNoRows || !timestamp.Valid`
// BEFORE `err != nil`. On any real query error, timestamp is the zero-value
// NullTime (Valid == false), so the function returned (nil, nil) and
// swallowed the error. Check ErrNoRows, then the error, then validity.
func (s *postgresStorageService) GetLatestRequestDate() (*time.Time, error) {
	var timestamp sql.NullTime
	err := s.db.QueryRow("SELECT timestamp FROM requests ORDER BY timestamp DESC LIMIT 1").Scan(&timestamp)
	if err == sql.ErrNoRows {
		return nil, nil
	}
	if err != nil {
		return nil, fmt.Errorf("failed to query latest request: %w", err)
	}
	if !timestamp.Valid {
		return nil, nil
	}
	t := timestamp.Time
	return &t, nil
}
// GetSettings loads the proxy settings stored under the 'proxy_settings' key
// in the settings table. A missing row yields zero-value settings rather
// than an error; a malformed stored value is an error.
func (s *postgresStorageService) GetSettings() (*model.ProxySettings, error) {
	var value string
	err := s.db.QueryRow("SELECT value FROM settings WHERE key = 'proxy_settings'").Scan(&value)
	if err == sql.ErrNoRows {
		return &model.ProxySettings{}, nil
	}
	if err != nil {
		return nil, fmt.Errorf("failed to get settings: %w", err)
	}
	var settings model.ProxySettings
	if err := json.Unmarshal([]byte(value), &settings); err != nil {
		return nil, fmt.Errorf("failed to parse settings: %w", err)
	}
	return &settings, nil
}
// SaveSettings serializes settings to JSON and upserts it under the
// 'proxy_settings' key in the settings table.
func (s *postgresStorageService) SaveSettings(settings *model.ProxySettings) error {
	payload, marshalErr := json.Marshal(settings)
	if marshalErr != nil {
		return fmt.Errorf("failed to marshal settings: %w", marshalErr)
	}
	const upsert = "INSERT INTO settings (key, value) VALUES ('proxy_settings', $1) ON CONFLICT (key) DO UPDATE SET value = $1"
	if _, execErr := s.db.Exec(upsert, string(payload)); execErr != nil {
		return fmt.Errorf("failed to save settings: %w", execErr)
	}
	return nil
}
// GetDistinctOrganizations returns the sorted list of distinct, non-empty
// organization IDs seen across all requests.
//
// Fixes: scan failures are now logged (previously silently dropped), and
// rows.Err() is checked after iteration so a mid-stream failure no longer
// yields a silently partial list.
func (s *postgresStorageService) GetDistinctOrganizations() ([]string, error) {
	rows, err := s.db.Query(`SELECT DISTINCT organization_id FROM requests WHERE organization_id IS NOT NULL AND organization_id != '' ORDER BY organization_id`)
	if err != nil {
		return nil, fmt.Errorf("failed to query organizations: %w", err)
	}
	defer rows.Close()
	var orgs []string
	for rows.Next() {
		var org string
		if err := rows.Scan(&org); err != nil {
			s.logger.Printf("Warning: failed to scan organization row: %v", err)
			continue
		}
		orgs = append(orgs, org)
	}
	if err := rows.Err(); err != nil {
		return nil, fmt.Errorf("error iterating organization rows: %w", err)
	}
	return orgs, nil
}