This commit captures both the prior accumulated work-in-progress
(framework migration web/→svelte/, postgres storage, conversation
viewer, dashboard auth, OpenAPI spec, integration tests) AND today's
operational improvements layered on top. History wasn't checkpointed
incrementally; happy to split it via interactive rebase if a reviewer
wants smaller commits.
Today's changes (in addition to the older WIP):
1. Configurable upstream response-header timeout
- ANTHROPIC_RESPONSE_HEADER_TIMEOUT env (default 300s)
- Replaces hardcoded 300s in provider/anthropic.go that was firing
on opus + 1M-context + extended thinking non-streaming requests
- Files: internal/config/config.go, internal/provider/anthropic.go
2. Structured forward-error diagnostic logging
- When a forward to Anthropic fails, log a single key=value line
with request_id, model, stream, body_bytes, has_thinking,
anthropic_beta, query, elapsed, ctx_err — alongside the existing
human-readable error line for back-compat
- Files: internal/handler/handlers.go (logForwardFailure)
3. Full SSE protocol passthrough + Flusher fix
- handler/handlers.go: forward all SSE lines verbatim (event:, id:,
retry:, : comments, blank-line terminators), not only data:.
Previous code produced malformed SSE for strict parsers.
- middleware/logging.go: explicit Flush() method on responseWriter.
Embedding http.ResponseWriter (interface) does not auto-promote
Flush(), so every w.(http.Flusher) check in the streaming
handler was returning ok=false and SSE writes buffered in net/http
until the body closed.
4. Non-streaming → streaming demotion (feature-flagged)
- ANTHROPIC_DEMOTE_NONSTREAMING env (default false)
- When enabled and the routed provider is anthropic, force stream=true
upstream for clients that asked for stream=false. Receive SSE,
accumulate via accumulateSSEToMessage (handles text, tool_use with
partial_json reassembly, thinking, signature, citations_delta,
usage merge), and synthesize a single non-streaming JSON response.
- Eliminates the ResponseHeaderTimeout class of failure entirely.
- Body rewrite uses json.Decoder + UseNumber() to preserve integer
precision in unknown nested fields (tool inputs from prior turns).
- Files: internal/config/config.go, internal/handler/handlers.go,
cmd/proxy/main.go, cmd/proxy/main_test.go
5. Live operational state: /livez gauge + graceful drain
- New internal/runtime package: atomic in-flight counter + draining flag
- New middleware/inflight.go: increments runtime gauge, applied to
/v1/* subrouter so Messages, ChatCompletions, and ProxyPassthrough
are all counted
- /v1/* moved to a gorilla/mux subrouter so the InFlight middleware
applies surgically; /health, /livez, /openapi.* remain on parent
router (unauthenticated, uncounted)
- Health handler returns 503 draining when runtime.IsDraining() is
true, so Traefik stops routing to a slot before drain begins
- New /livez handler returns {status, in_flight, draining, timestamp}
- SIGTERM handler in main.go: SetDraining(true), poll for in_flight==0
with 32-min ceiling and 1s tick (logs every 10s), then srv.Shutdown
- Auth bypass list extended with /livez
- Files: internal/runtime/runtime.go (new),
internal/middleware/inflight.go (new),
internal/middleware/auth.go,
internal/handler/handlers.go (Health, Livez, runtime import),
cmd/proxy/main.go (subrouter, drain loop)
6. OpenAPI spec updates
- Document Health 503 response and new DrainingResponse schema
- Add /livez path with LivezResponse schema
- Files: internal/handler/openapi.go
Verified: go build ./... clean, go test ./... all pass, go vet clean.
Three rounds of codex peer review across changes 1-5; all feedback
addressed (citations_delta, json.Number precision, drain-loop logging
via lastLog timestamp, PathPrefix tightened to "/v1/").
1216 lines
33 KiB
Go
package service
|
|
|
|
import (
|
|
"database/sql"
|
|
"encoding/json"
|
|
"fmt"
|
|
"log"
|
|
"sort"
|
|
"strings"
|
|
"time"
|
|
|
|
_ "github.com/mattn/go-sqlite3"
|
|
|
|
"github.com/seifghazi/claude-code-monitor/internal/config"
|
|
"github.com/seifghazi/claude-code-monitor/internal/model"
|
|
)
|
|
|
|
// sqliteStorageService implements StorageService on top of a SQLite
// database opened via the mattn/go-sqlite3 driver.
type sqliteStorageService struct {
	db     *sql.DB
	config *config.StorageConfig
	logger *log.Logger

	// Prepared statements for frequently used queries; created once in
	// prepareStatements and released in Close.
	stmtInsertRequest     *sql.Stmt
	stmtUpdateResponse    *sql.Stmt
	stmtUpdateGrading     *sql.Stmt
	stmtGetRequestByID    *sql.Stmt
	stmtGetRequestsPage   *sql.Stmt
	stmtGetRequestsCount  *sql.Stmt
	stmtDeleteOldRequests *sql.Stmt
}
|
|
|
|
// NewSQLiteStorageService constructs a SQLite-backed StorageService using
// the process-wide default logger.
func NewSQLiteStorageService(cfg *config.StorageConfig) (StorageService, error) {
	return NewSQLiteStorageServiceWithLogger(cfg, log.Default())
}
|
|
|
|
// NewSQLiteStorageServiceWithLogger opens (creating if necessary) the SQLite
// database at cfg.DBPath, applies performance pragmas, creates and migrates
// the schema, prepares the hot-path statements, and applies the retention
// policy once at startup. The caller owns the returned service and must
// call Close on it.
func NewSQLiteStorageServiceWithLogger(cfg *config.StorageConfig, logger *log.Logger) (StorageService, error) {
	// Enable WAL mode and other optimizations via connection string
	// _journal_mode=WAL: Write-Ahead Logging for better concurrent read performance
	// _synchronous=NORMAL: Good balance of safety and performance
	// _busy_timeout=5000: Wait up to 5 seconds if database is locked
	// _cache_size=-20000: Use 20MB of memory for cache (negative = KB)
	connStr := cfg.DBPath + "?_journal_mode=WAL&_synchronous=NORMAL&_busy_timeout=5000&_cache_size=-20000"

	db, err := sql.Open("sqlite3", connStr)
	if err != nil {
		return nil, fmt.Errorf("failed to open database: %w", err)
	}

	// Configure connection pool
	// SQLite only supports one writer at a time, but can handle multiple readers
	db.SetMaxOpenConns(1) // Serialize writes to avoid SQLITE_BUSY errors
	db.SetMaxIdleConns(1)
	db.SetConnMaxLifetime(time.Hour)

	// Verify connection
	if err := db.Ping(); err != nil {
		db.Close()
		return nil, fmt.Errorf("failed to ping database: %w", err)
	}

	service := &sqliteStorageService{
		db:     db,
		config: cfg,
		logger: logger,
	}

	// Order matters: tables must exist before statements referencing them
	// can be prepared.
	if err := service.createTables(); err != nil {
		db.Close()
		return nil, fmt.Errorf("failed to create tables: %w", err)
	}

	if err := service.prepareStatements(); err != nil {
		db.Close()
		return nil, fmt.Errorf("failed to prepare statements: %w", err)
	}

	// Startup retention cleanup is best-effort: a failure is logged but does
	// not prevent the service from starting.
	if err := service.cleanupExpiredRequests(); err != nil {
		logger.Printf("Warning: failed to apply retention policy during startup: %v", err)
	}

	return service, nil
}
|
|
|
|
// createTables creates the base requests table and its indexes when they do
// not already exist, then runs the incremental migrations for columns that
// were added after the original schema.
func (s *sqliteStorageService) createTables() error {
	schema := `
	CREATE TABLE IF NOT EXISTS requests (
		id TEXT PRIMARY KEY,
		timestamp DATETIME NOT NULL,
		method TEXT NOT NULL,
		endpoint TEXT NOT NULL,
		headers TEXT NOT NULL,
		body TEXT NOT NULL,
		user_agent TEXT,
		content_type TEXT,
		prompt_grade TEXT,
		response TEXT,
		model TEXT,
		original_model TEXT,
		routed_model TEXT
	);

	-- Index for listing requests by time (most common query)
	CREATE INDEX IF NOT EXISTS idx_requests_timestamp ON requests(timestamp DESC);

	-- Index for filtering by model
	CREATE INDEX IF NOT EXISTS idx_requests_model ON requests(model);

	-- Index for filtering by endpoint
	CREATE INDEX IF NOT EXISTS idx_requests_endpoint ON requests(endpoint);
	`

	_, err := s.db.Exec(schema)
	if err != nil {
		return err
	}

	// Run migrations
	if err := s.migrateSchema(); err != nil {
		return err
	}

	return nil
}
|
|
|
|
// migrateSchema applies idempotent, append-only migrations on top of the
// base schema. ALTER TABLE statements are not idempotent in SQLite, so
// duplicate-column errors on re-run are filtered by
// ignoreSQLiteDuplicateColumn (helper defined elsewhere in this package).
func (s *sqliteStorageService) migrateSchema() error {
	// Ensure WAL mode is enabled (in case opened without connection string params)
	_, err := s.db.Exec("PRAGMA journal_mode=WAL")
	if err != nil {
		return fmt.Errorf("failed to set WAL mode: %w", err)
	}

	return runMigrations(s.db, []string{
		"DROP INDEX IF EXISTS idx_timestamp",
		"ALTER TABLE requests ADD COLUMN conversation_hash TEXT",
		"ALTER TABLE requests ADD COLUMN message_count INTEGER DEFAULT 0",
		"CREATE INDEX IF NOT EXISTS idx_requests_conversation_hash ON requests(conversation_hash)",
		"ALTER TABLE requests ADD COLUMN organization_id TEXT",
		"CREATE INDEX IF NOT EXISTS idx_requests_organization_id ON requests(organization_id)",
		`CREATE TABLE IF NOT EXISTS settings (key TEXT PRIMARY KEY, value TEXT NOT NULL)`,
	}, ignoreSQLiteDuplicateColumn)
}
|
|
|
|
func (s *sqliteStorageService) prepareStatements() error {
|
|
var err error
|
|
|
|
s.stmtInsertRequest, err = s.db.Prepare(`
|
|
INSERT INTO requests (id, timestamp, method, endpoint, headers, body, user_agent, content_type, model, original_model, routed_model, conversation_hash, message_count)
|
|
VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)
|
|
`)
|
|
if err != nil {
|
|
return fmt.Errorf("failed to prepare insert statement: %w", err)
|
|
}
|
|
|
|
s.stmtUpdateResponse, err = s.db.Prepare(`
|
|
UPDATE requests SET response = ?, organization_id = COALESCE(NULLIF(?, ''), organization_id) WHERE id = ?
|
|
`)
|
|
if err != nil {
|
|
return fmt.Errorf("failed to prepare update response statement: %w", err)
|
|
}
|
|
|
|
s.stmtUpdateGrading, err = s.db.Prepare(`
|
|
UPDATE requests SET prompt_grade = ? WHERE id = ?
|
|
`)
|
|
if err != nil {
|
|
return fmt.Errorf("failed to prepare update grading statement: %w", err)
|
|
}
|
|
|
|
s.stmtGetRequestByID, err = s.db.Prepare(`
|
|
SELECT id, timestamp, method, endpoint, headers, body, model, user_agent, content_type, prompt_grade, response, original_model, routed_model
|
|
FROM requests
|
|
WHERE id = ?
|
|
`)
|
|
if err != nil {
|
|
return fmt.Errorf("failed to prepare get by ID statement: %w", err)
|
|
}
|
|
|
|
s.stmtGetRequestsPage, err = s.db.Prepare(`
|
|
SELECT id, timestamp, method, endpoint, headers, body, model, user_agent, content_type, prompt_grade, response, original_model, routed_model
|
|
FROM requests
|
|
ORDER BY timestamp DESC
|
|
LIMIT ? OFFSET ?
|
|
`)
|
|
if err != nil {
|
|
return fmt.Errorf("failed to prepare get requests page statement: %w", err)
|
|
}
|
|
|
|
s.stmtGetRequestsCount, err = s.db.Prepare(`
|
|
SELECT COUNT(*) FROM requests
|
|
`)
|
|
if err != nil {
|
|
return fmt.Errorf("failed to prepare count statement: %w", err)
|
|
}
|
|
|
|
s.stmtDeleteOldRequests, err = s.db.Prepare(`
|
|
DELETE FROM requests WHERE timestamp < ?
|
|
`)
|
|
if err != nil {
|
|
return fmt.Errorf("failed to prepare delete old requests statement: %w", err)
|
|
}
|
|
|
|
return nil
|
|
}
|
|
|
|
func (s *sqliteStorageService) SaveRequest(request *model.RequestLog) (string, error) {
|
|
headersJSON, err := json.Marshal(request.Headers)
|
|
if err != nil {
|
|
return "", fmt.Errorf("failed to marshal headers: %w", err)
|
|
}
|
|
|
|
bodyForStorage, err := prepareRequestBodyForStorage(s.config, request.Body)
|
|
if err != nil {
|
|
return "", fmt.Errorf("failed to prepare body for storage: %w", err)
|
|
}
|
|
|
|
bodyJSON, err := json.Marshal(bodyForStorage)
|
|
if err != nil {
|
|
return "", fmt.Errorf("failed to marshal body: %w", err)
|
|
}
|
|
|
|
_, err = s.stmtInsertRequest.Exec(
|
|
request.RequestID,
|
|
request.Timestamp,
|
|
request.Method,
|
|
request.Endpoint,
|
|
string(headersJSON),
|
|
string(bodyJSON),
|
|
request.UserAgent,
|
|
request.ContentType,
|
|
request.Model,
|
|
request.OriginalModel,
|
|
request.RoutedModel,
|
|
request.ConversationHash,
|
|
request.MessageCount,
|
|
)
|
|
|
|
if err != nil {
|
|
return "", fmt.Errorf("failed to insert request: %w", err)
|
|
}
|
|
|
|
if err := s.cleanupExpiredRequests(); err != nil {
|
|
s.logger.Printf("Warning: failed to apply retention policy: %v", err)
|
|
}
|
|
|
|
return request.RequestID, nil
|
|
}
|
|
|
|
func (s *sqliteStorageService) GetRequests(page, limit int, modelFilter string) ([]model.RequestLog, int, error) {
|
|
whereClause := ""
|
|
countArgs := []interface{}{}
|
|
queryArgs := []interface{}{}
|
|
|
|
if filterValue, ok := modelFilterPattern(modelFilter, escapeLikePattern); ok {
|
|
whereClause = " WHERE LOWER(model) LIKE ? ESCAPE '\\'"
|
|
countArgs = append(countArgs, filterValue)
|
|
queryArgs = append(queryArgs, filterValue)
|
|
}
|
|
|
|
// Get total count
|
|
var total int
|
|
countQuery := "SELECT COUNT(*) FROM requests" + whereClause
|
|
err := s.db.QueryRow(countQuery, countArgs...).Scan(&total)
|
|
if err != nil {
|
|
return nil, 0, fmt.Errorf("failed to get total count: %w", err)
|
|
}
|
|
|
|
// Get paginated results
|
|
offset := (page - 1) * limit
|
|
query := `
|
|
SELECT id, timestamp, method, endpoint, headers, body, model, user_agent, content_type, prompt_grade, response, original_model, routed_model
|
|
FROM requests` + whereClause + `
|
|
ORDER BY timestamp DESC
|
|
LIMIT ? OFFSET ?
|
|
`
|
|
queryArgs = append(queryArgs, limit, offset)
|
|
|
|
rows, err := s.db.Query(query, queryArgs...)
|
|
if err != nil {
|
|
return nil, 0, fmt.Errorf("failed to query requests: %w", err)
|
|
}
|
|
defer rows.Close()
|
|
|
|
requests, err := s.scanRequestRows(rows)
|
|
if err != nil {
|
|
return nil, 0, err
|
|
}
|
|
|
|
return requests, total, nil
|
|
}
|
|
|
|
func (s *sqliteStorageService) ClearRequests() (int, error) {
|
|
result, err := s.db.Exec("DELETE FROM requests")
|
|
if err != nil {
|
|
return 0, fmt.Errorf("failed to clear requests: %w", err)
|
|
}
|
|
|
|
rowsAffected, err := result.RowsAffected()
|
|
if err != nil {
|
|
return 0, fmt.Errorf("failed to get rows affected: %w", err)
|
|
}
|
|
|
|
// Reclaim space after clearing all data
|
|
_, err = s.db.Exec("VACUUM")
|
|
if err != nil {
|
|
s.logger.Printf("Warning: failed to vacuum database: %v", err)
|
|
}
|
|
|
|
return int(rowsAffected), nil
|
|
}
|
|
|
|
func (s *sqliteStorageService) UpdateRequestWithGrading(requestID string, grade *model.PromptGrade) error {
|
|
gradeJSON, err := json.Marshal(grade)
|
|
if err != nil {
|
|
return fmt.Errorf("failed to marshal grade: %w", err)
|
|
}
|
|
|
|
result, err := s.stmtUpdateGrading.Exec(string(gradeJSON), requestID)
|
|
if err != nil {
|
|
return fmt.Errorf("failed to update request with grading: %w", err)
|
|
}
|
|
|
|
rowsAffected, _ := result.RowsAffected()
|
|
if rowsAffected == 0 {
|
|
return fmt.Errorf("request %s not found", requestID)
|
|
}
|
|
|
|
if err := s.cleanupExpiredRequests(); err != nil {
|
|
s.logger.Printf("Warning: failed to apply retention policy: %v", err)
|
|
}
|
|
|
|
return nil
|
|
}
|
|
|
|
func (s *sqliteStorageService) UpdateRequestWithResponse(request *model.RequestLog) error {
|
|
responseForStorage, err := prepareResponseForStorage(s.config, s.logger, request.Response)
|
|
if err != nil {
|
|
return fmt.Errorf("failed to prepare response for storage: %w", err)
|
|
}
|
|
|
|
responseJSON, err := json.Marshal(responseForStorage)
|
|
if err != nil {
|
|
return fmt.Errorf("failed to marshal response: %w", err)
|
|
}
|
|
|
|
orgID := request.OrganizationID
|
|
result, err := s.stmtUpdateResponse.Exec(string(responseJSON), orgID, request.RequestID)
|
|
if err != nil {
|
|
return fmt.Errorf("failed to update request with response: %w", err)
|
|
}
|
|
|
|
rowsAffected, _ := result.RowsAffected()
|
|
if rowsAffected == 0 {
|
|
return fmt.Errorf("request %s not found", request.RequestID)
|
|
}
|
|
|
|
return nil
|
|
}
|
|
|
|
// SaveRequestWithResponse saves a request and its response in a single transaction
|
|
func (s *sqliteStorageService) SaveRequestWithResponse(request *model.RequestLog) error {
|
|
tx, err := s.db.Begin()
|
|
if err != nil {
|
|
return fmt.Errorf("failed to begin transaction: %w", err)
|
|
}
|
|
defer tx.Rollback()
|
|
|
|
headersJSON, err := json.Marshal(request.Headers)
|
|
if err != nil {
|
|
return fmt.Errorf("failed to marshal headers: %w", err)
|
|
}
|
|
|
|
bodyForStorage, err := prepareRequestBodyForStorage(s.config, request.Body)
|
|
if err != nil {
|
|
return fmt.Errorf("failed to prepare body for storage: %w", err)
|
|
}
|
|
|
|
bodyJSON, err := json.Marshal(bodyForStorage)
|
|
if err != nil {
|
|
return fmt.Errorf("failed to marshal body: %w", err)
|
|
}
|
|
|
|
// Insert request
|
|
_, err = tx.Stmt(s.stmtInsertRequest).Exec(
|
|
request.RequestID,
|
|
request.Timestamp,
|
|
request.Method,
|
|
request.Endpoint,
|
|
string(headersJSON),
|
|
string(bodyJSON),
|
|
request.UserAgent,
|
|
request.ContentType,
|
|
request.Model,
|
|
request.OriginalModel,
|
|
request.RoutedModel,
|
|
request.ConversationHash,
|
|
request.MessageCount,
|
|
)
|
|
if err != nil {
|
|
return fmt.Errorf("failed to insert request: %w", err)
|
|
}
|
|
|
|
// Update with response if present
|
|
if request.Response != nil {
|
|
responseForStorage, err := prepareResponseForStorage(s.config, s.logger, request.Response)
|
|
if err != nil {
|
|
return fmt.Errorf("failed to prepare response for storage: %w", err)
|
|
}
|
|
|
|
responseJSON, err := json.Marshal(responseForStorage)
|
|
if err != nil {
|
|
return fmt.Errorf("failed to marshal response: %w", err)
|
|
}
|
|
|
|
_, err = tx.Stmt(s.stmtUpdateResponse).Exec(string(responseJSON), request.RequestID)
|
|
if err != nil {
|
|
return fmt.Errorf("failed to update response: %w", err)
|
|
}
|
|
}
|
|
|
|
if err := tx.Commit(); err != nil {
|
|
return fmt.Errorf("failed to commit transaction: %w", err)
|
|
}
|
|
|
|
if err := s.cleanupExpiredRequests(); err != nil {
|
|
s.logger.Printf("Warning: failed to apply retention policy: %v", err)
|
|
}
|
|
|
|
return nil
|
|
}
|
|
|
|
// EnsureDirectoryExists satisfies the StorageService interface; the SQLite
// backend keeps everything in a single database file, so there is nothing
// to create.
func (s *sqliteStorageService) EnsureDirectoryExists() error {
	// No directory needed for SQLite
	return nil
}
|
|
|
|
// GetRequestByShortID looks up a single request whose full ID matches the
// LIKE pattern "%<escaped shortID>" — i.e. IDs ending with shortID — and
// returns the most recent match along with its full ID.
// NOTE(review): the pattern is suffix-anchored only; confirm callers expect
// short IDs to be ID suffixes rather than arbitrary substrings.
func (s *sqliteStorageService) GetRequestByShortID(shortID string) (*model.RequestLog, string, error) {
	// Escape LIKE special characters to prevent pattern injection
	escapedID := escapeLikePattern(shortID)

	query := `
	SELECT id, timestamp, method, endpoint, headers, body, model, user_agent, content_type, prompt_grade, response, original_model, routed_model
	FROM requests
	WHERE id LIKE ? ESCAPE '\'
	ORDER BY timestamp DESC
	LIMIT 1
	`

	var req model.RequestLog
	var headersJSON, bodyJSON string
	// prompt_grade and response may be NULL for requests that were never
	// graded / never received a response.
	var promptGradeJSON, responseJSON sql.NullString

	err := s.db.QueryRow(query, "%"+escapedID).Scan(
		&req.RequestID,
		&req.Timestamp,
		&req.Method,
		&req.Endpoint,
		&headersJSON,
		&bodyJSON,
		&req.Model,
		&req.UserAgent,
		&req.ContentType,
		&promptGradeJSON,
		&responseJSON,
		&req.OriginalModel,
		&req.RoutedModel,
	)

	if err == sql.ErrNoRows {
		return nil, "", fmt.Errorf("request with ID %s not found", shortID)
	}
	if err != nil {
		return nil, "", fmt.Errorf("failed to query request: %w", err)
	}

	// Decode the JSON columns back into the struct fields.
	if err := unmarshalStoredRequestFields(s.logger, &req, headersJSON, bodyJSON, promptGradeJSON, responseJSON); err != nil {
		return nil, "", err
	}

	return &req, req.RequestID, nil
}
|
|
|
|
// GetConfig returns the storage configuration this service was created with.
func (s *sqliteStorageService) GetConfig() *config.StorageConfig {
	return s.config
}
|
|
|
|
// GetAllRequests returns every stored request matching the optional model
// filter, newest first, with no row limit.
func (s *sqliteStorageService) GetAllRequests(modelFilter string) ([]*model.RequestLog, error) {
	return s.GetAllRequestsWithLimit(modelFilter, 0) // 0 means no limit
}
|
|
|
|
// GetAllRequestsWithLimit returns requests with an optional limit (0 = no limit)
|
|
func (s *sqliteStorageService) GetAllRequestsWithLimit(modelFilter string, limit int) ([]*model.RequestLog, error) {
|
|
var query string
|
|
args := []interface{}{}
|
|
|
|
if filterValue, ok := modelFilterPattern(modelFilter, escapeLikePattern); ok {
|
|
query = `
|
|
SELECT id, timestamp, method, endpoint, headers, body, model, user_agent, content_type, prompt_grade, response, original_model, routed_model
|
|
FROM requests
|
|
WHERE LOWER(model) LIKE ? ESCAPE '\'
|
|
ORDER BY timestamp DESC
|
|
`
|
|
args = append(args, filterValue)
|
|
} else {
|
|
query = `
|
|
SELECT id, timestamp, method, endpoint, headers, body, model, user_agent, content_type, prompt_grade, response, original_model, routed_model
|
|
FROM requests
|
|
ORDER BY timestamp DESC
|
|
`
|
|
}
|
|
|
|
if limit > 0 {
|
|
query += " LIMIT ?"
|
|
args = append(args, limit)
|
|
}
|
|
|
|
rows, err := s.db.Query(query, args...)
|
|
if err != nil {
|
|
return nil, fmt.Errorf("failed to query requests: %w", err)
|
|
}
|
|
defer rows.Close()
|
|
|
|
var requests []*model.RequestLog
|
|
for rows.Next() {
|
|
req, err := s.scanSingleRow(rows)
|
|
if err != nil {
|
|
s.logger.Printf("Warning: failed to scan request row: %v", err)
|
|
continue
|
|
}
|
|
requests = append(requests, req)
|
|
}
|
|
|
|
if err := rows.Err(); err != nil {
|
|
return nil, fmt.Errorf("error iterating rows: %w", err)
|
|
}
|
|
|
|
return requests, nil
|
|
}
|
|
|
|
// DeleteRequestsOlderThan removes requests older than the specified duration
// and returns the number of rows deleted.
func (s *sqliteStorageService) DeleteRequestsOlderThan(age time.Duration) (int, error) {
	cutoff := time.Now().Add(-age)

	// NOTE(review): the cutoff is bound as an RFC3339 string ("T" separator)
	// and compared lexically against the stored DATETIME column. Verify the
	// driver stores timestamps in a compatible text format; a space-separated
	// format would make this string comparison unreliable.
	result, err := s.stmtDeleteOldRequests.Exec(cutoff.Format(time.RFC3339))
	if err != nil {
		return 0, fmt.Errorf("failed to delete old requests: %w", err)
	}

	rowsAffected, err := result.RowsAffected()
	if err != nil {
		return 0, fmt.Errorf("failed to get rows affected: %w", err)
	}

	return int(rowsAffected), nil
}
|
|
|
|
// GetDatabaseStats returns statistics about the database
|
|
func (s *sqliteStorageService) GetDatabaseStats() (map[string]interface{}, error) {
|
|
stats := make(map[string]interface{})
|
|
|
|
// Get row count
|
|
var count int
|
|
err := s.stmtGetRequestsCount.QueryRow().Scan(&count)
|
|
if err != nil {
|
|
return nil, fmt.Errorf("failed to get count: %w", err)
|
|
}
|
|
stats["total_requests"] = count
|
|
|
|
// Get database size
|
|
var pageCount, pageSize int
|
|
err = s.db.QueryRow("PRAGMA page_count").Scan(&pageCount)
|
|
if err == nil {
|
|
err = s.db.QueryRow("PRAGMA page_size").Scan(&pageSize)
|
|
if err == nil {
|
|
stats["database_size_bytes"] = pageCount * pageSize
|
|
}
|
|
}
|
|
|
|
// Get oldest and newest timestamps
|
|
var oldest, newest sql.NullString
|
|
err = s.db.QueryRow("SELECT MIN(timestamp), MAX(timestamp) FROM requests").Scan(&oldest, &newest)
|
|
if err == nil {
|
|
if oldest.Valid {
|
|
stats["oldest_request"] = oldest.String
|
|
}
|
|
if newest.Valid {
|
|
stats["newest_request"] = newest.String
|
|
}
|
|
}
|
|
|
|
return stats, nil
|
|
}
|
|
|
|
func (s *sqliteStorageService) Close() error {
|
|
// Close prepared statements
|
|
if s.stmtInsertRequest != nil {
|
|
s.stmtInsertRequest.Close()
|
|
}
|
|
if s.stmtUpdateResponse != nil {
|
|
s.stmtUpdateResponse.Close()
|
|
}
|
|
if s.stmtUpdateGrading != nil {
|
|
s.stmtUpdateGrading.Close()
|
|
}
|
|
if s.stmtGetRequestByID != nil {
|
|
s.stmtGetRequestByID.Close()
|
|
}
|
|
if s.stmtGetRequestsPage != nil {
|
|
s.stmtGetRequestsPage.Close()
|
|
}
|
|
if s.stmtGetRequestsCount != nil {
|
|
s.stmtGetRequestsCount.Close()
|
|
}
|
|
if s.stmtDeleteOldRequests != nil {
|
|
s.stmtDeleteOldRequests.Close()
|
|
}
|
|
|
|
// Checkpoint WAL before closing
|
|
_, err := s.db.Exec("PRAGMA wal_checkpoint(TRUNCATE)")
|
|
if err != nil {
|
|
s.logger.Printf("Warning: failed to checkpoint WAL: %v", err)
|
|
}
|
|
|
|
return s.db.Close()
|
|
}
|
|
|
|
// Helper functions

// escapeLikePattern escapes special characters in LIKE patterns
// (the backslash escape character itself plus the % and _ wildcards) so
// user-supplied text can be embedded safely with ESCAPE '\'.
func escapeLikePattern(s string) string {
	var b strings.Builder
	b.Grow(len(s))
	for _, r := range s {
		// Prefix each special character with a backslash; everything else
		// passes through unchanged.
		switch r {
		case '\\', '%', '_':
			b.WriteByte('\\')
		}
		b.WriteRune(r)
	}
	return b.String()
}
|
|
|
|
// scanRequestRows scans multiple rows into a slice of RequestLog
// Rows that fail to scan or whose stored JSON fails to decode are logged and
// skipped rather than aborting the entire result set. The column order must
// match the SELECT lists used by GetRequests and the prepared page query.
func (s *sqliteStorageService) scanRequestRows(rows *sql.Rows) ([]model.RequestLog, error) {
	var requests []model.RequestLog

	for rows.Next() {
		var req model.RequestLog
		var headersJSON, bodyJSON string
		// prompt_grade and response columns may be NULL.
		var promptGradeJSON, responseJSON sql.NullString

		err := rows.Scan(
			&req.RequestID,
			&req.Timestamp,
			&req.Method,
			&req.Endpoint,
			&headersJSON,
			&bodyJSON,
			&req.Model,
			&req.UserAgent,
			&req.ContentType,
			&promptGradeJSON,
			&responseJSON,
			&req.OriginalModel,
			&req.RoutedModel,
		)
		if err != nil {
			s.logger.Printf("Warning: failed to scan row: %v", err)
			continue
		}

		if err := unmarshalStoredRequestFields(s.logger, &req, headersJSON, bodyJSON, promptGradeJSON, responseJSON); err != nil {
			s.logger.Printf("Warning: failed to unmarshal request fields: %v", err)
			continue
		}

		requests = append(requests, req)
	}

	// Surface any error the iteration itself hit (distinct from per-row
	// scan failures, which are only warnings).
	if err := rows.Err(); err != nil {
		return nil, fmt.Errorf("error iterating rows: %w", err)
	}

	return requests, nil
}
|
|
|
|
// scanSingleRow scans a single row into a RequestLog pointer
// Unlike scanRequestRows, a scan or decode failure here is returned to the
// caller instead of being skipped. The column order must match the
// 13-column SELECT list used throughout this file.
func (s *sqliteStorageService) scanSingleRow(rows *sql.Rows) (*model.RequestLog, error) {
	var req model.RequestLog
	var headersJSON, bodyJSON string
	// prompt_grade and response columns may be NULL.
	var promptGradeJSON, responseJSON sql.NullString

	err := rows.Scan(
		&req.RequestID,
		&req.Timestamp,
		&req.Method,
		&req.Endpoint,
		&headersJSON,
		&bodyJSON,
		&req.Model,
		&req.UserAgent,
		&req.ContentType,
		&promptGradeJSON,
		&responseJSON,
		&req.OriginalModel,
		&req.RoutedModel,
	)
	if err != nil {
		return nil, fmt.Errorf("failed to scan row: %w", err)
	}

	if err := unmarshalStoredRequestFields(s.logger, &req, headersJSON, bodyJSON, promptGradeJSON, responseJSON); err != nil {
		return nil, err
	}

	return &req, nil
}
|
|
|
|
func (s *sqliteStorageService) cleanupExpiredRequests() error {
|
|
if s.config == nil || s.config.RetentionDays <= 0 {
|
|
return nil
|
|
}
|
|
|
|
_, err := s.DeleteRequestsOlderThan(time.Duration(s.config.RetentionDays) * 24 * time.Hour)
|
|
return err
|
|
}
|
|
|
|
// GetUsageStats returns aggregated token usage statistics
// for requests that have a stored response, optionally filtered by date
// range, case-insensitive model pattern, and organization ID. When no date
// bounds are supplied, the observed MIN/MAX timestamps are reported instead.
func (s *sqliteStorageService) GetUsageStats(startDate, endDate, modelFilter, orgFilter string) (*model.UsageStats, error) {
	stats := &model.UsageStats{
		RequestsByModel: make(map[string]model.ModelStats),
	}

	// Build query with optional filters
	whereClause := "WHERE response IS NOT NULL"
	args := []interface{}{}

	if startDate != "" {
		whereClause += " AND timestamp >= ?"
		args = append(args, startDate)
		stats.StartDate = startDate
	}

	if endDate != "" {
		whereClause += " AND timestamp <= ?"
		args = append(args, endDate)
		stats.EndDate = endDate
	}

	if filterValue, ok := modelFilterPattern(modelFilter, escapeLikePattern); ok {
		whereClause += " AND LOWER(model) LIKE ? ESCAPE '\\'"
		args = append(args, filterValue)
	}

	if orgFilter != "" {
		whereClause += " AND organization_id = ?"
		args = append(args, orgFilter)
	}

	// Query all responses and aggregate token usage
	query := `
	SELECT model, response
	FROM requests
	` + whereClause

	rows, err := s.db.Query(query, args...)
	if err != nil {
		return nil, fmt.Errorf("failed to query usage stats: %w", err)
	}
	defer rows.Close()

	for rows.Next() {
		var modelName string
		var responseJSON sql.NullString

		if err := rows.Scan(&modelName, &responseJSON); err != nil {
			s.logger.Printf("Warning: failed to scan usage row: %v", err)
			continue
		}

		// Rows whose stored response cannot be decoded, or that carry no
		// usage block, are skipped silently.
		resp, ok := decodeStoredResponse(responseJSON)
		if !ok {
			continue
		}
		bodySummary, ok := decodeResponseBodySummary(resp.Body)
		if !ok || bodySummary.Usage == nil {
			continue
		}

		addUsageStats(stats, modelName, bodySummary.Usage)
	}

	if err := rows.Err(); err != nil {
		return nil, fmt.Errorf("error iterating usage rows: %w", err)
	}

	// Get date range if not specified
	if stats.StartDate == "" || stats.EndDate == "" {
		var oldest, newest sql.NullString
		err := s.db.QueryRow("SELECT MIN(timestamp), MAX(timestamp) FROM requests WHERE response IS NOT NULL").Scan(&oldest, &newest)
		if err == nil {
			if stats.StartDate == "" && oldest.Valid {
				stats.StartDate = oldest.String
			}
			if stats.EndDate == "" && newest.Valid {
				stats.EndDate = newest.String
			}
		}
	}

	return stats, nil
}
|
|
|
|
// GetRequestsSummary returns minimal data for list view - no body/headers, only usage from response
|
|
func (s *sqliteStorageService) GetRequestsSummary(modelFilter string) ([]*model.RequestSummary, error) {
|
|
query := `
|
|
SELECT id, timestamp, method, endpoint, model, original_model, routed_model, response, COALESCE(conversation_hash, ''), COALESCE(message_count, 0)
|
|
FROM requests
|
|
`
|
|
args := []interface{}{}
|
|
|
|
if filterValue, ok := modelFilterPattern(modelFilter, escapeLikePattern); ok {
|
|
query += " WHERE LOWER(model) LIKE ? ESCAPE '\\'"
|
|
args = append(args, filterValue)
|
|
}
|
|
|
|
query += " ORDER BY timestamp DESC"
|
|
|
|
rows, err := s.db.Query(query, args...)
|
|
if err != nil {
|
|
return nil, fmt.Errorf("failed to query requests: %w", err)
|
|
}
|
|
defer rows.Close()
|
|
|
|
var summaries []*model.RequestSummary
|
|
for rows.Next() {
|
|
var summary model.RequestSummary
|
|
var responseJSON sql.NullString
|
|
|
|
err := rows.Scan(
|
|
&summary.RequestID,
|
|
&summary.Timestamp,
|
|
&summary.Method,
|
|
&summary.Endpoint,
|
|
&summary.Model,
|
|
&summary.OriginalModel,
|
|
&summary.RoutedModel,
|
|
&responseJSON,
|
|
&summary.ConversationHash,
|
|
&summary.MessageCount,
|
|
)
|
|
if err != nil {
|
|
s.logger.Printf("Warning: failed to scan summary row: %v", err)
|
|
continue
|
|
}
|
|
|
|
// Only parse response to extract usage and status
|
|
applyStoredResponseToSummary(&summary, responseJSON)
|
|
|
|
summaries = append(summaries, &summary)
|
|
}
|
|
|
|
return summaries, nil
|
|
}
|
|
|
|
// GetRequestsSummaryPaginated returns minimal data for list view with pagination
|
|
func (s *sqliteStorageService) GetRequestsSummaryPaginated(modelFilter, startTime, endTime string, offset, limit int) ([]*model.RequestSummary, int, error) {
|
|
// Build WHERE clauses
|
|
whereClauses := []string{}
|
|
args := []interface{}{}
|
|
|
|
if filterValue, ok := modelFilterPattern(modelFilter, escapeLikePattern); ok {
|
|
whereClauses = append(whereClauses, "LOWER(model) LIKE ? ESCAPE '\\'")
|
|
args = append(args, filterValue)
|
|
}
|
|
|
|
if startTime != "" && endTime != "" {
|
|
whereClauses = append(whereClauses, "datetime(timestamp) >= datetime(?) AND datetime(timestamp) <= datetime(?)")
|
|
args = append(args, startTime, endTime)
|
|
}
|
|
|
|
whereClause := ""
|
|
if len(whereClauses) > 0 {
|
|
whereClause = " WHERE " + strings.Join(whereClauses, " AND ")
|
|
}
|
|
|
|
// Get total count
|
|
var total int
|
|
countQuery := "SELECT COUNT(*) FROM requests" + whereClause
|
|
countArgs := make([]interface{}, len(args))
|
|
copy(countArgs, args)
|
|
if err := s.db.QueryRow(countQuery, countArgs...).Scan(&total); err != nil {
|
|
return nil, 0, fmt.Errorf("failed to get total count: %w", err)
|
|
}
|
|
|
|
// Get the requested page
|
|
query := `
|
|
SELECT id, timestamp, method, endpoint, model, original_model, routed_model, response, COALESCE(conversation_hash, ''), COALESCE(message_count, 0)
|
|
FROM requests
|
|
` + whereClause + " ORDER BY timestamp DESC"
|
|
|
|
// Add pagination
|
|
if limit > 0 {
|
|
query += " LIMIT ? OFFSET ?"
|
|
args = append(args, limit, offset)
|
|
} else if offset > 0 {
|
|
query += " OFFSET ?"
|
|
args = append(args, offset)
|
|
}
|
|
|
|
rows, err := s.db.Query(query, args...)
|
|
if err != nil {
|
|
return nil, 0, fmt.Errorf("failed to query requests: %w", err)
|
|
}
|
|
defer rows.Close()
|
|
|
|
var summaries []*model.RequestSummary
|
|
for rows.Next() {
|
|
var summary model.RequestSummary
|
|
var responseJSON sql.NullString
|
|
|
|
err := rows.Scan(
|
|
&summary.RequestID,
|
|
&summary.Timestamp,
|
|
&summary.Method,
|
|
&summary.Endpoint,
|
|
&summary.Model,
|
|
&summary.OriginalModel,
|
|
&summary.RoutedModel,
|
|
&responseJSON,
|
|
&summary.ConversationHash,
|
|
&summary.MessageCount,
|
|
)
|
|
if err != nil {
|
|
s.logger.Printf("Warning: failed to scan summary row: %v", err)
|
|
continue
|
|
}
|
|
|
|
// Only parse response to extract usage and status
|
|
applyStoredResponseToSummary(&summary, responseJSON)
|
|
|
|
summaries = append(summaries, &summary)
|
|
}
|
|
|
|
s.logger.Printf("📊 GetRequestsSummaryPaginated: returned %d requests (total: %d, limit: %d, offset: %d)", len(summaries), total, limit, offset)
|
|
return summaries, total, nil
|
|
}
|
|
|
|
// GetStats returns aggregated statistics for the dashboard - daily token usage
|
|
func (s *sqliteStorageService) GetStats(startDate, endDate, orgFilter string) (*model.DashboardStats, error) {
|
|
stats := &model.DashboardStats{
|
|
DailyStats: make([]model.DailyTokens, 0),
|
|
}
|
|
|
|
query := `
|
|
SELECT timestamp, COALESCE(model, 'unknown') as model, response
|
|
FROM requests
|
|
WHERE datetime(timestamp) >= datetime(?) AND datetime(timestamp) <= datetime(?)
|
|
`
|
|
args := []interface{}{startDate, endDate}
|
|
if orgFilter != "" {
|
|
query += ` AND organization_id = ?`
|
|
args = append(args, orgFilter)
|
|
}
|
|
query += ` ORDER BY timestamp`
|
|
|
|
rows, err := s.db.Query(query, args...)
|
|
if err != nil {
|
|
return nil, fmt.Errorf("failed to query stats: %w", err)
|
|
}
|
|
defer rows.Close()
|
|
|
|
// Aggregate data in memory
|
|
dailyMap := make(map[string]*model.DailyTokens)
|
|
|
|
for rows.Next() {
|
|
var timestamp, modelName string
|
|
var responseJSON sql.NullString
|
|
|
|
if err := rows.Scan(×tamp, &modelName, &responseJSON); err != nil {
|
|
continue
|
|
}
|
|
|
|
// Extract date from timestamp (format: 2025-11-28T13:03:29-08:00)
|
|
date := strings.Split(timestamp, "T")[0]
|
|
|
|
// Parse response to get usage
|
|
tokens := int64(0)
|
|
if resp, ok := decodeStoredResponse(responseJSON); ok {
|
|
if bodySummary, ok := decodeResponseBodySummary(resp.Body); ok {
|
|
tokens = totalTokensFromUsage(bodySummary.Usage)
|
|
}
|
|
}
|
|
|
|
addDailyTokens(dailyMap, date, modelName, tokens)
|
|
}
|
|
|
|
// Convert map to slice
|
|
for _, v := range dailyMap {
|
|
stats.DailyStats = append(stats.DailyStats, *v)
|
|
}
|
|
|
|
return stats, nil
|
|
}
|
|
|
|
// GetHourlyStats returns time-bucketed breakdown for a specific time range.
|
|
// bucketMinutes controls the granularity (e.g. 5, 15, 30, 60).
|
|
func (s *sqliteStorageService) GetHourlyStats(startTime, endTime string, bucketMinutes int, orgFilter string) (*model.HourlyStatsResponse, error) {
|
|
if bucketMinutes <= 0 {
|
|
bucketMinutes = 60
|
|
}
|
|
|
|
query := `
|
|
SELECT timestamp, COALESCE(model, 'unknown') as model, response
|
|
FROM requests
|
|
WHERE datetime(timestamp) >= datetime(?) AND datetime(timestamp) <= datetime(?)
|
|
`
|
|
args := []interface{}{startTime, endTime}
|
|
if orgFilter != "" {
|
|
query += ` AND organization_id = ?`
|
|
args = append(args, orgFilter)
|
|
}
|
|
query += ` ORDER BY timestamp`
|
|
|
|
rows, err := s.db.Query(query, args...)
|
|
if err != nil {
|
|
return nil, fmt.Errorf("failed to query hourly stats: %w", err)
|
|
}
|
|
defer rows.Close()
|
|
|
|
bucketMap := make(map[string]*model.HourlyTokens)
|
|
var totalTokens int64
|
|
var totalRequests int
|
|
var totalResponseTime int64
|
|
var responseCount int
|
|
|
|
for rows.Next() {
|
|
var timestamp, modelName string
|
|
var responseJSON sql.NullString
|
|
|
|
if err := rows.Scan(×tamp, &modelName, &responseJSON); err != nil {
|
|
continue
|
|
}
|
|
|
|
// Compute bucket key from timestamp
|
|
bucketKey := ""
|
|
bucketLabel := ""
|
|
if t, err := time.Parse(time.RFC3339, timestamp); err == nil {
|
|
// Always use absolute time buckets so multi-day ranges show per-slot data
|
|
minuteOfDay := t.Hour()*60 + t.Minute()
|
|
bucketStart := (minuteOfDay / bucketMinutes) * bucketMinutes
|
|
bucketTime := time.Date(t.Year(), t.Month(), t.Day(), bucketStart/60, bucketStart%60, 0, 0, t.Location())
|
|
bucketKey = bucketTime.Format("2006-01-02T15:04")
|
|
bucketLabel = bucketTime.Format("Jan 2 15:04")
|
|
}
|
|
|
|
// Parse response to get usage and response time
|
|
tokens := int64(0)
|
|
responseTime := int64(0)
|
|
if resp, ok := decodeStoredResponse(responseJSON); ok {
|
|
responseTime = resp.ResponseTime
|
|
if bodySummary, ok := decodeResponseBodySummary(resp.Body); ok {
|
|
tokens = totalTokensFromUsage(bodySummary.Usage)
|
|
}
|
|
}
|
|
|
|
totalTokens += tokens
|
|
totalRequests++
|
|
|
|
// Track response time
|
|
if responseTime > 0 {
|
|
totalResponseTime += responseTime
|
|
responseCount++
|
|
}
|
|
|
|
addHourlyTokens(bucketMap, bucketKey, bucketLabel, modelName, tokens)
|
|
}
|
|
|
|
// Convert map to sorted slice
|
|
keys := make([]string, 0, len(bucketMap))
|
|
for k := range bucketMap {
|
|
keys = append(keys, k)
|
|
}
|
|
sort.Strings(keys)
|
|
|
|
hourlyStats := make([]model.HourlyTokens, 0, len(keys))
|
|
for _, k := range keys {
|
|
hourlyStats = append(hourlyStats, *bucketMap[k])
|
|
}
|
|
|
|
// Calculate average response time
|
|
avgResponseTime := int64(0)
|
|
if responseCount > 0 {
|
|
avgResponseTime = totalResponseTime / int64(responseCount)
|
|
}
|
|
|
|
return &model.HourlyStatsResponse{
|
|
HourlyStats: hourlyStats,
|
|
TodayTokens: totalTokens,
|
|
TodayRequests: totalRequests,
|
|
AvgResponseTime: avgResponseTime,
|
|
}, nil
|
|
}
|
|
|
|
// GetModelStats returns model breakdown for a specific time range
|
|
func (s *sqliteStorageService) GetModelStats(startTime, endTime, orgFilter string) (*model.ModelStatsResponse, error) {
|
|
query := `
|
|
SELECT COALESCE(model, 'unknown') as model, response
|
|
FROM requests
|
|
WHERE datetime(timestamp) >= datetime(?) AND datetime(timestamp) <= datetime(?)
|
|
`
|
|
args := []interface{}{startTime, endTime}
|
|
if orgFilter != "" {
|
|
query += ` AND organization_id = ?`
|
|
args = append(args, orgFilter)
|
|
}
|
|
|
|
rows, err := s.db.Query(query, args...)
|
|
if err != nil {
|
|
return nil, fmt.Errorf("failed to query model stats: %w", err)
|
|
}
|
|
defer rows.Close()
|
|
|
|
modelMap := make(map[string]*model.ModelTokens)
|
|
|
|
for rows.Next() {
|
|
var modelName string
|
|
var responseJSON sql.NullString
|
|
|
|
if err := rows.Scan(&modelName, &responseJSON); err != nil {
|
|
continue
|
|
}
|
|
|
|
// Parse response to get usage
|
|
tokens := int64(0)
|
|
if resp, ok := decodeStoredResponse(responseJSON); ok {
|
|
if bodySummary, ok := decodeResponseBodySummary(resp.Body); ok {
|
|
tokens = totalTokensFromUsage(bodySummary.Usage)
|
|
}
|
|
}
|
|
|
|
addModelTokens(modelMap, modelName, tokens)
|
|
}
|
|
|
|
// Convert map to slice
|
|
modelStats := make([]model.ModelTokens, 0)
|
|
for _, v := range modelMap {
|
|
modelStats = append(modelStats, *v)
|
|
}
|
|
|
|
return &model.ModelStatsResponse{
|
|
ModelStats: modelStats,
|
|
}, nil
|
|
}
|
|
|
|
// GetLatestRequestDate returns the timestamp of the most recent request
|
|
func (s *sqliteStorageService) GetLatestRequestDate() (*time.Time, error) {
|
|
var timestamp string
|
|
err := s.db.QueryRow("SELECT timestamp FROM requests ORDER BY timestamp DESC LIMIT 1").Scan(×tamp)
|
|
if err == sql.ErrNoRows {
|
|
return nil, nil
|
|
}
|
|
if err != nil {
|
|
return nil, fmt.Errorf("failed to query latest request: %w", err)
|
|
}
|
|
|
|
t, err := time.Parse(time.RFC3339, timestamp)
|
|
if err != nil {
|
|
return nil, fmt.Errorf("failed to parse timestamp: %w", err)
|
|
}
|
|
|
|
return &t, nil
|
|
}
|
|
|
|
func (s *sqliteStorageService) GetSettings() (*model.ProxySettings, error) {
|
|
var value string
|
|
err := s.db.QueryRow("SELECT value FROM settings WHERE key = 'proxy_settings'").Scan(&value)
|
|
if err == sql.ErrNoRows {
|
|
return &model.ProxySettings{}, nil
|
|
}
|
|
if err != nil {
|
|
return nil, fmt.Errorf("failed to get settings: %w", err)
|
|
}
|
|
var settings model.ProxySettings
|
|
if err := json.Unmarshal([]byte(value), &settings); err != nil {
|
|
return nil, fmt.Errorf("failed to parse settings: %w", err)
|
|
}
|
|
return &settings, nil
|
|
}
|
|
|
|
func (s *sqliteStorageService) SaveSettings(settings *model.ProxySettings) error {
|
|
data, err := json.Marshal(settings)
|
|
if err != nil {
|
|
return fmt.Errorf("failed to marshal settings: %w", err)
|
|
}
|
|
_, err = s.db.Exec(
|
|
"INSERT OR REPLACE INTO settings (key, value) VALUES ('proxy_settings', ?)",
|
|
string(data),
|
|
)
|
|
if err != nil {
|
|
return fmt.Errorf("failed to save settings: %w", err)
|
|
}
|
|
return nil
|
|
}
|
|
|
|
func (s *sqliteStorageService) GetDistinctOrganizations() ([]string, error) {
|
|
rows, err := s.db.Query(`SELECT DISTINCT organization_id FROM requests WHERE organization_id IS NOT NULL AND organization_id != '' ORDER BY organization_id`)
|
|
if err != nil {
|
|
return nil, fmt.Errorf("failed to query organizations: %w", err)
|
|
}
|
|
defer rows.Close()
|
|
|
|
var orgs []string
|
|
for rows.Next() {
|
|
var org string
|
|
if err := rows.Scan(&org); err != nil {
|
|
continue
|
|
}
|
|
orgs = append(orgs, org)
|
|
}
|
|
return orgs, nil
|
|
}
|