Files
CLIProxyAPI/internal/runtime/executor/helps/usage_helpers_test.go
Luis Pater e50cabac4b chore: upgrade CLIProxyAPI dependency to v7 across the project
- Updated all references from v6 to v7 for `github.com/router-for-me/CLIProxyAPI`.
- Ensured consistency in imports within core libraries, tests, and integration tests.
- Added missing tests for new features in Redis Protocol integration.
2026-05-08 11:46:46 +08:00

141 lines
4.9 KiB
Go

package helps
import (
"context"
"testing"
"time"
"github.com/router-for-me/CLIProxyAPI/v7/sdk/cliproxy/usage"
)
// TestParseOpenAIUsageChatCompletions verifies that a Chat Completions
// style usage payload (prompt_tokens / completion_tokens plus the nested
// cached/reasoning detail objects) populates every field of the parsed
// usage detail.
func TestParseOpenAIUsageChatCompletions(t *testing.T) {
	payload := []byte(`{"usage":{"prompt_tokens":1,"completion_tokens":2,"total_tokens":3,"prompt_tokens_details":{"cached_tokens":4},"completion_tokens_details":{"reasoning_tokens":5}}}`)

	got := ParseOpenAIUsage(payload)

	// Each counter is asserted separately so the first failure names the
	// exact field that was mis-parsed.
	if got.InputTokens != 1 {
		t.Fatalf("input tokens = %d, want %d", got.InputTokens, 1)
	}
	if got.OutputTokens != 2 {
		t.Fatalf("output tokens = %d, want %d", got.OutputTokens, 2)
	}
	if got.TotalTokens != 3 {
		t.Fatalf("total tokens = %d, want %d", got.TotalTokens, 3)
	}
	if got.CachedTokens != 4 {
		t.Fatalf("cached tokens = %d, want %d", got.CachedTokens, 4)
	}
	if got.ReasoningTokens != 5 {
		t.Fatalf("reasoning tokens = %d, want %d", got.ReasoningTokens, 5)
	}
}
// TestParseOpenAIUsageResponses verifies that a Responses-API style usage
// payload (input_tokens / output_tokens with their nested detail objects)
// is mapped onto the same usage-detail fields as the Chat Completions shape.
func TestParseOpenAIUsageResponses(t *testing.T) {
	raw := []byte(`{"usage":{"input_tokens":10,"output_tokens":20,"total_tokens":30,"input_tokens_details":{"cached_tokens":7},"output_tokens_details":{"reasoning_tokens":9}}}`)

	parsed := ParseOpenAIUsage(raw)

	if parsed.InputTokens != 10 {
		t.Fatalf("input tokens = %d, want %d", parsed.InputTokens, 10)
	}
	if parsed.OutputTokens != 20 {
		t.Fatalf("output tokens = %d, want %d", parsed.OutputTokens, 20)
	}
	if parsed.TotalTokens != 30 {
		t.Fatalf("total tokens = %d, want %d", parsed.TotalTokens, 30)
	}
	if parsed.CachedTokens != 7 {
		t.Fatalf("cached tokens = %d, want %d", parsed.CachedTokens, 7)
	}
	if parsed.ReasoningTokens != 9 {
		t.Fatalf("reasoning tokens = %d, want %d", parsed.ReasoningTokens, 9)
	}
}
// TestParseGeminiCLIUsage_TopLevelUsageMetadata verifies that a Gemini CLI
// response carrying `usageMetadata` at the top level is decoded into the
// prompt, candidate, thought, total, and cached token counters.
func TestParseGeminiCLIUsage_TopLevelUsageMetadata(t *testing.T) {
	body := []byte(`{"usageMetadata":{"promptTokenCount":11,"candidatesTokenCount":7,"thoughtsTokenCount":3,"totalTokenCount":21,"cachedContentTokenCount":5}}`)

	got := ParseGeminiCLIUsage(body)

	if got.InputTokens != 11 {
		t.Fatalf("input tokens = %d, want %d", got.InputTokens, 11)
	}
	if got.OutputTokens != 7 {
		t.Fatalf("output tokens = %d, want %d", got.OutputTokens, 7)
	}
	if got.ReasoningTokens != 3 {
		t.Fatalf("reasoning tokens = %d, want %d", got.ReasoningTokens, 3)
	}
	if got.TotalTokens != 21 {
		t.Fatalf("total tokens = %d, want %d", got.TotalTokens, 21)
	}
	if got.CachedTokens != 5 {
		t.Fatalf("cached tokens = %d, want %d", got.CachedTokens, 5)
	}
}
// TestParseGeminiCLIStreamUsage_ResponseSnakeCaseUsageMetadata verifies
// that an SSE `data:` line whose usage metadata appears under the
// snake_case `response.usage_metadata` key is still recognized and parsed.
func TestParseGeminiCLIStreamUsage_ResponseSnakeCaseUsageMetadata(t *testing.T) {
	chunk := []byte(`data: {"response":{"usage_metadata":{"promptTokenCount":13,"candidatesTokenCount":2,"totalTokenCount":15}}}`)

	got, ok := ParseGeminiCLIStreamUsage(chunk)
	if !ok {
		t.Fatal("ParseGeminiCLIStreamUsage() ok = false, want true")
	}

	if got.InputTokens != 13 {
		t.Fatalf("input tokens = %d, want %d", got.InputTokens, 13)
	}
	if got.OutputTokens != 2 {
		t.Fatalf("output tokens = %d, want %d", got.OutputTokens, 2)
	}
	if got.TotalTokens != 15 {
		t.Fatalf("total tokens = %d, want %d", got.TotalTokens, 15)
	}
}
// TestParseGeminiCLIStreamUsage_IgnoresTrafficTypeOnlyUsageMetadata verifies
// that a usageMetadata object containing only `trafficType` — i.e. no token
// counters at all — is not reported as usage.
func TestParseGeminiCLIStreamUsage_IgnoresTrafficTypeOnlyUsageMetadata(t *testing.T) {
	chunk := []byte(`data: {"response":{"usageMetadata":{"trafficType":"ON_DEMAND"}}}`)

	detail, ok := ParseGeminiCLIStreamUsage(chunk)
	if ok {
		t.Fatalf("ParseGeminiCLIStreamUsage() = (%+v, true), want false for traffic-only usage metadata", detail)
	}
}
// TestUsageReporterBuildRecordIncludesLatency verifies that buildRecord
// measures latency from the reporter's requestedAt timestamp. The request
// is backdated ~1.5s, so the recorded latency must land inside a generous
// [1s, 3s] window (wide enough to absorb scheduler jitter on slow CI).
func TestUsageReporterBuildRecordIncludesLatency(t *testing.T) {
	reporter := &UsageReporter{
		provider:    "openai",
		model:       "gpt-5.4",
		requestedAt: time.Now().Add(-1500 * time.Millisecond),
	}

	rec := reporter.buildRecord(usage.Detail{TotalTokens: 3}, false)

	switch {
	case rec.Latency < time.Second:
		t.Fatalf("latency = %v, want >= 1s", rec.Latency)
	case rec.Latency > 3*time.Second:
		t.Fatalf("latency = %v, want <= 3s", rec.Latency)
	}
}
// TestUsageReporterBuildRecordIncludesRequestedModelAlias verifies that the
// client-requested model alias stored on the context is carried through
// NewUsageReporter into the record's Alias field, while Model keeps the
// upstream model name.
func TestUsageReporterBuildRecordIncludesRequestedModelAlias(t *testing.T) {
	ctx := usage.WithRequestedModelAlias(context.Background(), "client-gpt")

	rec := NewUsageReporter(ctx, "openai", "gpt-5.4", nil).buildRecord(usage.Detail{TotalTokens: 3}, false)

	if rec.Model != "gpt-5.4" {
		t.Fatalf("model = %q, want %q", rec.Model, "gpt-5.4")
	}
	if rec.Alias != "client-gpt" {
		t.Fatalf("alias = %q, want %q", rec.Alias, "client-gpt")
	}
}
// TestUsageReporterBuildAdditionalModelRecordSkipsZeroTokens verifies that
// buildAdditionalModelRecord drops usage where every token counter is zero,
// but keeps usage with any non-zero counter (input or cached).
func TestUsageReporterBuildAdditionalModelRecordSkipsZeroTokens(t *testing.T) {
	reporter := &UsageReporter{
		provider:    "codex",
		model:       "gpt-5.4",
		requestedAt: time.Now(),
	}

	cases := []struct {
		detail usage.Detail
		wantOK bool
		msg    string
	}{
		{usage.Detail{}, false, "expected all-zero token usage to be skipped"},
		{usage.Detail{InputTokens: 2}, true, "expected non-zero input token usage to be recorded"},
		{usage.Detail{CachedTokens: 2}, true, "expected non-zero cached token usage to be recorded"},
	}
	for _, tc := range cases {
		if _, ok := reporter.buildAdditionalModelRecord("gpt-image-2", tc.detail); ok != tc.wantOK {
			t.Fatal(tc.msg)
		}
	}
}