From 6ac13ffdad25872a963a688fd26dffe75d01074d Mon Sep 17 00:00:00 2001
From: Andreas Haida
Date: Sun, 3 May 2026 18:44:03 +0200
Subject: [PATCH] Handle OpenAI token-limit errors as context-window failures

---
 rust/crates/api/src/error.rs             | 25 ++++++++++++++++++
 rust/crates/rusty-claude-cli/src/main.rs | 33 ++++++++++++++++++++++++
 2 files changed, 58 insertions(+)

diff --git a/rust/crates/api/src/error.rs b/rust/crates/api/src/error.rs
index 836f46e0..21d980af 100644
--- a/rust/crates/api/src/error.rs
+++ b/rust/crates/api/src/error.rs
@@ -14,6 +14,11 @@ const CONTEXT_WINDOW_ERROR_MARKERS: &[&str] = &[
     "too many tokens",
     "prompt is too long",
     "input is too long",
+    "input tokens exceed",
+    "configured limit",
+    "messages resulted in",
+    "completion tokens",
+    "prompt tokens",
     "request is too large",
 ];
 
@@ -542,6 +547,26 @@ mod tests {
         assert_eq!(error.request_id(), Some("req_ctx_123"));
     }
 
+    #[test]
+    fn classifies_openai_configured_limit_errors_as_context_window_failures() {
+        let error = ApiError::Api {
+            status: reqwest::StatusCode::BAD_REQUEST,
+            error_type: Some("invalid_request_error".to_string()),
+            message: Some(
+                "Input tokens exceed the configured limit of 922000 tokens. Your messages resulted in 1860900 tokens. Please reduce the length of the messages."
+                    .to_string(),
+            ),
+            request_id: Some("req_ctx_openai_123".to_string()),
+            body: String::new(),
+            retryable: false,
+            suggested_action: None,
+        };
+
+        assert!(error.is_context_window_failure());
+        assert_eq!(error.safe_failure_class(), "context_window");
+        assert_eq!(error.request_id(), Some("req_ctx_openai_123"));
+    }
+
     #[test]
     fn missing_credentials_without_hint_renders_the_canonical_message() {
         // given
diff --git a/rust/crates/rusty-claude-cli/src/main.rs b/rust/crates/rusty-claude-cli/src/main.rs
index dbdbd07b..e2c889e9 100644
--- a/rust/crates/rusty-claude-cli/src/main.rs
+++ b/rust/crates/rusty-claude-cli/src/main.rs
@@ -9424,6 +9424,39 @@ mod tests {
         );
     }
 
+    #[test]
+    fn openai_configured_limit_errors_are_rendered_as_context_window_guidance() {
+        let error = ApiError::Api {
+            status: "400".parse().expect("status"),
+            error_type: Some("invalid_request_error".to_string()),
+            message: Some(
+                "Input tokens exceed the configured limit of 922000 tokens. Your messages resulted in 1860900 tokens. Please reduce the length of the messages."
+                    .to_string(),
+            ),
+            request_id: Some("req_ctx_openai_456".to_string()),
+            body: String::new(),
+            retryable: false,
+            suggested_action: None,
+        };
+
+        let rendered = format_user_visible_api_error("session-issue-32", &error);
+        assert!(rendered.contains("Context window blocked"), "{rendered}");
+        assert!(rendered.contains("context_window_blocked"), "{rendered}");
+        assert!(
+            rendered.contains("Trace req_ctx_openai_456"),
+            "{rendered}"
+        );
+        assert!(
+            rendered.contains("Detail Input tokens exceed the configured limit of 922000 tokens."),
+            "{rendered}"
+        );
+        assert!(rendered.contains("Compact /compact"), "{rendered}");
+        assert!(
+            rendered.contains("Fresh session /clear --confirm"),
+            "{rendered}"
+        );
+    }
+
     #[test]
     fn retry_wrapped_context_window_errors_keep_recovery_guidance() {
         let error = ApiError::RetriesExhausted {