feat: handle 429 rate limit in OpenAI non-stream generate
All checks were successful
BotServer CI / build (push) Successful in 11m7s
All checks were successful
BotServer CI / build (push) Successful in 11m7s
This commit is contained in:
parent
f34d401c2e
commit
786d404938
1 changed files with 6 additions and 0 deletions
|
|
@@ -303,6 +303,12 @@ impl LLMProvider for OpenAIClient {
|
|||
let status = response.status();
|
||||
if status != reqwest::StatusCode::OK {
|
||||
let error_text = response.text().await.unwrap_or_default();
|
||||
|
||||
// Handle 429 rate limit with user-friendly message
|
||||
if status == reqwest::StatusCode::TOO_MANY_REQUESTS {
|
||||
return Ok("Server is busy, please try again in a few seconds...".to_string());
|
||||
}
|
||||
|
||||
error!("LLM generate error: {}", error_text);
|
||||
return Err(format!("LLM request failed with status: {}", status).into());
|
||||
}
|
||||
|
|
|
|||
Loading…
Add table
Reference in a new issue