diff --git a/prompts/dev/platform/README.md b/prompts/dev/platform/README.md index 43a9d071..01d3a6b4 100644 --- a/prompts/dev/platform/README.md +++ b/prompts/dev/platform/README.md @@ -15,4 +15,5 @@ When initial attempts fail, sequentially try these LLMs: - **Change progression**: Start with DeepSeek, conclude with gpt-oss-120b - If a big req. fail, specify a @code file that has similar pattern or sample from official docs. - **Final validation**: Use prompt "cargo check" with gpt-oss-120b -- Be humble, one requirement, one commit. But sometimes, freedom of caos is welcome - when no deadlines are set. \ No newline at end of file +- Be humble, one requirement, one commit. But sometimes, freedom of chaos is welcome - when no deadlines are set. +- Keep only deployed and tested source in the codebase; no lab/experimental code in the main project. At a minimum, use optional features to introduce new behaviour gradually in PRODUCTION. diff --git a/src/bot/mod.rs b/src/bot/mod.rs index 9d12fdd5..9596e520 100644 --- a/src/bot/mod.rs +++ b/src/bot/mod.rs @@ -362,10 +362,13 @@ impl BotOrchestrator { let mut sm = self.state.session_manager.lock().await; sm.get_session_by_id(session_id)? } - .ok_or_else(|| { - error!("Failed to create session for streaming"); - "Failed to create session" - })?; + .ok_or_else(|| "Failed to create session")?; + + // Save user message to history + { + let mut sm = self.state.session_manager.lock().await; + sm.save_message(session.id, user_id, 1, &message.content, 1)?; + } if message.message_type == 4 { if let Some(context_name) = &message.context_name { @@ -598,7 +601,6 @@ io::stdout().flush().unwrap(); }; if response_tx.send(partial).await.is_err() { - warn!("Response channel closed, stopping stream processing"); break; } }