feat(automation): improve prompt handling and message processing
- Add initial instruction to compact_prompt_for_bots summary request
- Store processed content separately before formatting as summary
- Save filtered content instead of formatted summary in session manager
- Remove max_tokens limit from OpenAI client request
- Refactor message parsing logic to avoid empty content messages
- Improve role-based message handling in OpenAIClient
parent 035d867c2f
commit be87cc82b5

2 changed files with 17 additions and 13 deletions
@@ -101,7 +101,7 @@ async fn compact_prompt_for_bots(
         messages_since_summary
     );
 
-    let mut compacted = String::new();
+    let mut compacted = "Please summarize the following conversation between a human and an AI assistant:\n".to_string();
 
     // Include messages from start_index onward
     let messages_to_include = history.iter().skip(start_index);
@@ -114,6 +114,7 @@ async fn compact_prompt_for_bots(
     }
     let llm_provider = state.llm_provider.clone();
     trace!("Starting summarization for session {}", session.id);
+    let mut filtered = String::new();
     let summarized = match llm_provider.generate(&compacted, &serde_json::Value::Null).await {
         Ok(summary) => {
             trace!(
@@ -128,7 +129,7 @@ async fn compact_prompt_for_bots(
                 .unwrap().as_str(),
             );
 
-            let filtered = handler.process_content(&summary);
+            filtered = handler.process_content(&summary);
             format!("SUMMARY: {}", filtered)
         }
         Err(e) => {
@@ -147,7 +148,7 @@ async fn compact_prompt_for_bots(
     );
     {
         let mut session_manager = state.session_manager.lock().await;
-        session_manager.save_message(session.id, session.user_id, 9, &summarized, 1)?;
+        session_manager.save_message(session.id, session.user_id, 9, &filtered, 1)?;
     }
 
     let _session_cleanup = guard((), |_| {
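Taken together, the three hunks above change what gets persisted: `filtered` is hoisted out of the match so the processed text outlives the `Ok` arm, and `save_message` now stores it rather than the display-formatted `SUMMARY: ...` string. A sketch of the flow, with the provider and handler replaced by stand-in stubs:

    fn summarize_and_store(compacted: &str) -> (String, String) {
        // Declared before the match so the save step can use it even though
        // it is only assigned inside the Ok arm.
        let mut filtered = String::new();
        let summarized = match generate(compacted) {
            Ok(summary) => {
                filtered = process_content(&summary);
                format!("SUMMARY: {}", filtered) // display-formatted variant
            }
            Err(e) => format!("summarization failed: {e}"),
        };
        // After this change the persisted value is `filtered`, not `summarized`.
        (summarized, filtered)
    }

    // Stand-ins for the provider call and content handler in the diff.
    fn generate(prompt: &str) -> Result<String, String> {
        Ok(prompt.to_string())
    }

    fn process_content(summary: &str) -> String {
        summary.trim().to_string()
    }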
@@ -81,7 +81,6 @@ impl LLMProvider for OpenAIClient {
             .json(&serde_json::json!({
                 "model": "gpt-3.5-turbo",
                 "messages": messages,
-                "max_tokens": 1000,
                 "stream": true
             }))
             .send()
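With `max_tokens` gone, completions are no longer capped at 1000 tokens; omitting the field lets the OpenAI chat-completions API use its default budget, up to the model's remaining context window. A sketch of the resulting call, assuming a plain reqwest client (the function name and signature here are illustrative, not the crate's API):

    use reqwest::Client;
    use serde_json::Value;

    async fn send_chat_request(
        client: &Client,
        api_key: &str,
        messages: &[Value],
    ) -> reqwest::Result<reqwest::Response> {
        client
            .post("https://api.openai.com/v1/chat/completions")
            .bearer_auth(api_key)
            .json(&serde_json::json!({
                "model": "gpt-3.5-turbo",
                "messages": messages,
                // "max_tokens" intentionally omitted: no explicit completion cap.
                "stream": true
            }))
            .send()
            .await
    }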
@@ -135,18 +134,20 @@ impl OpenAIClient {
                 "compact" => "system",
                 _ => continue
             };
-            {
-                if let Some(r) = current_role.take() {
+            if let Some(r) = current_role.take() {
+                if !current_content.is_empty() {
                     messages.push(serde_json::json!({
                         "role": r,
                         "content": current_content.trim()
                     }));
                 }
-                current_role = Some(role);
-                current_content = line[role_end + 1..].trim_start().to_string();
-                continue;
             }
+
+            current_role = Some(role);
+            current_content = line[role_end + 1..].trim_start().to_string();
+            continue;
         }
         if let Some(_) = current_role {
             if !current_content.is_empty() {
                 current_content.push('\n');
@@ -156,10 +157,12 @@ impl OpenAIClient {
         }
 
         if let Some(role) = current_role {
-            messages.push(serde_json::json!({
-                "role": role,
-                "content": current_content.trim()
-            }));
+            if !current_content.is_empty() {
+                messages.push(serde_json::json!({
+                    "role": role,
+                    "content": current_content.trim()
+                }));
+            }
         }
         messages
     }
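The two hunks above guard both flush points, the role switch inside the loop and the final flush after it, so blank bodies never become chat messages. A self-contained sketch of the refactored parser under the assumed "role: content" line format (`parse_messages` is a hypothetical free function; the real method lives on `OpenAIClient`, and this version appends unrecognized lines to the current message instead of skipping them):

    use serde_json::{json, Value};

    fn parse_messages(prompt: &str) -> Vec<Value> {
        let mut messages = Vec::new();
        let mut current_role: Option<&str> = None;
        let mut current_content = String::new();

        for line in prompt.lines() {
            // Recognize "user:", "assistant:", and "compact:" prefixes;
            // "compact" maps to a system message, as in the diff.
            let prefix = line.find(':').and_then(|i| match &line[..i] {
                "user" => Some(("user", i)),
                "assistant" => Some(("assistant", i)),
                "compact" => Some(("system", i)),
                _ => None,
            });

            if let Some((role, role_end)) = prefix {
                // Flush the previous message, but only if it has content.
                if let Some(r) = current_role.take() {
                    if !current_content.is_empty() {
                        messages.push(json!({ "role": r, "content": current_content.trim() }));
                    }
                }
                current_role = Some(role);
                current_content = line[role_end + 1..].trim_start().to_string();
            } else if current_role.is_some() {
                // Continuation line: append to the current message body.
                if !current_content.is_empty() {
                    current_content.push('\n');
                }
                current_content.push_str(line);
            }
        }

        // Final flush, again skipping empty content.
        if let Some(role) = current_role {
            if !current_content.is_empty() {
                messages.push(json!({ "role": role, "content": current_content.trim() }));
            }
        }
        messages
    }

With the guards in place, an input like "user: hi\nassistant:" yields only the user message, since the assistant line carries no content.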