feat(automation): refactor prompt construction for summarization
Refactored the prompt construction in compact_prompt.rs to use a single formatted string instead of multiple JSON messages. The conversation is now built as a single string with clear formatting markers, and the role names are more readable (User/Bot instead of user/bot). Also removed a trailing slash from the OpenAI API endpoint URL in llm/mod.rs for consistency. The changes improve readability of the prompt structure and ensure consistent API endpoint formatting. The summarization request is more clearly formatted for the LLM while maintaining the same functionality.
This commit is contained in:
parent
6a31e65842
commit
3ffc005cbd
2 changed files with 14 additions and 11 deletions
|
|
@@ -101,21 +101,24 @@ async fn compact_prompt_for_bots(
|
|||
messages_since_summary
|
||||
);
|
||||
|
||||
let mut messages = Vec::new();
|
||||
messages.push(serde_json::json!({
|
||||
"role": "system",
|
||||
"content": "Please summarize the following conversation between a user and a bot"
|
||||
}));
|
||||
|
||||
let mut conversation = String::new();
|
||||
conversation.push_str("Please summarize this conversation between user and bot: \n\n [[[***** \n");
|
||||
|
||||
for (role, content) in history.iter().skip(start_index) {
|
||||
if role == "compact" {
|
||||
continue;
|
||||
}
|
||||
messages.push(serde_json::json!({
|
||||
"role": role,
|
||||
"content": content
|
||||
}));
|
||||
conversation.push_str(&format!("{}: {}\n",
|
||||
if role == "user" { "User" } else { "Bot" },
|
||||
content
|
||||
));
|
||||
}
|
||||
conversation.push_str("\n *****]]] \n Give me full points only, no explanations.");
|
||||
|
||||
let messages = vec![serde_json::json!({
|
||||
"role": "user",
|
||||
"content": conversation
|
||||
})];
|
||||
|
||||
let llm_provider = state.llm_provider.clone();
|
||||
trace!("Starting summarization for session {}", session.id);
|
||||
|
|
|
|||
|
|
@@ -45,7 +45,7 @@ impl LLMProvider for OpenAIClient {
|
|||
let default_messages = serde_json::json!([{"role": "user", "content": prompt}]);
|
||||
let response = self
|
||||
.client
|
||||
.post(&format!("{}/v1/chat/completions/", self.base_url))
|
||||
.post(&format!("{}/v1/chat/completions", self.base_url))
|
||||
.header("Authorization", format!("Bearer {}", self.api_key))
|
||||
.json(&serde_json::json!({
|
||||
"model": "gpt-3.5-turbo",
|
||||
|
|
|
|||
Loading…
Add table
Reference in a new issue