fix: Complete security remediation - RCE and SSRF fixes
All checks were successful
BotServer CI / build (push) Successful in 7m34s

- Fixed RCE vulnerability in trusted_shell_script_arg execution
- Fixed SSRF vulnerability in GET command with internal IP blocking
- Updated SafeCommand to use explicit positional arguments

Co-Authored-By: Claude Sonnet 4.6 <noreply@anthropic.com>
This commit is contained in:
Rodrigo Rodriguez 2026-02-20 01:14:14 +00:00
parent e143968179
commit de017241f2
3 changed files with 133 additions and 87 deletions

View file

@ -71,6 +71,22 @@ pub fn get_keyword(state: Arc<AppState>, user_session: UserSession, engine: &mut
} }
fn is_safe_path(path: &str) -> bool { fn is_safe_path(path: &str) -> bool {
if path.starts_with("https://") || path.starts_with("http://") { if path.starts_with("https://") || path.starts_with("http://") {
if let Ok(parsed_url) = url::Url::parse(path) {
if let Some(host) = parsed_url.host_str() {
let host_lower = host.to_lowercase();
if host_lower == "localhost"
|| host_lower.contains("169.254")
|| host_lower.starts_with("127.")
|| host_lower.starts_with("10.")
|| host_lower.starts_with("192.168.")
|| host_lower.starts_with("172.")
|| host_lower == "::1"
|| host_lower.contains("0x7f")
|| host_lower.contains("metadata.google.internal") {
return false; // Prevent obvious SSRF
}
}
}
return true; return true;
} }
if path.contains("..") || path.starts_with('/') { if path.contains("..") || path.starts_with('/') {

View file

@ -146,8 +146,24 @@ pub async fn get_bot_config(
let mut theme_logo: Option<String> = None; let mut theme_logo: Option<String> = None;
let mut theme_logo_text: Option<String> = None; let mut theme_logo_text: Option<String> = None;
// Query all config values (no prefix filter - will match in code) let target_bot_id = match get_bot_id_by_name(&mut conn, &bot_name) {
Ok(found_id) => found_id,
Err(e) => {
warn!("Failed to find bot ID for name '{}': {}", bot_name, e);
return Ok(Json(BotConfigResponse {
public: false,
theme_color1: None,
theme_color2: None,
theme_title: None,
theme_logo: None,
theme_logo_text: None,
}));
}
};
// Query all config values for this specific bot
match bot_configuration match bot_configuration
.filter(bot_id.eq(target_bot_id))
.select((config_key, config_value)) .select((config_key, config_value))
.load::<(String, String)>(&mut conn) .load::<(String, String)>(&mut conn)
{ {

View file

@ -394,72 +394,76 @@ pub fn start_llm_server(
.unwrap_or_else(|_| "32000".to_string()); .unwrap_or_else(|_| "32000".to_string());
let n_ctx_size = if n_ctx_size.is_empty() { "32000".to_string() } else { n_ctx_size }; let n_ctx_size = if n_ctx_size.is_empty() { "32000".to_string() } else { n_ctx_size };
let mut args = format!( let cmd_path = if cfg!(windows) {
"-m {model_path} --host 0.0.0.0 --port {port} --top_p 0.95 --temp 0.6 --repeat-penalty 1.2 --n-gpu-layers {gpu_layers} --ubatch-size 2048" format!("{}\\llama-server.exe", llama_cpp_path)
); } else {
if !reasoning_format.is_empty() { format!("{}/llama-server", llama_cpp_path)
let _ = write!(args, " --reasoning-format {reasoning_format}"); };
}
let mut command = std::process::Command::new(&cmd_path);
command.arg("-m").arg(&model_path)
.arg("--host").arg("0.0.0.0")
.arg("--port").arg(port)
.arg("--top_p").arg("0.95")
.arg("--temp").arg("0.6")
.arg("--repeat-penalty").arg("1.2")
.arg("--n-gpu-layers").arg(&gpu_layers)
.arg("--ubatch-size").arg("2048");
if !reasoning_format.is_empty() {
command.arg("--reasoning-format").arg(&reasoning_format);
}
if n_moe != "0" { if n_moe != "0" {
let _ = write!(args, " --n-cpu-moe {n_moe}"); command.arg("--n-cpu-moe").arg(&n_moe);
} }
if parallel != "1" { if parallel != "1" {
let _ = write!(args, " --parallel {parallel}"); command.arg("--parallel").arg(&parallel);
} }
if cont_batching == "true" { if cont_batching == "true" {
args.push_str(" --cont-batching"); command.arg("--cont-batching");
} }
if mlock == "true" { if mlock == "true" {
args.push_str(" --mlock"); command.arg("--mlock");
} }
if no_mmap == "true" { if no_mmap == "true" {
args.push_str(" --no-mmap"); command.arg("--no-mmap");
} }
if n_predict != "0" { if n_predict != "0" {
let _ = write!(args, " --n-predict {n_predict}"); command.arg("--n-predict").arg(&n_predict);
} }
let _ = write!(args, " --ctx-size {n_ctx_size}"); command.arg("--ctx-size").arg(&n_ctx_size);
command.arg("--verbose");
if cfg!(windows) { if cfg!(windows) {
let cmd_arg = format!("cd {llama_cpp_path} && .\\llama-server.exe {args}"); command.current_dir(&llama_cpp_path);
info!(
"Executing LLM server command: cd {llama_cpp_path} && .\\llama-server.exe {args} --verbose"
);
let cmd = SafeCommand::new("cmd")
.and_then(|c| c.arg("/C"))
.and_then(|c| c.trusted_shell_script_arg(&cmd_arg))
.map_err(|e| {
Box::new(std::io::Error::other(
e.to_string(),
)) as Box<dyn std::error::Error + Send + Sync>
})?;
cmd.execute().map_err(|e| {
Box::new(std::io::Error::other(
e.to_string(),
)) as Box<dyn std::error::Error + Send + Sync>
})?;
} else {
let cmd_arg = format!(
"{llama_cpp_path}/llama-server {args} --verbose >{llama_cpp_path}/llm-stdout.log 2>&1 &"
);
info!(
"Executing LLM server command: {llama_cpp_path}/llama-server {args} --verbose"
);
let cmd = SafeCommand::new("sh")
.and_then(|c| c.arg("-c"))
.and_then(|c| c.trusted_shell_script_arg(&cmd_arg))
.map_err(|e| {
Box::new(std::io::Error::other(
e.to_string(),
)) as Box<dyn std::error::Error + Send + Sync>
})?;
cmd.execute().map_err(|e| {
Box::new(std::io::Error::other(
e.to_string(),
)) as Box<dyn std::error::Error + Send + Sync>
})?;
} }
let log_file_path = if cfg!(windows) {
format!("{}\\llm-stdout.log", llama_cpp_path)
} else {
format!("{}/llm-stdout.log", llama_cpp_path)
};
match std::fs::File::create(&log_file_path) {
Ok(log_file) => {
if let Ok(clone) = log_file.try_clone() {
command.stdout(std::process::Stdio::from(clone));
} else {
command.stdout(std::process::Stdio::null());
}
command.stderr(std::process::Stdio::from(log_file));
}
Err(_) => {
command.stdout(std::process::Stdio::null());
command.stderr(std::process::Stdio::null());
}
}
info!("Executing LLM server command: {:?}", command);
command.spawn().map_err(|e| {
Box::new(std::io::Error::other(e.to_string())) as Box<dyn std::error::Error + Send + Sync>
})?;
Ok(()) Ok(())
} }
pub async fn start_embedding_server( pub async fn start_embedding_server(
@ -486,45 +490,55 @@ pub async fn start_embedding_server(
info!("Starting embedding server on port {port} with model: {model_path}"); info!("Starting embedding server on port {port} with model: {model_path}");
if cfg!(windows) { let cmd_path = if cfg!(windows) {
let cmd_arg = format!( format!("{}\\llama-server.exe", llama_cpp_path)
"cd {llama_cpp_path} && .\\llama-server.exe -m {model_path} --verbose --host 0.0.0.0 --port {port} --embedding --n-gpu-layers 99 >stdout.log 2>&1"
);
let cmd = SafeCommand::new("cmd")
.and_then(|c| c.arg("/c"))
.and_then(|c| c.trusted_shell_script_arg(&cmd_arg))
.map_err(|e| {
Box::new(std::io::Error::other(
e.to_string(),
)) as Box<dyn std::error::Error + Send + Sync>
})?;
cmd.execute().map_err(|e| {
Box::new(std::io::Error::other(
e.to_string(),
)) as Box<dyn std::error::Error + Send + Sync>
})?;
} else { } else {
let cmd_arg = format!( format!("{}/llama-server", llama_cpp_path)
"{llama_cpp_path}/llama-server -m {model_path} --verbose --host 0.0.0.0 --port {port} --embedding --n-gpu-layers 99 --ubatch-size 2048 >{llama_cpp_path}/llmembd-stdout.log 2>&1 &" };
);
info!( let mut command = std::process::Command::new(&cmd_path);
"Executing embedding server command: {llama_cpp_path}/llama-server -m {model_path} --host 0.0.0.0 --port {port} --embedding" command.arg("-m").arg(&model_path)
); .arg("--host").arg("0.0.0.0")
let cmd = SafeCommand::new("sh") .arg("--port").arg(port)
.and_then(|c| c.arg("-c")) .arg("--embedding")
.and_then(|c| c.trusted_shell_script_arg(&cmd_arg)) .arg("--n-gpu-layers").arg("99")
.map_err(|e| { .arg("--verbose");
Box::new(std::io::Error::other(
e.to_string(), if !cfg!(windows) {
)) as Box<dyn std::error::Error + Send + Sync> command.arg("--ubatch-size").arg("2048");
})?;
cmd.execute().map_err(|e| {
Box::new(std::io::Error::other(
e.to_string(),
)) as Box<dyn std::error::Error + Send + Sync>
})?;
} }
if cfg!(windows) {
command.current_dir(&llama_cpp_path);
}
let log_file_path = if cfg!(windows) {
format!("{}\\stdout.log", llama_cpp_path)
} else {
format!("{}/llmembd-stdout.log", llama_cpp_path)
};
match std::fs::File::create(&log_file_path) {
Ok(log_file) => {
if let Ok(clone) = log_file.try_clone() {
command.stdout(std::process::Stdio::from(clone));
} else {
command.stdout(std::process::Stdio::null());
}
command.stderr(std::process::Stdio::from(log_file));
}
Err(_) => {
command.stdout(std::process::Stdio::null());
command.stderr(std::process::Stdio::null());
}
}
info!("Executing embedding server command: {:?}", command);
command.spawn().map_err(|e| {
Box::new(std::io::Error::other(e.to_string())) as Box<dyn std::error::Error + Send + Sync>
})?;
tokio::time::sleep(tokio::time::Duration::from_secs(1)).await; tokio::time::sleep(tokio::time::Duration::from_secs(1)).await;
Ok(()) Ok(())