- New settings for llama.cpp.
Some checks failed
GBCI / build (push) Has been cancelled

This commit is contained in:
Rodrigo Rodriguez (Pragmatismo) 2025-09-12 09:22:24 -03:00
parent b682eead39
commit f10522bf76

View file

@ -193,7 +193,7 @@ async fn start_llm_server(
     let mut cmd = tokio::process::Command::new("sh");
     cmd.arg("-c").arg(format!(
-        "cd {} && ./llama-server -m {} --host 0.0.0.0 --port {} --n-gpu-layers 99 --mlock --no-mmap --threads 20 --threads-batch 40 &",
+        "cd {} && ./llama-server -m {} --host 0.0.0.0 --port {} --n-gpu-layers 99 &",
         llama_cpp_path, model_path, port
     ));