Update llama.cpp to b7345 with platform-specific builds and checksums
- Update 3rdparty.toml: llama.cpp b4547 -> b7345 with SHA256 checksums
- Add config/llm_releases.json with complete checksums for all 24 release assets
- Fix Windows binary naming in installer.rs (win-cpu-x64, win-cpu-arm64)
- Add Vulkan detection for Windows
- Add platform-specific variants: CUDA 12/13, Vulkan, HIP, SYCL, OpenCL
Parent: a8863d9051
Commit: b6d3e0a2d5

4 changed files with 348 additions and 14 deletions
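With checksums now recorded for every asset, a downloaded archive can be verified before it is unpacked. A minimal sketch of such a check, assuming the `sha2` crate; the helper name `verify_asset` is illustrative and does not exist in the repository:

```rust
use sha2::{Digest, Sha256};

/// Hypothetical helper: compare a downloaded asset against the expected
/// SHA-256 from 3rdparty.toml / config/llm_releases.json (hex-encoded).
fn verify_asset(bytes: &[u8], expected_hex: &str) -> bool {
    let digest = Sha256::digest(bytes);
    let actual: String = digest.iter().map(|b| format!("{:02x}", b)).collect();
    // llm_releases.json prefixes digests with "sha256:", 3rdparty.toml does not.
    actual == expected_hex.trim_start_matches("sha256:")
}

fn main() {
    let data = std::fs::read("llama-b7345-bin-ubuntu-x64.zip").expect("asset not downloaded");
    let ok = verify_asset(
        &data,
        "91b066ecc53c20693a2d39703c12bc7a69c804b0768fee064d47df702f616e52",
    );
    println!("checksum ok: {ok}");
}
```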
3rdparty.toml

@@ -47,9 +47,9 @@ sha256 = ""
 [components.llm]
 name = "Llama.cpp Server"
-url = "https://github.com/ggml-org/llama.cpp/releases/download/b4547/llama-b4547-bin-ubuntu-x64.zip"
-filename = "llama-b4547-bin-ubuntu-x64.zip"
-sha256 = ""
+url = "https://github.com/ggml-org/llama.cpp/releases/download/b7345/llama-b7345-bin-ubuntu-x64.zip"
+filename = "llama-b7345-bin-ubuntu-x64.zip"
+sha256 = "91b066ecc53c20693a2d39703c12bc7a69c804b0768fee064d47df702f616e52"

 [components.email]
 name = "Stalwart Mail Server"
@@ -145,6 +145,77 @@ url = "https://huggingface.co/CompendiumLabs/bge-small-en-v1.5-gguf/resolve/main
 filename = "bge-small-en-v1.5-f32.gguf"
 sha256 = ""

+# Platform-specific llama.cpp variants
+# =====================================
+# These are alternative builds for different platforms/GPU support
+
+[components.llm_linux_vulkan]
+name = "Llama.cpp Server (Linux Vulkan)"
+url = "https://github.com/ggml-org/llama.cpp/releases/download/b7345/llama-b7345-bin-ubuntu-vulkan-x64.zip"
+filename = "llama-b7345-bin-ubuntu-vulkan-x64.zip"
+sha256 = "03f0b3acbead2ddc23267073a8f8e0207937c849d3704c46c61cf167c1001442"
+
+[components.llm_linux_s390x]
+name = "Llama.cpp Server (Linux s390x)"
+url = "https://github.com/ggml-org/llama.cpp/releases/download/b7345/llama-b7345-bin-ubuntu-s390x.zip"
+filename = "llama-b7345-bin-ubuntu-s390x.zip"
+sha256 = "688ddad6996b1166eaaa76d5025e304c684116efe655e6e881d877505ecffccb"
+
+[components.llm_macos_arm64]
+name = "Llama.cpp Server (macOS ARM64)"
+url = "https://github.com/ggml-org/llama.cpp/releases/download/b7345/llama-b7345-bin-macos-arm64.zip"
+filename = "llama-b7345-bin-macos-arm64.zip"
+sha256 = "72ae9b4a4605aa1223d7aabaa5326c66c268b12d13a449fcc06f61099cd02a52"
+
+[components.llm_macos_x64]
+name = "Llama.cpp Server (macOS x64)"
+url = "https://github.com/ggml-org/llama.cpp/releases/download/b7345/llama-b7345-bin-macos-x64.zip"
+filename = "llama-b7345-bin-macos-x64.zip"
+sha256 = "bec6b805cf7533f66b38f29305429f521dcb2be6b25dbce73a18df448ec55cc5"
+
+[components.llm_win_cpu_x64]
+name = "Llama.cpp Server (Windows x64 CPU)"
+url = "https://github.com/ggml-org/llama.cpp/releases/download/b7345/llama-b7345-bin-win-cpu-x64.zip"
+filename = "llama-b7345-bin-win-cpu-x64.zip"
+sha256 = "ea449082c8e808a289d9a1e8331f90a0379ead4dd288a1b9a2d2c0a7151836cd"
+
+[components.llm_win_cpu_arm64]
+name = "Llama.cpp Server (Windows ARM64 CPU)"
+url = "https://github.com/ggml-org/llama.cpp/releases/download/b7345/llama-b7345-bin-win-cpu-arm64.zip"
+filename = "llama-b7345-bin-win-cpu-arm64.zip"
+sha256 = "91e3ff43c123c7c30decfe5a44c291827c1e47359abaa2fbad1eb5392b3a0d85"
+
+[components.llm_win_cuda12]
+name = "Llama.cpp Server (Windows CUDA 12.4)"
+url = "https://github.com/ggml-org/llama.cpp/releases/download/b7345/llama-b7345-bin-win-cuda-12.4-x64.zip"
+filename = "llama-b7345-bin-win-cuda-12.4-x64.zip"
+sha256 = "7a82aba2662fa7d4477a7a40894de002854bae1ab8b0039888577c9a2ca24cae"
+
+[components.llm_win_cuda13]
+name = "Llama.cpp Server (Windows CUDA 13.1)"
+url = "https://github.com/ggml-org/llama.cpp/releases/download/b7345/llama-b7345-bin-win-cuda-13.1-x64.zip"
+filename = "llama-b7345-bin-win-cuda-13.1-x64.zip"
+sha256 = "06ea715cefb07e9862394e6d1ffa066f4c33add536b1f1aa058723f86ae05572"
+
+[components.llm_win_vulkan]
+name = "Llama.cpp Server (Windows Vulkan)"
+url = "https://github.com/ggml-org/llama.cpp/releases/download/b7345/llama-b7345-bin-win-vulkan-x64.zip"
+filename = "llama-b7345-bin-win-vulkan-x64.zip"
+sha256 = "3e948bee438f46c8ea0a3faf0416549391ee945ffa624b25bc1f73d60d668679"
+
+# CUDA runtime libraries (required for CUDA builds on Windows)
+[components.cudart_win_cuda12]
+name = "CUDA Runtime (Windows CUDA 12.4)"
+url = "https://github.com/ggml-org/llama.cpp/releases/download/b7345/cudart-llama-bin-win-cuda-12.4-x64.zip"
+filename = "cudart-llama-bin-win-cuda-12.4-x64.zip"
+sha256 = "8c79a9b226de4b3cacfd1f83d24f962d0773be79f1e7b75c6af4ded7e32ae1d6"
+
+[components.cudart_win_cuda13]
+name = "CUDA Runtime (Windows CUDA 13.1)"
+url = "https://github.com/ggml-org/llama.cpp/releases/download/b7345/cudart-llama-bin-win-cuda-13.1-x64.zip"
+filename = "cudart-llama-bin-win-cuda-13.1-x64.zip"
+sha256 = "f96935e7e385e3b2d0189239077c10fe8fd7e95690fea4afec455b1b6c7e3f18"
+
 # Optional larger models (uncomment to include)
 # [models.gpt_oss_20b]
 # name = "GPT-OSS 20B F16 (requires 16GB+ VRAM)"
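For reference, a `[components.*]` entry like the ones above can be deserialized with the `toml` crate. The struct below mirrors the TOML keys but is otherwise an illustrative assumption, not code from the repository:

```rust
use serde::Deserialize;

// Illustrative shape for one [components.*] table; field names follow the TOML keys.
#[derive(Debug, Deserialize)]
struct Component {
    name: String,
    url: String,
    filename: String,
    sha256: String,
}

fn main() {
    let entry = r#"
        name = "Llama.cpp Server (Windows Vulkan)"
        url = "https://github.com/ggml-org/llama.cpp/releases/download/b7345/llama-b7345-bin-win-vulkan-x64.zip"
        filename = "llama-b7345-bin-win-vulkan-x64.zip"
        sha256 = "3e948bee438f46c8ea0a3faf0416549391ee945ffa624b25bc1f73d60d668679"
    "#;
    let c: Component = toml::from_str(entry).expect("valid TOML");
    println!("{} -> {}", c.filename, c.sha256);
}
```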
@@ -1,7 +1,7 @@
 {
   "base_url": "http://localhost:8080",
   "default_org": {
-    "id": "350429244142125070",
+    "id": "350450804827619342",
     "name": "default",
     "domain": "default.localhost"
   },
@@ -13,8 +13,8 @@
     "first_name": "Admin",
     "last_name": "User"
   },
-  "admin_token": "XASXP8scib6GgXIgRcntDTJfP1gfFc5fZImx-eaRBBTdZRPJD4WnlsrevtnPeDiNw3HeXxk",
+  "admin_token": "zBxbF2StNdIRTSPJI1q3ND7NFNeFQK5qoB8aS69bQSN3bwLvVi3jxTVFnEVDGur4kaltPgc",
   "project_id": "",
-  "client_id": "350429248856588302",
-  "client_secret": "BSvRlGBmckWfZAAZpkdlpccRF3sVJXsDIMDnTu8J58bFg8bzcMkkbDkBcMIBNdXm"
+  "client_id": "350450809542082574",
+  "client_secret": "bgUNSAXTzOwaZnyatVgWceBTLtQrySQc8BGrJb7sT1hMOeiKAtWwD7638fg7biRq"
 }
config/llm_releases.json (new file, 101 lines)
@@ -0,0 +1,101 @@
+{
+  "llama_cpp": {
+    "version": "b7345",
+    "base_url": "https://github.com/ggml-org/llama.cpp/releases/download",
+    "binaries": {
+      "linux": {
+        "x64": {
+          "cpu": "llama-{version}-bin-ubuntu-x64.zip",
+          "cpu_tar": "llama-{version}-bin-ubuntu-x64.tar.gz",
+          "vulkan": "llama-{version}-bin-ubuntu-vulkan-x64.zip",
+          "vulkan_tar": "llama-{version}-bin-ubuntu-vulkan-x64.tar.gz"
+        },
+        "s390x": {
+          "cpu": "llama-{version}-bin-ubuntu-s390x.zip",
+          "cpu_tar": "llama-{version}-bin-ubuntu-s390x.tar.gz"
+        }
+      },
+      "macos": {
+        "arm64": {
+          "cpu": "llama-{version}-bin-macos-arm64.zip",
+          "cpu_tar": "llama-{version}-bin-macos-arm64.tar.gz"
+        },
+        "x64": {
+          "cpu": "llama-{version}-bin-macos-x64.zip",
+          "cpu_tar": "llama-{version}-bin-macos-x64.tar.gz"
+        }
+      },
+      "windows": {
+        "x64": {
+          "cpu": "llama-{version}-bin-win-cpu-x64.zip",
+          "cuda_12": "llama-{version}-bin-win-cuda-12.4-x64.zip",
+          "cuda_13": "llama-{version}-bin-win-cuda-13.1-x64.zip",
+          "vulkan": "llama-{version}-bin-win-vulkan-x64.zip",
+          "sycl": "llama-{version}-bin-win-sycl-x64.zip",
+          "hip": "llama-{version}-bin-win-hip-radeon-x64.zip"
+        },
+        "arm64": {
+          "cpu": "llama-{version}-bin-win-cpu-arm64.zip",
+          "opencl_adreno": "llama-{version}-bin-win-opencl-adreno-arm64.zip"
+        }
+      },
+      "ios": {
+        "xcframework": "llama-{version}-xcframework.zip",
+        "xcframework_tar": "llama-{version}-xcframework.tar.gz"
+      }
+    },
+    "cuda_runtime": {
+      "windows": {
+        "cuda_12": "cudart-llama-bin-win-cuda-12.4-x64.zip",
+        "cuda_13": "cudart-llama-bin-win-cuda-13.1-x64.zip"
+      }
+    },
+    "checksums": {
+      "llama-b7345-bin-ubuntu-x64.zip": "sha256:91b066ecc53c20693a2d39703c12bc7a69c804b0768fee064d47df702f616e52",
+      "llama-b7345-bin-ubuntu-x64.tar.gz": "sha256:c5f4c8111887072a5687b42e0700116e93eddf14c5401fa7eba3ab0b8481ff4e",
+      "llama-b7345-bin-ubuntu-vulkan-x64.zip": "sha256:03f0b3acbead2ddc23267073a8f8e0207937c849d3704c46c61cf167c1001442",
+      "llama-b7345-bin-ubuntu-vulkan-x64.tar.gz": "sha256:9b02b406106cd20ea0568c43c28c587d7e4908b5b649e943adebb0e1ae726076",
+      "llama-b7345-bin-ubuntu-s390x.zip": "sha256:688ddad6996b1166eaaa76d5025e304c684116efe655e6e881d877505ecffccb",
+      "llama-b7345-bin-ubuntu-s390x.tar.gz": "sha256:118011b38b02fee21596ab5b1c40b56369da514645394b6528a466e18f4336f5",
+      "llama-b7345-bin-macos-arm64.zip": "sha256:72ae9b4a4605aa1223d7aabaa5326c66c268b12d13a449fcc06f61099cd02a52",
+      "llama-b7345-bin-macos-arm64.tar.gz": "sha256:dc7c6b64848180259db19eb5d8ee8424cffcbb053960e5c45d79db6b9ac4f40d",
+      "llama-b7345-bin-macos-x64.zip": "sha256:bec6b805cf7533f66b38f29305429f521dcb2be6b25dbce73a18df448ec55cc5",
+      "llama-b7345-bin-macos-x64.tar.gz": "sha256:9267a292f39a86b2ee5eaa553a06f4a2fda2aee35142cde40a9099432b304313",
+      "llama-b7345-bin-win-cpu-x64.zip": "sha256:ea449082c8e808a289d9a1e8331f90a0379ead4dd288a1b9a2d2c0a7151836cd",
+      "llama-b7345-bin-win-cpu-arm64.zip": "sha256:91e3ff43c123c7c30decfe5a44c291827c1e47359abaa2fbad1eb5392b3a0d85",
+      "llama-b7345-bin-win-cuda-12.4-x64.zip": "sha256:7a82aba2662fa7d4477a7a40894de002854bae1ab8b0039888577c9a2ca24cae",
+      "llama-b7345-bin-win-cuda-13.1-x64.zip": "sha256:06ea715cefb07e9862394e6d1ffa066f4c33add536b1f1aa058723f86ae05572",
+      "llama-b7345-bin-win-vulkan-x64.zip": "sha256:3e948bee438f46c8ea0a3faf0416549391ee945ffa624b25bc1f73d60d668679",
+      "llama-b7345-bin-win-sycl-x64.zip": "sha256:708ddb786cdeb43ceadaa57c0ca669ce05b86753bf859f5a95012c2ea481f9da",
+      "llama-b7345-bin-win-hip-radeon-x64.zip": "sha256:ba1fe643e27bae8dcdf6d7be459a6dc5d8385f179e71e749c53f52083c68e107",
+      "llama-b7345-bin-win-opencl-adreno-arm64.zip": "sha256:59d625d21fb64294b075c61ec1a5f01d394baf826bee2df847d0ea3ed21fa3f3",
+      "llama-b7345-xcframework.zip": "sha256:c94e870ba844e4938d6fccf0bfd64c9fe57884a14a3e2a4966e56e35a6cbaef4",
+      "llama-b7345-xcframework.tar.gz": "sha256:a542ceace2621d9d860f2ec64c1b2294ac71f292106b95dcaf239aec0a06dd55",
+      "cudart-llama-bin-win-cuda-12.4-x64.zip": "sha256:8c79a9b226de4b3cacfd1f83d24f962d0773be79f1e7b75c6af4ded7e32ae1d6",
+      "cudart-llama-bin-win-cuda-13.1-x64.zip": "sha256:f96935e7e385e3b2d0189239077c10fe8fd7e95690fea4afec455b1b6c7e3f18"
+    }
+  },
+  "models": {
+    "default_llm": {
+      "name": "DeepSeek-R1-Distill-Qwen-1.5B",
+      "url": "https://huggingface.co/bartowski/DeepSeek-R1-Distill-Qwen-1.5B-GGUF/resolve/main/DeepSeek-R1-Distill-Qwen-1.5B-Q3_K_M.gguf",
+      "filename": "DeepSeek-R1-Distill-Qwen-1.5B-Q3_K_M.gguf",
+      "size_mb": 1100,
+      "description": "Small reasoning model, good for CPU or minimal GPU (4GB VRAM)"
+    },
+    "default_embedding": {
+      "name": "BGE Small EN v1.5",
+      "url": "https://huggingface.co/CompendiumLabs/bge-small-en-v1.5-gguf/resolve/main/bge-small-en-v1.5-f32.gguf",
+      "filename": "bge-small-en-v1.5-f32.gguf",
+      "size_mb": 130,
+      "description": "Embedding model for vector search"
+    },
+    "large_llm": {
+      "name": "GPT-OSS 20B",
+      "url": "https://huggingface.co/unsloth/gpt-oss-20b-GGUF/resolve/main/gpt-oss-20b-F16.gguf",
+      "filename": "gpt-oss-20b-F16.gguf",
+      "size_mb": 40000,
+      "description": "Large model for GPU with 16GB+ VRAM"
+    }
+  }
+}
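The `{version}` placeholders above imply a simple substitution step when building download URLs. A minimal sketch of that lookup, assuming the `serde_json` crate; the function name and error handling are illustrative, not taken from the repository:

```rust
use serde_json::Value;

// Illustrative: resolve a concrete download URL from config/llm_releases.json.
fn resolve_url(releases: &Value, os: &str, arch: &str, flavor: &str) -> Option<String> {
    let llama = &releases["llama_cpp"];
    let version = llama["version"].as_str()?;
    let base = llama["base_url"].as_str()?;
    let template = llama["binaries"][os][arch][flavor].as_str()?;
    let filename = template.replace("{version}", version);
    Some(format!("{base}/{version}/{filename}"))
}

fn main() {
    let raw = std::fs::read_to_string("config/llm_releases.json").expect("config present");
    let releases: Value = serde_json::from_str(&raw).expect("valid JSON");
    if let Some(url) = resolve_url(&releases, "windows", "x64", "vulkan") {
        // -> .../download/b7345/llama-b7345-bin-win-vulkan-x64.zip
        println!("{url}");
    }
}
```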
installer.rs

@@ -6,6 +6,161 @@ use log::{info, trace, warn};
 use std::collections::HashMap;
 use std::path::PathBuf;

+/// Llama.cpp release version and download URLs
+const LLAMA_CPP_VERSION: &str = "b7345";
+
+/// Get the appropriate llama.cpp download URL for the current platform
+fn get_llama_cpp_url() -> Option<String> {
+    let base_url = format!(
+        "https://github.com/ggml-org/llama.cpp/releases/download/{}",
+        LLAMA_CPP_VERSION
+    );
+
+    #[cfg(target_os = "linux")]
+    {
+        #[cfg(target_arch = "x86_64")]
+        {
+            // Check for CUDA support
+            if std::path::Path::new("/usr/local/cuda").exists()
+                || std::path::Path::new("/opt/cuda").exists()
+                || std::env::var("CUDA_HOME").is_ok()
+            {
+                // Check CUDA version
+                if let Ok(output) = std::process::Command::new("nvcc").arg("--version").output() {
+                    let version_str = String::from_utf8_lossy(&output.stdout);
+                    if version_str.contains("13.") {
+                        info!("Detected CUDA 13.x - using CUDA 13.1 build");
+                        return Some(format!(
+                            "{}/llama-{}-bin-linux-cuda-13.1-x64.zip",
+                            base_url, LLAMA_CPP_VERSION
+                        ));
+                    } else if version_str.contains("12.") {
+                        info!("Detected CUDA 12.x - using CUDA 12.4 build");
+                        return Some(format!(
+                            "{}/llama-{}-bin-linux-cuda-12.4-x64.zip",
+                            base_url, LLAMA_CPP_VERSION
+                        ));
+                    }
+                }
+            }
+
+            // Check for Vulkan support
+            if std::path::Path::new("/usr/share/vulkan").exists()
+                || std::env::var("VULKAN_SDK").is_ok()
+            {
+                info!("Detected Vulkan - using Vulkan build");
+                return Some(format!(
+                    "{}/llama-{}-bin-ubuntu-vulkan-x64.zip",
+                    base_url, LLAMA_CPP_VERSION
+                ));
+            }
+
+            // Default to standard x64 build (CPU only)
+            info!("Using standard Ubuntu x64 build (CPU)");
+            return Some(format!(
+                "{}/llama-{}-bin-ubuntu-x64.zip",
+                base_url, LLAMA_CPP_VERSION
+            ));
+        }
+
+        #[cfg(target_arch = "s390x")]
+        {
+            info!("Detected s390x architecture");
+            return Some(format!(
+                "{}/llama-{}-bin-ubuntu-s390x.zip",
+                base_url, LLAMA_CPP_VERSION
+            ));
+        }
+
+        #[cfg(target_arch = "aarch64")]
+        {
+            info!("Detected ARM64 architecture on Linux");
+            // No official ARM64 Linux build, would need to compile from source
+            warn!("No pre-built llama.cpp for Linux ARM64 - LLM will not be available");
+            return None;
+        }
+    }
+
+    #[cfg(target_os = "macos")]
+    {
+        #[cfg(target_arch = "aarch64")]
+        {
+            info!("Detected macOS ARM64 (Apple Silicon)");
+            return Some(format!(
+                "{}/llama-{}-bin-macos-arm64.zip",
+                base_url, LLAMA_CPP_VERSION
+            ));
+        }
+
+        #[cfg(target_arch = "x86_64")]
+        {
+            info!("Detected macOS x64 (Intel)");
+            return Some(format!(
+                "{}/llama-{}-bin-macos-x64.zip",
+                base_url, LLAMA_CPP_VERSION
+            ));
+        }
+    }
+
+    #[cfg(target_os = "windows")]
+    {
+        #[cfg(target_arch = "x86_64")]
+        {
+            // Check for CUDA on Windows
+            if std::env::var("CUDA_PATH").is_ok() {
+                if let Ok(output) = std::process::Command::new("nvcc").arg("--version").output() {
+                    let version_str = String::from_utf8_lossy(&output.stdout);
+                    if version_str.contains("13.") {
+                        info!("Detected CUDA 13.x on Windows");
+                        return Some(format!(
+                            "{}/llama-{}-bin-win-cuda-13.1-x64.zip",
+                            base_url, LLAMA_CPP_VERSION
+                        ));
+                    } else if version_str.contains("12.") {
+                        info!("Detected CUDA 12.x on Windows");
+                        return Some(format!(
+                            "{}/llama-{}-bin-win-cuda-12.4-x64.zip",
+                            base_url, LLAMA_CPP_VERSION
+                        ));
+                    }
+                }
+            }
+
+            // Check for Vulkan on Windows
+            if std::env::var("VULKAN_SDK").is_ok() {
+                info!("Detected Vulkan SDK on Windows");
+                return Some(format!(
+                    "{}/llama-{}-bin-win-vulkan-x64.zip",
+                    base_url, LLAMA_CPP_VERSION
+                ));
+            }
+
+            // Default Windows CPU build
+            info!("Using standard Windows x64 CPU build");
+            return Some(format!(
+                "{}/llama-{}-bin-win-cpu-x64.zip",
+                base_url, LLAMA_CPP_VERSION
+            ));
+        }
+
+        #[cfg(target_arch = "aarch64")]
+        {
+            info!("Detected Windows ARM64");
+            return Some(format!(
+                "{}/llama-{}-bin-win-cpu-arm64.zip",
+                base_url, LLAMA_CPP_VERSION
+            ));
+        }
+    }
+
+    // Fallback for unknown platforms
+    #[allow(unreachable_code)]
+    {
+        warn!("Unknown platform - no llama.cpp binary available");
+        None
+    }
+}
+
 #[derive(Debug)]
 pub struct PackageManager {
     pub mode: InstallMode,
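The CUDA branch above keys off the `nvcc --version` output. A tiny sketch mirroring that check; the sample string is a typical `nvcc` version line, not one captured from this project:

```rust
// Illustrative reproduction of the CUDA-version sniffing used in get_llama_cpp_url().
fn main() {
    let version_str = "Cuda compilation tools, release 12.4, V12.4.131";
    let build = if version_str.contains("13.") {
        "cuda-13.1"
    } else if version_str.contains("12.") {
        "cuda-12.4"
    } else {
        "cpu"
    };
    println!("selected build flavor: {build}");
}
```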
@@ -199,20 +354,29 @@ impl PackageManager {
     }

     fn register_llm(&mut self) {
-        // Use pre-built llama.cpp binaries
+        // Detect platform and get appropriate llama.cpp URL
+        let download_url = get_llama_cpp_url();
+
+        if download_url.is_none() {
+            warn!("No llama.cpp binary available for this platform");
+            warn!("Local LLM will not be available - use external API instead");
+        }
+
+        info!(
+            "LLM component using llama.cpp {} for this platform",
+            LLAMA_CPP_VERSION
+        );
+
         self.components.insert(
             "llm".to_string(),
             ComponentConfig {
                 name: "llm".to_string(),

                 ports: vec![8081, 8082],
                 dependencies: vec![],
                 linux_packages: vec![],
                 macos_packages: vec![],
                 windows_packages: vec![],
-                download_url: Some(
-                    "https://github.com/ggml-org/llama.cpp/releases/download/b6148/llama-b6148-bin-ubuntu-x64.zip".to_string(),
-                ),
+                download_url,
                 binary_name: Some("llama-server".to_string()),
                 pre_install_cmds_linux: vec![],
                 post_install_cmds_linux: vec![],
@@ -226,8 +390,6 @@ impl PackageManager {
                     "https://huggingface.co/bartowski/DeepSeek-R1-Distill-Qwen-1.5B-GGUF/resolve/main/DeepSeek-R1-Distill-Qwen-1.5B-Q3_K_M.gguf".to_string(),
                     // Embedding model for vector search
                     "https://huggingface.co/CompendiumLabs/bge-small-en-v1.5-gguf/resolve/main/bge-small-en-v1.5-f32.gguf".to_string(),
-                    // GPT-OSS 20B F16 - Recommended for small GPU (16GB VRAM), no CPU
-                    // Uncomment to download: "https://huggingface.co/unsloth/gpt-oss-20b-GGUF/resolve/main/gpt-oss-20b-F16.gguf".to_string(),
                 ],
                 exec_cmd: "nohup {{BIN_PATH}}/llama-server --port 8081 --ssl-key-file {{CONF_PATH}}/system/certificates/llm/server.key --ssl-cert-file {{CONF_PATH}}/system/certificates/llm/server.crt -m {{DATA_PATH}}/DeepSeek-R1-Distill-Qwen-1.5B-Q3_K_M.gguf > {{LOGS_PATH}}/llm.log 2>&1 & nohup {{BIN_PATH}}/llama-server --port 8082 --ssl-key-file {{CONF_PATH}}/system/certificates/embedding/server.key --ssl-cert-file {{CONF_PATH}}/system/certificates/embedding/server.crt -m {{DATA_PATH}}/bge-small-en-v1.5-f32.gguf --embedding > {{LOGS_PATH}}/embedding.log 2>&1 &".to_string(),
                 check_cmd: "curl -f -k https://localhost:8081/health >/dev/null 2>&1 && curl -f -k https://localhost:8082/health >/dev/null 2>&1".to_string(),
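The `{{BIN_PATH}}`-style placeholders in `exec_cmd` imply a substitution pass before the command is launched. A minimal sketch of such an expansion; the helper and the map of paths are illustrative assumptions, not code from this commit:

```rust
use std::collections::HashMap;

// Illustrative: expand {{KEY}} placeholders in a command template.
fn expand(template: &str, vars: &HashMap<&str, &str>) -> String {
    let mut out = template.to_string();
    for (key, value) in vars {
        out = out.replace(&format!("{{{{{}}}}}", key), value);
    }
    out
}

fn main() {
    let vars = HashMap::from([
        ("BIN_PATH", "/opt/app/bin"),
        ("DATA_PATH", "/opt/app/data"),
        ("LOGS_PATH", "/opt/app/logs"),
        ("CONF_PATH", "/opt/app/conf"),
    ]);
    let cmd = expand("{{BIN_PATH}}/llama-server -m {{DATA_PATH}}/model.gguf", &vars);
    assert_eq!(cmd, "/opt/app/bin/llama-server -m /opt/app/data/model.gguf");
}
```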