Mirror of https://github.com/tcsenpai/ollama.git (synced 2025-06-06 03:05:22 +00:00)
Only enable numa on CPUs (#6484)
The numa flag may have a performance impact on multi-socket systems with GPU loads.
This commit is contained in:
parent 69be940bf6
commit 0f92b19bec
@@ -258,7 +258,7 @@ func NewLlamaServer(gpus gpu.GpuInfoList, model string, ggml *GGML, adapters, pr
 		params = append(params, "--mlock")
 	}
 
-	if gpu.IsNUMA() {
+	if gpu.IsNUMA() && gpus[0].Library == "cpu" {
 		numaMode := "distribute"
 		if runtime.GOOS == "linux" {
 			if _, err := exec.LookPath("numactl"); err == nil {
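For context, here is a self-contained Go sketch of what the guarded block does after this change: NUMA tuning flags are only passed to the llama runner when the load runs on the CPU backend. The GpuInfo stand-in, the hostIsNUMA parameter (standing in for gpu.IsNUMA()), and the continuation past the visible hunk (setting numaMode to "numactl" and appending "--numa <mode>") are assumptions made to keep the sketch runnable, not code taken verbatim from the repository.

```go
package main

import (
	"fmt"
	"os/exec"
	"runtime"
)

// GpuInfo is a minimal stand-in for the repo's gpu.GpuInfo; only the
// Library field ("cpu", "cuda", "rocm", ...) matters for this sketch.
type GpuInfo struct{ Library string }

// numaParams mirrors the guarded block above: NUMA flags are appended only
// when the host is NUMA and the model is loaded on the CPU backend, so
// multi-socket machines running GPU loads are left alone.
func numaParams(hostIsNUMA bool, gpus []GpuInfo) []string {
	var params []string
	if hostIsNUMA && gpus[0].Library == "cpu" {
		numaMode := "distribute"
		if runtime.GOOS == "linux" {
			// Prefer numactl when the binary is installed; otherwise fall
			// back to the "distribute" policy. (Assumed continuation of
			// the hunk shown above.)
			if _, err := exec.LookPath("numactl"); err == nil {
				numaMode = "numactl"
			}
		}
		params = append(params, "--numa", numaMode)
	}
	return params
}

func main() {
	fmt.Println(numaParams(true, []GpuInfo{{Library: "cpu"}}))  // e.g. [--numa distribute]
	fmt.Println(numaParams(true, []GpuInfo{{Library: "cuda"}})) // []  (GPU load, NUMA flag skipped)
}
```

Running the sketch prints the NUMA flags for a CPU load and an empty slice for a GPU load, which is the behavioral change this commit introduces.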