Mirror of https://github.com/tcsenpai/ollama.git (synced 2025-06-10 04:57:07 +00:00)
remove printing

commit b3c62dcafd
parent 9b8b7cd9b5
@@ -100,12 +100,6 @@ func llamaProgressCallback(progress C.float, userData unsafe.Pointer) C.bool {
 }
 
 func NewModelParams(numGpuLayers int, mainGpu int, callback func(float32)) ModelParams {
-	fmt.Println("Contents of ggml-common.h:")
-	fmt.Println(ggmlCommon)
-
-	fmt.Println("\nContents of ggml-metal.in.metal:")
-	fmt.Println(ggmlMetal)
-
 	params := C.llama_model_default_params()
 	params.n_gpu_layers = C.int(numGpuLayers)
 	params.main_gpu = C.int32_t(mainGpu)
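For context, a minimal sketch of how the constructor touched by this commit might be called, based only on the signature visible in the hunk above. The import path, the choice of argument values, and the assumption that the callback reports load progress as a fraction (suggested by the surrounding llamaProgressCallback code) are illustrative guesses, not confirmed by this diff.

// Hypothetical call site for NewModelParams, reconstructed from the
// signature shown in the hunk above. Import path and callback semantics
// are assumptions, not confirmed by this diff.
package main

import (
	"fmt"

	"github.com/tcsenpai/ollama/llama" // assumed: package path in this fork
)

func main() {
	// Offload 32 layers to GPU 0; the callback is assumed to receive a
	// load-progress fraction in [0, 1].
	params := llama.NewModelParams(32, 0, func(progress float32) {
		fmt.Printf("loading: %.0f%%\n", progress*100)
	})
	_ = params // pass params on to whatever model-loading API the package exposes
}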