jmorganca 2024-06-11 11:12:00 -07:00
parent 763d7b601c
commit ce00e387c3
7 changed files with 6643 additions and 20 deletions

3
llama/.gitignore vendored
View File

@ -6,4 +6,5 @@
*.so
*.o
*.obj
ollama_runner*
ollama_runner*
ggml-metal.metal

llama/README.md
View File

@ -1,6 +1,10 @@
# `llama`
This package integrates the [llama.cpp](https://github.com/ggerganov/llama.cpp) library as a Go package and makes it easy to build with tags for different CPU and GPU processors.
Supported:
@ -12,6 +16,15 @@ Supported:
- [x] Linux CUDA
- [x] Linux ROCm
- [x] Llava
- [ ] Parallel Requests
Extra build steps are required for CUDA and ROCm on Windows, since `nvcc` and `hipcc` both require using msvc as the host compiler. For these, small DLLs are created:
- `ggml-cuda.dll`
- `ggml-hipblas.dll`
> Note: it's important that memory is allocated and freed by the same compiler (e.g. entirely by code compiled with msvc or by mingw). Issues from this should be rare, but there are some places where pointers are returned by the CUDA or HIP runtimes and freed elsewhere, causing a crash. In a future change, the same runtime should be used in both cases to avoid these crashes.
@ -41,7 +54,15 @@ go build -tags=avx,avx2 .
### CUDA
Install the [CUDA toolkit v11.3.1](https://developer.nvidia.com/cuda-11-3-1-download-archive), then build `libggml-cuda.so`:
```shell
./build_cuda.sh
```
Then build the package with the `cuda` tag:
@ -51,8 +72,11 @@ go build -tags=cuda .
## Windows
Download [w64devkit](https://github.com/skeeto/w64devkit/releases/latest) for a simple MinGW development environment.
### CUDA
Install the [CUDA toolkit v11.3.1](https://developer.nvidia.com/cuda-11-3-1-download-archive), then build the CUDA code:
@ -60,7 +84,11 @@ Install the [CUDA toolkit v11.3.1](https://developer.nvidia.com/cuda-11-3-1-down
Build `ggml-cuda.dll`:
```shell
./build_cuda.ps1
```
Then build the package with the `cuda` tag:
@ -71,10 +99,19 @@ go build -tags=cuda .
### ROCm
Install [ROCm 5.7.1](https://rocm.docs.amd.com/en/docs-5.7.1/) and [Strawberry Perl](https://strawberryperl.com/).
Then build `ggml-hipblas.dll`:
```shell
./build_hipblas.sh
```
Then build the package with the `rocm` tag:
@ -85,8 +122,16 @@ go build -tags=rocm .
## Syncing with llama.cpp
To update this package to the latest llama.cpp code, use the `scripts/sync_llama.sh` script from the root of this repo, providing the location of a llama.cpp checkout:
```
cd ollama
./scripts/sync_llama.sh ../llama.cpp
```

llama/ggml-metal-darwin_arm64.m
View File

@ -377,8 +377,8 @@ static struct ggml_metal_context * ggml_metal_init(int n_cb) {
#if GGML_METAL_EMBED_LIBRARY
GGML_METAL_LOG_INFO("%s: using embedded metal library\n", __func__);
extern const char ggml_metallib_start[];
extern const char ggml_metallib_end[];
extern const char* ggml_metallib_start;
extern const char* ggml_metallib_end;
NSString * src = [[NSString alloc] initWithBytes:ggml_metallib_start length:(ggml_metallib_end-ggml_metallib_start) encoding:NSUTF8StringEncoding];
#else
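With the declarations changed from array symbols to plain pointers, the Metal shader source is no longer linked into the binary; the Go side has to assign both pointers before `ggml_metal_init` runs, which is what the `init()` added to `llama.go` further down does. Below is a minimal, self-contained cgo sketch of that assignment, using a placeholder string and `unsafe.Add` instead of the commit's `uintptr` arithmetic; the local C definitions exist only so the sketch builds on its own.

```go
package main

/*
// Local stand-ins for the globals that ggml-metal-darwin_arm64.m now declares
// as pointers; defined here only so this sketch compiles standalone.
const char *ggml_metallib_start;
const char *ggml_metallib_end;
*/
import "C"

import (
	"fmt"
	"unsafe"
)

func main() {
	src := "// placeholder for the spliced ggml-metal.metal source"
	p := C.CString(src) // C-owned copy; must stay alive for as long as ggml reads it
	C.ggml_metallib_start = p
	// end = start + len(src); unsafe.Add (Go 1.17+) is an equivalent spelling of
	// the uintptr arithmetic used in llama.go's init().
	C.ggml_metallib_end = (*C.char)(unsafe.Add(unsafe.Pointer(p), len(src)))
	fmt.Println("embedded library length:",
		uintptr(unsafe.Pointer(C.ggml_metallib_end))-uintptr(unsafe.Pointer(C.ggml_metallib_start)))
}
```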

6566
llama/ggml-metal.in.metal Normal file

File diff suppressed because it is too large

llama/llama.go
View File

@ -4,10 +4,10 @@ package llama
// #cgo CXXFLAGS: -std=c++11 -DNDEBUG -DLOG_DISABLE_LOGS
// #cgo darwin,arm64 CFLAGS: -DGGML_USE_METAL -DGGML_USE_ACCELERATE -DGGML_METAL_EMBED_LIBRARY -DACCELERATE_NEW_LAPACK -DACCELERATE_LAPACK_ILP64
// #cgo darwin,arm64 CXXFLAGS: -DGGML_USE_METAL -DGGML_USE_ACCELERATE -DGGML_METAL_EMBED_LIBRARY -DACCELERATE_NEW_LAPACK -DACCELERATE_LAPACK_ILP64
// #cgo darwin,arm64 LDFLAGS: ${SRCDIR}/ggml-metal.o -framework Foundation -framework Metal -framework MetalKit -framework Accelerate
// #cgo darwin,arm64 LDFLAGS: -framework Foundation -framework Metal -framework MetalKit -framework Accelerate
// #cgo darwin,amd64 CFLAGS: -Wno-incompatible-pointer-types-discards-qualifiers
// #cgo darwin,amd64 CXXFLAGS: -Wno-incompatible-pointer-types-discards-qualifiers
// #cgo darwin,amd64 LDFLAGS: ${SRCDIR}/ggml-metal.o -framework Foundation
// #cgo darwin,amd64 LDFLAGS: -framework Foundation
// #cgo darwin,amd64,avx2 CFLAGS: -DGGML_USE_ACCELERATE -DACCELERATE_NEW_LAPACK -DACCELERATE_LAPACK_ILP64
// #cgo darwin,amd64,avx2 CXXFLAGS: -DGGML_USE_ACCELERATE -DACCELERATE_NEW_LAPACK -DACCELERATE_LAPACK_ILP64
// #cgo darwin,amd64,avx2 LDFLAGS: -framework Accelerate
@ -35,8 +35,11 @@ package llama
// #include "sampling_ext.h"
//
// bool llamaProgressCallback(float progress, void *user_data);
// const char* ggml_metallib_start;
// const char* ggml_metallib_end;
import "C"
import (
_ "embed"
"errors"
"fmt"
"runtime"
@ -45,6 +48,21 @@ import (
"unsafe"
)
//go:embed ggml-common.h
var ggmlCommon string
//go:embed ggml-metal.metal
var ggmlMetal string
// TODO: write me somewhere else
func init() {
metal := strings.ReplaceAll(ggmlMetal, `#include "ggml-common.h"`, ggmlCommon)
fmt.Println(metal)
cMetal := C.CString(metal)
C.ggml_metallib_start = cMetal
C.ggml_metallib_end = (*C.char)(unsafe.Pointer(uintptr(unsafe.Pointer(cMetal)) + uintptr(len(metal))))
}
func BackendInit() {
C.llama_backend_init()
}
@ -81,6 +99,12 @@ func llamaProgressCallback(progress C.float, userData unsafe.Pointer) C.bool {
}
func NewModelParams(numGpuLayers int, mainGpu int, callback func(float32)) ModelParams {
fmt.Println("Contents of ggml-common.h:")
fmt.Println(ggmlCommon)
fmt.Println("\nContents of ggml-metal.in.metal:")
fmt.Println(ggmlMetal)
params := C.llama_model_default_params()
params.n_gpu_layers = C.int(numGpuLayers)
params.main_gpu = C.int32_t(mainGpu)

1
llama/llama_test.go Normal file
View File

@ -0,0 +1 @@
package llama
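The new test file is empty for now. Below is a hedged sketch of a first test that could live here, assuming the `ggmlCommon` and `ggmlMetal` embed variables added to `llama.go` in this commit: it checks that the embedded shader source still references `ggml-common.h` and that the runtime splice actually inlines it.

```go
package llama

import (
	"strings"
	"testing"
)

// Sketch only: exercises the same #include splice that init() in llama.go
// performs, using the package-level go:embed variables from this commit.
func TestMetalSourceSpliced(t *testing.T) {
	const include = `#include "ggml-common.h"`
	// init() relies on this directive being present in the embedded source.
	if !strings.Contains(ggmlMetal, include) {
		t.Fatalf("embedded Metal source does not contain %q", include)
	}
	spliced := strings.ReplaceAll(ggmlMetal, include, ggmlCommon)
	// Assumes ggml-common.h is longer than the include line it replaces.
	if len(spliced) <= len(ggmlMetal) {
		t.Fatal("splicing ggml-common.h did not grow the Metal source")
	}
}
```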

scripts/sync_llama.sh
View File

@ -28,7 +28,7 @@ cp $src_dir/ggml.c $dst_dir/ggml.c
cp $src_dir/ggml.h $dst_dir/ggml.h
cp $src_dir/ggml-quants.c $dst_dir/ggml-quants.c
cp $src_dir/ggml-quants.h $dst_dir/ggml-quants.h
cp $src_dir/ggml-metal.metal $dst_dir/ggml-metal.metal
cp $src_dir/ggml-metal.metal $dst_dir/ggml-metal.in.metal
cp $src_dir/ggml-metal.h $dst_dir/ggml-metal.h
cp $src_dir/ggml-metal.m $dst_dir/ggml-metal-darwin_arm64.m
cp $src_dir/ggml-impl.h $dst_dir/ggml-impl.h
@ -105,17 +105,3 @@ for IN in $dst_dir/*.{c,h,cpp,m,metal,cu}; do
cat $TEMP_LICENSE $IN >$TMP
mv $TMP $IN
done
# ggml-metal
# TODO: just embed the files
sed -e '/#include "ggml-common.h"/r ggml-common.h' -e '/#include "ggml-common.h"/d' < $dst_dir/ggml-metal.metal > temp.metal
TEMP_ASSEMBLY=$(mktemp)
echo ".section __DATA, __ggml_metallib" > $TEMP_ASSEMBLY
echo ".globl _ggml_metallib_start" >> $TEMP_ASSEMBLY
echo "_ggml_metallib_start:" >> $TEMP_ASSEMBLY
echo ".incbin \"temp.metal\"" >> $TEMP_ASSEMBLY
echo ".globl _ggml_metallib_end" >> $TEMP_ASSEMBLY
echo "_ggml_metallib_end:" >> $TEMP_ASSEMBLY
as $TEMP_ASSEMBLY -o $dst_dir/ggml-metal.o
rm -f $TEMP_ASSEMBLY
rm -rf temp.metal
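For comparison, the removed sed line above performed the same include inlining that `llama.go` now does at runtime with `go:embed`. Here is a rough, file-based Go rendering of that single preprocessing step; file names follow the removed script lines, and this is an illustrative sketch rather than part of the build.

```go
package main

import (
	"log"
	"os"
	"strings"
)

func main() {
	// Equivalent of the removed step:
	//   sed -e '/#include "ggml-common.h"/r ggml-common.h' \
	//       -e '/#include "ggml-common.h"/d' < ggml-metal.metal > temp.metal
	common, err := os.ReadFile("ggml-common.h")
	if err != nil {
		log.Fatal(err)
	}
	metal, err := os.ReadFile("ggml-metal.metal")
	if err != nil {
		log.Fatal(err)
	}
	out := strings.ReplaceAll(string(metal), `#include "ggml-common.h"`, string(common))
	if err := os.WriteFile("temp.metal", []byte(out), 0o644); err != nil {
		log.Fatal(err)
	}
}
```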