diff --git a/llm/ext_server/server.cpp b/llm/ext_server/server.cpp
index 5fd8be80..68d17e28 100644
--- a/llm/ext_server/server.cpp
+++ b/llm/ext_server/server.cpp
@@ -1272,7 +1272,7 @@ struct llama_server_context
         }
     }
 
-    bool process_images_paligemma(server_slot &slot, int n_batch)
+    /* bool process_images_paligemma(server_slot &slot, int n_batch)
     {
         // set_off_embeds(ctx);
         int n_past = 0;
@@ -1288,8 +1288,8 @@ struct llama_server_context
 
         if (ctx)
         {
-            set_image_embeds(ctx, data);
-            // print_image_embeds(ctx);
+            // set_image_embeds(ctx, data);
+            // print_embeds(ctx);
         }
         else
         {
@@ -1366,8 +1366,8 @@ struct llama_server_context
         }
         printf("done processing images paligemma\n");
         // llama_batch_clear(batch);
-        return true;
-    }
+        return true;
+    } */
 
     bool prepare_pali(server_slot &slot, int n_batch)
     {
@@ -1382,21 +1382,12 @@ struct llama_server_context
         {
             data[i] = data[i] / sqrt(2048);
         }
-
-        if (ctx)
-        {
-            set_image_embeds(ctx, data);
-            // print_image_embeds(ctx);
-        }
-        else
-        {
-            printf("ctx is null");
-        }
+        set_image_embeds(ctx, data);
 
         // generate user_prompt -> this should contain image tokens prepended and a new line appended:
         // batch.n_tokens += (int)slot.images.size() * llama_n_embd(model);
         std::vector<llama_token> tokens;
-        std::string prompt = "caption es";
+        std::string prompt = "How much ketchup is in this image?";
         std::vector<llama_token> text = ::llama_tokenize(ctx, prompt, false, true);
 
         for (int i = 0; i < (int)slot.images.size() * 256; i++)
diff --git a/llm/generate/gen_darwin.sh b/llm/generate/gen_darwin.sh
index 6c0b62cb..bb2cc934 100755
--- a/llm/generate/gen_darwin.sh
+++ b/llm/generate/gen_darwin.sh
@@ -9,8 +9,8 @@ set -o pipefail
 echo "Starting darwin generate script"
 source $(dirname $0)/gen_common.sh
 init_vars
-git_module_setup
-apply_patches
+#git_module_setup
+#apply_patches
 
 sign() {
     if [ -n "$APPLE_IDENTITY" ]; then
@@ -97,5 +97,5 @@ case "${GOARCH}" in
         ;;
 esac
 
-cleanup
+#cleanup
 echo "go generate completed.  LLM runners: $(cd ${BUILD_DIR}/..; echo *)"
diff --git a/llm/patches/12-paligemma.diff b/llm/patches/12-paligemma.diff
index 8c6d70f5..9061f88b 100644
--- a/llm/patches/12-paligemma.diff
+++ b/llm/patches/12-paligemma.diff
@@ -31,7 +31,7 @@ index 54aa822c..45d03982 100644
          if (ctx->proj_type == PROJECTOR_TYPE_MLP_NORM) {
              return ctx->vision_model.mm_3_b->ne[0];
  diff --git a/examples/llava/llava-cli.cpp b/examples/llava/llava-cli.cpp
-index 8c7dd2ae..3fe4759c 100644
+index 8c7dd2ae..38eeb305 100644
 --- a/examples/llava/llava-cli.cpp
 +++ b/examples/llava/llava-cli.cpp
 @@ -18,7 +18,10 @@ static bool eval_tokens(struct llama_context * ctx_llama, std::vector<llama_token> tokens, int n_batch, int * n_past) {
      llava_eval_image_embed(ctx_llava->ctx_llama, image_embed, params->n_batch, &n_past);
 -    eval_string(ctx_llava->ctx_llama, user_prompt.c_str(), params->n_batch, &n_past, false);
 +    // build user prompt with 256 image tokens
-+    user_prompt = "caption es";
++    user_prompt = "What is in this image?";
 +    std::string image_token_prefix = "";
 +    for (int i = 0; i < 256; i++) {
 +        image_token_prefix += "<image>";
@@ -100,23 +100,25 @@ index 8c7dd2ae..3fe4759c 100644
      process_prompt(ctx_llava, image_embed, &params, params.prompt);
 
  diff --git a/include/llama.h b/include/llama.h
-index ce07f4fa..09cfe207 100644
+index ce07f4fa..c3465d68 100644
 --- a/include/llama.h
 +++ b/include/llama.h
-@@ -444,6 +444,11 @@ extern "C" {
+@@ -444,6 +444,13 @@ extern "C" {
      // Frees all allocated memory
      LLAMA_API void llama_free(struct llama_context * ctx);
 
 +    // save image embeddings
 +    LLAMA_API void set_image_embeds(struct llama_context *ctx, float *data);
 +
++    LLAMA_API void print_embeds(struct llama_context *ctx);
++
 +    LLAMA_API void print_causal(struct llama_context *ctx);
 +
      LLAMA_API int64_t llama_time_us(void);
 
      LLAMA_API size_t llama_max_devices(void);
  diff --git a/src/llama.cpp b/src/llama.cpp
-index 7f2f0003..74498632 100644
+index 7f2f0003..d5926202 100644
 --- a/src/llama.cpp
 +++ b/src/llama.cpp
 @@ -2677,6 +2677,7 @@ struct llama_context {
@@ -127,7 +129,7 @@ index 7f2f0003..74498632 100644
      struct llama_cparams cparams;
      struct llama_sampling sampling;
      struct llama_kv_cache kv_self;
-@@ -2760,6 +2761,22 @@ struct llama_context {
+@@ -2760,6 +2761,33 @@ struct llama_context {
      struct ggml_tensor * inp_KQ_mask_cross; // F32 [n_outputs_enc, n_batch]
  };
 
@@ -135,6 +137,17 @@ index 7f2f0003..74498632 100644
 +    ctx->image_embeds = data;
 +}
 +
++void print_embeds(struct llama_context *ctx)
++{
++    if (ctx->image_embeds)
++    {
++        for (int i = 0; i < 256; i++)
++        {
++            LLAMA_LOG_INFO("%f ", ctx->image_embeds[i]);
++        }
++    }
++}
++
 +void print_causal(llama_context *ctx)
 +{
 +    if (ctx->cparams.causal_attn)
@@ -150,7 +163,7 @@ index 7f2f0003..74498632 100644
  struct llama_lora_weight {
      struct ggml_tensor * a = nullptr;
      struct ggml_tensor * b = nullptr;
-@@ -3021,6 +3038,96 @@ static bool llama_kv_cache_init(
+@@ -3021,6 +3049,96 @@ static bool llama_kv_cache_init(
      return true;
  }
 
@@ -247,7 +260,7 @@ index 7f2f0003..74498632 100644
  // find an empty slot of size "n_tokens" in the cache
  // updates the cache head
  // Note: On success, it's important that cache.head points
-@@ -11660,6 +11767,17 @@ struct llm_build_context {
+@@ -11660,6 +11778,18 @@ struct llm_build_context {
 
          inpL = llm_build_inp_embd(ctx0, lctx, hparams, batch, model.tok_embd, cb);
 
@@ -256,7 +269,8 @@ index 7f2f0003..74498632 100644
 +            struct ggml_tensor *image_embeds = ggml_dup_tensor(ctx0, inpL);
 +            image_embeds->data = lctx.image_embeds;
 +            image_embeds->ne[1] = 256;
++            print_embeds(&lctx);
 +            // llama_log_tensor(image_embeds, "/Users/joshyan/ollama/tensordata");
 +
 +            inpL = ggml_set_2d_inplace(ctx0, inpL, image_embeds, inpL->nb[1], 0);
 +            lctx.image_embeds = NULL;
 +        }
 +
@@ -265,7 +279,7 @@ index 7f2f0003..74498632 100644
          inpL = ggml_scale(ctx0, inpL, sqrtf(n_embd));
          cb(inpL, "inp_scaled", -1);
 
-@@ -14678,7 +14796,7 @@ static int llama_decode_internal(
+@@ -14678,7 +14808,7 @@ static int llama_decode_internal(
      }
 
      // non-causal masks do not use the KV cache
@@ -274,7 +288,7 @@ index 7f2f0003..74498632 100644
          llama_kv_cache_update(&lctx);
 
          // if we have enough unused cells before the current head ->
-@@ -18565,6 +18683,12 @@ float * llama_get_logits_ith(struct llama_context * ctx, int32_t i) {
+@@ -18565,6 +18695,12 @@ float * llama_get_logits_ith(struct llama_context * ctx, int32_t i) {
          if (ctx->logits == nullptr) {
              throw std::runtime_error("no logits");
          }
@@ -287,7 +301,7 @@ index 7f2f0003..74498632 100644
 +
          if (i < 0) {
              j = ctx->n_outputs + i;
-@@ -18577,6 +18701,7 @@ float * llama_get_logits_ith(struct llama_context * ctx, int32_t i) {
+@@ -18577,6 +18713,7 @@ float * llama_get_logits_ith(struct llama_context * ctx, int32_t i) {
              j = ctx->output_ids[i];
          }
 
diff --git a/llm/server.go b/llm/server.go
index d2b8db9b..91dbd06a 100644
--- a/llm/server.go
+++ b/llm/server.go
@@ -179,7 +179,7 @@ func NewLlamaServer(gpus gpu.GpuInfoList, model string, ggml *GGML, adapters, pr
 			}
 		}
 	}
-
+	opts.NumGPU = 0
 	if len(servers) == 0 {
 		return nil, fmt.Errorf("no servers found for %v", gpus)
 	}
@@ -733,7 +733,7 @@ func (s *llmServer) Completion(ctx context.Context, req CompletionRequest, fn fu
 		"n_predict":   req.Options.NumPredict,
 		"n_keep":      req.Options.NumKeep,
 		"main_gpu":    req.Options.MainGPU,
-		"temperature": req.Options.Temperature,
+		"temperature": 0,
 		"top_k":       req.Options.TopK,
 		"top_p":       req.Options.TopP,
 		"min_p":       req.Options.MinP,