Mirror of https://github.com/tcsenpai/ollama.git, synced 2025-06-08 12:15:22 +00:00
diff --git a/examples/llava/clip.cpp b/examples/llava/clip.cpp
index 54aa822c..67a02c4c 100644
--- a/examples/llava/clip.cpp
+++ b/examples/llava/clip.cpp
@@ -764,11 +764,12 @@ static ggml_cgraph * clip_image_build_graph(clip_ctx * ctx, const clip_image_f32
     if (ctx->proj_type == PROJECTOR_TYPE_MLP) {
         embeddings = ggml_mul_mat(ctx0, model.mm_0_w, embeddings);
         embeddings = ggml_add(ctx0, embeddings, model.mm_0_b);
-
-        embeddings = ggml_gelu(ctx0, embeddings);
-        embeddings = ggml_mul_mat(ctx0, model.mm_2_w, embeddings);
-        embeddings = ggml_add(ctx0, embeddings, model.mm_2_b);
-
+        if (model.mm_2_w)
+        {
+            embeddings = ggml_gelu(ctx0, embeddings);
+            embeddings = ggml_mul_mat(ctx0, model.mm_2_w, embeddings);
+            embeddings = ggml_add(ctx0, embeddings, model.mm_2_b);
+        }
     } else if (ctx->proj_type == PROJECTOR_TYPE_MLP_NORM) {
         embeddings = ggml_mul_mat(ctx0, model.mm_0_w, embeddings);
         embeddings = ggml_add(ctx0, embeddings, model.mm_0_b);
@@ -2542,6 +2543,10 @@ int clip_n_mmproj_embd(const struct clip_ctx * ctx) {
         return ctx->vision_model.mm_model_peg_0_b->ne[0];
     }
     if (ctx->proj_type == PROJECTOR_TYPE_MLP) {
+        if (ctx->vision_model.mm_2_b == nullptr)
+        {
+            return ctx->vision_model.mm_0_b->ne[0];
+        }
         return ctx->vision_model.mm_2_b->ne[0];
     }
     if (ctx->proj_type == PROJECTOR_TYPE_MLP_NORM) {
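
Note on the clip.cpp hunks above: they make the second layer of the MLP projector (mm_2_w / mm_2_b) optional, so a vision tower whose multimodal projector is a single linear layer (apparently a PaliGemma-style setup) no longer crashes, and clip_n_mmproj_embd falls back to the first layer's output width. A minimal sketch of how a caller might size its embedding buffer against this, assuming the existing clip.h helpers (clip_model_load, clip_n_patches, clip_n_mmproj_embd); the function name alloc_image_embd is hypothetical:

// Sketch only: sizing the buffer that receives the projected image embeddings.
// clip_model_load / clip_n_patches / clip_n_mmproj_embd / clip_free are the
// helpers already declared in examples/llava/clip.h.
#include "clip.h"
#include <vector>

std::vector<float> alloc_image_embd(const char * mmproj_path) {
    clip_ctx * ctx = clip_model_load(mmproj_path, /*verbosity*/ 1);
    // With this patch, clip_n_mmproj_embd() returns mm_0_b->ne[0] when the
    // projector has no second layer, so the buffer is sized correctly either way.
    const int n_patches = clip_n_patches(ctx);
    const int n_embd    = clip_n_mmproj_embd(ctx);
    clip_free(ctx);
    return std::vector<float>((size_t) n_patches * n_embd);
}
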
diff --git a/include/llama.h b/include/llama.h
index ce07f4fa..07b09c9a 100644
--- a/include/llama.h
+++ b/include/llama.h
@@ -444,6 +444,12 @@ extern "C" {
     // Frees all allocated memory
     LLAMA_API void llama_free(struct llama_context * ctx);
 
+    // Sets image embeddings
+    LLAMA_API void set_image_embeds(struct llama_context *ctx, float *data);
+
+    // Get architecture
+    LLAMA_API int llama_get_architecture(struct llama_model *model);
+
     LLAMA_API int64_t llama_time_us(void);
 
     LLAMA_API size_t llama_max_devices(void);
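
The two declarations added to llama.h are the whole public surface of this patch: set_image_embeds stashes a raw float buffer on the context for the next decode, and llama_get_architecture exposes the model's internal llm_arch enum value as an int (how to interpret that int is up to the caller). A hedged usage sketch follows; only the two new signatures come from the diff, while the call order and the use of the existing llava.h helper llava_image_embed_make_with_filename are assumptions:

// Sketch: wiring image embeddings into a decode call via the new API.
#include "llama.h"
#include "llava.h"

void decode_with_image(llama_context * ctx, clip_ctx * ctx_clip,
                       llama_batch & batch, const char * image_path) {
    // Existing llava.h helper: runs the CLIP tower + projector on an image file.
    llava_image_embed * embed =
        llava_image_embed_make_with_filename(ctx_clip, /*n_threads*/ 4, image_path);

    // New entry point from this patch: the embeddings are consumed (and the
    // pointer cleared) by the next graph build inside llama_decode.
    set_image_embeds(ctx, embed->embed);

    llama_decode(ctx, batch);
    llava_image_embed_free(embed);
}
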
diff --git a/src/llama.cpp b/src/llama.cpp
index 7f2f0003..754d3d5f 100644
--- a/src/llama.cpp
+++ b/src/llama.cpp
@@ -2719,6 +2719,8 @@ struct llama_context {
 
     bool logits_all = false;
 
+    float *image_embeds = nullptr;
+
     // embeddings output (2-dimensional array: [n_outputs][n_embd])
     // populated only when pooling_type == LLAMA_POOLING_TYPE_NONE
     size_t embd_size = 0; // capacity (of floats) for embeddings
@@ -11660,6 +11662,15 @@ struct llm_build_context {
 
         inpL = llm_build_inp_embd(ctx0, lctx, hparams, batch, model.tok_embd, cb);
 
+        if (lctx.image_embeds)
+        {
+            struct ggml_tensor *image_embeds = ggml_dup_tensor(ctx0, inpL);
+            image_embeds->data = lctx.image_embeds;
+            image_embeds->ne[1] = 256;
+            inpL = ggml_set_2d_inplace(ctx0, inpL, image_embeds, inpL->nb[1], 0);
+            lctx.image_embeds = NULL;
+        }
+
         inpL = ggml_scale(ctx0, inpL, sqrtf(n_embd));
         cb(inpL, "inp_scaled", -1);
 
@@ -14565,6 +14576,7 @@ static int llama_decode_internal(
 
     const int64_t n_embd  = hparams.n_embd;
     const int64_t n_vocab = hparams.n_vocab;
+    const bool has_image_embeds = lctx.image_embeds;
 
     uint32_t n_outputs = 0;
     uint32_t n_outputs_prev = 0;
@@ -14678,7 +14690,7 @@ static int llama_decode_internal(
     }
 
     // non-causal masks do not use the KV cache
-    if (hparams.causal_attn) {
+    if (hparams.causal_attn || lctx.image_embeds) {
         llama_kv_cache_update(&lctx);
 
         // if we have enough unused cells before the current head ->
@@ -16589,6 +16601,16 @@ void llama_free_model(struct llama_model * model) {
     delete model;
 }
 
+void set_image_embeds(llama_context *ctx, float *data)
+{
+    ctx->image_embeds = data;
+}
+
+int llama_get_architecture(llama_model *model)
+{
+    return model->arch;
+}
+
 struct llama_context * llama_new_context_with_model(
                  struct llama_model * model,
         struct llama_context_params params) {
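
One detail worth flagging in the graph-build hunk: the 256 written into image_embeds->ne[1] is hard-coded, meaning the patch assumes exactly 256 image-token positions at the start of the input embedding matrix, which ggml_set_2d_inplace then overwrites with the projected image embeddings. That count is consistent with a PaliGemma-style SigLIP encoder at 224x224 resolution with 14x14 patches, though that reading is an inference, not something the diff states:

// Where 256 plausibly comes from (an assumption, not stated in the patch):
// a 224x224 input split into 14x14 patches yields a 16x16 grid of tokens.
constexpr int image_size = 224; // assumed encoder input resolution
constexpr int patch_size = 14;  // assumed ViT patch size
constexpr int n_image_tokens =
    (image_size / patch_size) * (image_size / patch_size); // 16 * 16 = 256
static_assert(n_image_tokens == 256, "matches the hard-coded ne[1] = 256 above");
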