fix prompt for non-mllama multimodal

This commit is contained in:
Patrick Devine 2024-09-26 01:31:53 -07:00
parent c48e2cfc0d
commit 96a8b2f7d8

View File

@ -84,8 +84,7 @@ func chatPrompt(ctx context.Context, m *Model, tokenize tokenizeFunc, opts *api.
msgs[lastMsgIdx].Content = strings.TrimSpace("<|image|>" + msgs[lastMsgIdx].Content)
images = append(images, imgData)
}
} } else {
for cnt, msg := range msgs[currMsgIdx:] {
for _, i := range msg.Images {
imgData := llm.ImageData{
@ -105,6 +104,7 @@ func chatPrompt(ctx context.Context, m *Model, tokenize tokenizeFunc, opts *api.
images = append(images, imgData)
}
}
}
// truncate any messages that do not fit into the context window
var b bytes.Buffer