Mirror of https://github.com/tcsenpai/ollama.git (synced 2025-06-07 11:45:21 +00:00)
cleanup

parent cd776e49ad
commit 48de4b56c8
@@ -220,10 +220,8 @@ func (s *Server) waiting() bool {
 
 // processImage processes an image embedding if it's next in any sequence
 func (s *Server) processImage() bool {
-	for i, seq := range s.seqs {
-		fmt.Println("seq", i, "inputs", len(seq.inputs))
+	for _, seq := range s.seqs {
 		if len(seq.inputs) > 0 && seq.inputs[0].embd != nil {
-			slog.Info("processing image", "seq", i, "nPast", seq.nPast)
 			llama.LlavaEvalImageEmbed(s.lc, seq.inputs[0].embd, s.batchSize, &seq.nPast)
 			llama.LlavaImageEmbedFree(seq.inputs[0].embd)
 			seq.iBatch = seq.inputs[0].embd.Tokens() - 1
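For context, a minimal self-contained sketch of the scan processImage performs after this cleanup: find the first sequence whose next pending input is an image embedding and consume it before any text tokens are batched. The types here are mocks, not ollama's actual Server or llama bindings, and the nPast arithmetic is a stand-in for what llama.LlavaEvalImageEmbed does internally.

	package main

	import "fmt"

	type input struct {
		token int
		embd  []float32 // non-nil means this input is an image embedding
	}

	type sequence struct {
		inputs []input
		nPast  int
	}

	// processImage mirrors the loop in the hunk above: the first sequence
	// with a pending embedding gets it evaluated and consumed.
	func processImage(seqs []*sequence) bool {
		for _, seq := range seqs {
			if len(seq.inputs) > 0 && seq.inputs[0].embd != nil {
				// A real runner hands the embedding to the model here
				// (the diff calls llama.LlavaEvalImageEmbed) and advances
				// nPast by the number of embedding tokens.
				seq.nPast += len(seq.inputs[0].embd)
				seq.inputs = seq.inputs[1:]
				return true
			}
		}
		return false
	}

	func main() {
		seqs := []*sequence{{inputs: []input{{embd: make([]float32, 4)}, {token: 42}}}}
		fmt.Println(processImage(seqs), seqs[0].nPast) // true 4
	}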
@@ -244,7 +242,6 @@ func (s *Server) run(ctx context.Context) {
 		case <-ctx.Done():
 			return
 		default:
-			slog.Info("Processing batch", "seqs", len(s.seqs))
 			s.mu.Lock()
 			for s.waiting() {
 				s.cond.Wait()
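The `for s.waiting() { s.cond.Wait() }` lines above are the standard sync.Cond pattern: the worker sleeps while the predicate holds and re-checks it on every wakeup, which is why the wait is wrapped in a for loop rather than an if. A minimal runnable sketch of that pattern, independent of ollama's types:

	package main

	import (
		"fmt"
		"sync"
		"time"
	)

	func main() {
		var mu sync.Mutex
		cond := sync.NewCond(&mu)
		ready := false

		go func() {
			time.Sleep(10 * time.Millisecond)
			mu.Lock()
			ready = true
			mu.Unlock()
			cond.Broadcast() // wake the worker so it re-checks the predicate
		}()

		mu.Lock()
		for !ready { // loop guards against spurious or stale wakeups
			cond.Wait() // atomically unlocks mu and sleeps; relocks on return
		}
		mu.Unlock()
		fmt.Println("batch ready")
	}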
@@ -290,7 +287,6 @@ func (s *Server) run(ctx context.Context) {
 				break
 			}
 
-			slog.Info("adding token to batch", "token", t.token, "seq", i)
 			batch.Add(t.token, seq.nPast, []int{i}, !seq.isPromptProcessing())
 			seq.nPast++
 		}
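A hedged, self-contained sketch of the accumulation pattern behind the batch.Add call above: one token per running sequence, at that sequence's current position, with logits requested only once prompt processing is done. The types are hypothetical, not ollama's batch implementation, and the parameter roles are inferred from the call site rather than confirmed by this diff.

	package main

	import "fmt"

	type entry struct {
		token  int
		pos    int
		seqIDs []int
		logits bool // whether the model should produce logits for this token
	}

	type batch struct{ entries []entry }

	// Add mirrors the inferred signature of batch.Add in the hunk above.
	func (b *batch) Add(token, pos int, seqIDs []int, logits bool) {
		b.entries = append(b.entries, entry{token, pos, seqIDs, logits})
	}

	func main() {
		var b batch
		nPast := []int{5, 9}          // hypothetical per-sequence positions
		prompt := []bool{true, false} // hypothetical: seq 0 still ingesting its prompt
		for i := range nPast {
			// mirrors batch.Add(t.token, seq.nPast, []int{i}, !seq.isPromptProcessing())
			b.Add(100+i, nPast[i], []int{i}, !prompt[i])
			nPast[i]++
		}
		fmt.Printf("%+v\n", b.entries)
	}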