github.com/weaviate/weaviate@v1.24.6/adapters/repos/db/index_queue.go (about)

     1  //                           _       _
     2  // __      _____  __ ___   ___  __ _| |_ ___
     3  // \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \
     4  //  \ V  V /  __/ (_| |\ V /| | (_| | ||  __/
     5  //   \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___|
     6  //
     7  //  Copyright © 2016 - 2024 Weaviate B.V. All rights reserved.
     8  //
     9  //  CONTACT: hello@weaviate.io
    10  //
    11  
    12  package db
    13  
    14  import (
    15  	"container/list"
    16  	"context"
    17  	"encoding/binary"
    18  	"math"
    19  	"runtime"
    20  	"sync"
    21  	"sync/atomic"
    22  	"time"
    23  
    24  	enterrors "github.com/weaviate/weaviate/entities/errors"
    25  
    26  	"github.com/pkg/errors"
    27  	"github.com/sirupsen/logrus"
    28  	"github.com/weaviate/weaviate/adapters/repos/db/helpers"
    29  	"github.com/weaviate/weaviate/adapters/repos/db/indexcheckpoint"
    30  	"github.com/weaviate/weaviate/adapters/repos/db/priorityqueue"
    31  	"github.com/weaviate/weaviate/adapters/repos/db/vector/hnsw/distancer"
    32  	"github.com/weaviate/weaviate/entities/storagestate"
    33  	"github.com/weaviate/weaviate/entities/storobj"
    34  )
    35  
// IndexQueue is an in-memory queue of vectors to index.
// It batches vectors together before sending them to the indexing workers.
// It is safe to use concurrently.
type IndexQueue struct {
	// Shard is used to flip the owning shard's status between "ready" and "indexing".
	Shard shardStatusUpdater
	// Index is the underlying vector index batches are written to and searched.
	Index        batchIndexer
	shardID      string
	targetVector string

	IndexQueueOptions

	// indexCh is the channel used to send vectors to the shared indexing workers.
	indexCh chan job

	// context used to close pending tasks
	// if canceled, prevents new vectors from being added to the queue.
	ctx      context.Context
	cancelFn context.CancelFunc

	// tracks the background workers
	wg sync.WaitGroup

	// queue of not-yet-indexed vectors
	queue *vectorQueue

	// keeps track of the last call to Push()
	lastPushed atomic.Pointer[time.Time]

	// pool of reusable max-heaps used to merge index and brute-force search results
	pqMaxPool *pqMaxPool

	// persists the last-indexed checkpoint per shard/target vector
	checkpoints *indexcheckpoint.Checkpoints

	// while true, the indexer ticker skips sending batches to the workers
	// (set during compression activation)
	paused atomic.Bool
}
    70  
// vectorDescriptor pairs a document id with its vector, as stored in the queue.
type vectorDescriptor struct {
	id     uint64
	vector []float32
}
    75  
// IndexQueueOptions configures an IndexQueue. Zero-valued fields are
// replaced with defaults by NewIndexQueue.
type IndexQueueOptions struct {
	// BatchSize is the number of vectors to batch together
	// before sending them to the indexing worker.
	BatchSize int

	// IndexInterval is the maximum time to wait before sending
	// the pending vectors to the indexing worker.
	IndexInterval time.Duration

	// Max time a vector can stay in the queue before being indexed.
	StaleTimeout time.Duration

	// Logger is the logger used by the queue.
	Logger logrus.FieldLogger

	// Maximum number of vectors to use for brute force search
	// when vectors are not indexed.
	BruteForceSearchLimit int
}
    95  
// batchIndexer is the subset of a vector index the queue relies on:
// batched writes, searches, distance computation, and deletion.
type batchIndexer interface {
	AddBatch(ctx context.Context, id []uint64, vector [][]float32) error
	SearchByVector(vector []float32, k int, allowList helpers.AllowList) ([]uint64, []float32, error)
	SearchByVectorDistance(vector []float32, dist float32,
		maxLimit int64, allow helpers.AllowList) ([]uint64, []float32, error)
	DistanceBetweenVectors(x, y []float32) (float32, bool, error)
	ContainsNode(id uint64) bool
	Delete(id ...uint64) error
	DistancerProvider() distancer.Provider
}
   106  
// compressedIndexer is optionally implemented by indexes that can be
// switched to a compressed representation once enough vectors are indexed.
type compressedIndexer interface {
	Compressed() bool
	AlreadyIndexed() uint64
	TurnOnCompression(callback func()) error
	ShouldCompress() (bool, int)
}
   113  
// shardStatusUpdater lets the queue compare-and-swap the owning shard's
// status (e.g. "ready" <-> "indexing").
type shardStatusUpdater interface {
	compareAndSwapStatus(old, new string) (storagestate.Status, error)
}
   117  
   118  func NewIndexQueue(
   119  	shardID string,
   120  	targetVector string,
   121  	shard shardStatusUpdater,
   122  	index batchIndexer,
   123  	centralJobQueue chan job,
   124  	checkpoints *indexcheckpoint.Checkpoints,
   125  	opts IndexQueueOptions,
   126  ) (*IndexQueue, error) {
   127  	if opts.Logger == nil {
   128  		opts.Logger = logrus.New()
   129  	}
   130  	opts.Logger = opts.Logger.WithField("component", "index_queue")
   131  
   132  	if opts.BatchSize == 0 {
   133  		opts.BatchSize = 1000
   134  	}
   135  
   136  	if opts.IndexInterval == 0 {
   137  		opts.IndexInterval = 1 * time.Second
   138  	}
   139  
   140  	if opts.BruteForceSearchLimit == 0 {
   141  		opts.BruteForceSearchLimit = 100_000
   142  	}
   143  
   144  	if opts.StaleTimeout == 0 {
   145  		opts.StaleTimeout = 1 * time.Minute
   146  	}
   147  
   148  	q := IndexQueue{
   149  		shardID:           shardID,
   150  		targetVector:      targetVector,
   151  		IndexQueueOptions: opts,
   152  		Shard:             shard,
   153  		Index:             index,
   154  		indexCh:           centralJobQueue,
   155  		pqMaxPool:         newPqMaxPool(0),
   156  		checkpoints:       checkpoints,
   157  	}
   158  
   159  	q.queue = newVectorQueue(&q)
   160  
   161  	q.ctx, q.cancelFn = context.WithCancel(context.Background())
   162  
   163  	if !asyncEnabled() {
   164  		return &q, nil
   165  	}
   166  
   167  	q.wg.Add(1)
   168  	f := func() {
   169  		defer q.wg.Done()
   170  
   171  		q.indexer()
   172  	}
   173  	enterrors.GoWrapper(f, q.Logger)
   174  
   175  	return &q, nil
   176  }
   177  
   178  // Close immediately closes the queue and waits for workers to finish their current tasks.
   179  // Any pending vectors are discarded.
   180  func (q *IndexQueue) Close() error {
   181  	// check if the queue is closed
   182  	if q.ctx.Err() != nil {
   183  		return nil
   184  	}
   185  
   186  	// prevent new jobs from being added
   187  	q.cancelFn()
   188  
   189  	q.wg.Wait()
   190  
   191  	// loop over the chunks of the queue
   192  	// wait for the done chan to be closed
   193  	// then return
   194  	ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
   195  	defer cancel()
   196  	q.queue.wait(ctx)
   197  
   198  	return nil
   199  }
   200  
   201  // Push adds a list of vectors to the queue.
   202  func (q *IndexQueue) Push(ctx context.Context, vectors ...vectorDescriptor) error {
   203  	if ctx.Err() != nil {
   204  		return ctx.Err()
   205  	}
   206  	if q.ctx.Err() != nil {
   207  		return errors.New("index queue closed")
   208  	}
   209  
   210  	// store the time of the last push
   211  	now := time.Now()
   212  	q.lastPushed.Store(&now)
   213  
   214  	q.queue.Add(vectors)
   215  	return nil
   216  }
   217  
   218  // Size returns the number of vectors waiting to be indexed.
   219  func (q *IndexQueue) Size() int64 {
   220  	var count int64
   221  	q.queue.fullChunks.Lock()
   222  	e := q.queue.fullChunks.list.Front()
   223  	for e != nil {
   224  		c := e.Value.(*chunk)
   225  		count += int64(c.cursor)
   226  
   227  		e = e.Next()
   228  	}
   229  	q.queue.fullChunks.Unlock()
   230  
   231  	q.queue.curBatch.Lock()
   232  	if q.queue.curBatch.c != nil {
   233  		count += int64(q.queue.curBatch.c.cursor)
   234  	}
   235  	q.queue.curBatch.Unlock()
   236  
   237  	return count
   238  }
   239  
   240  // Delete marks the given vectors as deleted.
   241  // This method can be called even if the async indexing is disabled.
   242  func (q *IndexQueue) Delete(ids ...uint64) error {
   243  	if !asyncEnabled() {
   244  		return q.Index.Delete(ids...)
   245  	}
   246  
   247  	remaining := make([]uint64, 0, len(ids))
   248  	indexed := make([]uint64, 0, len(ids))
   249  
   250  	for _, id := range ids {
   251  		if q.Index.ContainsNode(id) {
   252  			indexed = append(indexed, id)
   253  
   254  			// is it already marked as deleted in the queue?
   255  			if q.queue.IsDeleted(id) {
   256  				q.queue.ResetDeleted(id)
   257  			}
   258  
   259  			continue
   260  		}
   261  
   262  		remaining = append(remaining, id)
   263  	}
   264  
   265  	err := q.Index.Delete(indexed...)
   266  	if err != nil {
   267  		return errors.Wrap(err, "delete node from index")
   268  	}
   269  
   270  	q.queue.Delete(remaining)
   271  
   272  	return nil
   273  }
   274  
// PreloadShard goes through the LSM store from the last checkpoint
// and enqueues any unindexed vector.
// It is a no-op when async indexing is disabled or no checkpoint exists
// for this shard/target vector.
func (q *IndexQueue) PreloadShard(shard ShardLike) error {
	if !asyncEnabled() {
		return nil
	}

	// load non-indexed vectors and add them to the queue
	checkpoint, exists, err := q.checkpoints.Get(q.shardID, q.targetVector)
	if err != nil {
		return errors.Wrap(err, "get last indexed id")
	}
	if !exists {
		return nil
	}

	start := time.Now()

	maxDocID := shard.Counter().Get()

	var counter int

	ctx := context.Background()

	// scan every doc id between the checkpoint and the current counter
	buf := make([]byte, 8)
	for i := checkpoint; i < maxDocID; i++ {
		binary.LittleEndian.PutUint64(buf, i)

		// doc ids are looked up via the objects bucket's secondary index (pos 0)
		v, err := shard.Store().Bucket(helpers.ObjectsBucketLSM).GetBySecondary(0, buf)
		if err != nil {
			return errors.Wrap(err, "get last indexed object")
		}
		if v == nil {
			// gap in doc ids (e.g. object no longer present): skip
			continue
		}
		obj, err := storobj.FromBinary(v)
		if err != nil {
			return errors.Wrap(err, "unmarshal last indexed object")
		}
		id := obj.DocID
		if q.targetVector == "" {
			// legacy single-vector case
			if shard.VectorIndex().ContainsNode(id) {
				continue
			}
			if len(obj.Vector) == 0 {
				continue
			}
		} else {
			// named target vector: resolve the matching index
			if shard.VectorIndexes() == nil {
				return errors.Errorf("vector index doesn't exist for target vector %s", q.targetVector)
			}
			vectorIndex, ok := shard.VectorIndexes()[q.targetVector]
			if !ok {
				return errors.Errorf("vector index doesn't exist for target vector %s", q.targetVector)
			}
			if vectorIndex.ContainsNode(id) {
				continue
			}
			if len(obj.Vectors) == 0 {
				continue
			}
		}
		counter++

		// pick the vector for the configured target
		var vector []float32
		if q.targetVector == "" {
			vector = obj.Vector
		} else {
			if len(obj.Vectors) > 0 {
				vector = obj.Vectors[q.targetVector]
			}
		}
		desc := vectorDescriptor{
			id:     id,
			vector: vector,
		}
		err = q.Push(ctx, desc)
		if err != nil {
			return err
		}
	}

	q.Logger.
		WithField("checkpoint", checkpoint).
		WithField("last_stored_id", maxDocID).
		WithField("count", counter).
		WithField("took", time.Since(start)).
		WithField("shard_id", q.shardID).
		WithField("target_vector", q.targetVector).
		Debug("enqueued vectors from last indexed checkpoint")

	return nil
}
   368  
   369  // Drop removes all persisted data related to the queue.
   370  // It closes the queue if not already.
   371  // It does not remove the index.
   372  // It should be called only when the index is dropped.
   373  func (q *IndexQueue) Drop() error {
   374  	_ = q.Close()
   375  
   376  	if q.checkpoints != nil {
   377  		return q.checkpoints.Delete(q.shardID, q.targetVector)
   378  	}
   379  
   380  	return nil
   381  }
   382  
   383  func (q *IndexQueue) indexer() {
   384  	t := time.NewTicker(q.IndexInterval)
   385  
   386  	workerNb := runtime.GOMAXPROCS(0) - 1
   387  
   388  	for {
   389  		select {
   390  		case <-t.C:
   391  			if q.Size() == 0 {
   392  				_, _ = q.Shard.compareAndSwapStatus(storagestate.StatusIndexing.String(), storagestate.StatusReady.String())
   393  				continue
   394  			}
   395  			if q.paused.Load() {
   396  				continue
   397  			}
   398  			status, err := q.Shard.compareAndSwapStatus(storagestate.StatusReady.String(), storagestate.StatusIndexing.String())
   399  			if status != storagestate.StatusIndexing || err != nil {
   400  				q.Logger.WithField("status", status).WithError(err).Warn("failed to set shard status to 'indexing', trying again in " + q.IndexInterval.String())
   401  				continue
   402  			}
   403  
   404  			lastPushed := q.lastPushed.Load()
   405  			if lastPushed == nil || time.Since(*lastPushed) > time.Second {
   406  				// send at most 2 times the number of workers in one go,
   407  				// then wait for the next tick in case more vectors
   408  				// are added to the queue
   409  				q.pushToWorkers(2*workerNb, false)
   410  			} else {
   411  				// send only one batch at a time and wait for it to be indexed
   412  				// to avoid competing for resources with the Push() method.
   413  				// This ensures the resources are used for queueing vectors in priority,
   414  				// then for indexing them.
   415  				q.pushToWorkers(1, true)
   416  			}
   417  			q.checkCompressionSettings()
   418  		case <-q.ctx.Done():
   419  			// stop the ticker
   420  			t.Stop()
   421  			return
   422  		}
   423  	}
   424  }
   425  
// pushToWorkers borrows up to max chunks from the queue (max <= 0 means no
// limit) and sends them as jobs to the shared indexing workers.
// If the queue is closed mid-send, all not-yet-sent chunks are released
// back to the queue. If wait is true, it blocks until every borrowed chunk
// has been indexed (or the queue context is canceled).
func (q *IndexQueue) pushToWorkers(max int, wait bool) {
	chunks := q.queue.borrowChunks(max)
	for i, c := range chunks {
		select {
		case <-q.ctx.Done():
			// release unsent borrowed chunks
			for _, c := range chunks[i:] {
				q.queue.releaseChunk(c)
			}

			return
		case q.indexCh <- job{
			chunk:   c,
			indexer: q.Index,
			queue:   q.queue,
			ctx:     q.ctx,
		}:
		}
	}

	if wait {
		q.queue.wait(q.ctx)
	}
}
   450  
   451  // SearchByVector performs the search through the index first, then uses brute force to
   452  // query unindexed vectors.
   453  func (q *IndexQueue) SearchByVector(vector []float32, k int, allowList helpers.AllowList) ([]uint64, []float32, error) {
   454  	return q.search(vector, -1, k, allowList)
   455  }
   456  
   457  // SearchByVectorDistance performs the search through the index first, then uses brute force to
   458  // query unindexed vectors.
   459  func (q *IndexQueue) SearchByVectorDistance(vector []float32, dist float32, maxLimit int64, allowList helpers.AllowList) ([]uint64, []float32, error) {
   460  	return q.search(vector, dist, int(maxLimit), allowList)
   461  }
   462  
// search queries the index first, then brute-forces the not-yet-indexed
// vectors in the queue and merges both result sets.
// dist == -1 selects k-nearest mode (maxLimit is k); any other value
// selects distance-threshold mode.
// Results are returned sorted by ascending distance.
func (q *IndexQueue) search(vector []float32, dist float32, maxLimit int, allowList helpers.AllowList) ([]uint64, []float32, error) {
	var indexedResults []uint64
	var distances []float32
	var err error
	if dist == -1 {
		indexedResults, distances, err = q.Index.SearchByVector(vector, maxLimit, allowList)
	} else {
		indexedResults, distances, err = q.Index.SearchByVectorDistance(vector, dist, int64(maxLimit), allowList)
	}
	if err != nil {
		return nil, nil, err
	}

	if !asyncEnabled() {
		// nothing is ever queued in sync mode: the index results are final
		return indexedResults, distances, nil
	}

	if q.Index.DistancerProvider().Type() == "cosine-dot" {
		// cosine-dot requires normalized vectors, as the dot product and cosine
		// similarity are only identical if the vector is normalized
		vector = distancer.Normalize(vector)
	}

	var results *priorityqueue.Queue[any]
	var seen map[uint64]struct{}

	err = q.queue.Iterate(allowList, func(objects []vectorDescriptor) error {
		// lazily set up the merge heap on the first non-empty batch,
		// seeding it with the results coming from the index
		if results == nil {
			results = q.pqMaxPool.GetMax(maxLimit)
			seen = make(map[uint64]struct{}, len(indexedResults))
			for i := range indexedResults {
				seen[indexedResults[i]] = struct{}{}
				results.Insert(indexedResults[i], distances[i])
			}
		}

		return q.bruteForce(vector, objects, maxLimit, results, allowList, dist, seen)
	})
	if results != nil {
		defer q.pqMaxPool.Put(results)
	}
	if err != nil {
		return nil, nil, err
	}
	if results == nil {
		// the queue had nothing to brute-force: index results are final
		return indexedResults, distances, nil
	}

	// drain the max-heap back-to-front so the output is in ascending distance order
	ids := make([]uint64, results.Len())
	dists := make([]float32, results.Len())

	i := results.Len() - 1
	for results.Len() > 0 {
		element := results.Pop()
		ids[i] = element.ID
		dists[i] = element.Dist
		i--
	}

	return ids, dists, nil
}
   524  
   525  func (q *IndexQueue) checkCompressionSettings() {
   526  	ci, ok := q.Index.(compressedIndexer)
   527  	if !ok {
   528  		return
   529  	}
   530  
   531  	shouldCompress, shouldCompressAt := ci.ShouldCompress()
   532  	if !shouldCompress || ci.Compressed() {
   533  		return
   534  	}
   535  
   536  	if ci.AlreadyIndexed() > uint64(shouldCompressAt) {
   537  		q.pauseIndexing()
   538  		err := ci.TurnOnCompression(q.resumeIndexing)
   539  		if err != nil {
   540  			q.Logger.WithError(err).Error("failed to turn on compression")
   541  		}
   542  	}
   543  }
   544  
   545  // pause indexing and wait for the workers to finish their current tasks
   546  // related to this queue.
   547  func (q *IndexQueue) pauseIndexing() {
   548  	q.Logger.Debug("pausing indexing, waiting for the current tasks to finish")
   549  	q.paused.Store(true)
   550  	q.queue.wait(q.ctx)
   551  	q.Logger.Debug("indexing paused")
   552  }
   553  
   554  // resume indexing
   555  func (q *IndexQueue) resumeIndexing() {
   556  	q.paused.Store(false)
   557  	q.Logger.Debug("indexing resumed")
   558  }
   559  
   560  func (q *IndexQueue) bruteForce(vector []float32, snapshot []vectorDescriptor, k int,
   561  	results *priorityqueue.Queue[any], allowList helpers.AllowList,
   562  	maxDistance float32, seen map[uint64]struct{},
   563  ) error {
   564  	for i := range snapshot {
   565  		// skip indexed data
   566  		if _, ok := seen[snapshot[i].id]; ok {
   567  			continue
   568  		}
   569  
   570  		// skip filtered data
   571  		if allowList != nil && !allowList.Contains(snapshot[i].id) {
   572  			continue
   573  		}
   574  
   575  		v := snapshot[i].vector
   576  		if q.Index.DistancerProvider().Type() == "cosine-dot" {
   577  			// cosine-dot requires normalized vectors, as the dot product and cosine
   578  			// similarity are only identical if the vector is normalized
   579  			v = distancer.Normalize(v)
   580  		}
   581  
   582  		dist, _, err := q.Index.DistanceBetweenVectors(vector, v)
   583  		if err != nil {
   584  			return err
   585  		}
   586  
   587  		// skip vectors that are too far away
   588  		if maxDistance > 0 && dist > maxDistance {
   589  			continue
   590  		}
   591  
   592  		if k < 0 || results.Len() < k || dist < results.Top().Dist {
   593  			results.Insert(snapshot[i].id, dist)
   594  			if k > 0 {
   595  				for results.Len() > k {
   596  					results.Pop()
   597  				}
   598  			}
   599  		}
   600  	}
   601  	return nil
   602  }
   603  
// pqMaxPool is a sync.Pool of reusable max-heaps used to merge search results.
type pqMaxPool struct {
	pool *sync.Pool
}
   607  
   608  func newPqMaxPool(defaultCap int) *pqMaxPool {
   609  	return &pqMaxPool{
   610  		pool: &sync.Pool{
   611  			New: func() interface{} {
   612  				return priorityqueue.NewMax[any](defaultCap)
   613  			},
   614  		},
   615  	}
   616  }
   617  
   618  func (pqh *pqMaxPool) GetMax(capacity int) *priorityqueue.Queue[any] {
   619  	pq := pqh.pool.Get().(*priorityqueue.Queue[any])
   620  	if pq.Cap() < capacity {
   621  		pq.ResetCap(capacity)
   622  	} else {
   623  		pq.Reset()
   624  	}
   625  
   626  	return pq
   627  }
   628  
   629  func (pqh *pqMaxPool) Put(pq *priorityqueue.Queue[any]) {
   630  	pqh.pool.Put(pq)
   631  }
   632  
// vectorQueue holds the not-yet-indexed vectors of an IndexQueue.
// Vectors accumulate in curBatch; once a chunk fills up it moves to
// fullChunks, where workers borrow it for indexing. Deletions are tracked
// as tombstones in the deleted map.
type vectorQueue struct {
	IndexQueue *IndexQueue
	// pool of reusable chunk buffers, each of length BatchSize
	pool     sync.Pool
	curBatch struct {
		sync.Mutex

		// chunk currently being filled; nil until the first Add
		c *chunk
	}
	fullChunks struct {
		sync.Mutex

		// list of *chunk, in insertion order
		list *list.List
	}
	deleted struct {
		sync.RWMutex

		// set of ids deleted while still queued
		m map[uint64]struct{}
	}
}
   652  
   653  func newVectorQueue(iq *IndexQueue) *vectorQueue {
   654  	q := vectorQueue{
   655  		IndexQueue: iq,
   656  		pool: sync.Pool{
   657  			New: func() any {
   658  				buf := make([]vectorDescriptor, iq.BatchSize)
   659  				return &buf
   660  			},
   661  		},
   662  	}
   663  
   664  	q.fullChunks.list = list.New()
   665  	q.deleted.m = make(map[uint64]struct{})
   666  
   667  	return &q
   668  }
   669  
   670  func (q *vectorQueue) getBuffer() []vectorDescriptor {
   671  	buff := *(q.pool.Get().(*[]vectorDescriptor))
   672  	return buff[:q.IndexQueue.BatchSize]
   673  }
   674  
   675  func (q *vectorQueue) getFreeChunk() *chunk {
   676  	c := chunk{
   677  		data: q.getBuffer(),
   678  	}
   679  	c.indexed = make(chan struct{})
   680  	return &c
   681  }
   682  
// wait blocks until no borrowed chunk in the full-chunks list remains
// unindexed, or the context is canceled. It repeatedly scans the list for
// the first borrowed chunk whose indexed channel is still open, then waits
// on it; the 5s timeout forces a periodic re-scan as a safety net.
func (q *vectorQueue) wait(ctx context.Context) {
	for {
		// get first non-closed channel
		var ch chan struct{}

		q.fullChunks.Lock()
		e := q.fullChunks.list.Front()
	LOOP:
		for e != nil {
			c := e.Value.(*chunk)
			if c.borrowed {
				select {
				case <-c.indexed:
					// already indexed, keep scanning
				default:
					ch = c.indexed
					break LOOP
				}
			}

			e = e.Next()
		}
		q.fullChunks.Unlock()

		if ch == nil {
			// no borrowed, unfinished chunk left
			return
		}

		select {
		case <-ch:
		case <-time.After(5 * time.Second):
			// re-scan periodically in case we missed a wakeup
		case <-ctx.Done():
			return
		}
	}
}
   718  
   719  func (q *vectorQueue) Add(vectors []vectorDescriptor) {
   720  	var full []*chunk
   721  
   722  	q.curBatch.Lock()
   723  	f := q.ensureHasSpace()
   724  	if f != nil {
   725  		full = append(full, f)
   726  	}
   727  
   728  	for len(vectors) != 0 {
   729  		curBatch := q.curBatch.c
   730  		n := copy(curBatch.data[curBatch.cursor:], vectors)
   731  		curBatch.cursor += n
   732  
   733  		vectors = vectors[n:]
   734  
   735  		f := q.ensureHasSpace()
   736  		if f != nil {
   737  			full = append(full, f)
   738  		}
   739  	}
   740  	q.curBatch.Unlock()
   741  
   742  	if len(full) > 0 {
   743  		q.fullChunks.Lock()
   744  		for _, f := range full {
   745  			f.elem = q.fullChunks.list.PushBack(f)
   746  		}
   747  		q.fullChunks.Unlock()
   748  	}
   749  }
   750  
   751  func (q *vectorQueue) ensureHasSpace() *chunk {
   752  	if q.curBatch.c == nil {
   753  		q.curBatch.c = q.getFreeChunk()
   754  	}
   755  
   756  	if q.curBatch.c.cursor == 0 {
   757  		now := time.Now()
   758  		q.curBatch.c.createdAt = &now
   759  	}
   760  
   761  	if q.curBatch.c.cursor < q.IndexQueue.BatchSize {
   762  		return nil
   763  	}
   764  
   765  	c := q.curBatch.c
   766  	q.curBatch.c = q.getFreeChunk()
   767  	now := time.Now()
   768  	q.curBatch.c.createdAt = &now
   769  	return c
   770  }
   771  
   772  func (q *vectorQueue) borrowChunks(max int) []*chunk {
   773  	if max <= 0 {
   774  		max = math.MaxInt64
   775  	}
   776  
   777  	q.fullChunks.Lock()
   778  	var chunks []*chunk
   779  	e := q.fullChunks.list.Front()
   780  	count := 0
   781  	for e != nil && count < max {
   782  		c := e.Value.(*chunk)
   783  		if !c.borrowed {
   784  			count++
   785  			c.borrowed = true
   786  			chunks = append(chunks, c)
   787  		}
   788  
   789  		e = e.Next()
   790  	}
   791  	q.fullChunks.Unlock()
   792  
   793  	if count < max {
   794  		var incompleteChunk *chunk
   795  		q.curBatch.Lock()
   796  		if q.curBatch.c != nil && time.Since(*q.curBatch.c.createdAt) > q.IndexQueue.StaleTimeout && q.curBatch.c.cursor > 0 {
   797  			q.curBatch.c.borrowed = true
   798  			chunks = append(chunks, q.curBatch.c)
   799  			incompleteChunk = q.curBatch.c
   800  			q.curBatch.c = nil
   801  		}
   802  		q.curBatch.Unlock()
   803  
   804  		// add the incomplete chunk to the full chunks list
   805  		if incompleteChunk != nil {
   806  			q.fullChunks.Lock()
   807  			q.fullChunks.list.PushBack(incompleteChunk)
   808  			q.fullChunks.Unlock()
   809  		}
   810  	}
   811  
   812  	return chunks
   813  }
   814  
// releaseChunk returns a borrowed chunk to the queue without it having been
// indexed: it closes the indexed channel so waiters unblock, removes the
// chunk from the full-chunks list, resets its fields (signaling concurrent
// iterators that it is gone), and recycles its buffer.
func (q *vectorQueue) releaseChunk(c *chunk) {
	if c == nil {
		return
	}

	// unblock anyone in wait() before touching the list
	if c.indexed != nil {
		close(c.indexed)
	}

	q.fullChunks.Lock()
	if c.elem != nil {
		q.fullChunks.list.Remove(c.elem)
	}

	// reset the chunk to notify the search
	// that it was released
	c.borrowed = false
	c.cursor = 0
	c.elem = nil
	c.createdAt = nil
	c.indexed = nil
	data := c.data
	c.data = nil

	q.fullChunks.Unlock()

	// only recycle full-capacity buffers back into the pool
	// NOTE(review): getBuffer always slices to BatchSize, so this guard
	// looks purely defensive — confirm no caller shrinks chunk.data
	if len(data) == q.IndexQueue.BatchSize {
		q.pool.Put(&data)
	}
}
   845  
// persistCheckpoint update the on-disk checkpoint that tracks the last indexed id
// optimistically. It is not guaranteed to be accurate but it is guaranteed to be lower
// than any vector in the queue.
// To calculate the checkpoint, we use the lowest id in the current batch
// minus the number of vectors the queue could still hold (delta), capped at 10k.
// The calculation looks like this:
// checkpoint = min(ids) - min(queueLen * batchSize, 10_000)
func (q *vectorQueue) persistCheckpoint(ids []uint64) {
	if len(ids) == 0 {
		return
	}

	q.fullChunks.Lock()
	cl := q.fullChunks.list.Len()
	q.fullChunks.Unlock()

	// get the lowest id in the current batch
	// NOTE(review): id 0 acts as the "unset" sentinel here, so a genuine
	// doc id 0 in ids would be skipped by the min scan — confirm doc ids
	// start above 0 or that a zero checkpoint is acceptable
	var minID uint64
	for _, id := range ids {
		if minID == 0 || id < minID {
			minID = id
		}
	}

	// over-estimate of the queued vector count: chunks times full batch size
	delta := uint64(cl * q.IndexQueue.BatchSize)
	// cap the delta to 10k vectors
	if delta > 10_000 {
		delta = 10_000
	}
	// clamp at zero to avoid unsigned underflow
	var checkpoint uint64
	if minID > delta {
		checkpoint = minID - delta
	} else {
		checkpoint = 0
	}

	err := q.IndexQueue.checkpoints.Update(q.IndexQueue.shardID, q.IndexQueue.targetVector, checkpoint)
	if err != nil {
		q.IndexQueue.Logger.WithError(err).Error("update checkpoint")
	}
}
   887  
// Iterate through all chunks in the queue and call the given function.
// Deleted vectors are skipped, and if an allowlist is provided, only vectors
// in the allowlist are returned.
// At most BruteForceSearchLimit vectors are visited in total.
// fn receives a reused scratch buffer: it must not retain the slice.
func (q *vectorQueue) Iterate(allowlist helpers.AllowList, fn func(objects []vectorDescriptor) error) error {
	buf := q.getBuffer()
	defer q.pool.Put(&buf)

	var count int

	// since chunks can get released concurrently,
	// we first get the pointers to all chunks.
	// then iterate over them.
	// This will not give us the latest data, but
	// will prevent us from losing access to the rest
	// of the linked list if an intermediate chunk is released.
	var elems []*list.Element
	q.fullChunks.Lock()
	e := q.fullChunks.list.Front()
	for e != nil {
		elems = append(elems, e)
		e = e.Next()
	}
	q.fullChunks.Unlock()

	for i := 0; i < len(elems); i++ {
		// we need to lock the list to prevent the chunk from being released
		q.fullChunks.Lock()
		c := elems[i].Value.(*chunk)
		if c.data == nil {
			// the chunk was released in the meantime,
			// skip it
			q.fullChunks.Unlock()
			continue
		}

		buf = buf[:0]
		// NOTE: the inner i deliberately shadows the outer chunk index; the
		// outer loop only reads elems[i] again after this loop has ended,
		// so the shadowing is harmless
		for i := 0; i < c.cursor; i++ {
			if allowlist != nil && !allowlist.Contains(c.data[i].id) {
				continue
			}

			if q.IsDeleted(c.data[i].id) {
				continue
			}

			buf = append(buf, c.data[i])
			count++
			if count >= q.IndexQueue.BruteForceSearchLimit {
				break
			}
		}
		q.fullChunks.Unlock()

		if len(buf) == 0 {
			continue
		}

		// call fn outside the lock so it cannot deadlock with the queue
		err := fn(buf)
		if err != nil {
			return err
		}

		if count >= q.IndexQueue.BruteForceSearchLimit {
			break
		}
	}

	if count >= q.IndexQueue.BruteForceSearchLimit {
		return nil
	}

	// finally, visit the current (partial) batch
	buf = buf[:0]
	q.curBatch.Lock()
	if q.curBatch.c != nil {
		for i := 0; i < q.curBatch.c.cursor; i++ {
			c := q.curBatch.c

			if allowlist != nil && !allowlist.Contains(c.data[i].id) {
				continue
			}

			if q.IsDeleted(c.data[i].id) {
				continue
			}

			buf = append(buf, c.data[i])
			count++

			if count >= q.IndexQueue.BruteForceSearchLimit {
				break
			}
		}
	}
	q.curBatch.Unlock()

	if len(buf) == 0 {
		return nil
	}

	err := fn(buf)
	if err != nil {
		return err
	}

	return nil
}
   994  
   995  func (q *vectorQueue) Delete(ids []uint64) {
   996  	q.deleted.Lock()
   997  	for _, id := range ids {
   998  		q.deleted.m[id] = struct{}{}
   999  	}
  1000  	q.deleted.Unlock()
  1001  }
  1002  
  1003  func (q *vectorQueue) IsDeleted(id uint64) bool {
  1004  	q.deleted.RLock()
  1005  	_, ok := q.deleted.m[id]
  1006  	q.deleted.RUnlock()
  1007  	return ok
  1008  }
  1009  
  1010  func (q *vectorQueue) ResetDeleted(id ...uint64) {
  1011  	q.deleted.Lock()
  1012  	for _, id := range id {
  1013  		delete(q.deleted.m, id)
  1014  	}
  1015  	q.deleted.Unlock()
  1016  }
  1017  
// chunk is a fixed-size buffer of queued vectors plus its bookkeeping state.
type chunk struct {
	// number of valid entries in data
	cursor int
	// true while the chunk is handed out to an indexing worker
	borrowed bool
	data     []vectorDescriptor
	// position in vectorQueue.fullChunks.list; nil if not enqueued
	elem *list.Element
	// time the first vector was written (used for staleness detection)
	createdAt *time.Time
	// closed once the chunk has been indexed or released
	indexed chan struct{}
}