github.com/whtcorpsinc/MilevaDB-Prod@v0.0.0-20211104133533-f57f4be3b597/interlock/index_merge_reader.go

// Copyright 2020 WHTCORPS INC, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package interlock

import (
	"context"
	"runtime/trace"
	"sync"
	"sync/atomic"
	"unsafe"

	"github.com/whtcorpsinc/BerolinaSQL/perceptron"
	"github.com/whtcorpsinc/BerolinaSQL/terror"
	"github.com/whtcorpsinc/errors"
	"github.com/whtcorpsinc/failpoint"
	"github.com/whtcorpsinc/fidelpb/go-fidelpb"
	"github.com/whtcorpsinc/milevadb/allegrosql"
	"github.com/whtcorpsinc/milevadb/causet"
	causetembedded "github.com/whtcorpsinc/milevadb/causet/embedded"
	"github.com/whtcorpsinc/milevadb/ekv"
	"github.com/whtcorpsinc/milevadb/memex"
	"github.com/whtcorpsinc/milevadb/soliton"
	"github.com/whtcorpsinc/milevadb/soliton/chunk"
	"github.com/whtcorpsinc/milevadb/soliton/logutil"
	"github.com/whtcorpsinc/milevadb/soliton/memory"
	"github.com/whtcorpsinc/milevadb/soliton/ranger"
	"github.com/whtcorpsinc/milevadb/statistics"
	"github.com/whtcorpsinc/milevadb/stochastikctx"
	"go.uber.org/zap"
)

var (
	_ InterlockingDirectorate = &IndexMergeReaderInterlockingDirectorate{}
)

// IndexMergeReaderInterlockingDirectorate accesses a causet with multiple index/causet scans.
// There are three types of workers:
// 1. partialBlockWorker/partialIndexWorker, which are used to fetch the handles.
// 2. indexMergeProcessWorker, which is used to do the `Union` operation.
// 3. indexMergeBlockScanWorker, which is used to get the causet tuples with the given handles.
//
// The execution flow is much like that of IndexLookUpReader. However, it uses multiple index scans
// or causet scans to get the handles:
// 1. use the partialBlockWorkers and partialIndexWorkers to fetch the handles (a batch at a time)
//    and send them to the indexMergeProcessWorker.
// 2. the indexMergeProcessWorker does the `Union` operation on each batch of handles it receives.
//    For every handle in the batch:
//    1. check whether it has been accessed.
//    2. if not, record it and send it to the indexMergeBlockScanWorker.
//    3. if accessed, just ignore it.
type IndexMergeReaderInterlockingDirectorate struct {
	baseInterlockingDirectorate

	causet           causet.Block
	indexes          []*perceptron.IndexInfo
	descs            []bool
	ranges           [][]*ranger.Range
	posetPosetDagPBs []*fidelpb.PosetDagRequest
	startTS          uint64
	blockRequest     *fidelpb.PosetDagRequest
	// defCausumns are only required by union scan.
	defCausumns       []*perceptron.DeferredCausetInfo
	partialStreamings []bool
	blockStreaming    bool
	*dataReaderBuilder
	// All fields above are immutable.

	tblWorkerWg    sync.WaitGroup
	processWokerWg sync.WaitGroup
	finished       chan struct{}

	workerStarted bool
	keyRanges     [][]ekv.KeyRange

	resultCh   chan *lookupBlockTask
	resultCurr *lookupBlockTask
	feedbacks  []*statistics.QueryFeedback

	// memTracker is used to track the memory usage of this interlock.
	memTracker *memory.Tracker

	// checHoTTexValue is used to check the consistency of the index data.
	*checHoTTexValue

	corDefCausInIdxSide bool
	partialCausets      [][]causetembedded.PhysicalCauset
	corDefCausInTblSide bool
	tblCausets          []causetembedded.PhysicalCauset
	corDefCausInAccess  bool
	idxDefCauss         [][]*memex.DeferredCauset
	defCausLens         [][]int

	handleDefCauss causetembedded.HandleDefCauss
}

// Open implements the InterlockingDirectorate Open interface.
func (e *IndexMergeReaderInterlockingDirectorate) Open(ctx context.Context) error {
	e.keyRanges = make([][]ekv.KeyRange, 0, len(e.partialCausets))
	for i, plan := range e.partialCausets {
		_, ok := plan[0].(*causetembedded.PhysicalIndexScan)
		if !ok {
			if e.causet.Meta().IsCommonHandle {
				keyRanges, err := allegrosql.CommonHandleRangesToKVRanges(e.ctx.GetStochastikVars().StmtCtx, getPhysicalBlockID(e.causet), e.ranges[i])
				if err != nil {
					return err
				}
				e.keyRanges = append(e.keyRanges, keyRanges)
			} else {
				e.keyRanges = append(e.keyRanges, nil)
			}
			continue
		}
		keyRange, err := allegrosql.IndexRangesToKVRanges(e.ctx.GetStochastikVars().StmtCtx, getPhysicalBlockID(e.causet), e.indexes[i].ID, e.ranges[i], e.feedbacks[i])
		if err != nil {
			return err
		}
		e.keyRanges = append(e.keyRanges, keyRange)
	}
	e.finished = make(chan struct{})
	e.resultCh = make(chan *lookupBlockTask, atomic.LoadInt32(&LookupBlockTaskChannelSize))
	return nil
}

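// startWorkers starts the index merge process worker, one partial worker per
// partial plan, and the final block scan workers. If any partial worker fails
// to start, exitCh is closed so the workers that did start exit early.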
func (e *IndexMergeReaderInterlockingDirectorate) startWorkers(ctx context.Context) error {
	exitCh := make(chan struct{})
	workCh := make(chan *lookupBlockTask, 1)
	fetchCh := make(chan *lookupBlockTask, len(e.keyRanges))

	e.startIndexMergeProcessWorker(ctx, workCh, fetchCh)

	var err error
	var partialWorkerWg sync.WaitGroup
	for i := 0; i < len(e.keyRanges); i++ {
		partialWorkerWg.Add(1)
		if e.indexes[i] != nil {
			err = e.startPartialIndexWorker(ctx, exitCh, fetchCh, i, &partialWorkerWg, e.keyRanges[i])
		} else {
			err = e.startPartialBlockWorker(ctx, exitCh, fetchCh, i, &partialWorkerWg)
		}
		if err != nil {
			partialWorkerWg.Done()
			break
		}
	}
	go e.waitPartialWorkersAndCloseFetchChan(&partialWorkerWg, fetchCh)
	if err != nil {
		close(exitCh)
		return err
	}
	e.startIndexMergeBlockScanWorker(ctx, workCh)
	e.workerStarted = true
	return nil
}

func (e *IndexMergeReaderInterlockingDirectorate) waitPartialWorkersAndCloseFetchChan(partialWorkerWg *sync.WaitGroup, fetchCh chan *lookupBlockTask) {
	partialWorkerWg.Wait()
	close(fetchCh)
}

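// startIndexMergeProcessWorker starts the worker that unions the handle
// batches arriving on fetch and forwards the deduplicated tasks to workCh.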
func (e *IndexMergeReaderInterlockingDirectorate) startIndexMergeProcessWorker(ctx context.Context, workCh chan<- *lookupBlockTask, fetch <-chan *lookupBlockTask) {
	idxMergeProcessWorker := &indexMergeProcessWorker{}
	e.processWokerWg.Add(1)
	go func() {
		defer trace.StartRegion(ctx, "IndexMergeProcessWorker").End()
		soliton.WithRecovery(
			func() {
				idxMergeProcessWorker.fetchLoop(ctx, fetch, workCh, e.resultCh, e.finished)
			},
			idxMergeProcessWorker.handleLoopFetcherPanic(ctx, e.resultCh),
		)
		e.processWokerWg.Done()
	}()
}

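// startPartialIndexWorker builds an allegrosql request over the given key
// ranges and starts a goroutine that streams index events from it, turning
// them into handle batches for the process worker.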
func (e *IndexMergeReaderInterlockingDirectorate) startPartialIndexWorker(ctx context.Context, exitCh <-chan struct{}, fetchCh chan<- *lookupBlockTask, workID int, partialWorkerWg *sync.WaitGroup, keyRange []ekv.KeyRange) error {
	if e.runtimeStats != nil {
		defCauslInterDirc := true
		e.posetPosetDagPBs[workID].DefCauslectInterDircutionSummaries = &defCauslInterDirc
	}

	var builder allegrosql.RequestBuilder
	ekvReq, err := builder.SetKeyRanges(keyRange).
		SetPosetDagRequest(e.posetPosetDagPBs[workID]).
		SetStartTS(e.startTS).
		SetDesc(e.descs[workID]).
		SetKeepOrder(false).
		SetStreaming(e.partialStreamings[workID]).
		SetFromStochastikVars(e.ctx.GetStochastikVars()).
		SetMemTracker(e.memTracker).
		Build()
	if err != nil {
		return err
	}

	result, err := allegrosql.SelectWithRuntimeStats(ctx, e.ctx, ekvReq, e.handleDefCauss.GetFieldsTypes(), e.feedbacks[workID], getPhysicalCausetIDs(e.partialCausets[workID]), e.id)
	if err != nil {
		return err
	}

	result.Fetch(ctx)
	worker := &partialIndexWorker{
		sc:           e.ctx,
		batchSize:    e.maxChunkSize,
		maxBatchSize: e.ctx.GetStochastikVars().IndexLookupSize,
		maxChunkSize: e.maxChunkSize,
	}

	if worker.batchSize > worker.maxBatchSize {
		worker.batchSize = worker.maxBatchSize
	}

	failpoint.Inject("startPartialIndexWorkerErr", func() error {
		return errors.New("inject an error before start partialIndexWorker")
	})

	go func() {
		defer trace.StartRegion(ctx, "IndexMergePartialIndexWorker").End()
		defer partialWorkerWg.Done()
		ctx1, cancel := context.WithCancel(ctx)
		var err error
		soliton.WithRecovery(
			func() {
				_, err = worker.fetchHandles(ctx1, result, exitCh, fetchCh, e.resultCh, e.finished, e.handleDefCauss)
			},
			e.handleHandlesFetcherPanic(ctx, e.resultCh, "partialIndexWorker"),
		)
		if err != nil {
			e.feedbacks[workID].Invalidate()
		}
		cancel()
		if err := result.Close(); err != nil {
			logutil.Logger(ctx).Error("close Select result failed:", zap.Error(err))
		}
		e.ctx.StoreQueryFeedback(e.feedbacks[workID])
	}()

	return nil
}

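// buildPartialBlockReader builds a BlockReaderInterlockingDirectorate that
// scans the causet ranges of the workID-th partial plan.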
func (e *IndexMergeReaderInterlockingDirectorate) buildPartialBlockReader(ctx context.Context, workID int) InterlockingDirectorate {
	blockReaderInterDirc := &BlockReaderInterlockingDirectorate{
		baseInterlockingDirectorate: newBaseInterlockingDirectorate(e.ctx, e.schemaReplicant, 0),
		causet:                      e.causet,
		posetPosetDagPB:             e.posetPosetDagPBs[workID],
		startTS:                     e.startTS,
		streaming:                   e.partialStreamings[workID],
		feedback:                    statistics.NewQueryFeedback(0, nil, 0, false),
		plans:                       e.partialCausets[workID],
		ranges:                      e.ranges[workID],
	}
	return blockReaderInterDirc
}

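// startPartialBlockWorker opens a partial causet reader and starts a
// goroutine that fetches handle batches from it, mirroring
// startPartialIndexWorker.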
func (e *IndexMergeReaderInterlockingDirectorate) startPartialBlockWorker(ctx context.Context, exitCh <-chan struct{}, fetchCh chan<- *lookupBlockTask, workID int,
	partialWorkerWg *sync.WaitGroup) error {
	partialBlockReader := e.buildPartialBlockReader(ctx, workID)
	err := partialBlockReader.Open(ctx)
	if err != nil {
		logutil.Logger(ctx).Error("open Select result failed:", zap.Error(err))
		return err
	}
	blockInfo := e.partialCausets[workID][0].(*causetembedded.PhysicalBlockScan).Block
	worker := &partialBlockWorker{
		sc:           e.ctx,
		batchSize:    e.maxChunkSize,
		maxBatchSize: e.ctx.GetStochastikVars().IndexLookupSize,
		maxChunkSize: e.maxChunkSize,
		blockReader:  partialBlockReader,
		blockInfo:    blockInfo,
	}

	if worker.batchSize > worker.maxBatchSize {
		worker.batchSize = worker.maxBatchSize
	}
	go func() {
		defer trace.StartRegion(ctx, "IndexMergePartialBlockWorker").End()
		defer partialWorkerWg.Done()
		ctx1, cancel := context.WithCancel(ctx)
		var err error
		soliton.WithRecovery(
			func() {
				_, err = worker.fetchHandles(ctx1, exitCh, fetchCh, e.resultCh, e.finished, e.handleDefCauss)
			},
			e.handleHandlesFetcherPanic(ctx, e.resultCh, "partialBlockWorker"),
		)
		if err != nil {
			e.feedbacks[workID].Invalidate()
		}
		cancel()
		if err := worker.blockReader.Close(); err != nil {
			logutil.Logger(ctx).Error("close Select result failed:", zap.Error(err))
		}
		e.ctx.StoreQueryFeedback(e.feedbacks[workID])
	}()
	return nil
}

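// partialBlockWorker fetches events from one partial causet scan and extracts
// their handles.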
type partialBlockWorker struct {
	sc           stochastikctx.Context
	batchSize    int
	maxBatchSize int
	maxChunkSize int
	blockReader  InterlockingDirectorate
	blockInfo    *perceptron.BlockInfo
}

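// fetchHandles keeps extracting handle batches from the causet reader and
// sending them to fetchCh until the data is drained or the interlock exits.
// On error, the error is delivered to resultCh through a task's doneCh.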
func (w *partialBlockWorker) fetchHandles(ctx context.Context, exitCh <-chan struct{}, fetchCh chan<- *lookupBlockTask, resultCh chan<- *lookupBlockTask,
	finished <-chan struct{}, handleDefCauss causetembedded.HandleDefCauss) (count int64, err error) {
	chk := chunk.NewChunkWithCapacity(retTypes(w.blockReader), w.maxChunkSize)
	for {
		handles, retChunk, err := w.extractTaskHandles(ctx, chk, handleDefCauss)
		if err != nil {
			doneCh := make(chan error, 1)
			doneCh <- err
			resultCh <- &lookupBlockTask{
				doneCh: doneCh,
			}
			return count, err
		}
		if len(handles) == 0 {
			return count, nil
		}
		count += int64(len(handles))
		task := w.buildBlockTask(handles, retChunk)
		select {
		case <-ctx.Done():
			return count, ctx.Err()
		case <-exitCh:
			return count, nil
		case <-finished:
			return count, nil
		case fetchCh <- task:
		}
	}
}

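// extractTaskHandles reads up to batchSize handles from the reader. The batch
// size doubles after every full batch, capped at maxBatchSize, so small scans
// stay cheap while large scans amortize channel traffic.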
func (w *partialBlockWorker) extractTaskHandles(ctx context.Context, chk *chunk.Chunk, handleDefCauss causetembedded.HandleDefCauss) (
	handles []ekv.Handle, retChk *chunk.Chunk, err error) {
	handles = make([]ekv.Handle, 0, w.batchSize)
	for len(handles) < w.batchSize {
		chk.SetRequiredEvents(w.batchSize-len(handles), w.maxChunkSize)
		err = errors.Trace(w.blockReader.Next(ctx, chk))
		if err != nil {
			return handles, nil, err
		}
		if chk.NumEvents() == 0 {
			return handles, retChk, nil
		}
		for i := 0; i < chk.NumEvents(); i++ {
			handle, err := handleDefCauss.BuildHandle(chk.GetEvent(i))
			if err != nil {
				return nil, nil, err
			}
			handles = append(handles, handle)
		}
	}
	w.batchSize *= 2
	if w.batchSize > w.maxBatchSize {
		w.batchSize = w.maxBatchSize
	}
	return handles, retChk, nil
}

func (w *partialBlockWorker) buildBlockTask(handles []ekv.Handle, retChk *chunk.Chunk) *lookupBlockTask {
	task := &lookupBlockTask{
		handles:   handles,
		idxEvents: retChk,
	}

	task.doneCh = make(chan error, 1)
	return task
}

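// startIndexMergeBlockScanWorker starts IndexLookupConcurrency() workers that
// take deduplicated handle batches from workCh and read the causet events for
// them.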
func (e *IndexMergeReaderInterlockingDirectorate) startIndexMergeBlockScanWorker(ctx context.Context, workCh <-chan *lookupBlockTask) {
	lookupConcurrencyLimit := e.ctx.GetStochastikVars().IndexLookupConcurrency()
	e.tblWorkerWg.Add(lookupConcurrencyLimit)
	for i := 0; i < lookupConcurrencyLimit; i++ {
		worker := &indexMergeBlockScanWorker{
			workCh:         workCh,
			finished:       e.finished,
			buildTblReader: e.buildFinalBlockReader,
			tblCausets:     e.tblCausets,
			memTracker:     memory.NewTracker(memory.LabelForSimpleTask, -1),
		}
		ctx1, cancel := context.WithCancel(ctx)
		go func() {
			defer trace.StartRegion(ctx, "IndexMergeBlockScanWorker").End()
			var task *lookupBlockTask
			soliton.WithRecovery(
				// Pass the address of task so that, on panic, the recovery
				// closure below sees the task being executed rather than the
				// nil value task held when WithRecovery was called.
				func() { worker.pickAndInterDircTask(ctx1, &task) },
				func(r interface{}) { worker.handlePickAndInterDircTaskPanic(ctx1, task)(r) },
			)
			cancel()
			e.tblWorkerWg.Done()
		}()
	}
}

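// buildFinalBlockReader builds the causet reader that fetches the events for
// a batch of deduplicated handles.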
func (e *IndexMergeReaderInterlockingDirectorate) buildFinalBlockReader(ctx context.Context, handles []ekv.Handle) (InterlockingDirectorate, error) {
	blockReaderInterDirc := &BlockReaderInterlockingDirectorate{
		baseInterlockingDirectorate: newBaseInterlockingDirectorate(e.ctx, e.schemaReplicant, 0),
		causet:                      e.causet,
		posetPosetDagPB:             e.blockRequest,
		startTS:                     e.startTS,
		streaming:                   e.blockStreaming,
		defCausumns:                 e.defCausumns,
		feedback:                    statistics.NewQueryFeedback(0, nil, 0, false),
		plans:                       e.tblCausets,
	}
	blockReaderInterDirc.buildVirtualDeferredCausetInfo()
	blockReader, err := e.dataReaderBuilder.buildBlockReaderFromHandles(ctx, blockReaderInterDirc, handles)
	if err != nil {
		logutil.Logger(ctx).Error("build causet reader from handles failed", zap.Error(err))
		return nil, err
	}
	return blockReader, nil
}

// Next implements the InterlockingDirectorate Next interface.
func (e *IndexMergeReaderInterlockingDirectorate) Next(ctx context.Context, req *chunk.Chunk) error {
	if !e.workerStarted {
		if err := e.startWorkers(ctx); err != nil {
			return err
		}
	}

	req.Reset()
	for {
		resultTask, err := e.getResultTask()
		if err != nil {
			return errors.Trace(err)
		}
		if resultTask == nil {
			return nil
		}
		for resultTask.cursor < len(resultTask.rows) {
			req.AppendEvent(resultTask.rows[resultTask.cursor])
			resultTask.cursor++
			if req.NumEvents() >= e.maxChunkSize {
				return nil
			}
		}
	}
}

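// getResultTask returns the current task while it still has unread events,
// otherwise it blocks for the next finished task from resultCh. It returns
// nil once all tasks have been consumed.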
func (e *IndexMergeReaderInterlockingDirectorate) getResultTask() (*lookupBlockTask, error) {
	if e.resultCurr != nil && e.resultCurr.cursor < len(e.resultCurr.rows) {
		return e.resultCurr, nil
	}
	task, ok := <-e.resultCh
	if !ok {
		return nil, nil
	}
	if err := <-task.doneCh; err != nil {
		return nil, errors.Trace(err)
	}

	// Release the memory usage of the last task before we handle a new task.
	if e.resultCurr != nil {
		e.resultCurr.memTracker.Consume(-e.resultCurr.memUsage)
	}
	e.resultCurr = task
	return e.resultCurr, nil
}

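// handleHandlesFetcherPanic turns a panic in a partial worker into an error
// task on resultCh so that Next reports it instead of hanging.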
func (e *IndexMergeReaderInterlockingDirectorate) handleHandlesFetcherPanic(ctx context.Context, resultCh chan<- *lookupBlockTask, worker string) func(r interface{}) {
	return func(r interface{}) {
		if r == nil {
			return
		}

		err4Panic := errors.Errorf("panic in IndexMergeReaderInterlockingDirectorate %s: %v", worker, r)
		logutil.Logger(ctx).Error(err4Panic.Error())
		doneCh := make(chan error, 1)
		doneCh <- err4Panic
		resultCh <- &lookupBlockTask{
			doneCh: doneCh,
		}
	}
}

// Close implements the InterlockingDirectorate Close interface.
func (e *IndexMergeReaderInterlockingDirectorate) Close() error {
	if e.finished == nil {
		return nil
	}
	close(e.finished)
	e.processWokerWg.Wait()
	e.tblWorkerWg.Wait()
	e.finished = nil
	e.workerStarted = false
	// TODO: how to causetstore e.feedbacks
	return nil
}

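// indexMergeProcessWorker unions the handle batches fetched by the partial
// workers, keeping each handle at most once.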
type indexMergeProcessWorker struct {
}

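// fetchLoop reads handle batches from fetchCh, drops handles seen before, and
// sends each deduplicated task to both workCh (for execution) and resultCh
// (so Next hands tasks back in the order they were produced).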
func (w *indexMergeProcessWorker) fetchLoop(ctx context.Context, fetchCh <-chan *lookupBlockTask,
	workCh chan<- *lookupBlockTask, resultCh chan<- *lookupBlockTask, finished <-chan struct{}) {
	defer func() {
		close(workCh)
		close(resultCh)
	}()

	distinctHandles := ekv.NewHandleMap()

	for task := range fetchCh {
		handles := task.handles
		fhs := make([]ekv.Handle, 0, 8)
		for _, h := range handles {
			if _, ok := distinctHandles.Get(h); !ok {
				fhs = append(fhs, h)
				distinctHandles.Set(h, true)
			}
		}
		if len(fhs) == 0 {
			continue
		}
		task := &lookupBlockTask{
			handles: fhs,
			doneCh:  make(chan error, 1),
		}
		select {
		case <-ctx.Done():
			return
		case <-finished:
			return
		case workCh <- task:
			resultCh <- task
		}
	}
}

func (w *indexMergeProcessWorker) handleLoopFetcherPanic(ctx context.Context, resultCh chan<- *lookupBlockTask) func(r interface{}) {
	return func(r interface{}) {
		if r == nil {
			return
		}

		err4Panic := errors.Errorf("panic in IndexMergeReaderInterlockingDirectorate indexMergeProcessWorker: %v", r)
		logutil.Logger(ctx).Error(err4Panic.Error())
		doneCh := make(chan error, 1)
		doneCh <- err4Panic
		resultCh <- &lookupBlockTask{
			doneCh: doneCh,
		}
	}
}

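// partialIndexWorker fetches events from one partial index scan and extracts
// their handles.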
type partialIndexWorker struct {
	sc           stochastikctx.Context
	batchSize    int
	maxBatchSize int
	maxChunkSize int
}

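// fetchHandles mirrors partialBlockWorker.fetchHandles, but builds the
// handles from the index events returned by the allegrosql select result.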
func (w *partialIndexWorker) fetchHandles(
	ctx context.Context,
	result allegrosql.SelectResult,
	exitCh <-chan struct{},
	fetchCh chan<- *lookupBlockTask,
	resultCh chan<- *lookupBlockTask,
	finished <-chan struct{},
	handleDefCauss causetembedded.HandleDefCauss) (count int64, err error) {
	chk := chunk.NewChunkWithCapacity(handleDefCauss.GetFieldsTypes(), w.maxChunkSize)
	for {
		handles, retChunk, err := w.extractTaskHandles(ctx, chk, result, handleDefCauss)
		if err != nil {
			doneCh := make(chan error, 1)
			doneCh <- err
			resultCh <- &lookupBlockTask{
				doneCh: doneCh,
			}
			return count, err
		}
		if len(handles) == 0 {
			return count, nil
		}
		count += int64(len(handles))
		task := w.buildBlockTask(handles, retChunk)
		select {
		case <-ctx.Done():
			return count, ctx.Err()
		case <-exitCh:
			return count, nil
		case <-finished:
			return count, nil
		case fetchCh <- task:
		}
	}
}

func (w *partialIndexWorker) extractTaskHandles(ctx context.Context, chk *chunk.Chunk, idxResult allegrosql.SelectResult, handleDefCauss causetembedded.HandleDefCauss) (
	handles []ekv.Handle, retChk *chunk.Chunk, err error) {
	handles = make([]ekv.Handle, 0, w.batchSize)
	for len(handles) < w.batchSize {
		chk.SetRequiredEvents(w.batchSize-len(handles), w.maxChunkSize)
		err = errors.Trace(idxResult.Next(ctx, chk))
		if err != nil {
			return handles, nil, err
		}
		if chk.NumEvents() == 0 {
			return handles, retChk, nil
		}
		for i := 0; i < chk.NumEvents(); i++ {
			handle, err := handleDefCauss.BuildHandleFromIndexEvent(chk.GetEvent(i))
			if err != nil {
				return nil, nil, err
			}
			handles = append(handles, handle)
		}
	}
	w.batchSize *= 2
	if w.batchSize > w.maxBatchSize {
		w.batchSize = w.maxBatchSize
	}
	return handles, retChk, nil
}

func (w *partialIndexWorker) buildBlockTask(handles []ekv.Handle, retChk *chunk.Chunk) *lookupBlockTask {
	task := &lookupBlockTask{
		handles:   handles,
		idxEvents: retChk,
	}

	task.doneCh = make(chan error, 1)
	return task
}

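// indexMergeBlockScanWorker reads the causet events for the deduplicated
// handle batches it picks from workCh.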
type indexMergeBlockScanWorker struct {
	workCh         <-chan *lookupBlockTask
	finished       <-chan struct{}
	buildTblReader func(ctx context.Context, handles []ekv.Handle) (InterlockingDirectorate, error)
	tblCausets     []causetembedded.PhysicalCauset

	// memTracker is used to track the memory usage of this interlock.
	memTracker *memory.Tracker
}

// pickAndInterDircTask keeps executing tasks from workCh until the channel is
// closed or the interlock finishes. The current task is published through the
// task out-parameter so the panic handler can reach the task that was running
// when a panic happened.
func (w *indexMergeBlockScanWorker) pickAndInterDircTask(ctx context.Context, task **lookupBlockTask) {
	var ok bool
	for {
		select {
		case *task, ok = <-w.workCh:
			if !ok {
				return
			}
		case <-w.finished:
			return
		}
		err := w.executeTask(ctx, *task)
		(*task).doneCh <- err
	}
}

func (w *indexMergeBlockScanWorker) handlePickAndInterDircTaskPanic(ctx context.Context, task *lookupBlockTask) func(r interface{}) {
	return func(r interface{}) {
		if r == nil {
			return
		}

		err4Panic := errors.Errorf("panic in IndexMergeReaderInterlockingDirectorate indexMergeBlockScanWorker: %v", r)
		logutil.Logger(ctx).Error(err4Panic.Error())
		// task is nil if the panic happened before any task was received.
		if task != nil {
			task.doneCh <- err4Panic
		}
	}
}

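// executeTask reads all events for the task's handles through a freshly built
// causet reader, charging the fetched chunks to the task's memory tracker.
// With a single causet plan, the event count must match the handle count; a
// mismatch indicates inconsistent data.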
func (w *indexMergeBlockScanWorker) executeTask(ctx context.Context, task *lookupBlockTask) error {
	blockReader, err := w.buildTblReader(ctx, task.handles)
	if err != nil {
		logutil.Logger(ctx).Error("build causet reader failed", zap.Error(err))
		return err
	}
	defer terror.Call(blockReader.Close)
	task.memTracker = w.memTracker
	memUsage := int64(cap(task.handles) * 8)
	task.memUsage = memUsage
	task.memTracker.Consume(memUsage)
	handleCnt := len(task.handles)
	task.rows = make([]chunk.Event, 0, handleCnt)
	for {
		chk := newFirstChunk(blockReader)
		err = Next(ctx, blockReader, chk)
		if err != nil {
			logutil.Logger(ctx).Error("causet reader fetch next chunk failed", zap.Error(err))
			return err
		}
		if chk.NumEvents() == 0 {
			break
		}
		memUsage = chk.MemoryUsage()
		task.memUsage += memUsage
		task.memTracker.Consume(memUsage)
		iter := chunk.NewIterator4Chunk(chk)
		for event := iter.Begin(); event != iter.End(); event = iter.Next() {
			task.rows = append(task.rows, event)
		}
	}

	memUsage = int64(cap(task.rows)) * int64(unsafe.Sizeof(chunk.Event{}))
	task.memUsage += memUsage
	task.memTracker.Consume(memUsage)
	if handleCnt != len(task.rows) && len(w.tblCausets) == 1 {
		return errors.Errorf("handle count %d isn't equal to value count %d", handleCnt, len(task.rows))
	}
	return nil
}