github.com/whtcorpsinc/milevadb-prod@v0.0.0-20211104133533-f57f4be3b597/interlock/memtable_reader.go

// Copyright 2020 WHTCORPS INC, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// See the License for the specific language governing permissions and
// limitations under the License.

package interlock

import (
	"container/heap"
	"context"
	"encoding/json"
	"fmt"
	"io"
	"net/http"
	"sort"
	"strings"
	"sync"
	"time"

	"github.com/whtcorpsinc/BerolinaSQL/perceptron"
	"github.com/whtcorpsinc/BerolinaSQL/terror"
	"github.com/whtcorpsinc/ekvproto/pkg/diagnosticspb"
	"github.com/whtcorpsinc/errors"
	"github.com/whtcorpsinc/failpoint"
	"github.com/whtcorpsinc/log"
	causetembedded "github.com/whtcorpsinc/milevadb/causet/embedded"
	"github.com/whtcorpsinc/milevadb/config"
	"github.com/whtcorpsinc/milevadb/schemareplicant"
	"github.com/whtcorpsinc/milevadb/soliton"
	"github.com/whtcorpsinc/milevadb/soliton/FIDelapi"
	"github.com/whtcorpsinc/milevadb/soliton/chunk"
	"github.com/whtcorpsinc/milevadb/soliton/set"
	"github.com/whtcorpsinc/milevadb/stochastikctx"
	"github.com/whtcorpsinc/milevadb/stochastikctx/variable"
	"github.com/whtcorpsinc/milevadb/types"
	"github.com/whtcorpsinc/sysutil"
	"go.uber.org/zap"
	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials"
)

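// clusterLogBatchSize bounds the number of log events returned by a single
// clusterLogRetriever.retrieve call; the interlock keeps calling retrieve
// until every log stream is drained.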
const clusterLogBatchSize = 256

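// dummyCloser provides a no-op close for retrievers that do not hold any
// resources that need to be released.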
type dummyCloser struct{}

func (dummyCloser) close() error { return nil }

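// memBlockRetriever is implemented by every memory-block data source below.
// retrieve produces the rows for one block; close releases whatever the
// retriever still holds (for example, in-flight log streams).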
type memBlockRetriever interface {
	retrieve(ctx context.Context, sctx stochastikctx.Context) ([][]types.Causet, error)
	close() error
}

// MemBlockReaderInterDirc executes memory-block information retrieval from the MemBlock components.
type MemBlockReaderInterDirc struct {
	baseInterlockingDirectorate
	causet    *perceptron.BlockInfo
	retriever memBlockRetriever
	// cacheRetrieved indicates whether the parent interlock has already
	// retrieved from the inspection cache in inspection mode.
	cacheRetrieved bool
}

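// isInspectionCacheableBlock reports whether rows of the given cluster block
// may be served from the stochastik-level inspection cache; only the cluster
// config, info, system-info, load and hardware blocks qualify.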
func (e *MemBlockReaderInterDirc) isInspectionCacheableBlock(tblName string) bool {
	switch tblName {
	case strings.ToLower(schemareplicant.BlockClusterConfig),
		strings.ToLower(schemareplicant.BlockClusterInfo),
		strings.ToLower(schemareplicant.BlockClusterSystemInfo),
		strings.ToLower(schemareplicant.BlockClusterLoad),
		strings.ToLower(schemareplicant.BlockClusterHardware):
		return true
	default:
		return false
	}
}

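// Next materializes every event produced by the retriever into the request
// chunk in a single pass; a retriever signals exhaustion by returning an empty
// result on later calls. In inspection mode the rows are also stored in the
// stochastik-level InspectionBlockCache, so repeated reads of a cacheable
// cluster block within one inspection reuse the first result. A statement such
// as `SELECT * FROM cluster_config` is a typical consumer of this path (the
// block name is illustrative, matching the constants referenced above).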
// Next implements the InterlockingDirectorate Next interface.
func (e *MemBlockReaderInterDirc) Next(ctx context.Context, req *chunk.Chunk) error {
	var (
		rows [][]types.Causet
		err  error
	)

	// The `InspectionBlockCache` is assigned at the beginning of retrieving and
	// cleaned up at the end of retrieving, so nil means we are currently in non-inspection mode.
	if cache, tbl := e.ctx.GetStochastikVars().InspectionBlockCache, e.causet.Name.L; cache != nil &&
		e.isInspectionCacheableBlock(tbl) {
		// TODO: cached rows will be returned fully, we should refactor this part.
		if !e.cacheRetrieved {
			// Obtain data from cache first.
			cached, found := cache[tbl]
			if !found {
				rows, err := e.retriever.retrieve(ctx, e.ctx)
				cached = variable.BlockSnapshot{Events: rows, Err: err}
				cache[tbl] = cached
			}
			e.cacheRetrieved = true
			rows, err = cached.Events, cached.Err
		}
	} else {
		rows, err = e.retriever.retrieve(ctx, e.ctx)
	}
	if err != nil {
		return err
	}

	if len(rows) == 0 {
		req.Reset()
		return nil
	}

	req.GrowAndReset(len(rows))
	mublockEvent := chunk.MutEventFromTypes(retTypes(e))
	for _, event := range rows {
		mublockEvent.SetCausets(event...)
		req.AppendEvent(mublockEvent.ToEvent())
	}
	return nil
}

// Close implements the InterlockingDirectorate Close interface.
func (e *MemBlockReaderInterDirc) Close() error {
	return e.retriever.close()
}

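// clusterConfigRetriever reads the configuration of every selected node over
// HTTP. It embeds dummyCloser because nothing is kept open once retrieve
// returns, and the retrieved flag ensures a second call yields no rows.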
type clusterConfigRetriever struct {
	dummyCloser
	retrieved bool
	extractor *causetembedded.ClusterBlockExtractor
}

// retrieve implements the memBlockRetriever interface
func (e *clusterConfigRetriever) retrieve(_ context.Context, sctx stochastikctx.Context) ([][]types.Causet, error) {
	if e.extractor.SkipRequest || e.retrieved {
		return nil, nil
	}
	e.retrieved = true
	return fetchClusterConfig(sctx, e.extractor.NodeTypes, e.extractor.Instances)
}

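// fetchClusterConfig queries the config HTTP endpoint of every selected server
// concurrently, flattens the returned JSON into sorted key/value pairs, and
// emits one event of the shape (type, address, key, value) per item; a row such
// as ("einsteindb", "127.0.0.1:20160", "raftstore.sync-log", "true") is only
// illustrative. Failures of individual nodes are reported as warnings instead
// of failing the whole statement.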
func fetchClusterConfig(sctx stochastikctx.Context, nodeTypes, nodeAddrs set.StringSet) ([][]types.Causet, error) {
	type result struct {
		idx  int
		rows [][]types.Causet
		err  error
	}
	serversInfo, err := schemareplicant.GetClusterServerInfo(sctx)
	failpoint.Inject("mockClusterConfigServerInfo", func(val failpoint.Value) {
		if s := val.(string); len(s) > 0 {
			// erase the error
			serversInfo, err = parseFailpointServerInfo(s), nil
		}
	})
	if err != nil {
		return nil, err
	}
	serversInfo = filterClusterServerInfo(serversInfo, nodeTypes, nodeAddrs)

	var finalEvents [][]types.Causet
	wg := sync.WaitGroup{}
	ch := make(chan result, len(serversInfo))
	for i, srv := range serversInfo {
		typ := srv.ServerType
		address := srv.Address
		statusAddr := srv.StatusAddr
		if len(statusAddr) == 0 {
			sctx.GetStochastikVars().StmtCtx.AppendWarning(errors.Errorf("%s node %s does not contain status address", typ, address))
			continue
		}
		wg.Add(1)
		go func(index int) {
			soliton.WithRecovery(func() {
				defer wg.Done()
				var url string
				switch typ {
				case "fidel":
					url = fmt.Sprintf("%s://%s%s", soliton.InternalHTTPSchema(), statusAddr, FIDelapi.Config)
				case "einsteindb", "milevadb":
					url = fmt.Sprintf("%s://%s/config", soliton.InternalHTTPSchema(), statusAddr)
				default:
					ch <- result{err: errors.Errorf("unknown node type: %s(%s)", typ, address)}
					return
				}

				req, err := http.NewRequest(http.MethodGet, url, nil)
				if err != nil {
					ch <- result{err: errors.Trace(err)}
					return
				}
				req.Header.Add("FIDel-Allow-follower-handle", "true")
				resp, err := soliton.InternalHTTPClient().Do(req)
				if err != nil {
					ch <- result{err: errors.Trace(err)}
					return
				}
				defer func() {
					terror.Log(resp.Body.Close())
				}()
				if resp.StatusCode != http.StatusOK {
					ch <- result{err: errors.Errorf("request %s failed: %s", url, resp.Status)}
					return
				}
				var nested map[string]interface{}
				if err = json.NewCausetDecoder(resp.Body).Decode(&nested); err != nil {
					ch <- result{err: errors.Trace(err)}
					return
				}
				data := config.FlattenConfigItems(nested)
				type item struct {
					key string
					val string
				}
				var items []item
				for key, val := range data {
					var str string
					switch val.(type) {
					case string: // remove quotes
						str = val.(string)
					default:
						tmp, err := json.Marshal(val)
						if err != nil {
							ch <- result{err: errors.Trace(err)}
							return
						}
						str = string(tmp)
					}
					items = append(items, item{key: key, val: str})
				}
				sort.Slice(items, func(i, j int) bool { return items[i].key < items[j].key })
				var rows [][]types.Causet
				for _, item := range items {
					rows = append(rows, types.MakeCausets(
						typ,
						address,
						item.key,
						item.val,
					))
				}
				ch <- result{idx: index, rows: rows}
			}, nil)
		}(i)
	}

	wg.Wait()
	close(ch)

	// Keep the original order to make the result more stable.
	var results []result
	for result := range ch {
		if result.err != nil {
			sctx.GetStochastikVars().StmtCtx.AppendWarning(result.err)
			continue
		}
		results = append(results, result)
	}
	sort.Slice(results, func(i, j int) bool { return results[i].idx < results[j].idx })
	for _, result := range results {
		finalEvents = append(finalEvents, result.rows...)
	}
	return finalEvents, nil
}

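// clusterServerInfoRetriever collects hardware, system or load information
// from every selected node through the diagnostics gRPC service;
// serverInfoType selects which of those cluster blocks is being read.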
type clusterServerInfoRetriever struct {
	dummyCloser
	extractor      *causetembedded.ClusterBlockExtractor
	serverInfoType diagnosticspb.ServerInfoType
	retrieved      bool
}

// retrieve implements the memBlockRetriever interface
func (e *clusterServerInfoRetriever) retrieve(ctx context.Context, sctx stochastikctx.Context) ([][]types.Causet, error) {
	if e.extractor.SkipRequest || e.retrieved {
		return nil, nil
	}
	e.retrieved = true

	serversInfo, err := schemareplicant.GetClusterServerInfo(sctx)
	if err != nil {
		return nil, err
	}
	serversInfo = filterClusterServerInfo(serversInfo, e.extractor.NodeTypes, e.extractor.Instances)

	type result struct {
		idx  int
		rows [][]types.Causet
		err  error
	}
	wg := sync.WaitGroup{}
	ch := make(chan result, len(serversInfo))
	infoTp := e.serverInfoType
	finalEvents := make([][]types.Causet, 0, len(serversInfo)*10)
	for i, srv := range serversInfo {
		address := srv.Address
		remote := address
		if srv.ServerType == "milevadb" {
			remote = srv.StatusAddr
		}
		wg.Add(1)
		go func(index int, remote, address, serverTP string) {
			soliton.WithRecovery(func() {
				defer wg.Done()
				items, err := getServerInfoByGRPC(ctx, remote, infoTp)
				if err != nil {
					ch <- result{idx: index, err: err}
					return
				}
				partEvents := serverInfoItemToEvents(items, serverTP, address)
				ch <- result{idx: index, rows: partEvents}
			}, nil)
		}(i, remote, address, srv.ServerType)
	}
	wg.Wait()
	close(ch)
	// Keep the original order to make the result more stable.
	var results []result
	for result := range ch {
		if result.err != nil {
			sctx.GetStochastikVars().StmtCtx.AppendWarning(result.err)
			continue
		}
		results = append(results, result)
	}
	sort.Slice(results, func(i, j int) bool { return results[i].idx < results[j].idx })
	for _, result := range results {
		finalEvents = append(finalEvents, result.rows...)
	}
	return finalEvents, nil
}

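// serverInfoItemToEvents flattens a diagnostics response into events of the
// shape (server type, address, item type, item name, key, value).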
func serverInfoItemToEvents(items []*diagnosticspb.ServerInfoItem, tp, addr string) [][]types.Causet {
	rows := make([][]types.Causet, 0, len(items))
	for _, v := range items {
		for _, item := range v.Pairs {
			event := types.MakeCausets(
				tp,
				addr,
				v.Tp,
				v.Name,
				item.Key,
				item.Value,
			)
			rows = append(rows, event)
		}
	}
	return rows
}

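// getServerInfoByGRPC dials the node's diagnostics service, using TLS when a
// cluster SSL CA is configured, and requests the given kind of server info
// with a 10-second timeout.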
func getServerInfoByGRPC(ctx context.Context, address string, tp diagnosticspb.ServerInfoType) ([]*diagnosticspb.ServerInfoItem, error) {
	opt := grpc.WithInsecure()
	security := config.GetGlobalConfig().Security
	if len(security.ClusterSSLCA) != 0 {
		tlsConfig, err := security.ToTLSConfig()
		if err != nil {
			return nil, errors.Trace(err)
		}
		opt = grpc.WithTransportCredentials(credentials.NewTLS(tlsConfig))
	}
	conn, err := grpc.Dial(address, opt)
	if err != nil {
		return nil, err
	}
	defer func() {
		err := conn.Close()
		if err != nil {
			log.Error("close grpc connection error", zap.Error(err))
		}
	}()

	cli := diagnosticspb.NewDiagnosticsClient(conn)
	ctx, cancel := context.WithTimeout(ctx, time.Second*10)
	defer cancel()
	r, err := cli.ServerInfo(ctx, &diagnosticspb.ServerInfoRequest{Tp: tp})
	if err != nil {
		return nil, err
	}
	return r.Items, nil
}

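// parseFailpointServerInfo parses the failpoint value into server infos. The
// expected format is a semicolon-separated list of "type,address,statusAddr"
// triples, for example "milevadb,127.0.0.1:4000,127.0.0.1:10080;fidel,127.0.0.1:2379,127.0.0.1:2379"
// (the addresses here are purely illustrative).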
func parseFailpointServerInfo(s string) []schemareplicant.ServerInfo {
	var serversInfo []schemareplicant.ServerInfo
	servers := strings.Split(s, ";")
	for _, server := range servers {
		parts := strings.Split(server, ",")
		serversInfo = append(serversInfo, schemareplicant.ServerInfo{
			ServerType: parts[0],
			Address:    parts[1],
			StatusAddr: parts[2],
		})
	}
	return serversInfo
}

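// filterClusterServerInfo keeps only the servers whose type and address match
// the sets pushed down from the WHERE clause; empty sets mean no filtering.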
func filterClusterServerInfo(serversInfo []schemareplicant.ServerInfo, nodeTypes, addresses set.StringSet) []schemareplicant.ServerInfo {
	if len(nodeTypes) == 0 && len(addresses) == 0 {
		return serversInfo
	}

	filterServers := make([]schemareplicant.ServerInfo, 0, len(serversInfo))
	for _, srv := range serversInfo {
		// Skip node types that have been filtered out in the WHERE clause,
		// e.g. SELECT * FROM cluster_config WHERE type='einsteindb'
		if len(nodeTypes) > 0 && !nodeTypes.Exist(srv.ServerType) {
			continue
		}
		// Skip node addresses that have been filtered out in the WHERE clause,
		// e.g. SELECT * FROM cluster_config WHERE address='192.16.8.12:2379'
		if len(addresses) > 0 && !addresses.Exist(srv.Address) {
			continue
		}
		filterServers = append(filterServers, srv)
	}
	return filterServers
}

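// clusterLogRetriever streams logs from every selected node and merges them by
// timestamp. The heap holds at most one pending batch per stream, and cancel
// aborts the outstanding streams when the interlock is closed early.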
type clusterLogRetriever struct {
	isDrained  bool
	retrieving bool
	heap       *logResponseHeap
	extractor  *causetembedded.ClusterLogBlockExtractor
	cancel     context.CancelFunc
}

type logStreamResult struct {
	// Read the next stream result once the current messages are drained.
	next chan logStreamResult

	addr     string
	typ      string
	messages []*diagnosticspb.LogMessage
	err      error
}

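// logResponseHeap is a min-heap of per-stream batches ordered by the timestamp
// of their first message, with ties broken by node type; it drives the k-way
// merge performed in retrieve.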
type logResponseHeap []logStreamResult

func (h logResponseHeap) Len() int {
	return len(h)
}

func (h logResponseHeap) Less(i, j int) bool {
	if lhs, rhs := h[i].messages[0].Time, h[j].messages[0].Time; lhs != rhs {
		return lhs < rhs
	}
	return h[i].typ < h[j].typ
}

func (h logResponseHeap) Swap(i, j int) {
	h[i], h[j] = h[j], h[i]
}

func (h *logResponseHeap) Push(x interface{}) {
	*h = append(*h, x.(logStreamResult))
}

func (h *logResponseHeap) Pop() interface{} {
	old := *h
	n := len(old)
	x := old[n-1]
	*h = old[0 : n-1]
	return x
}

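// initialize validates the pushed-down conditions (a bounded time range and at
// least one pattern, level, instance or node type) and then starts one log
// stream per selected server.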
func (e *clusterLogRetriever) initialize(ctx context.Context, sctx stochastikctx.Context) ([]chan logStreamResult, error) {
	serversInfo, err := schemareplicant.GetClusterServerInfo(sctx)
	failpoint.Inject("mockClusterLogServerInfo", func(val failpoint.Value) {
		// erase the error
		err = nil
		if s := val.(string); len(s) > 0 {
			serversInfo = parseFailpointServerInfo(s)
		}
	})
	if err != nil {
		return nil, err
	}

	instances := e.extractor.Instances
	nodeTypes := e.extractor.NodeTypes
	serversInfo = filterClusterServerInfo(serversInfo, nodeTypes, instances)

	var levels []diagnosticspb.LogLevel
	for l := range e.extractor.LogLevels {
		levels = append(levels, sysutil.ParseLogLevel(l))
	}

	// To avoid overloading the search log interface, the user should normally
	// specify the time range and at least one pattern in the query.
	if e.extractor.StartTime == 0 {
		return nil, errors.New("denied to scan logs, please specify the start time, such as `time > '2020-01-01 00:00:00'`")
	}
	if e.extractor.EndTime == 0 {
		return nil, errors.New("denied to scan logs, please specify the end time, such as `time < '2020-01-01 00:00:00'`")
	}
	patterns := e.extractor.Patterns
	if len(patterns) == 0 && len(levels) == 0 && len(instances) == 0 && len(nodeTypes) == 0 {
		return nil, errors.New("denied to scan full logs (use `SELECT * FROM cluster_log WHERE message LIKE '%'` explicitly if intended)")
	}

	req := &diagnosticspb.SearchLogRequest{
		StartTime: e.extractor.StartTime,
		EndTime:   e.extractor.EndTime,
		Levels:    levels,
		Patterns:  patterns,
	}

	return e.startRetrieving(ctx, sctx, serversInfo, req)
}

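// startRetrieving launches one goroutine per server. Each goroutine opens a
// SearchLog stream and forwards every received batch through an unbuffered
// channel, so a slow consumer naturally applies back-pressure to the streams;
// cancelling the context unblocks any pending send.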
func (e *clusterLogRetriever) startRetrieving(
	ctx context.Context,
	sctx stochastikctx.Context,
	serversInfo []schemareplicant.ServerInfo,
	req *diagnosticspb.SearchLogRequest) ([]chan logStreamResult, error) {
	// gRPC options
	opt := grpc.WithInsecure()
	security := config.GetGlobalConfig().Security
	if len(security.ClusterSSLCA) != 0 {
		tlsConfig, err := security.ToTLSConfig()
		if err != nil {
			return nil, errors.Trace(err)
		}
		opt = grpc.WithTransportCredentials(credentials.NewTLS(tlsConfig))
	}

	// The retrieval may be aborted, so make the context cancelable.
	ctx, e.cancel = context.WithCancel(ctx)

	var results []chan logStreamResult
	for _, srv := range serversInfo {
		typ := srv.ServerType
		address := srv.Address
		statusAddr := srv.StatusAddr
		if len(statusAddr) == 0 {
			sctx.GetStochastikVars().StmtCtx.AppendWarning(errors.Errorf("%s node %s does not contain status address", typ, address))
			continue
		}
		ch := make(chan logStreamResult)
		results = append(results, ch)

		go func(ch chan logStreamResult, serverType, address, statusAddr string) {
			soliton.WithRecovery(func() {
				defer close(ch)

				// MilevaDB provides the diagnostics service via its status address.
				remote := address
				if serverType == "milevadb" {
					remote = statusAddr
				}
				conn, err := grpc.Dial(remote, opt)
				if err != nil {
					ch <- logStreamResult{addr: address, typ: serverType, err: err}
					return
				}
				defer terror.Call(conn.Close)

				cli := diagnosticspb.NewDiagnosticsClient(conn)
				stream, err := cli.SearchLog(ctx, req)
				if err != nil {
					ch <- logStreamResult{addr: address, typ: serverType, err: err}
					return
				}

				for {
					res, err := stream.Recv()
					if err != nil && err == io.EOF {
						return
					}
					if err != nil {
						select {
						case ch <- logStreamResult{addr: address, typ: serverType, err: err}:
						case <-ctx.Done():
						}
						return
					}

					result := logStreamResult{next: ch, addr: address, typ: serverType, messages: res.Messages}
					select {
					case ch <- result:
					case <-ctx.Done():
						return
					}
				}
			}, nil)
		}(ch, typ, address, statusAddr)
	}

	return results, nil
}

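// retrieve starts the log streams lazily on the first call, then repeatedly
// pops the stream whose head message is the oldest, emits that message, and
// refills the stream from its channel once its current batch is exhausted. At
// most clusterLogBatchSize events are returned per call; the interlock keeps
// calling retrieve until the heap is drained.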
func (e *clusterLogRetriever) retrieve(ctx context.Context, sctx stochastikctx.Context) ([][]types.Causet, error) {
	if e.extractor.SkipRequest || e.isDrained {
		return nil, nil
	}

	if !e.retrieving {
		e.retrieving = true
		results, err := e.initialize(ctx, sctx)
		if err != nil {
			e.isDrained = true
			return nil, err
		}

		// initialize the heap
		e.heap = &logResponseHeap{}
		for _, ch := range results {
			result := <-ch
			if result.err != nil || len(result.messages) == 0 {
				if result.err != nil {
					sctx.GetStochastikVars().StmtCtx.AppendWarning(result.err)
				}
				continue
			}
			*e.heap = append(*e.heap, result)
		}
		heap.Init(e.heap)
	}

	// Merge the results
	var finalEvents [][]types.Causet
	for e.heap.Len() > 0 && len(finalEvents) < clusterLogBatchSize {
		minTimeItem := heap.Pop(e.heap).(logStreamResult)
		headMessage := minTimeItem.messages[0]
		loggingTime := time.Unix(headMessage.Time/1000, (headMessage.Time%1000)*int64(time.Millisecond))
		finalEvents = append(finalEvents, types.MakeCausets(
			loggingTime.Format("2006/01/02 15:04:05.000"),
			minTimeItem.typ,
			minTimeItem.addr,
			strings.ToUpper(headMessage.Level.String()),
			headMessage.Message,
		))
		minTimeItem.messages = minTimeItem.messages[1:]
		// The current streaming result is drained; read the next one to refill.
		if len(minTimeItem.messages) == 0 {
			result := <-minTimeItem.next
			if result.err != nil {
				sctx.GetStochastikVars().StmtCtx.AppendWarning(result.err)
				continue
			}
			if len(result.messages) > 0 {
				heap.Push(e.heap, result)
			}
		} else {
			heap.Push(e.heap, minTimeItem)
		}
	}

	// All streams are drained
	e.isDrained = e.heap.Len() == 0

	return finalEvents, nil
}

func (e *clusterLogRetriever) close() error {
	if e.cancel != nil {
		e.cancel()
	}
	return nil
}