github.com/matrixorigin/matrixone@v1.2.0/pkg/txn/trace/service.go (about)

     1  // Copyright 2024 Matrix Origin
     2  //
     3  // Licensed under the Apache License, Version 2.0 (the "License");
     4  // you may not use this file except in compliance with the License.
     5  // You may obtain a copy of the License at
     6  //
     7  //      http://www.apache.org/licenses/LICENSE-2.0
     8  //
     9  // Unless required by applicable law or agreed to in writing, software
    10  // distributed under the License is distributed on an "AS IS" BASIS,
    11  // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    12  // See the License for the specific language governing permissions and
    13  // limitations under the License.
    14  
    15  package trace
    16  
    17  import (
    18  	"context"
    19  	"encoding/csv"
    20  	"encoding/hex"
    21  	"fmt"
    22  	"os"
    23  	"strings"
    24  	"sync"
    25  	"sync/atomic"
    26  	"time"
    27  
    28  	"github.com/fagongzi/goetty/v2/buf"
    29  	"github.com/matrixorigin/matrixone/pkg/common/log"
    30  	"github.com/matrixorigin/matrixone/pkg/common/moerr"
    31  	"github.com/matrixorigin/matrixone/pkg/common/reuse"
    32  	"github.com/matrixorigin/matrixone/pkg/common/runtime"
    33  	"github.com/matrixorigin/matrixone/pkg/common/stopper"
    34  	"github.com/matrixorigin/matrixone/pkg/common/util"
    35  	"github.com/matrixorigin/matrixone/pkg/container/batch"
    36  	"github.com/matrixorigin/matrixone/pkg/container/types"
    37  	"github.com/matrixorigin/matrixone/pkg/container/vector"
    38  	"github.com/matrixorigin/matrixone/pkg/pb/api"
    39  	"github.com/matrixorigin/matrixone/pkg/pb/timestamp"
    40  	"github.com/matrixorigin/matrixone/pkg/txn/client"
    41  	"github.com/matrixorigin/matrixone/pkg/txn/clock"
    42  	"github.com/matrixorigin/matrixone/pkg/util/executor"
    43  	"go.uber.org/zap"
    44  )
    45  
    46  func WithEnable(
    47  	value bool,
    48  	tables []uint64) Option {
    49  	return func(s *service) {
    50  		s.atomic.dataEventEnabled.Store(value)
    51  		s.atomic.txnEventEnabled.Store(value)
    52  		s.atomic.txnWorkspaceEnabled.Store(value)
    53  		s.atomic.txnActionEventEnabled.Store(value)
    54  
    55  		if value {
    56  			s.atomic.txnFilters.Store(&txnFilters{
    57  				filters: []TxnFilter{&allTxnFilter{}},
    58  			})
    59  			f := &tableFilters{}
    60  			for _, id := range tables {
    61  				f.filters = append(f.filters,
    62  					NewKeepTableFilter(id, nil))
    63  			}
    64  			s.atomic.tableFilters.Store(f)
    65  		}
    66  	}
    67  }
    68  
    69  func WithBufferSize(value int) Option {
    70  	return func(s *service) {
    71  		s.options.bufferSize = value
    72  	}
    73  }
    74  
    75  func WithFlushBytes(value int) Option {
    76  	return func(s *service) {
    77  		s.options.flushBytes = value
    78  	}
    79  }
    80  
    81  func WithFlushDuration(value time.Duration) Option {
    82  	return func(s *service) {
    83  		s.options.flushDuration = value
    84  	}
    85  }
    86  
// service implements the txn trace Service: it buffers trace events in
// channels, writes them to per-table CSV files, and asynchronously
// loads those files into trace tables via SQL `load data infile`.
type service struct {
	cn         string              // id of this CN node, written into every CSV record
	client     client.TxnClient
	clock      clock.Clock
	executor   executor.SQLExecutor
	stopper    *stopper.Stopper    // owns all background tasks started in NewService
	txnC       chan event
	entryC     chan event
	txnActionC chan event
	statementC chan event

	loadC  chan loadAction // completed CSV files waiting to be loaded into tables
	seq    atomic.Uint64
	dir    string // data dir holding the temporary CSV files
	logger *log.MOLogger

	// atomic groups all state that is read/written from multiple
	// goroutines (feature switches, filters, closed flag).
	atomic struct {
		flushEnabled          atomic.Bool
		txnEventEnabled       atomic.Bool
		txnWorkspaceEnabled   atomic.Bool
		txnActionEventEnabled atomic.Bool
		dataEventEnabled      atomic.Bool
		statementEnabled      atomic.Bool
		tableFilters          atomic.Pointer[tableFilters]
		txnFilters            atomic.Pointer[txnFilters]
		statementFilters      atomic.Pointer[statementFilters]
		closed                atomic.Bool
		complexPKTables       sync.Map // uint64 -> bool
	}

	// options are fixed after NewService returns.
	options struct {
		flushDuration time.Duration
		flushBytes    int
		bufferSize    int
	}
}
   123  
   124  func NewService(
   125  	dataDir string,
   126  	cn string,
   127  	client client.TxnClient,
   128  	clock clock.Clock,
   129  	executor executor.SQLExecutor,
   130  	opts ...Option) (Service, error) {
   131  	if err := os.RemoveAll(dataDir); err != nil {
   132  		return nil, err
   133  	}
   134  	if err := os.MkdirAll(dataDir, 0755); err != nil {
   135  		return nil, err
   136  	}
   137  
   138  	s := &service{
   139  		stopper:  stopper.NewStopper("txn-trace"),
   140  		cn:       cn,
   141  		client:   client,
   142  		clock:    clock,
   143  		executor: executor,
   144  		dir:      dataDir,
   145  		logger:   runtime.ProcessLevelRuntime().Logger().Named("txn-trace"),
   146  		loadC:    make(chan loadAction, 100000),
   147  	}
   148  	for _, opt := range opts {
   149  		opt(s)
   150  	}
   151  
   152  	if s.options.flushDuration == 0 {
   153  		s.options.flushDuration = 30 * time.Second
   154  	}
   155  	if s.options.flushBytes == 0 {
   156  		s.options.flushBytes = 16 * 1024 * 1024
   157  	}
   158  	if s.options.bufferSize == 0 {
   159  		s.options.bufferSize = 1000000
   160  	}
   161  
   162  	s.entryC = make(chan event, s.options.bufferSize)
   163  	s.txnC = make(chan event, s.options.bufferSize)
   164  	s.txnActionC = make(chan event, s.options.bufferSize)
   165  	s.statementC = make(chan event, s.options.bufferSize)
   166  
   167  	if err := s.stopper.RunTask(s.handleTxnEvents); err != nil {
   168  		panic(err)
   169  	}
   170  	if err := s.stopper.RunTask(s.handleTxnActionEvents); err != nil {
   171  		panic(err)
   172  	}
   173  	if err := s.stopper.RunTask(s.handleDataEvents); err != nil {
   174  		panic(err)
   175  	}
   176  	if err := s.stopper.RunTask(s.handleLoad); err != nil {
   177  		panic(err)
   178  	}
   179  	if err := s.stopper.RunTask(s.handleStatements); err != nil {
   180  		panic(err)
   181  	}
   182  	if err := s.stopper.RunTask(s.watch); err != nil {
   183  		panic(err)
   184  	}
   185  	return s, nil
   186  }
   187  
   188  func (s *service) EnableFlush() {
   189  	s.atomic.flushEnabled.Store(true)
   190  }
   191  
   192  func (s *service) Enable(feature string) error {
   193  	return s.updateState(feature, StateEnable)
   194  }
   195  
   196  func (s *service) Disable(feature string) error {
   197  	return s.updateState(feature, StateDisable)
   198  }
   199  
   200  func (s *service) Enabled(feature string) bool {
   201  	switch feature {
   202  	case FeatureTraceData:
   203  		return s.atomic.dataEventEnabled.Load()
   204  	case FeatureTraceTxn:
   205  		return s.atomic.txnEventEnabled.Load()
   206  	case FeatureTraceTxnAction:
   207  		return s.atomic.txnActionEventEnabled.Load()
   208  	case FeatureTraceStatement:
   209  		return s.atomic.statementEnabled.Load()
   210  	case FeatureTraceTxnWorkspace:
   211  		return s.atomic.txnWorkspaceEnabled.Load()
   212  	}
   213  	return false
   214  }
   215  
   216  func (s *service) Sync() {
   217  	if !s.Enabled(FeatureTraceTxn) &&
   218  		!s.Enabled(FeatureTraceData) &&
   219  		!s.Enabled(FeatureTraceTxnAction) &&
   220  		!s.Enabled(FeatureTraceStatement) &&
   221  		!s.Enabled(FeatureTraceTxnWorkspace) {
   222  		return
   223  	}
   224  
   225  	if s.atomic.closed.Load() {
   226  		return
   227  	}
   228  
   229  	time.Sleep(s.options.flushDuration * 2)
   230  }
   231  
   232  func (s *service) DecodeHexComplexPK(hexPK string) (string, error) {
   233  	v, err := hex.DecodeString(hexPK)
   234  	if err != nil {
   235  		return "", err
   236  	}
   237  
   238  	buf := reuse.Alloc[buffer](nil)
   239  	defer buf.close()
   240  
   241  	dst := buf.alloc(100)
   242  	idx := buf.buf.GetWriteIndex()
   243  	writeCompletedValue(v, buf, dst)
   244  	return string(buf.buf.RawSlice(idx, buf.buf.GetWriteIndex())), nil
   245  }
   246  
   247  func (s *service) Close() {
   248  	s.stopper.Stop()
   249  	s.atomic.closed.Store(true)
   250  	close(s.entryC)
   251  	close(s.txnC)
   252  	close(s.loadC)
   253  }
   254  
// handleEvent is the shared consumer loop behind the per-kind event
// handlers. It drains eventC, renders each event as one CSV record of
// `columns` fields into a rotating CSV file, and hands completed files
// to the load goroutine (via s.loadC) as `load data infile` actions
// targeting tableName. A flush (and file rotation) happens either on
// the periodic ticker or when the accumulated record bytes exceed
// options.flushBytes — in both cases only if flushing is enabled.
// Runs until ctx is cancelled by the stopper.
func (s *service) handleEvent(
	ctx context.Context,
	fileCreator func() string,
	columns int,
	tableName string,
	eventC chan event) {
	ticker := time.NewTicker(s.options.flushDuration)
	defer ticker.Stop()

	var w *csv.Writer
	var f *os.File
	records := make([]string, columns)
	current := ""
	sum := 0

	// Pooled scratch buffer reused for every record; reset on flush.
	buf := reuse.Alloc[buffer](nil)
	defer buf.close()

	// open starts a new CSV target file named by fileCreator.
	// NOTE: Fatal on open failure kills the process by design.
	open := func() {
		current = fileCreator()

		v, err := os.OpenFile(current, os.O_RDWR|os.O_APPEND|os.O_CREATE, 0666)
		if err != nil {
			s.logger.Fatal("failed to open csv file",
				zap.String("file", current),
				zap.Error(err))
		}
		f = v
		w = csv.NewWriter(f)
	}

	// bytes approximates the size of the last record (sum of field
	// lengths, separators not counted).
	bytes := func() int {
		n := 0
		for _, s := range records {
			n += len(s)
		}
		return n
	}

	// flush finalizes the current CSV file, queues it for loading, and
	// rotates to a fresh file. No-op when nothing was written.
	flush := func() {
		defer buf.reset()

		if sum == 0 {
			return
		}

		w.Flush()
		if err := w.Error(); err != nil {
			s.logger.Fatal("failed to flush csv file",
				zap.String("table", tableName),
				zap.Error(err))
		}

		if err := f.Close(); err != nil {
			s.logger.Fatal("failed to close csv file",
				zap.String("table", tableName),
				zap.Error(err))
		}

		// Hand the completed file to handleLoad; it removes the file
		// after a successful load.
		s.loadC <- loadAction{
			sql: fmt.Sprintf("load data infile '%s' into table %s fields terminated by ','",
				current,
				tableName),
			file: current,
		}
		sum = 0
		open()
	}

	open()
	for {
		select {
		case <-ctx.Done():
			return
		case <-ticker.C:
			if s.atomic.flushEnabled.Load() {
				flush()
			}
		case e := <-eventC:
			// An event carrying a buffer is only a release request;
			// otherwise render it as a CSV record.
			if e.buffer != nil {
				e.buffer.close()
			} else {
				e.csv.toCSVRecord(s.cn, buf, records)
				if err := w.Write(records); err != nil {
					s.logger.Fatal("failed to write csv record",
						zap.Error(err))
				}

				sum += bytes()
				if sum > s.options.flushBytes &&
					s.atomic.flushEnabled.Load() {
					flush()
				}
			}
		}
	}
}
   352  
// handleLoad consumes loadC: for every completed CSV file it executes
// the `load data infile` statement in its own txn and then removes the
// file. A failed load is retried forever with a 5s backoff (the inner
// loop only exits on success or service shutdown), so trace data is
// never silently dropped.
func (s *service) handleLoad(ctx context.Context) {
	// load runs one load statement with its own 1-minute timeout,
	// detached from the task ctx so an in-flight load can complete
	// even as shutdown begins.
	load := func(sql string) error {
		ctx, cancel := context.WithTimeout(context.Background(), time.Minute)
		defer cancel()
		return s.executor.ExecTxn(
			ctx,
			func(txn executor.TxnExecutor) error {
				res, err := txn.Exec(sql, executor.StatementOption{})
				if err != nil {
					return err
				}
				res.Close()
				return nil
			},
			executor.Options{}.
				WithDatabase(DebugDB).
				WithDisableTrace())
	}

	for {
		select {
		case <-ctx.Done():
			return
		case e := <-s.loadC:
			// Retry this action until it succeeds or the service stops.
			for {
				select {
				case <-ctx.Done():
					return
				default:
				}

				if err := load(e.sql); err != nil {
					s.logger.Error("load trace data to table failed, retry later",
						zap.String("sql", e.sql),
						zap.Error(err))
					time.Sleep(time.Second * 5)
					continue
				}

				// Loaded successfully; the CSV file is no longer needed.
				if err := os.Remove(e.file); err != nil {
					s.logger.Fatal("failed to remove csv file",
						zap.String("file", e.file),
						zap.Error(err))
				}
				break
			}
		}
	}
}
   402  
// watch polls the features table every 10s and mirrors the persisted
// per-feature state into the in-memory atomic switches, so an
// Enable/Disable executed on any CN propagates to this one. When at
// least one feature is enabled it also refreshes the table/txn/
// statement filters. Polling is skipped until flushing is enabled.
func (s *service) watch(ctx context.Context) {
	ticker := time.NewTicker(time.Second * 10)
	defer ticker.Stop()

	// fetch reads (name, state) rows from the features table using a
	// dedicated 30s-timeout txn.
	fetch := func() ([]string, []string, error) {
		ctx, cancel := context.WithTimeout(context.Background(), time.Second*30)
		defer cancel()
		var features []string
		var states []string
		err := s.executor.ExecTxn(
			ctx,
			func(txn executor.TxnExecutor) error {
				sql := fmt.Sprintf("select name, state from %s", FeaturesTables)
				res, err := txn.Exec(sql, executor.StatementOption{})
				if err != nil {
					return err
				}
				defer res.Close()
				res.ReadRows(func(rows int, cols []*vector.Vector) bool {
					for i := 0; i < rows; i++ {
						features = append(features, cols[0].GetStringAt(i))
						states = append(states, cols[1].GetStringAt(i))
					}
					return true
				})
				return nil
			},
			executor.Options{}.
				WithDatabase(DebugDB).
				WithDisableTrace())
		return features, states, err
	}

	for {
		select {
		case <-ctx.Done():
			return
		case <-ticker.C:
			if !s.atomic.flushEnabled.Load() {
				continue
			}

			features, states, err := fetch()
			if err != nil {
				// Transient failure; try again on the next tick.
				s.logger.Error("failed to fetch trace state",
					zap.Error(err))
				continue
			}

			needRefresh := false
			for i, feature := range features {
				enable := states[i] == StateEnable
				if enable {
					needRefresh = true
				}
				switch feature {
				case FeatureTraceData:
					s.atomic.dataEventEnabled.Store(enable)
				case FeatureTraceTxnWorkspace:
					s.atomic.txnWorkspaceEnabled.Store(enable)
				case FeatureTraceTxn:
					s.atomic.txnEventEnabled.Store(enable)
				case FeatureTraceTxnAction:
					s.atomic.txnActionEventEnabled.Store(enable)
				case FeatureTraceStatement:
					s.atomic.statementEnabled.Store(enable)
				}
			}

			// Filters are only reloaded when something is enabled;
			// errors are logged and retried on the next tick.
			if needRefresh {
				if err := s.RefreshTableFilters(); err != nil {
					s.logger.Error("failed to refresh table filters",
						zap.Error(err))
				}

				if err := s.RefreshTxnFilters(); err != nil {
					s.logger.Error("failed to refresh txn filters",
						zap.Error(err))
				}

				if err := s.RefreshStatementFilters(); err != nil {
					s.logger.Error("failed to refresh statement filters",
						zap.Error(err))
				}
			}
		}
	}
}
   491  
   492  func (s *service) updateState(feature, state string) error {
   493  	switch feature {
   494  	case FeatureTraceData, FeatureTraceTxnAction, FeatureTraceTxn, FeatureTraceStatement, FeatureTraceTxnWorkspace:
   495  	default:
   496  		return moerr.NewNotSupportedNoCtx("feature %s", feature)
   497  	}
   498  
   499  	ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
   500  	defer cancel()
   501  
   502  	now, _ := s.clock.Now()
   503  	return s.executor.ExecTxn(
   504  		ctx,
   505  		func(txn executor.TxnExecutor) error {
   506  			res, err := txn.Exec(
   507  				fmt.Sprintf("update %s set state = '%s' where name = '%s'",
   508  					FeaturesTables,
   509  					state,
   510  					feature),
   511  				executor.StatementOption{})
   512  			if err != nil {
   513  				return err
   514  			}
   515  			res.Close()
   516  			return nil
   517  		},
   518  		executor.Options{}.
   519  			WithDatabase(DebugDB).
   520  			WithMinCommittedTS(now).
   521  			WithWaitCommittedLogApplied().
   522  			WithDisableTrace())
   523  }
   524  
// EntryData is a pooled, column-oriented snapshot of one table entry
// (logtail apply, commit, read, write, or workspace dump) waiting to
// be rendered into data events.
type EntryData struct {
	id         uint64              // table id the entry belongs to
	empty      bool
	at         int64               // event timestamp (unix nanos at capture site)
	snapshotTS timestamp.Timestamp // set only for read entries
	entryType  api.Entry_EntryType
	columns    []string            // column names, parallel to vecs
	vecs       []*vector.Vector    // column data, parallel to columns
	commitVec  *vector.Vector      // commit-ts column, set only via init()
}
   536  
   537  func newEntryData(
   538  	entry *api.Entry,
   539  	commitTSIndex int,
   540  	at int64) *EntryData {
   541  	l := reuse.Alloc[EntryData](nil)
   542  	l.init(entry, commitTSIndex, at)
   543  	return l
   544  }
   545  
   546  func newReadEntryData(
   547  	tableID uint64,
   548  	snapshotTS timestamp.Timestamp,
   549  	bat *batch.Batch,
   550  	columns []string,
   551  	at int64) *EntryData {
   552  	l := reuse.Alloc[EntryData](nil)
   553  	l.at = at
   554  	l.snapshotTS = snapshotTS
   555  	l.id = tableID
   556  	l.columns = append(l.columns, columns...)
   557  	l.vecs = append(l.vecs, bat.Vecs...)
   558  	return l
   559  }
   560  
   561  func newWriteEntryData(
   562  	tableID uint64,
   563  	bat *batch.Batch,
   564  	at int64) *EntryData {
   565  	l := reuse.Alloc[EntryData](nil)
   566  	l.at = at
   567  	l.id = tableID
   568  	if bat != nil && bat.RowCount() > 0 {
   569  		l.columns = append(l.columns, bat.Attrs...)
   570  		l.vecs = append(l.vecs, bat.Vecs...)
   571  	}
   572  	return l
   573  }
   574  
   575  func newWorkspaceEntryData(
   576  	tableID uint64,
   577  	bat *batch.Batch,
   578  	at int64) *EntryData {
   579  	l := reuse.Alloc[EntryData](nil)
   580  	l.at = at
   581  	l.id = tableID
   582  	if bat != nil && bat.RowCount() > 0 {
   583  		l.columns = append(l.columns, bat.Attrs...)
   584  		l.vecs = append(l.vecs, bat.Vecs...)
   585  	}
   586  	return l
   587  }
   588  
   589  func newTableOnlyEntryData(tableID uint64) *EntryData {
   590  	l := reuse.Alloc[EntryData](nil)
   591  	l.id = tableID
   592  	return l
   593  }
   594  
   595  func (l *EntryData) needFilterColumns() bool {
   596  	return !l.snapshotTS.IsEmpty() || l.entryType == api.Entry_Insert
   597  }
   598  
// init fills the entry from a logtail api.Entry, converting each proto
// vector into an in-memory vector. The vector at commitTSIndex is also
// remembered as the commit-ts column.
// NOTE: a proto conversion failure panics — the entry comes from
// logtail and is expected to always decode.
func (l *EntryData) init(
	entry *api.Entry,
	commitTSIndex int,
	at int64) {
	l.at = at
	l.id = entry.TableId
	l.entryType = entry.EntryType
	l.columns = append(l.columns, entry.Bat.Attrs...)
	for i, vec := range entry.Bat.Vecs {
		v, err := vector.ProtoVectorToVector(vec)
		if err != nil {
			panic(err)
		}
		l.vecs = append(l.vecs, v)
		if commitTSIndex == i {
			l.commitVec = v
		}
	}
}
   618  
   619  func (l *EntryData) reset() {
   620  	for i := range l.vecs {
   621  		l.vecs[i] = nil
   622  	}
   623  
   624  	l.id = 0
   625  	l.empty = false
   626  	l.commitVec = nil
   627  	l.columns = l.columns[:0]
   628  	l.vecs = l.vecs[:0]
   629  	l.entryType = api.Entry_EntryType(-1)
   630  	l.snapshotTS = timestamp.Timestamp{}
   631  }
   632  
   633  func (l *EntryData) close() {
   634  	reuse.Free(l, nil)
   635  }
   636  
   637  func (l *EntryData) createApply(
   638  	buf *buffer,
   639  	fn func(e dataEvent),
   640  	completedPKTables *sync.Map) {
   641  	commitTS := vector.MustFixedCol[types.TS](l.commitVec)
   642  
   643  	l.writeToBuf(
   644  		buf,
   645  		func(data []byte, row int) dataEvent {
   646  			return newApplyLogtailEvent(
   647  				l.at,
   648  				l.id,
   649  				l.entryType,
   650  				data,
   651  				commitTS[row].ToTimestamp())
   652  		},
   653  		fn,
   654  		completedPKTables)
   655  }
   656  
   657  func (l *EntryData) createCommit(
   658  	txnID []byte,
   659  	buf *buffer,
   660  	fn func(e dataEvent),
   661  	completedPKTables *sync.Map) {
   662  	l.writeToBuf(
   663  		buf,
   664  		func(data []byte, _ int) dataEvent {
   665  			return newCommitEntryEvent(
   666  				l.at,
   667  				txnID,
   668  				l.id,
   669  				l.entryType,
   670  				data)
   671  		},
   672  		fn,
   673  		completedPKTables)
   674  }
   675  
   676  func (l *EntryData) createRead(
   677  	txnID []byte,
   678  	buf *buffer,
   679  	fn func(e dataEvent),
   680  	completedPKTables *sync.Map) {
   681  	l.writeToBuf(
   682  		buf,
   683  		func(data []byte, row int) dataEvent {
   684  			return newReadEntryEvent(
   685  				l.at,
   686  				txnID,
   687  				l.id,
   688  				data,
   689  				l.snapshotTS)
   690  		},
   691  		fn,
   692  		completedPKTables)
   693  }
   694  
   695  func (l *EntryData) createWrite(
   696  	txnID []byte,
   697  	buf *buffer,
   698  	typ string,
   699  	fn func(e dataEvent),
   700  	completedPKTables *sync.Map) {
   701  
   702  	if typ == "delete" {
   703  		l.entryType = api.Entry_Delete
   704  	}
   705  
   706  	l.writeToBuf(
   707  		buf,
   708  		func(data []byte, row int) dataEvent {
   709  			return newWriteEntryEvent(
   710  				l.at,
   711  				txnID,
   712  				typ,
   713  				l.id,
   714  				data)
   715  		},
   716  		fn,
   717  		completedPKTables)
   718  }
   719  
// createWorkspace emits one workspace data event per row, prefixing
// each row's payload with "<adjustCount>-<offsetCount>: " so multiple
// workspace adjustments of the same txn can be distinguished.
func (l *EntryData) createWorkspace(
	txnID []byte,
	buf *buffer,
	typ string,
	adjustCount int,
	offsetCount int,
	fn func(e dataEvent),
	completedPKTables *sync.Map) {
	l.writeToBuf(
		buf,
		func(data []byte, row int) dataEvent {
			// Render the counters first, then rebuild the payload as
			// "adjust-offset: data" inside the same pooled buffer; the
			// final slice spans only the rebuilt region.
			adjust := buf.writeInt(int64(adjustCount))
			offset := buf.writeInt(int64(offsetCount))
			idx := buf.buf.GetWriteIndex()
			buf.buf.WriteString(adjust)
			buf.buf.WriteString("-")
			buf.buf.WriteString(offset)
			buf.buf.WriteString(": ")
			buf.buf.Write(data)
			data = buf.buf.RawSlice(idx, buf.buf.GetWriteIndex())
			return newWorkspaceEntryEvent(
				l.at,
				txnID,
				typ,
				l.id,
				data)
		},
		fn,
		completedPKTables)
}
   750  
// writeToBuf renders every row of the entry as a
// "row-N{col:value, ...}" string in the pooled buffer, builds a data
// event for each row via factory, and passes it to fn. Columns listed
// in disableColumns are skipped. Complex-PK columns (and delete-PK
// columns of tables already known to have a completed PK) are rendered
// with writeCompletedValue; once such a value is seen the table id is
// recorded in completedPKTables. Entries with no vectors produce a
// single event with empty data.
func (l *EntryData) writeToBuf(
	buf *buffer,
	factory func([]byte, int) dataEvent,
	fn func(e dataEvent),
	completedPKTables *sync.Map) {
	if len(l.vecs) == 0 {
		fn(factory([]byte(""), 0))
		return
	}

	_, ok := completedPKTables.Load(l.id)
	isCompletedPKTable := false
	dst := buf.alloc(100) // scratch for integer/value formatting
	rows := l.vecs[0].Length()
	for row := 0; row < rows; row++ {
		idx := buf.buf.GetWriteIndex()
		buf.buf.WriteString("row-")
		buf.buf.MustWrite(intToString(dst, int64(row)))
		buf.buf.WriteString("{")
		for col, name := range l.columns {
			if _, ok := disableColumns[name]; ok {
				continue
			}
			columnsData := l.vecs[col]
			buf.buf.WriteString(name)
			buf.buf.WriteString(":")
			if isComplexColumn(name) ||
				(isDeletePKColumn(name) &&
					ok &&
					l.entryType == api.Entry_Delete) {
				writeCompletedValue(columnsData.GetBytesAt(row), buf, dst)
				isCompletedPKTable = true
			} else {
				writeValue(columnsData, row, buf, dst)
			}
			// NOTE(review): the separator is keyed on the column index,
			// so when the last column is in disableColumns the output
			// ends in ", }" — presumably tolerated in trace text; verify.
			if col != len(l.columns)-1 {
				buf.buf.WriteString(", ")
			}
		}
		buf.buf.WriteString("}")
		if buf.buf.GetWriteIndex() > idx {
			data := buf.buf.RawSlice(idx, buf.buf.GetWriteIndex())
			fn(factory(data, row))
		}
	}
	// Remember tables whose completed PK rendering was used so later
	// delete entries take the completed-value path.
	if !ok && isCompletedPKTable {
		completedPKTables.Store(l.id, true)
	}
}
   800  
   801  func (l EntryData) TypeName() string {
   802  	return "txn.trace.entryData"
   803  }
   804  
// buffer is a pooled wrapper around a goetty ByteBuf, used as scratch
// space when formatting trace values and CSV records.
type buffer struct {
	buf *buf.ByteBuf
}
   808  
   809  func (b buffer) TypeName() string {
   810  	return "trace.buffer"
   811  }
   812  
   813  func (b *buffer) reset() {
   814  	b.buf.Reset()
   815  }
   816  
   817  func (b *buffer) close() {
   818  	reuse.Free(b, nil)
   819  }
   820  
   821  func (b *buffer) alloc(n int) []byte {
   822  	b.buf.Grow(n)
   823  	idx := b.buf.GetWriteIndex()
   824  	b.buf.SetWriteIndex(idx + n)
   825  	return b.buf.RawSlice(idx, b.buf.GetWriteIndex())
   826  }
   827  
   828  func (b buffer) writeInt(v int64) string {
   829  	dst := b.alloc(20)
   830  	return util.UnsafeBytesToString(intToString(dst, v))
   831  }
   832  
   833  func (b buffer) writeIntWithBytes(v int64) []byte {
   834  	dst := b.alloc(20)
   835  	return intToString(dst, v)
   836  }
   837  
   838  func (b buffer) writeUint(v uint64) string {
   839  	dst := b.alloc(20)
   840  	return util.UnsafeBytesToString(uintToString(dst, v))
   841  }
   842  
   843  func (b buffer) writeHex(src []byte) string {
   844  	if len(src) == 0 {
   845  		return ""
   846  	}
   847  	dst := b.alloc(hex.EncodedLen(len(src)))
   848  	hex.Encode(dst, src)
   849  	return util.UnsafeBytesToString(dst)
   850  }
   851  
   852  func (b buffer) writeHexWithBytes(src []byte) []byte {
   853  	if len(src) == 0 {
   854  		return nil
   855  	}
   856  	dst := b.alloc(hex.EncodedLen(len(src)))
   857  	hex.Encode(dst, src)
   858  	return dst
   859  }
   860  
   861  func (b buffer) writeTimestamp(value timestamp.Timestamp) string {
   862  	dst := b.alloc(20)
   863  	dst2 := b.alloc(20)
   864  	idx := b.buf.GetWriteIndex()
   865  	b.buf.MustWrite(intToString(dst, value.PhysicalTime))
   866  	b.buf.MustWriteByte('-')
   867  	b.buf.MustWrite(uintToString(dst2, uint64(value.LogicalTime)))
   868  	return util.UnsafeBytesToString(b.buf.RawSlice(idx, b.buf.GetWriteIndex()))
   869  }
   870  
   871  func escape(value string) string {
   872  	return strings.ReplaceAll(value, "'", "''")
   873  }
   874  
// loadAction is one completed CSV file queued on service.loadC: the
// `load data infile` statement to run and the file to delete after a
// successful load.
type loadAction struct {
	sql  string
	file string
}
   879  
// writer appends formatted values to a ByteBuf using a shared scratch
// slice, and can return everything written since idx as a string.
type writer struct {
	buf *buf.ByteBuf
	dst []byte // scratch space reused by the Write* methods
	idx int    // write index where this writer's output started
}
   885  
   886  func (w writer) WriteUint(v uint64) {
   887  	w.buf.MustWrite(uintToString(w.dst, v))
   888  }
   889  
   890  func (w writer) WriteInt(v int64) {
   891  	w.buf.MustWrite(intToString(w.dst, v))
   892  }
   893  
   894  func (w writer) WriteString(v string) {
   895  	w.buf.WriteString(v)
   896  }
   897  
// WriteHex hex-encodes v into the shared scratch slice and appends the
// encoded bytes to the buffer; empty input writes nothing.
// NOTE(review): assumes w.dst has capacity for hex.EncodedLen(len(v))
// bytes — the reslice panics otherwise; confirm against how callers
// size dst.
func (w writer) WriteHex(v []byte) {
	if len(v) == 0 {
		return
	}
	dst := w.dst[:hex.EncodedLen(len(v))]
	hex.Encode(dst, v)
	w.buf.MustWrite(dst)
}
   906  
   907  func (w writer) data() string {
   908  	return util.UnsafeBytesToString(w.buf.RawSlice(w.idx, w.buf.GetWriteIndex()))
   909  }