github.com/matrixorigin/matrixone@v1.2.0/pkg/txn/trace/service_data_event.go (about)

     1  // Copyright 2024 Matrix Origin
     2  //
     3  // Licensed under the Apache License, Version 2.0 (the "License");
     4  // you may not use this file except in compliance with the License.
     5  // You may obtain a copy of the License at
     6  //
     7  //      http://www.apache.org/licenses/LICENSE-2.0
     8  //
     9  // Unless required by applicable law or agreed to in writing, software
    10  // distributed under the License is distributed on an "AS IS" BASIS,
    11  // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    12  // See the License for the specific language governing permissions and
    13  // limitations under the License.
    14  
    15  package trace
    16  
    17  import (
    18  	"context"
    19  	"fmt"
    20  	"path/filepath"
    21  	"strings"
    22  	"time"
    23  
    24  	"github.com/matrixorigin/matrixone/pkg/common/moerr"
    25  	"github.com/matrixorigin/matrixone/pkg/common/reuse"
    26  	"github.com/matrixorigin/matrixone/pkg/container/types"
    27  	"github.com/matrixorigin/matrixone/pkg/container/vector"
    28  	"github.com/matrixorigin/matrixone/pkg/pb/api"
    29  	"github.com/matrixorigin/matrixone/pkg/pb/timestamp"
    30  	"github.com/matrixorigin/matrixone/pkg/util/executor"
    31  )
    32  
    33  func (s *service) ApplyLogtail(
    34  	entry *api.Entry,
    35  	commitTSIndex int) {
    36  	if !s.Enabled(FeatureTraceData) {
    37  		return
    38  	}
    39  
    40  	if s.atomic.closed.Load() {
    41  		return
    42  	}
    43  
    44  	entryData := newEntryData(entry, commitTSIndex, time.Now().UnixNano())
    45  	defer func() {
    46  		entryData.close()
    47  	}()
    48  
    49  	filters := s.atomic.tableFilters.Load()
    50  	if skipped := filters.filter(entryData); skipped {
    51  		return
    52  	}
    53  
    54  	buf := reuse.Alloc[buffer](nil)
    55  	entryData.createApply(
    56  		buf,
    57  		func(e dataEvent) {
    58  			s.entryC <- event{
    59  				csv: e,
    60  			}
    61  		},
    62  		&s.atomic.complexPKTables)
    63  	s.entryC <- event{
    64  		buffer: buf,
    65  	}
    66  }
    67  
    68  func (s *service) ApplyFlush(
    69  	txnID []byte,
    70  	tableID uint64,
    71  	from, to timestamp.Timestamp,
    72  	count int) {
    73  	if !s.Enabled(FeatureTraceData) {
    74  		return
    75  	}
    76  
    77  	if s.atomic.closed.Load() {
    78  		return
    79  	}
    80  
    81  	entryData := newTableOnlyEntryData(tableID)
    82  	defer func() {
    83  		entryData.close()
    84  	}()
    85  
    86  	filters := s.atomic.tableFilters.Load()
    87  	if skipped := filters.filter(entryData); skipped {
    88  		return
    89  	}
    90  
    91  	buf := reuse.Alloc[buffer](nil)
    92  	fromTS := buf.writeTimestamp(from)
    93  	toTS := buf.writeTimestamp(to)
    94  	result := buf.writeIntWithBytes(int64(count))
    95  
    96  	idx := buf.buf.GetWriteIndex()
    97  	buf.buf.WriteString(fromTS)
    98  	buf.buf.WriteString(" ")
    99  	buf.buf.WriteString(toTS)
   100  	buf.buf.WriteString(" ")
   101  	buf.buf.MustWrite(result)
   102  
   103  	s.entryC <- event{
   104  		csv: newFlushEvent(
   105  			time.Now().UnixNano(),
   106  			txnID,
   107  			tableID,
   108  			buf.buf.RawSlice(idx, buf.buf.GetWriteIndex()),
   109  		),
   110  	}
   111  	s.entryC <- event{
   112  		buffer: buf,
   113  	}
   114  }
   115  
   116  func (s *service) ApplyTransferRowID(
   117  	txnID []byte,
   118  	tableID uint64,
   119  	from, to []byte,
   120  	fromBlockID, toBlockID []byte,
   121  	vec *vector.Vector,
   122  	row int) {
   123  	if !s.Enabled(FeatureTraceData) {
   124  		return
   125  	}
   126  
   127  	if s.atomic.closed.Load() {
   128  		return
   129  	}
   130  
   131  	entryData := newTableOnlyEntryData(tableID)
   132  	defer func() {
   133  		entryData.close()
   134  	}()
   135  
   136  	filters := s.atomic.tableFilters.Load()
   137  	if skipped := filters.filter(entryData); skipped {
   138  		return
   139  	}
   140  
   141  	buf := reuse.Alloc[buffer](nil)
   142  
   143  	fromRowID := types.Rowid(from)
   144  	toRowID := types.Rowid(to)
   145  	fromBlockIDHex := buf.writeHex(fromBlockID)
   146  	toBlockIDHex := buf.writeHex(toBlockID)
   147  
   148  	tmp := buf.alloc(100)
   149  	idx := buf.buf.GetWriteIndex()
   150  
   151  	_, ok := s.atomic.complexPKTables.Load(tableID)
   152  	if ok {
   153  		writeCompletedValue(vec.GetBytesAt(row), buf, tmp)
   154  	} else {
   155  		writeValue(vec, row, buf, tmp)
   156  	}
   157  
   158  	buf.buf.WriteString(" row_id: ")
   159  	buf.buf.WriteString(fromRowID.String())
   160  	buf.buf.WriteString("->")
   161  	buf.buf.WriteString(toRowID.String())
   162  	buf.buf.WriteString(" block_id:")
   163  	buf.buf.WriteString(fromBlockIDHex)
   164  	buf.buf.WriteString("->")
   165  	buf.buf.WriteString(toBlockIDHex)
   166  	data := buf.buf.RawSlice(idx, buf.buf.GetWriteIndex())
   167  
   168  	s.entryC <- event{
   169  		csv: newTransferEvent(
   170  			time.Now().UnixNano(),
   171  			txnID,
   172  			tableID,
   173  			data,
   174  		),
   175  	}
   176  	s.entryC <- event{
   177  		buffer: buf,
   178  	}
   179  }
   180  
   181  func (s *service) ApplyDeleteObject(
   182  	tableID uint64,
   183  	ts timestamp.Timestamp,
   184  	objName string,
   185  	tag string) {
   186  	if !s.Enabled(FeatureTraceData) {
   187  		return
   188  	}
   189  
   190  	if s.atomic.closed.Load() {
   191  		return
   192  	}
   193  
   194  	entryData := newTableOnlyEntryData(tableID)
   195  	defer func() {
   196  		entryData.close()
   197  	}()
   198  
   199  	filters := s.atomic.tableFilters.Load()
   200  	if skipped := filters.filter(entryData); skipped {
   201  		return
   202  	}
   203  
   204  	buf := reuse.Alloc[buffer](nil)
   205  	version := buf.writeTimestamp(ts)
   206  
   207  	idx := buf.buf.GetWriteIndex()
   208  	buf.buf.WriteString(objName)
   209  	buf.buf.MustWriteByte(' ')
   210  	buf.buf.WriteString(version)
   211  	buf.buf.MustWriteByte(' ')
   212  	buf.buf.WriteString(tag)
   213  	data := buf.buf.RawSlice(idx, buf.buf.GetWriteIndex())
   214  
   215  	s.entryC <- event{
   216  		csv: newDeleteObjectEvent(
   217  			time.Now().UnixNano(),
   218  			tableID,
   219  			data,
   220  		),
   221  	}
   222  	s.entryC <- event{
   223  		buffer: buf,
   224  	}
   225  }
   226  
   227  func (s *service) AddTableFilter(name string, columns []string) error {
   228  	ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
   229  	defer cancel()
   230  
   231  	now, _ := s.clock.Now()
   232  	return s.executor.ExecTxn(
   233  		ctx,
   234  		func(txn executor.TxnExecutor) error {
   235  			txn.Use("mo_catalog")
   236  			res, err := txn.Exec(fmt.Sprintf("select rel_id from mo_tables where relname = '%s'", name), executor.StatementOption{})
   237  			if err != nil {
   238  				return err
   239  			}
   240  			defer res.Close()
   241  
   242  			var tables []uint64
   243  			res.ReadRows(func(rows int, cols []*vector.Vector) bool {
   244  				tables = append(tables, vector.MustFixedCol[uint64](cols[0])...)
   245  				return true
   246  			})
   247  			if len(tables) == 0 {
   248  				return moerr.NewNoSuchTableNoCtx("", name)
   249  			}
   250  
   251  			txn.Use(DebugDB)
   252  			for _, id := range tables {
   253  				r, err := txn.Exec(addTableFilterSQL(id, name, columns), executor.StatementOption{})
   254  				if err != nil {
   255  					return err
   256  				}
   257  				r.Close()
   258  			}
   259  			return nil
   260  		},
   261  		executor.Options{}.
   262  			WithMinCommittedTS(now).
   263  			WithWaitCommittedLogApplied().
   264  			WithDisableTrace())
   265  }
   266  
   267  func (s *service) ClearTableFilters() error {
   268  	ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
   269  	defer cancel()
   270  
   271  	now, _ := s.clock.Now()
   272  	err := s.executor.ExecTxn(
   273  		ctx,
   274  		func(txn executor.TxnExecutor) error {
   275  			txn.Use(DebugDB)
   276  			res, err := txn.Exec(
   277  				fmt.Sprintf("truncate table %s",
   278  					TraceTableFilterTable),
   279  				executor.StatementOption{})
   280  			if err != nil {
   281  				return err
   282  			}
   283  			res.Close()
   284  			return nil
   285  		},
   286  		executor.Options{}.
   287  			WithDisableTrace().
   288  			WithMinCommittedTS(now).
   289  			WithWaitCommittedLogApplied())
   290  	if err != nil {
   291  		return err
   292  	}
   293  
   294  	return s.RefreshTableFilters()
   295  }
   296  
   297  func (s *service) RefreshTableFilters() error {
   298  	ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
   299  	defer cancel()
   300  
   301  	var filters []EntryFilter
   302  	now, _ := s.clock.Now()
   303  	err := s.executor.ExecTxn(
   304  		ctx,
   305  		func(txn executor.TxnExecutor) error {
   306  			txn.Use(DebugDB)
   307  			res, err := txn.Exec(
   308  				fmt.Sprintf("select table_id, columns from %s",
   309  					TraceTableFilterTable),
   310  				executor.StatementOption{})
   311  			if err != nil {
   312  				return err
   313  			}
   314  			defer res.Close()
   315  
   316  			res.ReadRows(func(rows int, cols []*vector.Vector) bool {
   317  				for i := 0; i < rows; i++ {
   318  					id := vector.MustFixedCol[uint64](cols[0])[i]
   319  					columns := cols[1].GetStringAt(i)
   320  					filters = append(filters, NewKeepTableFilter(id, strings.Split(columns, ",")))
   321  				}
   322  				return true
   323  			})
   324  			return nil
   325  		},
   326  		executor.Options{}.
   327  			WithDisableTrace().
   328  			WithMinCommittedTS(now).
   329  			WithWaitCommittedLogApplied())
   330  	if err != nil {
   331  		return err
   332  	}
   333  
   334  	s.atomic.tableFilters.Store(&tableFilters{filters: filters})
   335  	return nil
   336  }
   337  
// handleDataEvents runs the shared event loop for data trace events read
// from s.entryC, writing them out through files produced by dataCSVFile;
// it returns when ctx is canceled.
func (s *service) handleDataEvents(ctx context.Context) {
	s.handleEvent(
		ctx,
		s.dataCSVFile,
		9, // presumably the csv column count for EventDataTable — confirm against handleEvent
		EventDataTable,
		s.entryC,
	)
}
   347  
// dataCSVFile returns the path of the next data csv file under s.dir.
// The atomically incremented sequence number keeps successive file names
// unique across rotations.
func (s *service) dataCSVFile() string {
	return filepath.Join(s.dir, fmt.Sprintf("data-%d.csv", s.seq.Add(1)))
}
   351  
   352  func addTableFilterSQL(
   353  	id uint64,
   354  	name string,
   355  	columns []string) string {
   356  	return fmt.Sprintf("insert into %s (table_id, table_name, columns) values (%d, '%s', '%s')",
   357  		TraceTableFilterTable,
   358  		id,
   359  		name,
   360  		strings.Join(columns, ","))
   361  }