github.com/matrixorigin/matrixone@v0.7.0/pkg/vm/engine/tae/db/db_test.go

     1  // Copyright 2021 Matrix Origin
     2  //
     3  // Licensed under the Apache License, Version 2.0 (the "License");
     4  // you may not use this file except in compliance with the License.
     5  // You may obtain a copy of the License at
     6  //
     7  //      http://www.apache.org/licenses/LICENSE-2.0
     8  //
     9  // Unless required by applicable law or agreed to in writing, software
    10  // distributed under the License is distributed on an "AS IS" BASIS,
    11  // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    12  // See the License for the specific language governing permissions and
    13  // limitations under the License.
    14  
    15  package db
    16  
    17  import (
    18  	"bytes"
    19  	"context"
    20  	"math/rand"
    21  	"reflect"
    22  	"sync"
    23  	"sync/atomic"
    24  	"testing"
    25  	"time"
    26  
    27  	"github.com/matrixorigin/matrixone/pkg/util/fault"
    28  	"github.com/matrixorigin/matrixone/pkg/vm/engine/tae/db/gc"
    29  
    30  	"github.com/matrixorigin/matrixone/pkg/pb/api"
    31  	"github.com/matrixorigin/matrixone/pkg/pb/timestamp"
    32  	"github.com/matrixorigin/matrixone/pkg/vm/engine/tae/dataio/blockio"
    33  	"github.com/matrixorigin/matrixone/pkg/vm/engine/tae/db/checkpoint"
    34  	"github.com/matrixorigin/matrixone/pkg/vm/engine/tae/logtail"
    35  	"github.com/matrixorigin/matrixone/pkg/vm/engine/tae/model"
    36  
    37  	"github.com/matrixorigin/matrixone/pkg/common/moerr"
    38  	"github.com/matrixorigin/matrixone/pkg/common/mpool"
    39  	"github.com/matrixorigin/matrixone/pkg/container/batch"
    40  	"github.com/matrixorigin/matrixone/pkg/container/types"
    41  	"github.com/matrixorigin/matrixone/pkg/container/vector"
    42  	"github.com/matrixorigin/matrixone/pkg/vm/engine/tae/containers"
    43  	"github.com/matrixorigin/matrixone/pkg/vm/engine/tae/testutils/config"
    44  
    45  	pkgcatalog "github.com/matrixorigin/matrixone/pkg/catalog"
    46  	"github.com/matrixorigin/matrixone/pkg/vm/engine/tae/catalog"
    47  	"github.com/matrixorigin/matrixone/pkg/vm/engine/tae/common"
    48  	"github.com/matrixorigin/matrixone/pkg/vm/engine/tae/iface/handle"
    49  	"github.com/matrixorigin/matrixone/pkg/vm/engine/tae/iface/txnif"
    50  	"github.com/matrixorigin/matrixone/pkg/vm/engine/tae/options"
    51  	"github.com/matrixorigin/matrixone/pkg/vm/engine/tae/tables/jobs"
    52  	"github.com/matrixorigin/matrixone/pkg/vm/engine/tae/tables/txnentries"
    53  	"github.com/matrixorigin/matrixone/pkg/vm/engine/tae/tasks"
    54  	ops "github.com/matrixorigin/matrixone/pkg/vm/engine/tae/tasks/worker"
    55  	"github.com/matrixorigin/matrixone/pkg/vm/engine/tae/testutils"
    56  	"github.com/panjf2000/ants/v2"
    57  	"github.com/stretchr/testify/assert"
    58  )
    59  
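         // TestAppend appends batches to a mock table across two transactions and
         // verifies the visible row count by scanning after each commit.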
    60  func TestAppend(t *testing.T) {
    61  	defer testutils.AfterTest(t)()
    62  	testutils.EnsureNoLeak(t)
    63  	tae := newTestEngine(t, nil)
    64  	defer tae.Close()
    65  	schema := catalog.MockSchemaAll(14, 3)
    66  	schema.BlockMaxRows = options.DefaultBlockMaxRows
    67  	schema.SegmentMaxBlocks = options.DefaultBlocksPerSegment
    68  	tae.bindSchema(schema)
    69  	data := catalog.MockBatch(schema, int(schema.BlockMaxRows*2))
    70  	defer data.Close()
    71  	bats := data.Split(4)
    72  	now := time.Now()
    73  	tae.createRelAndAppend(bats[0], true)
    74  	t.Log(time.Since(now))
    75  	tae.checkRowsByScan(bats[0].Length(), false)
    76  
    77  	txn, rel := tae.getRelation()
    78  	err := rel.Append(bats[1])
    79  	assert.NoError(t, err)
    80  	// FIXME
    81  	// checkAllColRowsByScan(t, rel, bats[0].Length()+bats[1].Length(), false)
    82  	err = rel.Append(bats[2])
    83  	assert.NoError(t, err)
    84  	assert.NoError(t, txn.Commit())
    85  	tae.checkRowsByScan(bats[0].Length()+bats[1].Length()+bats[2].Length(), false)
    86  }
    87  
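         // TestAppend2 appends 100 batches concurrently through a worker pool while a
         // heartbeat task prints the dirty-block count, waits until the pending LSN
         // count drops to zero, and finally checks that re-appending an already
         // committed batch fails.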
    88  func TestAppend2(t *testing.T) {
    89  	defer testutils.AfterTest(t)()
    90  	testutils.EnsureNoLeak(t)
    91  	opts := config.WithQuickScanAndCKPOpts(nil)
    92  	db := initDB(t, opts)
    93  	defer db.Close()
    94  
     95  	// this task won't affect the logic of TestAppend2; it just prints logs about the dirty count
    96  	forest := logtail.NewDirtyCollector(db.LogtailMgr, opts.Clock, db.Catalog, new(catalog.LoopProcessor))
    97  	hb := ops.NewHeartBeaterWithFunc(5*time.Millisecond, func() {
    98  		forest.Run()
    99  		t.Log(forest.String())
   100  	}, nil)
   101  	hb.Start()
   102  	defer hb.Stop()
   103  
   104  	schema := catalog.MockSchemaAll(13, 3)
   105  	schema.BlockMaxRows = 400
   106  	schema.SegmentMaxBlocks = 10
   107  	createRelation(t, db, "db", schema, true)
   108  
   109  	totalRows := uint64(schema.BlockMaxRows * 30)
   110  	bat := catalog.MockBatch(schema, int(totalRows))
   111  	defer bat.Close()
   112  	bats := bat.Split(100)
   113  
   114  	var wg sync.WaitGroup
   115  	pool, _ := ants.NewPool(80)
   116  	defer pool.Release()
   117  
   118  	start := time.Now()
   119  	for _, data := range bats {
   120  		wg.Add(1)
   121  		err := pool.Submit(appendClosure(t, data, schema.Name, db, &wg))
   122  		assert.Nil(t, err)
   123  	}
   124  	wg.Wait()
   125  	t.Logf("Append %d rows takes: %s", totalRows, time.Since(start))
   126  	{
   127  		txn, rel := getDefaultRelation(t, db, schema.Name)
   128  		checkAllColRowsByScan(t, rel, int(totalRows), false)
   129  		assert.NoError(t, txn.Commit())
   130  	}
   131  	t.Log(db.Opts.Catalog.SimplePPString(common.PPL1))
   132  
   133  	now := time.Now()
   134  	testutils.WaitExpect(10000, func() bool {
   135  		return db.Scheduler.GetPenddingLSNCnt() == 0
   136  	})
   137  	t.Log(time.Since(now))
   138  	t.Logf("Checkpointed: %d", db.Scheduler.GetCheckpointedLSN())
   139  	t.Logf("GetPenddingLSNCnt: %d", db.Scheduler.GetPenddingLSNCnt())
   140  	assert.Equal(t, uint64(0), db.Scheduler.GetPenddingLSNCnt())
   141  	t.Log(db.Catalog.SimplePPString(common.PPL1))
   142  	wg.Add(1)
   143  	appendFailClosure(t, bats[0], schema.Name, db, &wg)()
   144  	wg.Wait()
   145  }
   146  
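         // TestAppend3 appends a single batch, waits for checkpointing to catch up
         // (pending LSN count reaches zero), and then checks that appending the same
         // batch again fails.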
   147  func TestAppend3(t *testing.T) {
   148  	defer testutils.AfterTest(t)()
   149  	testutils.EnsureNoLeak(t)
   150  	opts := config.WithQuickScanAndCKPOpts(nil)
   151  	tae := initDB(t, opts)
   152  	defer tae.Close()
   153  	schema := catalog.MockSchema(2, 0)
   154  	schema.BlockMaxRows = 10
   155  	schema.SegmentMaxBlocks = 2
   156  	createRelation(t, tae, "db", schema, true)
   157  	bat := catalog.MockBatch(schema, int(schema.BlockMaxRows))
   158  	defer bat.Close()
   159  	var wg sync.WaitGroup
   160  	wg.Add(1)
   161  	appendClosure(t, bat, schema.Name, tae, &wg)()
   162  	wg.Wait()
   163  	testutils.WaitExpect(2000, func() bool {
   164  		return tae.Scheduler.GetPenddingLSNCnt() == 0
   165  	})
   166  	// t.Log(tae.Catalog.SimplePPString(common.PPL1))
   167  	wg.Add(1)
   168  	appendFailClosure(t, bat, schema.Name, tae, &wg)()
   169  	wg.Wait()
   170  }
   171  
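         // TestAppend4 runs the same sequence against four schemas with different
         // single sort keys: append rows, delete one row by filter, compact the
         // blocks, and verify the remaining row count after each step.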
   172  func TestAppend4(t *testing.T) {
   173  	defer testutils.AfterTest(t)()
   174  	testutils.EnsureNoLeak(t)
   175  	opts := config.WithLongScanAndCKPOpts(nil)
   176  	tae := initDB(t, opts)
   177  	defer tae.Close()
   178  	schema1 := catalog.MockSchemaAll(18, 14)
   179  	schema2 := catalog.MockSchemaAll(18, 15)
   180  	schema3 := catalog.MockSchemaAll(18, 16)
   181  	schema4 := catalog.MockSchemaAll(18, 11)
   182  	schema1.BlockMaxRows = 10
   183  	schema2.BlockMaxRows = 10
   184  	schema3.BlockMaxRows = 10
   185  	schema4.BlockMaxRows = 10
   186  	schema1.SegmentMaxBlocks = 2
   187  	schema2.SegmentMaxBlocks = 2
   188  	schema3.SegmentMaxBlocks = 2
   189  	schema4.SegmentMaxBlocks = 2
   190  	schemas := []*catalog.Schema{schema1, schema2, schema3, schema4}
   191  	createDB(t, tae, defaultTestDB)
   192  	for _, schema := range schemas {
   193  		bat := catalog.MockBatch(schema, int(schema.BlockMaxRows*3-1))
   194  		defer bat.Close()
   195  		bats := bat.Split(1)
   196  		createRelation(t, tae, defaultTestDB, schema, false)
   197  		for i := range bats {
   198  			txn, rel := getDefaultRelation(t, tae, schema.Name)
   199  			err := rel.Append(bats[i])
   200  			assert.NoError(t, err)
   201  			err = txn.Commit()
   202  			assert.NoError(t, err)
   203  		}
   204  		txn, rel := getDefaultRelation(t, tae, schema.Name)
   205  		checkAllColRowsByScan(t, rel, bat.Length(), false)
   206  
   207  		v := bat.Vecs[schema.GetSingleSortKeyIdx()].Get(3)
   208  		filter := handle.NewEQFilter(v)
   209  		err := rel.DeleteByFilter(filter)
   210  		assert.NoError(t, err)
   211  		err = txn.Commit()
   212  		assert.NoError(t, err)
   213  
   214  		txn, rel = getDefaultRelation(t, tae, schema.Name)
   215  		checkAllColRowsByScan(t, rel, bat.Length()-1, true)
   216  		err = txn.Commit()
   217  		assert.NoError(t, err)
   218  		compactBlocks(t, 0, tae, defaultTestDB, schema, false)
   219  		txn, rel = getDefaultRelation(t, tae, schema.Name)
   220  		checkAllColRowsByScan(t, rel, bat.Length()-1, false)
   221  		err = txn.Commit()
   222  		assert.NoError(t, err)
   223  	}
   224  }
   225  
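         // testCRUD exercises a create/append/delete/update/compact/drop cycle against
         // the given schema: duplicate appends are rejected, one row is deleted and
         // another updated by filter, blocks are compacted, and the relation is
         // finally dropped.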
   226  func testCRUD(t *testing.T, tae *DB, schema *catalog.Schema) {
   227  	bat := catalog.MockBatch(schema, int(schema.BlockMaxRows*(uint32(schema.SegmentMaxBlocks)+1)-1))
   228  	defer bat.Close()
   229  	bats := bat.Split(4)
   230  
   231  	var updateColIdx int
   232  	if schema.GetSingleSortKeyIdx() >= 17 {
   233  		updateColIdx = 0
   234  	} else {
   235  		updateColIdx = schema.GetSingleSortKeyIdx() + 1
   236  	}
   237  
   238  	createRelationAndAppend(t, 0, tae, defaultTestDB, schema, bats[0], false)
   239  
   240  	txn, rel := getDefaultRelation(t, tae, schema.Name)
   241  	err := rel.Append(bats[0])
   242  	assert.True(t, moerr.IsMoErrCode(err, moerr.ErrDuplicateEntry))
   243  	checkAllColRowsByScan(t, rel, bats[0].Length(), false)
   244  	v := bats[0].Vecs[schema.GetSingleSortKeyIdx()].Get(2)
   245  	filter := handle.NewEQFilter(v)
   246  	err = rel.DeleteByFilter(filter)
   247  	assert.NoError(t, err)
   248  
   249  	oldv := bats[0].Vecs[updateColIdx].Get(5)
   250  	v = bats[0].Vecs[schema.GetSingleSortKeyIdx()].Get(5)
   251  	ufilter := handle.NewEQFilter(v)
   252  	{
   253  		ot := reflect.ValueOf(&oldv).Elem()
   254  		nv := reflect.ValueOf(int8(99))
   255  		if nv.CanConvert(reflect.TypeOf(oldv)) {
   256  			ot.Set(nv.Convert(reflect.TypeOf(oldv)))
   257  		}
   258  	}
   259  	err = rel.UpdateByFilter(ufilter, uint16(updateColIdx), oldv)
   260  	assert.NoError(t, err)
   261  
   262  	checkAllColRowsByScan(t, rel, bats[0].Length()-1, true)
   263  	assert.NoError(t, txn.Commit())
   264  
   265  	txn, rel = getDefaultRelation(t, tae, schema.Name)
   266  	checkAllColRowsByScan(t, rel, bats[0].Length()-1, true)
   267  	for _, b := range bats[1:] {
   268  		err = rel.Append(b)
   269  		assert.NoError(t, err)
   270  	}
   271  	checkAllColRowsByScan(t, rel, bat.Length()-1, true)
   272  	assert.NoError(t, txn.Commit())
   273  
   274  	compactBlocks(t, 0, tae, defaultTestDB, schema, false)
   275  
   276  	txn, rel = getDefaultRelation(t, tae, schema.Name)
   277  	checkAllColRowsByScan(t, rel, bat.Length()-1, false)
   278  	v = bats[0].Vecs[schema.GetSingleSortKeyIdx()].Get(3)
   279  	filter = handle.NewEQFilter(v)
   280  	err = rel.DeleteByFilter(filter)
   281  	assert.NoError(t, err)
   282  	checkAllColRowsByScan(t, rel, bat.Length()-2, true)
   283  	assert.NoError(t, txn.Commit())
   284  
    285  	// After merging blocks, the logic of reading data is modified
   286  	//compactSegs(t, tae, schema)
   287  
   288  	txn, rel = getDefaultRelation(t, tae, schema.Name)
   289  	//checkAllColRowsByScan(t, rel, bat.Length()-2, false)
   290  	checkAllColRowsByScan(t, rel, bat.Length()-1, false)
   291  	assert.NoError(t, txn.Commit())
   292  
   293  	// t.Log(rel.GetMeta().(*catalog.TableEntry).PPString(common.PPL1, 0, ""))
   294  	dropRelation(t, tae, defaultTestDB, schema.Name)
   295  }
   296  
   297  func TestCRUD(t *testing.T) {
   298  	defer testutils.AfterTest(t)()
   299  	testutils.EnsureNoLeak(t)
   300  	opts := config.WithLongScanAndCKPOpts(nil)
   301  	tae := initDB(t, opts)
   302  	defer tae.Close()
   303  	createDB(t, tae, defaultTestDB)
   304  	withTestAllPKType(t, tae, testCRUD)
   305  }
   306  
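         // TestTableHandle checks that GetAppender on a freshly created table, which
         // has no appendable segment yet, returns ErrAppendableSegmentNotFound.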
   307  func TestTableHandle(t *testing.T) {
   308  	defer testutils.AfterTest(t)()
   309  	testutils.EnsureNoLeak(t)
   310  	db := initDB(t, nil)
   311  	defer db.Close()
   312  
   313  	schema := catalog.MockSchema(2, 0)
   314  	schema.BlockMaxRows = 1000
   315  	schema.SegmentMaxBlocks = 2
   316  
   317  	txn, _ := db.StartTxn(nil)
   318  	database, _ := txn.CreateDatabase("db", "")
   319  	rel, _ := database.CreateRelation(schema)
   320  
   321  	tableMeta := rel.GetMeta().(*catalog.TableEntry)
   322  	t.Log(tableMeta.String())
   323  	table := tableMeta.GetTableData()
   324  
   325  	handle := table.GetHandle()
   326  	appender, err := handle.GetAppender()
   327  	assert.Nil(t, appender)
   328  	assert.True(t, moerr.IsMoErrCode(err, moerr.ErrAppendableSegmentNotFound))
   329  }
   330  
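         // TestCreateBlock creates an appendable and a non-appendable block inside the
         // same segment and verifies their appendability flags and the segment's last
         // appendable block.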
   331  func TestCreateBlock(t *testing.T) {
   332  	defer testutils.AfterTest(t)()
   333  	testutils.EnsureNoLeak(t)
   334  	db := initDB(t, nil)
   335  	defer db.Close()
   336  
   337  	txn, _ := db.StartTxn(nil)
   338  	database, _ := txn.CreateDatabase("db", "")
   339  	schema := catalog.MockSchemaAll(13, 12)
   340  	rel, err := database.CreateRelation(schema)
   341  	assert.Nil(t, err)
   342  	seg, err := rel.CreateSegment(false)
   343  	assert.Nil(t, err)
   344  	blk1, err := seg.CreateBlock(false)
   345  	assert.Nil(t, err)
   346  	blk2, err := seg.CreateNonAppendableBlock()
   347  	assert.Nil(t, err)
   348  	lastAppendable := seg.GetMeta().(*catalog.SegmentEntry).LastAppendableBlock()
   349  	assert.Equal(t, blk1.Fingerprint().BlockID, lastAppendable.GetID())
   350  	assert.True(t, lastAppendable.IsAppendable())
   351  	blk2Meta := blk2.GetMeta().(*catalog.BlockEntry)
   352  	assert.False(t, blk2Meta.IsAppendable())
   353  
   354  	t.Log(db.Opts.Catalog.SimplePPString(common.PPL1))
   355  	assert.Nil(t, txn.Commit())
   356  	t.Log(db.Opts.Catalog.SimplePPString(common.PPL1))
   357  }
   358  
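         // TestNonAppendableBlock writes a batch into an object file via the blockio
         // writer, attaches the resulting meta location to a non-appendable block, and
         // then reads values, scans a column view, and applies a range delete on it.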
   359  func TestNonAppendableBlock(t *testing.T) {
   360  	defer testutils.AfterTest(t)()
   361  	testutils.EnsureNoLeak(t)
   362  	db := initDB(t, nil)
   363  	defer db.Close()
   364  	schema := catalog.MockSchemaAll(13, 1)
   365  	schema.BlockMaxRows = 10
   366  	schema.SegmentMaxBlocks = 2
   367  
   368  	bat := catalog.MockBatch(schema, 8)
   369  	defer bat.Close()
   370  
   371  	createRelation(t, db, "db", schema, true)
   372  
   373  	{
   374  		txn, _ := db.StartTxn(nil)
   375  		database, err := txn.GetDatabase("db")
   376  		assert.Nil(t, err)
   377  		rel, err := database.GetRelationByName(schema.Name)
   378  		assert.Nil(t, err)
   379  		seg, err := rel.CreateSegment(false)
   380  		assert.Nil(t, err)
   381  		blk, err := seg.CreateNonAppendableBlock()
   382  		assert.Nil(t, err)
   383  		dataBlk := blk.GetMeta().(*catalog.BlockEntry).GetBlockData()
   384  		name := blockio.EncodeObjectName()
   385  		writer := blockio.NewWriter(context.Background(), dataBlk.GetFs(), name)
   386  		_, err = writer.WriteBlock(bat)
   387  		assert.Nil(t, err)
   388  		blocks, err := writer.Sync()
   389  		assert.Nil(t, err)
   390  		metaLoc, err := blockio.EncodeMetaLocWithObject(
   391  			blocks[0].GetExtent(),
   392  			uint32(bat.Length()),
   393  			blocks)
   394  		assert.Nil(t, err)
   395  		blk.UpdateMetaLoc(metaLoc)
   396  		v, err := dataBlk.GetValue(txn, 4, 2)
   397  		assert.Nil(t, err)
   398  		expectVal := bat.Vecs[2].Get(4)
   399  		assert.Equal(t, expectVal, v)
   400  		assert.Equal(t, bat.Vecs[0].Length(), blk.Rows())
   401  
   402  		view, err := dataBlk.GetColumnDataById(txn, 2, nil)
   403  		assert.Nil(t, err)
   404  		defer view.Close()
   405  		assert.Nil(t, view.DeleteMask)
   406  		assert.Equal(t, bat.Vecs[2].Length(), view.Length())
   407  
   408  		_, err = dataBlk.RangeDelete(txn, 1, 2, handle.DT_Normal)
   409  		assert.Nil(t, err)
   410  
   411  		view, err = dataBlk.GetColumnDataById(txn, 2, nil)
   412  		assert.Nil(t, err)
   413  		defer view.Close()
   414  		assert.True(t, view.DeleteMask.Contains(1))
   415  		assert.True(t, view.DeleteMask.Contains(2))
   416  		assert.Equal(t, bat.Vecs[2].Length(), view.Length())
   417  
   418  		// _, err = dataBlk.Update(txn, 3, 2, int32(999))
   419  		// assert.Nil(t, err)
   420  
   421  		view, err = dataBlk.GetColumnDataById(txn, 2, nil)
   422  		assert.Nil(t, err)
   423  		defer view.Close()
   424  		assert.True(t, view.DeleteMask.Contains(1))
   425  		assert.True(t, view.DeleteMask.Contains(2))
   426  		assert.Equal(t, bat.Vecs[2].Length(), view.Length())
   427  		// v = view.GetData().Get(3)
   428  		// assert.Equal(t, int32(999), v)
   429  
   430  		assert.Nil(t, txn.Commit())
   431  	}
   432  }
   433  
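         // TestCreateSegment creates a non-appendable segment explicitly, appends a
         // small batch, and counts the segment entries in the catalog via a
         // LoopProcessor.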
   434  func TestCreateSegment(t *testing.T) {
   435  	defer testutils.AfterTest(t)()
   436  	testutils.EnsureNoLeak(t)
   437  	tae := initDB(t, nil)
   438  	defer tae.Close()
   439  	schema := catalog.MockSchemaAll(1, 0)
   440  	txn, _ := tae.StartTxn(nil)
   441  	db, err := txn.CreateDatabase("db", "")
   442  	assert.Nil(t, err)
   443  	rel, err := db.CreateRelation(schema)
   444  	assert.Nil(t, err)
   445  	_, err = rel.CreateNonAppendableSegment(false)
   446  	assert.Nil(t, err)
   447  	assert.Nil(t, txn.Commit())
   448  
   449  	bat := catalog.MockBatch(schema, 5)
   450  	defer bat.Close()
   451  
   452  	appendClosure(t, bat, schema.Name, tae, nil)()
   453  
   454  	segCnt := 0
   455  	processor := new(catalog.LoopProcessor)
   456  	processor.SegmentFn = func(segment *catalog.SegmentEntry) error {
   457  		segCnt++
   458  		return nil
   459  	}
   460  	err = tae.Opts.Catalog.RecurLoop(processor)
   461  	assert.Nil(t, err)
   462  	assert.Equal(t, 2+3, segCnt)
   463  	t.Log(tae.Opts.Catalog.SimplePPString(common.PPL1))
   464  }
   465  
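         // TestCompactBlock1 prepares compact-block task data before and after deletes
         // and an update, checking that the prepared columns reflect the visible rows,
         // and finally expects a write-write conflict when committing a compaction
         // that races with a concurrent delete and update.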
   466  func TestCompactBlock1(t *testing.T) {
   467  	defer testutils.AfterTest(t)()
   468  	testutils.EnsureNoLeak(t)
   469  	opts := config.WithLongScanAndCKPOpts(nil)
   470  	db := initDB(t, opts)
   471  	defer db.Close()
   472  	schema := catalog.MockSchemaAll(13, 2)
   473  	schema.BlockMaxRows = 10
   474  	schema.SegmentMaxBlocks = 4
   475  	bat := catalog.MockBatch(schema, int(schema.BlockMaxRows))
   476  	defer bat.Close()
   477  	createRelationAndAppend(t, 0, db, "db", schema, bat, true)
   478  	t.Log(db.Opts.Catalog.SimplePPString(common.PPL1))
   479  
   480  	v := bat.Vecs[schema.GetSingleSortKeyIdx()].Get(2)
   481  	filter := handle.NewEQFilter(v)
   482  	// 1. No updates and deletes
   483  	{
   484  		txn, rel := getDefaultRelation(t, db, schema.Name)
   485  		blkMeta := getOneBlockMeta(rel)
   486  		task, err := jobs.NewCompactBlockTask(tasks.WaitableCtx, txn, blkMeta, db.Scheduler)
   487  		assert.Nil(t, err)
   488  		preparer, _, err := task.PrepareData()
   489  		assert.Nil(t, err)
   490  		assert.NotNil(t, preparer.Columns)
   491  		defer preparer.Close()
   492  		for col := 0; col < len(bat.Vecs); col++ {
   493  			for row := 0; row < bat.Vecs[0].Length(); row++ {
   494  				exp := bat.Vecs[col].Get(row)
   495  				act := preparer.Columns.Vecs[col].Get(row)
   496  				assert.Equal(t, exp, act)
   497  			}
   498  		}
   499  		err = rel.DeleteByFilter(filter)
   500  		assert.NoError(t, err)
   501  		assert.NoError(t, txn.Commit())
   502  	}
   503  	{
   504  		txn, _ := db.StartTxn(nil)
   505  		database, err := txn.GetDatabase("db")
   506  		assert.Nil(t, err)
   507  		rel, err := database.GetRelationByName(schema.Name)
   508  		assert.Nil(t, err)
   509  		v = bat.Vecs[schema.GetSingleSortKeyIdx()].Get(3)
   510  		filter.Val = v
   511  		id, _, err := rel.GetByFilter(filter)
   512  		assert.Nil(t, err)
   513  		seg, _ := rel.GetSegment(id.SegmentID)
   514  		block, err := seg.GetBlock(id.BlockID)
   515  		assert.Nil(t, err)
   516  		blkMeta := block.GetMeta().(*catalog.BlockEntry)
   517  		task, err := jobs.NewCompactBlockTask(tasks.WaitableCtx, txn, blkMeta, nil)
   518  		assert.Nil(t, err)
   519  		preparer, _, err := task.PrepareData()
   520  		assert.Nil(t, err)
   521  		defer preparer.Close()
   522  		assert.Equal(t, bat.Vecs[0].Length()-1, preparer.Columns.Vecs[0].Length())
   523  		{
   524  			txn, _ := db.StartTxn(nil)
   525  			database, err := txn.GetDatabase("db")
   526  			assert.Nil(t, err)
   527  			rel, err := database.GetRelationByName(schema.Name)
   528  			assert.Nil(t, err)
   529  			v = bat.Vecs[schema.GetSingleSortKeyIdx()].Get(4)
   530  			filter.Val = v
   531  			id, offset, err := rel.GetByFilter(filter)
   532  			assert.Nil(t, err)
   533  			err = rel.RangeDelete(id, offset, offset, handle.DT_Normal)
   534  			assert.Nil(t, err)
   535  			f2 := handle.NewEQFilter(v.(int32) + 1)
   536  			err = rel.UpdateByFilter(f2, 3, int64(99))
   537  			assert.Nil(t, err)
   538  			assert.Nil(t, txn.Commit())
   539  		}
   540  		task, err = jobs.NewCompactBlockTask(tasks.WaitableCtx, txn, blkMeta, nil)
   541  		assert.Nil(t, err)
   542  		preparer, _, err = task.PrepareData()
   543  		assert.Nil(t, err)
   544  		defer preparer.Close()
   545  		assert.Equal(t, bat.Vecs[0].Length()-1, preparer.Columns.Vecs[0].Length())
   546  		var maxTs types.TS
   547  		{
   548  			txn, rel := getDefaultRelation(t, db, schema.Name)
   549  			seg, err := rel.GetSegment(id.SegmentID)
   550  			assert.Nil(t, err)
   551  			blk, err := seg.GetBlock(id.BlockID)
   552  			assert.Nil(t, err)
   553  			blkMeta := blk.GetMeta().(*catalog.BlockEntry)
   554  			task, err = jobs.NewCompactBlockTask(tasks.WaitableCtx, txn, blkMeta, nil)
   555  			assert.Nil(t, err)
   556  			preparer, _, err := task.PrepareData()
   557  			assert.Nil(t, err)
   558  			defer preparer.Close()
   559  			assert.Equal(t, bat.Vecs[0].Length()-3, preparer.Columns.Vecs[0].Length())
   560  			maxTs = txn.GetStartTS()
   561  		}
   562  
   563  		dataBlock := block.GetMeta().(*catalog.BlockEntry).GetBlockData()
   564  		changes, err := dataBlock.CollectChangesInRange(txn.GetStartTS(), maxTs.Next())
   565  		assert.NoError(t, err)
   566  		assert.Equal(t, uint64(2), changes.DeleteMask.GetCardinality())
   567  
   568  		destBlock, err := seg.CreateNonAppendableBlock()
   569  		assert.Nil(t, err)
   570  		m := destBlock.GetMeta().(*catalog.BlockEntry)
   571  		txnEntry := txnentries.NewCompactBlockEntry(txn, block, destBlock, db.Scheduler, nil, nil)
   572  		err = txn.LogTxnEntry(m.GetSegment().GetTable().GetDB().ID, destBlock.Fingerprint().TableID, txnEntry, []*common.ID{block.Fingerprint()})
   573  		assert.Nil(t, err)
   574  		assert.Nil(t, err)
   575  		err = txn.Commit()
   576  		assert.True(t, moerr.IsMoErrCode(err, moerr.ErrTxnWWConflict))
   577  	}
   578  }
   579  
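         // TestAddBlksWithMetaLoc compacts two appendable blocks into non-appendable
         // ones, then feeds their meta locations to AddBlksWithMetaLoc on a second
         // table and verifies deduplication as well as the resulting block and segment
         // counts.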
   580  func TestAddBlksWithMetaLoc(t *testing.T) {
   581  	defer testutils.AfterTest(t)()
   582  	testutils.EnsureNoLeak(t)
   583  	opts := config.WithLongScanAndCKPOpts(nil)
   584  	db := initDB(t, opts)
   585  	defer db.Close()
   586  
   587  	worker := ops.NewOpWorker("xx")
   588  	worker.Start()
   589  	defer worker.Stop()
   590  	schema := catalog.MockSchemaAll(13, 2)
   591  	schema.Name = "tb-0"
   592  	schema.BlockMaxRows = 20
   593  	schema.SegmentMaxBlocks = 2
   594  	bat := catalog.MockBatch(schema, int(schema.BlockMaxRows*4))
   595  	defer bat.Close()
   596  	bats := bat.Split(4)
   597  	{
   598  		txn, _, rel := createRelationNoCommit(t, db, "db", schema, true)
   599  		err := rel.Append(bats[0])
   600  		assert.NoError(t, err)
   601  		err = rel.Append(bats[1])
   602  		assert.NoError(t, err)
   603  		assert.Nil(t, txn.Commit())
   604  	}
   605  	//compact blocks
   606  	var newBlockFp1 *common.ID
   607  	var metaLoc1 string
   608  	var newBlockFp2 *common.ID
   609  	var metaLoc2 string
   610  	{
   611  		txn, rel := getRelation(t, 0, db, "db", schema.Name)
   612  		it := rel.MakeBlockIt()
   613  		blkMeta1 := it.GetBlock().GetMeta().(*catalog.BlockEntry)
   614  		it.Next()
   615  		blkMeta2 := it.GetBlock().GetMeta().(*catalog.BlockEntry)
   616  		task1, err := jobs.NewCompactBlockTask(tasks.WaitableCtx, txn, blkMeta1, db.Scheduler)
   617  		assert.NoError(t, err)
   618  		task2, err := jobs.NewCompactBlockTask(tasks.WaitableCtx, txn, blkMeta2, db.Scheduler)
   619  		assert.NoError(t, err)
   620  		worker.SendOp(task1)
   621  		worker.SendOp(task2)
   622  		err = task1.WaitDone()
   623  		assert.NoError(t, err)
   624  		err = task2.WaitDone()
   625  		assert.NoError(t, err)
   626  		newBlockFp1 = task1.GetNewBlock().Fingerprint()
   627  		metaLoc1 = task1.GetNewBlock().GetMetaLoc()
   628  		newBlockFp2 = task2.GetNewBlock().Fingerprint()
   629  		metaLoc2 = task2.GetNewBlock().GetMetaLoc()
   630  		assert.Nil(t, txn.Commit())
   631  	}
   632  	//read new non-appendable block data and check
   633  	{
   634  		txn, rel := getRelation(t, 0, db, "db", schema.Name)
   635  		assert.Equal(t, newBlockFp2.SegmentID, newBlockFp1.SegmentID)
   636  		seg, err := rel.GetSegment(newBlockFp1.SegmentID)
   637  		assert.Nil(t, err)
   638  		blk1, err := seg.GetBlock(newBlockFp1.BlockID)
   639  		assert.Nil(t, err)
   640  		blk2, err := seg.GetBlock(newBlockFp2.BlockID)
   641  		assert.Nil(t, err)
   642  
   643  		view1, err := blk1.GetColumnDataById(2, nil)
   644  		assert.NoError(t, err)
   645  		defer view1.Close()
   646  		assert.True(t, view1.GetData().Equals(bats[0].Vecs[2]))
   647  		assert.Equal(t, blk1.Rows(), bats[0].Vecs[2].Length())
   648  
   649  		view2, err := blk2.GetColumnDataById(2, nil)
   650  		assert.NoError(t, err)
   651  		defer view2.Close()
   652  		assert.True(t, view2.GetData().Equals(bats[1].Vecs[2]))
   653  		assert.Equal(t, blk2.Rows(), bats[1].Vecs[2].Length())
   654  		assert.Nil(t, txn.Commit())
   655  	}
   656  
   657  	{
   658  		schema.Name = "tb-1"
   659  		txn, _, rel := createRelationNoCommit(t, db, "db", schema, false)
   660  		var pks []containers.Vector
   661  		var metaLocs []string
   662  		pks = append(pks, bats[0].Vecs[2])
   663  		pks = append(pks, bats[1].Vecs[2])
   664  		metaLocs = append(metaLocs, metaLoc1)
   665  		metaLocs = append(metaLocs, metaLoc2)
   666  		err := rel.AddBlksWithMetaLoc(pks, "", metaLocs, 0)
   667  		assert.Nil(t, err)
   668  		//local deduplication check
   669  		err = rel.Append(bats[2])
   670  		assert.Nil(t, err)
   671  		err = rel.Append(bats[0])
   672  		assert.NotNil(t, err)
   673  		err = rel.Append(bats[1])
   674  		assert.NotNil(t, err)
   675  		//err = rel.RangeDeleteLocal(start, end)
   676  		//assert.Nil(t, err)
   677  		//assert.True(t, rel.IsLocalDeleted(start, end))
   678  		err = txn.Commit()
   679  		assert.Nil(t, err)
   680  
    681  		// "tb-1" table now has one committed non-appendable segment which contains
    682  		// two non-appendable blocks, and one committed appendable segment which contains one appendable block.
   683  
   684  		//do deduplication check
   685  		txn, rel = getRelation(t, 0, db, "db", schema.Name)
   686  		err = rel.Append(bats[0])
   687  		assert.NotNil(t, err)
   688  		err = rel.Append(bats[1])
   689  		assert.NotNil(t, err)
   690  		err = rel.Append(bats[3])
   691  		assert.Nil(t, err)
   692  		cntOfAblk := 0
   693  		cntOfblk := 0
   694  		forEachBlock(rel, func(blk handle.Block) (err error) {
   695  			if blk.IsAppendableBlock() {
   696  				view, err := blk.GetColumnDataById(3, nil)
   697  				assert.NoError(t, err)
   698  				defer view.Close()
   699  				cntOfAblk++
   700  				return nil
   701  			}
   702  			metaLoc := blk.GetMetaLoc()
   703  			assert.True(t, metaLoc != "")
   704  			if metaLoc == metaLoc1 {
   705  				view, err := blk.GetColumnDataById(2, nil)
   706  				assert.NoError(t, err)
   707  				defer view.Close()
   708  				assert.True(t, view.GetData().Equals(bats[0].Vecs[2]))
   709  			} else {
   710  				view, err := blk.GetColumnDataById(3, nil)
   711  				assert.NoError(t, err)
   712  				defer view.Close()
   713  				assert.True(t, view.GetData().Equals(bats[1].Vecs[3]))
   714  
   715  			}
   716  			cntOfblk++
   717  			return
   718  		})
   719  		assert.True(t, cntOfblk == 2)
   720  		assert.True(t, cntOfAblk == 2)
   721  		assert.Nil(t, txn.Commit())
   722  		//check count of committed segments.
   723  		cntOfAseg := 0
   724  		cntOfseg := 0
   725  		txn, rel = getRelation(t, 0, db, "db", schema.Name)
   726  		forEachSegment(rel, func(seg handle.Segment) (err error) {
   727  			if seg.IsAppendable() {
   728  				cntOfAseg++
   729  				return
   730  			}
   731  			cntOfseg++
   732  			return
   733  		})
   734  		assert.True(t, cntOfseg == 1)
   735  		assert.True(t, cntOfAseg == 1)
   736  		assert.Nil(t, txn.Commit())
   737  	}
   738  }
   739  
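         // TestCompactBlock2 repeatedly compacts a block while interleaving range
         // deletes from concurrent transactions, checking the delete masks and row
         // counts of the newly produced blocks after each round.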
   740  func TestCompactBlock2(t *testing.T) {
   741  	defer testutils.AfterTest(t)()
   742  	testutils.EnsureNoLeak(t)
   743  	opts := config.WithLongScanAndCKPOpts(nil)
   744  	db := initDB(t, opts)
   745  	defer db.Close()
   746  
   747  	worker := ops.NewOpWorker("xx")
   748  	worker.Start()
   749  	defer worker.Stop()
   750  	schema := catalog.MockSchemaAll(13, 2)
   751  	schema.BlockMaxRows = 20
   752  	schema.SegmentMaxBlocks = 2
   753  	bat := catalog.MockBatch(schema, int(schema.BlockMaxRows))
   754  	defer bat.Close()
   755  	createRelationAndAppend(t, 0, db, "db", schema, bat, true)
   756  	var newBlockFp *common.ID
   757  	{
   758  		txn, rel := getDefaultRelation(t, db, schema.Name)
   759  		blkMeta := getOneBlockMeta(rel)
   760  		task, err := jobs.NewCompactBlockTask(tasks.WaitableCtx, txn, blkMeta, db.Scheduler)
   761  		assert.NoError(t, err)
   762  		worker.SendOp(task)
   763  		err = task.WaitDone()
   764  		assert.NoError(t, err)
   765  		newBlockFp = task.GetNewBlock().Fingerprint()
   766  		assert.NoError(t, txn.Commit())
   767  	}
   768  	{
   769  		t.Log(db.Opts.Catalog.SimplePPString(common.PPL1))
   770  		txn, rel := getDefaultRelation(t, db, schema.Name)
   771  		t.Log(rel.SimplePPString(common.PPL1))
   772  		seg, err := rel.GetSegment(newBlockFp.SegmentID)
   773  		assert.Nil(t, err)
   774  		blk, err := seg.GetBlock(newBlockFp.BlockID)
   775  		assert.Nil(t, err)
   776  		view, err := blk.GetColumnDataById(3, nil)
   777  		assert.NoError(t, err)
   778  		defer view.Close()
   779  		assert.True(t, view.GetData().Equals(bat.Vecs[3]))
   780  		err = blk.RangeDelete(1, 2, handle.DT_Normal)
   781  		assert.Nil(t, err)
   782  		assert.Nil(t, txn.Commit())
   783  	}
   784  	{
   785  		t.Log(db.Opts.Catalog.SimplePPString(common.PPL1))
   786  		txn, rel := getDefaultRelation(t, db, schema.Name)
   787  		t.Log(rel.SimplePPString(common.PPL1))
   788  		seg, err := rel.GetSegment(newBlockFp.SegmentID)
   789  		assert.Nil(t, err)
   790  		blk, err := seg.GetBlock(newBlockFp.BlockID)
   791  		assert.Nil(t, err)
   792  		task, err := jobs.NewCompactBlockTask(tasks.WaitableCtx, txn, blk.GetMeta().(*catalog.BlockEntry), db.Scheduler)
   793  		assert.Nil(t, err)
   794  		worker.SendOp(task)
   795  		err = task.WaitDone()
   796  		assert.Nil(t, err)
   797  		newBlockFp = task.GetNewBlock().Fingerprint()
   798  		assert.Nil(t, txn.Commit())
   799  	}
   800  	{
   801  		t.Log(db.Opts.Catalog.SimplePPString(common.PPL1))
   802  		txn, rel := getDefaultRelation(t, db, schema.Name)
   803  		seg, err := rel.GetSegment(newBlockFp.SegmentID)
   804  		assert.Nil(t, err)
   805  		blk, err := seg.GetBlock(newBlockFp.BlockID)
   806  		assert.Nil(t, err)
   807  		view, err := blk.GetColumnDataById(3, nil)
   808  		assert.Nil(t, err)
   809  		defer view.Close()
   810  		assert.Nil(t, view.DeleteMask)
   811  		// t.Logf("view: %v", view.GetData().String())
   812  		// t.Logf("raw : %v", bat.Vecs[3].String())
   813  		assert.Equal(t, bat.Vecs[0].Length()-2, view.Length())
   814  
   815  		cnt := 0
   816  		forEachBlock(rel, func(blk handle.Block) (err error) {
   817  			cnt++
   818  			return
   819  		})
   820  		assert.Equal(t, 1, cnt)
   821  
   822  		task, err := jobs.NewCompactBlockTask(tasks.WaitableCtx, txn, blk.GetMeta().(*catalog.BlockEntry), db.Scheduler)
   823  		assert.Nil(t, err)
   824  		worker.SendOp(task)
   825  		err = task.WaitDone()
   826  		assert.Nil(t, err)
   827  		newBlockFp = task.GetNewBlock().Fingerprint()
   828  		{
   829  			txn, rel := getDefaultRelation(t, db, schema.Name)
   830  			seg, err := rel.GetSegment(newBlockFp.SegmentID)
   831  			assert.NoError(t, err)
   832  			blk, err := seg.GetBlock(newBlockFp.BlockID)
   833  			assert.NoError(t, err)
   834  			err = blk.RangeDelete(4, 5, handle.DT_Normal)
   835  			assert.NoError(t, err)
   836  			assert.NoError(t, txn.Commit())
   837  		}
   838  		assert.NoError(t, txn.Commit())
   839  	}
   840  	{
   841  		txn, rel := getDefaultRelation(t, db, schema.Name)
   842  		t.Log(rel.SimplePPString(common.PPL1))
   843  		t.Log(db.Opts.Catalog.SimplePPString(common.PPL1))
   844  		seg, err := rel.GetSegment(newBlockFp.SegmentID)
   845  		assert.Nil(t, err)
   846  		blk, err := seg.GetBlock(newBlockFp.BlockID)
   847  		assert.Nil(t, err)
   848  		view, err := blk.GetColumnDataById(3, nil)
   849  		assert.Nil(t, err)
   850  		defer view.Close()
   851  		assert.True(t, view.DeleteMask.Contains(4))
   852  		assert.True(t, view.DeleteMask.Contains(5))
   853  		assert.Equal(t, bat.Vecs[0].Length()-2, view.Length())
   854  
   855  		txn2, rel2 := getDefaultRelation(t, db, schema.Name)
   856  		seg2, err := rel2.GetSegment(newBlockFp.SegmentID)
   857  		assert.NoError(t, err)
   858  		blk2, err := seg2.GetBlock(newBlockFp.BlockID)
   859  		assert.NoError(t, err)
   860  		err = blk2.RangeDelete(7, 7, handle.DT_Normal)
   861  		assert.NoError(t, err)
   862  
   863  		task, err := jobs.NewCompactBlockTask(tasks.WaitableCtx, txn, blk.GetMeta().(*catalog.BlockEntry), db.Scheduler)
   864  		assert.NoError(t, err)
   865  		worker.SendOp(task)
   866  		err = task.WaitDone()
   867  		assert.NoError(t, err)
   868  		assert.NoError(t, txn.Commit())
   869  
   870  		err = txn2.Commit()
   871  		assert.NoError(t, err)
   872  	}
   873  }
   874  
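         // TestAutoCompactABlk1 uses quick scan/checkpoint options, waits for the
         // pending LSN count to drain, and then schedules by hand the compaction task
         // factory built from the block data.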
   875  func TestAutoCompactABlk1(t *testing.T) {
   876  	defer testutils.AfterTest(t)()
   877  	testutils.EnsureNoLeak(t)
   878  	opts := config.WithQuickScanAndCKPOpts(nil)
   879  	tae := initDB(t, opts)
   880  	defer tae.Close()
   881  	schema := catalog.MockSchemaAll(13, 3)
   882  	schema.BlockMaxRows = 1000
   883  	schema.SegmentMaxBlocks = 10
   884  
   885  	totalRows := schema.BlockMaxRows / 5
   886  	bat := catalog.MockBatch(schema, int(totalRows))
   887  	defer bat.Close()
   888  	createRelationAndAppend(t, 0, tae, "db", schema, bat, true)
   889  	time.Sleep(time.Millisecond * 2)
   890  	testutils.WaitExpect(5000, func() bool {
   891  		return tae.Scheduler.GetPenddingLSNCnt() == 0
   892  	})
   893  	assert.Equal(t, uint64(0), tae.Scheduler.GetPenddingLSNCnt())
   894  	t.Log(tae.Catalog.SimplePPString(common.PPL1))
   895  	{
   896  		txn, rel := getDefaultRelation(t, tae, schema.Name)
   897  		blk := getOneBlock(rel)
   898  		blkData := blk.GetMeta().(*catalog.BlockEntry).GetBlockData()
   899  		factory, taskType, scopes, err := blkData.BuildCompactionTaskFactory()
   900  		assert.Nil(t, err)
   901  		task, err := tae.Scheduler.ScheduleMultiScopedTxnTask(tasks.WaitableCtx, taskType, scopes, factory)
   902  		assert.Nil(t, err)
   903  		err = task.WaitDone()
   904  		assert.Nil(t, err)
   905  		assert.Nil(t, txn.Commit())
   906  	}
   907  }
   908  
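         // TestAutoCompactABlk2 runs concurrent scans and single-row appends against
         // two tables with a small insert cache and waits until all pending LSNs are
         // checkpointed.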
   909  func TestAutoCompactABlk2(t *testing.T) {
   910  	defer testutils.AfterTest(t)()
   911  	testutils.EnsureNoLeak(t)
   912  	opts := new(options.Options)
   913  	opts.CacheCfg = new(options.CacheCfg)
   914  	opts.CacheCfg.InsertCapacity = common.M * 5
   915  	opts.CacheCfg.TxnCapacity = common.M
   916  	opts = config.WithQuickScanAndCKPOpts(opts)
   917  	db := initDB(t, opts)
   918  	defer db.Close()
   919  
   920  	schema1 := catalog.MockSchemaAll(13, 2)
   921  	schema1.BlockMaxRows = 20
   922  	schema1.SegmentMaxBlocks = 2
   923  
   924  	schema2 := catalog.MockSchemaAll(13, 2)
   925  	schema2.BlockMaxRows = 20
   926  	schema2.SegmentMaxBlocks = 2
   927  	{
   928  		txn, _ := db.StartTxn(nil)
   929  		database, err := txn.CreateDatabase("db", "")
   930  		assert.Nil(t, err)
   931  		_, err = database.CreateRelation(schema1)
   932  		assert.Nil(t, err)
   933  		_, err = database.CreateRelation(schema2)
   934  		assert.Nil(t, err)
   935  		assert.Nil(t, txn.Commit())
   936  	}
   937  	bat := catalog.MockBatch(schema1, int(schema1.BlockMaxRows*3-1))
   938  	defer bat.Close()
   939  	bats := bat.Split(bat.Length())
   940  
   941  	pool, err := ants.NewPool(20)
   942  	assert.Nil(t, err)
   943  	defer pool.Release()
   944  	var wg sync.WaitGroup
   945  	doSearch := func(name string) func() {
   946  		return func() {
   947  			defer wg.Done()
   948  			txn, rel := getDefaultRelation(t, db, name)
   949  			it := rel.MakeBlockIt()
   950  			for it.Valid() {
   951  				blk := it.GetBlock()
   952  				view, err := blk.GetColumnDataById(schema1.GetSingleSortKeyIdx(), nil)
   953  				assert.Nil(t, err)
   954  				view.Close()
   955  				it.Next()
   956  			}
   957  			err := txn.Commit()
   958  			assert.NoError(t, err)
   959  		}
   960  	}
   961  
   962  	for _, data := range bats {
   963  		wg.Add(4)
   964  		err := pool.Submit(doSearch(schema1.Name))
   965  		assert.Nil(t, err)
   966  		err = pool.Submit(doSearch(schema2.Name))
   967  		assert.Nil(t, err)
   968  		err = pool.Submit(appendClosure(t, data, schema1.Name, db, &wg))
   969  		assert.Nil(t, err)
   970  		err = pool.Submit(appendClosure(t, data, schema2.Name, db, &wg))
   971  		assert.Nil(t, err)
   972  	}
   973  	wg.Wait()
   974  	testutils.WaitExpect(8000, func() bool {
   975  		return db.Scheduler.GetPenddingLSNCnt() == 0
   976  	})
   977  	assert.Equal(t, uint64(0), db.Scheduler.GetPenddingLSNCnt())
   978  	t.Log(db.MTBufMgr.String())
   979  	t.Log(db.Catalog.SimplePPString(common.PPL1))
   980  	t.Logf("GetPenddingLSNCnt: %d", db.Scheduler.GetPenddingLSNCnt())
   981  	t.Logf("GetCheckpointed: %d", db.Scheduler.GetCheckpointedLSN())
   982  }
   983  
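         // TestCompactABlk compacts one block, forces an incremental checkpoint up to
         // the max committed timestamp, records a WAL range checkpoint for the
         // resulting LSN, and waits for the pending LSN count to reach zero.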
   984  func TestCompactABlk(t *testing.T) {
   985  	defer testutils.AfterTest(t)()
   986  	testutils.EnsureNoLeak(t)
   987  	tae := initDB(t, nil)
   988  	defer tae.Close()
   989  	schema := catalog.MockSchemaAll(13, 3)
   990  	schema.BlockMaxRows = 1000
   991  	schema.SegmentMaxBlocks = 10
   992  
   993  	totalRows := schema.BlockMaxRows / 5
   994  	bat := catalog.MockBatch(schema, int(totalRows))
   995  	defer bat.Close()
   996  	createRelationAndAppend(t, 0, tae, "db", schema, bat, true)
   997  	{
   998  		txn, rel := getDefaultRelation(t, tae, schema.Name)
   999  		blk := getOneBlock(rel)
  1000  		blkData := blk.GetMeta().(*catalog.BlockEntry).GetBlockData()
  1001  		factory, taskType, scopes, err := blkData.BuildCompactionTaskFactory()
  1002  		assert.NoError(t, err)
  1003  		task, err := tae.Scheduler.ScheduleMultiScopedTxnTask(tasks.WaitableCtx, taskType, scopes, factory)
  1004  		assert.NoError(t, err)
  1005  		err = task.WaitDone()
  1006  		assert.NoError(t, err)
  1007  		assert.NoError(t, txn.Commit())
  1008  	}
  1009  	err := tae.BGCheckpointRunner.ForceIncrementalCheckpoint(tae.TxnMgr.StatMaxCommitTS())
  1010  	assert.NoError(t, err)
  1011  	lsn := tae.BGCheckpointRunner.MaxLSNInRange(tae.TxnMgr.StatMaxCommitTS())
  1012  	entry, err := tae.Wal.RangeCheckpoint(1, lsn)
  1013  	assert.NoError(t, err)
  1014  	assert.NoError(t, entry.WaitDone())
  1015  	testutils.WaitExpect(1000, func() bool {
  1016  		return tae.Scheduler.GetPenddingLSNCnt() == 0
  1017  	})
  1018  	assert.Equal(t, uint64(0), tae.Scheduler.GetPenddingLSNCnt())
  1019  	t.Log(tae.Catalog.SimplePPString(common.PPL1))
  1020  }
  1021  
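         // TestRollback1 creates segments and blocks inside transactions that are
         // either rolled back or committed and verifies, via a LoopProcessor over the
         // table metadata, that only committed entries remain visible.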
  1022  func TestRollback1(t *testing.T) {
  1023  	defer testutils.AfterTest(t)()
  1024  	testutils.EnsureNoLeak(t)
  1025  	db := initDB(t, nil)
  1026  	defer db.Close()
  1027  	schema := catalog.MockSchema(2, 0)
  1028  
  1029  	createRelation(t, db, "db", schema, true)
  1030  
  1031  	segCnt := 0
  1032  	onSegFn := func(segment *catalog.SegmentEntry) error {
  1033  		segCnt++
  1034  		return nil
  1035  	}
  1036  	blkCnt := 0
  1037  	onBlkFn := func(block *catalog.BlockEntry) error {
  1038  		blkCnt++
  1039  		return nil
  1040  	}
  1041  	processor := new(catalog.LoopProcessor)
  1042  	processor.SegmentFn = onSegFn
  1043  	processor.BlockFn = onBlkFn
  1044  	txn, rel := getDefaultRelation(t, db, schema.Name)
  1045  	_, err := rel.CreateSegment(false)
  1046  	assert.Nil(t, err)
  1047  
  1048  	tableMeta := rel.GetMeta().(*catalog.TableEntry)
  1049  	err = tableMeta.RecurLoop(processor)
  1050  	assert.Nil(t, err)
  1051  	assert.Equal(t, segCnt, 1)
  1052  
  1053  	assert.Nil(t, txn.Rollback())
  1054  	segCnt = 0
  1055  	err = tableMeta.RecurLoop(processor)
  1056  	assert.Nil(t, err)
  1057  	assert.Equal(t, segCnt, 0)
  1058  
  1059  	txn, rel = getDefaultRelation(t, db, schema.Name)
  1060  	seg, err := rel.CreateSegment(false)
  1061  	assert.Nil(t, err)
  1062  	segMeta := seg.GetMeta().(*catalog.SegmentEntry)
  1063  	assert.Nil(t, txn.Commit())
  1064  	segCnt = 0
  1065  	err = tableMeta.RecurLoop(processor)
  1066  	assert.Nil(t, err)
  1067  	assert.Equal(t, segCnt, 1)
  1068  
  1069  	txn, rel = getDefaultRelation(t, db, schema.Name)
  1070  	seg, err = rel.GetSegment(segMeta.GetID())
  1071  	assert.Nil(t, err)
  1072  	_, err = seg.CreateBlock(false)
  1073  	assert.Nil(t, err)
  1074  	blkCnt = 0
  1075  	err = tableMeta.RecurLoop(processor)
  1076  	assert.Nil(t, err)
  1077  	assert.Equal(t, blkCnt, 1)
  1078  
  1079  	err = txn.Rollback()
  1080  	assert.Nil(t, err)
  1081  	blkCnt = 0
  1082  	err = tableMeta.RecurLoop(processor)
  1083  	assert.Nil(t, err)
  1084  	assert.Equal(t, blkCnt, 0)
  1085  
  1086  	t.Log(db.Opts.Catalog.SimplePPString(common.PPL1))
  1087  }
  1088  
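         // TestMVCC1 verifies snapshot visibility for reads by filter: values appended
         // by a later committed transaction are visible to newly started readers but
         // not to a relation handle obtained before that commit.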
  1089  func TestMVCC1(t *testing.T) {
  1090  	defer testutils.AfterTest(t)()
  1091  	testutils.EnsureNoLeak(t)
  1092  	db := initDB(t, nil)
  1093  	defer db.Close()
  1094  	schema := catalog.MockSchemaAll(13, 2)
  1095  	schema.BlockMaxRows = 40
  1096  	schema.SegmentMaxBlocks = 2
  1097  	bat := catalog.MockBatch(schema, int(schema.BlockMaxRows*10))
  1098  	defer bat.Close()
  1099  	bats := bat.Split(40)
  1100  
  1101  	txn, _, rel := createRelationNoCommit(t, db, "db", schema, true)
  1102  	err := rel.Append(bats[0])
  1103  	assert.NoError(t, err)
  1104  
  1105  	row := 5
  1106  	expectVal := bats[0].Vecs[schema.GetSingleSortKeyIdx()].Get(row)
  1107  	filter := handle.NewEQFilter(expectVal)
  1108  	actualVal, err := rel.GetValueByFilter(filter, schema.GetSingleSortKeyIdx())
  1109  	assert.NoError(t, err)
  1110  	assert.Equal(t, expectVal, actualVal)
  1111  	assert.NoError(t, txn.Commit())
  1112  
  1113  	_, rel = getDefaultRelation(t, db, schema.Name)
  1114  	actualVal, err = rel.GetValueByFilter(filter, schema.GetSingleSortKeyIdx())
  1115  	assert.NoError(t, err)
  1116  	assert.Equal(t, expectVal, actualVal)
  1117  
  1118  	txn2, rel2 := getDefaultRelation(t, db, schema.Name)
  1119  	err = rel2.Append(bats[1])
  1120  	assert.NoError(t, err)
  1121  
  1122  	val2 := bats[1].Vecs[schema.GetSingleSortKeyIdx()].Get(row)
  1123  	filter.Val = val2
  1124  	actualVal, err = rel2.GetValueByFilter(filter, schema.GetSingleSortKeyIdx())
  1125  	assert.NoError(t, err)
  1126  	assert.Equal(t, val2, actualVal)
  1127  
  1128  	assert.NoError(t, txn2.Commit())
  1129  
  1130  	_, _, err = rel.GetByFilter(filter)
  1131  	assert.Error(t, err)
  1132  	var id *common.ID
  1133  
  1134  	{
  1135  		txn, rel := getDefaultRelation(t, db, schema.Name)
  1136  		id, _, err = rel.GetByFilter(filter)
  1137  		assert.NoError(t, err)
  1138  		assert.NoError(t, txn.Commit())
  1139  	}
  1140  
  1141  	it := rel.MakeBlockIt()
  1142  	for it.Valid() {
  1143  		block := it.GetBlock()
  1144  		bid := block.Fingerprint()
  1145  		if bid.BlockID == id.BlockID {
  1146  			var buffer bytes.Buffer
  1147  			view, err := block.GetColumnDataById(schema.GetSingleSortKeyIdx(), &buffer)
  1148  			assert.Nil(t, err)
  1149  			defer view.Close()
  1150  			assert.Nil(t, view.DeleteMask)
  1151  			assert.NotNil(t, view.GetData())
  1152  			t.Log(view.GetData().String())
  1153  			assert.Equal(t, bats[0].Vecs[0].Length(), view.Length())
  1154  		}
  1155  		it.Next()
  1156  	}
  1157  }
  1158  
   1159  // 1. Txn1 creates a db and a relation and appends 10 rows. Committed -- PASS
   1160  // 2. Txn2 appends 10 rows and gets the value of the 5th appended row -- PASS
   1161  // 3. Txn2 deletes the 5th row value while still uncommitted -- PASS
   1162  // 4. Txn2 gets the 5th row value -- NotFound
  1163  func TestMVCC2(t *testing.T) {
  1164  	defer testutils.AfterTest(t)()
  1165  	testutils.EnsureNoLeak(t)
  1166  	db := initDB(t, nil)
  1167  	defer db.Close()
  1168  	schema := catalog.MockSchemaAll(13, 2)
  1169  	schema.BlockMaxRows = 100
  1170  	schema.SegmentMaxBlocks = 2
  1171  	bat := catalog.MockBatch(schema, int(schema.BlockMaxRows))
  1172  	defer bat.Close()
  1173  	bats := bat.Split(10)
  1174  	{
  1175  		txn, _, rel := createRelationNoCommit(t, db, "db", schema, true)
  1176  		err := rel.Append(bats[0])
  1177  		assert.NoError(t, err)
  1178  		val := bats[0].Vecs[schema.GetSingleSortKeyIdx()].Get(5)
  1179  		filter := handle.NewEQFilter(val)
  1180  		_, _, err = rel.GetByFilter(filter)
  1181  		assert.NoError(t, err)
  1182  		assert.NoError(t, txn.Commit())
  1183  	}
  1184  	{
  1185  		txn, rel := getDefaultRelation(t, db, schema.Name)
  1186  		err := rel.Append(bats[1])
  1187  		assert.NoError(t, err)
  1188  		val := bats[1].Vecs[schema.GetSingleSortKeyIdx()].Get(5)
  1189  		filter := handle.NewEQFilter(val)
  1190  		err = rel.DeleteByFilter(filter)
  1191  		assert.NoError(t, err)
  1192  
  1193  		_, _, err = rel.GetByFilter(filter)
  1194  		assert.Error(t, err)
  1195  		t.Log(err)
  1196  		assert.NoError(t, txn.Commit())
  1197  	}
  1198  	{
  1199  		txn, rel := getDefaultRelation(t, db, schema.Name)
  1200  		it := rel.MakeBlockIt()
  1201  		var buffer bytes.Buffer
  1202  		for it.Valid() {
  1203  			block := it.GetBlock()
  1204  			view, err := block.GetColumnDataByName(schema.GetSingleSortKey().Name, &buffer)
  1205  			assert.Nil(t, err)
  1206  			assert.Nil(t, view.DeleteMask)
  1207  			assert.Equal(t, bats[1].Vecs[0].Length()*2-1, view.Length())
   1208  			// TODO: exclude deleted rows when applying appends
  1209  			it.Next()
  1210  			view.Close()
  1211  		}
  1212  		assert.NoError(t, txn.Commit())
  1213  	}
  1214  }
  1215  
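         // TestUnload1 appends with a tiny insert-buffer capacity (common.K), which
         // should force in-memory data to be unloaded, then scans every block
         // repeatedly to make sure the rows are still readable.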
  1216  func TestUnload1(t *testing.T) {
  1217  	defer testutils.AfterTest(t)()
  1218  	testutils.EnsureNoLeak(t)
  1219  	opts := new(options.Options)
  1220  	opts.CacheCfg = new(options.CacheCfg)
  1221  	opts.CacheCfg.InsertCapacity = common.K
  1222  	opts.CacheCfg.TxnCapacity = common.M
  1223  	db := initDB(t, opts)
  1224  	defer db.Close()
  1225  
  1226  	schema := catalog.MockSchemaAll(13, 2)
  1227  	schema.BlockMaxRows = 10
  1228  	schema.SegmentMaxBlocks = 2
  1229  
  1230  	bat := catalog.MockBatch(schema, int(schema.BlockMaxRows*2))
  1231  	defer bat.Close()
  1232  	bats := bat.Split(int(schema.BlockMaxRows))
  1233  	createRelation(t, db, "db", schema, true)
  1234  	var wg sync.WaitGroup
  1235  	pool, err := ants.NewPool(1)
  1236  	assert.Nil(t, err)
  1237  	defer pool.Release()
  1238  	for _, data := range bats {
  1239  		wg.Add(1)
  1240  		err := pool.Submit(appendClosure(t, data, schema.Name, db, &wg))
  1241  		assert.Nil(t, err)
  1242  	}
  1243  	wg.Wait()
  1244  	{
  1245  		txn, rel := getDefaultRelation(t, db, schema.Name)
  1246  		for i := 0; i < 10; i++ {
  1247  			it := rel.MakeBlockIt()
  1248  			for it.Valid() {
  1249  				blk := it.GetBlock()
  1250  				view, err := blk.GetColumnDataByName(schema.GetSingleSortKey().Name, nil)
  1251  				assert.Nil(t, err)
  1252  				defer view.Close()
  1253  				assert.Equal(t, int(schema.BlockMaxRows), view.Length())
  1254  				it.Next()
  1255  			}
  1256  		}
  1257  		_ = txn.Commit()
  1258  	}
  1259  }
  1260  
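         // TestUnload2 is similar to TestUnload1 but spreads single-row appends across
         // two tables with a small shared insert buffer and then looks every row up
         // again by its sort-key filter.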
  1261  func TestUnload2(t *testing.T) {
  1262  	defer testutils.AfterTest(t)()
  1263  	testutils.EnsureNoLeak(t)
  1264  	opts := new(options.Options)
  1265  	opts.CacheCfg = new(options.CacheCfg)
  1266  	opts.CacheCfg.InsertCapacity = common.K*4 - common.K/2
  1267  	opts.CacheCfg.TxnCapacity = common.M
  1268  	db := initDB(t, opts)
  1269  	defer db.Close()
  1270  
  1271  	schema1 := catalog.MockSchemaAll(13, 2)
  1272  	schema1.BlockMaxRows = 10
  1273  	schema1.SegmentMaxBlocks = 2
  1274  
  1275  	schema2 := catalog.MockSchemaAll(13, 2)
  1276  	schema2.BlockMaxRows = 10
  1277  	schema2.SegmentMaxBlocks = 2
  1278  	{
  1279  		txn, _ := db.StartTxn(nil)
  1280  		database, err := txn.CreateDatabase("db", "")
  1281  		assert.Nil(t, err)
  1282  		_, err = database.CreateRelation(schema1)
  1283  		assert.Nil(t, err)
  1284  		_, err = database.CreateRelation(schema2)
  1285  		assert.Nil(t, err)
  1286  		assert.Nil(t, txn.Commit())
  1287  	}
  1288  
  1289  	bat := catalog.MockBatch(schema1, int(schema1.BlockMaxRows*5+5))
  1290  	defer bat.Close()
  1291  	bats := bat.Split(bat.Length())
  1292  
  1293  	p, err := ants.NewPool(10)
  1294  	assert.Nil(t, err)
  1295  	defer p.Release()
  1296  	var wg sync.WaitGroup
  1297  	for i, data := range bats {
  1298  		wg.Add(1)
  1299  		name := schema1.Name
  1300  		if i%2 == 1 {
  1301  			name = schema2.Name
  1302  		}
  1303  		err := p.Submit(appendClosure(t, data, name, db, &wg))
  1304  		assert.Nil(t, err)
  1305  	}
  1306  	wg.Wait()
  1307  
  1308  	{
  1309  		txn, rel := getDefaultRelation(t, db, schema1.Name)
  1310  		for i := 0; i < len(bats); i += 2 {
  1311  			data := bats[i]
  1312  			v := data.Vecs[schema1.GetSingleSortKeyIdx()].Get(0)
  1313  			filter := handle.NewEQFilter(v)
  1314  			_, _, err := rel.GetByFilter(filter)
  1315  			assert.NoError(t, err)
  1316  		}
  1317  		database, _ := txn.GetDatabase("db")
  1318  		rel, err = database.GetRelationByName(schema2.Name)
  1319  		assert.Nil(t, err)
  1320  		for i := 1; i < len(bats); i += 2 {
  1321  			data := bats[i]
  1322  			v := data.Vecs[schema1.GetSingleSortKeyIdx()].Get(0)
  1323  			filter := handle.NewEQFilter(v)
  1324  			_, _, err := rel.GetByFilter(filter)
  1325  			assert.NoError(t, err)
  1326  		}
  1327  		_ = txn.Commit()
  1328  	}
  1329  
  1330  	t.Log(db.MTBufMgr.String())
  1331  	// t.Log(db.Opts.Catalog.SimplePPString(common.PPL1))
  1332  }
  1333  
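         // TestDelete1 deletes rows by filter and by block offset, compacts the block
         // in between, and checks row counts, delete masks, and not-found lookups
         // after each step.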
  1334  func TestDelete1(t *testing.T) {
  1335  	defer testutils.AfterTest(t)()
  1336  	testutils.EnsureNoLeak(t)
  1337  	tae := initDB(t, nil)
  1338  	defer tae.Close()
  1339  
  1340  	schema := catalog.MockSchemaAll(3, 2)
  1341  	schema.BlockMaxRows = 10
  1342  	bat := catalog.MockBatch(schema, int(schema.BlockMaxRows))
  1343  	defer bat.Close()
  1344  	createRelationAndAppend(t, 0, tae, "db", schema, bat, true)
  1345  	var id *common.ID
  1346  	var row uint32
  1347  	{
  1348  		txn, rel := getDefaultRelation(t, tae, schema.Name)
  1349  		assert.Equal(t, bat.Length(), int(rel.Rows()))
  1350  		pkCol := bat.Vecs[schema.GetSingleSortKeyIdx()]
  1351  		pkVal := pkCol.Get(5)
  1352  		filter := handle.NewEQFilter(pkVal)
  1353  		var err error
  1354  		id, row, err = rel.GetByFilter(filter)
  1355  		assert.NoError(t, err)
  1356  		err = rel.RangeDelete(id, row, row, handle.DT_Normal)
  1357  		assert.NoError(t, err)
  1358  		assert.NoError(t, txn.Commit())
  1359  	}
  1360  	{
  1361  		txn, rel := getDefaultRelation(t, tae, schema.Name)
  1362  		assert.Equal(t, bat.Length()-1, int(rel.Rows()))
  1363  		pkCol := bat.Vecs[schema.GetSingleSortKeyIdx()]
  1364  		pkVal := pkCol.Get(5)
  1365  		filter := handle.NewEQFilter(pkVal)
  1366  		_, _, err := rel.GetByFilter(filter)
  1367  		assert.Error(t, err)
  1368  		assert.NoError(t, txn.Commit())
  1369  	}
  1370  	{
  1371  		txn, rel := getDefaultRelation(t, tae, schema.Name)
  1372  		blkMeta := getOneBlockMeta(rel)
  1373  		blkData := blkMeta.GetBlockData()
  1374  		factory, taskType, scopes, err := blkData.BuildCompactionTaskFactory()
  1375  		assert.NoError(t, err)
  1376  		task, err := tae.Scheduler.ScheduleMultiScopedTxnTask(tasks.WaitableCtx, taskType, scopes, factory)
  1377  		assert.NoError(t, err)
  1378  		err = task.WaitDone()
  1379  		assert.NoError(t, err)
  1380  		assert.NoError(t, txn.Commit())
  1381  	}
  1382  	{
  1383  		txn, rel := getDefaultRelation(t, tae, schema.Name)
  1384  		blk := getOneBlock(rel)
  1385  		view, err := blk.GetColumnDataById(schema.GetSingleSortKeyIdx(), nil)
  1386  		assert.NoError(t, err)
  1387  		defer view.Close()
  1388  		assert.Nil(t, view.DeleteMask)
  1389  		assert.Equal(t, bat.Vecs[0].Length()-1, view.Length())
  1390  
  1391  		err = blk.RangeDelete(0, 0, handle.DT_Normal)
  1392  		assert.NoError(t, err)
  1393  		view, err = blk.GetColumnDataById(schema.GetSingleSortKeyIdx(), nil)
  1394  		assert.NoError(t, err)
  1395  		defer view.Close()
  1396  		assert.True(t, view.DeleteMask.Contains(0))
  1397  		v := bat.Vecs[schema.GetSingleSortKeyIdx()].Get(0)
  1398  		filter := handle.NewEQFilter(v)
  1399  		_, _, err = rel.GetByFilter(filter)
  1400  		assert.True(t, moerr.IsMoErrCode(err, moerr.ErrNotFound))
  1401  		assert.NoError(t, txn.Commit())
  1402  	}
  1403  	{
  1404  		txn, rel := getDefaultRelation(t, tae, schema.Name)
  1405  		assert.Equal(t, bat.Length()-2, int(rel.Rows()))
  1406  		blk := getOneBlock(rel)
  1407  		view, err := blk.GetColumnDataById(schema.GetSingleSortKeyIdx(), nil)
  1408  		assert.NoError(t, err)
  1409  		defer view.Close()
  1410  		assert.True(t, view.DeleteMask.Contains(0))
  1411  		assert.Equal(t, bat.Vecs[0].Length()-1, view.Length())
  1412  		v := bat.Vecs[schema.GetSingleSortKeyIdx()].Get(0)
  1413  		filter := handle.NewEQFilter(v)
  1414  		_, _, err = rel.GetByFilter(filter)
  1415  		assert.True(t, moerr.IsMoErrCode(err, moerr.ErrNotFound))
  1416  		_ = txn.Rollback()
  1417  	}
  1418  	t.Log(tae.Opts.Catalog.SimplePPString(common.PPL1))
  1419  }
  1420  
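         // TestLogIndex1 appends rows one by one, deletes one of them by filter, and
         // then collects the append log indexes over several timestamp ranges before
         // compacting the block.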
  1421  func TestLogIndex1(t *testing.T) {
  1422  	defer testutils.AfterTest(t)()
  1423  	testutils.EnsureNoLeak(t)
  1424  	tae := initDB(t, nil)
  1425  	defer tae.Close()
  1426  	schema := catalog.MockSchemaAll(13, 0)
  1427  	schema.BlockMaxRows = 10
  1428  	bat := catalog.MockBatch(schema, int(schema.BlockMaxRows))
  1429  	defer bat.Close()
  1430  	bats := bat.Split(int(schema.BlockMaxRows))
  1431  	createRelation(t, tae, "db", schema, true)
  1432  	txns := make([]txnif.AsyncTxn, 0)
  1433  	doAppend := func(data *containers.Batch) func() {
  1434  		return func() {
  1435  			txn, rel := getDefaultRelation(t, tae, schema.Name)
  1436  			err := rel.Append(data)
  1437  			assert.NoError(t, err)
  1438  			assert.NoError(t, txn.Commit())
  1439  			txns = append(txns, txn)
  1440  		}
  1441  	}
  1442  	for _, data := range bats {
  1443  		doAppend(data)()
  1444  	}
  1445  	var id *common.ID
  1446  	var offset uint32
  1447  	var err error
  1448  	{
  1449  		txn, rel := getDefaultRelation(t, tae, schema.Name)
  1450  		v := bat.Vecs[schema.GetSingleSortKeyIdx()].Get(3)
  1451  		filter := handle.NewEQFilter(v)
  1452  		id, offset, err = rel.GetByFilter(filter)
  1453  		assert.Nil(t, err)
  1454  		err = rel.RangeDelete(id, offset, offset, handle.DT_Normal)
  1455  		assert.Nil(t, err)
  1456  		assert.Nil(t, txn.Commit())
  1457  	}
  1458  	{
  1459  		txn, rel := getDefaultRelation(t, tae, schema.Name)
  1460  		meta := getOneBlockMeta(rel)
  1461  		indexes, err := meta.GetBlockData().CollectAppendLogIndexes(txns[0].GetStartTS(), txns[len(txns)-1].GetCommitTS())
  1462  		assert.NoError(t, err)
  1463  		assert.Equal(t, len(txns), len(indexes))
  1464  		indexes, err = meta.GetBlockData().CollectAppendLogIndexes(txns[1].GetStartTS(), txns[len(txns)-1].GetCommitTS())
  1465  		assert.NoError(t, err)
  1466  		assert.Equal(t, len(txns)-1, len(indexes))
  1467  		indexes, err = meta.GetBlockData().CollectAppendLogIndexes(txns[2].GetCommitTS(), txns[len(txns)-1].GetCommitTS())
  1468  		assert.NoError(t, err)
  1469  		assert.Equal(t, len(txns)-2, len(indexes))
  1470  		indexes, err = meta.GetBlockData().CollectAppendLogIndexes(txns[3].GetCommitTS(), txns[len(txns)-1].GetCommitTS())
  1471  		assert.NoError(t, err)
  1472  		assert.Equal(t, len(txns)-3, len(indexes))
  1473  		assert.NoError(t, txn.Commit())
  1474  	}
  1475  	{
  1476  		txn, rel := getDefaultRelation(t, tae, schema.Name)
  1477  		blk := getOneBlock(rel)
  1478  		meta := blk.GetMeta().(*catalog.BlockEntry)
  1479  
  1480  		var zeroV types.TS
  1481  		indexes, err := meta.GetBlockData().CollectAppendLogIndexes(zeroV.Next(), txn.GetStartTS())
  1482  		assert.NoError(t, err)
  1483  		for i, index := range indexes {
  1484  			t.Logf("%d: %s", i, index.String())
  1485  		}
  1486  
  1487  		view, err := blk.GetColumnDataById(schema.GetSingleSortKeyIdx(), nil)
  1488  		assert.Nil(t, err)
  1489  		defer view.Close()
  1490  		assert.True(t, view.DeleteMask.Contains(offset))
  1491  		task, err := jobs.NewCompactBlockTask(nil, txn, meta, tae.Scheduler)
  1492  		assert.Nil(t, err)
  1493  		err = task.OnExec()
  1494  		assert.Nil(t, err)
  1495  		assert.Nil(t, txn.Commit())
  1496  	}
  1497  }
  1498  
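        // TestCrossDBTxn creates two databases in one transaction, then creates a relation in each
        // and appends to both within a single transaction, verifying the row counts afterwards.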
  1499  func TestCrossDBTxn(t *testing.T) {
  1500  	defer testutils.AfterTest(t)()
  1501  	testutils.EnsureNoLeak(t)
  1502  	tae := initDB(t, nil)
  1503  	defer tae.Close()
  1504  
  1505  	txn, _ := tae.StartTxn(nil)
  1506  	db1, err := txn.CreateDatabase("db1", "")
  1507  	assert.Nil(t, err)
  1508  	db2, err := txn.CreateDatabase("db2", "")
  1509  	assert.Nil(t, err)
  1510  	assert.NotNil(t, db1)
  1511  	assert.NotNil(t, db2)
  1512  	assert.Nil(t, txn.Commit())
  1513  
  1514  	schema1 := catalog.MockSchema(2, 0)
  1515  	schema1.BlockMaxRows = 10
  1516  	schema1.SegmentMaxBlocks = 2
  1517  	schema2 := catalog.MockSchema(4, 0)
  1518  	schema2.BlockMaxRows = 10
  1519  	schema2.SegmentMaxBlocks = 2
  1520  
  1521  	rows1 := schema1.BlockMaxRows * 5 / 2
  1522  	rows2 := schema1.BlockMaxRows * 3 / 2
  1523  	bat1 := catalog.MockBatch(schema1, int(rows1))
  1524  	bat2 := catalog.MockBatch(schema2, int(rows2))
  1525  	defer bat1.Close()
  1526  	defer bat2.Close()
  1527  
  1528  	txn, _ = tae.StartTxn(nil)
  1529  	db1, err = txn.GetDatabase("db1")
  1530  	assert.Nil(t, err)
  1531  	db2, err = txn.GetDatabase("db2")
  1532  	assert.Nil(t, err)
  1533  	rel1, err := db1.CreateRelation(schema1)
  1534  	assert.Nil(t, err)
  1535  	rel2, err := db2.CreateRelation(schema2)
  1536  	assert.Nil(t, err)
  1537  	err = rel1.Append(bat1)
  1538  	assert.Nil(t, err)
  1539  	err = rel2.Append(bat2)
  1540  	assert.Nil(t, err)
  1541  
  1542  	assert.Nil(t, txn.Commit())
  1543  
  1544  	txn, _ = tae.StartTxn(nil)
  1545  	db1, err = txn.GetDatabase("db1")
  1546  	assert.NoError(t, err)
  1547  	db2, err = txn.GetDatabase("db2")
  1548  	assert.NoError(t, err)
  1549  	rel1, err = db1.GetRelationByName(schema1.Name)
  1550  	assert.NoError(t, err)
  1551  	rel2, err = db2.GetRelationByName(schema2.Name)
  1552  	assert.NoError(t, err)
  1553  
  1554  	checkAllColRowsByScan(t, rel1, int(rows1), false)
  1555  	checkAllColRowsByScan(t, rel2, int(rows2), false)
  1556  
  1557  	t.Log(tae.Catalog.SimplePPString(common.PPL1))
  1558  }
  1559  
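        // TestSystemDB1 verifies that mo_catalog can neither be created nor dropped, then scans the
        // mo_database, mo_tables and mo_columns system tables, checking row counts and constraint types.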
  1560  func TestSystemDB1(t *testing.T) {
  1561  	defer testutils.AfterTest(t)()
  1562  	testutils.EnsureNoLeak(t)
  1563  	tae := initDB(t, nil)
  1564  	defer tae.Close()
  1565  	schema := catalog.MockSchema(2, 0)
  1566  	txn, _ := tae.StartTxn(nil)
  1567  	_, err := txn.CreateDatabase(pkgcatalog.MO_CATALOG, "")
  1568  	assert.NotNil(t, err)
  1569  	_, err = txn.DropDatabase(pkgcatalog.MO_CATALOG)
  1570  	assert.NotNil(t, err)
  1571  
  1572  	db1, err := txn.CreateDatabase("db1", "")
  1573  	assert.Nil(t, err)
  1574  	_, err = db1.CreateRelation(schema)
  1575  	assert.Nil(t, err)
  1576  
  1577  	_, err = txn.CreateDatabase("db2", "")
  1578  	assert.Nil(t, err)
  1579  
  1580  	db, _ := txn.GetDatabase(pkgcatalog.MO_CATALOG)
  1581  	table, err := db.GetRelationByName(pkgcatalog.MO_DATABASE)
  1582  	assert.Nil(t, err)
  1583  	it := table.MakeBlockIt()
  1584  	rows := 0
  1585  	for it.Valid() {
  1586  		blk := it.GetBlock()
  1587  		rows += blk.Rows()
  1588  		view, err := blk.GetColumnDataByName(pkgcatalog.SystemDBAttr_Name, nil)
  1589  		assert.Nil(t, err)
  1590  		defer view.Close()
  1591  		assert.Equal(t, 3, view.Length())
  1592  		view, err = blk.GetColumnDataByName(pkgcatalog.SystemDBAttr_CatalogName, nil)
  1593  		assert.Nil(t, err)
  1594  		defer view.Close()
  1595  		assert.Equal(t, 3, view.Length())
  1596  		view, err = blk.GetColumnDataByName(pkgcatalog.SystemDBAttr_CreateSQL, nil)
  1597  		assert.Nil(t, err)
  1598  		defer view.Close()
  1599  		assert.Equal(t, 3, view.Length())
  1600  		it.Next()
  1601  	}
  1602  	assert.Equal(t, 3, rows)
  1603  
  1604  	table, err = db.GetRelationByName(pkgcatalog.MO_TABLES)
  1605  	assert.Nil(t, err)
  1606  	it = table.MakeBlockIt()
  1607  	rows = 0
  1608  	for it.Valid() {
  1609  		blk := it.GetBlock()
  1610  		rows += blk.Rows()
  1611  		view, err := blk.GetColumnDataByName(pkgcatalog.SystemRelAttr_Name, nil)
  1612  		assert.Nil(t, err)
  1613  		defer view.Close()
  1614  		assert.Equal(t, 4, view.Length())
  1615  		view, err = blk.GetColumnDataByName(pkgcatalog.SystemRelAttr_Persistence, nil)
  1616  		assert.NoError(t, err)
  1617  		defer view.Close()
  1618  		view, err = blk.GetColumnDataByName(pkgcatalog.SystemRelAttr_Kind, nil)
  1619  		assert.NoError(t, err)
  1620  		defer view.Close()
  1621  		it.Next()
  1622  	}
  1623  	assert.Equal(t, 4, rows)
  1624  
  1625  	table, err = db.GetRelationByName(pkgcatalog.MO_COLUMNS)
  1626  	assert.Nil(t, err)
  1627  
  1628  	bat := containers.NewBatch()
  1629  	defer bat.Close()
  1630  	// schema2 := table.GetMeta().(*catalog.TableEntry).GetSchema()
  1631  	// bat := containers.BuildBatch(schema2.AllNames(), schema2.AllTypes(), schema2.AllNullables(), 0)
  1632  	it = table.MakeBlockIt()
  1633  	rows = 0
  1634  	for it.Valid() {
  1635  		blk := it.GetBlock()
  1636  		rows += blk.Rows()
  1637  		view, err := blk.GetColumnDataByName(pkgcatalog.SystemColAttr_DBName, nil)
  1638  		assert.NoError(t, err)
  1639  		defer view.Close()
  1640  		bat.AddVector(pkgcatalog.SystemColAttr_DBName, view.Orphan())
  1641  
  1642  		view, err = blk.GetColumnDataByName(pkgcatalog.SystemColAttr_RelName, nil)
  1643  		assert.Nil(t, err)
  1644  		defer view.Close()
  1645  		bat.AddVector(pkgcatalog.SystemColAttr_RelName, view.Orphan())
  1646  
  1647  		view, err = blk.GetColumnDataByName(pkgcatalog.SystemColAttr_Name, nil)
  1648  		assert.Nil(t, err)
  1649  		defer view.Close()
  1650  		bat.AddVector(pkgcatalog.SystemColAttr_Name, view.Orphan())
  1651  
  1652  		view, err = blk.GetColumnDataByName(pkgcatalog.SystemColAttr_ConstraintType, nil)
  1653  		assert.Nil(t, err)
  1654  		defer view.Close()
  1655  		t.Log(view.GetData().String())
  1656  		bat.AddVector(pkgcatalog.SystemColAttr_ConstraintType, view.Orphan())
  1657  
  1658  		view, err = blk.GetColumnDataByName(pkgcatalog.SystemColAttr_Type, nil)
  1659  		assert.Nil(t, err)
  1660  		defer view.Close()
  1661  		t.Log(view.GetData().String())
  1662  		view, err = blk.GetColumnDataByName(pkgcatalog.SystemColAttr_Num, nil)
  1663  		assert.Nil(t, err)
  1664  		defer view.Close()
  1665  		t.Log(view.GetData().String())
  1666  		it.Next()
  1667  	}
  1668  	t.Log(rows)
  1669  
  1670  	for i := 0; i < bat.Vecs[0].Length(); i++ {
  1671  		dbName := string(bat.Vecs[0].Get(i).([]byte))
  1672  		relName := string(bat.Vecs[1].Get(i).([]byte))
  1673  		attrName := string(bat.Vecs[2].Get(i).([]byte))
  1674  		ct := string(bat.Vecs[3].Get(i).([]byte))
  1675  		if dbName == pkgcatalog.MO_CATALOG {
  1676  			if relName == pkgcatalog.MO_DATABASE {
  1677  				if attrName == pkgcatalog.SystemDBAttr_ID {
  1678  					assert.Equal(t, pkgcatalog.SystemColPKConstraint, ct)
  1679  				} else {
  1680  					assert.Equal(t, pkgcatalog.SystemColNoConstraint, ct)
  1681  				}
  1682  			} else if relName == pkgcatalog.MO_TABLES {
  1683  				if attrName == pkgcatalog.SystemRelAttr_ID {
  1684  					assert.Equal(t, pkgcatalog.SystemColPKConstraint, ct)
  1685  				} else {
  1686  					assert.Equal(t, pkgcatalog.SystemColNoConstraint, ct)
  1687  				}
  1688  			} else if relName == pkgcatalog.MO_COLUMNS {
  1689  				if attrName == pkgcatalog.SystemColAttr_UniqName {
  1690  					assert.Equal(t, pkgcatalog.SystemColPKConstraint, ct)
  1691  				} else {
  1692  					assert.Equal(t, pkgcatalog.SystemColNoConstraint, ct)
  1693  				}
  1694  			}
  1695  		}
  1696  	}
  1697  
  1698  	err = txn.Rollback()
  1699  	assert.Nil(t, err)
  1700  	t.Log(tae.Catalog.SimplePPString(common.PPL1))
  1701  }
  1702  
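        // TestSystemDB2 verifies that the system relations in mo_catalog cannot be dropped, while a
        // user relation can still be created and appended to inside mo_catalog.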
  1703  func TestSystemDB2(t *testing.T) {
  1704  	defer testutils.AfterTest(t)()
  1705  	testutils.EnsureNoLeak(t)
  1706  	tae := initDB(t, nil)
  1707  	defer tae.Close()
  1708  
  1709  	txn, _ := tae.StartTxn(nil)
  1710  	sysDB, err := txn.GetDatabase(pkgcatalog.MO_CATALOG)
  1711  	assert.NoError(t, err)
  1712  	_, err = sysDB.DropRelationByName(pkgcatalog.MO_DATABASE)
  1713  	assert.Error(t, err)
  1714  	_, err = sysDB.DropRelationByName(pkgcatalog.MO_TABLES)
  1715  	assert.Error(t, err)
  1716  	_, err = sysDB.DropRelationByName(pkgcatalog.MO_COLUMNS)
  1717  	assert.Error(t, err)
  1718  
  1719  	schema := catalog.MockSchema(2, 0)
  1720  	schema.BlockMaxRows = 100
  1721  	schema.SegmentMaxBlocks = 2
  1722  	bat := catalog.MockBatch(schema, 1000)
  1723  	defer bat.Close()
  1724  
  1725  	rel, err := sysDB.CreateRelation(schema)
  1726  	assert.NoError(t, err)
  1727  	assert.NotNil(t, rel)
  1728  	err = rel.Append(bat)
  1729  	assert.Nil(t, err)
  1730  	assert.NoError(t, txn.Commit())
  1731  
  1732  	txn, _ = tae.StartTxn(nil)
  1733  	sysDB, err = txn.GetDatabase(pkgcatalog.MO_CATALOG)
  1734  	assert.NoError(t, err)
  1735  	rel, err = sysDB.GetRelationByName(schema.Name)
  1736  	assert.NoError(t, err)
  1737  	checkAllColRowsByScan(t, rel, 1000, false)
  1738  	assert.NoError(t, txn.Commit())
  1739  }
  1740  
  1741  func TestSystemDB3(t *testing.T) {
  1742  	defer testutils.AfterTest(t)()
  1743  	testutils.EnsureNoLeak(t)
  1744  	tae := initDB(t, nil)
  1745  	defer tae.Close()
  1746  	txn, _ := tae.StartTxn(nil)
  1747  	schema := catalog.MockSchemaAll(13, 12)
  1748  	schema.BlockMaxRows = 100
  1749  	schema.SegmentMaxBlocks = 2
  1750  	bat := catalog.MockBatch(schema, 20)
  1751  	defer bat.Close()
  1752  	db, err := txn.GetDatabase(pkgcatalog.MO_CATALOG)
  1753  	assert.NoError(t, err)
  1754  	rel, err := db.CreateRelation(schema)
  1755  	assert.NoError(t, err)
  1756  	err = rel.Append(bat)
  1757  	assert.NoError(t, err)
  1758  	assert.NoError(t, txn.Commit())
  1759  }
  1760  
  1761  func TestScan1(t *testing.T) {
  1762  	defer testutils.AfterTest(t)()
  1763  	testutils.EnsureNoLeak(t)
  1764  	tae := initDB(t, nil)
  1765  	defer tae.Close()
  1766  
  1767  	schema := catalog.MockSchemaAll(13, 2)
  1768  	schema.BlockMaxRows = 100
  1769  	schema.SegmentMaxBlocks = 2
  1770  
  1771  	bat := catalog.MockBatch(schema, int(schema.BlockMaxRows-1))
  1772  	defer bat.Close()
  1773  	txn, _, rel := createRelationNoCommit(t, tae, defaultTestDB, schema, true)
  1774  	err := rel.Append(bat)
  1775  	assert.NoError(t, err)
  1776  	checkAllColRowsByScan(t, rel, bat.Length(), false)
  1777  	assert.NoError(t, txn.Commit())
  1778  }
  1779  
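        // TestDedup appends the same batch twice in one transaction and expects the second append
        // to fail with a duplicate-entry error.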
  1780  func TestDedup(t *testing.T) {
  1781  	defer testutils.AfterTest(t)()
  1782  	testutils.EnsureNoLeak(t)
  1783  	tae := initDB(t, nil)
  1784  	defer tae.Close()
  1785  
  1786  	schema := catalog.MockSchemaAll(13, 2)
  1787  	schema.BlockMaxRows = 100
  1788  	schema.SegmentMaxBlocks = 2
  1789  
  1790  	bat := catalog.MockBatch(schema, 10)
  1791  	defer bat.Close()
  1792  	txn, _, rel := createRelationNoCommit(t, tae, defaultTestDB, schema, true)
  1793  	err := rel.Append(bat)
  1794  	assert.NoError(t, err)
  1795  	err = rel.Append(bat)
  1796  	t.Log(err)
  1797  	assert.True(t, moerr.IsMoErrCode(err, moerr.ErrDuplicateEntry))
  1798  	checkAllColRowsByScan(t, rel, 10, false)
  1799  	err = txn.Rollback()
  1800  	assert.NoError(t, err)
  1801  }
  1802  
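        // TestScan2 appends two batches (rejecting a duplicate append), deletes one row and updates
        // another by filter, then verifies the scanned row count and the updated value.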
  1803  func TestScan2(t *testing.T) {
  1804  	defer testutils.AfterTest(t)()
  1805  	testutils.EnsureNoLeak(t)
  1806  	tae := initDB(t, nil)
  1807  	defer tae.Close()
  1808  	schema := catalog.MockSchemaAll(13, 12)
  1809  	schema.BlockMaxRows = 10
  1810  	schema.SegmentMaxBlocks = 10
  1811  	rows := schema.BlockMaxRows * 5 / 2
  1812  	bat := catalog.MockBatch(schema, int(rows))
  1813  	defer bat.Close()
  1814  	bats := bat.Split(2)
  1815  
  1816  	txn, _, rel := createRelationNoCommit(t, tae, defaultTestDB, schema, true)
  1817  	err := rel.Append(bats[0])
  1818  	assert.NoError(t, err)
  1819  	checkAllColRowsByScan(t, rel, bats[0].Length(), false)
  1820  
  1821  	err = rel.Append(bats[0])
  1822  	assert.Error(t, err)
  1823  	err = rel.Append(bats[1])
  1824  	assert.NoError(t, err)
  1825  	checkAllColRowsByScan(t, rel, int(rows), false)
  1826  
  1827  	pkv := bat.Vecs[schema.GetSingleSortKeyIdx()].Get(5)
  1828  	filter := handle.NewEQFilter(pkv)
  1829  	err = rel.DeleteByFilter(filter)
  1830  	assert.NoError(t, err)
  1831  	checkAllColRowsByScan(t, rel, int(rows)-1, true)
  1832  
  1833  	pkv = bat.Vecs[schema.GetSingleSortKeyIdx()].Get(8)
  1834  	filter = handle.NewEQFilter(pkv)
  1835  	updateV := int64(999)
  1836  	err = rel.UpdateByFilter(filter, 3, updateV)
  1837  	assert.NoError(t, err)
  1838  
  1839  	v, err := rel.GetValueByFilter(filter, 3)
  1840  	assert.NoError(t, err)
  1841  	assert.Equal(t, updateV, v.(int64))
  1842  	checkAllColRowsByScan(t, rel, int(rows)-1, true)
  1843  	assert.NoError(t, txn.Commit())
  1844  }
  1845  
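        // TestADA exercises append-delete-append cycles on a single primary key, both inside one
        // transaction and across transactions, ending with a block of 4 physical rows of which 3 are deleted.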
  1846  func TestADA(t *testing.T) {
  1847  	defer testutils.AfterTest(t)()
  1848  	testutils.EnsureNoLeak(t)
  1849  	tae := initDB(t, nil)
  1850  	defer tae.Close()
  1851  	schema := catalog.MockSchemaAll(13, 3)
  1852  	schema.BlockMaxRows = 1000
  1853  	bat := catalog.MockBatch(schema, 1)
  1854  	defer bat.Close()
  1855  
  1856  	// Append to a block
  1857  	createRelationAndAppend(t, 0, tae, "db", schema, bat, true)
  1858  
  1859  	// Delete a row from the block
  1860  	txn, rel := getDefaultRelation(t, tae, schema.Name)
  1861  	v := bat.Vecs[schema.GetSingleSortKeyIdx()].Get(0)
  1862  	filter := handle.NewEQFilter(v)
  1863  	id, row, err := rel.GetByFilter(filter)
  1864  	assert.NoError(t, err)
  1865  	err = rel.RangeDelete(id, row, row, handle.DT_Normal)
  1866  	assert.NoError(t, err)
  1867  	_, _, err = rel.GetByFilter(filter)
  1868  	assert.Error(t, err)
  1869  	assert.NoError(t, txn.Commit())
  1870  
  1871  	// Append a row with the same primary key
  1872  	txn, rel = getDefaultRelation(t, tae, schema.Name)
  1873  	_, _, err = rel.GetByFilter(filter)
  1874  	assert.Error(t, err)
  1875  	err = rel.Append(bat)
  1876  	assert.NoError(t, err)
  1877  	id, row, err = rel.GetByFilter(filter)
  1878  	assert.NoError(t, err)
  1879  	checkAllColRowsByScan(t, rel, 1, true)
  1880  
  1881  	err = rel.RangeDelete(id, row, row, handle.DT_Normal)
  1882  	assert.NoError(t, err)
  1883  	_, _, err = rel.GetByFilter(filter)
  1884  	assert.Error(t, err)
  1885  
  1886  	err = rel.Append(bat)
  1887  	assert.NoError(t, err)
  1888  	_, _, err = rel.GetByFilter(filter)
  1889  	assert.NoError(t, err)
  1890  	checkAllColRowsByScan(t, rel, 1, true)
  1891  	assert.NoError(t, txn.Commit())
  1892  
  1893  	txn, rel = getDefaultRelation(t, tae, schema.Name)
  1894  	err = rel.Append(bat)
  1895  	assert.Error(t, err)
  1896  	id, row, err = rel.GetByFilter(filter)
  1897  	assert.NoError(t, err)
  1898  	err = rel.RangeDelete(id, row, row, handle.DT_Normal)
  1899  	assert.NoError(t, err)
  1900  	_, _, err = rel.GetByFilter(filter)
  1901  	assert.Error(t, err)
  1902  
  1903  	err = rel.Append(bat)
  1904  	assert.NoError(t, err)
  1905  
  1906  	id, row, err = rel.GetByFilter(filter)
  1907  	assert.NoError(t, err)
  1908  
  1909  	err = rel.Append(bat)
  1910  	assert.Error(t, err)
  1911  
  1912  	err = rel.RangeDelete(id, row, row, handle.DT_Normal)
  1913  	assert.NoError(t, err)
  1914  	_, _, err = rel.GetByFilter(filter)
  1915  	assert.Error(t, err)
  1916  	err = rel.Append(bat)
  1917  	assert.NoError(t, err)
  1918  
  1919  	assert.NoError(t, txn.Commit())
  1920  
  1921  	txn, rel = getDefaultRelation(t, tae, schema.Name)
  1922  	err = rel.Append(bat)
  1923  	assert.Error(t, err)
  1924  	id, row, err = rel.GetByFilter(filter)
  1925  	assert.NoError(t, err)
  1926  	err = rel.RangeDelete(id, row, row, handle.DT_Normal)
  1927  	assert.NoError(t, err)
  1928  	_, _, err = rel.GetByFilter(filter)
  1929  	assert.Error(t, err)
  1930  
  1931  	err = rel.Append(bat)
  1932  	assert.NoError(t, err)
  1933  	assert.NoError(t, txn.Commit())
  1934  
  1935  	txn, rel = getDefaultRelation(t, tae, schema.Name)
  1936  	it := rel.MakeBlockIt()
  1937  	for it.Valid() {
  1938  		blk := it.GetBlock()
  1939  		view, err := blk.GetColumnDataById(schema.GetSingleSortKeyIdx(), nil)
  1940  		assert.NoError(t, err)
  1941  		defer view.Close()
  1942  		assert.Equal(t, 4, view.Length())
  1943  		assert.Equal(t, uint64(3), view.DeleteMask.GetCardinality())
  1944  		it.Next()
  1945  	}
  1946  	assert.NoError(t, txn.Commit())
  1947  }
  1948  
  1949  func TestUpdateByFilter(t *testing.T) {
  1950  	defer testutils.AfterTest(t)()
  1951  	testutils.EnsureNoLeak(t)
  1952  	tae := initDB(t, nil)
  1953  	defer tae.Close()
  1954  	schema := catalog.MockSchemaAll(13, 3)
  1955  	bat := catalog.MockBatch(schema, 100)
  1956  	defer bat.Close()
  1957  
  1958  	createRelationAndAppend(t, 0, tae, "db", schema, bat, true)
  1959  
  1960  	txn, rel := getDefaultRelation(t, tae, schema.Name)
  1961  	v := bat.Vecs[schema.GetSingleSortKeyIdx()].Get(2)
  1962  	filter := handle.NewEQFilter(v)
  1963  	err := rel.UpdateByFilter(filter, 2, int32(2222))
  1964  	assert.NoError(t, err)
  1965  
  1966  	id, row, err := rel.GetByFilter(filter)
  1967  	assert.NoError(t, err)
  1968  	cv, err := rel.GetValue(id, row, 2)
  1969  	assert.NoError(t, err)
  1970  	assert.Equal(t, int32(2222), cv.(int32))
  1971  
  1972  	v = bat.Vecs[schema.GetSingleSortKeyIdx()].Get(3)
  1973  	filter = handle.NewEQFilter(v)
  1974  
  1975  	err = rel.UpdateByFilter(filter, uint16(schema.GetSingleSortKeyIdx()), int64(333333))
  1976  	assert.NoError(t, err)
  1977  
  1978  	assert.NoError(t, txn.Commit())
  1979  }
  1980  
  1981  // Test Steps
  1982  // 1. Create DB|Relation and append 10 rows. Commit
  1983  // 2. Make an equality filter on the pk of the row at offset 2
  1984  // 3. Start Txn1. GetByFilter should succeed
  1985  // 4. Start Txn2. Delete that row. Commit.
  1986  // 5. Txn1 calls GetByFilter again; it should still succeed because Txn1 reads its own snapshot
  1987  func TestGetByFilter(t *testing.T) {
  1988  	defer testutils.AfterTest(t)()
  1989  	testutils.EnsureNoLeak(t)
  1990  	tae := initDB(t, nil)
  1991  	defer tae.Close()
  1992  	schema := catalog.MockSchemaAll(13, 12)
  1993  	bat := catalog.MockBatch(schema, 10)
  1994  	defer bat.Close()
  1995  
  1996  	// Step 1
  1997  	createRelationAndAppend(t, 0, tae, "db", schema, bat, true)
  1998  
  1999  	// Step 2
  2000  	v := bat.Vecs[schema.GetSingleSortKeyIdx()].Get(2)
  2001  	filter := handle.NewEQFilter(v)
  2002  
  2003  	// Step 3
  2004  	txn1, rel := getDefaultRelation(t, tae, schema.Name)
  2005  	id, row, err := rel.GetByFilter(filter)
  2006  	assert.NoError(t, err)
  2007  
  2008  	// Step 4
  2009  	{
  2010  		txn2, rel := getDefaultRelation(t, tae, schema.Name)
  2011  		err := rel.RangeDelete(id, row, row, handle.DT_Normal)
  2012  		assert.NoError(t, err)
  2013  		assert.NoError(t, txn2.Commit())
  2014  	}
  2015  
  2016  	// Step 5
  2017  	_, _, err = rel.GetByFilter(filter)
  2018  	assert.NoError(t, err)
  2019  	assert.NoError(t, txn1.Commit())
  2020  }
  2021  
  2022  //  1. Set a big BlockMaxRows
  2023  //  2. Mock a one-row batch
  2024  //  3. Start tons of workers. Each worker executes the routine below:
  2025  //     3.1 GetByFilter the pk value
  2026  //     3.1.1 If found, go to 3.5
  2027  //     3.2 Append a row
  2028  //     3.3 err should not be a duplicate error (TODO: it is duplicated for now; it should be a W-W conflict)
  2029  //     (why not duplicated: the previous GetByFilter already checked that there was no duplicate key)
  2030  //     3.4 If no error, try to commit. If the commit succeeds, increment appendCnt; otherwise rollback
  2031  //     3.5 Delete the row
  2032  //     3.5.1 If no error, try to commit. The commit should always pass
  2033  //     3.5.2 If error, it should always be a W-W conflict
  2034  //  4. Wait for all workers to finish. The raw row count of the table should equal appendCnt.
  2035  func TestChaos1(t *testing.T) {
  2036  	defer testutils.AfterTest(t)()
  2037  	testutils.EnsureNoLeak(t)
  2038  	tae := initDB(t, nil)
  2039  	defer tae.Close()
  2040  	schema := catalog.MockSchemaAll(13, 12)
  2041  	schema.BlockMaxRows = 100000
  2042  	schema.SegmentMaxBlocks = 2
  2043  	bat := catalog.MockBatch(schema, 1)
  2044  	defer bat.Close()
  2045  
  2046  	createRelation(t, tae, "db", schema, true)
  2047  
  2048  	v := bat.Vecs[schema.GetSingleSortKeyIdx()].Get(0)
  2049  	filter := handle.NewEQFilter(v)
  2050  	var wg sync.WaitGroup
  2051  	appendCnt := uint32(0)
  2052  	deleteCnt := uint32(0)
  2053  	worker := func() {
  2054  		defer wg.Done()
  2055  		txn, rel := getDefaultRelation(t, tae, schema.Name)
  2056  		id, row, err := rel.GetByFilter(filter)
  2057  		// logutil.Infof("id=%v,row=%d,err=%v", id, row, err)
  2058  		if err == nil {
  2059  			err = rel.RangeDelete(id, row, row, handle.DT_Normal)
  2060  			if err != nil {
  2061  				t.Logf("delete: %v", err)
  2062  				// assert.Equal(t, txnif.ErrTxnWWConflict, err)
  2063  				assert.NoError(t, txn.Rollback())
  2064  				return
  2065  			}
  2066  			assert.NoError(t, txn.Commit())
  2067  			atomic.AddUint32(&deleteCnt, uint32(1))
  2068  			return
  2069  		}
  2070  		assert.True(t, moerr.IsMoErrCode(err, moerr.ErrNotFound))
  2071  		err = rel.Append(bat)
  2072  		// TODO: enable below check later
  2073  		// assert.NotEqual(t, data.ErrDuplicate, err)
  2074  		if err == nil {
  2075  			err = txn.Commit()
  2076  			// TODO: enable below check later
  2077  			// assert.NotEqual(t, data.ErrDuplicate, err)
  2078  			if err == nil {
  2079  				atomic.AddUint32(&appendCnt, uint32(1))
  2080  			} else {
  2081  				t.Logf("commit: %v", err)
  2082  			}
  2083  			return
  2084  		}
  2085  		_ = txn.Rollback()
  2086  	}
  2087  	pool, _ := ants.NewPool(10)
  2088  	defer pool.Release()
  2089  	for i := 0; i < 50; i++ {
  2090  		wg.Add(1)
  2091  		err := pool.Submit(worker)
  2092  		assert.Nil(t, err)
  2093  	}
  2094  	wg.Wait()
  2095  	t.Logf("AppendCnt: %d", appendCnt)
  2096  	t.Logf("DeleteCnt: %d", deleteCnt)
  2097  	assert.True(t, appendCnt-deleteCnt <= 1)
  2098  	_, rel := getDefaultRelation(t, tae, schema.Name)
  2099  	assert.Equal(t, int64(appendCnt-deleteCnt), rel.Rows())
  2100  	blk := getOneBlock(rel)
  2101  	view, err := blk.GetColumnDataById(schema.GetSingleSortKeyIdx(), nil)
  2102  	assert.NoError(t, err)
  2103  	defer view.Close()
  2104  	assert.Equal(t, int(appendCnt), view.Length())
  2105  	mask := view.DeleteMask
  2106  	view.ApplyDeletes()
  2107  	t.Log(view.String())
  2108  	assert.Equal(t, uint64(deleteCnt), mask.GetCardinality())
  2109  }
  2110  
  2111  // Testing Steps
  2112  // 1. Append 10 rows
  2113  // 2. Start txn1
  2114  // 3. Start txn2. Update the 3rd row 3rd col to int64(2222) and commit. -- PASS
  2115  // 4. Txn1 tries to update the 3rd row 3rd col to int64(1111). -- W-W Conflict
  2116  // 5. Txn1 tries to delete the 3rd row. -- W-W Conflict. Rollback
  2117  // 6. Start txn3 and update the 3rd row 3rd col to int64(3333). -- PASS
  2118  func TestSnapshotIsolation1(t *testing.T) {
  2119  	defer testutils.AfterTest(t)()
  2120  	testutils.EnsureNoLeak(t)
  2121  	tae := initDB(t, nil)
  2122  	defer tae.Close()
  2123  	schema := catalog.MockSchemaAll(13, 12)
  2124  	schema.BlockMaxRows = 100
  2125  	bat := catalog.MockBatch(schema, 10)
  2126  	defer bat.Close()
  2127  	v := bat.Vecs[schema.GetSingleSortKeyIdx()].Get(3)
  2128  	filter := handle.NewEQFilter(v)
  2129  
  2130  	// Step 1
  2131  	createRelationAndAppend(t, 0, tae, "db", schema, bat, true)
  2132  
  2133  	// Step 2
  2134  	txn1, rel1 := getDefaultRelation(t, tae, schema.Name)
  2135  
  2136  	// Step 3
  2137  	txn2, rel2 := getDefaultRelation(t, tae, schema.Name)
  2138  	err := rel2.UpdateByFilter(filter, 3, int64(2222))
  2139  	assert.NoError(t, err)
  2140  	assert.NoError(t, txn2.Commit())
  2141  
  2142  	// Step 4
  2143  	err = rel1.UpdateByFilter(filter, 3, int64(1111))
  2144  	t.Log(err)
  2145  	assert.True(t, moerr.IsMoErrCode(err, moerr.ErrTxnWWConflict))
  2146  
  2147  	// Step 5
  2148  	id, row, err := rel1.GetByFilter(filter)
  2149  	assert.NoError(t, err)
  2150  	err = rel1.RangeDelete(id, row, row, handle.DT_Normal)
  2151  	t.Log(err)
  2152  	assert.True(t, moerr.IsMoErrCode(err, moerr.ErrTxnWWConflict))
  2153  	_ = txn1.Rollback()
  2154  
  2155  	// Step 6
  2156  	txn3, rel3 := getDefaultRelation(t, tae, schema.Name)
  2157  	err = rel3.UpdateByFilter(filter, 3, int64(3333))
  2158  	assert.NoError(t, err)
  2159  	assert.NoError(t, txn3.Commit())
  2160  
  2161  	txn, rel := getDefaultRelation(t, tae, schema.Name)
  2162  	v, err = rel.GetValueByFilter(filter, 3)
  2163  	assert.NoError(t, err)
  2164  	assert.Equal(t, int64(3333), v.(int64))
  2165  	err = rel.RangeDelete(id, row, row, handle.DT_Normal)
  2166  	assert.Error(t, err)
  2167  	assert.NoError(t, txn.Commit())
  2168  }
  2169  
  2170  // Testing Steps
  2171  // 1. Start txn1
  2172  // 2. Start txn2 and append one row and commit
  2173  // 3. Start txn3 and delete the row and commit
  2174  // 4. Txn1 appends the same row; its commit fails with a W-W conflict
  2175  func TestSnapshotIsolation2(t *testing.T) {
  2176  	defer testutils.AfterTest(t)()
  2177  	testutils.EnsureNoLeak(t)
  2178  	opts := config.WithLongScanAndCKPOpts(nil)
  2179  	tae := initDB(t, opts)
  2180  	defer tae.Close()
  2181  	schema := catalog.MockSchemaAll(13, 12)
  2182  	schema.BlockMaxRows = 100
  2183  	bat := catalog.MockBatch(schema, 1)
  2184  	defer bat.Close()
  2185  	v := bat.Vecs[schema.GetSingleSortKeyIdx()].Get(0)
  2186  	filter := handle.NewEQFilter(v)
  2187  
  2188  	createRelation(t, tae, "db", schema, true)
  2189  
  2190  	// Step 1
  2191  	txn1, rel1 := getDefaultRelation(t, tae, schema.Name)
  2192  
  2193  	// Step 2
  2194  	txn2, rel2 := getDefaultRelation(t, tae, schema.Name)
  2195  	err := rel2.Append(bat)
  2196  	assert.NoError(t, err)
  2197  	assert.NoError(t, txn2.Commit())
  2198  
  2199  	// Step 3
  2200  	txn3, rel3 := getDefaultRelation(t, tae, schema.Name)
  2201  	err = rel3.DeleteByFilter(filter)
  2202  	assert.NoError(t, err)
  2203  	assert.NoError(t, txn3.Commit())
  2204  
  2205  	// Step 4
  2206  	err = rel1.Append(bat)
  2207  	assert.NoError(t, err)
  2208  	err = txn1.Commit()
  2209  	t.Log(err)
  2210  	assert.True(t, moerr.IsMoErrCode(err, moerr.ErrTxnWWConflict))
  2211  }
  2212  
  2213  // 1. Append 3 blocks and delete the last 5 rows of the 1st block
  2214  // 2. Merge blocks
  2215  // 3. Check rows and col[0]
  2216  func TestMergeBlocks(t *testing.T) {
  2217  	defer testutils.AfterTest(t)()
  2218  	testutils.EnsureNoLeak(t)
  2219  	tae := initDB(t, nil)
  2220  	defer tae.Close()
  2221  	schema := catalog.MockSchemaAll(13, -1)
  2222  	schema.BlockMaxRows = 10
  2223  	schema.SegmentMaxBlocks = 3
  2224  	bat := catalog.MockBatch(schema, 30)
  2225  	defer bat.Close()
  2226  
  2227  	createRelationAndAppend(t, 0, tae, "db", schema, bat, true)
  2228  
  2229  	txn, err := tae.StartTxn(nil)
  2230  	assert.Nil(t, err)
  2231  	db, err := txn.GetDatabase("db")
  2232  	assert.Nil(t, err)
  2233  	rel, err := db.GetRelationByName(schema.Name)
  2234  	assert.Nil(t, err)
  2235  	it := rel.MakeBlockIt()
  2236  	blkID := it.GetBlock().Fingerprint()
  2237  	err = rel.RangeDelete(blkID, 5, 9, handle.DT_Normal)
  2238  	assert.Nil(t, err)
  2239  	assert.Nil(t, txn.Commit())
  2240  
  2241  	txn, err = tae.StartTxn(nil)
  2242  	assert.Nil(t, err)
  2243  	for it.Valid() {
  2244  		checkAllColRowsByScan(t, rel, bat.Length(), false)
  2245  		col, err := it.GetBlock().GetMeta().(*catalog.BlockEntry).GetBlockData().GetColumnDataById(txn, 0, nil)
  2246  		assert.NoError(t, err)
  2247  		defer col.Close()
  2248  		t.Log(col)
  2249  		it.Next()
  2250  	}
  2251  	assert.Nil(t, txn.Commit())
  2252  
  2253  	mergeBlocks(t, 0, tae, "db", schema, false)
  2254  
  2255  	txn, err = tae.StartTxn(nil)
  2256  	assert.Nil(t, err)
  2257  	db, err = txn.GetDatabase("db")
  2258  	assert.Nil(t, err)
  2259  	rel, err = db.GetRelationByName(schema.Name)
  2260  	assert.Nil(t, err)
  2261  	assert.Equal(t, uint64(25), rel.GetMeta().(*catalog.TableEntry).GetRows())
  2262  	it = rel.MakeBlockIt()
  2263  	for it.Valid() {
  2264  		checkAllColRowsByScan(t, rel, bat.Length()-5, false)
  2265  		col, err := it.GetBlock().GetMeta().(*catalog.BlockEntry).GetBlockData().GetColumnDataById(txn, 0, nil)
  2266  		assert.Nil(t, err)
  2267  		t.Log(col)
  2268  		defer col.Close()
  2269  		it.Next()
  2270  	}
  2271  	assert.Nil(t, txn.Commit())
  2272  }
  2273  
  2274  // delete a row
  2275  // start a merge but do not commit it
  2276  // delete another row
  2277  // commit the merge
  2278  func TestMergeblocks2(t *testing.T) {
  2279  	defer testutils.AfterTest(t)()
  2280  	testutils.EnsureNoLeak(t)
  2281  	opts := config.WithLongScanAndCKPOpts(nil)
  2282  	tae := newTestEngine(t, opts)
  2283  	defer tae.Close()
  2284  	schema := catalog.MockSchemaAll(1, 0)
  2285  	schema.BlockMaxRows = 3
  2286  	schema.SegmentMaxBlocks = 2
  2287  	tae.bindSchema(schema)
  2288  	bat := catalog.MockBatch(schema, 6)
  2289  	bats := bat.Split(2)
  2290  	defer bat.Close()
  2291  
  2292  	tae.createRelAndAppend(bats[0], true)
  2293  
  2294  	txn, rel := tae.getRelation()
  2295  	_ = rel.Append(bats[1])
  2296  	assert.Nil(t, txn.Commit())
  2297  
  2298  	{
  2299  		v := getSingleSortKeyValue(bat, schema, 1)
  2300  		t.Logf("v is %v**********", v)
  2301  		filter := handle.NewEQFilter(v)
  2302  		txn2, rel := tae.getRelation()
  2303  		t.Log("********before delete******************")
  2304  		checkAllColRowsByScan(t, rel, 6, true)
  2305  		_ = rel.DeleteByFilter(filter)
  2306  		assert.Nil(t, txn2.Commit())
  2307  	}
  2308  
  2309  	_, rel = tae.getRelation()
  2310  	t.Log("**********************")
  2311  	checkAllColRowsByScan(t, rel, 5, true)
  2312  
  2313  	{
  2314  		t.Log("************merge************")
  2315  
  2316  		txn, rel = tae.getRelation()
  2317  
  2318  		segIt := rel.MakeSegmentIt()
  2319  		seg := segIt.GetSegment().GetMeta().(*catalog.SegmentEntry)
  2320  		segHandle, err := rel.GetSegment(seg.ID)
  2321  		assert.NoError(t, err)
  2322  
  2323  		var metas []*catalog.BlockEntry
  2324  		it := segHandle.MakeBlockIt()
  2325  		for it.Valid() {
  2326  			meta := it.GetBlock().GetMeta().(*catalog.BlockEntry)
  2327  			metas = append(metas, meta)
  2328  			it.Next()
  2329  		}
  2330  		segsToMerge := []*catalog.SegmentEntry{segHandle.GetMeta().(*catalog.SegmentEntry)}
  2331  		task, err := jobs.NewMergeBlocksTask(nil, txn, metas, segsToMerge, nil, tae.Scheduler)
  2332  		assert.NoError(t, err)
  2333  		err = task.OnExec()
  2334  		assert.NoError(t, err)
  2335  
  2336  		{
  2337  			v := getSingleSortKeyValue(bat, schema, 2)
  2338  			t.Logf("v is %v**********", v)
  2339  			filter := handle.NewEQFilter(v)
  2340  			txn2, rel := tae.getRelation()
  2341  			t.Log("********before delete******************")
  2342  			checkAllColRowsByScan(t, rel, 5, true)
  2343  			_ = rel.DeleteByFilter(filter)
  2344  			assert.Nil(t, txn2.Commit())
  2345  		}
  2346  		err = txn.Commit()
  2347  		assert.NoError(t, err)
  2348  	}
  2349  
  2350  	t.Log("********************")
  2351  	_, rel = tae.getRelation()
  2352  	checkAllColRowsByScan(t, rel, 4, true)
  2353  	assert.Equal(t, int64(4), rel.Rows())
  2354  
  2355  	v := getSingleSortKeyValue(bat, schema, 1)
  2356  	filter := handle.NewEQFilter(v)
  2357  	_, _, err := rel.GetByFilter(filter)
  2358  	assert.NotNil(t, err)
  2359  
  2360  	v = getSingleSortKeyValue(bat, schema, 2)
  2361  	filter = handle.NewEQFilter(v)
  2362  	_, _, err = rel.GetByFilter(filter)
  2363  	assert.NotNil(t, err)
  2364  
  2365  	// v = getSingleSortKeyValue(bat, schema, 4)
  2366  	// filter = handle.NewEQFilter(v)
  2367  	// _, _, err = rel.GetByFilter(filter)
  2368  	// assert.NotNil(t, err)
  2369  
  2370  	// tae.restart()
  2371  	// assert.Equal(t, int64(2), rel.Rows())
  2372  }
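
        // TestMergeEmptyBlocks deletes all rows of the first batch, appends a second batch, then runs a
        // merge-blocks task over a segment containing an empty block while a concurrent transaction
        // deletes one more row before the merge commits.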
  2373  func TestMergeEmptyBlocks(t *testing.T) {
  2374  	defer testutils.AfterTest(t)()
  2375  	testutils.EnsureNoLeak(t)
  2376  	opts := config.WithLongScanAndCKPOpts(nil)
  2377  	tae := newTestEngine(t, opts)
  2378  	defer tae.Close()
  2379  	schema := catalog.MockSchemaAll(1, 0)
  2380  	schema.BlockMaxRows = 3
  2381  	schema.SegmentMaxBlocks = 2
  2382  	tae.bindSchema(schema)
  2383  	bat := catalog.MockBatch(schema, 6)
  2384  	bats := bat.Split(2)
  2385  	defer bat.Close()
  2386  
  2387  	tae.createRelAndAppend(bats[0], true)
  2388  
  2389  	assert.NoError(t, tae.deleteAll(true))
  2390  
  2391  	txn, rel := tae.getRelation()
  2392  	assert.NoError(t, rel.Append(bats[1]))
  2393  	assert.NoError(t, txn.Commit())
  2394  
  2395  	{
  2396  		t.Log("************merge************")
  2397  
  2398  		txn, rel = tae.getRelation()
  2399  
  2400  		segIt := rel.MakeSegmentIt()
  2401  		seg := segIt.GetSegment().GetMeta().(*catalog.SegmentEntry)
  2402  		segHandle, err := rel.GetSegment(seg.ID)
  2403  		assert.NoError(t, err)
  2404  
  2405  		var metas []*catalog.BlockEntry
  2406  		it := segHandle.MakeBlockIt()
  2407  		for it.Valid() {
  2408  			meta := it.GetBlock().GetMeta().(*catalog.BlockEntry)
  2409  			metas = append(metas, meta)
  2410  			it.Next()
  2411  		}
  2412  		segsToMerge := []*catalog.SegmentEntry{segHandle.GetMeta().(*catalog.SegmentEntry)}
  2413  		task, err := jobs.NewMergeBlocksTask(nil, txn, metas, segsToMerge, nil, tae.Scheduler)
  2414  		assert.NoError(t, err)
  2415  		err = task.OnExec()
  2416  		assert.NoError(t, err)
  2417  
  2418  		{
  2419  			v := getSingleSortKeyValue(bat, schema, 4)
  2420  			filter := handle.NewEQFilter(v)
  2421  			txn2, rel := tae.getRelation()
  2422  			_ = rel.DeleteByFilter(filter)
  2423  			assert.Nil(t, txn2.Commit())
  2424  		}
  2425  		err = txn.Commit()
  2426  		assert.NoError(t, err)
  2427  	}
  2428  }
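
        // TestDelete2 deletes a single row by filter, commits, and then compacts the blocks.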
  2429  func TestDelete2(t *testing.T) {
  2430  	defer testutils.AfterTest(t)()
  2431  	testutils.EnsureNoLeak(t)
  2432  	opts := config.WithLongScanAndCKPOpts(nil)
  2433  	tae := newTestEngine(t, opts)
  2434  	defer tae.Close()
  2435  	schema := catalog.MockSchemaAll(18, 11)
  2436  	schema.BlockMaxRows = 10
  2437  	schema.SegmentMaxBlocks = 2
  2438  	tae.bindSchema(schema)
  2439  	bat := catalog.MockBatch(schema, 5)
  2440  	defer bat.Close()
  2441  	tae.createRelAndAppend(bat, true)
  2442  
  2443  	txn, rel := tae.getRelation()
  2444  	v := getSingleSortKeyValue(bat, schema, 2)
  2445  	filter := handle.NewEQFilter(v)
  2446  	err := rel.DeleteByFilter(filter)
  2447  	assert.NoError(t, err)
  2448  	assert.NoError(t, txn.Commit())
  2449  
  2450  	tae.compactBlocks(false)
  2451  }
  2452  
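        // TestNull1 writes and updates null values and verifies they survive scans, block compaction,
        // block merging and engine restarts.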
  2453  func TestNull1(t *testing.T) {
  2454  	defer testutils.AfterTest(t)()
  2455  	testutils.EnsureNoLeak(t)
  2456  	opts := config.WithLongScanAndCKPOpts(nil)
  2457  	tae := newTestEngine(t, opts)
  2458  	defer tae.Close()
  2459  	schema := catalog.MockSchemaAll(18, 9)
  2460  	schema.BlockMaxRows = 10
  2461  	schema.SegmentMaxBlocks = 2
  2462  	tae.bindSchema(schema)
  2463  
  2464  	bat := catalog.MockBatch(schema, int(schema.BlockMaxRows*3+1))
  2465  	defer bat.Close()
  2466  	bats := bat.Split(4)
  2467  	bat.Vecs[3].Update(2, types.Null{})
  2468  	tae.createRelAndAppend(bats[0], true)
  2469  
  2470  	txn, rel := tae.getRelation()
  2471  	blk := getOneBlock(rel)
  2472  	view, err := blk.GetColumnDataById(3, nil)
  2473  	assert.NoError(t, err)
  2474  	defer view.Close()
  2475  	v := view.GetData().Get(2)
  2476  	assert.True(t, types.IsNull(v))
  2477  	checkAllColRowsByScan(t, rel, bats[0].Length(), false)
  2478  	assert.NoError(t, txn.Commit())
  2479  
  2480  	tae.restart()
  2481  	txn, rel = tae.getRelation()
  2482  	blk = getOneBlock(rel)
  2483  	view, err = blk.GetColumnDataById(3, nil)
  2484  	assert.NoError(t, err)
  2485  	defer view.Close()
  2486  	v = view.GetData().Get(2)
  2487  	assert.True(t, types.IsNull(v))
  2488  	checkAllColRowsByScan(t, rel, bats[0].Length(), false)
  2489  
  2490  	v = getSingleSortKeyValue(bats[0], schema, 2)
  2491  	filter_2 := handle.NewEQFilter(v)
  2492  	uv0_2, err := rel.GetValueByFilter(filter_2, 3)
  2493  	assert.NoError(t, err)
  2494  	assert.True(t, types.IsNull(uv0_2))
  2495  
  2496  	v0_4 := getSingleSortKeyValue(bats[0], schema, 4)
  2497  	filter_4 := handle.NewEQFilter(v0_4)
  2498  	err = rel.UpdateByFilter(filter_4, 3, types.Null{})
  2499  	assert.NoError(t, err)
  2500  	uv, err := rel.GetValueByFilter(filter_4, 3)
  2501  	assert.NoError(t, err)
  2502  	assert.True(t, types.IsNull(uv))
  2503  	assert.NoError(t, txn.Commit())
  2504  
  2505  	txn, rel = tae.getRelation()
  2506  	checkAllColRowsByScan(t, rel, bats[0].Length(), true)
  2507  	uv, err = rel.GetValueByFilter(filter_4, 3)
  2508  	assert.NoError(t, err)
  2509  	assert.True(t, types.IsNull(uv))
  2510  
  2511  	err = rel.Append(bats[1])
  2512  	assert.NoError(t, err)
  2513  	assert.NoError(t, txn.Commit())
  2514  
  2515  	tae.compactBlocks(false)
  2516  	txn, rel = tae.getRelation()
  2517  	checkAllColRowsByScan(t, rel, lenOfBats(bats[:2]), false)
  2518  	uv, err = rel.GetValueByFilter(filter_4, 3)
  2519  	assert.NoError(t, err)
  2520  	assert.True(t, types.IsNull(uv))
  2521  	assert.NoError(t, txn.Commit())
  2522  
  2523  	tae.restart()
  2524  	txn, rel = tae.getRelation()
  2525  	checkAllColRowsByScan(t, rel, lenOfBats(bats[:2]), false)
  2526  	uv, err = rel.GetValueByFilter(filter_4, 3)
  2527  	assert.NoError(t, err)
  2528  	assert.True(t, types.IsNull(uv))
  2529  
  2530  	v0_1 := getSingleSortKeyValue(bats[0], schema, 1)
  2531  	filter0_1 := handle.NewEQFilter(v0_1)
  2532  	err = rel.UpdateByFilter(filter0_1, 12, types.Null{})
  2533  	assert.NoError(t, err)
  2534  	uv0_1, err := rel.GetValueByFilter(filter0_1, 12)
  2535  	assert.NoError(t, err)
  2536  	assert.True(t, types.IsNull(uv0_1))
  2537  	assert.NoError(t, txn.Commit())
  2538  
  2539  	txn, rel = tae.getRelation()
  2540  	uv0_1, err = rel.GetValueByFilter(filter0_1, 12)
  2541  	assert.NoError(t, err)
  2542  	assert.True(t, types.IsNull(uv0_1))
  2543  	err = rel.Append(bats[2])
  2544  	assert.NoError(t, err)
  2545  	assert.NoError(t, txn.Commit())
  2546  
  2547  	tae.compactBlocks(false)
  2548  	tae.mergeBlocks(false)
  2549  
  2550  	txn, rel = tae.getRelation()
  2551  	uv0_1, err = rel.GetValueByFilter(filter0_1, 12)
  2552  	assert.NoError(t, err)
  2553  	assert.True(t, types.IsNull(uv0_1))
  2554  	uv0_2, err = rel.GetValueByFilter(filter_2, 3)
  2555  	assert.NoError(t, err)
  2556  	assert.True(t, types.IsNull(uv0_2))
  2557  	assert.NoError(t, txn.Commit())
  2558  
  2559  	tae.restart()
  2560  
  2561  	txn, rel = tae.getRelation()
  2562  	uv0_1, err = rel.GetValueByFilter(filter0_1, 12)
  2563  	assert.NoError(t, err)
  2564  	assert.True(t, types.IsNull(uv0_1))
  2565  	uv0_2, err = rel.GetValueByFilter(filter_2, 3)
  2566  	assert.NoError(t, err)
  2567  	assert.True(t, types.IsNull(uv0_2))
  2568  	assert.NoError(t, txn.Commit())
  2569  }
  2570  
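        // TestTruncate runs concurrent appends with a truncate issued in the middle, then truncates
        // again and expects the relation to be empty.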
  2571  func TestTruncate(t *testing.T) {
  2572  	defer testutils.AfterTest(t)()
  2573  	testutils.EnsureNoLeak(t)
  2574  	opts := config.WithQuickScanAndCKPOpts(nil)
  2575  	tae := newTestEngine(t, opts)
  2576  	defer tae.Close()
  2577  	schema := catalog.MockSchemaAll(18, 15)
  2578  	schema.BlockMaxRows = 10
  2579  	schema.SegmentMaxBlocks = 2
  2580  	tae.bindSchema(schema)
  2581  	bat := catalog.MockBatch(schema, int(schema.BlockMaxRows*5+1))
  2582  	defer bat.Close()
  2583  	bats := bat.Split(20)
  2584  	tae.createRelAndAppend(bats[0], true)
  2585  
  2586  	var wg sync.WaitGroup
  2587  	p, _ := ants.NewPool(10)
  2588  	defer p.Release()
  2589  	tryAppend := func(i int) func() {
  2590  		return func() {
  2591  			defer wg.Done()
  2592  			tae.tryAppend(bats[1+i])
  2593  		}
  2594  	}
  2595  
  2596  	for i := range bats[1:] {
  2597  		if i == 10 {
  2598  			wg.Add(1)
  2599  			_ = p.Submit(func() {
  2600  				defer wg.Done()
  2601  				tae.truncate()
  2602  				t.Log(tae.Catalog.SimplePPString(common.PPL1))
  2603  			})
  2604  		}
  2605  		wg.Add(1)
  2606  		_ = p.Submit(tryAppend(i))
  2607  		time.Sleep(time.Millisecond * 2)
  2608  	}
  2609  	wg.Wait()
  2610  	txn, rel := tae.getRelation()
  2611  	t.Logf("Rows: %d", rel.Rows())
  2612  	assert.NoError(t, txn.Commit())
  2613  	tae.truncate()
  2614  	txn, rel = tae.getRelation()
  2615  	assert.Zero(t, rel.Rows())
  2616  	assert.NoError(t, txn.Commit())
  2617  }
  2618  
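        // TestGetColumnData reads a column from appendable and compacted blocks, with and without a
        // caller-provided buffer, checking that the buffer path avoids extra allocation.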
  2619  func TestGetColumnData(t *testing.T) {
  2620  	defer testutils.AfterTest(t)()
  2621  	testutils.EnsureNoLeak(t)
  2622  	opts := config.WithLongScanAndCKPOpts(nil)
  2623  	tae := newTestEngine(t, opts)
  2624  	defer tae.Close()
  2625  	schema := catalog.MockSchemaAll(18, 13)
  2626  	schema.BlockMaxRows = 10
  2627  	schema.SegmentMaxBlocks = 2
  2628  	tae.bindSchema(schema)
  2629  	bat := catalog.MockBatch(schema, 39)
  2630  	bats := bat.Split(4)
  2631  	defer bat.Close()
  2632  	tae.createRelAndAppend(bats[0], true)
  2633  	txn, rel := tae.getRelation()
  2634  	blk := getOneBlock(rel)
  2635  	view, _ := blk.GetColumnDataById(2, nil)
  2636  	defer view.Close()
  2637  	assert.Equal(t, bats[0].Length(), view.Length())
  2638  	assert.NotZero(t, view.GetData().Allocated())
  2639  
  2640  	buffer := new(bytes.Buffer)
  2641  	view, _ = blk.GetColumnDataById(2, buffer)
  2642  	defer view.Close()
  2643  	assert.Equal(t, bats[0].Length(), view.Length())
  2644  	assert.Zero(t, view.GetData().Allocated())
  2645  	assert.NoError(t, txn.Commit())
  2646  
  2647  	tae.compactBlocks(false)
  2648  	txn, rel = tae.getRelation()
  2649  	blk = getOneBlock(rel)
  2650  	view, _ = blk.GetColumnDataById(2, nil)
  2651  	defer view.Close()
  2652  	assert.Equal(t, bats[0].Length(), view.Length())
  2653  	assert.NotZero(t, view.GetData().Allocated())
  2654  
  2655  	buffer.Reset()
  2656  	view, _ = blk.GetColumnDataById(2, buffer)
  2657  	defer view.Close()
  2658  	assert.Equal(t, bats[0].Length(), view.Length())
  2659  	assert.Zero(t, view.GetData().Allocated())
  2660  	assert.NoError(t, txn.Commit())
  2661  
  2662  	txn, rel = tae.getRelation()
  2663  	err := rel.Append(bats[1])
  2664  	assert.NoError(t, err)
  2665  	blk = getOneBlock(rel)
  2666  	view, err = blk.GetColumnDataById(2, nil)
  2667  	assert.NoError(t, err)
  2668  	defer view.Close()
  2669  	assert.True(t, view.GetData().Equals(bats[1].Vecs[2]))
  2670  	assert.NotZero(t, view.GetData().Allocated())
  2671  	buffer.Reset()
  2672  	view, err = blk.GetColumnDataById(2, buffer)
  2673  	assert.NoError(t, err)
  2674  	defer view.Close()
  2675  	assert.True(t, view.GetData().Equals(bats[1].Vecs[2]))
  2676  	assert.Zero(t, view.GetData().Allocated())
  2677  
  2678  	assert.NoError(t, txn.Commit())
  2679  }
  2680  
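        // TestCompactBlk1 deletes a row, starts a compact-block task, lets a concurrent transaction
        // delete another row, and expects the compaction commit to fail with a W-W conflict, leaving
        // 3 rows before and after restart.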
  2681  func TestCompactBlk1(t *testing.T) {
  2682  	defer testutils.AfterTest(t)()
  2683  	testutils.EnsureNoLeak(t)
  2684  	opts := config.WithLongScanAndCKPOpts(nil)
  2685  	tae := newTestEngine(t, opts)
  2686  	defer tae.Close()
  2687  	schema := catalog.MockSchemaAll(3, 1)
  2688  	schema.BlockMaxRows = 5
  2689  	schema.SegmentMaxBlocks = 2
  2690  	tae.bindSchema(schema)
  2691  	bat := catalog.MockBatch(schema, 5)
  2692  	bats := bat.Split(5)
  2693  	defer bat.Close()
  2694  
  2695  	tae.createRelAndAppend(bats[2], true)
  2696  
  2697  	txn, rel := tae.getRelation()
  2698  	_ = rel.Append(bats[1])
  2699  	assert.Nil(t, txn.Commit())
  2700  
  2701  	txn, rel = tae.getRelation()
  2702  	_ = rel.Append(bats[3])
  2703  	assert.Nil(t, txn.Commit())
  2704  
  2705  	txn, rel = tae.getRelation()
  2706  	_ = rel.Append(bats[4])
  2707  	assert.Nil(t, txn.Commit())
  2708  
  2709  	txn, rel = tae.getRelation()
  2710  	_ = rel.Append(bats[0])
  2711  	assert.Nil(t, txn.Commit())
  2712  
  2713  	{
  2714  		v := getSingleSortKeyValue(bat, schema, 1)
  2715  		t.Logf("v is %v**********", v)
  2716  		filter := handle.NewEQFilter(v)
  2717  		txn2, rel := tae.getRelation()
  2718  		t.Log("********before delete******************")
  2719  		checkAllColRowsByScan(t, rel, 5, true)
  2720  		_ = rel.DeleteByFilter(filter)
  2721  		assert.Nil(t, txn2.Commit())
  2722  	}
  2723  
  2724  	_, rel = tae.getRelation()
  2725  	checkAllColRowsByScan(t, rel, 4, true)
  2726  
  2727  	{
  2728  		t.Log("************compact************")
  2729  		txn, rel = tae.getRelation()
  2730  		it := rel.MakeBlockIt()
  2731  		blk := it.GetBlock()
  2732  		meta := blk.GetMeta().(*catalog.BlockEntry)
  2733  		task, err := jobs.NewCompactBlockTask(nil, txn, meta, tae.DB.Scheduler)
  2734  		assert.NoError(t, err)
  2735  		err = task.OnExec()
  2736  		assert.NoError(t, err)
  2737  
  2738  		{
  2739  			v := getSingleSortKeyValue(bat, schema, 2)
  2740  			t.Logf("v is %v**********", v)
  2741  			filter := handle.NewEQFilter(v)
  2742  			txn2, rel := tae.getRelation()
  2743  			t.Log("********before delete******************")
  2744  			checkAllColRowsByScan(t, rel, 4, true)
  2745  			_ = rel.DeleteByFilter(filter)
  2746  			assert.Nil(t, txn2.Commit())
  2747  		}
  2748  
  2749  		err = txn.Commit()
  2750  		assert.True(t, moerr.IsMoErrCode(err, moerr.ErrTxnWWConflict))
  2751  	}
  2752  
  2753  	_, rel = tae.getRelation()
  2754  	checkAllColRowsByScan(t, rel, 3, true)
  2755  	assert.Equal(t, int64(3), rel.Rows())
  2756  
  2757  	tae.restart()
  2758  	_, rel = tae.getRelation()
  2759  	checkAllColRowsByScan(t, rel, 3, true)
  2760  	assert.Equal(t, int64(3), rel.Rows())
  2761  }
  2762  
  2763  func TestCompactBlk2(t *testing.T) {
  2764  	defer testutils.AfterTest(t)()
  2765  	testutils.EnsureNoLeak(t)
  2766  	opts := config.WithLongScanAndCKPOpts(nil)
  2767  	tae := newTestEngine(t, opts)
  2768  	defer tae.Close()
  2769  	schema := catalog.MockSchemaAll(3, 1)
  2770  	schema.BlockMaxRows = 5
  2771  	schema.SegmentMaxBlocks = 2
  2772  	tae.bindSchema(schema)
  2773  	bat := catalog.MockBatch(schema, 5)
  2774  	bats := bat.Split(5)
  2775  	defer bat.Close()
  2776  
  2777  	tae.createRelAndAppend(bats[2], true)
  2778  
  2779  	txn, rel := tae.getRelation()
  2780  	_ = rel.Append(bats[1])
  2781  	assert.Nil(t, txn.Commit())
  2782  
  2783  	txn, rel = tae.getRelation()
  2784  	_ = rel.Append(bats[3])
  2785  	assert.Nil(t, txn.Commit())
  2786  
  2787  	txn, rel = tae.getRelation()
  2788  	_ = rel.Append(bats[4])
  2789  	assert.Nil(t, txn.Commit())
  2790  
  2791  	txn, rel = tae.getRelation()
  2792  	_ = rel.Append(bats[0])
  2793  	assert.Nil(t, txn.Commit())
  2794  
  2795  	v := getSingleSortKeyValue(bat, schema, 1)
  2796  	t.Logf("v is %v**********", v)
  2797  	filter := handle.NewEQFilter(v)
  2798  	txn2, rel1 := tae.getRelation()
  2799  	t.Log("********before delete******************")
  2800  	checkAllColRowsByScan(t, rel1, 5, true)
  2801  	_ = rel1.DeleteByFilter(filter)
  2802  	assert.Nil(t, txn2.Commit())
  2803  
  2804  	_, rel2 := tae.getRelation()
  2805  	checkAllColRowsByScan(t, rel2, 4, true)
  2806  
  2807  	t.Log("************compact************")
  2808  	txn, rel = tae.getRelation()
  2809  	it := rel.MakeBlockIt()
  2810  	blk := it.GetBlock()
  2811  	meta := blk.GetMeta().(*catalog.BlockEntry)
  2812  	task, err := jobs.NewCompactBlockTask(nil, txn, meta, tae.DB.Scheduler)
  2813  	assert.NoError(t, err)
  2814  	err = task.OnExec()
  2815  	assert.NoError(t, err)
  2816  	err = txn.Commit()
  2817  	assert.NoError(t, err)
  2818  
  2819  	v = getSingleSortKeyValue(bat, schema, 2)
  2820  	t.Logf("v is %v**********", v)
  2821  	filter = handle.NewEQFilter(v)
  2822  	txn2, rel3 := tae.getRelation()
  2823  	t.Log("********before delete******************")
  2824  	checkAllColRowsByScan(t, rel3, 4, true)
  2825  	_ = rel3.DeleteByFilter(filter)
  2826  	assert.Nil(t, txn2.Commit())
  2827  
  2828  	v = getSingleSortKeyValue(bat, schema, 4)
  2829  	t.Logf("v is %v**********", v)
  2830  	filter = handle.NewEQFilter(v)
  2831  	txn2, rel4 := tae.getRelation()
  2832  	t.Log("********before delete******************")
  2833  	checkAllColRowsByScan(t, rel4, 3, true)
  2834  	_ = rel4.DeleteByFilter(filter)
  2835  	assert.Nil(t, txn2.Commit())
  2836  
  2837  	checkAllColRowsByScan(t, rel1, 5, true)
  2838  	checkAllColRowsByScan(t, rel2, 4, true)
  2839  
  2840  	_, rel = tae.getRelation()
  2841  	checkAllColRowsByScan(t, rel, 2, true)
  2842  	assert.Equal(t, int64(2), rel.Rows())
  2843  
  2844  	v = getSingleSortKeyValue(bat, schema, 2)
  2845  	filter = handle.NewEQFilter(v)
  2846  	_, _, err = rel.GetByFilter(filter)
  2847  	assert.NotNil(t, err)
  2848  
  2849  	v = getSingleSortKeyValue(bat, schema, 4)
  2850  	filter = handle.NewEQFilter(v)
  2851  	_, _, err = rel.GetByFilter(filter)
  2852  	assert.NotNil(t, err)
  2853  
  2854  	tae.restart()
  2855  	assert.Equal(t, int64(2), rel.Rows())
  2856  }
  2857  
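        // TestCompactblk3 compacts a block that carries a delete and then walks the catalog to verify
        // the resulting block exposes only the two remaining rows.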
  2858  func TestCompactblk3(t *testing.T) {
  2859  	defer testutils.AfterTest(t)()
  2860  	testutils.EnsureNoLeak(t)
  2861  	opts := config.WithLongScanAndCKPOpts(nil)
  2862  	tae := newTestEngine(t, opts)
  2863  	defer tae.Close()
  2864  	schema := catalog.MockSchemaAll(3, 1)
  2865  	schema.BlockMaxRows = 5
  2866  	schema.SegmentMaxBlocks = 2
  2867  	tae.bindSchema(schema)
  2868  	bat := catalog.MockBatch(schema, 3)
  2869  	defer bat.Close()
  2870  
  2871  	tae.createRelAndAppend(bat, true)
  2872  
  2873  	v := getSingleSortKeyValue(bat, schema, 1)
  2874  	filter := handle.NewEQFilter(v)
  2875  	txn2, rel1 := tae.getRelation()
  2876  	checkAllColRowsByScan(t, rel1, 3, true)
  2877  	_ = rel1.DeleteByFilter(filter)
  2878  	assert.Nil(t, txn2.Commit())
  2879  
  2880  	_, rel2 := tae.getRelation()
  2881  	checkAllColRowsByScan(t, rel2, 2, true)
  2882  
  2883  	txn, rel := tae.getRelation()
  2884  	it := rel.MakeBlockIt()
  2885  	blk := it.GetBlock()
  2886  	meta := blk.GetMeta().(*catalog.BlockEntry)
  2887  	task, err := jobs.NewCompactBlockTask(nil, txn, meta, tae.DB.Scheduler)
  2888  	assert.NoError(t, err)
  2889  	err = task.OnExec()
  2890  	assert.NoError(t, err)
  2891  	err = txn.Commit()
  2892  	assert.NoError(t, err)
  2893  
  2894  	txn, err = tae.StartTxn(nil)
  2895  	assert.NoError(t, err)
  2896  	processor := &catalog.LoopProcessor{}
  2897  	processor.BlockFn = func(be *catalog.BlockEntry) error {
  2898  		if be.GetSegment().GetTable().GetDB().IsSystemDB() {
  2899  			return nil
  2900  		}
  2901  		view, err := be.GetBlockData().GetColumnDataById(txn, 0, nil)
  2902  		assert.NoError(t, err)
  2903  		view.ApplyDeletes()
  2904  		assert.Equal(t, 2, view.Length())
  2905  		return nil
  2906  	}
  2907  	err = tae.Catalog.RecurLoop(processor)
  2908  	assert.NoError(t, err)
  2909  }
  2910  
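        // TestImmutableIndexInAblk compacts an appendable block with a deleted row, then probes the
        // block data by filter for deleted and live keys and expects BatchDedup on the pk column to
        // report duplicates.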
  2911  func TestImmutableIndexInAblk(t *testing.T) {
  2912  	defer testutils.AfterTest(t)()
  2913  	testutils.EnsureNoLeak(t)
  2914  	opts := config.WithLongScanAndCKPOpts(nil)
  2915  	tae := newTestEngine(t, opts)
  2916  	defer tae.Close()
  2917  	schema := catalog.MockSchemaAll(3, 1)
  2918  	schema.BlockMaxRows = 5
  2919  	schema.SegmentMaxBlocks = 2
  2920  	tae.bindSchema(schema)
  2921  	bat := catalog.MockBatch(schema, 5)
  2922  	bats := bat.Split(5)
  2923  	defer bat.Close()
  2924  
  2925  	tae.createRelAndAppend(bats[2], true)
  2926  	txn, rel := tae.getRelation()
  2927  	_ = rel.Append(bats[1])
  2928  	assert.Nil(t, txn.Commit())
  2929  	txn, rel = tae.getRelation()
  2930  	_ = rel.Append(bats[3])
  2931  	assert.Nil(t, txn.Commit())
  2932  	txn, rel = tae.getRelation()
  2933  	_ = rel.Append(bats[4])
  2934  	assert.Nil(t, txn.Commit())
  2935  	txn, rel = tae.getRelation()
  2936  	_ = rel.Append(bats[0])
  2937  	assert.Nil(t, txn.Commit())
  2938  
  2939  	v := getSingleSortKeyValue(bat, schema, 1)
  2940  	filter := handle.NewEQFilter(v)
  2941  	txn2, rel := tae.getRelation()
  2942  	_ = rel.DeleteByFilter(filter)
  2943  	assert.Nil(t, txn2.Commit())
  2944  
  2945  	txn, rel = tae.getRelation()
  2946  	it := rel.MakeBlockIt()
  2947  	blk := it.GetBlock()
  2948  	meta := blk.GetMeta().(*catalog.BlockEntry)
  2949  	task, err := jobs.NewCompactBlockTask(nil, txn, meta, tae.DB.Scheduler)
  2950  	assert.NoError(t, err)
  2951  	err = task.OnExec()
  2952  	assert.NoError(t, err)
  2953  	err = txn.Commit()
  2954  	assert.NoError(t, err)
  2955  
  2956  	txn, _ = tae.getRelation()
  2957  	_, err = meta.GetBlockData().GetByFilter(txn, filter)
  2958  	assert.Error(t, err)
  2959  	v = getSingleSortKeyValue(bat, schema, 2)
  2960  	filter = handle.NewEQFilter(v)
  2961  	_, err = meta.GetBlockData().GetByFilter(txn, filter)
  2962  	assert.NoError(t, err)
  2963  
  2964  	err = meta.GetBlockData().BatchDedup(txn, bat.Vecs[1], nil, false)
  2965  	assert.Error(t, err)
  2966  }
  2967  
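        // TestDelete3 repeatedly deletes all rows and re-appends them while a background heartbeat
        // task collects and logs the dirty block count.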
  2968  func TestDelete3(t *testing.T) {
  2969  	// t.Skip(any("This case crashes occasionally, is being fixed, skip it for now"))
  2970  	defer testutils.AfterTest(t)()
  2971  	opts := config.WithQuickScanAndCKPOpts(nil)
  2972  	tae := newTestEngine(t, opts)
  2973  	defer tae.Close()
  2974  
  2975  	// this task won't affect the test logic, it just prints logs about the dirty count
  2976  	forest := logtail.NewDirtyCollector(tae.LogtailMgr, opts.Clock, tae.Catalog, new(catalog.LoopProcessor))
  2977  	hb := ops.NewHeartBeaterWithFunc(5*time.Millisecond, func() {
  2978  		forest.Run()
  2979  		t.Log(forest.String())
  2980  	}, nil)
  2981  	hb.Start()
  2982  	defer hb.Stop()
  2983  	schema := catalog.MockSchemaAll(3, 2)
  2984  	schema.BlockMaxRows = 10
  2985  	schema.SegmentMaxBlocks = 2
  2986  	tae.bindSchema(schema)
  2987  	// rows := int(schema.BlockMaxRows * 1)
  2988  	rows := int(schema.BlockMaxRows*3) + 1
  2989  	bat := catalog.MockBatch(schema, rows)
  2990  
  2991  	tae.createRelAndAppend(bat, true)
  2992  	tae.checkRowsByScan(rows, false)
  2993  	deleted := false
  2994  	for i := 0; i < 10; i++ {
  2995  		if deleted {
  2996  			tae.checkRowsByScan(0, true)
  2997  			tae.DoAppend(bat)
  2998  			deleted = false
  2999  			tae.checkRowsByScan(rows, true)
  3000  		} else {
  3001  			tae.checkRowsByScan(rows, true)
  3002  			err := tae.deleteAll(true)
  3003  			if err == nil {
  3004  				deleted = true
  3005  				tae.checkRowsByScan(0, true)
  3006  				// assert.Zero(t, tae.getRows())
  3007  			} else {
  3008  				tae.checkRowsByScan(rows, true)
  3009  				// assert.Equal(t, tae.getRows(), rows)
  3010  			}
  3011  		}
  3012  	}
  3013  	t.Log(tae.Catalog.SimplePPString(common.PPL1))
  3014  }
  3015  
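        // TestDropCreated1 creates and drops a database in the same transaction and checks that the
        // database entry's create timestamp equals the transaction's commit timestamp.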
  3016  func TestDropCreated1(t *testing.T) {
  3017  	defer testutils.AfterTest(t)()
  3018  	opts := config.WithLongScanAndCKPOpts(nil)
  3019  	tae := newTestEngine(t, opts)
  3020  	defer tae.Close()
  3021  
  3022  	txn, err := tae.StartTxn(nil)
  3023  	assert.Nil(t, err)
  3024  	_, err = txn.CreateDatabase("db", "")
  3025  	assert.Nil(t, err)
  3026  	db, err := txn.DropDatabase("db")
  3027  	assert.Nil(t, err)
  3028  	assert.Nil(t, txn.Commit())
  3029  
  3030  	assert.Equal(t, txn.GetCommitTS(), db.GetMeta().(*catalog.DBEntry).GetCreatedAt())
  3031  	assert.Equal(t, txn.GetCommitTS(), db.GetMeta().(*catalog.DBEntry).GetCreatedAt())
  3032  
  3033  	tae.restart()
  3034  }
  3035  
  3036  func TestDropCreated2(t *testing.T) {
  3037  	defer testutils.AfterTest(t)()
  3038  	opts := config.WithLongScanAndCKPOpts(nil)
  3039  	tae := newTestEngine(t, opts)
  3040  	schema := catalog.MockSchemaAll(1, -1)
  3041  	defer tae.Close()
  3042  
  3043  	txn, err := tae.StartTxn(nil)
  3044  	assert.Nil(t, err)
  3045  	db, err := txn.CreateDatabase("db", "")
  3046  	assert.Nil(t, err)
  3047  	rel, err := db.CreateRelation(schema)
  3048  	assert.Nil(t, err)
  3049  	_, err = db.DropRelationByName(schema.Name)
  3050  	assert.Nil(t, err)
  3051  	assert.Nil(t, txn.Commit())
  3052  
  3053  	assert.Equal(t, txn.GetCommitTS(), rel.GetMeta().(*catalog.TableEntry).GetCreatedAt())
  3054  	assert.Equal(t, txn.GetCommitTS(), rel.GetMeta().(*catalog.TableEntry).GetCreatedAt())
  3055  
  3056  	tae.restart()
  3057  }
  3058  
  3059  // a record is created at ts 1 and committed
  3060  // a read at ts 1 should return no error
  3061  func TestReadEqualTS(t *testing.T) {
  3062  	defer testutils.AfterTest(t)()
  3063  	opts := config.WithLongScanAndCKPOpts(nil)
  3064  	tae := newTestEngine(t, opts)
  3065  	defer tae.Close()
  3066  
  3067  	txn, err := tae.StartTxn(nil)
  3068  	tae.Catalog.CreateDBEntryByTS("db", txn.GetStartTS())
  3069  	assert.Nil(t, err)
  3070  	_, err = txn.GetDatabase("db")
  3071  	assert.Nil(t, err)
  3072  }
  3073  
  3074  func TestTruncateZonemap(t *testing.T) {
  3075  	defer testutils.AfterTest(t)()
  3076  	type Mod struct {
  3077  		offset int
  3078  		v      byte
  3079  	}
  3080  	mockBytes := func(init byte, size int, mods ...Mod) []byte {
  3081  		ret := make([]byte, size)
  3082  		for i := 0; i < size; i++ {
  3083  			ret[i] = init
  3084  		}
  3085  		for _, m := range mods {
  3086  			ret[m.offset] = m.v
  3087  		}
  3088  		return ret
  3089  	}
  3090  	testutils.EnsureNoLeak(t)
  3091  	opts := config.WithLongScanAndCKPOpts(nil)
  3092  	tae := newTestEngine(t, opts)
  3093  	defer tae.Close()
  3094  
  3095  	schema := catalog.MockSchemaAll(13, 12) // set varchar PK
  3096  	schema.BlockMaxRows = 10
  3097  	schema.SegmentMaxBlocks = 2
  3098  	tae.bindSchema(schema)
  3099  
  3100  	bat := catalog.MockBatch(schema, int(schema.BlockMaxRows*2+9))        // 2.9 blocks
  3101  	minv := mockBytes(0, 35)                                              // 0x00000000
  3102  	trickyMinv := mockBytes(0, 33)                                        // smaller than minv, not in mut index but in immut index
  3103  	maxv := mockBytes(0xff, 35, Mod{0, 0x61}, Mod{1, 0x62}, Mod{2, 0x63}) // abc0xff0xff...
  3104  	trickyMaxv := []byte("abd")                                           // bigger than maxv, not in mut index but in immut index
  3105  	bat.Vecs[12].Update(8, maxv)
  3106  	bat.Vecs[12].Update(11, minv)
  3107  	bat.Vecs[12].Update(22, []byte("abcc"))
  3108  	defer bat.Close()
  3109  
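        	// checkMinMax: the tricky values were never inserted, so both lookups must
        	// miss, while the planted minv/maxv must be found at the expected row offsets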
  3110  	checkMinMax := func(rel handle.Relation, minvOffset, maxvOffset uint32) {
  3111  		_, _, err := rel.GetByFilter(handle.NewEQFilter(trickyMinv))
  3112  		assert.True(t, moerr.IsMoErrCode(err, moerr.ErrNotFound))
  3113  		_, _, err = rel.GetByFilter(handle.NewEQFilter(trickyMaxv))
  3114  		assert.True(t, moerr.IsMoErrCode(err, moerr.ErrNotFound))
  3115  		_, row, err := rel.GetByFilter(handle.NewEQFilter(minv))
  3116  		assert.NoError(t, err)
  3117  		assert.Equal(t, minvOffset, row)
  3118  		_, row, err = rel.GetByFilter(handle.NewEQFilter(maxv))
  3119  		assert.NoError(t, err)
  3120  		assert.Equal(t, maxvOffset, row)
  3121  	}
  3122  
  3123  	tae.createRelAndAppend(bat, true)
  3124  
  3125  	// runtime check
  3126  	txn, rel := tae.getRelation()
  3127  	checkMinMax(rel, 1, 8)
  3128  	assert.NoError(t, txn.Commit())
  3129  
  3130  	// restart without compact
  3131  	tae.restart()
  3132  	txn, rel = tae.getRelation()
  3133  	checkMinMax(rel, 1, 8)
  3134  	assert.NoError(t, txn.Commit())
  3135  
  3136  	// restart with compact
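        	// after compact+merge the data is rewritten sorted by the sort key, which is
        	// presumably why minv/maxv now show up at offsets 0 and 9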
  3137  	tae.compactBlocks(false)
  3138  	tae.mergeBlocks(false)
  3139  	tae.restart()
  3140  	txn, rel = tae.getRelation()
  3141  	checkMinMax(rel, 0, 9)
  3142  	assert.NoError(t, txn.Commit())
  3143  
  3144  	// 3 NonAppendable Blocks
  3145  	txn, rel = tae.getRelation()
  3146  	rel.UpdateByFilter(handle.NewEQFilter(maxv), 12, mockBytes(0xff, 35))
  3147  	assert.NoError(t, txn.Commit())
  3148  	tae.compactBlocks(false)
  3149  	tae.mergeBlocks(false)
  3150  	tae.restart()
  3151  
  3152  	txn, rel = tae.getRelation()
  3153  	_, row, err := rel.GetByFilter(handle.NewEQFilter(mockBytes(0xff, 35)))
  3154  	assert.NoError(t, err)
  3155  	assert.Equal(t, uint32(9), row)
  3156  	assert.NoError(t, txn.Commit())
  3157  }
  3158  
  3159  func mustStartTxn(t *testing.T, tae *testEngine, tenantID uint32) txnif.AsyncTxn {
  3160  	txn, err := tae.StartTxn(nil)
  3161  	assert.NoError(t, err)
  3162  	txn.BindAccessInfo(tenantID, 0, 0)
  3163  	return txn
  3164  }
  3165  
  3166  func TestMultiTenantDBOps(t *testing.T) {
  3167  	defer testutils.AfterTest(t)()
  3168  	var err error
  3169  	opts := config.WithLongScanAndCKPOpts(nil)
  3170  	tae := newTestEngine(t, opts)
  3171  	defer tae.Close()
  3172  
  3173  	txn11 := mustStartTxn(t, tae, 1)
  3174  	_, err = txn11.CreateDatabase("db", "")
  3175  	assert.NoError(t, err)
  3176  	txn12 := mustStartTxn(t, tae, 1)
  3177  	_, err = txn11.CreateDatabase("db", "")
  3178  	assert.Error(t, err)
  3179  
  3180  	txn21 := mustStartTxn(t, tae, 2)
  3181  	_, err = txn21.CreateDatabase("db", "")
  3182  	assert.NoError(t, err)
  3183  
  3184  	assert.NoError(t, txn11.Commit())
  3185  	assert.NoError(t, txn12.Commit())
  3186  	assert.NoError(t, txn21.Commit())
  3187  
  3188  	txn22 := mustStartTxn(t, tae, 2)
  3189  	_, _ = txn22.CreateDatabase("db2", "")
  3190  
  3191  	txn23 := mustStartTxn(t, tae, 2)
  3192  	// [mo_catalog, db]
  3193  	assert.Equal(t, 2, len(txn23.DatabaseNames()))
  3194  	assert.NoError(t, txn23.Commit())
  3195  
  3196  	txn22.Commit()
  3197  	tae.restart()
  3198  
  3199  	txn24 := mustStartTxn(t, tae, 2)
  3200  	// [mo_catalog, db, db2]
  3201  	assert.Equal(t, 3, len(txn24.DatabaseNames()))
  3202  	assert.NoError(t, txn24.Commit())
  3203  
  3204  	txn13 := mustStartTxn(t, tae, 1)
  3205  	// [mo_catalog, db]
  3206  	assert.Equal(t, 2, len(txn13.DatabaseNames()))
  3207  
  3208  	_, err = txn13.GetDatabase("db2")
  3209  	assert.Error(t, err)
  3210  	dbHdl, err := txn13.GetDatabase("db")
  3211  	assert.NoError(t, err)
  3212  	assert.Equal(t, uint32(1), dbHdl.GetMeta().(*catalog.DBEntry).GetTenantID())
  3213  
  3214  	_, err = txn13.DropDatabase("db2")
  3215  	assert.Error(t, err)
  3216  	_, err = txn13.DropDatabase("db")
  3217  	assert.NoError(t, err)
  3218  	assert.NoError(t, txn13.Commit())
  3219  
  3220  	txn14 := mustStartTxn(t, tae, 1)
  3221  	// [mo_catalog]
  3222  	assert.Equal(t, 1, len(txn14.DatabaseNames()))
  3223  	assert.NoError(t, txn14.Commit())
  3224  }
  3225  
  3226  func TestMultiTenantMoCatalogOps(t *testing.T) {
  3227  	defer testutils.AfterTest(t)()
  3228  	var err error
  3229  	opts := config.WithLongScanAndCKPOpts(nil)
  3230  	tae := newTestEngine(t, opts)
  3231  	defer tae.Close()
  3232  
  3233  	s := catalog.MockSchemaAll(1, 0)
  3234  	s.Name = "mo_accounts"
  3235  	txn0, sysDB := tae.getDB(pkgcatalog.MO_CATALOG)
  3236  	_, err = sysDB.CreateRelation(s)
  3237  	assert.NoError(t, err)
  3238  	assert.NoError(t, txn0.Commit())
  3239  
  3240  	schema11 := catalog.MockSchemaAll(3, 0)
  3241  	schema11.BlockMaxRows = 10
  3242  	schema11.SegmentMaxBlocks = 2
  3243  	tae.bindSchema(schema11)
  3244  	tae.bindTenantID(1)
  3245  
  3246  	bat1 := catalog.MockBatch(schema11, int(schema11.BlockMaxRows*2+9))
  3247  	tae.createRelAndAppend(bat1, true)
  3248  	// pretend 'mo_users'
  3249  	s = catalog.MockSchemaAll(1, 0)
  3250  	s.Name = "mo_users"
  3251  	txn11, sysDB := tae.getDB(pkgcatalog.MO_CATALOG)
  3252  	_, err = sysDB.CreateRelation(s)
  3253  	assert.NoError(t, err)
  3254  	assert.NoError(t, txn11.Commit())
  3255  
  3256  	tae.compactBlocks(false)
  3257  	tae.mergeBlocks(false)
  3258  
  3259  	schema21 := catalog.MockSchemaAll(2, 1)
  3260  	schema21.BlockMaxRows = 10
  3261  	schema21.SegmentMaxBlocks = 2
  3262  	tae.bindSchema(schema21)
  3263  	tae.bindTenantID(2)
  3264  
  3265  	bat2 := catalog.MockBatch(schema21, int(schema21.BlockMaxRows*3+5))
  3266  	tae.createRelAndAppend(bat2, true)
  3267  	txn21, sysDB := tae.getDB(pkgcatalog.MO_CATALOG)
  3268  	s = catalog.MockSchemaAll(1, 0)
  3269  	s.Name = "mo_users"
  3270  	_, err = sysDB.CreateRelation(s)
  3271  	assert.NoError(t, err)
  3272  	assert.NoError(t, txn21.Commit())
  3273  
  3274  	tae.compactBlocks(false)
  3275  	tae.mergeBlocks(false)
  3276  
  3277  	tae.restart()
  3278  
  3279  	reservedColumnsCnt := len(catalog.SystemDBSchema.ColDefs) +
  3280  		len(catalog.SystemColumnSchema.ColDefs) +
  3281  		len(catalog.SystemTableSchema.ColDefs)
  3282  	{
  3283  		// account 2
  3284  		// sanity-check the data: 35 rows == BlockMaxRows*3+5 appended for schema21
  3285  		_, tbl := tae.getRelation()
  3286  		checkAllColRowsByScan(t, tbl, 35, false)
  3287  		// [mo_catalog, db]
  3288  		assert.Equal(t, 2, len(mustStartTxn(t, tae, 2).DatabaseNames()))
  3289  		_, sysDB = tae.getDB(pkgcatalog.MO_CATALOG)
  3290  		sysDB.Relations()
  3291  		sysDBTbl, _ := sysDB.GetRelationByName(pkgcatalog.MO_DATABASE)
  3292  		// [mo_catalog, db]
  3293  		checkAllColRowsByScan(t, sysDBTbl, 2, true)
  3294  		sysTblTbl, _ := sysDB.GetRelationByName(pkgcatalog.MO_TABLES)
  3295  		// [mo_database, mo_tables, mo_columns, 'mo_users_t2', 'test-table-a-timestamp']
  3296  		checkAllColRowsByScan(t, sysTblTbl, 5, true)
  3297  		sysColTbl, _ := sysDB.GetRelationByName(pkgcatalog.MO_COLUMNS)
  3298  		// [mo_database(8), mo_tables(13), mo_columns(19), 'mo_users_t2'(1+1), 'test-table-a-timestamp'(2+1)]
  3299  		checkAllColRowsByScan(t, sysColTbl, reservedColumnsCnt+5, true)
  3300  	}
  3301  	{
  3302  		// account 1
  3303  		tae.bindSchema(schema11)
  3304  		tae.bindTenantID(1)
  3305  		// sanity-check the data: 29 rows == BlockMaxRows*2+9 appended for schema11
  3306  		_, tbl := tae.getRelation()
  3307  		checkAllColRowsByScan(t, tbl, 29, false)
  3308  		// [mo_catalog, db]
  3309  		assert.Equal(t, 2, len(mustStartTxn(t, tae, 1).DatabaseNames()))
  3310  		_, sysDB = tae.getDB(pkgcatalog.MO_CATALOG)
  3311  		sysDB.Relations()
  3312  		sysDBTbl, _ := sysDB.GetRelationByName(pkgcatalog.MO_DATABASE)
  3313  		// [mo_catalog, db]
  3314  		checkAllColRowsByScan(t, sysDBTbl, 2, true)
  3315  		sysTblTbl, _ := sysDB.GetRelationByName(pkgcatalog.MO_TABLES)
  3316  		// [mo_database, mo_tables, mo_columns, 'mo_users_t1', 'test-table-a-timestamp']
  3317  		checkAllColRowsByScan(t, sysTblTbl, 5, true)
  3318  		sysColTbl, _ := sysDB.GetRelationByName(pkgcatalog.MO_COLUMNS)
  3319  		// [mo_database(8), mo_tables(13), mo_columns(19), 'mo_users_t1'(1+1), 'test-table-a-timestamp'(3+1)]
  3320  		checkAllColRowsByScan(t, sysColTbl, reservedColumnsCnt+6, true)
  3321  	}
  3322  	{
  3323  		// sys account
  3324  		tae.bindSchema(nil)
  3325  		tae.bindTenantID(0)
  3326  		// [mo_catalog]
  3327  		assert.Equal(t, 1, len(mustStartTxn(t, tae, 0).DatabaseNames()))
  3328  		_, sysDB = tae.getDB(pkgcatalog.MO_CATALOG)
  3329  		sysDB.Relations()
  3330  		sysDBTbl, _ := sysDB.GetRelationByName(pkgcatalog.MO_DATABASE)
  3331  		// [mo_catalog]
  3332  		checkAllColRowsByScan(t, sysDBTbl, 1, true)
  3333  		sysTblTbl, _ := sysDB.GetRelationByName(pkgcatalog.MO_TABLES)
  3334  		// [mo_database, mo_tables, mo_columns, 'mo_accounts']
  3335  		checkAllColRowsByScan(t, sysTblTbl, 4, true)
  3336  		sysColTbl, _ := sysDB.GetRelationByName(pkgcatalog.MO_COLUMNS)
  3337  		// [mo_database(8), mo_tables(13), mo_columns(19), 'mo_accounts'(1+1)]
  3338  		checkAllColRowsByScan(t, sysColTbl, reservedColumnsCnt+2, true)
  3339  	}
  3340  
  3341  }
  3342  
  3343  // txn1: create a segment and update its meta location
  3344  // txn2: update the delta location and soft-delete the segment
  3345  func TestUpdateAttr(t *testing.T) {
  3346  	defer testutils.AfterTest(t)()
  3347  	opts := config.WithLongScanAndCKPOpts(nil)
  3348  	tae := newTestEngine(t, opts)
  3349  	schema := catalog.MockSchemaAll(1, -1)
  3350  	defer tae.Close()
  3351  
  3352  	txn, err := tae.StartTxn(nil)
  3353  	assert.NoError(t, err)
  3354  	db, err := txn.CreateDatabase("db", "")
  3355  	assert.NoError(t, err)
  3356  	rel, err := db.CreateRelation(schema)
  3357  	assert.NoError(t, err)
  3358  	seg, err := rel.CreateSegment(false)
  3359  	assert.NoError(t, err)
  3360  	seg.GetMeta().(*catalog.SegmentEntry).UpdateMetaLoc(txn, "test_1")
  3361  	assert.NoError(t, txn.Commit())
  3362  
  3363  	txn, err = tae.StartTxn(nil)
  3364  	assert.NoError(t, err)
  3365  	db, err = txn.GetDatabase("db")
  3366  	assert.NoError(t, err)
  3367  	rel, err = db.GetRelationByName(schema.Name)
  3368  	assert.NoError(t, err)
  3369  	seg, err = rel.GetSegment(seg.GetID())
  3370  	assert.NoError(t, err)
  3371  	seg.GetMeta().(*catalog.SegmentEntry).UpdateDeltaLoc(txn, "test_2")
  3372  	rel.SoftDeleteSegment(seg.GetID())
  3373  	assert.NoError(t, txn.Commit())
  3374  
  3375  	t.Log(tae.Catalog.SimplePPString(3))
  3376  
  3377  	tae.restart()
  3378  
  3379  	t.Log(tae.Catalog.SimplePPString(3))
  3380  }
  3381  
  3382  type dummyCpkGetter struct{}
  3383  
  3384  func (c *dummyCpkGetter) CollectCheckpointsInRange(ctx context.Context, start, end types.TS) (ckpLoc string, lastEnd types.TS, err error) {
  3385  	return "", types.TS{}, nil
  3386  }
  3387  
  3388  func (c *dummyCpkGetter) FlushTable(dbID, tableID uint64, ts types.TS) error { return nil }
  3389  
  3390  func TestLogtailBasic(t *testing.T) {
  3391  	defer testutils.AfterTest(t)()
  3392  	opts := config.WithLongScanAndCKPOpts(nil)
  3393  	opts.LogtailCfg = &options.LogtailCfg{PageSize: 30}
  3394  	tae := newTestEngine(t, opts)
  3395  	logMgr := tae.LogtailMgr
  3396  	defer tae.Close()
  3397  
  3398  	// at first, we can see nothing
  3399  	minTs, maxTs := types.BuildTS(0, 0), types.BuildTS(1000, 1000)
  3400  	reader := logMgr.GetReader(minTs, maxTs)
  3401  	assert.False(t, reader.HasCatalogChanges())
  3402  	assert.Equal(t, 0, len(reader.GetDirtyByTable(1000, 1000).Segs))
  3403  
  3404  	schema := catalog.MockSchemaAll(2, -1)
  3405  	schema.Name = "test"
  3406  	schema.BlockMaxRows = 10
  3407  	schema.SegmentMaxBlocks = 2
  3408  	// create 2 dbs and 2 tables
  3409  	txn, _ := tae.StartTxn(nil)
  3410  	todropdb, _ := txn.CreateDatabase("todrop", "")
  3411  	todropdb.CreateRelation(schema)
  3412  	db, _ := txn.CreateDatabase("db", "")
  3413  	tbl, _ := db.CreateRelation(schema)
  3414  	dbID := db.GetID()
  3415  	tableID := tbl.ID()
  3416  	txn.Commit()
  3417  	catalogWriteTs := txn.GetPrepareTS()
  3418  
  3419  	// drop the first db
  3420  	txn2, _ := tae.StartTxn(nil)
  3421  	txn2.DropDatabase("todrop")
  3422  	txn2.Commit()
  3423  	catalogDropTs := txn2.GetPrepareTS()
  3424  
  3425  	writeTs := make([]types.TS, 0, 120)
  3426  	deleteRowIDs := make([]types.Rowid, 0, 10)
  3427  
  3428  	wg := new(sync.WaitGroup)
  3429  	wg.Add(1)
  3430  	go func() {
  3431  		// insert 100 rows
  3432  		for i := 0; i < 100; i++ {
  3433  			txn, _ := tae.StartTxn(nil)
  3434  			db, _ := txn.GetDatabase("db")
  3435  			tbl, _ := db.GetRelationByName("test")
  3436  			tbl.Append(catalog.MockBatch(schema, 1))
  3437  			assert.NoError(t, txn.Commit())
  3438  			writeTs = append(writeTs, txn.GetPrepareTS())
  3439  		}
  3440  		// delete the row at offset 5 in every block
  3441  		{
  3442  			// collect rowid
  3443  			txn, _ := tae.StartTxn(nil)
  3444  			db, _ := txn.GetDatabase("db")
  3445  			tbl, _ := db.GetRelationByName("test")
  3446  			blkIt := tbl.MakeBlockIt()
  3447  			for ; blkIt.Valid(); blkIt.Next() {
  3448  				prefix := blkIt.GetBlock().GetMeta().(*catalog.BlockEntry).MakeKey()
  3449  				deleteRowIDs = append(deleteRowIDs, model.EncodePhyAddrKeyWithPrefix(prefix, 5))
  3450  			}
  3451  			assert.NoError(t, txn.Commit())
  3452  		}
  3453  
  3454  		// delete 2 rows at a time. no special reason, it just comes out that way
  3455  		for i := 0; i < len(deleteRowIDs); i += 2 {
  3456  			txn, _ := tae.StartTxn(nil)
  3457  			db, _ := txn.GetDatabase("db")
  3458  			tbl, _ := db.GetRelationByName("test")
  3459  			assert.NoError(t, tbl.DeleteByPhyAddrKey(deleteRowIDs[i]))
  3460  			if i+1 < len(deleteRowIDs) {
  3461  				tbl.DeleteByPhyAddrKey(deleteRowIDs[i+1])
  3462  			}
  3463  			assert.NoError(t, txn.Commit())
  3464  			writeTs = append(writeTs, txn.GetPrepareTS())
  3465  		}
  3466  		wg.Done()
  3467  	}()
  3468  
  3469  	// concurrent read to test race
  3470  	for i := 0; i < 5; i++ {
  3471  		wg.Add(1)
  3472  		go func() {
  3473  			for i := 0; i < 10; i++ {
  3474  				reader := logMgr.GetReader(minTs, maxTs)
  3475  				_ = reader.GetDirtyByTable(dbID, tableID)
  3476  			}
  3477  			wg.Done()
  3478  		}()
  3479  	}
  3480  
  3481  	wg.Wait()
  3482  
  3483  	firstWriteTs, lastWriteTs := writeTs[0], writeTs[len(writeTs)-1]
  3484  
  3485  	reader = logMgr.GetReader(firstWriteTs, lastWriteTs.Next())
  3486  	assert.False(t, reader.HasCatalogChanges())
  3487  	reader = logMgr.GetReader(minTs, catalogWriteTs)
  3488  	assert.Equal(t, 0, len(reader.GetDirtyByTable(dbID, tableID).Segs))
  3489  	reader = logMgr.GetReader(firstWriteTs, lastWriteTs)
  3490  	assert.Equal(t, 0, len(reader.GetDirtyByTable(dbID, tableID-1).Segs))
  3491  	// 100 rows with BlockMaxRows=10 and SegmentMaxBlocks=2 -> 10 blocks in 5 segments, every segment has 2 blocks
  3492  	reader = logMgr.GetReader(firstWriteTs, lastWriteTs)
  3493  	dirties := reader.GetDirtyByTable(dbID, tableID)
  3494  	assert.Equal(t, 5, len(dirties.Segs))
  3495  	for _, seg := range dirties.Segs {
  3496  		assert.Equal(t, 2, len(seg.Blks))
  3497  	}
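        	// tots converts a TAE types.TS into the protobuf timestamp used by
        	// SyncLogTailReq: physical time from ts[4:12], logical time from ts[:4]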
  3498  	tots := func(ts types.TS) *timestamp.Timestamp {
  3499  		return &timestamp.Timestamp{PhysicalTime: types.DecodeInt64(ts[4:12]), LogicalTime: types.DecodeUint32(ts[:4])}
  3500  	}
  3501  
  3502  	fixedColCnt := 2 // __rowid + commit_time, the columns for a delBatch
  3503  	// check that every vector in the batch has the expected row count
  3504  	check_same_rows := func(bat *api.Batch, expect int) {
  3505  		for i, vec := range bat.Vecs {
  3506  			col, err := vector.ProtoVectorToVector(vec)
  3507  			assert.NoError(t, err)
  3508  			assert.Equal(t, expect, col.Length(), "columns %d", i)
  3509  		}
  3510  	}
  3511  
  3512  	ctx := context.Background()
  3513  
  3514  	// get db catalog change
  3515  	resp, err := logtail.HandleSyncLogTailReq(ctx, new(dummyCpkGetter), tae.LogtailMgr, tae.Catalog, api.SyncLogTailReq{
  3516  		CnHave: tots(minTs),
  3517  		CnWant: tots(catalogDropTs),
  3518  		Table:  &api.TableID{DbId: pkgcatalog.MO_CATALOG_ID, TbId: pkgcatalog.MO_DATABASE_ID},
  3519  	}, true)
  3520  	assert.NoError(t, err)
  3521  	assert.Equal(t, 2, len(resp.Commands)) // insert and delete
  3522  
  3523  	assert.Equal(t, api.Entry_Insert, resp.Commands[0].EntryType)
  3524  	assert.Equal(t, len(catalog.SystemDBSchema.ColDefs)+fixedColCnt, len(resp.Commands[0].Bat.Vecs))
  3525  	check_same_rows(resp.Commands[0].Bat, 2)                                 // 2 db
  3526  	datname, err := vector.ProtoVectorToVector(resp.Commands[0].Bat.Vecs[3]) // datname column
  3527  	assert.NoError(t, err)
  3528  	assert.Equal(t, "todrop", datname.GetString(0))
  3529  	assert.Equal(t, "db", datname.GetString(1))
  3530  
  3531  	assert.Equal(t, api.Entry_Delete, resp.Commands[1].EntryType)
  3532  	assert.Equal(t, fixedColCnt, len(resp.Commands[1].Bat.Vecs))
  3533  	check_same_rows(resp.Commands[1].Bat, 1) // 1 drop db
  3534  
  3535  	// get table catalog change
  3536  	resp, err = logtail.HandleSyncLogTailReq(ctx, new(dummyCpkGetter), tae.LogtailMgr, tae.Catalog, api.SyncLogTailReq{
  3537  		CnHave: tots(minTs),
  3538  		CnWant: tots(catalogDropTs),
  3539  		Table:  &api.TableID{DbId: pkgcatalog.MO_CATALOG_ID, TbId: pkgcatalog.MO_TABLES_ID},
  3540  	}, true)
  3541  	assert.NoError(t, err)
  3542  	assert.Equal(t, 1, len(resp.Commands)) // insert
  3543  	assert.Equal(t, api.Entry_Insert, resp.Commands[0].EntryType)
  3544  	assert.Equal(t, len(catalog.SystemTableSchema.ColDefs)+fixedColCnt, len(resp.Commands[0].Bat.Vecs))
  3545  	check_same_rows(resp.Commands[0].Bat, 2)                                 // 2 tables
  3546  	relname, err := vector.ProtoVectorToVector(resp.Commands[0].Bat.Vecs[3]) // relname column
  3547  	assert.NoError(t, err)
  3548  	assert.Equal(t, schema.Name, relname.GetString(0))
  3549  	assert.Equal(t, schema.Name, relname.GetString(1))
  3550  
  3551  	// get columns catalog change
  3552  	resp, err = logtail.HandleSyncLogTailReq(ctx, new(dummyCpkGetter), tae.LogtailMgr, tae.Catalog, api.SyncLogTailReq{
  3553  		CnHave: tots(minTs),
  3554  		CnWant: tots(catalogDropTs),
  3555  		Table:  &api.TableID{DbId: pkgcatalog.MO_CATALOG_ID, TbId: pkgcatalog.MO_COLUMNS_ID},
  3556  	}, true)
  3557  	assert.NoError(t, err)
  3558  	assert.Equal(t, 1, len(resp.Commands)) // insert
  3559  	assert.Equal(t, api.Entry_Insert, resp.Commands[0].EntryType)
  3560  	assert.Equal(t, len(catalog.SystemColumnSchema.ColDefs)+fixedColCnt, len(resp.Commands[0].Bat.Vecs))
  3561  	// sysColumnsCount := len(catalog.SystemDBSchema.ColDefs) + len(catalog.SystemTableSchema.ColDefs) + len(catalog.SystemColumnSchema.ColDefs)
  3562  	check_same_rows(resp.Commands[0].Bat, len(schema.ColDefs)*2) // column count of 2 tables
  3563  
  3564  	// get user table change
  3565  	resp, err = logtail.HandleSyncLogTailReq(ctx, new(dummyCpkGetter), tae.LogtailMgr, tae.Catalog, api.SyncLogTailReq{
  3566  		CnHave: tots(firstWriteTs.Next()), // skip the first write deliberately,
  3567  		CnWant: tots(lastWriteTs),
  3568  		Table:  &api.TableID{DbId: dbID, TbId: tableID},
  3569  	}, true)
  3570  	assert.NoError(t, err)
  3571  	assert.Equal(t, 2, len(resp.Commands)) // insert data and delete data
  3572  
  3573  	// blk meta change
  3574  	// blkMetaEntry := resp.Commands[0]
  3575  	// assert.Equal(t, api.Entry_Insert, blkMetaEntry.EntryType)
  3576  	// assert.Equal(t, len(logtail.BlkMetaSchema.ColDefs)+fixedColCnt, len(blkMetaEntry.Bat.Vecs))
  3577  	// check_same_rows(blkMetaEntry.Bat, 9) // 9 blocks, because the first write is excluded.
  3578  
  3579  	// check data change
  3580  	insDataEntry := resp.Commands[0]
  3581  	assert.Equal(t, api.Entry_Insert, insDataEntry.EntryType)
  3582  	assert.Equal(t, len(schema.ColDefs)+1, len(insDataEntry.Bat.Vecs)) // 5 columns: rowid + commit ts + 2 visible
  3583  	check_same_rows(insDataEntry.Bat, 99)                              // 99 rows, because the first write is excluded.
  3584  	// test the first user column; this is probably fragile since it depends on the details of MockSchema
  3585  	// if something changes, deleting this check is okay.
  3586  	firstCol, err := vector.ProtoVectorToVector(insDataEntry.Bat.Vecs[2]) // mock_0 column, int8 type
  3587  	assert.Equal(t, types.T_int8, firstCol.GetType().Oid)
  3588  	assert.NoError(t, err)
  3589  
  3590  	delDataEntry := resp.Commands[1]
  3591  	assert.Equal(t, api.Entry_Delete, delDataEntry.EntryType)
  3592  	assert.Equal(t, fixedColCnt, len(delDataEntry.Bat.Vecs)) // 2 columns: rowid + commit_ts
  3593  	check_same_rows(delDataEntry.Bat, 10)
  3594  
  3595  	// check delete rowids are exactly what we want
  3596  	rowids, err := vector.ProtoVectorToVector(delDataEntry.Bat.Vecs[0])
  3597  	assert.NoError(t, err)
  3598  	assert.Equal(t, types.T_Rowid, rowids.GetType().Oid)
  3599  	rowidMap := make(map[types.Rowid]int)
  3600  	for _, id := range deleteRowIDs {
  3601  		rowidMap[id] = 1
  3602  	}
  3603  	for i := int64(0); i < 10; i++ {
  3604  		id := vector.GetValueAt[types.Rowid](rowids, i)
  3605  		rowidMap[id] = rowidMap[id] + 1
  3606  	}
  3607  	assert.Equal(t, 10, len(rowidMap))
  3608  	for _, v := range rowidMap {
  3609  		assert.Equal(t, 2, v)
  3610  	}
  3611  }
  3612  
  3613  // txn1: create relation and append half a block
  3614  // txn2: compact
  3615  // txn3: append, shouldn't hit a r-w conflict
  3616  func TestGetLastAppender(t *testing.T) {
  3617  	defer testutils.AfterTest(t)()
  3618  	opts := config.WithLongScanAndCKPOpts(nil)
  3619  	tae := newTestEngine(t, opts)
  3620  	defer tae.Close()
  3621  	schema := catalog.MockSchemaAll(1, -1)
  3622  	schema.BlockMaxRows = 10
  3623  	schema.SegmentMaxBlocks = 2
  3624  	tae.bindSchema(schema)
  3625  	bat := catalog.MockBatch(schema, 14)
  3626  	bats := bat.Split(2)
  3627  
  3628  	tae.createRelAndAppend(bats[0], true)
  3629  	t.Log(tae.Catalog.SimplePPString(3))
  3630  
  3631  	tae.compactBlocks(false)
  3632  	t.Log(tae.Catalog.SimplePPString(3))
  3633  
  3634  	tae.restart()
  3635  
  3636  	txn, rel := tae.getRelation()
  3637  	rel.Append(bats[1])
  3638  	assert.NoError(t, txn.Commit())
  3639  }
  3640  
  3641  // txn1[s1,p1,e1] append1
  3642  // txn2[s2,p2,e2] append2
  3643  // txn3[s3,p3,e3] append3
  3644  // collect [0,p1] [0,p2] [p1+1,p2] [p1+1,p3]
  3645  // check data, row count, commit ts
  3646  // TODO: 1. in 2PC, commit ts != prepare ts; 2. abort
  3647  func TestCollectInsert(t *testing.T) {
  3648  	defer testutils.AfterTest(t)()
  3649  	opts := config.WithLongScanAndCKPOpts(nil)
  3650  	tae := newTestEngine(t, opts)
  3651  	defer tae.Close()
  3652  	schema := catalog.MockSchemaAll(1, -1)
  3653  	schema.BlockMaxRows = 20
  3654  	tae.bindSchema(schema)
  3655  	bat := catalog.MockBatch(schema, 12)
  3656  	bats := bat.Split(4)
  3657  
  3658  	tae.createRelAndAppend(bats[0], true)
  3659  
  3660  	txn1, rel := tae.getRelation()
  3661  	assert.NoError(t, rel.Append(bats[1]))
  3662  	assert.NoError(t, txn1.Commit())
  3663  
  3664  	p1 := txn1.GetPrepareTS()
  3665  	t.Logf("p1= %v", p1.ToString())
  3666  
  3667  	txn2, rel := tae.getRelation()
  3668  	assert.NoError(t, rel.Append(bats[2]))
  3669  	assert.NoError(t, txn2.Commit())
  3670  
  3671  	p2 := txn2.GetPrepareTS()
  3672  	t.Logf("p2= %v", p2.ToString())
  3673  
  3674  	txn3, rel := tae.getRelation()
  3675  	assert.NoError(t, rel.Append(bats[3]))
  3676  	assert.NoError(t, txn3.Commit())
  3677  
  3678  	p3 := txn3.GetPrepareTS()
  3679  	t.Logf("p3= %v", p3.ToString())
  3680  
  3681  	_, rel = tae.getRelation()
  3682  	blkit := rel.MakeBlockIt()
  3683  	blkdata := blkit.GetBlock().GetMeta().(*catalog.BlockEntry).GetBlockData()
  3684  
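        	// 12 rows were appended as 4 batches of 3 (bats[0] at create time, then one
        	// batch per txn), so the ranges below should see 6, 9, 3 and 6 rows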
  3685  	batch, err := blkdata.CollectAppendInRange(types.TS{}, p1, true)
  3686  	assert.NoError(t, err)
  3687  	t.Log((batch.Attrs))
  3688  	for _, vec := range batch.Vecs {
  3689  		t.Log(vec)
  3690  		assert.Equal(t, 6, vec.Length())
  3691  	}
  3692  	batch, err = blkdata.CollectAppendInRange(types.TS{}, p2, true)
  3693  	assert.NoError(t, err)
  3694  	t.Log((batch.Attrs))
  3695  	for _, vec := range batch.Vecs {
  3696  		t.Log(vec)
  3697  		assert.Equal(t, 9, vec.Length())
  3698  	}
  3699  	batch, err = blkdata.CollectAppendInRange(p1.Next(), p2, true)
  3700  	assert.NoError(t, err)
  3701  	t.Log((batch.Attrs))
  3702  	for _, vec := range batch.Vecs {
  3703  		t.Log(vec)
  3704  		assert.Equal(t, 3, vec.Length())
  3705  	}
  3706  	batch, err = blkdata.CollectAppendInRange(p1.Next(), p3, true)
  3707  	assert.NoError(t, err)
  3708  	t.Log((batch.Attrs))
  3709  	for _, vec := range batch.Vecs {
  3710  		t.Log(vec)
  3711  		assert.Equal(t, 6, vec.Length())
  3712  	}
  3713  }
  3714  
  3715  // txn0 append
  3716  // txn1[s1,p1,e1] delete
  3717  // txn2[s2,p2,e2] delete
  3718  // txn3[s3,p3,e3] delete
  3719  // collect [0,p1] [0,p2] [p1+1,p2] [p1+1,p3]
  3720  func TestCollectDelete(t *testing.T) {
  3721  	defer testutils.AfterTest(t)()
  3722  	opts := config.WithLongScanAndCKPOpts(nil)
  3723  	tae := newTestEngine(t, opts)
  3724  	defer tae.Close()
  3725  	schema := catalog.MockSchemaAll(2, 1)
  3726  	schema.BlockMaxRows = 20
  3727  	tae.bindSchema(schema)
  3728  	bat := catalog.MockBatch(schema, 12)
  3729  
  3730  	tae.createRelAndAppend(bat, true)
  3731  
  3732  	_, rel := tae.getRelation()
  3733  	blkit := rel.MakeBlockIt()
  3734  	blkID := blkit.GetBlock().GetMeta().(*catalog.BlockEntry).AsCommonID()
  3735  
  3736  	txn1, rel := tae.getRelation()
  3737  	assert.NoError(t, rel.RangeDelete(blkID, 0, 0, handle.DT_Normal))
  3738  	assert.NoError(t, txn1.Commit())
  3739  	p1 := txn1.GetPrepareTS()
  3740  	t.Logf("p1= %v", p1.ToString())
  3741  
  3742  	txn2, rel := tae.getRelation()
  3743  	assert.NoError(t, rel.RangeDelete(blkID, 1, 3, handle.DT_Normal))
  3744  	assert.NoError(t, txn2.Commit())
  3745  	p2 := txn2.GetPrepareTS()
  3746  	t.Logf("p2= %v", p2.ToString())
  3747  
  3748  	txn3, rel := tae.getRelation()
  3749  	assert.NoError(t, rel.RangeDelete(blkID, 4, 5, handle.DT_Normal))
  3750  	assert.NoError(t, txn3.Commit())
  3751  	p3 := txn3.GetPrepareTS()
  3752  	t.Logf("p3= %v", p3.ToString())
  3753  
  3754  	_, rel = tae.getRelation()
  3755  	blkit = rel.MakeBlockIt()
  3756  	blkdata := blkit.GetBlock().GetMeta().(*catalog.BlockEntry).GetBlockData()
  3757  
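        	// deletes so far: row 0 at p1, rows 1-3 at p2, rows 4-5 at p3, so the ranges
        	// below should collect 1, 4, 3 and 5 deleted rows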
  3758  	batch, err := blkdata.CollectDeleteInRange(types.TS{}, p1, true)
  3759  	assert.NoError(t, err)
  3760  	t.Log((batch.Attrs))
  3761  	for _, vec := range batch.Vecs {
  3762  		t.Log(vec)
  3763  		assert.Equal(t, 1, vec.Length())
  3764  	}
  3765  	batch, err = blkdata.CollectDeleteInRange(types.TS{}, p2, true)
  3766  	assert.NoError(t, err)
  3767  	t.Log((batch.Attrs))
  3768  	for _, vec := range batch.Vecs {
  3769  		t.Log(vec)
  3770  		assert.Equal(t, 4, vec.Length())
  3771  	}
  3772  	batch, err = blkdata.CollectDeleteInRange(p1.Next(), p2, true)
  3773  	assert.NoError(t, err)
  3774  	t.Log((batch.Attrs))
  3775  	for _, vec := range batch.Vecs {
  3776  		t.Log(vec)
  3777  		assert.Equal(t, 3, vec.Length())
  3778  	}
  3779  	batch, err = blkdata.CollectDeleteInRange(p1.Next(), p3, true)
  3780  	assert.NoError(t, err)
  3781  	t.Log((batch.Attrs))
  3782  	for _, vec := range batch.Vecs {
  3783  		t.Log(vec)
  3784  		assert.Equal(t, 5, vec.Length())
  3785  	}
  3786  }
  3787  
  3788  func TestAppendnode(t *testing.T) {
  3789  	defer testutils.AfterTest(t)()
  3790  	opts := config.WithLongScanAndCKPOpts(nil)
  3791  	tae := newTestEngine(t, opts)
  3792  	defer tae.Close()
  3793  	schema := catalog.MockSchemaAll(1, 0)
  3794  	schema.BlockMaxRows = 10000
  3795  	schema.SegmentMaxBlocks = 2
  3796  	tae.bindSchema(schema)
  3797  	appendCnt := 20
  3798  	bat := catalog.MockBatch(schema, appendCnt)
  3799  	bats := bat.Split(appendCnt)
  3800  
  3801  	tae.createRelAndAppend(bats[0], true)
  3802  	tae.checkRowsByScan(1, false)
  3803  
  3804  	var wg sync.WaitGroup
  3805  	pool, _ := ants.NewPool(5)
  3806  	defer pool.Release()
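        	// each worker runs in its own txn: it records the row count it can see,
        	// appends a single-row batch, and expects its own scan to see exactly one
        	// more row before committing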
  3807  	worker := func(i int) func() {
  3808  		return func() {
  3809  			txn, rel := tae.getRelation()
  3810  			row := getColumnRowsByScan(t, rel, 0, true)
  3811  			err := tae.doAppendWithTxn(bats[i], txn, true)
  3812  			assert.NoError(t, err)
  3813  			row2 := getColumnRowsByScan(t, rel, 0, true)
  3814  			assert.Equal(t, row+1, row2)
  3815  			assert.NoError(t, txn.Commit())
  3816  			wg.Done()
  3817  		}
  3818  	}
  3819  	for i := 1; i < appendCnt; i++ {
  3820  		wg.Add(1)
  3821  		pool.Submit(worker(i))
  3822  	}
  3823  	wg.Wait()
  3824  	tae.checkRowsByScan(appendCnt, true)
  3825  
  3826  	tae.restart()
  3827  	tae.checkRowsByScan(appendCnt, true)
  3828  }
  3829  
  3830  func TestTxnIdempotent(t *testing.T) {
  3831  	defer testutils.AfterTest(t)()
  3832  	opts := config.WithLongScanAndCKPOpts(nil)
  3833  	tae := newTestEngine(t, opts)
  3834  	defer tae.Close()
  3835  
  3836  	schema := catalog.MockSchemaAll(1, 0)
  3837  	schema.BlockMaxRows = 10000
  3838  	schema.SegmentMaxBlocks = 2
  3839  	tae.bindSchema(schema)
  3840  	appendCnt := 20
  3841  	bat := catalog.MockBatch(schema, appendCnt)
  3842  	bats := bat.Split(appendCnt)
  3843  
  3844  	var wg sync.WaitGroup
  3845  
  3846  	tae.createRelAndAppend(bats[0], true)
  3847  	for i := 0; i < 10; i++ {
  3848  		txn, _ := tae.getRelation()
  3849  		wg.Add(1)
  3850  		assert.NoError(t, txn.Rollback())
  3851  		go func() {
  3852  			defer wg.Done()
  3853  			assert.True(t, moerr.IsMoErrCode(txn.Commit(), moerr.ErrTxnNotFound))
  3854  			// txn.Commit()
  3855  		}()
  3856  		wg.Wait()
  3857  	}
  3858  }
  3859  
  3860  // insert 200 rows and do quick compaction
  3861  // expect some dirty tables at first and zero dirty tables in the end
  3862  func TestWatchDirty(t *testing.T) {
  3863  	defer testutils.AfterTest(t)()
  3864  	opts := config.WithQuickScanAndCKPOpts(nil)
  3865  	tae := newTestEngine(t, opts)
  3866  	defer tae.Close()
  3867  	logMgr := tae.LogtailMgr
  3868  
  3869  	visitor := &catalog.LoopProcessor{}
  3870  	watcher := logtail.NewDirtyCollector(logMgr, opts.Clock, tae.Catalog, visitor)
  3871  
  3872  	tbl, seg, blk := watcher.DirtyCount()
  3873  	assert.Zero(t, blk)
  3874  	assert.Zero(t, seg)
  3875  	assert.Zero(t, tbl)
  3876  
  3877  	schema := catalog.MockSchemaAll(1, 0)
  3878  	schema.BlockMaxRows = 50
  3879  	schema.SegmentMaxBlocks = 2
  3880  	tae.bindSchema(schema)
  3881  	appendCnt := 200
  3882  	bat := catalog.MockBatch(schema, appendCnt)
  3883  	bats := bat.Split(appendCnt)
  3884  
  3885  	tae.createRelAndAppend(bats[0], true)
  3886  	tae.checkRowsByScan(1, false)
  3887  
  3888  	wg := &sync.WaitGroup{}
  3889  	pool, _ := ants.NewPool(3)
  3890  	defer pool.Release()
  3891  	worker := func(i int) func() {
  3892  		return func() {
  3893  			txn, _ := tae.getRelation()
  3894  			err := tae.doAppendWithTxn(bats[i], txn, true)
  3895  			assert.NoError(t, err)
  3896  			assert.NoError(t, txn.Commit())
  3897  			wg.Done()
  3898  		}
  3899  	}
  3900  	for i := 1; i < appendCnt; i++ {
  3901  		wg.Add(1)
  3902  		pool.Submit(worker(i))
  3903  	}
  3904  	wg.Wait()
  3905  
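        	// poll the dirty watcher until the background flush catches up and the dirty
        	// block count drops to zero; fail if that doesn't happen within 10 seconds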
  3906  	timer := time.After(10 * time.Second)
  3907  	for {
  3908  		select {
  3909  		case <-timer:
  3910  			t.Errorf("timeout to wait zero")
  3911  			return
  3912  		default:
  3913  			watcher.Run()
  3914  			time.Sleep(5 * time.Millisecond)
  3915  			_, _, blkCnt := watcher.DirtyCount()
  3916  			// stop once the dirty block count reaches zero
  3917  			if blkCnt == 0 {
  3918  				return
  3919  			}
  3920  		}
  3921  	}
  3922  }
  3923  
  3924  func TestDirtyWatchRace(t *testing.T) {
  3925  	defer testutils.AfterTest(t)()
  3926  	opts := config.WithQuickScanAndCKPOpts(nil)
  3927  	tae := newTestEngine(t, opts)
  3928  	defer tae.Close()
  3929  
  3930  	schema := catalog.MockSchemaAll(2, -1)
  3931  	schema.Name = "test"
  3932  	schema.BlockMaxRows = 5
  3933  	schema.SegmentMaxBlocks = 5
  3934  	tae.bindSchema(schema)
  3935  
  3936  	tae.createRelAndAppend(catalog.MockBatch(schema, 1), true)
  3937  
  3938  	visitor := &catalog.LoopProcessor{}
  3939  	watcher := logtail.NewDirtyCollector(tae.LogtailMgr, opts.Clock, tae.Catalog, visitor)
  3940  
  3941  	wg := &sync.WaitGroup{}
  3942  
  3943  	addRow := func() {
  3944  		txn, _ := tae.StartTxn(nil)
  3945  		db, _ := txn.GetDatabase("db")
  3946  		tbl, _ := db.GetRelationByName("test")
  3947  		tbl.Append(catalog.MockBatch(schema, 1))
  3948  		assert.NoError(t, txn.Commit())
  3949  		wg.Done()
  3950  	}
  3951  
  3952  	pool, _ := ants.NewPool(5)
  3953  	defer pool.Release()
  3954  
  3955  	for i := 0; i < 50; i++ {
  3956  		wg.Add(1)
  3957  		pool.Submit(addRow)
  3958  	}
  3959  
  3960  	// test race
  3961  	for i := 0; i < 3; i++ {
  3962  		wg.Add(1)
  3963  		go func(i int) {
  3964  			for j := 0; j < 300; j++ {
  3965  				time.Sleep(5 * time.Millisecond)
  3966  				watcher.Run()
  3967  				// tbl, seg, blk := watcher.DirtyCount()
  3968  				// t.Logf("t%d: tbl %d, seg %d, blk %d", i, tbl, seg, blk)
  3969  				_, _, _ = watcher.DirtyCount()
  3970  			}
  3971  			wg.Done()
  3972  		}(i)
  3973  	}
  3974  
  3975  	wg.Wait()
  3976  }
  3977  
  3978  func TestBlockRead(t *testing.T) {
  3979  	defer testutils.AfterTest(t)()
  3980  	opts := config.WithLongScanAndCKPOpts(nil)
  3981  	tae := newTestEngine(t, opts)
  3982  	tsAlloc := types.NewTsAlloctor(opts.Clock)
  3983  	defer tae.Close()
  3984  	schema := catalog.MockSchemaAll(2, 1)
  3985  	schema.BlockMaxRows = 20
  3986  	schema.SegmentMaxBlocks = 2
  3987  	tae.bindSchema(schema)
  3988  	bat := catalog.MockBatch(schema, 40)
  3989  
  3990  	tae.createRelAndAppend(bat, true)
  3991  
  3992  	_, rel := tae.getRelation()
  3993  	blkit := rel.MakeBlockIt()
  3994  	blkEntry := blkit.GetBlock().GetMeta().(*catalog.BlockEntry)
  3995  	blkID := blkEntry.AsCommonID()
  3996  
  3997  	beforeDel := tsAlloc.Alloc()
  3998  	txn1, rel := tae.getRelation()
  3999  	assert.NoError(t, rel.RangeDelete(blkID, 0, 0, handle.DT_Normal))
  4000  	assert.NoError(t, txn1.Commit())
  4001  
  4002  	afterFirstDel := tsAlloc.Alloc()
  4003  	txn2, rel := tae.getRelation()
  4004  	assert.NoError(t, rel.RangeDelete(blkID, 1, 3, handle.DT_Normal))
  4005  	assert.NoError(t, txn2.Commit())
  4006  
  4007  	afterSecondDel := tsAlloc.Alloc()
  4008  
  4009  	tae.compactBlocks(false)
  4010  
  4011  	metaloc := blkEntry.GetMetaLoc()
  4012  	deltaloc := blkEntry.GetDeltaLoc()
  4013  	assert.NotEmpty(t, metaloc)
  4014  	assert.NotEmpty(t, deltaloc)
  4015  
  4016  	bid, sid := blkEntry.ID, blkEntry.GetSegment().ID
  4017  
  4018  	info := &pkgcatalog.BlockInfo{
  4019  		BlockID:    bid,
  4020  		SegmentID:  sid,
  4021  		EntryState: true,
  4022  		MetaLoc:    metaloc,
  4023  		DeltaLoc:   deltaloc,
  4024  	}
  4025  
  4026  	columns := make([]string, 0)
  4027  	colIdxs := make([]uint16, 0)
  4028  	colTyps := make([]types.Type, 0)
  4029  	colNulls := make([]bool, 0)
  4030  	defs := schema.ColDefs[:]
  4031  	rand.Shuffle(len(defs), func(i, j int) { defs[i], defs[j] = defs[j], defs[i] })
  4032  	for _, col := range defs {
  4033  		columns = append(columns, col.Name)
  4034  		colIdxs = append(colIdxs, uint16(col.Idx))
  4035  		colTyps = append(colTyps, col.Type)
  4036  		colNulls = append(colNulls, col.NullAbility)
  4037  	}
  4038  	t.Log("read columns: ", columns)
  4039  	fs := tae.DB.Fs.Service
  4040  	pool, err := mpool.NewMPool("test", 0, mpool.NoFixed)
  4041  	assert.NoError(t, err)
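        	// the block holds 20 rows (40 rows with BlockMaxRows=20); reads at beforeDel,
        	// afterFirstDel and afterSecondDel should see 20, 19 and 16 rows respectively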
  4042  	b1, err := blockio.BlockReadInner(
  4043  		context.Background(), info, len(schema.ColDefs),
  4044  		columns, colIdxs, colTyps, colNulls,
  4045  		beforeDel, fs, pool,
  4046  	)
  4047  	assert.NoError(t, err)
  4048  	defer b1.Close()
  4049  	assert.Equal(t, columns, b1.Attrs)
  4050  	assert.Equal(t, len(columns), len(b1.Vecs))
  4051  	assert.Equal(t, 20, b1.Vecs[0].Length())
  4052  
  4053  	b2, err := blockio.BlockReadInner(
  4054  		context.Background(), info, len(schema.ColDefs),
  4055  		columns, colIdxs, colTyps, colNulls,
  4056  		afterFirstDel, fs, pool,
  4057  	)
  4058  	assert.NoError(t, err)
  4059  	defer b2.Close()
  4060  	assert.Equal(t, columns, b2.Attrs)
  4061  	assert.Equal(t, len(columns), len(b2.Vecs))
  4062  	assert.Equal(t, 19, b2.Vecs[0].Length())
  4063  	b3, err := blockio.BlockReadInner(
  4064  		context.Background(), info, len(schema.ColDefs),
  4065  		columns, colIdxs, colTyps, colNulls,
  4066  		afterSecondDel, fs, pool,
  4067  	)
  4068  	assert.NoError(t, err)
  4069  	defer b3.Close()
  4070  	assert.Equal(t, columns, b3.Attrs)
  4071  	assert.Equal(t, len(columns), len(b3.Vecs))
  4072  	assert.Equal(t, 16, b3.Vecs[0].Length())
  4073  
  4074  	// read rowid column only
  4075  	b4, err := blockio.BlockReadInner(
  4076  		context.Background(), info, len(schema.ColDefs),
  4077  		[]string{catalog.AttrRowID},
  4078  		[]uint16{2},
  4079  		[]types.Type{types.T_Rowid.ToType()},
  4080  		[]bool{false},
  4081  		afterSecondDel, fs, pool,
  4082  	)
  4083  	assert.NoError(t, err)
  4084  	defer b4.Close()
  4085  	assert.Equal(t, []string{catalog.AttrRowID}, b4.Attrs)
  4086  	assert.Equal(t, 1, len(b4.Vecs))
  4087  	assert.Equal(t, 16, b4.Vecs[0].Length())
  4088  }
  4089  
  4090  func TestCompactDeltaBlk(t *testing.T) {
  4091  	defer testutils.AfterTest(t)()
  4092  	testutils.EnsureNoLeak(t)
  4093  	opts := config.WithLongScanAndCKPOpts(nil)
  4094  	tae := newTestEngine(t, opts)
  4095  	defer tae.Close()
  4096  	schema := catalog.MockSchemaAll(3, 1)
  4097  	schema.BlockMaxRows = 6
  4098  	schema.SegmentMaxBlocks = 2
  4099  	tae.bindSchema(schema)
  4100  	bat := catalog.MockBatch(schema, 5)
  4101  
  4102  	tae.createRelAndAppend(bat, true)
  4103  
  4104  	{
  4105  		v := getSingleSortKeyValue(bat, schema, 1)
  4106  		t.Logf("v is %v**********", v)
  4107  		filter := handle.NewEQFilter(v)
  4108  		txn2, rel := tae.getRelation()
  4109  		t.Log("********before delete******************")
  4110  		checkAllColRowsByScan(t, rel, 5, true)
  4111  		_ = rel.DeleteByFilter(filter)
  4112  		assert.Nil(t, txn2.Commit())
  4113  	}
  4114  
  4115  	_, rel := tae.getRelation()
  4116  	checkAllColRowsByScan(t, rel, 4, true)
  4117  
  4118  	{
  4119  		t.Log("************compact************")
  4120  		txn, rel := tae.getRelation()
  4121  		it := rel.MakeBlockIt()
  4122  		blk := it.GetBlock()
  4123  		meta := blk.GetMeta().(*catalog.BlockEntry)
  4124  		task, err := jobs.NewCompactBlockTask(nil, txn, meta, tae.DB.Scheduler)
  4125  		assert.NoError(t, err)
  4126  		err = task.OnExec()
  4127  		assert.NoError(t, err)
  4128  		assert.True(t, meta.GetMetaLoc() != "")
  4129  		assert.True(t, meta.GetDeltaLoc() != "")
  4130  		assert.True(t, task.GetNewBlock().GetMeta().(*catalog.BlockEntry).GetMetaLoc() != "")
  4131  		assert.True(t, task.GetNewBlock().GetMeta().(*catalog.BlockEntry).GetDeltaLoc() == "")
  4132  		err = txn.Commit()
  4133  		assert.Nil(t, err)
  4134  		err = meta.GetSegment().RemoveEntry(meta)
  4135  		assert.Nil(t, err)
  4136  	}
  4137  	{
  4138  		v := getSingleSortKeyValue(bat, schema, 2)
  4139  		t.Logf("v is %v**********", v)
  4140  		filter := handle.NewEQFilter(v)
  4141  		txn2, rel := tae.getRelation()
  4142  		t.Log("********before delete******************")
  4143  		checkAllColRowsByScan(t, rel, 4, true)
  4144  		_ = rel.DeleteByFilter(filter)
  4145  		assert.Nil(t, txn2.Commit())
  4146  	}
  4147  	{
  4148  		t.Log("************compact************")
  4149  		txn, rel := tae.getRelation()
  4150  		it := rel.MakeBlockIt()
  4151  		blk := it.GetBlock()
  4152  		meta := blk.GetMeta().(*catalog.BlockEntry)
  4153  		assert.False(t, meta.IsAppendable())
  4154  		task, err := jobs.NewCompactBlockTask(nil, txn, meta, tae.DB.Scheduler)
  4155  		assert.NoError(t, err)
  4156  		err = task.OnExec()
  4157  		assert.NoError(t, err)
  4158  		assert.True(t, meta.GetMetaLoc() != "")
  4159  		assert.True(t, meta.GetDeltaLoc() != "")
  4160  		assert.True(t, task.GetNewBlock().GetMeta().(*catalog.BlockEntry).GetMetaLoc() != "")
  4161  		assert.True(t, task.GetNewBlock().GetMeta().(*catalog.BlockEntry).GetDeltaLoc() == "")
  4162  		err = txn.Commit()
  4163  		assert.Nil(t, err)
  4164  	}
  4165  
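        	// two of the original five rows were deleted across the two compactions, so
        	// three rows should remain both before and after restart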
  4166  	_, rel = tae.getRelation()
  4167  	checkAllColRowsByScan(t, rel, 3, true)
  4168  	assert.Equal(t, int64(3), rel.Rows())
  4169  
  4170  	tae.restart()
  4171  	_, rel = tae.getRelation()
  4172  	checkAllColRowsByScan(t, rel, 3, true)
  4173  	assert.Equal(t, int64(3), rel.Rows())
  4174  }
  4175  
  4176  func TestFlushTable(t *testing.T) {
  4177  	defer testutils.AfterTest(t)()
  4178  	opts := config.WithLongScanAndCKPOpts(nil)
  4179  	tae := newTestEngine(t, opts)
  4180  	defer tae.Close()
  4181  
  4182  	tae.BGCheckpointRunner.DebugUpdateOptions(
  4183  		checkpoint.WithForceFlushCheckInterval(time.Millisecond * 5))
  4184  
  4185  	schema := catalog.MockSchemaAll(3, 1)
  4186  	schema.BlockMaxRows = 10
  4187  	schema.SegmentMaxBlocks = 2
  4188  	tae.bindSchema(schema)
  4189  	bat := catalog.MockBatch(schema, 21)
  4190  	defer bat.Close()
  4191  
  4192  	tae.createRelAndAppend(bat, true)
  4193  
  4194  	_, rel := tae.getRelation()
  4195  	db, err := rel.GetDB()
  4196  	assert.Nil(t, err)
  4197  	table, err := db.GetRelationByName(schema.Name)
  4198  	assert.Nil(t, err)
  4199  	err = tae.FlushTable(
  4200  		0,
  4201  		db.GetID(),
  4202  		table.ID(),
  4203  		types.BuildTS(time.Now().UTC().UnixNano(), 0))
  4204  	assert.NoError(t, err)
  4205  	t.Log(tae.Catalog.SimplePPString(common.PPL1))
  4206  
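        	// after the forced flush every block of the table should have persisted data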
  4207  	txn, rel := tae.getRelation()
  4208  	it := rel.MakeBlockIt()
  4209  	for it.Valid() {
  4210  		blk := it.GetBlock().GetMeta().(*catalog.BlockEntry)
  4211  		assert.True(t, blk.HasPersistedData())
  4212  		it.Next()
  4213  	}
  4214  	assert.NoError(t, txn.Commit())
  4215  }
  4216  
  4217  func TestReadCheckpoint(t *testing.T) {
  4218  	defer testutils.AfterTest(t)()
  4219  	opts := config.WithQuickScanAndCKPOpts(nil)
  4220  	tae := newTestEngine(t, opts)
  4221  	defer tae.Close()
  4222  
  4223  	schema := catalog.MockSchemaAll(3, 1)
  4224  	schema.BlockMaxRows = 10
  4225  	schema.SegmentMaxBlocks = 2
  4226  	tae.bindSchema(schema)
  4227  	bat := catalog.MockBatch(schema, 21)
  4228  	defer bat.Close()
  4229  
  4230  	tae.createRelAndAppend(bat, true)
  4231  	now := time.Now()
  4232  	testutils.WaitExpect(10000, func() bool {
  4233  		return tae.Scheduler.GetPenddingLSNCnt() == 0
  4234  	})
  4235  	t.Log(time.Since(now))
  4236  	t.Logf("Checkpointed: %d", tae.Scheduler.GetCheckpointedLSN())
  4237  	t.Logf("GetPenddingLSNCnt: %d", tae.Scheduler.GetPenddingLSNCnt())
  4238  	assert.Equal(t, uint64(0), tae.Scheduler.GetPenddingLSNCnt())
  4239  	tids := []uint64{
  4240  		pkgcatalog.MO_DATABASE_ID,
  4241  		pkgcatalog.MO_TABLES_ID,
  4242  		pkgcatalog.MO_COLUMNS_ID,
  4243  		1000,
  4244  	}
  4245  
  4246  	gcTS := types.BuildTS(time.Now().UTC().UnixNano(), 0)
  4247  	err := tae.BGCheckpointRunner.GCByTS(context.Background(), gcTS)
  4248  	assert.NoError(t, err)
  4249  
  4250  	testutils.WaitExpect(10000, func() bool {
  4251  		return tae.Scheduler.GetPenddingLSNCnt() == 0
  4252  	})
  4253  	t.Log(time.Since(now))
  4254  	assert.Equal(t, uint64(0), tae.Scheduler.GetPenddingLSNCnt())
  4255  
  4256  	testutils.WaitExpect(10000, func() bool {
  4257  		return tae.BGCheckpointRunner.GetPenddingIncrementalCount() == 0
  4258  	})
  4259  	t.Log(time.Since(now))
  4260  	assert.Equal(t, 0, tae.BGCheckpointRunner.GetPenddingIncrementalCount())
  4261  
  4262  	assert.Equal(t, uint64(0), tae.Wal.GetPenddingCnt())
  4263  	testutils.WaitExpect(4000, func() bool {
  4264  		tae.BGCheckpointRunner.ExistPendingEntryToGC()
  4265  		return !tae.BGCheckpointRunner.ExistPendingEntryToGC()
  4266  	})
  4267  	assert.False(t, tae.BGCheckpointRunner.ExistPendingEntryToGC())
  4268  	entries := tae.BGCheckpointRunner.GetAllGlobalCheckpoints()
  4269  	for _, entry := range entries {
  4270  		t.Log(entry.String())
  4271  	}
  4272  	for _, entry := range entries {
  4273  		for _, tid := range tids {
  4274  			ins, del, _, err := entry.GetByTableID(tae.Fs, tid)
  4275  			assert.NoError(t, err)
  4276  			t.Logf("table %d", tid)
  4277  			t.Log(ins)
  4278  			t.Log(del)
  4279  		}
  4280  	}
  4281  	tae.restart()
  4282  	entries = tae.BGCheckpointRunner.GetAllGlobalCheckpoints()
  4283  	for _, entry := range entries {
  4284  		for _, tid := range tids {
  4285  			ins, del, _, err := entry.GetByTableID(tae.Fs, tid)
  4286  			assert.NoError(t, err)
  4287  			t.Logf("table %d", tid)
  4288  			t.Log(ins)
  4289  			t.Log(del)
  4290  		}
  4291  	}
  4292  }
  4293  
  4294  func TestDelete4(t *testing.T) {
  4295  	t.Skip(any("This case crashes occasionally, is being fixed, skip it for now"))
  4296  	defer testutils.AfterTest(t)()
  4297  	opts := config.WithQuickScanAndCKPOpts(nil)
  4298  	tae := newTestEngine(t, opts)
  4299  	defer tae.Close()
  4300  	schema := catalog.NewEmptySchema("xx")
  4301  	schema.AppendPKCol("name", types.T_varchar.ToType(), 0)
  4302  	schema.AppendCol("offset", types.T_uint32.ToType())
  4303  	schema.Finalize(false)
  4304  	schema.BlockMaxRows = 50
  4305  	schema.SegmentMaxBlocks = 5
  4306  	tae.bindSchema(schema)
  4307  	bat := catalog.MockBatch(schema, 1)
  4308  	bat.Vecs[1].Update(0, uint32(0))
  4309  	defer bat.Close()
  4310  	tae.createRelAndAppend(bat, true)
  4311  
  4312  	filter := handle.NewEQFilter(bat.Vecs[0].Get(0))
  4313  	var wg sync.WaitGroup
  4314  	var count atomic.Uint32
  4315  
  4316  	run := func() {
  4317  		defer wg.Done()
  4318  		time.Sleep(time.Duration(rand.Intn(20)+1) * time.Millisecond)
  4319  		cloneBat := bat.CloneWindow(0, 1)
  4320  		defer cloneBat.Close()
  4321  		txn, rel := tae.getRelation()
  4322  		id, offset, err := rel.GetByFilter(filter)
  4323  		if err != nil {
  4324  			txn.Rollback()
  4325  			return
  4326  		}
  4327  		v, err := rel.GetValue(id, offset, 1)
  4328  		if err != nil {
  4329  			txn.Rollback()
  4330  			return
  4331  		}
  4332  		oldV := v.(uint32)
  4333  		newV := oldV + 1
  4334  		if err := rel.RangeDelete(id, offset, offset, handle.DT_Normal); err != nil {
  4335  			txn.Rollback()
  4336  			return
  4337  		}
  4338  		cloneBat.Vecs[1].Update(0, newV)
  4339  		if err := rel.Append(cloneBat); err != nil {
  4340  			txn.Rollback()
  4341  			return
  4342  		}
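        		// on a successful commit, spin on CAS until the shared counter is advanced
        		// from oldV to newV so it always reflects the latest committed value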
  4343  		if err := txn.Commit(); err == nil {
  4344  			ok := count.CompareAndSwap(oldV, newV)
  4345  			for !ok {
  4346  				ok = count.CompareAndSwap(oldV, newV)
  4347  			}
  4348  			t.Logf("RangeDelete block-%d, offset-%d, old %d newV %d, %s", id.BlockID, offset, oldV, newV, txn.GetCommitTS().ToString())
  4349  		}
  4350  	}
  4351  
  4352  	p, _ := ants.NewPool(20)
  4353  	defer p.Release()
  4354  	for i := 0; i < 100; i++ {
  4355  		wg.Add(1)
  4356  		_ = p.Submit(run)
  4357  	}
  4358  	wg.Wait()
  4359  
  4360  	t.Logf("count=%v", count.Load())
  4361  
  4362  	getValueFn := func() {
  4363  		txn, rel := tae.getRelation()
  4364  		v, err := rel.GetValueByFilter(filter, 1)
  4365  		assert.NoError(t, err)
  4366  		assert.Equal(t, int(count.Load()), int(v.(uint32)))
  4367  		assert.NoError(t, txn.Commit())
  4368  		t.Logf("GetV=%v, %s", v, txn.GetStartTS().ToString())
  4369  	}
  4370  	scanFn := func() {
  4371  		txn, rel := tae.getRelation()
  4372  		it := rel.MakeBlockIt()
  4373  		for it.Valid() {
  4374  			blk := it.GetBlock()
  4375  			view, err := blk.GetColumnDataById(0, nil)
  4376  			assert.NoError(t, err)
  4377  			defer view.Close()
  4378  			view.ApplyDeletes()
  4379  			if view.Length() != 0 {
  4380  				t.Logf("block-%d, data=%s", blk.ID(), logtail.ToStringTemplate(view.GetData(), -1))
  4381  			}
  4382  			it.Next()
  4383  		}
  4384  		txn.Commit()
  4385  	}
  4386  
  4387  	for i := 0; i < 20; i++ {
  4388  		getValueFn()
  4389  		scanFn()
  4390  
  4391  		tae.restart()
  4392  
  4393  		getValueFn()
  4394  		scanFn()
  4395  		for j := 0; j < 100; j++ {
  4396  			wg.Add(1)
  4397  			p.Submit(run)
  4398  		}
  4399  		wg.Wait()
  4400  	}
  4401  	t.Log(tae.Catalog.SimplePPString(common.PPL3))
  4402  }
  4403  
  4404  // append, delete, append, get start ts, compact, get active row
  4405  func TestGetActiveRow(t *testing.T) {
  4406  	opts := config.WithLongScanAndCKPOpts(nil)
  4407  	tae := newTestEngine(t, opts)
  4408  	defer tae.Close()
  4409  
  4410  	schema := catalog.MockSchemaAll(3, 1)
  4411  	schema.BlockMaxRows = 10
  4412  	schema.SegmentMaxBlocks = 2
  4413  	tae.bindSchema(schema)
  4414  	bat := catalog.MockBatch(schema, 1)
  4415  	defer bat.Close()
  4416  
  4417  	tae.createRelAndAppend(bat, true)
  4418  
  4419  	txn, rel := tae.getRelation()
  4420  	v := getSingleSortKeyValue(bat, schema, 0)
  4421  	filter := handle.NewEQFilter(v)
  4422  	id, row, err := rel.GetByFilter(filter)
  4423  	assert.NoError(t, err)
  4424  	err = rel.RangeDelete(id, row, row, handle.DT_Normal)
  4425  	assert.NoError(t, err)
  4426  	assert.NoError(t, txn.Commit())
  4427  
  4428  	txn, rel = tae.getRelation()
  4429  	assert.NoError(t, rel.Append(bat))
  4430  	assert.NoError(t, txn.Commit())
  4431  
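        	// this relation handle is opened before the compaction below commits; the
        	// final GetByFilter must still locate the re-appended (active) row afterwards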
  4432  	_, rel = tae.getRelation()
  4433  	{
  4434  		txn2, rel2 := tae.getRelation()
  4435  		it := rel2.MakeBlockIt()
  4436  		blk := it.GetBlock().GetMeta().(*catalog.BlockEntry)
  4437  		task, err := jobs.NewCompactBlockTask(nil, txn2, blk, tae.Scheduler)
  4438  		assert.NoError(t, err)
  4439  		err = task.OnExec()
  4440  		assert.NoError(t, err)
  4441  		assert.NoError(t, txn2.Commit())
  4442  	}
  4443  	filter = handle.NewEQFilter(v)
  4444  	_, _, err = rel.GetByFilter(filter)
  4445  	assert.NoError(t, err)
  4446  }
  4447  func TestTransfer(t *testing.T) {
  4448  	opts := config.WithLongScanAndCKPOpts(nil)
  4449  	tae := newTestEngine(t, opts)
  4450  	defer tae.Close()
  4451  	schema := catalog.MockSchemaAll(5, 3)
  4452  	schema.BlockMaxRows = 100
  4453  	schema.SegmentMaxBlocks = 10
  4454  	tae.bindSchema(schema)
  4455  
  4456  	bat := catalog.MockBatch(schema, 10)
  4457  	defer bat.Close()
  4458  
  4459  	tae.createRelAndAppend(bat, true)
  4460  
  4461  	filter := handle.NewEQFilter(bat.Vecs[3].Get(3))
  4462  
  4463  	txn1, rel1 := tae.getRelation()
  4464  	err := rel1.DeleteByFilter(filter)
  4465  	assert.NoError(t, err)
  4466  
  4467  	meta := rel1.GetMeta().(*catalog.TableEntry)
  4468  	err = tae.FlushTable(0, meta.GetDB().ID, meta.ID,
  4469  		types.BuildTS(time.Now().UTC().UnixNano(), 0))
  4470  	assert.NoError(t, err)
  4471  
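        	// txn1's delete targeted a row in a block that FlushTable has since replaced;
        	// the commit is still expected to succeed, presumably because the delete is
        	// transferred to the new block instead of raising a r-w conflict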
  4472  	err = txn1.Commit()
  4473  	// assert.True(t, moerr.IsMoErrCode(err, moerr.ErrTxnRWConflict))
  4474  	assert.NoError(t, err)
  4475  
  4476  	txn2, rel2 := tae.getRelation()
  4477  	_, err = rel2.GetValueByFilter(filter, 3)
  4478  	t.Log(err)
  4479  	assert.True(t, moerr.IsMoErrCode(err, moerr.ErrNotFound))
  4480  	v, err := rel2.GetValueByFilter(handle.NewEQFilter(bat.Vecs[3].Get(4)), 2)
  4481  	expectV := bat.Vecs[2].Get(4)
  4482  	assert.Equal(t, expectV, v)
  4483  	assert.NoError(t, err)
  4484  	_ = txn2.Commit()
  4485  }
  4486  
  4487  func TestTransfer2(t *testing.T) {
  4488  	opts := config.WithLongScanAndCKPOpts(nil)
  4489  	tae := newTestEngine(t, opts)
  4490  	defer tae.Close()
  4491  	schema := catalog.MockSchemaAll(5, 3)
  4492  	schema.BlockMaxRows = 10
  4493  	schema.SegmentMaxBlocks = 10
  4494  	tae.bindSchema(schema)
  4495  
  4496  	bat := catalog.MockBatch(schema, 200)
  4497  	defer bat.Close()
  4498  
  4499  	tae.createRelAndAppend(bat, true)
  4500  
  4501  	filter := handle.NewEQFilter(bat.Vecs[3].Get(3))
  4502  
  4503  	txn1, rel1 := tae.getRelation()
  4504  	err := rel1.DeleteByFilter(filter)
  4505  	assert.NoError(t, err)
  4506  
  4507  	tae.mergeBlocks(false)
  4508  
  4509  	err = txn1.Commit()
  4510  	// assert.True(t, moerr.IsMoErrCode(err, moerr.ErrTxnRWConflict))
  4511  	assert.NoError(t, err)
  4512  
  4513  	txn2, rel2 := tae.getRelation()
  4514  	_, err = rel2.GetValueByFilter(filter, 3)
  4515  	t.Log(err)
  4516  	assert.True(t, moerr.IsMoErrCode(err, moerr.ErrNotFound))
  4517  	v, err := rel2.GetValueByFilter(handle.NewEQFilter(bat.Vecs[3].Get(4)), 2)
  4518  	expectV := bat.Vecs[2].Get(4)
  4519  	assert.Equal(t, expectV, v)
  4520  	assert.NoError(t, err)
  4521  	_ = txn2.Commit()
  4522  }
  4523  
  4524  func TestCompactEmptyBlock(t *testing.T) {
  4525  	defer testutils.AfterTest(t)()
  4526  	opts := config.WithLongScanAndCKPOpts(nil)
  4527  	tae := newTestEngine(t, opts)
  4528  	defer tae.Close()
  4529  	schema := catalog.MockSchemaAll(1, 0)
  4530  	schema.BlockMaxRows = 3
  4531  	schema.SegmentMaxBlocks = 2
  4532  	tae.bindSchema(schema)
  4533  	bat := catalog.MockBatch(schema, 6)
  4534  	defer bat.Close()
  4535  
  4536  	tae.createRelAndAppend(bat, true)
  4537  	assert.NoError(t, tae.deleteAll(true))
  4538  	tae.checkRowsByScan(0, true)
  4539  
  4540  	tae.compactBlocks(false)
  4541  
  4542  	tae.checkRowsByScan(0, true)
  4543  
  4544  	blkCnt := 0
  4545  	p := &catalog.LoopProcessor{}
  4546  	p.BlockFn = func(be *catalog.BlockEntry) error {
  4547  		blkCnt++
  4548  		return nil
  4549  	}
  4550  
  4551  	_, rel := tae.getRelation()
  4552  	err := rel.GetMeta().(*catalog.TableEntry).RecurLoop(p)
  4553  	assert.NoError(t, err)
  4554  	assert.Equal(t, 2, blkCnt)
  4555  	t.Log(tae.Catalog.SimplePPString(3))
  4556  
  4557  	tae.restart()
  4558  
  4559  	blkCnt = 0
  4560  	_, rel = tae.getRelation()
  4561  	err = rel.GetMeta().(*catalog.TableEntry).RecurLoop(p)
  4562  	assert.NoError(t, err)
  4563  	assert.Equal(t, 2, blkCnt)
  4564  	tae.checkRowsByScan(0, true)
  4565  	t.Log(tae.Catalog.SimplePPString(3))
  4566  }
  4567  
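        // TestTransfer3 checks that a txn holding a pending delete can still append after
        // the table is flushed underneath it and then commit cleanly.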
  4568  func TestTransfer3(t *testing.T) {
  4569  	defer testutils.AfterTest(t)()
  4570  	opts := config.WithLongScanAndCKPOpts(nil)
  4571  	tae := newTestEngine(t, opts)
  4572  	defer tae.Close()
  4573  	schema := catalog.MockSchemaAll(5, 3)
  4574  	schema.BlockMaxRows = 100
  4575  	schema.SegmentMaxBlocks = 10
  4576  	tae.bindSchema(schema)
  4577  
  4578  	bat := catalog.MockBatch(schema, 10)
  4579  	defer bat.Close()
  4580  
  4581  	tae.createRelAndAppend(bat, true)
  4582  
  4583  	filter := handle.NewEQFilter(bat.Vecs[3].Get(3))
  4584  
  4585  	txn1, rel1 := tae.getRelation()
  4586  
  4587  	var err error
  4588  	err = rel1.DeleteByFilter(filter)
  4589  	assert.NoError(t, err)
  4590  
  4591  	meta := rel1.GetMeta().(*catalog.TableEntry)
  4592  	err = tae.FlushTable(0, meta.GetDB().ID, meta.ID,
  4593  		types.BuildTS(time.Now().UTC().UnixNano(), 0))
  4594  	assert.NoError(t, err)
  4595  
  4596  	err = rel1.Append(bat.Window(3, 1))
  4597  	assert.NoError(t, err)
  4598  	err = txn1.Commit()
  4599  	assert.NoError(t, err)
  4600  }
  4601  
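        // TestUpdate runs concurrent delete+append "updates" against a single row from a
        // worker pool and verifies the final value matches the last successful commit.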
  4602  func TestUpdate(t *testing.T) {
  4603  	t.Skip("This case crashes occasionally and is being fixed; skip it for now")
  4604  	defer testutils.AfterTest(t)()
  4605  	opts := config.WithQuickScanAndCKPOpts2(nil, 5)
  4606  	// opts := config.WithLongScanAndCKPOpts(nil)
  4607  	tae := newTestEngine(t, opts)
  4608  	defer tae.Close()
  4609  
  4610  	schema := catalog.MockSchemaAll(5, 3)
  4611  	schema.BlockMaxRows = 100
  4612  	schema.SegmentMaxBlocks = 4
  4613  	tae.bindSchema(schema)
  4614  
  4615  	bat := catalog.MockBatch(schema, 1)
  4616  	defer bat.Close()
  4617  	bat.Vecs[2].Update(0, int32(0))
  4618  
  4619  	tae.createRelAndAppend(bat, true)
  4620  
  4621  	var wg sync.WaitGroup
  4622  
  4623  	var expectV atomic.Int32
  4624  	expectV.Store(bat.Vecs[2].Get(0).(int32))
  4625  	filter := handle.NewEQFilter(bat.Vecs[3].Get(0))
  4626  	updateFn := func() {
  4627  		defer wg.Done()
  4628  		txn, rel := tae.getRelation()
  4629  		id, offset, err := rel.GetByFilter(filter)
  4630  		assert.NoError(t, err)
  4631  		v, err := rel.GetValue(id, offset, 2)
  4632  		assert.NoError(t, err)
  4633  		err = rel.RangeDelete(id, offset, offset, handle.DT_Normal)
  4634  		if err != nil {
  4635  			t.Logf("range delete %v, rollbacking", err)
  4636  			_ = txn.Rollback()
  4637  			return
  4638  		}
  4639  		tuples := bat.CloneWindow(0, 1)
  4640  		defer tuples.Close()
  4641  		updatedV := v.(int32) + 1
  4642  		tuples.Vecs[2].Update(0, updatedV)
  4643  		err = rel.Append(tuples)
  4644  		assert.NoError(t, err)
  4645  
  4646  		err = txn.Commit()
  4647  		if err != nil {
  4648  			t.Logf("commit update %v", err)
  4649  		} else {
  4650  			expectV.CompareAndSwap(v.(int32), updatedV)
  4651  			t.Logf("%v committed", updatedV)
  4652  		}
  4653  	}
  4654  	p, _ := ants.NewPool(5)
  4655  	defer p.Release()
  4656  	loop := 1000
  4657  	for i := 0; i < loop; i++ {
  4658  		wg.Add(1)
  4659  		// updateFn()
  4660  		_ = p.Submit(updateFn)
  4661  	}
  4662  	wg.Wait()
  4663  	t.Logf("Final: %v", expectV.Load())
  4664  	{
  4665  		txn, rel := tae.getRelation()
  4666  		v, err := rel.GetValueByFilter(filter, 2)
  4667  		assert.NoError(t, err)
  4668  		assert.Equal(t, v.(int32), expectV.Load())
  4669  		checkAllColRowsByScan(t, rel, 1, true)
  4670  		assert.NoError(t, txn.Commit())
  4671  	}
  4672  }
  4673  
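        // TestInsertPerf is a manually-run perf case: it appends many one-row batches
        // from a worker pool and logs the total elapsed time.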
  4674  func TestInsertPerf(t *testing.T) {
  4675  	t.Skip("perf case, for manual debugging only")
  4676  	opts := new(options.Options)
  4677  	options.WithCheckpointScanInterval(time.Second * 10)(opts)
  4678  	options.WithFlushInterval(time.Second * 10)(opts)
  4679  	tae := newTestEngine(t, opts)
  4680  	defer tae.Close()
  4681  	schema := catalog.MockSchemaAll(10, 2)
  4682  	schema.BlockMaxRows = 1000
  4683  	schema.SegmentMaxBlocks = 5
  4684  	tae.bindSchema(schema)
  4685  
  4686  	cnt := 1000
  4687  	iBat := 1
  4688  	poolSize := 20
  4689  
  4690  	bat := catalog.MockBatch(schema, cnt*iBat*poolSize*2)
  4691  	defer bat.Close()
  4692  
  4693  	tae.createRelAndAppend(bat.Window(0, 1), true)
  4694  	var wg sync.WaitGroup
  4695  	run := func(start int) func() {
  4696  		return func() {
  4697  			defer wg.Done()
  4698  			for i := start; i < start+cnt*iBat; i += iBat {
  4699  				txn, rel := tae.getRelation()
  4700  				_ = rel.Append(bat.Window(i, iBat))
  4701  				_ = txn.Commit()
  4702  			}
  4703  		}
  4704  	}
  4705  
  4706  	p, _ := ants.NewPool(poolSize)
  4707  	defer p.Release()
  4708  	now := time.Now()
  4709  	for i := 1; i <= poolSize; i++ {
  4710  		wg.Add(1)
  4711  		_ = p.Submit(run(i * cnt * iBat))
  4712  	}
  4713  	wg.Wait()
  4714  	t.Log(time.Since(now))
  4715  }
  4716  
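        // TestAppendBat builds batches concurrently by extending them one row at a time,
        // exercising the containers batch API under parallel use.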
  4717  func TestAppendBat(t *testing.T) {
  4718  	p, _ := ants.NewPool(100)
  4719  	defer p.Release()
  4720  	var wg sync.WaitGroup
  4721  
  4722  	schema := catalog.MockSchema(7, 2)
  4723  	bat := catalog.MockBatch(schema, 1000)
  4724  	defer bat.Close()
  4725  
  4726  	run := func() {
  4727  		defer wg.Done()
  4728  		b := containers.BuildBatch(schema.Attrs(), schema.Types(), schema.Nullables(), containers.Options{
  4729  			Allocator: common.DefaultAllocator})
  4730  		defer b.Close()
  4731  		for i := 0; i < bat.Length(); i++ {
  4732  			w := bat.Window(i, 1)
  4733  			b.Extend(w)
  4734  		}
  4735  	}
  4736  
  4737  	for i := 0; i < 200; i++ {
  4738  		wg.Add(1)
  4739  		_ = p.Submit(run)
  4740  	}
  4741  	wg.Wait()
  4742  }
  4743  
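        // TestGCWithCheckpoint runs the disk cleaner against incremental checkpoints and
        // verifies that a second cleaner converges to the same consumed end and inputs.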
  4744  func TestGCWithCheckpoint(t *testing.T) {
  4745  	defer testutils.AfterTest(t)()
  4746  	opts := config.WithQuickScanAndCKPAndGCOpts(nil)
  4747  	tae := newTestEngine(t, opts)
  4748  	defer tae.Close()
  4749  	manager := gc.NewDiskCleaner(tae.Fs, tae.BGCheckpointRunner, tae.Catalog)
  4750  	manager.Start()
  4751  	defer manager.Stop()
  4752  
  4753  	schema := catalog.MockSchemaAll(3, 1)
  4754  	schema.BlockMaxRows = 10
  4755  	schema.SegmentMaxBlocks = 2
  4756  	tae.bindSchema(schema)
  4757  	bat := catalog.MockBatch(schema, 21)
  4758  	defer bat.Close()
  4759  
  4760  	tae.createRelAndAppend(bat, true)
  4761  	now := time.Now()
  4762  	testutils.WaitExpect(10000, func() bool {
  4763  		return tae.Scheduler.GetPenddingLSNCnt() == 0
  4764  	})
  4765  	t.Log(time.Since(now))
  4766  	t.Logf("Checkpointed: %d", tae.Scheduler.GetCheckpointedLSN())
  4767  	t.Logf("GetPenddingLSNCnt: %d", tae.Scheduler.GetPenddingLSNCnt())
  4768  	assert.Equal(t, uint64(0), tae.Scheduler.GetPenddingLSNCnt())
  4769  	err := manager.GC(context.Background())
  4770  	assert.Nil(t, err)
  4771  	entries := tae.BGCheckpointRunner.GetAllIncrementalCheckpoints()
  4772  	num := len(entries)
  4773  	assert.Greater(t, num, 0)
  4774  	testutils.WaitExpect(5000, func() bool {
  4775  		if manager.GetMaxConsumed() == nil {
  4776  			return false
  4777  		}
  4778  		return entries[num-1].GetEnd().Equal(manager.GetMaxConsumed().GetEnd())
  4779  	})
  4780  	assert.True(t, entries[num-1].GetEnd().Equal(manager.GetMaxConsumed().GetEnd()))
  4781  	manager2 := gc.NewDiskCleaner(tae.Fs, tae.BGCheckpointRunner, tae.Catalog)
  4782  	manager2.Start()
  4783  	defer manager2.Stop()
  4784  	testutils.WaitExpect(5000, func() bool {
  4785  		if manager2.GetMaxConsumed() == nil {
  4786  			return false
  4787  		}
  4788  		return entries[num-1].GetEnd().Equal(manager2.GetMaxConsumed().GetEnd())
  4789  	})
  4790  	assert.True(t, entries[num-1].GetEnd().Equal(manager2.GetMaxConsumed().GetEnd()))
  4791  	tables1 := manager.GetInputs()
  4792  	tables2 := manager2.GetInputs()
  4793  	assert.True(t, tables1.Compare(tables2))
  4794  }
  4795  
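        // TestGCDropDB drops the whole test database and verifies the disk cleaner
        // consumes the checkpoints consistently (two cleaners agree) before a restart.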
  4796  func TestGCDropDB(t *testing.T) {
  4797  	defer testutils.AfterTest(t)()
  4798  	opts := config.WithQuickScanAndCKPAndGCOpts(nil)
  4799  	tae := newTestEngine(t, opts)
  4800  	defer tae.Close()
  4801  	manager := gc.NewDiskCleaner(tae.Fs, tae.BGCheckpointRunner, tae.Catalog)
  4802  	manager.Start()
  4803  	defer manager.Stop()
  4804  	schema := catalog.MockSchemaAll(3, 1)
  4805  	schema.BlockMaxRows = 10
  4806  	schema.SegmentMaxBlocks = 2
  4807  	tae.bindSchema(schema)
  4808  	bat := catalog.MockBatch(schema, 210)
  4809  	defer bat.Close()
  4810  
  4811  	tae.createRelAndAppend(bat, true)
  4812  	txn, err := tae.StartTxn(nil)
  4813  	assert.Nil(t, err)
  4814  	db, err := txn.DropDatabase(defaultTestDB)
  4815  	assert.Nil(t, err)
  4816  	assert.Nil(t, txn.Commit())
  4817  
  4818  	assert.Equal(t, txn.GetCommitTS(), db.GetMeta().(*catalog.DBEntry).GetDeleteAt())
  4819  	now := time.Now()
  4820  	testutils.WaitExpect(10000, func() bool {
  4821  		return tae.Scheduler.GetPenddingLSNCnt() == 0
  4822  	})
  4823  	t.Log(time.Since(now))
  4824  	err = manager.GC(context.Background())
  4825  	assert.Nil(t, err)
  4826  	entries := tae.BGCheckpointRunner.GetAllIncrementalCheckpoints()
  4827  	num := len(entries)
  4828  	assert.Greater(t, num, 0)
  4829  	testutils.WaitExpect(5000, func() bool {
  4830  		if manager.GetMaxConsumed() == nil {
  4831  			return false
  4832  		}
  4833  		return entries[num-1].GetEnd().Equal(manager.GetMaxConsumed().GetEnd())
  4834  	})
  4835  	assert.True(t, entries[num-1].GetEnd().Equal(manager.GetMaxConsumed().GetEnd()))
  4836  	manager2 := gc.NewDiskCleaner(tae.Fs, tae.BGCheckpointRunner, tae.Catalog)
  4837  	manager2.Start()
  4838  	defer manager2.Stop()
  4839  	testutils.WaitExpect(5000, func() bool {
  4840  		if manager2.GetMaxConsumed() == nil {
  4841  			return false
  4842  		}
  4843  		return entries[num-1].GetEnd().Equal(manager2.GetMaxConsumed().GetEnd())
  4844  	})
  4845  	assert.True(t, entries[num-1].GetEnd().Equal(manager2.GetMaxConsumed().GetEnd()))
  4846  	tables1 := manager.GetInputs()
  4847  	tables2 := manager2.GetInputs()
  4848  	assert.True(t, tables1.Compare(tables2))
  4849  	tae.restart()
  4850  }
  4851  
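        // TestGCDropTable creates and drops a second table and verifies the disk cleaner
        // consumes the checkpoints consistently before a restart.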
  4852  func TestGCDropTable(t *testing.T) {
  4853  	defer testutils.AfterTest(t)()
  4854  	opts := config.WithQuickScanAndCKPAndGCOpts(nil)
  4855  	tae := newTestEngine(t, opts)
  4856  	defer tae.Close()
  4857  	manager := gc.NewDiskCleaner(tae.Fs, tae.BGCheckpointRunner, tae.Catalog)
  4858  	manager.Start()
  4859  	defer manager.Stop()
  4860  	schema := catalog.MockSchemaAll(3, 1)
  4861  	schema.BlockMaxRows = 10
  4862  	schema.SegmentMaxBlocks = 2
  4863  	tae.bindSchema(schema)
  4864  	bat := catalog.MockBatch(schema, 210)
  4865  	defer bat.Close()
  4866  	schema2 := catalog.MockSchemaAll(3, 1)
  4867  	schema2.BlockMaxRows = 10
  4868  	schema2.SegmentMaxBlocks = 2
  4869  	bat2 := catalog.MockBatch(schema2, 210)
  4870  	defer bat2.Close()
  4871  
  4872  	tae.createRelAndAppend(bat, true)
  4873  	txn, _ := tae.StartTxn(nil)
  4874  	db, err := txn.GetDatabase(defaultTestDB)
  4875  	assert.Nil(t, err)
  4876  	rel, _ := db.CreateRelation(schema2)
  4877  	err = rel.Append(bat2)
        	assert.Nil(t, err)
  4878  	assert.Nil(t, txn.Commit())
  4879  
  4880  	txn, err = tae.StartTxn(nil)
  4881  	assert.Nil(t, err)
  4882  	db, err = txn.GetDatabase(defaultTestDB)
  4883  	assert.Nil(t, err)
  4884  	_, err = db.DropRelationByName(schema2.Name)
  4885  	assert.Nil(t, err)
  4886  	assert.Nil(t, txn.Commit())
  4887  
  4888  	now := time.Now()
  4889  	testutils.WaitExpect(10000, func() bool {
  4890  		return tae.Scheduler.GetPenddingLSNCnt() == 0
  4891  	})
  4892  	assert.Equal(t, uint64(0), tae.Scheduler.GetPenddingLSNCnt())
  4893  	assert.Equal(t, txn.GetCommitTS(), rel.GetMeta().(*catalog.TableEntry).GetDeleteAt())
  4894  	t.Log(time.Since(now))
  4895  	err = manager.GC(context.Background())
  4896  	assert.Nil(t, err)
  4897  	entries := tae.BGCheckpointRunner.GetAllIncrementalCheckpoints()
  4898  	num := len(entries)
  4899  	assert.Greater(t, num, 0)
  4900  	testutils.WaitExpect(10000, func() bool {
  4901  		if manager.GetMaxConsumed() == nil {
  4902  			return false
  4903  		}
  4904  		return entries[num-1].GetEnd().Equal(manager.GetMaxConsumed().GetEnd())
  4905  	})
  4906  	assert.True(t, entries[num-1].GetEnd().Equal(manager.GetMaxConsumed().GetEnd()))
  4907  	manager2 := gc.NewDiskCleaner(tae.Fs, tae.BGCheckpointRunner, tae.Catalog)
  4908  	manager2.Start()
  4909  	defer manager2.Stop()
  4910  	testutils.WaitExpect(5000, func() bool {
  4911  		if manager2.GetMaxConsumed() == nil {
  4912  			return false
  4913  		}
  4914  		return entries[num-1].GetEnd().Equal(manager2.GetMaxConsumed().GetEnd())
  4915  	})
  4916  	assert.True(t, entries[num-1].GetEnd().Equal(manager2.GetMaxConsumed().GetEnd()))
  4917  	tables1 := manager.GetInputs()
  4918  	tables2 := manager2.GetInputs()
  4919  	assert.True(t, tables1.Compare(tables2))
  4920  	tae.restart()
  4921  }
  4922  
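        // TestUpdateCstr updates a table constraint twice and checks that all constraint
        // versions are visible through the logtail both before and after a restart, and
        // that dropping the table shows up as insert+delete entries in the columns logtail.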
  4923  func TestUpdateCstr(t *testing.T) {
  4924  	defer testutils.AfterTest(t)()
  4925  	opts := config.WithLongScanAndCKPOpts(nil)
  4926  	tae := newTestEngine(t, opts)
  4927  	defer tae.Close()
  4928  
  4929  	schema := catalog.MockSchemaAll(2, -1)
  4930  	schema.Name = "test"
  4931  	schema.BlockMaxRows = 10
  4932  	schema.SegmentMaxBlocks = 2
  4933  	schema.Constraint = []byte("start version")
  4934  
  4935  	txn, _ := tae.StartTxn(nil)
  4936  	db, _ := txn.CreateDatabase("db", "")
  4937  	db.CreateRelation(schema)
  4938  	txn.Commit()
  4939  
  4940  	txn, _ = tae.StartTxn(nil)
  4941  	db, _ = txn.GetDatabase("db")
  4942  	tbl, _ := db.GetRelationByName("test")
  4943  	err := tbl.UpdateConstraint([]byte("version 1"))
  4944  	assert.NoError(t, err)
  4945  	err = txn.Commit()
  4946  	assert.NoError(t, err)
  4947  
  4948  	txn, _ = tae.StartTxn(nil)
  4949  	db, _ = txn.GetDatabase("db")
  4950  	tbl, _ = db.GetRelationByName("test")
  4951  	err = tbl.UpdateConstraint([]byte("version 2"))
  4952  	assert.NoError(t, err)
  4953  	txn.Commit()
  4954  
  4955  	tots := func(ts types.TS) *timestamp.Timestamp {
  4956  		return &timestamp.Timestamp{PhysicalTime: types.DecodeInt64(ts[4:12]), LogicalTime: types.DecodeUint32(ts[:4])}
  4957  	}
  4958  
  4959  	ctx := context.Background()
  4960  	resp, _ := logtail.HandleSyncLogTailReq(ctx, new(dummyCpkGetter), tae.LogtailMgr, tae.Catalog, api.SyncLogTailReq{
  4961  		CnHave: tots(types.BuildTS(0, 0)),
  4962  		CnWant: tots(types.MaxTs()),
  4963  		Table:  &api.TableID{DbId: pkgcatalog.MO_CATALOG_ID, TbId: pkgcatalog.MO_TABLES_ID},
  4964  	}, true)
  4965  
  4966  	bat, _ := batch.ProtoBatchToBatch(resp.Commands[0].Bat)
  4967  	cstrCol := containers.NewNonNullBatchWithSharedMemory(bat).GetVectorByName(pkgcatalog.SystemRelAttr_Constraint)
  4968  	assert.Equal(t, 3, cstrCol.Length())
  4969  	assert.Equal(t, []byte("start version"), cstrCol.Get(0).([]byte))
  4970  	assert.Equal(t, []byte("version 1"), cstrCol.Get(1).([]byte))
  4971  	assert.Equal(t, []byte("version 2"), cstrCol.Get(2).([]byte))
  4972  
  4973  	tae.restart()
  4974  
  4975  	resp, _ = logtail.HandleSyncLogTailReq(ctx, new(dummyCpkGetter), tae.LogtailMgr, tae.Catalog, api.SyncLogTailReq{
  4976  		CnHave: tots(types.BuildTS(0, 0)),
  4977  		CnWant: tots(types.MaxTs()),
  4978  		Table:  &api.TableID{DbId: pkgcatalog.MO_CATALOG_ID, TbId: pkgcatalog.MO_TABLES_ID},
  4979  	}, true)
  4980  
  4981  	bat, _ = batch.ProtoBatchToBatch(resp.Commands[0].Bat)
  4982  	cstrCol = containers.NewNonNullBatchWithSharedMemory(bat).GetVectorByName(pkgcatalog.SystemRelAttr_Constraint)
  4983  	assert.Equal(t, 3, cstrCol.Length())
  4984  	assert.Equal(t, []byte("start version"), cstrCol.Get(0).([]byte))
  4985  	assert.Equal(t, []byte("version 1"), cstrCol.Get(1).([]byte))
  4986  	assert.Equal(t, []byte("version 2"), cstrCol.Get(2).([]byte))
  4987  
  4988  	txn, _ = tae.StartTxn(nil)
  4989  	db, _ = txn.GetDatabase("db")
  4990  	_, err = db.DropRelationByName("test")
  4991  	assert.NoError(t, err)
  4992  	txn.Commit()
  4993  
  4994  	resp, _ = logtail.HandleSyncLogTailReq(ctx, new(dummyCpkGetter), tae.LogtailMgr, tae.Catalog, api.SyncLogTailReq{
  4995  		CnHave: tots(types.BuildTS(0, 0)),
  4996  		CnWant: tots(types.MaxTs()),
  4997  		Table:  &api.TableID{DbId: pkgcatalog.MO_CATALOG_ID, TbId: pkgcatalog.MO_COLUMNS_ID},
  4998  	}, true)
  4999  
  5000  	assert.Equal(t, 2, len(resp.Commands)) // create and drop
  5001  	assert.Equal(t, api.Entry_Insert, resp.Commands[0].EntryType)
  5002  	assert.Equal(t, api.Entry_Delete, resp.Commands[1].EntryType)
  5003  }
  5004  
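        // TestGlobalCheckpoint1 verifies appended data stays readable across restarts once
        // the checkpoints, including the global one, have caught up with the WAL.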
  5005  func TestGlobalCheckpoint1(t *testing.T) {
  5006  	defer testutils.AfterTest(t)()
  5007  	testutils.EnsureNoLeak(t)
  5008  	opts := config.WithQuickScanAndCKPOpts(nil)
  5009  	options.WithCheckpointGlobalMinCount(1)(opts)
  5010  	options.WithGlobalVersionInterval(time.Millisecond * 10)(opts)
  5011  	tae := newTestEngine(t, opts)
  5012  	defer tae.Close()
  5013  	schema := catalog.MockSchemaAll(10, 2)
  5014  	schema.BlockMaxRows = 10
  5015  	schema.SegmentMaxBlocks = 2
  5016  	tae.bindSchema(schema)
  5017  	bat := catalog.MockBatch(schema, 400)
  5018  
  5019  	tae.createRelAndAppend(bat, true)
  5020  
  5021  	tae.restart()
  5022  	tae.checkRowsByScan(400, true)
  5023  
  5024  	testutils.WaitExpect(4000, func() bool {
  5025  		return tae.Wal.GetPenddingCnt() == 0
  5026  	})
  5027  
  5028  	tae.restart()
  5029  	tae.checkRowsByScan(400, true)
  5030  }
  5031  
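        // TestAppendAndGC appends to two tables from a worker pool, then drives the disk
        // cleaner and verifies its merge progress survives a restart.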
  5032  func TestAppendAndGC(t *testing.T) {
  5033  	defer testutils.AfterTest(t)()
  5034  	testutils.EnsureNoLeak(t)
  5035  	opts := new(options.Options)
  5036  	opts.CacheCfg = new(options.CacheCfg)
  5037  	opts.CacheCfg.InsertCapacity = common.M * 5
  5038  	opts.CacheCfg.TxnCapacity = common.M
  5039  	opts = config.WithQuickScanAndCKPOpts(opts)
  5040  	options.WithDisableGCCheckpoint()(opts)
  5041  	tae := newTestEngine(t, opts)
  5042  	defer tae.Close()
  5043  	db := tae.DB
  5044  	db.DiskCleaner.SetMinMergeCountForTest(2)
  5045  
  5046  	schema1 := catalog.MockSchemaAll(13, 2)
  5047  	schema1.BlockMaxRows = 10
  5048  	schema1.SegmentMaxBlocks = 2
  5049  
  5050  	schema2 := catalog.MockSchemaAll(13, 2)
  5051  	schema2.BlockMaxRows = 10
  5052  	schema2.SegmentMaxBlocks = 2
  5053  	{
  5054  		txn, _ := db.StartTxn(nil)
  5055  		database, err := txn.CreateDatabase("db", "")
  5056  		assert.Nil(t, err)
  5057  		_, err = database.CreateRelation(schema1)
  5058  		assert.Nil(t, err)
  5059  		_, err = database.CreateRelation(schema2)
  5060  		assert.Nil(t, err)
  5061  		assert.Nil(t, txn.Commit())
  5062  	}
  5063  	bat := catalog.MockBatch(schema1, int(schema1.BlockMaxRows*10-1))
  5064  	defer bat.Close()
  5065  	bats := bat.Split(bat.Length())
  5066  
  5067  	pool, err := ants.NewPool(20)
  5068  	assert.Nil(t, err)
  5069  	defer pool.Release()
  5070  	var wg sync.WaitGroup
  5071  
  5072  	for _, data := range bats {
  5073  		wg.Add(2)
  5074  		err = pool.Submit(appendClosure(t, data, schema1.Name, db, &wg))
  5075  		assert.Nil(t, err)
  5076  		err = pool.Submit(appendClosure(t, data, schema2.Name, db, &wg))
  5077  		assert.Nil(t, err)
  5078  	}
  5079  	wg.Wait()
  5080  	testutils.WaitExpect(10000, func() bool {
  5081  		return db.Scheduler.GetPenddingLSNCnt() == 0
  5082  	})
  5083  	assert.Equal(t, uint64(0), db.Scheduler.GetPenddingLSNCnt())
  5084  	err = db.DiskCleaner.CheckGC()
  5085  	assert.Nil(t, err)
  5086  	testutils.WaitExpect(5000, func() bool {
  5087  		return db.DiskCleaner.GetMinMerged() != nil
  5088  	})
  5089  	minMerged := db.DiskCleaner.GetMinMerged()
  5093  	assert.NotNil(t, minMerged)
  5094  	tae.restart()
  5095  	db = tae.DB
  5096  	db.DiskCleaner.SetMinMergeCountForTest(2)
  5097  	testutils.WaitExpect(5000, func() bool {
  5098  		if db.DiskCleaner.GetMaxConsumed() == nil {
  5099  			return false
  5100  		}
  5101  		return db.DiskCleaner.GetMaxConsumed().GetEnd().GreaterEq(minMerged.GetEnd())
  5102  	})
  5103  	assert.True(t, db.DiskCleaner.GetMaxConsumed().GetEnd().GreaterEq(minMerged.GetEnd()))
  5104  	err = db.DiskCleaner.CheckGC()
  5105  	assert.Nil(t, err)
  5107  }
  5108  
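        // TestGlobalCheckpoint2 drops and recreates the table around manual incremental
        // and global checkpoints; the dropped table entry must be purged from the catalog
        // after a restart.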
  5109  func TestGlobalCheckpoint2(t *testing.T) {
  5110  	defer testutils.AfterTest(t)()
  5111  	testutils.EnsureNoLeak(t)
  5112  	opts := config.WithQuickScanAndCKPOpts(nil)
  5113  	options.WithCheckpointGlobalMinCount(1)(opts)
  5114  	options.WithDisableGCCatalog()(opts)
  5115  	tae := newTestEngine(t, opts)
  5116  	tae.BGCheckpointRunner.DisableCheckpoint()
  5117  	tae.BGCheckpointRunner.CleanPenddingCheckpoint()
  5118  	defer tae.Close()
  5119  	schema := catalog.MockSchemaAll(10, 2)
  5120  	schema.BlockMaxRows = 10
  5121  	schema.SegmentMaxBlocks = 2
  5122  	tae.bindSchema(schema)
  5123  	bat := catalog.MockBatch(schema, 40)
  5124  
  5125  	_, rel := tae.createRelAndAppend(bat, true)
  5126  
  5127  	tae.dropRelation(t)
  5128  	txn, err := tae.StartTxn(nil)
  5129  	assert.NoError(t, err)
  5130  	tae.incrementalCheckpoint(txn.GetStartTS(), false, true, true)
  5131  	tae.globalCheckpoint(txn.GetStartTS(), 0, false)
  5132  	assert.NoError(t, txn.Commit())
  5133  
  5134  	tae.createRelAndAppend(bat, false)
  5135  	txn, err = tae.StartTxn(nil)
  5136  	assert.NoError(t, err)
  5137  	tae.incrementalCheckpoint(txn.GetStartTS(), false, true, true)
  5138  	tae.globalCheckpoint(txn.GetStartTS(), 0, false)
  5139  	assert.NoError(t, txn.Commit())
  5140  
  5141  	p := &catalog.LoopProcessor{}
  5142  	tableExisted := false
  5143  	p.TableFn = func(te *catalog.TableEntry) error {
  5144  		if te.ID == rel.ID() {
  5145  			tableExisted = true
  5146  		}
  5147  		return nil
  5148  	}
  5149  
  5150  	assert.NoError(t, tae.Catalog.RecurLoop(p))
  5151  	assert.True(t, tableExisted)
  5152  
  5153  	t.Log(tae.Catalog.SimplePPString(3))
  5154  	tae.restart()
  5155  	t.Log(tae.Catalog.SimplePPString(3))
  5156  
  5157  	tableExisted = false
  5158  	assert.NoError(t, tae.Catalog.RecurLoop(p))
  5159  	assert.False(t, tableExisted)
  5160  }
  5161  
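        // TestGlobalCheckpoint3 is the background-checkpoint variant of TestGlobalCheckpoint2:
        // the dropped table entry must be gone from the catalog after a restart.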
  5162  func TestGlobalCheckpoint3(t *testing.T) {
  5163  	defer testutils.AfterTest(t)()
  5164  	testutils.EnsureNoLeak(t)
  5165  	opts := config.WithQuickScanAndCKPOpts(nil)
  5166  	options.WithCheckpointGlobalMinCount(1)(opts)
  5167  	options.WithGlobalVersionInterval(time.Nanosecond * 1)(opts)
  5168  	options.WithDisableGCCatalog()(opts)
  5169  	tae := newTestEngine(t, opts)
  5170  	defer tae.Close()
  5171  	schema := catalog.MockSchemaAll(10, 2)
  5172  	schema.BlockMaxRows = 10
  5173  	schema.SegmentMaxBlocks = 2
  5174  	tae.bindSchema(schema)
  5175  	bat := catalog.MockBatch(schema, 40)
  5176  
  5177  	_, rel := tae.createRelAndAppend(bat, true)
  5178  	testutils.WaitExpect(1000, func() bool {
  5179  		return tae.Wal.GetPenddingCnt() == 0
  5180  	})
  5181  
  5182  	tae.dropRelation(t)
  5183  	testutils.WaitExpect(1000, func() bool {
  5184  		return tae.Wal.GetPenddingCnt() == 0
  5185  	})
  5186  
  5187  	tae.createRelAndAppend(bat, false)
  5188  	testutils.WaitExpect(1000, func() bool {
  5189  		return tae.Wal.GetPenddingCnt() == 0
  5190  	})
  5191  
  5192  	p := &catalog.LoopProcessor{}
  5193  	tableExisted := false
  5194  	p.TableFn = func(te *catalog.TableEntry) error {
  5195  		if te.ID == rel.ID() {
  5196  			tableExisted = true
  5197  		}
  5198  		return nil
  5199  	}
  5200  
  5201  	assert.NoError(t, tae.Catalog.RecurLoop(p))
  5202  	assert.True(t, tableExisted)
  5203  
  5204  	tae.restart()
  5205  
  5206  	tableExisted = false
  5207  	assert.NoError(t, tae.Catalog.RecurLoop(p))
  5208  	assert.False(t, tableExisted)
  5209  }
  5210  
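        // TestGlobalCheckpoint4 interleaves database and table drops with manual
        // incremental/global checkpoints and restarts to make sure the catalog replays correctly.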
  5211  func TestGlobalCheckpoint4(t *testing.T) {
  5212  	defer testutils.AfterTest(t)()
  5213  	testutils.EnsureNoLeak(t)
  5214  	opts := config.WithQuickScanAndCKPOpts(nil)
  5215  	tae := newTestEngine(t, opts)
  5216  	defer tae.Close()
  5217  	tae.BGCheckpointRunner.DisableCheckpoint()
  5218  	tae.BGCheckpointRunner.CleanPenddingCheckpoint()
  5219  	globalCkpInterval := time.Second
  5220  
  5221  	schema := catalog.MockSchemaAll(18, 2)
  5222  	schema.BlockMaxRows = 10
  5223  	schema.SegmentMaxBlocks = 2
  5224  	tae.bindSchema(schema)
  5225  	bat := catalog.MockBatch(schema, 40)
  5226  
  5227  	txn, err := tae.StartTxn(nil)
  5228  	assert.NoError(t, err)
  5229  	_, err = txn.CreateDatabase("db", "")
  5230  	assert.NoError(t, err)
  5231  	assert.NoError(t, txn.Commit())
  5232  
  5233  	err = tae.incrementalCheckpoint(txn.GetCommitTS(), false, true, true)
  5234  	assert.NoError(t, err)
  5235  
  5236  	txn, err = tae.StartTxn(nil)
  5237  	assert.NoError(t, err)
  5238  	_, err = txn.DropDatabase("db")
  5239  	assert.NoError(t, err)
  5240  	assert.NoError(t, txn.Commit())
  5241  
  5242  	err = tae.globalCheckpoint(txn.GetCommitTS(), globalCkpInterval, false)
  5243  	assert.NoError(t, err)
  5244  
  5245  	tae.createRelAndAppend(bat, true)
  5246  
  5247  	t.Log(tae.Catalog.SimplePPString(3))
  5248  	tae.restart()
  5249  	tae.BGCheckpointRunner.DisableCheckpoint()
  5250  	tae.BGCheckpointRunner.CleanPenddingCheckpoint()
  5251  	t.Log(tae.Catalog.SimplePPString(3))
  5252  
  5253  	// tae.createRelAndAppend(bat, false)
  5254  
  5255  	txn, err = tae.StartTxn(nil)
  5256  	assert.NoError(t, err)
  5257  	db, err := txn.GetDatabase("db")
  5258  	assert.NoError(t, err)
  5259  	_, err = db.DropRelationByName(schema.Name)
  5260  	assert.NoError(t, err)
  5261  	assert.NoError(t, txn.Commit())
  5262  
  5263  	err = tae.globalCheckpoint(txn.GetCommitTS(), globalCkpInterval, false)
  5264  	assert.NoError(t, err)
  5265  
  5266  	tae.createRelAndAppend(bat, false)
  5267  
  5268  	t.Log(tae.Catalog.SimplePPString(3))
  5269  	tae.restart()
  5270  	tae.BGCheckpointRunner.DisableCheckpoint()
  5271  	tae.BGCheckpointRunner.CleanPenddingCheckpoint()
  5272  	t.Log(tae.Catalog.SimplePPString(3))
  5273  }
  5274  
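        // TestGlobalCheckpoint5 takes global checkpoints between appends and verifies the
        // row counts are preserved across a restart and further appends.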
  5275  func TestGlobalCheckpoint5(t *testing.T) {
  5276  	defer testutils.AfterTest(t)()
  5277  	testutils.EnsureNoLeak(t)
  5278  	opts := config.WithQuickScanAndCKPOpts(nil)
  5279  	tae := newTestEngine(t, opts)
  5280  	defer tae.Close()
  5281  	tae.BGCheckpointRunner.DisableCheckpoint()
  5282  	tae.BGCheckpointRunner.CleanPenddingCheckpoint()
  5283  	globalCkpInterval := time.Duration(0)
  5284  
  5285  	schema := catalog.MockSchemaAll(18, 2)
  5286  	schema.BlockMaxRows = 10
  5287  	schema.SegmentMaxBlocks = 2
  5288  	tae.bindSchema(schema)
  5289  	bat := catalog.MockBatch(schema, 60)
  5290  	bats := bat.Split(3)
  5291  
  5292  	txn, err := tae.StartTxn(nil)
  5293  	assert.NoError(t, err)
  5294  	err = tae.incrementalCheckpoint(txn.GetStartTS(), false, true, true)
  5295  	assert.NoError(t, err)
  5296  	assert.NoError(t, txn.Commit())
  5297  
  5298  	tae.createRelAndAppend(bats[0], true)
  5299  
  5300  	txn, err = tae.StartTxn(nil)
  5301  	assert.NoError(t, err)
  5302  	err = tae.globalCheckpoint(txn.GetStartTS(), globalCkpInterval, false)
  5303  	assert.NoError(t, err)
  5304  	assert.NoError(t, txn.Commit())
  5305  
  5306  	tae.DoAppend(bats[1])
  5307  
  5308  	txn, err = tae.StartTxn(nil)
  5309  	assert.NoError(t, err)
  5310  	err = tae.globalCheckpoint(txn.GetStartTS(), globalCkpInterval, false)
  5311  	assert.NoError(t, err)
  5312  	assert.NoError(t, txn.Commit())
  5313  
  5314  	tae.checkRowsByScan(40, true)
  5315  
  5316  	t.Log(tae.Catalog.SimplePPString(3))
  5317  	tae.restart()
  5318  	tae.BGCheckpointRunner.DisableCheckpoint()
  5319  	tae.BGCheckpointRunner.CleanPenddingCheckpoint()
  5320  	t.Log(tae.Catalog.SimplePPString(3))
  5321  
  5322  	tae.checkRowsByScan(40, true)
  5323  
  5324  	tae.DoAppend(bats[2])
  5325  
  5326  	tae.checkRowsByScan(60, true)
  5327  	txn, err = tae.StartTxn(nil)
  5328  	assert.NoError(t, err)
  5329  	err = tae.globalCheckpoint(txn.GetStartTS(), globalCkpInterval, false)
  5330  	assert.NoError(t, err)
  5332  	assert.NoError(t, txn.Commit())
  5333  }
  5334  
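        // TestGlobalCheckpoint6 repeats append + global checkpoint + restart in a loop and
        // checks the accumulated row count each round.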
  5335  func TestGlobalCheckpoint6(t *testing.T) {
  5336  	defer testutils.AfterTest(t)()
  5337  	testutils.EnsureNoLeak(t)
  5338  	opts := config.WithQuickScanAndCKPOpts(nil)
  5339  	tae := newTestEngine(t, opts)
  5340  	defer tae.Close()
  5341  	tae.BGCheckpointRunner.DisableCheckpoint()
  5342  	tae.BGCheckpointRunner.CleanPenddingCheckpoint()
  5343  	globalCkpInterval := time.Duration(0)
  5344  	restartCnt := 10
  5345  	batchsize := 10
  5346  
  5347  	schema := catalog.MockSchemaAll(18, 2)
  5348  	schema.BlockMaxRows = 5
  5349  	schema.SegmentMaxBlocks = 2
  5350  	tae.bindSchema(schema)
  5351  	bat := catalog.MockBatch(schema, batchsize*(restartCnt+1))
  5352  	bats := bat.Split(restartCnt + 1)
  5353  
  5354  	tae.createRelAndAppend(bats[0], true)
  5355  	txn, err := tae.StartTxn(nil)
  5356  	assert.NoError(t, err)
  5357  	err = tae.incrementalCheckpoint(txn.GetStartTS(), false, true, true)
  5358  	assert.NoError(t, err)
  5359  	assert.NoError(t, txn.Commit())
  5360  
  5361  	for i := 0; i < restartCnt; i++ {
  5362  		tae.DoAppend(bats[i+1])
  5363  		txn, err = tae.StartTxn(nil)
  5364  		assert.NoError(t, err)
  5365  		err = tae.globalCheckpoint(txn.GetStartTS(), globalCkpInterval, false)
  5366  		assert.NoError(t, err)
  5367  		assert.NoError(t, txn.Commit())
  5368  
  5369  		rows := (i + 2) * batchsize
  5370  		tae.checkRowsByScan(rows, true)
  5371  		t.Log(tae.Catalog.SimplePPString(3))
  5372  		tae.restart()
  5373  		tae.BGCheckpointRunner.DisableCheckpoint()
  5374  		tae.BGCheckpointRunner.CleanPenddingCheckpoint()
  5375  		t.Log(tae.Catalog.SimplePPString(3))
  5376  		tae.checkRowsByScan(rows, true)
  5377  	}
  5378  }
  5379  
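        // TestGCCheckpoint1 GCs checkpoint entries by timestamp and verifies that only one
        // global checkpoint remains and that the incrementals start right after its end.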
  5380  func TestGCCheckpoint1(t *testing.T) {
  5381  	defer testutils.AfterTest(t)()
  5382  	testutils.EnsureNoLeak(t)
  5383  	opts := config.WithQuickScanAndCKPOpts(nil)
  5384  	tae := newTestEngine(t, opts)
  5385  	defer tae.Close()
  5386  
  5387  	schema := catalog.MockSchemaAll(18, 2)
  5388  	schema.BlockMaxRows = 5
  5389  	schema.SegmentMaxBlocks = 2
  5390  	tae.bindSchema(schema)
  5391  	bat := catalog.MockBatch(schema, 50)
  5392  
  5393  	tae.createRelAndAppend(bat, true)
  5394  
  5395  	testutils.WaitExpect(4000, func() bool {
  5396  		return tae.Wal.GetPenddingCnt() == 0
  5397  	})
  5398  	assert.Equal(t, uint64(0), tae.Wal.GetPenddingCnt())
  5399  
  5400  	testutils.WaitExpect(4000, func() bool {
  5401  		return tae.BGCheckpointRunner.GetPenddingIncrementalCount() == 0
  5402  	})
  5403  	assert.Equal(t, 0, tae.BGCheckpointRunner.GetPenddingIncrementalCount())
  5404  
  5405  	testutils.WaitExpect(4000, func() bool {
  5406  		return tae.BGCheckpointRunner.MaxGlobalCheckpoint().IsFinished()
  5407  	})
  5408  	assert.True(t, tae.BGCheckpointRunner.MaxGlobalCheckpoint().IsFinished())
  5409  
  5410  	tae.BGCheckpointRunner.DisableCheckpoint()
  5411  
  5412  	gcTS := types.BuildTS(time.Now().UTC().UnixNano(), 0)
  5413  	t.Log(gcTS.ToString())
  5414  	tae.BGCheckpointRunner.GCByTS(context.Background(), gcTS)
  5415  
  5416  	maxGlobal := tae.BGCheckpointRunner.MaxGlobalCheckpoint()
  5417  
  5418  	testutils.WaitExpect(4000, func() bool {
  5420  		return !tae.BGCheckpointRunner.ExistPendingEntryToGC()
  5421  	})
  5422  	assert.False(t, tae.BGCheckpointRunner.ExistPendingEntryToGC())
  5423  
  5424  	globals := tae.BGCheckpointRunner.GetAllGlobalCheckpoints()
  5425  	assert.Equal(t, 1, len(globals))
  5426  	assert.True(t, maxGlobal.GetEnd().Equal(globals[0].GetEnd()))
  5427  	for _, global := range globals {
  5428  		t.Log(global.String())
  5429  	}
  5430  
  5431  	incrementals := tae.BGCheckpointRunner.GetAllIncrementalCheckpoints()
  5432  	prevEnd := maxGlobal.GetEnd().Prev()
  5433  	for _, incremental := range incrementals {
  5434  		assert.True(t, incremental.GetStart().Equal(prevEnd.Next()))
  5435  		t.Log(incremental.String())
  5436  	}
  5437  }
  5438  
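        // TestGCCatalog1 soft-deletes blocks, segments, tables, and databases step by step
        // and checks that catalog GC removes exactly the dead entries each time.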
  5439  func TestGCCatalog1(t *testing.T) {
  5440  	t.Skip("This case crashes occasionally and is being fixed; skip it for now")
  5441  	defer testutils.AfterTest(t)()
  5442  	opts := config.WithLongScanAndCKPOpts(nil)
  5443  	tae := newTestEngine(t, opts)
  5444  	defer tae.Close()
  5445  
  5446  	txn1, _ := tae.StartTxn(nil)
  5447  	db, err := txn1.CreateDatabase("db1", "")
  5448  	assert.Nil(t, err)
  5449  	db2, err := txn1.CreateDatabase("db2", "")
  5450  	assert.Nil(t, err)
  5451  
  5452  	schema := catalog.MockSchema(1, 0)
  5453  	schema.Name = "tb1"
  5454  	tb, err := db.CreateRelation(schema)
  5455  	assert.Nil(t, err)
  5456  	schema2 := catalog.MockSchema(1, 0)
  5457  	schema2.Name = "tb2"
  5458  	tb2, err := db.CreateRelation(schema2)
  5459  	assert.Nil(t, err)
  5460  	schema3 := catalog.MockSchema(1, 0)
  5461  	schema3.Name = "tb3"
  5462  	tb3, err := db2.CreateRelation(schema3)
  5463  	assert.Nil(t, err)
  5464  
  5465  	seg1, err := tb.CreateSegment(false)
  5466  	assert.Nil(t, err)
  5467  	seg2, err := tb2.CreateSegment(false)
  5468  	assert.Nil(t, err)
  5469  	seg3, err := tb2.CreateSegment(false)
  5470  	assert.Nil(t, err)
  5471  	seg4, err := tb3.CreateSegment(false)
  5472  	assert.Nil(t, err)
  5473  
  5474  	_, err = seg1.CreateBlock(false)
  5475  	assert.NoError(t, err)
  5476  	_, err = seg2.CreateBlock(false)
  5477  	assert.NoError(t, err)
  5478  	_, err = seg3.CreateBlock(false)
  5479  	assert.NoError(t, err)
  5480  	blk4, err := seg4.CreateBlock(false)
  5481  	assert.NoError(t, err)
  5482  
  5483  	err = txn1.Commit()
  5484  	assert.Nil(t, err)
  5485  
  5486  	p := &catalog.LoopProcessor{}
  5487  	var dbCnt, tableCnt, segCnt, blkCnt int
  5488  	p.DatabaseFn = func(d *catalog.DBEntry) error {
  5489  		if d.IsSystemDB() {
  5490  			return nil
  5491  		}
  5492  		dbCnt++
  5493  		return nil
  5494  	}
  5495  	p.TableFn = func(te *catalog.TableEntry) error {
  5496  		if te.GetDB().IsSystemDB() {
  5497  			return nil
  5498  		}
  5499  		tableCnt++
  5500  		return nil
  5501  	}
  5502  	p.SegmentFn = func(se *catalog.SegmentEntry) error {
  5503  		if se.GetTable().GetDB().IsSystemDB() {
  5504  			return nil
  5505  		}
  5506  		segCnt++
  5507  		return nil
  5508  	}
  5509  	p.BlockFn = func(be *catalog.BlockEntry) error {
  5510  		if be.GetSegment().GetTable().GetDB().IsSystemDB() {
  5511  			return nil
  5512  		}
  5513  		blkCnt++
  5514  		return nil
  5515  	}
  5516  	resetCount := func() {
  5517  		dbCnt = 0
  5518  		tableCnt = 0
  5519  		segCnt = 0
  5520  		blkCnt = 0
  5521  	}
  5522  
  5523  	err = tae.Catalog.RecurLoop(p)
  5524  	assert.NoError(t, err)
  5525  	assert.Equal(t, 2, dbCnt)
  5526  	assert.Equal(t, 3, tableCnt)
  5527  	assert.Equal(t, 4, segCnt)
  5528  	assert.Equal(t, 4, blkCnt)
  5529  
  5530  	txn2, err := tae.StartTxn(nil)
  5531  	assert.NoError(t, err)
  5532  	db2, err = txn2.GetDatabase("db2")
  5533  	assert.NoError(t, err)
  5534  	tb3, err = db2.GetRelationByName("tb3")
  5535  	assert.NoError(t, err)
  5536  	seg4, err = tb3.GetSegment(seg4.GetID())
  5537  	assert.NoError(t, err)
  5538  	err = seg4.SoftDeleteBlock(blk4.ID())
  5539  	assert.NoError(t, err)
  5540  	err = txn2.Commit()
  5541  	assert.NoError(t, err)
  5542  
  5543  	t.Log(tae.Catalog.SimplePPString(3))
  5544  	tae.Catalog.GCByTS(context.Background(), txn2.GetCommitTS().Next())
  5545  	t.Log(tae.Catalog.SimplePPString(3))
  5546  
  5547  	resetCount()
  5548  	err = tae.Catalog.RecurLoop(p)
  5549  	assert.NoError(t, err)
  5550  	assert.Equal(t, 2, dbCnt)
  5551  	assert.Equal(t, 3, tableCnt)
  5552  	assert.Equal(t, 4, segCnt)
  5553  	assert.Equal(t, 3, blkCnt)
  5554  
  5555  	txn3, err := tae.StartTxn(nil)
  5556  	assert.NoError(t, err)
  5557  	db2, err = txn3.GetDatabase("db2")
  5558  	assert.NoError(t, err)
  5559  	tb3, err = db2.GetRelationByName("tb3")
  5560  	assert.NoError(t, err)
  5561  	err = tb3.SoftDeleteSegment(seg4.GetID())
  5562  	assert.NoError(t, err)
  5563  
  5564  	db2, err = txn3.GetDatabase("db1")
  5565  	assert.NoError(t, err)
  5566  	tb3, err = db2.GetRelationByName("tb2")
  5567  	assert.NoError(t, err)
  5568  	err = tb3.SoftDeleteSegment(seg3.GetID())
  5569  	assert.NoError(t, err)
  5570  
  5571  	err = txn3.Commit()
  5572  	assert.NoError(t, err)
  5573  
  5574  	t.Log(tae.Catalog.SimplePPString(3))
  5575  	tae.Catalog.GCByTS(context.Background(), txn3.GetCommitTS().Next())
  5576  	t.Log(tae.Catalog.SimplePPString(3))
  5577  
  5578  	resetCount()
  5579  	err = tae.Catalog.RecurLoop(p)
  5580  	assert.NoError(t, err)
  5581  	assert.Equal(t, 2, dbCnt)
  5582  	assert.Equal(t, 3, tableCnt)
  5583  	assert.Equal(t, 2, segCnt)
  5584  	assert.Equal(t, 2, blkCnt)
  5585  
  5586  	txn4, err := tae.StartTxn(nil)
  5587  	assert.NoError(t, err)
  5588  	db2, err = txn4.GetDatabase("db2")
  5589  	assert.NoError(t, err)
  5590  	_, err = db2.DropRelationByName("tb3")
  5591  	assert.NoError(t, err)
  5592  
  5593  	db2, err = txn4.GetDatabase("db1")
  5594  	assert.NoError(t, err)
  5595  	_, err = db2.DropRelationByName("tb2")
  5596  	assert.NoError(t, err)
  5597  
  5598  	err = txn4.Commit()
  5599  	assert.NoError(t, err)
  5600  
  5601  	t.Log(tae.Catalog.SimplePPString(3))
  5602  	tae.Catalog.GCByTS(context.Background(), txn4.GetCommitTS().Next())
  5603  	t.Log(tae.Catalog.SimplePPString(3))
  5604  
  5605  	resetCount()
  5606  	err = tae.Catalog.RecurLoop(p)
  5607  	assert.NoError(t, err)
  5608  	assert.Equal(t, 2, dbCnt)
  5609  	assert.Equal(t, 1, tableCnt)
  5610  	assert.Equal(t, 1, segCnt)
  5611  	assert.Equal(t, 1, blkCnt)
  5612  
  5613  	txn5, err := tae.StartTxn(nil)
  5614  	assert.NoError(t, err)
  5615  	_, err = txn5.DropDatabase("db2")
  5616  	assert.NoError(t, err)
  5617  
  5618  	_, err = txn5.DropDatabase("db1")
  5619  	assert.NoError(t, err)
  5620  
  5621  	err = txn5.Commit()
  5622  	assert.NoError(t, err)
  5623  
  5624  	t.Log(tae.Catalog.SimplePPString(3))
  5625  	tae.Catalog.GCByTS(context.Background(), txn5.GetCommitTS().Next())
  5626  	t.Log(tae.Catalog.SimplePPString(3))
  5627  
  5628  	resetCount()
  5629  	err = tae.Catalog.RecurLoop(p)
  5630  	assert.NoError(t, err)
  5631  	assert.Equal(t, 0, dbCnt)
  5632  	assert.Equal(t, 0, tableCnt)
  5633  	assert.Equal(t, 0, segCnt)
  5634  	assert.Equal(t, 0, blkCnt)
  5635  }
  5636  
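        // TestGCCatalog2 waits for background compaction plus catalog GC to retire all
        // appendable blocks of the user table.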
  5637  func TestGCCatalog2(t *testing.T) {
  5638  	t.Skip("This case crashes occasionally and is being fixed; skip it for now")
  5639  	defer testutils.AfterTest(t)()
  5640  	opts := config.WithQuickScanAndCKPOpts(nil)
  5641  	options.WithCatalogGCInterval(10 * time.Millisecond)(opts)
  5642  	tae := newTestEngine(t, opts)
  5643  	defer tae.Close()
  5644  	schema := catalog.MockSchema(3, 2)
  5645  	schema.BlockMaxRows = 10
  5646  	schema.SegmentMaxBlocks = 2
  5647  	tae.bindSchema(schema)
  5648  	bat := catalog.MockBatch(schema, 33)
  5649  
  5650  	checkCompactAndGCFn := func() bool {
  5651  		p := &catalog.LoopProcessor{}
  5652  		appendableCount := 0
  5653  		p.BlockFn = func(be *catalog.BlockEntry) error {
  5654  			if be.GetSegment().GetTable().GetDB().IsSystemDB() {
  5655  				return nil
  5656  			}
  5657  			if be.IsAppendable() {
  5658  				appendableCount++
  5659  			}
  5660  			return nil
  5661  		}
  5662  		err := tae.Catalog.RecurLoop(p)
  5663  		assert.NoError(t, err)
  5664  		return appendableCount == 0
  5665  	}
  5666  
  5667  	tae.createRelAndAppend(bat, true)
  5668  	t.Log(tae.Catalog.SimplePPString(3))
  5669  	testutils.WaitExpect(4000, checkCompactAndGCFn)
  5670  	assert.True(t, checkCompactAndGCFn())
  5671  	t.Log(tae.Catalog.SimplePPString(3))
  5672  }
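
        // TestGCCatalog3 drops the database and waits for catalog GC to remove the
        // corresponding DB entry from the catalog.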
  5673  func TestGCCatalog3(t *testing.T) {
  5674  	t.Skip("This case crashes occasionally and is being fixed; skip it for now")
  5675  	defer testutils.AfterTest(t)()
  5676  	opts := config.WithQuickScanAndCKPOpts(nil)
  5677  	options.WithCatalogGCInterval(10 * time.Millisecond)(opts)
  5678  	tae := newTestEngine(t, opts)
  5679  	defer tae.Close()
  5680  	schema := catalog.MockSchema(3, 2)
  5681  	schema.BlockMaxRows = 10
  5682  	schema.SegmentMaxBlocks = 2
  5683  	tae.bindSchema(schema)
  5684  	bat := catalog.MockBatch(schema, 33)
  5685  
  5686  	checkCompactAndGCFn := func() bool {
  5687  		p := &catalog.LoopProcessor{}
  5688  		dbCount := 0
  5689  		p.DatabaseFn = func(be *catalog.DBEntry) error {
  5690  			if be.IsSystemDB() {
  5691  				return nil
  5692  			}
  5693  			dbCount++
  5694  			return nil
  5695  		}
  5696  		err := tae.Catalog.RecurLoop(p)
  5697  		assert.NoError(t, err)
  5698  		return dbCount == 0
  5699  	}
  5700  
  5701  	tae.createRelAndAppend(bat, true)
  5702  	txn, err := tae.StartTxn(nil)
  5703  	assert.NoError(t, err)
  5704  	_, err = txn.DropDatabase("db")
  5705  	assert.NoError(t, err)
  5706  	assert.NoError(t, txn.Commit())
  5707  
  5708  	t.Log(tae.Catalog.SimplePPString(3))
  5709  	testutils.WaitExpect(4000, checkCompactAndGCFn)
  5710  	assert.True(t, checkCompactAndGCFn())
  5711  	t.Log(tae.Catalog.SimplePPString(3))
  5712  }
  5713  
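        // TestForceCheckpoint injects a flush-timeout fault point: ForceFlush should fail,
        // while ForceIncrementalCheckpoint should still succeed.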
  5714  func TestForceCheckpoint(t *testing.T) {
  5715  	fault.Enable()
  5716  	defer fault.Disable()
  5717  	err := fault.AddFaultPoint(context.Background(), "tae: flush timeout", ":::", "echo", 0, "mock flush timeout")
  5718  	assert.NoError(t, err)
  5719  	defer func() {
  5720  		err := fault.RemoveFaultPoint(context.Background(), "tae: flush timeout")
  5721  		assert.NoError(t, err)
  5722  	}()
  5723  	opts := config.WithLongScanAndCKPOpts(nil)
  5724  	tae := newTestEngine(t, opts)
  5725  	defer tae.Close()
  5726  
  5727  	schema := catalog.MockSchemaAll(18, 2)
  5728  	schema.BlockMaxRows = 5
  5729  	schema.SegmentMaxBlocks = 2
  5730  	tae.bindSchema(schema)
  5731  	bat := catalog.MockBatch(schema, 50)
  5732  
  5733  	tae.createRelAndAppend(bat, true)
  5734  
  5735  	err = tae.BGCheckpointRunner.ForceFlush(tae.TxnMgr.StatMaxCommitTS(), context.Background(), time.Second)
  5736  	assert.Error(t, err)
  5737  	err = tae.BGCheckpointRunner.ForceIncrementalCheckpoint(tae.TxnMgr.StatMaxCommitTS())
  5738  	assert.NoError(t, err)
  5739  }