github.com/matrixorigin/matrixone@v1.2.0/pkg/vm/engine/tae/db/test/db_test.go

     1  // Copyright 2021 Matrix Origin
     2  //
     3  // Licensed under the Apache License, Version 2.0 (the "License");
     4  // you may not use this file except in compliance with the License.
     5  // You may obtain a copy of the License at
     6  //
     7  //      http://www.apache.org/licenses/LICENSE-2.0
     8  //
     9  // Unless required by applicable law or agreed to in writing, software
    10  // distributed under the License is distributed on an "AS IS" BASIS,
    11  // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    12  // See the License for the specific language governing permissions and
    13  // limitations under the License.
    14  
    15  package test
    16  
    17  import (
    18  	"bytes"
    19  	"context"
    20  	"fmt"
    21  	"math/rand"
    22  	"reflect"
    23  	"strings"
    24  	"sync"
    25  	"sync/atomic"
    26  	"testing"
    27  	"time"
    28  
    29  	"github.com/matrixorigin/matrixone/pkg/fileservice"
    30  
    31  	"sort"
    32  
    33  	"github.com/matrixorigin/matrixone/pkg/logutil"
    34  	"github.com/matrixorigin/matrixone/pkg/objectio"
    35  
    36  	"github.com/matrixorigin/matrixone/pkg/util/fault"
    37  	"github.com/matrixorigin/matrixone/pkg/vm/engine/tae/blockio"
    38  	"github.com/matrixorigin/matrixone/pkg/vm/engine/tae/db"
    39  	"github.com/matrixorigin/matrixone/pkg/vm/engine/tae/db/dbutils"
    40  	"github.com/matrixorigin/matrixone/pkg/vm/engine/tae/db/gc"
    41  	"github.com/matrixorigin/matrixone/pkg/vm/engine/tae/db/testutil"
    42  	"github.com/matrixorigin/matrixone/pkg/vm/engine/tae/tables"
    43  	"github.com/matrixorigin/matrixone/pkg/vm/engine/tae/txn/txnbase"
    44  
    45  	"github.com/matrixorigin/matrixone/pkg/pb/api"
    46  	"github.com/matrixorigin/matrixone/pkg/pb/timestamp"
    47  	"github.com/matrixorigin/matrixone/pkg/vm/engine/tae/db/checkpoint"
    48  	"github.com/matrixorigin/matrixone/pkg/vm/engine/tae/logtail"
    49  
    50  	"github.com/matrixorigin/matrixone/pkg/common/moerr"
    51  	"github.com/matrixorigin/matrixone/pkg/common/mpool"
    52  	"github.com/matrixorigin/matrixone/pkg/container/batch"
    53  	"github.com/matrixorigin/matrixone/pkg/container/types"
    54  	"github.com/matrixorigin/matrixone/pkg/container/vector"
    55  	"github.com/matrixorigin/matrixone/pkg/vm/engine/tae/containers"
    56  	"github.com/matrixorigin/matrixone/pkg/vm/engine/tae/testutils/config"
    57  
    58  	pkgcatalog "github.com/matrixorigin/matrixone/pkg/catalog"
    59  	"github.com/matrixorigin/matrixone/pkg/vm/engine/tae/catalog"
    60  	"github.com/matrixorigin/matrixone/pkg/vm/engine/tae/common"
    61  	"github.com/matrixorigin/matrixone/pkg/vm/engine/tae/iface/handle"
    62  	"github.com/matrixorigin/matrixone/pkg/vm/engine/tae/iface/txnif"
    63  	"github.com/matrixorigin/matrixone/pkg/vm/engine/tae/options"
    64  	"github.com/matrixorigin/matrixone/pkg/vm/engine/tae/tables/jobs"
    65  	"github.com/matrixorigin/matrixone/pkg/vm/engine/tae/tasks"
    66  	ops "github.com/matrixorigin/matrixone/pkg/vm/engine/tae/tasks/worker"
    67  	"github.com/matrixorigin/matrixone/pkg/vm/engine/tae/testutils"
    68  	"github.com/panjf2000/ants/v2"
    69  	"github.com/stretchr/testify/assert"
    70  	"github.com/stretchr/testify/require"
    71  )
    72  
    73  const (
    74  	ModuleName               = "TAEDB"
    75  	smallCheckpointBlockRows = 10
    76  	smallCheckpointSize      = 1024
    77  )
    78  
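        // TestAppend: append the first of four batch splits while creating the
        // relation, then append two more splits in a second txn and verify the
        // total visible row count by scan.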
    79  func TestAppend(t *testing.T) {
    80  	defer testutils.AfterTest(t)()
    81  	testutils.EnsureNoLeak(t)
    82  	ctx := context.Background()
    83  
    84  	tae := testutil.NewTestEngine(ctx, ModuleName, t, nil)
    85  	defer tae.Close()
    86  	schema := catalog.MockSchemaAll(14, 3)
    87  	schema.BlockMaxRows = options.DefaultBlockMaxRows
    88  	schema.ObjectMaxBlocks = options.DefaultBlocksPerObject
    89  	tae.BindSchema(schema)
    90  	data := catalog.MockBatch(schema, int(schema.BlockMaxRows*2))
    91  	defer data.Close()
    92  	bats := data.Split(4)
    93  	now := time.Now()
    94  	tae.CreateRelAndAppend(bats[0], true)
    95  	t.Log(time.Since(now))
    96  	tae.CheckRowsByScan(bats[0].Length(), false)
    97  
    98  	txn, rel := tae.GetRelation()
    99  	err := rel.Append(context.Background(), bats[1])
   100  	assert.NoError(t, err)
   101  	// FIXME
   102  	// testutil.CheckAllColRowsByScan(t, rel, bats[0].Length()+bats[1].Length(), false)
   103  	err = rel.Append(context.Background(), bats[2])
   104  	assert.NoError(t, err)
   105  	assert.NoError(t, txn.Commit(context.Background()))
   106  	tae.CheckRowsByScan(bats[0].Length()+bats[1].Length()+bats[2].Length(), false)
   107  	t.Log(tae.Catalog.SimplePPString(common.PPL1))
   108  }
   109  
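        // TestAppend2: append 100 batch splits concurrently through a worker pool,
        // wait until no LSN is pending checkpoint, then expect a duplicate append
        // of the first split to fail.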
   110  func TestAppend2(t *testing.T) {
   111  	defer testutils.AfterTest(t)()
   112  	testutils.EnsureNoLeak(t)
   113  	ctx := context.Background()
   114  
   115  	opts := config.WithQuickScanAndCKPOpts(nil)
   116  	db := testutil.InitTestDB(ctx, ModuleName, t, opts)
   117  	defer db.Close()
   118  
   119  	// this task won't affect the logic of TestAppend2; it just prints logs about the dirty count
   120  	// forest := logtail.NewDirtyCollector(db.LogtailMgr, opts.Clock, db.Catalog, new(catalog.LoopProcessor))
   121  	// hb := ops.NewHeartBeaterWithFunc(5*time.Millisecond, func() {
   122  	// 	forest.Run()
   123  	// 	t.Log(forest.String())
   124  	// }, nil)
   125  	// hb.Start()
   126  	// defer hb.Stop()
   127  
   128  	schema := catalog.MockSchemaAll(13, 3)
   129  	schema.BlockMaxRows = 10
   130  	schema.ObjectMaxBlocks = 10
   131  	testutil.CreateRelation(t, db, "db", schema, true)
   132  
   133  	totalRows := uint64(schema.BlockMaxRows * 30)
   134  	bat := catalog.MockBatch(schema, int(totalRows))
   135  	defer bat.Close()
   136  	bats := bat.Split(100)
   137  
   138  	var wg sync.WaitGroup
   139  	pool, _ := ants.NewPool(80)
   140  	defer pool.Release()
   141  
   142  	start := time.Now()
   143  	for _, data := range bats {
   144  		wg.Add(1)
   145  		err := pool.Submit(testutil.AppendClosure(t, data, schema.Name, db, &wg))
   146  		assert.Nil(t, err)
   147  	}
   148  	wg.Wait()
   149  	t.Logf("Append %d rows takes: %s", totalRows, time.Since(start))
   150  	{
   151  		txn, rel := testutil.GetDefaultRelation(t, db, schema.Name)
   152  		testutil.CheckAllColRowsByScan(t, rel, int(totalRows), false)
   153  		assert.NoError(t, txn.Commit(context.Background()))
   154  	}
   155  	t.Log(db.Catalog.SimplePPString(common.PPL1))
   156  
   157  	now := time.Now()
   158  	testutils.WaitExpect(20000, func() bool {
   159  		return db.Runtime.Scheduler.GetPenddingLSNCnt() == 0
   160  	})
   161  	t.Log(time.Since(now))
   162  	t.Logf("Checkpointed: %d", db.Runtime.Scheduler.GetCheckpointedLSN())
   163  	t.Logf("GetPenddingLSNCnt: %d", db.Runtime.Scheduler.GetPenddingLSNCnt())
   164  	assert.Equal(t, uint64(0), db.Runtime.Scheduler.GetPenddingLSNCnt())
   165  	t.Log(db.Catalog.SimplePPString(common.PPL1))
   166  	wg.Add(1)
   167  	testutil.AppendFailClosure(t, bats[0], schema.Name, db, &wg)()
   168  	wg.Wait()
   169  }
   170  
   171  func TestAppend3(t *testing.T) {
   172  	defer testutils.AfterTest(t)()
   173  	testutils.EnsureNoLeak(t)
   174  	ctx := context.Background()
   175  
   176  	opts := config.WithQuickScanAndCKPOpts(nil)
   177  	tae := testutil.InitTestDB(ctx, ModuleName, t, opts)
   178  	defer tae.Close()
   179  	schema := catalog.MockSchema(2, 0)
   180  	schema.BlockMaxRows = 10
   181  	schema.ObjectMaxBlocks = 2
   182  	testutil.CreateRelation(t, tae, "db", schema, true)
   183  	bat := catalog.MockBatch(schema, int(schema.BlockMaxRows))
   184  	defer bat.Close()
   185  	var wg sync.WaitGroup
   186  	wg.Add(1)
   187  	testutil.AppendClosure(t, bat, schema.Name, tae, &wg)()
   188  	wg.Wait()
   189  	testutils.WaitExpect(2000, func() bool {
   190  		return tae.Runtime.Scheduler.GetPenddingLSNCnt() == 0
   191  	})
   192  	// t.Log(tae.Catalog.SimplePPString(common.PPL3))
   193  	wg.Add(1)
   194  	testutil.AppendFailClosure(t, bat, schema.Name, tae, &wg)()
   195  	wg.Wait()
   196  }
   197  
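        // TestAppend4: for schemas with different sort-key types, append rows,
        // delete one row by filter, compact blocks and re-check the row count.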
   198  func TestAppend4(t *testing.T) {
   199  	defer testutils.AfterTest(t)()
   200  	testutils.EnsureNoLeak(t)
   201  	ctx := context.Background()
   202  
   203  	opts := config.WithLongScanAndCKPOpts(nil)
   204  	tae := testutil.InitTestDB(ctx, ModuleName, t, opts)
   205  	defer tae.Close()
   206  	schema1 := catalog.MockSchemaAll(18, 14)
   207  	schema2 := catalog.MockSchemaAll(18, 15)
   208  	schema3 := catalog.MockSchemaAll(18, 16)
   209  	schema4 := catalog.MockSchemaAll(18, 11)
   210  	schema1.BlockMaxRows = 10
   211  	schema2.BlockMaxRows = 10
   212  	schema3.BlockMaxRows = 10
   213  	schema4.BlockMaxRows = 10
   214  	schema1.ObjectMaxBlocks = 2
   215  	schema2.ObjectMaxBlocks = 2
   216  	schema3.ObjectMaxBlocks = 2
   217  	schema4.ObjectMaxBlocks = 2
   218  	schemas := []*catalog.Schema{schema1, schema2, schema3, schema4}
   219  	testutil.CreateDB(t, tae, testutil.DefaultTestDB)
   220  	for _, schema := range schemas {
   221  		bat := catalog.MockBatch(schema, int(schema.BlockMaxRows*3-1))
   222  		defer bat.Close()
   223  		bats := bat.Split(1)
   224  		testutil.CreateRelation(t, tae, testutil.DefaultTestDB, schema, false)
   225  		for i := range bats {
   226  			txn, rel := testutil.GetDefaultRelation(t, tae, schema.Name)
   227  			err := rel.Append(context.Background(), bats[i])
   228  			assert.NoError(t, err)
   229  			err = txn.Commit(context.Background())
   230  			assert.NoError(t, err)
   231  		}
   232  		txn, rel := testutil.GetDefaultRelation(t, tae, schema.Name)
   233  		testutil.CheckAllColRowsByScan(t, rel, bat.Length(), false)
   234  
   235  		v := bat.Vecs[schema.GetSingleSortKeyIdx()].Get(3)
   236  		filter := handle.NewEQFilter(v)
   237  		err := rel.DeleteByFilter(context.Background(), filter)
   238  		assert.NoError(t, err)
   239  		err = txn.Commit(context.Background())
   240  		assert.NoError(t, err)
   241  
   242  		txn, rel = testutil.GetDefaultRelation(t, tae, schema.Name)
   243  		testutil.CheckAllColRowsByScan(t, rel, bat.Length()-1, true)
   244  		err = txn.Commit(context.Background())
   245  		assert.NoError(t, err)
   246  		testutil.CompactBlocks(t, 0, tae, testutil.DefaultTestDB, schema, false)
   247  		txn, rel = testutil.GetDefaultRelation(t, tae, schema.Name)
   248  		testutil.CheckAllColRowsByScan(t, rel, bat.Length()-1, false)
   249  		err = txn.Commit(context.Background())
   250  		assert.NoError(t, err)
   251  	}
   252  }
   253  
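        // testCRUD exercises append, duplicate-key rejection, delete, update and
        // table drop for a single schema, checking row counts by scan along the way.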
   254  func testCRUD(t *testing.T, tae *db.DB, schema *catalog.Schema) {
   255  	bat := catalog.MockBatch(schema, int(schema.BlockMaxRows*(uint32(schema.ObjectMaxBlocks)+1)-1))
   256  	defer bat.Close()
   257  	bats := bat.Split(4)
   258  
   259  	var updateColIdx int
   260  	if schema.GetSingleSortKeyIdx() >= 17 {
   261  		updateColIdx = 0
   262  	} else {
   263  		updateColIdx = schema.GetSingleSortKeyIdx() + 1
   264  	}
   265  
   266  	testutil.CreateRelationAndAppend(t, 0, tae, testutil.DefaultTestDB, schema, bats[0], false)
   267  
   268  	txn, rel := testutil.GetDefaultRelation(t, tae, schema.Name)
   269  	err := rel.Append(context.Background(), bats[0])
   270  	assert.True(t, moerr.IsMoErrCode(err, moerr.ErrDuplicateEntry))
   271  	testutil.CheckAllColRowsByScan(t, rel, bats[0].Length(), false)
   272  	v := bats[0].Vecs[schema.GetSingleSortKeyIdx()].Get(2)
   273  	filter := handle.NewEQFilter(v)
   274  	err = rel.DeleteByFilter(context.Background(), filter)
   275  	assert.NoError(t, err)
   276  
   277  	oldv := bats[0].Vecs[updateColIdx].Get(5)
   278  	oldvIsNull := bats[0].Vecs[updateColIdx].IsNull(5)
   279  
   280  	v = bats[0].Vecs[schema.GetSingleSortKeyIdx()].Get(5)
   281  	ufilter := handle.NewEQFilter(v)
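        	// Derive the update value: convert int8(99) into the dynamic type of the
        	// old column value when possible, so the update keeps the column type.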
   282  	{
   283  		ot := reflect.ValueOf(&oldv).Elem()
   284  		nv := reflect.ValueOf(int8(99))
   285  		if nv.CanConvert(reflect.TypeOf(oldv)) {
   286  			ot.Set(nv.Convert(reflect.TypeOf(oldv)))
   287  		}
   288  	}
   289  	err = rel.UpdateByFilter(context.Background(), ufilter, uint16(updateColIdx), oldv, oldvIsNull)
   290  	assert.NoError(t, err)
   291  
   292  	testutil.CheckAllColRowsByScan(t, rel, bats[0].Length()-1, true)
   293  	assert.NoError(t, txn.Commit(context.Background()))
   294  
   295  	txn, rel = testutil.GetDefaultRelation(t, tae, schema.Name)
   296  	testutil.CheckAllColRowsByScan(t, rel, bats[0].Length()-1, true)
   297  	for _, b := range bats[1:] {
   298  		err = rel.Append(context.Background(), b)
   299  		assert.NoError(t, err)
   300  	}
   301  	testutil.CheckAllColRowsByScan(t, rel, bat.Length()-1, true)
   302  	assert.NoError(t, txn.Commit(context.Background()))
   303  
   304  	testutil.CompactBlocks(t, 0, tae, testutil.DefaultTestDB, schema, false)
   305  
   306  	txn, rel = testutil.GetDefaultRelation(t, tae, schema.Name)
   307  	testutil.CheckAllColRowsByScan(t, rel, bat.Length()-1, false)
   308  	v = bats[0].Vecs[schema.GetSingleSortKeyIdx()].Get(3)
   309  	filter = handle.NewEQFilter(v)
   310  	err = rel.DeleteByFilter(context.Background(), filter)
   311  	assert.NoError(t, err)
   312  	testutil.CheckAllColRowsByScan(t, rel, bat.Length()-2, true)
   313  	assert.NoError(t, txn.Commit(context.Background()))
   314  
   315  	// After merging blocks, the logic for reading data is modified
   316  	//compactObjs(t, tae, schema)
   317  
   318  	txn, rel = testutil.GetDefaultRelation(t, tae, schema.Name)
   319  	//testutil.CheckAllColRowsByScan(t, rel, bat.Length()-2, false)
   320  	testutil.CheckAllColRowsByScan(t, rel, bat.Length()-1, false)
   321  	assert.NoError(t, txn.Commit(context.Background()))
   322  
   323  	// t.Log(rel.GetMeta().(*catalog.TableEntry).PPString(common.PPL1, 0, ""))
   324  	txn, err = tae.StartTxn(nil)
   325  	assert.NoError(t, err)
   326  	db, err := txn.GetDatabase(testutil.DefaultTestDB)
   327  	assert.NoError(t, err)
   328  	_, err = db.DropRelationByName(schema.Name)
   329  	assert.NoError(t, err)
   330  	assert.NoError(t, txn.Commit(context.Background()))
   331  }
   332  
   333  func TestCRUD(t *testing.T) {
   334  	defer testutils.AfterTest(t)()
   335  	testutils.EnsureNoLeak(t)
   336  	ctx := context.Background()
   337  
   338  	opts := config.WithLongScanAndCKPOpts(nil)
   339  	tae := testutil.InitTestDB(ctx, ModuleName, t, opts)
   340  	defer tae.Close()
   341  	testutil.CreateDB(t, tae, testutil.DefaultTestDB)
   342  	testutil.WithTestAllPKType(t, tae, testCRUD)
   343  }
   344  
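        // TestTableHandle: a newly created table has no appendable object yet, so
        // GetAppender is expected to return ErrAppendableObjectNotFound.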
   345  func TestTableHandle(t *testing.T) {
   346  	defer testutils.AfterTest(t)()
   347  	testutils.EnsureNoLeak(t)
   348  	ctx := context.Background()
   349  
   350  	db := testutil.InitTestDB(ctx, ModuleName, t, nil)
   351  	defer db.Close()
   352  
   353  	schema := catalog.MockSchema(2, 0)
   354  	schema.BlockMaxRows = 1000
   355  	schema.ObjectMaxBlocks = 2
   356  
   357  	txn, _ := db.StartTxn(nil)
   358  	database, _ := txn.CreateDatabase("db", "", "")
   359  	rel, _ := database.CreateRelation(schema)
   360  
   361  	tableMeta := rel.GetMeta().(*catalog.TableEntry)
   362  	t.Log(tableMeta.String())
   363  	table := tableMeta.GetTableData()
   364  
   365  	handle := table.GetHandle()
   366  	appender, err := handle.GetAppender()
   367  	assert.Nil(t, appender)
   368  	assert.True(t, moerr.IsMoErrCode(err, moerr.ErrAppendableObjectNotFound))
   369  }
   370  
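        // TestCreateBlock: create a relation and an object in one txn and print the
        // catalog before and after commit.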
   371  func TestCreateBlock(t *testing.T) {
   372  	defer testutils.AfterTest(t)()
   373  	testutils.EnsureNoLeak(t)
   374  	ctx := context.Background()
   375  
   376  	db := testutil.InitTestDB(ctx, ModuleName, t, nil)
   377  	defer db.Close()
   378  
   379  	txn, _ := db.StartTxn(nil)
   380  	database, _ := txn.CreateDatabase("db", "", "")
   381  	schema := catalog.MockSchemaAll(13, 12)
   382  	rel, err := database.CreateRelation(schema)
   383  	assert.Nil(t, err)
   384  	_, err = rel.CreateObject(false)
   385  	assert.Nil(t, err)
   386  
   387  	t.Log(db.Catalog.SimplePPString(common.PPL1))
   388  	assert.Nil(t, txn.Commit(context.Background()))
   389  	t.Log(db.Catalog.SimplePPString(common.PPL1))
   390  }
   391  
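        // TestNonAppendableBlock: write a batch into a non-appendable object with a
        // block writer, read values back, apply range deletes and check that the
        // deletes show up in the delete mask of the column views.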
   392  func TestNonAppendableBlock(t *testing.T) {
   393  	defer testutils.AfterTest(t)()
   394  	testutils.EnsureNoLeak(t)
   395  	ctx := context.Background()
   396  
   397  	db := testutil.InitTestDB(ctx, ModuleName, t, nil)
   398  	defer db.Close()
   399  	schema := catalog.MockSchemaAll(13, 1)
   400  	schema.BlockMaxRows = 10
   401  	schema.ObjectMaxBlocks = 2
   402  
   403  	bat := catalog.MockBatch(schema, 8)
   404  	defer bat.Close()
   405  
   406  	testutil.CreateRelation(t, db, "db", schema, true)
   407  
   408  	{
   409  		txn, _ := db.StartTxn(nil)
   410  		database, err := txn.GetDatabase("db")
   411  		assert.Nil(t, err)
   412  		rel, err := database.GetRelationByName(schema.Name)
   413  		readSchema := rel.Schema()
   414  		assert.Nil(t, err)
   415  		obj, err := rel.CreateNonAppendableObject(false, nil)
   416  		assert.Nil(t, err)
   417  		dataBlk := obj.GetMeta().(*catalog.ObjectEntry).GetObjectData()
   418  		sid := objectio.NewObjectid()
   419  		name := objectio.BuildObjectNameWithObjectID(sid)
   420  		writer, err := blockio.NewBlockWriterNew(dataBlk.GetFs().Service, name, 0, nil)
   421  		assert.Nil(t, err)
   422  		_, err = writer.WriteBatch(containers.ToCNBatch(bat))
   423  		assert.Nil(t, err)
   424  		_, _, err = writer.Sync(context.Background())
   425  		assert.Nil(t, err)
   426  		obj.UpdateStats(writer.Stats())
   427  		v, _, err := dataBlk.GetValue(context.Background(), txn, readSchema, 0, 4, 2, common.DefaultAllocator)
   428  		assert.Nil(t, err)
   429  		expectVal := bat.Vecs[2].Get(4)
   430  		assert.Equal(t, expectVal, v)
   431  
   432  		view, err := dataBlk.GetColumnDataById(context.Background(), txn, readSchema, 0, 2, common.DefaultAllocator)
   433  		assert.Nil(t, err)
   434  		defer view.Close()
   435  		assert.Nil(t, view.DeleteMask)
   436  		assert.Equal(t, bat.Vecs[2].Length(), view.Length())
   437  
   438  		pkDef := schema.GetPrimaryKey()
   439  		pkVec := containers.MakeVector(pkDef.Type, common.DefaultAllocator)
   440  		val1, _, err := dataBlk.GetValue(ctx, txn, schema, 0, 1, pkDef.Idx, common.DefaultAllocator)
   441  		assert.NoError(t, err)
   442  		pkVec.Append(val1, false)
   443  		val2, _, err := dataBlk.GetValue(ctx, txn, schema, 0, 2, pkDef.Idx, common.DefaultAllocator)
   444  		assert.NoError(t, err)
   445  		pkVec.Append(val2, false)
   446  		_, err = dataBlk.RangeDelete(txn, 0, 1, 2, pkVec, handle.DT_Normal)
   447  		assert.Nil(t, err)
   448  
   449  		view, err = dataBlk.GetColumnDataById(context.Background(), txn, readSchema, 0, 2, common.DefaultAllocator)
   450  		assert.Nil(t, err)
   451  		defer view.Close()
   452  		assert.True(t, view.DeleteMask.Contains(1))
   453  		assert.True(t, view.DeleteMask.Contains(2))
   454  		assert.Equal(t, bat.Vecs[2].Length(), view.Length())
   455  
   456  		// _, err = dataBlk.Update(txn, 3, 2, int32(999))
   457  		// assert.Nil(t, err)
   458  
   459  		view, err = dataBlk.GetColumnDataById(context.Background(), txn, readSchema, 0, 2, common.DefaultAllocator)
   460  		assert.Nil(t, err)
   461  		defer view.Close()
   462  		assert.True(t, view.DeleteMask.Contains(1))
   463  		assert.True(t, view.DeleteMask.Contains(2))
   464  		assert.Equal(t, bat.Vecs[2].Length(), view.Length())
   465  		// v = view.GetData().Get(3)
   466  		// assert.Equal(t, int32(999), v)
   467  
   468  		assert.Nil(t, txn.Commit(context.Background()))
   469  	}
   470  }
   471  
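        // TestCreateObject: create a non-appendable object explicitly, append a small
        // batch, then count the object entries in the catalog with a LoopProcessor.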
   472  func TestCreateObject(t *testing.T) {
   473  	defer testutils.AfterTest(t)()
   474  	testutils.EnsureNoLeak(t)
   475  	ctx := context.Background()
   476  
   477  	tae := testutil.InitTestDB(ctx, ModuleName, t, nil)
   478  	defer tae.Close()
   479  	schema := catalog.MockSchemaAll(1, 0)
   480  	txn, _ := tae.StartTxn(nil)
   481  	db, err := txn.CreateDatabase("db", "", "")
   482  	assert.Nil(t, err)
   483  	rel, err := db.CreateRelation(schema)
   484  	assert.Nil(t, err)
   485  	_, err = rel.CreateNonAppendableObject(false, nil)
   486  	assert.Nil(t, err)
   487  	assert.Nil(t, txn.Commit(context.Background()))
   488  
   489  	bat := catalog.MockBatch(schema, 5)
   490  	defer bat.Close()
   491  
   492  	testutil.AppendClosure(t, bat, schema.Name, tae, nil)()
   493  
   494  	objCnt := 0
   495  	processor := new(catalog.LoopProcessor)
   496  	processor.ObjectFn = func(Object *catalog.ObjectEntry) error {
   497  		objCnt++
   498  		return nil
   499  	}
   500  	err = tae.Catalog.RecurLoop(processor)
   501  	assert.Nil(t, err)
   502  	assert.Equal(t, 2+3, objCnt)
   503  	t.Log(tae.Catalog.SimplePPString(common.PPL1))
   504  }
   505  
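        // TestAddObjsWithMetaLoc: flush appended blocks into non-appendable objects,
        // attach them to a second table via their object stats, then verify dedup
        // behavior and the committed object/block counts.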
   506  func TestAddObjsWithMetaLoc(t *testing.T) {
   507  	defer testutils.AfterTest(t)()
   508  	testutils.EnsureNoLeak(t)
   509  	ctx := context.Background()
   510  
   511  	opts := config.WithLongScanAndCKPOpts(nil)
   512  	db := testutil.InitTestDB(ctx, ModuleName, t, opts)
   513  	defer db.Close()
   514  
   515  	worker := ops.NewOpWorker(context.Background(), "xx")
   516  	worker.Start()
   517  	defer worker.Stop()
   518  	schema := catalog.MockSchemaAll(13, 2)
   519  	schema.Name = "tb-0"
   520  	schema.BlockMaxRows = 20
   521  	schema.ObjectMaxBlocks = 2
   522  	bat := catalog.MockBatch(schema, int(schema.BlockMaxRows*4))
   523  	defer bat.Close()
   524  	bats := bat.Split(4)
   525  	{
   526  		txn, _, rel := testutil.CreateRelationNoCommit(t, db, "db", schema, true)
   527  		err := rel.Append(context.Background(), bats[0])
   528  		assert.NoError(t, err)
   529  		err = rel.Append(context.Background(), bats[1])
   530  		assert.NoError(t, err)
   531  		assert.Nil(t, txn.Commit(context.Background()))
   532  	}
   533  	//compact blocks
   534  	var newBlockFp1 *common.ID
   535  	var stats1 objectio.ObjectStats
   536  	var newBlockFp2 *common.ID
   537  	var stats2 objectio.ObjectStats
   538  	var metaLoc1 objectio.Location
   539  	{
   540  		txn, rel := testutil.GetRelation(t, 0, db, "db", schema.Name)
   541  		it := rel.MakeObjectIt()
   542  		blkMeta1 := it.GetObject().GetMeta().(*catalog.ObjectEntry)
   543  		it.Next()
   544  		blkMeta2 := it.GetObject().GetMeta().(*catalog.ObjectEntry)
   545  
   546  		task1, err := jobs.NewFlushTableTailTask(tasks.WaitableCtx, txn, []*catalog.ObjectEntry{blkMeta1, blkMeta2}, db.Runtime, txn.GetStartTS())
   547  		assert.NoError(t, err)
   548  		worker.SendOp(task1)
   549  		err = task1.WaitDone(context.Background())
   550  		assert.NoError(t, err)
   551  		newBlockFp1 = task1.GetCreatedObjects().Fingerprint()
   552  		stats1 = task1.GetCreatedObjects().GetMeta().(*catalog.ObjectEntry).GetLatestNodeLocked().BaseNode.ObjectStats
   553  		metaLoc1 = task1.GetCreatedObjects().GetMeta().(*catalog.ObjectEntry).GetLocation()
   554  		metaLoc1.SetID(0)
   555  		metaLoc1.SetRows(schema.BlockMaxRows)
   556  		newBlockFp2 = task1.GetCreatedObjects().Fingerprint()
   557  		newBlockFp2.SetBlockOffset(1)
   558  		stats2 = task1.GetCreatedObjects().GetMeta().(*catalog.ObjectEntry).GetLatestNodeLocked().BaseNode.ObjectStats
   559  		assert.Nil(t, txn.Commit(context.Background()))
   560  	}
   561  	//read new non-appendable block data and check
   562  	{
   563  		txn, rel := testutil.GetRelation(t, 0, db, "db", schema.Name)
   564  		assert.True(t, newBlockFp2.ObjectID().Eq(*newBlockFp1.ObjectID()))
   565  		obj, err := rel.GetObject(newBlockFp1.ObjectID())
   566  		assert.Nil(t, err)
   567  
   568  		view1, err := obj.GetColumnDataById(context.Background(), 0, 2, common.DefaultAllocator)
   569  		assert.NoError(t, err)
   570  		defer view1.Close()
   571  		assert.True(t, view1.GetData().Equals(bats[0].Vecs[2]))
   572  
   573  		view2, err := obj.GetColumnDataById(context.Background(), 1, 2, common.DefaultAllocator)
   574  		assert.NoError(t, err)
   575  		defer view2.Close()
   576  		assert.True(t, view2.GetData().Equals(bats[1].Vecs[2]))
   577  		assert.Nil(t, txn.Commit(context.Background()))
   578  	}
   579  
   580  	{
   581  
   582  		schema = catalog.MockSchemaAll(13, 2)
   583  		schema.Name = "tb-1"
   584  		schema.BlockMaxRows = 20
   585  		schema.ObjectMaxBlocks = 2
   586  		txn, _, rel := testutil.CreateRelationNoCommit(t, db, "db", schema, false)
   587  		txn.SetDedupType(txnif.FullSkipWorkSpaceDedup)
   588  		vec1 := containers.MakeVector(types.T_varchar.ToType(), common.DefaultAllocator)
   589  		vec1.Append(stats1[:], false)
   590  		defer vec1.Close()
   591  		err := rel.AddObjsWithMetaLoc(context.Background(), vec1)
   592  		assert.Nil(t, err)
   593  		err = rel.Append(context.Background(), bats[0])
   594  		assert.Nil(t, err)
   595  
   596  		vec2 := containers.MakeVector(types.T_varchar.ToType(), common.DefaultAllocator)
   597  		vec2.Append(stats2[:], false)
   598  		defer vec2.Close()
   599  		err = rel.AddObjsWithMetaLoc(context.Background(), vec2)
   600  		assert.Nil(t, err)
   601  		err = rel.Append(context.Background(), bats[1])
   602  		assert.Nil(t, err)
   603  		//err = rel.RangeDeleteLocal(start, end)
   604  		//assert.Nil(t, err)
   605  		//assert.True(t, rel.IsLocalDeleted(start, end))
   606  		err = txn.Commit(context.Background())
   607  		assert.Nil(t, err)
   608  
   609  		// "tb-1" table now has one committed non-appendable Object which contains
   610  		// two non-appendable blocks, and one committed appendable Object which contains two appendable blocks.
   611  
   612  		// do deduplication check against snapshot data.
   613  		txn, rel = testutil.GetRelation(t, 0, db, "db", schema.Name)
   614  		txn.SetDedupType(txnif.FullSkipWorkSpaceDedup)
   615  		err = rel.Append(context.Background(), bats[0])
   616  		assert.NotNil(t, err)
   617  		err = rel.Append(context.Background(), bats[1])
   618  		assert.NotNil(t, err)
   619  
   620  		vec3 := containers.MakeVector(types.T_varchar.ToType(), common.DefaultAllocator)
   621  		vec3.Append(stats1[:], false)
   622  		vec3.Append(stats2[:], false)
   623  		defer vec3.Close()
   624  		err = rel.AddObjsWithMetaLoc(context.Background(), vec3)
   625  		assert.NotNil(t, err)
   626  
   627  		//check blk count.
   628  		t.Log(db.Catalog.SimplePPString(3))
   629  		cntOfAblk := 0
   630  		cntOfblk := 0
   631  		testutil.ForEachObject(rel, func(blk handle.Object) (err error) {
   632  			if blk.IsAppendable() {
   633  				view, err := blk.GetColumnDataById(context.Background(), 0, 3, common.DefaultAllocator)
   634  				assert.NoError(t, err)
   635  				defer view.Close()
   636  				cntOfAblk += blk.BlkCnt()
   637  				return nil
   638  			}
   639  			metaLoc := blk.GetMeta().(*catalog.ObjectEntry).GetLocation()
   640  			metaLoc.SetID(0)
   641  			metaLoc.SetRows(schema.BlockMaxRows)
   642  			assert.True(t, !metaLoc.IsEmpty())
   643  			if bytes.Equal(metaLoc, metaLoc1) {
   644  				view, err := blk.GetColumnDataById(context.Background(), 0, 2, common.DefaultAllocator)
   645  				assert.NoError(t, err)
   646  				defer view.Close()
   647  				assert.True(t, view.GetData().Equals(bats[0].Vecs[2]))
   648  			} else {
   649  				view, err := blk.GetColumnDataById(context.Background(), 1, 3, common.DefaultAllocator)
   650  				assert.NoError(t, err)
   651  				defer view.Close()
   652  				assert.True(t, view.GetData().Equals(bats[1].Vecs[3]))
   653  
   654  			}
   655  			cntOfblk += blk.BlkCnt()
   656  			return
   657  		})
   658  		assert.Equal(t, 2, cntOfblk)
   659  		assert.Equal(t, 2, cntOfAblk)
   660  		assert.Nil(t, txn.Commit(context.Background()))
   661  
   662  		//check count of committed Objects.
   663  		cntOfAobj := 0
   664  		cntOfobj := 0
   665  		txn, rel = testutil.GetRelation(t, 0, db, "db", schema.Name)
   666  		testutil.ForEachObject(rel, func(obj handle.Object) (err error) {
   667  			if obj.IsAppendable() {
   668  				cntOfAobj++
   669  				return
   670  			}
   671  			cntOfobj++
   672  			return
   673  		})
   674  		assert.True(t, cntOfobj == 1)
   675  		assert.True(t, cntOfAobj == 2)
   676  		assert.Nil(t, txn.Commit(context.Background()))
   677  	}
   678  }
   679  
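        // TestCompactMemAlter: add a column through AlterTable, flush the in-memory
        // block, and check the column types and data of the flushed object.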
   680  func TestCompactMemAlter(t *testing.T) {
   681  	defer testutils.AfterTest(t)()
   682  	testutils.EnsureNoLeak(t)
   683  	ctx := context.Background()
   684  
   685  	opts := config.WithLongScanAndCKPOpts(nil)
   686  	db := testutil.InitTestDB(ctx, ModuleName, t, opts)
   687  	defer db.Close()
   688  
   689  	worker := ops.NewOpWorker(context.Background(), "xx")
   690  	worker.Start()
   691  	defer worker.Stop()
   692  	schema := catalog.MockSchemaAll(5, 2)
   693  	schema.BlockMaxRows = 20
   694  	schema.ObjectMaxBlocks = 2
   695  	bat := catalog.MockBatch(schema, int(schema.BlockMaxRows))
   696  	defer bat.Close()
   697  	testutil.CreateRelationAndAppend(t, 0, db, "db", schema, bat, true)
   698  
   699  	// Alter: add a column to the last
   700  	{
   701  		txn, rel := testutil.GetDefaultRelation(t, db, schema.Name)
   702  		err := rel.AlterTable(context.TODO(), api.NewAddColumnReq(0, 0, "xyz", types.NewProtoType(types.T_char), 5))
   703  		require.NoError(t, err)
   704  		require.Nil(t, txn.Commit(context.Background()))
   705  	}
   706  	var newBlockFp *common.ID
   707  	{
   708  		txn, rel := testutil.GetDefaultRelation(t, db, schema.Name)
   709  		blkMeta := testutil.GetOneBlockMeta(rel)
   710  		// ablk-0 & nablk-1
   711  		task, err := jobs.NewFlushTableTailTask(tasks.WaitableCtx, txn, []*catalog.ObjectEntry{blkMeta}, db.Runtime, txn.GetStartTS())
   712  		assert.NoError(t, err)
   713  		worker.SendOp(task)
   714  		err = task.WaitDone(ctx)
   715  		assert.NoError(t, err)
   716  		assert.NoError(t, txn.Commit(context.Background()))
   717  		newBlockFp = task.GetCreatedObjects().Fingerprint()
   718  	}
   719  	{
   720  		txn, rel := testutil.GetDefaultRelation(t, db, schema.Name)
   721  		obj, err := rel.GetObject(newBlockFp.ObjectID())
   722  		assert.Nil(t, err)
   723  		for i := 0; i <= 5; i++ {
   724  			view, err := obj.GetColumnDataById(context.Background(), 0, i, common.DefaultAllocator)
   725  			assert.NoError(t, err)
   726  			defer view.Close()
   727  			if i < 5 {
   728  				require.Equal(t, bat.Vecs[i].GetType().Oid, view.GetData().GetType().Oid)
   729  			} else {
   730  				require.Equal(t, types.T_char.ToType().Oid, view.GetData().GetType().Oid)
   731  			}
   732  			if i == 3 {
   733  				assert.True(t, view.GetData().Equals(bat.Vecs[3]))
   734  			}
   735  		}
   736  		require.NoError(t, txn.Commit(context.Background()))
   737  	}
   738  }
   739  
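        // TestFlushTableMergeOrder: append rows with a fake primary key, delete two
        // rows in every block, then flush the table tail through the worker.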
   740  func TestFlushTableMergeOrder(t *testing.T) {
   741  	defer testutils.AfterTest(t)()
   742  	testutils.EnsureNoLeak(t)
   743  	ctx := context.Background()
   744  
   745  	opts := config.WithLongScanAndCKPOpts(nil)
   746  	tae := testutil.NewTestEngine(ctx, ModuleName, t, opts)
   747  	defer tae.Close()
   748  
   749  	worker := ops.NewOpWorker(context.Background(), "xx")
   750  	worker.Start()
   751  	defer worker.Stop()
   752  
   753  	schema := catalog.NewEmptySchema("test")
   754  	schema.AppendCol("aa", types.T_int64.ToType())
   755  	schema.AppendCol("bb", types.T_int32.ToType())
   756  	schema.AppendFakePKCol()
   757  	schema.BlockMaxRows = 78
   758  	schema.ObjectMaxBlocks = 256
   759  	require.NoError(t, schema.Finalize(false))
   760  	tae.BindSchema(schema)
   761  
   762  	// new batch for the aa and bb vectors, and fill aa and bb with some random values
   763  	bat := containers.NewBatch()
   764  	bat.AddVector("aa", containers.NewVector(types.T_int64.ToType()))
   765  	bat.AddVector("bb", containers.NewVector(types.T_int32.ToType()))
   766  
   767  	dedup := make(map[int32]bool)
   768  
   769  	rows := 500
   770  
   771  	for i := 0; i < rows; i++ {
   772  		bb := int32(rand.Intn(100000))
   773  		if _, ok := dedup[bb]; ok {
   774  			continue
   775  		} else {
   776  			dedup[bb] = true
   777  		}
   778  		aa := int64(20000000 + bb)
   779  		bat.Vecs[0].Append(aa, false)
   780  		bat.Vecs[1].Append(bb, false)
   781  	}
   782  
   783  	defer bat.Close()
   784  	testutil.CreateRelationAndAppend(t, 0, tae.DB, "db", schema, bat, true)
   785  
   786  	{
   787  		txn, rel := testutil.GetDefaultRelation(t, tae.DB, schema.Name)
   788  		it := rel.MakeObjectIt()
   789  		for ; it.Valid(); it.Next() {
   790  			blk := it.GetObject()
   791  			for i := 0; i < blk.BlkCnt(); i++ {
   792  				blk.RangeDelete(uint16(i), 0, 0, handle.DT_Normal, common.DefaultAllocator)
   793  				blk.RangeDelete(uint16(i), 3, 3, handle.DT_Normal, common.DefaultAllocator)
   794  
   795  			}
   796  		}
   797  		require.NoError(t, txn.Commit(context.Background()))
   798  	}
   799  
   800  	txn, rel := testutil.GetDefaultRelation(t, tae.DB, schema.Name)
   801  	blkMetas := testutil.GetAllBlockMetas(rel)
   802  	task, err := jobs.NewFlushTableTailTask(tasks.WaitableCtx, txn, blkMetas, tae.DB.Runtime, types.MaxTs())
   803  	require.NoError(t, err)
   804  	worker.SendOp(task)
   805  	err = task.WaitDone(ctx)
   806  	require.NoError(t, err)
   807  	require.NoError(t, txn.Commit(context.Background()))
   808  }
   809  
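        // TestFlushTableMergeOrderPK: the primary-key variant; delete roughly half of
        // the rows before flushing and re-check the row count after a restart.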
   810  func TestFlushTableMergeOrderPK(t *testing.T) {
   811  	defer testutils.AfterTest(t)()
   812  	testutils.EnsureNoLeak(t)
   813  	ctx := context.Background()
   814  
   815  	opts := config.WithLongScanAndCKPOpts(nil)
   816  	tae := testutil.NewTestEngine(ctx, ModuleName, t, opts)
   817  	defer tae.Close()
   818  
   819  	worker := ops.NewOpWorker(context.Background(), "xx")
   820  	worker.Start()
   821  	defer worker.Stop()
   822  
   823  	schema := catalog.NewEmptySchema("test")
   824  	schema.AppendPKCol("aa", types.T_int64.ToType(), 0)
   825  	schema.AppendCol("bb", types.T_int32.ToType())
   826  	schema.BlockMaxRows = 78
   827  	schema.ObjectMaxBlocks = 256
   828  	require.NoError(t, schema.Finalize(false))
   829  	tae.BindSchema(schema)
   830  
   831  	// new batch for the aa and bb vectors, and fill aa and bb with some random values
   832  	bat := containers.NewBatch()
   833  	bat.AddVector("aa", containers.NewVector(types.T_int64.ToType()))
   834  	bat.AddVector("bb", containers.NewVector(types.T_int32.ToType()))
   835  
   836  	dedup := make(map[int32]bool)
   837  
   838  	target := 500
   839  	rows := 0
   840  
   841  	for i := 0; i < target; i++ {
   842  		bb := int32(rand.Intn(100000))
   843  		if _, ok := dedup[bb]; ok {
   844  			continue
   845  		} else {
   846  			dedup[bb] = true
   847  		}
   848  		rows++
   849  		aa := int64(20000000 + bb)
   850  		bat.Vecs[0].Append(aa, false)
   851  		bat.Vecs[1].Append(bb, false)
   852  	}
   853  
   854  	defer bat.Close()
   855  	testutil.CreateRelationAndAppend(t, 0, tae.DB, "db", schema, bat, true)
   856  
   857  	deleted := 0
   858  	{
   859  		txn, rel := testutil.GetDefaultRelation(t, tae.DB, schema.Name)
   860  		for x := range dedup {
   861  			err := rel.DeleteByFilter(context.Background(), handle.NewEQFilter(int64(x+20000000)))
   862  			require.NoError(t, err)
   863  			deleted++
   864  			if deleted > rows/2 {
   865  				break
   866  			}
   867  		}
   868  		require.NoError(t, txn.Commit(context.Background()))
   869  	}
   870  
   871  	txn, rel := testutil.GetDefaultRelation(t, tae.DB, schema.Name)
   872  	blkMetas := testutil.GetAllBlockMetas(rel)
   873  	task, err := jobs.NewFlushTableTailTask(tasks.WaitableCtx, txn, blkMetas, tae.DB.Runtime, types.MaxTs())
   874  	require.NoError(t, err)
   875  	worker.SendOp(task)
   876  	err = task.WaitDone(ctx)
   877  	require.NoError(t, err)
   878  	require.NoError(t, txn.Commit(context.Background()))
   879  
   880  	tae.Restart(ctx)
   881  	tae.CheckRowsByScan(rows-deleted, true)
   882  }
   883  
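        // TestFlushTableNoPk: flush a table that has no real primary key and verify
        // all 100 rows are still visible after a restart.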
   884  func TestFlushTableNoPk(t *testing.T) {
   885  	defer testutils.AfterTest(t)()
   886  	testutils.EnsureNoLeak(t)
   887  	ctx := context.Background()
   888  
   889  	opts := config.WithLongScanAndCKPOpts(nil)
   890  	// db := initDB(ctx, t, opts)
   891  	tae := testutil.NewTestEngine(ctx, ModuleName, t, opts)
   892  	defer tae.Close()
   893  
   894  	worker := ops.NewOpWorker(context.Background(), "xx")
   895  	worker.Start()
   896  	defer worker.Stop()
   897  	schema := catalog.MockSchemaAll(13, -1)
   898  	schema.Name = "table"
   899  	schema.BlockMaxRows = 20
   900  	schema.ObjectMaxBlocks = 10
   901  	tae.BindSchema(schema)
   902  	bat := catalog.MockBatch(schema, 2*(int(schema.BlockMaxRows)*2+int(schema.BlockMaxRows/2)))
   903  	defer bat.Close()
   904  	testutil.CreateRelationAndAppend(t, 0, tae.DB, "db", schema, bat, true)
   905  
   906  	txn, rel := testutil.GetDefaultRelation(t, tae.DB, schema.Name)
   907  	blkMetas := testutil.GetAllBlockMetas(rel)
   908  	task, err := jobs.NewFlushTableTailTask(tasks.WaitableCtx, txn, blkMetas, tae.DB.Runtime, types.MaxTs())
   909  	require.NoError(t, err)
   910  	worker.SendOp(task)
   911  	err = task.WaitDone(ctx)
   912  	require.NoError(t, err)
   913  	require.NoError(t, txn.Commit(context.Background()))
   914  
   915  	tae.Restart(ctx)
   916  	tae.CheckRowsByScan(100, true)
   917  }
   918  
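        // TestFlushTableErrorHandle: inject a flush bailout via TestFlushBailoutPos1
        // in the context and check that the failing flush task does not break
        // repeated create/insert/drop cycles.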
   919  func TestFlushTableErrorHandle(t *testing.T) {
   920  	ctx := context.WithValue(context.Background(), jobs.TestFlushBailoutPos1{}, "bail")
   921  
   922  	opts := config.WithLongScanAndCKPOpts(nil)
   923  	opts.Ctx = ctx
   924  
   925  	tae := testutil.NewTestEngine(context.Background(), ModuleName, t, opts)
   926  	defer tae.Close()
   927  
   928  	worker := ops.NewOpWorker(ctx, "xx")
   929  	worker.Start()
   930  	defer worker.Stop()
   931  	schema := catalog.MockSchemaAll(13, 2)
   932  	schema.Name = "table"
   933  	schema.BlockMaxRows = 20
   934  	schema.ObjectMaxBlocks = 10
   935  	bat := catalog.MockBatch(schema, (int(schema.BlockMaxRows)*2 + int(schema.BlockMaxRows/2)))
   936  
   937  	txn, _ := tae.StartTxn(nil)
   938  	txn.CreateDatabase("db", "", "")
   939  	txn.Commit(ctx)
   940  
   941  	createAndInsert := func() {
   942  		testutil.CreateRelationAndAppend(t, 0, tae.DB, "db", schema, bat, false)
   943  	}
   944  
   945  	droptable := func() {
   946  		txn, _ := tae.StartTxn(nil)
   947  		d, _ := txn.GetDatabase("db")
   948  		d.DropRelationByName(schema.Name)
   949  		txn.Commit(ctx)
   950  	}
   951  
   952  	flushTable := func() {
   953  		txn, rel := testutil.GetDefaultRelation(t, tae.DB, schema.Name)
   954  		blkMetas := testutil.GetAllBlockMetas(rel)
   955  		task, err := jobs.NewFlushTableTailTask(tasks.WaitableCtx, txn, blkMetas, tae.Runtime, types.MaxTs())
   956  		require.NoError(t, err)
   957  		worker.SendOp(task)
   958  		err = task.WaitDone(ctx)
   959  		require.Error(t, err)
   960  		require.NoError(t, txn.Commit(context.Background()))
   961  	}
   962  	for i := 0; i < 20; i++ {
   963  		createAndInsert()
   964  		flushTable()
   965  		droptable()
   966  	}
   967  }
   968  
   969  func TestFlushTableErrorHandle2(t *testing.T) {
   970  	ctx := context.WithValue(context.Background(), jobs.TestFlushBailoutPos2{}, "bail")
   971  
   972  	opts := config.WithLongScanAndCKPOpts(nil)
   973  	opts.Ctx = ctx
   974  
   975  	tae := testutil.NewTestEngine(context.Background(), ModuleName, t, opts)
   976  	defer tae.Close()
   977  
   978  	worker := ops.NewOpWorker(ctx, "xx")
   979  	worker.Start()
   980  	defer worker.Stop()
   981  	goodworker := ops.NewOpWorker(context.Background(), "goodworker")
   982  	goodworker.Start()
   983  	defer goodworker.Stop()
   984  	schema := catalog.MockSchemaAll(13, 2)
   985  	schema.Name = "table"
   986  	schema.BlockMaxRows = 20
   987  	bats := catalog.MockBatch(schema, (int(schema.BlockMaxRows)*2 + int(schema.BlockMaxRows/2))).Split(2)
   988  	bat1, bat2 := bats[0], bats[1]
   989  	defer bat1.Close()
   990  	defer bat2.Close()
   991  	flushTable := func(worker *ops.OpWorker) {
   992  		txn, rel := testutil.GetDefaultRelation(t, tae.DB, schema.Name)
   993  		blkMetas := testutil.GetAllBlockMetas(rel)
   994  		task, err := jobs.NewFlushTableTailTask(tasks.WaitableCtx, txn, blkMetas, tae.Runtime, types.MaxTs())
   995  		require.NoError(t, err)
   996  		worker.SendOp(task)
   997  		err = task.WaitDone(ctx)
   998  		if err != nil {
   999  			t.Logf("flush task outer wait %v", err)
  1000  		}
  1001  		require.NoError(t, txn.Commit(context.Background()))
  1002  	}
  1003  	testutil.CreateRelationAndAppend(t, 0, tae.DB, "db", schema, bat1, true)
  1004  	flushTable(goodworker)
  1005  
  1006  	{
  1007  		txn, rel := testutil.GetDefaultRelation(t, tae.DB, schema.Name)
  1008  		require.NoError(t, rel.DeleteByFilter(context.Background(), handle.NewEQFilter(bat1.Vecs[2].Get(1))))
  1009  		require.NoError(t, rel.Append(ctx, bat2))
  1010  		require.NoError(t, txn.Commit(context.Background()))
  1011  	}
  1012  
  1013  	flushTable(worker)
  1014  	t.Log(tae.Catalog.SimplePPString(common.PPL0))
  1015  }
  1016  
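        // TestFlushTabletail: interleave deletes (one committed only after a flush),
        // appends and repeated table-tail flushes, then verify the per-block delete
        // counts and the 87 remaining rows, before and after a restart.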
  1017  func TestFlushTabletail(t *testing.T) {
  1018  	// TODO
  1019  	defer testutils.AfterTest(t)()
  1020  	testutils.EnsureNoLeak(t)
  1021  	ctx := context.Background()
  1022  
  1023  	opts := config.WithLongScanAndCKPOpts(nil)
  1024  	// db := initDB(ctx, t, opts)
  1025  	tae := testutil.NewTestEngine(ctx, ModuleName, t, opts)
  1026  	defer tae.Close()
  1027  
  1028  	worker := ops.NewOpWorker(context.Background(), "xx")
  1029  	worker.Start()
  1030  	defer worker.Stop()
  1031  	schema := catalog.MockSchemaAll(13, 2)
  1032  	schema.Name = "table"
  1033  	schema.BlockMaxRows = 20
  1034  	schema.ObjectMaxBlocks = 10
  1035  	bats := catalog.MockBatch(schema, 2*(int(schema.BlockMaxRows)*2+int(schema.BlockMaxRows/2))).Split(2)
  1036  	bat := bats[0]  // 50 rows
  1037  	bat2 := bats[1] // 50 rows
  1038  
  1039  	defer bat.Close()
  1040  	defer bat2.Close()
  1041  	testutil.CreateRelationAndAppend(t, 0, tae.DB, "db", schema, bat, true)
  1042  
  1043  	{
  1044  		txn, rel := testutil.GetDefaultRelation(t, tae.DB, schema.Name)
  1045  		require.NoError(t, rel.DeleteByFilter(context.Background(), handle.NewEQFilter(bat.Vecs[2].Get(1))))
  1046  		require.NoError(t, rel.DeleteByFilter(context.Background(), handle.NewEQFilter(bat.Vecs[2].Get(19)))) // ab0 has 2
  1047  		require.NoError(t, rel.DeleteByFilter(context.Background(), handle.NewEQFilter(bat.Vecs[2].Get(21)))) // ab1 has 1
  1048  		require.NoError(t, rel.DeleteByFilter(context.Background(), handle.NewEQFilter(bat.Vecs[2].Get(45)))) // ab2 has 1
  1049  
  1050  		require.NoError(t, txn.Commit(context.Background()))
  1051  	}
  1052  
  1053  	var commitDeleteAfterFlush txnif.AsyncTxn
  1054  	{
  1055  		var rel handle.Relation
  1056  		commitDeleteAfterFlush, rel = testutil.GetDefaultRelation(t, tae.DB, schema.Name)
  1057  		require.NoError(t, rel.DeleteByFilter(context.Background(), handle.NewEQFilter(bat.Vecs[2].Get(42)))) // expect to transfer to nablk1
  1058  	}
  1059  
  1060  	flushTable := func() {
  1061  		txn, rel := testutil.GetDefaultRelation(t, tae.DB, schema.Name)
  1062  		blkMetas := testutil.GetAllBlockMetas(rel)
  1063  		task, err := jobs.NewFlushTableTailTask(tasks.WaitableCtx, txn, blkMetas, tae.Runtime, types.MaxTs())
  1064  		require.NoError(t, err)
  1065  		worker.SendOp(task)
  1066  		err = task.WaitDone(ctx)
  1067  		require.NoError(t, err)
  1068  		require.NoError(t, txn.Commit(context.Background()))
  1069  	}
  1070  
  1071  	flushTable()
  1072  
  1073  	{
  1074  		require.NoError(t, commitDeleteAfterFlush.Commit(context.Background()))
  1075  		txn, rel := testutil.GetDefaultRelation(t, tae.DB, schema.Name)
  1076  		_, _, err := rel.GetByFilter(context.Background(), handle.NewEQFilter(bat.Vecs[2].Get(42)))
  1077  		require.True(t, moerr.IsMoErrCode(err, moerr.ErrNotFound))
  1078  
  1079  		require.NoError(t, rel.Append(context.Background(), bat2))
  1080  		require.NoError(t, txn.Commit(context.Background()))
  1081  	}
  1082  	{
  1083  		txn, rel := testutil.GetDefaultRelation(t, tae.DB, schema.Name)
  1084  		require.NoError(t, rel.DeleteByFilter(context.Background(), handle.NewEQFilter(bat.Vecs[2].Get(15))))
  1085  		require.NoError(t, rel.DeleteByFilter(context.Background(), handle.NewEQFilter(bat.Vecs[2].Get(20)))) // nab0 has 2
  1086  		require.NoError(t, rel.DeleteByFilter(context.Background(), handle.NewEQFilter(bat.Vecs[2].Get(27)))) // nab1 has 2
  1087  		require.NoError(t, rel.DeleteByFilter(context.Background(), handle.NewEQFilter(bat2.Vecs[2].Get(11))))
  1088  		require.NoError(t, rel.DeleteByFilter(context.Background(), handle.NewEQFilter(bat2.Vecs[2].Get(15)))) // ab3 has 2, ab4 and ab5 has 0
  1089  		require.NoError(t, txn.Commit(context.Background()))
  1090  	}
  1091  
  1092  	flushTable()
  1093  
  1094  	{
  1095  		txn, rel := testutil.GetDefaultRelation(t, tae.DB, schema.Name)
  1096  		require.NoError(t, rel.DeleteByFilter(context.Background(), handle.NewEQFilter(bat.Vecs[2].Get(10)))) // nab0 has 2+1, nab1 has 2
  1097  		require.NoError(t, rel.DeleteByFilter(context.Background(), handle.NewEQFilter(bat2.Vecs[2].Get(44))))
  1098  		require.NoError(t, rel.DeleteByFilter(context.Background(), handle.NewEQFilter(bat2.Vecs[2].Get(45)))) // nab5 has 2
  1099  		require.NoError(t, txn.Commit(context.Background()))
  1100  	}
  1101  
  1102  	flushTable()
  1103  
  1104  	{
  1105  		txn, rel := testutil.GetDefaultRelation(t, tae.DB, schema.Name)
  1106  		it := rel.MakeObjectIt()
  1107  		// 6 nablks have 87 rows
  1108  		dels := []int{3, 2, 0, 0, 0, 2}
  1109  		total := 0
  1110  		for i := 0; it.Valid(); it.Next() {
  1111  			obj := it.GetObject()
  1112  			for j := uint16(0); j < uint16(obj.BlkCnt()); j++ {
  1113  				view, err := obj.GetColumnDataById(context.Background(), j, 2, common.DefaultAllocator)
  1114  				require.NoError(t, err)
  1115  				defer view.Close()
  1116  				viewDel := 0
  1117  				if view.DeleteMask != nil {
  1118  					viewDel = view.DeleteMask.GetCardinality()
  1119  				}
  1120  				require.Equal(t, dels[i], viewDel)
  1121  				view.ApplyDeletes()
  1122  				total += view.Length()
  1123  				i++
  1124  			}
  1125  		}
  1126  		require.Equal(t, 87, total)
  1127  		require.NoError(t, txn.Commit(context.Background()))
  1128  	}
  1129  
  1130  	t.Log(tae.Catalog.SimplePPString(common.PPL2))
  1131  
  1132  	tae.Restart(ctx)
  1133  	{
  1134  		txn, rel := testutil.GetDefaultRelation(t, tae.DB, schema.Name)
  1135  		it := rel.MakeObjectIt()
  1136  		// 6 nablks has 87 rows
  1137  		// 6 nablks have 87 rows
  1138  		total := 0
  1139  		idxs := make([]int, 0, len(schema.ColDefs)-1)
  1140  		for i := 0; i < len(schema.ColDefs)-1; i++ {
  1141  			idxs = append(idxs, i)
  1142  		}
  1143  		for i := 0; it.Valid(); it.Next() {
  1144  			obj := it.GetObject()
  1145  			for j := uint16(0); j < uint16(obj.BlkCnt()); j++ {
  1146  				views, err := obj.GetColumnDataByIds(context.Background(), j, idxs, common.DefaultAllocator)
  1147  				require.NoError(t, err)
  1148  				defer views.Close()
  1149  				for j, view := range views.Columns {
  1150  					require.Equal(t, schema.ColDefs[j].Type.Oid, view.GetData().GetType().Oid)
  1151  				}
  1152  
  1153  				viewDel := 0
  1154  				if views.DeleteMask != nil {
  1155  					viewDel = views.DeleteMask.GetCardinality()
  1156  				}
  1157  				require.Equal(t, dels[i], viewDel)
  1158  				views.ApplyDeletes()
  1159  				total += views.Columns[0].Length()
  1160  				i++
  1161  			}
  1162  		}
  1163  		require.Equal(t, 87, total)
  1164  		require.NoError(t, txn.Commit(context.Background()))
  1165  	}
  1166  }
  1167  
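        // TestRollback1: an object created in a txn disappears from the catalog after
        // rollback and stays after commit.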
  1168  func TestRollback1(t *testing.T) {
  1169  	defer testutils.AfterTest(t)()
  1170  	testutils.EnsureNoLeak(t)
  1171  	ctx := context.Background()
  1172  
  1173  	db := testutil.InitTestDB(ctx, ModuleName, t, nil)
  1174  	defer db.Close()
  1175  	schema := catalog.MockSchema(2, 0)
  1176  
  1177  	testutil.CreateRelation(t, db, "db", schema, true)
  1178  
  1179  	objCnt := 0
  1180  	onSegFn := func(object *catalog.ObjectEntry) error {
  1181  		objCnt++
  1182  		return nil
  1183  	}
  1184  	processor := new(catalog.LoopProcessor)
  1185  	processor.ObjectFn = onSegFn
  1186  	txn, rel := testutil.GetDefaultRelation(t, db, schema.Name)
  1187  	_, err := rel.CreateObject(false)
  1188  	assert.Nil(t, err)
  1189  
  1190  	tableMeta := rel.GetMeta().(*catalog.TableEntry)
  1191  	err = tableMeta.RecurLoop(processor)
  1192  	assert.Nil(t, err)
  1193  	assert.Equal(t, objCnt, 1)
  1194  
  1195  	assert.Nil(t, txn.Rollback(context.Background()))
  1196  	objCnt = 0
  1197  	err = tableMeta.RecurLoop(processor)
  1198  	assert.Nil(t, err)
  1199  	assert.Equal(t, objCnt, 0)
  1200  
  1201  	txn, rel = testutil.GetDefaultRelation(t, db, schema.Name)
  1202  	obj, err := rel.CreateObject(false)
  1203  	assert.Nil(t, err)
  1204  	objMeta := obj.GetMeta().(*catalog.ObjectEntry)
  1205  	assert.Nil(t, txn.Commit(context.Background()))
  1206  	objCnt = 0
  1207  	err = tableMeta.RecurLoop(processor)
  1208  	assert.Nil(t, err)
  1209  	assert.Equal(t, objCnt, 1)
  1210  
  1211  	txn, rel = testutil.GetDefaultRelation(t, db, schema.Name)
  1212  	_, err = rel.GetObject(&objMeta.ID)
  1213  	assert.Nil(t, err)
  1214  	err = tableMeta.RecurLoop(processor)
  1215  	assert.Nil(t, err)
  1216  
  1217  	err = txn.Rollback(context.Background())
  1218  	assert.Nil(t, err)
  1219  	err = tableMeta.RecurLoop(processor)
  1220  	assert.Nil(t, err)
  1221  
  1222  	t.Log(db.Catalog.SimplePPString(common.PPL1))
  1223  }
  1224  
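        // TestMVCC1: rows appended by a later committed txn are not visible to an
        // older relation handle; block reads through the old handle only return the
        // rows of the first batch.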
  1225  func TestMVCC1(t *testing.T) {
  1226  	defer testutils.AfterTest(t)()
  1227  	testutils.EnsureNoLeak(t)
  1228  	ctx := context.Background()
  1229  
  1230  	db := testutil.InitTestDB(ctx, ModuleName, t, nil)
  1231  	defer db.Close()
  1232  	schema := catalog.MockSchemaAll(13, 2)
  1233  	schema.BlockMaxRows = 40
  1234  	schema.ObjectMaxBlocks = 2
  1235  	bat := catalog.MockBatch(schema, int(schema.BlockMaxRows*10))
  1236  	defer bat.Close()
  1237  	bats := bat.Split(40)
  1238  
  1239  	txn, _, rel := testutil.CreateRelationNoCommit(t, db, "db", schema, true)
  1240  	err := rel.Append(context.Background(), bats[0])
  1241  	assert.NoError(t, err)
  1242  
  1243  	row := 5
  1244  	expectVal := bats[0].Vecs[schema.GetSingleSortKeyIdx()].Get(row)
  1245  	filter := handle.NewEQFilter(expectVal)
  1246  	actualVal, _, err := rel.GetValueByFilter(context.Background(), filter, schema.GetSingleSortKeyIdx())
  1247  	assert.NoError(t, err)
  1248  	assert.Equal(t, expectVal, actualVal)
  1249  	assert.NoError(t, txn.Commit(context.Background()))
  1250  
  1251  	_, rel = testutil.GetDefaultRelation(t, db, schema.Name)
  1252  	actualVal, _, err = rel.GetValueByFilter(context.Background(), filter, schema.GetSingleSortKeyIdx())
  1253  	assert.NoError(t, err)
  1254  	assert.Equal(t, expectVal, actualVal)
  1255  
  1256  	txn2, rel2 := testutil.GetDefaultRelation(t, db, schema.Name)
  1257  	err = rel2.Append(context.Background(), bats[1])
  1258  	assert.NoError(t, err)
  1259  
  1260  	val2 := bats[1].Vecs[schema.GetSingleSortKeyIdx()].Get(row)
  1261  	filter.Val = val2
  1262  	actualVal, _, err = rel2.GetValueByFilter(context.Background(), filter, schema.GetSingleSortKeyIdx())
  1263  	assert.NoError(t, err)
  1264  	assert.Equal(t, val2, actualVal)
  1265  
  1266  	assert.NoError(t, txn2.Commit(context.Background()))
  1267  
  1268  	_, _, err = rel.GetByFilter(context.Background(), filter)
  1269  	assert.Error(t, err)
  1270  	var id *common.ID
  1271  
  1272  	{
  1273  		txn, rel := testutil.GetDefaultRelation(t, db, schema.Name)
  1274  		id, _, err = rel.GetByFilter(context.Background(), filter)
  1275  		assert.NoError(t, err)
  1276  		assert.NoError(t, txn.Commit(context.Background()))
  1277  	}
  1278  
  1279  	it := rel.MakeObjectIt()
  1280  	for it.Valid() {
  1281  		block := it.GetObject()
  1282  		bid := block.Fingerprint()
  1283  		_, targetBlkOffset := id.BlockID.Offsets()
  1284  		if bid.ObjectID() == id.ObjectID() {
  1285  			view, err := block.GetColumnDataById(context.Background(), targetBlkOffset, schema.GetSingleSortKeyIdx(), common.DefaultAllocator)
  1286  			assert.Nil(t, err)
  1287  			defer view.Close()
  1288  			assert.Nil(t, view.DeleteMask)
  1289  			assert.NotNil(t, view.GetData())
  1290  			t.Log(view.GetData().String())
  1291  			assert.Equal(t, bats[0].Vecs[0].Length(), view.Length())
  1292  		}
  1293  		it.Next()
  1294  	}
  1295  }
  1296  
  1297  // 1. Txn1 create db, relation and append 10 rows. committed -- PASS
  1298  // 2. Txn2 append 10 rows. Get the 5th append row value -- PASS
  1299  // 3. Txn2 delete the 5th row value in uncommitted state -- PASS
  1300  // 4. Txn2 get the 5th row value -- NotFound
  1301  func TestMVCC2(t *testing.T) {
  1302  	defer testutils.AfterTest(t)()
  1303  	testutils.EnsureNoLeak(t)
  1304  	ctx := context.Background()
  1305  
  1306  	db := testutil.InitTestDB(ctx, ModuleName, t, nil)
  1307  	defer db.Close()
  1308  	schema := catalog.MockSchemaAll(13, 2)
  1309  	schema.BlockMaxRows = 100
  1310  	schema.ObjectMaxBlocks = 2
  1311  	bat := catalog.MockBatch(schema, int(schema.BlockMaxRows))
  1312  	defer bat.Close()
  1313  	bats := bat.Split(10)
  1314  	{
  1315  		txn, _, rel := testutil.CreateRelationNoCommit(t, db, "db", schema, true)
  1316  		err := rel.Append(context.Background(), bats[0])
  1317  		assert.NoError(t, err)
  1318  		val := bats[0].Vecs[schema.GetSingleSortKeyIdx()].Get(5)
  1319  		filter := handle.NewEQFilter(val)
  1320  		_, _, err = rel.GetByFilter(context.Background(), filter)
  1321  		assert.NoError(t, err)
  1322  		assert.NoError(t, txn.Commit(context.Background()))
  1323  	}
  1324  	{
  1325  		txn, rel := testutil.GetDefaultRelation(t, db, schema.Name)
  1326  		err := rel.Append(context.Background(), bats[1])
  1327  		assert.NoError(t, err)
  1328  		val := bats[1].Vecs[schema.GetSingleSortKeyIdx()].Get(5)
  1329  		filter := handle.NewEQFilter(val)
  1330  		err = rel.DeleteByFilter(context.Background(), filter)
  1331  		assert.NoError(t, err)
  1332  
  1333  		_, _, err = rel.GetByFilter(context.Background(), filter)
  1334  		assert.Error(t, err)
  1335  		t.Log(err)
  1336  		assert.NoError(t, txn.Commit(context.Background()))
  1337  	}
  1338  	{
  1339  		txn, rel := testutil.GetDefaultRelation(t, db, schema.Name)
  1340  		it := rel.MakeObjectIt()
  1341  		for it.Valid() {
  1342  			obj := it.GetObject()
  1343  			view, err := obj.GetColumnDataByName(context.Background(), 0, schema.GetSingleSortKey().Name, common.DefaultAllocator)
  1344  			assert.Nil(t, err)
  1345  			assert.Nil(t, view.DeleteMask)
  1346  			assert.Equal(t, bats[1].Vecs[0].Length()*2-1, view.Length())
  1347  			// TODO: exclude deleted rows when applying appends
  1348  			it.Next()
  1349  			view.Close()
  1350  		}
  1351  		assert.NoError(t, txn.Commit(context.Background()))
  1352  	}
  1353  }
  1354  
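        // TestUnload1: append one block per batch through a single-worker pool, then
        // repeatedly scan every block and expect BlockMaxRows rows each time.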
  1355  func TestUnload1(t *testing.T) {
  1356  	defer testutils.AfterTest(t)()
  1357  	testutils.EnsureNoLeak(t)
  1358  	ctx := context.Background()
  1359  
  1360  	opts := new(options.Options)
  1361  	db := testutil.InitTestDB(ctx, ModuleName, t, opts)
  1362  	defer db.Close()
  1363  
  1364  	schema := catalog.MockSchemaAll(13, 2)
  1365  	schema.BlockMaxRows = 10
  1366  	schema.ObjectMaxBlocks = 2
  1367  
  1368  	bat := catalog.MockBatch(schema, int(schema.BlockMaxRows*2))
  1369  	defer bat.Close()
  1370  	bats := bat.Split(int(schema.BlockMaxRows))
  1371  	testutil.CreateRelation(t, db, "db", schema, true)
  1372  	var wg sync.WaitGroup
  1373  	pool, err := ants.NewPool(1)
  1374  	assert.Nil(t, err)
  1375  	defer pool.Release()
  1376  	for _, data := range bats {
  1377  		wg.Add(1)
  1378  		err := pool.Submit(testutil.AppendClosure(t, data, schema.Name, db, &wg))
  1379  		assert.Nil(t, err)
  1380  	}
  1381  	wg.Wait()
  1382  	{
  1383  		txn, rel := testutil.GetDefaultRelation(t, db, schema.Name)
  1384  		for i := 0; i < 10; i++ {
  1385  			it := rel.MakeObjectIt()
  1386  			for it.Valid() {
  1387  				blk := it.GetObject()
  1388  				for j := 0; j < blk.BlkCnt(); j++ {
  1389  					view, err := blk.GetColumnDataByName(context.Background(), uint16(j), schema.GetSingleSortKey().Name, common.DefaultAllocator)
  1390  					assert.Nil(t, err)
  1391  					defer view.Close()
  1392  					assert.Equal(t, int(schema.BlockMaxRows), view.Length())
  1393  				}
  1394  				it.Next()
  1395  			}
  1396  		}
  1397  		_ = txn.Commit(context.Background())
  1398  	}
  1399  }
  1400  
  1401  func TestUnload2(t *testing.T) {
  1402  	defer testutils.AfterTest(t)()
  1403  	testutils.EnsureNoLeak(t)
  1404  	ctx := context.Background()
  1405  
  1406  	opts := new(options.Options)
  1407  	db := testutil.InitTestDB(ctx, ModuleName, t, opts)
  1408  	defer db.Close()
  1409  
  1410  	schema1 := catalog.MockSchemaAll(13, 2)
  1411  	schema1.BlockMaxRows = 10
  1412  	schema1.ObjectMaxBlocks = 2
  1413  
  1414  	schema2 := catalog.MockSchemaAll(13, 2)
  1415  	schema2.BlockMaxRows = 10
  1416  	schema2.ObjectMaxBlocks = 2
  1417  	{
  1418  		txn, _ := db.StartTxn(nil)
  1419  		database, err := txn.CreateDatabase("db", "", "")
  1420  		assert.Nil(t, err)
  1421  		_, err = database.CreateRelation(schema1)
  1422  		assert.Nil(t, err)
  1423  		_, err = database.CreateRelation(schema2)
  1424  		assert.Nil(t, err)
  1425  		assert.Nil(t, txn.Commit(context.Background()))
  1426  	}
  1427  
  1428  	bat := catalog.MockBatch(schema1, int(schema1.BlockMaxRows*5+5))
  1429  	defer bat.Close()
  1430  	bats := bat.Split(bat.Length())
  1431  
  1432  	p, err := ants.NewPool(10)
  1433  	assert.Nil(t, err)
  1434  	defer p.Release()
  1435  	var wg sync.WaitGroup
  1436  	for i, data := range bats {
  1437  		wg.Add(1)
  1438  		name := schema1.Name
  1439  		if i%2 == 1 {
  1440  			name = schema2.Name
  1441  		}
  1442  		err := p.Submit(testutil.AppendClosure(t, data, name, db, &wg))
  1443  		assert.Nil(t, err)
  1444  	}
  1445  	wg.Wait()
  1446  
  1447  	{
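        		// Even-indexed batches went to schema1's table and odd-indexed ones to schema2's; look up the first pk value of each batch in the corresponding table.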
  1448  		txn, rel := testutil.GetDefaultRelation(t, db, schema1.Name)
  1449  		for i := 0; i < len(bats); i += 2 {
  1450  			data := bats[i]
  1451  			v := data.Vecs[schema1.GetSingleSortKeyIdx()].Get(0)
  1452  			filter := handle.NewEQFilter(v)
  1453  			_, _, err := rel.GetByFilter(context.Background(), filter)
  1454  			assert.NoError(t, err)
  1455  		}
  1456  		database, _ := txn.GetDatabase("db")
  1457  		rel, err = database.GetRelationByName(schema2.Name)
  1458  		assert.Nil(t, err)
  1459  		for i := 1; i < len(bats); i += 2 {
  1460  			data := bats[i]
  1461  			v := data.Vecs[schema1.GetSingleSortKeyIdx()].Get(0)
  1462  			filter := handle.NewEQFilter(v)
  1463  			_, _, err := rel.GetByFilter(context.Background(), filter)
  1464  			assert.NoError(t, err)
  1465  		}
  1466  		_ = txn.Commit(context.Background())
  1467  	}
  1468  }
  1469  
  1470  func TestDelete1(t *testing.T) {
  1471  	defer testutils.AfterTest(t)()
  1472  	testutils.EnsureNoLeak(t)
  1473  	ctx := context.Background()
  1474  
  1475  	tae := testutil.InitTestDB(ctx, ModuleName, t, nil)
  1476  	defer tae.Close()
  1477  
  1478  	schema := catalog.MockSchemaAll(3, 2)
  1479  	schema.BlockMaxRows = 10
  1480  	bat := catalog.MockBatch(schema, int(schema.BlockMaxRows))
  1481  	defer bat.Close()
  1482  	testutil.CreateRelationAndAppend(t, 0, tae, "db", schema, bat, true)
  1483  	var id *common.ID
  1484  	var row uint32
  1485  	{
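        		// Delete the row whose pk is at index 5 via RangeDelete and commit.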
  1486  		txn, rel := testutil.GetDefaultRelation(t, tae, schema.Name)
  1487  		pkCol := bat.Vecs[schema.GetSingleSortKeyIdx()]
  1488  		pkVal := pkCol.Get(5)
  1489  		filter := handle.NewEQFilter(pkVal)
  1490  		var err error
  1491  		id, row, err = rel.GetByFilter(context.Background(), filter)
  1492  		assert.NoError(t, err)
  1493  		err = rel.RangeDelete(id, row, row, handle.DT_Normal)
  1494  		assert.NoError(t, err)
  1495  		assert.NoError(t, txn.Commit(context.Background()))
  1496  	}
  1497  	{
  1498  		txn, rel := testutil.GetDefaultRelation(t, tae, schema.Name)
  1499  		pkCol := bat.Vecs[schema.GetSingleSortKeyIdx()]
  1500  		pkVal := pkCol.Get(5)
  1501  		filter := handle.NewEQFilter(pkVal)
  1502  		_, _, err := rel.GetByFilter(context.Background(), filter)
  1503  		assert.Error(t, err)
  1504  		assert.NoError(t, txn.Commit(context.Background()))
  1505  	}
  1506  	{
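        		// Flush the table tail so the appended block is persisted as an object.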
  1507  		txn, rel := testutil.GetDefaultRelation(t, tae, schema.Name)
  1508  		blkMeta := testutil.GetOneBlockMeta(rel)
  1509  		task, err := jobs.NewFlushTableTailTask(nil, txn, []*catalog.ObjectEntry{blkMeta}, tae.Runtime, txn.GetStartTS())
  1510  		assert.NoError(t, err)
  1511  		err = task.OnExec(context.Background())
  1512  		assert.NoError(t, err)
  1513  		assert.NoError(t, txn.Commit(context.Background()))
  1514  	}
  1515  	{
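        		// The committed delete is already applied (no delete mask); then delete row 0 in this txn and verify the mask and the failed pk lookup.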
  1516  		txn, rel := testutil.GetDefaultRelation(t, tae, schema.Name)
  1517  		blk := testutil.GetOneObject(rel)
  1518  		view, err := blk.GetColumnDataById(context.Background(), 0, schema.GetSingleSortKeyIdx(), common.DefaultAllocator)
  1519  		assert.NoError(t, err)
  1520  		defer view.Close()
  1521  		assert.Nil(t, view.DeleteMask)
  1522  		assert.Equal(t, bat.Vecs[0].Length()-1, view.Length())
  1523  
  1524  		err = blk.RangeDelete(0, 0, 0, handle.DT_Normal, common.DefaultAllocator)
  1525  		assert.NoError(t, err)
  1526  		view, err = blk.GetColumnDataById(context.Background(), 0, schema.GetSingleSortKeyIdx(), common.DefaultAllocator)
  1527  		assert.NoError(t, err)
  1528  		defer view.Close()
  1529  		assert.True(t, view.DeleteMask.Contains(0))
  1530  		v := bat.Vecs[schema.GetSingleSortKeyIdx()].Get(0)
  1531  		filter := handle.NewEQFilter(v)
  1532  		_, _, err = rel.GetByFilter(context.Background(), filter)
  1533  		assert.True(t, moerr.IsMoErrCode(err, moerr.ErrNotFound))
  1534  		assert.NoError(t, txn.Commit(context.Background()))
  1535  	}
  1536  	{
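        		// The delete of row 0 was committed above, so a fresh txn still sees it; roll back without making changes.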
  1537  		txn, rel := testutil.GetDefaultRelation(t, tae, schema.Name)
  1538  		blk := testutil.GetOneObject(rel)
  1539  		view, err := blk.GetColumnDataById(context.Background(), 0, schema.GetSingleSortKeyIdx(), common.DefaultAllocator)
  1540  		assert.NoError(t, err)
  1541  		defer view.Close()
  1542  		assert.True(t, view.DeleteMask.Contains(0))
  1543  		assert.Equal(t, bat.Vecs[0].Length()-1, view.Length())
  1544  		v := bat.Vecs[schema.GetSingleSortKeyIdx()].Get(0)
  1545  		filter := handle.NewEQFilter(v)
  1546  		_, _, err = rel.GetByFilter(context.Background(), filter)
  1547  		assert.True(t, moerr.IsMoErrCode(err, moerr.ErrNotFound))
  1548  		_ = txn.Rollback(context.Background())
  1549  	}
  1550  	t.Log(tae.Catalog.SimplePPString(common.PPL1))
  1551  }
  1552  
  1553  func TestLogIndex1(t *testing.T) {
  1554  	defer testutils.AfterTest(t)()
  1555  	testutils.EnsureNoLeak(t)
  1556  	ctx := context.Background()
  1557  
  1558  	tae := testutil.InitTestDB(ctx, ModuleName, t, nil)
  1559  	defer tae.Close()
  1560  	schema := catalog.MockSchemaAll(13, 0)
  1561  	schema.BlockMaxRows = 10
  1562  	bat := catalog.MockBatch(schema, int(schema.BlockMaxRows))
  1563  	defer bat.Close()
  1564  	bats := bat.Split(int(schema.BlockMaxRows))
  1565  	testutil.CreateRelation(t, tae, "db", schema, true)
  1566  	txns := make([]txnif.AsyncTxn, 0)
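        	// doAppend returns a closure that appends one batch in its own txn, commits it, and records the txn.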
  1567  	doAppend := func(data *containers.Batch) func() {
  1568  		return func() {
  1569  			txn, rel := testutil.GetDefaultRelation(t, tae, schema.Name)
  1570  			err := rel.Append(context.Background(), data)
  1571  			assert.NoError(t, err)
  1572  			assert.NoError(t, txn.Commit(context.Background()))
  1573  			txns = append(txns, txn)
  1574  		}
  1575  	}
  1576  	for _, data := range bats {
  1577  		doAppend(data)()
  1578  	}
  1579  	var id *common.ID
  1580  	var offset uint32
  1581  	var err error
  1582  	{
  1583  		txn, rel := testutil.GetDefaultRelation(t, tae, schema.Name)
  1584  		v := bat.Vecs[schema.GetSingleSortKeyIdx()].Get(3)
  1585  		filter := handle.NewEQFilter(v)
  1586  		id, offset, err = rel.GetByFilter(context.Background(), filter)
  1587  		assert.Nil(t, err)
  1588  		err = rel.RangeDelete(id, offset, offset, handle.DT_Normal)
  1589  		assert.Nil(t, err)
  1590  		assert.Nil(t, txn.Commit(context.Background()))
  1591  	}
  1592  	{
  1593  		txn, rel := testutil.GetDefaultRelation(t, tae, schema.Name)
  1594  		blk := testutil.GetOneObject(rel)
  1595  		meta := blk.GetMeta().(*catalog.ObjectEntry)
  1596  
  1597  		view, err := blk.GetColumnDataById(context.Background(), 0, schema.GetSingleSortKeyIdx(), common.DefaultAllocator)
  1598  		assert.Nil(t, err)
  1599  		defer view.Close()
  1600  		assert.True(t, view.DeleteMask.Contains(uint64(offset)))
  1601  		task, err := jobs.NewFlushTableTailTask(nil, txn, []*catalog.ObjectEntry{meta}, tae.Runtime, txn.GetStartTS())
  1602  		assert.Nil(t, err)
  1603  		err = task.OnExec(context.Background())
  1604  		assert.Nil(t, err)
  1605  		assert.Nil(t, txn.Commit(context.Background()))
  1606  	}
  1607  }
  1608  
  1609  func TestCrossDBTxn(t *testing.T) {
  1610  	defer testutils.AfterTest(t)()
  1611  	testutils.EnsureNoLeak(t)
  1612  	ctx := context.Background()
  1613  
  1614  	tae := testutil.InitTestDB(ctx, ModuleName, t, nil)
  1615  	defer tae.Close()
  1616  
  1617  	txn, _ := tae.StartTxn(nil)
  1618  	db1, err := txn.CreateDatabase("db1", "", "")
  1619  	assert.Nil(t, err)
  1620  	db2, err := txn.CreateDatabase("db2", "", "")
  1621  	assert.Nil(t, err)
  1622  	assert.NotNil(t, db1)
  1623  	assert.NotNil(t, db2)
  1624  	assert.Nil(t, txn.Commit(context.Background()))
  1625  
  1626  	schema1 := catalog.MockSchema(2, 0)
  1627  	schema1.BlockMaxRows = 10
  1628  	schema1.ObjectMaxBlocks = 2
  1629  	schema2 := catalog.MockSchema(4, 0)
  1630  	schema2.BlockMaxRows = 10
  1631  	schema2.ObjectMaxBlocks = 2
  1632  
  1633  	rows1 := schema1.BlockMaxRows * 5 / 2
  1634  	rows2 := schema1.BlockMaxRows * 3 / 2
  1635  	bat1 := catalog.MockBatch(schema1, int(rows1))
  1636  	bat2 := catalog.MockBatch(schema2, int(rows2))
  1637  	defer bat1.Close()
  1638  	defer bat2.Close()
  1639  
  1640  	txn, _ = tae.StartTxn(nil)
  1641  	db1, err = txn.GetDatabase("db1")
  1642  	assert.Nil(t, err)
  1643  	db2, err = txn.GetDatabase("db2")
  1644  	assert.Nil(t, err)
  1645  	rel1, err := db1.CreateRelation(schema1)
  1646  	assert.Nil(t, err)
  1647  	rel2, err := db2.CreateRelation(schema2)
  1648  	assert.Nil(t, err)
  1649  	err = rel1.Append(context.Background(), bat1)
  1650  	assert.Nil(t, err)
  1651  	err = rel2.Append(context.Background(), bat2)
  1652  	assert.Nil(t, err)
  1653  
  1654  	assert.Nil(t, txn.Commit(context.Background()))
  1655  
  1656  	txn, _ = tae.StartTxn(nil)
  1657  	db1, err = txn.GetDatabase("db1")
  1658  	assert.NoError(t, err)
  1659  	db2, err = txn.GetDatabase("db2")
  1660  	assert.NoError(t, err)
  1661  	rel1, err = db1.GetRelationByName(schema1.Name)
  1662  	assert.NoError(t, err)
  1663  	rel2, err = db2.GetRelationByName(schema2.Name)
  1664  	assert.NoError(t, err)
  1665  
  1666  	testutil.CheckAllColRowsByScan(t, rel1, int(rows1), false)
  1667  	testutil.CheckAllColRowsByScan(t, rel2, int(rows2), false)
  1668  
  1669  	t.Log(tae.Catalog.SimplePPString(common.PPL1))
  1670  }
  1671  
  1672  func TestSystemDB1(t *testing.T) {
  1673  	defer testutils.AfterTest(t)()
  1674  	testutils.EnsureNoLeak(t)
  1675  	ctx := context.Background()
  1676  
  1677  	tae := testutil.InitTestDB(ctx, ModuleName, t, nil)
  1678  	defer tae.Close()
  1679  	schema := catalog.MockSchema(2, 0)
  1680  	txn, _ := tae.StartTxn(nil)
  1681  	_, err := txn.CreateDatabase(pkgcatalog.MO_CATALOG, "", "")
  1682  	assert.NotNil(t, err)
  1683  	_, err = txn.DropDatabase(pkgcatalog.MO_CATALOG)
  1684  	assert.NotNil(t, err)
  1685  
  1686  	db1, err := txn.CreateDatabase("db1", "", "")
  1687  	assert.Nil(t, err)
  1688  	_, err = db1.CreateRelation(schema)
  1689  	assert.Nil(t, err)
  1690  
  1691  	_, err = txn.CreateDatabase("db2", "", "")
  1692  	assert.Nil(t, err)
  1693  
  1694  	db, _ := txn.GetDatabase(pkgcatalog.MO_CATALOG)
  1695  	table, err := db.GetRelationByName(pkgcatalog.MO_DATABASE)
  1696  	assert.Nil(t, err)
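        	// mo_database should expose 3 databases: mo_catalog plus the two created above.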
  1697  	it := table.MakeObjectIt()
  1698  	for it.Valid() {
  1699  		blk := it.GetObject()
  1700  		view, err := blk.GetColumnDataByName(context.Background(), 0, pkgcatalog.SystemDBAttr_Name, common.DefaultAllocator)
  1701  		assert.Nil(t, err)
  1702  		defer view.Close()
  1703  		assert.Equal(t, 3, view.Length())
  1704  		view, err = blk.GetColumnDataByName(context.Background(), 0, pkgcatalog.SystemDBAttr_CatalogName, common.DefaultAllocator)
  1705  		assert.Nil(t, err)
  1706  		defer view.Close()
  1707  		assert.Equal(t, 3, view.Length())
  1708  		view, err = blk.GetColumnDataByName(context.Background(), 0, pkgcatalog.SystemDBAttr_CreateSQL, common.DefaultAllocator)
  1709  		assert.Nil(t, err)
  1710  		defer view.Close()
  1711  		assert.Equal(t, 3, view.Length())
  1712  		it.Next()
  1713  	}
  1714  
  1715  	table, err = db.GetRelationByName(pkgcatalog.MO_TABLES)
  1716  	assert.Nil(t, err)
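        	// mo_tables should expose 4 relations: the three catalog tables plus the one created in db1.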
  1717  	it = table.MakeObjectIt()
  1718  	for it.Valid() {
  1719  		blk := it.GetObject()
  1720  		view, err := blk.GetColumnDataByName(context.Background(), 0, pkgcatalog.SystemRelAttr_Name, common.DefaultAllocator)
  1721  		assert.Nil(t, err)
  1722  		defer view.Close()
  1723  		assert.Equal(t, 4, view.Length())
  1724  		view, err = blk.GetColumnDataByName(context.Background(), 0, pkgcatalog.SystemRelAttr_Persistence, common.DefaultAllocator)
  1725  		assert.NoError(t, err)
  1726  		defer view.Close()
  1727  		view, err = blk.GetColumnDataByName(context.Background(), 0, pkgcatalog.SystemRelAttr_Kind, common.DefaultAllocator)
  1728  		assert.NoError(t, err)
  1729  		defer view.Close()
  1730  		it.Next()
  1731  	}
  1732  
  1733  	table, err = db.GetRelationByName(pkgcatalog.MO_COLUMNS)
  1734  	assert.Nil(t, err)
  1735  
  1736  	bat := containers.NewBatch()
  1737  	defer bat.Close()
  1738  	// schema2 := table.GetMeta().(*catalog.TableEntry).GetSchema()
  1739  	// bat := containers.BuildBatch(schema2.AllNames(), schema2.AllTypes(), schema2.AllNullables(), 0)
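        	// Collect the db name, table name, column name and constraint type of every catalog column into bat.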
  1740  	it = table.MakeObjectIt()
  1741  	for it.Valid() {
  1742  		blk := it.GetObject()
  1743  		view, err := blk.GetColumnDataByName(context.Background(), 0, pkgcatalog.SystemColAttr_DBName, common.DefaultAllocator)
  1744  		assert.NoError(t, err)
  1745  		defer view.Close()
  1746  		bat.AddVector(pkgcatalog.SystemColAttr_DBName, view.Orphan())
  1747  
  1748  		view, err = blk.GetColumnDataByName(context.Background(), 0, pkgcatalog.SystemColAttr_RelName, common.DefaultAllocator)
  1749  		assert.Nil(t, err)
  1750  		defer view.Close()
  1751  		bat.AddVector(pkgcatalog.SystemColAttr_RelName, view.Orphan())
  1752  
  1753  		view, err = blk.GetColumnDataByName(context.Background(), 0, pkgcatalog.SystemColAttr_Name, common.DefaultAllocator)
  1754  		assert.Nil(t, err)
  1755  		defer view.Close()
  1756  		bat.AddVector(pkgcatalog.SystemColAttr_Name, view.Orphan())
  1757  
  1758  		view, err = blk.GetColumnDataByName(context.Background(), 0, pkgcatalog.SystemColAttr_ConstraintType, common.DefaultAllocator)
  1759  		assert.Nil(t, err)
  1760  		defer view.Close()
  1761  		t.Log(view.GetData().String())
  1762  		bat.AddVector(pkgcatalog.SystemColAttr_ConstraintType, view.Orphan())
  1763  
  1764  		view, err = blk.GetColumnDataByName(context.Background(), 0, pkgcatalog.SystemColAttr_Type, common.DefaultAllocator)
  1765  		assert.Nil(t, err)
  1766  		defer view.Close()
  1767  		t.Log(view.GetData().String())
  1768  		view, err = blk.GetColumnDataByName(context.Background(), 0, pkgcatalog.SystemColAttr_Num, common.DefaultAllocator)
  1769  		assert.Nil(t, err)
  1770  		defer view.Close()
  1771  		t.Log(view.GetData().String())
  1772  		it.Next()
  1773  	}
  1774  
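        	// Only the ID / unique-name column of each catalog table should carry the PK constraint; all other columns should have no constraint.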
  1775  	for i := 0; i < bat.Vecs[0].Length(); i++ {
  1776  		dbName := string(bat.Vecs[0].Get(i).([]byte))
  1777  		relName := string(bat.Vecs[1].Get(i).([]byte))
  1778  		attrName := string(bat.Vecs[2].Get(i).([]byte))
  1779  		ct := string(bat.Vecs[3].Get(i).([]byte))
  1780  		if dbName == pkgcatalog.MO_CATALOG {
  1781  			if relName == pkgcatalog.MO_DATABASE {
  1782  				if attrName == pkgcatalog.SystemDBAttr_ID {
  1783  					assert.Equal(t, pkgcatalog.SystemColPKConstraint, ct)
  1784  				} else {
  1785  					assert.Equal(t, pkgcatalog.SystemColNoConstraint, ct)
  1786  				}
  1787  			} else if relName == pkgcatalog.MO_TABLES {
  1788  				if attrName == pkgcatalog.SystemRelAttr_ID {
  1789  					assert.Equal(t, pkgcatalog.SystemColPKConstraint, ct)
  1790  				} else {
  1791  					assert.Equal(t, pkgcatalog.SystemColNoConstraint, ct)
  1792  				}
  1793  			} else if relName == pkgcatalog.MO_COLUMNS {
  1794  				if attrName == pkgcatalog.SystemColAttr_UniqName {
  1795  					assert.Equal(t, pkgcatalog.SystemColPKConstraint, ct)
  1796  				} else {
  1797  					assert.Equal(t, pkgcatalog.SystemColNoConstraint, ct)
  1798  				}
  1799  			}
  1800  		}
  1801  	}
  1802  
  1803  	err = txn.Rollback(context.Background())
  1804  	assert.Nil(t, err)
  1805  	t.Log(tae.Catalog.SimplePPString(common.PPL1))
  1806  }
  1807  
  1808  func TestSystemDB2(t *testing.T) {
  1809  	defer testutils.AfterTest(t)()
  1810  	testutils.EnsureNoLeak(t)
  1811  	ctx := context.Background()
  1812  
  1813  	tae := testutil.InitTestDB(ctx, ModuleName, t, nil)
  1814  	defer tae.Close()
  1815  
  1816  	txn, _ := tae.StartTxn(nil)
  1817  	sysDB, err := txn.GetDatabase(pkgcatalog.MO_CATALOG)
  1818  	assert.NoError(t, err)
  1819  	_, err = sysDB.DropRelationByName(pkgcatalog.MO_DATABASE)
  1820  	assert.Error(t, err)
  1821  	_, err = sysDB.DropRelationByName(pkgcatalog.MO_TABLES)
  1822  	assert.Error(t, err)
  1823  	_, err = sysDB.DropRelationByName(pkgcatalog.MO_COLUMNS)
  1824  	assert.Error(t, err)
  1825  
  1826  	schema := catalog.MockSchema(2, 0)
  1827  	schema.BlockMaxRows = 100
  1828  	schema.ObjectMaxBlocks = 2
  1829  	bat := catalog.MockBatch(schema, 1000)
  1830  	defer bat.Close()
  1831  
  1832  	rel, err := sysDB.CreateRelation(schema)
  1833  	assert.NoError(t, err)
  1834  	assert.NotNil(t, rel)
  1835  	err = rel.Append(context.Background(), bat)
  1836  	assert.Nil(t, err)
  1837  	assert.NoError(t, txn.Commit(context.Background()))
  1838  
  1839  	txn, _ = tae.StartTxn(nil)
  1840  	sysDB, err = txn.GetDatabase(pkgcatalog.MO_CATALOG)
  1841  	assert.NoError(t, err)
  1842  	rel, err = sysDB.GetRelationByName(schema.Name)
  1843  	assert.NoError(t, err)
  1844  	testutil.CheckAllColRowsByScan(t, rel, 1000, false)
  1845  	assert.NoError(t, txn.Commit(context.Background()))
  1846  }
  1847  
  1848  func TestSystemDB3(t *testing.T) {
  1849  	defer testutils.AfterTest(t)()
  1850  	testutils.EnsureNoLeak(t)
  1851  	ctx := context.Background()
  1852  	tae := testutil.InitTestDB(ctx, ModuleName, t, nil)
  1853  	defer tae.Close()
  1854  	txn, _ := tae.StartTxn(nil)
  1855  	schema := catalog.MockSchemaAll(13, 12)
  1856  	schema.BlockMaxRows = 100
  1857  	schema.ObjectMaxBlocks = 2
  1858  	bat := catalog.MockBatch(schema, 20)
  1859  	defer bat.Close()
  1860  	db, err := txn.GetDatabase(pkgcatalog.MO_CATALOG)
  1861  	assert.NoError(t, err)
  1862  	rel, err := db.CreateRelation(schema)
  1863  	assert.NoError(t, err)
  1864  	err = rel.Append(context.Background(), bat)
  1865  	assert.NoError(t, err)
  1866  	assert.NoError(t, txn.Commit(context.Background()))
  1867  }
  1868  
  1869  func TestScan1(t *testing.T) {
  1870  	defer testutils.AfterTest(t)()
  1871  	testutils.EnsureNoLeak(t)
  1872  	ctx := context.Background()
  1873  	tae := testutil.InitTestDB(ctx, ModuleName, t, nil)
  1874  	defer tae.Close()
  1875  
  1876  	schema := catalog.MockSchemaAll(13, 2)
  1877  	schema.BlockMaxRows = 100
  1878  	schema.ObjectMaxBlocks = 2
  1879  
  1880  	bat := catalog.MockBatch(schema, int(schema.BlockMaxRows-1))
  1881  	defer bat.Close()
  1882  	txn, _, rel := testutil.CreateRelationNoCommit(t, tae, testutil.DefaultTestDB, schema, true)
  1883  	err := rel.Append(context.Background(), bat)
  1884  	assert.NoError(t, err)
  1885  	testutil.CheckAllColRowsByScan(t, rel, bat.Length(), false)
  1886  	assert.NoError(t, txn.Commit(context.Background()))
  1887  }
  1888  
  1889  func TestDedup(t *testing.T) {
  1890  	defer testutils.AfterTest(t)()
  1891  	testutils.EnsureNoLeak(t)
  1892  	ctx := context.Background()
  1893  
  1894  	tae := testutil.InitTestDB(ctx, ModuleName, t, nil)
  1895  	defer tae.Close()
  1896  
  1897  	schema := catalog.MockSchemaAll(13, 2)
  1898  	schema.BlockMaxRows = 100
  1899  	schema.ObjectMaxBlocks = 2
  1900  
  1901  	bat := catalog.MockBatch(schema, 10)
  1902  	defer bat.Close()
  1903  	txn, _, rel := testutil.CreateRelationNoCommit(t, tae, testutil.DefaultTestDB, schema, true)
  1904  	err := rel.Append(context.Background(), bat)
  1905  	assert.NoError(t, err)
  1906  	err = rel.Append(context.Background(), bat)
  1907  	t.Log(err)
  1908  	assert.True(t, moerr.IsMoErrCode(err, moerr.ErrDuplicateEntry))
  1909  	testutil.CheckAllColRowsByScan(t, rel, 10, false)
  1910  	err = txn.Rollback(context.Background())
  1911  	assert.NoError(t, err)
  1912  }
  1913  
  1914  func TestScan2(t *testing.T) {
  1915  	defer testutils.AfterTest(t)()
  1916  	testutils.EnsureNoLeak(t)
  1917  	ctx := context.Background()
  1918  
  1919  	tae := testutil.InitTestDB(ctx, ModuleName, t, nil)
  1920  	defer tae.Close()
  1921  	schema := catalog.MockSchemaAll(13, 12)
  1922  	schema.BlockMaxRows = 10
  1923  	schema.ObjectMaxBlocks = 10
  1924  	rows := schema.BlockMaxRows * 5 / 2
  1925  	bat := catalog.MockBatch(schema, int(rows))
  1926  	defer bat.Close()
  1927  	bats := bat.Split(2)
  1928  
  1929  	txn, _, rel := testutil.CreateRelationNoCommit(t, tae, testutil.DefaultTestDB, schema, true)
  1930  	err := rel.Append(context.Background(), bats[0])
  1931  	assert.NoError(t, err)
  1932  	testutil.CheckAllColRowsByScan(t, rel, bats[0].Length(), false)
  1933  
  1934  	err = rel.Append(context.Background(), bats[0])
  1935  	assert.Error(t, err)
  1936  	err = rel.Append(context.Background(), bats[1])
  1937  	assert.NoError(t, err)
  1938  	testutil.CheckAllColRowsByScan(t, rel, int(rows), false)
  1939  
  1940  	pkv := bat.Vecs[schema.GetSingleSortKeyIdx()].Get(5)
  1941  	filter := handle.NewEQFilter(pkv)
  1942  	err = rel.DeleteByFilter(context.Background(), filter)
  1943  	assert.NoError(t, err)
  1944  	testutil.CheckAllColRowsByScan(t, rel, int(rows)-1, true)
  1945  
  1946  	pkv = bat.Vecs[schema.GetSingleSortKeyIdx()].Get(8)
  1947  	filter = handle.NewEQFilter(pkv)
  1948  	updateV := int64(999)
  1949  	err = rel.UpdateByFilter(context.Background(), filter, 3, updateV, false)
  1950  	assert.NoError(t, err)
  1951  
  1952  	v, _, err := rel.GetValueByFilter(context.Background(), filter, 3)
  1953  	assert.NoError(t, err)
  1954  	assert.Equal(t, updateV, v.(int64))
  1955  	testutil.CheckAllColRowsByScan(t, rel, int(rows)-1, true)
  1956  	assert.NoError(t, txn.Commit(context.Background()))
  1957  }
  1958  
  1959  func TestADA(t *testing.T) {
  1960  	defer testutils.AfterTest(t)()
  1961  	testutils.EnsureNoLeak(t)
  1962  	ctx := context.Background()
  1963  
  1964  	tae := testutil.InitTestDB(ctx, ModuleName, t, nil)
  1965  	defer tae.Close()
  1966  	schema := catalog.MockSchemaAll(13, 3)
  1967  	schema.BlockMaxRows = 1000
  1968  	bat := catalog.MockBatch(schema, 1)
  1969  	defer bat.Close()
  1970  
  1971  	// Append to a block
  1972  	testutil.CreateRelationAndAppend(t, 0, tae, "db", schema, bat, true)
  1973  
  1974  	// Delete a row from the block
  1975  	txn, rel := testutil.GetDefaultRelation(t, tae, schema.Name)
  1976  	v := bat.Vecs[schema.GetSingleSortKeyIdx()].Get(0)
  1977  	filter := handle.NewEQFilter(v)
  1978  	id, row, err := rel.GetByFilter(context.Background(), filter)
  1979  	assert.NoError(t, err)
  1980  	err = rel.RangeDelete(id, row, row, handle.DT_Normal)
  1981  	assert.NoError(t, err)
  1982  	_, _, err = rel.GetByFilter(context.Background(), filter)
  1983  	assert.Error(t, err)
  1984  	assert.NoError(t, txn.Commit(context.Background()))
  1985  
  1986  	// Append a row with the same primary key
  1987  	txn, rel = testutil.GetDefaultRelation(t, tae, schema.Name)
  1988  	_, _, err = rel.GetByFilter(context.Background(), filter)
  1989  	assert.Error(t, err)
  1990  	err = rel.Append(context.Background(), bat)
  1991  	assert.NoError(t, err)
  1992  	id, row, err = rel.GetByFilter(context.Background(), filter)
  1993  	assert.NoError(t, err)
  1994  	testutil.CheckAllColRowsByScan(t, rel, 1, true)
  1995  
  1996  	err = rel.RangeDelete(id, row, row, handle.DT_Normal)
  1997  	assert.NoError(t, err)
  1998  	_, _, err = rel.GetByFilter(context.Background(), filter)
  1999  	assert.Error(t, err)
  2000  
  2001  	err = rel.Append(context.Background(), bat)
  2002  	assert.NoError(t, err)
  2003  	_, _, err = rel.GetByFilter(context.Background(), filter)
  2004  	assert.NoError(t, err)
  2005  	testutil.CheckAllColRowsByScan(t, rel, 1, true)
  2006  	assert.NoError(t, txn.Commit(context.Background()))
  2007  
  2008  	txn, rel = testutil.GetDefaultRelation(t, tae, schema.Name)
  2009  	err = rel.Append(context.Background(), bat)
  2010  	assert.Error(t, err)
  2011  	id, row, err = rel.GetByFilter(context.Background(), filter)
  2012  	assert.NoError(t, err)
  2013  	err = rel.RangeDelete(id, row, row, handle.DT_Normal)
  2014  	assert.NoError(t, err)
  2015  	_, _, err = rel.GetByFilter(context.Background(), filter)
  2016  	assert.Error(t, err)
  2017  
  2018  	err = rel.Append(context.Background(), bat)
  2019  	assert.NoError(t, err)
  2020  
  2021  	id, row, err = rel.GetByFilter(context.Background(), filter)
  2022  	assert.NoError(t, err)
  2023  
  2024  	err = rel.Append(context.Background(), bat)
  2025  	assert.Error(t, err)
  2026  
  2027  	err = rel.RangeDelete(id, row, row, handle.DT_Normal)
  2028  	assert.NoError(t, err)
  2029  	_, _, err = rel.GetByFilter(context.Background(), filter)
  2030  	assert.Error(t, err)
  2031  	err = rel.Append(context.Background(), bat)
  2032  	assert.NoError(t, err)
  2033  
  2034  	assert.NoError(t, txn.Commit(context.Background()))
  2035  
  2036  	txn, rel = testutil.GetDefaultRelation(t, tae, schema.Name)
  2037  	err = rel.Append(context.Background(), bat)
  2038  	assert.Error(t, err)
  2039  	id, row, err = rel.GetByFilter(context.Background(), filter)
  2040  	assert.NoError(t, err)
  2041  	err = rel.RangeDelete(id, row, row, handle.DT_Normal)
  2042  	assert.NoError(t, err)
  2043  	_, _, err = rel.GetByFilter(context.Background(), filter)
  2044  	assert.Error(t, err)
  2045  
  2046  	err = rel.Append(context.Background(), bat)
  2047  	assert.NoError(t, err)
  2048  	assert.NoError(t, txn.Commit(context.Background()))
  2049  
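        	// Final scan: the object exposes 4 physical rows, 3 of which are marked deleted (only the last appended row is live).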
  2050  	txn, rel = testutil.GetDefaultRelation(t, tae, schema.Name)
  2051  	it := rel.MakeObjectIt()
  2052  	for it.Valid() {
  2053  		blk := it.GetObject()
  2054  		for j := 0; j < blk.BlkCnt(); j++ {
  2055  			view, err := blk.GetColumnDataById(context.Background(), uint16(j), schema.GetSingleSortKeyIdx(), common.DefaultAllocator)
  2056  			assert.NoError(t, err)
  2057  			defer view.Close()
  2058  			assert.Equal(t, 4, view.Length())
  2059  			assert.Equal(t, 3, view.DeleteMask.GetCardinality())
  2060  
  2061  		}
  2062  		it.Next()
  2063  	}
  2064  	assert.NoError(t, txn.Commit(context.Background()))
  2065  }
  2066  
  2067  func TestUpdateByFilter(t *testing.T) {
  2068  	defer testutils.AfterTest(t)()
  2069  	testutils.EnsureNoLeak(t)
  2070  	ctx := context.Background()
  2071  
  2072  	tae := testutil.InitTestDB(ctx, ModuleName, t, nil)
  2073  	defer tae.Close()
  2074  	schema := catalog.MockSchemaAll(13, 3)
  2075  	bat := catalog.MockBatch(schema, 100)
  2076  	defer bat.Close()
  2077  
  2078  	testutil.CreateRelationAndAppend(t, 0, tae, "db", schema, bat, true)
  2079  
  2080  	txn, rel := testutil.GetDefaultRelation(t, tae, schema.Name)
  2081  	v := bat.Vecs[schema.GetSingleSortKeyIdx()].Get(2)
  2082  	filter := handle.NewEQFilter(v)
  2083  	err := rel.UpdateByFilter(context.Background(), filter, 2, int32(2222), false)
  2084  	assert.NoError(t, err)
  2085  
  2086  	id, row, err := rel.GetByFilter(context.Background(), filter)
  2087  	assert.NoError(t, err)
  2088  	cv, _, err := rel.GetValue(id, row, 2)
  2089  	assert.NoError(t, err)
  2090  	assert.Equal(t, int32(2222), cv.(int32))
  2091  
  2092  	v = bat.Vecs[schema.GetSingleSortKeyIdx()].Get(3)
  2093  	filter = handle.NewEQFilter(v)
  2094  
  2095  	err = rel.UpdateByFilter(context.Background(), filter, uint16(schema.GetSingleSortKeyIdx()), int64(333333), false)
  2096  	assert.NoError(t, err)
  2097  
  2098  	assert.NoError(t, txn.Commit(context.Background()))
  2099  }
  2100  
  2101  // Test Steps
  2102  // 1. Create DB|Relation and append 10 rows. Commit
  2103  // 2. Make an equal filter with the pk value at index 2
  2104  // 3. Start Txn1. GetByFilter finds the row -- PASS
  2105  // 4. Start Txn2. Delete row 2. Commit.
  2106  // 5. Txn1 calls GetByFilter again and should still find the row -- PASS
  2107  func TestGetByFilter(t *testing.T) {
  2108  	defer testutils.AfterTest(t)()
  2109  	testutils.EnsureNoLeak(t)
  2110  	ctx := context.Background()
  2111  
  2112  	tae := testutil.InitTestDB(ctx, ModuleName, t, nil)
  2113  	defer tae.Close()
  2114  	schema := catalog.MockSchemaAll(13, 12)
  2115  	bat := catalog.MockBatch(schema, 10)
  2116  	defer bat.Close()
  2117  
  2118  	// Step 1
  2119  	testutil.CreateRelationAndAppend(t, 0, tae, "db", schema, bat, true)
  2120  
  2121  	// Step 2
  2122  	v := bat.Vecs[schema.GetSingleSortKeyIdx()].Get(2)
  2123  	filter := handle.NewEQFilter(v)
  2124  
  2125  	// Step 3
  2126  	txn1, rel := testutil.GetDefaultRelation(t, tae, schema.Name)
  2127  	id, row, err := rel.GetByFilter(context.Background(), filter)
  2128  	assert.NoError(t, err)
  2129  
  2130  	// Step 4
  2131  	{
  2132  		txn2, rel := testutil.GetDefaultRelation(t, tae, schema.Name)
  2133  		err := rel.RangeDelete(id, row, row, handle.DT_Normal)
  2134  		assert.NoError(t, err)
  2135  		assert.NoError(t, txn2.Commit(context.Background()))
  2136  	}
  2137  
  2138  	// Step 5
  2139  	_, _, err = rel.GetByFilter(context.Background(), filter)
  2140  	assert.NoError(t, err)
  2141  	assert.NoError(t, txn1.Commit(context.Background()))
  2142  }
  2143  
  2144  //  1. Set a big BlockMaxRows
  2145  //  2. Mock one row batch
  2146  //  3. Start tons of workers. Each worker executes the routine below:
  2147  //     3.1 GetByFilter with a pk value
  2148  //     3.1.1 If found, go to 3.5
  2149  //     3.2 Append a row
  2150  //     3.3 The error should not be a duplicate-entry error (TODO: it currently is; it should be a W-W conflict)
  2151  //     (why not duplicate: the previous GetByFilter already checked that no duplicate key exists)
  2152  //     3.4 If there is no error, try to commit. If the commit succeeds, increment appendCnt; otherwise roll back
  2153  //     3.5 Delete the row
  2154  //     3.5.1 If there is no error, try to commit; the commit should always pass
  2155  //     3.5.2 If there is an error, it should always be a W-W conflict
  2156  //  4. Wait for all workers to finish. Check the raw row count of the table; it should equal appendCnt.
  2157  func TestChaos1(t *testing.T) {
  2158  	defer testutils.AfterTest(t)()
  2159  	testutils.EnsureNoLeak(t)
  2160  	ctx := context.Background()
  2161  
  2162  	tae := testutil.InitTestDB(ctx, ModuleName, t, nil)
  2163  	defer tae.Close()
  2164  	schema := catalog.MockSchemaAll(13, 12)
  2165  	schema.BlockMaxRows = 100000
  2166  	schema.ObjectMaxBlocks = 2
  2167  	bat := catalog.MockBatch(schema, 1)
  2168  	defer bat.Close()
  2169  
  2170  	testutil.CreateRelation(t, tae, "db", schema, true)
  2171  
  2172  	v := bat.Vecs[schema.GetSingleSortKeyIdx()].Get(0)
  2173  	filter := handle.NewEQFilter(v)
  2174  	var wg sync.WaitGroup
  2175  	appendCnt := uint32(0)
  2176  	deleteCnt := uint32(0)
  2177  	worker := func() {
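        		// Each worker deletes the single row if it can find it, otherwise appends it; only successfully committed txns bump deleteCnt/appendCnt.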
  2178  		defer wg.Done()
  2179  		txn, rel := testutil.GetDefaultRelation(t, tae, schema.Name)
  2180  		id, row, err := rel.GetByFilter(context.Background(), filter)
  2181  		// logutil.Infof("id=%v,row=%d,err=%v", id, row, err)
  2182  		if err == nil {
  2183  			err = rel.RangeDelete(id, row, row, handle.DT_Normal)
  2184  			if err != nil {
  2185  				t.Logf("delete: %v", err)
  2186  				// assert.Equal(t, txnif.ErrTxnWWConflict, err)
  2187  				assert.NoError(t, txn.Rollback(context.Background()))
  2188  				return
  2189  			}
  2190  			assert.NoError(t, txn.Commit(context.Background()))
  2191  			atomic.AddUint32(&deleteCnt, uint32(1))
  2192  			return
  2193  		}
  2194  		assert.True(t, moerr.IsMoErrCode(err, moerr.ErrNotFound))
  2195  		err = rel.Append(context.Background(), bat)
  2196  		// TODO: enable below check later
  2197  		// assert.NotEqual(t, data.ErrDuplicate, err)
  2198  		if err == nil {
  2199  			err = txn.Commit(context.Background())
  2200  			// TODO: enable below check later
  2201  			// assert.NotEqual(t, data.ErrDuplicate, err)
  2202  			if err == nil {
  2203  				atomic.AddUint32(&appendCnt, uint32(1))
  2204  			} else {
  2205  				t.Logf("commit: %v", err)
  2206  			}
  2207  			return
  2208  		}
  2209  		_ = txn.Rollback(context.Background())
  2210  	}
  2211  	pool, _ := ants.NewPool(10)
  2212  	defer pool.Release()
  2213  	for i := 0; i < 50; i++ {
  2214  		wg.Add(1)
  2215  		err := pool.Submit(worker)
  2216  		assert.Nil(t, err)
  2217  	}
  2218  	wg.Wait()
  2219  	t.Logf("AppendCnt: %d", appendCnt)
  2220  	t.Logf("DeleteCnt: %d", deleteCnt)
  2221  	assert.True(t, appendCnt-deleteCnt <= 1)
  2222  	_, rel := testutil.GetDefaultRelation(t, tae, schema.Name)
  2223  	blk := testutil.GetOneObject(rel)
  2224  	view, err := blk.GetColumnDataById(context.Background(), 0, schema.GetSingleSortKeyIdx(), common.DefaultAllocator)
  2225  	assert.NoError(t, err)
  2226  	defer view.Close()
  2227  	assert.Equal(t, int(appendCnt), view.Length())
  2228  	mask := view.DeleteMask
  2229  	view.ApplyDeletes()
  2230  	t.Log(view.String())
  2231  	assert.Equal(t, int(deleteCnt), mask.GetCardinality())
  2232  }
  2233  
  2234  // Testing Steps
  2235  // 1. Append 10 rows
  2236  // 2. Start txn1
  2237  // 3. Start txn2. Update the 3rd col of the 3rd row to int64(2222) and commit. -- PASS
  2238  // 4. Txn1 tries to update the 3rd col of the 3rd row to int64(1111). -- W-W Conflict
  2239  // 5. Txn1 tries to delete the 3rd row. -- W-W Conflict. Rollback
  2240  // 6. Start txn3 and update the 3rd col of the 3rd row to int64(3333). -- PASS
  2241  func TestSnapshotIsolation1(t *testing.T) {
  2242  	defer testutils.AfterTest(t)()
  2243  	testutils.EnsureNoLeak(t)
  2244  	ctx := context.Background()
  2245  
  2246  	tae := testutil.InitTestDB(ctx, ModuleName, t, nil)
  2247  	defer tae.Close()
  2248  	schema := catalog.MockSchemaAll(13, 12)
  2249  	schema.BlockMaxRows = 100
  2250  	bat := catalog.MockBatch(schema, 10)
  2251  	defer bat.Close()
  2252  	v := bat.Vecs[schema.GetSingleSortKeyIdx()].Get(3)
  2253  	filter := handle.NewEQFilter(v)
  2254  
  2255  	// Step 1
  2256  	testutil.CreateRelationAndAppend(t, 0, tae, "db", schema, bat, true)
  2257  
  2258  	// Step 2
  2259  	txn1, rel1 := testutil.GetDefaultRelation(t, tae, schema.Name)
  2260  
  2261  	// Step 3
  2262  	txn2, rel2 := testutil.GetDefaultRelation(t, tae, schema.Name)
  2263  	err := rel2.UpdateByFilter(context.Background(), filter, 3, int64(2222), false)
  2264  	assert.NoError(t, err)
  2265  	assert.NoError(t, txn2.Commit(context.Background()))
  2266  
  2267  	// Step 4
  2268  	err = rel1.UpdateByFilter(context.Background(), filter, 3, int64(1111), false)
  2269  	t.Log(err)
  2270  	assert.True(t, moerr.IsMoErrCode(err, moerr.ErrTxnWWConflict))
  2271  
  2272  	// Step 5
  2273  	id, row, err := rel1.GetByFilter(context.Background(), filter)
  2274  	assert.NoError(t, err)
  2275  	err = rel1.RangeDelete(id, row, row, handle.DT_Normal)
  2276  	t.Log(err)
  2277  	assert.True(t, moerr.IsMoErrCode(err, moerr.ErrTxnWWConflict))
  2278  	_ = txn1.Rollback(context.Background())
  2279  
  2280  	// Step 6
  2281  	txn3, rel3 := testutil.GetDefaultRelation(t, tae, schema.Name)
  2282  	err = rel3.UpdateByFilter(context.Background(), filter, 3, int64(3333), false)
  2283  	assert.NoError(t, err)
  2284  	assert.NoError(t, txn3.Commit(context.Background()))
  2285  
  2286  	txn, rel := testutil.GetDefaultRelation(t, tae, schema.Name)
  2287  	v, _, err = rel.GetValueByFilter(context.Background(), filter, 3)
  2288  	assert.NoError(t, err)
  2289  	assert.Equal(t, int64(3333), v.(int64))
  2290  	err = rel.RangeDelete(id, row, row, handle.DT_Normal)
  2291  	assert.Error(t, err)
  2292  	assert.NoError(t, txn.Commit(context.Background()))
  2293  }
  2294  
  2295  // Testing Steps
  2296  // 1. Start txn1
  2297  // 2. Start txn2, append one row, and commit
  2298  // 3. Start txn3, delete the row, and commit
  2299  // 4. Txn1 appends the same row; its commit fails with a W-W conflict
  2300  func TestSnapshotIsolation2(t *testing.T) {
  2301  	defer testutils.AfterTest(t)()
  2302  	testutils.EnsureNoLeak(t)
  2303  	ctx := context.Background()
  2304  
  2305  	opts := config.WithLongScanAndCKPOpts(nil)
  2306  	tae := testutil.InitTestDB(ctx, ModuleName, t, opts)
  2307  	defer tae.Close()
  2308  	schema := catalog.MockSchemaAll(13, 12)
  2309  	schema.BlockMaxRows = 100
  2310  	bat := catalog.MockBatch(schema, 1)
  2311  	defer bat.Close()
  2312  	v := bat.Vecs[schema.GetSingleSortKeyIdx()].Get(0)
  2313  	filter := handle.NewEQFilter(v)
  2314  
  2315  	testutil.CreateRelation(t, tae, "db", schema, true)
  2316  
  2317  	// Step 1
  2318  	txn1, rel1 := testutil.GetDefaultRelation(t, tae, schema.Name)
  2319  
  2320  	// Step 2
  2321  	txn2, rel2 := testutil.GetDefaultRelation(t, tae, schema.Name)
  2322  	err := rel2.Append(context.Background(), bat)
  2323  	assert.NoError(t, err)
  2324  	assert.NoError(t, txn2.Commit(context.Background()))
  2325  
  2326  	// Step 3
  2327  	txn3, rel3 := testutil.GetDefaultRelation(t, tae, schema.Name)
  2328  	err = rel3.DeleteByFilter(context.Background(), filter)
  2329  	assert.NoError(t, err)
  2330  	assert.NoError(t, txn3.Commit(context.Background()))
  2331  
  2332  	// Step 4
  2333  	err = rel1.Append(context.Background(), bat)
  2334  	assert.NoError(t, err)
  2335  	err = txn1.Commit(context.Background())
  2336  	t.Log(err)
  2337  	assert.True(t, moerr.IsMoErrCode(err, moerr.ErrTxnWWConflict))
  2338  }
  2339  
  2340  // Same as TestMergeBlocks, except that the schema has no pk column,
  2341  // so the merge runs reshape.
  2342  func TestReshapeBlocks(t *testing.T) {
  2343  	defer testutils.AfterTest(t)()
  2344  	testutils.EnsureNoLeak(t)
  2345  	ctx := context.Background()
  2346  
  2347  	tae := testutil.InitTestDB(ctx, ModuleName, t, nil)
  2348  	defer tae.Close()
  2349  	schema := catalog.MockSchemaAll(1, -1)
  2350  	schema.BlockMaxRows = 10
  2351  	schema.ObjectMaxBlocks = 3
  2352  	bat := catalog.MockBatch(schema, 30)
  2353  	defer bat.Close()
  2354  
  2355  	testutil.CreateRelationAndAppend(t, 0, tae, "db", schema, bat, true)
  2356  
  2357  	txn, err := tae.StartTxn(nil)
  2358  	assert.Nil(t, err)
  2359  	db, err := txn.GetDatabase("db")
  2360  	assert.Nil(t, err)
  2361  	rel, err := db.GetRelationByName(schema.Name)
  2362  	assert.Nil(t, err)
  2363  	it := rel.MakeObjectIt()
  2364  	blkID := it.GetObject().Fingerprint()
  2365  	err = rel.RangeDelete(blkID, 5, 9, handle.DT_Normal)
  2366  	assert.Nil(t, err)
  2367  	assert.Nil(t, txn.Commit(context.Background()))
  2368  
  2369  	txn, err = tae.StartTxn(nil)
  2370  	assert.Nil(t, err)
  2371  	for it.Valid() {
  2372  		testutil.CheckAllColRowsByScan(t, rel, bat.Length(), false)
  2373  		obj := it.GetObject()
  2374  		for j := 0; j < obj.BlkCnt(); j++ {
  2375  			col, err := obj.GetColumnDataById(context.Background(), uint16(j), 0, common.DefaultAllocator)
  2376  			assert.NoError(t, err)
  2377  			defer col.Close()
  2378  			t.Log(col)
  2379  		}
  2380  		it.Next()
  2381  	}
  2382  	assert.Nil(t, txn.Commit(context.Background()))
  2383  
  2384  	testutil.CompactBlocks(t, 0, tae, "db", schema, false)
  2385  	testutil.MergeBlocks(t, 0, tae, "db", schema, false)
  2386  
  2387  	txn, err = tae.StartTxn(nil)
  2388  	assert.Nil(t, err)
  2389  	db, err = txn.GetDatabase("db")
  2390  	assert.Nil(t, err)
  2391  	rel, err = db.GetRelationByName(schema.Name)
  2392  	assert.Nil(t, err)
  2393  	assert.Equal(t, uint64(25), rel.GetMeta().(*catalog.TableEntry).GetRows())
  2394  	it = rel.MakeObjectIt()
  2395  	for it.Valid() {
  2396  		testutil.CheckAllColRowsByScan(t, rel, bat.Length()-5, false)
  2397  		obj := it.GetObject()
  2398  		for j := 0; j < obj.BlkCnt(); j++ {
  2399  			col, err := obj.GetColumnDataById(context.Background(), uint16(j), 0, common.DefaultAllocator)
  2400  			assert.NoError(t, err)
  2401  			defer col.Close()
  2402  			t.Log(col)
  2403  		}
  2404  		it.Next()
  2405  	}
  2406  	assert.Nil(t, txn.Commit(context.Background()))
  2407  }
  2408  
  2409  // 1. Append 3 blocks and delete the last 5 rows of the 1st block
  2410  // 2. Merge blocks
  2411  // 3. Check rows and col[0]
  2412  func TestMergeBlocks(t *testing.T) {
  2413  	defer testutils.AfterTest(t)()
  2414  	testutils.EnsureNoLeak(t)
  2415  	ctx := context.Background()
  2416  
  2417  	tae := testutil.InitTestDB(ctx, ModuleName, t, nil)
  2418  	defer tae.Close()
  2419  	schema := catalog.MockSchemaAll(1, 0)
  2420  	schema.BlockMaxRows = 10
  2421  	schema.ObjectMaxBlocks = 3
  2422  	bat := catalog.MockBatch(schema, 30)
  2423  	defer bat.Close()
  2424  
  2425  	testutil.CreateRelationAndAppend(t, 0, tae, "db", schema, bat, true)
  2426  
  2427  	txn, err := tae.StartTxn(nil)
  2428  	assert.Nil(t, err)
  2429  	db, err := txn.GetDatabase("db")
  2430  	assert.Nil(t, err)
  2431  	rel, err := db.GetRelationByName(schema.Name)
  2432  	assert.Nil(t, err)
  2433  	it := rel.MakeObjectIt()
  2434  	blkID := it.GetObject().Fingerprint()
  2435  	err = rel.RangeDelete(blkID, 5, 9, handle.DT_Normal)
  2436  	assert.Nil(t, err)
  2437  	assert.Nil(t, txn.Commit(context.Background()))
  2438  
  2439  	txn, err = tae.StartTxn(nil)
  2440  	assert.Nil(t, err)
  2441  	for it.Valid() {
  2442  		testutil.CheckAllColRowsByScan(t, rel, bat.Length(), false)
  2443  		obj := it.GetObject()
  2444  		for j := 0; j < obj.BlkCnt(); j++ {
  2445  			col, err := obj.GetColumnDataById(context.Background(), uint16(j), 0, common.DefaultAllocator)
  2446  			assert.NoError(t, err)
  2447  			defer col.Close()
  2448  			t.Log(col)
  2449  		}
  2450  		it.Next()
  2451  	}
  2452  	assert.Nil(t, txn.Commit(context.Background()))
  2453  
  2454  	testutil.CompactBlocks(t, 0, tae, "db", schema, false)
  2455  	testutil.MergeBlocks(t, 0, tae, "db", schema, false)
  2456  
  2457  	txn, err = tae.StartTxn(nil)
  2458  	assert.Nil(t, err)
  2459  	db, err = txn.GetDatabase("db")
  2460  	assert.Nil(t, err)
  2461  	rel, err = db.GetRelationByName(schema.Name)
  2462  	assert.Nil(t, err)
  2463  	assert.Equal(t, uint64(25), rel.GetMeta().(*catalog.TableEntry).GetRows())
  2464  	it = rel.MakeObjectIt()
  2465  	for it.Valid() {
  2466  		testutil.CheckAllColRowsByScan(t, rel, bat.Length()-5, false)
  2467  		obj := it.GetObject()
  2468  		for j := 0; j < obj.BlkCnt(); j++ {
  2469  			col, err := obj.GetColumnDataById(context.Background(), uint16(j), 0, common.DefaultAllocator)
  2470  			assert.NoError(t, err)
  2471  			defer col.Close()
  2472  			t.Log(col)
  2473  		}
  2474  		it.Next()
  2475  	}
  2476  	assert.Nil(t, txn.Commit(context.Background()))
  2477  }
  2478  
  2479  func TestSegDelLogtail(t *testing.T) {
  2480  	defer testutils.AfterTest(t)()
  2481  	testutils.EnsureNoLeak(t)
  2482  	ctx := context.Background()
  2483  
  2484  	opts := config.WithLongScanAndCKPOpts(nil)
  2485  	tae := testutil.NewTestEngine(ctx, ModuleName, t, opts)
  2486  	defer tae.Close()
  2487  	schema := catalog.MockSchemaAll(13, -1)
  2488  	schema.BlockMaxRows = 10
  2489  	schema.ObjectMaxBlocks = 3
  2490  	bat := catalog.MockBatch(schema, 30)
  2491  	defer bat.Close()
  2492  
  2493  	testutil.CreateRelationAndAppend(t, 0, tae.DB, "db", schema, bat, true)
  2494  
  2495  	txn, err := tae.StartTxn(nil)
  2496  	assert.Nil(t, err)
  2497  	db, err := txn.GetDatabase("db")
  2498  	did := db.GetID()
  2499  	assert.Nil(t, err)
  2500  	rel, err := db.GetRelationByName(schema.Name)
  2501  	tid := rel.ID()
  2502  	assert.Nil(t, err)
  2503  	it := rel.MakeObjectIt()
  2504  	blkID := it.GetObject().Fingerprint()
  2505  	err = rel.RangeDelete(blkID, 5, 9, handle.DT_Normal)
  2506  	assert.Nil(t, err)
  2507  	assert.Nil(t, txn.Commit(context.Background()))
  2508  
  2509  	testutil.CompactBlocks(t, 0, tae.DB, "db", schema, false)
  2510  	testutil.MergeBlocks(t, 0, tae.DB, "db", schema, false)
  2511  
  2512  	t.Log(tae.Catalog.SimplePPString(common.PPL3))
  2513  	resp, close, err := logtail.HandleSyncLogTailReq(context.TODO(), new(dummyCpkGetter), tae.LogtailMgr, tae.Catalog, api.SyncLogTailReq{
  2514  		CnHave: tots(types.TS{}),
  2515  		CnWant: tots(types.MaxTs()),
  2516  		Table:  &api.TableID{DbId: did, TbId: tid},
  2517  	}, false)
  2518  	require.Nil(t, err)
  2519  	require.Equal(t, 2, len(resp.Commands)) // block insert + object info
  2520  
  2521  	require.Equal(t, api.Entry_Insert, resp.Commands[0].EntryType)
  2522  	require.True(t, strings.HasSuffix(resp.Commands[0].TableName, "meta"))
  2523  	require.Equal(t, uint32(1), resp.Commands[0].Bat.Vecs[0].Len) /* 1 deltaloc */
  2524  
  2525  	require.Equal(t, api.Entry_Insert, resp.Commands[1].EntryType)
  2526  	require.True(t, strings.HasSuffix(resp.Commands[1].TableName, "obj"))
  2527  	require.Equal(t, uint32(6), resp.Commands[1].Bat.Vecs[0].Len) /* 2 Objects (create) + 4 (update object info) */
  2528  
  2529  	close()
  2530  
  2531  	txn, err = tae.StartTxn(nil)
  2532  	assert.Nil(t, err)
  2533  	db, err = txn.GetDatabase("db")
  2534  	assert.Nil(t, err)
  2535  	rel, err = db.GetRelationByName(schema.Name)
  2536  	assert.Nil(t, err)
  2537  	assert.Equal(t, uint64(25), rel.GetMeta().(*catalog.TableEntry).GetRows())
  2538  	testutil.CheckAllColRowsByScan(t, rel, bat.Length()-5, false)
  2539  	assert.Nil(t, txn.Commit(context.Background()))
  2540  
  2541  	err = tae.BGCheckpointRunner.ForceIncrementalCheckpoint(tae.TxnMgr.Now(), false)
  2542  	require.NoError(t, err)
  2543  
  2544  	check := func() {
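        		// The incremental checkpoint should carry the same content as the logtail above: 1 deltaloc insert and 6 object-info entries.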
  2545  		ckpEntries := tae.BGCheckpointRunner.GetAllIncrementalCheckpoints()
  2546  		require.Equal(t, 1, len(ckpEntries))
  2547  		entry := ckpEntries[0]
  2548  		ins, del, cnins, segdel, err := entry.GetByTableID(context.Background(), tae.Runtime.Fs, tid)
  2549  		require.NoError(t, err)
  2550  		require.Equal(t, uint32(1), ins.Vecs[0].Len)    // 1 deltaloc, skip blks without deltaloc
  2551  		require.Nil(t, del)                             // 0  del
  2552  		require.Nil(t, cnins)                           // 0 cn-ins
  2553  		require.Equal(t, uint32(6), segdel.Vecs[0].Len) // 2 create + 4 update
  2554  		require.Equal(t, 12, len(segdel.Vecs))
  2555  	}
  2556  	check()
  2557  
  2558  	tae.Restart(ctx)
  2559  
  2560  	txn, err = tae.StartTxn(nil)
  2561  	assert.Nil(t, err)
  2562  	db, err = txn.GetDatabase("db")
  2563  	assert.Nil(t, err)
  2564  	rel, err = db.GetRelationByName(schema.Name)
  2565  	assert.Nil(t, err)
  2566  	assert.Equal(t, uint64(25), rel.GetMeta().(*catalog.TableEntry).GetRows())
  2567  	testutil.CheckAllColRowsByScan(t, rel, bat.Length()-5, false)
  2568  	assert.Nil(t, txn.Commit(context.Background()))
  2569  
  2570  	check()
  2571  
  2572  }
  2573  
  2574  // 1. delete a row and commit
  2575  // 2. start a merge but do not commit it yet
  2576  // 3. delete another row and commit
  2577  // 4. commit the merge
  2578  func TestMergeblocks2(t *testing.T) {
  2579  	defer testutils.AfterTest(t)()
  2580  	testutils.EnsureNoLeak(t)
  2581  	ctx := context.Background()
  2582  
  2583  	opts := config.WithLongScanAndCKPOpts(nil)
  2584  	tae := testutil.NewTestEngine(ctx, ModuleName, t, opts)
  2585  	defer tae.Close()
  2586  	schema := catalog.MockSchemaAll(1, 0)
  2587  	schema.BlockMaxRows = 3
  2588  	schema.ObjectMaxBlocks = 2
  2589  	tae.BindSchema(schema)
  2590  	bat := catalog.MockBatch(schema, 6)
  2591  	bats := bat.Split(2)
  2592  	defer bat.Close()
  2593  
  2594  	tae.CreateRelAndAppend(bats[0], true)
  2595  
  2596  	txn, rel := tae.GetRelation()
  2597  	_ = rel.Append(context.Background(), bats[1])
  2598  	assert.Nil(t, txn.Commit(context.Background()))
  2599  
  2600  	// flush to nblk
  2601  	{
  2602  		txn, rel := tae.GetRelation()
  2603  		blkMetas := testutil.GetAllBlockMetas(rel)
  2604  		task, err := jobs.NewFlushTableTailTask(tasks.WaitableCtx, txn, blkMetas, tae.DB.Runtime, types.MaxTs())
  2605  		require.NoError(t, err)
  2606  		require.NoError(t, task.OnExec(context.Background()))
  2607  		require.NoError(t, txn.Commit(context.Background()))
  2608  	}
  2609  
  2610  	{
  2611  		v := testutil.GetSingleSortKeyValue(bat, schema, 1)
  2612  		filter := handle.NewEQFilter(v)
  2613  		txn2, rel := tae.GetRelation()
  2614  		t.Log("********before delete******************")
  2615  		testutil.CheckAllColRowsByScan(t, rel, 6, true)
  2616  		_ = rel.DeleteByFilter(context.Background(), filter)
  2617  		assert.Nil(t, txn2.Commit(context.Background()))
  2618  	}
  2619  
  2620  	_, rel = tae.GetRelation()
  2621  	t.Log("**********************")
  2622  	testutil.CheckAllColRowsByScan(t, rel, 5, true)
  2623  
  2624  	{
  2625  		t.Log("************merge************")
  2626  
  2627  		txn, rel = tae.GetRelation()
  2628  
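        		// Run the merge task on the only object, delete another row in a separate committed txn, then commit the merge.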
  2629  		objIt := rel.MakeObjectIt()
  2630  		obj := objIt.GetObject().GetMeta().(*catalog.ObjectEntry)
  2631  		objHandle, err := rel.GetObject(&obj.ID)
  2632  		assert.NoError(t, err)
  2633  
  2634  		objsToMerge := []*catalog.ObjectEntry{objHandle.GetMeta().(*catalog.ObjectEntry)}
  2635  		task, err := jobs.NewMergeObjectsTask(nil, txn, objsToMerge, tae.Runtime, 0)
  2636  		assert.NoError(t, err)
  2637  		err = task.OnExec(context.Background())
  2638  		assert.NoError(t, err)
  2639  
  2640  		{
  2641  			v := testutil.GetSingleSortKeyValue(bat, schema, 2)
  2642  			filter := handle.NewEQFilter(v)
  2643  			txn2, rel := tae.GetRelation()
  2644  			t.Log("********before delete******************")
  2645  			testutil.CheckAllColRowsByScan(t, rel, 5, true)
  2646  			_ = rel.DeleteByFilter(context.Background(), filter)
  2647  			assert.Nil(t, txn2.Commit(context.Background()))
  2648  		}
  2649  		err = txn.Commit(context.Background())
  2650  		assert.NoError(t, err)
  2651  	}
  2652  
  2653  	t.Log("********************")
  2654  	_, rel = tae.GetRelation()
  2655  	testutil.CheckAllColRowsByScan(t, rel, 4, true)
  2656  
  2657  	v := testutil.GetSingleSortKeyValue(bat, schema, 1)
  2658  	filter := handle.NewEQFilter(v)
  2659  	_, _, err := rel.GetByFilter(context.Background(), filter)
  2660  	assert.NotNil(t, err)
  2661  
  2662  	v = testutil.GetSingleSortKeyValue(bat, schema, 2)
  2663  	filter = handle.NewEQFilter(v)
  2664  	_, _, err = rel.GetByFilter(context.Background(), filter)
  2665  	assert.NotNil(t, err)
  2666  }
  2667  
  2668  // Object1: 1, 2, 3 | 4, 5, 6
  2669  // Object2: 7, 8, 9 | 10, 11, 12
  2670  // Now delete 1 and 10; after the merge the layout becomes:
  2671  // Object1: 2, 3, 4 | 5, 6, 7
  2672  // Object2: 8, 9, 11 | 12
  2673  // The transfer-delete map is not nil for [obj1, blk1] and [obj2, blk2]
  2674  func TestMergeBlocksIntoMultipleObjects(t *testing.T) {
  2675  	defer testutils.AfterTest(t)()
  2676  	testutils.EnsureNoLeak(t)
  2677  	ctx := context.Background()
  2678  
  2679  	opts := config.WithLongScanAndCKPOpts(nil)
  2680  	tae := testutil.NewTestEngine(ctx, ModuleName, t, opts)
  2681  	defer tae.Close()
  2682  	schema := catalog.MockSchemaAll(1, 0)
  2683  	schema.BlockMaxRows = 3
  2684  	schema.ObjectMaxBlocks = 2
  2685  	tae.BindSchema(schema)
  2686  	bat := catalog.MockBatch(schema, 12)
  2687  	bats := bat.Split(2)
  2688  	defer bat.Close()
  2689  
  2690  	tae.CreateRelAndAppend(bats[0], true)
  2691  
  2692  	txn, rel := tae.GetRelation()
  2693  	_ = rel.Append(context.Background(), bats[1])
  2694  	assert.Nil(t, txn.Commit(context.Background()))
  2695  
  2696  	// flush to nblk
  2697  	{
  2698  		txn, rel := tae.GetRelation()
  2699  		blkMetas := testutil.GetAllBlockMetas(rel)
  2700  		task, err := jobs.NewFlushTableTailTask(tasks.WaitableCtx, txn, blkMetas, tae.DB.Runtime, types.MaxTs())
  2701  		require.NoError(t, err)
  2702  		require.NoError(t, task.OnExec(context.Background()))
  2703  		require.NoError(t, txn.Commit(context.Background()))
  2704  
  2705  		testutil.CheckAllColRowsByScan(t, rel, 12, true)
  2706  	}
  2707  
  2708  	{
  2709  		t.Log("************split one object into two objects************")
  2710  
  2711  		txn, rel = tae.GetRelation()
  2712  		objIt := rel.MakeObjectIt()
  2713  		obj := objIt.GetObject().GetMeta().(*catalog.ObjectEntry)
  2714  		objHandle, err := rel.GetObject(&obj.ID)
  2715  		assert.NoError(t, err)
  2716  
  2717  		objsToMerge := []*catalog.ObjectEntry{objHandle.GetMeta().(*catalog.ObjectEntry)}
  2718  		task, err := jobs.NewMergeObjectsTask(nil, txn, objsToMerge, tae.Runtime, 0)
  2719  		assert.NoError(t, err)
  2720  		assert.NoError(t, task.OnExec(context.Background()))
  2721  		assert.NoError(t, txn.Commit(context.Background()))
  2722  	}
  2723  
  2724  	{
  2725  		t.Log("************check del map************")
  2726  		it := rel.MakeObjectIt()
  2727  		for it.Valid() {
  2728  			obj := it.GetObject()
  2729  			assert.Nil(t, tae.Runtime.TransferDelsMap.GetDelsForBlk(*objectio.NewBlockidWithObjectID(obj.GetID(), 0)))
  2730  			assert.Nil(t, tae.Runtime.TransferDelsMap.GetDelsForBlk(*objectio.NewBlockidWithObjectID(obj.GetID(), 1)))
  2731  			it.Next()
  2732  		}
  2733  	}
  2734  
  2735  	{
  2736  		t.Log("************delete during merge************")
  2737  
  2738  		txn, rel = tae.GetRelation()
  2739  		objIt := rel.MakeObjectIt()
  2740  		obj1 := objIt.GetObject().GetMeta().(*catalog.ObjectEntry)
  2741  		objHandle1, err := rel.GetObject(&obj1.ID)
  2742  		assert.NoError(t, err)
  2743  		objIt.Next()
  2744  		obj2 := objIt.GetObject().GetMeta().(*catalog.ObjectEntry)
  2745  		objHandle2, err := rel.GetObject(&obj2.ID)
  2746  		assert.NoError(t, err)
  2747  
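        		// While the merge txn is open, delete pk values 1 and 10 in two separate committed txns, then execute and commit the merge.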
  2748  		v := testutil.GetSingleSortKeyValue(bat, schema, 1)
  2749  		filter := handle.NewEQFilter(v)
  2750  		txn2, rel := tae.GetRelation()
  2751  		_ = rel.DeleteByFilter(context.Background(), filter)
  2752  		assert.NoError(t, txn2.Commit(context.Background()))
  2753  		_, rel = tae.GetRelation()
  2754  		testutil.CheckAllColRowsByScan(t, rel, 11, true)
  2755  
  2756  		v = testutil.GetSingleSortKeyValue(bat, schema, 10)
  2757  		filter = handle.NewEQFilter(v)
  2758  		txn2, rel = tae.GetRelation()
  2759  		_ = rel.DeleteByFilter(context.Background(), filter)
  2760  		assert.NoError(t, txn2.Commit(context.Background()))
  2761  		_, rel = tae.GetRelation()
  2762  		testutil.CheckAllColRowsByScan(t, rel, 10, true)
  2763  
  2764  		objsToMerge := []*catalog.ObjectEntry{objHandle1.GetMeta().(*catalog.ObjectEntry), objHandle2.GetMeta().(*catalog.ObjectEntry)}
  2765  		task, err := jobs.NewMergeObjectsTask(nil, txn, objsToMerge, tae.Runtime, 0)
  2766  		assert.NoError(t, err)
  2767  		assert.NoError(t, task.OnExec(context.Background()))
  2768  		assert.NoError(t, txn.Commit(context.Background()))
  2769  		{
  2770  			t.Log("************check del map again************")
  2771  			_, rel = tae.GetRelation()
  2772  			objCnt := 0
  2773  			for it := rel.MakeObjectIt(); it.Valid(); it.Next() {
  2774  				obj := it.GetObject()
  2775  				if objCnt == 0 {
  2776  					assert.NotNil(t, tae.Runtime.TransferDelsMap.GetDelsForBlk(*objectio.NewBlockidWithObjectID(obj.GetID(), 0)))
  2777  				} else {
  2778  					assert.NotNil(t, tae.Runtime.TransferDelsMap.GetDelsForBlk(*objectio.NewBlockidWithObjectID(obj.GetID(), 1)))
  2779  				}
  2780  				objCnt++
  2781  			}
  2782  			assert.Equal(t, 2, objCnt)
  2783  		}
  2784  	}
  2785  }
  2786  
  2787  func TestMergeEmptyBlocks(t *testing.T) {
  2788  	defer testutils.AfterTest(t)()
  2789  	testutils.EnsureNoLeak(t)
  2790  	ctx := context.Background()
  2791  
  2792  	opts := config.WithLongScanAndCKPOpts(nil)
  2793  	tae := testutil.NewTestEngine(ctx, ModuleName, t, opts)
  2794  	defer tae.Close()
  2795  	schema := catalog.MockSchemaAll(1, 0)
  2796  	schema.BlockMaxRows = 3
  2797  	schema.ObjectMaxBlocks = 2
  2798  	tae.BindSchema(schema)
  2799  	bat := catalog.MockBatch(schema, 6)
  2800  	bats := bat.Split(2)
  2801  	defer bat.Close()
  2802  
  2803  	tae.CreateRelAndAppend(bats[0], true)
  2804  
  2805  	// flush the appendable data to a non-appendable block (nblk)
  2806  	{
  2807  		txn, rel := tae.GetRelation()
  2808  		blkMetas := testutil.GetAllBlockMetas(rel)
  2809  		task, err := jobs.NewFlushTableTailTask(tasks.WaitableCtx, txn, blkMetas, tae.DB.Runtime, types.MaxTs())
  2810  		require.NoError(t, err)
  2811  		require.NoError(t, task.OnExec(context.Background()))
  2812  		require.NoError(t, txn.Commit(context.Background()))
  2813  	}
  2814  
  2815  	assert.NoError(t, tae.DeleteAll(true))
  2816  
  2817  	{
  2818  		txn, rel := tae.GetRelation()
  2819  		assert.NoError(t, rel.Append(context.Background(), bats[1]))
  2820  		assert.NoError(t, txn.Commit(context.Background()))
  2821  	}
  2822  
  2823  	{
  2824  		t.Log("************merge************")
  2825  
  2826  		txn, rel := tae.GetRelation()
  2827  
  2828  		objIt := rel.MakeObjectIt()
  2829  		obj := objIt.GetObject().GetMeta().(*catalog.ObjectEntry)
  2830  		objHandle, err := rel.GetObject(&obj.ID)
  2831  		assert.NoError(t, err)
  2832  
  2833  		objsToMerge := []*catalog.ObjectEntry{objHandle.GetMeta().(*catalog.ObjectEntry)}
  2834  		task, err := jobs.NewMergeObjectsTask(nil, txn, objsToMerge, tae.Runtime, 0)
  2835  		assert.NoError(t, err)
  2836  		err = task.OnExec(context.Background())
  2837  		assert.NoError(t, err)
  2838  
  2839  		{
  2840  			v := testutil.GetSingleSortKeyValue(bat, schema, 4)
  2841  			filter := handle.NewEQFilter(v)
  2842  			txn2, rel := tae.GetRelation()
  2843  			require.NoError(t, rel.DeleteByFilter(context.Background(), filter))
  2844  			assert.Nil(t, txn2.Commit(context.Background()))
  2845  		}
  2846  		err = txn.Commit(context.Background())
  2847  		assert.NoError(t, err)
  2848  	}
  2849  }
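
        // TestDelete2 appends a small batch, deletes a single row by its sort key, and then compacts the blocks.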
  2850  func TestDelete2(t *testing.T) {
  2851  	defer testutils.AfterTest(t)()
  2852  	testutils.EnsureNoLeak(t)
  2853  	ctx := context.Background()
  2854  
  2855  	opts := config.WithLongScanAndCKPOpts(nil)
  2856  	tae := testutil.NewTestEngine(ctx, ModuleName, t, opts)
  2857  	defer tae.Close()
  2858  	schema := catalog.MockSchemaAll(18, 11)
  2859  	schema.BlockMaxRows = 10
  2860  	schema.ObjectMaxBlocks = 2
  2861  	tae.BindSchema(schema)
  2862  	bat := catalog.MockBatch(schema, 5)
  2863  	defer bat.Close()
  2864  	tae.CreateRelAndAppend(bat, true)
  2865  
  2866  	txn, rel := tae.GetRelation()
  2867  	v := testutil.GetSingleSortKeyValue(bat, schema, 2)
  2868  	filter := handle.NewEQFilter(v)
  2869  	err := rel.DeleteByFilter(context.Background(), filter)
  2870  	assert.NoError(t, err)
  2871  	assert.NoError(t, txn.Commit(context.Background()))
  2872  
  2873  	tae.CompactBlocks(false)
  2874  }
  2875  
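        // TestNull1 covers null handling: it appends a batch containing a null cell, updates more cells to null
        // through filters, and verifies the nulls stay visible across scans, compactions, merges, and restarts.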
  2876  func TestNull1(t *testing.T) {
  2877  	defer testutils.AfterTest(t)()
  2878  	testutils.EnsureNoLeak(t)
  2879  	ctx := context.Background()
  2880  
  2881  	opts := config.WithLongScanAndCKPOpts(nil)
  2882  	tae := testutil.NewTestEngine(ctx, ModuleName, t, opts)
  2883  	defer tae.Close()
  2884  	schema := catalog.MockSchemaAll(18, 9)
  2885  	schema.BlockMaxRows = 10
  2886  	schema.ObjectMaxBlocks = 2
  2887  	tae.BindSchema(schema)
  2888  
  2889  	bat := catalog.MockBatch(schema, int(schema.BlockMaxRows*3+1))
  2890  	defer bat.Close()
  2891  	bats := bat.Split(4)
  2892  	bats[0].Vecs[3].Update(2, nil, true)
  2893  	tae.CreateRelAndAppend(bats[0], true)
  2894  
  2895  	txn, rel := tae.GetRelation()
  2896  	blk := testutil.GetOneObject(rel)
  2897  	view, err := blk.GetColumnDataById(context.Background(), 0, 3, common.DefaultAllocator)
  2898  	assert.NoError(t, err)
  2899  	defer view.Close()
  2900  	//v := view.GetData().Get(2)
  2901  	assert.True(t, view.GetData().IsNull(2))
  2902  	testutil.CheckAllColRowsByScan(t, rel, bats[0].Length(), false)
  2903  	assert.NoError(t, txn.Commit(context.Background()))
  2904  
  2905  	tae.Restart(ctx)
  2906  	txn, rel = tae.GetRelation()
  2907  	blk = testutil.GetOneObject(rel)
  2908  	view, err = blk.GetColumnDataById(context.Background(), 0, 3, common.DefaultAllocator)
  2909  	assert.NoError(t, err)
  2910  	defer view.Close()
  2911  	//v = view.GetData().Get(2)
  2912  	assert.True(t, view.GetData().IsNull(2))
  2913  	testutil.CheckAllColRowsByScan(t, rel, bats[0].Length(), false)
  2914  
  2915  	v := testutil.GetSingleSortKeyValue(bats[0], schema, 2)
  2916  	filter_2 := handle.NewEQFilter(v)
  2917  	_, uv0_2_isNull, err := rel.GetValueByFilter(context.Background(), filter_2, 3)
  2918  	assert.NoError(t, err)
  2919  	assert.True(t, uv0_2_isNull)
  2920  
  2921  	v0_4 := testutil.GetSingleSortKeyValue(bats[0], schema, 4)
  2922  	filter_4 := handle.NewEQFilter(v0_4)
  2923  	err = rel.UpdateByFilter(context.Background(), filter_4, 3, nil, true)
  2924  	assert.NoError(t, err)
  2925  	_, uv_isNull, err := rel.GetValueByFilter(context.Background(), filter_4, 3)
  2926  	assert.NoError(t, err)
  2927  	assert.True(t, uv_isNull)
  2928  	assert.NoError(t, txn.Commit(context.Background()))
  2929  
  2930  	txn, rel = tae.GetRelation()
  2931  	testutil.CheckAllColRowsByScan(t, rel, bats[0].Length(), true)
  2932  	_, uv_isNull, err = rel.GetValueByFilter(context.Background(), filter_4, 3)
  2933  	assert.NoError(t, err)
  2934  	assert.True(t, uv_isNull)
  2935  
  2936  	err = rel.Append(context.Background(), bats[1])
  2937  	assert.NoError(t, err)
  2938  	assert.NoError(t, txn.Commit(context.Background()))
  2939  
  2940  	tae.CompactBlocks(false)
  2941  	txn, rel = tae.GetRelation()
  2942  	testutil.CheckAllColRowsByScan(t, rel, testutil.LenOfBats(bats[:2]), false)
  2943  	_, uv_isNull, err = rel.GetValueByFilter(context.Background(), filter_4, 3)
  2944  	assert.NoError(t, err)
  2945  	assert.True(t, uv_isNull)
  2946  	assert.NoError(t, txn.Commit(context.Background()))
  2947  
  2948  	tae.Restart(ctx)
  2949  	txn, rel = tae.GetRelation()
  2950  	testutil.CheckAllColRowsByScan(t, rel, testutil.LenOfBats(bats[:2]), false)
  2951  	_, uv_isNull, err = rel.GetValueByFilter(context.Background(), filter_4, 3)
  2952  	assert.NoError(t, err)
  2953  	assert.True(t, uv_isNull)
  2954  
  2955  	v0_1 := testutil.GetSingleSortKeyValue(bats[0], schema, 1)
  2956  	filter0_1 := handle.NewEQFilter(v0_1)
  2957  	err = rel.UpdateByFilter(context.Background(), filter0_1, 12, nil, true)
  2958  	assert.NoError(t, err)
  2959  	_, uv0_1_isNull, err := rel.GetValueByFilter(context.Background(), filter0_1, 12)
  2960  	assert.NoError(t, err)
  2961  	assert.True(t, uv0_1_isNull)
  2962  	assert.NoError(t, txn.Commit(context.Background()))
  2963  
  2964  	txn, rel = tae.GetRelation()
  2965  	_, uv0_1_isNull, err = rel.GetValueByFilter(context.Background(), filter0_1, 12)
  2966  	assert.NoError(t, err)
  2967  	assert.True(t, uv0_1_isNull)
  2968  	err = rel.Append(context.Background(), bats[2])
  2969  	assert.NoError(t, err)
  2970  	assert.NoError(t, txn.Commit(context.Background()))
  2971  
  2972  	tae.CompactBlocks(false)
  2973  	tae.MergeBlocks(false)
  2974  
  2975  	txn, rel = tae.GetRelation()
  2976  	_, uv0_1_isNull, err = rel.GetValueByFilter(context.Background(), filter0_1, 12)
  2977  	assert.NoError(t, err)
  2978  	assert.True(t, uv0_1_isNull)
  2979  	_, uv0_2_isNull, err = rel.GetValueByFilter(context.Background(), filter_2, 3)
  2980  	assert.NoError(t, err)
  2981  	assert.True(t, uv0_2_isNull)
  2982  	assert.NoError(t, txn.Commit(context.Background()))
  2983  
  2984  	tae.Restart(ctx)
  2985  
  2986  	txn, rel = tae.GetRelation()
  2987  	_, uv0_1_isNull, err = rel.GetValueByFilter(context.Background(), filter0_1, 12)
  2988  	assert.NoError(t, err)
  2989  	assert.True(t, uv0_1_isNull)
  2990  	_, uv0_2_isNull, err = rel.GetValueByFilter(context.Background(), filter_2, 3)
  2991  	assert.NoError(t, err)
  2992  	assert.True(t, uv0_2_isNull)
  2993  	assert.NoError(t, txn.Commit(context.Background()))
  2994  }
  2995  
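        // TestTruncate issues concurrent appends through a worker pool, truncates the table midway,
        // and truncates once more after all appends finish.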
  2996  func TestTruncate(t *testing.T) {
  2997  	defer testutils.AfterTest(t)()
  2998  	testutils.EnsureNoLeak(t)
  2999  	ctx := context.Background()
  3000  
  3001  	opts := config.WithQuickScanAndCKPOpts(nil)
  3002  	tae := testutil.NewTestEngine(ctx, ModuleName, t, opts)
  3003  	defer tae.Close()
  3004  	schema := catalog.MockSchemaAll(18, 15)
  3005  	schema.BlockMaxRows = 10
  3006  	schema.ObjectMaxBlocks = 2
  3007  	tae.BindSchema(schema)
  3008  	bat := catalog.MockBatch(schema, int(schema.BlockMaxRows*5+1))
  3009  	defer bat.Close()
  3010  	bats := bat.Split(20)
  3011  	tae.CreateRelAndAppend(bats[0], true)
  3012  
  3013  	var wg sync.WaitGroup
  3014  	p, _ := ants.NewPool(10)
  3015  	defer p.Release()
  3016  	tryAppend := func(i int) func() {
  3017  		return func() {
  3018  			defer wg.Done()
  3019  			tae.TryAppend(bats[1+i])
  3020  		}
  3021  	}
  3022  
  3023  	for i := range bats[1:] {
  3024  		if i == 10 {
  3025  			wg.Add(1)
  3026  			_ = p.Submit(func() {
  3027  				defer wg.Done()
  3028  				tae.Truncate()
  3029  				t.Log(tae.Catalog.SimplePPString(common.PPL1))
  3030  			})
  3031  		}
  3032  		wg.Add(1)
  3033  		_ = p.Submit(tryAppend(i))
  3034  		time.Sleep(time.Millisecond * 2)
  3035  	}
  3036  	wg.Wait()
  3037  	txn, _ := tae.GetRelation()
  3038  	assert.NoError(t, txn.Commit(context.Background()))
  3039  	tae.Truncate()
  3040  	txn, _ = tae.GetRelation()
  3041  	assert.NoError(t, txn.Commit(context.Background()))
  3042  }
  3043  
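        // TestGetColumnData reads one column from appendable and compacted blocks several times and checks
        // the returned view length and that the data is actually allocated.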
  3044  func TestGetColumnData(t *testing.T) {
  3045  	defer testutils.AfterTest(t)()
  3046  	testutils.EnsureNoLeak(t)
  3047  	ctx := context.Background()
  3048  
  3049  	opts := config.WithLongScanAndCKPOpts(nil)
  3050  	tae := testutil.NewTestEngine(ctx, ModuleName, t, opts)
  3051  	defer tae.Close()
  3052  	schema := catalog.MockSchemaAll(18, 13)
  3053  	schema.BlockMaxRows = 10
  3054  	schema.ObjectMaxBlocks = 2
  3055  	tae.BindSchema(schema)
  3056  	bat := catalog.MockBatch(schema, 39)
  3057  	bats := bat.Split(4)
  3058  	defer bat.Close()
  3059  	tae.CreateRelAndAppend(bats[0], true)
  3060  	txn, rel := tae.GetRelation()
  3061  	blk := testutil.GetOneObject(rel)
  3062  	view, _ := blk.GetColumnDataById(context.Background(), 0, 2, common.DefaultAllocator)
  3063  	defer view.Close()
  3064  	assert.Equal(t, bats[0].Length(), view.Length())
  3065  	assert.NotZero(t, view.GetData().Allocated())
  3066  
  3067  	view, _ = blk.GetColumnDataById(context.Background(), 0, 2, common.DefaultAllocator)
  3068  	defer view.Close()
  3069  	assert.Equal(t, bats[0].Length(), view.Length())
  3070  	assert.NotZero(t, view.GetData().Allocated())
  3071  	assert.NoError(t, txn.Commit(context.Background()))
  3072  
  3073  	tae.CompactBlocks(false)
  3074  	txn, rel = tae.GetRelation()
  3075  	blk = testutil.GetOneObject(rel)
  3076  	view, _ = blk.GetColumnDataById(context.Background(), 0, 2, common.DefaultAllocator)
  3077  	defer view.Close()
  3078  	assert.Equal(t, bats[0].Length(), view.Length())
  3079  	assert.NotZero(t, view.GetData().Allocated())
  3080  
  3081  	view, _ = blk.GetColumnDataById(context.Background(), 0, 2, common.DefaultAllocator)
  3082  	defer view.Close()
  3083  	assert.Equal(t, bats[0].Length(), view.Length())
  3084  	assert.NotZero(t, view.GetData().Allocated())
  3085  	assert.NoError(t, txn.Commit(context.Background()))
  3086  
  3087  	txn, rel = tae.GetRelation()
  3088  	err := rel.Append(context.Background(), bats[1])
  3089  	assert.NoError(t, err)
  3090  	blk = testutil.GetOneObject(rel)
  3091  	view, err = blk.GetColumnDataById(context.Background(), 0, 2, common.DefaultAllocator)
  3092  	assert.NoError(t, err)
  3093  	defer view.Close()
  3094  	assert.True(t, view.GetData().Equals(bats[1].Vecs[2]))
  3095  	assert.NotZero(t, view.GetData().Allocated())
  3096  	view, err = blk.GetColumnDataById(context.Background(), 0, 2, common.DefaultAllocator)
  3097  	assert.NoError(t, err)
  3098  	defer view.Close()
  3099  	assert.True(t, view.GetData().Equals(bats[1].Vecs[2]))
  3100  	assert.NotZero(t, view.GetData().Allocated())
  3101  
  3102  	assert.NoError(t, txn.Commit(context.Background()))
  3103  }
  3104  
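        // TestCompactBlk1 appends rows across several transactions, deletes one row, then flushes the table tail
        // while a concurrent transaction deletes another row before the flush commits; row counts are verified
        // before and after a restart.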
  3105  func TestCompactBlk1(t *testing.T) {
  3106  	defer testutils.AfterTest(t)()
  3107  	testutils.EnsureNoLeak(t)
  3108  	ctx := context.Background()
  3109  
  3110  	opts := config.WithLongScanAndCKPOpts(nil)
  3111  	tae := testutil.NewTestEngine(ctx, ModuleName, t, opts)
  3112  	defer tae.Close()
  3113  	schema := catalog.MockSchemaAll(3, 1)
  3114  	schema.BlockMaxRows = 5
  3115  	schema.ObjectMaxBlocks = 2
  3116  	tae.BindSchema(schema)
  3117  	bat := catalog.MockBatch(schema, 5)
  3118  	bats := bat.Split(5)
  3119  	defer bat.Close()
  3120  
  3121  	tae.CreateRelAndAppend(bats[2], true)
  3122  
  3123  	txn, rel := tae.GetRelation()
  3124  	_ = rel.Append(context.Background(), bats[1])
  3125  	assert.Nil(t, txn.Commit(context.Background()))
  3126  
  3127  	txn, rel = tae.GetRelation()
  3128  	_ = rel.Append(context.Background(), bats[3])
  3129  	assert.Nil(t, txn.Commit(context.Background()))
  3130  
  3131  	txn, rel = tae.GetRelation()
  3132  	_ = rel.Append(context.Background(), bats[4])
  3133  	assert.Nil(t, txn.Commit(context.Background()))
  3134  
  3135  	txn, rel = tae.GetRelation()
  3136  	_ = rel.Append(context.Background(), bats[0])
  3137  	assert.Nil(t, txn.Commit(context.Background()))
  3138  
  3139  	{
  3140  		v := testutil.GetSingleSortKeyValue(bat, schema, 1)
  3141  		t.Logf("v is %v**********", v)
  3142  		filter := handle.NewEQFilter(v)
  3143  		txn2, rel := tae.GetRelation()
  3144  		t.Log("********before delete******************")
  3145  		testutil.CheckAllColRowsByScan(t, rel, 5, true)
  3146  		_ = rel.DeleteByFilter(context.Background(), filter)
  3147  		assert.Nil(t, txn2.Commit(context.Background()))
  3148  	}
  3149  
  3150  	_, rel = tae.GetRelation()
  3151  	testutil.CheckAllColRowsByScan(t, rel, 4, true)
  3152  
  3153  	{
  3154  		t.Log("************compact************")
  3155  		txn, rel = tae.GetRelation()
  3156  		it := rel.MakeObjectIt()
  3157  		blk := it.GetObject()
  3158  		meta := blk.GetMeta().(*catalog.ObjectEntry)
  3159  		task, err := jobs.NewFlushTableTailTask(nil, txn, []*catalog.ObjectEntry{meta}, tae.DB.Runtime, txn.GetStartTS())
  3160  		assert.NoError(t, err)
  3161  		err = task.OnExec(context.Background())
  3162  		assert.NoError(t, err)
  3163  
  3164  		{
  3165  			v := testutil.GetSingleSortKeyValue(bat, schema, 2)
  3166  			t.Logf("v is %v**********", v)
  3167  			filter := handle.NewEQFilter(v)
  3168  			txn2, rel := tae.GetRelation()
  3169  			t.Log("********before delete******************")
  3170  			testutil.CheckAllColRowsByScan(t, rel, 4, true)
  3171  			_ = rel.DeleteByFilter(context.Background(), filter)
  3172  			assert.Nil(t, txn2.Commit(context.Background()))
  3173  		}
  3174  
  3175  		err = txn.Commit(context.Background())
  3176  		assert.NoError(t, err)
  3177  	}
  3178  
  3179  	_, rel = tae.GetRelation()
  3180  	testutil.CheckAllColRowsByScan(t, rel, 3, true)
  3181  
  3182  	tae.Restart(ctx)
  3183  	_, rel = tae.GetRelation()
  3184  	testutil.CheckAllColRowsByScan(t, rel, 3, true)
  3185  }
  3186  
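        // TestCompactBlk2 is similar to TestCompactBlk1 but keeps older relation handles open across the flush to
        // verify snapshot visibility, and checks that deleted keys can no longer be found afterwards.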
  3187  func TestCompactBlk2(t *testing.T) {
  3188  	defer testutils.AfterTest(t)()
  3189  	testutils.EnsureNoLeak(t)
  3190  	ctx := context.Background()
  3191  
  3192  	opts := config.WithLongScanAndCKPOpts(nil)
  3193  	tae := testutil.NewTestEngine(ctx, ModuleName, t, opts)
  3194  	defer tae.Close()
  3195  	schema := catalog.MockSchemaAll(3, 1)
  3196  	schema.BlockMaxRows = 5
  3197  	schema.ObjectMaxBlocks = 2
  3198  	tae.BindSchema(schema)
  3199  	bat := catalog.MockBatch(schema, 5)
  3200  	bats := bat.Split(5)
  3201  	defer bat.Close()
  3202  
  3203  	tae.CreateRelAndAppend(bats[2], true)
  3204  
  3205  	txn, rel := tae.GetRelation()
  3206  	_ = rel.Append(context.Background(), bats[1])
  3207  	assert.Nil(t, txn.Commit(context.Background()))
  3208  
  3209  	txn, rel = tae.GetRelation()
  3210  	_ = rel.Append(context.Background(), bats[3])
  3211  	assert.Nil(t, txn.Commit(context.Background()))
  3212  
  3213  	txn, rel = tae.GetRelation()
  3214  	_ = rel.Append(context.Background(), bats[4])
  3215  	assert.Nil(t, txn.Commit(context.Background()))
  3216  
  3217  	txn, rel = tae.GetRelation()
  3218  	_ = rel.Append(context.Background(), bats[0])
  3219  	assert.Nil(t, txn.Commit(context.Background()))
  3220  
  3221  	v := testutil.GetSingleSortKeyValue(bat, schema, 1)
  3222  	filter := handle.NewEQFilter(v)
  3223  	txn2, rel1 := tae.GetRelation()
  3224  	testutil.CheckAllColRowsByScan(t, rel1, 5, true)
  3225  	_ = rel1.DeleteByFilter(context.Background(), filter)
  3226  	assert.Nil(t, txn2.Commit(context.Background()))
  3227  
  3228  	txn4, rel2 := tae.GetRelation()
  3229  	testutil.CheckAllColRowsByScan(t, rel2, 4, true)
  3230  
  3231  	txn, rel = tae.GetRelation()
  3232  	it := rel.MakeObjectIt()
  3233  	blk := it.GetObject()
  3234  	meta := blk.GetMeta().(*catalog.ObjectEntry)
  3235  	task, err := jobs.NewFlushTableTailTask(nil, txn, []*catalog.ObjectEntry{meta}, tae.DB.Runtime, types.TS{})
  3236  	assert.NoError(t, err)
  3237  	err = task.OnExec(context.Background())
  3238  	assert.NoError(t, err)
  3239  	err = txn.Commit(context.Background())
  3240  	assert.NoError(t, err)
  3241  
  3242  	v = testutil.GetSingleSortKeyValue(bat, schema, 2)
  3243  	filter = handle.NewEQFilter(v)
  3244  	txn2, rel3 := tae.GetRelation()
  3245  	testutil.CheckAllColRowsByScan(t, rel3, 4, true)
  3246  	_ = rel3.DeleteByFilter(context.Background(), filter)
  3247  	assert.Nil(t, txn2.Commit(context.Background()))
  3248  
  3249  	v = testutil.GetSingleSortKeyValue(bat, schema, 4)
  3250  	filter = handle.NewEQFilter(v)
  3251  	txn2, rel4 := tae.GetRelation()
  3252  	testutil.CheckAllColRowsByScan(t, rel4, 3, true)
  3253  	_ = rel4.DeleteByFilter(context.Background(), filter)
  3254  	assert.Nil(t, txn2.Commit(context.Background()))
  3255  
  3256  	testutil.CheckAllColRowsByScan(t, rel1, 5, true)
  3257  	testutil.CheckAllColRowsByScan(t, rel2, 4, true)
  3258  	assert.Nil(t, txn4.Commit(context.Background()))
  3259  
  3260  	_, rel = tae.GetRelation()
  3261  	testutil.CheckAllColRowsByScan(t, rel, 2, true)
  3262  
  3263  	v = testutil.GetSingleSortKeyValue(bat, schema, 2)
  3264  	filter = handle.NewEQFilter(v)
  3265  	_, _, err = rel.GetByFilter(context.Background(), filter)
  3266  	assert.NotNil(t, err)
  3267  
  3268  	v = testutil.GetSingleSortKeyValue(bat, schema, 4)
  3269  	filter = handle.NewEQFilter(v)
  3270  	_, _, err = rel.GetByFilter(context.Background(), filter)
  3271  	assert.NotNil(t, err)
  3272  
  3273  	tae.Restart(ctx)
  3274  }
  3275  
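        // TestCompactblk3 deletes a row, flushes the table tail, and then walks the catalog to confirm each block
        // of the user table reports the expected row count once deletes are applied.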
  3276  func TestCompactblk3(t *testing.T) {
  3277  	defer testutils.AfterTest(t)()
  3278  	testutils.EnsureNoLeak(t)
  3279  	ctx := context.Background()
  3280  
  3281  	opts := config.WithLongScanAndCKPOpts(nil)
  3282  	tae := testutil.NewTestEngine(ctx, ModuleName, t, opts)
  3283  	defer tae.Close()
  3284  	schema := catalog.MockSchemaAll(3, 1)
  3285  	schema.BlockMaxRows = 5
  3286  	schema.ObjectMaxBlocks = 2
  3287  	tae.BindSchema(schema)
  3288  	bat := catalog.MockBatch(schema, 3)
  3289  	defer bat.Close()
  3290  
  3291  	tae.CreateRelAndAppend(bat, true)
  3292  
  3293  	v := testutil.GetSingleSortKeyValue(bat, schema, 1)
  3294  	filter := handle.NewEQFilter(v)
  3295  	txn2, rel1 := tae.GetRelation()
  3296  	testutil.CheckAllColRowsByScan(t, rel1, 3, true)
  3297  	_ = rel1.DeleteByFilter(context.Background(), filter)
  3298  	assert.Nil(t, txn2.Commit(context.Background()))
  3299  
  3300  	_, rel2 := tae.GetRelation()
  3301  	testutil.CheckAllColRowsByScan(t, rel2, 2, true)
  3302  
  3303  	txn, rel := tae.GetRelation()
  3304  	it := rel.MakeObjectIt()
  3305  	blk := it.GetObject()
  3306  	meta := blk.GetMeta().(*catalog.ObjectEntry)
  3307  	task, err := jobs.NewFlushTableTailTask(nil, txn, []*catalog.ObjectEntry{meta}, tae.DB.Runtime, txn.GetStartTS())
  3308  	assert.NoError(t, err)
  3309  	err = task.OnExec(context.Background())
  3310  	assert.NoError(t, err)
  3311  	err = txn.Commit(context.Background())
  3312  	assert.NoError(t, err)
  3313  
  3314  	txn, err = tae.StartTxn(nil)
  3315  	assert.NoError(t, err)
  3316  	processor := &catalog.LoopProcessor{}
  3317  	processor.ObjectFn = func(be *catalog.ObjectEntry) error {
  3318  		if be.GetTable().GetDB().IsSystemDB() {
  3319  			return nil
  3320  		}
  3321  		for j := 0; j < be.BlockCnt(); j++ {
  3322  			view, err := be.GetObjectData().GetColumnDataById(context.Background(), txn, schema, uint16(j), 0, common.DefaultAllocator)
  3323  			assert.NoError(t, err)
  3324  			view.ApplyDeletes()
  3325  			assert.Equal(t, 2, view.Length())
  3326  		}
  3327  		return nil
  3328  	}
  3329  	err = tae.Catalog.RecurLoop(processor)
  3330  	assert.NoError(t, err)
  3331  }
  3332  
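        // TestImmutableIndexInAblk flushes an appendable block into a persisted one and then probes the immutable
        // index: a deleted key is not found, a live key is found, and BatchDedup on the sort-key column reports duplicates.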
  3333  func TestImmutableIndexInAblk(t *testing.T) {
  3334  	defer testutils.AfterTest(t)()
  3335  	testutils.EnsureNoLeak(t)
  3336  	ctx := context.Background()
  3337  
  3338  	opts := config.WithLongScanAndCKPOpts(nil)
  3339  	tae := testutil.NewTestEngine(ctx, ModuleName, t, opts)
  3340  	defer tae.Close()
  3341  	schema := catalog.MockSchemaAll(3, 1)
  3342  	schema.BlockMaxRows = 5
  3343  	schema.ObjectMaxBlocks = 2
  3344  	tae.BindSchema(schema)
  3345  	bat := catalog.MockBatch(schema, 5)
  3346  	bats := bat.Split(5)
  3347  	defer bat.Close()
  3348  
  3349  	tae.CreateRelAndAppend(bats[2], true)
  3350  	txn, rel := tae.GetRelation()
  3351  	_ = rel.Append(context.Background(), bats[1])
  3352  	assert.Nil(t, txn.Commit(context.Background()))
  3353  	txn, rel = tae.GetRelation()
  3354  	_ = rel.Append(context.Background(), bats[3])
  3355  	assert.Nil(t, txn.Commit(context.Background()))
  3356  	txn, rel = tae.GetRelation()
  3357  	_ = rel.Append(context.Background(), bats[4])
  3358  	assert.Nil(t, txn.Commit(context.Background()))
  3359  	txn, rel = tae.GetRelation()
  3360  	_ = rel.Append(context.Background(), bats[0])
  3361  	assert.Nil(t, txn.Commit(context.Background()))
  3362  
  3363  	v := testutil.GetSingleSortKeyValue(bat, schema, 1)
  3364  	filter := handle.NewEQFilter(v)
  3365  	txn2, rel := tae.GetRelation()
  3366  	_ = rel.DeleteByFilter(context.Background(), filter)
  3367  	assert.Nil(t, txn2.Commit(context.Background()))
  3368  
  3369  	txn, rel = tae.GetRelation()
  3370  	it := rel.MakeObjectIt()
  3371  	blk := it.GetObject()
  3372  	meta := blk.GetMeta().(*catalog.ObjectEntry)
  3373  	task, err := jobs.NewFlushTableTailTask(nil, txn, []*catalog.ObjectEntry{meta}, tae.DB.Runtime, txn.GetStartTS())
  3374  	assert.NoError(t, err)
  3375  	err = task.OnExec(context.Background())
  3376  	assert.NoError(t, err)
  3377  	err = txn.Commit(context.Background())
  3378  	assert.NoError(t, err)
  3379  
  3380  	txn, _ = tae.GetRelation()
  3381  	_, _, err = meta.GetObjectData().GetByFilter(context.Background(), txn, filter, common.DefaultAllocator)
  3382  	assert.Error(t, err)
  3383  	v = testutil.GetSingleSortKeyValue(bat, schema, 2)
  3384  	filter = handle.NewEQFilter(v)
  3385  	_, _, err = meta.GetObjectData().GetByFilter(context.Background(), txn, filter, common.DefaultAllocator)
  3386  	assert.NoError(t, err)
  3387  
  3388  	err = meta.GetObjectData().BatchDedup(
  3389  		context.Background(), txn, bat.Vecs[1], nil, nil, false, objectio.BloomFilter{}, common.DefaultAllocator,
  3390  	)
  3391  	assert.Error(t, err)
  3392  }
  3393  
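        // TestDelete3 repeatedly deletes all rows and re-appends them under quick scan/checkpoint options
        // while a heartbeat task keeps printing the dirty tree.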
  3394  func TestDelete3(t *testing.T) {
  3395  	// t.Skip(any("This case crashes occasionally, is being fixed, skip it for now"))
  3396  	defer testutils.AfterTest(t)()
  3397  	ctx := context.Background()
  3398  
  3399  	opts := config.WithQuickScanAndCKPOpts(nil)
  3400  	tae := testutil.NewTestEngine(ctx, ModuleName, t, opts)
  3401  	defer tae.Close()
  3402  
  3403  	// this task won't affect the logic of TestDelete3, it just prints logs about the dirty count
  3404  	forest := logtail.NewDirtyCollector(tae.LogtailMgr, opts.Clock, tae.Catalog, new(catalog.LoopProcessor))
  3405  	hb := ops.NewHeartBeaterWithFunc(5*time.Millisecond, func() {
  3406  		forest.Run(0)
  3407  		t.Log(forest.String())
  3408  	}, nil)
  3409  	hb.Start()
  3410  	defer hb.Stop()
  3411  	schema := catalog.MockSchemaAll(3, 2)
  3412  	schema.BlockMaxRows = 10
  3413  	schema.ObjectMaxBlocks = 2
  3414  	tae.BindSchema(schema)
  3415  	// rows := int(schema.BlockMaxRows * 1)
  3416  	rows := int(schema.BlockMaxRows*3) + 1
  3417  	bat := catalog.MockBatch(schema, rows)
  3418  
  3419  	tae.CreateRelAndAppend(bat, true)
  3420  	tae.CheckRowsByScan(rows, false)
  3421  	deleted := false
  3422  	for i := 0; i < 10; i++ {
  3423  		if deleted {
  3424  			tae.CheckRowsByScan(0, true)
  3425  			tae.DoAppend(bat)
  3426  			deleted = false
  3427  			tae.CheckRowsByScan(rows, true)
  3428  		} else {
  3429  			tae.CheckRowsByScan(rows, true)
  3430  			err := tae.DeleteAll(true)
  3431  			if err == nil {
  3432  				deleted = true
  3433  				tae.CheckRowsByScan(0, true)
  3434  				// assert.Zero(t, tae.getRows())
  3435  			} else {
  3436  				tae.CheckRowsByScan(rows, true)
  3437  				// assert.Equal(t, tae.getRows(), rows)
  3438  			}
  3439  		}
  3440  	}
  3441  	t.Log(tae.Catalog.SimplePPString(common.PPL1))
  3442  }
  3443  
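        // TestDropCreated1 creates and drops a database in the same transaction and checks that the catalog
        // entry's create timestamp equals the transaction's commit timestamp.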
  3444  func TestDropCreated1(t *testing.T) {
  3445  	defer testutils.AfterTest(t)()
  3446  	ctx := context.Background()
  3447  
  3448  	opts := config.WithLongScanAndCKPOpts(nil)
  3449  	tae := testutil.NewTestEngine(ctx, ModuleName, t, opts)
  3450  	defer tae.Close()
  3451  
  3452  	txn, err := tae.StartTxn(nil)
  3453  	assert.Nil(t, err)
  3454  	_, err = txn.CreateDatabase("db", "", "")
  3455  	assert.Nil(t, err)
  3456  	db, err := txn.DropDatabase("db")
  3457  	assert.Nil(t, err)
  3458  	assert.Nil(t, txn.Commit(context.Background()))
  3459  
  3460  	assert.Equal(t, txn.GetCommitTS(), db.GetMeta().(*catalog.DBEntry).GetCreatedAtLocked())
  3461  	assert.Equal(t, txn.GetCommitTS(), db.GetMeta().(*catalog.DBEntry).GetCreatedAtLocked())
  3462  
  3463  	tae.Restart(ctx)
  3464  }
  3465  
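        // TestDropCreated2 repeats TestDropCreated1 for a table created and dropped within one transaction.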
  3466  func TestDropCreated2(t *testing.T) {
  3467  	defer testutils.AfterTest(t)()
  3468  	ctx := context.Background()
  3469  
  3470  	opts := config.WithLongScanAndCKPOpts(nil)
  3471  	tae := testutil.NewTestEngine(ctx, ModuleName, t, opts)
  3472  	schema := catalog.MockSchemaAll(1, -1)
  3473  	defer tae.Close()
  3474  
  3475  	txn, err := tae.StartTxn(nil)
  3476  	assert.Nil(t, err)
  3477  	db, err := txn.CreateDatabase("db", "", "")
  3478  	assert.Nil(t, err)
  3479  	rel, err := db.CreateRelation(schema)
  3480  	assert.Nil(t, err)
  3481  	_, err = db.DropRelationByName(schema.Name)
  3482  	assert.Nil(t, err)
  3483  	assert.Nil(t, txn.Commit(context.Background()))
  3484  
  3485  	assert.Equal(t, txn.GetCommitTS(), rel.GetMeta().(*catalog.TableEntry).GetCreatedAtLocked())
  3486  	assert.Equal(t, txn.GetCommitTS(), rel.GetMeta().(*catalog.TableEntry).GetCreatedAtLocked())
  3487  
  3488  	tae.Restart(ctx)
  3489  }
  3490  
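        // TestDropCreated3 creates and drops a database in one transaction, forces an incremental checkpoint,
        // and restarts the engine.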
  3491  func TestDropCreated3(t *testing.T) {
  3492  	defer testutils.AfterTest(t)()
  3493  	ctx := context.Background()
  3494  
  3495  	opts := config.WithLongScanAndCKPOpts(nil)
  3496  	tae := testutil.NewTestEngine(ctx, ModuleName, t, opts)
  3497  	defer tae.Close()
  3498  
  3499  	txn, err := tae.StartTxn(nil)
  3500  	assert.Nil(t, err)
  3501  	_, err = txn.CreateDatabase("db", "", "")
  3502  	assert.Nil(t, err)
  3503  	_, err = txn.DropDatabase("db")
  3504  	assert.Nil(t, err)
  3505  	assert.Nil(t, txn.Commit(context.Background()))
  3506  
  3507  	err = tae.BGCheckpointRunner.ForceIncrementalCheckpoint(tae.TxnMgr.Now(), false)
  3508  	assert.Nil(t, err)
  3509  
  3510  	tae.Restart(ctx)
  3511  }
  3512  
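        // TestDropCreated4 creates and drops a table in one transaction, forces an incremental checkpoint,
        // and restarts the engine.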
  3513  func TestDropCreated4(t *testing.T) {
  3514  	defer testutils.AfterTest(t)()
  3515  	ctx := context.Background()
  3516  
  3517  	opts := config.WithLongScanAndCKPOpts(nil)
  3518  	tae := testutil.NewTestEngine(ctx, ModuleName, t, opts)
  3519  	schema := catalog.MockSchemaAll(1, -1)
  3520  	defer tae.Close()
  3521  
  3522  	txn, err := tae.StartTxn(nil)
  3523  	assert.Nil(t, err)
  3524  	db, err := txn.CreateDatabase("db", "", "")
  3525  	assert.Nil(t, err)
  3526  	_, err = db.CreateRelation(schema)
  3527  	assert.Nil(t, err)
  3528  	_, err = db.DropRelationByName(schema.Name)
  3529  	assert.Nil(t, err)
  3530  	assert.Nil(t, txn.Commit(context.Background()))
  3531  
  3532  	err = tae.BGCheckpointRunner.ForceIncrementalCheckpoint(tae.TxnMgr.Now(), false)
  3533  	assert.Nil(t, err)
  3534  
  3535  	tae.Restart(ctx)
  3536  }
  3537  
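        // TestTruncateZonemap uses a varchar primary key whose values are longer than the zonemap prefix and
        // verifies that min/max lookups stay correct across restarts, compaction, and merges.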
  3538  func TestTruncateZonemap(t *testing.T) {
  3539  	defer testutils.AfterTest(t)()
  3540  	ctx := context.Background()
  3541  
  3542  	type Mod struct {
  3543  		offset int
  3544  		v      byte
  3545  	}
  3546  	mockBytes := func(init byte, size int, mods ...Mod) []byte {
  3547  		ret := make([]byte, size)
  3548  		for i := 0; i < size; i++ {
  3549  			ret[i] = init
  3550  		}
  3551  		for _, m := range mods {
  3552  			ret[m.offset] = m.v
  3553  		}
  3554  		return ret
  3555  	}
  3556  	testutils.EnsureNoLeak(t)
  3557  	opts := config.WithLongScanAndCKPOpts(nil)
  3558  	tae := testutil.NewTestEngine(ctx, ModuleName, t, opts)
  3559  	defer tae.Close()
  3560  
  3561  	schema := catalog.MockSchemaAll(13, 12) // set varchar PK
  3562  	schema.BlockMaxRows = 10
  3563  	schema.ObjectMaxBlocks = 2
  3564  	tae.BindSchema(schema)
  3565  
  3566  	bat := catalog.MockBatch(schema, int(schema.BlockMaxRows*2+9))        // 2.9 blocks
  3567  	minv := mockBytes(0, 35)                                              // 0x00000000
  3568  	trickyMinv := mockBytes(0, 33)                                        // smaller than minv, not in mut index but in immut index
  3569  	maxv := mockBytes(0xff, 35, Mod{0, 0x61}, Mod{1, 0x62}, Mod{2, 0x63}) // abc0xff0xff...
  3570  	trickyMaxv := []byte("abd")                                           // bigger than maxv, not in mut index but in immut index
  3571  	bat.Vecs[12].Update(8, maxv, false)
  3572  	bat.Vecs[12].Update(11, minv, false)
  3573  	bat.Vecs[12].Update(22, []byte("abcc"), false)
  3574  	defer bat.Close()
  3575  
  3576  	checkMinMax := func(rel handle.Relation, minvOffset, maxvOffset uint32) {
  3577  		_, _, err := rel.GetByFilter(context.Background(), handle.NewEQFilter(trickyMinv))
  3578  		assert.True(t, moerr.IsMoErrCode(err, moerr.ErrNotFound))
  3579  		_, _, err = rel.GetByFilter(context.Background(), handle.NewEQFilter(trickyMaxv))
  3580  		assert.True(t, moerr.IsMoErrCode(err, moerr.ErrNotFound))
  3581  		_, row, err := rel.GetByFilter(context.Background(), handle.NewEQFilter(minv))
  3582  		assert.NoError(t, err)
  3583  		assert.Equal(t, minvOffset, row)
  3584  		_, row, err = rel.GetByFilter(context.Background(), handle.NewEQFilter(maxv))
  3585  		assert.NoError(t, err)
  3586  		assert.Equal(t, maxvOffset, row)
  3587  	}
  3588  
  3589  	tae.CreateRelAndAppend(bat, true)
  3590  
  3591  	// runtime check
  3592  	txn, rel := tae.GetRelation()
  3593  	checkMinMax(rel, 1, 8)
  3594  	assert.NoError(t, txn.Commit(context.Background()))
  3595  
  3596  	// restart without compact
  3597  	tae.Restart(ctx)
  3598  	txn, rel = tae.GetRelation()
  3599  	checkMinMax(rel, 1, 8)
  3600  	assert.NoError(t, txn.Commit(context.Background()))
  3601  
  3602  	// restart with compact
  3603  	tae.CompactBlocks(false)
  3604  	tae.MergeBlocks(false)
  3605  	tae.Restart(ctx)
  3606  	txn, rel = tae.GetRelation()
  3607  	checkMinMax(rel, 0, 8)
  3608  	assert.NoError(t, txn.Commit(context.Background()))
  3609  
  3610  	// 3 NonAppendable Blocks
  3611  	txn, rel = tae.GetRelation()
  3612  	rel.UpdateByFilter(context.Background(), handle.NewEQFilter(maxv), 12, mockBytes(0xff, 35), false)
  3613  	assert.NoError(t, txn.Commit(context.Background()))
  3614  	tae.CompactBlocks(false)
  3615  	tae.MergeBlocks(false)
  3616  	tae.Restart(ctx)
  3617  
  3618  	txn, rel = tae.GetRelation()
  3619  	_, row, err := rel.GetByFilter(context.Background(), handle.NewEQFilter(mockBytes(0xff, 35)))
  3620  	assert.NoError(t, err)
  3621  	assert.Equal(t, uint32(8), row)
  3622  	assert.NoError(t, txn.Commit(context.Background()))
  3623  }
  3624  
  3625  func mustStartTxn(t *testing.T, tae *testutil.TestEngine, tenantID uint32) txnif.AsyncTxn {
  3626  	txn, err := tae.StartTxn(nil)
  3627  	assert.NoError(t, err)
  3628  	txn.BindAccessInfo(tenantID, 0, 0)
  3629  	return txn
  3630  }
  3631  
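        // TestMultiTenantDBOps verifies that creating, listing, and dropping databases is isolated between tenants.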
  3632  func TestMultiTenantDBOps(t *testing.T) {
  3633  	defer testutils.AfterTest(t)()
  3634  	ctx := context.Background()
  3635  
  3636  	var err error
  3637  	opts := config.WithLongScanAndCKPOpts(nil)
  3638  	tae := testutil.NewTestEngine(ctx, ModuleName, t, opts)
  3639  	defer tae.Close()
  3640  
  3641  	txn11 := mustStartTxn(t, tae, 1)
  3642  	_, err = txn11.CreateDatabase("db", "", "")
  3643  	assert.NoError(t, err)
  3644  	txn12 := mustStartTxn(t, tae, 1)
  3645  	_, err = txn11.CreateDatabase("db", "", "")
  3646  	assert.Error(t, err)
  3647  
  3648  	txn21 := mustStartTxn(t, tae, 2)
  3649  	_, err = txn21.CreateDatabase("db", "", "")
  3650  	assert.NoError(t, err)
  3651  
  3652  	assert.NoError(t, txn11.Commit(context.Background()))
  3653  	assert.NoError(t, txn12.Commit(context.Background()))
  3654  	assert.NoError(t, txn21.Commit(context.Background()))
  3655  
  3656  	txn22 := mustStartTxn(t, tae, 2)
  3657  	_, _ = txn22.CreateDatabase("db2", "", "")
  3658  
  3659  	txn23 := mustStartTxn(t, tae, 2)
  3660  	// [mo_catalog, db]
  3661  	assert.Equal(t, 2, len(txn23.DatabaseNames()))
  3662  	assert.NoError(t, txn23.Commit(context.Background()))
  3663  
  3664  	txn22.Commit(context.Background())
  3665  	tae.Restart(ctx)
  3666  
  3667  	txn24 := mustStartTxn(t, tae, 2)
  3668  	// [mo_catalog, db, db2]
  3669  	assert.Equal(t, 3, len(txn24.DatabaseNames()))
  3670  	assert.NoError(t, txn24.Commit(context.Background()))
  3671  
  3672  	txn13 := mustStartTxn(t, tae, 1)
  3673  	// [mo_catalog, db]
  3674  	assert.Equal(t, 2, len(txn13.DatabaseNames()))
  3675  
  3676  	_, err = txn13.GetDatabase("db2")
  3677  	assert.Error(t, err)
  3678  	dbHdl, err := txn13.GetDatabase("db")
  3679  	assert.NoError(t, err)
  3680  	assert.Equal(t, uint32(1), dbHdl.GetMeta().(*catalog.DBEntry).GetTenantID())
  3681  
  3682  	_, err = txn13.DropDatabase("db2")
  3683  	assert.Error(t, err)
  3684  	_, err = txn13.DropDatabase("db")
  3685  	assert.NoError(t, err)
  3686  	assert.NoError(t, txn13.Commit(context.Background()))
  3687  
  3688  	txn14 := mustStartTxn(t, tae, 1)
  3689  	// [mo_catalog]
  3690  	assert.Equal(t, 1, len(txn14.DatabaseNames()))
  3691  	assert.NoError(t, txn14.Commit(context.Background()))
  3692  }
  3693  
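        // TestMultiTenantMoCatalogOps creates user tables and extra mo_catalog relations under different tenants
        // and checks that each account sees only its own catalog rows after a restart.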
  3694  func TestMultiTenantMoCatalogOps(t *testing.T) {
  3695  	defer testutils.AfterTest(t)()
  3696  	ctx := context.Background()
  3697  
  3698  	var err error
  3699  	opts := config.WithLongScanAndCKPOpts(nil)
  3700  	tae := testutil.NewTestEngine(ctx, ModuleName, t, opts)
  3701  	defer tae.Close()
  3702  
  3703  	s := catalog.MockSchemaAll(1, 0)
  3704  	s.Name = "mo_accounts"
  3705  	txn0, sysDB := tae.GetDB(pkgcatalog.MO_CATALOG)
  3706  	_, err = sysDB.CreateRelation(s)
  3707  	assert.NoError(t, err)
  3708  	assert.NoError(t, txn0.Commit(context.Background()))
  3709  
  3710  	schema11 := catalog.MockSchemaAll(3, 0)
  3711  	schema11.BlockMaxRows = 10
  3712  	schema11.ObjectMaxBlocks = 2
  3713  	tae.BindSchema(schema11)
  3714  	tae.BindTenantID(1)
  3715  
  3716  	bat1 := catalog.MockBatch(schema11, int(schema11.BlockMaxRows*2+9))
  3717  	tae.CreateRelAndAppend(bat1, true)
  3718  	// pretend 'mo_users'
  3719  	s = catalog.MockSchemaAll(1, 0)
  3720  	s.Name = "mo_users"
  3721  	txn11, sysDB := tae.GetDB(pkgcatalog.MO_CATALOG)
  3722  	_, err = sysDB.CreateRelation(s)
  3723  	assert.NoError(t, err)
  3724  	assert.NoError(t, txn11.Commit(context.Background()))
  3725  
  3726  	tae.CompactBlocks(false)
  3727  	tae.MergeBlocks(false)
  3728  
  3729  	schema21 := catalog.MockSchemaAll(2, 1)
  3730  	schema21.BlockMaxRows = 10
  3731  	schema21.ObjectMaxBlocks = 2
  3732  	tae.BindSchema(schema21)
  3733  	tae.BindTenantID(2)
  3734  
  3735  	bat2 := catalog.MockBatch(schema21, int(schema21.BlockMaxRows*3+5))
  3736  	tae.CreateRelAndAppend(bat2, true)
  3737  	txn21, sysDB := tae.GetDB(pkgcatalog.MO_CATALOG)
  3738  	s = catalog.MockSchemaAll(1, 0)
  3739  	s.Name = "mo_users"
  3740  	_, err = sysDB.CreateRelation(s)
  3741  	assert.NoError(t, err)
  3742  	assert.NoError(t, txn21.Commit(context.Background()))
  3743  
  3744  	tae.CompactBlocks(false)
  3745  	tae.MergeBlocks(false)
  3746  
  3747  	tae.Restart(ctx)
  3748  
  3749  	reservedColumnsCnt := len(catalog.SystemDBSchema.ColDefs) +
  3750  		len(catalog.SystemColumnSchema.ColDefs) +
  3751  		len(catalog.SystemTableSchema.ColDefs)
  3752  	{
  3753  		// account 2
  3754  		// check data for good
  3755  		_, tbl := tae.GetRelation()
  3756  		testutil.CheckAllColRowsByScan(t, tbl, 35, false)
  3757  		// [mo_catalog, db]
  3758  		assert.Equal(t, 2, len(mustStartTxn(t, tae, 2).DatabaseNames()))
  3759  		_, sysDB = tae.GetDB(pkgcatalog.MO_CATALOG)
  3760  		sysDB.Relations()
  3761  		sysDBTbl, _ := sysDB.GetRelationByName(pkgcatalog.MO_DATABASE)
  3762  		// [mo_catalog, db]
  3763  		testutil.CheckAllColRowsByScan(t, sysDBTbl, 2, true)
  3764  		sysTblTbl, _ := sysDB.GetRelationByName(pkgcatalog.MO_TABLES)
  3765  		// [mo_database, mo_tables, mo_columns, 'mo_users_t2', 'test-table-a-timestamp']
  3766  		testutil.CheckAllColRowsByScan(t, sysTblTbl, 5, true)
  3767  		sysColTbl, _ := sysDB.GetRelationByName(pkgcatalog.MO_COLUMNS)
  3768  		// [mo_database(8), mo_tables(13), mo_columns(19), 'mo_users_t2'(1+1), 'test-table-a-timestamp'(2+1)]
  3769  		testutil.CheckAllColRowsByScan(t, sysColTbl, reservedColumnsCnt+5, true)
  3770  	}
  3771  	{
  3772  		// account 1
  3773  		tae.BindSchema(schema11)
  3774  		tae.BindTenantID(1)
  3775  		// check data for good
  3776  		_, tbl := tae.GetRelation()
  3777  		testutil.CheckAllColRowsByScan(t, tbl, 29, false)
  3778  		// [mo_catalog, db]
  3779  		assert.Equal(t, 2, len(mustStartTxn(t, tae, 1).DatabaseNames()))
  3780  		_, sysDB = tae.GetDB(pkgcatalog.MO_CATALOG)
  3781  		sysDB.Relations()
  3782  		sysDBTbl, _ := sysDB.GetRelationByName(pkgcatalog.MO_DATABASE)
  3783  		// [mo_catalog, db]
  3784  		testutil.CheckAllColRowsByScan(t, sysDBTbl, 2, true)
  3785  		sysTblTbl, _ := sysDB.GetRelationByName(pkgcatalog.MO_TABLES)
  3786  		// [mo_database, mo_tables, mo_columns, 'mo_users_t1', 'test-table-a-timestamp']
  3787  		testutil.CheckAllColRowsByScan(t, sysTblTbl, 5, true)
  3788  		sysColTbl, _ := sysDB.GetRelationByName(pkgcatalog.MO_COLUMNS)
  3789  		// [mo_database(8), mo_tables(13), mo_columns(19), 'mo_users_t1'(1+1), 'test-table-a-timestamp'(3+1)]
  3790  		testutil.CheckAllColRowsByScan(t, sysColTbl, reservedColumnsCnt+6, true)
  3791  	}
  3792  	{
  3793  		// sys account
  3794  		tae.BindSchema(nil)
  3795  		tae.BindTenantID(0)
  3796  		// [mo_catalog]
  3797  		assert.Equal(t, 1, len(mustStartTxn(t, tae, 0).DatabaseNames()))
  3798  		_, sysDB = tae.GetDB(pkgcatalog.MO_CATALOG)
  3799  		sysDB.Relations()
  3800  		sysDBTbl, _ := sysDB.GetRelationByName(pkgcatalog.MO_DATABASE)
  3801  		// [mo_catalog]
  3802  		testutil.CheckAllColRowsByScan(t, sysDBTbl, 1, true)
  3803  		sysTblTbl, _ := sysDB.GetRelationByName(pkgcatalog.MO_TABLES)
  3804  		// [mo_database, mo_tables, mo_columns, 'mo_accounts']
  3805  		testutil.CheckAllColRowsByScan(t, sysTblTbl, 4, true)
  3806  		sysColTbl, _ := sysDB.GetRelationByName(pkgcatalog.MO_COLUMNS)
  3807  		// [mo_database(8), mo_tables(13), mo_columns(19), 'mo_accounts'(1+1)]
  3808  		testutil.CheckAllColRowsByScan(t, sysColTbl, reservedColumnsCnt+2, true)
  3809  	}
  3810  
  3811  }
  3812  
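        // dummyCpkGetter is a no-op checkpoint collector used to drive the logtail handler in tests.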
  3813  type dummyCpkGetter struct{}
  3814  
  3815  func (c *dummyCpkGetter) CollectCheckpointsInRange(ctx context.Context, start, end types.TS) (ckpLoc string, lastEnd types.TS, err error) {
  3816  	return "", types.TS{}, nil
  3817  }
  3818  
  3819  func (c *dummyCpkGetter) FlushTable(ctx context.Context, dbID, tableID uint64, ts types.TS) error {
  3820  	return nil
  3821  }
  3822  
  3823  func tots(ts types.TS) *timestamp.Timestamp {
  3824  	t := ts.ToTimestamp()
  3825  	return &t
  3826  }
  3827  
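        // TestLogtailBasic applies inserts and deletes to a user table, then pulls catalog and data logtails
        // through HandleSyncLogTailReq and checks the returned batches row by row.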
  3828  func TestLogtailBasic(t *testing.T) {
  3829  	defer testutils.AfterTest(t)()
  3830  	ctx := context.Background()
  3831  
  3832  	opts := config.WithLongScanAndCKPOpts(nil)
  3833  	opts.LogtailCfg = &options.LogtailCfg{PageSize: 30}
  3834  	tae := testutil.NewTestEngine(ctx, ModuleName, t, opts)
  3835  	logMgr := tae.LogtailMgr
  3836  	defer tae.Close()
  3837  
  3838  	// at first, we can see nothing
  3839  	minTs, maxTs := types.BuildTS(0, 0), types.BuildTS(1000, 1000)
  3840  	reader := logMgr.GetReader(minTs, maxTs)
  3841  	require.False(t, reader.HasCatalogChanges())
  3842  	require.Equal(t, 0, len(reader.GetDirtyByTable(1000, 1000).Objs))
  3843  
  3844  	schema := catalog.MockSchemaAll(2, -1)
  3845  	schema.Name = "test"
  3846  	schema.BlockMaxRows = 10
  3847  	// create 2 dbs and 2 tables
  3848  	txn, _ := tae.StartTxn(nil)
  3849  	todropdb, _ := txn.CreateDatabase("todrop", "", "")
  3850  	todropdb.CreateRelation(schema)
  3851  	db, _ := txn.CreateDatabase("db", "", "")
  3852  	tbl, _ := db.CreateRelation(schema)
  3853  	dbID := db.GetID()
  3854  	tableID := tbl.ID()
  3855  	txn.Commit(context.Background())
  3856  	catalogWriteTs := txn.GetPrepareTS()
  3857  
  3858  	// drop the first db
  3859  	txn2, _ := tae.StartTxn(nil)
  3860  	txn2.DropDatabase("todrop")
  3861  	txn2.Commit(context.Background())
  3862  	catalogDropTs := txn2.GetPrepareTS()
  3863  
  3864  	writeTs := make([]types.TS, 0, 120)
  3865  	deleteRowIDs := make([]types.Rowid, 0, 10)
  3866  
  3867  	wg := new(sync.WaitGroup)
  3868  	wg.Add(1)
  3869  	go func() {
  3870  		// insert 100 rows
  3871  		for i := 0; i < 100; i++ {
  3872  			txn, _ := tae.StartTxn(nil)
  3873  			db, _ := txn.GetDatabase("db")
  3874  			tbl, _ := db.GetRelationByName("test")
  3875  			tbl.Append(context.Background(), catalog.MockBatch(schema, 1))
  3876  			require.NoError(t, txn.Commit(context.Background()))
  3877  			writeTs = append(writeTs, txn.GetPrepareTS())
  3878  		}
  3879  		// delete the row whose offset is 5 for every block
  3880  		{
  3881  			// collect rowid
  3882  			txn, _ := tae.StartTxn(nil)
  3883  			db, _ := txn.GetDatabase("db")
  3884  			tbl, _ := db.GetRelationByName("test")
  3885  			blkIt := tbl.MakeObjectIt()
  3886  			for ; blkIt.Valid(); blkIt.Next() {
  3887  				obj := blkIt.GetObject()
  3888  				id := obj.GetMeta().(*catalog.ObjectEntry).ID
  3889  				for j := 0; j < obj.BlkCnt(); j++ {
  3890  					blkID := objectio.NewBlockidWithObjectID(&id, uint16(j))
  3891  					deleteRowIDs = append(deleteRowIDs, *objectio.NewRowid(blkID, 5))
  3892  				}
  3893  			}
  3894  			require.NoError(t, txn.Commit(context.Background()))
  3895  		}
  3896  
  3897  		// delete 2 rows at a time. no special reason, it just comes up
  3898  		for i := 0; i < len(deleteRowIDs); i += 2 {
  3899  			txn, _ := tae.StartTxn(nil)
  3900  			db, _ := txn.GetDatabase("db")
  3901  			tbl, _ := db.GetRelationByName("test")
  3902  			require.NoError(t, tbl.DeleteByPhyAddrKey(deleteRowIDs[i]))
  3903  			if i+1 < len(deleteRowIDs) {
  3904  				tbl.DeleteByPhyAddrKey(deleteRowIDs[i+1])
  3905  			}
  3906  			require.NoError(t, txn.Commit(context.Background()))
  3907  			writeTs = append(writeTs, txn.GetPrepareTS())
  3908  		}
  3909  		wg.Done()
  3910  	}()
  3911  
  3912  	// concurrent read to test race
  3913  	for i := 0; i < 5; i++ {
  3914  		wg.Add(1)
  3915  		go func() {
  3916  			for i := 0; i < 10; i++ {
  3917  				reader := logMgr.GetReader(minTs, maxTs)
  3918  				_ = reader.GetDirtyByTable(dbID, tableID)
  3919  			}
  3920  			wg.Done()
  3921  		}()
  3922  	}
  3923  
  3924  	wg.Wait()
  3925  
  3926  	firstWriteTs, lastWriteTs := writeTs[0], writeTs[len(writeTs)-1]
  3927  
  3928  	reader = logMgr.GetReader(firstWriteTs, lastWriteTs.Next())
  3929  	require.False(t, reader.HasCatalogChanges())
  3930  	reader = logMgr.GetReader(minTs, catalogWriteTs)
  3931  	require.Equal(t, 0, len(reader.GetDirtyByTable(dbID, tableID).Objs))
  3932  	reader = logMgr.GetReader(firstWriteTs, lastWriteTs)
  3933  	require.Equal(t, 0, len(reader.GetDirtyByTable(dbID, tableID-1).Objs))
  3934  	// 10 Objects, every Object has 1 block
  3935  	reader = logMgr.GetReader(firstWriteTs, lastWriteTs)
  3936  	dirties := reader.GetDirtyByTable(dbID, tableID)
  3937  	require.Equal(t, 10, len(dirties.Objs))
  3938  	tots := func(ts types.TS) *timestamp.Timestamp {
  3939  		return &timestamp.Timestamp{PhysicalTime: types.DecodeInt64(ts[4:12]), LogicalTime: types.DecodeUint32(ts[:4])}
  3940  	}
  3941  
  3942  	fixedColCnt := 2 // __rowid + commit_time, the columns for a delBatch
  3943  	// check Bat rows count consistency
  3944  	check_same_rows := func(bat *api.Batch, expect int) {
  3945  		for i, vec := range bat.Vecs {
  3946  			col, err := vector.ProtoVectorToVector(vec)
  3947  			require.NoError(t, err)
  3948  			require.Equal(t, expect, col.Length(), "columns %d", i)
  3949  		}
  3950  	}
  3951  
  3952  	// get db catalog change
  3953  	resp, close, err := logtail.HandleSyncLogTailReq(ctx, new(dummyCpkGetter), tae.LogtailMgr, tae.Catalog, api.SyncLogTailReq{
  3954  		CnHave: tots(minTs),
  3955  		CnWant: tots(catalogDropTs),
  3956  		Table:  &api.TableID{DbId: pkgcatalog.MO_CATALOG_ID, TbId: pkgcatalog.MO_DATABASE_ID},
  3957  	}, true)
  3958  	require.NoError(t, err)
  3959  	require.Equal(t, 3, len(resp.Commands)) // insert and delete
  3960  
  3961  	require.Equal(t, api.Entry_Insert, resp.Commands[0].EntryType)
  3962  	require.Equal(t, len(catalog.SystemDBSchema.ColDefs)+fixedColCnt, len(resp.Commands[0].Bat.Vecs))
  3963  	check_same_rows(resp.Commands[0].Bat, 2)                                 // 2 db
  3964  	datname, err := vector.ProtoVectorToVector(resp.Commands[0].Bat.Vecs[3]) // datname column
  3965  	require.NoError(t, err)
  3966  	require.Equal(t, "todrop", datname.GetStringAt(0))
  3967  	require.Equal(t, "db", datname.GetStringAt(1))
  3968  
  3969  	require.Equal(t, api.Entry_Delete, resp.Commands[1].EntryType)
  3970  	require.Equal(t, fixedColCnt+1, len(resp.Commands[1].Bat.Vecs))
  3971  	check_same_rows(resp.Commands[1].Bat, 1) // 1 drop db
  3972  
  3973  	close()
  3974  
  3975  	// get table catalog change
  3976  	resp, close, err = logtail.HandleSyncLogTailReq(ctx, new(dummyCpkGetter), tae.LogtailMgr, tae.Catalog, api.SyncLogTailReq{
  3977  		CnHave: tots(minTs),
  3978  		CnWant: tots(catalogDropTs),
  3979  		Table:  &api.TableID{DbId: pkgcatalog.MO_CATALOG_ID, TbId: pkgcatalog.MO_TABLES_ID},
  3980  	}, true)
  3981  	require.NoError(t, err)
  3982  	require.Equal(t, 1, len(resp.Commands)) // insert
  3983  	require.Equal(t, api.Entry_Insert, resp.Commands[0].EntryType)
  3984  	require.Equal(t, len(catalog.SystemTableSchema.ColDefs)+fixedColCnt, len(resp.Commands[0].Bat.Vecs))
  3985  	check_same_rows(resp.Commands[0].Bat, 2)                                 // 2 tables
  3986  	relname, err := vector.ProtoVectorToVector(resp.Commands[0].Bat.Vecs[3]) // relname column
  3987  	require.NoError(t, err)
  3988  	require.Equal(t, schema.Name, relname.GetStringAt(0))
  3989  	require.Equal(t, schema.Name, relname.GetStringAt(1))
  3990  	close()
  3991  
  3992  	// get columns catalog change
  3993  	resp, close, err = logtail.HandleSyncLogTailReq(ctx, new(dummyCpkGetter), tae.LogtailMgr, tae.Catalog, api.SyncLogTailReq{
  3994  		CnHave: tots(minTs),
  3995  		CnWant: tots(catalogDropTs),
  3996  		Table:  &api.TableID{DbId: pkgcatalog.MO_CATALOG_ID, TbId: pkgcatalog.MO_COLUMNS_ID},
  3997  	}, true)
  3998  	require.NoError(t, err)
  3999  	require.Equal(t, 1, len(resp.Commands)) // insert
  4000  	require.Equal(t, api.Entry_Insert, resp.Commands[0].EntryType)
  4001  	require.Equal(t, len(catalog.SystemColumnSchema.ColDefs)+fixedColCnt, len(resp.Commands[0].Bat.Vecs))
  4002  	// sysColumnsCount := len(catalog.SystemDBSchema.ColDefs) + len(catalog.SystemTableSchema.ColDefs) + len(catalog.SystemColumnSchema.ColDefs)
  4003  	check_same_rows(resp.Commands[0].Bat, len(schema.ColDefs)*2) // column count of 2 tables
  4004  	close()
  4005  
  4006  	// get user table change
  4007  	resp, close, err = logtail.HandleSyncLogTailReq(ctx, new(dummyCpkGetter), tae.LogtailMgr, tae.Catalog, api.SyncLogTailReq{
  4008  		CnHave: tots(firstWriteTs.Next()), // skip the first write deliberately,
  4009  		CnWant: tots(lastWriteTs),
  4010  		Table:  &api.TableID{DbId: dbID, TbId: tableID},
  4011  	}, true)
  4012  	require.NoError(t, err)
  4013  	require.Equal(t, 2, len(resp.Commands)) // 2 commands: insert data and delete data
  4014  
  4015  	// blk meta change
  4016  	// blkMetaEntry := resp.Commands[0]
  4017  	// require.Equal(t, api.Entry_Insert, blkMetaEntry.EntryType)
  4018  	// require.Equal(t, len(logtail.BlkMetaSchema.ColDefs)+fixedColCnt, len(blkMetaEntry.Bat.Vecs))
  4019  	// check_same_rows(blkMetaEntry.Bat, 9) // 9 blocks, because the first write is excluded.
  4020  
  4021  	// check data change
  4022  	insDataEntry := resp.Commands[0]
  4023  	require.Equal(t, api.Entry_Insert, insDataEntry.EntryType)
  4024  	require.Equal(t, len(schema.ColDefs)+1, len(insDataEntry.Bat.Vecs)) // 5 columns, rowid + commit ts + 2 visible
  4025  	check_same_rows(insDataEntry.Bat, 99)                               // 99 rows, because the first write is excluded.
  4026  	// test the first user col; this is probably fragile since it depends on the details of MockSchema
  4027  	// if something changes, deleting this check is okay.
  4028  	firstCol, err := vector.ProtoVectorToVector(insDataEntry.Bat.Vecs[2]) // mock_0 column, int8 type
  4029  	require.Equal(t, types.T_int8, firstCol.GetType().Oid)
  4030  	require.NoError(t, err)
  4031  
  4032  	delDataEntry := resp.Commands[1]
  4033  	require.Equal(t, api.Entry_Delete, delDataEntry.EntryType)
  4034  	require.Equal(t, fixedColCnt+1, len(delDataEntry.Bat.Vecs)) // 3 columns, rowid + commit_ts + aborted
  4035  	check_same_rows(delDataEntry.Bat, 10)
  4036  
  4037  	// check delete rowids are exactly what we want
  4038  	rowids, err := vector.ProtoVectorToVector(delDataEntry.Bat.Vecs[0])
  4039  	require.NoError(t, err)
  4040  	require.Equal(t, types.T_Rowid, rowids.GetType().Oid)
  4041  	rowidMap := make(map[types.Rowid]int)
  4042  	for _, id := range deleteRowIDs {
  4043  		rowidMap[id] = 1
  4044  	}
  4045  	for i := int64(0); i < 10; i++ {
  4046  		id := vector.MustFixedCol[types.Rowid](rowids)[i]
  4047  		rowidMap[id] = rowidMap[id] + 1
  4048  	}
  4049  	require.Equal(t, 10, len(rowidMap))
  4050  	for _, v := range rowidMap {
  4051  		require.Equal(t, 2, v)
  4052  	}
  4053  	close()
  4054  }
  4055  
  4056  // txn1: create relation and append, half blk
  4057  // txn2: compact
  4058  // txn3: append, shouldn't get rw
  4059  func TestGetLastAppender(t *testing.T) {
  4060  	defer testutils.AfterTest(t)()
  4061  	ctx := context.Background()
  4062  
  4063  	opts := config.WithLongScanAndCKPOpts(nil)
  4064  	tae := testutil.NewTestEngine(ctx, ModuleName, t, opts)
  4065  	defer tae.Close()
  4066  	schema := catalog.MockSchemaAll(1, -1)
  4067  	schema.BlockMaxRows = 10
  4068  	schema.ObjectMaxBlocks = 2
  4069  	tae.BindSchema(schema)
  4070  	bat := catalog.MockBatch(schema, 14)
  4071  	bats := bat.Split(2)
  4072  
  4073  	tae.CreateRelAndAppend(bats[0], true)
  4074  	t.Log(tae.Catalog.SimplePPString(3))
  4075  
  4076  	tae.CompactBlocks(false)
  4077  	t.Log(tae.Catalog.SimplePPString(3))
  4078  
  4079  	tae.Restart(ctx)
  4080  
  4081  	txn, rel := tae.GetRelation()
  4082  	rel.Append(context.Background(), bats[1])
  4083  	require.NoError(t, txn.Commit(context.Background()))
  4084  }
  4085  
  4086  // txn1[s1,p1,e1] append1
  4087  // txn2[s2,p2,e2] append2
  4088  // txn3[s3,p3,e3] append3
  4089  // collect [0,p1] [0,p2] [p1+1,p2] [p1+1,p3]
  4090  // check data, row count, commit ts
  4091  // TODO: 1. in 2PC, commit ts != prepare ts; 2. abort
  4092  func TestCollectInsert(t *testing.T) {
  4093  	defer testutils.AfterTest(t)()
  4094  	ctx := context.Background()
  4095  
  4096  	opts := config.WithLongScanAndCKPOpts(nil)
  4097  	tae := testutil.NewTestEngine(ctx, ModuleName, t, opts)
  4098  	defer tae.Close()
  4099  	schema := catalog.MockSchemaAll(1, -1)
  4100  	schema.BlockMaxRows = 20
  4101  	tae.BindSchema(schema)
  4102  	bat := catalog.MockBatch(schema, 12)
  4103  	bats := bat.Split(4)
  4104  
  4105  	tae.CreateRelAndAppend(bats[0], true)
  4106  
  4107  	txn1, rel := tae.GetRelation()
  4108  	assert.NoError(t, rel.Append(context.Background(), bats[1]))
  4109  	assert.NoError(t, txn1.Commit(context.Background()))
  4110  
  4111  	p1 := txn1.GetPrepareTS()
  4112  	t.Logf("p1= %v", p1.ToString())
  4113  
  4114  	txn2, rel := tae.GetRelation()
  4115  	assert.NoError(t, rel.Append(context.Background(), bats[2]))
  4116  	assert.NoError(t, txn2.Commit(context.Background()))
  4117  
  4118  	p2 := txn2.GetPrepareTS()
  4119  	t.Logf("p2= %v", p2.ToString())
  4120  
  4121  	txn3, rel := tae.GetRelation()
  4122  	assert.NoError(t, rel.Append(context.Background(), bats[3]))
  4123  	assert.NoError(t, txn3.Commit(context.Background()))
  4124  
  4125  	p3 := txn3.GetPrepareTS()
  4126  	t.Logf("p3= %v", p3.ToString())
  4127  
  4128  	_, rel = tae.GetRelation()
  4129  	blkit := rel.MakeObjectIt()
  4130  	blkdata := blkit.GetObject().GetMeta().(*catalog.ObjectEntry).GetObjectData()
  4131  
  4132  	batch, err := blkdata.CollectAppendInRange(types.TS{}, p1, true, common.DefaultAllocator)
  4133  	assert.NoError(t, err)
  4134  	t.Log((batch.Attrs))
  4135  	for _, vec := range batch.Vecs {
  4136  		t.Log(vec)
  4137  		assert.Equal(t, 6, vec.Length())
  4138  	}
  4139  	batch, err = blkdata.CollectAppendInRange(types.TS{}, p2, true, common.DefaultAllocator)
  4140  	assert.NoError(t, err)
  4141  	t.Log((batch.Attrs))
  4142  	for _, vec := range batch.Vecs {
  4143  		t.Log(vec)
  4144  		assert.Equal(t, 9, vec.Length())
  4145  	}
  4146  	batch, err = blkdata.CollectAppendInRange(p1.Next(), p2, true, common.DefaultAllocator)
  4147  	assert.NoError(t, err)
  4148  	t.Log((batch.Attrs))
  4149  	for _, vec := range batch.Vecs {
  4150  		t.Log(vec)
  4151  		assert.Equal(t, 3, vec.Length())
  4152  	}
  4153  	batch, err = blkdata.CollectAppendInRange(p1.Next(), p3, true, common.DefaultAllocator)
  4154  	assert.NoError(t, err)
  4155  	t.Log((batch.Attrs))
  4156  	for _, vec := range batch.Vecs {
  4157  		t.Log(vec)
  4158  		assert.Equal(t, 6, vec.Length())
  4159  	}
  4160  }
  4161  
  4162  // txn0 append
  4163  // txn1[s1,p1,e1] delete
  4164  // txn2[s2,p2,e2] delete
  4165  // txn3[s3,p3,e3] delete
  4166  // collect [0,p1] [0,p2] [p1+1,p2] [p1+1,p3]
  4167  func TestCollectDelete(t *testing.T) {
  4168  	defer testutils.AfterTest(t)()
  4169  	ctx := context.Background()
  4170  
  4171  	opts := config.WithLongScanAndCKPOpts(nil)
  4172  	tae := testutil.NewTestEngine(ctx, ModuleName, t, opts)
  4173  	defer tae.Close()
  4174  	schema := catalog.MockSchemaAll(2, 1)
  4175  	schema.BlockMaxRows = 20
  4176  	tae.BindSchema(schema)
  4177  	bat := catalog.MockBatch(schema, 12)
  4178  
  4179  	tae.CreateRelAndAppend(bat, true)
  4180  
  4181  	_, rel := tae.GetRelation()
  4182  	blkit := rel.MakeObjectIt()
  4183  	blkID := blkit.GetObject().GetMeta().(*catalog.ObjectEntry).AsCommonID()
  4184  
  4185  	txn1, rel := tae.GetRelation()
  4186  	assert.NoError(t, rel.RangeDelete(blkID, 0, 0, handle.DT_Normal))
  4187  	assert.NoError(t, txn1.Commit(context.Background()))
  4188  	p1 := txn1.GetPrepareTS()
  4189  	t.Logf("p1= %v", p1.ToString())
  4190  
  4191  	txn2, rel := tae.GetRelation()
  4192  	assert.NoError(t, rel.RangeDelete(blkID, 1, 3, handle.DT_Normal))
  4193  	assert.NoError(t, txn2.Commit(context.Background()))
  4194  	p2 := txn2.GetPrepareTS()
  4195  	t.Logf("p2= %v", p2.ToString())
  4196  
  4197  	txn3, rel := tae.GetRelation()
  4198  	assert.NoError(t, rel.RangeDelete(blkID, 4, 5, handle.DT_Normal))
  4199  	assert.NoError(t, txn3.Commit(context.Background()))
  4200  	p3 := txn3.GetPrepareTS()
  4201  	t.Logf("p3= %v", p3.ToString())
  4202  
  4203  	txn, rel := tae.GetRelation()
  4204  	blkit = rel.MakeObjectIt()
  4205  	blkhandle := blkit.GetObject()
  4206  	blkdata := blkhandle.GetMeta().(*catalog.ObjectEntry).GetObjectData()
  4207  
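        	// The three delete txns removed 1, 3 and 2 rows respectively, so the ranges
        	// collected below are expected to see: [0,p1] -> 1, [0,p2] -> 4,
        	// [p1+1,p2] -> 3, [p1+1,p3] -> 5 deleted rows.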
  4208  	batch, _, err := blkdata.CollectDeleteInRange(context.Background(), types.TS{}, p1, true, common.DefaultAllocator)
  4209  	assert.NoError(t, err)
  4210  	t.Logf(logtail.BatchToString("", batch, false))
  4211  	for i, vec := range batch.Vecs {
  4212  		t.Logf(batch.Attrs[i])
  4213  		assert.Equal(t, 1, vec.Length())
  4214  	}
  4215  	view, err := blkdata.CollectChangesInRange(context.Background(), 0, types.TS{}, p1, common.DefaultAllocator)
  4216  	assert.NoError(t, err)
  4217  	t.Logf(view.DeleteMask.String())
  4218  	assert.Equal(t, 1, view.DeleteMask.GetCardinality())
  4219  
  4220  	batch, _, err = blkdata.CollectDeleteInRange(context.Background(), types.TS{}, p2, true, common.DefaultAllocator)
  4221  	assert.NoError(t, err)
  4222  	t.Logf(logtail.BatchToString("", batch, false))
  4223  	for i, vec := range batch.Vecs {
  4224  		t.Logf(batch.Attrs[i])
  4225  		assert.Equal(t, 4, vec.Length())
  4226  	}
  4227  	view, err = blkdata.CollectChangesInRange(context.Background(), 0, types.TS{}, p2, common.DefaultAllocator)
  4228  	assert.NoError(t, err)
  4229  	t.Logf(view.DeleteMask.String())
  4230  	assert.Equal(t, 4, view.DeleteMask.GetCardinality())
  4231  
  4232  	batch, _, err = blkdata.CollectDeleteInRange(context.Background(), p1.Next(), p2, true, common.DefaultAllocator)
  4233  	assert.NoError(t, err)
  4234  	t.Logf(logtail.BatchToString("", batch, false))
  4235  	for i, vec := range batch.Vecs {
  4236  		t.Logf(batch.Attrs[i])
  4237  		assert.Equal(t, 3, vec.Length())
  4238  	}
  4239  	view, err = blkdata.CollectChangesInRange(context.Background(), 0, p1.Next(), p2, common.DefaultAllocator)
  4240  	assert.NoError(t, err)
  4241  	t.Logf(view.DeleteMask.String())
  4242  	assert.Equal(t, 3, view.DeleteMask.GetCardinality())
  4243  
  4244  	batch, _, err = blkdata.CollectDeleteInRange(context.Background(), p1.Next(), p3, true, common.DefaultAllocator)
  4245  	assert.NoError(t, err)
  4246  	t.Logf(logtail.BatchToString("", batch, false))
  4247  	for i, vec := range batch.Vecs {
  4248  		t.Logf(batch.Attrs[i])
  4249  		assert.Equal(t, 5, vec.Length())
  4250  	}
  4251  	view, err = blkdata.CollectChangesInRange(context.Background(), 0, p1.Next(), p3, common.DefaultAllocator)
  4252  	assert.NoError(t, err)
  4253  	t.Logf(view.DeleteMask.String())
  4254  	assert.Equal(t, 5, view.DeleteMask.GetCardinality())
  4255  
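        	// Persist the collected deletes: write them as a tombstone block, point the
        	// block's delta location at it, then drop the in-memory deletes and check
        	// that the same 5 tombstones are still collectible from the persisted file.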
  4256  	blk1Name := objectio.BuildObjectNameWithObjectID(objectio.NewObjectid())
  4257  	writer, err := blockio.NewBlockWriterNew(tae.Runtime.Fs.Service, blk1Name, 0, nil)
  4258  	assert.NoError(t, err)
  4259  	writer.SetPrimaryKey(3)
  4260  	writer.WriteTombstoneBatch(containers.ToCNBatch(batch))
  4261  	blocks, _, err := writer.Sync(context.TODO())
  4262  	assert.NoError(t, err)
  4263  	assert.Equal(t, 1, len(blocks))
  4264  
  4265  	deltaLoc := blockio.EncodeLocation(
  4266  		writer.GetName(),
  4267  		blocks[0].GetExtent(),
  4268  		uint32(batch.Length()),
  4269  		blocks[0].GetID(),
  4270  	)
  4271  
  4272  	err = blkhandle.UpdateDeltaLoc(0, deltaLoc)
  4273  	assert.NoError(t, err)
  4274  	assert.NoError(t, txn.Commit(context.Background()))
  4275  
  4276  	blkdata.GCInMemeoryDeletesByTSForTest(p3)
  4277  
  4278  	batch, _, err = blkdata.CollectDeleteInRange(context.Background(), p1.Next(), p3, true, common.DefaultAllocator)
  4279  	assert.NoError(t, err)
  4280  	t.Logf(logtail.BatchToString("", batch, false))
  4281  	for i, vec := range batch.Vecs {
  4282  		t.Logf(batch.Attrs[i])
  4283  		assert.Equal(t, 5, vec.Length())
  4284  	}
  4285  	view, err = blkdata.CollectChangesInRange(context.Background(), 0, p1.Next(), p3, common.DefaultAllocator)
  4286  	assert.NoError(t, err)
  4287  	t.Logf(view.DeleteMask.String())
  4288  	assert.Equal(t, 5, view.DeleteMask.GetCardinality())
  4289  
  4290  }
  4291  
  4292  func TestAppendnode(t *testing.T) {
  4293  	defer testutils.AfterTest(t)()
  4294  	ctx := context.Background()
  4295  
  4296  	opts := config.WithLongScanAndCKPOpts(nil)
  4297  	tae := testutil.NewTestEngine(ctx, ModuleName, t, opts)
  4298  	defer tae.Close()
  4299  	schema := catalog.MockSchemaAll(1, 0)
  4300  	schema.BlockMaxRows = 10000
  4301  	schema.ObjectMaxBlocks = 2
  4302  	tae.BindSchema(schema)
  4303  	appendCnt := 20
  4304  	bat := catalog.MockBatch(schema, appendCnt)
  4305  	bats := bat.Split(appendCnt)
  4306  
  4307  	tae.CreateRelAndAppend(bats[0], true)
  4308  	tae.CheckRowsByScan(1, false)
  4309  
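        	// Each worker below appends a single-row batch in its own txn and verifies
        	// that its scan sees exactly one more row before committing.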
  4310  	var wg sync.WaitGroup
  4311  	pool, _ := ants.NewPool(5)
  4312  	defer pool.Release()
  4313  	worker := func(i int) func() {
  4314  		return func() {
  4315  			txn, rel := tae.GetRelation()
  4316  			row := testutil.GetColumnRowsByScan(t, rel, 0, true)
  4317  			err := tae.DoAppendWithTxn(bats[i], txn, true)
  4318  			assert.NoError(t, err)
  4319  			row2 := testutil.GetColumnRowsByScan(t, rel, 0, true)
  4320  			assert.Equal(t, row+1, row2)
  4321  			assert.NoError(t, txn.Commit(context.Background()))
  4322  			wg.Done()
  4323  		}
  4324  	}
  4325  	for i := 1; i < appendCnt; i++ {
  4326  		wg.Add(1)
  4327  		pool.Submit(worker(i))
  4328  	}
  4329  	wg.Wait()
  4330  	tae.CheckRowsByScan(appendCnt, true)
  4331  
  4332  	tae.Restart(ctx)
  4333  	tae.CheckRowsByScan(appendCnt, true)
  4334  }
  4335  
  4336  func TestTxnIdempotent(t *testing.T) {
  4337  	defer testutils.AfterTest(t)()
  4338  	ctx := context.Background()
  4339  
  4340  	opts := config.WithLongScanAndCKPOpts(nil)
  4341  	tae := testutil.NewTestEngine(ctx, ModuleName, t, opts)
  4342  	defer tae.Close()
  4343  
  4344  	schema := catalog.MockSchemaAll(1, 0)
  4345  	schema.BlockMaxRows = 10000
  4346  	schema.ObjectMaxBlocks = 2
  4347  	tae.BindSchema(schema)
  4348  	appendCnt := 20
  4349  	bat := catalog.MockBatch(schema, appendCnt)
  4350  	bats := bat.Split(appendCnt)
  4351  
  4352  	var wg sync.WaitGroup
  4353  
  4354  	tae.CreateRelAndAppend(bats[0], true)
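        	// Committing a txn that has already been rolled back must fail with ErrTxnNotFound.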
  4355  	for i := 0; i < 10; i++ {
  4356  		txn, _ := tae.GetRelation()
  4357  		wg.Add(1)
  4358  		assert.NoError(t, txn.Rollback(context.Background()))
  4359  		go func() {
  4360  			defer wg.Done()
  4361  			assert.True(t, moerr.IsMoErrCode(txn.Commit(context.Background()), moerr.ErrTxnNotFound))
  4362  			// txn.Commit(context.Background())
  4363  		}()
  4364  		wg.Wait()
  4365  	}
  4366  }
  4367  
  4368  // insert 200 rows and do quick compaction
  4369  // expect some dirty tables at first and then zero dirty tables once compaction catches up
  4370  func TestWatchDirty(t *testing.T) {
  4371  	defer testutils.AfterTest(t)()
  4372  	ctx := context.Background()
  4373  
  4374  	opts := config.WithQuickScanAndCKPOpts(nil)
  4375  	tae := testutil.NewTestEngine(ctx, ModuleName, t, opts)
  4376  	defer tae.Close()
  4377  	logMgr := tae.LogtailMgr
  4378  
  4379  	visitor := &catalog.LoopProcessor{}
  4380  	watcher := logtail.NewDirtyCollector(logMgr, opts.Clock, tae.Catalog, visitor)
  4381  
  4382  	tbl, obj := watcher.DirtyCount()
  4383  	assert.Zero(t, obj)
  4384  	assert.Zero(t, tbl)
  4385  
  4386  	schema := catalog.MockSchemaAll(1, 0)
  4387  	schema.BlockMaxRows = 50
  4388  	schema.ObjectMaxBlocks = 2
  4389  	tae.BindSchema(schema)
  4390  	appendCnt := 200
  4391  	bat := catalog.MockBatch(schema, appendCnt)
  4392  	bats := bat.Split(appendCnt)
  4393  
  4394  	tae.CreateRelAndAppend(bats[0], true)
  4395  	tae.CheckRowsByScan(1, false)
  4396  
  4397  	wg := &sync.WaitGroup{}
  4398  	pool, _ := ants.NewPool(3)
  4399  	defer pool.Release()
  4400  	worker := func(i int) func() {
  4401  		return func() {
  4402  			txn, _ := tae.GetRelation()
  4403  			err := tae.DoAppendWithTxn(bats[i], txn, true)
  4404  			assert.NoError(t, err)
  4405  			assert.NoError(t, txn.Commit(context.Background()))
  4406  			wg.Done()
  4407  		}
  4408  	}
  4409  	for i := 1; i < appendCnt; i++ {
  4410  		wg.Add(1)
  4411  		pool.Submit(worker(i))
  4412  	}
  4413  	wg.Wait()
  4414  
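        	// Poll the dirty collector every 5ms; with quick-scan/checkpoint options the
        	// dirty object count is expected to reach zero within 20 seconds.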
  4415  	timer := time.After(20 * time.Second)
  4416  	for {
  4417  		select {
  4418  		case <-timer:
  4419  			t.Errorf("timeout waiting for zero dirty objects")
  4420  			return
  4421  		default:
  4422  			watcher.Run(0)
  4423  			time.Sleep(5 * time.Millisecond)
  4424  			_, objCnt := watcher.DirtyCount()
  4425  			// stop once no dirty objects remain
  4426  			if objCnt == 0 {
  4427  				return
  4428  			}
  4429  		}
  4430  	}
  4431  }
  4432  
  4433  func TestDirtyWatchRace(t *testing.T) {
  4434  	defer testutils.AfterTest(t)()
  4435  	ctx := context.Background()
  4436  
  4437  	opts := config.WithQuickScanAndCKPOpts(nil)
  4438  	tae := testutil.NewTestEngine(ctx, ModuleName, t, opts)
  4439  	defer tae.Close()
  4440  
  4441  	schema := catalog.MockSchemaAll(2, -1)
  4442  	schema.Name = "test"
  4443  	schema.BlockMaxRows = 5
  4444  	schema.ObjectMaxBlocks = 5
  4445  	tae.BindSchema(schema)
  4446  
  4447  	tae.CreateRelAndAppend(catalog.MockBatch(schema, 1), true)
  4448  
  4449  	visitor := &catalog.LoopProcessor{}
  4450  	watcher := logtail.NewDirtyCollector(tae.LogtailMgr, opts.Clock, tae.Catalog, visitor)
  4451  
  4452  	wg := &sync.WaitGroup{}
  4453  
  4454  	addRow := func() {
  4455  		txn, _ := tae.StartTxn(nil)
  4456  		db, _ := txn.GetDatabase("db")
  4457  		tbl, _ := db.GetRelationByName("test")
  4458  		tbl.Append(context.Background(), catalog.MockBatch(schema, 1))
  4459  		assert.NoError(t, txn.Commit(context.Background()))
  4460  		wg.Done()
  4461  	}
  4462  
  4463  	pool, _ := ants.NewPool(5)
  4464  	defer pool.Release()
  4465  
  4466  	for i := 0; i < 50; i++ {
  4467  		wg.Add(1)
  4468  		pool.Submit(addRow)
  4469  	}
  4470  
  4471  	// test race
  4472  	for i := 0; i < 3; i++ {
  4473  		wg.Add(1)
  4474  		go func(i int) {
  4475  			for j := 0; j < 300; j++ {
  4476  				time.Sleep(5 * time.Millisecond)
  4477  				watcher.Run(0)
  4478  				// tbl, obj, blk := watcher.DirtyCount()
  4479  				// t.Logf("t%d: tbl %d, obj %d, blk %d", i, tbl, obj, blk)
  4480  				_, _ = watcher.DirtyCount()
  4481  			}
  4482  			wg.Done()
  4483  		}(i)
  4484  	}
  4485  
  4486  	wg.Wait()
  4487  }
  4488  
  4489  func TestBlockRead(t *testing.T) {
  4490  	defer testutils.AfterTest(t)()
  4491  	ctx := context.Background()
  4492  
  4493  	opts := config.WithLongScanAndCKPOpts(nil)
  4494  	tae := testutil.NewTestEngine(ctx, ModuleName, t, opts)
  4495  	tsAlloc := types.NewTsAlloctor(opts.Clock)
  4496  	defer tae.Close()
  4497  	schema := catalog.MockSchemaAll(2, 1)
  4498  	schema.BlockMaxRows = 20
  4499  	schema.ObjectMaxBlocks = 2
  4500  	tae.BindSchema(schema)
  4501  	bat := catalog.MockBatch(schema, 40)
  4502  
  4503  	tae.CreateRelAndAppend(bat, true)
  4504  
  4505  	_, rel := tae.GetRelation()
  4506  	blkit := rel.MakeObjectIt()
  4507  	blkEntry := blkit.GetObject().GetMeta().(*catalog.ObjectEntry)
  4508  	blkID := blkEntry.AsCommonID()
  4509  
  4510  	beforeDel := tsAlloc.Alloc()
  4511  	txn1, rel := tae.GetRelation()
  4512  	assert.NoError(t, rel.RangeDelete(blkID, 0, 0, handle.DT_Normal))
  4513  	assert.NoError(t, txn1.Commit(context.Background()))
  4514  
  4515  	afterFirstDel := tsAlloc.Alloc()
  4516  	txn2, rel := tae.GetRelation()
  4517  	assert.NoError(t, rel.RangeDelete(blkID, 1, 3, handle.DT_Normal))
  4518  	assert.NoError(t, txn2.Commit(context.Background()))
  4519  
  4520  	afterSecondDel := tsAlloc.Alloc()
  4521  
  4522  	tae.CompactBlocks(false)
  4523  
  4524  	objStats := blkEntry.GetLatestCommittedNodeLocked().BaseNode
  4525  	deltaloc := rel.GetMeta().(*catalog.TableEntry).TryGetTombstone(blkEntry.ID).GetLatestDeltaloc(0)
  4526  	assert.False(t, objStats.IsEmpty())
  4527  	assert.NotEmpty(t, deltaloc)
  4528  
  4529  	bid, sid := blkEntry.ID, blkEntry.ID
  4530  
  4531  	info := &objectio.BlockInfo{
  4532  		BlockID:    *objectio.NewBlockidWithObjectID(&bid, 0),
  4533  		SegmentID:  *sid.Segment(),
  4534  		EntryState: true,
  4535  	}
  4536  	metaloc := objStats.ObjectLocation()
  4537  	metaloc.SetRows(schema.BlockMaxRows)
  4538  	info.SetMetaLocation(metaloc)
  4539  	info.SetDeltaLocation(deltaloc)
  4540  
  4541  	columns := make([]string, 0)
  4542  	colIdxs := make([]uint16, 0)
  4543  	colTyps := make([]types.Type, 0)
  4544  	defs := schema.ColDefs[:]
  4545  	rand.Shuffle(len(defs), func(i, j int) { defs[i], defs[j] = defs[j], defs[i] })
  4546  	for _, col := range defs {
  4547  		columns = append(columns, col.Name)
  4548  		colIdxs = append(colIdxs, uint16(col.Idx))
  4549  		colTyps = append(colTyps, col.Type)
  4550  	}
  4551  	t.Log("read columns: ", columns)
  4552  	fs := tae.DB.Runtime.Fs.Service
  4553  	pool, err := mpool.NewMPool("test", 0, mpool.NoFixed)
  4554  	assert.NoError(t, err)
  4555  	infos := make([][]*objectio.BlockInfo, 0)
  4556  	infos = append(infos, []*objectio.BlockInfo{info})
  4557  	err = blockio.BlockPrefetch(colIdxs, fs, infos, false)
  4558  	assert.NoError(t, err)
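        	// Read the block at three snapshots: beforeDel sees all 20 rows, afterFirstDel
        	// sees 19 (row 0 deleted), afterSecondDel sees 16 (rows 1-3 deleted as well).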
  4559  	b1, err := blockio.BlockReadInner(
  4560  		context.Background(), info, nil, colIdxs, colTyps,
  4561  		beforeDel, nil, fs, pool, nil, fileservice.Policy(0),
  4562  	)
  4563  	assert.NoError(t, err)
  4564  	assert.Equal(t, len(columns), len(b1.Vecs))
  4565  	assert.Equal(t, 20, b1.Vecs[0].Length())
  4566  
  4567  	b2, err := blockio.BlockReadInner(
  4568  		context.Background(), info, nil, colIdxs, colTyps,
  4569  		afterFirstDel, nil, fs, pool, nil, fileservice.Policy(0),
  4570  	)
  4571  	assert.NoError(t, err)
  4572  	assert.Equal(t, 19, b2.Vecs[0].Length())
  4573  	b3, err := blockio.BlockReadInner(
  4574  		context.Background(), info, nil, colIdxs, colTyps,
  4575  		afterSecondDel, nil, fs, pool, nil, fileservice.Policy(0),
  4576  	)
  4577  	assert.NoError(t, err)
  4578  	assert.Equal(t, len(columns), len(b2.Vecs))
  4579  	assert.Equal(t, 16, b3.Vecs[0].Length())
  4580  
  4581  	// read rowid column only
  4582  	b4, err := blockio.BlockReadInner(
  4583  		context.Background(), info,
  4584  		nil,
  4585  		[]uint16{2},
  4586  		[]types.Type{types.T_Rowid.ToType()},
  4587  		afterSecondDel, nil, fs, pool, nil, fileservice.Policy(0),
  4588  	)
  4589  	assert.NoError(t, err)
  4590  	assert.Equal(t, 1, len(b4.Vecs))
  4591  	assert.Equal(t, 16, b4.Vecs[0].Length())
  4592  
  4593  	// read rowid column only
  4594  	info.EntryState = false
  4595  	b5, err := blockio.BlockReadInner(
  4596  		context.Background(), info,
  4597  		nil, []uint16{2},
  4598  		[]types.Type{types.T_Rowid.ToType()},
  4599  		afterSecondDel, nil, fs, pool, nil, fileservice.Policy(0),
  4600  	)
  4601  	assert.NoError(t, err)
  4602  	assert.Equal(t, 1, len(b5.Vecs))
  4603  	assert.Equal(t, 16, b5.Vecs[0].Length())
  4604  }
  4605  
  4606  func TestCompactDeltaBlk(t *testing.T) {
  4607  	defer testutils.AfterTest(t)()
  4608  	testutils.EnsureNoLeak(t)
  4609  	ctx := context.Background()
  4610  
  4611  	opts := config.WithLongScanAndCKPOpts(nil)
  4612  	tae := testutil.NewTestEngine(ctx, ModuleName, t, opts)
  4613  	defer tae.Close()
  4614  	schema := catalog.MockSchemaAll(3, 1)
  4615  	schema.BlockMaxRows = 6
  4616  	schema.ObjectMaxBlocks = 2
  4617  	tae.BindSchema(schema)
  4618  	bat := catalog.MockBatch(schema, 5)
  4619  
  4620  	tae.CreateRelAndAppend(bat, true)
  4621  
  4622  	{
  4623  		v := testutil.GetSingleSortKeyValue(bat, schema, 1)
  4624  		filter := handle.NewEQFilter(v)
  4625  		txn2, rel := tae.GetRelation()
  4626  		testutil.CheckAllColRowsByScan(t, rel, 5, true)
  4627  		_ = rel.DeleteByFilter(context.Background(), filter)
  4628  		assert.Nil(t, txn2.Commit(context.Background()))
  4629  	}
  4630  
  4631  	_, rel := tae.GetRelation()
  4632  	testutil.CheckAllColRowsByScan(t, rel, 4, true)
  4633  
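        	// Flush the appendable object: the delete above should be persisted as a delta
        	// location on the old object, while the newly created object has no tombstone.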
  4634  	{
  4635  		txn, rel := tae.GetRelation()
  4636  		it := rel.MakeObjectIt()
  4637  		blk := it.GetObject()
  4638  		meta := blk.GetMeta().(*catalog.ObjectEntry)
  4639  		task, err := jobs.NewFlushTableTailTask(nil, txn, []*catalog.ObjectEntry{meta}, tae.DB.Runtime, txn.GetStartTS())
  4640  		assert.NoError(t, err)
  4641  		err = task.OnExec(context.Background())
  4642  		assert.NoError(t, err)
  4643  		assert.False(t, meta.GetLatestNodeLocked().BaseNode.IsEmpty())
  4644  		assert.False(t, rel.GetMeta().(*catalog.TableEntry).TryGetTombstone(meta.ID).GetLatestDeltaloc(0).IsEmpty())
  4645  		created := task.GetCreatedObjects().GetMeta().(*catalog.ObjectEntry)
  4646  		assert.False(t, created.GetLatestNodeLocked().BaseNode.IsEmpty())
  4647  		assert.Nil(t, rel.GetMeta().(*catalog.TableEntry).TryGetTombstone(created.ID))
  4648  		err = txn.Commit(context.Background())
  4649  		assert.Nil(t, err)
  4650  		err = meta.GetTable().RemoveEntry(meta)
  4651  		assert.Nil(t, err)
  4652  	}
  4653  	{
  4654  		v := testutil.GetSingleSortKeyValue(bat, schema, 2)
  4655  		filter := handle.NewEQFilter(v)
  4656  		txn2, rel := tae.GetRelation()
  4657  		testutil.CheckAllColRowsByScan(t, rel, 4, true)
  4658  		_ = rel.DeleteByFilter(context.Background(), filter)
  4659  		assert.Nil(t, txn2.Commit(context.Background()))
  4660  	}
  4661  	{
  4662  		txn, rel := tae.GetRelation()
  4663  		it := rel.MakeObjectIt()
  4664  		blk := it.GetObject()
  4665  		meta := blk.GetMeta().(*catalog.ObjectEntry)
  4666  		assert.False(t, meta.IsAppendable())
  4667  		task2, err := jobs.NewFlushTableTailTask(nil, txn, []*catalog.ObjectEntry{meta}, tae.DB.Runtime, txn.GetStartTS())
  4668  		assert.NoError(t, err)
  4669  		err = task2.OnExec(context.Background())
  4670  		assert.NoError(t, err)
  4671  		assert.Nil(t, txn.Commit(context.Background()))
  4672  		t.Log(tae.Catalog.SimplePPString(3))
  4673  
  4674  		txn, rel = tae.GetRelation()
  4675  		task, err := jobs.NewMergeObjectsTask(nil, txn, []*catalog.ObjectEntry{meta}, tae.DB.Runtime, 0)
  4676  		assert.NoError(t, err)
  4677  		err = task.OnExec(context.Background())
  4678  		assert.NoError(t, err)
  4679  		t.Log(tae.Catalog.SimplePPString(3))
  4680  		assert.True(t, !meta.GetLatestCommittedNodeLocked().BaseNode.IsEmpty())
  4681  		assert.True(t, !rel.GetMeta().(*catalog.TableEntry).TryGetTombstone(meta.ID).GetLatestDeltaloc(0).IsEmpty())
  4682  		created := task.GetCreatedObjects()[0]
  4683  		assert.False(t, created.GetLatestNodeLocked().BaseNode.IsEmpty())
  4684  		assert.Nil(t, rel.GetMeta().(*catalog.TableEntry).TryGetTombstone(created.ID))
  4685  		err = txn.Commit(context.Background())
  4686  		assert.Nil(t, err)
  4687  	}
  4688  
  4689  	_, rel = tae.GetRelation()
  4690  	testutil.CheckAllColRowsByScan(t, rel, 3, true)
  4691  
  4692  	tae.Restart(ctx)
  4693  	_, rel = tae.GetRelation()
  4694  	testutil.CheckAllColRowsByScan(t, rel, 3, true)
  4695  }
  4696  
  4697  func TestFlushTable(t *testing.T) {
  4698  	defer testutils.AfterTest(t)()
  4699  	ctx := context.Background()
  4700  
  4701  	opts := config.WithLongScanAndCKPOpts(nil)
  4702  	tae := testutil.NewTestEngine(ctx, ModuleName, t, opts)
  4703  	defer tae.Close()
  4704  
  4705  	tae.BGCheckpointRunner.DebugUpdateOptions(
  4706  		checkpoint.WithForceFlushCheckInterval(time.Millisecond * 5))
  4707  
  4708  	schema := catalog.MockSchemaAll(3, 1)
  4709  	schema.BlockMaxRows = 10
  4710  	schema.ObjectMaxBlocks = 2
  4711  	tae.BindSchema(schema)
  4712  	bat := catalog.MockBatch(schema, 21)
  4713  	defer bat.Close()
  4714  
  4715  	tae.CreateRelAndAppend(bat, true)
  4716  
  4717  	_, rel := tae.GetRelation()
  4718  	db, err := rel.GetDB()
  4719  	assert.Nil(t, err)
  4720  	table, err := db.GetRelationByName(schema.Name)
  4721  	assert.Nil(t, err)
  4722  	err = tae.FlushTable(
  4723  		context.Background(),
  4724  		0,
  4725  		db.GetID(),
  4726  		table.ID(),
  4727  		types.BuildTS(time.Now().UTC().UnixNano(), 0))
  4728  	assert.NoError(t, err)
  4729  	t.Log(tae.Catalog.SimplePPString(common.PPL1))
  4730  
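        	// After FlushTable, every object in the table is expected to hold persisted data.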
  4731  	txn, rel := tae.GetRelation()
  4732  	it := rel.MakeObjectIt()
  4733  	for it.Valid() {
  4734  		blk := it.GetObject().GetMeta().(*catalog.ObjectEntry)
  4735  		assert.True(t, blk.HasPersistedData())
  4736  		it.Next()
  4737  	}
  4738  	assert.NoError(t, txn.Commit(context.Background()))
  4739  }
  4740  
  4741  func TestReadCheckpoint(t *testing.T) {
  4742  	defer testutils.AfterTest(t)()
  4743  	ctx := context.Background()
  4744  
  4745  	opts := config.WithQuickScanAndCKPOpts(nil)
  4746  	tae := testutil.NewTestEngine(ctx, ModuleName, t, opts)
  4747  	defer tae.Close()
  4748  
  4749  	schema := catalog.MockSchemaAll(3, 1)
  4750  	schema.BlockMaxRows = 10
  4751  	schema.ObjectMaxBlocks = 2
  4752  	tae.BindSchema(schema)
  4753  	bat := catalog.MockBatch(schema, 21)
  4754  	defer bat.Close()
  4755  
  4756  	tae.CreateRelAndAppend(bat, true)
  4757  	now := time.Now()
  4758  	testutils.WaitExpect(10000, func() bool {
  4759  		return tae.Runtime.Scheduler.GetPenddingLSNCnt() == 0
  4760  	})
  4761  	t.Log(time.Since(now))
  4762  	t.Logf("Checkpointed: %d", tae.Runtime.Scheduler.GetCheckpointedLSN())
  4763  	t.Logf("GetPenddingLSNCnt: %d", tae.Runtime.Scheduler.GetPenddingLSNCnt())
  4764  	assert.Equal(t, uint64(0), tae.Runtime.Scheduler.GetPenddingLSNCnt())
  4765  	tids := []uint64{
  4766  		pkgcatalog.MO_DATABASE_ID,
  4767  		pkgcatalog.MO_TABLES_ID,
  4768  		pkgcatalog.MO_COLUMNS_ID,
  4769  		1000,
  4770  	}
  4771  
  4772  	now = time.Now()
  4773  	testutils.WaitExpect(10000, func() bool {
  4774  		return tae.Runtime.Scheduler.GetPenddingLSNCnt() == 0
  4775  	})
  4776  	t.Log(time.Since(now))
  4777  	assert.Equal(t, uint64(0), tae.Runtime.Scheduler.GetPenddingLSNCnt())
  4778  
  4779  	now = time.Now()
  4780  	testutils.WaitExpect(10000, func() bool {
  4781  		return tae.BGCheckpointRunner.GetPenddingIncrementalCount() == 0
  4782  	})
  4783  	t.Log(time.Since(now))
  4784  	assert.Equal(t, 0, tae.BGCheckpointRunner.GetPenddingIncrementalCount())
  4785  
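        	// GC checkpoint entries up to gcTS, then verify the remaining global checkpoints
        	// can still be read per table id, both before and after a restart.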
  4786  	gcTS := types.BuildTS(time.Now().UTC().UnixNano(), 0)
  4787  	err := tae.BGCheckpointRunner.GCByTS(context.Background(), gcTS)
  4788  	assert.NoError(t, err)
  4789  	now = time.Now()
  4790  	assert.Equal(t, uint64(0), tae.Wal.GetPenddingCnt())
  4791  	testutils.WaitExpect(10000, func() bool {
  4792  		tae.BGCheckpointRunner.ExistPendingEntryToGC()
  4793  		return !tae.BGCheckpointRunner.ExistPendingEntryToGC()
  4794  	})
  4795  	t.Log(time.Since(now))
  4796  	assert.False(t, tae.BGCheckpointRunner.ExistPendingEntryToGC())
  4797  	entries := tae.BGCheckpointRunner.GetAllGlobalCheckpoints()
  4798  	for _, entry := range entries {
  4799  		t.Log(entry.String())
  4800  	}
  4801  	for _, entry := range entries {
  4802  		for _, tid := range tids {
  4803  			ins, del, _, _, err := entry.GetByTableID(context.Background(), tae.Runtime.Fs, tid)
  4804  			assert.NoError(t, err)
  4805  			t.Logf("table %d", tid)
  4806  			if ins != nil {
  4807  				logutil.Infof("ins is %v", ins.Vecs[0].String())
  4808  				t.Log(common.ApiBatchToString(ins, 3))
  4809  			}
  4810  			if del != nil {
  4811  				t.Log(common.ApiBatchToString(del, 3))
  4812  			}
  4813  		}
  4814  	}
  4815  	tae.Restart(ctx)
  4816  	entries = tae.BGCheckpointRunner.GetAllGlobalCheckpoints()
  4817  	entry := entries[len(entries)-1]
  4818  	for _, tid := range tids {
  4819  		ins, del, _, _, err := entry.GetByTableID(context.Background(), tae.Runtime.Fs, tid)
  4820  		assert.NoError(t, err)
  4821  		t.Logf("table %d", tid)
  4822  		if ins != nil {
  4823  			t.Log(common.ApiBatchToString(ins, 3))
  4824  		}
  4825  		if del != nil {
  4826  			t.Log(common.ApiBatchToString(del, 3))
  4827  		}
  4828  	}
  4829  }
  4830  
  4831  func TestDelete4(t *testing.T) {
  4832  	t.Skip(any("This case crashes occasionally, is being fixed, skip it for now"))
  4833  	defer testutils.AfterTest(t)()
  4834  	ctx := context.Background()
  4835  
  4836  	opts := config.WithQuickScanAndCKPOpts(nil)
  4837  	tae := testutil.NewTestEngine(ctx, ModuleName, t, opts)
  4838  	defer tae.Close()
  4839  	schema := catalog.NewEmptySchema("xx")
  4840  	schema.AppendPKCol("name", types.T_varchar.ToType(), 0)
  4841  	schema.AppendCol("offset", types.T_uint32.ToType())
  4842  	schema.Finalize(false)
  4843  	schema.BlockMaxRows = 50
  4844  	schema.ObjectMaxBlocks = 5
  4845  	tae.BindSchema(schema)
  4846  	bat := catalog.MockBatch(schema, 1)
  4847  	bat.Vecs[1].Update(0, uint32(0), false)
  4848  	defer bat.Close()
  4849  	tae.CreateRelAndAppend(bat, true)
  4850  
  4851  	filter := handle.NewEQFilter(bat.Vecs[0].Get(0))
  4852  	var wg sync.WaitGroup
  4853  	var count atomic.Uint32
  4854  
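        	// Each run deletes the current row, re-appends it with offset+1, and on a
        	// successful commit bumps the shared counter from the old value to the new one.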
  4855  	run := func() {
  4856  		defer wg.Done()
  4857  		time.Sleep(time.Duration(rand.Intn(20)+1) * time.Millisecond)
  4858  		cloneBat := bat.CloneWindow(0, 1)
  4859  		defer cloneBat.Close()
  4860  		txn, rel := tae.GetRelation()
  4861  		id, offset, err := rel.GetByFilter(context.Background(), filter)
  4862  		if err != nil {
  4863  			txn.Rollback(context.Background())
  4864  			return
  4865  		}
  4866  		v, _, err := rel.GetValue(id, offset, 1)
  4867  		if err != nil {
  4868  			txn.Rollback(context.Background())
  4869  			return
  4870  		}
  4871  		oldV := v.(uint32)
  4872  		newV := oldV + 1
  4873  		if err := rel.RangeDelete(id, offset, offset, handle.DT_Normal); err != nil {
  4874  			txn.Rollback(context.Background())
  4875  			return
  4876  		}
  4877  		cloneBat.Vecs[1].Update(0, newV, false)
  4878  		if err := rel.Append(context.Background(), cloneBat); err != nil {
  4879  			txn.Rollback(context.Background())
  4880  			return
  4881  		}
  4882  		if err := txn.Commit(context.Background()); err == nil {
  4883  			ok := count.CompareAndSwap(oldV, newV)
  4884  			for !ok {
  4885  				ok = count.CompareAndSwap(oldV, newV)
  4886  			}
  4887  			t.Logf("RangeDelete block-%d, offset-%d, old %d newV %d, %s", id.BlockID, offset, oldV, newV, txn.GetCommitTS().ToString())
  4888  		}
  4889  	}
  4890  
  4891  	p, _ := ants.NewPool(20)
  4892  	defer p.Release()
  4893  	for i := 0; i < 100; i++ {
  4894  		wg.Add(1)
  4895  		_ = p.Submit(run)
  4896  	}
  4897  	wg.Wait()
  4898  
  4899  	t.Logf("count=%v", count.Load())
  4900  
  4901  	getValueFn := func() {
  4902  		txn, rel := tae.GetRelation()
  4903  		v, _, err := rel.GetValueByFilter(context.Background(), filter, 1)
  4904  		assert.NoError(t, err)
  4905  		assert.Equal(t, int(count.Load()), int(v.(uint32)))
  4906  		assert.NoError(t, txn.Commit(context.Background()))
  4907  		t.Logf("GetV=%v, %s", v, txn.GetStartTS().ToString())
  4908  	}
  4909  	scanFn := func() {
  4910  		txn, rel := tae.GetRelation()
  4911  		it := rel.MakeObjectIt()
  4912  		for it.Valid() {
  4913  			blk := it.GetObject()
  4914  			for j := 0; j < blk.BlkCnt(); j++ {
  4915  				view, err := blk.GetColumnDataById(context.Background(), uint16(j), 0, common.DefaultAllocator)
  4916  				assert.NoError(t, err)
  4917  				defer view.Close()
  4918  				view.ApplyDeletes()
  4919  				if view.Length() != 0 {
  4920  					t.Logf("block-%d, data=%s", j, logtail.ToStringTemplate(view.GetData(), -1))
  4921  				}
  4922  			}
  4923  			it.Next()
  4924  		}
  4925  		txn.Commit(context.Background())
  4926  	}
  4927  
  4928  	for i := 0; i < 20; i++ {
  4929  		getValueFn()
  4930  		scanFn()
  4931  
  4932  		tae.Restart(ctx)
  4933  
  4934  		getValueFn()
  4935  		scanFn()
  4936  		for j := 0; j < 100; j++ {
  4937  			wg.Add(1)
  4938  			p.Submit(run)
  4939  		}
  4940  		wg.Wait()
  4941  	}
  4942  	t.Log(tae.Catalog.SimplePPString(common.PPL3))
  4943  }
  4944  
  4945  // append, delete, append, get start ts, compact, get active row
  4946  func TestGetActiveRow(t *testing.T) {
  4947  	ctx := context.Background()
  4948  
  4949  	opts := config.WithLongScanAndCKPOpts(nil)
  4950  	tae := testutil.NewTestEngine(ctx, ModuleName, t, opts)
  4951  	defer tae.Close()
  4952  
  4953  	schema := catalog.MockSchemaAll(3, 1)
  4954  	schema.BlockMaxRows = 10
  4955  	schema.ObjectMaxBlocks = 2
  4956  	tae.BindSchema(schema)
  4957  	bat := catalog.MockBatch(schema, 1)
  4958  	defer bat.Close()
  4959  
  4960  	tae.CreateRelAndAppend(bat, true)
  4961  
  4962  	txn, rel := tae.GetRelation()
  4963  	v := testutil.GetSingleSortKeyValue(bat, schema, 0)
  4964  	filter := handle.NewEQFilter(v)
  4965  	id, row, err := rel.GetByFilter(context.Background(), filter)
  4966  	assert.NoError(t, err)
  4967  	err = rel.RangeDelete(id, row, row, handle.DT_Normal)
  4968  	assert.NoError(t, err)
  4969  	assert.NoError(t, txn.Commit(context.Background()))
  4970  
  4971  	txn, rel = tae.GetRelation()
  4972  	assert.NoError(t, rel.Append(context.Background(), bat))
  4973  	assert.NoError(t, txn.Commit(context.Background()))
  4974  
  4975  	_, rel = tae.GetRelation()
  4976  	{
  4977  		txn2, rel2 := tae.GetRelation()
  4978  		it := rel2.MakeObjectIt()
  4979  		blk := it.GetObject().GetMeta().(*catalog.ObjectEntry)
  4980  		task, err := jobs.NewFlushTableTailTask(nil, txn2, []*catalog.ObjectEntry{blk}, tae.Runtime, txn2.GetStartTS())
  4981  		assert.NoError(t, err)
  4982  		err = task.OnExec(context.Background())
  4983  		assert.NoError(t, err)
  4984  		assert.NoError(t, txn2.Commit(context.Background()))
  4985  	}
  4986  	filter = handle.NewEQFilter(v)
  4987  	_, _, err = rel.GetByFilter(context.Background(), filter)
  4988  	assert.NoError(t, err)
  4989  }
  4990  func TestTransfer(t *testing.T) {
  4991  	ctx := context.Background()
  4992  
  4993  	opts := config.WithLongScanAndCKPOpts(nil)
  4994  	tae := testutil.NewTestEngine(ctx, ModuleName, t, opts)
  4995  	defer tae.Close()
  4996  	schema := catalog.MockSchemaAll(5, 3)
  4997  	schema.BlockMaxRows = 100
  4998  	schema.ObjectMaxBlocks = 10
  4999  	tae.BindSchema(schema)
  5000  
  5001  	bat := catalog.MockBatch(schema, 10)
  5002  	defer bat.Close()
  5003  
  5004  	tae.CreateRelAndAppend(bat, true)
  5005  
  5006  	filter := handle.NewEQFilter(bat.Vecs[3].Get(3))
  5007  
  5008  	txn1, rel1 := tae.GetRelation()
  5009  	err := rel1.DeleteByFilter(context.Background(), filter)
  5010  	assert.NoError(t, err)
  5011  
  5012  	meta := rel1.GetMeta().(*catalog.TableEntry)
  5013  	err = tae.FlushTable(context.Background(), 0, meta.GetDB().ID, meta.ID,
  5014  		types.BuildTS(time.Now().UTC().UnixNano(), 0))
  5015  	assert.NoError(t, err)
  5016  
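        	// txn1's delete targeted a row that FlushTable just relocated; the commit can
        	// only succeed because the deleted row id is transferred to the new block.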
  5017  	err = txn1.Commit(context.Background())
  5018  	// assert.True(t, moerr.IsMoErrCode(err, moerr.ErrTxnRWConflict))
  5019  	assert.NoError(t, err)
  5020  
  5021  	txn2, rel2 := tae.GetRelation()
  5022  	_, _, err = rel2.GetValueByFilter(context.Background(), filter, 3)
  5023  	t.Log(err)
  5024  	assert.True(t, moerr.IsMoErrCode(err, moerr.ErrNotFound))
  5025  	v, _, err := rel2.GetValueByFilter(context.Background(), handle.NewEQFilter(bat.Vecs[3].Get(4)), 2)
  5026  	expectV := bat.Vecs[2].Get(4)
  5027  	assert.Equal(t, expectV, v)
  5028  	assert.NoError(t, err)
  5029  	_ = txn2.Commit(context.Background())
  5030  }
  5031  
  5032  func TestTransfer2(t *testing.T) {
  5033  	ctx := context.Background()
  5034  
  5035  	opts := config.WithLongScanAndCKPOpts(nil)
  5036  	tae := testutil.NewTestEngine(ctx, ModuleName, t, opts)
  5037  	defer tae.Close()
  5038  	schema := catalog.MockSchemaAll(5, 3)
  5039  	schema.BlockMaxRows = 10
  5040  	schema.ObjectMaxBlocks = 10
  5041  	tae.BindSchema(schema)
  5042  
  5043  	bat := catalog.MockBatch(schema, 200)
  5044  	defer bat.Close()
  5045  
  5046  	tae.CreateRelAndAppend(bat, true)
  5047  
  5048  	filter := handle.NewEQFilter(bat.Vecs[3].Get(3))
  5049  
  5050  	txn1, rel1 := tae.GetRelation()
  5051  	err := rel1.DeleteByFilter(context.Background(), filter)
  5052  	assert.NoError(t, err)
  5053  
  5054  	tae.CompactBlocks(false)
  5055  	tae.MergeBlocks(false)
  5056  
  5057  	err = txn1.Commit(context.Background())
  5058  	// assert.True(t, moerr.IsMoErrCode(err, moerr.ErrTxnRWConflict))
  5059  	assert.NoError(t, err)
  5060  
  5061  	txn2, rel2 := tae.GetRelation()
  5062  	_, _, err = rel2.GetValueByFilter(context.Background(), filter, 3)
  5063  	t.Log(err)
  5064  	assert.True(t, moerr.IsMoErrCode(err, moerr.ErrNotFound))
  5065  	v, _, err := rel2.GetValueByFilter(context.Background(), handle.NewEQFilter(bat.Vecs[3].Get(4)), 2)
  5066  	expectV := bat.Vecs[2].Get(4)
  5067  	assert.Equal(t, expectV, v)
  5068  	assert.NoError(t, err)
  5069  	_ = txn2.Commit(context.Background())
  5070  }
  5071  
  5072  func TestMergeBlocks3(t *testing.T) {
  5073  	ctx := context.Background()
  5074  
  5075  	opts := config.WithLongScanAndCKPOpts(nil)
  5076  	tae := testutil.NewTestEngine(ctx, ModuleName, t, opts)
  5077  	defer tae.Close()
  5078  	schema := catalog.MockSchemaAll(5, 3)
  5079  	schema.BlockMaxRows = 10
  5080  	schema.ObjectMaxBlocks = 5
  5081  	tae.BindSchema(schema)
  5082  	bat := catalog.MockBatch(schema, 100)
  5083  	defer bat.Close()
  5084  	tae.CreateRelAndAppend(bat, true)
  5085  
  5086  	// flush to non-appendable blocks (nblk)
  5087  	{
  5088  		txn, rel := tae.GetRelation()
  5089  		blkMetas := testutil.GetAllBlockMetas(rel)
  5090  		task, err := jobs.NewFlushTableTailTask(tasks.WaitableCtx, txn, blkMetas, tae.DB.Runtime, types.MaxTs())
  5091  		require.NoError(t, err)
  5092  		require.NoError(t, task.OnExec(context.Background()))
  5093  		require.NoError(t, txn.Commit(context.Background()))
  5094  	}
  5095  
  5096  	filter15 := handle.NewEQFilter(bat.Vecs[3].Get(15))
  5097  	filter19 := handle.NewEQFilter(bat.Vecs[3].Get(19))
  5098  	filter18 := handle.NewEQFilter(bat.Vecs[3].Get(18))
  5099  	filter17 := handle.NewEQFilter(bat.Vecs[3].Get(17))
  5100  	// delete all rows in the first blk of obj1 and the 5th and 9th rows in blk2
  5101  	{
  5102  		txn, rel := tae.GetRelation()
  5103  		objit := rel.MakeObjectIt()
  5104  		obj1 := objit.GetObject().GetMeta().(*catalog.ObjectEntry)
  5105  		objHandle, err := rel.GetObject(&obj1.ID)
  5106  		require.NoError(t, err)
  5107  
  5108  		view, err := objHandle.GetColumnDataByName(context.Background(), 0, catalog.PhyAddrColumnName, common.DefaultAllocator)
  5109  		view.GetData()
  5110  		require.NoError(t, err)
  5111  		pkDef := schema.GetPrimaryKey()
  5112  		pkView, err := objHandle.GetColumnDataByName(context.Background(), 0, pkDef.Name, common.DefaultAllocator)
  5113  		pkView.GetData()
  5114  		require.NoError(t, err)
  5115  		err = rel.DeleteByPhyAddrKeys(view.GetData(), pkView.GetData())
  5116  		require.NoError(t, err)
  5117  
  5118  		require.NoError(t, rel.DeleteByFilter(context.Background(), filter15))
  5119  		require.NoError(t, rel.DeleteByFilter(context.Background(), filter19))
  5120  		require.NoError(t, txn.Commit(context.Background()))
  5121  	}
  5122  
  5123  	// 1. merge the first Object
  5124  	// 2. delete the 7th row in blk2 while the merge task is executing
  5125  	// 3. delete the 8th row in blk2 and commit it after the merge, to test transfer
  5126  	{
  5127  		del8txn, rel8 := tae.GetRelation()
  5128  		valrow8, null, err := rel8.GetValueByFilter(context.Background(), filter18, schema.GetColIdx(catalog.PhyAddrColumnName))
  5129  		require.NoError(t, err)
  5130  		require.False(t, null)
  5131  
  5132  		del7txn, rel7 := tae.GetRelation()
  5133  		mergetxn, relm := tae.GetRelation()
  5134  
  5135  		// merge first Object
  5136  		objit := relm.MakeObjectIt()
  5137  		obj1 := objit.GetObject().GetMeta().(*catalog.ObjectEntry)
  5138  		require.NoError(t, err)
  5139  
  5140  		objsToMerge := []*catalog.ObjectEntry{obj1}
  5141  		task, err := jobs.NewMergeObjectsTask(nil, mergetxn, objsToMerge, tae.Runtime, 0)
  5142  		require.NoError(t, err)
  5143  		require.NoError(t, task.OnExec(context.Background()))
  5144  
  5145  		// delete del7 after starting merge txn
  5146  		require.NoError(t, rel7.DeleteByFilter(context.Background(), filter17))
  5147  		require.NoError(t, del7txn.Commit(context.Background()))
  5148  
  5149  		// commit merge, and it will carry del7 to the new block
  5150  		require.NoError(t, mergetxn.Commit(context.Background()))
  5151  
  5152  		// delete the 8th row; it is expected to be transferred correctly
  5153  		rel8.DeleteByPhyAddrKey(valrow8)
  5154  		require.NoError(t, del8txn.Commit(context.Background()))
  5155  	}
  5156  
  5157  	// consistency check
  5158  	{
  5159  		var err error
  5160  		txn, rel := tae.GetRelation()
  5161  		_, _, err = rel.GetValueByFilter(context.Background(), filter15, 3)
  5162  		assert.True(t, moerr.IsMoErrCode(err, moerr.ErrNotFound))
  5163  		_, _, err = rel.GetValueByFilter(context.Background(), filter17, 3)
  5164  		assert.True(t, moerr.IsMoErrCode(err, moerr.ErrNotFound))
  5165  		_, _, err = rel.GetValueByFilter(context.Background(), filter18, 3)
  5166  		assert.True(t, moerr.IsMoErrCode(err, moerr.ErrNotFound))
  5167  		_, _, err = rel.GetValueByFilter(context.Background(), filter19, 3)
  5168  		assert.True(t, moerr.IsMoErrCode(err, moerr.ErrNotFound))
  5169  
  5170  		testutil.CheckAllColRowsByScan(t, rel, 86, true)
  5171  		require.NoError(t, txn.Commit(context.Background()))
  5172  	}
  5173  }
  5174  
  5175  func TestTransfer3(t *testing.T) {
  5176  	defer testutils.AfterTest(t)()
  5177  	ctx := context.Background()
  5178  
  5179  	opts := config.WithLongScanAndCKPOpts(nil)
  5180  	tae := testutil.NewTestEngine(ctx, ModuleName, t, opts)
  5181  	defer tae.Close()
  5182  	schema := catalog.MockSchemaAll(5, 3)
  5183  	schema.BlockMaxRows = 100
  5184  	schema.ObjectMaxBlocks = 10
  5185  	tae.BindSchema(schema)
  5186  
  5187  	bat := catalog.MockBatch(schema, 10)
  5188  	defer bat.Close()
  5189  
  5190  	tae.CreateRelAndAppend(bat, true)
  5191  
  5192  	filter := handle.NewEQFilter(bat.Vecs[3].Get(3))
  5193  
  5194  	txn1, rel1 := tae.GetRelation()
  5195  
  5196  	var err error
  5197  	err = rel1.DeleteByFilter(context.Background(), filter)
  5198  	assert.NoError(t, err)
  5199  
  5200  	meta := rel1.GetMeta().(*catalog.TableEntry)
  5201  	err = tae.FlushTable(context.Background(), 0, meta.GetDB().ID, meta.ID,
  5202  		types.BuildTS(time.Now().UTC().UnixNano(), 0))
  5203  	assert.NoError(t, err)
  5204  
  5205  	err = rel1.Append(context.Background(), bat.Window(3, 1))
  5206  	assert.NoError(t, err)
  5207  	err = txn1.Commit(context.Background())
  5208  	assert.NoError(t, err)
  5209  }
  5210  
  5211  func TestUpdate(t *testing.T) {
  5212  	t.Skip(any("This case crashes occasionally, is being fixed, skip it for now"))
  5213  	defer testutils.AfterTest(t)()
  5214  	ctx := context.Background()
  5215  
  5216  	opts := config.WithQuickScanAndCKPOpts2(nil, 5)
  5217  	// opts := config.WithLongScanAndCKPOpts(nil)
  5218  	tae := testutil.NewTestEngine(ctx, ModuleName, t, opts)
  5219  	defer tae.Close()
  5220  
  5221  	schema := catalog.MockSchemaAll(5, 3)
  5222  	schema.BlockMaxRows = 100
  5223  	schema.ObjectMaxBlocks = 4
  5224  	tae.BindSchema(schema)
  5225  
  5226  	bat := catalog.MockBatch(schema, 1)
  5227  	defer bat.Close()
  5228  	bat.Vecs[2].Update(0, int32(0), false)
  5229  
  5230  	tae.CreateRelAndAppend(bat, true)
  5231  
  5232  	var wg sync.WaitGroup
  5233  
  5234  	var expectV atomic.Int32
  5235  	expectV.Store(bat.Vecs[2].Get(0).(int32))
  5236  	filter := handle.NewEQFilter(bat.Vecs[3].Get(0))
  5237  	updateFn := func() {
  5238  		defer wg.Done()
  5239  		txn, rel := tae.GetRelation()
  5240  		id, offset, err := rel.GetByFilter(context.Background(), filter)
  5241  		assert.NoError(t, err)
  5242  		v, _, err := rel.GetValue(id, offset, 2)
  5243  		assert.NoError(t, err)
  5244  		err = rel.RangeDelete(id, offset, offset, handle.DT_Normal)
  5245  		if err != nil {
  5246  			t.Logf("range delete %v, rollbacking", err)
  5247  			_ = txn.Rollback(context.Background())
  5248  			return
  5249  		}
  5250  		tuples := bat.CloneWindow(0, 1)
  5251  		defer tuples.Close()
  5252  		updatedV := v.(int32) + 1
  5253  		tuples.Vecs[2].Update(0, updatedV, false)
  5254  		err = rel.Append(context.Background(), tuples)
  5255  		assert.NoError(t, err)
  5256  
  5257  		err = txn.Commit(context.Background())
  5258  		if err != nil {
  5259  			t.Logf("commit update %v", err)
  5260  		} else {
  5261  			expectV.CompareAndSwap(v.(int32), updatedV)
  5262  			t.Logf("%v committed", updatedV)
  5263  		}
  5264  	}
  5265  	p, _ := ants.NewPool(5)
  5266  	defer p.Release()
  5267  	loop := 1000
  5268  	for i := 0; i < loop; i++ {
  5269  		wg.Add(1)
  5270  		// updateFn()
  5271  		_ = p.Submit(updateFn)
  5272  	}
  5273  	wg.Wait()
  5274  	t.Logf("Final: %v", expectV.Load())
  5275  	{
  5276  		txn, rel := tae.GetRelation()
  5277  		v, _, err := rel.GetValueByFilter(context.Background(), filter, 2)
  5278  		assert.NoError(t, err)
  5279  		assert.Equal(t, v.(int32), expectV.Load())
  5280  		testutil.CheckAllColRowsByScan(t, rel, 1, true)
  5281  		assert.NoError(t, txn.Commit(context.Background()))
  5282  	}
  5283  }
  5284  
  5285  func TestMergeMemsize(t *testing.T) {
  5286  	t.Skip("run it manually to observe the memory heap")
  5287  	ctx := context.Background()
  5288  	opts := config.WithLongScanAndCKPOpts(nil)
  5289  	tae := testutil.NewTestEngine(ctx, ModuleName, t, opts)
  5290  	defer tae.Close()
  5291  
  5292  	schema := catalog.MockSchemaAll(18, 3)
  5293  	schema.Name = "testupdate"
  5294  	schema.BlockMaxRows = 8192
  5295  	schema.ObjectMaxBlocks = 200
  5296  	tae.BindSchema(schema)
  5297  
  5298  	wholebat := catalog.MockBatch(schema, 8192*80)
  5299  	for _, col := range schema.ColDefs {
  5300  		t.Log(col.Type.DescString(), col.Type.Size)
  5301  	}
  5302  	t.Log(wholebat.ApproxSize())
  5303  	batCnt := 40
  5304  	bats := wholebat.Split(batCnt)
  5305  	// write a single object (batCnt blocks) by applying metaloc
  5306  	objName1 := objectio.BuildObjectNameWithObjectID(objectio.NewObjectid())
  5307  	writer, err := blockio.NewBlockWriterNew(tae.Runtime.Fs.Service, objName1, 0, nil)
  5308  	require.Nil(t, err)
  5309  	writer.SetPrimaryKey(3)
  5310  	for _, b := range bats {
  5311  		_, err = writer.WriteBatch(containers.ToCNBatch(b))
  5312  		require.Nil(t, err)
  5313  	}
  5314  	blocks, _, err := writer.Sync(context.Background())
  5315  	assert.Nil(t, err)
  5316  	assert.Equal(t, batCnt, len(blocks))
  5317  	statsVec := containers.MakeVector(types.T_varchar.ToType(), common.DefaultAllocator)
  5318  	statsVec.Append(writer.GetObjectStats()[objectio.SchemaData][:], false)
  5319  	{
  5320  		txn, _ := tae.StartTxn(nil)
  5321  		txn.SetDedupType(txnif.IncrementalDedup)
  5322  		db, err := txn.CreateDatabase("db", "", "")
  5323  		assert.NoError(t, err)
  5324  		tbl, err := db.CreateRelation(schema)
  5325  		assert.NoError(t, err)
  5326  		assert.NoError(t, tbl.AddObjsWithMetaLoc(context.Background(), statsVec))
  5327  		assert.NoError(t, txn.Commit(context.Background()))
  5328  	}
  5329  	statsVec.Close()
  5330  
  5331  	t.Log(tae.Catalog.SimplePPString(common.PPL1))
  5332  	var metas []*catalog.ObjectEntry
  5333  	{
  5334  		txn, rel := tae.GetRelation()
  5335  		it := rel.MakeObjectIt()
  5336  		blkcnt := 0
  5337  		for ; it.Valid(); it.Next() {
  5338  			obj := it.GetObject()
  5339  			defer obj.Close()
  5340  			meta := it.GetObject().GetMeta().(*catalog.ObjectEntry)
  5341  			stat := meta.GetObjectStats()
  5342  			blkcnt += int(stat.BlkCnt())
  5343  			metas = append(metas, meta)
  5344  
  5345  		}
  5346  		txn.Commit(ctx)
  5347  		require.Equal(t, batCnt, blkcnt)
  5348  	}
  5349  
  5350  	{
  5351  		txn, _ := tae.StartTxn(nil)
  5352  		task, err := jobs.NewMergeObjectsTask(nil, txn, metas, tae.Runtime, 0)
  5353  		require.NoError(t, err)
  5354  
  5355  		dbutils.PrintMemStats()
  5356  		err = task.OnExec(context.Background())
  5357  		require.NoError(t, err)
  5358  		require.NoError(t, txn.Commit(ctx))
  5359  		dbutils.PrintMemStats()
  5360  	}
  5361  }
  5362  
  5363  func TestCollectDeletesAfterCKP(t *testing.T) {
  5364  	ctx := context.Background()
  5365  	opts := config.WithLongScanAndCKPOpts(nil)
  5366  	tae := testutil.NewTestEngine(ctx, ModuleName, t, opts)
  5367  	defer tae.Close()
  5368  
  5369  	schema := catalog.MockSchemaAll(5, 3)
  5370  	schema.Name = "testupdate"
  5371  	schema.BlockMaxRows = 8192
  5372  	schema.ObjectMaxBlocks = 20
  5373  	tae.BindSchema(schema)
  5374  
  5375  	bat := catalog.MockBatch(schema, 400)
  5376  	// write only one block by applying metaloc
  5377  	objName1 := objectio.BuildObjectNameWithObjectID(objectio.NewObjectid())
  5378  	writer, err := blockio.NewBlockWriterNew(tae.Runtime.Fs.Service, objName1, 0, nil)
  5379  	assert.Nil(t, err)
  5380  	writer.SetPrimaryKey(3)
  5381  	_, err = writer.WriteBatch(containers.ToCNBatch(bat))
  5382  	assert.Nil(t, err)
  5383  	blocks, _, err := writer.Sync(context.Background())
  5384  	assert.Nil(t, err)
  5385  	assert.Equal(t, 1, len(blocks))
  5386  	statsVec := containers.MakeVector(types.T_varchar.ToType(), common.DefaultAllocator)
  5387  	statsVec.Append(writer.GetObjectStats()[objectio.SchemaData][:], false)
  5388  	defer statsVec.Close()
  5389  	{
  5390  		txn, _ := tae.StartTxn(nil)
  5391  		txn.SetDedupType(txnif.IncrementalDedup)
  5392  		db, err := txn.CreateDatabase("db", "", "")
  5393  		assert.NoError(t, err)
  5394  		tbl, err := db.CreateRelation(schema)
  5395  		assert.NoError(t, err)
  5396  		assert.NoError(t, tbl.AddObjsWithMetaLoc(context.Background(), statsVec))
  5397  		assert.NoError(t, txn.Commit(context.Background()))
  5398  	}
  5399  
  5400  	updateFn := func(round, i, j int) {
  5401  		tuples := bat.CloneWindow(0, 1)
  5402  		defer tuples.Close()
  5403  		for x := i; x < j; x++ {
  5404  			txn, rel := tae.GetRelation()
  5405  			filter := handle.NewEQFilter(int64(x))
  5406  			id, offset, err := rel.GetByFilter(context.Background(), filter)
  5407  			assert.NoError(t, err)
  5408  			_, _, err = rel.GetValue(id, offset, 2)
  5409  			assert.NoError(t, err)
  5410  			err = rel.RangeDelete(id, offset, offset, handle.DT_Normal)
  5411  			if err != nil {
  5412  				t.Logf("range delete %v, rollbacking", err)
  5413  				_ = txn.Rollback(context.Background())
  5414  				return
  5415  			}
  5416  			tuples.Vecs[3].Update(0, int64(x), false)
  5417  			err = rel.Append(context.Background(), tuples)
  5418  			assert.NoError(t, err)
  5419  			assert.NoError(t, txn.Commit(context.Background()))
  5420  		}
  5421  		t.Logf("(%d, %d, %d) done", round, i, j)
  5422  	}
  5423  	updateFn(1, 100, 110)
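        	// The update above produced 10 delete tombstones (keys 100..109); they must
        	// remain collectible before the checkpoint, after ForceLongCheckpoint, and
        	// after a restart.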
  5424  	{
  5425  		txn, rel := tae.GetRelation()
  5426  		meta := testutil.GetOneBlockMeta(rel)
  5427  		bat, _, err := meta.GetObjectData().CollectDeleteInRange(ctx, types.TS{}, types.MaxTs(), true, common.DefaultAllocator)
  5428  		require.NoError(t, err)
  5429  		require.Equal(t, 10, bat.Length())
  5430  		require.NoError(t, txn.Commit(ctx))
  5431  	}
  5432  	logutil.Infof(tae.Catalog.SimplePPString(3))
  5433  	tae.ForceLongCheckpoint()
  5434  	{
  5435  		txn, rel := tae.GetRelation()
  5436  		meta := testutil.GetOneBlockMeta(rel)
  5437  		bat, _, err := meta.GetObjectData().CollectDeleteInRange(ctx, types.TS{}, types.MaxTs(), true, common.DefaultAllocator)
  5438  		require.NoError(t, err)
  5439  		require.Equal(t, 10, bat.Length())
  5440  		require.NoError(t, txn.Commit(ctx))
  5441  	}
  5442  	logutil.Infof(tae.Catalog.SimplePPString(3))
  5443  	tae.Restart(ctx)
  5444  	logutil.Infof(tae.Catalog.SimplePPString(3))
  5445  	{
  5446  		txn, rel := tae.GetRelation()
  5447  		meta := testutil.GetOneBlockMeta(rel)
  5448  		bat, _, err := meta.GetObjectData().CollectDeleteInRange(ctx, types.TS{}, types.MaxTs(), true, common.DefaultAllocator)
  5449  		require.NoError(t, err)
  5450  		require.Equal(t, 10, bat.Length())
  5451  		require.NoError(t, txn.Commit(ctx))
  5452  	}
  5453  }
  5454  
  5455  // This test is used to observe many compactions overflowing an Object; it is not compulsory
  5456  func TestAlwaysUpdate(t *testing.T) {
  5457  	t.Skip("This is a long test, run it manually to observe what you want")
  5458  	defer testutils.AfterTest(t)()
  5459  	ctx := context.Background()
  5460  
  5461  	// opts := config.WithQuickScanAndCKPOpts2(nil, 10)
  5462  	// opts.GCCfg.ScanGCInterval = 3600 * time.Second
  5463  	// opts.CatalogCfg.GCInterval = 3600 * time.Second
  5464  	opts := config.WithQuickScanAndCKPAndGCOpts(nil)
  5465  	tae := testutil.NewTestEngine(ctx, ModuleName, t, opts)
  5466  	defer tae.Close()
  5467  
  5468  	schema := catalog.MockSchemaAll(5, 3)
  5469  	schema.Name = "testupdate"
  5470  	schema.BlockMaxRows = 8192
  5471  	schema.ObjectMaxBlocks = 200
  5472  	tae.BindSchema(schema)
  5473  
  5474  	bats := catalog.MockBatch(schema, 400*100).Split(100)
  5475  	statsVec := containers.MakeVector(types.T_varchar.ToType(), common.DefaultAllocator)
  5476  	defer statsVec.Close()
  5477  	// write only one Object
  5478  	for i := 0; i < 1; i++ {
  5479  		objName1 := objectio.BuildObjectNameWithObjectID(objectio.NewObjectid())
  5480  		writer, err := blockio.NewBlockWriterNew(tae.Runtime.Fs.Service, objName1, 0, nil)
  5481  		assert.Nil(t, err)
  5482  		writer.SetPrimaryKey(3)
  5483  		for _, bat := range bats[i*25 : (i+1)*25] {
  5484  			_, err := writer.WriteBatch(containers.ToCNBatch(bat))
  5485  			assert.Nil(t, err)
  5486  		}
  5487  		blocks, _, err := writer.Sync(context.Background())
  5488  		assert.Nil(t, err)
  5489  		assert.Equal(t, 25, len(blocks))
  5490  		statsVec.Append(writer.GetObjectStats()[objectio.SchemaData][:], false)
  5491  	}
  5492  
  5493  	// var did, tid uint64
  5494  	txn, _ := tae.StartTxn(nil)
  5495  	txn.SetDedupType(txnif.IncrementalDedup)
  5496  	db, err := txn.CreateDatabase("db", "", "")
  5497  	// did = db.GetID()
  5498  	assert.NoError(t, err)
  5499  	tbl, err := db.CreateRelation(schema)
  5500  	// tid = tbl.ID()
  5501  	assert.NoError(t, err)
  5502  	assert.NoError(t, tbl.AddObjsWithMetaLoc(context.Background(), statsVec))
  5503  	assert.NoError(t, txn.Commit(context.Background()))
  5504  
  5505  	t.Log(tae.Catalog.SimplePPString(common.PPL1))
  5506  
  5507  	wg := &sync.WaitGroup{}
  5508  
  5509  	updateFn := func(round, i, j int) {
  5510  		defer wg.Done()
  5511  		tuples := bats[0].CloneWindow(0, 1)
  5512  		defer tuples.Close()
  5513  		for x := i; x < j; x++ {
  5514  			txn, rel := tae.GetRelation()
  5515  			filter := handle.NewEQFilter(int64(x))
  5516  			id, offset, err := rel.GetByFilter(context.Background(), filter)
  5517  			assert.NoError(t, err)
  5518  			_, _, err = rel.GetValue(id, offset, 2)
  5519  			assert.NoError(t, err)
  5520  			err = rel.RangeDelete(id, offset, offset, handle.DT_Normal)
  5521  			if err != nil {
  5522  				t.Logf("range delete %v, rollbacking", err)
  5523  				_ = txn.Rollback(context.Background())
  5524  				return
  5525  			}
  5526  			tuples.Vecs[3].Update(0, int64(x), false)
  5527  			err = rel.Append(context.Background(), tuples)
  5528  			assert.NoError(t, err)
  5529  			assert.NoError(t, txn.Commit(context.Background()))
  5530  		}
  5531  		t.Logf("(%d, %d, %d) done", round, i, j)
  5532  	}
  5533  
  5534  	p, _ := ants.NewPool(20)
  5535  	defer p.Release()
  5536  
  5537  	// ch := make(chan int, 1)
  5538  	// ticker := time.NewTicker(1 * time.Second)
  5539  	// ticker2 := time.NewTicker(100 * time.Millisecond)
  5540  	// go func() {
  5541  	// 	for {
  5542  	// 		select {
  5543  	// 		case <-ticker.C:
  5544  	// 			t.Log(tbl.SimplePPString(common.PPL1))
  5545  	// 		case <-ticker2.C:
  5546  	// 			_, _, _ = logtail.HandleSyncLogTailReq(ctx, new(dummyCpkGetter), tae.LogtailMgr, tae.Catalog, api.SyncLogTailReq{
  5547  	// 				CnHave: tots(types.BuildTS(0, 0)),
  5548  	// 				CnWant: tots(types.MaxTs()),
  5549  	// 				Table:  &api.TableID{DbId: did, TbId: tid},
  5550  	// 			}, true)
  5551  	// 		case <-ch:
  5552  	// 		}
  5553  	// 	}
  5554  	// }()
  5555  
  5556  	for r := 0; r < 10; r++ {
  5557  		for i := 0; i < 40; i++ {
  5558  			wg.Add(1)
  5559  			start, end := i*200, (i+1)*200
  5560  			f := func() { updateFn(r, start, end) }
  5561  			p.Submit(f)
  5562  		}
  5563  		wg.Wait()
  5564  		tae.CheckRowsByScan(100*100, true)
  5565  	}
  5566  }
  5567  
  5568  func TestInsertPerf(t *testing.T) {
  5569  	t.Skip(any("for debug"))
  5570  	ctx := context.Background()
  5571  
  5572  	opts := new(options.Options)
  5573  	options.WithCheckpointScanInterval(time.Second * 10)(opts)
  5574  	options.WithFlushInterval(time.Second * 10)(opts)
  5575  	tae := testutil.NewTestEngine(ctx, ModuleName, t, opts)
  5576  	defer tae.Close()
  5577  	schema := catalog.MockSchemaAll(10, 2)
  5578  	schema.BlockMaxRows = 1000
  5579  	schema.ObjectMaxBlocks = 5
  5580  	tae.BindSchema(schema)
  5581  
  5582  	cnt := 1000
  5583  	iBat := 1
  5584  	poolSize := 20
  5585  
  5586  	bat := catalog.MockBatch(schema, cnt*iBat*poolSize*2)
  5587  	defer bat.Close()
  5588  
  5589  	tae.CreateRelAndAppend(bat.Window(0, 1), true)
  5590  	var wg sync.WaitGroup
  5591  	run := func(start int) func() {
  5592  		return func() {
  5593  			defer wg.Done()
  5594  			for i := start; i < start+cnt*iBat; i += iBat {
  5595  				txn, rel := tae.GetRelation()
  5596  				_ = rel.Append(context.Background(), bat.Window(i, iBat))
  5597  				_ = txn.Commit(context.Background())
  5598  			}
  5599  		}
  5600  	}
  5601  
  5602  	p, _ := ants.NewPool(poolSize)
  5603  	defer p.Release()
  5604  	now := time.Now()
  5605  	for i := 1; i <= poolSize; i++ {
  5606  		wg.Add(1)
  5607  		_ = p.Submit(run(i * cnt * iBat))
  5608  	}
  5609  	wg.Wait()
  5610  	t.Log(time.Since(now))
  5611  }
  5612  
  5613  func TestAppendBat(t *testing.T) {
  5614  	p, _ := ants.NewPool(100)
  5615  	defer p.Release()
  5616  	var wg sync.WaitGroup
  5617  
  5618  	schema := catalog.MockSchema(7, 2)
  5619  	bat := catalog.MockBatch(schema, 1000)
  5620  	defer bat.Close()
  5621  
  5622  	run := func() {
  5623  		defer wg.Done()
  5624  		b := containers.BuildBatch(schema.Attrs(), schema.Types(), containers.Options{
  5625  			Allocator: common.DefaultAllocator})
  5626  		defer b.Close()
  5627  		for i := 0; i < bat.Length(); i++ {
  5628  			w := bat.Window(i, 1)
  5629  			b.Extend(w)
  5630  		}
  5631  	}
  5632  
  5633  	for i := 0; i < 200; i++ {
  5634  		wg.Add(1)
  5635  		_ = p.Submit(run)
  5636  	}
  5637  	wg.Wait()
  5638  }
  5639  
  5640  func TestGCWithCheckpoint(t *testing.T) {
  5641  	defer testutils.AfterTest(t)()
  5642  	ctx := context.Background()
  5643  
  5644  	opts := config.WithQuickScanAndCKPAndGCOpts(nil)
  5645  	tae := testutil.NewTestEngine(ctx, ModuleName, t, opts)
  5646  	defer tae.Close()
  5647  	cleaner := gc.NewCheckpointCleaner(context.Background(), tae.Runtime.Fs, tae.BGCheckpointRunner, false)
  5648  	manager := gc.NewDiskCleaner(cleaner)
  5649  	manager.Start()
  5650  	defer manager.Stop()
  5651  
  5652  	schema := catalog.MockSchemaAll(3, 1)
  5653  	schema.BlockMaxRows = 10
  5654  	schema.ObjectMaxBlocks = 2
  5655  	tae.BindSchema(schema)
  5656  	bat := catalog.MockBatch(schema, 21)
  5657  	defer bat.Close()
  5658  
  5659  	tae.CreateRelAndAppend(bat, true)
  5660  	now := time.Now()
  5661  	testutils.WaitExpect(10000, func() bool {
  5662  		return tae.Runtime.Scheduler.GetPenddingLSNCnt() == 0
  5663  	})
  5664  	t.Log(time.Since(now))
  5665  	t.Logf("Checkpointed: %d", tae.Runtime.Scheduler.GetCheckpointedLSN())
  5666  	t.Logf("GetPenddingLSNCnt: %d", tae.Runtime.Scheduler.GetPenddingLSNCnt())
  5667  	assert.Equal(t, uint64(0), tae.Runtime.Scheduler.GetPenddingLSNCnt())
  5668  	err := manager.GC(context.Background())
  5669  	assert.Nil(t, err)
  5670  	entries := tae.BGCheckpointRunner.GetAllIncrementalCheckpoints()
  5671  	num := len(entries)
  5672  	assert.Greater(t, num, 0)
  5673  	testutils.WaitExpect(5000, func() bool {
  5674  		if manager.GetCleaner().GetMaxConsumed() == nil {
  5675  			return false
  5676  		}
  5677  		end := entries[num-1].GetEnd()
  5678  		maxEnd := manager.GetCleaner().GetMaxConsumed().GetEnd()
  5679  		return end.Equal(&maxEnd)
  5680  	})
  5681  	end := entries[num-1].GetEnd()
  5682  	maxEnd := manager.GetCleaner().GetMaxConsumed().GetEnd()
  5683  	assert.True(t, end.Equal(&maxEnd))
  5684  	cleaner2 := gc.NewCheckpointCleaner(context.Background(), tae.Runtime.Fs, tae.BGCheckpointRunner, false)
  5685  	manager2 := gc.NewDiskCleaner(cleaner2)
  5686  	manager2.Start()
  5687  	defer manager2.Stop()
  5688  	testutils.WaitExpect(5000, func() bool {
  5689  		if manager2.GetCleaner().GetMaxConsumed() == nil {
  5690  			return false
  5691  		}
  5692  		end := entries[num-1].GetEnd()
  5693  		maxEnd := manager2.GetCleaner().GetMaxConsumed().GetEnd()
  5694  		return end.Equal(&maxEnd)
  5695  	})
  5696  	end = entries[num-1].GetEnd()
  5697  	maxEnd = manager2.GetCleaner().GetMaxConsumed().GetEnd()
  5698  	assert.True(t, end.Equal(&maxEnd))
  5699  	tables1 := manager.GetCleaner().GetInputs()
  5700  	tables2 := manager2.GetCleaner().GetInputs()
  5701  	assert.True(t, tables1.Compare(tables2))
  5702  }
  5703  
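        // TestGCDropDB repeats the checkpoint-GC comparison after dropping the default
        // test database, then restarts the engine.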
  5704  func TestGCDropDB(t *testing.T) {
  5705  	defer testutils.AfterTest(t)()
  5706  	ctx := context.Background()
  5707  
  5708  	opts := config.WithQuickScanAndCKPAndGCOpts(nil)
  5709  	tae := testutil.NewTestEngine(ctx, ModuleName, t, opts)
  5710  	defer tae.Close()
  5711  	cleaner := gc.NewCheckpointCleaner(context.Background(), tae.Runtime.Fs, tae.BGCheckpointRunner, false)
  5712  	manager := gc.NewDiskCleaner(cleaner)
  5713  	manager.Start()
  5714  	defer manager.Stop()
  5715  	schema := catalog.MockSchemaAll(3, 1)
  5716  	schema.BlockMaxRows = 10
  5717  	schema.ObjectMaxBlocks = 2
  5718  	tae.BindSchema(schema)
  5719  	bat := catalog.MockBatch(schema, 210)
  5720  	defer bat.Close()
  5721  
  5722  	tae.CreateRelAndAppend(bat, true)
  5723  	txn, err := tae.StartTxn(nil)
  5724  	assert.Nil(t, err)
  5725  	db, err := txn.DropDatabase(testutil.DefaultTestDB)
  5726  	assert.Nil(t, err)
  5727  	assert.Nil(t, txn.Commit(context.Background()))
  5728  
  5729  	assert.Equal(t, txn.GetCommitTS(), db.GetMeta().(*catalog.DBEntry).GetDeleteAtLocked())
  5730  	now := time.Now()
  5731  	testutils.WaitExpect(10000, func() bool {
  5732  		return tae.Runtime.Scheduler.GetPenddingLSNCnt() == 0
  5733  	})
  5734  	t.Log(time.Since(now))
  5735  	err = manager.GC(context.Background())
  5736  	assert.Nil(t, err)
  5737  	entries := tae.BGCheckpointRunner.GetAllIncrementalCheckpoints()
  5738  	num := len(entries)
  5739  	assert.Greater(t, num, 0)
  5740  	testutils.WaitExpect(5000, func() bool {
  5741  		if manager.GetCleaner().GetMaxConsumed() == nil {
  5742  			return false
  5743  		}
  5744  		end := entries[num-1].GetEnd()
  5745  		maxEnd := manager.GetCleaner().GetMaxConsumed().GetEnd()
  5746  		return end.Equal(&maxEnd)
  5747  	})
  5748  	end := entries[num-1].GetEnd()
  5749  	maxEnd := manager.GetCleaner().GetMaxConsumed().GetEnd()
  5750  	assert.True(t, end.Equal(&maxEnd))
  5751  	cleaner2 := gc.NewCheckpointCleaner(context.Background(), tae.Runtime.Fs, tae.BGCheckpointRunner, false)
  5752  	manager2 := gc.NewDiskCleaner(cleaner2)
  5753  	manager2.Start()
  5754  	defer manager2.Stop()
  5755  	testutils.WaitExpect(5000, func() bool {
  5756  		if manager2.GetCleaner().GetMaxConsumed() == nil {
  5757  			return false
  5758  		}
  5759  		end := entries[num-1].GetEnd()
  5760  		maxEnd := manager2.GetCleaner().GetMaxConsumed().GetEnd()
  5761  		return end.Equal(&maxEnd)
  5762  	})
  5763  	end = entries[num-1].GetEnd()
  5764  	maxEnd = manager2.GetCleaner().GetMaxConsumed().GetEnd()
  5765  	assert.True(t, end.Equal(&maxEnd))
  5766  	tables1 := manager.GetCleaner().GetInputs()
  5767  	tables2 := manager2.GetCleaner().GetInputs()
  5768  	assert.True(t, tables1.Compare(tables2))
  5769  	tae.Restart(ctx)
  5770  }
  5771  
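        // TestGCDropTable runs the same checkpoint-GC comparison after creating and then
        // dropping a second table, finishing with an engine restart.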
  5772  func TestGCDropTable(t *testing.T) {
  5773  	defer testutils.AfterTest(t)()
  5774  	ctx := context.Background()
  5775  
  5776  	opts := config.WithQuickScanAndCKPAndGCOpts(nil)
  5777  	tae := testutil.NewTestEngine(ctx, ModuleName, t, opts)
  5778  	defer tae.Close()
  5779  	cleaner := gc.NewCheckpointCleaner(context.Background(), tae.Runtime.Fs, tae.BGCheckpointRunner, false)
  5780  	manager := gc.NewDiskCleaner(cleaner)
  5781  	manager.Start()
  5782  	defer manager.Stop()
  5783  	schema := catalog.MockSchemaAll(3, 1)
  5784  	schema.BlockMaxRows = 10
  5785  	schema.ObjectMaxBlocks = 2
  5786  	tae.BindSchema(schema)
  5787  	bat := catalog.MockBatch(schema, 210)
  5788  	defer bat.Close()
  5789  	schema2 := catalog.MockSchemaAll(3, 1)
  5790  	schema2.BlockMaxRows = 10
  5791  	schema2.ObjectMaxBlocks = 2
  5792  	bat2 := catalog.MockBatch(schema2, 210)
  5793  	defer bat2.Close()
  5794  
  5795  	tae.CreateRelAndAppend(bat, true)
  5796  	txn, _ := tae.StartTxn(nil)
  5797  	db, err := txn.GetDatabase(testutil.DefaultTestDB)
  5798  	assert.Nil(t, err)
  5799  	rel, _ := db.CreateRelation(schema2)
  5800  	rel.Append(context.Background(), bat2)
  5801  	assert.Nil(t, txn.Commit(context.Background()))
  5802  
  5803  	txn, err = tae.StartTxn(nil)
  5804  	assert.Nil(t, err)
  5805  	db, err = txn.GetDatabase(testutil.DefaultTestDB)
  5806  	assert.Nil(t, err)
  5807  	_, err = db.DropRelationByName(schema2.Name)
  5808  	assert.Nil(t, err)
  5809  	assert.Nil(t, txn.Commit(context.Background()))
  5810  
  5811  	now := time.Now()
  5812  	testutils.WaitExpect(10000, func() bool {
  5813  		return tae.Runtime.Scheduler.GetPenddingLSNCnt() == 0
  5814  	})
  5815  	assert.Equal(t, uint64(0), tae.Runtime.Scheduler.GetPenddingLSNCnt())
  5816  	assert.Equal(t, txn.GetCommitTS(), rel.GetMeta().(*catalog.TableEntry).GetDeleteAtLocked())
  5817  	t.Log(time.Since(now))
  5818  	err = manager.GC(context.Background())
  5819  	assert.Nil(t, err)
  5820  	entries := tae.BGCheckpointRunner.GetAllIncrementalCheckpoints()
  5821  	num := len(entries)
  5822  	assert.Greater(t, num, 0)
  5823  	testutils.WaitExpect(10000, func() bool {
  5824  		if manager.GetCleaner().GetMaxConsumed() == nil {
  5825  			return false
  5826  		}
  5827  		end := entries[num-1].GetEnd()
  5828  		maxEnd := manager.GetCleaner().GetMaxConsumed().GetEnd()
  5829  		return end.Equal(&maxEnd)
  5830  	})
  5831  	end := entries[num-1].GetEnd()
  5832  	maxEnd := manager.GetCleaner().GetMaxConsumed().GetEnd()
  5833  	assert.True(t, end.Equal(&maxEnd))
  5834  	cleaner2 := gc.NewCheckpointCleaner(context.Background(), tae.Runtime.Fs, tae.BGCheckpointRunner, false)
  5835  	manager2 := gc.NewDiskCleaner(cleaner2)
  5836  	manager2.Start()
  5837  	defer manager2.Stop()
  5838  	testutils.WaitExpect(5000, func() bool {
  5839  		if manager2.GetCleaner().GetMaxConsumed() == nil {
  5840  			return false
  5841  		}
  5842  		end := entries[num-1].GetEnd()
  5843  		maxEnd := manager2.GetCleaner().GetMaxConsumed().GetEnd()
  5844  		return end.Equal(&maxEnd)
  5845  	})
  5846  	end = entries[num-1].GetEnd()
  5847  	maxEnd = manager2.GetCleaner().GetMaxConsumed().GetEnd()
  5848  	assert.True(t, end.Equal(&maxEnd))
  5849  	tables1 := manager.GetCleaner().GetInputs()
  5850  	tables2 := manager2.GetCleaner().GetInputs()
  5851  	assert.True(t, tables1.Compare(tables2))
  5852  	tae.Restart(ctx)
  5853  }
  5854  
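        // TestAlterRenameTbl covers table rename semantics: write-write conflicts on
        // concurrent creates, in-txn visibility of renamed names, snapshot isolation for
        // concurrent readers, rollback of failed renames, and replay after restart and
        // a forced incremental checkpoint.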
  5855  func TestAlterRenameTbl(t *testing.T) {
  5856  	defer testutils.AfterTest(t)()
  5857  	ctx := context.Background()
  5858  
  5859  	opts := config.WithLongScanAndCKPOpts(nil)
  5860  	tae := testutil.NewTestEngine(ctx, ModuleName, t, opts)
  5861  	defer tae.Close()
  5862  
  5863  	schema := catalog.MockSchemaAll(2, -1)
  5864  	schema.Name = "test"
  5865  	schema.BlockMaxRows = 10
  5866  	schema.ObjectMaxBlocks = 2
  5867  	schema.Constraint = []byte("start version")
  5868  	schema.Comment = "comment version"
  5869  
  5870  	{
  5871  		var err error
  5872  		txn, _ := tae.StartTxn(nil)
  5873  		txn.CreateDatabase("xx", "", "")
  5874  		require.NoError(t, txn.Commit(context.Background()))
  5875  		txn1, _ := tae.StartTxn(nil)
  5876  		txn2, _ := tae.StartTxn(nil)
  5877  
  5878  		db, _ := txn1.GetDatabase("xx")
  5879  		_, err = db.CreateRelation(schema)
  5880  		require.NoError(t, err)
  5881  
  5882  		db1, _ := txn2.GetDatabase("xx")
  5883  		_, err = db1.CreateRelation(schema)
  5884  		require.True(t, moerr.IsMoErrCode(err, moerr.ErrTxnWWConflict))
  5885  		require.NoError(t, txn1.Rollback(context.Background()))
  5886  		require.NoError(t, txn2.Rollback(context.Background()))
  5887  	}
  5888  
  5889  	txn, _ := tae.StartTxn(nil)
  5890  	db, _ := txn.CreateDatabase("db", "", "")
  5891  	created, _ := db.CreateRelation(schema)
  5892  	tid := created.ID()
  5893  	txn.Commit(context.Background())
  5894  
  5895  	// concurrent create and in-txn alter check
  5896  	txn0, _ := tae.StartTxn(nil)
  5897  	txn, _ = tae.StartTxn(nil)
  5898  	db, _ = txn.GetDatabase("db")
  5899  	tbl, _ := db.GetRelationByName("test") // 1002
  5900  	require.NoError(t, tbl.AlterTable(context.TODO(), api.NewRenameTableReq(0, 0, "test", "ultra-test")))
  5901  	_, err := db.GetRelationByName("test")
  5902  	require.True(t, moerr.IsMoErrCode(err, moerr.OkExpectedEOB))
  5903  	tbl, err = db.GetRelationByName("ultra-test")
  5904  	require.NoError(t, err)
  5905  	require.Equal(t, tid, tbl.ID())
  5906  
  5907  	require.NoError(t, tbl.AlterTable(context.TODO(), api.NewRenameTableReq(0, 0, "ultra-test", "ultraman-test")))
  5908  	_, err = db.GetRelationByName("test")
  5909  	require.True(t, moerr.IsMoErrCode(err, moerr.OkExpectedEOB))
  5910  	_, err = db.GetRelationByName("ultra-test")
  5911  	require.True(t, moerr.IsMoErrCode(err, moerr.OkExpectedEOB))
  5912  	tbl, err = db.GetRelationByName("ultraman-test")
  5913  	require.NoError(t, err)
  5914  	require.Equal(t, tid, tbl.ID())
  5915  
  5916  	// concurrent txn should see test
  5917  	txn1, _ := tae.StartTxn(nil)
  5918  	db, err = txn1.GetDatabase("db")
  5919  	require.NoError(t, err)
  5920  	tbl, err = db.GetRelationByName("test")
  5921  	require.NoError(t, err)
  5922  	require.Equal(t, tid, tbl.ID())
  5923  	_, err = db.GetRelationByName("ultraman-test")
  5924  	require.True(t, moerr.IsMoErrCode(err, moerr.OkExpectedEOB))
  5925  	require.NoError(t, txn1.Commit(context.Background()))
  5926  
  5927  	require.NoError(t, txn.Commit(context.Background()))
  5928  
  5929  	txn2, _ := tae.StartTxn(nil)
  5930  	db, err = txn2.GetDatabase("db")
  5931  	require.NoError(t, err)
  5932  	_, err = db.GetRelationByName("test")
  5933  	require.True(t, moerr.IsMoErrCode(err, moerr.OkExpectedEOB))
  5934  	_, err = db.GetRelationByName("ultra-test")
  5935  	require.True(t, moerr.IsMoErrCode(err, moerr.OkExpectedEOB))
  5936  	tbl, err = db.GetRelationByName("ultraman-test")
  5937  	require.NoError(t, err)
  5938  	require.Equal(t, tid, tbl.ID())
  5939  
  5940  	require.NoError(t, txn2.Commit(context.Background()))
  5941  
  5942  	// should see test, not newest name
  5943  	db, err = txn0.GetDatabase("db")
  5944  	require.NoError(t, err)
  5945  	_, err = db.GetRelationByName("ultraman-test")
  5946  	require.True(t, moerr.IsMoErrCode(err, moerr.OkExpectedEOB))
  5947  	_, err = db.GetRelationByName("ultra-test")
  5948  	require.True(t, moerr.IsMoErrCode(err, moerr.OkExpectedEOB))
  5949  	tbl, err = db.GetRelationByName("test")
  5950  	require.NoError(t, err)
  5951  	require.Equal(t, tid, tbl.ID())
  5952  
  5953  	txn3, _ := tae.StartTxn(nil)
  5954  	db, _ = txn3.GetDatabase("db")
  5955  	rel, err := db.CreateRelation(schema)
  5956  	require.NoError(t, err)
  5957  	require.NotEqual(t, rel.ID(), tid)
  5958  	require.NoError(t, txn3.Commit(context.Background()))
  5959  
  5960  	t.Log(1, db.GetMeta().(*catalog.DBEntry).PrettyNameIndex())
  5961  	{
  5962  		txn, _ := tae.StartTxn(nil)
  5963  		db, _ := txn.GetDatabase("db")
  5964  		tbl, _ := db.GetRelationByName("test")
  5965  		require.Error(t, tbl.AlterTable(context.TODO(), api.NewRenameTableReq(0, 0, "unmatch", "yyyy")))
  5966  		require.NoError(t, txn.Rollback(context.Background()))
  5967  	}
  5968  	// alter back to original schema
  5969  	{
  5970  		txn, _ := tae.StartTxn(nil)
  5971  		db, _ := txn.GetDatabase("db")
  5972  		tbl, _ := db.GetRelationByName("test")
  5973  		require.NoError(t, tbl.AlterTable(context.TODO(), api.NewRenameTableReq(0, 0, "test", "xx")))
  5974  		require.NoError(t, txn.Commit(context.Background()))
  5975  
  5976  		t.Log(2, db.GetMeta().(*catalog.DBEntry).PrettyNameIndex())
  5977  		txn, _ = tae.StartTxn(nil)
  5978  		db, _ = txn.GetDatabase("db")
  5979  		tbl, _ = db.GetRelationByName("xx")
  5980  		require.NoError(t, tbl.AlterTable(context.TODO(), api.NewRenameTableReq(0, 0, "xx", "test")))
  5981  		require.NoError(t, txn.Commit(context.Background()))
  5982  
  5983  		t.Log(3, db.GetMeta().(*catalog.DBEntry).PrettyNameIndex())
  5984  	}
  5985  
  5986  	// rename duplicate and rollback
  5987  	{
  5988  		txn, _ := tae.StartTxn(nil)
  5989  		db, _ := txn.GetDatabase("db")
  5990  		schema.Name = "other"
  5991  		_, err := db.CreateRelation(schema)
  5992  		require.NoError(t, err)
  5993  		require.NoError(t, txn.Commit(context.Background()))
  5994  
  5995  		t.Log(4, db.GetMeta().(*catalog.DBEntry).PrettyNameIndex())
  5996  		txn, _ = tae.StartTxn(nil)
  5997  		db, _ = txn.GetDatabase("db")
  5998  		tbl, _ = db.GetRelationByName("test")
  5999  		require.NoError(t, tbl.AlterTable(context.TODO(), api.NewRenameTableReq(0, 0, "test", "toBeRollback1")))
  6000  		require.NoError(t, tbl.AlterTable(context.TODO(), api.NewRenameTableReq(0, 0, "toBeRollback1", "toBeRollback2")))
  6001  		require.Error(t, tbl.AlterTable(context.TODO(), api.NewRenameTableReq(0, 0, "toBeRollback2", "other"))) // duplicate
  6002  		require.NoError(t, txn.Rollback(context.Background()))
  6003  
  6004  		t.Log(5, db.GetMeta().(*catalog.DBEntry).PrettyNameIndex())
  6005  	}
  6006  
  6007  	// test checkpoint replay with txn nil
  6008  	{
  6009  		txn, _ := tae.StartTxn(nil)
  6010  		db, _ := txn.GetDatabase("db")
  6011  		tbl, _ := db.GetRelationByName("test")
  6012  		require.NoError(t, tbl.AlterTable(context.TODO(), api.NewRenameTableReq(0, 0, "test", "newtest"))) // leave the "test" nodelist with no active node
  6013  		require.NoError(t, txn.Commit(context.Background()))
  6014  
  6015  		txn, _ = tae.StartTxn(nil)
  6016  		db, _ = txn.GetDatabase("db")
  6017  		tbl, _ = db.GetRelationByName("other")
  6018  		require.NoError(t, tbl.AlterTable(context.TODO(), api.NewRenameTableReq(0, 0, "other", "test"))) // rename other to test, success
  6019  		require.NoError(t, txn.Commit(context.Background()))
  6020  	}
  6021  
  6022  	tae.Restart(ctx)
  6023  
  6024  	txn, _ = tae.StartTxn(nil)
  6025  	db, _ = txn.GetDatabase("db")
  6026  	dbentry := db.GetMeta().(*catalog.DBEntry)
  6027  	t.Log(dbentry.PrettyNameIndex())
  6028  	require.NoError(t, txn.Commit(context.Background()))
  6029  
  6030  	require.NoError(t, tae.BGCheckpointRunner.ForceIncrementalCheckpoint(tae.TxnMgr.Now(), false))
  6031  	tae.Restart(ctx)
  6032  
  6033  	txn, _ = tae.StartTxn(nil)
  6034  	db, _ = txn.GetDatabase("db")
  6035  	dbentry = db.GetMeta().(*catalog.DBEntry)
  6036  	t.Log(dbentry.PrettyNameIndex())
  6037  	require.NoError(t, txn.Commit(context.Background()))
  6038  }
  6039  
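        // TestAlterRenameTbl2 swaps tables by id: it repeatedly creates a copy, drops the
        // old table, renames the copy back to "t1", and checks that the database's name
        // index replays correctly across restarts and a forced incremental checkpoint.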
  6040  func TestAlterRenameTbl2(t *testing.T) {
  6041  	defer testutils.AfterTest(t)()
  6042  	ctx := context.Background()
  6043  
  6044  	opts := config.WithLongScanAndCKPOpts(nil)
  6045  	tae := testutil.NewTestEngine(ctx, ModuleName, t, opts)
  6046  	defer tae.Close()
  6047  
  6048  	schema := catalog.MockSchemaAll(2, -1)
  6049  	schema.Name = "t1"
  6050  	schema.BlockMaxRows = 10
  6051  	schema.ObjectMaxBlocks = 2
  6052  	schema.Constraint = []byte("start version")
  6053  	schema.Comment = "comment version"
  6054  
  6055  	schema2 := schema.Clone()
  6056  	schema2.Name = "t1-copy-fefsfwafe"
  6057  
  6058  	schema3 := schema.Clone()
  6059  	schema3.Name = "t1-copy-igmgibjtm"
  6060  
  6061  	var oldId, newId uint64
  6062  	{
  6063  		var err error
  6064  		txn, _ := tae.StartTxn(nil)
  6065  		txn.CreateDatabase("xx", "", "")
  6066  
  6067  		db, _ := txn.GetDatabase("xx")
  6068  
  6069  		hdl, err := db.CreateRelation(schema)
  6070  		require.NoError(t, err)
  6071  		oldId = hdl.ID()
  6072  		require.NoError(t, txn.Commit(context.Background()))
  6073  	}
  6074  
  6075  	{
  6076  		txn, _ := tae.StartTxn(nil)
  6077  		db, _ := txn.GetDatabase("xx")
  6078  		hdl, err := db.CreateRelation(schema2)
  6079  		require.NoError(t, err)
  6080  		newId = hdl.ID()
  6081  
  6082  		_, err = db.DropRelationByID(oldId)
  6083  		require.NoError(t, err)
  6084  
  6085  		newhdl, _ := db.GetRelationByID(newId)
  6086  		require.NoError(t, newhdl.AlterTable(ctx, api.NewRenameTableReq(0, 0, "t1-copy-fefsfwafe", "t1")))
  6087  		require.NoError(t, txn.Commit(context.Background()))
  6088  
  6089  		dbentry := db.GetMeta().(*catalog.DBEntry)
  6090  		t.Log(dbentry.PrettyNameIndex())
  6091  	}
  6092  
  6093  	{
  6094  		txn, _ := tae.StartTxn(nil)
  6095  		db, _ := txn.GetDatabase("xx")
  6096  		hdl, err := db.CreateRelation(schema3)
  6097  		require.NoError(t, err)
  6098  		newId2 := hdl.ID()
  6099  
  6100  		_, err = db.DropRelationByID(newId)
  6101  		require.NoError(t, err)
  6102  
  6103  		newhdl, _ := db.GetRelationByID(newId2)
  6104  		require.NoError(t, newhdl.AlterTable(ctx, api.NewRenameTableReq(0, 0, "t1-copy-igmgibjtm", "t1")))
  6105  		require.NoError(t, txn.Commit(context.Background()))
  6106  
  6107  		dbentry := db.GetMeta().(*catalog.DBEntry)
  6108  		t.Log(dbentry.PrettyNameIndex())
  6109  		newId = newId2
  6110  	}
  6111  
  6112  	tae.Restart(ctx)
  6113  	{
  6114  		txn, _ := tae.StartTxn(nil)
  6115  		db, _ := txn.GetDatabase("xx")
  6116  		dbentry := db.GetMeta().(*catalog.DBEntry)
  6117  		t.Log(dbentry.PrettyNameIndex())
  6118  		require.NoError(t, txn.Commit(context.Background()))
  6119  	}
  6120  
  6121  	{
  6122  		txn, _ := tae.StartTxn(nil)
  6123  		db, _ := txn.GetDatabase("xx")
  6124  
  6125  		newhdl, _ := db.GetRelationByID(newId)
  6126  		require.NoError(t, newhdl.AlterTable(ctx, api.NewRenameTableReq(0, 0, "t1", "t2")))
  6127  		require.NoError(t, txn.Commit(context.Background()))
  6128  
  6129  		dbentry := db.GetMeta().(*catalog.DBEntry)
  6130  		t.Log(dbentry.PrettyNameIndex())
  6131  	}
  6132  
  6133  	require.NoError(t, tae.BGCheckpointRunner.ForceIncrementalCheckpoint(tae.TxnMgr.Now(), false))
  6134  
  6135  	tae.Restart(ctx)
  6136  	{
  6137  		txn, _ := tae.StartTxn(nil)
  6138  		db, _ := txn.GetDatabase("xx")
  6139  		dbentry := db.GetMeta().(*catalog.DBEntry)
  6140  		t.Log(dbentry.PrettyNameIndex())
  6141  		require.NoError(t, txn.Commit(context.Background()))
  6142  	}
  6143  
  6144  }
  6145  
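        // TestAlterTableBasic updates a table's constraint and comment via AlterTable and
        // checks that the mo_tables logtail exposes all three schema versions, both before
        // and after a restart; it then drops the table and inspects the mo_columns logtail.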
  6146  func TestAlterTableBasic(t *testing.T) {
  6147  	defer testutils.AfterTest(t)()
  6148  	ctx := context.Background()
  6149  
  6150  	opts := config.WithLongScanAndCKPOpts(nil)
  6151  	tae := testutil.NewTestEngine(ctx, ModuleName, t, opts)
  6152  	defer tae.Close()
  6153  
  6154  	schema := catalog.MockSchemaAll(2, -1)
  6155  	schema.Name = "test"
  6156  	schema.BlockMaxRows = 10
  6157  	schema.ObjectMaxBlocks = 2
  6158  	schema.Constraint = []byte("start version")
  6159  	schema.Comment = "comment version"
  6160  
  6161  	txn, _ := tae.StartTxn(nil)
  6162  	db, _ := txn.CreateDatabase("db", "", "")
  6163  	db.CreateRelation(schema)
  6164  	txn.Commit(context.Background())
  6165  
  6166  	txn, _ = tae.StartTxn(nil)
  6167  	db, _ = txn.GetDatabase("db")
  6168  	tbl, _ := db.GetRelationByName("test")
  6169  	err := tbl.AlterTable(context.Background(), api.NewUpdateConstraintReq(0, 0, "version 1"))
  6170  	require.NoError(t, err)
  6171  	err = tbl.AlterTable(context.Background(), api.NewUpdateCommentReq(0, 0, "comment version 1"))
  6172  	require.NoError(t, err)
  6173  	err = txn.Commit(context.Background())
  6174  	require.NoError(t, err)
  6175  
  6176  	txn, _ = tae.StartTxn(nil)
  6177  	db, _ = txn.GetDatabase("db")
  6178  	tbl, _ = db.GetRelationByName("test")
  6179  	err = tbl.AlterTable(context.Background(), api.NewUpdateConstraintReq(0, 0, "version 2"))
  6180  	require.NoError(t, err)
  6181  	txn.Commit(context.Background())
  6182  
  6183  	tots := func(ts types.TS) *timestamp.Timestamp {
  6184  		return &timestamp.Timestamp{PhysicalTime: types.DecodeInt64(ts[4:12]), LogicalTime: types.DecodeUint32(ts[:4])}
  6185  	}
  6186  
  6187  	resp, close, _ := logtail.HandleSyncLogTailReq(ctx, new(dummyCpkGetter), tae.LogtailMgr, tae.Catalog, api.SyncLogTailReq{
  6188  		CnHave: tots(types.BuildTS(0, 0)),
  6189  		CnWant: tots(types.MaxTs()),
  6190  		Table:  &api.TableID{DbId: pkgcatalog.MO_CATALOG_ID, TbId: pkgcatalog.MO_TABLES_ID},
  6191  	}, true)
  6192  
  6193  	bat, _ := batch.ProtoBatchToBatch(resp.Commands[0].Bat)
  6194  	cstrCol := containers.NewNonNullBatchWithSharedMemory(bat, common.DefaultAllocator).GetVectorByName(pkgcatalog.SystemRelAttr_Constraint)
  6195  	require.Equal(t, 3, cstrCol.Length())
  6196  	require.Equal(t, []byte("start version"), cstrCol.Get(0).([]byte))
  6197  	require.Equal(t, []byte("version 1"), cstrCol.Get(1).([]byte))
  6198  	require.Equal(t, []byte("version 2"), cstrCol.Get(2).([]byte))
  6199  
  6200  	commentCol := containers.NewNonNullBatchWithSharedMemory(bat, common.DefaultAllocator).GetVectorByName(pkgcatalog.SystemRelAttr_Comment)
  6201  	require.Equal(t, 3, commentCol.Length())
  6202  	require.Equal(t, []byte("comment version"), commentCol.Get(0).([]byte))
  6203  	require.Equal(t, []byte("comment version 1"), commentCol.Get(1).([]byte))
  6204  	require.Equal(t, []byte("comment version 1"), commentCol.Get(2).([]byte))
  6205  
  6206  	close()
  6207  
  6208  	tae.Restart(ctx)
  6209  
  6210  	resp, close, _ = logtail.HandleSyncLogTailReq(ctx, new(dummyCpkGetter), tae.LogtailMgr, tae.Catalog, api.SyncLogTailReq{
  6211  		CnHave: tots(types.BuildTS(0, 0)),
  6212  		CnWant: tots(types.MaxTs()),
  6213  		Table:  &api.TableID{DbId: pkgcatalog.MO_CATALOG_ID, TbId: pkgcatalog.MO_TABLES_ID},
  6214  	}, true)
  6215  
  6216  	bat, _ = batch.ProtoBatchToBatch(resp.Commands[0].Bat)
  6217  	cstrCol = containers.NewNonNullBatchWithSharedMemory(bat, common.DefaultAllocator).GetVectorByName(pkgcatalog.SystemRelAttr_Constraint)
  6218  	require.Equal(t, 3, cstrCol.Length())
  6219  	require.Equal(t, []byte("start version"), cstrCol.Get(0).([]byte))
  6220  	require.Equal(t, []byte("version 1"), cstrCol.Get(1).([]byte))
  6221  	require.Equal(t, []byte("version 2"), cstrCol.Get(2).([]byte))
  6222  
  6223  	commentCol = containers.NewNonNullBatchWithSharedMemory(bat, common.DefaultAllocator).GetVectorByName(pkgcatalog.SystemRelAttr_Comment)
  6224  	require.Equal(t, 3, commentCol.Length())
  6225  	require.Equal(t, []byte("comment version"), commentCol.Get(0).([]byte))
  6226  	require.Equal(t, []byte("comment version 1"), commentCol.Get(1).([]byte))
  6227  	require.Equal(t, []byte("comment version 1"), commentCol.Get(2).([]byte))
  6228  	close()
  6229  
  6230  	logutil.Info(tae.Catalog.SimplePPString(common.PPL2))
  6231  
  6232  	txn, _ = tae.StartTxn(nil)
  6233  	db, _ = txn.GetDatabase("db")
  6234  	_, err = db.DropRelationByName("test")
  6235  	require.NoError(t, err)
  6236  	txn.Commit(context.Background())
  6237  
  6238  	resp, close, _ = logtail.HandleSyncLogTailReq(ctx, new(dummyCpkGetter), tae.LogtailMgr, tae.Catalog, api.SyncLogTailReq{
  6239  		CnHave: tots(types.BuildTS(0, 0)),
  6240  		CnWant: tots(types.MaxTs()),
  6241  		Table:  &api.TableID{DbId: pkgcatalog.MO_CATALOG_ID, TbId: pkgcatalog.MO_COLUMNS_ID},
  6242  	}, true)
  6243  
  6244  	require.Equal(t, 2, len(resp.Commands)) // create and drop
  6245  	require.Equal(t, api.Entry_Insert, resp.Commands[0].EntryType)
  6246  	require.Equal(t, api.Entry_Delete, resp.Commands[1].EntryType)
  6247  	close()
  6248  }
  6249  
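        // TestAlterFakePk adds two columns to a table without a user-defined primary key,
        // deletes two rows, scans a newly added column with Foreach, and checks the
        // logtail insert/delete batches, including the fake primary-key column.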
  6250  func TestAlterFakePk(t *testing.T) {
  6251  	defer testutils.AfterTest(t)()
  6252  	testutils.EnsureNoLeak(t)
  6253  	ctx := context.Background()
  6254  
  6255  	opts := config.WithLongScanAndCKPOpts(nil)
  6256  	tae := testutil.NewTestEngine(ctx, ModuleName, t, opts)
  6257  	defer tae.Close()
  6258  	schema := catalog.MockSchemaAll(3, -1)
  6259  	schema.BlockMaxRows = 10
  6260  	schema.ObjectMaxBlocks = 2
  6261  	tae.BindSchema(schema)
  6262  	bats := catalog.MockBatch(schema, 12).Split(3)
  6263  	tae.CreateRelAndAppend(bats[0], true)
  6264  
  6265  	var did, tid uint64
  6266  	var blkFp *common.ID
  6267  	{
  6268  		// add two columns
  6269  		txn, rel := tae.GetRelation()
  6270  		tid = rel.ID()
  6271  		d, _ := rel.GetDB()
  6272  		did = d.GetID()
  6273  		blkFp = testutil.GetOneObject(rel).Fingerprint()
  6274  		tblEntry := rel.GetMeta().(*catalog.TableEntry)
  6275  		err := rel.AlterTable(context.TODO(), api.NewAddColumnReq(0, 0, "add1", types.NewProtoType(types.T_int32), 1))
  6276  		require.NoError(t, err)
  6277  		err = rel.AlterTable(context.TODO(), api.NewAddColumnReq(0, 0, "add2", types.NewProtoType(types.T_int64), 2))
  6278  		require.NoError(t, err)
  6279  		t.Log(tblEntry.StringWithLevel(common.PPL2))
  6280  		require.NoError(t, txn.Commit(context.Background()))
  6281  		require.Equal(t, 2, tblEntry.MVCC.Depth())
  6282  	}
  6283  
  6284  	{
  6285  		txn, rel := tae.GetRelation()
  6286  		obj, err := rel.GetObject(blkFp.ObjectID())
  6287  		require.NoError(t, err)
  6288  		err = obj.RangeDelete(0, 1, 1, handle.DT_Normal, common.DefaultAllocator)
  6289  		require.NoError(t, err)
  6290  		err = obj.RangeDelete(0, 3, 3, handle.DT_Normal, common.DefaultAllocator)
  6291  		require.NoError(t, err)
  6292  		require.NoError(t, txn.Commit(context.Background()))
  6293  	}
  6294  
  6295  	{
  6296  		txn, rel := tae.GetRelation()
  6297  		obj, err := rel.GetObject(blkFp.ObjectID())
  6298  		require.NoError(t, err)
  6299  		// foreach over a newly added column that does not exist in the block's data
  6300  		newSchema := obj.GetRelation().Schema()
  6301  		blkdata := obj.GetMeta().(*catalog.ObjectEntry).GetObjectData()
  6302  		sels := []uint32{1, 3}
  6303  		rows := make([]int, 0, 4)
  6304  		blkdata.Foreach(context.Background(), newSchema, 0, 1 /*"add1" column*/, func(v any, isnull bool, row int) error {
  6305  			require.True(t, true)
  6306  			rows = append(rows, row)
  6307  			return nil
  6308  		}, sels, common.DefaultAllocator)
  6309  		require.Equal(t, []int{1, 3}, rows)
  6310  		require.NoError(t, err)
  6311  		require.NoError(t, txn.Commit(context.Background()))
  6312  	}
  6313  
  6314  	resp, close, _ := logtail.HandleSyncLogTailReq(context.TODO(), new(dummyCpkGetter), tae.LogtailMgr, tae.Catalog, api.SyncLogTailReq{
  6315  		CnHave: tots(types.BuildTS(0, 0)),
  6316  		CnWant: tots(types.MaxTs()),
  6317  		Table:  &api.TableID{DbId: did, TbId: tid},
  6318  	}, true)
  6319  
  6320  	defer close()
  6321  	require.Equal(t, 2, len(resp.Commands)) // first blk: 4 inserts; first blk: 2 deletes
  6322  	for i, cmd := range resp.Commands {
  6323  		t.Logf("command %d, table name %v, type %d", i, cmd.TableName, cmd.EntryType)
  6324  	}
  6325  	require.Equal(t, api.Entry_Insert, resp.Commands[0].EntryType) // data insert
  6326  	require.Equal(t, api.Entry_Delete, resp.Commands[1].EntryType) // data delete
  6327  
  6328  	insBat, err := batch.ProtoBatchToBatch(resp.Commands[0].Bat)
  6329  	require.NoError(t, err)
  6330  	tnInsBat := containers.NewNonNullBatchWithSharedMemory(insBat, common.DefaultAllocator)
  6331  	t.Log(tnInsBat.Attrs)
  6332  	require.Equal(t, 6, len(tnInsBat.Vecs)) // 3 col + 1 fake pk + 1 rowid + 1 committs
  6333  	for _, v := range tnInsBat.Vecs {
  6334  		require.Equal(t, 4, v.Length())
  6335  	}
  6336  	t.Log(tnInsBat.GetVectorByName(pkgcatalog.FakePrimaryKeyColName).PPString(10))
  6337  
  6338  	delBat, err := batch.ProtoBatchToBatch(resp.Commands[1].Bat)
  6339  	require.NoError(t, err)
  6340  	tnDelBat := containers.NewNonNullBatchWithSharedMemory(delBat, common.DefaultAllocator)
  6341  	t.Log(tnDelBat.Attrs)
  6342  	require.Equal(t, 3, len(tnDelBat.Vecs)) // 1 fake pk + 1 rowid + 1 committs
  6343  	for _, v := range tnDelBat.Vecs {
  6344  		require.Equal(t, 2, v.Length())
  6345  	}
  6346  	t.Log(tnDelBat.GetVectorByName(catalog.AttrPKVal).PPString(10))
  6347  
  6348  }
  6349  
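        // TestAlterColumnAndFreeze checks schema evolution against in-flight writes: an
        // append started on the old schema fails to commit after a column is added, reads
        // work across schema versions (new columns read as null on old data), a column is
        // then removed, and the logtail reports batches for all three schema versions.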
  6350  func TestAlterColumnAndFreeze(t *testing.T) {
  6351  	defer testutils.AfterTest(t)()
  6352  	testutils.EnsureNoLeak(t)
  6353  	ctx := context.Background()
  6354  
  6355  	opts := config.WithLongScanAndCKPOpts(nil)
  6356  	tae := testutil.NewTestEngine(ctx, ModuleName, t, opts)
  6357  	defer tae.Close()
  6358  	schema := catalog.MockSchemaAll(10, 5)
  6359  	schema.BlockMaxRows = 10
  6360  	schema.ObjectMaxBlocks = 2
  6361  	tae.BindSchema(schema)
  6362  	bats := catalog.MockBatch(schema, 8).Split(2)
  6363  	tae.CreateRelAndAppend(bats[0], true)
  6364  
  6365  	{
  6366  		// test error in alter
  6367  		txn, rel := tae.GetRelation()
  6368  		tblEntry := rel.GetMeta().(*catalog.TableEntry)
  6369  		err := rel.AlterTable(context.TODO(), api.NewRemoveColumnReq(0, 0, 1, 10))
  6370  		require.True(t, moerr.IsMoErrCode(err, moerr.ErrInternal))
  6371  		require.Equal(t, 2, tblEntry.MVCC.Depth())
  6372  		t.Log(tblEntry.StringWithLevel(common.PPL2))
  6373  		require.NoError(t, txn.Rollback(context.Background()))
  6374  		// new node is clean
  6375  		require.Equal(t, 1, tblEntry.MVCC.Depth())
  6376  	}
  6377  
  6378  	txn0, rel0 := tae.GetRelation()
  6379  	db, err := rel0.GetDB()
  6380  	require.NoError(t, err)
  6381  	did, tid := db.GetID(), rel0.ID()
  6382  
  6383  	require.NoError(t, rel0.Append(context.Background(), bats[1])) // in localObject
  6384  
  6385  	txn, rel := tae.GetRelation()
  6386  	require.NoError(t, rel.AlterTable(context.TODO(), api.NewAddColumnReq(0, 0, "xyz", types.NewProtoType(types.T_int32), 0)))
  6387  	require.NoError(t, txn.Commit(context.Background()))
  6388  
  6389  	require.Error(t, rel0.Append(context.Background(), nil)) // schema changed, error
  6390  	// Test various reads on the old schema
  6391  	testutil.CheckAllColRowsByScan(t, rel0, 8, false)
  6392  
  6393  	filter := handle.NewEQFilter(uint16(3))
  6394  	id, row, err := rel0.GetByFilter(context.Background(), filter)
  6395  	filen, blkn := id.BlockID.Offsets() // first block
  6396  	require.Equal(t, uint16(0), filen)
  6397  	require.Equal(t, uint16(0), blkn)
  6398  	require.Equal(t, uint32(3), row)
  6399  	require.NoError(t, err)
  6400  
  6401  	for _, col := range rel0.Schema().(*catalog.Schema).ColDefs {
  6402  		val, null, err := rel0.GetValue(id, 2, uint16(col.Idx))
  6403  		require.NoError(t, err)
  6404  		require.False(t, null)
  6405  		if col.IsPrimary() {
  6406  			require.Equal(t, uint16(2), val.(uint16))
  6407  		}
  6408  	}
  6409  	require.Error(t, txn0.Commit(context.Background())) // schema changed, commit fails
  6410  
  6411  	// GetValueByFilter() is a combination of GetByFilter and GetValue
  6412  	// GetValueByPhyAddrKey is GetValue
  6413  
  6414  	tae.Restart(ctx)
  6415  
  6416  	txn, rel = tae.GetRelation()
  6417  	schema1 := rel.Schema().(*catalog.Schema)
  6418  	bats = catalog.MockBatch(schema1, 16).Split(4)
  6419  	require.Error(t, rel.Append(context.Background(), bats[0])) // dup error
  6420  	require.NoError(t, rel.Append(context.Background(), bats[1]))
  6421  	require.NoError(t, txn.Commit(context.Background()))
  6422  
  6423  	txn, rel = tae.GetRelation()
  6424  	testutil.CheckAllColRowsByScan(t, rel, 8, false)
  6425  	it := rel.MakeObjectIt()
  6426  	cnt := 0
  6427  	var id2 *common.ID
  6428  	for ; it.Valid(); it.Next() {
  6429  		cnt++
  6430  		id2 = it.GetObject().Fingerprint()
  6431  	}
  6432  	require.Equal(t, 2, cnt) // 2 objects because the first one is frozen
  6433  
  6434  	for _, col := range rel.Schema().(*catalog.Schema).ColDefs {
  6435  		val, null, err := rel.GetValue(id, 3, uint16(col.Idx)) // get first blk
  6436  		require.NoError(t, err)
  6437  		if col.Name == "xyz" {
  6438  			require.True(t, null) // fill null for the new column
  6439  		} else {
  6440  			require.False(t, null)
  6441  		}
  6442  		if col.IsPrimary() {
  6443  			require.Equal(t, uint16(3), val.(uint16))
  6444  		}
  6445  
  6446  		val, null, err = rel.GetValue(id2, 3, uint16(col.Idx)) // get second blk
  6447  		require.NoError(t, err)
  6448  		require.False(t, null)
  6449  		if col.IsPrimary() {
  6450  			require.Equal(t, uint16(7), val.(uint16))
  6451  		}
  6452  	}
  6453  	txn.Commit(context.Background())
  6454  
  6455  	// append to the second block
  6456  	txn, rel = tae.GetRelation()
  6457  	require.NoError(t, rel.Append(context.Background(), bats[2]))
  6458  	require.NoError(t, rel.Append(context.Background(), bats[3])) // new block and append 2 rows
  6459  	require.NoError(t, txn.Commit(context.Background()))
  6460  
  6461  	// remove and freeze
  6462  	txn, rel = tae.GetRelation()
  6463  	require.NoError(t, rel.AlterTable(context.TODO(), api.NewRemoveColumnReq(0, 0, 9, 8))) // remove float mock_8
  6464  	require.NoError(t, txn.Commit(context.Background()))
  6465  
  6466  	txn, rel = tae.GetRelation()
  6467  	schema2 := rel.Schema().(*catalog.Schema)
  6468  	bats = catalog.MockBatch(schema2, 20).Split(5)
  6469  	require.NoError(t, rel.Append(context.Background(), bats[4])) // new 4th block and append 4 blocks
  6470  
  6471  	testutil.CheckAllColRowsByScan(t, rel, 20, true)
  6472  	require.NoError(t, txn.Commit(context.Background()))
  6473  
  6474  	resp, close, _ := logtail.HandleSyncLogTailReq(context.TODO(), new(dummyCpkGetter), tae.LogtailMgr, tae.Catalog, api.SyncLogTailReq{
  6475  		CnHave: tots(types.BuildTS(0, 0)),
  6476  		CnWant: tots(types.MaxTs()),
  6477  		Table:  &api.TableID{DbId: did, TbId: tid},
  6478  	}, true)
  6479  
  6480  	require.Equal(t, 3, len(resp.Commands)) // inserts from 3 schema versions
  6481  	bat0 := resp.Commands[0].Bat
  6482  	require.Equal(t, 12, len(bat0.Attrs))
  6483  	require.Equal(t, "mock_9", bat0.Attrs[2+schema.GetSeqnum("mock_9")])
  6484  	bat1 := resp.Commands[1].Bat
  6485  	require.Equal(t, 13, len(bat1.Attrs))
  6486  	require.Equal(t, "mock_9", bat1.Attrs[2+schema1.GetSeqnum("mock_9")])
  6487  	require.Equal(t, "xyz", bat1.Attrs[2+schema1.GetSeqnum("xyz")])
  6488  	bat2 := resp.Commands[2].Bat
  6489  	require.Equal(t, 13, len(bat2.Attrs))
  6490  	require.Equal(t, "mock_9", bat2.Attrs[2+schema1.GetSeqnum("mock_9")])
  6491  	require.Equal(t, "mock_9", bat2.Attrs[2+schema2.GetSeqnum("mock_9")])
  6492  	require.Equal(t, "xyz", bat2.Attrs[2+schema1.GetSeqnum("xyz")])
  6493  	require.Equal(t, "xyz", bat2.Attrs[2+schema2.GetSeqnum("xyz")])
  6494  	close()
  6495  	logutil.Infof(tae.Catalog.SimplePPString(common.PPL1))
  6496  }
  6497  
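        // TestGlobalCheckpoint1 enables aggressive global checkpointing, restarts the
        // engine twice, and verifies the 400 appended rows remain visible each time.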
  6498  func TestGlobalCheckpoint1(t *testing.T) {
  6499  	defer testutils.AfterTest(t)()
  6500  	testutils.EnsureNoLeak(t)
  6501  	ctx := context.Background()
  6502  
  6503  	opts := config.WithQuickScanAndCKPOpts(nil)
  6504  	options.WithCheckpointGlobalMinCount(1)(opts)
  6505  	options.WithGlobalVersionInterval(time.Millisecond * 10)(opts)
  6506  	tae := testutil.NewTestEngine(ctx, ModuleName, t, opts)
  6507  	defer tae.Close()
  6508  	schema := catalog.MockSchemaAll(10, 2)
  6509  	schema.BlockMaxRows = 10
  6510  	schema.ObjectMaxBlocks = 2
  6511  	tae.BindSchema(schema)
  6512  	bat := catalog.MockBatch(schema, 400)
  6513  
  6514  	tae.CreateRelAndAppend(bat, true)
  6515  
  6516  	t.Log(tae.Catalog.SimplePPString(common.PPL1))
  6517  	tae.Restart(ctx)
  6518  	t.Log(tae.Catalog.SimplePPString(common.PPL1))
  6519  	tae.CheckRowsByScan(400, true)
  6520  
  6521  	testutils.WaitExpect(4000, func() bool {
  6522  		return tae.Wal.GetPenddingCnt() == 0
  6523  	})
  6524  
  6525  	tae.Restart(ctx)
  6526  	tae.CheckRowsByScan(400, true)
  6527  }
  6528  
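        // TestAppendAndGC appends concurrently to two tables, waits for checkpointing to
        // drain, then checks that the disk cleaner merges checkpoints and that its consumed
        // watermark reaches the merged one again after a restart.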
  6529  func TestAppendAndGC(t *testing.T) {
  6530  	defer testutils.AfterTest(t)()
  6531  	testutils.EnsureNoLeak(t)
  6532  	ctx := context.Background()
  6533  
  6534  	opts := new(options.Options)
  6535  	opts = config.WithQuickScanAndCKPOpts(opts)
  6536  	options.WithDisableGCCheckpoint()(opts)
  6537  	tae := testutil.NewTestEngine(ctx, ModuleName, t, opts)
  6538  	defer tae.Close()
  6539  	db := tae.DB
  6540  	db.DiskCleaner.GetCleaner().SetMinMergeCountForTest(2)
  6541  
  6542  	schema1 := catalog.MockSchemaAll(13, 2)
  6543  	schema1.BlockMaxRows = 10
  6544  	schema1.ObjectMaxBlocks = 2
  6545  
  6546  	schema2 := catalog.MockSchemaAll(13, 2)
  6547  	schema2.BlockMaxRows = 10
  6548  	schema2.ObjectMaxBlocks = 2
  6549  	{
  6550  		txn, _ := db.StartTxn(nil)
  6551  		database, err := txn.CreateDatabase("db", "", "")
  6552  		assert.Nil(t, err)
  6553  		_, err = database.CreateRelation(schema1)
  6554  		assert.Nil(t, err)
  6555  		_, err = database.CreateRelation(schema2)
  6556  		assert.Nil(t, err)
  6557  		assert.Nil(t, txn.Commit(context.Background()))
  6558  	}
  6559  	bat := catalog.MockBatch(schema1, int(schema1.BlockMaxRows*10-1))
  6560  	defer bat.Close()
  6561  	bats := bat.Split(bat.Length())
  6562  
  6563  	pool, err := ants.NewPool(20)
  6564  	assert.Nil(t, err)
  6565  	defer pool.Release()
  6566  	var wg sync.WaitGroup
  6567  
  6568  	for _, data := range bats {
  6569  		wg.Add(2)
  6570  		err = pool.Submit(testutil.AppendClosure(t, data, schema1.Name, db, &wg))
  6571  		assert.Nil(t, err)
  6572  		err = pool.Submit(testutil.AppendClosure(t, data, schema2.Name, db, &wg))
  6573  		assert.Nil(t, err)
  6574  	}
  6575  	wg.Wait()
  6576  	testutils.WaitExpect(10000, func() bool {
  6577  		return db.Runtime.Scheduler.GetPenddingLSNCnt() == 0
  6578  	})
  6579  	t.Log(tae.Catalog.SimplePPString(common.PPL1))
  6580  	assert.Equal(t, uint64(0), db.Runtime.Scheduler.GetPenddingLSNCnt())
  6581  	err = db.DiskCleaner.GetCleaner().CheckGC()
  6582  	assert.Nil(t, err)
  6583  	testutils.WaitExpect(5000, func() bool {
  6584  		return db.DiskCleaner.GetCleaner().GetMinMerged() != nil
  6585  	})
  6586  	testutils.WaitExpect(10000, func() bool {
  6587  		return db.DiskCleaner.GetCleaner().GetMinMerged() != nil
  6588  	})
  6589  	minMerged := db.DiskCleaner.GetCleaner().GetMinMerged()
  6590  	assert.NotNil(t, minMerged)
  6591  	tae.Restart(ctx)
  6592  	db = tae.DB
  6593  	db.DiskCleaner.GetCleaner().SetMinMergeCountForTest(2)
  6594  	testutils.WaitExpect(5000, func() bool {
  6595  		if db.DiskCleaner.GetCleaner().GetMaxConsumed() == nil {
  6596  			return false
  6597  		}
  6598  		end := db.DiskCleaner.GetCleaner().GetMaxConsumed().GetEnd()
  6599  		minEnd := minMerged.GetEnd()
  6600  		return end.GreaterEq(&minEnd)
  6601  	})
  6602  	end := db.DiskCleaner.GetCleaner().GetMaxConsumed().GetEnd()
  6603  	minEnd := minMerged.GetEnd()
  6604  	assert.True(t, end.GreaterEq(&minEnd))
  6605  	err = db.DiskCleaner.GetCleaner().CheckGC()
  6606  	assert.Nil(t, err)
  6607  
  6608  }
  6609  
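        // TestSnapshotGC is the TestAppendAndGC scenario with snapshot timestamps recorded
        // in a dedicated snapshot table (registered via SetTid), so the cleaner's GC check
        // runs with snapshots present before and after a restart.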
  6610  func TestSnapshotGC(t *testing.T) {
  6611  	defer testutils.AfterTest(t)()
  6612  	testutils.EnsureNoLeak(t)
  6613  	ctx := context.Background()
  6614  
  6615  	opts := new(options.Options)
  6616  	opts = config.WithQuickScanAndCKPOpts(opts)
  6617  	options.WithDisableGCCheckpoint()(opts)
  6618  	tae := testutil.NewTestEngine(ctx, ModuleName, t, opts)
  6619  	defer tae.Close()
  6620  	db := tae.DB
  6621  	db.DiskCleaner.GetCleaner().SetMinMergeCountForTest(1)
  6622  
  6623  	snapshotSchema := catalog.MockSnapShotSchema()
  6624  	snapshotSchema.BlockMaxRows = 2
  6625  	snapshotSchema.ObjectMaxBlocks = 1
  6626  	schema1 := catalog.MockSchemaAll(13, 2)
  6627  	schema1.BlockMaxRows = 10
  6628  	schema1.ObjectMaxBlocks = 2
  6629  
  6630  	schema2 := catalog.MockSchemaAll(13, 2)
  6631  	schema2.BlockMaxRows = 10
  6632  	schema2.ObjectMaxBlocks = 2
  6633  	var rel3 handle.Relation
  6634  	{
  6635  		txn, _ := db.StartTxn(nil)
  6636  		database, err := txn.CreateDatabase("db", "", "")
  6637  		assert.Nil(t, err)
  6638  		_, err = database.CreateRelation(schema1)
  6639  		assert.Nil(t, err)
  6640  		_, err = database.CreateRelation(schema2)
  6641  		assert.Nil(t, err)
  6642  		rel3, err = database.CreateRelation(snapshotSchema)
  6643  		assert.Nil(t, err)
  6644  		assert.Nil(t, txn.Commit(context.Background()))
  6645  	}
  6646  	db.DiskCleaner.GetCleaner().SetTid(rel3.ID())
  6647  	db.DiskCleaner.GetCleaner().DisableGCForTest()
  6648  	bat := catalog.MockBatch(schema1, int(schema1.BlockMaxRows*10-1))
  6649  	defer bat.Close()
  6650  	bats := bat.Split(bat.Length())
  6651  
  6652  	pool, err := ants.NewPool(20)
  6653  	assert.Nil(t, err)
  6654  	defer pool.Release()
  6655  	snapshots := make([]int64, 0)
  6656  	var wg sync.WaitGroup
  6657  	var snapWG sync.WaitGroup
  6658  	snapWG.Add(1)
  6659  	go func() {
  6660  		i := 0
  6661  		for {
  6662  			if i > 3 {
  6663  				snapWG.Done()
  6664  				break
  6665  			}
  6666  			i++
  6667  			time.Sleep(200 * time.Millisecond)
  6668  			snapshot := time.Now().UTC().Unix()
  6669  			snapshots = append(snapshots, snapshot)
  6670  		}
  6671  	}()
  6672  	for _, data := range bats {
  6673  		wg.Add(2)
  6674  		err = pool.Submit(testutil.AppendClosure(t, data, schema1.Name, db, &wg))
  6675  		assert.Nil(t, err)
  6676  
  6677  		err = pool.Submit(testutil.AppendClosure(t, data, schema2.Name, db, &wg))
  6678  		assert.Nil(t, err)
  6679  	}
  6680  	snapWG.Wait()
  6681  	for _, snapshot := range snapshots {
  6682  		attrs := []string{"col0", "col1", "ts", "col3", "col4", "col5", "col6", "id"}
  6683  		vecTypes := []types.Type{types.T_uint64.ToType(),
  6684  			types.T_uint64.ToType(), types.T_int64.ToType(),
  6685  			types.T_enum.ToType(), types.T_uint64.ToType(), types.T_uint64.ToType(),
  6686  			types.T_uint64.ToType(), types.T_uint64.ToType()}
  6687  		opt := containers.Options{}
  6688  		opt.Capacity = 0
  6689  		data1 := containers.BuildBatch(attrs, vecTypes, opt)
  6690  		data1.Vecs[0].Append(uint64(0), false)
  6691  		data1.Vecs[1].Append(uint64(0), false)
  6692  		data1.Vecs[2].Append(snapshot, false)
  6693  		data1.Vecs[3].Append(types.Enum(1), false)
  6694  		data1.Vecs[4].Append(uint64(0), false)
  6695  		data1.Vecs[5].Append(uint64(0), false)
  6696  		data1.Vecs[6].Append(uint64(0), false)
  6697  		data1.Vecs[7].Append(uint64(0), false)
  6698  		txn1, _ := db.StartTxn(nil)
  6699  		database, _ := txn1.GetDatabase("db")
  6700  		rel, _ := database.GetRelationByName(snapshotSchema.Name)
  6701  		err = rel.Append(context.Background(), data1)
  6702  		data1.Close()
  6703  		assert.Nil(t, err)
  6704  		assert.Nil(t, txn1.Commit(context.Background()))
  6705  	}
  6706  	wg.Wait()
  6707  	testutils.WaitExpect(10000, func() bool {
  6708  		return db.Runtime.Scheduler.GetPenddingLSNCnt() == 0
  6709  	})
  6710  	db.DiskCleaner.GetCleaner().EnableGCForTest()
  6711  	t.Log(tae.Catalog.SimplePPString(common.PPL1))
  6712  	assert.Equal(t, uint64(0), db.Runtime.Scheduler.GetPenddingLSNCnt())
  6713  	testutils.WaitExpect(5000, func() bool {
  6714  		return db.DiskCleaner.GetCleaner().GetMinMerged() != nil
  6715  	})
  6716  	minMerged := db.DiskCleaner.GetCleaner().GetMinMerged()
  6717  	testutils.WaitExpect(5000, func() bool {
  6718  		return db.DiskCleaner.GetCleaner().GetMinMerged() != nil
  6719  	})
  6720  	assert.NotNil(t, minMerged)
  6721  	err = db.DiskCleaner.GetCleaner().CheckGC()
  6722  	assert.Nil(t, err)
  6723  	tae.RestartDisableGC(ctx)
  6724  	db = tae.DB
  6725  	db.DiskCleaner.GetCleaner().SetMinMergeCountForTest(1)
  6726  	db.DiskCleaner.GetCleaner().SetTid(rel3.ID())
  6727  	testutils.WaitExpect(5000, func() bool {
  6728  		if db.DiskCleaner.GetCleaner().GetMaxConsumed() == nil {
  6729  			return false
  6730  		}
  6731  		end := db.DiskCleaner.GetCleaner().GetMaxConsumed().GetEnd()
  6732  		minEnd := minMerged.GetEnd()
  6733  		return end.GreaterEq(&minEnd)
  6734  	})
  6735  	end := db.DiskCleaner.GetCleaner().GetMaxConsumed().GetEnd()
  6736  	minEnd := minMerged.GetEnd()
  6737  	assert.True(t, end.GreaterEq(&minEnd))
  6738  	err = db.DiskCleaner.GetCleaner().CheckGC()
  6739  	assert.Nil(t, err)
  6740  
  6741  }
  6742  
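        // TestGlobalCheckpoint2 forces incremental and global checkpoints around a table
        // drop/recreate and a column removal, then restarts: the dropped table is expected
        // to be absent from the replayed catalog while the altered schema (version, next
        // column seqnum, dropped attrs) is preserved.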
  6743  func TestGlobalCheckpoint2(t *testing.T) {
  6744  	defer testutils.AfterTest(t)()
  6745  	testutils.EnsureNoLeak(t)
  6746  	ctx := context.Background()
  6747  
  6748  	opts := config.WithQuickScanAndCKPOpts(nil)
  6749  	options.WithCheckpointGlobalMinCount(1)(opts)
  6750  	options.WithDisableGCCatalog()(opts)
  6751  	tae := testutil.NewTestEngine(ctx, ModuleName, t, opts)
  6752  	tae.BGCheckpointRunner.DisableCheckpoint()
  6753  	tae.BGCheckpointRunner.CleanPenddingCheckpoint()
  6754  	defer tae.Close()
  6755  	schema := catalog.MockSchemaAll(10, 2)
  6756  	schema.BlockMaxRows = 10
  6757  	schema.ObjectMaxBlocks = 2
  6758  	tae.BindSchema(schema)
  6759  	bat := catalog.MockBatch(schema, 40)
  6760  
  6761  	_, firstRel := tae.CreateRelAndAppend(bat, true)
  6762  
  6763  	tae.DropRelation(t)
  6764  	txn, err := tae.StartTxn(nil)
  6765  	assert.NoError(t, err)
  6766  	tae.IncrementalCheckpoint(txn.GetStartTS(), false, true, true)
  6767  	tae.GlobalCheckpoint(txn.GetStartTS(), 0, false)
  6768  	assert.NoError(t, txn.Commit(context.Background()))
  6769  
  6770  	tae.CreateRelAndAppend(bat, false)
  6771  
  6772  	txn, rel := tae.GetRelation()
  6773  	require.NoError(t, rel.AlterTable(context.Background(), api.NewRemoveColumnReq(0, 0, 3, 3)))
  6774  	require.NoError(t, txn.Commit(context.Background()))
  6775  
  6776  	txn, rel = tae.GetRelation()
  6777  	newschema := rel.Schema().(*catalog.Schema)
  6778  	require.Equal(t, uint32(1), newschema.Version)
  6779  	require.Equal(t, uint32(10), newschema.Extra.NextColSeqnum)
  6780  	require.Equal(t, "mock_3", newschema.Extra.DroppedAttrs[0])
  6781  	require.NoError(t, txn.Commit(context.Background()))
  6782  
  6783  	currTs := types.BuildTS(time.Now().UTC().UnixNano(), 0)
  6784  	assert.NoError(t, err)
  6785  	tae.IncrementalCheckpoint(currTs, false, true, true)
  6786  	tae.GlobalCheckpoint(currTs, time.Duration(1), false)
  6787  
  6788  	p := &catalog.LoopProcessor{}
  6789  	tableExisted := false
  6790  	p.TableFn = func(te *catalog.TableEntry) error {
  6791  		if te.ID == firstRel.ID() {
  6792  			tableExisted = true
  6793  		}
  6794  		return nil
  6795  	}
  6796  
  6797  	assert.NoError(t, tae.Catalog.RecurLoop(p))
  6798  	assert.True(t, tableExisted)
  6799  
  6800  	t.Log(tae.Catalog.SimplePPString(3))
  6801  	tae.Restart(ctx)
  6802  	t.Log(tae.Catalog.SimplePPString(3))
  6803  
  6804  	tableExisted = false
  6805  	assert.NoError(t, tae.Catalog.RecurLoop(p))
  6806  	assert.False(t, tableExisted)
  6807  	txn, rel = tae.GetRelation()
  6808  	newschema = rel.Schema().(*catalog.Schema)
  6809  	require.Equal(t, uint32(1), newschema.Version)
  6810  	require.Equal(t, uint32(10), newschema.Extra.NextColSeqnum)
  6811  	require.Equal(t, "mock_3", newschema.Extra.DroppedAttrs[0])
  6812  	require.NoError(t, txn.Commit(context.Background()))
  6813  
  6814  }
  6815  
  6816  func TestGlobalCheckpoint3(t *testing.T) {
  6817  	t.Skip("This case crashes occasionally, is being fixed, skip it for now")
  6818  	defer testutils.AfterTest(t)()
  6819  	testutils.EnsureNoLeak(t)
  6820  	ctx := context.Background()
  6821  
  6822  	opts := config.WithQuickScanAndCKPOpts(nil)
  6823  	options.WithCheckpointGlobalMinCount(1)(opts)
  6824  	options.WithGlobalVersionInterval(time.Nanosecond * 1)(opts)
  6825  	options.WithDisableGCCatalog()(opts)
  6826  	tae := testutil.NewTestEngine(ctx, ModuleName, t, opts)
  6827  	defer tae.Close()
  6828  	schema := catalog.MockSchemaAll(10, 2)
  6829  	schema.BlockMaxRows = 10
  6830  	schema.ObjectMaxBlocks = 2
  6831  	tae.BindSchema(schema)
  6832  	bat := catalog.MockBatch(schema, 40)
  6833  
  6834  	_, rel := tae.CreateRelAndAppend(bat, true)
  6835  	testutils.WaitExpect(1000, func() bool {
  6836  		return tae.Wal.GetPenddingCnt() == 0
  6837  	})
  6838  
  6839  	tae.DropRelation(t)
  6840  	testutils.WaitExpect(1000, func() bool {
  6841  		return tae.Wal.GetPenddingCnt() == 0
  6842  	})
  6843  
  6844  	tae.CreateRelAndAppend(bat, false)
  6845  	testutils.WaitExpect(1000, func() bool {
  6846  		return tae.Wal.GetPenddingCnt() == 0
  6847  	})
  6848  
  6849  	p := &catalog.LoopProcessor{}
  6850  	tableExisted := false
  6851  	p.TableFn = func(te *catalog.TableEntry) error {
  6852  		if te.ID == rel.ID() {
  6853  			tableExisted = true
  6854  		}
  6855  		return nil
  6856  	}
  6857  
  6858  	assert.NoError(t, tae.Catalog.RecurLoop(p))
  6859  	assert.True(t, tableExisted)
  6860  
  6861  	tae.Restart(ctx)
  6862  
  6863  	tableExisted = false
  6864  	assert.NoError(t, tae.Catalog.RecurLoop(p))
  6865  	assert.False(t, tableExisted)
  6866  }
  6867  
  6868  func TestGlobalCheckpoint4(t *testing.T) {
  6869  	t.Skip("This case crashes occasionally, is being fixed, skip it for now")
  6870  	defer testutils.AfterTest(t)()
  6871  	testutils.EnsureNoLeak(t)
  6872  	ctx := context.Background()
  6873  
  6874  	opts := config.WithQuickScanAndCKPOpts(nil)
  6875  	tae := testutil.NewTestEngine(ctx, ModuleName, t, opts)
  6876  	defer tae.Close()
  6877  	tae.BGCheckpointRunner.DisableCheckpoint()
  6878  	tae.BGCheckpointRunner.CleanPenddingCheckpoint()
  6879  	globalCkpInterval := time.Second
  6880  
  6881  	schema := catalog.MockSchemaAll(18, 2)
  6882  	schema.BlockMaxRows = 10
  6883  	schema.ObjectMaxBlocks = 2
  6884  	tae.BindSchema(schema)
  6885  	bat := catalog.MockBatch(schema, 40)
  6886  
  6887  	txn, err := tae.StartTxn(nil)
  6888  	assert.NoError(t, err)
  6889  	_, err = txn.CreateDatabase("db", "", "")
  6890  	assert.NoError(t, err)
  6891  	assert.NoError(t, txn.Commit(context.Background()))
  6892  
  6893  	err = tae.IncrementalCheckpoint(txn.GetCommitTS(), false, true, true)
  6894  	assert.NoError(t, err)
  6895  
  6896  	txn, err = tae.StartTxn(nil)
  6897  	assert.NoError(t, err)
  6898  	_, err = txn.DropDatabase("db")
  6899  	assert.NoError(t, err)
  6900  	assert.NoError(t, txn.Commit(context.Background()))
  6901  
  6902  	err = tae.GlobalCheckpoint(txn.GetCommitTS(), globalCkpInterval, false)
  6903  	assert.NoError(t, err)
  6904  
  6905  	tae.CreateRelAndAppend(bat, true)
  6906  
  6907  	t.Log(tae.Catalog.SimplePPString(3))
  6908  	tae.Restart(ctx)
  6909  	tae.BGCheckpointRunner.DisableCheckpoint()
  6910  	tae.BGCheckpointRunner.CleanPenddingCheckpoint()
  6911  	t.Log(tae.Catalog.SimplePPString(3))
  6912  
  6913  	// tae.CreateRelAndAppend(bat, false)
  6914  
  6915  	txn, err = tae.StartTxn(nil)
  6916  	assert.NoError(t, err)
  6917  	db, err := txn.GetDatabase("db")
  6918  	assert.NoError(t, err)
  6919  	_, err = db.DropRelationByName(schema.Name)
  6920  	assert.NoError(t, err)
  6921  	assert.NoError(t, txn.Commit(context.Background()))
  6922  
  6923  	err = tae.GlobalCheckpoint(txn.GetCommitTS(), globalCkpInterval, false)
  6924  	assert.NoError(t, err)
  6925  
  6926  	tae.CreateRelAndAppend(bat, false)
  6927  
  6928  	t.Log(tae.Catalog.SimplePPString(3))
  6929  	tae.Restart(ctx)
  6930  	tae.BGCheckpointRunner.DisableCheckpoint()
  6931  	tae.BGCheckpointRunner.CleanPenddingCheckpoint()
  6932  	t.Log(tae.Catalog.SimplePPString(3))
  6933  }
  6934  
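        // TestGlobalCheckpoint5 interleaves appends with explicit incremental and global
        // checkpoints and verifies row counts survive a restart with checkpointing disabled.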
  6935  func TestGlobalCheckpoint5(t *testing.T) {
  6936  	defer testutils.AfterTest(t)()
  6937  	testutils.EnsureNoLeak(t)
  6938  	ctx := context.Background()
  6939  
  6940  	opts := config.WithQuickScanAndCKPOpts(nil)
  6941  	tae := testutil.NewTestEngine(ctx, ModuleName, t, opts)
  6942  	defer tae.Close()
  6943  	tae.BGCheckpointRunner.DisableCheckpoint()
  6944  	tae.BGCheckpointRunner.CleanPenddingCheckpoint()
  6945  	globalCkpInterval := time.Duration(0)
  6946  
  6947  	schema := catalog.MockSchemaAll(18, 2)
  6948  	schema.BlockMaxRows = 10
  6949  	schema.ObjectMaxBlocks = 2
  6950  	tae.BindSchema(schema)
  6951  	bat := catalog.MockBatch(schema, 60)
  6952  	bats := bat.Split(3)
  6953  
  6954  	txn, err := tae.StartTxn(nil)
  6955  	assert.NoError(t, err)
  6956  	err = tae.IncrementalCheckpoint(txn.GetStartTS(), false, true, true)
  6957  	assert.NoError(t, err)
  6958  	assert.NoError(t, txn.Commit(context.Background()))
  6959  
  6960  	tae.CreateRelAndAppend(bats[0], true)
  6961  
  6962  	txn, err = tae.StartTxn(nil)
  6963  	assert.NoError(t, err)
  6964  	err = tae.GlobalCheckpoint(txn.GetStartTS(), globalCkpInterval, false)
  6965  	assert.NoError(t, err)
  6966  	assert.NoError(t, txn.Commit(context.Background()))
  6967  
  6968  	tae.DoAppend(bats[1])
  6969  
  6970  	txn, err = tae.StartTxn(nil)
  6971  	assert.NoError(t, err)
  6972  	err = tae.GlobalCheckpoint(txn.GetStartTS(), globalCkpInterval, false)
  6973  	assert.NoError(t, err)
  6974  	assert.NoError(t, txn.Commit(context.Background()))
  6975  
  6976  	tae.CheckRowsByScan(40, true)
  6977  
  6978  	t.Log(tae.Catalog.SimplePPString(3))
  6979  	tae.Restart(ctx)
  6980  	tae.BGCheckpointRunner.DisableCheckpoint()
  6981  	tae.BGCheckpointRunner.CleanPenddingCheckpoint()
  6982  	t.Log(tae.Catalog.SimplePPString(3))
  6983  
  6984  	tae.CheckRowsByScan(40, true)
  6985  
  6986  	tae.DoAppend(bats[2])
  6987  
  6988  	tae.CheckRowsByScan(60, true)
  6989  	txn, err = tae.StartTxn(nil)
  6990  	assert.NoError(t, err)
  6991  	err = tae.GlobalCheckpoint(txn.GetStartTS(), globalCkpInterval, false)
  6992  	assert.NoError(t, err)
  6993  	assert.NoError(t, err)
  6994  	assert.NoError(t, txn.Commit(context.Background()))
  6995  }
  6996  
  6997  func TestGlobalCheckpoint6(t *testing.T) {
  6998  	t.Skip("This case crashes occasionally, is being fixed, skip it for now")
  6999  	defer testutils.AfterTest(t)()
  7000  	testutils.EnsureNoLeak(t)
  7001  	ctx := context.Background()
  7002  
  7003  	opts := config.WithQuickScanAndCKPOpts(nil)
  7004  	tae := testutil.NewTestEngine(ctx, ModuleName, t, opts)
  7005  	defer tae.Close()
  7006  	tae.BGCheckpointRunner.DisableCheckpoint()
  7007  	tae.BGCheckpointRunner.CleanPenddingCheckpoint()
  7008  	globalCkpInterval := time.Duration(0)
  7009  	restartCnt := 10
  7010  	batchsize := 10
  7011  
  7012  	schema := catalog.MockSchemaAll(18, 2)
  7013  	schema.BlockMaxRows = 5
  7014  	schema.ObjectMaxBlocks = 2
  7015  	tae.BindSchema(schema)
  7016  	bat := catalog.MockBatch(schema, batchsize*(restartCnt+1))
  7017  	bats := bat.Split(restartCnt + 1)
  7018  
  7019  	tae.CreateRelAndAppend(bats[0], true)
  7020  	txn, err := tae.StartTxn(nil)
  7021  	assert.NoError(t, err)
  7022  	err = tae.IncrementalCheckpoint(txn.GetStartTS(), false, true, true)
  7023  	assert.NoError(t, err)
  7024  	assert.NoError(t, txn.Commit(context.Background()))
  7025  
  7026  	for i := 0; i < restartCnt; i++ {
  7027  		tae.DoAppend(bats[i+1])
  7028  		txn, err = tae.StartTxn(nil)
  7029  		assert.NoError(t, err)
  7030  		err = tae.GlobalCheckpoint(txn.GetStartTS(), globalCkpInterval, false)
  7031  		assert.NoError(t, err)
  7032  		assert.NoError(t, txn.Commit(context.Background()))
  7033  
  7034  		rows := (i + 2) * batchsize
  7035  		tae.CheckRowsByScan(rows, true)
  7036  		t.Log(tae.Catalog.SimplePPString(3))
  7037  		tae.Restart(ctx)
  7038  		tae.BGCheckpointRunner.DisableCheckpoint()
  7039  		tae.BGCheckpointRunner.CleanPenddingCheckpoint()
  7040  		t.Log(tae.Catalog.SimplePPString(3))
  7041  		tae.CheckRowsByScan(rows, true)
  7042  	}
  7043  }
  7044  
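        // TestGCCheckpoint1 waits for incremental and global checkpoints to finish, GCs
        // checkpoint entries up to a timestamp, and checks that exactly one global entry
        // remains and that the surviving incrementals start right after it.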
  7045  func TestGCCheckpoint1(t *testing.T) {
  7046  	defer testutils.AfterTest(t)()
  7047  	testutils.EnsureNoLeak(t)
  7048  	ctx := context.Background()
  7049  
  7050  	opts := config.WithQuickScanAndCKPOpts(nil)
  7051  	tae := testutil.NewTestEngine(ctx, ModuleName, t, opts)
  7052  	defer tae.Close()
  7053  
  7054  	schema := catalog.MockSchemaAll(18, 2)
  7055  	schema.BlockMaxRows = 5
  7056  	schema.ObjectMaxBlocks = 2
  7057  	tae.BindSchema(schema)
  7058  	bat := catalog.MockBatch(schema, 50)
  7059  
  7060  	tae.CreateRelAndAppend(bat, true)
  7061  
  7062  	testutils.WaitExpect(4000, func() bool {
  7063  		return tae.Wal.GetPenddingCnt() == 0
  7064  	})
  7065  	assert.Equal(t, uint64(0), tae.Wal.GetPenddingCnt())
  7066  
  7067  	testutils.WaitExpect(4000, func() bool {
  7068  		return tae.BGCheckpointRunner.GetPenddingIncrementalCount() == 0
  7069  	})
  7070  	assert.Equal(t, 0, tae.BGCheckpointRunner.GetPenddingIncrementalCount())
  7071  
  7072  	testutils.WaitExpect(4000, func() bool {
  7073  		return tae.BGCheckpointRunner.MaxGlobalCheckpoint().IsFinished()
  7074  	})
  7075  	assert.True(t, tae.BGCheckpointRunner.MaxGlobalCheckpoint().IsFinished())
  7076  
  7077  	tae.BGCheckpointRunner.DisableCheckpoint()
  7078  
  7079  	gcTS := types.BuildTS(time.Now().UTC().UnixNano(), 0)
  7080  	t.Log(gcTS.ToString())
  7081  	tae.BGCheckpointRunner.GCByTS(context.Background(), gcTS)
  7082  
  7083  	maxGlobal := tae.BGCheckpointRunner.MaxGlobalCheckpoint()
  7084  
  7085  	testutils.WaitExpect(4000, func() bool {
  7087  		return !tae.BGCheckpointRunner.ExistPendingEntryToGC()
  7088  	})
  7089  	assert.False(t, tae.BGCheckpointRunner.ExistPendingEntryToGC())
  7090  
  7091  	globals := tae.BGCheckpointRunner.GetAllGlobalCheckpoints()
  7092  	assert.Equal(t, 1, len(globals))
  7093  	end := maxGlobal.GetEnd()
  7094  	maxEnd := globals[0].GetEnd()
  7095  	assert.True(t, end.Equal(&maxEnd))
  7096  	for _, global := range globals {
  7097  		t.Log(global.String())
  7098  	}
  7099  
  7100  	incrementals := tae.BGCheckpointRunner.GetAllIncrementalCheckpoints()
  7101  	prevEnd := maxGlobal.GetEnd().Prev()
  7102  	for _, incremental := range incrementals {
  7103  		startTS := incremental.GetStart()
  7104  		prevEndNextTS := prevEnd.Next()
  7105  		assert.True(t, startTS.Equal(&prevEndNextTS))
  7106  		t.Log(incremental.String())
  7107  	}
  7108  }
  7109  
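        // TestGCCatalog1 builds two databases with three tables and four objects, then
        // soft-deletes objects, drops tables, and drops databases step by step, checking
        // after each catalog GCByTS that exactly the expected entries remain.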
  7110  func TestGCCatalog1(t *testing.T) {
  7111  	defer testutils.AfterTest(t)()
  7112  	ctx := context.Background()
  7113  
  7114  	opts := config.WithLongScanAndCKPOpts(nil)
  7115  	tae := testutil.NewTestEngine(ctx, ModuleName, t, opts)
  7116  	defer tae.Close()
  7117  
  7118  	txn1, _ := tae.StartTxn(nil)
  7119  	db, err := txn1.CreateDatabase("db1", "", "")
  7120  	assert.Nil(t, err)
  7121  	db2, err := txn1.CreateDatabase("db2", "", "")
  7122  	assert.Nil(t, err)
  7123  
  7124  	schema := catalog.MockSchema(1, 0)
  7125  	schema.Name = "tb1"
  7126  	tb, err := db.CreateRelation(schema)
  7127  	assert.Nil(t, err)
  7128  	schema2 := catalog.MockSchema(1, 0)
  7129  	schema2.Name = "tb2"
  7130  	tb2, err := db.CreateRelation(schema2)
  7131  	assert.Nil(t, err)
  7132  	schema3 := catalog.MockSchema(1, 0)
  7133  	schema3.Name = "tb3"
  7134  	tb3, err := db2.CreateRelation(schema3)
  7135  	assert.Nil(t, err)
  7136  
  7137  	_, err = tb.CreateObject(false)
  7138  	assert.Nil(t, err)
  7139  	_, err = tb2.CreateObject(false)
  7140  	assert.Nil(t, err)
  7141  	obj3, err := tb2.CreateObject(false)
  7142  	assert.Nil(t, err)
  7143  	obj4, err := tb3.CreateObject(false)
  7144  	assert.Nil(t, err)
  7145  
  7146  	err = txn1.Commit(context.Background())
  7147  	assert.Nil(t, err)
  7148  
  7149  	p := &catalog.LoopProcessor{}
  7150  	var dbCnt, tableCnt, objCnt int
  7151  	p.DatabaseFn = func(d *catalog.DBEntry) error {
  7152  		if d.IsSystemDB() {
  7153  			return nil
  7154  		}
  7155  		dbCnt++
  7156  		return nil
  7157  	}
  7158  	p.TableFn = func(te *catalog.TableEntry) error {
  7159  		if te.GetDB().IsSystemDB() {
  7160  			return nil
  7161  		}
  7162  		tableCnt++
  7163  		return nil
  7164  	}
  7165  	p.ObjectFn = func(se *catalog.ObjectEntry) error {
  7166  		if se.GetTable().GetDB().IsSystemDB() {
  7167  			return nil
  7168  		}
  7169  		objCnt++
  7170  		return nil
  7171  	}
  7172  	resetCount := func() {
  7173  		dbCnt = 0
  7174  		tableCnt = 0
  7175  		objCnt = 0
  7176  	}
  7177  
  7178  	err = tae.Catalog.RecurLoop(p)
  7179  	assert.NoError(t, err)
  7180  	assert.Equal(t, 2, dbCnt)
  7181  	assert.Equal(t, 3, tableCnt)
  7182  	assert.Equal(t, 4, objCnt)
  7183  
  7184  	txn2, err := tae.StartTxn(nil)
  7185  	assert.NoError(t, err)
  7186  	db2, err = txn2.GetDatabase("db2")
  7187  	assert.NoError(t, err)
  7188  	tb3, err = db2.GetRelationByName("tb3")
  7189  	assert.NoError(t, err)
  7190  	obj4, err = tb3.GetObject(obj4.GetID())
  7191  	assert.NoError(t, err)
  7192  	err = txn2.Commit(context.Background())
  7193  	assert.NoError(t, err)
  7194  
  7195  	t.Log(tae.Catalog.SimplePPString(3))
  7196  	commitTS := txn2.GetCommitTS()
  7197  	tae.Catalog.GCByTS(context.Background(), commitTS.Next())
  7198  	t.Log(tae.Catalog.SimplePPString(3))
  7199  
  7200  	resetCount()
  7201  	err = tae.Catalog.RecurLoop(p)
  7202  	assert.NoError(t, err)
  7203  	assert.Equal(t, 2, dbCnt)
  7204  	assert.Equal(t, 3, tableCnt)
  7205  	assert.Equal(t, 4, objCnt)
  7206  
  7207  	txn3, err := tae.StartTxn(nil)
  7208  	assert.NoError(t, err)
  7209  	db2, err = txn3.GetDatabase("db2")
  7210  	assert.NoError(t, err)
  7211  	tb3, err = db2.GetRelationByName("tb3")
  7212  	assert.NoError(t, err)
  7213  	err = tb3.SoftDeleteObject(obj4.GetID())
  7214  	assert.NoError(t, err)
  7215  
  7216  	db2, err = txn3.GetDatabase("db1")
  7217  	assert.NoError(t, err)
  7218  	tb3, err = db2.GetRelationByName("tb2")
  7219  	assert.NoError(t, err)
  7220  	err = tb3.SoftDeleteObject(obj3.GetID())
  7221  	assert.NoError(t, err)
  7222  
  7223  	err = txn3.Commit(context.Background())
  7224  	assert.NoError(t, err)
  7225  
  7226  	t.Log(tae.Catalog.SimplePPString(3))
  7227  	commitTS = txn3.GetCommitTS()
  7228  	tae.Catalog.GCByTS(context.Background(), commitTS.Next())
  7229  	t.Log(tae.Catalog.SimplePPString(3))
  7230  
  7231  	resetCount()
  7232  	err = tae.Catalog.RecurLoop(p)
  7233  	assert.NoError(t, err)
  7234  	assert.Equal(t, 2, dbCnt)
  7235  	assert.Equal(t, 3, tableCnt)
  7236  	assert.Equal(t, 2, objCnt)
  7237  
  7238  	txn4, err := tae.StartTxn(nil)
  7239  	assert.NoError(t, err)
  7240  	db2, err = txn4.GetDatabase("db2")
  7241  	assert.NoError(t, err)
  7242  	_, err = db2.DropRelationByName("tb3")
  7243  	assert.NoError(t, err)
  7244  
  7245  	db2, err = txn4.GetDatabase("db1")
  7246  	assert.NoError(t, err)
  7247  	_, err = db2.DropRelationByName("tb2")
  7248  	assert.NoError(t, err)
  7249  
  7250  	err = txn4.Commit(context.Background())
  7251  	assert.NoError(t, err)
  7252  
  7253  	t.Log(tae.Catalog.SimplePPString(3))
  7254  	commitTS = txn4.GetCommitTS()
  7255  	tae.Catalog.GCByTS(context.Background(), commitTS.Next())
  7256  	t.Log(tae.Catalog.SimplePPString(3))
  7257  
  7258  	resetCount()
  7259  	err = tae.Catalog.RecurLoop(p)
  7260  	assert.NoError(t, err)
  7261  	assert.Equal(t, 2, dbCnt)
  7262  	assert.Equal(t, 1, tableCnt)
  7263  	assert.Equal(t, 1, objCnt)
  7264  
  7265  	txn5, err := tae.StartTxn(nil)
  7266  	assert.NoError(t, err)
  7267  	_, err = txn5.DropDatabase("db2")
  7268  	assert.NoError(t, err)
  7269  
  7270  	_, err = txn5.DropDatabase("db1")
  7271  	assert.NoError(t, err)
  7272  
  7273  	err = txn5.Commit(context.Background())
  7274  	assert.NoError(t, err)
  7275  
  7276  	t.Log(tae.Catalog.SimplePPString(3))
  7277  	commitTS = txn5.GetCommitTS()
  7278  	tae.Catalog.GCByTS(context.Background(), commitTS.Next())
  7279  	t.Log(tae.Catalog.SimplePPString(3))
  7280  
  7281  	resetCount()
  7282  	err = tae.Catalog.RecurLoop(p)
  7283  	assert.NoError(t, err)
  7284  	assert.Equal(t, 0, dbCnt)
  7285  	assert.Equal(t, 0, tableCnt)
  7286  	assert.Equal(t, 0, objCnt)
  7287  }
  7288  
  7289  func TestGCCatalog2(t *testing.T) {
  7290  	defer testutils.AfterTest(t)()
  7291  	ctx := context.Background()
  7292  
  7293  	opts := config.WithQuickScanAndCKPOpts(nil)
  7294  	options.WithCatalogGCInterval(10 * time.Millisecond)(opts)
  7295  	tae := testutil.NewTestEngine(ctx, ModuleName, t, opts)
  7296  	defer tae.Close()
  7297  	schema := catalog.MockSchema(3, 2)
  7298  	schema.BlockMaxRows = 10
  7299  	schema.ObjectMaxBlocks = 2
  7300  	tae.BindSchema(schema)
  7301  	bat := catalog.MockBatch(schema, 33)
  7302  
  7303  	checkCompactAndGCFn := func() bool {
  7304  		p := &catalog.LoopProcessor{}
  7305  		appendableCount := 0
  7306  		p.ObjectFn = func(be *catalog.ObjectEntry) error {
  7307  			if be.GetTable().GetDB().IsSystemDB() {
  7308  				return nil
  7309  			}
  7310  			if be.IsAppendable() {
  7311  				appendableCount++
  7312  			}
  7313  			return nil
  7314  		}
  7315  		err := tae.Catalog.RecurLoop(p)
  7316  		assert.NoError(t, err)
  7317  		return appendableCount == 0
  7318  	}
  7319  
  7320  	tae.CreateRelAndAppend(bat, true)
  7321  	t.Log(tae.Catalog.SimplePPString(3))
  7322  	testutils.WaitExpect(10000, checkCompactAndGCFn)
  7323  	assert.True(t, checkCompactAndGCFn())
  7324  	t.Log(tae.Catalog.SimplePPString(3))
  7325  }
  7326  func TestGCCatalog3(t *testing.T) {
  7327  	defer testutils.AfterTest(t)()
  7328  	ctx := context.Background()
  7329  
  7330  	opts := config.WithQuickScanAndCKPOpts(nil)
  7331  	options.WithCatalogGCInterval(10 * time.Millisecond)(opts)
  7332  	tae := testutil.NewTestEngine(ctx, ModuleName, t, opts)
  7333  	defer tae.Close()
  7334  	schema := catalog.MockSchema(3, 2)
  7335  	schema.BlockMaxRows = 10
  7336  	schema.ObjectMaxBlocks = 2
  7337  	tae.BindSchema(schema)
  7338  	bat := catalog.MockBatch(schema, 33)
  7339  
  7340  	checkCompactAndGCFn := func() bool {
  7341  		p := &catalog.LoopProcessor{}
  7342  		dbCount := 0
  7343  		p.DatabaseFn = func(be *catalog.DBEntry) error {
  7344  			if be.IsSystemDB() {
  7345  				return nil
  7346  			}
  7347  			dbCount++
  7348  			return nil
  7349  		}
  7350  		err := tae.Catalog.RecurLoop(p)
  7351  		assert.NoError(t, err)
  7352  		return dbCount == 0
  7353  	}
  7354  
  7355  	tae.CreateRelAndAppend(bat, true)
  7356  	txn, err := tae.StartTxn(nil)
  7357  	assert.NoError(t, err)
  7358  	_, err = txn.DropDatabase("db")
  7359  	assert.NoError(t, err)
  7360  	assert.NoError(t, txn.Commit(context.Background()))
  7361  
  7362  	t.Log(tae.Catalog.SimplePPString(3))
  7363  	testutils.WaitExpect(10000, checkCompactAndGCFn)
  7364  	assert.True(t, checkCompactAndGCFn())
  7365  	t.Log(tae.Catalog.SimplePPString(3))
  7366  }
  7367  
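        // TestForceCheckpoint injects the "tae: flush timeout" fault point so the forced
        // flush fails, then verifies that a forced incremental checkpoint still succeeds.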
  7368  func TestForceCheckpoint(t *testing.T) {
  7369  	fault.Enable()
  7370  	defer fault.Disable()
  7371  	err := fault.AddFaultPoint(context.Background(), "tae: flush timeout", ":::", "echo", 0, "mock flush timeout")
  7372  	assert.NoError(t, err)
  7373  	defer func() {
  7374  		err := fault.RemoveFaultPoint(context.Background(), "tae: flush timeout")
  7375  		assert.NoError(t, err)
  7376  	}()
  7377  	ctx := context.Background()
  7378  
  7379  	opts := config.WithLongScanAndCKPOpts(nil)
  7380  	tae := testutil.NewTestEngine(ctx, ModuleName, t, opts)
  7381  	defer tae.Close()
  7382  
  7383  	schema := catalog.MockSchemaAll(18, 2)
  7384  	schema.BlockMaxRows = 5
  7385  	schema.ObjectMaxBlocks = 2
  7386  	tae.BindSchema(schema)
  7387  	bat := catalog.MockBatch(schema, 50)
  7388  
  7389  	tae.CreateRelAndAppend(bat, true)
  7390  
  7391  	err = tae.BGCheckpointRunner.ForceFlushWithInterval(tae.TxnMgr.Now(), context.Background(), time.Second*2, time.Millisecond*10)
  7392  	assert.Error(t, err)
  7393  	err = tae.BGCheckpointRunner.ForceIncrementalCheckpoint(tae.TxnMgr.Now(), false)
  7394  	assert.NoError(t, err)
  7395  }
  7396  
  7397  func TestLogailAppend(t *testing.T) {
  7398  	ctx := context.Background()
  7399  	tae := testutil.NewTestEngine(ctx, ModuleName, t, nil)
  7400  	defer tae.Close()
  7401  	tae.DB.LogtailMgr.RegisterCallback(logtail.MockCallback)
  7402  	schema := catalog.MockSchemaAll(13, 2)
  7403  	schema.BlockMaxRows = 10
  7404  	schema.ObjectMaxBlocks = 2
  7405  	tae.BindSchema(schema)
  7406  	batch := catalog.MockBatch(schema, int(schema.BlockMaxRows*uint32(schema.ObjectMaxBlocks)-1))
  7407  	//create database, create table, append
  7408  	tae.CreateRelAndAppend(batch, true)
  7409  	//delete
  7410  	err := tae.DeleteAll(true)
  7411  	assert.NoError(t, err)
  7412  	//compact(metadata)
  7413  	tae.DoAppend(batch)
  7414  	tae.CompactBlocks(false)
  7415  	//drop table
  7416  	tae.DropRelation(t)
  7417  	//drop database
  7418  	txn, err := tae.StartTxn(nil)
  7419  	assert.NoError(t, err)
  7420  	txn.DropDatabase("db")
  7421  	assert.NoError(t, txn.Commit(context.Background()))
  7422  }
  7423  
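        // TestSnapshotLag1 has two transactions append the same rows that a third committed
        // transaction already inserted: the one whose start timestamp is pushed forward fails
        // with a duplicate-entry error, the other with a write-write conflict.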
  7424  func TestSnapshotLag1(t *testing.T) {
  7425  	ctx := context.Background()
  7426  	opts := config.WithLongScanAndCKPOpts(nil)
  7427  	tae := testutil.NewTestEngine(ctx, ModuleName, t, opts)
  7428  	defer tae.Close()
  7429  
  7430  	schema := catalog.MockSchemaAll(14, 3)
  7431  	schema.BlockMaxRows = 10000
  7432  	schema.ObjectMaxBlocks = 10
  7433  	tae.BindSchema(schema)
  7434  
  7435  	data := catalog.MockBatch(schema, 20)
  7436  	defer data.Close()
  7437  
  7438  	bats := data.Split(4)
  7439  	tae.CreateRelAndAppend(bats[0], true)
  7440  
  7441  	txn1, rel1 := tae.GetRelation()
  7442  	assert.NoError(t, rel1.Append(context.Background(), bats[1]))
  7443  	txn2, rel2 := tae.GetRelation()
  7444  	assert.NoError(t, rel2.Append(context.Background(), bats[1]))
  7445  
  7446  	{
  7447  		txn, rel := tae.GetRelation()
  7448  		assert.NoError(t, rel.Append(context.Background(), bats[1]))
  7449  		assert.NoError(t, txn.Commit(context.Background()))
  7450  	}
  7451  
  7452  	txn1.MockStartTS(tae.TxnMgr.Now())
  7453  	err := txn1.Commit(context.Background())
  7454  	assert.True(t, moerr.IsMoErrCode(err, moerr.ErrDuplicateEntry))
  7455  	err = txn2.Commit(context.Background())
  7456  	assert.True(t, moerr.IsMoErrCode(err, moerr.ErrTxnWWConflict))
  7457  }
  7458  
  7459  func TestMarshalPartioned(t *testing.T) {
  7460  	ctx := context.Background()
  7461  	opts := config.WithLongScanAndCKPOpts(nil)
  7462  	tae := testutil.NewTestEngine(ctx, ModuleName, t, opts)
  7463  	defer tae.Close()
  7464  
  7465  	schema := catalog.MockSchemaAll(14, 3)
  7466  	schema.BlockMaxRows = 10000
  7467  	schema.ObjectMaxBlocks = 10
  7468  	schema.Partitioned = 1
  7469  	tae.BindSchema(schema)
  7470  
  7471  	data := catalog.MockBatch(schema, 20)
  7472  	defer data.Close()
  7473  
  7474  	bats := data.Split(4)
  7475  	tae.CreateRelAndAppend(bats[0], true)
  7476  
  7477  	_, rel := tae.GetRelation()
  7478  	partioned := rel.Schema().(*catalog.Schema).Partitioned
  7479  	assert.Equal(t, int8(1), partioned)
  7480  
  7481  	tae.Restart(ctx)
  7482  
  7483  	_, rel = tae.GetRelation()
  7484  	partioned = rel.Schema().(*catalog.Schema).Partitioned
  7485  	assert.Equal(t, int8(1), partioned)
  7486  
  7487  	err := tae.BGCheckpointRunner.ForceIncrementalCheckpoint(tae.TxnMgr.Now(), false)
  7488  	assert.NoError(t, err)
  7489  	lsn := tae.BGCheckpointRunner.MaxLSNInRange(tae.TxnMgr.Now())
  7490  	entry, err := tae.Wal.RangeCheckpoint(1, lsn)
  7491  	assert.NoError(t, err)
  7492  	assert.NoError(t, entry.WaitDone())
  7493  
  7494  	tae.Restart(ctx)
  7495  
  7496  	_, rel = tae.GetRelation()
  7497  	partioned = rel.Schema().(*catalog.Schema).Partitioned
  7498  	assert.Equal(t, int8(1), partioned)
  7499  }
  7500  
  7501  func TestDedup2(t *testing.T) {
  7502  	ctx := context.Background()
  7503  	opts := config.WithQuickScanAndCKPAndGCOpts(nil)
  7504  	tae := testutil.NewTestEngine(ctx, ModuleName, t, opts)
  7505  	defer tae.Close()
  7506  
  7507  	schema := catalog.MockSchemaAll(14, 3)
  7508  	schema.BlockMaxRows = 2
  7509  	schema.ObjectMaxBlocks = 10
  7510  	schema.Partitioned = 1
  7511  	tae.BindSchema(schema)
  7512  
  7513  	count := 50
  7514  	data := catalog.MockBatch(schema, count)
  7515  	datas := data.Split(count)
  7516  
  7517  	tae.CreateRelAndAppend(datas[0], true)
  7518  
  7519  	for i := 1; i < count; i++ {
  7520  		tae.DoAppend(datas[i])
  7521  		txn, rel := tae.GetRelation()
  7522  		for j := 0; j <= i; j++ {
  7523  			err := rel.Append(context.Background(), datas[j])
  7524  			assert.Error(t, err)
  7525  		}
  7526  		assert.NoError(t, txn.Commit(context.Background()))
  7527  	}
  7528  }
  7529  
  7530  func TestCompactLargeTable(t *testing.T) {
  7531  	ctx := context.Background()
  7532  	opts := config.WithQuickScanAndCKPAndGCOpts(nil)
  7533  	tae := testutil.NewTestEngine(ctx, ModuleName, t, opts)
  7534  	defer tae.Close()
  7535  
  7536  	schema := catalog.MockSchemaAll(600, 3)
  7537  	schema.BlockMaxRows = 2
  7538  	schema.ObjectMaxBlocks = 10
  7539  	schema.Partitioned = 1
  7540  	tae.BindSchema(schema)
  7541  
  7542  	data := catalog.MockBatch(schema, 10)
  7543  
  7544  	tae.CreateRelAndAppend(data, true)
  7545  
  7546  	tae.Restart(ctx)
  7547  
  7548  	tae.CheckRowsByScan(10, true)
  7549  
  7550  	testutils.WaitExpect(10000, func() bool {
  7551  		return tae.Wal.GetPenddingCnt() == 0
  7552  	})
  7553  
  7554  	tae.Restart(ctx)
  7555  
  7556  	tae.CheckRowsByScan(10, true)
  7557  }
  7558  
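        // TestCommitS3Blocks writes each batch into an object file through the block writer,
        // commits the collected object stats with AddObjsWithMetaLoc, and then verifies that
        // adding the same stats a second time is rejected.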
  7559  func TestCommitS3Blocks(t *testing.T) {
  7560  	ctx := context.Background()
  7561  	opts := config.WithQuickScanAndCKPAndGCOpts(nil)
  7562  	tae := testutil.NewTestEngine(ctx, ModuleName, t, opts)
  7563  	defer tae.Close()
  7564  
  7565  	schema := catalog.MockSchemaAll(60, 3)
  7566  	schema.BlockMaxRows = 20
  7567  	schema.ObjectMaxBlocks = 10
  7568  	schema.Partitioned = 1
  7569  	tae.BindSchema(schema)
  7570  
  7571  	data := catalog.MockBatch(schema, 200)
  7572  	datas := data.Split(10)
  7573  	tae.CreateRelAndAppend(datas[0], true)
  7574  	datas = datas[1:]
  7575  
  7576  	statsVecs := make([]containers.Vector, 0)
  7577  	for _, bat := range datas {
  7578  		name := objectio.BuildObjectNameWithObjectID(objectio.NewObjectid())
  7579  		writer, err := blockio.NewBlockWriterNew(tae.Runtime.Fs.Service, name, 0, nil)
  7580  		assert.Nil(t, err)
  7581  		writer.SetPrimaryKey(3)
  7582  		for i := 0; i < 50; i++ {
  7583  			_, err := writer.WriteBatch(containers.ToCNBatch(bat))
  7584  			assert.Nil(t, err)
  7585  			//offset++
  7586  		}
  7587  		blocks, _, err := writer.Sync(context.Background())
  7588  		assert.Nil(t, err)
  7589  		assert.Equal(t, 50, len(blocks))
  7590  		statsVec := containers.MakeVector(types.T_varchar.ToType(), common.DefaultAllocator)
  7591  		defer statsVec.Close()
  7592  		statsVec.Append(writer.GetObjectStats()[objectio.SchemaData][:], false)
  7593  		statsVecs = append(statsVecs, statsVec)
  7594  	}
  7595  
  7596  	for _, vec := range statsVecs {
  7597  		txn, rel := tae.GetRelation()
  7598  		rel.AddObjsWithMetaLoc(context.Background(), vec)
  7599  		assert.NoError(t, txn.Commit(context.Background()))
  7600  	}
  7601  	for _, vec := range statsVecs {
  7602  		txn, rel := tae.GetRelation()
  7603  		err := rel.AddObjsWithMetaLoc(context.Background(), vec)
  7604  		assert.Error(t, err)
  7605  		assert.NoError(t, txn.Commit(context.Background()))
  7606  	}
  7607  }
  7608  
  7609  func TestDedupSnapshot1(t *testing.T) {
  7610  	defer testutils.AfterTest(t)()
  7611  	testutils.EnsureNoLeak(t)
  7612  	ctx := context.Background()
  7613  
  7614  	opts := config.WithQuickScanAndCKPOpts(nil)
  7615  	tae := testutil.NewTestEngine(ctx, ModuleName, t, opts)
  7616  	defer tae.Close()
  7617  
  7618  	schema := catalog.MockSchemaAll(13, 3)
  7619  	schema.BlockMaxRows = 10
  7620  	schema.ObjectMaxBlocks = 3
  7621  	tae.BindSchema(schema)
  7622  	bat := catalog.MockBatch(schema, 10)
  7623  	tae.CreateRelAndAppend(bat, true)
  7624  
  7625  	testutils.WaitExpect(10000, func() bool {
  7626  		return tae.Wal.GetPenddingCnt() == 0
  7627  	})
  7628  	assert.Equal(t, uint64(0), tae.Wal.GetPenddingCnt())
  7629  
  7630  	txn, rel := tae.GetRelation()
  7631  	startTS := txn.GetStartTS()
  7632  	txn.SetSnapshotTS(startTS.Next())
  7633  	txn.SetDedupType(txnif.IncrementalDedup)
  7634  	err := rel.Append(context.Background(), bat)
  7635  	assert.NoError(t, err)
  7636  	_ = txn.Commit(context.Background())
  7637  }
  7638  
  7639  func TestDedupSnapshot2(t *testing.T) {
  7640  	defer testutils.AfterTest(t)()
  7641  	testutils.EnsureNoLeak(t)
  7642  	ctx := context.Background()
  7643  
  7644  	opts := config.WithQuickScanAndCKPOpts(nil)
  7645  	tae := testutil.NewTestEngine(ctx, ModuleName, t, opts)
  7646  	defer tae.Close()
  7647  
  7648  	schema := catalog.MockSchemaAll(13, 3)
  7649  	schema.BlockMaxRows = 10
  7650  	schema.ObjectMaxBlocks = 3
  7651  	tae.BindSchema(schema)
  7652  	data := catalog.MockBatch(schema, 200)
  7653  	testutil.CreateRelation(t, tae.DB, "db", schema, true)
  7654  
  7655  	name := objectio.BuildObjectNameWithObjectID(objectio.NewObjectid())
  7656  	writer, err := blockio.NewBlockWriterNew(tae.Runtime.Fs.Service, name, 0, nil)
  7657  	assert.Nil(t, err)
  7658  	writer.SetPrimaryKey(3)
  7659  	_, err = writer.WriteBatch(containers.ToCNBatch(data))
  7660  	assert.Nil(t, err)
  7661  	blocks, _, err := writer.Sync(context.Background())
  7662  	assert.Nil(t, err)
  7663  	assert.Equal(t, 1, len(blocks))
  7664  	statsVec := containers.MakeVector(types.T_varchar.ToType(), common.DefaultAllocator)
  7665  	defer statsVec.Close()
  7666  	statsVec.Append(writer.GetObjectStats()[objectio.SchemaData][:], false)
  7667  
  7668  	txn, rel := tae.GetRelation()
  7669  	err = rel.AddObjsWithMetaLoc(context.Background(), statsVec)
  7670  	assert.NoError(t, err)
  7671  	assert.NoError(t, txn.Commit(context.Background()))
  7672  
  7673  	txn, rel = tae.GetRelation()
  7674  	startTS := txn.GetStartTS()
  7675  	txn.SetSnapshotTS(startTS.Next())
  7676  	txn.SetDedupType(txnif.IncrementalDedup)
  7677  	err = rel.AddObjsWithMetaLoc(context.Background(), statsVec)
  7678  	assert.NoError(t, err)
  7679  	_ = txn.Commit(context.Background())
  7680  }
  7681  
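        // TestDedupSnapshot3 races many workers over the same keys: each worker dedups first,
        // then appends under IncrementalDedup with a snapshot taken from the dedup txn's start
        // timestamp, and the final scan must still see exactly totalRows rows.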
  7682  func TestDedupSnapshot3(t *testing.T) {
  7683  	defer testutils.AfterTest(t)()
  7684  	testutils.EnsureNoLeak(t)
  7685  	ctx := context.Background()
  7686  
  7687  	opts := config.WithQuickScanAndCKPOpts(nil)
  7688  	tae := testutil.NewTestEngine(ctx, ModuleName, t, opts)
  7689  	defer tae.Close()
  7690  
  7691  	schema := catalog.MockSchemaAll(13, 3)
  7692  	schema.BlockMaxRows = 10
  7693  	schema.ObjectMaxBlocks = 3
  7694  	tae.BindSchema(schema)
  7695  	testutil.CreateRelation(t, tae.DB, "db", schema, true)
  7696  
  7697  	totalRows := 100
  7698  
  7699  	bat := catalog.MockBatch(schema, totalRows)
  7700  	bats := bat.Split(totalRows)
  7701  	var wg sync.WaitGroup
  7702  	pool, _ := ants.NewPool(80)
  7703  	defer pool.Release()
  7704  
  7705  	appendFn := func(offset uint32) func() {
  7706  		return func() {
  7707  			defer wg.Done()
  7708  			txn, _ := tae.StartTxn(nil)
  7709  			database, _ := txn.GetDatabase("db")
  7710  			rel, _ := database.GetRelationByName(schema.Name)
  7711  			err := rel.BatchDedup(bats[offset].Vecs[3])
  7712  			txn.Commit(context.Background())
  7713  			if err != nil {
  7714  				logutil.Infof("err is %v", err)
  7715  				return
  7716  			}
  7717  
  7718  			txn2, _ := tae.StartTxn(nil)
  7719  			txn2.SetDedupType(txnif.IncrementalDedup)
  7720  			txn2.SetSnapshotTS(txn.GetStartTS())
  7721  			database, _ = txn2.GetDatabase("db")
  7722  			rel, _ = database.GetRelationByName(schema.Name)
  7723  			_ = rel.Append(context.Background(), bats[offset])
  7724  			_ = txn2.Commit(context.Background())
  7725  		}
  7726  	}
  7727  
  7728  	for i := 0; i < totalRows; i++ {
  7729  		for j := 0; j < 5; j++ {
  7730  			wg.Add(1)
  7731  			err := pool.Submit(appendFn(uint32(i)))
  7732  			assert.Nil(t, err)
  7733  		}
  7734  	}
  7735  	wg.Wait()
  7736  
  7737  	tae.CheckRowsByScan(totalRows, false)
  7738  }
  7739  
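        // TestDeduplication persists the first batch into an object named after the larger of
        // two sorted object ids, creates an appendable object with the smaller id directly in
        // the catalog, and checks that repeated appends of the remaining rows are deduplicated.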
  7740  func TestDeduplication(t *testing.T) {
  7741  	ctx := context.Background()
  7742  	opts := config.WithLongScanAndCKPOpts(nil)
  7743  	tae := testutil.NewTestEngine(ctx, ModuleName, t, opts)
  7744  	defer tae.Close()
  7745  
  7746  	schema := catalog.MockSchemaAll(60, 3)
  7747  	schema.BlockMaxRows = 2
  7748  	schema.ObjectMaxBlocks = 10
  7749  	tae.BindSchema(schema)
  7750  	testutil.CreateRelation(t, tae.DB, "db", schema, true)
  7751  
  7752  	rows := 10
  7753  	bat := catalog.MockBatch(schema, rows)
  7754  	bats := bat.Split(rows)
  7755  
  7756  	ObjectIDs := make([]*types.Objectid, 2)
  7757  	ObjectIDs[0] = objectio.NewObjectid()
  7758  	ObjectIDs[1] = objectio.NewObjectid()
  7759  	sort.Slice(ObjectIDs, func(i, j int) bool {
  7760  		return ObjectIDs[i].Le(*ObjectIDs[j])
  7761  	})
  7762  
  7763  	blk1Name := objectio.BuildObjectNameWithObjectID(ObjectIDs[1])
  7764  	writer, err := blockio.NewBlockWriterNew(tae.Runtime.Fs.Service, blk1Name, 0, nil)
  7765  	assert.NoError(t, err)
  7766  	writer.SetPrimaryKey(3)
  7767  	writer.WriteBatch(containers.ToCNBatch(bats[0]))
  7768  	blocks, _, err := writer.Sync(context.TODO())
  7769  	assert.NoError(t, err)
  7770  	assert.Equal(t, 1, len(blocks))
  7771  
  7772  	statsVec := containers.MakeVector(types.T_varchar.ToType(), common.DefaultAllocator)
  7773  	defer statsVec.Close()
  7774  	statsVec.Append(writer.GetObjectStats()[objectio.SchemaData][:], false)
  7775  
  7776  	txn, rel := tae.GetRelation()
  7777  	err = rel.AddObjsWithMetaLoc(context.Background(), statsVec)
  7778  	assert.NoError(t, err)
  7779  	assert.NoError(t, txn.Commit(context.Background()))
  7780  
  7781  	txn, err = tae.StartTxn(nil)
  7782  	assert.NoError(t, err)
  7783  	db, err := tae.Catalog.TxnGetDBEntryByName("db", txn)
  7784  	assert.NoError(t, err)
  7785  	tbl, err := db.TxnGetTableEntryByName(schema.Name, txn)
  7786  	assert.NoError(t, err)
  7787  	dataFactory := tables.NewDataFactory(
  7788  		tae.Runtime,
  7789  		tae.Dir)
  7790  	obj, err := tbl.CreateObject(
  7791  		txn,
  7792  		catalog.ES_Appendable,
  7793  		new(objectio.CreateObjOpt).WithId(ObjectIDs[0]), dataFactory.MakeObjectFactory())
  7794  	assert.NoError(t, err)
  7795  	txn.GetStore().AddTxnEntry(txnif.TxnType_Normal, obj)
  7796  	txn.GetStore().IncreateWriteCnt()
  7797  	assert.NoError(t, txn.Commit(context.Background()))
  7798  	assert.NoError(t, obj.PrepareCommit())
  7799  	assert.NoError(t, obj.ApplyCommit())
  7800  
  7801  	txns := make([]txnif.AsyncTxn, 0)
  7802  	for i := 0; i < 5; i++ {
  7803  		for j := 1; j < rows; j++ {
  7804  			txn, _ := tae.StartTxn(nil)
  7805  			database, _ := txn.GetDatabase("db")
  7806  			rel, _ := database.GetRelationByName(schema.Name)
  7807  			_ = rel.Append(context.Background(), bats[j])
  7808  			txns = append(txns, txn)
  7809  		}
  7810  	}
  7811  	for _, txn := range txns {
  7812  		txn.Commit(context.Background())
  7813  	}
  7814  	tae.CheckRowsByScan(rows, false)
  7815  	t.Log(tae.Catalog.SimplePPString(3))
  7816  }
  7817  
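        // TestGCInMemoryDeletesByTS runs a background goroutine that persists in-memory deletes
        // into tombstone objects, updates the block's delta location, and GCs the in-memory
        // deletes by timestamp, while the foreground keeps deleting rows and checking that the
        // collected delete batches and change views stay complete.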
  7818  func TestGCInMemoryDeletesByTS(t *testing.T) {
  7819  	t.Skip("This case crashes occasionally and is being fixed; skip it for now")
  7820  	defer testutils.AfterTest(t)()
  7821  	ctx := context.Background()
  7822  
  7823  	opts := config.WithLongScanAndCKPOpts(nil)
  7824  	tae := testutil.NewTestEngine(ctx, ModuleName, t, opts)
  7825  	defer tae.Close()
  7826  	rows := 100
  7827  	schema := catalog.MockSchemaAll(2, 1)
  7828  	schema.BlockMaxRows = uint32(rows)
  7829  	tae.BindSchema(schema)
  7830  	bat := catalog.MockBatch(schema, rows)
  7831  	tae.CreateRelAndAppend(bat, true)
  7832  
  7833  	txn, rel := tae.GetRelation()
  7834  	blkit := rel.MakeObjectIt()
  7835  	blkHandle := blkit.GetObject()
  7836  	blkMeta := blkHandle.GetMeta().(*catalog.ObjectEntry)
  7837  	blkID := blkMeta.AsCommonID()
  7838  	blkData := blkMeta.GetObjectData()
  7839  	assert.NoError(t, txn.Commit(context.Background()))
  7840  	ctx, cancel := context.WithCancel(context.Background())
  7841  	wg := sync.WaitGroup{}
  7842  	wg.Add(1)
  7843  	go func() {
  7844  		defer wg.Done()
  7845  		i := 0
  7846  		for {
  7847  			select {
  7848  			case <-ctx.Done():
  7849  				return
  7850  			default:
  7851  
  7852  				txn, rel := tae.GetRelation()
  7853  				ts := txn.GetStartTS()
  7854  				batch, _, err := blkData.CollectDeleteInRange(context.Background(), types.TS{}, ts, true, common.DefaultAllocator)
  7855  				assert.NoError(t, err)
  7856  				if batch == nil {
  7857  					continue
  7858  				}
  7859  
  7860  				blk1Name := objectio.BuildObjectNameWithObjectID(objectio.NewObjectid())
  7861  				writer, err := blockio.NewBlockWriterNew(tae.Runtime.Fs.Service, blk1Name, 0, nil)
  7862  				assert.NoError(t, err)
  7863  				writer.SetPrimaryKey(3)
  7864  				writer.WriteTombstoneBatch(containers.ToCNBatch(batch))
  7865  				blocks, _, err := writer.Sync(context.TODO())
  7866  				assert.NoError(t, err)
  7867  				assert.Equal(t, 1, len(blocks))
  7868  
  7869  				deltaLoc := blockio.EncodeLocation(
  7870  					writer.GetName(),
  7871  					blocks[0].GetExtent(),
  7872  					uint32(batch.Length()),
  7873  					blocks[0].GetID(),
  7874  				)
  7875  				blkit := rel.MakeObjectIt()
  7876  				blkHandle := blkit.GetObject()
  7877  				err = blkHandle.UpdateDeltaLoc(0, deltaLoc)
  7878  				assert.NoError(t, err)
  7879  				assert.NoError(t, txn.Commit(context.Background()))
  7880  
  7881  				blkData.GCInMemeoryDeletesByTSForTest(ts)
  7882  			}
  7883  			i++
  7884  		}
  7885  	}()
  7886  
  7887  	for offset := 0; offset < rows; offset++ {
  7888  		txn, rel := tae.GetRelation()
  7889  		assert.NoError(t, rel.RangeDelete(blkID, uint32(offset), uint32(offset), handle.DT_Normal))
  7890  		assert.NoError(t, txn.Commit(context.Background()))
  7891  		ts := txn.GetCommitTS()
  7892  
  7893  		batch, _, err := blkData.CollectDeleteInRange(context.Background(), types.TS{}, ts, true, common.DefaultAllocator)
  7894  		assert.NoError(t, err)
  7895  		t.Log(logtail.BatchToString("", batch, false))
  7896  		for i, vec := range batch.Vecs {
  7897  			t.Log(batch.Attrs[i])
  7898  			assert.Equal(t, offset+1, vec.Length())
  7899  		}
  7900  		view, err := blkData.CollectChangesInRange(context.Background(), 0, types.TS{}, ts, common.DefaultAllocator)
  7901  		assert.NoError(t, err)
  7902  		t.Log(view.DeleteMask.String())
  7903  		assert.Equal(t, offset+1, view.DeleteMask.GetCardinality())
  7904  	}
  7905  	cancel()
  7906  	wg.Wait()
  7907  }
  7908  
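        // TestRW keeps one deleting transaction open while other transactions delete rows and
        // the table is flushed after each commit, then verifies the long-running delete still
        // commits and a scan sees rows-cnt-1 remaining rows.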
  7909  func TestRW(t *testing.T) {
  7910  	ctx := context.Background()
  7911  	opts := config.WithLongScanAndCKPOpts(nil)
  7912  	tae := testutil.NewTestEngine(ctx, ModuleName, t, opts)
  7913  	defer tae.Close()
  7914  	rows := 10
  7915  	schema := catalog.MockSchemaAll(5, 2)
  7916  	schema.BlockMaxRows = uint32(rows)
  7917  	tae.BindSchema(schema)
  7918  	bat := catalog.MockBatch(schema, rows)
  7919  	defer bat.Close()
  7920  	tae.CreateRelAndAppend(bat, true)
  7921  
  7922  	txn1, rel1 := tae.GetRelation()
  7923  	v := bat.Vecs[2].Get(2)
  7924  	filter := handle.NewEQFilter(v)
  7925  	id, row, err := rel1.GetByFilter(ctx, filter)
  7926  	assert.NoError(t, err)
  7927  	err = rel1.RangeDelete(id, row, row, handle.DT_Normal)
  7928  	assert.NoError(t, err)
  7929  
  7930  	meta := rel1.GetMeta().(*catalog.TableEntry)
  7931  
  7932  	cnt := 3
  7933  	for i := 0; i < cnt; i++ {
  7934  		txn2, rel2 := tae.GetRelation()
  7935  		v = bat.Vecs[2].Get(i + 3)
  7936  		filter = handle.NewEQFilter(v)
  7937  		id, row, err = rel2.GetByFilter(ctx, filter)
  7938  		assert.NoError(t, err)
  7939  		err = rel2.RangeDelete(id, row, row, handle.DT_Normal)
  7940  		assert.NoError(t, err)
  7941  		err = txn2.Commit(ctx)
  7942  		assert.NoError(t, err)
  7943  
  7944  		err = tae.FlushTable(
  7945  			ctx, 0, meta.GetDB().ID, meta.ID,
  7946  			types.BuildTS(time.Now().UTC().UnixNano(), 0),
  7947  		)
  7948  		assert.NoError(t, err)
  7949  	}
  7950  
  7951  	err = txn1.Commit(ctx)
  7952  	assert.NoError(t, err)
  7953  
  7954  	{
  7955  		txn, rel := tae.GetRelation()
  7956  		rcnt := testutil.GetColumnRowsByScan(t, rel, 2, true)
  7957  		assert.Equal(t, rows-cnt-1, rcnt)
  7958  		assert.NoError(t, txn.Commit(ctx))
  7959  	}
  7960  }
  7961  
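        // TestReplayDeletes compacts the first batch into a non-appendable block, deletes from
        // it, keeps the object appendable with further appends, and then flushes the table tail
        // while another transaction still holds a handle on the object.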
  7962  func TestReplayDeletes(t *testing.T) {
  7963  	defer testutils.AfterTest(t)()
  7964  	ctx := context.Background()
  7965  
  7966  	opts := config.WithLongScanAndCKPOpts(nil)
  7967  	tae := testutil.NewTestEngine(ctx, ModuleName, t, opts)
  7968  	defer tae.Close()
  7969  	rows := 250
  7970  	schema := catalog.MockSchemaAll(2, 1)
  7971  	schema.BlockMaxRows = 50
  7972  	tae.BindSchema(schema)
  7973  	bat := catalog.MockBatch(schema, rows)
  7974  	defer bat.Close()
  7975  	bats := bat.Split(5)
  7976  	tae.CreateRelAndAppend(bats[0], true)
  7977  	// compact the first batch into a non-appendable block (nablk)
  7978  	tae.CompactBlocks(false)
  7979  	// delete rows from the compacted block
  7980  	txn, rel := tae.GetRelation()
  7981  	blkIt := rel.MakeObjectIt()
  7982  	blk := blkIt.GetObject()
  7983  	blk.RangeDelete(0, 1, 49, handle.DT_Normal, common.DefaultAllocator)
  7984  	assert.NoError(t, txn.Commit(context.Background()))
  7985  	// the next block to compact
  7986  	tae.DoAppend(bats[1])
  7987  	// keep the object appendable
  7988  	tae.DoAppend(bats[2])
  7989  	// compact the non-appendable block together with its next block
  7990  	txn2, rel := tae.GetRelation()
  7991  	blkIt = rel.MakeObjectIt()
  7992  	blkEntry := blkIt.GetObject().GetMeta().(*catalog.ObjectEntry)
  7993  	txn, err := tae.StartTxn(nil)
  7994  	assert.NoError(t, err)
  7995  	task, err := jobs.NewFlushTableTailTask(nil, txn, []*catalog.ObjectEntry{blkEntry}, tae.Runtime, types.MaxTs())
  7996  	assert.NoError(t, err)
  7997  	err = task.OnExec(context.Background())
  7998  	assert.NoError(t, err)
  7999  	assert.NoError(t, txn.Commit(context.Background()))
  8000  	assert.NoError(t, txn2.Commit(context.Background()))
  8001  }
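
        // TestApplyDeltalocation1 checks that delete-by-deltaloc is refused on an appendable
        // block, succeeds after compaction (after which an overlapping range delete conflicts),
        // and is refused again once persisted or in-memory deletes exist.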
  8002  func TestApplyDeltalocation1(t *testing.T) {
  8003  	defer testutils.AfterTest(t)()
  8004  	ctx := context.Background()
  8005  
  8006  	opts := config.WithLongScanAndCKPOpts(nil)
  8007  	tae := testutil.NewTestEngine(ctx, ModuleName, t, opts)
  8008  	defer tae.Close()
  8009  	rows := 10
  8010  	schema := catalog.MockSchemaAll(2, 1)
  8011  	schema.BlockMaxRows = 10
  8012  	tae.BindSchema(schema)
  8013  	bat := catalog.MockBatch(schema, rows)
  8014  	defer bat.Close()
  8015  	tae.CreateRelAndAppend(bat, true)
  8016  
  8017  	// applying a deltaloc fails on an appendable block (ablk)
  8018  	v1 := bat.Vecs[schema.GetSingleSortKeyIdx()].Get(1)
  8019  	ok, err := tae.TryDeleteByDeltaloc([]any{v1})
  8020  	assert.NoError(t, err)
  8021  	assert.False(t, ok)
  8022  
  8023  	tae.CompactBlocks(false)
  8024  	filter := handle.NewEQFilter(v1)
  8025  	txn, rel := tae.GetRelation()
  8026  	id, offset, err := rel.GetByFilter(context.Background(), filter)
  8027  	assert.NoError(t, err)
  8028  	ok, err = tae.TryDeleteByDeltaloc([]any{v1})
  8029  	assert.NoError(t, err)
  8030  	assert.True(t, ok)
  8031  
  8032  	// range delete conflicts with deletes in deltaloc
  8033  	err = rel.RangeDelete(id, offset, offset, handle.DT_Normal)
  8034  	assert.Error(t, err)
  8035  	assert.NoError(t, txn.Commit(context.Background()))
  8036  
  8037  	// applying a deltaloc fails if there are persisted deletes
  8038  	v2 := bat.Vecs[schema.GetSingleSortKeyIdx()].Get(2)
  8039  	ok, err = tae.TryDeleteByDeltaloc([]any{v2})
  8040  	assert.NoError(t, err)
  8041  	assert.False(t, ok)
  8042  
  8043  	// applying a deltaloc fails if there are deletes in memory
  8044  	tae.CompactBlocks(false)
  8045  	v3 := bat.Vecs[schema.GetSingleSortKeyIdx()].Get(3)
  8046  	filter = handle.NewEQFilter(v3)
  8047  	txn, rel = tae.GetRelation()
  8048  	id, offset, err = rel.GetByFilter(context.Background(), filter)
  8049  	assert.NoError(t, err)
  8050  	err = rel.RangeDelete(id, offset, offset, handle.DT_Normal)
  8051  	assert.NoError(t, err)
  8052  	assert.NoError(t, txn.Commit(context.Background()))
  8053  
  8054  	v4 := bat.Vecs[schema.GetSingleSortKeyIdx()].Get(4)
  8055  	ok, err = tae.TryDeleteByDeltaloc([]any{v4})
  8056  	assert.NoError(t, err)
  8057  	assert.False(t, ok)
  8058  
  8059  }
  8060  
  8061  // TestApplyDeltalocation2 covers delete-by-deltaloc together with logtail, restart, dedup, and compaction checks
  8062  func TestApplyDeltalocation2(t *testing.T) {
  8063  	defer testutils.AfterTest(t)()
  8064  	ctx := context.Background()
  8065  
  8066  	opts := config.WithLongScanAndCKPOpts(nil)
  8067  	tae := testutil.NewTestEngine(ctx, ModuleName, t, opts)
  8068  	defer tae.Close()
  8069  	rows := 10
  8070  	schema := catalog.MockSchemaAll(2, 1)
  8071  	schema.BlockMaxRows = 10
  8072  	tae.BindSchema(schema)
  8073  	bat := catalog.MockBatch(schema, rows)
  8074  	bats := bat.Split(10)
  8075  	defer bat.Close()
  8076  	tae.CreateRelAndAppend(bat, true)
  8077  	tae.CompactBlocks(false)
  8078  
  8079  	v3 := bat.Vecs[schema.GetSingleSortKeyIdx()].Get(3)
  8080  	v5 := bat.Vecs[schema.GetSingleSortKeyIdx()].Get(5)
  8081  	filter3 := handle.NewEQFilter(v3)
  8082  	filter5 := handle.NewEQFilter(v5)
  8083  
  8084  	// test logtail
  8085  	tae.LogtailMgr.RegisterCallback(logtail.MockCallback)
  8086  	tae.TryDeleteByDeltaloc([]any{v3, v5})
  8087  	t.Log(tae.Catalog.SimplePPString(3))
  8088  
  8089  	txn, rel := tae.GetRelation()
  8090  	_, _, err := rel.GetByFilter(context.Background(), filter5)
  8091  	assert.Error(t, err)
  8092  	_, _, err = rel.GetByFilter(context.Background(), filter3)
  8093  	assert.Error(t, err)
  8094  	assert.NoError(t, txn.Commit(context.Background()))
  8095  	tae.CheckRowsByScan(8, true)
  8096  
  8097  	tae.Restart(context.Background())
  8098  	txn, rel = tae.GetRelation()
  8099  	_, _, err = rel.GetByFilter(context.Background(), filter5)
  8100  	assert.Error(t, err)
  8101  	_, _, err = rel.GetByFilter(context.Background(), filter3)
  8102  	assert.Error(t, err)
  8103  	assert.NoError(t, txn.Commit(context.Background()))
  8104  	tae.CheckRowsByScan(8, true)
  8105  
  8106  	// test dedup
  8107  	tae.DoAppend(bats[3])
  8108  	tae.CheckRowsByScan(9, true)
  8109  
  8110  	// test compact
  8111  	tae.CompactBlocks(false)
  8112  	txn, rel = tae.GetRelation()
  8113  	_, _, err = rel.GetByFilter(context.Background(), filter5)
  8114  	assert.Error(t, err)
  8115  	assert.NoError(t, txn.Commit(context.Background()))
  8116  	tae.CheckRowsByScan(9, true)
  8117  
  8118  	tae.Restart(context.Background())
  8119  	txn, rel = tae.GetRelation()
  8120  	_, _, err = rel.GetByFilter(context.Background(), filter5)
  8121  	assert.Error(t, err)
  8122  	assert.NoError(t, txn.Commit(context.Background()))
  8123  	tae.CheckRowsByScan(9, true)
  8124  }
  8125  
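        // TestApplyDeltalocation3 shows that a delete-by-deltaloc transaction aborts when a
        // conflicting delete commits before it, but commits successfully while the competing
        // delete transaction is still active.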
  8126  func TestApplyDeltalocation3(t *testing.T) {
  8127  	defer testutils.AfterTest(t)()
  8128  	ctx := context.Background()
  8129  
  8130  	opts := config.WithLongScanAndCKPOpts(nil)
  8131  	tae := testutil.NewTestEngine(ctx, ModuleName, t, opts)
  8132  	defer tae.Close()
  8133  	rows := 10
  8134  	schema := catalog.MockSchemaAll(2, 1)
  8135  	schema.BlockMaxRows = 10
  8136  	tae.BindSchema(schema)
  8137  	bat := catalog.MockBatch(schema, rows)
  8138  	defer bat.Close()
  8139  	tae.CreateRelAndAppend(bat, true)
  8140  	tae.CompactBlocks(false)
  8141  
  8142  	v3 := bat.Vecs[schema.GetSingleSortKeyIdx()].Get(3)
  8143  	filter3 := handle.NewEQFilter(v3)
  8144  
  8145  	// applying a deltaloc fails if a conflicting delete commits first
  8146  
  8147  	v5 := bat.Vecs[schema.GetSingleSortKeyIdx()].Get(5)
  8148  	filter5 := handle.NewEQFilter(v5)
  8149  	txn, err := tae.StartTxn(nil)
  8150  	assert.NoError(t, err)
  8151  	ok, err := tae.TryDeleteByDeltalocWithTxn([]any{v3}, txn)
  8152  	assert.NoError(t, err)
  8153  	assert.True(t, ok)
  8154  
  8155  	{
  8156  		// delete v5
  8157  		txn2, rel2 := tae.GetRelation()
  8158  		err = rel2.DeleteByFilter(context.Background(), filter5)
  8159  		assert.NoError(t, err)
  8160  		assert.NoError(t, txn2.Commit(context.Background()))
  8161  	}
  8162  	tae.CheckRowsByScan(9, true)
  8163  
  8164  	assert.Error(t, txn.Commit(context.Background()))
  8165  	tae.CheckRowsByScan(9, true)
  8166  
  8167  	// applying a deltaloc succeeds while the txn holding the new deletes is still active
  8168  
  8169  	tae.MergeBlocks(false)
  8170  	txn, err = tae.StartTxn(nil)
  8171  	assert.NoError(t, err)
  8172  	ok, err = tae.TryDeleteByDeltalocWithTxn([]any{v3}, txn)
  8173  	assert.NoError(t, err)
  8174  	assert.True(t, ok)
  8175  
  8176  	// delete v4
  8177  	v4 := bat.Vecs[schema.GetSingleSortKeyIdx()].Get(4)
  8178  	filter4 := handle.NewEQFilter(v4)
  8179  	txn2, rel2 := tae.GetRelation()
  8180  	err = rel2.DeleteByFilter(context.Background(), filter4)
  8181  	assert.NoError(t, err)
  8182  
  8183  	assert.NoError(t, txn.Commit(context.Background()))
  8184  	tae.CheckRowsByScan(8, true)
  8185  
  8186  	assert.NoError(t, txn2.Commit(context.Background()))
  8187  	tae.CheckRowsByScan(7, true)
  8188  
  8189  	txn, rel := tae.GetRelation()
  8190  	_, _, err = rel.GetByFilter(context.Background(), filter3)
  8191  	assert.Error(t, err)
  8192  	assert.NoError(t, txn.Commit(context.Background()))
  8193  
  8194  }
  8195  
  8196  func TestApplyDeltalocation4(t *testing.T) {
  8197  	defer testutils.AfterTest(t)()
  8198  	ctx := context.Background()
  8199  
  8200  	opts := config.WithLongScanAndCKPOpts(nil)
  8201  	tae := testutil.NewTestEngine(ctx, ModuleName, t, opts)
  8202  	defer tae.Close()
  8203  	rows := 10
  8204  	schema := catalog.MockSchemaAll(2, 1)
  8205  	schema.BlockMaxRows = 10
  8206  	tae.BindSchema(schema)
  8207  	bat := catalog.MockBatch(schema, rows)
  8208  	defer bat.Close()
  8209  	bats := bat.Split(rows)
  8210  	tae.CreateRelAndAppend(bat, true)
  8211  
  8212  	tae.CompactBlocks(false)
  8213  
  8214  	txn, err := tae.StartTxn(nil)
  8215  	assert.NoError(t, err)
  8216  	v5 := bat.Vecs[schema.GetSingleSortKeyIdx()].Get(5)
  8217  	tae.TryDeleteByDeltalocWithTxn([]any{v5}, txn)
  8218  	v1 := bat.Vecs[schema.GetSingleSortKeyIdx()].Get(1)
  8219  	filter1 := handle.NewEQFilter(v1)
  8220  	db, err := txn.GetDatabase("db")
  8221  	assert.NoError(t, err)
  8222  	rel, err := db.GetRelationByName(schema.Name)
  8223  	assert.NoError(t, err)
  8224  	err = rel.DeleteByFilter(context.Background(), filter1)
  8225  	assert.NoError(t, err)
  8226  	tae.DoAppendWithTxn(bats[1], txn, false)
  8227  	tae.DoAppendWithTxn(bats[5], txn, false)
  8228  	assert.NoError(t, txn.Commit(context.Background()))
  8229  
  8230  	tae.CheckRowsByScan(rows, true)
  8231  
  8232  	tae.Restart(ctx)
  8233  
  8234  	tae.CheckRowsByScan(rows, true)
  8235  }
  8236  
  8237  func TestReplayPersistedDelete(t *testing.T) {
  8238  	defer testutils.AfterTest(t)()
  8239  	ctx := context.Background()
  8240  
  8241  	opts := config.WithLongScanAndCKPOpts(nil)
  8242  	tae := testutil.NewTestEngine(ctx, ModuleName, t, opts)
  8243  	defer tae.Close()
  8244  	rows := 10
  8245  	schema := catalog.MockSchemaAll(2, 1)
  8246  	schema.BlockMaxRows = 10
  8247  	tae.BindSchema(schema)
  8248  	bat := catalog.MockBatch(schema, rows)
  8249  	defer bat.Close()
  8250  	tae.CreateRelAndAppend(bat, true)
  8251  	tae.CompactBlocks(false)
  8252  
  8253  	v3 := bat.Vecs[schema.GetSingleSortKeyIdx()].Get(3)
  8254  	filter3 := handle.NewEQFilter(v3)
  8255  	txn, rel := tae.GetRelation()
  8256  	id, offset, err := rel.GetByFilter(context.Background(), filter3)
  8257  	assert.NoError(t, err)
  8258  	assert.NoError(t, txn.Commit(context.Background()))
  8259  
  8260  	ok, err := tae.TryDeleteByDeltaloc([]any{v3})
  8261  	assert.NoError(t, err)
  8262  	assert.True(t, ok)
  8263  
  8264  	tae.Restart(context.Background())
  8265  
  8266  	txn, rel = tae.GetRelation()
  8267  	err = rel.RangeDelete(id, offset, offset, handle.DT_Normal)
  8268  	assert.Error(t, err)
  8269  	assert.NoError(t, txn.Commit(context.Background()))
  8270  }
  8271  
  8272  func TestCheckpointReadWrite(t *testing.T) {
  8273  	defer testutils.AfterTest(t)()
  8274  	ctx := context.Background()
  8275  
  8276  	opts := config.WithLongScanAndCKPOpts(nil)
  8277  	tae := testutil.NewTestEngine(ctx, ModuleName, t, opts)
  8278  	defer tae.Close()
  8279  
  8280  	txn, err := tae.StartTxn(nil)
  8281  	assert.NoError(t, err)
  8282  	db, err := txn.CreateDatabase("db", "create database db", "1")
  8283  	assert.NoError(t, err)
  8284  	schema1 := catalog.MockSchemaAll(2, 1)
  8285  	_, err = db.CreateRelation(schema1)
  8286  	assert.NoError(t, err)
  8287  	schema2 := catalog.MockSchemaAll(3, -1)
  8288  	_, err = db.CreateRelation(schema2)
  8289  	assert.NoError(t, err)
  8290  	assert.NoError(t, txn.Commit(context.Background()))
  8291  
  8292  	t1 := tae.TxnMgr.Now()
  8293  	testutil.CheckCheckpointReadWrite(t, types.TS{}, t1, tae.Catalog, smallCheckpointBlockRows, smallCheckpointSize, tae.Opts.Fs)
  8294  
  8295  	txn, err = tae.StartTxn(nil)
  8296  	assert.NoError(t, err)
  8297  	db, err = txn.GetDatabase("db")
  8298  	assert.NoError(t, err)
  8299  	_, err = db.DropRelationByName(schema1.Name)
  8300  	assert.NoError(t, err)
  8301  	_, err = db.DropRelationByName(schema2.Name)
  8302  	assert.NoError(t, err)
  8303  	assert.NoError(t, txn.Commit(context.Background()))
  8304  
  8305  	t2 := tae.TxnMgr.Now()
  8306  	testutil.CheckCheckpointReadWrite(t, types.TS{}, t2, tae.Catalog, smallCheckpointBlockRows, smallCheckpointSize, tae.Opts.Fs)
  8307  	testutil.CheckCheckpointReadWrite(t, t1, t2, tae.Catalog, smallCheckpointBlockRows, smallCheckpointSize, tae.Opts.Fs)
  8308  
  8309  	txn, err = tae.StartTxn(nil)
  8310  	assert.NoError(t, err)
  8311  	_, err = txn.DropDatabase("db")
  8312  	assert.NoError(t, err)
  8313  	assert.NoError(t, txn.Commit(context.Background()))
  8314  	t3 := tae.TxnMgr.Now()
  8315  	testutil.CheckCheckpointReadWrite(t, types.TS{}, t3, tae.Catalog, smallCheckpointBlockRows, smallCheckpointSize, tae.Opts.Fs)
  8316  	testutil.CheckCheckpointReadWrite(t, t2, t3, tae.Catalog, smallCheckpointBlockRows, smallCheckpointSize, tae.Opts.Fs)
  8317  
  8318  	schema := catalog.MockSchemaAll(2, 1)
  8319  	schema.BlockMaxRows = 1
  8320  	schema.ObjectMaxBlocks = 1
  8321  	tae.BindSchema(schema)
  8322  	bat := catalog.MockBatch(schema, 10)
  8323  
  8324  	tae.CreateRelAndAppend(bat, true)
  8325  	t4 := tae.TxnMgr.Now()
  8326  	testutil.CheckCheckpointReadWrite(t, types.TS{}, t4, tae.Catalog, smallCheckpointBlockRows, smallCheckpointSize, tae.Opts.Fs)
  8327  	testutil.CheckCheckpointReadWrite(t, t3, t4, tae.Catalog, smallCheckpointBlockRows, smallCheckpointSize, tae.Opts.Fs)
  8328  
  8329  	tae.CompactBlocks(false)
  8330  	t5 := tae.TxnMgr.Now()
  8331  	testutil.CheckCheckpointReadWrite(t, types.TS{}, t5, tae.Catalog, smallCheckpointBlockRows, smallCheckpointSize, tae.Opts.Fs)
  8332  	testutil.CheckCheckpointReadWrite(t, t4, t5, tae.Catalog, smallCheckpointBlockRows, smallCheckpointSize, tae.Opts.Fs)
  8333  }
  8334  
  8335  func TestCheckpointReadWrite2(t *testing.T) {
  8336  	defer testutils.AfterTest(t)()
  8337  	ctx := context.Background()
  8338  
  8339  	opts := config.WithLongScanAndCKPOpts(nil)
  8340  	tae := testutil.NewTestEngine(ctx, ModuleName, t, opts)
  8341  	defer tae.Close()
  8342  
  8343  	for i := 0; i < 10; i++ {
  8344  		schema := catalog.MockSchemaAll(i+1, i)
  8345  		schema.BlockMaxRows = 2
  8346  		bat := catalog.MockBatch(schema, rand.Intn(30))
  8347  		tae.BindSchema(schema)
  8348  		createDB := false
  8349  		if i == 0 {
  8350  			createDB = true
  8351  		}
  8352  		tae.CreateRelAndAppend(bat, createDB)
  8353  		tae.CompactBlocks(false)
  8354  	}
  8355  
  8356  	t1 := tae.TxnMgr.Now()
  8357  	testutil.CheckCheckpointReadWrite(t, types.TS{}, t1, tae.Catalog, smallCheckpointBlockRows, smallCheckpointSize, tae.Opts.Fs)
  8358  }
  8359  
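        // TestSnapshotCheckpoint appends to two tables concurrently, forces checkpoints around
        // a snapshot timestamp, and verifies that the insert and object batches read back from
        // the snapshot checkpoints match what is collected directly from the catalog.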
  8360  func TestSnapshotCheckpoint(t *testing.T) {
  8361  	defer testutils.AfterTest(t)()
  8362  	testutils.EnsureNoLeak(t)
  8363  	ctx := context.Background()
  8364  
  8365  	opts := new(options.Options)
  8366  	opts = config.WithLongScanAndCKPOpts(opts)
  8367  	options.WithDisableGCCheckpoint()(opts)
  8368  	tae := testutil.NewTestEngine(ctx, ModuleName, t, opts)
  8369  	defer tae.Close()
  8370  	db := tae.DB
  8371  	db.DiskCleaner.GetCleaner().SetMinMergeCountForTest(2)
  8372  
  8373  	schema1 := catalog.MockSchemaAll(13, 2)
  8374  	schema1.BlockMaxRows = 10
  8375  	schema1.ObjectMaxBlocks = 2
  8376  
  8377  	schema2 := catalog.MockSchemaAll(13, 2)
  8378  	schema2.BlockMaxRows = 10
  8379  	schema2.ObjectMaxBlocks = 2
  8380  	var rel1 handle.Relation
  8381  	{
  8382  		txn, _ := db.StartTxn(nil)
  8383  		database, err := txn.CreateDatabase("db", "", "")
  8384  		assert.Nil(t, err)
  8385  		rel1, err = database.CreateRelation(schema1)
  8386  		assert.Nil(t, err)
  8387  		_, err = database.CreateRelation(schema2)
  8388  		assert.Nil(t, err)
  8389  		assert.Nil(t, txn.Commit(context.Background()))
  8390  	}
  8391  	bat := catalog.MockBatch(schema1, int(schema1.BlockMaxRows*10-1))
  8392  	defer bat.Close()
  8393  	bats := bat.Split(bat.Length())
  8394  
  8395  	pool, err := ants.NewPool(20)
  8396  	assert.Nil(t, err)
  8397  	defer pool.Release()
  8398  	var wg sync.WaitGroup
  8399  
  8400  	for i := 0; i < len(bats)/2; i++ {
  8401  		wg.Add(2)
  8402  		err = pool.Submit(testutil.AppendClosure(t, bats[i], schema1.Name, db, &wg))
  8403  		assert.Nil(t, err)
  8404  		err = pool.Submit(testutil.AppendClosure(t, bats[i], schema2.Name, db, &wg))
  8405  		assert.Nil(t, err)
  8406  	}
  8407  	wg.Wait()
  8408  	ts := types.BuildTS(time.Now().UTC().UnixNano(), 0)
  8409  	db.ForceCheckpoint(ctx, ts, time.Minute)
  8410  	snapshot := types.BuildTS(time.Now().UTC().UnixNano(), 0)
  8411  	db.ForceCheckpoint(ctx, snapshot, time.Minute)
  8412  	tae.ForceCheckpoint()
  8413  	assert.Equal(t, uint64(0), db.Runtime.Scheduler.GetPenddingLSNCnt())
  8414  	var wg2 sync.WaitGroup
  8415  	for i := len(bats) / 2; i < len(bats); i++ {
  8416  		wg2.Add(2)
  8417  		err = pool.Submit(testutil.AppendClosure(t, bats[i], schema1.Name, db, &wg2))
  8418  		assert.Nil(t, err)
  8419  		err = pool.Submit(testutil.AppendClosure(t, bats[i], schema2.Name, db, &wg2))
  8420  		assert.Nil(t, err)
  8421  	}
  8422  	wg2.Wait()
  8423  	tae.ForceCheckpoint()
  8424  	tae.ForceCheckpoint()
  8425  	ins1, seg1 := testutil.GetUserTablesInsBatch(t, rel1.ID(), types.TS{}, snapshot, db.Catalog)
  8426  	ckps, err := checkpoint.ListSnapshotCheckpoint(ctx, db.Opts.Fs, snapshot, rel1.ID(), checkpoint.SpecifiedCheckpoint)
  8427  	assert.Nil(t, err)
  8428  	var inslen, seglen int
  8429  	for _, ckp := range ckps {
  8430  		ins, _, _, seg, cbs := testutil.ReadSnapshotCheckpoint(t, rel1.ID(), ckp.GetLocation(), db.Opts.Fs)
  8431  		for _, cb := range cbs {
  8432  			if cb != nil {
  8433  				cb()
  8434  			}
  8435  		}
  8436  		if ins != nil {
  8437  			moIns, err := batch.ProtoBatchToBatch(ins)
  8438  			assert.NoError(t, err)
  8439  			inslen += moIns.Vecs[0].Length()
  8440  		}
  8441  		if seg != nil {
  8442  			moIns, err := batch.ProtoBatchToBatch(seg)
  8443  			assert.NoError(t, err)
  8444  			seglen += moIns.Vecs[0].Length()
  8445  		}
  8446  	}
  8447  	assert.Equal(t, inslen, ins1.Length())
  8448  	assert.Equal(t, seglen, seg1.Length())
  8449  	assert.Equal(t, int64(0), common.DebugAllocator.CurrNB())
  8450  }
  8451  
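        // TestEstimateMemSize compares EstimateMemSize of object data for a narrow and a wide
        // schema, before and after in-memory deletes and a rollback, expecting the wide schema's
        // data size to stay constant while its delete size grows.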
  8452  func TestEstimateMemSize(t *testing.T) {
  8453  	defer testutils.AfterTest(t)()
  8454  	ctx := context.Background()
  8455  
  8456  	opts := config.WithLongScanAndCKPOpts(nil)
  8457  	tae := testutil.NewTestEngine(ctx, ModuleName, t, opts)
  8458  	defer tae.Close()
  8459  	schema := catalog.MockSchemaAll(2, 1)
  8460  	schema.BlockMaxRows = 50
  8461  	schemaBig := catalog.MockSchemaAll(14, 1)
  8462  
  8463  	schema50rowSize := 0
  8464  	{
  8465  		tae.BindSchema(schema)
  8466  		bat := catalog.MockBatch(schema, 50)
  8467  		testutil.CreateRelationAndAppend(t, 0, tae.DB, "db", schema, bat, true)
  8468  		txn, rel := tae.GetRelation()
  8469  		blk := testutil.GetOneBlockMeta(rel)
  8470  		size1, ds1 := blk.GetObjectData().EstimateMemSize()
  8471  		schema50rowSize = size1
  8472  
  8473  		blkID := objectio.NewBlockidWithObjectID(&blk.ID, 0)
  8474  		err := rel.DeleteByPhyAddrKey(*objectio.NewRowid(blkID, 1))
  8475  		require.NoError(t, err)
  8476  		size2, ds2 := blk.GetObjectData().EstimateMemSize()
  8477  
  8478  		err = rel.DeleteByPhyAddrKey(*objectio.NewRowid(blkID, 5))
  8479  		require.NoError(t, err)
  8480  		size3, ds3 := blk.GetObjectData().EstimateMemSize()
  8481  		// require.Less(t, size1, size2)
  8482  		// require.Less(t, size2, size3)
  8483  		require.NoError(t, txn.Rollback(ctx))
  8484  		size4, ds4 := blk.GetObjectData().EstimateMemSize()
  8485  		t.Log(size1, size2, size3, size4)
  8486  		t.Log(ds1, ds2, ds3, ds4)
  8487  	}
  8488  
  8489  	{
  8490  		tae.BindSchema(schemaBig)
  8491  		bat := catalog.MockBatch(schemaBig, 50)
  8492  		testutil.CreateRelationAndAppend(t, 0, tae.DB, "db", schemaBig, bat, false)
  8493  		txn, rel := tae.GetRelation()
  8494  		blk := testutil.GetOneBlockMeta(rel)
  8495  		size1, d1 := blk.GetObjectData().EstimateMemSize()
  8496  
  8497  		blkID := objectio.NewBlockidWithObjectID(&blk.ID, 0)
  8498  		err := rel.DeleteByPhyAddrKey(*objectio.NewRowid(blkID, 1))
  8499  		require.NoError(t, err)
  8500  
  8501  		size2, d2 := blk.GetObjectData().EstimateMemSize()
  8502  
  8503  		err = rel.DeleteByPhyAddrKey(*objectio.NewRowid(blkID, 5))
  8504  		require.NoError(t, err)
  8505  		size3, d3 := blk.GetObjectData().EstimateMemSize()
  8506  
  8507  		t.Log(size1, size2, size3)
  8508  		t.Log(d1, d2, d3)
  8509  		require.Equal(t, size1, size2)
  8510  		require.Equal(t, size2, size3)
  8511  		require.Less(t, d1, d2)
  8512  		require.Less(t, schema50rowSize, size1)
  8513  		require.NoError(t, txn.Commit(ctx))
  8514  	}
  8515  }
  8516  
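        // TestColumnCount adds 500 columns to the table through AlterTable requests in a single
        // transaction, then drops the relation and GCs the catalog.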
  8517  func TestColumnCount(t *testing.T) {
  8518  	defer testutils.AfterTest(t)()
  8519  	ctx := context.Background()
  8520  
  8521  	opts := config.WithLongScanAndCKPOpts(nil)
  8522  	tae := testutil.NewTestEngine(ctx, ModuleName, t, opts)
  8523  	defer tae.Close()
  8524  	schema := catalog.MockSchemaAll(2, 1)
  8525  	schema.BlockMaxRows = 50
  8526  	tae.BindSchema(schema)
  8527  	bat := catalog.MockBatch(schema, 1)
  8528  	defer bat.Close()
  8529  
  8530  	tae.CreateRelAndAppend(bat, true)
  8531  
  8532  	{
  8533  		txn, rel := tae.GetRelation()
  8534  		for i := 0; i < 500; i++ {
  8535  			colName := fmt.Sprintf("col %d", i)
  8536  			err := rel.AlterTable(context.TODO(), api.NewAddColumnReq(0, 0, colName, types.NewProtoType(types.T_char), 5))
  8537  			require.NoError(t, err)
  8538  		}
  8539  		require.Nil(t, txn.Commit(context.Background()))
  8540  	}
  8541  
  8542  	txn, err := tae.StartTxn(nil)
  8543  	assert.NoError(t, err)
  8544  	db, err := txn.GetDatabase("db")
  8545  	assert.NoError(t, err)
  8546  	_, err = db.DropRelationByName(schema.Name)
  8547  	assert.NoError(t, err)
  8548  	assert.NoError(t, txn.Commit(context.Background()))
  8549  
  8550  	commitTS := txn.GetCommitTS()
  8551  	tae.Catalog.GCByTS(context.Background(), commitTS.Next())
  8552  }
  8553  
  8554  func TestCollectDeletesInRange1(t *testing.T) {
  8555  	defer testutils.AfterTest(t)()
  8556  	ctx := context.Background()
  8557  
  8558  	opts := config.WithLongScanAndCKPOpts(nil)
  8559  	tae := testutil.NewTestEngine(ctx, ModuleName, t, opts)
  8560  	defer tae.Close()
  8561  	schema := catalog.MockSchemaAll(2, 1)
  8562  	schema.BlockMaxRows = 50
  8563  	tae.BindSchema(schema)
  8564  	bat := catalog.MockBatch(schema, 2)
  8565  	defer bat.Close()
  8566  
  8567  	tae.CreateRelAndAppend(bat, true)
  8568  
  8569  	txn, rel := tae.GetRelation()
  8570  	v := bat.Vecs[schema.GetSingleSortKeyIdx()].Get(0)
  8571  	filter := handle.NewEQFilter(v)
  8572  	err := rel.DeleteByFilter(context.Background(), filter)
  8573  	assert.NoError(t, err)
  8574  	err = txn.Commit(context.Background())
  8575  	assert.NoError(t, err)
  8576  
  8577  	txn, rel = tae.GetRelation()
  8578  	v = bat.Vecs[schema.GetSingleSortKeyIdx()].Get(1)
  8579  	filter = handle.NewEQFilter(v)
  8580  	err = rel.DeleteByFilter(context.Background(), filter)
  8581  	assert.NoError(t, err)
  8582  	err = txn.Commit(context.Background())
  8583  	assert.NoError(t, err)
  8584  
  8585  	tae.CheckCollectDeleteInRange()
  8586  }
  8587  
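        // TestCollectDeletesInRange2 exercises deletes persisted through a delta location:
        // it compacts the appended block, writes deletes for rows 0-3 to the object store
        // via MockCNDeleteInS3, attaches the delta location with TryDeleteByDeltaloc, and
        // then checks that CollectDeleteInRange reports 4 deletes, and 5 after one more
        // in-memory delete.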
  8588  func TestCollectDeletesInRange2(t *testing.T) {
  8589  	defer testutils.AfterTest(t)()
  8590  	ctx := context.Background()
  8591  
  8592  	opts := config.WithLongScanAndCKPOpts(nil)
  8593  	tae := testutil.NewTestEngine(ctx, ModuleName, t, opts)
  8594  	defer tae.Close()
  8595  	schema := catalog.MockSchemaAll(2, 1)
  8596  	schema.BlockMaxRows = 50
  8597  	tae.BindSchema(schema)
  8598  	bat := catalog.MockBatch(schema, 50)
  8599  	defer bat.Close()
  8600  
  8601  	tae.CreateRelAndAppend(bat, true)
  8602  	tae.CompactBlocks(false)
  8603  
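        	// Persist deletes for rows 0-3 of the compacted block and keep the returned
        	// delta location for the next transaction.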
  8604  	txn, rel := tae.GetRelation()
  8605  	blk := rel.MakeObjectIt().GetObject()
  8606  	deltaLoc, err := testutil.MockCNDeleteInS3(tae.Runtime.Fs, blk.GetMeta().(*catalog.ObjectEntry).GetObjectData(), 0, schema, txn, []uint32{0, 1, 2, 3})
  8607  	assert.NoError(t, err)
  8608  	assert.NoError(t, txn.Commit(context.Background()))
  8609  
  8610  	txn, rel = tae.GetRelation()
  8611  	blk = rel.MakeObjectIt().GetObject()
  8612  	ok, err := rel.TryDeleteByDeltaloc(blk.Fingerprint(), deltaLoc)
  8613  	assert.True(t, ok)
  8614  	assert.NoError(t, err)
  8615  	assert.NoError(t, txn.Commit(context.Background()))
  8616  
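        	// All four deltaloc-backed deletes should be visible to CollectDeleteInRange.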
  8617  	t.Log(tae.Catalog.SimplePPString(3))
  8618  	txn, rel = tae.GetRelation()
  8619  	blk = rel.MakeObjectIt().GetObject()
  8620  	deletes, _, err := blk.GetMeta().(*catalog.ObjectEntry).GetObjectData().CollectDeleteInRange(
  8621  		context.Background(), types.TS{}, txn.GetStartTS(), true, common.DefaultAllocator,
  8622  	)
  8623  	assert.NoError(t, err)
  8624  	assert.Equal(t, 4, deletes.Length())
  8625  	assert.NoError(t, txn.Commit(context.Background()))
  8626  
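        	// Add one ordinary in-memory delete; the collected delete count should rise to 5.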
  8627  	txn, rel = tae.GetRelation()
  8628  	v1 := bat.Vecs[schema.GetSingleSortKeyIdx()].Get(4)
  8629  	filter := handle.NewEQFilter(v1)
  8630  	err = rel.DeleteByFilter(context.Background(), filter)
  8631  	assert.NoError(t, err)
  8632  	assert.NoError(t, txn.Commit(context.Background()))
  8633  
  8634  	txn, rel = tae.GetRelation()
  8635  	blk = rel.MakeObjectIt().GetObject()
  8636  	deletes, _, err = blk.GetMeta().(*catalog.ObjectEntry).GetObjectData().CollectDeleteInRange(
  8637  		context.Background(), types.TS{}, txn.GetStartTS(), true, common.DefaultAllocator,
  8638  	)
  8639  	assert.NoError(t, err)
  8640  	assert.Equal(t, 5, deletes.Length())
  8641  	assert.NoError(t, txn.Commit(context.Background()))
  8642  }
  8643  
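        // TestGlobalCheckpoint7 sets the global checkpoint minimum count to 3 and, across
        // two restarts, creates one database per round so that incremental checkpoints
        // accumulate; after the third round a global checkpoint is expected, collapsing
        // the checkpoint list back to a single entry.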
  8644  func TestGlobalCheckpoint7(t *testing.T) {
  8645  	defer testutils.AfterTest(t)()
  8646  	ctx := context.Background()
  8647  
  8648  	opts := config.WithQuickScanAndCKPOpts(nil)
  8649  	options.WithCheckpointGlobalMinCount(3)(opts)
  8650  	tae := testutil.NewTestEngine(ctx, ModuleName, t, opts)
  8651  	defer tae.Close()
  8652  
  8653  	txn, err := tae.StartTxn(nil)
  8654  	assert.NoError(t, err)
  8655  	_, err = txn.CreateDatabase("db1", "sql", "typ")
  8656  	assert.NoError(t, err)
  8657  	assert.NoError(t, txn.Commit(context.Background()))
  8658  
  8659  	testutils.WaitExpect(10000, func() bool {
  8660  		return tae.Wal.GetPenddingCnt() == 0
  8661  	})
  8662  
  8663  	entries := tae.BGCheckpointRunner.GetAllCheckpoints()
  8664  	for _, e := range entries {
  8665  		t.Logf("%s", e.String())
  8666  	}
  8667  	assert.Equal(t, 1, len(entries))
  8668  
  8669  	tae.Restart(context.Background())
  8670  
  8671  	txn, err = tae.StartTxn(nil)
  8672  	assert.NoError(t, err)
  8673  	_, err = txn.CreateDatabase("db2", "sql", "typ")
  8674  	assert.NoError(t, err)
  8675  	assert.NoError(t, txn.Commit(context.Background()))
  8676  
  8677  	testutils.WaitExpect(10000, func() bool {
  8678  		return tae.Wal.GetPenddingCnt() == 0
  8679  	})
  8680  
  8681  	entries = tae.BGCheckpointRunner.GetAllCheckpoints()
  8682  	for _, e := range entries {
  8683  		t.Logf("%s", e.String())
  8684  	}
  8685  	assert.Equal(t, 2, len(entries))
  8686  
  8687  	tae.Restart(context.Background())
  8688  
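        	// Third round: with the minimum count of incremental checkpoints reached, a
        	// global checkpoint should be produced and GetAllCheckpoints should again
        	// return a single entry.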
  8689  	txn, err = tae.StartTxn(nil)
  8690  	assert.NoError(t, err)
  8691  	_, err = txn.CreateDatabase("db3", "sql", "typ")
  8692  	assert.NoError(t, err)
  8693  	assert.NoError(t, txn.Commit(context.Background()))
  8694  
  8695  	testutils.WaitExpect(10000, func() bool {
  8696  		return tae.Wal.GetPenddingCnt() == 0
  8697  	})
  8698  
  8699  	testutils.WaitExpect(10000, func() bool {
  8700  		return len(tae.BGCheckpointRunner.GetAllGlobalCheckpoints()) == 1
  8701  	})
  8702  
  8703  	entries = tae.BGCheckpointRunner.GetAllCheckpoints()
  8704  	for _, e := range entries {
  8705  		t.Logf("%s", e.String())
  8706  	}
  8707  	assert.Equal(t, 1, len(entries))
  8708  
  8709  }
  8710  
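        // TestSplitCommand caps MaxMessageSize at CmdBufReserved plus 2KB, presumably to
        // force the 50-row append commit to be split into multiple WAL commands, and then
        // verifies the rows are all readable both before and after a restart.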
  8711  func TestSplitCommand(t *testing.T) {
  8712  	defer testutils.AfterTest(t)()
  8713  	ctx := context.Background()
  8714  
  8715  	opts := config.WithLongScanAndCKPOpts(nil)
  8716  	opts.MaxMessageSize = txnbase.CmdBufReserved + 2*1024
  8717  	tae := testutil.NewTestEngine(ctx, ModuleName, t, opts)
  8718  	defer tae.Close()
  8719  	schema := catalog.MockSchemaAll(2, 1)
  8720  	schema.BlockMaxRows = 50
  8721  	tae.BindSchema(schema)
  8722  	bat := catalog.MockBatch(schema, 50)
  8723  	defer bat.Close()
  8724  
  8725  	tae.CreateRelAndAppend(bat, true)
  8726  
  8727  	tae.CheckRowsByScan(50, false)
  8728  	t.Log(tae.Catalog.SimplePPString(3))
  8729  	tae.Restart(context.Background())
  8730  	t.Log(tae.Catalog.SimplePPString(3))
  8731  	tae.CheckRowsByScan(50, false)
  8732  }