github.com/matrixorigin/matrixone@v1.2.0/pkg/vm/engine/tae/logtail/backup.go (about)

     1  // Copyright 2021 Matrix Origin
     2  //
     3  // Licensed under the Apache License, Version 2.0 (the "License");
     4  // you may not use this file except in compliance with the License.
     5  // You may obtain a copy of the License at
     6  //
     7  //      http://www.apache.org/licenses/LICENSE-2.0
     8  //
     9  // Unless required by applicable law or agreed to in writing, software
    10  // distributed under the License is distributed on an "AS IS" BASIS,
    11  // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    12  // See the License for the specific language governing permissions and
    13  // limitations under the License.
    14  
    15  package logtail
    16  
    17  import (
    18  	"context"
    19  	"fmt"
    20  	"math"
    21  	"sort"
    22  
    23  	"github.com/matrixorigin/matrixone/pkg/catalog"
    24  	"github.com/matrixorigin/matrixone/pkg/common/moerr"
    25  	"github.com/matrixorigin/matrixone/pkg/container/batch"
    26  	"github.com/matrixorigin/matrixone/pkg/container/types"
    27  	"github.com/matrixorigin/matrixone/pkg/fileservice"
    28  	"github.com/matrixorigin/matrixone/pkg/logutil"
    29  	"github.com/matrixorigin/matrixone/pkg/objectio"
    30  	"github.com/matrixorigin/matrixone/pkg/vm/engine/tae/blockio"
    31  	catalog2 "github.com/matrixorigin/matrixone/pkg/vm/engine/tae/catalog"
    32  	"github.com/matrixorigin/matrixone/pkg/vm/engine/tae/common"
    33  	"github.com/matrixorigin/matrixone/pkg/vm/engine/tae/containers"
    34  	"github.com/matrixorigin/matrixone/pkg/vm/engine/tae/db/dbutils"
    35  	"github.com/matrixorigin/matrixone/pkg/vm/engine/tae/mergesort"
    36  	"github.com/matrixorigin/matrixone/pkg/vm/engine/tae/txn/txnbase"
    37  )
    38  
// fileData aggregates everything collected for a single object file while
// rewriting a checkpoint for backup: per-block payloads, object-level info,
// and flags describing how the file must be handled.
type fileData struct {
	data          map[uint16]*blockData // per-block payloads, keyed by block number within the object
	name          objectio.ObjectName   // the object file's name
	obj           *objData              // object-level info (stats + checkpoint-batch row indexes)
	isDeleteBatch bool                  // registered from a delete entry (CN batch)
	isChange      bool                  // set by trimming when rows past the backup ts were cut
	isABlock      bool                  // true for an appendable block (aBlk)
}
    47  
// objData carries object-level state for checkpoint rewriting: the object's
// stats, its loaded (possibly trimmed) data, and the row indexes into the
// checkpoint batches that reference it.
type objData struct {
	stats     *objectio.ObjectStats
	data      []*batch.Batch // loaded object data (filled during trimming)
	sortKey   uint16         // primary-key column index; math.MaxUint16 when none
	infoRow   []int          // referencing rows in the ObjectInfo batch (insert entries)
	infoDel   []int          // referencing rows in the ObjectInfo batch (delete entries)
	infoTNRow []int          // referencing rows in the TNObjectInfo batch
	tid       uint64         // table id the object belongs to
	delete    bool           // registered from a delete entry
	isABlock  bool           // true for an appendable block (aBlk)
}
    59  
// blockData carries per-block state for checkpoint rewriting: the block's
// location and type, its loaded data, and the checkpoint-batch rows that
// reference it.
type blockData struct {
	num       uint16                 // block number within the object
	deleteRow []int                  // referencing rows in the delete/CN meta batch
	insertRow []int                  // referencing rows in the insert meta batch
	blockType objectio.DataMetaType  // objectio.SchemaData or objectio.SchemaTombstone
	location  objectio.Location
	data      *batch.Batch           // loaded block data (filled during trimming)
	sortKey   uint16                 // primary-key column index; math.MaxUint16 when none
	isABlock  bool                   // true for an appendable block (aBlk)
	blockId   types.Blockid
	tid       uint64                 // table id the block belongs to
	tombstone *blockData             // associated tombstone block, if any
}
    73  
// iBlocks groups, per table, the rewritten blocks that must be re-inserted
// into the checkpoint's block-meta batches.
type iBlocks struct {
	insertBlocks []*insertBlock
}

// iObjects groups, per table, the rewritten objects that must be re-inserted
// into the checkpoint's object-info batches.
type iObjects struct {
	rowObjects []*insertObjects
}

// insertBlock is one rewritten block waiting to be applied back into the
// checkpoint meta batches.
type insertBlock struct {
	blockId   objectio.Blockid
	location  objectio.Location // new location after rewriting
	deleteRow int               // source row in the delete meta batch
	apply     bool              // set once the entry has been applied
	data      *blockData
}

// insertObjects is one rewritten object waiting to be applied back into the
// checkpoint object-info batches.
type insertObjects struct {
	location objectio.Location // new location after rewriting
	apply    bool              // set once the entry has been applied
	obj      *objData
}

// tableOffset marks a row span [offset, end) within a batch.
// NOTE(review): its usage is outside this chunk — confirm against callers.
type tableOffset struct {
	offset int
	end    int
}
   100  
   101  func getCheckpointData(
   102  	ctx context.Context,
   103  	fs fileservice.FileService,
   104  	location objectio.Location,
   105  	version uint32,
   106  ) (*CheckpointData, error) {
   107  	data := NewCheckpointData(common.CheckpointAllocator)
   108  	reader, err := blockio.NewObjectReader(fs, location)
   109  	if err != nil {
   110  		return nil, err
   111  	}
   112  	err = data.readMetaBatch(ctx, version, reader, nil)
   113  	if err != nil {
   114  		return nil, err
   115  	}
   116  	err = data.readAll(ctx, version, fs)
   117  	if err != nil {
   118  		return nil, err
   119  	}
   120  	return data, nil
   121  }
   122  
   123  func addObjectToObjectData(
   124  	stats *objectio.ObjectStats,
   125  	isABlk, isDelete bool, isTN bool,
   126  	row int, tid uint64,
   127  	objectsData *map[string]*fileData,
   128  ) {
   129  	name := stats.ObjectName().String()
   130  	if (*objectsData)[name] == nil {
   131  		object := &fileData{
   132  			name:          stats.ObjectName(),
   133  			obj:           &objData{},
   134  			isDeleteBatch: isDelete,
   135  			isChange:      false,
   136  			isABlock:      isABlk,
   137  		}
   138  		object.obj.stats = stats
   139  		object.obj.tid = tid
   140  		object.obj.delete = isDelete
   141  		object.obj.isABlock = isABlk
   142  		if isABlk {
   143  			object.data = make(map[uint16]*blockData)
   144  			object.data[0] = &blockData{
   145  				num:       0,
   146  				location:  stats.ObjectLocation(),
   147  				blockType: objectio.SchemaData,
   148  				isABlock:  true,
   149  				tid:       tid,
   150  				sortKey:   uint16(math.MaxUint16),
   151  			}
   152  		}
   153  		(*objectsData)[name] = object
   154  		if !isTN {
   155  			if isDelete {
   156  				(*objectsData)[name].obj.infoDel = []int{row}
   157  			} else {
   158  				(*objectsData)[name].obj.infoRow = []int{row}
   159  			}
   160  		} else {
   161  			(*objectsData)[name].obj.infoTNRow = []int{row}
   162  		}
   163  		return
   164  	}
   165  
   166  	if !isTN {
   167  		if isDelete {
   168  			(*objectsData)[name].obj.infoDel = append((*objectsData)[name].obj.infoDel, row)
   169  		} else {
   170  			(*objectsData)[name].obj.infoRow = append((*objectsData)[name].obj.infoRow, row)
   171  		}
   172  	} else {
   173  		(*objectsData)[name].obj.infoTNRow = append((*objectsData)[name].obj.infoTNRow, row)
   174  	}
   175  
   176  }
   177  
   178  func addBlockToObjectData(
   179  	location objectio.Location,
   180  	isABlk, isCnBatch bool,
   181  	row int, tid uint64,
   182  	blockID types.Blockid,
   183  	blockType objectio.DataMetaType,
   184  	objectsData *map[string]*fileData,
   185  ) {
   186  	name := location.Name().String()
   187  	if (*objectsData)[name] == nil {
   188  		object := &fileData{
   189  			name:          location.Name(),
   190  			data:          make(map[uint16]*blockData),
   191  			isChange:      false,
   192  			isDeleteBatch: isCnBatch,
   193  			isABlock:      isABlk,
   194  		}
   195  		(*objectsData)[name] = object
   196  	}
   197  	if (*objectsData)[name].data == nil {
   198  		(*objectsData)[name].data = make(map[uint16]*blockData)
   199  	}
   200  	if (*objectsData)[name].data[location.ID()] == nil {
   201  		(*objectsData)[name].data[location.ID()] = &blockData{
   202  			num:       location.ID(),
   203  			location:  location,
   204  			blockType: blockType,
   205  			isABlock:  isABlk,
   206  			tid:       tid,
   207  			blockId:   blockID,
   208  			sortKey:   uint16(math.MaxUint16),
   209  		}
   210  		if isCnBatch {
   211  			(*objectsData)[name].data[location.ID()].deleteRow = []int{row}
   212  		} else {
   213  			(*objectsData)[name].data[location.ID()].insertRow = []int{row}
   214  		}
   215  	} else {
   216  		if isCnBatch {
   217  			(*objectsData)[name].data[location.ID()].deleteRow = append((*objectsData)[name].data[location.ID()].deleteRow, row)
   218  		} else {
   219  			(*objectsData)[name].data[location.ID()].insertRow = append((*objectsData)[name].data[location.ID()].insertRow, row)
   220  		}
   221  	}
   222  }
   223  
// trimObjectsData trims every collected object/block against the backup
// timestamp ts: data rows committed after ts are cut off, tombstone rows
// committed after ts are dropped. Affected blocks are loaded from fs into
// memory (objData.data / blockData.data) so they can be rewritten later.
//
// It returns true when the checkpoint itself must be rewritten — i.e. when
// any appendable block needed loading, or any tombstone row was filtered out.
func trimObjectsData(
	ctx context.Context,
	fs fileservice.FileService,
	ts types.TS,
	objectsData *map[string]*fileData,
) (bool, error) {
	isCkpChange := false
	for name := range *objectsData {
		isChange := false
		// Case 1: a deleted appendable object with no per-block entries —
		// load and trim the whole object payload at the object level.
		if (*objectsData)[name].obj != nil && (*objectsData)[name].obj.isABlock {
			if !(*objectsData)[name].obj.delete {
				panic(fmt.Sprintf("object %s is not a delete batch", name))
			}
			if len((*objectsData)[name].data) == 0 {
				var bat *batch.Batch
				var err error
				commitTs := types.TS{}
				// As long as there is an aBlk to be deleted, isCkpChange must be set to true.
				isCkpChange = true
				obj := (*objectsData)[name].obj
				location := obj.stats.ObjectLocation()
				meta, err := objectio.FastLoadObjectMeta(ctx, &location, false, fs)
				if err != nil {
					return isCkpChange, err
				}
				sortKey := uint16(math.MaxUint16)
				if meta.MustDataMeta().BlockHeader().Appendable() {
					sortKey = meta.MustDataMeta().BlockHeader().SortKey()
				}
				bat, err = blockio.LoadOneBlock(ctx, fs, location, objectio.SchemaData)
				if err != nil {
					return isCkpChange, err
				}
				// Scan commit timestamps (second-to-last column of an aBlk)
				// and cut the batch just before the first row past ts.
				for v := 0; v < bat.Vecs[0].Length(); v++ {
					err = commitTs.Unmarshal(bat.Vecs[len(bat.Vecs)-2].GetRawBytesAt(v))
					if err != nil {
						return isCkpChange, err
					}
					if commitTs.Greater(&ts) {
						windowCNBatch(bat, 0, uint64(v))
						logutil.Debugf("blkCommitTs %v ts %v , block is %v",
							commitTs.ToString(), ts.ToString(), location.String())
						isChange = true
						break
					}
				}
				(*objectsData)[name].obj.sortKey = sortKey
				(*objectsData)[name].obj.data = make([]*batch.Batch, 0)
				bat = formatData(bat)
				(*objectsData)[name].obj.data = append((*objectsData)[name].obj.data, bat)
				(*objectsData)[name].isChange = isChange
				continue
			}
		}

		// Case 2: per-block trimming.
		for id, block := range (*objectsData)[name].data {
			// Non-appendable data blocks are immutable: no trimming needed.
			if !block.isABlock && block.blockType == objectio.SchemaData {
				continue
			}
			var bat *batch.Batch
			var err error
			commitTs := types.TS{}
			if block.blockType == objectio.SchemaTombstone {
				// Tombstone block: keep only delete rows committed at or
				// before ts (commit ts is the third-to-last column here).
				bat, err = blockio.LoadOneBlock(ctx, fs, block.location, objectio.SchemaTombstone)
				if err != nil {
					return isCkpChange, err
				}
				deleteRow := make([]int64, 0)
				for v := 0; v < bat.Vecs[0].Length(); v++ {
					err = commitTs.Unmarshal(bat.Vecs[len(bat.Vecs)-3].GetRawBytesAt(v))
					if err != nil {
						return isCkpChange, err
					}
					if commitTs.Greater(&ts) {
						logutil.Debugf("delete row %v, commitTs %v, location %v",
							v, commitTs.ToString(), block.location.String())
						isChange = true
						isCkpChange = true
					} else {
						deleteRow = append(deleteRow, int64(v))
					}
				}
				// Shrink only when some rows were actually filtered out.
				if len(deleteRow) != bat.Vecs[0].Length() {
					bat.Shrink(deleteRow, false)
				}
			} else {
				// Appendable data block: cut it at the first row past ts.
				// As long as there is an aBlk to be deleted, isCkpChange must be set to true.
				isCkpChange = true
				meta, err := objectio.FastLoadObjectMeta(ctx, &block.location, false, fs)
				if err != nil {
					return isCkpChange, err
				}
				sortKey := uint16(math.MaxUint16)
				if meta.MustDataMeta().BlockHeader().Appendable() {
					sortKey = meta.MustDataMeta().BlockHeader().SortKey()
				}
				bat, err = blockio.LoadOneBlock(ctx, fs, block.location, objectio.SchemaData)
				if err != nil {
					return isCkpChange, err
				}
				for v := 0; v < bat.Vecs[0].Length(); v++ {
					err = commitTs.Unmarshal(bat.Vecs[len(bat.Vecs)-2].GetRawBytesAt(v))
					if err != nil {
						return isCkpChange, err
					}
					if commitTs.Greater(&ts) {
						windowCNBatch(bat, 0, uint64(v))
						logutil.Debugf("blkCommitTs %v ts %v , block is %v",
							commitTs.ToString(), ts.ToString(), block.location.String())
						isChange = true
						break
					}
				}
				(*objectsData)[name].data[id].sortKey = sortKey
			}
			// Normalize the batch so it can be written back out safely.
			bat = formatData(bat)
			(*objectsData)[name].data[id].data = bat
		}
		(*objectsData)[name].isChange = isChange
	}
	return isCkpChange, nil
}
   346  
   347  func applyDelete(dataBatch *batch.Batch, deleteBatch *batch.Batch, id string) error {
   348  	if deleteBatch == nil {
   349  		return nil
   350  	}
   351  	deleteRow := make([]int64, 0)
   352  	rows := make(map[int64]bool)
   353  	for i := 0; i < deleteBatch.Vecs[0].Length(); i++ {
   354  		row := deleteBatch.Vecs[0].GetRawBytesAt(i)
   355  		rowId := objectio.HackBytes2Rowid(row)
   356  		blockId, ro := rowId.Decode()
   357  		if blockId.String() != id {
   358  			continue
   359  		}
   360  		rows[int64(ro)] = true
   361  	}
   362  	for i := 0; i < dataBatch.Vecs[0].Length(); i++ {
   363  		if rows[int64(i)] {
   364  			deleteRow = append(deleteRow, int64(i))
   365  		}
   366  	}
   367  	dataBatch.Shrink(deleteRow, true)
   368  	return nil
   369  }
   370  
// updateBlockMeta rewrites one row of the block-meta batches (blkMeta and its
// txn counterpart blkMetaTxn) to point at a newly written block: the rowid,
// block id and segment id are derived from blockID, metaLoc is set to
// location, and deltaLoc is nulled out in both batches. sort records whether
// the new block is sorted.
func updateBlockMeta(blkMeta, blkMetaTxn *containers.Batch, row int, blockID types.Blockid, location objectio.Location, sort bool) {
	blkMeta.GetVectorByName(catalog2.AttrRowID).Update(
		row,
		objectio.HackBlockid2Rowid(&blockID),
		false)
	blkMeta.GetVectorByName(catalog.BlockMeta_ID).Update(
		row,
		blockID,
		false)
	// The rewritten block is no longer appendable.
	blkMeta.GetVectorByName(catalog.BlockMeta_EntryState).Update(
		row,
		false,
		false)
	blkMeta.GetVectorByName(catalog.BlockMeta_Sorted).Update(
		row,
		sort,
		false)
	blkMeta.GetVectorByName(catalog.BlockMeta_SegmentID).Update(
		row,
		*blockID.Segment(),
		false)
	blkMeta.GetVectorByName(catalog.BlockMeta_MetaLoc).Update(
		row,
		[]byte(location),
		false)
	// Delta location is cleared (set to null) — deletes were folded into the
	// rewritten block.
	blkMeta.GetVectorByName(catalog.BlockMeta_DeltaLoc).Update(
		row,
		nil,
		true)
	blkMetaTxn.GetVectorByName(catalog.BlockMeta_MetaLoc).Update(
		row,
		[]byte(location),
		false)
	blkMetaTxn.GetVectorByName(catalog.BlockMeta_DeltaLoc).Update(
		row,
		nil,
		true)

	if !sort {
		logutil.Infof("block %v is not sorted", blockID.String())
	}
}
   413  
   414  func appendValToBatch(src, dst *containers.Batch, row int) {
   415  	for v, vec := range src.Vecs {
   416  		val := vec.Get(row)
   417  		if val == nil {
   418  			dst.Vecs[v].Append(val, true)
   419  		} else {
   420  			dst.Vecs[v].Append(val, false)
   421  		}
   422  	}
   423  }
   424  
   425  // Need to format the loaded batch, otherwise panic may occur when WriteBatch.
   426  func formatData(data *batch.Batch) *batch.Batch {
   427  	if data.Vecs[0].Length() > 0 {
   428  		data.Attrs = make([]string, 0)
   429  		for i := range data.Vecs {
   430  			att := fmt.Sprintf("col_%d", i)
   431  			data.Attrs = append(data.Attrs, att)
   432  		}
   433  		tmp := containers.ToTNBatch(data, common.CheckpointAllocator)
   434  		data = containers.ToCNBatch(tmp)
   435  	}
   436  	return data
   437  }
   438  
   439  func LoadCheckpointEntriesFromKey(
   440  	ctx context.Context,
   441  	fs fileservice.FileService,
   442  	location objectio.Location,
   443  	version uint32,
   444  	softDeletes *map[string]bool,
   445  	baseTS *types.TS,
   446  ) ([]*objectio.BackupObject, *CheckpointData, error) {
   447  	locations := make([]*objectio.BackupObject, 0)
   448  	data, err := getCheckpointData(ctx, fs, location, version)
   449  	if err != nil {
   450  		return nil, nil, err
   451  	}
   452  
   453  	locations = append(locations, &objectio.BackupObject{
   454  		Location: location,
   455  		NeedCopy: true,
   456  	})
   457  
   458  	for _, location = range data.locations {
   459  		locations = append(locations, &objectio.BackupObject{
   460  			Location: location,
   461  			NeedCopy: true,
   462  		})
   463  	}
   464  	for i := 0; i < data.bats[ObjectInfoIDX].Length(); i++ {
   465  		var objectStats objectio.ObjectStats
   466  		buf := data.bats[ObjectInfoIDX].GetVectorByName(ObjectAttr_ObjectStats).Get(i).([]byte)
   467  		objectStats.UnMarshal(buf)
   468  		deletedAt := data.bats[ObjectInfoIDX].GetVectorByName(EntryNode_DeleteAt).Get(i).(types.TS)
   469  		createAt := data.bats[ObjectInfoIDX].GetVectorByName(EntryNode_CreateAt).Get(i).(types.TS)
   470  		commitAt := data.bats[ObjectInfoIDX].GetVectorByName(txnbase.SnapshotAttr_CommitTS).Get(i).(types.TS)
   471  		isAblk := data.bats[ObjectInfoIDX].GetVectorByName(ObjectAttr_State).Get(i).(bool)
   472  		if objectStats.Extent().End() == 0 {
   473  			panic(fmt.Sprintf("object %v Extent not empty", objectStats.ObjectName().String()))
   474  		}
   475  
   476  		if deletedAt.IsEmpty() && isAblk {
   477  			panic(fmt.Sprintf("object %v is not deleted", objectStats.ObjectName().String()))
   478  		}
   479  		bo := &objectio.BackupObject{
   480  			Location: objectStats.ObjectLocation(),
   481  			CrateTS:  createAt,
   482  			DropTS:   deletedAt,
   483  		}
   484  		if baseTS.IsEmpty() || (!baseTS.IsEmpty() &&
   485  			(createAt.GreaterEq(baseTS) || commitAt.GreaterEq(baseTS))) {
   486  			bo.NeedCopy = true
   487  		}
   488  		locations = append(locations, bo)
   489  		if !deletedAt.IsEmpty() {
   490  			if softDeletes != nil {
   491  				if !(*softDeletes)[objectStats.ObjectName().String()] {
   492  					(*softDeletes)[objectStats.ObjectName().String()] = true
   493  				}
   494  			}
   495  		}
   496  	}
   497  
   498  	for i := 0; i < data.bats[TNObjectInfoIDX].Length(); i++ {
   499  		var objectStats objectio.ObjectStats
   500  		buf := data.bats[TNObjectInfoIDX].GetVectorByName(ObjectAttr_ObjectStats).Get(i).([]byte)
   501  		objectStats.UnMarshal(buf)
   502  		deletedAt := data.bats[TNObjectInfoIDX].GetVectorByName(EntryNode_DeleteAt).Get(i).(types.TS)
   503  		if objectStats.Extent().End() > 0 {
   504  			panic(any(fmt.Sprintf("extent end is not 0: %v, name is %v", objectStats.Extent().End(), objectStats.ObjectName().String())))
   505  		}
   506  		if !deletedAt.IsEmpty() {
   507  			panic(any(fmt.Sprintf("deleteAt is not empty: %v, name is %v", deletedAt.ToString(), objectStats.ObjectName().String())))
   508  		}
   509  		//locations = append(locations, objectStats.ObjectName())
   510  	}
   511  
   512  	for i := 0; i < data.bats[BLKMetaInsertIDX].Length(); i++ {
   513  		deltaLoc := objectio.Location(
   514  			data.bats[BLKMetaInsertIDX].GetVectorByName(catalog.BlockMeta_DeltaLoc).Get(i).([]byte))
   515  		commitTS := data.bats[BLKMetaInsertIDX].GetVectorByName(catalog.BlockMeta_CommitTs).Get(i).(types.TS)
   516  		if deltaLoc.IsEmpty() {
   517  			metaLoc := objectio.Location(
   518  				data.bats[BLKMetaInsertIDX].GetVectorByName(catalog.BlockMeta_MetaLoc).Get(i).([]byte))
   519  			panic(fmt.Sprintf("block %v deltaLoc is empty", metaLoc.String()))
   520  		}
   521  		bo := &objectio.BackupObject{
   522  			Location: deltaLoc,
   523  			CrateTS:  commitTS,
   524  		}
   525  		if baseTS.IsEmpty() ||
   526  			(!baseTS.IsEmpty() && commitTS.GreaterEq(baseTS)) {
   527  			bo.NeedCopy = true
   528  		}
   529  		locations = append(locations, bo)
   530  	}
   531  	for i := 0; i < data.bats[BLKCNMetaInsertIDX].Length(); i++ {
   532  		metaLoc := objectio.Location(
   533  			data.bats[BLKCNMetaInsertIDX].GetVectorByName(catalog.BlockMeta_MetaLoc).Get(i).([]byte))
   534  		commitTS := data.bats[BLKCNMetaInsertIDX].GetVectorByName(catalog.BlockMeta_CommitTs).Get(i).(types.TS)
   535  		if !metaLoc.IsEmpty() {
   536  			if softDeletes != nil {
   537  				if !(*softDeletes)[metaLoc.Name().String()] {
   538  					(*softDeletes)[metaLoc.Name().String()] = true
   539  					//Fixme:The objectlist has updated this object to the cropped object,
   540  					// and the expired object in the soft-deleted blocklist has not been processed.
   541  					logutil.Warnf("block %v metaLoc is not deleted", metaLoc.String())
   542  					//panic(fmt.Sprintf("block111 %v metaLoc is not deleted", metaLoc.String()))
   543  				}
   544  			}
   545  		}
   546  		deltaLoc := objectio.Location(
   547  			data.bats[BLKCNMetaInsertIDX].GetVectorByName(catalog.BlockMeta_DeltaLoc).Get(i).([]byte))
   548  		if deltaLoc.IsEmpty() {
   549  			panic(fmt.Sprintf("block %v deltaLoc is empty", deltaLoc.String()))
   550  		}
   551  		bo := &objectio.BackupObject{
   552  			Location: deltaLoc,
   553  			CrateTS:  commitTS,
   554  		}
   555  		if baseTS.IsEmpty() ||
   556  			(!baseTS.IsEmpty() && commitTS.GreaterEq(baseTS)) {
   557  			bo.NeedCopy = true
   558  		}
   559  		locations = append(locations, bo)
   560  	}
   561  	return locations, data, nil
   562  }
   563  
   564  func ReWriteCheckpointAndBlockFromKey(
   565  	ctx context.Context,
   566  	fs, dstFs fileservice.FileService,
   567  	loc, tnLocation objectio.Location,
   568  	version uint32, ts types.TS,
   569  	softDeletes map[string]bool,
   570  ) (objectio.Location, objectio.Location, []string, error) {
   571  	logutil.Info("[Start]", common.OperationField("ReWrite Checkpoint"),
   572  		common.OperandField(loc.String()),
   573  		common.OperandField(ts.ToString()))
   574  	phaseNumber := 0
   575  	var err error
   576  	defer func() {
   577  		if err != nil {
   578  			logutil.Error("[DoneWithErr]", common.OperationField("ReWrite Checkpoint"),
   579  				common.AnyField("error", err),
   580  				common.AnyField("phase", phaseNumber),
   581  			)
   582  		}
   583  	}()
   584  	objectsData := make(map[string]*fileData, 0)
   585  
   586  	defer func() {
   587  		for i := range objectsData {
   588  			if objectsData[i].obj != nil && objectsData[i].obj.data != nil {
   589  				for z := range objectsData[i].obj.data {
   590  					for y := range objectsData[i].obj.data[z].Vecs {
   591  						objectsData[i].obj.data[z].Vecs[y].Free(common.DebugAllocator)
   592  					}
   593  				}
   594  			}
   595  			for j := range objectsData[i].data {
   596  				if objectsData[i].data[j].data == nil {
   597  					continue
   598  				}
   599  				for z := range objectsData[i].data[j].data.Vecs {
   600  					objectsData[i].data[j].data.Vecs[z].Free(common.CheckpointAllocator)
   601  				}
   602  			}
   603  		}
   604  	}()
   605  	phaseNumber = 1
   606  	// Load checkpoint
   607  	data, err := getCheckpointData(ctx, fs, loc, version)
   608  	if err != nil {
   609  		return nil, nil, nil, err
   610  	}
   611  	data.FormatData(common.CheckpointAllocator)
   612  	defer data.Close()
   613  
   614  	phaseNumber = 2
   615  	// Analyze checkpoint to get the object file
   616  	var files []string
   617  	isCkpChange := false
   618  	blkCNMetaInsert := data.bats[BLKCNMetaInsertIDX]
   619  	blkMetaInsTxnBat := data.bats[BLKMetaInsertTxnIDX]
   620  	blkMetaInsTxnBatTid := blkMetaInsTxnBat.GetVectorByName(SnapshotAttr_TID)
   621  
   622  	blkMetaInsert := data.bats[BLKMetaInsertIDX]
   623  	blkMetaInsertMetaLoc := data.bats[BLKMetaInsertIDX].GetVectorByName(catalog.BlockMeta_MetaLoc)
   624  	blkMetaInsertDeltaLoc := data.bats[BLKMetaInsertIDX].GetVectorByName(catalog.BlockMeta_DeltaLoc)
   625  	blkMetaInsertEntryState := data.bats[BLKMetaInsertIDX].GetVectorByName(catalog.BlockMeta_EntryState)
   626  	blkMetaInsertBlkID := data.bats[BLKMetaInsertIDX].GetVectorByName(catalog.BlockMeta_ID)
   627  
   628  	objInfoData := data.bats[ObjectInfoIDX]
   629  	objInfoStats := objInfoData.GetVectorByName(ObjectAttr_ObjectStats)
   630  	objInfoState := objInfoData.GetVectorByName(ObjectAttr_State)
   631  	objInfoTid := objInfoData.GetVectorByName(SnapshotAttr_TID)
   632  	objInfoDelete := objInfoData.GetVectorByName(EntryNode_DeleteAt)
   633  	objInfoCommit := objInfoData.GetVectorByName(txnbase.SnapshotAttr_CommitTS)
   634  
   635  	for i := 0; i < objInfoData.Length(); i++ {
   636  		stats := objectio.NewObjectStats()
   637  		stats.UnMarshal(objInfoStats.Get(i).([]byte))
   638  		isABlk := objInfoState.Get(i).(bool)
   639  		deleteAt := objInfoDelete.Get(i).(types.TS)
   640  		commitTS := objInfoCommit.Get(i).(types.TS)
   641  		tid := objInfoTid.Get(i).(uint64)
   642  		if commitTS.Less(&ts) {
   643  			panic(any(fmt.Sprintf("commitTs less than ts: %v-%v", commitTS.ToString(), ts.ToString())))
   644  		}
   645  
   646  		if isABlk && deleteAt.IsEmpty() {
   647  			panic(any(fmt.Sprintf("block %v deleteAt is empty", stats.ObjectName().String())))
   648  		}
   649  		addObjectToObjectData(stats, isABlk, !deleteAt.IsEmpty(), false, i, tid, &objectsData)
   650  	}
   651  
   652  	tnObjInfoData := data.bats[TNObjectInfoIDX]
   653  	tnObjInfoStats := tnObjInfoData.GetVectorByName(ObjectAttr_ObjectStats)
   654  	tnObjInfoState := tnObjInfoData.GetVectorByName(ObjectAttr_State)
   655  	tnObjInfoTid := tnObjInfoData.GetVectorByName(SnapshotAttr_TID)
   656  	tnObjInfoDelete := tnObjInfoData.GetVectorByName(EntryNode_DeleteAt)
   657  	tnObjInfoCommit := tnObjInfoData.GetVectorByName(txnbase.SnapshotAttr_CommitTS)
   658  	for i := 0; i < tnObjInfoData.Length(); i++ {
   659  		stats := objectio.NewObjectStats()
   660  		stats.UnMarshal(tnObjInfoStats.Get(i).([]byte))
   661  		isABlk := tnObjInfoState.Get(i).(bool)
   662  		deleteAt := tnObjInfoDelete.Get(i).(types.TS)
   663  		tid := tnObjInfoTid.Get(i).(uint64)
   664  		commitTS := tnObjInfoCommit.Get(i).(types.TS)
   665  
   666  		if commitTS.Less(&ts) {
   667  			panic(any(fmt.Sprintf("commitTs less than ts: %v-%v", commitTS.ToString(), ts.ToString())))
   668  		}
   669  
   670  		if stats.Extent().End() > 0 {
   671  			panic(any(fmt.Sprintf("extent end is not 0: %v, name is %v", stats.Extent().End(), stats.ObjectName().String())))
   672  		}
   673  		if !deleteAt.IsEmpty() {
   674  			panic(any(fmt.Sprintf("deleteAt is not empty: %v, name is %v", deleteAt.ToString(), stats.ObjectName().String())))
   675  		}
   676  		addObjectToObjectData(stats, isABlk, !deleteAt.IsEmpty(), true, i, tid, &objectsData)
   677  	}
   678  
   679  	if blkCNMetaInsert.Length() > 0 {
   680  		panic(any("blkCNMetaInsert is not empty"))
   681  	}
   682  
   683  	for i := 0; i < blkMetaInsert.Length(); i++ {
   684  		metaLoc := objectio.Location(blkMetaInsertMetaLoc.Get(i).([]byte))
   685  		deltaLoc := objectio.Location(blkMetaInsertDeltaLoc.Get(i).([]byte))
   686  		blkID := blkMetaInsertBlkID.Get(i).(types.Blockid)
   687  		isABlk := blkMetaInsertEntryState.Get(i).(bool)
   688  		if deltaLoc.IsEmpty() || !metaLoc.IsEmpty() {
   689  			panic(any(fmt.Sprintf("deltaLoc is empty: %v-%v", deltaLoc.String(), metaLoc.String())))
   690  		}
   691  		name := objectio.BuildObjectName(blkID.Segment(), blkID.Sequence())
   692  		if isABlk {
   693  			if objectsData[name.String()] == nil {
   694  				continue
   695  			}
   696  			if !objectsData[name.String()].isDeleteBatch {
   697  				panic(any(fmt.Sprintf("object %v is not deleteBatch", name.String())))
   698  			}
   699  			addBlockToObjectData(deltaLoc, isABlk, true, i,
   700  				blkMetaInsTxnBatTid.Get(i).(uint64), blkID, objectio.SchemaTombstone, &objectsData)
   701  			objectsData[name.String()].data[blkID.Sequence()].blockId = blkID
   702  			objectsData[name.String()].data[blkID.Sequence()].tombstone = objectsData[deltaLoc.Name().String()].data[deltaLoc.ID()]
   703  			if len(objectsData[name.String()].data[blkID.Sequence()].deleteRow) > 0 {
   704  				objectsData[name.String()].data[blkID.Sequence()].deleteRow = append(objectsData[name.String()].data[blkID.Sequence()].deleteRow, i)
   705  			} else {
   706  				objectsData[name.String()].data[blkID.Sequence()].deleteRow = []int{i}
   707  			}
   708  		} else {
   709  			if objectsData[name.String()] != nil {
   710  				if objectsData[name.String()].isDeleteBatch {
   711  					addBlockToObjectData(deltaLoc, isABlk, true, i,
   712  						blkMetaInsTxnBatTid.Get(i).(uint64), blkID, objectio.SchemaTombstone, &objectsData)
   713  					continue
   714  				}
   715  			}
   716  			addBlockToObjectData(deltaLoc, isABlk, false, i,
   717  				blkMetaInsTxnBatTid.Get(i).(uint64), blkID, objectio.SchemaTombstone, &objectsData)
   718  		}
   719  	}
   720  
   721  	phaseNumber = 3
   722  	// Trim object files based on timestamp
   723  	isCkpChange, err = trimObjectsData(ctx, fs, ts, &objectsData)
   724  	if err != nil {
   725  		return nil, nil, nil, err
   726  	}
   727  	if !isCkpChange {
   728  		return loc, tnLocation, files, nil
   729  	}
   730  
   731  	backupPool := dbutils.MakeDefaultSmallPool("backup-vector-pool")
   732  	defer backupPool.Destory()
   733  
   734  	insertBatch := make(map[uint64]*iBlocks)
   735  	insertObjBatch := make(map[uint64]*iObjects)
   736  
   737  	phaseNumber = 4
   738  	// Rewrite object file
   739  	for fileName, objectData := range objectsData {
   740  		if !objectData.isChange && !objectData.isDeleteBatch {
   741  			continue
   742  		}
   743  		dataBlocks := make([]*blockData, 0)
   744  		var blocks []objectio.BlockObject
   745  		var extent objectio.Extent
   746  		for _, block := range objectData.data {
   747  			dataBlocks = append(dataBlocks, block)
   748  		}
   749  		sort.Slice(dataBlocks, func(i, j int) bool {
   750  			return dataBlocks[i].num < dataBlocks[j].num
   751  		})
   752  
   753  		if objectData.isChange &&
   754  			(!objectData.isDeleteBatch || (objectData.data[0] != nil &&
   755  				objectData.data[0].blockType == objectio.SchemaTombstone)) {
   756  			// Rewrite the insert block/delete block file.
   757  			objectData.isDeleteBatch = false
   758  			writer, err := blockio.NewBlockWriter(dstFs, fileName)
   759  			if err != nil {
   760  				return nil, nil, nil, err
   761  			}
   762  			for _, block := range dataBlocks {
   763  				if block.sortKey != math.MaxUint16 {
   764  					writer.SetPrimaryKey(block.sortKey)
   765  				}
   766  				if block.blockType == objectio.SchemaData {
   767  					// TODO: maybe remove
   768  					_, err = writer.WriteBatch(block.data)
   769  					if err != nil {
   770  						return nil, nil, nil, err
   771  					}
   772  				} else if block.blockType == objectio.SchemaTombstone {
   773  					_, err = writer.WriteTombstoneBatch(block.data)
   774  					if err != nil {
   775  						return nil, nil, nil, err
   776  					}
   777  				}
   778  			}
   779  
   780  			blocks, extent, err = writer.Sync(ctx)
   781  			if err != nil {
   782  				if !moerr.IsMoErrCode(err, moerr.ErrFileAlreadyExists) {
   783  					return nil, nil, nil, err
   784  				}
   785  				err = fs.Delete(ctx, fileName)
   786  				if err != nil {
   787  					return nil, nil, nil, err
   788  				}
   789  				blocks, extent, err = writer.Sync(ctx)
   790  				if err != nil {
   791  					return nil, nil, nil, err
   792  				}
   793  			}
   794  		}
   795  
   796  		if objectData.isDeleteBatch &&
   797  			objectData.data[0] != nil &&
   798  			objectData.data[0].blockType != objectio.SchemaTombstone {
   799  			var blockLocation objectio.Location
   800  			if !objectData.isABlock {
   801  				// Case of merge nBlock
   802  				for _, dt := range dataBlocks {
   803  					if insertBatch[dataBlocks[0].tid] == nil {
   804  						insertBatch[dataBlocks[0].tid] = &iBlocks{
   805  							insertBlocks: make([]*insertBlock, 0),
   806  						}
   807  					}
   808  					ib := &insertBlock{
   809  						apply:     false,
   810  						deleteRow: dt.deleteRow[len(dt.deleteRow)-1],
   811  						data:      dt,
   812  					}
   813  					insertBatch[dataBlocks[0].tid].insertBlocks = append(insertBatch[dataBlocks[0].tid].insertBlocks, ib)
   814  				}
   815  			} else {
   816  				// For the aBlock that needs to be retained,
   817  				// the corresponding NBlock is generated and inserted into the corresponding batch.
   818  				if len(dataBlocks) > 2 {
   819  					panic(any(fmt.Sprintf("dataBlocks len > 2: %v - %d", dataBlocks[0].location.String(), len(dataBlocks))))
   820  				}
   821  				if objectData.data[0].tombstone != nil {
   822  					applyDelete(dataBlocks[0].data, objectData.data[0].tombstone.data, dataBlocks[0].blockId.String())
   823  				}
   824  				sortData := containers.ToTNBatch(dataBlocks[0].data, common.CheckpointAllocator)
   825  				if dataBlocks[0].sortKey != math.MaxUint16 {
   826  					_, err = mergesort.SortBlockColumns(sortData.Vecs, int(dataBlocks[0].sortKey), backupPool)
   827  					if err != nil {
   828  						return nil, nil, nil, err
   829  					}
   830  				}
   831  				dataBlocks[0].data = containers.ToCNBatch(sortData)
   832  				result := batch.NewWithSize(len(dataBlocks[0].data.Vecs) - 3)
   833  				for i := range result.Vecs {
   834  					result.Vecs[i] = dataBlocks[0].data.Vecs[i]
   835  				}
   836  				dataBlocks[0].data = result
   837  				fileNum := uint16(1000) + dataBlocks[0].location.Name().Num()
   838  				segment := dataBlocks[0].location.Name().SegmentId()
   839  				name := objectio.BuildObjectName(&segment, fileNum)
   840  
   841  				writer, err := blockio.NewBlockWriter(dstFs, name.String())
   842  				if err != nil {
   843  					return nil, nil, nil, err
   844  				}
   845  				if dataBlocks[0].sortKey != math.MaxUint16 {
   846  					writer.SetPrimaryKey(dataBlocks[0].sortKey)
   847  				}
   848  				_, err = writer.WriteBatch(dataBlocks[0].data)
   849  				if err != nil {
   850  					return nil, nil, nil, err
   851  				}
   852  				blocks, extent, err = writer.Sync(ctx)
   853  				if err != nil {
   854  					panic("sync error")
   855  				}
   856  				files = append(files, name.String())
   857  				blockLocation = objectio.BuildLocation(name, extent, blocks[0].GetRows(), blocks[0].GetID())
   858  				if insertBatch[dataBlocks[0].tid] == nil {
   859  					insertBatch[dataBlocks[0].tid] = &iBlocks{
   860  						insertBlocks: make([]*insertBlock, 0),
   861  					}
   862  				}
   863  				ib := &insertBlock{
   864  					location: blockLocation,
   865  					blockId:  *objectio.BuildObjectBlockid(name, blocks[0].GetID()),
   866  					apply:    false,
   867  				}
   868  				if len(dataBlocks[0].deleteRow) > 0 {
   869  					ib.deleteRow = dataBlocks[0].deleteRow[0]
   870  				}
   871  				insertBatch[dataBlocks[0].tid].insertBlocks = append(insertBatch[dataBlocks[0].tid].insertBlocks, ib)
   872  
   873  				if objectData.obj != nil {
   874  					objectData.obj.stats = &writer.GetObjectStats()[objectio.SchemaData]
   875  				}
   876  			}
   877  			if objectData.obj != nil {
   878  				obj := objectData.obj
   879  				if insertObjBatch[obj.tid] == nil {
   880  					insertObjBatch[obj.tid] = &iObjects{
   881  						rowObjects: make([]*insertObjects, 0),
   882  					}
   883  				}
   884  				io := &insertObjects{
   885  					location: blockLocation,
   886  					apply:    false,
   887  					obj:      obj,
   888  				}
   889  				insertObjBatch[obj.tid].rowObjects = append(insertObjBatch[obj.tid].rowObjects, io)
   890  			}
   891  		} else {
   892  			if objectData.isDeleteBatch && objectData.data[0] == nil {
   893  				if !objectData.isABlock {
   894  					// Case of merge nBlock
   895  					if insertObjBatch[objectData.obj.tid] == nil {
   896  						insertObjBatch[objectData.obj.tid] = &iObjects{
   897  							rowObjects: make([]*insertObjects, 0),
   898  						}
   899  					}
   900  					io := &insertObjects{
   901  						apply: false,
   902  						obj:   objectData.obj,
   903  					}
   904  					insertObjBatch[objectData.obj.tid].rowObjects = append(insertObjBatch[objectData.obj.tid].rowObjects, io)
   905  				} else {
   906  					sortData := containers.ToTNBatch(objectData.obj.data[0], common.CheckpointAllocator)
   907  					if objectData.obj.sortKey != math.MaxUint16 {
   908  						_, err = mergesort.SortBlockColumns(sortData.Vecs, int(objectData.obj.sortKey), backupPool)
   909  						if err != nil {
   910  							return nil, nil, nil, err
   911  						}
   912  					}
   913  					objectData.obj.data[0] = containers.ToCNBatch(sortData)
   914  					result := batch.NewWithSize(len(objectData.obj.data[0].Vecs) - 3)
   915  					for i := range result.Vecs {
   916  						result.Vecs[i] = objectData.obj.data[0].Vecs[i]
   917  					}
   918  					objectData.obj.data[0] = result
   919  					fileNum := uint16(1000) + objectData.obj.stats.ObjectName().Num()
   920  					segment := objectData.obj.stats.ObjectName().SegmentId()
   921  					name := objectio.BuildObjectName(&segment, fileNum)
   922  
   923  					writer, err := blockio.NewBlockWriter(dstFs, name.String())
   924  					if err != nil {
   925  						return nil, nil, nil, err
   926  					}
   927  					if objectData.obj.sortKey != math.MaxUint16 {
   928  						writer.SetPrimaryKey(objectData.obj.sortKey)
   929  					}
   930  					_, err = writer.WriteBatch(objectData.obj.data[0])
   931  					if err != nil {
   932  						return nil, nil, nil, err
   933  					}
   934  					blocks, extent, err = writer.Sync(ctx)
   935  					if err != nil {
   936  						panic("sync error")
   937  					}
   938  					files = append(files, name.String())
   939  					blockLocation := objectio.BuildLocation(name, extent, blocks[0].GetRows(), blocks[0].GetID())
   940  					obj := objectData.obj
   941  					if insertObjBatch[obj.tid] == nil {
   942  						insertObjBatch[obj.tid] = &iObjects{
   943  							rowObjects: make([]*insertObjects, 0),
   944  						}
   945  					}
   946  					obj.stats = &writer.GetObjectStats()[objectio.SchemaData]
   947  					objectio.SetObjectStatsObjectName(obj.stats, blockLocation.Name())
   948  					io := &insertObjects{
   949  						location: blockLocation,
   950  						apply:    false,
   951  						obj:      obj,
   952  					}
   953  					insertObjBatch[obj.tid].rowObjects = append(insertObjBatch[obj.tid].rowObjects, io)
   954  				}
   955  			}
   956  
   957  			for i := range dataBlocks {
   958  				blockLocation := dataBlocks[i].location
   959  				if objectData.isChange {
   960  					blockLocation = objectio.BuildLocation(objectData.name, extent, blocks[uint16(i)].GetRows(), dataBlocks[i].num)
   961  				}
   962  				for _, insertRow := range dataBlocks[i].insertRow {
   963  					if dataBlocks[uint16(i)].blockType == objectio.SchemaTombstone {
   964  						data.bats[BLKMetaInsertIDX].GetVectorByName(catalog.BlockMeta_DeltaLoc).Update(
   965  							insertRow,
   966  							[]byte(blockLocation),
   967  							false)
   968  						data.bats[BLKMetaInsertTxnIDX].GetVectorByName(catalog.BlockMeta_DeltaLoc).Update(
   969  							insertRow,
   970  							[]byte(blockLocation),
   971  							false)
   972  					}
   973  				}
   974  				for _, deleteRow := range dataBlocks[uint16(i)].deleteRow {
   975  					if dataBlocks[uint16(i)].blockType == objectio.SchemaTombstone {
   976  						data.bats[BLKMetaInsertIDX].GetVectorByName(catalog.BlockMeta_DeltaLoc).Update(
   977  							deleteRow,
   978  							[]byte(blockLocation),
   979  							false)
   980  						data.bats[BLKMetaInsertTxnIDX].GetVectorByName(catalog.BlockMeta_DeltaLoc).Update(
   981  							deleteRow,
   982  							[]byte(blockLocation),
   983  							false)
   984  					}
   985  				}
   986  			}
   987  		}
   988  	}
   989  
   990  	phaseNumber = 5
   991  	// Transfer the object file that needs to be deleted to insert
   992  	if len(insertBatch) > 0 {
   993  		blkMeta := makeRespBatchFromSchema(checkpointDataSchemas_Curr[BLKMetaInsertIDX], common.CheckpointAllocator)
   994  		blkMetaTxn := makeRespBatchFromSchema(checkpointDataSchemas_Curr[BLKMetaInsertTxnIDX], common.CheckpointAllocator)
   995  		for i := 0; i < blkMetaInsert.Length(); i++ {
   996  			tid := data.bats[BLKMetaInsertTxnIDX].GetVectorByName(SnapshotAttr_TID).Get(i).(uint64)
   997  			appendValToBatch(data.bats[BLKMetaInsertIDX], blkMeta, i)
   998  			appendValToBatch(data.bats[BLKMetaInsertTxnIDX], blkMetaTxn, i)
   999  			if insertBatch[tid] != nil {
  1000  				for b, blk := range insertBatch[tid].insertBlocks {
  1001  					if blk.apply {
  1002  						continue
  1003  					}
  1004  					if insertBatch[tid].insertBlocks[b].data == nil {
  1005  
  1006  					} else {
  1007  						insertBatch[tid].insertBlocks[b].apply = true
  1008  
  1009  						row := blkMeta.Vecs[0].Length() - 1
  1010  						if !blk.location.IsEmpty() {
  1011  							sort := true
  1012  							if insertBatch[tid].insertBlocks[b].data != nil &&
  1013  								insertBatch[tid].insertBlocks[b].data.isABlock &&
  1014  								insertBatch[tid].insertBlocks[b].data.sortKey == math.MaxUint16 {
  1015  								sort = false
  1016  							}
  1017  							updateBlockMeta(blkMeta, blkMetaTxn, row,
  1018  								insertBatch[tid].insertBlocks[b].blockId,
  1019  								insertBatch[tid].insertBlocks[b].location,
  1020  								sort)
  1021  						}
  1022  					}
  1023  				}
  1024  			}
  1025  		}
  1026  
  1027  		for tid := range insertBatch {
  1028  			for b := range insertBatch[tid].insertBlocks {
  1029  				if insertBatch[tid].insertBlocks[b].apply {
  1030  					continue
  1031  				}
  1032  				if insertBatch[tid] != nil && !insertBatch[tid].insertBlocks[b].apply {
  1033  					insertBatch[tid].insertBlocks[b].apply = true
  1034  					if insertBatch[tid].insertBlocks[b].data == nil {
  1035  
  1036  					} else {
  1037  						i := blkMeta.Vecs[0].Length() - 1
  1038  						if !insertBatch[tid].insertBlocks[b].location.IsEmpty() {
  1039  							sort := true
  1040  							if insertBatch[tid].insertBlocks[b].data != nil &&
  1041  								insertBatch[tid].insertBlocks[b].data.isABlock &&
  1042  								insertBatch[tid].insertBlocks[b].data.sortKey == math.MaxUint16 {
  1043  								sort = false
  1044  							}
  1045  							updateBlockMeta(blkMeta, blkMetaTxn, i,
  1046  								insertBatch[tid].insertBlocks[b].blockId,
  1047  								insertBatch[tid].insertBlocks[b].location,
  1048  								sort)
  1049  						}
  1050  					}
  1051  				}
  1052  			}
  1053  		}
  1054  
  1055  		for i := range insertBatch {
  1056  			for _, block := range insertBatch[i].insertBlocks {
  1057  				if block.data != nil {
  1058  					for _, cnRow := range block.data.deleteRow {
  1059  						if block.data.isABlock {
  1060  							data.bats[BLKMetaInsertIDX].Delete(cnRow)
  1061  							data.bats[BLKMetaInsertTxnIDX].Delete(cnRow)
  1062  						}
  1063  					}
  1064  				}
  1065  			}
  1066  		}
  1067  
  1068  		data.bats[BLKMetaInsertIDX].Compact()
  1069  		data.bats[BLKMetaInsertTxnIDX].Compact()
  1070  		tableInsertOff := make(map[uint64]*tableOffset)
  1071  		for i := 0; i < blkMetaTxn.Vecs[0].Length(); i++ {
  1072  			tid := blkMetaTxn.GetVectorByName(SnapshotAttr_TID).Get(i).(uint64)
  1073  			if tableInsertOff[tid] == nil {
  1074  				tableInsertOff[tid] = &tableOffset{
  1075  					offset: i,
  1076  					end:    i,
  1077  				}
  1078  			}
  1079  			tableInsertOff[tid].end += 1
  1080  		}
  1081  
  1082  		for tid, table := range tableInsertOff {
  1083  			data.UpdateBlockInsertBlkMeta(tid, int32(table.offset), int32(table.end))
  1084  		}
  1085  		data.bats[BLKMetaInsertIDX].Close()
  1086  		data.bats[BLKMetaInsertTxnIDX].Close()
  1087  		data.bats[BLKMetaInsertIDX] = blkMeta
  1088  		data.bats[BLKMetaInsertTxnIDX] = blkMetaTxn
  1089  	}
  1090  
  1091  	phaseNumber = 6
  1092  	if len(insertObjBatch) > 0 {
  1093  		deleteRow := make([]int, 0)
  1094  		objectInfoMeta := makeRespBatchFromSchema(checkpointDataSchemas_Curr[ObjectInfoIDX], common.CheckpointAllocator)
  1095  		infoInsert := make(map[int]*objData, 0)
  1096  		infoDelete := make(map[int]bool, 0)
  1097  		for tid := range insertObjBatch {
  1098  			for i := range insertObjBatch[tid].rowObjects {
  1099  				if insertObjBatch[tid].rowObjects[i].apply {
  1100  					continue
  1101  				}
  1102  				if !insertObjBatch[tid].rowObjects[i].location.IsEmpty() {
  1103  					obj := insertObjBatch[tid].rowObjects[i].obj
  1104  					if infoInsert[obj.infoDel[0]] != nil {
  1105  						panic("should not have info insert")
  1106  					}
  1107  					objectio.SetObjectStatsExtent(insertObjBatch[tid].rowObjects[i].obj.stats, insertObjBatch[tid].rowObjects[i].location.Extent())
  1108  					objectio.SetObjectStatsObjectName(insertObjBatch[tid].rowObjects[i].obj.stats, insertObjBatch[tid].rowObjects[i].location.Name())
  1109  					infoInsert[obj.infoDel[0]] = insertObjBatch[tid].rowObjects[i].obj
  1110  					if len(obj.infoTNRow) > 0 {
  1111  						data.bats[TNObjectInfoIDX].Delete(obj.infoTNRow[0])
  1112  					}
  1113  				} else {
  1114  					if infoDelete[insertObjBatch[tid].rowObjects[i].obj.infoDel[0]] {
  1115  						panic("should not have info delete")
  1116  					}
  1117  					infoDelete[insertObjBatch[tid].rowObjects[i].obj.infoDel[0]] = true
  1118  				}
  1119  			}
  1120  
  1121  		}
  1122  		for i := 0; i < objInfoData.Length(); i++ {
  1123  			appendValToBatch(objInfoData, objectInfoMeta, i)
  1124  			if infoInsert[i] != nil && infoDelete[i] {
  1125  				panic("info should not have info delete")
  1126  			}
  1127  			if infoInsert[i] != nil {
  1128  				appendValToBatch(objInfoData, objectInfoMeta, i)
  1129  				row := objectInfoMeta.Length() - 1
  1130  				objectInfoMeta.GetVectorByName(ObjectAttr_ObjectStats).Update(row, infoInsert[i].stats[:], false)
  1131  				objectInfoMeta.GetVectorByName(ObjectAttr_State).Update(row, false, false)
  1132  				objectInfoMeta.GetVectorByName(EntryNode_DeleteAt).Update(row, types.TS{}, false)
  1133  			}
  1134  
  1135  			if infoDelete[i] {
  1136  				deleteRow = append(deleteRow, objectInfoMeta.Length()-1)
  1137  			}
  1138  		}
  1139  		for i := range deleteRow {
  1140  			objectInfoMeta.Delete(deleteRow[i])
  1141  		}
  1142  		data.bats[TNObjectInfoIDX].Compact()
  1143  		objectInfoMeta.Compact()
  1144  		data.bats[ObjectInfoIDX].Close()
  1145  		data.bats[ObjectInfoIDX] = objectInfoMeta
  1146  		tableInsertOff := make(map[uint64]*tableOffset)
  1147  		for i := 0; i < objectInfoMeta.Vecs[0].Length(); i++ {
  1148  			tid := objectInfoMeta.GetVectorByName(SnapshotAttr_TID).Get(i).(uint64)
  1149  			if tableInsertOff[tid] == nil {
  1150  				tableInsertOff[tid] = &tableOffset{
  1151  					offset: i,
  1152  					end:    i,
  1153  				}
  1154  			}
  1155  			tableInsertOff[tid].end += 1
  1156  		}
  1157  
  1158  		for tid, table := range tableInsertOff {
  1159  			data.UpdateObjectInsertMeta(tid, int32(table.offset), int32(table.end))
  1160  		}
  1161  	}
  1162  	cnLocation, dnLocation, checkpointFiles, err := data.WriteTo(dstFs, DefaultCheckpointBlockRows, DefaultCheckpointSize)
  1163  	if err != nil {
  1164  		return nil, nil, nil, err
  1165  	}
  1166  	logutil.Info("[Done]",
  1167  		common.AnyField("checkpoint", cnLocation.String()),
  1168  		common.OperationField("ReWrite Checkpoint"),
  1169  		common.AnyField("new object", checkpointFiles))
  1170  	loc = cnLocation
  1171  	tnLocation = dnLocation
  1172  	files = append(files, checkpointFiles...)
  1173  	files = append(files, cnLocation.Name().String())
  1174  	return loc, tnLocation, files, nil
  1175  }