github.com/matrixorigin/matrixone@v1.2.0/pkg/vm/engine/tae/db/test/storage_usage_test.go

// Copyright 2023 Matrix Origin
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//	http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package test

import (
	"context"
	"fmt"
	"math"
	"math/rand"
	"sort"
	"sync/atomic"
	"testing"
	"time"
	"unsafe"

	pkgcatalog "github.com/matrixorigin/matrixone/pkg/catalog"
	"github.com/matrixorigin/matrixone/pkg/common/moerr"
	"github.com/matrixorigin/matrixone/pkg/container/types"
	"github.com/matrixorigin/matrixone/pkg/container/vector"
	"github.com/matrixorigin/matrixone/pkg/vm/engine/tae/catalog"
	"github.com/matrixorigin/matrixone/pkg/vm/engine/tae/common"
	"github.com/matrixorigin/matrixone/pkg/vm/engine/tae/containers"
	"github.com/matrixorigin/matrixone/pkg/vm/engine/tae/logtail"
	"github.com/matrixorigin/matrixone/pkg/vm/engine/tae/txn/txnbase"
	"github.com/stretchr/testify/require"
)

func Test_StorageUsageCache(t *testing.T) {
	// new cache with a 1-second lazy threshold
	cache := logtail.NewStorageUsageCache(logtail.WithLazyThreshold(1))
	cache.Lock()
	defer cache.Unlock()

	require.True(t, cache.IsExpired())

	allocator := atomic.Uint64{}

	accCnt, dbCnt, tblCnt := 10, 10, 10
	usages := logtail.MockUsageData(accCnt, dbCnt, tblCnt, &allocator)
	for idx := range usages {
		cache.SetOrReplace(usages[idx])
	}

	fmt.Println(cache.String())

	// 1. test expired
	{
		require.Equal(t, len(usages), cache.CacheLen())
		require.False(t, cache.IsExpired())

		time.Sleep(time.Second * 1)
		require.True(t, cache.IsExpired())
	}

	// 2. test the less func
	{
		sort.Slice(usages, func(i, j int) bool {
			return cache.LessFunc()(usages[i], usages[j])
		})

		for idx := 1; idx < len(usages); idx++ {
			require.True(t, usages[idx].AccId >= usages[idx-1].AccId)

			if usages[idx].AccId == usages[idx-1].AccId {
				require.True(t, usages[idx].DbId >= usages[idx-1].DbId)

				if usages[idx].DbId == usages[idx-1].DbId {
					require.True(t, usages[idx].TblId >= usages[idx-1].TblId)
				}
			}
		}
	}

	// 3. test gather account size
	{
		totalSize := uint64(0)
		for idx := 0; idx < len(usages); idx++ {
			totalSize += usages[idx].Size
		}

		gathered := cache.GatherAllAccSize()
		for accId := range gathered {
			totalSize -= gathered[accId]

			size, exist := cache.GatherAccountSize(accId)
			require.True(t, exist)
			require.Equal(t, gathered[accId], size)
		}

		require.Equal(t, uint64(0), totalSize)

		size := uint64(0)
		preAccId := usages[0].AccId
		for idx := 0; idx < len(usages); idx++ {
			if usages[idx].AccId == preAccId {
				size += usages[idx].Size
				continue
			}

			gsize, exist := cache.GatherAccountSize(preAccId)
			require.True(t, exist)
			require.Equal(t, size, gsize)

			size = usages[idx].Size
			preAccId = usages[idx].AccId
		}
	}
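
	// The mem-used check below appears to treat cache.MemUsed() as megabytes:
	// the expected lower bound is unsafe.Sizeof(logtail.UsageData{}) per cached
	// entry converted to MB, so it guards against grossly wrong accounting
	// rather than asserting an exact footprint.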

	// 4. test mem used
	{
		require.Equal(t, len(usages), cache.CacheLen())
		used := float64(int(unsafe.Sizeof(logtail.UsageData{})) * len(usages))
		require.True(t, cache.MemUsed() > math.Round(used/1048576.0*1e6)/10e6)
	}

	// 5. test delete and get
	{
		for idx := 0; usages[idx].AccId == usages[0].AccId; idx++ {
			cache.Delete(usages[idx])
			_, exist := cache.Get(usages[idx])
			require.False(t, exist)
		}

		require.False(t, cache.IsExpired())

		_, exist := cache.GatherAccountSize(usages[0].AccId)
		require.False(t, exist)
	}
}

func mockDeletesAndInserts(
	usages []logtail.UsageData,
	delDbIds, delTblIds map[uint64]int,
	delSegIdxes, insSegIdxes map[int]struct{}) (
	[]interface{}, []*catalog.ObjectEntry, []*catalog.ObjectEntry) {
	var deletes []interface{}
	var segInserts []*catalog.ObjectEntry
	var segDeletes []*catalog.ObjectEntry

	// mock deletes, inserts
	{
		// db deletes
		for idx := range usages {
			if _, ok := delDbIds[usages[idx].DbId]; !ok {
				continue
			}
			deletes = append(deletes,
				catalog.MockDBEntryWithAccInfo(usages[idx].AccId, usages[idx].DbId))
		}

		// tbl deletes
		for idx := range usages {
			if _, ok := delTblIds[usages[idx].TblId]; !ok {
				continue
			}
			deletes = append(deletes,
				catalog.MockTableEntryWithDB(
					catalog.MockDBEntryWithAccInfo(
						usages[idx].AccId, usages[idx].DbId), usages[idx].TblId))
		}

		// segment deletes
		for idx := range usages {
			if _, ok := delSegIdxes[idx]; !ok {
				continue
			}
			segDeletes = append(segDeletes,
				catalog.MockObjEntryWithTbl(
					catalog.MockTableEntryWithDB(
						catalog.MockDBEntryWithAccInfo(usages[idx].AccId, usages[idx].DbId),
						usages[idx].TblId), usages[idx].Size))
		}

		// segment inserts
		for idx := range usages {
			if _, ok := insSegIdxes[idx]; !ok {
				continue
			}
			segInserts = append(segInserts,
				catalog.MockObjEntryWithTbl(
					catalog.MockTableEntryWithDB(
						catalog.MockDBEntryWithAccInfo(usages[idx].AccId, usages[idx].DbId),
						usages[idx].TblId), usages[idx].Size))
		}
	}

	return deletes, segDeletes, segInserts
}

func Test_FillUsageBatOfIncremental(t *testing.T) {
	allocator := atomic.Uint64{}
	allocator.Store(pkgcatalog.MO_RESERVED_MAX + 1)

	accCnt, dbCnt, tblCnt := 10, 10, 10
	usages := logtail.MockUsageData(accCnt, dbCnt, tblCnt, &allocator)

	memo := logtail.NewTNUsageMemo()
	memo.Clear()

	sort.Slice(usages, func(i, j int) bool {
		return memo.GetCache().LessFunc()(usages[i], usages[j])
	})

	delDbCnt, delTblCnt, delSegCnt := 2, 3, 7
	delDbIds := make(map[uint64]int)
	delTblIds := make(map[uint64]int)
	delSegIdxes := make(map[int]struct{})
	insSegIdxes := make(map[int]struct{})

	// generate deletes
	{
		for i := 0; i < delDbCnt; i++ {
			idx := rand.Int() % len(usages)
			delDbIds[usages[idx].DbId] = idx
		}

		for i := 0; i < delTblCnt; i++ {
			idx := rand.Int() % len(usages)
			delTblIds[usages[idx].TblId] = idx
		}

		for i := 0; i < delSegCnt; i++ {
			delSegIdxes[rand.Int()%len(usages)] = struct{}{}
		}

		for i := 0; i < len(usages); i++ {
			insSegIdxes[i] = struct{}{}
		}
	}

	deletes, segDeletes, segInserts := mockDeletesAndInserts(
		usages, delDbIds, delTblIds, delSegIdxes, insSegIdxes)

	iCollector := logtail.NewIncrementalCollector(types.TS{}, types.MaxTs(), false)
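	// Wire the memo and the mocked catalog deletes / object inserts and
	// deletes into the collector; FillUsageBatOfIncremental then applies
	// them to the memo.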
	iCollector.UsageMemo = memo
	defer iCollector.Close()

	iCollector.Usage.Deletes = deletes
	iCollector.Usage.ObjDeletes = segDeletes
	iCollector.Usage.ObjInserts = segInserts

	logtail.FillUsageBatOfIncremental(iCollector)

	var delUsages []logtail.UsageData

	// test apply inserts and deletes
	{
		for idx := range usages {
			old, exist := memo.Get(usages[idx])
			_, ok1 := delDbIds[usages[idx].DbId]
			_, ok2 := delTblIds[usages[idx].TblId]
			if ok1 || ok2 {
				require.Equal(t, logtail.UsageData{}, old)
				require.False(t, exist)
				continue
			}

			if _, ok := delSegIdxes[idx]; ok {
				require.Equal(t, old.Size, uint64(0))
				require.True(t, exist)
			}
		}

		// gather all deletes
		for idx := range usages {
			if _, ok := delTblIds[usages[idx].TblId]; ok {
				delUsages = append(delUsages, usages[idx])
			}
		}

		for idx := range usages {
			if _, ok := delTblIds[usages[idx].TblId]; ok {
				continue
			}
			if _, ok := delDbIds[usages[idx].DbId]; ok {
				delUsages = append(delUsages, usages[idx])
			}
		}

		for idx := range usages {
			_, ok1 := delDbIds[usages[idx].DbId]
			_, ok2 := delTblIds[usages[idx].TblId]
			if ok1 || ok2 {
				continue
			}
			if _, ok := delSegIdxes[idx]; ok {
				last := &delUsages[len(delUsages)-1]

				if last.TblId == usages[idx].TblId &&
					last.AccId == usages[idx].AccId &&
					last.DbId == usages[idx].DbId {
					last.Size += usages[idx].Size
				} else {
					delUsages = append(delUsages, usages[idx])
				}
			}
		}
	}

	// test append to checkpoint
	{
		ckpData := iCollector.OrphanData()
		defer ckpData.Close()

		delBat := ckpData.GetBatches()[logtail.StorageUsageDelIDX]
		//insBat := ckpData.GetBatches()[logtail.StorageUsageInsIDX]

		accCol := vector.MustFixedCol[uint64](delBat.GetVectorByName(pkgcatalog.SystemColAttr_AccID).GetDownstreamVector())
		dbCol := vector.MustFixedCol[uint64](delBat.GetVectorByName(catalog.SnapshotAttr_DBID).GetDownstreamVector())
		tblCol := vector.MustFixedCol[uint64](delBat.GetVectorByName(catalog.SnapshotAttr_TID).GetDownstreamVector())
		sizeCol := vector.MustFixedCol[uint64](delBat.GetVectorByName(logtail.CheckpointMetaAttr_ObjectSize).GetDownstreamVector())

		require.Equal(t, len(accCol), len(delUsages))

		for idx := range accCol {
			require.Equal(t, accCol[idx], delUsages[idx].AccId)
			require.Equal(t, dbCol[idx], delUsages[idx].DbId)
			require.Equal(t, tblCol[idx], delUsages[idx].TblId)
			require.Equal(t, sizeCol[idx], delUsages[idx].Size)
		}
	}
}

func Test_FillUsageBatOfGlobal(t *testing.T) {
	allocator := atomic.Uint64{}
	allocator.Store(pkgcatalog.MO_RESERVED_MAX + 1)

	accCnt, dbCnt, tblCnt := 10, 10, 10
	usages := logtail.MockUsageData(accCnt, dbCnt, tblCnt, &allocator)

	memo := logtail.NewTNUsageMemo()
	memo.Clear()

	gCollector := logtail.NewGlobalCollector(types.TS{}, time.Second)
	gCollector.UsageMemo = memo
	defer gCollector.Close()

	for idx := range usages {
		memo.DeltaUpdate(usages[idx], false)
		gCollector.Usage.ReservedAccIds[usages[idx].AccId] = struct{}{}
	}

	// test memo replay to global ckp
	{
		logtail.FillUsageBatOfGlobal(gCollector)

		ckpData := gCollector.OrphanData()
		defer ckpData.Close()

		delBat := ckpData.GetBatches()[logtail.StorageUsageDelIDX]
		require.Equal(t, delBat.GetVectorByName(pkgcatalog.SystemColAttr_AccID).Length(), 0)
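
		// A global checkpoint replays the memo as a full snapshot: every entry
		// should land in the insert batch, leaving the delete batch empty
		// (asserted above).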

		insBat := ckpData.GetBatches()[logtail.StorageUsageInsIDX]
		require.Equal(t, insBat.GetVectorByName(pkgcatalog.SystemColAttr_AccID).Length(), len(usages))

		// usage data in the memo is kept ordered
		sort.Slice(usages, func(i, j int) bool {
			return memo.GetCache().LessFunc()(usages[i], usages[j])
		})

		accCol := vector.MustFixedCol[uint64](insBat.GetVectorByName(pkgcatalog.SystemColAttr_AccID).GetDownstreamVector())
		dbCol := vector.MustFixedCol[uint64](insBat.GetVectorByName(catalog.SnapshotAttr_DBID).GetDownstreamVector())
		tblCol := vector.MustFixedCol[uint64](insBat.GetVectorByName(catalog.SnapshotAttr_TID).GetDownstreamVector())
		sizeCol := vector.MustFixedCol[uint64](insBat.GetVectorByName(logtail.CheckpointMetaAttr_ObjectSize).GetDownstreamVector())

		for idx := 0; idx < len(accCol); idx++ {
			require.Equal(t, accCol[idx], usages[idx].AccId)
			require.Equal(t, dbCol[idx], usages[idx].DbId)
			require.Equal(t, tblCol[idx], usages[idx].TblId)
			require.Equal(t, sizeCol[idx], usages[idx].Size)
		}
	}
}

func appendUsageToBatch(bat *containers.Batch, usage logtail.UsageData) {
	accVec := bat.GetVectorByName(pkgcatalog.SystemColAttr_AccID).GetDownstreamVector()
	dbVec := bat.GetVectorByName(catalog.SnapshotAttr_DBID).GetDownstreamVector()
	tblVec := bat.GetVectorByName(catalog.SnapshotAttr_TID).GetDownstreamVector()
	sizeVec := bat.GetVectorByName(logtail.CheckpointMetaAttr_ObjectSize).GetDownstreamVector()

	vector.AppendFixed(accVec, usage.AccId, false, common.DebugAllocator)
	vector.AppendFixed(dbVec, usage.DbId, false, common.DebugAllocator)
	vector.AppendFixed(tblVec, usage.TblId, false, common.DebugAllocator)
	vector.AppendFixed(sizeVec, usage.Size, false, common.DebugAllocator)
}

func Test_EstablishFromCheckpoints(t *testing.T) {
	version8Cnt, version9Cnt, version11Cnt := 3, 4, 5
	allocator := atomic.Uint64{}
	allocator.Store(pkgcatalog.MO_RESERVED_MAX + 1)

	ckps := make([]*logtail.CheckpointData, 0, version8Cnt+version9Cnt+version11Cnt)
	vers := make([]uint32, 0, version8Cnt+version9Cnt+version11Cnt)

	for idx := 0; idx < version8Cnt; idx++ {
		data := logtail.NewCheckpointDataWithVersion(logtail.CheckpointVersion8, common.DebugAllocator)
		ckps = append(ckps, data)
		vers = append(vers, logtail.CheckpointVersion8)
	}

	var usageIns, usageDel []logtail.UsageData

	for idx := 0; idx < version9Cnt; idx++ {
		data := logtail.NewCheckpointDataWithVersion(logtail.CheckpointVersion9, common.DebugAllocator)
		insBat := data.GetBatches()[logtail.StorageUsageInsIDX]

		usages := logtail.MockUsageData(10, 10, 10, &allocator)
		usageIns = append(usageIns, usages...)

		for xx := range usages {
			appendUsageToBatch(insBat, usages[xx])
		}

		ckps = append(ckps, data)
		vers = append(vers, logtail.CheckpointVersion9)
	}

	for idx := 0; idx < version11Cnt; idx++ {
		data := logtail.NewCheckpointDataWithVersion(logtail.CheckpointVersion11, common.DebugAllocator)
		insBat := data.GetBatches()[logtail.StorageUsageInsIDX]
		delBat := data.GetBatches()[logtail.StorageUsageDelIDX]

		usages := logtail.MockUsageData(10, 10, 10, &allocator)
		usageIns = append(usageIns, usages...)
		for xx := range usages {
			appendUsageToBatch(insBat, usages[xx])
		}

		usages = logtail.MockUsageData(10, 10, 10, &allocator)
		usageDel = append(usageDel, usages...)
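		// version 11 checkpoints also carry a storage-usage delete batch,
		// so the second mocked set is appended there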
		for xx := range usages {
			appendUsageToBatch(delBat, usages[xx])
		}

		ckps = append(ckps, data)
		vers = append(vers, logtail.CheckpointVersion11)
	}

	memo := logtail.NewTNUsageMemo()
	memo.Clear()

	memo.PrepareReplay(ckps, vers)
	memo.EstablishFromCKPs(nil)

	memoShadow := logtail.NewTNUsageMemo()
	for idx := range usageIns {
		memoShadow.DeltaUpdate(usageIns[idx], false)
	}

	for idx := range usageDel {
		memoShadow.DeltaUpdate(usageDel[idx], true)
	}

	require.Equal(t, memo.CacheLen(), memoShadow.CacheLen())

	iter := memoShadow.GetCache().Iter()
	for iter.Next() {
		usage, exist := memo.Get(iter.Item())
		require.True(t, exist)
		//fmt.Println(usage)
		//fmt.Println(iter.Item())
		//fmt.Println()
		require.Equal(t, usage, iter.Item())
	}
	iter.Release()
}

func Test_RemoveStaleAccounts(t *testing.T) {
	// clearing stale accounts happens in the global ckp
	allocator := atomic.Uint64{}
	allocator.Store(pkgcatalog.MO_RESERVED_MAX + 1)

	accCnt, dbCnt, tblCnt := 10000, 2, 2
	usages := logtail.MockUsageData(accCnt, dbCnt, tblCnt, &allocator)

	gCollector := logtail.NewGlobalCollector(types.TS{}, time.Second)
	gCollector.UsageMemo = logtail.NewTNUsageMemo()
	defer gCollector.Close()

	for idx := range usages {
		gCollector.UsageMemo.DeltaUpdate(usages[idx], false)
		if rand.Int()%3 == 0 {
			// mock the account's deletion
			continue
		}
		gCollector.Usage.ReservedAccIds[usages[idx].AccId] = struct{}{}
	}

	logtail.FillUsageBatOfGlobal(gCollector)

	sizes := gCollector.UsageMemo.GatherAllAccSize()

	require.Equal(t, len(gCollector.Usage.ReservedAccIds), len(sizes))

	for accId := range sizes {
		_, ok := gCollector.Usage.ReservedAccIds[accId]
		require.True(t, ok)
	}
}

func mockCkpDataWithVersion(version uint32, cnt int) (ckpDats []*logtail.CheckpointData, usages [][]logtail.UsageData) {
	allocator := atomic.Uint64{}
	allocator.Store(pkgcatalog.MO_RESERVED_MAX + 1)

	for i := 0; i < cnt; i++ {
		data := logtail.NewCheckpointDataWithVersion(version, common.DebugAllocator)

		usage := logtail.MockUsageData(10, 10, 10, &allocator)
		for xx := range usage {
			appendUsageToBatch(data.GetBatches()[logtail.StorageUsageInsIDX], usage[xx])
		}

		ckpDats = append(ckpDats, data)
		usages = append(usages, usage)
	}

	return
}

func Test_UpdateDataFromOldVersion(t *testing.T) {
	memo := logtail.NewTNUsageMemo()
	ctlog := catalog.MockCatalog()
	defer ctlog.Close()

	ctlog.SetUsageMemo(memo)

	ckpDatas, _ := mockCkpDataWithVersion(logtail.CheckpointVersion9, 1)

	// phase 1: all db/tbl have been deleted
	{
		memo.PrepareReplay(ckpDatas, []uint32{logtail.CheckpointVersion9})
		memo.EstablishFromCKPs(ctlog)

		require.Equal(t, 0, len(memo.GetDelayed()))
		require.Equal(t, 0, memo.CacheLen())

		for idx := range ckpDatas {
			require.Nil(t, ckpDatas[idx])
		}
	}

	createdTbl := make([]logtail.UsageData, 0)

	// phase 2: only part of them have been deleted
	{
		txnMgr := txnbase.NewTxnManager(
			catalog.MockTxnStoreFactory(ctlog),
			catalog.MockTxnFactory(ctlog),
			types.NewMockHLCClock(1))

		ctx, cancel := context.WithTimeout(context.Background(), time.Second*10)
		defer cancel()

		txnMgr.Start(ctx)
		defer txnMgr.Stop()

		txn, _ := txnMgr.StartTxn(nil)
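
		// Build a fresh version-9 checkpoint, and this time pre-create part of
		// the db/tbl entries in the catalog so that EstablishFromCKPs can
		// resolve them instead of discarding their usage data.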
		ckpDatas, usages := mockCkpDataWithVersion(logtail.CheckpointVersion9, 1)

		for xx := range usages {
			for yy := range usages[xx] {
				db, err := ctlog.GetDatabaseByID(usages[xx][yy].DbId)
				if moerr.IsMoErrCode(err, moerr.OkExpectedEOB) || db == nil {
					db, err = ctlog.CreateDBEntryWithID(usages[xx][yy].String(), "", "", usages[xx][yy].DbId, txn)
					db.TestSetAccId(uint32(usages[xx][yy].AccId))
				}

				require.Nil(t, err)
				require.NotNil(t, db)

				if rand.Int()%3 == 0 {
					continue
				}

				tbl, err := db.CreateTableEntryWithTableId(
					catalog.MockSchema(1, 1), txn, nil, usages[xx][yy].TblId)
				require.Nil(t, err)
				require.NotNil(t, tbl)

				createdTbl = append(createdTbl, usages[xx][yy])
			}
		}

		require.Nil(t, txn.Commit(ctx))

		memo.PrepareReplay(ckpDatas, []uint32{logtail.CheckpointVersion9})
		memo.EstablishFromCKPs(ctlog)

		for idx := range ckpDatas {
			require.Nil(t, ckpDatas[idx])
		}

		require.Equal(t, len(createdTbl), len(memo.GetDelayed()))

		sizes := memo.GatherAllAccSize()
		for idx := range createdTbl {
			_, ok := sizes[createdTbl[idx].AccId]
			require.True(t, ok)

			sizes[createdTbl[idx].AccId] -= createdTbl[idx].Size
		}

		for _, size := range sizes {
			require.Equal(t, uint64(0), size)
		}
	}

	{
		// test updating old data during the global ckp
		gCollector := logtail.NewGlobalCollector(types.TS{}, time.Second)
		gCollector.UsageMemo = memo
		defer gCollector.Close()

		for _, usage := range createdTbl {
			gCollector.Usage.ReservedAccIds[usage.AccId] = struct{}{}

			db, err := ctlog.GetDatabaseByID(usage.DbId)
			require.Nil(t, err)
			require.NotNil(t, db)

			tbl, err := db.GetTableEntryByID(usage.TblId)
			require.Nil(t, err)
			require.NotNil(t, tbl)

			// double the size
			obj := catalog.MockObjEntryWithTbl(tbl, usage.Size*2)
			gCollector.Usage.ObjInserts = append(gCollector.Usage.ObjInserts, obj)
		}

		logtail.FillUsageBatOfGlobal(gCollector)
		sizes := memo.GatherAllAccSize()

		for idx := range createdTbl {
			_, ok := sizes[createdTbl[idx].AccId]
			require.True(t, ok)

			sizes[createdTbl[idx].AccId] -= createdTbl[idx].Size * 2
		}

		for _, size := range sizes {
			require.Equal(t, uint64(0), size)
		}
	}
}