github.com/matrixorigin/matrixone@v1.2.0/pkg/vm/engine/tae/rpc/inspect.go (about) 1 // Copyright 2021 - 2022 Matrix Origin 2 // 3 // Licensed under the Apache License, Version 2.0 (the "License"); 4 // you may not use this file except in compliance with the License. 5 // You may obtain a copy of the License at 6 // 7 // http://www.apache.org/licenses/LICENSE-2.0 8 // 9 // Unless required by applicable law or agreed to in writing, software 10 // distributed under the License is distributed on an "AS IS" BASIS, 11 // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 // See the License for the specific language governing permissions and 13 // limitations under the License. 14 15 package rpc 16 17 import ( 18 "bytes" 19 "container/heap" 20 "context" 21 "fmt" 22 "io" 23 "math" 24 "math/rand" 25 "os" 26 "strconv" 27 "strings" 28 "sync" 29 "time" 30 31 "github.com/matrixorigin/matrixone/pkg/vm/engine/tae/iface/handle" 32 33 "github.com/matrixorigin/matrixone/pkg/common/moerr" 34 "github.com/matrixorigin/matrixone/pkg/container/types" 35 "github.com/matrixorigin/matrixone/pkg/logutil" 36 "github.com/matrixorigin/matrixone/pkg/objectio" 37 "github.com/matrixorigin/matrixone/pkg/pb/api" 38 "github.com/matrixorigin/matrixone/pkg/vm/engine/tae/catalog" 39 "github.com/matrixorigin/matrixone/pkg/vm/engine/tae/common" 40 "github.com/matrixorigin/matrixone/pkg/vm/engine/tae/db" 41 "github.com/matrixorigin/matrixone/pkg/vm/engine/tae/db/merge" 42 "github.com/matrixorigin/matrixone/pkg/vm/engine/tae/logtail" 43 "github.com/spf13/cobra" 44 ) 45 46 type inspectContext struct { 47 db *db.DB 48 acinfo *db.AccessInfo 49 args []string 50 out io.Writer 51 resp *db.InspectResp 52 } 53 54 // impl Pflag.Value interface 55 func (i *inspectContext) String() string { return "" } 56 func (i *inspectContext) Set(string) error { return nil } 57 func (i *inspectContext) Type() string { return "ictx" } 58 59 func initCommand(_ context.Context, inspectCtx *inspectContext) *cobra.Command { 60 rootCmd := &cobra.Command{ 61 Use: "inspect", 62 } 63 64 rootCmd.PersistentFlags().VarPF(inspectCtx, "ictx", "", "").Hidden = true 65 66 rootCmd.SetArgs(inspectCtx.args) 67 rootCmd.SetErr(inspectCtx.out) 68 rootCmd.SetOut(inspectCtx.out) 69 70 rootCmd.CompletionOptions.DisableDefaultCmd = true 71 72 catalog := &catalogArg{} 73 rootCmd.AddCommand(catalog.PrepareCommand()) 74 75 object := &objStatArg{} 76 rootCmd.AddCommand(object.PrepareCommand()) 77 78 policy := &mergePolicyArg{} 79 rootCmd.AddCommand(policy.PrepareCommand()) 80 81 info := &infoArg{} 82 rootCmd.AddCommand(info.PrepareCommand()) 83 84 mignore := &manualyIgnoreArg{} 85 rootCmd.AddCommand(mignore.PrepareCommand()) 86 87 storage := &storageUsageHistoryArg{} 88 rootCmd.AddCommand(storage.PrepareCommand()) 89 90 renamecol := &RenameColArg{} 91 rootCmd.AddCommand(renamecol.PrepareCommand()) 92 93 pstatus := &PolicyStatus{} 94 rootCmd.AddCommand(pstatus.PrepareCommand()) 95 96 objPrune := &objectPruneArg{} 97 rootCmd.AddCommand(objPrune.PrepareCommand()) 98 99 return rootCmd 100 } 101 102 func RunInspect(ctx context.Context, inspectCtx *inspectContext) { 103 rootCmd := initCommand(ctx, inspectCtx) 104 rootCmd.Execute() 105 } 106 107 type InspectCmd interface { 108 FromCommand(cmd *cobra.Command) error 109 String() string 110 Run() error 111 } 112 113 func RunFactory[T InspectCmd](t T) func(cmd *cobra.Command, args []string) { 114 return func(cmd *cobra.Command, args []string) { 115 if err := t.FromCommand(cmd); err != nil { 116 cmd.OutOrStdout().Write([]byte(fmt.Sprintf("parse err: %v", err))) 117 return 118 } 119 ctx := cmd.Flag("ictx").Value.(*inspectContext) 120 logutil.Infof("inpsect mo_ctl %s: %v by account %+v", cmd.Name(), t.String(), ctx.acinfo) 121 err := t.Run() 122 if err != nil { 123 cmd.OutOrStdout().Write( 124 []byte(fmt.Sprintf("run err: %v", err)), 125 ) 126 } else { 127 cmd.OutOrStdout().Write( 128 []byte(fmt.Sprintf("success. arg %v", t.String())), 129 ) 130 } 131 } 132 } 133 134 type catalogArg struct { 135 ctx *inspectContext 136 outfile *os.File 137 tbl *catalog.TableEntry 138 verbose common.PPLevel 139 } 140 141 func (c *catalogArg) PrepareCommand() *cobra.Command { 142 catalogCmd := &cobra.Command{ 143 Use: "catalog", 144 Short: "show catalog", 145 Run: RunFactory(c), 146 } 147 148 catalogCmd.Flags().CountP("verbose", "v", "verbose level") 149 catalogCmd.Flags().StringP("outfile", "o", "", "write output to a file") 150 catalogCmd.Flags().StringP("target", "t", "*", "format: db.table") 151 return catalogCmd 152 } 153 154 func switchPPL(count int) common.PPLevel { 155 switch count { 156 case 0: 157 return common.PPL0 158 case 1: 159 return common.PPL1 160 case 2: 161 return common.PPL2 162 case 3: 163 return common.PPL3 164 case 4: 165 return common.PPL4 166 default: 167 return common.PPL1 168 } 169 } 170 171 func (c *catalogArg) FromCommand(cmd *cobra.Command) (err error) { 172 c.ctx = cmd.Flag("ictx").Value.(*inspectContext) 173 count, _ := cmd.Flags().GetCount("verbose") 174 c.verbose = switchPPL(count) 175 176 address, _ := cmd.Flags().GetString("target") 177 c.tbl, err = parseTableTarget(address, c.ctx.acinfo, c.ctx.db) 178 if err != nil { 179 return err 180 } 181 182 file, _ := cmd.Flags().GetString("outfile") 183 if file != "" { 184 if f, err := os.Create(file); err != nil { 185 return moerr.NewInternalErrorNoCtx("open %s err: %v", file, err) 186 } else { 187 c.outfile = f 188 } 189 } 190 return nil 191 } 192 193 func (c *catalogArg) String() string { 194 t := "*" 195 if c.tbl != nil { 196 t = fmt.Sprintf("%d-%s", c.tbl.ID, c.tbl.GetLastestSchemaLocked().Name) 197 } 198 f := "nil" 199 if c.outfile != nil { 200 f = c.outfile.Name() 201 } 202 return fmt.Sprintf("(%s) outfile: %v, verbose: %d, ", t, f, c.verbose) 203 } 204 205 func (c *catalogArg) Run() error { 206 var ret string 207 if c.tbl != nil { 208 ret = c.tbl.PPString(c.verbose, 0, "") 209 } else { 210 ret = c.ctx.db.Catalog.SimplePPString(c.verbose) 211 } 212 if c.outfile != nil { 213 c.outfile.WriteString(ret) 214 defer c.outfile.Close() 215 c.ctx.resp.Payload = []byte("write file done") 216 } else { 217 c.ctx.resp.Payload = []byte(ret) 218 } 219 return nil 220 } 221 222 type objStatArg struct { 223 ctx *inspectContext 224 tbl *catalog.TableEntry 225 topk, start, end int 226 verbose common.PPLevel 227 } 228 229 func (c *objStatArg) PrepareCommand() *cobra.Command { 230 objectCmd := &cobra.Command{ 231 Use: "object", 232 Short: "show object statistics", 233 Run: RunFactory(c), 234 } 235 objectCmd.Flags().CountP("verbose", "v", "verbose level") 236 objectCmd.Flags().IntP("topk", "k", 10, "tables with topk objects count") 237 objectCmd.Flags().StringP("target", "t", "*", "format: db.table") 238 objectCmd.Flags().IntP("start", "s", 0, "show object detail starts from") 239 objectCmd.Flags().IntP("end", "e", -1, "show object detail ends at") 240 return objectCmd 241 } 242 243 func (c *objStatArg) FromCommand(cmd *cobra.Command) (err error) { 244 c.ctx = cmd.Flag("ictx").Value.(*inspectContext) 245 c.topk, _ = cmd.Flags().GetInt("topk") 246 c.start, _ = cmd.Flags().GetInt("start") 247 c.end, _ = cmd.Flags().GetInt("end") 248 count, _ := cmd.Flags().GetCount("verbose") 249 c.verbose = switchPPL(count) 250 address, _ := cmd.Flags().GetString("target") 251 c.tbl, err = parseTableTarget(address, c.ctx.acinfo, c.ctx.db) 252 if err != nil { 253 return err 254 } 255 return nil 256 } 257 258 func (c *objStatArg) String() string { 259 if c.tbl != nil { 260 return fmt.Sprintf("%d-%s verbose %v", c.tbl.ID, c.tbl.GetLastestSchema().Name, c.verbose) 261 } else { 262 return fmt.Sprintf("list with top %d", c.topk) 263 } 264 } 265 266 func (c *objStatArg) Run() error { 267 if c.tbl != nil { 268 b := &bytes.Buffer{} 269 p := c.ctx.db.MergeHandle.GetPolicy(c.tbl).(*merge.BasicPolicyConfig) 270 b.WriteString(c.tbl.ObjectStatsString(c.verbose, c.start, c.end)) 271 b.WriteByte('\n') 272 b.WriteString(fmt.Sprintf("\n%s", p.String())) 273 c.ctx.resp.Payload = b.Bytes() 274 } else { 275 visitor := newObjectVisitor() 276 visitor.topk = c.topk 277 c.ctx.db.Catalog.RecurLoop(visitor) 278 b := &bytes.Buffer{} 279 b.WriteString(fmt.Sprintf("db count: %d, table count: %d\n", visitor.db, visitor.tbl)) 280 for i, l := 0, visitor.candidates.Len(); i < l; i++ { 281 item := heap.Pop(&visitor.candidates).(mItem) 282 b.WriteString(fmt.Sprintf(" %d.%d: %d\n", item.did, item.tid, item.objcnt)) 283 } 284 c.ctx.resp.Payload = b.Bytes() 285 } 286 return nil 287 } 288 289 type pruneTask struct { 290 objs []*catalog.ObjectEntry 291 insertAt time.Time 292 } 293 294 type objsPruneTask struct { 295 sync.Mutex 296 memos map[int]pruneTask 297 } 298 299 func (c *objsPruneTask) PruneLocked() { 300 for id, task := range c.memos { 301 if time.Since(task.insertAt) > 5*time.Minute { 302 delete(c.memos, id) 303 } 304 } 305 } 306 307 func (c *objsPruneTask) Len() int { 308 c.Lock() 309 defer c.Unlock() 310 return len(c.memos) 311 } 312 313 var TaskCache = &objsPruneTask{ 314 memos: make(map[int]pruneTask), 315 } 316 317 type objectPruneArg struct { 318 ctx *inspectContext 319 tbl *catalog.TableEntry 320 ago time.Duration 321 ack int 322 } 323 324 func (c *objectPruneArg) PrepareCommand() *cobra.Command { 325 objectPruneCmd := &cobra.Command{ 326 Use: "objprune", 327 Short: "prune objects", 328 Run: RunFactory(c), 329 } 330 objectPruneCmd.Flags().StringP("target", "t", "*", "format: db.table") 331 objectPruneCmd.Flags().DurationP("duration", "d", 72*time.Hour, "prune objects older than duration") 332 objectPruneCmd.Flags().IntP("ack", "a", -1, "execute task by ack") 333 334 return objectPruneCmd 335 } 336 337 func (c *objectPruneArg) String() string { 338 if c.ack != -1 { 339 return fmt.Sprintf("prune: execute task: %d", c.ack) 340 } else { 341 return fmt.Sprintf("prune: table %v-%v, %v ago, cacheLen %v", c.tbl.ID, c.tbl.GetLastestSchema().Name, c.ago, TaskCache.Len()) 342 } 343 } 344 345 func (c *objectPruneArg) FromCommand(cmd *cobra.Command) (err error) { 346 c.ctx = cmd.Flag("ictx").Value.(*inspectContext) 347 address, _ := cmd.Flags().GetString("target") 348 c.ack, _ = cmd.Flags().GetInt("ack") 349 c.ago, _ = cmd.Flags().GetDuration("duration") 350 if c.ago < 24*time.Hour { 351 return moerr.NewInvalidInputNoCtx("pruning objects within 24h is not supported") 352 } 353 c.tbl, err = parseTableTarget(address, c.ctx.acinfo, c.ctx.db) 354 if err != nil { 355 return err 356 } 357 return nil 358 } 359 360 func (c *objectPruneArg) Run() error { 361 if c.ack != -1 { 362 if err := c.executePrune(); err != nil { 363 return err 364 } 365 return nil 366 } 367 368 if c.tbl == nil { 369 return moerr.NewInvalidInputNoCtx("need table target") 370 } 371 372 TaskCache.Lock() 373 TaskCache.PruneLocked() 374 if len(TaskCache.memos) >= 100 { 375 TaskCache.Unlock() 376 return moerr.NewInvalidInputNoCtx("too many cache, try later") 377 } 378 TaskCache.Unlock() 379 entry := c.tbl 380 381 it := entry.MakeObjectIt(true) 382 now := c.ctx.db.TxnMgr.Now() 383 var total, stale, selected int 384 var minR, maxR, totalR, minS, maxS, totalS int 385 ago := types.BuildTS(now.Physical()-int64(c.ago), now.Logical()) 386 387 selectedObjs := make([]*catalog.ObjectEntry, 0, 64) 388 389 for ; it.Valid(); it.Next() { 390 obj := it.Get().GetPayload() 391 if !obj.IsActive() || obj.IsAppendable() { 392 continue 393 } 394 total++ 395 396 obj.RLock() 397 createTs := obj.GetCreatedAtLocked() 398 obj.RUnlock() 399 if createTs.GreaterEq(&ago) { 400 continue 401 } 402 stale++ 403 if c.tbl.TryGetTombstone(obj.ID) != nil || obj.GetObjectData().GetTotalChanges() > 0 { // has deletes 404 continue 405 } 406 selected++ 407 selectedObjs = append(selectedObjs, obj) 408 stat := obj.GetObjectStats() 409 rw := int(stat.Rows()) 410 sz := int(stat.OriginSize()) 411 if minR == 0 || rw < minR { 412 minR = rw 413 } 414 if rw > maxR { 415 maxR = rw 416 } 417 totalR += rw 418 if minS == 0 || sz < minS { 419 minS = sz 420 } 421 if sz > maxS { 422 maxS = sz 423 } 424 totalS += sz 425 } 426 427 if selected == 0 { 428 c.ctx.resp.Payload = []byte(fmt.Sprintf( 429 "total: %d, stale: %d, selected: %d, no valid objs to prune", 430 total, stale, selected, 431 )) 432 return nil 433 } 434 435 TaskCache.Lock() 436 var id int 437 for { 438 id = rand.Intn(100) 439 if _, ok := TaskCache.memos[id]; ok { 440 continue 441 } 442 TaskCache.memos[id] = pruneTask{ 443 objs: selectedObjs, 444 insertAt: time.Now(), 445 } 446 break 447 } 448 TaskCache.Unlock() 449 450 c.ctx.resp.Payload = []byte(fmt.Sprintf( 451 "total: %d, stale: %d, selected: %d, minR: %d, maxR: %d, avgR: %d, minS: %v, maxS: %v, avgS: %v, taskid: %d", 452 total, stale, selected, minR, maxR, totalR/selected, 453 common.HumanReadableBytes(minS), 454 common.HumanReadableBytes(maxS), 455 common.HumanReadableBytes(totalS/selected), 456 id, 457 )) 458 459 return nil 460 } 461 462 func (c *objectPruneArg) executePrune() error { 463 TaskCache.Lock() 464 task, ok := TaskCache.memos[c.ack] 465 delete(TaskCache.memos, c.ack) 466 TaskCache.Unlock() 467 if !ok { 468 c.ctx.resp.Payload = []byte("task not found") 469 return nil 470 } 471 txn, _ := c.ctx.db.StartTxn(nil) 472 tid := task.objs[0].GetTable().ID 473 did := task.objs[0].GetTable().GetDB().ID 474 dbHdl, err := txn.GetDatabaseByID(uint64(did)) 475 if err != nil { 476 return err 477 } 478 tblHdl, err := dbHdl.GetRelationByID(uint64(tid)) 479 if err != nil { 480 return err 481 } 482 notfound := 0 483 w := &bytes.Buffer{} 484 for _, obj := range task.objs { 485 if err := tblHdl.SoftDeleteObject(&obj.ID); err != nil { 486 logutil.Errorf("objprune: del obj %s: %v", obj.ID.String(), err) 487 return err 488 } 489 w.WriteString(obj.ID.String()) 490 w.WriteRune(',') 491 } 492 if err := txn.Commit(context.Background()); err != nil { 493 return err 494 } 495 496 logutil.Infof("objprune done: %v", w.String()) 497 c.ctx.resp.Payload = []byte(fmt.Sprintf("prunes total: %d, notfound: %d", len(task.objs), notfound)) 498 return nil 499 } 500 501 type storageUsageHistoryArg struct { 502 ctx *inspectContext 503 detail *struct { 504 accId uint64 505 dbI uint64 506 tblId uint64 507 } 508 509 trace *struct { 510 tStart, tEnd time.Time 511 accounts map[uint64]struct{} 512 } 513 514 transfer bool 515 eliminateErrors bool 516 } 517 518 func (c *storageUsageHistoryArg) PrepareCommand() *cobra.Command { 519 storageUsageCmd := &cobra.Command{ 520 Use: "storage_usage", 521 Short: "storage usage details", 522 Run: RunFactory(c), 523 } 524 525 // storage usage request history 526 storageUsageCmd.Flags().StringP("trace", "t", "", "format: -time time range or -acc account id list") 527 // storage usage details in ckp 528 storageUsageCmd.Flags().StringP("detail", "d", "", "format: accId{.dbName{.tableName}}") 529 storageUsageCmd.Flags().StringP("transfer", "f", "", "format: *") 530 storageUsageCmd.Flags().StringP("eliminate_errors", "e", "", "format: *") 531 return storageUsageCmd 532 } 533 534 func (c *storageUsageHistoryArg) FromCommand(cmd *cobra.Command) (err error) { 535 c.ctx = cmd.Flag("ictx").Value.(*inspectContext) 536 537 expr, _ := cmd.Flags().GetString("detail") 538 if expr != "" { 539 accId, dbId, tblId, err := parseStorageUsageDetail(expr, c.ctx.acinfo, c.ctx.db) 540 if err != nil { 541 return err 542 } 543 c.detail = &struct { 544 accId uint64 545 dbI uint64 546 tblId uint64 547 }{accId: accId, dbI: dbId, tblId: tblId} 548 } 549 550 expr, _ = cmd.Flags().GetString("trace") 551 if expr != "" { 552 start, end, accs, err := parseStorageUsageTrace(expr, c.ctx.acinfo, c.ctx.db) 553 if err != nil { 554 return err 555 } 556 557 c.trace = &struct { 558 tStart, tEnd time.Time 559 accounts map[uint64]struct{} 560 }{tStart: start, tEnd: end, accounts: accs} 561 } 562 563 expr, _ = cmd.Flags().GetString("transfer") 564 if expr != "" { 565 if expr == "*" { 566 c.transfer = true 567 } else { 568 return moerr.NewInvalidArgNoCtx(expr, "`storage_usage -f *` expected") 569 } 570 } 571 572 expr, _ = cmd.Flags().GetString("eliminate_errors") 573 if expr != "" { 574 if expr == "*" { 575 c.eliminateErrors = true 576 } else { 577 return moerr.NewInvalidArgNoCtx(expr, "`storage_usage -e *` expected") 578 } 579 } 580 581 return nil 582 } 583 584 func (c *storageUsageHistoryArg) Run() (err error) { 585 if c.detail != nil { 586 return storageUsageDetails(c) 587 } else if c.trace != nil { 588 return storageTrace(c) 589 } else if c.transfer { 590 return storageUsageTransfer(c) 591 } else if c.eliminateErrors { 592 return storageUsageEliminateErrors(c) 593 } 594 return moerr.NewInvalidArgNoCtx("", c.ctx.args) 595 } 596 597 func (c *storageUsageHistoryArg) String() string { 598 return "" 599 } 600 601 type manualyIgnoreArg struct { 602 ctx *inspectContext 603 id uint64 604 } 605 606 func (c *manualyIgnoreArg) PrepareCommand() *cobra.Command { 607 miCmd := &cobra.Command{ 608 Use: "ckpignore", 609 Short: "manually ignore table when checking checkpoint entry", 610 Run: RunFactory(c), 611 } 612 miCmd.Flags().Uint64P("tid", "t", 0, "format: table-id") 613 return miCmd 614 } 615 616 func (c *manualyIgnoreArg) FromCommand(cmd *cobra.Command) (err error) { 617 c.ctx = cmd.Flag("ictx").Value.(*inspectContext) 618 c.id, _ = cmd.Flags().GetUint64("tid") 619 return nil 620 } 621 622 func (c *manualyIgnoreArg) String() string { 623 return fmt.Sprintf("ignore ckp table: %v", c.id) 624 } 625 626 func (c *manualyIgnoreArg) Run() error { 627 logtail.TempF.Add(c.id) 628 return nil 629 } 630 631 type infoArg struct { 632 ctx *inspectContext 633 tbl *catalog.TableEntry 634 obj *catalog.ObjectEntry 635 blkn int 636 verbose common.PPLevel 637 } 638 639 func (c *infoArg) PrepareCommand() *cobra.Command { 640 cmd := &cobra.Command{ 641 Use: "info", 642 Short: "get dedicated debug info", 643 Run: RunFactory(c), 644 } 645 cmd.Flags().CountP("verbose", "v", "verbose level") 646 cmd.Flags().StringP("target", "t", "*", "format: table-id") 647 cmd.Flags().StringP("blk", "b", "", "format: <objectId>_<fineN>_<blkn>") 648 return cmd 649 } 650 651 func (c *infoArg) FromCommand(cmd *cobra.Command) (err error) { 652 c.ctx = cmd.Flag("ictx").Value.(*inspectContext) 653 count, _ := cmd.Flags().GetCount("verbose") 654 c.verbose = switchPPL(count) 655 656 address, _ := cmd.Flags().GetString("target") 657 c.tbl, err = parseTableTarget(address, c.ctx.acinfo, c.ctx.db) 658 if err != nil { 659 return err 660 } 661 if c.tbl == nil { 662 return moerr.NewInvalidInputNoCtx("need table target") 663 } 664 665 baddress, _ := cmd.Flags().GetString("blk") 666 c.obj, c.blkn, err = parseBlkTarget(baddress, c.tbl) 667 if err != nil { 668 return err 669 } 670 671 return nil 672 } 673 674 func (c *infoArg) String() string { 675 t := "*" 676 if c.tbl != nil { 677 t = fmt.Sprintf("%d-%s", c.tbl.ID, c.tbl.GetLastestSchemaLocked().Name) 678 } 679 680 if c.obj != nil { 681 t = fmt.Sprintf("%s o-%s b-%d", t, c.obj.ID.String(), c.blkn) 682 } 683 684 return fmt.Sprintf("info: %v", t) 685 } 686 687 func (c *infoArg) Run() error { 688 b := &bytes.Buffer{} 689 if c.tbl != nil { 690 b.WriteString(fmt.Sprintf("last_merge: %v\n", c.tbl.Stats.GetLastMerge().String())) 691 b.WriteString(fmt.Sprintf("last_flush: %v\n", c.tbl.Stats.GetLastFlush().ToString())) 692 } 693 if c.obj != nil { 694 b.WriteRune('\n') 695 b.WriteString(fmt.Sprintf("persisted_ts: %v\n", c.obj.GetObjectData().GetDeltaPersistedTS().ToString())) 696 r, reason := c.obj.GetObjectData().PrepareCompactInfo() 697 rows, err := c.obj.GetObjectData().Rows() 698 if err != nil { 699 logutil.Warnf("get object rows failed, obj: %v, err %v", c.obj.ID.String(), err) 700 } 701 dels := c.obj.GetObjectData().GetTotalChanges() 702 b.WriteString(fmt.Sprintf("prepareCompact: %v, %q\n", r, reason)) 703 b.WriteString(fmt.Sprintf("left rows: %v\n", rows-dels)) 704 b.WriteString(fmt.Sprintf("ppstring: %v\n", c.obj.GetObjectData().PPString(c.verbose, 0, "", c.blkn))) 705 706 schema := c.obj.GetSchema() 707 if schema.HasSortKey() { 708 zm, err := c.obj.GetPKZoneMap(context.Background(), c.obj.GetObjectData().GetFs().Service) 709 var zmstr string 710 if err != nil { 711 zmstr = err.Error() 712 } else if c.verbose <= common.PPL1 { 713 zmstr = zm.String() 714 } else if c.verbose == common.PPL2 { 715 zmstr = zm.StringForCompose() 716 } else { 717 zmstr = zm.StringForHex() 718 } 719 b.WriteString(fmt.Sprintf("sort key zm: %v\n", zmstr)) 720 } 721 } 722 c.ctx.resp.Payload = b.Bytes() 723 return nil 724 } 725 726 type mergePolicyArg struct { 727 ctx *inspectContext 728 tbl *catalog.TableEntry 729 maxMergeObjN int32 730 minOsizeQualified int32 731 maxOsizeObject int32 732 cnMinMergeSize int32 733 hints []api.MergeHint 734 } 735 736 func (c *mergePolicyArg) PrepareCommand() *cobra.Command { 737 policyCmd := &cobra.Command{ 738 Use: "policy", 739 Short: "set merge policy for table", 740 Run: RunFactory(c), 741 } 742 policyCmd.Flags().StringP("target", "t", "*", "format: db.table") 743 policyCmd.Flags().Int32P("maxMergeObjN", "r", common.DefaultMaxMergeObjN, "max number of objects merged for one run") 744 policyCmd.Flags().Int32P("minOsizeQualified", "m", common.DefaultMinOsizeQualifiedMB, "objects whose osize are less than minOsizeQualified(MB) will be picked up to merge") 745 policyCmd.Flags().Int32P("maxOsizeObject", "o", common.DefaultMaxOsizeObjMB, "merged objects' osize should be near maxOsizeObject(MB)") 746 policyCmd.Flags().Int32P("minCNMergeSize", "c", common.DefaultMinCNMergeSize, "Merge task whose memory occupation exceeds minCNMergeSize(MB) will be moved to CN") 747 policyCmd.Flags().Int32SliceP("mergeHints", "n", []int32{0}, "hints to merge the table") 748 return policyCmd 749 } 750 751 func (c *mergePolicyArg) FromCommand(cmd *cobra.Command) (err error) { 752 c.ctx = cmd.Flag("ictx").Value.(*inspectContext) 753 754 address, _ := cmd.Flags().GetString("target") 755 c.tbl, err = parseTableTarget(address, c.ctx.acinfo, c.ctx.db) 756 if err != nil { 757 return err 758 } 759 c.maxMergeObjN, _ = cmd.Flags().GetInt32("maxMergeObjN") 760 c.maxOsizeObject, _ = cmd.Flags().GetInt32("maxOsizeObject") 761 c.minOsizeQualified, _ = cmd.Flags().GetInt32("minOsizeQualified") 762 c.cnMinMergeSize, _ = cmd.Flags().GetInt32("minCNMergeSize") 763 if c.maxOsizeObject > 2048 || c.minOsizeQualified > 2048 { 764 return moerr.NewInvalidInputNoCtx("maxOsizeObject or minOsizeQualified should be less than 2048") 765 } 766 hints, _ := cmd.Flags().GetInt32Slice("mergeHints") 767 for _, h := range hints { 768 if _, ok := api.MergeHint_name[h]; !ok { 769 return moerr.NewInvalidArgNoCtx("unspported hint %v", h) 770 } 771 c.hints = append(c.hints, api.MergeHint(h)) 772 } 773 return nil 774 } 775 776 func (c *mergePolicyArg) String() string { 777 t := "*" 778 if c.tbl != nil { 779 t = fmt.Sprintf("%d-%s", c.tbl.ID, c.tbl.GetLastestSchemaLocked().Name) 780 } 781 return fmt.Sprintf( 782 "(%s) maxMergeObjN: %v, maxOsizeObj: %vMB, minOsizeQualified: %vMB, offloadToCnSize: %vMB, hints: %v", 783 t, c.maxMergeObjN, c.maxOsizeObject, c.minOsizeQualified, c.cnMinMergeSize, c.hints, 784 ) 785 } 786 787 func (c *mergePolicyArg) Run() error { 788 maxosize := uint32(c.maxOsizeObject * common.Const1MBytes) 789 minosize := uint32(c.minOsizeQualified * common.Const1MBytes) 790 cnsize := uint64(c.cnMinMergeSize) * common.Const1MBytes 791 792 if c.tbl == nil { 793 common.RuntimeMaxMergeObjN.Store(c.maxMergeObjN) 794 common.RuntimeOsizeRowsQualified.Store(minosize) 795 common.RuntimeMaxObjOsize.Store(maxosize) 796 common.RuntimeMinCNMergeSize.Store(cnsize) 797 if c.maxMergeObjN == 0 && c.minOsizeQualified == 0 { 798 merge.StopMerge.Store(true) 799 } else { 800 merge.StopMerge.Store(false) 801 } 802 } else { 803 c.ctx.db.MergeHandle.ConfigPolicy(c.tbl, &merge.BasicPolicyConfig{ 804 MergeMaxOneRun: int(c.maxMergeObjN), 805 ObjectMinOsize: minosize, 806 MaxOsizeMergedObj: maxosize, 807 MinCNMergeSize: cnsize, 808 MergeHints: c.hints, 809 }) 810 } 811 c.ctx.resp.Payload = []byte("<empty>") 812 return nil 813 } 814 815 type RenameColArg struct { 816 ctx *inspectContext 817 tbl *catalog.TableEntry 818 oldName, newName string 819 seq int 820 } 821 822 func (c *RenameColArg) FromCommand(cmd *cobra.Command) (err error) { 823 c.ctx = cmd.Flag("ictx").Value.(*inspectContext) 824 c.tbl, _ = parseTableTarget(cmd.Flag("target").Value.String(), c.ctx.acinfo, c.ctx.db) 825 c.oldName, _ = cmd.Flags().GetString("old") 826 c.newName, _ = cmd.Flags().GetString("new") 827 c.seq, _ = cmd.Flags().GetInt("seq") 828 return nil 829 } 830 831 func (c *RenameColArg) PrepareCommand() *cobra.Command { 832 renameColCmd := &cobra.Command{ 833 Use: "rename_col", 834 Short: "rename column", 835 Run: RunFactory(c), 836 } 837 renameColCmd.Flags().StringP("target", "t", "*", "format: db.table") 838 renameColCmd.Flags().StringP("old", "o", "", "old column name") 839 renameColCmd.Flags().StringP("new", "n", "", "new column name") 840 renameColCmd.Flags().IntP("seq", "s", 0, "column seq") 841 return renameColCmd 842 } 843 844 func (c *RenameColArg) String() string { 845 return fmt.Sprintf("rename col: %v, %v,%v,%v", c.tbl.GetLastestSchemaLocked().Name, c.oldName, c.newName, c.seq) 846 } 847 848 func (c *RenameColArg) Run() (err error) { 849 txn, _ := c.ctx.db.StartTxn(nil) 850 defer func() { 851 if err != nil { 852 txn.Rollback(context.Background()) 853 } 854 }() 855 dbHdl, err := txn.GetDatabase(c.tbl.GetDB().GetName()) 856 if err != nil { 857 return err 858 } 859 tblHdl, err := dbHdl.GetRelationByName(c.tbl.GetLastestSchemaLocked().Name) 860 if err != nil { 861 return err 862 } 863 err = tblHdl.AlterTable(context.Background(), api.NewRenameColumnReq(0, 0, c.oldName, c.newName, uint32(c.seq))) 864 if err != nil { 865 return err 866 } 867 return txn.Commit(context.Background()) 868 } 869 870 type PolicyStatus struct { 871 ctx *inspectContext 872 pruneId uint64 873 pruneAgo time.Duration 874 } 875 876 func (c *PolicyStatus) FromCommand(cmd *cobra.Command) (err error) { 877 c.ctx = cmd.Flag("ictx").Value.(*inspectContext) 878 c.pruneAgo, _ = cmd.Flags().GetDuration("prune-ago") 879 c.pruneId, _ = cmd.Flags().GetUint64("prune-id") 880 return nil 881 } 882 883 func (c *PolicyStatus) PrepareCommand() *cobra.Command { 884 statusCmd := &cobra.Command{ 885 Use: "policy_status", 886 Short: "check cn merge status", 887 Run: RunFactory(c), 888 } 889 statusCmd.Flags().DurationP("prune-ago", "a", 0, "prune objects by time ago") 890 statusCmd.Flags().Uint64P("prune-id", "i", 0, "prune objects by table id") 891 return statusCmd 892 } 893 894 func (c *PolicyStatus) String() string { 895 return fmt.Sprintf("policy status: prune %v ago, by id %v", c.pruneAgo, c.pruneId) 896 } 897 898 func (c *PolicyStatus) Run() (err error) { 899 if c.pruneAgo == 0 && c.pruneId == 0 { 900 c.ctx.resp.Payload = []byte(merge.ActiveCNObj.String()) 901 return nil 902 } else { 903 merge.ActiveCNObj.Prune(c.pruneId, c.pruneAgo) 904 return nil 905 } 906 } 907 908 func parseBlkTarget(address string, tbl *catalog.TableEntry) (*catalog.ObjectEntry, int, error) { 909 if address == "" { 910 return nil, 0, nil 911 } 912 parts := strings.Split(address, "_") 913 if len(parts) != 3 { 914 return nil, 0, moerr.NewInvalidInputNoCtx(fmt.Sprintf("invalid block address: %q", address)) 915 } 916 uid, err := types.ParseUuid(parts[0]) 917 if err != nil { 918 return nil, 0, err 919 } 920 fn, err := strconv.Atoi(parts[1]) 921 if err != nil { 922 return nil, 0, err 923 } 924 bn, err := strconv.Atoi(parts[2]) 925 if err != nil { 926 return nil, 0, err 927 } 928 bid := objectio.NewBlockid(&uid, uint16(fn), uint16(bn)) 929 objid := bid.Object() 930 oentry, err := tbl.GetObjectByID(objid) 931 if err != nil { 932 return nil, 0, err 933 } 934 return oentry, bn, nil 935 } 936 937 func parseTableTarget(address string, ac *db.AccessInfo, db *db.DB) (*catalog.TableEntry, error) { 938 if address == "*" { 939 return nil, nil 940 } 941 parts := strings.Split(address, ".") 942 if len(parts) != 2 { 943 return nil, moerr.NewInvalidInputNoCtx(fmt.Sprintf("invalid db.table: %q", address)) 944 } 945 946 txn, _ := db.StartTxn(nil) 947 if ac != nil { 948 txn.BindAccessInfo(ac.AccountID, ac.UserID, ac.RoleID) 949 } 950 951 did, err1 := strconv.Atoi(parts[0]) 952 tid, err2 := strconv.Atoi(parts[1]) 953 954 if err1 == nil && err2 == nil { 955 dbHdl, err := txn.GetDatabaseByID(uint64(did)) 956 if err != nil { 957 return nil, err 958 } 959 tblHdl, err := dbHdl.GetRelationByID(uint64(tid)) 960 if err != nil { 961 return nil, err 962 } 963 tbl := tblHdl.GetMeta().(*catalog.TableEntry) 964 txn.Commit(context.Background()) 965 return tbl, nil 966 } else { 967 dbHdl, err := txn.GetDatabase(parts[0]) 968 if err != nil { 969 return nil, err 970 } 971 tblHdl, err := dbHdl.GetRelationByName(parts[1]) 972 if err != nil { 973 return nil, err 974 } 975 tbl := tblHdl.GetMeta().(*catalog.TableEntry) 976 txn.Commit(context.Background()) 977 return tbl, nil 978 } 979 } 980 981 type objectVisitor struct { 982 catalog.LoopProcessor 983 topk int 984 db, tbl int 985 candidates itemSet 986 } 987 988 func newObjectVisitor() *objectVisitor { 989 v := &objectVisitor{} 990 heap.Init(&v.candidates) 991 return &objectVisitor{} 992 } 993 994 func (o *objectVisitor) OnDatabase(db *catalog.DBEntry) error { 995 if !db.IsActive() { 996 return moerr.GetOkStopCurrRecur() 997 } 998 o.db++ 999 return nil 1000 } 1001 func (o *objectVisitor) OnTable(table *catalog.TableEntry) error { 1002 if !table.IsActive() { 1003 return moerr.GetOkStopCurrRecur() 1004 } 1005 o.tbl++ 1006 1007 stat, _ := table.ObjectStats(common.PPL0, 0, -1) 1008 heap.Push(&o.candidates, mItem{objcnt: stat.ObjectCnt, did: table.GetDB().ID, tid: table.ID}) 1009 if o.candidates.Len() > o.topk { 1010 heap.Pop(&o.candidates) 1011 } 1012 return nil 1013 } 1014 1015 // the history of one table 1016 // mo_ctl("dn", "inspect", "storage_usage -t accId.dbName.tableName"); 1017 // 1018 // the history of one db 1019 // mo_ctl("dn", "inspect", "storage_usage -t accId.dbName"); 1020 // 1021 // the history of one acc 1022 // mo_ctl("dn", "inspect", "storage_usage -t accId"); 1023 // 1024 // the history of all 1025 // mo_ctl("dn", "inspect", "storage_usage -t *"); 1026 func parseStorageUsageDetail(expr string, ac *db.AccessInfo, db *db.DB) ( 1027 accId uint64, dbId uint64, tblId uint64, err error) { 1028 strs := strings.Split(expr, ".") 1029 1030 if len(strs) == 0 || len(strs) > 3 { 1031 return 0, 0, 0, moerr.NewInvalidArgNoCtx(expr, "") 1032 } 1033 1034 if len(strs) == 1 && strs[0] == "*" { 1035 return math.MaxUint32, math.MaxUint64, math.MaxUint64, nil 1036 } 1037 1038 txn, _ := db.StartTxn(nil) 1039 defer txn.Commit(context.Background()) 1040 1041 if ac != nil { 1042 logutil.Infof("inspect with access info: %+v", ac) 1043 txn.BindAccessInfo(ac.AccountID, ac.UserID, ac.RoleID) 1044 } 1045 1046 var id int 1047 if id, err = strconv.Atoi(strs[0]); err != nil { 1048 return 0, 0, 0, err 1049 } 1050 1051 accId = uint64(id) 1052 dbId, tblId = math.MaxUint64, math.MaxUint64 1053 1054 var dbHdl handle.Database 1055 if len(strs) >= 2 { 1056 dbHdl, err = txn.GetDatabase(strs[1]) 1057 if err != nil { 1058 return 0, 0, 0, err 1059 } 1060 dbId = dbHdl.GetID() 1061 } 1062 1063 if len(strs) == 3 { 1064 tblHdl, err := dbHdl.GetRelationByName(strs[2]) 1065 if err != nil { 1066 return 0, 0, 0, err 1067 } 1068 1069 tblId = tblHdl.ID() 1070 } 1071 1072 return accId, dbId, tblId, nil 1073 } 1074 1075 // [pos1, pos2) 1076 func subString(src string, pos1, pos2 int) (string, error) { 1077 if pos2 > len(src) { 1078 return "", moerr.NewOutOfRangeNoCtx("", src, pos1, " to ", pos2) 1079 } 1080 1081 dst := make([]byte, pos2-pos1) 1082 1083 copy(dst, []byte(src)[pos1:pos2]) 1084 1085 return string(dst), nil 1086 } 1087 1088 // specify the time range 1089 // select mo_ctl("dn", "inspect", "-t '-time 2023-12-18 14:26:14_2023-12-18 15:26:14'"); 1090 // 1091 // specify the account id list 1092 // select mo_ctl("dn", "inspect", "-t '-acc 0 1 2'"); 1093 // 1094 // specify time range and account list 1095 // select mo_ctl("dn", "inspect", "-t '-time 2023-12-18 14:26:14_2023-12-18 15:26:14 -acc 0 1 2'"); 1096 // 1097 // no limit, show all request trace info 1098 // select mo_ctl("dn", "inspect", "-t "); 1099 func parseStorageUsageTrace(expr string, ac *db.AccessInfo, db *db.DB) ( 1100 tStart, tEnd time.Time, accounts map[uint64]struct{}, err error) { 1101 1102 var str string 1103 tIdx := strings.Index(expr, "-time") 1104 if tIdx != -1 { 1105 dash := strings.Index(expr, "_") 1106 if dash == -1 { 1107 err = moerr.NewInvalidArgNoCtx(expr, "") 1108 return 1109 } 1110 str, err = subString(expr, tIdx+len("-time")+len(" "), dash) 1111 if err != nil { 1112 return 1113 } 1114 1115 tStart, err = time.Parse("2006-01-02 15:04:05", str) 1116 if err != nil { 1117 return 1118 } 1119 1120 str, err = subString(expr, dash+len("_"), dash+len("_")+len("2006-01-02 15:04:05")) 1121 if err != nil { 1122 return 1123 } 1124 tEnd, err = time.Parse("2006-01-02 15:04:05", str) 1125 if err != nil { 1126 return 1127 } 1128 } 1129 1130 aIdx := strings.Index(expr, "-acc") 1131 if aIdx != -1 { 1132 stop := len(expr) 1133 if aIdx < tIdx { 1134 stop = tIdx 1135 } 1136 str, err = subString(expr, aIdx+len("-acc")+len(" "), stop) 1137 if err != nil { 1138 return 1139 } 1140 accs := strings.Split(str, " ") 1141 1142 accounts = make(map[uint64]struct{}) 1143 1144 var id int 1145 for i := range accs { 1146 id, err = strconv.Atoi(accs[i]) 1147 if err != nil { 1148 return 1149 } 1150 accounts[uint64(id)] = struct{}{} 1151 } 1152 } 1153 1154 return 1155 } 1156 1157 func checkUsageData(data logtail.UsageData, c *storageUsageHistoryArg) bool { 1158 if c.detail.accId == math.MaxUint32 { 1159 return true 1160 } 1161 1162 if c.detail.accId != data.AccId { 1163 return false 1164 } 1165 1166 if c.detail.dbI == math.MaxUint64 { 1167 return true 1168 } 1169 1170 if c.detail.dbI != data.DbId { 1171 return false 1172 } 1173 1174 if c.detail.tblId == math.MaxUint64 { 1175 return true 1176 } 1177 1178 return c.detail.tblId == data.TblId 1179 } 1180 1181 func storageUsageDetails(c *storageUsageHistoryArg) (err error) { 1182 ctx, cancel := context.WithTimeout(context.Background(), time.Second*5) 1183 defer cancel() 1184 1185 entries := c.ctx.db.BGCheckpointRunner.GetAllCheckpoints() 1186 1187 versions := make([]uint32, 0) 1188 locations := make([]objectio.Location, 0) 1189 1190 for idx := range entries { 1191 if entries[idx].GetVersion() < logtail.CheckpointVersion11 { 1192 continue 1193 } 1194 versions = append(versions, entries[idx].GetVersion()) 1195 locations = append(locations, entries[idx].GetLocation()) 1196 } 1197 1198 // remove the old version 1199 entries = entries[len(entries)-len(versions):] 1200 1201 var usageInsData [][]logtail.UsageData 1202 var usageDelData [][]logtail.UsageData 1203 1204 if usageInsData, usageDelData, err = logtail.GetStorageUsageHistory( 1205 ctx, locations, versions, 1206 c.ctx.db.Runtime.Fs.Service, common.DebugAllocator); err != nil { 1207 return err 1208 } 1209 1210 txn, _ := c.ctx.db.StartTxn(nil) 1211 defer txn.Commit(context.Background()) 1212 1213 getDbAndTblNames := func(dbId, tblId uint64) (string, string) { 1214 h, _ := txn.GetDatabaseByID(dbId) 1215 if h == nil { 1216 return "deleted", "deleted" 1217 } 1218 1219 r, _ := h.GetRelationByID(tblId) 1220 if r == nil { 1221 return h.GetName(), "deleted" 1222 } 1223 return h.GetName(), r.Schema().(*catalog.Schema).Name 1224 } 1225 1226 getAllDbAndTblNames := func(usages []logtail.UsageData) (dbs, tbls []string, maxDbLen, maxTblLen int) { 1227 for idx := range usages { 1228 if checkUsageData(usages[idx], c) { 1229 dbName, tblName := getDbAndTblNames(usages[idx].DbId, usages[idx].TblId) 1230 dbs = append(dbs, dbName) 1231 tbls = append(tbls, tblName) 1232 1233 maxDbLen = int(math.Max(float64(maxDbLen), float64(len(dbName)))) 1234 maxTblLen = int(math.Max(float64(maxTblLen), float64(len(tblName)))) 1235 } 1236 } 1237 return 1238 } 1239 1240 formatOutput := func( 1241 dst *bytes.Buffer, data logtail.UsageData, 1242 dbName, tblName string, maxDbLen, maxTblLen int, hint string) float64 { 1243 1244 size := float64(data.Size) / 1048576 1245 1246 dst.WriteString(fmt.Sprintf("\t[(acc)-%-10d (%*s)-%-10d (%*s)-%-10d] %s -> %15.6f (mb)\n", 1247 data.AccId, maxDbLen, dbName, data.DbId, 1248 maxTblLen, tblName, data.TblId, hint, size)) 1249 1250 return size 1251 } 1252 1253 b := &bytes.Buffer{} 1254 ckpType := []string{"G", "I"} 1255 1256 totalSize := 0.0 1257 for x := range entries { 1258 eachCkpTotal := 0.0 1259 1260 b.WriteString(fmt.Sprintf("CKP[%s]: %s\n", ckpType[entries[x].GetType()], 1261 time.Unix(0, entries[x].GetEnd().Physical()))) 1262 1263 dbNames, tblNames, dbLen, tblLen := getAllDbAndTblNames(usageInsData[x]) 1264 for _, data := range usageInsData[x] { 1265 if checkUsageData(data, c) { 1266 eachCkpTotal += formatOutput(b, data, dbNames[0], tblNames[0], dbLen, tblLen, "insert") 1267 dbNames = dbNames[1:] 1268 tblNames = tblNames[1:] 1269 } 1270 } 1271 1272 dbNames, tblNames, dbLen, tblLen = getAllDbAndTblNames(usageDelData[x]) 1273 for _, data := range usageDelData[x] { 1274 if checkUsageData(data, c) { 1275 eachCkpTotal -= formatOutput(b, data, dbNames[0], tblNames[0], dbLen, tblLen, "delete") 1276 dbNames = dbNames[1:] 1277 tblNames = tblNames[1:] 1278 } 1279 } 1280 1281 if eachCkpTotal != 0 { 1282 b.WriteString(fmt.Sprintf("\n\taccumulation: %f (mb)\n", eachCkpTotal)) 1283 } 1284 1285 totalSize += eachCkpTotal 1286 1287 b.WriteByte('\n') 1288 } 1289 1290 b.WriteString(fmt.Sprintf( 1291 "total accumulation in all ckps: %f (mb), current tn cache mem used: %f (mb)\n", 1292 totalSize, c.ctx.db.GetUsageMemo().MemoryUsed())) 1293 1294 c.ctx.resp.Payload = b.Bytes() 1295 return nil 1296 } 1297 1298 func storageTrace(c *storageUsageHistoryArg) (err error) { 1299 1300 filter := func(accId uint64, stamp time.Time) bool { 1301 if !c.trace.tStart.IsZero() { 1302 if stamp.UTC().Add(time.Hour*8).Before(c.trace.tStart) || 1303 stamp.UTC().Add(time.Hour*8).After(c.trace.tEnd) { 1304 return false 1305 } 1306 } 1307 1308 if len(c.trace.accounts) != 0 { 1309 if _, ok := c.trace.accounts[accId]; !ok { 1310 return false 1311 } 1312 } 1313 return true 1314 } 1315 1316 var b bytes.Buffer 1317 1318 memo := c.ctx.db.GetUsageMemo() 1319 accIds, stamps, sizes, hints := memo.GetAllReqTrace() 1320 1321 preIdx := -1 1322 for idx := range stamps { 1323 if !filter(accIds[idx], stamps[idx]) { 1324 continue 1325 } 1326 if preIdx == -1 || !stamps[preIdx].Equal(stamps[idx]) { 1327 preIdx = idx 1328 b.WriteString(fmt.Sprintf("\n%s:\n", stamps[idx].String())) 1329 } 1330 1331 size := float64(sizes[idx]) / 1048576 1332 b.WriteString(fmt.Sprintf("\taccount id: %-10d\tsize: %15.6f\thint: %s\n", 1333 accIds[idx], size, hints[idx])) 1334 } 1335 1336 b.WriteString("\n") 1337 1338 c.ctx.resp.Payload = b.Bytes() 1339 1340 return nil 1341 } 1342 1343 func storageUsageTransfer(c *storageUsageHistoryArg) (err error) { 1344 cnt, size, err := logtail.CorrectUsageWrongPlacement(c.ctx.db.Catalog) 1345 if err != nil { 1346 return err 1347 } 1348 1349 c.ctx.out.Write([]byte(fmt.Sprintf("transferred %d tbl, %f mb; ", cnt, size))) 1350 return 1351 } 1352 1353 func storageUsageEliminateErrors(c *storageUsageHistoryArg) (err error) { 1354 entries := c.ctx.db.BGCheckpointRunner.GetAllCheckpoints() 1355 if len(entries) == 0 { 1356 return moerr.NewNotSupportedNoCtx("please execute this cmd after at least one checkpoint has been generated") 1357 } 1358 end := entries[len(entries)-1].GetEnd() 1359 cnt := logtail.EliminateErrorsOnCache(c.ctx.db.Catalog, end) 1360 c.ctx.out.Write([]byte(fmt.Sprintf("%d tables backed to the track. ", cnt))) 1361 1362 return nil 1363 }