github.com/matrixorigin/matrixone@v1.2.0/pkg/vm/engine/tae/rpc/handle.go 1 // Copyright 2021 - 2022 Matrix Origin 2 // 3 // Licensed under the Apache License, Version 2.0 (the "License"); 4 // you may not use this file except in compliance with the License. 5 // You may obtain a copy of the License at 6 // 7 // http://www.apache.org/licenses/LICENSE-2.0 8 // 9 // Unless required by applicable law or agreed to in writing, software 10 // distributed under the License is distributed on an "AS IS" BASIS, 11 // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 // See the License for the specific language governing permissions and 13 // limitations under the License. 14 15 package rpc 16 17 import ( 18 "bytes" 19 "context" 20 "fmt" 21 "os" 22 "regexp" 23 "runtime" 24 "strings" 25 "sync/atomic" 26 "syscall" 27 "time" 28 29 "golang.org/x/exp/slices" 30 31 "github.com/google/shlex" 32 "github.com/matrixorigin/matrixone/pkg/catalog" 33 "github.com/matrixorigin/matrixone/pkg/common/moerr" 34 "github.com/matrixorigin/matrixone/pkg/common/util" 35 "github.com/matrixorigin/matrixone/pkg/container/batch" 36 "github.com/matrixorigin/matrixone/pkg/container/types" 37 "github.com/matrixorigin/matrixone/pkg/container/vector" 38 "github.com/matrixorigin/matrixone/pkg/defines" 39 "github.com/matrixorigin/matrixone/pkg/logutil" 40 "github.com/matrixorigin/matrixone/pkg/objectio" 41 "github.com/matrixorigin/matrixone/pkg/pb/api" 42 "github.com/matrixorigin/matrixone/pkg/pb/timestamp" 43 "github.com/matrixorigin/matrixone/pkg/pb/txn" 44 "github.com/matrixorigin/matrixone/pkg/perfcounter" 45 "github.com/matrixorigin/matrixone/pkg/util/fault" 46 v2 "github.com/matrixorigin/matrixone/pkg/util/metric/v2" 47 "github.com/matrixorigin/matrixone/pkg/util/trace" 48 "github.com/matrixorigin/matrixone/pkg/vm/engine/tae/blockio" 49 catalog2 "github.com/matrixorigin/matrixone/pkg/vm/engine/tae/catalog" 50 "github.com/matrixorigin/matrixone/pkg/vm/engine/tae/common" 51 "github.com/matrixorigin/matrixone/pkg/vm/engine/tae/containers" 52 "github.com/matrixorigin/matrixone/pkg/vm/engine/tae/db" 53 "github.com/matrixorigin/matrixone/pkg/vm/engine/tae/db/merge" 54 "github.com/matrixorigin/matrixone/pkg/vm/engine/tae/gc" 55 "github.com/matrixorigin/matrixone/pkg/vm/engine/tae/iface/rpchandle" 56 "github.com/matrixorigin/matrixone/pkg/vm/engine/tae/iface/txnif" 57 "github.com/matrixorigin/matrixone/pkg/vm/engine/tae/logtail" 58 "github.com/matrixorigin/matrixone/pkg/vm/engine/tae/options" 59 "github.com/matrixorigin/matrixone/pkg/vm/engine/tae/tables/jobs" 60 "go.uber.org/zap" 61 ) 62 63 const ( 64 MAX_ALLOWED_TXN_LATENCY = time.Millisecond * 300 65 MAX_TXN_COMMIT_LATENCY = time.Minute * 2 66 ) 67 68 type Handle struct { 69 db *db.DB 70 txnCtxs *common.Map[string, *txnContext] 71 GCManager *gc.Manager 72 73 interceptMatchRegexp atomic.Pointer[regexp.Regexp] 74 } 75 76 func (h *Handle) IsInterceptTable(name string) bool { 77 interceptMatchRegexp := h.getInterceptMatchRegexp() 78 if interceptMatchRegexp == nil { 79 return false 80 } 81 return interceptMatchRegexp.MatchString(name) 82 } 83 84 func (h *Handle) getInterceptMatchRegexp() *regexp.Regexp { 85 return h.interceptMatchRegexp.Load() 86 } 87 88 func (h *Handle) UpdateInterceptMatchRegexp(name string) { 89 if name == "" { 90 h.interceptMatchRegexp.Store(nil) 91 return 92 } 93 h.interceptMatchRegexp.Store(regexp.MustCompile(fmt.Sprintf(`.*%s.*`, name))) 94 } 95 96 var _ rpchandle.Handler = (*Handle)(nil) 97 98 type txnContext struct { 99 //createAt is used to
GC the abandoned txn. 100 createAt time.Time 101 deadline time.Time 102 meta txn.TxnMeta 103 reqs []any 104 //the tables to be created by this txn. 105 toCreate map[uint64]*catalog2.Schema 106 } 107 108 func (h *Handle) GetDB() *db.DB { 109 return h.db 110 } 111 112 func NewTAEHandle(ctx context.Context, path string, opt *options.Options) *Handle { 113 if path == "" { 114 path = "./store" 115 } 116 tae, err := openTAE(ctx, path, opt) 117 if err != nil { 118 panic(err) 119 } 120 121 h := &Handle{ 122 db: tae, 123 } 124 h.txnCtxs = common.NewMap[string, *txnContext](runtime.GOMAXPROCS(0)) 125 126 h.GCManager = gc.NewManager( 127 gc.WithCronJob( 128 "clean-txn-cache", 129 MAX_TXN_COMMIT_LATENCY, 130 func(ctx context.Context) error { 131 return h.GCCache(time.Now()) 132 }, 133 ), 134 ) 135 h.GCManager.Start() 136 137 return h 138 } 139 140 // TODO: a vast number of items in h.txnCtxs would incur a performance penalty. 141 func (h *Handle) GCCache(now time.Time) error { 142 logutil.Infof("GC rpc handle txn cache") 143 h.txnCtxs.DeleteIf(func(k string, v *txnContext) bool { 144 return v.deadline.Before(now) 145 }) 146 return nil 147 } 148 149 func (h *Handle) HandleCommit( 150 ctx context.Context, 151 meta txn.TxnMeta) (cts timestamp.Timestamp, err error) { 152 start := time.Now() 153 txnCtx, ok := h.txnCtxs.Load(util.UnsafeBytesToString(meta.GetID())) 154 common.DoIfDebugEnabled(func() { 155 logutil.Debugf("HandleCommit start : %X", 156 string(meta.GetID())) 157 }) 158 defer func() { 159 if ok { 160 //delete the txn's context. 161 h.txnCtxs.Delete(util.UnsafeBytesToString(meta.GetID())) 162 } 163 common.DoIfInfoEnabled(func() { 164 if time.Since(start) > MAX_ALLOWED_TXN_LATENCY { 165 logutil.Info("Commit with long latency", zap.Duration("duration", time.Since(start)), zap.String("debug", meta.DebugString())) 166 } 167 }) 168 }() 169 var txn txnif.AsyncTxn 170 if ok { 171 //Handle the precommit-write command for 1PC. 172 txn, err = h.db.GetOrCreateTxnWithMeta(nil, meta.GetID(), 173 types.TimestampToTS(meta.GetSnapshotTS())) 174 if err != nil { 175 return 176 } 177 err = h.handleRequests(ctx, txn, txnCtx) 178 if err != nil { 179 return 180 } 181 } 182 txn, err = h.db.GetTxnByID(meta.GetID()) 183 if err != nil { 184 return 185 } 186 //if txn is 2PC, need to set the commit timestamp passed by the coordinator. 187 if txn.Is2PC() { 188 txn.SetCommitTS(types.TimestampToTS(meta.GetCommitTS())) 189 } 190 191 v2.TxnBeforeCommitDurationHistogram.Observe(time.Since(start).Seconds()) 192 193 err = txn.Commit(ctx) 194 cts = txn.GetCommitTS().ToTimestamp() 195 196 if moerr.IsMoErrCode(err, moerr.ErrTAENeedRetry) { 197 for { 198 txn, err = h.db.StartTxnWithStartTSAndSnapshotTS(nil, 199 types.TimestampToTS(meta.GetSnapshotTS())) 200 if err != nil { 201 return 202 } 203 logutil.Infof("retry txn %X with new txn %X", string(meta.GetID()), txn.GetID()) 204 //Handle the precommit-write command for 1PC. 205 err = h.handleRequests(ctx, txn, txnCtx) 206 if err != nil && !moerr.IsMoErrCode(err, moerr.ErrTAENeedRetry) { 207 break 208 } 209 //if txn is 2PC, need to set the commit timestamp passed by the coordinator.
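//Note: each retry iteration runs on a brand-new txn that reuses the
//original snapshot TS (started above via StartTxnWithStartTSAndSnapshotTS),
//so the retried commit reads the same snapshot but acquires a fresh commit
//timestamp.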
210 if txn.Is2PC() { 211 txn.SetCommitTS(types.TimestampToTS(meta.GetCommitTS())) 212 } 213 err = txn.Commit(ctx) 214 cts = txn.GetCommitTS().ToTimestamp() 215 if !moerr.IsMoErrCode(err, moerr.ErrTAENeedRetry) { 216 break 217 } 218 } 219 } 220 return 221 } 222 223 func (h *Handle) handleRequests( 224 ctx context.Context, 225 txn txnif.AsyncTxn, 226 txnCtx *txnContext, 227 ) (err error) { 228 var createDB, createRelation, dropDB, dropRelation, alterTable, write int 229 for _, e := range txnCtx.reqs { 230 switch req := e.(type) { 231 case *db.CreateDatabaseReq: 232 err = h.HandleCreateDatabase( 233 ctx, 234 txn, 235 req, 236 &db.CreateDatabaseResp{}, 237 ) 238 createDB++ 239 case *db.CreateRelationReq: 240 err = h.HandleCreateRelation( 241 ctx, 242 txn, 243 req, 244 &db.CreateRelationResp{}, 245 ) 246 createRelation++ 247 case *db.DropDatabaseReq: 248 err = h.HandleDropDatabase( 249 ctx, 250 txn, 251 req, 252 &db.DropDatabaseResp{}, 253 ) 254 dropDB++ 255 case *db.DropOrTruncateRelationReq: 256 err = h.HandleDropOrTruncateRelation( 257 ctx, 258 txn, 259 req, 260 &db.DropOrTruncateRelationResp{}, 261 ) 262 dropRelation++ 263 case *api.AlterTableReq: 264 err = h.HandleAlterTable( 265 ctx, 266 txn, 267 req, 268 &db.WriteResp{}, 269 ) 270 alterTable++ 271 case *db.WriteReq: 272 err = h.HandleWrite( 273 ctx, 274 txn, 275 req, 276 &db.WriteResp{}, 277 ) 278 write++ 279 default: 280 err = moerr.NewNotSupported(ctx, "unknown txn request type: %T", req) 281 } 282 //Need to roll back the txn. 283 if err != nil { 284 txn.Rollback(ctx) 285 return 286 } 287 } 288 return 289 } 290 291 func (h *Handle) HandleRollback( 292 ctx context.Context, 293 meta txn.TxnMeta) (err error) { 294 _, ok := h.txnCtxs.LoadAndDelete(util.UnsafeBytesToString(meta.GetID())) 295 296 //Rollback after pre-commit write. 297 if ok { 298 return 299 } 300 txn, err := h.db.GetTxnByID(meta.GetID()) 301 302 if err != nil { 303 return err 304 } 305 err = txn.Rollback(ctx) 306 return 307 } 308 309 func (h *Handle) HandleCommitting( 310 ctx context.Context, 311 meta txn.TxnMeta) (err error) { 312 txn, err := h.db.GetTxnByID(meta.GetID()) 313 if err != nil { 314 return err 315 } 316 txn.SetCommitTS(types.TimestampToTS(meta.GetCommitTS())) 317 err = txn.Committing() 318 return 319 } 320 321 func (h *Handle) HandlePrepare( 322 ctx context.Context, 323 meta txn.TxnMeta) (pts timestamp.Timestamp, err error) { 324 txnCtx, ok := h.txnCtxs.Load(util.UnsafeBytesToString(meta.GetID())) 325 var txn txnif.AsyncTxn 326 defer func() { 327 if ok { 328 //delete the txn's context. 329 h.txnCtxs.Delete(util.UnsafeBytesToString(meta.GetID())) 330 } 331 }() 332 if ok { 333 //handle pre-commit write for 2PC 334 txn, err = h.db.GetOrCreateTxnWithMeta(nil, meta.GetID(), 335 types.TimestampToTS(meta.GetSnapshotTS())) 336 if err != nil { 337 return 338 } 339 h.handleRequests(ctx, txn, txnCtx) 340 } 341 txn, err = h.db.GetTxnByID(meta.GetID()) 342 if err != nil { 343 return timestamp.Timestamp{}, err 344 } 345 participants := make([]uint64, 0, len(meta.GetTNShards())) 346 for _, shard := range meta.GetTNShards() { 347 participants = append(participants, shard.GetShardID()) 348 } 349 txn.SetParticipants(participants) 350 var ts types.TS 351 ts, err = txn.Prepare(ctx) 352 pts = ts.ToTimestamp() 353 return 354 } 355 356 func (h *Handle) HandleStartRecovery( 357 ctx context.Context, 358 ch chan txn.TxnMeta) { 359 //panic(moerr.NewNYI("HandleStartRecovery is not implemented yet")) 360 //TODO:: 1. 
Get the 2PC transactions which are in prepared or 361 // committing state from the txn engine's recovery. 362 // 2. Feed these transactions into ch. 363 close(ch) 364 } 365 366 func (h *Handle) HandleClose(ctx context.Context) (err error) { 367 //FIXME::should we wait until the txn requests' jobs are done? 368 if h.GCManager != nil { 369 h.GCManager.Stop() 370 } 371 return h.db.Close() 372 } 373 374 func (h *Handle) HandleDestroy(ctx context.Context) (err error) { 375 //FIXME::should we wait until the txn requests' jobs are done? 376 return 377 } 378 379 func (h *Handle) HandleGetLogTail( 380 ctx context.Context, 381 meta txn.TxnMeta, 382 req *api.SyncLogTailReq, 383 resp *api.SyncLogTailResp) (closeCB func(), err error) { 384 res, closeCB, err := logtail.HandleSyncLogTailReq( 385 ctx, 386 h.db.BGCheckpointRunner, 387 h.db.LogtailMgr, 388 h.db.Catalog, 389 *req, 390 true) 391 if err != nil { 392 return 393 } 394 *resp = res 395 return 396 } 397 398 func (h *Handle) HandleCommitMerge( 399 ctx context.Context, 400 meta txn.TxnMeta, 401 req *api.MergeCommitEntry, 402 resp *db.InspectResp) (cb func(), err error) { 403 404 defer func() { 405 if err != nil { 406 e := moerr.DowncastError(err) 407 logutil.Error("mergeblocks err handle commit merge", 408 zap.String("table", fmt.Sprintf("%v-%v", req.TblId, req.TableName)), 409 zap.String("start-ts", req.StartTs.DebugString()), 410 zap.String("error", e.Display())) 411 } 412 413 }() 414 txn, err := h.db.GetOrCreateTxnWithMeta(nil, meta.GetID(), 415 types.TimestampToTS(meta.GetSnapshotTS())) 416 if err != nil { 417 return 418 } 419 ids := make([]objectio.ObjectId, 0, len(req.MergedObjs)) 420 for _, o := range req.MergedObjs { 421 stat := objectio.ObjectStats(o) 422 ids = append(ids, *stat.ObjectName().ObjectId()) 423 } 424 merge.ActiveCNObj.RemoveActiveCNObj(ids) 425 if req.Err != "" { 426 resp.Message = req.Err 427 err = moerr.NewInternalError(ctx, "merge err in cn: %s", req.Err) 428 return 429 } 430 431 defer func() { 432 if err != nil { 433 txn.Rollback(ctx) 434 resp.Message = err.Error() 435 merge.CleanUpUselessFiles(req, h.db.Runtime.Fs.Service) 436 } 437 }() 438 439 if len(req.BookingLoc) > 0 { 440 // load transfer info from s3 441 if req.Booking != nil { 442 logutil.Error("mergeblocks err booking loc is not empty, but booking is not nil") 443 } 444 if len(req.BookingLoc) == objectio.LocationLen { 445 loc := objectio.Location(req.BookingLoc) 446 var bat *batch.Batch 447 var release func() 448 bat, release, err = blockio.LoadTombstoneColumns(ctx, []uint16{0}, nil, h.db.Runtime.Fs.Service, loc, nil) 449 if err != nil { 450 return 451 } 452 req.Booking = &api.BlkTransferBooking{} 453 err = req.Booking.Unmarshal(bat.Vecs[0].GetBytesAt(0)) 454 if err != nil { 455 release() 456 return 457 } 458 release() 459 h.db.Runtime.Fs.Service.Delete(ctx, loc.Name().String()) 460 bat = nil 461 } else { 462 // multiple booking locations: the fragments have to be copied and concatenated 463 idx := 0 464 locations := req.BookingLoc 465 data := make([]byte, 0, 2<<30) 466 for ; idx < len(locations); idx += objectio.LocationLen { 467 loc := objectio.Location(locations[idx : idx+objectio.LocationLen]) 468 var bat *batch.Batch 469 var release func() 470 bat, release, err = blockio.LoadTombstoneColumns(ctx, []uint16{0}, nil, h.db.Runtime.Fs.Service, loc, nil) 471 if err != nil { 472 return 473 } 474 data = append(data, bat.Vecs[0].GetBytesAt(0)...)
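// Each BookingLoc entry contributes one marshaled fragment; the fragments
// are concatenated in order and unmarshaled into a single
// api.BlkTransferBooking once the loop finishes.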
475 release() 476 h.db.Runtime.Fs.Service.Delete(ctx, loc.Name().String()) 477 bat = nil 478 } 479 req.Booking = &api.BlkTransferBooking{} 480 if err = req.Booking.Unmarshal(data); err != nil { 481 return 482 } 483 } 484 } 485 486 _, err = jobs.HandleMergeEntryInTxn(txn, req, h.db.Runtime) 487 if err != nil { 488 return 489 } 490 err = txn.Commit(ctx) 491 if err == nil { 492 b := &bytes.Buffer{} 493 b.WriteString("merged success\n") 494 for _, o := range req.CreatedObjs { 495 stat := objectio.ObjectStats(o) 496 b.WriteString(fmt.Sprintf("%v, rows %v, blks %v, osize %v, csize %v", 497 stat.ObjectName().String(), stat.Rows(), stat.BlkCnt(), 498 common.HumanReadableBytes(int(stat.OriginSize())), 499 common.HumanReadableBytes(int(stat.Size())), 500 )) 501 b.WriteByte('\n') 502 } 503 resp.Message = b.String() 504 } 505 return nil, err 506 } 507 508 func (h *Handle) HandleFlushTable( 509 ctx context.Context, 510 meta txn.TxnMeta, 511 req *db.FlushTable, 512 resp *api.SyncLogTailResp) (cb func(), err error) { 513 514 // We use current TS instead of transaction ts. 515 // Here, the point of this handle function is to trigger a flush 516 // via mo_ctl. We mimic the behaviour of a real background flush 517 // currTs := types.TimestampToTS(meta.GetSnapshotTS()) 518 currTs := types.BuildTS(time.Now().UTC().UnixNano(), 0) 519 520 err = h.db.FlushTable( 521 ctx, 522 req.AccessInfo.AccountID, 523 req.DatabaseID, 524 req.TableID, 525 currTs) 526 return nil, err 527 } 528 529 func (h *Handle) HandleForceGlobalCheckpoint( 530 ctx context.Context, 531 meta txn.TxnMeta, 532 req *db.Checkpoint, 533 resp *api.SyncLogTailResp) (cb func(), err error) { 534 535 timeout := req.FlushDuration 536 537 currTs := types.BuildTS(time.Now().UTC().UnixNano(), 0) 538 539 err = h.db.ForceGlobalCheckpoint(ctx, currTs, timeout) 540 return nil, err 541 } 542 543 func (h *Handle) HandleForceCheckpoint( 544 ctx context.Context, 545 meta txn.TxnMeta, 546 req *db.Checkpoint, 547 resp *api.SyncLogTailResp) (cb func(), err error) { 548 549 timeout := req.FlushDuration 550 551 currTs := types.BuildTS(time.Now().UTC().UnixNano(), 0) 552 553 err = h.db.ForceCheckpoint(ctx, currTs, timeout) 554 return nil, err 555 } 556 557 func (h *Handle) HandleBackup( 558 ctx context.Context, 559 meta txn.TxnMeta, 560 req *db.Checkpoint, 561 resp *api.SyncLogTailResp) (cb func(), err error) { 562 563 timeout := req.FlushDuration 564 565 backupTime := time.Now().UTC() 566 currTs := types.BuildTS(backupTime.UnixNano(), 0) 567 var locations string 568 locations += backupTime.Format(time.DateTime) + ";" 569 location, err := h.db.ForceCheckpointForBackup(ctx, currTs, timeout) 570 if err != nil { 571 return nil, err 572 } 573 data := h.db.BGCheckpointRunner.GetAllCheckpoints() 574 locations += location + ";" 575 for i := range data { 576 locations += data[i].GetLocation().String() 577 locations += ":" 578 locations += fmt.Sprintf("%d", data[i].GetVersion()) 579 locations += ";" 580 } 581 resp.CkpLocation = locations 582 return nil, err 583 } 584 585 func (h *Handle) HandleInterceptCommit( 586 ctx context.Context, 587 meta txn.TxnMeta, 588 req *db.InterceptCommit, 589 resp *api.SyncLogTailResp) (cb func(), err error) { 590 591 name := req.TableName 592 h.UpdateInterceptMatchRegexp(name) 593 return nil, err 594 } 595 596 func (h *Handle) HandleInspectTN( 597 ctx context.Context, 598 meta txn.TxnMeta, 599 req *db.InspectTN, 600 resp *db.InspectResp) (cb func(), err error) { 601 defer func() { 602 if e := recover(); e != nil { 603 err = 
moerr.ConvertPanicError(ctx, e) 604 logutil.Error( 605 "panic in inspect dn", 606 zap.String("cmd", req.Operation), 607 zap.String("error", err.Error())) 608 } 609 }() 610 args, _ := shlex.Split(req.Operation) 611 common.DoIfDebugEnabled(func() { 612 logutil.Debug("Inspect", zap.Strings("args", args)) 613 }) 614 b := &bytes.Buffer{} 615 616 inspectCtx := &inspectContext{ 617 db: h.db, 618 acinfo: &req.AccessInfo, 619 args: args, 620 out: b, 621 resp: resp, 622 } 623 RunInspect(ctx, inspectCtx) 624 resp.Message = b.String() 625 return nil, nil 626 } 627 628 func (h *Handle) prefetchDeleteRowID(ctx context.Context, req *db.WriteReq) error { 629 if len(req.DeltaLocs) == 0 { 630 return nil 631 } 632 //for loading deleted rowids. 633 columnIdx := 0 634 pkIdx := 1 635 //start loading jobs asynchronously; a new root context should be created. 636 loc, err := blockio.EncodeLocationFromString(req.DeltaLocs[0]) 637 if err != nil { 638 return err 639 } 640 pref, err := blockio.BuildPrefetchParams(h.db.Runtime.Fs.Service, loc) 641 if err != nil { 642 return err 643 } 644 for _, key := range req.DeltaLocs { 645 var location objectio.Location 646 location, err = blockio.EncodeLocationFromString(key) 647 if err != nil { 648 return err 649 } 650 pref.AddBlockWithType([]uint16{uint16(columnIdx), uint16(pkIdx)}, []uint16{location.ID()}, uint16(objectio.SchemaTombstone)) 651 } 652 return blockio.PrefetchWithMerged(pref) 653 } 654 655 func (h *Handle) prefetchMetadata(ctx context.Context, 656 req *db.WriteReq) (int, error) { 657 if len(req.MetaLocs) == 0 { 658 return 0, nil 659 } 660 //start loading jobs asynchronously; a new root context should be created. 661 objCnt := 0 662 var objectName objectio.ObjectNameShort 663 for _, meta := range req.MetaLocs { 664 loc, err := blockio.EncodeLocationFromString(meta) 665 if err != nil { 666 return 0, err 667 } 668 if !objectio.IsSameObjectLocVsShort(loc, &objectName) { 669 err := blockio.PrefetchMeta(h.db.Runtime.Fs.Service, loc) 670 if err != nil { 671 return 0, err 672 } 673 objCnt++ 674 objectName = *loc.Name().Short() 675 } 676 } 677 return objCnt, nil 678 } 679 680 // EvaluateTxnRequest only evaluates the request; it does not change the state machine of the TxnEngine.
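//For cached WriteReqs that reference files it kicks off asynchronous
//prefetch of the tombstone columns (for deletes) or of the object metadata
//(for inserts), so the actual work done at commit time hits warm caches.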
681 func (h *Handle) EvaluateTxnRequest( 682 ctx context.Context, 683 meta txn.TxnMeta, 684 ) error { 685 txnCtx, _ := h.txnCtxs.Load(util.UnsafeBytesToString(meta.GetID())) 686 687 metaLocCnt := 0 688 deltaLocCnt := 0 689 690 defer func() { 691 if metaLocCnt != 0 { 692 v2.TxnCNCommittedMetaLocationQuantityGauge.Set(float64(metaLocCnt)) 693 } 694 695 if deltaLocCnt != 0 { 696 v2.TxnCNCommittedDeltaLocationQuantityGauge.Set(float64(deltaLocCnt)) 697 } 698 }() 699 700 for _, e := range txnCtx.reqs { 701 if r, ok := e.(*db.WriteReq); ok { 702 if r.FileName != "" { 703 if r.Type == db.EntryDelete { 704 // start to load deleted row ids 705 deltaLocCnt += len(r.DeltaLocs) 706 if err := h.prefetchDeleteRowID(ctx, r); err != nil { 707 return err 708 } 709 } else if r.Type == db.EntryInsert { 710 objCnt, err := h.prefetchMetadata(ctx, r) 711 if err != nil { 712 return err 713 } 714 metaLocCnt += objCnt 715 } 716 } 717 } 718 } 719 return nil 720 } 721 722 func (h *Handle) CacheTxnRequest( 723 ctx context.Context, 724 meta txn.TxnMeta, 725 req any, 726 rsp any) (err error) { 727 txnCtx, ok := h.txnCtxs.Load(util.UnsafeBytesToString(meta.GetID())) 728 if !ok { 729 now := time.Now() 730 txnCtx = &txnContext{ 731 createAt: now, 732 deadline: now.Add(MAX_TXN_COMMIT_LATENCY), 733 meta: meta, 734 toCreate: make(map[uint64]*catalog2.Schema), 735 } 736 h.txnCtxs.Store(util.UnsafeBytesToString(meta.GetID()), txnCtx) 737 } 738 txnCtx.reqs = append(txnCtx.reqs, req) 739 if r, ok := req.(*db.CreateRelationReq); ok { 740 // Does this place need 741 schema, err := DefsToSchema(r.Name, r.Defs) 742 if err != nil { 743 return err 744 } 745 txnCtx.toCreate[r.RelationId] = schema 746 } 747 return nil 748 } 749 750 func (h *Handle) HandlePreCommitWrite( 751 ctx context.Context, 752 meta txn.TxnMeta, 753 req *api.PrecommitWriteCmd, 754 resp *api.SyncLogTailResp) (err error) { 755 var e any 756 es := req.EntryList 757 for len(es) > 0 { 758 e, es, err = catalog.ParseEntryList(es) 759 if err != nil { 760 return err 761 } 762 switch cmds := e.(type) { 763 case []catalog.CreateDatabase: 764 for _, cmd := range cmds { 765 req := &db.CreateDatabaseReq{ 766 Name: cmd.Name, 767 CreateSql: cmd.CreateSql, 768 DatabaseId: cmd.DatabaseId, 769 AccessInfo: db.AccessInfo{ 770 UserID: cmd.Creator, 771 RoleID: cmd.Owner, 772 AccountID: cmd.AccountId, 773 }, 774 DatTyp: cmd.DatTyp, 775 } 776 if err = h.CacheTxnRequest(ctx, meta, req, 777 new(db.CreateDatabaseResp)); err != nil { 778 return err 779 } 780 } 781 case []catalog.CreateTable: 782 for _, cmd := range cmds { 783 req := &db.CreateRelationReq{ 784 AccessInfo: db.AccessInfo{ 785 UserID: cmd.Creator, 786 RoleID: cmd.Owner, 787 AccountID: cmd.AccountId, 788 }, 789 Name: cmd.Name, 790 RelationId: cmd.TableId, 791 DatabaseName: cmd.DatabaseName, 792 DatabaseID: cmd.DatabaseId, 793 Defs: cmd.Defs, 794 } 795 // TODO: debug for #11917 796 if strings.Contains(req.Name, "sbtest") { 797 logutil.Infof("create table: %s.%s\n", req.DatabaseName, req.Name) 798 } 799 if err = h.CacheTxnRequest(ctx, meta, req, 800 new(db.CreateRelationResp)); err != nil { 801 return err 802 } 803 } 804 case []catalog.UpdateConstraint: 805 for _, cmd := range cmds { 806 req := api.NewUpdateConstraintReq( 807 cmd.DatabaseId, 808 cmd.TableId, 809 string(cmd.Constraint)) 810 if err = h.CacheTxnRequest(ctx, meta, req, nil); err != nil { 811 return err 812 } 813 } 814 case []*api.AlterTableReq: 815 for _, cmd := range cmds { 816 if err = h.CacheTxnRequest(ctx, meta, cmd, nil); err != nil { 817 return err 818 } 819 } 820 
case []catalog.DropDatabase: 821 for _, cmd := range cmds { 822 req := &db.DropDatabaseReq{ 823 Name: cmd.Name, 824 ID: cmd.Id, 825 } 826 if err = h.CacheTxnRequest(ctx, meta, req, 827 new(db.DropDatabaseResp)); err != nil { 828 return err 829 } 830 } 831 case []catalog.DropOrTruncateTable: 832 for _, cmd := range cmds { 833 req := &db.DropOrTruncateRelationReq{ 834 IsDrop: cmd.IsDrop, 835 Name: cmd.Name, 836 ID: cmd.Id, 837 NewId: cmd.NewId, 838 DatabaseName: cmd.DatabaseName, 839 DatabaseID: cmd.DatabaseId, 840 } 841 logutil.Infof("dropOrTruncateRelation isDrop: %v, name: %s, id: %d, newId: %d, databaseName: %s, databaseId: %d\n", 842 req.IsDrop, req.Name, req.ID, req.NewId, req.DatabaseName, req.DatabaseID) 843 if err = h.CacheTxnRequest(ctx, meta, req, 844 new(db.DropOrTruncateRelationResp)); err != nil { 845 return err 846 } 847 } 848 case *api.Entry: 849 //Handle DML 850 pe := e.(*api.Entry) 851 moBat, err := batch.ProtoBatchToBatch(pe.GetBat()) 852 if err != nil { 853 panic(err) 854 } 855 req := &db.WriteReq{ 856 Type: db.EntryType(pe.EntryType), 857 DatabaseId: pe.GetDatabaseId(), 858 TableID: pe.GetTableId(), 859 DatabaseName: pe.GetDatabaseName(), 860 TableName: pe.GetTableName(), 861 FileName: pe.GetFileName(), 862 Batch: moBat, 863 PkCheck: db.PKCheckType(pe.GetPkCheckByTn()), 864 } 865 if req.FileName != "" { 866 rows := catalog.GenRows(req.Batch) 867 for _, row := range rows { 868 if req.Type == db.EntryInsert { 869 //req.Blks[i] = row[catalog.BLOCKMETA_ID_ON_FS_IDX].(uint64) 870 //req.MetaLocs[i] = string(row[catalog.BLOCKMETA_METALOC_ON_FS_IDX].([]byte)) 871 req.MetaLocs = append(req.MetaLocs, 872 string(row[0].([]byte))) 873 } else { 874 //req.DeltaLocs[i] = string(row[0].([]byte)) 875 req.DeltaLocs = append(req.DeltaLocs, 876 string(row[0].([]byte))) 877 } 878 } 879 } 880 if err = h.CacheTxnRequest(ctx, meta, req, 881 new(db.WriteResp)); err != nil { 882 return err 883 } 884 default: 885 return moerr.NewNYI(ctx, "pre commit write type: %T", cmds) 886 } 887 } 888 //evaluate all the txn requests. 889 return h.EvaluateTxnRequest(ctx, meta) 890 } 891 892 //Handle DDL commands. 
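//
//A DDL command never touches the catalog directly at this point:
//HandlePreCommitWrite parses it into a db.*Req value, CacheTxnRequest queues
//that request on the txn's context, and the handlers below replay it inside
//a TAE txn when the txn commits or prepares. A minimal sketch of driving the
//flow (illustrative only; cmd is a hypothetical *api.PrecommitWriteCmd that
//carries, e.g., a CreateDatabase entry):
//
//	if err := h.HandlePreCommitWrite(ctx, meta, cmd, new(api.SyncLogTailResp)); err != nil {
//		return err
//	}
//	// HandleCommit replays the cached requests (HandleCreateDatabase, ...)
//	// inside a TAE txn and commits it.
//	cts, err := h.HandleCommit(ctx, meta)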
893 894 func (h *Handle) HandleCreateDatabase( 895 ctx context.Context, 896 txn txnif.AsyncTxn, 897 req *db.CreateDatabaseReq, 898 resp *db.CreateDatabaseResp) (err error) { 899 _, span := trace.Start(ctx, "HandleCreateDatabase") 900 defer span.End() 901 902 common.DoIfInfoEnabled(func() { 903 logutil.Infof("[precommit] create database: %+v txn: %s", req, txn.String()) 904 }) 905 defer func() { 906 common.DoIfDebugEnabled(func() { 907 logutil.Debugf("[precommit] create database end txn: %s", txn.String()) 908 }) 909 }() 910 911 ctx = defines.AttachAccount(ctx, req.AccessInfo.AccountID, req.AccessInfo.UserID, req.AccessInfo.RoleID) 912 ctx = context.WithValue(ctx, defines.DatTypKey{}, req.DatTyp) 913 if _, err = txn.CreateDatabaseWithCtx( 914 ctx, 915 req.Name, 916 req.CreateSql, 917 req.DatTyp, 918 req.DatabaseId); err != nil { 919 return 920 } 921 resp.ID = req.DatabaseId 922 return 923 } 924 925 func (h *Handle) HandleDropDatabase( 926 ctx context.Context, 927 txn txnif.AsyncTxn, 928 req *db.DropDatabaseReq, 929 resp *db.DropDatabaseResp) (err error) { 930 931 common.DoIfInfoEnabled(func() { 932 logutil.Infof("[precommit] drop database: %+v txn: %s", req, txn.String()) 933 }) 934 defer func() { 935 common.DoIfDebugEnabled(func() { 936 logutil.Debugf("[precommit] drop database end: %s", txn.String()) 937 }) 938 }() 939 940 if _, err = txn.DropDatabaseByID(req.ID); err != nil { 941 return 942 } 943 resp.ID = req.ID 944 return 945 } 946 947 func (h *Handle) HandleCreateRelation( 948 ctx context.Context, 949 txn txnif.AsyncTxn, 950 req *db.CreateRelationReq, 951 resp *db.CreateRelationResp) (err error) { 952 953 common.DoIfInfoEnabled(func() { 954 logutil.Infof("[precommit] create relation: %+v txn: %s", req, txn.String()) 955 }) 956 defer func() { 957 // do not turn it on in prod. 
This print outputs multiple duplicate lines. 958 common.DoIfDebugEnabled(func() { 959 logutil.Debugf("[precommit] create relation end txn: %s", txn.String()) 960 }) 961 }() 962 963 ctx = defines.AttachAccount(ctx, req.AccessInfo.AccountID, req.AccessInfo.UserID, req.AccessInfo.RoleID) 964 dbH, err := txn.GetDatabaseWithCtx(ctx, req.DatabaseName) 965 if err != nil { 966 return 967 } 968 969 if err = CreateRelation(ctx, dbH, req.Name, req.RelationId, req.Defs); err != nil { 970 return 971 } 972 973 resp.ID = req.RelationId 974 return 975 } 976 977 func (h *Handle) HandleDropOrTruncateRelation( 978 ctx context.Context, 979 txn txnif.AsyncTxn, 980 req *db.DropOrTruncateRelationReq, 981 resp *db.DropOrTruncateRelationResp) (err error) { 982 983 common.DoIfInfoEnabled(func() { 984 logutil.Infof("[precommit] drop/truncate relation: %+v txn: %s", req, txn.String()) 985 }) 986 defer func() { 987 common.DoIfDebugEnabled(func() { 988 logutil.Debugf("[precommit] drop/truncate relation end txn: %s", txn.String()) 989 }) 990 }() 991 992 dbase, err := txn.GetDatabaseByID(req.DatabaseID) 993 if err != nil { 994 return 995 } 996 997 if req.IsDrop { 998 _, err = dbase.DropRelationByID(req.ID) 999 return 1000 } 1001 _, err = dbase.TruncateByID(req.ID, req.NewId) 1002 return err 1003 } 1004 1005 func PrintTuple(tuple types.Tuple) string { 1006 res := "(" 1007 for i, t := range tuple { 1008 switch t := t.(type) { 1009 case int32: 1010 res += fmt.Sprintf("%v", t) 1011 } 1012 if i != len(tuple)-1 { 1013 res += "," 1014 } 1015 } 1016 res += ")" 1017 return res 1018 } 1019 1020 // HandleWrite handles DML commands. 1021 func (h *Handle) HandleWrite( 1022 ctx context.Context, 1023 txn txnif.AsyncTxn, 1024 req *db.WriteReq, 1025 resp *db.WriteResp) (err error) { 1026 defer func() { 1027 if req.Cancel != nil { 1028 req.Cancel() 1029 } 1030 }() 1031 ctx = perfcounter.WithCounterSetFrom(ctx, h.db.Opts.Ctx) 1032 switch req.PkCheck { 1033 case db.FullDedup: 1034 txn.SetDedupType(txnif.FullDedup) 1035 case db.IncrementalDedup: 1036 if h.db.Opts.IncrementalDedup { 1037 txn.SetDedupType(txnif.IncrementalDedup) 1038 } else { 1039 txn.SetDedupType(txnif.FullSkipWorkSpaceDedup) 1040 } 1041 case db.FullSkipWorkspaceDedup: 1042 txn.SetDedupType(txnif.FullSkipWorkSpaceDedup) 1043 } 1044 common.DoIfDebugEnabled(func() { 1045 logutil.Debugf("[precommit] handle write typ: %v, %d-%s, %d-%s txn: %s", 1046 req.Type, req.TableID, 1047 req.TableName, req.DatabaseId, req.DatabaseName, 1048 txn.String(), 1049 ) 1050 logutil.Debugf("[precommit] write batch: %s", common.DebugMoBatch(req.Batch)) 1051 }) 1052 defer func() { 1053 common.DoIfDebugEnabled(func() { 1054 logutil.Debugf("[precommit] handle write end txn: %s", txn.String()) 1055 }) 1056 }() 1057 1058 dbase, err := txn.GetDatabaseByID(req.DatabaseId) 1059 if err != nil { 1060 return 1061 } 1062 1063 tb, err := dbase.GetRelationByID(req.TableID) 1064 if err != nil { 1065 return 1066 } 1067 1068 if req.Type == db.EntryInsert { 1069 //Add blocks which have been bulk-loaded into S3 into the table.
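//In this path the batch carries object metadata instead of row data:
//req.MetaLocs lists the metadata locations and Batch.Vecs[1] holds the
//marshaled ObjectStats; the two are cross-checked below before the objects
//are attached to the table.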
1070 if req.FileName != "" { 1071 metalocations := make(map[string]struct{}) 1072 for _, metLoc := range req.MetaLocs { 1073 location, err := blockio.EncodeLocationFromString(metLoc) 1074 if err != nil { 1075 return err 1076 } 1077 metalocations[location.Name().String()] = struct{}{} 1078 } 1079 statsCNVec := req.Batch.Vecs[1] 1080 statsVec := containers.ToTNVector(statsCNVec, common.WorkspaceAllocator) 1081 for i := 0; i < statsVec.Length(); i++ { 1082 s := objectio.ObjectStats(statsVec.Get(i).([]byte)) 1083 delete(metalocations, s.ObjectName().String()) 1084 } 1085 if len(metalocations) != 0 { 1086 logutil.Warnf("tbl %v, did not receive stats for the following locations %v", req.TableName, metalocations) 1087 err = moerr.NewInternalError(ctx, "object stats doesn't match meta locations") 1088 return 1089 } 1090 err = tb.AddObjsWithMetaLoc(ctx, statsVec) 1091 return 1092 } 1093 //check that the input batch passed by CN is valid. 1094 length := 0 1095 for i, vec := range req.Batch.Vecs { 1096 if vec == nil { 1097 logutil.Errorf("the vec:%d in req.Batch is nil", i) 1098 panic("invalid vector : vector is nil") 1099 } 1100 if vec.Length() == 0 { 1101 logutil.Errorf("the vec:%d in req.Batch is empty", i) 1102 panic("invalid vector: vector is empty") 1103 } 1104 if i == 0 { 1105 length = vec.Length() 1106 } 1107 if vec.Length() != length { 1108 logutil.Errorf("the length of vec:%d in req.Batch is not equal to that of the first vec", i) 1109 panic("invalid batch : the length of vectors in batch is not the same") 1110 } 1111 } 1112 // TODO: debug for #13342, remove me later 1113 if h.IsInterceptTable(tb.Schema().(*catalog2.Schema).Name) { 1114 if tb.Schema().(*catalog2.Schema).HasPK() { 1115 idx := tb.Schema().(*catalog2.Schema).GetSingleSortKeyIdx() 1116 for i := 0; i < req.Batch.Vecs[0].Length(); i++ { 1117 logutil.Infof("op1 %v, %v", txn.GetStartTS().ToString(), common.MoVectorToString(req.Batch.Vecs[idx], i)) 1118 } 1119 } 1120 } 1121 //Append a batch of data into the table. 1122 err = AppendDataToTable(ctx, tb, req.Batch) 1123 return 1124 } 1125 1126 //handle delete 1127 if req.FileName != "" { 1128 //wait until loading of the deleted row-ids is done. 1129 nctx := context.Background() 1130 if deadline, ok := ctx.Deadline(); ok { 1131 _, req.Cancel = context.WithTimeout(nctx, time.Until(deadline)) 1132 } 1133 rowidIdx := 0 1134 pkIdx := 1 1135 for _, key := range req.DeltaLocs { 1136 var location objectio.Location 1137 location, err = blockio.EncodeLocationFromString(key) 1138 if err != nil { 1139 return err 1140 } 1141 var ok bool 1142 var vectors []containers.Vector 1143 var closeFunc func() 1144 //The lifetime of the vectors is managed within this function. 1145 //No NeedCopy: closeFunc is required after use. 1146 //closeFunc is not nil.
1147 vectors, closeFunc, err = blockio.LoadTombstoneColumns2( 1148 ctx, 1149 []uint16{uint16(rowidIdx), uint16(pkIdx)}, 1150 nil, 1151 h.db.Runtime.Fs.Service, 1152 location, 1153 false, 1154 nil, 1155 ) 1156 if err != nil { 1157 return 1158 } 1159 defer closeFunc() 1160 blkids := getBlkIDsFromRowids(vectors[0].GetDownstreamVector()) 1161 id := tb.GetMeta().(*catalog2.TableEntry).AsCommonID() 1162 if len(blkids) == 1 { 1163 for blkID := range blkids { 1164 id.BlockID = blkID 1165 } 1166 ok, err = tb.TryDeleteByDeltaloc(id, location) 1167 if err != nil { 1168 return 1169 } 1170 if ok { 1171 continue 1172 } 1173 logutil.Warnf("blk %v try delete by deltaloc failed", id.BlockID.String()) 1174 } else { 1175 logutil.Warnf("multiple blocks in one delta location") 1176 } 1177 rowIDVec := vectors[0] 1178 defer rowIDVec.Close() 1179 pkVec := vectors[1] 1180 //defer pkVec.Close() 1181 if err = tb.DeleteByPhyAddrKeys(rowIDVec, pkVec); err != nil { 1182 return 1183 } 1184 } 1185 return 1186 } 1187 if len(req.Batch.Vecs) != 2 { 1188 panic(fmt.Sprintf("req.Batch.Vecs length is %d, should be 2", len(req.Batch.Vecs))) 1189 } 1190 rowIDVec := containers.ToTNVector(req.Batch.GetVector(0), common.WorkspaceAllocator) 1191 defer rowIDVec.Close() 1192 pkVec := containers.ToTNVector(req.Batch.GetVector(1), common.WorkspaceAllocator) 1193 //defer pkVec.Close() 1194 // TODO: debug for #13342, remove me later 1195 if h.IsInterceptTable(tb.Schema().(*catalog2.Schema).Name) { 1196 if tb.Schema().(*catalog2.Schema).HasPK() { 1197 for i := 0; i < rowIDVec.Length(); i++ { 1198 rowID := objectio.HackBytes2Rowid(req.Batch.Vecs[0].GetRawBytesAt(i)) 1199 logutil.Infof("op2 %v %v %v", txn.GetStartTS().ToString(), common.MoVectorToString(req.Batch.Vecs[1], i), rowID.String()) 1200 } 1201 } 1202 } 1203 err = tb.DeleteByPhyAddrKeys(rowIDVec, pkVec) 1204 return 1205 } 1206 1207 func getBlkIDsFromRowids(vec *vector.Vector) map[types.Blockid]struct{} { 1208 rowids := vector.MustFixedCol[types.Rowid](vec) 1209 blkids := make(map[types.Blockid]struct{}) 1210 for _, rowid := range rowids { 1211 blkID := *rowid.BorrowBlockID() 1212 blkids[blkID] = struct{}{} 1213 } 1214 return blkids 1215 } 1216 1217 func (h *Handle) HandleAlterTable( 1218 ctx context.Context, 1219 txn txnif.AsyncTxn, 1220 req *api.AlterTableReq, 1221 resp *db.WriteResp) (err error) { 1222 common.DoIfInfoEnabled(func() { 1223 logutil.Debugf("[precommit] alter table: %v txn: %s", req.String(), txn.String()) 1224 }) 1225 1226 dbase, err := txn.GetDatabaseByID(req.DbId) 1227 if err != nil { 1228 return 1229 } 1230 1231 tbl, err := dbase.GetRelationByID(req.TableId) 1232 if err != nil { 1233 return 1234 } 1235 1236 return tbl.AlterTable(ctx, req) 1237 } 1238 1239 func (h *Handle) HandleAddFaultPoint( 1240 ctx context.Context, 1241 meta txn.TxnMeta, 1242 req *db.FaultPoint, 1243 resp *api.SyncLogTailResp) (func(), error) { 1244 if req.Name == db.EnableFaultInjection { 1245 fault.Enable() 1246 return nil, nil 1247 } else if req.Name == db.DisableFaultInjection { 1248 fault.Disable() 1249 return nil, nil 1250 } 1251 return nil, h.db.AddFaultPoint(ctx, req.Name, req.Freq, req.Action, req.Iarg, req.Sarg) 1252 } 1253 1254 func (h *Handle) HandleTraceSpan(ctx context.Context, 1255 meta txn.TxnMeta, 1256 req *db.TraceSpan, 1257 resp *api.SyncLogTailResp) (func(), error) { 1258 1259 return nil, nil 1260 } 1261 1262 func traverseCatalogForNewAccounts(c *catalog2.Catalog, memo *logtail.TNUsageMemo, ids []uint32) { 1263 if len(ids) == 0 { 1264 return 1265 } 1266 processor :=
new(catalog2.LoopProcessor) 1267 processor.DatabaseFn = func(entry *catalog2.DBEntry) error { 1268 if entry.HasDropCommitted() { 1269 return nil 1270 } 1271 1272 accId := entry.GetTenantID() 1273 if !slices.Contains(ids, accId) { 1274 return nil 1275 } 1276 1277 tblIt := entry.MakeTableIt(true) 1278 for tblIt.Valid() { 1279 insUsage := logtail.UsageData{ 1280 AccId: uint64(accId), DbId: entry.ID, TblId: tblIt.Get().GetPayload().ID} 1281 1282 tblEntry := tblIt.Get().GetPayload() 1283 if tblEntry.HasDropCommitted() { 1284 tblIt.Next() 1285 continue 1286 } 1287 1288 objIt := tblEntry.MakeObjectIt(true) 1289 for objIt.Valid() { 1290 objEntry := objIt.Get().GetPayload() 1291 // PXU TODO 1292 if !objEntry.IsAppendable() && !objEntry.HasDropCommitted() && objEntry.IsCommitted() { 1293 insUsage.Size += uint64(objEntry.GetCompSize()) 1294 } 1295 objIt.Next() 1296 } 1297 1298 if insUsage.Size > 0 { 1299 memo.UpdateNewAccCache(insUsage, false) 1300 } 1301 1302 tblIt.Next() 1303 } 1304 return nil 1305 } 1306 1307 c.RecurLoop(processor) 1308 } 1309 1310 func (h *Handle) HandleStorageUsage(ctx context.Context, meta txn.TxnMeta, 1311 req *db.StorageUsageReq, resp *db.StorageUsageResp) (func(), error) { 1312 memo := h.db.GetUsageMemo() 1313 1314 start := time.Now() 1315 defer func() { 1316 v2.TaskStorageUsageReqDurationHistogram.Observe(time.Since(start).Seconds()) 1317 }() 1318 1319 memo.EnterProcessing() 1320 defer func() { 1321 resp.Magic = logtail.StorageUsageMagic 1322 memo.LeaveProcessing() 1323 }() 1324 1325 if !memo.HasUpdate() { 1326 resp.Succeed = true 1327 return nil, nil 1328 } 1329 1330 usages := memo.GatherAllAccSize() 1331 1332 newIds := make([]uint32, 0) 1333 for _, id := range req.AccIds { 1334 if usages != nil { 1335 if size, exist := usages[uint64(id)]; exist { 1336 memo.AddReqTrace(uint64(id), size, start, "req") 1337 resp.AccIds = append(resp.AccIds, int32(id)) 1338 resp.Sizes = append(resp.Sizes, size) 1339 delete(usages, uint64(id)) 1340 continue 1341 } 1342 } 1343 // a new account whose usage hasn't been collected yet 1344 newIds = append(newIds, uint32(id)) 1345 } 1346 1347 for accId, size := range usages { 1348 memo.AddReqTrace(uint64(accId), size, start, "oth") 1349 resp.AccIds = append(resp.AccIds, int32(accId)) 1350 resp.Sizes = append(resp.Sizes, size) 1351 } 1352 1353 // new accounts 1354 traverseCatalogForNewAccounts(h.db.Catalog, memo, newIds) 1355 1356 for idx := range newIds { 1357 if size, exist := memo.GatherNewAccountSize(uint64(newIds[idx])); exist { 1358 resp.AccIds = append(resp.AccIds, int32(newIds[idx])) 1359 resp.Sizes = append(resp.Sizes, size) 1360 memo.AddReqTrace(uint64(newIds[idx]), size, start, "new") 1361 } 1362 } 1363 1364 memo.ClearNewAccCache() 1365 1366 resp.Succeed = true 1367 1368 return nil, nil 1369 } 1370 1371 func openTAE(ctx context.Context, targetDir string, opt *options.Options) (tae *db.DB, err error) { 1372 1373 if targetDir != "" { 1374 mask := syscall.Umask(0) 1375 if err := os.MkdirAll(targetDir, os.FileMode(0755)); err != nil { 1376 syscall.Umask(mask) 1377 logutil.Infof("Recreate dir error:%v", err) 1378 return nil, err 1379 } 1380 syscall.Umask(mask) 1381 tae, err = db.Open(ctx, targetDir+"/tae", opt) 1382 if err != nil { 1383 logutil.Warnf("Open tae failed. error:%v", err) 1384 return nil, err 1385 } 1386 return tae, nil 1387 } 1388 1389 tae, err = db.Open(ctx, targetDir, opt) 1390 if err != nil { 1391 logutil.Warnf("Open tae failed. error:%v", err) 1392 return nil, err 1393 } 1394 return 1395 }
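// Usage sketch (illustrative only; the directory name and the empty options
// are assumptions, not values taken from this file): constructing a Handle
// over a local TAE store and shutting it down.
//
//	opts := &options.Options{}
//	h := NewTAEHandle(context.Background(), "./mo-tae-demo", opts)
//	defer func() { _ = h.HandleClose(context.Background()) }()
//	_ = h.GetDB() // access the underlying *db.DB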