github.com/whtcorpsinc/MilevaDB-Prod@v0.0.0-20211104133533-f57f4be3b597/causetstore/petri/acyclic/causet/embedded/planbuilder.go

// Copyright 2020 WHTCORPS INC, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package embedded

import (
	"bytes"
	"context"
	"encoding/binary"
	"fmt"
	"strings"
	"time"

	"github.com/whtcorpsinc/BerolinaSQL"
	"github.com/whtcorpsinc/BerolinaSQL/allegrosql"
	"github.com/whtcorpsinc/BerolinaSQL/ast"
	"github.com/whtcorpsinc/BerolinaSQL/charset"
	"github.com/whtcorpsinc/BerolinaSQL/opcode"
	"github.com/whtcorpsinc/BerolinaSQL/perceptron"
	"github.com/whtcorpsinc/errors"
	"github.com/whtcorpsinc/milevadb/causet"
	"github.com/whtcorpsinc/milevadb/causet/property"
	"github.com/whtcorpsinc/milevadb/causet/soliton"
	"github.com/whtcorpsinc/milevadb/causetstore/einsteindb"
	"github.com/whtcorpsinc/milevadb/config"
	"github.com/whtcorpsinc/milevadb/dbs"
	"github.com/whtcorpsinc/milevadb/ekv"
	"github.com/whtcorpsinc/milevadb/memex"
	"github.com/whtcorpsinc/milevadb/schemareplicant"
	util2 "github.com/whtcorpsinc/milevadb/soliton"
	utilBerolinaSQL "github.com/whtcorpsinc/milevadb/soliton/BerolinaSQL"
	"github.com/whtcorpsinc/milevadb/soliton/chunk"
	"github.com/whtcorpsinc/milevadb/soliton/hint"
	"github.com/whtcorpsinc/milevadb/soliton/logutil"
	"github.com/whtcorpsinc/milevadb/soliton/ranger"
	"github.com/whtcorpsinc/milevadb/soliton/set"
	"github.com/whtcorpsinc/milevadb/statistics"
	"github.com/whtcorpsinc/milevadb/stochastikctx"
	"github.com/whtcorpsinc/milevadb/stochastikctx/stmtctx"
	"github.com/whtcorpsinc/milevadb/stochastikctx/variable"
	"github.com/whtcorpsinc/milevadb/types"
	driver "github.com/whtcorpsinc/milevadb/types/BerolinaSQL_driver"

	"github.com/cznic/mathutil"
	"github.com/whtcorpsinc/milevadb/causet/blocks"
	"go.uber.org/zap"
)

type visitInfo struct {
	privilege allegrosql.PrivilegeType
	EDB       string
	causet    string
	column    string
	err       error
}

type indexNestedLoopJoinBlocks struct {
	inljBlocks  []hintBlockInfo
	inlhjBlocks []hintBlockInfo
	inlmjBlocks []hintBlockInfo
}

type blockHintInfo struct {
	indexNestedLoopJoinBlocks
	sortMergeJoinBlocks         []hintBlockInfo
	broadcastJoinBlocks         []hintBlockInfo
	broadcastJoinPreferredLocal []hintBlockInfo
	hashJoinBlocks              []hintBlockInfo
	indexHintList               []indexHintInfo
	tiflashBlocks               []hintBlockInfo
	einsteindbBlocks            []hintBlockInfo
	aggHints                    aggHintInfo
	indexMergeHintList          []indexHintInfo
	timeRangeHint               ast.HintTimeRange
	limitHints                  limitHintInfo
}

type limitHintInfo struct {
	preferLimitToCop bool
}

type hintBlockInfo struct {
	dbName       perceptron.CIStr
	tblName      perceptron.CIStr
	partitions   []perceptron.CIStr
	selectOffset int
	matched      bool
}

type indexHintInfo struct {
	dbName     perceptron.CIStr
	tblName    perceptron.CIStr
	partitions []perceptron.CIStr
	indexHint  *ast.IndexHint
	// matched indicates whether this index hint
	// has been successfully applied to a DataSource.
	// If an indexHintInfo is not matched after building
	// a Select memex, we will generate a warning for it.
	matched bool
}

func (hint *indexHintInfo) hintTypeString() string {
	switch hint.indexHint.HintType {
	case ast.HintUse:
		return "use_index"
	case ast.HintIgnore:
		return "ignore_index"
	case ast.HintForce:
		return "force_index"
	}
	return ""
}

// indexString formats the indexHint as dbName.blockName[, indexNames].
func (hint *indexHintInfo) indexString() string {
	var indexListString string
	indexList := make([]string, len(hint.indexHint.IndexNames))
	for i := range hint.indexHint.IndexNames {
		indexList[i] = hint.indexHint.IndexNames[i].L
	}
	if len(indexList) > 0 {
		indexListString = fmt.Sprintf(", %s", strings.Join(indexList, ", "))
	}
	return fmt.Sprintf("%s.%s%s", hint.dbName, hint.tblName, indexListString)
}

type aggHintInfo struct {
	preferAggType  uint
	preferAggToCop bool
}

// QueryTimeRange represents a time range specified by the TIME_RANGE hint.
type QueryTimeRange struct {
	From time.Time
	To   time.Time
}

// Condition returns a WHERE clause based on its value.
func (tr *QueryTimeRange) Condition() string {
	return fmt.Sprintf("where time>='%s' and time<='%s'", tr.From.Format(MetricBlockTimeFormat), tr.To.Format(MetricBlockTimeFormat))
}

func blockNames2HintBlockInfo(ctx stochastikctx.Context, hintName string, hintBlocks []ast.HintBlock, p *hint.BlockHintProcessor, nodeType hint.NodeType, currentOffset int) []hintBlockInfo {
	if len(hintBlocks) == 0 {
		return nil
	}
	hintBlockInfos := make([]hintBlockInfo, 0, len(hintBlocks))
	defaultDBName := perceptron.NewCIStr(ctx.GetStochastikVars().CurrentDB)
	isInapplicable := false
	for _, hintBlock := range hintBlocks {
		blockInfo := hintBlockInfo{
			dbName:       hintBlock.DBName,
			tblName:      hintBlock.BlockName,
			partitions:   hintBlock.PartitionList,
			selectOffset: p.GetHintOffset(hintBlock.QBName, nodeType, currentOffset),
		}
		if blockInfo.dbName.L == "" {
			blockInfo.dbName = defaultDBName
		}
		switch hintName {
		case MilevaDBMergeJoin, HintSMJ, MilevaDBIndexNestedLoopJoin, HintINLJ, HintINLHJ, HintINLMJ, MilevaDBHashJoin, HintHJ:
			if len(blockInfo.partitions) > 0 {
				isInapplicable = true
			}
		}
		hintBlockInfos = append(hintBlockInfos, blockInfo)
	}
	if isInapplicable {
		ctx.GetStochastikVars().StmtCtx.AppendWarning(
			errors.New(fmt.Sprintf("Optimizer Hint %s is inapplicable on specified partitions",
				restore2JoinHint(hintName, hintBlockInfos))))
		return nil
	}
	return hintBlockInfos
}

// ifPreferAsLocalInBCJoin checks whether there is a data source specified as local read by hint.
func (info *blockHintInfo) ifPreferAsLocalInBCJoin(p LogicalCauset, blockOffset int) bool {
	alias := extractBlockAlias(p, blockOffset)
	if alias != nil {
		blockNames := make([]*hintBlockInfo, 1)
		blockNames[0] = alias
		return info.matchBlockName(blockNames, info.broadcastJoinPreferredLocal)
	}
	for _, c := range p.Children() {
		if info.ifPreferAsLocalInBCJoin(c, blockOffset) {
			return true
		}
	}
	return false
}

func (info *blockHintInfo) ifPreferMergeJoin(blockNames ...*hintBlockInfo) bool {
	return info.matchBlockName(blockNames, info.sortMergeJoinBlocks)
}

func (info *blockHintInfo) ifPreferBroadcastJoin(blockNames ...*hintBlockInfo) bool {
	return info.matchBlockName(blockNames, info.broadcastJoinBlocks)
}

func (info *blockHintInfo) ifPreferHashJoin(blockNames ...*hintBlockInfo) bool {
	return info.matchBlockName(blockNames, info.hashJoinBlocks)
}

func (info *blockHintInfo) ifPreferINLJ(blockNames ...*hintBlockInfo) bool {
	return info.matchBlockName(blockNames, info.indexNestedLoopJoinBlocks.inljBlocks)
}

func (info *blockHintInfo) ifPreferINLHJ(blockNames ...*hintBlockInfo) bool {
	return info.matchBlockName(blockNames, info.indexNestedLoopJoinBlocks.inlhjBlocks)
}

func (info *blockHintInfo) ifPreferINLMJ(blockNames ...*hintBlockInfo) bool {
	return info.matchBlockName(blockNames, info.indexNestedLoopJoinBlocks.inlmjBlocks)
}

func (info *blockHintInfo) ifPreferTiFlash(blockName *hintBlockInfo) *hintBlockInfo {
	if blockName == nil {
		return nil
	}
	for i, tbl := range info.tiflashBlocks {
		if blockName.dbName.L == tbl.dbName.L && blockName.tblName.L == tbl.tblName.L && tbl.selectOffset == blockName.selectOffset {
			info.tiflashBlocks[i].matched = true
			return &tbl
		}
	}
	return nil
}

func (info *blockHintInfo) ifPreferEinsteinDB(blockName *hintBlockInfo) *hintBlockInfo {
	if blockName == nil {
		return nil
	}
	for i, tbl := range info.einsteindbBlocks {
		if blockName.dbName.L == tbl.dbName.L && blockName.tblName.L == tbl.tblName.L && tbl.selectOffset == blockName.selectOffset {
			info.einsteindbBlocks[i].matched = true
			return &tbl
		}
	}
	return nil
}

// matchBlockName checks whether the hint matches what is needed.
// It is enough for either side to match one entry on the list.
// Even though you can put two blocks on the list, that does not mean the
// optimizer will reorder them so that they join directly. Which block a
// block joins with depends on the traversal order; without a reorder,
// users might have to adjust the query themselves.
// This is similar to MyALLEGROSQL hints.
func (info *blockHintInfo) matchBlockName(blocks []*hintBlockInfo, hintBlocks []hintBlockInfo) bool {
	hintMatched := false
	for _, causet := range blocks {
		for i, curEntry := range hintBlocks {
			if causet == nil {
				continue
			}
			if curEntry.dbName.L == causet.dbName.L && curEntry.tblName.L == causet.tblName.L && causet.selectOffset == curEntry.selectOffset {
				hintBlocks[i].matched = true
				hintMatched = true
				break
			}
		}
	}
	return hintMatched
}

func restore2BlockHint(hintBlocks ...hintBlockInfo) string {
	buffer := bytes.NewBufferString("")
	for i, causet := range hintBlocks {
		buffer.WriteString(causet.tblName.L)
		if len(causet.partitions) > 0 {
			buffer.WriteString(" PARTITION(")
			for j, partition := range causet.partitions {
				if j > 0 {
					buffer.WriteString(", ")
				}
				buffer.WriteString(partition.L)
			}
			buffer.WriteString(")")
		}
		if i < len(hintBlocks)-1 {
			buffer.WriteString(", ")
		}
	}
	return buffer.String()
}

func restore2JoinHint(hintType string, hintBlocks []hintBlockInfo) string {
	buffer := bytes.NewBufferString("/*+ ")
	buffer.WriteString(strings.ToUpper(hintType))
	buffer.WriteString("(")
	buffer.WriteString(restore2BlockHint(hintBlocks...))
	buffer.WriteString(") */")
	return buffer.String()
}

func restore2IndexHint(hintType string, hintIndex indexHintInfo) string {
	buffer := bytes.NewBufferString("/*+ ")
	buffer.WriteString(strings.ToUpper(hintType))
	buffer.WriteString("(")
	buffer.WriteString(restore2BlockHint(hintBlockInfo{
		dbName:     hintIndex.dbName,
		tblName:    hintIndex.tblName,
		partitions: hintIndex.partitions,
	}))
	if hintIndex.indexHint != nil && len(hintIndex.indexHint.IndexNames) > 0 {
		for i, indexName := range hintIndex.indexHint.IndexNames {
			if i > 0 {
				buffer.WriteString(",")
			}
			buffer.WriteString(" " + indexName.L)
		}
	}
	buffer.WriteString(") */")
	return buffer.String()
}

func restore2StorageHint(tiflashBlocks, einsteindbBlocks []hintBlockInfo) string {
	buffer := bytes.NewBufferString("/*+ ")
	buffer.WriteString(strings.ToUpper(HintReadFromStorage))
	buffer.WriteString("(")
	if len(tiflashBlocks) > 0 {
		buffer.WriteString("tiflash[")
		buffer.WriteString(restore2BlockHint(tiflashBlocks...))
		buffer.WriteString("]")
		if len(einsteindbBlocks) > 0 {
			buffer.WriteString(", ")
		}
	}
	if len(einsteindbBlocks) > 0 {
		buffer.WriteString("einsteindb[")
		buffer.WriteString(restore2BlockHint(einsteindbBlocks...))
		buffer.WriteString("]")
	}
	buffer.WriteString(") */")
	return buffer.String()
}

func extractUnmatchedBlocks(hintBlocks []hintBlockInfo) []string {
	var blockNames []string
	for _, causet := range hintBlocks {
		if !causet.matched {
			blockNames = append(blockNames, causet.tblName.O)
		}
	}
	return blockNames
}
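
// Illustrative sketch (not part of the original file): the restore2* helpers above
// rebuild a comment-style hint string that is only used in warning messages. The
// exact hint keyword depends on the hint-name constants defined elsewhere in this
// package; assuming a hint type of "merge_join" and hinted blocks t1 and t2, the
// results would look roughly like:
//
//	restore2JoinHint("merge_join", blocks)   // "/*+ MERGE_JOIN(t1, t2) */"
//	restore2IndexHint("use_index", idxHint)  // "/*+ USE_INDEX(t1 idx_a, idx_b) */"
//	// restore2StorageHint produces e.g. "/*+ READ_FROM_STORAGE(tiflash[t1], einsteindb[t2]) */",
//	// assuming HintReadFromStorage is "read_from_storage".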

// clauseCode indicates in which clause the column is currently.
type clauseCode int

const (
	unknowClause clauseCode = iota
	fieldList
	havingClause
	onClause
	orderByClause
	whereClause
	groupByClause
	showStatement
	globalOrderByClause
)

var clauseMsg = map[clauseCode]string{
	unknowClause:        "",
	fieldList:           "field list",
	havingClause:        "having clause",
	onClause:            "on clause",
	orderByClause:       "order clause",
	whereClause:         "where clause",
	groupByClause:       "group memex",
	showStatement:       "show memex",
	globalOrderByClause: "global ORDER clause",
}

type capFlagType = uint64

const (
	_ capFlagType = iota
	// canExpandAST indicates whether the origin AST can be expanded during plan
	// building. ONLY used for `CreateViewStmt` now.
	canExpandAST
	// collectUnderlyingViewName indicates whether to collect the underlying
	// view names of a CreateViewStmt during plan building.
	collectUnderlyingViewName
)

// CausetBuilder builds Causet from an ast.Node.
// It just builds the ast node straightforwardly.
type CausetBuilder struct {
	ctx          stochastikctx.Context
	is           schemareplicant.SchemaReplicant
	outerSchemas []*memex.Schema
	outerNames   [][]*types.FieldName
	// colMapper stores the column that must be pre-resolved.
	colMapper map[*ast.DeferredCausetNameExpr]int
	// visitInfo is used for privilege check.
	visitInfo     []visitInfo
	blockHintInfo []blockHintInfo
	// optFlag indicates the flags of the optimizer rules.
	optFlag uint64
	// capFlag indicates the capability flags.
	capFlag capFlagType

	curClause clauseCode

	// rewriterPool stores the memexRewriter we have created to reuse it if it has been released.
	// rewriterCounter counts how many rewriters are being used.
	rewriterPool    []*memexRewriter
	rewriterCounter int

	windowSpecs     map[string]*ast.WindowSpec
	inUFIDelateStmt bool
	inDeleteStmt    bool
	// inStraightJoin represents whether the current "SELECT" memex has
	// the "STRAIGHT_JOIN" option.
	inStraightJoin bool

	// handleHelper records the handle column position for blocks. Delete/UFIDelate/SelectLock/UnionScan may need this information.
	// It collects the information by the following procedure:
	// Since we build the plan tree from bottom to top, we maintain a stack to record the current handle information.
	// If it's a dataSource/blockDual node, we create a new map.
	// If it's an aggregation, we pop the map and push a nil map since no handle information is left.
	// If it's a union, we pop all children's maps and push a nil map.
	// If it's a join, we pop its children's maps out, then merge them and push the new map onto the stack.
	// If we meet a subquery, it's clearly an independent problem, so we just pop one map out when we finish building the subquery.
	handleHelper *handleDefCausHelper

	hintProcessor *hint.BlockHintProcessor
	// selectOffset is the offsets of current processing select stmts.
	selectOffset []int

	// SelectLock needs this information to locate the dagger on partitions.
	partitionedBlock []causet.PartitionedBlock
	// CreateView needs this information to check whether a nested view exists.
	underlyingViewNames set.StringSet

	// evalDefaultExpr needs this information to find the corresponding column.
	// It stores the OutputNames before buildProjection.
	allNames [][]*types.FieldName
}

type handleDefCausHelper struct {
	id2HandleMapStack []map[int64][]HandleDefCauss
	stackTail         int
}

func (hch *handleDefCausHelper) appendDefCausToLastMap(tblID int64, handleDefCauss HandleDefCauss) {
	tailMap := hch.id2HandleMapStack[hch.stackTail-1]
	tailMap[tblID] = append(tailMap[tblID], handleDefCauss)
}

func (hch *handleDefCausHelper) popMap() map[int64][]HandleDefCauss {
	ret := hch.id2HandleMapStack[hch.stackTail-1]
	hch.stackTail--
	hch.id2HandleMapStack = hch.id2HandleMapStack[:hch.stackTail]
	return ret
}

func (hch *handleDefCausHelper) pushMap(m map[int64][]HandleDefCauss) {
	hch.id2HandleMapStack = append(hch.id2HandleMapStack, m)
	hch.stackTail++
}

func (hch *handleDefCausHelper) mergeAndPush(m1, m2 map[int64][]HandleDefCauss) {
	newMap := make(map[int64][]HandleDefCauss, mathutil.Max(len(m1), len(m2)))
	for k, v := range m1 {
		newMap[k] = make([]HandleDefCauss, len(v))
		copy(newMap[k], v)
	}
	for k, v := range m2 {
		if _, ok := newMap[k]; ok {
			newMap[k] = append(newMap[k], v...)
		} else {
			newMap[k] = make([]HandleDefCauss, len(v))
			copy(newMap[k], v)
		}
	}
	hch.pushMap(newMap)
}

func (hch *handleDefCausHelper) tailMap() map[int64][]HandleDefCauss {
	return hch.id2HandleMapStack[hch.stackTail-1]
}

// GetVisitInfo gets the visitInfo of the CausetBuilder.
func (b *CausetBuilder) GetVisitInfo() []visitInfo {
	return b.visitInfo
}

// GetDBBlockInfo gets the accessed dbs and blocks info.
func (b *CausetBuilder) GetDBBlockInfo() []stmtctx.BlockEntry {
	var blocks []stmtctx.BlockEntry
	existsFunc := func(tbls []stmtctx.BlockEntry, tbl *stmtctx.BlockEntry) bool {
		for _, t := range tbls {
			if t == *tbl {
				return true
			}
		}
		return false
	}
	for _, v := range b.visitInfo {
		tbl := &stmtctx.BlockEntry{EDB: v.EDB, Block: v.causet}
		if !existsFunc(blocks, tbl) {
			blocks = append(blocks, *tbl)
		}
	}
	return blocks
}

// GetOptFlag gets the optFlag of the CausetBuilder.
func (b *CausetBuilder) GetOptFlag() uint64 {
	return b.optFlag
}

func (b *CausetBuilder) getSelectOffset() int {
	if len(b.selectOffset) > 0 {
		return b.selectOffset[len(b.selectOffset)-1]
	}
	return -1
}

func (b *CausetBuilder) pushSelectOffset(offset int) {
	b.selectOffset = append(b.selectOffset, offset)
}

func (b *CausetBuilder) popSelectOffset() {
	b.selectOffset = b.selectOffset[:len(b.selectOffset)-1]
}

// NewCausetBuilder creates a new CausetBuilder.
func NewCausetBuilder(sctx stochastikctx.Context, is schemareplicant.SchemaReplicant, processor *hint.BlockHintProcessor) *CausetBuilder {
	if processor == nil {
		sctx.GetStochastikVars().CausetAppendSelectBlockAsName = nil
	} else {
		sctx.GetStochastikVars().CausetAppendSelectBlockAsName = make([]ast.HintBlock, processor.MaxSelectStmtOffset()+1)
	}
	return &CausetBuilder{
		ctx:           sctx,
		is:            is,
		colMapper:     make(map[*ast.DeferredCausetNameExpr]int),
		handleHelper:  &handleDefCausHelper{id2HandleMapStack: make([]map[int64][]HandleDefCauss, 0)},
		hintProcessor: processor,
	}
}
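
// Illustrative sketch (not part of the original file): a minimal example of how a
// caller might drive this builder, assuming it already holds a stochastikctx.Context,
// a schemareplicant.SchemaReplicant snapshot and a parsed ast.StmtNode. The names
// sctx, is, hintProcessor and stmtNode below are hypothetical placeholders.
//
//	builder := NewCausetBuilder(sctx, is, hintProcessor)
//	causetPlan, err := builder.Build(context.Background(), stmtNode)
//	if err != nil {
//		return err
//	}
//	// causetPlan is a Causet; the privilege requirements collected while building
//	// are available through builder.GetVisitInfo().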

// Build builds the ast node to a Causet.
func (b *CausetBuilder) Build(ctx context.Context, node ast.Node) (Causet, error) {
	b.optFlag |= flagPrunDeferredCausets
	switch x := node.(type) {
	case *ast.AdminStmt:
		return b.buildAdmin(ctx, x)
	case *ast.DeallocateStmt:
		return &Deallocate{Name: x.Name}, nil
	case *ast.DeleteStmt:
		return b.buildDelete(ctx, x)
	case *ast.InterDircuteStmt:
		return b.buildInterDircute(ctx, x)
	case *ast.ExplainStmt:
		return b.buildExplain(ctx, x)
	case *ast.ExplainForStmt:
		return b.buildExplainFor(x)
	case *ast.TraceStmt:
		return b.buildTrace(x)
	case *ast.InsertStmt:
		return b.buildInsert(ctx, x)
	case *ast.LoadDataStmt:
		return b.buildLoadData(ctx, x)
	case *ast.LoadStatsStmt:
		return b.buildLoadStats(x), nil
	case *ast.IndexAdviseStmt:
		return b.buildIndexAdvise(x), nil
	case *ast.PrepareStmt:
		return b.buildPrepare(x), nil
	case *ast.SelectStmt:
		if x.SelectIntoOpt != nil {
			return b.buildSelectInto(ctx, x)
		}
		return b.buildSelect(ctx, x)
	case *ast.SetOprStmt:
		return b.buildSetOpr(ctx, x)
	case *ast.UFIDelateStmt:
		return b.buildUFIDelate(ctx, x)
	case *ast.ShowStmt:
		return b.buildShow(ctx, x)
	case *ast.DoStmt:
		return b.buildDo(ctx, x)
	case *ast.SetStmt:
		return b.buildSet(ctx, x)
	case *ast.SetConfigStmt:
		return b.buildSetConfig(ctx, x)
	case *ast.AnalyzeBlockStmt:
		return b.buildAnalyze(x)
	case *ast.BinlogStmt, *ast.FlushStmt, *ast.UseStmt, *ast.BRIEStmt,
		*ast.BeginStmt, *ast.CommitStmt, *ast.RollbackStmt, *ast.CreateUserStmt, *ast.SetPwdStmt, *ast.AlterInstanceStmt,
		*ast.GrantStmt, *ast.DropUserStmt, *ast.AlterUserStmt, *ast.RevokeStmt, *ast.KillStmt, *ast.DropStatsStmt,
		*ast.GrantRoleStmt, *ast.RevokeRoleStmt, *ast.SetRoleStmt, *ast.SetDefaultRoleStmt, *ast.ShutdownStmt,
		*ast.CreateStatisticsStmt, *ast.DropStatisticsStmt:
		return b.buildSimple(node.(ast.StmtNode))
	case ast.DBSNode:
		return b.buildDBS(ctx, x)
	case *ast.CreateBindingStmt:
		return b.buildCreateBindCauset(x)
	case *ast.DropBindingStmt:
		return b.buildDropBindCauset(x)
	case *ast.ChangeStmt:
		return b.buildChange(x)
	case *ast.SplitRegionStmt:
		return b.buildSplitRegion(x)
	}
	return nil, ErrUnsupportedType.GenWithStack("Unsupported type %T", node)
}

func (b *CausetBuilder) buildSetConfig(ctx context.Context, v *ast.SetConfigStmt) (Causet, error) {
	privErr := ErrSpecificAccessDenied.GenWithStackByArgs("CONFIG")
	b.visitInfo = appendVisitInfo(b.visitInfo, allegrosql.ConfigPriv, "", "", "", privErr)
	mockBlockCauset := LogicalBlockDual{}.Init(b.ctx, b.getSelectOffset())
	expr, _, err := b.rewrite(ctx, v.Value, mockBlockCauset, nil, true)
	return &SetConfig{Name: v.Name, Type: v.Type, Instance: v.Instance, Value: expr}, err
}

func (b *CausetBuilder) buildChange(v *ast.ChangeStmt) (Causet, error) {
	exe := &Change{
		ChangeStmt: v,
	}
	return exe, nil
}

func (b *CausetBuilder) buildInterDircute(ctx context.Context, v *ast.InterDircuteStmt) (Causet, error) {
	vars := make([]memex.Expression, 0, len(v.UsingVars))
	for _, expr := range v.UsingVars {
		newExpr, _, err := b.rewrite(ctx, expr, nil, nil, true)
		if err != nil {
			return nil, err
		}
		vars = append(vars, newExpr)
	}
	exe := &InterDircute{Name: v.Name, UsingVars: vars, InterDircID: v.InterDircID}
	if v.BinaryArgs != nil {
		exe.PrepareParams = v.BinaryArgs.([]types.Causet)
	}
	return exe, nil
}

func (b *CausetBuilder) buildDo(ctx context.Context, v *ast.DoStmt) (Causet, error) {
	var p LogicalCauset
	dual := LogicalBlockDual{RowCount: 1}.Init(b.ctx, b.getSelectOffset())
	dual.SetSchema(memex.NewSchema())
	p = dual
	proj := LogicalProjection{Exprs: make([]memex.Expression, 0, len(v.Exprs))}.Init(b.ctx, b.getSelectOffset())
	proj.names = make([]*types.FieldName, len(v.Exprs))
	schemaReplicant := memex.NewSchema(make([]*memex.DeferredCauset, 0, len(v.Exprs))...)
	for _, astExpr := range v.Exprs {
		expr, np, err := b.rewrite(ctx, astExpr, p, nil, true)
		if err != nil {
			return nil, err
		}
		p = np
		proj.Exprs = append(proj.Exprs, expr)
		schemaReplicant.Append(&memex.DeferredCauset{
			UniqueID: b.ctx.GetStochastikVars().AllocCausetDeferredCausetID(),
			RetType:  expr.GetType(),
		})
	}
	proj.SetChildren(p)
	proj.self = proj
	proj.SetSchema(schemaReplicant)
	proj.CalculateNoDelay = true
	return proj, nil
}

func (b *CausetBuilder) buildSet(ctx context.Context, v *ast.SetStmt) (Causet, error) {
	p := &Set{}
	for _, vars := range v.Variables {
		if vars.IsGlobal {
			err := ErrSpecificAccessDenied.GenWithStackByArgs("SUPER")
			b.visitInfo = appendVisitInfo(b.visitInfo, allegrosql.SuperPriv, "", "", "", err)
		}
		assign := &memex.VarAssignment{
			Name:     vars.Name,
			IsGlobal: vars.IsGlobal,
			IsSystem: vars.IsSystem,
		}
		if _, ok := vars.Value.(*ast.DefaultExpr); !ok {
			if cn, ok2 := vars.Value.(*ast.DeferredCausetNameExpr); ok2 && cn.Name.Block.L == "" {
				// Convert column name memex to string value memex.
				char, col := b.ctx.GetStochastikVars().GetCharsetInfo()
				vars.Value = ast.NewValueExpr(cn.Name.Name.O, char, col)
			}
			mockBlockCauset := LogicalBlockDual{}.Init(b.ctx, b.getSelectOffset())
			var err error
			assign.Expr, _, err = b.rewrite(ctx, vars.Value, mockBlockCauset, nil, true)
			if err != nil {
				return nil, err
			}
		} else {
			assign.IsDefault = true
		}
		if vars.ExtendValue != nil {
			assign.ExtendValue = &memex.Constant{
				Value:   vars.ExtendValue.(*driver.ValueExpr).Causet,
				RetType: &vars.ExtendValue.(*driver.ValueExpr).Type,
			}
		}
		p.VarAssigns = append(p.VarAssigns, assign)
	}
	return p, nil
}

func (b *CausetBuilder) buildDropBindCauset(v *ast.DropBindingStmt) (Causet, error) {
	p := &ALLEGROSQLBindCauset{
		ALLEGROSQLBindOp:    OpALLEGROSQLBindDrop,
		NormdOrigALLEGROSQL: BerolinaSQL.Normalize(v.OriginSel.Text()),
		IsGlobal:            v.GlobalScope,
		EDB:                 utilBerolinaSQL.GetDefaultDB(v.OriginSel, b.ctx.GetStochastikVars().CurrentDB),
	}
	if v.HintedSel != nil {
		p.BindALLEGROSQL = v.HintedSel.Text()
	}
	b.visitInfo = appendVisitInfo(b.visitInfo, allegrosql.SuperPriv, "", "", "", nil)
	return p, nil
}

func (b *CausetBuilder) buildCreateBindCauset(v *ast.CreateBindingStmt) (Causet, error) {
	charSet, collation := b.ctx.GetStochastikVars().GetCharsetInfo()
	p := &ALLEGROSQLBindCauset{
		ALLEGROSQLBindOp:    OpALLEGROSQLBindCreate,
		NormdOrigALLEGROSQL: BerolinaSQL.Normalize(v.OriginSel.Text()),
		BindALLEGROSQL:      v.HintedSel.Text(),
		IsGlobal:            v.GlobalScope,
		BindStmt:            v.HintedSel,
		EDB:                 utilBerolinaSQL.GetDefaultDB(v.OriginSel, b.ctx.GetStochastikVars().CurrentDB),
		Charset:             charSet,
		DefCauslation:       collation,
	}
	b.visitInfo = appendVisitInfo(b.visitInfo, allegrosql.SuperPriv, "", "", "", nil)
	return p, nil
}

// detectSelectAgg detects an aggregate function or GROUP BY clause.
func (b *CausetBuilder) detectSelectAgg(sel *ast.SelectStmt) bool {
	if sel.GroupBy != nil {
		return true
	}
	for _, f := range sel.Fields.Fields {
		if ast.HasAggFlag(f.Expr) {
			return true
		}
	}
	if sel.Having != nil {
		if ast.HasAggFlag(sel.Having.Expr) {
			return true
		}
	}
	if sel.OrderBy != nil {
		for _, item := range sel.OrderBy.Items {
			if ast.HasAggFlag(item.Expr) {
				return true
			}
		}
	}
	return false
}

func (b *CausetBuilder) detectSelectWindow(sel *ast.SelectStmt) bool {
	for _, f := range sel.Fields.Fields {
		if ast.HasWindowFlag(f.Expr) {
			return true
		}
	}
	if sel.OrderBy != nil {
		for _, item := range sel.OrderBy.Items {
			if ast.HasWindowFlag(item.Expr) {
				return true
			}
		}
	}
	return false
}

func getPathByIndexName(paths []*soliton.AccessPath, idxName perceptron.CIStr, tblInfo *perceptron.BlockInfo) *soliton.AccessPath {
	var blockPath *soliton.AccessPath
	for _, path := range paths {
		if path.IsBlockPath() {
			blockPath = path
			continue
		}
		if path.Index.Name.L == idxName.L {
			return path
		}
	}
	if isPrimaryIndex(idxName) && (tblInfo.PKIsHandle || tblInfo.IsCommonHandle) {
		return blockPath
	}
	return nil
}

func isPrimaryIndex(indexName perceptron.CIStr) bool {
	return indexName.L == "primary"
}

func genTiFlashPath(tblInfo *perceptron.BlockInfo, isGlobalRead bool) *soliton.AccessPath {
	tiFlashPath := &soliton.AccessPath{StoreType: ekv.TiFlash, IsTiFlashGlobalRead: isGlobalRead}
	fillContentForBlockPath(tiFlashPath, tblInfo)
	return tiFlashPath
}

func fillContentForBlockPath(blockPath *soliton.AccessPath, tblInfo *perceptron.BlockInfo) {
	if tblInfo.IsCommonHandle {
		blockPath.IsCommonHandlePath = true
		for _, index := range tblInfo.Indices {
			if index.Primary {
				blockPath.Index = index
				break
			}
		}
	} else {
		blockPath.IsIntHandlePath = true
	}
}

func getPossibleAccessPaths(ctx stochastikctx.Context, blockHints *blockHintInfo, indexHints []*ast.IndexHint, tbl causet.Block, dbName, tblName perceptron.CIStr) ([]*soliton.AccessPath, error) {
	tblInfo := tbl.Meta()
	publicPaths := make([]*soliton.AccessPath, 0, len(tblInfo.Indices)+2)
	tp := ekv.EinsteinDB
	if tbl.Type().IsClusterBlock() {
		tp = ekv.MilevaDB
	}
	blockPath := &soliton.AccessPath{StoreType: tp}
	fillContentForBlockPath(blockPath, tblInfo)
	publicPaths = append(publicPaths, blockPath)
	if tblInfo.TiFlashReplica != nil && tblInfo.TiFlashReplica.Available {
		publicPaths = append(publicPaths, genTiFlashPath(tblInfo, false))
		publicPaths = append(publicPaths, genTiFlashPath(tblInfo, true))
	}
	optimizerUseInvisibleIndexes := ctx.GetStochastikVars().OptimizerUseInvisibleIndexes
	for _, index := range tblInfo.Indices {
		if index.State == perceptron.StatePublic {
			// Filter out invisible indexes, because they are not visible to the optimizer.
			if !optimizerUseInvisibleIndexes && index.Invisible {
				continue
			}
			if tblInfo.IsCommonHandle && index.Primary {
				continue
			}
			publicPaths = append(publicPaths, &soliton.AccessPath{Index: index})
		}
	}

	hasScanHint, hasUseOrForce := false, false
	available := make([]*soliton.AccessPath, 0, len(publicPaths))
	ignored := make([]*soliton.AccessPath, 0, len(publicPaths))

	// Extract comment-style index hints like /*+ INDEX(t, idx1, idx2) */.
	indexHintsLen := len(indexHints)
	if blockHints != nil {
		for i, hint := range blockHints.indexHintList {
			if hint.dbName.L == dbName.L && hint.tblName.L == tblName.L {
				indexHints = append(indexHints, hint.indexHint)
				blockHints.indexHintList[i].matched = true
			}
		}
	}

	_, isolationReadEnginesHasEinsteinDB := ctx.GetStochastikVars().GetIsolationReadEngines()[ekv.EinsteinDB]
	for i, hint := range indexHints {
		if hint.HintScope != ast.HintForScan {
			continue
		}

		hasScanHint = true

		if !isolationReadEnginesHasEinsteinDB {
			if hint.IndexNames != nil {
				engineVals, _ := ctx.GetStochastikVars().GetSystemVar(variable.MilevaDBIsolationReadEngines)
				err := errors.New(fmt.Sprintf("MilevaDB doesn't support index in the isolation read engines(value: '%v')", engineVals))
				if i < indexHintsLen {
					return nil, err
				}
				ctx.GetStochastikVars().StmtCtx.AppendWarning(err)
			}
			continue
		}
		// It is syntactically valid to omit index_list for USE INDEX, which means “use no indexes”.
		// Omitting index_list for FORCE INDEX or IGNORE INDEX is a syntax error.
		// See https://dev.allegrosql.com/doc/refman/8.0/en/index-hints.html.
		if hint.IndexNames == nil && hint.HintType != ast.HintIgnore {
			if path := getBlockPath(publicPaths); path != nil {
				hasUseOrForce = true
				path.Forced = true
				available = append(available, path)
			}
		}
		for _, idxName := range hint.IndexNames {
			path := getPathByIndexName(publicPaths, idxName, tblInfo)
			if path == nil {
				err := ErrKeyDoesNotExist.GenWithStackByArgs(idxName, tblInfo.Name)
				// If the hint is from comment-style allegrosql hints, we should throw a warning instead of an error.
				if i < indexHintsLen {
					return nil, err
				}
				ctx.GetStochastikVars().StmtCtx.AppendWarning(err)
				continue
			}
			if hint.HintType == ast.HintIgnore {
				// DefCauslect all the ignored index hints.
				ignored = append(ignored, path)
				continue
			}
			// Currently we don't distinguish between "FORCE" and "USE" because
			// our cost estimation is not reliable.
			hasUseOrForce = true
			path.Forced = true
			available = append(available, path)
		}
	}

	if !hasScanHint || !hasUseOrForce {
		available = publicPaths
	}

	available = removeIgnoredPaths(available, ignored, tblInfo)

	// If we have a "FORCE" or "USE" index hint but no available index,
	// we have to use a causet scan.
	if len(available) == 0 {
		available = append(available, blockPath)
	}
	return available, nil
}
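
// Illustrative note (not part of the original file): getPossibleAccessPaths above
// merges two sources of index hints for the same block. Hints written with the
// USE/FORCE/IGNORE INDEX syntax in the FROM clause arrive in the indexHints
// argument, while comment-style optimizer hints are appended afterwards from
// blockHints.indexHintList; entries with i >= indexHintsLen are therefore
// comment-style and only produce warnings instead of errors. Roughly, assuming a
// block t with index idx_a, the two forms look like:
//
//	SELECT * FROM t USE INDEX (idx_a) WHERE a > 1;           -- FROM-clause syntax, unknown index is an error
//	SELECT /*+ USE_INDEX(t, idx_a) */ * FROM t WHERE a > 1;  -- comment-style hint, unknown index is a warning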

func filterPathByIsolationRead(ctx stochastikctx.Context, paths []*soliton.AccessPath, dbName perceptron.CIStr) ([]*soliton.AccessPath, error) {
	// TODO: filter paths with isolation read locations.
	if dbName.L == allegrosql.SystemDB {
		return paths, nil
	}
	isolationReadEngines := ctx.GetStochastikVars().GetIsolationReadEngines()
	availableEngine := map[ekv.StoreType]struct{}{}
	var availableEngineStr string
	for i := len(paths) - 1; i >= 0; i-- {
		if _, ok := availableEngine[paths[i].StoreType]; !ok {
			availableEngine[paths[i].StoreType] = struct{}{}
			if availableEngineStr != "" {
				availableEngineStr += ", "
			}
			availableEngineStr += paths[i].StoreType.Name()
		}
		if _, ok := isolationReadEngines[paths[i].StoreType]; !ok && paths[i].StoreType != ekv.MilevaDB {
			paths = append(paths[:i], paths[i+1:]...)
		}
	}
	var err error
	if len(paths) == 0 {
		engineVals, _ := ctx.GetStochastikVars().GetSystemVar(variable.MilevaDBIsolationReadEngines)
		err = ErrInternal.GenWithStackByArgs(fmt.Sprintf("Can not find access path matching '%v'(value: '%v'). Available values are '%v'.",
			variable.MilevaDBIsolationReadEngines, engineVals, availableEngineStr))
	}
	return paths, err
}

func removeIgnoredPaths(paths, ignoredPaths []*soliton.AccessPath, tblInfo *perceptron.BlockInfo) []*soliton.AccessPath {
	if len(ignoredPaths) == 0 {
		return paths
	}
	remainedPaths := make([]*soliton.AccessPath, 0, len(paths))
	for _, path := range paths {
		if path.IsBlockPath() || getPathByIndexName(ignoredPaths, path.Index.Name, tblInfo) == nil {
			remainedPaths = append(remainedPaths, path)
		}
	}
	return remainedPaths
}

func (b *CausetBuilder) buildSelectLock(src LogicalCauset, dagger *ast.SelectLockInfo) *LogicalLock {
	selectLock := LogicalLock{
		Lock:             dagger,
		tblID2Handle:     b.handleHelper.tailMap(),
		partitionedBlock: b.partitionedBlock,
	}.Init(b.ctx)
	selectLock.SetChildren(src)
	return selectLock
}

func (b *CausetBuilder) buildPrepare(x *ast.PrepareStmt) Causet {
	p := &Prepare{
		Name: x.Name,
	}
	if x.ALLEGROSQLVar != nil {
		if v, ok := b.ctx.GetStochastikVars().Users[strings.ToLower(x.ALLEGROSQLVar.Name)]; ok {
			p.ALLEGROSQLText = v.GetString()
		} else {
			p.ALLEGROSQLText = "NULL"
		}
	} else {
		p.ALLEGROSQLText = x.ALLEGROSQLText
	}
	return p
}

func (b *CausetBuilder) buildAdmin(ctx context.Context, as *ast.AdminStmt) (Causet, error) {
	var ret Causet
	var err error
	switch as.Tp {
	case ast.AdminCheckBlock, ast.AdminChecHoTTex:
		ret, err = b.buildAdminCheckBlock(ctx, as)
		if err != nil {
			return ret, err
		}
	case ast.AdminRecoverIndex:
		p := &RecoverIndex{Block: as.Blocks[0], IndexName: as.Index}
		p.setSchemaAndNames(buildRecoverIndexFields())
		ret = p
	case ast.AdminCleanupIndex:
		p := &CleanupIndex{Block: as.Blocks[0], IndexName: as.Index}
		p.setSchemaAndNames(buildCleanupIndexFields())
		ret = p
	case ast.AdminChecksumBlock:
		p := &ChecksumBlock{Blocks: as.Blocks}
		p.setSchemaAndNames(buildChecksumBlockSchema())
		ret = p
	case ast.AdminShowNextRowID:
		p := &ShowNextRowID{BlockName: as.Blocks[0]}
		p.setSchemaAndNames(buildShowNextRowID())
		ret = p
	case ast.AdminShowDBS:
		p := &ShowDBS{}
		p.setSchemaAndNames(buildShowDBSFields())
		ret = p
	case ast.AdminShowDBSJobs:
		p := LogicalShowDBSJobs{JobNumber: as.JobNumber}.Init(b.ctx)
		p.setSchemaAndNames(buildShowDBSJobsFields())
		for _, col := range p.schemaReplicant.DeferredCausets {
			col.UniqueID = b.ctx.GetStochastikVars().AllocCausetDeferredCausetID()
		}
		ret = p
		if as.Where != nil {
			ret, err = b.buildSelection(ctx, p, as.Where, nil)
			if err != nil {
				return nil, err
			}
		}
	case ast.AdminCancelDBSJobs:
		p := &CancelDBSJobs{JobIDs: as.JobIDs}
		p.setSchemaAndNames(buildCancelDBSJobsFields())
		ret = p
	case ast.AdminChecHoTTexRange:
		schemaReplicant, names, err := b.buildChecHoTTexSchema(as.Blocks[0], as.Index)
		if err != nil {
			return nil, err
		}

		p := &ChecHoTTexRange{Block: as.Blocks[0], IndexName: as.Index, HandleRanges: as.HandleRanges}
		p.setSchemaAndNames(schemaReplicant, names)
		ret = p
	case ast.AdminShowDBSJobQueries:
		p := &ShowDBSJobQueries{JobIDs: as.JobIDs}
		p.setSchemaAndNames(buildShowDBSJobQueriesFields())
		ret = p
	case ast.AdminShowSlow:
		p := &ShowSlow{ShowSlow: as.ShowSlow}
		p.setSchemaAndNames(buildShowSlowSchema())
		ret = p
	case ast.AdminReloadExprPushdownBlacklist:
		return &ReloadExprPushdownBlacklist{}, nil
	case ast.AdminReloadOptMemruleBlacklist:
		return &ReloadOptMemruleBlacklist{}, nil
	case ast.AdminPluginEnable:
		return &AdminPlugins{CausetAction: Enable, Plugins: as.Plugins}, nil
	case ast.AdminPluginDisable:
		return &AdminPlugins{CausetAction: Disable, Plugins: as.Plugins}, nil
	case ast.AdminFlushBindings:
		return &ALLEGROSQLBindCauset{ALLEGROSQLBindOp: OpFlushBindings}, nil
	case ast.AdminCaptureBindings:
		return &ALLEGROSQLBindCauset{ALLEGROSQLBindOp: OpCaptureBindings}, nil
	case ast.AdminEvolveBindings:
		return &ALLEGROSQLBindCauset{ALLEGROSQLBindOp: OpEvolveBindings}, nil
	case ast.AdminReloadBindings:
		return &ALLEGROSQLBindCauset{ALLEGROSQLBindOp: OpReloadBindings}, nil
	case ast.AdminShowTelemetry:
		p := &AdminShowTelemetry{}
		p.setSchemaAndNames(buildShowTelemetrySchema())
		ret = p
	case ast.AdminResetTelemetryID:
		return &AdminResetTelemetryID{}, nil
	case ast.AdminReloadStatistics:
		return &Simple{Statement: as}, nil
	default:
		return nil, ErrUnsupportedType.GenWithStack("Unsupported ast.AdminStmt(%T) for buildAdmin", as)
	}

	// Admin command can only be executed by an administrator.
	b.visitInfo = appendVisitInfo(b.visitInfo, allegrosql.SuperPriv, "", "", "", nil)
	return ret, nil
}

// getGenExprs gets the generated memex map.
func (b *CausetBuilder) getGenExprs(ctx context.Context, dbName perceptron.CIStr, tbl causet.Block, idx *perceptron.IndexInfo, exprDefCauss *memex.Schema, names types.NameSlice) (
	map[perceptron.BlockDeferredCausetID]memex.Expression, error) {
	tblInfo := tbl.Meta()
	genExprsMap := make(map[perceptron.BlockDeferredCausetID]memex.Expression)
	exprs := make([]memex.Expression, 0, len(tbl.DefCauss()))
	genExprIdxs := make([]perceptron.BlockDeferredCausetID, len(tbl.DefCauss()))
	mockBlockCauset := LogicalBlockDual{}.Init(b.ctx, b.getSelectOffset())
	mockBlockCauset.SetSchema(exprDefCauss)
	mockBlockCauset.names = names
	for i, colExpr := range mockBlockCauset.Schema().DeferredCausets {
		col := tbl.DefCauss()[i]
		var expr memex.Expression
		expr = colExpr
		if col.IsGenerated() && !col.GeneratedStored {
			var err error
			expr, _, err = b.rewrite(ctx, col.GeneratedExpr, mockBlockCauset, nil, true)
			if err != nil {
				return nil, errors.Trace(err)
			}
			found := false
			for _, column := range idx.DeferredCausets {
				if strings.EqualFold(col.Name.L, column.Name.L) {
					found = true
					break
				}
			}
			if found {
				genDeferredCausetID := perceptron.BlockDeferredCausetID{BlockID: tblInfo.ID, DeferredCausetID: col.DeferredCausetInfo.ID}
				genExprsMap[genDeferredCausetID] = expr
				genExprIdxs[i] = genDeferredCausetID
			}
		}
		exprs = append(exprs, expr)
	}
	// Re-iterate the memexs to handle virtual generated columns that refer to other generated columns.
	for i, expr := range exprs {
		exprs[i] = memex.DeferredCausetSubstitute(expr, mockBlockCauset.Schema(), exprs)
		if _, ok := genExprsMap[genExprIdxs[i]]; ok {
			genExprsMap[genExprIdxs[i]] = exprs[i]
		}
	}
	return genExprsMap, nil
}

// FindDeferredCausetInfoByID finds DeferredCausetInfo in defcaus by ID.
func FindDeferredCausetInfoByID(colInfos []*perceptron.DeferredCausetInfo, id int64) *perceptron.DeferredCausetInfo {
	for _, info := range colInfos {
		if info.ID == id {
			return info
		}
	}
	return nil
}

func (b *CausetBuilder) buildPhysicalIndexLookUpReader(ctx context.Context, dbName perceptron.CIStr, tbl causet.Block, idx *perceptron.IndexInfo) (Causet, error) {
	tblInfo := tbl.Meta()
	physicalID, isPartition := getPhysicalID(tbl)
	fullExprDefCauss, _, err := memex.BlockInfo2SchemaAndNames(b.ctx, dbName, tblInfo)
	if err != nil {
		return nil, err
	}
	extraInfo, extraDefCaus, hasExtraDefCaus := tryGetPkExtraDeferredCauset(b.ctx.GetStochastikVars(), tblInfo)
	pkHandleInfo, pkHandleDefCaus, hasPkIsHandle := tryGetPkHandleDefCaus(tblInfo, fullExprDefCauss)
	commonInfos, commonDefCauss, hasCommonDefCauss := tryGetCommonHandleDefCauss(tbl, fullExprDefCauss)
	idxDefCausInfos := getIndexDeferredCausetInfos(tblInfo, idx)
	idxDefCausSchema := getIndexDefCaussSchema(tblInfo, idx, fullExprDefCauss)
	idxDefCauss, idxDefCausLens := memex.IndexInfo2PrefixDefCauss(idxDefCausInfos, idxDefCausSchema.DeferredCausets, idx)

	is := PhysicalIndexScan{
		Block:            tblInfo,
		BlockAsName:      &tblInfo.Name,
		DBName:           dbName,
		DeferredCausets:  idxDefCausInfos,
		Index:            idx,
		IdxDefCauss:      idxDefCauss,
		IdxDefCausLens:   idxDefCausLens,
		dataSourceSchema: idxDefCausSchema.Clone(),
		Ranges:           ranger.FullRange(),
		physicalBlockID:  physicalID,
		isPartition:      isPartition,
	}.Init(b.ctx, b.getSelectOffset())
	// There are no alternative plan choices, so just use pseudo stats to avoid a panic.
	is.stats = &property.StatsInfo{HistDefCausl: &(statistics.PseudoBlock(tblInfo)).HistDefCausl}
	if hasCommonDefCauss {
		for _, c := range commonInfos {
			is.DeferredCausets = append(is.DeferredCausets, c.DeferredCausetInfo)
		}
	}
	is.initSchema(append(is.IdxDefCauss, commonDefCauss...), true)

	// It's the double read case.
	ts := PhysicalBlockScan{
		DeferredCausets: idxDefCausInfos,
		Block:           tblInfo,
		BlockAsName:     &tblInfo.Name,
		physicalBlockID: physicalID,
		isPartition:     isPartition,
	}.Init(b.ctx, b.getSelectOffset())
	ts.SetSchema(idxDefCausSchema)
	ts.DeferredCausets = ExpandVirtualDeferredCauset(ts.DeferredCausets, ts.schemaReplicant, ts.Block.DeferredCausets)
	switch {
	case hasExtraDefCaus:
		ts.DeferredCausets = append(ts.DeferredCausets, extraInfo)
		ts.schemaReplicant.Append(extraDefCaus)
		ts.HandleIdx = []int{len(ts.DeferredCausets) - 1}
	case hasPkIsHandle:
		ts.DeferredCausets = append(ts.DeferredCausets, pkHandleInfo)
		ts.schemaReplicant.Append(pkHandleDefCaus)
		ts.HandleIdx = []int{len(ts.DeferredCausets) - 1}
	case hasCommonDefCauss:
		ts.HandleIdx = make([]int, 0, len(commonDefCauss))
		for pkOffset, cInfo := range commonInfos {
			found := false
			for i, c := range ts.DeferredCausets {
				if c.ID == cInfo.ID {
					found = true
					ts.HandleIdx = append(ts.HandleIdx, i)
					break
				}
			}
			if !found {
				ts.DeferredCausets = append(ts.DeferredCausets, cInfo.DeferredCausetInfo)
				ts.schemaReplicant.Append(commonDefCauss[pkOffset])
				ts.HandleIdx = append(ts.HandleIdx, len(ts.DeferredCausets)-1)
			}
		}
	}

	cop := &copTask{
		indexCauset:          is,
		blockCauset:          ts,
		tblDefCausHists:      is.stats.HistDefCausl,
		extraHandleDefCaus:   extraDefCaus,
		commonHandleDefCauss: commonDefCauss,
	}
	rootT := finishCopTask(b.ctx, cop).(*rootTask)
	if err := rootT.p.ResolveIndices(); err != nil {
		return nil, err
	}
	return rootT.p, nil
}

func getIndexDeferredCausetInfos(tblInfo *perceptron.BlockInfo, idx *perceptron.IndexInfo) []*perceptron.DeferredCausetInfo {
	ret := make([]*perceptron.DeferredCausetInfo, len(idx.DeferredCausets))
	for i, idxDefCaus := range idx.DeferredCausets {
		ret[i] = tblInfo.DeferredCausets[idxDefCaus.Offset]
	}
	return ret
}

func getIndexDefCaussSchema(tblInfo *perceptron.BlockInfo, idx *perceptron.IndexInfo, allDefCausSchema *memex.Schema) *memex.Schema {
	schemaReplicant := memex.NewSchema(make([]*memex.DeferredCauset, 0, len(idx.DeferredCausets))...)
	for _, idxDefCaus := range idx.DeferredCausets {
		for i, colInfo := range tblInfo.DeferredCausets {
			if colInfo.Name.L == idxDefCaus.Name.L {
				schemaReplicant.Append(allDefCausSchema.DeferredCausets[i])
				break
			}
		}
	}
	return schemaReplicant
}

func getPhysicalID(t causet.Block) (physicalID int64, isPartition bool) {
	tblInfo := t.Meta()
	if tblInfo.GetPartitionInfo() != nil {
		pid := t.(causet.PhysicalBlock).GetPhysicalID()
		return pid, true
	}
	return tblInfo.ID, false
}

func tryGetPkExtraDeferredCauset(sv *variable.StochastikVars, tblInfo *perceptron.BlockInfo) (*perceptron.DeferredCausetInfo, *memex.DeferredCauset, bool) {
	if tblInfo.IsCommonHandle || tblInfo.PKIsHandle {
		return nil, nil, false
	}
	info := perceptron.NewExtraHandleDefCausInfo()
	expDefCaus := &memex.DeferredCauset{
		RetType:  types.NewFieldType(allegrosql.TypeLonglong),
		UniqueID: sv.AllocCausetDeferredCausetID(),
		ID:       perceptron.ExtraHandleID,
	}
	return info, expDefCaus, true
}

func tryGetCommonHandleDefCauss(t causet.Block, allDefCausSchema *memex.Schema) ([]*causet.DeferredCauset, []*memex.DeferredCauset, bool) {
	tblInfo := t.Meta()
	if !tblInfo.IsCommonHandle {
		return nil, nil, false
	}
	pk := blocks.FindPrimaryIndex(tblInfo)
	commonHandleDefCauss, _ := memex.IndexInfo2DefCauss(tblInfo.DeferredCausets, allDefCausSchema.DeferredCausets, pk)
	commonHandelDefCausInfos := blocks.TryGetCommonPkDeferredCausets(t)
	return commonHandelDefCausInfos, commonHandleDefCauss, true
}

func tryGetPkHandleDefCaus(tblInfo *perceptron.BlockInfo, allDefCausSchema *memex.Schema) (*perceptron.DeferredCausetInfo, *memex.DeferredCauset, bool) {
	if !tblInfo.PKIsHandle {
		return nil, nil, false
	}
	for i, c := range tblInfo.DeferredCausets {
		if allegrosql.HasPriKeyFlag(c.Flag) {
			return c, allDefCausSchema.DeferredCausets[i], true
		}
	}
	return nil, nil, false
}

func (b *CausetBuilder) buildPhysicalIndexLookUpReaders(ctx context.Context, dbName perceptron.CIStr, tbl causet.Block, indices []causet.Index) ([]Causet, []*perceptron.IndexInfo, error) {
	tblInfo := tbl.Meta()
	// get index information
	indexInfos := make([]*perceptron.IndexInfo, 0, len(tblInfo.Indices))
	indexLookUpReaders := make([]Causet, 0, len(tblInfo.Indices))
	for _, idx := range indices {
		idxInfo := idx.Meta()
		if idxInfo.State != perceptron.StatePublic {
			logutil.Logger(context.Background()).Info("build physical index lookup reader, the index isn't public",
				zap.String("index", idxInfo.Name.O), zap.Stringer("state", idxInfo.State), zap.String("causet", tblInfo.Name.O))
			continue
		}
		indexInfos = append(indexInfos, idxInfo)
		// For partition blocks.
		if pi := tbl.Meta().GetPartitionInfo(); pi != nil {
			for _, def := range pi.Definitions {
				t := tbl.(causet.PartitionedBlock).GetPartition(def.ID)
				reader, err := b.buildPhysicalIndexLookUpReader(ctx, dbName, t, idxInfo)
				if err != nil {
					return nil, nil, err
				}
				indexLookUpReaders = append(indexLookUpReaders, reader)
			}
			continue
		}
		// For non-partition blocks.
		reader, err := b.buildPhysicalIndexLookUpReader(ctx, dbName, tbl, idxInfo)
		if err != nil {
			return nil, nil, err
		}
		indexLookUpReaders = append(indexLookUpReaders, reader)
	}
	if len(indexLookUpReaders) == 0 {
		return nil, nil, nil
	}
	return indexLookUpReaders, indexInfos, nil
}

func (b *CausetBuilder) buildAdminCheckBlock(ctx context.Context, as *ast.AdminStmt) (*CheckBlock, error) {
	tblName := as.Blocks[0]
	blockInfo := as.Blocks[0].BlockInfo
	tbl, ok := b.is.BlockByID(blockInfo.ID)
	if !ok {
		return nil, schemareplicant.ErrBlockNotExists.GenWithStackByArgs(tblName.DBInfo.Name.O, blockInfo.Name.O)
	}
	p := &CheckBlock{
		DBName: tblName.Schema.O,
		Block:  tbl,
	}
	var readerCausets []Causet
	var indexInfos []*perceptron.IndexInfo
	var err error
	if as.Tp == ast.AdminChecHoTTex {
		// get index information
		var idx causet.Index
		idxName := strings.ToLower(as.Index)
		for _, index := range tbl.Indices() {
			if index.Meta().Name.L == idxName {
				idx = index
				break
			}
		}
		if idx == nil {
			return nil, errors.Errorf("index %s do not exist", as.Index)
		}
		if idx.Meta().State != perceptron.StatePublic {
			return nil, errors.Errorf("index %s state %s isn't public", as.Index, idx.Meta().State)
		}
		p.ChecHoTTex = true
		readerCausets, indexInfos, err = b.buildPhysicalIndexLookUpReaders(ctx, tblName.Schema, tbl, []causet.Index{idx})
	} else {
		readerCausets, indexInfos, err = b.buildPhysicalIndexLookUpReaders(ctx, tblName.Schema, tbl, tbl.Indices())
	}
	if err != nil {
		return nil, errors.Trace(err)
	}
	readers := make([]*PhysicalIndexLookUpReader, 0, len(readerCausets))
	for _, plan := range readerCausets {
		readers = append(readers, plan.(*PhysicalIndexLookUpReader))
	}
	p.IndexInfos = indexInfos
	p.IndexLookUpReaders = readers
	return p, nil
}

func (b *CausetBuilder) buildChecHoTTexSchema(tn *ast.BlockName, indexName string) (*memex.Schema, types.NameSlice, error) {
	schemaReplicant := memex.NewSchema()
	var names types.NameSlice
	indexName = strings.ToLower(indexName)
	indicesInfo := tn.BlockInfo.Indices
	defcaus := tn.BlockInfo.DefCauss()
	for _, idxInfo := range indicesInfo {
		if idxInfo.Name.L != indexName {
			continue
		}
		for _, idxDefCaus := range idxInfo.DeferredCausets {
			col := defcaus[idxDefCaus.Offset]
			names = append(names, &types.FieldName{
				DefCausName: idxDefCaus.Name,
				TblName:     tn.Name,
				DBName:      tn.Schema,
			})
			schemaReplicant.Append(&memex.DeferredCauset{
				RetType:  &col.FieldType,
				UniqueID: b.ctx.GetStochastikVars().AllocCausetDeferredCausetID(),
				ID:       col.ID})
		}
		names = append(names, &types.FieldName{
			DefCausName: perceptron.NewCIStr("extra_handle"),
			TblName:     tn.Name,
			DBName:      tn.Schema,
		})
		schemaReplicant.Append(&memex.DeferredCauset{
			RetType:  types.NewFieldType(allegrosql.TypeLonglong),
			UniqueID: b.ctx.GetStochastikVars().AllocCausetDeferredCausetID(),
			ID:       -1,
		})
	}
	if schemaReplicant.Len() == 0 {
		return nil, nil, errors.Errorf("index %s not found", indexName)
	}
	return schemaReplicant, names, nil
}

// getDefCaussInfo returns the info of index columns, normal columns and primary key.
func getDefCaussInfo(tn *ast.BlockName) (indicesInfo []*perceptron.IndexInfo, defcausInfo []*perceptron.DeferredCausetInfo) {
	tbl := tn.BlockInfo
	for _, col := range tbl.DeferredCausets {
		// The virtual column will not causetstore any data in EinsteinDB, so it should be ignored when collecting statistics.
		if col.IsGenerated() && !col.GeneratedStored {
			continue
		}
		if allegrosql.HasPriKeyFlag(col.Flag) && (tbl.PKIsHandle || tbl.IsCommonHandle) {
			continue
		}
		defcausInfo = append(defcausInfo, col)
	}
	for _, idx := range tn.BlockInfo.Indices {
		if idx.State == perceptron.StatePublic {
			indicesInfo = append(indicesInfo, idx)
		}
	}
	return
}

// BuildHandleDefCaussForAnalyze is exported for test.
func BuildHandleDefCaussForAnalyze(ctx stochastikctx.Context, tblInfo *perceptron.BlockInfo) HandleDefCauss {
	var handleDefCauss HandleDefCauss
	switch {
	case tblInfo.PKIsHandle:
		pkDefCaus := tblInfo.GetPkDefCausInfo()
		handleDefCauss = &IntHandleDefCauss{col: &memex.DeferredCauset{
			ID:      pkDefCaus.ID,
			RetType: &pkDefCaus.FieldType,
			Index:   pkDefCaus.Offset,
		}}
	case tblInfo.IsCommonHandle:
		pkIdx := blocks.FindPrimaryIndex(tblInfo)
		pkDefCausLen := len(pkIdx.DeferredCausets)
		columns := make([]*memex.DeferredCauset, pkDefCausLen)
		for i := 0; i < pkDefCausLen; i++ {
			colInfo := tblInfo.DeferredCausets[pkIdx.DeferredCausets[i].Offset]
			columns[i] = &memex.DeferredCauset{
				ID:      colInfo.ID,
				RetType: &colInfo.FieldType,
				Index:   colInfo.Offset,
			}
		}
		handleDefCauss = &CommonHandleDefCauss{
			tblInfo: tblInfo,
			idxInfo: pkIdx,
			columns: columns,
			sc:      ctx.GetStochastikVars().StmtCtx,
		}
	}
	return handleDefCauss
}

func getPhysicalIDsAndPartitionNames(tblInfo *perceptron.BlockInfo, partitionNames []perceptron.CIStr) ([]int64, []string, error) {
	pi := tblInfo.GetPartitionInfo()
	if pi == nil {
		if len(partitionNames) != 0 {
			return nil, nil, errors.Trace(dbs.ErrPartitionMgmtOnNonpartitioned)
		}
		return []int64{tblInfo.ID}, []string{""}, nil
	}
	if len(partitionNames) == 0 {
		ids := make([]int64, 0, len(pi.Definitions))
		names := make([]string, 0, len(pi.Definitions))
		for _, def := range pi.Definitions {
			ids = append(ids, def.ID)
			names = append(names, def.Name.O)
		}
		return ids, names, nil
	}
	ids := make([]int64, 0, len(partitionNames))
	names := make([]string, 0, len(partitionNames))
	for _, name := range partitionNames {
		found := false
		for _, def := range pi.Definitions {
			if def.Name.L == name.L {
				found = true
				ids = append(ids, def.ID)
				names = append(names, def.Name.O)
				break
			}
		}
		if !found {
			return nil, nil, fmt.Errorf("can not found the specified partition name %s in the causet definition", name.O)
		}
	}
	return ids, names, nil
}

func (b *CausetBuilder) buildAnalyzeBlock(as *ast.AnalyzeBlockStmt, opts map[ast.AnalyzeOptionType]uint64) (Causet, error) {
	p := &Analyze{Opts: opts}
	for _, tbl := range as.BlockNames {
		if tbl.BlockInfo.IsView() {
			return nil, errors.Errorf("analyze view %s is not supported now.", tbl.Name.O)
		}
		if tbl.BlockInfo.IsSequence() {
			return nil, errors.Errorf("analyze sequence %s is not supported now.", tbl.Name.O)
		}
		idxInfo, colInfo := getDefCaussInfo(tbl)
		physicalIDs, names, err := getPhysicalIDsAndPartitionNames(tbl.BlockInfo, as.PartitionNames)
		if err != nil {
			return nil, err
		}
		for _, idx := range idxInfo {
			for i, id := range physicalIDs {
				info := analyzeInfo{DBName: tbl.Schema.O, BlockName: tbl.Name.O, PartitionName: names[i], BlockID: AnalyzeBlockID{PersistID: id, DefCauslectIDs: []int64{id}}, Incremental: as.Incremental}
				p.IdxTasks = append(p.IdxTasks, AnalyzeIndexTask{
					IndexInfo:   idx,
					analyzeInfo: info,
					TblInfo:     tbl.BlockInfo,
				})
			}
		}
		handleDefCauss := BuildHandleDefCaussForAnalyze(b.ctx, tbl.BlockInfo)
		if len(colInfo) > 0 || handleDefCauss != nil {
			for i, id := range physicalIDs {
				info := analyzeInfo{DBName: tbl.Schema.O, BlockName: tbl.Name.O, PartitionName: names[i], BlockID: AnalyzeBlockID{PersistID: id, DefCauslectIDs: []int64{id}}, Incremental: as.Incremental}
				p.DefCausTasks = append(p.DefCausTasks, AnalyzeDeferredCausetsTask{
					HandleDefCauss: handleDefCauss,
					DefCaussInfo:   colInfo,
					analyzeInfo:    info,
					TblInfo:        tbl.BlockInfo,
				})
			}
		}
	}
	return p, nil
}

func (b *CausetBuilder) buildAnalyzeIndex(as *ast.AnalyzeBlockStmt, opts map[ast.AnalyzeOptionType]uint64) (Causet, error) {
	p := &Analyze{Opts: opts}
	tblInfo := as.BlockNames[0].BlockInfo
	physicalIDs, names, err := getPhysicalIDsAndPartitionNames(tblInfo, as.PartitionNames)
	if err != nil {
		return nil, err
	}
	for _, idxName := range as.IndexNames {
		if isPrimaryIndex(idxName) {
			handleDefCauss := BuildHandleDefCaussForAnalyze(b.ctx, tblInfo)
			if handleDefCauss != nil {
				for i, id := range physicalIDs {
					info := analyzeInfo{DBName: as.BlockNames[0].Schema.O, BlockName: as.BlockNames[0].Name.O, PartitionName: names[i], BlockID: AnalyzeBlockID{PersistID: id, DefCauslectIDs: []int64{id}}, Incremental: as.Incremental}
					p.DefCausTasks = append(p.DefCausTasks, AnalyzeDeferredCausetsTask{HandleDefCauss: handleDefCauss, analyzeInfo: info, TblInfo: tblInfo})
				}
				continue
			}
		}
		idx := tblInfo.FindIndexByName(idxName.L)
		if idx == nil || idx.State != perceptron.StatePublic {
			return nil, ErrAnalyzeMissIndex.GenWithStackByArgs(idxName.O, tblInfo.Name.O)
		}
		for i, id := range physicalIDs {
			info := analyzeInfo{DBName: as.BlockNames[0].Schema.O, BlockName: as.BlockNames[0].Name.O, PartitionName: names[i], BlockID: AnalyzeBlockID{PersistID: id, DefCauslectIDs: []int64{id}}, Incremental: as.Incremental}
			p.IdxTasks = append(p.IdxTasks, AnalyzeIndexTask{IndexInfo: idx, analyzeInfo: info, TblInfo: tblInfo})
		}
	}
	return p, nil
}

func (b *CausetBuilder) buildAnalyzeAllIndex(as *ast.AnalyzeBlockStmt, opts map[ast.AnalyzeOptionType]uint64) (Causet, error) {
	p := &Analyze{Opts: opts}
	tblInfo := as.BlockNames[0].BlockInfo
	physicalIDs, names, err := getPhysicalIDsAndPartitionNames(tblInfo, as.PartitionNames)
	if err != nil {
		return nil, err
	}
	for _, idx := range tblInfo.Indices {
		if idx.State == perceptron.StatePublic {
			for i, id := range physicalIDs {
				info := analyzeInfo{DBName: as.BlockNames[0].Schema.O, BlockName: as.BlockNames[0].Name.O, PartitionName: names[i], BlockID: AnalyzeBlockID{PersistID: id, DefCauslectIDs: []int64{id}}, Incremental: as.Incremental}
				p.IdxTasks = append(p.IdxTasks, AnalyzeIndexTask{IndexInfo: idx, analyzeInfo: info, TblInfo: tblInfo})
			}
		}
} 1616 handleDefCauss := BuildHandleDefCaussForAnalyze(b.ctx, tblInfo) 1617 if handleDefCauss != nil { 1618 for i, id := range physicalIDs { 1619 info := analyzeInfo{DBName: as.BlockNames[0].Schema.O, BlockName: as.BlockNames[0].Name.O, PartitionName: names[i], BlockID: AnalyzeBlockID{PersistID: id, DefCauslectIDs: []int64{id}}, Incremental: as.Incremental} 1620 p.DefCausTasks = append(p.DefCausTasks, AnalyzeDeferredCausetsTask{HandleDefCauss: handleDefCauss, analyzeInfo: info, TblInfo: tblInfo}) 1621 } 1622 } 1623 return p, nil 1624 } 1625 1626 var cmSketchSizeLimit = ekv.TxnEntrySizeLimit / binary.MaxVarintLen32 1627 1628 var analyzeOptionLimit = map[ast.AnalyzeOptionType]uint64{ 1629 ast.AnalyzeOptNumBuckets: 1024, 1630 ast.AnalyzeOptNumTopN: 1024, 1631 ast.AnalyzeOptCMSketchWidth: cmSketchSizeLimit, 1632 ast.AnalyzeOptCMSketchDepth: cmSketchSizeLimit, 1633 ast.AnalyzeOptNumSamples: 100000, 1634 } 1635 1636 var analyzeOptionDefault = map[ast.AnalyzeOptionType]uint64{ 1637 ast.AnalyzeOptNumBuckets: 256, 1638 ast.AnalyzeOptNumTopN: 20, 1639 ast.AnalyzeOptCMSketchWidth: 2048, 1640 ast.AnalyzeOptCMSketchDepth: 5, 1641 ast.AnalyzeOptNumSamples: 10000, 1642 } 1643 1644 func handleAnalyzeOptions(opts []ast.AnalyzeOpt) (map[ast.AnalyzeOptionType]uint64, error) { 1645 optMap := make(map[ast.AnalyzeOptionType]uint64, len(analyzeOptionDefault)) 1646 for key, val := range analyzeOptionDefault { 1647 optMap[key] = val 1648 } 1649 for _, opt := range opts { 1650 if opt.Type == ast.AnalyzeOptNumTopN { 1651 if opt.Value > analyzeOptionLimit[opt.Type] { 1652 return nil, errors.Errorf("value of analyze option %s should not be larger than %d", ast.AnalyzeOptionString[opt.Type], analyzeOptionLimit[opt.Type]) 1653 } 1654 } else { 1655 if opt.Value == 0 || opt.Value > analyzeOptionLimit[opt.Type] { 1656 return nil, errors.Errorf("value of analyze option %s should be positive and not larger than %d", ast.AnalyzeOptionString[opt.Type], analyzeOptionLimit[opt.Type]) 1657 } 1658 } 1659 optMap[opt.Type] = opt.Value 1660 } 1661 if optMap[ast.AnalyzeOptCMSketchWidth]*optMap[ast.AnalyzeOptCMSketchDepth] > cmSketchSizeLimit { 1662 return nil, errors.Errorf("cm sketch size (depth * width) should not be larger than %d", cmSketchSizeLimit) 1663 } 1664 return optMap, nil 1665 } 1666 1667 func (b *CausetBuilder) buildAnalyze(as *ast.AnalyzeBlockStmt) (Causet, error) { 1668 // If fast analyze is enabled, the storage must be einsteindb.CausetStorage.
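// EnableFastAnalyze is a per-stochastik flag; when it is set, the check below rejects ANALYZE
// unless the causetstore implements einsteindb.CausetStorage, presumably because fast analyze
// samples data directly from the storage layer.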
1669 if _, isEinsteinDBStorage := b.ctx.GetStore().(einsteindb.CausetStorage); !isEinsteinDBStorage && b.ctx.GetStochastikVars().EnableFastAnalyze { 1670 return nil, errors.Errorf("Only support fast analyze in einsteindb storage.") 1671 } 1672 for _, tbl := range as.BlockNames { 1673 user := b.ctx.GetStochastikVars().User 1674 var insertErr, selectErr error 1675 if user != nil { 1676 insertErr = ErrBlockaccessDenied.GenWithStackByArgs("INSERT", user.AuthUsername, user.AuthHostname, tbl.Name.O) 1677 selectErr = ErrBlockaccessDenied.GenWithStackByArgs("SELECT", user.AuthUsername, user.AuthHostname, tbl.Name.O) 1678 } 1679 b.visitInfo = appendVisitInfo(b.visitInfo, allegrosql.InsertPriv, tbl.Schema.O, tbl.Name.O, "", insertErr) 1680 b.visitInfo = appendVisitInfo(b.visitInfo, allegrosql.SelectPriv, tbl.Schema.O, tbl.Name.O, "", selectErr) 1681 } 1682 opts, err := handleAnalyzeOptions(as.AnalyzeOpts) 1683 if err != nil { 1684 return nil, err 1685 } 1686 if as.IndexFlag { 1687 if len(as.IndexNames) == 0 { 1688 return b.buildAnalyzeAllIndex(as, opts) 1689 } 1690 return b.buildAnalyzeIndex(as, opts) 1691 } 1692 return b.buildAnalyzeBlock(as, opts) 1693 } 1694 1695 func buildShowNextRowID() (*memex.Schema, types.NameSlice) { 1696 schemaReplicant := newDeferredCausetsWithNames(4) 1697 schemaReplicant.Append(buildDeferredCausetWithName("", "DB_NAME", allegrosql.TypeVarchar, allegrosql.MaxDatabaseNameLength)) 1698 schemaReplicant.Append(buildDeferredCausetWithName("", "TABLE_NAME", allegrosql.TypeVarchar, allegrosql.MaxBlockNameLength)) 1699 schemaReplicant.Append(buildDeferredCausetWithName("", "COLUMN_NAME", allegrosql.TypeVarchar, allegrosql.MaxDeferredCausetNameLength)) 1700 schemaReplicant.Append(buildDeferredCausetWithName("", "NEXT_GLOBAL_ROW_ID", allegrosql.TypeLonglong, 4)) 1701 schemaReplicant.Append(buildDeferredCausetWithName("", "ID_TYPE", allegrosql.TypeVarchar, 15)) 1702 return schemaReplicant.col2Schema(), schemaReplicant.names 1703 } 1704 1705 func buildShowDBSFields() (*memex.Schema, types.NameSlice) { 1706 schemaReplicant := newDeferredCausetsWithNames(6) 1707 schemaReplicant.Append(buildDeferredCausetWithName("", "SCHEMA_VER", allegrosql.TypeLonglong, 4)) 1708 schemaReplicant.Append(buildDeferredCausetWithName("", "OWNER_ID", allegrosql.TypeVarchar, 64)) 1709 schemaReplicant.Append(buildDeferredCausetWithName("", "OWNER_ADDRESS", allegrosql.TypeVarchar, 32)) 1710 schemaReplicant.Append(buildDeferredCausetWithName("", "RUNNING_JOBS", allegrosql.TypeVarchar, 256)) 1711 schemaReplicant.Append(buildDeferredCausetWithName("", "SELF_ID", allegrosql.TypeVarchar, 64)) 1712 schemaReplicant.Append(buildDeferredCausetWithName("", "QUERY", allegrosql.TypeVarchar, 256)) 1713 1714 return schemaReplicant.col2Schema(), schemaReplicant.names 1715 } 1716 1717 func buildRecoverIndexFields() (*memex.Schema, types.NameSlice) { 1718 schemaReplicant := newDeferredCausetsWithNames(2) 1719 schemaReplicant.Append(buildDeferredCausetWithName("", "ADDED_COUNT", allegrosql.TypeLonglong, 4)) 1720 schemaReplicant.Append(buildDeferredCausetWithName("", "SCAN_COUNT", allegrosql.TypeLonglong, 4)) 1721 return schemaReplicant.col2Schema(), schemaReplicant.names 1722 } 1723 1724 func buildCleanupIndexFields() (*memex.Schema, types.NameSlice) { 1725 schemaReplicant := newDeferredCausetsWithNames(1) 1726 schemaReplicant.Append(buildDeferredCausetWithName("", "REMOVED_COUNT", allegrosql.TypeLonglong, 4)) 1727 return schemaReplicant.col2Schema(), schemaReplicant.names 1728 } 1729 1730 func buildShowDBSJobsFields() 
(*memex.Schema, types.NameSlice) { 1731 schemaReplicant := newDeferredCausetsWithNames(11) 1732 schemaReplicant.Append(buildDeferredCausetWithName("", "JOB_ID", allegrosql.TypeLonglong, 4)) 1733 schemaReplicant.Append(buildDeferredCausetWithName("", "DB_NAME", allegrosql.TypeVarchar, 64)) 1734 schemaReplicant.Append(buildDeferredCausetWithName("", "TABLE_NAME", allegrosql.TypeVarchar, 64)) 1735 schemaReplicant.Append(buildDeferredCausetWithName("", "JOB_TYPE", allegrosql.TypeVarchar, 64)) 1736 schemaReplicant.Append(buildDeferredCausetWithName("", "SCHEMA_STATE", allegrosql.TypeVarchar, 64)) 1737 schemaReplicant.Append(buildDeferredCausetWithName("", "SCHEMA_ID", allegrosql.TypeLonglong, 4)) 1738 schemaReplicant.Append(buildDeferredCausetWithName("", "TABLE_ID", allegrosql.TypeLonglong, 4)) 1739 schemaReplicant.Append(buildDeferredCausetWithName("", "ROW_COUNT", allegrosql.TypeLonglong, 4)) 1740 schemaReplicant.Append(buildDeferredCausetWithName("", "START_TIME", allegrosql.TypeDatetime, 19)) 1741 schemaReplicant.Append(buildDeferredCausetWithName("", "END_TIME", allegrosql.TypeDatetime, 19)) 1742 schemaReplicant.Append(buildDeferredCausetWithName("", "STATE", allegrosql.TypeVarchar, 64)) 1743 return schemaReplicant.col2Schema(), schemaReplicant.names 1744 } 1745 1746 func buildBlockRegionsSchema() (*memex.Schema, types.NameSlice) { 1747 schemaReplicant := newDeferredCausetsWithNames(11) 1748 schemaReplicant.Append(buildDeferredCausetWithName("", "REGION_ID", allegrosql.TypeLonglong, 4)) 1749 schemaReplicant.Append(buildDeferredCausetWithName("", "START_KEY", allegrosql.TypeVarchar, 64)) 1750 schemaReplicant.Append(buildDeferredCausetWithName("", "END_KEY", allegrosql.TypeVarchar, 64)) 1751 schemaReplicant.Append(buildDeferredCausetWithName("", "LEADER_ID", allegrosql.TypeLonglong, 4)) 1752 schemaReplicant.Append(buildDeferredCausetWithName("", "LEADER_STORE_ID", allegrosql.TypeLonglong, 4)) 1753 schemaReplicant.Append(buildDeferredCausetWithName("", "PEERS", allegrosql.TypeVarchar, 64)) 1754 schemaReplicant.Append(buildDeferredCausetWithName("", "SCATTERING", allegrosql.TypeTiny, 1)) 1755 schemaReplicant.Append(buildDeferredCausetWithName("", "WRITTEN_BYTES", allegrosql.TypeLonglong, 4)) 1756 schemaReplicant.Append(buildDeferredCausetWithName("", "READ_BYTES", allegrosql.TypeLonglong, 4)) 1757 schemaReplicant.Append(buildDeferredCausetWithName("", "APPROXIMATE_SIZE(MB)", allegrosql.TypeLonglong, 4)) 1758 schemaReplicant.Append(buildDeferredCausetWithName("", "APPROXIMATE_KEYS", allegrosql.TypeLonglong, 4)) 1759 return schemaReplicant.col2Schema(), schemaReplicant.names 1760 } 1761 1762 func buildSplitRegionsSchema() (*memex.Schema, types.NameSlice) { 1763 schemaReplicant := newDeferredCausetsWithNames(2) 1764 schemaReplicant.Append(buildDeferredCausetWithName("", "TOTAL_SPLIT_REGION", allegrosql.TypeLonglong, 4)) 1765 schemaReplicant.Append(buildDeferredCausetWithName("", "SCATTER_FINISH_RATIO", allegrosql.TypeDouble, 8)) 1766 return schemaReplicant.col2Schema(), schemaReplicant.names 1767 } 1768 1769 func buildShowDBSJobQueriesFields() (*memex.Schema, types.NameSlice) { 1770 schemaReplicant := newDeferredCausetsWithNames(1) 1771 schemaReplicant.Append(buildDeferredCausetWithName("", "QUERY", allegrosql.TypeVarchar, 256)) 1772 return schemaReplicant.col2Schema(), schemaReplicant.names 1773 } 1774 1775 func buildShowSlowSchema() (*memex.Schema, types.NameSlice) { 1776 longlongSize, _ := allegrosql.GetDefaultFieldLengthAndDecimal(allegrosql.TypeLonglong) 1777 tinySize, _ := 
allegrosql.GetDefaultFieldLengthAndDecimal(allegrosql.TypeTiny) 1778 timestampSize, _ := allegrosql.GetDefaultFieldLengthAndDecimal(allegrosql.TypeTimestamp) 1779 durationSize, _ := allegrosql.GetDefaultFieldLengthAndDecimal(allegrosql.TypeDuration) 1780 1781 schemaReplicant := newDeferredCausetsWithNames(11) 1782 schemaReplicant.Append(buildDeferredCausetWithName("", "ALLEGROALLEGROSQL", allegrosql.TypeVarchar, 4096)) 1783 schemaReplicant.Append(buildDeferredCausetWithName("", "START", allegrosql.TypeTimestamp, timestampSize)) 1784 schemaReplicant.Append(buildDeferredCausetWithName("", "DURATION", allegrosql.TypeDuration, durationSize)) 1785 schemaReplicant.Append(buildDeferredCausetWithName("", "DETAILS", allegrosql.TypeVarchar, 256)) 1786 schemaReplicant.Append(buildDeferredCausetWithName("", "SUCC", allegrosql.TypeTiny, tinySize)) 1787 schemaReplicant.Append(buildDeferredCausetWithName("", "CONN_ID", allegrosql.TypeLonglong, longlongSize)) 1788 schemaReplicant.Append(buildDeferredCausetWithName("", "TRANSACTION_TS", allegrosql.TypeLonglong, longlongSize)) 1789 schemaReplicant.Append(buildDeferredCausetWithName("", "USER", allegrosql.TypeVarchar, 32)) 1790 schemaReplicant.Append(buildDeferredCausetWithName("", "EDB", allegrosql.TypeVarchar, 64)) 1791 schemaReplicant.Append(buildDeferredCausetWithName("", "TABLE_IDS", allegrosql.TypeVarchar, 256)) 1792 schemaReplicant.Append(buildDeferredCausetWithName("", "INDEX_IDS", allegrosql.TypeVarchar, 256)) 1793 schemaReplicant.Append(buildDeferredCausetWithName("", "INTERNAL", allegrosql.TypeTiny, tinySize)) 1794 schemaReplicant.Append(buildDeferredCausetWithName("", "DIGEST", allegrosql.TypeVarchar, 64)) 1795 return schemaReplicant.col2Schema(), schemaReplicant.names 1796 } 1797 1798 func buildCancelDBSJobsFields() (*memex.Schema, types.NameSlice) { 1799 schemaReplicant := newDeferredCausetsWithNames(2) 1800 schemaReplicant.Append(buildDeferredCausetWithName("", "JOB_ID", allegrosql.TypeVarchar, 64)) 1801 schemaReplicant.Append(buildDeferredCausetWithName("", "RESULT", allegrosql.TypeVarchar, 128)) 1802 1803 return schemaReplicant.col2Schema(), schemaReplicant.names 1804 } 1805 1806 func buildBRIESchema() (*memex.Schema, types.NameSlice) { 1807 longlongSize, _ := allegrosql.GetDefaultFieldLengthAndDecimal(allegrosql.TypeLonglong) 1808 datetimeSize, _ := allegrosql.GetDefaultFieldLengthAndDecimal(allegrosql.TypeDatetime) 1809 1810 schemaReplicant := newDeferredCausetsWithNames(5) 1811 schemaReplicant.Append(buildDeferredCausetWithName("", "Destination", allegrosql.TypeVarchar, 255)) 1812 schemaReplicant.Append(buildDeferredCausetWithName("", "Size", allegrosql.TypeLonglong, longlongSize)) 1813 schemaReplicant.Append(buildDeferredCausetWithName("", "BackupTS", allegrosql.TypeLonglong, longlongSize)) 1814 schemaReplicant.Append(buildDeferredCausetWithName("", "Queue Time", allegrosql.TypeDatetime, datetimeSize)) 1815 schemaReplicant.Append(buildDeferredCausetWithName("", "InterDircution Time", allegrosql.TypeDatetime, datetimeSize)) 1816 return schemaReplicant.col2Schema(), schemaReplicant.names 1817 } 1818 1819 func buildShowTelemetrySchema() (*memex.Schema, types.NameSlice) { 1820 schemaReplicant := newDeferredCausetsWithNames(1) 1821 schemaReplicant.Append(buildDeferredCausetWithName("", "TRACKING_ID", allegrosql.TypeVarchar, 64)) 1822 schemaReplicant.Append(buildDeferredCausetWithName("", "LAST_STATUS", allegrosql.TypeString, allegrosql.MaxBlobWidth)) 1823 schemaReplicant.Append(buildDeferredCausetWithName("", "DATA_PREVIEW", 
allegrosql.TypeString, allegrosql.MaxBlobWidth)) 1824 return schemaReplicant.col2Schema(), schemaReplicant.names 1825 } 1826 1827 func buildDeferredCausetWithName(blockName, name string, tp byte, size int) (*memex.DeferredCauset, *types.FieldName) { 1828 cs, cl := types.DefaultCharsetForType(tp) 1829 flag := allegrosql.UnsignedFlag 1830 if tp == allegrosql.TypeVarchar || tp == allegrosql.TypeBlob { 1831 cs = charset.CharsetUTF8MB4 1832 cl = charset.DefCauslationUTF8MB4 1833 flag = 0 1834 } 1835 1836 fieldType := &types.FieldType{ 1837 Charset: cs, 1838 DefCauslate: cl, 1839 Tp: tp, 1840 Flen: size, 1841 Flag: flag, 1842 } 1843 return &memex.DeferredCauset{ 1844 RetType: fieldType, 1845 }, &types.FieldName{DBName: util2.InformationSchemaName, TblName: perceptron.NewCIStr(blockName), DefCausName: perceptron.NewCIStr(name)} 1846 } 1847 1848 type columnsWithNames struct { 1849 defcaus []*memex.DeferredCauset 1850 names types.NameSlice 1851 } 1852 1853 func newDeferredCausetsWithNames(cap int) *columnsWithNames { 1854 return &columnsWithNames{ 1855 defcaus: make([]*memex.DeferredCauset, 0, cap), 1856 names: make(types.NameSlice, 0, cap), 1857 } 1858 } 1859 1860 func (cwn *columnsWithNames) Append(col *memex.DeferredCauset, name *types.FieldName) { 1861 cwn.defcaus = append(cwn.defcaus, col) 1862 cwn.names = append(cwn.names, name) 1863 } 1864 1865 func (cwn *columnsWithNames) col2Schema() *memex.Schema { 1866 return memex.NewSchema(cwn.defcaus...) 1867 } 1868 1869 // splitWhere splits a where memex into a list of AND conditions. 1870 func splitWhere(where ast.ExprNode) []ast.ExprNode { 1871 var conditions []ast.ExprNode 1872 switch x := where.(type) { 1873 case nil: 1874 case *ast.BinaryOperationExpr: 1875 if x.Op == opcode.LogicAnd { 1876 conditions = append(conditions, splitWhere(x.L)...) 1877 conditions = append(conditions, splitWhere(x.R)...) 1878 } else { 1879 conditions = append(conditions, x) 1880 } 1881 case *ast.ParenthesesExpr: 1882 conditions = append(conditions, splitWhere(x.Expr)...)
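// Any other memex (for example `a = 1 OR b = 2`, or a bare column reference) is treated as one
// indivisible condition and appended as-is by the default branch below.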
1883 default: 1884 conditions = append(conditions, where) 1885 } 1886 return conditions 1887 } 1888 1889 func (b *CausetBuilder) buildShow(ctx context.Context, show *ast.ShowStmt) (Causet, error) { 1890 p := LogicalShow{ 1891 ShowContents: ShowContents{ 1892 Tp: show.Tp, 1893 DBName: show.DBName, 1894 Block: show.Block, 1895 DeferredCauset: show.DeferredCauset, 1896 IndexName: show.IndexName, 1897 Flag: show.Flag, 1898 User: show.User, 1899 Roles: show.Roles, 1900 Full: show.Full, 1901 IfNotExists: show.IfNotExists, 1902 GlobalScope: show.GlobalScope, 1903 Extended: show.Extended, 1904 }, 1905 }.Init(b.ctx) 1906 isView := false 1907 isSequence := false 1908 switch show.Tp { 1909 case ast.ShowBlocks, ast.ShowBlockStatus: 1910 if p.DBName == "" { 1911 return nil, ErrNoDB 1912 } 1913 case ast.ShowCreateBlock, ast.ShowCreateSequence: 1914 user := b.ctx.GetStochastikVars().User 1915 var err error 1916 if user != nil { 1917 err = ErrBlockaccessDenied.GenWithStackByArgs("SHOW", user.AuthUsername, user.AuthHostname, show.Block.Name.L) 1918 } 1919 b.visitInfo = appendVisitInfo(b.visitInfo, allegrosql.AllPrivMask, show.Block.Schema.L, show.Block.Name.L, "", err) 1920 if causet, err := b.is.BlockByName(show.Block.Schema, show.Block.Name); err == nil { 1921 isView = causet.Meta().IsView() 1922 isSequence = causet.Meta().IsSequence() 1923 } 1924 case ast.ShowCreateView: 1925 err := ErrSpecificAccessDenied.GenWithStackByArgs("SHOW VIEW") 1926 b.visitInfo = appendVisitInfo(b.visitInfo, allegrosql.ShowViewPriv, show.Block.Schema.L, show.Block.Name.L, "", err) 1927 case ast.ShowBackups, ast.ShowRestores: 1928 err := ErrSpecificAccessDenied.GenWithStackByArgs("SUPER") 1929 b.visitInfo = appendVisitInfo(b.visitInfo, allegrosql.SuperPriv, "", "", "", err) 1930 case ast.ShowBlockNextRowId: 1931 p := &ShowNextRowID{BlockName: show.Block} 1932 p.setSchemaAndNames(buildShowNextRowID()) 1933 b.visitInfo = appendVisitInfo(b.visitInfo, allegrosql.SelectPriv, show.Block.Schema.L, show.Block.Name.L, "", ErrPrivilegeCheckFail) 1934 return p, nil 1935 case ast.ShowStatsBuckets, ast.ShowStatsHistograms, ast.ShowStatsMeta, ast.ShowStatsHealthy: 1936 user := b.ctx.GetStochastikVars().User 1937 var err error 1938 if user != nil { 1939 err = ErrDBaccessDenied.GenWithStackByArgs(user.AuthUsername, user.AuthHostname, allegrosql.SystemDB) 1940 } 1941 b.visitInfo = appendVisitInfo(b.visitInfo, allegrosql.SelectPriv, allegrosql.SystemDB, "", "", err) 1942 } 1943 schemaReplicant, names := buildShowSchema(show, isView, isSequence) 1944 p.SetSchema(schemaReplicant) 1945 p.names = names 1946 for _, col := range p.schemaReplicant.DeferredCausets { 1947 col.UniqueID = b.ctx.GetStochastikVars().AllocCausetDeferredCausetID() 1948 } 1949 var err error 1950 var np LogicalCauset 1951 np = p 1952 if show.Pattern != nil { 1953 show.Pattern.Expr = &ast.DeferredCausetNameExpr{ 1954 Name: &ast.DeferredCausetName{Name: p.OutputNames()[0].DefCausName}, 1955 } 1956 np, err = b.buildSelection(ctx, np, show.Pattern, nil) 1957 if err != nil { 1958 return nil, err 1959 } 1960 } 1961 if show.Where != nil { 1962 np, err = b.buildSelection(ctx, np, show.Where, nil) 1963 if err != nil { 1964 return nil, err 1965 } 1966 } 1967 if np != p { 1968 b.optFlag |= flagEliminateProjection 1969 fieldsLen := len(p.schemaReplicant.DeferredCausets) 1970 proj := LogicalProjection{Exprs: make([]memex.Expression, 0, fieldsLen)}.Init(b.ctx, 0) 1971 schemaReplicant := memex.NewSchema(make([]*memex.DeferredCauset, 0, fieldsLen)...) 
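// Each SHOW output column is projected and re-cloned under a fresh UniqueID, so the projection
// sitting on top of the filtered SHOW can later be removed by the flagEliminateProjection pass
// enabled above.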
1972 for _, col := range p.schemaReplicant.DeferredCausets { 1973 proj.Exprs = append(proj.Exprs, col) 1974 newDefCaus := col.Clone().(*memex.DeferredCauset) 1975 newDefCaus.UniqueID = b.ctx.GetStochastikVars().AllocCausetDeferredCausetID() 1976 schemaReplicant.Append(newDefCaus) 1977 } 1978 proj.SetSchema(schemaReplicant) 1979 proj.SetChildren(np) 1980 proj.SetOutputNames(np.OutputNames()) 1981 np = proj 1982 } 1983 if show.Tp == ast.ShowVariables || show.Tp == ast.ShowStatus { 1984 b.curClause = orderByClause 1985 orderByDefCaus := np.Schema().DeferredCausets[0].Clone().(*memex.DeferredCauset) 1986 sort := LogicalSort{ 1987 ByItems: []*soliton.ByItems{{Expr: orderByDefCaus}}, 1988 }.Init(b.ctx, b.getSelectOffset()) 1989 sort.SetChildren(np) 1990 np = sort 1991 } 1992 return np, nil 1993 } 1994 1995 func (b *CausetBuilder) buildSimple(node ast.StmtNode) (Causet, error) { 1996 p := &Simple{Statement: node} 1997 1998 switch raw := node.(type) { 1999 case *ast.FlushStmt: 2000 err := ErrSpecificAccessDenied.GenWithStackByArgs("RELOAD") 2001 b.visitInfo = appendVisitInfo(b.visitInfo, allegrosql.ReloadPriv, "", "", "", err) 2002 case *ast.AlterInstanceStmt: 2003 err := ErrSpecificAccessDenied.GenWithStack("ALTER INSTANCE") 2004 b.visitInfo = appendVisitInfo(b.visitInfo, allegrosql.SuperPriv, "", "", "", err) 2005 case *ast.AlterUserStmt: 2006 err := ErrSpecificAccessDenied.GenWithStackByArgs("CREATE USER") 2007 b.visitInfo = appendVisitInfo(b.visitInfo, allegrosql.CreateUserPriv, "", "", "", err) 2008 case *ast.GrantStmt: 2009 if b.ctx.GetStochastikVars().CurrentDB == "" && raw.Level.DBName == "" { 2010 if raw.Level.Level == ast.GrantLevelBlock { 2011 return nil, ErrNoDB 2012 } 2013 } 2014 b.visitInfo = collectVisitInfoFromGrantStmt(b.ctx, b.visitInfo, raw) 2015 case *ast.BRIEStmt: 2016 p.setSchemaAndNames(buildBRIESchema()) 2017 err := ErrSpecificAccessDenied.GenWithStackByArgs("SUPER") 2018 b.visitInfo = appendVisitInfo(b.visitInfo, allegrosql.SuperPriv, "", "", "", err) 2019 case *ast.GrantRoleStmt, *ast.RevokeRoleStmt: 2020 err := ErrSpecificAccessDenied.GenWithStackByArgs("SUPER") 2021 b.visitInfo = appendVisitInfo(b.visitInfo, allegrosql.SuperPriv, "", "", "", err) 2022 case *ast.RevokeStmt: 2023 b.visitInfo = collectVisitInfoFromRevokeStmt(b.ctx, b.visitInfo, raw) 2024 case *ast.KillStmt: 2025 // If you have the SUPER privilege, you can kill all threads and memexs. 2026 // Otherwise, you can kill only your own threads and memexs. 
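// For example, a connection without SUPER that runs `KILL 42` (connection ID is illustrative)
// only succeeds when connection 42 belongs to the same user; otherwise SuperPriv is appended to
// visitInfo below and enforced during privilege checking.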
2027 sm := b.ctx.GetStochastikManager() 2028 if sm != nil { 2029 if pi, ok := sm.GetProcessInfo(raw.ConnectionID); ok { 2030 loginUser := b.ctx.GetStochastikVars().User 2031 if pi.User != loginUser.Username { 2032 b.visitInfo = appendVisitInfo(b.visitInfo, allegrosql.SuperPriv, "", "", "", nil) 2033 } 2034 } 2035 } 2036 case *ast.UseStmt: 2037 if raw.DBName == "" { 2038 return nil, ErrNoDB 2039 } 2040 case *ast.ShutdownStmt: 2041 b.visitInfo = appendVisitInfo(b.visitInfo, allegrosql.ShutdownPriv, "", "", "", nil) 2042 case *ast.CreateStatisticsStmt: 2043 var selectErr, insertErr error 2044 user := b.ctx.GetStochastikVars().User 2045 if user != nil { 2046 selectErr = ErrBlockaccessDenied.GenWithStackByArgs("CREATE STATISTICS", user.AuthUsername, 2047 user.AuthHostname, raw.Block.Name.L) 2048 insertErr = ErrBlockaccessDenied.GenWithStackByArgs("CREATE STATISTICS", user.AuthUsername, 2049 user.AuthHostname, "stats_extended") 2050 } 2051 b.visitInfo = appendVisitInfo(b.visitInfo, allegrosql.SelectPriv, raw.Block.Schema.L, 2052 raw.Block.Name.L, "", selectErr) 2053 b.visitInfo = appendVisitInfo(b.visitInfo, allegrosql.InsertPriv, allegrosql.SystemDB, 2054 "stats_extended", "", insertErr) 2055 case *ast.DropStatisticsStmt: 2056 var err error 2057 user := b.ctx.GetStochastikVars().User 2058 if user != nil { 2059 err = ErrBlockaccessDenied.GenWithStackByArgs("DROP STATISTICS", user.AuthUsername, 2060 user.AuthHostname, "stats_extended") 2061 } 2062 b.visitInfo = appendVisitInfo(b.visitInfo, allegrosql.UFIDelatePriv, allegrosql.SystemDB, 2063 "stats_extended", "", err) 2064 } 2065 return p, nil 2066 } 2067 2068 func collectVisitInfoFromRevokeStmt(sctx stochastikctx.Context, vi []visitInfo, stmt *ast.RevokeStmt) []visitInfo { 2069 // To use REVOKE, you must have the GRANT OPTION privilege, 2070 // and you must have the privileges that you are granting. 2071 dbName := stmt.Level.DBName 2072 blockName := stmt.Level.BlockName 2073 if dbName == "" { 2074 dbName = sctx.GetStochastikVars().CurrentDB 2075 } 2076 vi = appendVisitInfo(vi, allegrosql.GrantPriv, dbName, blockName, "", nil) 2077 2078 var allPrivs []allegrosql.PrivilegeType 2079 for _, item := range stmt.Privs { 2080 if item.Priv == allegrosql.AllPriv { 2081 switch stmt.Level.Level { 2082 case ast.GrantLevelGlobal: 2083 allPrivs = allegrosql.AllGlobalPrivs 2084 case ast.GrantLevelDB: 2085 allPrivs = allegrosql.AllDBPrivs 2086 case ast.GrantLevelBlock: 2087 allPrivs = allegrosql.AllBlockPrivs 2088 } 2089 break 2090 } 2091 vi = appendVisitInfo(vi, item.Priv, dbName, blockName, "", nil) 2092 } 2093 2094 for _, priv := range allPrivs { 2095 vi = appendVisitInfo(vi, priv, dbName, blockName, "", nil) 2096 } 2097 2098 return vi 2099 } 2100 2101 func collectVisitInfoFromGrantStmt(sctx stochastikctx.Context, vi []visitInfo, stmt *ast.GrantStmt) []visitInfo { 2102 // To use GRANT, you must have the GRANT OPTION privilege, 2103 // and you must have the privileges that you are granting. 
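// For example, `GRANT SELECT, INSERT ON test.t TO u` (names are illustrative) records GrantPriv
// plus SELECT and INSERT on test.t in visitInfo, while `GRANT ALL ...` is expanded below into
// every privilege of the corresponding level.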
2104 dbName := stmt.Level.DBName 2105 blockName := stmt.Level.BlockName 2106 if dbName == "" { 2107 dbName = sctx.GetStochastikVars().CurrentDB 2108 } 2109 vi = appendVisitInfo(vi, allegrosql.GrantPriv, dbName, blockName, "", nil) 2110 2111 var allPrivs []allegrosql.PrivilegeType 2112 for _, item := range stmt.Privs { 2113 if item.Priv == allegrosql.AllPriv { 2114 switch stmt.Level.Level { 2115 case ast.GrantLevelGlobal: 2116 allPrivs = allegrosql.AllGlobalPrivs 2117 case ast.GrantLevelDB: 2118 allPrivs = allegrosql.AllDBPrivs 2119 case ast.GrantLevelBlock: 2120 allPrivs = allegrosql.AllBlockPrivs 2121 } 2122 break 2123 } 2124 vi = appendVisitInfo(vi, item.Priv, dbName, blockName, "", nil) 2125 } 2126 2127 for _, priv := range allPrivs { 2128 vi = appendVisitInfo(vi, priv, dbName, blockName, "", nil) 2129 } 2130 2131 return vi 2132 } 2133 2134 func (b *CausetBuilder) getDefaultValue(col *causet.DeferredCauset) (*memex.Constant, error) { 2135 var ( 2136 value types.Causet 2137 err error 2138 ) 2139 if col.DefaultIsExpr && col.DefaultExpr != nil { 2140 value, err = causet.EvalDefCausDefaultExpr(b.ctx, col.ToInfo(), col.DefaultExpr) 2141 } else { 2142 value, err = causet.GetDefCausDefaultValue(b.ctx, col.ToInfo()) 2143 } 2144 if err != nil { 2145 return nil, err 2146 } 2147 return &memex.Constant{Value: value, RetType: &col.FieldType}, nil 2148 } 2149 2150 func (b *CausetBuilder) findDefaultValue(defcaus []*causet.DeferredCauset, name *ast.DeferredCausetName) (*memex.Constant, error) { 2151 for _, col := range defcaus { 2152 if col.Name.L == name.Name.L { 2153 return b.getDefaultValue(col) 2154 } 2155 } 2156 return nil, ErrUnknownDeferredCauset.GenWithStackByArgs(name.Name.O, "field_list") 2157 } 2158 2159 // resolveGeneratedDeferredCausets resolves generated columns with their generation 2160 // memexs respectively. onDups indicates which columns are in on-duplicate list. 
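// For instance (hypothetical schema), with a generated column `c INT AS (a + b)` and an
// ON DUPLICATE KEY UPDATE clause assigning `a`, the result carries the rewritten generation memex
// for `c` plus an OnDuplicates assignment so `c` is recomputed on the duplicate-key path.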
2161 func (b *CausetBuilder) resolveGeneratedDeferredCausets(ctx context.Context, columns []*causet.DeferredCauset, onDups map[string]struct{}, mockCauset LogicalCauset) (igc InsertGeneratedDeferredCausets, err error) { 2162 for _, column := range columns { 2163 if !column.IsGenerated() { 2164 continue 2165 } 2166 columnName := &ast.DeferredCausetName{Name: column.Name} 2167 columnName.SetText(column.Name.O) 2168 2169 idx, err := memex.FindFieldName(mockCauset.OutputNames(), columnName) 2170 if err != nil { 2171 return igc, err 2172 } 2173 colExpr := mockCauset.Schema().DeferredCausets[idx] 2174 2175 expr, _, err := b.rewrite(ctx, column.GeneratedExpr, mockCauset, nil, true) 2176 if err != nil { 2177 return igc, err 2178 } 2179 2180 igc.DeferredCausets = append(igc.DeferredCausets, columnName) 2181 igc.Exprs = append(igc.Exprs, expr) 2182 if onDups == nil { 2183 continue 2184 } 2185 for dep := range column.Dependences { 2186 if _, ok := onDups[dep]; ok { 2187 assign := &memex.Assignment{DefCaus: colExpr, DefCausName: column.Name, Expr: expr} 2188 igc.OnDuplicates = append(igc.OnDuplicates, assign) 2189 break 2190 } 2191 } 2192 } 2193 return igc, nil 2194 } 2195 2196 func (b *CausetBuilder) buildInsert(ctx context.Context, insert *ast.InsertStmt) (Causet, error) { 2197 ts, ok := insert.Block.BlockRefs.Left.(*ast.BlockSource) 2198 if !ok { 2199 return nil, schemareplicant.ErrBlockNotExists.GenWithStackByArgs() 2200 } 2201 tn, ok := ts.Source.(*ast.BlockName) 2202 if !ok { 2203 return nil, schemareplicant.ErrBlockNotExists.GenWithStackByArgs() 2204 } 2205 blockInfo := tn.BlockInfo 2206 if blockInfo.IsView() { 2207 err := errors.Errorf("insert into view %s is not supported now.", blockInfo.Name.O) 2208 if insert.IsReplace { 2209 err = errors.Errorf("replace into view %s is not supported now.", blockInfo.Name.O) 2210 } 2211 return nil, err 2212 } 2213 if blockInfo.IsSequence() { 2214 err := errors.Errorf("insert into sequence %s is not supported now.", blockInfo.Name.O) 2215 if insert.IsReplace { 2216 err = errors.Errorf("replace into sequence %s is not supported now.", blockInfo.Name.O) 2217 } 2218 return nil, err 2219 } 2220 // Build Schema with DBName otherwise DeferredCausetRef with DBName cannot match any DeferredCauset in Schema. 2221 schemaReplicant, names, err := memex.BlockInfo2SchemaAndNames(b.ctx, tn.Schema, blockInfo) 2222 if err != nil { 2223 return nil, err 2224 } 2225 blockInCauset, ok := b.is.BlockByID(blockInfo.ID) 2226 if !ok { 2227 return nil, errors.Errorf("Can't get causet %s.", blockInfo.Name.O) 2228 } 2229 2230 insertCauset := Insert{ 2231 Block: blockInCauset, 2232 DeferredCausets: insert.DeferredCausets, 2233 blockSchema: schemaReplicant, 2234 blockDefCausNames: names, 2235 IsReplace: insert.IsReplace, 2236 }.Init(b.ctx) 2237 2238 if blockInfo.GetPartitionInfo() != nil && len(insert.PartitionNames) != 0 { 2239 givenPartitionSets := make(map[int64]struct{}, len(insert.PartitionNames)) 2240 // check partition by name. 
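// e.g. `INSERT INTO t PARTITION (p0, p1) ...` (partition names are illustrative) restricts the
// write to the listed partitions; unknown names fail in FindPartitionByName below, and a
// PARTITION clause on a non-partitioned causet is rejected right after this block.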
2241 for _, name := range insert.PartitionNames { 2242 id, err := blocks.FindPartitionByName(blockInfo, name.L) 2243 if err != nil { 2244 return nil, err 2245 } 2246 givenPartitionSets[id] = struct{}{} 2247 } 2248 pt := blockInCauset.(causet.PartitionedBlock) 2249 insertCauset.Block = blocks.NewPartitionBlockithGivenSets(pt, givenPartitionSets) 2250 } else if len(insert.PartitionNames) != 0 { 2251 return nil, ErrPartitionClauseOnNonpartitioned 2252 } 2253 2254 var authErr error 2255 if b.ctx.GetStochastikVars().User != nil { 2256 authErr = ErrBlockaccessDenied.GenWithStackByArgs("INSERT", b.ctx.GetStochastikVars().User.AuthUsername, 2257 b.ctx.GetStochastikVars().User.AuthHostname, blockInfo.Name.L) 2258 } 2259 2260 b.visitInfo = appendVisitInfo(b.visitInfo, allegrosql.InsertPriv, tn.DBInfo.Name.L, 2261 blockInfo.Name.L, "", authErr) 2262 2263 mockBlockCauset := LogicalBlockDual{}.Init(b.ctx, b.getSelectOffset()) 2264 mockBlockCauset.SetSchema(insertCauset.blockSchema) 2265 mockBlockCauset.names = insertCauset.blockDefCausNames 2266 2267 checkRefDeferredCauset := func(n ast.Node) ast.Node { 2268 if insertCauset.NeedFillDefaultValue { 2269 return n 2270 } 2271 switch n.(type) { 2272 case *ast.DeferredCausetName, *ast.DeferredCausetNameExpr: 2273 insertCauset.NeedFillDefaultValue = true 2274 } 2275 return n 2276 } 2277 2278 if len(insert.Setlist) > 0 { 2279 // Branch for `INSERT ... SET ...`. 2280 err := b.buildSetValuesOfInsert(ctx, insert, insertCauset, mockBlockCauset, checkRefDeferredCauset) 2281 if err != nil { 2282 return nil, err 2283 } 2284 } else if len(insert.Lists) > 0 { 2285 // Branch for `INSERT ... VALUES ...`. 2286 err := b.buildValuesListOfInsert(ctx, insert, insertCauset, mockBlockCauset, checkRefDeferredCauset) 2287 if err != nil { 2288 return nil, err 2289 } 2290 } else { 2291 // Branch for `INSERT ... SELECT ...`. 2292 err := b.buildSelectCausetOfInsert(ctx, insert, insertCauset) 2293 if err != nil { 2294 return nil, err 2295 } 2296 } 2297 2298 mockBlockCauset.SetSchema(insertCauset.Schema4OnDuplicate) 2299 mockBlockCauset.names = insertCauset.names4OnDuplicate 2300 2301 onDupDefCausSet, err := insertCauset.resolveOnDuplicate(insert.OnDuplicate, blockInfo, func(node ast.ExprNode) (memex.Expression, error) { 2302 return b.rewriteInsertOnDuplicateUFIDelate(ctx, node, mockBlockCauset, insertCauset) 2303 }) 2304 if err != nil { 2305 return nil, err 2306 } 2307 2308 // Calculate generated columns. 2309 mockBlockCauset.schemaReplicant = insertCauset.blockSchema 2310 mockBlockCauset.names = insertCauset.blockDefCausNames 2311 insertCauset.GenDefCauss, err = b.resolveGeneratedDeferredCausets(ctx, insertCauset.Block.DefCauss(), onDupDefCausSet, mockBlockCauset) 2312 if err != nil { 2313 return nil, err 2314 } 2315 2316 err = insertCauset.ResolveIndices() 2317 return insertCauset, err 2318 } 2319 2320 func (p *Insert) resolveOnDuplicate(onDup []*ast.Assignment, tblInfo *perceptron.BlockInfo, yield func(ast.ExprNode) (memex.Expression, error)) (map[string]struct{}, error) { 2321 onDupDefCausSet := make(map[string]struct{}, len(onDup)) 2322 colMap := make(map[string]*causet.DeferredCauset, len(p.Block.DefCauss())) 2323 for _, col := range p.Block.DefCauss() { 2324 colMap[col.Name.L] = col 2325 } 2326 for _, assign := range onDup { 2327 // Check whether the column to be uFIDelated exists in the source causet. 
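// e.g. for `... ON DUPLICATE KEY UPDATE b = VALUES(b)` (column name is illustrative) the
// assignment target `b` must resolve against blockDefCausNames, otherwise the unknown-column
// error below is returned.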
2328 idx, err := memex.FindFieldName(p.blockDefCausNames, assign.DeferredCauset) 2329 if err != nil { 2330 return nil, err 2331 } else if idx < 0 { 2332 return nil, ErrUnknownDeferredCauset.GenWithStackByArgs(assign.DeferredCauset.OrigDefCausName(), "field list") 2333 } 2334 2335 column := colMap[assign.DeferredCauset.Name.L] 2336 if column.Hidden { 2337 return nil, ErrUnknownDeferredCauset.GenWithStackByArgs(column.Name, clauseMsg[fieldList]) 2338 } 2339 // Check whether the column to be uFIDelated is the generated column. 2340 defaultExpr := extractDefaultExpr(assign.Expr) 2341 if defaultExpr != nil { 2342 defaultExpr.Name = assign.DeferredCauset 2343 } 2344 // Note: For INSERT, REPLACE, and UFIDelATE, if a generated column is inserted into, replaced, or uFIDelated explicitly, the only permitted value is DEFAULT. 2345 // see https://dev.allegrosql.com/doc/refman/8.0/en/create-causet-generated-columns.html 2346 if column.IsGenerated() { 2347 if defaultExpr != nil { 2348 continue 2349 } 2350 return nil, ErrBadGeneratedDeferredCauset.GenWithStackByArgs(assign.DeferredCauset.Name.O, tblInfo.Name.O) 2351 } 2352 2353 onDupDefCausSet[column.Name.L] = struct{}{} 2354 2355 expr, err := yield(assign.Expr) 2356 if err != nil { 2357 return nil, err 2358 } 2359 2360 p.OnDuplicate = append(p.OnDuplicate, &memex.Assignment{ 2361 DefCaus: p.blockSchema.DeferredCausets[idx], 2362 DefCausName: p.blockDefCausNames[idx].DefCausName, 2363 Expr: expr, 2364 }) 2365 } 2366 return onDupDefCausSet, nil 2367 } 2368 2369 func (b *CausetBuilder) getAffectDefCauss(insertStmt *ast.InsertStmt, insertCauset *Insert) (affectedValuesDefCauss []*causet.DeferredCauset, err error) { 2370 if len(insertStmt.DeferredCausets) > 0 { 2371 // This branch is for the following scenarios: 2372 // 1. `INSERT INTO tbl_name (col_name [, col_name] ...) {VALUES | VALUE} (value_list) [, (value_list)] ...`, 2373 // 2. `INSERT INTO tbl_name (col_name [, col_name] ...) SELECT ...`. 2374 colName := make([]string, 0, len(insertStmt.DeferredCausets)) 2375 for _, col := range insertStmt.DeferredCausets { 2376 colName = append(colName, col.Name.O) 2377 } 2378 var missingDefCausName string 2379 affectedValuesDefCauss, missingDefCausName = causet.FindDefCauss(insertCauset.Block.VisibleDefCauss(), colName, insertCauset.Block.Meta().PKIsHandle) 2380 if missingDefCausName != "" { 2381 return nil, ErrUnknownDeferredCauset.GenWithStackByArgs(missingDefCausName, clauseMsg[fieldList]) 2382 } 2383 } else if len(insertStmt.Setlist) == 0 { 2384 // This branch is for the following scenarios: 2385 // 1. `INSERT INTO tbl_name {VALUES | VALUE} (value_list) [, (value_list)] ...`, 2386 // 2. `INSERT INTO tbl_name SELECT ...`. 
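// With no explicit column list and no SET clause, every visible column of the target causet is
// affected; hidden columns are excluded because only VisibleDefCauss is consulted.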
2387 affectedValuesDefCauss = insertCauset.Block.VisibleDefCauss() 2388 } 2389 return affectedValuesDefCauss, nil 2390 } 2391 2392 func (b *CausetBuilder) buildSetValuesOfInsert(ctx context.Context, insert *ast.InsertStmt, insertCauset *Insert, mockBlockCauset *LogicalBlockDual, checkRefDeferredCauset func(n ast.Node) ast.Node) error { 2393 blockInfo := insertCauset.Block.Meta() 2394 colNames := make([]string, 0, len(insert.Setlist)) 2395 exprDefCauss := make([]*memex.DeferredCauset, 0, len(insert.Setlist)) 2396 for _, assign := range insert.Setlist { 2397 idx, err := memex.FindFieldName(insertCauset.blockDefCausNames, assign.DeferredCauset) 2398 if err != nil { 2399 return err 2400 } 2401 if idx < 0 { 2402 return errors.Errorf("Can't find column %s", assign.DeferredCauset) 2403 } 2404 colNames = append(colNames, assign.DeferredCauset.Name.L) 2405 exprDefCauss = append(exprDefCauss, insertCauset.blockSchema.DeferredCausets[idx]) 2406 } 2407 2408 // Check whether the column to be uFIDelated is the generated column. 2409 tDefCauss, missingDefCausName := causet.FindDefCauss(insertCauset.Block.VisibleDefCauss(), colNames, blockInfo.PKIsHandle) 2410 if missingDefCausName != "" { 2411 return ErrUnknownDeferredCauset.GenWithStackByArgs(missingDefCausName, clauseMsg[fieldList]) 2412 } 2413 generatedDeferredCausets := make(map[string]struct{}, len(tDefCauss)) 2414 for _, tDefCaus := range tDefCauss { 2415 if tDefCaus.IsGenerated() { 2416 generatedDeferredCausets[tDefCaus.Name.L] = struct{}{} 2417 } 2418 } 2419 2420 insertCauset.AllAssignmentsAreConstant = true 2421 for i, assign := range insert.Setlist { 2422 defaultExpr := extractDefaultExpr(assign.Expr) 2423 if defaultExpr != nil { 2424 defaultExpr.Name = assign.DeferredCauset 2425 } 2426 // Note: For INSERT, REPLACE, and UFIDelATE, if a generated column is inserted into, replaced, or uFIDelated explicitly, the only permitted value is DEFAULT. 
2427 // see https://dev.allegrosql.com/doc/refman/8.0/en/create-causet-generated-columns.html 2428 if _, ok := generatedDeferredCausets[assign.DeferredCauset.Name.L]; ok { 2429 if defaultExpr != nil { 2430 continue 2431 } 2432 return ErrBadGeneratedDeferredCauset.GenWithStackByArgs(assign.DeferredCauset.Name.O, blockInfo.Name.O) 2433 } 2434 b.curClause = fieldList 2435 // subquery in insert values should not reference upper scope 2436 usingCauset := mockBlockCauset 2437 if _, ok := assign.Expr.(*ast.SubqueryExpr); ok { 2438 usingCauset = LogicalBlockDual{}.Init(b.ctx, b.getSelectOffset()) 2439 } 2440 expr, _, err := b.rewriteWithPreprocess(ctx, assign.Expr, usingCauset, nil, nil, true, checkRefDeferredCauset) 2441 if err != nil { 2442 return err 2443 } 2444 if insertCauset.AllAssignmentsAreConstant { 2445 _, isConstant := expr.(*memex.Constant) 2446 insertCauset.AllAssignmentsAreConstant = isConstant 2447 } 2448 2449 insertCauset.SetList = append(insertCauset.SetList, &memex.Assignment{ 2450 DefCaus: exprDefCauss[i], 2451 DefCausName: perceptron.NewCIStr(colNames[i]), 2452 Expr: expr, 2453 }) 2454 } 2455 insertCauset.Schema4OnDuplicate = insertCauset.blockSchema 2456 insertCauset.names4OnDuplicate = insertCauset.blockDefCausNames 2457 return nil 2458 } 2459 2460 func (b *CausetBuilder) buildValuesListOfInsert(ctx context.Context, insert *ast.InsertStmt, insertCauset *Insert, mockBlockCauset *LogicalBlockDual, checkRefDeferredCauset func(n ast.Node) ast.Node) error { 2461 affectedValuesDefCauss, err := b.getAffectDefCauss(insert, insertCauset) 2462 if err != nil { 2463 return err 2464 } 2465 2466 // If value_list and col_list are empty and we have a generated column, we can still write data to this causet. 2467 // For example, insert into t values(); can be executed successfully if t has a generated column. 2468 if len(insert.DeferredCausets) > 0 || len(insert.Lists[0]) > 0 { 2469 // If value_list or col_list is not empty, the length of value_list should be the same with that of col_list. 2470 if len(insert.Lists[0]) != len(affectedValuesDefCauss) { 2471 return ErrWrongValueCountOnRow.GenWithStackByArgs(1) 2472 } 2473 } 2474 2475 insertCauset.AllAssignmentsAreConstant = true 2476 totalBlockDefCauss := insertCauset.Block.DefCauss() 2477 for i, valuesItem := range insert.Lists { 2478 // The length of all the value_list should be the same. 2479 // "insert into t values (), ()" is valid. 2480 // "insert into t values (), (1)" is not valid. 2481 // "insert into t values (1), ()" is not valid. 2482 // "insert into t values (1,2), (1)" is not valid. 
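// The first value_list was already compared against affectedValuesDefCauss above, so from the
// second list onward it is enough to check each list against its predecessor.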
2483 if i > 0 && len(insert.Lists[i-1]) != len(insert.Lists[i]) { 2484 return ErrWrongValueCountOnRow.GenWithStackByArgs(i + 1) 2485 } 2486 exprList := make([]memex.Expression, 0, len(valuesItem)) 2487 for j, valueItem := range valuesItem { 2488 var expr memex.Expression 2489 var err error 2490 var generatedDeferredCausetWithDefaultExpr bool 2491 col := affectedValuesDefCauss[j] 2492 switch x := valueItem.(type) { 2493 case *ast.DefaultExpr: 2494 if col.IsGenerated() { 2495 if x.Name != nil { 2496 return ErrBadGeneratedDeferredCauset.GenWithStackByArgs(col.Name.O, insertCauset.Block.Meta().Name.O) 2497 } 2498 generatedDeferredCausetWithDefaultExpr = true 2499 break 2500 } 2501 if x.Name != nil { 2502 expr, err = b.findDefaultValue(totalBlockDefCauss, x.Name) 2503 } else { 2504 expr, err = b.getDefaultValue(affectedValuesDefCauss[j]) 2505 } 2506 case *driver.ValueExpr: 2507 expr = &memex.Constant{ 2508 Value: x.Causet, 2509 RetType: &x.Type, 2510 } 2511 default: 2512 b.curClause = fieldList 2513 // subquery in insert values should not reference upper scope 2514 usingCauset := mockBlockCauset 2515 if _, ok := valueItem.(*ast.SubqueryExpr); ok { 2516 usingCauset = LogicalBlockDual{}.Init(b.ctx, b.getSelectOffset()) 2517 } 2518 expr, _, err = b.rewriteWithPreprocess(ctx, valueItem, usingCauset, nil, nil, true, checkRefDeferredCauset) 2519 } 2520 if err != nil { 2521 return err 2522 } 2523 if insertCauset.AllAssignmentsAreConstant { 2524 _, isConstant := expr.(*memex.Constant) 2525 insertCauset.AllAssignmentsAreConstant = isConstant 2526 } 2527 // Note: For INSERT, REPLACE, and UFIDelATE, if a generated column is inserted into, replaced, or uFIDelated explicitly, the only permitted value is DEFAULT. 2528 // see https://dev.allegrosql.com/doc/refman/8.0/en/create-causet-generated-columns.html 2529 if col.IsGenerated() { 2530 if generatedDeferredCausetWithDefaultExpr { 2531 continue 2532 } 2533 return ErrBadGeneratedDeferredCauset.GenWithStackByArgs(col.Name.O, insertCauset.Block.Meta().Name.O) 2534 } 2535 exprList = append(exprList, expr) 2536 } 2537 insertCauset.Lists = append(insertCauset.Lists, exprList) 2538 } 2539 insertCauset.Schema4OnDuplicate = insertCauset.blockSchema 2540 insertCauset.names4OnDuplicate = insertCauset.blockDefCausNames 2541 return nil 2542 } 2543 2544 func (b *CausetBuilder) buildSelectCausetOfInsert(ctx context.Context, insert *ast.InsertStmt, insertCauset *Insert) error { 2545 affectedValuesDefCauss, err := b.getAffectDefCauss(insert, insertCauset) 2546 if err != nil { 2547 return err 2548 } 2549 selectCauset, err := b.Build(ctx, insert.Select) 2550 if err != nil { 2551 return err 2552 } 2553 2554 // Check to guarantee that the length of the event returned by select is equal to that of affectedValuesDefCauss. 2555 if selectCauset.Schema().Len() != len(affectedValuesDefCauss) { 2556 return ErrWrongValueCountOnRow.GenWithStackByArgs(1) 2557 } 2558 2559 // Check to guarantee that there's no generated column. 2560 // This check should be done after the above one to make its behavior compatible with MyALLEGROSQL. 2561 // For example, causet t has two columns, namely a and b, and b is a generated column. 2562 // "insert into t (b) select * from t" will raise an error that the column count is not matched. 2563 // "insert into t select * from t" will raise an error that there's a generated column in the column list. 
2564 // If we do this check before the above one, "insert into t (b) select * from t" will raise an error 2565 // that there's a generated column in the column list. 2566 for _, col := range affectedValuesDefCauss { 2567 if col.IsGenerated() { 2568 return ErrBadGeneratedDeferredCauset.GenWithStackByArgs(col.Name.O, insertCauset.Block.Meta().Name.O) 2569 } 2570 } 2571 2572 names := selectCauset.OutputNames() 2573 insertCauset.SelectCauset, _, err = DoOptimize(ctx, b.ctx, b.optFlag, selectCauset.(LogicalCauset)) 2574 if err != nil { 2575 return err 2576 } 2577 2578 // schema4NewRow is the schemaReplicant for the newly created data record based on 2579 // the result of the select memex. 2580 schema4NewRow := memex.NewSchema(make([]*memex.DeferredCauset, len(insertCauset.Block.DefCauss()))...) 2581 names4NewRow := make(types.NameSlice, len(insertCauset.Block.DefCauss())) 2582 // TODO: don't clone it. 2583 for i, selDefCaus := range insertCauset.SelectCauset.Schema().DeferredCausets { 2584 ordinal := affectedValuesDefCauss[i].Offset 2585 schema4NewRow.DeferredCausets[ordinal] = &memex.DeferredCauset{} 2586 *schema4NewRow.DeferredCausets[ordinal] = *selDefCaus 2587 2588 schema4NewRow.DeferredCausets[ordinal].RetType = &types.FieldType{} 2589 *schema4NewRow.DeferredCausets[ordinal].RetType = affectedValuesDefCauss[i].FieldType 2590 2591 names4NewRow[ordinal] = names[i] 2592 } 2593 for i := range schema4NewRow.DeferredCausets { 2594 if schema4NewRow.DeferredCausets[i] == nil { 2595 schema4NewRow.DeferredCausets[i] = &memex.DeferredCauset{UniqueID: insertCauset.ctx.GetStochastikVars().AllocCausetDeferredCausetID()} 2596 names4NewRow[i] = types.EmptyName 2597 } 2598 } 2599 insertCauset.Schema4OnDuplicate = memex.MergeSchema(insertCauset.blockSchema, schema4NewRow) 2600 insertCauset.names4OnDuplicate = append(insertCauset.blockDefCausNames.Shallow(), names4NewRow...) 
2601 return nil 2602 } 2603 2604 func (b *CausetBuilder) buildLoadData(ctx context.Context, ld *ast.LoadDataStmt) (Causet, error) { 2605 p := &LoadData{ 2606 IsLocal: ld.IsLocal, 2607 OnDuplicate: ld.OnDuplicate, 2608 Path: ld.Path, 2609 Block: ld.Block, 2610 DeferredCausets: ld.DeferredCausets, 2611 FieldsInfo: ld.FieldsInfo, 2612 LinesInfo: ld.LinesInfo, 2613 IgnoreLines: ld.IgnoreLines, 2614 DeferredCausetAssignments: ld.DeferredCausetAssignments, 2615 DeferredCausetsAndUserVars: ld.DeferredCausetsAndUserVars, 2616 } 2617 user := b.ctx.GetStochastikVars().User 2618 var insertErr error 2619 if user != nil { 2620 insertErr = ErrBlockaccessDenied.GenWithStackByArgs("INSERT", user.AuthUsername, user.AuthHostname, p.Block.Name.O) 2621 } 2622 b.visitInfo = appendVisitInfo(b.visitInfo, allegrosql.InsertPriv, p.Block.Schema.O, p.Block.Name.O, "", insertErr) 2623 blockInfo := p.Block.BlockInfo 2624 blockInCauset, ok := b.is.BlockByID(blockInfo.ID) 2625 if !ok { 2626 EDB := b.ctx.GetStochastikVars().CurrentDB 2627 return nil, schemareplicant.ErrBlockNotExists.GenWithStackByArgs(EDB, blockInfo.Name.O) 2628 } 2629 schemaReplicant, names, err := memex.BlockInfo2SchemaAndNames(b.ctx, perceptron.NewCIStr(""), blockInfo) 2630 if err != nil { 2631 return nil, err 2632 } 2633 mockBlockCauset := LogicalBlockDual{}.Init(b.ctx, b.getSelectOffset()) 2634 mockBlockCauset.SetSchema(schemaReplicant) 2635 mockBlockCauset.names = names 2636 2637 p.GenDefCauss, err = b.resolveGeneratedDeferredCausets(ctx, blockInCauset.DefCauss(), nil, mockBlockCauset) 2638 if err != nil { 2639 return nil, err 2640 } 2641 return p, nil 2642 } 2643 2644 func (b *CausetBuilder) buildLoadStats(ld *ast.LoadStatsStmt) Causet { 2645 p := &LoadStats{Path: ld.Path} 2646 return p 2647 } 2648 2649 func (b *CausetBuilder) buildIndexAdvise(node *ast.IndexAdviseStmt) Causet { 2650 p := &IndexAdvise{ 2651 IsLocal: node.IsLocal, 2652 Path: node.Path, 2653 MaxMinutes: node.MaxMinutes, 2654 MaxIndexNum: node.MaxIndexNum, 2655 LinesInfo: node.LinesInfo, 2656 } 2657 return p 2658 } 2659 2660 func (b *CausetBuilder) buildSplitRegion(node *ast.SplitRegionStmt) (Causet, error) { 2661 if node.SplitSyntaxOpt != nil && node.SplitSyntaxOpt.HasPartition && node.Block.BlockInfo.Partition == nil { 2662 return nil, ErrPartitionClauseOnNonpartitioned 2663 } 2664 if len(node.IndexName.L) != 0 { 2665 return b.buildSplitIndexRegion(node) 2666 } 2667 return b.buildSplitBlockRegion(node) 2668 } 2669 2670 func (b *CausetBuilder) buildSplitIndexRegion(node *ast.SplitRegionStmt) (Causet, error) { 2671 tblInfo := node.Block.BlockInfo 2672 indexInfo := tblInfo.FindIndexByName(node.IndexName.L) 2673 if indexInfo == nil { 2674 return nil, ErrKeyDoesNotExist.GenWithStackByArgs(node.IndexName, tblInfo.Name) 2675 } 2676 mockBlockCauset := LogicalBlockDual{}.Init(b.ctx, b.getSelectOffset()) 2677 schemaReplicant, names, err := memex.BlockInfo2SchemaAndNames(b.ctx, node.Block.Schema, tblInfo) 2678 if err != nil { 2679 return nil, err 2680 } 2681 mockBlockCauset.SetSchema(schemaReplicant) 2682 mockBlockCauset.names = names 2683 2684 p := &SplitRegion{ 2685 BlockInfo: tblInfo, 2686 PartitionNames: node.PartitionNames, 2687 IndexInfo: indexInfo, 2688 } 2689 p.names = names 2690 p.setSchemaAndNames(buildSplitRegionsSchema()) 2691 // Split index regions by user specified value lists. 
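// Roughly, this covers statements of the form `SPLIT TABLE t INDEX idx BY ("a"), ("b"), ("c")`
// (names and values are illustrative); each value list is converted to the index column types
// below and becomes one split point.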
2692 if len(node.SplitOpt.ValueLists) > 0 { 2693 indexValues := make([][]types.Causet, 0, len(node.SplitOpt.ValueLists)) 2694 for i, valuesItem := range node.SplitOpt.ValueLists { 2695 if len(valuesItem) > len(indexInfo.DeferredCausets) { 2696 return nil, ErrWrongValueCountOnRow.GenWithStackByArgs(i + 1) 2697 } 2698 values, err := b.convertValue2DeferredCausetType(valuesItem, mockBlockCauset, indexInfo, tblInfo) 2699 if err != nil { 2700 return nil, err 2701 } 2702 indexValues = append(indexValues, values) 2703 } 2704 p.ValueLists = indexValues 2705 return p, nil 2706 } 2707 2708 // Split index regions by lower, upper value. 2709 checkLowerUpperValue := func(valuesItem []ast.ExprNode, name string) ([]types.Causet, error) { 2710 if len(valuesItem) == 0 { 2711 return nil, errors.Errorf("Split index `%v` region %s value count should more than 0", indexInfo.Name, name) 2712 } 2713 if len(valuesItem) > len(indexInfo.DeferredCausets) { 2714 return nil, errors.Errorf("Split index `%v` region column count doesn't match value count at %v", indexInfo.Name, name) 2715 } 2716 return b.convertValue2DeferredCausetType(valuesItem, mockBlockCauset, indexInfo, tblInfo) 2717 } 2718 lowerValues, err := checkLowerUpperValue(node.SplitOpt.Lower, "lower") 2719 if err != nil { 2720 return nil, err 2721 } 2722 upperValues, err := checkLowerUpperValue(node.SplitOpt.Upper, "upper") 2723 if err != nil { 2724 return nil, err 2725 } 2726 p.Lower = lowerValues 2727 p.Upper = upperValues 2728 2729 maxSplitRegionNum := int64(config.GetGlobalConfig().SplitRegionMaxNum) 2730 if node.SplitOpt.Num > maxSplitRegionNum { 2731 return nil, errors.Errorf("Split index region num exceeded the limit %v", maxSplitRegionNum) 2732 } else if node.SplitOpt.Num < 1 { 2733 return nil, errors.Errorf("Split index region num should more than 0") 2734 } 2735 p.Num = int(node.SplitOpt.Num) 2736 return p, nil 2737 } 2738 2739 func (b *CausetBuilder) convertValue2DeferredCausetType(valuesItem []ast.ExprNode, mockBlockCauset LogicalCauset, indexInfo *perceptron.IndexInfo, tblInfo *perceptron.BlockInfo) ([]types.Causet, error) { 2740 values := make([]types.Causet, 0, len(valuesItem)) 2741 for j, valueItem := range valuesItem { 2742 colOffset := indexInfo.DeferredCausets[j].Offset 2743 value, err := b.convertValue(valueItem, mockBlockCauset, tblInfo.DeferredCausets[colOffset]) 2744 if err != nil { 2745 return nil, err 2746 } 2747 values = append(values, value) 2748 } 2749 return values, nil 2750 } 2751 2752 func (b *CausetBuilder) convertValue(valueItem ast.ExprNode, mockBlockCauset LogicalCauset, col *perceptron.DeferredCausetInfo) (d types.Causet, err error) { 2753 var expr memex.Expression 2754 switch x := valueItem.(type) { 2755 case *driver.ValueExpr: 2756 expr = &memex.Constant{ 2757 Value: x.Causet, 2758 RetType: &x.Type, 2759 } 2760 default: 2761 expr, _, err = b.rewrite(context.TODO(), valueItem, mockBlockCauset, nil, true) 2762 if err != nil { 2763 return d, err 2764 } 2765 } 2766 constant, ok := expr.(*memex.Constant) 2767 if !ok { 2768 return d, errors.New("Expect constant values") 2769 } 2770 value, err := constant.Eval(chunk.Row{}) 2771 if err != nil { 2772 return d, err 2773 } 2774 d, err = value.ConvertTo(b.ctx.GetStochastikVars().StmtCtx, &col.FieldType) 2775 if err != nil { 2776 if !types.ErrTruncated.Equal(err) && !types.ErrTruncatedWrongVal.Equal(err) { 2777 return d, err 2778 } 2779 valStr, err1 := value.ToString() 2780 if err1 != nil { 2781 return d, err 2782 } 2783 return d, types.ErrTruncated.GenWithStack("Incorrect value: 
'%-.128s' for column '%.192s'", valStr, col.Name.O) 2784 } 2785 return d, nil 2786 } 2787 2788 func (b *CausetBuilder) buildSplitBlockRegion(node *ast.SplitRegionStmt) (Causet, error) { 2789 tblInfo := node.Block.BlockInfo 2790 handleDefCausInfos := buildHandleDeferredCausetInfos(tblInfo) 2791 mockBlockCauset := LogicalBlockDual{}.Init(b.ctx, b.getSelectOffset()) 2792 schemaReplicant, names, err := memex.BlockInfo2SchemaAndNames(b.ctx, node.Block.Schema, tblInfo) 2793 if err != nil { 2794 return nil, err 2795 } 2796 mockBlockCauset.SetSchema(schemaReplicant) 2797 mockBlockCauset.names = names 2798 2799 p := &SplitRegion{ 2800 BlockInfo: tblInfo, 2801 PartitionNames: node.PartitionNames, 2802 } 2803 p.setSchemaAndNames(buildSplitRegionsSchema()) 2804 if len(node.SplitOpt.ValueLists) > 0 { 2805 values := make([][]types.Causet, 0, len(node.SplitOpt.ValueLists)) 2806 for i, valuesItem := range node.SplitOpt.ValueLists { 2807 data, err := convertValueListToData(valuesItem, handleDefCausInfos, i, b, mockBlockCauset) 2808 if err != nil { 2809 return nil, err 2810 } 2811 values = append(values, data) 2812 } 2813 p.ValueLists = values 2814 return p, nil 2815 } 2816 2817 p.Lower, err = convertValueListToData(node.SplitOpt.Lower, handleDefCausInfos, lowerBound, b, mockBlockCauset) 2818 if err != nil { 2819 return nil, err 2820 } 2821 p.Upper, err = convertValueListToData(node.SplitOpt.Upper, handleDefCausInfos, upperBound, b, mockBlockCauset) 2822 if err != nil { 2823 return nil, err 2824 } 2825 2826 maxSplitRegionNum := int64(config.GetGlobalConfig().SplitRegionMaxNum) 2827 if node.SplitOpt.Num > maxSplitRegionNum { 2828 return nil, errors.Errorf("Split causet region num exceeded the limit %v", maxSplitRegionNum) 2829 } else if node.SplitOpt.Num < 1 { 2830 return nil, errors.Errorf("Split causet region num should more than 0") 2831 } 2832 p.Num = int(node.SplitOpt.Num) 2833 return p, nil 2834 } 2835 2836 func buildHandleDeferredCausetInfos(tblInfo *perceptron.BlockInfo) []*perceptron.DeferredCausetInfo { 2837 switch { 2838 case tblInfo.PKIsHandle: 2839 if col := tblInfo.GetPkDefCausInfo(); col != nil { 2840 return []*perceptron.DeferredCausetInfo{col} 2841 } 2842 case tblInfo.IsCommonHandle: 2843 pkIdx := blocks.FindPrimaryIndex(tblInfo) 2844 pkDefCauss := make([]*perceptron.DeferredCausetInfo, 0, len(pkIdx.DeferredCausets)) 2845 defcaus := tblInfo.DeferredCausets 2846 for _, idxDefCaus := range pkIdx.DeferredCausets { 2847 pkDefCauss = append(pkDefCauss, defcaus[idxDefCaus.Offset]) 2848 } 2849 return pkDefCauss 2850 default: 2851 return []*perceptron.DeferredCausetInfo{perceptron.NewExtraHandleDefCausInfo()} 2852 } 2853 return nil 2854 } 2855 2856 const ( 2857 lowerBound int = -1 2858 upperBound int = -2 2859 ) 2860 2861 func convertValueListToData(valueList []ast.ExprNode, handleDefCausInfos []*perceptron.DeferredCausetInfo, rowIdx int, 2862 b *CausetBuilder, mockBlockCauset *LogicalBlockDual) ([]types.Causet, error) { 2863 if len(valueList) != len(handleDefCausInfos) { 2864 var err error 2865 switch rowIdx { 2866 case lowerBound: 2867 err = errors.Errorf("Split causet region lower value count should be %d", len(handleDefCausInfos)) 2868 case upperBound: 2869 err = errors.Errorf("Split causet region upper value count should be %d", len(handleDefCausInfos)) 2870 default: 2871 err = ErrWrongValueCountOnRow.GenWithStackByArgs(rowIdx) 2872 } 2873 return nil, err 2874 } 2875 data := make([]types.Causet, 0, len(handleDefCausInfos)) 2876 for i, v := range valueList { 2877 convertedCauset, err := 
b.convertValue(v, mockBlockCauset, handleDefCausInfos[i]) 2878 if err != nil { 2879 return nil, err 2880 } 2881 data = append(data, convertedCauset) 2882 } 2883 return data, nil 2884 } 2885 2886 func (b *CausetBuilder) buildDBS(ctx context.Context, node ast.DBSNode) (Causet, error) { 2887 var authErr error 2888 switch v := node.(type) { 2889 case *ast.AlterDatabaseStmt: 2890 if v.AlterDefaultDatabase { 2891 v.Name = b.ctx.GetStochastikVars().CurrentDB 2892 } 2893 if v.Name == "" { 2894 return nil, ErrNoDB 2895 } 2896 if b.ctx.GetStochastikVars().User != nil { 2897 authErr = ErrDBaccessDenied.GenWithStackByArgs("ALTER", b.ctx.GetStochastikVars().User.AuthUsername, 2898 b.ctx.GetStochastikVars().User.AuthHostname, v.Name) 2899 } 2900 b.visitInfo = appendVisitInfo(b.visitInfo, allegrosql.AlterPriv, v.Name, "", "", authErr) 2901 case *ast.AlterBlockStmt: 2902 if b.ctx.GetStochastikVars().User != nil { 2903 authErr = ErrBlockaccessDenied.GenWithStackByArgs("ALTER", b.ctx.GetStochastikVars().User.AuthUsername, 2904 b.ctx.GetStochastikVars().User.AuthHostname, v.Block.Name.L) 2905 } 2906 b.visitInfo = appendVisitInfo(b.visitInfo, allegrosql.AlterPriv, v.Block.Schema.L, 2907 v.Block.Name.L, "", authErr) 2908 for _, spec := range v.Specs { 2909 if spec.Tp == ast.AlterBlockRenameBlock || spec.Tp == ast.AlterBlockExchangePartition { 2910 if b.ctx.GetStochastikVars().User != nil { 2911 authErr = ErrBlockaccessDenied.GenWithStackByArgs("DROP", b.ctx.GetStochastikVars().User.AuthUsername, 2912 b.ctx.GetStochastikVars().User.AuthHostname, v.Block.Name.L) 2913 } 2914 b.visitInfo = appendVisitInfo(b.visitInfo, allegrosql.DropPriv, v.Block.Schema.L, 2915 v.Block.Name.L, "", authErr) 2916 2917 if b.ctx.GetStochastikVars().User != nil { 2918 authErr = ErrBlockaccessDenied.GenWithStackByArgs("CREATE", b.ctx.GetStochastikVars().User.AuthUsername, 2919 b.ctx.GetStochastikVars().User.AuthHostname, spec.NewBlock.Name.L) 2920 } 2921 b.visitInfo = appendVisitInfo(b.visitInfo, allegrosql.CreatePriv, spec.NewBlock.Schema.L, 2922 spec.NewBlock.Name.L, "", authErr) 2923 2924 if b.ctx.GetStochastikVars().User != nil { 2925 authErr = ErrBlockaccessDenied.GenWithStackByArgs("INSERT", b.ctx.GetStochastikVars().User.AuthUsername, 2926 b.ctx.GetStochastikVars().User.AuthHostname, spec.NewBlock.Name.L) 2927 } 2928 b.visitInfo = appendVisitInfo(b.visitInfo, allegrosql.InsertPriv, spec.NewBlock.Schema.L, 2929 spec.NewBlock.Name.L, "", authErr) 2930 } else if spec.Tp == ast.AlterBlockDropPartition { 2931 if b.ctx.GetStochastikVars().User != nil { 2932 authErr = ErrBlockaccessDenied.GenWithStackByArgs("DROP", b.ctx.GetStochastikVars().User.AuthUsername, 2933 b.ctx.GetStochastikVars().User.AuthHostname, v.Block.Name.L) 2934 } 2935 b.visitInfo = appendVisitInfo(b.visitInfo, allegrosql.DropPriv, v.Block.Schema.L, 2936 v.Block.Name.L, "", authErr) 2937 } 2938 } 2939 case *ast.CreateDatabaseStmt: 2940 if b.ctx.GetStochastikVars().User != nil { 2941 authErr = ErrDBaccessDenied.GenWithStackByArgs(b.ctx.GetStochastikVars().User.AuthUsername, 2942 b.ctx.GetStochastikVars().User.AuthHostname, v.Name) 2943 } 2944 b.visitInfo = appendVisitInfo(b.visitInfo, allegrosql.CreatePriv, v.Name, 2945 "", "", authErr) 2946 case *ast.CreateIndexStmt: 2947 if b.ctx.GetStochastikVars().User != nil { 2948 authErr = ErrBlockaccessDenied.GenWithStackByArgs("INDEX", b.ctx.GetStochastikVars().User.AuthUsername, 2949 b.ctx.GetStochastikVars().User.AuthHostname, v.Block.Name.L) 2950 } 2951 b.visitInfo = appendVisitInfo(b.visitInfo, allegrosql.IndexPriv, 
v.Block.Schema.L, 2952 v.Block.Name.L, "", authErr) 2953 case *ast.CreateBlockStmt: 2954 if b.ctx.GetStochastikVars().User != nil { 2955 authErr = ErrBlockaccessDenied.GenWithStackByArgs("CREATE", b.ctx.GetStochastikVars().User.AuthUsername, 2956 b.ctx.GetStochastikVars().User.AuthHostname, v.Block.Name.L) 2957 } 2958 b.visitInfo = appendVisitInfo(b.visitInfo, allegrosql.CreatePriv, v.Block.Schema.L, 2959 v.Block.Name.L, "", authErr) 2960 if v.ReferBlock != nil { 2961 if b.ctx.GetStochastikVars().User != nil { 2962 authErr = ErrBlockaccessDenied.GenWithStackByArgs("CREATE", b.ctx.GetStochastikVars().User.AuthUsername, 2963 b.ctx.GetStochastikVars().User.AuthHostname, v.ReferBlock.Name.L) 2964 } 2965 b.visitInfo = appendVisitInfo(b.visitInfo, allegrosql.SelectPriv, v.ReferBlock.Schema.L, 2966 v.ReferBlock.Name.L, "", authErr) 2967 } 2968 case *ast.CreateViewStmt: 2969 b.capFlag |= canExpandAST 2970 b.capFlag |= collectUnderlyingViewName 2971 defer func() { 2972 b.capFlag &= ^canExpandAST 2973 b.capFlag &= ^collectUnderlyingViewName 2974 }() 2975 b.underlyingViewNames = set.NewStringSet() 2976 plan, err := b.Build(ctx, v.Select) 2977 if err != nil { 2978 return nil, err 2979 } 2980 if b.underlyingViewNames.Exist(v.ViewName.Schema.L + "." + v.ViewName.Name.L) { 2981 return nil, ErrNoSuchBlock.GenWithStackByArgs(v.ViewName.Schema.O, v.ViewName.Name.O) 2982 } 2983 schemaReplicant := plan.Schema() 2984 names := plan.OutputNames() 2985 if v.DefCauss == nil { 2986 adjustOverlongViewDefCausname(plan.(LogicalCauset)) 2987 v.DefCauss = make([]perceptron.CIStr, len(schemaReplicant.DeferredCausets)) 2988 for i, name := range names { 2989 v.DefCauss[i] = name.DefCausName 2990 } 2991 } 2992 if len(v.DefCauss) != schemaReplicant.Len() { 2993 return nil, dbs.ErrViewWrongList 2994 } 2995 if b.ctx.GetStochastikVars().User != nil { 2996 authErr = ErrBlockaccessDenied.GenWithStackByArgs("CREATE VIEW", b.ctx.GetStochastikVars().User.AuthUsername, 2997 b.ctx.GetStochastikVars().User.AuthHostname, v.ViewName.Name.L) 2998 } 2999 b.visitInfo = appendVisitInfo(b.visitInfo, allegrosql.CreateViewPriv, v.ViewName.Schema.L, 3000 v.ViewName.Name.L, "", authErr) 3001 if v.Definer.CurrentUser && b.ctx.GetStochastikVars().User != nil { 3002 v.Definer = b.ctx.GetStochastikVars().User 3003 } 3004 if b.ctx.GetStochastikVars().User != nil && v.Definer.String() != b.ctx.GetStochastikVars().User.String() { 3005 err = ErrSpecificAccessDenied.GenWithStackByArgs("SUPER") 3006 b.visitInfo = appendVisitInfo(b.visitInfo, allegrosql.SuperPriv, "", 3007 "", "", err) 3008 } 3009 case *ast.CreateSequenceStmt: 3010 if b.ctx.GetStochastikVars().User != nil { 3011 authErr = ErrBlockaccessDenied.GenWithStackByArgs("CREATE", b.ctx.GetStochastikVars().User.AuthUsername, 3012 b.ctx.GetStochastikVars().User.AuthHostname, v.Name.Name.L) 3013 } 3014 b.visitInfo = appendVisitInfo(b.visitInfo, allegrosql.CreatePriv, v.Name.Schema.L, 3015 v.Name.Name.L, "", authErr) 3016 case *ast.DroFIDelatabaseStmt: 3017 if b.ctx.GetStochastikVars().User != nil { 3018 authErr = ErrDBaccessDenied.GenWithStackByArgs(b.ctx.GetStochastikVars().User.AuthUsername, 3019 b.ctx.GetStochastikVars().User.AuthHostname, v.Name) 3020 } 3021 b.visitInfo = appendVisitInfo(b.visitInfo, allegrosql.DropPriv, v.Name, 3022 "", "", authErr) 3023 case *ast.DropIndexStmt: 3024 if b.ctx.GetStochastikVars().User != nil { 3025 authErr = ErrBlockaccessDenied.GenWithStackByArgs("INDEX", b.ctx.GetStochastikVars().User.AuthUsername, 3026 b.ctx.GetStochastikVars().User.AuthHostname, v.Block.Name.L) 
3027 } 3028 b.visitInfo = appendVisitInfo(b.visitInfo, allegrosql.IndexPriv, v.Block.Schema.L, 3029 v.Block.Name.L, "", authErr) 3030 case *ast.DropBlockStmt: 3031 for _, blockVal := range v.Blocks { 3032 if b.ctx.GetStochastikVars().User != nil { 3033 authErr = ErrBlockaccessDenied.GenWithStackByArgs("DROP", b.ctx.GetStochastikVars().User.AuthUsername, 3034 b.ctx.GetStochastikVars().User.AuthHostname, blockVal.Name.L) 3035 } 3036 b.visitInfo = appendVisitInfo(b.visitInfo, allegrosql.DropPriv, blockVal.Schema.L, 3037 blockVal.Name.L, "", authErr) 3038 } 3039 case *ast.DropSequenceStmt: 3040 for _, sequence := range v.Sequences { 3041 if b.ctx.GetStochastikVars().User != nil { 3042 authErr = ErrBlockaccessDenied.GenWithStackByArgs("DROP", b.ctx.GetStochastikVars().User.AuthUsername, 3043 b.ctx.GetStochastikVars().User.AuthHostname, sequence.Name.L) 3044 } 3045 b.visitInfo = appendVisitInfo(b.visitInfo, allegrosql.DropPriv, sequence.Schema.L, 3046 sequence.Name.L, "", authErr) 3047 } 3048 case *ast.TruncateBlockStmt: 3049 if b.ctx.GetStochastikVars().User != nil { 3050 authErr = ErrBlockaccessDenied.GenWithStackByArgs("DROP", b.ctx.GetStochastikVars().User.AuthUsername, 3051 b.ctx.GetStochastikVars().User.AuthHostname, v.Block.Name.L) 3052 } 3053 b.visitInfo = appendVisitInfo(b.visitInfo, allegrosql.DropPriv, v.Block.Schema.L, 3054 v.Block.Name.L, "", authErr) 3055 case *ast.RenameBlockStmt: 3056 if b.ctx.GetStochastikVars().User != nil { 3057 authErr = ErrBlockaccessDenied.GenWithStackByArgs("ALTER", b.ctx.GetStochastikVars().User.AuthUsername, 3058 b.ctx.GetStochastikVars().User.AuthHostname, v.OldBlock.Name.L) 3059 } 3060 b.visitInfo = appendVisitInfo(b.visitInfo, allegrosql.AlterPriv, v.OldBlock.Schema.L, 3061 v.OldBlock.Name.L, "", authErr) 3062 3063 if b.ctx.GetStochastikVars().User != nil { 3064 authErr = ErrBlockaccessDenied.GenWithStackByArgs("DROP", b.ctx.GetStochastikVars().User.AuthUsername, 3065 b.ctx.GetStochastikVars().User.AuthHostname, v.OldBlock.Name.L) 3066 } 3067 b.visitInfo = appendVisitInfo(b.visitInfo, allegrosql.DropPriv, v.OldBlock.Schema.L, 3068 v.OldBlock.Name.L, "", authErr) 3069 3070 if b.ctx.GetStochastikVars().User != nil { 3071 authErr = ErrBlockaccessDenied.GenWithStackByArgs("CREATE", b.ctx.GetStochastikVars().User.AuthUsername, 3072 b.ctx.GetStochastikVars().User.AuthHostname, v.NewBlock.Name.L) 3073 } 3074 b.visitInfo = appendVisitInfo(b.visitInfo, allegrosql.CreatePriv, v.NewBlock.Schema.L, 3075 v.NewBlock.Name.L, "", authErr) 3076 3077 if b.ctx.GetStochastikVars().User != nil { 3078 authErr = ErrBlockaccessDenied.GenWithStackByArgs("INSERT", b.ctx.GetStochastikVars().User.AuthUsername, 3079 b.ctx.GetStochastikVars().User.AuthHostname, v.NewBlock.Name.L) 3080 } 3081 b.visitInfo = appendVisitInfo(b.visitInfo, allegrosql.InsertPriv, v.NewBlock.Schema.L, 3082 v.NewBlock.Name.L, "", authErr) 3083 case *ast.RecoverBlockStmt, *ast.FlashBackBlockStmt: 3084 // Recover causet command can only be executed by administrator. 3085 b.visitInfo = appendVisitInfo(b.visitInfo, allegrosql.SuperPriv, "", "", "", nil) 3086 case *ast.LockBlocksStmt, *ast.UnlockBlocksStmt: 3087 // TODO: add Lock Block privilege check. 3088 case *ast.CleanupBlockLockStmt: 3089 // This command can only be executed by administrator. 3090 b.visitInfo = appendVisitInfo(b.visitInfo, allegrosql.SuperPriv, "", "", "", nil) 3091 case *ast.RepairBlockStmt: 3092 // Repair causet command can only be executed by administrator. 
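// As with the other administrator-only statements handled in this switch, the requirement is
// recorded by appending a SuperPriv entry with empty EDB/causet/column values to b.visitInfo;
// the accumulated visitInfo entries are presumably validated by the privilege checker once the
// plan has been built, rather than being enforced inline here.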
3093 b.visitInfo = appendVisitInfo(b.visitInfo, allegrosql.SuperPriv, "", "", "", nil) 3094 } 3095 p := &DBS{Statement: node} 3096 return p, nil 3097 } 3098 3099 const ( 3100 // TraceFormatRow indicates event tracing format. 3101 TraceFormatRow = "event" 3102 // TraceFormatJSON indicates json tracing format. 3103 TraceFormatJSON = "json" 3104 // TraceFormatLog indicates log tracing format. 3105 TraceFormatLog = "log" 3106 ) 3107 3108 // buildTrace builds a trace plan. Inside this method, it first optimizes the 3109 // underlying query and then constructs a schemaReplicant, which will be used to construct 3110 // the result rows. 3111 func (b *CausetBuilder) buildTrace(trace *ast.TraceStmt) (Causet, error) { 3112 p := &Trace{StmtNode: trace.Stmt, Format: trace.Format} 3113 switch trace.Format { 3114 case TraceFormatRow: 3115 schemaReplicant := newDeferredCausetsWithNames(3) 3116 schemaReplicant.Append(buildDeferredCausetWithName("", "operation", allegrosql.TypeString, allegrosql.MaxBlobWidth)) 3117 schemaReplicant.Append(buildDeferredCausetWithName("", "startTS", allegrosql.TypeString, allegrosql.MaxBlobWidth)) 3118 schemaReplicant.Append(buildDeferredCausetWithName("", "duration", allegrosql.TypeString, allegrosql.MaxBlobWidth)) 3119 p.SetSchema(schemaReplicant.col2Schema()) 3120 p.names = schemaReplicant.names 3121 case TraceFormatJSON: 3122 schemaReplicant := newDeferredCausetsWithNames(1) 3123 schemaReplicant.Append(buildDeferredCausetWithName("", "operation", allegrosql.TypeString, allegrosql.MaxBlobWidth)) 3124 p.SetSchema(schemaReplicant.col2Schema()) 3125 p.names = schemaReplicant.names 3126 case TraceFormatLog: 3127 schemaReplicant := newDeferredCausetsWithNames(4) 3128 schemaReplicant.Append(buildDeferredCausetWithName("", "time", allegrosql.TypeTimestamp, allegrosql.MaxBlobWidth)) 3129 schemaReplicant.Append(buildDeferredCausetWithName("", "event", allegrosql.TypeString, allegrosql.MaxBlobWidth)) 3130 schemaReplicant.Append(buildDeferredCausetWithName("", "tags", allegrosql.TypeString, allegrosql.MaxBlobWidth)) 3131 schemaReplicant.Append(buildDeferredCausetWithName("", "spanName", allegrosql.TypeString, allegrosql.MaxBlobWidth)) 3132 p.SetSchema(schemaReplicant.col2Schema()) 3133 p.names = schemaReplicant.names 3134 default: 3135 return nil, errors.New("trace format should be one of 'event', 'log' or 'json'") 3136 } 3137 return p, nil 3138 } 3139 3140 func (b *CausetBuilder) buildExplainCauset(targetCauset Causet, format string, rows [][]string, analyze bool, execStmt ast.StmtNode) (Causet, error) { 3141 p := &Explain{ 3142 TargetCauset: targetCauset, 3143 Format: format, 3144 Analyze: analyze, 3145 InterDircStmt: execStmt, 3146 Rows: rows, 3147 } 3148 p.ctx = b.ctx 3149 return p, p.prepareSchema() 3150 } 3151 3152 // buildExplainFor gets the *last* (maybe still running or already finished) query plan from connection #connection id. 3153 // See https://dev.allegrosql.com/doc/refman/8.0/en/explain-for-connection.html. 
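// A minimal usage sketch (the connection ID 42 below is hypothetical, not taken from this file):
//
//	EXPLAIN FOR CONNECTION 42;
//
// If the current user is not the owner of that connection, a SuperPriv requirement is appended
// to b.visitInfo before the cached plan is rendered; if no plan has been captured for the
// connection yet, an empty Explain causet is returned instead.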
3154 func (b *CausetBuilder) buildExplainFor(explainFor *ast.ExplainForStmt) (Causet, error) { 3155 processInfo, ok := b.ctx.GetStochastikManager().GetProcessInfo(explainFor.ConnectionID) 3156 if !ok { 3157 return nil, ErrNoSuchThread.GenWithStackByArgs(explainFor.ConnectionID) 3158 } 3159 if b.ctx.GetStochastikVars() != nil && b.ctx.GetStochastikVars().User != nil { 3160 if b.ctx.GetStochastikVars().User.Username != processInfo.User { 3161 err := ErrAccessDenied.GenWithStackByArgs(b.ctx.GetStochastikVars().User.Username, b.ctx.GetStochastikVars().User.Hostname) 3162 // Different from MyALLEGROSQL's behavior and document. 3163 b.visitInfo = appendVisitInfo(b.visitInfo, allegrosql.SuperPriv, "", "", "", err) 3164 } 3165 } 3166 3167 targetCauset, ok := processInfo.Causet.(Causet) 3168 if !ok || targetCauset == nil { 3169 return &Explain{Format: explainFor.Format}, nil 3170 } 3171 var rows [][]string 3172 if explainFor.Format == ast.ExplainFormatROW { 3173 rows = processInfo.CausetExplainRows 3174 } 3175 return b.buildExplainCauset(targetCauset, explainFor.Format, rows, false, nil) 3176 } 3177 3178 func (b *CausetBuilder) buildExplain(ctx context.Context, explain *ast.ExplainStmt) (Causet, error) { 3179 if show, ok := explain.Stmt.(*ast.ShowStmt); ok { 3180 return b.buildShow(ctx, show) 3181 } 3182 targetCauset, _, err := OptimizeAstNode(ctx, b.ctx, explain.Stmt, b.is) 3183 if err != nil { 3184 return nil, err 3185 } 3186 3187 return b.buildExplainCauset(targetCauset, explain.Format, nil, explain.Analyze, explain.Stmt) 3188 } 3189 3190 func (b *CausetBuilder) buildSelectInto(ctx context.Context, sel *ast.SelectStmt) (Causet, error) { 3191 selectIntoInfo := sel.SelectIntoOpt 3192 sel.SelectIntoOpt = nil 3193 targetCauset, _, err := OptimizeAstNode(ctx, b.ctx, sel, b.is) 3194 if err != nil { 3195 return nil, err 3196 } 3197 b.visitInfo = appendVisitInfo(b.visitInfo, allegrosql.FilePriv, "", "", "", ErrSpecificAccessDenied.GenWithStackByArgs("FILE")) 3198 return &SelectInto{ 3199 TargetCauset: targetCauset, 3200 IntoOpt: selectIntoInfo, 3201 }, nil 3202 } 3203 3204 func buildShowProcedureSchema() (*memex.Schema, []*types.FieldName) { 3205 tblName := "ROUTINES" 3206 schemaReplicant := newDeferredCausetsWithNames(11) 3207 schemaReplicant.Append(buildDeferredCausetWithName(tblName, "EDB", allegrosql.TypeVarchar, 128)) 3208 schemaReplicant.Append(buildDeferredCausetWithName(tblName, "Name", allegrosql.TypeVarchar, 128)) 3209 schemaReplicant.Append(buildDeferredCausetWithName(tblName, "Type", allegrosql.TypeVarchar, 128)) 3210 schemaReplicant.Append(buildDeferredCausetWithName(tblName, "Definer", allegrosql.TypeVarchar, 128)) 3211 schemaReplicant.Append(buildDeferredCausetWithName(tblName, "Modified", allegrosql.TypeDatetime, 19)) 3212 schemaReplicant.Append(buildDeferredCausetWithName(tblName, "Created", allegrosql.TypeDatetime, 19)) 3213 schemaReplicant.Append(buildDeferredCausetWithName(tblName, "Security_type", allegrosql.TypeVarchar, 128)) 3214 schemaReplicant.Append(buildDeferredCausetWithName(tblName, "Comment", allegrosql.TypeBlob, 196605)) 3215 schemaReplicant.Append(buildDeferredCausetWithName(tblName, "character_set_client", allegrosql.TypeVarchar, 32)) 3216 schemaReplicant.Append(buildDeferredCausetWithName(tblName, "collation_connection", allegrosql.TypeVarchar, 32)) 3217 schemaReplicant.Append(buildDeferredCausetWithName(tblName, "Database DefCauslation", allegrosql.TypeVarchar, 32)) 3218 return schemaReplicant.col2Schema(), schemaReplicant.names 3219 } 3220 3221 func 
buildShowTriggerSchema() (*memex.Schema, []*types.FieldName) { 3222 tblName := "TRIGGERS" 3223 schemaReplicant := newDeferredCausetsWithNames(11) 3224 schemaReplicant.Append(buildDeferredCausetWithName(tblName, "Trigger", allegrosql.TypeVarchar, 128)) 3225 schemaReplicant.Append(buildDeferredCausetWithName(tblName, "Event", allegrosql.TypeVarchar, 128)) 3226 schemaReplicant.Append(buildDeferredCausetWithName(tblName, "Block", allegrosql.TypeVarchar, 128)) 3227 schemaReplicant.Append(buildDeferredCausetWithName(tblName, "Statement", allegrosql.TypeBlob, 196605)) 3228 schemaReplicant.Append(buildDeferredCausetWithName(tblName, "Timing", allegrosql.TypeVarchar, 128)) 3229 schemaReplicant.Append(buildDeferredCausetWithName(tblName, "Created", allegrosql.TypeDatetime, 19)) 3230 schemaReplicant.Append(buildDeferredCausetWithName(tblName, "sql_mode", allegrosql.TypeBlob, 8192)) 3231 schemaReplicant.Append(buildDeferredCausetWithName(tblName, "Definer", allegrosql.TypeVarchar, 128)) 3232 schemaReplicant.Append(buildDeferredCausetWithName(tblName, "character_set_client", allegrosql.TypeVarchar, 32)) 3233 schemaReplicant.Append(buildDeferredCausetWithName(tblName, "collation_connection", allegrosql.TypeVarchar, 32)) 3234 schemaReplicant.Append(buildDeferredCausetWithName(tblName, "Database DefCauslation", allegrosql.TypeVarchar, 32)) 3235 return schemaReplicant.col2Schema(), schemaReplicant.names 3236 } 3237 3238 func buildShowEventsSchema() (*memex.Schema, []*types.FieldName) { 3239 tblName := "EVENTS" 3240 schemaReplicant := newDeferredCausetsWithNames(15) 3241 schemaReplicant.Append(buildDeferredCausetWithName(tblName, "EDB", allegrosql.TypeVarchar, 128)) 3242 schemaReplicant.Append(buildDeferredCausetWithName(tblName, "Name", allegrosql.TypeVarchar, 128)) 3243 schemaReplicant.Append(buildDeferredCausetWithName(tblName, "Time zone", allegrosql.TypeVarchar, 32)) 3244 schemaReplicant.Append(buildDeferredCausetWithName(tblName, "Definer", allegrosql.TypeVarchar, 128)) 3245 schemaReplicant.Append(buildDeferredCausetWithName(tblName, "Type", allegrosql.TypeVarchar, 128)) 3246 schemaReplicant.Append(buildDeferredCausetWithName(tblName, "InterDircute At", allegrosql.TypeDatetime, 19)) 3247 schemaReplicant.Append(buildDeferredCausetWithName(tblName, "Interval Value", allegrosql.TypeVarchar, 128)) 3248 schemaReplicant.Append(buildDeferredCausetWithName(tblName, "Interval Field", allegrosql.TypeVarchar, 128)) 3249 schemaReplicant.Append(buildDeferredCausetWithName(tblName, "Starts", allegrosql.TypeDatetime, 19)) 3250 schemaReplicant.Append(buildDeferredCausetWithName(tblName, "Ends", allegrosql.TypeDatetime, 19)) 3251 schemaReplicant.Append(buildDeferredCausetWithName(tblName, "Status", allegrosql.TypeVarchar, 32)) 3252 schemaReplicant.Append(buildDeferredCausetWithName(tblName, "Originator", allegrosql.TypeInt24, 4)) 3253 schemaReplicant.Append(buildDeferredCausetWithName(tblName, "character_set_client", allegrosql.TypeVarchar, 32)) 3254 schemaReplicant.Append(buildDeferredCausetWithName(tblName, "collation_connection", allegrosql.TypeVarchar, 32)) 3255 schemaReplicant.Append(buildDeferredCausetWithName(tblName, "Database DefCauslation", allegrosql.TypeVarchar, 32)) 3256 return schemaReplicant.col2Schema(), schemaReplicant.names 3257 } 3258 3259 func buildShowWarningsSchema() (*memex.Schema, types.NameSlice) { 3260 tblName := "WARNINGS" 3261 schemaReplicant := newDeferredCausetsWithNames(3) 3262 schemaReplicant.Append(buildDeferredCausetWithName(tblName, "Level", allegrosql.TypeVarchar, 64)) 3263 
schemaReplicant.Append(buildDeferredCausetWithName(tblName, "Code", allegrosql.TypeLong, 19)) 3264 schemaReplicant.Append(buildDeferredCausetWithName(tblName, "Message", allegrosql.TypeVarchar, 64)) 3265 return schemaReplicant.col2Schema(), schemaReplicant.names 3266 } 3267 3268 // buildShowSchema builds column info for ShowStmt including column name and type. 3269 func buildShowSchema(s *ast.ShowStmt, isView bool, isSequence bool) (schemaReplicant *memex.Schema, outputNames []*types.FieldName) { 3270 var names []string 3271 var ftypes []byte 3272 switch s.Tp { 3273 case ast.ShowProcedureStatus: 3274 return buildShowProcedureSchema() 3275 case ast.ShowTriggers: 3276 return buildShowTriggerSchema() 3277 case ast.ShowEvents: 3278 return buildShowEventsSchema() 3279 case ast.ShowWarnings, ast.ShowErrors: 3280 return buildShowWarningsSchema() 3281 case ast.ShowRegions: 3282 return buildBlockRegionsSchema() 3283 case ast.ShowEngines: 3284 names = []string{"Engine", "Support", "Comment", "Transactions", "XA", "Savepoints"} 3285 case ast.ShowConfig: 3286 names = []string{"Type", "Instance", "Name", "Value"} 3287 case ast.ShowDatabases: 3288 names = []string{"Database"} 3289 case ast.ShowOpenBlocks: 3290 names = []string{"Database", "Block", "In_use", "Name_locked"} 3291 ftypes = []byte{allegrosql.TypeVarchar, allegrosql.TypeVarchar, allegrosql.TypeLong, allegrosql.TypeLong} 3292 case ast.ShowBlocks: 3293 names = []string{fmt.Sprintf("Blocks_in_%s", s.DBName)} 3294 if s.Full { 3295 names = append(names, "Block_type") 3296 } 3297 case ast.ShowBlockStatus: 3298 names = []string{"Name", "Engine", "Version", "Row_format", "Rows", "Avg_row_length", 3299 "Data_length", "Max_data_length", "Index_length", "Data_free", "Auto_increment", 3300 "Create_time", "UFIDelate_time", "Check_time", "DefCauslation", "Checksum", 3301 "Create_options", "Comment"} 3302 ftypes = []byte{allegrosql.TypeVarchar, allegrosql.TypeVarchar, allegrosql.TypeLonglong, allegrosql.TypeVarchar, allegrosql.TypeLonglong, allegrosql.TypeLonglong, 3303 allegrosql.TypeLonglong, allegrosql.TypeLonglong, allegrosql.TypeLonglong, allegrosql.TypeLonglong, allegrosql.TypeLonglong, 3304 allegrosql.TypeDatetime, allegrosql.TypeDatetime, allegrosql.TypeDatetime, allegrosql.TypeVarchar, allegrosql.TypeVarchar, 3305 allegrosql.TypeVarchar, allegrosql.TypeVarchar} 3306 case ast.ShowDeferredCausets: 3307 names = causet.DefCausDescFieldNames(s.Full) 3308 case ast.ShowCharset: 3309 names = []string{"Charset", "Description", "Default collation", "Maxlen"} 3310 ftypes = []byte{allegrosql.TypeVarchar, allegrosql.TypeVarchar, allegrosql.TypeVarchar, allegrosql.TypeLonglong} 3311 case ast.ShowVariables, ast.ShowStatus: 3312 names = []string{"Variable_name", "Value"} 3313 case ast.ShowDefCauslation: 3314 names = []string{"DefCauslation", "Charset", "Id", "Default", "Compiled", "Sortlen"} 3315 ftypes = []byte{allegrosql.TypeVarchar, allegrosql.TypeVarchar, allegrosql.TypeLonglong, 3316 allegrosql.TypeVarchar, allegrosql.TypeVarchar, allegrosql.TypeLonglong} 3317 case ast.ShowCreateBlock, ast.ShowCreateSequence: 3318 if isSequence { 3319 names = []string{"Sequence", "Create Sequence"} 3320 } else if isView { 3321 names = []string{"View", "Create View", "character_set_client", "collation_connection"} 3322 } else { 3323 names = []string{"Block", "Create Block"} 3324 } 3325 case ast.ShowCreateUser: 3326 if s.User != nil { 3327 names = []string{fmt.Sprintf("CREATE USER for %s", s.User)} 3328 } 3329 case ast.ShowCreateView: 3330 names = []string{"View", "Create 
View", "character_set_client", "collation_connection"} 3331 case ast.ShowCreateDatabase: 3332 names = []string{"Database", "Create Database"} 3333 case ast.ShowDrainerStatus: 3334 names = []string{"NodeID", "Address", "State", "Max_Commit_Ts", "UFIDelate_Time"} 3335 ftypes = []byte{allegrosql.TypeVarchar, allegrosql.TypeVarchar, allegrosql.TypeVarchar, allegrosql.TypeLonglong, allegrosql.TypeVarchar} 3336 case ast.ShowGrants: 3337 if s.User != nil { 3338 names = []string{fmt.Sprintf("Grants for %s", s.User)} 3339 } else { 3340 // Don't know the name yet, so just say "user" 3341 names = []string{"Grants for User"} 3342 } 3343 case ast.ShowIndex: 3344 names = []string{"Block", "Non_unique", "Key_name", "Seq_in_index", 3345 "DeferredCauset_name", "DefCauslation", "Cardinality", "Sub_part", "Packed", 3346 "Null", "Index_type", "Comment", "Index_comment", "Visible", "Expression"} 3347 ftypes = []byte{allegrosql.TypeVarchar, allegrosql.TypeLonglong, allegrosql.TypeVarchar, allegrosql.TypeLonglong, 3348 allegrosql.TypeVarchar, allegrosql.TypeVarchar, allegrosql.TypeLonglong, allegrosql.TypeLonglong, 3349 allegrosql.TypeVarchar, allegrosql.TypeVarchar, allegrosql.TypeVarchar, allegrosql.TypeVarchar, 3350 allegrosql.TypeVarchar, allegrosql.TypeVarchar, allegrosql.TypeVarchar} 3351 case ast.ShowPlugins: 3352 names = []string{"Name", "Status", "Type", "Library", "License", "Version"} 3353 ftypes = []byte{ 3354 allegrosql.TypeVarchar, allegrosql.TypeVarchar, allegrosql.TypeVarchar, allegrosql.TypeVarchar, allegrosql.TypeVarchar, allegrosql.TypeVarchar, 3355 } 3356 case ast.ShowProcessList: 3357 names = []string{"Id", "User", "Host", "EDB", "Command", "Time", "State", "Info"} 3358 ftypes = []byte{allegrosql.TypeLonglong, allegrosql.TypeVarchar, allegrosql.TypeVarchar, 3359 allegrosql.TypeVarchar, allegrosql.TypeVarchar, allegrosql.TypeLong, allegrosql.TypeVarchar, allegrosql.TypeString} 3360 case ast.ShowPumpStatus: 3361 names = []string{"NodeID", "Address", "State", "Max_Commit_Ts", "UFIDelate_Time"} 3362 ftypes = []byte{allegrosql.TypeVarchar, allegrosql.TypeVarchar, allegrosql.TypeVarchar, allegrosql.TypeLonglong, allegrosql.TypeVarchar} 3363 case ast.ShowStatsMeta: 3364 names = []string{"Db_name", "Block_name", "Partition_name", "UFIDelate_time", "Modify_count", "Row_count"} 3365 ftypes = []byte{allegrosql.TypeVarchar, allegrosql.TypeVarchar, allegrosql.TypeVarchar, allegrosql.TypeDatetime, allegrosql.TypeLonglong, allegrosql.TypeLonglong} 3366 case ast.ShowStatsHistograms: 3367 names = []string{"Db_name", "Block_name", "Partition_name", "DeferredCauset_name", "Is_index", "UFIDelate_time", "Distinct_count", "Null_count", "Avg_col_size", "Correlation"} 3368 ftypes = []byte{allegrosql.TypeVarchar, allegrosql.TypeVarchar, allegrosql.TypeVarchar, allegrosql.TypeVarchar, allegrosql.TypeTiny, allegrosql.TypeDatetime, 3369 allegrosql.TypeLonglong, allegrosql.TypeLonglong, allegrosql.TypeDouble, allegrosql.TypeDouble} 3370 case ast.ShowStatsBuckets: 3371 names = []string{"Db_name", "Block_name", "Partition_name", "DeferredCauset_name", "Is_index", "Bucket_id", "Count", 3372 "Repeats", "Lower_Bound", "Upper_Bound"} 3373 ftypes = []byte{allegrosql.TypeVarchar, allegrosql.TypeVarchar, allegrosql.TypeVarchar, allegrosql.TypeVarchar, allegrosql.TypeTiny, allegrosql.TypeLonglong, 3374 allegrosql.TypeLonglong, allegrosql.TypeLonglong, allegrosql.TypeVarchar, allegrosql.TypeVarchar} 3375 case ast.ShowStatsHealthy: 3376 names = []string{"Db_name", "Block_name", "Partition_name", "Healthy"} 3377 ftypes = 
[]byte{allegrosql.TypeVarchar, allegrosql.TypeVarchar, allegrosql.TypeVarchar, allegrosql.TypeLonglong} 3378 case ast.ShowProfiles: // ShowProfiles is deprecated. 3379 names = []string{"Query_ID", "Duration", "Query"} 3380 ftypes = []byte{allegrosql.TypeLong, allegrosql.TypeDouble, allegrosql.TypeVarchar} 3381 case ast.ShowMasterStatus: 3382 names = []string{"File", "Position", "Binlog_Do_DB", "Binlog_Ignore_DB", "InterDircuted_Gtid_Set"} 3383 ftypes = []byte{allegrosql.TypeVarchar, allegrosql.TypeLonglong, allegrosql.TypeVarchar, allegrosql.TypeVarchar, allegrosql.TypeVarchar} 3384 case ast.ShowPrivileges: 3385 names = []string{"Privilege", "Context", "Comment"} 3386 ftypes = []byte{allegrosql.TypeVarchar, allegrosql.TypeVarchar, allegrosql.TypeVarchar} 3387 case ast.ShowBindings: 3388 names = []string{"Original_sql", "Bind_sql", "Default_db", "Status", "Create_time", "UFIDelate_time", "Charset", "DefCauslation", "Source"} 3389 ftypes = []byte{allegrosql.TypeVarchar, allegrosql.TypeVarchar, allegrosql.TypeVarchar, allegrosql.TypeVarchar, allegrosql.TypeDatetime, allegrosql.TypeDatetime, allegrosql.TypeVarchar, allegrosql.TypeVarchar, allegrosql.TypeVarchar} 3390 case ast.ShowAnalyzeStatus: 3391 names = []string{"Block_schema", "Block_name", "Partition_name", "Job_info", "Processed_rows", "Start_time", "State"} 3392 ftypes = []byte{allegrosql.TypeVarchar, allegrosql.TypeVarchar, allegrosql.TypeVarchar, allegrosql.TypeVarchar, allegrosql.TypeLonglong, allegrosql.TypeDatetime, allegrosql.TypeVarchar} 3393 case ast.ShowBuiltins: 3394 names = []string{"Supported_builtin_functions"} 3395 ftypes = []byte{allegrosql.TypeVarchar} 3396 case ast.ShowBackups, ast.ShowRestores: 3397 names = []string{"Destination", "State", "Progress", "Queue_time", "InterDircution_time", "Finish_time", "Connection"} 3398 ftypes = []byte{allegrosql.TypeVarchar, allegrosql.TypeVarchar, allegrosql.TypeDouble, allegrosql.TypeDatetime, allegrosql.TypeDatetime, allegrosql.TypeDatetime, allegrosql.TypeLonglong} 3399 } 3400 3401 schemaReplicant = memex.NewSchema(make([]*memex.DeferredCauset, 0, len(names))...) 3402 outputNames = make([]*types.FieldName, 0, len(names)) 3403 for i := range names { 3404 col := &memex.DeferredCauset{} 3405 outputNames = append(outputNames, &types.FieldName{DefCausName: perceptron.NewCIStr(names[i])}) 3406 // Use varchar as the default return column type. 
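// An explicit entry in ftypes (anything other than allegrosql.TypeUnspecified) overrides this
// default below; the chosen type then receives its default length and decimal via
// allegrosql.GetDefaultFieldLengthAndDecimal and its charset/collation via
// types.DefaultCharsetForType.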
3407 tp := allegrosql.TypeVarchar 3408 if len(ftypes) != 0 && ftypes[i] != allegrosql.TypeUnspecified { 3409 tp = ftypes[i] 3410 } 3411 fieldType := types.NewFieldType(tp) 3412 fieldType.Flen, fieldType.Decimal = allegrosql.GetDefaultFieldLengthAndDecimal(tp) 3413 fieldType.Charset, fieldType.DefCauslate = types.DefaultCharsetForType(tp) 3414 col.RetType = fieldType 3415 schemaReplicant.Append(col) 3416 } 3417 return 3418 } 3419 3420 func buildChecksumBlockSchema() (*memex.Schema, []*types.FieldName) { 3421 schemaReplicant := newDeferredCausetsWithNames(5) 3422 schemaReplicant.Append(buildDeferredCausetWithName("", "Db_name", allegrosql.TypeVarchar, 128)) 3423 schemaReplicant.Append(buildDeferredCausetWithName("", "Block_name", allegrosql.TypeVarchar, 128)) 3424 schemaReplicant.Append(buildDeferredCausetWithName("", "Checksum_crc64_xor", allegrosql.TypeLonglong, 22)) 3425 schemaReplicant.Append(buildDeferredCausetWithName("", "Total_ekvs", allegrosql.TypeLonglong, 22)) 3426 schemaReplicant.Append(buildDeferredCausetWithName("", "Total_bytes", allegrosql.TypeLonglong, 22)) 3427 return schemaReplicant.col2Schema(), schemaReplicant.names 3428 } 3429 3430 // adjustOverlongViewDefCausname adjusts the overlong outputNames of a view to 3431 // `name_exp_$off` where `$off` is the offset of the output column, $off starts from 1. 3432 // There are still some MyALLEGROSQL compatibility problems. 3433 func adjustOverlongViewDefCausname(plan LogicalCauset) { 3434 outputNames := plan.OutputNames() 3435 for i := range outputNames { 3436 if outputName := outputNames[i].DefCausName.L; len(outputName) > allegrosql.MaxDeferredCausetNameLength { 3437 outputNames[i].DefCausName = perceptron.NewCIStr(fmt.Sprintf("name_exp_%d", i+1)) 3438 } 3439 } 3440 }
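// Illustrative sketch for adjustOverlongViewDefCausname (the view described here is hypothetical,
// not part of this file): when a CREATE VIEW statement omits an explicit column list and one of
// its select expressions yields an auto-generated output name longer than
// allegrosql.MaxDeferredCausetNameLength, that name is rewritten to `name_exp_<offset>`, where
// <offset> is the 1-based position of the column in the view's output. A very long expression in
// the third output position would therefore surface as the view column `name_exp_3`, while
// shorter names are left untouched.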