github.com/matrixorigin/matrixone@v1.2.0/pkg/sql/plan/build_dml_util.go

// Copyright 2022 Matrix Origin
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//	http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package plan

import (
	"context"
	"fmt"
	"github.com/matrixorigin/matrixone/pkg/txn/trace"
	"github.com/matrixorigin/matrixone/pkg/util/sysview"
	"strings"
	"sync"

	"github.com/google/uuid"

	moruntime "github.com/matrixorigin/matrixone/pkg/common/runtime"
	"github.com/matrixorigin/matrixone/pkg/sql/parsers/tree"
	"github.com/matrixorigin/matrixone/pkg/util/executor"

	"golang.org/x/exp/slices"

	"github.com/matrixorigin/matrixone/pkg/catalog"
	"github.com/matrixorigin/matrixone/pkg/common/moerr"
	"github.com/matrixorigin/matrixone/pkg/container/types"
	"github.com/matrixorigin/matrixone/pkg/pb/plan"
	"github.com/matrixorigin/matrixone/pkg/pb/timestamp"
	"github.com/matrixorigin/matrixone/pkg/sql/util"
)

var dmlPlanCtxPool = sync.Pool{
	New: func() any {
		return &dmlPlanCtx{}
	},
}

var deleteNodeInfoPool = sync.Pool{
	New: func() any {
		return &deleteNodeInfo{}
	},
}

func getDmlPlanCtx() *dmlPlanCtx {
	ctx := dmlPlanCtxPool.Get().(*dmlPlanCtx)
	ctx.updatePkCol = true
	ctx.partitionInfos = make(map[uint64]*partSubTableInfo)
	return ctx
}

func putDmlPlanCtx(ctx *dmlPlanCtx) {
	var x dmlPlanCtx
	*ctx = x
	dmlPlanCtxPool.Put(ctx)
}

func getDeleteNodeInfo() *deleteNodeInfo {
	info := deleteNodeInfoPool.Get().(*deleteNodeInfo)
	return info
}

func putDeleteNodeInfo(info *deleteNodeInfo) {
	var x deleteNodeInfo
	*info = x
	deleteNodeInfoPool.Put(info)
}

type dmlPlanCtx struct {
	objRef                 *ObjectRef
	tableDef               *TableDef
	beginIdx               int
	sourceStep             int32
	isMulti                bool
	needAggFilter          bool
	updateColLength        int
	rowIdPos               int
	insertColPos           []int
	updateColPosMap        map[string]int
	allDelTableIDs         map[uint64]struct{}
	allDelTables           map[FkReferKey]struct{}
	isFkRecursionCall      bool // if the update plan was called recursively by a parent table (foreign key reference), we do not check the parent's foreign key constraint
	lockTable              bool // we need to lock the whole table for statements like: delete from tbl
	checkInsertPkDup       bool // whether we need to check for duplicate values in the insert batch, e.g. insert into t values (1); load data will not check
	updatePkCol            bool // whether the update stmt updates the primary key or one of its columns
	pkFilterExprs          []*Expr
	isDeleteWithoutFilters bool
	partitionInfos         map[uint64]*partSubTableInfo // key: main table id, value: partition sub table information
}

type partSubTableInfo struct {
	partTableIDs   []uint64 // align array index with the partition number
	partTableNames []string // align partition subtable names with partition numbers
}

// deleteNodeInfo holds information about the table a delete node operates on.
type deleteNodeInfo struct {
	objRef          *ObjectRef
	tableDef        *TableDef
	IsClusterTable  bool
	deleteIndex     int      // the array index position of the rowid column
	partTableIDs    []uint64 // align array index with the partition number
	partTableNames  []string // align array index with the partition number
	partitionIdx    int      // the array index position of the partition expression column
	indexTableNames []string
	foreignTbl      []uint64
	addAffectedRows bool
	pkPos           int
	pkTyp           plan.Type
	lockTable       bool
}

// buildInsertPlans builds the insert plan.
func buildInsertPlans(
	ctx CompilerContext, builder *QueryBuilder, bindCtx *BindContext, stmt *tree.Insert,
	objRef *ObjectRef, tableDef *TableDef, lastNodeId int32, ifExistAutoPkCol bool, insertWithoutUniqueKeyMap map[string]bool) error {

	var err error
	var insertColsNameFromStmt []string
	var pkFilterExpr []*Expr
	var newPartitionExpr *Expr
	if stmt != nil {
		insertColsNameFromStmt, err = getInsertColsFromStmt(ctx.GetContext(), stmt, tableDef)
		if err != nil {
			return err
		}

		// try to build a pk filter expr for the origin table
		if canUsePkFilter(builder, ctx, stmt, tableDef, insertColsNameFromStmt, nil) {
			pkLocationMap := newLocationMap(tableDef, nil)
			// The insert statement subplan with a primary key has undergone manual column pruning in advance,
			// so the partition expression needs to be remapped to judge whether partition pruning can be performed.
			newPartitionExpr = remapPartitionExpr(builder, tableDef, pkLocationMap.getPkOrderInValues(insertColsNameFromStmt))
			if pkFilterExpr, err = getPkValueExpr(builder, ctx, tableDef, pkLocationMap, insertColsNameFromStmt); err != nil {
				return err
			}
		}
	}

	// add plan: -> preinsert -> sink
	lastNodeId = appendPreInsertNode(builder, bindCtx, objRef, tableDef, lastNodeId, false)

	lastNodeId = appendSinkNode(builder, bindCtx, lastNodeId)
	sourceStep := builder.appendStep(lastNodeId)

	// make insert plans for the origin table and its related index tables
	insertBindCtx := NewBindContext(builder, nil)
	updateColLength := 0
	updatePkCol := true
	addAffectedRows := true
	isFkRecursionCall := false
	ifNeedCheckPkDup := !builder.qry.LoadTag
	var indexSourceColTypes []*plan.Type
	var fuzzymessage *OriginTableMessageForFuzzy
	return buildInsertPlansWithRelatedHiddenTable(stmt, ctx, builder, insertBindCtx, objRef, tableDef,
		updateColLength, sourceStep, addAffectedRows, isFkRecursionCall, updatePkCol, pkFilterExpr,
		newPartitionExpr, ifExistAutoPkCol, ifNeedCheckPkDup, indexSourceColTypes, fuzzymessage, insertWithoutUniqueKeyMap)
}

// buildUpdatePlans builds the update plan.
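// A rough sketch of what the function below chains together, in the order the steps are
// appended (a summary of the calls in its body, not an exact plan dump):
//
//	sink_scan -> project -> [agg] -> [filter] -> sink      // materialize the old rows plus the new column values
//	buildDeletePlans(...)                                   // delete the old rows (and rows in index/child tables)
//	sink_scan -> project -> preinsert -> sink               // re-project the updated rows
//	buildInsertPlansWithRelatedHiddenTable(...)             // insert them back into the table and its hidden index tables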
func buildUpdatePlans(ctx CompilerContext, builder *QueryBuilder, bindCtx *BindContext, updatePlanCtx *dmlPlanCtx, addAffectedRows bool) error {
	var err error
	// sink_scan -> project -> [agg] -> [filter] -> sink
	lastNodeId := appendSinkScanNode(builder, bindCtx, updatePlanCtx.sourceStep)
	lastNodeId, err = makePreUpdateDeletePlan(ctx, builder, bindCtx, updatePlanCtx, lastNodeId)
	if err != nil {
		return err
	}
	lastNodeId = appendSinkNode(builder, bindCtx, lastNodeId)
	nextSourceStep := builder.appendStep(lastNodeId)
	updatePlanCtx.sourceStep = nextSourceStep

	// build delete plans
	err = buildDeletePlans(ctx, builder, bindCtx, updatePlanCtx)
	if err != nil {
		return err
	}

	// sink_scan -> project -> preinsert -> sink
	lastNodeId = appendSinkScanNode(builder, bindCtx, updatePlanCtx.sourceStep)
	lastNode := builder.qry.Nodes[lastNodeId]
	newCols := make([]*ColDef, 0, len(updatePlanCtx.tableDef.Cols))
	oldRowIdPos := len(updatePlanCtx.tableDef.Cols) - 1
	for _, col := range updatePlanCtx.tableDef.Cols {
		if col.Hidden && col.Name != catalog.FakePrimaryKeyColName {
			continue
		}
		newCols = append(newCols, col)
	}
	updatePlanCtx.tableDef.Cols = newCols
	insertColLength := len(updatePlanCtx.insertColPos) + 1
	projectList := make([]*Expr, insertColLength)
	for i, idx := range updatePlanCtx.insertColPos {
		name := ""
		if col, ok := lastNode.ProjectList[idx].Expr.(*plan.Expr_Col); ok {
			name = col.Col.Name
		}
		projectList[i] = &plan.Expr{
			Typ: lastNode.ProjectList[idx].Typ,
			Expr: &plan.Expr_Col{
				Col: &plan.ColRef{
					ColPos: int32(idx),
					Name:   name,
				},
			},
		}
	}
	projectList[insertColLength-1] = &plan.Expr{
		Typ: lastNode.ProjectList[oldRowIdPos].Typ,
		Expr: &plan.Expr_Col{
			Col: &plan.ColRef{
				ColPos: int32(oldRowIdPos),
				Name:   catalog.Row_ID,
			},
		},
	}

	// append project node
	projectNode := &Node{
		NodeType:    plan.Node_PROJECT,
		Children:    []int32{lastNodeId},
		ProjectList: projectList,
	}
	lastNodeId = builder.appendNode(projectNode, bindCtx)
	// append preinsert node
	lastNodeId = appendPreInsertNode(builder, bindCtx, updatePlanCtx.objRef, updatePlanCtx.tableDef, lastNodeId, true)

	// append sink node
	lastNodeId = appendSinkNode(builder, bindCtx, lastNodeId)
	sourceStep := builder.appendStep(lastNodeId)

	// build insert plan.
	insertBindCtx := NewBindContext(builder, nil)
	var partitionExpr *Expr
	ifExistAutoPkCol := false
	ifNeedCheckPkDup := true
	var indexSourceColTypes []*plan.Type
	var fuzzymessage *OriginTableMessageForFuzzy
	return buildInsertPlansWithRelatedHiddenTable(nil, ctx, builder, insertBindCtx, updatePlanCtx.objRef, updatePlanCtx.tableDef,
		updatePlanCtx.updateColLength, sourceStep, addAffectedRows, updatePlanCtx.isFkRecursionCall, updatePlanCtx.updatePkCol,
		updatePlanCtx.pkFilterExprs, partitionExpr, ifExistAutoPkCol, ifNeedCheckPkDup, indexSourceColTypes, fuzzymessage, nil)
}

func getStepByNodeId(builder *QueryBuilder, nodeId int32) int {
	for step, stepNodeId := range builder.qry.Steps {
		if stepNodeId == nodeId {
			return step
		}
	}
	return -1
}

// buildDeletePlans builds the delete plans.
/*
[o1]sink_scan -> join[u1] -> sink
[u1]sink_scan -> lock -> delete -> [mergedelete] ...    // if it's a delete stmt, delete u1
[u1]sink_scan -> preinsert_uk -> sink ...                // if it's an update stmt, update u1
[o1]sink_scan -> join[u2] -> sink
[u2]sink_scan -> lock -> delete -> [mergedelete] ...    // if it's a delete stmt, delete u2
[u2]sink_scan -> preinsert_uk -> sink ...                // if it's an update stmt, update u2
[o1]sink_scan -> predelete[get partition] -> lock -> delete -> [mergedelete]

[o1]sink_scan -> join[f1 semi join c1 on c1.fid=f1.id, get f1.id] -> filter(assert(isempty(id)))            // if there is a refChild table with NO ACTION
[o1]sink_scan -> join[f1 inner join c2 on f1.id = c2.fid, get c2.*, null] -> sink ... (like update)         // if there is a refChild table with SET NULL
[o1]sink_scan -> join[f1 inner join c3 on f1.id = c3.fid, get c3.*] -> sink ... (like delete)               // delete stmt: if there is a refChild table with CASCADE
[o1]sink_scan -> join[f1 inner join c4 on f1.id = c4.fid, get c4.*, update cols] -> sink ... (like update)  // update stmt: if there is a refChild table with CASCADE
*/
func buildDeletePlans(ctx CompilerContext, builder *QueryBuilder, bindCtx *BindContext, delCtx *dmlPlanCtx) error {
	if sinkOrUnionNodeId, ok := builder.deleteNode[delCtx.tableDef.TblId]; ok {
		sinkOrUnionNode := builder.qry.Nodes[sinkOrUnionNodeId]
		if sinkOrUnionNode.NodeType == plan.Node_SINK {
			step := getStepByNodeId(builder, sinkOrUnionNodeId)
			if step == -1 || delCtx.sourceStep == -1 {
				panic("steps should not be -1")
			}

			oldDelPlanSinkScanNodeId := appendSinkScanNode(builder, bindCtx, int32(step))
			thisDelPlanSinkScanNodeId := appendSinkScanNode(builder, bindCtx, delCtx.sourceStep)
			unionProjection := getProjectionByLastNode(builder, sinkOrUnionNodeId)
			unionNode := &plan.Node{
				NodeType:    plan.Node_UNION,
				Children:    []int32{oldDelPlanSinkScanNodeId, thisDelPlanSinkScanNodeId},
				ProjectList: unionProjection,
			}
			unionNodeId := builder.appendNode(unionNode, bindCtx)
			newSinkNodeId := appendSinkNode(builder, bindCtx, unionNodeId)
			endStep := builder.appendStep(newSinkNodeId)
			for i, n := range builder.qry.Nodes {
				if n.NodeType == plan.Node_SINK_SCAN && n.SourceStep[0] == int32(step) && i != int(oldDelPlanSinkScanNodeId) {
					n.SourceStep[0] = endStep
				}
			}
			builder.deleteNode[delCtx.tableDef.TblId] = unionNodeId
		} else {
			// todo: we need to make the union operator support more than two children.
			panic("unsupported: more than two plans to delete one table")
			// thisDelPlanSinkScanNodeId := appendSinkScanNode(builder, bindCtx, delCtx.sourceStep)
			// sinkOrUnionNode.Children = append(sinkOrUnionNode.Children, thisDelPlanSinkScanNodeId)
		}
		return nil
	} else {
		builder.deleteNode[delCtx.tableDef.TblId] = builder.qry.Steps[delCtx.sourceStep]
	}
	isUpdate := delCtx.updateColLength > 0

	// delete unique/secondary index tables
	// Refer to this PR: https://github.com/matrixorigin/matrixone/pull/12093
	// We have built SK using the UK code path, so we might see UK in a function signature even though it could be
	// for both UK and SK. To handle the SK case, we have flags to indicate whether it's UK or SK.
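	// Illustrative note (a sketch, not part of the original source): a bare
	//     delete from t;
	// with no filters reaches this point with isDeleteWithoutFilters set, so canTruncate starts
	// out true below; it is switched off again if the table has FK children (with checks enabled),
	// is a view or a published table, or is a cluster table accessed by a non-sys account.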
318 hasUniqueKey := haveUniqueKey(delCtx.tableDef) 319 hasSecondaryKey := haveSecondaryKey(delCtx.tableDef) 320 canTruncate := delCtx.isDeleteWithoutFilters 321 322 accountId, err := ctx.GetAccountId() 323 if err != nil { 324 return err 325 } 326 327 enabled, err := IsForeignKeyChecksEnabled(ctx) 328 if err != nil { 329 return err 330 } 331 332 if enabled && len(delCtx.tableDef.RefChildTbls) > 0 || 333 delCtx.tableDef.ViewSql != nil || 334 (util.TableIsClusterTable(delCtx.tableDef.GetTableType()) && accountId != catalog.System_Account) || 335 delCtx.objRef.PubInfo != nil { 336 canTruncate = false 337 } 338 339 if (hasUniqueKey || hasSecondaryKey) && !canTruncate { 340 typMap := make(map[string]plan.Type) 341 posMap := make(map[string]int) 342 colMap := make(map[string]*ColDef) 343 for idx, col := range delCtx.tableDef.Cols { 344 posMap[col.Name] = idx 345 typMap[col.Name] = col.Typ 346 colMap[col.Name] = col 347 } 348 multiTableIndexes := make(map[string]*MultiTableIndex) 349 for idx, indexdef := range delCtx.tableDef.Indexes { 350 351 if isUpdate { 352 pkeyName := delCtx.tableDef.Pkey.PkeyColName 353 354 // Check if primary key is being updated. 355 isPrimaryKeyUpdated := func() bool { 356 if pkeyName == catalog.CPrimaryKeyColName { 357 // Handle compound primary key. 358 for _, pkPartColName := range delCtx.tableDef.Pkey.Names { 359 if _, exists := delCtx.updateColPosMap[pkPartColName]; exists || colMap[pkPartColName].OnUpdate != nil { 360 return true 361 } 362 } 363 } else if pkeyName == catalog.FakePrimaryKeyColName { 364 // Handle programmatically generated primary key. 365 if _, exists := delCtx.updateColPosMap[pkeyName]; exists || colMap[pkeyName].OnUpdate != nil { 366 return true 367 } 368 } else { 369 // Handle single primary key. 370 if _, exists := delCtx.updateColPosMap[pkeyName]; exists || colMap[pkeyName].OnUpdate != nil { 371 return true 372 } 373 } 374 return false 375 } 376 377 // Check if secondary key is being updated. 378 isSecondaryKeyUpdated := func() bool { 379 for _, colName := range indexdef.Parts { 380 resolvedColName := catalog.ResolveAlias(colName) 381 if colIdx, ok := posMap[resolvedColName]; ok { 382 col := delCtx.tableDef.Cols[colIdx] 383 if _, exists := delCtx.updateColPosMap[resolvedColName]; exists || col.OnUpdate != nil { 384 return true 385 } 386 } 387 } 388 return false 389 } 390 391 if !isPrimaryKeyUpdated() && !isSecondaryKeyUpdated() { 392 continue 393 } 394 } 395 396 if indexdef.TableExist && catalog.IsRegularIndexAlgo(indexdef.IndexAlgo) { 397 398 /******** 399 NOTE: make sure to make the major change applied to secondary index, to IVFFLAT index as well. 
400 Else IVFFLAT index would fail 401 ********/ 402 var isUk = indexdef.Unique 403 var isSK = !isUk && catalog.IsRegularIndexAlgo(indexdef.IndexAlgo) 404 405 uniqueObjRef, uniqueTableDef := builder.compCtx.Resolve(delCtx.objRef.SchemaName, indexdef.IndexTableName, Snapshot{TS: ×tamp.Timestamp{}}) 406 if uniqueTableDef == nil { 407 return moerr.NewNoSuchTable(builder.GetContext(), delCtx.objRef.SchemaName, indexdef.IndexTableName) 408 } 409 var lastNodeId int32 410 var err error 411 var uniqueDeleteIdx int 412 var uniqueTblPkPos int 413 var uniqueTblPkTyp Type 414 415 if delCtx.isDeleteWithoutFilters { 416 lastNodeId, err = appendDeleteIndexTablePlanWithoutFilters(builder, bindCtx, uniqueObjRef, uniqueTableDef) 417 uniqueDeleteIdx = getRowIdPos(uniqueTableDef) 418 uniqueTblPkPos, uniqueTblPkTyp = getPkPos(uniqueTableDef, false) 419 } else { 420 lastNodeId = appendSinkScanNode(builder, bindCtx, delCtx.sourceStep) 421 lastNodeId, err = appendDeleteIndexTablePlan(builder, bindCtx, uniqueObjRef, uniqueTableDef, indexdef, typMap, posMap, lastNodeId, isUk) 422 uniqueDeleteIdx = len(delCtx.tableDef.Cols) + delCtx.updateColLength 423 uniqueTblPkPos = uniqueDeleteIdx + 1 424 uniqueTblPkTyp = uniqueTableDef.Cols[0].Typ 425 } 426 if err != nil { 427 return err 428 } 429 if isUpdate { 430 // do it like simple update 431 lastNodeId = appendSinkNode(builder, bindCtx, lastNodeId) 432 newSourceStep := builder.appendStep(lastNodeId) 433 // delete uk plan 434 { 435 //sink_scan -> lock -> delete 436 lastNodeId = appendSinkScanNode(builder, bindCtx, newSourceStep) 437 delNodeInfo := makeDeleteNodeInfo(builder.compCtx, uniqueObjRef, uniqueTableDef, uniqueDeleteIdx, -1, false, uniqueTblPkPos, uniqueTblPkTyp, delCtx.lockTable, delCtx.partitionInfos) 438 lastNodeId, err = makeOneDeletePlan(builder, bindCtx, lastNodeId, delNodeInfo, isUk, isSK, false) 439 putDeleteNodeInfo(delNodeInfo) 440 if err != nil { 441 return err 442 } 443 builder.appendStep(lastNodeId) 444 } 445 // insert uk plan 446 { 447 lastNodeId = appendSinkScanNode(builder, bindCtx, newSourceStep) 448 lastProject := builder.qry.Nodes[lastNodeId].ProjectList 449 projectProjection := make([]*Expr, len(delCtx.tableDef.Cols)) 450 for j, uCols := range delCtx.tableDef.Cols { 451 if nIdx, ok := delCtx.updateColPosMap[uCols.Name]; ok { 452 projectProjection[j] = lastProject[nIdx] 453 } else { 454 if uCols.Name == catalog.Row_ID { 455 // replace the origin table's row_id with unique table's row_id 456 projectProjection[j] = lastProject[len(lastProject)-2] 457 } else { 458 projectProjection[j] = lastProject[j] 459 } 460 } 461 } 462 projectNode := &Node{ 463 NodeType: plan.Node_PROJECT, 464 Children: []int32{lastNodeId}, 465 ProjectList: projectProjection, 466 } 467 lastNodeId = builder.appendNode(projectNode, bindCtx) 468 preUKStep, err := appendPreInsertUkPlan(builder, bindCtx, delCtx.tableDef, lastNodeId, idx, true, uniqueTableDef, isUk) 469 if err != nil { 470 return err 471 } 472 473 insertUniqueTableDef := DeepCopyTableDef(uniqueTableDef, false) 474 for _, col := range uniqueTableDef.Cols { 475 if col.Name != catalog.Row_ID { 476 insertUniqueTableDef.Cols = append(insertUniqueTableDef.Cols, DeepCopyColDef(col)) 477 } 478 } 479 _checkPKDupForHiddenIndexTable := indexdef.Unique // only check PK uniqueness for UK. SK will not check PK uniqueness. 
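						// Example of the intent (an assumption-based sketch, not from the source): for
						//     create table t (a int, b int, unique key uk(a));
						// the hidden UK table's primary key is derived from a alone, so a duplicate a must be
						// rejected here; a non-unique secondary index also folds the base table's pk into its
						// hidden key, so duplicates cannot arise there and the check is skipped.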
480 updateColLength := 1 481 addAffectedRows := false 482 isFkRecursionCall := false 483 updatePkCol := true 484 ifExistAutoPkCol := false 485 var pkFilterExprs []*Expr 486 var partitionExpr *Expr 487 var indexSourceColTypes []*Type 488 var fuzzymessage *OriginTableMessageForFuzzy 489 err = makeOneInsertPlan(ctx, builder, bindCtx, uniqueObjRef, insertUniqueTableDef, 490 updateColLength, preUKStep, addAffectedRows, isFkRecursionCall, updatePkCol, 491 pkFilterExprs, partitionExpr, ifExistAutoPkCol, _checkPKDupForHiddenIndexTable, 492 indexSourceColTypes, fuzzymessage) 493 if err != nil { 494 return err 495 } 496 } 497 } else { 498 // it's more simple for delete hidden unique table .so we append nodes after the plan. not recursive call buildDeletePlans 499 delNodeInfo := makeDeleteNodeInfo(builder.compCtx, uniqueObjRef, uniqueTableDef, uniqueDeleteIdx, -1, false, uniqueTblPkPos, uniqueTblPkTyp, delCtx.lockTable, delCtx.partitionInfos) 500 lastNodeId, err = makeOneDeletePlan(builder, bindCtx, lastNodeId, delNodeInfo, isUk, isSK, false) 501 putDeleteNodeInfo(delNodeInfo) 502 if err != nil { 503 return err 504 } 505 builder.appendStep(lastNodeId) 506 } 507 } else if indexdef.TableExist && catalog.IsIvfIndexAlgo(indexdef.IndexAlgo) { 508 // IVF indexDefs are aggregated and handled later 509 if _, ok := multiTableIndexes[indexdef.IndexName]; !ok { 510 multiTableIndexes[indexdef.IndexName] = &MultiTableIndex{ 511 IndexAlgo: catalog.ToLower(indexdef.IndexAlgo), 512 IndexDefs: make(map[string]*IndexDef), 513 } 514 } 515 multiTableIndexes[indexdef.IndexName].IndexDefs[catalog.ToLower(indexdef.IndexAlgoTableType)] = indexdef 516 } else if indexdef.TableExist && catalog.IsMasterIndexAlgo(indexdef.IndexAlgo) { 517 // Used by pre-insert vector index. 518 masterObjRef, masterTableDef := ctx.Resolve(delCtx.objRef.SchemaName, indexdef.IndexTableName, Snapshot{TS: ×tamp.Timestamp{}}) 519 if masterTableDef == nil { 520 return moerr.NewNoSuchTable(builder.GetContext(), delCtx.objRef.SchemaName, indexdef.IndexName) 521 } 522 523 var lastNodeId int32 524 var err error 525 var masterDeleteIdx int 526 var masterTblPkPos int 527 var masterTblPkTyp Type 528 529 if delCtx.isDeleteWithoutFilters { 530 lastNodeId, err = appendDeleteIndexTablePlanWithoutFilters(builder, bindCtx, masterObjRef, masterTableDef) 531 masterDeleteIdx = getRowIdPos(masterTableDef) 532 masterTblPkPos, masterTblPkTyp = getPkPos(masterTableDef, false) 533 } else { 534 lastNodeId = appendSinkScanNode(builder, bindCtx, delCtx.sourceStep) 535 lastNodeId, err = appendDeleteMasterTablePlan(builder, bindCtx, masterObjRef, masterTableDef, lastNodeId, delCtx.tableDef, indexdef, typMap, posMap) 536 masterDeleteIdx = len(delCtx.tableDef.Cols) + delCtx.updateColLength 537 masterTblPkPos = masterDeleteIdx + 1 538 masterTblPkTyp = masterTableDef.Cols[0].Typ 539 } 540 541 if err != nil { 542 return err 543 } 544 545 if isUpdate { 546 // do it like simple update 547 lastNodeId = appendSinkNode(builder, bindCtx, lastNodeId) 548 newSourceStep := builder.appendStep(lastNodeId) 549 // delete uk plan 550 { 551 //sink_scan -> lock -> delete 552 lastNodeId = appendSinkScanNode(builder, bindCtx, newSourceStep) 553 delNodeInfo := makeDeleteNodeInfo(builder.compCtx, masterObjRef, masterTableDef, masterDeleteIdx, -1, false, masterTblPkPos, masterTblPkTyp, delCtx.lockTable, delCtx.partitionInfos) 554 lastNodeId, err = makeOneDeletePlan(builder, bindCtx, lastNodeId, delNodeInfo, false, true, false) 555 putDeleteNodeInfo(delNodeInfo) 556 if err != nil { 557 return err 558 } 
559 builder.appendStep(lastNodeId) 560 } 561 // insert master sk plan 562 { 563 // This function creates new SinkScanNode for each of Union's inside appendPreInsertSkMasterPlan 564 genLastNodeIdFn := func() int32 { 565 //TODO: verify if this will cause memory leak. 566 newLastNodeId := appendSinkScanNode(builder, bindCtx, newSourceStep) 567 lastProject := builder.qry.Nodes[newLastNodeId].ProjectList 568 projectProjection := make([]*Expr, len(delCtx.tableDef.Cols)) 569 for j, uCols := range delCtx.tableDef.Cols { 570 if nIdx, ok := delCtx.updateColPosMap[uCols.Name]; ok { 571 projectProjection[j] = lastProject[nIdx] 572 } else { 573 if uCols.Name == catalog.Row_ID { 574 // NOTE: 575 // 1. In the case of secondary index, we are reusing the row_id values 576 // that were deleted. 577 // 2. But in the master index case, we are using the row_id values from 578 // the original table. Row_id associated with say 2 rows (one row for each column) 579 // in the index table will be same value (ie that from the original table). 580 // So, when we do UNION it automatically removes the duplicate values. 581 // ie 582 // <"a_arjun_1",1, 1> --> (select serial_full("0", a, c),__mo_pk_key, __mo_row_id) 583 // <"a_arjun_1",1, 1> 584 // <"b_sunil_1",1, 1> --> (select serial_full("2", b,c),__mo_pk_key, __mo_row_id) 585 // <"b_sunil_1",1, 1> 586 // when we use UNION, we remove the duplicate values 587 // 3. RowID is added here: https://github.com/arjunsk/matrixone/blob/d7db178e1c7298e2a3e4f99e7292425a7ef0ef06/pkg/vm/engine/disttae/txn.go#L95 588 // TODO: verify this with Feng, Ouyuanning and Qingx (not reusing the row_id) 589 projectProjection[j] = lastProject[j] 590 } else { 591 projectProjection[j] = lastProject[j] 592 } 593 } 594 } 595 projectNode := &Node{ 596 NodeType: plan.Node_PROJECT, 597 Children: []int32{newLastNodeId}, 598 ProjectList: projectProjection, 599 } 600 return builder.appendNode(projectNode, bindCtx) 601 } 602 603 preUKStep, err := appendPreInsertSkMasterPlan(builder, bindCtx, delCtx.tableDef, idx, true, masterTableDef, genLastNodeIdFn) 604 if err != nil { 605 return err 606 } 607 608 insertEntriesTableDef := DeepCopyTableDef(masterTableDef, false) 609 for _, col := range masterTableDef.Cols { 610 if col.Name != catalog.Row_ID { 611 insertEntriesTableDef.Cols = append(insertEntriesTableDef.Cols, DeepCopyColDef(col)) 612 } 613 } 614 updateColLength := 1 615 addAffectedRows := false 616 isFkRecursionCall := false 617 updatePkCol := true 618 ifExistAutoPkCol := false 619 ifCheckPkDup := false 620 var pkFilterExprs []*Expr 621 var partitionExpr *Expr 622 var indexSourceColTypes []*Type 623 var fuzzymessage *OriginTableMessageForFuzzy 624 err = makeOneInsertPlan(ctx, builder, bindCtx, masterObjRef, insertEntriesTableDef, 625 updateColLength, preUKStep, addAffectedRows, isFkRecursionCall, updatePkCol, 626 pkFilterExprs, partitionExpr, ifExistAutoPkCol, ifCheckPkDup, 627 indexSourceColTypes, fuzzymessage) 628 629 if err != nil { 630 return err 631 } 632 } 633 634 } else { 635 // it's more simple for delete hidden unique table .so we append nodes after the plan. 
not recursive call buildDeletePlans 636 delNodeInfo := makeDeleteNodeInfo(builder.compCtx, masterObjRef, masterTableDef, masterDeleteIdx, -1, false, masterTblPkPos, masterTblPkTyp, delCtx.lockTable, delCtx.partitionInfos) 637 lastNodeId, err = makeOneDeletePlan(builder, bindCtx, lastNodeId, delNodeInfo, false, true, false) 638 putDeleteNodeInfo(delNodeInfo) 639 if err != nil { 640 return err 641 } 642 builder.appendStep(lastNodeId) 643 } 644 645 } 646 } 647 648 for _, multiTableIndex := range multiTableIndexes { 649 switch multiTableIndex.IndexAlgo { 650 case catalog.MoIndexIvfFlatAlgo.ToString(): 651 652 // Used by pre-insert vector index. 653 var idxRefs = make([]*ObjectRef, 3) 654 var idxTableDefs = make([]*TableDef, 3) 655 // TODO: plan node should hold snapshot and account info 656 //idxRefs[0], idxTableDefs[0] = ctx.Resolve(delCtx.objRef.SchemaName, multiTableIndex.IndexDefs[catalog.SystemSI_IVFFLAT_TblType_Metadata].IndexTableName, timestamp.Timestamp{}) 657 //idxRefs[1], idxTableDefs[1] = ctx.Resolve(delCtx.objRef.SchemaName, multiTableIndex.IndexDefs[catalog.SystemSI_IVFFLAT_TblType_Centroids].IndexTableName, timestamp.Timestamp{}) 658 //idxRefs[2], idxTableDefs[2] = ctx.Resolve(delCtx.objRef.SchemaName, multiTableIndex.IndexDefs[catalog.SystemSI_IVFFLAT_TblType_Entries].IndexTableName, timestamp.Timestamp{}) 659 660 idxRefs[0], idxTableDefs[0] = ctx.Resolve(delCtx.objRef.SchemaName, multiTableIndex.IndexDefs[catalog.SystemSI_IVFFLAT_TblType_Metadata].IndexTableName, Snapshot{TS: ×tamp.Timestamp{}}) 661 idxRefs[1], idxTableDefs[1] = ctx.Resolve(delCtx.objRef.SchemaName, multiTableIndex.IndexDefs[catalog.SystemSI_IVFFLAT_TblType_Centroids].IndexTableName, Snapshot{TS: ×tamp.Timestamp{}}) 662 idxRefs[2], idxTableDefs[2] = ctx.Resolve(delCtx.objRef.SchemaName, multiTableIndex.IndexDefs[catalog.SystemSI_IVFFLAT_TblType_Entries].IndexTableName, Snapshot{TS: ×tamp.Timestamp{}}) 663 664 entriesObjRef, entriesTableDef := idxRefs[2], idxTableDefs[2] 665 if entriesTableDef == nil { 666 return moerr.NewNoSuchTable(builder.GetContext(), delCtx.objRef.SchemaName, multiTableIndex.IndexDefs[catalog.SystemSI_IVFFLAT_TblType_Entries].IndexName) 667 } 668 669 var lastNodeId int32 670 var err error 671 var entriesDeleteIdx int 672 var entriesTblPkPos int 673 var entriesTblPkTyp Type 674 675 if delCtx.isDeleteWithoutFilters { 676 lastNodeId, err = appendDeleteIndexTablePlanWithoutFilters(builder, bindCtx, entriesObjRef, entriesTableDef) 677 entriesDeleteIdx = getRowIdPos(entriesTableDef) 678 entriesTblPkPos, entriesTblPkTyp = getPkPos(entriesTableDef, false) 679 } else { 680 lastNodeId = appendSinkScanNode(builder, bindCtx, delCtx.sourceStep) 681 lastNodeId, err = appendDeleteIvfTablePlan(builder, bindCtx, entriesObjRef, entriesTableDef, lastNodeId, delCtx.tableDef) 682 entriesDeleteIdx = len(delCtx.tableDef.Cols) + delCtx.updateColLength // eg:- <id, embedding, row_id, <... 
update_col> > + 0/1 683 entriesTblPkPos = entriesDeleteIdx + 1 // this is the compound primary key of the entries table 684 entriesTblPkTyp = entriesTableDef.Cols[4].Typ // 4'th column is the compound primary key <version,id, org_pk,org_embedding, cp_pk, row_id> 685 } 686 687 if err != nil { 688 return err 689 } 690 691 if isUpdate { 692 // do it like simple update 693 lastNodeId = appendSinkNode(builder, bindCtx, lastNodeId) 694 newSourceStep := builder.appendStep(lastNodeId) 695 // delete uk plan 696 { 697 //sink_scan -> lock -> delete 698 lastNodeId = appendSinkScanNode(builder, bindCtx, newSourceStep) 699 delNodeInfo := makeDeleteNodeInfo(builder.compCtx, entriesObjRef, entriesTableDef, entriesDeleteIdx, -1, false, entriesTblPkPos, entriesTblPkTyp, delCtx.lockTable, delCtx.partitionInfos) 700 lastNodeId, err = makeOneDeletePlan(builder, bindCtx, lastNodeId, delNodeInfo, false, true, false) 701 putDeleteNodeInfo(delNodeInfo) 702 if err != nil { 703 return err 704 } 705 builder.appendStep(lastNodeId) 706 } 707 // insert ivf_sk plan 708 { 709 //TODO: verify with ouyuanning, if this is correct 710 lastNodeId = appendSinkScanNode(builder, bindCtx, newSourceStep) 711 lastNodeIdForTblJoinCentroids := appendSinkScanNode(builder, bindCtx, newSourceStep) 712 713 lastProject := builder.qry.Nodes[lastNodeId].ProjectList 714 lastProjectForTblJoinCentroids := builder.qry.Nodes[lastNodeIdForTblJoinCentroids].ProjectList 715 716 projectProjection := make([]*Expr, len(delCtx.tableDef.Cols)) 717 projectProjectionForTblJoinCentroids := make([]*Expr, len(delCtx.tableDef.Cols)) 718 for j, uCols := range delCtx.tableDef.Cols { 719 if nIdx, ok := delCtx.updateColPosMap[uCols.Name]; ok { 720 projectProjection[j] = lastProject[nIdx] 721 projectProjectionForTblJoinCentroids[j] = lastProjectForTblJoinCentroids[nIdx] 722 } else { 723 if uCols.Name == catalog.Row_ID { 724 // replace the origin table's row_id with entry table's row_id 725 // it is the 2nd last column in the entry table join 726 projectProjection[j] = lastProject[len(lastProject)-2] 727 projectProjectionForTblJoinCentroids[j] = lastProjectForTblJoinCentroids[len(lastProjectForTblJoinCentroids)-2] 728 } else { 729 projectProjection[j] = lastProject[j] 730 projectProjectionForTblJoinCentroids[j] = lastProjectForTblJoinCentroids[j] 731 } 732 } 733 } 734 projectNode := &Node{ 735 NodeType: plan.Node_PROJECT, 736 Children: []int32{lastNodeId}, 737 ProjectList: projectProjection, 738 } 739 projectNodeForTblJoinCentroids := &Node{ 740 NodeType: plan.Node_PROJECT, 741 Children: []int32{lastNodeIdForTblJoinCentroids}, 742 ProjectList: projectProjectionForTblJoinCentroids, 743 } 744 lastNodeId = builder.appendNode(projectNode, bindCtx) 745 lastNodeIdForTblJoinCentroids = builder.appendNode(projectNodeForTblJoinCentroids, bindCtx) 746 747 preUKStep, err := appendPreInsertSkVectorPlan(builder, bindCtx, delCtx.tableDef, 748 lastNodeId, lastNodeIdForTblJoinCentroids, 749 multiTableIndex, true, idxRefs, idxTableDefs) 750 if err != nil { 751 return err 752 } 753 754 insertEntriesTableDef := DeepCopyTableDef(entriesTableDef, false) 755 for _, col := range entriesTableDef.Cols { 756 if col.Name != catalog.Row_ID { 757 insertEntriesTableDef.Cols = append(insertEntriesTableDef.Cols, DeepCopyColDef(col)) 758 } 759 } 760 updateColLength := 1 761 addAffectedRows := false 762 isFkRecursionCall := false 763 updatePkCol := true 764 ifExistAutoPkCol := false 765 ifCheckPkDup := false 766 var pkFilterExprs []*Expr 767 var partitionExpr *Expr 768 var indexSourceColTypes []*Type 
769 var fuzzymessage *OriginTableMessageForFuzzy 770 err = makeOneInsertPlan(ctx, builder, bindCtx, entriesObjRef, insertEntriesTableDef, 771 updateColLength, preUKStep, addAffectedRows, isFkRecursionCall, updatePkCol, 772 pkFilterExprs, partitionExpr, ifExistAutoPkCol, ifCheckPkDup, 773 indexSourceColTypes, fuzzymessage) 774 775 if err != nil { 776 return err 777 } 778 } 779 780 } else { 781 // it's more simple for delete hidden unique table .so we append nodes after the plan. not recursive call buildDeletePlans 782 delNodeInfo := makeDeleteNodeInfo(builder.compCtx, entriesObjRef, entriesTableDef, entriesDeleteIdx, -1, false, entriesTblPkPos, entriesTblPkTyp, delCtx.lockTable, delCtx.partitionInfos) 783 lastNodeId, err = makeOneDeletePlan(builder, bindCtx, lastNodeId, delNodeInfo, false, true, false) 784 putDeleteNodeInfo(delNodeInfo) 785 if err != nil { 786 return err 787 } 788 builder.appendStep(lastNodeId) 789 } 790 default: 791 return moerr.NewNYINoCtx("unsupported index algorithm %s", multiTableIndex.IndexAlgo) 792 } 793 } 794 } 795 796 // delete origin table 797 lastNodeId := appendSinkScanNode(builder, bindCtx, delCtx.sourceStep) 798 partExprIdx := -1 799 if delCtx.tableDef.Partition != nil { 800 partExprIdx = len(delCtx.tableDef.Cols) + delCtx.updateColLength 801 lastNodeId = appendPreDeleteNode(builder, bindCtx, delCtx.objRef, delCtx.tableDef, lastNodeId) 802 } 803 pkPos, pkTyp := getPkPos(delCtx.tableDef, false) 804 delNodeInfo := makeDeleteNodeInfo(ctx, delCtx.objRef, delCtx.tableDef, delCtx.rowIdPos, partExprIdx, true, pkPos, pkTyp, delCtx.lockTable, delCtx.partitionInfos) 805 lastNodeId, err = makeOneDeletePlan(builder, bindCtx, lastNodeId, delNodeInfo, false, false, canTruncate) 806 putDeleteNodeInfo(delNodeInfo) 807 if err != nil { 808 return err 809 } 810 builder.appendStep(lastNodeId) 811 812 // if some table references to this table 813 if enabled && len(delCtx.tableDef.RefChildTbls) > 0 { 814 nameTypMap := make(map[string]*plan.Type) 815 idNameMap := make(map[uint64]string) 816 nameIdxMap := make(map[string]int32) 817 for idx, col := range delCtx.tableDef.Cols { 818 nameTypMap[col.Name] = &col.Typ 819 idNameMap[col.ColId] = col.Name 820 nameIdxMap[col.Name] = int32(idx) 821 } 822 baseProject := getProjectionByLastNode(builder, lastNodeId) 823 824 for _, tableId := range delCtx.tableDef.RefChildTbls { 825 // stmt: delete p, c from child_tbl c join parent_tbl p on c.pid = p.id , skip 826 if _, existInDelTable := delCtx.allDelTableIDs[tableId]; existInDelTable { 827 continue 828 } 829 830 //delete data in parent table may trigger some actions in the child table 831 var childObjRef *ObjectRef 832 var childTableDef *TableDef 833 if tableId == 0 { 834 //fk self refer 835 childObjRef = delCtx.objRef 836 childTableDef = delCtx.tableDef 837 } else { 838 childObjRef, childTableDef = builder.compCtx.ResolveById(tableId, Snapshot{TS: ×tamp.Timestamp{}}) 839 } 840 childPosMap := make(map[string]int32) 841 childTypMap := make(map[string]*plan.Type) 842 childId2name := make(map[uint64]string) 843 childProjectList := make([]*Expr, len(childTableDef.Cols)) 844 childForJoinProject := make([]*Expr, len(childTableDef.Cols)) 845 childRowIdPos := -1 846 for idx, col := range childTableDef.Cols { 847 childPosMap[col.Name] = int32(idx) 848 childTypMap[col.Name] = &col.Typ 849 childId2name[col.ColId] = col.Name 850 childProjectList[idx] = &Expr{ 851 Typ: col.Typ, 852 Expr: &plan.Expr_Col{ 853 Col: &plan.ColRef{ 854 ColPos: int32(idx), 855 Name: col.Name, 856 }, 857 }, 858 } 859 
childForJoinProject[idx] = &Expr{ 860 Typ: col.Typ, 861 Expr: &plan.Expr_Col{ 862 Col: &plan.ColRef{ 863 RelPos: 1, 864 ColPos: int32(idx), 865 Name: col.Name, 866 }, 867 }, 868 } 869 if col.Name == catalog.Row_ID { 870 childRowIdPos = idx 871 } 872 } 873 874 for _, fk := range childTableDef.Fkeys { 875 //child table fk self refer 876 //if the child table in the delete table list, something must be done 877 fkSelfReferCond := fk.ForeignTbl == 0 && 878 childTableDef.TblId == delCtx.tableDef.TblId 879 if fk.ForeignTbl == delCtx.tableDef.TblId || fkSelfReferCond { 880 // update stmt: update the columns do not contain ref key, skip 881 updateRefColumn := make(map[string]int32) 882 if isUpdate { 883 for _, colId := range fk.ForeignCols { 884 updateName := idNameMap[colId] 885 if uIdx, ok := delCtx.updateColPosMap[updateName]; ok { 886 updateRefColumn[updateName] = int32(uIdx) 887 } 888 } 889 if len(updateRefColumn) == 0 { 890 continue 891 } 892 } 893 894 // build join conds 895 joinConds := make([]*Expr, len(fk.Cols)) 896 rightConds := make([]*Expr, len(fk.Cols)) 897 leftConds := make([]*Expr, len(fk.Cols)) 898 // use for join's projection & filter's condExpr 899 var oneLeftCond *Expr 900 var oneLeftCondName string 901 updateChildColPosMap := make(map[string]int) 902 updateChildColExpr := make([]*Expr, len(fk.Cols)) // use for update 903 insertColPos := make([]int, 0, len(childTableDef.Cols)-1) // use for update 904 childColLength := len(childTableDef.Cols) 905 childTablePkMap := make(map[string]struct{}) 906 for _, name := range childTableDef.Pkey.Names { 907 childTablePkMap[name] = struct{}{} 908 } 909 var updatePk bool 910 for i, colId := range fk.Cols { 911 for _, col := range childTableDef.Cols { 912 if col.ColId == colId { 913 childColumnName := col.Name 914 originColumnName := idNameMap[fk.ForeignCols[i]] 915 916 leftExpr := &Expr{ 917 Typ: *nameTypMap[originColumnName], 918 Expr: &plan.Expr_Col{ 919 Col: &plan.ColRef{ 920 RelPos: 0, 921 ColPos: nameIdxMap[originColumnName], 922 Name: originColumnName, 923 }, 924 }, 925 } 926 if pos, ok := delCtx.updateColPosMap[originColumnName]; ok { 927 updateChildColExpr[i] = &Expr{ 928 Typ: baseProject[pos].Typ, 929 Expr: &plan.Expr_Col{ 930 Col: &plan.ColRef{ 931 RelPos: 0, 932 ColPos: int32(pos), 933 Name: originColumnName, 934 }, 935 }, 936 } 937 } else { 938 updateChildColExpr[i] = leftExpr 939 } 940 rightExpr := &plan.Expr{ 941 Typ: *childTypMap[childColumnName], 942 Expr: &plan.Expr_Col{ 943 Col: &plan.ColRef{ 944 RelPos: 1, 945 ColPos: childPosMap[childColumnName], 946 Name: childColumnName, 947 }, 948 }, 949 } 950 updateChildColPosMap[childColumnName] = childColLength + i 951 if _, exists := childTablePkMap[childColumnName]; exists { 952 updatePk = true 953 } 954 condExpr, err := BindFuncExprImplByPlanExpr(builder.GetContext(), "=", []*Expr{leftExpr, rightExpr}) 955 if err != nil { 956 return err 957 } 958 rightConds[i] = rightExpr 959 leftConds[i] = leftExpr 960 oneLeftCond = leftExpr 961 oneLeftCondName = originColumnName 962 joinConds[i] = condExpr 963 964 break 965 } 966 } 967 } 968 969 for idx, col := range childTableDef.Cols { 970 if col.Name != catalog.Row_ID && col.Name != catalog.CPrimaryKeyColName { 971 if pos, ok := updateChildColPosMap[col.Name]; ok { 972 insertColPos = append(insertColPos, pos) 973 } else { 974 insertColPos = append(insertColPos, idx) 975 } 976 } 977 } 978 979 var refAction plan.ForeignKeyDef_RefAction 980 if isUpdate { 981 refAction = fk.OnUpdate 982 } else { 983 refAction = fk.OnDelete 984 } 985 986 lastNodeId 
= appendSinkScanNode(builder, bindCtx, delCtx.sourceStep) 987 // deal with case: update t1 set a = a. then do not need to check constraint 988 if isUpdate { 989 var filterExpr, tmpExpr *Expr 990 for updateName, newIdx := range updateRefColumn { 991 oldIdx := nameIdxMap[updateName] 992 tmpExpr, err = BindFuncExprImplByPlanExpr(builder.GetContext(), "!=", []*Expr{{ 993 Typ: *nameTypMap[updateName], 994 Expr: &plan.Expr_Col{ 995 Col: &ColRef{ 996 ColPos: oldIdx, 997 Name: updateName, 998 }, 999 }, 1000 }, { 1001 Typ: *nameTypMap[updateName], 1002 Expr: &plan.Expr_Col{ 1003 Col: &ColRef{ 1004 ColPos: newIdx, 1005 Name: updateName, 1006 }, 1007 }, 1008 }}) 1009 if err != nil { 1010 return nil 1011 } 1012 if filterExpr == nil { 1013 filterExpr = tmpExpr 1014 } else { 1015 filterExpr, err = BindFuncExprImplByPlanExpr(builder.GetContext(), "or", []*Expr{filterExpr, tmpExpr}) 1016 if err != nil { 1017 return nil 1018 } 1019 } 1020 } 1021 lastNodeId = builder.appendNode(&plan.Node{ 1022 NodeType: plan.Node_FILTER, 1023 Children: []int32{lastNodeId}, 1024 FilterList: []*Expr{filterExpr}, 1025 }, bindCtx) 1026 } 1027 1028 switch refAction { 1029 case plan.ForeignKeyDef_NO_ACTION, plan.ForeignKeyDef_RESTRICT, plan.ForeignKeyDef_SET_DEFAULT: 1030 // plan : sink_scan -> join(f1 semi join c1 & get f1's col) -> filter(assert(isempty(f1's col))) 1031 /* 1032 CORNER CASE: for the reason of the deep copy 1033 create table t1(a int unique key,b int, foreign key fk1(b) references t1(a)); 1034 insert into t1 values (1,1); 1035 insert into t1 values (2,1); 1036 insert into t1 values (3,2); 1037 1038 update t1 set a = NULL where a = 4; 1039 --> ERROR 20101 (HY000): internal error: unexpected input batch for column expression 1040 */ 1041 copiedTableDef := DeepCopyTableDef(childTableDef, true) 1042 rightId := builder.appendNode(&plan.Node{ 1043 NodeType: plan.Node_TABLE_SCAN, 1044 Stats: &plan.Stats{}, 1045 ObjRef: childObjRef, 1046 TableDef: copiedTableDef, 1047 ProjectList: childProjectList, 1048 }, bindCtx) 1049 1050 lastNodeId = builder.appendNode(&plan.Node{ 1051 NodeType: plan.Node_JOIN, 1052 Children: []int32{lastNodeId, rightId}, 1053 JoinType: plan.Node_SEMI, 1054 OnList: joinConds, 1055 ProjectList: []*Expr{oneLeftCond}, 1056 }, bindCtx) 1057 1058 colExpr := &Expr{ 1059 Typ: oneLeftCond.Typ, 1060 Expr: &plan.Expr_Col{ 1061 Col: &plan.ColRef{ 1062 Name: oneLeftCondName, 1063 }, 1064 }, 1065 } 1066 errExpr := makePlan2StringConstExprWithType("Cannot delete or update a parent row: a foreign key constraint fails") 1067 isEmptyExpr, err := BindFuncExprImplByPlanExpr(builder.GetContext(), "isempty", []*Expr{colExpr}) 1068 if err != nil { 1069 return err 1070 } 1071 assertExpr, err := BindFuncExprImplByPlanExpr(builder.GetContext(), "assert", []*Expr{isEmptyExpr, errExpr}) 1072 if err != nil { 1073 return err 1074 } 1075 filterNode := &Node{ 1076 NodeType: plan.Node_FILTER, 1077 Children: []int32{lastNodeId}, 1078 FilterList: []*Expr{assertExpr}, 1079 ProjectList: getProjectionByLastNode(builder, lastNodeId), 1080 IsEnd: true, 1081 } 1082 lastNodeId = builder.appendNode(filterNode, bindCtx) 1083 builder.appendStep(lastNodeId) 1084 1085 case plan.ForeignKeyDef_SET_NULL: 1086 // plan : sink_scan -> join[f1 inner join c1 on f1.id = c1.fid, get c1.* & null] -> project -> sink then + updatePlans 1087 rightId := builder.appendNode(&plan.Node{ 1088 NodeType: plan.Node_TABLE_SCAN, 1089 Stats: &plan.Stats{}, 1090 ObjRef: childObjRef, 1091 TableDef: DeepCopyTableDef(childTableDef, true), 1092 ProjectList: 
childProjectList, 1093 }, bindCtx) 1094 lastNodeId = builder.appendNode(&plan.Node{ 1095 NodeType: plan.Node_JOIN, 1096 Children: []int32{lastNodeId, rightId}, 1097 JoinType: plan.Node_INNER, 1098 OnList: joinConds, 1099 ProjectList: childForJoinProject, 1100 }, bindCtx) 1101 // inner join cannot dealwith null expr in projectList. so we append a project node 1102 projectProjection := getProjectionByLastNode(builder, lastNodeId) 1103 for _, e := range rightConds { 1104 projectProjection = append(projectProjection, &plan.Expr{ 1105 Typ: e.Typ, 1106 Expr: &plan.Expr_Lit{ 1107 Lit: &Const{ 1108 Isnull: true, 1109 }, 1110 }, 1111 }) 1112 } 1113 lastNodeId = builder.appendNode(&Node{ 1114 NodeType: plan.Node_PROJECT, 1115 Children: []int32{lastNodeId}, 1116 ProjectList: projectProjection, 1117 }, bindCtx) 1118 1119 lastNodeId = appendAggNodeForFkJoin(builder, bindCtx, lastNodeId) 1120 1121 newSourceStep := builder.appendStep(lastNodeId) 1122 1123 upPlanCtx := getDmlPlanCtx() 1124 upPlanCtx.objRef = childObjRef 1125 upPlanCtx.tableDef = childTableDef 1126 upPlanCtx.updateColLength = len(rightConds) 1127 upPlanCtx.isMulti = false 1128 upPlanCtx.rowIdPos = childRowIdPos 1129 upPlanCtx.sourceStep = newSourceStep 1130 upPlanCtx.beginIdx = 0 1131 upPlanCtx.updateColPosMap = updateChildColPosMap 1132 upPlanCtx.allDelTableIDs = map[uint64]struct{}{} 1133 upPlanCtx.insertColPos = insertColPos 1134 upPlanCtx.isFkRecursionCall = true 1135 upPlanCtx.updatePkCol = updatePk 1136 1137 err = buildUpdatePlans(ctx, builder, bindCtx, upPlanCtx, false) 1138 putDmlPlanCtx(upPlanCtx) 1139 if err != nil { 1140 return err 1141 } 1142 1143 case plan.ForeignKeyDef_CASCADE: 1144 rightId := builder.appendNode(&plan.Node{ 1145 NodeType: plan.Node_TABLE_SCAN, 1146 Stats: &plan.Stats{}, 1147 ObjRef: childObjRef, 1148 TableDef: childTableDef, 1149 ProjectList: childProjectList, 1150 }, bindCtx) 1151 1152 //skip cascade for fk self refer 1153 if !fkSelfReferCond { 1154 if isUpdate { 1155 // update stmt get plan : sink_scan -> join[f1 inner join c1 on f1.id = c1.fid, get c1.* & update cols] -> sink then + updatePlans 1156 joinProjection := childForJoinProject 1157 joinProjection = append(joinProjection, updateChildColExpr...) 
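					// Hypothetical shape of the statement handled here (names are illustrative only):
					//     update parent set id = 10 where id = 1;
					// with `foreign key (fid) references parent(id) on update cascade` on the child, the
					// join below pairs each affected parent row with its child rows and appends the new key
					// values, so the recursive buildUpdatePlans call can rewrite child.fid as well.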
1158 lastNodeId = builder.appendNode(&plan.Node{ 1159 NodeType: plan.Node_JOIN, 1160 Children: []int32{lastNodeId, rightId}, 1161 JoinType: plan.Node_INNER, 1162 OnList: joinConds, 1163 ProjectList: joinProjection, 1164 }, bindCtx) 1165 lastNodeId = appendAggNodeForFkJoin(builder, bindCtx, lastNodeId) 1166 newSourceStep := builder.appendStep(lastNodeId) 1167 1168 upPlanCtx := getDmlPlanCtx() 1169 upPlanCtx.objRef = childObjRef 1170 upPlanCtx.tableDef = DeepCopyTableDef(childTableDef, true) 1171 upPlanCtx.updateColLength = len(rightConds) 1172 upPlanCtx.isMulti = false 1173 upPlanCtx.rowIdPos = childRowIdPos 1174 upPlanCtx.sourceStep = newSourceStep 1175 upPlanCtx.beginIdx = 0 1176 upPlanCtx.updateColPosMap = updateChildColPosMap 1177 upPlanCtx.insertColPos = insertColPos 1178 upPlanCtx.allDelTableIDs = map[uint64]struct{}{} 1179 upPlanCtx.isFkRecursionCall = true 1180 upPlanCtx.updatePkCol = updatePk 1181 1182 err = buildUpdatePlans(ctx, builder, bindCtx, upPlanCtx, false) 1183 putDmlPlanCtx(upPlanCtx) 1184 if err != nil { 1185 return err 1186 } 1187 } else { 1188 // delete stmt get plan : sink_scan -> join[f1 inner join c1 on f1.id = c1.fid, get c1.*] -> sink then + deletePlans 1189 lastNodeId = builder.appendNode(&plan.Node{ 1190 NodeType: plan.Node_JOIN, 1191 Children: []int32{lastNodeId, rightId}, 1192 JoinType: plan.Node_INNER, 1193 OnList: joinConds, 1194 ProjectList: childForJoinProject, 1195 }, bindCtx) 1196 lastNodeId = appendSinkNode(builder, bindCtx, lastNodeId) 1197 newSourceStep := builder.appendStep(lastNodeId) 1198 1199 //make deletePlans 1200 allDelTableIDs := make(map[uint64]struct{}) 1201 allDelTableIDs[childTableDef.TblId] = struct{}{} 1202 upPlanCtx := getDmlPlanCtx() 1203 upPlanCtx.objRef = childObjRef 1204 upPlanCtx.tableDef = childTableDef 1205 upPlanCtx.updateColLength = 0 1206 upPlanCtx.isMulti = false 1207 upPlanCtx.rowIdPos = childRowIdPos 1208 upPlanCtx.sourceStep = newSourceStep 1209 upPlanCtx.beginIdx = 0 1210 upPlanCtx.allDelTableIDs = allDelTableIDs 1211 1212 err := buildDeletePlans(ctx, builder, bindCtx, upPlanCtx) 1213 putDmlPlanCtx(upPlanCtx) 1214 if err != nil { 1215 return err 1216 } 1217 } 1218 } 1219 } 1220 1221 } 1222 } 1223 } 1224 } 1225 1226 return nil 1227 } 1228 1229 // appendAggNodeForFkJoin append agg node. to deal with these case: 1230 // create table f (a int, b int, primary key(a,b)); 1231 // insert into f values (1,1),(1,2),(1,3),(2,3); 1232 // create table c (a int primary key, f_a int, constraint fa_ck foreign key(f_a) REFERENCES f(a) on delete SET NULL on update SET NULL); 1233 // insert into c values (1,1),(2,1),(3,2); 1234 // update f set a = 10 where b=1; we need update c only once for 2 rows. not three times for 6 rows. 
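// Roughly, the appended AGG node groups by every column coming out of the join, so the effect
// is similar to wrapping the join output in
//
//	select <join columns> ... group by <all join columns>
//
// which collapses the duplicate child rows produced by the parent-side fan-out to one row each
// before the child-table update plan runs (a sketch of the intent, not the exact generated plan).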
1235 func appendAggNodeForFkJoin(builder *QueryBuilder, bindCtx *BindContext, lastNodeId int32) int32 { 1236 groupByList := getProjectionByLastNode(builder, lastNodeId) 1237 aggProject := make([]*Expr, len(groupByList)) 1238 for i, e := range groupByList { 1239 aggProject[i] = &Expr{ 1240 Typ: e.Typ, 1241 Expr: &plan.Expr_Col{ 1242 Col: &plan.ColRef{ 1243 RelPos: -2, 1244 ColPos: int32(i), 1245 }, 1246 }, 1247 } 1248 } 1249 lastNodeId = builder.appendNode(&Node{ 1250 NodeType: plan.Node_AGG, 1251 GroupBy: groupByList, 1252 Children: []int32{lastNodeId}, 1253 ProjectList: aggProject, 1254 }, bindCtx) 1255 lastNodeId = appendSinkNode(builder, bindCtx, lastNodeId) 1256 1257 return lastNodeId 1258 } 1259 1260 // buildInsertPlansWithRelatedHiddenTable build insert plan recursively for origin table 1261 func buildInsertPlansWithRelatedHiddenTable( 1262 stmt *tree.Insert, ctx CompilerContext, builder *QueryBuilder, bindCtx *BindContext, objRef *ObjectRef, 1263 tableDef *TableDef, updateColLength int, sourceStep int32, addAffectedRows bool, isFkRecursionCall bool, 1264 updatePkCol bool, pkFilterExprs []*Expr, partitionExpr *Expr, ifExistAutoPkCol bool, 1265 checkInsertPkDupForHiddenIndexTable bool, indexSourceColTypes []*plan.Type, fuzzymessage *OriginTableMessageForFuzzy, 1266 insertWithoutUniqueKeyMap map[string]bool, 1267 ) error { 1268 var lastNodeId int32 1269 var err error 1270 1271 multiTableIndexes := make(map[string]*MultiTableIndex) 1272 if updateColLength == 0 { 1273 for idx, indexdef := range tableDef.Indexes { 1274 if indexdef.GetUnique() && (insertWithoutUniqueKeyMap != nil && insertWithoutUniqueKeyMap[indexdef.IndexName]) { 1275 continue 1276 } 1277 1278 // append plan for the hidden tables of unique/secondary keys 1279 if indexdef.TableExist && catalog.IsRegularIndexAlgo(indexdef.IndexAlgo) { 1280 /******** 1281 NOTE: make sure to make the major change applied to secondary index, to IVFFLAT index as well. 1282 Else IVFFLAT index would fail 1283 ********/ 1284 1285 idxRef, idxTableDef := ctx.Resolve(objRef.SchemaName, indexdef.IndexTableName, Snapshot{TS: ×tamp.Timestamp{}}) 1286 // remove row_id 1287 for i, col := range idxTableDef.Cols { 1288 if col.Name == catalog.Row_ID { 1289 idxTableDef.Cols = append(idxTableDef.Cols[:i], idxTableDef.Cols[i+1:]...) 1290 break 1291 } 1292 } 1293 1294 lastNodeId = appendSinkScanNode(builder, bindCtx, sourceStep) 1295 newSourceStep, err := appendPreInsertUkPlan(builder, bindCtx, tableDef, lastNodeId, idx, false, idxTableDef, indexdef.Unique) 1296 if err != nil { 1297 return err 1298 } 1299 1300 needCheckPkDupForHiddenTable := indexdef.Unique // only check PK uniqueness for UK. SK will not check PK uniqueness. 1301 var insertColsNameFromStmt []string 1302 var pkFilterExprForHiddenTable []*Expr 1303 var originTableMessageForFuzzy *OriginTableMessageForFuzzy 1304 1305 // The way to guarantee the uniqueness of the unique key is to create a hidden table, 1306 // with the primary key of the hidden table as the unique key. 1307 // package contains some information needed by the fuzzy filter to run background SQL. 1308 if indexdef.GetUnique() { 1309 _, idxTableDef := ctx.Resolve(objRef.SchemaName, indexdef.IndexTableName, Snapshot{TS: ×tamp.Timestamp{}}) 1310 // remove row_id 1311 for i, colVal := range idxTableDef.Cols { 1312 if colVal.Name == catalog.Row_ID { 1313 idxTableDef.Cols = append(idxTableDef.Cols[:i], idxTableDef.Cols[i+1:]...) 
1314 break 1315 } 1316 } 1317 originTableMessageForFuzzy = &OriginTableMessageForFuzzy{ 1318 ParentTableName: tableDef.Name, 1319 } 1320 1321 uniqueCols := make([]*plan.ColDef, len(indexdef.Parts)) 1322 uniqueColsMap := make(map[string]int) 1323 for i, n := range indexdef.Parts { 1324 uniqueColsMap[n] = i 1325 } 1326 for _, c := range tableDef.Cols { // sort 1327 if i, ok := uniqueColsMap[c.Name]; ok { 1328 uniqueCols[i] = c 1329 } 1330 } 1331 originTableMessageForFuzzy.ParentUniqueCols = uniqueCols 1332 uniqueColLocationMap := newLocationMap(tableDef, indexdef) 1333 if stmt != nil { 1334 insertColsNameFromStmt, err = getInsertColsFromStmt(ctx.GetContext(), stmt, tableDef) 1335 if err != nil { 1336 return err 1337 } 1338 1339 // check if this unique key need to check dup 1340 for name, oi := range uniqueColLocationMap.m { 1341 if tableDef.Cols[oi.index].Typ.AutoIncr { 1342 found := false 1343 for _, inserted := range insertColsNameFromStmt { 1344 if inserted == name { 1345 found = true 1346 } 1347 } 1348 if !found { // still need to check dup for auto incr unique if contains value, else no need 1349 needCheckPkDupForHiddenTable = false 1350 } 1351 } 1352 } 1353 1354 // try to build pk filter epxr for hidden table created by unique key 1355 if needCheckPkDupForHiddenTable && canUsePkFilter(builder, ctx, stmt, tableDef, insertColsNameFromStmt, indexdef) { 1356 pkFilterExprForHiddenTable, err = getPkValueExpr(builder, ctx, tableDef, uniqueColLocationMap, insertColsNameFromStmt) 1357 if err != nil { 1358 return err 1359 } 1360 } 1361 } 1362 } 1363 1364 colTypes := make([]*plan.Type, len(tableDef.Cols)) 1365 for i := range tableDef.Cols { 1366 colTypes[i] = &tableDef.Cols[i].Typ 1367 } 1368 1369 updateColLength := 0 1370 addAffectedRows := false 1371 isFkRecursionCall := false 1372 updatePkCol := true 1373 ifExistAutoPkCol := false 1374 var partitionExpr *Expr 1375 err = makeOneInsertPlan(ctx, builder, bindCtx, idxRef, idxTableDef, 1376 updateColLength, newSourceStep, addAffectedRows, isFkRecursionCall, updatePkCol, 1377 pkFilterExprForHiddenTable, partitionExpr, ifExistAutoPkCol, needCheckPkDupForHiddenTable, 1378 colTypes, originTableMessageForFuzzy) 1379 1380 if err != nil { 1381 return err 1382 } 1383 } else if indexdef.TableExist && catalog.IsIvfIndexAlgo(indexdef.IndexAlgo) { 1384 1385 // IVF indexDefs are aggregated and handled later 1386 if _, ok := multiTableIndexes[indexdef.IndexName]; !ok { 1387 multiTableIndexes[indexdef.IndexName] = &MultiTableIndex{ 1388 IndexAlgo: catalog.ToLower(indexdef.IndexAlgo), 1389 IndexDefs: make(map[string]*IndexDef), 1390 } 1391 } 1392 multiTableIndexes[indexdef.IndexName].IndexDefs[catalog.ToLower(indexdef.IndexAlgoTableType)] = indexdef 1393 } else if indexdef.TableExist && catalog.IsMasterIndexAlgo(indexdef.IndexAlgo) { 1394 1395 idxRef, idxTableDef := ctx.Resolve(objRef.SchemaName, indexdef.IndexTableName, Snapshot{TS: ×tamp.Timestamp{}}) 1396 // remove row_id 1397 for i, colVal := range idxTableDef.Cols { 1398 if colVal.Name == catalog.Row_ID { 1399 idxTableDef.Cols = append(idxTableDef.Cols[:i], idxTableDef.Cols[i+1:]...) 1400 break 1401 } 1402 } 1403 genLastNodeIdFn := func() int32 { 1404 return appendSinkScanNode(builder, bindCtx, sourceStep) 1405 } 1406 newSourceStep, err := appendPreInsertSkMasterPlan(builder, bindCtx, tableDef, idx, false, idxTableDef, genLastNodeIdFn) 1407 if err != nil { 1408 return err 1409 } 1410 1411 //TODO: verify with zengyan1 if colType should read from original table. 
1412 // It is mainly used for retaining decimal datatype precision in error messages. 1413 colTypes := make([]*plan.Type, len(tableDef.Cols)) 1414 for i := range tableDef.Cols { 1415 colTypes[i] = &tableDef.Cols[i].Typ 1416 } 1417 1418 updateColLength := 0 1419 addAffectedRows := false 1420 isFkRecursionCall := false 1421 updatePkCol := true 1422 ifExistAutoPkCol := false 1423 ifCheckPkDup := false 1424 var pkFilterExprs []*Expr 1425 var partitionExpr *Expr 1426 var fuzzymessage *OriginTableMessageForFuzzy 1427 err = makeOneInsertPlan(ctx, builder, bindCtx, idxRef, idxTableDef, 1428 updateColLength, newSourceStep, addAffectedRows, isFkRecursionCall, updatePkCol, 1429 pkFilterExprs, partitionExpr, ifExistAutoPkCol, ifCheckPkDup, 1430 colTypes, fuzzymessage) 1431 1432 if err != nil { 1433 return err 1434 } 1435 } 1436 } 1437 } 1438 1439 for _, multiTableIndex := range multiTableIndexes { 1440 1441 switch multiTableIndex.IndexAlgo { 1442 case catalog.MoIndexIvfFlatAlgo.ToString(): 1443 lastNodeId = appendSinkScanNode(builder, bindCtx, sourceStep) 1444 lastNodeIdForTblJoinCentroids := appendSinkScanNode(builder, bindCtx, sourceStep) 1445 1446 var idxRefs = make([]*ObjectRef, 3) 1447 var idxTableDefs = make([]*TableDef, 3) 1448 // TODO: node should hold snapshot and account info 1449 //idxRefs[0], idxTableDefs[0] = ctx.Resolve(objRef.SchemaName, multiTableIndex.IndexDefs[catalog.SystemSI_IVFFLAT_TblType_Metadata].IndexTableName, timestamp.Timestamp{}) 1450 //idxRefs[1], idxTableDefs[1] = ctx.Resolve(objRef.SchemaName, multiTableIndex.IndexDefs[catalog.SystemSI_IVFFLAT_TblType_Centroids].IndexTableName, timestamp.Timestamp{}) 1451 //idxRefs[2], idxTableDefs[2] = ctx.Resolve(objRef.SchemaName, multiTableIndex.IndexDefs[catalog.SystemSI_IVFFLAT_TblType_Entries].IndexTableName, timestamp.Timestamp{}) 1452 1453 idxRefs[0], idxTableDefs[0] = ctx.Resolve(objRef.SchemaName, multiTableIndex.IndexDefs[catalog.SystemSI_IVFFLAT_TblType_Metadata].IndexTableName, Snapshot{TS: ×tamp.Timestamp{}}) 1454 idxRefs[1], idxTableDefs[1] = ctx.Resolve(objRef.SchemaName, multiTableIndex.IndexDefs[catalog.SystemSI_IVFFLAT_TblType_Centroids].IndexTableName, Snapshot{TS: ×tamp.Timestamp{}}) 1455 idxRefs[2], idxTableDefs[2] = ctx.Resolve(objRef.SchemaName, multiTableIndex.IndexDefs[catalog.SystemSI_IVFFLAT_TblType_Entries].IndexTableName, Snapshot{TS: ×tamp.Timestamp{}}) 1456 1457 // remove row_id 1458 for i := range idxTableDefs { 1459 for j, column := range idxTableDefs[i].Cols { 1460 if column.Name == catalog.Row_ID { 1461 idxTableDefs[i].Cols = append(idxTableDefs[i].Cols[:j], idxTableDefs[i].Cols[j+1:]...) 1462 break 1463 } 1464 } 1465 } 1466 1467 newSourceStep, err := appendPreInsertSkVectorPlan(builder, bindCtx, tableDef, 1468 lastNodeId, lastNodeIdForTblJoinCentroids, 1469 multiTableIndex, false, idxRefs, idxTableDefs) 1470 if err != nil { 1471 return err 1472 } 1473 1474 //TODO: verify with zengyan1 if colType should read from original table. 1475 // It is mainly used for retaining decimal datatype precision in error messages. 
1476 colTypes := make([]*plan.Type, len(tableDef.Cols)) 1477 for i := range tableDef.Cols { 1478 colTypes[i] = &tableDef.Cols[i].Typ 1479 } 1480 1481 updateColLength := 0 1482 addAffectedRows := false 1483 isFkRecursionCall := false 1484 updatePkCol := true 1485 ifExistAutoPkCol := false 1486 ifCheckPkDup := false 1487 var pkFilterExprs []*Expr 1488 var partitionExpr *Expr 1489 var fuzzymessage *OriginTableMessageForFuzzy 1490 err = makeOneInsertPlan(ctx, builder, bindCtx, idxRefs[2], idxTableDefs[2], 1491 updateColLength, newSourceStep, addAffectedRows, isFkRecursionCall, updatePkCol, 1492 pkFilterExprs, partitionExpr, ifExistAutoPkCol, ifCheckPkDup, 1493 colTypes, fuzzymessage) 1494 1495 if err != nil { 1496 return err 1497 } 1498 default: 1499 return moerr.NewInvalidInputNoCtx("Unsupported index algorithm: %s", multiTableIndex.IndexAlgo) 1500 } 1501 if err != nil { 1502 return err 1503 } 1504 } 1505 1506 return makeOneInsertPlan(ctx, builder, bindCtx, objRef, tableDef, 1507 updateColLength, sourceStep, addAffectedRows, isFkRecursionCall, updatePkCol, 1508 pkFilterExprs, partitionExpr, ifExistAutoPkCol, checkInsertPkDupForHiddenIndexTable, 1509 indexSourceColTypes, fuzzymessage) 1510 } 1511 1512 // makeOneInsertPlan generates plan branch for insert one table 1513 // sink_scan -> lock -> insert 1514 // sink_scan -> join -> filter (if table have fk. then append join node & filter node) 1515 // sink_scan -> Fuzzyfilter -- (if need to check pk duplicate) 1516 // table_scan -----^ 1517 func makeOneInsertPlan( 1518 ctx CompilerContext, builder *QueryBuilder, bindCtx *BindContext, objRef *ObjectRef, tableDef *TableDef, 1519 updateColLength int, sourceStep int32, addAffectedRows bool, isFkRecursionCall bool, updatePkCol bool, 1520 pkFilterExprs []*Expr, partitionExpr *Expr, ifExistAutoPkCol bool, ifCheckPkDup bool, 1521 indexSourceColTypes []*plan.Type, fuzzymessage *OriginTableMessageForFuzzy, 1522 ) (err error) { 1523 1524 // make plan : sink_scan -> lock -> insert 1525 appendPureInsertBranch(ctx, builder, bindCtx, objRef, tableDef, sourceStep, addAffectedRows) 1526 1527 // if table have fk. then append join node & filter node 1528 // sink_scan -> join -> filter 1529 if err = appendForeignConstrantPlan(builder, bindCtx, tableDef, objRef, sourceStep, isFkRecursionCall); err != nil { 1530 return err 1531 } 1532 1533 // there will be some cases that no need to check if primary key is duplicate 1534 // case 1: For SQL that contains on duplicate update 1535 // case 2: the only primary key is auto increment type 1536 // case 3: create hidden table for secondary index 1537 1538 isSecondaryHidden := strings.Contains(tableDef.Name, catalog.SecondaryIndexTableNamePrefix) 1539 if isSecondaryHidden { 1540 return nil 1541 } 1542 1543 if ifCheckPkDup && !ifExistAutoPkCol { 1544 if err = appendPrimaryConstrantPlan(builder, bindCtx, tableDef, objRef, partitionExpr, pkFilterExprs, 1545 indexSourceColTypes, sourceStep, updateColLength > 0, updatePkCol, fuzzymessage); err != nil { 1546 return err 1547 } 1548 } 1549 1550 return nil 1551 } 1552 1553 // appendPureInsertBranch appends the pure insert branch to the query builder. 1554 // It includes the sink scan node, project node (if necessary), and insert node. 1555 // The last node ID of the branch is returned. 
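// A minimal sketch of what this helper emits, assuming no partitions and no surplus
// projection columns:
//
//	sink_scan(sourceStep) -> insert(tableDef)
//
// A project node is placed in between when the incoming projection is wider than
// tableDef.Cols or the table is partitioned, so the partition expression can be
// carried alongside the row.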
1556 func appendPureInsertBranch(ctx CompilerContext, builder *QueryBuilder, bindCtx *BindContext, objRef *ObjectRef, tableDef *TableDef, sourceStep int32, addAffectedRows bool) { 1557 lastNodeId := appendSinkScanNode(builder, bindCtx, sourceStep) 1558 1559 // Get table partition information 1560 paritionTableIds, paritionTableNames := getPartTableIdsAndNames(ctx, objRef, tableDef) 1561 partitionIdx := -1 1562 1563 // append project node if necessary 1564 projectProjection := getProjectionByLastNode(builder, lastNodeId) 1565 if len(projectProjection) > len(tableDef.Cols) || tableDef.Partition != nil { 1566 if len(projectProjection) > len(tableDef.Cols) { 1567 projectProjection = projectProjection[:len(tableDef.Cols)] 1568 } 1569 partitionIdx = len(tableDef.Cols) 1570 if tableDef.Partition != nil { 1571 partitionExpr := DeepCopyExpr(tableDef.Partition.PartitionExpression) 1572 projectProjection = append(projectProjection, partitionExpr) 1573 } 1574 1575 projectNode := &Node{ 1576 NodeType: plan.Node_PROJECT, 1577 Children: []int32{lastNodeId}, 1578 ProjectList: projectProjection, 1579 } 1580 lastNodeId = builder.appendNode(projectNode, bindCtx) 1581 } 1582 1583 // append insert node 1584 insertProjection := getProjectionByLastNode(builder, lastNodeId) 1585 // in this case. insert columns in front of batch 1586 if len(insertProjection) > len(tableDef.Cols) { 1587 insertProjection = insertProjection[:len(tableDef.Cols)] 1588 } 1589 1590 insertNode := &Node{ 1591 NodeType: plan.Node_INSERT, 1592 Children: []int32{lastNodeId}, 1593 ObjRef: objRef, 1594 TableDef: tableDef, 1595 InsertCtx: &plan.InsertCtx{ 1596 Ref: objRef, 1597 AddAffectedRows: addAffectedRows, 1598 IsClusterTable: tableDef.TableType == catalog.SystemClusterRel, 1599 TableDef: tableDef, 1600 PartitionTableIds: paritionTableIds, 1601 PartitionTableNames: paritionTableNames, 1602 PartitionIdx: int32(partitionIdx), 1603 }, 1604 ProjectList: insertProjection, 1605 } 1606 lastNodeId = builder.appendNode(insertNode, bindCtx) 1607 builder.appendStep(lastNodeId) 1608 } 1609 1610 // makeOneDeletePlan 1611 // lock -> delete 1612 func makeOneDeletePlan( 1613 builder *QueryBuilder, 1614 bindCtx *BindContext, 1615 lastNodeId int32, 1616 delNodeInfo *deleteNodeInfo, 1617 isUK bool, // is delete unique key hidden table 1618 isSK bool, 1619 canTruncate bool, 1620 ) (int32, error) { 1621 if isUK || isSK { 1622 1623 // For the hidden table of the secondary index, there will be no null situation, only unique key hidden table need this filter 1624 if isUK { 1625 // append filter 1626 rowIdTyp := types.T_Rowid.ToType() 1627 rowIdColExpr := &plan.Expr{ 1628 Typ: makePlan2Type(&rowIdTyp), 1629 Expr: &plan.Expr_Col{ 1630 Col: &plan.ColRef{ 1631 ColPos: int32(delNodeInfo.deleteIndex), 1632 }, 1633 }, 1634 } 1635 filterExpr, err := BindFuncExprImplByPlanExpr(builder.GetContext(), "is_not_null", []*Expr{rowIdColExpr}) 1636 if err != nil { 1637 return -1, err 1638 } 1639 filterNode := &Node{ 1640 NodeType: plan.Node_FILTER, 1641 Children: []int32{lastNodeId}, 1642 FilterList: []*plan.Expr{filterExpr}, 1643 } 1644 lastNodeId = builder.appendNode(filterNode, bindCtx) 1645 } 1646 // append lock 1647 lockTarget := &plan.LockTarget{ 1648 TableId: delNodeInfo.tableDef.TblId, 1649 PrimaryColIdxInBat: int32(delNodeInfo.pkPos), 1650 PrimaryColTyp: delNodeInfo.pkTyp, 1651 RefreshTsIdxInBat: -1, //unsupport now 1652 // FilterColIdxInBat: int32(delNodeInfo.partitionIdx), 1653 LockTable: delNodeInfo.lockTable, 1654 } 1655 // if delNodeInfo.tableDef.Partition != nil { 
1656 // lockTarget.IsPartitionTable = true 1657 // lockTarget.PartitionTableIds = delNodeInfo.partTableIDs 1658 // } 1659 lockNode := &Node{ 1660 NodeType: plan.Node_LOCK_OP, 1661 Children: []int32{lastNodeId}, 1662 LockTargets: []*plan.LockTarget{lockTarget}, 1663 } 1664 lastNodeId = builder.appendNode(lockNode, bindCtx) 1665 } 1666 truncateTable := &plan.TruncateTable{} 1667 if canTruncate { 1668 tableDef := delNodeInfo.tableDef 1669 truncateTable.Table = tableDef.Name 1670 truncateTable.TableId = tableDef.TblId 1671 truncateTable.Database = delNodeInfo.objRef.SchemaName 1672 truncateTable.IndexTableNames = delNodeInfo.indexTableNames 1673 truncateTable.PartitionTableNames = delNodeInfo.partTableNames 1674 truncateTable.ForeignTbl = delNodeInfo.foreignTbl 1675 truncateTable.ClusterTable = &plan.ClusterTable{ 1676 IsClusterTable: util.TableIsClusterTable(tableDef.GetTableType()), 1677 } 1678 truncateTable.IsDelete = true 1679 } 1680 // append delete node 1681 deleteNode := &Node{ 1682 NodeType: plan.Node_DELETE, 1683 Children: []int32{lastNodeId}, 1684 // ProjectList: getProjectionByLastNode(builder, lastNodeId), 1685 DeleteCtx: &plan.DeleteCtx{ 1686 TableDef: delNodeInfo.tableDef, 1687 RowIdIdx: int32(delNodeInfo.deleteIndex), 1688 Ref: delNodeInfo.objRef, 1689 CanTruncate: canTruncate, 1690 AddAffectedRows: delNodeInfo.addAffectedRows, 1691 IsClusterTable: delNodeInfo.IsClusterTable, 1692 PartitionTableIds: delNodeInfo.partTableIDs, 1693 PartitionTableNames: delNodeInfo.partTableNames, 1694 PartitionIdx: int32(delNodeInfo.partitionIdx), 1695 PrimaryKeyIdx: int32(delNodeInfo.pkPos), 1696 TruncateTable: truncateTable, 1697 }, 1698 } 1699 lastNodeId = builder.appendNode(deleteNode, bindCtx) 1700 1701 return lastNodeId, nil 1702 } 1703 1704 func getProjectionByLastNodeForRightJoin(builder *QueryBuilder, lastNodeId int32) []*Expr { 1705 lastNode := builder.qry.Nodes[lastNodeId] 1706 projLength := len(lastNode.ProjectList) 1707 if projLength == 0 { 1708 return getProjectionByLastNode(builder, lastNode.Children[0]) 1709 } 1710 projection := make([]*Expr, len(lastNode.ProjectList)) 1711 for i, expr := range lastNode.ProjectList { 1712 name := "" 1713 if col, ok := expr.Expr.(*plan.Expr_Col); ok { 1714 name = col.Col.Name 1715 } 1716 projection[i] = &plan.Expr{ 1717 Typ: expr.Typ, 1718 Expr: &plan.Expr_Col{ 1719 Col: &plan.ColRef{ 1720 RelPos: 1, 1721 ColPos: int32(i), 1722 Name: name, 1723 }, 1724 }, 1725 } 1726 } 1727 return projection 1728 } 1729 1730 func getProjectionByLastNode(builder *QueryBuilder, lastNodeId int32) []*Expr { 1731 lastNode := builder.qry.Nodes[lastNodeId] 1732 projLength := len(lastNode.ProjectList) 1733 if projLength == 0 { 1734 return getProjectionByLastNode(builder, lastNode.Children[0]) 1735 } 1736 projection := make([]*Expr, len(lastNode.ProjectList)) 1737 for i, expr := range lastNode.ProjectList { 1738 name := "" 1739 if col, ok := expr.Expr.(*plan.Expr_Col); ok { 1740 name = col.Col.Name 1741 } 1742 projection[i] = &plan.Expr{ 1743 Typ: expr.Typ, 1744 Expr: &plan.Expr_Col{ 1745 Col: &plan.ColRef{ 1746 RelPos: 0, 1747 ColPos: int32(i), 1748 Name: name, 1749 }, 1750 }, 1751 } 1752 } 1753 return projection 1754 } 1755 1756 func getProjectionByLastNodeWithTag(builder *QueryBuilder, lastNodeId, tag int32) []*Expr { 1757 lastNode := builder.qry.Nodes[lastNodeId] 1758 projLength := len(lastNode.ProjectList) 1759 if projLength == 0 { 1760 return getProjectionByLastNodeWithTag(builder, lastNode.Children[0], tag) 1761 } 1762 projection := make([]*Expr, 
len(lastNode.ProjectList))
1763 for i, expr := range lastNode.ProjectList {
1764 name := ""
1765 if col, ok := expr.Expr.(*plan.Expr_Col); ok {
1766 name = col.Col.Name
1767 }
1768 projection[i] = &plan.Expr{
1769 Typ: expr.Typ,
1770 Expr: &plan.Expr_Col{
1771 Col: &plan.ColRef{
1772 RelPos: lastNode.BindingTags[0],
1773 ColPos: int32(i),
1774 Name: name,
1775 },
1776 },
1777 }
1778 }
1779 return projection
1780 }
1781
1782 func haveUniqueKey(tableDef *TableDef) bool {
1783 for _, indexdef := range tableDef.Indexes {
1784 if indexdef.Unique {
1785 return true
1786 }
1787 }
1788 return false
1789 }
1790
1791 func haveSecondaryKey(tableDef *TableDef) bool {
1792 for _, indexdef := range tableDef.Indexes {
1793 if !indexdef.Unique && indexdef.TableExist {
1794 return true
1795 }
1796 }
1797 return false
1798 }
1799
1800 // Check if the unique key is the primary key of the table
1801 // When the unique key meets the following conditions, it is treated as the primary key of the table
1802 // 1. There is no primary key in the table.
1803 // 2. The unique key is the only unique key of the table.
1804 // 3. None of the unique key's columns is nullable.
1805 func isPrimaryKey(tableDef *TableDef, colNames []string) bool {
1806 // Ensure there is no real primary key in the table.
1807 // FakePrimaryKeyColName is for tables without a primary key.
1808 // So we need to exclude FakePrimaryKeyColName.
1809 if len(tableDef.Pkey.Names) != 1 {
1810 return false
1811 }
1812 if tableDef.Pkey.Names[0] != catalog.FakePrimaryKeyColName {
1813 return false
1814 }
1815 // Ensure the unique key is the only unique key of the table.
1816 uniqueKeyCount := 0
1817 for _, indexdef := range tableDef.Indexes {
1818 if indexdef.Unique {
1819 uniqueKeyCount++
1820 }
1821 }
1822 // Ensure none of the unique key's columns is nullable.
1823 if uniqueKeyCount == 1 {
1824 for _, col := range tableDef.Cols {
1825 for _, colName := range colNames {
1826 if col.Name == colName {
1827 if col.Default.NullAbility {
1828 return false
1829 }
1830 }
1831 }
1832 }
1833 return true
1834 }
1835 return false
1836 }
1837
1838 // Check if the unique key is the multiple primary key of the table
1839 // When the unique key contains more than one column, it is the multiple primary key of the table.
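// For example, a table declared with UNIQUE KEY uk (a, b) yields an IndexDef with two
// entries in Parts, so it is treated as a multiple (composite) key here.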
1840 func isMultiplePriKey(indexdef *plan.IndexDef) bool {
1841 return len(indexdef.Parts) > 1
1842 }
1843
1844 // makeDeleteNodeInfo builds the deleteNodeInfo for a `DeleteNode` based on the TableDef
1845 func makeDeleteNodeInfo(ctx CompilerContext, objRef *ObjectRef, tableDef *TableDef,
1846 deleteIdx int, partitionIdx int, addAffectedRows bool, pkPos int, pkTyp Type, lockTable bool, partitionInfos map[uint64]*partSubTableInfo) *deleteNodeInfo {
1847 delNodeInfo := getDeleteNodeInfo()
1848 delNodeInfo.objRef = objRef
1849 delNodeInfo.tableDef = tableDef
1850 delNodeInfo.deleteIndex = deleteIdx
1851 delNodeInfo.partitionIdx = partitionIdx
1852 delNodeInfo.addAffectedRows = addAffectedRows
1853 delNodeInfo.IsClusterTable = tableDef.TableType == catalog.SystemClusterRel
1854 delNodeInfo.pkPos = pkPos
1855 delNodeInfo.pkTyp = pkTyp
1856 delNodeInfo.lockTable = lockTable
1857
1858 if tableDef.Partition != nil {
1859 if partSubs := partitionInfos[tableDef.GetTblId()]; partSubs != nil {
1860 delNodeInfo.partTableIDs = partSubs.partTableIDs
1861 delNodeInfo.partTableNames = partSubs.partTableNames
1862 } else {
1863 partTableIds := make([]uint64, tableDef.Partition.PartitionNum)
1864 partTableNames := make([]string, tableDef.Partition.PartitionNum)
1865 for i, partition := range tableDef.Partition.Partitions {
1866 _, partTableDef := ctx.Resolve(objRef.SchemaName, partition.PartitionTableName, Snapshot{TS: &timestamp.Timestamp{}})
1867 partTableIds[i] = partTableDef.TblId
1868 partTableNames[i] = partition.PartitionTableName
1869 }
1870 delNodeInfo.partTableIDs = partTableIds
1871 delNodeInfo.partTableNames = partTableNames
1872 partitionInfos[tableDef.GetTblId()] = &partSubTableInfo{
1873 partTableIDs: partTableIds,
1874 partTableNames: partTableNames,
1875 }
1876 }
1877
1878 }
1879 if tableDef.Fkeys != nil {
1880 for _, fk := range tableDef.Fkeys {
1881 delNodeInfo.foreignTbl = append(delNodeInfo.foreignTbl, fk.ForeignTbl)
1882 }
1883 }
1884 if tableDef.Indexes != nil {
1885 for _, indexdef := range tableDef.Indexes {
1886 if indexdef.TableExist {
1887 if catalog.IsRegularIndexAlgo(indexdef.IndexAlgo) {
1888 delNodeInfo.indexTableNames = append(delNodeInfo.indexTableNames, indexdef.IndexTableName)
1889 } else if catalog.IsIvfIndexAlgo(indexdef.IndexAlgo) {
1890 // apply deletes only to the entries table.
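// (Of the three IVF hidden tables - metadata, centroids and entries - only the entries
// table carries per-row data for the base table, so it is the only one registered for
// row-level deletes; see the note in appendDeleteIvfTablePlan about older-version
// entries going stale.)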
1891 if indexdef.IndexAlgoTableType == catalog.SystemSI_IVFFLAT_TblType_Entries {
1892 delNodeInfo.indexTableNames = append(delNodeInfo.indexTableNames, indexdef.IndexTableName)
1893 }
1894 } else if catalog.IsMasterIndexAlgo(indexdef.IndexAlgo) {
1895 delNodeInfo.indexTableNames = append(delNodeInfo.indexTableNames, indexdef.IndexTableName)
1896 }
1897 }
1898 }
1899 }
1900 return delNodeInfo
1901 }
1902
1903 // Get sub tableIds and table names of the partition table
1904 func getPartTableIdsAndNames(ctx CompilerContext, objRef *ObjectRef, tableDef *TableDef) ([]uint64, []string) {
1905 var partTableIds []uint64
1906 var partTableNames []string
1907 if tableDef.Partition != nil {
1908 partTableIds = make([]uint64, tableDef.Partition.PartitionNum)
1909 partTableNames = make([]string, tableDef.Partition.PartitionNum)
1910 for i, partition := range tableDef.Partition.Partitions {
1911 _, partTableDef := ctx.Resolve(objRef.SchemaName, partition.PartitionTableName, Snapshot{TS: &timestamp.Timestamp{}})
1912 partTableIds[i] = partTableDef.TblId
1913 partTableNames[i] = partition.PartitionTableName
1914 }
1915 }
1916 return partTableIds, partTableNames
1917 }
1918
1919 func appendSinkScanNode(builder *QueryBuilder, bindCtx *BindContext, sourceStep int32) int32 {
1920 lastNodeId := builder.qry.Steps[sourceStep]
1921 // lastNode := builder.qry.Nodes[lastNodeId]
1922 sinkScanProject := getProjectionByLastNode(builder, lastNodeId)
1923 sinkScanNode := &Node{
1924 NodeType: plan.Node_SINK_SCAN,
1925 SourceStep: []int32{sourceStep},
1926 ProjectList: sinkScanProject,
1927 }
1928 lastNodeId = builder.appendNode(sinkScanNode, bindCtx)
1929 return lastNodeId
1930 }
1931
1932 func appendSinkScanNodeWithTag(builder *QueryBuilder, bindCtx *BindContext, sourceStep, tag int32) int32 {
1933 lastNodeId := builder.qry.Steps[sourceStep]
1934 // lastNode := builder.qry.Nodes[lastNodeId]
1935 sinkScanProject := getProjectionByLastNodeWithTag(builder, lastNodeId, tag)
1936 sinkScanNode := &Node{
1937 NodeType: plan.Node_SINK_SCAN,
1938 SourceStep: []int32{sourceStep},
1939 ProjectList: sinkScanProject,
1940 BindingTags: []int32{tag},
1941 TableDef: &TableDef{Name: bindCtx.cteName},
1942 }
1943 b := bindCtx.bindings[0]
1944 sinkScanNode.TableDef.Cols = make([]*ColDef, len(b.cols))
1945 for i, col := range b.cols {
1946 sinkScanNode.TableDef.Cols[i] = &ColDef{
1947 Name: col,
1948 Hidden: b.colIsHidden[i],
1949 Typ: *b.types[i],
1950 }
1951 }
1952 lastNodeId = builder.appendNode(sinkScanNode, bindCtx)
1953 return lastNodeId
1954 }
1955
1956 func appendRecursiveScanNode(builder *QueryBuilder, bindCtx *BindContext, sourceStep, tag int32) int32 {
1957 lastNodeId := builder.qry.Steps[sourceStep]
1958 // lastNode := builder.qry.Nodes[lastNodeId]
1959 recursiveScanProject := getProjectionByLastNodeWithTag(builder, lastNodeId, tag)
1960 recursiveScanNode := &Node{
1961 NodeType: plan.Node_RECURSIVE_SCAN,
1962 SourceStep: []int32{sourceStep},
1963 ProjectList: recursiveScanProject,
1964 BindingTags: []int32{tag},
1965 TableDef: &TableDef{Name: bindCtx.cteName},
1966 }
1967 b := bindCtx.bindings[0]
1968 recursiveScanNode.TableDef.Cols = make([]*ColDef, len(b.cols))
1969 for i, col := range b.cols {
1970 recursiveScanNode.TableDef.Cols[i] = &ColDef{
1971 Name: col,
1972 Hidden: b.colIsHidden[i],
1973 Typ: *b.types[i],
1974 }
1975 }
1976 lastNodeId = builder.appendNode(recursiveScanNode, bindCtx)
1977 return lastNodeId
1978 }
1979
1980 func appendCTEScanNode(builder *QueryBuilder, bindCtx *BindContext, sourceStep, tag int32) int32 {
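// Like appendRecursiveScanNode above, but emits a RECURSIVE_CTE node; it only forwards
// the tagged projection and does not attach a TableDef or column list.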
1981 lastNodeId := builder.qry.Steps[sourceStep] 1982 // lastNode := builder.qry.Nodes[lastNodeId] 1983 recursiveScanProject := getProjectionByLastNodeWithTag(builder, lastNodeId, tag) 1984 recursiveScanNode := &Node{ 1985 NodeType: plan.Node_RECURSIVE_CTE, 1986 SourceStep: []int32{sourceStep}, 1987 ProjectList: recursiveScanProject, 1988 BindingTags: []int32{tag}, 1989 } 1990 lastNodeId = builder.appendNode(recursiveScanNode, bindCtx) 1991 return lastNodeId 1992 } 1993 1994 func appendSinkNode(builder *QueryBuilder, bindCtx *BindContext, lastNodeId int32) int32 { 1995 sinkProject := getProjectionByLastNode(builder, lastNodeId) 1996 sinkNode := &Node{ 1997 NodeType: plan.Node_SINK, 1998 Children: []int32{lastNodeId}, 1999 ProjectList: sinkProject, 2000 } 2001 lastNodeId = builder.appendNode(sinkNode, bindCtx) 2002 return lastNodeId 2003 } 2004 2005 func appendSinkNodeWithTag(builder *QueryBuilder, bindCtx *BindContext, lastNodeId, tag int32) int32 { 2006 sinkProject := getProjectionByLastNodeWithTag(builder, lastNodeId, tag) 2007 sinkNode := &Node{ 2008 NodeType: plan.Node_SINK, 2009 Children: []int32{lastNodeId}, 2010 ProjectList: sinkProject, 2011 BindingTags: []int32{tag}, 2012 } 2013 lastNodeId = builder.appendNode(sinkNode, bindCtx) 2014 return lastNodeId 2015 } 2016 2017 // func appendFuzzyFilterByColExpf(builder *QueryBuilder, bindCtx *BindContext, lastNodeId int32) (int32, error) { 2018 // fuzzyFilterNode := &Node{ 2019 // NodeType: plan.Node_FUZZY_FILTER, 2020 // Children: []int32{lastNodeId}, 2021 // } 2022 // lastNodeId = builder.appendNode(fuzzyFilterNode, bindCtx) 2023 // return lastNodeId, nil 2024 // } 2025 2026 func appendAggCountGroupByColExpr(builder *QueryBuilder, bindCtx *BindContext, lastNodeId int32, colExpr *plan.Expr) (int32, error) { 2027 aggExpr, err := BindFuncExprImplByPlanExpr(builder.GetContext(), "starcount", []*Expr{colExpr}) 2028 if err != nil { 2029 return -1, err 2030 } 2031 2032 countType := types.T_int64.ToType() 2033 groupByNode := &Node{ 2034 NodeType: plan.Node_AGG, 2035 Children: []int32{lastNodeId}, 2036 GroupBy: []*Expr{colExpr}, 2037 AggList: []*Expr{aggExpr}, 2038 ProjectList: []*Expr{ 2039 { 2040 Typ: makePlan2Type(&countType), 2041 Expr: &plan.Expr_Col{ 2042 Col: &plan.ColRef{ 2043 RelPos: -2, 2044 ColPos: 1, 2045 }, 2046 }, 2047 }, 2048 { 2049 Typ: colExpr.Typ, 2050 Expr: &plan.Expr_Col{ 2051 Col: &plan.ColRef{ 2052 RelPos: -2, 2053 ColPos: 0, 2054 }, 2055 }, 2056 }}, 2057 } 2058 lastNodeId = builder.appendNode(groupByNode, bindCtx) 2059 return lastNodeId, nil 2060 } 2061 2062 func getPkPos(tableDef *TableDef, ignoreFakePK bool) (int, Type) { 2063 pkName := tableDef.Pkey.PkeyColName 2064 // if pkName == catalog.CPrimaryKeyColName { 2065 // return len(tableDef.Cols) - 1, makeHiddenColTyp() 2066 // } 2067 for i, col := range tableDef.Cols { 2068 if col.Name == pkName { 2069 if ignoreFakePK && col.Name == catalog.FakePrimaryKeyColName { 2070 continue 2071 } 2072 return i, col.Typ 2073 } 2074 } 2075 return -1, Type{} 2076 } 2077 2078 func getRowIdPos(tableDef *TableDef) int { 2079 for i, col := range tableDef.Cols { 2080 if col.Name == catalog.Row_ID { 2081 return i 2082 } 2083 } 2084 return -1 2085 } 2086 2087 func getHiddenColumnForPreInsert(tableDef *TableDef) ([]Type, []string) { 2088 var typs []Type 2089 var names []string 2090 if tableDef.Pkey != nil && tableDef.Pkey.PkeyColName == catalog.CPrimaryKeyColName { 2091 typs = append(typs, makeHiddenColTyp()) 2092 names = append(names, catalog.CPrimaryKeyColName) 2093 } else if 
tableDef.ClusterBy != nil && util.JudgeIsCompositeClusterByColumn(tableDef.ClusterBy.Name) { 2094 typs = append(typs, makeHiddenColTyp()) 2095 names = append(names, tableDef.ClusterBy.Name) 2096 } 2097 return typs, names 2098 } 2099 2100 // appendPreDeleteNode build predelete node. 2101 func appendPreDeleteNode(builder *QueryBuilder, bindCtx *BindContext, objRef *ObjectRef, tableDef *TableDef, lastNodeId int32) int32 { 2102 projection := getProjectionByLastNode(builder, lastNodeId) 2103 partitionExpr := DeepCopyExpr(tableDef.Partition.PartitionExpression) 2104 projection = append(projection, partitionExpr) 2105 2106 preDeleteNode := &Node{ 2107 NodeType: plan.Node_PRE_DELETE, 2108 ObjRef: objRef, 2109 Children: []int32{lastNodeId}, 2110 ProjectList: projection, 2111 } 2112 return builder.appendNode(preDeleteNode, bindCtx) 2113 } 2114 2115 func appendJoinNodeForParentFkCheck(builder *QueryBuilder, bindCtx *BindContext, objRef *ObjectRef, tableDef *TableDef, baseNodeId int32) (int32, error) { 2116 typMap := make(map[string]plan.Type) 2117 id2name := make(map[uint64]string) 2118 name2pos := make(map[string]int) 2119 for i, col := range tableDef.Cols { 2120 typMap[col.Name] = col.Typ 2121 id2name[col.ColId] = col.Name 2122 name2pos[col.Name] = i 2123 } 2124 2125 //for stmt: update c1 set ref_col = null where col > 0; 2126 //we will skip foreign key constraint check when set null 2127 projectProjection := getProjectionByLastNode(builder, baseNodeId) 2128 baseNodeId = builder.appendNode(&Node{ 2129 NodeType: plan.Node_PROJECT, 2130 Children: []int32{baseNodeId}, 2131 ProjectList: projectProjection, 2132 }, bindCtx) 2133 2134 var filterConds []*Expr 2135 for _, fk := range tableDef.Fkeys { 2136 for _, colId := range fk.Cols { 2137 for fIdx, col := range tableDef.Cols { 2138 if col.ColId == colId { 2139 colExpr := &Expr{ 2140 Typ: col.Typ, 2141 Expr: &plan.Expr_Col{ 2142 Col: &plan.ColRef{ 2143 ColPos: int32(fIdx), 2144 Name: col.Name, 2145 }, 2146 }, 2147 } 2148 condExpr, err := BindFuncExprImplByPlanExpr(builder.GetContext(), "isnotnull", []*Expr{colExpr}) 2149 if err != nil { 2150 return -1, err 2151 } 2152 filterConds = append(filterConds, condExpr) 2153 } 2154 } 2155 } 2156 } 2157 baseNodeId = builder.appendNode(&Node{ 2158 NodeType: plan.Node_FILTER, 2159 Children: []int32{baseNodeId}, 2160 FilterList: filterConds, 2161 // ProjectList: projectProjection, 2162 }, bindCtx) 2163 2164 lastNodeId := baseNodeId 2165 for _, fk := range tableDef.Fkeys { 2166 if fk.ForeignTbl == 0 { 2167 //skip fk self refer 2168 continue 2169 } 2170 2171 fkeyId2Idx := make(map[uint64]int) 2172 for i, colId := range fk.ForeignCols { 2173 fkeyId2Idx[colId] = i 2174 } 2175 2176 parentObjRef, parentTableDef := builder.compCtx.ResolveById(fk.ForeignTbl, Snapshot{TS: ×tamp.Timestamp{}}) 2177 if parentTableDef == nil { 2178 return -1, moerr.NewInternalError(builder.GetContext(), "parent table %d not found", fk.ForeignTbl) 2179 } 2180 newTableDef := DeepCopyTableDef(parentTableDef, false) 2181 joinConds := make([]*plan.Expr, 0) 2182 for _, col := range parentTableDef.Cols { 2183 if fkIdx, ok := fkeyId2Idx[col.ColId]; ok { 2184 rightPos := len(newTableDef.Cols) 2185 newTableDef.Cols = append(newTableDef.Cols, DeepCopyColDef(col)) 2186 2187 parentColumnName := col.Name 2188 childColumnName := id2name[fk.Cols[fkIdx]] 2189 2190 leftExpr := &Expr{ 2191 Typ: typMap[childColumnName], 2192 Expr: &plan.Expr_Col{ 2193 Col: &plan.ColRef{ 2194 RelPos: 0, 2195 ColPos: int32(name2pos[childColumnName]), 2196 Name: childColumnName, 2197 }, 
2198 }, 2199 } 2200 rightExpr := &plan.Expr{ 2201 Typ: col.Typ, 2202 Expr: &plan.Expr_Col{ 2203 Col: &plan.ColRef{ 2204 RelPos: 1, 2205 ColPos: int32(rightPos), 2206 Name: parentColumnName, 2207 }, 2208 }, 2209 } 2210 condExpr, err := BindFuncExprImplByPlanExpr(builder.GetContext(), "=", []*Expr{leftExpr, rightExpr}) 2211 if err != nil { 2212 return -1, err 2213 } 2214 joinConds = append(joinConds, condExpr) 2215 } 2216 } 2217 2218 parentTableDef = newTableDef 2219 2220 // append table scan node 2221 scanNodeProject := make([]*Expr, len(parentTableDef.Cols)) 2222 for colIdx, col := range parentTableDef.Cols { 2223 scanNodeProject[colIdx] = &plan.Expr{ 2224 Typ: col.Typ, 2225 Expr: &plan.Expr_Col{ 2226 Col: &plan.ColRef{ 2227 ColPos: int32(colIdx), 2228 Name: col.Name, 2229 }, 2230 }, 2231 } 2232 } 2233 rightId := builder.appendNode(&plan.Node{ 2234 NodeType: plan.Node_TABLE_SCAN, 2235 Stats: &plan.Stats{}, 2236 ObjRef: parentObjRef, 2237 TableDef: parentTableDef, 2238 ProjectList: scanNodeProject, 2239 }, bindCtx) 2240 2241 projectList := getProjectionByLastNode(builder, lastNodeId) 2242 2243 // append project 2244 projectList = append(projectList, &Expr{ 2245 Typ: parentTableDef.Cols[0].Typ, 2246 Expr: &plan.Expr_Col{ 2247 Col: &plan.ColRef{ 2248 RelPos: 1, 2249 ColPos: 0, 2250 Name: parentTableDef.Cols[0].Name, 2251 }, 2252 }, 2253 }) 2254 2255 // append join node 2256 lastNodeId = builder.appendNode(&plan.Node{ 2257 NodeType: plan.Node_JOIN, 2258 Children: []int32{lastNodeId, rightId}, 2259 JoinType: plan.Node_LEFT, 2260 OnList: joinConds, 2261 ProjectList: projectList, 2262 }, bindCtx) 2263 } 2264 2265 if lastNodeId == baseNodeId { 2266 //all fk are fk self refer 2267 return -1, nil 2268 } 2269 2270 return lastNodeId, nil 2271 } 2272 2273 // appendPreInsertNode append preinsert node 2274 func appendPreInsertNode(builder *QueryBuilder, bindCtx *BindContext, 2275 objRef *ObjectRef, tableDef *TableDef, 2276 lastNodeId int32, isUpdate bool) int32 { 2277 2278 preInsertProjection := getProjectionByLastNode(builder, lastNodeId) 2279 hiddenColumnTyp, hiddenColumnName := getHiddenColumnForPreInsert(tableDef) 2280 2281 hashAutoCol := false 2282 for _, col := range tableDef.Cols { 2283 if col.Typ.AutoIncr { 2284 // for insert allways set true when col.Typ.AutoIncr 2285 // todo for update 2286 hashAutoCol = true 2287 break 2288 } 2289 } 2290 if len(hiddenColumnTyp) > 0 { 2291 if isUpdate { 2292 rowIdProj := preInsertProjection[len(preInsertProjection)-1] 2293 preInsertProjection = preInsertProjection[:len(preInsertProjection)-1] 2294 for i, typ := range hiddenColumnTyp { 2295 preInsertProjection = append(preInsertProjection, &plan.Expr{ 2296 Typ: typ, 2297 Expr: &plan.Expr_Col{Col: &plan.ColRef{ 2298 RelPos: -1, 2299 ColPos: int32(i), 2300 Name: hiddenColumnName[i], 2301 }}, 2302 }) 2303 } 2304 preInsertProjection = append(preInsertProjection, rowIdProj) 2305 } else { 2306 for i, typ := range hiddenColumnTyp { 2307 preInsertProjection = append(preInsertProjection, &plan.Expr{ 2308 Typ: typ, 2309 Expr: &plan.Expr_Col{Col: &plan.ColRef{ 2310 RelPos: -1, 2311 ColPos: int32(i), 2312 Name: hiddenColumnName[i], 2313 }}, 2314 }) 2315 } 2316 } 2317 } 2318 2319 preInsertNode := &Node{ 2320 NodeType: plan.Node_PRE_INSERT, 2321 Children: []int32{lastNodeId}, 2322 ProjectList: preInsertProjection, 2323 PreInsertCtx: &plan.PreInsertCtx{ 2324 Ref: objRef, 2325 TableDef: DeepCopyTableDef(tableDef, true), 2326 HasAutoCol: hashAutoCol, 2327 IsUpdate: isUpdate, 2328 }, 2329 } 2330 lastNodeId = 
builder.appendNode(preInsertNode, bindCtx) 2331 2332 // append hidden column to tableDef 2333 if tableDef.Pkey != nil && tableDef.Pkey.PkeyColName == catalog.CPrimaryKeyColName { 2334 tableDef.Cols = append(tableDef.Cols, tableDef.Pkey.CompPkeyCol) 2335 } 2336 if tableDef.ClusterBy != nil && util.JudgeIsCompositeClusterByColumn(tableDef.ClusterBy.Name) { 2337 tableDef.Cols = append(tableDef.Cols, tableDef.ClusterBy.CompCbkeyCol) 2338 } 2339 2340 // Get table partition information 2341 partTableIds, _ := getPartTableIdsAndNames(builder.compCtx, objRef, tableDef) 2342 // append project node 2343 projectProjection := getProjectionByLastNode(builder, lastNodeId) 2344 partitionIdx := -1 2345 2346 if tableDef.Partition != nil { 2347 partitionIdx = len(projectProjection) 2348 partitionExpr := DeepCopyExpr(tableDef.Partition.PartitionExpression) 2349 projectProjection = append(projectProjection, partitionExpr) 2350 2351 projectNode := &Node{ 2352 NodeType: plan.Node_PROJECT, 2353 Children: []int32{lastNodeId}, 2354 ProjectList: projectProjection, 2355 } 2356 lastNodeId = builder.appendNode(projectNode, bindCtx) 2357 } 2358 2359 if !isUpdate { 2360 if lockNodeId, ok := appendLockNode( 2361 builder, 2362 bindCtx, 2363 lastNodeId, 2364 tableDef, 2365 false, 2366 false, 2367 partitionIdx, 2368 partTableIds, 2369 isUpdate, 2370 ); ok { 2371 lastNodeId = lockNodeId 2372 } 2373 } 2374 2375 return lastNodeId 2376 } 2377 2378 // appendPreInsertSkMasterPlan append preinsert node 2379 func appendPreInsertSkMasterPlan(builder *QueryBuilder, 2380 bindCtx *BindContext, 2381 tableDef *TableDef, 2382 indexIdx int, 2383 isUpdate bool, 2384 indexTableDef *TableDef, 2385 genLastNodeIdFn func() int32) (int32, error) { 2386 2387 // 1. init details 2388 idxDef := tableDef.Indexes[indexIdx] 2389 originPkPos, originPkType := getPkPos(tableDef, false) 2390 //var rowIdPos int 2391 //var rowIdType *Type 2392 2393 colsPos := make(map[string]int) 2394 colsType := make(map[string]*Type) 2395 for i, colVal := range tableDef.Cols { 2396 //if colVal.Name == catalog.Row_ID { 2397 // rowIdPos = i 2398 // rowIdType = colVal.Typ 2399 //} 2400 colsPos[colVal.Name] = i 2401 colsType[colVal.Name] = &tableDef.Cols[i].Typ 2402 } 2403 2404 var lastNodeId int32 2405 2406 // 2. build single project or union based on the number of index parts. 2407 // NOTE: Union with single child will cause panic. 2408 if len(idxDef.Parts) == 0 { 2409 return -1, moerr.NewInternalErrorNoCtx("index parts is empty. file a bug") 2410 } else if len(idxDef.Parts) == 1 { 2411 // 2.a build single project 2412 projectNode, err := buildSerialFullAndPKColsProjMasterIndex(builder, bindCtx, tableDef, genLastNodeIdFn, originPkPos, idxDef.Parts[0], colsType, colsPos, originPkType) 2413 if err != nil { 2414 return -1, err 2415 } 2416 lastNodeId = builder.appendNode(projectNode, bindCtx) 2417 } else { 2418 // 2.b build union in pairs. 
ie union(c, union(b,a)) 2419 2420 // a) build all the projects 2421 var unionChildren []int32 2422 for _, part := range idxDef.Parts { 2423 // 2.b.i build project 2424 projectNode, err := buildSerialFullAndPKColsProjMasterIndex(builder, bindCtx, tableDef, genLastNodeIdFn, originPkPos, part, colsType, colsPos, originPkType) 2425 if err != nil { 2426 return -1, err 2427 } 2428 // 2.b.ii add to union's list 2429 unionChildren = append(unionChildren, builder.appendNode(projectNode, bindCtx)) 2430 } 2431 2432 // b) get projectList 2433 outputProj := getProjectionByLastNode(builder, unionChildren[0]) 2434 2435 // c) build union in pairs 2436 lastNodeId = unionChildren[0] 2437 for _, nextProjectId := range unionChildren[1:] { // NOTE: we start from the 2nd item 2438 lastNodeId = builder.appendNode(&plan.Node{ 2439 NodeType: plan.Node_UNION, 2440 Children: []int32{nextProjectId, lastNodeId}, 2441 ProjectList: outputProj, 2442 }, bindCtx) 2443 } 2444 2445 // NOTE: we could merge the len==1 and len>1 cases, but keeping it separate to make help understand how the 2446 // union works (ie it works in pairs) 2447 } 2448 2449 // 3. add lock 2450 if lockNodeId, ok := appendLockNode( 2451 builder, 2452 bindCtx, 2453 lastNodeId, 2454 indexTableDef, 2455 false, 2456 false, 2457 -1, 2458 nil, 2459 isUpdate, 2460 ); ok { 2461 lastNodeId = lockNodeId 2462 } 2463 2464 lastNodeId = appendSinkNode(builder, bindCtx, lastNodeId) 2465 newSourceStep := builder.appendStep(lastNodeId) 2466 2467 return newSourceStep, nil 2468 } 2469 2470 func buildSerialFullAndPKColsProjMasterIndex(builder *QueryBuilder, bindCtx *BindContext, tableDef *TableDef, genLastNodeIdFn func() int32, originPkPos int, part string, colsType map[string]*Type, colsPos map[string]int, originPkType Type) (*Node, error) { 2471 var err error 2472 // 1. get new source sink 2473 var currLastNodeId = genLastNodeIdFn() 2474 2475 //2. recompute CP PK. 2476 currLastNodeId = recomputeMoCPKeyViaProjection(builder, bindCtx, tableDef, currLastNodeId, originPkPos) 2477 2478 //3. 
add a new project for < serial_full("0", a, pk), pk > 2479 projectProjection := make([]*Expr, 2) 2480 2481 //3.i build serial_full("0", a, pk) 2482 serialArgs := make([]*plan.Expr, 3) 2483 serialArgs[0] = makePlan2StringConstExprWithType(getColSeqFromColDef(tableDef.Cols[colsPos[part]])) 2484 serialArgs[1] = &Expr{ 2485 Typ: *colsType[part], 2486 Expr: &plan.Expr_Col{ 2487 Col: &plan.ColRef{ 2488 RelPos: 0, 2489 ColPos: int32(colsPos[part]), 2490 Name: part, 2491 }, 2492 }, 2493 } 2494 serialArgs[2] = &Expr{ 2495 Typ: originPkType, 2496 Expr: &plan.Expr_Col{ 2497 Col: &plan.ColRef{ 2498 RelPos: 0, 2499 ColPos: int32(originPkPos), 2500 Name: tableDef.Cols[originPkPos].Name, 2501 }, 2502 }, 2503 } 2504 projectProjection[0], err = BindFuncExprImplByPlanExpr(builder.GetContext(), "serial_full", serialArgs) 2505 if err != nil { 2506 return nil, err 2507 } 2508 2509 //3.ii build pk 2510 projectProjection[1] = &Expr{ 2511 Typ: originPkType, 2512 Expr: &plan.Expr_Col{ 2513 Col: &plan.ColRef{ 2514 RelPos: 0, 2515 ColPos: int32(originPkPos), 2516 Name: tableDef.Cols[originPkPos].Name, 2517 }, 2518 }, 2519 } 2520 2521 // TODO: verify this with Feng, Ouyuanning and Qingx (not reusing the row_id) 2522 //if isUpdate { 2523 // // 2.iii add row_id if Update 2524 // projectProjection = append(projectProjection, &plan.Expr{ 2525 // Typ: rowIdType, 2526 // Expr: &plan.Expr_Col{ 2527 // Col: &plan.ColRef{ 2528 // RelPos: 0, 2529 // ColPos: int32(rowIdPos), 2530 // Name: catalog.Row_ID, 2531 // }, 2532 // }, 2533 // }) 2534 //} 2535 2536 projectNode := &Node{ 2537 NodeType: plan.Node_PROJECT, 2538 Children: []int32{currLastNodeId}, 2539 ProjectList: projectProjection, 2540 } 2541 return projectNode, nil 2542 } 2543 2544 func appendPreInsertSkVectorPlan( 2545 builder *QueryBuilder, 2546 bindCtx *BindContext, 2547 tableDef *TableDef, 2548 lastNodeId, lastNodeIdForTblJoinCentroids int32, 2549 multiTableIndex *MultiTableIndex, 2550 isUpdate bool, 2551 idxRefs []*ObjectRef, 2552 indexTableDefs []*TableDef) (int32, error) { 2553 2554 /* 2555 ### Sample SQL: 2556 INSERT INTO `a`.`__mo_index_secondary_018ebbd4-ebb7-7898-b0bb-3b133af1905e` 2557 ( 2558 `__mo_index_centroid_fk_version`, 2559 `__mo_index_centroid_fk_id`, 2560 `__mo_index_pri_col`, 2561 `__mo_index_centroid_fk_entry` 2562 ) 2563 SELECT `__mo_index_tbl_join_centroids`.`__mo_index_centroid_version` , 2564 `__mo_index_tbl_join_centroids`.`__mo_index_joined_centroid_id` , 2565 `__mo_index_tbl_join_centroids`.`__mo_org_tbl_pk_may_serial_col` , 2566 `t1`.`b` 2567 FROM ( 2568 SELECT `t1`.`a` AS `__mo_org_tbl_pk_may_serial_col`, 2569 `t1`.`b` 2570 FROM `a`.`t1`) AS `t1` 2571 INNER JOIN 2572 ( 2573 SELECT `centroids`.`__mo_index_centroid_version` AS `__mo_index_centroid_version`, 2574 serial_extract( min( serial_full( l2_distance(`centroids`.`__mo_index_centroid`, `t1`.`__mo_org_tbl_norm_vec_col`), `centroids`.`__mo_index_centroid_id`)), 1 AS bigint) AS `__mo_index_joined_centroid_id`, 2575 `__mo_org_tbl_pk_may_serial_col` 2576 FROM ( 2577 SELECT `t1`.`a` AS `__mo_org_tbl_pk_may_serial_col`, 2578 normalize_l2(`t1`.`b`) AS `__mo_org_tbl_norm_vec_col`, 2579 FROM `a`.`t1` 2580 ) AS `t1` 2581 CROSS JOIN 2582 ( 2583 SELECT * 2584 FROM `a`.`centroids` 2585 WHERE `__mo_index_centroid_version` = ( SELECT cast(__mo_index_val AS bigint) FROM `a`.`meta` WHERE `__mo_index_key` = 'version') 2586 ) AS `centroids` 2587 GROUP BY `__mo_index_centroid_version`, 2588 __mo_org_tbl_pk_may_serial_col 2589 ) AS `__mo_index_tbl_join_centroids` 2590 2591 ON 
`__mo_index_tbl_join_centroids`.`__mo_org_tbl_pk_may_serial_col` = `t1`.`__mo_org_tbl_pk_may_serial_col`; 2592 2593 ### Corresponding Plan 2594 ------------------------------------------------------------------------------------------------------------------------------------- 2595 | Plan 1: | 2596 | Insert on vecdb3.__mo_index_secondary_018ebf04-f31c-79fe-973b-cc18e91117c0 | 2597 | -> Lock | 2598 | -> Join | 2599 | Join Type: INNER | 2600 | Join Cond: (a = a) | 2601 | -> Project | 2602 | -> Sink Scan | 2603 | DataSource: Plan 0 | 2604 | -> Project | 2605 | -> Aggregate | 2606 | Group Key: __mo_index_centroid_version, a | 2607 | Aggregate Functions: min(serial_full(l2_distance(__mo_index_centroid, #[0,4]), __mo_index_centroid_id)) | 2608 | -> Join | 2609 | Join Type: INNER | 2610 | -> Project | 2611 | -> Project | 2612 | -> Sink Scan | 2613 | DataSource: Plan 0 | 2614 | -> Join | 2615 | Join Type: INNER | 2616 | Join Cond: (__mo_index_centroid_version = cast(__mo_index_val AS BIGINT)) | 2617 | -> Table Scan on vecdb3.__mo_index_secondary_018ebf04-f31c-7cc9-a9ba-f25f08228699 | 2618 | -> Table Scan on vecdb3.__mo_index_secondary_018ebf04-f31c-7e8a-a18a-fca905316151 | 2619 | Filter Cond: (__mo_index_key = cast('version' AS VARCHAR)) | 2620 ------------------------------------------------------------------------------------------------------------------------------------- 2621 2622 */ 2623 2624 //1.a get vector & pk column details 2625 var posOriginPk, posOriginVecColumn int 2626 var typeOriginPk, typeOriginVecColumn Type 2627 { 2628 colsMap := make(map[string]int) 2629 colTypes := make([]Type, len(tableDef.Cols)) 2630 for i, col := range tableDef.Cols { 2631 colsMap[col.Name] = i 2632 colTypes[i] = tableDef.Cols[i].Typ 2633 } 2634 2635 for _, part := range multiTableIndex.IndexDefs[catalog.SystemSI_IVFFLAT_TblType_Entries].Parts { 2636 if i, ok := colsMap[part]; ok { 2637 posOriginVecColumn = i 2638 typeOriginVecColumn = tableDef.Cols[i].Typ 2639 break 2640 } 2641 } 2642 2643 posOriginPk, typeOriginPk = getPkPos(tableDef, false) 2644 } 2645 2646 //1.b Handle mo_cp_key 2647 lastNodeId = recomputeMoCPKeyViaProjection(builder, bindCtx, tableDef, lastNodeId, posOriginPk) 2648 lastNodeIdForTblJoinCentroids = recomputeMoCPKeyViaProjection(builder, bindCtx, tableDef, lastNodeIdForTblJoinCentroids, posOriginPk) 2649 2650 // 2. scan meta table to find the `current version` number 2651 metaCurrVersionRow, err := makeMetaTblScanWhereKeyEqVersion(builder, bindCtx, indexTableDefs, idxRefs) 2652 if err != nil { 2653 return -1, err 2654 } 2655 2656 // 3. create a scan node for centroids x meta on centroids.version = cast (meta.version as bigint) 2657 currVersionCentroids, err := makeCrossJoinCentroidsMetaForCurrVersion(builder, bindCtx, 2658 indexTableDefs, idxRefs, metaCurrVersionRow) 2659 if err != nil { 2660 return -1, err 2661 } 2662 2663 // 4. Make Table Projection with cpPk, normalize_l2() 2664 tableId := lastNodeIdForTblJoinCentroids 2665 projectTblScan, err := makeTableProjectionIncludingNormalizeL2(builder, bindCtx, tableId, tableDef, 2666 typeOriginPk, posOriginPk, 2667 typeOriginVecColumn, posOriginVecColumn) 2668 if err != nil { 2669 return -1, err 2670 } 2671 2672 // 5. 
create "tbl" cross join "centroids" 2673 // Projections: 2674 // centroids.version, centroids.centroid_id, tbl.pk, tbl.embedding, 2675 // centroids.centroid, normalize_l2(tbl.embedding) 2676 var leftChildTblId = projectTblScan 2677 var rightChildCentroidsId = currVersionCentroids 2678 var crossJoinTblAndCentroidsID = makeCrossJoinTblAndCentroids(builder, bindCtx, tableDef, 2679 leftChildTblId, rightChildCentroidsId, 2680 typeOriginPk, posOriginPk, 2681 typeOriginVecColumn) 2682 2683 // 6. select centroids.version, serial_extract(min( pair< l2_distance, centroid_id >, 1 ), pk, 2684 // from crossJoinTblAndCentroidsID group by pk, 2685 minCentroidIdNode, err := makeMinCentroidIdAndCpKey(builder, bindCtx, crossJoinTblAndCentroidsID, multiTableIndex) 2686 if err != nil { 2687 return -1, err 2688 } 2689 2690 // 7. Final project: centroids.version, centroids.centroid_id, tbl.pk, tbl.embedding, cp_col 2691 projectId, err := makeFinalProjectWithTblEmbedding(builder, bindCtx, 2692 lastNodeId, minCentroidIdNode, 2693 tableDef, 2694 typeOriginPk, posOriginPk, 2695 typeOriginVecColumn, posOriginVecColumn) 2696 if err != nil { 2697 return -1, err 2698 } 2699 2700 lastNodeId = projectId 2701 2702 if lockNodeId, ok := appendLockNode( 2703 builder, 2704 bindCtx, 2705 lastNodeId, 2706 indexTableDefs[2], 2707 false, 2708 false, 2709 -1, 2710 nil, 2711 isUpdate, 2712 ); ok { 2713 lastNodeId = lockNodeId 2714 } 2715 2716 lastNodeId = appendSinkNode(builder, bindCtx, lastNodeId) 2717 sourceStep := builder.appendStep(lastNodeId) 2718 2719 return sourceStep, nil 2720 } 2721 2722 func recomputeMoCPKeyViaProjection(builder *QueryBuilder, bindCtx *BindContext, tableDef *TableDef, lastNodeId int32, posOriginPk int) int32 { 2723 if tableDef.Pkey != nil && tableDef.Pkey.PkeyColName != catalog.FakePrimaryKeyColName { 2724 lastProject := builder.qry.Nodes[lastNodeId].ProjectList 2725 2726 projectProjection := make([]*Expr, len(lastProject)) 2727 for i := 0; i < len(lastProject); i++ { 2728 projectProjection[i] = &plan.Expr{ 2729 Typ: lastProject[i].Typ, 2730 Expr: &plan.Expr_Col{ 2731 Col: &plan.ColRef{ 2732 RelPos: 0, 2733 ColPos: int32(i), 2734 //Name: "col" + strconv.FormatInt(int64(i), 10), 2735 }, 2736 }, 2737 } 2738 } 2739 2740 if tableDef.Pkey.PkeyColName == catalog.CPrimaryKeyColName { 2741 pkNamesMap := make(map[string]int) 2742 for _, name := range tableDef.Pkey.Names { 2743 pkNamesMap[name] = 1 2744 } 2745 2746 prikeyPos := make([]int, 0) 2747 for i, coldef := range tableDef.Cols { 2748 if _, ok := pkNamesMap[coldef.Name]; ok { 2749 prikeyPos = append(prikeyPos, i) 2750 } 2751 } 2752 2753 serialArgs := make([]*plan.Expr, len(prikeyPos)) 2754 for i, position := range prikeyPos { 2755 serialArgs[i] = &plan.Expr{ 2756 Typ: lastProject[position].Typ, 2757 Expr: &plan.Expr_Col{ 2758 Col: &plan.ColRef{ 2759 RelPos: 0, 2760 ColPos: int32(position), 2761 Name: tableDef.Cols[position].Name, 2762 }, 2763 }, 2764 } 2765 } 2766 compkey, _ := BindFuncExprImplByPlanExpr(builder.GetContext(), "serial", serialArgs) 2767 projectProjection[posOriginPk] = compkey 2768 } else { 2769 pkPos := -1 2770 for i, coldef := range tableDef.Cols { 2771 if tableDef.Pkey.PkeyColName == coldef.Name { 2772 pkPos = i 2773 break 2774 } 2775 } 2776 if pkPos != -1 { 2777 projectProjection[posOriginPk] = &plan.Expr{ 2778 Typ: lastProject[pkPos].Typ, 2779 Expr: &plan.Expr_Col{ 2780 Col: &plan.ColRef{ 2781 RelPos: 0, 2782 ColPos: int32(pkPos), 2783 Name: tableDef.Pkey.PkeyColName, 2784 }, 2785 }, 2786 } 2787 } 2788 } 2789 projectNode := &Node{ 2790 
NodeType: plan.Node_PROJECT, 2791 Children: []int32{lastNodeId}, 2792 ProjectList: projectProjection, 2793 } 2794 lastNodeId = builder.appendNode(projectNode, bindCtx) 2795 } 2796 return lastNodeId 2797 } 2798 2799 // appendPreInsertUkPlan build preinsert plan. 2800 // sink_scan -> preinsert_uk -> sink 2801 func appendPreInsertUkPlan( 2802 builder *QueryBuilder, 2803 bindCtx *BindContext, 2804 tableDef *TableDef, 2805 lastNodeId int32, 2806 indexIdx int, 2807 isUpddate bool, 2808 uniqueTableDef *TableDef, 2809 isUK bool) (int32, error) { 2810 /******** 2811 NOTE: make sure to make the major change applied to secondary index, to IVFFLAT index as well. 2812 Else IVFFLAT index would fail 2813 ********/ 2814 2815 var useColumns []int32 2816 idxDef := tableDef.Indexes[indexIdx] 2817 colsMap := make(map[string]int) 2818 2819 for i, col := range tableDef.Cols { 2820 colsMap[col.Name] = i 2821 } 2822 for _, part := range idxDef.Parts { 2823 part = catalog.ResolveAlias(part) 2824 if i, ok := colsMap[part]; ok { 2825 useColumns = append(useColumns, int32(i)) 2826 } 2827 } 2828 2829 pkColumn, originPkType := getPkPos(tableDef, false) 2830 lastNodeId = recomputeMoCPKeyViaProjection(builder, bindCtx, tableDef, lastNodeId, pkColumn) 2831 2832 var ukType Type 2833 if len(idxDef.Parts) == 1 { 2834 ukType = tableDef.Cols[useColumns[0]].Typ 2835 } else { 2836 ukType = Type{ 2837 Id: int32(types.T_varchar), 2838 Width: types.MaxVarcharLen, 2839 } 2840 } 2841 var preinsertUkProjection []*Expr 2842 preinsertUkProjection = append(preinsertUkProjection, &plan.Expr{ 2843 Typ: ukType, 2844 Expr: &plan.Expr_Col{ 2845 Col: &plan.ColRef{ 2846 RelPos: -1, 2847 ColPos: 0, 2848 Name: catalog.IndexTableIndexColName, 2849 }, 2850 }, 2851 }) 2852 preinsertUkProjection = append(preinsertUkProjection, &plan.Expr{ 2853 Typ: originPkType, 2854 Expr: &plan.Expr_Col{ 2855 Col: &plan.ColRef{ 2856 RelPos: -1, 2857 ColPos: 1, 2858 Name: catalog.IndexTablePrimaryColName, 2859 }, 2860 }, 2861 }) 2862 if isUpddate { 2863 lastProjection := builder.qry.Nodes[lastNodeId].ProjectList 2864 originRowIdIdx := len(lastProjection) - 1 2865 preinsertUkProjection = append(preinsertUkProjection, &plan.Expr{ 2866 Typ: lastProjection[originRowIdIdx].Typ, 2867 Expr: &plan.Expr_Col{ 2868 Col: &plan.ColRef{ 2869 RelPos: 0, 2870 ColPos: int32(originRowIdIdx), 2871 Name: catalog.Row_ID, 2872 }, 2873 }, 2874 }) 2875 } 2876 //TODO: once everything works, rename all the UK to a more generic name that means UK and SK. 2877 // ie preInsertUkNode -> preInsertIKNode 2878 // NOTE: we have build secondary index by reusing the whole code flow of Unique Index. 2879 // This would be done in a separate PR after verifying the correctness of the current code. 2880 var preInsertUkNode *Node 2881 if isUK { 2882 preInsertUkNode = &Node{ 2883 NodeType: plan.Node_PRE_INSERT_UK, 2884 Children: []int32{lastNodeId}, 2885 ProjectList: preinsertUkProjection, 2886 PreInsertUkCtx: &plan.PreInsertUkCtx{ 2887 Columns: useColumns, 2888 PkColumn: int32(pkColumn), 2889 PkType: originPkType, 2890 UkType: ukType, 2891 }, 2892 } 2893 } else { 2894 // NOTE: We don't defined PreInsertSkCtx. Instead, we use PreInsertUkCtx for both UK and SK since there 2895 // is no difference in the contents. 
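// (For reference, as the comments in appendDeleteIndexTablePlan below suggest: a
// multi-part unique key stores serial(part1, part2, ...) in the hidden index column,
// while a regular secondary key stores serial_full(part1, part2, ..., serial(pk));
// the second hidden column is always the original table's primary key.)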
2896 preInsertUkNode = &Node{ 2897 NodeType: plan.Node_PRE_INSERT_SK, 2898 Children: []int32{lastNodeId}, 2899 ProjectList: preinsertUkProjection, 2900 PreInsertSkCtx: &plan.PreInsertUkCtx{ 2901 Columns: useColumns, 2902 PkColumn: int32(pkColumn), 2903 PkType: originPkType, 2904 UkType: ukType, 2905 }, 2906 } 2907 } 2908 lastNodeId = builder.appendNode(preInsertUkNode, bindCtx) 2909 2910 if lockNodeId, ok := appendLockNode( 2911 builder, 2912 bindCtx, 2913 lastNodeId, 2914 uniqueTableDef, 2915 false, 2916 false, 2917 -1, 2918 nil, 2919 isUpddate, 2920 ); ok { 2921 lastNodeId = lockNodeId 2922 } 2923 2924 lastNodeId = appendSinkNode(builder, bindCtx, lastNodeId) 2925 sourceStep := builder.appendStep(lastNodeId) 2926 2927 return sourceStep, nil 2928 } 2929 2930 func appendDeleteIndexTablePlan( 2931 builder *QueryBuilder, 2932 bindCtx *BindContext, 2933 uniqueObjRef *ObjectRef, 2934 uniqueTableDef *TableDef, 2935 indexdef *IndexDef, 2936 typMap map[string]plan.Type, 2937 posMap map[string]int, 2938 baseNodeId int32, 2939 isUK bool, 2940 ) (int32, error) { 2941 /******** 2942 NOTE: make sure to make the major change applied to secondary index, to IVFFLAT index as well. 2943 Else IVFFLAT index would fail 2944 ********/ 2945 lastNodeId := baseNodeId 2946 var err error 2947 projectList := getProjectionByLastNodeForRightJoin(builder, lastNodeId) 2948 rfTag := builder.genNewMsgTag() 2949 2950 var rightRowIdPos int32 = -1 2951 var rightPkPos int32 = -1 2952 scanNodeProject := make([]*Expr, len(uniqueTableDef.Cols)) 2953 for colIdx, col := range uniqueTableDef.Cols { 2954 if col.Name == catalog.Row_ID { 2955 rightRowIdPos = int32(colIdx) 2956 } else if col.Name == catalog.IndexTableIndexColName { 2957 rightPkPos = int32(colIdx) 2958 } 2959 scanNodeProject[colIdx] = &plan.Expr{ 2960 Typ: col.Typ, 2961 Expr: &plan.Expr_Col{ 2962 Col: &plan.ColRef{ 2963 ColPos: int32(colIdx), 2964 Name: col.Name, 2965 }, 2966 }, 2967 } 2968 } 2969 pkTyp := uniqueTableDef.Cols[rightPkPos].Typ 2970 2971 probeExpr := &plan.Expr{ 2972 Typ: pkTyp, 2973 Expr: &plan.Expr_Col{ 2974 Col: &plan.ColRef{ 2975 Name: uniqueTableDef.Pkey.PkeyColName, 2976 }, 2977 }, 2978 } 2979 2980 leftId := builder.appendNode(&plan.Node{ 2981 NodeType: plan.Node_TABLE_SCAN, 2982 Stats: &plan.Stats{}, 2983 ObjRef: uniqueObjRef, 2984 TableDef: uniqueTableDef, 2985 ProjectList: scanNodeProject, 2986 RuntimeFilterProbeList: []*plan.RuntimeFilterSpec{MakeRuntimeFilter(rfTag, false, 0, probeExpr)}, 2987 }, bindCtx) 2988 2989 // append projection 2990 projectList = append(projectList, &plan.Expr{ 2991 Typ: uniqueTableDef.Cols[rightRowIdPos].Typ, 2992 Expr: &plan.Expr_Col{ 2993 Col: &plan.ColRef{ 2994 RelPos: 0, 2995 ColPos: rightRowIdPos, 2996 Name: catalog.Row_ID, 2997 }, 2998 }, 2999 }, &plan.Expr{ 3000 Typ: uniqueTableDef.Cols[rightPkPos].Typ, 3001 Expr: &plan.Expr_Col{ 3002 Col: &plan.ColRef{ 3003 RelPos: 0, 3004 ColPos: rightPkPos, 3005 Name: catalog.IndexTableIndexColName, 3006 }, 3007 }, 3008 }) 3009 3010 rightExpr := &plan.Expr{ 3011 Typ: uniqueTableDef.Cols[rightPkPos].Typ, 3012 Expr: &plan.Expr_Col{ 3013 Col: &plan.ColRef{ 3014 RelPos: 0, 3015 ColPos: rightPkPos, 3016 Name: catalog.IndexTableIndexColName, 3017 }, 3018 }, 3019 } 3020 3021 // append join node 3022 var joinConds []*Expr 3023 var leftExpr *Expr 3024 partsLength := len(indexdef.Parts) 3025 if partsLength == 1 { 3026 orginIndexColumnName := indexdef.Parts[0] 3027 typ := typMap[orginIndexColumnName] 3028 leftExpr = &Expr{ 3029 Typ: typ, 3030 Expr: &plan.Expr_Col{ 3031 Col: &plan.ColRef{ 
3032 RelPos: 1, 3033 ColPos: int32(posMap[orginIndexColumnName]), 3034 Name: orginIndexColumnName, 3035 }, 3036 }, 3037 } 3038 } else { 3039 args := make([]*Expr, partsLength) 3040 for i, column := range indexdef.Parts { 3041 column = catalog.ResolveAlias(column) 3042 typ := typMap[column] 3043 args[i] = &plan.Expr{ 3044 Typ: typ, 3045 Expr: &plan.Expr_Col{ 3046 Col: &plan.ColRef{ 3047 RelPos: 1, 3048 ColPos: int32(posMap[column]), 3049 Name: column, 3050 }, 3051 }, 3052 } 3053 } 3054 if isUK { 3055 // use for UK 3056 // 0: serial(part1, part2) <---- 3057 // 1: serial(pk1, pk2) 3058 leftExpr, err = BindFuncExprImplByPlanExpr(builder.GetContext(), "serial", args) 3059 } else { 3060 // only used for regular secondary index's 0'th column 3061 // 0: serial_full(part1, part2, serial(pk1, pk2)) <---- 3062 // 1: serial(pk1, pk2) 3063 leftExpr, err = BindFuncExprImplByPlanExpr(builder.GetContext(), "serial_full", args) 3064 } 3065 if err != nil { 3066 return -1, err 3067 } 3068 } 3069 3070 condExpr, err := BindFuncExprImplByPlanExpr(builder.GetContext(), "=", []*Expr{rightExpr, leftExpr}) 3071 if err != nil { 3072 return -1, err 3073 } 3074 joinConds = []*Expr{condExpr} 3075 3076 buildExpr := &plan.Expr{ 3077 Typ: pkTyp, 3078 Expr: &plan.Expr_Col{ 3079 Col: &plan.ColRef{ 3080 RelPos: 0, 3081 ColPos: 0, 3082 }, 3083 }, 3084 } 3085 3086 /* 3087 For the hidden table of the secondary index, there will be no null situation, so there is no need to use right join 3088 For why right join is needed, you can consider the following SQL : 3089 3090 create table t1(a int, b int, c int, unique key(a)); 3091 insert into t1 values(null, 1, 1); 3092 explain verbose update t1 set a = 1 where a is null; 3093 */ 3094 joinType := plan.Node_INNER 3095 if isUK { 3096 joinType = plan.Node_RIGHT 3097 } 3098 3099 lastNodeId = builder.appendNode(&plan.Node{ 3100 NodeType: plan.Node_JOIN, 3101 Children: []int32{leftId, lastNodeId}, 3102 JoinType: joinType, 3103 OnList: joinConds, 3104 ProjectList: projectList, 3105 RuntimeFilterBuildList: []*plan.RuntimeFilterSpec{MakeRuntimeFilter(rfTag, false, GetInFilterCardLimitOnPK(builder.qry.Nodes[leftId].Stats.TableCnt), buildExpr)}, 3106 }, bindCtx) 3107 return lastNodeId, nil 3108 } 3109 3110 func appendDeleteMasterTablePlan(builder *QueryBuilder, bindCtx *BindContext, 3111 masterObjRef *ObjectRef, masterTableDef *TableDef, 3112 baseNodeId int32, tableDef *TableDef, indexDef *plan.IndexDef, 3113 typMap map[string]plan.Type, posMap map[string]int) (int32, error) { 3114 3115 originPkColumnPos, originPkType := getPkPos(tableDef, false) 3116 3117 lastNodeId := baseNodeId 3118 projectList := getProjectionByLastNode(builder, lastNodeId) 3119 3120 var rightRowIdPos int32 = -1 3121 var rightPkPos int32 = -1 3122 scanNodeProject := make([]*Expr, len(masterTableDef.Cols)) 3123 for colIdx, colVal := range masterTableDef.Cols { 3124 3125 if colVal.Name == catalog.Row_ID { 3126 rightRowIdPos = int32(colIdx) 3127 } else if colVal.Name == catalog.MasterIndexTableIndexColName { 3128 rightPkPos = int32(colIdx) 3129 } 3130 3131 scanNodeProject[colIdx] = &plan.Expr{ 3132 Typ: colVal.Typ, 3133 Expr: &plan.Expr_Col{ 3134 Col: &plan.ColRef{ 3135 ColPos: int32(colIdx), 3136 Name: colVal.Name, 3137 }, 3138 }, 3139 } 3140 } 3141 3142 rightId := builder.appendNode(&plan.Node{ 3143 NodeType: plan.Node_TABLE_SCAN, 3144 Stats: &plan.Stats{}, 3145 ObjRef: masterObjRef, 3146 TableDef: masterTableDef, 3147 ProjectList: scanNodeProject, 3148 }, bindCtx) 3149 3150 // join conditions 3151 // Example :- 3152 // ( 
(serial_full('1', a, c) = __mo_index_idx_col) or (serial_full('1', b, c) = __mo_index_idx_col) ) 3153 var joinConds *Expr 3154 for idx, part := range indexDef.Parts { 3155 // serial_full("colPos", col1, pk) 3156 var leftExpr *Expr 3157 leftExprArgs := make([]*Expr, 3) 3158 leftExprArgs[0] = makePlan2StringConstExprWithType(getColSeqFromColDef(tableDef.Cols[posMap[part]])) 3159 leftExprArgs[1] = &Expr{ 3160 Typ: typMap[part], 3161 Expr: &plan.Expr_Col{ 3162 Col: &plan.ColRef{ 3163 RelPos: 0, 3164 ColPos: int32(posMap[part]), 3165 Name: part, 3166 }, 3167 }, 3168 } 3169 leftExprArgs[2] = &Expr{ 3170 Typ: originPkType, 3171 Expr: &plan.Expr_Col{ 3172 Col: &plan.ColRef{ 3173 RelPos: 0, 3174 ColPos: int32(originPkColumnPos), 3175 Name: tableDef.Pkey.PkeyColName, 3176 }, 3177 }, 3178 } 3179 leftExpr, err := BindFuncExprImplByPlanExpr(builder.GetContext(), "serial_full", leftExprArgs) 3180 if err != nil { 3181 return -1, err 3182 } 3183 3184 var rightExpr = &plan.Expr{ 3185 Typ: masterTableDef.Cols[rightPkPos].Typ, 3186 Expr: &plan.Expr_Col{ 3187 Col: &plan.ColRef{ 3188 RelPos: 1, 3189 ColPos: rightPkPos, 3190 Name: catalog.MasterIndexTableIndexColName, 3191 }, 3192 }, 3193 } 3194 currCond, err := BindFuncExprImplByPlanExpr(builder.GetContext(), "=", []*Expr{leftExpr, rightExpr}) 3195 if err != nil { 3196 return -1, err 3197 } 3198 if idx == 0 { 3199 joinConds = currCond 3200 } else { 3201 joinConds, err = BindFuncExprImplByPlanExpr(builder.GetContext(), "or", []*plan.Expr{joinConds, currCond}) 3202 if err != nil { 3203 return -1, err 3204 } 3205 } 3206 } 3207 3208 projectList = append(projectList, &plan.Expr{ 3209 Typ: masterTableDef.Cols[rightRowIdPos].Typ, 3210 Expr: &plan.Expr_Col{ 3211 Col: &plan.ColRef{ 3212 RelPos: 1, 3213 ColPos: rightRowIdPos, 3214 Name: catalog.Row_ID, 3215 }, 3216 }, 3217 }, &plan.Expr{ 3218 Typ: masterTableDef.Cols[rightPkPos].Typ, 3219 Expr: &plan.Expr_Col{ 3220 Col: &plan.ColRef{ 3221 RelPos: 1, 3222 ColPos: rightPkPos, 3223 Name: catalog.MasterIndexTableIndexColName, 3224 }, 3225 }, 3226 }) 3227 lastNodeId = builder.appendNode(&plan.Node{ 3228 NodeType: plan.Node_JOIN, 3229 JoinType: plan.Node_LEFT, 3230 Children: []int32{lastNodeId, rightId}, 3231 OnList: []*Expr{joinConds}, 3232 ProjectList: projectList, 3233 }, bindCtx) 3234 3235 return lastNodeId, nil 3236 } 3237 func appendDeleteIvfTablePlan(builder *QueryBuilder, bindCtx *BindContext, 3238 entriesObjRef *ObjectRef, entriesTableDef *TableDef, 3239 baseNodeId int32, tableDef *TableDef) (int32, error) { 3240 3241 originPkColumnPos, originPkType := getPkPos(tableDef, false) 3242 3243 lastNodeId := baseNodeId 3244 var err error 3245 projectList := getProjectionByLastNode(builder, lastNodeId) 3246 3247 var entriesRowIdPos int32 = -1 3248 var entriesFkPkColPos int32 = -1 3249 var entriesCpPkColPos int32 = -1 3250 var cpPkType = types.T_varchar.ToType() 3251 scanNodeProject := make([]*Expr, len(entriesTableDef.Cols)) 3252 for colIdx, col := range entriesTableDef.Cols { 3253 if col.Name == catalog.Row_ID { 3254 entriesRowIdPos = int32(colIdx) 3255 } else if col.Name == catalog.SystemSI_IVFFLAT_TblCol_Entries_pk { 3256 entriesFkPkColPos = int32(colIdx) 3257 } else if col.Name == catalog.CPrimaryKeyColName { 3258 entriesCpPkColPos = int32(colIdx) 3259 } 3260 scanNodeProject[colIdx] = &plan.Expr{ 3261 Typ: col.Typ, 3262 Expr: &plan.Expr_Col{ 3263 Col: &plan.ColRef{ 3264 ColPos: int32(colIdx), 3265 Name: col.Name, 3266 }, 3267 }, 3268 } 3269 } 3270 rightId := builder.appendNode(&plan.Node{ 3271 NodeType: 
plan.Node_TABLE_SCAN, 3272 Stats: &plan.Stats{}, 3273 ObjRef: entriesObjRef, 3274 TableDef: entriesTableDef, 3275 ProjectList: scanNodeProject, 3276 }, bindCtx) 3277 3278 // append projection 3279 projectList = append(projectList, 3280 &plan.Expr{ 3281 Typ: entriesTableDef.Cols[entriesRowIdPos].Typ, 3282 Expr: &plan.Expr_Col{ 3283 Col: &plan.ColRef{ 3284 RelPos: 1, 3285 ColPos: entriesRowIdPos, 3286 Name: catalog.Row_ID, 3287 }, 3288 }, 3289 }, 3290 &plan.Expr{ 3291 Typ: makePlan2Type(&cpPkType), 3292 Expr: &plan.Expr_Col{ 3293 Col: &plan.ColRef{ 3294 RelPos: 1, 3295 ColPos: entriesCpPkColPos, 3296 Name: catalog.CPrimaryKeyColName, 3297 }, 3298 }, 3299 }, 3300 ) 3301 3302 rightExpr := &plan.Expr{ 3303 Typ: entriesTableDef.Cols[entriesFkPkColPos].Typ, 3304 Expr: &plan.Expr_Col{ 3305 Col: &plan.ColRef{ 3306 RelPos: 1, 3307 ColPos: entriesFkPkColPos, 3308 Name: catalog.SystemSI_IVFFLAT_TblCol_Entries_pk, 3309 }, 3310 }, 3311 } 3312 3313 // append join node 3314 var joinConds []*Expr 3315 var leftExpr = &plan.Expr{ 3316 Typ: originPkType, 3317 Expr: &plan.Expr_Col{ 3318 Col: &plan.ColRef{ 3319 RelPos: 0, 3320 ColPos: int32(originPkColumnPos), 3321 Name: tableDef.Cols[originPkColumnPos].Name, 3322 }, 3323 }, 3324 } 3325 3326 /* 3327 Some notes: 3328 1. Primary key of entries table is a <version,origin_pk> pair. 3329 2. In the Join condition we are only using origin_pk and not serial(version,origin_pk). For this reason, 3330 we will be deleting older version entries as well, so keep in mind that older version entries are stale. 3331 3. The same goes with inserts as well. We only update the current version. Due to this reason, updates will cause 3332 older versions of the entries to be stale. 3333 */ 3334 3335 condExpr, err := BindFuncExprImplByPlanExpr(builder.GetContext(), "=", []*Expr{leftExpr, rightExpr}) 3336 if err != nil { 3337 return -1, err 3338 } 3339 joinConds = []*Expr{condExpr} 3340 3341 lastNodeId = builder.appendNode(&plan.Node{ 3342 NodeType: plan.Node_JOIN, 3343 JoinType: plan.Node_LEFT, 3344 Children: []int32{lastNodeId, rightId}, 3345 OnList: joinConds, 3346 ProjectList: projectList, 3347 }, bindCtx) 3348 return lastNodeId, nil 3349 } 3350 3351 func appendDeleteIndexTablePlanWithoutFilters( 3352 builder *QueryBuilder, 3353 bindCtx *BindContext, 3354 uniqueObjRef *ObjectRef, 3355 uniqueTableDef *TableDef, 3356 ) (int32, error) { 3357 scanNodeProject := make([]*Expr, len(uniqueTableDef.Cols)) 3358 for colIdx, col := range uniqueTableDef.Cols { 3359 scanNodeProject[colIdx] = &plan.Expr{ 3360 Typ: col.Typ, 3361 Expr: &plan.Expr_Col{ 3362 Col: &plan.ColRef{ 3363 ColPos: int32(colIdx), 3364 Name: col.Name, 3365 }, 3366 }, 3367 } 3368 } 3369 lastNodeId := builder.appendNode(&plan.Node{ 3370 NodeType: plan.Node_TABLE_SCAN, 3371 Stats: &plan.Stats{}, 3372 ObjRef: uniqueObjRef, 3373 TableDef: uniqueTableDef, 3374 ProjectList: scanNodeProject, 3375 }, bindCtx) 3376 return lastNodeId, nil 3377 } 3378 3379 // makePreUpdateDeletePlan 3380 // sink_scan -> project -> [agg] -> [filter] -> sink 3381 func makePreUpdateDeletePlan( 3382 ctx CompilerContext, 3383 builder *QueryBuilder, 3384 bindCtx *BindContext, 3385 delCtx *dmlPlanCtx, 3386 lastNodeId int32, 3387 ) (int32, error) { 3388 // lastNodeId := appendSinkScanNode(builder, bindCtx, delCtx.sourceStep) 3389 lastNode := builder.qry.Nodes[lastNodeId] 3390 3391 // append project Node to fetch the columns of this table 3392 // in front of this projectList are update cols 3393 projectProjection := make([]*Expr, 
len(delCtx.tableDef.Cols)+delCtx.updateColLength) 3394 for i, col := range delCtx.tableDef.Cols { 3395 projectProjection[i] = &plan.Expr{ 3396 Typ: col.Typ, 3397 Expr: &plan.Expr_Col{ 3398 Col: &plan.ColRef{ 3399 RelPos: 0, 3400 ColPos: int32(delCtx.beginIdx + i), 3401 Name: col.Name, 3402 }, 3403 }, 3404 } 3405 } 3406 offset := len(delCtx.tableDef.Cols) 3407 for i := 0; i < delCtx.updateColLength; i++ { 3408 idx := delCtx.beginIdx + offset + i 3409 name := "" 3410 if col, ok := lastNode.ProjectList[idx].Expr.(*plan.Expr_Col); ok { 3411 name = col.Col.Name 3412 } 3413 projectProjection[offset+i] = &plan.Expr{ 3414 Typ: lastNode.ProjectList[idx].Typ, 3415 Expr: &plan.Expr_Col{ 3416 Col: &plan.ColRef{ 3417 RelPos: 0, 3418 ColPos: int32(idx), 3419 Name: name, 3420 }, 3421 }, 3422 } 3423 } 3424 projectNode := &Node{ 3425 NodeType: plan.Node_PROJECT, 3426 Children: []int32{lastNodeId}, 3427 ProjectList: projectProjection, 3428 } 3429 lastNodeId = builder.appendNode(projectNode, bindCtx) 3430 3431 //when update multi table. we append agg node: 3432 //eg: update t1, t2 set t1.a= t1.a+1 where t2.b >10 3433 //eg: update t2, (select a from t2) as tt set t2.a= t2.a+1 where t2.b >10 3434 if delCtx.needAggFilter { 3435 lastNode := builder.qry.Nodes[lastNodeId] 3436 groupByExprs := make([]*Expr, len(delCtx.tableDef.Cols)) 3437 aggNodeProjection := make([]*Expr, len(lastNode.ProjectList)) 3438 for i := 0; i < len(delCtx.tableDef.Cols); i++ { 3439 e := lastNode.ProjectList[i] 3440 name := "" 3441 if col, ok := e.Expr.(*plan.Expr_Col); ok { 3442 name = col.Col.Name 3443 } 3444 groupByExprs[i] = &plan.Expr{ 3445 Typ: e.Typ, 3446 Expr: &plan.Expr_Col{ 3447 Col: &plan.ColRef{ 3448 RelPos: 0, 3449 ColPos: int32(i), 3450 Name: name, 3451 }, 3452 }, 3453 } 3454 aggNodeProjection[i] = &plan.Expr{ 3455 Typ: e.Typ, 3456 Expr: &plan.Expr_Col{ 3457 Col: &plan.ColRef{ 3458 RelPos: -1, 3459 ColPos: int32(i), 3460 Name: name, 3461 }, 3462 }, 3463 } 3464 } 3465 offset := len(delCtx.tableDef.Cols) 3466 aggList := make([]*Expr, delCtx.updateColLength) 3467 for i := 0; i < delCtx.updateColLength; i++ { 3468 pos := offset + i 3469 e := lastNode.ProjectList[pos] 3470 name := "" 3471 if col, ok := e.Expr.(*plan.Expr_Col); ok { 3472 name = col.Col.Name 3473 } 3474 baseExpr := &plan.Expr{ 3475 Typ: e.Typ, 3476 Expr: &plan.Expr_Col{ 3477 Col: &plan.ColRef{ 3478 RelPos: 0, 3479 ColPos: int32(pos), 3480 Name: name, 3481 }, 3482 }, 3483 } 3484 aggExpr, err := BindFuncExprImplByPlanExpr(builder.GetContext(), "any_value", []*Expr{baseExpr}) 3485 if err != nil { 3486 return -1, err 3487 } 3488 aggList[i] = aggExpr 3489 aggNodeProjection[pos] = &plan.Expr{ 3490 Typ: e.Typ, 3491 Expr: &plan.Expr_Col{ 3492 Col: &plan.ColRef{ 3493 RelPos: -2, 3494 ColPos: int32(pos), 3495 Name: name, 3496 }, 3497 }, 3498 } 3499 } 3500 3501 aggNode := &Node{ 3502 NodeType: plan.Node_AGG, 3503 Children: []int32{lastNodeId}, 3504 GroupBy: groupByExprs, 3505 AggList: aggList, 3506 ProjectList: aggNodeProjection, 3507 } 3508 lastNodeId = builder.appendNode(aggNode, bindCtx) 3509 3510 // we need filter null in left join/right join 3511 // eg: UPDATE stu s LEFT JOIN class c ON s.class_id = c.id SET s.class_name = 'test22', c.stu_name = 'test22'; 3512 // we can not let null rows in batch go to insert Node 3513 rowIdExpr := &plan.Expr{ 3514 Typ: aggNodeProjection[delCtx.rowIdPos].Typ, 3515 Expr: &plan.Expr_Col{ 3516 Col: &plan.ColRef{ 3517 RelPos: 0, 3518 ColPos: int32(delCtx.rowIdPos), 3519 Name: catalog.Row_ID, 3520 }, 3521 }, 3522 } 3523 nullCheckExpr, err := 
BindFuncExprImplByPlanExpr(builder.GetContext(), "isnotnull", []*Expr{rowIdExpr}) 3524 if err != nil { 3525 return -1, err 3526 } 3527 filterProjection := getProjectionByLastNode(builder, lastNodeId) 3528 filterNode := &Node{ 3529 NodeType: plan.Node_FILTER, 3530 Children: []int32{lastNodeId}, 3531 FilterList: []*Expr{nullCheckExpr}, 3532 ProjectList: filterProjection, 3533 } 3534 lastNodeId = builder.appendNode(filterNode, bindCtx) 3535 } 3536 3537 // lastNodeId = appendSinkNode(builder, bindCtx, lastNodeId) 3538 // nextSourceStep := builder.appendStep(lastNodeId) 3539 3540 // lock old pk for delete statement 3541 partExprIdx := -1 3542 lastProjectList := getProjectionByLastNode(builder, lastNodeId) 3543 originProjectListLen := len(lastProjectList) 3544 if delCtx.tableDef.Partition != nil { 3545 partExprIdx = len(delCtx.tableDef.Cols) + delCtx.updateColLength 3546 lastNodeId = appendPreDeleteNode(builder, bindCtx, delCtx.objRef, delCtx.tableDef, lastNodeId) 3547 lastProjectList = getProjectionByLastNode(builder, lastNodeId) 3548 } 3549 pkPos, pkTyp := getPkPos(delCtx.tableDef, false) 3550 delNodeInfo := makeDeleteNodeInfo(ctx, delCtx.objRef, delCtx.tableDef, delCtx.rowIdPos, partExprIdx, true, pkPos, pkTyp, delCtx.lockTable, delCtx.partitionInfos) 3551 3552 lockTarget := &plan.LockTarget{ 3553 TableId: delCtx.tableDef.TblId, 3554 PrimaryColIdxInBat: int32(pkPos), 3555 PrimaryColTyp: pkTyp, 3556 RefreshTsIdxInBat: -1, 3557 LockTable: false, 3558 } 3559 if delCtx.tableDef.Partition != nil { 3560 lockTarget.IsPartitionTable = true 3561 lockTarget.FilterColIdxInBat = int32(delNodeInfo.partitionIdx) 3562 lockTarget.PartitionTableIds = delNodeInfo.partTableIDs 3563 } 3564 lockNode := &Node{ 3565 NodeType: plan.Node_LOCK_OP, 3566 Children: []int32{lastNodeId}, 3567 LockTargets: []*plan.LockTarget{lockTarget}, 3568 } 3569 lastNodeId = builder.appendNode(lockNode, bindCtx) 3570 3571 //lock new pk for update statement (if update pk) 3572 if delCtx.updateColLength > 0 && delCtx.updatePkCol && delCtx.tableDef.Pkey != nil { 3573 newPkPos := int32(0) 3574 partitionColIdx := int32(len(lastProjectList)) 3575 3576 // for compound primary key, we need append hidden pk column to the project list 3577 if delCtx.tableDef.Pkey.PkeyColName == catalog.CPrimaryKeyColName { 3578 pkColExpr := make([]*Expr, len(delCtx.tableDef.Pkey.Names)) 3579 for i, colName := range delCtx.tableDef.Pkey.Names { 3580 colIdx := 0 3581 var colTyp *Type 3582 if idx, exists := delCtx.updateColPosMap[colName]; exists { 3583 colIdx = idx 3584 colTyp = &lastProjectList[idx].Typ 3585 } else { 3586 for idx, col := range delCtx.tableDef.Cols { 3587 if col.Name == colName { 3588 colIdx = idx 3589 colTyp = &col.Typ 3590 break 3591 } 3592 } 3593 } 3594 pkColExpr[i] = &Expr{ 3595 Typ: *colTyp, 3596 Expr: &plan.Expr_Col{Col: &plan.ColRef{ColPos: int32(colIdx)}}, 3597 } 3598 } 3599 cpPkExpr, err := BindFuncExprImplByPlanExpr(builder.GetContext(), "serial", pkColExpr) 3600 if err != nil { 3601 return -1, err 3602 } 3603 lastProjectList = append(lastProjectList, cpPkExpr) 3604 // if table have partition, we need append partition expr to projectList 3605 if delCtx.tableDef.Partition != nil { 3606 partitionExpr := DeepCopyExpr(delCtx.tableDef.Partition.PartitionExpression) 3607 resetPartitionExprPos(partitionExpr, delCtx.tableDef, delCtx.updateColPosMap) 3608 lastProjectList = append(lastProjectList, partitionExpr) 3609 } 3610 projNode := &Node{ 3611 NodeType: plan.Node_PROJECT, 3612 Children: []int32{lastNodeId}, 3613 ProjectList: lastProjectList, 
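// the project list now ends with the serial()-ed new primary key and, for
// partitioned tables, the remapped partition expression; newPkPos and
// partitionColIdx assigned below point at these appended columns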
3614 } 3615 lastNodeId = builder.appendNode(projNode, bindCtx) 3616 3617 newPkPos = partitionColIdx 3618 partitionColIdx += 1 3619 } else { 3620 // one pk col, just use update pos 3621 for k, v := range delCtx.updateColPosMap { 3622 if k == delCtx.tableDef.Pkey.PkeyColName { 3623 newPkPos = int32(v) 3624 break 3625 } 3626 } 3627 // if table have partition, we need append project node to get partition 3628 if delCtx.tableDef.Partition != nil { 3629 partitionExpr := DeepCopyExpr(delCtx.tableDef.Partition.PartitionExpression) 3630 resetPartitionExprPos(partitionExpr, delCtx.tableDef, delCtx.updateColPosMap) 3631 lastProjectList := append(lastProjectList, partitionExpr) 3632 projNode := &Node{ 3633 NodeType: plan.Node_PROJECT, 3634 Children: []int32{lastNodeId}, 3635 ProjectList: lastProjectList, 3636 } 3637 lastNodeId = builder.appendNode(projNode, bindCtx) 3638 } 3639 } 3640 3641 lockTarget := &plan.LockTarget{ 3642 TableId: delCtx.tableDef.TblId, 3643 PrimaryColIdxInBat: newPkPos, 3644 PrimaryColTyp: pkTyp, 3645 RefreshTsIdxInBat: -1, //unsupport now 3646 LockTable: false, 3647 } 3648 if delCtx.tableDef.Partition != nil { 3649 lockTarget.IsPartitionTable = true 3650 lockTarget.FilterColIdxInBat = partitionColIdx 3651 lockTarget.PartitionTableIds = delNodeInfo.partTableIDs 3652 } 3653 lockNode := &Node{ 3654 NodeType: plan.Node_LOCK_OP, 3655 Children: []int32{lastNodeId}, 3656 LockTargets: []*plan.LockTarget{lockTarget}, 3657 } 3658 lastNodeId = builder.appendNode(lockNode, bindCtx) 3659 } 3660 3661 if len(lastProjectList) > originProjectListLen { 3662 projectList := lastProjectList[0:originProjectListLen] 3663 projNode := &Node{ 3664 NodeType: plan.Node_PROJECT, 3665 Children: []int32{lastNodeId}, 3666 ProjectList: projectList, 3667 } 3668 lastNodeId = builder.appendNode(projNode, bindCtx) 3669 } 3670 3671 return lastNodeId, nil 3672 } 3673 3674 func resetPartitionExprPos(expr *Expr, tableDef *TableDef, updateColPos map[string]int) { 3675 colPos := make(map[int32]int32) 3676 for idx, col := range tableDef.Cols { 3677 if newIdx, exists := updateColPos[col.Name]; exists { 3678 colPos[int32(idx)] = int32(newIdx) 3679 } else { 3680 colPos[int32(idx)] = int32(idx) 3681 } 3682 } 3683 resetColPos(expr, colPos) 3684 } 3685 3686 // func getColPos(expr *Expr, colPos map[int32]int32) { 3687 // switch e := expr.Expr.(type) { 3688 // case *plan.Expr_Col: 3689 // colPos[e.Col.ColPos] = 0 3690 // case *plan.Expr_F: 3691 // for _, arg := range e.F.Args { 3692 // getColPos(arg, colPos) 3693 // } 3694 // } 3695 // } 3696 3697 func resetColPos(expr *Expr, colPos map[int32]int32) { 3698 switch e := expr.Expr.(type) { 3699 case *plan.Expr_Col: 3700 e.Col.ColPos = colPos[e.Col.ColPos] 3701 case *plan.Expr_F: 3702 for _, arg := range e.F.Args { 3703 resetColPos(arg, colPos) 3704 } 3705 } 3706 } 3707 3708 func appendLockNode( 3709 builder *QueryBuilder, 3710 bindCtx *BindContext, 3711 lastNodeId int32, 3712 tableDef *TableDef, 3713 lockTable bool, 3714 block bool, 3715 partitionIdx int, 3716 partTableIDs []uint64, 3717 isUpdate bool, 3718 ) (int32, bool) { 3719 if !isUpdate && tableDef.Pkey.PkeyColName == catalog.FakePrimaryKeyColName { 3720 return -1, false 3721 } 3722 pkPos, pkTyp := getPkPos(tableDef, false) 3723 if pkPos == -1 { 3724 return -1, false 3725 } 3726 3727 if builder.qry.LoadTag && !lockTable { 3728 return -1, false 3729 } 3730 3731 lockTarget := &plan.LockTarget{ 3732 TableId: tableDef.TblId, 3733 PrimaryColIdxInBat: int32(pkPos), 3734 PrimaryColTyp: pkTyp, 3735 RefreshTsIdxInBat: -1, //unsupport 
now 3736 LockTable: lockTable, 3737 Block: block, 3738 } 3739 3740 if !lockTable && tableDef.Partition != nil { 3741 lockTarget.IsPartitionTable = true 3742 lockTarget.FilterColIdxInBat = int32(partitionIdx) 3743 lockTarget.PartitionTableIds = partTableIDs 3744 } 3745 3746 lockNode := &Node{ 3747 NodeType: plan.Node_LOCK_OP, 3748 Children: []int32{lastNodeId}, 3749 LockTargets: []*plan.LockTarget{lockTarget}, 3750 } 3751 lastNodeId = builder.appendNode(lockNode, bindCtx) 3752 return lastNodeId, true 3753 } 3754 3755 type sinkMeta struct { 3756 step int 3757 scans []*sinkScanMeta 3758 } 3759 3760 type sinkScanMeta struct { 3761 step int 3762 nodeId int32 3763 sinkNodeId int32 3764 preNodeId int32 3765 preNodeIsUnion bool //if preNode is Union, one sinkScan to one sink is fine 3766 recursive bool 3767 } 3768 3769 func reduceSinkSinkScanNodes(qry *Query) { 3770 if len(qry.Steps) == 1 { 3771 return 3772 } 3773 stepMaps := make(map[int]int32) 3774 sinks := make(map[int32]*sinkMeta) 3775 for i, nodeId := range qry.Steps { 3776 stepMaps[i] = nodeId 3777 collectSinkAndSinkScanMeta(qry, sinks, i, nodeId, -1) 3778 } 3779 3780 // merge one sink to one sinkScan 3781 pointToNodeMap := make(map[int32][]int32) 3782 for sinkNodeId, meta := range sinks { 3783 if len(meta.scans) == 1 && !meta.scans[0].preNodeIsUnion && !meta.scans[0].recursive { 3784 // one sink to one sinkScan 3785 sinkNode := qry.Nodes[sinkNodeId] 3786 sinkScanPreNode := qry.Nodes[meta.scans[0].preNodeId] 3787 sinkScanPreNode.Children = sinkNode.Children 3788 delete(stepMaps, meta.step) 3789 } else { 3790 for _, scanMeta := range meta.scans { 3791 if _, ok := pointToNodeMap[sinkNodeId]; !ok { 3792 pointToNodeMap[sinkNodeId] = []int32{scanMeta.nodeId} 3793 } else { 3794 pointToNodeMap[sinkNodeId] = append(pointToNodeMap[sinkNodeId], scanMeta.nodeId) 3795 } 3796 } 3797 } 3798 } 3799 3800 newStepLength := len(stepMaps) 3801 if len(qry.Steps) > newStepLength { 3802 // reset steps & some sinkScan's sourceStep 3803 newSteps := make([]int32, 0, newStepLength) 3804 keys := make([]int, 0, newStepLength) 3805 for key := range stepMaps { 3806 keys = append(keys, key) 3807 } 3808 slices.Sort(keys) 3809 for _, key := range keys { 3810 nodeId := stepMaps[key] 3811 newStepIdx := len(newSteps) 3812 newSteps = append(newSteps, nodeId) 3813 if sinkScanNodeIds, ok := pointToNodeMap[nodeId]; ok { 3814 for _, sinkScanNodeId := range sinkScanNodeIds { 3815 if len(qry.Nodes[sinkScanNodeId].SourceStep) > 1 { 3816 qry.Nodes[sinkScanNodeId].SourceStep[0] = int32(newStepIdx) 3817 } else { 3818 qry.Nodes[sinkScanNodeId].SourceStep = []int32{int32(newStepIdx)} 3819 } 3820 } 3821 } 3822 } 3823 qry.Steps = newSteps 3824 } 3825 } 3826 3827 func collectSinkAndSinkScanMeta( 3828 qry *Query, 3829 sinks map[int32]*sinkMeta, 3830 oldStep int, 3831 nodeId int32, 3832 preNodeId int32) { 3833 node := qry.Nodes[nodeId] 3834 3835 if node.NodeType == plan.Node_SINK { 3836 if _, ok := sinks[nodeId]; !ok { 3837 sinks[nodeId] = &sinkMeta{ 3838 step: oldStep, 3839 scans: make([]*sinkScanMeta, 0, len(qry.Steps)), 3840 } 3841 } else { 3842 sinks[nodeId].step = oldStep 3843 } 3844 } else if node.NodeType == plan.Node_SINK_SCAN || node.NodeType == plan.Node_RECURSIVE_CTE || node.NodeType == plan.Node_RECURSIVE_SCAN { 3845 sinkNodeId := qry.Steps[node.SourceStep[0]] 3846 if _, ok := sinks[sinkNodeId]; !ok { 3847 sinks[sinkNodeId] = &sinkMeta{ 3848 step: -1, 3849 scans: make([]*sinkScanMeta, 0, len(qry.Steps)), 3850 } 3851 } 3852 3853 meta := &sinkScanMeta{ 3854 step: oldStep, 3855 nodeId: 
nodeId,
3856 sinkNodeId: sinkNodeId,
3857 preNodeId: preNodeId,
3858 preNodeIsUnion: qry.Nodes[preNodeId].NodeType == plan.Node_UNION,
3859 recursive: len(node.SourceStep) > 1 || node.NodeType == plan.Node_RECURSIVE_CTE,
3860 }
3861 sinks[sinkNodeId].scans = append(sinks[sinkNodeId].scans, meta)
3862 }
3863
3864 for _, childId := range node.Children {
3865 collectSinkAndSinkScanMeta(qry, sinks, oldStep, childId, nodeId)
3866 }
3867
3868 }
3869
3870 // constraintNameAreWhiteSpaces reports whether the constraint name is non-empty but consists only of white space
3871 func constraintNameAreWhiteSpaces(constraint string) bool {
3872 return len(constraint) != 0 && len(strings.TrimSpace(constraint)) == 0
3873 }
3874
3875 // GenConstraintName generates a UUID to be used as the constraint name
3876 func GenConstraintName() string {
3877 constraintId, _ := uuid.NewV7()
3878 return constraintId.String()
3879 }
3880
3881 // adjustConstraintName picks a suitable name for the constraint:
3882 // it throws an error if the user supplied an all-white-space name,
3883 // and generates a new name if the user supplied nothing.
3884 func adjustConstraintName(ctx context.Context, def *tree.ForeignKey) error {
3885 // reject a user-supplied name that consists only of white space
3886 if constraintNameAreWhiteSpaces(def.ConstraintSymbol) {
3887 return moerr.NewErrWrongNameForIndex(ctx, def.ConstraintSymbol)
3888 } else {
3889 if len(def.ConstraintSymbol) == 0 {
3890 def.ConstraintSymbol = GenConstraintName()
3891 }
3892 }
3893 return nil
3894 }
3895
3896 func runSql(ctx CompilerContext, sql string) (executor.Result, error) {
3897 v, ok := moruntime.ProcessLevelRuntime().GetGlobalVariables(moruntime.InternalSQLExecutor)
3898 if !ok {
3899 panic("missing internal sql executor")
3900 }
3901 proc := ctx.GetProcess()
3902 exec := v.(executor.SQLExecutor)
3903 opts := executor.Options{}.
3904 // runSql and runSqlWithResult execute as part of the input sql, so they must not increment the statement.
3905 // All these sub-sqls need to be rolled back and retried en masse when they conflict in pessimistic mode.
3906 WithDisableIncrStatement().
3907 WithTxn(proc.TxnOperator).
3908 WithDatabase(proc.SessionInfo.Database).
3909 WithTimeZone(proc.SessionInfo.TimeZone).
3910 WithAccountID(proc.SessionInfo.AccountId) 3911 return exec.Exec(proc.Ctx, sql, opts) 3912 } 3913 3914 /* 3915 Example on FkReferKey and FkReferDef: 3916 3917 In database `test`: 3918 3919 create table t1(a int,primary key(a)); 3920 3921 create table t2(b int, constraint c1 foreign key(b) references t1(a)); 3922 3923 So, the structure FkReferDef below denotes such relationships : test.t2(b) -> test.t1(a) 3924 FkReferKey holds : db = test, tbl = t2 3925 3926 */ 3927 3928 // FkReferKey holds the database and table name of the foreign key 3929 type FkReferKey struct { 3930 Db string //fk database name 3931 Tbl string //fk table name 3932 } 3933 3934 // FkReferDef holds the definition & details of the foreign key 3935 type FkReferDef struct { 3936 Db string //fk database name 3937 Tbl string //fk table name 3938 Name string //fk constraint name 3939 Col string //fk column name 3940 ReferCol string //referenced column name 3941 OnDelete string //on delete action 3942 OnUpdate string //on update action 3943 } 3944 3945 func (fk FkReferDef) String() string { 3946 return fmt.Sprintf("%s.%s %s %s => %s", 3947 fk.Db, fk.Tbl, fk.Name, fk.Col, fk.ReferCol) 3948 } 3949 3950 // GetSqlForFkReferredTo returns the query that retrieves the fk relationships 3951 // that refer to the table 3952 func GetSqlForFkReferredTo(db, table string) string { 3953 return fmt.Sprintf( 3954 "select "+ 3955 "db_name, "+ 3956 "table_name, "+ 3957 "constraint_name, "+ 3958 "column_name, "+ 3959 "refer_column_name, "+ 3960 "on_delete, "+ 3961 "on_update "+ 3962 "from "+ 3963 "`mo_catalog`.`mo_foreign_keys` "+ 3964 "where "+ 3965 "refer_db_name = '%s' and refer_table_name = '%s' "+ 3966 " and "+ 3967 "(db_name != '%s' or db_name = '%s' and table_name != '%s') "+ 3968 "order by db_name, table_name, constraint_name;", 3969 db, table, db, db, table) 3970 } 3971 3972 // GetFkReferredTo returns the foreign key relationships that refer to the table 3973 func GetFkReferredTo(ctx CompilerContext, db, table string) (map[FkReferKey]map[string][]*FkReferDef, error) { 3974 //exclude fk self reference 3975 sql := GetSqlForFkReferredTo(db, table) 3976 res, err := runSql(ctx, sql) 3977 if err != nil { 3978 return nil, err 3979 } 3980 defer res.Close() 3981 ret := make(map[FkReferKey]map[string][]*FkReferDef) 3982 const dbIdx = 0 3983 const tblIdx = 1 3984 const nameIdx = 2 3985 const colIdx = 3 3986 const referColIdx = 4 3987 const deleteIdx = 5 3988 const updateIdx = 6 3989 if res.Batches != nil { 3990 for _, batch := range res.Batches { 3991 if batch != nil && 3992 batch.Vecs[0] != nil && 3993 batch.Vecs[0].Length() > 0 { 3994 for i := 0; i < batch.Vecs[0].Length(); i++ { 3995 fk := &FkReferDef{ 3996 Db: string(batch.Vecs[dbIdx].GetBytesAt(i)), 3997 Tbl: string(batch.Vecs[tblIdx].GetBytesAt(i)), 3998 Name: string(batch.Vecs[nameIdx].GetBytesAt(i)), 3999 Col: string(batch.Vecs[colIdx].GetBytesAt(i)), 4000 ReferCol: string(batch.Vecs[referColIdx].GetBytesAt(i)), 4001 OnDelete: string(batch.Vecs[deleteIdx].GetBytesAt(i)), 4002 OnUpdate: string(batch.Vecs[updateIdx].GetBytesAt(i)), 4003 } 4004 key := FkReferKey{Db: fk.Db, Tbl: fk.Tbl} 4005 var constraint map[string][]*FkReferDef 4006 var ok bool 4007 if constraint, ok = ret[key]; !ok { 4008 constraint = make(map[string][]*FkReferDef) 4009 ret[key] = constraint 4010 } 4011 constraint[fk.Name] = append(constraint[fk.Name], fk) 4012 } 4013 } 4014 } 4015 } 4016 return ret, nil 4017 } 4018 4019 func convertIntoReferAction(s string) plan.ForeignKeyDef_RefAction { 4020 switch strings.ToLower(s) { 
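// illustrative mappings (derived from the cases below); mo_foreign_keys may
// store the action either with spaces or with underscores, and both spellings
// are accepted here:
//   convertIntoReferAction("no_action") -> plan.ForeignKeyDef_NO_ACTION
//   convertIntoReferAction("Set Null")  -> plan.ForeignKeyDef_SET_NULL
//   convertIntoReferAction("")          -> plan.ForeignKeyDef_RESTRICT (default branch)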
4021 case "cascade": 4022 return plan.ForeignKeyDef_CASCADE 4023 case "restrict": 4024 return plan.ForeignKeyDef_RESTRICT 4025 case "set null": 4026 fallthrough 4027 case "set_null": 4028 return plan.ForeignKeyDef_SET_NULL 4029 case "no_action": 4030 fallthrough 4031 case "no action": 4032 return plan.ForeignKeyDef_NO_ACTION 4033 case "set_default": 4034 fallthrough 4035 case "set default": 4036 return plan.ForeignKeyDef_SET_DEFAULT 4037 default: 4038 return plan.ForeignKeyDef_RESTRICT 4039 } 4040 } 4041 4042 // getSqlForAddFk returns the insert sql that adds a fk relationship 4043 // into the mo_foreign_keys table 4044 func getSqlForAddFk(db, table string, data *FkData) string { 4045 row := make([]string, 16) 4046 rows := 0 4047 sb := strings.Builder{} 4048 sb.WriteString("insert into `mo_catalog`.`mo_foreign_keys` ") 4049 sb.WriteString(" values ") 4050 for childIdx, childCol := range data.Cols.Cols { 4051 row[0] = data.Def.Name 4052 row[1] = "0" 4053 row[2] = db 4054 row[3] = "0" 4055 row[4] = table 4056 row[5] = "0" 4057 row[6] = childCol 4058 row[7] = "0" 4059 row[8] = data.ParentDbName 4060 row[9] = "0" 4061 row[10] = data.ParentTableName 4062 row[11] = "0" 4063 row[12] = data.ColsReferred.Cols[childIdx] 4064 row[13] = "0" 4065 row[14] = data.Def.OnDelete.String() 4066 row[15] = data.Def.OnUpdate.String() 4067 { 4068 if rows > 0 { 4069 sb.WriteByte(',') 4070 } 4071 rows++ 4072 sb.WriteByte('(') 4073 for j, col := range row { 4074 if j > 0 { 4075 sb.WriteByte(',') 4076 } 4077 sb.WriteByte('\'') 4078 sb.WriteString(col) 4079 sb.WriteByte('\'') 4080 } 4081 sb.WriteByte(')') 4082 } 4083 } 4084 return sb.String() 4085 } 4086 4087 // getSqlForDeleteTable returns the delete sql that deletes all the fk relationships from mo_foreign_keys 4088 // on the table 4089 func getSqlForDeleteTable(db, tbl string) string { 4090 sb := strings.Builder{} 4091 sb.WriteString("delete from `mo_catalog`.`mo_foreign_keys` where ") 4092 sb.WriteString(fmt.Sprintf( 4093 "db_name = '%s' and table_name = '%s'", db, tbl)) 4094 return sb.String() 4095 } 4096 4097 // getSqlForDeleteConstraint returns the delete sql that deletes the fk constraint from mo_foreign_keys 4098 // on the table 4099 func getSqlForDeleteConstraint(db, tbl, constraint string) string { 4100 sb := strings.Builder{} 4101 sb.WriteString("delete from `mo_catalog`.`mo_foreign_keys` where ") 4102 sb.WriteString(fmt.Sprintf( 4103 "constraint_name = '%s' and db_name = '%s' and table_name = '%s'", 4104 constraint, db, tbl)) 4105 return sb.String() 4106 } 4107 4108 // getSqlForDeleteDB returns the delete sql that deletes all the fk relationships from mo_foreign_keys 4109 // on the database 4110 func getSqlForDeleteDB(db string) string { 4111 sb := strings.Builder{} 4112 sb.WriteString("delete from `mo_catalog`.`mo_foreign_keys` where ") 4113 sb.WriteString(fmt.Sprintf("db_name = '%s'", db)) 4114 return sb.String() 4115 } 4116 4117 // getSqlForRenameTable returns the sqls that rename the table of all fk relationships in mo_foreign_keys 4118 func getSqlForRenameTable(db, oldName, newName string) (ret []string) { 4119 sb := strings.Builder{} 4120 sb.WriteString("update `mo_catalog`.`mo_foreign_keys` ") 4121 sb.WriteString(fmt.Sprintf("set table_name = '%s' ", newName)) 4122 sb.WriteString(fmt.Sprintf("where db_name = '%s' and table_name = '%s' ; ", db, oldName)) 4123 ret = append(ret, sb.String()) 4124 4125 sb.Reset() 4126 sb.WriteString("update `mo_catalog`.`mo_foreign_keys` ") 4127 sb.WriteString(fmt.Sprintf("set refer_table_name = '%s' ", newName)) 4128 
sb.WriteString(fmt.Sprintf("where refer_db_name = '%s' and refer_table_name = '%s' ; ", db, oldName))
4129 ret = append(ret, sb.String())
4130 return
4131 }
4132
4133 // getSqlForRenameColumn returns the sqls that rename the column of all fk relationships in mo_foreign_keys
4134 func getSqlForRenameColumn(db, table, oldName, newName string) (ret []string) {
4135 sb := strings.Builder{}
4136 sb.WriteString("update `mo_catalog`.`mo_foreign_keys` ")
4137 sb.WriteString(fmt.Sprintf("set column_name = '%s' ", newName))
4138 sb.WriteString(fmt.Sprintf("where db_name = '%s' and table_name = '%s' and column_name = '%s' ; ",
4139 db, table, oldName))
4140 ret = append(ret, sb.String())
4141
4142 sb.Reset()
4143 sb.WriteString("update `mo_catalog`.`mo_foreign_keys` ")
4144 sb.WriteString(fmt.Sprintf("set refer_column_name = '%s' ", newName))
4145 sb.WriteString(fmt.Sprintf("where refer_db_name = '%s' and refer_table_name = '%s' and refer_column_name = '%s' ; ",
4146 db, table, oldName))
4147 ret = append(ret, sb.String())
4148 return
4149 }
4150
4151 // getSqlForCheckHasDBRefersTo returns the sql that checks if the database has any foreign key relationships
4152 // that refer to it.
4153 func getSqlForCheckHasDBRefersTo(db string) string {
4154 sb := strings.Builder{}
4155 sb.WriteString("select count(*) > 0 from `mo_catalog`.`mo_foreign_keys` ")
4156 sb.WriteString(fmt.Sprintf("where refer_db_name = '%s' and db_name != '%s';", db, db))
4157 return sb.String()
4158 }
4159
4160 // fkBannedDatabase lists the databases in which foreign keys are forbidden:
4161 // you can neither define a fk inside these databases
4162 // nor define a fk that refers to them.
4163 // This restriction keeps the design simple.
4164 var fkBannedDatabase = map[string]bool{
4165 catalog.MO_CATALOG: true,
4166 catalog.MO_SYSTEM: true,
4167 catalog.MO_SYSTEM_METRICS: true,
4168 catalog.MOTaskDB: true,
4169 sysview.InformationDBConst: true,
4170 sysview.MysqlDBConst: true,
4171 trace.DebugDB: true,
4172 }
4173
4174 // IsFkBannedDatabase reports whether the database is banned from having any
4175 // foreign keys
4176 func IsFkBannedDatabase(db string) bool {
4177 if _, has := fkBannedDatabase[db]; has {
4178 return true
4179 }
4180 return false
4181 }
4182
4183 // IsForeignKeyChecksEnabled reports whether the system variable foreign_key_checks is enabled
4184 func IsForeignKeyChecksEnabled(ctx CompilerContext) (bool, error) {
4185 value, err := ctx.ResolveVariable("foreign_key_checks", true, false)
4186 if err != nil {
4187 return false, err
4188 }
4189 if value == nil {
4190 return true, nil
4191 }
4192 if v, ok := value.(int64); ok {
4193 return v == 1, nil
4194 } else if v1, ok := value.(int8); ok {
4195 return v1 == 1, nil
4196 } else {
4197 return false, moerr.NewInternalError(ctx.GetContext(), "invalid %v ", value)
4198 }
4199 }
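
// The function below is an illustrative sketch added for documentation purposes
// and is not part of the original source: its name and arguments are hypothetical.
// It simply chains the mo_foreign_keys SQL helpers defined above the way a DDL
// code path might, to show the shape of the statements they emit.
func exampleFkMaintenanceSqls(db, tbl, newTbl, constraint string) []string {
	sqls := []string{
		// drop one named constraint recorded for db.tbl
		getSqlForDeleteConstraint(db, tbl, constraint),
		// drop every fk row recorded for db.tbl
		getSqlForDeleteTable(db, tbl),
		// drop every fk row recorded for the whole database
		getSqlForDeleteDB(db),
	}
	// a table rename touches both the child side (table_name) and the
	// parent side (refer_table_name), hence getSqlForRenameTable returns
	// two update statements
	sqls = append(sqls, getSqlForRenameTable(db, tbl, newTbl)...)
	return sqls
}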