github.com/cockroachdb/cockroach@v20.2.0-alpha.1+incompatible/pkg/sql/insert_fast_path.go

// Copyright 2019 The Cockroach Authors.
//
// Use of this software is governed by the Business Source License
// included in the file licenses/BSL.txt.
//
// As of the Change Date specified in that file, in accordance with
// the Business Source License, use of this software will be governed
// by the Apache License, Version 2.0, included in the file
// licenses/APL.txt.

package sql

import (
	"context"
	"sync"

	"github.com/cockroachdb/cockroach/pkg/roachpb"
	"github.com/cockroachdb/cockroach/pkg/sql/opt/exec"
	"github.com/cockroachdb/cockroach/pkg/sql/pgwire/pgcode"
	"github.com/cockroachdb/cockroach/pkg/sql/pgwire/pgerror"
	"github.com/cockroachdb/cockroach/pkg/sql/row"
	"github.com/cockroachdb/cockroach/pkg/sql/sem/tree"
	"github.com/cockroachdb/cockroach/pkg/sql/span"
	"github.com/cockroachdb/cockroach/pkg/sql/sqlbase"
	"github.com/cockroachdb/cockroach/pkg/util/log"
	"github.com/cockroachdb/cockroach/pkg/util/tracing"
	"github.com/cockroachdb/errors"
)

var insertFastPathNodePool = sync.Pool{
	New: func() interface{} {
		return &insertFastPathNode{}
	},
}

// Check that exec.InsertFastPathMaxRows does not exceed the default
// maxInsertBatchSize.
func init() {
	if maxInsertBatchSize < exec.InsertFastPathMaxRows {
		panic("decrease exec.InsertFastPathMaxRows")
	}
}

// insertFastPathNode is a faster implementation of inserting values in a table
// and performing FK checks. It is used when all the foreign key checks can be
// performed via a direct lookup in an index, and when the input is VALUES of
// limited size (at most exec.InsertFastPathMaxRows).
type insertFastPathNode struct {
	// input values, similar to a valuesNode.
	input [][]tree.TypedExpr

	// columns is set if this INSERT is returning any rows, to be
	// consumed by a renderNode upstream. This occurs when there is a
	// RETURNING clause with some scalar expressions.
	columns sqlbase.ResultColumns

	run insertFastPathRun
}

type insertFastPathRun struct {
	insertRun

	fkChecks []insertFastPathFKCheck

	numInputCols int

	// inputBuf stores the evaluation result of the input rows, linearized into a
	// single slice; see inputRow(). Unfortunately we can't do everything one row
	// at a time, because we need the datums for generating error messages in case
	// an FK check fails.
	inputBuf tree.Datums

	// fkBatch accumulates the FK existence checks.
	fkBatch roachpb.BatchRequest
	// fkSpanInfo keeps track of information for each fkBatch.Request entry.
	fkSpanInfo []insertFastPathFKSpanInfo

	// fkSpanMap is used to de-duplicate FK existence checks. Only used if there
	// is more than one input row.
	fkSpanMap map[string]struct{}
}

// insertFastPathFKSpanInfo records information about each Request in the
// fkBatch, associating it with a specific check and row index.
type insertFastPathFKSpanInfo struct {
	check  *insertFastPathFKCheck
	rowIdx int
}

// insertFastPathFKCheck extends exec.InsertFastPathFKCheck with metadata that
// is computed once and can be reused across rows.
type insertFastPathFKCheck struct {
	exec.InsertFastPathFKCheck

	tabDesc     *sqlbase.ImmutableTableDescriptor
	idxDesc     *sqlbase.IndexDescriptor
	keyPrefix   []byte
	colMap      map[sqlbase.ColumnID]int
	spanBuilder *span.Builder
}

func (c *insertFastPathFKCheck) init(params runParams) error {
	idx := c.ReferencedIndex.(*optIndex)
	c.tabDesc = c.ReferencedTable.(*optTable).desc
	c.idxDesc = idx.desc

	codec := params.ExecCfg().Codec
	c.keyPrefix = sqlbase.MakeIndexKeyPrefix(codec, &c.tabDesc.TableDescriptor, c.idxDesc.ID)
	c.spanBuilder = span.MakeBuilder(codec, c.tabDesc.TableDesc(), c.idxDesc)

	if len(c.InsertCols) > idx.numLaxKeyCols {
		return errors.AssertionFailedf(
			"%d FK cols, only %d cols in index", len(c.InsertCols), idx.numLaxKeyCols,
		)
	}
	c.colMap = make(map[sqlbase.ColumnID]int, len(c.InsertCols))
	for i, ord := range c.InsertCols {
		var colID sqlbase.ColumnID
		if i < len(c.idxDesc.ColumnIDs) {
			colID = c.idxDesc.ColumnIDs[i]
		} else {
			colID = c.idxDesc.ExtraColumnIDs[i-len(c.idxDesc.ColumnIDs)]
		}

		c.colMap[colID] = int(ord)
	}
	return nil
}

// generateSpan returns the span that we need to look up to confirm existence of
// the referenced row.
func (c *insertFastPathFKCheck) generateSpan(inputRow tree.Datums) (roachpb.Span, error) {
	return row.FKCheckSpan(c.spanBuilder, inputRow, c.colMap, len(c.InsertCols))
}

// errorForRow returns an error indicating failure of this FK check for the
// given row.
func (c *insertFastPathFKCheck) errorForRow(inputRow tree.Datums) error {
	values := make(tree.Datums, len(c.InsertCols))
	for i, ord := range c.InsertCols {
		values[i] = inputRow[ord]
	}
	return c.MkErr(values)
}

func (r *insertFastPathRun) inputRow(rowIdx int) tree.Datums {
	start := rowIdx * r.numInputCols
	end := start + r.numInputCols
	return r.inputBuf[start:end:end]
}

// addFKChecks adds Requests to fkBatch and entries in fkSpanInfo / fkSpanMap as
// needed for checking foreign keys for the given row.
func (r *insertFastPathRun) addFKChecks(
	ctx context.Context, rowIdx int, inputRow tree.Datums,
) error {
	for i := range r.fkChecks {
		c := &r.fkChecks[i]

		// See if we have any nulls.
		numNulls := 0
		for _, ord := range c.InsertCols {
			if inputRow[ord] == tree.DNull {
				numNulls++
			}
		}
		if numNulls > 0 {
			if c.MatchMethod == tree.MatchFull && numNulls != len(c.InsertCols) {
				return c.errorForRow(inputRow)
			}
			// We have a row with only NULLs, or a row with some NULLs and match
			// method PARTIAL. We can ignore this row.
			return nil
		}

		span, err := c.generateSpan(inputRow)
		if err != nil {
			return err
		}
		if r.fkSpanMap != nil {
			_, exists := r.fkSpanMap[string(span.Key)]
			if exists {
				// Duplicate span.
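				// A previous input row already produced an identical existence
				// check; the ScanRequest already queued in fkBatch covers this
				// row as well, so don't add a second request for the same span.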
				continue
			}
			r.fkSpanMap[string(span.Key)] = struct{}{}
		}
		if r.traceKV {
			log.VEventf(ctx, 2, "FKScan %s", span)
		}
		reqIdx := len(r.fkBatch.Requests)
		r.fkBatch.Requests = append(r.fkBatch.Requests, roachpb.RequestUnion{})
		r.fkBatch.Requests[reqIdx].MustSetInner(&roachpb.ScanRequest{
			RequestHeader: roachpb.RequestHeaderFromSpan(span),
		})
		r.fkSpanInfo = append(r.fkSpanInfo, insertFastPathFKSpanInfo{
			check:  c,
			rowIdx: rowIdx,
		})
	}
	return nil
}

// runFKChecks runs the fkBatch and checks that all spans return at least one
// key.
func (n *insertFastPathNode) runFKChecks(params runParams) error {
	if len(n.run.fkBatch.Requests) == 0 {
		return nil
	}
	defer n.run.fkBatch.Reset()

	// Run the FK checks batch.
	br, err := params.p.txn.Send(params.ctx, n.run.fkBatch)
	if err != nil {
		return err.GoError()
	}

	for i := range br.Responses {
		resp := br.Responses[i].GetInner().(*roachpb.ScanResponse)
		if len(resp.Rows) == 0 {
			// No results for lookup; generate the violation error.
			info := n.run.fkSpanInfo[i]
			return info.check.errorForRow(n.run.inputRow(info.rowIdx))
		}
	}

	return nil
}

func (n *insertFastPathNode) startExec(params runParams) error {
	// Cache traceKV during execution, to avoid re-evaluating it for every row.
	n.run.traceKV = params.p.ExtendedEvalContext().Tracing.KVTracingEnabled()

	n.run.initRowContainer(params, n.columns, 0 /* rowCapacity */)

	n.run.numInputCols = len(n.input[0])
	n.run.inputBuf = make(tree.Datums, len(n.input)*n.run.numInputCols)

	if len(n.input) > 1 {
		n.run.fkSpanMap = make(map[string]struct{})
	}

	if len(n.run.fkChecks) > 0 {
		for i := range n.run.fkChecks {
			if err := n.run.fkChecks[i].init(params); err != nil {
				return err
			}
		}
		maxSpans := len(n.run.fkChecks) * len(n.input)
		n.run.fkBatch.Requests = make([]roachpb.RequestUnion, 0, maxSpans)
		n.run.fkSpanInfo = make([]insertFastPathFKSpanInfo, 0, maxSpans)
		if len(n.input) > 1 {
			n.run.fkSpanMap = make(map[string]struct{}, maxSpans)
		}
	}

	return n.run.ti.init(params.ctx, params.p.txn, params.EvalContext())
}

// Next is required because batchedPlanNode inherits from planNode, but
// batchedPlanNode doesn't really provide it. See the explanatory comments
// in plan_batch.go.
func (n *insertFastPathNode) Next(params runParams) (bool, error) { panic("not valid") }

// Values is required because batchedPlanNode inherits from planNode, but
// batchedPlanNode doesn't really provide it. See the explanatory comments
// in plan_batch.go.
func (n *insertFastPathNode) Values() tree.Datums { panic("not valid") }

// BatchedNext implements the batchedPlanNode interface.
func (n *insertFastPathNode) BatchedNext(params runParams) (bool, error) {
	if n.run.done {
		return false, nil
	}

	tracing.AnnotateTrace()

	// The fast path node does everything in one batch.
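	// For each VALUES row below we evaluate its expressions into inputBuf,
	// hand the row to the table inserter (processSourceRow), and queue any FK
	// existence checks. Only once every row has been staged do we send the FK
	// batch and finalize the insert batch.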

	for rowIdx, tupleRow := range n.input {
		if err := params.p.cancelChecker.Check(); err != nil {
			return false, err
		}
		inputRow := n.run.inputRow(rowIdx)
		for col, typedExpr := range tupleRow {
			var err error
			inputRow[col], err = typedExpr.Eval(params.EvalContext())
			if err != nil {
				err = interceptAlterColumnTypeParseError(n.run.insertCols, col, err)
				return false, err
			}
		}
		// Process the insertion for the current source row, potentially
		// accumulating the result row for later.
		if err := n.run.processSourceRow(params, inputRow); err != nil {
			return false, err
		}

		// Add FK existence checks.
		if len(n.run.fkChecks) > 0 {
			if err := n.run.addFKChecks(params.ctx, rowIdx, inputRow); err != nil {
				return false, err
			}
		}
	}

	// Perform the FK checks.
	// TODO(radu): we could run the FK batch in parallel with the main batch (if
	// we aren't auto-committing).
	if err := n.runFKChecks(params); err != nil {
		return false, err
	}

	if err := n.run.ti.atBatchEnd(params.ctx, n.run.traceKV); err != nil {
		return false, err
	}

	if _, err := n.run.ti.finalize(params.ctx, n.run.traceKV); err != nil {
		return false, err
	}
	// Remember we're done for the next call to BatchedNext().
	n.run.done = true

	// Possibly initiate a run of CREATE STATISTICS.
	params.ExecCfg().StatsRefresher.NotifyMutation(n.run.ti.tableDesc().ID, len(n.input))

	return true, nil
}

// BatchedCount implements the batchedPlanNode interface.
func (n *insertFastPathNode) BatchedCount() int { return len(n.input) }

// BatchedValues implements the batchedPlanNode interface.
func (n *insertFastPathNode) BatchedValues(rowIdx int) tree.Datums { return n.run.rows.At(rowIdx) }

func (n *insertFastPathNode) Close(ctx context.Context) {
	n.run.ti.close(ctx)
	if n.run.rows != nil {
		n.run.rows.Close(ctx)
	}
	*n = insertFastPathNode{}
	insertFastPathNodePool.Put(n)
}

// See planner.autoCommit.
func (n *insertFastPathNode) enableAutoCommit() {
	n.run.ti.enableAutoCommit()
}

// interceptAlterColumnTypeParseError wraps a type parsing error with a warning
// about the column undergoing an ALTER COLUMN TYPE schema change.
// If colNum is not -1, only the colNum'th column in insertCols will be checked
// for AlterColumnTypeInProgress, otherwise every column in insertCols will
// be checked.
func interceptAlterColumnTypeParseError(
	insertCols []sqlbase.ColumnDescriptor, colNum int, err error,
) error {
	var insertCol sqlbase.ColumnDescriptor

	// wrapParseError is a helper that checks whether an insertCol has the
	// AlterColumnTypeInProgress flag and, if so, wraps the parse error to state
	// that the failure may be caused by the column being altered. It returns
	// whether the error was wrapped, along with the (possibly wrapped) error.
	wrapParseError := func(insertCol sqlbase.ColumnDescriptor, colNum int, err error) (bool, error) {
		if insertCol.AlterColumnTypeInProgress {
			code := pgerror.GetPGCode(err)
			if code == pgcode.InvalidTextRepresentation {
				if colNum != -1 {
					// If a column is specified, we can be sure the parse error
					// is happening because the column is undergoing an alter
					// column type schema change.
					return true, errors.Wrapf(err,
						"This table is still undergoing the ALTER COLUMN TYPE schema change, "+
							"this insert is not supported until the schema change is finalized")
				}
				// If no column is specified, the error message is slightly changed to say
				// that the error MAY be because a column is undergoing an alter column type
				// schema change.
				return true, errors.Wrap(err,
					"This table is still undergoing the ALTER COLUMN TYPE schema change, "+
						"this insert may not be supported until the schema change is finalized")
			}
		}
		return false, err
	}

	// If a colNum is specified, we just check the one column for
	// AlterColumnTypeInProgress and return the error whether it's wrapped or not.
	if colNum != -1 {
		insertCol = insertCols[colNum]
		_, err = wrapParseError(insertCol, colNum, err)
		return err
	}

	// If the colNum is -1, we check every insertCol for AlterColumnTypeInProgress.
	for _, insertCol = range insertCols {
		var changed bool
		changed, err = wrapParseError(insertCol, colNum, err)
		if changed {
			return err
		}
	}

	return err
}
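
// As a rough illustration of when this node applies, consider a hypothetical
// schema (not taken from this package):
//
//	CREATE TABLE parent (id INT PRIMARY KEY);
//	CREATE TABLE child (id INT PRIMARY KEY, parent_id INT REFERENCES parent (id));
//
// The statement
//
//	INSERT INTO child VALUES (1, 10), (2, 10)
//
// has a small VALUES input and an FK check that is a direct lookup into the
// parent primary index, so it can be planned as an insertFastPathNode. Both
// rows are written in a single batch, and because they reference the same
// parent_id, fkSpanMap collapses the two existence checks into a single
// ScanRequest in fkBatch.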