github.com/cockroachdb/cockroach@v20.2.0-alpha.1+incompatible/pkg/sql/row/cascader.go

     1  // Copyright 2017 The Cockroach Authors.
     2  //
     3  // Use of this software is governed by the Business Source License
     4  // included in the file licenses/BSL.txt.
     5  //
     6  // As of the Change Date specified in that file, in accordance with
     7  // the Business Source License, use of this software will be governed
     8  // by the Apache License, Version 2.0, included in the file
     9  // licenses/APL.txt.
    10  
    11  package row
    12  
    13  import (
    14  	"context"
    15  
    16  	"github.com/cockroachdb/cockroach/pkg/keys"
    17  	"github.com/cockroachdb/cockroach/pkg/kv"
    18  	"github.com/cockroachdb/cockroach/pkg/roachpb"
    19  	"github.com/cockroachdb/cockroach/pkg/sql/catalog/resolver"
    20  	"github.com/cockroachdb/cockroach/pkg/sql/parser"
    21  	"github.com/cockroachdb/cockroach/pkg/sql/pgwire/pgcode"
    22  	"github.com/cockroachdb/cockroach/pkg/sql/pgwire/pgerror"
    23  	"github.com/cockroachdb/cockroach/pkg/sql/rowcontainer"
    24  	"github.com/cockroachdb/cockroach/pkg/sql/sem/tree"
    25  	"github.com/cockroachdb/cockroach/pkg/sql/sqlbase"
    26  	"github.com/cockroachdb/cockroach/pkg/util"
    27  	"github.com/cockroachdb/cockroach/pkg/util/errorutil/unimplemented"
    28  	"github.com/cockroachdb/cockroach/pkg/util/log"
    29  	"github.com/cockroachdb/errors"
    30  )
    31  
    32  // cascader is used to handle all referential integrity cascading actions.
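        //
        // A cascader is built by makeDeleteCascader or makeUpdateCascader and driven
        // through cascadeAll. The per-table maps below cache fetchers, deleters and
        // updaters so that each referencing table is set up at most once per cascader.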
    33  type cascader struct {
    34  	txn      *kv.Txn
    35  	fkTables FkTableMetadata
    36  	alloc    *sqlbase.DatumAlloc
    37  	evalCtx  *tree.EvalContext
    38  
    39  	indexPKRowFetchers map[TableID]map[sqlbase.IndexID]Fetcher // PK RowFetchers by Table ID and Index ID
    40  
    41  	// Row Deleters
    42  	rowDeleters        map[TableID]Deleter                    // RowDeleters by Table ID
    43  	deleterRowFetchers map[TableID]Fetcher                    // RowFetchers for rowDeleters by Table ID
    44  	deletedRows        map[TableID]*rowcontainer.RowContainer // Rows that have been deleted by Table ID
    45  
    46  	// Row Updaters
    47  	rowUpdaters        map[TableID]Updater                    // RowUpdaters by Table ID
    48  	updaterRowFetchers map[TableID]Fetcher                    // RowFetchers for rowUpdaters by Table ID
    49  	originalRows       map[TableID]*rowcontainer.RowContainer // Original values for rows that have been updated by Table ID
    50  	updatedRows        map[TableID]*rowcontainer.RowContainer // New values for rows that have been updated by Table ID
    51  }
    52  
    53  // makeDeleteCascader only creates a cascader if there is a chance of a
    54  // cascade. It returns a cascader if one is required and nil if not.
    55  func makeDeleteCascader(
    56  	ctx context.Context,
    57  	txn *kv.Txn,
    58  	table *sqlbase.ImmutableTableDescriptor,
    59  	tablesByID FkTableMetadata,
    60  	evalCtx *tree.EvalContext,
    61  	alloc *sqlbase.DatumAlloc,
    62  ) (*cascader, error) {
    63  	if evalCtx == nil {
    64  		return nil, errors.AssertionFailedf("evalContext is nil")
    65  	}
    66  	var required bool
    67  	for i := range table.InboundFKs {
    68  		ref := &table.InboundFKs[i]
    69  		referencingTable, ok := tablesByID[ref.OriginTableID]
    70  		if !ok {
    71  			return nil, errors.AssertionFailedf("could not find table:%d in table descriptor map", ref.OriginTableID)
    72  		}
    73  		if referencingTable.IsAdding {
    74  			// We can assume that a table being added but not yet public is empty,
    75  			// and thus does not need to be checked for cascading.
    76  			continue
    77  		}
    78  		foundFK, err := referencingTable.Desc.FindFKForBackRef(table.ID, ref)
    79  		if err != nil {
    80  			return nil, err
    81  		}
    82  		if foundFK.OnDelete == sqlbase.ForeignKeyReference_CASCADE ||
    83  			foundFK.OnDelete == sqlbase.ForeignKeyReference_SET_DEFAULT ||
    84  			foundFK.OnDelete == sqlbase.ForeignKeyReference_SET_NULL {
    85  			required = true
    86  			break
    87  		}
    88  	}
    89  	if !required {
    90  		return nil, nil
    91  	}
    92  
    93  	// TODO(knz,radu): FK cascading actions need to see the writes
    94  	// performed by the mutation.  Moreover, each stage of the cascading
    95  	// actions must observe the writes from the previous stages but not
    96  	// its own writes.
    97  	//
    98  	// In order to make this true, we need to split the cascading
    99  	// actions into separate sequencing steps, and have the first
   100  	// cascading action happen no earlier than the end of the
   101  	// "main" part of the statement. Unfortunately, the organization
   102  	// of the code does not allow this today.
   103  	//
   104  	// See: https://github.com/cockroachdb/cockroach/issues/33475
   105  	//
   106  	// In order to "make do" and preserve a modicum of FK semantics we
   107  	// thus need to disable step-wise execution here. The result is that
   108  	// it will also enable any interleaved read part to observe the
   109  	// mutation, and thus introduce the risk of a Halloween problem for
   110  	// any mutation that uses FK relationships.
   111  	_ = txn.ConfigureStepping(ctx, kv.SteppingDisabled)
   112  
   113  	return &cascader{
   114  		txn:                txn,
   115  		fkTables:           tablesByID,
   116  		indexPKRowFetchers: make(map[TableID]map[sqlbase.IndexID]Fetcher),
   117  		rowDeleters:        make(map[TableID]Deleter),
   118  		deleterRowFetchers: make(map[TableID]Fetcher),
   119  		deletedRows:        make(map[TableID]*rowcontainer.RowContainer),
   120  		rowUpdaters:        make(map[TableID]Updater),
   121  		updaterRowFetchers: make(map[TableID]Fetcher),
   122  		originalRows:       make(map[TableID]*rowcontainer.RowContainer),
   123  		updatedRows:        make(map[TableID]*rowcontainer.RowContainer),
   124  		evalCtx:            evalCtx,
   125  		alloc:              alloc,
   126  	}, nil
   127  }
   128  
   129  // makeUpdateCascader only creates a cascader if there is a chance of a
   130  // cascade. It returns a cascader if one is required and nil if not.
   131  func makeUpdateCascader(
   132  	ctx context.Context,
   133  	txn *kv.Txn,
   134  	table *sqlbase.ImmutableTableDescriptor,
   135  	tablesByID FkTableMetadata,
   136  	updateCols []sqlbase.ColumnDescriptor,
   137  	evalCtx *tree.EvalContext,
   138  	alloc *sqlbase.DatumAlloc,
   139  ) (*cascader, error) {
   140  	if evalCtx == nil {
   141  		return nil, errors.AssertionFailedf("evalContext is nil")
   142  	}
   143  	var required bool
   144  	colIDs := make(map[sqlbase.ColumnID]struct{})
   145  	for i := range updateCols {
   146  		colIDs[updateCols[i].ID] = struct{}{}
   147  	}
   148  	for i := range table.InboundFKs {
   149  		ref := &table.InboundFKs[i]
   150  		var match bool
   151  		for _, colID := range ref.ReferencedColumnIDs {
   152  			if _, exists := colIDs[colID]; exists {
   153  				match = true
   154  				break
   155  			}
   156  		}
   157  		if !match {
   158  			continue
   159  		}
   160  		referencingTable, ok := tablesByID[ref.OriginTableID]
   161  		if !ok {
   162  			return nil, errors.AssertionFailedf("could not find table:%d in table descriptor map", ref.OriginTableID)
   163  		}
   164  		if referencingTable.IsAdding {
   165  			// We can assume that a table being added but not yet public is empty,
   166  			// and thus does not need to be checked for cascading.
   167  			continue
   168  		}
   169  		foundFK, err := referencingTable.Desc.FindFKForBackRef(table.ID, ref)
   170  		if err != nil {
   171  			return nil, err
   172  		}
   173  		if foundFK.OnUpdate == sqlbase.ForeignKeyReference_CASCADE ||
   174  			foundFK.OnUpdate == sqlbase.ForeignKeyReference_SET_DEFAULT ||
   175  			foundFK.OnUpdate == sqlbase.ForeignKeyReference_SET_NULL {
   176  			required = true
   177  			break
   178  		}
   179  	}
   180  	if !required {
   181  		return nil, nil
   182  	}
   183  
   184  	// TODO(knz,radu): FK cascading actions need to see the writes
   185  	// performed by the mutation.  Moreover, each stage of the cascading
   186  	// actions must observe the writes from the previous stages but not
   187  	// its own writes.
   188  	//
   189  	// In order to make this true, we need to split the cascading
   190  	// actions into separate sequencing steps, and have the first
   191  	// cascading action happen no earlier than the end of the
   192  	// "main" part of the statement. Unfortunately, the organization
   193  	// of the code does not allow this today.
   194  	//
   195  	// See: https://github.com/cockroachdb/cockroach/issues/33475
   196  	//
   197  	// In order to "make do" and preserve a modicum of FK semantics we
   198  	// thus need to disable step-wise execution here. The result is that
   199  	// it will also enable any interleaved read part to observe the
   200  	// mutation, and thus introduce the risk of a Halloween problem for
   201  	// any mutation that uses FK relationships.
   202  	_ = txn.ConfigureStepping(ctx, kv.SteppingDisabled)
   203  
   204  	return &cascader{
   205  		txn:                txn,
   206  		fkTables:           tablesByID,
   207  		indexPKRowFetchers: make(map[TableID]map[sqlbase.IndexID]Fetcher),
   208  		rowDeleters:        make(map[TableID]Deleter),
   209  		deleterRowFetchers: make(map[TableID]Fetcher),
   210  		deletedRows:        make(map[TableID]*rowcontainer.RowContainer),
   211  		rowUpdaters:        make(map[TableID]Updater),
   212  		updaterRowFetchers: make(map[TableID]Fetcher),
   213  		originalRows:       make(map[TableID]*rowcontainer.RowContainer),
   214  		updatedRows:        make(map[TableID]*rowcontainer.RowContainer),
   215  		evalCtx:            evalCtx,
   216  		alloc:              alloc,
   217  	}, nil
   218  }
   219  
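        // clear empties all of the row containers accumulated while cascading. It is
        // deferred by cascadeAll so the containers are reset once a cascade completes.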
   220  func (c *cascader) clear(ctx context.Context) {
   221  	for _, container := range c.deletedRows {
   222  		container.Clear(ctx)
   223  	}
   224  	for _, container := range c.originalRows {
   225  		container.Clear(ctx)
   226  	}
   227  	for _, container := range c.updatedRows {
   228  		container.Clear(ctx)
   229  	}
   230  }
   231  
   232  // spanForIndexValues creates a span against an index to extract the primary
   233  // keys needed for cascading.
   234  func spanForIndexValues(
   235  	table *sqlbase.ImmutableTableDescriptor,
   236  	index *sqlbase.IndexDescriptor,
   237  	prefixLen int,
   238  	match sqlbase.ForeignKeyReference_Match,
   239  	indexColIDs map[sqlbase.ColumnID]int,
   240  	values []tree.Datum,
   241  	keyPrefix []byte,
   242  ) (roachpb.Span, error) {
   243  	// See https://github.com/cockroachdb/cockroach/issues/20305 or
   244  	// https://www.postgresql.org/docs/11/sql-createtable.html for details on the
   245  	// different composite foreign key matching methods.
   246  	switch match {
   247  	case sqlbase.ForeignKeyReference_SIMPLE:
   248  		for _, rowIndex := range indexColIDs {
   249  			if values[rowIndex] == tree.DNull {
   250  				return roachpb.Span{}, nil
   251  			}
   252  		}
   253  	case sqlbase.ForeignKeyReference_FULL:
   254  		var nulls, notNulls bool
   255  		for _, rowIndex := range indexColIDs {
   256  			if values[rowIndex] == tree.DNull {
   257  				nulls = true
   258  			} else {
   259  				notNulls = true
   260  			}
   261  			if nulls && notNulls {
   262  				// TODO(bram): expand this error to show more details.
   263  				return roachpb.Span{}, pgerror.Newf(pgcode.ForeignKeyViolation,
   264  					"foreign key violation: MATCH FULL does not allow mixing of null and nonnull values %s",
   265  					values,
   266  				)
   267  			}
   268  		}
   269  		// Only if all the values are null should we skip the FK check for
   270  		// MATCH FULL.
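        		// For example, with a two-column key, (1, NULL) is rejected above as a
        		// mix of null and non-null values, (NULL, NULL) skips the check entirely,
        		// and (1, 2) falls through to the span computation below.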
   271  		if nulls {
   272  			return roachpb.Span{}, nil
   273  		}
   274  
   275  	case sqlbase.ForeignKeyReference_PARTIAL:
   276  		return roachpb.Span{}, unimplemented.NewWithIssue(20305, "MATCH PARTIAL not supported")
   277  
   278  	default:
   279  		return roachpb.Span{}, errors.AssertionFailedf("unknown composite key match type: %v", match)
   280  	}
   281  	span, _, err := sqlbase.EncodePartialIndexSpan(table.TableDesc(), index, prefixLen, indexColIDs, values, keyPrefix)
   282  	if err != nil {
   283  		return roachpb.Span{}, err
   284  	}
   285  	return span, nil
   286  }
   287  
   288  // batchRequestForIndexValues creates a batch request against an index to
   289  // extract the primary keys needed for cascading. It also returns the
   290  // colIDtoRowIndex that will map the columns that have been retrieved as part of
   291  // the request to the referencing table.
   292  func batchRequestForIndexValues(
   293  	ctx context.Context,
   294  	codec keys.SQLCodec,
   295  	referencedIndex *sqlbase.IndexDescriptor,
   296  	referencingTable *sqlbase.ImmutableTableDescriptor,
   297  	referencingIndex *sqlbase.IndexDescriptor,
   298  	match sqlbase.ForeignKeyReference_Match,
   299  	values cascadeQueueElement,
   300  	traceKV bool,
   301  ) (roachpb.BatchRequest, map[sqlbase.ColumnID]int, error) {
   302  
   303  	// TODO(bram): consider caching some of these values
   304  	keyPrefix := sqlbase.MakeIndexKeyPrefix(codec, referencingTable.TableDesc(), referencingIndex.ID)
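        	// Only the first prefixLen columns of the referenced and referencing
        	// indexes (the smaller of the two column counts) are paired up to build
        	// the lookup spans.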
   305  	prefixLen := len(referencingIndex.ColumnIDs)
   306  	if len(referencedIndex.ColumnIDs) < prefixLen {
   307  		prefixLen = len(referencedIndex.ColumnIDs)
   308  	}
   309  
   310  	colIDtoRowIndex := make(map[sqlbase.ColumnID]int, len(referencedIndex.ColumnIDs))
   311  	for i, referencedColID := range referencedIndex.ColumnIDs[:prefixLen] {
   312  		if found, ok := values.colIDtoRowIndex[referencedColID]; ok {
   313  			colIDtoRowIndex[referencingIndex.ColumnIDs[i]] = found
   314  		} else {
   315  			return roachpb.BatchRequest{}, nil, pgerror.Newf(pgcode.ForeignKeyViolation,
   316  				"missing value for column %q in multi-part foreign key", referencedIndex.ColumnNames[i],
   317  			)
   318  		}
   319  	}
   320  
   321  	var req roachpb.BatchRequest
   322  	for i := values.startIndex; i < values.endIndex; i++ {
   323  		span, err := spanForIndexValues(
   324  			referencingTable,
   325  			referencingIndex,
   326  			prefixLen,
   327  			match,
   328  			colIDtoRowIndex,
   329  			values.originalValues.At(i),
   330  			keyPrefix,
   331  		)
   332  		if err != nil {
   333  			return roachpb.BatchRequest{}, nil, err
   334  		}
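        		// An empty span means this row cannot cascade (for example, a NULL key
        		// value under MATCH SIMPLE, or an all-NULL key under MATCH FULL), so it
        		// is skipped.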
   335  		if span.EndKey != nil {
   336  			req.Add(&roachpb.ScanRequest{RequestHeader: roachpb.RequestHeaderFromSpan(span)})
   337  			if traceKV {
   338  				log.VEventf(ctx, 2, "CascadeScan %s", span)
   339  			}
   340  		}
   341  	}
   342  	return req, colIDtoRowIndex, nil
   343  }
   344  
   345  // spanForPKValues creates a span against the primary index of a table and is
   346  // used to fetch rows for cascading.
   347  func spanForPKValues(
   348  	codec keys.SQLCodec,
   349  	table *sqlbase.ImmutableTableDescriptor,
   350  	fetchColIDtoRowIndex map[sqlbase.ColumnID]int,
   351  	values tree.Datums,
   352  ) (roachpb.Span, error) {
   353  	return spanForIndexValues(
   354  		table,
   355  		&table.PrimaryIndex,
   356  		len(table.PrimaryIndex.ColumnIDs),
   357  		sqlbase.ForeignKeyReference_SIMPLE, /* primary key lookup can always use MATCH SIMPLE */
   358  		fetchColIDtoRowIndex,
   359  		values,
   360  		sqlbase.MakeIndexKeyPrefix(codec, table.TableDesc(), table.PrimaryIndex.ID),
   361  	)
   362  }
   363  
   364  // batchRequestForPKValues creates a batch request against the primary index of
   365  // a table and is used to fetch rows for cascading.
   366  func batchRequestForPKValues(
   367  	ctx context.Context,
   368  	codec keys.SQLCodec,
   369  	table *sqlbase.ImmutableTableDescriptor,
   370  	fetchColIDtoRowIndex map[sqlbase.ColumnID]int,
   371  	values *rowcontainer.RowContainer,
   372  	traceKV bool,
   373  ) (roachpb.BatchRequest, error) {
   374  	var req roachpb.BatchRequest
   375  	for i := 0; i < values.Len(); i++ {
   376  		span, err := spanForPKValues(codec, table, fetchColIDtoRowIndex, values.At(i))
   377  		if err != nil {
   378  			return roachpb.BatchRequest{}, err
   379  		}
   380  		if span.EndKey != nil {
   381  			if traceKV {
   382  				log.VEventf(ctx, 2, "CascadeScan %s", span)
   383  			}
   384  			req.Add(&roachpb.ScanRequest{RequestHeader: roachpb.RequestHeaderFromSpan(span)})
   385  		}
   386  	}
   387  	return req, nil
   388  }
   389  
   390  // addIndexPKRowFetcher will create or load a cached row fetcher on an index to
   391  // fetch the primary keys of the rows that will be affected by a cascading
   392  // action.
   393  func (c *cascader) addIndexPKRowFetcher(
   394  	table *sqlbase.ImmutableTableDescriptor, index *sqlbase.IndexDescriptor,
   395  ) (Fetcher, error) {
   396  	// Is there a cached row fetcher?
   397  	rowFetchersForTable, exists := c.indexPKRowFetchers[table.ID]
   398  	if exists {
   399  		rowFetcher, exists := rowFetchersForTable[index.ID]
   400  		if exists {
   401  			return rowFetcher, nil
   402  		}
   403  	} else {
   404  		c.indexPKRowFetchers[table.ID] = make(map[sqlbase.IndexID]Fetcher)
   405  	}
   406  
   407  	// Create a new row fetcher. Only the primary key columns are required.
   408  	var colDesc []sqlbase.ColumnDescriptor
   409  	for _, id := range table.PrimaryIndex.ColumnIDs {
   410  		cDesc, err := table.FindColumnByID(id)
   411  		if err != nil {
   412  			return Fetcher{}, err
   413  		}
   414  		colDesc = append(colDesc, *cDesc)
   415  	}
   416  	var valNeededForCol util.FastIntSet
   417  	valNeededForCol.AddRange(0, len(colDesc)-1)
   418  	isSecondary := table.PrimaryIndex.ID != index.ID
   419  	var rowFetcher Fetcher
   420  	if err := rowFetcher.Init(
   421  		c.evalCtx.Codec,
   422  		false, /* reverse */
   423  		sqlbase.ScanLockingStrength_FOR_NONE,
   424  		false, /* returnRangeInfo */
   425  		false, /* isCheck */
   426  		c.alloc,
   427  		FetcherTableArgs{
   428  			Desc:             table,
   429  			Index:            index,
   430  			ColIdxMap:        ColIDtoRowIndexFromCols(colDesc),
   431  			IsSecondaryIndex: isSecondary,
   432  			Cols:             colDesc,
   433  			ValNeededForCol:  valNeededForCol,
   434  		},
   435  	); err != nil {
   436  		return Fetcher{}, err
   437  	}
   438  	// Cache the row fetcher.
   439  	c.indexPKRowFetchers[table.ID][index.ID] = rowFetcher
   440  	return rowFetcher, nil
   441  }
   442  
   443  // addRowDeleter creates the row deleter and primary index row fetcher.
   444  func (c *cascader) addRowDeleter(
   445  	ctx context.Context, table *sqlbase.ImmutableTableDescriptor,
   446  ) (Deleter, Fetcher, error) {
   447  	// Is there a cached row fetcher and deleter?
   448  	if rowDeleter, exists := c.rowDeleters[table.ID]; exists {
   449  		rowFetcher, existsFetcher := c.deleterRowFetchers[table.ID]
   450  		if !existsFetcher {
   451  			return Deleter{}, Fetcher{}, errors.AssertionFailedf("no corresponding row fetcher for the row deleter for table: (%d)%s",
   452  				table.ID, table.Name,
   453  			)
   454  		}
   455  		return rowDeleter, rowFetcher, nil
   456  	}
   457  
   458  	// Create the row deleter. The row deleter is needed prior to the row fetcher
   459  	// as it will dictate what columns are required in the row fetcher.
   460  	rowDeleter, err := makeRowDeleterWithoutCascader(
   461  		ctx,
   462  		c.txn,
   463  		c.evalCtx.Codec,
   464  		table,
   465  		c.fkTables,
   466  		nil, /* requestedCol */
   467  		CheckFKs,
   468  		c.alloc,
   469  	)
   470  	if err != nil {
   471  		return Deleter{}, Fetcher{}, err
   472  	}
   473  
   474  	// Create the row fetcher that will retrieve the rows and columns needed for
   475  	// deletion.
   476  	var valNeededForCol util.FastIntSet
   477  	valNeededForCol.AddRange(0, len(rowDeleter.FetchCols)-1)
   478  	tableArgs := FetcherTableArgs{
   479  		Desc:             table,
   480  		Index:            &table.PrimaryIndex,
   481  		ColIdxMap:        rowDeleter.FetchColIDtoRowIndex,
   482  		IsSecondaryIndex: false,
   483  		Cols:             rowDeleter.FetchCols,
   484  		ValNeededForCol:  valNeededForCol,
   485  	}
   486  	var rowFetcher Fetcher
   487  	if err := rowFetcher.Init(
   488  		c.evalCtx.Codec,
   489  		false, /* reverse */
   490  		// TODO(nvanbenschoten): it might make sense to use a FOR_UPDATE locking
   491  		// strength here. Consider hooking this in to the same knob that will
   492  		// control whether we perform locking implicitly during DELETEs.
   493  		sqlbase.ScanLockingStrength_FOR_NONE,
   494  		false, /* returnRangeInfo */
   495  		false, /* isCheck */
   496  		c.alloc,
   497  		tableArgs,
   498  	); err != nil {
   499  		return Deleter{}, Fetcher{}, err
   500  	}
   501  
   502  	// Cache both the fetcher and deleter.
   503  	c.rowDeleters[table.ID] = rowDeleter
   504  	c.deleterRowFetchers[table.ID] = rowFetcher
   505  	return rowDeleter, rowFetcher, nil
   506  }
   507  
   508  // addRowUpdater creates the row updater and primary index row fetcher.
   509  func (c *cascader) addRowUpdater(
   510  	ctx context.Context, table *sqlbase.ImmutableTableDescriptor,
   511  ) (Updater, Fetcher, error) {
   512  	// Is there a cached updater?
   513  	rowUpdater, existsUpdater := c.rowUpdaters[table.ID]
   514  	if existsUpdater {
   515  		rowFetcher, existsFetcher := c.updaterRowFetchers[table.ID]
   516  		if !existsFetcher {
   517  			return Updater{}, Fetcher{}, errors.AssertionFailedf("no corresponding row fetcher for the row updater for table: (%d)%s",
   518  				table.ID, table.Name,
   519  			)
   520  		}
   521  		return rowUpdater, rowFetcher, nil
   522  	}
   523  
   524  	// Create the row updater. The row updater requires all the columns in the
   525  	// table.
   526  	rowUpdater, err := makeUpdaterWithoutCascader(
   527  		ctx,
   528  		c.txn,
   529  		c.evalCtx.Codec,
   530  		table,
   531  		c.fkTables,
   532  		table.Columns,
   533  		nil, /* requestedCol */
   534  		UpdaterDefault,
   535  		CheckFKs,
   536  		c.alloc,
   537  	)
   538  	if err != nil {
   539  		return Updater{}, Fetcher{}, err
   540  	}
   541  
   542  	// Create the row fetcher that will retrieve the rows and columns needed for
   543  	// the update.
   544  	var valNeededForCol util.FastIntSet
   545  	valNeededForCol.AddRange(0, len(rowUpdater.FetchCols)-1)
   546  	tableArgs := FetcherTableArgs{
   547  		Desc:             table,
   548  		Index:            &table.PrimaryIndex,
   549  		ColIdxMap:        rowUpdater.FetchColIDtoRowIndex,
   550  		IsSecondaryIndex: false,
   551  		Cols:             rowUpdater.FetchCols,
   552  		ValNeededForCol:  valNeededForCol,
   553  	}
   554  	var rowFetcher Fetcher
   555  	if err := rowFetcher.Init(
   556  		c.evalCtx.Codec,
   557  		false, /* reverse */
   558  		// TODO(nvanbenschoten): it might make sense to use a FOR_UPDATE locking
   559  		// strength here. Consider hooking this in to the same knob that will
   560  		// control whether we perform locking implicitly during UPDATEs.
   561  		sqlbase.ScanLockingStrength_FOR_NONE,
   562  		false, /* returnRangeInfo */
   563  		false, /* isCheck */
   564  		c.alloc,
   565  		tableArgs,
   566  	); err != nil {
   567  		return Updater{}, Fetcher{}, err
   568  	}
   569  
   570  	// Cache the updater and the fetcher.
   571  	c.rowUpdaters[table.ID] = rowUpdater
   572  	c.updaterRowFetchers[table.ID] = rowFetcher
   573  	return rowUpdater, rowFetcher, nil
   574  }
   575  
   576  // deleteRows performs row deletions on a single table for all rows that match
   577  // the values. Returns the values of the rows that were deleted. This deletion
   578  // happens in a single batch.
   579  func (c *cascader) deleteRows(
   580  	ctx context.Context,
   581  	referencedIndex *sqlbase.IndexDescriptor,
   582  	referencingTable *sqlbase.ImmutableTableDescriptor,
   583  	referencingIndex *sqlbase.IndexDescriptor,
   584  	match sqlbase.ForeignKeyReference_Match,
   585  	values cascadeQueueElement,
   586  	traceKV bool,
   587  ) (*rowcontainer.RowContainer, map[sqlbase.ColumnID]int, int, error) {
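        	// The deletion proceeds in stages: scan the referencing index to collect
        	// the primary keys of the affected rows, fetch the full rows through the
        	// primary index, delete them in a single batch, and record the deleted
        	// values so the remaining FK constraints can be checked in cascadeAll.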
   588  	// Create the span to search for index values.
   589  	// TODO(bram): This initial index lookup can be skipped if the index is the
   590  	// primary index.
   591  	if traceKV {
   592  		log.VEventf(ctx, 2, "cascading delete into table: %d using index: %d",
   593  			referencingTable.ID, referencingIndex.ID,
   594  		)
   595  	}
   596  	req, _, err := batchRequestForIndexValues(
   597  		ctx, c.evalCtx.Codec, referencedIndex, referencingTable, referencingIndex, match, values, traceKV,
   598  	)
   599  	if err != nil {
   600  		return nil, nil, 0, err
   601  	}
   602  	// If there are no spans to search, there is no need to cascade.
   603  	if len(req.Requests) == 0 {
   604  		return nil, nil, 0, nil
   605  	}
   606  	br, roachErr := c.txn.Send(ctx, req)
   607  	if roachErr != nil {
   608  		return nil, nil, 0, roachErr.GoError()
   609  	}
   610  
   611  	// Create or retrieve the index pk row fetcher.
   612  	indexPKRowFetcher, err := c.addIndexPKRowFetcher(referencingTable, referencingIndex)
   613  	if err != nil {
   614  		return nil, nil, 0, err
   615  	}
   616  	indexPKRowFetcherColIDToRowIndex := indexPKRowFetcher.tables[0].colIdxMap
   617  
   618  	// Fetch all the primary keys that need to be deleted.
   619  	// TODO(Bram): consider chunking this into batches of n primary keys, perhaps 100.
   620  	pkColTypeInfo, err := sqlbase.MakeColTypeInfo(referencingTable, indexPKRowFetcherColIDToRowIndex)
   621  	if err != nil {
   622  		return nil, nil, 0, err
   623  	}
   624  	primaryKeysToDelete := rowcontainer.NewRowContainer(
   625  		c.evalCtx.Mon.MakeBoundAccount(), pkColTypeInfo, values.originalValues.Len(),
   626  	)
   627  	defer primaryKeysToDelete.Close(ctx)
   628  
   629  	for _, resp := range br.Responses {
   630  		fetcher := SpanKVFetcher{
   631  			KVs: resp.GetInner().(*roachpb.ScanResponse).Rows,
   632  		}
   633  		if err := indexPKRowFetcher.StartScanFrom(ctx, &fetcher); err != nil {
   634  			return nil, nil, 0, err
   635  		}
   636  		for !indexPKRowFetcher.kvEnd {
   637  			primaryKey, _, _, err := indexPKRowFetcher.NextRowDecoded(ctx)
   638  			if err != nil {
   639  				return nil, nil, 0, err
   640  			}
   641  			if _, err := primaryKeysToDelete.AddRow(ctx, primaryKey); err != nil {
   642  				return nil, nil, 0, err
   643  			}
   644  		}
   645  	}
   646  
   647  	// Early exit if no rows need to be deleted.
   648  	if primaryKeysToDelete.Len() == 0 {
   649  		return nil, nil, 0, nil
   650  	}
   651  
   652  	// Create or retrieve the row deleter and primary index row fetcher.
   653  	rowDeleter, pkRowFetcher, err := c.addRowDeleter(ctx, referencingTable)
   654  	if err != nil {
   655  		return nil, nil, 0, err
   656  	}
   657  
   658  	// Create a batch request to get all the spans of the primary keys that need
   659  	// to be deleted.
   660  	pkLookupReq, err := batchRequestForPKValues(
   661  		ctx, c.evalCtx.Codec, referencingTable, indexPKRowFetcherColIDToRowIndex, primaryKeysToDelete, traceKV,
   662  	)
   663  	if err != nil {
   664  		return nil, nil, 0, err
   665  	}
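        	// The primary keys have served their purpose once the lookup batch has
        	// been built, so clear the container to release its rows early.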
   666  	primaryKeysToDelete.Clear(ctx)
   667  	// If there are no spans to search, there is no need to cascade.
   668  	if len(pkLookupReq.Requests) == 0 {
   669  		return nil, nil, 0, nil
   670  	}
   671  	pkResp, roachErr := c.txn.Send(ctx, pkLookupReq)
   672  	if roachErr != nil {
   673  		return nil, nil, 0, roachErr.GoError()
   674  	}
   675  
   676  	// Add the values to be checked for constraint violations after all cascading
   677  	// changes have completed. Here we either fetch or create the deleted rows
   678  	// rowContainer for the table. This rowContainer is also used by the queue
   679  	// to avoid having to double the memory used.
   680  	if _, exists := c.deletedRows[referencingTable.ID]; !exists {
   681  		// Fetch the rows for deletion and store them in a container.
   682  		colTypeInfo, err := sqlbase.MakeColTypeInfo(referencingTable, rowDeleter.FetchColIDtoRowIndex)
   683  		if err != nil {
   684  			return nil, nil, 0, err
   685  		}
   686  		c.deletedRows[referencingTable.ID] = rowcontainer.NewRowContainer(
   687  			c.evalCtx.Mon.MakeBoundAccount(), colTypeInfo, primaryKeysToDelete.Len(),
   688  		)
   689  	}
   690  	deletedRows := c.deletedRows[referencingTable.ID]
   691  	deletedRowsStartIndex := deletedRows.Len()
   692  
   693  	// Delete all the rows in a new batch.
   694  	batch := c.txn.NewBatch()
   695  
   696  	for _, resp := range pkResp.Responses {
   697  		fetcher := SpanKVFetcher{
   698  			KVs: resp.GetInner().(*roachpb.ScanResponse).Rows,
   699  		}
   700  		if err := pkRowFetcher.StartScanFrom(ctx, &fetcher); err != nil {
   701  			return nil, nil, 0, err
   702  		}
   703  		for !pkRowFetcher.kvEnd {
   704  			rowToDelete, _, _, err := pkRowFetcher.NextRowDecoded(ctx)
   705  			if err != nil {
   706  				return nil, nil, 0, err
   707  			}
   708  
   709  			// Add the row to be checked for consistency changes.
   710  			if _, err := deletedRows.AddRow(ctx, rowToDelete); err != nil {
   711  				return nil, nil, 0, err
   712  			}
   713  
   714  			// Delete the row.
   715  			if err := rowDeleter.DeleteRow(ctx, batch, rowToDelete, SkipFKs, traceKV); err != nil {
   716  				return nil, nil, 0, err
   717  			}
   718  		}
   719  	}
   720  
   721  	// Run the batch.
   722  	if err := c.txn.Run(ctx, batch); err != nil {
   723  		return nil, nil, 0, ConvertBatchError(ctx, referencingTable, batch)
   724  	}
   725  
   726  	return deletedRows, rowDeleter.FetchColIDtoRowIndex, deletedRowsStartIndex, nil
   727  }
   728  
   729  // updateRows performs row updates on a single table for all rows that match
   730  // the values. Returns both the values of the rows that were updated and their
   731  // new values. This update happens in a single batch.
   732  func (c *cascader) updateRows(
   733  	ctx context.Context,
   734  	referencedIndex *sqlbase.IndexDescriptor,
   735  	referencingTable *sqlbase.ImmutableTableDescriptor,
   736  	referencingIndex *sqlbase.IndexDescriptor,
   737  	match sqlbase.ForeignKeyReference_Match,
   738  	values cascadeQueueElement,
   739  	action sqlbase.ForeignKeyReference_Action,
   740  	fk *sqlbase.ForeignKeyConstraint,
   741  	traceKV bool,
   742  ) (*rowcontainer.RowContainer, *rowcontainer.RowContainer, map[sqlbase.ColumnID]int, int, error) {
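        	// The flow mirrors deleteRows: for each changed row, scan the referencing
        	// index for the affected primary keys, fetch the full rows through the
        	// primary index, compute the new values according to the cascade action,
        	// and apply the updates in a single batch while recording the original and
        	// updated rows for the FK checks performed later in cascadeAll.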
   743  	// Create the span to search for index values.
   744  	if traceKV {
   745  		log.VEventf(ctx, 2, "cascading update into table: %d using index: %d",
   746  			referencingTable.ID, referencingIndex.ID,
   747  		)
   748  	}
   749  
   750  	// Create or retrieve the row updater and row fetcher.
   751  	rowUpdater, rowFetcher, err := c.addRowUpdater(ctx, referencingTable)
   752  	if err != nil {
   753  		return nil, nil, nil, 0, err
   754  	}
   755  
   756  	// Add the values to be checked for constraint violations after all cascading
   757  	// changes have completed. Here we either fetch or create the rowContainers
   758  	// for both the original and updated values for the table and index combo.
   759  	// These rowContainers are also used by the queue to avoid having to double
   760  	// the memory used.
   761  	if _, exists := c.originalRows[referencingTable.ID]; !exists {
   762  		colTypeInfo, err := sqlbase.MakeColTypeInfo(referencingTable, rowUpdater.FetchColIDtoRowIndex)
   763  		if err != nil {
   764  			return nil, nil, nil, 0, err
   765  		}
   766  		c.originalRows[referencingTable.ID] = rowcontainer.NewRowContainer(
   767  			c.evalCtx.Mon.MakeBoundAccount(), colTypeInfo, values.originalValues.Len(),
   768  		)
   769  		c.updatedRows[referencingTable.ID] = rowcontainer.NewRowContainer(
   770  			c.evalCtx.Mon.MakeBoundAccount(), colTypeInfo, values.originalValues.Len(),
   771  		)
   772  	}
   773  	originalRows := c.originalRows[referencingTable.ID]
   774  	updatedRows := c.updatedRows[referencingTable.ID]
   775  	startIndex := originalRows.Len()
   776  
   777  	// Update all the rows in a new batch.
   778  	batch := c.txn.NewBatch()
   779  
   780  	// Populate a map of all columns that need to be set if the action is not
   781  	// cascade.
   782  	var referencingIndexValuesByColIDs map[sqlbase.ColumnID]tree.Datum
   783  	switch action {
   784  	case sqlbase.ForeignKeyReference_SET_NULL:
   785  		referencingIndexValuesByColIDs = make(map[sqlbase.ColumnID]tree.Datum)
   786  		for _, columnID := range referencingIndex.ColumnIDs {
   787  			referencingIndexValuesByColIDs[columnID] = tree.DNull
   788  		}
   789  	case sqlbase.ForeignKeyReference_SET_DEFAULT:
   790  		referencingIndexValuesByColIDs = make(map[sqlbase.ColumnID]tree.Datum)
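        		// Each referencing column's default expression is parsed, type checked,
        		// normalized and evaluated once up front; the resulting datums are then
        		// reused for every row updated below.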
   791  		for _, columnID := range referencingIndex.ColumnIDs {
   792  			column, err := referencingTable.FindColumnByID(columnID)
   793  			if err != nil {
   794  				return nil, nil, nil, 0, err
   795  			}
   796  			// If the default expression is nil, treat it as a SET NULL case.
   797  			if !column.HasDefault() {
   798  				referencingIndexValuesByColIDs[columnID] = tree.DNull
   799  				continue
   800  			}
   801  			parsedExpr, err := parser.ParseExpr(*column.DefaultExpr)
   802  			if err != nil {
   803  				return nil, nil, nil, 0, err
   804  			}
   805  			typedExpr, err := tree.TypeCheck(ctx, parsedExpr, nil, column.Type)
   806  			if err != nil {
   807  				return nil, nil, nil, 0, err
   808  			}
   809  			normalizedExpr, err := c.evalCtx.NormalizeExpr(typedExpr)
   810  			if err != nil {
   811  				return nil, nil, nil, 0, err
   812  			}
   813  			referencingIndexValuesByColIDs[columnID], err = normalizedExpr.Eval(c.evalCtx)
   814  			if err != nil {
   815  				return nil, nil, nil, 0, err
   816  			}
   817  		}
   818  	}
   819  
   820  	// Sadly, this operation cannot be batched the same way as deletes, as the
   821  	// values being updated will change based on both the original and updated
   822  	// values.
   823  	for i := values.startIndex; i < values.endIndex; i++ {
   824  		// If the rows are the same on the FK columns, then we don't want to continue the cascade.
   825  		// We skip this check when values.updatedValues is nil. This happens when a cascade is
   826  		// started by a delete, in which case no updated-values row container is created because
   827  		// deleted rows have no updated values.
   828  		// TODO (rohany): this check could be sped up greatly by storing a mask/map of what
   829  		//  actually changed instead of having to check every FK column.
   830  		if values.updatedValues != nil {
   831  			allFKColsUnchanged := true
   832  			origValues := values.originalValues.At(i)
   833  			updatedValues := values.updatedValues.At(i)
   834  			for _, colID := range fk.ReferencedColumnIDs {
   835  				colIdx := values.colIDtoRowIndex[colID]
   836  				if origValues[colIdx].Compare(c.evalCtx, updatedValues[colIdx]) != 0 {
   837  					allFKColsUnchanged = false
   838  					break
   839  				}
   840  			}
   841  			if allFKColsUnchanged {
   842  				continue
   843  			}
   844  		}
   845  
   846  		// Extract a single value to update at a time.
   847  		req, valueColIDtoRowIndex, err := batchRequestForIndexValues(
   848  			ctx, c.evalCtx.Codec, referencedIndex, referencingTable, referencingIndex, match, cascadeQueueElement{
   849  				startIndex:      i,
   850  				endIndex:        i + 1,
   851  				originalValues:  values.originalValues,
   852  				updatedValues:   values.updatedValues,
   853  				table:           values.table,
   854  				colIDtoRowIndex: values.colIDtoRowIndex,
   855  			},
   856  			traceKV,
   857  		)
   858  		if err != nil {
   859  			return nil, nil, nil, 0, err
   860  		}
   861  		// If there are no spans to search, there is no need to cascade.
   862  		if len(req.Requests) == 0 {
   863  			return nil, nil, nil, 0, nil
   864  		}
   865  		br, roachErr := c.txn.Send(ctx, req)
   866  		if roachErr != nil {
   867  			return nil, nil, nil, 0, roachErr.GoError()
   868  		}
   869  
   870  		// Create or retrieve the index pk row fetcher.
   871  		indexPKRowFetcher, err := c.addIndexPKRowFetcher(referencingTable, referencingIndex)
   872  		if err != nil {
   873  			return nil, nil, nil, 0, err
   874  		}
   875  		indexPKRowFetcherColIDToRowIndex := indexPKRowFetcher.tables[0].colIdxMap
   876  
   877  		// Fetch all the primary keys for rows that will be updated.
   878  		// TODO(Bram): consider chunking this into batches of n primary keys, perhaps 100.
   879  		pkColTypeInfo, err := sqlbase.MakeColTypeInfo(referencingTable, indexPKRowFetcherColIDToRowIndex)
   880  		if err != nil {
   881  			return nil, nil, nil, 0, err
   882  		}
   883  		primaryKeysToUpdate := rowcontainer.NewRowContainer(
   884  			c.evalCtx.Mon.MakeBoundAccount(), pkColTypeInfo, values.originalValues.Len(),
   885  		)
   886  		defer primaryKeysToUpdate.Close(ctx)
   887  
   888  		for _, resp := range br.Responses {
   889  			fetcher := SpanKVFetcher{
   890  				KVs: resp.GetInner().(*roachpb.ScanResponse).Rows,
   891  			}
   892  			if err := indexPKRowFetcher.StartScanFrom(ctx, &fetcher); err != nil {
   893  				return nil, nil, nil, 0, err
   894  			}
   895  			for !indexPKRowFetcher.kvEnd {
   896  				primaryKey, _, _, err := indexPKRowFetcher.NextRowDecoded(ctx)
   897  				if err != nil {
   898  					return nil, nil, nil, 0, err
   899  				}
   900  				if _, err := primaryKeysToUpdate.AddRow(ctx, primaryKey); err != nil {
   901  					return nil, nil, nil, 0, err
   902  				}
   903  			}
   904  		}
   905  
   906  		// Early exit if no rows need to be updated.
   907  		if primaryKeysToUpdate.Len() == 0 {
   908  			continue
   909  		}
   910  
   911  		// Create a batch request to get all the spans of the primary keys that need
   912  		// to be updated.
   913  		pkLookupReq, err := batchRequestForPKValues(
   914  			ctx, c.evalCtx.Codec, referencingTable, indexPKRowFetcherColIDToRowIndex, primaryKeysToUpdate, traceKV,
   915  		)
   916  		if err != nil {
   917  			return nil, nil, nil, 0, err
   918  		}
   919  		primaryKeysToUpdate.Clear(ctx)
   920  		// If there are no spans to search, there is no need to cascade.
   921  		if len(pkLookupReq.Requests) == 0 {
   922  			return nil, nil, nil, 0, nil
   923  		}
   924  		pkResp, roachErr := c.txn.Send(ctx, pkLookupReq)
   925  		if roachErr != nil {
   926  			return nil, nil, nil, 0, roachErr.GoError()
   927  		}
   928  
   929  		for _, resp := range pkResp.Responses {
   930  			fetcher := SpanKVFetcher{
   931  				KVs: resp.GetInner().(*roachpb.ScanResponse).Rows,
   932  			}
   933  			if err := rowFetcher.StartScanFrom(ctx, &fetcher); err != nil {
   934  				return nil, nil, nil, 0, err
   935  			}
   936  			for !rowFetcher.kvEnd {
   937  				rowToUpdate, _, _, err := rowFetcher.NextRowDecoded(ctx)
   938  				if err != nil {
   939  					return nil, nil, nil, 0, err
   940  				}
   941  
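        				// updateRow is the new version of the row being cascaded into: one
        				// datum per column written by the updater, in the updater's column
        				// ordering.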
   942  				updateRow := make(tree.Datums, len(rowUpdater.UpdateColIDtoRowIndex))
   943  				switch action {
   944  				case sqlbase.ForeignKeyReference_CASCADE:
   945  					// Create the updateRow based on the passed in updated values and from
   946  					// the retrieved row as a fallback.
   947  					currentUpdatedValue := values.updatedValues.At(i)
   948  					for colID, rowIndex := range rowUpdater.UpdateColIDtoRowIndex {
   949  						if valueRowIndex, exists := valueColIDtoRowIndex[colID]; exists {
   950  							updateRow[rowIndex] = currentUpdatedValue[valueRowIndex]
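        							// If the cascaded value is NULL but the referencing column does
        							// not allow NULLs, produce an error that names the fully
        							// qualified column.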
   951  							if updateRow[rowIndex] == tree.DNull {
   952  								column, err := referencingTable.FindColumnByID(colID)
   953  								if err != nil {
   954  									return nil, nil, nil, 0, err
   955  								}
   956  								if !column.Nullable {
   957  									database, err := sqlbase.GetDatabaseDescFromID(
   958  										ctx,
   959  										c.txn,
   960  										c.evalCtx.Codec,
   961  										referencingTable.ParentID,
   962  									)
   963  									if err != nil {
   964  										return nil, nil, nil, 0, err
   965  									}
   966  									schema, err := resolver.ResolveSchemaNameByID(
   967  										ctx,
   968  										c.txn,
   969  										c.evalCtx.Codec,
   970  										referencingTable.ParentID,
   971  										referencingTable.GetParentSchemaID(),
   972  									)
   973  									if err != nil {
   974  										return nil, nil, nil, 0, err
   975  									}
   976  									return nil, nil, nil, 0, pgerror.Newf(pgcode.NullValueNotAllowed,
   977  										"cannot cascade a null value into %q as it violates a NOT NULL constraint",
   978  										tree.ErrString(tree.NewUnresolvedName(database.Name, schema, referencingTable.Name, column.Name)))
   979  								}
   980  							}
   981  							continue
   982  						}
   983  						if fetchRowIndex, exists := rowUpdater.FetchColIDtoRowIndex[colID]; exists {
   984  							updateRow[rowIndex] = rowToUpdate[fetchRowIndex]
   985  							continue
   986  						}
   987  						return nil, nil, nil, 0, errors.AssertionFailedf("could not find colID %d in either the updated columns or the fetched row",
   988  							colID,
   989  						)
   990  					}
   991  				case sqlbase.ForeignKeyReference_SET_NULL, sqlbase.ForeignKeyReference_SET_DEFAULT:
   992  					// Create the updateRow based on the original values and for all
   993  					// values in the index, either nulls (for SET NULL), or default (for
   994  					// SET DEFAULT).
   995  					for colID, rowIndex := range rowUpdater.UpdateColIDtoRowIndex {
   996  						if value, exists := referencingIndexValuesByColIDs[colID]; exists {
   997  							updateRow[rowIndex] = value
   998  							continue
   999  						}
  1000  						if fetchRowIndex, exists := rowUpdater.FetchColIDtoRowIndex[colID]; exists {
  1001  							updateRow[rowIndex] = rowToUpdate[fetchRowIndex]
  1002  							continue
  1003  						}
  1004  						return nil, nil, nil, 0, errors.AssertionFailedf("could not find colID %d in either the index columns or the fetched row",
  1005  							colID,
  1006  						)
  1007  					}
  1008  				}
  1009  
  1010  				// Is there something to update?  If not, skip it.
  1011  				if !rowToUpdate.IsDistinctFrom(c.evalCtx, updateRow) {
  1012  					continue
  1013  				}
  1014  
  1015  				updatedRow, err := rowUpdater.UpdateRow(
  1016  					ctx,
  1017  					batch,
  1018  					rowToUpdate,
  1019  					updateRow,
  1020  					SkipFKs,
  1021  					traceKV,
  1022  				)
  1023  				if err != nil {
  1024  					return nil, nil, nil, 0, err
  1025  				}
  1026  				if _, err := originalRows.AddRow(ctx, rowToUpdate); err != nil {
  1027  					return nil, nil, nil, 0, err
  1028  				}
  1029  				if _, err := updatedRows.AddRow(ctx, updatedRow); err != nil {
  1030  					return nil, nil, nil, 0, err
  1031  				}
  1032  			}
  1033  		}
  1034  	}
  1035  	if err := c.txn.Run(ctx, batch); err != nil {
  1036  		return nil, nil, nil, 0, ConvertBatchError(ctx, referencingTable, batch)
  1037  	}
  1038  
  1039  	return originalRows, updatedRows, rowUpdater.FetchColIDtoRowIndex, startIndex, nil
  1040  }
  1041  
  1042  type cascadeQueueElement struct {
  1043  	table *sqlbase.ImmutableTableDescriptor
  1044  	// These row containers are defined elsewhere and their memory is not managed
  1045  	// by the queue. updatedValues can be nil for deleted rows. If it is
  1046  	// non-nil, every row in originalValues must have a corresponding row in
  1047  	// updatedValues at the exact same index. They also must have the exact same
  1048  	// rank.
  1049  	originalValues  *rowcontainer.RowContainer
  1050  	updatedValues   *rowcontainer.RowContainer
  1051  	colIDtoRowIndex map[sqlbase.ColumnID]int
  1052  	startIndex      int // Start of the range of rows in the row container.
  1053  	endIndex        int // End of the range of rows (exclusive) in the row container.
  1054  }
  1055  
  1056  // cascadeQueue is used for a breadth first walk of the referential integrity
  1057  // graph.
  1058  type cascadeQueue []cascadeQueueElement
  1059  
  1060  // enqueue adds a range of values in a row container to the queue. Note that
  1061  // it always assumes that the values start at startIndex and extend to
  1062  // all the rows following that index.
  1063  func (q *cascadeQueue) enqueue(
  1064  	ctx context.Context,
  1065  	table *sqlbase.ImmutableTableDescriptor,
  1066  	originalValues *rowcontainer.RowContainer,
  1067  	updatedValues *rowcontainer.RowContainer,
  1068  	colIDtoRowIndex map[sqlbase.ColumnID]int,
  1069  	startIndex int,
  1070  ) error {
  1071  	*q = append(*q, cascadeQueueElement{
  1072  		table:           table,
  1073  		originalValues:  originalValues,
  1074  		updatedValues:   updatedValues,
  1075  		colIDtoRowIndex: colIDtoRowIndex,
  1076  		startIndex:      startIndex,
  1077  		endIndex:        originalValues.Len(),
  1078  	})
  1079  	return nil
  1080  }
  1081  
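        // dequeue removes and returns the element at the front of the queue. The
        // returned bool is false if the queue is empty.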
  1082  func (q *cascadeQueue) dequeue() (cascadeQueueElement, bool) {
  1083  	if len(*q) == 0 {
  1084  		return cascadeQueueElement{}, false
  1085  	}
  1086  	elem := (*q)[0]
  1087  	*q = (*q)[1:]
  1088  	return elem, true
  1089  }
  1090  
  1091  // cascadeAll performs all required cascading operations, then checks all the
  1092  // remaining indexes to ensure that no orphans were created.
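        // originalValues holds the row that triggered the cascade; updatedValues is
        // nil when the trigger was a delete and holds the new version of the row
        // when it was an update.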
  1093  func (c *cascader) cascadeAll(
  1094  	ctx context.Context,
  1095  	table *sqlbase.ImmutableTableDescriptor,
  1096  	originalValues tree.Datums,
  1097  	updatedValues tree.Datums,
  1098  	colIDtoRowIndex map[sqlbase.ColumnID]int,
  1099  	traceKV bool,
  1100  ) error {
  1101  	defer c.clear(ctx)
  1102  	var cascadeQ cascadeQueue
  1103  
  1104  	// Enqueue the first values.
  1105  	colTypeInfo, err := sqlbase.MakeColTypeInfo(table, colIDtoRowIndex)
  1106  	if err != nil {
  1107  		return err
  1108  	}
  1109  	originalRowContainer := rowcontainer.NewRowContainer(
  1110  		c.evalCtx.Mon.MakeBoundAccount(), colTypeInfo, len(originalValues),
  1111  	)
  1112  	defer originalRowContainer.Close(ctx)
  1113  	if _, err := originalRowContainer.AddRow(ctx, originalValues); err != nil {
  1114  		return err
  1115  	}
  1116  	var updatedRowContainer *rowcontainer.RowContainer
  1117  	if updatedValues != nil {
  1118  		updatedRowContainer = rowcontainer.NewRowContainer(
  1119  			c.evalCtx.Mon.MakeBoundAccount(), colTypeInfo, len(updatedValues),
  1120  		)
  1121  		defer updatedRowContainer.Close(ctx)
  1122  		if _, err := updatedRowContainer.AddRow(ctx, updatedValues); err != nil {
  1123  			return err
  1124  		}
  1125  	}
  1126  	if err := cascadeQ.enqueue(
  1127  		ctx, table, originalRowContainer, updatedRowContainer, colIDtoRowIndex, 0,
  1128  	); err != nil {
  1129  		return err
  1130  	}
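        	// Walk the queue breadth first: each dequeued element describes a batch of
        	// rows that changed in one table, and any rows cascaded from it are
        	// enqueued in turn until no more referencing rows are affected.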
  1131  	for {
  1132  		select {
  1133  		case <-ctx.Done():
  1134  			return sqlbase.QueryCanceledError
  1135  		default:
  1136  		}
  1137  		elem, exists := cascadeQ.dequeue()
  1138  		if !exists {
  1139  			break
  1140  		}
  1141  		for i := range elem.table.InboundFKs {
  1142  			ref := &elem.table.InboundFKs[i]
  1143  			referencingTable, ok := c.fkTables[ref.OriginTableID]
  1144  			if !ok {
  1145  				return errors.AssertionFailedf("could not find table:%d in table descriptor map", ref.OriginTableID)
  1146  			}
  1147  			if referencingTable.IsAdding {
  1148  				// We can assume that a table being added but not yet public is empty,
  1149  				// and thus does not need to be checked for cascading.
  1150  				continue
  1151  			}
  1152  			foundFK, err := referencingTable.Desc.FindFKForBackRef(elem.table.ID, ref)
  1153  			if err != nil {
  1154  				return err
  1155  			}
  1156  			referencedIndex, err := sqlbase.FindFKReferencedIndex(elem.table.TableDesc(), ref.ReferencedColumnIDs)
  1157  			if err != nil {
  1158  				return err
  1159  			}
  1160  			referencingIndex, err := sqlbase.FindFKOriginIndex(referencingTable.Desc.TableDesc(), ref.OriginColumnIDs)
  1161  			if err != nil {
  1162  				// TODO (rohany): Remove once #48224 is resolved.
  1163  				issueLink := errors.IssueLink{IssueURL: "https://github.com/cockroachdb/cockroach/issues/48224"}
  1164  				withLink := errors.WithIssueLink(err, issueLink)
  1165  				return withLink
  1166  			}
  1167  			if elem.updatedValues == nil {
  1168  				// Deleting a row.
  1169  				switch foundFK.OnDelete {
  1170  				case sqlbase.ForeignKeyReference_CASCADE:
  1171  					deletedRows, colIDtoRowIndex, startIndex, err := c.deleteRows(
  1172  						ctx,
  1173  						referencedIndex,
  1174  						referencingTable.Desc,
  1175  						referencingIndex,
  1176  						// Cascades in the DELETE direction always use MATCH SIMPLE, since
  1177  						// values in referenced tables are allowed to be partially NULL -
  1178  						// they just won't cascade.
  1179  						sqlbase.ForeignKeyReference_SIMPLE,
  1180  						elem,
  1181  						traceKV,
  1182  					)
  1183  					if err != nil {
  1184  						return err
  1185  					}
  1186  					if deletedRows != nil && deletedRows.Len() > startIndex {
  1187  						// If a row was deleted, add the table to the queue.
  1188  						if err := cascadeQ.enqueue(
  1189  							ctx,
  1190  							referencingTable.Desc,
  1191  							deletedRows,
  1192  							nil, /* updatedValues */
  1193  							colIDtoRowIndex,
  1194  							startIndex,
  1195  						); err != nil {
  1196  							return err
  1197  						}
  1198  					}
  1199  				case sqlbase.ForeignKeyReference_SET_NULL, sqlbase.ForeignKeyReference_SET_DEFAULT:
  1200  					originalAffectedRows, updatedAffectedRows, colIDtoRowIndex, startIndex, err := c.updateRows(
  1201  						ctx,
  1202  						referencedIndex,
  1203  						referencingTable.Desc,
  1204  						referencingIndex,
  1205  						sqlbase.ForeignKeyReference_SIMPLE,
  1206  						elem,
  1207  						foundFK.OnDelete,
  1208  						foundFK,
  1209  						traceKV,
  1210  					)
  1211  					if err != nil {
  1212  						return err
  1213  					}
  1214  					if originalAffectedRows != nil && originalAffectedRows.Len() > startIndex {
  1215  						// A row was updated, so let's add it to the queue.
  1216  						if err := cascadeQ.enqueue(
  1217  							ctx,
  1218  							referencingTable.Desc,
  1219  							originalAffectedRows,
  1220  							updatedAffectedRows,
  1221  							colIDtoRowIndex,
  1222  							startIndex,
  1223  						); err != nil {
  1224  							return err
  1225  						}
  1226  					}
  1227  				}
  1228  			} else {
  1229  				// Updating a row.
  1230  				switch foundFK.OnUpdate {
  1231  				case sqlbase.ForeignKeyReference_CASCADE, sqlbase.ForeignKeyReference_SET_NULL, sqlbase.ForeignKeyReference_SET_DEFAULT:
  1232  					originalAffectedRows, updatedAffectedRows, colIDtoRowIndex, startIndex, err := c.updateRows(
  1233  						ctx,
  1234  						referencedIndex,
  1235  						referencingTable.Desc,
  1236  						referencingIndex,
  1237  						sqlbase.ForeignKeyReference_SIMPLE,
  1238  						elem,
  1239  						foundFK.OnUpdate,
  1240  						foundFK,
  1241  						traceKV,
  1242  					)
  1243  					if err != nil {
  1244  						return err
  1245  					}
  1246  					if originalAffectedRows != nil && originalAffectedRows.Len() > startIndex {
  1247  						// A row was updated, so let's add it to the queue.
  1248  						if err := cascadeQ.enqueue(
  1249  							ctx,
  1250  							referencingTable.Desc,
  1251  							originalAffectedRows,
  1252  							updatedAffectedRows,
  1253  							colIDtoRowIndex,
  1254  							startIndex,
  1255  						); err != nil {
  1256  							return err
  1257  						}
  1258  					}
  1259  				}
  1260  			}
  1261  		}
  1262  	}
  1263  
  1264  	// Check all foreign key constraints that have been affected by the cascading
  1265  	// operation.
  1266  
  1267  	// Check all deleted rows to ensure there are no orphans.
  1268  	for tableID, deletedRows := range c.deletedRows {
  1269  		if deletedRows.Len() == 0 {
  1270  			continue
  1271  		}
  1272  		rowDeleter, exists := c.rowDeleters[tableID]
  1273  		if !exists {
  1274  			return errors.AssertionFailedf("could not find row deleter for table %d", tableID)
  1275  		}
  1276  		for deletedRows.Len() > 0 {
  1277  			if err := rowDeleter.Fks.addAllIdxChecks(ctx, deletedRows.At(0), traceKV); err != nil {
  1278  				return err
  1279  			}
  1280  			if err := rowDeleter.Fks.checker.runCheck(ctx, deletedRows.At(0), nil); err != nil {
  1281  				return err
  1282  			}
  1283  			deletedRows.PopFirst()
  1284  		}
  1285  	}
  1286  
  1287  	// Check all updated rows for orphans.
  1288  	// TODO(bram): This is running more checks than needed and may be a bit
  1289  	// brittle. All of these checks can be done selectively by storing a list of
  1290  	// checks to perform for each updated row which can be compiled while
  1291  	// cascading and performing them directly without relying on the rowUpdater.
  1292  	// There is also an opportunity to batch more of these checks together.
  1293  	for tableID, rowUpdater := range c.rowUpdaters {
  1294  		// Fetch the original and updated rows for the updater.
  1295  		originalRows, originalRowsExists := c.originalRows[tableID]
  1296  		if !originalRowsExists {
  1297  			return errors.AssertionFailedf("could not find original rows for table %d", tableID)
  1298  		}
  1299  		totalRows := originalRows.Len()
  1300  		if totalRows == 0 {
  1301  			continue
  1302  		}
  1303  
  1304  		updatedRows, updatedRowsExists := c.updatedRows[tableID]
  1305  		if !updatedRowsExists {
  1306  			return errors.AssertionFailedf("could not find updated rows for table %d", tableID)
  1307  		}
  1308  
  1309  		if totalRows != updatedRows.Len() {
  1310  			return errors.AssertionFailedf("original rows length:%d not equal to updated rows length:%d for table %d",
  1311  				totalRows, updatedRows.Len(), tableID,
  1312  			)
  1313  		}
  1314  
  1315  		if totalRows == 1 {
  1316  			// If there's only a single change, which is quite often the case, there
  1317  			// is no need to worry about intermediate states.  Just run the check and
  1318  			// avoid a bunch of allocations.
  1319  			if err := rowUpdater.Fks.addIndexChecks(ctx, originalRows.At(0), updatedRows.At(0), traceKV); err != nil {
  1320  				return err
  1321  			}
  1322  			if !rowUpdater.Fks.hasFKs() {
  1323  				continue
  1324  			}
  1325  			if err := rowUpdater.Fks.checker.runCheck(ctx, originalRows.At(0), updatedRows.At(0)); err != nil {
  1326  				return err
  1327  			}
  1328  			// Now check all check constraints for the table.
  1329  			checkHelper := c.fkTables[tableID].CheckHelper
  1330  			if checkHelper != nil {
  1331  				if err := checkHelper.LoadEvalRow(rowUpdater.UpdateColIDtoRowIndex, updatedRows.At(0), false); err != nil {
  1332  					return err
  1333  				}
  1334  				if err := checkHelper.CheckEval(c.evalCtx); err != nil {
  1335  					return err
  1336  				}
  1337  			}
  1338  			continue
  1339  		}
  1340  
  1341  		skipList := make(map[int]struct{}) // A map of already checked indices.
  1342  		for i := 0; i < totalRows; i++ {
  1343  			if _, exists := skipList[i]; exists {
  1344  				continue
  1345  			}
  1346  
  1347  			// Is this the final update for this row? Intermediate states will always
  1348  			// fail these checks so only check the original and final update for the
  1349  			// row.
  1350  			finalRow := updatedRows.At(i)
  1351  			for j := i + 1; j < totalRows; j++ {
  1352  				if _, exists := skipList[j]; exists {
  1353  					continue
  1354  				}
  1355  				if !originalRows.At(j).IsDistinctFrom(c.evalCtx, finalRow) {
  1356  					// The row has been updated again.
  1357  					finalRow = updatedRows.At(j)
  1358  					skipList[j] = struct{}{}
  1359  				}
  1360  			}
  1361  
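        			// finalRow now holds the last state this row reached after chaining
        			// through any later updates, so compare the original row against that
        			// final state only.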
  1362  			if err := rowUpdater.Fks.addIndexChecks(ctx, originalRows.At(i), finalRow, traceKV); err != nil {
  1363  				return err
  1364  			}
  1365  			if !rowUpdater.Fks.hasFKs() {
  1366  				continue
  1367  			}
  1368  			if err := rowUpdater.Fks.checker.runCheck(ctx, originalRows.At(i), finalRow); err != nil {
  1369  				return err
  1370  			}
  1371  			// Now check all check constraints for the table.
  1372  			checkHelper := c.fkTables[tableID].CheckHelper
  1373  			if checkHelper != nil {
  1374  				if err := checkHelper.LoadEvalRow(rowUpdater.UpdateColIDtoRowIndex, finalRow, false); err != nil {
  1375  					return err
  1376  				}
  1377  				if err := checkHelper.CheckEval(c.evalCtx); err != nil {
  1378  					return err
  1379  				}
  1380  			}
  1381  		}
  1382  	}
  1383  
  1384  	return nil
  1385  }