github.com/cockroachdb/cockroach@v20.2.0-alpha.1+incompatible/pkg/sql/row/updater.go

     1  // Copyright 2019 The Cockroach Authors.
     2  //
     3  // Use of this software is governed by the Business Source License
     4  // included in the file licenses/BSL.txt.
     5  //
     6  // As of the Change Date specified in that file, in accordance with
     7  // the Business Source License, use of this software will be governed
     8  // by the Apache License, Version 2.0, included in the file
     9  // licenses/APL.txt.
    10  
    11  package row
    12  
    13  import (
    14  	"bytes"
    15  	"context"
    16  	"sort"
    17  
    18  	"github.com/cockroachdb/cockroach/pkg/keys"
    19  	"github.com/cockroachdb/cockroach/pkg/kv"
    20  	"github.com/cockroachdb/cockroach/pkg/roachpb"
    21  	"github.com/cockroachdb/cockroach/pkg/sql/sem/tree"
    22  	"github.com/cockroachdb/cockroach/pkg/sql/sqlbase"
    23  	"github.com/cockroachdb/cockroach/pkg/util"
    24  	"github.com/cockroachdb/cockroach/pkg/util/log"
    25  	"github.com/cockroachdb/cockroach/pkg/util/unique"
    26  	"github.com/cockroachdb/errors"
    27  )
    28  
    29  // Updater abstracts the key/value operations for updating table rows.
    30  type Updater struct {
    31  	Helper                rowHelper
    32  	DeleteHelper          *rowHelper
    33  	FetchCols             []sqlbase.ColumnDescriptor
    34  	FetchColIDtoRowIndex  map[sqlbase.ColumnID]int
    35  	UpdateCols            []sqlbase.ColumnDescriptor
    36  	UpdateColIDtoRowIndex map[sqlbase.ColumnID]int
    37  	primaryKeyColChange   bool
    38  
    39  	// rd and ri are used when the update this Updater is created for modifies
    40  	// the primary key of the table. In that case, rows must be deleted and
    41  	// re-added instead of merely updated, since the keys are changing.
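        	// For example, an UPDATE that sets a primary key column rewrites the row's
        	// primary index key, so the old row is removed via rd and the new row is
        	// written via ri.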
    42  	rd Deleter
    43  	ri Inserter
    44  
    45  	Fks      fkExistenceCheckForUpdate
    46  	cascader *cascader
    47  
    48  	// For allocation avoidance.
    49  	marshaled       []roachpb.Value
    50  	newValues       []tree.Datum
    51  	key             roachpb.Key
    52  	valueBuf        []byte
    53  	value           roachpb.Value
    54  	oldIndexEntries [][]sqlbase.IndexEntry
    55  	newIndexEntries [][]sqlbase.IndexEntry
    56  }
    57  
    58  type rowUpdaterType int
    59  
    60  const (
    61  	// UpdaterDefault indicates that an Updater should update everything
    62  	// about a row, including secondary indexes.
    63  	UpdaterDefault rowUpdaterType = 0
    64  	// UpdaterOnlyColumns indicates that an Updater should only update the
    65  	// columns of a row.
    66  	UpdaterOnlyColumns rowUpdaterType = 1
    67  )
    68  
    69  // MakeUpdater creates an Updater for the given table.
    70  //
    71  // UpdateCols are the columns being updated and correspond to the updateValues
    72  // that will be passed to UpdateRow.
    73  //
    74  // The returned Updater contains a FetchCols field that defines which values
    75  // must be passed as oldValues to UpdateRow. All the columns passed in
    76  // requestedCols will be included at the beginning of FetchCols.
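        //
        // A rough usage sketch (error handling and batch execution elided; all
        // arguments below are placeholders supplied by the caller):
        //
        //   ru, err := MakeUpdater(
        //       ctx, txn, codec, tableDesc, fkTables, updateCols, requestedCols,
        //       UpdaterDefault, CheckFKs, evalCtx, alloc,
        //   )
        //   newValues, err := ru.UpdateRow(ctx, batch, oldValues, updateValues, CheckFKs, traceKV)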
    77  func MakeUpdater(
    78  	ctx context.Context,
    79  	txn *kv.Txn,
    80  	codec keys.SQLCodec,
    81  	tableDesc *sqlbase.ImmutableTableDescriptor,
    82  	fkTables FkTableMetadata,
    83  	updateCols []sqlbase.ColumnDescriptor,
    84  	requestedCols []sqlbase.ColumnDescriptor,
    85  	updateType rowUpdaterType,
    86  	checkFKs checkFKConstraints,
    87  	evalCtx *tree.EvalContext,
    88  	alloc *sqlbase.DatumAlloc,
    89  ) (Updater, error) {
    90  	rowUpdater, err := makeUpdaterWithoutCascader(
    91  		ctx, txn, codec, tableDesc, fkTables, updateCols, requestedCols, updateType, checkFKs, alloc,
    92  	)
    93  	if err != nil {
    94  		return Updater{}, err
    95  	}
    96  	if checkFKs == CheckFKs {
    97  		rowUpdater.cascader, err = makeUpdateCascader(
    98  			ctx, txn, tableDesc, fkTables, updateCols, evalCtx, alloc,
    99  		)
   100  		if err != nil {
   101  			return Updater{}, err
   102  		}
   103  	}
   104  	return rowUpdater, nil
   105  }
   106  
   107  type returnTrue struct{}
   108  
   109  func (returnTrue) Error() string { panic(errors.AssertionFailedf("unimplemented")) }
   110  
   111  var returnTruePseudoError error = returnTrue{}
   112  
   113  // makeUpdaterWithoutCascader is the same function as MakeUpdater but does not
   114  // create a cascader.
   115  func makeUpdaterWithoutCascader(
   116  	ctx context.Context,
   117  	txn *kv.Txn,
   118  	codec keys.SQLCodec,
   119  	tableDesc *sqlbase.ImmutableTableDescriptor,
   120  	fkTables FkTableMetadata,
   121  	updateCols []sqlbase.ColumnDescriptor,
   122  	requestedCols []sqlbase.ColumnDescriptor,
   123  	updateType rowUpdaterType,
   124  	checkFKs checkFKConstraints,
   125  	alloc *sqlbase.DatumAlloc,
   126  ) (Updater, error) {
   127  	updateColIDtoRowIndex := ColIDtoRowIndexFromCols(updateCols)
   128  
   129  	primaryIndexCols := make(map[sqlbase.ColumnID]struct{}, len(tableDesc.PrimaryIndex.ColumnIDs))
   130  	for _, colID := range tableDesc.PrimaryIndex.ColumnIDs {
   131  		primaryIndexCols[colID] = struct{}{}
   132  	}
   133  
   134  	var primaryKeyColChange bool
   135  	for _, c := range updateCols {
   136  		if _, ok := primaryIndexCols[c.ID]; ok {
   137  			primaryKeyColChange = true
   138  			break
   139  		}
   140  	}
   141  
   142  	// needsUpdate returns whether the given secondary index needs to be updated.
   143  	needsUpdate := func(index sqlbase.IndexDescriptor) bool {
   144  		if updateType == UpdaterOnlyColumns {
   145  			// Only update columns.
   146  			return false
   147  		}
   148  		// If the primary key changed, we need to update all secondary indexes.
   149  		if primaryKeyColChange {
   150  			return true
   151  		}
   152  		return index.RunOverAllColumns(func(id sqlbase.ColumnID) error {
   153  			if _, ok := updateColIDtoRowIndex[id]; ok {
   154  				return returnTruePseudoError
   155  			}
   156  			return nil
   157  		}) != nil
   158  	}
   159  
   160  	writableIndexes := tableDesc.WritableIndexes()
   161  	includeIndexes := make([]sqlbase.IndexDescriptor, 0, len(writableIndexes))
   162  	for _, index := range writableIndexes {
   163  		if needsUpdate(index) {
   164  			includeIndexes = append(includeIndexes, index)
   165  		}
   166  	}
   167  
   168  	// Columns of the table to update, including those in delete/write-only state
   169  	tableCols := tableDesc.DeletableColumns()
   170  
   171  	var deleteOnlyIndexes []sqlbase.IndexDescriptor
   172  	for _, idx := range tableDesc.DeleteOnlyIndexes() {
   173  		if needsUpdate(idx) {
   174  			if deleteOnlyIndexes == nil {
   175  				// Allocate at most once.
   176  				deleteOnlyIndexes = make([]sqlbase.IndexDescriptor, 0, len(tableDesc.DeleteOnlyIndexes()))
   177  			}
   178  			deleteOnlyIndexes = append(deleteOnlyIndexes, idx)
   179  		}
   180  	}
   181  
   182  	var deleteOnlyHelper *rowHelper
   183  	if len(deleteOnlyIndexes) > 0 {
   184  		rh := newRowHelper(codec, tableDesc, deleteOnlyIndexes)
   185  		deleteOnlyHelper = &rh
   186  	}
   187  
   188  	ru := Updater{
   189  		Helper:                newRowHelper(codec, tableDesc, includeIndexes),
   190  		DeleteHelper:          deleteOnlyHelper,
   191  		UpdateCols:            updateCols,
   192  		UpdateColIDtoRowIndex: updateColIDtoRowIndex,
   193  		primaryKeyColChange:   primaryKeyColChange,
   194  		marshaled:             make([]roachpb.Value, len(updateCols)),
   195  		oldIndexEntries:       make([][]sqlbase.IndexEntry, len(includeIndexes)),
   196  		newIndexEntries:       make([][]sqlbase.IndexEntry, len(includeIndexes)),
   197  	}
   198  
   199  	if primaryKeyColChange {
   200  		// These fields are only used when the primary key is changing.
   201  		// When changing the primary key, we delete the old values and reinsert
   202  		// them, so request them all.
   203  		var err error
   204  		if ru.rd, err = makeRowDeleterWithoutCascader(
   205  			ctx, txn, codec, tableDesc, fkTables, tableCols, SkipFKs, alloc,
   206  		); err != nil {
   207  			return Updater{}, err
   208  		}
   209  		ru.FetchCols = ru.rd.FetchCols
   210  		ru.FetchColIDtoRowIndex = ColIDtoRowIndexFromCols(ru.FetchCols)
   211  		if ru.ri, err = MakeInserter(
   212  			ctx, txn, codec, tableDesc, tableCols, SkipFKs, nil /* fkTables */, alloc,
   213  		); err != nil {
   214  			return Updater{}, err
   215  		}
   216  	} else {
   217  		ru.FetchCols = requestedCols[:len(requestedCols):len(requestedCols)]
   218  		ru.FetchColIDtoRowIndex = ColIDtoRowIndexFromCols(ru.FetchCols)
   219  
   220  		// maybeAddCol adds the provided column to ru.FetchCols and
   221  		// ru.FetchColIDtoRowIndex if it isn't already present.
   222  		maybeAddCol := func(colID sqlbase.ColumnID) error {
   223  			if _, ok := ru.FetchColIDtoRowIndex[colID]; !ok {
   224  				col, _, err := tableDesc.FindReadableColumnByID(colID)
   225  				if err != nil {
   226  					return err
   227  				}
   228  				ru.FetchColIDtoRowIndex[col.ID] = len(ru.FetchCols)
   229  				ru.FetchCols = append(ru.FetchCols, *col)
   230  			}
   231  			return nil
   232  		}
   233  
   234  		// Fetch all columns in the primary key so that we can construct the
   235  		// keys when writing out the new kvs to the primary index.
   236  		for _, colID := range tableDesc.PrimaryIndex.ColumnIDs {
   237  			if err := maybeAddCol(colID); err != nil {
   238  				return Updater{}, err
   239  			}
   240  		}
   241  
   242  		// If any part of a column family is being updated, fetch all columns in
   243  		// that column family so that we can reconstruct the column family with
   244  		// the updated columns before writing it.
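        		// For example, for a hypothetical FAMILY (a, b, c), an UPDATE that sets
        		// only b must also fetch a and c, because the family is written as a
        		// single value that has to be re-encoded in full.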
   245  		for i := range tableDesc.Families {
   246  			family := &tableDesc.Families[i]
   247  			familyBeingUpdated := false
   248  			for _, colID := range family.ColumnIDs {
   249  				if _, ok := ru.UpdateColIDtoRowIndex[colID]; ok {
   250  					familyBeingUpdated = true
   251  					break
   252  				}
   253  			}
   254  			if familyBeingUpdated {
   255  				for _, colID := range family.ColumnIDs {
   256  					if err := maybeAddCol(colID); err != nil {
   257  						return Updater{}, err
   258  					}
   259  				}
   260  			}
   261  		}
   262  
   263  		// Fetch all columns from indices that are being updated so that they can
   264  		// be used to create the new kv pairs for those indices.
   265  		for _, index := range includeIndexes {
   266  			if err := index.RunOverAllColumns(maybeAddCol); err != nil {
   267  				return Updater{}, err
   268  			}
   269  		}
   270  		for _, index := range deleteOnlyIndexes {
   271  			if err := index.RunOverAllColumns(maybeAddCol); err != nil {
   272  				return Updater{}, err
   273  			}
   274  		}
   275  	}
   276  
   277  	// If we are fetching from specific families, we might get
   278  	// fewer columns than in the table. So we cannot assign this to
   279  	// have length len(tableCols).
   280  	ru.newValues = make(tree.Datums, len(ru.FetchCols))
   281  
   282  	if checkFKs == CheckFKs {
   283  		var err error
   284  		if primaryKeyColChange {
   285  			updateCols = nil
   286  		}
   287  		if ru.Fks, err = makeFkExistenceCheckHelperForUpdate(
   288  			ctx, txn, codec, tableDesc, fkTables, updateCols, ru.FetchColIDtoRowIndex, alloc,
   289  		); err != nil {
   290  			return Updater{}, err
   291  		}
   292  	}
   293  	return ru, nil
   294  }
   295  
   296  // UpdateRow adds to the batch the kv operations necessary to update a table row
   297  // with the given values.
   298  //
   299  // The row corresponding to oldValues is updated with the values in updateValues.
   300  // Note that updateValues contains only the columns that are changing.
   301  //
   302  // The return value is only good until the next call to UpdateRow.
   303  func (ru *Updater) UpdateRow(
   304  	ctx context.Context,
   305  	batch *kv.Batch,
   306  	oldValues []tree.Datum,
   307  	updateValues []tree.Datum,
   308  	checkFKs checkFKConstraints,
   309  	traceKV bool,
   310  ) ([]tree.Datum, error) {
   311  	if ru.cascader != nil {
   312  		batch = ru.cascader.txn.NewBatch()
   313  	}
   314  
   315  	if len(oldValues) != len(ru.FetchCols) {
   316  		return nil, errors.Errorf("got %d values but expected %d", len(oldValues), len(ru.FetchCols))
   317  	}
   318  	if len(updateValues) != len(ru.UpdateCols) {
   319  		return nil, errors.Errorf("got %d values but expected %d", len(updateValues), len(ru.UpdateCols))
   320  	}
   321  
   322  	primaryIndexKey, err := ru.Helper.encodePrimaryIndex(ru.FetchColIDtoRowIndex, oldValues)
   323  	if err != nil {
   324  		return nil, err
   325  	}
   326  	var deleteOldSecondaryIndexEntries []sqlbase.IndexEntry
   327  	if ru.DeleteHelper != nil {
   328  		// We want to include empty k/v pairs because we want
   329  		// to delete all k/v's for this row. By setting includeEmpty
   330  		// to true, we will get a k/v pair for each family in the row,
   331  		// which will guarantee that we delete all the k/v's in this row.
   332  		// N.B. that setting includeEmpty to true will sometimes cause
   333  		// deletes of keys that aren't present. We choose to make this
   334  		// compromise in order to avoid having to read all values of
   335  		// the row that is being updated.
   336  		// TODO(mgartner): Add partial index IDs to ignoreIndexes that we should
   337  		// not delete entries from.
   338  		var ignoreIndexes util.FastIntSet
   339  		_, deleteOldSecondaryIndexEntries, err = ru.DeleteHelper.encodeIndexes(
   340  			ru.FetchColIDtoRowIndex, oldValues, ignoreIndexes, true /* includeEmpty */)
   341  		if err != nil {
   342  			return nil, err
   343  		}
   344  	}
   345  
   346  	// Check that the new value types match the column types. This needs to
   347  	// happen before index encoding because certain datum types (e.g. tuple)
   348  	// cannot be used as index values.
   349  	for i, val := range updateValues {
   350  		if ru.marshaled[i], err = sqlbase.MarshalColumnValue(&ru.UpdateCols[i], val); err != nil {
   351  			return nil, err
   352  		}
   353  	}
   354  
   355  	// Update the row values.
   356  	copy(ru.newValues, oldValues)
   357  	for i, updateCol := range ru.UpdateCols {
   358  		ru.newValues[ru.FetchColIDtoRowIndex[updateCol.ID]] = updateValues[i]
   359  	}
   360  
   361  	rowPrimaryKeyChanged := false
   362  	if ru.primaryKeyColChange {
   363  		var newPrimaryIndexKey []byte
   364  		newPrimaryIndexKey, err =
   365  			ru.Helper.encodePrimaryIndex(ru.FetchColIDtoRowIndex, ru.newValues)
   366  		if err != nil {
   367  			return nil, err
   368  		}
   369  		rowPrimaryKeyChanged = !bytes.Equal(primaryIndexKey, newPrimaryIndexKey)
   370  	}
   371  
   372  	for i := range ru.Helper.Indexes {
   373  		// We don't want to insert any empty k/v's, so set includeEmpty to false.
   374  		// Consider the following case:
   375  		// TABLE t (
   376  		//   x INT PRIMARY KEY, y INT, z INT, w INT,
   377  		//   INDEX (y) STORING (z, w),
   378  		//   FAMILY (x), FAMILY (y), FAMILY (z), FAMILY (w)
   379  		// )
   380  		// If we are to perform an update on row (1, 2, 3, NULL),
   381  		// the k/v pair for index i that encodes column w would have
   382  		// an empty value because w is null and the sole resident
   383  		// of that family. We want to ensure that we don't insert
   384  		// empty k/v pairs during the process of the update, so
   385  		// set includeEmpty to false while generating the old
   386  		// and new index entries.
   387  		ru.oldIndexEntries[i], err = sqlbase.EncodeSecondaryIndex(
   388  			ru.Helper.Codec,
   389  			ru.Helper.TableDesc.TableDesc(),
   390  			&ru.Helper.Indexes[i],
   391  			ru.FetchColIDtoRowIndex,
   392  			oldValues,
   393  			false, /* includeEmpty */
   394  		)
   395  		if err != nil {
   396  			return nil, err
   397  		}
   398  		ru.newIndexEntries[i], err = sqlbase.EncodeSecondaryIndex(
   399  			ru.Helper.Codec,
   400  			ru.Helper.TableDesc.TableDesc(),
   401  			&ru.Helper.Indexes[i],
   402  			ru.FetchColIDtoRowIndex,
   403  			ru.newValues,
   404  			false, /* includeEmpty */
   405  		)
   406  		if err != nil {
   407  			return nil, err
   408  		}
   409  		if ru.Helper.Indexes[i].Type == sqlbase.IndexDescriptor_INVERTED {
   410  			// Deduplicate the keys we're adding and removing if we're updating an
   411  			// inverted index. For example, imagine a table with an inverted index on j:
   412  			//
   413  			// a | j
   414  			// --+----------------
   415  			// 1 | {"foo": "bar"}
   416  			//
   417  			// If we update the json value to be {"foo": "bar", "baz": "qux"}, we don't
   418  			// want to delete the /foo/bar key and re-add it; that would be wasted work.
   419  			// So, we are going to remove keys from both the new and old index entry
   420  			// array if they're identical.
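        			// In the example above, the old entries contain only the key for /foo/bar
        			// while the new entries contain keys for /foo/bar and /baz/qux; after
        			// uniquifying across the two slices, the old list is empty (nothing to
        			// delete) and the new list contains only the /baz/qux key to write.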
   421  			newIndexEntries := ru.newIndexEntries[i]
   422  			oldIndexEntries := ru.oldIndexEntries[i]
   423  			sort.Slice(oldIndexEntries, func(i, j int) bool {
   424  				return compareIndexEntries(oldIndexEntries[i], oldIndexEntries[j]) < 0
   425  			})
   426  			sort.Slice(newIndexEntries, func(i, j int) bool {
   427  				return compareIndexEntries(newIndexEntries[i], newIndexEntries[j]) < 0
   428  			})
   429  			oldLen, newLen := unique.UniquifyAcrossSlices(
   430  				oldIndexEntries, newIndexEntries,
   431  				func(l, r int) int {
   432  					return compareIndexEntries(oldIndexEntries[l], newIndexEntries[r])
   433  				},
   434  				func(i, j int) {
   435  					oldIndexEntries[i] = oldIndexEntries[j]
   436  				},
   437  				func(i, j int) {
   438  					newIndexEntries[i] = newIndexEntries[j]
   439  				})
   440  			ru.oldIndexEntries[i] = oldIndexEntries[:oldLen]
   441  			ru.newIndexEntries[i] = newIndexEntries[:newLen]
   442  		}
   443  	}
   444  
   445  	if rowPrimaryKeyChanged {
   446  		if err := ru.rd.DeleteRow(ctx, batch, oldValues, SkipFKs, traceKV); err != nil {
   447  			return nil, err
   448  		}
   449  		// TODO(mgartner): Add partial index IDs to ignoreIndexes that we should
   450  		// not write entries to.
   451  		var ignoreIndexes util.FastIntSet
   452  		if err := ru.ri.InsertRow(
   453  			ctx, batch, ru.newValues, ignoreIndexes, false /* ignoreConflicts */, SkipFKs, traceKV,
   454  		); err != nil {
   455  			return nil, err
   456  		}
   457  
   458  		if ru.Fks.checker != nil {
   459  			ru.Fks.addCheckForIndex(ru.Helper.TableDesc.PrimaryIndex.ID, ru.Helper.TableDesc.PrimaryIndex.Type)
   460  			for i := range ru.Helper.Indexes {
   461  				if ru.Helper.Indexes[i].Type == sqlbase.IndexDescriptor_INVERTED {
   462  					// We ignore FK existence checks for inverted indexes.
   463  					//
   464  					// TODO(knz): verify that this is indeed correct.
   465  					continue
   466  				}
   467  				// * We will always have at least 1 entry in the index, so indexing 0 is safe.
   468  				// * The encodings for column family 0 and for the other families differ only
   469  				//   in the family suffix of the key, so if entry 0's key is different, the
   470  				//   other index entries will be different as well.
   471  				if !bytes.Equal(ru.newIndexEntries[i][0].Key, ru.oldIndexEntries[i][0].Key) {
   472  					ru.Fks.addCheckForIndex(ru.Helper.Indexes[i].ID, ru.Helper.Indexes[i].Type)
   473  				}
   474  			}
   475  
   476  			if ru.cascader != nil {
   477  				if err := ru.cascader.txn.Run(ctx, batch); err != nil {
   478  					return nil, ConvertBatchError(ctx, ru.Helper.TableDesc, batch)
   479  				}
   480  				if err := ru.cascader.cascadeAll(
   481  					ctx,
   482  					ru.Helper.TableDesc,
   483  					tree.Datums(oldValues),
   484  					tree.Datums(ru.newValues),
   485  					ru.FetchColIDtoRowIndex,
   486  					traceKV,
   487  				); err != nil {
   488  					return nil, err
   489  				}
   490  			}
   491  		}
   492  
   493  		if checkFKs == CheckFKs {
   494  			if err := ru.Fks.addIndexChecks(ctx, oldValues, ru.newValues, traceKV); err != nil {
   495  				return nil, err
   496  			}
   497  			if !ru.Fks.hasFKs() {
   498  				return ru.newValues, nil
   499  			}
   500  			if err := ru.Fks.checker.runCheck(ctx, oldValues, ru.newValues); err != nil {
   501  				return nil, err
   502  			}
   503  		}
   504  
   505  		return ru.newValues, nil
   506  	}
   507  
   508  	// Add the new values.
   509  	ru.valueBuf, err = prepareInsertOrUpdateBatch(ctx, batch,
   510  		&ru.Helper, primaryIndexKey, ru.FetchCols,
   511  		ru.newValues, ru.FetchColIDtoRowIndex,
   512  		ru.marshaled, ru.UpdateColIDtoRowIndex,
   513  		&ru.key, &ru.value, ru.valueBuf, insertPutFn, true /* overwrite */, traceKV)
   514  	if err != nil {
   515  		return nil, err
   516  	}
   517  
   518  	// Update secondary indexes.
   519  	// We're iterating through all of the indexes, which should have corresponding entries
   520  	// in the new and old values.
   521  	for i := range ru.Helper.Indexes {
   522  		index := &ru.Helper.Indexes[i]
   523  		if index.Type == sqlbase.IndexDescriptor_FORWARD {
   524  			oldIdx, newIdx := 0, 0
   525  			oldEntries, newEntries := ru.oldIndexEntries[i], ru.newIndexEntries[i]
   526  			// The index entries for a particular index are stored in
   527  			// family sorted order. We use this fact to update rows.
   528  			// The algorithm to update a row using the old k/v pairs
   529  			// for the row and the new k/v pairs for the row is very
   530  			// similar to the algorithm to merge two sorted lists.
   531  			// We move in lock step through the entries, and potentially
   532  			// update k/v's that belong to the same family.
   533  			// If we are in the case where there exists a family's k/v
   534  			// in the old entries but not the new entries, we need to
   535  			// delete that k/v. If we are in the case where a family's
   536  			// k/v exists in the new index entries, then we need to just
   537  			// insert that new k/v.
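        			// For example, if the old entries cover families {0, 2} and the new
        			// entries cover families {0, 3}: the family-0 pair is compared (and a
        			// CPut is issued if its key or value changed), the stale family-2 key
        			// is deleted, and the new family-3 key is written with a CPut that
        			// expects no existing value.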
   538  			for oldIdx < len(oldEntries) && newIdx < len(newEntries) {
   539  				oldEntry, newEntry := &oldEntries[oldIdx], &newEntries[newIdx]
   540  				if oldEntry.Family == newEntry.Family {
   541  					// If the families are equal, then check if the keys have changed. If so, delete the old key.
   542  					// Then, issue a CPut for the new value of the key if the value has changed.
   543  					// Because every index always has a k/v for family 0, we are guaranteed to enter
   544  					// this case at least once per row, so it suffices to add foreign key checks here.
   545  					oldIdx++
   546  					newIdx++
   547  					var expValue *roachpb.Value
   548  					if !bytes.Equal(oldEntry.Key, newEntry.Key) {
   549  						ru.Fks.addCheckForIndex(index.ID, index.Type)
   550  						if traceKV {
   551  							log.VEventf(ctx, 2, "Del %s", keys.PrettyPrint(ru.Helper.secIndexValDirs[i], oldEntry.Key))
   552  						}
   553  						batch.Del(oldEntry.Key)
   554  					} else if !newEntry.Value.EqualData(oldEntry.Value) {
   555  						expValue = &oldEntry.Value
   556  					} else {
   557  						continue
   558  					}
   559  					if traceKV {
   560  						k := keys.PrettyPrint(ru.Helper.secIndexValDirs[i], newEntry.Key)
   561  						v := newEntry.Value.PrettyPrint()
   562  						if expValue != nil {
   563  							log.VEventf(ctx, 2, "CPut %s -> %v (replacing %v, if exists)", k, v, expValue)
   564  						} else {
   565  							log.VEventf(ctx, 2, "CPut %s -> %v (expecting does not exist)", k, v)
   566  						}
   567  					}
   568  					batch.CPutAllowingIfNotExists(newEntry.Key, &newEntry.Value, expValue)
   569  				} else if oldEntry.Family < newEntry.Family {
   570  					if oldEntry.Family == sqlbase.FamilyID(0) {
   571  						return nil, errors.AssertionFailedf(
   572  							"index entry for family 0 for table %s, index %s was not generated",
   573  							ru.Helper.TableDesc.Name, index.Name,
   574  						)
   575  					}
   576  					// In this case, the index has a k/v for a family that does not exist in
   577  					// the new set of k/v's for the row. So, we need to delete the old k/v.
   578  					if traceKV {
   579  						log.VEventf(ctx, 2, "Del %s", keys.PrettyPrint(ru.Helper.secIndexValDirs[i], oldEntry.Key))
   580  					}
   581  					batch.Del(oldEntry.Key)
   582  					oldIdx++
   583  				} else {
   584  					if newEntry.Family == sqlbase.FamilyID(0) {
   585  						return nil, errors.AssertionFailedf(
   586  							"index entry for family 0 for table %s, index %s was not generated",
   587  							ru.Helper.TableDesc.Name, index.Name,
   588  						)
   589  					}
   590  					// In this case, the index now has a k/v that did not exist in the
   591  					// old row, so we should expect to not see a value for the new
   592  					// key, and put the new key in place.
   593  					if traceKV {
   594  						k := keys.PrettyPrint(ru.Helper.secIndexValDirs[i], newEntry.Key)
   595  						v := newEntry.Value.PrettyPrint()
   596  						log.VEventf(ctx, 2, "CPut %s -> %v (expecting does not exist)", k, v)
   597  					}
   598  					batch.CPut(newEntry.Key, &newEntry.Value, nil)
   599  					newIdx++
   600  				}
   601  			}
   602  			for oldIdx < len(oldEntries) {
   603  				// Delete any remaining old entries that are not matched by new entries in this row.
   604  				oldEntry := &oldEntries[oldIdx]
   605  				if oldEntry.Family == sqlbase.FamilyID(0) {
   606  					return nil, errors.AssertionFailedf(
   607  						"index entry for family 0 for table %s, index %s was not generated",
   608  						ru.Helper.TableDesc.Name, index.Name,
   609  					)
   610  				}
   611  				if traceKV {
   612  					log.VEventf(ctx, 2, "Del %s", keys.PrettyPrint(ru.Helper.secIndexValDirs[i], oldEntry.Key))
   613  				}
   614  				batch.Del(oldEntry.Key)
   615  				oldIdx++
   616  			}
   617  			for newIdx < len(newEntries) {
   618  				// Insert any remaining new entries that are not present in the old row.
   619  				newEntry := &newEntries[newIdx]
   620  				if newEntry.Family == sqlbase.FamilyID(0) {
   621  					return nil, errors.AssertionFailedf(
   622  						"index entry for family 0 for table %s, index %s was not generated",
   623  						ru.Helper.TableDesc.Name, index.Name,
   624  					)
   625  				}
   626  				if traceKV {
   627  					k := keys.PrettyPrint(ru.Helper.secIndexValDirs[i], newEntry.Key)
   628  					v := newEntry.Value.PrettyPrint()
   629  					log.VEventf(ctx, 2, "CPut %s -> %v (expecting does not exist)", k, v)
   630  				}
   631  				batch.CPut(newEntry.Key, &newEntry.Value, nil)
   632  				newIdx++
   633  			}
   634  		} else {
   635  			// Remove all inverted index entries, and re-add them.
   636  			for j := range ru.oldIndexEntries[i] {
   637  				if traceKV {
   638  					log.VEventf(ctx, 2, "Del %s", ru.oldIndexEntries[i][j].Key)
   639  				}
   640  				batch.Del(ru.oldIndexEntries[i][j].Key)
   641  			}
   642  			putFn := insertInvertedPutFn
   643  			// We're adding all of the inverted index entries from the row being updated.
   644  			for j := range ru.newIndexEntries[i] {
   645  				putFn(ctx, batch, &ru.newIndexEntries[i][j].Key, &ru.newIndexEntries[i][j].Value, traceKV)
   646  			}
   647  		}
   648  	}
   649  
   650  	// We're deleting entries for indexes that are in a delete-only state. We're bounding this by the
   651  	// number of indexes because inverted indexes will be handled separately.
   652  	if ru.DeleteHelper != nil {
   653  		for _, deletedSecondaryIndexEntry := range deleteOldSecondaryIndexEntries {
   654  			if traceKV {
   655  				log.VEventf(ctx, 2, "Del %s", deletedSecondaryIndexEntry.Key)
   656  			}
   657  			batch.Del(deletedSecondaryIndexEntry.Key)
   658  		}
   659  	}
   660  
   661  	if ru.cascader != nil {
   662  		if err := ru.cascader.txn.Run(ctx, batch); err != nil {
   663  			return nil, ConvertBatchError(ctx, ru.Helper.TableDesc, batch)
   664  		}
   665  		if err := ru.cascader.cascadeAll(
   666  			ctx,
   667  			ru.Helper.TableDesc,
   668  			tree.Datums(oldValues),
   669  			tree.Datums(ru.newValues),
   670  			ru.FetchColIDtoRowIndex,
   671  			traceKV,
   672  		); err != nil {
   673  			return nil, err
   674  		}
   675  	}
   676  
   677  	if checkFKs == CheckFKs {
   678  		if err := ru.Fks.addIndexChecks(ctx, oldValues, ru.newValues, traceKV); err != nil {
   679  			return nil, err
   680  		}
   681  		if ru.Fks.hasFKs() {
   682  			if err := ru.Fks.checker.runCheck(ctx, oldValues, ru.newValues); err != nil {
   683  				return nil, err
   684  			}
   685  		}
   686  	}
   687  
   688  	return ru.newValues, nil
   689  }
   690  
   691  func compareIndexEntries(left, right sqlbase.IndexEntry) int {
   692  	cmp := bytes.Compare(left.Key, right.Key)
   693  	if cmp != 0 {
   694  		return cmp
   695  	}
   696  
   697  	return bytes.Compare(left.Value.RawBytes, right.Value.RawBytes)
   698  }
   699  
   700  // IsColumnOnlyUpdate returns true if this Updater is only updating column
   701  // data (in contrast to updating the primary key or other indexes).
   702  func (ru *Updater) IsColumnOnlyUpdate() bool {
   703  	// TODO(dan): This is used in the schema change backfill to assert that it was
   704  	// configured correctly and will not be doing things it shouldn't. This is an
   705  	// unfortunate bleeding of responsibility and indicates the abstraction could
   706  	// be improved. Specifically, Updater currently has two responsibilities
   707  	// (computing which indexes need to be updated and mapping sql rows to k/v
   708  	// operations) and these should be split.
   709  	return !ru.primaryKeyColChange && ru.DeleteHelper == nil && len(ru.Helper.Indexes) == 0
   710  }