github.com/cockroachdb/cockroach@v20.2.0-alpha.1+incompatible/pkg/sql/sqlbase/table.go

     1  // Copyright 2015 The Cockroach Authors.
     2  //
     3  // Use of this software is governed by the Business Source License
     4  // included in the file licenses/BSL.txt.
     5  //
     6  // As of the Change Date specified in that file, in accordance with
     7  // the Business Source License, use of this software will be governed
     8  // by the Apache License, Version 2.0, included in the file
     9  // licenses/APL.txt.
    10  
    11  package sqlbase
    12  
    13  import (
    14  	"context"
    15  	"fmt"
    16  	"sort"
    17  	"strings"
    18  
    19  	"github.com/cockroachdb/cockroach/pkg/keys"
    20  	"github.com/cockroachdb/cockroach/pkg/kv"
    21  	"github.com/cockroachdb/cockroach/pkg/roachpb"
    22  	"github.com/cockroachdb/cockroach/pkg/sql/pgwire/pgcode"
    23  	"github.com/cockroachdb/cockroach/pkg/sql/pgwire/pgerror"
    24  	"github.com/cockroachdb/cockroach/pkg/sql/sem/tree"
    25  	"github.com/cockroachdb/cockroach/pkg/sql/types"
    26  	"github.com/cockroachdb/cockroach/pkg/util/log"
    27  	"github.com/cockroachdb/errors"
    28  	"golang.org/x/text/language"
    29  )
    30  
    31  // SanitizeVarFreeExpr verifies that an expression is valid, has the correct
    32  // type and contains no variable expressions. It returns the type-checked and
    33  // constant-folded expression.
    34  func SanitizeVarFreeExpr(
    35  	ctx context.Context,
    36  	expr tree.Expr,
    37  	expectedType *types.T,
    38  	context string,
    39  	semaCtx *tree.SemaContext,
    40  	allowImpure bool,
    41  ) (tree.TypedExpr, error) {
    42  	if tree.ContainsVars(expr) {
    43  		return nil, pgerror.Newf(pgcode.Syntax,
    44  			"variable sub-expressions are not allowed in %s", context)
    45  	}
    46  
    47  	// Save the current value of semaCtx.Properties and restore it on return, in
    48  	// case we are called recursively from another context that also uses the
    49  	// properties field. The deferred call captures a copy of the value as it is now.
    50  	defer semaCtx.Properties.Restore(semaCtx.Properties)
    51  
    52  	// Ensure that the expression doesn't contain special functions, nor impure functions unless allowImpure is set.
    53  	flags := tree.RejectSpecial
    54  	if !allowImpure {
    55  		flags |= tree.RejectImpureFunctions
    56  	}
    57  	semaCtx.Properties.Require(context, flags)
    58  
    59  	typedExpr, err := tree.TypeCheck(ctx, expr, semaCtx, expectedType)
    60  	if err != nil {
    61  		return nil, err
    62  	}
    63  
    64  	actualType := typedExpr.ResolvedType()
    65  	if !expectedType.Equivalent(actualType) && typedExpr != tree.DNull {
    66  		// The expression must match the column type exactly unless it is a constant
    67  		// NULL value.
    68  		return nil, fmt.Errorf("expected %s expression to have type %s, but '%s' has type %s",
    69  			context, expectedType, expr, actualType)
    70  	}
    71  	return typedExpr, nil
    72  }
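
// Example (editorial sketch, not part of the original file): a hypothetical
// caller sanitizing a DEFAULT expression. ctx, and the use of parser.ParseExpr
// from pkg/sql/parser, are assumed here for illustration only.
//
//	semaCtx := tree.MakeSemaContext()
//	expr, _ := parser.ParseExpr("1 + 2")
//	typed, err := SanitizeVarFreeExpr(
//		ctx, expr, types.Int, "DEFAULT", &semaCtx, true, /* allowImpure */
//	)
//	// typed is the type-checked expression; an expression such as "a + 1"
//	// would instead fail with a "variable sub-expressions are not allowed" error.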
    73  
    74  // ValidateColumnDefType returns an error if the type of a column definition is
    75  // not valid. It is checked when a column is created or altered.
    76  func ValidateColumnDefType(t *types.T) error {
    77  	switch t.Family() {
    78  	case types.StringFamily, types.CollatedStringFamily:
    79  		if t.Family() == types.CollatedStringFamily {
    80  			if _, err := language.Parse(t.Locale()); err != nil {
    81  				return pgerror.Newf(pgcode.Syntax, `invalid locale %s`, t.Locale())
    82  			}
    83  		}
    84  
    85  	case types.DecimalFamily:
    86  		switch {
    87  		case t.Precision() == 0 && t.Scale() > 0:
    88  			// TODO (seif): Find right range for error message.
    89  			return errors.New("invalid NUMERIC precision 0")
    90  		case t.Precision() < t.Scale():
    91  			return fmt.Errorf("NUMERIC scale %d must be between 0 and precision %d",
    92  				t.Scale(), t.Precision())
    93  		}
    94  
    95  	case types.ArrayFamily:
    96  		if t.ArrayContents().Family() == types.ArrayFamily {
    97  			// Nested arrays are not supported as a column type.
    98  			return errors.Errorf("nested array unsupported as column type: %s", t.String())
    99  		}
   100  		if err := types.CheckArrayElementType(t.ArrayContents()); err != nil {
   101  			return err
   102  		}
   103  		return ValidateColumnDefType(t.ArrayContents())
   104  
   105  	case types.BitFamily, types.IntFamily, types.FloatFamily, types.BoolFamily, types.BytesFamily, types.DateFamily,
   106  		types.INetFamily, types.IntervalFamily, types.JsonFamily, types.OidFamily, types.TimeFamily,
   107  		types.TimestampFamily, types.TimestampTZFamily, types.UuidFamily, types.TimeTZFamily,
   108  		types.GeographyFamily, types.GeometryFamily, types.EnumFamily:
   109  		// These types are OK.
   110  
   111  	default:
   112  		return pgerror.Newf(pgcode.InvalidTableDefinition,
   113  			"value type %s cannot be used for table columns", t.String())
   114  	}
   115  
   116  	return nil
   117  }
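
// Example (editorial sketch, not part of the original file): how a caller
// might exercise ValidateColumnDefType. types.MakeDecimal and types.MakeArray
// are assumed to be the usual constructors from pkg/sql/types.
//
//	// DECIMAL(10,2) is accepted.
//	_ = ValidateColumnDefType(types.MakeDecimal(10, 2))
//
//	// A nested array such as INT[][] is rejected with
//	// "nested array unsupported as column type".
//	err := ValidateColumnDefType(types.MakeArray(types.MakeArray(types.Int)))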
   118  
   119  // MakeColumnDefDescs creates the column descriptor for a column, as well as the
   120  // index descriptor if the column is a primary key or unique.
   121  //
   122  // If the column type *may* be SERIAL (or SERIAL-like), it is the
   123  // caller's responsibility to call sql.processSerialInColumnDef() and
   124  // sql.doCreateSequence() before MakeColumnDefDescs() to remove the
   125  // SERIAL type and replace it with a suitable integer type and default
   126  // expression.
   127  //
   128  // semaCtx can be nil if no default expression is used for the
   129  // column or during cluster bootstrapping.
   130  //
   131  // The DEFAULT expression is returned in TypedExpr form for analysis (e.g. recording
   132  // sequence dependencies).
   133  func MakeColumnDefDescs(
   134  	ctx context.Context, d *tree.ColumnTableDef, semaCtx *tree.SemaContext, evalCtx *tree.EvalContext,
   135  ) (*ColumnDescriptor, *IndexDescriptor, tree.TypedExpr, error) {
   136  	if d.IsSerial {
   137  		// To the reader of this code: if control arrives here, this means
   138  		// the caller has not suitably called processSerialInColumnDef()
   139  		// prior to calling MakeColumnDefDescs. The dependent sequences
   140  		// must be created, and the SERIAL type eliminated, prior to this
   141  		// point.
   142  		return nil, nil, nil, pgerror.New(pgcode.FeatureNotSupported,
   143  			"SERIAL cannot be used in this context")
   144  	}
   145  
   146  	if len(d.CheckExprs) > 0 {
   147  		// This should never happen since `HoistConstraints` moves these to the table level.
   148  		return nil, nil, nil, errors.New("unexpected column CHECK constraint")
   149  	}
   150  	if d.HasFKConstraint() {
   151  		// This should never happen since `HoistConstraints` moves these to the table level.
   152  		return nil, nil, nil, errors.New("unexpected column REFERENCED constraint")
   153  	}
   154  
   155  	col := &ColumnDescriptor{
   156  		Name:     string(d.Name),
   157  		Nullable: d.Nullable.Nullability != tree.NotNull && !d.PrimaryKey.IsPrimaryKey,
   158  	}
   159  
   160  	// Validate and assign column type.
   161  	resType, err := tree.ResolveType(ctx, d.Type, semaCtx.GetTypeResolver())
   162  	if err != nil {
   163  		return nil, nil, nil, err
   164  	}
   165  	if err := ValidateColumnDefType(resType); err != nil {
   166  		return nil, nil, nil, err
   167  	}
   168  	col.Type = resType
   169  
   170  	var typedExpr tree.TypedExpr
   171  	if d.HasDefaultExpr() {
   172  		// Verify the default expression type is compatible with the column type
   173  		// and does not contain invalid functions.
   174  		var err error
   175  		if typedExpr, err = SanitizeVarFreeExpr(
   176  			ctx, d.DefaultExpr.Expr, resType, "DEFAULT", semaCtx, true, /* allowImpure */
   177  		); err != nil {
   178  			return nil, nil, nil, err
   179  		}
   180  
   181  		// If the default expression is not NULL, keep the type-checked expression
   182  		// so that the type annotation gets properly stored. Otherwise, leave
   183  		// col.DefaultExpr unset (nil).
   184  		if typedExpr != tree.DNull {
   185  			d.DefaultExpr.Expr = typedExpr
   186  			s := tree.Serialize(d.DefaultExpr.Expr)
   187  			col.DefaultExpr = &s
   188  		}
   189  	}
   190  
   191  	if d.IsComputed() {
   192  		s := tree.Serialize(d.Computed.Expr)
   193  		col.ComputeExpr = &s
   194  	}
   195  
   196  	var idx *IndexDescriptor
   197  	if d.PrimaryKey.IsPrimaryKey || d.Unique {
   198  		if !d.PrimaryKey.Sharded {
   199  			idx = &IndexDescriptor{
   200  				Unique:           true,
   201  				ColumnNames:      []string{string(d.Name)},
   202  				ColumnDirections: []IndexDescriptor_Direction{IndexDescriptor_ASC},
   203  			}
   204  		} else {
   205  			buckets, err := EvalShardBucketCount(ctx, semaCtx, evalCtx, d.PrimaryKey.ShardBuckets)
   206  			if err != nil {
   207  				return nil, nil, nil, err
   208  			}
   209  			shardColName := GetShardColumnName([]string{string(d.Name)}, buckets)
   210  			idx = &IndexDescriptor{
   211  				Unique:           true,
   212  				ColumnNames:      []string{shardColName, string(d.Name)},
   213  				ColumnDirections: []IndexDescriptor_Direction{IndexDescriptor_ASC, IndexDescriptor_ASC},
   214  				Sharded: ShardedDescriptor{
   215  					IsSharded:    true,
   216  					Name:         shardColName,
   217  					ShardBuckets: buckets,
   218  					ColumnNames:  []string{string(d.Name)},
   219  				},
   220  			}
   221  		}
   222  		if d.UniqueConstraintName != "" {
   223  			idx.Name = string(d.UniqueConstraintName)
   224  		}
   225  	}
   226  
   227  	return col, idx, typedExpr, nil
   228  }
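
// Example (editorial sketch, not part of the original file): a hypothetical
// caller building descriptors from an already-parsed column definition.
// colDef (*tree.ColumnTableDef), semaCtx and evalCtx are assumed to come from
// the surrounding statement planning code, with any SERIAL type already
// rewritten by the caller as described above.
//
//	col, idx, defaultExpr, err := MakeColumnDefDescs(ctx, colDef, semaCtx, evalCtx)
//	if err != nil {
//		return err
//	}
//	// col describes the column, idx is non-nil only for PRIMARY KEY / UNIQUE
//	// columns, and defaultExpr is the type-checked DEFAULT expression (or nil).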
   229  
   230  // EvalShardBucketCount evaluates and checks the integer argument to a `USING HASH WITH
   231  // BUCKET_COUNT` index creation query.
   232  func EvalShardBucketCount(
   233  	ctx context.Context, semaCtx *tree.SemaContext, evalCtx *tree.EvalContext, shardBuckets tree.Expr,
   234  ) (int32, error) {
   235  	const invalidBucketCountMsg = `BUCKET_COUNT must be an integer greater than 1`
   236  	typedExpr, err := SanitizeVarFreeExpr(
   237  		ctx, shardBuckets, types.Int, "BUCKET_COUNT", semaCtx, true, /* allowImpure */
   238  	)
   239  	if err != nil {
   240  		return 0, err
   241  	}
   242  	d, err := typedExpr.Eval(evalCtx)
   243  	if err != nil {
   244  		return 0, pgerror.Wrap(err, pgcode.InvalidParameterValue, invalidBucketCountMsg)
   245  	}
   246  	buckets := tree.MustBeDInt(d)
   247  	if buckets < 2 {
   248  		return 0, pgerror.New(pgcode.InvalidParameterValue, invalidBucketCountMsg)
   249  	}
   250  	return int32(buckets), nil
   251  }
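
// Example (editorial sketch, not part of the original file): evaluating a
// literal bucket count. semaCtx and evalCtx are assumed to be available.
//
//	buckets, err := EvalShardBucketCount(ctx, semaCtx, evalCtx, tree.NewDInt(8))
//	// buckets == 8; an expression that evaluates to 0 or 1 instead returns an
//	// InvalidParameterValue error with the BUCKET_COUNT message above.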
   252  
   253  // GetShardColumnName generates a name for the hidden shard column to be used to create a
   254  // hash sharded index.
   255  func GetShardColumnName(colNames []string, buckets int32) string {
   256  	// We sort the `colNames` here because we want to avoid creating a duplicate shard
   257  	// column if one already exists for the set of columns in `colNames`.
   258  	sort.Strings(colNames)
   259  	return strings.Join(
   260  		append(append([]string{`crdb_internal`}, colNames...), fmt.Sprintf(`shard_%v`, buckets)), `_`,
   261  	)
   262  }
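
// For example, GetShardColumnName([]string{"b", "a"}, 8) sorts the column
// names and returns "crdb_internal_a_b_shard_8". Note that the sort mutates
// the slice passed by the caller.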
   263  
   264  // EncodeColumns is a version of EncodePartialIndexKey that takes ColumnIDs and
   265  // directions explicitly. WARNING: unlike EncodePartialIndexKey, EncodeColumns
   266  // appends directly to keyPrefix.
   267  func EncodeColumns(
   268  	columnIDs []ColumnID,
   269  	directions directions,
   270  	colMap map[ColumnID]int,
   271  	values []tree.Datum,
   272  	keyPrefix []byte,
   273  ) (key []byte, containsNull bool, err error) {
   274  	key = keyPrefix
   275  	for colIdx, id := range columnIDs {
   276  		val := findColumnValue(id, colMap, values)
   277  		if val == tree.DNull {
   278  			containsNull = true
   279  		}
   280  
   281  		dir, err := directions.get(colIdx)
   282  		if err != nil {
   283  			return nil, containsNull, err
   284  		}
   285  
   286  		if key, err = EncodeTableKey(key, val, dir); err != nil {
   287  			return nil, containsNull, err
   288  		}
   289  	}
   290  	return key, containsNull, nil
   291  }
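
// Example (editorial sketch, not part of the original file): encoding two
// columns onto an existing key prefix. The column IDs, colMap layout, and
// keyPrefix are illustrative only.
//
//	colMap := map[ColumnID]int{1: 0, 2: 1}
//	values := []tree.Datum{tree.NewDInt(10), tree.DNull}
//	dirs := directions{IndexDescriptor_ASC, IndexDescriptor_ASC}
//	key, containsNull, err := EncodeColumns([]ColumnID{1, 2}, dirs, colMap, values, keyPrefix)
//	// containsNull is true because the second value is NULL; key extends keyPrefix.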
   292  
   293  // GetColumnTypes returns the types of the columns with the given IDs.
   294  func GetColumnTypes(desc *TableDescriptor, columnIDs []ColumnID) ([]*types.T, error) {
   295  	colTypes := make([]*types.T, len(columnIDs))
   296  	for i, id := range columnIDs {
   297  		col, err := desc.FindActiveColumnByID(id)
   298  		if err != nil {
   299  			return nil, err
   300  		}
   301  		colTypes[i] = col.Type
   302  	}
   303  	return colTypes, nil
   304  }
   305  
   306  // ConstraintType is used to identify the type of a constraint.
   307  type ConstraintType string
   308  
   309  const (
   310  	// ConstraintTypePK identifies a PRIMARY KEY constraint.
   311  	ConstraintTypePK ConstraintType = "PRIMARY KEY"
   312  	// ConstraintTypeFK identifies a FOREIGN KEY constraint.
   313  	ConstraintTypeFK ConstraintType = "FOREIGN KEY"
   314  	// ConstraintTypeUnique identifies a UNIQUE constraint.
   315  	ConstraintTypeUnique ConstraintType = "UNIQUE"
   316  	// ConstraintTypeCheck identifies a CHECK constraint.
   317  	ConstraintTypeCheck ConstraintType = "CHECK"
   318  )
   319  
   320  // ConstraintDetail describes a constraint.
   321  type ConstraintDetail struct {
   322  	Kind        ConstraintType
   323  	Columns     []string
   324  	Details     string
   325  	Unvalidated bool
   326  
   327  	// Only populated for PK and Unique Constraints.
   328  	Index *IndexDescriptor
   329  
   330  	// Only populated for FK Constraints.
   331  	FK              *ForeignKeyConstraint
   332  	ReferencedTable *TableDescriptor
   333  
   334  	// Only populated for Check Constraints.
   335  	CheckConstraint *TableDescriptor_CheckConstraint
   336  }
   337  
   338  type tableLookupFn func(ID) (*TableDescriptor, error)
   339  
   340  // GetConstraintInfo returns a summary of all constraints on the table.
   341  func (desc *TableDescriptor) GetConstraintInfo(
   342  	ctx context.Context, txn *kv.Txn, codec keys.SQLCodec,
   343  ) (map[string]ConstraintDetail, error) {
   344  	var tableLookup tableLookupFn
   345  	if txn != nil {
   346  		tableLookup = func(id ID) (*TableDescriptor, error) {
   347  			return GetTableDescFromID(ctx, txn, codec, id)
   348  		}
   349  	}
   350  	return desc.collectConstraintInfo(tableLookup)
   351  }
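
// Example (editorial sketch, not part of the original file): listing the
// constraints of a table inside a transaction. codec is assumed to be the
// appropriate keys.SQLCodec for the cluster.
//
//	info, err := tableDesc.GetConstraintInfo(ctx, txn, codec)
//	if err != nil {
//		return err
//	}
//	for name, detail := range info {
//		fmt.Printf("%s: %s on %v\n", name, detail.Kind, detail.Columns)
//	}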
   352  
   353  // GetConstraintInfoWithLookup returns a summary of all constraints on the
   354  // table using the provided function to fetch a TableDescriptor from an ID.
   355  func (desc *TableDescriptor) GetConstraintInfoWithLookup(
   356  	tableLookup tableLookupFn,
   357  ) (map[string]ConstraintDetail, error) {
   358  	return desc.collectConstraintInfo(tableLookup)
   359  }
   360  
   361  // CheckUniqueConstraints returns a non-nil error if a descriptor contains two
   362  // constraints with the same name.
   363  func (desc *TableDescriptor) CheckUniqueConstraints() error {
   364  	_, err := desc.collectConstraintInfo(nil)
   365  	return err
   366  }
   367  
   368  // collectConstraintInfo returns a full summary of constraints if `tableLookup`
   369  // is non-nil; otherwise it only checks that constraints have unique names.
   370  func (desc *TableDescriptor) collectConstraintInfo(
   371  	tableLookup tableLookupFn,
   372  ) (map[string]ConstraintDetail, error) {
   373  	info := make(map[string]ConstraintDetail)
   374  
   375  	// Indexes provide PK and UNIQUE constraints; FKs are handled separately below.
   376  	indexes := desc.AllNonDropIndexes()
   377  	for _, index := range indexes {
   378  		if index.ID == desc.PrimaryIndex.ID {
   379  			if _, ok := info[index.Name]; ok {
   380  				return nil, pgerror.Newf(pgcode.DuplicateObject,
   381  					"duplicate constraint name: %q", index.Name)
   382  			}
   383  			colHiddenMap := make(map[ColumnID]bool, len(desc.Columns))
   384  			for i := range desc.Columns {
   385  				col := &desc.Columns[i]
   386  				colHiddenMap[col.ID] = col.Hidden
   387  			}
   388  			// Don't include constraints against only hidden columns.
   389  			// This prevents the auto-created rowid primary key index from showing up
   390  			// in SHOW CONSTRAINTS.
   391  			hidden := true
   392  			for _, id := range index.ColumnIDs {
   393  				if !colHiddenMap[id] {
   394  					hidden = false
   395  					break
   396  				}
   397  			}
   398  			if hidden {
   399  				continue
   400  			}
   401  			detail := ConstraintDetail{Kind: ConstraintTypePK}
   402  			detail.Columns = index.ColumnNames
   403  			detail.Index = index
   404  			info[index.Name] = detail
   405  		} else if index.Unique {
   406  			if _, ok := info[index.Name]; ok {
   407  				return nil, pgerror.Newf(pgcode.DuplicateObject,
   408  					"duplicate constraint name: %q", index.Name)
   409  			}
   410  			detail := ConstraintDetail{Kind: ConstraintTypeUnique}
   411  			detail.Columns = index.ColumnNames
   412  			detail.Index = index
   413  			info[index.Name] = detail
   414  		}
   415  	}
   416  
   417  	fks := desc.AllActiveAndInactiveForeignKeys()
   418  	for _, fk := range fks {
   419  		if _, ok := info[fk.Name]; ok {
   420  			return nil, pgerror.Newf(pgcode.DuplicateObject,
   421  				"duplicate constraint name: %q", fk.Name)
   422  		}
   423  		detail := ConstraintDetail{Kind: ConstraintTypeFK}
   424  		// Constraints in the Validating state are considered Unvalidated for this purpose.
   425  		detail.Unvalidated = fk.Validity != ConstraintValidity_Validated
   426  		var err error
   427  		detail.Columns, err = desc.NamesForColumnIDs(fk.OriginColumnIDs)
   428  		if err != nil {
   429  			return nil, err
   430  		}
   431  		detail.FK = fk
   432  
   433  		if tableLookup != nil {
   434  			other, err := tableLookup(fk.ReferencedTableID)
   435  			if err != nil {
   436  				return nil, errors.NewAssertionErrorWithWrappedErrf(err,
   437  					"error resolving table %d referenced in foreign key",
   438  					log.Safe(fk.ReferencedTableID))
   439  			}
   440  			referencedColumnNames, err := other.NamesForColumnIDs(fk.ReferencedColumnIDs)
   441  			if err != nil {
   442  				return nil, err
   443  			}
   444  			detail.Details = fmt.Sprintf("%s.%v", other.Name, referencedColumnNames)
   445  			detail.ReferencedTable = other
   446  		}
   447  		info[fk.Name] = detail
   448  	}
   449  
   450  	for _, c := range desc.AllActiveAndInactiveChecks() {
   451  		if _, ok := info[c.Name]; ok {
   452  			return nil, pgerror.Newf(pgcode.DuplicateObject,
   453  				"duplicate constraint name: %q", c.Name)
   454  		}
   455  		detail := ConstraintDetail{Kind: ConstraintTypeCheck}
   456  		// Constraints in the Validating state are considered Unvalidated for this purpose.
   457  		detail.Unvalidated = c.Validity != ConstraintValidity_Validated
   458  		detail.CheckConstraint = c
   459  		detail.Details = c.Expr
   460  		if tableLookup != nil {
   461  			colsUsed, err := c.ColumnsUsed(desc)
   462  			if err != nil {
   463  				return nil, errors.NewAssertionErrorWithWrappedErrf(err,
   464  					"error computing columns used in check constraint %q", c.Name)
   465  			}
   466  			for _, colID := range colsUsed {
   467  				col, err := desc.FindColumnByID(colID)
   468  				if err != nil {
   469  					return nil, errors.NewAssertionErrorWithWrappedErrf(err,
   470  						"error finding column %d in table %s", log.Safe(colID), desc.Name)
   471  				}
   472  				detail.Columns = append(detail.Columns, col.Name)
   473  			}
   474  		}
   475  		info[c.Name] = detail
   476  	}
   477  	return info, nil
   478  }
   479  
   480  // IsValidOriginIndex returns whether the index can serve as an origin index for a foreign
   481  // key constraint with the provided set of originColIDs.
   482  func (idx *IndexDescriptor) IsValidOriginIndex(originColIDs ColumnIDs) bool {
   483  	return ColumnIDs(idx.ColumnIDs).HasPrefix(originColIDs)
   484  }
   485  
   486  // IsValidReferencedIndex returns whether the index can serve as a referenced index for a foreign
   487  // key constraint with the provided set of referencedColumnIDs.
   488  func (idx *IndexDescriptor) IsValidReferencedIndex(referencedColIDs ColumnIDs) bool {
   489  	return idx.Unique && ColumnIDs(idx.ColumnIDs).Equals(referencedColIDs)
   490  }
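
// Example (editorial sketch, not part of the original file): the difference
// between the two checks for a hypothetical unique index on columns (1, 2).
//
//	idx := &IndexDescriptor{Unique: true, ColumnIDs: []ColumnID{1, 2}}
//	idx.IsValidOriginIndex(ColumnIDs{1})        // true: {1} is a prefix of {1, 2}
//	idx.IsValidOriginIndex(ColumnIDs{2})        // false: {2} is not a prefix
//	idx.IsValidReferencedIndex(ColumnIDs{1, 2}) // true: unique and exact match
//	idx.IsValidReferencedIndex(ColumnIDs{1})    // false: must match all index columns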
   491  
   492  // FindFKReferencedIndex finds the first index in the supplied referencedTable
   493  // that can satisfy a foreign key of the supplied column ids.
   494  func FindFKReferencedIndex(
   495  	referencedTable *TableDescriptor, referencedColIDs ColumnIDs,
   496  ) (*IndexDescriptor, error) {
   497  	// Search for a unique index on the referenced table that matches our foreign
   498  	// key columns.
   499  	if referencedTable.PrimaryIndex.IsValidReferencedIndex(referencedColIDs) {
   500  		return &referencedTable.PrimaryIndex, nil
   501  	}
   502  	// If the PK doesn't match, find the index corresponding to the referenced column.
   503  	for i := range referencedTable.Indexes {
   504  		idx := &referencedTable.Indexes[i]
   505  		if idx.IsValidReferencedIndex(referencedColIDs) {
   506  			return idx, nil
   507  		}
   508  	}
   509  	return nil, pgerror.Newf(
   510  		pgcode.ForeignKeyViolation,
   511  		"there is no unique constraint matching given keys for referenced table %s",
   512  		referencedTable.Name,
   513  	)
   514  }
   515  
   516  // FindFKOriginIndex finds the first index in the supplied originTable
   517  // that can satisfy an outgoing foreign key of the supplied column ids.
   518  func FindFKOriginIndex(
   519  	originTable *TableDescriptor, originColIDs ColumnIDs,
   520  ) (*IndexDescriptor, error) {
   521  	// Search for an index on the origin table that matches our foreign
   522  	// key columns.
   523  	if originTable.PrimaryIndex.IsValidOriginIndex(originColIDs) {
   524  		return &originTable.PrimaryIndex, nil
   525  	}
   526  	// If the PK doesn't match, find the index corresponding to the origin column.
   527  	for i := range originTable.Indexes {
   528  		idx := &originTable.Indexes[i]
   529  		if idx.IsValidOriginIndex(originColIDs) {
   530  			return idx, nil
   531  		}
   532  	}
   533  	return nil, pgerror.Newf(
   534  		pgcode.ForeignKeyViolation,
   535  		"there is no index matching given keys for referenced table %s",
   536  		originTable.Name,
   537  	)
   538  }
   539  
   540  // FindFKOriginIndexInTxn finds the first index in the supplied originTable
   541  // that can satisfy an outgoing foreign key of the supplied column ids.
   542  // It returns either an index that is active, or an index that was created
   543  // in the same transaction that is currently running.
   544  func FindFKOriginIndexInTxn(
   545  	originTable *MutableTableDescriptor, originColIDs ColumnIDs,
   546  ) (*IndexDescriptor, error) {
   547  	// Search for an index on the origin table that matches our foreign
   548  	// key columns.
   549  	if originTable.PrimaryIndex.IsValidOriginIndex(originColIDs) {
   550  		return &originTable.PrimaryIndex, nil
   551  	}
   552  	// If the PK doesn't match, find the index corresponding to the origin column.
   553  	for i := range originTable.Indexes {
   554  		idx := &originTable.Indexes[i]
   555  		if idx.IsValidOriginIndex(originColIDs) {
   556  			return idx, nil
   557  		}
   558  	}
   559  	currentMutationID := originTable.ClusterVersion.NextMutationID
   560  	for i := range originTable.Mutations {
   561  		mut := &originTable.Mutations[i]
   562  		if idx := mut.GetIndex(); idx != nil &&
   563  			mut.MutationID == currentMutationID &&
   564  			mut.Direction == DescriptorMutation_ADD {
   565  			if idx.IsValidOriginIndex(originColIDs) {
   566  				return idx, nil
   567  			}
   568  		}
   569  	}
   570  	return nil, pgerror.Newf(
   571  		pgcode.ForeignKeyViolation,
   572  		"there is no index matching given keys for referenced table %s",
   573  		originTable.Name,
   574  	)
   575  }
   576  
   577  // ConditionalGetTableDescFromTxn validates that the supplied TableDescriptor
   578  // matches the one currently stored in kv. This simulates a CPut and returns a
   579  // ConditionFailedError on mismatch. We don't directly use CPut with protos
   580  // because the marshaling is not guaranteed to be stable and also because it's
   581  // sensitive to things like missing vs default values of fields.
   582  func ConditionalGetTableDescFromTxn(
   583  	ctx context.Context, txn *kv.Txn, codec keys.SQLCodec, expectation *TableDescriptor,
   584  ) (*roachpb.Value, error) {
   585  	key := MakeDescMetadataKey(codec, expectation.ID)
   586  	existingKV, err := txn.Get(ctx, key)
   587  	if err != nil {
   588  		return nil, err
   589  	}
   590  	var existing *Descriptor
   591  	if existingKV.Value != nil {
   592  		existing = &Descriptor{}
   593  		if err := existingKV.Value.GetProto(existing); err != nil {
   594  			return nil, errors.Wrapf(err,
   595  				"decoding current table descriptor value for id: %d", expectation.ID)
   596  		}
   597  		existing.Table(existingKV.Value.Timestamp) // populates the modification time from the read's timestamp
   598  	}
   599  	wrapped := WrapDescriptor(expectation)
   600  	if !existing.Equal(wrapped) {
   601  		return nil, &roachpb.ConditionFailedError{ActualValue: existingKV.Value}
   602  	}
   603  	return existingKV.Value, nil
   604  }
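
// Example (editorial sketch, not part of the original file): guarding a
// descriptor rewrite inside a transaction. expectedDesc is the descriptor the
// caller believes is currently stored.
//
//	if _, err := ConditionalGetTableDescFromTxn(ctx, txn, codec, expectedDesc); err != nil {
//		// A *roachpb.ConditionFailedError here means another writer changed
//		// the descriptor since it was read.
//		return err
//	}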
   605  
   606  // FilterTableState inspects the state of a given table and returns an error if
   607  // the state is anything but PUBLIC. The error describes the state of the table.
   608  func FilterTableState(tableDesc *TableDescriptor) error {
   609  	switch tableDesc.State {
   610  	case TableDescriptor_DROP:
   611  		return &inactiveTableError{errors.New("table is being dropped")}
   612  	case TableDescriptor_OFFLINE:
   613  		err := errors.Errorf("table %q is offline", tableDesc.Name)
   614  		if tableDesc.OfflineReason != "" {
   615  			err = errors.Errorf("table %q is offline: %s", tableDesc.Name, tableDesc.OfflineReason)
   616  		}
   617  		return &inactiveTableError{err}
   618  	case TableDescriptor_ADD:
   619  		return errTableAdding
   620  	case TableDescriptor_PUBLIC:
   621  		return nil
   622  	default:
   623  		return errors.Errorf("table in unknown state: %s", tableDesc.State.String())
   624  	}
   625  }
   626  
   627  var errTableAdding = errors.New("table is being added")
   628  
   629  type inactiveTableError struct {
   630  	cause error
   631  }
   632  
   633  func (i *inactiveTableError) Error() string { return i.cause.Error() }
   634  
   635  func (i *inactiveTableError) Unwrap() error { return i.cause }
   636  
   637  // HasAddingTableError returns true if the error is or wraps errTableAdding.
   638  func HasAddingTableError(err error) bool {
   639  	return errors.Is(err, errTableAdding)
   640  }
   641  
   642  // HasInactiveTableError returns true if the error contains an
   643  // inactiveTableError.
   644  func HasInactiveTableError(err error) bool {
   645  	return errors.HasType(err, (*inactiveTableError)(nil))
   646  }
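
// Example (editorial sketch, not part of the original file): reacting to the
// different non-PUBLIC states reported by FilterTableState.
//
//	if err := FilterTableState(tableDesc); err != nil {
//		switch {
//		case HasAddingTableError(err):
//			// The table is still being added; the caller may wait and retry.
//		case HasInactiveTableError(err):
//			// The table is dropped or offline; surface the error.
//		}
//		return err
//	}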