github.com/cockroachdb/cockroach@v20.2.0-alpha.1+incompatible/pkg/sql/create_table.go

// Copyright 2017 The Cockroach Authors.
//
// Use of this software is governed by the Business Source License
// included in the file licenses/BSL.txt.
//
// As of the Change Date specified in that file, in accordance with
// the Business Source License, use of this software will be governed
// by the Apache License, Version 2.0, included in the file
// licenses/APL.txt.

package sql

import (
	"bytes"
	"context"
	"fmt"
	"go/constant"
	"strconv"
	"strings"

	"github.com/cockroachdb/cockroach/pkg/clusterversion"
	"github.com/cockroachdb/cockroach/pkg/geo/geoindex"
	"github.com/cockroachdb/cockroach/pkg/keys"
	"github.com/cockroachdb/cockroach/pkg/kv"
	"github.com/cockroachdb/cockroach/pkg/server/telemetry"
	"github.com/cockroachdb/cockroach/pkg/settings/cluster"
	"github.com/cockroachdb/cockroach/pkg/sql/catalog/catalogkv"
	"github.com/cockroachdb/cockroach/pkg/sql/catalog/resolver"
	"github.com/cockroachdb/cockroach/pkg/sql/parser"
	"github.com/cockroachdb/cockroach/pkg/sql/pgwire/pgcode"
	"github.com/cockroachdb/cockroach/pkg/sql/pgwire/pgerror"
	"github.com/cockroachdb/cockroach/pkg/sql/pgwire/pgnotice"
	"github.com/cockroachdb/cockroach/pkg/sql/row"
	"github.com/cockroachdb/cockroach/pkg/sql/schemaexpr"
	"github.com/cockroachdb/cockroach/pkg/sql/sem/tree"
	"github.com/cockroachdb/cockroach/pkg/sql/sessiondata"
	"github.com/cockroachdb/cockroach/pkg/sql/sqlbase"
	"github.com/cockroachdb/cockroach/pkg/sql/sqltelemetry"
	"github.com/cockroachdb/cockroach/pkg/sql/types"
	"github.com/cockroachdb/cockroach/pkg/util"
	"github.com/cockroachdb/cockroach/pkg/util/errorutil/unimplemented"
	"github.com/cockroachdb/cockroach/pkg/util/hlc"
	"github.com/cockroachdb/errors"
	"github.com/lib/pq/oid"
)

type createTableNode struct {
	n          *tree.CreateTable
	dbDesc     *sqlbase.DatabaseDescriptor
	sourcePlan planNode

	run createTableRun
}

// createTableRun contains the run-time state of createTableNode
// during local execution.
type createTableRun struct {
	autoCommit autoCommitOpt

	// synthRowID indicates whether an input column needs to be synthesized to
	// provide the default value for the hidden rowid column. The optimizer's
	// plan already includes this column if a user-specified PK does not exist
	// (so synthRowID is false), whereas the heuristic planner's plan does not
	// in this case (so synthRowID is true).
	synthRowID bool

	// fromHeuristicPlanner indicates whether the planning was performed by the
	// heuristic planner instead of the optimizer. This is used to determine
	// whether a rowid column was synthesized as part of the planning stage
	// when no user-defined PK is specified.
	fromHeuristicPlanner bool
}

// storageParamType indicates the required type of a storage parameter.
type storageParamType int

// storageParamType values
const (
	storageParamBool storageParamType = iota
	storageParamInt
	storageParamFloat
	storageParamUnimplemented
)

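// storageParamExpectedTypes maps the supported Postgres storage parameter
// names to the type their value must have. Parameters mapped to
// storageParamUnimplemented are recognized but not supported. As an
// illustrative example, a statement such as
//
//	CREATE TABLE t (x INT) WITH (fillfactor = 100);
//
// passes this check because fillfactor expects an integer value.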
var storageParamExpectedTypes = map[string]storageParamType{
	`fillfactor`:                                  storageParamInt,
	`toast_tuple_target`:                          storageParamUnimplemented,
	`parallel_workers`:                            storageParamUnimplemented,
	`autovacuum_enabled`:                          storageParamUnimplemented,
	`toast.autovacuum_enabled`:                    storageParamUnimplemented,
	`autovacuum_vacuum_threshold`:                 storageParamUnimplemented,
	`toast.autovacuum_vacuum_threshold`:           storageParamUnimplemented,
	`autovacuum_vacuum_scale_factor`:              storageParamUnimplemented,
	`toast.autovacuum_vacuum_scale_factor`:        storageParamUnimplemented,
	`autovacuum_analyze_threshold`:                storageParamUnimplemented,
	`autovacuum_analyze_scale_factor`:             storageParamUnimplemented,
	`autovacuum_vacuum_cost_delay`:                storageParamUnimplemented,
	`toast.autovacuum_vacuum_cost_delay`:          storageParamUnimplemented,
	`autovacuum_vacuum_cost_limit`:                storageParamUnimplemented,
	`autovacuum_freeze_min_age`:                   storageParamUnimplemented,
	`toast.autovacuum_freeze_min_age`:             storageParamUnimplemented,
	`autovacuum_freeze_max_age`:                   storageParamUnimplemented,
	`toast.autovacuum_freeze_max_age`:             storageParamUnimplemented,
	`autovacuum_freeze_table_age`:                 storageParamUnimplemented,
	`toast.autovacuum_freeze_table_age`:           storageParamUnimplemented,
	`autovacuum_multixact_freeze_min_age`:         storageParamUnimplemented,
	`toast.autovacuum_multixact_freeze_min_age`:   storageParamUnimplemented,
	`autovacuum_multixact_freeze_max_age`:         storageParamUnimplemented,
	`toast.autovacuum_multixact_freeze_max_age`:   storageParamUnimplemented,
	`autovacuum_multixact_freeze_table_age`:       storageParamUnimplemented,
	`toast.autovacuum_multixact_freeze_table_age`: storageParamUnimplemented,
	`log_autovacuum_min_duration`:                 storageParamUnimplemented,
	`toast.log_autovacuum_min_duration`:           storageParamUnimplemented,
	`user_catalog_table`:                          storageParamUnimplemented,
}

// minimumTypeUsageVersions defines the minimum version needed for a new
// data type.
var minimumTypeUsageVersions = map[types.Family]clusterversion.VersionKey{
	types.TimeTZFamily:    clusterversion.VersionTimeTZType,
	types.GeographyFamily: clusterversion.VersionGeospatialType,
	types.GeometryFamily:  clusterversion.VersionGeospatialType,
}

// isTypeSupportedInVersion returns whether a given type is supported in the given version.
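// For example (illustrative): on a cluster whose active version predates
// VersionGeospatialType, a CREATE TABLE with a GEOMETRY or GEOGRAPHY column
// fails with "type ... is not supported until version upgrade is finalized",
// and non-default precisions such as TIME(3) are gated behind
// VersionTimePrecision in the same way.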
func isTypeSupportedInVersion(v clusterversion.ClusterVersion, t *types.T) (bool, error) {
	switch t.Family() {
	case types.TimeFamily, types.TimestampFamily, types.TimestampTZFamily, types.TimeTZFamily:
		if t.Precision() != 6 && !v.IsActive(clusterversion.VersionTimePrecision) {
			return false, nil
		}
	case types.IntervalFamily:
		itm, err := t.IntervalTypeMetadata()
		if err != nil {
			return false, err
		}
		if (t.Precision() != 6 || itm.DurationField != types.IntervalDurationField{}) &&
			!v.IsActive(clusterversion.VersionTimePrecision) {
			return false, nil
		}
	}
	minVersion, ok := minimumTypeUsageVersions[t.Family()]
	if !ok {
		return true, nil
	}
	return v.IsActive(minVersion), nil
}

// ReadingOwnWrites implements the planNodeReadingOwnWrites interface.
// This is because CREATE TABLE performs multiple KV operations on descriptors
// and expects to see its own writes.
func (n *createTableNode) ReadingOwnWrites() {}

// getTableCreateParams returns the table key needed for the new table,
// as well as the schema ID. It returns valid key data even when the desired
// object already exists, alongside the corresponding "already exists" error.
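//
// For example (illustrative), the temporary-table path below is exercised by:
//
//	SET experimental_enable_temp_tables = 'on';
//	CREATE TEMP TABLE t (x INT);
//
// which resolves (or lazily creates) the session's pg_temp schema instead of
// using the public schema.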
func getTableCreateParams(
	params runParams, dbID sqlbase.ID, isTemporary bool, tableName string,
) (sqlbase.DescriptorKey, sqlbase.ID, error) {
	// By default, all tables are created in the `public` schema.
	schemaID := sqlbase.ID(keys.PublicSchemaID)
	tKey := sqlbase.MakePublicTableNameKey(params.ctx,
		params.ExecCfg().Settings, dbID, tableName)
	if isTemporary {
		if !params.SessionData().TempTablesEnabled {
			return nil, 0, errors.WithTelemetry(
				pgerror.WithCandidateCode(
					errors.WithHint(
						errors.WithIssueLink(
							errors.Newf("temporary tables are only supported experimentally"),
							errors.IssueLink{IssueURL: unimplemented.MakeURL(46260)},
						),
						"You can enable temporary tables by running `SET experimental_enable_temp_tables = 'on'`.",
					),
					pgcode.FeatureNotSupported,
				),
				"sql.schema.temp_tables_disabled",
			)
		}

		tempSchemaName := params.p.TemporarySchemaName()
		sKey := sqlbase.NewSchemaKey(dbID, tempSchemaName)
		var err error
		schemaID, err = catalogkv.GetDescriptorID(params.ctx, params.p.txn, params.ExecCfg().Codec, sKey)
		if err != nil {
			return nil, 0, err
		} else if schemaID == sqlbase.InvalidID {
			// The temporary schema has not been created yet.
			if schemaID, err = createTempSchema(params, sKey); err != nil {
				return nil, 0, err
			}
		}

		tKey = sqlbase.NewTableKey(dbID, schemaID, tableName)
	}

	exists, id, err := sqlbase.LookupObjectID(
		params.ctx, params.p.txn, params.ExecCfg().Codec, dbID, schemaID, tableName)
	if err == nil && exists {
		// Try and see what kind of object we collided with.
		desc, err := catalogkv.GetDescriptorByID(params.ctx, params.p.txn, params.ExecCfg().Codec, id)
		if err != nil {
			return nil, 0, err
		}
		// Still return data in this case.
		return tKey, schemaID, makeObjectAlreadyExistsError(desc, tableName)
	} else if err != nil {
		return nil, 0, err
	}
	return tKey, schemaID, nil
}

func (n *createTableNode) startExec(params runParams) error {
	telemetry.Inc(sqltelemetry.SchemaChangeCreateCounter("table"))
	isTemporary := n.n.Temporary

	tKey, schemaID, err := getTableCreateParams(params, n.dbDesc.ID, isTemporary, n.n.Table.Table())
	if err != nil {
		if sqlbase.IsRelationAlreadyExistsError(err) && n.n.IfNotExists {
			return nil
		}
		return err
	}

	if n.n.Interleave != nil {
		telemetry.Inc(sqltelemetry.CreateInterleavedTableCounter)
	}
	if isTemporary {
		telemetry.Inc(sqltelemetry.CreateTempTableCounter)

		// TODO(#46556): support ON COMMIT DROP and DELETE ROWS on TEMPORARY TABLE.
		// If we do this, the n.n.OnCommit variable should probably be stored on the
		// table descriptor.
		// Note that UNSET and PRESERVE ROWS behave the same way, so no extra
		// handling is needed for them for now.
		switch n.n.OnCommit {
		case tree.CreateTableOnCommitUnset, tree.CreateTableOnCommitPreserveRows:
		default:
			return errors.AssertionFailedf("ON COMMIT value %d is unrecognized", n.n.OnCommit)
		}
	} else if n.n.OnCommit != tree.CreateTableOnCommitUnset {
		return pgerror.Newf(
			pgcode.InvalidTableDefinition,
			"ON COMMIT can only be used on temporary tables",
		)
	}

	// Warn against creating non-partitioned indexes on a partitioned table,
	// which is undesirable in most cases.
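	// For example (illustrative), the secondary index in
	//
	//	CREATE TABLE t (a INT PRIMARY KEY, b INT, INDEX (b))
	//	  PARTITION BY LIST (a) (PARTITION p1 VALUES IN (1));
	//
	// triggers this notice because it does not declare its own PARTITION BY.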
	if n.n.PartitionBy != nil {
		for _, def := range n.n.Defs {
			if d, ok := def.(*tree.IndexTableDef); ok {
				if d.PartitionBy == nil {
					params.p.SendClientNotice(
						params.ctx,
						errors.WithHint(
							pgnotice.Newf("creating non-partitioned index on partitioned table may not be performant"),
							"Consider modifying the index such that it is also partitioned.",
						),
					)
				}
			}
		}
	}

	id, err := catalogkv.GenerateUniqueDescID(params.ctx, params.p.ExecCfg().DB, params.p.ExecCfg().Codec)
	if err != nil {
		return err
	}

	// If a new system table is being created (which should only be doable by
	// an internal user account), make sure it gets the correct privileges.
	privs := n.dbDesc.GetPrivileges()
	if n.dbDesc.ID == keys.SystemDatabaseID {
		privs = sqlbase.NewDefaultPrivilegeDescriptor()
	}

	var asCols sqlbase.ResultColumns
	var desc sqlbase.MutableTableDescriptor
	var affected map[sqlbase.ID]*sqlbase.MutableTableDescriptor
	creationTime := params.creationTimeForNewTableDescriptor()
	if n.n.As() {
		asCols = planColumns(n.sourcePlan)
		if !n.run.fromHeuristicPlanner && !n.n.AsHasUserSpecifiedPrimaryKey() {
			// The rowID column is already present in the input as the last column
			// if it was planned by the optimizer and the user did not specify a
			// PRIMARY KEY. So ignore it for the purpose of creating column metadata
			// (because makeTableDescIfAs does it automatically).
			asCols = asCols[:len(asCols)-1]
		}

		desc, err = makeTableDescIfAs(params,
			n.n, n.dbDesc.ID, schemaID, id, creationTime, asCols, privs, params.p.EvalContext(), isTemporary)
		if err != nil {
			return err
		}

		// If we have an implicit txn we want to run CTAS async, and consequently
		// ensure it gets queued as a SchemaChange.
		if params.p.ExtendedEvalContext().TxnImplicit {
			desc.State = sqlbase.TableDescriptor_ADD
		}
	} else {
		affected = make(map[sqlbase.ID]*sqlbase.MutableTableDescriptor)
		desc, err = makeTableDesc(params, n.n, n.dbDesc.ID, schemaID, id, creationTime, privs, affected, isTemporary)
		if err != nil {
			return err
		}

		if desc.Adding() {
			// If this table and all its references are created in the same
			// transaction it can be made PUBLIC.
			refs, err := desc.FindAllReferences()
			if err != nil {
				return err
			}
			var foundExternalReference bool
			for id := range refs {
				if t := params.p.Tables().GetUncommittedTableByID(id).MutableTableDescriptor; t == nil || !t.IsNewTable() {
					foundExternalReference = true
					break
				}
			}
			if !foundExternalReference {
				desc.State = sqlbase.TableDescriptor_PUBLIC
			}
		}
	}

	// Descriptor written to store here.
	if err := params.p.createDescriptorWithID(
		params.ctx, tKey.Key(params.ExecCfg().Codec), id, &desc, params.EvalContext().Settings,
		tree.AsStringWithFQNames(n.n, params.Ann()),
	); err != nil {
		return err
	}

	for _, updated := range affected {
		// TODO (lucy): Have more consistent/informative names for dependent jobs.
		if err := params.p.writeSchemaChange(
			params.ctx, updated, sqlbase.InvalidMutationID, "updating referenced table",
		); err != nil {
			return err
		}
	}

	for _, index := range desc.AllNonDropIndexes() {
		if len(index.Interleave.Ancestors) > 0 {
			if err := params.p.finalizeInterleave(params.ctx, &desc, index); err != nil {
				return err
			}
		}
	}

	if err := desc.Validate(params.ctx, params.p.txn, params.ExecCfg().Codec); err != nil {
		return err
	}

	// Log Create Table event. This is an auditable log event and is
	// recorded in the same transaction as the table descriptor update.
	if err := MakeEventLogger(params.extendedEvalCtx.ExecCfg).InsertEventRecord(
		params.ctx,
		params.p.txn,
		EventLogCreateTable,
		int32(desc.ID),
		int32(params.extendedEvalCtx.NodeID.SQLInstanceID()),
		struct {
			TableName string
			Statement string
			User      string
		}{n.n.Table.FQString(), n.n.String(), params.SessionData().User},
	); err != nil {
		return err
	}

	// If we are in an explicit txn or the source has placeholders, we execute the
	// CTAS query synchronously.
	if n.n.As() && !params.p.ExtendedEvalContext().TxnImplicit {
		err = func() error {
			// The data fill portion of CREATE AS must operate on a read snapshot,
			// so that it doesn't end up observing its own writes.
			prevMode := params.p.Txn().ConfigureStepping(params.ctx, kv.SteppingEnabled)
			defer func() { _ = params.p.Txn().ConfigureStepping(params.ctx, prevMode) }()

			// This is a very simplified version of the INSERT logic: no CHECK
			// expressions, no FK checks, no arbitrary insertion order, no
			// RETURNING, etc.

			// Instantiate a row inserter and table writer. It has a 1-1
			// mapping to the definitions in the descriptor.
			ri, err := row.MakeInserter(
				params.ctx,
				params.p.txn,
				params.ExecCfg().Codec,
				sqlbase.NewImmutableTableDescriptor(*desc.TableDesc()),
				desc.Columns,
				row.SkipFKs,
				nil, /* fkTables */
				params.p.alloc)
			if err != nil {
				return err
			}
			ti := tableInserterPool.Get().(*tableInserter)
			*ti = tableInserter{ri: ri}
			tw := tableWriter(ti)
			if n.run.autoCommit == autoCommitEnabled {
				tw.enableAutoCommit()
			}
			defer func() {
				tw.close(params.ctx)
				*ti = tableInserter{}
				tableInserterPool.Put(ti)
			}()
			if err := tw.init(params.ctx, params.p.txn, params.p.EvalContext()); err != nil {
				return err
			}

			// Prepare the buffer for row values. At this point, one more column has
			// been added by ensurePrimaryKey() to the list of columns in sourcePlan, if
			// a PRIMARY KEY is not specified by the user.
			rowBuffer := make(tree.Datums, len(desc.Columns))
			pkColIdx := len(desc.Columns) - 1

			// The optimizer includes the rowID expression as part of the input
			// expression. But the heuristic planner does not do this, so construct
			// a rowID expression to be evaluated separately.
			var defTypedExpr tree.TypedExpr
			if n.run.synthRowID {
				// Prepare the rowID expression.
				defExprSQL := *desc.Columns[pkColIdx].DefaultExpr
				defExpr, err := parser.ParseExpr(defExprSQL)
				if err != nil {
					return err
				}
				defTypedExpr, err = params.p.analyzeExpr(
					params.ctx,
					defExpr,
					nil, /*sources*/
					tree.IndexedVarHelper{},
					types.Any,
					false, /*requireType*/
					"CREATE TABLE AS")
				if err != nil {
					return err
				}
			}

			for {
				if err := params.p.cancelChecker.Check(); err != nil {
					return err
				}
				if next, err := n.sourcePlan.Next(params); !next {
					if err != nil {
						return err
					}
					_, err := tw.finalize(
						params.ctx, params.extendedEvalCtx.Tracing.KVTracingEnabled())
					if err != nil {
						return err
					}
					break
				}

				// Populate the buffer and generate the PK value.
				copy(rowBuffer, n.sourcePlan.Values())
				if n.run.synthRowID {
					rowBuffer[pkColIdx], err = defTypedExpr.Eval(params.p.EvalContext())
					if err != nil {
						return err
					}
				}

				// TODO(mgartner): Add partial index IDs to ignoreIndexes that we should
				// not add entries to.
				var ignoreIndexes util.FastIntSet
				if err := tw.row(params.ctx, rowBuffer, ignoreIndexes, params.extendedEvalCtx.Tracing.KVTracingEnabled()); err != nil {
					return err
				}
			}
			return nil
		}()
		if err != nil {
			return err
		}
	}

	return nil
}

func (*createTableNode) Next(runParams) (bool, error) { return false, nil }
func (*createTableNode) Values() tree.Datums          { return tree.Datums{} }

func (n *createTableNode) Close(ctx context.Context) {
	if n.sourcePlan != nil {
		n.sourcePlan.Close(ctx)
		n.sourcePlan = nil
	}
}

// resolveFK on the planner calls resolveFK() on the current txn.
//
// The caller must make sure the planner is configured to look up
// descriptors without caching. See the comment on resolveFK().
func (p *planner) resolveFK(
	ctx context.Context,
	tbl *sqlbase.MutableTableDescriptor,
	d *tree.ForeignKeyConstraintTableDef,
	backrefs map[sqlbase.ID]*sqlbase.MutableTableDescriptor,
	ts FKTableState,
	validationBehavior tree.ValidationBehavior,
) error {
	return ResolveFK(ctx, p.txn, p, tbl, d, backrefs, ts, validationBehavior, p.EvalContext())
}

func qualifyFKColErrorWithDB(
	ctx context.Context, txn *kv.Txn, codec keys.SQLCodec, tbl *sqlbase.TableDescriptor, col string,
) string {
	if txn == nil {
		return tree.ErrString(tree.NewUnresolvedName(tbl.Name, col))
	}

	// TODO(solon): this ought to use a database cache.
	db, err := sqlbase.GetDatabaseDescFromID(ctx, txn, codec, tbl.ParentID)
	if err != nil {
		return tree.ErrString(tree.NewUnresolvedName(tbl.Name, col))
	}
	schema, err := resolver.ResolveSchemaNameByID(ctx, txn, codec, db.ID, tbl.GetParentSchemaID())
	if err != nil {
		return tree.ErrString(tree.NewUnresolvedName(tbl.Name, col))
	}
	return tree.ErrString(tree.NewUnresolvedName(db.Name, schema, tbl.Name, col))
}

// FKTableState is the state of the referencing table resolveFK() is called on.
type FKTableState int

const (
	// NewTable represents a new table, where the FK constraint is specified in
	// the CREATE TABLE statement.
	NewTable FKTableState = iota
	// EmptyTable represents an existing table that is empty.
	EmptyTable
	// NonEmptyTable represents an existing non-empty table.
	NonEmptyTable
)

// MaybeUpgradeDependentOldForeignKeyVersionTables upgrades the on-disk foreign key descriptor
// version of all table descriptors that have foreign key relationships with desc. This is intended
// to catch 19.1-version table descriptors that haven't been upgraded yet before an operation
// like drop index which could cause them to lose FK information in the old representation.
func (p *planner) MaybeUpgradeDependentOldForeignKeyVersionTables(
	ctx context.Context, desc *sqlbase.MutableTableDescriptor,
) error {
	// In order to avoid having old version foreign key descriptors that depend on this
	// index lose information when this index is dropped, ensure that they get updated.
	maybeUpgradeFKRepresentation := func(id sqlbase.ID) error {
		// Read the referenced table and see if the foreign key representation has changed. If it has, write
		// the upgraded descriptor back to disk.
		tbl, didUpgrade, err := sqlbase.GetTableDescFromIDWithFKsChanged(ctx, p.txn, p.ExecCfg().Codec, id)
		if err != nil {
			return err
		}
		if didUpgrade {
			// TODO (lucy): Have more consistent/informative names for dependent jobs.
			err := p.writeSchemaChange(
				ctx, sqlbase.NewMutableExistingTableDescriptor(*tbl), sqlbase.InvalidMutationID,
				"updating foreign key references on table",
			)
			if err != nil {
				return err
			}
		}
		return nil
	}
	for i := range desc.OutboundFKs {
		if err := maybeUpgradeFKRepresentation(desc.OutboundFKs[i].ReferencedTableID); err != nil {
			return err
		}
	}
	for i := range desc.InboundFKs {
		if err := maybeUpgradeFKRepresentation(desc.InboundFKs[i].OriginTableID); err != nil {
			return err
		}
	}
	return nil
}

// ResolveFK looks up the tables and columns mentioned in a `REFERENCES`
// constraint and adds metadata representing that constraint to the descriptor.
// It may, in doing so, add to or alter descriptors in the passed in `backrefs`
// map of other tables that need to be updated when this table is created.
// Constraints that are not known to hold for existing data are created
// "unvalidated", but when the table is empty (e.g. during creation), no
// existing data implies no existing violations, and thus the constraint can be
// created without the unvalidated flag.
//
// The caller should pass an instance of fkSelfResolver as
// SchemaResolver, so that FK references can find the newly created
// table for self-references.
//
// The caller must also ensure that the SchemaResolver is configured to
// bypass caching and enable visibility of just-added descriptors.
// If there are any FKs, the descriptor of the depended-on table must
// be looked up uncached, and we'll allow FK dependencies on tables
// that were just added.
//
// The passed Txn is used to look up databases to qualify names in error
// messages; if it is nil, the names in those errors are left unqualified.
//
// The passed validationBehavior is used to determine whether or not preexisting
// entries in the table need to be validated against the foreign key being added.
// This only applies for existing tables, not new tables.
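//
// For example (illustrative), in
//
//	CREATE TABLE parent (id INT PRIMARY KEY);
//	CREATE TABLE child (pid INT REFERENCES parent);
//
// the constraint on child names no target columns, so it defaults to parent's
// primary key, and it is created without the unvalidated flag since a new
// table holds no data.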
func ResolveFK(
	ctx context.Context,
	txn *kv.Txn,
	sc resolver.SchemaResolver,
	tbl *sqlbase.MutableTableDescriptor,
	d *tree.ForeignKeyConstraintTableDef,
	backrefs map[sqlbase.ID]*sqlbase.MutableTableDescriptor,
	ts FKTableState,
	validationBehavior tree.ValidationBehavior,
	evalCtx *tree.EvalContext,
) error {
	originCols := make([]*sqlbase.ColumnDescriptor, len(d.FromCols))
	for i, col := range d.FromCols {
		col, err := tbl.FindActiveOrNewColumnByName(col)
		if err != nil {
			return err
		}
		if err := col.CheckCanBeFKRef(); err != nil {
			return err
		}
		originCols[i] = col
	}

	target, err := resolver.ResolveMutableExistingTableObject(ctx, sc, &d.Table, true /*required*/, resolver.ResolveRequireTableDesc)
	if err != nil {
		return err
	}
	if tbl.Temporary != target.Temporary {
		tablePersistenceType := "permanent"
		if tbl.Temporary {
			tablePersistenceType = "temporary"
		}
		return pgerror.Newf(
			pgcode.InvalidTableDefinition,
			"constraints on %s tables may reference only %s tables",
			tablePersistenceType,
			tablePersistenceType,
		)
	}
	if target.ID == tbl.ID {
		// When adding a self-ref FK to an _existing_ table, we want to make sure
		// we edit the same copy.
		target = tbl
	} else {
		// Since this FK is referencing another table, this table must be created in
		// a non-public "ADD" state and made public only after all leases on the
		// other table are updated to include the backref, if it does not already
		// exist.
		if ts == NewTable {
			tbl.State = sqlbase.TableDescriptor_ADD
		}

		// If we resolve the same table more than once, we only want to edit a
		// single instance of it, so replace target with previously resolved table.
		if prev, ok := backrefs[target.ID]; ok {
			target = prev
		} else {
			backrefs[target.ID] = target
		}
	}

	referencedColNames := d.ToCols
	// If no columns are specified, attempt to default to PK.
	if len(referencedColNames) == 0 {
		referencedColNames = make(tree.NameList, len(target.PrimaryIndex.ColumnNames))
		for i, n := range target.PrimaryIndex.ColumnNames {
			referencedColNames[i] = tree.Name(n)
		}
	}

	referencedCols, err := target.FindActiveColumnsByNames(referencedColNames)
	if err != nil {
		return err
	}

	if len(referencedCols) != len(originCols) {
		return pgerror.Newf(pgcode.Syntax,
			"%d columns must reference exactly %d columns in referenced table (found %d)",
			len(originCols), len(originCols), len(referencedCols))
	}

	for i := range originCols {
		if s, t := originCols[i], referencedCols[i]; !s.Type.Equivalent(t.Type) {
			return pgerror.Newf(pgcode.DatatypeMismatch,
				"type of %q (%s) does not match foreign key %q.%q (%s)",
				s.Name, s.Type.String(), target.Name, t.Name, t.Type.String())
		}
	}

	// Verify we are not writing a constraint over the same name.
	// This check is done in Verify(), but we must do it earlier
	// or else we can hit other checks that break things with
	// undesired error codes, e.g. #42858.
	// It may be removable after #37255 is complete.
	constraintInfo, err := tbl.GetConstraintInfo(ctx, nil, evalCtx.Codec)
	if err != nil {
		return err
	}
	constraintName := string(d.Name)
	if constraintName == "" {
		constraintName = sqlbase.GenerateUniqueConstraintName(
			fmt.Sprintf("fk_%s_ref_%s", string(d.FromCols[0]), target.Name),
			func(p string) bool {
				_, ok := constraintInfo[p]
				return ok
			},
		)
	} else {
		if _, ok := constraintInfo[constraintName]; ok {
			return pgerror.Newf(pgcode.DuplicateObject, "duplicate constraint name: %q", constraintName)
		}
	}

	targetColIDs := make(sqlbase.ColumnIDs, len(referencedCols))
	for i := range referencedCols {
		targetColIDs[i] = referencedCols[i].ID
	}

	// Don't add a SET NULL action on an index that has any column that is NOT
	// NULL.
	if d.Actions.Delete == tree.SetNull || d.Actions.Update == tree.SetNull {
		for _, originColumn := range originCols {
			if !originColumn.Nullable {
				col := qualifyFKColErrorWithDB(ctx, txn, evalCtx.Codec, tbl.TableDesc(), originColumn.Name)
				return pgerror.Newf(pgcode.InvalidForeignKey,
					"cannot add a SET NULL cascading action on column %q which has a NOT NULL constraint", col,
				)
			}
		}
	}

	// Don't add a SET DEFAULT action on an index that has any column that has
	// a DEFAULT expression of NULL and a NOT NULL constraint.
	if d.Actions.Delete == tree.SetDefault || d.Actions.Update == tree.SetDefault {
		for _, originColumn := range originCols {
			// Having a default expression of NULL and a constraint of NOT NULL is a
			// contradiction and should never be allowed.
			if originColumn.DefaultExpr == nil && !originColumn.Nullable {
				col := qualifyFKColErrorWithDB(ctx, txn, evalCtx.Codec, tbl.TableDesc(), originColumn.Name)
				return pgerror.Newf(pgcode.InvalidForeignKey,
					"cannot add a SET DEFAULT cascading action on column %q which has a "+
						"NOT NULL constraint and a NULL default expression", col,
				)
			}
		}
	}

	originColumnIDs := make(sqlbase.ColumnIDs, len(originCols))
	for i, col := range originCols {
		originColumnIDs[i] = col.ID
	}
	var legacyOriginIndexID sqlbase.IndexID
	// Search for an index on the origin table that matches. If one doesn't
	// exist, we create one automatically if the table to alter is new or empty.
	// We also search for an index on this set of columns that was created
	// earlier in the same transaction.
	originIdx, err := sqlbase.FindFKOriginIndexInTxn(tbl, originColumnIDs)
	if err == nil {
		// If there was no error, we found a suitable index.
		legacyOriginIndexID = originIdx.ID
	} else {
		// No existing suitable index was found.
		if ts == NonEmptyTable {
			var colNames bytes.Buffer
			colNames.WriteString(`("`)
			for i, id := range originColumnIDs {
				if i != 0 {
					colNames.WriteString(`", "`)
				}
				col, err := tbl.TableDesc().FindColumnByID(id)
				if err != nil {
					return err
				}
				colNames.WriteString(col.Name)
			}
			colNames.WriteString(`")`)
			return pgerror.Newf(pgcode.ForeignKeyViolation,
				"foreign key requires an existing index on columns %s", colNames.String())
		}
		id, err := addIndexForFK(tbl, originCols, constraintName, ts)
		if err != nil {
			return err
		}
		legacyOriginIndexID = id
	}

	referencedIdx, err := sqlbase.FindFKReferencedIndex(target.TableDesc(), targetColIDs)
	if err != nil {
		return err
	}
	legacyReferencedIndexID := referencedIdx.ID

	var validity sqlbase.ConstraintValidity
	if ts != NewTable {
		if validationBehavior == tree.ValidationSkip {
			validity = sqlbase.ConstraintValidity_Unvalidated
		} else {
			validity = sqlbase.ConstraintValidity_Validating
		}
	}

	ref := sqlbase.ForeignKeyConstraint{
		OriginTableID:         tbl.ID,
		OriginColumnIDs:       originColumnIDs,
		ReferencedColumnIDs:   targetColIDs,
		ReferencedTableID:     target.ID,
		Name:                  constraintName,
		Validity:              validity,
		OnDelete:              sqlbase.ForeignKeyReferenceActionValue[d.Actions.Delete],
		OnUpdate:              sqlbase.ForeignKeyReferenceActionValue[d.Actions.Update],
		Match:                 sqlbase.CompositeKeyMatchMethodValue[d.Match],
		LegacyOriginIndex:     legacyOriginIndexID,
		LegacyReferencedIndex: legacyReferencedIndexID,
	}

	if ts == NewTable {
		tbl.OutboundFKs = append(tbl.OutboundFKs, ref)
		target.InboundFKs = append(target.InboundFKs, ref)
	} else {
		tbl.AddForeignKeyMutation(&ref, sqlbase.DescriptorMutation_ADD)
	}

	return nil
}

// addIndexForFK adds an index to a table descriptor (that is in the process of
// being created) that will support using `srcCols` as the referencing (src)
// side of an FK.
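//
// For example (illustrative), if the child table above had no usable index on
// pid, this would add one named child_auto_index_fk_pid_ref_parent, following
// the %s_auto_index_%s pattern applied to the generated constraint name.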
func addIndexForFK(
	tbl *sqlbase.MutableTableDescriptor,
	srcCols []*sqlbase.ColumnDescriptor,
	constraintName string,
	ts FKTableState,
) (sqlbase.IndexID, error) {
	autoIndexName := sqlbase.GenerateUniqueConstraintName(
		fmt.Sprintf("%s_auto_index_%s", tbl.Name, constraintName),
		func(name string) bool {
			return tbl.ValidateIndexNameIsUnique(name) != nil
		},
	)
	// No existing index for the referencing columns found, so we add one.
	idx := sqlbase.IndexDescriptor{
		Name:             autoIndexName,
		ColumnNames:      make([]string, len(srcCols)),
		ColumnDirections: make([]sqlbase.IndexDescriptor_Direction, len(srcCols)),
	}
	for i, c := range srcCols {
		idx.ColumnDirections[i] = sqlbase.IndexDescriptor_ASC
		idx.ColumnNames[i] = c.Name
	}

	if ts == NewTable {
		if err := tbl.AddIndex(idx, false); err != nil {
			return 0, err
		}
		if err := tbl.AllocateIDs(); err != nil {
			return 0, err
		}
		added := tbl.Indexes[len(tbl.Indexes)-1]
		return added.ID, nil
	}

	// TODO (lucy): In the EmptyTable case, we add an index mutation, making this
	// the only case where a foreign key is added to an index being added.
	// Allowing FKs to be added to other indexes/columns also being added should
	// be a generalization of this special case.
	if err := tbl.AddIndexMutation(&idx, sqlbase.DescriptorMutation_ADD); err != nil {
		return 0, err
	}
	if err := tbl.AllocateIDs(); err != nil {
		return 0, err
	}
	id := tbl.Mutations[len(tbl.Mutations)-1].GetIndex().ID
	return id, nil
}

func (p *planner) addInterleave(
	ctx context.Context,
	desc *sqlbase.MutableTableDescriptor,
	index *sqlbase.IndexDescriptor,
	interleave *tree.InterleaveDef,
) error {
	return addInterleave(ctx, p.txn, p, desc, index, interleave)
}

// addInterleave marks an index as one that is interleaved in some parent data
// according to the given definition.
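//
// For example (illustrative), the interleaved columns in
//
//	CREATE TABLE parent (a INT PRIMARY KEY);
//	CREATE TABLE child (a INT, b INT, PRIMARY KEY (a, b))
//	  INTERLEAVE IN PARENT parent (a);
//
// must match the parent's primary index in name, type, and sort direction,
// as enforced by the checks below.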
func addInterleave(
	ctx context.Context,
	txn *kv.Txn,
	vt resolver.SchemaResolver,
	desc *sqlbase.MutableTableDescriptor,
	index *sqlbase.IndexDescriptor,
	interleave *tree.InterleaveDef,
) error {
	if interleave.DropBehavior != tree.DropDefault {
		return unimplemented.NewWithIssuef(
			7854, "unsupported shorthand %s", interleave.DropBehavior)
	}

	parentTable, err := resolver.ResolveExistingTableObject(
		ctx, vt, &interleave.Parent, tree.ObjectLookupFlagsWithRequired(), resolver.ResolveRequireTableDesc,
	)
	if err != nil {
		return err
	}
	parentIndex := parentTable.PrimaryIndex

	// typeOfIndex is used to give more informative error messages.
	var typeOfIndex string
	if index.ID == desc.PrimaryIndex.ID {
		typeOfIndex = "primary key"
	} else {
		typeOfIndex = "index"
	}

	if len(interleave.Fields) != len(parentIndex.ColumnIDs) {
		return pgerror.Newf(
			pgcode.InvalidSchemaDefinition,
			"declared interleaved columns (%s) must match the parent's primary index (%s)",
			&interleave.Fields,
			strings.Join(parentIndex.ColumnNames, ", "),
		)
	}
	if len(interleave.Fields) > len(index.ColumnIDs) {
		return pgerror.Newf(
			pgcode.InvalidSchemaDefinition,
			"declared interleaved columns (%s) must be a prefix of the %s columns being interleaved (%s)",
			&interleave.Fields,
			typeOfIndex,
			strings.Join(index.ColumnNames, ", "),
		)
	}

	for i, targetColID := range parentIndex.ColumnIDs {
		targetCol, err := parentTable.FindColumnByID(targetColID)
		if err != nil {
			return err
		}
		col, err := desc.FindColumnByID(index.ColumnIDs[i])
		if err != nil {
			return err
		}
		if string(interleave.Fields[i]) != col.Name {
			return pgerror.Newf(
				pgcode.InvalidSchemaDefinition,
				"declared interleaved columns (%s) must refer to a prefix of the %s column names being interleaved (%s)",
				&interleave.Fields,
				typeOfIndex,
				strings.Join(index.ColumnNames, ", "),
			)
		}
		if !col.Type.Identical(targetCol.Type) || index.ColumnDirections[i] != parentIndex.ColumnDirections[i] {
			return pgerror.Newf(
				pgcode.InvalidSchemaDefinition,
				"declared interleaved columns (%s) must match type and sort direction of the parent's primary index (%s)",
				&interleave.Fields,
				strings.Join(parentIndex.ColumnNames, ", "),
			)
		}
	}

	ancestorPrefix := append(
		[]sqlbase.InterleaveDescriptor_Ancestor(nil), parentIndex.Interleave.Ancestors...)
	intl := sqlbase.InterleaveDescriptor_Ancestor{
		TableID:         parentTable.ID,
		IndexID:         parentIndex.ID,
		SharedPrefixLen: uint32(len(parentIndex.ColumnIDs)),
	}
	for _, ancestor := range ancestorPrefix {
		intl.SharedPrefixLen -= ancestor.SharedPrefixLen
	}
	index.Interleave = sqlbase.InterleaveDescriptor{Ancestors: append(ancestorPrefix, intl)}

	desc.State = sqlbase.TableDescriptor_ADD
	return nil
}

// finalizeInterleave creates backreferences from an interleaving parent to the
// child data being interleaved.
func (p *planner) finalizeInterleave(
	ctx context.Context, desc *sqlbase.MutableTableDescriptor, index *sqlbase.IndexDescriptor,
) error {
	// TODO(dan): This is similar to finalizeFKs. Consolidate them.
	if len(index.Interleave.Ancestors) == 0 {
		return nil
	}
	// Only the last ancestor needs the backreference.
	ancestor := index.Interleave.Ancestors[len(index.Interleave.Ancestors)-1]
	var ancestorTable *sqlbase.MutableTableDescriptor
	if ancestor.TableID == desc.ID {
		ancestorTable = desc
	} else {
		var err error
		ancestorTable, err = p.Tables().GetMutableTableVersionByID(ctx, ancestor.TableID, p.txn)
		if err != nil {
			return err
		}
	}
	ancestorIndex, err := ancestorTable.FindIndexByID(ancestor.IndexID)
	if err != nil {
		return err
	}
	ancestorIndex.InterleavedBy = append(ancestorIndex.InterleavedBy,
		sqlbase.ForeignKeyReference{Table: desc.ID, Index: index.ID})

	// TODO (lucy): Have more consistent/informative names for dependent jobs.
	if err := p.writeSchemaChange(
		ctx, ancestorTable, sqlbase.InvalidMutationID, "updating ancestor table",
	); err != nil {
		return err
	}

	if desc.State == sqlbase.TableDescriptor_ADD {
		desc.State = sqlbase.TableDescriptor_PUBLIC

		// No job description, since this is presumably part of some larger schema change.
		if err := p.writeSchemaChange(
			ctx, desc, sqlbase.InvalidMutationID, "",
		); err != nil {
			return err
		}
	}

	return nil
}

// CreatePartitioning constructs the partitioning descriptor for an index that
// is partitioned into ranges, each addressable by zone configs.
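//
// For example (illustrative), a CCL binary is required to process:
//
//	CREATE TABLE t (region STRING, id INT, PRIMARY KEY (region, id))
//	  PARTITION BY LIST (region) (
//	    PARTITION us VALUES IN ('us-east'),
//	    PARTITION eu VALUES IN ('eu-west')
//	  );
//
// whereas a table with no PARTITION BY clause takes the early-return path.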
func CreatePartitioning(
	ctx context.Context,
	st *cluster.Settings,
	evalCtx *tree.EvalContext,
	tableDesc *sqlbase.MutableTableDescriptor,
	indexDesc *sqlbase.IndexDescriptor,
	partBy *tree.PartitionBy,
) (sqlbase.PartitioningDescriptor, error) {
	if partBy == nil {
		// No CCL necessary if we're looking at PARTITION BY NOTHING.
		return sqlbase.PartitioningDescriptor{}, nil
	}
	return CreatePartitioningCCL(ctx, st, evalCtx, tableDesc, indexDesc, partBy)
}

// CreatePartitioningCCL is the public hook point for the CCL-licensed
// partitioning creation code.
var CreatePartitioningCCL = func(
	ctx context.Context,
	st *cluster.Settings,
	evalCtx *tree.EvalContext,
	tableDesc *sqlbase.MutableTableDescriptor,
	indexDesc *sqlbase.IndexDescriptor,
	partBy *tree.PartitionBy,
) (sqlbase.PartitioningDescriptor, error) {
	return sqlbase.PartitioningDescriptor{}, sqlbase.NewCCLRequiredError(errors.New(
		"creating or manipulating partitions requires a CCL binary"))
}

// InitTableDescriptor returns a blank TableDescriptor.
func InitTableDescriptor(
	id, parentID, parentSchemaID sqlbase.ID,
	name string,
	creationTime hlc.Timestamp,
	privileges *sqlbase.PrivilegeDescriptor,
	temporary bool,
) sqlbase.MutableTableDescriptor {
	return *sqlbase.NewMutableCreatedTableDescriptor(sqlbase.TableDescriptor{
		ID:                      id,
		Name:                    name,
		ParentID:                parentID,
		UnexposedParentSchemaID: parentSchemaID,
		FormatVersion:           sqlbase.InterleavedFormatVersion,
		Version:                 1,
		ModificationTime:        creationTime,
		Privileges:              privileges,
		CreateAsOfTime:          creationTime,
		Temporary:               temporary,
	})
}

func getFinalSourceQuery(source *tree.Select, evalCtx *tree.EvalContext) string {
	// Ensure that all the table names pretty-print as fully qualified, so we
	// store that in the table descriptor.
	//
	// The traversal will update the TableNames in-place, so the changes are
	// persisted in n.n.AsSource. We exploit the fact that the planning step
	// above has populated any missing db/schema details in the table names
	// in-place. We use tree.FormatNode merely as a traversal method; its
	// output buffer is discarded immediately after the traversal because it
	// is not needed further.
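	//
	// For example (illustrative), for
	//
	//	CREATE TABLE t2 AS SELECT x FROM t1 WHERE x > $1
	//
	// executed with $1 = 3 in database db, the stored source query becomes
	// "SELECT x FROM db.public.t1 WHERE x > 3".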
	f := tree.NewFmtCtx(tree.FmtParsable)
	f.SetReformatTableNames(
		func(_ *tree.FmtCtx, tn *tree.TableName) {
			// Persist the database prefix expansion.
			if tn.SchemaName != "" {
				// All CTE or table aliases have no schema information, so they
				// do not get turned into explicit qualifications.
				tn.ExplicitSchema = true
				tn.ExplicitCatalog = true
			}
		},
	)
	f.FormatNode(source)
	f.Close()

	// Substitute placeholders with their values.
	ctx := tree.NewFmtCtx(tree.FmtParsable)
	ctx.SetPlaceholderFormat(func(ctx *tree.FmtCtx, placeholder *tree.Placeholder) {
		d, err := placeholder.Eval(evalCtx)
		if err != nil {
			panic(fmt.Sprintf("failed to serialize placeholder: %s", err))
		}
		d.Format(ctx)
	})
	ctx.FormatNode(source)

	return ctx.CloseAndGetString()
}

// makeTableDescIfAs is the MakeTableDesc method for when we have a table
// that is created with the CREATE AS format.
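//
// For example (illustrative), in
//
//	CREATE TABLE t2 (a) AS SELECT x FROM t1;
//
// the ColumnTableDef for a takes its type from the corresponding result
// column of the SELECT, and when no defs are given at all, every result
// column is turned into a nullable ColumnTableDef.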
func makeTableDescIfAs(
	params runParams,
	p *tree.CreateTable,
	parentID, parentSchemaID, id sqlbase.ID,
	creationTime hlc.Timestamp,
	resultColumns []sqlbase.ResultColumn,
	privileges *sqlbase.PrivilegeDescriptor,
	evalContext *tree.EvalContext,
	temporary bool,
) (desc sqlbase.MutableTableDescriptor, err error) {
	colResIndex := 0
	// TableDefs for a CREATE TABLE ... AS AST node consist of a ColumnTableDef
	// for each column and a ConstraintTableDef for any constraints on those
	// columns.
	for _, defs := range p.Defs {
		var d *tree.ColumnTableDef
		var ok bool
		if d, ok = defs.(*tree.ColumnTableDef); ok {
			d.Type = resultColumns[colResIndex].Typ
			colResIndex++
		}
	}

	// If there are no TableDefs defined by the parser, then we construct a
	// ColumnTableDef for each column using resultColumns.
	if len(p.Defs) == 0 {
		for _, colRes := range resultColumns {
			var d *tree.ColumnTableDef
			var ok bool
			var tableDef tree.TableDef = &tree.ColumnTableDef{Name: tree.Name(colRes.Name), Type: colRes.Typ}
			if d, ok = tableDef.(*tree.ColumnTableDef); !ok {
				return desc, errors.Errorf("failed to cast type to ColumnTableDef")
			}
			d.Nullable.Nullability = tree.SilentNull
			p.Defs = append(p.Defs, tableDef)
		}
	}

	desc, err = makeTableDesc(
		params,
		p,
		parentID, parentSchemaID, id,
		creationTime,
		privileges,
		nil, /* affected */
		temporary,
	)
	desc.CreateQuery = getFinalSourceQuery(p.AsSource, evalContext)
	return desc, err
}

// MakeTableDesc creates a table descriptor from a CreateTable statement.
//
// txn and vt can be nil if the table to be created does not contain references
// to other tables (e.g. foreign keys or interleaving). This is useful at
// bootstrap when creating descriptors for virtual tables.
//
// parentID refers to the databaseID under which the descriptor is being
// created, and parentSchemaID refers to the schemaID of the schema under which
// the descriptor is being created.
//
// evalCtx can be nil if the table to be created has no default expression for
// any of the columns and no partitioning expression.
//
// semaCtx can be nil if the table to be created has no default expression on
// any of the columns and no check constraints.
//
// The caller must also ensure that the SchemaResolver is configured
// to bypass caching and enable visibility of just-added descriptors.
// This is used to resolve sequence and FK dependencies. Also see the
// comment at the start of the global scope resolveFK().
//
// If the table definition *may* use the SERIAL type, the caller is
// also responsible for processing serial types using
// processSerialInColumnDef() on every column definition, and creating
// the necessary sequences in KV before calling MakeTableDesc().
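//
// For example (illustrative), a caller handling
//
//	CREATE TABLE t (id SERIAL PRIMARY KEY);
//
// must first rewrite the SERIAL column via processSerialInColumnDef (to an
// INT column with a default such as unique_rowid() or a sequence-backed
// expression, depending on the serial normalization mode), creating any
// needed sequence in KV before calling MakeTableDesc.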
  1207  func MakeTableDesc(
  1208  	ctx context.Context,
  1209  	txn *kv.Txn,
  1210  	vt resolver.SchemaResolver,
  1211  	st *cluster.Settings,
  1212  	n *tree.CreateTable,
  1213  	parentID, parentSchemaID, id sqlbase.ID,
  1214  	creationTime hlc.Timestamp,
  1215  	privileges *sqlbase.PrivilegeDescriptor,
  1216  	affected map[sqlbase.ID]*sqlbase.MutableTableDescriptor,
  1217  	semaCtx *tree.SemaContext,
  1218  	evalCtx *tree.EvalContext,
  1219  	sessionData *sessiondata.SessionData,
  1220  	temporary bool,
  1221  ) (sqlbase.MutableTableDescriptor, error) {
  1222  	// Used to delay establishing Column/Sequence dependency until ColumnIDs have
  1223  	// been populated.
  1224  	columnDefaultExprs := make([]tree.TypedExpr, len(n.Defs))
  1225  
  1226  	desc := InitTableDescriptor(
  1227  		id, parentID, parentSchemaID, n.Table.Table(), creationTime, privileges, temporary,
  1228  	)
  1229  
  1230  	if err := checkStorageParameters(ctx, semaCtx, n.StorageParams, storageParamExpectedTypes); err != nil {
  1231  		return desc, err
  1232  	}
  1233  
  1234  	// If all nodes in the cluster know how to handle secondary indexes with column families,
  1235  	// write the new version into new index descriptors.
  1236  	indexEncodingVersion := sqlbase.BaseIndexFormatVersion
  1237  	// We can't use st.Version.IsActive because this method is used during
  1238  	// server setup before the cluster version has been initialized.
  1239  	version := st.Version.ActiveVersionOrEmpty(ctx)
  1240  	if version != (clusterversion.ClusterVersion{}) &&
  1241  		version.IsActive(clusterversion.VersionSecondaryIndexColumnFamilies) {
  1242  		indexEncodingVersion = sqlbase.SecondaryIndexFamilyFormatVersion
  1243  	}
  1244  
  1245  	for i, def := range n.Defs {
  1246  		if d, ok := def.(*tree.ColumnTableDef); ok {
  1247  			// MakeTableDesc is called sometimes with a nil SemaCtx (for example
  1248  			// during bootstrapping). In order to not panic, pass a nil TypeResolver
  1249  			// when attempting to resolve the columns type.
  1250  			defType, err := tree.ResolveType(ctx, d.Type, semaCtx.GetTypeResolver())
  1251  			if err != nil {
  1252  				return sqlbase.MutableTableDescriptor{}, err
  1253  			}
  1254  			if !desc.IsVirtualTable() {
  1255  				switch defType.Oid() {
  1256  				case oid.T_int2vector, oid.T_oidvector:
  1257  					return desc, pgerror.Newf(
  1258  						pgcode.FeatureNotSupported,
  1259  						"VECTOR column types are unsupported",
  1260  					)
  1261  				}
  1262  			}
  1263  			if supported, err := isTypeSupportedInVersion(version, defType); err != nil {
  1264  				return desc, err
  1265  			} else if !supported {
  1266  				return desc, pgerror.Newf(
  1267  					pgcode.FeatureNotSupported,
  1268  					"type %s is not supported until version upgrade is finalized",
  1269  					defType.SQLString(),
  1270  				)
  1271  			}
  1272  			if d.PrimaryKey.Sharded {
  1273  				// This function can sometimes be called when `st` is nil,
  1274  				// and also before the version has been initialized. We only
  1275  				// allow hash sharded indexes to be created if we know for
  1276  				// certain that it supported by the cluster.
  1277  				if st == nil {
  1278  					return desc, invalidClusterForShardedIndexError
  1279  				}
  1280  				if version == (clusterversion.ClusterVersion{}) ||
  1281  					!version.IsActive(clusterversion.VersionHashShardedIndexes) {
  1282  					return desc, invalidClusterForShardedIndexError
  1283  				}
  1284  
  1285  				if !sessionData.HashShardedIndexesEnabled {
  1286  					return desc, hashShardedIndexesDisabledError
  1287  				}
  1288  				if n.PartitionBy != nil {
  1289  					return desc, pgerror.New(pgcode.FeatureNotSupported, "sharded indexes don't support partitioning")
  1290  				}
  1291  				if n.Interleave != nil {
  1292  					return desc, pgerror.New(pgcode.FeatureNotSupported, "interleaved indexes cannot also be hash sharded")
  1293  				}
  1294  				buckets, err := sqlbase.EvalShardBucketCount(ctx, semaCtx, evalCtx, d.PrimaryKey.ShardBuckets)
  1295  				if err != nil {
  1296  					return desc, err
  1297  				}
  1298  				shardCol, _, err := maybeCreateAndAddShardCol(int(buckets), &desc,
  1299  					[]string{string(d.Name)}, true /* isNewTable */)
  1300  				if err != nil {
  1301  					return desc, err
  1302  				}
  1303  				checkConstraint, err := makeShardCheckConstraintDef(&desc, int(buckets), shardCol)
  1304  				if err != nil {
  1305  					return desc, err
  1306  				}
  1307  				// Add the shard's check constraint to the list of TableDefs to treat it
  1308  				// like it's been "hoisted" like the explicitly added check constraints.
  1309  				// It'll then be added to this table's resulting table descriptor below in
  1310  				// the constraint pass.
  1311  				n.Defs = append(n.Defs, checkConstraint)
  1312  				columnDefaultExprs = append(columnDefaultExprs, nil)
  1313  			}
  1314  			col, idx, expr, err := sqlbase.MakeColumnDefDescs(ctx, d, semaCtx, evalCtx)
  1315  			if err != nil {
  1316  				return desc, err
  1317  			}
  1318  
  1319  			desc.AddColumn(col)
  1320  			if d.HasDefaultExpr() {
  1321  				// This resolution must be delayed until ColumnIDs have been populated.
  1322  				columnDefaultExprs[i] = expr
  1323  			} else {
  1324  				columnDefaultExprs[i] = nil
  1325  			}
  1326  
  1327  			if idx != nil {
  1328  				idx.Version = indexEncodingVersion
  1329  				if err := desc.AddIndex(*idx, d.PrimaryKey.IsPrimaryKey); err != nil {
  1330  					return desc, err
  1331  				}
  1332  			}
  1333  
  1334  			if d.HasColumnFamily() {
  1335  				// Pass true for `create` and `ifNotExists` because when we're creating
  1336  				// a table, we always want to create the specified family if it doesn't
  1337  				// exist.
  1338  				err := desc.AddColumnToFamilyMaybeCreate(col.Name, string(d.Family.Name), true, true)
  1339  				if err != nil {
  1340  					return desc, err
  1341  				}
  1342  			}
  1343  		}
  1344  	}
  1345  
  1346  	// Now that the columns are constructed, revisit each computed column and
  1347  	// dequalify any column references in its expression.
  1348  	sourceInfo := sqlbase.NewSourceInfoForSingleTable(
  1349  		n.Table, sqlbase.ResultColumnsFromColDescs(desc.GetID(), desc.Columns),
  1350  	)
  1351  
  1352  	for i := range desc.Columns {
  1353  		col := &desc.Columns[i]
  1354  		if col.IsComputed() {
  1355  			expr, err := parser.ParseExpr(*col.ComputeExpr)
  1356  			if err != nil {
  1357  				return desc, err
  1358  			}
  1359  
  1360  			expr, err = schemaexpr.DequalifyColumnRefs(ctx, sourceInfo, expr)
  1361  			if err != nil {
  1362  				return desc, err
  1363  			}
  1364  			serialized := tree.Serialize(expr)
  1365  			col.ComputeExpr = &serialized
  1366  		}
  1367  	}
  1368  
  1369  	var primaryIndexColumnSet map[string]struct{}
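        	// setupShardedIndexForNewTable creates the hidden shard column for a
        	// hash sharded index defined on this new table and, when a new column
        	// was added, hoists its check constraint into n.Defs, mirroring the
        	// sharded primary key path above.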
  1370  	setupShardedIndexForNewTable := func(d *tree.IndexTableDef, idx *sqlbase.IndexDescriptor) error {
  1371  		if n.PartitionBy != nil {
  1372  			return pgerror.New(pgcode.FeatureNotSupported, "sharded indexes don't support partitioning")
  1373  		}
  1374  		shardCol, newColumn, err := setupShardedIndex(
  1375  			ctx,
  1376  			evalCtx,
  1377  			semaCtx,
  1378  			sessionData.HashShardedIndexesEnabled,
  1379  			&d.Columns,
  1380  			d.Sharded.ShardBuckets,
  1381  			&desc,
  1382  			idx,
  1383  			true /* isNewTable */)
  1384  		if err != nil {
  1385  			return err
  1386  		}
  1387  		if newColumn {
  1388  			buckets, err := sqlbase.EvalShardBucketCount(ctx, semaCtx, evalCtx, d.Sharded.ShardBuckets)
  1389  			if err != nil {
  1390  				return err
  1391  			}
  1392  			checkConstraint, err := makeShardCheckConstraintDef(&desc, int(buckets), shardCol)
  1393  			if err != nil {
  1394  				return err
  1395  			}
  1396  			n.Defs = append(n.Defs, checkConstraint)
  1397  			columnDefaultExprs = append(columnDefaultExprs, nil)
  1398  		}
  1399  		return nil
  1400  	}
  1401  	idxValidator := schemaexpr.NewIndexPredicateValidator(ctx, n.Table, &desc, semaCtx)
  1402  	for _, def := range n.Defs {
  1403  		switch d := def.(type) {
  1404  		case *tree.ColumnTableDef, *tree.LikeTableDef:
  1405  			// pass, handled above.
  1406  
  1407  		case *tree.IndexTableDef:
  1408  			idx := sqlbase.IndexDescriptor{
  1409  				Name:             string(d.Name),
  1410  				StoreColumnNames: d.Storing.ToStrings(),
  1411  				Version:          indexEncodingVersion,
  1412  			}
  1413  			if d.Inverted {
  1414  				idx.Type = sqlbase.IndexDescriptor_INVERTED
  1415  			}
  1416  			if d.Sharded != nil {
  1417  				if d.Interleave != nil {
  1418  					return desc, pgerror.New(pgcode.FeatureNotSupported, "interleaved indexes cannot also be hash sharded")
  1419  				}
  1420  				if err := setupShardedIndexForNewTable(d, &idx); err != nil {
  1421  					return desc, err
  1422  				}
  1423  			}
  1424  			if err := idx.FillColumns(d.Columns); err != nil {
  1425  				return desc, err
  1426  			}
  1427  			if d.Inverted {
  1428  				columnDesc, _, err := desc.FindColumnByName(tree.Name(idx.ColumnNames[0]))
  1429  				if err != nil {
  1430  					return desc, err
  1431  				}
  1432  				if columnDesc.Type.InternalType.Family == types.GeometryFamily {
  1433  					idx.GeoConfig = *geoindex.DefaultGeometryIndexConfig()
  1434  				}
  1435  				if columnDesc.Type.InternalType.Family == types.GeographyFamily {
  1436  					idx.GeoConfig = *geoindex.DefaultGeographyIndexConfig()
  1437  				}
  1438  			}
  1439  			if d.PartitionBy != nil {
  1440  				partitioning, err := CreatePartitioning(ctx, st, evalCtx, &desc, &idx, d.PartitionBy)
  1441  				if err != nil {
  1442  					return desc, err
  1443  				}
  1444  				idx.Partitioning = partitioning
  1445  			}
  1446  			if d.Predicate != nil {
  1447  				// TODO(mgartner): remove this once partial indexes are fully supported.
  1448  				if !sessionData.PartialIndexes {
  1449  					return desc, unimplemented.NewWithIssue(9683, "partial indexes are not supported")
  1450  				}
  1451  
  1452  				expr, err := idxValidator.Validate(d.Predicate)
  1453  				if err != nil {
  1454  					return desc, err
  1455  				}
  1456  
  1457  				// Store the serialized predicate expression in the IndexDescriptor.
  1458  				idx.Predicate = tree.Serialize(expr)
  1459  			}
  1460  
  1461  			if err := desc.AddIndex(idx, false); err != nil {
  1462  				return desc, err
  1463  			}
  1464  			if d.Interleave != nil {
  1465  				return desc, unimplemented.NewWithIssue(9148, "use CREATE INDEX to make interleaved indexes")
  1466  			}
  1467  		case *tree.UniqueConstraintTableDef:
  1468  			idx := sqlbase.IndexDescriptor{
  1469  				Name:             string(d.Name),
  1470  				Unique:           true,
  1471  				StoreColumnNames: d.Storing.ToStrings(),
  1472  				Version:          indexEncodingVersion,
  1473  			}
  1474  			if d.Sharded != nil {
  1475  				if n.Interleave != nil && d.PrimaryKey {
  1476  					return desc, pgerror.New(pgcode.FeatureNotSupported, "interleaved indexes cannot also be hash sharded")
  1477  				}
  1478  				if err := setupShardedIndexForNewTable(&d.IndexTableDef, &idx); err != nil {
  1479  					return desc, err
  1480  				}
  1481  			}
  1482  			if err := idx.FillColumns(d.Columns); err != nil {
  1483  				return desc, err
  1484  			}
  1485  			if d.PartitionBy != nil {
  1486  				partitioning, err := CreatePartitioning(ctx, st, evalCtx, &desc, &idx, d.PartitionBy)
  1487  				if err != nil {
  1488  					return desc, err
  1489  				}
  1490  				idx.Partitioning = partitioning
  1491  			}
  1492  			if d.Predicate != nil {
  1493  				// TODO(mgartner): remove this once partial indexes are fully supported.
  1494  				if !sessionData.PartialIndexes {
  1495  					return desc, unimplemented.NewWithIssue(9683, "partial indexes are not supported")
  1496  				}
  1497  
  1498  				expr, err := idxValidator.Validate(d.Predicate)
  1499  				if err != nil {
  1500  					return desc, err
  1501  				}
  1502  
  1503  				// Store the serialized predicate expression in the IndexDescriptor.
  1504  				idx.Predicate = tree.Serialize(expr)
  1505  			}
  1506  			if err := desc.AddIndex(idx, d.PrimaryKey); err != nil {
  1507  				return desc, err
  1508  			}
  1509  			if d.PrimaryKey {
  1510  				if d.Interleave != nil {
  1511  					return desc, unimplemented.NewWithIssue(
  1512  						45710,
  1513  						"interleave not supported in primary key constraint definition",
  1514  					)
  1515  				}
  1516  				primaryIndexColumnSet = make(map[string]struct{})
  1517  				for _, c := range d.Columns {
  1518  					primaryIndexColumnSet[string(c.Column)] = struct{}{}
  1519  				}
  1520  			}
  1521  			if d.Interleave != nil {
  1522  				return desc, unimplemented.NewWithIssue(9148, "use CREATE INDEX to make interleaved indexes")
  1523  			}
  1524  		case *tree.CheckConstraintTableDef, *tree.ForeignKeyConstraintTableDef, *tree.FamilyTableDef:
  1525  			// pass, handled below.
  1526  
  1527  		default:
  1528  			return desc, errors.Errorf("unsupported table def: %T", def)
  1529  		}
  1530  	}
  1531  
  1532  	// If explicit primary keys are required, error out since a primary key was not supplied.
  1533  	if len(desc.PrimaryIndex.ColumnNames) == 0 && desc.IsPhysicalTable() && evalCtx != nil &&
  1534  		evalCtx.SessionData != nil && evalCtx.SessionData.RequireExplicitPrimaryKeys {
  1535  		return desc, errors.Errorf(
  1536  			"no primary key specified for table %s (require_explicit_primary_keys = true)", desc.Name)
  1537  	}
  1538  
  1539  	if primaryIndexColumnSet != nil {
  1540  		// Primary index columns are not nullable.
  1541  		for i := range desc.Columns {
  1542  			if _, ok := primaryIndexColumnSet[desc.Columns[i].Name]; ok {
  1543  				desc.Columns[i].Nullable = false
  1544  			}
  1545  		}
  1546  	}
  1547  
  1548  	// Now that all columns are in place, add any explicit families (this is done
  1549  	// here, rather than in the constraint pass below since we want to pick up
  1550  	// explicit allocations before AllocateIDs adds implicit ones).
  1551  	columnsInExplicitFamilies := map[string]bool{}
  1552  	for _, def := range n.Defs {
  1553  		if d, ok := def.(*tree.FamilyTableDef); ok {
  1554  			fam := sqlbase.ColumnFamilyDescriptor{
  1555  				Name:        string(d.Name),
  1556  				ColumnNames: d.Columns.ToStrings(),
  1557  			}
  1558  			for _, c := range fam.ColumnNames {
  1559  				columnsInExplicitFamilies[c] = true
  1560  			}
  1561  			desc.AddFamily(fam)
  1562  		}
  1563  	}
  1564  
  1565  	// Assign any implicitly added shard columns to the column family of the first column
  1566  	// in their corresponding set of index columns.
  1567  	for _, index := range desc.AllNonDropIndexes() {
  1568  		if index.IsSharded() && !columnsInExplicitFamilies[index.Sharded.Name] {
  1569  			// Ensure that the shard column wasn't explicitly assigned a column family
  1570  			// during table creation (this will happen when a create statement is
  1571  			// "roundtripped", for example).
  1572  			family := sqlbase.GetColumnFamilyForShard(&desc, index.Sharded.ColumnNames)
  1573  			if family != "" {
  1574  				if err := desc.AddColumnToFamilyMaybeCreate(index.Sharded.Name, family, false, false); err != nil {
  1575  					return desc, err
  1576  				}
  1577  			}
  1578  		}
  1579  	}
  1580  
  1581  	if err := desc.AllocateIDs(); err != nil {
  1582  		return desc, err
  1583  	}
  1584  
  1585  	// If any nodes are not at version VersionPrimaryKeyColumnsOutOfFamilyZero, then return an error
  1586  	// if a primary key column is not in column family 0.
  1587  	if st != nil {
  1588  		if version := st.Version.ActiveVersionOrEmpty(ctx); version != (clusterversion.ClusterVersion{}) &&
  1589  			!version.IsActive(clusterversion.VersionPrimaryKeyColumnsOutOfFamilyZero) {
  1590  			var colsInFamZero util.FastIntSet
  1591  			for _, colID := range desc.Families[0].ColumnIDs {
  1592  				colsInFamZero.Add(int(colID))
  1593  			}
  1594  			for _, colID := range desc.PrimaryIndex.ColumnIDs {
  1595  				if !colsInFamZero.Contains(int(colID)) {
  1596  					return desc, errors.Errorf("primary key column %d is not in column family 0", colID)
  1597  				}
  1598  			}
  1599  		}
  1600  	}
  1601  
  1602  	for i := range desc.Indexes {
  1603  		idx := &desc.Indexes[i]
  1604  		// Increment the counter if this index could be storing data across multiple column families.
  1605  		if len(idx.StoreColumnNames) > 1 && len(desc.Families) > 1 {
  1606  			telemetry.Inc(sqltelemetry.SecondaryIndexColumnFamiliesCounter)
  1607  		}
  1608  	}
  1609  
  1610  	if n.Interleave != nil {
  1611  		if err := addInterleave(ctx, txn, vt, &desc, &desc.PrimaryIndex, n.Interleave); err != nil {
  1612  			return desc, err
  1613  		}
  1614  	}
  1615  
  1616  	if n.PartitionBy != nil {
  1617  		partitioning, err := CreatePartitioning(
  1618  			ctx, st, evalCtx, &desc, &desc.PrimaryIndex, n.PartitionBy)
  1619  		if err != nil {
  1620  			return desc, err
  1621  		}
  1622  		desc.PrimaryIndex.Partitioning = partitioning
  1623  	}
  1624  
  1625  	// Once all the IDs have been allocated, we can add the Sequence dependencies
  1626  	// as maybeAddSequenceDependencies requires ColumnIDs to be correct.
  1627  	// Elements in n.Defs are not necessarily column definitions, so use a separate
  1628  	// counter to map ColumnDefs to columns.
  1629  	colIdx := 0
  1630  	for i := range n.Defs {
  1631  		if _, ok := n.Defs[i].(*tree.ColumnTableDef); ok {
  1632  			if expr := columnDefaultExprs[i]; expr != nil {
  1633  				changedSeqDescs, err := maybeAddSequenceDependencies(ctx, vt, &desc, &desc.Columns[colIdx], expr, affected)
  1634  				if err != nil {
  1635  					return desc, err
  1636  				}
  1637  				for _, changedSeqDesc := range changedSeqDescs {
  1638  					affected[changedSeqDesc.ID] = changedSeqDesc
  1639  				}
  1640  			}
  1641  			colIdx++
  1642  		}
  1643  	}
  1644  
  1645  	// With all structural elements in place and IDs allocated, we can resolve the
  1646  	// constraints and qualifications.
  1647  	// FKs are resolved after the descriptor is otherwise complete and IDs have
  1648  	// been allocated since the FKs will reference those IDs. Resolution also
  1649  	// accumulates updates to other tables (adding backreferences) in the passed
  1650  	// map -- anything in that map should be saved when the table is created.
  1652  
  1653  	// We use a fkSelfResolver so that name resolution can find the newly created
  1654  	// table.
  1655  	fkResolver := &fkSelfResolver{
  1656  		SchemaResolver: vt,
  1657  		newTableDesc:   desc.TableDesc(),
  1658  		newTableName:   &n.Table,
  1659  	}
  1660  
  1661  	ckBuilder := schemaexpr.NewCheckConstraintBuilder(ctx, n.Table, &desc, semaCtx)
  1662  	for _, def := range n.Defs {
  1663  		switch d := def.(type) {
  1664  		case *tree.ColumnTableDef:
  1665  			// Check after all ResolveFK calls.
  1666  
  1667  		case *tree.IndexTableDef, *tree.UniqueConstraintTableDef, *tree.FamilyTableDef, *tree.LikeTableDef:
  1668  			// Pass, handled above.
  1669  
  1670  		case *tree.CheckConstraintTableDef:
  1671  			ck, err := ckBuilder.Build(d)
  1672  			if err != nil {
  1673  				return desc, err
  1674  			}
  1675  			desc.Checks = append(desc.Checks, ck)
  1676  
  1677  		case *tree.ForeignKeyConstraintTableDef:
  1678  			if err := ResolveFK(
  1679  				ctx, txn, fkResolver, &desc, d, affected, NewTable, tree.ValidationDefault, evalCtx,
  1680  			); err != nil {
  1681  				return desc, err
  1682  			}
  1683  
  1684  		default:
  1685  			return desc, errors.Errorf("unsupported table def: %T", def)
  1686  		}
  1687  	}
  1688  
  1689  	// Now that we have all the other columns set up, we can validate
  1690  	// any computed columns.
  1691  	computedColValidator := schemaexpr.NewComputedColumnValidator(ctx, &desc, semaCtx)
  1692  	for _, def := range n.Defs {
  1693  		switch d := def.(type) {
  1694  		case *tree.ColumnTableDef:
  1695  			if d.IsComputed() {
  1696  				if err := computedColValidator.Validate(d); err != nil {
  1697  					return desc, err
  1698  				}
  1699  			}
  1700  		}
  1701  	}
  1702  
  1703  	// AllocateIDs mutates its receiver. `return desc, desc.AllocateIDs()`
  1704  	// happens to work in gc, but does not work in gccgo.
  1705  	//
  1706  	// See https://github.com/golang/go/issues/23188.
  1707  	err := desc.AllocateIDs()
  1708  
  1709  	// Record the types of indexes that the table has.
  1710  	if err := desc.ForeachNonDropIndex(func(idx *sqlbase.IndexDescriptor) error {
  1711  		if idx.IsSharded() {
  1712  			telemetry.Inc(sqltelemetry.HashShardedIndexCounter)
  1713  		}
  1714  		if idx.Type == sqlbase.IndexDescriptor_INVERTED {
  1715  			telemetry.Inc(sqltelemetry.InvertedIndexCounter)
  1716  			if !geoindex.IsEmptyConfig(&idx.GeoConfig) {
  1717  				if geoindex.IsGeographyConfig(&idx.GeoConfig) {
  1718  					telemetry.Inc(sqltelemetry.GeographyInvertedIndexCounter)
  1719  				} else if geoindex.IsGeometryConfig(&idx.GeoConfig) {
  1720  					telemetry.Inc(sqltelemetry.GeometryInvertedIndexCounter)
  1721  				}
  1722  			}
  1723  		}
  1724  		return nil
  1725  	}); err != nil {
  1726  		return desc, err
  1727  	}
  1728  
  1729  	return desc, err
  1730  }
  1731  
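        // checkStorageParameters verifies that each supplied storage parameter is
        // recognized, carries a value, and that the value type-checks as the
        // parameter's expected type (bool, int, or float).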
  1732  func checkStorageParameters(
  1733  	ctx context.Context,
  1734  	semaCtx *tree.SemaContext,
  1735  	params tree.StorageParams,
  1736  	expectedTypes map[string]storageParamType,
  1737  ) error {
  1738  	for _, sp := range params {
  1739  		k := string(sp.Key)
  1740  		validate, ok := expectedTypes[k]
  1741  		if !ok {
  1742  			return errors.Errorf("invalid storage parameter %q", k)
  1743  		}
  1744  		if sp.Value == nil {
  1745  			return errors.Errorf("storage parameter %q requires a value", k)
  1746  		}
  1747  		var expectedType *types.T
  1748  		if validate == storageParamBool {
  1749  			expectedType = types.Bool
  1750  		} else if validate == storageParamInt {
  1751  			expectedType = types.Int
  1752  		} else if validate == storageParamFloat {
  1753  			expectedType = types.Float
  1754  		} else {
  1755  			return unimplemented.NewWithIssuef(43299, "storage parameter %q", k)
  1756  		}
  1757  
  1758  		_, err := tree.TypeCheckAndRequire(ctx, sp.Value, semaCtx, expectedType, k)
  1759  		if err != nil {
  1760  			return err
  1761  		}
  1762  	}
  1763  	return nil
  1764  }
  1765  
  1766  // makeTableDesc creates a table descriptor from a CreateTable statement.
  1767  func makeTableDesc(
  1768  	params runParams,
  1769  	n *tree.CreateTable,
  1770  	parentID, parentSchemaID, id sqlbase.ID,
  1771  	creationTime hlc.Timestamp,
  1772  	privileges *sqlbase.PrivilegeDescriptor,
  1773  	affected map[sqlbase.ID]*sqlbase.MutableTableDescriptor,
  1774  	temporary bool,
  1775  ) (ret sqlbase.MutableTableDescriptor, err error) {
  1776  	// Process any SERIAL columns to remove the SERIAL type,
  1777  	// as required by MakeTableDesc.
  1778  	createStmt := n
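        	// ensureCopy makes a one-time copy: it snapshots *n into createStmt and
        	// re-points n.Defs at a fresh slice, so the in-place rewrites of n.Defs
        	// below cannot alias the original definition list.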
  1779  	ensureCopy := func() {
  1780  		if createStmt == n {
  1781  			newCreateStmt := *n
  1782  			n.Defs = append(tree.TableDefs(nil), n.Defs...)
  1783  			createStmt = &newCreateStmt
  1784  		}
  1785  	}
  1786  	newDefs, err := replaceLikeTableOpts(n, params)
  1787  	if err != nil {
  1788  		return ret, err
  1789  	}
  1790  
  1791  	if newDefs != nil {
  1792  		// If we found any LIKE table defs, replaceLikeTableOpts returned a new,
  1793  		// expanded list of defs, so re-assign it back to n.Defs.
  1795  		n.Defs = newDefs
  1796  	}
  1797  
  1798  	for i, def := range n.Defs {
  1799  		d, ok := def.(*tree.ColumnTableDef)
  1800  		if !ok {
  1801  			continue
  1802  		}
  1803  		// Do not include virtual tables in these statistics.
  1804  		if !sqlbase.IsVirtualTable(id) {
  1805  			incTelemetryForNewColumn(d)
  1806  		}
  1807  		newDef, seqDbDesc, seqName, seqOpts, err := params.p.processSerialInColumnDef(params.ctx, d, &n.Table)
  1808  		if err != nil {
  1809  			return ret, err
  1810  		}
  1811  		// TODO (lucy): Have more consistent/informative names for dependent jobs.
  1812  		if seqName != nil {
  1813  			if err := doCreateSequence(
  1814  				params,
  1815  				n.String(),
  1816  				seqDbDesc,
  1817  				parentSchemaID,
  1818  				seqName,
  1819  				temporary,
  1820  				seqOpts,
  1821  				"creating sequence",
  1822  			); err != nil {
  1823  				return ret, err
  1824  			}
  1825  		}
  1826  		if d != newDef {
  1827  			ensureCopy()
  1828  			n.Defs[i] = newDef
  1829  		}
  1830  	}
  1831  
  1832  	// We need to run MakeTableDesc with caching disabled, because
  1833  	// it needs to pull in descriptors from FK depended-on tables
  1834  	// and interleaved parents using their current state in KV.
  1835  	// See the comment at the start of MakeTableDesc() and resolveFK().
  1836  	params.p.runWithOptions(resolveFlags{skipCache: true}, func() {
  1837  		ret, err = MakeTableDesc(
  1838  			params.ctx,
  1839  			params.p.txn,
  1840  			params.p,
  1841  			params.p.ExecCfg().Settings,
  1842  			n,
  1843  			parentID,
  1844  			parentSchemaID,
  1845  			id,
  1846  			creationTime,
  1847  			privileges,
  1848  			affected,
  1849  			&params.p.semaCtx,
  1850  			params.EvalContext(),
  1851  			params.SessionData(),
  1852  			temporary,
  1853  		)
  1854  	})
  1855  	return ret, err
  1856  }
  1857  
  1858  // replaceLikeTableOpts processes the TableDefs in the input CreateTable node,
  1859  // searching for LikeTableDefs. If any are found, each LikeTableDef will be
  1860  // replaced in the output tree.TableDefs (which will be a copy of the input
  1861  // node's TableDefs) by an equivalent set of TableDefs pulled from the
  1862  // LikeTableDef's target table.
  1863  // If no LikeTableDefs are found, the output tree.TableDefs will be nil.
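        // For instance, given `CREATE TABLE t (LIKE src INCLUDING INDEXES)`, the
        // LikeTableDef is replaced by column defs copied from src (columns are
        // always copied) plus index defs for src's non-dropped indexes.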
  1864  func replaceLikeTableOpts(n *tree.CreateTable, params runParams) (tree.TableDefs, error) {
  1865  	var newDefs tree.TableDefs
  1866  	for i, def := range n.Defs {
  1867  		d, ok := def.(*tree.LikeTableDef)
  1868  		if !ok {
  1869  			if newDefs != nil {
  1870  				newDefs = append(newDefs, def)
  1871  			}
  1872  			continue
  1873  		}
  1874  		// We're definitely going to be editing n.Defs now, so make a copy of it.
  1875  		if newDefs == nil {
  1876  			newDefs = make(tree.TableDefs, 0, len(n.Defs))
  1877  			newDefs = append(newDefs, n.Defs[:i]...)
  1878  		}
  1879  		td, err := params.p.ResolveMutableTableDescriptor(params.ctx, &d.Name, true, resolver.ResolveRequireTableDesc)
  1880  		if err != nil {
  1881  			return nil, err
  1882  		}
  1883  		opts := tree.LikeTableOpt(0)
  1884  		// Apply the INCLUDING / EXCLUDING options in order.
  1885  		for _, opt := range d.Options {
  1886  			if opt.Excluded {
  1887  				opts &^= opt.Opt
  1888  			} else {
  1889  				opts |= opt.Opt
  1890  			}
  1891  		}
  1892  
  1893  		defs := make(tree.TableDefs, 0)
  1894  		// Add all columns. Columns are always copied, regardless of the options.
  1895  		for i := range td.Columns {
  1896  			c := &td.Columns[i]
  1897  			if c.Hidden {
  1898  				// Hidden columns automatically get added by the system; we don't need
  1899  				// to add them ourselves here.
  1900  				continue
  1901  			}
  1902  			def := tree.ColumnTableDef{
  1903  				Name: tree.Name(c.Name),
  1904  				Type: c.DatumType(),
  1905  			}
  1906  			if c.Nullable {
  1907  				def.Nullable.Nullability = tree.Null
  1908  			} else {
  1909  				def.Nullable.Nullability = tree.NotNull
  1910  			}
  1911  			if c.DefaultExpr != nil {
  1912  				if opts.Has(tree.LikeTableOptDefaults) {
  1913  					def.DefaultExpr.Expr, err = parser.ParseExpr(*c.DefaultExpr)
  1914  					if err != nil {
  1915  						return nil, err
  1916  					}
  1917  				}
  1918  			}
  1919  			if c.ComputeExpr != nil {
  1920  				if opts.Has(tree.LikeTableOptGenerated) {
  1921  					def.Computed.Computed = true
  1922  					def.Computed.Expr, err = parser.ParseExpr(*c.ComputeExpr)
  1923  					if err != nil {
  1924  						return nil, err
  1925  					}
  1926  				}
  1927  			}
  1928  			defs = append(defs, &def)
  1929  		}
  1930  		if opts.Has(tree.LikeTableOptConstraints) {
  1931  			for _, c := range td.Checks {
  1932  				def := tree.CheckConstraintTableDef{
  1933  					Name:   tree.Name(c.Name),
  1934  					Hidden: c.Hidden,
  1935  				}
  1936  				def.Expr, err = parser.ParseExpr(c.Expr)
  1937  				if err != nil {
  1938  					return nil, err
  1939  				}
  1940  				defs = append(defs, &def)
  1941  			}
  1942  		}
  1943  		if opts.Has(tree.LikeTableOptIndexes) {
  1944  			for _, idx := range td.AllNonDropIndexes() {
  1945  				indexDef := tree.IndexTableDef{
  1946  					Name:     tree.Name(idx.Name),
  1947  					Inverted: idx.Type == sqlbase.IndexDescriptor_INVERTED,
  1948  					Storing:  make(tree.NameList, 0, len(idx.StoreColumnNames)),
  1949  					Columns:  make(tree.IndexElemList, 0, len(idx.ColumnNames)),
  1950  				}
  1951  				columnNames := idx.ColumnNames
  1952  				if idx.IsSharded() {
  1953  					indexDef.Sharded = &tree.ShardedIndexDef{
  1954  						ShardBuckets: tree.NewDInt(tree.DInt(idx.Sharded.ShardBuckets)),
  1955  					}
  1956  					columnNames = idx.Sharded.ColumnNames
  1957  				}
  1958  				for i, name := range columnNames {
  1959  					elem := tree.IndexElem{
  1960  						Column:    tree.Name(name),
  1961  						Direction: tree.Ascending,
  1962  					}
  1963  					if idx.ColumnDirections[i] == sqlbase.IndexDescriptor_DESC {
  1964  						elem.Direction = tree.Descending
  1965  					}
  1966  					indexDef.Columns = append(indexDef.Columns, elem)
  1967  				}
  1968  				for _, name := range idx.StoreColumnNames {
  1969  					indexDef.Storing = append(indexDef.Storing, tree.Name(name))
  1970  				}
  1971  				var def tree.TableDef = &indexDef
  1972  				if idx.Unique {
  1973  					isPK := idx.ID == td.PrimaryIndex.ID
  1974  					if isPK && td.IsPrimaryIndexDefaultRowID() {
  1975  						continue
  1976  					}
  1977  
  1978  					def = &tree.UniqueConstraintTableDef{
  1979  						IndexTableDef: indexDef,
  1980  						PrimaryKey:    isPK,
  1981  					}
  1982  				}
  1983  				defs = append(defs, def)
  1984  			}
  1985  		}
  1986  		newDefs = append(newDefs, defs...)
  1987  	}
  1988  	return newDefs, nil
  1989  }
  1990  
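        // makeObjectAlreadyExistsError returns the "already exists" error that
        // matches the descriptor type of the object the given name collided with.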
  1991  func makeObjectAlreadyExistsError(collidingObject sqlbase.DescriptorProto, name string) error {
  1992  	switch collidingObject.(type) {
  1993  	case *TableDescriptor:
  1994  		return sqlbase.NewRelationAlreadyExistsError(name)
  1995  	case *TypeDescriptor:
  1996  		return sqlbase.NewTypeAlreadyExistsError(name)
  1997  	case *DatabaseDescriptor:
  1998  		return sqlbase.NewDatabaseAlreadyExistsError(name)
  1999  	}
  2000  	return nil
  2001  }
  2002  
  2003  // makeShardColumnDesc returns a new column descriptor for a hidden computed shard column
  2004  // based on all the `colNames`.
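        // The name is derived from colNames and buckets via
        // sqlbase.GetShardColumnName; for colNames {"a", "b"} and 8 buckets it is
        // roughly crdb_internal_a_b_shard_8.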
  2005  func makeShardColumnDesc(colNames []string, buckets int) (*sqlbase.ColumnDescriptor, error) {
  2006  	col := &sqlbase.ColumnDescriptor{
  2007  		Hidden:   true,
  2008  		Nullable: false,
  2009  		Type:     types.Int4,
  2010  	}
  2011  	col.Name = sqlbase.GetShardColumnName(colNames, int32(buckets))
  2012  	col.ComputeExpr = makeHashShardComputeExpr(colNames, buckets)
  2013  	return col, nil
  2014  }
  2015  
  2016  // makeHashShardComputeExpr creates the serialized computed expression for a hash shard
  2017  // column based on the column names and the number of buckets. The expression will be
  2018  // of the form:
  2019  //
  2020  //    mod(fnv32(colNames[0]::STRING)+fnv32(colNames[1]::STRING)+...,buckets)
  2021  //
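        // Each column reference is in fact wrapped in COALESCE(col::STRING, '')
        // (see hashedColumnExpr below), so for a single column "a" and 8 buckets
        // the serialized expression comes out roughly as:
        //
        //    mod(fnv32(COALESCE(CAST(a AS STRING), '')), 8)
        //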
  2022  func makeHashShardComputeExpr(colNames []string, buckets int) *string {
  2023  	unresolvedFunc := func(funcName string) tree.ResolvableFunctionReference {
  2024  		return tree.ResolvableFunctionReference{
  2025  			FunctionReference: &tree.UnresolvedName{
  2026  				NumParts: 1,
  2027  				Parts:    tree.NameParts{funcName},
  2028  			},
  2029  		}
  2030  	}
  2031  	hashedColumnExpr := func(colName string) tree.Expr {
  2032  		return &tree.FuncExpr{
  2033  			Func: unresolvedFunc("fnv32"),
  2034  			Exprs: tree.Exprs{
  2035  				// NB: The hash shard column is created as NOT NULL, so NULLs must be
  2036  				// coalesced into something else. There are several reasonable choices:
  2037  				// an outlandish sentinel value, a per-type zero value, or the simple
  2038  				// choice made here, the empty string. At worst this produces a
  2039  				// collision for every combination of NULLable string columns, which
  2040  				// is fine for bucketing purposes.
  2042  				&tree.CoalesceExpr{
  2043  					Name: "COALESCE",
  2044  					Exprs: tree.Exprs{
  2045  						&tree.CastExpr{
  2046  							Type: types.String,
  2047  							Expr: &tree.ColumnItem{ColumnName: tree.Name(colName)},
  2048  						},
  2049  						tree.NewDString(""),
  2050  					},
  2051  				},
  2052  			},
  2053  		}
  2054  	}
  2055  
  2056  	// Construct an expression which is the sum of all of the casted and hashed
  2057  	// columns.
  2058  	var expr tree.Expr
  2059  	for i := len(colNames) - 1; i >= 0; i-- {
  2060  		c := colNames[i]
  2061  		if expr == nil {
  2062  			expr = hashedColumnExpr(c)
  2063  		} else {
  2064  			expr = &tree.BinaryExpr{
  2065  				Left:     hashedColumnExpr(c),
  2066  				Operator: tree.Plus,
  2067  				Right:    expr,
  2068  			}
  2069  		}
  2070  	}
  2071  	str := tree.Serialize(&tree.FuncExpr{
  2072  		Func: unresolvedFunc("mod"),
  2073  		Exprs: tree.Exprs{
  2074  			expr,
  2075  			tree.NewDInt(tree.DInt(buckets)),
  2076  		},
  2077  	})
  2078  	return &str
  2079  }
  2080  
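        // makeShardCheckConstraintDef builds the hidden check constraint that pins
        // a shard column to its valid bucket values; for a shard column "s" and 4
        // buckets the generated expression is roughly:
        //
        //    s IN (0, 1, 2, 3)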
  2081  func makeShardCheckConstraintDef(
  2082  	desc *MutableTableDescriptor, buckets int, shardCol *sqlbase.ColumnDescriptor,
  2083  ) (*tree.CheckConstraintTableDef, error) {
  2084  	values := &tree.Tuple{}
  2085  	for i := 0; i < buckets; i++ {
  2086  		const negative = false
  2087  		values.Exprs = append(values.Exprs, tree.NewNumVal(
  2088  			constant.MakeInt64(int64(i)),
  2089  			strconv.Itoa(i),
  2090  			negative))
  2091  	}
  2092  	return &tree.CheckConstraintTableDef{
  2093  		Expr: &tree.ComparisonExpr{
  2094  			Operator: tree.In,
  2095  			Left: &tree.ColumnItem{
  2096  				ColumnName: tree.Name(shardCol.Name),
  2097  			},
  2098  			Right: values,
  2099  		},
  2100  		Hidden: true,
  2101  	}, nil
  2102  }
  2103  
  2104  // incTelemetryForNewColumn increments relevant telemetry every time a new column
  2105  // is added to a table.
  2106  func incTelemetryForNewColumn(d *tree.ColumnTableDef) {
  2107  	if typ, ok := tree.GetStaticallyKnownType(d.Type); ok {
  2108  		telemetry.Inc(sqltelemetry.SchemaNewTypeCounter(typ.TelemetryName()))
  2109  	}
  2110  	if d.IsComputed() {
  2111  		telemetry.Inc(sqltelemetry.SchemaNewColumnTypeQualificationCounter("computed"))
  2112  	}
  2113  	if d.HasDefaultExpr() {
  2114  		telemetry.Inc(sqltelemetry.SchemaNewColumnTypeQualificationCounter("default_expr"))
  2115  	}
  2116  	if d.Unique {
  2117  		telemetry.Inc(sqltelemetry.SchemaNewColumnTypeQualificationCounter("unique"))
  2118  	}
  2119  }