github.com/cockroachdb/cockroach@v20.2.0-alpha.1+incompatible/pkg/sql/planner.go

     1  // Copyright 2016 The Cockroach Authors.
     2  //
     3  // Use of this software is governed by the Business Source License
     4  // included in the file licenses/BSL.txt.
     5  //
     6  // As of the Change Date specified in that file, in accordance with
     7  // the Business Source License, use of this software will be governed
     8  // by the Apache License, Version 2.0, included in the file
     9  // licenses/APL.txt.
    10  
    11  package sql
    12  
    13  import (
    14  	"context"
    15  	"fmt"
    16  	"time"
    17  
    18  	"github.com/cockroachdb/cockroach/pkg/jobs"
    19  	"github.com/cockroachdb/cockroach/pkg/kv"
    20  	"github.com/cockroachdb/cockroach/pkg/server/serverpb"
    21  	"github.com/cockroachdb/cockroach/pkg/sql/catalog"
    22  	"github.com/cockroachdb/cockroach/pkg/sql/catalog/descs"
    23  	"github.com/cockroachdb/cockroach/pkg/sql/catalog/lease"
    24  	"github.com/cockroachdb/cockroach/pkg/sql/catalog/resolver"
    25  	"github.com/cockroachdb/cockroach/pkg/sql/opt/exec"
    26  	"github.com/cockroachdb/cockroach/pkg/sql/parser"
    27  	"github.com/cockroachdb/cockroach/pkg/sql/querycache"
    28  	"github.com/cockroachdb/cockroach/pkg/sql/sem/transform"
    29  	"github.com/cockroachdb/cockroach/pkg/sql/sem/tree"
    30  	"github.com/cockroachdb/cockroach/pkg/sql/sessiondata"
    31  	"github.com/cockroachdb/cockroach/pkg/sql/sqlbase"
    32  	"github.com/cockroachdb/cockroach/pkg/sql/types"
    33  	"github.com/cockroachdb/cockroach/pkg/util/envutil"
    34  	"github.com/cockroachdb/cockroach/pkg/util/hlc"
    35  	"github.com/cockroachdb/cockroach/pkg/util/mon"
    36  	"github.com/cockroachdb/errors"
    37  	"github.com/cockroachdb/logtags"
    38  )
    39  
    40  // extendedEvalContext extends tree.EvalContext with fields that are needed for
    41  // distsql planning.
    42  type extendedEvalContext struct {
    43  	tree.EvalContext
    44  
    45  	SessionMutator *sessionDataMutator
    46  
    47  	// SessionID for this connection.
    48  	SessionID ClusterWideID
    49  
    50  	// VirtualSchemas can be used to access virtual tables.
    51  	VirtualSchemas VirtualTabler
    52  
    53  	// Tracing provides access to the session's tracing interface. Changes to the
    54  	// tracing state should be done through the sessionDataMutator.
    55  	Tracing *SessionTracing
    56  
    57  	// StatusServer gives access to the Status service.
    58  	StatusServer serverpb.OptionalStatusServer
    59  
     60  	// MemMetrics represents the group of metrics to which execution should
    61  	// contribute.
    62  	MemMetrics *MemoryMetrics
    63  
     64  	// Descs points to the Session's descriptor collection (& cache).
    65  	Descs *descs.Collection
    66  
    67  	ExecCfg *ExecutorConfig
    68  
    69  	DistSQLPlanner *DistSQLPlanner
    70  
    71  	TxnModesSetter txnModesSetter
    72  
    73  	Jobs *jobsCollection
    74  
    75  	schemaAccessors *schemaInterface
    76  
    77  	sqlStatsCollector *sqlStatsCollector
    78  }
    79  
     80  // copy returns a copy of ctx with the embedded EvalContext copied via Copy.
    81  func (ctx *extendedEvalContext) copy() *extendedEvalContext {
    82  	cpy := *ctx
    83  	cpy.EvalContext = *ctx.EvalContext.Copy()
    84  	return &cpy
    85  }
    86  
    87  // QueueJob creates a new job from record and queues it for execution after
    88  // the transaction commits.
    89  func (ctx *extendedEvalContext) QueueJob(record jobs.Record) (*jobs.Job, error) {
    90  	job, err := ctx.ExecCfg.JobRegistry.CreateJobWithTxn(
    91  		ctx.Context,
    92  		record,
    93  		ctx.Txn,
    94  	)
    95  	if err != nil {
    96  		return nil, err
    97  	}
    98  	*ctx.Jobs = append(*ctx.Jobs, *job.ID())
    99  	return job, nil
   100  }
   101  
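// exampleQueueJob is an illustrative sketch of how planning code can queue a
// job through the extended eval context. The record contents are hypothetical,
// and the sketch assumes the jobspb import
// ("github.com/cockroachdb/cockroach/pkg/jobs/jobspb"), which this file does
// not include.
func exampleQueueJob(evalCtx *extendedEvalContext, tableID sqlbase.ID) error {
	record := jobs.Record{
		Description:   "example schema change", // hypothetical description
		Username:      evalCtx.SessionData.User,
		DescriptorIDs: sqlbase.IDs{tableID},
		Details:       jobspb.SchemaChangeDetails{},
		Progress:      jobspb.SchemaChangeProgress{},
	}
	// The job record is written using the planner's transaction; it becomes
	// runnable only after that transaction commits, and its ID is appended to
	// evalCtx.Jobs.
	job, err := evalCtx.QueueJob(record)
	if err != nil {
		return err
	}
	_ = job.ID()
	return nil
}
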
   102  // schemaInterface provides access to the database and table descriptors.
   103  // See schema_accessors.go.
   104  type schemaInterface struct {
   105  	physical catalog.Accessor
   106  	logical  catalog.Accessor
   107  }
   108  
   109  // planner is the centerpiece of SQL statement execution combining session
   110  // state and database state with the logic for SQL execution. It is logically
   111  // scoped to the execution of a single statement, and should not be used to
   112  // execute multiple statements. It is not safe to use the same planner from
   113  // multiple goroutines concurrently.
   114  //
   115  // planners are usually created by using the newPlanner method on a Session.
    116  // If one needs to be created outside of a Session, use newInternalPlanner().
   117  type planner struct {
   118  	txn *kv.Txn
   119  
   120  	// Reference to the corresponding sql Statement for this query.
   121  	stmt *Statement
   122  
   123  	// Contexts for different stages of planning and execution.
   124  	semaCtx         tree.SemaContext
   125  	extendedEvalCtx extendedEvalContext
   126  
   127  	// sessionDataMutator is used to mutate the session variables. Read
   128  	// access to them is provided through evalCtx.
   129  	sessionDataMutator *sessionDataMutator
   130  
   131  	// execCfg is used to access the server configuration for the Executor.
   132  	execCfg *ExecutorConfig
   133  
   134  	preparedStatements preparedStatementsAccessor
   135  
   136  	// avoidCachedDescriptors, when true, instructs all code that
   137  	// accesses table/view descriptors to force reading the descriptors
    138  	// within the transaction. This is necessary in order to:
    139  	// 1. Read descriptors from the store when they are part of a schema
    140  	// change but are not modified by it (for example, a table read by
    141  	// CREATE VIEW).
    142  	// 2. Disable the use of the table cache in tests.
   143  	avoidCachedDescriptors bool
   144  
   145  	// If set, the planner should skip checking for the SELECT privilege when
   146  	// initializing plans to read from a table. This should be used with care.
   147  	skipSelectPrivilegeChecks bool
   148  
   149  	// autoCommit indicates whether we're planning for an implicit transaction.
   150  	// If autoCommit is true, the plan is allowed (but not required) to commit the
   151  	// transaction along with other KV operations. Committing the txn might be
   152  	// beneficial because it may enable the 1PC optimization.
   153  	//
    154  	// NOTE: the plan node must be configured appropriately to actually perform an
   155  	// auto-commit. This is dependent on information from the optimizer.
   156  	autoCommit bool
   157  
   158  	// discardRows is set if we want to discard any results rather than sending
    159  	// them back to the client. Used for testing/benchmarking. Note that
    160  	// neither the resulting schema nor the plan is affected.
   161  	// See EXECUTE .. DISCARD ROWS.
   162  	discardRows bool
   163  
   164  	// cancelChecker is used by planNodes to check for cancellation of the associated
   165  	// query.
   166  	cancelChecker *sqlbase.CancelChecker
   167  
   168  	// collectBundle is set when we are collecting a diagnostics bundle for a
   169  	// statement; it triggers saving of extra information like the plan string.
   170  	collectBundle bool
   171  
   172  	// isPreparing is true if this planner is currently preparing.
   173  	isPreparing bool
   174  
   175  	// curPlan collects the properties of the current plan being prepared. This state
   176  	// is undefined at the beginning of the planning of each new statement, and cannot
   177  	// be reused for an old prepared statement after a new statement has been prepared.
   178  	curPlan planTop
   179  
   180  	// Avoid allocations by embedding commonly used objects and visitors.
   181  	txCtx                 transform.ExprTransformContext
   182  	nameResolutionVisitor sqlbase.NameResolutionVisitor
   183  	tableName             tree.TableName
   184  
   185  	// Use a common datum allocator across all the plan nodes. This separates the
   186  	// plan lifetime from the lifetime of returned results allowing plan nodes to
   187  	// be pool allocated.
   188  	alloc *sqlbase.DatumAlloc
   189  
   190  	// optPlanningCtx stores the optimizer planning context, which contains
   191  	// data structures that can be reused between queries (for efficiency).
   192  	optPlanningCtx optPlanningCtx
   193  
   194  	// noticeSender allows the sending of notices.
   195  	// Do not use this object directly; use the SendClientNotice() method
   196  	// instead.
   197  	noticeSender noticeSender
   198  
   199  	queryCacheSession querycache.Session
   200  }
   201  
   202  func (ctx *extendedEvalContext) setSessionID(sessionID ClusterWideID) {
   203  	ctx.SessionID = sessionID
   204  }
   205  
   206  // noteworthyInternalMemoryUsageBytes is the minimum size tracked by each
   207  // internal SQL pool before the pool starts explicitly logging overall usage
   208  // growth in the log.
   209  var noteworthyInternalMemoryUsageBytes = envutil.EnvOrDefaultInt64("COCKROACH_NOTEWORTHY_INTERNAL_MEMORY_USAGE", 1<<20 /* 1 MB */)
   210  
   211  // NewInternalPlanner is an exported version of newInternalPlanner. It
   212  // returns an interface{} so it can be used outside of the sql package.
   213  func NewInternalPlanner(
   214  	opName string, txn *kv.Txn, user string, memMetrics *MemoryMetrics, execCfg *ExecutorConfig,
   215  ) (interface{}, func()) {
   216  	return newInternalPlanner(opName, txn, user, memMetrics, execCfg)
   217  }
   218  
   219  // newInternalPlanner creates a new planner instance for internal usage. This
   220  // planner is not associated with a sql session.
   221  //
   222  // Since it can't be reset, the planner can be used only for planning a single
   223  // statement.
   224  //
   225  // Returns a cleanup function that must be called once the caller is done with
   226  // the planner.
   227  func newInternalPlanner(
   228  	opName string, txn *kv.Txn, user string, memMetrics *MemoryMetrics, execCfg *ExecutorConfig,
   229  ) (*planner, func()) {
   230  	// We need a context that outlives all the uses of the planner (since the
    231  	// planner captures it in the EvalCtx), and so does the cleanup function that
   232  	// we're going to return. We just create one here instead of asking the caller
   233  	// for a ctx with this property. This is really ugly, but the alternative of
   234  	// asking the caller for one is hard to explain. What we need is better and
   235  	// separate interfaces for planning and running plans, which could take
   236  	// suitable contexts.
   237  	ctx := logtags.AddTag(context.Background(), opName, "")
   238  
   239  	sd := &sessiondata.SessionData{
   240  		SearchPath:    sqlbase.DefaultSearchPath,
   241  		User:          user,
   242  		Database:      "system",
   243  		SequenceState: sessiondata.NewSequenceState(),
   244  		DataConversion: sessiondata.DataConversionConfig{
   245  			Location: time.UTC,
   246  		},
   247  	}
   248  	// The table collection used by the internal planner does not rely on the
   249  	// DatabaseCache and there are no subscribers to the DatabaseCache, so we can
   250  	// leave it uninitialized.
   251  	tables := descs.NewCollection(execCfg.LeaseManager, execCfg.Settings)
   252  	dataMutator := &sessionDataMutator{
   253  		data: sd,
   254  		defaults: SessionDefaults(map[string]string{
   255  			"application_name": "crdb-internal",
   256  			"database":         "system",
   257  		}),
   258  		settings:           execCfg.Settings,
   259  		paramStatusUpdater: &noopParamStatusUpdater{},
   260  		setCurTxnReadOnly:  func(bool) {},
   261  	}
   262  
   263  	var ts time.Time
   264  	if txn != nil {
   265  		readTimestamp := txn.ReadTimestamp()
   266  		if readTimestamp == (hlc.Timestamp{}) {
    267  			panic("newInternalPlanner called with a transaction without timestamps")
   268  		}
   269  		ts = readTimestamp.GoTime()
   270  	}
   271  
   272  	p := &planner{execCfg: execCfg, alloc: &sqlbase.DatumAlloc{}}
   273  
   274  	p.txn = txn
   275  	p.stmt = nil
   276  	p.cancelChecker = sqlbase.NewCancelChecker(ctx)
   277  
   278  	p.semaCtx = tree.MakeSemaContext()
   279  	p.semaCtx.Location = &sd.DataConversion.Location
   280  	p.semaCtx.SearchPath = sd.SearchPath
   281  	p.semaCtx.TypeResolver = p
   282  
   283  	plannerMon := mon.MakeUnlimitedMonitor(ctx,
   284  		fmt.Sprintf("internal-planner.%s.%s", user, opName),
   285  		mon.MemoryResource,
   286  		memMetrics.CurBytesCount, memMetrics.MaxBytesHist,
   287  		noteworthyInternalMemoryUsageBytes, execCfg.Settings)
   288  
   289  	p.extendedEvalCtx = internalExtendedEvalCtx(
   290  		ctx, sd, dataMutator, tables, txn, ts, ts, execCfg, &plannerMon,
   291  	)
   292  	p.extendedEvalCtx.Planner = p
   293  	p.extendedEvalCtx.PrivilegedAccessor = p
   294  	p.extendedEvalCtx.SessionAccessor = p
   295  	p.extendedEvalCtx.ClientNoticeSender = p
   296  	p.extendedEvalCtx.Sequence = p
   297  	p.extendedEvalCtx.Tenant = p
   298  	p.extendedEvalCtx.ClusterID = execCfg.ClusterID()
   299  	p.extendedEvalCtx.ClusterName = execCfg.RPCContext.ClusterName()
   300  	p.extendedEvalCtx.NodeID = execCfg.NodeID
   301  	p.extendedEvalCtx.Locality = execCfg.Locality
   302  
   303  	p.sessionDataMutator = dataMutator
   304  	p.autoCommit = false
   305  
   306  	p.extendedEvalCtx.MemMetrics = memMetrics
   307  	p.extendedEvalCtx.ExecCfg = execCfg
   308  	p.extendedEvalCtx.Placeholders = &p.semaCtx.Placeholders
   309  	p.extendedEvalCtx.Annotations = &p.semaCtx.Annotations
   310  	p.extendedEvalCtx.Descs = tables
   311  
   312  	p.queryCacheSession.Init()
   313  	p.optPlanningCtx.init(p)
   314  
   315  	return p, func() {
   316  		// Note that we capture ctx here. This is only valid as long as we create
   317  		// the context as explained at the top of the method.
   318  		plannerMon.Stop(ctx)
   319  	}
   320  }
   321  
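// exampleInternalPlannerUse is an illustrative sketch of the expected
// lifecycle of an internal planner: create it, plan a single statement, then
// call the returned cleanup function to stop the planner's memory monitor.
// The opName and user below are hypothetical.
func exampleInternalPlannerUse(txn *kv.Txn, memMetrics *MemoryMetrics, execCfg *ExecutorConfig) {
	p, cleanup := newInternalPlanner("example-op", txn, "root", memMetrics, execCfg)
	// cleanup must run only after every use of the planner, since it stops the
	// monitor captured by the planner's eval context.
	defer cleanup()

	_ = p.Txn()         // the kv.Txn passed in above (possibly nil)
	_ = p.EvalContext() // the tree.EvalContext embedded in the extended context
	_ = p.SessionData() // session data seeded with database "system"
}
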
   322  // internalExtendedEvalCtx creates an evaluation context for an "internal
   323  // planner". Since the eval context is supposed to be tied to a session and
   324  // there's no session to speak of here, different fields are filled in here to
   325  // keep the tests using the internal planner passing.
   326  func internalExtendedEvalCtx(
   327  	ctx context.Context,
   328  	sd *sessiondata.SessionData,
   329  	dataMutator *sessionDataMutator,
   330  	tables *descs.Collection,
   331  	txn *kv.Txn,
   332  	txnTimestamp time.Time,
   333  	stmtTimestamp time.Time,
   334  	execCfg *ExecutorConfig,
   335  	plannerMon *mon.BytesMonitor,
   336  ) extendedEvalContext {
   337  	evalContextTestingKnobs := execCfg.EvalContextTestingKnobs
   338  
   339  	return extendedEvalContext{
   340  		EvalContext: tree.EvalContext{
   341  			Txn:              txn,
   342  			SessionData:      sd,
   343  			TxnReadOnly:      false,
   344  			TxnImplicit:      true,
   345  			Settings:         execCfg.Settings,
   346  			Codec:            execCfg.Codec,
   347  			Context:          ctx,
   348  			Mon:              plannerMon,
   349  			TestingKnobs:     evalContextTestingKnobs,
   350  			StmtTimestamp:    stmtTimestamp,
   351  			TxnTimestamp:     txnTimestamp,
   352  			InternalExecutor: execCfg.InternalExecutor,
   353  		},
   354  		SessionMutator:  dataMutator,
   355  		VirtualSchemas:  execCfg.VirtualSchemas,
   356  		Tracing:         &SessionTracing{},
   357  		StatusServer:    execCfg.StatusServer,
   358  		Descs:           tables,
   359  		ExecCfg:         execCfg,
   360  		schemaAccessors: newSchemaInterface(tables, execCfg.VirtualSchemas),
   361  		DistSQLPlanner:  execCfg.DistSQLPlanner,
   362  	}
   363  }
   364  
   365  func (p *planner) PhysicalSchemaAccessor() catalog.Accessor {
   366  	return p.extendedEvalCtx.schemaAccessors.physical
   367  }
   368  
   369  // LogicalSchemaAccessor is part of the resolver.SchemaResolver interface.
   370  func (p *planner) LogicalSchemaAccessor() catalog.Accessor {
   371  	return p.extendedEvalCtx.schemaAccessors.logical
   372  }
   373  
   374  // Note: if the context will be modified, use ExtendedEvalContextCopy instead.
   375  func (p *planner) ExtendedEvalContext() *extendedEvalContext {
   376  	return &p.extendedEvalCtx
   377  }
   378  
   379  func (p *planner) ExtendedEvalContextCopy() *extendedEvalContext {
   380  	return p.extendedEvalCtx.copy()
   381  }
   382  
   383  // CurrentDatabase is part of the resolver.SchemaResolver interface.
   384  func (p *planner) CurrentDatabase() string {
   385  	return p.SessionData().Database
   386  }
   387  
   388  // CurrentSearchPath is part of the resolver.SchemaResolver interface.
   389  func (p *planner) CurrentSearchPath() sessiondata.SearchPath {
   390  	return p.SessionData().SearchPath
   391  }
   392  
    393  // EvalContext provides convenient access to the planner's tree.EvalContext.
   394  func (p *planner) EvalContext() *tree.EvalContext {
   395  	return &p.extendedEvalCtx.EvalContext
   396  }
   397  
   398  func (p *planner) Tables() *descs.Collection {
   399  	return p.extendedEvalCtx.Descs
   400  }
   401  
   402  // ExecCfg implements the PlanHookState interface.
   403  func (p *planner) ExecCfg() *ExecutorConfig {
   404  	return p.extendedEvalCtx.ExecCfg
   405  }
   406  
   407  func (p *planner) LeaseMgr() *lease.Manager {
   408  	return p.Tables().LeaseManager()
   409  }
   410  
   411  func (p *planner) Txn() *kv.Txn {
   412  	return p.txn
   413  }
   414  
   415  func (p *planner) User() string {
   416  	return p.SessionData().User
   417  }
   418  
   419  func (p *planner) TemporarySchemaName() string {
   420  	return temporarySchemaName(p.ExtendedEvalContext().SessionID)
   421  }
   422  
    423  // DistSQLPlanner returns the DistSQLPlanner.
   424  func (p *planner) DistSQLPlanner() *DistSQLPlanner {
   425  	return p.extendedEvalCtx.DistSQLPlanner
   426  }
   427  
   428  // ParseType implements the tree.EvalPlanner interface.
   429  // We define this here to break the dependency from eval.go to the parser.
   430  func (p *planner) ParseType(sql string) (*types.T, error) {
   431  	ref, err := parser.ParseType(sql)
   432  	if err != nil {
   433  		return nil, err
   434  	}
   435  	return tree.ResolveType(context.TODO(), ref, p.semaCtx.GetTypeResolver())
   436  }
   437  
   438  // ParseQualifiedTableName implements the tree.EvalDatabase interface.
   439  // This exists to get around a circular dependency between sql/sem/tree and
   440  // sql/parser. sql/parser depends on tree to make objects, so tree cannot import
   441  // ParseQualifiedTableName even though some builtins need that function.
   442  // TODO(jordan): remove this once builtins can be moved outside of sql/sem/tree.
   443  func (p *planner) ParseQualifiedTableName(sql string) (*tree.TableName, error) {
   444  	return parser.ParseQualifiedTableName(sql)
   445  }
   446  
   447  // ResolveTableName implements the tree.EvalDatabase interface.
   448  func (p *planner) ResolveTableName(ctx context.Context, tn *tree.TableName) (tree.ID, error) {
   449  	desc, err := resolver.ResolveExistingTableObject(ctx, p, tn, tree.ObjectLookupFlagsWithRequired(), resolver.ResolveAnyDescType)
   450  	if err != nil {
   451  		return 0, err
   452  	}
   453  	return tree.ID(desc.ID), nil
   454  }
   455  
    456  // LookupTableByID looks up a table by the given descriptor ID. Based on the
   457  // CommonLookupFlags, it could use or skip the Collection cache. See
   458  // Collection.getTableVersionByID for how it's used.
   459  // TODO (SQLSchema): This should call into the set of SchemaAccessors instead
   460  //  of having its own logic for lookups.
   461  func (p *planner) LookupTableByID(
   462  	ctx context.Context, tableID sqlbase.ID,
   463  ) (catalog.TableEntry, error) {
   464  	if entry, err := p.getVirtualTabler().getVirtualTableEntryByID(tableID); err == nil {
   465  		return catalog.TableEntry{Desc: sqlbase.NewImmutableTableDescriptor(*entry.desc)}, nil
   466  	}
   467  	flags := tree.ObjectLookupFlags{CommonLookupFlags: tree.CommonLookupFlags{AvoidCached: p.avoidCachedDescriptors}}
   468  	table, err := p.Tables().GetTableVersionByID(ctx, p.txn, tableID, flags)
   469  	if err != nil {
   470  		if sqlbase.HasAddingTableError(err) {
   471  			return catalog.TableEntry{IsAdding: true}, nil
   472  		}
   473  		return catalog.TableEntry{}, err
   474  	}
   475  	// TODO (rohany): This shouldn't be needed once the descs.Collection always
   476  	//  returns descriptors with hydrated types.
   477  	if err := p.maybeHydrateTypesInDescriptor(ctx, table); err != nil {
   478  		return catalog.TableEntry{}, err
   479  	}
   480  	return catalog.TableEntry{Desc: table}, nil
   481  }
   482  
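// exampleLookupTableByID is an illustrative sketch of how callers can consume
// the catalog.TableEntry returned by LookupTableByID, including the IsAdding
// case, which is reported instead of an error.
func exampleLookupTableByID(
	ctx context.Context, p *planner, id sqlbase.ID,
) (catalog.TableEntry, error) {
	entry, err := p.LookupTableByID(ctx, id)
	if err != nil {
		return catalog.TableEntry{}, err
	}
	if entry.IsAdding {
		// The descriptor exists but the table is still being added; callers
		// typically surface this as "not yet usable" rather than failing the
		// lookup outright.
		return catalog.TableEntry{}, errors.Errorf("table [%d] is being added", id)
	}
	return entry, nil
}
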
   483  // TypeAsString enforces (not hints) that the given expression typechecks as a
   484  // string and returns a function that can be called to get the string value
   485  // during (planNode).Start.
   486  // To also allow NULLs to be returned, use TypeAsStringOrNull() instead.
   487  func (p *planner) TypeAsString(
   488  	ctx context.Context, e tree.Expr, op string,
   489  ) (func() (string, error), error) {
   490  	typedE, err := tree.TypeCheckAndRequire(ctx, e, &p.semaCtx, types.String, op)
   491  	if err != nil {
   492  		return nil, err
   493  	}
   494  	evalFn := p.makeStringEvalFn(typedE)
   495  	return func() (string, error) {
   496  		isNull, str, err := evalFn()
   497  		if err != nil {
   498  			return "", err
   499  		}
   500  		if isNull {
   501  			return "", errors.Errorf("expected string, got NULL")
   502  		}
   503  		return str, nil
   504  	}, nil
   505  }
   506  
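// exampleTypeAsString is an illustrative sketch of the two-phase pattern that
// TypeAsString supports: type checking happens while building the plan, and
// the returned closure is evaluated later, e.g. during (planNode).Start. The
// op label "EXAMPLE" is hypothetical.
func exampleTypeAsString(ctx context.Context, p *planner, e tree.Expr) (string, error) {
	// Planning time: reject anything that does not type-check as STRING.
	strFn, err := p.TypeAsString(ctx, e, "EXAMPLE")
	if err != nil {
		return "", err
	}
	// Execution time: evaluate the expression. NULL is an error here; use
	// TypeAsStringOrNull if NULL should be permitted.
	return strFn()
}
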
   507  // TypeAsStringOrNull is like TypeAsString but allows NULLs.
   508  func (p *planner) TypeAsStringOrNull(
   509  	ctx context.Context, e tree.Expr, op string,
   510  ) (func() (bool, string, error), error) {
   511  	typedE, err := tree.TypeCheckAndRequire(ctx, e, &p.semaCtx, types.String, op)
   512  	if err != nil {
   513  		return nil, err
   514  	}
   515  	return p.makeStringEvalFn(typedE), nil
   516  }
   517  
   518  func (p *planner) makeStringEvalFn(typedE tree.TypedExpr) func() (bool, string, error) {
   519  	return func() (bool, string, error) {
   520  		d, err := typedE.Eval(p.EvalContext())
   521  		if err != nil {
   522  			return false, "", err
   523  		}
   524  		if d == tree.DNull {
   525  			return true, "", nil
   526  		}
   527  		str, ok := d.(*tree.DString)
   528  		if !ok {
   529  			return false, "", errors.Errorf("failed to cast %T to string", d)
   530  		}
   531  		return false, string(*str), nil
   532  	}
   533  }
   534  
   535  // KVStringOptValidate indicates the requested validation of a TypeAsStringOpts
   536  // option.
   537  type KVStringOptValidate string
   538  
   539  // KVStringOptValidate values
   540  const (
   541  	KVStringOptAny            KVStringOptValidate = `any`
   542  	KVStringOptRequireNoValue KVStringOptValidate = `no-value`
   543  	KVStringOptRequireValue   KVStringOptValidate = `value`
   544  )
   545  
   546  // evalStringOptions evaluates the KVOption values as strings and returns them
   547  // in a map. Options with no value have an empty string.
   548  func evalStringOptions(
   549  	evalCtx *tree.EvalContext, opts []exec.KVOption, optValidate map[string]KVStringOptValidate,
   550  ) (map[string]string, error) {
   551  	res := make(map[string]string, len(opts))
   552  	for _, opt := range opts {
   553  		k := opt.Key
   554  		validate, ok := optValidate[k]
   555  		if !ok {
   556  			return nil, errors.Errorf("invalid option %q", k)
   557  		}
   558  		val, err := opt.Value.Eval(evalCtx)
   559  		if err != nil {
   560  			return nil, err
   561  		}
   562  		if val == tree.DNull {
   563  			if validate == KVStringOptRequireValue {
   564  				return nil, errors.Errorf("option %q requires a value", k)
   565  			}
   566  			res[k] = ""
   567  		} else {
   568  			if validate == KVStringOptRequireNoValue {
   569  				return nil, errors.Errorf("option %q does not take a value", k)
   570  			}
   571  			str, ok := val.(*tree.DString)
   572  			if !ok {
   573  				return nil, errors.Errorf("expected string value, got %T", val)
   574  			}
   575  			res[k] = string(*str)
   576  		}
   577  	}
   578  	return res, nil
   579  }
   580  
   581  // TypeAsStringOpts enforces (not hints) that the given expressions
   582  // typecheck as strings, and returns a function that can be called to
   583  // get the string value during (planNode).Start.
   584  func (p *planner) TypeAsStringOpts(
   585  	ctx context.Context, opts tree.KVOptions, optValidate map[string]KVStringOptValidate,
   586  ) (func() (map[string]string, error), error) {
   587  	typed := make(map[string]tree.TypedExpr, len(opts))
   588  	for _, opt := range opts {
   589  		k := string(opt.Key)
   590  		validate, ok := optValidate[k]
   591  		if !ok {
   592  			return nil, errors.Errorf("invalid option %q", k)
   593  		}
   594  
   595  		if opt.Value == nil {
   596  			if validate == KVStringOptRequireValue {
   597  				return nil, errors.Errorf("option %q requires a value", k)
   598  			}
   599  			typed[k] = nil
   600  			continue
   601  		}
   602  		if validate == KVStringOptRequireNoValue {
   603  			return nil, errors.Errorf("option %q does not take a value", k)
   604  		}
   605  		r, err := tree.TypeCheckAndRequire(ctx, opt.Value, &p.semaCtx, types.String, k)
   606  		if err != nil {
   607  			return nil, err
   608  		}
   609  		typed[k] = r
   610  	}
   611  	fn := func() (map[string]string, error) {
   612  		res := make(map[string]string, len(typed))
   613  		for name, e := range typed {
   614  			if e == nil {
   615  				res[name] = ""
   616  				continue
   617  			}
   618  			d, err := e.Eval(p.EvalContext())
   619  			if err != nil {
   620  				return nil, err
   621  			}
   622  			str, ok := d.(*tree.DString)
   623  			if !ok {
   624  				return res, errors.Errorf("failed to cast %T to string", d)
   625  			}
   626  			res[name] = string(*str)
   627  		}
   628  		return res, nil
   629  	}
   630  	return fn, nil
   631  }
   632  
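// exampleTypeAsStringOpts is an illustrative sketch of wiring a validation map
// through TypeAsStringOpts for a hypothetical statement that accepts one
// flag-style option, one option requiring a value, and one option where the
// value is optional. The option names are made up.
func exampleTypeAsStringOpts(
	ctx context.Context, p *planner, opts tree.KVOptions,
) (map[string]string, error) {
	optValidate := map[string]KVStringOptValidate{
		"detached":    KVStringOptRequireNoValue, // e.g. WITH detached
		"destination": KVStringOptRequireValue,   // e.g. WITH destination = '...'
		"comment":     KVStringOptAny,            // value optional
	}
	optsFn, err := p.TypeAsStringOpts(ctx, opts, optValidate)
	if err != nil {
		return nil, err
	}
	// The returned closure evaluates the option expressions at execution time;
	// options supplied without a value map to the empty string.
	return optsFn()
}
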
   633  // TypeAsStringArray enforces (not hints) that the given expressions all typecheck as
   634  // strings and returns a function that can be called to get the string values
   635  // during (planNode).Start.
   636  func (p *planner) TypeAsStringArray(
   637  	ctx context.Context, exprs tree.Exprs, op string,
   638  ) (func() ([]string, error), error) {
   639  	typedExprs := make([]tree.TypedExpr, len(exprs))
   640  	for i := range exprs {
   641  		typedE, err := tree.TypeCheckAndRequire(ctx, exprs[i], &p.semaCtx, types.String, op)
   642  		if err != nil {
   643  			return nil, err
   644  		}
   645  		typedExprs[i] = typedE
   646  	}
   647  	fn := func() ([]string, error) {
   648  		strs := make([]string, len(exprs))
   649  		for i := range exprs {
   650  			d, err := typedExprs[i].Eval(p.EvalContext())
   651  			if err != nil {
   652  				return nil, err
   653  			}
   654  			str, ok := d.(*tree.DString)
   655  			if !ok {
   656  				return strs, errors.Errorf("failed to cast %T to string", d)
   657  			}
   658  			strs[i] = string(*str)
   659  		}
   660  		return strs, nil
   661  	}
   662  	return fn, nil
   663  }
   664  
   665  // SessionData is part of the PlanHookState interface.
   666  func (p *planner) SessionData() *sessiondata.SessionData {
   667  	return p.EvalContext().SessionData
   668  }
   669  
   670  // txnModesSetter is an interface used by SQL execution to influence the current
   671  // transaction.
   672  type txnModesSetter interface {
   673  	// setTransactionModes updates some characteristics of the current
   674  	// transaction.
   675  	// asOfTs, if not empty, is the evaluation of modes.AsOf.
   676  	setTransactionModes(modes tree.TransactionModes, asOfTs hlc.Timestamp) error
   677  }