github.com/cockroachdb/cockroach@v20.2.0-alpha.1+incompatible/pkg/sql/opt/optbuilder/scope.go

// Copyright 2018 The Cockroach Authors.
//
// Use of this software is governed by the Business Source License
// included in the file licenses/BSL.txt.
//
// As of the Change Date specified in that file, in accordance with
// the Business Source License, use of this software will be governed
// by the Apache License, Version 2.0, included in the file
// licenses/APL.txt.

package optbuilder

import (
	"bytes"
	"context"
	"fmt"
	"strings"

	"github.com/cockroachdb/cockroach/pkg/sql/opt"
	"github.com/cockroachdb/cockroach/pkg/sql/opt/memo"
	"github.com/cockroachdb/cockroach/pkg/sql/opt/props"
	"github.com/cockroachdb/cockroach/pkg/sql/opt/props/physical"
	"github.com/cockroachdb/cockroach/pkg/sql/pgwire/pgcode"
	"github.com/cockroachdb/cockroach/pkg/sql/pgwire/pgerror"
	"github.com/cockroachdb/cockroach/pkg/sql/sem/tree"
	"github.com/cockroachdb/cockroach/pkg/sql/sqlbase"
	"github.com/cockroachdb/cockroach/pkg/sql/types"
	"github.com/cockroachdb/cockroach/pkg/util/log"
	"github.com/cockroachdb/errors"
)

// scopeOrdinal identifies an ordinal position within a list of scope columns.
type scopeOrdinal int

// scope is used for the build process and maintains the variables that have
// been bound within the current scope as columnProps. Variables bound in the
// parent scope are also visible in this scope.
//
// See builder.go for more details.
type scope struct {
	builder *Builder
	parent  *scope
	cols    []scopeColumn

	// groupby is the structure that keeps the grouping metadata when this scope
	// includes aggregate functions or GROUP BY.
	groupby *groupby

	// inAgg is true within the body of an aggregate function. inAgg is used
	// to ensure that nested aggregates are disallowed.
	// TODO(radu): this, together with some other fields below, belongs in a
	// context that is threaded through the calls instead of setting and resetting
	// it in the scope.
	inAgg bool

	// windows contains the set of window functions encountered while building
	// the current SELECT statement.
	windows []scopeColumn

	// windowDefs is the set of named window definitions present in the nearest
	// SELECT.
	windowDefs []*tree.WindowDef

	// ordering records the ORDER BY columns associated with this scope. Each
	// column is either in cols or in extraCols.
	// Must not be modified in-place after being set.
	ordering opt.Ordering

	// distinctOnCols records the DISTINCT ON columns by ID.
	distinctOnCols opt.ColSet

	// extraCols contains columns specified by the ORDER BY or DISTINCT ON clauses
	// which don't appear in cols.
	extraCols []scopeColumn

	// expr is the SQL node built with this scope.
	expr memo.RelExpr

	// Desired number of columns for subqueries found during name resolution and
	// type checking. This only applies to the top-level subqueries that are
	// anchored directly to a relational expression.
	columns int

	// If replaceSRFs is true, replace raw SRFs with an srf struct. See
	// the replaceSRF() function for more details.
	replaceSRFs bool

	// singleSRFColumn is true if this scope has a single column that comes from
	// an SRF. The flag is used to allow renaming the column to the table alias.
	singleSRFColumn bool

	// srfs contains all the SRFs that were replaced in this scope. It will be
	// used by the Builder to convert the input from the FROM clause to a lateral
	// cross join between the input and a Zip of all the srfs in this slice.
	srfs []*srf

	// ctes contains the CTEs which were created at this scope. This set
	// is not exhaustive because expressions can reference CTEs from parent
	// scopes.
	ctes map[string]*cteSource

	// context is the current context in the SQL query (e.g., "SELECT" or
	// "HAVING"). It is used for error messages and to identify scoping errors
	// (e.g., aggregates are not allowed in the FROM clause of their own query
	// level).
	context exprKind

	// atRoot is whether we are currently at a root context.
	atRoot bool
}

// cteSource represents a CTE in the given query.
type cteSource struct {
	id           opt.WithID
	name         tree.AliasClause
	cols         physical.Presentation
	originalExpr tree.Statement
	bindingProps *props.Relational
	expr         memo.RelExpr
	mtr          tree.MaterializeClause
	// If set, this function is called when a CTE is referenced. It can throw an
	// error.
	onRef func()
}

// exprKind is used to represent the kind of the current expression in the
// SQL query.
type exprKind int8

const (
	exprKindNone exprKind = iota
	exprKindAlterTableSplitAt
	exprKindDistinctOn
	exprKindFrom
	exprKindGroupBy
	exprKindHaving
	exprKindLateralJoin
	exprKindLimit
	exprKindOffset
	exprKindOn
	exprKindOrderBy
	exprKindReturning
	exprKindSelect
	exprKindValues
	exprKindWhere
	exprKindWindowFrameStart
	exprKindWindowFrameEnd
)

var exprKindName = [...]string{
	exprKindNone:              "",
	exprKindAlterTableSplitAt: "ALTER TABLE SPLIT AT",
	exprKindDistinctOn:        "DISTINCT ON",
	exprKindFrom:              "FROM",
	exprKindGroupBy:           "GROUP BY",
	exprKindHaving:            "HAVING",
	exprKindLateralJoin:       "LATERAL JOIN",
	exprKindLimit:             "LIMIT",
	exprKindOffset:            "OFFSET",
	exprKindOn:                "ON",
	exprKindOrderBy:           "ORDER BY",
	exprKindReturning:         "RETURNING",
	exprKindSelect:            "SELECT",
	exprKindValues:            "VALUES",
	exprKindWhere:             "WHERE",
	exprKindWindowFrameStart:  "WINDOW FRAME START",
	exprKindWindowFrameEnd:    "WINDOW FRAME END",
}

func (k exprKind) String() string {
	if k < 0 || k > exprKind(len(exprKindName)-1) {
		return fmt.Sprintf("exprKind(%d)", k)
	}
	return exprKindName[k]
}

// initGrouping initializes the groupby information for this scope.
func (s *scope) initGrouping() {
	if s.groupby != nil {
		panic(errors.AssertionFailedf("grouping initialized twice"))
	}
	s.groupby = &groupby{
		aggInScope:  s.replace(),
		aggOutScope: s.replace(),
	}
}

// inGroupingContext returns true if initGrouping was called. This is the
// case when the builder is building expressions in a SELECT list, and
// aggregates, GROUP BY, or HAVING are present. This is also true when the
// builder is building expressions inside the HAVING clause. When
// inGroupingContext returns true, groupByStrSet will be utilized to enforce
// scoping rules. See the comment above groupByStrSet for more details.
func (s *scope) inGroupingContext() bool {
	return s.groupby != nil
}

// push creates a new scope with this scope as its parent.
func (s *scope) push() *scope {
	r := s.builder.allocScope()
	r.parent = s
	return r
}

// replace creates a new scope with the parent of this scope as its parent.
func (s *scope) replace() *scope {
	r := s.builder.allocScope()
	r.parent = s.parent
	return r
}

// appendColumnsFromScope adds newly bound variables to this scope.
// The expressions in the new columns are reset to nil.
func (s *scope) appendColumnsFromScope(src *scope) {
	l := len(s.cols)
	s.cols = append(s.cols, src.cols...)
	// We want to reset the expressions, as these become pass-through columns in
	// the new scope.
	for i := l; i < len(s.cols); i++ {
		s.cols[i].scalar = nil
	}
}

// appendColumnsFromTable adds all columns from the given table metadata to this
// scope.
func (s *scope) appendColumnsFromTable(tabMeta *opt.TableMeta, alias *tree.TableName) {
	tab := tabMeta.Table
	if s.cols == nil {
		s.cols = make([]scopeColumn, 0, tab.ColumnCount())
	}
	for i, n := 0, tab.ColumnCount(); i < n; i++ {
		tabCol := tab.Column(i)
		s.cols = append(s.cols, scopeColumn{
			name:   tabCol.ColName(),
			table:  *alias,
			typ:    tabCol.DatumType(),
			id:     tabMeta.MetaID.ColumnID(i),
			hidden: tabCol.IsHidden(),
		})
	}
}

// appendColumns adds newly bound variables to this scope.
// The expressions in the new columns are reset to nil.
func (s *scope) appendColumns(cols []scopeColumn) {
	l := len(s.cols)
	s.cols = append(s.cols, cols...)
	// We want to reset the expressions, as these become pass-through columns in
	// the new scope.
	for i := l; i < len(s.cols); i++ {
		s.cols[i].scalar = nil
	}
}

// appendColumn adds a newly bound variable to this scope.
// The expression in the new column is reset to nil.
func (s *scope) appendColumn(col *scopeColumn) {
	s.cols = append(s.cols, *col)
	// We want to reset the expression, as this becomes a pass-through column in
	// the new scope.
	s.cols[len(s.cols)-1].scalar = nil
}

// addExtraColumns adds the given columns as extra columns, ignoring any
// duplicate columns that are already in the scope.
func (s *scope) addExtraColumns(cols []scopeColumn) {
	existing := s.colSetWithExtraCols()
	for i := range cols {
		if !existing.Contains(cols[i].id) {
			s.extraCols = append(s.extraCols, cols[i])
		}
	}
}

// setOrdering sets the ordering in the physical properties and adds any new
// columns as extra columns.
func (s *scope) setOrdering(cols []scopeColumn, ord opt.Ordering) {
	s.addExtraColumns(cols)
	s.ordering = ord
}

// copyOrdering copies the ordering and the ORDER BY columns from the src scope.
// The groups in the new columns are reset to 0.
func (s *scope) copyOrdering(src *scope) {
	s.ordering = src.ordering
	if src.ordering.Empty() {
		return
	}
	// Copy any columns that the scope doesn't already have.
	existing := s.colSetWithExtraCols()
	for _, ordCol := range src.ordering {
		if !existing.Contains(ordCol.ID()) {
			col := *src.getColumn(ordCol.ID())
			// We want to reset the group, as this becomes a pass-through column in
			// the new scope.
			col.scalar = nil
			s.extraCols = append(s.extraCols, col)
		}
	}
}
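
// scopeNestingExample is an illustrative sketch added for this writeup, not
// part of the original file. It shows the relationship established by push
// (a child of the receiver) versus replace (a sibling that shares the
// receiver's parent), assuming only that allocScope returns a fresh scope
// owned by the Builder.
func scopeNestingExample(b *Builder) {
	outer := b.allocScope()
	child := outer.push()      // child.parent == outer
	sibling := child.replace() // sibling.parent == outer, not child
	_ = sibling
}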

// getColumn returns the scopeColumn with the given id (either in cols or
// extraCols).
func (s *scope) getColumn(col opt.ColumnID) *scopeColumn {
	for i := range s.cols {
		if s.cols[i].id == col {
			return &s.cols[i]
		}
	}
	for i := range s.extraCols {
		if s.extraCols[i].id == col {
			return &s.extraCols[i]
		}
	}
	return nil
}

func (s *scope) makeColumnTypes() []*types.T {
	res := make([]*types.T, len(s.cols))
	for i := range res {
		res[i] = s.cols[i].typ
	}
	return res
}

// makeOrderingChoice returns an OrderingChoice that corresponds to s.ordering.
func (s *scope) makeOrderingChoice() physical.OrderingChoice {
	var oc physical.OrderingChoice
	oc.FromOrdering(s.ordering)
	return oc
}

// makePhysicalProps constructs physical properties using the columns in the
// scope for presentation and s.ordering for required ordering.
func (s *scope) makePhysicalProps() *physical.Required {
	p := &physical.Required{
		Presentation: s.makePresentation(),
	}
	p.Ordering.FromOrdering(s.ordering)
	return p
}

func (s *scope) makePresentation() physical.Presentation {
	if len(s.cols) == 0 {
		return nil
	}
	presentation := make(physical.Presentation, 0, len(s.cols))
	for i := range s.cols {
		col := &s.cols[i]
		if !col.hidden {
			presentation = append(presentation, opt.AliasedColumn{
				Alias: string(col.name),
				ID:    col.id,
			})
		}
	}
	return presentation
}

// makePresentationWithHiddenCols is only used when constructing the
// presentation for a [ ... ]-style data source.
func (s *scope) makePresentationWithHiddenCols() physical.Presentation {
	if len(s.cols) == 0 {
		return nil
	}
	presentation := make(physical.Presentation, 0, len(s.cols))
	for i := range s.cols {
		col := &s.cols[i]
		presentation = append(presentation, opt.AliasedColumn{
			Alias: string(col.name),
			ID:    col.id,
		})
	}
	return presentation
}

// walkExprTree walks the given expression and performs name resolution,
// replaces unresolved column names with columnProps, and replaces subqueries
// with typed subquery structs.
func (s *scope) walkExprTree(expr tree.Expr) tree.Expr {
	// TODO(peter): The caller should specify the desired number of columns. This
	// is needed when a subquery is used by an UPDATE statement.
	// TODO(andy): shouldn't this be part of the desired type rather than yet
	// another parameter?
	s.columns = 1

	expr, _ = tree.WalkExpr(s, expr)
	s.builder.semaCtx.IVarContainer = s
	return expr
}

// resolveCTE looks up a CTE name in this and the parent scopes, returning nil
// if it's not found.
func (s *scope) resolveCTE(name *tree.TableName) *cteSource {
	var nameStr string
	seenCTEs := false
	for s != nil {
		if s.ctes != nil {
			// Only compute the stringified name if we see any CTEs.
			if !seenCTEs {
				nameStr = name.String()
				seenCTEs = true
			}
			if cte, ok := s.ctes[nameStr]; ok {
				if cte.onRef != nil {
					cte.onRef()
				}
				return cte
			}
		}
		s = s.parent
	}
	return nil
}

// resolveType converts the given expr to a tree.TypedExpr. As part of the
// conversion, it performs name resolution, replaces unresolved column names
// with columnProps, and replaces subqueries with typed subquery structs.
//
// The desired type is a suggestion, but resolveType does not throw an error if
// the resolved type turns out to be different from desired (in contrast to
// resolveAndRequireType, which throws an error). If the result type is
// types.Unknown, then resolveType will wrap the expression in a type cast in
// order to produce the desired type.
func (s *scope) resolveType(expr tree.Expr, desired *types.T) tree.TypedExpr {
	expr = s.walkExprTree(expr)
	texpr, err := tree.TypeCheck(s.builder.ctx, expr, s.builder.semaCtx, desired)
	if err != nil {
		panic(err)
	}
	return s.ensureNullType(texpr, desired)
}

// resolveAndRequireType converts the given expr to a tree.TypedExpr. As part
// of the conversion, it performs name resolution, replaces unresolved
// column names with columnProps, and replaces subqueries with typed subquery
// structs.
//
// If the resolved type does not match the desired type, resolveAndRequireType
// throws an error (in contrast to resolveType, which returns the typed
// expression with no error). If the result type is types.Unknown, then
// resolveAndRequireType will wrap the expression in a type cast in order to
// produce the desired type.
func (s *scope) resolveAndRequireType(expr tree.Expr, desired *types.T) tree.TypedExpr {
	expr = s.walkExprTree(expr)
	texpr, err := tree.TypeCheckAndRequire(s.builder.ctx, expr, s.builder.semaCtx, desired, s.context.String())
	if err != nil {
		panic(err)
	}
	return s.ensureNullType(texpr, desired)
}

// ensureNullType tests the type of the given expression. If types.Unknown, then
// ensureNullType wraps the expression in a CAST to the desired type (assuming
// it is not types.Any). types.Unknown is a special type used for null values,
// and can be cast to any other type.
func (s *scope) ensureNullType(texpr tree.TypedExpr, desired *types.T) tree.TypedExpr {
	if desired.Family() != types.AnyFamily && texpr.ResolvedType().Family() == types.UnknownFamily {
		texpr = tree.NewTypedCastExpr(texpr, desired)
	}
	return texpr
}

// isOuterColumn returns true if the given column is not present in the current
// scope (it may or may not be present in an ancestor scope).
func (s *scope) isOuterColumn(id opt.ColumnID) bool {
	for i := range s.cols {
		col := &s.cols[i]
		if col.id == id {
			return false
		}
	}

	for i := range s.windows {
		w := &s.windows[i]
		if w.id == id {
			return false
		}
	}

	return true
}

// colSet returns a ColSet of all the columns in this scope,
// excluding extraCols.
func (s *scope) colSet() opt.ColSet {
	var colSet opt.ColSet
	for i := range s.cols {
		colSet.Add(s.cols[i].id)
	}
	return colSet
}

// colSetWithExtraCols returns a ColSet of all the columns in this scope,
// including extraCols.
func (s *scope) colSetWithExtraCols() opt.ColSet {
	colSet := s.colSet()
	for i := range s.extraCols {
		colSet.Add(s.extraCols[i].id)
	}
	return colSet
}

// hasSameColumns returns true if this scope has the same columns
// as the other scope.
//
// NOTE: This function is currently only called by
// Builder.constructProjectForScope, which uses it to determine whether or not
// to construct a projection. Since the projection includes the extra columns,
// this check is sufficient to determine whether or not the projection is
// necessary. Be careful if using this function for another purpose.
func (s *scope) hasSameColumns(other *scope) bool {
	return s.colSetWithExtraCols().Equals(other.colSetWithExtraCols())
}

// removeHiddenCols removes hidden columns from the scope (and moves them to
// extraCols, in case they are referenced by ORDER BY or DISTINCT ON).
func (s *scope) removeHiddenCols() {
	n := 0
	for i := range s.cols {
		if s.cols[i].hidden {
			s.extraCols = append(s.extraCols, s.cols[i])
		} else {
			if n != i {
				s.cols[n] = s.cols[i]
			}
			n++
		}
	}
	s.cols = s.cols[:n]
}

// isAnonymousTable returns true if the table name of the first column
// in this scope is empty.
func (s *scope) isAnonymousTable() bool {
	return len(s.cols) > 0 && s.cols[0].table.ObjectName == ""
}

// setTableAlias qualifies the names of all columns in this scope with the
// given alias name, as if they were part of a table with that name. If the
// alias is the empty string, then setTableAlias removes any existing column
// qualifications, as if the columns were part of an "anonymous" table.
func (s *scope) setTableAlias(alias tree.Name) {
	tn := tree.MakeUnqualifiedTableName(alias)
	for i := range s.cols {
		s.cols[i].table = tn
	}
}

// See (*scope).findExistingCol.
func findExistingColInList(
	expr tree.TypedExpr, cols []scopeColumn, allowSideEffects bool,
) *scopeColumn {
	exprStr := symbolicExprStr(expr)
	for i := range cols {
		col := &cols[i]
		if expr == col {
			return col
		}
		if exprStr == col.getExprStr() {
			if allowSideEffects || col.scalar == nil {
				return col
			}
			var p props.Shared
			memo.BuildSharedProps(col.scalar, &p)
			if !p.CanHaveSideEffects {
				return col
			}
		}
	}
	return nil
}

// findExistingCol finds the given expression among the bound variables in this
// scope. Returns nil if the expression is not found (or an expression is found
// but it has side-effects and allowSideEffects is false).
func (s *scope) findExistingCol(expr tree.TypedExpr, allowSideEffects bool) *scopeColumn {
	return findExistingColInList(expr, s.cols, allowSideEffects)
}
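
// findExistingColExample is an illustrative sketch added for this writeup,
// not part of the original file. It shows how duplicate render expressions
// can be deduplicated: if two SELECT expressions print to the same symbolic
// string (e.g. both are k + 1), the second lookup returns the column already
// built for the first, so only one projection needs to be constructed.
func findExistingColExample(s *scope, texpr tree.TypedExpr) *scopeColumn {
	// Side-effecting expressions are only reused when explicitly allowed.
	return s.findExistingCol(texpr, false /* allowSideEffects */)
}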

// startAggFunc is called when the builder starts building an aggregate
// function. It is used to disallow nested aggregates and ensure that a
// grouping error is not raised for the aggregate arguments. For example:
//   SELECT max(v) FROM kv GROUP BY k
// should not throw an error, even though v is not a grouping column.
// Non-grouping columns are allowed inside aggregate functions.
//
// startAggFunc returns a temporary scope for building the aggregate arguments.
// It is not possible to know the correct scope until the arguments are fully
// built. At that point, endAggFunc can be used to find the correct scope.
// If endAggFunc returns a different scope than startAggFunc, the columns
// will be transferred to the correct scope by buildAggregateFunction.
func (s *scope) startAggFunc() *scope {
	if s.inAgg {
		panic(sqlbase.NewAggInAggError())
	}
	s.inAgg = true

	if s.groupby == nil {
		return s.builder.allocScope()
	}
	return s.groupby.aggInScope
}

// endAggFunc is called when the builder finishes building an aggregate
// function. It is used in combination with startAggFunc to disallow nested
// aggregates and prevent grouping errors while building aggregate arguments.
//
// In addition, endAggFunc finds the correct groupby structure, given
// that the aggregate references the columns in cols. The reference scope
// is the one closest to the current scope which contains at least one of the
// variables referenced by the aggregate (or the current scope if the aggregate
// references no variables). endAggFunc also ensures that aggregate functions
// are only used in a grouping scope.
func (s *scope) endAggFunc(cols opt.ColSet) (g *groupby) {
	if !s.inAgg {
		panic(errors.AssertionFailedf("mismatched calls to start/end aggFunc"))
	}
	s.inAgg = false

	for curr := s; curr != nil; curr = curr.parent {
		if cols.Len() == 0 || cols.Intersects(curr.colSet()) {
			curr.verifyAggregateContext()
			if curr.groupby == nil {
				curr.initGrouping()
			}
			return curr.groupby
		}
	}

	panic(errors.AssertionFailedf("aggregate function is not allowed in this context"))
}

// verifyAggregateContext checks that the current scope is allowed to contain
// aggregate functions.
func (s *scope) verifyAggregateContext() {
	switch s.context {
	case exprKindLateralJoin:
		panic(pgerror.Newf(pgcode.Grouping,
			"aggregate functions are not allowed in FROM clause of their own query level",
		))

	case exprKindOn:
		panic(pgerror.Newf(pgcode.Grouping,
			"aggregate functions are not allowed in JOIN conditions",
		))

	case exprKindWhere:
		panic(tree.NewInvalidFunctionUsageError(tree.AggregateClass, s.context.String()))
	}
}

// scope implements the tree.Visitor interface so that it can walk through
// a tree.Expr tree, perform name resolution, and replace unresolved column
// names with a scopeColumn. The info stored in scopeColumn is necessary for
// Builder.buildScalar to construct a "variable" memo expression.
var _ tree.Visitor = &scope{}

// ColumnSourceMeta implements the tree.ColumnSourceMeta interface.
func (*scope) ColumnSourceMeta() {}

// ColumnSourceMeta implements the tree.ColumnSourceMeta interface.
func (*scopeColumn) ColumnSourceMeta() {}

// ColumnResolutionResult implements the tree.ColumnResolutionResult interface.
func (*scopeColumn) ColumnResolutionResult() {}

// FindSourceProvidingColumn is part of the tree.ColumnItemResolver interface.
func (s *scope) FindSourceProvidingColumn(
	_ context.Context, colName tree.Name,
) (prefix *tree.TableName, srcMeta tree.ColumnSourceMeta, colHint int, err error) {
	var candidateFromAnonSource *scopeColumn
	var candidateWithPrefix *scopeColumn
	var hiddenCandidate *scopeColumn
	var moreThanOneCandidateFromAnonSource bool
	var moreThanOneCandidateWithPrefix bool
	var moreThanOneHiddenCandidate bool

	// We only allow hidden columns in the current scope. Hidden columns
	// in parent scopes are not accessible.
	allowHidden := true

	// If multiple columns match colName in the same scope, we return an error
	// due to ambiguity. If no columns match in the current scope, we
	// search the parent scope. If the column is not found in any of the
	// ancestor scopes, we return an error.
	reportBackfillError := false
	for ; s != nil; s, allowHidden = s.parent, false {
		for i := range s.cols {
			col := &s.cols[i]
			if col.name != colName {
				continue
			}

			// If the matching column is a mutation column, then act as if it's not
			// present so that matches in higher scopes can be found. However, if
			// no match is found in higher scopes, report a backfill error rather
			// than a "not found" error.
			if col.mutation {
				reportBackfillError = true
				continue
			}

			if col.table.ObjectName == "" && !col.hidden {
				if candidateFromAnonSource != nil {
					moreThanOneCandidateFromAnonSource = true
					break
				}
				candidateFromAnonSource = col
			} else if !col.hidden {
				if candidateWithPrefix != nil {
					moreThanOneCandidateWithPrefix = true
				}
				candidateWithPrefix = col
			} else if allowHidden {
				if hiddenCandidate != nil {
					moreThanOneHiddenCandidate = true
				}
				hiddenCandidate = col
			}
		}

		// The table name was unqualified, so if a single anonymous source exists
		// with a matching non-hidden column, use that.
		if moreThanOneCandidateFromAnonSource {
			return nil, nil, -1, s.newAmbiguousColumnError(
				colName, allowHidden, moreThanOneCandidateFromAnonSource, moreThanOneCandidateWithPrefix, moreThanOneHiddenCandidate,
			)
		}
		if candidateFromAnonSource != nil {
			return &candidateFromAnonSource.table, candidateFromAnonSource, int(candidateFromAnonSource.id), nil
		}

		// Else if a single named source exists with a matching non-hidden column,
		// use that.
		if candidateWithPrefix != nil && !moreThanOneCandidateWithPrefix {
			return &candidateWithPrefix.table, candidateWithPrefix, int(candidateWithPrefix.id), nil
		}
		if moreThanOneCandidateWithPrefix || moreThanOneHiddenCandidate {
			return nil, nil, -1, s.newAmbiguousColumnError(
				colName, allowHidden, moreThanOneCandidateFromAnonSource, moreThanOneCandidateWithPrefix, moreThanOneHiddenCandidate,
			)
		}

		// One last option: if a single source exists with a matching hidden
		// column, use that.
		if hiddenCandidate != nil {
			return &hiddenCandidate.table, hiddenCandidate, int(hiddenCandidate.id), nil
		}
	}

	// Make a copy of colName so that passing a reference to tree.ErrString does
	// not cause colName to be allocated on the heap in the happy (no error) path
	// above.
	tmpName := colName
	if reportBackfillError {
		return nil, nil, -1, makeBackfillError(tmpName)
	}
	return nil, nil, -1, sqlbase.NewUndefinedColumnError(tree.ErrString(&tmpName))
}

// FindSourceMatchingName is part of the tree.ColumnItemResolver interface.
func (s *scope) FindSourceMatchingName(
	_ context.Context, tn tree.TableName,
) (
	res tree.NumResolutionResults,
	prefix *tree.TableName,
	srcMeta tree.ColumnSourceMeta,
	err error,
) {
	// If multiple sources match tn in the same scope, we return an error
	// due to ambiguity. If no sources match in the current scope, we
	// search the parent scope. If the source is not found in any of the
	// ancestor scopes, we return an error.
	var source tree.TableName
	for ; s != nil; s = s.parent {
		sources := make(map[tree.TableName]struct{})
		for i := range s.cols {
			sources[s.cols[i].table] = struct{}{}
		}

		found := false
		for src := range sources {
			if !sourceNameMatches(src, tn) {
				continue
			}
			if found {
				return tree.MoreThanOne, nil, s, newAmbiguousSourceError(&tn)
			}
			found = true
			source = src
		}

		if found {
			return tree.ExactlyOne, &source, s, nil
		}
	}

	return tree.NoResults, nil, s, nil
}

// sourceNameMatches checks whether a request for table name toFind
// can be satisfied by the FROM source name srcName.
//
// For example:
//   - a request for "kv" is matched by a source named "db1.public.kv"
//   - a request for "public.kv" is not matched by a source named just "kv"
func sourceNameMatches(srcName tree.TableName, toFind tree.TableName) bool {
	if srcName.ObjectName != toFind.ObjectName {
		return false
	}
	if toFind.ExplicitSchema {
		if srcName.SchemaName != toFind.SchemaName {
			return false
		}
		if toFind.ExplicitCatalog {
			if srcName.CatalogName != toFind.CatalogName {
				return false
			}
		}
	}
	return true
}

// Resolve is part of the tree.ColumnItemResolver interface.
func (s *scope) Resolve(
	_ context.Context,
	prefix *tree.TableName,
	srcMeta tree.ColumnSourceMeta,
	colHint int,
	colName tree.Name,
) (tree.ColumnResolutionResult, error) {
	if colHint >= 0 {
		// Column was found by FindSourceProvidingColumn above.
		return srcMeta.(*scopeColumn), nil
	}

	// Otherwise, a table is known but not the column yet.
	inScope := srcMeta.(*scope)
	for i := range inScope.cols {
		col := &inScope.cols[i]
		if col.name == colName && sourceNameMatches(*prefix, col.table) {
			return col, nil
		}
	}

	return nil, sqlbase.NewUndefinedColumnError(tree.ErrString(tree.NewColumnItem(prefix, colName)))
}

func makeUntypedTuple(labels []string, texprs []tree.TypedExpr) *tree.Tuple {
	exprs := make(tree.Exprs, len(texprs))
	for i, e := range texprs {
		exprs[i] = e
	}
	return &tree.Tuple{Exprs: exprs, Labels: labels}
}
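
// sourceNameMatchesExample is an illustrative sketch added for this writeup,
// not part of the original file; the table names are hypothetical. It mirrors
// the examples in the sourceNameMatches comment above.
func sourceNameMatchesExample() (bool, bool) {
	// A source named "db1.public.kv".
	src := tree.MakeUnqualifiedTableName("kv")
	src.CatalogName, src.SchemaName = "db1", "public"
	src.ExplicitCatalog, src.ExplicitSchema = true, true

	// A request for "kv" is matched by "db1.public.kv".
	first := sourceNameMatches(src, tree.MakeUnqualifiedTableName("kv")) // true

	// A request for "public.kv" is not matched by a source named just "kv",
	// because the requested schema is explicit but the source has none.
	toFind := tree.MakeUnqualifiedTableName("kv")
	toFind.SchemaName, toFind.ExplicitSchema = "public", true
	second := sourceNameMatches(tree.MakeUnqualifiedTableName("kv"), toFind) // false

	return first, second
}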

// VisitPre is part of the Visitor interface.
//
// NB: This code is adapted from sql/select_name_resolution.go and
// sql/subquery.go.
func (s *scope) VisitPre(expr tree.Expr) (recurse bool, newExpr tree.Expr) {
	switch t := expr.(type) {
	case *tree.AllColumnsSelector, *tree.TupleStar:
		// AllColumnsSelectors and TupleStars at the top level of a SELECT clause
		// are replaced when the select's renders are prepared. If we
		// encounter one here during expression analysis, it's being used
		// as an argument to an inner expression/function. In that case,
		// treat it as a tuple of the expanded columns.
		//
		// Hence:
		//   SELECT kv.* FROM kv                 -> SELECT k, v FROM kv
		//   SELECT (kv.*) FROM kv               -> SELECT (k, v) FROM kv
		//   SELECT COUNT(DISTINCT kv.*) FROM kv -> SELECT COUNT(DISTINCT (k, v)) FROM kv
		//
		labels, exprs := s.builder.expandStar(expr, s)
		// We return an untyped tuple because name resolution occurs
		// before type checking, and type checking will resolve the
		// tuple's type. However we need to preserve the labels in
		// case of e.g. `SELECT (kv.*).v`.
		return false, makeUntypedTuple(labels, exprs)

	case *tree.UnresolvedName:
		vn, err := t.NormalizeVarName()
		if err != nil {
			panic(err)
		}
		return s.VisitPre(vn)

	case *tree.ColumnItem:
		colI, err := t.Resolve(s.builder.ctx, s)
		if err != nil {
			panic(err)
		}
		return false, colI.(*scopeColumn)

	case *tree.FuncExpr:
		def, err := t.Func.Resolve(s.builder.semaCtx.SearchPath)
		if err != nil {
			panic(err)
		}

		if isGenerator(def) && s.replaceSRFs {
			expr = s.replaceSRF(t, def)
			break
		}

		if isAggregate(def) && t.WindowDef == nil {
			expr = s.replaceAggregate(t, def)
			break
		}

		if t.WindowDef != nil {
			expr = s.replaceWindowFn(t, def)
			break
		}

		if isSQLFn(def) {
			expr = s.replaceSQLFn(t, def)
			break
		}

	case *tree.ArrayFlatten:
		if sub, ok := t.Subquery.(*tree.Subquery); ok {
			// Copy the ArrayFlatten expression so that the tree isn't mutated.
			copy := *t
			copy.Subquery = s.replaceSubquery(
				sub, false /* wrapInTuple */, 1 /* desiredNumColumns */, extraColsAllowed,
			)
			expr = &copy
		}

	case *tree.ComparisonExpr:
		switch t.Operator {
		case tree.In, tree.NotIn, tree.Any, tree.Some, tree.All:
			if sub, ok := t.Right.(*tree.Subquery); ok {
				// Copy the Comparison expression so that the tree isn't mutated.
				copy := *t
				copy.Right = s.replaceSubquery(
					sub, true /* wrapInTuple */, -1 /* desiredNumColumns */, noExtraColsAllowed,
				)
				expr = &copy
			}
		}

	case *tree.Subquery:
		if t.Exists {
			expr = s.replaceSubquery(
				t, true /* wrapInTuple */, -1 /* desiredNumColumns */, noExtraColsAllowed,
			)
		} else {
			expr = s.replaceSubquery(
				t, false /* wrapInTuple */, s.columns /* desiredNumColumns */, noExtraColsAllowed,
			)
		}
	}

	// Reset the desired number of columns since if the subquery is a child of
	// any other expression, type checking will verify the number of columns.
	s.columns = -1
	return true, expr
}

// replaceSRF returns an srf struct that can be used to replace a raw SRF. When
// this struct is encountered during the build process, it is replaced with a
// reference to the column returned by the SRF (if the SRF returns a single
// column) or a tuple of column references (if the SRF returns multiple
// columns).
//
// replaceSRF also stores a pointer to the new srf struct in this scope's srfs
// slice. The slice is used later by the Builder to convert the input from
// the FROM clause to a lateral cross join between the input and a Zip of all
// the srfs in the s.srfs slice. See Builder.buildProjectSet in srfs.go for
// more details.
func (s *scope) replaceSRF(f *tree.FuncExpr, def *tree.FunctionDefinition) *srf {
	// We need to save and restore the previous value of the field in
	// semaCtx in case we are recursively called within a subquery
	// context.
	defer s.builder.semaCtx.Properties.Restore(s.builder.semaCtx.Properties)

	s.builder.semaCtx.Properties.Require(s.context.String(),
		tree.RejectAggregates|tree.RejectWindowApplications|tree.RejectNestedGenerators)

	expr := f.Walk(s)
	typedFunc, err := tree.TypeCheck(s.builder.ctx, expr, s.builder.semaCtx, types.Any)
	if err != nil {
		panic(err)
	}

	srfScope := s.push()
	var outCol *scopeColumn

	var typedFuncExpr = typedFunc.(*tree.FuncExpr)
	if s.builder.shouldCreateDefaultColumn(typedFuncExpr) {
		outCol = s.builder.addColumn(srfScope, def.Name, typedFunc)
	}
	out := s.builder.buildFunction(typedFuncExpr, s, srfScope, outCol, nil)
	srf := &srf{
		FuncExpr: typedFuncExpr,
		cols:     srfScope.cols,
		fn:       out,
	}
	s.srfs = append(s.srfs, srf)

	// Add the output columns to this scope, so the column references added
	// by the build process will not be treated as outer columns.
	s.cols = append(s.cols, srf.cols...)
	return srf
}

// isOrderedSetAggregate returns whether the input function definition is an
// ordered-set aggregate, and the overridden function definition if so.
func isOrderedSetAggregate(def *tree.FunctionDefinition) (*tree.FunctionDefinition, bool) {
	// The impl functions are private because they should never be run directly.
	// Thus, they need to be marked as non-private before using them.
	switch def {
	case tree.FunDefs["percentile_disc"]:
		newDef := *tree.FunDefs["percentile_disc_impl"]
		newDef.Private = false
		return &newDef, true
	case tree.FunDefs["percentile_cont"]:
		newDef := *tree.FunDefs["percentile_cont_impl"]
		newDef.Private = false
		return &newDef, true
	}
	return def, false
}
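
// orderedSetAggregateExample is an illustrative sketch added for this
// writeup, not part of the original file. percentile_disc and percentile_cont
// resolve to their private *_impl definitions so that the WITHIN GROUP
// ordering column can be appended as an ordinary argument.
func orderedSetAggregateExample() bool {
	implDef, ok := isOrderedSetAggregate(tree.FunDefs["percentile_disc"])
	return ok && !implDef.Private // true: the returned impl copy is made public
}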

// replaceAggregate returns an aggregateInfo that can be used to replace a raw
// aggregate function. When an aggregateInfo is encountered during the build
// process, it is replaced with a reference to the column returned by the
// aggregation.
//
// replaceAggregate also stores the aggregateInfo in the aggregation scope for
// this aggregate, using the aggOutScope.groupby.aggs slice. The aggregation
// scope is the one closest to the current scope which contains at least one of
// the variables referenced by the aggregate (or the current scope if the
// aggregate references no variables). The aggOutScope.groupby.aggs slice is
// used later by the Builder to build aggregations in the aggregation scope.
func (s *scope) replaceAggregate(f *tree.FuncExpr, def *tree.FunctionDefinition) tree.Expr {
	f, def = s.replaceCount(f, def)

	// We need to save and restore the previous value of the field in
	// semaCtx in case we are recursively called within a subquery
	// context.
	defer s.builder.semaCtx.Properties.Restore(s.builder.semaCtx.Properties)

	s.builder.semaCtx.Properties.Require("aggregate",
		tree.RejectNestedAggregates|tree.RejectWindowApplications|tree.RejectGenerators)

	// Make a copy of f so we can modify it if needed.
	fCopy := *f
	// Override ordered-set aggregates to use their impl counterparts.
	if orderedSetDef, found := isOrderedSetAggregate(def); found {
		// Ensure that the aggregation is well formed.
		if f.AggType != tree.OrderedSetAgg || len(f.OrderBy) != 1 {
			panic(pgerror.Newf(
				pgcode.InvalidFunctionDefinition,
				"ordered-set aggregations must have a WITHIN GROUP clause containing one ORDER BY column"))
		}

		// Override function definition.
		def = orderedSetDef
		fCopy.Func.FunctionReference = orderedSetDef

		// Copy Exprs slice.
		oldExprs := f.Exprs
		fCopy.Exprs = make(tree.Exprs, len(oldExprs))
		copy(fCopy.Exprs, oldExprs)

		// Add implicit column to the input expressions.
		fCopy.Exprs = append(fCopy.Exprs, fCopy.OrderBy[0].Expr.(tree.TypedExpr))
	}

	expr := fCopy.Walk(s)

	// We need to do this check here to ensure that we check the usage of special
	// functions with the right error message.
	if f.Filter != nil {
		func() {
			oldProps := s.builder.semaCtx.Properties
			defer func() { s.builder.semaCtx.Properties.Restore(oldProps) }()

			s.builder.semaCtx.Properties.Require("FILTER", tree.RejectSpecial)
			_, err := tree.TypeCheck(s.builder.ctx, expr.(*tree.FuncExpr).Filter, s.builder.semaCtx, types.Any)
			if err != nil {
				panic(err)
			}
		}()
	}

	typedFunc, err := tree.TypeCheck(s.builder.ctx, expr, s.builder.semaCtx, types.Any)
	if err != nil {
		panic(err)
	}
	if typedFunc == tree.DNull {
		return tree.DNull
	}

	f = typedFunc.(*tree.FuncExpr)

	private := memo.FunctionPrivate{
		Name:       def.Name,
		Properties: &def.FunctionProperties,
		Overload:   f.ResolvedOverload(),
	}

	return s.builder.buildAggregateFunction(f, &private, s)
}

func (s *scope) lookupWindowDef(name tree.Name) *tree.WindowDef {
	for i := range s.windowDefs {
		if s.windowDefs[i].Name == name {
			return s.windowDefs[i]
		}
	}
	panic(pgerror.Newf(pgcode.UndefinedObject, "window %q does not exist", name))
}

func (s *scope) constructWindowDef(def tree.WindowDef) tree.WindowDef {
	switch {
	case def.RefName != "":
		// SELECT rank() OVER (w) FROM t WINDOW w AS (...)
		// We copy the referenced window specification, and modify it if necessary.
		result, err := tree.OverrideWindowDef(s.lookupWindowDef(def.RefName), def)
		if err != nil {
			panic(err)
		}
		return result

	case def.Name != "":
		// SELECT rank() OVER w FROM t WINDOW w AS (...)
		// Note the lack of parens around w, compared to the first case.
		// We use the referenced window specification directly, without modification.
		return *s.lookupWindowDef(def.Name)

	default:
		return def
	}
}

func (s *scope) replaceWindowFn(f *tree.FuncExpr, def *tree.FunctionDefinition) tree.Expr {
	f, def = s.replaceCount(f, def)

	if err := tree.CheckIsWindowOrAgg(def); err != nil {
		panic(err)
	}

	// We need to save and restore the previous value of the field in
	// semaCtx in case we are recursively called within a subquery
	// context.
	defer s.builder.semaCtx.Properties.Restore(s.builder.semaCtx.Properties)

	s.builder.semaCtx.Properties.Require("window",
		tree.RejectNestedWindowFunctions)

	// Make a copy of f so we can modify the WindowDef.
	fCopy := *f
	newWindowDef := s.constructWindowDef(*f.WindowDef)
	fCopy.WindowDef = &newWindowDef

	expr := fCopy.Walk(s)

	typedFunc, err := tree.TypeCheck(s.builder.ctx, expr, s.builder.semaCtx, types.Any)
	if err != nil {
		panic(err)
	}
	if typedFunc == tree.DNull {
		return tree.DNull
	}

	f = typedFunc.(*tree.FuncExpr)

	// We will be performing type checking on expressions from PARTITION BY and
	// ORDER BY clauses below, and we need the semantic context to know that we
	// are in a window function. InWindowFunc is updated when type checking
	// FuncExpr above, but it is reset upon returning from that, so we need to do
	// this update manually.
	defer func(ctx *tree.SemaContext, prevWindow bool) {
		ctx.Properties.Derived.InWindowFunc = prevWindow
	}(
		s.builder.semaCtx,
		s.builder.semaCtx.Properties.Derived.InWindowFunc,
	)
	s.builder.semaCtx.Properties.Derived.InWindowFunc = true

	oldPartitions := f.WindowDef.Partitions
	f.WindowDef.Partitions = make(tree.Exprs, len(oldPartitions))
	for i, e := range oldPartitions {
		typedExpr := s.resolveType(e, types.Any)
		f.WindowDef.Partitions[i] = typedExpr
	}

	oldOrderBy := f.WindowDef.OrderBy
	f.WindowDef.OrderBy = make(tree.OrderBy, len(oldOrderBy))
	for i := range oldOrderBy {
		ord := *oldOrderBy[i]
		if ord.OrderType != tree.OrderByColumn {
			panic(errOrderByIndexInWindow)
		}
		typedExpr := s.resolveType(ord.Expr, types.Any)
		ord.Expr = typedExpr
		f.WindowDef.OrderBy[i] = &ord
	}

	if f.WindowDef.Frame != nil {
		if err := analyzeWindowFrame(s, f.WindowDef); err != nil {
			panic(err)
		}
	}

	info := windowInfo{
		FuncExpr: f,
		def: memo.FunctionPrivate{
			Name:       def.Name,
			Properties: &def.FunctionProperties,
			Overload:   f.ResolvedOverload(),
		},
	}

	if col := findExistingColInList(&info, s.windows, false /* allowSideEffects */); col != nil {
		return col.expr
	}

	info.col = &scopeColumn{
		name: tree.Name(def.Name),
		typ:  f.ResolvedType(),
		id:   s.builder.factory.Metadata().AddColumn(def.Name, f.ResolvedType()),
		expr: &info,
	}

	s.windows = append(s.windows, *info.col)

	return &info
}

// replaceSQLFn replaces a tree.SQLClass function with a sqlFnInfo struct. See
// comments above tree.SQLClass and sqlFnInfo for details.
func (s *scope) replaceSQLFn(f *tree.FuncExpr, def *tree.FunctionDefinition) tree.Expr {
	// We need to save and restore the previous value of the field in
	// semaCtx in case we are recursively called within a subquery
	// context.
	defer s.builder.semaCtx.Properties.Restore(s.builder.semaCtx.Properties)

	s.builder.semaCtx.Properties.Require("SQL function", tree.RejectSpecial)

	expr := f.Walk(s)
	typedFunc, err := tree.TypeCheck(s.builder.ctx, expr, s.builder.semaCtx, types.Any)
	if err != nil {
		panic(err)
	}

	f = typedFunc.(*tree.FuncExpr)
	args := make(memo.ScalarListExpr, len(f.Exprs))
	for i, arg := range f.Exprs {
		args[i] = s.builder.buildScalar(arg.(tree.TypedExpr), s, nil, nil, nil)
	}

	info := sqlFnInfo{
		FuncExpr: f,
		def: memo.FunctionPrivate{
			Name:       def.Name,
			Properties: &def.FunctionProperties,
			Overload:   f.ResolvedOverload(),
		},
		args: args,
	}
	return &info
}

var (
	errOrderByIndexInWindow = pgerror.New(pgcode.FeatureNotSupported, "ORDER BY INDEX in window definition is not supported")
)

// analyzeWindowFrame performs semantic analysis of offset expressions of
// the window frame.
func analyzeWindowFrame(s *scope, windowDef *tree.WindowDef) error {
	frame := windowDef.Frame
	bounds := frame.Bounds
	startBound, endBound := bounds.StartBound, bounds.EndBound
	var requiredType *types.T
	switch frame.Mode {
	case tree.ROWS:
		// In ROWS mode, offsets must be non-null, non-negative integers. Non-nullity
		// and non-negativity will be checked later.
		requiredType = types.Int
	case tree.RANGE:
		// In RANGE mode, offsets must be non-null and non-negative datums of a type
		// dependent on the type of the ordering column. Non-nullity and
		// non-negativity will be checked later.
		if bounds.HasOffset() {
			// At least one of the bounds is of type 'value' PRECEDING or 'value' FOLLOWING.
			// We require ordering on a single column that supports addition/subtraction.
			if len(windowDef.OrderBy) != 1 {
				return pgerror.Newf(pgcode.Windowing,
					"RANGE with offset PRECEDING/FOLLOWING requires exactly one ORDER BY column")
			}
			requiredType = windowDef.OrderBy[0].Expr.(tree.TypedExpr).ResolvedType()
			if !types.IsAdditiveType(requiredType) {
				return pgerror.Newf(pgcode.Windowing,
					"RANGE with offset PRECEDING/FOLLOWING is not supported for column type %s",
					log.Safe(requiredType))
			}
			if types.IsDateTimeType(requiredType) {
				// Spec: for datetime ordering columns, the required type is an 'interval'.
				requiredType = types.Interval
			}
		}
	case tree.GROUPS:
		if len(windowDef.OrderBy) == 0 {
			return pgerror.Newf(pgcode.Windowing, "GROUPS mode requires an ORDER BY clause")
		}
		// In GROUPS mode, offsets must be non-null, non-negative integers.
		// Non-nullity and non-negativity will be checked later.
		requiredType = types.Int
	default:
		return errors.AssertionFailedf("unexpected WindowFrameMode: %d", errors.Safe(frame.Mode))
	}
	if startBound != nil && startBound.OffsetExpr != nil {
		oldContext := s.context
		s.context = exprKindWindowFrameStart
		startBound.OffsetExpr = s.resolveAndRequireType(startBound.OffsetExpr, requiredType)
		s.context = oldContext
	}
	if endBound != nil && endBound.OffsetExpr != nil {
		oldContext := s.context
		s.context = exprKindWindowFrameEnd
		endBound.OffsetExpr = s.resolveAndRequireType(endBound.OffsetExpr, requiredType)
		s.context = oldContext
	}
	return nil
}

// replaceCount replaces count(*) with count_rows().
func (s *scope) replaceCount(
	f *tree.FuncExpr, def *tree.FunctionDefinition,
) (*tree.FuncExpr, *tree.FunctionDefinition) {
	if len(f.Exprs) != 1 {
		return f, def
	}
	vn, ok := f.Exprs[0].(tree.VarName)
	if !ok {
		return f, def
	}
	vn, err := vn.NormalizeVarName()
	if err != nil {
		panic(err)
	}
	f.Exprs[0] = vn

	if strings.EqualFold(def.Name, "count") && f.Type == 0 {
		if _, ok := vn.(tree.UnqualifiedStar); ok {
			if f.Filter != nil {
				// If we have a COUNT(*) with a FILTER, we need to synthesize an input
				// for the aggregation to be over, because otherwise we have no input
				// to hang the AggFilter off of.
				// Thus, we convert
				//   COUNT(*) FILTER (WHERE foo)
				// to
				//   COUNT(true) FILTER (WHERE foo).
				cpy := *f
				e := &cpy
				e.Exprs = tree.Exprs{tree.DBoolTrue}

				newDef, err := e.Func.Resolve(s.builder.semaCtx.SearchPath)
				if err != nil {
					panic(err)
				}

				return e, newDef
			}

			// Special case handling for COUNT(*) with no FILTER. This is a special
			// construct to count the number of rows; in this case * does NOT refer
			// to a set of columns. A * is invalid elsewhere (and will be caught by
			// TypeCheck()). Replace the function with COUNT_ROWS (which doesn't
			// take any arguments).
			e := &tree.FuncExpr{
				Func: tree.ResolvableFunctionReference{
					FunctionReference: &tree.UnresolvedName{
						NumParts: 1, Parts: tree.NameParts{"count_rows"},
					},
				},
			}
			// We call TypeCheck to fill in FuncExpr internals. This is a fixed
			// expression; we should not hit an error here.
			semaCtx := tree.MakeSemaContext()
			if _, err := e.TypeCheck(s.builder.ctx, &semaCtx, types.Any); err != nil {
				panic(err)
			}
			newDef, err := e.Func.Resolve(s.builder.semaCtx.SearchPath)
			if err != nil {
				panic(err)
			}
			e.Filter = f.Filter
			e.WindowDef = f.WindowDef
			return e, newDef
		}
		// TODO(rytaft): Add handling for tree.AllColumnsSelector to support
		// expressions like SELECT COUNT(kv.*) FROM kv
		// Similar to the work done in PR #17833.
	}

	return f, def
}

const (
	extraColsAllowed   = true
	noExtraColsAllowed = false
)
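
// replaceCountExample is an illustrative sketch added for this writeup, not
// part of the original file. COUNT(*) has no argument column to aggregate
// over, so replaceCount rewrites it to the zero-argument count_rows aggregate
// (and COUNT(*) FILTER (WHERE p) to COUNT(true) FILTER (WHERE p), so the
// FILTER has an input to attach to).
func replaceCountExample(s *scope) (*tree.FuncExpr, *tree.FunctionDefinition) {
	countStar := &tree.FuncExpr{
		Func:  tree.WrapFunction("count"),
		Exprs: tree.Exprs{tree.UnqualifiedStar{}},
	}
	// The returned definition resolves to count_rows.
	return s.replaceCount(countStar, tree.FunDefs["count"])
}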

// Replace a raw tree.Subquery node with a lazily typed subquery. wrapInTuple
// specifies whether the return type of the subquery should be wrapped in a
// tuple. wrapInTuple is true for subqueries that may return multiple rows in
// comparison expressions (e.g., IN, ANY, ALL) and EXISTS expressions.
// desiredNumColumns specifies the desired number of columns for the subquery.
// Specifying -1 for desiredNumColumns allows the subquery to return any
// number of columns and is used when the normal type checking machinery will
// verify that the correct number of columns is returned.
// If extraColsAllowed is true, extra columns built from the subquery (such as
// columns for which orderings have been requested) will not be stripped away.
// It is the duty of the caller to ensure that those columns are eventually
// dealt with.
func (s *scope) replaceSubquery(
	sub *tree.Subquery, wrapInTuple bool, desiredNumColumns int, extraColsAllowed bool,
) *subquery {
	return &subquery{
		Subquery:          sub,
		wrapInTuple:       wrapInTuple,
		desiredNumColumns: desiredNumColumns,
		extraColsAllowed:  extraColsAllowed,
		scope:             s,
	}
}

// VisitPost is part of the Visitor interface.
func (*scope) VisitPost(expr tree.Expr) tree.Expr {
	return expr
}

// scope implements the IndexedVarContainer interface so it can be used as
// semaCtx.IVarContainer. This allows tree.TypeCheck to determine the correct
// type for any IndexedVars.
var _ tree.IndexedVarContainer = &scope{}

// IndexedVarEval is part of the IndexedVarContainer interface.
func (s *scope) IndexedVarEval(idx int, ctx *tree.EvalContext) (tree.Datum, error) {
	panic(errors.AssertionFailedf("unimplemented: scope.IndexedVarEval"))
}

// IndexedVarResolvedType is part of the IndexedVarContainer interface.
func (s *scope) IndexedVarResolvedType(idx int) *types.T {
	if idx >= len(s.cols) {
		if len(s.cols) == 0 {
			panic(pgerror.Newf(pgcode.UndefinedColumn,
				"column reference @%d not allowed in this context", idx+1))
		}
		panic(pgerror.Newf(pgcode.UndefinedColumn,
			"invalid column ordinal: @%d", idx+1))
	}
	return s.cols[idx].typ
}

// IndexedVarNodeFormatter is part of the IndexedVarContainer interface.
func (s *scope) IndexedVarNodeFormatter(idx int) tree.NodeFormatter {
	panic(errors.AssertionFailedf("unimplemented: scope.IndexedVarNodeFormatter"))
}

// newAmbiguousColumnError returns an error with a helpful error message to be
// used in case of an ambiguous column reference.
func (s *scope) newAmbiguousColumnError(
	n tree.Name,
	allowHidden, moreThanOneCandidateFromAnonSource, moreThanOneCandidateWithPrefix, moreThanOneHiddenCandidate bool,
) error {
	colString := tree.ErrString(&n)
	var msgBuf bytes.Buffer
	sep := ""
	fmtCandidate := func(tn tree.TableName) {
		name := tree.ErrString(&tn)
		if len(name) == 0 {
			name = "<anonymous>"
		}
		fmt.Fprintf(&msgBuf, "%s%s.%s", sep, name, colString)
		sep = ", "
	}
	for i := range s.cols {
		col := &s.cols[i]
		if col.name == n && (allowHidden || !col.hidden) {
			if col.table.ObjectName == "" && !col.hidden {
				if moreThanOneCandidateFromAnonSource {
					// Only print first anonymous source, since other(s) are identical.
					fmtCandidate(col.table)
					break
				}
			} else if !col.hidden {
				if moreThanOneCandidateWithPrefix && !moreThanOneCandidateFromAnonSource {
					fmtCandidate(col.table)
				}
			} else {
				if moreThanOneHiddenCandidate && !moreThanOneCandidateWithPrefix && !moreThanOneCandidateFromAnonSource {
					fmtCandidate(col.table)
				}
			}
		}
	}

	return pgerror.Newf(pgcode.AmbiguousColumn,
		"column reference %q is ambiguous (candidates: %s)", colString, msgBuf.String(),
	)
}

// newAmbiguousSourceError returns an error with a helpful error message to be
// used in case of an ambiguous table name.
func newAmbiguousSourceError(tn *tree.TableName) error {
	if tn.Catalog() == "" {
		return pgerror.Newf(pgcode.AmbiguousAlias,
			"ambiguous source name: %q", tree.ErrString(tn))
	}
	return pgerror.Newf(pgcode.AmbiguousAlias,
		"ambiguous source name: %q (within database %q)",
		tree.ErrString(&tn.ObjectName), tree.ErrString(&tn.CatalogName))
}

func (s *scope) String() string {
	var buf bytes.Buffer

	if s.parent != nil {
		buf.WriteString(s.parent.String())
		buf.WriteString("->")
	}

	buf.WriteByte('(')
	for i, c := range s.cols {
		if i > 0 {
			buf.WriteByte(',')
		}
		fmt.Fprintf(&buf, "%s:%d", c.name.String(), c.id)
	}
	buf.WriteByte(')')

	return buf.String()
}
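
// scopeStringExample is an illustrative sketch added for this writeup, not
// part of the original file. String is handy when debugging the builder: each
// scope prints its columns as name:id pairs and parent scopes are chained
// with "->", so a child scope over a parent with columns k and v might render
// as "(k:1,v:2)->(count_rows:3)" (the column IDs here are hypothetical).
func scopeStringExample(s *scope) string {
	return s.String()
}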