github.com/bir3/gocompiler@v0.9.2202/src/go/parser/parser.go (about) 1 // Copyright 2009 The Go Authors. All rights reserved. 2 // Use of this source code is governed by a BSD-style 3 // license that can be found in the LICENSE file. 4 5 // Package parser implements a parser for Go source files. Input may be 6 // provided in a variety of forms (see the various Parse* functions); the 7 // output is an abstract syntax tree (AST) representing the Go source. The 8 // parser is invoked through one of the Parse* functions. 9 // 10 // The parser accepts a larger language than is syntactically permitted by 11 // the Go spec, for simplicity, and for improved robustness in the presence 12 // of syntax errors. For instance, in method declarations, the receiver is 13 // treated like an ordinary parameter list and thus may contain multiple 14 // entries where the spec permits exactly one. Consequently, the corresponding 15 // field in the AST (ast.FuncDecl.Recv) field is not restricted to one entry. 16 package parser 17 18 import ( 19 "fmt" 20 "github.com/bir3/gocompiler/src/go/ast" 21 "github.com/bir3/gocompiler/src/go/build/constraint" 22 "github.com/bir3/gocompiler/src/go/internal/typeparams" 23 "github.com/bir3/gocompiler/src/go/scanner" 24 "github.com/bir3/gocompiler/src/go/token" 25 "strings" 26 ) 27 28 // The parser structure holds the parser's internal state. 
29 type parser struct { 30 file *token.File 31 errors scanner.ErrorList 32 scanner scanner.Scanner 33 34 // Tracing/debugging 35 mode Mode // parsing mode 36 trace bool // == (mode&Trace != 0) 37 indent int // indentation used for tracing output 38 39 // Comments 40 comments []*ast.CommentGroup 41 leadComment *ast.CommentGroup // last lead comment 42 lineComment *ast.CommentGroup // last line comment 43 top bool // in top of file (before package clause) 44 goVersion string // minimum Go version found in //go:build comment 45 46 // Next token 47 pos token.Pos // token position 48 tok token.Token // one token look-ahead 49 lit string // token literal 50 51 // Error recovery 52 // (used to limit the number of calls to parser.advance 53 // w/o making scanning progress - avoids potential endless 54 // loops across multiple parser functions during error recovery) 55 syncPos token.Pos // last synchronization position 56 syncCnt int // number of parser.advance calls without progress 57 58 // Non-syntactic parser control 59 exprLev int // < 0: in control clause, >= 0: in expression 60 inRhs bool // if set, the parser is parsing a rhs expression 61 62 imports []*ast.ImportSpec // list of imports 63 64 // nestLev is used to track and limit the recursion depth 65 // during parsing. 66 nestLev int 67 } 68 69 func (p *parser) init(fset *token.FileSet, filename string, src []byte, mode Mode) { 70 p.file = fset.AddFile(filename, -1, len(src)) 71 eh := func(pos token.Position, msg string) { p.errors.Add(pos, msg) } 72 p.scanner.Init(p.file, src, eh, scanner.ScanComments) 73 74 p.top = true 75 p.mode = mode 76 p.trace = mode&Trace != 0 // for convenience (p.trace is used frequently) 77 p.next() 78 } 79 80 // ---------------------------------------------------------------------------- 81 // Parsing support 82 83 func (p *parser) printTrace(a ...any) { 84 const dots = ". . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . 
" 85 const n = len(dots) 86 pos := p.file.Position(p.pos) 87 fmt.Printf("%5d:%3d: ", pos.Line, pos.Column) 88 i := 2 * p.indent 89 for i > n { 90 fmt.Print(dots) 91 i -= n 92 } 93 // i <= n 94 fmt.Print(dots[0:i]) 95 fmt.Println(a...) 96 } 97 98 func trace(p *parser, msg string) *parser { 99 p.printTrace(msg, "(") 100 p.indent++ 101 return p 102 } 103 104 // Usage pattern: defer un(trace(p, "...")) 105 func un(p *parser) { 106 p.indent-- 107 p.printTrace(")") 108 } 109 110 // maxNestLev is the deepest we're willing to recurse during parsing 111 const maxNestLev int = 1e5 112 113 func incNestLev(p *parser) *parser { 114 p.nestLev++ 115 if p.nestLev > maxNestLev { 116 p.error(p.pos, "exceeded max nesting depth") 117 panic(bailout{}) 118 } 119 return p 120 } 121 122 // decNestLev is used to track nesting depth during parsing to prevent stack exhaustion. 123 // It is used along with incNestLev in a similar fashion to how un and trace are used. 124 func decNestLev(p *parser) { 125 p.nestLev-- 126 } 127 128 // Advance to the next token. 129 func (p *parser) next0() { 130 // Because of one-token look-ahead, print the previous token 131 // when tracing as it provides a more readable output. The 132 // very first token (!p.pos.IsValid()) is not initialized 133 // (it is token.ILLEGAL), so don't print it. 134 if p.trace && p.pos.IsValid() { 135 s := p.tok.String() 136 switch { 137 case p.tok.IsLiteral(): 138 p.printTrace(s, p.lit) 139 case p.tok.IsOperator(), p.tok.IsKeyword(): 140 p.printTrace("\"" + s + "\"") 141 default: 142 p.printTrace(s) 143 } 144 } 145 146 for { 147 p.pos, p.tok, p.lit = p.scanner.Scan() 148 if p.tok == token.COMMENT { 149 if p.top && strings.HasPrefix(p.lit, "//go:build") { 150 if x, err := constraint.Parse(p.lit); err == nil { 151 p.goVersion = constraint.GoVersion(x) 152 } 153 } 154 if p.mode&ParseComments == 0 { 155 continue 156 } 157 } else { 158 // Found a non-comment; top of file is over. 
159 p.top = false 160 } 161 break 162 } 163 } 164 165 // Consume a comment and return it and the line on which it ends. 166 func (p *parser) consumeComment() (comment *ast.Comment, endline int) { 167 // /*-style comments may end on a different line than where they start. 168 // Scan the comment for '\n' chars and adjust endline accordingly. 169 endline = p.file.Line(p.pos) 170 if p.lit[1] == '*' { 171 // don't use range here - no need to decode Unicode code points 172 for i := 0; i < len(p.lit); i++ { 173 if p.lit[i] == '\n' { 174 endline++ 175 } 176 } 177 } 178 179 comment = &ast.Comment{Slash: p.pos, Text: p.lit} 180 p.next0() 181 182 return 183 } 184 185 // Consume a group of adjacent comments, add it to the parser's 186 // comments list, and return it together with the line at which 187 // the last comment in the group ends. A non-comment token or n 188 // empty lines terminate a comment group. 189 func (p *parser) consumeCommentGroup(n int) (comments *ast.CommentGroup, endline int) { 190 var list []*ast.Comment 191 endline = p.file.Line(p.pos) 192 for p.tok == token.COMMENT && p.file.Line(p.pos) <= endline+n { 193 var comment *ast.Comment 194 comment, endline = p.consumeComment() 195 list = append(list, comment) 196 } 197 198 // add comment group to the comments list 199 comments = &ast.CommentGroup{List: list} 200 p.comments = append(p.comments, comments) 201 202 return 203 } 204 205 // Advance to the next non-comment token. In the process, collect 206 // any comment groups encountered, and remember the last lead and 207 // line comments. 208 // 209 // A lead comment is a comment group that starts and ends in a 210 // line without any other tokens and that is followed by a non-comment 211 // token on the line immediately after the comment group. 212 // 213 // A line comment is a comment group that follows a non-comment 214 // token on the same line, and that has no tokens after it on the line 215 // where it ends. 
216 // 217 // Lead and line comments may be considered documentation that is 218 // stored in the AST. 219 func (p *parser) next() { 220 p.leadComment = nil 221 p.lineComment = nil 222 prev := p.pos 223 p.next0() 224 225 if p.tok == token.COMMENT { 226 var comment *ast.CommentGroup 227 var endline int 228 229 if p.file.Line(p.pos) == p.file.Line(prev) { 230 // The comment is on same line as the previous token; it 231 // cannot be a lead comment but may be a line comment. 232 comment, endline = p.consumeCommentGroup(0) 233 if p.file.Line(p.pos) != endline || p.tok == token.SEMICOLON || p.tok == token.EOF { 234 // The next token is on a different line, thus 235 // the last comment group is a line comment. 236 p.lineComment = comment 237 } 238 } 239 240 // consume successor comments, if any 241 endline = -1 242 for p.tok == token.COMMENT { 243 comment, endline = p.consumeCommentGroup(1) 244 } 245 246 if endline+1 == p.file.Line(p.pos) { 247 // The next token is following on the line immediately after the 248 // comment group, thus the last comment group is a lead comment. 249 p.leadComment = comment 250 } 251 } 252 } 253 254 // A bailout panic is raised to indicate early termination. pos and msg are 255 // only populated when bailing out of object resolution. 256 type bailout struct { 257 pos token.Pos 258 msg string 259 } 260 261 func (p *parser) error(pos token.Pos, msg string) { 262 if p.trace { 263 defer un(trace(p, "error: "+msg)) 264 } 265 266 epos := p.file.Position(pos) 267 268 // If AllErrors is not set, discard errors reported on the same line 269 // as the last recorded error and stop parsing if there are more than 270 // 10 errors. 
271 if p.mode&AllErrors == 0 { 272 n := len(p.errors) 273 if n > 0 && p.errors[n-1].Pos.Line == epos.Line { 274 return // discard - likely a spurious error 275 } 276 if n > 10 { 277 panic(bailout{}) 278 } 279 } 280 281 p.errors.Add(epos, msg) 282 } 283 284 func (p *parser) errorExpected(pos token.Pos, msg string) { 285 msg = "expected " + msg 286 if pos == p.pos { 287 // the error happened at the current position; 288 // make the error message more specific 289 switch { 290 case p.tok == token.SEMICOLON && p.lit == "\n": 291 msg += ", found newline" 292 case p.tok.IsLiteral(): 293 // print 123 rather than 'INT', etc. 294 msg += ", found " + p.lit 295 default: 296 msg += ", found '" + p.tok.String() + "'" 297 } 298 } 299 p.error(pos, msg) 300 } 301 302 func (p *parser) expect(tok token.Token) token.Pos { 303 pos := p.pos 304 if p.tok != tok { 305 p.errorExpected(pos, "'"+tok.String()+"'") 306 } 307 p.next() // make progress 308 return pos 309 } 310 311 // expect2 is like expect, but it returns an invalid position 312 // if the expected token is not found. 313 func (p *parser) expect2(tok token.Token) (pos token.Pos) { 314 if p.tok == tok { 315 pos = p.pos 316 } else { 317 p.errorExpected(p.pos, "'"+tok.String()+"'") 318 } 319 p.next() // make progress 320 return 321 } 322 323 // expectClosing is like expect but provides a better error message 324 // for the common case of a missing comma before a newline. 325 func (p *parser) expectClosing(tok token.Token, context string) token.Pos { 326 if p.tok != tok && p.tok == token.SEMICOLON && p.lit == "\n" { 327 p.error(p.pos, "missing ',' before newline in "+context) 328 p.next() 329 } 330 return p.expect(tok) 331 } 332 333 // expectSemi consumes a semicolon and returns the applicable line comment. 
334 func (p *parser) expectSemi() (comment *ast.CommentGroup) { 335 // semicolon is optional before a closing ')' or '}' 336 if p.tok != token.RPAREN && p.tok != token.RBRACE { 337 switch p.tok { 338 case token.COMMA: 339 // permit a ',' instead of a ';' but complain 340 p.errorExpected(p.pos, "';'") 341 fallthrough 342 case token.SEMICOLON: 343 if p.lit == ";" { 344 // explicit semicolon 345 p.next() 346 comment = p.lineComment // use following comments 347 } else { 348 // artificial semicolon 349 comment = p.lineComment // use preceding comments 350 p.next() 351 } 352 return comment 353 default: 354 p.errorExpected(p.pos, "';'") 355 p.advance(stmtStart) 356 } 357 } 358 return nil 359 } 360 361 func (p *parser) atComma(context string, follow token.Token) bool { 362 if p.tok == token.COMMA { 363 return true 364 } 365 if p.tok != follow { 366 msg := "missing ','" 367 if p.tok == token.SEMICOLON && p.lit == "\n" { 368 msg += " before newline" 369 } 370 p.error(p.pos, msg+" in "+context) 371 return true // "insert" comma and continue 372 } 373 return false 374 } 375 376 func assert(cond bool, msg string) { 377 if !cond { 378 panic("go/parser internal error: " + msg) 379 } 380 } 381 382 // advance consumes tokens until the current token p.tok 383 // is in the 'to' set, or token.EOF. For error recovery. 384 func (p *parser) advance(to map[token.Token]bool) { 385 for ; p.tok != token.EOF; p.next() { 386 if to[p.tok] { 387 // Return only if parser made some progress since last 388 // sync or if it has not reached 10 advance calls without 389 // progress. Otherwise consume at least one token to 390 // avoid an endless parser loop (it is possible that 391 // both parseOperand and parseStmt call advance and 392 // correctly do not advance, thus the need for the 393 // invocation limit p.syncCnt). 
394 if p.pos == p.syncPos && p.syncCnt < 10 { 395 p.syncCnt++ 396 return 397 } 398 if p.pos > p.syncPos { 399 p.syncPos = p.pos 400 p.syncCnt = 0 401 return 402 } 403 // Reaching here indicates a parser bug, likely an 404 // incorrect token list in this function, but it only 405 // leads to skipping of possibly correct code if a 406 // previous error is present, and thus is preferred 407 // over a non-terminating parse. 408 } 409 } 410 } 411 412 var stmtStart = map[token.Token]bool{ 413 token.BREAK: true, 414 token.CONST: true, 415 token.CONTINUE: true, 416 token.DEFER: true, 417 token.FALLTHROUGH: true, 418 token.FOR: true, 419 token.GO: true, 420 token.GOTO: true, 421 token.IF: true, 422 token.RETURN: true, 423 token.SELECT: true, 424 token.SWITCH: true, 425 token.TYPE: true, 426 token.VAR: true, 427 } 428 429 var declStart = map[token.Token]bool{ 430 token.IMPORT: true, 431 token.CONST: true, 432 token.TYPE: true, 433 token.VAR: true, 434 } 435 436 var exprEnd = map[token.Token]bool{ 437 token.COMMA: true, 438 token.COLON: true, 439 token.SEMICOLON: true, 440 token.RPAREN: true, 441 token.RBRACK: true, 442 token.RBRACE: true, 443 } 444 445 // safePos returns a valid file position for a given position: If pos 446 // is valid to begin with, safePos returns pos. If pos is out-of-range, 447 // safePos returns the EOF position. 448 // 449 // This is hack to work around "artificial" end positions in the AST which 450 // are computed by adding 1 to (presumably valid) token positions. If the 451 // token positions are invalid due to parse errors, the resulting end position 452 // may be past the file's EOF position, which would lead to panics if used 453 // later on. 
454 func (p *parser) safePos(pos token.Pos) (res token.Pos) { 455 defer func() { 456 if recover() != nil { 457 res = token.Pos(p.file.Base() + p.file.Size()) // EOF position 458 } 459 }() 460 _ = p.file.Offset(pos) // trigger a panic if position is out-of-range 461 return pos 462 } 463 464 // ---------------------------------------------------------------------------- 465 // Identifiers 466 467 func (p *parser) parseIdent() *ast.Ident { 468 pos := p.pos 469 name := "_" 470 if p.tok == token.IDENT { 471 name = p.lit 472 p.next() 473 } else { 474 p.expect(token.IDENT) // use expect() error handling 475 } 476 return &ast.Ident{NamePos: pos, Name: name} 477 } 478 479 func (p *parser) parseIdentList() (list []*ast.Ident) { 480 if p.trace { 481 defer un(trace(p, "IdentList")) 482 } 483 484 list = append(list, p.parseIdent()) 485 for p.tok == token.COMMA { 486 p.next() 487 list = append(list, p.parseIdent()) 488 } 489 490 return 491 } 492 493 // ---------------------------------------------------------------------------- 494 // Common productions 495 496 // If lhs is set, result list elements which are identifiers are not resolved. 
497 func (p *parser) parseExprList() (list []ast.Expr) { 498 if p.trace { 499 defer un(trace(p, "ExpressionList")) 500 } 501 502 list = append(list, p.parseExpr()) 503 for p.tok == token.COMMA { 504 p.next() 505 list = append(list, p.parseExpr()) 506 } 507 508 return 509 } 510 511 func (p *parser) parseList(inRhs bool) []ast.Expr { 512 old := p.inRhs 513 p.inRhs = inRhs 514 list := p.parseExprList() 515 p.inRhs = old 516 return list 517 } 518 519 // ---------------------------------------------------------------------------- 520 // Types 521 522 func (p *parser) parseType() ast.Expr { 523 if p.trace { 524 defer un(trace(p, "Type")) 525 } 526 527 typ := p.tryIdentOrType() 528 529 if typ == nil { 530 pos := p.pos 531 p.errorExpected(pos, "type") 532 p.advance(exprEnd) 533 return &ast.BadExpr{From: pos, To: p.pos} 534 } 535 536 return typ 537 } 538 539 func (p *parser) parseQualifiedIdent(ident *ast.Ident) ast.Expr { 540 if p.trace { 541 defer un(trace(p, "QualifiedIdent")) 542 } 543 544 typ := p.parseTypeName(ident) 545 if p.tok == token.LBRACK { 546 typ = p.parseTypeInstance(typ) 547 } 548 549 return typ 550 } 551 552 // If the result is an identifier, it is not resolved. 553 func (p *parser) parseTypeName(ident *ast.Ident) ast.Expr { 554 if p.trace { 555 defer un(trace(p, "TypeName")) 556 } 557 558 if ident == nil { 559 ident = p.parseIdent() 560 } 561 562 if p.tok == token.PERIOD { 563 // ident is a package name 564 p.next() 565 sel := p.parseIdent() 566 return &ast.SelectorExpr{X: ident, Sel: sel} 567 } 568 569 return ident 570 } 571 572 // "[" has already been consumed, and lbrack is its position. 573 // If len != nil it is the already consumed array length. 
574 func (p *parser) parseArrayType(lbrack token.Pos, len ast.Expr) *ast.ArrayType { 575 if p.trace { 576 defer un(trace(p, "ArrayType")) 577 } 578 579 if len == nil { 580 p.exprLev++ 581 // always permit ellipsis for more fault-tolerant parsing 582 if p.tok == token.ELLIPSIS { 583 len = &ast.Ellipsis{Ellipsis: p.pos} 584 p.next() 585 } else if p.tok != token.RBRACK { 586 len = p.parseRhs() 587 } 588 p.exprLev-- 589 } 590 if p.tok == token.COMMA { 591 // Trailing commas are accepted in type parameter 592 // lists but not in array type declarations. 593 // Accept for better error handling but complain. 594 p.error(p.pos, "unexpected comma; expecting ]") 595 p.next() 596 } 597 p.expect(token.RBRACK) 598 elt := p.parseType() 599 return &ast.ArrayType{Lbrack: lbrack, Len: len, Elt: elt} 600 } 601 602 func (p *parser) parseArrayFieldOrTypeInstance(x *ast.Ident) (*ast.Ident, ast.Expr) { 603 if p.trace { 604 defer un(trace(p, "ArrayFieldOrTypeInstance")) 605 } 606 607 lbrack := p.expect(token.LBRACK) 608 trailingComma := token.NoPos // if valid, the position of a trailing comma preceding the ']' 609 var args []ast.Expr 610 if p.tok != token.RBRACK { 611 p.exprLev++ 612 args = append(args, p.parseRhs()) 613 for p.tok == token.COMMA { 614 comma := p.pos 615 p.next() 616 if p.tok == token.RBRACK { 617 trailingComma = comma 618 break 619 } 620 args = append(args, p.parseRhs()) 621 } 622 p.exprLev-- 623 } 624 rbrack := p.expect(token.RBRACK) 625 626 if len(args) == 0 { 627 // x []E 628 elt := p.parseType() 629 return x, &ast.ArrayType{Lbrack: lbrack, Elt: elt} 630 } 631 632 // x [P]E or x[P] 633 if len(args) == 1 { 634 elt := p.tryIdentOrType() 635 if elt != nil { 636 // x [P]E 637 if trailingComma.IsValid() { 638 // Trailing commas are invalid in array type fields. 639 p.error(trailingComma, "unexpected comma; expecting ]") 640 } 641 return x, &ast.ArrayType{Lbrack: lbrack, Len: args[0], Elt: elt} 642 } 643 } 644 645 // x[P], x[P1, P2], ... 
646 return nil, typeparams.PackIndexExpr(x, lbrack, args, rbrack) 647 } 648 649 func (p *parser) parseFieldDecl() *ast.Field { 650 if p.trace { 651 defer un(trace(p, "FieldDecl")) 652 } 653 654 doc := p.leadComment 655 656 var names []*ast.Ident 657 var typ ast.Expr 658 switch p.tok { 659 case token.IDENT: 660 name := p.parseIdent() 661 if p.tok == token.PERIOD || p.tok == token.STRING || p.tok == token.SEMICOLON || p.tok == token.RBRACE { 662 // embedded type 663 typ = name 664 if p.tok == token.PERIOD { 665 typ = p.parseQualifiedIdent(name) 666 } 667 } else { 668 // name1, name2, ... T 669 names = []*ast.Ident{name} 670 for p.tok == token.COMMA { 671 p.next() 672 names = append(names, p.parseIdent()) 673 } 674 // Careful dance: We don't know if we have an embedded instantiated 675 // type T[P1, P2, ...] or a field T of array type []E or [P]E. 676 if len(names) == 1 && p.tok == token.LBRACK { 677 name, typ = p.parseArrayFieldOrTypeInstance(name) 678 if name == nil { 679 names = nil 680 } 681 } else { 682 // T P 683 typ = p.parseType() 684 } 685 } 686 case token.MUL: 687 star := p.pos 688 p.next() 689 if p.tok == token.LPAREN { 690 // *(T) 691 p.error(p.pos, "cannot parenthesize embedded type") 692 p.next() 693 typ = p.parseQualifiedIdent(nil) 694 // expect closing ')' but no need to complain if missing 695 if p.tok == token.RPAREN { 696 p.next() 697 } 698 } else { 699 // *T 700 typ = p.parseQualifiedIdent(nil) 701 } 702 typ = &ast.StarExpr{Star: star, X: typ} 703 704 case token.LPAREN: 705 p.error(p.pos, "cannot parenthesize embedded type") 706 p.next() 707 if p.tok == token.MUL { 708 // (*T) 709 star := p.pos 710 p.next() 711 typ = &ast.StarExpr{Star: star, X: p.parseQualifiedIdent(nil)} 712 } else { 713 // (T) 714 typ = p.parseQualifiedIdent(nil) 715 } 716 // expect closing ')' but no need to complain if missing 717 if p.tok == token.RPAREN { 718 p.next() 719 } 720 721 default: 722 pos := p.pos 723 p.errorExpected(pos, "field name or embedded type") 724 
p.advance(exprEnd) 725 typ = &ast.BadExpr{From: pos, To: p.pos} 726 } 727 728 var tag *ast.BasicLit 729 if p.tok == token.STRING { 730 tag = &ast.BasicLit{ValuePos: p.pos, Kind: p.tok, Value: p.lit} 731 p.next() 732 } 733 734 comment := p.expectSemi() 735 736 field := &ast.Field{Doc: doc, Names: names, Type: typ, Tag: tag, Comment: comment} 737 return field 738 } 739 740 func (p *parser) parseStructType() *ast.StructType { 741 if p.trace { 742 defer un(trace(p, "StructType")) 743 } 744 745 pos := p.expect(token.STRUCT) 746 lbrace := p.expect(token.LBRACE) 747 var list []*ast.Field 748 for p.tok == token.IDENT || p.tok == token.MUL || p.tok == token.LPAREN { 749 // a field declaration cannot start with a '(' but we accept 750 // it here for more robust parsing and better error messages 751 // (parseFieldDecl will check and complain if necessary) 752 list = append(list, p.parseFieldDecl()) 753 } 754 rbrace := p.expect(token.RBRACE) 755 756 return &ast.StructType{ 757 Struct: pos, 758 Fields: &ast.FieldList{ 759 Opening: lbrace, 760 List: list, 761 Closing: rbrace, 762 }, 763 } 764 } 765 766 func (p *parser) parsePointerType() *ast.StarExpr { 767 if p.trace { 768 defer un(trace(p, "PointerType")) 769 } 770 771 star := p.expect(token.MUL) 772 base := p.parseType() 773 774 return &ast.StarExpr{Star: star, X: base} 775 } 776 777 func (p *parser) parseDotsType() *ast.Ellipsis { 778 if p.trace { 779 defer un(trace(p, "DotsType")) 780 } 781 782 pos := p.expect(token.ELLIPSIS) 783 elt := p.parseType() 784 785 return &ast.Ellipsis{Ellipsis: pos, Elt: elt} 786 } 787 788 type field struct { 789 name *ast.Ident 790 typ ast.Expr 791 } 792 793 func (p *parser) parseParamDecl(name *ast.Ident, typeSetsOK bool) (f field) { 794 // TODO(rFindley) refactor to be more similar to paramDeclOrNil in the syntax 795 // package 796 if p.trace { 797 defer un(trace(p, "ParamDeclOrNil")) 798 } 799 800 ptok := p.tok 801 if name != nil { 802 p.tok = token.IDENT // force token.IDENT case in switch 
below 803 } else if typeSetsOK && p.tok == token.TILDE { 804 // "~" ... 805 return field{nil, p.embeddedElem(nil)} 806 } 807 808 switch p.tok { 809 case token.IDENT: 810 // name 811 if name != nil { 812 f.name = name 813 p.tok = ptok 814 } else { 815 f.name = p.parseIdent() 816 } 817 switch p.tok { 818 case token.IDENT, token.MUL, token.ARROW, token.FUNC, token.CHAN, token.MAP, token.STRUCT, token.INTERFACE, token.LPAREN: 819 // name type 820 f.typ = p.parseType() 821 822 case token.LBRACK: 823 // name "[" type1, ..., typeN "]" or name "[" n "]" type 824 f.name, f.typ = p.parseArrayFieldOrTypeInstance(f.name) 825 826 case token.ELLIPSIS: 827 // name "..." type 828 f.typ = p.parseDotsType() 829 return // don't allow ...type "|" ... 830 831 case token.PERIOD: 832 // name "." ... 833 f.typ = p.parseQualifiedIdent(f.name) 834 f.name = nil 835 836 case token.TILDE: 837 if typeSetsOK { 838 f.typ = p.embeddedElem(nil) 839 return 840 } 841 842 case token.OR: 843 if typeSetsOK { 844 // name "|" typeset 845 f.typ = p.embeddedElem(f.name) 846 f.name = nil 847 return 848 } 849 } 850 851 case token.MUL, token.ARROW, token.FUNC, token.LBRACK, token.CHAN, token.MAP, token.STRUCT, token.INTERFACE, token.LPAREN: 852 // type 853 f.typ = p.parseType() 854 855 case token.ELLIPSIS: 856 // "..." type 857 // (always accepted) 858 f.typ = p.parseDotsType() 859 return // don't allow ...type "|" ... 860 861 default: 862 // TODO(rfindley): this is incorrect in the case of type parameter lists 863 // (should be "']'" in that case) 864 p.errorExpected(p.pos, "')'") 865 p.advance(exprEnd) 866 } 867 868 // [name] type "|" 869 if typeSetsOK && p.tok == token.OR && f.typ != nil { 870 f.typ = p.embeddedElem(f.typ) 871 } 872 873 return 874 } 875 876 func (p *parser) parseParameterList(name0 *ast.Ident, typ0 ast.Expr, closing token.Token) (params []*ast.Field) { 877 if p.trace { 878 defer un(trace(p, "ParameterList")) 879 } 880 881 // Type parameters are the only parameter list closed by ']'. 
882 tparams := closing == token.RBRACK 883 884 pos0 := p.pos 885 if name0 != nil { 886 pos0 = name0.Pos() 887 } else if typ0 != nil { 888 pos0 = typ0.Pos() 889 } 890 891 // Note: The code below matches the corresponding code in the syntax 892 // parser closely. Changes must be reflected in either parser. 893 // For the code to match, we use the local []field list that 894 // corresponds to []syntax.Field. At the end, the list must be 895 // converted into an []*ast.Field. 896 897 var list []field 898 var named int // number of parameters that have an explicit name and type 899 var typed int // number of parameters that have an explicit type 900 901 for name0 != nil || p.tok != closing && p.tok != token.EOF { 902 var par field 903 if typ0 != nil { 904 if tparams { 905 typ0 = p.embeddedElem(typ0) 906 } 907 par = field{name0, typ0} 908 } else { 909 par = p.parseParamDecl(name0, tparams) 910 } 911 name0 = nil // 1st name was consumed if present 912 typ0 = nil // 1st typ was consumed if present 913 if par.name != nil || par.typ != nil { 914 list = append(list, par) 915 if par.name != nil && par.typ != nil { 916 named++ 917 } 918 if par.typ != nil { 919 typed++ 920 } 921 } 922 if !p.atComma("parameter list", closing) { 923 break 924 } 925 p.next() 926 } 927 928 if len(list) == 0 { 929 return // not uncommon 930 } 931 932 // distribute parameter types (len(list) > 0) 933 if named == 0 { 934 // all unnamed => found names are type names 935 for i := 0; i < len(list); i++ { 936 par := &list[i] 937 if typ := par.name; typ != nil { 938 par.typ = typ 939 par.name = nil 940 } 941 } 942 if tparams { 943 // This is the same error handling as below, adjusted for type parameters only. 944 // See comment below for details. 
(go.dev/issue/64534) 945 var errPos token.Pos 946 var msg string 947 if named == typed /* same as typed == 0 */ { 948 errPos = p.pos // position error at closing ] 949 msg = "missing type constraint" 950 } else { 951 errPos = pos0 // position at opening [ or first name 952 msg = "missing type parameter name" 953 if len(list) == 1 { 954 msg += " or invalid array length" 955 } 956 } 957 p.error(errPos, msg) 958 } 959 } else if named != len(list) { 960 // some named or we're in a type parameter list => all must be named 961 var errPos token.Pos // left-most error position (or invalid) 962 var typ ast.Expr // current type (from right to left) 963 for i := len(list) - 1; i >= 0; i-- { 964 if par := &list[i]; par.typ != nil { 965 typ = par.typ 966 if par.name == nil { 967 errPos = typ.Pos() 968 n := ast.NewIdent("_") 969 n.NamePos = errPos // correct position 970 par.name = n 971 } 972 } else if typ != nil { 973 par.typ = typ 974 } else { 975 // par.typ == nil && typ == nil => we only have a par.name 976 errPos = par.name.Pos() 977 par.typ = &ast.BadExpr{From: errPos, To: p.pos} 978 } 979 } 980 if errPos.IsValid() { 981 var msg string 982 if tparams { 983 // Not all parameters are named because named != len(list). 984 // If named == typed we must have parameters that have no types, 985 // and they must be at the end of the parameter list, otherwise 986 // the types would have been filled in by the right-to-left sweep 987 // above and we wouldn't have an error. Since we are in a type 988 // parameter list, the missing types are constraints. 989 if named == typed { 990 errPos = p.pos // position error at closing ] 991 msg = "missing type constraint" 992 } else { 993 msg = "missing type parameter name" 994 // go.dev/issue/60812 995 if len(list) == 1 { 996 msg += " or invalid array length" 997 } 998 } 999 } else { 1000 msg = "mixed named and unnamed parameters" 1001 } 1002 p.error(errPos, msg) 1003 } 1004 } 1005 1006 // Convert list to []*ast.Field. 
	// If list contains types only, each type gets its own ast.Field.
	if named == 0 {
		// parameter list consists of types only
		for _, par := range list {
			assert(par.typ != nil, "nil type in unnamed parameter list")
			params = append(params, &ast.Field{Type: par.typ})
		}
		return
	}

	// If the parameter list consists of named parameters with types,
	// collect all names with the same types into a single ast.Field.
	var names []*ast.Ident
	var typ ast.Expr
	addParams := func() {
		assert(typ != nil, "nil type in named parameter list")
		field := &ast.Field{Names: names, Type: typ}
		params = append(params, field)
		names = nil
	}
	for _, par := range list {
		if par.typ != typ {
			// Type changed: flush the names collected so far under the
			// previous type into one field, then start a new group.
			if len(names) > 0 {
				addParams()
			}
			typ = par.typ
		}
		names = append(names, par.name)
	}
	if len(names) > 0 {
		addParams()
	}
	return
}

// parseParameters parses a parenthesized parameter list, optionally
// preceded (if acceptTParams is set) by a bracketed type parameter list.
// tparams is nil if no (or an empty, erroneous) type parameter list was
// present.
func (p *parser) parseParameters(acceptTParams bool) (tparams, params *ast.FieldList) {
	if p.trace {
		defer un(trace(p, "Parameters"))
	}

	if acceptTParams && p.tok == token.LBRACK {
		opening := p.pos
		p.next()
		// [T any](params) syntax
		list := p.parseParameterList(nil, nil, token.RBRACK)
		rbrack := p.expect(token.RBRACK)
		tparams = &ast.FieldList{Opening: opening, List: list, Closing: rbrack}
		// Type parameter lists must not be empty.
		if tparams.NumFields() == 0 {
			p.error(tparams.Closing, "empty type parameter list")
			tparams = nil // avoid follow-on errors
		}
	}

	opening := p.expect(token.LPAREN)

	var fields []*ast.Field
	if p.tok != token.RPAREN {
		fields = p.parseParameterList(nil, nil, token.RPAREN)
	}

	rparen := p.expect(token.RPAREN)
	params = &ast.FieldList{Opening: opening, List: fields, Closing: rparen}

	return
}

// parseResult parses a function result: either a parenthesized parameter
// list, a single unparenthesized type (wrapped in one ast.Field without
// names), or nothing (nil result).
func (p *parser) parseResult() *ast.FieldList {
	if p.trace {
		defer un(trace(p, "Result"))
	}

	if p.tok == token.LPAREN {
		_, results := p.parseParameters(false)
		return results
	}

	typ := p.tryIdentOrType()
	if typ != nil {
		list := make([]*ast.Field, 1)
		list[0] = &ast.Field{Type: typ}
		return &ast.FieldList{List: list}
	}

	return nil
}

// parseFuncType parses a "func" type. A type parameter list is accepted
// for error tolerance but reported as an error (function types must not
// have type parameters).
func (p *parser) parseFuncType() *ast.FuncType {
	if p.trace {
		defer un(trace(p, "FuncType"))
	}

	pos := p.expect(token.FUNC)
	tparams, params := p.parseParameters(true)
	if tparams != nil {
		p.error(tparams.Pos(), "function type must have no type parameters")
	}
	results := p.parseResult()

	return &ast.FuncType{Func: pos, Params: params, Results: results}
}

// parseMethodSpec parses a single interface element: a method signature
// or an embedded (possibly instantiated) type. For an embedded type the
// returned field has no Names.
func (p *parser) parseMethodSpec() *ast.Field {
	if p.trace {
		defer un(trace(p, "MethodSpec"))
	}

	doc := p.leadComment
	var idents []*ast.Ident
	var typ ast.Expr
	x := p.parseTypeName(nil)
	if ident, _ := x.(*ast.Ident); ident != nil {
		switch {
		case p.tok == token.LBRACK:
			// generic method or embedded instantiated type
			lbrack := p.pos
			p.next()
			p.exprLev++
			x := p.parseExpr()
			p.exprLev--
			if name0, _ := x.(*ast.Ident); name0 != nil && p.tok != token.COMMA && p.tok != token.RBRACK {
				// generic method m[T any]
				//
				// Interface methods do not have type parameters. We parse them for a
				// better error message and improved error recovery.
				_ = p.parseParameterList(name0, nil, token.RBRACK)
				_ = p.expect(token.RBRACK)
				p.error(lbrack, "interface method must have no type parameters")

				// TODO(rfindley) refactor to share code with parseFuncType.
				_, params := p.parseParameters(false)
				results := p.parseResult()
				idents = []*ast.Ident{ident}
				typ = &ast.FuncType{
					Func:    token.NoPos,
					Params:  params,
					Results: results,
				}
			} else {
				// embedded instantiated type
				// TODO(rfindley) should resolve all identifiers in x.
				list := []ast.Expr{x}
				if p.atComma("type argument list", token.RBRACK) {
					p.exprLev++
					p.next()
					for p.tok != token.RBRACK && p.tok != token.EOF {
						list = append(list, p.parseType())
						if !p.atComma("type argument list", token.RBRACK) {
							break
						}
						p.next()
					}
					p.exprLev--
				}
				rbrack := p.expectClosing(token.RBRACK, "type argument list")
				typ = typeparams.PackIndexExpr(ident, lbrack, list, rbrack)
			}
		case p.tok == token.LPAREN:
			// ordinary method
			// TODO(rfindley) refactor to share code with parseFuncType.
			_, params := p.parseParameters(false)
			results := p.parseResult()
			idents = []*ast.Ident{ident}
			typ = &ast.FuncType{Func: token.NoPos, Params: params, Results: results}
		default:
			// embedded type
			typ = x
		}
	} else {
		// embedded, possibly instantiated type
		typ = x
		if p.tok == token.LBRACK {
			// embedded instantiated interface
			typ = p.parseTypeInstance(typ)
		}
	}

	// Comment is added at the callsite: the field below may be joined with
	// additional type specs using '|'.
	// TODO(rfindley) this should be refactored.
	// TODO(rfindley) add more tests for comment handling.
	return &ast.Field{Doc: doc, Names: idents, Type: typ}
}

// embeddedElem parses a union of embedded terms joined by '|'. If x is
// non-nil it is used as the first term; otherwise one is parsed. A single
// term is returned as-is (no BinaryExpr wrapper).
func (p *parser) embeddedElem(x ast.Expr) ast.Expr {
	if p.trace {
		defer un(trace(p, "EmbeddedElem"))
	}
	if x == nil {
		x = p.embeddedTerm()
	}
	for p.tok == token.OR {
		t := new(ast.BinaryExpr)
		t.OpPos = p.pos
		t.Op = token.OR
		p.next()
		t.X = x
		t.Y = p.embeddedTerm()
		x = t
	}
	return x
}

// embeddedTerm parses a single union term: a '~'-prefixed underlying
// type (as a UnaryExpr) or a plain type. A BadExpr is returned if
// neither is present.
func (p *parser) embeddedTerm() ast.Expr {
	if p.trace {
		defer un(trace(p, "EmbeddedTerm"))
	}
	if p.tok == token.TILDE {
		t := new(ast.UnaryExpr)
		t.OpPos = p.pos
		t.Op = token.TILDE
		p.next()
		t.X = p.parseType()
		return t
	}

	t := p.tryIdentOrType()
	if t == nil {
		pos := p.pos
		p.errorExpected(pos, "~ term or type")
		p.advance(exprEnd)
		return &ast.BadExpr{From: pos, To: p.pos}
	}

	return t
}

// parseInterfaceType parses an interface type, collecting method specs
// and embedded/union elements into a single field list (Methods).
func (p *parser) parseInterfaceType() *ast.InterfaceType {
	if p.trace {
		defer un(trace(p, "InterfaceType"))
	}

	pos := p.expect(token.INTERFACE)
	lbrace := p.expect(token.LBRACE)

	var list []*ast.Field

parseElements:
	for {
		switch {
		case p.tok == token.IDENT:
			f := p.parseMethodSpec()
			if f.Names == nil {
				// Embedded type: it may be the first term of a union.
				f.Type = p.embeddedElem(f.Type)
			}
			f.Comment = p.expectSemi()
			list = append(list, f)
		case p.tok == token.TILDE:
			typ := p.embeddedElem(nil)
			comment := p.expectSemi()
			list = append(list, &ast.Field{Type: typ, Comment: comment})
		default:
			if t := p.tryIdentOrType(); t != nil {
				typ := p.embeddedElem(t)
				comment := p.expectSemi()
				list = append(list, &ast.Field{Type: typ, Comment: comment})
			} else {
				break parseElements
			}
		}
	}

	// TODO(rfindley): the error produced here could be improved, since we could
	// accept an identifier, 'type', or a '}' at this point.
	rbrace := p.expect(token.RBRACE)

	return &ast.InterfaceType{
		Interface: pos,
		Methods: &ast.FieldList{
			Opening: lbrace,
			List:    list,
			Closing: rbrace,
		},
	}
}

// parseMapType parses a "map[K]V" type.
func (p *parser) parseMapType() *ast.MapType {
	if p.trace {
		defer un(trace(p, "MapType"))
	}

	pos := p.expect(token.MAP)
	p.expect(token.LBRACK)
	key := p.parseType()
	p.expect(token.RBRACK)
	value := p.parseType()

	return &ast.MapType{Map: pos, Key: key, Value: value}
}

// parseChanType parses a channel type: "chan T", "chan<- T", or "<-chan T".
func (p *parser) parseChanType() *ast.ChanType {
	if p.trace {
		defer un(trace(p, "ChanType"))
	}

	pos := p.pos
	dir := ast.SEND | ast.RECV
	var arrow token.Pos
	if p.tok == token.CHAN {
		p.next()
		if p.tok == token.ARROW {
			arrow = p.pos
			p.next()
			dir = ast.SEND
		}
	} else {
		arrow = p.expect(token.ARROW)
		p.expect(token.CHAN)
		dir = ast.RECV
	}
	value := p.parseType()

	return &ast.ChanType{Begin: pos, Arrow: arrow, Dir: dir, Value: value}
}

// parseTypeInstance parses the bracketed type argument list following typ
// and packs the result into an index expression. An empty argument list
// is reported as an error and yields an IndexExpr with a BadExpr index.
func (p *parser) parseTypeInstance(typ ast.Expr) ast.Expr {
	if p.trace {
		defer un(trace(p, "TypeInstance"))
	}

	opening := p.expect(token.LBRACK)
	p.exprLev++
	var list []ast.Expr
	for p.tok != token.RBRACK && p.tok != token.EOF {
		list = append(list, p.parseType())
		if !p.atComma("type argument list", token.RBRACK) {
			break
		}
		p.next()
	}
	p.exprLev--

	closing := p.expectClosing(token.RBRACK, "type argument list")

	if len(list) == 0 {
		p.errorExpected(closing, "type argument list")
		return &ast.IndexExpr{
			X:      typ,
			Lbrack: opening,
			Index:  &ast.BadExpr{From: opening + 1, To: closing},
			Rbrack: closing,
		}
	}

	return typeparams.PackIndexExpr(typ, opening, list, closing)
}

// tryIdentOrType parses a type if one begins at the current token and
// returns nil otherwise (without consuming any input or reporting an
// error).
func (p *parser) tryIdentOrType() ast.Expr {
	defer decNestLev(incNestLev(p))

	switch p.tok {
	case token.IDENT:
		typ := p.parseTypeName(nil)
		if p.tok == token.LBRACK {
			typ = p.parseTypeInstance(typ)
		}
		return typ
	case token.LBRACK:
		lbrack := p.expect(token.LBRACK)
		return p.parseArrayType(lbrack, nil)
	case token.STRUCT:
		return p.parseStructType()
	case token.MUL:
		return p.parsePointerType()
	case token.FUNC:
		return p.parseFuncType()
	case token.INTERFACE:
		return p.parseInterfaceType()
	case token.MAP:
		return p.parseMapType()
	case token.CHAN, token.ARROW:
		return p.parseChanType()
	case token.LPAREN:
		lparen := p.pos
		p.next()
		typ := p.parseType()
		rparen := p.expect(token.RPAREN)
		return &ast.ParenExpr{Lparen: lparen, X: typ, Rparen: rparen}
	}

	// no type found
	return nil
}

// ----------------------------------------------------------------------------
// Blocks

// parseStmtList parses statements until one of 'case', 'default', '}',
// or EOF is reached.
func (p *parser) parseStmtList() (list []ast.Stmt) {
	if p.trace {
		defer un(trace(p, "StatementList"))
	}

	for p.tok != token.CASE && p.tok != token.DEFAULT && p.tok != token.RBRACE && p.tok != token.EOF {
		list = append(list, p.parseStmt())
	}

	return
}

// parseBody parses a brace-delimited function body.
func (p *parser) parseBody() *ast.BlockStmt {
	if p.trace {
		defer un(trace(p, "Body"))
	}

	lbrace := p.expect(token.LBRACE)
	list := p.parseStmtList()
	rbrace := p.expect2(token.RBRACE)

	return &ast.BlockStmt{Lbrace: lbrace, List: list, Rbrace: rbrace}
}

// parseBlockStmt parses a brace-delimited statement block.
func (p *parser) parseBlockStmt() *ast.BlockStmt {
	if p.trace {
		defer un(trace(p, "BlockStmt"))
	}

	lbrace := p.expect(token.LBRACE)
	list := p.parseStmtList()
	rbrace := p.expect2(token.RBRACE)

	return &ast.BlockStmt{Lbrace: lbrace, List: list, Rbrace: rbrace}
}

// ----------------------------------------------------------------------------
// Expressions

// parseFuncTypeOrLit parses a function type and, if a '{' follows, the
// body that makes it a function literal.
func (p *parser) parseFuncTypeOrLit() ast.Expr {
	if p.trace {
		defer un(trace(p, "FuncTypeOrLit"))
	}

	typ := p.parseFuncType()
	if p.tok != token.LBRACE {
		// function type only
		return typ
	}

	p.exprLev++
	body := p.parseBody()
	p.exprLev--

	return &ast.FuncLit{Type: typ, Body: body}
}

// parseOperand may return an expression or a raw type (incl. array
// types of the form [...]T). Callers must verify the result.
func (p *parser) parseOperand() ast.Expr {
	if p.trace {
		defer un(trace(p, "Operand"))
	}

	switch p.tok {
	case token.IDENT:
		x := p.parseIdent()
		return x

	case token.INT, token.FLOAT, token.IMAG, token.CHAR, token.STRING:
		x := &ast.BasicLit{ValuePos: p.pos, Kind: p.tok, Value: p.lit}
		p.next()
		return x

	case token.LPAREN:
		lparen := p.pos
		p.next()
		p.exprLev++
		x := p.parseRhs() // types may be parenthesized: (some type)
		p.exprLev--
		rparen := p.expect(token.RPAREN)
		return &ast.ParenExpr{Lparen: lparen, X: x, Rparen: rparen}

	case token.FUNC:
		return p.parseFuncTypeOrLit()
	}

	if typ := p.tryIdentOrType(); typ != nil { // do not consume trailing type parameters
		// could be type for composite literal or conversion
		_, isIdent := typ.(*ast.Ident)
		assert(!isIdent, "type cannot be identifier")
		return typ
	}

	// we have an error
	pos := p.pos
	p.errorExpected(pos, "operand")
	p.advance(stmtStart)
	return &ast.BadExpr{From: pos, To: p.pos}
}

// parseSelector parses the identifier after a '.' and wraps x in a
// selector expression.
func (p *parser) parseSelector(x ast.Expr) ast.Expr {
	if p.trace {
		defer un(trace(p, "Selector"))
	}

	sel := p.parseIdent()

	return &ast.SelectorExpr{X: x, Sel: sel}
}

// parseTypeAssertion parses x.(T) or x.(type); in the latter (type
// switch) case the resulting TypeAssertExpr has a nil Type.
func (p *parser) parseTypeAssertion(x ast.Expr) ast.Expr {
	if p.trace {
		defer un(trace(p, "TypeAssertion"))
	}

	lparen := p.expect(token.LPAREN)
	var typ ast.Expr
	if p.tok == token.TYPE {
		// type switch: typ == nil
		p.next()
	} else {
		typ = p.parseType()
	}
	rparen := p.expect(token.RPAREN)

	return &ast.TypeAssertExpr{X: x, Type: typ, Lparen: lparen, Rparen: rparen}
}

// parseIndexOrSliceOrInstance parses the bracketed suffix of x, which
// may be an index expression, a (2- or 3-index) slice expression, or a
// type instantiation with one or more type arguments.
func (p *parser) parseIndexOrSliceOrInstance(x ast.Expr) ast.Expr {
	if p.trace {
		defer un(trace(p, "parseIndexOrSliceOrInstance"))
	}

	lbrack := p.expect(token.LBRACK)
	if p.tok == token.RBRACK {
		// empty index, slice or index expressions are not permitted;
		// accept them for parsing tolerance, but complain
		p.errorExpected(p.pos, "operand")
		rbrack := p.pos
		p.next()
		return &ast.IndexExpr{
			X:      x,
			Lbrack: lbrack,
			Index:  &ast.BadExpr{From: rbrack, To: rbrack},
			Rbrack: rbrack,
		}
	}
	p.exprLev++

	const N = 3 // change the 3 to 2 to disable 3-index slices
	var args []ast.Expr
	var index [N]ast.Expr
	var colons [N - 1]token.Pos
	if p.tok != token.COLON {
		// We can't know if we have an index expression or a type instantiation;
		// so even if we see a (named) type we are not going to be in type context.
		index[0] = p.parseRhs()
	}
	ncolons := 0
	switch p.tok {
	case token.COLON:
		// slice expression
		for p.tok == token.COLON && ncolons < len(colons) {
			colons[ncolons] = p.pos
			ncolons++
			p.next()
			if p.tok != token.COLON && p.tok != token.RBRACK && p.tok != token.EOF {
				index[ncolons] = p.parseRhs()
			}
		}
	case token.COMMA:
		// instance expression
		args = append(args, index[0])
		for p.tok == token.COMMA {
			p.next()
			if p.tok != token.RBRACK && p.tok != token.EOF {
				args = append(args, p.parseType())
			}
		}
	}

	p.exprLev--
	rbrack := p.expect(token.RBRACK)

	if ncolons > 0 {
		// slice expression
		slice3 := false
		if ncolons == 2 {
			slice3 = true
			// Check presence of middle and final index here rather than during type-checking
			// to prevent erroneous programs from passing through gofmt (was go.dev/issue/7305).
			if index[1] == nil {
				p.error(colons[0], "middle index required in 3-index slice")
				index[1] = &ast.BadExpr{From: colons[0] + 1, To: colons[1]}
			}
			if index[2] == nil {
				p.error(colons[1], "final index required in 3-index slice")
				index[2] = &ast.BadExpr{From: colons[1] + 1, To: rbrack}
			}
		}
		return &ast.SliceExpr{X: x, Lbrack: lbrack, Low: index[0], High: index[1], Max: index[2], Slice3: slice3, Rbrack: rbrack}
	}

	if len(args) == 0 {
		// index expression
		return &ast.IndexExpr{X: x, Lbrack: lbrack, Index: index[0], Rbrack: rbrack}
	}

	// instance expression
	return typeparams.PackIndexExpr(x, lbrack, args, rbrack)
}

// parseCallOrConversion parses the parenthesized argument list (with an
// optional trailing "...") applied to fun.
func (p *parser) parseCallOrConversion(fun ast.Expr) *ast.CallExpr {
	if p.trace {
		defer un(trace(p, "CallOrConversion"))
	}

	lparen := p.expect(token.LPAREN)
	p.exprLev++
	var list []ast.Expr
	var ellipsis token.Pos
	for p.tok != token.RPAREN && p.tok != token.EOF && !ellipsis.IsValid() {
		list = append(list, p.parseRhs()) // builtins may expect a type: make(some type, ...)
		if p.tok == token.ELLIPSIS {
			ellipsis = p.pos
			p.next()
		}
		if !p.atComma("argument list", token.RPAREN) {
			break
		}
		p.next()
	}
	p.exprLev--
	rparen := p.expectClosing(token.RPAREN, "argument list")

	return &ast.CallExpr{Fun: fun, Lparen: lparen, Args: list, Ellipsis: ellipsis, Rparen: rparen}
}

// parseValue parses a composite-literal element value: either a nested
// literal value (starting with '{') or an expression.
func (p *parser) parseValue() ast.Expr {
	if p.trace {
		defer un(trace(p, "Element"))
	}

	if p.tok == token.LBRACE {
		return p.parseLiteralValue(nil)
	}

	x := p.parseExpr()

	return x
}

// parseElement parses a composite-literal element, wrapping it in a
// KeyValueExpr if a ':' follows the first value.
func (p *parser) parseElement() ast.Expr {
	if p.trace {
		defer un(trace(p, "Element"))
	}

	x := p.parseValue()
	if p.tok == token.COLON {
		colon := p.pos
		p.next()
		x = &ast.KeyValueExpr{Key: x, Colon: colon, Value: p.parseValue()}
	}

	return x
}

// parseElementList parses comma-separated composite-literal elements up
// to the closing '}'.
func (p *parser) parseElementList() (list []ast.Expr) {
	if p.trace {
		defer un(trace(p, "ElementList"))
	}

	for p.tok != token.RBRACE && p.tok != token.EOF {
		list = append(list, p.parseElement())
		if !p.atComma("composite literal", token.RBRACE) {
			break
		}
		p.next()
	}

	return
}

// parseLiteralValue parses the brace-delimited element list of a
// composite literal with the given (possibly nil) type.
func (p *parser) parseLiteralValue(typ ast.Expr) ast.Expr {
	if p.trace {
		defer un(trace(p, "LiteralValue"))
	}

	lbrace := p.expect(token.LBRACE)
	var elts []ast.Expr
	p.exprLev++
	if p.tok != token.RBRACE {
		elts = p.parseElementList()
	}
	p.exprLev--
	rbrace := p.expectClosing(token.RBRACE, "composite literal")
	return &ast.CompositeLit{Type: typ, Lbrace: lbrace, Elts: elts, Rbrace: rbrace}
}

// parsePrimaryExpr parses an operand followed by any number of suffixes:
// selectors, type assertions, indexes/slices/instantiations, calls, and
// composite literal values. If x is non-nil it is used as the operand.
func (p *parser) parsePrimaryExpr(x ast.Expr) ast.Expr {
	if p.trace {
		defer un(trace(p, "PrimaryExpr"))
	}

	if x == nil {
		x = p.parseOperand()
	}
	// We track the nesting here rather than at the entry for the function,
	// since it can iteratively produce a nested output, and we want to
	// limit how deep a structure we generate.
	var n int
	defer func() { p.nestLev -= n }()
	for n = 1; ; n++ {
		incNestLev(p)
		switch p.tok {
		case token.PERIOD:
			p.next()
			switch p.tok {
			case token.IDENT:
				x = p.parseSelector(x)
			case token.LPAREN:
				x = p.parseTypeAssertion(x)
			default:
				pos := p.pos
				p.errorExpected(pos, "selector or type assertion")
				// TODO(rFindley) The check for token.RBRACE below is a targeted fix
				//                to error recovery sufficient to make the x/tools tests to
				//                pass with the new parsing logic introduced for type
				//                parameters. Remove this once error recovery has been
				//                more generally reconsidered.
				if p.tok != token.RBRACE {
					p.next() // make progress
				}
				sel := &ast.Ident{NamePos: pos, Name: "_"}
				x = &ast.SelectorExpr{X: x, Sel: sel}
			}
		case token.LBRACK:
			x = p.parseIndexOrSliceOrInstance(x)
		case token.LPAREN:
			x = p.parseCallOrConversion(x)
		case token.LBRACE:
			// operand may have returned a parenthesized complit
			// type; accept it but complain if we have a complit
			t := ast.Unparen(x)
			// determine if '{' belongs to a composite literal or a block statement
			switch t.(type) {
			case *ast.BadExpr, *ast.Ident, *ast.SelectorExpr:
				if p.exprLev < 0 {
					return x
				}
				// x is possibly a composite literal type
			case *ast.IndexExpr, *ast.IndexListExpr:
				if p.exprLev < 0 {
					return x
				}
				// x is possibly a composite literal type
			case *ast.ArrayType, *ast.StructType, *ast.MapType:
				// x is a composite literal type
			default:
				return x
			}
			if t != x {
				p.error(t.Pos(), "cannot parenthesize type in composite literal")
				// already progressed, no need to advance
			}
			x = p.parseLiteralValue(x)
		default:
			return x
		}
	}
}

// parseUnaryExpr parses a unary expression, including receive operations
// and channel types (which require re-association of the '<-' token, see
// below).
func (p *parser) parseUnaryExpr() ast.Expr {
	defer decNestLev(incNestLev(p))

	if p.trace {
		defer un(trace(p, "UnaryExpr"))
	}

	switch p.tok {
	case token.ADD, token.SUB, token.NOT, token.XOR, token.AND, token.TILDE:
		pos, op := p.pos, p.tok
		p.next()
		x := p.parseUnaryExpr()
		return &ast.UnaryExpr{OpPos: pos, Op: op, X: x}

	case token.ARROW:
		// channel type or receive expression
		arrow := p.pos
		p.next()

		// If the next token is token.CHAN we still don't know if it
		// is a channel type or a receive operation - we only know
		// once we have found the end of the unary expression. There
		// are two cases:
		//
		//   <- type  => (<-type) must be channel type
		//   <- expr  => <-(expr) is a receive from an expression
		//
		// In the first case, the arrow must be re-associated with
		// the channel type parsed already:
		//
		//   <- (chan type)    =>  (<-chan type)
		//   <- (chan<- type)  =>  (<-chan (<-type))

		x := p.parseUnaryExpr()

		// determine which case we have
		if typ, ok := x.(*ast.ChanType); ok {
			// (<-type)

			// re-associate position info and <-
			dir := ast.SEND
			for ok && dir == ast.SEND {
				if typ.Dir == ast.RECV {
					// error: (<-type) is (<-(<-chan T))
					p.errorExpected(typ.Arrow, "'chan'")
				}
				arrow, typ.Begin, typ.Arrow = typ.Arrow, arrow, arrow
				dir, typ.Dir = typ.Dir, ast.RECV
				typ, ok = typ.Value.(*ast.ChanType)
			}
			if dir == ast.SEND {
				p.errorExpected(arrow, "channel type")
			}

			return x
		}

		// <-(expr)
		return &ast.UnaryExpr{OpPos: arrow, Op: token.ARROW, X: x}

	case token.MUL:
		// pointer type or unary "*" expression
		pos := p.pos
		p.next()
		x := p.parseUnaryExpr()
		return &ast.StarExpr{Star: pos, X: x}
	}

	return p.parsePrimaryExpr(nil)
}

// tokPrec returns the current token and its operator precedence; in a
// rhs context (p.inRhs) '=' is treated as '==' for better error
// tolerance.
func (p *parser) tokPrec() (token.Token, int) {
	tok := p.tok
	if p.inRhs && tok == token.ASSIGN {
		tok = token.EQL
	}
	return tok, tok.Precedence()
}

// parseBinaryExpr parses a (possibly) binary expression.
// If x is non-nil, it is used as the left operand.
//
// TODO(rfindley): parseBinaryExpr has become overloaded. Consider refactoring.
func (p *parser) parseBinaryExpr(x ast.Expr, prec1 int) ast.Expr {
	if p.trace {
		defer un(trace(p, "BinaryExpr"))
	}

	if x == nil {
		x = p.parseUnaryExpr()
	}
	// We track the nesting here rather than at the entry for the function,
	// since it can iteratively produce a nested output, and we want to
	// limit how deep a structure we generate.
	var n int
	defer func() { p.nestLev -= n }()
	for n = 1; ; n++ {
		incNestLev(p)
		op, oprec := p.tokPrec()
		if oprec < prec1 {
			return x
		}
		pos := p.expect(op)
		y := p.parseBinaryExpr(nil, oprec+1)
		x = &ast.BinaryExpr{X: x, OpPos: pos, Op: op, Y: y}
	}
}

// The result may be a type or even a raw type ([...]int).
func (p *parser) parseExpr() ast.Expr {
	if p.trace {
		defer un(trace(p, "Expression"))
	}

	return p.parseBinaryExpr(nil, token.LowestPrec+1)
}

// parseRhs parses an expression with p.inRhs set; this changes how '='
// is interpreted (see tokPrec).
func (p *parser) parseRhs() ast.Expr {
	old := p.inRhs
	p.inRhs = true
	x := p.parseExpr()
	p.inRhs = old
	return x
}

// ----------------------------------------------------------------------------
// Statements

// Parsing modes for parseSimpleStmt.
const (
	basic = iota
	labelOk
	rangeOk
)

// parseSimpleStmt returns true as 2nd result if it parsed the assignment
// of a range clause (with mode == rangeOk). The returned statement is an
// assignment with a right-hand side that is a single unary expression of
// the form "range x". No guarantees are given for the left-hand side.
func (p *parser) parseSimpleStmt(mode int) (ast.Stmt, bool) {
	if p.trace {
		defer un(trace(p, "SimpleStmt"))
	}

	x := p.parseList(false)

	switch p.tok {
	case
		token.DEFINE, token.ASSIGN, token.ADD_ASSIGN,
		token.SUB_ASSIGN, token.MUL_ASSIGN, token.QUO_ASSIGN,
		token.REM_ASSIGN, token.AND_ASSIGN, token.OR_ASSIGN,
		token.XOR_ASSIGN, token.SHL_ASSIGN, token.SHR_ASSIGN, token.AND_NOT_ASSIGN:
		// assignment statement, possibly part of a range clause
		pos, tok := p.pos, p.tok
		p.next()
		var y []ast.Expr
		isRange := false
		if mode == rangeOk && p.tok == token.RANGE && (tok == token.DEFINE || tok == token.ASSIGN) {
			pos := p.pos
			p.next()
			y = []ast.Expr{&ast.UnaryExpr{OpPos: pos, Op: token.RANGE, X: p.parseRhs()}}
			isRange = true
		} else {
			y = p.parseList(true)
		}
		return &ast.AssignStmt{Lhs: x, TokPos: pos, Tok: tok, Rhs: y}, isRange
	}

	if len(x) > 1 {
		p.errorExpected(x[0].Pos(), "1 expression")
		// continue with first expression
	}

	switch p.tok {
	case token.COLON:
		// labeled statement
		colon := p.pos
		p.next()
		if label, isIdent := x[0].(*ast.Ident); mode == labelOk && isIdent {
			// Go spec: The scope of a label is the body of the function
			// in which it is declared and excludes the body of any nested
			// function.
			stmt := &ast.LabeledStmt{Label: label, Colon: colon, Stmt: p.parseStmt()}
			return stmt, false
		}
		// The label declaration typically starts at x[0].Pos(), but the label
		// declaration may be erroneous due to a token after that position (and
		// before the ':'). If SpuriousErrors is not set, the (only) error
		// reported for the line is the illegal label error instead of the token
		// before the ':' that caused the problem. Thus, use the (latest) colon
		// position for error reporting.
		p.error(colon, "illegal label declaration")
		return &ast.BadStmt{From: x[0].Pos(), To: colon + 1}, false

	case token.ARROW:
		// send statement
		arrow := p.pos
		p.next()
		y := p.parseRhs()
		return &ast.SendStmt{Chan: x[0], Arrow: arrow, Value: y}, false

	case token.INC, token.DEC:
		// increment or decrement
		s := &ast.IncDecStmt{X: x[0], TokPos: p.pos, Tok: p.tok}
		p.next()
		return s, false
	}

	// expression
	return &ast.ExprStmt{X: x[0]}, false
}

// parseCallExpr parses the call expression after a go/defer keyword
// (callType is "go" or "defer") and returns nil, after reporting an
// error, if the expression is not a function call.
func (p *parser) parseCallExpr(callType string) *ast.CallExpr {
	x := p.parseRhs() // could be a conversion: (some type)(x)
	if t := ast.Unparen(x); t != x {
		p.error(x.Pos(), fmt.Sprintf("expression in %s must not be parenthesized", callType))
		x = t
	}
	if call, isCall := x.(*ast.CallExpr); isCall {
		return call
	}
	if _, isBad := x.(*ast.BadExpr); !isBad {
		// only report error if it's a new one
		p.error(p.safePos(x.End()), fmt.Sprintf("expression in %s must be function call", callType))
	}
	return nil
}

// parseGoStmt parses a "go" statement; a BadStmt is returned if the
// operand is not a call.
func (p *parser) parseGoStmt() ast.Stmt {
	if p.trace {
		defer un(trace(p, "GoStmt"))
	}

	pos := p.expect(token.GO)
	call := p.parseCallExpr("go")
	p.expectSemi()
	if call == nil {
		return &ast.BadStmt{From: pos, To: pos + 2} // len("go")
	}

	return &ast.GoStmt{Go: pos, Call: call}
}

// parseDeferStmt parses a "defer" statement; a BadStmt is returned if
// the operand is not a call.
func (p *parser) parseDeferStmt() ast.Stmt {
	if p.trace {
		defer un(trace(p, "DeferStmt"))
	}

	pos := p.expect(token.DEFER)
	call := p.parseCallExpr("defer")
	p.expectSemi()
	if call == nil {
		return &ast.BadStmt{From: pos, To: pos + 5} // len("defer")
	}

	return &ast.DeferStmt{Defer: pos, Call: call}
}

// parseReturnStmt parses a "return" statement with an optional result
// expression list.
func (p *parser) parseReturnStmt() *ast.ReturnStmt {
	if p.trace {
		defer un(trace(p, "ReturnStmt"))
	}

	pos := p.pos
	p.expect(token.RETURN)
	var x []ast.Expr
	if p.tok != token.SEMICOLON && p.tok != token.RBRACE {
		x = p.parseList(true)
	}
	p.expectSemi()

	return &ast.ReturnStmt{Return: pos, Results: x}
}

// parseBranchStmt parses a break, continue, goto, or fallthrough
// statement; all but fallthrough accept an optional label.
func (p *parser) parseBranchStmt(tok token.Token) *ast.BranchStmt {
	if p.trace {
		defer un(trace(p, "BranchStmt"))
	}

	pos := p.expect(tok)
	var label *ast.Ident
	if tok != token.FALLTHROUGH && p.tok == token.IDENT {
		label = p.parseIdent()
	}
	p.expectSemi()

	return &ast.BranchStmt{TokPos: pos, Tok: tok, Label: label}
}

// makeExpr converts a simple statement into the expression it wraps;
// for anything other than an expression statement it reports an error
// (using want in the message) and returns a BadExpr.
func (p *parser) makeExpr(s ast.Stmt, want string) ast.Expr {
	if s == nil {
		return nil
	}
	if es, isExpr := s.(*ast.ExprStmt); isExpr {
		return es.X
	}
	found := "simple statement"
	if _, isAss := s.(*ast.AssignStmt); isAss {
		found = "assignment"
	}
	p.error(s.Pos(), fmt.Sprintf("expected %s, found %s (missing parentheses around composite literal?)", want, found))
	return &ast.BadExpr{From: s.Pos(), To: p.safePos(s.End())}
}

// parseIfHeader is an adjusted version of parser.header
// in cmd/compile/internal/syntax/parser.go, which has
// been tuned for better error handling.
func (p *parser) parseIfHeader() (init ast.Stmt, cond ast.Expr) {
	if p.tok == token.LBRACE {
		p.error(p.pos, "missing condition in if statement")
		cond = &ast.BadExpr{From: p.pos, To: p.pos}
		return
	}
	// p.tok != token.LBRACE

	prevLev := p.exprLev
	p.exprLev = -1

	if p.tok != token.SEMICOLON {
		// accept potential variable declaration but complain
		if p.tok == token.VAR {
			p.next()
			p.error(p.pos, "var declaration not allowed in if initializer")
		}
		init, _ = p.parseSimpleStmt(basic)
	}

	var condStmt ast.Stmt
	var semi struct {
		pos token.Pos
		lit string // ";" or "\n"; valid if pos.IsValid()
	}
	if p.tok != token.LBRACE {
		if p.tok == token.SEMICOLON {
			semi.pos = p.pos
			semi.lit = p.lit
			p.next()
		} else {
			p.expect(token.SEMICOLON)
		}
		if p.tok != token.LBRACE {
			condStmt, _ = p.parseSimpleStmt(basic)
		}
	} else {
		// No semicolon: what we parsed as init is really the condition.
		condStmt = init
		init = nil
	}

	if condStmt != nil {
		cond = p.makeExpr(condStmt, "boolean expression")
	} else if semi.pos.IsValid() {
		if semi.lit == "\n" {
			p.error(semi.pos, "unexpected newline, expecting { after if clause")
		} else {
			p.error(semi.pos, "missing condition in if statement")
		}
	}

	// make sure we have a valid AST
	if cond == nil {
		cond = &ast.BadExpr{From: p.pos, To: p.pos}
	}

	p.exprLev = prevLev
	return
}

// parseIfStmt parses an "if" statement with optional init statement and
// optional "else" branch (another if statement or a block).
func (p *parser) parseIfStmt() *ast.IfStmt {
	defer decNestLev(incNestLev(p))

	if p.trace {
		defer un(trace(p, "IfStmt"))
	}

	pos := p.expect(token.IF)

	init, cond := p.parseIfHeader()
	body := p.parseBlockStmt()

	var else_ ast.Stmt
	if p.tok == token.ELSE {
		p.next()
		switch p.tok {
		case token.IF:
			else_ = p.parseIfStmt()
		case token.LBRACE:
			else_ = p.parseBlockStmt()
			p.expectSemi()
		default:
			p.errorExpected(p.pos, "if statement or block")
			else_ = &ast.BadStmt{From: p.pos, To: p.pos}
		}
	} else {
		p.expectSemi()
	}

	return &ast.IfStmt{If: pos, Init: init, Cond: cond, Body: body, Else: else_}
}

// parseCaseClause parses a "case x, y:" or "default:" clause of a
// switch statement, including its body.
func (p *parser) parseCaseClause() *ast.CaseClause {
	if p.trace {
		defer un(trace(p, "CaseClause"))
	}

	pos := p.pos
	var list []ast.Expr
	if p.tok == token.CASE {
		p.next()
		list = p.parseList(true)
	} else {
		p.expect(token.DEFAULT)
	}

	colon := p.expect(token.COLON)
	body := p.parseStmtList()

	return &ast.CaseClause{Case: pos, List: list, Colon: colon, Body: body}
}

// isTypeSwitchAssert reports whether x is a type assertion of the form
// x.(type) (a TypeAssertExpr with nil Type).
func isTypeSwitchAssert(x ast.Expr) bool {
	a, ok := x.(*ast.TypeAssertExpr)
	return ok && a.Type == nil
}

// isTypeSwitchGuard reports whether s is a type switch guard: x.(type)
// or v := x.(type). The erroneous v = x.(type) is accepted with an
// error for better recovery.
func (p *parser) isTypeSwitchGuard(s ast.Stmt) bool {
	switch t := s.(type) {
	case *ast.ExprStmt:
		// x.(type)
		return isTypeSwitchAssert(t.X)
	case *ast.AssignStmt:
		// v := x.(type)
		if len(t.Lhs) == 1 && len(t.Rhs) == 1 && isTypeSwitchAssert(t.Rhs[0]) {
			switch t.Tok {
			case token.ASSIGN:
				// permit v = x.(type) but complain
				p.error(t.TokPos, "expected ':=', found '='")
				fallthrough
			case token.DEFINE:
				return true
			}
		}
	}
	return false
}

// parseSwitchStmt parses an expression switch or a type switch,
// distinguished by whether the guard statement is a type switch guard.
func (p *parser) parseSwitchStmt() ast.Stmt {
	if p.trace {
		defer un(trace(p, "SwitchStmt"))
	}

	pos := p.expect(token.SWITCH)

	var s1, s2 ast.Stmt
	if p.tok != token.LBRACE {
		prevLev := p.exprLev
		p.exprLev = -1
		if p.tok != token.SEMICOLON {
			s2, _ = p.parseSimpleStmt(basic)
		}
		if p.tok == token.SEMICOLON {
			p.next()
			s1 = s2
			s2 = nil
			if p.tok != token.LBRACE {
				// A TypeSwitchGuard may declare a variable in addition
				// to the variable declared in the initial SimpleStmt.
				// Introduce extra scope to avoid redeclaration errors:
				//
				//	switch t := 0; t := x.(T) { ... }
				//
				// (this code is not valid Go because the first t
				// cannot be accessed and thus is never used, the extra
				// scope is needed for the correct error message).
				//
				// If we don't have a type switch, s2 must be an expression.
				// Having the extra nested but empty scope won't affect it.
				s2, _ = p.parseSimpleStmt(basic)
			}
		}
		p.exprLev = prevLev
	}

	typeSwitch := p.isTypeSwitchGuard(s2)
	lbrace := p.expect(token.LBRACE)
	var list []ast.Stmt
	for p.tok == token.CASE || p.tok == token.DEFAULT {
		list = append(list, p.parseCaseClause())
	}
	rbrace := p.expect(token.RBRACE)
	p.expectSemi()
	body := &ast.BlockStmt{Lbrace: lbrace, List: list, Rbrace: rbrace}

	if typeSwitch {
		return &ast.TypeSwitchStmt{Switch: pos, Init: s1, Assign: s2, Body: body}
	}

	return &ast.SwitchStmt{Switch: pos, Init: s1, Tag: p.makeExpr(s2, "switch expression"), Body: body}
}

// parseCommClause parses one communication clause of a select
// statement: a send statement, a receive statement (with optional
// assignment), or "default", followed by the clause body.
func (p *parser) parseCommClause() *ast.CommClause {
	if p.trace {
		defer un(trace(p, "CommClause"))
	}

	pos := p.pos
	var comm ast.Stmt
	if p.tok == token.CASE {
		p.next()
		lhs := p.parseList(false)
		if p.tok == token.ARROW {
			// SendStmt
			if len(lhs) > 1 {
				p.errorExpected(lhs[0].Pos(), "1 expression")
				// continue with first expression
			}
			arrow := p.pos
			p.next()
			rhs := p.parseRhs()
			comm = &ast.SendStmt{Chan: lhs[0], Arrow: arrow, Value: rhs}
		} else {
			// RecvStmt
			if tok := p.tok; tok == token.ASSIGN || tok == token.DEFINE {
				// RecvStmt with assignment
				if len(lhs) > 2 {
					p.errorExpected(lhs[0].Pos(), "1 or 2 expressions")
					// continue with first two expressions
					lhs = lhs[0:2]
				}
				pos := p.pos
				p.next()
				rhs := p.parseRhs()
				comm = &ast.AssignStmt{Lhs: lhs, TokPos: pos, Tok: tok, Rhs: []ast.Expr{rhs}}
			} else {
				// lhs must be single receive operation
				if len(lhs) > 1 {
					p.errorExpected(lhs[0].Pos(), "1 expression")
					// continue with first expression
				}
				comm = &ast.ExprStmt{X: lhs[0]}
			}
		}
	} else {
		p.expect(token.DEFAULT)
	}

	colon := p.expect(token.COLON)
	body := p.parseStmtList()

	return &ast.CommClause{Case: pos, Comm: comm, Colon: colon, Body: body}
}

// parseSelectStmt parses a "select" statement and its communication
// clauses.
func (p *parser) parseSelectStmt() *ast.SelectStmt {
	if p.trace {
		defer un(trace(p, "SelectStmt"))
	}

	pos := p.expect(token.SELECT)
	lbrace := p.expect(token.LBRACE)
	var list []ast.Stmt
	for p.tok == token.CASE || p.tok == token.DEFAULT {
		list = append(list, p.parseCommClause())
	}
	rbrace := p.expect(token.RBRACE)
	p.expectSemi()
	body := &ast.BlockStmt{Lbrace: lbrace, List: list, Rbrace: rbrace}

	return &ast.SelectStmt{Select: pos, Body: body}
}

// parseForStmt parses a "for" statement in any of its forms: bare loop,
// condition-only, three-clause, or range clause.
func (p *parser) parseForStmt() ast.Stmt {
	if p.trace {
		defer un(trace(p, "ForStmt"))
	}

	pos := p.expect(token.FOR)

	var s1, s2, s3 ast.Stmt
	var isRange bool
	if p.tok != token.LBRACE {
		prevLev := p.exprLev
		p.exprLev = -1
		if p.tok != token.SEMICOLON {
			if p.tok == token.RANGE {
				// "for range x" (nil lhs in assignment)
				pos := p.pos
				p.next()
				y := []ast.Expr{&ast.UnaryExpr{OpPos: pos, Op: token.RANGE, X: p.parseRhs()}}
				s2 = &ast.AssignStmt{Rhs: y}
				isRange = true
			} else {
				s2, isRange = p.parseSimpleStmt(rangeOk)
			}
		}
		if !isRange && p.tok == token.SEMICOLON {
			p.next()
			s1 = s2
			s2 = nil
			if p.tok != token.SEMICOLON {
				s2, _ = p.parseSimpleStmt(basic)
			}
			p.expectSemi()
			if p.tok != token.LBRACE {
				s3, _ = p.parseSimpleStmt(basic)
			}
		}
		p.exprLev = prevLev
	}

body := p.parseBlockStmt() 2376 p.expectSemi() 2377 2378 if isRange { 2379 as := s2.(*ast.AssignStmt) 2380 // check lhs 2381 var key, value ast.Expr 2382 switch len(as.Lhs) { 2383 case 0: 2384 // nothing to do 2385 case 1: 2386 key = as.Lhs[0] 2387 case 2: 2388 key, value = as.Lhs[0], as.Lhs[1] 2389 default: 2390 p.errorExpected(as.Lhs[len(as.Lhs)-1].Pos(), "at most 2 expressions") 2391 return &ast.BadStmt{From: pos, To: p.safePos(body.End())} 2392 } 2393 // parseSimpleStmt returned a right-hand side that 2394 // is a single unary expression of the form "range x" 2395 x := as.Rhs[0].(*ast.UnaryExpr).X 2396 return &ast.RangeStmt{ 2397 For: pos, 2398 Key: key, 2399 Value: value, 2400 TokPos: as.TokPos, 2401 Tok: as.Tok, 2402 Range: as.Rhs[0].Pos(), 2403 X: x, 2404 Body: body, 2405 } 2406 } 2407 2408 // regular for statement 2409 return &ast.ForStmt{ 2410 For: pos, 2411 Init: s1, 2412 Cond: p.makeExpr(s2, "boolean or range expression"), 2413 Post: s3, 2414 Body: body, 2415 } 2416 } 2417 2418 func (p *parser) parseStmt() (s ast.Stmt) { 2419 defer decNestLev(incNestLev(p)) 2420 2421 if p.trace { 2422 defer un(trace(p, "Statement")) 2423 } 2424 2425 switch p.tok { 2426 case token.CONST, token.TYPE, token.VAR: 2427 s = &ast.DeclStmt{Decl: p.parseDecl(stmtStart)} 2428 case 2429 // tokens that may start an expression 2430 token.IDENT, token.INT, token.FLOAT, token.IMAG, token.CHAR, token.STRING, token.FUNC, token.LPAREN, // operands 2431 token.LBRACK, token.STRUCT, token.MAP, token.CHAN, token.INTERFACE, // composite types 2432 token.ADD, token.SUB, token.MUL, token.AND, token.XOR, token.ARROW, token.NOT: // unary operators 2433 s, _ = p.parseSimpleStmt(labelOk) 2434 // because of the required look-ahead, labeled statements are 2435 // parsed by parseSimpleStmt - don't expect a semicolon after 2436 // them 2437 if _, isLabeledStmt := s.(*ast.LabeledStmt); !isLabeledStmt { 2438 p.expectSemi() 2439 } 2440 case token.GO: 2441 s = p.parseGoStmt() 2442 case token.DEFER: 2443 s = 
p.parseDeferStmt() 2444 case token.RETURN: 2445 s = p.parseReturnStmt() 2446 case token.BREAK, token.CONTINUE, token.GOTO, token.FALLTHROUGH: 2447 s = p.parseBranchStmt(p.tok) 2448 case token.LBRACE: 2449 s = p.parseBlockStmt() 2450 p.expectSemi() 2451 case token.IF: 2452 s = p.parseIfStmt() 2453 case token.SWITCH: 2454 s = p.parseSwitchStmt() 2455 case token.SELECT: 2456 s = p.parseSelectStmt() 2457 case token.FOR: 2458 s = p.parseForStmt() 2459 case token.SEMICOLON: 2460 // Is it ever possible to have an implicit semicolon 2461 // producing an empty statement in a valid program? 2462 // (handle correctly anyway) 2463 s = &ast.EmptyStmt{Semicolon: p.pos, Implicit: p.lit == "\n"} 2464 p.next() 2465 case token.RBRACE: 2466 // a semicolon may be omitted before a closing "}" 2467 s = &ast.EmptyStmt{Semicolon: p.pos, Implicit: true} 2468 default: 2469 // no statement found 2470 pos := p.pos 2471 p.errorExpected(pos, "statement") 2472 p.advance(stmtStart) 2473 s = &ast.BadStmt{From: pos, To: p.pos} 2474 } 2475 2476 return 2477 } 2478 2479 // ---------------------------------------------------------------------------- 2480 // Declarations 2481 2482 type parseSpecFunction func(doc *ast.CommentGroup, keyword token.Token, iota int) ast.Spec 2483 2484 func (p *parser) parseImportSpec(doc *ast.CommentGroup, _ token.Token, _ int) ast.Spec { 2485 if p.trace { 2486 defer un(trace(p, "ImportSpec")) 2487 } 2488 2489 var ident *ast.Ident 2490 switch p.tok { 2491 case token.IDENT: 2492 ident = p.parseIdent() 2493 case token.PERIOD: 2494 ident = &ast.Ident{NamePos: p.pos, Name: "."} 2495 p.next() 2496 } 2497 2498 pos := p.pos 2499 var path string 2500 if p.tok == token.STRING { 2501 path = p.lit 2502 p.next() 2503 } else if p.tok.IsLiteral() { 2504 p.error(pos, "import path must be a string") 2505 p.next() 2506 } else { 2507 p.error(pos, "missing import path") 2508 p.advance(exprEnd) 2509 } 2510 comment := p.expectSemi() 2511 2512 // collect imports 2513 spec := &ast.ImportSpec{ 2514 
Doc: doc, 2515 Name: ident, 2516 Path: &ast.BasicLit{ValuePos: pos, Kind: token.STRING, Value: path}, 2517 Comment: comment, 2518 } 2519 p.imports = append(p.imports, spec) 2520 2521 return spec 2522 } 2523 2524 func (p *parser) parseValueSpec(doc *ast.CommentGroup, keyword token.Token, iota int) ast.Spec { 2525 if p.trace { 2526 defer un(trace(p, keyword.String()+"Spec")) 2527 } 2528 2529 idents := p.parseIdentList() 2530 var typ ast.Expr 2531 var values []ast.Expr 2532 switch keyword { 2533 case token.CONST: 2534 // always permit optional type and initialization for more tolerant parsing 2535 if p.tok != token.EOF && p.tok != token.SEMICOLON && p.tok != token.RPAREN { 2536 typ = p.tryIdentOrType() 2537 if p.tok == token.ASSIGN { 2538 p.next() 2539 values = p.parseList(true) 2540 } 2541 } 2542 case token.VAR: 2543 if p.tok != token.ASSIGN { 2544 typ = p.parseType() 2545 } 2546 if p.tok == token.ASSIGN { 2547 p.next() 2548 values = p.parseList(true) 2549 } 2550 default: 2551 panic("unreachable") 2552 } 2553 comment := p.expectSemi() 2554 2555 spec := &ast.ValueSpec{ 2556 Doc: doc, 2557 Names: idents, 2558 Type: typ, 2559 Values: values, 2560 Comment: comment, 2561 } 2562 return spec 2563 } 2564 2565 func (p *parser) parseGenericType(spec *ast.TypeSpec, openPos token.Pos, name0 *ast.Ident, typ0 ast.Expr) { 2566 if p.trace { 2567 defer un(trace(p, "parseGenericType")) 2568 } 2569 2570 list := p.parseParameterList(name0, typ0, token.RBRACK) 2571 closePos := p.expect(token.RBRACK) 2572 spec.TypeParams = &ast.FieldList{Opening: openPos, List: list, Closing: closePos} 2573 // Let the type checker decide whether to accept type parameters on aliases: 2574 // see go.dev/issue/46477. 
2575 if p.tok == token.ASSIGN { 2576 // type alias 2577 spec.Assign = p.pos 2578 p.next() 2579 } 2580 spec.Type = p.parseType() 2581 } 2582 2583 func (p *parser) parseTypeSpec(doc *ast.CommentGroup, _ token.Token, _ int) ast.Spec { 2584 if p.trace { 2585 defer un(trace(p, "TypeSpec")) 2586 } 2587 2588 name := p.parseIdent() 2589 spec := &ast.TypeSpec{Doc: doc, Name: name} 2590 2591 if p.tok == token.LBRACK { 2592 // spec.Name "[" ... 2593 // array/slice type or type parameter list 2594 lbrack := p.pos 2595 p.next() 2596 if p.tok == token.IDENT { 2597 // We may have an array type or a type parameter list. 2598 // In either case we expect an expression x (which may 2599 // just be a name, or a more complex expression) which 2600 // we can analyze further. 2601 // 2602 // A type parameter list may have a type bound starting 2603 // with a "[" as in: P []E. In that case, simply parsing 2604 // an expression would lead to an error: P[] is invalid. 2605 // But since index or slice expressions are never constant 2606 // and thus invalid array length expressions, if the name 2607 // is followed by "[" it must be the start of an array or 2608 // slice constraint. Only if we don't see a "[" do we 2609 // need to parse a full expression. Notably, name <- x 2610 // is not a concern because name <- x is a statement and 2611 // not an expression. 2612 var x ast.Expr = p.parseIdent() 2613 if p.tok != token.LBRACK { 2614 // To parse the expression starting with name, expand 2615 // the call sequence we would get by passing in name 2616 // to parser.expr, and pass in name to parsePrimaryExpr. 2617 p.exprLev++ 2618 lhs := p.parsePrimaryExpr(x) 2619 x = p.parseBinaryExpr(lhs, token.LowestPrec+1) 2620 p.exprLev-- 2621 } 2622 // Analyze expression x. 
If we can split x into a type parameter 2623 // name, possibly followed by a type parameter type, we consider 2624 // this the start of a type parameter list, with some caveats: 2625 // a single name followed by "]" tilts the decision towards an 2626 // array declaration; a type parameter type that could also be 2627 // an ordinary expression but which is followed by a comma tilts 2628 // the decision towards a type parameter list. 2629 if pname, ptype := extractName(x, p.tok == token.COMMA); pname != nil && (ptype != nil || p.tok != token.RBRACK) { 2630 // spec.Name "[" pname ... 2631 // spec.Name "[" pname ptype ... 2632 // spec.Name "[" pname ptype "," ... 2633 p.parseGenericType(spec, lbrack, pname, ptype) // ptype may be nil 2634 } else { 2635 // spec.Name "[" pname "]" ... 2636 // spec.Name "[" x ... 2637 spec.Type = p.parseArrayType(lbrack, x) 2638 } 2639 } else { 2640 // array type 2641 spec.Type = p.parseArrayType(lbrack, nil) 2642 } 2643 } else { 2644 // no type parameters 2645 if p.tok == token.ASSIGN { 2646 // type alias 2647 spec.Assign = p.pos 2648 p.next() 2649 } 2650 spec.Type = p.parseType() 2651 } 2652 2653 spec.Comment = p.expectSemi() 2654 2655 return spec 2656 } 2657 2658 // extractName splits the expression x into (name, expr) if syntactically 2659 // x can be written as name expr. The split only happens if expr is a type 2660 // element (per the isTypeElem predicate) or if force is set. 2661 // If x is just a name, the result is (name, nil). If the split succeeds, 2662 // the result is (name, expr). Otherwise the result is (nil, x). 
2663 // Examples: 2664 // 2665 // x force name expr 2666 // ------------------------------------ 2667 // P*[]int T/F P *[]int 2668 // P*E T P *E 2669 // P*E F nil P*E 2670 // P([]int) T/F P []int 2671 // P(E) T P E 2672 // P(E) F nil P(E) 2673 // P*E|F|~G T/F P *E|F|~G 2674 // P*E|F|G T P *E|F|G 2675 // P*E|F|G F nil P*E|F|G 2676 func extractName(x ast.Expr, force bool) (*ast.Ident, ast.Expr) { 2677 switch x := x.(type) { 2678 case *ast.Ident: 2679 return x, nil 2680 case *ast.BinaryExpr: 2681 switch x.Op { 2682 case token.MUL: 2683 if name, _ := x.X.(*ast.Ident); name != nil && (force || isTypeElem(x.Y)) { 2684 // x = name *x.Y 2685 return name, &ast.StarExpr{Star: x.OpPos, X: x.Y} 2686 } 2687 case token.OR: 2688 if name, lhs := extractName(x.X, force || isTypeElem(x.Y)); name != nil && lhs != nil { 2689 // x = name lhs|x.Y 2690 op := *x 2691 op.X = lhs 2692 return name, &op 2693 } 2694 } 2695 case *ast.CallExpr: 2696 if name, _ := x.Fun.(*ast.Ident); name != nil { 2697 if len(x.Args) == 1 && x.Ellipsis == token.NoPos && (force || isTypeElem(x.Args[0])) { 2698 // x = name "(" x.ArgList[0] ")" 2699 return name, x.Args[0] 2700 } 2701 } 2702 } 2703 return nil, x 2704 } 2705 2706 // isTypeElem reports whether x is a (possibly parenthesized) type element expression. 2707 // The result is false if x could be a type element OR an ordinary (value) expression. 
2708 func isTypeElem(x ast.Expr) bool { 2709 switch x := x.(type) { 2710 case *ast.ArrayType, *ast.StructType, *ast.FuncType, *ast.InterfaceType, *ast.MapType, *ast.ChanType: 2711 return true 2712 case *ast.BinaryExpr: 2713 return isTypeElem(x.X) || isTypeElem(x.Y) 2714 case *ast.UnaryExpr: 2715 return x.Op == token.TILDE 2716 case *ast.ParenExpr: 2717 return isTypeElem(x.X) 2718 } 2719 return false 2720 } 2721 2722 func (p *parser) parseGenDecl(keyword token.Token, f parseSpecFunction) *ast.GenDecl { 2723 if p.trace { 2724 defer un(trace(p, "GenDecl("+keyword.String()+")")) 2725 } 2726 2727 doc := p.leadComment 2728 pos := p.expect(keyword) 2729 var lparen, rparen token.Pos 2730 var list []ast.Spec 2731 if p.tok == token.LPAREN { 2732 lparen = p.pos 2733 p.next() 2734 for iota := 0; p.tok != token.RPAREN && p.tok != token.EOF; iota++ { 2735 list = append(list, f(p.leadComment, keyword, iota)) 2736 } 2737 rparen = p.expect(token.RPAREN) 2738 p.expectSemi() 2739 } else { 2740 list = append(list, f(nil, keyword, 0)) 2741 } 2742 2743 return &ast.GenDecl{ 2744 Doc: doc, 2745 TokPos: pos, 2746 Tok: keyword, 2747 Lparen: lparen, 2748 Specs: list, 2749 Rparen: rparen, 2750 } 2751 } 2752 2753 func (p *parser) parseFuncDecl() *ast.FuncDecl { 2754 if p.trace { 2755 defer un(trace(p, "FunctionDecl")) 2756 } 2757 2758 doc := p.leadComment 2759 pos := p.expect(token.FUNC) 2760 2761 var recv *ast.FieldList 2762 if p.tok == token.LPAREN { 2763 _, recv = p.parseParameters(false) 2764 } 2765 2766 ident := p.parseIdent() 2767 2768 tparams, params := p.parseParameters(true) 2769 if recv != nil && tparams != nil { 2770 // Method declarations do not have type parameters. We parse them for a 2771 // better error message and improved error recovery. 
2772 p.error(tparams.Opening, "method must have no type parameters") 2773 tparams = nil 2774 } 2775 results := p.parseResult() 2776 2777 var body *ast.BlockStmt 2778 switch p.tok { 2779 case token.LBRACE: 2780 body = p.parseBody() 2781 p.expectSemi() 2782 case token.SEMICOLON: 2783 p.next() 2784 if p.tok == token.LBRACE { 2785 // opening { of function declaration on next line 2786 p.error(p.pos, "unexpected semicolon or newline before {") 2787 body = p.parseBody() 2788 p.expectSemi() 2789 } 2790 default: 2791 p.expectSemi() 2792 } 2793 2794 decl := &ast.FuncDecl{ 2795 Doc: doc, 2796 Recv: recv, 2797 Name: ident, 2798 Type: &ast.FuncType{ 2799 Func: pos, 2800 TypeParams: tparams, 2801 Params: params, 2802 Results: results, 2803 }, 2804 Body: body, 2805 } 2806 return decl 2807 } 2808 2809 func (p *parser) parseDecl(sync map[token.Token]bool) ast.Decl { 2810 if p.trace { 2811 defer un(trace(p, "Declaration")) 2812 } 2813 2814 var f parseSpecFunction 2815 switch p.tok { 2816 case token.IMPORT: 2817 f = p.parseImportSpec 2818 2819 case token.CONST, token.VAR: 2820 f = p.parseValueSpec 2821 2822 case token.TYPE: 2823 f = p.parseTypeSpec 2824 2825 case token.FUNC: 2826 return p.parseFuncDecl() 2827 2828 default: 2829 pos := p.pos 2830 p.errorExpected(pos, "declaration") 2831 p.advance(sync) 2832 return &ast.BadDecl{From: pos, To: p.pos} 2833 } 2834 2835 return p.parseGenDecl(p.tok, f) 2836 } 2837 2838 // ---------------------------------------------------------------------------- 2839 // Source files 2840 2841 func (p *parser) parseFile() *ast.File { 2842 if p.trace { 2843 defer un(trace(p, "File")) 2844 } 2845 2846 // Don't bother parsing the rest if we had errors scanning the first token. 2847 // Likely not a Go source file at all. 
2848 if p.errors.Len() != 0 { 2849 return nil 2850 } 2851 2852 // package clause 2853 doc := p.leadComment 2854 pos := p.expect(token.PACKAGE) 2855 // Go spec: The package clause is not a declaration; 2856 // the package name does not appear in any scope. 2857 ident := p.parseIdent() 2858 if ident.Name == "_" && p.mode&DeclarationErrors != 0 { 2859 p.error(p.pos, "invalid package name _") 2860 } 2861 p.expectSemi() 2862 2863 // Don't bother parsing the rest if we had errors parsing the package clause. 2864 // Likely not a Go source file at all. 2865 if p.errors.Len() != 0 { 2866 return nil 2867 } 2868 2869 var decls []ast.Decl 2870 if p.mode&PackageClauseOnly == 0 { 2871 // import decls 2872 for p.tok == token.IMPORT { 2873 decls = append(decls, p.parseGenDecl(token.IMPORT, p.parseImportSpec)) 2874 } 2875 2876 if p.mode&ImportsOnly == 0 { 2877 // rest of package body 2878 prev := token.IMPORT 2879 for p.tok != token.EOF { 2880 // Continue to accept import declarations for error tolerance, but complain. 2881 if p.tok == token.IMPORT && prev != token.IMPORT { 2882 p.error(p.pos, "imports must appear before other declarations") 2883 } 2884 prev = p.tok 2885 2886 decls = append(decls, p.parseDecl(declStart)) 2887 } 2888 } 2889 } 2890 2891 f := &ast.File{ 2892 Doc: doc, 2893 Package: pos, 2894 Name: ident, 2895 Decls: decls, 2896 FileStart: token.Pos(p.file.Base()), 2897 FileEnd: token.Pos(p.file.Base() + p.file.Size()), 2898 Imports: p.imports, 2899 Comments: p.comments, 2900 GoVersion: p.goVersion, 2901 } 2902 var declErr func(token.Pos, string) 2903 if p.mode&DeclarationErrors != 0 { 2904 declErr = p.error 2905 } 2906 if p.mode&SkipObjectResolution == 0 { 2907 resolveFile(f, p.file, declErr) 2908 } 2909 2910 return f 2911 }