github.com/bir3/gocompiler@v0.3.205/src/go/parser/parser.go (about) 1 // Copyright 2009 The Go Authors. All rights reserved. 2 // Use of this source code is governed by a BSD-style 3 // license that can be found in the LICENSE file. 4 5 // Package parser implements a parser for Go source files. Input may be 6 // provided in a variety of forms (see the various Parse* functions); the 7 // output is an abstract syntax tree (AST) representing the Go source. The 8 // parser is invoked through one of the Parse* functions. 9 // 10 // The parser accepts a larger language than is syntactically permitted by 11 // the Go spec, for simplicity, and for improved robustness in the presence 12 // of syntax errors. For instance, in method declarations, the receiver is 13 // treated like an ordinary parameter list and thus may contain multiple 14 // entries where the spec permits exactly one. Consequently, the corresponding 15 // field in the AST (ast.FuncDecl.Recv) field is not restricted to one entry. 16 package parser 17 18 import ( 19 "fmt" 20 "github.com/bir3/gocompiler/src/go/ast" 21 "github.com/bir3/gocompiler/src/go/internal/typeparams" 22 "github.com/bir3/gocompiler/src/go/scanner" 23 "github.com/bir3/gocompiler/src/go/token" 24 ) 25 26 // The parser structure holds the parser's internal state. 
type parser struct {
	file    *token.File
	errors  scanner.ErrorList
	scanner scanner.Scanner

	// Tracing/debugging
	mode   Mode // parsing mode
	trace  bool // == (mode&Trace != 0)
	indent int  // indentation used for tracing output

	// Comments
	comments    []*ast.CommentGroup
	leadComment *ast.CommentGroup // last lead comment
	lineComment *ast.CommentGroup // last line comment

	// Next token
	pos token.Pos   // token position
	tok token.Token // one token look-ahead
	lit string      // token literal

	// Error recovery
	// (used to limit the number of calls to parser.advance
	// w/o making scanning progress - avoids potential endless
	// loops across multiple parser functions during error recovery)
	syncPos token.Pos // last synchronization position
	syncCnt int       // number of parser.advance calls without progress

	// Non-syntactic parser control
	exprLev int  // < 0: in control clause, >= 0: in expression
	inRhs   bool // if set, the parser is parsing a rhs expression

	imports []*ast.ImportSpec // list of imports

	// nestLev is used to track and limit the recursion depth
	// during parsing.
	nestLev int
}

// init prepares the parser to parse src: it registers the file with fset,
// initializes the scanner (forwarding scan errors into p.errors), records
// the parsing mode, and advances to the first token.
func (p *parser) init(fset *token.FileSet, filename string, src []byte, mode Mode) {
	p.file = fset.AddFile(filename, -1, len(src))
	var m scanner.Mode
	if mode&ParseComments != 0 {
		m = scanner.ScanComments
	}
	eh := func(pos token.Position, msg string) { p.errors.Add(pos, msg) }
	p.scanner.Init(p.file, src, eh, m)

	p.mode = mode
	p.trace = mode&Trace != 0 // for convenience (p.trace is used frequently)
	p.next()
}

// ----------------------------------------------------------------------------
// Parsing support

// printTrace prints one line of tracing output, prefixed by the current
// source position and indented by 2*p.indent dots.
func (p *parser) printTrace(a ...any) {
	const dots = ". . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . "
	const n = len(dots)
	pos := p.file.Position(p.pos)
	fmt.Printf("%5d:%3d: ", pos.Line, pos.Column)
	i := 2 * p.indent
	for i > n {
		fmt.Print(dots)
		i -= n
	}
	// i <= n
	fmt.Print(dots[0:i])
	fmt.Println(a...)
}

// trace prints msg and an opening "(" and increases the trace indentation.
// It returns p so it can be combined with un (see below).
func trace(p *parser, msg string) *parser {
	p.printTrace(msg, "(")
	p.indent++
	return p
}

// Usage pattern: defer un(trace(p, "..."))
func un(p *parser) {
	p.indent--
	p.printTrace(")")
}

// maxNestLev is the deepest we're willing to recurse during parsing
const maxNestLev int = 1e5

// incNestLev increments the nesting level and bails out of parsing
// (via a bailout panic) if maxNestLev is exceeded.
func incNestLev(p *parser) *parser {
	p.nestLev++
	if p.nestLev > maxNestLev {
		p.error(p.pos, "exceeded max nesting depth")
		panic(bailout{})
	}
	return p
}

// decNestLev is used to track nesting depth during parsing to prevent stack exhaustion.
// It is used along with incNestLev in a similar fashion to how un and trace are used.
func decNestLev(p *parser) {
	p.nestLev--
}

// Advance to the next token.
func (p *parser) next0() {
	// Because of one-token look-ahead, print the previous token
	// when tracing as it provides a more readable output. The
	// very first token (!p.pos.IsValid()) is not initialized
	// (it is token.ILLEGAL), so don't print it.
	if p.trace && p.pos.IsValid() {
		s := p.tok.String()
		switch {
		case p.tok.IsLiteral():
			p.printTrace(s, p.lit)
		case p.tok.IsOperator(), p.tok.IsKeyword():
			p.printTrace("\"" + s + "\"")
		default:
			p.printTrace(s)
		}
	}

	p.pos, p.tok, p.lit = p.scanner.Scan()
}

// Consume a comment and return it and the line on which it ends.
func (p *parser) consumeComment() (comment *ast.Comment, endline int) {
	// /*-style comments may end on a different line than where they start.
	// Scan the comment for '\n' chars and adjust endline accordingly.
	endline = p.file.Line(p.pos)
	if p.lit[1] == '*' {
		// don't use range here - no need to decode Unicode code points
		for i := 0; i < len(p.lit); i++ {
			if p.lit[i] == '\n' {
				endline++
			}
		}
	}

	comment = &ast.Comment{Slash: p.pos, Text: p.lit}
	p.next0()

	return
}

// Consume a group of adjacent comments, add it to the parser's
// comments list, and return it together with the line at which
// the last comment in the group ends. A non-comment token or n
// empty lines terminate a comment group.
func (p *parser) consumeCommentGroup(n int) (comments *ast.CommentGroup, endline int) {
	var list []*ast.Comment
	endline = p.file.Line(p.pos)
	for p.tok == token.COMMENT && p.file.Line(p.pos) <= endline+n {
		var comment *ast.Comment
		comment, endline = p.consumeComment()
		list = append(list, comment)
	}

	// add comment group to the comments list
	comments = &ast.CommentGroup{List: list}
	p.comments = append(p.comments, comments)

	return
}

// Advance to the next non-comment token. In the process, collect
// any comment groups encountered, and remember the last lead and
// line comments.
//
// A lead comment is a comment group that starts and ends in a
// line without any other tokens and that is followed by a non-comment
// token on the line immediately after the comment group.
//
// A line comment is a comment group that follows a non-comment
// token on the same line, and that has no tokens after it on the line
// where it ends.
//
// Lead and line comments may be considered documentation that is
// stored in the AST.
func (p *parser) next() {
	p.leadComment = nil
	p.lineComment = nil
	prev := p.pos
	p.next0()

	if p.tok == token.COMMENT {
		var comment *ast.CommentGroup
		var endline int

		if p.file.Line(p.pos) == p.file.Line(prev) {
			// The comment is on same line as the previous token; it
			// cannot be a lead comment but may be a line comment.
			comment, endline = p.consumeCommentGroup(0)
			if p.file.Line(p.pos) != endline || p.tok == token.SEMICOLON || p.tok == token.EOF {
				// The next token is on a different line, thus
				// the last comment group is a line comment.
				p.lineComment = comment
			}
		}

		// consume successor comments, if any
		endline = -1
		for p.tok == token.COMMENT {
			comment, endline = p.consumeCommentGroup(1)
		}

		if endline+1 == p.file.Line(p.pos) {
			// The next token is following on the line immediately after the
			// comment group, thus the last comment group is a lead comment.
			p.leadComment = comment
		}
	}
}

// A bailout panic is raised to indicate early termination. pos and msg are
// only populated when bailing out of object resolution.
type bailout struct {
	pos token.Pos
	msg string
}

// error records an error at the given position. Unless AllErrors mode is
// set, it drops errors on the same line as the previous error and bails
// out (via a bailout panic) after more than 10 recorded errors.
func (p *parser) error(pos token.Pos, msg string) {
	if p.trace {
		defer un(trace(p, "error: "+msg))
	}

	epos := p.file.Position(pos)

	// If AllErrors is not set, discard errors reported on the same line
	// as the last recorded error and stop parsing if there are more than
	// 10 errors.
	if p.mode&AllErrors == 0 {
		n := len(p.errors)
		if n > 0 && p.errors[n-1].Pos.Line == epos.Line {
			return // discard - likely a spurious error
		}
		if n > 10 {
			panic(bailout{})
		}
	}

	p.errors.Add(epos, msg)
}

// errorExpected reports that msg was expected at pos; if pos is the
// current position, the message also describes what was found instead.
func (p *parser) errorExpected(pos token.Pos, msg string) {
	msg = "expected " + msg
	if pos == p.pos {
		// the error happened at the current position;
		// make the error message more specific
		switch {
		case p.tok == token.SEMICOLON && p.lit == "\n":
			msg += ", found newline"
		case p.tok.IsLiteral():
			// print 123 rather than 'INT', etc.
			msg += ", found " + p.lit
		default:
			msg += ", found '" + p.tok.String() + "'"
		}
	}
	p.error(pos, msg)
}

// expect consumes the current token, reporting an error if it is not tok,
// and returns its position. It always advances, to guarantee progress.
func (p *parser) expect(tok token.Token) token.Pos {
	pos := p.pos
	if p.tok != tok {
		p.errorExpected(pos, "'"+tok.String()+"'")
	}
	p.next() // make progress
	return pos
}

// expect2 is like expect, but it returns an invalid position
// if the expected token is not found.
func (p *parser) expect2(tok token.Token) (pos token.Pos) {
	if p.tok == tok {
		pos = p.pos
	} else {
		p.errorExpected(p.pos, "'"+tok.String()+"'")
	}
	p.next() // make progress
	return
}

// expectClosing is like expect but provides a better error message
// for the common case of a missing comma before a newline.
func (p *parser) expectClosing(tok token.Token, context string) token.Pos {
	if p.tok != tok && p.tok == token.SEMICOLON && p.lit == "\n" {
		p.error(p.pos, "missing ',' before newline in "+context)
		p.next()
	}
	return p.expect(tok)
}

// expectSemi consumes a semicolon and returns the applicable line comment.
func (p *parser) expectSemi() (comment *ast.CommentGroup) {
	// semicolon is optional before a closing ')' or '}'
	if p.tok != token.RPAREN && p.tok != token.RBRACE {
		switch p.tok {
		case token.COMMA:
			// permit a ',' instead of a ';' but complain
			p.errorExpected(p.pos, "';'")
			fallthrough
		case token.SEMICOLON:
			if p.lit == ";" {
				// explicit semicolon
				p.next()
				comment = p.lineComment // use following comments
			} else {
				// artificial semicolon
				comment = p.lineComment // use preceding comments
				p.next()
			}
			return comment
		default:
			p.errorExpected(p.pos, "';'")
			p.advance(stmtStart)
		}
	}
	return nil
}

// atComma reports whether the parser sits at a ',' in the given context.
// If the current token is neither ',' nor follow, it reports a missing
// comma but returns true anyway so the caller can continue as if the
// comma were present.
func (p *parser) atComma(context string, follow token.Token) bool {
	if p.tok == token.COMMA {
		return true
	}
	if p.tok != follow {
		msg := "missing ','"
		if p.tok == token.SEMICOLON && p.lit == "\n" {
			msg += " before newline"
		}
		p.error(p.pos, msg+" in "+context)
		return true // "insert" comma and continue
	}
	return false
}

// assert panics with an internal-error message if cond is false.
func assert(cond bool, msg string) {
	if !cond {
		panic("go/parser internal error: " + msg)
	}
}

// advance consumes tokens until the current token p.tok
// is in the 'to' set, or token.EOF. For error recovery.
func (p *parser) advance(to map[token.Token]bool) {
	for ; p.tok != token.EOF; p.next() {
		if to[p.tok] {
			// Return only if parser made some progress since last
			// sync or if it has not reached 10 advance calls without
			// progress. Otherwise consume at least one token to
			// avoid an endless parser loop (it is possible that
			// both parseOperand and parseStmt call advance and
			// correctly do not advance, thus the need for the
			// invocation limit p.syncCnt).
			if p.pos == p.syncPos && p.syncCnt < 10 {
				p.syncCnt++
				return
			}
			if p.pos > p.syncPos {
				p.syncPos = p.pos
				p.syncCnt = 0
				return
			}
			// Reaching here indicates a parser bug, likely an
			// incorrect token list in this function, but it only
			// leads to skipping of possibly correct code if a
			// previous error is present, and thus is preferred
			// over a non-terminating parse.
		}
	}
}

// stmtStart is the set of tokens that may start a statement;
// used as a synchronization set by advance.
var stmtStart = map[token.Token]bool{
	token.BREAK:       true,
	token.CONST:       true,
	token.CONTINUE:    true,
	token.DEFER:       true,
	token.FALLTHROUGH: true,
	token.FOR:         true,
	token.GO:          true,
	token.GOTO:        true,
	token.IF:          true,
	token.RETURN:      true,
	token.SELECT:      true,
	token.SWITCH:      true,
	token.TYPE:        true,
	token.VAR:         true,
}

// declStart is the set of tokens that may start a declaration;
// used as a synchronization set by advance.
var declStart = map[token.Token]bool{
	token.IMPORT: true,
	token.CONST:  true,
	token.TYPE:   true,
	token.VAR:    true,
}

// exprEnd is the set of tokens that may follow an expression;
// used as a synchronization set by advance.
var exprEnd = map[token.Token]bool{
	token.COMMA:     true,
	token.COLON:     true,
	token.SEMICOLON: true,
	token.RPAREN:    true,
	token.RBRACK:    true,
	token.RBRACE:    true,
}

// safePos returns a valid file position for a given position: If pos
// is valid to begin with, safePos returns pos. If pos is out-of-range,
// safePos returns the EOF position.
//
// This is a hack to work around "artificial" end positions in the AST which
// are computed by adding 1 to (presumably valid) token positions. If the
// token positions are invalid due to parse errors, the resulting end position
// may be past the file's EOF position, which would lead to panics if used
// later on.
func (p *parser) safePos(pos token.Pos) (res token.Pos) {
	defer func() {
		if recover() != nil {
			res = token.Pos(p.file.Base() + p.file.Size()) // EOF position
		}
	}()
	_ = p.file.Offset(pos) // trigger a panic if position is out-of-range
	return pos
}

// ----------------------------------------------------------------------------
// Identifiers

// parseIdent parses an identifier. If the current token is not an
// identifier, it reports an error and returns a placeholder "_" ident
// at the current position.
func (p *parser) parseIdent() *ast.Ident {
	pos := p.pos
	name := "_"
	if p.tok == token.IDENT {
		name = p.lit
		p.next()
	} else {
		p.expect(token.IDENT) // use expect() error handling
	}
	return &ast.Ident{NamePos: pos, Name: name}
}

// parseIdentList parses a comma-separated list of one or more identifiers.
func (p *parser) parseIdentList() (list []*ast.Ident) {
	if p.trace {
		defer un(trace(p, "IdentList"))
	}

	list = append(list, p.parseIdent())
	for p.tok == token.COMMA {
		p.next()
		list = append(list, p.parseIdent())
	}

	return
}

// ----------------------------------------------------------------------------
// Common productions

// If lhs is set, result list elements which are identifiers are not resolved.
func (p *parser) parseExprList() (list []ast.Expr) {
	if p.trace {
		defer un(trace(p, "ExpressionList"))
	}

	list = append(list, p.parseExpr())
	for p.tok == token.COMMA {
		p.next()
		list = append(list, p.parseExpr())
	}

	return
}

// parseList parses an expression list with p.inRhs set to inRhs for the
// duration, restoring the previous value afterwards.
func (p *parser) parseList(inRhs bool) []ast.Expr {
	old := p.inRhs
	p.inRhs = inRhs
	list := p.parseExprList()
	p.inRhs = old
	return list
}

// ----------------------------------------------------------------------------
// Types

// parseType parses a type. If no type is found, it reports an error,
// advances to the next expression end, and returns a BadExpr covering
// the skipped range.
func (p *parser) parseType() ast.Expr {
	if p.trace {
		defer un(trace(p, "Type"))
	}

	typ := p.tryIdentOrType()

	if typ == nil {
		pos := p.pos
		p.errorExpected(pos, "type")
		p.advance(exprEnd)
		return &ast.BadExpr{From: pos, To: p.pos}
	}

	return typ
}

// parseQualifiedIdent parses a possibly package-qualified type name,
// followed by an optional type instantiation ("[" ... "]").
// If ident != nil, it is the already parsed first identifier.
func (p *parser) parseQualifiedIdent(ident *ast.Ident) ast.Expr {
	if p.trace {
		defer un(trace(p, "QualifiedIdent"))
	}

	typ := p.parseTypeName(ident)
	if p.tok == token.LBRACK {
		typ = p.parseTypeInstance(typ)
	}

	return typ
}

// If the result is an identifier, it is not resolved.
func (p *parser) parseTypeName(ident *ast.Ident) ast.Expr {
	if p.trace {
		defer un(trace(p, "TypeName"))
	}

	if ident == nil {
		ident = p.parseIdent()
	}

	if p.tok == token.PERIOD {
		// ident is a package name
		p.next()
		sel := p.parseIdent()
		return &ast.SelectorExpr{X: ident, Sel: sel}
	}

	return ident
}

// "[" has already been consumed, and lbrack is its position.
// If len != nil it is the already consumed array length.
557 func (p *parser) parseArrayType(lbrack token.Pos, len ast.Expr) *ast.ArrayType { 558 if p.trace { 559 defer un(trace(p, "ArrayType")) 560 } 561 562 if len == nil { 563 p.exprLev++ 564 // always permit ellipsis for more fault-tolerant parsing 565 if p.tok == token.ELLIPSIS { 566 len = &ast.Ellipsis{Ellipsis: p.pos} 567 p.next() 568 } else if p.tok != token.RBRACK { 569 len = p.parseRhs() 570 } 571 p.exprLev-- 572 } 573 if p.tok == token.COMMA { 574 // Trailing commas are accepted in type parameter 575 // lists but not in array type declarations. 576 // Accept for better error handling but complain. 577 p.error(p.pos, "unexpected comma; expecting ]") 578 p.next() 579 } 580 p.expect(token.RBRACK) 581 elt := p.parseType() 582 return &ast.ArrayType{Lbrack: lbrack, Len: len, Elt: elt} 583 } 584 585 func (p *parser) parseArrayFieldOrTypeInstance(x *ast.Ident) (*ast.Ident, ast.Expr) { 586 if p.trace { 587 defer un(trace(p, "ArrayFieldOrTypeInstance")) 588 } 589 590 lbrack := p.expect(token.LBRACK) 591 trailingComma := token.NoPos // if valid, the position of a trailing comma preceding the ']' 592 var args []ast.Expr 593 if p.tok != token.RBRACK { 594 p.exprLev++ 595 args = append(args, p.parseRhs()) 596 for p.tok == token.COMMA { 597 comma := p.pos 598 p.next() 599 if p.tok == token.RBRACK { 600 trailingComma = comma 601 break 602 } 603 args = append(args, p.parseRhs()) 604 } 605 p.exprLev-- 606 } 607 rbrack := p.expect(token.RBRACK) 608 609 if len(args) == 0 { 610 // x []E 611 elt := p.parseType() 612 return x, &ast.ArrayType{Lbrack: lbrack, Elt: elt} 613 } 614 615 // x [P]E or x[P] 616 if len(args) == 1 { 617 elt := p.tryIdentOrType() 618 if elt != nil { 619 // x [P]E 620 if trailingComma.IsValid() { 621 // Trailing commas are invalid in array type fields. 622 p.error(trailingComma, "unexpected comma; expecting ]") 623 } 624 return x, &ast.ArrayType{Lbrack: lbrack, Len: args[0], Elt: elt} 625 } 626 } 627 628 // x[P], x[P1, P2], ... 
629 return nil, typeparams.PackIndexExpr(x, lbrack, args, rbrack) 630 } 631 632 func (p *parser) parseFieldDecl() *ast.Field { 633 if p.trace { 634 defer un(trace(p, "FieldDecl")) 635 } 636 637 doc := p.leadComment 638 639 var names []*ast.Ident 640 var typ ast.Expr 641 switch p.tok { 642 case token.IDENT: 643 name := p.parseIdent() 644 if p.tok == token.PERIOD || p.tok == token.STRING || p.tok == token.SEMICOLON || p.tok == token.RBRACE { 645 // embedded type 646 typ = name 647 if p.tok == token.PERIOD { 648 typ = p.parseQualifiedIdent(name) 649 } 650 } else { 651 // name1, name2, ... T 652 names = []*ast.Ident{name} 653 for p.tok == token.COMMA { 654 p.next() 655 names = append(names, p.parseIdent()) 656 } 657 // Careful dance: We don't know if we have an embedded instantiated 658 // type T[P1, P2, ...] or a field T of array type []E or [P]E. 659 if len(names) == 1 && p.tok == token.LBRACK { 660 name, typ = p.parseArrayFieldOrTypeInstance(name) 661 if name == nil { 662 names = nil 663 } 664 } else { 665 // T P 666 typ = p.parseType() 667 } 668 } 669 case token.MUL: 670 star := p.pos 671 p.next() 672 if p.tok == token.LPAREN { 673 // *(T) 674 p.error(p.pos, "cannot parenthesize embedded type") 675 p.next() 676 typ = p.parseQualifiedIdent(nil) 677 // expect closing ')' but no need to complain if missing 678 if p.tok == token.RPAREN { 679 p.next() 680 } 681 } else { 682 // *T 683 typ = p.parseQualifiedIdent(nil) 684 } 685 typ = &ast.StarExpr{Star: star, X: typ} 686 687 case token.LPAREN: 688 p.error(p.pos, "cannot parenthesize embedded type") 689 p.next() 690 if p.tok == token.MUL { 691 // (*T) 692 star := p.pos 693 p.next() 694 typ = &ast.StarExpr{Star: star, X: p.parseQualifiedIdent(nil)} 695 } else { 696 // (T) 697 typ = p.parseQualifiedIdent(nil) 698 } 699 // expect closing ')' but no need to complain if missing 700 if p.tok == token.RPAREN { 701 p.next() 702 } 703 704 default: 705 pos := p.pos 706 p.errorExpected(pos, "field name or embedded type") 707 
p.advance(exprEnd) 708 typ = &ast.BadExpr{From: pos, To: p.pos} 709 } 710 711 var tag *ast.BasicLit 712 if p.tok == token.STRING { 713 tag = &ast.BasicLit{ValuePos: p.pos, Kind: p.tok, Value: p.lit} 714 p.next() 715 } 716 717 comment := p.expectSemi() 718 719 field := &ast.Field{Doc: doc, Names: names, Type: typ, Tag: tag, Comment: comment} 720 return field 721 } 722 723 func (p *parser) parseStructType() *ast.StructType { 724 if p.trace { 725 defer un(trace(p, "StructType")) 726 } 727 728 pos := p.expect(token.STRUCT) 729 lbrace := p.expect(token.LBRACE) 730 var list []*ast.Field 731 for p.tok == token.IDENT || p.tok == token.MUL || p.tok == token.LPAREN { 732 // a field declaration cannot start with a '(' but we accept 733 // it here for more robust parsing and better error messages 734 // (parseFieldDecl will check and complain if necessary) 735 list = append(list, p.parseFieldDecl()) 736 } 737 rbrace := p.expect(token.RBRACE) 738 739 return &ast.StructType{ 740 Struct: pos, 741 Fields: &ast.FieldList{ 742 Opening: lbrace, 743 List: list, 744 Closing: rbrace, 745 }, 746 } 747 } 748 749 func (p *parser) parsePointerType() *ast.StarExpr { 750 if p.trace { 751 defer un(trace(p, "PointerType")) 752 } 753 754 star := p.expect(token.MUL) 755 base := p.parseType() 756 757 return &ast.StarExpr{Star: star, X: base} 758 } 759 760 func (p *parser) parseDotsType() *ast.Ellipsis { 761 if p.trace { 762 defer un(trace(p, "DotsType")) 763 } 764 765 pos := p.expect(token.ELLIPSIS) 766 elt := p.parseType() 767 768 return &ast.Ellipsis{Ellipsis: pos, Elt: elt} 769 } 770 771 type field struct { 772 name *ast.Ident 773 typ ast.Expr 774 } 775 776 func (p *parser) parseParamDecl(name *ast.Ident, typeSetsOK bool) (f field) { 777 // TODO(rFindley) refactor to be more similar to paramDeclOrNil in the syntax 778 // package 779 if p.trace { 780 defer un(trace(p, "ParamDeclOrNil")) 781 } 782 783 ptok := p.tok 784 if name != nil { 785 p.tok = token.IDENT // force token.IDENT case in switch 
below 786 } else if typeSetsOK && p.tok == token.TILDE { 787 // "~" ... 788 return field{nil, p.embeddedElem(nil)} 789 } 790 791 switch p.tok { 792 case token.IDENT: 793 // name 794 if name != nil { 795 f.name = name 796 p.tok = ptok 797 } else { 798 f.name = p.parseIdent() 799 } 800 switch p.tok { 801 case token.IDENT, token.MUL, token.ARROW, token.FUNC, token.CHAN, token.MAP, token.STRUCT, token.INTERFACE, token.LPAREN: 802 // name type 803 f.typ = p.parseType() 804 805 case token.LBRACK: 806 // name "[" type1, ..., typeN "]" or name "[" n "]" type 807 f.name, f.typ = p.parseArrayFieldOrTypeInstance(f.name) 808 809 case token.ELLIPSIS: 810 // name "..." type 811 f.typ = p.parseDotsType() 812 return // don't allow ...type "|" ... 813 814 case token.PERIOD: 815 // name "." ... 816 f.typ = p.parseQualifiedIdent(f.name) 817 f.name = nil 818 819 case token.TILDE: 820 if typeSetsOK { 821 f.typ = p.embeddedElem(nil) 822 return 823 } 824 825 case token.OR: 826 if typeSetsOK { 827 // name "|" typeset 828 f.typ = p.embeddedElem(f.name) 829 f.name = nil 830 return 831 } 832 } 833 834 case token.MUL, token.ARROW, token.FUNC, token.LBRACK, token.CHAN, token.MAP, token.STRUCT, token.INTERFACE, token.LPAREN: 835 // type 836 f.typ = p.parseType() 837 838 case token.ELLIPSIS: 839 // "..." type 840 // (always accepted) 841 f.typ = p.parseDotsType() 842 return // don't allow ...type "|" ... 843 844 default: 845 // TODO(rfindley): this is incorrect in the case of type parameter lists 846 // (should be "']'" in that case) 847 p.errorExpected(p.pos, "')'") 848 p.advance(exprEnd) 849 } 850 851 // [name] type "|" 852 if typeSetsOK && p.tok == token.OR && f.typ != nil { 853 f.typ = p.embeddedElem(f.typ) 854 } 855 856 return 857 } 858 859 func (p *parser) parseParameterList(name0 *ast.Ident, typ0 ast.Expr, closing token.Token) (params []*ast.Field) { 860 if p.trace { 861 defer un(trace(p, "ParameterList")) 862 } 863 864 // Type parameters are the only parameter list closed by ']'. 
865 tparams := closing == token.RBRACK 866 // Type set notation is ok in type parameter lists. 867 typeSetsOK := tparams 868 869 pos := p.pos 870 if name0 != nil { 871 pos = name0.Pos() 872 } 873 874 var list []field 875 var named int // number of parameters that have an explicit name and type 876 877 for name0 != nil || p.tok != closing && p.tok != token.EOF { 878 var par field 879 if typ0 != nil { 880 if typeSetsOK { 881 typ0 = p.embeddedElem(typ0) 882 } 883 par = field{name0, typ0} 884 } else { 885 par = p.parseParamDecl(name0, typeSetsOK) 886 } 887 name0 = nil // 1st name was consumed if present 888 typ0 = nil // 1st typ was consumed if present 889 if par.name != nil || par.typ != nil { 890 list = append(list, par) 891 if par.name != nil && par.typ != nil { 892 named++ 893 } 894 } 895 if !p.atComma("parameter list", closing) { 896 break 897 } 898 p.next() 899 } 900 901 if len(list) == 0 { 902 return // not uncommon 903 } 904 905 // TODO(gri) parameter distribution and conversion to []*ast.Field 906 // can be combined and made more efficient 907 908 // distribute parameter types 909 if named == 0 { 910 // all unnamed => found names are type names 911 for i := 0; i < len(list); i++ { 912 par := &list[i] 913 if typ := par.name; typ != nil { 914 par.typ = typ 915 par.name = nil 916 } 917 } 918 if tparams { 919 p.error(pos, "type parameters must be named") 920 } 921 } else if named != len(list) { 922 // some named => all must be named 923 ok := true 924 var typ ast.Expr 925 missingName := pos 926 for i := len(list) - 1; i >= 0; i-- { 927 if par := &list[i]; par.typ != nil { 928 typ = par.typ 929 if par.name == nil { 930 ok = false 931 missingName = par.typ.Pos() 932 n := ast.NewIdent("_") 933 n.NamePos = typ.Pos() // correct position 934 par.name = n 935 } 936 } else if typ != nil { 937 par.typ = typ 938 } else { 939 // par.typ == nil && typ == nil => we only have a par.name 940 ok = false 941 missingName = par.name.Pos() 942 par.typ = &ast.BadExpr{From: 
par.name.Pos(), To: p.pos} 943 } 944 } 945 if !ok { 946 if tparams { 947 p.error(missingName, "type parameters must be named") 948 } else { 949 p.error(pos, "mixed named and unnamed parameters") 950 } 951 } 952 } 953 954 // convert list []*ast.Field 955 if named == 0 { 956 // parameter list consists of types only 957 for _, par := range list { 958 assert(par.typ != nil, "nil type in unnamed parameter list") 959 params = append(params, &ast.Field{Type: par.typ}) 960 } 961 return 962 } 963 964 // parameter list consists of named parameters with types 965 var names []*ast.Ident 966 var typ ast.Expr 967 addParams := func() { 968 assert(typ != nil, "nil type in named parameter list") 969 field := &ast.Field{Names: names, Type: typ} 970 params = append(params, field) 971 names = nil 972 } 973 for _, par := range list { 974 if par.typ != typ { 975 if len(names) > 0 { 976 addParams() 977 } 978 typ = par.typ 979 } 980 names = append(names, par.name) 981 } 982 if len(names) > 0 { 983 addParams() 984 } 985 return 986 } 987 988 func (p *parser) parseParameters(acceptTParams bool) (tparams, params *ast.FieldList) { 989 if p.trace { 990 defer un(trace(p, "Parameters")) 991 } 992 993 if acceptTParams && p.tok == token.LBRACK { 994 opening := p.pos 995 p.next() 996 // [T any](params) syntax 997 list := p.parseParameterList(nil, nil, token.RBRACK) 998 rbrack := p.expect(token.RBRACK) 999 tparams = &ast.FieldList{Opening: opening, List: list, Closing: rbrack} 1000 // Type parameter lists must not be empty. 
1001 if tparams.NumFields() == 0 { 1002 p.error(tparams.Closing, "empty type parameter list") 1003 tparams = nil // avoid follow-on errors 1004 } 1005 } 1006 1007 opening := p.expect(token.LPAREN) 1008 1009 var fields []*ast.Field 1010 if p.tok != token.RPAREN { 1011 fields = p.parseParameterList(nil, nil, token.RPAREN) 1012 } 1013 1014 rparen := p.expect(token.RPAREN) 1015 params = &ast.FieldList{Opening: opening, List: fields, Closing: rparen} 1016 1017 return 1018 } 1019 1020 func (p *parser) parseResult() *ast.FieldList { 1021 if p.trace { 1022 defer un(trace(p, "Result")) 1023 } 1024 1025 if p.tok == token.LPAREN { 1026 _, results := p.parseParameters(false) 1027 return results 1028 } 1029 1030 typ := p.tryIdentOrType() 1031 if typ != nil { 1032 list := make([]*ast.Field, 1) 1033 list[0] = &ast.Field{Type: typ} 1034 return &ast.FieldList{List: list} 1035 } 1036 1037 return nil 1038 } 1039 1040 func (p *parser) parseFuncType() *ast.FuncType { 1041 if p.trace { 1042 defer un(trace(p, "FuncType")) 1043 } 1044 1045 pos := p.expect(token.FUNC) 1046 tparams, params := p.parseParameters(true) 1047 if tparams != nil { 1048 p.error(tparams.Pos(), "function type must have no type parameters") 1049 } 1050 results := p.parseResult() 1051 1052 return &ast.FuncType{Func: pos, Params: params, Results: results} 1053 } 1054 1055 func (p *parser) parseMethodSpec() *ast.Field { 1056 if p.trace { 1057 defer un(trace(p, "MethodSpec")) 1058 } 1059 1060 doc := p.leadComment 1061 var idents []*ast.Ident 1062 var typ ast.Expr 1063 x := p.parseTypeName(nil) 1064 if ident, _ := x.(*ast.Ident); ident != nil { 1065 switch { 1066 case p.tok == token.LBRACK: 1067 // generic method or embedded instantiated type 1068 lbrack := p.pos 1069 p.next() 1070 p.exprLev++ 1071 x := p.parseExpr() 1072 p.exprLev-- 1073 if name0, _ := x.(*ast.Ident); name0 != nil && p.tok != token.COMMA && p.tok != token.RBRACK { 1074 // generic method m[T any] 1075 // 1076 // Interface methods do not have type 
parameters. We parse them for a
				// better error message and improved error recovery.
				_ = p.parseParameterList(name0, nil, token.RBRACK)
				_ = p.expect(token.RBRACK)
				p.error(lbrack, "interface method must have no type parameters")

				// TODO(rfindley) refactor to share code with parseFuncType.
				_, params := p.parseParameters(false)
				results := p.parseResult()
				idents = []*ast.Ident{ident}
				typ = &ast.FuncType{
					Func:    token.NoPos,
					Params:  params,
					Results: results,
				}
			} else {
				// embedded instantiated type
				// TODO(rfindley) should resolve all identifiers in x.
				list := []ast.Expr{x}
				if p.atComma("type argument list", token.RBRACK) {
					p.exprLev++
					p.next()
					for p.tok != token.RBRACK && p.tok != token.EOF {
						list = append(list, p.parseType())
						if !p.atComma("type argument list", token.RBRACK) {
							break
						}
						p.next()
					}
					p.exprLev--
				}
				rbrack := p.expectClosing(token.RBRACK, "type argument list")
				typ = typeparams.PackIndexExpr(ident, lbrack, list, rbrack)
			}
		case p.tok == token.LPAREN:
			// ordinary method
			// TODO(rfindley) refactor to share code with parseFuncType.
			_, params := p.parseParameters(false)
			results := p.parseResult()
			idents = []*ast.Ident{ident}
			typ = &ast.FuncType{Func: token.NoPos, Params: params, Results: results}
		default:
			// embedded type
			typ = x
		}
	} else {
		// embedded, possibly instantiated type
		typ = x
		if p.tok == token.LBRACK {
			// embedded instantiated interface
			typ = p.parseTypeInstance(typ)
		}
	}

	// Comment is added at the callsite: the field below may be joined with
	// additional type specs using '|'.
	// TODO(rfindley) this should be refactored.
	// TODO(rfindley) add more tests for comment handling.
	return &ast.Field{Doc: doc, Names: idents, Type: typ}
}

// embeddedElem parses a union of embedded terms joined by '|'.
// If x is non-nil it is used as the first, already parsed, term.
func (p *parser) embeddedElem(x ast.Expr) ast.Expr {
	if p.trace {
		defer un(trace(p, "EmbeddedElem"))
	}
	if x == nil {
		x = p.embeddedTerm()
	}
	for p.tok == token.OR {
		t := new(ast.BinaryExpr)
		t.OpPos = p.pos
		t.Op = token.OR
		p.next()
		t.X = x
		t.Y = p.embeddedTerm()
		x = t
	}
	return x
}

// embeddedTerm parses one term of a union type element: either a type
// or a '~' followed by a type.
func (p *parser) embeddedTerm() ast.Expr {
	if p.trace {
		defer un(trace(p, "EmbeddedTerm"))
	}
	if p.tok == token.TILDE {
		t := new(ast.UnaryExpr)
		t.OpPos = p.pos
		t.Op = token.TILDE
		p.next()
		t.X = p.parseType()
		return t
	}

	t := p.tryIdentOrType()
	if t == nil {
		pos := p.pos
		p.errorExpected(pos, "~ term or type")
		p.advance(exprEnd)
		return &ast.BadExpr{From: pos, To: p.pos}
	}

	return t
}

// parseInterfaceType parses an interface type, including method
// specifications and embedded (possibly union) type elements.
func (p *parser) parseInterfaceType() *ast.InterfaceType {
	if p.trace {
		defer un(trace(p, "InterfaceType"))
	}

	pos := p.expect(token.INTERFACE)
	lbrace := p.expect(token.LBRACE)

	var list []*ast.Field

parseElements:
	for {
		switch {
		case p.tok == token.IDENT:
			f := p.parseMethodSpec()
			if f.Names == nil {
				f.Type = p.embeddedElem(f.Type)
			}
			f.Comment = p.expectSemi()
			list = append(list, f)
		case p.tok == token.TILDE:
			typ := p.embeddedElem(nil)
			comment := p.expectSemi()
			list = append(list, &ast.Field{Type: typ, Comment: comment})
		default:
			if t := p.tryIdentOrType(); t != nil {
				typ := p.embeddedElem(t)
				comment := p.expectSemi()
				list = append(list, &ast.Field{Type: typ, Comment: comment})
			} else {
				break parseElements
			}
		}
	}

	// TODO(rfindley): the error produced here could be improved, since we could
	// accept an identifier, 'type', or a '}' at this point.
	rbrace := p.expect(token.RBRACE)

	return &ast.InterfaceType{
		Interface: pos,
		Methods: &ast.FieldList{
			Opening: lbrace,
			List:    list,
			Closing: rbrace,
		},
	}
}

// parseMapType parses a map type: "map" "[" KeyType "]" ElementType.
func (p *parser) parseMapType() *ast.MapType {
	if p.trace {
		defer un(trace(p, "MapType"))
	}

	pos := p.expect(token.MAP)
	p.expect(token.LBRACK)
	key := p.parseType()
	p.expect(token.RBRACK)
	value := p.parseType()

	return &ast.MapType{Map: pos, Key: key, Value: value}
}

// parseChanType parses a channel type in either form:
// "chan" ["<-"] ElementType, or "<-" "chan" ElementType.
func (p *parser) parseChanType() *ast.ChanType {
	if p.trace {
		defer un(trace(p, "ChanType"))
	}

	pos := p.pos
	dir := ast.SEND | ast.RECV
	var arrow token.Pos
	if p.tok == token.CHAN {
		p.next()
		if p.tok == token.ARROW {
			arrow = p.pos
			p.next()
			dir = ast.SEND
		}
	} else {
		arrow = p.expect(token.ARROW)
		p.expect(token.CHAN)
		dir = ast.RECV
	}
	value := p.parseType()

	return &ast.ChanType{Begin: pos, Arrow: arrow, Dir: dir, Value: value}
}

// parseTypeInstance parses the bracketed type argument list of an
// instantiated generic type; typ is the operand being instantiated.
func (p *parser) parseTypeInstance(typ ast.Expr) ast.Expr {
	if p.trace {
		defer un(trace(p, "TypeInstance"))
	}

	opening := p.expect(token.LBRACK)
	p.exprLev++
	var list []ast.Expr
	for p.tok != token.RBRACK && p.tok != token.EOF {
		list = append(list, p.parseType())
		if !p.atComma("type argument list", token.RBRACK) {
			break
		}
		p.next()
	}
	p.exprLev--

	closing := p.expectClosing(token.RBRACK, "type argument list")

	if len(list) == 0 {
		p.errorExpected(closing, "type argument list")
		return &ast.IndexExpr{
			X:      typ,
			Lbrack: opening,
			Index:  &ast.BadExpr{From: opening + 1, To: closing},
			Rbrack: closing,
		}
	}

	return typeparams.PackIndexExpr(typ, opening, list, closing)
}

// tryIdentOrType parses a type if one is present at the current token
// and returns nil otherwise (no error is reported in that case).
func (p *parser) tryIdentOrType() ast.Expr {
	defer
decNestLev(incNestLev(p))

	switch p.tok {
	case token.IDENT:
		typ := p.parseTypeName(nil)
		if p.tok == token.LBRACK {
			typ = p.parseTypeInstance(typ)
		}
		return typ
	case token.LBRACK:
		lbrack := p.expect(token.LBRACK)
		return p.parseArrayType(lbrack, nil)
	case token.STRUCT:
		return p.parseStructType()
	case token.MUL:
		return p.parsePointerType()
	case token.FUNC:
		return p.parseFuncType()
	case token.INTERFACE:
		return p.parseInterfaceType()
	case token.MAP:
		return p.parseMapType()
	case token.CHAN, token.ARROW:
		return p.parseChanType()
	case token.LPAREN:
		lparen := p.pos
		p.next()
		typ := p.parseType()
		rparen := p.expect(token.RPAREN)
		return &ast.ParenExpr{Lparen: lparen, X: typ, Rparen: rparen}
	}

	// no type found
	return nil
}

// ----------------------------------------------------------------------------
// Blocks

// parseStmtList parses statements until a 'case', 'default', closing
// brace, or end of file is reached.
func (p *parser) parseStmtList() (list []ast.Stmt) {
	if p.trace {
		defer un(trace(p, "StatementList"))
	}

	for p.tok != token.CASE && p.tok != token.DEFAULT && p.tok != token.RBRACE && p.tok != token.EOF {
		list = append(list, p.parseStmt())
	}

	return
}

// parseBody parses a function body (a brace-delimited statement list).
func (p *parser) parseBody() *ast.BlockStmt {
	if p.trace {
		defer un(trace(p, "Body"))
	}

	lbrace := p.expect(token.LBRACE)
	list := p.parseStmtList()
	rbrace := p.expect2(token.RBRACE)

	return &ast.BlockStmt{Lbrace: lbrace, List: list, Rbrace: rbrace}
}

// parseBlockStmt parses a block statement.
func (p *parser) parseBlockStmt() *ast.BlockStmt {
	if p.trace {
		defer un(trace(p, "BlockStmt"))
	}

	lbrace := p.expect(token.LBRACE)
	list := p.parseStmtList()
	rbrace := p.expect2(token.RBRACE)

	return &ast.BlockStmt{Lbrace: lbrace, List: list, Rbrace: rbrace}
}

// ----------------------------------------------------------------------------
// Expressions

// parseFuncTypeOrLit parses a function type, or a function literal if
// the type is immediately followed by a '{'.
func (p *parser) parseFuncTypeOrLit() ast.Expr {
	if p.trace {
		defer un(trace(p, "FuncTypeOrLit"))
	}

	typ := p.parseFuncType()
	if p.tok != token.LBRACE {
		// function type only
		return typ
	}

	p.exprLev++
	body := p.parseBody()
	p.exprLev--

	return &ast.FuncLit{Type: typ, Body: body}
}

// parseOperand may return an expression or a raw type (incl. array
// types of the form [...]T). Callers must verify the result.
func (p *parser) parseOperand() ast.Expr {
	if p.trace {
		defer un(trace(p, "Operand"))
	}

	switch p.tok {
	case token.IDENT:
		x := p.parseIdent()
		return x

	case token.INT, token.FLOAT, token.IMAG, token.CHAR, token.STRING:
		x := &ast.BasicLit{ValuePos: p.pos, Kind: p.tok, Value: p.lit}
		p.next()
		return x

	case token.LPAREN:
		lparen := p.pos
		p.next()
		p.exprLev++
		x := p.parseRhs() // types may be parenthesized: (some type)
		p.exprLev--
		rparen := p.expect(token.RPAREN)
		return &ast.ParenExpr{Lparen: lparen, X: x, Rparen: rparen}

	case token.FUNC:
		return p.parseFuncTypeOrLit()
	}

	if typ := p.tryIdentOrType(); typ != nil { // do not consume trailing type parameters
		// could be type for composite literal or conversion
		_, isIdent := typ.(*ast.Ident)
		assert(!isIdent, "type cannot be identifier")
		return typ
	}

	// we have an error
	pos := p.pos
	p.errorExpected(pos, "operand")
	p.advance(stmtStart)
	return &ast.BadExpr{From: pos, To: p.pos}
}

// parseSelector parses the selector identifier following a '.';
// x is the already parsed operand.
func (p *parser) parseSelector(x ast.Expr) ast.Expr {
	if p.trace {
		defer un(trace(p, "Selector"))
	}

	sel := p.parseIdent()

	return &ast.SelectorExpr{X: x, Sel: sel}
}

// parseTypeAssertion parses a type assertion x.(T), or the type switch
// guard form x.(type); x is the already parsed operand.
func (p *parser) parseTypeAssertion(x ast.Expr) ast.Expr {
	if p.trace {
		defer un(trace(p, "TypeAssertion"))
	}

	lparen := p.expect(token.LPAREN)
	var typ ast.Expr
	if p.tok == token.TYPE {
		// type switch: typ == nil
		p.next()
	} else {
		typ = p.parseType()
	}
	rparen := p.expect(token.RPAREN)

	return &ast.TypeAssertExpr{X: x, Type: typ, Lparen: lparen, Rparen: rparen}
}

// parseIndexOrSliceOrInstance parses an index expression, a slice
// expression, or a generic instantiation — all of which start with
// '[' after operand x.
func (p *parser) parseIndexOrSliceOrInstance(x ast.Expr) ast.Expr {
	if p.trace {
		defer un(trace(p, "parseIndexOrSliceOrInstance"))
	}

	lbrack := p.expect(token.LBRACK)
	if p.tok == token.RBRACK {
		// empty index, slice or index expressions are not permitted;
		// accept them for parsing tolerance, but complain
		p.errorExpected(p.pos, "operand")
		rbrack := p.pos
		p.next()
		return &ast.IndexExpr{
			X:      x,
			Lbrack: lbrack,
			Index:  &ast.BadExpr{From: rbrack, To: rbrack},
			Rbrack: rbrack,
		}
	}
	p.exprLev++

	const N = 3 // change the 3 to 2 to disable 3-index slices
	var args []ast.Expr
	var index [N]ast.Expr
	var colons [N - 1]token.Pos
	if p.tok != token.COLON {
		// We can't know if we have an index expression or a type instantiation;
		// so even if we see a (named) type we are not going to be in type context.
		index[0] = p.parseRhs()
	}
	ncolons := 0
	switch p.tok {
	case token.COLON:
		// slice expression
		for p.tok == token.COLON && ncolons < len(colons) {
			colons[ncolons] = p.pos
			ncolons++
			p.next()
			if p.tok != token.COLON && p.tok != token.RBRACK && p.tok != token.EOF {
				index[ncolons] = p.parseRhs()
			}
		}
	case token.COMMA:
		// instance expression
		args = append(args, index[0])
		for p.tok == token.COMMA {
			p.next()
			if p.tok != token.RBRACK && p.tok != token.EOF {
				args = append(args, p.parseType())
			}
		}
	}

	p.exprLev--
	rbrack := p.expect(token.RBRACK)

	if ncolons > 0 {
		// slice expression
		slice3 := false
		if ncolons == 2 {
			slice3 = true
			// Check presence of middle and final index here rather than during type-checking
			// to prevent erroneous programs from passing through gofmt (was issue 7305).
			if index[1] == nil {
				p.error(colons[0], "middle index required in 3-index slice")
				index[1] = &ast.BadExpr{From: colons[0] + 1, To: colons[1]}
			}
			if index[2] == nil {
				p.error(colons[1], "final index required in 3-index slice")
				index[2] = &ast.BadExpr{From: colons[1] + 1, To: rbrack}
			}
		}
		return &ast.SliceExpr{X: x, Lbrack: lbrack, Low: index[0], High: index[1], Max: index[2], Slice3: slice3, Rbrack: rbrack}
	}

	if len(args) == 0 {
		// index expression
		return &ast.IndexExpr{X: x, Lbrack: lbrack, Index: index[0], Rbrack: rbrack}
	}

	// instance expression
	return typeparams.PackIndexExpr(x, lbrack, args, rbrack)
}

// parseCallOrConversion parses a call or conversion: fun followed by a
// parenthesized, comma-separated argument list with an optional
// trailing '...'.
func (p *parser) parseCallOrConversion(fun ast.Expr) *ast.CallExpr {
	if p.trace {
		defer un(trace(p, "CallOrConversion"))
	}

	lparen := p.expect(token.LPAREN)
	p.exprLev++
	var list []ast.Expr
	var ellipsis token.Pos
	for p.tok != token.RPAREN && p.tok != token.EOF && !ellipsis.IsValid() {
		list = append(list, p.parseRhs()) // builtins may expect a type: make(some type, ...)
		if p.tok == token.ELLIPSIS {
			ellipsis = p.pos
			p.next()
		}
		if !p.atComma("argument list", token.RPAREN) {
			break
		}
		p.next()
	}
	p.exprLev--
	rparen := p.expectClosing(token.RPAREN, "argument list")

	return &ast.CallExpr{Fun: fun, Lparen: lparen, Args: list, Ellipsis: ellipsis, Rparen: rparen}
}

// parseValue parses a composite-literal element value, which may
// itself be a composite literal without a type.
// (The trace label "Element" is shared with parseElement in the
// original source.)
func (p *parser) parseValue() ast.Expr {
	if p.trace {
		defer un(trace(p, "Element"))
	}

	if p.tok == token.LBRACE {
		return p.parseLiteralValue(nil)
	}

	x := p.parseExpr()

	return x
}

// parseElement parses a composite-literal element, optionally of the
// form key ':' value.
func (p *parser) parseElement() ast.Expr {
	if p.trace {
		defer un(trace(p, "Element"))
	}

	x := p.parseValue()
	if p.tok == token.COLON {
		colon := p.pos
		p.next()
		x = &ast.KeyValueExpr{Key: x, Colon: colon, Value: p.parseValue()}
	}

	return x
}

// parseElementList parses a comma-separated list of composite-literal
// elements up to (but not including) the closing brace.
func (p *parser) parseElementList() (list []ast.Expr) {
	if p.trace {
		defer un(trace(p, "ElementList"))
	}

	for p.tok != token.RBRACE && p.tok != token.EOF {
		list = append(list, p.parseElement())
		if !p.atComma("composite literal", token.RBRACE) {
			break
		}
		p.next()
	}

	return
}

// parseLiteralValue parses the brace-delimited element list of a
// composite literal; typ is the (possibly nil) literal type.
func (p *parser) parseLiteralValue(typ ast.Expr) ast.Expr {
	if p.trace {
		defer un(trace(p, "LiteralValue"))
	}

	lbrace := p.expect(token.LBRACE)
	var elts []ast.Expr
	p.exprLev++
	if p.tok != token.RBRACE {
		elts = p.parseElementList()
	}
	p.exprLev--
	rbrace := p.expectClosing(token.RBRACE, "composite literal")
	return &ast.CompositeLit{Type: typ, Lbrace: lbrace, Elts: elts, Rbrace: rbrace}
}

// If x is of the form (T), unparen returns unparen(T), otherwise it returns x.
func unparen(x ast.Expr) ast.Expr {
	if p, isParen := x.(*ast.ParenExpr); isParen {
		x = unparen(p.X)
	}
	return x
}

// parsePrimaryExpr parses a primary expression: an operand followed by
// any number of selectors, type assertions, index/slice/instantiation
// expressions, calls, or composite literals. If x is non-nil it is
// used as the already parsed operand.
func (p *parser) parsePrimaryExpr(x ast.Expr) ast.Expr {
	if p.trace {
		defer un(trace(p, "PrimaryExpr"))
	}

	if x == nil {
		x = p.parseOperand()
	}
	// We track the nesting here rather than at the entry for the function,
	// since it can iteratively produce a nested output, and we want to
	// limit how deep a structure we generate.
	var n int
	defer func() { p.nestLev -= n }()
	for n = 1; ; n++ {
		incNestLev(p)
		switch p.tok {
		case token.PERIOD:
			p.next()
			switch p.tok {
			case token.IDENT:
				x = p.parseSelector(x)
			case token.LPAREN:
				x = p.parseTypeAssertion(x)
			default:
				pos := p.pos
				p.errorExpected(pos, "selector or type assertion")
				// TODO(rFindley) The check for token.RBRACE below is a targeted fix
				// to error recovery sufficient to make the x/tools tests to
				// pass with the new parsing logic introduced for type
				// parameters. Remove this once error recovery has been
				// more generally reconsidered.
				if p.tok != token.RBRACE {
					p.next() // make progress
				}
				sel := &ast.Ident{NamePos: pos, Name: "_"}
				x = &ast.SelectorExpr{X: x, Sel: sel}
			}
		case token.LBRACK:
			x = p.parseIndexOrSliceOrInstance(x)
		case token.LPAREN:
			x = p.parseCallOrConversion(x)
		case token.LBRACE:
			// operand may have returned a parenthesized complit
			// type; accept it but complain if we have a complit
			t := unparen(x)
			// determine if '{' belongs to a composite literal or a block statement
			switch t.(type) {
			case *ast.BadExpr, *ast.Ident, *ast.SelectorExpr:
				if p.exprLev < 0 {
					return x
				}
				// x is possibly a composite literal type
			case *ast.IndexExpr, *ast.IndexListExpr:
				if p.exprLev < 0 {
					return x
				}
				// x is possibly a composite literal type
			case *ast.ArrayType, *ast.StructType, *ast.MapType:
				// x is a composite literal type
			default:
				return x
			}
			if t != x {
				p.error(t.Pos(), "cannot parenthesize type in composite literal")
				// already progressed, no need to advance
			}
			x = p.parseLiteralValue(x)
		default:
			return x
		}
	}
}

// parseUnaryExpr parses a unary expression, including channel receive
// operations and pointer/channel types.
func (p *parser) parseUnaryExpr() ast.Expr {
	defer decNestLev(incNestLev(p))

	if p.trace {
		defer un(trace(p, "UnaryExpr"))
	}

	switch p.tok {
	case token.ADD, token.SUB, token.NOT, token.XOR, token.AND, token.TILDE:
		pos, op := p.pos, p.tok
		p.next()
		x := p.parseUnaryExpr()
		return &ast.UnaryExpr{OpPos: pos, Op: op, X: x}

	case token.ARROW:
		// channel type or receive expression
		arrow := p.pos
		p.next()

		// If the next token is token.CHAN we still don't know if it
		// is a channel type or a receive operation - we only know
		// once we have found the end of the unary expression. There
		// are two cases:
		//
		//   <- type  => (<-type) must be channel type
		//   <- expr  => <-(expr) is a receive from an expression
		//
		// In the first case, the arrow must be re-associated with
		// the channel type parsed already:
		//
		//   <- (chan type)    =>  (<-chan type)
		//   <- (chan<- type)  =>  (<-chan (<-type))

		x := p.parseUnaryExpr()

		// determine which case we have
		if typ, ok := x.(*ast.ChanType); ok {
			// (<-type)

			// re-associate position info and <-
			dir := ast.SEND
			for ok && dir == ast.SEND {
				if typ.Dir == ast.RECV {
					// error: (<-type) is (<-(<-chan T))
					p.errorExpected(typ.Arrow, "'chan'")
				}
				arrow, typ.Begin, typ.Arrow = typ.Arrow, arrow, arrow
				dir, typ.Dir = typ.Dir, ast.RECV
				typ, ok = typ.Value.(*ast.ChanType)
			}
			if dir == ast.SEND {
				p.errorExpected(arrow, "channel type")
			}

			return x
		}

		// <-(expr)
		return &ast.UnaryExpr{OpPos: arrow, Op: token.ARROW, X: x}

	case token.MUL:
		// pointer type or unary "*" expression
		pos := p.pos
		p.next()
		x := p.parseUnaryExpr()
		return &ast.StarExpr{Star: pos, X: x}
	}

	return p.parsePrimaryExpr(nil)
}

// tokPrec returns the current token and its precedence; in a
// right-hand-side context a '=' is treated as '==' (likely a mistyped
// comparison, reported by the caller).
func (p *parser) tokPrec() (token.Token, int) {
	tok := p.tok
	if p.inRhs && tok == token.ASSIGN {
		tok = token.EQL
	}
	return tok, tok.Precedence()
}

// parseBinaryExpr parses a (possibly) binary expression.
// If x is non-nil, it is used as the left operand.
//
// TODO(rfindley): parseBinaryExpr has become overloaded. Consider refactoring.
func (p *parser) parseBinaryExpr(x ast.Expr, prec1 int) ast.Expr {
	if p.trace {
		defer un(trace(p, "BinaryExpr"))
	}

	if x == nil {
		x = p.parseUnaryExpr()
	}
	// We track the nesting here rather than at the entry for the function,
	// since it can iteratively produce a nested output, and we want to
	// limit how deep a structure we generate.
	var n int
	defer func() { p.nestLev -= n }()
	for n = 1; ; n++ {
		incNestLev(p)
		op, oprec := p.tokPrec()
		if oprec < prec1 {
			return x
		}
		pos := p.expect(op)
		y := p.parseBinaryExpr(nil, oprec+1)
		x = &ast.BinaryExpr{X: x, OpPos: pos, Op: op, Y: y}
	}
}

// parseExpr parses an expression.
// The result may be a type or even a raw type ([...]int).
func (p *parser) parseExpr() ast.Expr {
	if p.trace {
		defer un(trace(p, "Expression"))
	}

	return p.parseBinaryExpr(nil, token.LowestPrec+1)
}

// parseRhs parses an expression in a right-hand-side context (where a
// single '=' is reported as a mistyped '==', see tokPrec).
func (p *parser) parseRhs() ast.Expr {
	old := p.inRhs
	p.inRhs = true
	x := p.parseExpr()
	p.inRhs = old
	return x
}

// ----------------------------------------------------------------------------
// Statements

// Parsing modes for parseSimpleStmt.
const (
	basic = iota
	labelOk
	rangeOk
)

// parseSimpleStmt returns true as 2nd result if it parsed the assignment
// of a range clause (with mode == rangeOk). The returned statement is an
// assignment with a right-hand side that is a single unary expression of
// the form "range x". No guarantees are given for the left-hand side.
func (p *parser) parseSimpleStmt(mode int) (ast.Stmt, bool) {
	if p.trace {
		defer un(trace(p, "SimpleStmt"))
	}

	x := p.parseList(false)

	switch p.tok {
	case
		token.DEFINE, token.ASSIGN, token.ADD_ASSIGN,
		token.SUB_ASSIGN, token.MUL_ASSIGN, token.QUO_ASSIGN,
		token.REM_ASSIGN, token.AND_ASSIGN, token.OR_ASSIGN,
		token.XOR_ASSIGN, token.SHL_ASSIGN, token.SHR_ASSIGN, token.AND_NOT_ASSIGN:
		// assignment statement, possibly part of a range clause
		pos, tok := p.pos, p.tok
		p.next()
		var y []ast.Expr
		isRange := false
		if mode == rangeOk && p.tok == token.RANGE && (tok == token.DEFINE || tok == token.ASSIGN) {
			pos := p.pos
			p.next()
			y = []ast.Expr{&ast.UnaryExpr{OpPos: pos, Op: token.RANGE, X: p.parseRhs()}}
			isRange = true
		} else {
			y = p.parseList(true)
		}
		return &ast.AssignStmt{Lhs: x, TokPos: pos, Tok: tok, Rhs: y}, isRange
	}

	if len(x) > 1 {
		p.errorExpected(x[0].Pos(), "1 expression")
		// continue with first expression
	}

	switch p.tok {
	case token.COLON:
		// labeled statement
		colon := p.pos
		p.next()
		if label, isIdent := x[0].(*ast.Ident); mode == labelOk && isIdent {
			// Go spec: The scope of a label is the body of the function
			// in which it is declared and excludes the body of any nested
			// function.
			stmt := &ast.LabeledStmt{Label: label, Colon: colon, Stmt: p.parseStmt()}
			return stmt, false
		}
		// The label declaration typically starts at x[0].Pos(), but the label
		// declaration may be erroneous due to a token after that position (and
		// before the ':'). If SpuriousErrors is not set, the (only) error
		// reported for the line is the illegal label error instead of the token
		// before the ':' that caused the problem. Thus, use the (latest) colon
		// position for error reporting.
		p.error(colon, "illegal label declaration")
		return &ast.BadStmt{From: x[0].Pos(), To: colon + 1}, false

	case token.ARROW:
		// send statement
		arrow := p.pos
		p.next()
		y := p.parseRhs()
		return &ast.SendStmt{Chan: x[0], Arrow: arrow, Value: y}, false

	case token.INC, token.DEC:
		// increment or decrement
		s := &ast.IncDecStmt{X: x[0], TokPos: p.pos, Tok: p.tok}
		p.next()
		return s, false
	}

	// expression
	return &ast.ExprStmt{X: x[0]}, false
}

// parseCallExpr parses the call expression of a go or defer statement;
// callType is "go" or "defer" (used only in error messages). It
// returns nil if the parsed expression is not a call.
func (p *parser) parseCallExpr(callType string) *ast.CallExpr {
	x := p.parseRhs() // could be a conversion: (some type)(x)
	if t := unparen(x); t != x {
		p.error(x.Pos(), fmt.Sprintf("expression in %s must not be parenthesized", callType))
		x = t
	}
	if call, isCall := x.(*ast.CallExpr); isCall {
		return call
	}
	if _, isBad := x.(*ast.BadExpr); !isBad {
		// only report error if it's a new one
		p.error(p.safePos(x.End()), fmt.Sprintf("expression in %s must be function call", callType))
	}
	return nil
}

// parseGoStmt parses a go statement.
func (p *parser) parseGoStmt() ast.Stmt {
	if p.trace {
		defer un(trace(p, "GoStmt"))
	}

	pos := p.expect(token.GO)
	call := p.parseCallExpr("go")
	p.expectSemi()
	if call == nil {
		return &ast.BadStmt{From: pos, To: pos + 2} // len("go")
	}

	return &ast.GoStmt{Go: pos, Call: call}
}

// parseDeferStmt parses a defer statement.
func (p *parser) parseDeferStmt() ast.Stmt {
	if p.trace {
		defer un(trace(p, "DeferStmt"))
	}

	pos := p.expect(token.DEFER)
	call := p.parseCallExpr("defer")
	p.expectSemi()
	if call == nil {
		return &ast.BadStmt{From: pos, To: pos + 5} // len("defer")
	}

	return &ast.DeferStmt{Defer: pos, Call: call}
}

// parseReturnStmt parses a return statement with its optional result
// expression list.
func (p *parser) parseReturnStmt() *ast.ReturnStmt {
	if p.trace {
		defer un(trace(p, "ReturnStmt"))
	}

	pos := p.pos
	p.expect(token.RETURN)
	var x []ast.Expr
	if p.tok != token.SEMICOLON && p.tok != token.RBRACE {
		x = p.parseList(true)
	}
	p.expectSemi()

	return &ast.ReturnStmt{Return: pos, Results: x}
}

// parseBranchStmt parses a break, continue, goto, or fallthrough
// statement; tok identifies which one.
func (p *parser) parseBranchStmt(tok token.Token) *ast.BranchStmt {
	if p.trace {
		defer un(trace(p, "BranchStmt"))
	}

	pos := p.expect(tok)
	var label *ast.Ident
	if tok != token.FALLTHROUGH && p.tok == token.IDENT {
		label = p.parseIdent()
	}
	p.expectSemi()

	return &ast.BranchStmt{TokPos: pos, Tok: tok, Label: label}
}

// makeExpr converts statement s to an expression, reporting an error
// (mentioning want) if s is not an expression statement.
func (p *parser) makeExpr(s ast.Stmt, want string) ast.Expr {
	if s == nil {
		return nil
	}
	if es, isExpr := s.(*ast.ExprStmt); isExpr {
		return es.X
	}
	found := "simple statement"
	if _, isAss := s.(*ast.AssignStmt); isAss {
		found = "assignment"
	}
	p.error(s.Pos(), fmt.Sprintf("expected %s, found %s (missing parentheses around composite literal?)", want, found))
	return &ast.BadExpr{From: s.Pos(), To: p.safePos(s.End())}
}

// parseIfHeader is an adjusted version of parser.header
// in cmd/compile/internal/syntax/parser.go, which has
// been tuned for better error handling.
func (p *parser) parseIfHeader() (init ast.Stmt, cond ast.Expr) {
	if p.tok == token.LBRACE {
		p.error(p.pos, "missing condition in if statement")
		cond = &ast.BadExpr{From: p.pos, To: p.pos}
		return
	}
	// p.tok != token.LBRACE

	prevLev := p.exprLev
	p.exprLev = -1

	if p.tok != token.SEMICOLON {
		// accept potential variable declaration but complain
		if p.tok == token.VAR {
			p.next()
			p.error(p.pos, "var declaration not allowed in if initializer")
		}
		init, _ = p.parseSimpleStmt(basic)
	}

	var condStmt ast.Stmt
	var semi struct {
		pos token.Pos
		lit string // ";" or "\n"; valid if pos.IsValid()
	}
	if p.tok != token.LBRACE {
		if p.tok == token.SEMICOLON {
			semi.pos = p.pos
			semi.lit = p.lit
			p.next()
		} else {
			p.expect(token.SEMICOLON)
		}
		if p.tok != token.LBRACE {
			condStmt, _ = p.parseSimpleStmt(basic)
		}
	} else {
		condStmt = init
		init = nil
	}

	if condStmt != nil {
		cond = p.makeExpr(condStmt, "boolean expression")
	} else if semi.pos.IsValid() {
		if semi.lit == "\n" {
			p.error(semi.pos, "unexpected newline, expecting { after if clause")
		} else {
			p.error(semi.pos, "missing condition in if statement")
		}
	}

	// make sure we have a valid AST
	if cond == nil {
		cond = &ast.BadExpr{From: p.pos, To: p.pos}
	}

	p.exprLev = prevLev
	return
}

// parseIfStmt parses an if statement, including any else-if chain.
func (p *parser) parseIfStmt() *ast.IfStmt {
	defer decNestLev(incNestLev(p))

	if p.trace {
		defer un(trace(p, "IfStmt"))
	}

	pos := p.expect(token.IF)

	init, cond := p.parseIfHeader()
	body := p.parseBlockStmt()

	var else_ ast.Stmt
	if p.tok == token.ELSE {
		p.next()
		switch p.tok {
		case token.IF:
			else_ = p.parseIfStmt()
		case token.LBRACE:
			else_ = p.parseBlockStmt()
			p.expectSemi()
		default:
			p.errorExpected(p.pos, "if statement or block")
			else_ = &ast.BadStmt{From: p.pos, To: p.pos}
		}
	} else {
		p.expectSemi()
	}

	return &ast.IfStmt{If: pos, Init: init, Cond: cond, Body: body, Else: else_}
}

// parseCaseClause parses a case or default clause of an expression or
// type switch statement.
func (p *parser) parseCaseClause() *ast.CaseClause {
	if p.trace {
		defer un(trace(p, "CaseClause"))
	}

	pos := p.pos
	var list []ast.Expr
	if p.tok == token.CASE {
		p.next()
		list = p.parseList(true)
	} else {
		p.expect(token.DEFAULT)
	}

	colon := p.expect(token.COLON)
	body := p.parseStmtList()

	return &ast.CaseClause{Case: pos, List: list, Colon: colon, Body: body}
}

// isTypeSwitchAssert reports whether x is a type assertion of the
// form x.(type), i.e., with a nil Type.
func isTypeSwitchAssert(x ast.Expr) bool {
	a, ok := x.(*ast.TypeAssertExpr)
	return ok && a.Type == nil
}

// isTypeSwitchGuard reports whether s is a type switch guard of the
// form x.(type) or v := x.(type).
func (p *parser) isTypeSwitchGuard(s ast.Stmt) bool {
	switch t := s.(type) {
	case *ast.ExprStmt:
		// x.(type)
		return isTypeSwitchAssert(t.X)
	case *ast.AssignStmt:
		// v := x.(type)
		if len(t.Lhs) == 1 && len(t.Rhs) == 1 && isTypeSwitchAssert(t.Rhs[0]) {
			switch t.Tok {
			case token.ASSIGN:
				// permit v = x.(type) but complain
				p.error(t.TokPos, "expected ':=', found '='")
				fallthrough
			case token.DEFINE:
				return true
			}
		}
	}
	return false
}

// parseSwitchStmt parses an expression switch or a type switch
// statement.
func (p *parser) parseSwitchStmt() ast.Stmt {
	if p.trace {
		defer un(trace(p, "SwitchStmt"))
	}

	pos := p.expect(token.SWITCH)

	var s1, s2 ast.Stmt
	if p.tok != token.LBRACE {
		prevLev := p.exprLev
		p.exprLev = -1
		if p.tok != token.SEMICOLON {
			s2, _ = p.parseSimpleStmt(basic)
		}
		if p.tok == token.SEMICOLON {
			p.next()
			s1 = s2
			s2 = nil
			if p.tok != token.LBRACE {
				// A TypeSwitchGuard may declare a variable in addition
				// to the variable declared in the initial SimpleStmt.
				// Introduce extra scope to avoid redeclaration errors:
				//
				//	switch t := 0; t := x.(T) { ... }
				//
				// (this code is not valid Go because the first t
				// cannot be accessed and thus is never used, the extra
				// scope is needed for the correct error message).
				//
				// If we don't have a type switch, s2 must be an expression.
				// Having the extra nested but empty scope won't affect it.
				s2, _ = p.parseSimpleStmt(basic)
			}
		}
		p.exprLev = prevLev
	}

	typeSwitch := p.isTypeSwitchGuard(s2)
	lbrace := p.expect(token.LBRACE)
	var list []ast.Stmt
	for p.tok == token.CASE || p.tok == token.DEFAULT {
		list = append(list, p.parseCaseClause())
	}
	rbrace := p.expect(token.RBRACE)
	p.expectSemi()
	body := &ast.BlockStmt{Lbrace: lbrace, List: list, Rbrace: rbrace}

	if typeSwitch {
		return &ast.TypeSwitchStmt{Switch: pos, Init: s1, Assign: s2, Body: body}
	}

	return &ast.SwitchStmt{Switch: pos, Init: s1, Tag: p.makeExpr(s2, "switch expression"), Body: body}
}

// parseCommClause parses a case or default clause of a select
// statement: a send statement, a receive statement, or default.
func (p *parser) parseCommClause() *ast.CommClause {
	if p.trace {
		defer un(trace(p, "CommClause"))
	}

	pos := p.pos
	var comm ast.Stmt
	if p.tok == token.CASE {
		p.next()
		lhs := p.parseList(false)
		if p.tok == token.ARROW {
			// SendStmt
			if len(lhs) > 1 {
				p.errorExpected(lhs[0].Pos(), "1 expression")
				// continue with first expression
			}
			arrow := p.pos
			p.next()
			rhs := p.parseRhs()
			comm = &ast.SendStmt{Chan: lhs[0], Arrow: arrow, Value: rhs}
		} else {
			// RecvStmt
			if tok := p.tok; tok == token.ASSIGN || tok == token.DEFINE {
				// RecvStmt with assignment
				if len(lhs) > 2 {
					p.errorExpected(lhs[0].Pos(), "1 or 2 expressions")
					// continue with first two expressions
					lhs = lhs[0:2]
				}
				pos := p.pos
				p.next()
				rhs := p.parseRhs()
				comm = &ast.AssignStmt{Lhs: lhs, TokPos: pos, Tok: tok, Rhs: []ast.Expr{rhs}}
			} else {
				// lhs must be single receive operation
				if len(lhs) > 1 {
					p.errorExpected(lhs[0].Pos(), "1 expression")
					// continue with first expression
				}
				comm = &ast.ExprStmt{X: lhs[0]}
			}
		}
	} else {
		p.expect(token.DEFAULT)
	}

	colon := p.expect(token.COLON)
	body := p.parseStmtList()

	return &ast.CommClause{Case: pos, Comm: comm, Colon: colon, Body: body}
}

// parseSelectStmt parses a select statement.
func (p *parser) parseSelectStmt() *ast.SelectStmt {
	if p.trace {
		defer un(trace(p, "SelectStmt"))
	}

	pos := p.expect(token.SELECT)
	lbrace := p.expect(token.LBRACE)
	var list []ast.Stmt
	for p.tok == token.CASE || p.tok == token.DEFAULT {
		list = append(list, p.parseCommClause())
	}
	rbrace := p.expect(token.RBRACE)
	p.expectSemi()
	body := &ast.BlockStmt{Lbrace: lbrace, List: list, Rbrace: rbrace}

	return &ast.SelectStmt{Select: pos, Body: body}
}

// parseForStmt parses a for statement, including range clauses.
func (p *parser) parseForStmt() ast.Stmt {
	if p.trace {
		defer un(trace(p, "ForStmt"))
	}

	pos := p.expect(token.FOR)

	var s1, s2, s3 ast.Stmt
	var isRange bool
	if p.tok != token.LBRACE {
		prevLev := p.exprLev
		p.exprLev = -1
		if p.tok != token.SEMICOLON {
			if p.tok == token.RANGE {
				// "for range x" (nil lhs in assignment)
				pos := p.pos
				p.next()
				y := []ast.Expr{&ast.UnaryExpr{OpPos: pos, Op: token.RANGE, X: p.parseRhs()}}
				s2 = &ast.AssignStmt{Rhs: y}
				isRange = true
			} else {
				s2, isRange = p.parseSimpleStmt(rangeOk)
			}
		}
		if !isRange && p.tok == token.SEMICOLON {
			p.next()
			s1 = s2
			s2 = nil
			if p.tok != token.SEMICOLON {
				s2, _ = p.parseSimpleStmt(basic)
			}
			p.expectSemi()
			if p.tok != token.LBRACE {
				s3, _ = p.parseSimpleStmt(basic)
			}
		}
		p.exprLev = prevLev
	}

	body := p.parseBlockStmt()
	p.expectSemi()

	if isRange {
		as := s2.(*ast.AssignStmt)
		// check lhs
		var key, value ast.Expr
		switch len(as.Lhs) {
		case 0:
			// nothing to do
		case 1:
			key = as.Lhs[0]
		case 2:
			key, value = as.Lhs[0], as.Lhs[1]
		default:
			p.errorExpected(as.Lhs[len(as.Lhs)-1].Pos(), "at most 2 expressions")
			return &ast.BadStmt{From: pos, To: p.safePos(body.End())}
		}
		// parseSimpleStmt returned a right-hand side that
		// is a single unary expression of the form "range x"
		x := as.Rhs[0].(*ast.UnaryExpr).X
		return &ast.RangeStmt{
			For:    pos,
			Key:    key,
			Value:  value,
			TokPos: as.TokPos,
			Tok:    as.Tok,
			Range:  as.Rhs[0].Pos(),
			X:      x,
			Body:   body,
		}
	}

	// regular for statement
	return &ast.ForStmt{
		For:  pos,
		Init: s1,
		Cond: p.makeExpr(s2, "boolean or range expression"),
		Post: s3,
		Body: body,
	}
}

// parseStmt parses a single statement.
func (p *parser) parseStmt() (s ast.Stmt) {
	defer decNestLev(incNestLev(p))

	if p.trace {
		defer un(trace(p, "Statement"))
	}

	switch p.tok {
	case token.CONST, token.TYPE, token.VAR:
		s = &ast.DeclStmt{Decl: p.parseDecl(stmtStart)}
	case
		// tokens that may start an expression
		token.IDENT, token.INT, token.FLOAT, token.IMAG, token.CHAR, token.STRING, token.FUNC, token.LPAREN, // operands
		token.LBRACK, token.STRUCT, token.MAP, token.CHAN, token.INTERFACE, // composite types
		token.ADD, token.SUB, token.MUL, token.AND, token.XOR, token.ARROW, token.NOT: // unary operators
		s, _ = p.parseSimpleStmt(labelOk)
		// because of the required look-ahead, labeled statements are
		// parsed by parseSimpleStmt - don't expect a semicolon after
		// them
		if _, isLabeledStmt := s.(*ast.LabeledStmt); !isLabeledStmt {
			p.expectSemi()
		}
	case token.GO:
		s = p.parseGoStmt()
	case token.DEFER:
		s = p.parseDeferStmt()
	case token.RETURN:
		s = p.parseReturnStmt()
	case token.BREAK, token.CONTINUE, token.GOTO, token.FALLTHROUGH:
		s = p.parseBranchStmt(p.tok)
	case token.LBRACE:
		s = p.parseBlockStmt()
		p.expectSemi()
	case token.IF:
		s = p.parseIfStmt()
	case token.SWITCH:
		s = p.parseSwitchStmt()
	case token.SELECT:
		s = p.parseSelectStmt()
	case token.FOR:
		s = p.parseForStmt()
	case token.SEMICOLON:
		// Is it ever possible to have an implicit semicolon
		// producing an empty statement in a valid program?
		// (handle correctly anyway)
		s = &ast.EmptyStmt{Semicolon: p.pos, Implicit: p.lit == "\n"}
		p.next()
	case token.RBRACE:
		// a semicolon may be omitted before a closing "}"
		s = &ast.EmptyStmt{Semicolon: p.pos, Implicit: true}
	default:
		// no statement found
		pos := p.pos
		p.errorExpected(pos, "statement")
		p.advance(stmtStart)
		s = &ast.BadStmt{From: pos, To: p.pos}
	}

	return
}

// ----------------------------------------------------------------------------
// Declarations

// parseSpecFunction is the type of a function that parses one spec of
// a const, type, var, or import declaration.
type parseSpecFunction func(doc *ast.CommentGroup, keyword token.Token, iota int) ast.Spec

// parseImportSpec parses a single import spec; the keyword and iota
// arguments are unused (they exist to satisfy parseSpecFunction).
func (p *parser) parseImportSpec(doc *ast.CommentGroup, _ token.Token, _ int) ast.Spec {
	if p.trace {
		defer un(trace(p, "ImportSpec"))
	}

	var ident *ast.Ident
	switch p.tok {
	case token.IDENT:
		ident = p.parseIdent()
	case token.PERIOD:
		ident = &ast.Ident{NamePos: p.pos, Name: "."}
		p.next()
	}

	pos := p.pos
	var path string
	if p.tok == token.STRING {
		path = p.lit
		p.next()
	} else if p.tok.IsLiteral() {
		p.error(pos, "import path must be a string")
		p.next()
	} else {
		p.error(pos, "missing import path")
		p.advance(exprEnd)
	}
	comment := p.expectSemi()

	// collect imports
	spec := &ast.ImportSpec{
Doc: doc, 2469 Name: ident, 2470 Path: &ast.BasicLit{ValuePos: pos, Kind: token.STRING, Value: path}, 2471 Comment: comment, 2472 } 2473 p.imports = append(p.imports, spec) 2474 2475 return spec 2476 } 2477 2478 func (p *parser) parseValueSpec(doc *ast.CommentGroup, keyword token.Token, iota int) ast.Spec { 2479 if p.trace { 2480 defer un(trace(p, keyword.String()+"Spec")) 2481 } 2482 2483 idents := p.parseIdentList() 2484 var typ ast.Expr 2485 var values []ast.Expr 2486 switch keyword { 2487 case token.CONST: 2488 // always permit optional type and initialization for more tolerant parsing 2489 if p.tok != token.EOF && p.tok != token.SEMICOLON && p.tok != token.RPAREN { 2490 typ = p.tryIdentOrType() 2491 if p.tok == token.ASSIGN { 2492 p.next() 2493 values = p.parseList(true) 2494 } 2495 } 2496 case token.VAR: 2497 if p.tok != token.ASSIGN { 2498 typ = p.parseType() 2499 } 2500 if p.tok == token.ASSIGN { 2501 p.next() 2502 values = p.parseList(true) 2503 } 2504 default: 2505 panic("unreachable") 2506 } 2507 comment := p.expectSemi() 2508 2509 spec := &ast.ValueSpec{ 2510 Doc: doc, 2511 Names: idents, 2512 Type: typ, 2513 Values: values, 2514 Comment: comment, 2515 } 2516 return spec 2517 } 2518 2519 func (p *parser) parseGenericType(spec *ast.TypeSpec, openPos token.Pos, name0 *ast.Ident, typ0 ast.Expr) { 2520 if p.trace { 2521 defer un(trace(p, "parseGenericType")) 2522 } 2523 2524 list := p.parseParameterList(name0, typ0, token.RBRACK) 2525 closePos := p.expect(token.RBRACK) 2526 spec.TypeParams = &ast.FieldList{Opening: openPos, List: list, Closing: closePos} 2527 // Let the type checker decide whether to accept type parameters on aliases: 2528 // see issue #46477. 
2529 if p.tok == token.ASSIGN { 2530 // type alias 2531 spec.Assign = p.pos 2532 p.next() 2533 } 2534 spec.Type = p.parseType() 2535 } 2536 2537 func (p *parser) parseTypeSpec(doc *ast.CommentGroup, _ token.Token, _ int) ast.Spec { 2538 if p.trace { 2539 defer un(trace(p, "TypeSpec")) 2540 } 2541 2542 name := p.parseIdent() 2543 spec := &ast.TypeSpec{Doc: doc, Name: name} 2544 2545 if p.tok == token.LBRACK { 2546 // spec.Name "[" ... 2547 // array/slice type or type parameter list 2548 lbrack := p.pos 2549 p.next() 2550 if p.tok == token.IDENT { 2551 // We may have an array type or a type parameter list. 2552 // In either case we expect an expression x (which may 2553 // just be a name, or a more complex expression) which 2554 // we can analyze further. 2555 // 2556 // A type parameter list may have a type bound starting 2557 // with a "[" as in: P []E. In that case, simply parsing 2558 // an expression would lead to an error: P[] is invalid. 2559 // But since index or slice expressions are never constant 2560 // and thus invalid array length expressions, if the name 2561 // is followed by "[" it must be the start of an array or 2562 // slice constraint. Only if we don't see a "[" do we 2563 // need to parse a full expression. Notably, name <- x 2564 // is not a concern because name <- x is a statement and 2565 // not an expression. 2566 var x ast.Expr = p.parseIdent() 2567 if p.tok != token.LBRACK { 2568 // To parse the expression starting with name, expand 2569 // the call sequence we would get by passing in name 2570 // to parser.expr, and pass in name to parsePrimaryExpr. 2571 p.exprLev++ 2572 lhs := p.parsePrimaryExpr(x) 2573 x = p.parseBinaryExpr(lhs, token.LowestPrec+1) 2574 p.exprLev-- 2575 } 2576 // Analyze expression x. 
If we can split x into a type parameter 2577 // name, possibly followed by a type parameter type, we consider 2578 // this the start of a type parameter list, with some caveats: 2579 // a single name followed by "]" tilts the decision towards an 2580 // array declaration; a type parameter type that could also be 2581 // an ordinary expression but which is followed by a comma tilts 2582 // the decision towards a type parameter list. 2583 if pname, ptype := extractName(x, p.tok == token.COMMA); pname != nil && (ptype != nil || p.tok != token.RBRACK) { 2584 // spec.Name "[" pname ... 2585 // spec.Name "[" pname ptype ... 2586 // spec.Name "[" pname ptype "," ... 2587 p.parseGenericType(spec, lbrack, pname, ptype) // ptype may be nil 2588 } else { 2589 // spec.Name "[" pname "]" ... 2590 // spec.Name "[" x ... 2591 spec.Type = p.parseArrayType(lbrack, x) 2592 } 2593 } else { 2594 // array type 2595 spec.Type = p.parseArrayType(lbrack, nil) 2596 } 2597 } else { 2598 // no type parameters 2599 if p.tok == token.ASSIGN { 2600 // type alias 2601 spec.Assign = p.pos 2602 p.next() 2603 } 2604 spec.Type = p.parseType() 2605 } 2606 2607 spec.Comment = p.expectSemi() 2608 2609 return spec 2610 } 2611 2612 // extractName splits the expression x into (name, expr) if syntactically 2613 // x can be written as name expr. The split only happens if expr is a type 2614 // element (per the isTypeElem predicate) or if force is set. 2615 // If x is just a name, the result is (name, nil). If the split succeeds, 2616 // the result is (name, expr). Otherwise the result is (nil, x). 
2617 // Examples: 2618 // 2619 // x force name expr 2620 // ------------------------------------ 2621 // P*[]int T/F P *[]int 2622 // P*E T P *E 2623 // P*E F nil P*E 2624 // P([]int) T/F P []int 2625 // P(E) T P E 2626 // P(E) F nil P(E) 2627 // P*E|F|~G T/F P *E|F|~G 2628 // P*E|F|G T P *E|F|G 2629 // P*E|F|G F nil P*E|F|G 2630 func extractName(x ast.Expr, force bool) (*ast.Ident, ast.Expr) { 2631 switch x := x.(type) { 2632 case *ast.Ident: 2633 return x, nil 2634 case *ast.BinaryExpr: 2635 switch x.Op { 2636 case token.MUL: 2637 if name, _ := x.X.(*ast.Ident); name != nil && (force || isTypeElem(x.Y)) { 2638 // x = name *x.Y 2639 return name, &ast.StarExpr{Star: x.OpPos, X: x.Y} 2640 } 2641 case token.OR: 2642 if name, lhs := extractName(x.X, force || isTypeElem(x.Y)); name != nil && lhs != nil { 2643 // x = name lhs|x.Y 2644 op := *x 2645 op.X = lhs 2646 return name, &op 2647 } 2648 } 2649 case *ast.CallExpr: 2650 if name, _ := x.Fun.(*ast.Ident); name != nil { 2651 if len(x.Args) == 1 && x.Ellipsis == token.NoPos && (force || isTypeElem(x.Args[0])) { 2652 // x = name "(" x.ArgList[0] ")" 2653 return name, x.Args[0] 2654 } 2655 } 2656 } 2657 return nil, x 2658 } 2659 2660 // isTypeElem reports whether x is a (possibly parenthesized) type element expression. 2661 // The result is false if x could be a type element OR an ordinary (value) expression. 
2662 func isTypeElem(x ast.Expr) bool { 2663 switch x := x.(type) { 2664 case *ast.ArrayType, *ast.StructType, *ast.FuncType, *ast.InterfaceType, *ast.MapType, *ast.ChanType: 2665 return true 2666 case *ast.BinaryExpr: 2667 return isTypeElem(x.X) || isTypeElem(x.Y) 2668 case *ast.UnaryExpr: 2669 return x.Op == token.TILDE 2670 case *ast.ParenExpr: 2671 return isTypeElem(x.X) 2672 } 2673 return false 2674 } 2675 2676 func (p *parser) parseGenDecl(keyword token.Token, f parseSpecFunction) *ast.GenDecl { 2677 if p.trace { 2678 defer un(trace(p, "GenDecl("+keyword.String()+")")) 2679 } 2680 2681 doc := p.leadComment 2682 pos := p.expect(keyword) 2683 var lparen, rparen token.Pos 2684 var list []ast.Spec 2685 if p.tok == token.LPAREN { 2686 lparen = p.pos 2687 p.next() 2688 for iota := 0; p.tok != token.RPAREN && p.tok != token.EOF; iota++ { 2689 list = append(list, f(p.leadComment, keyword, iota)) 2690 } 2691 rparen = p.expect(token.RPAREN) 2692 p.expectSemi() 2693 } else { 2694 list = append(list, f(nil, keyword, 0)) 2695 } 2696 2697 return &ast.GenDecl{ 2698 Doc: doc, 2699 TokPos: pos, 2700 Tok: keyword, 2701 Lparen: lparen, 2702 Specs: list, 2703 Rparen: rparen, 2704 } 2705 } 2706 2707 func (p *parser) parseFuncDecl() *ast.FuncDecl { 2708 if p.trace { 2709 defer un(trace(p, "FunctionDecl")) 2710 } 2711 2712 doc := p.leadComment 2713 pos := p.expect(token.FUNC) 2714 2715 var recv *ast.FieldList 2716 if p.tok == token.LPAREN { 2717 _, recv = p.parseParameters(false) 2718 } 2719 2720 ident := p.parseIdent() 2721 2722 tparams, params := p.parseParameters(true) 2723 if recv != nil && tparams != nil { 2724 // Method declarations do not have type parameters. We parse them for a 2725 // better error message and improved error recovery. 
2726 p.error(tparams.Opening, "method must have no type parameters") 2727 tparams = nil 2728 } 2729 results := p.parseResult() 2730 2731 var body *ast.BlockStmt 2732 switch p.tok { 2733 case token.LBRACE: 2734 body = p.parseBody() 2735 p.expectSemi() 2736 case token.SEMICOLON: 2737 p.next() 2738 if p.tok == token.LBRACE { 2739 // opening { of function declaration on next line 2740 p.error(p.pos, "unexpected semicolon or newline before {") 2741 body = p.parseBody() 2742 p.expectSemi() 2743 } 2744 default: 2745 p.expectSemi() 2746 } 2747 2748 decl := &ast.FuncDecl{ 2749 Doc: doc, 2750 Recv: recv, 2751 Name: ident, 2752 Type: &ast.FuncType{ 2753 Func: pos, 2754 TypeParams: tparams, 2755 Params: params, 2756 Results: results, 2757 }, 2758 Body: body, 2759 } 2760 return decl 2761 } 2762 2763 func (p *parser) parseDecl(sync map[token.Token]bool) ast.Decl { 2764 if p.trace { 2765 defer un(trace(p, "Declaration")) 2766 } 2767 2768 var f parseSpecFunction 2769 switch p.tok { 2770 case token.IMPORT: 2771 f = p.parseImportSpec 2772 2773 case token.CONST, token.VAR: 2774 f = p.parseValueSpec 2775 2776 case token.TYPE: 2777 f = p.parseTypeSpec 2778 2779 case token.FUNC: 2780 return p.parseFuncDecl() 2781 2782 default: 2783 pos := p.pos 2784 p.errorExpected(pos, "declaration") 2785 p.advance(sync) 2786 return &ast.BadDecl{From: pos, To: p.pos} 2787 } 2788 2789 return p.parseGenDecl(p.tok, f) 2790 } 2791 2792 // ---------------------------------------------------------------------------- 2793 // Source files 2794 2795 func (p *parser) parseFile() *ast.File { 2796 if p.trace { 2797 defer un(trace(p, "File")) 2798 } 2799 2800 // Don't bother parsing the rest if we had errors scanning the first token. 2801 // Likely not a Go source file at all. 
2802 if p.errors.Len() != 0 { 2803 return nil 2804 } 2805 2806 // package clause 2807 doc := p.leadComment 2808 pos := p.expect(token.PACKAGE) 2809 // Go spec: The package clause is not a declaration; 2810 // the package name does not appear in any scope. 2811 ident := p.parseIdent() 2812 if ident.Name == "_" && p.mode&DeclarationErrors != 0 { 2813 p.error(p.pos, "invalid package name _") 2814 } 2815 p.expectSemi() 2816 2817 // Don't bother parsing the rest if we had errors parsing the package clause. 2818 // Likely not a Go source file at all. 2819 if p.errors.Len() != 0 { 2820 return nil 2821 } 2822 2823 var decls []ast.Decl 2824 if p.mode&PackageClauseOnly == 0 { 2825 // import decls 2826 for p.tok == token.IMPORT { 2827 decls = append(decls, p.parseGenDecl(token.IMPORT, p.parseImportSpec)) 2828 } 2829 2830 if p.mode&ImportsOnly == 0 { 2831 // rest of package body 2832 prev := token.IMPORT 2833 for p.tok != token.EOF { 2834 // Continue to accept import declarations for error tolerance, but complain. 2835 if p.tok == token.IMPORT && prev != token.IMPORT { 2836 p.error(p.pos, "imports must appear before other declarations") 2837 } 2838 prev = p.tok 2839 2840 decls = append(decls, p.parseDecl(declStart)) 2841 } 2842 } 2843 } 2844 2845 f := &ast.File{ 2846 Doc: doc, 2847 Package: pos, 2848 Name: ident, 2849 Decls: decls, 2850 FileStart: token.Pos(p.file.Base()), 2851 FileEnd: token.Pos(p.file.Base() + p.file.Size()), 2852 Imports: p.imports, 2853 Comments: p.comments, 2854 } 2855 var declErr func(token.Pos, string) 2856 if p.mode&DeclarationErrors != 0 { 2857 declErr = p.error 2858 } 2859 if p.mode&SkipObjectResolution == 0 { 2860 resolveFile(f, p.file, declErr) 2861 } 2862 2863 return f 2864 }