github.com/joomcode/cue@v0.4.4-0.20221111115225-539fe3512047/cue/parser/parser.go (about)

     1  // Copyright 2018 The CUE Authors
     2  //
     3  // Licensed under the Apache License, Version 2.0 (the "License");
     4  // you may not use this file except in compliance with the License.
     5  // You may obtain a copy of the License at
     6  //
     7  //     http://www.apache.org/licenses/LICENSE-2.0
     8  //
     9  // Unless required by applicable law or agreed to in writing, software
    10  // distributed under the License is distributed on an "AS IS" BASIS,
    11  // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    12  // See the License for the specific language governing permissions and
    13  // limitations under the License.
    14  
    15  package parser
    16  
    17  import (
    18  	"fmt"
    19  	"strings"
    20  	"unicode"
    21  
    22  	"github.com/joomcode/cue/cue/ast"
    23  	"github.com/joomcode/cue/cue/errors"
    24  	"github.com/joomcode/cue/cue/literal"
    25  	"github.com/joomcode/cue/cue/scanner"
    26  	"github.com/joomcode/cue/cue/token"
    27  	"github.com/joomcode/cue/internal/astinternal"
    28  )
    29  
// debugStr is a convenience alias for dumping AST nodes while debugging.
var debugStr = astinternal.DebugStr
    31  
// The parser structure holds the parser's internal state.
type parser struct {
	file    *token.File     // file info used to translate token positions
	offset  int             // base offset for the file; -1 unless set by an Option
	errors  errors.Error    // accumulated parse errors
	scanner scanner.Scanner // tokenizer feeding the one-token look-ahead

	// Tracing/debugging
	mode      mode // parsing mode
	trace     bool // == (mode & Trace != 0)
	panicking bool // set if we are bailing out due to too many errors.
	indent    int  // indentation used for tracing output

	// Comments
	leadComment *ast.CommentGroup // doc comment pending attachment to the next node
	comments    *commentState     // top of the comment-attachment stack

	// Next token
	pos token.Pos   // token position
	tok token.Token // one token look-ahead
	lit string      // token literal

	// Error recovery
	// (used to limit the number of calls to syncXXX functions
	// w/o making scanning progress - avoids potential endless
	// loops across multiple parser functions during error recovery)
	syncPos token.Pos // last synchronization position
	syncCnt int       // number of calls to syncXXX without progress

	// Non-syntactic parser control
	exprLev int // < 0: in control clause, >= 0: in expression

	imports []*ast.ImportSpec // list of imports

	version int // language version used by assertV0; 0 means unset
}
    68  
// init configures the parser for the given source, applies the provided
// options, initializes the scanner, and primes the one-token look-ahead.
func (p *parser) init(filename string, src []byte, mode []Option) {
	p.offset = -1
	for _, f := range mode {
		f(p)
	}
	// NewFile must run after the options: an option may set p.offset,
	// which seeds the file's base position.
	p.file = token.NewFile(filename, p.offset, len(src))

	var m scanner.Mode
	if p.mode&parseCommentsMode != 0 {
		m = scanner.ScanComments
	}
	// Scanner errors are funneled into the parser's error list.
	eh := func(pos token.Pos, msg string, args []interface{}) {
		p.errors = errors.Append(p.errors, errors.Newf(pos, msg, args...))
	}
	p.scanner.Init(p.file, src, eh, m)

	p.trace = p.mode&traceMode != 0 // for convenience (p.trace is used frequently)

	p.comments = &commentState{pos: -1}

	// Fetch the first token so parse functions can rely on p.tok/p.pos.
	p.next()
}
    91  
// commentState tracks comment groups while a node is being parsed so
// each group can be attached to the nearest relevant AST node. States
// form a stack (via parent) mirroring the structure of the parse.
type commentState struct {
	parent *commentState
	pos    int8 // current comment position index within the production
	groups []*ast.CommentGroup

	// lists are not attached to nodes themselves. Enclosed expressions may
	// miss a comment due to commas and line termination. closeLists ensures
	// that comments will be passed to someone.
	isList    int      // > 0 while inside a list; counts nested openList calls
	lastChild ast.Node // most recently closed child node within the list
	lastPos   int8     // comment position recorded for that child
}
   104  
// openComments reserves the next doc comment for the caller and flushes
// any comments pending on an enclosing list state, then pushes a new
// commentState for the node about to be parsed.
func (p *parser) openComments() *commentState {
	child := &commentState{
		parent: p.comments,
	}
	if c := p.comments; c != nil && c.isList > 0 {
		if c.lastChild != nil {
			// Attach pending groups to the previously closed child:
			// groups at position 0 first, then the child's existing
			// comments, then the rest re-tagged with the child's position.
			var groups []*ast.CommentGroup
			for _, cg := range c.groups {
				if cg.Position == 0 {
					groups = append(groups, cg)
				}
			}
			groups = append(groups, c.lastChild.Comments()...)
			for _, cg := range c.groups {
				if cg.Position != 0 {
					cg.Position = c.lastPos
					groups = append(groups, cg)
				}
			}
			ast.SetComments(c.lastChild, groups)
			c.groups = nil
		} else {
			c.lastChild = nil
			// attach before next
			for _, cg := range c.groups {
				cg.Position = 0
			}
			child.groups = c.groups
			c.groups = nil
		}
	}
	if p.leadComment != nil {
		// Hand the pending lead (doc) comment to the new node.
		child.groups = append(child.groups, p.leadComment)
		p.leadComment = nil
	}
	p.comments = child
	return child
}
   144  
   145  // openList is used to treat a list of comments as a single comment
   146  // position in a production.
   147  func (p *parser) openList() {
   148  	if p.comments.isList > 0 {
   149  		p.comments.isList++
   150  		return
   151  	}
   152  	c := &commentState{
   153  		parent: p.comments,
   154  		isList: 1,
   155  	}
   156  	p.comments = c
   157  }
   158  
// add records a comment group at the state's current position index.
func (c *commentState) add(g *ast.CommentGroup) {
	g.Position = c.pos
	c.groups = append(c.groups, g)
}
   163  
// closeList ends one level of list scope opened by openList. When the
// outermost level closes, unattached comment groups are handed to the
// parent state so they are not lost.
func (p *parser) closeList() {
	c := p.comments
	if c.lastChild != nil {
		// Attach pending groups to the last child parsed in this list.
		for _, cg := range c.groups {
			cg.Position = c.lastPos
			c.lastChild.AddComment(cg)
		}
		c.groups = nil
	}
	switch c.isList--; {
	case c.isList < 0:
		// More closeList than openList calls: a parser bug.
		if !p.panicking {
			err := errors.Newf(p.pos, "unmatched close list")
			p.errors = errors.Append(p.errors, err)
			p.panicking = true
			panic(err)
		}
	case c.isList == 0:
		// Outermost list closed: pop the state and pass comments up.
		parent := c.parent
		if len(c.groups) > 0 {
			parent.groups = append(parent.groups, c.groups...)
		}
		parent.pos++
		p.comments = parent
	}
}
   190  
   191  func (c *commentState) closeNode(p *parser, n ast.Node) ast.Node {
   192  	if p.comments != c {
   193  		if !p.panicking {
   194  			err := errors.Newf(p.pos, "unmatched comments")
   195  			p.errors = errors.Append(p.errors, err)
   196  			p.panicking = true
   197  			panic(err)
   198  		}
   199  		return n
   200  	}
   201  	p.comments = c.parent
   202  	if c.parent != nil {
   203  		c.parent.lastChild = n
   204  		c.parent.lastPos = c.pos
   205  		c.parent.pos++
   206  	}
   207  	for _, cg := range c.groups {
   208  		if n != nil {
   209  			if cg != nil {
   210  				n.AddComment(cg)
   211  			}
   212  		}
   213  	}
   214  	c.groups = nil
   215  	return n
   216  }
   217  
// closeExpr is closeNode specialized to preserve the static ast.Expr type of n.
func (c *commentState) closeExpr(p *parser, n ast.Expr) ast.Expr {
	c.closeNode(p, n)
	return n
}
   222  
// closeClause is closeNode specialized to preserve the static ast.Clause type of n.
func (c *commentState) closeClause(p *parser, n ast.Clause) ast.Clause {
	c.closeNode(p, n)
	return n
}
   227  
   228  // ----------------------------------------------------------------------------
   229  // Parsing support
   230  
   231  func (p *parser) printTrace(a ...interface{}) {
   232  	const dots = ". . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . "
   233  	const n = len(dots)
   234  	pos := p.file.Position(p.pos)
   235  	fmt.Printf("%5d:%3d: ", pos.Line, pos.Column)
   236  	i := 2 * p.indent
   237  	for i > n {
   238  		fmt.Print(dots)
   239  		i -= n
   240  	}
   241  	// i <= n
   242  	fmt.Print(dots[0:i])
   243  	fmt.Println(a...)
   244  }
   245  
// trace prints the opening marker for msg and increases the trace indent.
// Usage pattern: defer un(trace(p, "..."))
func trace(p *parser, msg string) *parser {
	p.printTrace(msg, "(")
	p.indent++
	return p
}
   251  
// un undoes the indentation from trace and prints the closing marker.
// Usage pattern: defer un(trace(p, "..."))
func un(p *parser) {
	p.indent--
	p.printTrace(")")
}
   257  
// next0 advances to the next token without any comment processing.
func (p *parser) next0() {
	// Because of one-token look-ahead, print the previous token
	// when tracing as it provides a more readable output. The
	// very first token (!p.pos.IsValid()) is not initialized
	// (it is ILLEGAL), so don't print it .
	if p.trace && p.pos.IsValid() {
		s := p.tok.String()
		switch {
		case p.tok.IsLiteral():
			p.printTrace(s, p.lit)
		case p.tok.IsOperator(), p.tok.IsKeyword():
			p.printTrace("\"" + s + "\"")
		default:
			p.printTrace(s)
		}
	}

	p.pos, p.tok, p.lit = p.scanner.Scan()
}
   278  
   279  // Consume a comment and return it and the line on which it ends.
   280  func (p *parser) consumeComment() (comment *ast.Comment, endline int) {
   281  	// /*-style comments may end on a different line than where they start.
   282  	// Scan the comment for '\n' chars and adjust endline accordingly.
   283  	endline = p.file.Line(p.pos)
   284  	if p.lit[1] == '*' {
   285  		p.assertV0(p.pos, 0, 10, "block quotes")
   286  
   287  		// don't use range here - no need to decode Unicode code points
   288  		for i := 0; i < len(p.lit); i++ {
   289  			if p.lit[i] == '\n' {
   290  				endline++
   291  			}
   292  		}
   293  	}
   294  
   295  	comment = &ast.Comment{Slash: p.pos, Text: p.lit}
   296  	p.next0()
   297  
   298  	return
   299  }
   300  
// Consume a group of adjacent comments, add it to the parser's
// comments list, and return it together with the line at which
// the last comment in the group ends. A non-comment token or n
// empty lines terminate a comment group.
func (p *parser) consumeCommentGroup(prevLine, n int) (comments *ast.CommentGroup, endline int) {
	var list []*ast.Comment
	var rel token.RelPos
	endline = p.file.Line(p.pos)
	// Classify vertical spacing of the group relative to the previous token.
	switch endline - prevLine {
	case 0:
		rel = token.Blank // same line as previous token
	case 1:
		rel = token.Newline // immediately following line
	default:
		rel = token.NewSection // one or more blank lines in between
	}
	// Collect adjacent comments, allowing at most n empty lines between them.
	for p.tok == token.COMMENT && p.file.Line(p.pos) <= endline+n {
		var comment *ast.Comment
		comment, endline = p.consumeComment()
		list = append(list, comment)
	}

	cg := &ast.CommentGroup{List: list}
	ast.SetRelPos(cg, rel)
	comments = cg
	return
}
   328  
// Advance to the next non-comment token. In the process, collect
// any comment groups encountered, and record the last lead and
// line comments.
//
// A lead comment is a comment group that starts and ends in a
// line without any other tokens and that is followed by a non-comment
// token on the line immediately after the comment group.
//
// A line comment is a comment group that follows a non-comment
// token on the same line, and that has no tokens after it on the line
// where it ends.
//
// Lead and line comments may be considered documentation that is
// stored in the AST.
func (p *parser) next() {
	// A leadComment may not be consumed if it leads an inner token of a node.
	if p.leadComment != nil {
		p.comments.add(p.leadComment)
	}
	p.leadComment = nil
	prev := p.pos
	p.next0()
	p.comments.pos++

	if p.tok == token.COMMENT {
		var comment *ast.CommentGroup
		var endline int

		currentLine := p.file.Line(p.pos)
		prevLine := p.file.Line(prev)
		if prevLine == currentLine {
			// The comment is on same line as the previous token; it
			// cannot be a lead comment but may be a line comment.
			comment, endline = p.consumeCommentGroup(prevLine, 0)
			if p.file.Line(p.pos) != endline {
				// The next token is on a different line, thus
				// the last comment group is a line comment.
				comment.Line = true
			}
		}

		// consume successor comments, if any
		endline = -1
		for p.tok == token.COMMENT {
			// Any group that is not the last one is attached to the
			// current comment state immediately.
			if comment != nil {
				p.comments.add(comment)
			}
			comment, endline = p.consumeCommentGroup(prevLine, 1)
			prevLine = currentLine
			currentLine = p.file.Line(p.pos)

		}

		if endline+1 == p.file.Line(p.pos) && p.tok != token.EOF {
			// The next token is following on the line immediately after the
			// comment group, thus the last comment group is a lead comment.
			comment.Doc = true
			p.leadComment = comment
		} else {
			p.comments.add(comment)
		}
	}
}
   392  
   393  // assertV0 indicates the last version at which a certain feature was
   394  // supported.
   395  func (p *parser) assertV0(pos token.Pos, minor, patch int, name string) {
   396  	v := version0(minor, patch)
   397  	if p.version != 0 && p.version > v {
   398  		p.errors = errors.Append(p.errors,
   399  			errors.Wrapf(&DeprecationError{v}, pos,
   400  				"use of deprecated %s (deprecated as of v0.%d.%d)", name, minor, patch+1))
   401  	}
   402  }
   403  
   404  func (p *parser) errf(pos token.Pos, msg string, args ...interface{}) {
   405  	// ePos := p.file.Position(pos)
   406  	ePos := pos
   407  
   408  	// If AllErrors is not set, discard errors reported on the same line
   409  	// as the last recorded error and stop parsing if there are more than
   410  	// 10 errors.
   411  	if p.mode&allErrorsMode == 0 {
   412  		errors := errors.Errors(p.errors)
   413  		n := len(errors)
   414  		if n > 0 && errors[n-1].Position().Line() == ePos.Line() {
   415  			return // discard - likely a spurious error
   416  		}
   417  		if n > 10 {
   418  			p.panicking = true
   419  			panic("too many errors")
   420  		}
   421  	}
   422  
   423  	p.errors = errors.Append(p.errors, errors.Newf(ePos, msg, args...))
   424  }
   425  
   426  func (p *parser) errorExpected(pos token.Pos, obj string) {
   427  	if pos != p.pos {
   428  		p.errf(pos, "expected %s", obj)
   429  		return
   430  	}
   431  	// the error happened at the current position;
   432  	// make the error message more specific
   433  	if p.tok == token.COMMA && p.lit == "\n" {
   434  		p.errf(pos, "expected %s, found newline", obj)
   435  		return
   436  	}
   437  
   438  	if p.tok.IsLiteral() {
   439  		p.errf(pos, "expected %s, found '%s' %s", obj, p.tok, p.lit)
   440  	} else {
   441  		p.errf(pos, "expected %s, found '%s'", obj, p.tok)
   442  	}
   443  }
   444  
// expect consumes the current token, reporting an error if it is not
// tok, and returns its position.
func (p *parser) expect(tok token.Token) token.Pos {
	pos := p.pos
	if p.tok != tok {
		p.errorExpected(pos, "'"+tok.String()+"'")
	}
	p.next() // make progress
	return pos
}
   453  
// expectClosing is like expect but provides a better error message
// for the common case of a missing comma before a newline.
func (p *parser) expectClosing(tok token.Token, context string) token.Pos {
	// A COMMA with literal "\n" is the scanner's implicit comma at a newline.
	if p.tok != tok && p.tok == token.COMMA && p.lit == "\n" {
		p.errf(p.pos, "missing ',' before newline in %s", context)
		p.next()
	}
	return p.expect(tok)
}
   463  
   464  func (p *parser) expectComma() {
   465  	// semicolon is optional before a closing ')', ']', '}', or newline
   466  	if p.tok != token.RPAREN && p.tok != token.RBRACE && p.tok != token.EOF {
   467  		switch p.tok {
   468  		case token.COMMA:
   469  			p.next()
   470  		default:
   471  			p.errorExpected(p.pos, "','")
   472  			syncExpr(p)
   473  		}
   474  	}
   475  }
   476  
// atComma reports whether a comma is present (or can be "inserted") at
// the current position. It returns false only when the current token is
// one of the follow tokens; a missing comma elsewhere is reported but
// parsing continues as if the comma were there.
func (p *parser) atComma(context string, follow ...token.Token) bool {
	if p.tok == token.COMMA {
		return true
	}
	for _, t := range follow {
		if p.tok == t {
			return false
		}
	}
	// TODO: find a way to detect crossing lines now we don't have a semi.
	if p.lit == "\n" {
		p.errf(p.pos, "missing ',' before newline")
	} else {
		p.errf(p.pos, "missing ',' in %s", context)
	}
	return true // "insert" comma and continue
}
   494  
// syncExpr advances to the next field in a field list.
// Used for synchronization after an error.
func syncExpr(p *parser) {
	for {
		switch p.tok {
		case token.COMMA:
			// Return only if parser made some progress since last
			// sync or if it has not reached 10 sync calls without
			// progress. Otherwise consume at least one token to
			// avoid an endless parser loop (it is possible that
			// both parseOperand and parseStmt call syncStmt and
			// correctly do not advance, thus the need for the
			// invocation limit p.syncCnt).
			if p.pos == p.syncPos && p.syncCnt < 10 {
				p.syncCnt++
				return
			}
			if p.syncPos.Before(p.pos) {
				p.syncPos = p.pos
				p.syncCnt = 0
				return
			}
			// Reaching here indicates a parser bug, likely an
			// incorrect token list in this function, but it only
			// leads to skipping of possibly correct code if a
			// previous error is present, and thus is preferred
			// over a non-terminating parse.
		case token.EOF:
			return
		}
		p.next()
	}
}
   528  
// safePos returns a valid file position for a given position: If pos
// is valid to begin with, safePos returns pos. If pos is out-of-range,
// safePos returns the EOF position.
//
// This is hack to work around "artificial" end positions in the AST which
// are computed by adding 1 to (presumably valid) token positions. If the
// token positions are invalid due to parse errors, the resulting end position
// may be past the file's EOF position, which would lead to panics if used
// later on.
func (p *parser) safePos(pos token.Pos) (res token.Pos) {
	defer func() {
		// Offset panics on out-of-range positions; substitute EOF.
		if recover() != nil {
			res = p.file.Pos(p.file.Base()+p.file.Size(), pos.RelPos()) // EOF position
		}
	}()
	_ = p.file.Offset(pos) // trigger a panic if position is out-of-range
	return pos
}
   547  
   548  // ----------------------------------------------------------------------------
   549  // Identifiers
   550  
   551  func (p *parser) parseIdent() *ast.Ident {
   552  	c := p.openComments()
   553  	pos := p.pos
   554  	name := "_"
   555  	if p.tok == token.IDENT {
   556  		name = p.lit
   557  		p.next()
   558  	} else {
   559  		p.expect(token.IDENT) // use expect() error handling
   560  	}
   561  	ident := &ast.Ident{NamePos: pos, Name: name}
   562  	c.closeNode(p, ident)
   563  	return ident
   564  }
   565  
// parseKeyIdent turns the current keyword token into an identifier so
// that keywords may be used in label position.
func (p *parser) parseKeyIdent() *ast.Ident {
	c := p.openComments()
	pos := p.pos
	name := p.lit
	p.next()
	ident := &ast.Ident{NamePos: pos, Name: name}
	c.closeNode(p, ident)
	return ident
}
   575  
   576  // ----------------------------------------------------------------------------
   577  // Expressions
   578  
// parseOperand returns an expression.
// Callers must verify the result.
func (p *parser) parseOperand() (expr ast.Expr) {
	if p.trace {
		defer un(trace(p, "Operand"))
	}

	switch p.tok {
	case token.IDENT:
		return p.parseIdent()

	case token.LBRACE:
		return p.parseStruct()

	case token.LBRACK:
		return p.parseList()

	case token.BOTTOM:
		// The bottom literal (_|_).
		c := p.openComments()
		x := &ast.BottomLit{Bottom: p.pos}
		p.next()
		return c.closeExpr(p, x)

	case token.NULL, token.TRUE, token.FALSE, token.INT, token.FLOAT, token.STRING:
		// Basic literals.
		c := p.openComments()
		x := &ast.BasicLit{ValuePos: p.pos, Kind: p.tok, Value: p.lit}
		p.next()
		return c.closeExpr(p, x)

	case token.INTERPOLATION:
		return p.parseInterpolation()

	case token.LPAREN:
		// Parenthesized expression.
		c := p.openComments()
		defer func() { c.closeNode(p, expr) }()
		lparen := p.pos
		p.next()
		p.exprLev++
		p.openList()
		x := p.parseRHS() // types may be parenthesized: (some type)
		p.closeList()
		p.exprLev--
		rparen := p.expect(token.RPAREN)
		return &ast.ParenExpr{
			Lparen: lparen,
			X:      x,
			Rparen: rparen}

	default:
		if p.tok.IsKeyword() {
			return p.parseKeyIdent()
		}
	}

	// we have an error
	c := p.openComments()
	pos := p.pos
	p.errorExpected(pos, "operand")
	// Skip ahead to a likely field boundary before continuing.
	syncExpr(p)
	return c.closeExpr(p, &ast.BadExpr{From: pos, To: p.pos})
}
   640  
// parseIndexOrSlice parses an index expression x[i] or a slice
// expression x[low:high] after x has already been parsed.
func (p *parser) parseIndexOrSlice(x ast.Expr) (expr ast.Expr) {
	if p.trace {
		defer un(trace(p, "IndexOrSlice"))
	}

	c := p.openComments()
	defer func() { c.closeNode(p, expr) }()
	c.pos = 1

	// N is the maximum number of index operands: low and high.
	const N = 2
	lbrack := p.expect(token.LBRACK)

	p.exprLev++
	var index [N]ast.Expr
	var colons [N - 1]token.Pos
	if p.tok != token.COLON {
		index[0] = p.parseRHS()
	}
	nColons := 0
	for p.tok == token.COLON && nColons < len(colons) {
		colons[nColons] = p.pos
		nColons++
		p.next()
		// A missing operand around a colon is left nil.
		if p.tok != token.COLON && p.tok != token.RBRACK && p.tok != token.EOF {
			index[nColons] = p.parseRHS()
		}
	}
	p.exprLev--
	rbrack := p.expect(token.RBRACK)

	if nColons > 0 {
		// At least one colon was seen: this is a slice expression.
		return &ast.SliceExpr{
			X:      x,
			Lbrack: lbrack,
			Low:    index[0],
			High:   index[1],
			Rbrack: rbrack}
	}

	return &ast.IndexExpr{
		X:      x,
		Lbrack: lbrack,
		Index:  index[0],
		Rbrack: rbrack}
}
   686  
// parseCallOrConversion parses the parenthesized argument list of a
// call to fun, which has already been parsed.
func (p *parser) parseCallOrConversion(fun ast.Expr) (expr *ast.CallExpr) {
	if p.trace {
		defer un(trace(p, "CallOrConversion"))
	}
	c := p.openComments()
	defer func() { c.closeNode(p, expr) }()

	p.openList()
	defer p.closeList()

	lparen := p.expect(token.LPAREN)

	p.exprLev++
	var list []ast.Expr
	for p.tok != token.RPAREN && p.tok != token.EOF {
		list = append(list, p.parseRHS()) // builtins may expect a type: make(some type, ...)
		if !p.atComma("argument list", token.RPAREN) {
			break
		}
		p.next()
	}
	p.exprLev--
	rparen := p.expectClosing(token.RPAREN, "argument list")

	return &ast.CallExpr{
		Fun:    fun,
		Lparen: lparen,
		Args:   list,
		Rparen: rparen}
}
   717  
// consumeDeclComma consumes an optional comma after a declaration.
//
// TODO: inline this function in parseFieldList once we no longer use comment
// position information in parsing.
func (p *parser) consumeDeclComma() {
	if p.atComma("struct literal", token.RBRACE, token.EOF) {
		p.next()
	}
}
   725  
// parseFieldList parses the declarations of a struct body until a
// closing brace or end of file.
func (p *parser) parseFieldList() (list []ast.Decl) {
	if p.trace {
		defer un(trace(p, "FieldList"))
	}
	p.openList()
	defer p.closeList()

	for p.tok != token.RBRACE && p.tok != token.EOF {
		switch p.tok {
		case token.ATTRIBUTE:
			list = append(list, p.parseAttribute())
			p.consumeDeclComma()

		case token.ELLIPSIS:
			// An open struct marker "...".
			c := p.openComments()
			ellipsis := &ast.Ellipsis{Ellipsis: p.pos}
			p.next()
			c.closeNode(p, ellipsis)
			list = append(list, ellipsis)
			p.consumeDeclComma()

		default:
			list = append(list, p.parseField())
		}

		// TODO: handle next comma here, after disallowing non-colon separator
		// and we have eliminated the need comment positions.
	}

	return
}
   757  
// parseLetDecl parses a let clause. When the "let" keyword is not
// followed by an identifier, it is not a let clause; the keyword itself
// is returned as an identifier (ident) with a nil decl.
func (p *parser) parseLetDecl() (decl ast.Decl, ident *ast.Ident) {
	if p.trace {
		defer un(trace(p, "Field"))
	}

	c := p.openComments()

	letPos := p.expect(token.LET)
	if p.tok != token.IDENT {
		// NOTE(review): ident is still nil here, so closeNode only pops
		// the comment state; any collected comments are dropped — confirm
		// this is intended.
		c.closeNode(p, ident)
		return nil, &ast.Ident{
			NamePos: letPos,
			Name:    "let",
		}
	}
	defer func() { c.closeNode(p, decl) }()

	ident = p.parseIdent()
	assign := p.expect(token.BIND)
	expr := p.parseRHS()

	p.consumeDeclComma()

	return &ast.LetClause{
		Let:   letPos,
		Ident: ident,
		Equal: assign,
		Expr:  expr,
	}, nil
}
   788  
// parseComprehension parses a for/if comprehension. When the keyword
// does not actually start a comprehension, the keyword is returned as
// an identifier (ident) with a nil decl so it can serve as a label.
func (p *parser) parseComprehension() (decl ast.Decl, ident *ast.Ident) {
	if p.trace {
		defer un(trace(p, "Comprehension"))
	}

	c := p.openComments()
	defer func() { c.closeNode(p, decl) }()

	tok := p.tok
	pos := p.pos
	clauses, fc := p.parseComprehensionClauses(true)
	if fc != nil {
		// Not a comprehension: turn the keyword into an identifier.
		ident = &ast.Ident{
			NamePos: pos,
			Name:    tok.String(),
		}
		fc.closeNode(p, ident)
		return nil, ident
	}

	sc := p.openComments()
	expr := p.parseStruct()
	sc.closeExpr(p, expr)

	if p.atComma("struct literal", token.RBRACE) { // TODO: may be EOF
		p.next()
	}

	return &ast.Comprehension{
		Clauses: clauses,
		Value:   expr,
	}, nil
}
   822  
// parseField parses a single declaration inside a struct body: a field
// (possibly with a chain of labels), an embedding, an old-style alias,
// or a declaration produced by parseLabel (let clause, comprehension).
func (p *parser) parseField() (decl ast.Decl) {
	if p.trace {
		defer un(trace(p, "Field"))
	}

	c := p.openComments()
	defer func() { c.closeNode(p, decl) }()

	pos := p.pos

	// this is the outermost field; m tracks the innermost field as
	// label chains are desugared into nested structs below.
	this := &ast.Field{Label: nil}
	m := this

	tok := p.tok

	label, expr, decl, ok := p.parseLabel(false)
	if decl != nil {
		// parseLabel produced a complete declaration (let/comprehension).
		return decl
	}
	m.Label = label

	if !ok {
		// The expression is not usable as a label: embedding or alias.
		if expr == nil {
			expr = p.parseRHS()
		}
		if a, ok := expr.(*ast.Alias); ok {
			p.assertV0(a.Pos(), 1, 3, `old-style alias; use "let X = expr" instead`)
			p.consumeDeclComma()
			return a
		}
		e := &ast.EmbedDecl{Expr: expr}
		p.consumeDeclComma()
		return e
	}

	if p.tok == token.OPTION {
		m.Optional = p.pos
		p.next()
	}

	// TODO: consider disallowing comprehensions with more than one label.
	// This can be a bit awkward in some cases, but it would naturally
	// enforce the proper style that a comprehension be defined in the
	// smallest possible scope.
	// allowComprehension = false

	switch p.tok {
	case token.COLON, token.ISA:
	case token.COMMA:
		p.expectComma() // sync parser.
		fallthrough

	case token.RBRACE, token.EOF:
		// A label-like expression followed by a terminator is an alias
		// or an embedding, depending on the starting token.
		if a, ok := expr.(*ast.Alias); ok {
			p.assertV0(a.Pos(), 1, 3, `old-style alias; use "let X = expr" instead`)
			return a
		}
		switch tok {
		case token.IDENT, token.LBRACK, token.LPAREN,
			token.STRING, token.INTERPOLATION,
			token.NULL, token.TRUE, token.FALSE,
			token.FOR, token.IF, token.LET, token.IN:
			return &ast.EmbedDecl{Expr: expr}
		}
		fallthrough

	default:
		p.errorExpected(p.pos, "label or ':'")
		return &ast.BadDecl{From: pos, To: p.pos}
	}

	m.TokenPos = p.pos
	m.Token = p.tok
	if p.tok == token.ISA {
		p.assertV0(p.pos, 2, 0, "'::'")
	}
	if p.tok != token.COLON && p.tok != token.ISA {
		p.errorExpected(pos, "':' or '::'")
	}
	p.next() // : or ::

	// Parse the remainder of the label chain: a: b: c: v desugars into
	// nested single-field structs, with m tracking the innermost field.
	for {
		if l, ok := m.Label.(*ast.ListLit); ok && len(l.Elts) != 1 {
			p.errf(l.Pos(), "square bracket must have exactly one element")
		}

		tok := p.tok
		label, expr, _, ok := p.parseLabel(true)
		if !ok || (p.tok != token.COLON && p.tok != token.ISA && p.tok != token.OPTION) {
			// Not another label: this expression is the field's value.
			if expr == nil {
				expr = p.parseRHS()
			}
			m.Value = expr
			break
		}
		field := &ast.Field{Label: label}
		m.Value = &ast.StructLit{Elts: []ast.Decl{field}}
		m = field

		if tok != token.LSS && p.tok == token.OPTION {
			m.Optional = p.pos
			p.next()
		}

		m.TokenPos = p.pos
		m.Token = p.tok
		if p.tok == token.ISA {
			p.assertV0(p.pos, 2, 0, "'::'")
		}
		if p.tok != token.COLON && p.tok != token.ISA {
			if p.tok.IsLiteral() {
				p.errf(p.pos, "expected ':' or '::'; found %s", p.lit)
			} else {
				p.errf(p.pos, "expected ':' or '::'; found %s", p.tok)
			}
			break
		}
		p.next()
	}

	// Attributes attach to the innermost field of the chain.
	if attrs := p.parseAttributes(); attrs != nil {
		m.Attrs = attrs
	}

	p.consumeDeclComma()

	return this
}
   951  
   952  func (p *parser) parseAttributes() (attrs []*ast.Attribute) {
   953  	p.openList()
   954  	for p.tok == token.ATTRIBUTE {
   955  		attrs = append(attrs, p.parseAttribute())
   956  	}
   957  	p.closeList()
   958  	return attrs
   959  }
   960  
// parseAttribute parses a single @attribute token into an AST node.
func (p *parser) parseAttribute() *ast.Attribute {
	c := p.openComments()
	a := &ast.Attribute{At: p.pos, Text: p.lit}
	p.next()
	c.closeNode(p, a)
	return a
}
   968  
// parseLabel parses an expression in label position. It returns the
// label when the expression is usable as one (ok == true), the raw
// expression otherwise, or a complete declaration (decl non-nil) when
// the current token starts a let clause or comprehension.
func (p *parser) parseLabel(rhs bool) (label ast.Label, expr ast.Expr, decl ast.Decl, ok bool) {
	tok := p.tok
	switch tok {

	case token.FOR, token.IF:
		if rhs {
			// In value position these keywords start a plain expression.
			expr = p.parseExpr()
			break
		}
		comp, ident := p.parseComprehension()
		if comp != nil {
			return nil, nil, comp, false
		}
		// Not a comprehension after all: the keyword acts as an identifier.
		expr = ident

	case token.LET:
		let, ident := p.parseLetDecl()
		if let != nil {
			return nil, nil, let, false
		}
		// Not a let clause: "let" acts as an identifier.
		expr = ident

	case token.IDENT, token.STRING, token.INTERPOLATION, token.LPAREN,
		token.NULL, token.TRUE, token.FALSE, token.IN:
		expr = p.parseExpr()

	case token.LBRACK:
		expr = p.parseRHS()
		switch x := expr.(type) {
		case *ast.ListLit:
			// Note: caller must verify this list is suitable as a label.
			label, ok = x, true
		}
	}

	switch x := expr.(type) {
	case *ast.BasicLit:
		switch x.Kind {
		case token.STRING, token.NULL, token.TRUE, token.FALSE:
			// Keywords that represent operands.

			// Allowing keywords to be used as a labels should not interfere with
			// generating good errors: any keyword can only appear on the RHS of a
			// field (after a ':'), whereas labels always appear on the LHS.

			label, ok = x, true
		}

	case *ast.Ident:
		if strings.HasPrefix(x.Name, "__") {
			p.errf(x.NamePos, "identifiers starting with '__' are reserved")
		}

		// The identifier may be followed by '=', forming an alias.
		expr = p.parseAlias(x)
		if a, ok := expr.(*ast.Alias); ok {
			// An alias is a valid label only if it aliases a label.
			if _, ok = a.Expr.(ast.Label); !ok {
				break
			}
			label = a
		} else {
			label = x
		}
		ok = true

	case ast.Label:
		label, ok = x, true
	}
	return label, expr, nil, ok
}
  1038  
  1039  func (p *parser) parseStruct() (expr ast.Expr) {
  1040  	lbrace := p.expect(token.LBRACE)
  1041  
  1042  	if p.trace {
  1043  		defer un(trace(p, "StructLit"))
  1044  	}
  1045  
  1046  	elts := p.parseStructBody()
  1047  	rbrace := p.expectClosing(token.RBRACE, "struct literal")
  1048  	return &ast.StructLit{
  1049  		Lbrace: lbrace,
  1050  		Elts:   elts,
  1051  		Rbrace: rbrace,
  1052  	}
  1053  }
  1054  
  1055  func (p *parser) parseStructBody() []ast.Decl {
  1056  	if p.trace {
  1057  		defer un(trace(p, "StructBody"))
  1058  	}
  1059  
  1060  	p.exprLev++
  1061  	var elts []ast.Decl
  1062  
  1063  	// TODO: consider "stealing" non-lead comments.
  1064  	// for _, cg := range p.comments.groups {
  1065  	// 	if cg != nil {
  1066  	// 		elts = append(elts, cg)
  1067  	// 	}
  1068  	// }
  1069  	// p.comments.groups = p.comments.groups[:0]
  1070  
  1071  	if p.tok != token.RBRACE {
  1072  		elts = p.parseFieldList()
  1073  	}
  1074  	p.exprLev--
  1075  
  1076  	return elts
  1077  }
  1078  
// parseComprehensionClauses parses either new-style (first==true)
// or old-style (first==false).
// Should we now disallow keywords as identifiers? If not, we need to
// return a list of discovered labels as the alternative.
//
// When first is true and the leading 'for' or 'if' is immediately
// followed by a field separator (':', '::', '=', '?', ',') or EOF, the
// keyword is actually being used as a label rather than a clause: nil
// clauses are returned together with the still-open comment state so
// the caller can backtrack.
func (p *parser) parseComprehensionClauses(first bool) (clauses []ast.Clause, c *commentState) {
	// TODO: reuse Template spec, which is possible if it doesn't check the
	// first is an identifier.

	for {
		switch p.tok {
		case token.FOR:
			c := p.openComments()
			forPos := p.expect(token.FOR)
			if first {
				switch p.tok {
				case token.COLON, token.ISA, token.BIND, token.OPTION,
					token.COMMA, token.EOF:
					// 'for' used as a label; let the caller backtrack.
					return nil, c
				}
			}

			// A single identifier binds the value; "key, value" binds both.
			var key, value *ast.Ident
			var colon token.Pos
			value = p.parseIdent()
			if p.tok == token.COMMA {
				colon = p.expect(token.COMMA)
				key = value
				value = p.parseIdent()
			}
			c.pos = 4 // comment attachment slot within the clause (see commentState)
			// params := p.parseParams(nil, ARROW)
			clauses = append(clauses, c.closeClause(p, &ast.ForClause{
				For:    forPos,
				Key:    key,
				Colon:  colon,
				Value:  value,
				In:     p.expect(token.IN),
				Source: p.parseRHS(),
			}))

		case token.IF:
			c := p.openComments()
			ifPos := p.expect(token.IF)
			if first {
				switch p.tok {
				case token.COLON, token.ISA, token.BIND, token.OPTION,
					token.COMMA, token.EOF:
					// 'if' used as a label; let the caller backtrack.
					return nil, c
				}
			}

			clauses = append(clauses, c.closeClause(p, &ast.IfClause{
				If:        ifPos,
				Condition: p.parseRHS(),
			}))

		case token.LET:
			c := p.openComments()
			letPos := p.expect(token.LET)

			ident := p.parseIdent()
			assign := p.expect(token.BIND)
			expr := p.parseRHS()

			clauses = append(clauses, c.closeClause(p, &ast.LetClause{
				Let:   letPos,
				Ident: ident,
				Equal: assign,
				Expr:  expr,
			}))

		default:
			// Not a clause keyword: the clause list ends here.
			return clauses, nil
		}
		// Commas between clauses are optional.
		if p.tok == token.COMMA {
			p.next()
		}

		first = false
	}
}
  1160  
  1161  func (p *parser) parseList() (expr ast.Expr) {
  1162  	lbrack := p.expect(token.LBRACK)
  1163  
  1164  	if p.trace {
  1165  		defer un(trace(p, "ListLiteral"))
  1166  	}
  1167  
  1168  	elts := p.parseListElements()
  1169  
  1170  	if p.tok == token.ELLIPSIS {
  1171  		ellipsis := &ast.Ellipsis{
  1172  			Ellipsis: p.pos,
  1173  		}
  1174  		elts = append(elts, ellipsis)
  1175  		p.next()
  1176  		if p.tok != token.COMMA && p.tok != token.RBRACK {
  1177  			ellipsis.Type = p.parseRHS()
  1178  		}
  1179  		if p.atComma("list literal", token.RBRACK) {
  1180  			p.next()
  1181  		}
  1182  	}
  1183  
  1184  	rbrack := p.expectClosing(token.RBRACK, "list literal")
  1185  	return &ast.ListLit{
  1186  		Lbrack: lbrack,
  1187  		Elts:   elts,
  1188  		Rbrack: rbrack}
  1189  }
  1190  
  1191  func (p *parser) parseListElements() (list []ast.Expr) {
  1192  	if p.trace {
  1193  		defer un(trace(p, "ListElements"))
  1194  	}
  1195  	p.openList()
  1196  	defer p.closeList()
  1197  
  1198  	for p.tok != token.RBRACK && p.tok != token.ELLIPSIS && p.tok != token.EOF {
  1199  		expr, ok := p.parseListElement()
  1200  		list = append(list, expr)
  1201  		if !ok {
  1202  			break
  1203  		}
  1204  	}
  1205  
  1206  	return
  1207  }
  1208  
// parseListElement parses one element of a list literal: either a
// comprehension ('for'/'if' clauses followed by a struct) or an
// ordinary expression, optionally aliased. The returned ok reports
// whether the caller should continue parsing further elements.
func (p *parser) parseListElement() (expr ast.Expr, ok bool) {
	if p.trace {
		defer un(trace(p, "ListElement"))
	}
	c := p.openComments()
	defer func() { c.closeNode(p, expr) }()

	switch p.tok {
	case token.FOR, token.IF:
		tok := p.tok
		pos := p.pos
		clauses, fc := p.parseComprehensionClauses(true)
		if clauses != nil {
			sc := p.openComments()
			expr := p.parseStruct()
			sc.closeExpr(p, expr)

			if p.atComma("list literal", token.RBRACK) { // TODO: may be EOF
				p.next()
			}

			return &ast.Comprehension{
				Clauses: clauses,
				Value:   expr,
			}, true
		}

		// Not a comprehension after all: the keyword is an identifier
		// used as an operand.
		expr = &ast.Ident{
			NamePos: pos,
			Name:    tok.String(),
		}
		fc.closeNode(p, expr)

	default:
		expr = p.parseUnaryExpr()
	}

	expr = p.parseBinaryExprTail(token.LowestPrec+1, expr)
	expr = p.parseAlias(expr)

	// Enforce there is an explicit comma. We could also allow the
	// omission of commas in lists, but this gives rise to some ambiguities
	// with list comprehensions.
	if p.tok == token.COMMA && p.lit != "," {
		// A COMMA token whose literal is not "," was inserted by the
		// scanner (at a newline) rather than written by the user.
		p.next()
		// Allow missing comma for last element, though, to be compliant
		// with JSON.
		if p.tok == token.RBRACK || p.tok == token.FOR || p.tok == token.IF {
			return expr, false
		}
		p.errf(p.pos, "missing ',' before newline in list literal")
	} else if !p.atComma("list literal", token.RBRACK, token.FOR, token.IF) {
		return expr, false
	}
	p.next()

	return expr, true
}
  1267  
  1268  // parseAlias turns an expression into an alias.
  1269  func (p *parser) parseAlias(lhs ast.Expr) (expr ast.Expr) {
  1270  	if p.tok != token.BIND {
  1271  		return lhs
  1272  	}
  1273  	pos := p.pos
  1274  	p.next()
  1275  	expr = p.parseRHS()
  1276  	if expr == nil {
  1277  		panic("empty return")
  1278  	}
  1279  	switch x := lhs.(type) {
  1280  	case *ast.Ident:
  1281  		return &ast.Alias{Ident: x, Equal: pos, Expr: expr}
  1282  	}
  1283  	p.errf(p.pos, "expected identifier for alias")
  1284  	return expr
  1285  }
  1286  
  1287  // checkExpr checks that x is an expression (and not a type).
  1288  func (p *parser) checkExpr(x ast.Expr) ast.Expr {
  1289  	switch unparen(x).(type) {
  1290  	case *ast.BadExpr:
  1291  	case *ast.BottomLit:
  1292  	case *ast.Ident:
  1293  	case *ast.BasicLit:
  1294  	case *ast.Interpolation:
  1295  	case *ast.StructLit:
  1296  	case *ast.ListLit:
  1297  	case *ast.ParenExpr:
  1298  		panic("unreachable")
  1299  	case *ast.SelectorExpr:
  1300  	case *ast.IndexExpr:
  1301  	case *ast.SliceExpr:
  1302  	case *ast.CallExpr:
  1303  	case *ast.UnaryExpr:
  1304  	case *ast.BinaryExpr:
  1305  	default:
  1306  		// all other nodes are not proper expressions
  1307  		p.errorExpected(x.Pos(), "expression")
  1308  		x = &ast.BadExpr{
  1309  			From: x.Pos(), To: p.safePos(x.End()),
  1310  		}
  1311  	}
  1312  	return x
  1313  }
  1314  
  1315  // If x is of the form (T), unparen returns unparen(T), otherwise it returns x.
  1316  func unparen(x ast.Expr) ast.Expr {
  1317  	if p, isParen := x.(*ast.ParenExpr); isParen {
  1318  		x = unparen(p.X)
  1319  	}
  1320  	return x
  1321  }
  1322  
  1323  // If lhs is set and the result is an identifier, it is not resolved.
  1324  func (p *parser) parsePrimaryExpr() ast.Expr {
  1325  	if p.trace {
  1326  		defer un(trace(p, "PrimaryExpr"))
  1327  	}
  1328  
  1329  	return p.parsePrimaryExprTail(p.parseOperand())
  1330  }
  1331  
// parsePrimaryExprTail repeatedly applies primary-expression suffixes
// to operand: selectors (.ident or ."string"), index/slice expressions,
// and call arguments, stopping at the first token that cannot extend
// the expression.
func (p *parser) parsePrimaryExprTail(operand ast.Expr) ast.Expr {
	x := operand
L:
	for {
		switch p.tok {
		case token.PERIOD:
			c := p.openComments()
			c.pos = 1
			p.next()
			switch p.tok {
			case token.IDENT:
				x = &ast.SelectorExpr{
					X:   p.checkExpr(x),
					Sel: p.parseIdent(),
				}
			case token.STRING:
				// Only a string starting with a single '"' (not '""',
				// which would begin an empty or multi-line string) may
				// be used as a selector.
				if strings.HasPrefix(p.lit, `"`) && !strings.HasPrefix(p.lit, `""`) {
					str := &ast.BasicLit{
						ValuePos: p.pos,
						Kind:     token.STRING,
						Value:    p.lit,
					}
					p.next()
					x = &ast.SelectorExpr{
						X:   p.checkExpr(x),
						Sel: str,
					}
					break
				}
				fallthrough
			default:
				// Error recovery: report and substitute a placeholder
				// selector so parsing can continue.
				pos := p.pos
				p.errorExpected(pos, "selector")
				p.next() // make progress
				x = &ast.SelectorExpr{X: x, Sel: &ast.Ident{NamePos: pos, Name: "_"}}
			}
			c.closeNode(p, x)
		case token.LBRACK:
			x = p.parseIndexOrSlice(p.checkExpr(x))
		case token.LPAREN:
			x = p.parseCallOrConversion(p.checkExpr(x))
		default:
			break L
		}
	}

	return x
}
  1380  
  1381  // If lhs is set and the result is an identifier, it is not resolved.
  1382  func (p *parser) parseUnaryExpr() ast.Expr {
  1383  	if p.trace {
  1384  		defer un(trace(p, "UnaryExpr"))
  1385  	}
  1386  
  1387  	switch p.tok {
  1388  	case token.ADD, token.SUB, token.NOT, token.MUL,
  1389  		token.LSS, token.LEQ, token.GEQ, token.GTR,
  1390  		token.NEQ, token.MAT, token.NMAT:
  1391  		pos, op := p.pos, p.tok
  1392  		c := p.openComments()
  1393  		p.next()
  1394  		return c.closeExpr(p, &ast.UnaryExpr{
  1395  			OpPos: pos,
  1396  			Op:    op,
  1397  			X:     p.checkExpr(p.parseUnaryExpr()),
  1398  		})
  1399  	}
  1400  
  1401  	return p.parsePrimaryExpr()
  1402  }
  1403  
  1404  func (p *parser) tokPrec() (token.Token, int) {
  1405  	tok := p.tok
  1406  	if tok == token.IDENT {
  1407  		switch p.lit {
  1408  		case "quo":
  1409  			return token.IQUO, 7
  1410  		case "rem":
  1411  			return token.IREM, 7
  1412  		case "div":
  1413  			return token.IDIV, 7
  1414  		case "mod":
  1415  			return token.IMOD, 7
  1416  		default:
  1417  			return tok, 0
  1418  		}
  1419  	}
  1420  	return tok, tok.Precedence()
  1421  }
  1422  
  1423  // If lhs is set and the result is an identifier, it is not resolved.
  1424  func (p *parser) parseBinaryExpr(prec1 int) ast.Expr {
  1425  	if p.trace {
  1426  		defer un(trace(p, "BinaryExpr"))
  1427  	}
  1428  	p.openList()
  1429  	defer p.closeList()
  1430  
  1431  	return p.parseBinaryExprTail(prec1, p.parseUnaryExpr())
  1432  }
  1433  
// parseBinaryExprTail parses the remainder of a binary expression whose
// left operand x is already parsed, consuming operators of precedence
// prec1 or higher. It uses precedence climbing: each right operand is
// parsed at one precedence level higher, which makes operators of equal
// precedence left-associative.
func (p *parser) parseBinaryExprTail(prec1 int, x ast.Expr) ast.Expr {
	for {
		op, prec := p.tokPrec()
		if prec < prec1 {
			// The next operator binds less tightly; leave it for the caller.
			return x
		}
		c := p.openComments()
		c.pos = 1
		pos := p.expect(p.tok)
		x = c.closeExpr(p, &ast.BinaryExpr{
			X:     p.checkExpr(x),
			OpPos: pos,
			Op:    op,
			// Treat nested expressions as RHS.
			Y: p.checkExpr(p.parseBinaryExpr(prec + 1))})
	}
}
  1451  
// parseInterpolation parses an interpolated string. The scanner has
// already split the literal at the first embedded expression; the
// resulting Interpolation's Elts alternate between *ast.BasicLit string
// fragments and the embedded expressions.
func (p *parser) parseInterpolation() (expr ast.Expr) {
	c := p.openComments()
	defer func() { c.closeNode(p, expr) }()

	p.openList()
	defer p.closeList()

	cc := p.openComments()

	// The fragment up to the first embedded expression.
	lit := p.lit
	pos := p.pos
	p.next()
	last := &ast.BasicLit{ValuePos: pos, Kind: token.STRING, Value: lit}
	exprs := []ast.Expr{last}

	for p.tok == token.LPAREN {
		c.pos = 1
		p.expect(token.LPAREN)
		cc.closeExpr(p, last)

		// The embedded expression itself.
		exprs = append(exprs, p.parseRHS())

		cc = p.openComments()
		if p.tok != token.RPAREN {
			p.errf(p.pos, "expected ')' for string interpolation")
		}
		// Resume scanning the string literal after the closing paren to
		// obtain the next fragment.
		lit = p.scanner.ResumeInterpolation()
		pos = p.pos
		p.next()
		last = &ast.BasicLit{
			ValuePos: pos,
			Kind:     token.STRING,
			Value:    lit,
		}
		exprs = append(exprs, last)
	}
	cc.closeExpr(p, last)
	return &ast.Interpolation{Elts: exprs}
}
  1491  
  1492  // Callers must check the result (using checkExpr), depending on context.
  1493  func (p *parser) parseExpr() (expr ast.Expr) {
  1494  	if p.trace {
  1495  		defer un(trace(p, "Expression"))
  1496  	}
  1497  
  1498  	c := p.openComments()
  1499  	defer func() { c.closeExpr(p, expr) }()
  1500  
  1501  	return p.parseBinaryExpr(token.LowestPrec + 1)
  1502  }
  1503  
  1504  func (p *parser) parseRHS() ast.Expr {
  1505  	x := p.checkExpr(p.parseExpr())
  1506  	return x
  1507  }
  1508  
  1509  // ----------------------------------------------------------------------------
  1510  // Declarations
  1511  
  1512  func isValidImport(lit string) bool {
  1513  	const illegalChars = `!"#$%&'()*,:;<=>?[\]^{|}` + "`\uFFFD"
  1514  	s, _ := literal.Unquote(lit) // go/scanner returns a legal string literal
  1515  	if p := strings.LastIndexByte(s, ':'); p >= 0 {
  1516  		s = s[:p]
  1517  	}
  1518  	for _, r := range s {
  1519  		if !unicode.IsGraphic(r) || unicode.IsSpace(r) || strings.ContainsRune(illegalChars, r) {
  1520  			return false
  1521  		}
  1522  	}
  1523  	return s != ""
  1524  }
  1525  
// parseImportSpec parses one import spec: an optional local name
// identifier followed by a quoted import path. The int argument is
// ignored (it mirrors the iteration index passed by parseImports). As a
// side effect, the spec is appended to p.imports.
func (p *parser) parseImportSpec(_ int) *ast.ImportSpec {
	if p.trace {
		defer un(trace(p, "ImportSpec"))
	}

	c := p.openComments()

	// Optional local name for the imported package.
	var ident *ast.Ident
	if p.tok == token.IDENT {
		ident = p.parseIdent()
	}

	pos := p.pos
	var path string
	if p.tok == token.STRING {
		path = p.lit
		if !isValidImport(path) {
			p.errf(pos, "invalid import path: %s", path)
		}
		p.next()
		p.expectComma() // call before accessing p.linecomment
	} else {
		p.expect(token.STRING) // use expect() error handling
		if p.tok == token.COMMA {
			p.expectComma() // call before accessing p.linecomment
		}
	}
	// collect imports
	spec := &ast.ImportSpec{
		Name: ident,
		Path: &ast.BasicLit{ValuePos: pos, Kind: token.STRING, Value: path},
	}
	c.closeNode(p, spec)
	p.imports = append(p.imports, spec)

	return spec
}
  1563  
  1564  func (p *parser) parseImports() *ast.ImportDecl {
  1565  	if p.trace {
  1566  		defer un(trace(p, "Imports"))
  1567  	}
  1568  	c := p.openComments()
  1569  
  1570  	ident := p.parseIdent()
  1571  	var lparen, rparen token.Pos
  1572  	var list []*ast.ImportSpec
  1573  	if p.tok == token.LPAREN {
  1574  		lparen = p.pos
  1575  		p.next()
  1576  		p.openList()
  1577  		for iota := 0; p.tok != token.RPAREN && p.tok != token.EOF; iota++ {
  1578  			list = append(list, p.parseImportSpec(iota))
  1579  		}
  1580  		p.closeList()
  1581  		rparen = p.expect(token.RPAREN)
  1582  		p.expectComma()
  1583  	} else {
  1584  		list = append(list, p.parseImportSpec(0))
  1585  	}
  1586  
  1587  	d := &ast.ImportDecl{
  1588  		Import: ident.Pos(),
  1589  		Lparen: lparen,
  1590  		Specs:  list,
  1591  		Rparen: rparen,
  1592  	}
  1593  	c.closeNode(p, d)
  1594  	return d
  1595  }
  1596  
  1597  // ----------------------------------------------------------------------------
  1598  // Source files
  1599  
// parseFile parses an entire CUE file: optional leading attributes, an
// optional package clause, import declarations, and the remaining
// top-level declarations. The packageClauseOnlyMode and importsOnlyMode
// parsing modes stop parsing early.
func (p *parser) parseFile() *ast.File {
	if p.trace {
		defer un(trace(p, "File"))
	}

	c := p.comments

	// Don't bother parsing the rest if we had errors scanning the first
	// Likely not a CUE source file at all.
	if p.errors != nil {
		return nil
	}
	p.openList()

	var decls []ast.Decl

	// Attributes may precede the package clause.
	for p.tok == token.ATTRIBUTE {
		decls = append(decls, p.parseAttribute())
		p.consumeDeclComma()
	}

	// The package clause is not a declaration: it does not appear in any
	// scope.
	if p.tok == token.IDENT && p.lit == "package" {
		c := p.openComments()

		pos := p.pos
		var name *ast.Ident
		p.expect(token.IDENT)
		name = p.parseIdent()
		if name.Name == "_" && p.mode&declarationErrorsMode != 0 {
			p.errf(p.pos, "invalid package name _")
		}

		pkg := &ast.Package{
			PackagePos: pos,
			Name:       name,
		}
		decls = append(decls, pkg)
		p.expectComma()
		c.closeNode(p, pkg)
	}

	// Attributes may also follow the package clause.
	for p.tok == token.ATTRIBUTE {
		decls = append(decls, p.parseAttribute())
		p.consumeDeclComma()
	}

	if p.mode&packageClauseOnlyMode == 0 {
		// import decls
		for p.tok == token.IDENT && p.lit == "import" {
			decls = append(decls, p.parseImports())
		}

		if p.mode&importsOnlyMode == 0 {
			// rest of package decls
			// TODO: loop and allow multiple expressions.
			decls = append(decls, p.parseFieldList()...)
			p.expect(token.EOF)
		}
	}
	p.closeList()

	f := &ast.File{
		Imports: p.imports,
		Decls:   decls,
	}
	c.closeNode(p, f)
	return f
}