cuelang.org/go@v0.13.0/cue/parser/parser.go (about)

     1  // Copyright 2018 The CUE Authors
     2  //
     3  // Licensed under the Apache License, Version 2.0 (the "License");
     4  // you may not use this file except in compliance with the License.
     5  // You may obtain a copy of the License at
     6  //
     7  //     http://www.apache.org/licenses/LICENSE-2.0
     8  //
     9  // Unless required by applicable law or agreed to in writing, software
    10  // distributed under the License is distributed on an "AS IS" BASIS,
    11  // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    12  // See the License for the specific language governing permissions and
    13  // limitations under the License.
    14  
    15  package parser
    16  
    17  import (
    18  	"fmt"
    19  	"strings"
    20  	"unicode"
    21  
    22  	"cuelang.org/go/cue/ast"
    23  	"cuelang.org/go/cue/errors"
    24  	"cuelang.org/go/cue/literal"
    25  	"cuelang.org/go/cue/scanner"
    26  	"cuelang.org/go/cue/token"
    27  	"cuelang.org/go/internal"
    28  )
    29  
// The parser structure holds the parser's internal state.
type parser struct {
	file    *token.File     // file being parsed; all positions are relative to it
	errors  errors.Error    // accumulated parse errors
	scanner scanner.Scanner // tokenizer producing the token stream

	// Tracing/debugging
	mode      mode // parsing mode
	trace     bool // == (mode & Trace != 0)
	panicking bool // set if we are bailing out due to too many errors.
	indent    int  // indentation used for tracing output

	// Comments
	leadComment *ast.CommentGroup // pending doc comment for the next construct
	comments    *commentState     // innermost open comment scope (stack via parent)

	// Next token, filled by [parser.next0].
	pos token.Pos   // token position
	tok token.Token // one token look-ahead
	lit string      // token literal

	// Token after next, filled by [parser.peek].
	peekToken struct {
		scanned bool // whether the fields below hold a buffered token

		pos token.Pos
		tok token.Token
		lit string
	}

	// Error recovery
	// (used to limit the number of calls to sync... functions
	// w/o making scanning progress - avoids potential endless
	// loops across multiple parser functions during error recovery)
	syncPos token.Pos // last synchronization position
	syncCnt int       // number of calls to sync... functions without progress

	// Non-syntactic parser control
	exprLev int // < 0: in control clause, >= 0: in expression

	imports []*ast.ImportSpec // list of imports

	// version is the language version to check deprecations against;
	// 0 means "use the default" (see assertV0).
	version int
}
    74  
    75  func (p *parser) init(filename string, src []byte, mode []Option) {
    76  	for _, f := range mode {
    77  		f(p)
    78  	}
    79  	p.file = token.NewFile(filename, -1, len(src))
    80  
    81  	var m scanner.Mode
    82  	if p.mode&parseCommentsMode != 0 {
    83  		m = scanner.ScanComments
    84  	}
    85  	eh := func(pos token.Pos, msg string, args []interface{}) {
    86  		p.errors = errors.Append(p.errors, errors.Newf(pos, msg, args...))
    87  	}
    88  	p.scanner.Init(p.file, src, eh, m)
    89  
    90  	p.trace = p.mode&traceMode != 0 // for convenience (p.trace is used frequently)
    91  
    92  	p.comments = &commentState{pos: -1}
    93  
    94  	p.next()
    95  }
    96  
// commentState tracks comment attachment for one node or list currently
// being parsed. States form a stack linked through parent; comments
// collected in a scope are attached when the scope closes.
type commentState struct {
	parent *commentState       // enclosing comment scope
	pos    int8                // current comment position within the production
	groups []*ast.CommentGroup // comment groups collected for this scope

	// lists are not attached to nodes themselves. Enclosed expressions may
	// miss a comment due to commas and line termination. closeLists ensures
	// that comments will be passed to someone.
	isList    int      // list nesting depth; > 0 when this scope is a list
	lastChild ast.Node // most recently closed child node in this scope
	lastPos   int8     // comment position recorded when lastChild closed
}
   109  
// openComments reserves the next doc comment for the caller and flushes
// comments pending in an enclosing list scope: comments recorded so far
// are either attached to the list's last closed child or carried forward
// into the new scope. It pushes and returns a new comment scope.
func (p *parser) openComments() *commentState {
	child := &commentState{
		parent: p.comments,
	}
	if c := p.comments; c != nil && c.isList > 0 {
		if c.lastChild != nil {
			// Attach pending groups around the last child's existing
			// comments: position-0 groups go before, the rest after
			// (re-positioned to where the child closed).
			var groups []*ast.CommentGroup
			for _, cg := range c.groups {
				if cg.Position == 0 {
					groups = append(groups, cg)
				}
			}
			groups = append(groups, ast.Comments(c.lastChild)...)
			for _, cg := range c.groups {
				if cg.Position != 0 {
					cg.Position = c.lastPos
					groups = append(groups, cg)
				}
			}
			ast.SetComments(c.lastChild, groups)
			c.groups = nil
		} else {
			c.lastChild = nil
			// attach before next
			for _, cg := range c.groups {
				cg.Position = 0
			}
			child.groups = c.groups
			c.groups = nil
		}
	}
	// A pending lead (doc) comment belongs to whatever the caller is
	// about to parse; hand it to the new scope.
	if p.leadComment != nil {
		child.groups = append(child.groups, p.leadComment)
		p.leadComment = nil
	}
	p.comments = child
	return child
}
   149  
   150  // openList is used to treat a list of comments as a single comment
   151  // position in a production.
   152  func (p *parser) openList() {
   153  	if p.comments.isList > 0 {
   154  		p.comments.isList++
   155  		return
   156  	}
   157  	c := &commentState{
   158  		parent: p.comments,
   159  		isList: 1,
   160  	}
   161  	p.comments = c
   162  }
   163  
// add records comment group g at the scope's current comment position.
func (c *commentState) add(g *ast.CommentGroup) {
	g.Position = c.pos
	c.groups = append(c.groups, g)
}
   168  
// closeList ends one level of a list comment scope opened by openList.
// Pending comment groups are attached to the list's last closed child
// if there is one; when the outermost list level closes, any remaining
// groups are handed to the parent scope so no comment is lost.
func (p *parser) closeList() {
	c := p.comments
	if c.lastChild != nil {
		for _, cg := range c.groups {
			cg.Position = c.lastPos
			ast.AddComment(c.lastChild, cg)
		}
		c.groups = nil
	}
	switch c.isList--; {
	case c.isList < 0:
		// More closeList calls than openList calls: a parser bug.
		if !p.panicking {
			err := errors.Newf(p.pos, "unmatched close list")
			p.errors = errors.Append(p.errors, err)
			p.panicking = true
			panic(err)
		}
	case c.isList == 0:
		// Outermost level closed: pop the scope, forwarding leftovers.
		parent := c.parent
		if len(c.groups) > 0 {
			parent.groups = append(parent.groups, c.groups...)
		}
		parent.pos++
		p.comments = parent
	}
}
   195  
   196  func (c *commentState) closeNode(p *parser, n ast.Node) ast.Node {
   197  	if p.comments != c {
   198  		if !p.panicking {
   199  			err := errors.Newf(p.pos, "unmatched comments")
   200  			p.errors = errors.Append(p.errors, err)
   201  			p.panicking = true
   202  			panic(err)
   203  		}
   204  		return n
   205  	}
   206  	p.comments = c.parent
   207  	if c.parent != nil {
   208  		c.parent.lastChild = n
   209  		c.parent.lastPos = c.pos
   210  		c.parent.pos++
   211  	}
   212  	for _, cg := range c.groups {
   213  		if n != nil {
   214  			if cg != nil {
   215  				ast.AddComment(n, cg)
   216  			}
   217  		}
   218  	}
   219  	c.groups = nil
   220  	return n
   221  }
   222  
// closeExpr is closeNode specialized to return the closed expression.
func (c *commentState) closeExpr(p *parser, n ast.Expr) ast.Expr {
	c.closeNode(p, n)
	return n
}
   227  
// closeClause is closeNode specialized to return the closed clause.
func (c *commentState) closeClause(p *parser, n ast.Clause) ast.Clause {
	c.closeNode(p, n)
	return n
}
   232  
   233  // ----------------------------------------------------------------------------
   234  // Parsing support
   235  
   236  func (p *parser) printTrace(a ...interface{}) {
   237  	const dots = ". . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . "
   238  	const n = len(dots)
   239  	pos := p.file.Position(p.pos)
   240  	fmt.Printf("%5d:%3d: ", pos.Line, pos.Column)
   241  	i := 2 * p.indent
   242  	for i > n {
   243  		fmt.Print(dots)
   244  		i -= n
   245  	}
   246  	// i <= n
   247  	fmt.Print(dots[0:i])
   248  	fmt.Println(a...)
   249  }
   250  
// trace prints an opening trace line for msg and increases the
// indentation level; it returns p so it can pair with un via defer.
func trace(p *parser, msg string) *parser {
	p.printTrace(msg, "(")
	p.indent++
	return p
}
   256  
// un closes the trace section opened by trace.
// Usage pattern: defer un(trace(p, "..."))
func un(p *parser) {
	p.indent--
	p.printTrace(")")
}
   262  
   263  // Advance to the next
   264  func (p *parser) next0() {
   265  	// Because of one-token look-ahead, print the previous token
   266  	// when tracing as it provides a more readable output. The
   267  	// very first token (!p.pos.IsValid()) is not initialized
   268  	// (it is ILLEGAL), so don't print it .
   269  	if p.trace && p.pos.IsValid() {
   270  		s := p.tok.String()
   271  		switch {
   272  		case p.tok.IsLiteral():
   273  			p.printTrace(s, p.lit)
   274  		case p.tok.IsOperator(), p.tok.IsKeyword():
   275  			p.printTrace("\"" + s + "\"")
   276  		default:
   277  			p.printTrace(s)
   278  		}
   279  	}
   280  
   281  	// We had peeked one token, effectively scanning it early; use it now.
   282  	if p.peekToken.scanned {
   283  		p.pos, p.tok, p.lit = p.peekToken.pos, p.peekToken.tok, p.peekToken.lit
   284  		p.peekToken.scanned = false
   285  		return
   286  	}
   287  
   288  	p.pos, p.tok, p.lit = p.scanner.Scan()
   289  }
   290  
// peek scans one more token as a look-ahead and stores it in [parser.peekToken].
// Peeking multiple tokens ahead is not supported.
func (p *parser) peek() {
	if p.peekToken.scanned {
		// A second peek before next0 consumed the first would lose a
		// token; treat it as a programming error.
		panic("can only peek one token at a time")
	}
	p.peekToken.pos, p.peekToken.tok, p.peekToken.lit = p.scanner.Scan()
	p.peekToken.scanned = true
}
   300  
   301  // Consume a comment and return it and the line on which it ends.
   302  func (p *parser) consumeComment() (comment *ast.Comment, endline int) {
   303  	endline = p.file.Line(p.pos)
   304  
   305  	comment = &ast.Comment{Slash: p.pos, Text: p.lit}
   306  	p.next0()
   307  
   308  	return
   309  }
   310  
   311  // Consume a group of adjacent comments, add it to the parser's
   312  // comments list, and return it together with the line at which
   313  // the last comment in the group ends. A non-comment token or n
   314  // empty lines terminate a comment group.
   315  func (p *parser) consumeCommentGroup(prevLine, n int) (comments *ast.CommentGroup, endline int) {
   316  	var list []*ast.Comment
   317  	var rel token.RelPos
   318  	endline = p.file.Line(p.pos)
   319  	switch endline - prevLine {
   320  	case 0:
   321  		rel = token.Blank
   322  	case 1:
   323  		rel = token.Newline
   324  	default:
   325  		rel = token.NewSection
   326  	}
   327  	for p.tok == token.COMMENT && p.file.Line(p.pos) <= endline+n {
   328  		var comment *ast.Comment
   329  		comment, endline = p.consumeComment()
   330  		list = append(list, comment)
   331  	}
   332  
   333  	cg := &ast.CommentGroup{List: list}
   334  	ast.SetRelPos(cg, rel)
   335  	comments = cg
   336  	return
   337  }
   338  
// Advance to the next non-comment token. In the process, collect
// any comment groups encountered, and remember the last lead and
// line comments.
//
// A lead comment is a comment group that starts and ends in a
// line without any other tokens and that is followed by a non-comment
// token on the line immediately after the comment group.
//
// A line comment is a comment group that follows a non-comment
// token on the same line, and that has no tokens after it on the line
// where it ends.
//
// Lead and line comments may be considered documentation that is
// stored in the AST.
func (p *parser) next() {
	// A leadComment may not be consumed if it leads an inner token of a node.
	if p.leadComment != nil {
		p.comments.add(p.leadComment)
	}
	p.leadComment = nil
	prev := p.pos
	p.next0()
	p.comments.pos++

	if p.tok == token.COMMENT {
		var comment *ast.CommentGroup
		var endline int

		currentLine := p.file.Line(p.pos)
		prevLine := p.file.Line(prev)
		if prevLine == currentLine {
			// The comment is on same line as the previous token; it
			// cannot be a lead comment but may be a line comment.
			comment, endline = p.consumeCommentGroup(prevLine, 0)
			if p.file.Line(p.pos) != endline {
				// The next token is on a different line, thus
				// the last comment group is a line comment.
				comment.Line = true
			}
		}

		// consume successor comments, if any
		endline = -1
		for p.tok == token.COMMENT {
			// Groups before the last one cannot be lead comments;
			// record them in the current comment scope.
			if comment != nil {
				p.comments.add(comment)
			}
			comment, endline = p.consumeCommentGroup(prevLine, 1)
			prevLine = currentLine
			currentLine = p.file.Line(p.pos)

		}

		if endline+1 == p.file.Line(p.pos) && p.tok != token.EOF {
			// The next token is following on the line immediately after the
			// comment group, thus the last comment group is a lead comment.
			comment.Doc = true
			p.leadComment = comment
		} else {
			p.comments.add(comment)
		}
	}
}
   402  
// assertV0 indicates the last version at which a certain feature was
// supported: v0.minor.patch. If the configured language version (or,
// when unset, the latest supported version) is newer, a
// DeprecationError is recorded reporting the feature as deprecated as
// of the following patch release, v0.minor.(patch+1).
func (p *parser) assertV0(pos token.Pos, minor, patch int, name string) {
	v := internal.Version(minor, patch)
	base := p.version
	if base == 0 {
		// No explicit version configured; compare against the latest.
		base = internal.APIVersionSupported
	}
	if base > v {
		p.errors = errors.Append(p.errors,
			errors.Wrapf(&DeprecationError{v}, pos,
				"use of deprecated %s (deprecated as of v0.%d.%d)", name, minor, patch+1))
	}
}
   417  
   418  func (p *parser) errf(pos token.Pos, msg string, args ...interface{}) {
   419  	// ePos := p.file.Position(pos)
   420  	ePos := pos
   421  
   422  	// If AllErrors is not set, discard errors reported on the same line
   423  	// as the last recorded error and stop parsing if there are more than
   424  	// 10 errors.
   425  	if p.mode&allErrorsMode == 0 {
   426  		errors := errors.Errors(p.errors)
   427  		n := len(errors)
   428  		if n > 0 && errors[n-1].Position().Line() == ePos.Line() {
   429  			return // discard - likely a spurious error
   430  		}
   431  		if n > 10 {
   432  			p.panicking = true
   433  			panic("too many errors")
   434  		}
   435  	}
   436  
   437  	p.errors = errors.Append(p.errors, errors.Newf(ePos, msg, args...))
   438  }
   439  
   440  func (p *parser) errorExpected(pos token.Pos, obj string) {
   441  	if pos != p.pos {
   442  		p.errf(pos, "expected %s", obj)
   443  		return
   444  	}
   445  	// the error happened at the current position;
   446  	// make the error message more specific
   447  	if p.tok == token.COMMA && p.lit == "\n" {
   448  		p.errf(pos, "expected %s, found newline", obj)
   449  		return
   450  	}
   451  
   452  	if p.tok.IsLiteral() {
   453  		p.errf(pos, "expected %s, found '%s' %s", obj, p.tok, p.lit)
   454  	} else {
   455  		p.errf(pos, "expected %s, found '%s'", obj, p.tok)
   456  	}
   457  }
   458  
   459  func (p *parser) expect(tok token.Token) token.Pos {
   460  	pos := p.pos
   461  	if p.tok != tok {
   462  		p.errorExpected(pos, "'"+tok.String()+"'")
   463  	}
   464  	p.next() // make progress
   465  	return pos
   466  }
   467  
   468  // expectClosing is like expect but provides a better error message
   469  // for the common case of a missing comma before a newline.
   470  func (p *parser) expectClosing(tok token.Token, context string) token.Pos {
   471  	if p.tok != tok && p.tok == token.COMMA && p.lit == "\n" {
   472  		p.errf(p.pos, "missing ',' before newline in %s", context)
   473  		p.next()
   474  	}
   475  	return p.expect(tok)
   476  }
   477  
   478  func (p *parser) expectComma() {
   479  	// semicolon is optional before a closing ')', ']', '}', or newline
   480  	if p.tok != token.RPAREN && p.tok != token.RBRACE && p.tok != token.EOF {
   481  		switch p.tok {
   482  		case token.COMMA:
   483  			p.next()
   484  		default:
   485  			p.errorExpected(p.pos, "','")
   486  			syncExpr(p)
   487  		}
   488  	}
   489  }
   490  
   491  func (p *parser) atComma(context string, follow ...token.Token) bool {
   492  	if p.tok == token.COMMA {
   493  		return true
   494  	}
   495  	for _, t := range follow {
   496  		if p.tok == t {
   497  			return false
   498  		}
   499  	}
   500  	// TODO: find a way to detect crossing lines now we don't have a semi.
   501  	if p.lit == "\n" {
   502  		p.errf(p.pos, "missing ',' before newline")
   503  	} else {
   504  		p.errf(p.pos, "missing ',' in %s", context)
   505  	}
   506  	return true // "insert" comma and continue
   507  }
   508  
// syncExpr advances to the next field in a field list.
// Used for synchronization after an error.
func syncExpr(p *parser) {
	for {
		switch p.tok {
		case token.COMMA:
			// Return only if parser made some progress since last
			// sync or if it has not reached 10 sync calls without
			// progress. Otherwise consume at least one token to
			// avoid an endless parser loop (it is possible that
			// multiple parse functions call syncExpr and
			// correctly do not advance, thus the need for the
			// invocation limit p.syncCnt).
			if p.pos == p.syncPos && p.syncCnt < 10 {
				p.syncCnt++
				return
			}
			if p.syncPos.Before(p.pos) {
				p.syncPos = p.pos
				p.syncCnt = 0
				return
			}
			// Reaching here indicates a parser bug, likely an
			// incorrect token list in this function, but it only
			// leads to skipping of possibly correct code if a
			// previous error is present, and thus is preferred
			// over a non-terminating parse.
		case token.EOF:
			return
		}
		p.next()
	}
}
   542  
// safePos returns a valid file position for a given position: If pos
// is valid to begin with, safePos returns pos. If pos is out-of-range,
// safePos returns the EOF position.
//
// This is hack to work around "artificial" end positions in the AST which
// are computed by adding 1 to (presumably valid) token positions. If the
// token positions are invalid due to parse errors, the resulting end position
// may be past the file's EOF position, which would lead to panics if used
// later on.
func (p *parser) safePos(pos token.Pos) (res token.Pos) {
	// Offset panics for out-of-range positions; recover converts that
	// panic into returning the EOF position instead.
	defer func() {
		if recover() != nil {
			res = p.file.Pos(p.file.Base()+p.file.Size(), pos.RelPos()) // EOF position
		}
	}()
	_ = p.file.Offset(pos) // trigger a panic if position is out-of-range
	return pos
}
   561  
   562  // ----------------------------------------------------------------------------
   563  // Identifiers
   564  
   565  func (p *parser) parseIdent() *ast.Ident {
   566  	c := p.openComments()
   567  	pos := p.pos
   568  	name := "_"
   569  	if p.tok == token.IDENT {
   570  		name = p.lit
   571  		p.next()
   572  	} else {
   573  		p.expect(token.IDENT) // use expect() error handling
   574  	}
   575  	ident := &ast.Ident{NamePos: pos, Name: name}
   576  	c.closeNode(p, ident)
   577  	return ident
   578  }
   579  
   580  func (p *parser) parseKeyIdent() *ast.Ident {
   581  	c := p.openComments()
   582  	pos := p.pos
   583  	name := p.lit
   584  	p.next()
   585  	ident := &ast.Ident{NamePos: pos, Name: name}
   586  	c.closeNode(p, ident)
   587  	return ident
   588  }
   589  
   590  // ----------------------------------------------------------------------------
   591  // Expressions
   592  
// parseOperand returns an expression.
// Callers must verify the result.
func (p *parser) parseOperand() (expr ast.Expr) {
	if p.trace {
		defer un(trace(p, "Operand"))
	}

	switch p.tok {
	case token.IDENT:
		return p.parseIdent()

	case token.LBRACE:
		return p.parseStruct()

	case token.LBRACK:
		return p.parseList()

	case token.FUNC:
		// "func" is only a keyword when function parsing is enabled;
		// otherwise it is treated as an ordinary identifier.
		if p.mode&parseFuncsMode != 0 {
			return p.parseFunc()
		} else {
			return p.parseKeyIdent()
		}

	case token.BOTTOM:
		c := p.openComments()
		x := &ast.BottomLit{Bottom: p.pos}
		p.next()
		return c.closeExpr(p, x)

	case token.NULL, token.TRUE, token.FALSE, token.INT, token.FLOAT, token.STRING:
		c := p.openComments()
		x := &ast.BasicLit{ValuePos: p.pos, Kind: p.tok, Value: p.lit}
		p.next()
		return c.closeExpr(p, x)

	case token.INTERPOLATION:
		return p.parseInterpolation()

	case token.LPAREN:
		c := p.openComments()
		defer func() { c.closeNode(p, expr) }()
		lparen := p.pos
		p.next()
		p.exprLev++
		p.openList()
		x := p.parseRHS() // types may be parenthesized: (some type)
		p.closeList()
		p.exprLev--
		rparen := p.expect(token.RPAREN)
		return &ast.ParenExpr{
			Lparen: lparen,
			X:      x,
			Rparen: rparen}

	default:
		// Remaining keywords are accepted as identifiers.
		if p.tok.IsKeyword() {
			return p.parseKeyIdent()
		}
	}

	// we have an error
	c := p.openComments()
	pos := p.pos
	p.errorExpected(pos, "operand")
	syncExpr(p)
	return c.closeExpr(p, &ast.BadExpr{From: pos, To: p.pos})
}
   661  
// parseIndexOrSlice parses either an index expression x[i] or a slice
// expression x[low:high], after x has already been parsed.
func (p *parser) parseIndexOrSlice(x ast.Expr) (expr ast.Expr) {
	if p.trace {
		defer un(trace(p, "IndexOrSlice"))
	}

	c := p.openComments()
	defer func() { c.closeNode(p, expr) }()
	// Start comment positions after x, which was parsed by the caller.
	c.pos = 1

	// N is the maximum number of index/slice operands (low and high).
	const N = 2
	lbrack := p.expect(token.LBRACK)

	p.exprLev++
	var index [N]ast.Expr
	var colons [N - 1]token.Pos
	if p.tok != token.COLON {
		index[0] = p.parseRHS()
	}
	nColons := 0
	for p.tok == token.COLON && nColons < len(colons) {
		colons[nColons] = p.pos
		nColons++
		p.next()
		// An operand after ':' is optional (e.g. x[a:]).
		if p.tok != token.COLON && p.tok != token.RBRACK && p.tok != token.EOF {
			index[nColons] = p.parseRHS()
		}
	}
	p.exprLev--
	rbrack := p.expect(token.RBRACK)

	// Any colon means a slice; otherwise it is a plain index.
	if nColons > 0 {
		return &ast.SliceExpr{
			X:      x,
			Lbrack: lbrack,
			Low:    index[0],
			High:   index[1],
			Rbrack: rbrack}
	}

	return &ast.IndexExpr{
		X:      x,
		Lbrack: lbrack,
		Index:  index[0],
		Rbrack: rbrack}
}
   707  
// parseCallOrConversion parses the argument list of a call expression
// fun(args...), after fun has already been parsed.
func (p *parser) parseCallOrConversion(fun ast.Expr) (expr *ast.CallExpr) {
	if p.trace {
		defer un(trace(p, "CallOrConversion"))
	}
	c := p.openComments()
	defer func() { c.closeNode(p, expr) }()

	// Treat the argument list as a single comment position.
	p.openList()
	defer p.closeList()

	lparen := p.expect(token.LPAREN)

	p.exprLev++
	var list []ast.Expr
	for p.tok != token.RPAREN && p.tok != token.EOF {
		list = append(list, p.parseRHS()) // builtins may expect a type: make(some type, ...)
		if !p.atComma("argument list", token.RPAREN) {
			break
		}
		p.next()
	}
	p.exprLev--
	rparen := p.expectClosing(token.RPAREN, "argument list")

	return &ast.CallExpr{
		Fun:    fun,
		Lparen: lparen,
		Args:   list,
		Rparen: rparen}
}
   738  
// consumeDeclComma consumes the comma that separates declarations in a
// struct body, if the parser is positioned at one.
//
// TODO: inline this function in parseFieldList once we no longer use comment
// position information in parsing.
func (p *parser) consumeDeclComma() {
	if p.atComma("struct literal", token.RBRACE, token.EOF) {
		p.next()
	}
}
   746  
// parseFieldList parses the declarations of a struct body up to (but
// not including) the closing '}' or end of file.
func (p *parser) parseFieldList() (list []ast.Decl) {
	if p.trace {
		defer un(trace(p, "FieldList"))
	}
	p.openList()
	defer p.closeList()

	for p.tok != token.RBRACE && p.tok != token.EOF {
		switch p.tok {
		case token.ATTRIBUTE:
			list = append(list, p.parseAttribute())
			p.consumeDeclComma()

		case token.ELLIPSIS:
			c := p.openComments()
			ellipsis := &ast.Ellipsis{Ellipsis: p.pos}
			p.next()
			c.closeNode(p, ellipsis)
			list = append(list, ellipsis)
			p.consumeDeclComma()

		default:
			// Fields consume their own trailing comma.
			list = append(list, p.parseField())
		}

		// TODO: handle next comma here, after disallowing non-colon separator
		// and we have eliminated the need comment positions.
	}

	return
}
   778  
// parseLetDecl parses a let declaration "let X = expr". If "let" turns
// out not to introduce a declaration (no identifier follows), it
// returns a nil decl and an Ident named "let" so the caller can treat
// it as an ordinary label/expression.
func (p *parser) parseLetDecl() (decl ast.Decl, ident *ast.Ident) {
	if p.trace {
		defer un(trace(p, "Field"))
	}

	c := p.openComments()

	letPos := p.expect(token.LET)
	if p.tok != token.IDENT {
		// NOTE(review): ident is still nil here, so closeNode closes the
		// comment scope without attaching comments to the returned Ident —
		// confirm this is intentional.
		c.closeNode(p, ident)
		return nil, &ast.Ident{
			NamePos: letPos,
			Name:    "let",
		}
	}
	defer func() { c.closeNode(p, decl) }()

	ident = p.parseIdent()
	assign := p.expect(token.BIND)
	expr := p.parseRHS()

	p.consumeDeclComma()

	return &ast.LetClause{
		Let:   letPos,
		Ident: ident,
		Equal: assign,
		Expr:  expr,
	}, nil
}
   809  
// parseComprehension parses a comprehension such as "for x in e {...}"
// or "if cond {...}". If the clauses do not actually form a
// comprehension, it returns a nil decl and an Ident for the keyword so
// the caller can reinterpret it as a label.
func (p *parser) parseComprehension() (decl ast.Decl, ident *ast.Ident) {
	if p.trace {
		defer un(trace(p, "Comprehension"))
	}

	c := p.openComments()
	defer func() { c.closeNode(p, decl) }()

	tok := p.tok
	pos := p.pos
	clauses, fc := p.parseComprehensionClauses(true)
	if fc != nil {
		// Not a comprehension after all; hand back the keyword as an
		// identifier using the comment scope fc returned.
		ident = &ast.Ident{
			NamePos: pos,
			Name:    tok.String(),
		}
		fc.closeNode(p, ident)
		return nil, ident
	}

	sc := p.openComments()
	expr := p.parseStruct()
	sc.closeExpr(p, expr)

	if p.atComma("struct literal", token.RBRACE) { // TODO: may be EOF
		p.next()
	}

	return &ast.Comprehension{
		Clauses: clauses,
		Value:   expr,
	}, nil
}
   843  
// parseField parses a single declaration in a struct body: a field
// (possibly with a chain of labels "a: b: c: v"), an embedding, an
// old-style alias, a let clause, or a comprehension. The concrete Decl
// returned depends on what is found.
func (p *parser) parseField() (decl ast.Decl) {
	if p.trace {
		defer un(trace(p, "Field"))
	}

	c := p.openComments()
	defer func() { c.closeNode(p, decl) }()

	pos := p.pos

	// this is the outermost field; m walks down the label chain.
	this := &ast.Field{Label: nil}
	m := this

	tok := p.tok

	label, expr, decl, ok := p.parseLabel(false)
	if decl != nil {
		// parseLabel already produced a complete declaration
		// (comprehension or let clause).
		return decl
	}
	m.Label = label

	if !ok {
		// No label: this is an embedding or an old-style alias.
		if expr == nil {
			expr = p.parseRHS()
		}
		if a, ok := expr.(*ast.Alias); ok {
			p.assertV0(a.Pos(), 1, 3, `old-style alias; use "let X = expr" instead`)
			p.consumeDeclComma()
			return a
		}
		e := &ast.EmbedDecl{Expr: expr}
		p.consumeDeclComma()
		return e
	}

	// Optional "?" or "!" constraint marker after the label.
	switch p.tok {
	case token.OPTION, token.NOT:
		m.Optional = p.pos
		m.Constraint = p.tok
		p.next()
	}

	// TODO: consider disallowing comprehensions with more than one label.
	// This can be a bit awkward in some cases, but it would naturally
	// enforce the proper style that a comprehension be defined in the
	// smallest possible scope.
	// allowComprehension = false

	switch p.tok {
	case token.COLON:
	case token.COMMA:
		p.expectComma() // sync parser.
		fallthrough

	case token.RBRACE, token.EOF:
		// A label followed by a terminator is actually an embedding or
		// alias, provided the leading token allows it.
		if a, ok := expr.(*ast.Alias); ok {
			p.assertV0(a.Pos(), 1, 3, `old-style alias; use "let X = expr" instead`)
			return a
		}
		switch tok {
		case token.IDENT, token.LBRACK, token.LPAREN,
			token.STRING, token.INTERPOLATION,
			token.NULL, token.TRUE, token.FALSE,
			token.FOR, token.IF, token.LET, token.IN:
			return &ast.EmbedDecl{Expr: expr}
		}
		fallthrough

	default:
		p.errorExpected(p.pos, "label or ':'")
		return &ast.BadDecl{From: pos, To: p.pos}
	}

	m.TokenPos = p.pos
	m.Token = p.tok
	if p.tok != token.COLON {
		p.errorExpected(pos, "':'")
	}
	p.next() // :

	// Parse the chain "a: b: c: value", nesting a StructLit per label.
	for {
		if l, ok := m.Label.(*ast.ListLit); ok && len(l.Elts) != 1 {
			p.errf(l.Pos(), "square bracket must have exactly one element")
		}

		label, expr, _, ok := p.parseLabel(true)
		if !ok || (p.tok != token.COLON && p.tok != token.OPTION && p.tok != token.NOT) {
			// Not another label: what we parsed (or parse now) is the value.
			if expr == nil {
				expr = p.parseRHS()
			}
			m.Value = expr
			break
		}
		field := &ast.Field{Label: label}
		m.Value = &ast.StructLit{Elts: []ast.Decl{field}}
		m = field

		switch p.tok {
		case token.OPTION, token.NOT:
			m.Optional = p.pos
			m.Constraint = p.tok
			p.next()
		}

		m.TokenPos = p.pos
		m.Token = p.tok
		if p.tok != token.COLON {
			if p.tok.IsLiteral() {
				p.errf(p.pos, "expected ':'; found %s", p.lit)
			} else {
				p.errf(p.pos, "expected ':'; found %s", p.tok)
			}
			break
		}
		p.next()
	}

	// Attributes attach to the innermost field of the chain.
	if attrs := p.parseAttributes(); attrs != nil {
		m.Attrs = attrs
	}

	p.consumeDeclComma()

	return this
}
   969  
   970  func (p *parser) parseAttributes() (attrs []*ast.Attribute) {
   971  	p.openList()
   972  	for p.tok == token.ATTRIBUTE {
   973  		attrs = append(attrs, p.parseAttribute())
   974  	}
   975  	p.closeList()
   976  	return attrs
   977  }
   978  
   979  func (p *parser) parseAttribute() *ast.Attribute {
   980  	c := p.openComments()
   981  	a := &ast.Attribute{At: p.pos, Text: p.lit}
   982  	p.next()
   983  	c.closeNode(p, a)
   984  	return a
   985  }
   986  
// parseLabel parses something that may be a field label. It returns the
// label when one was recognized (ok == true), the expression that was
// parsed (which the caller may use as a value when ok == false), or a
// complete declaration when the construct turned out to be a
// comprehension or let clause. rhs indicates the label appears on the
// right-hand side of a ':' chain, where comprehension keywords and
// reserved identifiers are treated differently.
func (p *parser) parseLabel(rhs bool) (label ast.Label, expr ast.Expr, decl ast.Decl, ok bool) {
	tok := p.tok
	switch tok {

	case token.FOR, token.IF:
		if rhs {
			// On the RHS these keywords are ordinary expressions.
			expr = p.parseExpr()
			break
		}
		comp, ident := p.parseComprehension()
		if comp != nil {
			return nil, nil, comp, false
		}
		expr = ident

	case token.LET:
		let, ident := p.parseLetDecl()
		if let != nil {
			return nil, nil, let, false
		}
		expr = ident

	case token.IDENT, token.STRING, token.INTERPOLATION, token.LPAREN,
		token.NULL, token.TRUE, token.FALSE, token.IN, token.FUNC:
		expr = p.parseExpr()

	case token.LBRACK:
		expr = p.parseRHS()
		switch x := expr.(type) {
		case *ast.ListLit:
			// Note: caller must verify this list is suitable as a label.
			label, ok = x, true
		}
	}

	// Decide whether the parsed expression can serve as a label.
	switch x := expr.(type) {
	case *ast.BasicLit:
		switch x.Kind {
		case token.STRING, token.NULL, token.TRUE, token.FALSE, token.FUNC:
			// Keywords that represent operands.

			// Allowing keywords to be used as a labels should not interfere with
			// generating good errors: any keyword can only appear on the RHS of a
			// field (after a ':'), whereas labels always appear on the LHS.

			label, ok = x, true
		}

	case *ast.Ident:
		if strings.HasPrefix(x.Name, "__") && !rhs {
			p.errf(x.NamePos, "identifiers starting with '__' are reserved")
		}

		expr = p.parseAlias(x)
		if a, ok := expr.(*ast.Alias); ok {
			// An alias is only usable as a label if its target is one.
			if _, ok = a.Expr.(ast.Label); !ok {
				break
			}
			label = a
		} else {
			label = x
		}
		ok = true

	case ast.Label:
		label, ok = x, true
	}
	return label, expr, nil, ok
}
  1056  
  1057  func (p *parser) parseStruct() (expr ast.Expr) {
  1058  	lbrace := p.expect(token.LBRACE)
  1059  
  1060  	if p.trace {
  1061  		defer un(trace(p, "StructLit"))
  1062  	}
  1063  
  1064  	elts := p.parseStructBody()
  1065  	rbrace := p.expectClosing(token.RBRACE, "struct literal")
  1066  	return &ast.StructLit{
  1067  		Lbrace: lbrace,
  1068  		Elts:   elts,
  1069  		Rbrace: rbrace,
  1070  	}
  1071  }
  1072  
  1073  func (p *parser) parseStructBody() []ast.Decl {
  1074  	if p.trace {
  1075  		defer un(trace(p, "StructBody"))
  1076  	}
  1077  
  1078  	p.exprLev++
  1079  	var elts []ast.Decl
  1080  
  1081  	// TODO: consider "stealing" non-lead comments.
  1082  	// for _, cg := range p.comments.groups {
  1083  	// 	if cg != nil {
  1084  	// 		elts = append(elts, cg)
  1085  	// 	}
  1086  	// }
  1087  	// p.comments.groups = p.comments.groups[:0]
  1088  
  1089  	if p.tok != token.RBRACE {
  1090  		elts = p.parseFieldList()
  1091  	}
  1092  	p.exprLev--
  1093  
  1094  	return elts
  1095  }
  1096  
// parseComprehensionClauses parses either new-style (first==true)
// or old-style (first==false).
// Should we now disallow keywords as identifiers? If not, we need to
// return a list of discovered labels as the alternative.
//
// If the leading keyword turns out not to start a clause (e.g. "for"
// used as a field label), nil clauses are returned together with the
// comment state opened for that keyword so the caller can reuse it.
func (p *parser) parseComprehensionClauses(first bool) (clauses []ast.Clause, c *commentState) {
	// TODO: reuse Template spec, which is possible if it doesn't check the
	// first is an identifier.

	for {
		switch p.tok {
		case token.FOR:
			c := p.openComments()
			forPos := p.expect(token.FOR)
			if first {
				// "for" directly followed by one of these tokens is a
				// field label named "for", not a clause: bail out.
				switch p.tok {
				case token.COLON, token.BIND, token.OPTION, token.NOT,
					token.COMMA, token.EOF:
					return nil, c
				}
			}

			// for [key ","] value in source
			var key, value *ast.Ident
			var colon token.Pos
			value = p.parseIdent()
			if p.tok == token.COMMA {
				colon = p.expect(token.COMMA)
				key = value
				value = p.parseIdent()
			}
			c.pos = 4
			// params := p.parseParams(nil, ARROW)
			clauses = append(clauses, c.closeClause(p, &ast.ForClause{
				For:    forPos,
				Key:    key,
				Colon:  colon,
				Value:  value,
				In:     p.expect(token.IN),
				Source: p.parseRHS(),
			}))

		case token.IF:
			c := p.openComments()
			ifPos := p.expect(token.IF)
			if first {
				// As for "for" above: "if" may be a field label.
				switch p.tok {
				case token.COLON, token.BIND, token.OPTION,
					token.COMMA, token.EOF:
					return nil, c
				case token.NOT:
					// NOTE(review): `if!` followed by ':' appears to be
					// a constrained field named "if", not a condition
					// starting with the '!' operator — confirm.
					p.peek()
					if p.peekToken.tok == token.COLON {
						return nil, c
					}
				}
			}

			clauses = append(clauses, c.closeClause(p, &ast.IfClause{
				If:        ifPos,
				Condition: p.parseRHS(),
			}))

		case token.LET:
			c := p.openComments()
			letPos := p.expect(token.LET)

			ident := p.parseIdent()
			assign := p.expect(token.BIND)
			expr := p.parseRHS()

			clauses = append(clauses, c.closeClause(p, &ast.LetClause{
				Let:   letPos,
				Ident: ident,
				Equal: assign,
				Expr:  expr,
			}))

		default:
			// No further clause keyword: the collected clauses end here.
			return clauses, nil
		}
		// Clauses may optionally be separated by commas.
		if p.tok == token.COMMA {
			p.next()
		}

		first = false
	}
}
  1183  
// parseFunc parses a function type: func(args): ret.
// If "func" turns out to be used as a plain identifier (e.g. as a
// field label), an *ast.Ident is returned instead.
func (p *parser) parseFunc() (expr ast.Expr) {
	if p.trace {
		defer un(trace(p, "Func"))
	}
	tok := p.tok
	pos := p.pos
	fun := p.expect(token.FUNC)

	// "func" might be used as an identifier, in which case bail out early.
	switch p.tok {
	case token.COLON, token.BIND, token.OPTION,
		token.COMMA, token.EOF:

		return &ast.Ident{
			NamePos: pos,
			Name:    tok.String(),
		}
	}

	p.expect(token.LPAREN)
	args := p.parseFuncArgs()
	p.expectClosing(token.RPAREN, "argument type list")

	p.expect(token.COLON)
	ret := p.parseExpr()

	return &ast.Func{
		Func: fun,
		Args: args,
		Ret:  ret,
	}
}
  1216  
  1217  func (p *parser) parseFuncArgs() (list []ast.Expr) {
  1218  	if p.trace {
  1219  		defer un(trace(p, "FuncArgs"))
  1220  	}
  1221  	p.openList()
  1222  	defer p.closeList()
  1223  
  1224  	for p.tok != token.RPAREN && p.tok != token.EOF {
  1225  		list = append(list, p.parseFuncArg())
  1226  		if p.tok != token.RPAREN {
  1227  			p.expectComma()
  1228  		}
  1229  	}
  1230  
  1231  	return list
  1232  }
  1233  
  1234  func (p *parser) parseFuncArg() (expr ast.Expr) {
  1235  	if p.trace {
  1236  		defer un(trace(p, "FuncArg"))
  1237  	}
  1238  	return p.parseExpr()
  1239  }
  1240  
  1241  func (p *parser) parseList() (expr ast.Expr) {
  1242  	lbrack := p.expect(token.LBRACK)
  1243  
  1244  	if p.trace {
  1245  		defer un(trace(p, "ListLiteral"))
  1246  	}
  1247  
  1248  	elts := p.parseListElements()
  1249  
  1250  	if p.tok == token.ELLIPSIS {
  1251  		ellipsis := &ast.Ellipsis{
  1252  			Ellipsis: p.pos,
  1253  		}
  1254  		elts = append(elts, ellipsis)
  1255  		p.next()
  1256  		if p.tok != token.COMMA && p.tok != token.RBRACK {
  1257  			ellipsis.Type = p.parseRHS()
  1258  		}
  1259  		if p.atComma("list literal", token.RBRACK) {
  1260  			p.next()
  1261  		}
  1262  	}
  1263  
  1264  	rbrack := p.expectClosing(token.RBRACK, "list literal")
  1265  	return &ast.ListLit{
  1266  		Lbrack: lbrack,
  1267  		Elts:   elts,
  1268  		Rbrack: rbrack}
  1269  }
  1270  
  1271  func (p *parser) parseListElements() (list []ast.Expr) {
  1272  	if p.trace {
  1273  		defer un(trace(p, "ListElements"))
  1274  	}
  1275  	p.openList()
  1276  	defer p.closeList()
  1277  
  1278  	for p.tok != token.RBRACK && p.tok != token.ELLIPSIS && p.tok != token.EOF {
  1279  		expr, ok := p.parseListElement()
  1280  		list = append(list, expr)
  1281  		if !ok {
  1282  			break
  1283  		}
  1284  	}
  1285  
  1286  	return
  1287  }
  1288  
// parseListElement parses a single element of a list literal,
// including comprehension elements. ok reports whether parsing of
// further elements may continue.
func (p *parser) parseListElement() (expr ast.Expr, ok bool) {
	if p.trace {
		defer un(trace(p, "ListElement"))
	}
	c := p.openComments()
	defer func() { c.closeNode(p, expr) }()

	switch p.tok {
	case token.FOR, token.IF:
		tok := p.tok
		pos := p.pos
		clauses, fc := p.parseComprehensionClauses(true)
		if clauses != nil {
			// A comprehension: parse the struct that produces each value.
			sc := p.openComments()
			expr := p.parseStruct()
			sc.closeExpr(p, expr)

			if p.atComma("list literal", token.RBRACK) { // TODO: may be EOF
				p.next()
			}

			return &ast.Comprehension{
				Clauses: clauses,
				Value:   expr,
			}, true
		}

		// Not a comprehension after all: the keyword is used as a
		// plain identifier.
		expr = &ast.Ident{
			NamePos: pos,
			Name:    tok.String(),
		}
		fc.closeNode(p, expr)

	default:
		expr = p.parseUnaryExpr()
	}

	expr = p.parseBinaryExprTail(token.LowestPrec+1, expr)
	expr = p.parseAlias(expr)

	// Enforce there is an explicit comma. We could also allow the
	// omission of commas in lists, but this gives rise to some ambiguities
	// with list comprehensions.
	if p.tok == token.COMMA && p.lit != "," {
		// A COMMA whose literal is not "," was presumably synthesized
		// by the scanner at a newline rather than written explicitly.
		p.next()
		// Allow missing comma for last element, though, to be compliant
		// with JSON.
		if p.tok == token.RBRACK || p.tok == token.FOR || p.tok == token.IF {
			return expr, false
		}
		p.errf(p.pos, "missing ',' before newline in list literal")
	} else if !p.atComma("list literal", token.RBRACK, token.FOR, token.IF) {
		return expr, false
	}
	p.next()

	return expr, true
}
  1347  
  1348  // parseAlias turns an expression into an alias.
  1349  func (p *parser) parseAlias(lhs ast.Expr) (expr ast.Expr) {
  1350  	if p.tok != token.BIND {
  1351  		return lhs
  1352  	}
  1353  	pos := p.pos
  1354  	p.next()
  1355  	expr = p.parseRHS()
  1356  	if expr == nil {
  1357  		panic("empty return")
  1358  	}
  1359  	switch x := lhs.(type) {
  1360  	case *ast.Ident:
  1361  		return &ast.Alias{Ident: x, Equal: pos, Expr: expr}
  1362  	}
  1363  	p.errf(p.pos, "expected identifier for alias")
  1364  	return expr
  1365  }
  1366  
  1367  // checkExpr checks that x is an expression (and not a type).
  1368  func (p *parser) checkExpr(x ast.Expr) ast.Expr {
  1369  	switch unparen(x).(type) {
  1370  	case *ast.BadExpr:
  1371  	case *ast.BottomLit:
  1372  	case *ast.Ident:
  1373  	case *ast.BasicLit:
  1374  	case *ast.Interpolation:
  1375  	case *ast.Func:
  1376  	case *ast.StructLit:
  1377  	case *ast.ListLit:
  1378  	case *ast.ParenExpr:
  1379  		panic("unreachable")
  1380  	case *ast.SelectorExpr:
  1381  	case *ast.IndexExpr:
  1382  	case *ast.SliceExpr:
  1383  	case *ast.CallExpr:
  1384  	case *ast.UnaryExpr:
  1385  	case *ast.BinaryExpr:
  1386  	default:
  1387  		// all other nodes are not proper expressions
  1388  		p.errorExpected(x.Pos(), "expression")
  1389  		x = &ast.BadExpr{
  1390  			From: x.Pos(), To: p.safePos(x.End()),
  1391  		}
  1392  	}
  1393  	return x
  1394  }
  1395  
  1396  // If x is of the form (T), unparen returns unparen(T), otherwise it returns x.
  1397  func unparen(x ast.Expr) ast.Expr {
  1398  	if p, isParen := x.(*ast.ParenExpr); isParen {
  1399  		x = unparen(p.X)
  1400  	}
  1401  	return x
  1402  }
  1403  
  1404  // If lhs is set and the result is an identifier, it is not resolved.
  1405  func (p *parser) parsePrimaryExpr() ast.Expr {
  1406  	if p.trace {
  1407  		defer un(trace(p, "PrimaryExpr"))
  1408  	}
  1409  
  1410  	return p.parsePrimaryExprTail(p.parseOperand())
  1411  }
  1412  
// parsePrimaryExprTail parses any sequence of selector, index/slice,
// and call suffixes following an already-parsed operand.
func (p *parser) parsePrimaryExprTail(operand ast.Expr) ast.Expr {
	x := operand
L:
	for {
		switch p.tok {
		case token.PERIOD:
			c := p.openComments()
			c.pos = 1
			p.next()
			switch p.tok {
			case token.IDENT:
				x = &ast.SelectorExpr{
					X:   p.checkExpr(x),
					Sel: p.parseIdent(),
				}
			case token.STRING:
				// Allow simple double-quoted strings as selectors, but
				// exclude literals beginning with `""` (the empty
				// string and multiline strings).
				if strings.HasPrefix(p.lit, `"`) && !strings.HasPrefix(p.lit, `""`) {
					str := &ast.BasicLit{
						ValuePos: p.pos,
						Kind:     token.STRING,
						Value:    p.lit,
					}
					p.next()
					x = &ast.SelectorExpr{
						X:   p.checkExpr(x),
						Sel: str,
					}
					break
				}
				fallthrough
			default:
				// Keywords are permitted as selector names.
				if p.tok.IsKeyword() {
					x = &ast.SelectorExpr{
						X:   p.checkExpr(x),
						Sel: p.parseKeyIdent(),
					}
					break
				}

				pos := p.pos
				p.errorExpected(pos, "selector")
				p.next() // make progress
				// Substitute a placeholder selector so parsing can continue.
				x = &ast.SelectorExpr{X: x, Sel: &ast.Ident{NamePos: pos, Name: "_"}}
			}
			c.closeNode(p, x)
		case token.LBRACK:
			x = p.parseIndexOrSlice(p.checkExpr(x))
		case token.LPAREN:
			x = p.parseCallOrConversion(p.checkExpr(x))
		default:
			break L
		}
	}

	return x
}
  1469  
  1470  // If lhs is set and the result is an identifier, it is not resolved.
  1471  func (p *parser) parseUnaryExpr() ast.Expr {
  1472  	if p.trace {
  1473  		defer un(trace(p, "UnaryExpr"))
  1474  	}
  1475  
  1476  	switch p.tok {
  1477  	case token.ADD, token.SUB, token.NOT, token.MUL,
  1478  		token.LSS, token.LEQ, token.GEQ, token.GTR,
  1479  		token.NEQ, token.MAT, token.NMAT:
  1480  		pos, op := p.pos, p.tok
  1481  		c := p.openComments()
  1482  		p.next()
  1483  		return c.closeExpr(p, &ast.UnaryExpr{
  1484  			OpPos: pos,
  1485  			Op:    op,
  1486  			X:     p.checkExpr(p.parseUnaryExpr()),
  1487  		})
  1488  	}
  1489  
  1490  	return p.parsePrimaryExpr()
  1491  }
  1492  
  1493  func (p *parser) tokPrec() (token.Token, int) {
  1494  	tok := p.tok
  1495  	if tok == token.IDENT {
  1496  		switch p.lit {
  1497  		case "quo":
  1498  			return token.IQUO, 7
  1499  		case "rem":
  1500  			return token.IREM, 7
  1501  		case "div":
  1502  			return token.IDIV, 7
  1503  		case "mod":
  1504  			return token.IMOD, 7
  1505  		default:
  1506  			return tok, 0
  1507  		}
  1508  	}
  1509  	return tok, tok.Precedence()
  1510  }
  1511  
  1512  // If lhs is set and the result is an identifier, it is not resolved.
  1513  func (p *parser) parseBinaryExpr(prec1 int) ast.Expr {
  1514  	if p.trace {
  1515  		defer un(trace(p, "BinaryExpr"))
  1516  	}
  1517  	p.openList()
  1518  	defer p.closeList()
  1519  
  1520  	return p.parseBinaryExprTail(prec1, p.parseUnaryExpr())
  1521  }
  1522  
// parseBinaryExprTail parses the remainder of a binary expression
// given an already-parsed left operand x. Operators with precedence
// lower than prec1 terminate the loop; recursing with prec+1 for the
// right operand yields a left-associative tree for equal-precedence
// operators.
func (p *parser) parseBinaryExprTail(prec1 int, x ast.Expr) ast.Expr {
	for {
		op, prec := p.tokPrec()
		if prec < prec1 {
			return x
		}
		c := p.openComments()
		c.pos = 1
		pos := p.expect(p.tok)
		x = c.closeExpr(p, &ast.BinaryExpr{
			X:     p.checkExpr(x),
			OpPos: pos,
			Op:    op,
			// Treat nested expressions as RHS.
			Y: p.checkExpr(p.parseBinaryExpr(prec + 1))})
	}
}
  1540  
// parseInterpolation parses an interpolated string: string fragments
// alternating with parenthesized expressions. The scanner delivers the
// opening fragment as an INTERPOLATION token and is explicitly resumed
// after each embedded expression.
func (p *parser) parseInterpolation() (expr ast.Expr) {
	c := p.openComments()
	defer func() { c.closeNode(p, expr) }()

	p.openList()
	defer p.closeList()

	// cc tracks comments for the current string fragment.
	cc := p.openComments()

	lit := p.lit
	pos := p.pos
	p.next()
	last := &ast.BasicLit{ValuePos: pos, Kind: token.STRING, Value: lit}
	exprs := []ast.Expr{last}

	for p.tok == token.LPAREN {
		c.pos = 1
		p.expect(token.LPAREN)
		cc.closeExpr(p, last)

		// The embedded expression between the parentheses.
		exprs = append(exprs, p.parseRHS())

		cc = p.openComments()
		if p.tok != token.RPAREN {
			p.errf(p.pos, "expected ')' for string interpolation")
		}
		// Resume scanning the string after the embedded expression.
		lit = p.scanner.ResumeInterpolation()
		pos = p.pos
		p.next()
		last = &ast.BasicLit{
			ValuePos: pos,
			Kind:     token.STRING,
			Value:    lit,
		}
		exprs = append(exprs, last)
	}
	cc.closeExpr(p, last)
	return &ast.Interpolation{Elts: exprs}
}
  1580  
  1581  // Callers must check the result (using checkExpr), depending on context.
  1582  func (p *parser) parseExpr() (expr ast.Expr) {
  1583  	if p.trace {
  1584  		defer un(trace(p, "Expression"))
  1585  	}
  1586  
  1587  	c := p.openComments()
  1588  	defer func() { c.closeExpr(p, expr) }()
  1589  
  1590  	return p.parseBinaryExpr(token.LowestPrec + 1)
  1591  }
  1592  
  1593  func (p *parser) parseRHS() ast.Expr {
  1594  	x := p.checkExpr(p.parseExpr())
  1595  	return x
  1596  }
  1597  
  1598  // ----------------------------------------------------------------------------
  1599  // Declarations
  1600  
  1601  func isValidImport(lit string) bool {
  1602  	const illegalChars = `!"#$%&'()*,:;<=>?[\]^{|}` + "`\uFFFD"
  1603  	s, _ := literal.Unquote(lit) // go/scanner returns a legal string literal
  1604  	if p := strings.LastIndexByte(s, ':'); p >= 0 {
  1605  		s = s[:p]
  1606  	}
  1607  	for _, r := range s {
  1608  		if !unicode.IsGraphic(r) || unicode.IsSpace(r) || strings.ContainsRune(illegalChars, r) {
  1609  			return false
  1610  		}
  1611  	}
  1612  	return s != ""
  1613  }
  1614  
// parseImportSpec parses a single import spec: an optional package
// name followed by the import path string. The spec is also appended
// to p.imports. The int argument (the spec's index) is unused.
func (p *parser) parseImportSpec(_ int) *ast.ImportSpec {
	if p.trace {
		defer un(trace(p, "ImportSpec"))
	}

	c := p.openComments()

	var ident *ast.Ident
	if p.tok == token.IDENT {
		ident = p.parseIdent()
	}

	pos := p.pos
	var path string
	if p.tok == token.STRING {
		path = p.lit
		if !isValidImport(path) {
			p.errf(pos, "invalid import path: %s", path)
		}
		p.next()
		p.expectComma() // call before accessing p.linecomment
	} else {
		p.expect(token.STRING) // use expect() error handling
		if p.tok == token.COMMA {
			p.expectComma() // call before accessing p.linecomment
		}
	}
	// collect imports
	spec := &ast.ImportSpec{
		Name: ident,
		Path: &ast.BasicLit{ValuePos: pos, Kind: token.STRING, Value: path},
	}
	c.closeNode(p, spec)
	p.imports = append(p.imports, spec)

	return spec
}
  1652  
  1653  func (p *parser) parseImports() *ast.ImportDecl {
  1654  	if p.trace {
  1655  		defer un(trace(p, "Imports"))
  1656  	}
  1657  	c := p.openComments()
  1658  
  1659  	ident := p.parseIdent()
  1660  	var lparen, rparen token.Pos
  1661  	var list []*ast.ImportSpec
  1662  	if p.tok == token.LPAREN {
  1663  		lparen = p.pos
  1664  		p.next()
  1665  		p.openList()
  1666  		for iota := 0; p.tok != token.RPAREN && p.tok != token.EOF; iota++ {
  1667  			list = append(list, p.parseImportSpec(iota))
  1668  		}
  1669  		p.closeList()
  1670  		rparen = p.expect(token.RPAREN)
  1671  		p.expectComma()
  1672  	} else {
  1673  		list = append(list, p.parseImportSpec(0))
  1674  	}
  1675  
  1676  	d := &ast.ImportDecl{
  1677  		Import: ident.Pos(),
  1678  		Lparen: lparen,
  1679  		Specs:  list,
  1680  		Rparen: rparen,
  1681  	}
  1682  	c.closeNode(p, d)
  1683  	return d
  1684  }
  1685  
  1686  // ----------------------------------------------------------------------------
  1687  // Source files
  1688  
// parseFile parses a complete CUE source file: leading attributes, an
// optional package clause, import declarations, and the remaining
// top-level declarations (subject to the parser's mode flags). It
// returns nil if scanning the first token already produced errors.
func (p *parser) parseFile() *ast.File {
	if p.trace {
		defer un(trace(p, "File"))
	}

	c := p.comments

	// Don't bother parsing the rest if we had errors scanning the first
	// Likely not a CUE source file at all.
	if p.errors != nil {
		return nil
	}
	p.openList()

	var decls []ast.Decl

	// File-level attributes may precede the package clause.
	for p.tok == token.ATTRIBUTE {
		decls = append(decls, p.parseAttribute())
		p.consumeDeclComma()
	}

	// The package clause is not a declaration: it does not appear in any
	// scope.
	if p.tok == token.IDENT && p.lit == "package" {
		c := p.openComments()

		pos := p.pos
		var name *ast.Ident
		p.expect(token.IDENT)
		name = p.parseIdent()
		if name.Name == "_" && p.mode&declarationErrorsMode != 0 {
			p.errf(p.pos, "invalid package name _")
		}

		pkg := &ast.Package{
			PackagePos: pos,
			Name:       name,
		}
		decls = append(decls, pkg)
		p.expectComma()
		c.closeNode(p, pkg)
	}

	// Attributes may also directly follow the package clause.
	for p.tok == token.ATTRIBUTE {
		decls = append(decls, p.parseAttribute())
		p.consumeDeclComma()
	}

	if p.mode&packageClauseOnlyMode == 0 {
		// import decls
		for p.tok == token.IDENT && p.lit == "import" {
			decls = append(decls, p.parseImports())
		}

		if p.mode&importsOnlyMode == 0 {
			// rest of package decls
			// TODO: loop and allow multiple expressions.
			decls = append(decls, p.parseFieldList()...)
			p.expect(token.EOF)
		}
	}
	p.closeList()

	f := &ast.File{
		Imports: p.imports,
		Decls:   decls,
	}
	c.closeNode(p, f)
	return f
}