cuelang.org/go@v0.10.1/cue/parser/parser.go (about)

     1  // Copyright 2018 The CUE Authors
     2  //
     3  // Licensed under the Apache License, Version 2.0 (the "License");
     4  // you may not use this file except in compliance with the License.
     5  // You may obtain a copy of the License at
     6  //
     7  //     http://www.apache.org/licenses/LICENSE-2.0
     8  //
     9  // Unless required by applicable law or agreed to in writing, software
    10  // distributed under the License is distributed on an "AS IS" BASIS,
    11  // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    12  // See the License for the specific language governing permissions and
    13  // limitations under the License.
    14  
    15  package parser
    16  
    17  import (
    18  	"fmt"
    19  	"strings"
    20  	"unicode"
    21  
    22  	"cuelang.org/go/cue/ast"
    23  	"cuelang.org/go/cue/errors"
    24  	"cuelang.org/go/cue/literal"
    25  	"cuelang.org/go/cue/scanner"
    26  	"cuelang.org/go/cue/token"
    27  	"cuelang.org/go/internal"
    28  )
    29  
// The parser structure holds the parser's internal state.
type parser struct {
	file    *token.File     // file being parsed; all positions are relative to it
	errors  errors.Error    // accumulated parse errors
	scanner scanner.Scanner // tokenizer supplying the token stream

	// Tracing/debugging
	mode      mode // parsing mode
	trace     bool // == (mode & Trace != 0)
	panicking bool // set if we are bailing out due to too many errors.
	indent    int  // indentation used for tracing output

	// Comments
	leadComment *ast.CommentGroup // doc comment reserved for the next node
	comments    *commentState     // stack of per-production comment collectors

	// Next token
	pos token.Pos   // token position
	tok token.Token // one token look-ahead
	lit string      // token literal

	// Error recovery
	// (used to limit the number of calls to sync... functions
	// w/o making scanning progress - avoids potential endless
	// loops across multiple parser functions during error recovery)
	syncPos token.Pos // last synchronization position
	syncCnt int       // number of calls to sync... functions without progress

	// Non-syntactic parser control
	exprLev int // < 0: in control clause, >= 0: in expression

	imports []*ast.ImportSpec // list of imports

	// version is the configured language version; 0 selects the default
	// (see assertV0, which falls back to internal.APIVersionSupported).
	version int
}
    65  
// init configures the parser for the given source, applies the Options
// in mode, wires scanner errors into the parser's error list, and
// primes the one-token look-ahead by reading the first token.
func (p *parser) init(filename string, src []byte, mode []Option) {
	// Apply options first: they may set p.mode, p.version, etc.,
	// which the setup below depends on.
	for _, f := range mode {
		f(p)
	}
	p.file = token.NewFile(filename, -1, len(src))

	var m scanner.Mode
	if p.mode&parseCommentsMode != 0 {
		m = scanner.ScanComments
	}
	// Funnel scanner errors into the parser's accumulated error list.
	eh := func(pos token.Pos, msg string, args []interface{}) {
		p.errors = errors.Append(p.errors, errors.Newf(pos, msg, args...))
	}
	p.scanner.Init(p.file, src, eh, m)

	p.trace = p.mode&traceMode != 0 // for convenience (p.trace is used frequently)

	// Sentinel comment state; real states are pushed by openComments.
	p.comments = &commentState{pos: -1}

	p.next()
}
    87  
// commentState collects the comment groups encountered while parsing a
// single production. States form a stack (linked via parent) that
// mirrors the nesting of productions being parsed.
type commentState struct {
	parent *commentState       // enclosing production's state
	pos    int8                // current comment position within the production
	groups []*ast.CommentGroup // comment groups collected so far

	// lists are not attached to nodes themselves. Enclosed expressions may
	// miss a comment due to commas and line termination. closeLists ensures
	// that comments will be passed to someone.
	isList    int      // > 0 while a list is open (acts as a reference count)
	lastChild ast.Node // most recently completed child node of the list
	lastPos   int8     // comment position recorded for that child
}
   100  
// openComments reserves the next doc comment for the caller and flushes
// comments pending on an enclosing list to their proper owner. It
// pushes and returns a fresh comment state for the production that is
// about to be parsed.
func (p *parser) openComments() *commentState {
	child := &commentState{
		parent: p.comments,
	}
	if c := p.comments; c != nil && c.isList > 0 {
		if c.lastChild != nil {
			// Attach pending comments to the previously completed list
			// element: groups at position 0 go before its existing
			// comments, all others after them at the child's last
			// recorded position.
			var groups []*ast.CommentGroup
			for _, cg := range c.groups {
				if cg.Position == 0 {
					groups = append(groups, cg)
				}
			}
			groups = append(groups, ast.Comments(c.lastChild)...)
			for _, cg := range c.groups {
				if cg.Position != 0 {
					cg.Position = c.lastPos
					groups = append(groups, cg)
				}
			}
			ast.SetComments(c.lastChild, groups)
			c.groups = nil
		} else {
			c.lastChild = nil
			// attach before next: hand the pending groups to the new
			// child as leading (position 0) comments.
			for _, cg := range c.groups {
				cg.Position = 0
			}
			child.groups = c.groups
			c.groups = nil
		}
	}
	// A pending lead comment becomes a leading comment of the new node.
	if p.leadComment != nil {
		child.groups = append(child.groups, p.leadComment)
		p.leadComment = nil
	}
	p.comments = child
	return child
}
   140  
   141  // openList is used to treat a list of comments as a single comment
   142  // position in a production.
   143  func (p *parser) openList() {
   144  	if p.comments.isList > 0 {
   145  		p.comments.isList++
   146  		return
   147  	}
   148  	c := &commentState{
   149  		parent: p.comments,
   150  		isList: 1,
   151  	}
   152  	p.comments = c
   153  }
   154  
   155  func (c *commentState) add(g *ast.CommentGroup) {
   156  	g.Position = c.pos
   157  	c.groups = append(c.groups, g)
   158  }
   159  
// closeList ends one level of list processing. Pending comment groups
// are attached to the last parsed list element; when the outermost
// list level closes, any remaining groups are handed to the parent
// comment state.
func (p *parser) closeList() {
	c := p.comments
	if c.lastChild != nil {
		// Trailing comments belong to the most recent element.
		for _, cg := range c.groups {
			cg.Position = c.lastPos
			ast.AddComment(c.lastChild, cg)
		}
		c.groups = nil
	}
	switch c.isList--; {
	case c.isList < 0:
		// More closeList than openList calls: a parser bug.
		if !p.panicking {
			err := errors.Newf(p.pos, "unmatched close list")
			p.errors = errors.Append(p.errors, err)
			p.panicking = true
			panic(err)
		}
	case c.isList == 0:
		// Outermost list closed: pop the state, forwarding leftovers.
		parent := c.parent
		if len(c.groups) > 0 {
			parent.groups = append(parent.groups, c.groups...)
		}
		parent.pos++
		p.comments = parent
	}
}
   186  
   187  func (c *commentState) closeNode(p *parser, n ast.Node) ast.Node {
   188  	if p.comments != c {
   189  		if !p.panicking {
   190  			err := errors.Newf(p.pos, "unmatched comments")
   191  			p.errors = errors.Append(p.errors, err)
   192  			p.panicking = true
   193  			panic(err)
   194  		}
   195  		return n
   196  	}
   197  	p.comments = c.parent
   198  	if c.parent != nil {
   199  		c.parent.lastChild = n
   200  		c.parent.lastPos = c.pos
   201  		c.parent.pos++
   202  	}
   203  	for _, cg := range c.groups {
   204  		if n != nil {
   205  			if cg != nil {
   206  				ast.AddComment(n, cg)
   207  			}
   208  		}
   209  	}
   210  	c.groups = nil
   211  	return n
   212  }
   213  
   214  func (c *commentState) closeExpr(p *parser, n ast.Expr) ast.Expr {
   215  	c.closeNode(p, n)
   216  	return n
   217  }
   218  
   219  func (c *commentState) closeClause(p *parser, n ast.Clause) ast.Clause {
   220  	c.closeNode(p, n)
   221  	return n
   222  }
   223  
   224  // ----------------------------------------------------------------------------
   225  // Parsing support
   226  
   227  func (p *parser) printTrace(a ...interface{}) {
   228  	const dots = ". . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . "
   229  	const n = len(dots)
   230  	pos := p.file.Position(p.pos)
   231  	fmt.Printf("%5d:%3d: ", pos.Line, pos.Column)
   232  	i := 2 * p.indent
   233  	for i > n {
   234  		fmt.Print(dots)
   235  		i -= n
   236  	}
   237  	// i <= n
   238  	fmt.Print(dots[0:i])
   239  	fmt.Println(a...)
   240  }
   241  
   242  func trace(p *parser, msg string) *parser {
   243  	p.printTrace(msg, "(")
   244  	p.indent++
   245  	return p
   246  }
   247  
   248  // Usage pattern: defer un(trace(p, "..."))
   249  func un(p *parser) {
   250  	p.indent--
   251  	p.printTrace(")")
   252  }
   253  
   254  // Advance to the next
   255  func (p *parser) next0() {
   256  	// Because of one-token look-ahead, print the previous token
   257  	// when tracing as it provides a more readable output. The
   258  	// very first token (!p.pos.IsValid()) is not initialized
   259  	// (it is ILLEGAL), so don't print it .
   260  	if p.trace && p.pos.IsValid() {
   261  		s := p.tok.String()
   262  		switch {
   263  		case p.tok.IsLiteral():
   264  			p.printTrace(s, p.lit)
   265  		case p.tok.IsOperator(), p.tok.IsKeyword():
   266  			p.printTrace("\"" + s + "\"")
   267  		default:
   268  			p.printTrace(s)
   269  		}
   270  	}
   271  
   272  	p.pos, p.tok, p.lit = p.scanner.Scan()
   273  }
   274  
   275  // Consume a comment and return it and the line on which it ends.
   276  func (p *parser) consumeComment() (comment *ast.Comment, endline int) {
   277  	endline = p.file.Line(p.pos)
   278  
   279  	comment = &ast.Comment{Slash: p.pos, Text: p.lit}
   280  	p.next0()
   281  
   282  	return
   283  }
   284  
   285  // Consume a group of adjacent comments, add it to the parser's
   286  // comments list, and return it together with the line at which
   287  // the last comment in the group ends. A non-comment token or n
   288  // empty lines terminate a comment group.
   289  func (p *parser) consumeCommentGroup(prevLine, n int) (comments *ast.CommentGroup, endline int) {
   290  	var list []*ast.Comment
   291  	var rel token.RelPos
   292  	endline = p.file.Line(p.pos)
   293  	switch endline - prevLine {
   294  	case 0:
   295  		rel = token.Blank
   296  	case 1:
   297  		rel = token.Newline
   298  	default:
   299  		rel = token.NewSection
   300  	}
   301  	for p.tok == token.COMMENT && p.file.Line(p.pos) <= endline+n {
   302  		var comment *ast.Comment
   303  		comment, endline = p.consumeComment()
   304  		list = append(list, comment)
   305  	}
   306  
   307  	cg := &ast.CommentGroup{List: list}
   308  	ast.SetRelPos(cg, rel)
   309  	comments = cg
   310  	return
   311  }
   312  
// next advances to the next non-comment token. In the process, it
// collects any comment groups encountered and remembers the last lead
// and line comments.
//
// A lead comment is a comment group that starts and ends in a
// line without any other tokens and that is followed by a non-comment
// token on the line immediately after the comment group.
//
// A line comment is a comment group that follows a non-comment
// token on the same line, and that has no tokens after it on the line
// where it ends.
//
// Lead and line comments may be considered documentation that is
// stored in the AST.
func (p *parser) next() {
	// A leadComment may not be consumed if it leads an inner token of a node.
	if p.leadComment != nil {
		p.comments.add(p.leadComment)
	}
	p.leadComment = nil
	prev := p.pos
	p.next0()
	p.comments.pos++

	if p.tok == token.COMMENT {
		var comment *ast.CommentGroup
		var endline int

		currentLine := p.file.Line(p.pos)
		prevLine := p.file.Line(prev)
		if prevLine == currentLine {
			// The comment is on same line as the previous token; it
			// cannot be a lead comment but may be a line comment.
			comment, endline = p.consumeCommentGroup(prevLine, 0)
			if p.file.Line(p.pos) != endline {
				// The next token is on a different line, thus
				// the last comment group is a line comment.
				comment.Line = true
			}
		}

		// consume successor comments, if any
		endline = -1
		for p.tok == token.COMMENT {
			// Only the final group can be a lead comment; record
			// earlier groups at the current comment position now.
			if comment != nil {
				p.comments.add(comment)
			}
			comment, endline = p.consumeCommentGroup(prevLine, 1)
			prevLine = currentLine
			currentLine = p.file.Line(p.pos)

		}

		if endline+1 == p.file.Line(p.pos) && p.tok != token.EOF {
			// The next token is following on the line immediately after the
			// comment group, thus the last comment group is a lead comment.
			comment.Doc = true
			p.leadComment = comment
		} else {
			p.comments.add(comment)
		}
	}
}
   376  
   377  // assertV0 indicates the last version at which a certain feature was
   378  // supported.
   379  func (p *parser) assertV0(pos token.Pos, minor, patch int, name string) {
   380  	v := internal.Version(minor, patch)
   381  	base := p.version
   382  	if base == 0 {
   383  		base = internal.APIVersionSupported
   384  	}
   385  	if base > v {
   386  		p.errors = errors.Append(p.errors,
   387  			errors.Wrapf(&DeprecationError{v}, pos,
   388  				"use of deprecated %s (deprecated as of v0.%d.%d)", name, minor, patch+1))
   389  	}
   390  }
   391  
   392  func (p *parser) errf(pos token.Pos, msg string, args ...interface{}) {
   393  	// ePos := p.file.Position(pos)
   394  	ePos := pos
   395  
   396  	// If AllErrors is not set, discard errors reported on the same line
   397  	// as the last recorded error and stop parsing if there are more than
   398  	// 10 errors.
   399  	if p.mode&allErrorsMode == 0 {
   400  		errors := errors.Errors(p.errors)
   401  		n := len(errors)
   402  		if n > 0 && errors[n-1].Position().Line() == ePos.Line() {
   403  			return // discard - likely a spurious error
   404  		}
   405  		if n > 10 {
   406  			p.panicking = true
   407  			panic("too many errors")
   408  		}
   409  	}
   410  
   411  	p.errors = errors.Append(p.errors, errors.Newf(ePos, msg, args...))
   412  }
   413  
   414  func (p *parser) errorExpected(pos token.Pos, obj string) {
   415  	if pos != p.pos {
   416  		p.errf(pos, "expected %s", obj)
   417  		return
   418  	}
   419  	// the error happened at the current position;
   420  	// make the error message more specific
   421  	if p.tok == token.COMMA && p.lit == "\n" {
   422  		p.errf(pos, "expected %s, found newline", obj)
   423  		return
   424  	}
   425  
   426  	if p.tok.IsLiteral() {
   427  		p.errf(pos, "expected %s, found '%s' %s", obj, p.tok, p.lit)
   428  	} else {
   429  		p.errf(pos, "expected %s, found '%s'", obj, p.tok)
   430  	}
   431  }
   432  
   433  func (p *parser) expect(tok token.Token) token.Pos {
   434  	pos := p.pos
   435  	if p.tok != tok {
   436  		p.errorExpected(pos, "'"+tok.String()+"'")
   437  	}
   438  	p.next() // make progress
   439  	return pos
   440  }
   441  
   442  // expectClosing is like expect but provides a better error message
   443  // for the common case of a missing comma before a newline.
   444  func (p *parser) expectClosing(tok token.Token, context string) token.Pos {
   445  	if p.tok != tok && p.tok == token.COMMA && p.lit == "\n" {
   446  		p.errf(p.pos, "missing ',' before newline in %s", context)
   447  		p.next()
   448  	}
   449  	return p.expect(tok)
   450  }
   451  
   452  func (p *parser) expectComma() {
   453  	// semicolon is optional before a closing ')', ']', '}', or newline
   454  	if p.tok != token.RPAREN && p.tok != token.RBRACE && p.tok != token.EOF {
   455  		switch p.tok {
   456  		case token.COMMA:
   457  			p.next()
   458  		default:
   459  			p.errorExpected(p.pos, "','")
   460  			syncExpr(p)
   461  		}
   462  	}
   463  }
   464  
   465  func (p *parser) atComma(context string, follow ...token.Token) bool {
   466  	if p.tok == token.COMMA {
   467  		return true
   468  	}
   469  	for _, t := range follow {
   470  		if p.tok == t {
   471  			return false
   472  		}
   473  	}
   474  	// TODO: find a way to detect crossing lines now we don't have a semi.
   475  	if p.lit == "\n" {
   476  		p.errf(p.pos, "missing ',' before newline")
   477  	} else {
   478  		p.errf(p.pos, "missing ',' in %s", context)
   479  	}
   480  	return true // "insert" comma and continue
   481  }
   482  
// syncExpr advances to the next field in a field list.
// Used for synchronization after an error. It stops at a comma (the
// field separator) or EOF, subject to the progress heuristics below.
func syncExpr(p *parser) {
	for {
		switch p.tok {
		case token.COMMA:
			// Return only if parser made some progress since last
			// sync or if it has not reached 10 sync calls without
			// progress. Otherwise consume at least one token to
			// avoid an endless parser loop (it is possible that
			// both parseOperand and parseStmt call syncStmt and
			// correctly do not advance, thus the need for the
			// invocation limit p.syncCnt).
			if p.pos == p.syncPos && p.syncCnt < 10 {
				p.syncCnt++
				return
			}
			if p.syncPos.Before(p.pos) {
				// Progress was made: record it and reset the counter.
				p.syncPos = p.pos
				p.syncCnt = 0
				return
			}
			// Reaching here indicates a parser bug, likely an
			// incorrect token list in this function, but it only
			// leads to skipping of possibly correct code if a
			// previous error is present, and thus is preferred
			// over a non-terminating parse.
		case token.EOF:
			return
		}
		p.next()
	}
}
   516  
// safePos returns a valid file position for a given position: If pos
// is valid to begin with, safePos returns pos. If pos is out-of-range,
// safePos returns the EOF position.
//
// This is hack to work around "artificial" end positions in the AST which
// are computed by adding 1 to (presumably valid) token positions. If the
// token positions are invalid due to parse errors, the resulting end position
// may be past the file's EOF position, which would lead to panics if used
// later on.
func (p *parser) safePos(pos token.Pos) (res token.Pos) {
	// file.Offset panics on out-of-range positions; recover and
	// substitute the EOF position in that case.
	defer func() {
		if recover() != nil {
			res = p.file.Pos(p.file.Base()+p.file.Size(), pos.RelPos()) // EOF position
		}
	}()
	_ = p.file.Offset(pos) // trigger a panic if position is out-of-range
	return pos
}
   535  
   536  // ----------------------------------------------------------------------------
   537  // Identifiers
   538  
   539  func (p *parser) parseIdent() *ast.Ident {
   540  	c := p.openComments()
   541  	pos := p.pos
   542  	name := "_"
   543  	if p.tok == token.IDENT {
   544  		name = p.lit
   545  		p.next()
   546  	} else {
   547  		p.expect(token.IDENT) // use expect() error handling
   548  	}
   549  	ident := &ast.Ident{NamePos: pos, Name: name}
   550  	c.closeNode(p, ident)
   551  	return ident
   552  }
   553  
   554  func (p *parser) parseKeyIdent() *ast.Ident {
   555  	c := p.openComments()
   556  	pos := p.pos
   557  	name := p.lit
   558  	p.next()
   559  	ident := &ast.Ident{NamePos: pos, Name: name}
   560  	c.closeNode(p, ident)
   561  	return ident
   562  }
   563  
   564  // ----------------------------------------------------------------------------
   565  // Expressions
   566  
// parseOperand returns an expression.
// Callers must verify the result.
func (p *parser) parseOperand() (expr ast.Expr) {
	if p.trace {
		defer un(trace(p, "Operand"))
	}

	switch p.tok {
	case token.IDENT:
		return p.parseIdent()

	case token.LBRACE:
		return p.parseStruct()

	case token.LBRACK:
		return p.parseList()

	case token.FUNC:
		// Only parse "func" specially when function parsing is enabled;
		// otherwise treat it like any other keyword identifier.
		if p.mode&parseFuncsMode != 0 {
			return p.parseFunc()
		} else {
			return p.parseKeyIdent()
		}

	case token.BOTTOM:
		c := p.openComments()
		x := &ast.BottomLit{Bottom: p.pos}
		p.next()
		return c.closeExpr(p, x)

	case token.NULL, token.TRUE, token.FALSE, token.INT, token.FLOAT, token.STRING:
		// Simple literal: capture position, kind, and literal text.
		c := p.openComments()
		x := &ast.BasicLit{ValuePos: p.pos, Kind: p.tok, Value: p.lit}
		p.next()
		return c.closeExpr(p, x)

	case token.INTERPOLATION:
		return p.parseInterpolation()

	case token.LPAREN:
		c := p.openComments()
		defer func() { c.closeNode(p, expr) }()
		lparen := p.pos
		p.next()
		p.exprLev++
		p.openList()
		x := p.parseRHS() // types may be parenthesized: (some type)
		p.closeList()
		p.exprLev--
		rparen := p.expect(token.RPAREN)
		return &ast.ParenExpr{
			Lparen: lparen,
			X:      x,
			Rparen: rparen}

	default:
		// Remaining keywords may be used as identifiers.
		if p.tok.IsKeyword() {
			return p.parseKeyIdent()
		}
	}

	// we have an error
	c := p.openComments()
	pos := p.pos
	p.errorExpected(pos, "operand")
	syncExpr(p)
	return c.closeExpr(p, &ast.BadExpr{From: pos, To: p.pos})
}
   635  
// parseIndexOrSlice parses an index expression x[i] or a slice
// expression x[low:high] following the already parsed operand x.
func (p *parser) parseIndexOrSlice(x ast.Expr) (expr ast.Expr) {
	if p.trace {
		defer un(trace(p, "IndexOrSlice"))
	}

	c := p.openComments()
	defer func() { c.closeNode(p, expr) }()
	// Comments inside the brackets attach at position 1 (after x).
	c.pos = 1

	// At most two operands (low and high) separated by one colon.
	const N = 2
	lbrack := p.expect(token.LBRACK)

	p.exprLev++
	var index [N]ast.Expr
	var colons [N - 1]token.Pos
	if p.tok != token.COLON {
		index[0] = p.parseRHS()
	}
	nColons := 0
	for p.tok == token.COLON && nColons < len(colons) {
		colons[nColons] = p.pos
		nColons++
		p.next()
		// Each bound is optional; an immediately following ':', ']'
		// or EOF leaves it nil.
		if p.tok != token.COLON && p.tok != token.RBRACK && p.tok != token.EOF {
			index[nColons] = p.parseRHS()
		}
	}
	p.exprLev--
	rbrack := p.expect(token.RBRACK)

	if nColons > 0 {
		// At least one colon seen: this is a slice expression.
		return &ast.SliceExpr{
			X:      x,
			Lbrack: lbrack,
			Low:    index[0],
			High:   index[1],
			Rbrack: rbrack}
	}

	return &ast.IndexExpr{
		X:      x,
		Lbrack: lbrack,
		Index:  index[0],
		Rbrack: rbrack}
}
   681  
// parseCallOrConversion parses the parenthesized argument list of a
// call expression whose callee fun has already been parsed.
func (p *parser) parseCallOrConversion(fun ast.Expr) (expr *ast.CallExpr) {
	if p.trace {
		defer un(trace(p, "CallOrConversion"))
	}
	c := p.openComments()
	defer func() { c.closeNode(p, expr) }()

	// Treat the whole argument list as one comment position.
	p.openList()
	defer p.closeList()

	lparen := p.expect(token.LPAREN)

	p.exprLev++
	var list []ast.Expr
	for p.tok != token.RPAREN && p.tok != token.EOF {
		list = append(list, p.parseRHS()) // builtins may expect a type: make(some type, ...)
		if !p.atComma("argument list", token.RPAREN) {
			break
		}
		p.next()
	}
	p.exprLev--
	rparen := p.expectClosing(token.RPAREN, "argument list")

	return &ast.CallExpr{
		Fun:    fun,
		Lparen: lparen,
		Args:   list,
		Rparen: rparen}
}
   712  
   713  // TODO: inline this function in parseFieldList once we no longer user comment
   714  // position information in parsing.
   715  func (p *parser) consumeDeclComma() {
   716  	if p.atComma("struct literal", token.RBRACE, token.EOF) {
   717  		p.next()
   718  	}
   719  }
   720  
// parseFieldList parses the sequence of declarations inside a struct
// body, stopping at the closing brace or EOF.
func (p *parser) parseFieldList() (list []ast.Decl) {
	if p.trace {
		defer un(trace(p, "FieldList"))
	}
	// The whole list counts as one comment position.
	p.openList()
	defer p.closeList()

	for p.tok != token.RBRACE && p.tok != token.EOF {
		switch p.tok {
		case token.ATTRIBUTE:
			list = append(list, p.parseAttribute())
			p.consumeDeclComma()

		case token.ELLIPSIS:
			c := p.openComments()
			ellipsis := &ast.Ellipsis{Ellipsis: p.pos}
			p.next()
			c.closeNode(p, ellipsis)
			list = append(list, ellipsis)
			p.consumeDeclComma()

		default:
			// Fields consume their own trailing comma.
			list = append(list, p.parseField())
		}

		// TODO: handle next comma here, after disallowing non-colon separator
		// and we have eliminated the need comment positions.
	}

	return
}
   752  
// parseLetDecl parses a let clause "let X = expr". If the let keyword
// is not followed by an identifier, it returns (nil, ident) where
// ident represents "let" used as a plain identifier, so the caller can
// reinterpret it (e.g. as a field label).
func (p *parser) parseLetDecl() (decl ast.Decl, ident *ast.Ident) {
	if p.trace {
		defer un(trace(p, "Field"))
	}

	c := p.openComments()

	letPos := p.expect(token.LET)
	if p.tok != token.IDENT {
		// Not a let clause. ident is still nil here, so closeNode only
		// pops the comment state without attaching comments.
		c.closeNode(p, ident)
		return nil, &ast.Ident{
			NamePos: letPos,
			Name:    "let",
		}
	}
	defer func() { c.closeNode(p, decl) }()

	ident = p.parseIdent()
	assign := p.expect(token.BIND)
	expr := p.parseRHS()

	p.consumeDeclComma()

	return &ast.LetClause{
		Let:   letPos,
		Ident: ident,
		Equal: assign,
		Expr:  expr,
	}, nil
}
   783  
// parseComprehension parses a comprehension such as "for x in e {...}"
// or "if cond {...}". If the clauses do not in fact form a
// comprehension, it returns (nil, ident) where ident holds the leading
// keyword reinterpreted as an identifier, so the caller can use it as
// a label.
func (p *parser) parseComprehension() (decl ast.Decl, ident *ast.Ident) {
	if p.trace {
		defer un(trace(p, "Comprehension"))
	}

	c := p.openComments()
	defer func() { c.closeNode(p, decl) }()

	tok := p.tok
	pos := p.pos
	clauses, fc := p.parseComprehensionClauses(true)
	if fc != nil {
		// Not a comprehension: hand back the keyword as an identifier,
		// attaching the pending comments to it.
		ident = &ast.Ident{
			NamePos: pos,
			Name:    tok.String(),
		}
		fc.closeNode(p, ident)
		return nil, ident
	}

	sc := p.openComments()
	expr := p.parseStruct()
	sc.closeExpr(p, expr)

	if p.atComma("struct literal", token.RBRACE) { // TODO: may be EOF
		p.next()
	}

	return &ast.Comprehension{
		Clauses: clauses,
		Value:   expr,
	}, nil
}
   817  
// parseField parses one declaration in a struct body: a field
// (possibly with shorthand labels "a: b: c: v"), an embedded
// expression, an old-style alias, or a comprehension/let produced by
// parseLabel.
func (p *parser) parseField() (decl ast.Decl) {
	if p.trace {
		defer un(trace(p, "Field"))
	}

	c := p.openComments()
	defer func() { c.closeNode(p, decl) }()

	pos := p.pos

	// this is the outermost field; m walks down nested shorthand fields.
	this := &ast.Field{Label: nil}
	m := this

	tok := p.tok

	label, expr, decl, ok := p.parseLabel(false)
	if decl != nil {
		// parseLabel already produced a complete declaration
		// (comprehension or let clause).
		return decl
	}
	m.Label = label

	if !ok {
		// Not a label: an embedded expression or an old-style alias.
		if expr == nil {
			expr = p.parseRHS()
		}
		if a, ok := expr.(*ast.Alias); ok {
			p.assertV0(a.Pos(), 1, 3, `old-style alias; use "let X = expr" instead`)
			p.consumeDeclComma()
			return a
		}
		e := &ast.EmbedDecl{Expr: expr}
		p.consumeDeclComma()
		return e
	}

	// Optional ('?') or required ('!') constraint marker on the label.
	switch p.tok {
	case token.OPTION, token.NOT:
		m.Optional = p.pos
		m.Constraint = p.tok
		p.next()
	}

	// TODO: consider disallowing comprehensions with more than one label.
	// This can be a bit awkward in some cases, but it would naturally
	// enforce the proper style that a comprehension be defined in the
	// smallest possible scope.
	// allowComprehension = false

	switch p.tok {
	case token.COLON:
	case token.COMMA:
		p.expectComma() // sync parser.
		fallthrough

	case token.RBRACE, token.EOF:
		// A label-like expression not followed by ':' is an embedding
		// (or an old-style alias).
		if a, ok := expr.(*ast.Alias); ok {
			p.assertV0(a.Pos(), 1, 3, `old-style alias; use "let X = expr" instead`)
			return a
		}
		switch tok {
		case token.IDENT, token.LBRACK, token.LPAREN,
			token.STRING, token.INTERPOLATION,
			token.NULL, token.TRUE, token.FALSE,
			token.FOR, token.IF, token.LET, token.IN:
			return &ast.EmbedDecl{Expr: expr}
		}
		fallthrough

	default:
		p.errorExpected(p.pos, "label or ':'")
		return &ast.BadDecl{From: pos, To: p.pos}
	}

	m.TokenPos = p.pos
	m.Token = p.tok
	if p.tok != token.COLON {
		p.errorExpected(pos, "':'")
	}
	p.next() // :

	// Parse the chain of shorthand labels, ending with the value.
	for {
		// A list label must contain exactly one element.
		if l, ok := m.Label.(*ast.ListLit); ok && len(l.Elts) != 1 {
			p.errf(l.Pos(), "square bracket must have exactly one element")
		}

		label, expr, _, ok := p.parseLabel(true)
		if !ok || (p.tok != token.COLON && p.tok != token.OPTION && p.tok != token.NOT) {
			// Not another label: what was (or is now) parsed is the value.
			if expr == nil {
				expr = p.parseRHS()
			}
			m.Value = expr
			break
		}
		// Another label: nest it as a single-field struct value.
		field := &ast.Field{Label: label}
		m.Value = &ast.StructLit{Elts: []ast.Decl{field}}
		m = field

		switch p.tok {
		case token.OPTION, token.NOT:
			m.Optional = p.pos
			m.Constraint = p.tok
			p.next()
		}

		m.TokenPos = p.pos
		m.Token = p.tok
		if p.tok != token.COLON {
			if p.tok.IsLiteral() {
				p.errf(p.pos, "expected ':'; found %s", p.lit)
			} else {
				p.errf(p.pos, "expected ':'; found %s", p.tok)
			}
			break
		}
		p.next()
	}

	// Attributes after the value attach to the innermost field.
	if attrs := p.parseAttributes(); attrs != nil {
		m.Attrs = attrs
	}

	p.consumeDeclComma()

	return this
}
   943  
   944  func (p *parser) parseAttributes() (attrs []*ast.Attribute) {
   945  	p.openList()
   946  	for p.tok == token.ATTRIBUTE {
   947  		attrs = append(attrs, p.parseAttribute())
   948  	}
   949  	p.closeList()
   950  	return attrs
   951  }
   952  
   953  func (p *parser) parseAttribute() *ast.Attribute {
   954  	c := p.openComments()
   955  	a := &ast.Attribute{At: p.pos, Text: p.lit}
   956  	p.next()
   957  	c.closeNode(p, a)
   958  	return a
   959  }
   960  
// parseLabel parses an expression that may serve as a field label.
// It returns the label (with ok == true) when the expression is a
// valid label, the parsed expression itself, and possibly a complete
// declaration when a comprehension or let clause was recognized
// instead. rhs indicates parsing within a field value, in which case
// for/if/let keywords do not introduce comprehensions.
func (p *parser) parseLabel(rhs bool) (label ast.Label, expr ast.Expr, decl ast.Decl, ok bool) {
	tok := p.tok
	switch tok {

	case token.FOR, token.IF:
		if rhs {
			// Inside a value these keywords act as plain expressions.
			expr = p.parseExpr()
			break
		}
		comp, ident := p.parseComprehension()
		if comp != nil {
			return nil, nil, comp, false
		}
		// Not a comprehension after all: the keyword is an identifier.
		expr = ident

	case token.LET:
		let, ident := p.parseLetDecl()
		if let != nil {
			return nil, nil, let, false
		}
		// Not a let clause: "let" is used as an identifier.
		expr = ident

	case token.IDENT, token.STRING, token.INTERPOLATION, token.LPAREN,
		token.NULL, token.TRUE, token.FALSE, token.IN, token.FUNC:
		expr = p.parseExpr()

	case token.LBRACK:
		expr = p.parseRHS()
		switch x := expr.(type) {
		case *ast.ListLit:
			// Note: caller must verify this list is suitable as a label.
			label, ok = x, true
		}
	}

	switch x := expr.(type) {
	case *ast.BasicLit:
		switch x.Kind {
		case token.STRING, token.NULL, token.TRUE, token.FALSE, token.FUNC:
			// Keywords that represent operands.

			// Allowing keywords to be used as a labels should not interfere with
			// generating good errors: any keyword can only appear on the RHS of a
			// field (after a ':'), whereas labels always appear on the LHS.

			label, ok = x, true
		}

	case *ast.Ident:
		if strings.HasPrefix(x.Name, "__") {
			p.errf(x.NamePos, "identifiers starting with '__' are reserved")
		}

		// The identifier may be followed by '=', forming an alias.
		expr = p.parseAlias(x)
		if a, ok := expr.(*ast.Alias); ok {
			// An alias is only a label if its expression is one.
			if _, ok = a.Expr.(ast.Label); !ok {
				break
			}
			label = a
		} else {
			label = x
		}
		ok = true

	case ast.Label:
		label, ok = x, true
	}
	return label, expr, nil, ok
}
  1030  
  1031  func (p *parser) parseStruct() (expr ast.Expr) {
  1032  	lbrace := p.expect(token.LBRACE)
  1033  
  1034  	if p.trace {
  1035  		defer un(trace(p, "StructLit"))
  1036  	}
  1037  
  1038  	elts := p.parseStructBody()
  1039  	rbrace := p.expectClosing(token.RBRACE, "struct literal")
  1040  	return &ast.StructLit{
  1041  		Lbrace: lbrace,
  1042  		Elts:   elts,
  1043  		Rbrace: rbrace,
  1044  	}
  1045  }
  1046  
  1047  func (p *parser) parseStructBody() []ast.Decl {
  1048  	if p.trace {
  1049  		defer un(trace(p, "StructBody"))
  1050  	}
  1051  
  1052  	p.exprLev++
  1053  	var elts []ast.Decl
  1054  
  1055  	// TODO: consider "stealing" non-lead comments.
  1056  	// for _, cg := range p.comments.groups {
  1057  	// 	if cg != nil {
  1058  	// 		elts = append(elts, cg)
  1059  	// 	}
  1060  	// }
  1061  	// p.comments.groups = p.comments.groups[:0]
  1062  
  1063  	if p.tok != token.RBRACE {
  1064  		elts = p.parseFieldList()
  1065  	}
  1066  	p.exprLev--
  1067  
  1068  	return elts
  1069  }
  1070  
// parseComprehensionClauses parses either new-style (first==true)
// or old-style (first==false) comprehension clauses: a sequence of
// "for", "if", and "let" clauses, optionally separated by commas.
// If first is true and the initial keyword turns out to be used as a
// regular identifier, nil clauses are returned together with the
// already-opened comment state so the caller can reinterpret the token.
//
// Should we now disallow keywords as identifiers? If not, we need to
// return a list of discovered labels as the alternative.
func (p *parser) parseComprehensionClauses(first bool) (clauses []ast.Clause, c *commentState) {
	// TODO: reuse Template spec, which is possible if it doesn't check the
	// first is an identifier.

	for {
		switch p.tok {
		case token.FOR:
			c := p.openComments()
			forPos := p.expect(token.FOR)
			if first {
				// "for" is being used as an identifier (e.g. a label),
				// not as a clause keyword: bail out.
				switch p.tok {
				case token.COLON, token.BIND, token.OPTION,
					token.COMMA, token.EOF:
					return nil, c
				}
			}

			// Either "for value in source" or "for key, value in source".
			var key, value *ast.Ident
			var colon token.Pos
			value = p.parseIdent()
			if p.tok == token.COMMA {
				colon = p.expect(token.COMMA)
				key = value
				value = p.parseIdent()
			}
			c.pos = 4
			// params := p.parseParams(nil, ARROW)
			clauses = append(clauses, c.closeClause(p, &ast.ForClause{
				For:    forPos,
				Key:    key,
				Colon:  colon,
				Value:  value,
				In:     p.expect(token.IN),
				Source: p.parseRHS(),
			}))

		case token.IF:
			c := p.openComments()
			ifPos := p.expect(token.IF)
			if first {
				// "if" is being used as an identifier, not a clause keyword.
				switch p.tok {
				case token.COLON, token.BIND, token.OPTION,
					token.COMMA, token.EOF:
					return nil, c
				}
			}

			clauses = append(clauses, c.closeClause(p, &ast.IfClause{
				If:        ifPos,
				Condition: p.parseRHS(),
			}))

		case token.LET:
			c := p.openComments()
			letPos := p.expect(token.LET)

			// "let ident = expr"
			ident := p.parseIdent()
			assign := p.expect(token.BIND)
			expr := p.parseRHS()

			clauses = append(clauses, c.closeClause(p, &ast.LetClause{
				Let:   letPos,
				Ident: ident,
				Equal: assign,
				Expr:  expr,
			}))

		default:
			// No more clause keywords: the clause list is complete.
			return clauses, nil
		}
		// Clauses may optionally be separated by commas.
		if p.tok == token.COMMA {
			p.next()
		}

		first = false
	}
}
  1152  
// parseFunc parses a function type of the form "func(A1, A2): R".
// Because "func" may also be used as a regular identifier, an
// *ast.Ident is returned instead when the following token shows that
// no function type can follow.
func (p *parser) parseFunc() (expr ast.Expr) {
	if p.trace {
		defer un(trace(p, "Func"))
	}
	tok := p.tok
	pos := p.pos
	fun := p.expect(token.FUNC)

	// "func" might be used as an identifier, in which case bail out early.
	switch p.tok {
	case token.COLON, token.BIND, token.OPTION,
		token.COMMA, token.EOF:

		return &ast.Ident{
			NamePos: pos,
			Name:    tok.String(),
		}
	}

	// Parenthesized list of argument types.
	p.expect(token.LPAREN)
	args := p.parseFuncArgs()
	p.expectClosing(token.RPAREN, "argument type list")

	// Return type after the colon.
	p.expect(token.COLON)
	ret := p.parseExpr()

	return &ast.Func{
		Func: fun,
		Args: args,
		Ret:  ret,
	}
}
  1185  
  1186  func (p *parser) parseFuncArgs() (list []ast.Expr) {
  1187  	if p.trace {
  1188  		defer un(trace(p, "FuncArgs"))
  1189  	}
  1190  	p.openList()
  1191  	defer p.closeList()
  1192  
  1193  	for p.tok != token.RPAREN && p.tok != token.EOF {
  1194  		list = append(list, p.parseFuncArg())
  1195  		if p.tok != token.RPAREN {
  1196  			p.expectComma()
  1197  		}
  1198  	}
  1199  
  1200  	return list
  1201  }
  1202  
  1203  func (p *parser) parseFuncArg() (expr ast.Expr) {
  1204  	if p.trace {
  1205  		defer un(trace(p, "FuncArg"))
  1206  	}
  1207  	return p.parseExpr()
  1208  }
  1209  
  1210  func (p *parser) parseList() (expr ast.Expr) {
  1211  	lbrack := p.expect(token.LBRACK)
  1212  
  1213  	if p.trace {
  1214  		defer un(trace(p, "ListLiteral"))
  1215  	}
  1216  
  1217  	elts := p.parseListElements()
  1218  
  1219  	if p.tok == token.ELLIPSIS {
  1220  		ellipsis := &ast.Ellipsis{
  1221  			Ellipsis: p.pos,
  1222  		}
  1223  		elts = append(elts, ellipsis)
  1224  		p.next()
  1225  		if p.tok != token.COMMA && p.tok != token.RBRACK {
  1226  			ellipsis.Type = p.parseRHS()
  1227  		}
  1228  		if p.atComma("list literal", token.RBRACK) {
  1229  			p.next()
  1230  		}
  1231  	}
  1232  
  1233  	rbrack := p.expectClosing(token.RBRACK, "list literal")
  1234  	return &ast.ListLit{
  1235  		Lbrack: lbrack,
  1236  		Elts:   elts,
  1237  		Rbrack: rbrack}
  1238  }
  1239  
  1240  func (p *parser) parseListElements() (list []ast.Expr) {
  1241  	if p.trace {
  1242  		defer un(trace(p, "ListElements"))
  1243  	}
  1244  	p.openList()
  1245  	defer p.closeList()
  1246  
  1247  	for p.tok != token.RBRACK && p.tok != token.ELLIPSIS && p.tok != token.EOF {
  1248  		expr, ok := p.parseListElement()
  1249  		list = append(list, expr)
  1250  		if !ok {
  1251  			break
  1252  		}
  1253  	}
  1254  
  1255  	return
  1256  }
  1257  
// parseListElement parses a single list element: either a
// comprehension ("for"/"if" clauses followed by a struct) or a plain
// expression with optional alias. ok reports whether more elements may
// follow (i.e. a separating comma was consumed).
func (p *parser) parseListElement() (expr ast.Expr, ok bool) {
	if p.trace {
		defer un(trace(p, "ListElement"))
	}
	c := p.openComments()
	defer func() { c.closeNode(p, expr) }()

	switch p.tok {
	case token.FOR, token.IF:
		// Either a comprehension or "for"/"if" used as an identifier.
		tok := p.tok
		pos := p.pos
		clauses, fc := p.parseComprehensionClauses(true)
		if clauses != nil {
			sc := p.openComments()
			expr := p.parseStruct()
			sc.closeExpr(p, expr)

			if p.atComma("list literal", token.RBRACK) { // TODO: may be EOF
				p.next()
			}

			return &ast.Comprehension{
				Clauses: clauses,
				Value:   expr,
			}, true
		}

		// Not a comprehension: treat the keyword as a plain identifier
		// and fall through to normal expression parsing below.
		expr = &ast.Ident{
			NamePos: pos,
			Name:    tok.String(),
		}
		fc.closeNode(p, expr)

	default:
		expr = p.parseUnaryExpr()
	}

	expr = p.parseBinaryExprTail(token.LowestPrec+1, expr)
	expr = p.parseAlias(expr)

	// Enforce there is an explicit comma. We could also allow the
	// omission of commas in lists, but this gives rise to some ambiguities
	// with list comprehensions.
	if p.tok == token.COMMA && p.lit != "," {
		// A COMMA whose literal is not "," was inserted by the scanner at
		// a newline; it does not count as an explicit comma.
		p.next()
		// Allow missing comma for last element, though, to be compliant
		// with JSON.
		if p.tok == token.RBRACK || p.tok == token.FOR || p.tok == token.IF {
			return expr, false
		}
		p.errf(p.pos, "missing ',' before newline in list literal")
	} else if !p.atComma("list literal", token.RBRACK, token.FOR, token.IF) {
		return expr, false
	}
	p.next()

	return expr, true
}
  1316  
  1317  // parseAlias turns an expression into an alias.
  1318  func (p *parser) parseAlias(lhs ast.Expr) (expr ast.Expr) {
  1319  	if p.tok != token.BIND {
  1320  		return lhs
  1321  	}
  1322  	pos := p.pos
  1323  	p.next()
  1324  	expr = p.parseRHS()
  1325  	if expr == nil {
  1326  		panic("empty return")
  1327  	}
  1328  	switch x := lhs.(type) {
  1329  	case *ast.Ident:
  1330  		return &ast.Alias{Ident: x, Equal: pos, Expr: expr}
  1331  	}
  1332  	p.errf(p.pos, "expected identifier for alias")
  1333  	return expr
  1334  }
  1335  
  1336  // checkExpr checks that x is an expression (and not a type).
  1337  func (p *parser) checkExpr(x ast.Expr) ast.Expr {
  1338  	switch unparen(x).(type) {
  1339  	case *ast.BadExpr:
  1340  	case *ast.BottomLit:
  1341  	case *ast.Ident:
  1342  	case *ast.BasicLit:
  1343  	case *ast.Interpolation:
  1344  	case *ast.Func:
  1345  	case *ast.StructLit:
  1346  	case *ast.ListLit:
  1347  	case *ast.ParenExpr:
  1348  		panic("unreachable")
  1349  	case *ast.SelectorExpr:
  1350  	case *ast.IndexExpr:
  1351  	case *ast.SliceExpr:
  1352  	case *ast.CallExpr:
  1353  	case *ast.UnaryExpr:
  1354  	case *ast.BinaryExpr:
  1355  	default:
  1356  		// all other nodes are not proper expressions
  1357  		p.errorExpected(x.Pos(), "expression")
  1358  		x = &ast.BadExpr{
  1359  			From: x.Pos(), To: p.safePos(x.End()),
  1360  		}
  1361  	}
  1362  	return x
  1363  }
  1364  
  1365  // If x is of the form (T), unparen returns unparen(T), otherwise it returns x.
  1366  func unparen(x ast.Expr) ast.Expr {
  1367  	if p, isParen := x.(*ast.ParenExpr); isParen {
  1368  		x = unparen(p.X)
  1369  	}
  1370  	return x
  1371  }
  1372  
  1373  // If lhs is set and the result is an identifier, it is not resolved.
  1374  func (p *parser) parsePrimaryExpr() ast.Expr {
  1375  	if p.trace {
  1376  		defer un(trace(p, "PrimaryExpr"))
  1377  	}
  1378  
  1379  	return p.parsePrimaryExprTail(p.parseOperand())
  1380  }
  1381  
// parsePrimaryExprTail repeatedly extends operand with selector
// (".x", `."x"`), index/slice ("[...]"), and call ("(...)") suffixes
// until no suffix token follows.
func (p *parser) parsePrimaryExprTail(operand ast.Expr) ast.Expr {
	x := operand
L:
	for {
		switch p.tok {
		case token.PERIOD:
			c := p.openComments()
			c.pos = 1
			p.next()
			switch p.tok {
			case token.IDENT:
				x = &ast.SelectorExpr{
					X:   p.checkExpr(x),
					Sel: p.parseIdent(),
				}
			case token.STRING:
				// Allow a simple double-quoted string as a selector; a
				// literal starting with `""` (empty or multi-line quoting)
				// falls through to the error handling below.
				if strings.HasPrefix(p.lit, `"`) && !strings.HasPrefix(p.lit, `""`) {
					str := &ast.BasicLit{
						ValuePos: p.pos,
						Kind:     token.STRING,
						Value:    p.lit,
					}
					p.next()
					x = &ast.SelectorExpr{
						X:   p.checkExpr(x),
						Sel: str,
					}
					break
				}
				fallthrough
			default:
				// Keywords may be used as selectors.
				if p.tok.IsKeyword() {
					x = &ast.SelectorExpr{
						X:   p.checkExpr(x),
						Sel: p.parseKeyIdent(),
					}
					break
				}

				// Not a valid selector: report and substitute "_".
				pos := p.pos
				p.errorExpected(pos, "selector")
				p.next() // make progress
				x = &ast.SelectorExpr{X: x, Sel: &ast.Ident{NamePos: pos, Name: "_"}}
			}
			c.closeNode(p, x)
		case token.LBRACK:
			x = p.parseIndexOrSlice(p.checkExpr(x))
		case token.LPAREN:
			x = p.parseCallOrConversion(p.checkExpr(x))
		default:
			break L
		}
	}

	return x
}
  1438  
  1439  // If lhs is set and the result is an identifier, it is not resolved.
  1440  func (p *parser) parseUnaryExpr() ast.Expr {
  1441  	if p.trace {
  1442  		defer un(trace(p, "UnaryExpr"))
  1443  	}
  1444  
  1445  	switch p.tok {
  1446  	case token.ADD, token.SUB, token.NOT, token.MUL,
  1447  		token.LSS, token.LEQ, token.GEQ, token.GTR,
  1448  		token.NEQ, token.MAT, token.NMAT:
  1449  		pos, op := p.pos, p.tok
  1450  		c := p.openComments()
  1451  		p.next()
  1452  		return c.closeExpr(p, &ast.UnaryExpr{
  1453  			OpPos: pos,
  1454  			Op:    op,
  1455  			X:     p.checkExpr(p.parseUnaryExpr()),
  1456  		})
  1457  	}
  1458  
  1459  	return p.parsePrimaryExpr()
  1460  }
  1461  
  1462  func (p *parser) tokPrec() (token.Token, int) {
  1463  	tok := p.tok
  1464  	if tok == token.IDENT {
  1465  		switch p.lit {
  1466  		case "quo":
  1467  			return token.IQUO, 7
  1468  		case "rem":
  1469  			return token.IREM, 7
  1470  		case "div":
  1471  			return token.IDIV, 7
  1472  		case "mod":
  1473  			return token.IMOD, 7
  1474  		default:
  1475  			return tok, 0
  1476  		}
  1477  	}
  1478  	return tok, tok.Precedence()
  1479  }
  1480  
  1481  // If lhs is set and the result is an identifier, it is not resolved.
  1482  func (p *parser) parseBinaryExpr(prec1 int) ast.Expr {
  1483  	if p.trace {
  1484  		defer un(trace(p, "BinaryExpr"))
  1485  	}
  1486  	p.openList()
  1487  	defer p.closeList()
  1488  
  1489  	return p.parseBinaryExprTail(prec1, p.parseUnaryExpr())
  1490  }
  1491  
// parseBinaryExprTail folds operator-operand pairs following x into a
// left-associative tree of *ast.BinaryExpr, as long as the operator's
// precedence is at least prec1.
func (p *parser) parseBinaryExprTail(prec1 int, x ast.Expr) ast.Expr {
	for {
		op, prec := p.tokPrec()
		if prec < prec1 {
			// Next token binds less tightly: let the caller handle it.
			return x
		}
		c := p.openComments()
		c.pos = 1
		pos := p.expect(p.tok)
		// Note: the field initializers below run in order; the recursive
		// parseBinaryExpr call consumes the right operand with a higher
		// minimum precedence, yielding left associativity.
		x = c.closeExpr(p, &ast.BinaryExpr{
			X:     p.checkExpr(x),
			OpPos: pos,
			Op:    op,
			// Treat nested expressions as RHS.
			Y: p.checkExpr(p.parseBinaryExpr(prec + 1))})
	}
}
  1509  
// parseInterpolation parses an interpolated string: alternating string
// segments and parenthesized expressions. After each embedded
// expression the scanner is resumed to produce the next string
// segment.
func (p *parser) parseInterpolation() (expr ast.Expr) {
	c := p.openComments()
	defer func() { c.closeNode(p, expr) }()

	p.openList()
	defer p.closeList()

	cc := p.openComments()

	// First (possibly only) string segment.
	lit := p.lit
	pos := p.pos
	p.next()
	last := &ast.BasicLit{ValuePos: pos, Kind: token.STRING, Value: lit}
	exprs := []ast.Expr{last}

	for p.tok == token.LPAREN {
		c.pos = 1
		p.expect(token.LPAREN)
		cc.closeExpr(p, last)

		// The embedded expression between the parentheses.
		exprs = append(exprs, p.parseRHS())

		cc = p.openComments()
		if p.tok != token.RPAREN {
			p.errf(p.pos, "expected ')' for string interpolation")
		}
		// Resume scanning the string literal after the expression.
		lit = p.scanner.ResumeInterpolation()
		pos = p.pos
		p.next()
		last = &ast.BasicLit{
			ValuePos: pos,
			Kind:     token.STRING,
			Value:    lit,
		}
		exprs = append(exprs, last)
	}
	cc.closeExpr(p, last)
	return &ast.Interpolation{Elts: exprs}
}
  1549  
  1550  // Callers must check the result (using checkExpr), depending on context.
  1551  func (p *parser) parseExpr() (expr ast.Expr) {
  1552  	if p.trace {
  1553  		defer un(trace(p, "Expression"))
  1554  	}
  1555  
  1556  	c := p.openComments()
  1557  	defer func() { c.closeExpr(p, expr) }()
  1558  
  1559  	return p.parseBinaryExpr(token.LowestPrec + 1)
  1560  }
  1561  
  1562  func (p *parser) parseRHS() ast.Expr {
  1563  	x := p.checkExpr(p.parseExpr())
  1564  	return x
  1565  }
  1566  
  1567  // ----------------------------------------------------------------------------
  1568  // Declarations
  1569  
  1570  func isValidImport(lit string) bool {
  1571  	const illegalChars = `!"#$%&'()*,:;<=>?[\]^{|}` + "`\uFFFD"
  1572  	s, _ := literal.Unquote(lit) // go/scanner returns a legal string literal
  1573  	if p := strings.LastIndexByte(s, ':'); p >= 0 {
  1574  		s = s[:p]
  1575  	}
  1576  	for _, r := range s {
  1577  		if !unicode.IsGraphic(r) || unicode.IsSpace(r) || strings.ContainsRune(illegalChars, r) {
  1578  			return false
  1579  		}
  1580  	}
  1581  	return s != ""
  1582  }
  1583  
// parseImportSpec parses a single import specification: an optional
// name identifier followed by a quoted import path. The int argument
// is unused; it matches the iteration index passed by parseImports.
func (p *parser) parseImportSpec(_ int) *ast.ImportSpec {
	if p.trace {
		defer un(trace(p, "ImportSpec"))
	}

	c := p.openComments()

	// Optional local package name.
	var ident *ast.Ident
	if p.tok == token.IDENT {
		ident = p.parseIdent()
	}

	pos := p.pos
	var path string
	if p.tok == token.STRING {
		path = p.lit
		if !isValidImport(path) {
			p.errf(pos, "invalid import path: %s", path)
		}
		p.next()
		p.expectComma() // call before accessing p.linecomment
	} else {
		p.expect(token.STRING) // use expect() error handling
		if p.tok == token.COMMA {
			p.expectComma() // call before accessing p.linecomment
		}
	}
	// collect imports
	spec := &ast.ImportSpec{
		Name: ident,
		Path: &ast.BasicLit{ValuePos: pos, Kind: token.STRING, Value: path},
	}
	c.closeNode(p, spec)
	p.imports = append(p.imports, spec)

	return spec
}
  1621  
  1622  func (p *parser) parseImports() *ast.ImportDecl {
  1623  	if p.trace {
  1624  		defer un(trace(p, "Imports"))
  1625  	}
  1626  	c := p.openComments()
  1627  
  1628  	ident := p.parseIdent()
  1629  	var lparen, rparen token.Pos
  1630  	var list []*ast.ImportSpec
  1631  	if p.tok == token.LPAREN {
  1632  		lparen = p.pos
  1633  		p.next()
  1634  		p.openList()
  1635  		for iota := 0; p.tok != token.RPAREN && p.tok != token.EOF; iota++ {
  1636  			list = append(list, p.parseImportSpec(iota))
  1637  		}
  1638  		p.closeList()
  1639  		rparen = p.expect(token.RPAREN)
  1640  		p.expectComma()
  1641  	} else {
  1642  		list = append(list, p.parseImportSpec(0))
  1643  	}
  1644  
  1645  	d := &ast.ImportDecl{
  1646  		Import: ident.Pos(),
  1647  		Lparen: lparen,
  1648  		Specs:  list,
  1649  		Rparen: rparen,
  1650  	}
  1651  	c.closeNode(p, d)
  1652  	return d
  1653  }
  1654  
  1655  // ----------------------------------------------------------------------------
  1656  // Source files
  1657  
// parseFile parses a complete source file: optional file-level
// attributes, an optional package clause, import declarations, and the
// remaining top-level declarations. The packageClauseOnlyMode and
// importsOnlyMode flags cut parsing short at the corresponding point.
func (p *parser) parseFile() *ast.File {
	if p.trace {
		defer un(trace(p, "File"))
	}

	c := p.comments

	// Don't bother parsing the rest if we had errors scanning the first
	// token. Likely not a CUE source file at all.
	if p.errors != nil {
		return nil
	}
	p.openList()

	var decls []ast.Decl

	// File-level attributes preceding the package clause.
	for p.tok == token.ATTRIBUTE {
		decls = append(decls, p.parseAttribute())
		p.consumeDeclComma()
	}

	// The package clause is not a declaration: it does not appear in any
	// scope.
	if p.tok == token.IDENT && p.lit == "package" {
		c := p.openComments()

		pos := p.pos
		var name *ast.Ident
		p.expect(token.IDENT)
		name = p.parseIdent()
		if name.Name == "_" && p.mode&declarationErrorsMode != 0 {
			p.errf(p.pos, "invalid package name _")
		}

		pkg := &ast.Package{
			PackagePos: pos,
			Name:       name,
		}
		decls = append(decls, pkg)
		p.expectComma()
		c.closeNode(p, pkg)
	}

	// Attributes following the package clause.
	for p.tok == token.ATTRIBUTE {
		decls = append(decls, p.parseAttribute())
		p.consumeDeclComma()
	}

	if p.mode&packageClauseOnlyMode == 0 {
		// import decls
		for p.tok == token.IDENT && p.lit == "import" {
			decls = append(decls, p.parseImports())
		}

		if p.mode&importsOnlyMode == 0 {
			// rest of package decls
			// TODO: loop and allow multiple expressions.
			decls = append(decls, p.parseFieldList()...)
			p.expect(token.EOF)
		}
	}
	p.closeList()

	f := &ast.File{
		Imports: p.imports,
		Decls:   decls,
	}
	c.closeNode(p, f)
	return f
}