github.com/stingnevermore/go@v0.0.0-20180120041312-3810f5bfed72/src/text/template/parse/lex.go

     1  // Copyright 2011 The Go Authors. All rights reserved.
     2  // Use of this source code is governed by a BSD-style
     3  // license that can be found in the LICENSE file.
     4  
     5  package parse
     6  
     7  import (
     8  	"fmt"
     9  	"strings"
    10  	"unicode"
    11  	"unicode/utf8"
    12  )
    13  
    14  // item represents a token or text string returned from the scanner.
    15  type item struct {
    16  	typ  itemType // The type of this item.
    17  	pos  Pos      // The starting position, in bytes, of this item in the input string.
    18  	val  string   // The value of this item.
    19  	line int      // The line number at the start of this item.
    20  }
    21  
    22  func (i item) String() string {
    23  	switch {
    24  	case i.typ == itemEOF:
    25  		return "EOF"
    26  	case i.typ == itemError:
    27  		return i.val
    28  	case i.typ > itemKeyword:
    29  		return fmt.Sprintf("<%s>", i.val)
    30  	case len(i.val) > 10:
    31  		return fmt.Sprintf("%.10q...", i.val)
    32  	}
    33  	return fmt.Sprintf("%q", i.val)
    34  }
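// Illustrative note (not part of the original file): with the rules above, a
// keyword item such as itemRange prints as <range>, a long value such as
// "verylongidentifier" prints truncated as "verylongid"..., and anything else
// prints fully quoted.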
    35  
    36  // itemType identifies the type of lex items.
    37  type itemType int
    38  
    39  const (
    40  	itemError        itemType = iota // error occurred; value is text of error
    41  	itemBool                         // boolean constant
    42  	itemChar                         // printable ASCII character; grab bag for comma etc.
    43  	itemCharConstant                 // character constant
    44  	itemComplex                      // complex constant (1+2i); imaginary is just a number
    45  	itemColonEquals                  // colon-equals (':=') introducing a declaration
    46  	itemEOF
    47  	itemField      // alphanumeric identifier starting with '.'
    48  	itemIdentifier // alphanumeric identifier not starting with '.'
    49  	itemLeftDelim  // left action delimiter
    50  	itemLeftParen  // '(' inside action
    51  	itemNumber     // simple number, including imaginary
    52  	itemPipe       // pipe symbol
    53  	itemRawString  // raw quoted string (includes quotes)
    54  	itemRightDelim // right action delimiter
    55  	itemRightParen // ')' inside action
    56  	itemSpace      // run of spaces separating arguments
    57  	itemString     // quoted string (includes quotes)
    58  	itemText       // plain text
    59  	itemVariable   // variable starting with '$', such as '$' or '$1' or '$hello'
    60  	// Keywords appear after all the rest.
    61  	itemKeyword  // used only to delimit the keywords
    62  	itemBlock    // block keyword
    63  	itemBreak    // break keyword
    64  	itemContinue // continue keyword
    65  	itemDot      // the cursor, spelled '.'
    66  	itemDefine   // define keyword
    67  	itemElse     // else keyword
    68  	itemEnd      // end keyword
    69  	itemIf       // if keyword
    70  	itemNil      // the untyped nil constant, easiest to treat as a keyword
    71  	itemRange    // range keyword
    72  	itemTemplate // template keyword
    73  	itemWith     // with keyword
    74  )
    75  
    76  var key = map[string]itemType{
    77  	".":        itemDot,
    78  	"block":    itemBlock,
    79  	"break":    itemBreak,
    80  	"continue": itemContinue,
    81  	"define":   itemDefine,
    82  	"else":     itemElse,
    83  	"end":      itemEnd,
    84  	"if":       itemIf,
    85  	"range":    itemRange,
    86  	"nil":      itemNil,
    87  	"template": itemTemplate,
    88  	"with":     itemWith,
    89  }
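// Illustrative note (not part of the original file): lexIdentifier consults
// this map after scanning a word inside an action, so "range" is emitted as
// itemRange and "end" as itemEnd, while "true"/"false" become itemBool and any
// other word, such as "printf", falls through to itemIdentifier.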
    90  
    91  const eof = -1
    92  
    93  // Trimming spaces.
    94  // If the action begins "{{- " rather than "{{", then all space/tab/newlines
    95  // preceding the action are trimmed; conversely if it ends " -}}", the
    96  // space/tab/newlines following the action are trimmed. This is done
    97  // entirely in the lexer; the parser never sees it happen. We require an
    98  // ASCII space to be present to avoid ambiguity with things like
    99  // "{{-3}}". It reads better with the space present anyway. For
   100  // simplicity, only ASCII space does the job.
   101  const (
   102  	spaceChars      = " \t\r\n" // These are the space characters defined by Go itself.
   103  	leftTrimMarker  = "- "      // Attached to left delimiter, trims trailing spaces from preceding text.
   104  	rightTrimMarker = " -"      // Attached to right delimiter, trims leading spaces from following text.
   105  	trimMarkerLen   = Pos(len(leftTrimMarker))
   106  )
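// The function below is an illustrative sketch, not part of the original file:
// it shows the trim markers and comment handling together. Given the input
// "hello   {{- /* note */ -}}   world", the spaces around the action are
// trimmed and the comment produces no item at all, so only the text items
// "hello" and "world" (plus EOF) come out of the lexer.
func trimSketch() []string {
	l := lex("sketch", "hello   {{- /* note */ -}}   world", "", "")
	var texts []string
	for it := l.nextItem(); it.typ != itemEOF && it.typ != itemError; it = l.nextItem() {
		if it.typ == itemText {
			texts = append(texts, it.val)
		}
	}
	return texts // []string{"hello", "world"}
}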
   107  
   108  // stateFn represents the state of the scanner as a function that returns the next state.
   109  type stateFn func(*lexer) stateFn
   110  
   111  // lexer holds the state of the scanner.
   112  type lexer struct {
   113  	name       string    // the name of the input; used only for error reports
   114  	input      string    // the string being scanned
   115  	leftDelim  string    // start of action
   116  	rightDelim string    // end of action
   117  	pos        Pos       // current position in the input
   118  	start      Pos       // start position of this item
   119  	width      Pos       // width of last rune read from input
   120  	items      chan item // channel of scanned items
   121  	parenDepth int       // nesting depth of ( ) exprs
   122  	line       int       // 1+number of newlines seen
   123  }
   124  
   125  // next returns the next rune in the input.
   126  func (l *lexer) next() rune {
   127  	if int(l.pos) >= len(l.input) {
   128  		l.width = 0
   129  		return eof
   130  	}
   131  	r, w := utf8.DecodeRuneInString(l.input[l.pos:])
   132  	l.width = Pos(w)
   133  	l.pos += l.width
   134  	if r == '\n' {
   135  		l.line++
   136  	}
   137  	return r
   138  }
   139  
   140  // peek returns but does not consume the next rune in the input.
   141  func (l *lexer) peek() rune {
   142  	r := l.next()
   143  	l.backup()
   144  	return r
   145  }
   146  
   147  // backup steps back one rune. Can only be called once per call of next.
   148  func (l *lexer) backup() {
   149  	l.pos -= l.width
   150  	// Correct newline count.
   151  	if l.width == 1 && l.input[l.pos] == '\n' {
   152  		l.line--
   153  	}
   154  }
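// Illustrative note (not part of the original file): peek is simply next
// followed by backup, so after a peek the lexer is exactly where it started.
// backup itself must not be called twice in a row, because width only records
// the last rune read.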
   155  
   156  // emit passes an item back to the client.
   157  func (l *lexer) emit(t itemType) {
   158  	l.items <- item{t, l.start, l.input[l.start:l.pos], l.line}
   159  	// Some items contain text internally. If so, count their newlines.
   160  	switch t {
   161  	case itemText, itemRawString, itemLeftDelim, itemRightDelim:
   162  		l.line += strings.Count(l.input[l.start:l.pos], "\n")
   163  	}
   164  	l.start = l.pos
   165  }
   166  
   167  // ignore skips over the pending input before this point.
   168  func (l *lexer) ignore() {
   169  	l.line += strings.Count(l.input[l.start:l.pos], "\n")
   170  	l.start = l.pos
   171  }
   172  
   173  // accept consumes the next rune if it's from the valid set.
   174  func (l *lexer) accept(valid string) bool {
   175  	if strings.ContainsRune(valid, l.next()) {
   176  		return true
   177  	}
   178  	l.backup()
   179  	return false
   180  }
   181  
   182  // acceptRun consumes a run of runes from the valid set.
   183  func (l *lexer) acceptRun(valid string) {
   184  	for strings.ContainsRune(valid, l.next()) {
   185  	}
   186  	l.backup()
   187  }
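// Illustrative note (not part of the original file): accept and acceptRun are
// the building blocks of scanNumber below, e.g. accept("+-") takes an optional
// sign and acceptRun("0123456789") then consumes the longest run of digits,
// leaving the first non-digit unconsumed.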
   188  
   189  // errorf returns an error token and terminates the scan by passing
   190  // back a nil pointer that will be the next state, terminating l.nextItem.
   191  func (l *lexer) errorf(format string, args ...interface{}) stateFn {
   192  	l.items <- item{itemError, l.start, fmt.Sprintf(format, args...), l.line}
   193  	return nil
   194  }
   195  
   196  // nextItem returns the next item from the input.
   197  // Called by the parser, not in the lexing goroutine.
   198  func (l *lexer) nextItem() item {
   199  	return <-l.items
   200  }
   201  
   202  // drain drains the output so the lexing goroutine will exit.
   203  // Called by the parser, not in the lexing goroutine.
   204  func (l *lexer) drain() {
   205  	for range l.items {
   206  	}
   207  }
   208  
   209  // lex creates a new scanner for the input string.
   210  func lex(name, input, left, right string) *lexer {
   211  	if left == "" {
   212  		left = leftDelim
   213  	}
   214  	if right == "" {
   215  		right = rightDelim
   216  	}
   217  	l := &lexer{
   218  		name:       name,
   219  		input:      input,
   220  		leftDelim:  left,
   221  		rightDelim: right,
   222  		items:      make(chan item),
   223  		line:       1,
   224  	}
   225  	go l.run()
   226  	return l
   227  }
   228  
   229  // run runs the state machine for the lexer.
   230  func (l *lexer) run() {
   231  	for state := lexText; state != nil; {
   232  		state = state(l)
   233  	}
   234  	close(l.items)
   235  }
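// The function below is an illustrative sketch, not part of the original file:
// it shows the lex/nextItem protocol from the parser's side. lex starts the
// lexing goroutine, and the caller pulls items off the channel until it sees
// itemEOF or itemError, at which point run has closed (or is about to close)
// the channel. For example, collectSketch("{{.Name}}") yields itemLeftDelim,
// itemField, itemRightDelim, itemEOF.
func collectSketch(input string) []item {
	l := lex("sketch", input, "", "")
	var collected []item
	for {
		it := l.nextItem()
		collected = append(collected, it)
		if it.typ == itemEOF || it.typ == itemError {
			break
		}
	}
	return collected
}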
   236  
   237  // state functions
   238  
   239  const (
   240  	leftDelim    = "{{"
   241  	rightDelim   = "}}"
   242  	leftComment  = "/*"
   243  	rightComment = "*/"
   244  )
   245  
   246  // lexText scans until an opening action delimiter, "{{".
   247  func lexText(l *lexer) stateFn {
   248  	l.width = 0
   249  	if x := strings.Index(l.input[l.pos:], l.leftDelim); x >= 0 {
   250  		ldn := Pos(len(l.leftDelim))
   251  		l.pos += Pos(x)
   252  		trimLength := Pos(0)
   253  		if strings.HasPrefix(l.input[l.pos+ldn:], leftTrimMarker) {
   254  			trimLength = rightTrimLength(l.input[l.start:l.pos])
   255  		}
   256  		l.pos -= trimLength
   257  		if l.pos > l.start {
   258  			l.emit(itemText)
   259  		}
   260  		l.pos += trimLength
   261  		l.ignore()
   262  		return lexLeftDelim
   263  	} else {
   264  		l.pos = Pos(len(l.input))
   265  	}
   266  	// Correctly reached EOF.
   267  	if l.pos > l.start {
   268  		l.emit(itemText)
   269  	}
   270  	l.emit(itemEOF)
   271  	return nil
   272  }
   273  
   274  // rightTrimLength returns the length of the spaces at the end of the string.
   275  func rightTrimLength(s string) Pos {
   276  	return Pos(len(s) - len(strings.TrimRight(s, spaceChars)))
   277  }
   278  
   279  // atRightDelim reports whether the lexer is at a right delimiter, possibly preceded by a trim marker.
   280  func (l *lexer) atRightDelim() (delim, trimSpaces bool) {
   281  	if strings.HasPrefix(l.input[l.pos:], l.rightDelim) {
   282  		return true, false
   283  	}
    284  	// The right delim might be preceded by a trim marker.
   285  	if strings.HasPrefix(l.input[l.pos:], rightTrimMarker) &&
   286  		strings.HasPrefix(l.input[l.pos+trimMarkerLen:], l.rightDelim) {
   287  		return true, true
   288  	}
   289  	return false, false
   290  }
   291  
   292  // leftTrimLength returns the length of the spaces at the beginning of the string.
   293  func leftTrimLength(s string) Pos {
   294  	return Pos(len(s) - len(strings.TrimLeft(s, spaceChars)))
   295  }
   296  
   297  // lexLeftDelim scans the left delimiter, which is known to be present, possibly with a trim marker.
   298  func lexLeftDelim(l *lexer) stateFn {
   299  	l.pos += Pos(len(l.leftDelim))
   300  	trimSpace := strings.HasPrefix(l.input[l.pos:], leftTrimMarker)
   301  	afterMarker := Pos(0)
   302  	if trimSpace {
   303  		afterMarker = trimMarkerLen
   304  	}
   305  	if strings.HasPrefix(l.input[l.pos+afterMarker:], leftComment) {
   306  		l.pos += afterMarker
   307  		l.ignore()
   308  		return lexComment
   309  	}
   310  	l.emit(itemLeftDelim)
   311  	l.pos += afterMarker
   312  	l.ignore()
   313  	l.parenDepth = 0
   314  	return lexInsideAction
   315  }
   316  
   317  // lexComment scans a comment. The left comment marker is known to be present.
   318  func lexComment(l *lexer) stateFn {
   319  	l.pos += Pos(len(leftComment))
   320  	i := strings.Index(l.input[l.pos:], rightComment)
   321  	if i < 0 {
   322  		return l.errorf("unclosed comment")
   323  	}
   324  	l.pos += Pos(i + len(rightComment))
   325  	delim, trimSpace := l.atRightDelim()
   326  	if !delim {
   327  		return l.errorf("comment ends before closing delimiter")
   328  	}
   329  	if trimSpace {
   330  		l.pos += trimMarkerLen
   331  	}
   332  	l.pos += Pos(len(l.rightDelim))
   333  	if trimSpace {
   334  		l.pos += leftTrimLength(l.input[l.pos:])
   335  	}
   336  	l.ignore()
   337  	return lexText
   338  }
   339  
   340  // lexRightDelim scans the right delimiter, which is known to be present, possibly with a trim marker.
   341  func lexRightDelim(l *lexer) stateFn {
   342  	trimSpace := strings.HasPrefix(l.input[l.pos:], rightTrimMarker)
   343  	if trimSpace {
   344  		l.pos += trimMarkerLen
   345  		l.ignore()
   346  	}
   347  	l.pos += Pos(len(l.rightDelim))
   348  	l.emit(itemRightDelim)
   349  	if trimSpace {
   350  		l.pos += leftTrimLength(l.input[l.pos:])
   351  		l.ignore()
   352  	}
   353  	return lexText
   354  }
   355  
   356  // lexInsideAction scans the elements inside action delimiters.
   357  func lexInsideAction(l *lexer) stateFn {
   358  	// Either number, quoted string, or identifier.
   359  	// Spaces separate arguments; runs of spaces turn into itemSpace.
   360  	// Pipe symbols separate and are emitted.
   361  	delim, _ := l.atRightDelim()
   362  	if delim {
   363  		if l.parenDepth == 0 {
   364  			return lexRightDelim
   365  		}
   366  		return l.errorf("unclosed left paren")
   367  	}
   368  	switch r := l.next(); {
   369  	case r == eof || isEndOfLine(r):
   370  		return l.errorf("unclosed action")
   371  	case isSpace(r):
   372  		return lexSpace
   373  	case r == ':':
   374  		if l.next() != '=' {
   375  			return l.errorf("expected :=")
   376  		}
   377  		l.emit(itemColonEquals)
   378  	case r == '|':
   379  		l.emit(itemPipe)
   380  	case r == '"':
   381  		return lexQuote
   382  	case r == '`':
   383  		return lexRawQuote
   384  	case r == '$':
   385  		return lexVariable
   386  	case r == '\'':
   387  		return lexChar
   388  	case r == '.':
   389  		// special look-ahead for ".field" so we don't break l.backup().
   390  		if l.pos < Pos(len(l.input)) {
   391  			r := l.input[l.pos]
   392  			if r < '0' || '9' < r {
   393  				return lexField
   394  			}
   395  		}
   396  		fallthrough // '.' can start a number.
   397  	case r == '+' || r == '-' || ('0' <= r && r <= '9'):
   398  		l.backup()
   399  		return lexNumber
   400  	case isAlphaNumeric(r):
   401  		l.backup()
   402  		return lexIdentifier
   403  	case r == '(':
   404  		l.emit(itemLeftParen)
   405  		l.parenDepth++
   406  	case r == ')':
   407  		l.emit(itemRightParen)
   408  		l.parenDepth--
   409  		if l.parenDepth < 0 {
   410  			return l.errorf("unexpected right paren %#U", r)
   411  		}
   412  	case r <= unicode.MaxASCII && unicode.IsPrint(r):
   413  		l.emit(itemChar)
   414  		return lexInsideAction
   415  	default:
   416  		return l.errorf("unrecognized character in action: %#U", r)
   417  	}
   418  	return lexInsideAction
   419  }
   420  
   421  // lexSpace scans a run of space characters.
   422  // One space has already been seen.
   423  func lexSpace(l *lexer) stateFn {
   424  	for isSpace(l.peek()) {
   425  		l.next()
   426  	}
   427  	l.emit(itemSpace)
   428  	return lexInsideAction
   429  }
   430  
   431  // lexIdentifier scans an alphanumeric.
   432  func lexIdentifier(l *lexer) stateFn {
   433  Loop:
   434  	for {
   435  		switch r := l.next(); {
   436  		case isAlphaNumeric(r):
   437  			// absorb.
   438  		default:
   439  			l.backup()
   440  			word := l.input[l.start:l.pos]
   441  			if !l.atTerminator() {
   442  				return l.errorf("bad character %#U", r)
   443  			}
   444  			switch {
   445  			case key[word] > itemKeyword:
   446  				l.emit(key[word])
   447  			case word[0] == '.':
   448  				l.emit(itemField)
   449  			case word == "true", word == "false":
   450  				l.emit(itemBool)
   451  			default:
   452  				l.emit(itemIdentifier)
   453  			}
   454  			break Loop
   455  		}
   456  	}
   457  	return lexInsideAction
   458  }
   459  
   460  // lexField scans a field: .Alphanumeric.
   461  // The . has been scanned.
   462  func lexField(l *lexer) stateFn {
   463  	return lexFieldOrVariable(l, itemField)
   464  }
   465  
   466  // lexVariable scans a Variable: $Alphanumeric.
   467  // The $ has been scanned.
   468  func lexVariable(l *lexer) stateFn {
   469  	if l.atTerminator() { // Nothing interesting follows -> "$".
   470  		l.emit(itemVariable)
   471  		return lexInsideAction
   472  	}
   473  	return lexFieldOrVariable(l, itemVariable)
   474  }
   475  
    476  // lexFieldOrVariable scans a field or variable: [.$]Alphanumeric.
   477  // The . or $ has been scanned.
   478  func lexFieldOrVariable(l *lexer, typ itemType) stateFn {
   479  	if l.atTerminator() { // Nothing interesting follows -> "." or "$".
   480  		if typ == itemVariable {
   481  			l.emit(itemVariable)
   482  		} else {
   483  			l.emit(itemDot)
   484  		}
   485  		return lexInsideAction
   486  	}
   487  	var r rune
   488  	for {
   489  		r = l.next()
   490  		if !isAlphaNumeric(r) {
   491  			l.backup()
   492  			break
   493  		}
   494  	}
   495  	if !l.atTerminator() {
   496  		return l.errorf("bad character %#U", r)
   497  	}
   498  	l.emit(typ)
   499  	return lexInsideAction
   500  }
   501  
    502  // atTerminator reports whether the input is at a valid termination character to
   503  // appear after an identifier. Breaks .X.Y into two pieces. Also catches cases
   504  // like "$x+2" not being acceptable without a space, in case we decide one
   505  // day to implement arithmetic.
   506  func (l *lexer) atTerminator() bool {
   507  	r := l.peek()
   508  	if isSpace(r) || isEndOfLine(r) {
   509  		return true
   510  	}
   511  	switch r {
   512  	case eof, '.', ',', '|', ':', ')', '(':
   513  		return true
   514  	}
   515  	// Does r start the delimiter? This can be ambiguous (with delim=="//", $x/2 will
   516  	// succeed but should fail) but only in extremely rare cases caused by willfully
   517  	// bad choice of delimiter.
   518  	if rd, _ := utf8.DecodeRuneInString(l.rightDelim); rd == r {
   519  		return true
   520  	}
   521  	return false
   522  }
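// Illustrative note (not part of the original file): because '.' is a
// terminator, an input such as "{{$x.Field}}" lexes as itemVariable "$x"
// followed by itemField ".Field" rather than as one token; it is the parser,
// not the lexer, that reassembles the chain.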
   523  
   524  // lexChar scans a character constant. The initial quote is already
   525  // scanned. Syntax checking is done by the parser.
   526  func lexChar(l *lexer) stateFn {
   527  Loop:
   528  	for {
   529  		switch l.next() {
   530  		case '\\':
   531  			if r := l.next(); r != eof && r != '\n' {
   532  				break
   533  			}
   534  			fallthrough
   535  		case eof, '\n':
   536  			return l.errorf("unterminated character constant")
   537  		case '\'':
   538  			break Loop
   539  		}
   540  	}
   541  	l.emit(itemCharConstant)
   542  	return lexInsideAction
   543  }
   544  
   545  // lexNumber scans a number: decimal, octal, hex, float, or imaginary. This
   546  // isn't a perfect number scanner - for instance it accepts "." and "0x0.2"
   547  // and "089" - but when it's wrong the input is invalid and the parser (via
   548  // strconv) will notice.
   549  func lexNumber(l *lexer) stateFn {
   550  	if !l.scanNumber() {
   551  		return l.errorf("bad number syntax: %q", l.input[l.start:l.pos])
   552  	}
   553  	if sign := l.peek(); sign == '+' || sign == '-' {
   554  		// Complex: 1+2i. No spaces, must end in 'i'.
   555  		if !l.scanNumber() || l.input[l.pos-1] != 'i' {
   556  			return l.errorf("bad number syntax: %q", l.input[l.start:l.pos])
   557  		}
   558  		l.emit(itemComplex)
   559  	} else {
   560  		l.emit(itemNumber)
   561  	}
   562  	return lexInsideAction
   563  }
   564  
   565  func (l *lexer) scanNumber() bool {
   566  	// Optional leading sign.
   567  	l.accept("+-")
   568  	// Is it hex?
   569  	digits := "0123456789"
   570  	if l.accept("0") && l.accept("xX") {
   571  		digits = "0123456789abcdefABCDEF"
   572  	}
   573  	l.acceptRun(digits)
   574  	if l.accept(".") {
   575  		l.acceptRun(digits)
   576  	}
   577  	if l.accept("eE") {
   578  		l.accept("+-")
   579  		l.acceptRun("0123456789")
   580  	}
   581  	// Is it imaginary?
   582  	l.accept("i")
   583  	// Next thing mustn't be alphanumeric.
   584  	if isAlphaNumeric(l.peek()) {
   585  		l.next()
   586  		return false
   587  	}
   588  	return true
   589  }
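// The function below is an illustrative sketch, not part of the original file:
// it reports the item type produced for a single number inside an action. For
// example, numberSketch("{{0x1F}}") is itemNumber, numberSketch("{{1+2i}}") is
// itemComplex, and numberSketch("{{3k}}") is itemError because 'k' follows the
// digits.
func numberSketch(input string) itemType {
	l := lex("sketch", input, "", "")
	for {
		it := l.nextItem()
		switch it.typ {
		case itemNumber, itemComplex, itemError, itemEOF:
			l.drain() // let the lexing goroutine finish and exit
			return it.typ
		}
	}
}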
   590  
   591  // lexQuote scans a quoted string.
   592  func lexQuote(l *lexer) stateFn {
   593  Loop:
   594  	for {
   595  		switch l.next() {
   596  		case '\\':
   597  			if r := l.next(); r != eof && r != '\n' {
   598  				break
   599  			}
   600  			fallthrough
   601  		case eof, '\n':
   602  			return l.errorf("unterminated quoted string")
   603  		case '"':
   604  			break Loop
   605  		}
   606  	}
   607  	l.emit(itemString)
   608  	return lexInsideAction
   609  }
   610  
   611  // lexRawQuote scans a raw quoted string.
   612  func lexRawQuote(l *lexer) stateFn {
   613  	startLine := l.line
   614  Loop:
   615  	for {
   616  		switch l.next() {
   617  		case eof:
   618  			// Restore line number to location of opening quote.
   619  			// We will error out so it's ok just to overwrite the field.
   620  			l.line = startLine
   621  			return l.errorf("unterminated raw quoted string")
   622  		case '`':
   623  			break Loop
   624  		}
   625  	}
   626  	l.emit(itemRawString)
   627  	return lexInsideAction
   628  }
   629  
   630  // isSpace reports whether r is a space character.
   631  func isSpace(r rune) bool {
   632  	return r == ' ' || r == '\t'
   633  }
   634  
   635  // isEndOfLine reports whether r is an end-of-line character.
   636  func isEndOfLine(r rune) bool {
   637  	return r == '\r' || r == '\n'
   638  }
   639  
   640  // isAlphaNumeric reports whether r is an alphabetic, digit, or underscore.
   641  func isAlphaNumeric(r rune) bool {
   642  	return r == '_' || unicode.IsLetter(r) || unicode.IsDigit(r)
   643  }