github.com/hlts2/go@v0.0.0-20170904000733-812b34efaed8/src/text/template/parse/lex.go

// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package parse

import (
	"fmt"
	"strings"
	"unicode"
	"unicode/utf8"
)

// item represents a token or text string returned from the scanner.
type item struct {
	typ  itemType // The type of this item.
	pos  Pos      // The starting position, in bytes, of this item in the input string.
	val  string   // The value of this item.
	line int      // The line number at the start of this item.
}

func (i item) String() string {
	switch {
	case i.typ == itemEOF:
		return "EOF"
	case i.typ == itemError:
		return i.val
	case i.typ > itemKeyword:
		return fmt.Sprintf("<%s>", i.val)
	case len(i.val) > 10:
		return fmt.Sprintf("%.10q...", i.val)
	}
	return fmt.Sprintf("%q", i.val)
}

// itemType identifies the type of lex items.
type itemType int

const (
	itemError        itemType = iota // error occurred; value is text of error
	itemBool                         // boolean constant
	itemChar                         // printable ASCII character; grab bag for comma etc.
	itemCharConstant                 // character constant
	itemComplex                      // complex constant (1+2i); imaginary is just a number
	itemColonEquals                  // colon-equals (':=') introducing a declaration
	itemEOF
	itemField      // alphanumeric identifier starting with '.'
	itemIdentifier // alphanumeric identifier not starting with '.'
	itemLeftDelim  // left action delimiter
	itemLeftParen  // '(' inside action
	itemNumber     // simple number, including imaginary
	itemPipe       // pipe symbol
	itemRawString  // raw quoted string (includes quotes)
	itemRightDelim // right action delimiter
	itemRightParen // ')' inside action
	itemSpace      // run of spaces separating arguments
	itemString     // quoted string (includes quotes)
	itemText       // plain text
	itemVariable   // variable starting with '$', such as '$' or '$1' or '$hello'
	// Keywords appear after all the rest.
	itemKeyword  // used only to delimit the keywords
	itemBlock    // block keyword
	itemDot      // the cursor, spelled '.'
	itemDefine   // define keyword
	itemElse     // else keyword
	itemEnd      // end keyword
	itemIf       // if keyword
	itemNil      // the untyped nil constant, easiest to treat as a keyword
	itemRange    // range keyword
	itemTemplate // template keyword
	itemWith     // with keyword
)

var key = map[string]itemType{
	".":        itemDot,
	"block":    itemBlock,
	"define":   itemDefine,
	"else":     itemElse,
	"end":      itemEnd,
	"if":       itemIf,
	"range":    itemRange,
	"nil":      itemNil,
	"template": itemTemplate,
	"with":     itemWith,
}

const eof = -1

// Trimming spaces.
// If the action begins "{{- " rather than "{{", then all space/tab/newlines
// preceding the action are trimmed; conversely if it ends " -}}" the
// leading spaces are trimmed. This is done entirely in the lexer; the
// parser never sees it happen. We require an ASCII space to be
// present to avoid ambiguity with things like "{{-3}}". It reads
// better with the space present anyway. For simplicity, only ASCII
// space does the job.
const (
	spaceChars      = " \t\r\n" // These are the space characters defined by Go itself.
	leftTrimMarker  = "- "      // Attached to left delimiter, trims trailing spaces from preceding text.
	rightTrimMarker = " -"      // Attached to right delimiter, trims leading spaces from following text.
	trimMarkerLen   = Pos(len(leftTrimMarker))
)
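
// Illustrative sketch, not part of the original file: how the trim markers
// described above look from a caller's point of view, using the lex and
// nextItem helpers defined later in this file. The function name and the
// sample input are assumptions made up for demonstration.
func exampleTrimMarkers() []item {
	// "{{- " trims the spaces before the action; " -}}" trims the space and
	// newline after it, so the surrounding text items come back trimmed.
	l := lex("trim", "hello   {{- .Name -}} \nworld", "", "")
	var items []item
	for {
		it := l.nextItem()
		items = append(items, it)
		if it.typ == itemEOF || it.typ == itemError {
			break
		}
	}
	// Expected, roughly: itemText "hello", itemLeftDelim, itemField ".Name",
	// itemRightDelim, itemText "world", itemEOF.
	return items
}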

// stateFn represents the state of the scanner as a function that returns the next state.
type stateFn func(*lexer) stateFn

// lexer holds the state of the scanner.
type lexer struct {
	name       string    // the name of the input; used only for error reports
	input      string    // the string being scanned
	leftDelim  string    // start of action
	rightDelim string    // end of action
	state      stateFn   // the next lexing function to enter
	pos        Pos       // current position in the input
	start      Pos       // start position of this item
	width      Pos       // width of last rune read from input
	lastPos    Pos       // position of most recent item returned by nextItem
	items      chan item // channel of scanned items
	parenDepth int       // nesting depth of ( ) exprs
	line       int       // 1+number of newlines seen
}

// next returns the next rune in the input.
func (l *lexer) next() rune {
	if int(l.pos) >= len(l.input) {
		l.width = 0
		return eof
	}
	r, w := utf8.DecodeRuneInString(l.input[l.pos:])
	l.width = Pos(w)
	l.pos += l.width
	if r == '\n' {
		l.line++
	}
	return r
}

// peek returns but does not consume the next rune in the input.
func (l *lexer) peek() rune {
	r := l.next()
	l.backup()
	return r
}

// backup steps back one rune. Can only be called once per call of next.
func (l *lexer) backup() {
	l.pos -= l.width
	// Correct newline count.
	if l.width == 1 && l.input[l.pos] == '\n' {
		l.line--
	}
}

// emit passes an item back to the client.
func (l *lexer) emit(t itemType) {
	l.items <- item{t, l.start, l.input[l.start:l.pos], l.line}
	// Some items contain text internally. If so, count their newlines.
	switch t {
	case itemText, itemRawString, itemLeftDelim, itemRightDelim:
		l.line += strings.Count(l.input[l.start:l.pos], "\n")
	}
	l.start = l.pos
}

// ignore skips over the pending input before this point.
func (l *lexer) ignore() {
	l.start = l.pos
}

// accept consumes the next rune if it's from the valid set.
func (l *lexer) accept(valid string) bool {
	if strings.ContainsRune(valid, l.next()) {
		return true
	}
	l.backup()
	return false
}

// acceptRun consumes a run of runes from the valid set.
func (l *lexer) acceptRun(valid string) {
	for strings.ContainsRune(valid, l.next()) {
	}
	l.backup()
}

// errorf returns an error token and terminates the scan by passing
// back a nil pointer that will be the next state, terminating l.nextItem.
func (l *lexer) errorf(format string, args ...interface{}) stateFn {
	l.items <- item{itemError, l.start, fmt.Sprintf(format, args...), l.line}
	return nil
}

// nextItem returns the next item from the input.
// Called by the parser, not in the lexing goroutine.
func (l *lexer) nextItem() item {
	item := <-l.items
	l.lastPos = item.pos
	return item
}

// drain drains the output so the lexing goroutine will exit.
// Called by the parser, not in the lexing goroutine.
func (l *lexer) drain() {
	for range l.items {
	}
}
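
// Illustrative sketch, not part of the original file: the parser-side pattern
// the channel API above is designed for. nextItem pulls tokens on demand; if
// the consumer stops early (here, on the first error), drain lets the lexing
// goroutine finish sending on the unbuffered items channel and exit instead
// of blocking forever. The function name is a made-up example.
func exampleConsumeUntilError(l *lexer) (stopped bool) {
	for {
		switch it := l.nextItem(); it.typ {
		case itemError:
			l.drain() // unblock and release the lexing goroutine
			return true
		case itemEOF:
			return false
		}
	}
}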

// lex creates a new scanner for the input string.
func lex(name, input, left, right string) *lexer {
	if left == "" {
		left = leftDelim
	}
	if right == "" {
		right = rightDelim
	}
	l := &lexer{
		name:       name,
		input:      input,
		leftDelim:  left,
		rightDelim: right,
		items:      make(chan item),
		line:       1,
	}
	go l.run()
	return l
}

// run runs the state machine for the lexer.
func (l *lexer) run() {
	for l.state = lexText; l.state != nil; {
		l.state = l.state(l)
	}
	close(l.items)
}

// state functions

const (
	leftDelim    = "{{"
	rightDelim   = "}}"
	leftComment  = "/*"
	rightComment = "*/"
)

// lexText scans until an opening action delimiter, "{{".
func lexText(l *lexer) stateFn {
	l.width = 0
	if x := strings.Index(l.input[l.pos:], l.leftDelim); x >= 0 {
		ldn := Pos(len(l.leftDelim))
		l.pos += Pos(x)
		trimLength := Pos(0)
		if strings.HasPrefix(l.input[l.pos+ldn:], leftTrimMarker) {
			trimLength = rightTrimLength(l.input[l.start:l.pos])
		}
		l.pos -= trimLength
		if l.pos > l.start {
			l.emit(itemText)
		}
		l.pos += trimLength
		l.ignore()
		return lexLeftDelim
	} else {
		l.pos = Pos(len(l.input))
	}
	// Correctly reached EOF.
	if l.pos > l.start {
		l.emit(itemText)
	}
	l.emit(itemEOF)
	return nil
}

// rightTrimLength returns the length of the spaces at the end of the string.
func rightTrimLength(s string) Pos {
	return Pos(len(s) - len(strings.TrimRight(s, spaceChars)))
}

// atRightDelim reports whether the lexer is at a right delimiter, possibly preceded by a trim marker.
func (l *lexer) atRightDelim() (delim, trimSpaces bool) {
	if strings.HasPrefix(l.input[l.pos:], l.rightDelim) {
		return true, false
	}
	// The right delim might have the marker before.
	if strings.HasPrefix(l.input[l.pos:], rightTrimMarker) &&
		strings.HasPrefix(l.input[l.pos+trimMarkerLen:], l.rightDelim) {
		return true, true
	}
	return false, false
}

// leftTrimLength returns the length of the spaces at the beginning of the string.
func leftTrimLength(s string) Pos {
	return Pos(len(s) - len(strings.TrimLeft(s, spaceChars)))
}

// lexLeftDelim scans the left delimiter, which is known to be present, possibly with a trim marker.
func lexLeftDelim(l *lexer) stateFn {
	l.pos += Pos(len(l.leftDelim))
	trimSpace := strings.HasPrefix(l.input[l.pos:], leftTrimMarker)
	afterMarker := Pos(0)
	if trimSpace {
		afterMarker = trimMarkerLen
	}
	if strings.HasPrefix(l.input[l.pos+afterMarker:], leftComment) {
		l.pos += afterMarker
		l.ignore()
		return lexComment
	}
	l.emit(itemLeftDelim)
	l.pos += afterMarker
	l.ignore()
	l.parenDepth = 0
	return lexInsideAction
}
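
// Illustrative sketch, not part of the original file: lex falls back to the
// "{{" and "}}" constants when the delimiter arguments are empty, so custom
// delimiters only change what lexText searches for. The inputs and the helper
// name are assumptions made up for demonstration.
func exampleCustomDelims() (std, alt []item) {
	collect := func(l *lexer) (items []item) {
		for {
			it := l.nextItem()
			items = append(items, it)
			if it.typ == itemEOF || it.typ == itemError {
				return items
			}
		}
	}
	std = collect(lex("std", "a {{.F}} b", "", ""))
	// With a custom pair, "{{...}}" is no longer special and lexes as plain text.
	alt = collect(lex("alt", "a <<.F>> and {{not a delim}}", "<<", ">>"))
	return std, alt
}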

// lexComment scans a comment. The left comment marker is known to be present.
func lexComment(l *lexer) stateFn {
	l.pos += Pos(len(leftComment))
	i := strings.Index(l.input[l.pos:], rightComment)
	if i < 0 {
		return l.errorf("unclosed comment")
	}
	l.pos += Pos(i + len(rightComment))
	delim, trimSpace := l.atRightDelim()
	if !delim {
		return l.errorf("comment ends before closing delimiter")
	}
	if trimSpace {
		l.pos += trimMarkerLen
	}
	l.pos += Pos(len(l.rightDelim))
	if trimSpace {
		l.pos += leftTrimLength(l.input[l.pos:])
	}
	l.ignore()
	return lexText
}

// lexRightDelim scans the right delimiter, which is known to be present, possibly with a trim marker.
func lexRightDelim(l *lexer) stateFn {
	trimSpace := strings.HasPrefix(l.input[l.pos:], rightTrimMarker)
	if trimSpace {
		l.pos += trimMarkerLen
		l.ignore()
	}
	l.pos += Pos(len(l.rightDelim))
	l.emit(itemRightDelim)
	if trimSpace {
		l.pos += leftTrimLength(l.input[l.pos:])
		l.ignore()
	}
	return lexText
}

// lexInsideAction scans the elements inside action delimiters.
func lexInsideAction(l *lexer) stateFn {
	// Either number, quoted string, or identifier.
	// Spaces separate arguments; runs of spaces turn into itemSpace.
	// Pipe symbols separate and are emitted.
	delim, _ := l.atRightDelim()
	if delim {
		if l.parenDepth == 0 {
			return lexRightDelim
		}
		return l.errorf("unclosed left paren")
	}
	switch r := l.next(); {
	case r == eof || isEndOfLine(r):
		return l.errorf("unclosed action")
	case isSpace(r):
		return lexSpace
	case r == ':':
		if l.next() != '=' {
			return l.errorf("expected :=")
		}
		l.emit(itemColonEquals)
	case r == '|':
		l.emit(itemPipe)
	case r == '"':
		return lexQuote
	case r == '`':
		return lexRawQuote
	case r == '$':
		return lexVariable
	case r == '\'':
		return lexChar
	case r == '.':
		// special look-ahead for ".field" so we don't break l.backup().
		if l.pos < Pos(len(l.input)) {
			r := l.input[l.pos]
			if r < '0' || '9' < r {
				return lexField
			}
		}
		fallthrough // '.' can start a number.
	case r == '+' || r == '-' || ('0' <= r && r <= '9'):
		l.backup()
		return lexNumber
	case isAlphaNumeric(r):
		l.backup()
		return lexIdentifier
	case r == '(':
		l.emit(itemLeftParen)
		l.parenDepth++
	case r == ')':
		l.emit(itemRightParen)
		l.parenDepth--
		if l.parenDepth < 0 {
			return l.errorf("unexpected right paren %#U", r)
		}
	case r <= unicode.MaxASCII && unicode.IsPrint(r):
		l.emit(itemChar)
		return lexInsideAction
	default:
		return l.errorf("unrecognized character in action: %#U", r)
	}
	return lexInsideAction
}

// lexSpace scans a run of space characters.
// One space has already been seen.
func lexSpace(l *lexer) stateFn {
	for isSpace(l.peek()) {
		l.next()
	}
	l.emit(itemSpace)
	return lexInsideAction
}
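
// Illustrative sketch, not part of the original file: the token kinds
// lexInsideAction dispatches to for a representative action. The input and
// the expected sequence are assumptions written out for demonstration; the
// package's tests are the authoritative reference.
func exampleActionTokens() []itemType {
	l := lex("action", `{{with $x := "hi" | print}}{{$x}}{{end}}`, "", "")
	var kinds []itemType
	for {
		it := l.nextItem()
		kinds = append(kinds, it.typ)
		if it.typ == itemEOF || it.typ == itemError {
			break
		}
	}
	// Expected, roughly: itemLeftDelim, itemWith, itemSpace, itemVariable,
	// itemSpace, itemColonEquals, itemSpace, itemString, itemSpace, itemPipe,
	// itemSpace, itemIdentifier, itemRightDelim, and so on through itemEOF.
	return kinds
}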

// lexIdentifier scans an alphanumeric identifier.
func lexIdentifier(l *lexer) stateFn {
Loop:
	for {
		switch r := l.next(); {
		case isAlphaNumeric(r):
			// absorb.
		default:
			l.backup()
			word := l.input[l.start:l.pos]
			if !l.atTerminator() {
				return l.errorf("bad character %#U", r)
			}
			switch {
			case key[word] > itemKeyword:
				l.emit(key[word])
			case word[0] == '.':
				l.emit(itemField)
			case word == "true", word == "false":
				l.emit(itemBool)
			default:
				l.emit(itemIdentifier)
			}
			break Loop
		}
	}
	return lexInsideAction
}

// lexField scans a field: .Alphanumeric.
// The . has been scanned.
func lexField(l *lexer) stateFn {
	return lexFieldOrVariable(l, itemField)
}

// lexVariable scans a variable: $Alphanumeric.
// The $ has been scanned.
func lexVariable(l *lexer) stateFn {
	if l.atTerminator() { // Nothing interesting follows -> "$".
		l.emit(itemVariable)
		return lexInsideAction
	}
	return lexFieldOrVariable(l, itemVariable)
}

// lexFieldOrVariable scans a field or variable: [.$]Alphanumeric.
// The . or $ has been scanned.
func lexFieldOrVariable(l *lexer, typ itemType) stateFn {
	if l.atTerminator() { // Nothing interesting follows -> "." or "$".
		if typ == itemVariable {
			l.emit(itemVariable)
		} else {
			l.emit(itemDot)
		}
		return lexInsideAction
	}
	var r rune
	for {
		r = l.next()
		if !isAlphaNumeric(r) {
			l.backup()
			break
		}
	}
	if !l.atTerminator() {
		return l.errorf("bad character %#U", r)
	}
	l.emit(typ)
	return lexInsideAction
}

// atTerminator reports whether the input is at a valid termination character to
// appear after an identifier. Breaks .X.Y into two pieces. Also catches cases
// like "$x+2" not being acceptable without a space, in case we decide one
// day to implement arithmetic.
func (l *lexer) atTerminator() bool {
	r := l.peek()
	if isSpace(r) || isEndOfLine(r) {
		return true
	}
	switch r {
	case eof, '.', ',', '|', ':', ')', '(':
		return true
	}
	// Does r start the delimiter? This can be ambiguous (with delim=="//", $x/2 will
	// succeed but should fail) but only in extremely rare cases caused by willfully
	// bad choice of delimiter.
	if rd, _ := utf8.DecodeRuneInString(l.rightDelim); rd == r {
		return true
	}
	return false
}

// lexChar scans a character constant. The initial quote is already
// scanned. Syntax checking is done by the parser.
func lexChar(l *lexer) stateFn {
Loop:
	for {
		switch l.next() {
		case '\\':
			if r := l.next(); r != eof && r != '\n' {
				break
			}
			fallthrough
		case eof, '\n':
			return l.errorf("unterminated character constant")
		case '\'':
			break Loop
		}
	}
	l.emit(itemCharConstant)
	return lexInsideAction
}
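
// Illustrative sketch, not part of the original file: because atTerminator
// treats '.' as a terminator, a chained reference such as ".X.Y" lexes as two
// itemField tokens rather than one, and the parser reassembles the chain. The
// function name and expected values are assumptions for demonstration.
func exampleChainedFields() []item {
	l := lex("fields", "{{.X.Y}}", "", "")
	var items []item
	for {
		it := l.nextItem()
		items = append(items, it)
		if it.typ == itemEOF || it.typ == itemError {
			break
		}
	}
	// Expected, roughly: itemLeftDelim "{{", itemField ".X", itemField ".Y",
	// itemRightDelim "}}", itemEOF.
	return items
}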

// lexNumber scans a number: decimal, octal, hex, float, or imaginary. This
// isn't a perfect number scanner - for instance it accepts "." and "0x0.2"
// and "089" - but when it's wrong the input is invalid and the parser (via
// strconv) will notice.
func lexNumber(l *lexer) stateFn {
	if !l.scanNumber() {
		return l.errorf("bad number syntax: %q", l.input[l.start:l.pos])
	}
	if sign := l.peek(); sign == '+' || sign == '-' {
		// Complex: 1+2i. No spaces, must end in 'i'.
		if !l.scanNumber() || l.input[l.pos-1] != 'i' {
			return l.errorf("bad number syntax: %q", l.input[l.start:l.pos])
		}
		l.emit(itemComplex)
	} else {
		l.emit(itemNumber)
	}
	return lexInsideAction
}

func (l *lexer) scanNumber() bool {
	// Optional leading sign.
	l.accept("+-")
	// Is it hex?
	digits := "0123456789"
	if l.accept("0") && l.accept("xX") {
		digits = "0123456789abcdefABCDEF"
	}
	l.acceptRun(digits)
	if l.accept(".") {
		l.acceptRun(digits)
	}
	if l.accept("eE") {
		l.accept("+-")
		l.acceptRun("0123456789")
	}
	// Is it imaginary?
	l.accept("i")
	// Next thing mustn't be alphanumeric.
	if isAlphaNumeric(l.peek()) {
		l.next()
		return false
	}
	return true
}

// lexQuote scans a quoted string.
func lexQuote(l *lexer) stateFn {
Loop:
	for {
		switch l.next() {
		case '\\':
			if r := l.next(); r != eof && r != '\n' {
				break
			}
			fallthrough
		case eof, '\n':
			return l.errorf("unterminated quoted string")
		case '"':
			break Loop
		}
	}
	l.emit(itemString)
	return lexInsideAction
}

// lexRawQuote scans a raw quoted string.
func lexRawQuote(l *lexer) stateFn {
	startLine := l.line
Loop:
	for {
		switch l.next() {
		case eof:
			// Restore line number to location of opening quote.
			// We will error out so it's ok just to overwrite the field.
			l.line = startLine
			return l.errorf("unterminated raw quoted string")
		case '`':
			break Loop
		}
	}
	l.emit(itemRawString)
	return lexInsideAction
}

// isSpace reports whether r is a space character.
func isSpace(r rune) bool {
	return r == ' ' || r == '\t'
}

// isEndOfLine reports whether r is an end-of-line character.
func isEndOfLine(r rune) bool {
	return r == '\r' || r == '\n'
}

// isAlphaNumeric reports whether r is an alphabetic, digit, or underscore.
func isAlphaNumeric(r rune) bool {
	return r == '_' || unicode.IsLetter(r) || unicode.IsDigit(r)
}
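
// Illustrative sketch, not part of the original file: a few literals and the
// item type the scanners above would emit for them (after the enclosing
// itemLeftDelim). The sample inputs and expected types are assumptions written
// out for demonstration; as the lexNumber comment notes, some malformed
// numbers are over-accepted here and rejected later by strconv in the parser.
var exampleLiterals = []struct {
	in   string
	want itemType
}{
	{"{{23}}", itemNumber},
	{"{{0x1F}}", itemNumber},
	{"{{-7.5e2}}", itemNumber},
	{"{{1+2i}}", itemComplex}, // no spaces, must end in 'i'
	{`{{'\n'}}`, itemCharConstant}, // escapes scanned by lexChar, checked by the parser
	{`{{"a\"b"}}`, itemString}, // escaped quote inside a quoted string
	{"{{`raw\nstring`}}", itemRawString}, // raw strings may span newlines
	{"{{3k}}", itemError}, // trailing alphanumeric rejected by scanNumber
}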