github.com/dannin/go@v0.0.0-20161031215817-d35dfd405eaa/src/go/scanner/scanner_test.go

// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package scanner

import (
	"go/token"
	"io/ioutil"
	"os"
	"path/filepath"
	"runtime"
	"testing"
)

var fset = token.NewFileSet()

const /* class */ (
	special = iota
	literal
	operator
	keyword
)

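// tokenclass reports the class (special, literal, operator, or keyword)
// of the given token, using the class constants above.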
func tokenclass(tok token.Token) int {
	switch {
	case tok.IsLiteral():
		return literal
	case tok.IsOperator():
		return operator
	case tok.IsKeyword():
		return keyword
	}
	return special
}

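// elt pairs a token with its source literal and expected token class.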
type elt struct {
	tok   token.Token
	lit   string
	class int
}

var tokens = [...]elt{
	// Special tokens
	{token.COMMENT, "/* a comment */", special},
	{token.COMMENT, "// a comment \n", special},
	{token.COMMENT, "/*\r*/", special},
	{token.COMMENT, "//\r\n", special},

	// Identifiers and basic type literals
	{token.IDENT, "foobar", literal},
	{token.IDENT, "a۰۱۸", literal},
	{token.IDENT, "foo६४", literal},
	{token.IDENT, "bar9876", literal},
	{token.IDENT, "ŝ", literal},    // was bug (issue 4000)
	{token.IDENT, "ŝfoo", literal}, // was bug (issue 4000)
	{token.INT, "0", literal},
	{token.INT, "1", literal},
	{token.INT, "123456789012345678890", literal},
	{token.INT, "01234567", literal},
	{token.INT, "0xcafebabe", literal},
	{token.FLOAT, "0.", literal},
	{token.FLOAT, ".0", literal},
	{token.FLOAT, "3.14159265", literal},
	{token.FLOAT, "1e0", literal},
	{token.FLOAT, "1e+100", literal},
	{token.FLOAT, "1e-100", literal},
	{token.FLOAT, "2.71828e-1000", literal},
	{token.IMAG, "0i", literal},
	{token.IMAG, "1i", literal},
	{token.IMAG, "012345678901234567889i", literal},
	{token.IMAG, "123456789012345678890i", literal},
	{token.IMAG, "0.i", literal},
	{token.IMAG, ".0i", literal},
	{token.IMAG, "3.14159265i", literal},
	{token.IMAG, "1e0i", literal},
	{token.IMAG, "1e+100i", literal},
	{token.IMAG, "1e-100i", literal},
	{token.IMAG, "2.71828e-1000i", literal},
	{token.CHAR, "'a'", literal},
	{token.CHAR, "'\\000'", literal},
	{token.CHAR, "'\\xFF'", literal},
	{token.CHAR, "'\\uff16'", literal},
	{token.CHAR, "'\\U0000ff16'", literal},
	{token.STRING, "`foobar`", literal},
	{token.STRING, "`" + `foo
	                        bar` +
		"`",
		literal,
	},
	{token.STRING, "`\r`", literal},
	{token.STRING, "`foo\r\nbar`", literal},

	// Operators and delimiters
	{token.ADD, "+", operator},
	{token.SUB, "-", operator},
	{token.MUL, "*", operator},
	{token.QUO, "/", operator},
	{token.REM, "%", operator},

	{token.AND, "&", operator},
	{token.OR, "|", operator},
	{token.XOR, "^", operator},
	{token.SHL, "<<", operator},
	{token.SHR, ">>", operator},
	{token.AND_NOT, "&^", operator},

	{token.ADD_ASSIGN, "+=", operator},
	{token.SUB_ASSIGN, "-=", operator},
	{token.MUL_ASSIGN, "*=", operator},
	{token.QUO_ASSIGN, "/=", operator},
	{token.REM_ASSIGN, "%=", operator},

	{token.AND_ASSIGN, "&=", operator},
	{token.OR_ASSIGN, "|=", operator},
	{token.XOR_ASSIGN, "^=", operator},
	{token.SHL_ASSIGN, "<<=", operator},
	{token.SHR_ASSIGN, ">>=", operator},
	{token.AND_NOT_ASSIGN, "&^=", operator},

	{token.LAND, "&&", operator},
	{token.LOR, "||", operator},
	{token.ARROW, "<-", operator},
	{token.ALIAS, "=>", operator},
	{token.INC, "++", operator},
	{token.DEC, "--", operator},

	{token.EQL, "==", operator},
	{token.LSS, "<", operator},
	{token.GTR, ">", operator},
	{token.ASSIGN, "=", operator},
	{token.NOT, "!", operator},

	{token.NEQ, "!=", operator},
	{token.LEQ, "<=", operator},
	{token.GEQ, ">=", operator},
	{token.DEFINE, ":=", operator},
	{token.ELLIPSIS, "...", operator},

	{token.LPAREN, "(", operator},
	{token.LBRACK, "[", operator},
	{token.LBRACE, "{", operator},
	{token.COMMA, ",", operator},
	{token.PERIOD, ".", operator},

	{token.RPAREN, ")", operator},
	{token.RBRACK, "]", operator},
	{token.RBRACE, "}", operator},
	{token.SEMICOLON, ";", operator},
	{token.COLON, ":", operator},

	// Keywords
	{token.BREAK, "break", keyword},
	{token.CASE, "case", keyword},
	{token.CHAN, "chan", keyword},
	{token.CONST, "const", keyword},
	{token.CONTINUE, "continue", keyword},

	{token.DEFAULT, "default", keyword},
	{token.DEFER, "defer", keyword},
	{token.ELSE, "else", keyword},
	{token.FALLTHROUGH, "fallthrough", keyword},
	{token.FOR, "for", keyword},

	{token.FUNC, "func", keyword},
	{token.GO, "go", keyword},
	{token.GOTO, "goto", keyword},
	{token.IF, "if", keyword},
	{token.IMPORT, "import", keyword},

	{token.INTERFACE, "interface", keyword},
	{token.MAP, "map", keyword},
	{token.PACKAGE, "package", keyword},
	{token.RANGE, "range", keyword},
	{token.RETURN, "return", keyword},

	{token.SELECT, "select", keyword},
	{token.STRUCT, "struct", keyword},
	{token.SWITCH, "switch", keyword},
	{token.TYPE, "type", keyword},
	{token.VAR, "var", keyword},
}

const whitespace = "  \t  \n\n\n" // to separate tokens

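// source is all test token literals concatenated, separated by whitespace,
// so that each expected token appears in isolation in the input.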
var source = func() []byte {
	var src []byte
	for _, t := range tokens {
		src = append(src, t.lit...)
		src = append(src, whitespace...)
	}
	return src
}()

func newlineCount(s string) int {
	n := 0
	for i := 0; i < len(s); i++ {
		if s[i] == '\n' {
			n++
		}
	}
	return n
}

func checkPos(t *testing.T, lit string, p token.Pos, expected token.Position) {
	pos := fset.Position(p)
	if pos.Filename != expected.Filename {
		t.Errorf("bad filename for %q: got %s, expected %s", lit, pos.Filename, expected.Filename)
	}
	if pos.Offset != expected.Offset {
		t.Errorf("bad position for %q: got %d, expected %d", lit, pos.Offset, expected.Offset)
	}
	if pos.Line != expected.Line {
		t.Errorf("bad line for %q: got %d, expected %d", lit, pos.Line, expected.Line)
	}
	if pos.Column != expected.Column {
		t.Errorf("bad column for %q: got %d, expected %d", lit, pos.Column, expected.Column)
	}
}

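// exampleScanLoop is an illustrative sketch (not part of the original test
// suite) of the basic pattern the tests below follow: Init attaches the
// scanner to a file registered in a file set, and Scan returns
// (position, token, literal) triples until it yields token.EOF.
func exampleScanLoop(src []byte) (count int) {
	fs := token.NewFileSet()
	var s Scanner
	s.Init(fs.AddFile("example", fs.Base(), len(src)), src, nil /* no error handler */, ScanComments)
	for {
		if _, tok, _ := s.Scan(); tok == token.EOF {
			return count
		}
		count++
	}
}
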
// Verify that calling Scan() provides the correct results.
func TestScan(t *testing.T) {
	whitespaceLinecount := newlineCount(whitespace)

	// error handler
	eh := func(_ token.Position, msg string) {
		t.Errorf("error handler called (msg = %s)", msg)
	}

	// verify scan
	var s Scanner
	s.Init(fset.AddFile("", fset.Base(), len(source)), source, eh, ScanComments|dontInsertSemis)

	// set up expected position
	epos := token.Position{
		Filename: "",
		Offset:   0,
		Line:     1,
		Column:   1,
	}

	index := 0
	for {
		pos, tok, lit := s.Scan()

		// check position
		if tok == token.EOF {
			// correction for EOF
			epos.Line = newlineCount(string(source))
			epos.Column = 2
		}
		checkPos(t, lit, pos, epos)

		// check token
		e := elt{token.EOF, "", special}
		if index < len(tokens) {
			e = tokens[index]
			index++
		}
		if tok != e.tok {
			t.Errorf("bad token for %q: got %s, expected %s", lit, tok, e.tok)
		}

		// check token class
		if tokenclass(tok) != e.class {
			t.Errorf("bad class for %q: got %d, expected %d", lit, tokenclass(tok), e.class)
		}

		// check literal
		elit := ""
		switch e.tok {
		case token.COMMENT:
			// no CRs in comments
			elit = string(stripCR([]byte(e.lit)))
			// a //-style comment literal doesn't contain the final newline
			if elit[1] == '/' {
				elit = elit[0 : len(elit)-1]
			}
		case token.IDENT:
			elit = e.lit
		case token.SEMICOLON:
			elit = ";"
		default:
			if e.tok.IsLiteral() {
				// no CRs in raw string literals
				elit = e.lit
				if elit[0] == '`' {
					elit = string(stripCR([]byte(elit)))
				}
			} else if e.tok.IsKeyword() {
				elit = e.lit
			}
		}
		if lit != elit {
			t.Errorf("bad literal for %q: got %q, expected %q", lit, lit, elit)
		}

		if tok == token.EOF {
			break
		}

		// update position
		epos.Offset += len(e.lit) + len(whitespace)
		epos.Line += newlineCount(e.lit) + whitespaceLinecount
	}

	if s.ErrorCount != 0 {
		t.Errorf("found %d errors", s.ErrorCount)
	}
}

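// checkSemi scans line and checks automatic semicolon insertion: each
// ILLEGAL marker token in the input (see the lines table below) must be
// followed immediately by a SEMICOLON whose literal is ";" for a semicolon
// present in the source ('#') or "\n" for an automatically inserted one ('$').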
func checkSemi(t *testing.T, line string, mode Mode) {
	var S Scanner
	file := fset.AddFile("TestSemis", fset.Base(), len(line))
	S.Init(file, []byte(line), nil, mode)
	pos, tok, lit := S.Scan()
	for tok != token.EOF {
		if tok == token.ILLEGAL {
			// the illegal token literal indicates what
			// kind of semicolon literal to expect
			semiLit := "\n"
			if lit[0] == '#' {
				semiLit = ";"
			}
			// next token must be a semicolon
			semiPos := file.Position(pos)
			semiPos.Offset++
			semiPos.Column++
			pos, tok, lit = S.Scan()
			if tok == token.SEMICOLON {
				if lit != semiLit {
					t.Errorf(`bad literal for %q: got %q, expected %q`, line, lit, semiLit)
				}
				checkPos(t, line, pos, semiPos)
			} else {
				t.Errorf("bad token for %q: got %s, expected ;", line, tok)
			}
		} else if tok == token.SEMICOLON {
			t.Errorf("bad token for %q: got ;, expected no ;", line)
		}
		pos, tok, lit = S.Scan()
	}
}

var lines = []string{
	// # indicates a semicolon present in the source
	// $ indicates an automatically inserted semicolon
	"",
	"\ufeff#;", // first BOM is ignored
	"#;",
	"foo$\n",
	"123$\n",
	"1.2$\n",
	"'x'$\n",
	`"x"` + "$\n",
	"`x`$\n",

	"+\n",
	"-\n",
	"*\n",
	"/\n",
	"%\n",

	"&\n",
	"|\n",
	"^\n",
	"<<\n",
	">>\n",
	"&^\n",

	"+=\n",
	"-=\n",
	"*=\n",
	"/=\n",
	"%=\n",

	"&=\n",
	"|=\n",
	"^=\n",
	"<<=\n",
	">>=\n",
	"&^=\n",

	"&&\n",
	"||\n",
	"<-\n",
	"++$\n",
	"--$\n",

	"==\n",
	"<\n",
	">\n",
	"=\n",
	"!\n",

	"!=\n",
	"<=\n",
	">=\n",
	":=\n",
	"...\n",

	"(\n",
	"[\n",
	"{\n",
	",\n",
	".\n",

	")$\n",
	"]$\n",
	"}$\n",
	"#;\n",
	":\n",

	"break$\n",
	"case\n",
	"chan\n",
	"const\n",
	"continue$\n",

	"default\n",
	"defer\n",
	"else\n",
	"fallthrough$\n",
	"for\n",

	"func\n",
	"go\n",
	"goto\n",
	"if\n",
	"import\n",

	"interface\n",
	"map\n",
	"package\n",
	"range\n",
	"return$\n",

	"select\n",
	"struct\n",
	"switch\n",
	"type\n",
	"var\n",

	"foo$//comment\n",
	"foo$//comment",
	"foo$/*comment*/\n",
	"foo$/*\n*/",
	"foo$/*comment*/    \n",
	"foo$/*\n*/    ",

	"foo    $// comment\n",
	"foo    $// comment",
	"foo    $/*comment*/\n",
	"foo    $/*\n*/",
	"foo    $/*  */ /* \n */ bar$/**/\n",
	"foo    $/*0*/ /*1*/ /*2*/\n",

	"foo    $/*comment*/    \n",
	"foo    $/*0*/ /*1*/ /*2*/    \n",
	"foo	$/**/ /*-------------*/       /*----\n*/bar       $/*  \n*/baa$\n",
	"foo    $/* an EOF terminates a line */",
	"foo    $/* an EOF terminates a line */ /*",
	"foo    $/* an EOF terminates a line */ //",

	"package main$\n\nfunc main() {\n\tif {\n\t\treturn /* */ }$\n}$\n",
	"package main$",
}

func TestSemis(t *testing.T) {
	for _, line := range lines {
		checkSemi(t, line, 0)
		checkSemi(t, line, ScanComments)

		// if the input ended in newlines, the input must tokenize the
		// same with or without those newlines
		for i := len(line) - 1; i >= 0 && line[i] == '\n'; i-- {
			checkSemi(t, line[0:i], 0)
			checkSemi(t, line[0:i], ScanComments)
		}
	}
}

type segment struct {
	srcline  string // a line of source text
	filename string // filename for current token
	line     int    // line number for current token
}

var segments = []segment{
	// exactly one token per line since the test consumes one token per segment
	{"  line1", filepath.Join("dir", "TestLineComments"), 1},
	{"\nline2", filepath.Join("dir", "TestLineComments"), 2},
	{"\nline3  //line File1.go:100", filepath.Join("dir", "TestLineComments"), 3}, // bad line comment, ignored
	{"\nline4", filepath.Join("dir", "TestLineComments"), 4},
	{"\n//line File1.go:100\n  line100", filepath.Join("dir", "File1.go"), 100},
	{"\n//line  \t :42\n  line1", "", 42},
	{"\n//line File2.go:200\n  line200", filepath.Join("dir", "File2.go"), 200},
	{"\n//line foo\t:42\n  line42", filepath.Join("dir", "foo"), 42},
	{"\n //line foo:42\n  line44", filepath.Join("dir", "foo"), 44},           // bad line comment, ignored
	{"\n//line foo 42\n  line46", filepath.Join("dir", "foo"), 46},            // bad line comment, ignored
	{"\n//line foo:42 extra text\n  line48", filepath.Join("dir", "foo"), 48}, // bad line comment, ignored
	{"\n//line ./foo:42\n  line42", filepath.Join("dir", "foo"), 42},
	{"\n//line a/b/c/File1.go:100\n  line100", filepath.Join("dir", "a", "b", "c", "File1.go"), 100},
}

var unixsegments = []segment{
	{"\n//line /bar:42\n  line42", "/bar", 42},
}

var winsegments = []segment{
	{"\n//line c:\\bar:42\n  line42", "c:\\bar", 42},
	{"\n//line c:\\dir\\File1.go:100\n  line100", "c:\\dir\\File1.go", 100},
}

// Verify that comments of the form "//line filename:line" are interpreted correctly.
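// Such a comment sets the filename and line reported for the source that
// follows it. Relative filenames are resolved against the directory of the
// file being scanned (here "dir", as the segments above show), and malformed
// line comments are ignored.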
func TestLineComments(t *testing.T) {
	segs := segments
	if runtime.GOOS == "windows" {
		segs = append(segs, winsegments...)
	} else {
		segs = append(segs, unixsegments...)
	}

	// make source
	var src string
	for _, e := range segs {
		src += e.srcline
	}

	// verify scan
	var S Scanner
	file := fset.AddFile(filepath.Join("dir", "TestLineComments"), fset.Base(), len(src))
	S.Init(file, []byte(src), nil, dontInsertSemis)
	for _, s := range segs {
		p, _, lit := S.Scan()
		pos := file.Position(p)
		checkPos(t, lit, p, token.Position{
			Filename: s.filename,
			Offset:   pos.Offset,
			Line:     s.line,
			Column:   pos.Column,
		})
	}

	if S.ErrorCount != 0 {
		t.Errorf("found %d errors", S.ErrorCount)
	}
}

// Verify that initializing the same scanner more than once works correctly.
func TestInit(t *testing.T) {
	var s Scanner

	// 1st init
	src1 := "if true { }"
	f1 := fset.AddFile("src1", fset.Base(), len(src1))
	s.Init(f1, []byte(src1), nil, dontInsertSemis)
	if f1.Size() != len(src1) {
		t.Errorf("bad file size: got %d, expected %d", f1.Size(), len(src1))
	}
	s.Scan()              // if
	s.Scan()              // true
	_, tok, _ := s.Scan() // {
	if tok != token.LBRACE {
		t.Errorf("bad token: got %s, expected %s", tok, token.LBRACE)
	}

	// 2nd init
	src2 := "go true { ]"
	f2 := fset.AddFile("src2", fset.Base(), len(src2))
	s.Init(f2, []byte(src2), nil, dontInsertSemis)
	if f2.Size() != len(src2) {
		t.Errorf("bad file size: got %d, expected %d", f2.Size(), len(src2))
	}
	_, tok, _ = s.Scan() // go
	if tok != token.GO {
		t.Errorf("bad token: got %s, expected %s", tok, token.GO)
	}

	if s.ErrorCount != 0 {
		t.Errorf("found %d errors", s.ErrorCount)
	}
}

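// TestStdErrorHandler verifies the ErrorList-based error handler: all raw
// errors are collected, Sort orders them by position, and RemoveMultiples
// leaves at most one error per source line.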
func TestStdErrorHandler(t *testing.T) {
	const src = "@\n" + // illegal character, causes an error
		"@ @\n" + // two errors on the same line
		"//line File2:20\n" +
		"@\n" + // different file, but same line
		"//line File2:1\n" +
		"@ @\n" + // same file, decreasing line number
		"//line File1:1\n" +
		"@ @ @" // original file, line 1 again

	var list ErrorList
	eh := func(pos token.Position, msg string) { list.Add(pos, msg) }

	var s Scanner
	s.Init(fset.AddFile("File1", fset.Base(), len(src)), []byte(src), eh, dontInsertSemis)
	for {
		if _, tok, _ := s.Scan(); tok == token.EOF {
			break
		}
	}

	if len(list) != s.ErrorCount {
		t.Errorf("found %d errors, expected %d", len(list), s.ErrorCount)
	}

	if len(list) != 9 {
		t.Errorf("found %d raw errors, expected 9", len(list))
		PrintError(os.Stderr, list)
	}

	list.Sort()
	if len(list) != 9 {
		t.Errorf("found %d sorted errors, expected 9", len(list))
		PrintError(os.Stderr, list)
	}

	list.RemoveMultiples()
	if len(list) != 4 {
		t.Errorf("found %d one-per-line errors, expected 4", len(list))
		PrintError(os.Stderr, list)
	}
}

type errorCollector struct {
	cnt int            // number of errors encountered
	msg string         // last error message encountered
	pos token.Position // last error position encountered
}

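// checkError scans src, which is expected to begin with the given token,
// and verifies the first scanned token, its literal, and the offset and
// message of the single reported error (err == "" means no error).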
func checkError(t *testing.T, src string, tok token.Token, pos int, lit, err string) {
	var s Scanner
	var h errorCollector
	eh := func(pos token.Position, msg string) {
		h.cnt++
		h.msg = msg
		h.pos = pos
	}
	s.Init(fset.AddFile("", fset.Base(), len(src)), []byte(src), eh, ScanComments|dontInsertSemis)
	_, tok0, lit0 := s.Scan()
	if tok0 != tok {
		t.Errorf("%q: got %s, expected %s", src, tok0, tok)
	}
	if tok0 != token.ILLEGAL && lit0 != lit {
		t.Errorf("%q: got literal %q, expected %q", src, lit0, lit)
	}
	cnt := 0
	if err != "" {
		cnt = 1
	}
	if h.cnt != cnt {
		t.Errorf("%q: got cnt %d, expected %d", src, h.cnt, cnt)
	}
	if h.msg != err {
		t.Errorf("%q: got msg %q, expected %q", src, h.msg, err)
	}
	if h.pos.Offset != pos {
		t.Errorf("%q: got offset %d, expected %d", src, h.pos.Offset, pos)
	}
}

var errors = []struct {
	src string
	tok token.Token
	pos int
	lit string
	err string
}{
	{"\a", token.ILLEGAL, 0, "", "illegal character U+0007"},
	{`#`, token.ILLEGAL, 0, "", "illegal character U+0023 '#'"},
	{`…`, token.ILLEGAL, 0, "", "illegal character U+2026 '…'"},
	{`' '`, token.CHAR, 0, `' '`, ""},
	{`''`, token.CHAR, 0, `''`, "illegal rune literal"},
	{`'12'`, token.CHAR, 0, `'12'`, "illegal rune literal"},
	{`'123'`, token.CHAR, 0, `'123'`, "illegal rune literal"},
	{`'\0'`, token.CHAR, 3, `'\0'`, "illegal character U+0027 ''' in escape sequence"},
	{`'\07'`, token.CHAR, 4, `'\07'`, "illegal character U+0027 ''' in escape sequence"},
	{`'\8'`, token.CHAR, 2, `'\8'`, "unknown escape sequence"},
	{`'\08'`, token.CHAR, 3, `'\08'`, "illegal character U+0038 '8' in escape sequence"},
	{`'\x'`, token.CHAR, 3, `'\x'`, "illegal character U+0027 ''' in escape sequence"},
	{`'\x0'`, token.CHAR, 4, `'\x0'`, "illegal character U+0027 ''' in escape sequence"},
	{`'\x0g'`, token.CHAR, 4, `'\x0g'`, "illegal character U+0067 'g' in escape sequence"},
	{`'\u'`, token.CHAR, 3, `'\u'`, "illegal character U+0027 ''' in escape sequence"},
	{`'\u0'`, token.CHAR, 4, `'\u0'`, "illegal character U+0027 ''' in escape sequence"},
	{`'\u00'`, token.CHAR, 5, `'\u00'`, "illegal character U+0027 ''' in escape sequence"},
	{`'\u000'`, token.CHAR, 6, `'\u000'`, "illegal character U+0027 ''' in escape sequence"},
	{`'\u000`, token.CHAR, 6, `'\u000`, "escape sequence not terminated"},
	{`'\u0000'`, token.CHAR, 0, `'\u0000'`, ""},
	{`'\U'`, token.CHAR, 3, `'\U'`, "illegal character U+0027 ''' in escape sequence"},
	{`'\U0'`, token.CHAR, 4, `'\U0'`, "illegal character U+0027 ''' in escape sequence"},
	{`'\U00'`, token.CHAR, 5, `'\U00'`, "illegal character U+0027 ''' in escape sequence"},
	{`'\U000'`, token.CHAR, 6, `'\U000'`, "illegal character U+0027 ''' in escape sequence"},
	{`'\U0000'`, token.CHAR, 7, `'\U0000'`, "illegal character U+0027 ''' in escape sequence"},
	{`'\U00000'`, token.CHAR, 8, `'\U00000'`, "illegal character U+0027 ''' in escape sequence"},
	{`'\U000000'`, token.CHAR, 9, `'\U000000'`, "illegal character U+0027 ''' in escape sequence"},
	{`'\U0000000'`, token.CHAR, 10, `'\U0000000'`, "illegal character U+0027 ''' in escape sequence"},
	{`'\U0000000`, token.CHAR, 10, `'\U0000000`, "escape sequence not terminated"},
	{`'\U00000000'`, token.CHAR, 0, `'\U00000000'`, ""},
	{`'\Uffffffff'`, token.CHAR, 2, `'\Uffffffff'`, "escape sequence is invalid Unicode code point"},
	{`'`, token.CHAR, 0, `'`, "rune literal not terminated"},
	{`'\`, token.CHAR, 2, `'\`, "escape sequence not terminated"},
	{"'\n", token.CHAR, 0, "'", "rune literal not terminated"},
	{"'\n   ", token.CHAR, 0, "'", "rune literal not terminated"},
	{`""`, token.STRING, 0, `""`, ""},
	{`"abc`, token.STRING, 0, `"abc`, "string literal not terminated"},
	{"\"abc\n", token.STRING, 0, `"abc`, "string literal not terminated"},
	{"\"abc\n   ", token.STRING, 0, `"abc`, "string literal not terminated"},
	{"``", token.STRING, 0, "``", ""},
	{"`", token.STRING, 0, "`", "raw string literal not terminated"},
	{"/**/", token.COMMENT, 0, "/**/", ""},
	{"/*", token.COMMENT, 0, "/*", "comment not terminated"},
	{"077", token.INT, 0, "077", ""},
	{"078.", token.FLOAT, 0, "078.", ""},
	{"07801234567.", token.FLOAT, 0, "07801234567.", ""},
	{"078e0", token.FLOAT, 0, "078e0", ""},
	{"0E", token.FLOAT, 0, "0E", "illegal floating-point exponent"}, // issue 17621
	{"078", token.INT, 0, "078", "illegal octal number"},
	{"07800000009", token.INT, 0, "07800000009", "illegal octal number"},
	{"0x", token.INT, 0, "0x", "illegal hexadecimal number"},
	{"0X", token.INT, 0, "0X", "illegal hexadecimal number"},
	{"\"abc\x00def\"", token.STRING, 4, "\"abc\x00def\"", "illegal character NUL"},
	{"\"abc\x80def\"", token.STRING, 4, "\"abc\x80def\"", "illegal UTF-8 encoding"},
	{"\ufeff\ufeff", token.ILLEGAL, 3, "\ufeff\ufeff", "illegal byte order mark"},                        // only first BOM is ignored
	{"//\ufeff", token.COMMENT, 2, "//\ufeff", "illegal byte order mark"},                                // only first BOM is ignored
	{"'\ufeff" + `'`, token.CHAR, 1, "'\ufeff" + `'`, "illegal byte order mark"},                         // only first BOM is ignored
	{`"` + "abc\ufeffdef" + `"`, token.STRING, 4, `"` + "abc\ufeffdef" + `"`, "illegal byte order mark"}, // only first BOM is ignored
}

func TestScanErrors(t *testing.T) {
	for _, e := range errors {
		checkError(t, e.src, e.tok, e.pos, e.lit, e.err)
	}
}

// Verify that no comments show up as literal values when skipping comments.
func TestIssue10213(t *testing.T) {
	var src = `
		var (
			A = 1 // foo
		)

		var (
			B = 2
			// foo
		)

		var C = 3 // foo

		var D = 4
		// foo

		func anycode() {
		// foo
		}
	`
	var s Scanner
	s.Init(fset.AddFile("", fset.Base(), len(src)), []byte(src), nil, 0)
	for {
		pos, tok, lit := s.Scan()
		class := tokenclass(tok)
		if lit != "" && class != keyword && class != literal && tok != token.SEMICOLON {
			t.Errorf("%s: tok = %s, lit = %q", fset.Position(pos), tok, lit)
		}
		if tok <= token.EOF {
			break
		}
	}
}

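// The benchmarks below stop the timer during setup so that only Init and
// the Scan loop are measured. BenchmarkScan scans the synthetic source
// built above; BenchmarkScanFile scans this package's scanner.go and
// reports throughput via b.SetBytes.
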
func BenchmarkScan(b *testing.B) {
	b.StopTimer()
	fset := token.NewFileSet()
	file := fset.AddFile("", fset.Base(), len(source))
	var s Scanner
	b.StartTimer()
	for i := 0; i < b.N; i++ {
		s.Init(file, source, nil, ScanComments)
		for {
			_, tok, _ := s.Scan()
			if tok == token.EOF {
				break
			}
		}
	}
}

func BenchmarkScanFile(b *testing.B) {
	b.StopTimer()
	const filename = "scanner.go"
	src, err := ioutil.ReadFile(filename)
	if err != nil {
		panic(err)
	}
	fset := token.NewFileSet()
	file := fset.AddFile(filename, fset.Base(), len(src))
	b.SetBytes(int64(len(src)))
	var s Scanner
	b.StartTimer()
	for i := 0; i < b.N; i++ {
		s.Init(file, src, nil, ScanComments)
		for {
			_, tok, _ := s.Scan()
			if tok == token.EOF {
				break
			}
		}
	}
}