github.com/gernest/nezuko@v0.1.2/internal/modfile/read.go

// Copyright 2018 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// Module file parser.
// This is a simplified copy of Google's buildifier parser.

package modfile

import (
	"bytes"
	"fmt"
	"os"
	"strconv"
	"strings"
	"unicode"
	"unicode/utf8"
)

// A Position describes the position between two bytes of input.
type Position struct {
	Line     int // line in input (starting at 1)
	LineRune int // rune in line (starting at 1)
	Byte     int // byte in input (starting at 0)
}

// add returns the position at the end of s, assuming it starts at p.
func (p Position) add(s string) Position {
	p.Byte += len(s)
	if n := strings.Count(s, "\n"); n > 0 {
		p.Line += n
		s = s[strings.LastIndex(s, "\n")+1:]
		p.LineRune = 1
	}
	p.LineRune += utf8.RuneCountInString(s)
	return p
}

// An Expr represents an input element.
type Expr interface {
	// Span returns the start and end position of the expression,
	// excluding leading or trailing comments.
	Span() (start, end Position)

	// Comment returns the comments attached to the expression.
	// This method would normally be named 'Comments' but that
	// would interfere with embedding a type of the same name.
	Comment() *Comments
}

// A Comment represents a single // comment.
type Comment struct {
	Start  Position
	Token  string // without trailing newline
	Suffix bool   // an end of line (not whole line) comment
}

// Comments collects the comments associated with an expression.
type Comments struct {
	Before []Comment // whole-line comments before this expression
	Suffix []Comment // end-of-line comments after this expression

	// For top-level expressions only, After lists whole-line
	// comments following the expression.
	After []Comment
}

// Comment returns the receiver. This isn't useful by itself, but
// a Comments struct is embedded into all the expression
// implementation types, and this gives each of those a Comment
// method to satisfy the Expr interface.
func (c *Comments) Comment() *Comments {
	return c
}

// A FileSyntax represents an entire z.mod file.
type FileSyntax struct {
	Name string // file path
	Comments
	Stmt []Expr
}

func (x *FileSyntax) Span() (start, end Position) {
	if len(x.Stmt) == 0 {
		return
	}
	start, _ = x.Stmt[0].Span()
	_, end = x.Stmt[len(x.Stmt)-1].Span()
	return start, end
}

func (x *FileSyntax) addLine(hint Expr, tokens ...string) *Line {
	if hint == nil {
		// If no hint given, add to the last statement of the given type.
	Loop:
		for i := len(x.Stmt) - 1; i >= 0; i-- {
			stmt := x.Stmt[i]
			switch stmt := stmt.(type) {
			case *Line:
				if stmt.Token != nil && stmt.Token[0] == tokens[0] {
					hint = stmt
					break Loop
				}
			case *LineBlock:
				if stmt.Token[0] == tokens[0] {
					hint = stmt
					break Loop
				}
			}
		}
	}

	if hint != nil {
		for i, stmt := range x.Stmt {
			switch stmt := stmt.(type) {
			case *Line:
				if stmt == hint {
					// Convert line to line block.
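					// The hint line's first token (for example "require")
					// becomes the block's keyword; its remaining tokens stay
					// on the line, which is nested inside the new block.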
					stmt.InBlock = true
					block := &LineBlock{Token: stmt.Token[:1], Line: []*Line{stmt}}
					stmt.Token = stmt.Token[1:]
					x.Stmt[i] = block
					new := &Line{Token: tokens[1:], InBlock: true}
					block.Line = append(block.Line, new)
					return new
				}
			case *LineBlock:
				if stmt == hint {
					new := &Line{Token: tokens[1:], InBlock: true}
					stmt.Line = append(stmt.Line, new)
					return new
				}
				for j, line := range stmt.Line {
					if line == hint {
						// Add new line after hint.
						stmt.Line = append(stmt.Line, nil)
						copy(stmt.Line[j+2:], stmt.Line[j+1:])
						new := &Line{Token: tokens[1:], InBlock: true}
						stmt.Line[j+1] = new
						return new
					}
				}
			}
		}
	}

	new := &Line{Token: tokens}
	x.Stmt = append(x.Stmt, new)
	return new
}

func (x *FileSyntax) updateLine(line *Line, tokens ...string) {
	if line.InBlock {
		tokens = tokens[1:]
	}
	line.Token = tokens
}

func (x *FileSyntax) removeLine(line *Line) {
	line.Token = nil
}

// Cleanup cleans up the file syntax x after any edit operations.
// To avoid quadratic behavior, removeLine marks the line as dead
// by setting line.Token = nil but does not remove it from the slice
// in which it appears. After edits have all been indicated,
// calling Cleanup cleans out the dead lines.
func (x *FileSyntax) Cleanup() {
	w := 0
	for _, stmt := range x.Stmt {
		switch stmt := stmt.(type) {
		case *Line:
			if stmt.Token == nil {
				continue
			}
		case *LineBlock:
			ww := 0
			for _, line := range stmt.Line {
				if line.Token != nil {
					stmt.Line[ww] = line
					ww++
				}
			}
			if ww == 0 {
				continue
			}
			if ww == 1 {
				// Collapse block into single line.
				line := &Line{
					Comments: Comments{
						Before: commentsAdd(stmt.Before, stmt.Line[0].Before),
						Suffix: commentsAdd(stmt.Line[0].Suffix, stmt.Suffix),
						After:  commentsAdd(stmt.Line[0].After, stmt.After),
					},
					Token: stringsAdd(stmt.Token, stmt.Line[0].Token),
				}
				x.Stmt[w] = line
				w++
				continue
			}
			stmt.Line = stmt.Line[:ww]
		}
		x.Stmt[w] = stmt
		w++
	}
	x.Stmt = x.Stmt[:w]
}

func commentsAdd(x, y []Comment) []Comment {
	return append(x[:len(x):len(x)], y...)
}

func stringsAdd(x, y []string) []string {
	return append(x[:len(x):len(x)], y...)
}

// A CommentBlock represents a top-level block of comments separate
// from any rule.
type CommentBlock struct {
	Comments
	Start Position
}

func (x *CommentBlock) Span() (start, end Position) {
	return x.Start, x.Start
}

// A Line is a single line of tokens.
type Line struct {
	Comments
	Start   Position
	Token   []string
	InBlock bool
	End     Position
}

func (x *Line) Span() (start, end Position) {
	return x.Start, x.End
}

// A LineBlock is a factored block of lines, like
//
//	require (
//		"x"
//		"y"
//	)
//
type LineBlock struct {
	Comments
	Start  Position
	LParen LParen
	Token  []string
	Line   []*Line
	RParen RParen
}

func (x *LineBlock) Span() (start, end Position) {
	return x.Start, x.RParen.Pos.add(")")
}

// An LParen represents the beginning of a parenthesized line block.
// It is a place to store suffix comments.
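// (Suffix comments here are the end-of-line comments that follow the
// opening parenthesis on the same line.)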
type LParen struct {
	Comments
	Pos Position
}

func (x *LParen) Span() (start, end Position) {
	return x.Pos, x.Pos.add(")")
}

// An RParen represents the end of a parenthesized line block.
// It is a place to store whole-line (before) comments.
type RParen struct {
	Comments
	Pos Position
}

func (x *RParen) Span() (start, end Position) {
	return x.Pos, x.Pos.add(")")
}

// An input represents a single input file being parsed.
type input struct {
	// Lexing state.
	filename  string    // name of input file, for errors
	complete  []byte    // entire input
	remaining []byte    // remaining input
	token     []byte    // token being scanned
	lastToken string    // most recently returned token, for error messages
	pos       Position  // current input position
	comments  []Comment // accumulated comments
	endRule   int       // position of end of current rule

	// Parser state.
	file       *FileSyntax // returned top-level syntax tree
	parseError error       // error encountered during parsing

	// Comment assignment state.
	pre  []Expr // all expressions, in preorder traversal
	post []Expr // all expressions, in postorder traversal
}

func newInput(filename string, data []byte) *input {
	return &input{
		filename:  filename,
		complete:  data,
		remaining: data,
		pos:       Position{Line: 1, LineRune: 1, Byte: 0},
	}
}

// parse parses the input file.
func parse(file string, data []byte) (f *FileSyntax, err error) {
	in := newInput(file, data)
	// The parser panics for both routine errors like syntax errors
	// and for programmer bugs like array index errors.
	// Turn both into error returns. Catching bug panics is
	// especially important when processing many files.
	defer func() {
		if e := recover(); e != nil {
			if e == in.parseError {
				err = in.parseError
			} else {
				err = fmt.Errorf("%s:%d:%d: internal error: %v", in.filename, in.pos.Line, in.pos.LineRune, e)
			}
		}
	}()

	// Invoke the parser.
	in.parseFile()
	if in.parseError != nil {
		return nil, in.parseError
	}
	in.file.Name = in.filename

	// Assign comments to nearby syntax.
	in.assignComments()

	return in.file, nil
}

// Error is called to report an error.
// The reason s is often "syntax error".
// Error does not return: it panics.
func (in *input) Error(s string) {
	if s == "syntax error" && in.lastToken != "" {
		s += " near " + in.lastToken
	}
	in.parseError = fmt.Errorf("%s:%d:%d: %v", in.filename, in.pos.Line, in.pos.LineRune, s)
	panic(in.parseError)
}

// eof reports whether the input has reached end of file.
func (in *input) eof() bool {
	return len(in.remaining) == 0
}

// peekRune returns the next rune in the input without consuming it.
func (in *input) peekRune() int {
	if len(in.remaining) == 0 {
		return 0
	}
	r, _ := utf8.DecodeRune(in.remaining)
	return int(r)
}

// peekPrefix reports whether the remaining input begins with the given prefix.
func (in *input) peekPrefix(prefix string) bool {
	// This is like bytes.HasPrefix(in.remaining, []byte(prefix))
	// but without the allocation of the []byte copy of prefix.
	for i := 0; i < len(prefix); i++ {
		if i >= len(in.remaining) || in.remaining[i] != prefix[i] {
			return false
		}
	}
	return true
}

// readRune consumes and returns the next rune in the input.
func (in *input) readRune() int {
	if len(in.remaining) == 0 {
		in.Error("internal lexer error: readRune at EOF")
	}
	r, size := utf8.DecodeRune(in.remaining)
	in.remaining = in.remaining[size:]
	if r == '\n' {
		in.pos.Line++
		in.pos.LineRune = 1
	} else {
		in.pos.LineRune++
	}
	in.pos.Byte += size
	return int(r)
}

type symType struct {
	pos    Position
	endPos Position
	text   string
}

// startToken marks the beginning of the next input token.
// It must be followed by a call to endToken, once the token has
// been consumed using readRune.
func (in *input) startToken(sym *symType) {
	in.token = in.remaining
	sym.text = ""
	sym.pos = in.pos
}

// endToken marks the end of an input token.
// It records the actual token string in sym.text if the caller
// has not done that already.
func (in *input) endToken(sym *symType) {
	if sym.text == "" {
		tok := string(in.token[:len(in.token)-len(in.remaining)])
		sym.text = tok
		in.lastToken = sym.text
	}
	sym.endPos = in.pos
}

// lex is called from the parser to obtain the next input token.
// It returns the token value (either a rune like '\n' or a symbolic
// token such as _STRING) and sets sym to the data associated with
// the token. For all our input tokens, the associated data is
// sym.pos (the position where the token begins)
// and sym.text (the input string corresponding to the token).
func (in *input) lex(sym *symType) int {
	// Skip past spaces, stopping at non-space or EOF.
	countNL := 0 // number of newlines we've skipped past
	for !in.eof() {
		// Skip over spaces. Count newlines so we can give the parser
		// information about where top-level blank lines are,
		// for top-level comment assignment.
		c := in.peekRune()
		if c == ' ' || c == '\t' || c == '\r' {
			in.readRune()
			continue
		}

		// Comment runs to end of line.
		if in.peekPrefix("//") {
			in.startToken(sym)

			// Is this comment the only thing on its line?
			// Find the last \n before this // and see if it's all
			// spaces from there to here.
			i := bytes.LastIndex(in.complete[:in.pos.Byte], []byte("\n"))
			suffix := len(bytes.TrimSpace(in.complete[i+1:in.pos.Byte])) > 0
			in.readRune()
			in.readRune()

			// Consume comment.
			for len(in.remaining) > 0 && in.readRune() != '\n' {
			}
			in.endToken(sym)

			sym.text = strings.TrimRight(sym.text, "\n")
			in.lastToken = "comment"

			// If we are at top level (not in a statement), hand the comment to
			// the parser as a _COMMENT token. The grammar is written
			// to handle top-level comments itself.
			if !suffix {
				// Not in a statement. Tell parser about top-level comment.
				return _COMMENT
			}

			// Otherwise, save comment for later attachment to syntax tree.
			if countNL > 1 {
				in.comments = append(in.comments, Comment{sym.pos, "", false})
			}
			in.comments = append(in.comments, Comment{sym.pos, sym.text, suffix})
			countNL = 1
			return _EOL
		}

		if in.peekPrefix("/*") {
			in.Error(fmt.Sprintf("mod files must use // comments (not /* */ comments)"))
		}

		// Found non-space non-comment.
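		// Anything else starts a real token; fall out of the
		// whitespace/comment loop and scan it below.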
		break
	}

	// Found the beginning of the next token.
	in.startToken(sym)
	defer in.endToken(sym)

	// End of file.
	if in.eof() {
		in.lastToken = "EOF"
		return _EOF
	}

	// Punctuation tokens.
	switch c := in.peekRune(); c {
	case '\n':
		in.readRune()
		return c

	case '(':
		in.readRune()
		return c

	case ')':
		in.readRune()
		return c

	case '"', '`': // quoted string
		quote := c
		in.readRune()
		for {
			if in.eof() {
				in.pos = sym.pos
				in.Error("unexpected EOF in string")
			}
			if in.peekRune() == '\n' {
				in.Error("unexpected newline in string")
			}
			c := in.readRune()
			if c == quote {
				break
			}
			if c == '\\' && quote != '`' {
				if in.eof() {
					in.pos = sym.pos
					in.Error("unexpected EOF in string")
				}
				in.readRune()
			}
		}
		in.endToken(sym)
		return _STRING
	}

	// Checked all punctuation. Must be identifier token.
	if c := in.peekRune(); !isIdent(c) {
		in.Error(fmt.Sprintf("unexpected input character %#q", c))
	}

	// Scan over identifier.
	for isIdent(in.peekRune()) {
		if in.peekPrefix("//") {
			break
		}
		if in.peekPrefix("/*") {
			in.Error(fmt.Sprintf("mod files must use // comments (not /* */ comments)"))
		}
		in.readRune()
	}
	return _IDENT
}

// isIdent reports whether c is an identifier rune.
// We treat nearly all runes as identifier runes.
func isIdent(c int) bool {
	return c != 0 && !unicode.IsSpace(rune(c))
}

// Comment assignment.
// We build two lists of all subexpressions, preorder and postorder.
// The preorder list is ordered by start location, with outer expressions first.
// The postorder list is ordered by end location, with outer expressions last.
// We use the preorder list to assign each whole-line comment to the syntax
// immediately following it, and we use the postorder list to assign each
// end-of-line comment to the syntax immediately preceding it.

// order walks the expression adding it and its subexpressions to the
// preorder and postorder lists.
func (in *input) order(x Expr) {
	if x != nil {
		in.pre = append(in.pre, x)
	}
	switch x := x.(type) {
	default:
		panic(fmt.Errorf("order: unexpected type %T", x))
	case nil:
		// nothing
	case *LParen, *RParen:
		// nothing
	case *CommentBlock:
		// nothing
	case *Line:
		// nothing
	case *FileSyntax:
		for _, stmt := range x.Stmt {
			in.order(stmt)
		}
	case *LineBlock:
		in.order(&x.LParen)
		for _, l := range x.Line {
			in.order(l)
		}
		in.order(&x.RParen)
	}
	if x != nil {
		in.post = append(in.post, x)
	}
}

// assignComments attaches comments to nearby syntax.
func (in *input) assignComments() {
	const debug = false

	// Generate preorder and postorder lists.
	in.order(in.file)

	// Split into whole-line comments and suffix comments.
	var line, suffix []Comment
	for _, com := range in.comments {
		if com.Suffix {
			suffix = append(suffix, com)
		} else {
			line = append(line, com)
		}
	}

	if debug {
		for _, c := range line {
			fmt.Fprintf(os.Stderr, "LINE %q :%d:%d #%d\n", c.Token, c.Start.Line, c.Start.LineRune, c.Start.Byte)
		}
	}

	// Assign line comments to syntax immediately following.
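	// The preorder list is sorted by start position with outer
	// expressions first, so each whole-line comment attaches to the
	// outermost expression that begins at or after the comment.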
	for _, x := range in.pre {
		start, _ := x.Span()
		if debug {
			fmt.Printf("pre %T :%d:%d #%d\n", x, start.Line, start.LineRune, start.Byte)
		}
		xcom := x.Comment()
		for len(line) > 0 && start.Byte >= line[0].Start.Byte {
			if debug {
				fmt.Fprintf(os.Stderr, "ASSIGN LINE %q #%d\n", line[0].Token, line[0].Start.Byte)
			}
			xcom.Before = append(xcom.Before, line[0])
			line = line[1:]
		}
	}

	// Remaining line comments go at end of file.
	in.file.After = append(in.file.After, line...)

	if debug {
		for _, c := range suffix {
			fmt.Fprintf(os.Stderr, "SUFFIX %q :%d:%d #%d\n", c.Token, c.Start.Line, c.Start.LineRune, c.Start.Byte)
		}
	}

	// Assign suffix comments to syntax immediately before.
	for i := len(in.post) - 1; i >= 0; i-- {
		x := in.post[i]

		start, end := x.Span()
		if debug {
			fmt.Printf("post %T :%d:%d #%d :%d:%d #%d\n", x, start.Line, start.LineRune, start.Byte, end.Line, end.LineRune, end.Byte)
		}

		// Do not assign suffix comments to end of line block or whole file.
		// Instead assign them to the last element inside.
		switch x.(type) {
		case *FileSyntax:
			continue
		}

		// Do not assign suffix comments to something that starts
		// on an earlier line, so that in
		//
		//	x ( y
		//	z ) // comment
		//
		// we assign the comment to z and not to x ( ... ).
		if start.Line != end.Line {
			continue
		}
		xcom := x.Comment()
		for len(suffix) > 0 && end.Byte <= suffix[len(suffix)-1].Start.Byte {
			if debug {
				fmt.Fprintf(os.Stderr, "ASSIGN SUFFIX %q #%d\n", suffix[len(suffix)-1].Token, suffix[len(suffix)-1].Start.Byte)
			}
			xcom.Suffix = append(xcom.Suffix, suffix[len(suffix)-1])
			suffix = suffix[:len(suffix)-1]
		}
	}

	// We assigned suffix comments in reverse.
	// If multiple suffix comments were appended to the same
	// expression node, they are now in reverse. Fix that.
	for _, x := range in.post {
		reverseComments(x.Comment().Suffix)
	}

	// Remaining suffix comments go at beginning of file.
	in.file.Before = append(in.file.Before, suffix...)
}

// reverseComments reverses the []Comment list.
func reverseComments(list []Comment) {
	for i, j := 0, len(list)-1; i < j; i, j = i+1, j-1 {
		list[i], list[j] = list[j], list[i]
	}
}

func (in *input) parseFile() {
	in.file = new(FileSyntax)
	var sym symType
	var cb *CommentBlock
	for {
		tok := in.lex(&sym)
		switch tok {
		case '\n':
			if cb != nil {
				in.file.Stmt = append(in.file.Stmt, cb)
				cb = nil
			}
		case _COMMENT:
			if cb == nil {
				cb = &CommentBlock{Start: sym.pos}
			}
			com := cb.Comment()
			com.Before = append(com.Before, Comment{Start: sym.pos, Token: sym.text})
		case _EOF:
			if cb != nil {
				in.file.Stmt = append(in.file.Stmt, cb)
			}
			return
		default:
			in.parseStmt(&sym)
			if cb != nil {
				in.file.Stmt[len(in.file.Stmt)-1].Comment().Before = cb.Before
				cb = nil
			}
		}
	}
}

func (in *input) parseStmt(sym *symType) {
	start := sym.pos
	end := sym.endPos
	token := []string{sym.text}
	for {
		tok := in.lex(sym)
		switch tok {
		case '\n', _EOF, _EOL:
			in.file.Stmt = append(in.file.Stmt, &Line{
				Start: start,
				Token: token,
				End:   end,
			})
			return
		case '(':
			in.file.Stmt = append(in.file.Stmt, in.parseLineBlock(start, token, sym))
			return
		default:
			token = append(token, sym.text)
			end = sym.endPos
		}
	}
}

func (in *input) parseLineBlock(start Position, token []string, sym *symType) *LineBlock {
	x := &LineBlock{
		Start:  start,
		Token:  token,
		LParen: LParen{Pos: sym.pos},
	}
	var comments []Comment
	for {
		tok := in.lex(sym)
		switch tok {
		case _EOL:
			// ignore
		case '\n':
			if len(comments) == 0 && len(x.Line) > 0 || len(comments) > 0 && comments[len(comments)-1].Token != "" {
				comments = append(comments, Comment{})
			}
		case _COMMENT:
			comments = append(comments, Comment{Start: sym.pos, Token: sym.text})
		case _EOF:
			in.Error(fmt.Sprintf("syntax error (unterminated block started at %s:%d:%d)", in.filename, x.Start.Line, x.Start.LineRune))
		case ')':
			x.RParen.Before = comments
			x.RParen.Pos = sym.pos
			tok = in.lex(sym)
			if tok != '\n' && tok != _EOF && tok != _EOL {
				in.Error("syntax error (expected newline after closing paren)")
			}
			return x
		default:
			l := in.parseLine(sym)
			x.Line = append(x.Line, l)
			l.Comment().Before = comments
			comments = nil
		}
	}
}

func (in *input) parseLine(sym *symType) *Line {
	start := sym.pos
	end := sym.endPos
	token := []string{sym.text}
	for {
		tok := in.lex(sym)
		switch tok {
		case '\n', _EOF, _EOL:
			return &Line{
				Start:   start,
				Token:   token,
				End:     end,
				InBlock: true,
			}
		default:
			token = append(token, sym.text)
			end = sym.endPos
		}
	}
}

const (
	_EOF = -(1 + iota)
	_EOL
	_IDENT
	_STRING
	_COMMENT
)

var (
	slashSlash = []byte("//")
	moduleStr  = []byte("module")
)

// ModulePath returns the module path from the z.mod file text.
// If it cannot find a module path, it returns an empty string.
// It is tolerant of unrelated problems in the z.mod file.
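//
// For example, given input containing the line
//
//	module "example.com/hello"
//
// ModulePath returns "example.com/hello" (the path here is purely
// illustrative).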
func ModulePath(mod []byte) string {
	for len(mod) > 0 {
		line := mod
		mod = nil
		if i := bytes.IndexByte(line, '\n'); i >= 0 {
			line, mod = line[:i], line[i+1:]
		}
		if i := bytes.Index(line, slashSlash); i >= 0 {
			line = line[:i]
		}
		line = bytes.TrimSpace(line)
		if !bytes.HasPrefix(line, moduleStr) {
			continue
		}
		line = line[len(moduleStr):]
		n := len(line)
		line = bytes.TrimSpace(line)
		if len(line) == n || len(line) == 0 {
			continue
		}

		if line[0] == '"' || line[0] == '`' {
			p, err := strconv.Unquote(string(line))
			if err != nil {
				return "" // malformed quoted string or multiline module path
			}
			return p
		}

		return string(line)
	}
	return "" // missing module path
}