github.com/evanw/esbuild@v0.21.4/internal/sourcemap/sourcemap.go

package sourcemap

import (
	"bytes"
	"unicode/utf8"

	"github.com/evanw/esbuild/internal/ast"
	"github.com/evanw/esbuild/internal/helpers"
	"github.com/evanw/esbuild/internal/logger"
)

type Mapping struct {
	GeneratedLine   int32 // 0-based
	GeneratedColumn int32 // 0-based count of UTF-16 code units

	SourceIndex    int32       // 0-based
	OriginalLine   int32       // 0-based
	OriginalColumn int32       // 0-based count of UTF-16 code units
	OriginalName   ast.Index32 // 0-based, optional
}

type SourceMap struct {
	Sources        []string
	SourcesContent []SourceContent
	Mappings       []Mapping
	Names          []string
}

type SourceContent struct {
	// This stores both the unquoted and the quoted values. We try to use the
	// already-quoted value if possible so we don't need to re-quote it
	// unnecessarily for maximum performance.
	Quoted string

	// But sometimes we need to re-quote the value, such as when it contains
	// non-ASCII characters and we are in ASCII-only mode. In that case we quote
	// this parsed UTF-16 value.
	Value []uint16
}

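// Find returns the last mapping whose generated position is at or before the
// given generated line and column, or nil if no mapping on that line comes at
// or before that column. Columns are counted in UTF-16 code units.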
func (sm *SourceMap) Find(line int32, column int32) *Mapping {
	mappings := sm.Mappings

	// Binary search
	count := len(mappings)
	index := 0
	for count > 0 {
		step := count / 2
		i := index + step
		mapping := mappings[i]
		if mapping.GeneratedLine < line || (mapping.GeneratedLine == line && mapping.GeneratedColumn <= column) {
			index = i + 1
			count -= step + 1
		} else {
			count = step
		}
	}

	// Handle search failure
	if index > 0 {
		mapping := &mappings[index-1]

		// Match the behavior of the popular "source-map" library from Mozilla
		if mapping.GeneratedLine == line {
			return mapping
		}
	}
	return nil
}

var base64 = []byte("ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/")

// A single base 64 digit can contain 6 bits of data. For the base 64 variable
// length quantities we use in the source map spec, the first bit is the sign,
// the next four bits are the actual value, and the 6th bit is the continuation
// bit. The continuation bit tells us whether there are more digits in this
// value following this digit.
//
//	Continuation
//	|    Sign
//	|    |
//	V    V
//	101011
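//
// For example, the value 16 shifts to the bit pattern 100000, which needs two
// digits: the low five bits "00000" are emitted first with the continuation
// bit set ("g"), then the remaining bit "1" is emitted ("B"), so 16 encodes
// as "gB".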
func encodeVLQ(encoded []byte, value int) []byte {
	var vlq int
	if value < 0 {
		vlq = ((-value) << 1) | 1
	} else {
		vlq = value << 1
	}

	// Handle the common case
	if (vlq >> 5) == 0 {
		digit := vlq & 31
		encoded = append(encoded, base64[digit])
		return encoded
	}

	for {
		digit := vlq & 31
		vlq >>= 5

		// If there are still more digits in this value, we must make sure the
		// continuation bit is marked
		if vlq != 0 {
			digit |= 32
		}

		encoded = append(encoded, base64[digit])

		if vlq == 0 {
			break
		}
	}

	return encoded
}

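// DecodeVLQ decodes a single VLQ-encoded value from "encoded" starting at the
// byte offset "start". It returns the decoded value and the offset just past
// the last byte that was consumed.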
func DecodeVLQ(encoded []byte, start int) (int, int) {
	shift := 0
	vlq := 0

	// Scan over the input
	for {
		index := bytes.IndexByte(base64, encoded[start])
		if index < 0 {
			break
		}

		// Decode a single byte
		vlq |= (index & 31) << shift
		start++
		shift += 5

		// Stop if there's no continuation bit
		if (index & 32) == 0 {
			break
		}
	}

	// Recover the value
	value := vlq >> 1
	if (vlq & 1) != 0 {
		value = -value
	}
	return value, start
}

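// DecodeVLQUTF16 is like DecodeVLQ except that it reads UTF-16 code units
// starting at the beginning of "encoded". It returns the decoded value, the
// number of code units consumed, and whether decoding succeeded.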
func DecodeVLQUTF16(encoded []uint16) (int32, int, bool) {
	n := len(encoded)
	if n == 0 {
		return 0, 0, false
	}

	// Scan over the input
	current := 0
	shift := 0
	var vlq int32
	for {
		if current >= n {
			return 0, 0, false
		}
		index := int32(bytes.IndexByte(base64, byte(encoded[current])))
		if index < 0 {
			return 0, 0, false
		}

		// Decode a single byte
		vlq |= (index & 31) << shift
		current++
		shift += 5

		// Stop if there's no continuation bit
		if (index & 32) == 0 {
			break
		}
	}

	// Recover the value
	var value = vlq >> 1
	if (vlq & 1) != 0 {
		value = -value
	}
	return value, current, true
}

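// A LineColumnOffset is a position in a file expressed as a number of lines
// followed by a number of columns on the final line, where columns are
// counted in UTF-16 code units.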
type LineColumnOffset struct {
	Lines   int
	Columns int
}

func (a LineColumnOffset) ComesBefore(b LineColumnOffset) bool {
	return a.Lines < b.Lines || (a.Lines == b.Lines && a.Columns < b.Columns)
}

func (a *LineColumnOffset) Add(b LineColumnOffset) {
	if b.Lines == 0 {
		a.Columns += b.Columns
	} else {
		a.Lines += b.Lines
		a.Columns = b.Columns
	}
}

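// AdvanceBytes advances the offset past the given UTF-8 text, incrementing
// the line count for each newline and tracking the column in UTF-16 code
// units. AdvanceString below does the same for a string.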
func (offset *LineColumnOffset) AdvanceBytes(bytes []byte) {
	columns := offset.Columns
	for len(bytes) > 0 {
		c, width := utf8.DecodeRune(bytes)
		bytes = bytes[width:]
		switch c {
		case '\r', '\n', '\u2028', '\u2029':
			// Handle Windows-specific "\r\n" newlines
			if c == '\r' && len(bytes) > 0 && bytes[0] == '\n' {
				columns++
				continue
			}

			offset.Lines++
			columns = 0

		default:
			// Mozilla's "source-map" library counts columns using UTF-16 code units
			if c <= 0xFFFF {
				columns++
			} else {
				columns += 2
			}
		}
	}
	offset.Columns = columns
}

func (offset *LineColumnOffset) AdvanceString(text string) {
	columns := offset.Columns
	for i, c := range text {
		switch c {
		case '\r', '\n', '\u2028', '\u2029':
			// Handle Windows-specific "\r\n" newlines
			if c == '\r' && i+1 < len(text) && text[i+1] == '\n' {
				columns++
				continue
			}

			offset.Lines++
			columns = 0

		default:
			// Mozilla's "source-map" library counts columns using UTF-16 code units
			if c <= 0xFFFF {
				columns++
			} else {
				columns += 2
			}
		}
	}
	offset.Columns = columns
}

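// SourceMapPieces is a serialized source map split into the text before the
// "mappings" string, the VLQ-encoded mappings themselves, and the text after
// them, so that the mappings can still be modified before the final join.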
type SourceMapPieces struct {
	Prefix   []byte
	Mappings []byte
	Suffix   []byte
}

func (pieces SourceMapPieces) HasContent() bool {
	return len(pieces.Prefix)+len(pieces.Mappings)+len(pieces.Suffix) > 0
}

type SourceMapShift struct {
	Before LineColumnOffset
	After  LineColumnOffset
}

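// Finalize applies the given column shifts to the encoded mappings and joins
// the prefix, the adjusted mappings, and the suffix back into a single
// serialized source map.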
func (pieces SourceMapPieces) Finalize(shifts []SourceMapShift) []byte {
	// An optimized path for when there are no shifts
	if len(shifts) == 1 {
		bytes := pieces.Prefix
		minCap := len(bytes) + len(pieces.Mappings) + len(pieces.Suffix)
		if cap(bytes) < minCap {
			bytes = append(make([]byte, 0, minCap), bytes...)
		}
		bytes = append(bytes, pieces.Mappings...)
		bytes = append(bytes, pieces.Suffix...)
		return bytes
	}

	startOfRun := 0
	current := 0
	generated := LineColumnOffset{}
	prevShiftColumnDelta := 0
	j := helpers.Joiner{}

	// Start the source map
	j.AddBytes(pieces.Prefix)

	// This assumes that a) all mappings are valid and b) all mappings are ordered
	// by increasing generated position. This should be the case for all mappings
	// generated by esbuild, which should be the only mappings we process here.
	for current < len(pieces.Mappings) {
		// Handle a line break
		if pieces.Mappings[current] == ';' {
			generated.Lines++
			generated.Columns = 0
			prevShiftColumnDelta = 0
			current++
			continue
		}

		potentialEndOfRun := current

		// Read the generated column
		generatedColumnDelta, next := DecodeVLQ(pieces.Mappings, current)
		generated.Columns += generatedColumnDelta
		current = next

		potentialStartOfRun := current

		// Skip over the original position information
		_, current = DecodeVLQ(pieces.Mappings, current) // The original source
		_, current = DecodeVLQ(pieces.Mappings, current) // The original line
		_, current = DecodeVLQ(pieces.Mappings, current) // The original column

		// Skip over the original name
		if current < len(pieces.Mappings) {
			if c := pieces.Mappings[current]; c != ',' && c != ';' {
				_, current = DecodeVLQ(pieces.Mappings, current)
			}
		}

		// Skip a trailing comma
		if current < len(pieces.Mappings) && pieces.Mappings[current] == ',' {
			current++
		}

		// Detect crossing shift boundaries
		didCrossBoundary := false
		for len(shifts) > 1 && shifts[1].Before.ComesBefore(generated) {
			shifts = shifts[1:]
			didCrossBoundary = true
		}
		if !didCrossBoundary {
			continue
		}

		// This shift isn't relevant if the next mapping after this shift is on a
		// following line. In that case, don't split and keep scanning instead.
		shift := shifts[0]
		if shift.After.Lines != generated.Lines {
			continue
		}

		// Add all previous mappings in a single run for efficiency. Since source
		// mappings are relative, no data needs to be modified inside this run.
		j.AddBytes(pieces.Mappings[startOfRun:potentialEndOfRun])

		// Then modify the first mapping across the shift boundary with the updated
		// generated column value. It's simplest to only support column shifts. This
		// is reasonable because import paths should not contain newlines.
		if shift.Before.Lines != shift.After.Lines {
			panic("Unexpected line change when shifting source maps")
		}
		shiftColumnDelta := shift.After.Columns - shift.Before.Columns
		j.AddBytes(encodeVLQ(nil, generatedColumnDelta+shiftColumnDelta-prevShiftColumnDelta))
		prevShiftColumnDelta = shiftColumnDelta

		// Finally, start the next run after the end of this generated column offset
		startOfRun = potentialStartOfRun
	}

	// Finish the source map
	j.AddBytes(pieces.Mappings[startOfRun:])
	j.AddBytes(pieces.Suffix)
	return j.Done()
}

// Coordinates in source maps are stored using relative offsets for size
// reasons. When joining together chunks of a source map that were emitted
// in parallel for different parts of a file, we need to fix up the first
// segment of each chunk to be relative to the end of the previous chunk.
type SourceMapState struct {
	// This isn't stored in the source map. It's only used by the bundler to join
	// source map chunks together correctly.
	GeneratedLine int

	// These are stored in the source map in VLQ format.
	GeneratedColumn int
	SourceIndex     int
	OriginalLine    int
	OriginalColumn  int
	OriginalName    int
	HasOriginalName bool
}

// Source map chunks are computed in parallel for speed. Each chunk is relative
// to the zero state instead of being relative to the end state of the previous
// chunk, since it's impossible to know the end state of the previous chunk in
// a parallel computation.
//
// After all chunks are computed, they are joined together in a second pass.
// This rewrites the first mapping in each chunk to be relative to the end
// state of the previous chunk.
func AppendSourceMapChunk(j *helpers.Joiner, prevEndState SourceMapState, startState SourceMapState, buffer MappingsBuffer) {
	// Handle line breaks in between this mapping and the previous one
	if startState.GeneratedLine != 0 {
		j.AddBytes(bytes.Repeat([]byte{';'}, startState.GeneratedLine))
		prevEndState.GeneratedColumn = 0
	}

	// Skip past any leading semicolons, which indicate line breaks
	semicolons := 0
	for buffer.Data[semicolons] == ';' {
		semicolons++
	}
	if semicolons > 0 {
		j.AddBytes(buffer.Data[:semicolons])
		prevEndState.GeneratedColumn = 0
		startState.GeneratedColumn = 0
	}

	// Strip off the first mapping from the buffer. The first mapping should be
	// for the start of the original file (the printer always generates one for
	// the start of the file).
	//
	// Note that we do not want to strip off the original name, even though it
	// could be a part of the first mapping. This will be handled using a special
	// case below instead. Original names are optional and are often omitted, so
	// we handle it uniformly by saving an index to the first original name,
	// which may or may not be a part of the first mapping.
	generatedColumn, i := DecodeVLQ(buffer.Data, semicolons)
	sourceIndex, i := DecodeVLQ(buffer.Data, i)
	originalLine, i := DecodeVLQ(buffer.Data, i)
	originalColumn, i := DecodeVLQ(buffer.Data, i)

	// Rewrite the first mapping to be relative to the end state of the previous
	// chunk. We now know what the end state is because we're in the second pass
	// where all chunks have already been generated.
	startState.SourceIndex += sourceIndex
	startState.GeneratedColumn += generatedColumn
	startState.OriginalLine += originalLine
	startState.OriginalColumn += originalColumn
	prevEndState.HasOriginalName = false // This is handled separately below
	rewritten, _ := appendMappingToBuffer(nil, j.LastByte(), prevEndState, startState)
	j.AddBytes(rewritten)

	// Next, if there's an original name, we need to rewrite that as well to be
	// relative to that of the previous chunk.
	if buffer.FirstNameOffset.IsValid() {
		before := int(buffer.FirstNameOffset.GetIndex())
		originalName, after := DecodeVLQ(buffer.Data, before)
		originalName += startState.OriginalName - prevEndState.OriginalName
		j.AddBytes(buffer.Data[i:before])
		j.AddBytes(encodeVLQ(nil, originalName))
		j.AddBytes(buffer.Data[after:])
		return
	}

	// Otherwise, just append everything after that without modification
	j.AddBytes(buffer.Data[i:])
}

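// appendMappingToBuffer encodes one mapping as a delta from the previous
// state and appends it to the buffer. It returns the new buffer along with
// the offset of the original name entry, if one was written.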
func appendMappingToBuffer(buffer []byte, lastByte byte, prevState SourceMapState, currentState SourceMapState) ([]byte, ast.Index32) {
	// Put commas in between mappings
	if lastByte != 0 && lastByte != ';' && lastByte != '"' {
		buffer = append(buffer, ',')
	}

	// Record the mapping (note that the generated line is recorded using ';' elsewhere)
	buffer = encodeVLQ(buffer, currentState.GeneratedColumn-prevState.GeneratedColumn)
	buffer = encodeVLQ(buffer, currentState.SourceIndex-prevState.SourceIndex)
	buffer = encodeVLQ(buffer, currentState.OriginalLine-prevState.OriginalLine)
	buffer = encodeVLQ(buffer, currentState.OriginalColumn-prevState.OriginalColumn)

	// Record the optional original name
	var nameOffset ast.Index32
	if currentState.HasOriginalName {
		nameOffset = ast.MakeIndex32(uint32(len(buffer)))
		buffer = encodeVLQ(buffer, currentState.OriginalName-prevState.OriginalName)
	}

	return buffer, nameOffset
}

type LineOffsetTable struct {
	// The source map specification is very loose and does not specify what
	// column numbers actually mean. The popular "source-map" library from Mozilla
	// appears to interpret them as counts of UTF-16 code units, so we generate
	// those too for compatibility.
	//
	// We keep mapping tables around to accelerate conversion from byte offsets
	// to UTF-16 code unit counts. However, this mapping takes up a lot of memory
	// and generates a lot of garbage. Since most JavaScript is ASCII and the
	// mapping for ASCII is 1:1, we avoid creating a table for ASCII-only lines
	// as an optimization.
	columnsForNonASCII        []int32
	byteOffsetToFirstNonASCII int32

	byteOffsetToStartOfLine int32
}

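// GenerateLineOffsetTables returns one LineOffsetTable per line of "contents"
// so that byte offsets can later be converted into line numbers and UTF-16
// column counts.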
func GenerateLineOffsetTables(contents string, approximateLineCount int32) []LineOffsetTable {
	var columnsForNonASCII []int32
	byteOffsetToFirstNonASCII := int32(0)
	lineByteOffset := 0
	columnByteOffset := 0
	column := int32(0)

	// Preallocate the top-level table using the approximate line count from the lexer
	lineOffsetTables := make([]LineOffsetTable, 0, approximateLineCount)

	for i, c := range contents {
		// Mark the start of the next line
		if column == 0 {
			lineByteOffset = i
		}

		// Start the mapping if this character is non-ASCII
		if c > 0x7F && columnsForNonASCII == nil {
			columnByteOffset = i - lineByteOffset
			byteOffsetToFirstNonASCII = int32(columnByteOffset)
			columnsForNonASCII = []int32{}
		}

		// Update the per-byte column offsets
		if columnsForNonASCII != nil {
			for lineBytesSoFar := i - lineByteOffset; columnByteOffset <= lineBytesSoFar; columnByteOffset++ {
				columnsForNonASCII = append(columnsForNonASCII, column)
			}
		}

		switch c {
		case '\r', '\n', '\u2028', '\u2029':
			// Handle Windows-specific "\r\n" newlines
			if c == '\r' && i+1 < len(contents) && contents[i+1] == '\n' {
				column++
				continue
			}

			lineOffsetTables = append(lineOffsetTables, LineOffsetTable{
				byteOffsetToStartOfLine:   int32(lineByteOffset),
				byteOffsetToFirstNonASCII: byteOffsetToFirstNonASCII,
				columnsForNonASCII:        columnsForNonASCII,
			})
			columnByteOffset = 0
			byteOffsetToFirstNonASCII = 0
			columnsForNonASCII = nil
			column = 0

		default:
			// Mozilla's "source-map" library counts columns using UTF-16 code units
			if c <= 0xFFFF {
				column++
			} else {
				column += 2
			}
		}
	}

	// Mark the start of the next line
	if column == 0 {
		lineByteOffset = len(contents)
	}

	// Do one last update for the column at the end of the file
	if columnsForNonASCII != nil {
		for lineBytesSoFar := len(contents) - lineByteOffset; columnByteOffset <= lineBytesSoFar; columnByteOffset++ {
			columnsForNonASCII = append(columnsForNonASCII, column)
		}
	}

	lineOffsetTables = append(lineOffsetTables, LineOffsetTable{
		byteOffsetToStartOfLine:   int32(lineByteOffset),
		byteOffsetToFirstNonASCII: byteOffsetToFirstNonASCII,
		columnsForNonASCII:        columnsForNonASCII,
	})
	return lineOffsetTables
}

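// MappingsBuffer holds the VLQ-encoded mappings for a chunk along with the
// offset of the first original name entry, which AppendSourceMapChunk needs
// when it rewrites the start of the chunk.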
type MappingsBuffer struct {
	Data            []byte
	FirstNameOffset ast.Index32
}

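// A Chunk holds the source map data generated for one piece of the output
// file. Chunks are generated in parallel and stitched together afterward by
// AppendSourceMapChunk.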
type Chunk struct {
	Buffer      MappingsBuffer
	QuotedNames [][]byte

	// This end state will be used to rewrite the start of the following source
	// map chunk so that the delta-encoded VLQ numbers are preserved.
	EndState SourceMapState

	// There probably isn't a source mapping at the end of the file (nor should
	// there be) but if we're appending another source map chunk after this one,
	// we'll need to know how many characters were in the last line we generated.
	FinalGeneratedColumn int

	ShouldIgnore bool
}

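// A ChunkBuilder incrementally accumulates the encoded mappings for a single
// source map chunk as mappings are added for the generated output.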
type ChunkBuilder struct {
	inputSourceMap      *SourceMap
	sourceMap           []byte
	quotedNames         [][]byte
	namesMap            map[string]uint32
	lineOffsetTables    []LineOffsetTable
	prevOriginalName    string
	prevState           SourceMapState
	lastGeneratedUpdate int
	generatedColumn     int
	prevGeneratedLen    int
	prevOriginalLoc     logger.Loc
	firstNameOffset     ast.Index32
	hasPrevState        bool
	asciiOnly           bool

	// This is a workaround for a bug in the popular "source-map" library:
	// https://github.com/mozilla/source-map/issues/261. The library will
	// sometimes return null when querying a source map unless every line
	// starts with a mapping at column zero.
	//
	// The workaround is to replicate the previous mapping if a line ends
	// up not starting with a mapping. This is done lazily because we want
	// to avoid replicating the previous mapping if we don't need to.
	lineStartsWithMapping     bool
	coverLinesWithoutMappings bool
}

func MakeChunkBuilder(inputSourceMap *SourceMap, lineOffsetTables []LineOffsetTable, asciiOnly bool) ChunkBuilder {
	return ChunkBuilder{
		inputSourceMap:   inputSourceMap,
		prevOriginalLoc:  logger.Loc{Start: -1},
		lineOffsetTables: lineOffsetTables,
		asciiOnly:        asciiOnly,
		namesMap:         make(map[string]uint32),

		// We automatically repeat the previous source mapping if we ever generate
		// a line that doesn't start with a mapping. This helps give files more
		// complete mapping coverage without gaps.
		//
		// However, we probably shouldn't do this if the input file has a nested
		// source map that we will be remapping through. We have no idea what state
		// that source map is in and it could be pretty scrambled.
		//
		// I've seen cases where blindly repeating the last mapping for subsequent
		// lines gives very strange and unhelpful results with source maps from
		// other tools.
		coverLinesWithoutMappings: inputSourceMap == nil,
	}
}

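// AddSourceMapping records a mapping from the given location in the original
// source (with an optional original name) to the current end of the generated
// output in "output".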
func (b *ChunkBuilder) AddSourceMapping(originalLoc logger.Loc, originalName string, output []byte) {
	// Avoid generating duplicate mappings
	if originalLoc == b.prevOriginalLoc && (b.prevGeneratedLen == len(output) || b.prevOriginalName == originalName) {
		return
	}

	b.prevOriginalLoc = originalLoc
	b.prevGeneratedLen = len(output)
	b.prevOriginalName = originalName

	// Binary search to find the line
	lineOffsetTables := b.lineOffsetTables
	count := len(lineOffsetTables)
	originalLine := 0
	for count > 0 {
		step := count / 2
		i := originalLine + step
		if lineOffsetTables[i].byteOffsetToStartOfLine <= originalLoc.Start {
			originalLine = i + 1
			count = count - step - 1
		} else {
			count = step
		}
	}
	originalLine--

	// Use the line to compute the column
	line := &lineOffsetTables[originalLine]
	originalColumn := int(originalLoc.Start - line.byteOffsetToStartOfLine)
	if line.columnsForNonASCII != nil && originalColumn >= int(line.byteOffsetToFirstNonASCII) {
		originalColumn = int(line.columnsForNonASCII[originalColumn-int(line.byteOffsetToFirstNonASCII)])
	}

	b.updateGeneratedLineAndColumn(output)

	// If this line doesn't start with a mapping and we're about to add a mapping
	// that's not at the start, insert a mapping first so the line starts with one.
	if b.coverLinesWithoutMappings && !b.lineStartsWithMapping && b.generatedColumn > 0 && b.hasPrevState {
		b.appendMappingWithoutRemapping(SourceMapState{
			GeneratedLine:   b.prevState.GeneratedLine,
			GeneratedColumn: 0,
			SourceIndex:     b.prevState.SourceIndex,
			OriginalLine:    b.prevState.OriginalLine,
			OriginalColumn:  b.prevState.OriginalColumn,
		})
	}

	b.appendMapping(originalName, SourceMapState{
		GeneratedLine:   b.prevState.GeneratedLine,
		GeneratedColumn: b.generatedColumn,
		OriginalLine:    originalLine,
		OriginalColumn:  originalColumn,
	})

	// This line now has a mapping on it, so don't insert another one
	b.lineStartsWithMapping = true
}

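// GenerateChunk finishes the chunk for the given generated output. The
// ShouldIgnore flag is set if the accumulated mappings contain nothing but
// line break markers.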
func (b *ChunkBuilder) GenerateChunk(output []byte) Chunk {
	b.updateGeneratedLineAndColumn(output)
	shouldIgnore := true
	for _, c := range b.sourceMap {
		if c != ';' {
			shouldIgnore = false
			break
		}
	}
	return Chunk{
		Buffer: MappingsBuffer{
			Data:            b.sourceMap,
			FirstNameOffset: b.firstNameOffset,
		},
		QuotedNames:          b.quotedNames,
		EndState:             b.prevState,
		FinalGeneratedColumn: b.generatedColumn,
		ShouldIgnore:         shouldIgnore,
	}
}

// Scan over the printed text since the last source mapping and update the
// generated line and column numbers
func (b *ChunkBuilder) updateGeneratedLineAndColumn(output []byte) {
	for i, c := range string(output[b.lastGeneratedUpdate:]) {
		switch c {
		case '\r', '\n', '\u2028', '\u2029':
			// Handle Windows-specific "\r\n" newlines
			if c == '\r' {
				newlineCheck := b.lastGeneratedUpdate + i + 1
				if newlineCheck < len(output) && output[newlineCheck] == '\n' {
					continue
				}
			}

			// If we're about to move to the next line and the previous line didn't have
			// any mappings, add a mapping at the start of the previous line.
			if b.coverLinesWithoutMappings && !b.lineStartsWithMapping && b.hasPrevState {
				b.appendMappingWithoutRemapping(SourceMapState{
					GeneratedLine:   b.prevState.GeneratedLine,
					GeneratedColumn: 0,
					SourceIndex:     b.prevState.SourceIndex,
					OriginalLine:    b.prevState.OriginalLine,
					OriginalColumn:  b.prevState.OriginalColumn,
				})
			}

			b.prevState.GeneratedLine++
			b.prevState.GeneratedColumn = 0
			b.generatedColumn = 0
			b.sourceMap = append(b.sourceMap, ';')

			// This new line doesn't have a mapping yet
			b.lineStartsWithMapping = false

		default:
			// Mozilla's "source-map" library counts columns using UTF-16 code units
			if c <= 0xFFFF {
				b.generatedColumn++
			} else {
				b.generatedColumn += 2
			}
		}
	}

	b.lastGeneratedUpdate = len(output)
}

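// appendMapping remaps the state through the input source map if there is
// one, registers the original name if any, and then appends the mapping to
// the chunk.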
func (b *ChunkBuilder) appendMapping(originalName string, currentState SourceMapState) {
	// If the input file had a source map, map all the way back to the original
	if b.inputSourceMap != nil {
		mapping := b.inputSourceMap.Find(
			int32(currentState.OriginalLine),
			int32(currentState.OriginalColumn))

		// Some locations won't have a mapping
		if mapping == nil {
			return
		}

		currentState.SourceIndex = int(mapping.SourceIndex)
		currentState.OriginalLine = int(mapping.OriginalLine)
		currentState.OriginalColumn = int(mapping.OriginalColumn)

		// Map all the way back to the original name if present. Otherwise, keep
		// the original name from esbuild, which corresponds to the name in the
		// intermediate source code. This is important for tools that only emit
		// a name mapping when the name is different than the original name.
		if mapping.OriginalName.IsValid() {
			originalName = b.inputSourceMap.Names[mapping.OriginalName.GetIndex()]
		}
	}

	// Optionally reference the original name
	if originalName != "" {
		i, ok := b.namesMap[originalName]
		if !ok {
			i = uint32(len(b.quotedNames))
			b.quotedNames = append(b.quotedNames, helpers.QuoteForJSON(originalName, b.asciiOnly))
			b.namesMap[originalName] = i
		}
		currentState.OriginalName = int(i)
		currentState.HasOriginalName = true
	}

	b.appendMappingWithoutRemapping(currentState)
}

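// appendMappingWithoutRemapping appends the given state to the chunk's
// mappings, delta-encoded against the previous state.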
func (b *ChunkBuilder) appendMappingWithoutRemapping(currentState SourceMapState) {
	var lastByte byte
	if len(b.sourceMap) != 0 {
		lastByte = b.sourceMap[len(b.sourceMap)-1]
	}

	var nameOffset ast.Index32
	b.sourceMap, nameOffset = appendMappingToBuffer(b.sourceMap, lastByte, b.prevState, currentState)
	prevOriginalName := b.prevState.OriginalName
	b.prevState = currentState
	if !currentState.HasOriginalName {
		// Revert the original name change if it's invalid
		b.prevState.OriginalName = prevOriginalName
	} else if !b.firstNameOffset.IsValid() {
		// Keep track of the first name offset so we can jump right to it later
		b.firstNameOffset = nameOffset
	}
	b.hasPrevState = true
}