github.com/nicocha30/gvisor-ligolo@v0.0.0-20230726075806-989fa2c0a413/pkg/buffer/buffer.go

// Copyright 2022 The gVisor Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

// Package buffer provides the implementation of a non-contiguous buffer that
// is reference counted, pooled, and copy-on-write. It allows O(1) append
// and prepend operations.
package buffer

import (
	"fmt"
	"io"

	"github.com/nicocha30/gvisor-ligolo/pkg/tcpip/checksum"
)

// Buffer is a non-linear buffer.
//
// +stateify savable
type Buffer struct {
	data viewList `state:".([]byte)"`
	size int64
}

func (b *Buffer) removeView(v *View) {
	b.data.Remove(v)
	v.Release()
}

// MakeWithData creates a new Buffer initialized with the given data. This
// function should be used with caution to avoid unnecessary []byte
// allocations. When in doubt, use MakeWithView to maximize chunk reuse.
func MakeWithData(b []byte) Buffer {
	buf := Buffer{}
	if len(b) == 0 {
		return buf
	}
	v := NewViewWithData(b)
	buf.Append(v)
	return buf
}

// MakeWithView creates a new Buffer initialized with the given view. This
// function takes ownership of v.
func MakeWithView(v *View) Buffer {
	if v == nil {
		return Buffer{}
	}
	b := Buffer{
		size: int64(v.Size()),
	}
	if b.size == 0 {
		v.Release()
		return b
	}
	b.data.PushBack(v)
	return b
}

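// exampleMakeBuffer is an illustrative sketch, not part of the original API:
// it shows the two constructors side by side. Both take ownership of their
// input, so only the resulting Buffers need to be released.
func exampleMakeBuffer() {
	// From a raw []byte; the bytes end up in a single View.
	b1 := MakeWithData([]byte("hello"))
	defer b1.Release()

	// From an existing View; MakeWithView takes ownership of v, so the caller
	// must not use (or release) v afterwards.
	v := NewViewWithData([]byte("world"))
	b2 := MakeWithView(v)
	defer b2.Release()

	fmt.Println(b1.Size(), b2.Size()) // 5 5
}
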
// Release frees all resources held by b.
func (b *Buffer) Release() {
	for v := b.data.Front(); v != nil; v = b.data.Front() {
		b.removeView(v)
	}
	b.size = 0
}

// TrimFront removes the first count bytes from the buffer.
func (b *Buffer) TrimFront(count int64) {
	if count >= b.size {
		b.advanceRead(b.size)
	} else {
		b.advanceRead(count)
	}
}

// ReadAt implements io.ReaderAt.ReadAt.
func (b *Buffer) ReadAt(p []byte, offset int64) (int, error) {
	var (
		skipped int64
		done    int64
	)
	for v := b.data.Front(); v != nil && done < int64(len(p)); v = v.Next() {
		needToSkip := int(offset - skipped)
		if sz := v.Size(); sz <= needToSkip {
			skipped += int64(sz)
			continue
		}

		// Actually read data.
		n := copy(p[done:], v.AsSlice()[needToSkip:])
		skipped += int64(needToSkip)
		done += int64(n)
	}
	if int(done) < len(p) || offset+done == b.size {
		return int(done), io.EOF
	}
	return int(done), nil
}

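// exampleReadAt is an illustrative sketch, not part of the original API: it
// reads a window of bytes at an offset without consuming them, per the usual
// io.ReaderAt contract.
func exampleReadAt() {
	b := MakeWithData([]byte("0123456789"))
	defer b.Release()

	p := make([]byte, 4)
	n, _ := b.ReadAt(p, 3)
	fmt.Println(n, string(p[:n])) // 4 3456

	// ReadAt never consumes data; the buffer still holds all 10 bytes.
	fmt.Println(b.Size()) // 10
}
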
// advanceRead advances the Buffer's read index.
//
// Precondition: there must be sufficient bytes in the buffer.
func (b *Buffer) advanceRead(count int64) {
	for v := b.data.Front(); v != nil && count > 0; {
		sz := int64(v.Size())
		if sz > count {
			// There is still data for reading.
			v.TrimFront(int(count))
			b.size -= count
			count = 0
			return
		}

		// Consume the whole view.
		oldView := v
		v = v.Next() // Iterate.
		b.removeView(oldView)

		// Update counts.
		count -= sz
		b.size -= sz
	}
	if count > 0 {
		panic(fmt.Sprintf("advanceRead still has %d bytes remaining", count))
	}
}

// Truncate truncates the Buffer to the given length.
//
// This will not grow the Buffer, only shrink it. If a length is passed that is
// greater than the current size of the Buffer, then nothing will happen.
//
// Precondition: length must be >= 0.
func (b *Buffer) Truncate(length int64) {
	if length < 0 {
		panic("negative length provided")
	}
	if length >= b.size {
		return // Nothing to do.
	}
	for v := b.data.Back(); v != nil && b.size > length; v = b.data.Back() {
		sz := int64(v.Size())
		if after := b.size - sz; after < length {
			// Truncate the view locally.
			left := (length - after)
			v.write = v.read + int(left)
			b.size = length
			break
		}

		// Drop the view completely; see above.
		b.removeView(v)
		b.size -= sz
	}
}

// GrowTo grows the Buffer to length bytes, appending new bytes as needed. If
// zero is true, the appended bytes will be zeroed; if zero is false, their
// contents are the caller's responsibility.
//
// Precondition: length must be >= 0.
func (b *Buffer) GrowTo(length int64, zero bool) {
	if length < 0 {
		panic("negative length provided")
	}
	for b.size < length {
		v := b.data.Back()

		// Is there some space in the last buffer?
		if v.Full() {
			v = NewView(int(length - b.size))
			b.data.PushBack(v)
		}

		// Write up to length bytes.
		sz := v.AvailableSize()
		if int64(sz) > length-b.size {
			sz = int(length - b.size)
		}

		// Zero the written section; note that this pattern is
		// specifically recognized and optimized by the compiler.
		if zero {
			for i := v.write; i < v.write+sz; i++ {
				v.chunk.data[i] = 0
			}
		}

		// Advance the index.
		v.Grow(sz)
		b.size += int64(sz)
	}
}

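// exampleGrowTo is an illustrative sketch, not part of the original API: it
// reserves space at the end of a Buffer, asking for the new bytes to be
// zeroed so they can be filled in later.
func exampleGrowTo() {
	b := MakeWithData([]byte{0xff})
	defer b.Release()

	b.GrowTo(8, true /* zero */)
	fmt.Println(b.Size(), b.Flatten()) // 8 [255 0 0 0 0 0 0 0]
}
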
// Prepend prepends the given data. Prepend takes ownership of src.
func (b *Buffer) Prepend(src *View) error {
	if src == nil {
		return nil
	}
	if src.Size() == 0 {
		src.Release()
		return nil
	}
	// If the first view has no room at the front (or there is no view at
	// all), just prepend the view.
	v := b.data.Front()
	if v == nil || v.read == 0 {
		b.prependOwned(src)
		return nil
	}

	// If there's room at the front and we won't incur a copy by writing to this
	// view, fill in the extra room first.
	if !v.sharesChunk() {
		avail := v.read
		vStart := 0
		srcStart := src.Size() - avail
		if avail > src.Size() {
			vStart = avail - src.Size()
			srcStart = 0
		}
		// Save the write index and restore it after.
		old := v.write
		v.read = vStart
		n, err := v.WriteAt(src.AsSlice()[srcStart:], 0)
		if err != nil {
			return fmt.Errorf("could not write to view during prepend: %w", err)
		}
		b.size += int64(n)
		v.write = old
		src.write = srcStart

		// If there's no more to be written, then we're done.
		if src.Size() == 0 {
			src.Release()
			return nil
		}
	}

	// Otherwise, just prepend the view.
	b.prependOwned(src)
	return nil
}

// Append appends the given data. Append takes ownership of src.
func (b *Buffer) Append(src *View) error {
	if src == nil {
		return nil
	}
	if src.Size() == 0 {
		src.Release()
		return nil
	}
	// If the last view is full, just append the new view.
	v := b.data.Back()
	if v.Full() {
		b.appendOwned(src)
		return nil
	}

	// If a write won't incur a copy, then fill the back of the existing last
	// chunk.
	if !v.sharesChunk() {
		writeSz := src.Size()
		if src.Size() > v.AvailableSize() {
			writeSz = v.AvailableSize()
		}
		done, err := v.Write(src.AsSlice()[:writeSz])
		if err != nil {
			return fmt.Errorf("could not write to view during append: %w", err)
		}
		src.TrimFront(done)
		b.size += int64(done)
		if src.Size() == 0 {
			src.Release()
			return nil
		}
	}

	// If there is still data left just append the src.
	b.appendOwned(src)
	return nil
}

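// exampleAppendPrepend is an illustrative sketch, not part of the original
// API: it assembles a payload first and then prepends a header, the way a
// protocol stack typically builds a packet. Both calls take ownership of the
// views they are given.
func exampleAppendPrepend() {
	b := Buffer{}
	defer b.Release()

	if err := b.Append(NewViewWithData([]byte("payload"))); err != nil {
		panic(err)
	}
	if err := b.Prepend(NewViewWithData([]byte("header:"))); err != nil {
		panic(err)
	}
	fmt.Println(string(b.Flatten())) // header:payload
}
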
func (b *Buffer) appendOwned(v *View) {
	b.data.PushBack(v)
	b.size += int64(v.Size())
}

func (b *Buffer) prependOwned(v *View) {
	b.data.PushFront(v)
	b.size += int64(v.Size())
}

// PullUp makes the specified range contiguous and returns the backing memory.
func (b *Buffer) PullUp(offset, length int) (View, bool) {
	if length == 0 {
		return View{}, true
	}
	tgt := Range{begin: offset, end: offset + length}
	if tgt.Intersect(Range{end: int(b.size)}).Len() != length {
		return View{}, false
	}

	curr := Range{}
	v := b.data.Front()
	for ; v != nil; v = v.Next() {
		origLen := v.Size()
		curr.end = curr.begin + origLen

		if x := curr.Intersect(tgt); x.Len() == tgt.Len() {
			// This view covers the whole requested target range.
			sub := x.Offset(-curr.begin)
			// Don't increment the reference count of the underlying chunk. Views
			// returned by PullUp are explicitly unowned and read-only.
			new := View{
				read:  v.read + sub.begin,
				write: v.read + sub.end,
				chunk: v.chunk,
			}
			return new, true
		} else if x.Len() > 0 {
			// This view is the start of the range that needs to be merged.
			break
		}

		curr.begin += origLen
	}

	// Calculate the total merged length.
	totLen := 0
	for n := v; n != nil; n = n.Next() {
		totLen += n.Size()
		if curr.begin+totLen >= tgt.end {
			break
		}
	}

	// Merge the views.
	merged := NewViewSize(totLen)
	off := 0
	for n := v; n != nil && off < totLen; {
		merged.WriteAt(n.AsSlice(), off)
		off += n.Size()

		// Remove views except for the first one, which will be replaced below.
		if n == v {
			n = n.Next()
		} else {
			old := n
			n = n.Next()
			b.removeView(old)
		}
	}
	// Insert the merged view in place of the first one.
	b.data.InsertBefore(v, merged)
	b.removeView(v)

	r := tgt.Offset(-curr.begin)
	pulled := View{
		read:  r.begin,
		write: r.end,
		chunk: merged.chunk,
	}
	return pulled, true
}

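// examplePullUp is an illustrative sketch, not part of the original API: it
// asks for a contiguous window over data that may span several views. The
// returned View is unowned and must be treated as read-only; the caller
// should not release it.
func examplePullUp() {
	b := MakeWithData([]byte("0123"))
	defer b.Release()
	b.Append(NewViewWithData([]byte("4567")))

	if v, ok := b.PullUp(2, 4); ok {
		fmt.Println(string(v.AsSlice())) // 2345
	}
}
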
// Flatten returns a flattened copy of this data.
//
// This method should not be used in any performance-sensitive paths. It may
// allocate a fresh byte slice sufficiently large to contain all the data in
// the buffer. This is principally for debugging.
//
// N.B. The data may still belong to this Buffer: if there is a single view
// present, its contents may be returned directly. The result is intended for
// temporary use only, and a reference to the returned slice should not be
// held.
func (b *Buffer) Flatten() []byte {
	if v := b.data.Front(); v == nil {
		return nil // No data at all.
	}
	data := make([]byte, 0, b.size) // Need to flatten.
	for v := b.data.Front(); v != nil; v = v.Next() {
		// Copy to the allocated slice.
		data = append(data, v.AsSlice()...)
	}
	return data
}

// Size indicates the total amount of data available in this Buffer.
func (b *Buffer) Size() int64 {
	return b.size
}

// Clone creates a copy-on-write clone of b. The underlying chunks are shared
// until they are written to.
func (b *Buffer) Clone() Buffer {
	other := Buffer{
		size: b.size,
	}
	for v := b.data.Front(); v != nil; v = v.Next() {
		newView := v.Clone()
		other.data.PushBack(newView)
	}
	return other
}

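// exampleClone is an illustrative sketch, not part of the original API: a
// clone shares the underlying chunks copy-on-write, so consuming data from
// the clone leaves the original untouched.
func exampleClone() {
	orig := MakeWithData([]byte("abcdef"))
	defer orig.Release()

	clone := orig.Clone()
	defer clone.Release()

	clone.TrimFront(3)
	fmt.Println(orig.Size(), clone.Size()) // 6 3
}
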
// DeepClone creates a deep clone of b, copying data such that no bytes are
// shared with any other Buffers.
func (b *Buffer) DeepClone() Buffer {
	newBuf := Buffer{}
	buf := b.Clone()
	reader := buf.AsBufferReader()
	newBuf.WriteFromReader(&reader, b.size)
	return newBuf
}

// Apply applies the given function across all valid data.
func (b *Buffer) Apply(fn func(*View)) {
	for v := b.data.Front(); v != nil; v = v.Next() {
		d := v.Clone()
		fn(d)
		d.Release()
	}
}

// SubApply applies fn to a given range of data in b. Any part of the range
// outside of b is ignored.
func (b *Buffer) SubApply(offset, length int, fn func(*View)) {
	for v := b.data.Front(); length > 0 && v != nil; v = v.Next() {
		if offset >= v.Size() {
			offset -= v.Size()
			continue
		}
		d := v.Clone()
		if offset > 0 {
			d.TrimFront(offset)
			offset = 0
		}
		if length < d.Size() {
			d.write = d.read + length
		}
		fn(d)
		length -= d.Size()
		d.Release()
	}
}

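// exampleSubApply is an illustrative sketch, not part of the original API: it
// visits a byte range of the Buffer view-by-view without copying the bytes
// out into a flat slice.
func exampleSubApply() {
	b := MakeWithData([]byte("abcdefgh"))
	defer b.Release()

	visited := 0
	b.SubApply(2, 4, func(v *View) {
		visited += v.Size()
	})
	fmt.Println(visited) // 4
}
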
// Checksum calculates a checksum over the buffer's payload starting at offset.
func (b *Buffer) Checksum(offset int) uint16 {
	if offset >= int(b.size) {
		return 0
	}
	var v *View
	for v = b.data.Front(); v != nil && offset >= v.Size(); v = v.Next() {
		offset -= v.Size()
	}

	var cs checksum.Checksumer
	cs.Add(v.AsSlice()[offset:])
	for v = v.Next(); v != nil; v = v.Next() {
		cs.Add(v.AsSlice())
	}
	return cs.Checksum()
}

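// exampleChecksum is an illustrative sketch, not part of the original API: it
// checksums only the bytes that follow a fixed-size header, a typical use
// when computing a checksum over a payload.
func exampleChecksum() {
	b := MakeWithData([]byte{0xde, 0xad, 0xbe, 0xef})
	defer b.Release()

	// Skip a 2-byte "header" and checksum the remaining payload bytes.
	fmt.Printf("%#x\n", b.Checksum(2))
}
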
// Merge merges the provided Buffer with this one.
//
// The other Buffer will be appended to b, and other will be empty after this
// operation completes.
func (b *Buffer) Merge(other *Buffer) {
	b.data.PushBackList(&other.data)
	other.data = viewList{}

	// Adjust sizes.
	b.size += other.size
	other.size = 0
}

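// exampleMerge is an illustrative sketch, not part of the original API: after
// Merge the receiver holds all of the data and the argument is left empty, so
// only the receiver still needs to be released.
func exampleMerge() {
	front := MakeWithData([]byte("foo"))
	defer front.Release()

	back := MakeWithData([]byte("bar"))
	front.Merge(&back)
	fmt.Println(front.Size(), back.Size()) // 6 0
}
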
// WriteFromReader writes to the buffer from an io.Reader. A maximum read size
// of MaxChunkSize is enforced to prevent allocating views from the heap.
func (b *Buffer) WriteFromReader(r io.Reader, count int64) (int64, error) {
	var done int64
	for done < count {
		vsize := count - done
		if vsize > MaxChunkSize {
			vsize = MaxChunkSize
		}
		v := NewView(int(vsize))
		lr := io.LimitedReader{R: r, N: vsize}
		n, err := io.Copy(v, &lr)
		b.Append(v)
		done += n
		if err == io.EOF {
			break
		}
		if err != nil {
			return done, err
		}
	}
	return done, nil
}

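// exampleWriteFromReader is an illustrative sketch, not part of the original
// API: it fills a Buffer from an io.Reader. The reader here happens to be a
// BufferReader over another Buffer, but any io.Reader works; internally the
// data is pulled in chunks of at most MaxChunkSize.
func exampleWriteFromReader() {
	src := MakeWithData([]byte("some bytes"))
	r := src.AsBufferReader() // r now owns src.
	defer r.Close()

	var dst Buffer
	defer dst.Release()

	n, err := dst.WriteFromReader(&r, int64(r.Len()))
	fmt.Println(n, err, string(dst.Flatten())) // 10 <nil> some bytes
}
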
// ReadToWriter reads from the buffer into an io.Writer.
//
// N.B. This does not consume the bytes read. TrimFront should
// be called appropriately after this call in order to do so.
func (b *Buffer) ReadToWriter(w io.Writer, count int64) (int64, error) {
	bytesLeft := int(count)
	for v := b.data.Front(); v != nil && bytesLeft > 0; v = v.Next() {
		view := v.Clone()
		if view.Size() > bytesLeft {
			view.CapLength(bytesLeft)
		}
		n, err := io.Copy(w, view)
		bytesLeft -= int(n)
		view.Release()
		if err != nil {
			return count - int64(bytesLeft), err
		}
	}
	return count - int64(bytesLeft), nil
}

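// exampleReadToWriter is an illustrative sketch, not part of the original
// API: it copies the first count bytes to an io.Writer and then trims them,
// since ReadToWriter itself does not consume what it reads.
func exampleReadToWriter() {
	b := MakeWithData([]byte("copy me out"))
	defer b.Release()

	n, err := b.ReadToWriter(io.Discard, 4)
	if err == nil {
		b.TrimFront(n) // Consume the bytes that were written out.
	}
	fmt.Println(n, b.Size()) // 4 7
}
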
// read implements the io.Reader interface. This method is used by BufferReader
// to consume its underlying buffer. To perform I/O operations on buffers
// directly, use ReadToWriter or WriteFromReader.
func (b *Buffer) read(p []byte) (int, error) {
	if len(p) == 0 {
		return 0, nil
	}
	if b.Size() == 0 {
		return 0, io.EOF
	}
	done := 0
	v := b.data.Front()
	for v != nil && done < len(p) {
		n, err := v.Read(p[done:])
		done += n
		next := v.Next()
		if v.Size() == 0 {
			b.removeView(v)
		}
		b.size -= int64(n)
		if err != nil && err != io.EOF {
			return done, err
		}
		v = next
	}
	return done, nil
}

// readByte implements the io.ByteReader interface. This method is used by
// BufferReader to consume its underlying buffer. To perform I/O operations on
// buffers directly, use ReadToWriter or WriteFromReader.
func (b *Buffer) readByte() (byte, error) {
	if b.Size() == 0 {
		return 0, io.EOF
	}
	v := b.data.Front()
	bt := v.AsSlice()[0]
	b.TrimFront(1)
	return bt, nil
}

// AsBufferReader returns the Buffer as a BufferReader capable of io methods.
// The new BufferReader takes ownership of b.
func (b *Buffer) AsBufferReader() BufferReader {
	return BufferReader{b}
}

// BufferReader implements io methods on Buffer. Users must call Close()
// when finished with the buffer to free the underlying memory.
type BufferReader struct {
	b *Buffer
}

// Read implements the io.Reader interface.
func (br *BufferReader) Read(p []byte) (int, error) {
	return br.b.read(p)
}

// ReadByte implements the io.ByteReader interface.
func (br *BufferReader) ReadByte() (byte, error) {
	return br.b.readByte()
}

// Close releases the underlying buffer. It is analogous to io.Closer's Close,
// but does not return an error.
func (br *BufferReader) Close() {
	br.b.Release()
}

// Len returns the number of bytes in the unread portion of the buffer.
func (br *BufferReader) Len() int {
	return int(br.b.Size())
}

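// exampleBufferReader is an illustrative sketch, not part of the original
// API: wrapping a Buffer in a BufferReader lets standard io helpers consume
// it. The reader owns the Buffer, so Close releases the underlying memory.
func exampleBufferReader() {
	b := MakeWithData([]byte("read me"))
	r := b.AsBufferReader() // r now owns b.
	defer r.Close()

	data, err := io.ReadAll(&r)
	fmt.Println(string(data), err, r.Len()) // read me <nil> 0
}
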
// Range specifies a range of bytes within a buffer.
type Range struct {
	begin int
	end   int
}

// Intersect returns the intersection of x and y.
func (x Range) Intersect(y Range) Range {
	if x.begin < y.begin {
		x.begin = y.begin
	}
	if x.end > y.end {
		x.end = y.end
	}
	if x.begin >= x.end {
		return Range{}
	}
	return x
}

// Offset returns x offset by off.
func (x Range) Offset(off int) Range {
	x.begin += off
	x.end += off
	return x
}

// Len returns the length of x.
func (x Range) Len() int {
	l := x.end - x.begin
	if l < 0 {
		l = 0
	}
	return l
}
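
// exampleRange is an illustrative sketch, not part of the original API: Range
// is the small helper PullUp uses to clip a requested window against the data
// that is actually present.
func exampleRange() {
	tgt := Range{begin: 5, end: 15}
	avail := Range{end: 10} // Only bytes [0, 10) exist.

	x := tgt.Intersect(avail) // {begin: 5, end: 10}
	fmt.Println(x.Len())      // 5
	fmt.Println(x.Offset(-5)) // {0 5}
}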