inet.af/netstack@v0.0.0-20220214151720-7585b01ddccf/buffer/view.go (about)

     1  // Copyright 2020 The gVisor Authors.
     2  //
     3  // Licensed under the Apache License, Version 2.0 (the "License");
     4  // you may not use this file except in compliance with the License.
     5  // You may obtain a copy of the License at
     6  //
     7  //     http://www.apache.org/licenses/LICENSE-2.0
     8  //
     9  // Unless required by applicable law or agreed to in writing, software
    10  // distributed under the License is distributed on an "AS IS" BASIS,
    11  // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    12  // See the License for the specific language governing permissions and
    13  // limitations under the License.
    14  
    15  package buffer
    16  
    17  import (
    18  	"fmt"
    19  	"io"
    20  )
    21  
// Buffer is an alias to View; the two names refer to the same type and may
// be used interchangeably by callers.
type Buffer = View
    24  
// View is a non-linear buffer.
//
// All methods are thread compatible.
//
// +stateify savable
type View struct {
	data bufferList // Ordered chain of buffers holding the payload.
	size int64      // Total readable bytes across all buffers; kept in sync by every mutator.
	pool pool       // Allocator used to obtain (get) and recycle (put) buffers.
}
    35  
    36  // TrimFront removes the first count bytes from the buffer.
    37  func (v *View) TrimFront(count int64) {
    38  	if count >= v.size {
    39  		v.advanceRead(v.size)
    40  	} else {
    41  		v.advanceRead(count)
    42  	}
    43  }
    44  
    45  // Remove deletes data at specified location in v. It returns false if specified
    46  // range does not fully reside in v.
    47  func (v *View) Remove(offset, length int) bool {
    48  	if offset < 0 || length < 0 {
    49  		return false
    50  	}
    51  	tgt := Range{begin: offset, end: offset + length}
    52  	if tgt.Len() != tgt.Intersect(Range{end: int(v.size)}).Len() {
    53  		return false
    54  	}
    55  
    56  	// Scan through each buffer and remove intersections.
    57  	var curr Range
    58  	for buf := v.data.Front(); buf != nil; {
    59  		origLen := buf.ReadSize()
    60  		curr.end = curr.begin + origLen
    61  
    62  		if x := curr.Intersect(tgt); x.Len() > 0 {
    63  			if !buf.Remove(x.Offset(-curr.begin)) {
    64  				panic("buf.Remove() failed")
    65  			}
    66  			if buf.ReadSize() == 0 {
    67  				// buf fully removed, removing it from the list.
    68  				oldBuf := buf
    69  				buf = buf.Next()
    70  				v.data.Remove(oldBuf)
    71  				v.pool.put(oldBuf)
    72  			} else {
    73  				// Only partial data intersects, moving on to next one.
    74  				buf = buf.Next()
    75  			}
    76  			v.size -= int64(x.Len())
    77  		} else {
    78  			// This buffer is not in range, moving on to next one.
    79  			buf = buf.Next()
    80  		}
    81  
    82  		curr.begin += origLen
    83  		if curr.begin >= tgt.end {
    84  			break
    85  		}
    86  	}
    87  	return true
    88  }
    89  
    90  // ReadAt implements io.ReaderAt.ReadAt.
    91  func (v *View) ReadAt(p []byte, offset int64) (int, error) {
    92  	var (
    93  		skipped int64
    94  		done    int64
    95  	)
    96  	for buf := v.data.Front(); buf != nil && done < int64(len(p)); buf = buf.Next() {
    97  		needToSkip := int(offset - skipped)
    98  		if sz := buf.ReadSize(); sz <= needToSkip {
    99  			skipped += int64(sz)
   100  			continue
   101  		}
   102  
   103  		// Actually read data.
   104  		n := copy(p[done:], buf.ReadSlice()[needToSkip:])
   105  		skipped += int64(needToSkip)
   106  		done += int64(n)
   107  	}
   108  	if int(done) < len(p) || offset+done == v.size {
   109  		return int(done), io.EOF
   110  	}
   111  	return int(done), nil
   112  }
   113  
   114  // advanceRead advances the view's read index.
   115  //
   116  // Precondition: there must be sufficient bytes in the buffer.
   117  func (v *View) advanceRead(count int64) {
   118  	for buf := v.data.Front(); buf != nil && count > 0; {
   119  		sz := int64(buf.ReadSize())
   120  		if sz > count {
   121  			// There is still data for reading.
   122  			buf.ReadMove(int(count))
   123  			v.size -= count
   124  			count = 0
   125  			break
   126  		}
   127  
   128  		// Consume the whole buffer.
   129  		oldBuf := buf
   130  		buf = buf.Next() // Iterate.
   131  		v.data.Remove(oldBuf)
   132  		v.pool.put(oldBuf)
   133  
   134  		// Update counts.
   135  		count -= sz
   136  		v.size -= sz
   137  	}
   138  	if count > 0 {
   139  		panic(fmt.Sprintf("advanceRead still has %d bytes remaining", count))
   140  	}
   141  }
   142  
   143  // Truncate truncates the view to the given bytes.
   144  //
   145  // This will not grow the view, only shrink it. If a length is passed that is
   146  // greater than the current size of the view, then nothing will happen.
   147  //
   148  // Precondition: length must be >= 0.
   149  func (v *View) Truncate(length int64) {
   150  	if length < 0 {
   151  		panic("negative length provided")
   152  	}
   153  	if length >= v.size {
   154  		return // Nothing to do.
   155  	}
   156  	for buf := v.data.Back(); buf != nil && v.size > length; buf = v.data.Back() {
   157  		sz := int64(buf.ReadSize())
   158  		if after := v.size - sz; after < length {
   159  			// Truncate the buffer locally.
   160  			left := (length - after)
   161  			buf.write = buf.read + int(left)
   162  			v.size = length
   163  			break
   164  		}
   165  
   166  		// Drop the buffer completely; see above.
   167  		v.data.Remove(buf)
   168  		v.pool.put(buf)
   169  		v.size -= sz
   170  	}
   171  }
   172  
   173  // Grow grows the given view to the number of bytes, which will be appended. If
   174  // zero is true, all these bytes will be zero. If zero is false, then this is
   175  // the caller's responsibility.
   176  //
   177  // Precondition: length must be >= 0.
   178  func (v *View) Grow(length int64, zero bool) {
   179  	if length < 0 {
   180  		panic("negative length provided")
   181  	}
   182  	for v.size < length {
   183  		buf := v.data.Back()
   184  
   185  		// Is there some space in the last buffer?
   186  		if buf == nil || buf.Full() {
   187  			buf = v.pool.get()
   188  			v.data.PushBack(buf)
   189  		}
   190  
   191  		// Write up to length bytes.
   192  		sz := buf.WriteSize()
   193  		if int64(sz) > length-v.size {
   194  			sz = int(length - v.size)
   195  		}
   196  
   197  		// Zero the written section; note that this pattern is
   198  		// specifically recognized and optimized by the compiler.
   199  		if zero {
   200  			for i := buf.write; i < buf.write+sz; i++ {
   201  				buf.data[i] = 0
   202  			}
   203  		}
   204  
   205  		// Advance the index.
   206  		buf.WriteMove(sz)
   207  		v.size += int64(sz)
   208  	}
   209  }
   210  
// Prepend prepends the given data.
//
// The tail of data is placed first: existing or newly allocated front
// buffers are filled from their end toward their beginning, so that a
// subsequent Prepend can keep filling the unused space at the start of
// the same buffer.
func (v *View) Prepend(data []byte) {
	// Is there any space in the first buffer?
	if buf := v.data.Front(); buf != nil && buf.read > 0 {
		// Fill up before the first write.
		avail := buf.read // Free bytes preceding the read index.
		bStart := 0
		dStart := len(data) - avail // Tail chunk of data that fits here.
		if avail > len(data) {
			// All of data fits; right-align it against the read index.
			bStart = avail - len(data)
			dStart = 0
		}
		n := copy(buf.data[bStart:], data[dStart:])
		data = data[:dStart] // The copied tail is no longer pending.
		v.size += int64(n)
		buf.read -= n
	}

	for len(data) > 0 {
		// Do we need an empty buffer?
		buf := v.pool.get()
		v.data.PushFront(buf)

		// The buffer is empty; copy last chunk.
		avail := len(buf.data)
		bStart := 0
		dStart := len(data) - avail
		if avail > len(data) {
			bStart = avail - len(data)
			dStart = 0
		}

		// We have to put the data at the end of the current
		// buffer in order to ensure that the next prepend will
		// correctly fill up the beginning of this buffer.
		n := copy(buf.data[bStart:], data[dStart:])
		data = data[:dStart]
		v.size += int64(n)
		buf.read = len(buf.data) - n
		buf.write = len(buf.data)
	}
}
   253  
   254  // Append appends the given data.
   255  func (v *View) Append(data []byte) {
   256  	for done := 0; done < len(data); {
   257  		buf := v.data.Back()
   258  
   259  		// Ensure there's a buffer with space.
   260  		if buf == nil || buf.Full() {
   261  			buf = v.pool.get()
   262  			v.data.PushBack(buf)
   263  		}
   264  
   265  		// Copy in to the given buffer.
   266  		n := copy(buf.WriteSlice(), data[done:])
   267  		done += n
   268  		buf.WriteMove(n)
   269  		v.size += int64(n)
   270  	}
   271  }
   272  
   273  // AppendOwned takes ownership of data and appends it to v.
   274  func (v *View) AppendOwned(data []byte) {
   275  	if len(data) > 0 {
   276  		buf := v.pool.getNoInit()
   277  		buf.initWithData(data)
   278  		v.data.PushBack(buf)
   279  		v.size += int64(len(data))
   280  	}
   281  }
   282  
// PullUp makes the specified range contiguous and returns the backing memory.
//
// On success the returned slice aliases the view's internal storage: if the
// range already sits inside a single buffer, that buffer's memory is returned
// directly; otherwise every buffer overlapping the range is merged into the
// first overlapping buffer. Returns false when [offset, offset+length) does
// not fully reside in v.
func (v *View) PullUp(offset, length int) ([]byte, bool) {
	if length == 0 {
		// An empty range is trivially contiguous.
		return nil, true
	}
	tgt := Range{begin: offset, end: offset + length}
	if tgt.Intersect(Range{end: int(v.size)}).Len() != length {
		// The requested range extends past the available data.
		return nil, false
	}

	// Locate the first buffer overlapping tgt; curr tracks the byte range
	// the current buffer occupies within the view.
	curr := Range{}
	buf := v.data.Front()
	for ; buf != nil; buf = buf.Next() {
		origLen := buf.ReadSize()
		curr.end = curr.begin + origLen

		if x := curr.Intersect(tgt); x.Len() == tgt.Len() {
			// buf covers the whole requested target range.
			sub := x.Offset(-curr.begin)
			return buf.ReadSlice()[sub.begin:sub.end], true
		} else if x.Len() > 0 {
			// buf is pointing at the starting buffer we want to merge.
			break
		}

		curr.begin += origLen
	}

	// Calculate the total merged length.
	totLen := 0
	for n := buf; n != nil; n = n.Next() {
		totLen += n.ReadSize()
		if curr.begin+totLen >= tgt.end {
			break
		}
	}

	// Merge the buffers.
	data := make([]byte, totLen)
	off := 0
	for n := buf; n != nil && off < totLen; {
		copy(data[off:], n.ReadSlice())
		off += n.ReadSize()

		// Remove buffers except for the first one, which will be reused.
		if n == buf {
			n = n.Next()
		} else {
			old := n
			n = n.Next()
			v.data.Remove(old)
			v.pool.put(old)
		}
	}

	// Update the first buffer with merged data.
	buf.initWithData(data)

	r := tgt.Offset(-curr.begin)
	return buf.data[r.begin:r.end], true
}
   344  
   345  // Flatten returns a flattened copy of this data.
   346  //
   347  // This method should not be used in any performance-sensitive paths. It may
   348  // allocate a fresh byte slice sufficiently large to contain all the data in
   349  // the buffer. This is principally for debugging.
   350  //
   351  // N.B. Tee data still belongs to this view, as if there is a single buffer
   352  // present, then it will be returned directly. This should be used for
   353  // temporary use only, and a reference to the given slice should not be held.
   354  func (v *View) Flatten() []byte {
   355  	if buf := v.data.Front(); buf == nil {
   356  		return nil // No data at all.
   357  	} else if buf.Next() == nil {
   358  		return buf.ReadSlice() // Only one buffer.
   359  	}
   360  	data := make([]byte, 0, v.size) // Need to flatten.
   361  	for buf := v.data.Front(); buf != nil; buf = buf.Next() {
   362  		// Copy to the allocated slice.
   363  		data = append(data, buf.ReadSlice()...)
   364  	}
   365  	return data
   366  }
   367  
// Size indicates the total amount of data available in this view.
//
// The value is maintained incrementally by the mutating methods, so this
// is a constant-time accessor.
func (v *View) Size() int64 {
	return v.size
}
   372  
   373  // Copy makes a strict copy of this view.
   374  func (v *View) Copy() (other View) {
   375  	for buf := v.data.Front(); buf != nil; buf = buf.Next() {
   376  		other.Append(buf.ReadSlice())
   377  	}
   378  	return
   379  }
   380  
   381  // Clone makes a more shallow copy compared to Copy. The underlying payload
   382  // slice (buffer.data) is shared but the buffers themselves are copied.
   383  func (v *View) Clone() *View {
   384  	other := &View{
   385  		size: v.size,
   386  	}
   387  	for buf := v.data.Front(); buf != nil; buf = buf.Next() {
   388  		newBuf := other.pool.getNoInit()
   389  		*newBuf = *buf
   390  		other.data.PushBack(newBuf)
   391  	}
   392  	return other
   393  }
   394  
   395  // Apply applies the given function across all valid data.
   396  func (v *View) Apply(fn func([]byte)) {
   397  	for buf := v.data.Front(); buf != nil; buf = buf.Next() {
   398  		fn(buf.ReadSlice())
   399  	}
   400  }
   401  
   402  // SubApply applies fn to a given range of data in v. Any part of the range
   403  // outside of v is ignored.
   404  func (v *View) SubApply(offset, length int, fn func([]byte)) {
   405  	for buf := v.data.Front(); length > 0 && buf != nil; buf = buf.Next() {
   406  		d := buf.ReadSlice()
   407  		if offset >= len(d) {
   408  			offset -= len(d)
   409  			continue
   410  		}
   411  		if offset > 0 {
   412  			d = d[offset:]
   413  			offset = 0
   414  		}
   415  		if length < len(d) {
   416  			d = d[:length]
   417  		}
   418  		fn(d)
   419  		length -= len(d)
   420  	}
   421  }
   422  
   423  // Merge merges the provided View with this one.
   424  //
   425  // The other view will be appended to v, and other will be empty after this
   426  // operation completes.
   427  func (v *View) Merge(other *View) {
   428  	// Copy over all buffers.
   429  	for buf := other.data.Front(); buf != nil; buf = other.data.Front() {
   430  		other.data.Remove(buf)
   431  		v.data.PushBack(buf)
   432  	}
   433  
   434  	// Adjust sizes.
   435  	v.size += other.size
   436  	other.size = 0
   437  }
   438  
   439  // WriteFromReader writes to the buffer from an io.Reader.
   440  //
   441  // A minimum read size equal to unsafe.Sizeof(unintptr) is enforced,
   442  // provided that count is greater than or equal to unsafe.Sizeof(uintptr).
   443  func (v *View) WriteFromReader(r io.Reader, count int64) (int64, error) {
   444  	var (
   445  		done int64
   446  		n    int
   447  		err  error
   448  	)
   449  	for done < count {
   450  		buf := v.data.Back()
   451  
   452  		// Ensure we have an empty buffer.
   453  		if buf == nil || buf.Full() {
   454  			buf = v.pool.get()
   455  			v.data.PushBack(buf)
   456  		}
   457  
   458  		// Is this less than the minimum batch?
   459  		if buf.WriteSize() < minBatch && (count-done) >= int64(minBatch) {
   460  			tmp := make([]byte, minBatch)
   461  			n, err = r.Read(tmp)
   462  			v.Append(tmp[:n])
   463  			done += int64(n)
   464  			if err != nil {
   465  				break
   466  			}
   467  			continue
   468  		}
   469  
   470  		// Limit the read, if necessary.
   471  		sz := buf.WriteSize()
   472  		if left := count - done; int64(sz) > left {
   473  			sz = int(left)
   474  		}
   475  
   476  		// Pass the relevant portion of the buffer.
   477  		n, err = r.Read(buf.WriteSlice()[:sz])
   478  		buf.WriteMove(n)
   479  		done += int64(n)
   480  		v.size += int64(n)
   481  		if err == io.EOF {
   482  			err = nil // Short write allowed.
   483  			break
   484  		} else if err != nil {
   485  			break
   486  		}
   487  	}
   488  	return done, err
   489  }
   490  
   491  // ReadToWriter reads from the buffer into an io.Writer.
   492  //
   493  // N.B. This does not consume the bytes read. TrimFront should
   494  // be called appropriately after this call in order to do so.
   495  //
   496  // A minimum write size equal to unsafe.Sizeof(unintptr) is enforced,
   497  // provided that count is greater than or equal to unsafe.Sizeof(uintptr).
   498  func (v *View) ReadToWriter(w io.Writer, count int64) (int64, error) {
   499  	var (
   500  		done int64
   501  		n    int
   502  		err  error
   503  	)
   504  	offset := 0 // Spill-over for batching.
   505  	for buf := v.data.Front(); buf != nil && done < count; buf = buf.Next() {
   506  		// Has this been consumed? Skip it.
   507  		sz := buf.ReadSize()
   508  		if sz <= offset {
   509  			offset -= sz
   510  			continue
   511  		}
   512  		sz -= offset
   513  
   514  		// Is this less than the minimum batch?
   515  		left := count - done
   516  		if sz < minBatch && left >= int64(minBatch) && (v.size-done) >= int64(minBatch) {
   517  			tmp := make([]byte, minBatch)
   518  			n, err = v.ReadAt(tmp, done)
   519  			w.Write(tmp[:n])
   520  			done += int64(n)
   521  			offset = n - sz // Reset below.
   522  			if err != nil {
   523  				break
   524  			}
   525  			continue
   526  		}
   527  
   528  		// Limit the write if necessary.
   529  		if int64(sz) >= left {
   530  			sz = int(left)
   531  		}
   532  
   533  		// Perform the actual write.
   534  		n, err = w.Write(buf.ReadSlice()[offset : offset+sz])
   535  		done += int64(n)
   536  		if err != nil {
   537  			break
   538  		}
   539  
   540  		// Reset spill-over.
   541  		offset = 0
   542  	}
   543  	return done, err
   544  }
   545  
   546  // A Range specifies a range of buffer.
   547  type Range struct {
   548  	begin int
   549  	end   int
   550  }
   551  
   552  // Intersect returns the intersection of x and y.
   553  func (x Range) Intersect(y Range) Range {
   554  	if x.begin < y.begin {
   555  		x.begin = y.begin
   556  	}
   557  	if x.end > y.end {
   558  		x.end = y.end
   559  	}
   560  	if x.begin >= x.end {
   561  		return Range{}
   562  	}
   563  	return x
   564  }
   565  
   566  // Offset returns x offset by off.
   567  func (x Range) Offset(off int) Range {
   568  	x.begin += off
   569  	x.end += off
   570  	return x
   571  }
   572  
   573  // Len returns the length of x.
   574  func (x Range) Len() int {
   575  	l := x.end - x.begin
   576  	if l < 0 {
   577  		l = 0
   578  	}
   579  	return l
   580  }