github.com/SagerNet/gvisor@v0.0.0-20210707092255-7731c139d75c/pkg/buffer/view.go (about)

     1  // Copyright 2020 The gVisor Authors.
     2  //
     3  // Licensed under the Apache License, Version 2.0 (the "License");
     4  // you may not use this file except in compliance with the License.
     5  // You may obtain a copy of the License at
     6  //
     7  //     http://www.apache.org/licenses/LICENSE-2.0
     8  //
     9  // Unless required by applicable law or agreed to in writing, software
    10  // distributed under the License is distributed on an "AS IS" BASIS,
    11  // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    12  // See the License for the specific language governing permissions and
    13  // limitations under the License.
    14  
    15  package buffer
    16  
    17  import (
    18  	"fmt"
    19  	"io"
    20  )
    21  
// Buffer is an alias to View.
//
// NOTE(review): this alias appears to exist for API compatibility; new code
// can use View directly — confirm against callers before relying on either
// name being removed.
type Buffer = View
    24  
// View is a non-linear buffer.
//
// All methods are thread compatible.
//
// +stateify savable
type View struct {
	// data is the ordered list of underlying buffers holding the view's
	// bytes; the front of the list is the read end.
	data bufferList

	// size is the total number of readable bytes across all buffers in
	// data. It is kept up to date incrementally by every mutating method.
	size int64

	// pool is the source of buffers for data (see pool.get/put/getNoInit).
	pool pool
}
    35  
    36  // TrimFront removes the first count bytes from the buffer.
    37  func (v *View) TrimFront(count int64) {
    38  	if count >= v.size {
    39  		v.advanceRead(v.size)
    40  	} else {
    41  		v.advanceRead(count)
    42  	}
    43  }
    44  
// Remove deletes data at specified location in v. It returns false if specified
// range does not fully reside in v.
func (v *View) Remove(offset, length int) bool {
	if offset < 0 || length < 0 {
		return false
	}
	tgt := Range{begin: offset, end: offset + length}
	// The target must lie entirely within the view's readable bytes.
	if tgt.Len() != tgt.Intersect(Range{end: int(v.size)}).Len() {
		return false
	}

	// Scan through each buffer and remove intersections.
	//
	// curr tracks the absolute byte range covered by the current buffer,
	// in the view's original (pre-removal) coordinate space.
	var curr Range
	for buf := v.data.Front(); buf != nil; {
		origLen := buf.ReadSize()
		curr.end = curr.begin + origLen

		if x := curr.Intersect(tgt); x.Len() > 0 {
			// Translate the intersection into buffer-local
			// coordinates before removing it.
			if !buf.Remove(x.Offset(-curr.begin)) {
				panic("buf.Remove() failed")
			}
			if buf.ReadSize() == 0 {
				// buf fully removed, removing it from the list.
				oldBuf := buf
				buf = buf.Next()
				v.data.Remove(oldBuf)
				v.pool.put(oldBuf)
			} else {
				// Only partial data intersects, moving on to next one.
				buf = buf.Next()
			}
			v.size -= int64(x.Len())
		} else {
			// This buffer is not in range, moving on to next one.
			buf = buf.Next()
		}

		// Advance by the pre-removal length so that curr and tgt stay
		// in the same coordinate space.
		curr.begin += origLen
		if curr.begin >= tgt.end {
			// Everything past this point is untouched; stop early.
			break
		}
	}
	return true
}
    89  
// ReadAt implements io.ReaderAt.ReadAt.
func (v *View) ReadAt(p []byte, offset int64) (int, error) {
	var (
		skipped int64 // Bytes passed over to reach offset.
		done    int64 // Bytes copied into p so far.
	)
	for buf := v.data.Front(); buf != nil && done < int64(len(p)); buf = buf.Next() {
		// needToSkip is how far into this buffer the read starts; once
		// offset has been fully consumed it stays zero.
		needToSkip := int(offset - skipped)
		if sz := buf.ReadSize(); sz <= needToSkip {
			// This entire buffer precedes offset; skip it.
			skipped += int64(sz)
			continue
		}

		// Actually read data.
		n := copy(p[done:], buf.ReadSlice()[needToSkip:])
		skipped += int64(needToSkip)
		done += int64(n)
	}
	// Report EOF on a short read, or when the read ends exactly at the end
	// of the view (both permitted by the io.ReaderAt contract).
	if int(done) < len(p) || offset+done == v.size {
		return int(done), io.EOF
	}
	return int(done), nil
}
   113  
   114  // advanceRead advances the view's read index.
   115  //
   116  // Precondition: there must be sufficient bytes in the buffer.
   117  func (v *View) advanceRead(count int64) {
   118  	for buf := v.data.Front(); buf != nil && count > 0; {
   119  		sz := int64(buf.ReadSize())
   120  		if sz > count {
   121  			// There is still data for reading.
   122  			buf.ReadMove(int(count))
   123  			v.size -= count
   124  			count = 0
   125  			break
   126  		}
   127  
   128  		// Consume the whole buffer.
   129  		oldBuf := buf
   130  		buf = buf.Next() // Iterate.
   131  		v.data.Remove(oldBuf)
   132  		v.pool.put(oldBuf)
   133  
   134  		// Update counts.
   135  		count -= sz
   136  		v.size -= sz
   137  	}
   138  	if count > 0 {
   139  		panic(fmt.Sprintf("advanceRead still has %d bytes remaining", count))
   140  	}
   141  }
   142  
// Truncate truncates the view to the given bytes.
//
// This will not grow the view, only shrink it. If a length is passed that is
// greater than the current size of the view, then nothing will happen.
//
// Precondition: length must be >= 0.
func (v *View) Truncate(length int64) {
	if length < 0 {
		panic("negative length provided")
	}
	if length >= v.size {
		return // Nothing to do.
	}
	// Walk backwards from the tail, dropping whole buffers until the cut
	// point lands inside one, which is then truncated in place.
	for buf := v.data.Back(); buf != nil && v.size > length; buf = v.data.Back() {
		sz := int64(buf.ReadSize())
		if after := v.size - sz; after < length {
			// Truncate the buffer locally: keep only the first
			// `left` readable bytes by pulling back its write index.
			left := (length - after)
			buf.write = buf.read + int(left)
			v.size = length
			break
		}

		// Drop the buffer completely; see above.
		v.data.Remove(buf)
		v.pool.put(buf)
		v.size -= sz
	}
}
   172  
// Grow grows the given view to the number of bytes, which will be appended. If
// zero is true, all these bytes will be zero. If zero is false, then this is
// the caller's responsibility.
//
// Precondition: length must be >= 0.
//
// Note that length is a target total size, not a delta: the loop below runs
// until v.size reaches length.
func (v *View) Grow(length int64, zero bool) {
	if length < 0 {
		panic("negative length provided")
	}
	for v.size < length {
		buf := v.data.Back()

		// Is there some space in the last buffer?
		if buf == nil || buf.Full() {
			buf = v.pool.get()
			v.data.PushBack(buf)
		}

		// Write up to length bytes.
		sz := buf.WriteSize()
		if int64(sz) > length-v.size {
			sz = int(length - v.size)
		}

		// Zero the written section; note that this pattern is
		// specifically recognized and optimized by the compiler.
		if zero {
			for i := buf.write; i < buf.write+sz; i++ {
				buf.data[i] = 0
			}
		}

		// Advance the index.
		buf.WriteMove(sz)
		v.size += int64(sz)
	}
}
   210  
// Prepend prepends the given data.
func (v *View) Prepend(data []byte) {
	// Is there any space in the first buffer?
	if buf := v.data.Front(); buf != nil && buf.read > 0 {
		// Fill up before the first write. The tail of data is copied
		// right-aligned against the current read index, so that any
		// remaining prefix of data can be prepended next.
		avail := buf.read
		bStart := 0
		dStart := len(data) - avail
		if avail > len(data) {
			// All of data fits; right-align it within the space.
			bStart = avail - len(data)
			dStart = 0
		}
		n := copy(buf.data[bStart:], data[dStart:])
		data = data[:dStart]
		v.size += int64(n)
		buf.read -= n
	}

	for len(data) > 0 {
		// Do we need an empty buffer?
		buf := v.pool.get()
		v.data.PushFront(buf)

		// The buffer is empty; copy last chunk.
		avail := len(buf.data)
		bStart := 0
		dStart := len(data) - avail
		if avail > len(data) {
			bStart = avail - len(data)
			dStart = 0
		}

		// We have to put the data at the end of the current
		// buffer in order to ensure that the next prepend will
		// correctly fill up the beginning of this buffer.
		n := copy(buf.data[bStart:], data[dStart:])
		data = data[:dStart]
		v.size += int64(n)
		buf.read = len(buf.data) - n
		buf.write = len(buf.data)
	}
}
   253  
   254  // Append appends the given data.
   255  func (v *View) Append(data []byte) {
   256  	for done := 0; done < len(data); {
   257  		buf := v.data.Back()
   258  
   259  		// Ensure there's a buffer with space.
   260  		if buf == nil || buf.Full() {
   261  			buf = v.pool.get()
   262  			v.data.PushBack(buf)
   263  		}
   264  
   265  		// Copy in to the given buffer.
   266  		n := copy(buf.WriteSlice(), data[done:])
   267  		done += n
   268  		buf.WriteMove(n)
   269  		v.size += int64(n)
   270  	}
   271  }
   272  
   273  // AppendOwned takes ownership of data and appends it to v.
   274  func (v *View) AppendOwned(data []byte) {
   275  	if len(data) > 0 {
   276  		buf := v.pool.getNoInit()
   277  		buf.initWithData(data)
   278  		v.data.PushBack(buf)
   279  		v.size += int64(len(data))
   280  	}
   281  }
   282  
// PullUp makes the specified range contiguous and returns the backing memory.
func (v *View) PullUp(offset, length int) ([]byte, bool) {
	if length == 0 {
		return nil, true
	}
	tgt := Range{begin: offset, end: offset + length}
	// Fail if the requested range is not fully contained in the view.
	if tgt.Intersect(Range{end: int(v.size)}).Len() != length {
		return nil, false
	}

	// Walk forward to the buffer containing tgt.begin. curr tracks the
	// absolute byte range covered by buf.
	curr := Range{}
	buf := v.data.Front()
	for ; buf != nil; buf = buf.Next() {
		origLen := buf.ReadSize()
		curr.end = curr.begin + origLen

		if x := curr.Intersect(tgt); x.Len() == tgt.Len() {
			// buf covers the whole requested target range.
			sub := x.Offset(-curr.begin)
			return buf.ReadSlice()[sub.begin:sub.end], true
		} else if x.Len() > 0 {
			// buf is pointing at the starting buffer we want to merge.
			break
		}

		curr.begin += origLen
	}

	// Calculate the total merged length: whole buffers are consumed until
	// tgt.end is covered.
	totLen := 0
	for n := buf; n != nil; n = n.Next() {
		totLen += n.ReadSize()
		if curr.begin+totLen >= tgt.end {
			break
		}
	}

	// Merge the buffers.
	data := make([]byte, totLen)
	off := 0
	for n := buf; n != nil && off < totLen; {
		copy(data[off:], n.ReadSlice())
		off += n.ReadSize()

		// Remove buffers except for the first one, which will be reused.
		if n == buf {
			n = n.Next()
		} else {
			old := n
			n = n.Next()
			v.data.Remove(old)
			v.pool.put(old)
		}
	}

	// Update the first buffer with merged data.
	buf.initWithData(data)

	// Return the requested window, translated into the merged buffer's
	// local coordinates.
	r := tgt.Offset(-curr.begin)
	return buf.data[r.begin:r.end], true
}
   344  
   345  // Flatten returns a flattened copy of this data.
   346  //
   347  // This method should not be used in any performance-sensitive paths. It may
   348  // allocate a fresh byte slice sufficiently large to contain all the data in
   349  // the buffer. This is principally for debugging.
   350  //
   351  // N.B. Tee data still belongs to this view, as if there is a single buffer
   352  // present, then it will be returned directly. This should be used for
   353  // temporary use only, and a reference to the given slice should not be held.
   354  func (v *View) Flatten() []byte {
   355  	if buf := v.data.Front(); buf == nil {
   356  		return nil // No data at all.
   357  	} else if buf.Next() == nil {
   358  		return buf.ReadSlice() // Only one buffer.
   359  	}
   360  	data := make([]byte, 0, v.size) // Need to flatten.
   361  	for buf := v.data.Front(); buf != nil; buf = buf.Next() {
   362  		// Copy to the allocated slice.
   363  		data = append(data, buf.ReadSlice()...)
   364  	}
   365  	return data
   366  }
   367  
// Size indicates the total amount of data available in this view.
func (v *View) Size() int64 {
	// size is maintained incrementally by the mutating methods, so this is
	// O(1); no list traversal is required.
	return v.size
}
   372  
   373  // Copy makes a strict copy of this view.
   374  func (v *View) Copy() (other View) {
   375  	for buf := v.data.Front(); buf != nil; buf = buf.Next() {
   376  		other.Append(buf.ReadSlice())
   377  	}
   378  	return
   379  }
   380  
   381  // Apply applies the given function across all valid data.
   382  func (v *View) Apply(fn func([]byte)) {
   383  	for buf := v.data.Front(); buf != nil; buf = buf.Next() {
   384  		fn(buf.ReadSlice())
   385  	}
   386  }
   387  
// SubApply applies fn to a given range of data in v. Any part of the range
// outside of v is ignored.
func (v *View) SubApply(offset, length int, fn func([]byte)) {
	for buf := v.data.Front(); length > 0 && buf != nil; buf = buf.Next() {
		d := buf.ReadSlice()
		if offset >= len(d) {
			// This entire buffer precedes the requested range.
			offset -= len(d)
			continue
		}
		if offset > 0 {
			// Trim the leading bytes before the range; offset is
			// fully consumed from this buffer on.
			d = d[offset:]
			offset = 0
		}
		if length < len(d) {
			// Trim the tail beyond the requested length.
			d = d[:length]
		}
		fn(d)
		length -= len(d)
	}
}
   408  
   409  // Merge merges the provided View with this one.
   410  //
   411  // The other view will be appended to v, and other will be empty after this
   412  // operation completes.
   413  func (v *View) Merge(other *View) {
   414  	// Copy over all buffers.
   415  	for buf := other.data.Front(); buf != nil; buf = other.data.Front() {
   416  		other.data.Remove(buf)
   417  		v.data.PushBack(buf)
   418  	}
   419  
   420  	// Adjust sizes.
   421  	v.size += other.size
   422  	other.size = 0
   423  }
   424  
// WriteFromReader writes to the buffer from an io.Reader.
//
// A minimum read size equal to unsafe.Sizeof(uintptr) is enforced,
// provided that count is greater than or equal to unsafe.Sizeof(uintptr).
func (v *View) WriteFromReader(r io.Reader, count int64) (int64, error) {
	var (
		done int64 // Total bytes written into v so far.
		n    int
		err  error
	)
	for done < count {
		buf := v.data.Back()

		// Ensure we have an empty buffer.
		if buf == nil || buf.Full() {
			buf = v.pool.get()
			v.data.PushBack(buf)
		}

		// Is this less than the minimum batch?
		if buf.WriteSize() < minBatch && (count-done) >= int64(minBatch) {
			// The tail buffer has only a sliver of space left; read
			// into a minBatch-sized scratch slice instead and let
			// Append distribute it (Append also updates v.size).
			tmp := make([]byte, minBatch)
			n, err = r.Read(tmp)
			v.Append(tmp[:n])
			done += int64(n)
			if err != nil {
				break
			}
			continue
		}

		// Limit the read, if necessary.
		sz := buf.WriteSize()
		if left := count - done; int64(sz) > left {
			sz = int(left)
		}

		// Pass the relevant portion of the buffer.
		n, err = r.Read(buf.WriteSlice()[:sz])
		buf.WriteMove(n)
		done += int64(n)
		v.size += int64(n)
		if err == io.EOF {
			err = nil // Short write allowed.
			break
		} else if err != nil {
			break
		}
	}
	return done, err
}
   476  
// ReadToWriter reads from the buffer into an io.Writer.
//
// N.B. This does not consume the bytes read. TrimFront should
// be called appropriately after this call in order to do so.
//
// A minimum write size equal to unsafe.Sizeof(uintptr) is enforced,
// provided that count is greater than or equal to unsafe.Sizeof(uintptr).
func (v *View) ReadToWriter(w io.Writer, count int64) (int64, error) {
	var (
		done int64 // Total bytes written to w so far.
		n    int
		err  error
	)
	offset := 0 // Spill-over for batching.
	for buf := v.data.Front(); buf != nil && done < count; buf = buf.Next() {
		// Has this been consumed? Skip it.
		sz := buf.ReadSize()
		if sz <= offset {
			offset -= sz
			continue
		}
		sz -= offset

		// Is this less than the minimum batch?
		left := count - done
		if sz < minBatch && left >= int64(minBatch) && (v.size-done) >= int64(minBatch) {
			// Batch small buffers: gather minBatch bytes via ReadAt
			// and write them in one call. Bytes read beyond this
			// buffer become spill-over, skipped on the following
			// iterations via offset.
			//
			// NOTE(review): the error from w.Write is discarded on
			// this path; a failed write is only surfaced if ReadAt
			// errors — confirm this is intentional.
			tmp := make([]byte, minBatch)
			n, err = v.ReadAt(tmp, done)
			w.Write(tmp[:n])
			done += int64(n)
			offset = n - sz // Reset below.
			if err != nil {
				break
			}
			continue
		}

		// Limit the write if necessary.
		if int64(sz) >= left {
			sz = int(left)
		}

		// Perform the actual write.
		n, err = w.Write(buf.ReadSlice()[offset : offset+sz])
		done += int64(n)
		if err != nil {
			break
		}

		// Reset spill-over.
		offset = 0
	}
	return done, err
}
   531  
   532  // A Range specifies a range of buffer.
   533  type Range struct {
   534  	begin int
   535  	end   int
   536  }
   537  
   538  // Intersect returns the intersection of x and y.
   539  func (x Range) Intersect(y Range) Range {
   540  	if x.begin < y.begin {
   541  		x.begin = y.begin
   542  	}
   543  	if x.end > y.end {
   544  		x.end = y.end
   545  	}
   546  	if x.begin >= x.end {
   547  		return Range{}
   548  	}
   549  	return x
   550  }
   551  
   552  // Offset returns x offset by off.
   553  func (x Range) Offset(off int) Range {
   554  	x.begin += off
   555  	x.end += off
   556  	return x
   557  }
   558  
   559  // Len returns the length of x.
   560  func (x Range) Len() int {
   561  	l := x.end - x.begin
   562  	if l < 0 {
   563  		l = 0
   564  	}
   565  	return l
   566  }