github.com/sagernet/gvisor@v0.0.0-20240428053021-e691de28565f/pkg/buffer/chunk.go

// Copyright 2022 The gVisor Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package buffer

import (
	"fmt"

	"github.com/sagernet/gvisor/pkg/bits"
	"github.com/sagernet/gvisor/pkg/sync"
	"github.com/sagernet/sing/common"
)

const (
	// This is log2(baseChunkSize). This number is used to calculate which pool
	// to use for a payload size by right shifting the payload size by this
	// number and passing the result to MostSignificantOne64.
	baseChunkSizeLog2 = 6

	// This is the size of the buffers in the first pool. Each subsequent pool
	// creates payloads 2^(pool index) times larger than the first pool's
	// payloads.
	baseChunkSize = 1 << baseChunkSizeLog2 // 64

	// MaxChunkSize is the largest payload size that we pool. Payloads larger
	// than this will be allocated from the heap and garbage collected as normal.
	MaxChunkSize = baseChunkSize << (numPools - 1) // 64k

	// The number of chunk pools we have for use.
	numPools = 11
)
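
// For illustration, these constants give the following size classes: pool
// index i holds chunks of baseChunkSize<<i bytes, i.e. 64, 128, 256, ...,
// 65536 bytes, with the last pool (index numPools-1 = 10) holding
// MaxChunkSize = 64 KiB chunks.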

// chunkPools is a collection of pools for payloads of different sizes. The
// size of the payloads doubles in each successive pool.
var chunkPools [numPools]sync.Pool

func init() {
	for i := 0; i < numPools; i++ {
		chunkSize := baseChunkSize * (1 << i)
		chunkPools[i].New = func() any {
			return &chunk{
				data: make([]byte, chunkSize),
			}
		}
	}
}
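
// Note: each pool's New closure captures its own chunkSize, so
// chunkPools[i].Get() always yields a *chunk whose data slice is exactly
// baseChunkSize<<i bytes long (for example, chunkPools[3] serves 512-byte
// chunks), whether the chunk is newly allocated or reused from the pool.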

// getChunkPool returns the pool that serves chunks large enough to hold size
// bytes.
//
// Precondition: 0 <= size <= MaxChunkSize.
func getChunkPool(size int) *sync.Pool {
	idx := 0
	if size > baseChunkSize {
		idx = bits.MostSignificantOne64(uint64(size) >> baseChunkSizeLog2)
		if size > 1<<(idx+baseChunkSizeLog2) {
			idx++
		}
	}
	if idx >= numPools {
		panic(fmt.Sprintf("pool for chunk size %d does not exist", size))
	}
	return &chunkPools[idx]
}
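
// Worked example: for size = 100, 100 >> baseChunkSizeLog2 is 1 and
// MostSignificantOne64(1) is 0; since 100 > 1<<(0+baseChunkSizeLog2) = 64,
// the index is bumped to 1 and the 128-byte pool is returned. For size = 64
// the branch is skipped and pool 0 is used, and for size = 65536 the result
// is pool 10, the MaxChunkSize pool.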

// chunk represents a slice of pooled memory.
//
// +stateify savable
type chunk struct {
	chunkRefs
	data []byte
}

// newChunk returns a reference-counted chunk whose data slice is zeroed and
// at least size bytes long. Sizes above MaxChunkSize are allocated directly
// from the heap; everything else is served from the matching chunk pool.
func newChunk(size int) *chunk {
	var c *chunk
	if size > MaxChunkSize {
		c = &chunk{
			data: make([]byte, size),
		}
	} else {
		pool := getChunkPool(size)
		c = pool.Get().(*chunk)
		common.ClearArray(c.data)
	}
	c.InitRefs()
	return c
}
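
// A minimal usage sketch from inside this package (payload here is a
// hypothetical []byte, not something defined in this file):
//
//	c := newChunk(1500)   // rounds up to the 2048-byte pool
//	copy(c.data, payload) // the slice is zeroed, so stale pool data never leaks
//	defer c.DecRef()      // re-pools the chunk once its reference count drops to zero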

// destroy releases the chunk's memory: heap-allocated chunks (larger than
// MaxChunkSize) are simply dropped for the garbage collector, while pooled
// chunks are returned to the pool that produced them.
func (c *chunk) destroy() {
	if len(c.data) > MaxChunkSize {
		c.data = nil
		return
	}
	pool := getChunkPool(len(c.data))
	pool.Put(c)
}

// DecRef drops a reference and destroys the chunk when the count reaches
// zero.
func (c *chunk) DecRef() {
	c.chunkRefs.DecRef(c.destroy)
}

// Clone returns a deep copy of the chunk: a freshly allocated (or freshly
// pooled) chunk of the same length, with its own reference count.
func (c *chunk) Clone() *chunk {
	cpy := newChunk(len(c.data))
	copy(cpy.data, c.data)
	return cpy
}
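
// Sketch of the copy-on-clone behavior (illustrative values):
//
//	a := newChunk(64)
//	a.data[0] = 1
//	b := a.Clone() // b has its own 64-byte slice and its own reference count
//	b.data[0] = 2  // does not affect a.data[0]
//	a.DecRef()
//	b.DecRef()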