github.com/aavshr/aws-sdk-go@v1.41.3/service/s3/s3manager/pool.go

package s3manager

import (
	"fmt"
	"sync"

	"github.com/aavshr/aws-sdk-go/aws"
)

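// byteSlicePool is the interface implemented by pools of reusable byte
// slices. Get blocks until a slice is available or the context is canceled,
// Put returns a slice to the pool, ModifyCapacity grows or shrinks the
// pool's capacity by a delta, SliceSize reports the size of the slices the
// pool hands out, and Close releases all pooled resources.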
type byteSlicePool interface {
	Get(aws.Context) (*[]byte, error)
	Put(*[]byte)
	ModifyCapacity(int)
	SliceSize() int64
	Close()
}

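// maxSlicePool is a byteSlicePool that limits the number of outstanding
// slices to a maximum capacity. Returned slices are reused; when no pooled
// slice is available but an allocation credit is, a fresh slice is
// allocated on demand.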
type maxSlicePool struct {
	// allocator is defined as a function pointer to allow
	// for test cases to instrument custom tracers when allocations
	// occur.
	allocator sliceAllocator

	slices         chan *[]byte
	allocations    chan struct{}
	capacityChange chan struct{}

	max       int
	sliceSize int64

	mtx sync.RWMutex
}

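// newMaxSlicePool returns a maxSlicePool that hands out slices of sliceSize
// bytes. The pool starts with zero capacity; callers must grow it with
// ModifyCapacity before calling Get.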
func newMaxSlicePool(sliceSize int64) *maxSlicePool {
	p := &maxSlicePool{sliceSize: sliceSize}
	p.allocator = p.newSlice

	return p
}

var errZeroCapacity = fmt.Errorf("get called on zero capacity pool")

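// Get returns a slice from the pool, blocking until either a pooled slice is
// returned, an allocation credit becomes available, or the provided context
// is canceled. Calling Get on a zero capacity pool returns errZeroCapacity.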
func (p *maxSlicePool) Get(ctx aws.Context) (*[]byte, error) {
	// check if context is canceled before attempting to get a slice
	// this ensures priority is given to the cancel case first
	select {
	case <-ctx.Done():
		return nil, ctx.Err()
	default:
	}

	p.mtx.RLock()

	for {
		select {
		case bs, ok := <-p.slices:
			p.mtx.RUnlock()
			if !ok {
				// attempt to get on a zero capacity pool
				return nil, errZeroCapacity
			}
			return bs, nil
		case <-ctx.Done():
			p.mtx.RUnlock()
			return nil, ctx.Err()
		default:
			// pass
		}

		select {
		case _, ok := <-p.allocations:
			p.mtx.RUnlock()
			if !ok {
				// attempt to get on a zero capacity pool
				return nil, errZeroCapacity
			}
			return p.allocator(), nil
		case <-ctx.Done():
			p.mtx.RUnlock()
			return nil, ctx.Err()
		default:
			// There are no slices or allocations available, so release the read lock and
			// wait for a notification. This prevents deadlock situations that can occur
			// around sync.RWMutex: once a write-lock request is pending in ModifyCapacity,
			// no new readers are allowed to acquire a read lock. If Get held the read lock
			// indefinitely while waiting for capacity, ModifyCapacity would be blocked
			// waiting for the write lock, and Put would be blocked trying to acquire a
			// read lock behind ModifyCapacity.

			// Short-circuit if the pool capacity is zero.
			if p.max == 0 {
				p.mtx.RUnlock()
				return nil, errZeroCapacity
			}

			// Since we will be releasing the read lock we need to take a reference to the
			// channel. Because channels are references we will still be notified if slices
			// are added, or if the channel is closed due to a capacity modification. This
			// specifically avoids a data race where ModifyCapacity both closes a channel
			// and initializes a new one while we don't hold a read lock.
			c := p.capacityChange

			p.mtx.RUnlock()

			select {
			case <-c:
				p.mtx.RLock()
			case <-ctx.Done():
				return nil, ctx.Err()
			}
		}
	}
}

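// Put returns a slice to the pool for reuse by a later Get. If the pool has
// zero capacity, or is already holding its maximum number of slices, the
// slice is dropped and left to the garbage collector.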
func (p *maxSlicePool) Put(bs *[]byte) {
	p.mtx.RLock()
	defer p.mtx.RUnlock()

	if p.max == 0 {
		return
	}

	select {
	case p.slices <- bs:
		p.notifyCapacity()
	default:
		// If the slices channel is full when attempting to add the slice then we drop
		// the slice. The logic here is to prevent a deadlock situation if the channel
		// is already at max capacity. This allows us to reap allocations that are
		// returned and are no longer needed.
	}
}

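// ModifyCapacity grows (positive delta) or shrinks (negative delta) the
// maximum number of slices the pool may hand out. The internal channels are
// rebuilt at the new capacity and any pooled slices that still fit are
// carried over. Reducing the capacity to zero empties the pool.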
func (p *maxSlicePool) ModifyCapacity(delta int) {
	if delta == 0 {
		return
	}

	p.mtx.Lock()
	defer p.mtx.Unlock()

	p.max += delta

	if p.max == 0 {
		p.empty()
		return
	}

	if p.capacityChange != nil {
		close(p.capacityChange)
	}
	p.capacityChange = make(chan struct{}, p.max)

	origAllocations := p.allocations
	p.allocations = make(chan struct{}, p.max)

	newAllocs := len(origAllocations) + delta
	for i := 0; i < newAllocs; i++ {
		p.allocations <- struct{}{}
	}

	if origAllocations != nil {
		close(origAllocations)
	}

	origSlices := p.slices
	p.slices = make(chan *[]byte, p.max)
	if origSlices == nil {
		return
	}

	close(origSlices)
	for bs := range origSlices {
		select {
		case p.slices <- bs:
		default:
			// If the new channel blocks while adding slices from the old channel
			// then we drop the slice. The logic here is to prevent a deadlock situation
			// if the new channel has a smaller capacity than the old one.
		}
	}
}

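// notifyCapacity signals a Get that is waiting on a capacity change that a
// slice has been returned to the pool. The caller must hold at least a read
// lock.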
func (p *maxSlicePool) notifyCapacity() {
	select {
	case p.capacityChange <- struct{}{}:
	default:
		// This *shouldn't* happen as the channel is both buffered to the max pool capacity size and is resized
		// on capacity modifications. This is just a safeguard to ensure that a blocking situation can't occur.
	}
}

func (p *maxSlicePool) SliceSize() int64 {
	return p.sliceSize
}

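// Close empties the pool and releases its internal channels. The capacity
// drops to zero, so subsequent calls to Get return errZeroCapacity.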
func (p *maxSlicePool) Close() {
	p.mtx.Lock()
	defer p.mtx.Unlock()
	p.empty()
}

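// empty resets the pool to zero capacity, closing and draining its internal
// channels. The caller must hold the write lock.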
func (p *maxSlicePool) empty() {
	p.max = 0

	if p.capacityChange != nil {
		close(p.capacityChange)
		p.capacityChange = nil
	}

	if p.allocations != nil {
		close(p.allocations)
		for range p.allocations {
			// drain channel
		}
		p.allocations = nil
	}

	if p.slices != nil {
		close(p.slices)
		for range p.slices {
			// drain channel
		}
		p.slices = nil
	}
}

func (p *maxSlicePool) newSlice() *[]byte {
	bs := make([]byte, p.sliceSize)
	return &bs
}

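// returnCapacityPoolCloser wraps a shared byteSlicePool and records the most
// recent capacity increase it made so that Close can hand that capacity back
// to the underlying pool instead of closing it.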
type returnCapacityPoolCloser struct {
	byteSlicePool
	returnCapacity int
}

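// ModifyCapacity forwards the capacity change to the wrapped pool. A
// positive delta is recorded so the same amount can be returned on Close.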
func (n *returnCapacityPoolCloser) ModifyCapacity(delta int) {
	if delta > 0 {
		n.returnCapacity = -1 * delta
	}
	n.byteSlicePool.ModifyCapacity(delta)
}

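// Close returns any recorded capacity to the wrapped pool rather than
// closing it, leaving the shared pool usable by other consumers.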
func (n *returnCapacityPoolCloser) Close() {
	if n.returnCapacity < 0 {
		n.byteSlicePool.ModifyCapacity(n.returnCapacity)
	}
}

type sliceAllocator func() *[]byte

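// newByteSlicePool constructs the default byteSlicePool. It is declared as a
// package variable so that tests can substitute an alternate implementation.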
var newByteSlicePool = func(sliceSize int64) byteSlicePool {
	return newMaxSlicePool(sliceSize)
}
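
// A minimal usage sketch (illustrative only, not part of the package API):
// grow the pool before use, pair each Get with a Put, and Close the pool
// when finished. Error handling is elided for brevity.
//
//	pool := newByteSlicePool(1024 * 1024) // pool of 1 MiB slices
//	pool.ModifyCapacity(5)                // allow up to 5 outstanding slices
//	defer pool.Close()
//
//	buf, err := pool.Get(aws.BackgroundContext())
//	if err != nil {
//		// a canceled context or a zero capacity pool ends up here
//		return
//	}
//	defer pool.Put(buf)
//	// use *buf as a scratch buffer of pool.SliceSize() bytes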