github.com/aavshr/aws-sdk-go@v1.41.3/service/s3/s3manager/pool_test.go (about)

     1  //go:build go1.7
     2  // +build go1.7
     3  
     4  package s3manager
     5  
     6  import (
     7  	"context"
     8  	"sync"
     9  	"sync/atomic"
    10  	"testing"
    11  
    12  	"github.com/aavshr/aws-sdk-go/aws"
    13  )
    14  
// TestMaxSlicePool exercises maxSlicePool's capacity accounting under heavy
// concurrency: capacity grows and shrinks while slices are checked out,
// a blocked Get is unblocked by a Put, a canceled context aborts a Get, and
// returns beyond the pool's capacity are dropped.
func TestMaxSlicePool(t *testing.T) {
	pool := newMaxSlicePool(0)

	var wg sync.WaitGroup
	for i := 0; i < 100; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()

			// increase pool capacity by 2
			pool.ModifyCapacity(2)

			// remove 2 items
			bsOne, err := pool.Get(context.Background())
			if err != nil {
				t.Errorf("failed to get slice from pool: %v", err)
			}
			bsTwo, err := pool.Get(context.Background())
			if err != nil {
				t.Errorf("failed to get slice from pool: %v", err)
			}

			done := make(chan struct{})
			go func() {
				defer close(done)

				// attempt to remove a 3rd in parallel
				// (this Get can only proceed once some goroutine returns a
				// slice; the Put of bsOne below provides it)
				bs, err := pool.Get(context.Background())
				if err != nil {
					t.Errorf("failed to get slice from pool: %v", err)
				}
				pool.Put(bs)

				// attempt to remove a 4th that has been canceled
				ctx, cancel := context.WithCancel(context.Background())
				cancel()
				bs, err = pool.Get(ctx)
				if err == nil {
					pool.Put(bs)
					t.Errorf("expected no slice to be returned")
					return
				}
			}()

			// return one slice so the inner goroutine's 3rd Get can proceed
			pool.Put(bsOne)

			<-done

			pool.ModifyCapacity(-1)

			pool.Put(bsTwo)

			pool.ModifyCapacity(-1)

			// any excess returns should drop
			rando := make([]byte, 0)
			pool.Put(&rando)
		}()
	}
	wg.Wait()

	// after all goroutines finish, the pool must be fully drained:
	// no cached slices, no allocation permits, and zero max capacity
	if e, a := 0, len(pool.slices); e != a {
		t.Errorf("expected %v, got %v", e, a)
	}
	if e, a := 0, len(pool.allocations); e != a {
		t.Errorf("expected %v, got %v", e, a)
	}
	if e, a := 0, pool.max; e != a {
		t.Errorf("expected %v, got %v", e, a)
	}

	// a zero-capacity pool can never satisfy a Get
	_, err := pool.Get(context.Background())
	if err == nil {
		t.Errorf("expected error on zero capacity pool")
	}

	pool.Close()
}
    93  
    94  func TestPoolShouldPreferAllocatedSlicesOverNewAllocations(t *testing.T) {
    95  	pool := newMaxSlicePool(0)
    96  	defer pool.Close()
    97  
    98  	// Prepare pool: make it so that pool contains 1 allocated slice and 1 allocation permit
    99  	pool.ModifyCapacity(2)
   100  	initialSlice, err := pool.Get(context.Background())
   101  	if err != nil {
   102  		t.Errorf("failed to get slice from pool: %v", err)
   103  	}
   104  	pool.Put(initialSlice)
   105  
   106  	for i := 0; i < 100; i++ {
   107  		newSlice, err := pool.Get(context.Background())
   108  		if err != nil {
   109  			t.Errorf("failed to get slice from pool: %v", err)
   110  			return
   111  		}
   112  
   113  		if newSlice != initialSlice {
   114  			t.Errorf("pool allocated a new slice despite it having pre-allocated one")
   115  			return
   116  		}
   117  		pool.Put(newSlice)
   118  	}
   119  }
   120  
// recordedPartPool wraps a maxSlicePool and records allocation, Get, and
// outstanding-slice counts so tests can assert on pool behavior.
type recordedPartPool struct {
	// The 64-bit counters come first: they are updated with sync/atomic,
	// which requires 64-bit alignment on 32-bit platforms (pre-Go 1.19).
	recordedAllocs      uint64 // number of fresh slice allocations performed
	recordedGets        uint64 // total number of Get calls
	recordedOutstanding int64  // Gets minus Puts currently checked out
	*maxSlicePool
}
   127  
   128  func newRecordedPartPool(sliceSize int64) *recordedPartPool {
   129  	sp := newMaxSlicePool(sliceSize)
   130  
   131  	rp := &recordedPartPool{}
   132  
   133  	allocator := sp.allocator
   134  	sp.allocator = func() *[]byte {
   135  		atomic.AddUint64(&rp.recordedAllocs, 1)
   136  		return allocator()
   137  	}
   138  
   139  	rp.maxSlicePool = sp
   140  
   141  	return rp
   142  }
   143  
   144  func (r *recordedPartPool) Get(ctx aws.Context) (*[]byte, error) {
   145  	atomic.AddUint64(&r.recordedGets, 1)
   146  	atomic.AddInt64(&r.recordedOutstanding, 1)
   147  	return r.maxSlicePool.Get(ctx)
   148  }
   149  
   150  func (r *recordedPartPool) Put(b *[]byte) {
   151  	atomic.AddInt64(&r.recordedOutstanding, -1)
   152  	r.maxSlicePool.Put(b)
   153  }
   154  
   155  func swapByteSlicePool(f func(sliceSize int64) byteSlicePool) func() {
   156  	orig := newByteSlicePool
   157  
   158  	newByteSlicePool = f
   159  
   160  	return func() {
   161  		newByteSlicePool = orig
   162  	}
   163  }
   164  
// syncSlicePool is a byteSlicePool implementation backed by a sync.Pool, used
// as a test stand-in; its capacity-related methods are no-ops.
type syncSlicePool struct {
	sync.Pool
	sliceSize int64 // length of each slice produced by the pool's New func
}
   169  
   170  func newSyncSlicePool(sliceSize int64) *syncSlicePool {
   171  	p := &syncSlicePool{sliceSize: sliceSize}
   172  	p.New = func() interface{} {
   173  		bs := make([]byte, p.sliceSize)
   174  		return &bs
   175  	}
   176  	return p
   177  }
   178  
   179  func (s *syncSlicePool) Get(ctx aws.Context) (*[]byte, error) {
   180  	select {
   181  	case <-ctx.Done():
   182  		return nil, ctx.Err()
   183  	default:
   184  		return s.Pool.Get().(*[]byte), nil
   185  	}
   186  }
   187  
// Put returns bs to the underlying sync.Pool for reuse.
func (s *syncSlicePool) Put(bs *[]byte) {
	s.Pool.Put(bs)
}
   191  
   192  func (s *syncSlicePool) ModifyCapacity(_ int) {
   193  	return
   194  }
   195  
// SliceSize returns the byte length of slices this pool produces.
func (s *syncSlicePool) SliceSize() int64 {
	return s.sliceSize
}
   199  
   200  func (s *syncSlicePool) Close() {
   201  	return
   202  }