github.com/m3db/m3@v1.5.0/src/dbnode/encoding/multi_reader_iterator_array_pool.go

// Copyright (c) 2016 Uber Technologies, Inc.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.

package encoding

import (
	"sort"
	"sync"

	"github.com/m3db/m3/src/x/pool"
)

// TODO(r): there should be an ArrayPool that can take an alloc function
// that, given a "capacity", returns an "interface{}" wrapping an array of the
// specified capacity, and that performs the bucketing logic for the type-safe,
// higher-level specific array pool, i.e. IteratorArrayPool.
// This way we can avoid duplicating the bucketing logic in each concrete
// array pool.

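// multiReaderIteratorArrayPool pools []MultiReaderIterator slices in buckets
// keyed by slice capacity.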
// TODO(r): instrument this to tune pooling
type multiReaderIteratorArrayPool struct {
	sizesAsc          []pool.Bucket
	buckets           []multiReaderIteratorArrayPoolBucket
	maxBucketCapacity int
}

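// multiReaderIteratorArrayPoolBucket holds the pooled slices for a single
// capacity, backed by a buffered channel for fixed counts or by a sync.Pool
// for dynamic counts.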
type multiReaderIteratorArrayPoolBucket struct {
	capacity   int
	values     chan []MultiReaderIterator
	valuesPool *sync.Pool
}

// NewMultiReaderIteratorArrayPool creates a new MultiReaderIteratorArrayPool
// that pools []MultiReaderIterator slices in buckets of the provided sizes.
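//
// A minimal usage sketch (the bucket sizes and counts below are illustrative,
// not recommended values):
//
//	arrPool := NewMultiReaderIteratorArrayPool([]pool.Bucket{
//		{Capacity: 8, Count: 64},
//		{Capacity: 64, Count: 8},
//	})
//	arrPool.Init()
//	iters := arrPool.Get(8) // len 0, cap >= 8
//	// append MultiReaderIterators, use them, then return the slice
//	arrPool.Put(iters)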
func NewMultiReaderIteratorArrayPool(sizes []pool.Bucket) MultiReaderIteratorArrayPool {
	sizesAsc := make([]pool.Bucket, len(sizes))
	copy(sizesAsc, sizes)
	sort.Sort(pool.BucketByCapacity(sizesAsc))
	var maxBucketCapacity int
	if len(sizesAsc) != 0 {
		maxBucketCapacity = sizesAsc[len(sizesAsc)-1].Capacity
	}
	return &multiReaderIteratorArrayPool{sizesAsc: sizesAsc, maxBucketCapacity: maxBucketCapacity}
}

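// alloc allocates a new zero-length slice with the given capacity.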
func (p *multiReaderIteratorArrayPool) alloc(capacity int) []MultiReaderIterator {
	return make([]MultiReaderIterator, 0, capacity)
}

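// Init creates the pool's buckets: fixed-count buckets are pre-populated with
// allocated slices, while dynamic-count buckets are backed by a sync.Pool that
// allocates on demand.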
func (p *multiReaderIteratorArrayPool) Init() {
	buckets := make([]multiReaderIteratorArrayPoolBucket, len(p.sizesAsc))
	for i := range p.sizesAsc {
		buckets[i].capacity = p.sizesAsc[i].Capacity

		if !p.sizesAsc[i].Count.IsDynamic() {
			// Fixed-count bucket: pre-allocate Count slices into a buffered channel.
			buckets[i].values = make(chan []MultiReaderIterator, p.sizesAsc[i].Count)
			for j := 0; pool.Size(j) < p.sizesAsc[i].Count; j++ {
				buckets[i].values <- p.alloc(p.sizesAsc[i].Capacity)
			}
			continue
		}

		// Dynamic-count bucket: back it with a sync.Pool that allocates on demand.
		i := i // capture the loop variable for the closure below
		buckets[i].valuesPool = &sync.Pool{New: func() interface{} {
			// NB: if leaking slices is ever a problem, change APIs to return *[]MultiReaderIterator
			return p.alloc(p.sizesAsc[i].Capacity)
		}}
	}
	p.buckets = buckets
}

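// Get returns a zero-length slice with at least the requested capacity,
// preferring a pooled slice and falling back to a fresh allocation when the
// request exceeds the largest bucket capacity or the matching bucket is empty.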
func (p *multiReaderIteratorArrayPool) Get(capacity int) []MultiReaderIterator {
	if capacity > p.maxBucketCapacity {
		// Larger than any bucket; allocate directly (will not be pooled on Put).
		return p.alloc(capacity)
	}
	for i := range p.buckets {
		if p.buckets[i].capacity >= capacity {
			if p.sizesAsc[i].Count.IsDynamic() {
				return p.buckets[i].valuesPool.Get().([]MultiReaderIterator)
			}
			select {
			case b := <-p.buckets[i].values:
				return b
			default:
				// NB(r): use the bucket's capacity so can potentially
				// be returned to pool when it's finished with.
				return p.alloc(p.buckets[i].capacity)
			}
		}
	}
	// No bucket matched (e.g. the pool was created with no sizes); allocate directly.
	return p.alloc(capacity)
}

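// Put returns a slice to the pool after clearing its elements. Slices with a
// capacity larger than the largest bucket are dropped so they can be garbage
// collected.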
func (p *multiReaderIteratorArrayPool) Put(array []MultiReaderIterator) {
	capacity := cap(array)
	if capacity > p.maxBucketCapacity {
		// Too large for any bucket; let it be garbage collected.
		return
	}

	// Clear references so pooled slices do not retain iterators.
	for i := range array {
		array[i] = nil
	}
	array = array[:0]
	for i := range p.buckets {
		if p.buckets[i].capacity >= capacity {
			if p.sizesAsc[i].Count.IsDynamic() {
				p.buckets[i].valuesPool.Put(array) //nolint:staticcheck
				return
			}
			select {
			case p.buckets[i].values <- array:
			default:
				// Bucket is full; drop the slice.
			}
			return
		}
	}
}