github.com/dshulyak/uring@v0.0.0-20210209113719-1b2ec51f1542/fixed/pool.go

package fixed

import (
	"runtime"
	"sync"
	"sync/atomic"
	"syscall"
	"unsafe"
)

// BufferRegistry registers the pool's mmap'ed memory, described as iovecs, as buffers in io_uring.
type BufferRegistry interface {
	RegisterBuffers([]syscall.Iovec) error
}
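
// A minimal BufferRegistry sketch for examples and tests; recordingRegistry is
// hypothetical and not part of the original package. A real implementation
// would hand the iovecs to io_uring buffer registration instead of recording them.
type recordingRegistry struct {
	iovecs []syscall.Iovec
}

func (r *recordingRegistry) RegisterBuffers(iovecs []syscall.Iovec) error {
	r.iovecs = append(r.iovecs, iovecs...)
	return nil
}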

var bufferPool = sync.Pool{
	New: func() interface{} {
		return &Buffer{}
	},
}

// New initializes an mmap'ed memory region of total size 16 bytes + size*bufsize
// and registers the mmap'ed memory as a buffer in io_uring.
func New(reg BufferRegistry, bufsize, size int) (*Pool, error) {
	alloc := &allocator{
		max:        size,
		bufferSize: bufsize,
		reg:        reg,
	}
	if err := alloc.init(); err != nil {
		return nil, err
	}
	// Build the free list: one node per buffer index.
	var head *node
	for i := size - 1; i >= 0; i-- {
		head = &node{
			next:  head,
			index: i,
		}
	}
	return &Pool{
		alloc: alloc,
		head:  head,
	}, nil
}
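
// A usage sketch, assuming Buffer.B is the byte slice backed by the registered
// region and that the hypothetical recordingRegistry above stands in for a real
// io_uring-backed registry; examplePoolUsage is illustrative only.
func examplePoolUsage() error {
	pool, err := New(&recordingRegistry{}, 4096, 8) // 8 buffers of 4096 bytes each
	if err != nil {
		return err
	}
	defer pool.Close()

	buf := pool.Get()
	copy(buf.B, []byte("hello"))
	pool.Put(buf)
	return nil
}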

// Pool manages registered offheap buffers. Allocated with MAP_ANON | MAP_PRIVATE.
// TODO performance is not really excellent, several ideas to try:
// - backoff on contention (runtime.Gosched achieves the same purpose)
// - elimination array
// This is much better than the mutex-based version (3.5 times faster), but much
// worse than a plain sync.Pool (10 times slower).
type Pool struct {
	alloc *allocator
	head  *node
}
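
// A small concurrency sketch illustrating the contended case the TODO above
// targets: many goroutines taking and returning buffers in parallel.
// stressPool is a hypothetical helper, not part of the original package.
func stressPool(p *Pool, goroutines, iterations int) {
	var wg sync.WaitGroup
	wg.Add(goroutines)
	for g := 0; g < goroutines; g++ {
		go func() {
			defer wg.Done()
			for i := 0; i < iterations; i++ {
				buf := p.Get()
				buf.B[0] = byte(i) // touch the buffer (assumes bufsize > 0)
				p.Put(buf)
			}
		}()
	}
	wg.Wait()
}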

// Get returns a free buffer, spinning (with runtime.Gosched) until one becomes available.
func (p *Pool) Get() *Buffer {
	for {
		// Lock-free pop from the free list: read the current head and try to
		// swing p.head to head.next with a CAS.
		old := (*node)(atomic.LoadPointer((*unsafe.Pointer)(unsafe.Pointer(&p.head))))
		if old != nil {
			index := old.index
			next := old.next
			if atomic.CompareAndSwapPointer((*unsafe.Pointer)(unsafe.Pointer(&p.head)), unsafe.Pointer(old), unsafe.Pointer(next)) {
				buf := bufferPool.Get().(*Buffer)
				buf.B = p.alloc.bufAt(index)
				buf.poolIndex = index
				buf.index = 0 // placeholder until more pages are supported
				return buf
			}
		}
		runtime.Gosched()
	}
}

// Put returns the buffer to the pool. Note that if callers don't put used
// buffers back into the pool, Get will block indefinitely.
func (p *Pool) Put(b *Buffer) {
	next := &node{index: b.poolIndex}
	for {
		// Lock-free push onto the free list: point the new node at the current
		// head and try to swing p.head to it with a CAS.
		head := (*node)(atomic.LoadPointer((*unsafe.Pointer)(unsafe.Pointer(&p.head))))
		next.next = head
		if atomic.CompareAndSwapPointer((*unsafe.Pointer)(unsafe.Pointer(&p.head)), unsafe.Pointer(head), unsafe.Pointer(next)) {
			bufferPool.Put(b)
			return
		}
		runtime.Gosched()
	}
}
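
// A sketch of the Get/Put discipline described above: every buffer taken from
// the pool must be returned, otherwise Get eventually spins forever once all
// buffers are checked out. usePooledBuffer is a hypothetical helper, not part
// of the original package.
func usePooledBuffer(p *Pool, fn func(b []byte)) {
	buf := p.Get()
	defer p.Put(buf)
	fn(buf.B)
}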

// Close prevents future Get's from the pool and munmap's the allocated memory.
// Caller must ensure that all users of the pool have exited before calling Close.
// Otherwise the program will crash by referencing an invalid memory region.
func (p *Pool) Close() error {
	return p.alloc.close()
}

type node struct {
	next  *node
	index int
}