github.com/maypok86/otter@v1.2.1/internal/lossy/buffer.go

// Copyright (c) 2023 Alexey Mayshev. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package lossy

import (
	"runtime"
	"sync/atomic"
	"unsafe"

	"github.com/maypok86/otter/internal/generated/node"
	"github.com/maypok86/otter/internal/xruntime"
)

const (
	// The maximum number of elements per buffer.
	capacity = 16
	mask     = uint64(capacity - 1)
)

// PolicyBuffers is the set of buffers returned by the lossy buffer.
type PolicyBuffers[K comparable, V any] struct {
	Returned []node.Node[K, V]
}

// Buffer is a circular ring buffer that stores the elements being transferred by the producers to the consumer.
// The monotonically increasing counts of reads and writes allow indexing sequentially to the next
// element location based upon a power-of-two sizing.
//
// The producers race to read the counts, check if there is available capacity, and if so then try
// once to CAS to the next write count. If the increment is successful then the producer lazily
// publishes the element. The producer does not retry or block when unsuccessful due to a failed
// CAS or the buffer being full.
//
// The consumer reads the counts and takes the available elements. The clearing of the elements
// and the next read count are lazily set.
//
// This implementation is striped to further increase concurrency.
type Buffer[K comparable, V any] struct {
	// head and tail are the monotonically increasing read and write counters; the padding
	// fields keep the hot fields on separate cache lines to avoid false sharing.
	head                 atomic.Uint64
	headPadding          [xruntime.CacheLineSize - unsafe.Sizeof(atomic.Uint64{})]byte
	tail                 atomic.Uint64
	tailPadding          [xruntime.CacheLineSize - unsafe.Sizeof(atomic.Uint64{})]byte
	nodeManager          *node.Manager[K, V]
	// returned holds policyBuffers while they are available to be handed out and nil while
	// the consumer is still processing the previous batch.
	returned             unsafe.Pointer
	returnedPadding      [xruntime.CacheLineSize - 2*8]byte
	policyBuffers        unsafe.Pointer
	returnedSlicePadding [xruntime.CacheLineSize - 8]byte
	buffer               [capacity]unsafe.Pointer
}
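
// wrapIndex is an illustrative helper (assumed name, not used elsewhere in the package)
// showing how the monotonically increasing counters map onto the fixed-size ring: because
// capacity is a power of two, count&mask is equivalent to count%capacity, so head and tail
// themselves never need to wrap.
func wrapIndex(count uint64) int {
	return int(count & mask) // always in [0, capacity)
}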

// New creates a new lossy Buffer.
func New[K comparable, V any](nodeManager *node.Manager[K, V]) *Buffer[K, V] {
	pb := &PolicyBuffers[K, V]{
		Returned: make([]node.Node[K, V], 0, capacity),
	}
	b := &Buffer[K, V]{
		nodeManager:   nodeManager,
		policyBuffers: unsafe.Pointer(pb),
	}
	b.returned = b.policyBuffers
	return b
}

// Add lazily publishes the item to the consumer.
//
// The item may be lost due to contention.
func (b *Buffer[K, V]) Add(n node.Node[K, V]) *PolicyBuffers[K, V] {
	head := b.head.Load()
	tail := b.tail.Load()
	size := tail - head
	if size >= capacity {
		// full buffer
		return nil
	}
	if b.tail.CompareAndSwap(tail, tail+1) {
		// success
		index := int(tail & mask)
		atomic.StorePointer(&b.buffer[index], n.AsPointer())
		if size == capacity-1 {
			// try to hand the policy buffers to the consumer
			if !atomic.CompareAndSwapPointer(&b.returned, b.policyBuffers, nil) {
				// somebody has already taken the buffer
				return nil
			}

			pb := (*PolicyBuffers[K, V])(b.policyBuffers)
			for i := 0; i < capacity; i++ {
				index := int(head & mask)
				v := atomic.LoadPointer(&b.buffer[index])
				if v != nil {
					// published
					pb.Returned = append(pb.Returned, b.nodeManager.FromPointer(v))
					// release
					atomic.StorePointer(&b.buffer[index], nil)
				}
				head++
			}

			b.head.Store(head)
			return pb
		}
	}

	// CAS failed or the buffer did not need draining
	return nil
}
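
// recordAccess is a hypothetical caller-side sketch (the name and the onAccess callback are
// assumptions, not part of this package) of the intended Add/Free protocol: Add either drops
// the node under contention, or, when the buffer fills up, hands back the drained
// PolicyBuffers; the caller consumes Returned and then gives the buffers back with Free.
func recordAccess[K comparable, V any](b *Buffer[K, V], n node.Node[K, V], onAccess func(node.Node[K, V])) {
	pb := b.Add(n)
	if pb == nil {
		// the node was lost to a race or a full buffer, or no drain was needed yet
		return
	}
	for _, drained := range pb.Returned {
		onAccess(drained) // feed the drained batch to the eviction policy
	}
	b.Free() // reset Returned and make the buffers available for the next drain
}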

// Free returns the processed buffer back and also clears it.
func (b *Buffer[K, V]) Free() {
	pb := (*PolicyBuffers[K, V])(b.policyBuffers)
	for i := 0; i < len(pb.Returned); i++ {
		pb.Returned[i] = nil
	}
	pb.Returned = pb.Returned[:0]
	atomic.StorePointer(&b.returned, b.policyBuffers)
}

// Clear clears the lossy Buffer and returns it to the default state.
func (b *Buffer[K, V]) Clear() {
	for !atomic.CompareAndSwapPointer(&b.returned, b.policyBuffers, nil) {
		runtime.Gosched()
	}
	for i := 0; i < capacity; i++ {
		atomic.StorePointer(&b.buffer[i], nil)
	}
	b.Free()
	b.tail.Store(0)
	b.head.Store(0)
}
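
// resetBuffers is a hypothetical helper (assumed name, not part of the package) illustrating
// when Clear is useful: when the owning cache is purged, each striped lossy buffer can be
// cleared so that no stale nodes are delivered to the policy on the next drain.
func resetBuffers[K comparable, V any](buffers []*Buffer[K, V]) {
	for _, b := range buffers {
		b.Clear()
	}
}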