github.com/aclements/go-misc@v0.0.0-20240129233631-2f6ede80790c/split/vlogger_test.go

// Copyright 2018 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package split

import (
	"sync"
	"sync/atomic"
	"testing"
)

// valueLoggerLocked is an implementation of a value logger that uses
// locking to protect concurrent access.
type valueLoggerLocked struct {
	sync.Mutex
	vals *valueLoggerBuf
	pos  int
}

func newValueLoggerLocked() valueLoggerLocked {
	var l valueLoggerLocked
	l.vals = valueLoggerBufPool.Get().(*valueLoggerBuf)
	return l
}

func (l *valueLoggerLocked) append(v uint64) {
	l.Lock()
	l.vals[l.pos] = v
	l.pos++
	if l.pos == len(l.vals) {
		buf := l.vals
		l.vals = new(valueLoggerBuf)
		l.pos = 0
		l.Unlock()
		l.process(buf)
	} else {
		l.Unlock()
	}
}

func (l *valueLoggerLocked) process(buf *valueLoggerBuf) {
	// In a real system, this would do something with the data in
	// buf. Here we just discard it.
	valueLoggerBufPool.Put(buf)
}

func BenchmarkLazyAggregationSplitLocked(b *testing.B) {
	// Benchmark a lazy aggregating value logger that uses locking
	// instead of atomics.
	logger := New(func(l *valueLoggerLocked) { *l = newValueLoggerLocked() })

	b.RunParallel(func(pb *testing.PB) {
		for i := uint64(0); pb.Next(); i++ {
			logger.Get().(*valueLoggerLocked).append(i)
		}
	})
}

const (
	log2ValueLoggerBuf  = 8 // 256 entries per buffer
	log2ValueLoggerBufs = 1 // Double buffering

	valueLoggerIndexShift = 64 - (log2ValueLoggerBuf + log2ValueLoggerBufs)
	activeWriterBits      = 1 + log2ValueLoggerBuf // Room for max writers to a buffer, plus mark bit.
	bufMarkMask           = 1 << (activeWriterBits - 1)
)
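
// With the constants above, the control field of valueLoggerAtomic
// (defined below) is laid out as follows. This layout is spelled out
// here for reference; it is implied by, not stated in, the constants:
//
//	bits 63..55  index into the 512-slot logical ring buffer
//	bits 54..18  unused
//	bits 17..9   vals[1]: mark bit 17, active-writer count in bits 16..9
//	bits 8..0    vals[0]: mark bit 8, active-writer count in bits 7..0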

type valueLoggerBuf [1 << log2ValueLoggerBuf]uint64

var valueLoggerBufPool = sync.Pool{New: func() interface{} { return new(valueLoggerBuf) }}

// valueLoggerAtomic is a value logger that uses atomics to protect
// concurrent access.
type valueLoggerAtomic struct {
	// control is the buffer control field. It consists of several
	// bit fields. The low bits consist of len(vals) fields that are
	// each activeWriterBits wide and correspond to indexes into vals.
	// Field i counts the number of active writers to vals[i],
	// plus a bufMarkMask bit that indicates vals[i] is full.
	//
	// Bits valueLoggerIndexShift and up are an index into the
	// logical ring buffer formed by concatenating vals.
	//
	// TODO: Put this bit packing behind a type with methods?
	// (An illustrative sketch of one such type follows this struct.)
	control uint64
	// vals is a double-buffered (though it could be more) ring
	// buffer for storing values. Using a pair of buffers allows
	// writes to proceed in one buffer while the other buffer is
	// being reallocated.
	vals [1 << log2ValueLoggerBufs]*valueLoggerBuf
	// allocLock protects allocating new buffers for vals. Access
	// to vals is already synchronized by control, but this offers
	// a convenient way to block writers waiting on a buffer to be
	// swapped out.
	allocLock sync.Mutex
}
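
// The TODO above asks for a type that hides the bit packing in
// control. One possible shape is sketched below. It is illustrative
// only: the names are made up and the type is not used by the logger
// code that follows.
type valueLoggerControl uint64

// index returns the claim position in the logical ring buffer.
func (c valueLoggerControl) index() uint64 {
	return uint64(c) >> valueLoggerIndexShift
}

// field returns the raw activeWriterBits-wide field for vals[bufIdx]:
// the active-writer count plus the mark bit.
func (c valueLoggerControl) field(bufIdx uint64) uint64 {
	return (uint64(c) >> (bufIdx * activeWriterBits)) & (1<<activeWriterBits - 1)
}

// marked reports whether vals[bufIdx] is full and waiting to be
// swapped out and reallocated.
func (c valueLoggerControl) marked(bufIdx uint64) bool {
	return c.field(bufIdx)&bufMarkMask != 0
}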

func newValueLogger() valueLoggerAtomic {
	var l valueLoggerAtomic
	for i := range l.vals {
		l.vals[i] = valueLoggerBufPool.Get().(*valueLoggerBuf)
	}
	return l
}

func (l *valueLoggerAtomic) append(v uint64) {
	// Claim a slot and increment the active count for that
	// buffer. The active count acts as a lock on vals[bufIdx].
	var i, bufIdx, activeShift uint64
	for {
		c := atomic.LoadUint64(&l.control)
		i = c >> valueLoggerIndexShift
		bufIdx = i / uint64(len(valueLoggerBuf{}))
		activeShift = bufIdx * activeWriterBits
		if (c>>activeShift)&bufMarkMask != 0 {
			// This buffer is still being swapped out.
			// Wait for it and retry.
			l.allocLock.Lock()
			l.allocLock.Unlock()
			continue
		}

		// Increment the index. This depends on uint64
		// wrap-around.
		newC := c + 1<<valueLoggerIndexShift
		// Increment the active writer count.
		newC += 1 << activeShift

		if atomic.CompareAndSwapUint64(&l.control, c, newC) {
			break
		}
	}

	// Put the value in the slot we claimed.
	l.vals[bufIdx][i%uint64(len(valueLoggerBuf{}))] = v

	// Decrement the active writer count for the buffer. If this
	// wrote to the last slot, set the buffer mark. If this is the
	// last writer to this buffer and the buffer is marked, this
	// writer is responsible for re-allocating the buffer.
	for {
		c := atomic.LoadUint64(&l.control)
		// Decrement the active writer count for this buffer.
		newC := c + (^uint64(0) << activeShift)
		// If this wrote to the last slot, set the buffer mark.
		if i%uint64(len(valueLoggerBuf{})) == uint64(len(valueLoggerBuf{})-1) {
			newC |= bufMarkMask << activeShift
		}
		if atomic.CompareAndSwapUint64(&l.control, c, newC) {
			// If this was the last writer to this buffer
			// and it's marked, this writer is
			// responsible for re-allocating the buffer.
			if (newC>>activeShift)&(1<<activeWriterBits-1) != bufMarkMask {
				return
			}
			break
		}
	}

	// This writer is responsible for re-allocating the buffer.
	l.allocLock.Lock()
	completeBuf := l.vals[bufIdx]
	l.vals[bufIdx] = valueLoggerBufPool.Get().(*valueLoggerBuf)
	// Clear the buffer mark so writers can use this
	// buffer slot again. Too bad there's no AndUint64.
	for {
		c := atomic.LoadUint64(&l.control)
		newC := c &^ (bufMarkMask << activeShift)
		if atomic.CompareAndSwapUint64(&l.control, c, newC) {
			break
		}
	}
	l.allocLock.Unlock()
	l.process(completeBuf)
}
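
// A worked example of the protocol above, added for illustration and
// not part of the original file: with a zeroed control word, the first
// append loads c = 0, so i = 0, bufIdx = 0, and activeShift = 0. The
// claim CAS installs 1<<valueLoggerIndexShift + 1, i.e. index 1 with
// one active writer on vals[0]. After the value is stored, the
// decrement loop brings the writer count back to zero; slot 0 is not
// the last slot of the buffer, so no mark is set and the writer
// returns without reallocating anything.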

func (l *valueLoggerAtomic) process(buf *valueLoggerBuf) {
	// In a real system, this would do something with the data in
	// buf. Here we just discard it.
	valueLoggerBufPool.Put(buf)
}

func BenchmarkLazyAggregationSplitAtomic(b *testing.B) {
	// Benchmark a lazy aggregating value logger that uses atomics.
	logger := New(func(l *valueLoggerAtomic) { *l = newValueLogger() })

	b.RunParallel(func(pb *testing.PB) {
		for i := uint64(0); pb.Next(); i++ {
			logger.Get().(*valueLoggerAtomic).append(i)
		}
	})
}

func BenchmarkLazyAggregationShared(b *testing.B) {
	// Non-sharded version of BenchmarkLazyAggregationSplitAtomic.
	logger := newValueLogger()

	b.RunParallel(func(pb *testing.PB) {
		for i := uint64(0); pb.Next(); i++ {
			logger.append(i)
		}
	})
}
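
// For symmetry, a non-sharded version of
// BenchmarkLazyAggregationSplitLocked might look like the following.
// It is a sketch added for comparison and is not part of the original
// file.
func BenchmarkLazyAggregationSharedLocked(b *testing.B) {
	logger := newValueLoggerLocked()

	b.RunParallel(func(pb *testing.PB) {
		for i := uint64(0); pb.Next(); i++ {
			logger.append(i)
		}
	})
}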