github.com/aclements/go-misc@v0.0.0-20240129233631-2f6ede80790c/split/examples_test.go

// Copyright 2018 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package split

import (
	"fmt"
	"math/rand"
	"runtime"
	"sort"
	"sync"
	"sync/atomic"
	"time"
)

func Example_counter() {
	// This example demonstrates concurrent updates to a split
	// counter. The counter can be updated using an atomic
	// operation. The final result is the sum of the shard values.
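	// New's constructor is called to initialize each new shard;
	// a zero uint64 is already a valid count, so there is
	// nothing for it to do here.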
	counter := New(func(*uint64) {})

	var wg sync.WaitGroup
	for i := 0; i < 64; i++ {
		wg.Add(1)
		go func() {
			atomic.AddUint64(counter.Get().(*uint64), 1)
			wg.Done()
		}()
	}
	wg.Wait()

	// Sum up the counter. In this example, the Range isn't
	// running concurrently with the updates above, but if it
	// were, the sum would be approximate. Specifically, any
	// updates that happened between the call to Range and when it
	// returns may or may not be included in the sum depending on
	// exact timing. For most counters, this is acceptable because
	// updates to the counter are already unordered.
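	// The atomic load below pairs with the atomic adds in the
	// writers, so the loop would remain race-free even if it did
	// run concurrently with them.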
	var sum uint64
	counter.Range(func(np *uint64) {
		sum += atomic.LoadUint64(np)
	})
	fmt.Println(sum)

	// Output: 64
}

func Example_counterConsistent() {
	// This example is similar to the "Counter" example, but the
	// counter goes both up and down. Specifically, each goroutine
	// increments the counter and then decrements the counter, but
	// the increment and decrement may happen on different shards.
	// The sum of the counter at any instant is between 0 and the
	// number of goroutines, but since Range can't see all of the
	// shards at the same instant, it may observe a decrement
	// without an increment, leading to a negative sum.
	//
	// In this example, we solve this problem using two-phase
	// locking.
	type shard struct {
		val uint64
		sync.Mutex
	}
	counter := New(func(*shard) {})

	const N = 64
	var wg sync.WaitGroup
	var stop uint32
	for i := 0; i < N; i++ {
		wg.Add(1)
		go func() {
			for atomic.LoadUint32(&stop) == 0 {
				s := counter.Get().(*shard)
				s.Lock()
				s.val++
				s.Unlock()

				// .. do some work, maybe get moved to
				// a different shard ..
				runtime.Gosched()

				s = counter.Get().(*shard)
				s.Lock()
				s.val--
				s.Unlock()
			}
			wg.Done()
		}()
	}

	// Let the goroutines do some work.
	time.Sleep(time.Millisecond)

	// Capture a consistent sum by locking all of the shards, then
	// unlocking all of them. This must be done in a single Range
	// call to prevent the number of shards from changing.
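	// This relies on Range applying the first function to every
	// shard before applying the second function to any shard, so
	// every lock is held before any lock is released.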
	var sum uint64
	counter.Range(func(s *shard) {
		s.Lock()
		sum += s.val
	}, func(s *shard) {
		s.Unlock()
	})

	// Stop the writers.
	atomic.StoreUint32(&stop, 1)
	wg.Wait()

	if sum < 0 || sum > N {
		fmt.Println("bad sum:", sum)
	}
	// Output:
}

func Example_logging() {
	// This example demonstrates concurrent appends to a split
	// log. Each shard of the log is protected by a mutex. The log
	// is combined by sorting the log records in timestamp order.
	// This example collects a consistent snapshot of the log
	// using these timestamps.
	type record struct {
		when time.Time
		msg  string
	}
	type shard struct {
		sync.Mutex
		log []record
	}
	logger := New(func(*shard) {})

	var wg sync.WaitGroup
	for i := 0; i < 4; i++ {
		wg.Add(1)
		go func(i int) {
			for j := 0; j < 4; j++ {
				msg := fmt.Sprintf("goroutine %d message %d", i, j)
				shard := logger.Get().(*shard)
				shard.Lock()
				// We have to record the time under
				// the lock to ensure it's ordered for
				// the reader.
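				// (The reader below relies on each
				// shard's log already being in
				// timestamp order when it calls
				// sort.Search.)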
				rec := record{time.Now(), msg}
				shard.log = append(shard.log, rec)
				shard.Unlock()
			}
			wg.Done()
		}(i)
	}
	wg.Wait()

	// Collect and sort the log records. This isn't running
	// concurrently with log appends, but for demonstration
	// purposes it's written so it could. To get a consistent view
	// of the log, this uses timestamp ordering: it records the
	// current time before calling Range and ignores any records
	// from after that time. For logs it makes sense to get a
	// consistent snapshot: a given worker could move between
	// shards and it would be confusing to see later log records
	// from that worker without earlier log records.
	snapshot := time.Now()
	var combined []record
	logger.Range(func(val *shard) {
		val.Lock()
		log := val.log
		val.Unlock()
		// Trim records after time "snapshot".
		i := sort.Search(len(log), func(i int) bool {
			return log[i].when.After(snapshot)
		})
		log = log[:i]
		combined = append(combined, log...)
	})
	sort.Slice(combined, func(i, j int) bool { return combined[i].when.Before(combined[j].when) })

	for _, rec := range combined {
		fmt.Println(rec.msg)
	}

	// Unordered output:
	// goroutine 3 message 0
	// goroutine 3 message 1
	// goroutine 3 message 2
	// goroutine 3 message 3
	// goroutine 2 message 0
	// goroutine 1 message 0
	// goroutine 1 message 1
	// goroutine 1 message 2
	// goroutine 1 message 3
	// goroutine 2 message 1
	// goroutine 0 message 0
	// goroutine 2 message 2
	// goroutine 2 message 3
	// goroutine 0 message 1
	// goroutine 0 message 2
	// goroutine 0 message 3
}

func Example_randomSource() {
	// This example demonstrates concurrent random number
	// generation using split random number generators.
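	// There is no Output comment because the output is random;
	// go test compiles this example but does not run it.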
	var seed int64
	type lockedRand struct {
		sync.Mutex
		*rand.Rand
	}
	randSource := New(func(r *lockedRand) {
		r.Rand = rand.New(rand.NewSource(atomic.AddInt64(&seed, 1)))
	})

	var wg sync.WaitGroup
	for i := 0; i < 64; i++ {
		wg.Add(1)
		go func() {
			for j := 0; j < 64; j++ {
				// Generate a random number using a
				// local random source. rand.Rand
				// isn't thread-safe, so we lock it.
				r := randSource.Get().(*lockedRand)
				r.Lock()
				fmt.Println(r.Int())
				r.Unlock()
			}
			wg.Done()
		}()
	}
	wg.Wait()
}

func Example_optimisticTransactions() {
	// This example demonstrates computing an instant-in-time
	// consistent snapshot of a sharded value without blocking
	// writers. Writers in this example can update multiple shards
	// transactionally, so this requires careful synchronization
	// between readers and writers to get a sequentially
	// consistent view of the entire sharded value.
	//
	// Each transaction moves a "unit" between two shards.
	// Initially all shards have a count of 0. Each writer
	// repeatedly picks two shards and transactionally decrements
	// the value of one shard and increments the value of the
	// other. Hence, at any instant, the shards should all sum to
	// 0.
	//
	// Since the Range callback doesn't see all shards at the same
	// instant, it can't simply add up the values of the shards.
	// If it did, the following could happen:
	//
	// 1. Suppose there are two shards with counts {0, 0}.
	//
	// 2. Goroutine 1 calls Range. The callback reads shard 1,
	// which is 0, and adds 0 to the sum.
	//
	// 3. On goroutine 2, a writer transactionally moves a unit
	// from shard 1 to shard 2, so now the shard values are {-1,
	// 1}.
	//
	// 4. On goroutine 1, the Range continues and the callback
	// reads shard 2, which has value 1, and adds 1 to the sum.
	//
	// Now the value of the sum is 1, even though at any given
	// instant all of the shards added up to 0.
	//
	// This example solves this using a sequence number in each
	// shard that is updated on every change to that shard. The
	// reader reads all of the shards repeatedly until it gets two
	// complete reads in a row where the sequence numbers didn't
	// change. This means no modifications raced with the read, so
	// it observed a consistent snapshot.

	type shard struct {
		order uint32 // This shard's position in the lock order.
		val   int64  // The unit count of this shard.
		seq   uint64 // Sequence number; the low bit indicates this shard is unstable.
	}
	var lockOrder uint32
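	// The constructor runs for each new shard and assigns it the
	// next position in the global lock order.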
	val := New(func(s *shard) {
		s.order = atomic.AddUint32(&lockOrder, 1) - 1
	})

	acquireSeq := func(p *uint64) {
		// "Acquire" a sequence counter by spinning until the
		// counter is even and then incrementing it.
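		// This is the writer half of a seqlock-style protocol:
		// while the counter is odd, readers know the shard's
		// value is unstable.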
		for {
			v := atomic.LoadUint64(p)
			if v&1 == 0 && atomic.CompareAndSwapUint64(p, v, v+1) {
				return
			}
			runtime.Gosched()
		}
	}

	// Start a set of writer goroutines.
	var wg sync.WaitGroup
	var stop uint32
	for i := 0; i < 64; i++ {
		wg.Add(1)
		go func() {
			for atomic.LoadUint32(&stop) == 0 {
				// Pick a first shard.
				shard1 := val.Get().(*shard)
				// Try to get moved to a different shard.
				runtime.Gosched()
				// Pick a second shard.
				shard2 := val.Get().(*shard)
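				// If both Gets returned the same
				// shard, the move would be a no-op,
				// and acquiring its sequence lock
				// twice below would spin forever, so
				// pick again.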
				if shard1 == shard2 {
					continue
				}

				// Put the shards in lock order.
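				// Acquiring the sequence locks in a
				// consistent global order keeps two
				// writers from blocking on each other
				// forever.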
				lock1, lock2 := shard1, shard2
				if lock1.order > lock2.order {
					lock1, lock2 = lock2, lock1
				}

				// Lock both shards. Odd sequence
				// numbers also indicate that their
				// values are unstable.
				acquireSeq(&lock1.seq)
				acquireSeq(&lock2.seq)

				// Move a unit from shard1 to shard2.
				atomic.AddInt64(&shard1.val, -1)
				atomic.AddInt64(&shard2.val, +1)

				// Increment the sequence numbers
				// again to indicate that the shards
				// have changed but are now stable.
				atomic.AddUint64(&lock1.seq, 1)
				atomic.AddUint64(&lock2.seq, 1)
			}
			wg.Done()
		}()
	}

	// Let the writers get going.
	time.Sleep(time.Millisecond)

	// Retrieve a consistent sum of the shards. The sum should be
	// zero. This uses optimistic concurrency control and does not
	// block the writer, so it may have to read the shards
	// multiple times until it gets two reads in a row where none
	// of the sequence numbers have changed.
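	// Comparing the sums of the sequence numbers between passes
	// is enough: sequence numbers only ever increase, so if any
	// shard changed, the sum strictly increases.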
	var valSum int64
	var prevSeqSum uint64
	for {
		valSum = 0
		var seqSum uint64
		val.Range(func(s *shard) {
			// Within just this shard, we also need to
			// perform a consistent read of its value and
			// sequence number. If we could read both
			// fields in a single atomic operation, this
			// wouldn't be necessary, but since we can't,
			// we also use optimistic concurrency within
			// the shard.
			for {
				// Wait until the sequence number is
				// even, indicating that the sequence
				// number and value are stable.
				var seq1 uint64
				for {
					if seq1 = atomic.LoadUint64(&s.seq); seq1&1 == 0 {
						break
					}
					runtime.Gosched()
				}
				// Read the value optimistically.
				val := atomic.LoadInt64(&s.val)
				// Re-read the sequence number. If it
				// hasn't changed, then we know we got
				// a consistent read of both the value
				// and the sequence number. Otherwise,
				// try again.
				if atomic.LoadUint64(&s.seq) == seq1 {
					// Got a consistent read.
					// Update the value sum and
					// the sequence number
					// snapshot.
					valSum += val
					seqSum += seq1
					break
				}
			}
		})
		if seqSum == prevSeqSum {
			// We got two reads of the shards in a row
			// with the same sequence numbers. That means
			// no updates happened between those reads, so
			// the values we observed were consistent.
			break
		}
		prevSeqSum = seqSum
	}

	// Exit all workers.
	atomic.StoreUint32(&stop, 1)
	wg.Wait()

	fmt.Printf("Values sum to %d\n", valSum)
	// Output: Values sum to 0
}

// TODO: SRCU-style grace period algorithm? Consistent counter using
// two epochs (though I'm not sure what it could be tracking that
// would require a sequentially consistent snapshot)?