github.com/zuoyebang/bitalostable@v1.0.1-0.20240229032404-e3b99a834294/commit_test.go

// Copyright 2018 The LevelDB-Go and Pebble and Bitalostored Authors. All rights reserved. Use
// of this source code is governed by a BSD-style license that can be found in
// the LICENSE file.

package bitalostable

import (
	"encoding/binary"
	"fmt"
	"io/ioutil"
	"sync"
	"sync/atomic"
	"testing"
	"time"

	"github.com/stretchr/testify/require"
	"github.com/zuoyebang/bitalostable/internal/arenaskl"
	"github.com/zuoyebang/bitalostable/internal/invariants"
	"github.com/zuoyebang/bitalostable/record"
	"github.com/zuoyebang/bitalostable/vfs"
	"golang.org/x/exp/rand"
)

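// testCommitEnv is a stub commitEnv for the tests below: apply records each
// committed batch's sequence number, and write only tracks byte and batch
// counts, so the pipeline's sequencing logic can be exercised without a real
// WAL or memtable.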
type testCommitEnv struct {
	logSeqNum     uint64
	visibleSeqNum uint64
	writePos      int64
	writeCount    uint64
	applyBuf      struct {
		sync.Mutex
		buf []uint64
	}
}

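// env packages the stub callbacks and sequence numbers into the commitEnv
// consumed by newCommitPipeline.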
func (e *testCommitEnv) env() commitEnv {
	return commitEnv{
		logSeqNum:     &e.logSeqNum,
		visibleSeqNum: &e.visibleSeqNum,
		apply:         e.apply,
		write:         e.write,
	}
}

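// apply records the sequence number assigned to each batch; the tests use
// applyBuf to verify that every committed batch was applied exactly once.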
func (e *testCommitEnv) apply(b *Batch, mem *memTable) error {
	e.applyBuf.Lock()
	e.applyBuf.buf = append(e.applyBuf.buf, b.SeqNum())
	e.applyBuf.Unlock()
	return nil
}

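// write stands in for the WAL write: it atomically accumulates the number of
// bytes and batches "written" and returns no memtable.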
func (e *testCommitEnv) write(b *Batch, _ *sync.WaitGroup, _ *error) (*memTable, error) {
	n := int64(len(b.data))
	atomic.AddInt64(&e.writePos, n)
	atomic.AddUint64(&e.writeCount, 1)
	return nil, nil
}

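// TestCommitQueue verifies the publication order of commitQueue: batches are
// dequeued in FIFO order, and only once they have been marked as applied. In
// particular, no batch can be dequeued while an earlier, unapplied batch
// still heads the queue.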
func TestCommitQueue(t *testing.T) {
	var q commitQueue
	var batches [16]Batch
	for i := range batches {
		q.enqueue(&batches[i])
	}
	if b := q.dequeue(); b != nil {
		t.Fatalf("unexpectedly dequeued batch: %p", b)
	}
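	// Mark a non-head batch as applied: dequeue must still return nil because
	// the head of the queue (batches[0]) has not been applied yet.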
	atomic.StoreUint32(&batches[1].applied, 1)
	if b := q.dequeue(); b != nil {
		t.Fatalf("unexpectedly dequeued batch: %p", b)
	}
	for i := range batches {
		atomic.StoreUint32(&batches[i].applied, 1)
		if b := q.dequeue(); b != &batches[i] {
			t.Fatalf("%d: expected batch %p, but found %p", i, &batches[i], b)
		}
	}
	if b := q.dequeue(); b != nil {
		t.Fatalf("unexpectedly dequeued batch: %p", b)
	}
}

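// TestCommitPipeline commits n single-key batches concurrently through the
// stub environment and then checks the bookkeeping: every batch must have
// been written and applied exactly once, and both the log and visible
// sequence numbers must have advanced by exactly one per batch.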
func TestCommitPipeline(t *testing.T) {
	var e testCommitEnv
	p := newCommitPipeline(e.env())

	n := 10000
	if invariants.RaceEnabled {
		// Under race builds we have to limit the concurrency or we hit the
		// following error:
		//
		//   race: limit on 8128 simultaneously alive goroutines is exceeded, dying
		n = 1000
	}

	var wg sync.WaitGroup
	wg.Add(n)
	for i := 0; i < n; i++ {
		go func(i int) {
			defer wg.Done()
			var b Batch
			_ = b.Set([]byte(fmt.Sprint(i)), nil, nil)
			_ = p.Commit(&b, false)
		}(i)
	}
	wg.Wait()

	if s := atomic.LoadUint64(&e.writeCount); uint64(n) != s {
		t.Fatalf("expected %d written batches, but found %d", n, s)
	}
	if n != len(e.applyBuf.buf) {
		t.Fatalf("expected %d applied batches, but found %d",
			n, len(e.applyBuf.buf))
	}
	if s := atomic.LoadUint64(&e.logSeqNum); uint64(n) != s {
		t.Fatalf("expected logSeqNum %d, but found %d", n, s)
	}
	if s := atomic.LoadUint64(&e.visibleSeqNum); uint64(n) != s {
		t.Fatalf("expected visibleSeqNum %d, but found %d", n, s)
	}
}

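// TestCommitPipelineAllocateSeqNum runs n concurrent AllocateSeqNum calls,
// where call i reserves i sequence numbers. A call such as
//
//	p.AllocateSeqNum(3, prepare, apply)
//
// reserves three consecutive sequence numbers, invoking prepare and then
// apply once each; the test verifies that each callback ran exactly once per
// call and that the final sequence numbers add up.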
func TestCommitPipelineAllocateSeqNum(t *testing.T) {
	var e testCommitEnv
	p := newCommitPipeline(e.env())

	const n = 10
	var wg sync.WaitGroup
	wg.Add(n)
	var prepareCount uint64
	var applyCount uint64
	for i := 1; i <= n; i++ {
		go func(i int) {
			defer wg.Done()
			p.AllocateSeqNum(i, func() {
				atomic.AddUint64(&prepareCount, uint64(1))
			}, func(seqNum uint64) {
				atomic.AddUint64(&applyCount, uint64(1))
			})
		}(i)
	}
	wg.Wait()

	if s := atomic.LoadUint64(&prepareCount); n != s {
		t.Fatalf("expected %d prepares, but found %d", n, s)
	}
	if s := atomic.LoadUint64(&applyCount); n != s {
		t.Fatalf("expected %d applies, but found %d", n, s)
	}
	// AllocateSeqNum always returns a non-zero sequence number, so the first
	// allocation consumes an extra sequence number; hence the leading 1 in
	// the total below (1 + sum(1..10)).
	const total = 1 + 1 + 2 + 3 + 4 + 5 + 6 + 7 + 8 + 9 + 10
	if s := atomic.LoadUint64(&e.logSeqNum); total != s {
		t.Fatalf("expected %d, but found %d", total, s)
	}
	if s := atomic.LoadUint64(&e.visibleSeqNum); total != s {
		t.Fatalf("expected %d, but found %d", total, s)
	}
}

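// syncDelayFile wraps a vfs.File and blocks every Sync call until the done
// channel is closed, letting the test pile up goroutines that are waiting on
// a WAL sync.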
type syncDelayFile struct {
	vfs.File
	done chan struct{}
}

func (f *syncDelayFile) Sync() error {
	<-f.done
	return nil
}

func TestCommitPipelineWALClose(t *testing.T) {
	// This test stresses the edge case of N goroutines blocked in the
	// commitPipeline waiting for the log to sync when we concurrently decide to
	// rotate and close the log.

	mem := vfs.NewMem()
	f, err := mem.Create("test-wal")
	require.NoError(t, err)

	// syncDelayFile will block on the done channel before returning from a
	// Sync call.
	sf := &syncDelayFile{
		File: f,
		done: make(chan struct{}),
	}

	// A basic commitEnv which writes to a WAL.
	wal := record.NewLogWriter(sf, 0 /* logNum */)
	var walDone sync.WaitGroup
	testEnv := commitEnv{
		logSeqNum:     new(uint64),
		visibleSeqNum: new(uint64),
		apply: func(b *Batch, mem *memTable) error {
			// At this point, we've called SyncRecord but the sync is blocked.
			walDone.Done()
			return nil
		},
		write: func(b *Batch, syncWG *sync.WaitGroup, syncErr *error) (*memTable, error) {
			_, err := wal.SyncRecord(b.data, syncWG, syncErr)
			return nil, err
		},
	}
	p := newCommitPipeline(testEnv)

	// Launch N (commitConcurrency) goroutines which each create a batch and
	// commit it with sync==true. Because of the syncDelayFile, none of these
	// operations can complete until syncDelayFile.done is closed.
	errCh := make(chan error, cap(p.sem))
	walDone.Add(cap(errCh))
	for i := 0; i < cap(errCh); i++ {
		go func(i int) {
			b := &Batch{}
			if err := b.LogData([]byte("foo"), nil); err != nil {
				errCh <- err
				return
			}
			errCh <- p.Commit(b, true /* sync */)
		}(i)
	}

	// Wait for all of the WAL writes to be queued. Closing the WAL before all
	// of the writes are queued would violate the concurrency requirements of
	// LogWriter.
	walDone.Wait()
	close(sf.done)

	// Close the WAL. A "queue is full" panic means that something is broken.
	require.NoError(t, wal.Close())
	for i := 0; i < cap(errCh); i++ {
		require.NoError(t, <-errCh)
	}
}

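// BenchmarkCommitPipeline measures commit throughput at varying parallelism.
// Each iteration commits a batch containing a single 8-byte key/value pair
// through a commitEnv that applies to a real memtable and "syncs" the WAL to
// ioutil.Discard.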
func BenchmarkCommitPipeline(b *testing.B) {
	for _, parallelism := range []int{1, 2, 4, 8, 16, 32, 64, 128} {
		b.Run(fmt.Sprintf("parallel=%d", parallelism), func(b *testing.B) {
			b.SetParallelism(parallelism)
			mem := newMemTable(memTableOptions{})
			wal := record.NewLogWriter(ioutil.Discard, 0 /* logNum */)

			nullCommitEnv := commitEnv{
				logSeqNum:     new(uint64),
				visibleSeqNum: new(uint64),
				apply: func(b *Batch, mem *memTable) error {
					err := mem.apply(b, b.SeqNum())
					if err != nil {
						return err
					}
					mem.writerUnref()
					return nil
				},
				write: func(b *Batch, syncWG *sync.WaitGroup, syncErr *error) (*memTable, error) {
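					// If the memtable's arena fills up, swap in a fresh
					// memtable and retry the prepare; the benchmark never
					// reads the discarded one.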
					for {
						err := mem.prepare(b)
						if err == arenaskl.ErrArenaFull {
							mem = newMemTable(memTableOptions{})
							continue
						}
						if err != nil {
							return nil, err
						}
						break
					}

					_, err := wal.SyncRecord(b.data, syncWG, syncErr)
					return mem, err
				},
			}
			p := newCommitPipeline(nullCommitEnv)

			const keySize = 8
			b.SetBytes(2 * keySize)
			b.ResetTimer()

			b.RunParallel(func(pb *testing.PB) {
				rng := rand.New(rand.NewSource(uint64(time.Now().UnixNano())))
				buf := make([]byte, keySize)

				for pb.Next() {
					batch := newBatch(nil)
					binary.BigEndian.PutUint64(buf, rng.Uint64())
					_ = batch.Set(buf, buf, nil)
					if err := p.Commit(batch, true /* sync */); err != nil {
						b.Fatal(err)
					}
					batch.release()
				}
			})
		})
	}
}