github.com/petermattis/pebble@v0.0.0-20190905164901-ab51a2166067/commit_test.go

// Copyright 2018 The LevelDB-Go and Pebble Authors. All rights reserved. Use
// of this source code is governed by a BSD-style license that can be found in
// the LICENSE file.

package pebble

import (
	"encoding/binary"
	"fmt"
	"io/ioutil"
	"sync"
	"sync/atomic"
	"testing"
	"time"

	"github.com/petermattis/pebble/internal/arenaskl"
	"github.com/petermattis/pebble/internal/record"
	"golang.org/x/exp/rand"
)

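// testCommitEnv is a stub commitEnv for the pipeline tests below. Instead of
// touching a real memtable or WAL, it records the sequence number of each
// applied batch and counts (and sums the size of) written batches.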
type testCommitEnv struct {
	logSeqNum     uint64
	visibleSeqNum uint64
	writePos      int64
	writeCount    uint64
	applyBuf      struct {
		sync.Mutex
		buf []uint64
	}
}

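// env returns a commitEnv wired to the test's sequence number fields and to
// the stub apply and write methods.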
func (e *testCommitEnv) env() commitEnv {
	return commitEnv{
		logSeqNum:     &e.logSeqNum,
		visibleSeqNum: &e.visibleSeqNum,
		apply:         e.apply,
		write:         e.write,
	}
}

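// apply records the batch's sequence number rather than applying the batch to
// a memtable.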
func (e *testCommitEnv) apply(b *Batch, mem *memTable) error {
	e.applyBuf.Lock()
	e.applyBuf.buf = append(e.applyBuf.buf, b.SeqNum())
	e.applyBuf.Unlock()
	return nil
}

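// write records the batch's size and bumps the write count rather than
// writing to a WAL. It returns a nil memtable, which the stub apply ignores.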
func (e *testCommitEnv) write(b *Batch, _ *sync.WaitGroup) (*memTable, error) {
	n := int64(len(b.storage.data))
	atomic.AddInt64(&e.writePos, n)
	atomic.AddUint64(&e.writeCount, 1)
	return nil, nil
}

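// TestCommitQueue verifies that commitQueue dequeues batches in FIFO order,
// and only once the batch at the head of the queue has been marked applied.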
func TestCommitQueue(t *testing.T) {
	var q commitQueue
	var batches [16]Batch
	for i := range batches {
		q.enqueue(&batches[i])
	}
	if b := q.dequeue(); b != nil {
		t.Fatalf("unexpectedly dequeued batch: %p", b)
	}
	atomic.StoreUint32(&batches[1].applied, 1)
	if b := q.dequeue(); b != nil {
		t.Fatalf("unexpectedly dequeued batch: %p", b)
	}
	for i := range batches {
		atomic.StoreUint32(&batches[i].applied, 1)
		if b := q.dequeue(); b != &batches[i] {
			t.Fatalf("%d: expected batch %p, but found %p", i, &batches[i], b)
		}
	}
	if b := q.dequeue(); b != nil {
		t.Fatalf("unexpectedly dequeued batch: %p", b)
	}
}

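// TestCommitPipeline commits n single-record batches concurrently and checks
// that every batch was written and applied exactly once, and that the log and
// visible sequence numbers both advanced to n.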
func TestCommitPipeline(t *testing.T) {
	var e testCommitEnv
	p := newCommitPipeline(e.env())

	const n = 10000
	var wg sync.WaitGroup
	wg.Add(n)
	for i := 0; i < n; i++ {
		go func(i int) {
			defer wg.Done()
			var b Batch
			_ = b.Set([]byte(fmt.Sprint(i)), nil, nil)
			_ = p.Commit(&b, false)
		}(i)
	}
	wg.Wait()

	if s := atomic.LoadUint64(&e.writeCount); n != s {
		t.Fatalf("expected %d written batches, but found %d", n, s)
	}
	if n != len(e.applyBuf.buf) {
		t.Fatalf("expected %d applied batches, but found %d",
			n, len(e.applyBuf.buf))
	}
	if s := atomic.LoadUint64(&e.logSeqNum); n != s {
		t.Fatalf("expected %d, but found %d", n, s)
	}
	if s := atomic.LoadUint64(&e.visibleSeqNum); n != s {
		t.Fatalf("expected %d, but found %d", n, s)
	}
}

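// TestCommitPipelineAllocateSeqNum calls AllocateSeqNum from n goroutines,
// with the i-th goroutine reserving i sequence numbers, and checks that the
// prepare and apply callbacks each run once per call.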
func TestCommitPipelineAllocateSeqNum(t *testing.T) {
	var e testCommitEnv
	p := newCommitPipeline(e.env())

	const n = 10
	var wg sync.WaitGroup
	wg.Add(n)
	var prepareCount uint64
	var applyCount uint64
	for i := 1; i <= n; i++ {
		go func(i int) {
			defer wg.Done()
			p.AllocateSeqNum(i, func() {
				atomic.AddUint64(&prepareCount, uint64(1))
			}, func(seqNum uint64) {
				atomic.AddUint64(&applyCount, uint64(1))
			})
		}(i)
	}
	wg.Wait()

	if s := atomic.LoadUint64(&prepareCount); n != s {
		t.Fatalf("expected %d prepares, but found %d", n, s)
	}
	if s := atomic.LoadUint64(&applyCount); n != s {
		t.Fatalf("expected %d applies, but found %d", n, s)
	}
	// AllocateSeqNum always returns a non-zero sequence number, which offsets
	// the values we see by 1.
	const total = 1 + 1 + 2 + 3 + 4 + 5 + 6 + 7 + 8 + 9 + 10
	if s := atomic.LoadUint64(&e.logSeqNum); total != s {
		t.Fatalf("expected %d, but found %d", total, s)
	}
	if s := atomic.LoadUint64(&e.visibleSeqNum); total != s {
		t.Fatalf("expected %d, but found %d", total, s)
	}
}

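// BenchmarkCommitPipeline measures commit throughput at increasing levels of
// parallelism, using a commitEnv that applies batches to a real memtable and
// writes them to a WAL backed by ioutil.Discard.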
func BenchmarkCommitPipeline(b *testing.B) {
	for _, parallelism := range []int{1, 2, 4, 8, 16, 32, 64, 128} {
		b.Run(fmt.Sprintf("parallel=%d", parallelism), func(b *testing.B) {
			b.SetParallelism(parallelism)
			mem := newMemTable(nil)
			wal := record.NewLogWriter(ioutil.Discard, 0 /* logNum */)

			nullCommitEnv := commitEnv{
				logSeqNum:     new(uint64),
				visibleSeqNum: new(uint64),
				apply: func(b *Batch, mem *memTable) error {
					err := mem.apply(b, b.SeqNum())
					if err != nil {
						return err
					}
					mem.unref()
					return nil
				},
				write: func(b *Batch, wg *sync.WaitGroup) (*memTable, error) {
					for {
						err := mem.prepare(b)
						if err == arenaskl.ErrArenaFull {
							mem = newMemTable(nil)
							continue
						}
						if err != nil {
							return nil, err
						}
						break
					}

					_, err := wal.SyncRecord(b.storage.data, wg)
					return mem, err
				},
			}
			p := newCommitPipeline(nullCommitEnv)

			const keySize = 8
			b.SetBytes(2 * keySize)
			b.ResetTimer()

			b.RunParallel(func(pb *testing.PB) {
				rng := rand.New(rand.NewSource(uint64(time.Now().UnixNano())))
				buf := make([]byte, keySize)

				for pb.Next() {
					batch := newBatch(nil)
					binary.BigEndian.PutUint64(buf, rng.Uint64())
					batch.Set(buf, buf, nil)
					if err := p.Commit(batch, true /* sync */); err != nil {
						b.Fatal(err)
					}
					batch.release()
				}
			})
		})
	}
}