github.com/zuoyebang/bitalosdb@v1.1.1-0.20240516111551-79a8c4d8ce20/commit_test.go

// Copyright 2021 The Bitalosdb author(hustxrb@163.com) and other contributors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package bitalosdb

import (
	"encoding/binary"
	"fmt"
	"io/ioutil"
	"sync"
	"sync/atomic"
	"testing"
	"time"

	"github.com/stretchr/testify/require"
	"github.com/zuoyebang/bitalosdb/internal/arenaskl"
	"github.com/zuoyebang/bitalosdb/internal/invariants"
	"github.com/zuoyebang/bitalosdb/internal/record"
	"github.com/zuoyebang/bitalosdb/internal/vfs"
	"golang.org/x/exp/rand"
)

// testCommitEnv is a stub commit environment that records the sequence number
// of every applied batch and counts the bytes and batches it is asked to write.
type testCommitEnv struct {
	logSeqNum     uint64
	visibleSeqNum uint64
	writePos      int64
	writeCount    uint64
	applyBuf      struct {
		sync.Mutex
		buf []uint64
	}
}

func (e *testCommitEnv) env() commitEnv {
	return commitEnv{
		logSeqNum:     &e.logSeqNum,
		visibleSeqNum: &e.visibleSeqNum,
		apply:         e.apply,
		write:         e.write,
	}
}

func (e *testCommitEnv) apply(b *BatchBitower, mem *memTable) error {
	e.applyBuf.Lock()
	e.applyBuf.buf = append(e.applyBuf.buf, b.SeqNum())
	e.applyBuf.Unlock()
	return nil
}

func (e *testCommitEnv) write(b *BatchBitower, _ *sync.WaitGroup, _ *error) (*memTable, error) {
	n := int64(len(b.data))
	atomic.AddInt64(&e.writePos, n)
	atomic.AddUint64(&e.writeCount, 1)
	return nil, nil
}

// TestCommitQueue verifies that commitQueue dequeues batches in enqueue order,
// and only once the batch at the head of the queue has been marked applied.
func TestCommitQueue(t *testing.T) {
	var q commitQueue
	var batches [16]BatchBitower
	for i := range batches {
		q.enqueue(&batches[i])
	}
	if b := q.dequeue(); b != nil {
		t.Fatalf("unexpectedly dequeued batch: %p", b)
	}
	atomic.StoreUint32(&batches[1].applied, 1)
	if b := q.dequeue(); b != nil {
		t.Fatalf("unexpectedly dequeued batch: %p", b)
	}
	for i := range batches {
		atomic.StoreUint32(&batches[i].applied, 1)
		if b := q.dequeue(); b != &batches[i] {
			t.Fatalf("%d: expected batch %p, but found %p", i, &batches[i], b)
		}
	}
	if b := q.dequeue(); b != nil {
		t.Fatalf("unexpectedly dequeued batch: %p", b)
	}
}

// TestCommitPipeline commits n single-key batches concurrently and checks that
// the write count, the apply count, and both sequence numbers all reach n.
func TestCommitPipeline(t *testing.T) {
	var e testCommitEnv
	p := newCommitPipeline(e.env())

	n := 10000
	if invariants.RaceEnabled {
		n = 1000
	}

	var wg sync.WaitGroup
	wg.Add(n)
	for i := 0; i < n; i++ {
		go func(i int) {
			defer wg.Done()
			var b BatchBitower
			_ = b.Set([]byte(fmt.Sprint(i)), nil, nil)
			_ = p.Commit(&b, false)
		}(i)
	}
	wg.Wait()

	if s := atomic.LoadUint64(&e.writeCount); uint64(n) != s {
		t.Fatalf("expected %d written batches, but found %d", n, s)
	}
	if n != len(e.applyBuf.buf) {
		t.Fatalf("expected %d written batches, but found %d",
			n, len(e.applyBuf.buf))
	}
	if s := atomic.LoadUint64(&e.logSeqNum); uint64(n) != s {
		t.Fatalf("expected %d, but found %d", n, s)
	}
	if s := atomic.LoadUint64(&e.visibleSeqNum); uint64(n) != s {
		t.Fatalf("expected %d, but found %d", n, s)
	}
}
// syncDelayFile wraps a vfs.File and blocks Sync until done is closed,
// simulating a WAL whose fsync is arbitrarily slow.
type syncDelayFile struct {
	vfs.File
	done chan struct{}
}

func (f *syncDelayFile) Sync() error {
	<-f.done
	return nil
}

// TestCommitPipelineWALClose saturates the pipeline with synced commits against
// a WAL whose Sync blocks, then checks that unblocking Sync and closing the WAL
// releases every pending commit without error.
func TestCommitPipelineWALClose(t *testing.T) {
	mem := vfs.NewMem()
	f, err := mem.Create("test-wal")
	require.NoError(t, err)

	sf := &syncDelayFile{
		File: f,
		done: make(chan struct{}),
	}

	wal := record.NewLogWriter(sf, 0)
	var walDone sync.WaitGroup
	testEnv := commitEnv{
		logSeqNum:     new(uint64),
		visibleSeqNum: new(uint64),
		apply: func(b *BatchBitower, mem *memTable) error {
			walDone.Done()
			return nil
		},
		write: func(b *BatchBitower, syncWG *sync.WaitGroup, syncErr *error) (*memTable, error) {
			_, err := wal.SyncRecord(b.data, syncWG, syncErr)
			return nil, err
		},
		useQueue: true,
	}
	p := newCommitPipeline(testEnv)

	errCh := make(chan error, cap(p.sem))
	walDone.Add(cap(errCh))
	for i := 0; i < cap(errCh); i++ {
		go func(i int) {
			b := &BatchBitower{}
			if err := b.LogData([]byte("foo"), nil); err != nil {
				errCh <- err
				return
			}
			errCh <- p.Commit(b, true)
		}(i)
	}

	walDone.Wait()
	close(sf.done)

	require.NoError(t, wal.Close())
	for i := 0; i < cap(errCh); i++ {
		require.NoError(t, <-errCh)
	}
}

// BenchmarkCommitPipeline measures commit throughput through the pipeline at
// varying levels of parallelism, applying each batch to a memtable and writing
// it to a discarded WAL.
func BenchmarkCommitPipeline(b *testing.B) {
	for _, parallelism := range []int{1, 2, 4, 8, 16, 32, 64, 128} {
		b.Run(fmt.Sprintf("parallel=%d", parallelism), func(b *testing.B) {
			b.SetParallelism(parallelism)
			mem := newMemTable(memTableOptions{})
			wal := record.NewLogWriter(ioutil.Discard, 0 /* logNum */)

			nullCommitEnv := commitEnv{
				logSeqNum:     new(uint64),
				visibleSeqNum: new(uint64),
				apply: func(b *BatchBitower, mem *memTable) error {
					err := mem.apply(b, b.SeqNum())
					if err != nil {
						return err
					}
					mem.writerUnref()
					return nil
				},
				write: func(b *BatchBitower, syncWG *sync.WaitGroup, syncErr *error) (*memTable, error) {
					for {
						err := mem.prepare(b, false)
						if err == arenaskl.ErrArenaFull {
							mem = newMemTable(memTableOptions{})
							continue
						}
						if err != nil {
							return nil, err
						}
						break
					}

					_, err := wal.SyncRecord(b.data, syncWG, syncErr)
					return mem, err
				},
			}
			p := newCommitPipeline(nullCommitEnv)

			const keySize = 8
			b.SetBytes(2 * keySize)
			b.ResetTimer()

			b.RunParallel(func(pb *testing.PB) {
				rng := rand.New(rand.NewSource(uint64(time.Now().UnixNano())))
				buf := make([]byte, keySize)

				for pb.Next() {
					batch := newBatchBitower(nil)
					binary.BigEndian.PutUint64(buf, rng.Uint64())
					batch.Set(buf, buf, nil)
					if err := p.Commit(batch, true /* sync */); err != nil {
						b.Fatal(err)
					}
					batch.release()
				}
			})
		})
	}
}
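
// The sketch below is not part of the original test file. It is a minimal,
// hypothetical illustration of the wiring the tests above exercise: a commitEnv
// with no-op apply and write hooks handed to newCommitPipeline, and a single
// batch committed through it. The no-op hook bodies and the function name
// commitPipelineUsageSketch are assumptions made purely for illustration.
func commitPipelineUsageSketch() error {
	env := commitEnv{
		logSeqNum:     new(uint64),
		visibleSeqNum: new(uint64),
		apply: func(b *BatchBitower, _ *memTable) error {
			// A real environment would apply the batch to a memtable here.
			return nil
		},
		write: func(b *BatchBitower, _ *sync.WaitGroup, _ *error) (*memTable, error) {
			// A real environment would hand b.data to the WAL writer here.
			return nil, nil
		},
	}
	p := newCommitPipeline(env)

	var b BatchBitower
	if err := b.Set([]byte("key"), []byte("value"), nil); err != nil {
		return err
	}
	// Commit assigns the batch a sequence number, runs write and apply, and
	// publishes the sequence number before returning.
	return p.Commit(&b, false /* sync */)
}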