github.com/dolthub/dolt/go@v0.40.5-0.20240520175717-68db7794bea6/store/nbs/benchmarks/block_store_benchmarks.go

// Copyright 2019 Dolthub, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
// This file incorporates work covered by the following copyright and
// permission notice:
//
// Copyright 2016 Attic Labs, Inc. All rights reserved.
// Licensed under the Apache License, version 2.0:
// http://www.apache.org/licenses/LICENSE-2.0

package main

import (
	"context"
	"fmt"
	"sync"

	"github.com/stretchr/testify/assert"

	"github.com/dolthub/dolt/go/store/chunks"
	"github.com/dolthub/dolt/go/store/d"
	"github.com/dolthub/dolt/go/store/hash"
)

// storeOpenFn opens (or re-opens) the ChunkStore under benchmark.
type storeOpenFn func() (chunks.ChunkStore, error)

// benchmarkNovelWrite writes the data source into a freshly opened, empty
// store, then closes it. It returns true so callers can record that the
// write happened (see ensureNovelWrite).
func benchmarkNovelWrite(refreshStore storeOpenFn, src *dataSource, t assert.TestingT) bool {
	store, err := refreshStore()
	assert.NoError(t, err)
	writeToEmptyStore(store, src, t)
	assert.NoError(t, store.Close())
	return true
}

// noopGetAddrs is passed to ChunkStore.Put as the address callback; it
// reports no child references for any chunk.
func noopGetAddrs(c chunks.Chunk) chunks.GetAddrsCb {
	return func(_ context.Context, _ hash.HashSet, _ chunks.PendingRefExists) error { return nil }
}

// writeToEmptyStore asserts that the store has an empty root, writes every
// chunk from src, then commits a new root chunk.
func writeToEmptyStore(store chunks.ChunkStore, src *dataSource, t assert.TestingT) {
	root, err := store.Root(context.Background())
	assert.NoError(t, err)
	assert.Equal(t, hash.Hash{}, root)

	chunx := goReadChunks(src)
	for c := range chunx {
		err := store.Put(context.Background(), *c, noopGetAddrs)
		assert.NoError(t, err)
	}
	newRoot := chunks.NewChunk([]byte("root"))
	err = store.Put(context.Background(), newRoot, noopGetAddrs)
	assert.NoError(t, err)
	success, err := store.Commit(context.Background(), newRoot.Hash(), root)
	assert.NoError(t, err)
	assert.True(t, success)
}

// goReadChunks reads chunks from src on a background goroutine and returns
// the channel they are delivered on; the channel is closed once src is
// exhausted.
func goReadChunks(src *dataSource) <-chan *chunks.Chunk {
	chunx := make(chan *chunks.Chunk, 1024)
	go func() {
		err := src.ReadChunks(chunx)

		d.PanicIfError(err)

		close(chunx)
	}()
	return chunx
}

// benchmarkNoRefreshWrite writes the data source into a store opened without
// refreshing it first, so some chunks may already be present.
func benchmarkNoRefreshWrite(openStore storeOpenFn, src *dataSource, t assert.TestingT) {
	store, err := openStore()
	assert.NoError(t, err)
	chunx := goReadChunks(src)
	for c := range chunx {
		err := store.Put(context.Background(), *c, noopGetAddrs)
		assert.NoError(t, err)
	}
	assert.NoError(t, store.Close())
}

// verifyChunk panics if the fetched chunk is empty, which indicates the
// store failed to return data for h.
func verifyChunk(h hash.Hash, c chunks.Chunk) {
	if len(c.Data()) == 0 {
		panic(fmt.Sprintf("Failed to fetch %s\n", h.String()))
	}
}

// benchmarkRead fetches each hash one at a time with Get and verifies that a
// non-empty chunk comes back.
func benchmarkRead(openStore storeOpenFn, hashes hashSlice, src *dataSource, t assert.TestingT) {
	store, err := openStore()
	assert.NoError(t, err)
	for _, h := range hashes {
		c, err := store.Get(context.Background(), h)
		assert.NoError(t, err)
		verifyChunk(h, c)
	}
	assert.NoError(t, store.Close())
}

// verifyChunks drains foundChunks, panicking on any chunk that was not
// requested and on any requested hash that never arrives.
func verifyChunks(hashes hash.HashSlice, foundChunks chan *chunks.Chunk) {
	requested := hashes.HashSet()

	for c := range foundChunks {
		if _, ok := requested[c.Hash()]; !ok {
			panic(fmt.Sprintf("Got unexpected chunk: %s", c.Hash().String()))
		}

		delete(requested, c.Hash())
	}

	if len(requested) > 0 {
		for h := range requested {
			fmt.Printf("Failed to fetch %s\n", h.String())
		}
		panic("failed to fetch chunks")
	}
}

// benchmarkReadMany fetches hashes in batches of batchSize via GetMany,
// running up to concurrency batches at once, and verifies that every
// requested chunk is returned.
func benchmarkReadMany(openStore storeOpenFn, hashes hashSlice, src *dataSource, batchSize, concurrency int, t assert.TestingT) {
	store, err := openStore()
	assert.NoError(t, err)

	batch := make(hash.HashSlice, 0, batchSize)

	wg := sync.WaitGroup{}
	limit := make(chan struct{}, concurrency)

	for _, h := range hashes {
		batch = append(batch, h)

		if len(batch) == batchSize {
			limit <- struct{}{}
			wg.Add(1)
			go func(hashes hash.HashSlice) {
				chunkChan := make(chan *chunks.Chunk, len(hashes))
				err := store.GetMany(context.Background(), hashes.HashSet(), func(ctx context.Context, c *chunks.Chunk) {
					select {
					case chunkChan <- c:
					case <-ctx.Done():
					}
				})

				d.PanicIfError(err)

				close(chunkChan)
				verifyChunks(hashes, chunkChan)
				wg.Done()
				<-limit
			}(batch)

			batch = make([]hash.Hash, 0, batchSize)
		}
	}

	// Fetch whatever is left over in a final, smaller batch.
	if len(batch) > 0 {
		chunkChan := make(chan *chunks.Chunk, len(batch))
		err := store.GetMany(context.Background(), batch.HashSet(), func(ctx context.Context, c *chunks.Chunk) {
			select {
			case chunkChan <- c:
			case <-ctx.Done():
			}
		})
		assert.NoError(t, err)

		close(chunkChan)

		verifyChunks(batch, chunkChan)
	}

	wg.Wait()

	assert.NoError(t, store.Close())
}

// ensureNovelWrite writes src into a fresh store only if a novel-write
// benchmark has not already done so, guaranteeing that read benchmarks have
// data to fetch.
func ensureNovelWrite(wrote bool, openStore storeOpenFn, src *dataSource, t assert.TestingT) bool {
	if !wrote {
		store, err := openStore()
		assert.NoError(t, err)
		defer store.Close()
		writeToEmptyStore(store, src, t)
	}
	return true
}