github.com/hasnat/dolt/go@v0.0.0-20210628190320-9eb5d843fbb7/store/nbs/benchmarks/block_store_benchmarks.go

// Copyright 2019 Dolthub, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
// This file incorporates work covered by the following copyright and
// permission notice:
//
// Copyright 2016 Attic Labs, Inc. All rights reserved.
// Licensed under the Apache License, version 2.0:
// http://www.apache.org/licenses/LICENSE-2.0

package main

import (
	"context"
	"fmt"
	"sync"

	"github.com/stretchr/testify/assert"

	"github.com/dolthub/dolt/go/store/chunks"
	"github.com/dolthub/dolt/go/store/d"
	"github.com/dolthub/dolt/go/store/hash"
)

// storeOpenFn opens (or reopens) the ChunkStore under benchmark.
type storeOpenFn func() (chunks.ChunkStore, error)

// benchmarkNovelWrite writes src into a freshly opened, empty store and
// returns true so callers can record that the data now exists.
func benchmarkNovelWrite(refreshStore storeOpenFn, src *dataSource, t assert.TestingT) bool {
	store, err := refreshStore()
	assert.NoError(t, err)
	writeToEmptyStore(store, src, t)
	assert.NoError(t, store.Close())
	return true
}

// writeToEmptyStore streams every chunk in src into store, which must start
// with an empty root, then commits a new root chunk.
func writeToEmptyStore(store chunks.ChunkStore, src *dataSource, t assert.TestingT) {
	root, err := store.Root(context.Background())
	assert.NoError(t, err)
	assert.Equal(t, hash.Hash{}, root)

	chunx := goReadChunks(src)
	for c := range chunx {
		err := store.Put(context.Background(), *c)
		assert.NoError(t, err)
	}
	newRoot := chunks.NewChunk([]byte("root"))
	err = store.Put(context.Background(), newRoot)
	assert.NoError(t, err)
	success, err := store.Commit(context.Background(), newRoot.Hash(), root)
	assert.NoError(t, err)
	assert.True(t, success)
}

// goReadChunks reads src on a background goroutine, delivering its chunks over
// a buffered channel that is closed once the source is exhausted.
func goReadChunks(src *dataSource) <-chan *chunks.Chunk {
	chunx := make(chan *chunks.Chunk, 1024)
	go func() {
		err := src.ReadChunks(chunx)
		d.PanicIfError(err)
		close(chunx)
	}()
	return chunx
}

// benchmarkNoRefreshWrite writes src into an already-populated store without
// committing a new root.
func benchmarkNoRefreshWrite(openStore storeOpenFn, src *dataSource, t assert.TestingT) {
	store, err := openStore()
	assert.NoError(t, err)
	chunx := goReadChunks(src)
	for c := range chunx {
		err := store.Put(context.Background(), *c)
		assert.NoError(t, err)
	}
	assert.NoError(t, store.Close())
}

// verifyChunk panics if the chunk fetched for h came back empty.
func verifyChunk(h hash.Hash, c chunks.Chunk) {
	if len(c.Data()) == 0 {
		panic(fmt.Sprintf("Failed to fetch %s\n", h.String()))
	}
}

// benchmarkRead fetches every hash from the store one Get at a time.
func benchmarkRead(openStore storeOpenFn, hashes hashSlice, src *dataSource, t assert.TestingT) {
	store, err := openStore()
	assert.NoError(t, err)
	for _, h := range hashes {
		c, err := store.Get(context.Background(), h)
		assert.NoError(t, err)
		verifyChunk(h, c)
	}
	assert.NoError(t, store.Close())
}

// verifyChunks panics if foundChunks delivers a chunk that was not requested,
// or if any requested hash is never delivered.
func verifyChunks(hashes hash.HashSlice, foundChunks chan *chunks.Chunk) {
	requested := hashes.HashSet()

	for c := range foundChunks {
		if _, ok := requested[c.Hash()]; !ok {
			panic(fmt.Sprintf("Got unexpected chunk: %s", c.Hash().String()))
		}

		delete(requested, c.Hash())
	}

	if len(requested) > 0 {
		for h := range requested {
			fmt.Printf("Failed to fetch %s\n", h.String())
		}
		panic("failed to fetch chunks")
	}
}

// benchmarkReadMany fetches hashes in batches of batchSize via GetMany,
// running at most concurrency batches at a time.
func benchmarkReadMany(openStore storeOpenFn, hashes hashSlice, src *dataSource, batchSize, concurrency int, t assert.TestingT) {
	store, err := openStore()
	assert.NoError(t, err)

	batch := make(hash.HashSlice, 0, batchSize)

	wg := sync.WaitGroup{}
	limit := make(chan struct{}, concurrency)

	for _, h := range hashes {
		batch = append(batch, h)

		if len(batch) == batchSize {
			limit <- struct{}{}
			wg.Add(1)
			go func(hashes hash.HashSlice) {
				chunkChan := make(chan *chunks.Chunk, len(hashes))
				err := store.GetMany(context.Background(), hashes.HashSet(), func(c *chunks.Chunk) { chunkChan <- c })
				d.PanicIfError(err)
				close(chunkChan)
				verifyChunks(hashes, chunkChan)
				wg.Done()
				<-limit
			}(batch)

			batch = make([]hash.Hash, 0, batchSize)
		}
	}

	if len(batch) > 0 {
		chunkChan := make(chan *chunks.Chunk, len(batch))
		err := store.GetMany(context.Background(), batch.HashSet(), func(c *chunks.Chunk) { chunkChan <- c })
		assert.NoError(t, err)

		close(chunkChan)

		verifyChunks(batch, chunkChan)
	}

	wg.Wait()

	assert.NoError(t, store.Close())
}

// ensureNovelWrite populates an empty store from src if benchmarkNovelWrite
// has not already run, so the read benchmarks always have data to fetch.
func ensureNovelWrite(wrote bool, openStore storeOpenFn, src *dataSource, t assert.TestingT) bool {
	if !wrote {
		store, err := openStore()
		assert.NoError(t, err)
		defer store.Close()
		writeToEmptyStore(store, src, t)
	}
	return true
}
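
// The sketch below is not part of the original file; it is one plausible way
// to wire these helpers together for an end-to-end run. It assumes the
// dataSource and hashSlice types defined elsewhere in this benchmarks package,
// and it substitutes a shared chunks.MemoryStorage for the NBS-backed stores
// the real benchmarks open, so treat the store constructor as an assumption
// rather than the benchmarks' actual configuration.
func exampleBenchmarkRun(src *dataSource, hashes hashSlice, t assert.TestingT) {
	// Share one MemoryStorage so chunks committed through one view are
	// visible to views opened later by the read benchmarks.
	storage := &chunks.MemoryStorage{}
	openStore := func() (chunks.ChunkStore, error) {
		return storage.NewView(), nil
	}

	// Write all chunks from src into the empty store and commit a root.
	wrote := benchmarkNovelWrite(openStore, src, t)

	// A no-op here (wrote is true), but it guarantees the read benchmarks
	// below have data even when the write benchmark is skipped.
	ensureNovelWrite(wrote, openStore, src, t)

	// Read the same chunks back: one hash at a time, then in batches of
	// 1024 with at most 16 GetMany calls in flight.
	benchmarkRead(openStore, hashes, src, t)
	benchmarkReadMany(openStore, hashes, src, 1024, 16, t)
}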