github.com/cockroachdb/pebble@v1.1.2/objstorage/objstorageprovider/sharedcache/shared_cache_test.go

package sharedcache_test

import (
	"bytes"
	"context"
	"fmt"
	"strconv"
	"sync"
	"testing"
	"time"

	"github.com/cockroachdb/datadriven"
	"github.com/cockroachdb/pebble/internal/base"
	"github.com/cockroachdb/pebble/internal/invariants"
	"github.com/cockroachdb/pebble/objstorage"
	"github.com/cockroachdb/pebble/objstorage/objstorageprovider"
	"github.com/cockroachdb/pebble/objstorage/objstorageprovider/sharedcache"
	"github.com/cockroachdb/pebble/vfs"
	"github.com/stretchr/testify/require"
	"golang.org/x/exp/rand"
)

func TestSharedCache(t *testing.T) {
	ctx := context.Background()

	datadriven.Walk(t, "testdata/cache", func(t *testing.T, path string) {
		var log base.InMemLogger
		fs := vfs.WithLogging(vfs.NewMem(), func(fmt string, args ...interface{}) {
			log.Infof("<local fs> "+fmt, args...)
		})

		provider, err := objstorageprovider.Open(objstorageprovider.DefaultSettings(fs, ""))
		require.NoError(t, err)

		var cache *sharedcache.Cache
		defer func() {
			if cache != nil {
				cache.Close()
			}
		}()

		var objData []byte
		datadriven.RunTest(t, path, func(t *testing.T, d *datadriven.TestData) string {
			log.Reset()
			switch d.Cmd {
			case "init":
				blockSize := parseBytesArg(t, d, "block-size", 32*1024)
				shardingBlockSize := parseBytesArg(t, d, "sharding-block-size", 1024*1024)
				numShards := parseBytesArg(t, d, "num-shards", 32)
				size := parseBytesArg(t, d, "size", numShards*shardingBlockSize)
				if size%(numShards*shardingBlockSize) != 0 {
					d.Fatalf(t, "size (%d) must be a multiple of numShards (%d) * shardingBlockSize (%d)",
						size, numShards, shardingBlockSize,
					)
				}
				cache, err = sharedcache.Open(
					fs, base.DefaultLogger, "", blockSize, int64(shardingBlockSize), int64(size), numShards,
				)
				require.NoError(t, err)
				return fmt.Sprintf("initialized with block-size=%d size=%d num-shards=%d", blockSize, size, numShards)

			case "write":
				size := mustParseBytesArg(t, d, "size")

				writable, _, err := provider.Create(ctx, base.FileTypeTable, base.FileNum(1).DiskFileNum(), objstorage.CreateOptions{})
				require.NoError(t, err)
				defer writable.Finish()

				// With invariants on, Write will modify its input buffer.
				objData = make([]byte, size)
				wrote := make([]byte, size)
				for i := 0; i < size; i++ {
					objData[i] = byte(i)
					wrote[i] = byte(i)
				}
				err = writable.Write(wrote)
				// Writing a file is test setup, and it is always expected to succeed, so we
				// assert within the test, rather than returning n and/or err here.
				require.NoError(t, err)

				return ""
			case "read", "read-for-compaction":
				missesBefore := cache.Metrics().ReadsWithPartialHit + cache.Metrics().ReadsWithNoHit
				offset := mustParseBytesArg(t, d, "offset")
				size := mustParseBytesArg(t, d, "size")

				readable, err := provider.OpenForReading(ctx, base.FileTypeTable, base.FileNum(1).DiskFileNum(), objstorage.OpenOptions{})
				require.NoError(t, err)
				defer readable.Close()

				got := make([]byte, size)
				flags := sharedcache.ReadFlags{
					ReadOnly: d.Cmd == "read-for-compaction",
				}
				err = cache.ReadAt(ctx, base.FileNum(1).DiskFileNum(), got, int64(offset), readable, readable.Size(), flags)
				// We always expect cache.ReadAt to succeed.
				require.NoError(t, err)
				// It is easier to assert this condition programmatically, rather than returning
				// got, which may be very large.
				require.True(t, bytes.Equal(objData[int(offset):int(offset)+size], got), "incorrect data returned")

				// In order to ensure we get a hit on the next read, we must wait for writing to
				// the cache to complete.
				cache.WaitForWritesToComplete()

				// TODO(josh): Not tracing out filesystem activity here, since logging_fs.go
				// doesn't trace calls to ReadAt or WriteAt. We should consider changing this.
				missesAfter := cache.Metrics().ReadsWithPartialHit + cache.Metrics().ReadsWithNoHit
				return fmt.Sprintf("misses=%d", missesAfter-missesBefore)
			default:
				d.Fatalf(t, "unknown command %s", d.Cmd)
				return ""
			}
		})
	})
}

func TestSharedCacheRandomized(t *testing.T) {
	ctx := context.Background()

	var log base.InMemLogger
	fs := vfs.WithLogging(vfs.NewMem(), func(fmt string, args ...interface{}) {
		log.Infof("<local fs> "+fmt, args...)
	})

	provider, err := objstorageprovider.Open(objstorageprovider.DefaultSettings(fs, ""))
	require.NoError(t, err)

	seed := uint64(time.Now().UnixNano())
	fmt.Printf("seed: %v\n", seed)
	rand.Seed(seed)

	helper := func(
		blockSize int,
		shardingBlockSize int64) func(t *testing.T) {
		return func(t *testing.T) {
			for _, concurrentReads := range []bool{false, true} {
				t.Run(fmt.Sprintf("concurrentReads=%v", concurrentReads), func(t *testing.T) {
					maxShards := 32
					if invariants.RaceEnabled {
						maxShards = 8
					}
					numShards := rand.Intn(maxShards) + 1
					cacheSize := shardingBlockSize * int64(numShards) // minimum allowed cache size

					cache, err := sharedcache.Open(fs, base.DefaultLogger, "", blockSize, shardingBlockSize, cacheSize, numShards)
					require.NoError(t, err)
					defer cache.Close()

					writable, _, err := provider.Create(ctx, base.FileTypeTable, base.FileNum(1).DiskFileNum(), objstorage.CreateOptions{})
					require.NoError(t, err)

					// With invariants on, Write will modify its input buffer.
					// If size == 0, we can see panics below, so force a nonzero size.
					size := rand.Int63n(cacheSize-1) + 1
					objData := make([]byte, size)
					wrote := make([]byte, size)
					for i := 0; i < int(size); i++ {
						objData[i] = byte(i)
						wrote[i] = byte(i)
					}

					require.NoError(t, writable.Write(wrote))
					require.NoError(t, writable.Finish())

					readable, err := provider.OpenForReading(ctx, base.FileTypeTable, base.FileNum(1).DiskFileNum(), objstorage.OpenOptions{})
					require.NoError(t, err)
					defer readable.Close()

					const numDistinctReads = 100
					wg := sync.WaitGroup{}
					for i := 0; i < numDistinctReads; i++ {
						wg.Add(1)
						go func() {
							defer wg.Done()
							offset := rand.Int63n(size)

							got := make([]byte, size-offset)
							err := cache.ReadAt(ctx, base.FileNum(1).DiskFileNum(), got, offset, readable, readable.Size(), sharedcache.ReadFlags{})
							require.NoError(t, err)
							require.Equal(t, objData[int(offset):], got)

							got = make([]byte, size-offset)
							err = cache.ReadAt(ctx, base.FileNum(1).DiskFileNum(), got, offset, readable, readable.Size(), sharedcache.ReadFlags{})
							require.NoError(t, err)
							require.Equal(t, objData[int(offset):], got)
						}()
						// If concurrent reads, only wait 50% of loop iterations on average.
						if concurrentReads && rand.Intn(2) == 0 {
							wg.Wait()
						}
						if !concurrentReads {
							wg.Wait()
						}
					}
					wg.Wait()
				})
			}
		}
	}
	t.Run("32 KB block size", helper(32*1024, 1024*1024))
	t.Run("1 MB block size", helper(1024*1024, 1024*1024))

	if !invariants.RaceEnabled {
		for i := 0; i < 5; i++ {
			exp := rand.Intn(11) + 10   // [10, 20]
			randomBlockSize := 1 << exp // [1 KB, 1 MB]

			factor := rand.Intn(4) + 1                                 // [1, 4]
			randomShardingBlockSize := int64(randomBlockSize * factor) // [1 KB, 4 MB]

			t.Run("random block and sharding block size", helper(randomBlockSize, randomShardingBlockSize))
		}
	}
}

// parseBytesArg parses an optional argument that specifies a byte size; if the
// argument is not specified the default value is used. K/M/G suffixes are
// supported.
func parseBytesArg(t testing.TB, d *datadriven.TestData, argName string, defaultValue int) int {
	res, ok := tryParseBytesArg(t, d, argName)
	if !ok {
		return defaultValue
	}
	return res
}

// mustParseBytesArg parses a mandatory argument that specifies a byte size;
// K/M/G suffixes are supported.
func mustParseBytesArg(t testing.TB, d *datadriven.TestData, argName string) int {
	res, ok := tryParseBytesArg(t, d, argName)
	if !ok {
		t.Fatalf("argument '%s' missing", argName)
	}
	return res
}

func tryParseBytesArg(t testing.TB, d *datadriven.TestData, argName string) (val int, ok bool) {
	arg, ok := d.Arg(argName)
	if !ok {
		return 0, false
	}
	if len(arg.Vals) != 1 {
		t.Fatalf("expected 1 value for '%s'", argName)
	}
	v := arg.Vals[0]
	factor := 1
	switch v[len(v)-1] {
	case 'k', 'K':
		factor = 1024
	case 'm', 'M':
		factor = 1024 * 1024
	case 'g', 'G':
		factor = 1024 * 1024 * 1024
	}
	if factor > 1 {
		v = v[:len(v)-1]
	}
	res, err := strconv.Atoi(v)
	if err != nil {
		t.Fatalf("could not parse value '%s' for '%s'", arg.Vals[0], argName)
	}

	return res * factor, true
}
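
// The datadriven scripts under testdata/cache drive TestSharedCache through the
// init/write/read commands handled above, with byte-size arguments parsed by
// tryParseBytesArg. The following sketch is hypothetical: it is not the contents
// of an actual testdata file, and the argument values and the expected outputs
// after each "----" separator are illustrative only (the "misses=" counts in
// particular depend on cache internals).
//
//	init block-size=32K size=2M num-shards=2
//	----
//	initialized with block-size=32768 size=2097152 num-shards=2
//
//	write size=64K
//	----
//
//	read offset=16K size=32K
//	----
//	misses=1
//
//	read offset=16K size=32K
//	----
//	misses=0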