github.com/Schaudge/grailbase@v0.0.0-20240223061707-44c758a471c0/file/internal/readmatcher/readmatchertest/stress.go

package readmatchertest

import (
	"bytes"
	"context"
	"fmt"
	"io"
	"math/rand"

	"github.com/Schaudge/grailbase/file/internal/kernel"
	"github.com/Schaudge/grailbase/ioctx"
	"github.com/Schaudge/grailbase/must"
	"github.com/Schaudge/grailbase/traverse"
)

// Stress runs a stress test on rAt.
// If rAt is a readmatcher, consider setting parallelism below and above readmatcher.SoftMaxReaders
// to exercise those cases. See the usage sketch after the type declarations below.
func Stress(want []byte, rAt ioctx.ReaderAt, parallelism int) {
	ctx := context.Background()

	size := len(want)
	// Only use rnd in the sequentially-executed task builders, not the parallelized actual reads.
	rnd := rand.New(rand.NewSource(1))

	const fuseReadSize = 1 << 17 // 128 KiB = FUSE max read size.
	sequentialBuilder := func() task {
		var t task
		start, limit := randInterval(rnd, size)
		for ; start < limit; start += fuseReadSize {
			// This limit shadows the interval's limit: the final chunk may extend
			// past the interval, but never past size.
			limit := start + fuseReadSize
			if limit > size {
				limit = size
			}
			t = append(t, read{start, limit})
		}
		return t
	}
	taskBuilders := []func() task{
		// Read sequentially in FUSE-like chunks.
		sequentialBuilder,
		// Read some subset of the file, mostly sequentially, occasionally jumping.
		// The jumps reorder reads within the bounds of kernel.MaxReadAhead.
		// This is not quite the kernel readahead pattern because our reads are inherently
		// sequential, whereas kernel readahead parallelizes. But, assuming we know that there is
		// internal serialization somewhere, this at least simulates the variable ordering.
		func() task {
			// For simplicity, we choose to swap (or skip) adjacent pairs. Each item can move at
			// most 1 position, so the largest interval we can generate is changing a 1-read gap
			// (adjacent) into a 3-read gap.
			// If we allowed further, or second, swaps, we'd have to be careful about introducing
			// longer gaps. Maybe we'll do that later.
			must.True(kernel.MaxReadAhead >= 3*fuseReadSize)
			t := sequentialBuilder()
			for i := 0; i+1 < len(t); i += 2 {
				if rnd.Intn(2) == 0 {
					t[i], t[i+1] = t[i+1], t[i]
				}
			}
			return t
		},
		// Random reads covering some part of the data.
		func() task {
			t := sequentialBuilder()
			rnd.Shuffle(len(t), func(i, j int) { t[i], t[j] = t[j], t[i] })
			if len(t) == 0 {
				return nil // Guard: rnd.Intn panics when len(t) == 0.
			}
			return t[:rnd.Intn(len(t))]
		},
	}
	tasks := make([]task, parallelism*10)
	for i := range tasks {
		tasks[i] = taskBuilders[rnd.Intn(len(taskBuilders))]()
	}
	err := traverse.T{Limit: parallelism}.Each(len(tasks), func(i int) (err error) {
		var dst []byte
		for _, t := range tasks[i] {
			readSize := t.limit - t.start
			if cap(dst) < readSize {
				dst = make([]byte, 2*readSize)
			}
			dst = dst[:readSize]
			n, err := rAt.ReadAt(ctx, dst, int64(t.start))
			if err == io.EOF {
				if n == readSize {
					err = nil
				} else {
					err = fmt.Errorf("early EOF: %d, %v", n, t)
				}
			}
			if err != nil {
				return err
			}
			if !bytes.Equal(want[t.start:t.limit], dst) {
				return fmt.Errorf("read mismatch: %v", t)
			}
		}
		return nil
	})
	must.Nil(err)
}

type (
	read struct{ start, limit int }
	task []read
)
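
// Usage sketch: Stress accepts any ioctx.ReaderAt, so an in-memory reader can
// be adapted with a small wrapper. bytesReaderAt and stressInMemory below are
// hypothetical, for illustration only:
//
//	type bytesReaderAt struct{ r *bytes.Reader }
//
//	func (b bytesReaderAt) ReadAt(_ context.Context, p []byte, off int64) (int, error) {
//		return b.r.ReadAt(p, off)
//	}
//
//	func stressInMemory() {
//		want := make([]byte, 1<<20)
//		rand.New(rand.NewSource(0)).Read(want)
//		// Also try parallelism above readmatcher.SoftMaxReaders, per the Stress doc comment.
//		Stress(want, bytesReaderAt{bytes.NewReader(want)}, 4)
//	}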

// randInterval returns a subset of [0, size). Interval selection is biased so that a substantial
// number of returned intervals will touch 0 and/or size.
func randInterval(rnd *rand.Rand, size int) (start, limit int) {
	start = rnd.Intn(2*size) - size
	if start < 0 { // Around half will start at 0.
		start = 0
	}
	limit = start + rnd.Intn(2*(size-start+1))
	if limit > size { // And around half read till the end.
		limit = size
	}
	return
}
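
// Bias sketch: because of the two clamps above, roughly half of all intervals
// start at 0 and roughly half end at size. A hypothetical helper (illustration
// only) that checks this empirically:
//
//	func intervalBias(size, trials int) (touchZero, touchSize int) {
//		rnd := rand.New(rand.NewSource(1))
//		for i := 0; i < trials; i++ {
//			start, limit := randInterval(rnd, size)
//			if start == 0 {
//				touchZero++
//			}
//			if limit == size {
//				touchSize++
//			}
//		}
//		return
//	}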