github.com/peterbourgon/diskv@v2.0.1+incompatible/issues_test.go

package diskv

import (
	"bytes"
	"io/ioutil"
	"sync"
	"testing"
	"time"
)

// ReadStream from cache shouldn't panic with a nil dereference when no
// Compression is configured :)
func TestIssue2A(t *testing.T) {
	d := New(Options{
		BasePath:     "test-issue-2a",
		Transform:    func(string) []string { return []string{} },
		CacheSizeMax: 1024,
	})
	defer d.EraseAll()

	input := "abcdefghijklmnopqrstuvwxy"
	key, writeBuf, sync := "a", bytes.NewBufferString(input), false
	if err := d.WriteStream(key, writeBuf, sync); err != nil {
		t.Fatal(err)
	}

	// Read the same key twice: the first read populates the cache, so the
	// second read can be served from it, which is where the panic occurred.
	for i := 0; i < 2; i++ {
		began := time.Now()
		rc, err := d.ReadStream(key, false)
		if err != nil {
			t.Fatal(err)
		}
		buf, err := ioutil.ReadAll(rc)
		if err != nil {
			t.Fatal(err)
		}
		if !cmpBytes(buf, []byte(input)) {
			t.Fatalf("read #%d: '%s' != '%s'", i+1, string(buf), input)
		}
		rc.Close()
		t.Logf("read #%d in %s", i+1, time.Since(began))
	}
}

// ReadStream on a key that resolves to a directory should return an error.
func TestIssue2B(t *testing.T) {
	// blockTransform splits a key into fixed-size path segments, so "abcabc"
	// is stored as abc/abc/abcabc and the shorter key "abc" resolves to the
	// directory abc/abc.
	blockTransform := func(s string) []string {
		transformBlockSize := 3
		sliceSize := len(s) / transformBlockSize
		pathSlice := make([]string, sliceSize)
		for i := 0; i < sliceSize; i++ {
			from, to := i*transformBlockSize, (i*transformBlockSize)+transformBlockSize
			pathSlice[i] = s[from:to]
		}
		return pathSlice
	}

	d := New(Options{
		BasePath:     "test-issue-2b",
		Transform:    blockTransform,
		CacheSizeMax: 0,
	})
	defer d.EraseAll()

	v := []byte{'1', '2', '3'}
	if err := d.Write("abcabc", v); err != nil {
		t.Fatal(err)
	}

	_, err := d.ReadStream("abc", false)
	if err == nil {
		t.Fatal("ReadStream('abc') should return error")
	}
	t.Logf("ReadStream('abc') returned error: %v", err)
}

// Ensure ReadStream with direct=true isn't racy.
func TestIssue17(t *testing.T) {
	var (
		basePath = "test-data"
	)

	dWrite := New(Options{
		BasePath:     basePath,
		CacheSizeMax: 0,
	})
	defer dWrite.EraseAll()

	dRead := New(Options{
		BasePath:     basePath,
		CacheSizeMax: 50,
	})

	cases := map[string]string{
		"a": `1234567890`,
		"b": `2345678901`,
		"c": `3456789012`,
		"d": `4567890123`,
		"e": `5678901234`,
	}

	for k, v := range cases {
		if err := dWrite.Write(k, []byte(v)); err != nil {
			t.Fatalf("during write: %s", err)
		}
		dRead.Read(k) // ensure it's added to cache
	}

	// Release all direct reads at once so they race against the warmed cache.
	var wg sync.WaitGroup
	start := make(chan struct{})
	for k, v := range cases {
		wg.Add(1)
		go func(k, v string) {
			<-start
			dRead.ReadStream(k, true)
			wg.Done()
		}(k, v)
	}
	close(start)
	wg.Wait()
}
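
// cmpBytes is called above but isn't defined in this file; in the diskv
// repository it lives in another test file of the same package. A minimal
// sketch is included here, assuming it simply compares two byte slices, so
// the listing reads self-contained (the body below is an assumption, not the
// repository's implementation).
func cmpBytes(a, b []byte) bool {
	if len(a) != len(b) {
		return false
	}
	for i := range a {
		if a[i] != b[i] {
			return false
		}
	}
	return true
}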