// Copyright 2016 Attic Labs, Inc. All rights reserved.
// Licensed under the Apache License, version 2.0:
// http://www.apache.org/licenses/LICENSE-2.0

package nbs

import (
	"bytes"
	"sync"
	"testing"

	"github.com/attic-labs/noms/go/chunks"
	"github.com/golang/snappy"
	"github.com/stretchr/testify/assert"
)

// TestMemTableAddHasGetChunk verifies the basic add/has/get contract of
// memTable: chunks that were added are reported by has() and returned
// byte-for-byte by get(); a chunk that was never added is neither.
func TestMemTableAddHasGetChunk(t *testing.T) {
	assert := assert.New(t)
	mt := newMemTable(1024)

	chunks := [][]byte{
		[]byte("hello2"),
		[]byte("goodbye2"),
		[]byte("badbye2"),
	}

	for _, c := range chunks {
		assert.True(mt.addChunk(computeAddr(c), c))
	}

	assertChunksInReader(chunks, mt, assert)

	for _, c := range chunks {
		// get must return bytes equal to what was inserted.
		assert.Equal(bytes.Compare(c, mt.get(computeAddr(c), &Stats{})), 0)
	}

	// A chunk that was never added must not be present or retrievable.
	notPresent := []byte("nope")
	assert.False(mt.has(computeAddr(notPresent)))
	assert.Nil(mt.get(computeAddr(notPresent), &Stats{}))
}

// TestMemTableAddOverflowChunk verifies that addChunk rejects chunks that
// would push the table past its configured byte budget, and that rejected
// chunks are not reported by has().
func TestMemTableAddOverflowChunk(t *testing.T) {
	memTableSize := uint64(1024)

	assert := assert.New(t)
	big := make([]byte, memTableSize)
	little := []byte{0x01}
	{
		// A chunk exactly the size of the table fills it completely;
		// any further chunk, however small, must be rejected.
		bigAddr := computeAddr(big)
		mt := newMemTable(memTableSize)
		assert.True(mt.addChunk(bigAddr, big))
		assert.True(mt.has(bigAddr))
		assert.False(mt.addChunk(computeAddr(little), little))
		assert.False(mt.has(computeAddr(little)))
	}

	{
		// Leave exactly one byte of headroom: a one-byte chunk still fits,
		// but the next one-byte chunk overflows and is rejected.
		big := big[:memTableSize-1]
		bigAddr := computeAddr(big)
		mt := newMemTable(memTableSize)
		assert.True(mt.addChunk(bigAddr, big))
		assert.True(mt.has(bigAddr))
		assert.True(mt.addChunk(computeAddr(little), little))
		assert.True(mt.has(computeAddr(little)))
		other := []byte("o")
		assert.False(mt.addChunk(computeAddr(other), other))
		assert.False(mt.has(computeAddr(other)))
	}
}

// TestMemTableWrite verifies that memTable.write filters out chunks already
// present in the supplied chunkReader and writes only the novel ones.
func TestMemTableWrite(t *testing.T) {
	assert := assert.New(t)
	mt := newMemTable(1024)

	chunks := [][]byte{
		[]byte("hello2"),
		[]byte("goodbye2"),
		[]byte("badbye2"),
	}

	for _, c := range chunks {
		assert.True(mt.addChunk(computeAddr(c), c))
	}

	// Build two table readers that already hold chunks[1] and chunks[2],
	// so only chunks[0] is novel relative to the group.
	td1, _ := buildTable(chunks[1:2])
	td2, _ := buildTable(chunks[2:])
	tr1 := newTableReader(parseTableIndex(td1), tableReaderAtFromBytes(td1), fileBlockSize)
	tr2 := newTableReader(parseTableIndex(td2), tableReaderAtFromBytes(td2), fileBlockSize)
	assert.True(tr1.has(computeAddr(chunks[1])))
	assert.True(tr2.has(computeAddr(chunks[2])))

	// Exactly one chunk (chunks[0]) should be written.
	_, data, count := mt.write(chunkReaderGroup{tr1, tr2}, &Stats{})
	assert.Equal(uint32(1), count)

	// The emitted table must contain the novel chunk and neither of the
	// chunks that were already covered by tr1/tr2.
	outReader := newTableReader(parseTableIndex(data), tableReaderAtFromBytes(data), fileBlockSize)
	assert.True(outReader.has(computeAddr(chunks[0])))
	assert.False(outReader.has(computeAddr(chunks[1])))
	assert.False(outReader.has(computeAddr(chunks[2])))
}

// tableReaderAtAdapter adapts a *bytes.Reader to the tableReaderAt
// interface; the stats argument is ignored.
type tableReaderAtAdapter struct {
	*bytes.Reader
}

// tableReaderAtFromBytes wraps an in-memory byte slice as a tableReaderAt
// for use in tests.
func tableReaderAtFromBytes(b []byte) tableReaderAt {
	return tableReaderAtAdapter{bytes.NewReader(b)}
}

func (adapter tableReaderAtAdapter) ReadAtWithStats(p []byte, off int64, stats *Stats) (n int, err error) {
	return adapter.ReadAt(p, off)
}

// TestMemTableSnappyWriteOutOfLine verifies that memTable.write panics if
// the snappy encoder returns a buffer other than the dst it was handed
// (out-of-line encoding), which would invalidate the table layout.
func TestMemTableSnappyWriteOutOfLine(t *testing.T) {
	assert := assert.New(t)
	mt := newMemTable(1024)

	chunks := [][]byte{
		[]byte("hello2"),
		[]byte("goodbye2"),
		[]byte("badbye2"),
	}

	for _, c := range chunks {
		assert.True(mt.addChunk(computeAddr(c), c))
	}
	mt.snapper = &outOfLineSnappy{[]bool{false, true, false}} // chunks[1] should trigger a panic

	assert.Panics(func() { mt.write(nil, &Stats{}) })
}

// outOfLineSnappy wraps snappy.Encode, deliberately encoding "out of line"
// (into a freshly allocated buffer instead of dst) on each call whose
// position in policy is true. Used to provoke the panic tested above.
type outOfLineSnappy struct {
	policy []bool
}

func (o *outOfLineSnappy) Encode(dst, src []byte) []byte {
	// Consume the next policy entry; default to in-line when exhausted.
	outOfLine := false
	if len(o.policy) > 0 {
		outOfLine = o.policy[0]
		o.policy = o.policy[1:]
	}
	if outOfLine {
		// Passing nil forces snappy to allocate its own buffer,
		// simulating an out-of-line encode.
		return snappy.Encode(nil, src)
	}
	return snappy.Encode(dst, src)
}

// chunkReaderGroup fans chunkReader operations out over a set of readers,
// letting several tableReaders act as one chunk source in tests.
type chunkReaderGroup []chunkReader

func (crg chunkReaderGroup) has(h addr) bool {
	for _, haver := range crg {
		if haver.has(h) {
			return true
		}
	}
	return false
}

func (crg chunkReaderGroup) get(h addr, stats *Stats) []byte {
	// Return the first member's data for h; nil if no member has it.
	for _, haver := range crg {
		if data := haver.get(h, stats); data != nil {
			return data
		}
	}
	return nil
}

func (crg chunkReaderGroup) hasMany(addrs []hasRecord) (remaining bool) {
	for _, haver := range crg {
		if !haver.hasMany(addrs) {
			return false
		}
	}
	return true
}

func (crg chunkReaderGroup) getMany(reqs []getRecord, foundChunks chan *chunks.Chunk, wg *sync.WaitGroup, stats *Stats) (remaining bool) {
	for _, haver := range crg {
		if !haver.getMany(reqs, foundChunks, wg, stats) {
			return false
		}
	}
	return true
}

// count sums the chunk counts of all members.
func (crg chunkReaderGroup) count() (count uint32) {
	for _, haver := range crg {
		count += haver.count()
	}
	return
}

// uncompressedLen sums the uncompressed byte lengths of all members.
func (crg chunkReaderGroup) uncompressedLen() (data uint64) {
	for _, haver := range crg {
		data += haver.uncompressedLen()
	}
	return
}

func (crg chunkReaderGroup) extract(chunks chan<- extractRecord) {
	for _, haver := range crg {
		haver.extract(chunks)
	}
}