github.com/ndau/noms@v1.0.5/go/nbs/file_table_persister_test.go

// Copyright 2016 Attic Labs, Inc. All rights reserved.
// Licensed under the Apache License, version 2.0:
// http://www.apache.org/licenses/LICENSE-2.0

package nbs

import (
	"crypto/rand"
	"fmt"
	"io/ioutil"
	"os"
	"path/filepath"
	"sort"
	"testing"

	"github.com/stretchr/testify/assert"
)

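// TestFSTableCacheOnOpen verifies that tables opened via the persister are
// served from the fd cache even after their backing files are deleted from
// disk, and that opening one more table than the cache can hold leaves the
// cache at its configured size.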
func TestFSTableCacheOnOpen(t *testing.T) {
	assert := assert.New(t)
	dir := makeTempDir(t)
	defer os.RemoveAll(dir)

	names := []addr{}
	cacheSize := 2
	fc := newFDCache(cacheSize)
	defer fc.Drop()
	fts := newFSTablePersister(dir, fc, nil)

	// Create some tables manually, load them into the cache, and then blow them away
	func() {
		for i := 0; i < cacheSize; i++ {
			name, err := writeTableData(dir, []byte{byte(i)})
			assert.NoError(err)
			names = append(names, name)
		}
		for _, name := range names {
			fts.Open(name, 1, nil)
		}
		removeTables(dir, names...)
	}()

	// Tables should still be cached, even though they're gone from disk
	for i, name := range names {
		src := fts.Open(name, 1, nil)
		h := computeAddr([]byte{byte(i)})
		assert.True(src.has(h))
	}

	// Kick a table out of the cache
	name, err := writeTableData(dir, []byte{0xff})
	assert.NoError(err)
	fts.Open(name, 1, nil)

	present := fc.reportEntries()
	// Since zero-refcount entries are evicted randomly, the only thing we can validate is that fc remains at its target size.
	assert.Len(present, cacheSize)
}

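// makeTempDir creates a temporary directory for a test to work in; callers
// are responsible for removing it.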
func makeTempDir(t *testing.T) string {
	dir, err := ioutil.TempDir("", "")
	assert.NoError(t, err)
	return dir
}

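// writeTableData builds a table from chunx and writes it to a file under dir
// named for the table's address.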
func writeTableData(dir string, chunx ...[]byte) (name addr, err error) {
	var tableData []byte
	tableData, name = buildTable(chunx)
	err = ioutil.WriteFile(filepath.Join(dir, name.String()), tableData, 0666)
	return
}

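// removeTables deletes the files backing names from dir, returning the first
// error encountered.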
func removeTables(dir string, names ...addr) error {
	for _, name := range names {
		if err := os.Remove(filepath.Join(dir, name.String())); err != nil {
			return err
		}
	}
	return nil
}

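// contains reports whether e appears in s.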
func contains(s sort.StringSlice, e string) bool {
	for _, c := range s {
		if c == e {
			return true
		}
	}
	return false
}

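// TestFSTablePersisterPersist verifies that persisting a memTable writes a
// table file to disk whose contents can be read back and contain all of the
// persisted chunks.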
func TestFSTablePersisterPersist(t *testing.T) {
	assert := assert.New(t)
	dir := makeTempDir(t)
	defer os.RemoveAll(dir)
	fc := newFDCache(defaultMaxTables)
	defer fc.Drop()
	fts := newFSTablePersister(dir, fc, nil)

	src, err := persistTableData(fts, testChunks...)
	assert.NoError(err)
	if assert.True(src.count() > 0) {
		buff, err := ioutil.ReadFile(filepath.Join(dir, src.hash().String()))
		assert.NoError(err)
		tr := newTableReader(parseTableIndex(buff), tableReaderAtFromBytes(buff), fileBlockSize)
		assertChunksInReader(testChunks, tr, assert)
	}
}

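// persistTableData adds chunx to a fresh memTable and persists it via p,
// returning the resulting chunkSource.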
func persistTableData(p tablePersister, chunx ...[]byte) (src chunkSource, err error) {
	mt := newMemTable(testMemTableSize)
	for _, c := range chunx {
		if !mt.addChunk(computeAddr(c), c) {
			return nil, fmt.Errorf("memTable too full to add %s", computeAddr(c))
		}
	}
	return p.Persist(mt, nil, &Stats{}), nil
}

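// TestFSTablePersisterPersistNoData verifies that persisting a memTable whose
// chunks are all already present in an existing table yields an empty
// chunkSource and writes nothing to disk.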
func TestFSTablePersisterPersistNoData(t *testing.T) {
	assert := assert.New(t)
	mt := newMemTable(testMemTableSize)
	existingTable := newMemTable(testMemTableSize)

	for _, c := range testChunks {
		assert.True(mt.addChunk(computeAddr(c), c))
		assert.True(existingTable.addChunk(computeAddr(c), c))
	}

	dir := makeTempDir(t)
	defer os.RemoveAll(dir)
	fc := newFDCache(defaultMaxTables)
	defer fc.Drop()
	fts := newFSTablePersister(dir, fc, nil)

	src := fts.Persist(mt, existingTable, &Stats{})
	assert.True(src.count() == 0)

	_, err := os.Stat(filepath.Join(dir, src.hash().String()))
	assert.True(os.IsNotExist(err), "%v", err)
}

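// TestFSTablePersisterCacheOnPersist verifies that a freshly persisted table
// remains readable via the fd cache even after its file is removed from disk,
// and that a subsequent Persist leaves the cache at its target size.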
func TestFSTablePersisterCacheOnPersist(t *testing.T) {
	assert := assert.New(t)
	dir := makeTempDir(t)
	fc := newFDCache(1)
	defer fc.Drop()
	fts := newFSTablePersister(dir, fc, nil)
	defer os.RemoveAll(dir)

	var name addr
	func() {
		src, err := persistTableData(fts, testChunks...)
		assert.NoError(err)
		name = src.hash()
		removeTables(dir, name)
	}()

	// Table should still be cached, even though it's gone from disk
	src := fts.Open(name, uint32(len(testChunks)), nil)
	assertChunksInReader(testChunks, src, assert)

	// Evict |name| from the cache by persisting another table
	_, err := persistTableData(fts, []byte{0xff})
	assert.NoError(err)

	present := fc.reportEntries()
	// Since zero-refcount entries are evicted randomly, the only thing we can validate is that fc remains at its target size.
	assert.Len(present, 1)
}

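// TestFSTablePersisterConjoinAll verifies that conjoining several on-disk
// tables produces a single table containing all of their chunks, without
// growing the fd cache beyond its target size.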
func TestFSTablePersisterConjoinAll(t *testing.T) {
	assert := assert.New(t)
	assert.True(len(testChunks) > 1, "this test requires more than one chunk to be meaningful")
	sources := make(chunkSources, len(testChunks))

	dir := makeTempDir(t)
	defer os.RemoveAll(dir)
	fc := newFDCache(len(sources))
	defer fc.Drop()
	fts := newFSTablePersister(dir, fc, nil)

	for i, c := range testChunks {
		randChunk := make([]byte, (i+1)*13)
		_, err := rand.Read(randChunk)
		assert.NoError(err)
		name, err := writeTableData(dir, c, randChunk)
		assert.NoError(err)
		sources[i] = fts.Open(name, 2, nil)
	}

	src := fts.ConjoinAll(sources, &Stats{})

	if assert.True(src.count() > 0) {
		buff, err := ioutil.ReadFile(filepath.Join(dir, src.hash().String()))
		assert.NoError(err)
		tr := newTableReader(parseTableIndex(buff), tableReaderAtFromBytes(buff), fileBlockSize)
		assertChunksInReader(testChunks, tr, assert)
	}

	present := fc.reportEntries()
	// Since zero-refcount entries are evicted randomly, the only thing we can validate is that fc remains at its target size.
	assert.Len(present, len(sources))
}

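// TestFSTablePersisterConjoinAllDups verifies that conjoining tables which
// contain duplicate chunks preserves every physical chunk record: the
// conjoined count is the sum of the input counts.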
func TestFSTablePersisterConjoinAllDups(t *testing.T) {
	assert := assert.New(t)
	dir := makeTempDir(t)
	defer os.RemoveAll(dir)
	fc := newFDCache(defaultMaxTables)
	defer fc.Drop()
	fts := newFSTablePersister(dir, fc, nil)

	reps := 3
	sources := make(chunkSources, reps)
	for i := 0; i < reps; i++ {
		mt := newMemTable(1 << 10)
		for _, c := range testChunks {
			mt.addChunk(computeAddr(c), c)
		}
		sources[i] = fts.Persist(mt, nil, &Stats{})
	}
	src := fts.ConjoinAll(sources, &Stats{})

	if assert.True(src.count() > 0) {
		buff, err := ioutil.ReadFile(filepath.Join(dir, src.hash().String()))
		assert.NoError(err)
		tr := newTableReader(parseTableIndex(buff), tableReaderAtFromBytes(buff), fileBlockSize)
		assertChunksInReader(testChunks, tr, assert)
		assert.EqualValues(reps*len(testChunks), tr.count())
	}
}