github.com/olivere/camlistore@v0.0.0-20140121221811-1b7ac2da0199/pkg/schema/filewriter_test.go

/*
Copyright 2012 Google Inc.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

     http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package schema

import (
	"bytes"
	"errors"
	"fmt"
	"io"
	"math/rand"
	"sort"
	"sync"
	"testing"

	"camlistore.org/pkg/blobserver/stats"
	"camlistore.org/pkg/test"
)

func TestWriteFileMap(t *testing.T) {
	m := NewFileMap("test-file")
	r := &randReader{seed: 123, length: 5 << 20}
	sr := new(stats.Receiver)
	var buf bytes.Buffer
	br, err := WriteFileMap(sr, m, io.TeeReader(r, &buf))
	if err != nil {
		t.Fatal(err)
	}
	t.Logf("Got root file %v; %d blobs, %d bytes", br, sr.NumBlobs(), sr.SumBlobSize())
	sizes := sr.Sizes()
	t.Logf("Sizes are %v", sizes)

	// TODO(bradfitz): these are fragile tests and mostly just a placeholder.
	// Real tests to add:
	//   -- no "bytes" schema with a single "blobref"
	//   -- more seeds (including some that tickle the above)
	//   -- file reader reading back the root gets the same sha1 content back
	//      (will require keeping the full data in our stats receiver, not
	//       just the size)
	//   -- well-balanced tree
	//   -- nothing too big, nothing too small (a hedged sketch of this check
	//      follows this function).
	if g, w := br.String(), "sha1-95a5d2686b239e36dff3aeb5a45ed18153121835"; g != w {
		t.Errorf("root blobref = %v; want %v", g, w)
	}
	if g, w := sr.NumBlobs(), 88; g != w {
		t.Errorf("num blobs = %v; want %v", g, w)
	}
	if g, w := sr.SumBlobSize(), int64(5252655); g != w {
		t.Errorf("sum blob size = %v; want %v", g, w)
	}
	if g, w := sizes[len(sizes)-1], 262144; g != w {
		t.Errorf("biggest blob is %d; want %d", g, w)
	}
}
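
// A hedged sketch of the "nothing too big" item in the TODO above, run across
// a few extra seeds. The 256 KiB (262144-byte) ceiling comes from the
// biggest-blob assertion in TestWriteFileMap; the seed values and the test
// name are illustrative assumptions, not part of the original file.
func TestChunkSizeCeiling(t *testing.T) {
	for _, seed := range []int64{1, 7, 123} {
		sr := new(stats.Receiver)
		m := NewFileMap("test-file")
		if _, err := WriteFileMap(sr, m, &randReader{seed: seed, length: 5 << 20}); err != nil {
			t.Fatalf("seed %d: WriteFileMap: %v", seed, err)
		}
		sizes := sr.Sizes() // sorted ascending, as TestWriteFileMap's last-element indexing assumes
		if biggest := sizes[len(sizes)-1]; biggest > 262144 {
			t.Errorf("seed %d: biggest blob is %d bytes; want <= 262144", seed, biggest)
		}
	}
}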

func TestWriteThenRead(t *testing.T) {
	m := NewFileMap("test-file")
	const size = 5 << 20
	r := &randReader{seed: 123, length: size}
	sto := new(test.Fetcher)
	var buf bytes.Buffer
	br, err := WriteFileMap(sto, m, io.TeeReader(r, &buf))
	if err != nil {
		t.Fatal(err)
	}

	var got bytes.Buffer
	fr, err := NewFileReader(sto, br)
	if err != nil {
		t.Fatal(err)
	}

	n, err := io.Copy(&got, fr)
	if err != nil {
		t.Fatal(err)
	}
	if n != size {
		t.Errorf("read back %d bytes; want %d", n, size)
	}
	if !bytes.Equal(buf.Bytes(), got.Bytes()) {
		t.Error("bytes differ")
	}

	var offc chan int64
	var offs []int

	getOffsets := func() error {
		offs = offs[:0]
		offc = make(chan int64)
		done := make(chan struct{})
		go func() {
			defer close(done)
			for off := range offc {
				offs = append(offs, int(off))
			}
		}()
		err := fr.GetChunkOffsets(offc)
		// GetChunkOffsets closes offc when it's done (the range above relies
		// on that), but the draining goroutine may still be appending; wait
		// for it so offs is safe to read once we return.
		<-done
		return err
	}

	if err := getOffsets(); err != nil {
		t.Fatal(err)
	}
	sort.Ints(offs)
	wantOffs := "[0 262144 358150 433428 525437 602690 675039 748088 816210 898743 980993 1053410 1120438 1188662 1265192 1332541 1398316 1463899 1530446 1596700 1668839 1738909 1817065 1891025 1961646 2031127 2099232 2170640 2238692 2304743 2374317 2440449 2514327 2582670 2653257 2753975 2827518 2905783 2975426 3053820 3134057 3204879 3271019 3346750 3421351 3487420 3557939 3624006 3701093 3768863 3842013 3918267 4001933 4069157 4139132 4208109 4281390 4348801 4422695 4490535 4568111 4642769 4709005 4785526 4866313 4933575 5005564 5071633 5152695 5227716]"
	gotOffs := fmt.Sprintf("%v", offs)
	if wantOffs != gotOffs {
		t.Errorf("Got chunk offsets %v; want %v", gotOffs, wantOffs)
	}

	// Now force a fetch failure on one of the filereader schema chunks, to
	// force a failure of GetChunkOffsets.
	errFetch := errors.New("fake fetch error")
	var fetches struct {
		sync.Mutex
		n int
	}
	sto.FetchErr = func() error {
		fetches.Lock()
		defer fetches.Unlock()
		fetches.n++
		if fetches.n == 1 {
			return nil
		}
		return errFetch
	}

	fr, err = NewFileReader(sto, br)
	if err != nil {
		t.Fatal(err)
	}
	if err := getOffsets(); fmt.Sprint(err) != "schema/filereader: fetching file schema blob: fake fetch error" {
		t.Errorf("expected second call of GetChunkOffsets to return wrapped errFetch; got %v", err)
	}
}
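
// A hedged follow-on to the exact-offset comparison above, which the TODO in
// TestWriteFileMap calls fragile: this helper (not in the original file)
// checks only the structural invariants any valid chunk-offset list must
// satisfy. It could be called right after sort.Ints(offs), e.g.
// checkOffsetInvariants(t, offs, size).
func checkOffsetInvariants(t *testing.T, offs []int, size int) {
	if len(offs) == 0 || offs[0] != 0 {
		t.Fatalf("chunk offsets %v don't start at 0", offs)
	}
	for i := 1; i < len(offs); i++ {
		if offs[i] <= offs[i-1] {
			t.Errorf("offset %d at index %d is not strictly increasing", offs[i], i)
		}
		if offs[i] >= size {
			t.Errorf("offset %d at index %d is at or past EOF (size %d)", offs[i], i, size)
		}
	}
}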

// randReader produces a deterministic pseudo-random byte stream of the given
// length: the fixed seed is what makes the exact blobrefs, blob counts, and
// chunk offsets asserted above reproducible across runs.
type randReader struct {
	seed   int64
	length int
	rnd    *rand.Rand // lazy init
	remain int        // lazy init
}

func (r *randReader) Read(p []byte) (n int, err error) {
	if r.rnd == nil {
		r.rnd = rand.New(rand.NewSource(r.seed))
		r.remain = r.length
	}
	if r.remain == 0 {
		return 0, io.EOF
	}
	if len(p) > r.remain {
		p = p[:r.remain]
	}
	for i := range p {
		p[i] = byte(r.rnd.Intn(256))
	}
	r.remain -= len(p)
	return len(p), nil
}
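
// A minimal sketch verifying that randReader is deterministic: the same seed
// must yield identical bytes, which is what pins the exact blobref, blob
// count, and chunk offsets asserted above. The test name and buffer size are
// illustrative assumptions.
func TestRandReaderDeterministic(t *testing.T) {
	readAll := func(seed int64) []byte {
		buf := make([]byte, 4<<10)
		if _, err := io.ReadFull(&randReader{seed: seed, length: len(buf)}, buf); err != nil {
			t.Fatal(err)
		}
		return buf
	}
	if !bytes.Equal(readAll(123), readAll(123)) {
		t.Error("same seed produced different bytes")
	}
	if bytes.Equal(readAll(123), readAll(456)) {
		t.Error("different seeds unexpectedly produced identical streams")
	}
}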