// github.com/daragao/go-ethereum@v1.8.14-0.20180809141559-45eaef243198/swarm/storage/mru/resource_test.go

// Copyright 2018 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

package mru

import (
	"bytes"
	"context"
	"crypto/rand"
	"encoding/binary"
	"flag"
	"io/ioutil"
	"os"
	"testing"
	"time"

	"github.com/ethereum/go-ethereum/contracts/ens"
	"github.com/ethereum/go-ethereum/crypto"
	"github.com/ethereum/go-ethereum/log"
	"github.com/ethereum/go-ethereum/swarm/multihash"
	"github.com/ethereum/go-ethereum/swarm/storage"
)

var (
	loglevel   = flag.Int("loglevel", 3, "loglevel")
	testHasher = storage.MakeHashFunc(resourceHashAlgorithm)()
	startTime  = Timestamp{
		Time: uint64(4200),
	}
	resourceFrequency = uint64(42)
	cleanF            func()
	resourceName      = "føø.bar"
	hashfunc          = storage.MakeHashFunc(storage.DefaultHash)
)

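// init parses the test flags and installs a terminal log handler on stderr at the requested verbosity.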
func init() {
	flag.Parse()
	log.Root().SetHandler(log.CallerFileHandler(log.LvlFilterHandler(log.Lvl(*loglevel), log.StreamHandler(os.Stderr, log.TerminalFormat(true)))))
}

// simulated timeProvider
type fakeTimeProvider struct {
	currentTime uint64
}

func (f *fakeTimeProvider) Tick() {
	f.currentTime++
}

func (f *fakeTimeProvider) Now() Timestamp {
	return Timestamp{
		Time: f.currentTime,
	}
}

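// TestUpdateChunkSerializationErrorChecking verifies that update chunk serialization and parsing
// reject malformed input: chunks that are too short, length headers that do not match the payload,
// rootAddr/metaHash of the wrong size, missing data, missing signatures and corrupted multihashes.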
func TestUpdateChunkSerializationErrorChecking(t *testing.T) {

	// Test that fromChunk fails if the chunk is too small
	var r SignedResourceUpdate
	if err := r.fromChunk(storage.ZeroAddr, make([]byte, minimumUpdateDataLength-1)); err == nil {
		t.Fatalf("Expected fromChunk to fail when chunkData contains less than %d bytes", minimumUpdateDataLength)
	}

	r = SignedResourceUpdate{}
	// Test that fromChunk fails when the length header does not match the length of the data array
	fakeChunk := make([]byte, 150)
	binary.LittleEndian.PutUint16(fakeChunk, 44)
	if err := r.fromChunk(storage.ZeroAddr, fakeChunk); err == nil {
		t.Fatal("Expected fromChunk to fail when the header length does not match the actual data length")
	}

	r = SignedResourceUpdate{
		resourceUpdate: resourceUpdate{
			updateHeader: updateHeader{
				UpdateLookup: UpdateLookup{

					rootAddr: make([]byte, 79), // put the wrong length, should be storage.KeyLength
				},
				metaHash:  nil,
				multihash: false,
			},
		},
	}
	_, err := r.toChunk()
	if err == nil {
		t.Fatal("Expected toChunk to fail when rootAddr or metaHash has the wrong length")
	}
	r.rootAddr = make([]byte, storage.KeyLength)
	r.metaHash = make([]byte, storage.KeyLength)
	_, err = r.toChunk()
	if err == nil {
		t.Fatal("Expected toChunk to fail when there is no data")
	}
	r.data = make([]byte, 79) // put some arbitrary length data
	_, err = r.toChunk()
	if err == nil {
		t.Fatal("Expected toChunk to fail when there is no signature")
	}

	alice := newAliceSigner()
	if err := r.Sign(alice); err != nil {
		t.Fatalf("error signing:%s", err)
	}
	_, err = r.toChunk()
	if err != nil {
		t.Fatalf("error creating update chunk:%s", err)
	}

	r.multihash = true
	r.data[1] = 79 // mess with the multihash, corrupting one byte of it
	if err := r.Sign(alice); err == nil {
		t.Fatal("Expected Sign() to fail when an invalid multihash is in data and multihash=true")
	}
}

// check that signature address matches update signer address
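// by round-tripping a signed update through toChunk/fromChunk and recovering the owner address from the signature.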
func TestReverse(t *testing.T) {

	period := uint32(4)
	version := uint32(2)

	// make fake timeProvider
	timeProvider := &fakeTimeProvider{
		currentTime: startTime.Time,
	}

	// signer containing private key
	signer := newAliceSigner()

	// set up a resource handler backed by a temporary datadir
	_, _, teardownTest, err := setupTest(timeProvider, signer)
	if err != nil {
		t.Fatal(err)
	}
	defer teardownTest()

	metadata := ResourceMetadata{
		Name:      resourceName,
		StartTime: startTime,
		Frequency: resourceFrequency,
		Owner:     signer.Address(),
	}

	rootAddr, metaHash, _, err := metadata.serializeAndHash()
	if err != nil {
		t.Fatal(err)
	}

	// generate some bogus data for the chunk and sign it
	data := make([]byte, 8)
	_, err = rand.Read(data)
	if err != nil {
		t.Fatal(err)
	}
	testHasher.Reset()
	testHasher.Write(data)

	update := &SignedResourceUpdate{
		resourceUpdate: resourceUpdate{
			updateHeader: updateHeader{
				UpdateLookup: UpdateLookup{
					period:   period,
					version:  version,
					rootAddr: rootAddr,
				},
				metaHash: metaHash,
			},
			data: data,
		},
	}
	// compute the update address (chunk key) for the configured period and version
	key := update.UpdateAddr()

	if err = update.Sign(signer); err != nil {
		t.Fatal(err)
	}

	chunk, err := update.toChunk()
	if err != nil {
		t.Fatal(err)
	}

	// check that we can recover the owner account from the update chunk's signature
	var checkUpdate SignedResourceUpdate
	if err := checkUpdate.fromChunk(chunk.Addr, chunk.SData); err != nil {
		t.Fatal(err)
	}
	checkdigest, err := checkUpdate.GetDigest()
	if err != nil {
		t.Fatal(err)
	}
	recoveredaddress, err := getOwner(checkdigest, *checkUpdate.signature)
	if err != nil {
		t.Fatalf("failed to retrieve address from signature: %v", err)
	}
	originaladdress := crypto.PubkeyToAddress(signer.PrivKey.PublicKey)

	// check that the recovered address matches the signer's address
	if recoveredaddress != originaladdress {
		t.Fatalf("addresses don't match: %x != %x", originaladdress, recoveredaddress)
	}

	if !bytes.Equal(key[:], chunk.Addr[:]) {
		t.Fatalf("Expected chunk key '%x', was '%x'", key, chunk.Addr)
	}
	if period != checkUpdate.period {
		t.Fatalf("Expected period '%d', was '%d'", period, checkUpdate.period)
	}
	if version != checkUpdate.version {
		t.Fatalf("Expected version '%d', was '%d'", version, checkUpdate.version)
	}
	if !bytes.Equal(data, checkUpdate.data) {
		t.Fatalf("Expected data '%x', was '%x'", data, checkUpdate.data)
	}
}

// make updates and retrieve them based on periods and versions
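// The handler is then closed and reopened from the same datadir to check that updates can still be looked up.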
func TestResourceHandler(t *testing.T) {

	// make fake timeProvider
	timeProvider := &fakeTimeProvider{
		currentTime: startTime.Time,
	}

	// signer containing private key
	signer := newAliceSigner()

	rh, datadir, teardownTest, err := setupTest(timeProvider, signer)
	if err != nil {
		t.Fatal(err)
	}
	defer teardownTest()

	// create a new resource
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	metadata := &ResourceMetadata{
		Name:      resourceName,
		Frequency: resourceFrequency,
		StartTime: Timestamp{Time: timeProvider.Now().Time},
		Owner:     signer.Address(),
	}

	request, err := NewCreateUpdateRequest(metadata)
	if err != nil {
		t.Fatal(err)
	}
	if err := request.Sign(signer); err != nil {
		t.Fatal(err)
	}
	err = rh.New(ctx, request)
	if err != nil {
		t.Fatal(err)
	}

	chunk, err := rh.chunkStore.Get(context.TODO(), storage.Address(request.rootAddr))
	if err != nil {
		t.Fatal(err)
	} else if len(chunk.SData) < 16 {
		t.Fatalf("chunk data must be at least 16 bytes, got %d", len(chunk.SData))
	}

	var recoveredMetadata ResourceMetadata

	if err := recoveredMetadata.binaryGet(chunk.SData); err != nil {
		t.Fatal(err)
	}
	if recoveredMetadata.StartTime.Time != timeProvider.currentTime {
		t.Fatalf("stored startTime %d does not match provided startTime %d", recoveredMetadata.StartTime.Time, timeProvider.currentTime)
	}
	if recoveredMetadata.Frequency != resourceFrequency {
		t.Fatalf("stored frequency %d does not match provided frequency %d", recoveredMetadata.Frequency, resourceFrequency)
	}

	// data for updates:
	updates := []string{
		"blinky",
		"pinky",
		"inky",
		"clyde",
	}

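	// Each update below is submitted at a different point in simulated time; the resulting chunk
	// addresses are collected in the resourcekey map.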
	// update halfway to first period. period=1, version=1
	resourcekey := make(map[string]storage.Address)
	fwdClock(int(resourceFrequency/2), timeProvider)
	data := []byte(updates[0])
	request.SetData(data, false)
	if err := request.Sign(signer); err != nil {
		t.Fatal(err)
	}
	resourcekey[updates[0]], err = rh.Update(ctx, &request.SignedResourceUpdate)
	if err != nil {
		t.Fatal(err)
	}

	// update on first period with version = 1 to make it fail since there is already one update with version=1
	request, err = rh.NewUpdateRequest(ctx, request.rootAddr)
	if err != nil {
		t.Fatal(err)
	}
	if request.version != 2 || request.period != 1 {
		t.Fatal("Suggested period should be 1 and version should be 2")
	}

	request.version = 1 // force version 1 instead of 2 to make it fail
	data = []byte(updates[1])
	request.SetData(data, false)
	if err := request.Sign(signer); err != nil {
		t.Fatal(err)
	}
	resourcekey[updates[1]], err = rh.Update(ctx, &request.SignedResourceUpdate)
	if err == nil {
		t.Fatal("Expected update to fail since this version already exists")
	}

	// update on second period with version = 1, correct. period=2, version=1
	fwdClock(int(resourceFrequency/2), timeProvider)
	request, err = rh.NewUpdateRequest(ctx, request.rootAddr)
	if err != nil {
		t.Fatal(err)
	}
	request.SetData(data, false)
	if err := request.Sign(signer); err != nil {
		t.Fatal(err)
	}
	resourcekey[updates[1]], err = rh.Update(ctx, &request.SignedResourceUpdate)
	if err != nil {
		t.Fatal(err)
	}

	fwdClock(int(resourceFrequency), timeProvider)
	// Update on third period, with version = 1
	request, err = rh.NewUpdateRequest(ctx, request.rootAddr)
	if err != nil {
		t.Fatal(err)
	}
	data = []byte(updates[2])
	request.SetData(data, false)
	if err := request.Sign(signer); err != nil {
		t.Fatal(err)
	}
	resourcekey[updates[2]], err = rh.Update(ctx, &request.SignedResourceUpdate)
	if err != nil {
		t.Fatal(err)
	}

	// update just after third period
	fwdClock(1, timeProvider)
	request, err = rh.NewUpdateRequest(ctx, request.rootAddr)
	if err != nil {
		t.Fatal(err)
	}
	if request.period != 3 || request.version != 2 {
		t.Fatal("Suggested period should be 3 and version should be 2")
	}
	data = []byte(updates[3])
	request.SetData(data, false)

	if err := request.Sign(signer); err != nil {
		t.Fatal(err)
	}
	resourcekey[updates[3]], err = rh.Update(ctx, &request.SignedResourceUpdate)
	if err != nil {
		t.Fatal(err)
	}

	time.Sleep(time.Second)
	rh.Close()

	// check we can retrieve the updates after close
	// the lookup will resolve on its second iteration, at startTime + (resourceFrequency * 3)
	fwdClock(int(resourceFrequency*2)-1, timeProvider)

	rhparams := &HandlerParams{}

	rh2, err := NewTestHandler(datadir, rhparams)
	if err != nil {
		t.Fatal(err)
	}

	rsrc2, err := rh2.Load(context.TODO(), request.rootAddr)
	if err != nil {
		t.Fatal(err)
	}

	_, err = rh2.Lookup(ctx, LookupLatest(request.rootAddr))
	if err != nil {
		t.Fatal(err)
	}

	// last update should be "clyde", version 2, at startTime + (resourceFrequency * 3)
	if !bytes.Equal(rsrc2.data, []byte(updates[len(updates)-1])) {
		t.Fatalf("resource data was %v, expected %v", string(rsrc2.data), updates[len(updates)-1])
	}
	if rsrc2.version != 2 {
		t.Fatalf("resource version was %d, expected 2", rsrc2.version)
	}
	if rsrc2.period != 3 {
		t.Fatalf("resource period was %d, expected 3", rsrc2.period)
	}
	log.Debug("Latest lookup", "period", rsrc2.period, "version", rsrc2.version, "data", rsrc2.data)

	// specific period, latest version
	rsrc, err := rh2.Lookup(ctx, LookupLatestVersionInPeriod(request.rootAddr, 3))
	if err != nil {
		t.Fatal(err)
	}
	// check data
	if !bytes.Equal(rsrc.data, []byte(updates[len(updates)-1])) {
		t.Fatalf("resource data (historical) was %v, expected %v", string(rsrc.data), updates[len(updates)-1])
	}
	log.Debug("Historical lookup", "period", rsrc.period, "version", rsrc.version, "data", rsrc.data)

	// specific period, specific version
	lookupParams := LookupVersion(request.rootAddr, 3, 1)
	rsrc, err = rh2.Lookup(ctx, lookupParams)
	if err != nil {
		t.Fatal(err)
	}
	// check data
	if !bytes.Equal(rsrc.data, []byte(updates[2])) {
		t.Fatalf("resource data (specific version) was %v, expected %v", string(rsrc.data), updates[2])
	}
	log.Debug("Specific version lookup", "period", rsrc.period, "version", rsrc.version, "data", rsrc.data)

	// we are now at third update
	// check backwards stepping to the first
	for i := 1; i >= 0; i-- {
		rsrc, err := rh2.LookupPrevious(ctx, lookupParams)
		if err != nil {
			t.Fatal(err)
		}
		if !bytes.Equal(rsrc.data, []byte(updates[i])) {
			t.Fatalf("resource data (previous) was %v, expected %v", rsrc.data, updates[i])
		}
	}

	// beyond the first should yield an error
	rsrc, err = rh2.LookupPrevious(ctx, lookupParams)
	if err == nil {
		t.Fatalf("expected previous to fail, returned period %d version %d data %v", rsrc.period, rsrc.version, rsrc.data)
	}

}

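// TestMultihash stores multihash-flagged updates, verifies that truncated multihashes are rejected,
// and checks that stored multihashes decode back to the original hashes, both before and after
// reopening the handler from the same datadir.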
func TestMultihash(t *testing.T) {

	// make fake timeProvider
	timeProvider := &fakeTimeProvider{
		currentTime: startTime.Time,
	}

	// signer containing private key
	signer := newAliceSigner()

	// set up a resource handler backed by a temporary datadir
	rh, datadir, teardownTest, err := setupTest(timeProvider, signer)
	if err != nil {
		t.Fatal(err)
	}
	defer teardownTest()

	// create a new resource
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	metadata := &ResourceMetadata{
		Name:      resourceName,
		Frequency: resourceFrequency,
		StartTime: Timestamp{Time: timeProvider.Now().Time},
		Owner:     signer.Address(),
	}

	mr, err := NewCreateRequest(metadata)
	if err != nil {
		t.Fatal(err)
	}
	err = rh.New(ctx, mr)
	if err != nil {
		t.Fatal(err)
	}

	// we're naïvely assuming keccak256 for swarm hashes
	// if it ever changes this test should also change
	multihashbytes := ens.EnsNode("foo")
	multihashmulti := multihash.ToMultihash(multihashbytes.Bytes())
	mr.SetData(multihashmulti, true)
	if err := mr.Sign(signer); err != nil {
		t.Fatal(err)
	}
	multihashkey, err := rh.Update(ctx, &mr.SignedResourceUpdate)
	if err != nil {
		t.Fatal(err)
	}

	sha1bytes := make([]byte, multihash.MultihashLength)
	sha1multi := multihash.ToMultihash(sha1bytes)
	mr, err = rh.NewUpdateRequest(ctx, mr.rootAddr)
	if err != nil {
		t.Fatal(err)
	}
	mr.SetData(sha1multi, true)
	if err := mr.Sign(signer); err != nil {
		t.Fatal(err)
	}
	sha1key, err := rh.Update(ctx, &mr.SignedResourceUpdate)
	if err != nil {
		t.Fatal(err)
	}

	// invalid multihashes
	mr, err = rh.NewUpdateRequest(ctx, mr.rootAddr)
	if err != nil {
		t.Fatal(err)
	}
	mr.SetData(multihashmulti[1:], true)
	if err := mr.Sign(signer); err != nil {
		t.Fatal(err)
	}
	_, err = rh.Update(ctx, &mr.SignedResourceUpdate)
	if err == nil {
		t.Fatalf("Expected update to fail with first byte skipped")
	}
	mr, err = rh.NewUpdateRequest(ctx, mr.rootAddr)
	if err != nil {
		t.Fatal(err)
	}
	mr.SetData(multihashmulti[:len(multihashmulti)-2], true)
	if err := mr.Sign(signer); err != nil {
		t.Fatal(err)
	}

	_, err = rh.Update(ctx, &mr.SignedResourceUpdate)
	if err == nil {
		t.Fatalf("Expected update to fail with last byte skipped")
	}

	data, err := getUpdateDirect(rh.Handler, multihashkey)
	if err != nil {
		t.Fatal(err)
	}
	multihashdecode, err := multihash.FromMultihash(data)
	if err != nil {
		t.Fatal(err)
	}
	if !bytes.Equal(multihashdecode, multihashbytes.Bytes()) {
		t.Fatalf("Decoded hash '%x' does not match original hash '%x'", multihashdecode, multihashbytes.Bytes())
	}
	data, err = getUpdateDirect(rh.Handler, sha1key)
	if err != nil {
		t.Fatal(err)
	}
	shadecode, err := multihash.FromMultihash(data)
	if err != nil {
		t.Fatal(err)
	}
	if !bytes.Equal(shadecode, sha1bytes) {
		t.Fatalf("Decoded hash '%x' does not match original hash '%x'", shadecode, sha1bytes)
	}
	rh.Close()

	rhparams := &HandlerParams{}
	// test with signed data
	rh2, err := NewTestHandler(datadir, rhparams)
	if err != nil {
		t.Fatal(err)
	}
	mr, err = NewCreateRequest(metadata)
	if err != nil {
		t.Fatal(err)
	}
	err = rh2.New(ctx, mr)
	if err != nil {
		t.Fatal(err)
	}

	mr.SetData(multihashmulti, true)
	if err := mr.Sign(signer); err != nil {
		t.Fatal(err)
	}
	multihashsignedkey, err := rh2.Update(ctx, &mr.SignedResourceUpdate)
	if err != nil {
		t.Fatal(err)
	}

	mr, err = rh2.NewUpdateRequest(ctx, mr.rootAddr)
	if err != nil {
		t.Fatal(err)
	}
	mr.SetData(sha1multi, true)
	if err := mr.Sign(signer); err != nil {
		t.Fatal(err)
	}

	sha1signedkey, err := rh2.Update(ctx, &mr.SignedResourceUpdate)
	if err != nil {
		t.Fatal(err)
	}

	data, err = getUpdateDirect(rh2.Handler, multihashsignedkey)
	if err != nil {
		t.Fatal(err)
	}
	multihashdecode, err = multihash.FromMultihash(data)
	if err != nil {
		t.Fatal(err)
	}
	if !bytes.Equal(multihashdecode, multihashbytes.Bytes()) {
		t.Fatalf("Decoded hash '%x' does not match original hash '%x'", multihashdecode, multihashbytes.Bytes())
	}
	data, err = getUpdateDirect(rh2.Handler, sha1signedkey)
	if err != nil {
		t.Fatal(err)
	}
	shadecode, err = multihash.FromMultihash(data)
	if err != nil {
		t.Fatal(err)
	}
	if !bytes.Equal(shadecode, sha1bytes) {
		t.Fatalf("Decoded hash '%x' does not match original hash '%x'", shadecode, sha1bytes)
	}
}

// TODO: verify testing of signature validation and enforcement
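// TestValidator checks that the handler's chunk validator accepts correctly signed update and
// metadata chunks and rejects update chunks whose signature does not match the resource owner.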
func TestValidator(t *testing.T) {

	// make fake timeProvider
	timeProvider := &fakeTimeProvider{
		currentTime: startTime.Time,
	}

	// signer containing private key. Alice will be the good girl
	signer := newAliceSigner()

	// fake signer for false results. Bob will play the bad guy today.
	falseSigner := newBobSigner()

	// set up sim timeProvider and create resource handler
	rh, _, teardownTest, err := setupTest(timeProvider, signer)
	if err != nil {
		t.Fatal(err)
	}
	defer teardownTest()

	// create new resource
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()
	metadata := &ResourceMetadata{
		Name:      resourceName,
		Frequency: resourceFrequency,
		StartTime: Timestamp{Time: timeProvider.Now().Time},
		Owner:     signer.Address(),
	}
	mr, err := NewCreateRequest(metadata)
	if err != nil {
		t.Fatal(err)
	}
	if err := mr.Sign(signer); err != nil {
		t.Fatal(err)
	}

	err = rh.New(ctx, mr)
	if err != nil {
		t.Fatalf("Create resource fail: %v", err)
	}

	// chunk with address
	data := []byte("foo")
	mr.SetData(data, false)
	if err := mr.Sign(signer); err != nil {
		t.Fatalf("sign fail: %v", err)
	}
	chunk, err := mr.SignedResourceUpdate.toChunk()
	if err != nil {
		t.Fatal(err)
	}
	if !rh.Validate(chunk.Addr, chunk.SData) {
		t.Fatal("Chunk validator fail on update chunk")
	}

	// signing with a key that does not match the resource owner must fail
	if err := mr.Sign(falseSigner); err == nil {
		t.Fatal("Expected Sign to fail since we are using a signer with a different OwnerAddr")
	}

	// chunk with address made from different publickey
	mr.metadata.Owner = zeroAddr // set to zero to bypass .Sign() check
	if err := mr.Sign(falseSigner); err != nil {
		t.Fatalf("sign fail: %v", err)
	}

	chunk, err = mr.SignedResourceUpdate.toChunk()
	if err != nil {
		t.Fatal(err)
	}

	if rh.Validate(chunk.Addr, chunk.SData) {
		t.Fatal("Chunk validator did not fail on update chunk with false address")
	}

	ctx, cancel = context.WithTimeout(context.Background(), time.Second)
	defer cancel()

	metadata = &ResourceMetadata{
		Name:      resourceName,
		StartTime: TimestampProvider.Now(),
		Frequency: resourceFrequency,
		Owner:     signer.Address(),
	}
	chunk, _, err = metadata.newChunk()
	if err != nil {
		t.Fatal(err)
	}

	if !rh.Validate(chunk.Addr, chunk.SData) {
		t.Fatal("Chunk validator fail on metadata chunk")
	}
}

// tests that the content address validator correctly checks the data
// tests that resource update chunks are passed through content address validator
// there is some redundancy in this test as it also tests content addressed chunks,
// which should be evaluated as invalid chunks by this validator
func TestValidatorInStore(t *testing.T) {

	// make fake timeProvider
	TimestampProvider = &fakeTimeProvider{
		currentTime: startTime.Time,
	}

	// signer containing private key
	signer := newAliceSigner()

	// set up localstore
	datadir, err := ioutil.TempDir("", "storage-testresourcevalidator")
	if err != nil {
		t.Fatal(err)
	}
	defer os.RemoveAll(datadir)

	params := storage.NewDefaultLocalStoreParams()
	params.Init(datadir)
	store, err := storage.NewLocalStore(params, nil)
	if err != nil {
		t.Fatal(err)
	}

	// set up resource handler and add it as a validator to the localstore
	rhParams := &HandlerParams{}
	rh, err := NewHandler(rhParams)
	if err != nil {
		t.Fatal(err)
	}
	store.Validators = append(store.Validators, rh)

	// create content addressed chunks, one good, one faulty
	chunks := storage.GenerateRandomChunks(storage.DefaultChunkSize, 2)
	goodChunk := chunks[0]
	badChunk := chunks[1]
	badChunk.SData = goodChunk.SData
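	// badChunk now carries goodChunk's payload, so its address no longer matches its content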

	metadata := &ResourceMetadata{
		StartTime: startTime,
		Name:      "xyzzy",
		Frequency: resourceFrequency,
		Owner:     signer.Address(),
	}

	rootChunk, metaHash, err := metadata.newChunk()
	if err != nil {
		t.Fatal(err)
	}
	// create a resource update chunk with correct publickey
	updateLookup := UpdateLookup{
		period:   42,
		version:  1,
		rootAddr: rootChunk.Addr,
	}

	updateAddr := updateLookup.UpdateAddr()
	data := []byte("bar")

	r := SignedResourceUpdate{
		updateAddr: updateAddr,
		resourceUpdate: resourceUpdate{
			updateHeader: updateHeader{
				UpdateLookup: updateLookup,
				metaHash:     metaHash,
			},
			data: data,
		},
	}

	if err := r.Sign(signer); err != nil {
		t.Fatal(err)
	}

	uglyChunk, err := r.toChunk()
	if err != nil {
		t.Fatal(err)
	}

	// put the chunks in the store and check their error status
	storage.PutChunks(store, goodChunk)
	if goodChunk.GetErrored() == nil {
		t.Fatal("expected error on good content address chunk with resource validator only, but got nil")
	}
	storage.PutChunks(store, badChunk)
	if badChunk.GetErrored() == nil {
		t.Fatal("expected error on bad content address chunk with resource validator only, but got nil")
	}
	storage.PutChunks(store, uglyChunk)
	if err := uglyChunk.GetErrored(); err != nil {
		t.Fatalf("expected no error on resource update chunk with resource validator only, but got: %s", err)
	}
}

// fast-forward clock
func fwdClock(count int, timeProvider *fakeTimeProvider) {
	for i := 0; i < count; i++ {
		timeProvider.Tick()
	}
}

// create a temp datadir and a test resource handler
func setupTest(timeProvider timestampProvider, signer Signer) (rh *TestHandler, datadir string, teardown func(), err error) {

	var fsClean func()
	var rpcClean func()
	cleanF = func() {
		if fsClean != nil {
			fsClean()
		}
		if rpcClean != nil {
			rpcClean()
		}
	}

	// temp datadir
	datadir, err = ioutil.TempDir("", "rh")
	if err != nil {
		return nil, "", nil, err
	}
	fsClean = func() {
		os.RemoveAll(datadir)
	}

	TimestampProvider = timeProvider
	rhparams := &HandlerParams{}
	rh, err = NewTestHandler(datadir, rhparams)
	return rh, datadir, cleanF, err
}

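// deterministic test signers; the hex strings below are fixed, throwaway private keys used only in tests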
func newAliceSigner() *GenericSigner {
	privKey, _ := crypto.HexToECDSA("deadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeef")
	return NewGenericSigner(privKey)
}

func newBobSigner() *GenericSigner {
	privKey, _ := crypto.HexToECDSA("accedeaccedeaccedeaccedeaccedeaccedeaccedeaccedeaccedeaccedecaca")
	return NewGenericSigner(privKey)
}

func newCharlieSigner() *GenericSigner {
	privKey, _ := crypto.HexToECDSA("facadefacadefacadefacadefacadefacadefacadefacadefacadefacadefaca")
	return NewGenericSigner(privKey)
}

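// getUpdateDirect fetches an update chunk straight from the handler's chunk store and returns its
// decoded payload, bypassing the period/version lookup logic.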
func getUpdateDirect(rh *Handler, addr storage.Address) ([]byte, error) {
	chunk, err := rh.chunkStore.Get(context.TODO(), addr)
	if err != nil {
		return nil, err
	}
	var r SignedResourceUpdate
	if err := r.fromChunk(addr, chunk.SData); err != nil {
		return nil, err
	}
	return r.data, nil
}