github.com/divan/go-ethereum@v1.8.14-0.20180820134928-1de9ada4016d/swarm/storage/mru/resource_test.go

// Copyright 2018 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

package mru

import (
	"bytes"
	"context"
	"crypto/rand"
	"encoding/binary"
	"flag"
	"io/ioutil"
	"os"
	"testing"
	"time"

	"github.com/ethereum/go-ethereum/contracts/ens"
	"github.com/ethereum/go-ethereum/crypto"
	"github.com/ethereum/go-ethereum/log"
	"github.com/ethereum/go-ethereum/swarm/chunk"
	"github.com/ethereum/go-ethereum/swarm/multihash"
	"github.com/ethereum/go-ethereum/swarm/storage"
)

var (
	loglevel   = flag.Int("loglevel", 3, "loglevel")
	testHasher = storage.MakeHashFunc(resourceHashAlgorithm)()
	startTime  = Timestamp{
		Time: uint64(4200),
	}
	resourceFrequency = uint64(42)
	cleanF            func()
	resourceName      = "føø.bar"
	hashfunc          = storage.MakeHashFunc(storage.DefaultHash)
)

func init() {
	flag.Parse()
	log.Root().SetHandler(log.CallerFileHandler(log.LvlFilterHandler(log.Lvl(*loglevel), log.StreamHandler(os.Stderr, log.TerminalFormat(true)))))
}

// simulated timeProvider
type fakeTimeProvider struct {
	currentTime uint64
}

func (f *fakeTimeProvider) Tick() {
	f.currentTime++
}

func (f *fakeTimeProvider) Now() Timestamp {
	return Timestamp{
		Time: f.currentTime,
	}
}

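// TestUpdateChunkSerializationErrorChecking verifies that update serialization
// and parsing reject malformed input: chunks that are too small, length headers
// that disagree with the payload, wrong-length rootAddr/metaHash fields,
// missing data and missing signatures.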
func TestUpdateChunkSerializationErrorChecking(t *testing.T) {

	// Test that fromChunk fails if the chunk is too small
	var r SignedResourceUpdate
	if err := r.fromChunk(storage.ZeroAddr, make([]byte, minimumUpdateDataLength-1)); err == nil {
		t.Fatalf("Expected fromChunk to fail when chunkData contains less than %d bytes", minimumUpdateDataLength)
	}

	r = SignedResourceUpdate{}
	// Test that fromChunk fails when the length header does not match the data array length
	fakeChunk := make([]byte, 150)
	binary.LittleEndian.PutUint16(fakeChunk, 44)
	if err := r.fromChunk(storage.ZeroAddr, fakeChunk); err == nil {
		t.Fatal("Expected fromChunk to fail when the header length does not match the actual data array passed in")
	}

	r = SignedResourceUpdate{
		resourceUpdate: resourceUpdate{
			updateHeader: updateHeader{
				UpdateLookup: UpdateLookup{
					rootAddr: make([]byte, 79), // put the wrong length, should be storage.KeyLength
				},
				metaHash:  nil,
				multihash: false,
			},
		},
	}
	_, err := r.toChunk()
	if err == nil {
		t.Fatal("Expected toChunk to fail when rootAddr or metaHash have the wrong length")
	}
	r.rootAddr = make([]byte, storage.KeyLength)
	r.metaHash = make([]byte, storage.KeyLength)
	_, err = r.toChunk()
	if err == nil {
		t.Fatal("Expected toChunk to fail when there is no data")
	}
	r.data = make([]byte, 79) // put some arbitrary length data
	_, err = r.toChunk()
	if err == nil {
		t.Fatal("expected toChunk to fail when there is no signature")
	}

	alice := newAliceSigner()
	if err := r.Sign(alice); err != nil {
		t.Fatalf("error signing: %s", err)
	}
	_, err = r.toChunk()
	if err != nil {
		t.Fatalf("error creating update chunk: %s", err)
	}

	r.multihash = true
	r.data[1] = 79 // mess with the multihash, corrupting one byte of it
	if err := r.Sign(alice); err == nil {
		t.Fatal("expected Sign() to fail when an invalid multihash is in data and multihash=true")
	}
}

// TestReverse checks that the address recovered from an update chunk's signature matches the signer's address
func TestReverse(t *testing.T) {

	period := uint32(4)
	version := uint32(2)

	// make fake timeProvider
	timeProvider := &fakeTimeProvider{
		currentTime: startTime.Time,
	}

	// signer containing private key
	signer := newAliceSigner()

	// set up rpc and create resourcehandler
	_, _, teardownTest, err := setupTest(timeProvider, signer)
	if err != nil {
		t.Fatal(err)
	}
	defer teardownTest()

	metadata := ResourceMetadata{
		Name:      resourceName,
		StartTime: startTime,
		Frequency: resourceFrequency,
		Owner:     signer.Address(),
	}

	rootAddr, metaHash, _, err := metadata.serializeAndHash()
	if err != nil {
		t.Fatal(err)
	}

	// generate some bogus data for the chunk and sign it
	data := make([]byte, 8)
	_, err = rand.Read(data)
	if err != nil {
		t.Fatal(err)
	}
	testHasher.Reset()
	testHasher.Write(data)

	update := &SignedResourceUpdate{
		resourceUpdate: resourceUpdate{
			updateHeader: updateHeader{
				UpdateLookup: UpdateLookup{
					period:   period,
					version:  version,
					rootAddr: rootAddr,
				},
				metaHash: metaHash,
			},
			data: data,
		},
	}
	// generate the update address for period=4, version=2
	key := update.UpdateAddr()

	if err = update.Sign(signer); err != nil {
		t.Fatal(err)
	}

	chunk, err := update.toChunk()
	if err != nil {
		t.Fatal(err)
	}

	// check that we can recover the owner account from the update chunk's signature
	var checkUpdate SignedResourceUpdate
	if err := checkUpdate.fromChunk(chunk.Addr, chunk.SData); err != nil {
		t.Fatal(err)
	}
	checkdigest, err := checkUpdate.GetDigest()
	if err != nil {
		t.Fatal(err)
	}
	recoveredaddress, err := getOwner(checkdigest, *checkUpdate.signature)
	if err != nil {
		t.Fatalf("Retrieve address from signature fail: %v", err)
	}
	originaladdress := crypto.PubkeyToAddress(signer.PrivKey.PublicKey)

	// check that the recovered signer address matches the original
	if recoveredaddress != originaladdress {
		t.Fatalf("addresses don't match: %x != %x", originaladdress, recoveredaddress)
	}

	if !bytes.Equal(key[:], chunk.Addr[:]) {
		t.Fatalf("Expected chunk key '%x', was '%x'", key, chunk.Addr)
	}
	if period != checkUpdate.period {
		t.Fatalf("Expected period '%d', was '%d'", period, checkUpdate.period)
	}
	if version != checkUpdate.version {
		t.Fatalf("Expected version '%d', was '%d'", version, checkUpdate.version)
	}
	if !bytes.Equal(data, checkUpdate.data) {
		t.Fatalf("Expected data '%x', was '%x'", data, checkUpdate.data)
	}
}

// TestResourceHandler makes updates and retrieves them based on periods and versions
func TestResourceHandler(t *testing.T) {

	// make fake timeProvider
	timeProvider := &fakeTimeProvider{
		currentTime: startTime.Time,
	}

	// signer containing private key
	signer := newAliceSigner()

	rh, datadir, teardownTest, err := setupTest(timeProvider, signer)
	if err != nil {
		t.Fatal(err)
	}
	defer teardownTest()

	// create a new resource
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	metadata := &ResourceMetadata{
		Name:      resourceName,
		Frequency: resourceFrequency,
		StartTime: Timestamp{Time: timeProvider.Now().Time},
		Owner:     signer.Address(),
	}

	request, err := NewCreateUpdateRequest(metadata)
	if err != nil {
		t.Fatal(err)
	}
	request.Sign(signer) // the request carries no data yet; the result of Sign is not checked here
	err = rh.New(ctx, request)
	if err != nil {
		t.Fatal(err)
	}

	chunk, err := rh.chunkStore.Get(context.TODO(), storage.Address(request.rootAddr))
	if err != nil {
		t.Fatal(err)
	} else if len(chunk.SData) < 16 {
		t.Fatalf("chunk data must be minimum 16 bytes, is %d", len(chunk.SData))
	}

	var recoveredMetadata ResourceMetadata

	if err := recoveredMetadata.binaryGet(chunk.SData); err != nil {
		t.Fatal(err)
	}
	if recoveredMetadata.StartTime.Time != timeProvider.currentTime {
		t.Fatalf("stored startTime %d does not match provided startTime %d", recoveredMetadata.StartTime.Time, timeProvider.currentTime)
	}
	if recoveredMetadata.Frequency != resourceFrequency {
		t.Fatalf("stored frequency %d does not match provided frequency %d", recoveredMetadata.Frequency, resourceFrequency)
	}

	// data for updates:
	updates := []string{
		"blinky",
		"pinky",
		"inky",
		"clyde",
	}

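	// With Frequency=42 and the simulated clock starting at startTime, the
	// updates below are expected to land on period=1 (version 1), period=2
	// (version 1), period=3 (version 1) and period=3 again (version 2).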
	// update halfway to first period. period=1, version=1
	resourcekey := make(map[string]storage.Address)
	fwdClock(int(resourceFrequency/2), timeProvider)
	data := []byte(updates[0])
	request.SetData(data, false)
	if err := request.Sign(signer); err != nil {
		t.Fatal(err)
	}
	resourcekey[updates[0]], err = rh.Update(ctx, &request.SignedResourceUpdate)
	if err != nil {
		t.Fatal(err)
	}

	// update on first period with version = 1 to make it fail since there is already one update with version=1
	request, err = rh.NewUpdateRequest(ctx, request.rootAddr)
	if err != nil {
		t.Fatal(err)
	}
	if request.version != 2 || request.period != 1 {
		t.Fatal("Suggested period should be 1 and version should be 2")
	}

	request.version = 1 // force version 1 instead of 2 to make it fail
	data = []byte(updates[1])
	request.SetData(data, false)
	if err := request.Sign(signer); err != nil {
		t.Fatal(err)
	}
	resourcekey[updates[1]], err = rh.Update(ctx, &request.SignedResourceUpdate)
	if err == nil {
		t.Fatal("Expected update to fail since this version already exists")
	}

	// update on second period with version = 1, correct. period=2, version=1
	fwdClock(int(resourceFrequency/2), timeProvider)
	request, err = rh.NewUpdateRequest(ctx, request.rootAddr)
	if err != nil {
		t.Fatal(err)
	}
	request.SetData(data, false)
	if err := request.Sign(signer); err != nil {
		t.Fatal(err)
	}
	resourcekey[updates[1]], err = rh.Update(ctx, &request.SignedResourceUpdate)
	if err != nil {
		t.Fatal(err)
	}

	fwdClock(int(resourceFrequency), timeProvider)
	// Update on third period, with version = 1
	request, err = rh.NewUpdateRequest(ctx, request.rootAddr)
	if err != nil {
		t.Fatal(err)
	}
	data = []byte(updates[2])
	request.SetData(data, false)
	if err := request.Sign(signer); err != nil {
		t.Fatal(err)
	}
	resourcekey[updates[2]], err = rh.Update(ctx, &request.SignedResourceUpdate)
	if err != nil {
		t.Fatal(err)
	}

	// update just after third period
	fwdClock(1, timeProvider)
	request, err = rh.NewUpdateRequest(ctx, request.rootAddr)
	if err != nil {
		t.Fatal(err)
	}
	if request.period != 3 || request.version != 2 {
		t.Fatal("Suggested period should be 3 and version should be 2")
	}
	data = []byte(updates[3])
	request.SetData(data, false)

	if err := request.Sign(signer); err != nil {
		t.Fatal(err)
	}
	resourcekey[updates[3]], err = rh.Update(ctx, &request.SignedResourceUpdate)
	if err != nil {
		t.Fatal(err)
	}

	time.Sleep(time.Second)
	rh.Close()

	// check that we can retrieve the updates after close
	// the lookup will match on its second iteration, at startTime + (resourceFrequency * 3)
	fwdClock(int(resourceFrequency*2)-1, timeProvider)

	rhparams := &HandlerParams{}

	rh2, err := NewTestHandler(datadir, rhparams)
	if err != nil {
		t.Fatal(err)
	}

	rsrc2, err := rh2.Load(context.TODO(), request.rootAddr)
	if err != nil {
		t.Fatal(err)
	}

	_, err = rh2.Lookup(ctx, LookupLatest(request.rootAddr))
	if err != nil {
		t.Fatal(err)
	}

	// last update should be "clyde", version two, time = startTime + (resourceFrequency * 3)
	if !bytes.Equal(rsrc2.data, []byte(updates[len(updates)-1])) {
		t.Fatalf("resource data was %v, expected %v", string(rsrc2.data), updates[len(updates)-1])
	}
	if rsrc2.version != 2 {
		t.Fatalf("resource version was %d, expected 2", rsrc2.version)
	}
	if rsrc2.period != 3 {
		t.Fatalf("resource period was %d, expected 3", rsrc2.period)
	}
	log.Debug("Latest lookup", "period", rsrc2.period, "version", rsrc2.version, "data", rsrc2.data)

	// specific period, latest version
	rsrc, err := rh2.Lookup(ctx, LookupLatestVersionInPeriod(request.rootAddr, 3))
	if err != nil {
		t.Fatal(err)
	}
	// check data
	if !bytes.Equal(rsrc.data, []byte(updates[len(updates)-1])) {
		t.Fatalf("resource data (historical) was %v, expected %v", string(rsrc.data), updates[len(updates)-1])
	}
	log.Debug("Historical lookup", "period", rsrc.period, "version", rsrc.version, "data", rsrc.data)

	// specific period, specific version
	lookupParams := LookupVersion(request.rootAddr, 3, 1)
	rsrc, err = rh2.Lookup(ctx, lookupParams)
	if err != nil {
		t.Fatal(err)
	}
	// check data
	if !bytes.Equal(rsrc.data, []byte(updates[2])) {
		t.Fatalf("resource data (historical) was %v, expected %v", string(rsrc.data), updates[2])
	}
	log.Debug("Specific version lookup", "period", rsrc.period, "version", rsrc.version, "data", rsrc.data)

	// we are now at the third update
	// check backwards stepping to the first
	for i := 1; i >= 0; i-- {
		rsrc, err := rh2.LookupPrevious(ctx, lookupParams)
		if err != nil {
			t.Fatal(err)
		}
		if !bytes.Equal(rsrc.data, []byte(updates[i])) {
			t.Fatalf("resource data (previous) was %v, expected %v", rsrc.data, updates[i])
		}
	}

	// stepping beyond the first update should yield an error
	rsrc, err = rh2.LookupPrevious(ctx, lookupParams)
	if err == nil {
		t.Fatalf("expected previous to fail, returned period %d version %d data %v", rsrc.period, rsrc.version, rsrc.data)
	}

}

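// TestMultihash stores updates flagged as multihashes and checks that valid
// multihash payloads round-trip through the handler while truncated multihashes
// are rejected; it then repeats the round-trip with a handler reopened on the
// same datadir.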
func TestMultihash(t *testing.T) {

	// make fake timeProvider
	timeProvider := &fakeTimeProvider{
		currentTime: startTime.Time,
	}

	// signer containing private key
	signer := newAliceSigner()

	// set up rpc and create resourcehandler
	rh, datadir, teardownTest, err := setupTest(timeProvider, signer)
	if err != nil {
		t.Fatal(err)
	}
	defer teardownTest()

	// create a new resource
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	metadata := &ResourceMetadata{
		Name:      resourceName,
		Frequency: resourceFrequency,
		StartTime: Timestamp{Time: timeProvider.Now().Time},
		Owner:     signer.Address(),
	}

	mr, err := NewCreateRequest(metadata)
	if err != nil {
		t.Fatal(err)
	}
	err = rh.New(ctx, mr)
	if err != nil {
		t.Fatal(err)
	}

	// we're naïvely assuming keccak256 for swarm hashes
	// if it ever changes this test should also change
	multihashbytes := ens.EnsNode("foo")
	multihashmulti := multihash.ToMultihash(multihashbytes.Bytes())
	mr.SetData(multihashmulti, true)
	if err := mr.Sign(signer); err != nil {
		t.Fatal(err)
	}
	multihashkey, err := rh.Update(ctx, &mr.SignedResourceUpdate)
	if err != nil {
		t.Fatal(err)
	}

	sha1bytes := make([]byte, multihash.MultihashLength)
	sha1multi := multihash.ToMultihash(sha1bytes)
	mr, err = rh.NewUpdateRequest(ctx, mr.rootAddr)
	if err != nil {
		t.Fatal(err)
	}
	mr.SetData(sha1multi, true)
	if err := mr.Sign(signer); err != nil {
		t.Fatal(err)
	}
	sha1key, err := rh.Update(ctx, &mr.SignedResourceUpdate)
	if err != nil {
		t.Fatal(err)
	}

	// invalid multihashes
	mr, err = rh.NewUpdateRequest(ctx, mr.rootAddr)
	if err != nil {
		t.Fatal(err)
	}
	mr.SetData(multihashmulti[1:], true)
	// Sign may itself reject the malformed multihash; its error is deliberately
	// not fatal here, since the assertion below is that the update is rejected.
	mr.Sign(signer)
	_, err = rh.Update(ctx, &mr.SignedResourceUpdate)
	if err == nil {
		t.Fatalf("Expected update to fail with first byte skipped")
	}
	mr, err = rh.NewUpdateRequest(ctx, mr.rootAddr)
	if err != nil {
		t.Fatal(err)
	}
	mr.SetData(multihashmulti[:len(multihashmulti)-2], true)
	mr.Sign(signer)

	_, err = rh.Update(ctx, &mr.SignedResourceUpdate)
	if err == nil {
		t.Fatalf("Expected update to fail with last byte skipped")
	}

	data, err := getUpdateDirect(rh.Handler, multihashkey)
	if err != nil {
		t.Fatal(err)
	}
	multihashdecode, err := multihash.FromMultihash(data)
	if err != nil {
		t.Fatal(err)
	}
	if !bytes.Equal(multihashdecode, multihashbytes.Bytes()) {
		t.Fatalf("Decoded hash '%x' does not match original hash '%x'", multihashdecode, multihashbytes.Bytes())
	}
	data, err = getUpdateDirect(rh.Handler, sha1key)
	if err != nil {
		t.Fatal(err)
	}
	shadecode, err := multihash.FromMultihash(data)
	if err != nil {
		t.Fatal(err)
	}
	if !bytes.Equal(shadecode, sha1bytes) {
		t.Fatalf("Decoded hash '%x' does not match original hash '%x'", shadecode, sha1bytes)
	}
	rh.Close()

	rhparams := &HandlerParams{}
	// test with signed data
	rh2, err := NewTestHandler(datadir, rhparams)
	if err != nil {
		t.Fatal(err)
	}
	mr, err = NewCreateRequest(metadata)
	if err != nil {
		t.Fatal(err)
	}
	err = rh2.New(ctx, mr)
	if err != nil {
		t.Fatal(err)
	}

	mr.SetData(multihashmulti, true)
	if err := mr.Sign(signer); err != nil {
		t.Fatal(err)
	}
	multihashsignedkey, err := rh2.Update(ctx, &mr.SignedResourceUpdate)
	if err != nil {
		t.Fatal(err)
	}

	mr, err = rh2.NewUpdateRequest(ctx, mr.rootAddr)
	if err != nil {
		t.Fatal(err)
	}
	mr.SetData(sha1multi, true)
	if err := mr.Sign(signer); err != nil {
		t.Fatal(err)
	}

	sha1signedkey, err := rh2.Update(ctx, &mr.SignedResourceUpdate)
	if err != nil {
		t.Fatal(err)
	}

	data, err = getUpdateDirect(rh2.Handler, multihashsignedkey)
	if err != nil {
		t.Fatal(err)
	}
	multihashdecode, err = multihash.FromMultihash(data)
	if err != nil {
		t.Fatal(err)
	}
	if !bytes.Equal(multihashdecode, multihashbytes.Bytes()) {
		t.Fatalf("Decoded hash '%x' does not match original hash '%x'", multihashdecode, multihashbytes.Bytes())
	}
	data, err = getUpdateDirect(rh2.Handler, sha1signedkey)
	if err != nil {
		t.Fatal(err)
	}
	shadecode, err = multihash.FromMultihash(data)
	if err != nil {
		t.Fatal(err)
	}
	if !bytes.Equal(shadecode, sha1bytes) {
		t.Fatalf("Decoded hash '%x' does not match original hash '%x'", shadecode, sha1bytes)
	}
}

// TODO: verify testing of signature validation and enforcement
func TestValidator(t *testing.T) {

	// make fake timeProvider
	timeProvider := &fakeTimeProvider{
		currentTime: startTime.Time,
	}

	// signer containing private key. Alice will be the good girl
	signer := newAliceSigner()

	// fake signer for false results. Bob will play the bad guy today.
	falseSigner := newBobSigner()

	// set up resource handler with the simulated timeProvider
	rh, _, teardownTest, err := setupTest(timeProvider, signer)
	if err != nil {
		t.Fatal(err)
	}
	defer teardownTest()

	// create new resource
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()
	metadata := &ResourceMetadata{
		Name:      resourceName,
		Frequency: resourceFrequency,
		StartTime: Timestamp{Time: timeProvider.Now().Time},
		Owner:     signer.Address(),
	}
	mr, err := NewCreateRequest(metadata)
	if err != nil {
		t.Fatal(err)
	}
	mr.Sign(signer)

	err = rh.New(ctx, mr)
	if err != nil {
		t.Fatalf("Create resource fail: %v", err)
	}

	// chunk with address
	data := []byte("foo")
	mr.SetData(data, false)
	if err := mr.Sign(signer); err != nil {
		t.Fatalf("sign fail: %v", err)
	}
	chunk, err := mr.SignedResourceUpdate.toChunk()
	if err != nil {
		t.Fatal(err)
	}
	if !rh.Validate(chunk.Addr, chunk.SData) {
		t.Fatal("Chunk validator fail on update chunk")
	}

	// signing with a key that does not match the metadata owner must fail
	if err := mr.Sign(falseSigner); err == nil {
		t.Fatal("Expected Sign to fail since we are using a different OwnerAddr")
	}

	// chunk with address made from different publickey
	mr.metadata.Owner = zeroAddr // set to zero to bypass .Sign() check
	if err := mr.Sign(falseSigner); err != nil {
		t.Fatalf("sign fail: %v", err)
	}

	chunk, err = mr.SignedResourceUpdate.toChunk()
	if err != nil {
		t.Fatal(err)
	}

	if rh.Validate(chunk.Addr, chunk.SData) {
		t.Fatal("Chunk validator did not fail on update chunk with false address")
	}

	ctx, cancel = context.WithTimeout(context.Background(), time.Second)
	defer cancel()

	metadata = &ResourceMetadata{
		Name:      resourceName,
		StartTime: TimestampProvider.Now(),
		Frequency: resourceFrequency,
		Owner:     signer.Address(),
	}
	chunk, _, err = metadata.newChunk()
	if err != nil {
		t.Fatal(err)
	}

	if !rh.Validate(chunk.Addr, chunk.SData) {
		t.Fatal("Chunk validator fail on metadata chunk")
	}
}

// TestValidatorInStore tests that the content address validator correctly checks the data
// and that resource update chunks are passed through the content address validator.
// There is some redundancy in this test, as it also tests content addressed chunks,
// which should be evaluated as invalid chunks by this validator.
func TestValidatorInStore(t *testing.T) {

	// make fake timeProvider
	TimestampProvider = &fakeTimeProvider{
		currentTime: startTime.Time,
	}

	// signer containing private key
	signer := newAliceSigner()

	// set up localstore
	datadir, err := ioutil.TempDir("", "storage-testresourcevalidator")
	if err != nil {
		t.Fatal(err)
	}
	defer os.RemoveAll(datadir)

	params := storage.NewDefaultLocalStoreParams()
	params.Init(datadir)
	store, err := storage.NewLocalStore(params, nil)
	if err != nil {
		t.Fatal(err)
	}

	// set up resource handler and add it as a validator to the localstore
	rhParams := &HandlerParams{}
	rh := NewHandler(rhParams)
	store.Validators = append(store.Validators, rh)

	// create content addressed chunks, one good, one faulty
	chunks := storage.GenerateRandomChunks(chunk.DefaultSize, 2)
	goodChunk := chunks[0]
	badChunk := chunks[1]
	badChunk.SData = goodChunk.SData

	metadata := &ResourceMetadata{
		StartTime: startTime,
		Name:      "xyzzy",
		Frequency: resourceFrequency,
		Owner:     signer.Address(),
	}

	rootChunk, metaHash, err := metadata.newChunk()
	if err != nil {
		t.Fatal(err)
	}
	// create a resource update chunk with correct publickey
	updateLookup := UpdateLookup{
		period:   42,
		version:  1,
		rootAddr: rootChunk.Addr,
	}

	updateAddr := updateLookup.UpdateAddr()
	data := []byte("bar")

	r := SignedResourceUpdate{
		updateAddr: updateAddr,
		resourceUpdate: resourceUpdate{
			updateHeader: updateHeader{
				UpdateLookup: updateLookup,
				metaHash:     metaHash,
			},
			data: data,
		},
	}

	if err := r.Sign(signer); err != nil {
		t.Fatal(err)
	}

	uglyChunk, err := r.toChunk()
	if err != nil {
		t.Fatal(err)
	}

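	// Only the resource update validator is registered on the store, so the
	// content addressed chunks are expected to fail validation while the
	// resource update chunk must pass.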
	// put the chunks in the store and check their error status
	storage.PutChunks(store, goodChunk)
	if goodChunk.GetErrored() == nil {
		t.Fatal("expected error on good content address chunk with resource validator only, but got nil")
	}
	storage.PutChunks(store, badChunk)
	if badChunk.GetErrored() == nil {
		t.Fatal("expected error on bad content address chunk with resource validator only, but got nil")
	}
	storage.PutChunks(store, uglyChunk)
	if err := uglyChunk.GetErrored(); err != nil {
		t.Fatalf("expected no error on resource update chunk with resource validator only, but got: %s", err)
	}
}

// fast-forward clock
func fwdClock(count int, timeProvider *fakeTimeProvider) {
	for i := 0; i < count; i++ {
		timeProvider.Tick()
	}
}

// create rpc and resourcehandler
func setupTest(timeProvider timestampProvider, signer Signer) (rh *TestHandler, datadir string, teardown func(), err error) {

	var fsClean func()
	var rpcClean func()
	cleanF = func() {
		if fsClean != nil {
			fsClean()
		}
		if rpcClean != nil {
			rpcClean()
		}
	}

	// temp datadir
	datadir, err = ioutil.TempDir("", "rh")
	if err != nil {
		return nil, "", nil, err
	}
	fsClean = func() {
		os.RemoveAll(datadir)
	}

	TimestampProvider = timeProvider
	rhparams := &HandlerParams{}
	rh, err = NewTestHandler(datadir, rhparams)
	return rh, datadir, cleanF, err
}

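// newAliceSigner, newBobSigner and newCharlieSigner return signers with fixed
// private keys so that addresses and signatures are deterministic across test runs.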
func newAliceSigner() *GenericSigner {
	privKey, _ := crypto.HexToECDSA("deadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeef")
	return NewGenericSigner(privKey)
}

func newBobSigner() *GenericSigner {
	privKey, _ := crypto.HexToECDSA("accedeaccedeaccedeaccedeaccedeaccedeaccedeaccedeaccedeaccedecaca")
	return NewGenericSigner(privKey)
}

func newCharlieSigner() *GenericSigner {
	privKey, _ := crypto.HexToECDSA("facadefacadefacadefacadefacadefacadefacadefacadefacadefacadefaca")
	return NewGenericSigner(privKey)
}

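// getUpdateDirect fetches the chunk at addr straight from the handler's chunk
// store and returns the raw update data it carries.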
func getUpdateDirect(rh *Handler, addr storage.Address) ([]byte, error) {
	chunk, err := rh.chunkStore.Get(context.TODO(), addr)
	if err != nil {
		return nil, err
	}
	var r SignedResourceUpdate
	if err := r.fromChunk(addr, chunk.SData); err != nil {
		return nil, err
	}
	return r.data, nil
}