github.com/gobitfly/go-ethereum@v1.8.12/swarm/storage/mru/resource_test.go (about)

     1  // Copyright 2018 The go-ethereum Authors
     2  // This file is part of the go-ethereum library.
     3  //
     4  // The go-ethereum library is free software: you can redistribute it and/or modify
     5  // it under the terms of the GNU Lesser General Public License as published by
     6  // the Free Software Foundation, either version 3 of the License, or
     7  // (at your option) any later version.
     8  //
     9  // The go-ethereum library is distributed in the hope that it will be useful,
    10  // but WITHOUT ANY WARRANTY; without even the implied warranty of
    11  // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
    12  // GNU Lesser General Public License for more details.
    13  //
    14  // You should have received a copy of the GNU Lesser General Public License
    15  // along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
    16  
    17  package mru
    18  
    19  import (
    20  	"bytes"
    21  	"context"
    22  	"crypto/rand"
    23  	"encoding/binary"
    24  	"flag"
    25  	"fmt"
    26  	"io/ioutil"
    27  	"math/big"
    28  	"os"
    29  	"strings"
    30  	"testing"
    31  	"time"
    32  
    33  	"github.com/ethereum/go-ethereum/accounts/abi/bind"
    34  	"github.com/ethereum/go-ethereum/accounts/abi/bind/backends"
    35  	"github.com/ethereum/go-ethereum/common"
    36  	"github.com/ethereum/go-ethereum/contracts/ens"
    37  	"github.com/ethereum/go-ethereum/contracts/ens/contract"
    38  	"github.com/ethereum/go-ethereum/core"
    39  	"github.com/ethereum/go-ethereum/core/types"
    40  	"github.com/ethereum/go-ethereum/crypto"
    41  	"github.com/ethereum/go-ethereum/log"
    42  	"github.com/ethereum/go-ethereum/swarm/multihash"
    43  	"github.com/ethereum/go-ethereum/swarm/storage"
    44  )
    45  
var (
	loglevel          = flag.Int("loglevel", 3, "loglevel")             // test log verbosity, settable via -loglevel
	testHasher        = storage.MakeHashFunc(storage.SHA3Hash)()        // shared SHA3 hasher used by tests and setupENS
	zeroAddr          = common.Address{}                                // zero address returned on setupENS failure
	startBlock        = uint64(4200)                                   // initial fake block height for fakeBackend
	resourceFrequency = uint64(42)                                     // update period (in blocks) used by all tests
	cleanF            func()                                           // aggregate teardown set by setupTest
	domainName        = "føø.bar"                                      // raw (non-ASCII) test domain
	safeName          string                                           // IDNA-safe form of domainName, set in init
	nameHash          common.Hash                                      // ENS node hash of safeName, set in init
	hashfunc          = storage.MakeHashFunc(storage.DefaultHash)      // hash constructor for the content address validator
)
    58  
    59  func init() {
    60  	var err error
    61  	flag.Parse()
    62  	log.Root().SetHandler(log.CallerFileHandler(log.LvlFilterHandler(log.Lvl(*loglevel), log.StreamHandler(os.Stderr, log.TerminalFormat(true)))))
    63  	safeName, err = ToSafeName(domainName)
    64  	if err != nil {
    65  		panic(err)
    66  	}
    67  	nameHash = ens.EnsNode(safeName)
    68  }
    69  
// simulated backend does not have the blocknumber call
// so we use this wrapper to fake returning the block count
type fakeBackend struct {
	*backends.SimulatedBackend       // underlying simulated chain; may be nil when only block counting is needed
	blocknumber int64 // fake current block height; bumped by Commit and HeaderByNumber
}
    76  
    77  func (f *fakeBackend) Commit() {
    78  	if f.SimulatedBackend != nil {
    79  		f.SimulatedBackend.Commit()
    80  	}
    81  	f.blocknumber++
    82  }
    83  
    84  func (f *fakeBackend) HeaderByNumber(context context.Context, name string, bigblock *big.Int) (*types.Header, error) {
    85  	f.blocknumber++
    86  	biggie := big.NewInt(f.blocknumber)
    87  	return &types.Header{
    88  		Number: biggie,
    89  	}, nil
    90  }
    91  
    92  // check that signature address matches update signer address
    93  func TestReverse(t *testing.T) {
    94  
    95  	period := uint32(4)
    96  	version := uint32(2)
    97  
    98  	// signer containing private key
    99  	signer, err := newTestSigner()
   100  	if err != nil {
   101  		t.Fatal(err)
   102  	}
   103  
   104  	// set up rpc and create resourcehandler
   105  	rh, _, teardownTest, err := setupTest(nil, nil, signer)
   106  	if err != nil {
   107  		t.Fatal(err)
   108  	}
   109  	defer teardownTest()
   110  
   111  	// generate a hash for block 4200 version 1
   112  	key := rh.resourceHash(period, version, ens.EnsNode(safeName))
   113  
   114  	// generate some bogus data for the chunk and sign it
   115  	data := make([]byte, 8)
   116  	_, err = rand.Read(data)
   117  	if err != nil {
   118  		t.Fatal(err)
   119  	}
   120  	testHasher.Reset()
   121  	testHasher.Write(data)
   122  	digest := rh.keyDataHash(key, data)
   123  	sig, err := rh.signer.Sign(digest)
   124  	if err != nil {
   125  		t.Fatal(err)
   126  	}
   127  
   128  	chunk := newUpdateChunk(key, &sig, period, version, safeName, data, len(data))
   129  
   130  	// check that we can recover the owner account from the update chunk's signature
   131  	checksig, checkperiod, checkversion, checkname, checkdata, _, err := rh.parseUpdate(chunk.SData)
   132  	if err != nil {
   133  		t.Fatal(err)
   134  	}
   135  	checkdigest := rh.keyDataHash(chunk.Addr, checkdata)
   136  	recoveredaddress, err := getAddressFromDataSig(checkdigest, *checksig)
   137  	if err != nil {
   138  		t.Fatalf("Retrieve address from signature fail: %v", err)
   139  	}
   140  	originaladdress := crypto.PubkeyToAddress(signer.PrivKey.PublicKey)
   141  
   142  	// check that the metadata retrieved from the chunk matches what we gave it
   143  	if recoveredaddress != originaladdress {
   144  		t.Fatalf("addresses dont match: %x != %x", originaladdress, recoveredaddress)
   145  	}
   146  
   147  	if !bytes.Equal(key[:], chunk.Addr[:]) {
   148  		t.Fatalf("Expected chunk key '%x', was '%x'", key, chunk.Addr)
   149  	}
   150  	if period != checkperiod {
   151  		t.Fatalf("Expected period '%d', was '%d'", period, checkperiod)
   152  	}
   153  	if version != checkversion {
   154  		t.Fatalf("Expected version '%d', was '%d'", version, checkversion)
   155  	}
   156  	if safeName != checkname {
   157  		t.Fatalf("Expected name '%s', was '%s'", safeName, checkname)
   158  	}
   159  	if !bytes.Equal(data, checkdata) {
   160  		t.Fatalf("Expectedn data '%x', was '%x'", data, checkdata)
   161  	}
   162  }
   163  
   164  // make updates and retrieve them based on periods and versions
   165  func TestHandler(t *testing.T) {
   166  
   167  	// make fake backend, set up rpc and create resourcehandler
   168  	backend := &fakeBackend{
   169  		blocknumber: int64(startBlock),
   170  	}
   171  	rh, datadir, teardownTest, err := setupTest(backend, nil, nil)
   172  	if err != nil {
   173  		t.Fatal(err)
   174  	}
   175  	defer teardownTest()
   176  
   177  	// create a new resource
   178  	ctx, cancel := context.WithCancel(context.Background())
   179  	defer cancel()
   180  	rootChunkKey, _, err := rh.New(ctx, safeName, resourceFrequency)
   181  	if err != nil {
   182  		t.Fatal(err)
   183  	}
   184  
   185  	chunk, err := rh.chunkStore.Get(storage.Address(rootChunkKey))
   186  	if err != nil {
   187  		t.Fatal(err)
   188  	} else if len(chunk.SData) < 16 {
   189  		t.Fatalf("chunk data must be minimum 16 bytes, is %d", len(chunk.SData))
   190  	}
   191  	startblocknumber := binary.LittleEndian.Uint64(chunk.SData[2:10])
   192  	chunkfrequency := binary.LittleEndian.Uint64(chunk.SData[10:])
   193  	if startblocknumber != uint64(backend.blocknumber) {
   194  		t.Fatalf("stored block number %d does not match provided block number %d", startblocknumber, backend.blocknumber)
   195  	}
   196  	if chunkfrequency != resourceFrequency {
   197  		t.Fatalf("stored frequency %d does not match provided frequency %d", chunkfrequency, resourceFrequency)
   198  	}
   199  
   200  	// data for updates:
   201  	updates := []string{
   202  		"blinky",
   203  		"pinky",
   204  		"inky",
   205  		"clyde",
   206  	}
   207  
   208  	// update halfway to first period
   209  	resourcekey := make(map[string]storage.Address)
   210  	fwdBlocks(int(resourceFrequency/2), backend)
   211  	data := []byte(updates[0])
   212  	resourcekey[updates[0]], err = rh.Update(ctx, safeName, data)
   213  	if err != nil {
   214  		t.Fatal(err)
   215  	}
   216  
   217  	// update on first period
   218  	fwdBlocks(int(resourceFrequency/2), backend)
   219  	data = []byte(updates[1])
   220  	resourcekey[updates[1]], err = rh.Update(ctx, safeName, data)
   221  	if err != nil {
   222  		t.Fatal(err)
   223  	}
   224  
   225  	// update on second period
   226  	fwdBlocks(int(resourceFrequency), backend)
   227  	data = []byte(updates[2])
   228  	resourcekey[updates[2]], err = rh.Update(ctx, safeName, data)
   229  	if err != nil {
   230  		t.Fatal(err)
   231  	}
   232  
   233  	// update just after second period
   234  	fwdBlocks(1, backend)
   235  	data = []byte(updates[3])
   236  	resourcekey[updates[3]], err = rh.Update(ctx, safeName, data)
   237  	if err != nil {
   238  		t.Fatal(err)
   239  	}
   240  	time.Sleep(time.Second)
   241  	rh.Close()
   242  
   243  	// check we can retrieve the updates after close
   244  	// it will match on second iteration startblocknumber + (resourceFrequency * 3)
   245  	fwdBlocks(int(resourceFrequency*2)-1, backend)
   246  
   247  	rhparams := &HandlerParams{
   248  		QueryMaxPeriods: &LookupParams{
   249  			Limit: false,
   250  		},
   251  		Signer:       nil,
   252  		HeaderGetter: rh.headerGetter,
   253  	}
   254  
   255  	rh2, err := NewTestHandler(datadir, rhparams)
   256  	if err != nil {
   257  		t.Fatal(err)
   258  	}
   259  	rsrc2, err := rh2.Load(rootChunkKey)
   260  	_, err = rh2.LookupLatest(ctx, nameHash, true, nil)
   261  	if err != nil {
   262  		t.Fatal(err)
   263  	}
   264  
   265  	// last update should be "clyde", version two, blockheight startblocknumber + (resourcefrequency * 3)
   266  	if !bytes.Equal(rsrc2.data, []byte(updates[len(updates)-1])) {
   267  		t.Fatalf("resource data was %v, expected %v", rsrc2.data, updates[len(updates)-1])
   268  	}
   269  	if rsrc2.version != 2 {
   270  		t.Fatalf("resource version was %d, expected 2", rsrc2.version)
   271  	}
   272  	if rsrc2.lastPeriod != 3 {
   273  		t.Fatalf("resource period was %d, expected 3", rsrc2.lastPeriod)
   274  	}
   275  	log.Debug("Latest lookup", "period", rsrc2.lastPeriod, "version", rsrc2.version, "data", rsrc2.data)
   276  
   277  	// specific block, latest version
   278  	rsrc, err := rh2.LookupHistorical(ctx, nameHash, 3, true, rh2.queryMaxPeriods)
   279  	if err != nil {
   280  		t.Fatal(err)
   281  	}
   282  	// check data
   283  	if !bytes.Equal(rsrc.data, []byte(updates[len(updates)-1])) {
   284  		t.Fatalf("resource data (historical) was %v, expected %v", rsrc2.data, updates[len(updates)-1])
   285  	}
   286  	log.Debug("Historical lookup", "period", rsrc2.lastPeriod, "version", rsrc2.version, "data", rsrc2.data)
   287  
   288  	// specific block, specific version
   289  	rsrc, err = rh2.LookupVersion(ctx, nameHash, 3, 1, true, rh2.queryMaxPeriods)
   290  	if err != nil {
   291  		t.Fatal(err)
   292  	}
   293  	// check data
   294  	if !bytes.Equal(rsrc.data, []byte(updates[2])) {
   295  		t.Fatalf("resource data (historical) was %v, expected %v", rsrc2.data, updates[2])
   296  	}
   297  	log.Debug("Specific version lookup", "period", rsrc2.lastPeriod, "version", rsrc2.version, "data", rsrc2.data)
   298  
   299  	// we are now at third update
   300  	// check backwards stepping to the first
   301  	for i := 1; i >= 0; i-- {
   302  		rsrc, err := rh2.LookupPreviousByName(ctx, safeName, rh2.queryMaxPeriods)
   303  		if err != nil {
   304  			t.Fatal(err)
   305  		}
   306  		if !bytes.Equal(rsrc.data, []byte(updates[i])) {
   307  			t.Fatalf("resource data (previous) was %v, expected %v", rsrc2.data, updates[i])
   308  
   309  		}
   310  	}
   311  
   312  	// beyond the first should yield an error
   313  	rsrc, err = rh2.LookupPreviousByName(ctx, safeName, rh2.queryMaxPeriods)
   314  	if err == nil {
   315  		t.Fatalf("expeected previous to fail, returned period %d version %d data %v", rsrc2.lastPeriod, rsrc2.version, rsrc2.data)
   316  	}
   317  
   318  }
   319  
   320  // create ENS enabled resource update, with and without valid owner
   321  func TestENSOwner(t *testing.T) {
   322  
   323  	// signer containing private key
   324  	signer, err := newTestSigner()
   325  	if err != nil {
   326  		t.Fatal(err)
   327  	}
   328  
   329  	// ens address and transact options
   330  	addr := crypto.PubkeyToAddress(signer.PrivKey.PublicKey)
   331  	transactOpts := bind.NewKeyedTransactor(signer.PrivKey)
   332  
   333  	// set up ENS sim
   334  	domainparts := strings.Split(safeName, ".")
   335  	contractAddr, contractbackend, err := setupENS(addr, transactOpts, domainparts[0], domainparts[1])
   336  	if err != nil {
   337  		t.Fatal(err)
   338  	}
   339  
   340  	ensClient, err := ens.NewENS(transactOpts, contractAddr, contractbackend)
   341  	if err != nil {
   342  		t.Fatal(err)
   343  	}
   344  
   345  	// set up rpc and create resourcehandler with ENS sim backend
   346  	rh, _, teardownTest, err := setupTest(contractbackend, ensClient, signer)
   347  	if err != nil {
   348  		t.Fatal(err)
   349  	}
   350  	defer teardownTest()
   351  
   352  	// create new resource when we are owner = ok
   353  	ctx, cancel := context.WithCancel(context.Background())
   354  	defer cancel()
   355  	_, _, err = rh.New(ctx, safeName, resourceFrequency)
   356  	if err != nil {
   357  		t.Fatalf("Create resource fail: %v", err)
   358  	}
   359  
   360  	data := []byte("foo")
   361  	// update resource when we are owner = ok
   362  	_, err = rh.Update(ctx, safeName, data)
   363  	if err != nil {
   364  		t.Fatalf("Update resource fail: %v", err)
   365  	}
   366  
   367  	// update resource when we are not owner = !ok
   368  	signertwo, err := newTestSigner()
   369  	if err != nil {
   370  		t.Fatal(err)
   371  	}
   372  	rh.signer = signertwo
   373  	_, err = rh.Update(ctx, safeName, data)
   374  	if err == nil {
   375  		t.Fatalf("Expected resource update fail due to owner mismatch")
   376  	}
   377  }
   378  
// TestMultihash checks that multihash-encoded payloads round-trip through
// resource updates, that malformed multihashes are rejected, and that the
// same flow works with a signing handler.
func TestMultihash(t *testing.T) {

	// signer containing private key
	signer, err := newTestSigner()
	if err != nil {
		t.Fatal(err)
	}

	// make fake backend, set up rpc and create resourcehandler
	backend := &fakeBackend{
		blocknumber: int64(startBlock),
	}

	// set up rpc and create resourcehandler (unsigned; signer is used later for rh2)
	rh, datadir, teardownTest, err := setupTest(backend, nil, nil)
	if err != nil {
		t.Fatal(err)
	}
	defer teardownTest()

	// create a new resource
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()
	_, _, err = rh.New(ctx, safeName, resourceFrequency)
	if err != nil {
		t.Fatal(err)
	}

	// we're naïvely assuming keccak256 for swarm hashes
	// if it ever changes this test should also change
	multihashbytes := ens.EnsNode("foo")
	multihashmulti := multihash.ToMultihash(multihashbytes.Bytes())
	multihashkey, err := rh.UpdateMultihash(ctx, safeName, multihashmulti)
	if err != nil {
		t.Fatal(err)
	}

	// a second multihash payload (all-zero digest of standard length)
	sha1bytes := make([]byte, multihash.MultihashLength)
	sha1multi := multihash.ToMultihash(sha1bytes)
	sha1key, err := rh.UpdateMultihash(ctx, safeName, sha1multi)
	if err != nil {
		t.Fatal(err)
	}

	// invalid multihashes: truncating either end must be rejected
	_, err = rh.UpdateMultihash(ctx, safeName, multihashmulti[1:])
	if err == nil {
		t.Fatalf("Expected update to fail with first byte skipped")
	}
	_, err = rh.UpdateMultihash(ctx, safeName, multihashmulti[:len(multihashmulti)-2])
	if err == nil {
		t.Fatalf("Expected update to fail with last byte skipped")
	}

	// retrieve the first update directly from the store and decode it
	data, err := getUpdateDirect(rh, multihashkey)
	if err != nil {
		t.Fatal(err)
	}
	multihashdecode, err := multihash.FromMultihash(data)
	if err != nil {
		t.Fatal(err)
	}
	if !bytes.Equal(multihashdecode, multihashbytes.Bytes()) {
		t.Fatalf("Decoded hash '%x' does not match original hash '%x'", multihashdecode, multihashbytes.Bytes())
	}
	// same round-trip for the second payload
	data, err = getUpdateDirect(rh, sha1key)
	if err != nil {
		t.Fatal(err)
	}
	shadecode, err := multihash.FromMultihash(data)
	if err != nil {
		t.Fatal(err)
	}
	if !bytes.Equal(shadecode, sha1bytes) {
		t.Fatalf("Decoded hash '%x' does not match original hash '%x'", shadecode, sha1bytes)
	}
	rh.Close()

	// second handler reuses the same datadir but signs its updates
	rhparams := &HandlerParams{
		QueryMaxPeriods: &LookupParams{
			Limit: false,
		},
		Signer:         signer,
		HeaderGetter:   rh.headerGetter,
		OwnerValidator: rh.ownerValidator,
	}
	// test with signed data
	rh2, err := NewTestHandler(datadir, rhparams)
	if err != nil {
		t.Fatal(err)
	}
	_, _, err = rh2.New(ctx, safeName, resourceFrequency)
	if err != nil {
		t.Fatal(err)
	}
	multihashsignedkey, err := rh2.UpdateMultihash(ctx, safeName, multihashmulti)
	if err != nil {
		t.Fatal(err)
	}
	sha1signedkey, err := rh2.UpdateMultihash(ctx, safeName, sha1multi)
	if err != nil {
		t.Fatal(err)
	}

	// signed updates must round-trip identically to the unsigned ones
	data, err = getUpdateDirect(rh2, multihashsignedkey)
	if err != nil {
		t.Fatal(err)
	}
	multihashdecode, err = multihash.FromMultihash(data)
	if err != nil {
		t.Fatal(err)
	}
	if !bytes.Equal(multihashdecode, multihashbytes.Bytes()) {
		t.Fatalf("Decoded hash '%x' does not match original hash '%x'", multihashdecode, multihashbytes.Bytes())
	}
	data, err = getUpdateDirect(rh2, sha1signedkey)
	if err != nil {
		t.Fatal(err)
	}
	shadecode, err = multihash.FromMultihash(data)
	if err != nil {
		t.Fatal(err)
	}
	if !bytes.Equal(shadecode, sha1bytes) {
		t.Fatalf("Decoded hash '%x' does not match original hash '%x'", shadecode, sha1bytes)
	}
}
   506  
   507  func TestChunkValidator(t *testing.T) {
   508  	// signer containing private key
   509  	signer, err := newTestSigner()
   510  	if err != nil {
   511  		t.Fatal(err)
   512  	}
   513  
   514  	// ens address and transact options
   515  	addr := crypto.PubkeyToAddress(signer.PrivKey.PublicKey)
   516  	transactOpts := bind.NewKeyedTransactor(signer.PrivKey)
   517  
   518  	// set up ENS sim
   519  	domainparts := strings.Split(safeName, ".")
   520  	contractAddr, contractbackend, err := setupENS(addr, transactOpts, domainparts[0], domainparts[1])
   521  	if err != nil {
   522  		t.Fatal(err)
   523  	}
   524  
   525  	ensClient, err := ens.NewENS(transactOpts, contractAddr, contractbackend)
   526  	if err != nil {
   527  		t.Fatal(err)
   528  	}
   529  
   530  	// set up rpc and create resourcehandler with ENS sim backend
   531  	rh, _, teardownTest, err := setupTest(contractbackend, ensClient, signer)
   532  	if err != nil {
   533  		t.Fatal(err)
   534  	}
   535  	defer teardownTest()
   536  
   537  	// create new resource when we are owner = ok
   538  	ctx, cancel := context.WithCancel(context.Background())
   539  	defer cancel()
   540  	key, rsrc, err := rh.New(ctx, safeName, resourceFrequency)
   541  	if err != nil {
   542  		t.Fatalf("Create resource fail: %v", err)
   543  	}
   544  
   545  	data := []byte("foo")
   546  	key = rh.resourceHash(1, 1, rsrc.nameHash)
   547  	digest := rh.keyDataHash(key, data)
   548  	sig, err := rh.signer.Sign(digest)
   549  	if err != nil {
   550  		t.Fatalf("sign fail: %v", err)
   551  	}
   552  	chunk := newUpdateChunk(key, &sig, 1, 1, safeName, data, len(data))
   553  	if !rh.Validate(chunk.Addr, chunk.SData) {
   554  		t.Fatal("Chunk validator fail on update chunk")
   555  	}
   556  
   557  	ctx, cancel = context.WithTimeout(context.Background(), time.Second)
   558  	defer cancel()
   559  	startBlock, err := rh.getBlock(ctx, safeName)
   560  	if err != nil {
   561  		t.Fatal(err)
   562  	}
   563  	chunk = rh.newMetaChunk(safeName, startBlock, resourceFrequency)
   564  	if !rh.Validate(chunk.Addr, chunk.SData) {
   565  		t.Fatal("Chunk validator fail on metadata chunk")
   566  	}
   567  }
   568  
// tests that the content address validator correctly checks the data
// tests that resource update chunks are passed through content address validator
// the test checking the resource update validator internal correctness is found in resource_test.go
func TestValidator(t *testing.T) {

	// set up localstore
	datadir, err := ioutil.TempDir("", "storage-testresourcevalidator")
	if err != nil {
		t.Fatal(err)
	}
	defer os.RemoveAll(datadir)

	params := storage.NewDefaultLocalStoreParams()
	params.Init(datadir)
	store, err := storage.NewLocalStore(params, nil)
	if err != nil {
		t.Fatal(err)
	}

	// add content address validator and resource validator to validators and check puts
	// bad should fail, good should pass
	store.Validators = append(store.Validators, storage.NewContentAddressValidator(hashfunc))
	rhParams := &HandlerParams{}
	rh, err := NewHandler(rhParams)
	if err != nil {
		t.Fatal(err)
	}
	store.Validators = append(store.Validators, rh)

	// goodChunk: address matches content; badChunk: address for other content;
	// uglyChunk: a resource update chunk (address does not match content hash)
	chunks := storage.GenerateRandomChunks(storage.DefaultChunkSize, 2)
	goodChunk := chunks[0]
	badChunk := chunks[1]
	badChunk.SData = goodChunk.SData
	key := rh.resourceHash(42, 1, ens.EnsNode("xyzzy.eth"))
	data := []byte("bar")
	uglyChunk := newUpdateChunk(key, nil, 42, 1, "xyzzy.eth", data, len(data))

	storage.PutChunks(store, goodChunk, badChunk, uglyChunk)
	if err := goodChunk.GetErrored(); err != nil {
		t.Fatalf("expected no error on good content address chunk with both validators, but got: %s", err)
	}
	if err := badChunk.GetErrored(); err == nil {
		t.Fatal("expected error on bad chunk address with both validators, but got nil")
	}
	if err := uglyChunk.GetErrored(); err != nil {
		t.Fatalf("expected no error on resource update chunk with both validators, but got: %s", err)
	}

	// (redundant check)
	// use only resource validator, and check puts
	// bad should fail, good should fail, resource should pass
	// (keeps only the resource handler validator, dropping the content address one)
	store.Validators[0] = store.Validators[1]
	store.Validators = store.Validators[:1]

	chunks = storage.GenerateRandomChunks(storage.DefaultChunkSize, 2)
	goodChunk = chunks[0]
	badChunk = chunks[1]
	badChunk.SData = goodChunk.SData

	key = rh.resourceHash(42, 2, ens.EnsNode("xyzzy.eth"))
	data = []byte("baz")
	uglyChunk = newUpdateChunk(key, nil, 42, 2, "xyzzy.eth", data, len(data))

	storage.PutChunks(store, goodChunk, badChunk, uglyChunk)
	if goodChunk.GetErrored() == nil {
		t.Fatal("expected error on good content address chunk with resource validator only, but got nil")
	}
	if badChunk.GetErrored() == nil {
		t.Fatal("expected error on bad content address chunk with resource validator only, but got nil")
	}
	if err := uglyChunk.GetErrored(); err != nil {
		t.Fatalf("expected no error on resource update chunk with resource validator only, but got: %s", err)
	}
}
   643  
   644  // fast-forward blockheight
   645  func fwdBlocks(count int, backend *fakeBackend) {
   646  	for i := 0; i < count; i++ {
   647  		backend.Commit()
   648  	}
   649  }
   650  
// ensOwnerValidator adapts an ENS client to the handler's ownerValidator
// interface by looking up domain ownership in the registry.
type ensOwnerValidator struct {
	*ens.ENS // embedded ENS client providing Owner lookups
}
   654  
   655  func (e ensOwnerValidator) ValidateOwner(name string, address common.Address) (bool, error) {
   656  	addr, err := e.Owner(ens.EnsNode(name))
   657  	if err != nil {
   658  		return false, err
   659  	}
   660  	return address == addr, nil
   661  }
   662  
   663  // create rpc and resourcehandler
   664  func setupTest(backend headerGetter, ensBackend *ens.ENS, signer Signer) (rh *Handler, datadir string, teardown func(), err error) {
   665  
   666  	var fsClean func()
   667  	var rpcClean func()
   668  	cleanF = func() {
   669  		if fsClean != nil {
   670  			fsClean()
   671  		}
   672  		if rpcClean != nil {
   673  			rpcClean()
   674  		}
   675  	}
   676  
   677  	// temp datadir
   678  	datadir, err = ioutil.TempDir("", "rh")
   679  	if err != nil {
   680  		return nil, "", nil, err
   681  	}
   682  	fsClean = func() {
   683  		os.RemoveAll(datadir)
   684  	}
   685  
   686  	var ov ownerValidator
   687  	if ensBackend != nil {
   688  		ov = ensOwnerValidator{ensBackend}
   689  	}
   690  
   691  	rhparams := &HandlerParams{
   692  		QueryMaxPeriods: &LookupParams{
   693  			Limit: false,
   694  		},
   695  		Signer:         signer,
   696  		HeaderGetter:   backend,
   697  		OwnerValidator: ov,
   698  	}
   699  	rh, err = NewTestHandler(datadir, rhparams)
   700  	return rh, datadir, cleanF, err
   701  }
   702  
   703  // Set up simulated ENS backend for use with ENSHandler tests
   704  func setupENS(addr common.Address, transactOpts *bind.TransactOpts, sub string, top string) (common.Address, *fakeBackend, error) {
   705  
   706  	// create the domain hash values to pass to the ENS contract methods
   707  	var tophash [32]byte
   708  	var subhash [32]byte
   709  
   710  	testHasher.Reset()
   711  	testHasher.Write([]byte(top))
   712  	copy(tophash[:], testHasher.Sum(nil))
   713  	testHasher.Reset()
   714  	testHasher.Write([]byte(sub))
   715  	copy(subhash[:], testHasher.Sum(nil))
   716  
   717  	// initialize contract backend and deploy
   718  	contractBackend := &fakeBackend{
   719  		SimulatedBackend: backends.NewSimulatedBackend(core.GenesisAlloc{addr: {Balance: big.NewInt(1000000000)}}),
   720  	}
   721  
   722  	contractAddress, _, ensinstance, err := contract.DeployENS(transactOpts, contractBackend)
   723  	if err != nil {
   724  		return zeroAddr, nil, fmt.Errorf("can't deploy: %v", err)
   725  	}
   726  
   727  	// update the registry for the correct owner address
   728  	if _, err = ensinstance.SetOwner(transactOpts, [32]byte{}, addr); err != nil {
   729  		return zeroAddr, nil, fmt.Errorf("can't setowner: %v", err)
   730  	}
   731  	contractBackend.Commit()
   732  
   733  	if _, err = ensinstance.SetSubnodeOwner(transactOpts, [32]byte{}, tophash, addr); err != nil {
   734  		return zeroAddr, nil, fmt.Errorf("can't register top: %v", err)
   735  	}
   736  	contractBackend.Commit()
   737  
   738  	if _, err = ensinstance.SetSubnodeOwner(transactOpts, ens.EnsNode(top), subhash, addr); err != nil {
   739  		return zeroAddr, nil, fmt.Errorf("can't register top: %v", err)
   740  	}
   741  	contractBackend.Commit()
   742  
   743  	return contractAddress, contractBackend, nil
   744  }
   745  
   746  func newTestSigner() (*GenericSigner, error) {
   747  	privKey, err := crypto.GenerateKey()
   748  	if err != nil {
   749  		return nil, err
   750  	}
   751  	return &GenericSigner{
   752  		PrivKey: privKey,
   753  	}, nil
   754  }
   755  
   756  func getUpdateDirect(rh *Handler, addr storage.Address) ([]byte, error) {
   757  	chunk, err := rh.chunkStore.Get(addr)
   758  	if err != nil {
   759  		return nil, err
   760  	}
   761  	_, _, _, _, data, _, err := rh.parseUpdate(chunk.SData)
   762  	if err != nil {
   763  		return nil, err
   764  	}
   765  	return data, nil
   766  }