github.com/gobitfly/go-ethereum@v1.8.12/swarm/storage/mru/resource.go

     1  // Copyright 2018 The go-ethereum Authors
     2  // This file is part of the go-ethereum library.
     3  //
     4  // The go-ethereum library is free software: you can redistribute it and/or modify
     5  // it under the terms of the GNU Lesser General Public License as published by
     6  // the Free Software Foundation, either version 3 of the License, or
     7  // (at your option) any later version.
     8  //
     9  // The go-ethereum library is distributed in the hope that it will be useful,
    10  // but WITHOUT ANY WARRANTY; without even the implied warranty of
    11  // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
    12  // GNU Lesser General Public License for more details.
    13  //
    14  // You should have received a copy of the GNU Lesser General Public License
    15  // along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
    16  
    17  package mru
    18  
    19  import (
    20  	"bytes"
    21  	"context"
    22  	"encoding/binary"
    23  	"errors"
    24  	"fmt"
    25  	"math/big"
    26  	"path/filepath"
    27  	"sync"
    28  	"time"
    29  
    30  	"golang.org/x/net/idna"
    31  
    32  	"github.com/ethereum/go-ethereum/common"
    33  	"github.com/ethereum/go-ethereum/contracts/ens"
    34  	"github.com/ethereum/go-ethereum/core/types"
    35  	"github.com/ethereum/go-ethereum/crypto"
    36  	"github.com/ethereum/go-ethereum/swarm/log"
    37  	"github.com/ethereum/go-ethereum/swarm/multihash"
    38  	"github.com/ethereum/go-ethereum/swarm/storage"
    39  )
    40  
    41  const (
    42  	signatureLength         = 65
    43  	metadataChunkOffsetSize = 18
    44  	DbDirName               = "resource"
    45  	chunkSize               = 4096 // temporary until we implement FileStore in the resourcehandler
    46  	defaultStoreTimeout     = 4000 * time.Millisecond
    47  	hasherCount             = 8
    48  	resourceHash            = storage.SHA3Hash
    49  	defaultRetrieveTimeout  = 100 * time.Millisecond
    50  )
    51  
    52  type blockEstimator struct {
    53  	Start   time.Time
    54  	Average time.Duration
    55  }
    56  
    57  // TODO: Average must be adjusted when a blockchain connection is present and synced
    58  func NewBlockEstimator() *blockEstimator {
    59  	sampleDate, _ := time.Parse(time.RFC3339, "2018-05-04T20:35:22Z")   // from etherscan.io
    60  	sampleBlock := int64(3169691)                                       // from etherscan.io
    61  	ropstenStart, _ := time.Parse(time.RFC3339, "2016-11-20T11:48:50Z") // from etherscan.io
    62  	ns := sampleDate.Sub(ropstenStart).Nanoseconds()
    63  	period := int(ns / sampleBlock)
    64  	parsestring := fmt.Sprintf("%dns", int(float64(period)*1.0005)) // pad the period length a little so the estimate doesn't overshoot the real block height; if it does, we will never find the updates when getting synced data
    65  	periodNs, _ := time.ParseDuration(parsestring)
    66  	return &blockEstimator{
    67  		Start:   ropstenStart,
    68  		Average: periodNs,
    69  	}
    70  }
    71  
    72  func (b *blockEstimator) HeaderByNumber(context.Context, string, *big.Int) (*types.Header, error) {
    73  	return &types.Header{
    74  		Number: big.NewInt(time.Since(b.Start).Nanoseconds() / b.Average.Nanoseconds()),
    75  	}, nil
    76  }
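
        // Editor's illustrative sketch, not part of the original file: because blockEstimator
        // implements the headerGetter interface used by the Handler, it can stand in for a real
        // blockchain connection, e.g. in tests or offline setups. The constructor name and the
        // idea of passing the signer straight through are assumptions for illustration only.
        func newEstimatedHandler(signer Signer) (*Handler, error) {
        	return NewHandler(&HandlerParams{
        		Signer:       signer,
        		HeaderGetter: NewBlockEstimator(), // block height estimated from wall-clock time
        	})
        }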
    77  
    78  type Error struct {
    79  	code int
    80  	err  string
    81  }
    82  
    83  func (e *Error) Error() string {
    84  	return e.err
    85  }
    86  
    87  func (e *Error) Code() int {
    88  	return e.code
    89  }
    90  
    91  func NewError(code int, s string) error {
    92  	if code < 0 || code >= ErrCnt {
    93  		panic("no such error code!")
    94  	}
    95  	r := &Error{
    96  		err: s,
    97  	}
    98  	switch code {
    99  	case ErrNotFound, ErrIO, ErrUnauthorized, ErrInvalidValue, ErrDataOverflow, ErrNothingToReturn, ErrInvalidSignature, ErrNotSynced, ErrPeriodDepth, ErrCorruptData:
   100  		r.code = code
   101  	}
   102  	return r
   103  }
   104  
   105  type Signature [signatureLength]byte
   106  
   107  type LookupParams struct {
   108  	Limit bool
   109  	Max   uint32
   110  }
   111  
   112  // Encapsulates a specific resource update. When synced it contains the most recent
   113  // version of the resource update data.
   114  type resource struct {
   115  	*bytes.Reader
   116  	Multihash  bool
   117  	name       string
   118  	nameHash   common.Hash
   119  	startBlock uint64
   120  	lastPeriod uint32
   121  	lastKey    storage.Address
   122  	frequency  uint64
   123  	version    uint32
   124  	data       []byte
   125  	updated    time.Time
   126  }
   127  
   128  // TODO Expire content after a defined period (to force resync)
   129  func (r *resource) isSynced() bool {
   130  	return !r.updated.IsZero()
   131  }
   132  
   133  func (r *resource) NameHash() common.Hash {
   134  	return r.nameHash
   135  }
   136  
   137  func (r *resource) Size(chan bool) (int64, error) {
   138  	if !r.isSynced() {
   139  		return 0, NewError(ErrNotSynced, "Not synced")
   140  	}
   141  	return int64(len(r.data)), nil
   142  }
   143  
   144  func (r *resource) Name() string {
   145  	return r.name
   146  }
   147  
   148  func (r *resource) UnmarshalBinary(data []byte) error {
   149  	r.startBlock = binary.LittleEndian.Uint64(data[:8])
   150  	r.frequency = binary.LittleEndian.Uint64(data[8:16])
   151  	r.name = string(data[16:])
   152  	return nil
   153  }
   154  
   155  func (r *resource) MarshalBinary() ([]byte, error) {
   156  	b := make([]byte, 16+len(r.name))
   157  	binary.LittleEndian.PutUint64(b, r.startBlock)
   158  	binary.LittleEndian.PutUint64(b[8:], r.frequency)
   159  	copy(b[16:], []byte(r.name))
   160  	return b, nil
   161  }
   162  
   163  type headerGetter interface {
   164  	HeaderByNumber(context.Context, string, *big.Int) (*types.Header, error)
   165  }
   166  
   167  type ownerValidator interface {
   168  	ValidateOwner(name string, address common.Address) (bool, error)
   169  }
   170  
   171  // Mutable resource is an entity which allows updates to a resource
   172  // without resorting to ENS on each update.
   173  // The update scheme is built on swarm chunks with chunk keys following
   174  // a predictable, versionable pattern.
   175  //
   176  // Updates are defined to be periodic in nature, where periods are
   177  // expressed in terms of number of blocks.
   178  //
   179  // The root entry of a mutable resource is tied to a unique identifier,
   180  // typically - but not necessarily - an ENS name. The identifier must be
   181  // a valid IDNA string. It also contains the block number
   182  // when the resource update was first registered, and
   183  // the block frequency with which the resource will be updated, both of
   184  // which are stored as little-endian uint64 values in the database (for a
   185  // total of 16 bytes). It also contains the unique identifier.
   186  // It is stored in a separate content-addressed chunk (call it the metadata chunk),
   187  // with the following layout:
   188  //
   189  // (0x0000|startblock|frequency|identifier)
   190  //
   191  // (The first two zero-value bytes are used for disambiguation by the chunk validator;
   192  // an update chunk will always have a value > 0 there.)
   193  //
   194  // The root entry tells the requester when the mutable resource was
   195  // first added (block number) and at which block numbers to look for the
   196  // actual updates. Thus, a resource update for identifier "føø.bar"
   197  // starting at block 4200 with frequency 42 will have updates on block 4242,
   198  // 4284, 4326 and so on.
   199  //
   200  // Actual data updates are also made in the form of swarm chunks. The keys
   201  // of the updates are the hash of a concatenation of properties as follows:
   202  //
   203  // H(period|version|namehash), where H is the handler's hash function (storage.SHA3Hash, i.e. keccak256)
   204  //
   205  // The period is (currentblock - startblock) / frequency + 1; periods are counted from 1
   206  //
   207  // Using our previous example, this means that an update registered at block 4326
   208  // falls within period 4.
   209  //
   210  // If more than one update is made within the same period, incremental
   211  // version numbers are used successively.
   212  //
   213  // A lookup agent need only know the identifier name in order to get the versions.
   214  //
   215  // the resourcedata is:
   216  // headerlength|datalength|period|version|identifier|data
   217  //
   218  // if a validator is active, the chunk data is:
   219  // resourcedata|sign(resourcedata)
   220  // otherwise, the chunk data is the same as the resourcedata
   221  //
   222  // headerlength is a 16-bit value containing the byte length of period|version|name;
        // datalength is a 16-bit value containing the byte length of data, or 0 if the data is a multihash
        //
        // (An illustrative, non-normative example of this key derivation appears after the Handler type below.)
   223  //
   224  // TODO: Include modtime in chunk data + signature
   225  type Handler struct {
   226  	chunkStore      *storage.NetStore
   227  	HashSize        int
   228  	signer          Signer
   229  	headerGetter    headerGetter
   230  	ownerValidator  ownerValidator
   231  	resources       map[string]*resource
   232  	hashPool        sync.Pool
   233  	resourceLock    sync.RWMutex
   234  	storeTimeout    time.Duration
   235  	queryMaxPeriods *LookupParams
   236  }
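
        // Editor's illustrative sketch, not part of the original file: a worked example of the
        // addressing scheme described in the comment above. For a resource registered at block
        // 4200 with frequency 42, an update seen at block 4326 falls in period
        // (4326-4200)/42 + 1 = 4 (getNextPeriod counts periods from 1), and version 1 of that
        // period lives at the chunk address H(period|version|namehash). The function name and
        // the literal values are examples only.
        func exampleUpdateAddress(h *Handler) (storage.Address, error) {
        	name, err := ToSafeName("føø.bar") // identifiers must be valid IDNA strings
        	if err != nil {
        		return nil, err
        	}
        	period, err := getNextPeriod(4200, 4326, 42) // startBlock, currentBlock, frequency
        	if err != nil {
        		return nil, err
        	}
        	// the update chunk key is the hash of period|version|namehash
        	return h.resourceHash(period, 1, ens.EnsNode(name)), nil
        }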
   237  
   238  type HandlerParams struct {
   239  	QueryMaxPeriods *LookupParams
   240  	Signer          Signer
   241  	HeaderGetter    headerGetter
   242  	OwnerValidator  ownerValidator
   243  }
   244  
   245  // NewHandler creates a new mutable resource handler; the chunk store used for updates is set separately with SetStore
   246  func NewHandler(params *HandlerParams) (*Handler, error) {
   247  	if params.QueryMaxPeriods == nil {
   248  		params.QueryMaxPeriods = &LookupParams{
   249  			Limit: false,
   250  		}
   251  	}
   252  	rh := &Handler{
   253  		headerGetter:   params.HeaderGetter,
   254  		ownerValidator: params.OwnerValidator,
   255  		resources:      make(map[string]*resource),
   256  		storeTimeout:   defaultStoreTimeout,
   257  		signer:         params.Signer,
   258  		hashPool: sync.Pool{
   259  			New: func() interface{} {
   260  				return storage.MakeHashFunc(resourceHash)()
   261  			},
   262  		},
   263  		queryMaxPeriods: params.QueryMaxPeriods,
   264  	}
   265  
   266  	for i := 0; i < hasherCount; i++ {
   267  		hashfunc := storage.MakeHashFunc(resourceHash)()
   268  		if rh.HashSize == 0 {
   269  			rh.HashSize = hashfunc.Size()
   270  		}
   271  		rh.hashPool.Put(hashfunc)
   272  	}
   273  
   274  	return rh, nil
   275  }
   276  
   277  // SetStore sets the store backend for resource updates
   278  func (h *Handler) SetStore(store *storage.NetStore) {
   279  	h.chunkStore = store
   280  }
   281  
   282  // Validate is a chunk validation method, which lets the Handler be registered as a chunk validator on the store
   283  //
   284  // If the chunk is a signed resource update, the signer's address is checked against the owner of the resource name inferred from the chunk data
   285  // If the parsed signature is nil, the chunk is validated by recomputing its address from period, version and namehash
   286  // If the chunk is not a resource update, it is accepted as a root (metadata) chunk if it is longer than metadataChunkOffsetSize and its first two bytes are 0
   287  func (h *Handler) Validate(addr storage.Address, data []byte) bool {
   288  	signature, period, version, name, parseddata, _, err := h.parseUpdate(data)
   289  	if err != nil {
   290  		log.Warn(err.Error())
   291  		if len(data) > metadataChunkOffsetSize { // identifier comes after this byte range, and must be at least one byte
   292  			if bytes.Equal(data[:2], []byte{0, 0}) {
   293  				return true
   294  			}
   295  		}
   296  		log.Error("Invalid resource chunk")
   297  		return false
   298  	} else if signature == nil {
   299  		return bytes.Equal(h.resourceHash(period, version, ens.EnsNode(name)), addr)
   300  	}
   301  
   302  	digest := h.keyDataHash(addr, parseddata)
   303  	addrSig, err := getAddressFromDataSig(digest, *signature)
   304  	if err != nil {
   305  		log.Error("Invalid signature on resource chunk")
   306  		return false
   307  	}
   308  	ok, _ := h.checkAccess(name, addrSig)
   309  	return ok
   310  }
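
        // Editor's illustrative sketch, not part of the original file: since Validate above gives
        // the Handler the chunk-validator interface expected by the storage layer, it can be
        // registered on a LocalStore next to the plain content-address validator, just as
        // NewTestHandler does at the bottom of this file. The function name is an example only.
        func registerResourceValidator(localStore *storage.LocalStore, rh *Handler) {
        	localStore.Validators = append(localStore.Validators,
        		storage.NewContentAddressValidator(storage.MakeHashFunc(resourceHash)),
        		rh,
        	)
        }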
   311  
   312  // If no owner validator is supplied, resource updates are not validated
   313  func (h *Handler) IsValidated() bool {
   314  	return h.ownerValidator != nil
   315  }
   316  
   317  // Create the resource update digest used in signatures
   318  func (h *Handler) keyDataHash(addr storage.Address, data []byte) common.Hash {
   319  	hasher := h.hashPool.Get().(storage.SwarmHash)
   320  	defer h.hashPool.Put(hasher)
   321  	hasher.Reset()
   322  	hasher.Write(addr[:])
   323  	hasher.Write(data)
   324  	return common.BytesToHash(hasher.Sum(nil))
   325  }
   326  
   327  // Checks whether the given address matches the owner address of the ENS name
   328  func (h *Handler) checkAccess(name string, address common.Address) (bool, error) {
   329  	if h.ownerValidator == nil {
   330  		return true, nil
   331  	}
   332  	return h.ownerValidator.ValidateOwner(name, address)
   333  }
   334  
   335  // get the last chunk key and data of the currently loaded resource; the argument is the hex-encoded namehash used as index key
   336  func (h *Handler) GetContent(name string) (storage.Address, []byte, error) {
   337  	rsrc := h.get(name)
   338  	if rsrc == nil || !rsrc.isSynced() {
   339  		return nil, nil, NewError(ErrNotFound, " does not exist or is not synced")
   340  	}
   341  	return rsrc.lastKey, rsrc.data, nil
   342  }
   343  
   344  // Gets the period of the current data loaded in the resource
   345  func (h *Handler) GetLastPeriod(nameHash string) (uint32, error) {
   346  	rsrc := h.get(nameHash)
   347  	if rsrc == nil {
   348  		return 0, NewError(ErrNotFound, " does not exist")
   349  	} else if !rsrc.isSynced() {
   350  		return 0, NewError(ErrNotSynced, " is not synced")
   351  	}
   352  	return rsrc.lastPeriod, nil
   353  }
   354  
   355  // Gets the version of the current data loaded in the resource
   356  func (h *Handler) GetVersion(nameHash string) (uint32, error) {
   357  	rsrc := h.get(nameHash)
   358  	if rsrc == nil {
   359  		return 0, NewError(ErrNotFound, " does not exist")
   360  	} else if !rsrc.isSynced() {
   361  		return 0, NewError(ErrNotSynced, " is not synced")
   362  	}
   363  	return rsrc.version, nil
   364  }
   365  
   366  // \TODO should be hashsize * branches from the chosen chunker, implement with FileStore
   367  func (h *Handler) chunkSize() int64 {
   368  	return chunkSize
   369  }
   370  
   371  // Creates a new root entry for a mutable resource identified by `name` with the specified `frequency`.
   372  //
   373  // The signature data should match the hash of the idna-converted name as produced by the validator's namehash function, NOT the raw name bytes.
   374  //
   375  // The start block of the resource update will be the actual current block height of the connected network.
   376  func (h *Handler) New(ctx context.Context, name string, frequency uint64) (storage.Address, *resource, error) {
   377  
   378  	// frequency 0 is invalid
   379  	if frequency == 0 {
   380  		return nil, nil, NewError(ErrInvalidValue, "Frequency cannot be 0")
   381  	}
   382  
   383  	// make sure name only contains ascii values
   384  	if !isSafeName(name) {
   385  		return nil, nil, NewError(ErrInvalidValue, fmt.Sprintf("Invalid name: '%s'", name))
   386  	}
   387  
   388  	nameHash := ens.EnsNode(name)
   389  
   390  	// if the signer function is set, validate that the key of the signer has access to modify this ENS name
   391  	if h.signer != nil {
   392  		signature, err := h.signer.Sign(nameHash)
   393  		if err != nil {
   394  			return nil, nil, NewError(ErrInvalidSignature, fmt.Sprintf("Sign fail: %v", err))
   395  		}
   396  		addr, err := getAddressFromDataSig(nameHash, signature)
   397  		if err != nil {
   398  			return nil, nil, NewError(ErrInvalidSignature, fmt.Sprintf("Retrieve address from signature fail: %v", err))
   399  		}
   400  		ok, err := h.checkAccess(name, addr)
   401  		if err != nil {
   402  			return nil, nil, err
   403  		} else if !ok {
   404  			return nil, nil, NewError(ErrUnauthorized, fmt.Sprintf("Not owner of '%s'", name))
   405  		}
   406  	}
   407  
   408  	// get our blockheight at this time
   409  	currentblock, err := h.getBlock(ctx, name)
   410  	if err != nil {
   411  		return nil, nil, err
   412  	}
   413  
   414  	chunk := h.newMetaChunk(name, currentblock, frequency)
   415  
   416  	h.chunkStore.Put(chunk)
   417  	log.Debug("new resource", "name", name, "key", nameHash, "startBlock", currentblock, "frequency", frequency)
   418  
   419  	// create the internal index for the resource and populate it with the data of the first version
   420  	rsrc := &resource{
   421  		startBlock: currentblock,
   422  		frequency:  frequency,
   423  		name:       name,
   424  		nameHash:   nameHash,
   425  		updated:    time.Now(),
   426  	}
   427  	h.set(nameHash.Hex(), rsrc)
   428  
   429  	return chunk.Addr, rsrc, nil
   430  }
   431  
   432  func (h *Handler) newMetaChunk(name string, startBlock uint64, frequency uint64) *storage.Chunk {
   433  	// the metadata chunk stores the start block height and the update frequency
   434  	// from these we know from which block height to look for updates, and how often
   435  	// it also contains the name of the resource, so we know which resource we are working with
   436  	data := make([]byte, metadataChunkOffsetSize+len(name))
   437  
   438  	// root block has first two bytes both set to 0, which distinguishes from update bytes
   439  	val := make([]byte, 8)
   440  	binary.LittleEndian.PutUint64(val, startBlock)
   441  	copy(data[2:10], val)
   442  	binary.LittleEndian.PutUint64(val, frequency)
   443  	copy(data[10:18], val)
   444  	copy(data[18:], []byte(name))
   445  
   446  	// the key of the metadata chunk is content-addressed
   447  	// if it wasn't we couldn't replace it later
   448  	// resolving this relationship is left up to external agents (for example ENS)
   449  	hasher := h.hashPool.Get().(storage.SwarmHash)
   450  	hasher.Reset()
   451  	hasher.Write(data)
   452  	key := hasher.Sum(nil)
   453  	h.hashPool.Put(hasher)
   454  
   455  	// make the chunk; the caller is responsible for sending it to swarm
   456  	chunk := storage.NewChunk(key, nil)
   457  	chunk.SData = make([]byte, metadataChunkOffsetSize+len(name))
   458  	copy(chunk.SData, data)
   459  	return chunk
   460  }
   461  
   462  // Searches and retrieves the specific version of the resource update identified by `name`
   463  // at the specified period
   464  //
   465  // If refresh is set to true, the resource data will be reloaded from the resource update
   466  // metadata chunk.
   467  // It is the caller's responsibility to make sure that this chunk exists (if the resource
   468  // update root data was retrieved externally, it typically doesn't)
   469  func (h *Handler) LookupVersionByName(ctx context.Context, name string, period uint32, version uint32, refresh bool, maxLookup *LookupParams) (*resource, error) {
   470  	return h.LookupVersion(ctx, ens.EnsNode(name), period, version, refresh, maxLookup)
   471  }
   472  
   473  func (h *Handler) LookupVersion(ctx context.Context, nameHash common.Hash, period uint32, version uint32, refresh bool, maxLookup *LookupParams) (*resource, error) {
   474  	rsrc := h.get(nameHash.Hex())
   475  	if rsrc == nil {
   476  		return nil, NewError(ErrNothingToReturn, "resource not loaded")
   477  	}
   478  	return h.lookup(rsrc, period, version, refresh, maxLookup)
   479  }
   480  
   481  // Retrieves the latest version of the resource update identified by `name`
   482  // at the specified period
   483  //
   484  // If an update is found, version numbers are iterated until failure, and the last
   485  // successfully retrieved version is copied to the corresponding resources map entry
   486  // and returned.
   487  //
   488  // See also (*Handler).LookupVersion
   489  func (h *Handler) LookupHistoricalByName(ctx context.Context, name string, period uint32, refresh bool, maxLookup *LookupParams) (*resource, error) {
   490  	return h.LookupHistorical(ctx, ens.EnsNode(name), period, refresh, maxLookup)
   491  }
   492  
   493  func (h *Handler) LookupHistorical(ctx context.Context, nameHash common.Hash, period uint32, refresh bool, maxLookup *LookupParams) (*resource, error) {
   494  	rsrc := h.get(nameHash.Hex())
   495  	if rsrc == nil {
   496  		return nil, NewError(ErrNothingToReturn, "resource not loaded")
   497  	}
   498  	return h.lookup(rsrc, period, 0, refresh, maxLookup)
   499  }
   500  
   501  // Retrieves the latest version of the resource update identified by `name`
   502  // at the latest update period relative to the current block height
   503  //
   504  // It starts at the next period after the current block height, and upon failure
   505  // tries the corresponding keys of each previous period until one is found
   506  // (or startBlock is reached, in which case there are no updates).
   507  //
   508  // Version iteration is done as in (*Handler).LookupHistorical
   509  //
   510  // See also (*Handler).LookupHistorical
   511  func (h *Handler) LookupLatestByName(ctx context.Context, name string, refresh bool, maxLookup *LookupParams) (*resource, error) {
   512  	return h.LookupLatest(ctx, ens.EnsNode(name), refresh, maxLookup)
   513  }
   514  
   515  func (h *Handler) LookupLatest(ctx context.Context, nameHash common.Hash, refresh bool, maxLookup *LookupParams) (*resource, error) {
   516  
   517  	// get our blockheight at this time and the next block of the update period
   518  	rsrc := h.get(nameHash.Hex())
   519  	if rsrc == nil {
   520  		return nil, NewError(ErrNothingToReturn, "resource not loaded")
   521  	}
   522  	currentblock, err := h.getBlock(ctx, rsrc.name)
   523  	if err != nil {
   524  		return nil, err
   525  	}
   526  	nextperiod, err := getNextPeriod(rsrc.startBlock, currentblock, rsrc.frequency)
   527  	if err != nil {
   528  		return nil, err
   529  	}
   530  	return h.lookup(rsrc, nextperiod, 0, refresh, maxLookup)
   531  }
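
        // Editor's illustrative sketch, not part of the original file: bounding how far back a
        // lookup may walk. With Limit set, the lookup gives up after Max period hops instead of
        // walking all the way back towards the resource's start block. Assumes the resource has
        // already been loaded into the index via New() or Load(); the function name and the
        // value 10 are examples only.
        func exampleBoundedLookup(ctx context.Context, h *Handler, name string) (*resource, error) {
        	return h.LookupLatestByName(ctx, name, true, &LookupParams{
        		Limit: true,
        		Max:   10, // give up after 10 periods without finding an update
        	})
        }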
   532  
   533  // Returns the resource before the one currently loaded in the resource index
   534  //
   535  // This is useful where resource updates are used incrementally in contrast to
   536  // merely replacing content.
   537  //
   538  // Requires a synced resource object
   539  func (h *Handler) LookupPreviousByName(ctx context.Context, name string, maxLookup *LookupParams) (*resource, error) {
   540  	return h.LookupPrevious(ctx, ens.EnsNode(name), maxLookup)
   541  }
   542  
   543  func (h *Handler) LookupPrevious(ctx context.Context, nameHash common.Hash, maxLookup *LookupParams) (*resource, error) {
   544  	rsrc := h.get(nameHash.Hex())
   545  	if rsrc == nil {
   546  		return nil, NewError(ErrNothingToReturn, "resource not loaded")
   547  	}
   548  	if !rsrc.isSynced() {
   549  		return nil, NewError(ErrNotSynced, "LookupPrevious requires synced resource.")
   550  	} else if rsrc.lastPeriod == 0 {
   551  		return nil, NewError(ErrNothingToReturn, " not found")
   552  	}
   553  	if rsrc.version > 1 {
   554  		rsrc.version--
   555  	} else if rsrc.lastPeriod == 1 {
   556  		return nil, NewError(ErrNothingToReturn, "Current update is the oldest")
   557  	} else {
   558  		rsrc.version = 0
   559  		rsrc.lastPeriod--
   560  	}
   561  	return h.lookup(rsrc, rsrc.lastPeriod, rsrc.version, false, maxLookup)
   562  }
   563  
   564  // base code for public lookup methods
   565  func (h *Handler) lookup(rsrc *resource, period uint32, version uint32, refresh bool, maxLookup *LookupParams) (*resource, error) {
   566  
   567  	// we can't look for anything without a store
   568  	if h.chunkStore == nil {
   569  		return nil, NewError(ErrInit, "Call Handler.SetStore() before performing lookups")
   570  	}
   571  
   572  	// period 0 does not exist
   573  	if period == 0 {
   574  		return nil, NewError(ErrInvalidValue, "period must be >0")
   575  	}
   576  
   577  	// start from the last possible block period, and iterate previous ones until we find a match
   578  	// if we hit startBlock we're out of options
   579  	var specificversion bool
   580  	if version > 0 {
   581  		specificversion = true
   582  	} else {
   583  		version = 1
   584  	}
   585  
   586  	var hops uint32
   587  	if maxLookup == nil {
   588  		maxLookup = h.queryMaxPeriods
   589  	}
   590  	log.Trace("resource lookup", "period", period, "version", version, "limit", maxLookup.Limit, "max", maxLookup.Max)
   591  	for period > 0 {
   592  		if maxLookup.Limit && hops > maxLookup.Max {
   593  			return nil, NewError(ErrPeriodDepth, fmt.Sprintf("Lookup exceeded max period hops (%d)", maxLookup.Max))
   594  		}
   595  		key := h.resourceHash(period, version, rsrc.nameHash)
   596  		chunk, err := h.chunkStore.GetWithTimeout(key, defaultRetrieveTimeout)
   597  		if err == nil {
   598  			if specificversion {
   599  				return h.updateIndex(rsrc, chunk)
   600  			}
   601  			// check if we have versions > 1. If a version fails, the previous version is used and returned.
   602  			log.Trace("rsrc update version 1 found, checking for version updates", "period", period, "key", key)
   603  			for {
   604  				newversion := version + 1
   605  				key := h.resourceHash(period, newversion, rsrc.nameHash)
   606  				newchunk, err := h.chunkStore.GetWithTimeout(key, defaultRetrieveTimeout)
   607  				if err != nil {
   608  					return h.updateIndex(rsrc, chunk)
   609  				}
   610  				chunk = newchunk
   611  				version = newversion
   612  				log.Trace("version update found, checking next", "version", version, "period", period, "key", key)
   613  			}
   614  		}
   615  		log.Trace("rsrc update not found, checking previous period", "period", period, "key", key)
   616  		period--
   617  		hops++
   618  	}
   619  	return nil, NewError(ErrNotFound, "no updates found")
   620  }
   621  
   622  // Retrieves a resource metadata chunk and creates/updates the index entry for it
   623  // with the resulting metadata
   624  func (h *Handler) Load(addr storage.Address) (*resource, error) {
   625  	chunk, err := h.chunkStore.GetWithTimeout(addr, defaultRetrieveTimeout)
   626  	if err != nil {
   627  		return nil, NewError(ErrNotFound, err.Error())
   628  	}
   629  
   630  	// minimum sanity check for chunk data (an update chunk's first two bytes are the headerlength uint16, which cannot be 0)
   631  	// \TODO this is not enough to make sure the data isn't bogus. A normal content addressed chunk could still satisfy these criteria
   632  	if !bytes.Equal(chunk.SData[:2], []byte{0x0, 0x0}) {
   633  		return nil, NewError(ErrCorruptData, "Chunk is not a resource metadata chunk")
   634  	} else if len(chunk.SData) <= metadataChunkOffsetSize {
   635  		return nil, NewError(ErrNothingToReturn, fmt.Sprintf("Invalid chunk length %d, should be minimum %d", len(chunk.SData), metadataChunkOffsetSize+1))
   636  	}
   637  
   638  	// create the index entry
   639  	rsrc := &resource{}
   640  	rsrc.UnmarshalBinary(chunk.SData[2:])
   641  	rsrc.nameHash = ens.EnsNode(rsrc.name)
   642  	h.set(rsrc.nameHash.Hex(), rsrc)
   643  	log.Trace("resource index load", "rootkey", addr, "name", rsrc.name, "namehash", rsrc.nameHash, "startblock", rsrc.startBlock, "frequency", rsrc.frequency)
   644  	return rsrc, nil
   645  }
   646  
   647  // update mutable resource index map with specified content
   648  func (h *Handler) updateIndex(rsrc *resource, chunk *storage.Chunk) (*resource, error) {
   649  
   650  	// retrieve metadata from chunk data and check that it matches this mutable resource
   651  	signature, period, version, name, data, multihash, err := h.parseUpdate(chunk.SData)
        	if err != nil {
        		return nil, err
        	}
   652  	if rsrc.name != name {
   653  		return nil, NewError(ErrNothingToReturn, fmt.Sprintf("Update belongs to '%s', but have '%s'", name, rsrc.name))
   654  	}
   655  	log.Trace("resource index update", "name", rsrc.name, "namehash", rsrc.nameHash, "updatekey", chunk.Addr, "period", period, "version", version)
   656  
   657  	// check signature (if signer algorithm is present)
   658  	// \TODO maybe this check is redundant if also checked upon retrieval of chunk
   659  	if signature != nil {
   660  		digest := h.keyDataHash(chunk.Addr, data)
   661  		_, err = getAddressFromDataSig(digest, *signature)
   662  		if err != nil {
   663  			return nil, NewError(ErrUnauthorized, fmt.Sprintf("Invalid signature: %v", err))
   664  		}
   665  	}
   666  
   667  	// update our rsrcs entry map
   668  	rsrc.lastKey = chunk.Addr
   669  	rsrc.lastPeriod = period
   670  	rsrc.version = version
   671  	rsrc.updated = time.Now()
   672  	rsrc.data = make([]byte, len(data))
   673  	rsrc.Multihash = multihash
   674  	rsrc.Reader = bytes.NewReader(rsrc.data)
   675  	copy(rsrc.data, data)
   676  	log.Debug(" synced", "name", rsrc.name, "key", chunk.Addr, "period", rsrc.lastPeriod, "version", rsrc.version)
   677  	h.set(rsrc.nameHash.Hex(), rsrc)
   678  	return rsrc, nil
   679  }
   680  
   681  // retrieve update metadata from chunk data
   682  // mirrors newUpdateChunk()
   683  func (h *Handler) parseUpdate(chunkdata []byte) (*Signature, uint32, uint32, string, []byte, bool, error) {
   684  	// absolute minimum an update chunk can contain:
   685  	// 14 = 12 bytes of header fields (2 x uint16 length fields + 2 x uint32 period/version) + one byte of name + one byte of data
   686  	if len(chunkdata) < 14 {
   687  		return nil, 0, 0, "", nil, false, NewError(ErrNothingToReturn, "chunk less than 14 bytes cannot be a resource update chunk")
   688  	}
   689  	cursor := 0
   690  	headerlength := binary.LittleEndian.Uint16(chunkdata[cursor : cursor+2])
   691  	cursor += 2
   692  	datalength := binary.LittleEndian.Uint16(chunkdata[cursor : cursor+2])
   693  	cursor += 2
   694  	var exclsignlength int
   695  	// we need extra magic if it's a multihash, since we used datalength 0 in header as an indicator of multihash content
   696  	// retrieve the second varint and set this as the data length
   697  	// TODO: merge with isMultihash code
   698  	if datalength == 0 {
   699  		uvarintbuf := bytes.NewBuffer(chunkdata[headerlength+4:])
   700  		r, err := binary.ReadUvarint(uvarintbuf)
   701  		if err != nil {
   702  			errstr := fmt.Sprintf("corrupt multihash, hash id varint could not be read: %v", err)
   703  			log.Warn(errstr)
   704  			return nil, 0, 0, "", nil, false, NewError(ErrCorruptData, errstr)
   705  
   706  		}
   707  		r, err = binary.ReadUvarint(uvarintbuf)
   708  		if err != nil {
   709  			errstr := fmt.Sprintf("corrupt multihash, hash length field could not be read: %v", err)
   710  			log.Warn(errstr)
   711  			return nil, 0, 0, "", nil, false, NewError(ErrCorruptData, errstr)
   712  
   713  		}
   714  		exclsignlength = int(headerlength + uint16(r))
   715  	} else {
   716  		exclsignlength = int(headerlength + datalength + 4)
   717  	}
   718  
   719  	// the total length excluding the signature is the two length fields (4 bytes) plus the header length and the data length they report
   720  	exclsignlength = int(headerlength + datalength + 4)
   721  	if exclsignlength > len(chunkdata) {
   722  		return nil, 0, 0, "", nil, false, NewError(ErrNothingToReturn, fmt.Sprintf("Reported headerlength %d + datalength %d longer than actual chunk data length %d", headerlength, datalength, len(chunkdata)))
   723  	} else if exclsignlength < 14 {
   724  		return nil, 0, 0, "", nil, false, NewError(ErrNothingToReturn, fmt.Sprintf("Reported headerlength %d + datalength %d is smaller than minimum valid resource chunk length %d", headerlength, datalength, 14))
   725  	}
   726  
   727  	// at this point we can be satisfied that the data integrity is ok
   728  	var period uint32
   729  	var version uint32
   730  	var name string
   731  	var data []byte
   732  	period = binary.LittleEndian.Uint32(chunkdata[cursor : cursor+4])
   733  	cursor += 4
   734  	version = binary.LittleEndian.Uint32(chunkdata[cursor : cursor+4])
   735  	cursor += 4
   736  	namelength := int(headerlength) - cursor + 4
   737  	if l := len(chunkdata); l < cursor+namelength {
   738  		return nil, 0, 0, "", nil, false, NewError(ErrNothingToReturn, fmt.Sprintf("chunk data length %d is too short to read the name (need at least %d bytes)", l, cursor+namelength))
   739  	}
   740  	name = string(chunkdata[cursor : cursor+namelength])
   741  	cursor += namelength
   742  
   743  	// if multihash content is indicated we check the validity of the multihash
   744  	// \TODO the check above for multihash probably is sufficient also for this case (or can be with a small adjustment) and if so this code should be removed
   745  	var intdatalength int
   746  	var ismultihash bool
   747  	if datalength == 0 {
   748  		var intheaderlength int
   749  		var err error
   750  		intdatalength, intheaderlength, err = multihash.GetMultihashLength(chunkdata[cursor:])
   751  		if err != nil {
   752  			log.Error("multihash parse error", "err", err)
   753  			return nil, 0, 0, "", nil, false, err
   754  		}
   755  		intdatalength += intheaderlength
   756  		multihashboundary := cursor + intdatalength
   757  		if len(chunkdata) != multihashboundary && len(chunkdata) < multihashboundary+signatureLength {
   758  			log.Debug("multihash error", "chunkdatalen", len(chunkdata), "multihashboundary", multihashboundary)
   759  			return nil, 0, 0, "", nil, false, errors.New("Corrupt multihash data")
   760  		}
   761  		ismultihash = true
   762  	} else {
   763  		intdatalength = int(datalength)
   764  	}
   765  	data = make([]byte, intdatalength)
   766  	copy(data, chunkdata[cursor:cursor+intdatalength])
   767  
   768  	// omit the signature if we have no signer
   769  	var signature *Signature
   770  	cursor += intdatalength
   771  	if h.signer != nil {
   772  		sigdata := chunkdata[cursor : cursor+signatureLength]
   773  		if len(sigdata) > 0 {
   774  			signature = &Signature{}
   775  			copy(signature[:], sigdata)
   776  		}
   777  	}
   778  
   779  	return signature, period, version, name, data, ismultihash, nil
   780  }
   781  
   782  // Adds an actual data update
   783  //
   784  // Uses the data currently loaded in the resources map entry.
   785  // It is the caller's responsibility to make sure that this data is not stale.
   786  //
   787  // A resource update cannot span chunks, and thus has max length 4096
   788  func (h *Handler) UpdateMultihash(ctx context.Context, name string, data []byte) (storage.Address, error) {
   789  	// \TODO perhaps this check should be in newUpdateChunk()
   790  	if _, _, err := multihash.GetMultihashLength(data); err != nil {
   791  		return nil, NewError(ErrNothingToReturn, err.Error())
   792  	}
   793  	return h.update(ctx, name, data, true)
   794  }
   795  
   796  func (h *Handler) Update(ctx context.Context, name string, data []byte) (storage.Address, error) {
   797  	return h.update(ctx, name, data, false)
   798  }
   799  
   800  // create and commit an update
   801  func (h *Handler) update(ctx context.Context, name string, data []byte, multihash bool) (storage.Address, error) {
   802  
   803  	// zero-length updates are bogus
   804  	if len(data) == 0 {
   805  		return nil, NewError(ErrInvalidValue, "I refuse to waste swarm space for updates with empty values, amigo (data length is 0)")
   806  	}
   807  
   808  	// we can't update anything without a store
   809  	if h.chunkStore == nil {
   810  		return nil, NewError(ErrInit, "Call Handler.SetStore() before updating")
   811  	}
   812  
   813  	// signature length is 0 if we are not using them
   814  	var signaturelength int
   815  	if h.signer != nil {
   816  		signaturelength = signatureLength
   817  	}
   818  
   819  	// get the cached information
   820  	nameHash := ens.EnsNode(name)
   821  	nameHashHex := nameHash.Hex()
   822  	rsrc := h.get(nameHashHex)
   823  	if rsrc == nil {
   824  		return nil, NewError(ErrNotFound, fmt.Sprintf(" object '%s' not in index", name))
   825  	} else if !rsrc.isSynced() {
   826  		return nil, NewError(ErrNotSynced, " object not in sync")
   827  	}
   828  
   829  	// an update can be only one chunk long; the data limit is the chunk size less header and signature data
   830  	// 12 = length of the headerlength and datalength fields (2 x uint16) plus the period and version fields (2 x uint32)
   831  	datalimit := h.chunkSize() - int64(signaturelength+len(name)+12)
   832  	if int64(len(data)) > datalimit {
   833  		return nil, NewError(ErrDataOverflow, fmt.Sprintf("Data overflow: %d / %d bytes", len(data), datalimit))
   834  	}
   835  
   836  	// get our blockheight at this time and the next block of the update period
   837  	currentblock, err := h.getBlock(ctx, name)
   838  	if err != nil {
   839  		return nil, NewError(ErrIO, fmt.Sprintf("Could not get block height: %v", err))
   840  	}
   841  	nextperiod, err := getNextPeriod(rsrc.startBlock, currentblock, rsrc.frequency)
   842  	if err != nil {
   843  		return nil, err
   844  	}
   845  
   846  	// if we already have an update for this period then increment the version
   847  	// resource object MUST be in sync for version to be correct, but we checked this earlier in the method already
   848  	var version uint32
   849  	if h.hasUpdate(nameHashHex, nextperiod) {
   850  		version = rsrc.version
   851  	}
   852  	version++
   853  
   854  	// calculate the chunk key
   855  	key := h.resourceHash(nextperiod, version, rsrc.nameHash)
   856  
   857  	// if we have a signing function, sign the update
   858  	// \TODO this code should probably be consolidated with corresponding code in New()
   859  	var signature *Signature
   860  	if h.signer != nil {
   861  		// sign the data hash with the key
   862  		digest := h.keyDataHash(key, data)
   863  		sig, err := h.signer.Sign(digest)
   864  		if err != nil {
   865  			return nil, NewError(ErrInvalidSignature, fmt.Sprintf("Sign fail: %v", err))
   866  		}
   867  		signature = &sig
   868  
   869  		// get the address of the signer (which also checks that it's a valid signature)
   870  		addr, err := getAddressFromDataSig(digest, *signature)
   871  		if err != nil {
   872  			return nil, NewError(ErrInvalidSignature, fmt.Sprintf("Invalid data/signature: %v", err))
   873  		}
   874  		if h.signer != nil {
   875  			// check if the signer has access to update
   876  			ok, err := h.checkAccess(name, addr)
   877  			if err != nil {
   878  				return nil, NewError(ErrIO, fmt.Sprintf("Access check fail: %v", err))
   879  			} else if !ok {
   880  				return nil, NewError(ErrUnauthorized, fmt.Sprintf("Address %x does not have access to update %s", addr, name))
   881  			}
   882  		}
   883  	}
   884  
   885  	// a datalength field set to 0 means the content is a multihash
   886  	var datalength int
   887  	if !multihash {
   888  		datalength = len(data)
   889  	}
   890  	chunk := newUpdateChunk(key, signature, nextperiod, version, name, data, datalength)
   891  
   892  	// send the chunk
   893  	h.chunkStore.Put(chunk)
   894  	log.Trace("resource update", "name", name, "key", key, "currentblock", currentblock, "lastperiod", nextperiod, "version", version, "data", chunk.SData, "multihash", multihash)
   895  
   896  	// update our resources map entry and return the new key
   897  	rsrc.lastPeriod = nextperiod
   898  	rsrc.version = version
   899  	rsrc.data = make([]byte, len(data))
   900  	copy(rsrc.data, data)
   901  	return key, nil
   902  }
   903  
   904  // Closes the datastore.
   905  // Always call this at shutdown to avoid data corruption.
   906  func (h *Handler) Close() {
   907  	h.chunkStore.Close()
   908  }
   909  
   910  // gets the current block height
   911  func (h *Handler) getBlock(ctx context.Context, name string) (uint64, error) {
   912  	blockheader, err := h.headerGetter.HeaderByNumber(ctx, name, nil)
   913  	if err != nil {
   914  		return 0, err
   915  	}
   916  	return blockheader.Number.Uint64(), nil
   917  }
   918  
   919  // Calculate the period index (aka major version number) from a given block number
   920  func (h *Handler) BlockToPeriod(name string, blocknumber uint64) (uint32, error) {
   921  	return getNextPeriod(h.resources[name].startBlock, blocknumber, h.resources[name].frequency)
   922  }
   923  
   924  // Calculate the block number from a given period index (aka major version number)
   925  func (h *Handler) PeriodToBlock(name string, period uint32) uint64 {
   926  	return h.resources[name].startBlock + (uint64(period) * h.resources[name].frequency)
   927  }
   928  
   929  // Retrieves the resource index value for the given nameHash
   930  func (h *Handler) get(nameHash string) *resource {
   931  	h.resourceLock.RLock()
   932  	defer h.resourceLock.RUnlock()
   933  	rsrc := h.resources[nameHash]
   934  	return rsrc
   935  }
   936  
   937  // Sets the resource index value for the given nameHash
   938  func (h *Handler) set(nameHash string, rsrc *resource) {
   939  	h.resourceLock.Lock()
   940  	defer h.resourceLock.Unlock()
   941  	h.resources[nameHash] = rsrc
   942  }
   943  
   944  // used for chunk keys
   945  func (h *Handler) resourceHash(period uint32, version uint32, namehash common.Hash) storage.Address {
   946  	// format is: hash(period|version|namehash)
   947  	hasher := h.hashPool.Get().(storage.SwarmHash)
   948  	defer h.hashPool.Put(hasher)
   949  	hasher.Reset()
   950  	b := make([]byte, 4)
   951  	binary.LittleEndian.PutUint32(b, period)
   952  	hasher.Write(b)
   953  	binary.LittleEndian.PutUint32(b, version)
   954  	hasher.Write(b)
   955  	hasher.Write(namehash[:])
   956  	return hasher.Sum(nil)
   957  }
   958  
   959  // Checks if we already have an update on this resource, according to the value in the current state of the resource index
   960  func (h *Handler) hasUpdate(nameHash string, period uint32) bool {
   961  	return h.resources[nameHash].lastPeriod == period
   962  }
   963  
   964  func getAddressFromDataSig(datahash common.Hash, signature Signature) (common.Address, error) {
   965  	pub, err := crypto.SigToPub(datahash.Bytes(), signature[:])
   966  	if err != nil {
   967  		return common.Address{}, err
   968  	}
   969  	return crypto.PubkeyToAddress(*pub), nil
   970  }
   971  
   972  // create an update chunk
   973  func newUpdateChunk(addr storage.Address, signature *Signature, period uint32, version uint32, name string, data []byte, datalength int) *storage.Chunk {
   974  
   975  	// no signatures if no validator
   976  	var signaturelength int
   977  	if signature != nil {
   978  		signaturelength = signatureLength
   979  	}
   980  
   981  	// prepend period and version to allow reverse lookups
   982  	headerlength := len(name) + 4 + 4
   983  
   984  	actualdatalength := len(data)
   985  	chunk := storage.NewChunk(addr, nil)
   986  	chunk.SData = make([]byte, 4+signaturelength+headerlength+actualdatalength) // initial 4 are uint16 length descriptors for headerlength and datalength
   987  
   988  	// data header length does NOT include the header length prefix bytes themselves
   989  	cursor := 0
   990  	binary.LittleEndian.PutUint16(chunk.SData[cursor:], uint16(headerlength))
   991  	cursor += 2
   992  
   993  	// data length
   994  	binary.LittleEndian.PutUint16(chunk.SData[cursor:], uint16(datalength))
   995  	cursor += 2
   996  
   997  	// header = period + version + name
   998  	binary.LittleEndian.PutUint32(chunk.SData[cursor:], period)
   999  	cursor += 4
  1000  
  1001  	binary.LittleEndian.PutUint32(chunk.SData[cursor:], version)
  1002  	cursor += 4
  1003  
  1004  	namebytes := []byte(name)
  1005  	copy(chunk.SData[cursor:], namebytes)
  1006  	cursor += len(namebytes)
  1007  
  1008  	// add the data
  1009  	copy(chunk.SData[cursor:], data)
  1010  
  1011  	// if signature is present it's the last item in the chunk data
  1012  	if signature != nil {
  1013  		cursor += actualdatalength
  1014  		copy(chunk.SData[cursor:], signature[:])
  1015  	}
  1016  
  1017  	chunk.Size = int64(len(chunk.SData))
  1018  	return chunk
  1019  }
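
        // Editor's illustrative sketch, not part of the original file: a round trip of an unsigned
        // update chunk through newUpdateChunk and parseUpdate, mirroring the layout documented at
        // the top of this file. Only covers handlers constructed without a Signer (no signature is
        // appended) and non-empty, non-multihash data (datalength 0 is reserved for multihash
        // content); the function name and the period/version values are examples only.
        func exampleUpdateRoundTrip(h *Handler, name string, data []byte) ([]byte, error) {
        	if h.signer != nil {
        		return nil, errors.New("this sketch only covers unsigned updates")
        	}
        	key := h.resourceHash(1, 1, ens.EnsNode(name)) // period 1, version 1
        	chunk := newUpdateChunk(key, nil, 1, 1, name, data, len(data))
        	_, _, _, _, parsed, _, err := h.parseUpdate(chunk.SData)
        	return parsed, err
        }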
  1020  
  1021  // Helper function to calculate the next update period number from the current block, start block and frequency
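        // For example (editor's note): getNextPeriod(4200, 4326, 42) returns 4, since three full
        // 42-block periods have elapsed since block 4200 and the next update belongs to period 4.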
  1022  func getNextPeriod(start uint64, current uint64, frequency uint64) (uint32, error) {
  1023  	if current < start {
  1024  		return 0, NewError(ErrInvalidValue, fmt.Sprintf("given current block value %d < start block %d", current, start))
  1025  	}
  1026  	blockdiff := current - start
  1027  	period := blockdiff / frequency
  1028  	return uint32(period + 1), nil
  1029  }
  1030  
  1031  // ToSafeName is a helper function to create a valid IDNA representation of a given resource update name
  1032  func ToSafeName(name string) (string, error) {
  1033  	return idna.ToASCII(name)
  1034  }
  1035  
  1036  // check that name identifiers contain valid bytes
  1037  // Strings created using ToSafeName() should satisfy this check
  1038  func isSafeName(name string) bool {
  1039  	if name == "" {
  1040  		return false
  1041  	}
  1042  	validname, err := idna.ToASCII(name)
  1043  	if err != nil {
  1044  		return false
  1045  	}
  1046  	return validname == name
  1047  }
  1048  
  1049  func NewTestHandler(datadir string, params *HandlerParams) (*Handler, error) {
  1050  	path := filepath.Join(datadir, DbDirName)
  1051  	rh, err := NewHandler(params)
  1052  	if err != nil {
  1053  		return nil, fmt.Errorf("resource handler create fail: %v", err)
  1054  	}
  1055  	localstoreparams := storage.NewDefaultLocalStoreParams()
  1056  	localstoreparams.Init(path)
  1057  	localStore, err := storage.NewLocalStore(localstoreparams, nil)
  1058  	if err != nil {
  1059  		return nil, fmt.Errorf("localstore create fail, path %s: %v", path, err)
  1060  	}
  1061  	localStore.Validators = append(localStore.Validators, storage.NewContentAddressValidator(storage.MakeHashFunc(resourceHash)))
  1062  	localStore.Validators = append(localStore.Validators, rh)
  1063  	netStore := storage.NewNetStore(localStore, nil)
  1064  	rh.SetStore(netStore)
  1065  	return rh, nil
  1066  }
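
        // Editor's illustrative sketch, not part of the original file: a minimal end-to-end flow
        // against a test handler. It assumes params carries at least a HeaderGetter (for example
        // NewBlockEstimator()); the identifier, frequency and payload are arbitrary example values.
        func exampleResourceFlow(ctx context.Context, datadir string, params *HandlerParams) ([]byte, error) {
        	rh, err := NewTestHandler(datadir, params)
        	if err != nil {
        		return nil, err
        	}
        	defer rh.Close()
        
        	name, err := ToSafeName("example.eth") // identifiers must be valid IDNA strings
        	if err != nil {
        		return nil, err
        	}
        	// register the resource with an update frequency of 42 blocks
        	if _, _, err := rh.New(ctx, name, 42); err != nil {
        		return nil, err
        	}
        	// post the first update, then sync the index entry for it
        	if _, err := rh.Update(ctx, name, []byte("first value")); err != nil {
        		return nil, err
        	}
        	if _, err := rh.LookupLatestByName(ctx, name, true, nil); err != nil {
        		return nil, err
        	}
        	// the resource index is keyed by the hex-encoded namehash
        	_, data, err := rh.GetContent(ens.EnsNode(name).Hex())
        	return data, err
        }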