github.com/alexdevranger/node-1.8.27@v0.0.0-20221128213301-aa5841e41d2d/swarm/storage/feed/handler.go

// Copyright 2018 The go-ethereum Authors
// This file is part of the go-dubxcoin library.
//
// The go-dubxcoin library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-dubxcoin library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-dubxcoin library. If not, see <http://www.gnu.org/licenses/>.

// Handler is the API for feeds
// It enables creating, updating, syncing and retrieving feed updates and their data
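//
// A minimal wiring sketch (illustrative only; netStore, ctx and myFeed are
// assumed to be a configured *storage.NetStore, a context.Context and a Feed):
//
//	fh := feed.NewHandler(&feed.HandlerParams{})
//	fh.SetStore(netStore) // required before calling Lookup or Update
//	request, err := fh.NewRequest(ctx, &myFeed)
//	// fill in, sign and publish the request with fh.Update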
package feed

import (
	"bytes"
	"context"
	"fmt"
	"sync"

	"github.com/alexdevranger/node-1.8.27/swarm/storage/feed/lookup"

	"github.com/alexdevranger/node-1.8.27/swarm/log"
	"github.com/alexdevranger/node-1.8.27/swarm/storage"
)

// Handler implements the feed update API. It holds a reference to the chunk
// store used to retrieve and publish updates, and an in-memory cache of the
// latest known update for each feed.
type Handler struct {
	chunkStore *storage.NetStore
	HashSize   int
	cache      map[uint64]*cacheEntry
	cacheLock  sync.RWMutex
}

// HandlerParams passes parameters to the Handler constructor NewHandler.
// It currently carries no parameters.
type HandlerParams struct {
}

// hashPool contains a pool of ready hashers
var hashPool sync.Pool

// init initializes the package and hashPool
func init() {
	hashPool = sync.Pool{
		New: func() interface{} {
			return storage.MakeHashFunc(feedsHashAlgorithm)()
		},
	}
}

// NewHandler creates a new Swarm feeds API
func NewHandler(params *HandlerParams) *Handler {
	fh := &Handler{
		cache: make(map[uint64]*cacheEntry),
	}

	for i := 0; i < hasherCount; i++ {
		hashfunc := storage.MakeHashFunc(feedsHashAlgorithm)()
		if fh.HashSize == 0 {
			fh.HashSize = hashfunc.Size()
		}
		hashPool.Put(hashfunc)
	}

	return fh
}

// SetStore sets the store backend for the Swarm feeds API
func (h *Handler) SetStore(store *storage.NetStore) {
	h.chunkStore = store
}

// Validate is a chunk validation method
// If it looks like a feed update, the chunk address is checked against the userAddr of the update's signature
// It implements the storage.ChunkValidator interface
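//
// A minimal, illustrative sketch (fh is assumed to be a *Handler and ch a
// storage.Chunk obtained elsewhere):
//
//	if !fh.Validate(ch) {
//		// ch is not a properly signed feed update chunk
//	}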
func (h *Handler) Validate(chunk storage.Chunk) bool {
	if len(chunk.Data()) < minimumSignedUpdateLength {
		return false
	}

	// check if it is a properly formatted update chunk with
	// valid signature and proof of ownership of the feed it is trying
	// to update

	// First, deserialize the chunk
	var r Request
	if err := r.fromChunk(chunk); err != nil {
		log.Debug("Invalid feed update chunk", "addr", chunk.Address(), "err", err)
		return false
	}

	// Verify signatures and that the signer actually owns the feed
	// If it fails, it means either the signature is not valid, data is corrupted
	// or someone is trying to update someone else's feed.
	if err := r.Verify(); err != nil {
		log.Debug("Invalid feed update signature", "err", err)
		return false
	}

	return true
}

// GetContent retrieves the data payload of the last synced update of the feed
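//
// A short, illustrative sketch (assumes a previous successful Lookup of myFeed):
//
//	addr, data, err := fh.GetContent(&myFeed)
//	if err == nil {
//		fmt.Printf("last synced update %x carries %d bytes\n", addr, len(data))
//	}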
func (h *Handler) GetContent(feed *Feed) (storage.Address, []byte, error) {
	if feed == nil {
		return nil, nil, NewError(ErrInvalidValue, "feed is nil")
	}
	feedUpdate := h.get(feed)
	if feedUpdate == nil {
		return nil, nil, NewError(ErrNotFound, "feed update not cached")
	}
	return feedUpdate.lastKey, feedUpdate.data, nil
}

// NewRequest prepares a Request structure with all the necessary information to
// just add the desired data and sign it.
// The resulting structure can then be signed and passed to Handler.Update to be verified and sent
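//
// An illustrative sketch of the publish flow (signer is an assumed Signer
// implementation; SetData and Sign are the Request helpers defined alongside
// this handler, see request.go):
//
//	request, err := fh.NewRequest(ctx, &myFeed)
//	if err == nil {
//		request.SetData([]byte("hello"))
//		if err := request.Sign(signer); err == nil {
//			_, err = fh.Update(ctx, request)
//		}
//	}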
func (h *Handler) NewRequest(ctx context.Context, feed *Feed) (request *Request, err error) {
	if feed == nil {
		return nil, NewError(ErrInvalidValue, "feed cannot be nil")
	}

	now := TimestampProvider.Now().Time
	request = new(Request)
	request.Header.Version = ProtocolVersion

	query := NewQueryLatest(feed, lookup.NoClue)

	feedUpdate, err := h.Lookup(ctx, query)
	if err != nil {
		if err.(*Error).code != ErrNotFound {
			return nil, err
		}
		// not finding updates means that there is a network error
		// or that the feed really does not have updates
	}

	request.Feed = *feed

	// if we already have an update, then find next epoch
	if feedUpdate != nil {
		request.Epoch = lookup.GetNextEpoch(feedUpdate.Epoch, now)
	} else {
		request.Epoch = lookup.GetFirstEpoch(now)
	}

	return request, nil
}

// Lookup retrieves a specific or latest feed update
// Lookup works differently depending on the configuration of `query`
// See the `query` documentation and helper functions:
// `NewQueryLatest` and `NewQuery`
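//
// A minimal, illustrative sketch for retrieving the latest update of a feed
// (SetStore must have been called beforehand):
//
//	query := feed.NewQueryLatest(&myFeed, lookup.NoClue)
//	if _, err := fh.Lookup(ctx, query); err == nil {
//		_, data, _ := fh.GetContent(&myFeed) // the payload is now cached
//	}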
func (h *Handler) Lookup(ctx context.Context, query *Query) (*cacheEntry, error) {

	timeLimit := query.TimeLimit
	if timeLimit == 0 { // if time limit is set to zero, the user wants to get the latest update
		timeLimit = TimestampProvider.Now().Time
	}

	if query.Hint == lookup.NoClue { // try to use our cache
		entry := h.get(&query.Feed)
		if entry != nil && entry.Epoch.Time <= timeLimit { // avoid bad hints
			query.Hint = entry.Epoch
		}
	}

	// we can't look for anything without a store
	if h.chunkStore == nil {
		return nil, NewError(ErrInit, "Call Handler.SetStore() before performing lookups")
	}

	var id ID
	id.Feed = query.Feed
	var readCount int

	// Invoke the lookup engine.
	// The callback will be called every time the lookup algorithm needs to guess
	requestPtr, err := lookup.Lookup(timeLimit, query.Hint, func(epoch lookup.Epoch, now uint64) (interface{}, error) {
		readCount++
		id.Epoch = epoch
		ctx, cancel := context.WithTimeout(ctx, defaultRetrieveTimeout)
		defer cancel()

		chunk, err := h.chunkStore.Get(ctx, id.Addr())
		if err != nil { // TODO: check for catastrophic errors other than chunk not found
			return nil, nil
		}

		var request Request
		if err := request.fromChunk(chunk); err != nil {
			return nil, nil
		}
		if request.Time <= timeLimit {
			return &request, nil
		}
		return nil, nil
	})
	if err != nil {
		return nil, err
	}

	log.Info(fmt.Sprintf("Feed lookup finished in %d lookups", readCount))

	request, _ := requestPtr.(*Request)
	if request == nil {
		return nil, NewError(ErrNotFound, "no feed updates found")
	}
	return h.updateCache(request)

}

// updateCache updates the feed update cache with the specified content
func (h *Handler) updateCache(request *Request) (*cacheEntry, error) {

	updateAddr := request.Addr()
	log.Trace("feed cache update", "topic", request.Topic.Hex(), "updateaddr", updateAddr, "epoch time", request.Epoch.Time, "epoch level", request.Epoch.Level)

	feedUpdate := h.get(&request.Feed)
	if feedUpdate == nil {
		feedUpdate = &cacheEntry{}
		h.set(&request.Feed, feedUpdate)
	}

	// update our cache entry for this feed
	feedUpdate.lastKey = updateAddr
	feedUpdate.Update = request.Update
	feedUpdate.Reader = bytes.NewReader(feedUpdate.data)
	return feedUpdate, nil
}

// Update publishes a feed update
// Note that a feed update cannot span chunks, and therefore has a maximum net length of 4096 bytes, including the update header data and signature.
// This results in a maximum payload of `maxUpdateDataLength` (see update.go for more details).
// An error is returned if the total length of the chunk payload exceeds this limit.
// Update can only check whether the caller is trying to overwrite the very last known version; otherwise it simply puts the update
// on the network.
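//
// A brief, illustrative sketch (request is assumed to have been prepared with
// NewRequest, filled with data and signed):
//
//	if updateAddr, err := fh.Update(ctx, request); err == nil {
//		log.Info("feed update published", "addr", updateAddr)
//	}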
func (h *Handler) Update(ctx context.Context, r *Request) (updateAddr storage.Address, err error) {

	// we can't update anything without a store
	if h.chunkStore == nil {
		return nil, NewError(ErrInit, "Call Handler.SetStore() before updating")
	}

	feedUpdate := h.get(&r.Feed)
	if feedUpdate != nil && feedUpdate.Epoch.Equals(r.Epoch) { // This is the only cheap check we can do for sure
		return nil, NewError(ErrInvalidValue, "A former update in this epoch is already known to exist")
	}

	chunk, err := r.toChunk() // Serialize the update into a chunk. Fails if data is too big
	if err != nil {
		return nil, err
	}

	// send the chunk
	h.chunkStore.Put(ctx, chunk)
	log.Trace("feed update", "updateAddr", r.idAddr, "epoch time", r.Epoch.Time, "epoch level", r.Epoch.Level, "data", chunk.Data())
	// update our feed updates map cache entry if the new update is newer than the one we have, if we have it.
	if feedUpdate != nil && r.Epoch.After(feedUpdate.Epoch) {
		feedUpdate.Epoch = r.Epoch
		feedUpdate.data = make([]byte, len(r.data))
		feedUpdate.lastKey = r.idAddr
		copy(feedUpdate.data, r.data)
		feedUpdate.Reader = bytes.NewReader(feedUpdate.data)
	}

	return r.idAddr, nil
}

// get retrieves the feed update cache value for the given feed
func (h *Handler) get(feed *Feed) *cacheEntry {
	mapKey := feed.mapKey()
	h.cacheLock.RLock()
	defer h.cacheLock.RUnlock()
	feedUpdate := h.cache[mapKey]
	return feedUpdate
}

// set stores the feed update cache value for the given feed
func (h *Handler) set(feed *Feed, feedUpdate *cacheEntry) {
	mapKey := feed.mapKey()
	h.cacheLock.Lock()
	defer h.cacheLock.Unlock()
	h.cache[mapKey] = feedUpdate
}