github.com/oskarth/go-ethereum@v1.6.8-0.20191013093314-dac24a9d3494/swarm/network/stream/syncer.go

// Copyright 2018 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

package stream

import (
	"context"
	"strconv"
	"time"

	"github.com/ethereum/go-ethereum/metrics"
	"github.com/ethereum/go-ethereum/swarm/log"
	"github.com/ethereum/go-ethereum/swarm/storage"
)

const (
	BatchSize = 128
)

// SwarmSyncerServer implements a Server for history syncing on bins.
// Offered streams:
// * live request delivery with or without checkback
// * (live/non-live historical) chunk syncing per proximity bin
type SwarmSyncerServer struct {
	po    uint8
	store storage.SyncChunkStore
	quit  chan struct{}
}

// NewSwarmSyncerServer is the constructor for SwarmSyncerServer
func NewSwarmSyncerServer(po uint8, syncChunkStore storage.SyncChunkStore) (*SwarmSyncerServer, error) {
	return &SwarmSyncerServer{
		po:    po,
		store: syncChunkStore,
		quit:  make(chan struct{}),
	}, nil
}

// RegisterSwarmSyncerServer registers a SYNC Server constructor on the
// Registry; the stream key encodes the proximity order (bin) to serve
func RegisterSwarmSyncerServer(streamer *Registry, syncChunkStore storage.SyncChunkStore) {
	streamer.RegisterServerFunc("SYNC", func(_ *Peer, t string, _ bool) (Server, error) {
		po, err := ParseSyncBinKey(t)
		if err != nil {
			return nil, err
		}
		return NewSwarmSyncerServer(po, syncChunkStore)
	})
	// streamer.RegisterServerFunc(stream, func(p *Peer) (Server, error) {
	// 	return NewOutgoingProvableSwarmSyncer(po, db)
	// })
}

// Close needs to be called on a stream server
func (s *SwarmSyncerServer) Close() {
	close(s.quit)
}

// GetData retrieves the actual chunk from the netstore
func (s *SwarmSyncerServer) GetData(ctx context.Context, key []byte) ([]byte, error) {
	chunk, err := s.store.Get(ctx, storage.Address(key))
	if err != nil {
		return nil, err
	}
	return chunk.Data(), nil
}

// SessionIndex returns the current storage bin (po) index.
func (s *SwarmSyncerServer) SessionIndex() (uint64, error) {
	return s.store.BinIndex(s.po), nil
}

// SetNextBatch retrieves the next batch of hashes from the dbstore
func (s *SwarmSyncerServer) SetNextBatch(from, to uint64) ([]byte, uint64, uint64, *HandoverProof, error) {
	var batch []byte
	i := 0

	var ticker *time.Ticker
	defer func() {
		if ticker != nil {
			ticker.Stop()
		}
	}()
	var wait bool
	for {
		if wait {
			if ticker == nil {
				ticker = time.NewTicker(1000 * time.Millisecond)
			}
			select {
			case <-ticker.C:
			case <-s.quit:
				return nil, 0, 0, nil, nil
			}
		}

		metrics.GetOrRegisterCounter("syncer.setnextbatch.iterator", nil).Inc(1)
		err := s.store.Iterator(from, to, s.po, func(key storage.Address, idx uint64) bool {
			batch = append(batch, key[:]...)
			i++
			to = idx
			return i < BatchSize
		})
		if err != nil {
			return nil, 0, 0, nil, err
		}
		if len(batch) > 0 {
			break
		}
		wait = true
	}

	log.Trace("Swarm syncer offer batch", "po", s.po, "len", i, "from", from, "to", to, "current store count", s.store.BinIndex(s.po))
	return batch, from, to, nil, nil
}

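// collectBatchExample is an illustrative sketch, not part of the original
// file: it shows how a batch returned by SetNextBatch is laid out. The hashes
// are concatenated back to back, HashSize bytes each, and the returned end
// index marks where the batch stops within the bin.
func collectBatchExample(s *SwarmSyncerServer, from, to uint64) ([]storage.Address, uint64, error) {
	hashes, _, end, _, err := s.SetNextBatch(from, to)
	if err != nil {
		return nil, 0, err
	}
	addrs := make([]storage.Address, 0, len(hashes)/HashSize)
	for i := 0; i+HashSize <= len(hashes); i += HashSize {
		addrs = append(addrs, storage.Address(hashes[i:i+HashSize]))
	}
	return addrs, end, nil
}
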
// SwarmSyncerClient is the incoming (client) side of a SYNC stream
type SwarmSyncerClient struct {
	sessionAt     uint64
	nextC         chan struct{}
	sessionRoot   storage.Address
	sessionReader storage.LazySectionReader
	retrieveC     chan *storage.Chunk
	storeC        chan *storage.Chunk
	store         storage.SyncChunkStore
	// chunker               storage.Chunker
	currentRoot storage.Address
	requestFunc func(chunk *storage.Chunk)
	end, start  uint64
	peer        *Peer
	stream      Stream
}

// NewSwarmSyncerClient is a constructor for provable data exchange syncer
func NewSwarmSyncerClient(p *Peer, store storage.SyncChunkStore, stream Stream) (*SwarmSyncerClient, error) {
	return &SwarmSyncerClient{
		store:  store,
		peer:   p,
		stream: stream,
	}, nil
}

// // NewIncomingProvableSwarmSyncer is a constructor for provable data exchange syncer
// func NewIncomingProvableSwarmSyncer(po int, priority int, index uint64, sessionAt uint64, intervals []uint64, sessionRoot storage.Address, chunker *storage.PyramidChunker, store storage.ChunkStore, p Peer) *SwarmSyncerClient {
// 	retrieveC := make(storage.Chunk, chunksCap)
// 	RunChunkRequestor(p, retrieveC)
// 	storeC := make(storage.Chunk, chunksCap)
// 	RunChunkStorer(store, storeC)
// 	s := &SwarmSyncerClient{
// 		po:            po,
// 		priority:      priority,
// 		sessionAt:     sessionAt,
// 		start:         index,
// 		end:           index,
// 		nextC:         make(chan struct{}, 1),
// 		intervals:     intervals,
// 		sessionRoot:   sessionRoot,
// 		sessionReader: chunker.Join(sessionRoot, retrieveC),
// 		retrieveC:     retrieveC,
// 		storeC:        storeC,
// 	}
// 	return s
// }

// // StartSyncing is called on the Peer to start the syncing process
// // the idea is that it is called only after kademlia is close to healthy
// func StartSyncing(s *Streamer, peerId enode.ID, po uint8, nn bool) {
// 	lastPO := po
// 	if nn {
// 		lastPO = maxPO
// 	}
//
// 	for i := po; i <= lastPO; i++ {
// 		s.Subscribe(peerId, "SYNC", newSyncLabel("LIVE", po), 0, 0, High, true)
// 		s.Subscribe(peerId, "SYNC", newSyncLabel("HISTORY", po), 0, 0, Mid, false)
// 	}
// }

// RegisterSwarmSyncerClient registers the client constructor function
// to handle incoming sync streams
func RegisterSwarmSyncerClient(streamer *Registry, store storage.SyncChunkStore) {
	streamer.RegisterClientFunc("SYNC", func(p *Peer, t string, live bool) (Client, error) {
		return NewSwarmSyncerClient(p, store, NewStream("SYNC", t, live))
	})
}

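// registerSyncExample is an illustrative sketch, not part of the original
// file: a node that both offers and consumes SYNC streams registers the
// server and client constructors on the same Registry, backed by one
// SyncChunkStore.
func registerSyncExample(streamer *Registry, store storage.SyncChunkStore) {
	RegisterSwarmSyncerServer(streamer, store) // serve outgoing per-bin batches
	RegisterSwarmSyncerClient(streamer, store) // store chunks from incoming batches
}
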
// NeedData returns a wait function obtained from the store's FetchFunc;
// it is nil if the chunk is already available locally
func (s *SwarmSyncerClient) NeedData(ctx context.Context, key []byte) (wait func(context.Context) error) {
	return s.store.FetchFunc(ctx, key)
}

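// needDataExample is an illustrative sketch, not part of the original file:
// a nil return from NeedData means the chunk is already stored and no
// delivery is needed, otherwise the wait function blocks until the chunk
// arrives or the context is done.
func needDataExample(ctx context.Context, c *SwarmSyncerClient, key []byte) error {
	if wait := c.NeedData(ctx, key); wait != nil {
		return wait(ctx)
	}
	return nil // chunk already present locally
}
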
// BatchDone returns a function that constructs the TakeoverProof for a
// received batch; with the provable chunker disabled it currently returns nil
func (s *SwarmSyncerClient) BatchDone(stream Stream, from uint64, hashes []byte, root []byte) func() (*TakeoverProof, error) {
	// TODO: reenable this with putter/getter refactored code
	// if s.chunker != nil {
	// 	return func() (*TakeoverProof, error) { return s.TakeoverProof(stream, from, hashes, root) }
	// }
	return nil
}

// TakeoverProof advances the client's end index by the number of hashes in
// the batch and returns an (as yet unsigned) TakeoverProof for the stream
func (s *SwarmSyncerClient) TakeoverProof(stream Stream, from uint64, hashes []byte, root storage.Address) (*TakeoverProof, error) {
	// for provable syncer currentRoot is non-zero length
	// TODO: reenable this with putter/getter
	// if s.chunker != nil {
	// 	if from > s.sessionAt { // for live syncing currentRoot is always updated
	// 		//expRoot, err := s.chunker.Append(s.currentRoot, bytes.NewReader(hashes), s.retrieveC, s.storeC)
	// 		expRoot, _, err := s.chunker.Append(s.currentRoot, bytes.NewReader(hashes), s.retrieveC)
	// 		if err != nil {
	// 			return nil, err
	// 		}
	// 		if !bytes.Equal(root, expRoot) {
	// 			return nil, fmt.Errorf("HandoverProof mismatch")
	// 		}
	// 		s.currentRoot = root
	// 	} else {
	// 		expHashes := make([]byte, len(hashes))
	// 		_, err := s.sessionReader.ReadAt(expHashes, int64(s.end*HashSize))
	// 		if err != nil && err != io.EOF {
	// 			return nil, err
	// 		}
	// 		if !bytes.Equal(expHashes, hashes) {
	// 			return nil, errors.New("invalid proof")
	// 		}
	// 	}
	// 	return nil, nil
	// }
	s.end += uint64(len(hashes)) / HashSize
	takeover := &Takeover{
		Stream: stream,
		Start:  s.start,
		End:    s.end,
		Root:   root,
	}
	// serialise and sign
	return &TakeoverProof{
		Takeover: takeover,
		Sig:      nil,
	}, nil
}

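// takeoverEndExample is an illustrative sketch, not part of the original
// file: a full batch of BatchSize hashes, HashSize bytes each, advances the
// client's end index by exactly BatchSize.
func takeoverEndExample(c *SwarmSyncerClient, stream Stream, root storage.Address) (*TakeoverProof, error) {
	hashes := make([]byte, BatchSize*HashSize) // zero-valued placeholder hashes
	return c.TakeoverProof(stream, c.start, hashes, root)
}
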
// Close is a no-op for SwarmSyncerClient
func (s *SwarmSyncerClient) Close() {}

// base for parsing and formatting sync bin key
// it must be 2 <= base <= 36
const syncBinKeyBase = 36

// FormatSyncBinKey returns a string representation of
// Kademlia bin number to be used as key for SYNC stream.
func FormatSyncBinKey(bin uint8) string {
	return strconv.FormatUint(uint64(bin), syncBinKeyBase)
}

// ParseSyncBinKey parses the string representation
// and returns the Kademlia bin number.
func ParseSyncBinKey(s string) (uint8, error) {
	bin, err := strconv.ParseUint(s, syncBinKeyBase, 8)
	if err != nil {
		return 0, err
	}
	return uint8(bin), nil
}
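
// syncBinKeyRoundTripExample is an illustrative sketch, not part of the
// original file: bin numbers are encoded in base 36, so bin 10 is formatted
// as the stream key "a" and parses back to 10.
func syncBinKeyRoundTripExample() (uint8, error) {
	key := FormatSyncBinKey(10) // "a"
	return ParseSyncBinKey(key) // 10, nil
}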