github.com/divan/go-ethereum@v1.8.14-0.20180820134928-1de9ada4016d/swarm/network/stream/syncer.go

// Copyright 2018 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

package stream

import (
	"context"
	"math"
	"strconv"
	"time"

	"github.com/ethereum/go-ethereum/metrics"
	"github.com/ethereum/go-ethereum/swarm/log"
	"github.com/ethereum/go-ethereum/swarm/storage"
)

const (
	// BatchSize is the maximum number of chunk hashes offered in one batch.
	BatchSize = 128
)

// SwarmSyncerServer implements a Server for history syncing on bins.
// Offered streams:
// * live request delivery with or without checkback
// * (live/non-live historical) chunk syncing per proximity bin
type SwarmSyncerServer struct {
	po        uint8
	db        *storage.DBAPI
	sessionAt uint64
	start     uint64
	quit      chan struct{}
}

// NewSwarmSyncerServer is a constructor for SwarmSyncerServer
func NewSwarmSyncerServer(live bool, po uint8, db *storage.DBAPI) (*SwarmSyncerServer, error) {
	sessionAt := db.CurrentBucketStorageIndex(po)
	var start uint64
	if live {
		start = sessionAt
	}
	return &SwarmSyncerServer{
		po:        po,
		db:        db,
		sessionAt: sessionAt,
		start:     start,
		quit:      make(chan struct{}),
	}, nil
}

// RegisterSwarmSyncerServer registers the server constructor function for
// incoming SYNC streams; the stream key encodes the proximity bin.
func RegisterSwarmSyncerServer(streamer *Registry, db *storage.DBAPI) {
	streamer.RegisterServerFunc("SYNC", func(p *Peer, t string, live bool) (Server, error) {
		po, err := ParseSyncBinKey(t)
		if err != nil {
			return nil, err
		}
		return NewSwarmSyncerServer(live, po, db)
	})
	// streamer.RegisterServerFunc(stream, func(p *Peer) (Server, error) {
	// 	return NewOutgoingProvableSwarmSyncer(po, db)
	// })
}
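
// Illustrative wiring (not part of the original file): both directions of the
// SYNC stream are typically registered on the same Registry; the streamer and
// db values here are assumed to be provided by the caller's node setup.
//
//	RegisterSwarmSyncerServer(streamer, db)
//	RegisterSwarmSyncerClient(streamer, db)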

// Close needs to be called on a stream server
func (s *SwarmSyncerServer) Close() {
	close(s.quit)
}

// GetData retrieves the actual chunk from the localstore
func (s *SwarmSyncerServer) GetData(ctx context.Context, key []byte) ([]byte, error) {
	chunk, err := s.db.Get(ctx, storage.Address(key))
	if err == storage.ErrFetching {
		<-chunk.ReqC
	} else if err != nil {
		return nil, err
	}
	return chunk.SData, nil
}

// SetNextBatch retrieves the next batch of hashes from the dbstore
func (s *SwarmSyncerServer) SetNextBatch(from, to uint64) ([]byte, uint64, uint64, *HandoverProof, error) {
	var batch []byte
	i := 0
	if from == 0 {
		from = s.start
	}
	if to <= from || from >= s.sessionAt {
		to = math.MaxUint64
	}
	var ticker *time.Ticker
	defer func() {
		if ticker != nil {
			ticker.Stop()
		}
	}()
	var wait bool
	for {
		if wait {
			if ticker == nil {
				ticker = time.NewTicker(1000 * time.Millisecond)
			}
			select {
			case <-ticker.C:
			case <-s.quit:
				return nil, 0, 0, nil, nil
			}
		}

		metrics.GetOrRegisterCounter("syncer.setnextbatch.iterator", nil).Inc(1)
		err := s.db.Iterator(from, to, s.po, func(addr storage.Address, idx uint64) bool {
			batch = append(batch, addr[:]...)
			i++
			to = idx
			return i < BatchSize
		})
		if err != nil {
			return nil, 0, 0, nil, err
		}
		if len(batch) > 0 {
			break
		}
		wait = true
	}

	log.Trace("Swarm syncer offer batch", "po", s.po, "len", i, "from", from, "to", to, "current store count", s.db.CurrentBucketStorageIndex(s.po))
	return batch, from, to, nil, nil
}
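
// Note (illustrative, not part of the original file): a batch returned by
// SetNextBatch is a flat concatenation of chunk addresses, so the number of
// hashes offered is len(batch)/HashSize, assuming HashSize is the address
// length used elsewhere in this file. A rough sketch of a caller:
//
//	hashes, from, to, _, err := server.SetNextBatch(0, 0)
//	if err == nil {
//		n := len(hashes) / HashSize // number of chunk addresses offered in [from, to]
//		_ = n
//	}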

// SwarmSyncerClient is the client side of the SYNC stream: it receives
// batches of chunk hashes and requests the chunks missing from the local store.
type SwarmSyncerClient struct {
	sessionAt     uint64
	nextC         chan struct{}
	sessionRoot   storage.Address
	sessionReader storage.LazySectionReader
	retrieveC     chan *storage.Chunk
	storeC        chan *storage.Chunk
	db            *storage.DBAPI
	// chunker               storage.Chunker
	currentRoot           storage.Address
	requestFunc           func(chunk *storage.Chunk)
	end, start            uint64
	peer                  *Peer
	ignoreExistingRequest bool
	stream                Stream
}

// NewSwarmSyncerClient is a constructor for provable data exchange syncer
func NewSwarmSyncerClient(p *Peer, db *storage.DBAPI, ignoreExistingRequest bool, stream Stream) (*SwarmSyncerClient, error) {
	return &SwarmSyncerClient{
		db:   db,
		peer: p,
		ignoreExistingRequest: ignoreExistingRequest,
		stream:                stream,
	}, nil
}

// // NewIncomingProvableSwarmSyncer is a constructor for provable data exchange syncer
// func NewIncomingProvableSwarmSyncer(po int, priority int, index uint64, sessionAt uint64, intervals []uint64, sessionRoot storage.Key, chunker *storage.PyramidChunker, store storage.ChunkStore, p Peer) *SwarmSyncerClient {
// 	retrieveC := make(storage.Chunk, chunksCap)
// 	RunChunkRequestor(p, retrieveC)
// 	storeC := make(storage.Chunk, chunksCap)
// 	RunChunkStorer(store, storeC)
// 	s := &SwarmSyncerClient{
// 		po:            po,
// 		priority:      priority,
// 		sessionAt:     sessionAt,
// 		start:         index,
// 		end:           index,
// 		nextC:         make(chan struct{}, 1),
// 		intervals:     intervals,
// 		sessionRoot:   sessionRoot,
// 		sessionReader: chunker.Join(sessionRoot, retrieveC),
// 		retrieveC:     retrieveC,
// 		storeC:        storeC,
// 	}
// 	return s
// }

// // StartSyncing is called on the Peer to start the syncing process
// // the idea is that it is called only after kademlia is close to healthy
// func StartSyncing(s *Streamer, peerId discover.NodeID, po uint8, nn bool) {
// 	lastPO := po
// 	if nn {
// 		lastPO = maxPO
// 	}
//
// 	for i := po; i <= lastPO; i++ {
// 		s.Subscribe(peerId, "SYNC", newSyncLabel("LIVE", po), 0, 0, High, true)
// 		s.Subscribe(peerId, "SYNC", newSyncLabel("HISTORY", po), 0, 0, Mid, false)
// 	}
// }

// RegisterSwarmSyncerClient registers the client constructor function
// to handle incoming sync streams
func RegisterSwarmSyncerClient(streamer *Registry, db *storage.DBAPI) {
	streamer.RegisterClientFunc("SYNC", func(p *Peer, t string, live bool) (Client, error) {
		return NewSwarmSyncerClient(p, db, true, NewStream("SYNC", t, live))
	})
}
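
// Illustrative only: the stream key t is the base-36 bin label produced by
// FormatSyncBinKey, so constructing the stream for bin po by hand would look
// roughly like this (po and live are assumed to be chosen by the caller):
//
//	stream := NewStream("SYNC", FormatSyncBinKey(po), live)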

// NeedData looks up the chunk for the offered key in the local store and, if
// it is missing, returns a wait function that blocks until the chunk data has
// arrived and is stored.
func (s *SwarmSyncerClient) NeedData(ctx context.Context, key []byte) (wait func()) {
	chunk, _ := s.db.GetOrCreateRequest(ctx, key)
	// TODO: we may want to request from this peer anyway even if the request exists

	// ignoreExistingRequest is temporarily commented out until its functionality is verified.
	// For now, this optimization can be disabled.
	if chunk.ReqC == nil { //|| (s.ignoreExistingRequest && !created) {
		return nil
	}
	// create request and wait until the chunk data arrives and is stored
	return func() {
		chunk.WaitToStore()
	}
}
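
// Illustrative use of the returned wait function (the real caller lives in
// the stream message handlers, not in this file):
//
//	if wait := client.NeedData(ctx, hash); wait != nil {
//		wait() // blocks until the chunk has been delivered and stored
//	}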

// BatchDone returns a function producing the TakeoverProof for the received
// batch. With the provable chunker currently disabled it returns nil.
func (s *SwarmSyncerClient) BatchDone(stream Stream, from uint64, hashes []byte, root []byte) func() (*TakeoverProof, error) {
	// TODO: reenable this with putter/getter refactored code
	// if s.chunker != nil {
	// 	return func() (*TakeoverProof, error) { return s.TakeoverProof(stream, from, hashes, root) }
	// }
	return nil
}

// TakeoverProof advances the client's end index by the number of hashes in
// the batch and returns an (as yet unsigned) TakeoverProof for the covered range.
func (s *SwarmSyncerClient) TakeoverProof(stream Stream, from uint64, hashes []byte, root storage.Address) (*TakeoverProof, error) {
	// for provable syncer currentRoot is non-zero length
	// TODO: reenable this with putter/getter
	// if s.chunker != nil {
	// 	if from > s.sessionAt { // for live syncing currentRoot is always updated
	// 		//expRoot, err := s.chunker.Append(s.currentRoot, bytes.NewReader(hashes), s.retrieveC, s.storeC)
	// 		expRoot, _, err := s.chunker.Append(s.currentRoot, bytes.NewReader(hashes), s.retrieveC)
	// 		if err != nil {
	// 			return nil, err
	// 		}
	// 		if !bytes.Equal(root, expRoot) {
	// 			return nil, fmt.Errorf("HandoverProof mismatch")
	// 		}
	// 		s.currentRoot = root
	// 	} else {
	// 		expHashes := make([]byte, len(hashes))
	// 		_, err := s.sessionReader.ReadAt(expHashes, int64(s.end*HashSize))
	// 		if err != nil && err != io.EOF {
	// 			return nil, err
	// 		}
	// 		if !bytes.Equal(expHashes, hashes) {
	// 			return nil, errors.New("invalid proof")
	// 		}
	// 	}
	// 	return nil, nil
	// }
	s.end += uint64(len(hashes)) / HashSize
	takeover := &Takeover{
		Stream: stream,
		Start:  s.start,
		End:    s.end,
		Root:   root,
	}
	// serialise and sign
	return &TakeoverProof{
		Takeover: takeover,
		Sig:      nil,
	}, nil
}

// Close is a no-op for the syncer client.
func (s *SwarmSyncerClient) Close() {}

// base for parsing and formatting sync bin key
// it must be 2 <= base <= 36
const syncBinKeyBase = 36

// FormatSyncBinKey returns a string representation of
// Kademlia bin number to be used as key for SYNC stream.
func FormatSyncBinKey(bin uint8) string {
	return strconv.FormatUint(uint64(bin), syncBinKeyBase)
}

// ParseSyncBinKey parses the string representation
// and returns the Kademlia bin number.
func ParseSyncBinKey(s string) (uint8, error) {
	bin, err := strconv.ParseUint(s, syncBinKeyBase, 8)
	if err != nil {
		return 0, err
	}
	return uint8(bin), nil
}
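
// For example, bin 10 round-trips through the base-36 key "a":
//
//	key := FormatSyncBinKey(10)    // "a"
//	bin, _ := ParseSyncBinKey(key) // bin == 10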