github.com/aaa256/atlantis@v0.0.0-20210707112435-42ee889287a2/swarm/network/stream/syncer.go (about)

// Copyright 2018 The go-athereum Authors
// This file is part of the go-athereum library.
//
// The go-athereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-athereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-athereum library. If not, see <http://www.gnu.org/licenses/>.

package stream

import (
	"math"
	"strconv"
	"time"

	"github.com/athereum/go-athereum/metrics"
	"github.com/athereum/go-athereum/swarm/log"
	"github.com/athereum/go-athereum/swarm/storage"
)

const (
	// BatchSize is the maximum number of chunk hashes offered in one batch.
	// BatchSize = 2
	BatchSize = 128
)

// SwarmSyncerServer implements a Server for history syncing on bins.
// Offered streams:
// * live request delivery with or without checkback
// * (live/non-live historical) chunk syncing per proximity bin
type SwarmSyncerServer struct {
	po        uint8          // proximity order (Kademlia bin) this server syncs
	db        *storage.DBAPI // local chunk store API
	sessionAt uint64         // storage index of the bin at construction time
	start     uint64         // first index to serve: sessionAt for live, 0 for history
	quit      chan struct{}  // closed by Close to terminate batch iteration
}

// NewSwarmSyncerServer is the constructor for SwarmSyncerServer
func NewSwarmSyncerServer(live bool, po uint8, db *storage.DBAPI) (*SwarmSyncerServer, error) {
	sessionAt := db.CurrentBucketStorageIndex(po)
	var start uint64
	if live {
		start = sessionAt
	}
	return &SwarmSyncerServer{
		po:        po,
		db:        db,
		sessionAt: sessionAt,
		start:     start,
		quit:      make(chan struct{}),
	}, nil
}
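
// Illustrative sketch (not part of the original file; db stands for an
// assumed *storage.DBAPI): the live flag splits the index space at
// sessionAt, so a live and a history server over the same bin start from
// different points.
//
// 	liveSrv, _ := NewSwarmSyncerServer(true, 3, db)  // start == sessionAt: offer new chunks only
// 	histSrv, _ := NewSwarmSyncerServer(false, 3, db) // start == 0: offer history from the beginning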

// RegisterSwarmSyncerServer registers the SYNC server constructor on the
// given streamer registry; the stream key encodes the proximity bin.
func RegisterSwarmSyncerServer(streamer *Registry, db *storage.DBAPI) {
	streamer.RegisterServerFunc("SYNC", func(p *Peer, t string, live bool) (Server, error) {
		po, err := ParseSyncBinKey(t)
		if err != nil {
			return nil, err
		}
		return NewSwarmSyncerServer(live, po, db)
	})
	// streamer.RegisterServerFunc(stream, func(p *Peer) (Server, error) {
	// 	return NewOutgoingProvableSwarmSyncer(po, db)
	// })
}
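
// For example (illustrative): a subscription to NewStream("SYNC",
// FormatSyncBinKey(5), true) reaches the function above with t == "5" and
// live == true, yielding a live SwarmSyncerServer for proximity bin 5.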

// Close needs to be called on a stream server
func (s *SwarmSyncerServer) Close() {
	close(s.quit)
}

// GetData retrieves the actual chunk from the localstore
func (s *SwarmSyncerServer) GetData(key []byte) ([]byte, error) {
	chunk, err := s.db.Get(storage.Address(key))
	if err == storage.ErrFetching {
		<-chunk.ReqC
	} else if err != nil {
		return nil, err
	}
	return chunk.SData, nil
}
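
// Note (observation on the code above, not original commentary): an
// in-flight fetch (storage.ErrFetching) is waited out on chunk.ReqC rather
// than surfaced as an error, so GetData only fails on other store errors.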

// SetNextBatch retrieves the next batch of hashes from the dbstore
func (s *SwarmSyncerServer) SetNextBatch(from, to uint64) ([]byte, uint64, uint64, *HandoverProof, error) {
	var batch []byte
	i := 0
	if from == 0 {
		from = s.start
	}
	if to <= from || from >= s.sessionAt {
		// open-ended range: serve everything available from `from` onwards
		to = math.MaxUint64
	}
	var ticker *time.Ticker
	defer func() {
		if ticker != nil {
			ticker.Stop()
		}
	}()
	var wait bool
	for {
		if wait {
			if ticker == nil {
				ticker = time.NewTicker(1000 * time.Millisecond)
			}
			// nothing to offer yet: poll the store every second until new
			// chunks arrive or the server is closed
			select {
			case <-ticker.C:
			case <-s.quit:
				return nil, 0, 0, nil, nil
			}
		}

		metrics.GetOrRegisterCounter("syncer.setnextbatch.iterator", nil).Inc(1)
		err := s.db.Iterator(from, to, s.po, func(addr storage.Address, idx uint64) bool {
			batch = append(batch, addr[:]...)
			i++
			to = idx
			return i < BatchSize
		})
		if err != nil {
			return nil, 0, 0, nil, err
		}
		if len(batch) > 0 {
			break
		}
		wait = true
	}

	log.Trace("Swarm syncer offer batch", "po", s.po, "len", i, "from", from, "to", to, "current store count", s.db.CurrentBucketStorageIndex(s.po))
	return batch, from, to, nil, nil
}
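
// Driver sketch (illustrative; the actual call site is the streamer protocol
// and this loop is an assumption, not the library's code): the returned `to`
// is the index of the last offered hash, so a hypothetical caller could
// continue from there.
//
// 	hashes, from, to, _, err := srv.SetNextBatch(0, 0) // from == 0 resolves to s.start
// 	// ... offer the hashes, then request the next window
// 	hashes, from, to, _, err = srv.SetNextBatch(to+1, 0)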

// SwarmSyncerClient is the Client implementation for chunk syncing streams
type SwarmSyncerClient struct {
	sessionAt     uint64
	nextC         chan struct{}
	sessionRoot   storage.Address
	sessionReader storage.LazySectionReader
	retrieveC     chan *storage.Chunk
	storeC        chan *storage.Chunk
	db            *storage.DBAPI
	// chunker               storage.Chunker
	currentRoot           storage.Address
	requestFunc           func(chunk *storage.Chunk)
	end, start            uint64
	peer                  *Peer
	ignoreExistingRequest bool
	stream                Stream
}

// NewSwarmSyncerClient is the constructor for a provable data exchange syncer
func NewSwarmSyncerClient(p *Peer, db *storage.DBAPI, ignoreExistingRequest bool, stream Stream) (*SwarmSyncerClient, error) {
	return &SwarmSyncerClient{
		db:   db,
		peer: p,
		ignoreExistingRequest: ignoreExistingRequest,
		stream:                stream,
	}, nil
}

// // NewIncomingProvableSwarmSyncer is a constructor for a provable data exchange syncer
// func NewIncomingProvableSwarmSyncer(po int, priority int, index uint64, sessionAt uint64, intervals []uint64, sessionRoot storage.Key, chunker *storage.PyramidChunker, store storage.ChunkStore, p Peer) *SwarmSyncerClient {
// 	retrieveC := make(storage.Chunk, chunksCap)
// 	RunChunkRequestor(p, retrieveC)
// 	storeC := make(storage.Chunk, chunksCap)
// 	RunChunkStorer(store, storeC)
// 	s := &SwarmSyncerClient{
// 		po:            po,
// 		priority:      priority,
// 		sessionAt:     sessionAt,
// 		start:         index,
// 		end:           index,
// 		nextC:         make(chan struct{}, 1),
// 		intervals:     intervals,
// 		sessionRoot:   sessionRoot,
// 		sessionReader: chunker.Join(sessionRoot, retrieveC),
// 		retrieveC:     retrieveC,
// 		storeC:        storeC,
// 	}
// 	return s
// }

// // StartSyncing is called on the Peer to start the syncing process
// // the idea is that it is called only after kademlia is close to healthy
// func StartSyncing(s *Streamer, peerId discover.NodeID, po uint8, nn bool) {
// 	lastPO := po
// 	if nn {
// 		lastPO = maxPO
// 	}
//
// 	for i := po; i <= lastPO; i++ {
// 		s.Subscribe(peerId, "SYNC", newSyncLabel("LIVE", po), 0, 0, High, true)
// 		s.Subscribe(peerId, "SYNC", newSyncLabel("HISTORY", po), 0, 0, Mid, false)
// 	}
// }

// RegisterSwarmSyncerClient registers the client constructor function
// to handle incoming sync streams
func RegisterSwarmSyncerClient(streamer *Registry, db *storage.DBAPI) {
	streamer.RegisterClientFunc("SYNC", func(p *Peer, t string, live bool) (Client, error) {
		return NewSwarmSyncerClient(p, db, true, NewStream("SYNC", t, live))
	})
}
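
// Observation (not original commentary): this mirrors the server-side
// registration above; every SYNC subscription gets a fresh client with
// ignoreExistingRequest set to true, although that flag is currently a
// no-op (see NeedData below).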

// NeedData checks whether the chunk for the given key is missing locally
// and, if so, returns a wait function that blocks until it arrives and is stored
func (s *SwarmSyncerClient) NeedData(key []byte) (wait func()) {
	chunk, _ := s.db.GetOrCreateRequest(key)
	// TODO: we may want to request from this peer anyway even if the request exists

	// ignoreExistingRequest is temporarily commented out until its functionality is verified.
	// For now, this optimization can be disabled.
	if chunk.ReqC == nil { //|| (s.ignoreExistingRequest && !created) {
		return nil
	}
	// create request and wait until the chunk data arrives and is stored
	return func() {
		chunk.WaitToStore()
	}
}
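
// Usage sketch (illustrative; the real caller is the streamer's incoming
// message handler): a nil return means the chunk is already stored locally.
//
// 	if wait := client.NeedData(hash); wait != nil {
// 		wait() // blocks until the chunk is delivered and stored
// 	}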

// BatchDone would return a TakeoverProof constructor for a processed batch;
// proofs are currently disabled
func (s *SwarmSyncerClient) BatchDone(stream Stream, from uint64, hashes []byte, root []byte) func() (*TakeoverProof, error) {
	// TODO: reenable this with putter/getter refactored code
	// if s.chunker != nil {
	// 	return func() (*TakeoverProof, error) { return s.TakeoverProof(stream, from, hashes, root) }
	// }
	return nil
}

func (s *SwarmSyncerClient) TakeoverProof(stream Stream, from uint64, hashes []byte, root storage.Address) (*TakeoverProof, error) {
	// for provable syncer currentRoot is non-zero length
	// TODO: reenable this with putter/getter
	// if s.chunker != nil {
	// 	if from > s.sessionAt { // for live syncing currentRoot is always updated
	// 		//expRoot, err := s.chunker.Append(s.currentRoot, bytes.NewReader(hashes), s.retrieveC, s.storeC)
	// 		expRoot, _, err := s.chunker.Append(s.currentRoot, bytes.NewReader(hashes), s.retrieveC)
	// 		if err != nil {
	// 			return nil, err
	// 		}
	// 		if !bytes.Equal(root, expRoot) {
	// 			return nil, fmt.Errorf("HandoverProof mismatch")
	// 		}
	// 		s.currentRoot = root
	// 	} else {
	// 		expHashes := make([]byte, len(hashes))
	// 		_, err := s.sessionReader.ReadAt(expHashes, int64(s.end*HashSize))
	// 		if err != nil && err != io.EOF {
	// 			return nil, err
	// 		}
	// 		if !bytes.Equal(expHashes, hashes) {
	// 			return nil, errors.New("invalid proof")
	// 		}
	// 	}
	// 	return nil, nil
	// }
	s.end += uint64(len(hashes)) / HashSize
	takeover := &Takeover{
		Stream: stream,
		Start:  s.start,
		End:    s.end,
		Root:   root,
	}
	// serialise and sign
	return &TakeoverProof{
		Takeover: takeover,
		Sig:      nil,
	}, nil
}
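
// Accounting note (observation, assuming the package's default 32-byte
// HashSize): s.end advances by the number of hashes in the payload, so a
// full 4096-byte batch moves the takeover window forward by exactly
// BatchSize (128) chunks.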

func (s *SwarmSyncerClient) Close() {}

// syncBinKeyBase is the base for parsing and formatting the sync bin key;
// it must satisfy 2 <= base <= 36
const syncBinKeyBase = 36

// FormatSyncBinKey returns a string representation of a
// Kademlia bin number to be used as the key for a SYNC stream.
func FormatSyncBinKey(bin uint8) string {
	return strconv.FormatUint(uint64(bin), syncBinKeyBase)
}
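
// For example, FormatSyncBinKey(5) == "5" and FormatSyncBinKey(10) == "a";
// base 36 keeps every possible bin (0..255) within two key characters.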

// ParseSyncBinKey parses the string representation
// and returns the Kademlia bin number.
func ParseSyncBinKey(s string) (uint8, error) {
	bin, err := strconv.ParseUint(s, syncBinKeyBase, 8)
	if err != nil {
		return 0, err
	}
	return uint8(bin), nil
}
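
// Round-trip sketch (illustrative): ParseSyncBinKey(FormatSyncBinKey(b)) == b
// for every uint8 b; anything above 255, e.g. "74" (== 256 in base 36),
// fails the 8-bit ParseUint range check.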