github.com/alexdevranger/node-1.8.27@v0.0.0-20221128213301-aa5841e41d2d/swarm/network/stream/syncer.go (about)

     1  // Copyright 2018 The go-ethereum Authors
     2  // This file is part of the go-dubxcoin library.
     3  //
     4  // The go-dubxcoin library is free software: you can redistribute it and/or modify
     5  // it under the terms of the GNU Lesser General Public License as published by
     6  // the Free Software Foundation, either version 3 of the License, or
     7  // (at your option) any later version.
     8  //
     9  // The go-dubxcoin library is distributed in the hope that it will be useful,
    10  // but WITHOUT ANY WARRANTY; without even the implied warranty of
    11  // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
    12  // GNU Lesser General Public License for more details.
    13  //
    14  // You should have received a copy of the GNU Lesser General Public License
    15  // along with the go-dubxcoin library. If not, see <http://www.gnu.org/licenses/>.
    16  
    17  package stream
    18  
    19  import (
    20  	"context"
    21  	"strconv"
    22  	"time"
    23  
    24  	"github.com/alexdevranger/node-1.8.27/metrics"
    25  	"github.com/alexdevranger/node-1.8.27/swarm/log"
    26  	"github.com/alexdevranger/node-1.8.27/swarm/storage"
    27  )
    28  
const (
	// BatchSize is the maximum number of chunk hashes collected into a
	// single batch by SwarmSyncerServer.SetNextBatch.
	BatchSize = 128
)
    32  
// SwarmSyncerServer implements a Server for history syncing on bins
// offered streams:
// * live request delivery with or without checkback
// * (live/non-live historical) chunk syncing per proximity bin
type SwarmSyncerServer struct {
	po    uint8                  // proximity order bin this server serves
	store storage.SyncChunkStore // chunk store hashes and chunk data are read from
	quit  chan struct{}          // closed by Close to unblock a waiting SetNextBatch
}
    42  
    43  // NewSwarmSyncerServer is constructor for SwarmSyncerServer
    44  func NewSwarmSyncerServer(po uint8, syncChunkStore storage.SyncChunkStore) (*SwarmSyncerServer, error) {
    45  	return &SwarmSyncerServer{
    46  		po:    po,
    47  		store: syncChunkStore,
    48  		quit:  make(chan struct{}),
    49  	}, nil
    50  }
    51  
    52  func RegisterSwarmSyncerServer(streamer *Registry, syncChunkStore storage.SyncChunkStore) {
    53  	streamer.RegisterServerFunc("SYNC", func(_ *Peer, t string, _ bool) (Server, error) {
    54  		po, err := ParseSyncBinKey(t)
    55  		if err != nil {
    56  			return nil, err
    57  		}
    58  		return NewSwarmSyncerServer(po, syncChunkStore)
    59  	})
    60  	// streamer.RegisterServerFunc(stream, func(p *Peer) (Server, error) {
    61  	// 	return NewOutgoingProvableSwarmSyncer(po, db)
    62  	// })
    63  }
    64  
// Close needs to be called on a stream server. Closing the quit channel
// unblocks any SetNextBatch call that is waiting for new chunks.
func (s *SwarmSyncerServer) Close() {
	close(s.quit)
}
    69  
    70  // GetData retrieves the actual chunk from netstore
    71  func (s *SwarmSyncerServer) GetData(ctx context.Context, key []byte) ([]byte, error) {
    72  	chunk, err := s.store.Get(ctx, storage.Address(key))
    73  	if err != nil {
    74  		return nil, err
    75  	}
    76  	return chunk.Data(), nil
    77  }
    78  
// SessionIndex returns current storage bin (po) index. The error result
// is always nil here; the signature satisfies the Server interface.
func (s *SwarmSyncerServer) SessionIndex() (uint64, error) {
	return s.store.BinIndex(s.po), nil
}
    83  
    84  // GetBatch retrieves the next batch of hashes from the dbstore
    85  func (s *SwarmSyncerServer) SetNextBatch(from, to uint64) ([]byte, uint64, uint64, *HandoverProof, error) {
    86  	var batch []byte
    87  	i := 0
    88  
    89  	var ticker *time.Ticker
    90  	defer func() {
    91  		if ticker != nil {
    92  			ticker.Stop()
    93  		}
    94  	}()
    95  	var wait bool
    96  	for {
    97  		if wait {
    98  			if ticker == nil {
    99  				ticker = time.NewTicker(1000 * time.Millisecond)
   100  			}
   101  			select {
   102  			case <-ticker.C:
   103  			case <-s.quit:
   104  				return nil, 0, 0, nil, nil
   105  			}
   106  		}
   107  
   108  		metrics.GetOrRegisterCounter("syncer.setnextbatch.iterator", nil).Inc(1)
   109  		err := s.store.Iterator(from, to, s.po, func(key storage.Address, idx uint64) bool {
   110  			batch = append(batch, key[:]...)
   111  			i++
   112  			to = idx
   113  			return i < BatchSize
   114  		})
   115  		if err != nil {
   116  			return nil, 0, 0, nil, err
   117  		}
   118  		if len(batch) > 0 {
   119  			break
   120  		}
   121  		wait = true
   122  	}
   123  
   124  	log.Trace("Swarm syncer offer batch", "po", s.po, "len", i, "from", from, "to", to, "current store count", s.store.BinIndex(s.po))
   125  	return batch, from, to, nil, nil
   126  }
   127  
// SwarmSyncerClient is the client side of a SYNC stream: it receives
// offered hashes from a peer and stores the delivered chunks.
type SwarmSyncerClient struct {
	store  storage.SyncChunkStore // destination store for synced chunks
	peer   *Peer                  // peer the stream is established with
	stream Stream                 // the SYNC stream this client serves
}
   134  
   135  // NewSwarmSyncerClient is a contructor for provable data exchange syncer
   136  func NewSwarmSyncerClient(p *Peer, store storage.SyncChunkStore, stream Stream) (*SwarmSyncerClient, error) {
   137  	return &SwarmSyncerClient{
   138  		store:  store,
   139  		peer:   p,
   140  		stream: stream,
   141  	}, nil
   142  }
   143  
// // NewIncomingProvableSwarmSyncer is a constructor for provable data exchange syncer
   145  // func NewIncomingProvableSwarmSyncer(po int, priority int, index uint64, sessionAt uint64, intervals []uint64, sessionRoot storage.Address, chunker *storage.PyramidChunker, store storage.ChunkStore, p Peer) *SwarmSyncerClient {
   146  // 	retrieveC := make(storage.Chunk, chunksCap)
   147  // 	RunChunkRequestor(p, retrieveC)
   148  // 	storeC := make(storage.Chunk, chunksCap)
   149  // 	RunChunkStorer(store, storeC)
   150  // 	s := &SwarmSyncerClient{
   151  // 		po:            po,
   152  // 		priority:      priority,
   153  // 		sessionAt:     sessionAt,
   154  // 		start:         index,
   155  // 		end:           index,
   156  // 		nextC:         make(chan struct{}, 1),
   157  // 		intervals:     intervals,
   158  // 		sessionRoot:   sessionRoot,
   159  // 		sessionReader: chunker.Join(sessionRoot, retrieveC),
   160  // 		retrieveC:     retrieveC,
   161  // 		storeC:        storeC,
   162  // 	}
   163  // 	return s
   164  // }
   165  
   166  // // StartSyncing is called on the Peer to start the syncing process
   167  // // the idea is that it is called only after kademlia is close to healthy
   168  // func StartSyncing(s *Streamer, peerId enode.ID, po uint8, nn bool) {
   169  // 	lastPO := po
   170  // 	if nn {
   171  // 		lastPO = maxPO
   172  // 	}
   173  //
   174  // 	for i := po; i <= lastPO; i++ {
   175  // 		s.Subscribe(peerId, "SYNC", newSyncLabel("LIVE", po), 0, 0, High, true)
   176  // 		s.Subscribe(peerId, "SYNC", newSyncLabel("HISTORY", po), 0, 0, Mid, false)
   177  // 	}
   178  // }
   179  
   180  // RegisterSwarmSyncerClient registers the client constructor function for
   181  // to handle incoming sync streams
   182  func RegisterSwarmSyncerClient(streamer *Registry, store storage.SyncChunkStore) {
   183  	streamer.RegisterClientFunc("SYNC", func(p *Peer, t string, live bool) (Client, error) {
   184  		return NewSwarmSyncerClient(p, store, NewStream("SYNC", t, live))
   185  	})
   186  }
   187  
// NeedData delegates to the store's FetchFunc for key. NOTE(review):
// presumably a nil wait function means the chunk is already stored and
// need not be requested — confirm against storage.SyncChunkStore docs.
func (s *SwarmSyncerClient) NeedData(ctx context.Context, key []byte) (wait func(context.Context) error) {
	return s.store.FetchFunc(ctx, key)
}
   192  
// BatchDone is currently a noop: provable syncing is disabled, so no
// takeover-proof function is ever produced.
func (s *SwarmSyncerClient) BatchDone(stream Stream, from uint64, hashes []byte, root []byte) func() (*TakeoverProof, error) {
	// TODO: reenable this with putter/getter refactored code
	// if s.chunker != nil {
	// 	return func() (*TakeoverProof, error) { return s.TakeoverProof(stream, from, hashes, root) }
	// }
	return nil
}
   201  
// Close is a noop; the syncer client holds no resources to release.
func (s *SwarmSyncerClient) Close() {}
   203  
// base for parsing and formatting sync bin key
// it must be 2 <= base <= 36
const syncBinKeyBase = 36
   207  
   208  // FormatSyncBinKey returns a string representation of
   209  // Kademlia bin number to be used as key for SYNC stream.
   210  func FormatSyncBinKey(bin uint8) string {
   211  	return strconv.FormatUint(uint64(bin), syncBinKeyBase)
   212  }
   213  
   214  // ParseSyncBinKey parses the string representation
   215  // and returns the Kademlia bin number.
   216  func ParseSyncBinKey(s string) (uint8, error) {
   217  	bin, err := strconv.ParseUint(s, syncBinKeyBase, 8)
   218  	if err != nil {
   219  		return 0, err
   220  	}
   221  	return uint8(bin), nil
   222  }