github.com/Ethersocial/go-esn@v0.3.7/swarm/network/stream/syncer.go

// Copyright 2018 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

package stream

import (
	"context"
	"math"
	"strconv"
	"time"

	"github.com/ethersocial/go-esn/metrics"
	"github.com/ethersocial/go-esn/swarm/log"
	"github.com/ethersocial/go-esn/swarm/storage"
)

const (
	// BatchSize is the maximum number of chunk hashes delivered in a
	// single offered batch.
	BatchSize = 128
)

// SwarmSyncerServer implements a Server for history syncing on bins.
// Offered streams:
// * live request delivery with or without checkback
// * (live/non-live historical) chunk syncing per proximity bin
type SwarmSyncerServer struct {
	po        uint8
	store     storage.SyncChunkStore
	sessionAt uint64
	start     uint64
	live      bool
	quit      chan struct{}
}

// NewSwarmSyncerServer is the constructor for SwarmSyncerServer
func NewSwarmSyncerServer(live bool, po uint8, syncChunkStore storage.SyncChunkStore) (*SwarmSyncerServer, error) {
	sessionAt := syncChunkStore.BinIndex(po)
	var start uint64
	if live {
		start = sessionAt
	}
	return &SwarmSyncerServer{
		po:        po,
		store:     syncChunkStore,
		sessionAt: sessionAt,
		start:     start,
		live:      live,
		quit:      make(chan struct{}),
	}, nil
}

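// Illustrative sketch, not part of the original file (netStore stands for any
// storage.SyncChunkStore): a live server's offered window starts at the bin
// index recorded at session start, while a history server's window starts at
// zero.
//
//	liveServer, _ := NewSwarmSyncerServer(true, 8, netStore)  // serves [sessionAt, MaxUint64)
//	histServer, _ := NewSwarmSyncerServer(false, 8, netStore) // serves [0, sessionAt]
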
// RegisterSwarmSyncerServer registers a constructor function for SYNC
// stream servers on the given streamer Registry.
func RegisterSwarmSyncerServer(streamer *Registry, syncChunkStore storage.SyncChunkStore) {
	streamer.RegisterServerFunc("SYNC", func(p *Peer, t string, live bool) (Server, error) {
		po, err := ParseSyncBinKey(t)
		if err != nil {
			return nil, err
		}
		return NewSwarmSyncerServer(live, po, syncChunkStore)
	})
	// streamer.RegisterServerFunc(stream, func(p *Peer) (Server, error) {
	// 	return NewOutgoingProvableSwarmSyncer(po, db)
	// })
}

// Close needs to be called on a stream server
func (s *SwarmSyncerServer) Close() {
	close(s.quit)
}

// GetData retrieves the actual chunk from netstore
func (s *SwarmSyncerServer) GetData(ctx context.Context, key []byte) ([]byte, error) {
	chunk, err := s.store.Get(ctx, storage.Address(key))
	if err != nil {
		return nil, err
	}
	return chunk.Data(), nil
}

// SetNextBatch retrieves the next batch of hashes from the dbstore
func (s *SwarmSyncerServer) SetNextBatch(from, to uint64) ([]byte, uint64, uint64, *HandoverProof, error) {
	var batch []byte
	i := 0
	if s.live {
		// a live stream starts at the session bin index and is open-ended
		if from == 0 {
			from = s.start
		}
		if to <= from || from >= s.sessionAt {
			to = math.MaxUint64
		}
	} else {
		// a history stream is bounded above by the bin index at session start
		if (to < from && to != 0) || from > s.sessionAt {
			return nil, 0, 0, nil, nil
		}
		if to == 0 || to > s.sessionAt {
			to = s.sessionAt
		}
	}

	var ticker *time.Ticker
	defer func() {
		if ticker != nil {
			ticker.Stop()
		}
	}()
	var wait bool
	for {
		if wait {
			if ticker == nil {
				ticker = time.NewTicker(1000 * time.Millisecond)
			}
			select {
			case <-ticker.C:
			case <-s.quit:
				return nil, 0, 0, nil, nil
			}
		}

		metrics.GetOrRegisterCounter("syncer.setnextbatch.iterator", nil).Inc(1)
		err := s.store.Iterator(from, to, s.po, func(key storage.Address, idx uint64) bool {
			batch = append(batch, key[:]...)
			i++
			to = idx
			return i < BatchSize
		})
		if err != nil {
			return nil, 0, 0, nil, err
		}
		if len(batch) > 0 {
			break
		}
		wait = true
	}

	log.Trace("Swarm syncer offer batch", "po", s.po, "len", i, "from", from, "to", to, "current store count", s.store.BinIndex(s.po))
	return batch, from, to, nil, nil
}

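// exampleConsumeBatch is a hypothetical helper, added here for illustration
// only: it shows the layout of a batch returned by SetNextBatch, which is a
// flat concatenation of chunk addresses, HashSize bytes each.
func exampleConsumeBatch(batch []byte) []storage.Address {
	addrs := make([]storage.Address, 0, len(batch)/HashSize)
	for i := 0; i+HashSize <= len(batch); i += HashSize {
		// copy each HashSize-byte window out into its own address
		addr := make(storage.Address, HashSize)
		copy(addr, batch[i:i+HashSize])
		addrs = append(addrs, addr)
	}
	return addrs
}
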
// SwarmSyncerClient is the Client for SYNC streams
type SwarmSyncerClient struct {
	sessionAt     uint64
	nextC         chan struct{}
	sessionRoot   storage.Address
	sessionReader storage.LazySectionReader
	retrieveC     chan *storage.Chunk
	storeC        chan *storage.Chunk
	store         storage.SyncChunkStore
	// chunker               storage.Chunker
	currentRoot storage.Address
	requestFunc func(chunk *storage.Chunk)
	end, start  uint64
	peer        *Peer
	stream      Stream
}

// NewSwarmSyncerClient is a constructor for provable data exchange syncer
func NewSwarmSyncerClient(p *Peer, store storage.SyncChunkStore, stream Stream) (*SwarmSyncerClient, error) {
	return &SwarmSyncerClient{
		store:  store,
		peer:   p,
		stream: stream,
	}, nil
}

// // NewIncomingProvableSwarmSyncer is a constructor for provable data exchange syncer
// func NewIncomingProvableSwarmSyncer(po int, priority int, index uint64, sessionAt uint64, intervals []uint64, sessionRoot storage.Address, chunker *storage.PyramidChunker, store storage.ChunkStore, p Peer) *SwarmSyncerClient {
// 	retrieveC := make(storage.Chunk, chunksCap)
// 	RunChunkRequestor(p, retrieveC)
// 	storeC := make(storage.Chunk, chunksCap)
// 	RunChunkStorer(store, storeC)
// 	s := &SwarmSyncerClient{
// 		po:            po,
// 		priority:      priority,
// 		sessionAt:     sessionAt,
// 		start:         index,
// 		end:           index,
// 		nextC:         make(chan struct{}, 1),
// 		intervals:     intervals,
// 		sessionRoot:   sessionRoot,
// 		sessionReader: chunker.Join(sessionRoot, retrieveC),
// 		retrieveC:     retrieveC,
// 		storeC:        storeC,
// 	}
// 	return s
// }

// // StartSyncing is called on the Peer to start the syncing process
// // the idea is that it is called only after kademlia is close to healthy
// func StartSyncing(s *Streamer, peerId enode.ID, po uint8, nn bool) {
// 	lastPO := po
// 	if nn {
// 		lastPO = maxPO
// 	}
//
// 	for i := po; i <= lastPO; i++ {
// 		s.Subscribe(peerId, "SYNC", newSyncLabel("LIVE", i), 0, 0, High, true)
// 		s.Subscribe(peerId, "SYNC", newSyncLabel("HISTORY", i), 0, 0, Mid, false)
// 	}
// }

// RegisterSwarmSyncerClient registers the client constructor function
// to handle incoming sync streams
func RegisterSwarmSyncerClient(streamer *Registry, store storage.SyncChunkStore) {
	streamer.RegisterClientFunc("SYNC", func(p *Peer, t string, live bool) (Client, error) {
		return NewSwarmSyncerClient(p, store, NewStream("SYNC", t, live))
	})
}

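// Hypothetical wiring sketch, for illustration only (registry and netStore are
// assumed names, not defined in this file): registering both constructors on
// one Registry lets a peer's subscription to NewStream("SYNC",
// FormatSyncBinKey(po), live) be served remotely by a SwarmSyncerServer and
// consumed locally by a SwarmSyncerClient.
//
//	RegisterSwarmSyncerServer(registry, netStore)
//	RegisterSwarmSyncerClient(registry, netStore)
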
// NeedData returns a wait function if the chunk identified by key is not
// yet available locally; it delegates to the store's FetchFunc
func (s *SwarmSyncerClient) NeedData(ctx context.Context, key []byte) (wait func(context.Context) error) {
	return s.store.FetchFunc(ctx, key)
}

// BatchDone would return a takeover proof constructor once a batch is
// received; the provable path is currently disabled, so it returns nil
func (s *SwarmSyncerClient) BatchDone(stream Stream, from uint64, hashes []byte, root []byte) func() (*TakeoverProof, error) {
	// TODO: reenable this with putter/getter refactored code
	// if s.chunker != nil {
	// 	return func() (*TakeoverProof, error) { return s.TakeoverProof(stream, from, hashes, root) }
	// }
	return nil
}

func (s *SwarmSyncerClient) TakeoverProof(stream Stream, from uint64, hashes []byte, root storage.Address) (*TakeoverProof, error) {
	// for provable syncer currentRoot is non-zero length
	// TODO: reenable this with putter/getter
	// if s.chunker != nil {
	// 	if from > s.sessionAt { // for live syncing currentRoot is always updated
	// 		//expRoot, err := s.chunker.Append(s.currentRoot, bytes.NewReader(hashes), s.retrieveC, s.storeC)
	// 		expRoot, _, err := s.chunker.Append(s.currentRoot, bytes.NewReader(hashes), s.retrieveC)
	// 		if err != nil {
	// 			return nil, err
	// 		}
	// 		if !bytes.Equal(root, expRoot) {
	// 			return nil, fmt.Errorf("HandoverProof mismatch")
	// 		}
	// 		s.currentRoot = root
	// 	} else {
	// 		expHashes := make([]byte, len(hashes))
	// 		_, err := s.sessionReader.ReadAt(expHashes, int64(s.end*HashSize))
	// 		if err != nil && err != io.EOF {
	// 			return nil, err
	// 		}
	// 		if !bytes.Equal(expHashes, hashes) {
	// 			return nil, errors.New("invalid proof")
	// 		}
	// 	}
	// 	return nil, nil
	// }
	s.end += uint64(len(hashes)) / HashSize
	takeover := &Takeover{
		Stream: stream,
		Start:  s.start,
		End:    s.end,
		Root:   root,
	}
	// serialise and sign
	return &TakeoverProof{
		Takeover: takeover,
		Sig:      nil,
	}, nil
}

// Close implements the Client interface; there is nothing to clean up
func (s *SwarmSyncerClient) Close() {}

// base for parsing and formatting sync bin key
// it must be 2 <= base <= 36
const syncBinKeyBase = 36

// FormatSyncBinKey returns a string representation of
// Kademlia bin number to be used as key for SYNC stream.
func FormatSyncBinKey(bin uint8) string {
	return strconv.FormatUint(uint64(bin), syncBinKeyBase)
}

// ParseSyncBinKey parses the string representation
// and returns the Kademlia bin number.
func ParseSyncBinKey(s string) (uint8, error) {
	bin, err := strconv.ParseUint(s, syncBinKeyBase, 8)
	if err != nil {
		return 0, err
	}
	return uint8(bin), nil
}
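
// Illustrative round trip through the base-36 encoding above (the values
// follow from strconv with base 36, where digits run 0-9 then a-z):
//
//	key := FormatSyncBinKey(16)     // "g"
//	bin, _ := ParseSyncBinKey(key)  // uint8(16)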