github.com/pslzym/go-ethereum@v1.8.17-0.20180926104442-4b6824e07b1b/swarm/network/fetcher.go

// Copyright 2018 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

package network

import (
	"context"
	"sync"
	"time"

	"github.com/ethereum/go-ethereum/log"
	"github.com/ethereum/go-ethereum/p2p/enode"
	"github.com/ethereum/go-ethereum/swarm/storage"
)

var searchTimeout = 1 * time.Second

// Time period for which a requested peer is considered skipped.
// Also used in stream delivery.
var RequestTimeout = 10 * time.Second

type RequestFunc func(context.Context, *Request) (*enode.ID, chan struct{}, error)

// Fetcher is created when a chunk is not found locally. It starts a request handler loop once and
// keeps it alive until all active requests are completed. This can happen:
// 1. either because the chunk is delivered
// 2. or because the requestor cancelled/timed out
// The Fetcher destroys itself after it has completed.
// TODO: cancel all forward requests after termination
type Fetcher struct {
	protoRequestFunc RequestFunc     // request function fetcher calls to issue retrieve request for a chunk
	addr             storage.Address // the address of the chunk to be fetched
	offerC           chan *enode.ID  // channel of sources (peer node ids)
	requestC         chan struct{}
	skipCheck        bool
}

type Request struct {
	Addr        storage.Address // chunk address
	Source      *enode.ID       // nodeID of peer to request from (can be nil)
	SkipCheck   bool            // whether to offer the chunk first or deliver directly
	peersToSkip *sync.Map       // peers not to request chunk from (only makes sense if source is nil)
}

// NewRequest returns a new instance of Request based on chunk address, skip check flag and
// a map of peers to skip.
func NewRequest(addr storage.Address, skipCheck bool, peersToSkip *sync.Map) *Request {
	return &Request{
		Addr:        addr,
		SkipCheck:   skipCheck,
		peersToSkip: peersToSkip,
	}
}
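// The helper below is an illustrative sketch, not part of the original file: it shows
// the shape of the entries a peersToSkip map is expected to hold, namely the peer's
// node ID string mapped to the time the request was made (this is exactly what
// doRequest stores and what SkipPeer checks against RequestTimeout). The name
// markPeerRequested is hypothetical.
func markPeerRequested(peersToSkip *sync.Map, id *enode.ID) {
	peersToSkip.Store(id.String(), time.Now())
}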
// SkipPeer returns whether the peer with nodeID should not be requested to deliver a chunk.
// Peers to skip are kept per Request and for a time period of RequestTimeout.
// This function is used in the stream package in Delivery.RequestFromPeers to optimize
// requests for chunks.
func (r *Request) SkipPeer(nodeID string) bool {
	val, ok := r.peersToSkip.Load(nodeID)
	if !ok {
		return false
	}
	t, ok := val.(time.Time)
	if ok && time.Now().After(t.Add(RequestTimeout)) {
		// deadline expired
		r.peersToSkip.Delete(nodeID)
		return false
	}
	return true
}

// FetcherFactory is initialised with a request function and can create fetchers
type FetcherFactory struct {
	request   RequestFunc
	skipCheck bool
}

// NewFetcherFactory takes a request function and a skip check parameter and creates a FetcherFactory
func NewFetcherFactory(request RequestFunc, skipCheck bool) *FetcherFactory {
	return &FetcherFactory{
		request:   request,
		skipCheck: skipCheck,
	}
}

// New constructs a new Fetcher for the given chunk. Peers in peersToSkip are not requested to
// deliver the given chunk. peersToSkip should always contain the peers which are actively requesting
// this chunk, to make sure we don't request the chunk back from them.
// The created Fetcher is started and returned.
func (f *FetcherFactory) New(ctx context.Context, source storage.Address, peersToSkip *sync.Map) storage.NetFetcher {
	fetcher := NewFetcher(source, f.request, f.skipCheck)
	go fetcher.run(ctx, peersToSkip)
	return fetcher
}

// NewFetcher creates a new Fetcher for the given chunk address using the given request function.
func NewFetcher(addr storage.Address, rf RequestFunc, skipCheck bool) *Fetcher {
	return &Fetcher{
		addr:             addr,
		protoRequestFunc: rf,
		offerC:           make(chan *enode.ID),
		requestC:         make(chan struct{}),
		skipCheck:        skipCheck,
	}
}

// Offer is called when an upstream peer offers the chunk via syncing as part of `OfferedHashesMsg`
// and the node does not have the chunk locally.
func (f *Fetcher) Offer(ctx context.Context, source *enode.ID) {
	// First we need this select to make sure that we return if the context is done
	select {
	case <-ctx.Done():
		return
	default:
	}

	// This select alone would not guarantee that we return if the context is done, it could potentially
	// push to offerC instead if offerC is available (see number 2 in https://golang.org/ref/spec#Select_statements)
	select {
	case f.offerC <- source:
	case <-ctx.Done():
	}
}

// Request is called when an upstream peer requests the chunk as part of `RetrieveRequestMsg`,
// or from a local request through FileStore, and the node does not have the chunk locally.
func (f *Fetcher) Request(ctx context.Context) {
	// First we need this select to make sure that we return if the context is done
	select {
	case <-ctx.Done():
		return
	default:
	}

	// This select alone would not guarantee that we return if the context is done, it could potentially
	// push to requestC instead if requestC is available (see number 2 in https://golang.org/ref/spec#Select_statements)
	select {
	case f.requestC <- struct{}{}:
	case <-ctx.Done():
	}
}
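// Illustrative sketch only, not part of the original file: how a caller might drive
// a Fetcher directly, mirroring what FetcherFactory.New does. Offer records a syncing
// peer as a known source; Request marks the chunk as actually wanted, which makes the
// run loop issue retrieve requests. The function name exampleFetcherUsage is hypothetical.
func exampleFetcherUsage(ctx context.Context, rf RequestFunc, addr storage.Address, source *enode.ID) {
	f := NewFetcher(addr, rf, false)
	peersToSkip := &sync.Map{}
	// the run loop lives until ctx is cancelled
	go f.run(ctx, peersToSkip)
	// a syncing peer offered the chunk: it is remembered as a source, but no request is sent yet
	f.Offer(ctx, source)
	// the chunk is actually needed: this triggers the first request in run()
	f.Request(ctx)
}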
// run prepares the Fetcher and keeps it alive within the lifecycle of the passed context
func (f *Fetcher) run(ctx context.Context, peers *sync.Map) {
	var (
		doRequest bool             // determines if retrieval is initiated in the current iteration
		wait      *time.Timer      // timer for search timeout
		waitC     <-chan time.Time // timer channel
		sources   []*enode.ID      // known sources, i.e. peers that offered the chunk
		requested bool             // true if the chunk was actually requested
	)
	gone := make(chan *enode.ID) // channel to signal that a peer we requested from disconnected

	// loop that keeps the fetching process alive
	// after every request a timer is set. If this goes off we request again from another peer
	// note that the previous request is still alive and has the chance to deliver, so
	// re-requesting extends the search, i.e.,
	// if a peer we requested from is gone we issue a new request, so the number of active
	// requests never decreases
	for {
		select {

		// incoming offer
		case source := <-f.offerC:
			log.Trace("new source", "peer addr", source, "request addr", f.addr)
			// 1) the chunk is offered by a syncing peer
			// add to known sources
			sources = append(sources, source)
			// launch a request to the source iff the chunk was requested (not just expected because it is offered by a syncing peer)
			doRequest = requested

		// incoming request
		case <-f.requestC:
			log.Trace("new request", "request addr", f.addr)
			// 2) chunk is requested, set requested flag
			// launch a request iff none has been launched yet
			doRequest = !requested
			requested = true

		// peer we requested from is gone. fall back to another
		// and remove the peer from the peers map
		case id := <-gone:
			log.Trace("peer gone", "peer id", id.String(), "request addr", f.addr)
			peers.Delete(id.String())
			doRequest = requested

		// search timeout: too much time passed since the last request,
		// extend the search to a new peer if we can find one
		case <-waitC:
			log.Trace("search timed out: rerequesting", "request addr", f.addr)
			doRequest = requested

		// the Fetcher's context is closed, we can quit
		case <-ctx.Done():
			log.Trace("terminate fetcher", "request addr", f.addr)
			// TODO: send cancellations to all peers left over in peers map (i.e., those we requested from)
			return
		}

		// need to issue a new request
		if doRequest {
			var err error
			sources, err = f.doRequest(ctx, gone, peers, sources)
			if err != nil {
				log.Info("unable to request", "request addr", f.addr, "err", err)
			}
		}

		// if the wait channel is not set, set it to a timer
		if requested {
			if wait == nil {
				wait = time.NewTimer(searchTimeout)
				defer wait.Stop()
				waitC = wait.C
			} else {
				// stop the timer and drain the channel if it was not drained earlier
				if !wait.Stop() {
					select {
					case <-wait.C:
					default:
					}
				}
				// reset the timer to go off after searchTimeout
				wait.Reset(searchTimeout)
			}
		}
		doRequest = false
	}
}
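// Illustrative sketch only, not part of the original file: the contract a RequestFunc
// is expected to fulfil, inferred from how doRequest uses protoRequestFunc. A real
// implementation (Delivery.RequestFromPeers in the stream package) picks a peer
// (honouring req.Source and the peers-to-skip map), forwards the retrieve request,
// and returns the chosen peer's ID together with a channel that is closed if that
// peer disconnects. The stub below always "succeeds" against a zero-value peer and
// is purely hypothetical.
func stubRequestFunc(ctx context.Context, req *Request) (*enode.ID, chan struct{}, error) {
	id := enode.ID{}
	// closing this channel later would make the Fetcher fall back to another peer
	quit := make(chan struct{})
	return &id, quit, nil
}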
// doRequest attempts to find a peer to request the chunk from
// * first it tries to request explicitly from peers that are known to have offered the chunk
// * if there are no such peers (available) it tries to request it from a peer closest to the chunk address,
//   excluding those in the peersToSkip map
// * if no such peer is found an error is returned
//
// if a request is successful,
// * the peer's address is added to the set of peers to skip
// * the peer's address is removed from prospective sources, and
// * a goroutine is started that reports on the gone channel if the peer disconnects (or terminates its streamer)
func (f *Fetcher) doRequest(ctx context.Context, gone chan *enode.ID, peersToSkip *sync.Map, sources []*enode.ID) ([]*enode.ID, error) {
	var i int
	var sourceID *enode.ID
	var quit chan struct{}

	req := &Request{
		Addr:        f.addr,
		SkipCheck:   f.skipCheck,
		peersToSkip: peersToSkip,
	}

	foundSource := false
	// iterate over known sources
	for i = 0; i < len(sources); i++ {
		req.Source = sources[i]
		var err error
		sourceID, quit, err = f.protoRequestFunc(ctx, req)
		if err == nil {
			// remove the peer from known sources
			// Note: we can modify sources although we are looping over it, because we break from the loop immediately
			sources = append(sources[:i], sources[i+1:]...)
			foundSource = true
			break
		}
	}

	// if there are no known sources, or none are available, we try to request from a node closest to the chunk address
	if !foundSource {
		req.Source = nil
		var err error
		sourceID, quit, err = f.protoRequestFunc(ctx, req)
		if err != nil {
			// no peers found to request from
			return sources, err
		}
	}
	// add the peer to the set of peers to skip from now on
	peersToSkip.Store(sourceID.String(), time.Now())

	// if the quit channel is closed, it indicates that the source peer we requested from
	// disconnected or terminated its streamer
	// here we start a goroutine that watches this channel and reports the source peer on the gone channel
	// this goroutine quits if the fetcher's global context is done, to prevent a goroutine leak
	go func() {
		select {
		case <-quit:
			gone <- sourceID
		case <-ctx.Done():
		}
	}()
	return sources, nil
}
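// Illustrative sketch only, not part of the original file: wiring a FetcherFactory.
// In the real codebase the request function comes from the stream package
// (Delivery.RequestFromPeers); here a hypothetical placeholder is used. New starts
// the fetcher's run loop, which stays alive until ctx is cancelled. The function name
// exampleFactoryWiring is hypothetical.
func exampleFactoryWiring(ctx context.Context, addr storage.Address) storage.NetFetcher {
	// a placeholder request function that always reports success against a zero-value peer
	rf := func(_ context.Context, _ *Request) (*enode.ID, chan struct{}, error) {
		id := enode.ID{}
		return &id, make(chan struct{}), nil
	}
	factory := NewFetcherFactory(rf, false)
	return factory.New(ctx, addr, &sync.Map{})
}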