github.com/sberex/go-sberex@v1.8.2-0.20181113200658-ed96ac38f7d7/swarm/network/depo.go (about) 1 // This file is part of the go-sberex library. The go-sberex library is 2 // free software: you can redistribute it and/or modify it under the terms 3 // of the GNU Lesser General Public License as published by the Free 4 // Software Foundation, either version 3 of the License, or (at your option) 5 // any later version. 6 // 7 // The go-sberex library is distributed in the hope that it will be useful, 8 // but WITHOUT ANY WARRANTY; without even the implied warranty of 9 // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser 10 // General Public License <http://www.gnu.org/licenses/> for more details. 11 12 package network 13 14 import ( 15 "bytes" 16 "encoding/binary" 17 "fmt" 18 "time" 19 20 "github.com/Sberex/go-sberex/log" 21 "github.com/Sberex/go-sberex/metrics" 22 "github.com/Sberex/go-sberex/swarm/storage" 23 ) 24 25 //metrics variables 26 var ( 27 syncReceiveCount = metrics.NewRegisteredCounter("network.sync.recv.count", nil) 28 syncReceiveIgnore = metrics.NewRegisteredCounter("network.sync.recv.ignore", nil) 29 syncSendCount = metrics.NewRegisteredCounter("network.sync.send.count", nil) 30 syncSendRefused = metrics.NewRegisteredCounter("network.sync.send.refused", nil) 31 syncSendNotFound = metrics.NewRegisteredCounter("network.sync.send.notfound", nil) 32 ) 33 34 // Handler for storage/retrieval related protocol requests 35 // implements the StorageHandler interface used by the bzz protocol 36 type Depo struct { 37 hashfunc storage.SwarmHasher 38 localStore storage.ChunkStore 39 netStore storage.ChunkStore 40 } 41 42 func NewDepo(hash storage.SwarmHasher, localStore, remoteStore storage.ChunkStore) *Depo { 43 return &Depo{ 44 hashfunc: hash, 45 localStore: localStore, 46 netStore: remoteStore, // entrypoint internal 47 } 48 } 49 50 // Handles UnsyncedKeysMsg after msg decoding - unsynced hashes upto sync state 51 // * the remote sync state is just stored and handled in protocol 52 // * filters through the new syncRequests and send the ones missing 53 // * back immediately as a deliveryRequest message 54 // * empty message just pings back for more (is this needed?) 55 // * strict signed sync states may be needed. 56 func (self *Depo) HandleUnsyncedKeysMsg(req *unsyncedKeysMsgData, p *peer) error { 57 unsynced := req.Unsynced 58 var missing []*syncRequest 59 var chunk *storage.Chunk 60 var err error 61 for _, req := range unsynced { 62 // skip keys that are found, 63 chunk, err = self.localStore.Get(req.Key[:]) 64 if err != nil || chunk.SData == nil { 65 missing = append(missing, req) 66 } 67 } 68 log.Debug(fmt.Sprintf("Depo.HandleUnsyncedKeysMsg: received %v unsynced keys: %v missing. new state: %v", len(unsynced), len(missing), req.State)) 69 log.Trace(fmt.Sprintf("Depo.HandleUnsyncedKeysMsg: received %v", unsynced)) 70 // send delivery request with missing keys 71 err = p.deliveryRequest(missing) 72 if err != nil { 73 return err 74 } 75 // set peers state to persist 76 p.syncState = req.State 77 return nil 78 } 79 80 // Handles deliveryRequestMsg 81 // * serves actual chunks asked by the remote peer 82 // by pushing to the delivery queue (sync db) of the correct priority 83 // (remote peer is free to reprioritize) 84 // * the message implies remote peer wants more, so trigger for 85 // * new outgoing unsynced keys message is fired 86 func (self *Depo) HandleDeliveryRequestMsg(req *deliveryRequestMsgData, p *peer) error { 87 deliver := req.Deliver 88 // queue the actual delivery of a chunk () 89 log.Trace(fmt.Sprintf("Depo.HandleDeliveryRequestMsg: received %v delivery requests: %v", len(deliver), deliver)) 90 for _, sreq := range deliver { 91 // TODO: look up in cache here or in deliveries 92 // priorities are taken from the message so the remote party can 93 // reprioritise to at their leisure 94 // r = self.pullCached(sreq.Key) // pulls and deletes from cache 95 Push(p, sreq.Key, sreq.Priority) 96 } 97 98 // sends it out as unsyncedKeysMsg 99 p.syncer.sendUnsyncedKeys() 100 return nil 101 } 102 103 // the entrypoint for store requests coming from the bzz wire protocol 104 // if key found locally, return. otherwise 105 // remote is untrusted, so hash is verified and chunk passed on to NetStore 106 func (self *Depo) HandleStoreRequestMsg(req *storeRequestMsgData, p *peer) { 107 var islocal bool 108 req.from = p 109 chunk, err := self.localStore.Get(req.Key) 110 switch { 111 case err != nil: 112 log.Trace(fmt.Sprintf("Depo.handleStoreRequest: %v not found locally. create new chunk/request", req.Key)) 113 // not found in memory cache, ie., a genuine store request 114 // create chunk 115 syncReceiveCount.Inc(1) 116 chunk = storage.NewChunk(req.Key, nil) 117 118 case chunk.SData == nil: 119 // found chunk in memory store, needs the data, validate now 120 log.Trace(fmt.Sprintf("Depo.HandleStoreRequest: %v. request entry found", req)) 121 122 default: 123 // data is found, store request ignored 124 // this should update access count? 125 syncReceiveIgnore.Inc(1) 126 log.Trace(fmt.Sprintf("Depo.HandleStoreRequest: %v found locally. ignore.", req)) 127 islocal = true 128 //return 129 } 130 131 hasher := self.hashfunc() 132 hasher.Write(req.SData) 133 if !bytes.Equal(hasher.Sum(nil), req.Key) { 134 // data does not validate, ignore 135 // TODO: peer should be penalised/dropped? 136 log.Warn(fmt.Sprintf("Depo.HandleStoreRequest: chunk invalid. store request ignored: %v", req)) 137 return 138 } 139 140 if islocal { 141 return 142 } 143 // update chunk with size and data 144 chunk.SData = req.SData // protocol validates that SData is minimum 9 bytes long (int64 size + at least one byte of data) 145 chunk.Size = int64(binary.LittleEndian.Uint64(req.SData[0:8])) 146 log.Trace(fmt.Sprintf("delivery of %v from %v", chunk, p)) 147 chunk.Source = p 148 self.netStore.Put(chunk) 149 } 150 151 // entrypoint for retrieve requests coming from the bzz wire protocol 152 // checks swap balance - return if peer has no credit 153 func (self *Depo) HandleRetrieveRequestMsg(req *retrieveRequestMsgData, p *peer) { 154 req.from = p 155 // swap - record credit for 1 request 156 // note that only charge actual reqsearches 157 var err error 158 if p.swap != nil { 159 err = p.swap.Add(1) 160 } 161 if err != nil { 162 log.Warn(fmt.Sprintf("Depo.HandleRetrieveRequest: %v - cannot process request: %v", req.Key.Log(), err)) 163 return 164 } 165 166 // call storage.NetStore#Get which 167 // blocks until local retrieval finished 168 // launches cloud retrieval 169 chunk, _ := self.netStore.Get(req.Key) 170 req = self.strategyUpdateRequest(chunk.Req, req) 171 // check if we can immediately deliver 172 if chunk.SData != nil { 173 log.Trace(fmt.Sprintf("Depo.HandleRetrieveRequest: %v - content found, delivering...", req.Key.Log())) 174 175 if req.MaxSize == 0 || int64(req.MaxSize) >= chunk.Size { 176 sreq := &storeRequestMsgData{ 177 Id: req.Id, 178 Key: chunk.Key, 179 SData: chunk.SData, 180 requestTimeout: req.timeout, // 181 } 182 syncSendCount.Inc(1) 183 p.syncer.addRequest(sreq, DeliverReq) 184 } else { 185 syncSendRefused.Inc(1) 186 log.Trace(fmt.Sprintf("Depo.HandleRetrieveRequest: %v - content found, not wanted", req.Key.Log())) 187 } 188 } else { 189 syncSendNotFound.Inc(1) 190 log.Trace(fmt.Sprintf("Depo.HandleRetrieveRequest: %v - content not found locally. asked swarm for help. will get back", req.Key.Log())) 191 } 192 } 193 194 // add peer request the chunk and decides the timeout for the response if still searching 195 func (self *Depo) strategyUpdateRequest(rs *storage.RequestStatus, origReq *retrieveRequestMsgData) (req *retrieveRequestMsgData) { 196 log.Trace(fmt.Sprintf("Depo.strategyUpdateRequest: key %v", origReq.Key.Log())) 197 // we do not create an alternative one 198 req = origReq 199 if rs != nil { 200 self.addRequester(rs, req) 201 req.setTimeout(self.searchTimeout(rs, req)) 202 } 203 return 204 } 205 206 // decides the timeout promise sent with the immediate peers response to a retrieve request 207 // if timeout is explicitly set and expired 208 func (self *Depo) searchTimeout(rs *storage.RequestStatus, req *retrieveRequestMsgData) (timeout *time.Time) { 209 reqt := req.getTimeout() 210 t := time.Now().Add(searchTimeout) 211 if reqt != nil && reqt.Before(t) { 212 return reqt 213 } else { 214 return &t 215 } 216 } 217 218 /* 219 adds a new peer to an existing open request 220 only add if less than requesterCount peers forwarded the same request id so far 221 note this is done irrespective of status (searching or found) 222 */ 223 func (self *Depo) addRequester(rs *storage.RequestStatus, req *retrieveRequestMsgData) { 224 log.Trace(fmt.Sprintf("Depo.addRequester: key %v - add peer to req.Id %v", req.Key.Log(), req.Id)) 225 list := rs.Requesters[req.Id] 226 rs.Requesters[req.Id] = append(list, req) 227 }