github.com/memikequinn/go-ethereum@v1.6.6-0.20170621145815-58a1e13e6dd7/swarm/network/syncdb.go

// Copyright 2016 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

package network

import (
	"encoding/binary"
	"fmt"

	"github.com/ethereum/go-ethereum/log"
	"github.com/ethereum/go-ethereum/swarm/storage"
	"github.com/syndtr/goleveldb/leveldb"
	"github.com/syndtr/goleveldb/leveldb/iterator"
)

const counterKeyPrefix = 0x01

/*
syncDb is a queueing service for outgoing deliveries.
One instance per priority queue for each peer.

a syncDb instance maintains an in-memory buffer (of capacity bufferSize);
once the buffer is full it switches to persisting items in the db,
and the dbRead iterator walks through them preserving their order.
Once the db read catches up (there are no more items in the db)
it switches back to the in-memory buffer.

when syncDb is stopped, all items in the buffer are saved to the db
*/
type syncDb struct {
	start          []byte               // this syncDb's starting index in the request db
	key            storage.Key          // remote peer's address key
	counterKey     []byte               // db key to persist counter
	priority       uint                 // priority High|Medium|Low
	buffer         chan interface{}     // incoming request channel
	db             *storage.LDBDatabase // underlying db (TODO should be interface)
	done           chan bool            // chan to signal goroutines finished quitting
	quit           chan bool            // chan to signal quitting to goroutines
	total, dbTotal int                  // counts for one session
	batch          chan chan int        // channel for batch requests
	dbBatchSize    uint                 // number of items before batch is saved
}

// the constructor needs a shared request db (leveldb);
// priority is used in the index key
// it uses an in-memory buffer and the leveldb for persistent storage
// bufferSize and dbBatchSize are config parameters
func newSyncDb(db *storage.LDBDatabase, key storage.Key, priority uint, bufferSize, dbBatchSize uint, deliver func(interface{}, chan bool) bool) *syncDb {
	start := make([]byte, 42)
	start[1] = byte(priorities - priority)
	copy(start[2:34], key)

	counterKey := make([]byte, 34)
	counterKey[0] = counterKeyPrefix
	copy(counterKey[1:], start[1:34])

	syncdb := &syncDb{
		start:       start,
		key:         key,
		counterKey:  counterKey,
		priority:    priority,
		buffer:      make(chan interface{}, bufferSize),
		db:          db,
		done:        make(chan bool),
		quit:        make(chan bool),
		batch:       make(chan chan int),
		dbBatchSize: dbBatchSize,
	}
	log.Trace(fmt.Sprintf("syncDb[peer: %v, priority: %v] - initialised", key.Log(), priority))

	// starts the main forever loop reading from buffer
	go syncdb.bufferRead(deliver)
	return syncdb
}
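
// exampleSyncDbSetup is an illustrative sketch of how a caller might wire up a
// syncDb for one peer and one priority level. The function itself, the
// bufferSize/dbBatchSize values and the minimal deliver callback are assumptions
// for illustration only; in the real syncer the deliver callback forwards the
// request to the peer's outgoing p2p priority queue and blocks on the network write.
func exampleSyncDbSetup(requestDb *storage.LDBDatabase, remotePeer storage.Key) *syncDb {
	deliver := func(req interface{}, quit chan bool) bool {
		// a real implementation would block on the network write and return
		// false if quit is closed while it is still waiting
		select {
		case <-quit:
			return false
		default:
			// hand req over to the peer here
			return true
		}
	}
	// illustrative values: priority level 0, 128 in-memory slots,
	// db batches flushed every 64 entries
	return newSyncDb(requestDb, remotePeer, 0, 128, 64, deliver)
}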

/*
bufferRead is a forever loop that delivers outgoing store
requests read from the incoming buffer.

its argument is the deliver function, taking the item as first argument
and a quit channel as second.
Closing this channel is supposed to abort any pending wait for delivery
(typically a network write)

The iteration switches between 2 modes:
* buffer mode reads the in-memory buffer and delivers the items directly
* db mode reads from the buffer and writes to the db; in parallel another
routine is started that reads from the db and delivers items

If there is buffer contention in buffer mode (slow network, high upload volume)
syncDb switches to db mode and starts dbRead
Once the db backlog is delivered, it reverts to the in-memory buffer

It is automatically started when syncDb is initialised.

It saves the buffer to the db upon receiving the quit signal (syncDb#stop())
*/
func (self *syncDb) bufferRead(deliver func(interface{}, chan bool) bool) {
	var buffer, db chan interface{} // channels representing the two read modes
	var more bool
	var req interface{}
	var entry *syncDbEntry
	var inBatch, inDb int
	batch := new(leveldb.Batch)
	var dbSize chan int
	quit := self.quit
	counterValue := make([]byte, 8)

	// counter is used for keeping the items in order, persisted to db
	// start counter where db was at, 0 if not found
	data, err := self.db.Get(self.counterKey)
	var counter uint64
	if err == nil {
		counter = binary.BigEndian.Uint64(data)
		log.Trace(fmt.Sprintf("syncDb[%v/%v] - counter read from db at %v", self.key.Log(), self.priority, counter))
	} else {
		log.Trace(fmt.Sprintf("syncDb[%v/%v] - counter starts at %v", self.key.Log(), self.priority, counter))
	}

LOOP:
	for {
		// waiting for the next item in the buffer, a quit signal or a batch request
		select {
		// buffer only closes when writing to db
		case req = <-buffer:
			// deliver the request: this blocks on the network write, so
			// the quit channel is passed as argument so that deliver returns
			// if syncDb is stopped; in that case we need to save the item to the db
			more = deliver(req, self.quit)
			if !more {
				log.Debug(fmt.Sprintf("syncDb[%v/%v] quit: switching to db. session tally (db/total): %v/%v", self.key.Log(), self.priority, self.dbTotal, self.total))
				// received quit signal, save the request currently waiting for delivery
				// by switching to db mode and closing the buffer
				buffer = nil
				db = self.buffer
				close(db)
				quit = nil // needs to block the quit case in select
				break      // break from select, this item will be written to the db
			}
			self.total++
			log.Trace(fmt.Sprintf("syncDb[%v/%v] deliver (db/total): %v/%v", self.key.Log(), self.priority, self.dbTotal, self.total))
			// by the time deliver returns, there may have been new writes to the buffer;
			// if buffer contention is detected, switch to db mode which drains
			// the buffer so no process will block on pushing store requests
			if len(buffer) == cap(buffer) {
				log.Debug(fmt.Sprintf("syncDb[%v/%v] buffer full %v: switching to db. session tally (db/total): %v/%v", self.key.Log(), self.priority, cap(buffer), self.dbTotal, self.total))
				buffer = nil
				db = self.buffer
			}
			continue LOOP

		// incoming entry to put into db
		case req, more = <-db:
			if !more {
				// only happens if quit was called and the whole buffer has been drained
				binary.BigEndian.PutUint64(counterValue, counter)
				batch.Put(self.counterKey, counterValue) // persist counter in batch
				self.writeSyncBatch(batch)               // save batch
				log.Trace(fmt.Sprintf("syncDb[%v/%v] quitting: save current batch to db", self.key.Log(), self.priority))
				break LOOP
			}
			self.dbTotal++
			self.total++
			// otherwise break after select
		case dbSize = <-self.batch:
			// explicit request for batch
			if inBatch == 0 && quit != nil {
				// there were no writes since the last batch, so the db is depleted;
				// switch to buffer mode
				log.Debug(fmt.Sprintf("syncDb[%v/%v] empty db: switching to buffer", self.key.Log(), self.priority))
				db = nil
				buffer = self.buffer
				dbSize <- 0 // indicates to 'caller' that batch has been written
				inDb = 0
				continue LOOP
			}
			binary.BigEndian.PutUint64(counterValue, counter)
			batch.Put(self.counterKey, counterValue)
			log.Debug(fmt.Sprintf("syncDb[%v/%v] write batch %v/%v - %x - %x", self.key.Log(), self.priority, inBatch, counter, self.counterKey, counterValue))
			batch = self.writeSyncBatch(batch)
			dbSize <- inBatch // indicates to 'caller' that batch has been written
			inBatch = 0
			continue LOOP

		// closing the syncDb#quit channel is used to signal all goroutines to quit
		case <-quit:
			// need to save the backlog, so switch to db mode
			db = self.buffer
			buffer = nil
			quit = nil
			log.Trace(fmt.Sprintf("syncDb[%v/%v] quitting: save buffer to db", self.key.Log(), self.priority))
			close(db)
			continue LOOP
		}

		// we only get here if req is to be put into the db
		entry, err = self.newSyncDbEntry(req, counter)
		if err != nil {
			log.Warn(fmt.Sprintf("syncDb[%v/%v] saving request %v (#%v/%v) failed: %v", self.key.Log(), self.priority, req, inBatch, inDb, err))
			continue LOOP
		}
		batch.Put(entry.key, entry.val)
		log.Trace(fmt.Sprintf("syncDb[%v/%v] to batch %v '%v' (#%v/%v/%v)", self.key.Log(), self.priority, req, entry, inBatch, inDb, counter))
		// if we just switched to db mode and are not quitting, launch dbRead
		// in a parallel goroutine to send deliveries from the db
		if inDb == 0 && quit != nil {
			log.Trace(fmt.Sprintf("syncDb[%v/%v] start dbRead", self.key.Log(), self.priority))
			go self.dbRead(true, counter, deliver)
		}
		inDb++
		inBatch++
		counter++
		// need to save the batch if it gets too large (== dbBatchSize)
		if inBatch%int(self.dbBatchSize) == 0 {
			batch = self.writeSyncBatch(batch)
		}
	}
	log.Info(fmt.Sprintf("syncDb[%v:%v]: saved %v keys (saved counter at %v)", self.key.Log(), self.priority, inBatch, counter))
	close(self.done)
}
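
// nilChannelModeSketch is an illustrative sketch of the select idiom bufferRead
// relies on above: receiving from a nil channel blocks forever, so assigning nil
// to a channel variable disables its select case, and pointing either the buffer
// or the db variable at the real channel chooses which mode the next iteration
// runs in. The function and its names are assumptions for illustration only; the
// caller is expected to have two items queued on in.
func nilChannelModeSketch(in chan interface{}) {
	var buffer, db chan interface{}
	buffer = in // start in buffer mode; the db case is disabled while db == nil
	for i := 0; i < 2; i++ {
		select {
		case req := <-buffer:
			_ = req // buffer mode: deliver the request directly
			// switch to db mode: disable the buffer case, enable the db case
			buffer, db = nil, in
		case req := <-db:
			_ = req // db mode: the request would be written to the request db
		}
	}
}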

// writes the batch to the db and returns a new batch object
func (self *syncDb) writeSyncBatch(batch *leveldb.Batch) *leveldb.Batch {
	err := self.db.Write(batch)
	if err != nil {
		log.Warn(fmt.Sprintf("syncDb[%v/%v] saving batch to db failed: %v", self.key.Log(), self.priority, err))
		return batch
	}
	return new(leveldb.Batch)
}

// abstract type for db entries (TODO could be a feature of Receipts)
type syncDbEntry struct {
	key, val []byte
}

func (self syncDbEntry) String() string {
	return fmt.Sprintf("key: %x, value: %x", self.key, self.val)
}

/*
dbRead iterates over store requests to be sent over to the peer
this is mainly to prevent crashes due to network output buffer contention (???)
as well as to make synchronisation resilient to disconnects
the messages are supposed to be sent in the p2p priority queue.

the request DB is shared between peers, but the domains for each syncDb
are disjoint. dbkeys (42 bytes) are structured:
* 0: 0x00 (0x01 reserved for the counter key)
* 1: priorities - priority (so that high priority can be replayed first)
* 2-33: peer's address
* 34-41: syncDb counter to preserve order (this field is missing for the counter key)

values (40 bytes) are:
* 0-31: key
* 32-39: request id

dbRead needs a boolean to indicate whether on the first round all the historical
records are synced. The second argument indicates the current db counter,
the third is the function to apply.
*/
func (self *syncDb) dbRead(useBatches bool, counter uint64, fun func(interface{}, chan bool) bool) {
	key := make([]byte, 42)
	copy(key, self.start)
	binary.BigEndian.PutUint64(key[34:], counter)
	var batches, n, cnt, total int
	var more bool
	var entry *syncDbEntry
	var it iterator.Iterator
	var del *leveldb.Batch
	batchSizes := make(chan int)

	for {
		// if useBatches is false, cnt is not set
		if useBatches {
			// this could be called before all cnt items are sent out
			// so that the loop is not blocking while delivering
			// only relevant if cnt is large
			select {
			case self.batch <- batchSizes:
			case <-self.quit:
				return
			}
			// wait for the write to finish and get the item count in the next batch
			cnt = <-batchSizes
			batches++
			if cnt == 0 {
				// empty
				return
			}
		}
		it = self.db.NewIterator()
		it.Seek(key)
		if !it.Valid() {
			copy(key, self.start)
			useBatches = true
			continue
		}
		del = new(leveldb.Batch)
		log.Trace(fmt.Sprintf("syncDb[%v/%v]: new iterator: %x (batch %v, count %v)", self.key.Log(), self.priority, key, batches, cnt))

		for n = 0; !useBatches || n < cnt; it.Next() {
			copy(key, it.Key())
			if len(key) == 0 || key[0] != 0 {
				copy(key, self.start)
				useBatches = true
				break
			}
			val := make([]byte, 40)
			copy(val, it.Value())
			entry = &syncDbEntry{key, val}
			// log.Trace(fmt.Sprintf("syncDb[%v/%v] - %v, batches: %v, total: %v, session total from db: %v/%v", self.key.Log(), self.priority, self.key.Log(), batches, total, self.dbTotal, self.total))
			more = fun(entry, self.quit)
			if !more {
				// quit received while waiting to deliver the entry, the entry will not be deleted
				log.Trace(fmt.Sprintf("syncDb[%v/%v] batch %v quit after %v/%v items", self.key.Log(), self.priority, batches, n, cnt))
				break
			}
			// since subsequent batches of the same db session are indexed incrementally
			// deleting earlier batches can be delayed and parallelised
			// this could be a batch delete when the db is idle (but added complexity, esp. when quitting)
			del.Delete(key)
			n++
			total++
		}
		log.Debug(fmt.Sprintf("syncDb[%v/%v] - db session closed, batches: %v, total: %v, session total from db: %v/%v", self.key.Log(), self.priority, batches, total, self.dbTotal, self.total))
		self.db.Write(del) // this could be called async, only when the db is idle
		it.Release()
	}
}

// stop signals the goroutines to quit and waits until bufferRead
// has saved its backlog to the db
func (self *syncDb) stop() {
	close(self.quit)
	<-self.done
}
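
// decodeSyncDbEntry is an illustrative sketch that unpacks the 42-byte db key
// and 40-byte value documented above dbRead into their components. The function
// and its return value names are assumptions for illustration only.
func decodeSyncDbEntry(entry *syncDbEntry) (priority uint, peer storage.Key, counter uint64, chunkKey storage.Key, requestID uint64) {
	// key layout: 0x00 | priorities-priority | 32-byte peer address | 8-byte counter
	priority = priorities - uint(entry.key[1])
	peer = storage.Key(entry.key[2:34])
	counter = binary.BigEndian.Uint64(entry.key[34:42])
	// value layout: 32-byte chunk key | 8-byte request id
	chunkKey = storage.Key(entry.val[:32])
	requestID = binary.BigEndian.Uint64(entry.val[32:40])
	return
}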

// calculates a dbkey for the request so that it can be saved to the db
// see the db key structure documented above dbRead
// polymorphic: for the accepted types, see syncer#addRequest
func (self *syncDb) newSyncDbEntry(req interface{}, counter uint64) (entry *syncDbEntry, err error) {
	var key storage.Key
	var chunk *storage.Chunk
	var id uint64
	var ok bool
	var sreq *storeRequestMsgData

	if key, ok = req.(storage.Key); ok {
		id = generateId()
	} else if chunk, ok = req.(*storage.Chunk); ok {
		key = chunk.Key
		id = generateId()
	} else if sreq, ok = req.(*storeRequestMsgData); ok {
		key = sreq.Key
		id = sreq.Id
	} else if entry, ok = req.(*syncDbEntry); !ok {
		return nil, fmt.Errorf("type not allowed: %v (%T)", req, req)
	}

	// order by priority > peer > seqid
	// value is the request id if it exists
	if entry == nil {
		dbkey := make([]byte, 42)
		dbval := make([]byte, 40)

		// encode key
		copy(dbkey[:], self.start[:34]) // db peer
		binary.BigEndian.PutUint64(dbkey[34:], counter)
		// encode value
		copy(dbval, key[:])
		binary.BigEndian.PutUint64(dbval[32:], id)

		entry = &syncDbEntry{dbkey, dbval}
	}
	return
}
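
// exampleSyncDbLifecycle is an illustrative sketch of one syncDb session:
// requests of the types accepted by newSyncDbEntry are pushed onto the
// in-memory buffer (in the real code the syncer does this), and stop()
// saves any undelivered backlog to the request db before returning.
// The function and the parameter values are assumptions for illustration only.
func exampleSyncDbLifecycle(db *storage.LDBDatabase, peer storage.Key, chunk *storage.Chunk) {
	deliver := func(req interface{}, quit chan bool) bool {
		log.Trace(fmt.Sprintf("sketch delivery: %v", req))
		return true // pretend the network write always succeeds
	}
	sdb := newSyncDb(db, peer, 1, 16, 8, deliver)
	// any of the request types handled by newSyncDbEntry can be queued
	sdb.buffer <- chunk.Key // a bare storage.Key
	sdb.buffer <- chunk     // a *storage.Chunk
	// stopping closes the quit channel; anything still queued is saved to the db
	sdb.stop()
}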