github.com/digdeepmining/go-atheios@v1.5.13-0.20180902133602-d5687a2e6f43/swarm/network/syncdb.go

// Copyright 2016 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

package network

import (
	"encoding/binary"
	"fmt"

	"github.com/atheioschain/go-atheios/logger"
	"github.com/atheioschain/go-atheios/logger/glog"
	"github.com/atheioschain/go-atheios/swarm/storage"
	"github.com/syndtr/goleveldb/leveldb"
	"github.com/syndtr/goleveldb/leveldb/iterator"
)

const counterKeyPrefix = 0x01

/*
syncDb is a queueing service for outgoing deliveries.
One instance is created per priority queue for each peer.

A syncDb instance maintains an in-memory buffer (of capacity bufferSize).
Once the in-memory buffer is full, it switches to persisting items in the db,
and the dbRead iterator iterates through the items, preserving their order.
Once the db read catches up (there are no more items in the db),
it switches back to the in-memory buffer.

When syncDb is stopped, all items in the buffer are saved to the db.
*/
type syncDb struct {
	start          []byte               // this syncDb's starting index in the request db
	key            storage.Key          // remote peer's address key
	counterKey     []byte               // db key to persist the counter
	priority       uint                 // priority High|Medium|Low
	buffer         chan interface{}     // incoming request channel
	db             *storage.LDBDatabase // underlying db (TODO should be interface)
	done           chan bool            // chan to signal that goroutines have finished quitting
	quit           chan bool            // chan to signal quitting to goroutines
	total, dbTotal int                  // counts for one session
	batch          chan chan int        // channel for batch requests
	dbBatchSize    uint                 // number of items before the batch is saved
}

// newSyncDb is the constructor: it needs a shared request db (leveldb);
// priority is used in the index key;
// an in-memory buffer and the leveldb are used for persistent storage;
// bufferSize and dbBatchSize are config parameters
func newSyncDb(db *storage.LDBDatabase, key storage.Key, priority uint, bufferSize, dbBatchSize uint, deliver func(interface{}, chan bool) bool) *syncDb {
	start := make([]byte, 42)
	start[1] = byte(priorities - priority)
	copy(start[2:34], key)

	counterKey := make([]byte, 34)
	counterKey[0] = counterKeyPrefix
	copy(counterKey[1:], start[1:34])

	syncdb := &syncDb{
		start:       start,
		key:         key,
		counterKey:  counterKey,
		priority:    priority,
		buffer:      make(chan interface{}, bufferSize),
		db:          db,
		done:        make(chan bool),
		quit:        make(chan bool),
		batch:       make(chan chan int),
		dbBatchSize: dbBatchSize,
	}
	glog.V(logger.Detail).Infof("syncDb[peer: %v, priority: %v] - initialised", key.Log(), priority)

	// start the main forever loop reading from the buffer
	go syncdb.bufferRead(deliver)
	return syncdb
}

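// exampleSyncDbUsage is an illustrative sketch, not part of the original file.
// It shows how a caller such as the syncer might wire up a syncDb: requestDb,
// peerKey and chunkKey are hypothetical placeholders, and High is assumed to
// be one of the package's priority constants referred to in the struct comment
// above. Only newSyncDb, the deliver signature, the buffer channel and stop
// come from this file.
func exampleSyncDbUsage(requestDb *storage.LDBDatabase, peerKey storage.Key, chunkKey storage.Key) {
	// deliver normally blocks on the network write; it must return false when
	// the quit channel closes so that bufferRead can persist the item instead
	deliver := func(req interface{}, quit chan bool) bool {
		select {
		case <-quit:
			return false // syncDb is stopping; the item will be saved to the db
		default:
			// hand req over to the p2p priority queue here
			return true
		}
	}
	db := newSyncDb(requestDb, peerKey, High, 128, 32, deliver)
	// outgoing requests are queued by sending them to the buffer channel;
	// the accepted types are the ones handled by newSyncDbEntry below
	db.buffer <- chunkKey
	// stop closes quit, flushes any backlog to leveldb and waits for done
	db.stop()
}
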
/*
bufferRead is a forever loop that reads from the incoming buffer and takes
care of delivering the outgoing store requests.

Its argument is the deliver function, taking the item as first argument
and a quit channel as second.
Closing this channel is supposed to abort anything waiting for delivery
(typically a network write).

The iteration switches between two modes:
* buffer mode reads the in-memory buffer and delivers the items directly
* db mode reads from the buffer and writes to the db; in parallel another
routine (dbRead) is started that reads from the db and delivers the items

If there is buffer contention in buffer mode (slow network, high upload volume),
syncDb switches to db mode and starts dbRead.
Once the db backlog is delivered, it reverts to the in-memory buffer.
The switch is implemented by setting exactly one of the two channel variables
to nil (see the illustrative sketch after this function).

bufferRead is started automatically when syncDb is initialised.

It saves the buffer to the db upon receiving the quit signal; see syncDb#stop().
*/
func (self *syncDb) bufferRead(deliver func(interface{}, chan bool) bool) {
	var buffer, db chan interface{} // channels representing the two read modes
	var more bool
	var req interface{}
	var entry *syncDbEntry
	var inBatch, inDb int
	batch := new(leveldb.Batch)
	var dbSize chan int
	quit := self.quit
	counterValue := make([]byte, 8)

	// the counter is used to keep the items in order and is persisted to the db;
	// start the counter where the db left off, or at 0 if not found
	data, err := self.db.Get(self.counterKey)
	var counter uint64
	if err == nil {
		counter = binary.BigEndian.Uint64(data)
		glog.V(logger.Detail).Infof("syncDb[%v/%v] - counter read from db at %v", self.key.Log(), self.priority, counter)
	} else {
		glog.V(logger.Detail).Infof("syncDb[%v/%v] - counter starts at %v", self.key.Log(), self.priority, counter)
	}

LOOP:
	for {
		// wait for the next item in the buffer, a quit signal or a batch request
		select {
		// the buffer channel only closes when switching to writing to the db
		case req = <-buffer:
			// deliver the request: this blocks on the network write, so the
			// quit channel is passed as an argument and deliver returns
			// if syncDb is stopped; in that case the item must be saved to the db
			more = deliver(req, self.quit)
			if !more {
				glog.V(logger.Debug).Infof("syncDb[%v/%v] quit: switching to db. session tally (db/total): %v/%v", self.key.Log(), self.priority, self.dbTotal, self.total)
				// received the quit signal; save the request currently awaiting delivery
				// by switching to db mode and closing the buffer
				buffer = nil
				db = self.buffer
				close(db)
				quit = nil // needs to block the quit case in select
				break      // break from select, this item will be written to the db
			}
			self.total++
			glog.V(logger.Detail).Infof("syncDb[%v/%v] deliver (db/total): %v/%v", self.key.Log(), self.priority, self.dbTotal, self.total)
			// by the time deliver returns, new writes may have accumulated in the buffer;
			// if buffer contention is detected, switch to db mode, which drains
			// the buffer so that no process blocks on pushing store requests
			if len(buffer) == cap(buffer) {
				glog.V(logger.Debug).Infof("syncDb[%v/%v] buffer full %v: switching to db. session tally (db/total): %v/%v", self.key.Log(), self.priority, cap(buffer), self.dbTotal, self.total)
				buffer = nil
				db = self.buffer
			}
			continue LOOP

			// incoming entry to put into the db
		case req, more = <-db:
			if !more {
				// the db channel is only closed on quit, once the whole buffer has been saved
				binary.BigEndian.PutUint64(counterValue, counter)
				batch.Put(self.counterKey, counterValue) // persist counter in batch
				self.writeSyncBatch(batch)               // save batch
				glog.V(logger.Detail).Infof("syncDb[%v/%v] quitting: save current batch to db", self.key.Log(), self.priority)
				break LOOP
			}
			self.dbTotal++
			self.total++
			// otherwise break after select
		case dbSize = <-self.batch:
			// explicit request for a batch
			if inBatch == 0 && quit != nil {
				// there have been no writes since the last batch, so the db is depleted;
				// switch to buffer mode
				glog.V(logger.Debug).Infof("syncDb[%v/%v] empty db: switching to buffer", self.key.Log(), self.priority)
				db = nil
				buffer = self.buffer
				dbSize <- 0 // tells the 'caller' (dbRead) that there is nothing left to read
				inDb = 0
				continue LOOP
			}
			binary.BigEndian.PutUint64(counterValue, counter)
			batch.Put(self.counterKey, counterValue)
			glog.V(logger.Debug).Infof("syncDb[%v/%v] write batch %v/%v - %x - %x", self.key.Log(), self.priority, inBatch, counter, self.counterKey, counterValue)
			batch = self.writeSyncBatch(batch)
			dbSize <- inBatch // indicates to the 'caller' that the batch has been written
			inBatch = 0
			continue LOOP

			// closing the syncDb#quit channel signals all goroutines to quit
		case <-quit:
			// need to save the backlog, so switch to db mode
			db = self.buffer
			buffer = nil
			quit = nil
			glog.V(logger.Detail).Infof("syncDb[%v/%v] quitting: save buffer to db", self.key.Log(), self.priority)
			close(db)
			continue LOOP
		}

		// we only get here if req is to be put into the db
		entry, err = self.newSyncDbEntry(req, counter)
		if err != nil {
			glog.V(logger.Warn).Infof("syncDb[%v/%v] saving request %v (#%v/%v) failed: %v", self.key.Log(), self.priority, req, inBatch, inDb, err)
			continue LOOP
		}
		batch.Put(entry.key, entry.val)
		glog.V(logger.Detail).Infof("syncDb[%v/%v] to batch %v '%v' (#%v/%v/%v)", self.key.Log(), self.priority, req, entry, inBatch, inDb, counter)
		// if we have just switched to db mode and are not quitting, launch dbRead
		// in a parallel goroutine to send deliveries from the db
		if inDb == 0 && quit != nil {
			glog.V(logger.Detail).Infof("syncDb[%v/%v] start dbRead", self.key.Log(), self.priority)
			go self.dbRead(true, counter, deliver)
		}
		inDb++
		inBatch++
		counter++
		// the batch needs to be saved if it grows too large (== dbBatchSize)
		if inBatch%int(self.dbBatchSize) == 0 {
			batch = self.writeSyncBatch(batch)
		}
	}
	glog.V(logger.Info).Infof("syncDb[%v:%v]: saved %v keys (saved counter at %v)", self.key.Log(), self.priority, inBatch, counter)
	close(self.done)
}
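
// nilChannelModeSketch is a minimal, self-contained illustration (not used by
// the package) of the idiom bufferRead relies on to switch modes: receiving
// from a nil channel blocks forever, so assigning nil to one of the two
// channel variables disables that select case.
func nilChannelModeSketch(in chan interface{}) {
	var buffer, db chan interface{}
	buffer = in // buffer mode: only the first case can fire
	for i := 0; i < 4; i++ {
		// each iteration blocks until a value arrives on in
		select {
		case req := <-buffer: // selectable only while buffer != nil
			_ = req
			buffer, db = nil, in // switch to db mode
		case req := <-db: // selectable only while db != nil
			_ = req
			buffer, db = in, nil // switch back to buffer mode
		}
	}
}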

// writeSyncBatch writes the batch to the db and returns a fresh batch object;
// on error the original batch is returned, so its items are retried with the next write
func (self *syncDb) writeSyncBatch(batch *leveldb.Batch) *leveldb.Batch {
	err := self.db.Write(batch)
	if err != nil {
		glog.V(logger.Warn).Infof("syncDb[%v/%v] saving batch to db failed: %v", self.key.Log(), self.priority, err)
		return batch
	}
	return new(leveldb.Batch)
}

// abstract type for db entries (TODO: could be a feature of Receipts)
type syncDbEntry struct {
	key, val []byte
}

func (self syncDbEntry) String() string {
	return fmt.Sprintf("key: %x, value: %x", self.key, self.val)
}

/*
	dbRead iterates over the store requests to be sent over to the peer.
	This is mainly to prevent crashes due to network output buffer contention (???)
	as well as to make synchronisation resilient to disconnects.
	The messages are supposed to be sent via the p2p priority queue.

	The request DB is shared between peers, but the domains of the individual
	syncDb instances are disjoint. dbkeys (42 bytes) are structured as:
	* 0: 0x00 (0x01 is reserved for the counter key)
	* 1: priorities - priority (so that high priority can be replayed first)
	* 2-33: peer's address
	* 34-41: syncdb counter to preserve order (this field is missing for the counter key)

	values (40 bytes) are:
	* 0-31: key
	* 32-39: request id

	(An illustrative decoder for this layout is sketched at the end of this file.)

	dbRead takes a boolean indicating whether, on the first round, all of the
	historical record is synced; a second argument giving the current db counter;
	and a third, the delivery function to apply to each entry.
*/
func (self *syncDb) dbRead(useBatches bool, counter uint64, fun func(interface{}, chan bool) bool) {
	key := make([]byte, 42)
	copy(key, self.start)
	binary.BigEndian.PutUint64(key[34:], counter)
	var batches, n, cnt, total int
	var more bool
	var entry *syncDbEntry
	var it iterator.Iterator
	var del *leveldb.Batch
	batchSizes := make(chan int)

	for {
		// if useBatches is false, cnt is not set
		if useBatches {
			// this could be called before all cnt items are sent out,
			// so that the loop is not blocking while delivering;
			// only relevant if cnt is large
			select {
			case self.batch <- batchSizes:
			case <-self.quit:
				return
			}
			// wait for the write to finish and get the item count in the next batch
			cnt = <-batchSizes
			batches++
			if cnt == 0 {
				// empty
				return
			}
		}
		it = self.db.NewIterator()
		it.Seek(key)
		if !it.Valid() {
			copy(key, self.start)
			useBatches = true
			continue
		}
		del = new(leveldb.Batch)
		glog.V(logger.Detail).Infof("syncDb[%v/%v]: new iterator: %x (batch %v, count %v)", self.key.Log(), self.priority, key, batches, cnt)

		for n = 0; !useBatches || n < cnt; it.Next() {
			copy(key, it.Key())
			if len(key) == 0 || key[0] != 0 {
				copy(key, self.start)
				useBatches = true
				break
			}
			val := make([]byte, 40)
			copy(val, it.Value())
			entry = &syncDbEntry{key, val}
			// glog.V(logger.Detail).Infof("syncDb[%v/%v] - %v, batches: %v, total: %v, session total from db: %v/%v", self.key.Log(), self.priority, self.key.Log(), batches, total, self.dbTotal, self.total)
			more = fun(entry, self.quit)
			if !more {
				// quit was received while waiting to deliver the entry; the entry will not be deleted
				glog.V(logger.Detail).Infof("syncDb[%v/%v] batch %v quit after %v/%v items", self.key.Log(), self.priority, batches, n, cnt)
				break
			}
			// since subsequent batches of the same db session are indexed incrementally,
			// deleting earlier batches can be delayed and parallelised;
			// this could be a batch delete when the db is idle (but adds complexity, esp. when quitting)
			del.Delete(key)
			n++
			total++
		}
		glog.V(logger.Debug).Infof("syncDb[%v/%v] - db session closed, batches: %v, total: %v, session total from db: %v/%v", self.key.Log(), self.priority, batches, total, self.dbTotal, self.total)
		self.db.Write(del) // this could be called asynchronously, only when the db is idle
		it.Release()
	}
}
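
// batchHandshakeSketch is a stand-alone illustration (not used by the package)
// of the request/response pattern between dbRead and bufferRead above: the
// reader sends a reply channel over the batch channel, the writer flushes its
// pending batch and answers with the number of items it wrote, and a zero
// reply tells the reader that the db backlog is exhausted.
func batchHandshakeSketch() {
	batch := make(chan chan int) // corresponds to syncDb.batch
	// writer side (bufferRead): flush the pending batch, then report its size
	go func() {
		pending := 3 // stand-in for inBatch
		reply := <-batch
		reply <- pending
	}()
	// reader side (dbRead): request a flush and wait for the item count
	sizes := make(chan int)
	batch <- sizes
	cnt := <-sizes
	_ = cnt // cnt items are now safely in the db and can be iterated
}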

// stop closes the quit channel and blocks until bufferRead has saved the
// buffer to the db and closed the done channel
func (self *syncDb) stop() {
	close(self.quit)
	<-self.done
}

// newSyncDbEntry calculates a db key and value for the request
// (see the comment above dbRead for the db key structure)
// polymorphic: for the accepted types, see syncer#addRequest
func (self *syncDb) newSyncDbEntry(req interface{}, counter uint64) (entry *syncDbEntry, err error) {
	var key storage.Key
	var chunk *storage.Chunk
	var id uint64
	var ok bool
	var sreq *storeRequestMsgData

	if key, ok = req.(storage.Key); ok {
		id = generateId()
	} else if chunk, ok = req.(*storage.Chunk); ok {
		key = chunk.Key
		id = generateId()
	} else if sreq, ok = req.(*storeRequestMsgData); ok {
		key = sreq.Key
		id = sreq.Id
	} else if entry, ok = req.(*syncDbEntry); !ok {
		return nil, fmt.Errorf("type not allowed: %v (%T)", req, req)
	}

	// entries are ordered by priority, then peer, then counter (see the key layout above)
	// the value carries the request id if one exists
	if entry == nil {
		dbkey := make([]byte, 42)
		dbval := make([]byte, 40)

		// encode key
		copy(dbkey[:], self.start[:34]) // 0x00 prefix, priority byte and peer address
		binary.BigEndian.PutUint64(dbkey[34:], counter)
		// encode value
		copy(dbval, key[:])
		binary.BigEndian.PutUint64(dbval[32:], id)

		entry = &syncDbEntry{dbkey, dbval}
	}
	return
}
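
// decodeSyncDbEntrySketch is an illustrative counterpart (not part of the
// original file) to newSyncDbEntry: it unpacks the 42-byte db key and the
// 40-byte value according to the layout documented above dbRead. The function
// and result names are hypothetical; only the offsets come from this file, and
// priorities is assumed to be the package's priority count constant.
func decodeSyncDbEntrySketch(entry *syncDbEntry) (peer storage.Key, priority uint, counter uint64, chunkKey storage.Key, requestId uint64) {
	// key: 0x00 prefix | priorities - priority | 32-byte peer address | 8-byte counter
	priority = uint(priorities) - uint(entry.key[1])
	peer = storage.Key(entry.key[2:34])
	counter = binary.BigEndian.Uint64(entry.key[34:42])
	// value: 32-byte chunk/storage key | 8-byte request id
	chunkKey = storage.Key(entry.val[:32])
	requestId = binary.BigEndian.Uint64(entry.val[32:40])
	return
}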