github.com/truechain/go-ethereum@v1.8.11/swarm/network/syncer.go (about)

     1  // Copyright 2016 The go-ethereum Authors
     2  // This file is part of the go-ethereum library.
     3  //
     4  // The go-ethereum library is free software: you can redistribute it and/or modify
     5  // it under the terms of the GNU Lesser General Public License as published by
     6  // the Free Software Foundation, either version 3 of the License, or
     7  // (at your option) any later version.
     8  //
     9  // The go-ethereum library is distributed in the hope that it will be useful,
    10  // but WITHOUT ANY WARRANTY; without even the implied warranty of
    11  // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
    12  // GNU Lesser General Public License for more details.
    13  //
    14  // You should have received a copy of the GNU Lesser General Public License
    15  // along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
    16  
    17  package network
    18  
    19  import (
    20  	"encoding/binary"
    21  	"encoding/json"
    22  	"fmt"
    23  	"path/filepath"
    24  
    25  	"github.com/ethereum/go-ethereum/log"
    26  	"github.com/ethereum/go-ethereum/swarm/storage"
    27  )
    28  
    29  // syncer parameters (global, not peer specific) default values
    30  const (
    31  	requestDbBatchSize = 512  // size of batch before it is written to the request db
    32  	keyBufferSize      = 1024 // size of buffer for unsynced keys
    33  	syncBatchSize      = 128  // maximum batch size for outgoing requests
    34  	syncBufferSize     = 128  // size of buffer for delivery requests
    35  	syncCacheSize      = 1024 // cache capacity to store request queue in memory
    36  )
    37  
    38  // priorities
    39  const (
    40  	Low        = iota // 0
    41  	Medium            // 1
    42  	High              // 2
    43  	priorities        // 3 number of priority levels
    44  )
    45  
    46  // request types
    47  const (
    48  	DeliverReq   = iota // 0
    49  	PushReq             // 1
    50  	PropagateReq        // 2
    51  	HistoryReq          // 3
    52  	BacklogReq          // 4
    53  )
    54  
    55  // json serialisable struct to record the synchronisation state between 2 peers
    56  type syncState struct {
    57  	*storage.DbSyncState // embeds the following 4 fields:
    58  	// Start      Key    // lower limit of address space
    59  	// Stop       Key    // upper limit of address space
    60  	// First      uint64 // counter taken from last sync state
    61  	// Last       uint64 // counter of remote peer dbStore at the time of last connection
    62  	SessionAt  uint64      // set at the time of connection
    63  	LastSeenAt uint64      // set at the time of connection
    64  	Latest     storage.Key // latest chunk key synced so far (continuously set by syncer)
    65  	Synced     bool        // true iff Sync is done up to the last disconnect
    66  	synced     chan bool   // signal that sync stage finished
    67  }
    68  
    69  // wrapper of db-s to provide mockable custom local chunk store access to syncer
    70  type DbAccess struct {
    71  	db  *storage.DbStore
    72  	loc *storage.LocalStore
    73  }
    74  
    75  func NewDbAccess(loc *storage.LocalStore) *DbAccess {
    76  	return &DbAccess{loc.DbStore.(*storage.DbStore), loc}
    77  }
    78  
    79  // to obtain the chunk for a given key or request db entry only
    80  func (self *DbAccess) get(key storage.Key) (*storage.Chunk, error) {
    81  	return self.loc.Get(key)
    82  }
    83  
    84  // current storage counter of chunk db
    85  func (self *DbAccess) counter() uint64 {
    86  	return self.db.Counter()
    87  }
    88  
    89  // implemented by dbStoreSyncIterator
    90  type keyIterator interface {
    91  	Next() storage.Key
    92  }
    93  
    94  // generator function for iteration by address range and storage counter
    95  func (self *DbAccess) iterator(s *syncState) keyIterator {
    96  	it, err := self.db.NewSyncIterator(*(s.DbSyncState))
    97  	if err != nil {
    98  		return nil
    99  	}
   100  	return keyIterator(it)
   101  }
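
// Illustrative sketch (not part of the original file): draining a keyIterator
// obtained from DbAccess.iterator. The helper name is hypothetical; iteration
// ends when Next returns nil (or immediately if the iterator could not be created).
func exampleIterate(dba *DbAccess, state *syncState) (keys []storage.Key) {
	it := dba.iterator(state)
	if it == nil {
		return nil
	}
	for {
		key := it.Next()
		if key == nil {
			break
		}
		keys = append(keys, key)
	}
	return keys
}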
   102  
   103  func (self syncState) String() string {
   104  	if self.Synced {
   105  		return fmt.Sprintf(
   106  			"session started at: %v, last seen at: %v, latest key: %v",
   107  			self.SessionAt, self.LastSeenAt,
   108  			self.Latest.Log(),
   109  		)
   110  	} else {
   111  		return fmt.Sprintf(
   112  			"address: %v-%v, index: %v-%v, session started at: %v, last seen at: %v, latest key: %v",
   113  			self.Start.Log(), self.Stop.Log(),
   114  			self.First, self.Last,
   115  			self.SessionAt, self.LastSeenAt,
   116  			self.Latest.Log(),
   117  		)
   118  	}
   119  }
   120  
   121  // syncer parameters (global, not peer specific)
   122  type SyncParams struct {
   123  	RequestDbPath      string // path for request db (leveldb)
   124  	RequestDbBatchSize uint   // number of items before the batch is saved to the request db
   125  	KeyBufferSize      uint   // size of key buffer
   126  	SyncBatchSize      uint   // maximum batch size for outgoing requests
   127  	SyncBufferSize     uint   // size of buffer for delivery requests
   128  	SyncCacheSize      uint   // cache capacity to store request queue in memory
   129  	SyncPriorities     []uint // list of priority levels for req types 0-4
   130  	SyncModes          []bool // list of sync modes for req types 0-4
   131  }
   132  
   133  // constructor with default values
   134  func NewDefaultSyncParams() *SyncParams {
   135  	return &SyncParams{
   136  		RequestDbBatchSize: requestDbBatchSize,
   137  		KeyBufferSize:      keyBufferSize,
   138  		SyncBufferSize:     syncBufferSize,
   139  		SyncBatchSize:      syncBatchSize,
   140  		SyncCacheSize:      syncCacheSize,
   141  		SyncPriorities:     []uint{High, Medium, Medium, Low, Low},
   142  		SyncModes:          []bool{true, true, true, true, false},
   143  	}
   144  }
   145  
   146  // the request db path can only be set after all config options (file, cmd line, env vars)
   147  // have been evaluated
   148  func (self *SyncParams) Init(path string) {
   149  	self.RequestDbPath = filepath.Join(path, "requests")
   150  }
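
// Illustrative sketch (not part of the original file): typical construction of
// SyncParams with defaults, then Init once all config sources are evaluated.
// The function name and base path are hypothetical. With the defaults, request
// types map to priorities as DeliverReq->High, PushReq/PropagateReq->Medium,
// HistoryReq/BacklogReq->Low, and only BacklogReq has sync mode off.
func exampleSyncParams(basePath string) *SyncParams {
	params := NewDefaultSyncParams()
	params.Init(basePath) // sets RequestDbPath to <basePath>/requests
	return params
}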
   151  
   152  // syncer is the agent that manages content distribution/storage replication/chunk storeRequest forwarding
   153  type syncer struct {
   154  	*SyncParams                     // sync parameters
   155  	syncF           func() bool     // if syncing is needed
   156  	key             storage.Key     // remote peer's address key
   157  	state           *syncState      // sync state for our dbStore
   158  	syncStates      chan *syncState // different stages of sync
   159  	deliveryRequest chan bool       // one of two triggers needed to send unsyncedKeys
   160  	newUnsyncedKeys chan bool       // one of two triggers needed to send unsynced keys
   161  	quit            chan bool       // signal to quit loops
   162  
   163  	// DB related fields
   164  	dbAccess *DbAccess // access to dbStore
   165  
   166  	// native fields
   167  	queues     [priorities]*syncDb                   // in-memory cache / queues for sync reqs
   168  	keys       [priorities]chan interface{}          // buffer for unsynced keys
   169  	deliveries [priorities]chan *storeRequestMsgData // delivery
   170  
   171  	// bzz protocol instance outgoing message callbacks (mockable for testing)
   172  	unsyncedKeys func([]*syncRequest, *syncState) error // send unsyncedKeysMsg
   173  	store        func(*storeRequestMsgData) error       // send storeRequestMsg
   174  }
   175  
   176  // a syncer instance is linked to each peer connection
   177  // constructor is called from protocol after successful handshake
   178  // the returned instance is attached to the peer and can be called
   179  // by the forwarder
   180  func newSyncer(
   181  	db *storage.LDBDatabase, remotekey storage.Key,
   182  	dbAccess *DbAccess,
   183  	unsyncedKeys func([]*syncRequest, *syncState) error,
   184  	store func(*storeRequestMsgData) error,
   185  	params *SyncParams,
   186  	state *syncState,
   187  	syncF func() bool,
   188  ) (*syncer, error) {
   189  
   190  	syncBufferSize := params.SyncBufferSize
   191  	keyBufferSize := params.KeyBufferSize
   192  	dbBatchSize := params.RequestDbBatchSize
   193  
   194  	self := &syncer{
   195  		syncF:           syncF,
   196  		key:             remotekey,
   197  		dbAccess:        dbAccess,
   198  		syncStates:      make(chan *syncState, 20),
   199  		deliveryRequest: make(chan bool, 1),
   200  		newUnsyncedKeys: make(chan bool, 1),
   201  		SyncParams:      params,
   202  		state:           state,
   203  		quit:            make(chan bool),
   204  		unsyncedKeys:    unsyncedKeys,
   205  		store:           store,
   206  	}
   207  
   208  	// initialising
   209  	for i := 0; i < priorities; i++ {
   210  		self.keys[i] = make(chan interface{}, keyBufferSize)
   211  		self.deliveries[i] = make(chan *storeRequestMsgData)
   212  		// initialise a syncdb instance for each priority queue
   213  		self.queues[i] = newSyncDb(db, remotekey, uint(i), syncBufferSize, dbBatchSize, self.deliver(uint(i)))
   214  	}
   215  	log.Info(fmt.Sprintf("syncer started: %v", state))
   216  	// launch chunk delivery service
   217  	go self.syncDeliveries()
   218  	// launch sync task manager
   219  	if self.syncF() {
   220  		go self.sync()
   221  	}
   222  	// process unsynced keys to broadcast
   223  	go self.syncUnsyncedKeys()
   224  
   225  	return self, nil
   226  }
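
// Illustrative sketch (not part of the original file): wiring a syncer with
// no-op outgoing message callbacks, e.g. in a test. The helper name is
// hypothetical; db, remotekey, dbAccess and state are assumed to be supplied
// by the caller (normally the bzz protocol after a successful handshake).
func exampleNewSyncer(db *storage.LDBDatabase, remotekey storage.Key, dbAccess *DbAccess, state *syncState) (*syncer, error) {
	unsyncedKeys := func(reqs []*syncRequest, s *syncState) error {
		log.Trace(fmt.Sprintf("would send %v unsynced keys", len(reqs)))
		return nil
	}
	store := func(msg *storeRequestMsgData) error {
		log.Trace(fmt.Sprintf("would deliver chunk %v", msg.Key.Log()))
		return nil
	}
	return newSyncer(db, remotekey, dbAccess, unsyncedKeys, store, NewDefaultSyncParams(), state, func() bool { return true })
}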
   227  
   228  // metadata serialisation
   229  func encodeSync(state *syncState) (*json.RawMessage, error) {
   230  	data, err := json.MarshalIndent(state, "", " ")
   231  	if err != nil {
   232  		return nil, err
   233  	}
   234  	meta := json.RawMessage(data)
   235  	return &meta, nil
   236  }
   237  
   238  func decodeSync(meta *json.RawMessage) (*syncState, error) {
   239  	if meta == nil {
   240  		return nil, fmt.Errorf("unable to deserialise sync state from <nil>")
   241  	}
   242  	data := []byte(*(meta))
   243  	if len(data) == 0 {
   244  		return nil, fmt.Errorf("unable to deserialise sync state from empty message")
   245  	}
   246  	state := &syncState{DbSyncState: &storage.DbSyncState{}}
   247  	err := json.Unmarshal(data, state)
   248  	return state, err
   249  }
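
// Illustrative sketch (not part of the original file): round-tripping a sync
// state through encodeSync/decodeSync, as done when persisting peer metadata.
// The function name and counter values are hypothetical.
func exampleSyncStateRoundTrip() error {
	state := &syncState{
		DbSyncState: &storage.DbSyncState{Last: 42},
		SessionAt:   42,
	}
	meta, err := encodeSync(state)
	if err != nil {
		return err
	}
	decoded, err := decodeSync(meta)
	if err != nil {
		return err
	}
	log.Debug(fmt.Sprintf("restored session counter: %v", decoded.SessionAt))
	return nil
}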
   250  
   251  /*
   252   sync implements the syncing script
   253   * first all items left in the request Db are replayed
   254     * type = StaleSync
   255     * Mode: by default once again via confirmation roundtrip
   256     * Priority: the items are replayed with the priority specified for StaleSync,
   257     * but within that, the order respects the earlier priority level of the request
   258   * after all items are consumed for a priority level, the respective
   259    queue for delivery requests is opened (this way new reqs are not written to db)
   260    (TODO: this should be checked)
   261   * the sync state provided by the remote peer is used to sync history
   262     * all the backlog from earlier (aborted) syncing is completed starting from latest
   263     * if Last < LastSeenAt then process all backlog in between,
   264       up to the last disconnect
   265     * if Last > 0 &&
   266  
   267   sync is called from the syncer constructor and is not supposed to be used externally
   268  */
   269  func (self *syncer) sync() {
   270  	state := self.state
   271  	// sync finished
   272  	defer close(self.syncStates)
   273  
   274  	// 0. first replay stale requests from request db
   275  	if state.SessionAt == 0 {
   276  		log.Debug(fmt.Sprintf("syncer[%v]: nothing to sync", self.key.Log()))
   277  		return
   278  	}
   279  	log.Debug(fmt.Sprintf("syncer[%v]: start replaying stale requests from request db", self.key.Log()))
   280  	for p := priorities - 1; p >= 0; p-- {
   281  		self.queues[p].dbRead(false, 0, self.replay())
   282  	}
   283  	log.Debug(fmt.Sprintf("syncer[%v]: done replaying stale requests from request db", self.key.Log()))
   284  
   285  	// unless the peer is synced, sync the unfinished history
   286  	if !state.Synced {
   287  		start := state.Start
   288  
   289  		if !storage.IsZeroKey(state.Latest) {
   290  			// 1. there is unfinished earlier sync
   291  			state.Start = state.Latest
   292  			log.Debug(fmt.Sprintf("syncer[%v]: start synchronising backlog (unfinished sync: %v)", self.key.Log(), state))
   293  			// blocks while the entire history up to state is synced
   294  			self.syncState(state)
   295  			if state.Last < state.SessionAt {
   296  				state.First = state.Last + 1
   297  			}
   298  		}
   299  		state.Latest = storage.ZeroKey
   300  		state.Start = start
   301  		// 2. sync up to last disconnect
   302  		if state.First < state.LastSeenAt {
   303  			state.Last = state.LastSeenAt
   304  			log.Debug(fmt.Sprintf("syncer[%v]: start synchronising history up to last disconnect at %v: %v", self.key.Log(), state.LastSeenAt, state))
   305  			self.syncState(state)
   306  			state.First = state.LastSeenAt
   307  		}
   308  		state.Latest = storage.ZeroKey
   309  
   310  	} else {
   311  		// synchronisation starts at end of last session
   312  		state.First = state.LastSeenAt
   313  	}
   314  
   315  	// 3. sync up to current session start
   316  	// if there have been new chunks since last session
   317  	if state.LastSeenAt < state.SessionAt {
   318  		state.Last = state.SessionAt
   319  		log.Debug(fmt.Sprintf("syncer[%v]: start synchronising history since last disconnect at %v up until session start at %v: %v", self.key.Log(), state.LastSeenAt, state.SessionAt, state))
   320  		// blocks until state syncing is finished
   321  		self.syncState(state)
   322  	}
   323  	log.Info(fmt.Sprintf("syncer[%v]: syncing all history complete", self.key.Log()))
   324  
   325  }
   326  
   327  // syncState blocks until the history up to the given state is synced
   328  func (self *syncer) syncState(state *syncState) {
   329  	self.syncStates <- state
   330  	select {
   331  	case <-state.synced:
   332  	case <-self.quit:
   333  	}
   334  }
   335  
   336  // stop quits the request processor and saves the request cache to disk
   337  func (self *syncer) stop() {
   338  	close(self.quit)
   339  	log.Trace(fmt.Sprintf("syncer[%v]: stop and save sync request db backlog", self.key.Log()))
   340  	for _, db := range self.queues {
   341  		db.stop()
   342  	}
   343  }
   344  
   345  // rlp serialisable sync request
   346  type syncRequest struct {
   347  	Key      storage.Key
   348  	Priority uint
   349  }
   350  
   351  func (self *syncRequest) String() string {
   352  	return fmt.Sprintf("<Key: %v, Priority: %v>", self.Key.Log(), self.Priority)
   353  }
   354  
   355  func (self *syncer) newSyncRequest(req interface{}, p int) (*syncRequest, error) {
   356  	key, _, _, _, err := parseRequest(req)
   357  	// TODO: if req has chunk, it should be put in a cache
   358  	// create
   359  	if err != nil {
   360  		return nil, err
   361  	}
   362  	return &syncRequest{key, uint(p)}, nil
   363  }
   364  
   365  // serves historical items from the DB
   366  // * read is on demand, blocking unless history channel is read
   367  // * accepts sync requests (syncStates) to create new db iterator
   368  // * closes the channel once iteration finishes
   369  func (self *syncer) syncHistory(state *syncState) chan interface{} {
   370  	var n uint
   371  	history := make(chan interface{})
   372  	log.Debug(fmt.Sprintf("syncer[%v]: syncing history between %v - %v for chunk addresses %v - %v", self.key.Log(), state.First, state.Last, state.Start, state.Stop))
   373  	it := self.dbAccess.iterator(state)
   374  	if it != nil {
   375  		go func() {
   376  			// signal that the iteration has ended
   377  			defer close(history)
   378  		IT:
   379  			for {
   380  				key := it.Next()
   381  				if key == nil {
   382  					break IT
   383  				}
   384  				select {
   385  				// blocking until history channel is read from
   386  				case history <- key:
   387  					n++
   388  					log.Trace(fmt.Sprintf("syncer[%v]: history: %v (%v keys)", self.key.Log(), key.Log(), n))
   389  					state.Latest = key
   390  				case <-self.quit:
   391  					return
   392  				}
   393  			}
   394  			log.Debug(fmt.Sprintf("syncer[%v]: finished syncing history between %v - %v for chunk addresses %v - %v (at %v) (chunks = %v)", self.key.Log(), state.First, state.Last, state.Start, state.Stop, state.Latest, n))
   395  		}()
   396  	}
   397  	return history
   398  }
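
// Illustrative sketch (not part of the original file): consuming the history
// channel returned by syncHistory. Items are chunk keys; the channel is closed
// once the db iterator is exhausted or the syncer quits. The method name is hypothetical.
func (self *syncer) exampleConsumeHistory(state *syncState) {
	for item := range self.syncHistory(state) {
		if key, ok := item.(storage.Key); ok {
			log.Trace(fmt.Sprintf("historical key: %v", key.Log()))
		}
	}
}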
   399  
   400  // triggers key synchronisation
   401  func (self *syncer) sendUnsyncedKeys() {
   402  	select {
   403  	case self.deliveryRequest <- true:
   404  	default:
   405  	}
   406  }
   407  
   408  // assembles a new batch of unsynced keys
   409  // * keys are drawn from the key buffers in order of priority queue
   410  // * if the queues of priority for History (HistoryReq) or higher are depleted,
   411  //   historical data is used so historical items are lower priority within
   412  //   their priority group.
   413  // * Order of historical data is unspecified
   414  func (self *syncer) syncUnsyncedKeys() {
   415  	// send out new
   416  	var unsynced []*syncRequest
   417  	var more, justSynced bool
   418  	var keyCount, historyCnt int
   419  	var history chan interface{}
   420  
   421  	priority := High
   422  	keys := self.keys[priority]
   423  	var newUnsyncedKeys, deliveryRequest chan bool
   424  	keyCounts := make([]int, priorities)
   425  	histPrior := self.SyncPriorities[HistoryReq]
   426  	syncStates := self.syncStates
   427  	state := self.state
   428  
   429  LOOP:
   430  	for {
   431  
   432  		var req interface{}
   433  		// select the highest priority channel to read from
   434  		// keys channels are buffered so the highest priority ones
   435  		// are checked first - integrity can only be guaranteed if writing
   436  		// is locked while selecting
   437  		if priority != High || len(keys) == 0 {
   438  			// selection is not needed if the High priority queue has items
   439  			keys = nil
   440  		PRIORITIES:
   441  			for priority = High; priority >= 0; priority-- {
   442  				// the first priority channel that is non-empty will be assigned to keys
   443  				if len(self.keys[priority]) > 0 {
   444  				log.Trace(fmt.Sprintf("syncer[%v]: reading request with priority %v", self.key.Log(), priority))
   445  					keys = self.keys[priority]
   446  					break PRIORITIES
   447  				}
   448  				log.Trace(fmt.Sprintf("syncer[%v/%v]: queue: [%v, %v, %v]", self.key.Log(), priority, len(self.keys[High]), len(self.keys[Medium]), len(self.keys[Low])))
   449  				// if the input queue is empty on this level, resort to history if there is any
   450  				if uint(priority) == histPrior && history != nil {
   451  					log.Trace(fmt.Sprintf("syncer[%v]: reading history for %v", self.key.Log(), self.key))
   452  					keys = history
   453  					break PRIORITIES
   454  				}
   455  			}
   456  		}
   457  
   458  		// if peer ready to receive but nothing to send
   459  		if keys == nil && deliveryRequest == nil {
   460  			// if no items are left, switch to waiting mode
   461  			log.Trace(fmt.Sprintf("syncer[%v]: buffers consumed. Waiting", self.key.Log()))
   462  			newUnsyncedKeys = self.newUnsyncedKeys
   463  		}
   464  
   465  		// send msg iff
   466  		// * peer is ready to receive keys AND (
   467  		// * all queues and history are depleted OR
   468  		// * batch full OR
   469  		// * all history has been consumed and synced)
   470  		if deliveryRequest == nil &&
   471  			(justSynced ||
   472  				len(unsynced) > 0 && keys == nil ||
   473  				len(unsynced) == int(self.SyncBatchSize)) {
   474  			justSynced = false
   475  			// listen to requests
   476  			deliveryRequest = self.deliveryRequest
   477  			newUnsyncedKeys = nil // don't care about data until next req comes in
   478  			// set sync to current counter
   479  			// (all non-historical outgoing traffic scheduled and persisted)
   480  			state.LastSeenAt = self.dbAccess.counter()
   481  			state.Latest = storage.ZeroKey
   482  			log.Trace(fmt.Sprintf("syncer[%v]: sending %v", self.key.Log(), unsynced))
   483  			//  send the unsynced keys
   484  			stateCopy := *state
   485  			err := self.unsyncedKeys(unsynced, &stateCopy)
   486  			if err != nil {
   487  				log.Warn(fmt.Sprintf("syncer[%v]: unable to send unsynced keys: %v", self.key.Log(), err))
   488  			}
   489  			self.state = state
   490  			log.Debug(fmt.Sprintf("syncer[%v]: --> %v keys sent: (total: %v (%v), history: %v), sent sync state: %v", self.key.Log(), len(unsynced), keyCounts, keyCount, historyCnt, stateCopy))
   491  			unsynced = nil
   492  			keys = nil
   493  		}
   494  
   495  		// process item and add it to the batch
   496  		select {
   497  		case <-self.quit:
   498  			break LOOP
   499  		case req, more = <-keys:
   500  			if keys == history && !more {
   501  				log.Trace(fmt.Sprintf("syncer[%v]: syncing history segment complete", self.key.Log()))
   502  				// history channel is closed, waiting for new state (called from sync())
   503  				syncStates = self.syncStates
   504  				state.Synced = true // this signals that the current segment is complete
   505  				select {
   506  				case state.synced <- false:
   507  				case <-self.quit:
   508  					break LOOP
   509  				}
   510  				justSynced = true
   511  				history = nil
   512  			}
   513  		case <-deliveryRequest:
   514  			log.Trace(fmt.Sprintf("syncer[%v]: peer ready to receive", self.key.Log()))
   515  
   516  			// this 1 cap channel can wake up the loop
   517  			// signalling that the peer is ready to receive unsynced keys
   518  			// the channel is set to nil so any further writes will be ignored
   519  			deliveryRequest = nil
   520  
   521  		case <-newUnsyncedKeys:
   522  			log.Trace(fmt.Sprintf("syncer[%v]: new unsynced keys available", self.key.Log()))
   523  			// this 1 cap channel can wake up the loop
   524  			// signals that data is available to send if peer is ready to receive
   525  			newUnsyncedKeys = nil
   526  			keys = self.keys[High]
   527  
   528  		case state, more = <-syncStates:
   529  			// this resets the state
   530  			if !more {
   531  				state = self.state
   532  				log.Trace(fmt.Sprintf("syncer[%v]: (priority %v) syncing complete up to %v", self.key.Log(), priority, state))
   533  				state.Synced = true
   534  				syncStates = nil
   535  			} else {
   536  				log.Trace(fmt.Sprintf("syncer[%v]: (priority %v) syncing history up to %v with priority %v", self.key.Log(), priority, state, histPrior))
   537  				state.Synced = false
   538  				history = self.syncHistory(state)
   539  				// only one history at a time, only allow another one once the
   540  				// history channel is closed
   541  				syncStates = nil
   542  			}
   543  		}
   544  		if req == nil {
   545  			continue LOOP
   546  		}
   547  
   548  		log.Trace(fmt.Sprintf("syncer[%v]: (priority %v) added to unsynced keys: %v", self.key.Log(), priority, req))
   549  		keyCounts[priority]++
   550  		keyCount++
   551  		if keys == history {
   552  			log.Trace(fmt.Sprintf("syncer[%v]: (priority %v) history item %v (synced = %v)", self.key.Log(), priority, req, state.Synced))
   553  			historyCnt++
   554  		}
   555  		if sreq, err := self.newSyncRequest(req, priority); err == nil {
   556  			// extract key from req
   557  			log.Trace(fmt.Sprintf("syncer[%v]: (priority %v): request %v (synced = %v)", self.key.Log(), priority, req, state.Synced))
   558  			unsynced = append(unsynced, sreq)
   559  		} else {
   560  			log.Warn(fmt.Sprintf("syncer[%v]: (priority %v): error creating request for %v: %v)", self.key.Log(), priority, req, err))
   561  		}
   562  
   563  	}
   564  }
   565  
   566  // delivery loop
   567  // takes into account priority, sends storeRequests with chunk (delivery)
   568  // idle blocking if no new deliveries in any of the queues
   569  func (self *syncer) syncDeliveries() {
   570  	var req *storeRequestMsgData
   571  	p := High
   572  	var deliveries chan *storeRequestMsgData
   573  	var msg *storeRequestMsgData
   574  	var err error
   575  	var c = [priorities]int{}
   576  	var n = [priorities]int{}
   577  	var total, success uint
   578  
   579  	for {
   580  		deliveries = self.deliveries[p]
   581  		select {
   582  		case req = <-deliveries:
   583  			n[p]++
   584  			c[p]++
   585  		default:
   586  			if p == Low {
   587  				// blocking, depletion on all channels, no preference for priority
   588  				select {
   589  				case req = <-self.deliveries[High]:
   590  					n[High]++
   591  				case req = <-self.deliveries[Medium]:
   592  					n[Medium]++
   593  				case req = <-self.deliveries[Low]:
   594  					n[Low]++
   595  				case <-self.quit:
   596  					return
   597  				}
   598  				p = High
   599  			} else {
   600  				p--
   601  				continue
   602  			}
   603  		}
   604  		total++
   605  		msg, err = self.newStoreRequestMsgData(req)
   606  		if err != nil {
   607  			log.Warn(fmt.Sprintf("syncer[%v]: failed to create store request for %v: %v", self.key.Log(), req, err))
   608  		} else {
   609  			err = self.store(msg)
   610  			if err != nil {
   611  				log.Warn(fmt.Sprintf("syncer[%v]: failed to deliver %v: %v", self.key.Log(), req, err))
   612  			} else {
   613  				success++
   614  				log.Trace(fmt.Sprintf("syncer[%v]: %v successfully delivered", self.key.Log(), req))
   615  			}
   616  		}
   617  		if total%self.SyncBatchSize == 0 {
   618  			log.Debug(fmt.Sprintf("syncer[%v]: deliver Total: %v, Success: %v, High: %v/%v, Medium: %v/%v, Low %v/%v", self.key.Log(), total, success, c[High], n[High], c[Medium], n[Medium], c[Low], n[Low]))
   619  		}
   620  	}
   621  }
   622  
   623  /*
   624   addRequest handles requests for delivery
   625   it accepts 4 types:
   626  
   627   * storeRequestMsgData: coming from netstore propagate response
   628   * chunk: coming from forwarding (questionable: id?)
   629   * key: from incoming syncRequest
   630   * syncDbEntry: key,id encoded in db
   631  
   632   If sync mode is on for the type of request, then
   633   it sends the request to the keys queue of the correct priority
   634   channel buffered with capacity (SyncBufferSize)
   635  
   636   If sync mode is off, then requests are sent directly to deliveries
   637  */
   638  func (self *syncer) addRequest(req interface{}, ty int) {
   639  	// retrieve priority for the request type
   640  
   641  	priority := self.SyncPriorities[ty]
   642  	// sync mode for this type ON
   643  	if self.syncF() || ty == DeliverReq {
   644  		if self.SyncModes[ty] {
   645  			self.addKey(req, priority, self.quit)
   646  		} else {
   647  			self.addDelivery(req, priority, self.quit)
   648  		}
   649  	}
   650  }
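
// Illustrative sketch (not part of the original file): how callers hand items
// to the syncer. The method name is hypothetical. With the default SyncModes
// both requests below go through the key queues (confirmation roundtrip);
// only BacklogReq items would be sent directly to the delivery queues.
func (self *syncer) exampleAddRequests(chunk *storage.Chunk, req *storeRequestMsgData) {
	self.addRequest(chunk, PropagateReq) // queued on keys[Medium] with the defaults
	self.addRequest(req, DeliverReq)     // queued on keys[High] with the defaults
}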
   651  
   652  // addKey queues sync request for sync confirmation with given priority
   653  // ie the key will go out in an unsyncedKeys message
   654  func (self *syncer) addKey(req interface{}, priority uint, quit chan bool) bool {
   655  	select {
   656  	case self.keys[priority] <- req:
   657  		// this wakes up the unsynced keys loop if idle
   658  		select {
   659  		case self.newUnsyncedKeys <- true:
   660  		default:
   661  		}
   662  		return true
   663  	case <-quit:
   664  		return false
   665  	}
   666  }
   667  
   668  // addDelivery queues a delivery request with given priority
   669  // ie the chunk will be delivered ASAP mod priority queueing handled by syncdb
   670  // requests are persisted across sessions for correct sync
   671  func (self *syncer) addDelivery(req interface{}, priority uint, quit chan bool) bool {
   672  	select {
   673  	case self.queues[priority].buffer <- req:
   674  		return true
   675  	case <-quit:
   676  		return false
   677  	}
   678  }
   679  
   680  // doDelivery delivers the chunk for the request with given priority
   681  // without queuing
   682  func (self *syncer) doDelivery(req interface{}, priority uint, quit chan bool) bool {
   683  	msgdata, err := self.newStoreRequestMsgData(req)
   684  	if err != nil {
   685  		log.Warn(fmt.Sprintf("unable to deliver request %v: %v", msgdata, err))
   686  		return false
   687  	}
   688  	select {
   689  	case self.deliveries[priority] <- msgdata:
   690  		return true
   691  	case <-quit:
   692  		return false
   693  	}
   694  }
   695  
   696  // returns the delivery function for given priority
   697  // passed on to syncDb
   698  func (self *syncer) deliver(priority uint) func(req interface{}, quit chan bool) bool {
   699  	return func(req interface{}, quit chan bool) bool {
   700  		return self.doDelivery(req, priority, quit)
   701  	}
   702  }
   703  
   704  // returns the replay function passed on to syncDb
   705  // depending on sync mode settings for BacklogReq,
   706  // replay of request db backlog sends items via confirmation
   707  // or directly delivers
   708  func (self *syncer) replay() func(req interface{}, quit chan bool) bool {
   709  	sync := self.SyncModes[BacklogReq]
   710  	priority := self.SyncPriorities[BacklogReq]
   711  	// sync mode for this type ON
   712  	if sync {
   713  		return func(req interface{}, quit chan bool) bool {
   714  			return self.addKey(req, priority, quit)
   715  		}
   716  	} else {
   717  		return func(req interface{}, quit chan bool) bool {
   718  			return self.doDelivery(req, priority, quit)
   719  		}
   720  
   721  	}
   722  }
   723  
   724  // given a request, extends it to a full storeRequestMsgData
   725  // polymorphic: see addRequest for the types accepted
   726  func (self *syncer) newStoreRequestMsgData(req interface{}) (*storeRequestMsgData, error) {
   727  
   728  	key, id, chunk, sreq, err := parseRequest(req)
   729  	if err != nil {
   730  		return nil, err
   731  	}
   732  
   733  	if sreq == nil {
   734  		if chunk == nil {
   735  			var err error
   736  			chunk, err = self.dbAccess.get(key)
   737  			if err != nil {
   738  				return nil, err
   739  			}
   740  		}
   741  
   742  		sreq = &storeRequestMsgData{
   743  			Id:    id,
   744  			Key:   chunk.Key,
   745  			SData: chunk.SData,
   746  		}
   747  	}
   748  
   749  	return sreq, nil
   750  }
   751  
   752  // parses request types and extracts key, id, chunk, request if available
   753  // does not do chunk lookup !
   754  func parseRequest(req interface{}) (storage.Key, uint64, *storage.Chunk, *storeRequestMsgData, error) {
   755  	var key storage.Key
   756  	var entry *syncDbEntry
   757  	var chunk *storage.Chunk
   758  	var id uint64
   759  	var ok bool
   760  	var sreq *storeRequestMsgData
   761  	var err error
   762  
   763  	if key, ok = req.(storage.Key); ok {
   764  		id = generateId()
   765  
   766  	} else if entry, ok = req.(*syncDbEntry); ok {
   767  		id = binary.BigEndian.Uint64(entry.val[32:])
   768  		key = storage.Key(entry.val[:32])
   769  
   770  	} else if chunk, ok = req.(*storage.Chunk); ok {
   771  		key = chunk.Key
   772  		id = generateId()
   773  
   774  	} else if sreq, ok = req.(*storeRequestMsgData); ok {
   775  		key = sreq.Key
   776  	} else {
   777  		err = fmt.Errorf("type not allowed: %v (%T)", req, req)
   778  	}
   779  
   780  	return key, id, chunk, sreq, err
   781  }
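
// Illustrative sketch (not part of the original file): the four request types
// accepted by parseRequest. Keys and chunks get a freshly generated id, a db
// entry decodes its stored id, and a storeRequestMsgData keeps its Id in the
// returned message; chunk data is never looked up here. The function name is hypothetical.
func exampleParseRequest(key storage.Key, chunk *storage.Chunk, msg *storeRequestMsgData, entry *syncDbEntry) {
	for _, req := range []interface{}{key, chunk, msg, entry} {
		k, id, _, _, err := parseRequest(req)
		if err != nil {
			log.Warn(fmt.Sprintf("unsupported request %T: %v", req, err))
			continue
		}
		log.Trace(fmt.Sprintf("request %T parsed: key %v, id %v", req, k.Log(), id))
	}
}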