github.com/janotchain/janota@v0.0.0-20220824112012-93ea4c5dee78/swarm/network/syncer.go

// Copyright 2016 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

package network

import (
	"encoding/binary"
	"encoding/json"
	"fmt"
	"path/filepath"

	"github.com/ethereum/go-ethereum/log"
	"github.com/ethereum/go-ethereum/swarm/storage"
)

// syncer parameters (global, not peer specific) default values
const (
	requestDbBatchSize = 512  // size of a batch before it is written to the request db
	keyBufferSize      = 1024 // size of buffer for unsynced keys
	syncBatchSize      = 128  // maximum batch size for outgoing requests
	syncBufferSize     = 128  // size of buffer for delivery requests
	syncCacheSize      = 1024 // cache capacity to store request queue in memory
)

// priorities
const (
	Low        = iota // 0
	Medium            // 1
	High              // 2
	priorities        // 3 number of priority levels
)

// request types
const (
	DeliverReq   = iota // 0
	PushReq             // 1
	PropagateReq        // 2
	HistoryReq          // 3
	BacklogReq          // 4
)

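// Note (illustrative reading of the defaults in NewSyncParams below): the
// request types index the SyncPriorities and SyncModes tables, giving
// DeliverReq -> High, PushReq/PropagateReq -> Medium, HistoryReq/BacklogReq -> Low,
// with every type synced via confirmation roundtrip except BacklogReq,
// which is delivered directly.
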
// json serialisable struct to record the synchronisation state between two peers
type syncState struct {
	*storage.DbSyncState // embeds the following 4 fields:
	// Start      Key    // lower limit of address space
	// Stop       Key    // upper limit of address space
	// First      uint64 // counter taken from last sync state
	// Last       uint64 // counter of remote peer dbStore at the time of last connection
	SessionAt  uint64      // set at the time of connection
	LastSeenAt uint64      // set at the time of connection
	Latest     storage.Key // cursor of dbStore when last synced (continuously set by syncer)
	Synced     bool        // true iff Sync is done up to the last disconnect
	synced     chan bool   // signal that sync stage finished
}

// wrapper of the dbs to provide mockable custom local chunk store access to the syncer
type DbAccess struct {
	db  *storage.DbStore
	loc *storage.LocalStore
}

func NewDbAccess(loc *storage.LocalStore) *DbAccess {
	return &DbAccess{loc.DbStore.(*storage.DbStore), loc}
}

// obtains the chunk for a key from the local store only (no network lookup)
func (self *DbAccess) get(key storage.Key) (*storage.Chunk, error) {
	return self.loc.Get(key)
}

// current storage counter of the chunk db
func (self *DbAccess) counter() uint64 {
	return self.db.Counter()
}

// implemented by dbStoreSyncIterator
type keyIterator interface {
	Next() storage.Key
}

// generator function for iteration by address range and storage counter
func (self *DbAccess) iterator(s *syncState) keyIterator {
	it, err := self.db.NewSyncIterator(*(s.DbSyncState))
	if err != nil {
		return nil
	}
	return keyIterator(it)
}

func (self syncState) String() string {
	if self.Synced {
		return fmt.Sprintf(
			"session started at: %v, last seen at: %v, latest key: %v",
			self.SessionAt, self.LastSeenAt,
			self.Latest.Log(),
		)
	} else {
		return fmt.Sprintf(
			"address: %v-%v, index: %v-%v, session started at: %v, last seen at: %v, latest key: %v",
			self.Start.Log(), self.Stop.Log(),
			self.First, self.Last,
			self.SessionAt, self.LastSeenAt,
			self.Latest.Log(),
		)
	}
}

// syncer parameters (global, not peer specific)
type SyncParams struct {
	RequestDbPath      string // path for request db (leveldb)
	RequestDbBatchSize uint   // number of items before a batch is saved to the request db
	KeyBufferSize      uint   // size of key buffer
	SyncBatchSize      uint   // maximum batch size for outgoing requests
	SyncBufferSize     uint   // size of buffer for delivery requests
	SyncCacheSize      uint   // cache capacity to store request queue in memory
	SyncPriorities     []uint // list of priority levels for req types 0-4
	SyncModes          []bool // list of sync modes for req types 0-4
}

// constructor with default values
func NewSyncParams(bzzdir string) *SyncParams {
	return &SyncParams{
		RequestDbPath:      filepath.Join(bzzdir, "requests"),
		RequestDbBatchSize: requestDbBatchSize,
		KeyBufferSize:      keyBufferSize,
		SyncBufferSize:     syncBufferSize,
		SyncBatchSize:      syncBatchSize,
		SyncCacheSize:      syncCacheSize,
		SyncPriorities:     []uint{High, Medium, Medium, Low, Low},
		SyncModes:          []bool{true, true, true, true, false},
	}
}

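// Usage sketch (illustrative, not part of the original code): build the
// defaults for a data directory, then tune individual fields before the
// params are handed to newSyncer. The path below is hypothetical.
//
//	params := NewSyncParams("/tmp/bzz")
//	params.SyncBatchSize = 256             // larger outgoing batches
//	params.SyncModes[PropagateReq] = false // deliver propagations directly
//
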
// syncer is the agent that manages content distribution/storage replication/chunk storeRequest forwarding
type syncer struct {
	*SyncParams                     // sync parameters
	syncF           func() bool     // if syncing is needed
	key             storage.Key     // remote peer's address key
	state           *syncState      // sync state for our dbStore
	syncStates      chan *syncState // different stages of sync
	deliveryRequest chan bool       // one of two triggers needed to send unsyncedKeys
	newUnsyncedKeys chan bool       // one of two triggers needed to send unsynced keys
	quit            chan bool       // signal to quit loops

	// DB related fields
	dbAccess *DbAccess // access to dbStore

	// native fields
	queues     [priorities]*syncDb                   // in-memory cache / queues for sync reqs
	keys       [priorities]chan interface{}          // buffer for unsynced keys
	deliveries [priorities]chan *storeRequestMsgData // delivery

	// bzz protocol instance outgoing message callbacks (mockable for testing)
	unsyncedKeys func([]*syncRequest, *syncState) error // send unsyncedKeysMsg
	store        func(*storeRequestMsgData) error       // send storeRequestMsg
}

// a syncer instance is linked to each peer connection
// constructor is called from protocol after successful handshake
// the returned instance is attached to the peer and can be called
// by the forwarder
func newSyncer(
	db *storage.LDBDatabase, remotekey storage.Key,
	dbAccess *DbAccess,
	unsyncedKeys func([]*syncRequest, *syncState) error,
	store func(*storeRequestMsgData) error,
	params *SyncParams,
	state *syncState,
	syncF func() bool,
) (*syncer, error) {

	syncBufferSize := params.SyncBufferSize
	keyBufferSize := params.KeyBufferSize
	dbBatchSize := params.RequestDbBatchSize

	self := &syncer{
		syncF:           syncF,
		key:             remotekey,
		dbAccess:        dbAccess,
		syncStates:      make(chan *syncState, 20),
		deliveryRequest: make(chan bool, 1),
		newUnsyncedKeys: make(chan bool, 1),
		SyncParams:      params,
		state:           state,
		quit:            make(chan bool),
		unsyncedKeys:    unsyncedKeys,
		store:           store,
	}

	// initialising
	for i := 0; i < priorities; i++ {
		self.keys[i] = make(chan interface{}, keyBufferSize)
		self.deliveries[i] = make(chan *storeRequestMsgData)
		// initialise a syncdb instance for each priority queue
		self.queues[i] = newSyncDb(db, remotekey, uint(i), syncBufferSize, dbBatchSize, self.deliver(uint(i)))
	}
	log.Info(fmt.Sprintf("syncer started: %v", state))
	// launch chunk delivery service
	go self.syncDeliveries()
	// launch sync task manager
	if self.syncF() {
		go self.sync()
	}
	// process unsynced keys to broadcast
	go self.syncUnsyncedKeys()

	return self, nil
}

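// Wiring sketch (illustrative, not part of the original code): the protocol
// layer supplies the two outgoing-message callbacks; here they are stubbed
// so the shape of the call is visible. requestDb, remoteAddr, dbAccess,
// params and state are assumed to exist in the caller.
//
//	s, err := newSyncer(
//		requestDb, remoteAddr, dbAccess,
//		func(reqs []*syncRequest, st *syncState) error { return nil }, // send unsyncedKeysMsg
//		func(msg *storeRequestMsgData) error { return nil },           // send storeRequestMsg
//		params, state,
//		func() bool { return true }, // syncing enabled
//	)
//
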
// metadata serialisation
func encodeSync(state *syncState) (*json.RawMessage, error) {
	data, err := json.MarshalIndent(state, "", " ")
	if err != nil {
		return nil, err
	}
	meta := json.RawMessage(data)
	return &meta, nil
}

func decodeSync(meta *json.RawMessage) (*syncState, error) {
	if meta == nil {
		return nil, fmt.Errorf("unable to deserialise sync state from <nil>")
	}
	data := []byte(*(meta))
	if len(data) == 0 {
		return nil, fmt.Errorf("unable to deserialise sync state from empty message")
	}
	state := &syncState{DbSyncState: &storage.DbSyncState{}}
	err := json.Unmarshal(data, state)
	return state, err
}

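// Round-trip sketch (illustrative):
//
//	state := &syncState{DbSyncState: &storage.DbSyncState{First: 1, Last: 42}, SessionAt: 100}
//	meta, err := encodeSync(state)
//	if err == nil {
//		restored, _ := decodeSync(meta) // restored.Last == 42
//		_ = restored
//	}
//
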
/*
 sync implements the syncing script
 * first all items left in the request Db are replayed
   * type = StaleSync
   * Mode: by default once again via confirmation roundtrip
   * Priority: the items are replayed with the priority specified for StaleSync,
     but the order respects the earlier priority level of the request
 * after all items are consumed for a priority level, the respective
   queue for delivery requests is opened (this way new reqs are not written to db)
   (TODO: this should be checked)
 * the sync state provided by the remote peer is used to sync history
   * all the backlog from earlier (aborted) syncing is completed starting from latest
   * if Last < LastSeenAt then all items in between are processed, i.e. the
     backlog up to the last disconnect
   * if Last > 0 &&

 sync is called from the syncer constructor and is not supposed to be used externally
*/
func (self *syncer) sync() {
	state := self.state
	// sync finished
	defer close(self.syncStates)

	// 0. first replay stale requests from request db
	if state.SessionAt == 0 {
		log.Debug(fmt.Sprintf("syncer[%v]: nothing to sync", self.key.Log()))
		return
	}
	log.Debug(fmt.Sprintf("syncer[%v]: start replaying stale requests from request db", self.key.Log()))
	for p := priorities - 1; p >= 0; p-- {
		self.queues[p].dbRead(false, 0, self.replay())
	}
	log.Debug(fmt.Sprintf("syncer[%v]: done replaying stale requests from request db", self.key.Log()))

	// unless the peer is synced, sync the unfinished history
	if !state.Synced {
		start := state.Start

		if !storage.IsZeroKey(state.Latest) {
			// 1. there is an unfinished earlier sync
			state.Start = state.Latest
			log.Debug(fmt.Sprintf("syncer[%v]: start synchronising backlog (unfinished sync: %v)", self.key.Log(), state))
			// blocks while the entire history up to state is synced
			self.syncState(state)
			if state.Last < state.SessionAt {
				state.First = state.Last + 1
			}
		}
		state.Latest = storage.ZeroKey
		state.Start = start
		// 2. sync up to last disconnect
		if state.First < state.LastSeenAt {
			state.Last = state.LastSeenAt
			log.Debug(fmt.Sprintf("syncer[%v]: start synchronising history up to last disconnect at %v: %v", self.key.Log(), state.LastSeenAt, state))
			self.syncState(state)
			state.First = state.LastSeenAt
		}
		state.Latest = storage.ZeroKey

	} else {
		// synchronisation starts at end of last session
		state.First = state.LastSeenAt
	}

	// 3. sync up to current session start
	// if there have been new chunks since last session
	if state.LastSeenAt < state.SessionAt {
		state.Last = state.SessionAt
		log.Debug(fmt.Sprintf("syncer[%v]: start synchronising history since last disconnect at %v up until session start at %v: %v", self.key.Log(), state.LastSeenAt, state.SessionAt, state))
		// blocks until state syncing is finished
		self.syncState(state)
	}
	log.Info(fmt.Sprintf("syncer[%v]: syncing all history complete", self.key.Log()))

}

// pushes a new state and blocks until syncing up to that state is complete
func (self *syncer) syncState(state *syncState) {
	self.syncStates <- state
	select {
	case <-state.synced:
	case <-self.quit:
	}
}

// stop quits the request processor and saves the request cache to disk
func (self *syncer) stop() {
	close(self.quit)
	log.Trace(fmt.Sprintf("syncer[%v]: stop and save sync request db backlog", self.key.Log()))
	for _, db := range self.queues {
		db.stop()
	}
}

// rlp serialisable sync request
type syncRequest struct {
	Key      storage.Key
	Priority uint
}

func (self *syncRequest) String() string {
	return fmt.Sprintf("<Key: %v, Priority: %v>", self.Key.Log(), self.Priority)
}

func (self *syncer) newSyncRequest(req interface{}, p int) (*syncRequest, error) {
	key, _, _, _, err := parseRequest(req)
	// TODO: if req has chunk, it should be put in a cache
	if err != nil {
		return nil, err
	}
	return &syncRequest{key, uint(p)}, nil
}

// serves historical items from the DB
// * read is on demand, blocking unless the history channel is read
// * accepts sync requests (syncStates) to create a new db iterator
// * closes the channel once iteration finishes
func (self *syncer) syncHistory(state *syncState) chan interface{} {
	var n uint
	history := make(chan interface{})
	log.Debug(fmt.Sprintf("syncer[%v]: syncing history between %v - %v for chunk addresses %v - %v", self.key.Log(), state.First, state.Last, state.Start, state.Stop))
	it := self.dbAccess.iterator(state)
	if it != nil {
		go func() {
			// signal that the iteration has ended
			defer close(history)
		IT:
			for {
				key := it.Next()
				if key == nil {
					break IT
				}
				select {
				// blocking until history channel is read from
				case history <- key:
					n++
					log.Trace(fmt.Sprintf("syncer[%v]: history: %v (%v keys)", self.key.Log(), key.Log(), n))
					state.Latest = key
				case <-self.quit:
					return
				}
			}
			log.Debug(fmt.Sprintf("syncer[%v]: finished syncing history between %v - %v for chunk addresses %v - %v (at %v) (chunks = %v)", self.key.Log(), state.First, state.Last, state.Start, state.Stop, state.Latest, n))
		}()
	}
	return history
}

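// Consumption sketch (illustrative): the returned channel yields one chunk
// address per receive and is closed when the iterator is exhausted, so it
// can simply be ranged over.
//
//	history := self.syncHistory(state)
//	for key := range history {
//		// key is the next chunk address in the Start-Stop range
//	}
//
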
// triggers key synchronisation
func (self *syncer) sendUnsyncedKeys() {
	select {
	case self.deliveryRequest <- true:
	default:
	}
}

// assembles a new batch of unsynced keys
// * keys are drawn from the key buffers in order of priority queue
// * if the queues of priority for History (HistoryReq) or higher are depleted,
//   historical data is used so historical items are lower priority within
//   their priority group.
// * Order of historical data is unspecified
func (self *syncer) syncUnsyncedKeys() {
	// send out new
	var unsynced []*syncRequest
	var more, justSynced bool
	var keyCount, historyCnt int
	var history chan interface{}

	priority := High
	keys := self.keys[priority]
	var newUnsyncedKeys, deliveryRequest chan bool
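	// a nil channel blocks forever in a select, so flipping these between
	// nil and the live channel toggles which wake-up signals the loop
	// listens to below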
	keyCounts := make([]int, priorities)
	histPrior := self.SyncPriorities[HistoryReq]
	syncStates := self.syncStates
	state := self.state

LOOP:
	for {

		var req interface{}
		// select the highest priority channel to read from
		// keys channels are buffered so the highest priority ones
		// are checked first - integrity can only be guaranteed if writing
		// is locked while selecting
		if priority != High || len(keys) == 0 {
			// selection is not needed if the High priority queue has items
			keys = nil
		PRIORITIES:
			for priority = High; priority >= 0; priority-- {
				// the first priority channel that is non-empty will be assigned to keys
				if len(self.keys[priority]) > 0 {
					log.Trace(fmt.Sprintf("syncer[%v]: reading request with priority %v", self.key.Log(), priority))
					keys = self.keys[priority]
					break PRIORITIES
				}
				log.Trace(fmt.Sprintf("syncer[%v/%v]: queue: [%v, %v, %v]", self.key.Log(), priority, len(self.keys[High]), len(self.keys[Medium]), len(self.keys[Low])))
				// if the input queue is empty on this level, resort to history if there is any
				if uint(priority) == histPrior && history != nil {
					log.Trace(fmt.Sprintf("syncer[%v]: reading history for %v", self.key.Log(), self.key))
					keys = history
					break PRIORITIES
				}
			}
		}

		// if peer is ready to receive but there is nothing to send
		if keys == nil && deliveryRequest == nil {
			// no items left, switch to waiting mode
			log.Trace(fmt.Sprintf("syncer[%v]: buffers consumed. Waiting", self.key.Log()))
			newUnsyncedKeys = self.newUnsyncedKeys
		}

		// send msg iff
		// * peer is ready to receive keys AND (
		// * all queues and history are depleted OR
		// * batch is full OR
		// * all history has been consumed, synced)
		if deliveryRequest == nil &&
			(justSynced ||
				len(unsynced) > 0 && keys == nil ||
				len(unsynced) == int(self.SyncBatchSize)) {
			justSynced = false
			// listen to requests
			deliveryRequest = self.deliveryRequest
			newUnsyncedKeys = nil // ignore data until the next req comes in
			// set sync to current counter
			// (all nonhistorical outgoing traffic scheduled and persisted)
			state.LastSeenAt = self.dbAccess.counter()
			state.Latest = storage.ZeroKey
			log.Trace(fmt.Sprintf("syncer[%v]: sending %v", self.key.Log(), unsynced))
			// send the unsynced keys
			stateCopy := *state
			err := self.unsyncedKeys(unsynced, &stateCopy)
			if err != nil {
				log.Warn(fmt.Sprintf("syncer[%v]: unable to send unsynced keys: %v", self.key.Log(), err))
			}
			self.state = state
			log.Debug(fmt.Sprintf("syncer[%v]: --> %v keys sent: (total: %v (%v), history: %v), sent sync state: %v", self.key.Log(), len(unsynced), keyCounts, keyCount, historyCnt, stateCopy))
			unsynced = nil
			keys = nil
		}

		// process item and add it to the batch
		select {
		case <-self.quit:
			break LOOP
		case req, more = <-keys:
			if keys == history && !more {
				log.Trace(fmt.Sprintf("syncer[%v]: syncing history segment complete", self.key.Log()))
				// history channel is closed, waiting for new state (called from sync())
				syncStates = self.syncStates
				state.Synced = true // this signals that the current segment is complete
				select {
				case state.synced <- false:
				case <-self.quit:
					break LOOP
				}
				justSynced = true
				history = nil
			}
		case <-deliveryRequest:
			log.Trace(fmt.Sprintf("syncer[%v]: peer ready to receive", self.key.Log()))

			// this 1-cap channel can wake up the loop
			// signalling that the peer is ready to receive unsynced keys
			// the channel is set to nil so any further writes will be ignored
			deliveryRequest = nil

		case <-newUnsyncedKeys:
			log.Trace(fmt.Sprintf("syncer[%v]: new unsynced keys available", self.key.Log()))
			// this 1-cap channel can wake up the loop
			// signals that data is available to send if the peer is ready to receive
			newUnsyncedKeys = nil
			keys = self.keys[High]

		case state, more = <-syncStates:
			// this resets the state
			if !more {
				state = self.state
				log.Trace(fmt.Sprintf("syncer[%v]: (priority %v) syncing complete up to %v", self.key.Log(), priority, state))
				state.Synced = true
				syncStates = nil
			} else {
				log.Trace(fmt.Sprintf("syncer[%v]: (priority %v) syncing history up to %v (priority %v)", self.key.Log(), priority, state, histPrior))
				state.Synced = false
				history = self.syncHistory(state)
				// only one history at a time, only allow another one once the
				// history channel is closed
				syncStates = nil
			}
		}
		if req == nil {
			continue LOOP
		}

		log.Trace(fmt.Sprintf("syncer[%v]: (priority %v) added to unsynced keys: %v", self.key.Log(), priority, req))
		keyCounts[priority]++
		keyCount++
		if keys == history {
			log.Trace(fmt.Sprintf("syncer[%v]: (priority %v) history item %v (synced = %v)", self.key.Log(), priority, req, state.Synced))
			historyCnt++
		}
		if sreq, err := self.newSyncRequest(req, priority); err == nil {
			// extract key from req
			log.Trace(fmt.Sprintf("syncer[%v]: (priority %v): request %v (synced = %v)", self.key.Log(), priority, req, state.Synced))
			unsynced = append(unsynced, sreq)
		} else {
			log.Warn(fmt.Sprintf("syncer[%v]: (priority %v): error creating request for %v: %v", self.key.Log(), priority, req, err))
		}

	}
}

// delivery loop
// takes priority into account when sending store requests with chunks (delivery)
// idle blocking if there are no new deliveries in any of the queues
func (self *syncer) syncDeliveries() {
	var req *storeRequestMsgData
	p := High
	var deliveries chan *storeRequestMsgData
	var msg *storeRequestMsgData
	var err error
	var c = [priorities]int{}
	var n = [priorities]int{}
	var total, success uint

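	// scan from High downwards: drain non-blockingly at the current
	// priority, step down a level when it is empty, and only block on all
	// three queues at once when even Low has nothing to deliver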
	for {
		deliveries = self.deliveries[p]
		select {
		case req = <-deliveries:
			n[p]++
			c[p]++
		default:
			if p == Low {
				// blocking, depletion on all channels, no preference for priority
				select {
				case req = <-self.deliveries[High]:
					n[High]++
				case req = <-self.deliveries[Medium]:
					n[Medium]++
				case req = <-self.deliveries[Low]:
					n[Low]++
				case <-self.quit:
					return
				}
				p = High
			} else {
				p--
				continue
			}
		}
		total++
		msg, err = self.newStoreRequestMsgData(req)
		if err != nil {
			log.Warn(fmt.Sprintf("syncer[%v]: failed to create store request for %v: %v", self.key.Log(), req, err))
		} else {
			err = self.store(msg)
			if err != nil {
				log.Warn(fmt.Sprintf("syncer[%v]: failed to deliver %v: %v", self.key.Log(), req, err))
			} else {
				success++
				log.Trace(fmt.Sprintf("syncer[%v]: %v successfully delivered", self.key.Log(), req))
			}
		}
		if total%self.SyncBatchSize == 0 {
			log.Debug(fmt.Sprintf("syncer[%v]: deliver Total: %v, Success: %v, High: %v/%v, Medium: %v/%v, Low %v/%v", self.key.Log(), total, success, c[High], n[High], c[Medium], n[Medium], c[Low], n[Low]))
		}
	}
}

/*
 addRequest handles requests for delivery
 it accepts 4 types:

 * storeRequestMsgData: coming from netstore propagate response
 * chunk: coming from forwarding (questionable: id?)
 * key: from incoming syncRequest
 * syncDbEntry: key,id encoded in db

 If sync mode is on for the type of request, then
 it sends the request to the keys queue of the correct priority
 channel buffered with capacity (SyncBufferSize)

 If sync mode is off, requests are sent directly to deliveries
*/
func (self *syncer) addRequest(req interface{}, ty int) {
	// retrieve priority for request type
	priority := self.SyncPriorities[ty]
	// sync mode for this type ON
	if self.syncF() || ty == DeliverReq {
		if self.SyncModes[ty] {
			self.addKey(req, priority, self.quit)
		} else {
			self.addDelivery(req, priority, self.quit)
		}
	}
}

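// Call-site sketch (illustrative; chunk, key and sreq are assumed values of
// the listed types):
//
//	self.addRequest(sreq, DeliverReq)    // storeRequestMsgData from netstore
//	self.addRequest(chunk, PropagateReq) // chunk coming from forwarding
//	self.addRequest(key, HistoryReq)     // key from an incoming syncRequest
//
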
// addKey queues sync request for sync confirmation with given priority
// ie the key will go out in an unsyncedKeys message
func (self *syncer) addKey(req interface{}, priority uint, quit chan bool) bool {
	select {
	case self.keys[priority] <- req:
		// this wakes up the unsynced keys loop if idle
		select {
		case self.newUnsyncedKeys <- true:
		default:
		}
		return true
	case <-quit:
		return false
	}
}

// addDelivery queues a delivery request with given priority
// ie the chunk will be delivered ASAP mod priority queueing handled by syncdb
// requests are persisted across sessions for correct sync
func (self *syncer) addDelivery(req interface{}, priority uint, quit chan bool) bool {
	select {
	case self.queues[priority].buffer <- req:
		return true
	case <-quit:
		return false
	}
}

// doDelivery delivers the chunk for the request with given priority
// without queuing
func (self *syncer) doDelivery(req interface{}, priority uint, quit chan bool) bool {
	msgdata, err := self.newStoreRequestMsgData(req)
	if err != nil {
		log.Warn(fmt.Sprintf("unable to deliver request %v: %v", msgdata, err))
		return false
	}
	select {
	case self.deliveries[priority] <- msgdata:
		return true
	case <-quit:
		return false
	}
}

// returns the delivery function for given priority
// passed on to syncDb
func (self *syncer) deliver(priority uint) func(req interface{}, quit chan bool) bool {
	return func(req interface{}, quit chan bool) bool {
		return self.doDelivery(req, priority, quit)
	}
}

// returns the replay function passed on to syncDb
// depending on sync mode settings for BacklogReq,
// replay of the request db backlog sends items via confirmation
// or delivers them directly
func (self *syncer) replay() func(req interface{}, quit chan bool) bool {
	sync := self.SyncModes[BacklogReq]
	priority := self.SyncPriorities[BacklogReq]
	// sync mode for this type ON
	if sync {
		return func(req interface{}, quit chan bool) bool {
			return self.addKey(req, priority, quit)
		}
	} else {
		return func(req interface{}, quit chan bool) bool {
			return self.doDelivery(req, priority, quit)
		}
	}
}

// given a request, extends it to a full storeRequestMsgData
// polymorphic: see addRequest for the types accepted
func (self *syncer) newStoreRequestMsgData(req interface{}) (*storeRequestMsgData, error) {

	key, id, chunk, sreq, err := parseRequest(req)
	if err != nil {
		return nil, err
	}

	if sreq == nil {
		if chunk == nil {
			var err error
			chunk, err = self.dbAccess.get(key)
			if err != nil {
				return nil, err
			}
		}

		sreq = &storeRequestMsgData{
			Id:    id,
			Key:   chunk.Key,
			SData: chunk.SData,
		}
	}

	return sreq, nil
}

// parses the request type and extracts key, id, chunk and request if available
// does not do chunk lookup!
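//
// Dispatch sketch (illustrative; addr, dbEntry, someChunk and msg are
// assumed values of the listed types):
//
//	k1, id1, _, _, _ := parseRequest(storage.Key(addr)) // bare key: a fresh id is generated
//	k2, id2, _, _, _ := parseRequest(dbEntry)           // *syncDbEntry: key and id decoded from entry.val
//	k3, _, chunk, _, _ := parseRequest(someChunk)       // *storage.Chunk: key taken from the chunk
//	k4, _, _, sreq, _ := parseRequest(msg)              // *storeRequestMsgData: key from the message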
func parseRequest(req interface{}) (storage.Key, uint64, *storage.Chunk, *storeRequestMsgData, error) {
	var key storage.Key
	var entry *syncDbEntry
	var chunk *storage.Chunk
	var id uint64
	var ok bool
	var sreq *storeRequestMsgData
	var err error

	if key, ok = req.(storage.Key); ok {
		id = generateId()

	} else if entry, ok = req.(*syncDbEntry); ok {
		id = binary.BigEndian.Uint64(entry.val[32:])
		key = storage.Key(entry.val[:32])

	} else if chunk, ok = req.(*storage.Chunk); ok {
		key = chunk.Key
		id = generateId()

	} else if sreq, ok = req.(*storeRequestMsgData); ok {
		key = sreq.Key
	} else {
		err = fmt.Errorf("type not allowed: %v (%T)", req, req)
	}

	return key, id, chunk, sreq, err
}