github.com/alanchchen/go-ethereum@v1.6.6-0.20170601190819-6171d01b1195/swarm/network/syncer.go

     1  // Copyright 2016 The go-ethereum Authors
     2  // This file is part of the go-ethereum library.
     3  //
     4  // The go-ethereum library is free software: you can redistribute it and/or modify
     5  // it under the terms of the GNU Lesser General Public License as published by
     6  // the Free Software Foundation, either version 3 of the License, or
     7  // (at your option) any later version.
     8  //
     9  // The go-ethereum library is distributed in the hope that it will be useful,
    10  // but WITHOUT ANY WARRANTY; without even the implied warranty of
    11  // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
    12  // GNU Lesser General Public License for more details.
    13  //
    14  // You should have received a copy of the GNU Lesser General Public License
    15  // along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
    16  
    17  package network
    18  
    19  import (
    20  	"encoding/binary"
    21  	"encoding/json"
    22  	"fmt"
    23  	"path/filepath"
    24  
    25  	"github.com/ethereum/go-ethereum/log"
    26  	"github.com/ethereum/go-ethereum/swarm/storage"
    27  )
    28  
    29  // syncer parameters (global, not peer specific) default values
    30  const (
     31  	requestDbBatchSize = 512  // size of a batch before it is written to the request db
     32  	keyBufferSize      = 1024 // size of buffer for unsynced keys
     33  	syncBatchSize      = 128  // maximum batch size for outgoing requests
     34  	syncBufferSize     = 128  // size of buffer for delivery requests
    35  	syncCacheSize      = 1024 // cache capacity to store request queue in memory
    36  )
    37  
    38  // priorities
    39  const (
    40  	Low        = iota // 0
    41  	Medium            // 1
    42  	High              // 2
    43  	priorities        // 3 number of priority levels
    44  )
    45  
    46  // request types
    47  const (
    48  	DeliverReq   = iota // 0
    49  	PushReq             // 1
    50  	PropagateReq        // 2
    51  	HistoryReq          // 3
    52  	BacklogReq          // 4
    53  )
    54  
     55  // JSON-serialisable struct recording the synchronisation state between two peers
    56  type syncState struct {
    57  	*storage.DbSyncState // embeds the following 4 fields:
    58  	// Start      Key    // lower limit of address space
    59  	// Stop       Key    // upper limit of address space
    60  	// First      uint64 // counter taken from last sync state
    61  	// Last       uint64 // counter of remote peer dbStore at the time of last connection
    62  	SessionAt  uint64      // set at the time of connection
     63  	LastSeenAt uint64      // set at the time of connection, updated whenever a batch of unsynced keys is sent
     64  	Latest     storage.Key // dbStore cursor: the latest key handled (continuously updated by the syncer)
    65  	Synced     bool        // true iff Sync is done up to the last disconnect
    66  	synced     chan bool   // signal that sync stage finished
    67  }
    68  
     69  // DbAccess wraps the chunk databases to provide mockable local chunk store access for the syncer
    70  type DbAccess struct {
    71  	db  *storage.DbStore
    72  	loc *storage.LocalStore
    73  }
    74  
    75  func NewDbAccess(loc *storage.LocalStore) *DbAccess {
    76  	return &DbAccess{loc.DbStore.(*storage.DbStore), loc}
    77  }
    78  
     79  // get retrieves the chunk for a key from the local store (lookup only)
    80  func (self *DbAccess) get(key storage.Key) (*storage.Chunk, error) {
    81  	return self.loc.Get(key)
    82  }
    83  
    84  // current storage counter of chunk db
    85  func (self *DbAccess) counter() uint64 {
    86  	return self.db.Counter()
    87  }
    88  
    89  // implemented by dbStoreSyncIterator
    90  type keyIterator interface {
    91  	Next() storage.Key
    92  }
    93  
    94  // generator function for iteration by address range and storage counter
    95  func (self *DbAccess) iterator(s *syncState) keyIterator {
    96  	it, err := self.db.NewSyncIterator(*(s.DbSyncState))
    97  	if err != nil {
    98  		return nil
    99  	}
   100  	return keyIterator(it)
   101  }
   102  
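        // Illustrative sketch (not part of the original source): this is roughly how
        // syncHistory (below) drains the iterator — Next is called until it returns nil,
        // which marks the end of the address/counter range. dbAccess and state are
        // hypothetical variables from the caller's context.
        //
        //	it := dbAccess.iterator(state) // may be nil if the sync iterator could not be created
        //	if it != nil {
        //		for key := it.Next(); key != nil; key = it.Next() {
        //			// hand key over to the unsynced-keys batcher
        //		}
        //	}
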
   103  func (self syncState) String() string {
   104  	if self.Synced {
   105  		return fmt.Sprintf(
   106  			"session started at: %v, last seen at: %v, latest key: %v",
   107  			self.SessionAt, self.LastSeenAt,
   108  			self.Latest.Log(),
   109  		)
   110  	} else {
   111  		return fmt.Sprintf(
   112  			"address: %v-%v, index: %v-%v, session started at: %v, last seen at: %v, latest key: %v",
   113  			self.Start.Log(), self.Stop.Log(),
   114  			self.First, self.Last,
   115  			self.SessionAt, self.LastSeenAt,
   116  			self.Latest.Log(),
   117  		)
   118  	}
   119  }
   120  
   121  // syncer parameters (global, not peer specific)
   122  type SyncParams struct {
   123  	RequestDbPath      string // path for request db (leveldb)
    124  	RequestDbBatchSize uint   // number of items before a batch is saved to the request db
    125  	KeyBufferSize      uint   // size of key buffer
    126  	SyncBatchSize      uint   // maximum batch size for outgoing requests
    127  	SyncBufferSize     uint   // size of buffer for delivery requests
    128  	SyncCacheSize      uint   // cache capacity to store request queue in memory
    129  	SyncPriorities     []uint // list of priority levels for req types 0-4
    130  	SyncModes          []bool // list of sync modes for req types 0-4
   131  }
   132  
   133  // constructor with default values
   134  func NewSyncParams(bzzdir string) *SyncParams {
   135  	return &SyncParams{
   136  		RequestDbPath:      filepath.Join(bzzdir, "requests"),
   137  		RequestDbBatchSize: requestDbBatchSize,
   138  		KeyBufferSize:      keyBufferSize,
   139  		SyncBufferSize:     syncBufferSize,
   140  		SyncBatchSize:      syncBatchSize,
   141  		SyncCacheSize:      syncCacheSize,
   142  		SyncPriorities:     []uint{High, Medium, Medium, Low, Low},
   143  		SyncModes:          []bool{true, true, true, true, false},
   144  	}
   145  }
   146  
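        // Illustrative sketch (not part of the original source): a caller could tune the
        // defaults per request type, since SyncPriorities and SyncModes are indexed by the
        // request type constants above (the values below are hypothetical):
        //
        //	params := NewSyncParams("/tmp/bzz")
        //	params.SyncModes[PropagateReq] = false  // deliver propagated chunks directly, no confirmation roundtrip
        //	params.SyncPriorities[HistoryReq] = Low // de-prioritise historical syncing
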
   147  // syncer is the agent that manages content distribution/storage replication/chunk storeRequest forwarding
   148  type syncer struct {
   149  	*SyncParams                     // sync parameters
   150  	syncF           func() bool     // if syncing is needed
    151  	key             storage.Key     // remote peer's address key
   152  	state           *syncState      // sync state for our dbStore
   153  	syncStates      chan *syncState // different stages of sync
   154  	deliveryRequest chan bool       // one of two triggers needed to send unsyncedKeys
   155  	newUnsyncedKeys chan bool       // one of two triggers needed to send unsynced keys
   156  	quit            chan bool       // signal to quit loops
   157  
   158  	// DB related fields
   159  	dbAccess *DbAccess            // access to dbStore
   160  	db       *storage.LDBDatabase // delivery msg db
   161  
   162  	// native fields
   163  	queues     [priorities]*syncDb                   // in-memory cache / queues for sync reqs
   164  	keys       [priorities]chan interface{}          // buffer for unsynced keys
   165  	deliveries [priorities]chan *storeRequestMsgData // delivery
   166  
   167  	// bzz protocol instance outgoing message callbacks (mockable for testing)
   168  	unsyncedKeys func([]*syncRequest, *syncState) error // send unsyncedKeysMsg
   169  	store        func(*storeRequestMsgData) error       // send storeRequestMsg
   170  }
   171  
   172  // a syncer instance is linked to each peer connection
   173  // constructor is called from protocol after successful handshake
   174  // the returned instance is attached to the peer and can be called
   175  // by the forwarder
   176  func newSyncer(
   177  	db *storage.LDBDatabase, remotekey storage.Key,
   178  	dbAccess *DbAccess,
   179  	unsyncedKeys func([]*syncRequest, *syncState) error,
   180  	store func(*storeRequestMsgData) error,
   181  	params *SyncParams,
   182  	state *syncState,
   183  	syncF func() bool,
   184  ) (*syncer, error) {
   185  
   186  	syncBufferSize := params.SyncBufferSize
   187  	keyBufferSize := params.KeyBufferSize
   188  	dbBatchSize := params.RequestDbBatchSize
   189  
   190  	self := &syncer{
   191  		syncF:           syncF,
   192  		key:             remotekey,
   193  		dbAccess:        dbAccess,
   194  		syncStates:      make(chan *syncState, 20),
   195  		deliveryRequest: make(chan bool, 1),
   196  		newUnsyncedKeys: make(chan bool, 1),
   197  		SyncParams:      params,
   198  		state:           state,
   199  		quit:            make(chan bool),
   200  		unsyncedKeys:    unsyncedKeys,
   201  		store:           store,
   202  	}
   203  
   204  	// initialising
   205  	for i := 0; i < priorities; i++ {
   206  		self.keys[i] = make(chan interface{}, keyBufferSize)
   207  		self.deliveries[i] = make(chan *storeRequestMsgData)
   208  		// initialise a syncdb instance for each priority queue
   209  		self.queues[i] = newSyncDb(db, remotekey, uint(i), syncBufferSize, dbBatchSize, self.deliver(uint(i)))
   210  	}
   211  	log.Info(fmt.Sprintf("syncer started: %v", state))
   212  	// launch chunk delivery service
   213  	go self.syncDeliveries()
   214  	// launch sync task manager
   215  	if self.syncF() {
   216  		go self.sync()
   217  	}
   218  	// process unsynced keys to broadcast
   219  	go self.syncUnsyncedKeys()
   220  
   221  	return self, nil
   222  }
   223  
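        // Illustrative sketch (not part of the original source): rough shape of how the
        // bzz protocol (or a test using mocks) wires up a syncer. peer, requestDb,
        // remoteAddr, dbAccess, bzzdir and initialState are hypothetical names.
        //
        //	unsyncedKeysFn := func(reqs []*syncRequest, state *syncState) error {
        //		return peer.sendUnsyncedKeys(reqs, state) // outgoing unsyncedKeysMsg
        //	}
        //	storeFn := func(msg *storeRequestMsgData) error {
        //		return peer.sendStoreRequest(msg) // outgoing storeRequestMsg
        //	}
        //	s, err := newSyncer(requestDb, remoteAddr, dbAccess, unsyncedKeysFn, storeFn,
        //		NewSyncParams(bzzdir), initialState, func() bool { return true })
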
   224  // metadata serialisation
   225  func encodeSync(state *syncState) (*json.RawMessage, error) {
   226  	data, err := json.MarshalIndent(state, "", " ")
   227  	if err != nil {
   228  		return nil, err
   229  	}
   230  	meta := json.RawMessage(data)
   231  	return &meta, nil
   232  }
   233  
   234  func decodeSync(meta *json.RawMessage) (*syncState, error) {
   235  	if meta == nil {
   236  		return nil, fmt.Errorf("unable to deserialise sync state from <nil>")
   237  	}
   238  	data := []byte(*(meta))
   239  	if len(data) == 0 {
    240  		return nil, fmt.Errorf("unable to deserialise sync state from empty message")
   241  	}
   242  	state := &syncState{DbSyncState: &storage.DbSyncState{}}
   243  	err := json.Unmarshal(data, state)
   244  	return state, err
   245  }
   246  
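        // Illustrative sketch (not part of the original source): round-tripping a sync
        // state through encodeSync/decodeSync, as done when the state is persisted as
        // peer metadata (field values made up for the example):
        //
        //	state := &syncState{
        //		DbSyncState: &storage.DbSyncState{First: 0, Last: 42},
        //		SessionAt:   43,
        //		LastSeenAt:  42,
        //	}
        //	meta, err := encodeSync(state)    // *json.RawMessage holding indented JSON
        //	restored, err := decodeSync(meta) // unexported fields (e.g. the synced channel) are not restored
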
   247  /*
   248   sync implements the syncing script
   249   * first all items left in the request Db are replayed
   250     * type = StaleSync
   251     * Mode: by default once again via confirmation roundtrip
    252     * Priority: the items are replayed with the priority specified for StaleSync
    253     * but within that, the order respects the earlier priority level of the request
    254   * after all items are consumed for a priority level, the respective
    255    queue for delivery requests is opened (this way new reqs are not written to db)
    256    (TODO: this should be checked)
    257   * the sync state provided by the remote peer is used to sync history
    258     * all the backlog from earlier (aborted) syncing is completed starting from latest
    259     * if Last < LastSeenAt then process all the backlog in between,
    260       up to the last disconnect
   261     * if Last > 0 &&
   262  
   263   sync is called from the syncer constructor and is not supposed to be used externally
   264  */
   265  func (self *syncer) sync() {
   266  	state := self.state
   267  	// sync finished
   268  	defer close(self.syncStates)
   269  
   270  	// 0. first replay stale requests from request db
   271  	if state.SessionAt == 0 {
   272  		log.Debug(fmt.Sprintf("syncer[%v]: nothing to sync", self.key.Log()))
   273  		return
   274  	}
   275  	log.Debug(fmt.Sprintf("syncer[%v]: start replaying stale requests from request db", self.key.Log()))
   276  	for p := priorities - 1; p >= 0; p-- {
   277  		self.queues[p].dbRead(false, 0, self.replay())
   278  	}
   279  	log.Debug(fmt.Sprintf("syncer[%v]: done replaying stale requests from request db", self.key.Log()))
   280  
    281  	// unless the peer is synced, sync unfinished history, picking up where the last session left off
   282  	if !state.Synced {
   283  		start := state.Start
   284  
   285  		if !storage.IsZeroKey(state.Latest) {
   286  			// 1. there is unfinished earlier sync
   287  			state.Start = state.Latest
    288  			log.Debug(fmt.Sprintf("syncer[%v]: start synchronising backlog (unfinished sync: %v)", self.key.Log(), state))
    289  			// blocks while the entire history up to state is synced
   290  			self.syncState(state)
   291  			if state.Last < state.SessionAt {
   292  				state.First = state.Last + 1
   293  			}
   294  		}
   295  		state.Latest = storage.ZeroKey
   296  		state.Start = start
    297  		// 2. sync up to last disconnect
   298  		if state.First < state.LastSeenAt {
   299  			state.Last = state.LastSeenAt
    300  			log.Debug(fmt.Sprintf("syncer[%v]: start synchronising history up to last disconnect at %v: %v", self.key.Log(), state.LastSeenAt, state))
   301  			self.syncState(state)
   302  			state.First = state.LastSeenAt
   303  		}
   304  		state.Latest = storage.ZeroKey
   305  
   306  	} else {
   307  		// synchronisation starts at end of last session
   308  		state.First = state.LastSeenAt
   309  	}
   310  
   311  	// 3. sync up to current session start
   312  	// if there have been new chunks since last session
   313  	if state.LastSeenAt < state.SessionAt {
   314  		state.Last = state.SessionAt
    315  		log.Debug(fmt.Sprintf("syncer[%v]: start synchronising history since last disconnect at %v up until session start at %v: %v", self.key.Log(), state.LastSeenAt, state.SessionAt, state))
   316  		// blocks until state syncing is finished
   317  		self.syncState(state)
   318  	}
   319  	log.Info(fmt.Sprintf("syncer[%v]: syncing all history complete", self.key.Log()))
   320  
   321  }
   322  
    323  // syncState blocks until the given state has been synced
   324  func (self *syncer) syncState(state *syncState) {
   325  	self.syncStates <- state
   326  	select {
   327  	case <-state.synced:
   328  	case <-self.quit:
   329  	}
   330  }
   331  
    332  // stop quits the request processing loops and saves the request cache to disk
   333  func (self *syncer) stop() {
   334  	close(self.quit)
   335  	log.Trace(fmt.Sprintf("syncer[%v]: stop and save sync request db backlog", self.key.Log()))
   336  	for _, db := range self.queues {
   337  		db.stop()
   338  	}
   339  }
   340  
   341  // rlp serialisable sync request
   342  type syncRequest struct {
   343  	Key      storage.Key
   344  	Priority uint
   345  }
   346  
   347  func (self *syncRequest) String() string {
   348  	return fmt.Sprintf("<Key: %v, Priority: %v>", self.Key.Log(), self.Priority)
   349  }
   350  
   351  func (self *syncer) newSyncRequest(req interface{}, p int) (*syncRequest, error) {
   352  	key, _, _, _, err := parseRequest(req)
   353  	// TODO: if req has chunk, it should be put in a cache
   354  	// create
   355  	if err != nil {
   356  		return nil, err
   357  	}
   358  	return &syncRequest{key, uint(p)}, nil
   359  }
   360  
   361  // serves historical items from the DB
    362  // * reads are on demand, blocking until the history channel is read from
    363  // * accepts sync requests (syncStates) to create new db iterator
    364  // * closes the channel once the iteration finishes
   365  func (self *syncer) syncHistory(state *syncState) chan interface{} {
   366  	var n uint
   367  	history := make(chan interface{})
   368  	log.Debug(fmt.Sprintf("syncer[%v]: syncing history between %v - %v for chunk addresses %v - %v", self.key.Log(), state.First, state.Last, state.Start, state.Stop))
   369  	it := self.dbAccess.iterator(state)
   370  	if it != nil {
   371  		go func() {
    372  			// signal that the iteration has ended
   373  			defer close(history)
   374  		IT:
   375  			for {
   376  				key := it.Next()
   377  				if key == nil {
   378  					break IT
   379  				}
   380  				select {
   381  				// blocking until history channel is read from
   382  				case history <- storage.Key(key):
   383  					n++
   384  					log.Trace(fmt.Sprintf("syncer[%v]: history: %v (%v keys)", self.key.Log(), key.Log(), n))
   385  					state.Latest = key
   386  				case <-self.quit:
   387  					return
   388  				}
   389  			}
   390  			log.Debug(fmt.Sprintf("syncer[%v]: finished syncing history between %v - %v for chunk addresses %v - %v (at %v) (chunks = %v)", self.key.Log(), state.First, state.Last, state.Start, state.Stop, state.Latest, n))
   391  		}()
   392  	}
   393  	return history
   394  }
   395  
    396  // triggers key synchronisation
   397  func (self *syncer) sendUnsyncedKeys() {
   398  	select {
   399  	case self.deliveryRequest <- true:
   400  	default:
   401  	}
   402  }
   403  
   404  // assembles a new batch of unsynced keys
   405  // * keys are drawn from the key buffers in order of priority queue
   406  // * if the queues of priority for History (HistoryReq) or higher are depleted,
   407  //   historical data is used so historical items are lower priority within
   408  //   their priority group.
   409  // * Order of historical data is unspecified
   410  func (self *syncer) syncUnsyncedKeys() {
   411  	// send out new
   412  	var unsynced []*syncRequest
   413  	var more, justSynced bool
   414  	var keyCount, historyCnt int
   415  	var history chan interface{}
   416  
   417  	priority := High
   418  	keys := self.keys[priority]
   419  	var newUnsyncedKeys, deliveryRequest chan bool
   420  	keyCounts := make([]int, priorities)
   421  	histPrior := self.SyncPriorities[HistoryReq]
   422  	syncStates := self.syncStates
   423  	state := self.state
   424  
   425  LOOP:
   426  	for {
   427  
   428  		var req interface{}
   429  		// select the highest priority channel to read from
   430  		// keys channels are buffered so the highest priority ones
   431  		// are checked first - integrity can only be guaranteed if writing
   432  		// is locked while selecting
   433  		if priority != High || len(keys) == 0 {
   434  			// selection is not needed if the High priority queue has items
   435  			keys = nil
   436  		PRIORITIES:
   437  			for priority = High; priority >= 0; priority-- {
   438  				// the first priority channel that is non-empty will be assigned to keys
   439  				if len(self.keys[priority]) > 0 {
    440  					log.Trace(fmt.Sprintf("syncer[%v]: reading request with priority %v", self.key.Log(), priority))
   441  					keys = self.keys[priority]
   442  					break PRIORITIES
   443  				}
   444  				log.Trace(fmt.Sprintf("syncer[%v/%v]: queue: [%v, %v, %v]", self.key.Log(), priority, len(self.keys[High]), len(self.keys[Medium]), len(self.keys[Low])))
   445  				// if the input queue is empty on this level, resort to history if there is any
   446  				if uint(priority) == histPrior && history != nil {
   447  					log.Trace(fmt.Sprintf("syncer[%v]: reading history for %v", self.key.Log(), self.key))
   448  					keys = history
   449  					break PRIORITIES
   450  				}
   451  			}
   452  		}
   453  
   454  		// if peer ready to receive but nothing to send
   455  		if keys == nil && deliveryRequest == nil {
    456  			// no items left, switch to waiting mode
   457  			log.Trace(fmt.Sprintf("syncer[%v]: buffers consumed. Waiting", self.key.Log()))
   458  			newUnsyncedKeys = self.newUnsyncedKeys
   459  		}
   460  
   461  		// send msg iff
   462  		// * peer is ready to receive keys AND (
   463  		// * all queues and history are depleted OR
   464  		// * batch full OR
    465  		// * all history has been consumed (synced)
   466  		if deliveryRequest == nil &&
   467  			(justSynced ||
   468  				len(unsynced) > 0 && keys == nil ||
   469  				len(unsynced) == int(self.SyncBatchSize)) {
   470  			justSynced = false
   471  			// listen to requests
   472  			deliveryRequest = self.deliveryRequest
    473  			newUnsyncedKeys = nil // do not care about new data until the next request comes in
   474  			// set sync to current counter
    475  			// (all non-historical outgoing traffic scheduled and persisted)
   476  			state.LastSeenAt = self.dbAccess.counter()
   477  			state.Latest = storage.ZeroKey
   478  			log.Trace(fmt.Sprintf("syncer[%v]: sending %v", self.key.Log(), unsynced))
   479  			//  send the unsynced keys
   480  			stateCopy := *state
   481  			err := self.unsyncedKeys(unsynced, &stateCopy)
   482  			if err != nil {
   483  				log.Warn(fmt.Sprintf("syncer[%v]: unable to send unsynced keys: %v", self.key.Log(), err))
   484  			}
   485  			self.state = state
   486  			log.Debug(fmt.Sprintf("syncer[%v]: --> %v keys sent: (total: %v (%v), history: %v), sent sync state: %v", self.key.Log(), len(unsynced), keyCounts, keyCount, historyCnt, stateCopy))
   487  			unsynced = nil
   488  			keys = nil
   489  		}
   490  
   491  		// process item and add it to the batch
   492  		select {
   493  		case <-self.quit:
   494  			break LOOP
   495  		case req, more = <-keys:
   496  			if keys == history && !more {
   497  				log.Trace(fmt.Sprintf("syncer[%v]: syncing history segment complete", self.key.Log()))
   498  				// history channel is closed, waiting for new state (called from sync())
   499  				syncStates = self.syncStates
    500  				state.Synced = true // this signals that the current segment is complete
   501  				select {
   502  				case state.synced <- false:
   503  				case <-self.quit:
   504  					break LOOP
   505  				}
   506  				justSynced = true
   507  				history = nil
   508  			}
   509  		case <-deliveryRequest:
   510  			log.Trace(fmt.Sprintf("syncer[%v]: peer ready to receive", self.key.Log()))
   511  
   512  			// this 1 cap channel can wake up the loop
   513  			// signaling that peer is ready to receive unsynced Keys
    514  			// the channel var is set to nil so any further signals are ignored until it is reset
   515  			deliveryRequest = nil
   516  
   517  		case <-newUnsyncedKeys:
   518  			log.Trace(fmt.Sprintf("syncer[%v]: new unsynced keys available", self.key.Log()))
   519  			// this 1 cap channel can wake up the loop
   520  			// signals that data is available to send if peer is ready to receive
   521  			newUnsyncedKeys = nil
   522  			keys = self.keys[High]
   523  
   524  		case state, more = <-syncStates:
   525  			// this resets the state
   526  			if !more {
   527  				state = self.state
    528  				log.Trace(fmt.Sprintf("syncer[%v]: (priority %v) syncing complete up to %v", self.key.Log(), priority, state))
   529  				state.Synced = true
   530  				syncStates = nil
   531  			} else {
    532  				log.Trace(fmt.Sprintf("syncer[%v]: (priority %v) syncing history up to %v with priority %v", self.key.Log(), priority, state, histPrior))
   533  				state.Synced = false
   534  				history = self.syncHistory(state)
   535  				// only one history at a time, only allow another one once the
   536  				// history channel is closed
   537  				syncStates = nil
   538  			}
   539  		}
   540  		if req == nil {
   541  			continue LOOP
   542  		}
   543  
   544  		log.Trace(fmt.Sprintf("syncer[%v]: (priority %v) added to unsynced keys: %v", self.key.Log(), priority, req))
   545  		keyCounts[priority]++
   546  		keyCount++
   547  		if keys == history {
   548  			log.Trace(fmt.Sprintf("syncer[%v]: (priority %v) history item %v (synced = %v)", self.key.Log(), priority, req, state.Synced))
   549  			historyCnt++
   550  		}
   551  		if sreq, err := self.newSyncRequest(req, priority); err == nil {
   552  			// extract key from req
   553  			log.Trace(fmt.Sprintf("syncer[%v]: (priority %v): request %v (synced = %v)", self.key.Log(), priority, req, state.Synced))
   554  			unsynced = append(unsynced, sreq)
   555  		} else {
   556  			log.Warn(fmt.Sprintf("syncer[%v]: (priority %v): error creating request for %v: %v)", self.key.Log(), priority, req, err))
   557  		}
   558  
   559  	}
   560  }
   561  
   562  // delivery loop
    563  // takes priority into account, sends store requests with the chunk data (delivery)
   564  // idle blocking if no new deliveries in any of the queues
   565  func (self *syncer) syncDeliveries() {
   566  	var req *storeRequestMsgData
   567  	p := High
   568  	var deliveries chan *storeRequestMsgData
   569  	var msg *storeRequestMsgData
   570  	var err error
   571  	var c = [priorities]int{}
   572  	var n = [priorities]int{}
   573  	var total, success uint
   574  
   575  	for {
   576  		deliveries = self.deliveries[p]
   577  		select {
   578  		case req = <-deliveries:
   579  			n[p]++
   580  			c[p]++
   581  		default:
   582  			if p == Low {
   583  				// blocking, depletion on all channels, no preference for priority
   584  				select {
   585  				case req = <-self.deliveries[High]:
   586  					n[High]++
   587  				case req = <-self.deliveries[Medium]:
   588  					n[Medium]++
   589  				case req = <-self.deliveries[Low]:
   590  					n[Low]++
   591  				case <-self.quit:
   592  					return
   593  				}
   594  				p = High
   595  			} else {
   596  				p--
   597  				continue
   598  			}
   599  		}
   600  		total++
   601  		msg, err = self.newStoreRequestMsgData(req)
   602  		if err != nil {
   603  			log.Warn(fmt.Sprintf("syncer[%v]: failed to create store request for %v: %v", self.key.Log(), req, err))
   604  		} else {
   605  			err = self.store(msg)
   606  			if err != nil {
   607  				log.Warn(fmt.Sprintf("syncer[%v]: failed to deliver %v: %v", self.key.Log(), req, err))
   608  			} else {
   609  				success++
   610  				log.Trace(fmt.Sprintf("syncer[%v]: %v successfully delivered", self.key.Log(), req))
   611  			}
   612  		}
   613  		if total%self.SyncBatchSize == 0 {
   614  			log.Debug(fmt.Sprintf("syncer[%v]: deliver Total: %v, Success: %v, High: %v/%v, Medium: %v/%v, Low %v/%v", self.key.Log(), total, success, c[High], n[High], c[Medium], n[Medium], c[Low], n[Low]))
   615  		}
   616  	}
   617  }
   618  
   619  /*
   620   addRequest handles requests for delivery
   621   it accepts 4 types:
   622  
   623   * storeRequestMsgData: coming from netstore propagate response
   624   * chunk: coming from forwarding (questionable: id?)
   625   * key: from incoming syncRequest
   626   * syncDbEntry: key,id encoded in db
   627  
   628   If sync mode is on for the type of request, then
    629   it sends the request to the keys queue of the correct priority,
    630   a channel buffered with capacity SyncBufferSize
   631  
    632   If sync mode is off, requests are sent directly to deliveries
   633  */
   634  func (self *syncer) addRequest(req interface{}, ty int) {
    635  	// retrieve the priority for this request type
   636  
   637  	priority := self.SyncPriorities[ty]
   638  	// sync mode for this type ON
   639  	if self.syncF() || ty == DeliverReq {
   640  		if self.SyncModes[ty] {
   641  			self.addKey(req, priority, self.quit)
   642  		} else {
   643  			self.addDelivery(req, priority, self.quit)
   644  		}
   645  	}
   646  }
   647  
   648  // addKey queues sync request for sync confirmation with given priority
   649  // ie the key will go out in an unsyncedKeys message
   650  func (self *syncer) addKey(req interface{}, priority uint, quit chan bool) bool {
   651  	select {
   652  	case self.keys[priority] <- req:
   653  		// this wakes up the unsynced keys loop if idle
   654  		select {
   655  		case self.newUnsyncedKeys <- true:
   656  		default:
   657  		}
   658  		return true
   659  	case <-quit:
   660  		return false
   661  	}
   662  }
   663  
    664  // addDelivery queues a delivery request with the given priority
    665  // i.e. the chunk will be delivered ASAP, subject to the priority queueing handled by syncDb
   666  // requests are persisted across sessions for correct sync
   667  func (self *syncer) addDelivery(req interface{}, priority uint, quit chan bool) bool {
   668  	select {
   669  	case self.queues[priority].buffer <- req:
   670  		return true
   671  	case <-quit:
   672  		return false
   673  	}
   674  }
   675  
   676  // doDelivery delivers the chunk for the request with given priority
   677  // without queuing
   678  func (self *syncer) doDelivery(req interface{}, priority uint, quit chan bool) bool {
   679  	msgdata, err := self.newStoreRequestMsgData(req)
   680  	if err != nil {
   681  		log.Warn(fmt.Sprintf("unable to deliver request %v: %v", msgdata, err))
   682  		return false
   683  	}
   684  	select {
   685  	case self.deliveries[priority] <- msgdata:
   686  		return true
   687  	case <-quit:
   688  		return false
   689  	}
   690  }
   691  
   692  // returns the delivery function for given priority
   693  // passed on to syncDb
   694  func (self *syncer) deliver(priority uint) func(req interface{}, quit chan bool) bool {
   695  	return func(req interface{}, quit chan bool) bool {
   696  		return self.doDelivery(req, priority, quit)
   697  	}
   698  }
   699  
   700  // returns the replay function passed on to syncDb
   701  // depending on sync mode settings for BacklogReq,
    702  // replay of request db backlog sends items via confirmation
   703  // or directly delivers
   704  func (self *syncer) replay() func(req interface{}, quit chan bool) bool {
   705  	sync := self.SyncModes[BacklogReq]
   706  	priority := self.SyncPriorities[BacklogReq]
   707  	// sync mode for this type ON
   708  	if sync {
   709  		return func(req interface{}, quit chan bool) bool {
   710  			return self.addKey(req, priority, quit)
   711  		}
   712  	} else {
   713  		return func(req interface{}, quit chan bool) bool {
   714  			return self.doDelivery(req, priority, quit)
   715  		}
   716  
   717  	}
   718  }
   719  
   720  // given a request, extends it to a full storeRequestMsgData
    721  // polymorphic: see addRequest for the types accepted
   722  func (self *syncer) newStoreRequestMsgData(req interface{}) (*storeRequestMsgData, error) {
   723  
   724  	key, id, chunk, sreq, err := parseRequest(req)
   725  	if err != nil {
   726  		return nil, err
   727  	}
   728  
   729  	if sreq == nil {
   730  		if chunk == nil {
   731  			var err error
   732  			chunk, err = self.dbAccess.get(key)
   733  			if err != nil {
   734  				return nil, err
   735  			}
   736  		}
   737  
   738  		sreq = &storeRequestMsgData{
   739  			Id:    id,
   740  			Key:   chunk.Key,
   741  			SData: chunk.SData,
   742  		}
   743  	}
   744  
   745  	return sreq, nil
   746  }
   747  
    748  // parses the request types and extracts key, id, chunk and request if available
   749  // does not do chunk lookup !
   750  func parseRequest(req interface{}) (storage.Key, uint64, *storage.Chunk, *storeRequestMsgData, error) {
   751  	var key storage.Key
   752  	var entry *syncDbEntry
   753  	var chunk *storage.Chunk
   754  	var id uint64
   755  	var ok bool
   756  	var sreq *storeRequestMsgData
   757  	var err error
   758  
   759  	if key, ok = req.(storage.Key); ok {
   760  		id = generateId()
   761  
   762  	} else if entry, ok = req.(*syncDbEntry); ok {
   763  		id = binary.BigEndian.Uint64(entry.val[32:])
   764  		key = storage.Key(entry.val[:32])
   765  
   766  	} else if chunk, ok = req.(*storage.Chunk); ok {
   767  		key = chunk.Key
   768  		id = generateId()
   769  
   770  	} else if sreq, ok = req.(*storeRequestMsgData); ok {
   771  		key = sreq.Key
   772  	} else {
   773  		err = fmt.Errorf("type not allowed: %v (%T)", req, req)
   774  	}
   775  
   776  	return key, id, chunk, sreq, err
   777  }
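
        // Illustrative sketch (not part of the original source): the four request
        // representations parseRequest accepts, with the argument values hypothetical.
        // Each call returns (key, id, chunk, sreq, err).
        //
        //	parseRequest(storage.Key(hash)) // bare key: fresh id generated
        //	parseRequest(aChunk)            // *storage.Chunk: key taken from the chunk, fresh id
        //	parseRequest(aStoreRequest)     // *storeRequestMsgData: key taken from the message, request reused as-is
        //	parseRequest(aSyncDbEntry)      // *syncDbEntry: key and id decoded from the stored value
        //	parseRequest(42)                // any other type: "type not allowed" error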