github.com/annchain/OG@v0.0.9/og/syncer/syncer.go (about)

     1  // Copyright © 2019 Annchain Authors <EMAIL ADDRESS>
     2  //
     3  // Licensed under the Apache License, Version 2.0 (the "License");
     4  // you may not use this file except in compliance with the License.
     5  // You may obtain a copy of the License at
     6  //
     7  //     http://www.apache.org/licenses/LICENSE-2.0
     8  //
     9  // Unless required by applicable law or agreed to in writing, software
    10  // distributed under the License is distributed on an "AS IS" BASIS,
    11  // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    12  // See the License for the specific language governing permissions and
    13  // limitations under the License.
    14  package syncer
    15  
    16  import (
    17  	"github.com/annchain/OG/arefactor/common/goroutine"
    18  	types2 "github.com/annchain/OG/arefactor/og/types"
    19  	"github.com/annchain/OG/og/types"
    20  	"github.com/annchain/OG/og/types/archive"
    21  
    22  	"github.com/annchain/OG/types/msg"
    23  	"sync"
    24  	"time"
    25  
    26  	"github.com/annchain/OG/og/txcache"
    27  
    28  	"github.com/annchain/gcache"
    29  )
    30  
// BloomFilterRate controls how often a (large) bloom filter message is sent
// instead of a plain sync request: one bloom filter per BloomFilterRate flushes.
const BloomFilterRate = 4 //sending 4 req
    32  
// MessageSender abstracts the p2p layer used by the syncer to deliver
// OG messages to one peer, several peers, or the whole network.
type MessageSender interface {
	// BroadcastMessage sends the message to all connected peers.
	BroadcastMessage(message msg.OgMessage)
	// MulticastMessage sends the message to a subset of peers.
	MulticastMessage(message msg.OgMessage)
	// MulticastToSource routes the message back towards the peer that
	// originated the message identified by sourceMsgHash.
	MulticastToSource(message msg.OgMessage, sourceMsgHash *types2.Hash)
	// BroadcastMessageWithLink broadcasts with link information attached.
	BroadcastMessageWithLink(message msg.OgMessage)
	// SendToPeer delivers the message to one specific peer.
	SendToPeer(peerId string, msg msg.OgMessage)
}
    40  
// FireHistory records when and how often a sync request for a single hash
// has been fired, so unanswered hashes can be repicked after a timeout.
type FireHistory struct {
	StartTime  time.Time // first time a request for this hash was fired
	LastTime   time.Time // most recent time a request was fired
	FiredTimes int       // total number of requests fired for this hash
}
    46  
// IncrementalSyncer fetches txs from other peers. (incremental)
// IncrementalSyncer will not fire duplicate requests in a period of time.
type IncrementalSyncer struct {
	config                   *SyncerConfig
	messageSender            MessageSender
	getTxsHashes             func() types2.Hashes              // callback: hashes currently in the tx pool
	isKnownHash              func(hash types2.Hash) bool       // callback: tx already known locally?
	getHeight                func() uint64                     // callback: current sequencer height
	acquireTxQueue           chan *types2.Hash                 // pending fetch tasks consumed by loopSync; nil entries only tick the batch counter
	acquireTxDuplicateCache  gcache.Cache     // list of hashes that are queried recently. Prevent duplicate requests.
	bufferedIncomingTxCache  *txcache.TxCache // cache of incoming txs that are not fired during full sync.
	firedTxCache             gcache.Cache     // cache of hashes that are fired however haven't got any response yet
	quitLoopSync             chan bool
	quitLoopEvent            chan bool
	quitNotifyEvent          chan bool
	EnableEvent              chan bool // external on/off switch consumed by eventLoop
	Enabled                  bool      // NOTE(review): read/written from several goroutines without mu — confirm this race is acceptable
	OnNewTxiReceived         []chan []types.Txi // subscribers notified with batches of fresh txs
	notifyTxEvent            chan bool
	notifying                bool // guarded by mu; true while notifyNewTxi is draining the buffer
	cacheNewTxEnabled        func() bool
	mu                       sync.RWMutex // protects notifying
	NewLatestSequencerCh     chan bool    // signaled when a new sequencer arrives; triggers cache cleanup
	bloomFilterStatus        *BloomFilterFireStatus
	RemoveContrlMsgFromCache func(hash types2.Hash)
	SequencerCache           *SequencerCache // maps sequencer hash -> peer that sent it
}
    74  
    75  func (m *IncrementalSyncer) GetBenchmarks() map[string]interface{} {
    76  	return map[string]interface{}{
    77  		"acquireTxQueue":          len(m.acquireTxQueue),
    78  		"bufferedIncomingTxCache": m.bufferedIncomingTxCache.Len(),
    79  		"firedTxCache":            m.firedTxCache.Len(true),
    80  		"acquireTxDuplicateCache": m.acquireTxDuplicateCache.Len(true),
    81  	}
    82  }
    83  
// SyncerConfig carries all tunables for IncrementalSyncer.
type SyncerConfig struct {
	AcquireTxQueueSize                       uint // capacity of the pending fetch-task channel
	MaxBatchSize                             int  // number of queued tasks that triggers an immediate flush
	BatchTimeoutMilliSecond                  uint // flush the buffer after this long with no new tasks
	AcquireTxDedupCacheMaxSize               int  // size of the recently-queried-hash dedup cache
	AcquireTxDedupCacheExpirationSeconds     int  // expiry of dedup entries
	BufferedIncomingTxCacheEnabled           bool
	BufferedIncomingTxCacheMaxSize           int // size of the incoming-tx buffer
	BufferedIncomingTxCacheExpirationSeconds int // expiry of buffered incoming txs
	FiredTxCacheMaxSize                      int // size of the fired-but-unanswered cache
	FiredTxCacheExpirationSeconds            int // expiry of fired-but-unanswered entries
	NewTxsChannelSize                        int // batch size when announcing buffered txs
}
    97  
    98  func NewIncrementalSyncer(config *SyncerConfig, messageSender MessageSender, getTxsHashes func() types2.Hashes,
    99  	isKnownHash func(hash types2.Hash) bool, getHeight func() uint64, cacheNewTxEnabled func() bool) *IncrementalSyncer {
   100  	return &IncrementalSyncer{
   101  		config:         config,
   102  		messageSender:  messageSender,
   103  		acquireTxQueue: make(chan *types2.Hash, config.AcquireTxQueueSize),
   104  		acquireTxDuplicateCache: gcache.New(config.AcquireTxDedupCacheMaxSize).Simple().
   105  			Expiration(time.Second * time.Duration(config.AcquireTxDedupCacheExpirationSeconds)).Build(),
   106  		bufferedIncomingTxCache: txcache.NewTxCache(config.BufferedIncomingTxCacheMaxSize,
   107  			config.BufferedIncomingTxCacheExpirationSeconds, isKnownHash, true),
   108  		firedTxCache: gcache.New(config.BufferedIncomingTxCacheMaxSize).Simple().
   109  			Expiration(time.Second * time.Duration(config.BufferedIncomingTxCacheExpirationSeconds)).Build(),
   110  		quitLoopSync:         make(chan bool),
   111  		quitLoopEvent:        make(chan bool),
   112  		EnableEvent:          make(chan bool),
   113  		notifyTxEvent:        make(chan bool),
   114  		NewLatestSequencerCh: make(chan bool),
   115  		quitNotifyEvent:      make(chan bool),
   116  		Enabled:              false,
   117  		getTxsHashes:         getTxsHashes,
   118  		isKnownHash:          isKnownHash,
   119  		getHeight:            getHeight,
   120  		cacheNewTxEnabled:    cacheNewTxEnabled,
   121  		bloomFilterStatus:    NewBloomFilterFireStatus(120, 500),
   122  		SequencerCache:       NewSequencerCache(15),
   123  	}
   124  }
   125  
   126  func (m *IncrementalSyncer) Start() {
   127  	goroutine.New(m.eventLoop)
   128  	goroutine.New(m.loopSync)
   129  	goroutine.New(m.txNotifyLoop)
   130  }
   131  
// Stop disables the syncer and terminates all background loops by closing
// their quit channels. It must be called at most once: closing an already
// closed channel panics.
// NOTE(review): Enabled is written here without holding mu while other
// goroutines read it concurrently — confirm this benign race is intended.
func (m *IncrementalSyncer) Stop() {
	m.Enabled = false
	close(m.quitLoopEvent)
	close(m.quitLoopSync)
	close(m.quitNotifyEvent)
	// <-ffchan.NewTimeoutSender(m.quitLoopEvent, true, "increSyncerQuitLoopEvent", 1000).C
	// <-ffchan.NewTimeoutSender(m.quitLoopSync, true, "increSyncerQuitLoopSync", 1000).C
}
   140  
   141  func (m *IncrementalSyncer) Name() string {
   142  	return "IncrementalSyncer"
   143  }
   144  
// CacheTxs buffers a batch of incoming txs; they will be announced to
// subscribers later by notifyNewTxi.
func (m *IncrementalSyncer) CacheTxs(txs types.Txis) {
	m.bufferedIncomingTxCache.EnQueueBatch(txs)
}
   148  
// CacheTx buffers a single incoming tx; it will be announced to subscribers
// later by notifyNewTxi.
func (m *IncrementalSyncer) CacheTx(tx types.Txi) {
	m.bufferedIncomingTxCache.EnQueue(tx)
}
   152  
   153  func (m *IncrementalSyncer) fireRequest(buffer map[types2.Hash]struct{}) {
   154  	if len(buffer) == 0 {
   155  		return
   156  	}
   157  	req := archive.MessageSyncRequest{
   158  		RequestId: message_archive.MsgCounter.Get(),
   159  	}
   160  	var source interface{}
   161  	var err error
   162  	var reqHashes types2.Hashes
   163  	for key := range buffer {
   164  		if source, err = m.acquireTxDuplicateCache.GetIFPresent(key); err != nil {
   165  			continue
   166  		}
   167  		// add it to the missing queue in case no one responds us.
   168  		// will retry after some time
   169  		if history, err := m.firedTxCache.GetIFPresent(key); err != nil {
   170  			m.firedTxCache.Set(key, FireHistory{
   171  				FiredTimes: 1,
   172  				StartTime:  time.Now(),
   173  				LastTime:   time.Now(),
   174  			})
   175  		} else {
   176  			h := history.(FireHistory)
   177  			h.FiredTimes++
   178  			h.LastTime = time.Now()
   179  			m.firedTxCache.Set(key, h)
   180  		}
   181  		reqHashes = append(reqHashes, key)
   182  		//req.Hashes = append(req.Hashes, key)
   183  	}
   184  	if len(reqHashes) == 0 {
   185  		return
   186  	}
   187  	req.Hashes = &reqHashes
   188  
   189  	log.WithField("type", req.GetType()).
   190  		WithField("length", len(reqHashes)).WithField("hashes", req.String()).Debugf(
   191  		"sending message")
   192  
   193  	//m.messageSender.UnicastMessageRandomly(p2p_message.MessageTypeFetchByHashRequest, bytes)
   194  	//if the random peer dose't have this txs ,we will get nil response ,so broadcast it
   195  	//todo optimize later
   196  	//get source msg
   197  	soucrHash := source.(types2.Hash)
   198  
   199  	m.messageSender.MulticastToSource(&req, &soucrHash)
   200  }
   201  
   202  // LoopSync checks if there is new hash to fetch. Dedup.
   203  func (m *IncrementalSyncer) loopSync() {
   204  	buffer := make(map[types2.Hash]struct{})
   205  	var triggerTime int
   206  	sleepDuration := time.Duration(m.config.BatchTimeoutMilliSecond) * time.Millisecond
   207  	pauseCheckDuration := time.Duration(time.Millisecond * 100)
   208  	var fired int
   209  	for {
   210  		//if paused wait until resume
   211  		if !m.Enabled {
   212  			select {
   213  			case <-m.quitLoopSync:
   214  				log.Info("syncer received quit message. Quitting...")
   215  				return
   216  			case <-time.After(pauseCheckDuration):
   217  				continue
   218  			}
   219  		}
   220  		select {
   221  		case <-m.quitLoopSync:
   222  			log.Info("syncer received quit message. Quitting...")
   223  			return
   224  		case v := <-m.acquireTxQueue:
   225  			// collect to the set so that we can query in batch
   226  			if v != nil {
   227  				hash := *v
   228  				buffer[hash] = struct{}{}
   229  			}
   230  			triggerTime++
   231  			if len(buffer) > 0 && triggerTime >= m.config.MaxBatchSize {
   232  				//bloom filter msg is large , don't send too frequently
   233  				if fired%BloomFilterRate == 0 {
   234  					var hash types2.Hash
   235  					for key := range buffer {
   236  						hash = key
   237  						source, err := m.acquireTxDuplicateCache.GetIFPresent(key)
   238  						if err != nil {
   239  							continue
   240  						}
   241  						hash = source.(types2.Hash)
   242  						break
   243  					}
   244  					m.sendBloomFilter(hash)
   245  					fired = 0
   246  				} else {
   247  					m.fireRequest(buffer)
   248  					buffer = make(map[types2.Hash]struct{})
   249  					triggerTime = 0
   250  				}
   251  				fired++
   252  			}
   253  		case <-time.After(sleepDuration):
   254  			// trigger the message if we do not have new queries in such duration
   255  			// check duplicate here in the future
   256  			//bloom filter msg is large , don't send too frequently
   257  			if len(buffer) > 0 {
   258  				if fired%BloomFilterRate == 0 {
   259  					var hash types2.Hash
   260  					for key := range buffer {
   261  						hash = key
   262  						source, err := m.acquireTxDuplicateCache.GetIFPresent(key)
   263  						if err != nil {
   264  							continue
   265  						}
   266  						hash = source.(types2.Hash)
   267  						break
   268  					}
   269  					m.sendBloomFilter(hash)
   270  					fired = 0
   271  				} else {
   272  					m.fireRequest(buffer)
   273  					buffer = make(map[types2.Hash]struct{})
   274  					triggerTime = 0
   275  				}
   276  				fired++
   277  			}
   278  
   279  		case <-time.After(sleepDuration * 5):
   280  			repickedHashes := m.repickHashes()
   281  			log.WithField("hashes", repickedHashes).Info("syncer repicked hashes")
   282  			for _, hash := range repickedHashes {
   283  				buffer[hash] = struct{}{}
   284  			}
   285  		}
   286  	}
   287  }
   288  
   289  func (m *IncrementalSyncer) Enqueue(phash *types2.Hash, childHash types2.Hash, sendBloomfilter bool) {
   290  	if !m.Enabled {
   291  		log.WithField("hash", phash).Info("sync task is ignored since syncer is paused")
   292  		return
   293  	}
   294  	if phash != nil {
   295  		hash := *phash
   296  		if _, err := m.acquireTxDuplicateCache.Get(hash); err == nil {
   297  			log.WithField("hash", hash).Debugf("duplicate sync task")
   298  			return
   299  		}
   300  		if m.bufferedIncomingTxCache.Has(hash) {
   301  			log.WithField("hash", hash).Debugf("already in the bufferedCache. Will be announced later")
   302  			return
   303  		}
   304  		m.acquireTxDuplicateCache.Set(hash, childHash)
   305  		if sendBloomfilter {
   306  			goroutine.New(func() {
   307  				m.sendBloomFilter(childHash)
   308  			})
   309  		}
   310  	}
   311  	m.acquireTxQueue <- phash
   312  	// <-ffchan.NewTimeoutSender(m.acquireTxQueue, hash, "timeoutAcquireTx", 1000).C
   313  }
   314  
   315  func (m *IncrementalSyncer) ClearQueue() {
   316  	// clear all pending tasks
   317  	for len(m.acquireTxQueue) > 0 {
   318  		<-m.acquireTxQueue
   319  	}
   320  	m.acquireTxDuplicateCache.Purge()
   321  }
   322  
// eventLoop applies enable/disable commands from EnableEvent and kicks the
// notify loop after each change. Runs until quitLoopEvent is closed.
func (m *IncrementalSyncer) eventLoop() {
	for {
		select {
		case v := <-m.EnableEvent:
			log.WithField("enable", v).Info("incremental syncer got enable event")
			//old := m.Enabled
			m.Enabled = v
			// NOTE(review): this send blocks until txNotifyLoop receives —
			// confirm txNotifyLoop is always running when events arrive.
			m.notifyTxEvent <- true
			//if !old && v {
		// changed from disable to enable.
		//goroutine.New( m.notifyAllCachedTxs )
		//}
		//notify txs from cached first and enable to receive new tx from p2p

		case <-m.quitLoopEvent:
			m.Enabled = false
			log.Info("incremental syncer eventLoop received quit message. Quitting...")
			return
		}
	}
}
   344  
   345  func (m *IncrementalSyncer) txNotifyLoop() {
   346  	for {
   347  		select {
   348  		//todo 20*microsecond to millisecond , check this later
   349  		case <-time.After(20 * time.Millisecond):
   350  			goroutine.New(m.notifyNewTxi)
   351  		case <-m.notifyTxEvent:
   352  			goroutine.New(m.notifyNewTxi)
   353  		case <-m.NewLatestSequencerCh:
   354  			log.Debug("sequencer updated")
   355  			goroutine.New(m.RemoveConfirmedFromCache)
   356  		case <-m.quitNotifyEvent:
   357  			m.Enabled = false
   358  			log.Info("incremental syncer txNotifyLoop received quit message. Quitting...")
   359  			return
   360  		}
   361  	}
   362  }
   363  
// notifyNewTxi drains the buffered incoming-tx cache in batches and pushes
// unknown txs to every OnNewTxiReceived subscriber. The notifying flag keeps
// at most one drain running at a time.
// NOTE(review): the GetNotifying check and SetNotifying(true) are not one
// atomic operation, so two concurrent callers could in principle both enter —
// confirm whether this is acceptable given the 20ms trigger cadence.
func (m *IncrementalSyncer) notifyNewTxi() {
	if !m.Enabled || m.GetNotifying() {
		return
	}
	m.SetNotifying(true)
	defer m.SetNotifying(false)
	for m.bufferedIncomingTxCache.Len() != 0 {
		if !m.Enabled {
			break
		}
		log.Trace("len cache ", m.bufferedIncomingTxCache.Len())
		// dequeue up to NewTxsChannelSize txs per round
		txis, err := m.bufferedIncomingTxCache.DeQueueBatch(m.config.NewTxsChannelSize)
		if err != nil {
			log.WithError(err).Warn("got tx failed")
			break
		}
		log.Trace("got txis ", txis)
		// filter out txs that became known while buffered
		var txs types.Txis
		for _, txi := range txis {
			if txi != nil && !m.isKnownHash(txi.GetHash()) {
				txs = append(txs, txi)
			}
		}
		// NOTE: txs may be nil/empty here and is still sent to subscribers;
		// receivers appear to tolerate that (unchanged behavior).
		for _, c := range m.OnNewTxiReceived {
			c <- txs
			// <-ffchan.NewTimeoutSenderShort(c, txi, fmt.Sprintf("syncerNotifyNewTxi_%d", i)).C
		}
		log.Trace("len cache ", m.bufferedIncomingTxCache.Len())
	}
	if m.bufferedIncomingTxCache.Len() != 0 {
		log.Debug("len cache ", m.bufferedIncomingTxCache.Len())
	}
}
   397  
   398  /*
   399  func (m *IncrementalSyncer) notifyAllCachedTxs() {
   400  	log.WithField("size", m.bufferedIncomingTxCache.Len()).Debug("incoming cache is being dumped")
   401  	txs := m.bufferedIncomingTxCache.PopALl()
   402  	for _, tx := range txs {
   403  		// announce and then remove
   404  		if m.isKnownHash(tx.GetHash()) {
   405  			log.WithField("tx ", tx).Debug("duplicated tx ")
   406  			continue
   407  		}
   408  		m.notifyNewTxi(tx)
   409  	}
   410  	log.WithField("size", len(txs)).Debug("incoming cache dumped")
   411  }
   412  */
   413  
   414  func (m *IncrementalSyncer) repickHashes() types2.Hashes {
   415  	maps := m.firedTxCache.GetALL(true)
   416  	sleepDuration := time.Duration(m.config.BatchTimeoutMilliSecond) * time.Millisecond
   417  	duration := time.Duration(sleepDuration * 20)
   418  	var result types2.Hashes
   419  	for ik, iv := range maps {
   420  		v := iv.(FireHistory)
   421  		if time.Now().Sub(v.LastTime) > duration {
   422  			// haven't got response after 10 seconds
   423  			result = append(result, ik.(types2.Hash))
   424  		}
   425  	}
   426  	return result
   427  }
   428  
// SetNotifying sets the notifying flag under the write lock. The flag marks
// that a notifyNewTxi drain is in progress.
func (m *IncrementalSyncer) SetNotifying(v bool) {
	m.mu.Lock()
	defer m.mu.Unlock()
	m.notifying = v
}
   434  
   435  func (m *IncrementalSyncer) GetNotifying() bool {
   436  	m.mu.Lock()
   437  	defer m.mu.Unlock()
   438  	return m.notifying
   439  }
   440  
// RemoveConfirmedFromCache evicts expired and invalid txs (older than 60
// units as interpreted by the cache) from the incoming buffer. Triggered
// whenever a new sequencer arrives.
func (m *IncrementalSyncer) RemoveConfirmedFromCache() {
	log.WithField("total cache item ", m.bufferedIncomingTxCache.Len()).Debug("removing expired item")
	m.bufferedIncomingTxCache.RemoveExpiredAndInvalid(60)
	log.WithField("total cache item ", m.bufferedIncomingTxCache.Len()).Debug("removed expired item")
}
   446  
// IsCachedHash reports whether the tx with the given hash is sitting in the
// buffered incoming-tx cache awaiting announcement.
func (m *IncrementalSyncer) IsCachedHash(hash types2.Hash) bool {
	return m.bufferedIncomingTxCache.Has(hash)
}
   450  
// TxEnable reports whether the syncer is currently enabled.
// NOTE(review): Enabled is read without synchronization — confirm callers
// tolerate a slightly stale value.
func (m *IncrementalSyncer) TxEnable() bool {
	return m.Enabled
}
   454  
   455  func (m *IncrementalSyncer) SyncHashList(seqHash types2.Hash) {
   456  	peerId := m.SequencerCache.GetPeer(seqHash)
   457  	if peerId == "" {
   458  		log.Warn("nil peer id")
   459  		return
   460  	}
   461  	goroutine.New(func() {
   462  		m.syncHashList(peerId)
   463  	})
   464  }
   465  
   466  func (m *IncrementalSyncer) syncHashList(peerId string) {
   467  	req := archive.MessageSyncRequest{
   468  		RequestId: message_archive.MsgCounter.Get(),
   469  	}
   470  	height := m.getHeight()
   471  	req.Height = &height
   472  	hashs := m.getTxsHashes()
   473  	var hashTerminates archive.HashTerminats
   474  	for _, hash := range hashs {
   475  		var hashTerminate archive.HashTerminat
   476  		copy(hashTerminate[:], hash.Bytes[:4])
   477  		hashTerminates = append(hashTerminates, hashTerminate)
   478  	}
   479  	req.HashTerminats = &hashTerminates
   480  	log.WithField("to ", peerId).WithField("hash list num ", len(hashTerminates)).
   481  		WithField("height ", height).
   482  		WithField("type", req.GetType()).
   483  		WithField("len ", req.HashTerminats).Debug("sending hashList MessageTypeFetchByHashRequest")
   484  
   485  	//m.messageSender.UnicastMessageRandomly(p2p_message.MessageTypeFetchByHashRequest, bytes)
   486  	//if the random peer dose't have this txs ,we will get nil response ,so broadcast it
   487  	m.messageSender.SendToPeer(peerId, &req)
   488  	return
   489  }