github.com/deroproject/derosuite@v2.1.6-1.0.20200307070847-0f2e589c7a2b+incompatible/p2p/connection_pool.go

// Copyright 2017-2018 DERO Project. All rights reserved.
// Use of this source code in any form is governed by RESEARCH license.
// license can be found in the LICENSE file.
// GPG: 0F39 E425 8C65 3947 702A  8234 08B2 0360 A03A 9DE8
//
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
// MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL
// THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
// STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF
// THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

package p2p

/* this file implements the connection pool manager, keeping a list of active connections
 * it also ensures that a single IP cannot open more than a limited number of connections
 *
 */
import "fmt"
import "net"
import "sync"
import "sort"
import "time"
import "strings"
import "math/big"
import "math/rand"
import "sync/atomic"
import "runtime/debug"

import "encoding/binary"

//import "container/list"

import "github.com/romana/rlog"
import "github.com/vmihailenco/msgpack"
import "github.com/dustin/go-humanize"
import log "github.com/sirupsen/logrus"
import "github.com/paulbellamy/ratecounter"
import "github.com/prometheus/client_golang/prometheus"

import "github.com/deroproject/derosuite/block"
import "github.com/deroproject/derosuite/crypto"
import "github.com/deroproject/derosuite/globals"
import "github.com/deroproject/derosuite/transaction"

// any connection, incoming or outgoing, can only be in one of these states
//type Conn_State uint32

const (
	HANDSHAKE_PENDING uint32 = 0 // "Pending"
	IDLE                     = 1 // "Idle"
	ACTIVE                   = 2 // "Active"
)
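
// Illustrative sketch (not part of the original source): State is read and
// written concurrently by the printing and broadcast paths below, so state
// transitions are expected to go through sync/atomic.
/*
func example_promote(c *Connection) {
	// promote the connection once the handshake completes; compare-and-swap
	// avoids racing with a concurrent state change
	if atomic.CompareAndSwapUint32(&c.State, HANDSHAKE_PENDING, ACTIVE) {
		// the connection may now receive broadcasts
	}
}
*/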

type Queued_Command struct {
	Command uint64 // we are waiting for this response
	BLID    []crypto.Hash
	TXID    []crypto.Hash
}

// This structure is used to do bookkeeping for the connection and keeps other DATA related to the peer
// golang only guarantees 64-bit atomic access to uint64/int64 fields on 64-bit aligned addresses
// therefore all atomics are at the top of the struct
type Connection struct {
	Height       int64 // last height sent by peer (kept first to avoid alignment issues)
	StableHeight int64 // last stable height
	TopoHeight   int64 // topo height, current topo height, this is the only thing we require for syncing

	LastObjectRequestTime int64  // when was the last item placed in object list
	BytesIn               uint64 // total bytes in
	BytesOut              uint64 // total bytes out
	Latency               int64  // time.Duration            // latency to this node when sending timed sync

	Incoming          bool              // is connection incoming or outgoing
	Addr              *net.TCPAddr      // endpoint on the other end
	Port              uint32            // port advertised by the other end as its server; if it's 0 the server cannot accept connections
	Peer_ID           uint64            // Remote peer id
	Lowcpuram         bool              // whether the peer has low cpu/ram
	SyncNode          bool              // whether the peer has been added on the command line as a sync node
	Top_Version       uint64            // current hard fork version supported by peer
	TXpool_cache      map[uint64]uint32 // used for ultra blocks in miner mode, cache where we keep TXs which have been broadcast to this peer
	TXpool_cache_lock sync.RWMutex
	ProtocolVersion   string
	Tag               string // tag for the other end
	DaemonVersion     string
	//Exit                  chan bool   // Exit marker that connection needs to be killed
	ExitCounter           int32
	State                 uint32       // state of the connection
	Top_ID                crypto.Hash  // top block id of the connection
	Cumulative_Difficulty string       // cumulative difficulty of top block of peer, this is NOT required
	CDIFF                 atomic.Value //*big.Int    // NOTE: this field is used internally and is parsed from Cumulative_Difficulty

	logger            *log.Entry // connection specific logger
	logid             string     // formatted version of connection
	Requested_Objects [][32]byte // currently unused as we sync up with a single peer at a time
	Conn              net.Conn   // actual object to talk
	//	Command_queue     *list.List               // New protocol is partly synchronous
	Objects      chan Queued_Command      // contains all objects that are requested
	SpeedIn      *ratecounter.RateCounter // average speed in last 60 seconds
	SpeedOut     *ratecounter.RateCounter // average speed in last 60 secs
	request_time atomic.Value             //time.Time                // used to track latency
	writelock    sync.Mutex               // used to serialize writes

	sync.Mutex // used only by connection go routine

}
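
// Illustrative sketch (not part of the original source): the 64-bit counters
// above sit at the top of the struct so they stay 64-bit aligned even on
// 32-bit platforms; they must then be accessed through sync/atomic from any
// goroutine, as the rest of this file does.
/*
func example_account_traffic(c *Connection, n int) {
	atomic.AddUint64(&c.BytesOut, uint64(n)) // concurrent-safe write
	_ = atomic.LoadUint64(&c.BytesIn)        // concurrent-safe read
}
*/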

// 20 one-second buckets track block propagation times of up to 20 seconds
var block_propagation = prometheus.NewHistogram(prometheus.HistogramOpts{
	Name:    "block_propagation_ms",
	Help:    "Block Propagation time milliseconds as detected by daemon",
	Buckets: prometheus.LinearBuckets(0, 1000, 20), // start 0 ms, each 1000 ms,  20 such buckets.
})

// 20 one-second buckets track transaction propagation times of up to 20 seconds
var transaction_propagation = prometheus.NewHistogram(prometheus.HistogramOpts{
	Name:    "tx_propagation_ms",
	Help:    "TX Propagation time milliseconds as detected by daemon",
	Buckets: prometheus.LinearBuckets(0, 1000, 20), // start 0 ms, each 1000 ms,  20 such buckets.
})
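
// Illustrative sketch (not part of the original source): the maps below record
// when an object was first seen; the elapsed time since that first sighting is
// what would be fed into the histograms above, roughly like this.
/*
func example_observe_block(blid crypto.Hash) {
	if first_seen, ok := block_propagation_map.Load(blid); ok {
		elapsed := time.Since(first_seen.(time.Time))
		block_propagation.Observe(float64(elapsed / time.Millisecond))
	}
}
*/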

var block_propagation_map sync.Map
var tx_propagation_map sync.Map

/* // used to debug locks
 var x = debug.Stack
func (connection *Connection )Lock() {
    connection.x.Lock()
    connection.logger.Warnf("Locking Stack trace  \n%s", debug.Stack())

}

func (connection *Connection )Unlock() {
    connection.x.Unlock()
    connection.logger.Warnf("Unlock Stack trace  \n%s", debug.Stack())

}
*/

var connection_map sync.Map                      // map[string]*Connection{}
var connection_per_ip_counter = map[string]int{} // only keeps the count of connections per IP
//var connection_mutex sync.Mutex

// clean up propagation entries older than 10 minutes
func clean_up_propagation() {

	for {
		time.Sleep(time.Minute) // cleanup every minute
		current_time := time.Now()

		// track propagation upto 10 minutes
		block_propagation_map.Range(func(k, value interface{}) bool {
			first_seen := value.(time.Time)
			if current_time.Sub(first_seen).Round(time.Second) > 600*time.Second { // expire entries after 10 minutes
				block_propagation_map.Delete(k)
			}
			return true
		})

		tx_propagation_map.Range(func(k, value interface{}) bool {
			first_seen := value.(time.Time)
			if current_time.Sub(first_seen).Round(time.Second) > 600*time.Second {
				tx_propagation_map.Delete(k)
			}
			return true
		})
	}

}
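
// Illustrative sketch (not part of the original source): entries enter the
// propagation maps when an object is first seen on the wire; LoadOrStore keeps
// only the earliest timestamp, which the cleaner above expires after 10 minutes.
/*
func example_mark_first_seen(blid crypto.Hash) {
	block_propagation_map.LoadOrStore(blid, time.Now())
}
*/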

// for incoming connections we use the peer id to ascertain uniqueness
// for outgoing connections we use the tcp endpoint address, so that no more than one connection is made to the same endpoint
func Key(c *Connection) string {
	if c.Incoming {
		return fmt.Sprintf("%d", c.Peer_ID)
	}
	return c.Addr.String()
}
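
// Illustrative sketch (not part of the original source): for an incoming
// connection the key is the decimal peer id, for an outgoing one it is the
// remote TCP endpoint.
/*
func example_keys() {
	in := &Connection{Incoming: true, Peer_ID: 12345}
	out := &Connection{Addr: &net.TCPAddr{IP: net.ParseIP("203.0.113.7"), Port: 18090}}
	fmt.Println(Key(in))  // "12345"
	fmt.Println(Key(out)) // "203.0.113.7:18090"
}
*/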

// check whether an IP is in the map already
func IsAddressConnected(address string) bool {

	if _, ok := connection_map.Load(strings.TrimSpace(address)); ok {
		return true
	}
	return false
}

// add connection to the map
// we also enforce limits for incoming connections:
// same IP: max 8 connections (considering NAT)
// same Peer ID: max 4 connections
func Connection_Add(c *Connection) {
	//connection_mutex.Lock()
	//defer connection_mutex.Unlock()

	ip_count := 0
	peer_id_count := 0

	incoming_ip := c.Addr.IP.String()
	incoming_peer_id := c.Peer_ID

	if c.Incoming { // incoming connections need extra protection against various attacks

		connection_map.Range(func(k, value interface{}) bool {
			v := value.(*Connection)
			if v.Incoming {
				if incoming_ip == v.Addr.IP.String() {
					ip_count++
				}

				if incoming_peer_id == v.Peer_ID {
					peer_id_count++
				}
			}
			return true
		})

	}

	if ip_count >= 8 || peer_id_count >= 4 {
		rlog.Warnf("IP address %s (%d) Peer ID %d(%d) already has too many connections, exiting this connection", incoming_ip, ip_count, incoming_peer_id, peer_id_count)
		c.Exit()
		return
	}

	connection_map.Store(Key(c), c)
}

// unique connection list
// since 2 nodes may be connected in both directions, we need to deliver new blocks/txs to only one
// thereby saving network/computing costs
// we find duplicates using the peer id
func UniqueConnections() map[uint64]*Connection {
	unique_map := map[uint64]*Connection{}

	connection_map.Range(func(k, value interface{}) bool {
		v := value.(*Connection)
		if atomic.LoadUint32(&v.State) != HANDSHAKE_PENDING && GetPeerID() != v.Peer_ID { // and skip ourselves
			unique_map[v.Peer_ID] = v // map will automatically deduplicate/overwrite previous
		}
		return true
	})

	return unique_map
}

// remove a connection from the map
func Connection_Delete(c *Connection) {
	connection_map.Delete(Key(c))
}

// prints all the connection info to screen
func Connection_Print() {

	fmt.Printf("Connection info for peers\n")

	if globals.Arguments["--debug"].(bool) == true {
		fmt.Printf("%-20s %-16s %-5s %-7s %-7s %23s %3s %5s %s %s %s %s %10s\n", "Remote Addr", "PEER ID", "PORT", " State", "Latency", "S/H/T", "DIR", "QUEUE", "     IN", "    OUT", " IN SPEED", " OUT SPEED", "Version")
	} else {
		fmt.Printf("%-20s %-16s %-5s %-7s %-7s %17s %3s %5s %s %s %s %s %10s\n", "Remote Addr", "PEER ID", "PORT", " State", "Latency", "H/T", "DIR", "QUEUE", "     IN", "    OUT", " IN SPEED", " OUT SPEED", "Version")

	}

	var clist []*Connection

	connection_map.Range(func(k, value interface{}) bool {
		v := value.(*Connection)
		clist = append(clist, v)
		return true
	})

	rlog.Infof("Obtained %d connections for printing", len(clist))

	// sort the list
	sort.Slice(clist, func(i, j int) bool { return clist[i].Addr.String() < clist[j].Addr.String() })

	our_topo_height := chain.Load_TOPO_HEIGHT(nil)

	for i := range clist {

		// skip pending handshakes and skip ourselves
		if atomic.LoadUint32(&clist[i].State) == HANDSHAKE_PENDING || GetPeerID() == clist[i].Peer_ID {
			continue
		}

		dir := "OUT"
		if clist[i].Incoming {
			dir = "INC"
		}
		state := "PENDING"
		if atomic.LoadUint32(&clist[i].State) == IDLE {
			state = "IDLE"
		} else if atomic.LoadUint32(&clist[i].State) == ACTIVE {
			state = "ACTIVE"
		}

		version := clist[i].DaemonVersion

		if len(version) > 20 {
			version = version[:20]
		}

		tag := clist[i].Tag
		if len(tag) > 20 {
			tag = tag[:20]
		}

		var color_yellow = "\033[33m"
		var color_normal = "\033[0m"

		// if our topo height is ahead of the peer's, print its line in yellow
		if our_topo_height > clist[i].TopoHeight {
			fmt.Print(color_yellow)
		}

		if globals.Arguments["--debug"].(bool) == true {
			hstring := fmt.Sprintf("%d/%d/%d", clist[i].StableHeight, clist[i].Height, clist[i].TopoHeight)
			fmt.Printf("%-20s %16x %5d %7s %7s %23s %s %5d %7s %7s %8s %9s     %10s %s\n", clist[i].Addr.IP, clist[i].Peer_ID, clist[i].Port, state, time.Duration(atomic.LoadInt64(&clist[i].Latency)).Round(time.Millisecond).String(), hstring, dir, clist[i].IsConnectionSyncing(), humanize.Bytes(atomic.LoadUint64(&clist[i].BytesIn)), humanize.Bytes(atomic.LoadUint64(&clist[i].BytesOut)), humanize.Bytes(uint64(clist[i].SpeedIn.Rate()/60)), humanize.Bytes(uint64(clist[i].SpeedOut.Rate()/60)), version, tag)

		} else {
			hstring := fmt.Sprintf("%d/%d", clist[i].Height, clist[i].TopoHeight)
			fmt.Printf("%-20s %16x %5d %7s %7s %17s %s %5d %7s %7s %8s %9s     %10s %s\n", clist[i].Addr.IP, clist[i].Peer_ID, clist[i].Port, state, time.Duration(atomic.LoadInt64(&clist[i].Latency)).Round(time.Millisecond).String(), hstring, dir, clist[i].IsConnectionSyncing(), humanize.Bytes(atomic.LoadUint64(&clist[i].BytesIn)), humanize.Bytes(atomic.LoadUint64(&clist[i].BytesOut)), humanize.Bytes(uint64(clist[i].SpeedIn.Rate()/60)), humanize.Bytes(uint64(clist[i].SpeedOut.Rate()/60)), version, tag)

		}

		fmt.Print(color_normal)
	}

}

// for continuous updates on the command line, get the median height of all peers
// to show the overall network status
func Best_Peer_Height() (best_height, best_topo_height int64) {

	var heights []uint64
	var topoheights []uint64

	connection_map.Range(func(k, value interface{}) bool {
		v := value.(*Connection)
		if atomic.LoadUint32(&v.State) != HANDSHAKE_PENDING {
			height := atomic.LoadInt64(&v.Height)
			heights = append(heights, uint64(height))
			topoheights = append(topoheights, uint64(atomic.LoadInt64(&v.TopoHeight)))
		}
		return true
	})

	best_height = int64(Median(heights))
	best_topo_height = int64(Median(topoheights))

	return
}
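
// Illustrative sketch (not part of the original source): Median is defined
// elsewhere in this package; a typical median over a uint64 slice would look
// roughly like this.
/*
func example_median(data []uint64) uint64 {
	if len(data) == 0 {
		return 0
	}
	sorted := append([]uint64(nil), data...) // copy, leave the caller's slice intact
	sort.Slice(sorted, func(i, j int) bool { return sorted[i] < sorted[j] })
	if len(sorted)%2 == 0 {
		return (sorted[len(sorted)/2-1] + sorted[len(sorted)/2]) / 2
	}
	return sorted[len(sorted)/2]
}
*/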

// disconnect all peers; currently a no-op, the old implementation is kept below for reference
func Disconnect_All() (Count uint64) {
	return
	/*
		connection_mutex.Lock()
		for _, v := range connection_map {
			// v.Lock()
			close(v.Exit) // close the connection
			//v.Unlock()
		}
		connection_mutex.Unlock()
		return
	*/
}

// this function returns the count of peers which have completed a successful handshake
func Peer_Count() (Count uint64) {

	connection_map.Range(func(k, value interface{}) bool {
		v := value.(*Connection)
		if atomic.LoadUint32(&v.State) != HANDSHAKE_PENDING && GetPeerID() != v.Peer_ID {
			Count++
		}
		return true
	})

	return
}

// this function returns a random connection whose advertised height is at least the requested height
func Random_Connection(height int64) (c *Connection) {

	var clist []*Connection

	connection_map.Range(func(k, value interface{}) bool {
		v := value.(*Connection)
		if atomic.LoadInt64(&v.Height) >= height {
			clist = append(clist, v)
		}
		return true
	})

	if len(clist) > 0 {
		return clist[rand.Int()%len(clist)]
	}

	return nil
}

// this returns the count of peers in each direction
func Peer_Direction_Count() (Incoming uint64, Outgoing uint64) {

	connection_map.Range(func(k, value interface{}) bool {
		v := value.(*Connection)
		if atomic.LoadUint32(&v.State) != HANDSHAKE_PENDING && GetPeerID() != v.Peer_ID {
			if v.Incoming {
				Incoming++
			} else {
				Outgoing++
			}
		}
		return true
	})

	return
}

// broadcast a block to all connected peers
// we can only broadcast a block which is in our db
// this function is triggered from 2 points, one when we receive an unknown block which can be successfully added to the chain
// second from the blockchain which has to relay locally mined blocks as soon as possible
func Broadcast_Block(cbl *block.Complete_Block, PeerID uint64) { // if peerid is provided it is skipped
	var request Notify_New_Objects_Struct

	defer func() {
		if r := recover(); r != nil {
			logger.Warnf("Recovered while broadcasting Block, Stack trace below %+v", r)
			logger.Warnf("Stack trace  \n%s", debug.Stack())
		}
	}()

	/*if IsSyncing() { // if we are syncing, do NOT broadcast the block
		return
	}*/

	fill_common(&request.Common) // fill common info
	request.Command = V2_NOTIFY_NEW_BLOCK
	request.CBlock.Block = cbl.Bl.Serialize()

	for i := range cbl.Txs {
		request.CBlock.Txs = append(request.CBlock.Txs, cbl.Txs[i].Serialize())
	}

	serialized, err := msgpack.Marshal(&request)
	if err != nil {
		panic(err)
	}

	our_height := chain.Get_Height()
	// build the request once and dispatch it to all possible peers
	count := 0
	unique_map := UniqueConnections()

	for _, v := range unique_map {
		select {
		case <-Exit_Event:
			return
		default:
		}
		if atomic.LoadUint32(&v.State) != HANDSHAKE_PENDING && PeerID != v.Peer_ID { // skip pre-handshake connections

			// if the other end is more than 25 blocks behind, do not broadcast the block to it
			// this is an optimisation: if the other end is syncing,
			// every peer broadcasting to it only adds overhead and makes it lag more
			peer_height := atomic.LoadInt64(&v.Height)
			if (our_height - peer_height) > 25 {
				continue
			}

			count++
			go func(connection *Connection) {
				defer func() {
					if r := recover(); r != nil {
						rlog.Warnf("Recovered while handling connection, Stack trace below %+v", r)
						rlog.Warnf("Stack trace  \n%s", debug.Stack())
					}
				}()
				if globals.Arguments["--lowcpuram"].(bool) == false && connection.TXpool_cache != nil { // everyone gets an ultra compact block if possible
					var miner_specific_request Notify_New_Objects_Struct
					miner_specific_request.Common = request.Common
					miner_specific_request.Command = V2_NOTIFY_NEW_BLOCK
					miner_specific_request.CBlock.Block = request.CBlock.Block

					sent := 0
					skipped := 0
					connection.TXpool_cache_lock.RLock()
					for i := range cbl.Bl.Tx_hashes {
						// in ultra compact mode send a transaction only if we know that we have not sent it earlier
						// send only txs not found in the cache

						if _, ok := connection.TXpool_cache[binary.LittleEndian.Uint64(cbl.Bl.Tx_hashes[i][:])]; !ok {
							miner_specific_request.CBlock.Txs = append(miner_specific_request.CBlock.Txs, request.CBlock.Txs[i])
							sent++

						} else {
							skipped++
						}

					}
					connection.TXpool_cache_lock.RUnlock()

					connection.logger.Debugf("Sending ultra block to peer total %d tx skipped %d sent %d", len(cbl.Bl.Tx_hashes), skipped, sent)

					serialized_miner_specific, err := msgpack.Marshal(&miner_specific_request)
					if err != nil {
						panic(err)
					}
					connection.Send_Message(serialized_miner_specific) // miners need ultra compact blocks

				} else {
					connection.logger.Debugf("Sending full block to peer")

					connection.Send_Message(serialized) // non miners need full blocks
				}
			}(v)
		}

	}

	rlog.Infof("Broadcasted block %s to %d peers", cbl.Bl.GetHash(), count)

}

// broadcast a new transaction, return to how many peers the transaction has been broadcast
// this function is triggered from 2 points, one when we receive an unknown tx
// second from the mempool which may want to relay local or soon-to-expire transactions
func Broadcast_Tx(tx *transaction.Transaction, PeerID uint64) (relayed_count int) {

	defer func() {
		if r := recover(); r != nil {
			logger.Warnf("Recovered while broadcasting TX, Stack trace below %+v", r)
			logger.Warnf("Stack trace  \n%s", debug.Stack())
		}
	}()

	var request Notify_New_Objects_Struct

	fill_common_skip_topoheight(&request.Common) // fill common info, but skip topo height
	request.Command = V2_NOTIFY_NEW_TX
	request.Tx = tx.Serialize()

	serialized, err := msgpack.Marshal(&request)
	if err != nil {
		panic(err)
	}

	txhash := tx.GetHash()
	our_height := chain.Get_Height()

	unique_map := UniqueConnections()

	for _, v := range unique_map {
		select {
		case <-Exit_Event:
			return
		default:
		}
		if atomic.LoadUint32(&v.State) != HANDSHAKE_PENDING && PeerID != v.Peer_ID { // skip pre-handshake connections

			// if the other end is more than 25 blocks behind, do not broadcast the tx to it
			// this is an optimisation: if the other end is syncing,
			// every peer broadcasting to it only adds overhead and makes it lag more
			// if we are lagging or the peer is lagging, do not broadcast transactions
			peer_height := atomic.LoadInt64(&v.Height)
			if (our_height-peer_height) > 25 || (our_height+5) < peer_height {
				continue
			}

			relayed_count++
			go func(connection *Connection) {
				defer func() {
					if r := recover(); r != nil {
						rlog.Warnf("Recovered while handling connection, Stack trace below %+v", r)
						rlog.Warnf("Stack trace  \n%s", debug.Stack())
					}
				}()

				connection.Send_Message(serialized) // send the bytes

				// mark the TX as having been sent to this node, so that
				// ultra compact blocks can skip it later
				connection.TXpool_cache_lock.Lock()
				// disable cache if not possible due to options
				if globals.Arguments["--lowcpuram"].(bool) == false && connection.TXpool_cache != nil {
					connection.TXpool_cache[binary.LittleEndian.Uint64(txhash[:])] = uint32(time.Now().Unix())
				}
				connection.TXpool_cache_lock.Unlock()

			}(v)
		}

	}
	//rlog.Infof("Broadcasted tx %s to %d peers", txhash, relayed_count)
	return
}
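
// Illustrative sketch (not part of the original source): the TXpool_cache key
// used above is the first 8 bytes of the tx hash read as a little-endian
// uint64, the same truncation Broadcast_Block uses to skip already-sent txs.
/*
func example_cache_key(txid crypto.Hash) uint64 {
	return binary.LittleEndian.Uint64(txid[:8])
}
*/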

//var sync_in_progress bool

// we can tell whether we are syncing by looking at the pending queue of expected responses
// if object responses are queued, we are syncing
// if even one of the connections is syncing, then we are synchronising
// returns the number of blocks queued on this connection
func (connection *Connection) IsConnectionSyncing() (count int) {
	//connection.Lock()
	//defer connection.Unlock()

	if atomic.LoadUint32(&connection.State) == HANDSHAKE_PENDING { // skip pre-handshake connections
		return 0
	}

	// check whether 13 secs have passed since the last object request, if yes close the connection
	// so we can try some other connection
	if len(connection.Objects) > 0 {
		if time.Now().Unix() >= (13 + atomic.LoadInt64(&connection.LastObjectRequestTime)) {
			connection.Exit()
			return 0
		}
	}

	return len(connection.Objects)

}

// trigger a sync with a random peer
func trigger_sync() {

	defer func() {
		if r := recover(); r != nil {
			logger.Warnf("Recovered while triggering sync, Stack trace below %+v", r)
			logger.Warnf("Stack trace  \n%s", debug.Stack())
		}
	}()

	_, topoheight := Best_Peer_Height()

	unique_map := UniqueConnections()

	var clist []*Connection

	for _, value := range unique_map {
		clist = append(clist, value)
	}

	// shuffle the list randomly; could we get away with len/2 random swaps?
	globals.Global_Random.Shuffle(len(clist), func(i, j int) {
		clist[i], clist[j] = clist[j], clist[i]
	})

	for _, connection := range clist {

		//connection.Lock()   recursive mutexes are not supported
		// only choose the highest available peers for syncing
		if atomic.LoadUint32(&connection.State) != HANDSHAKE_PENDING && topoheight <= atomic.LoadInt64(&connection.TopoHeight) { // skip pre-handshake connections
			// check whether we are lagging with this connection
			//connection.Lock()
			islagging := chain.IsLagging(connection.CDIFF.Load().(*big.Int)) // we only use cdiff to see if we need to resync
			// islagging := true
			//connection.Unlock()
			if islagging {

				//connection.Lock()
				connection.logger.Debugf("We need to resync with the peer height %d", connection.Height)
				//connection.Unlock()
				// set mode to synchronising
				connection.Send_ChainRequest()
				break
			}
		}

	}

}

// detect if something is queued to any of the peers
// if something is queued, we are syncing
func IsSyncing() (result bool) {

	syncing := false
	connection_map.Range(func(k, value interface{}) bool {
		v := value.(*Connection)
		if v.IsConnectionSyncing() != 0 {
			syncing = true
			return false
		}
		return true
	})
	return syncing
}

// detect whether we are behind any of the connected peers and trigger a sync ASAP
// randomly with one of the peers
func syncroniser() {
	for {
		select {
		case <-Exit_Event:
			return
		case <-time.After(1000 * time.Millisecond):
		}

		if !IsSyncing() {
			trigger_sync() // check whether we are out of sync
		}

	}

}