github.com/deroproject/derosuite@v2.1.6-1.0.20200307070847-0f2e589c7a2b+incompatible/p2p/connection_handler.go

// Copyright 2017-2018 DERO Project. All rights reserved.
// Use of this source code in any form is governed by RESEARCH license.
// license can be found in the LICENSE file.
// GPG: 0F39 E425 8C65 3947 702A  8234 08B2 0360 A03A 9DE8
//
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
// MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL
// THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
// STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF
// THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

package p2p

import "io"

//import "fmt"
import "net"

//import "sync"
//import "math/rand"
import "time"
import "math/big"
import "runtime/debug"
import "sync/atomic"
import "encoding/binary"

//import "container/list"

import "github.com/romana/rlog"
import log "github.com/sirupsen/logrus"
import "github.com/paulbellamy/ratecounter"
import "github.com/vmihailenco/msgpack"

import "github.com/deroproject/derosuite/config"

//import "github.com/deroproject/derosuite/crypto"
import "github.com/deroproject/derosuite/globals"

//import "github.com/deroproject/derosuite/blockchain"

// This file defines everything a node must respond to in order to act as a server (handling incoming requests).
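//
// Wire format, as implemented below: every message is a frame made of a
// 4-byte little-endian length prefix followed by a msgpack-encoded command
// structure. Frames are read in Read_Data_Frame and dispatched on their
// Command field in Handle_Connection.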

// fill the common part from our chain
func fill_common(common *Common_Struct) {
	common.Height = chain.Get_Height()
	//common.StableHeight = chain.Get_Stable_Height()
	common.TopoHeight = chain.Load_TOPO_HEIGHT(nil)
	//common.Top_ID, _ = chain.Load_BL_ID_at_Height(common.Height - 1)

	high_block, err := chain.Load_Block_Topological_order_at_index(nil, common.TopoHeight)
	if err != nil {
		common.Cumulative_Difficulty = "0"
	} else {
		common.Cumulative_Difficulty = chain.Load_Block_Cumulative_Difficulty(nil, high_block).String()
	}
	common.Top_Version = uint64(chain.Get_Current_Version_at_Height(int64(common.Height))) // this must be taken from the hardfork
}

// used while sending a TX ASAP (currently identical to fill_common, despite the name)
func fill_common_skip_topoheight(common *Common_Struct) {
	common.Height = chain.Get_Height()
	//common.StableHeight = chain.Get_Stable_Height()
	common.TopoHeight = chain.Load_TOPO_HEIGHT(nil)
	//common.Top_ID, _ = chain.Load_BL_ID_at_Height(common.Height - 1)

	high_block, err := chain.Load_Block_Topological_order_at_index(nil, common.TopoHeight)
	if err != nil {
		common.Cumulative_Difficulty = "0"
	} else {
		common.Cumulative_Difficulty = chain.Load_Block_Cumulative_Difficulty(nil, high_block).String()
	}
	common.Top_Version = uint64(chain.Get_Current_Version_at_Height(int64(common.Height))) // this must be taken from the hardfork
}

// update some common properties quickly
func (connection *Connection) Update(common *Common_Struct) {
	//connection.Lock()
	//defer connection.Unlock()
	atomic.StoreInt64(&connection.Height, common.Height) // satisfy race detector GOD
	if common.StableHeight != 0 {
		atomic.StoreInt64(&connection.StableHeight, common.StableHeight) // satisfy race detector GOD
	}
	atomic.StoreInt64(&connection.TopoHeight, common.TopoHeight) // satisfy race detector GOD

	//connection.Top_ID = common.Top_ID
	connection.Cumulative_Difficulty = common.Cumulative_Difficulty

	x := new(big.Int)
	if _, ok := x.SetString(connection.Cumulative_Difficulty, 10); !ok { // if Cumulative_Difficulty could not be parsed, kill the connection
		rlog.Warnf("Could not parse Cumulative_Difficulty in common %s \"%s\"", connection.Cumulative_Difficulty, globals.CTXString(connection.logger))

		connection.Exit()
	}

	connection.CDIFF.Store(x) // do it atomically

	if connection.Top_Version != common.Top_Version {
		atomic.StoreUint64(&connection.Top_Version, common.Top_Version) // satisfy race detector GOD
	}
}
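
// CDIFF is an atomic.Value holding a *big.Int, so readers are presumably
// expected to retrieve it with connection.CDIFF.Load().(*big.Int); storing a
// freshly allocated value on every update keeps the stored pointer immutable
// for concurrent readers.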

// sets the read timeout based on connection state, so that stale connections are cleared quickly
// if these timeouts are hit, the connection is closed
func (connection *Connection) set_timeout() {
	if atomic.LoadUint32(&connection.State) == HANDSHAKE_PENDING {
		connection.Conn.SetReadDeadline(time.Now().Add(8 * time.Second)) // new connections have 8 seconds to handshake
	} else {
		// if we have queued something, time should be less than 30 sec assuming 10KB/sec BW is available
		connection.Conn.SetReadDeadline(time.Now().Add(40 * time.Second)) // established connections have 40 secs to communicate
	}
}

// marks connection as exit in progress
func (conn *Connection) Exit() {
	atomic.AddInt32(&conn.ExitCounter, 1)
}

func (conn *Connection) IsExitInProgress() bool {
	return atomic.LoadInt32(&conn.ExitCounter) != 0
}
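
// The exit flag is an atomic counter rather than a bool or a channel, so any
// goroutine may signal teardown any number of times without extra locking;
// any non-zero value means the connection is being torn down.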

// a message is sent as a 4-byte little-endian length prefix followed by the payload itself
func (conn *Connection) Send_Message(data_bytes []byte) {

	// measure parameters
	conn.SpeedOut.Incr(int64(len(data_bytes)) + 4)
	atomic.AddUint64(&conn.BytesOut, uint64(len(data_bytes))+4) // single atomic add avoids the load/store race

	if atomic.LoadUint32(&conn.State) != HANDSHAKE_PENDING {
		atomic.StoreUint32(&conn.State, ACTIVE)
	}
	conn.Send_Message_prelocked(data_bytes)
}

// used when we have a command queue
// assuming the peer lock has already been taken
func (conn *Connection) Send_Message_prelocked(data_bytes []byte) {

	conn.writelock.Lock()
	defer conn.writelock.Unlock()

	defer func() {
		if r := recover(); r != nil {
			conn.logger.Warnf("Recovered while handling connection, Stack trace below %+v", r)
			conn.logger.Warnf("Stack trace  \n%s", debug.Stack())
			conn.Exit()
		}
	}()

	if conn.IsExitInProgress() {
		return
	}

	var length_bytes [4]byte
	binary.LittleEndian.PutUint32(length_bytes[:], uint32(len(data_bytes)))

	// each write has a deadline of 60 secs
	conn.Conn.SetWriteDeadline(time.Now().Add(60 * time.Second))
	if _, err := conn.Conn.Write(length_bytes[:]); err != nil { // send the length prefix
		conn.Exit()
		return
	}

	conn.Conn.SetWriteDeadline(time.Now().Add(60 * time.Second))
	if _, err := conn.Conn.Write(data_bytes); err != nil { // send the message itself
		conn.Exit()
		return
	}
}
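
// For reference, the frame written above is simply
// [4-byte little-endian length][payload]. A minimal standalone sketch of the
// same framing against any io.Writer (the helper name write_frame_example is
// hypothetical, for illustration only):
//
//	func write_frame_example(w io.Writer, payload []byte) error {
//		var prefix [4]byte
//		binary.LittleEndian.PutUint32(prefix[:], uint32(len(payload)))
//		if _, err := w.Write(prefix[:]); err != nil { // length prefix first
//			return err
//		}
//		_, err := w.Write(payload) // then the payload itself
//		return err
//	}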

// reads one length-prefixed data frame from the connection
// the timeout parameter is currently unused; read deadlines come from set_timeout()
func (connection *Connection) Read_Data_Frame(timeout int64, max_block_size uint32) (data_buf []byte) {

	var frame_length_buf [4]byte

	connection.set_timeout()
	nbyte, err := io.ReadFull(connection.Conn, frame_length_buf[:])
	if err != nil || nbyte != 4 {
		// error while reading from connection, we must disconnect it
		rlog.Warnf("Could not read length prefix err %s %s", err, globals.CTXString(connection.logger))

		connection.Exit()

		return
	}

	//  time to ban
	frame_length := binary.LittleEndian.Uint32(frame_length_buf[:])
	if frame_length == 0 || uint64(frame_length) > (15*config.CRYPTONOTE_MAX_BLOCK_SIZE/10) || uint64(frame_length) > (2*uint64(max_block_size)) { // 15/10 = 1.5x the max block size
		// most probably a memory DDOS attack, kill the connection
		rlog.Warnf("Frame length is too big Expected %d Actual %d %s", (2 * uint64(max_block_size)), frame_length, globals.CTXString(connection.logger))
		connection.Exit()
		return
	}

	data_buf = make([]byte, frame_length)
	connection.set_timeout()
	data_size, err := io.ReadFull(connection.Conn, data_buf)
	if err != nil || data_size <= 0 || uint32(data_size) != frame_length {
		// error while reading from connection, we must kill it
		rlog.Warnf("Could not read data, read %d, frame length %d err %s %s", data_size, frame_length, err, globals.CTXString(connection.logger))
		connection.Exit()
		return
	}
	data_buf = data_buf[:frame_length]

	// measure parameters
	connection.SpeedIn.Incr(int64(frame_length) + 4)
	atomic.AddUint64(&connection.BytesIn, uint64(frame_length)+4) // atomic, for consistency with BytesOut
	if atomic.LoadUint32(&connection.State) != HANDSHAKE_PENDING {
		atomic.StoreUint32(&connection.State, ACTIVE)
	}

	return data_buf
}
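
// On any failure Read_Data_Frame calls Exit() and returns a nil buffer; the
// read loop in Handle_Connection below relies on IsExitInProgress() rather
// than the return value to detect the failure.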

// handles both server and client connections
func Handle_Connection(conn net.Conn, remote_addr *net.TCPAddr, incoming bool, sync_node bool) {

	var err error

	defer func() {
		if r := recover(); r != nil { // under rare conditions the defer below can itself raise an exception, catch it now
			// connection.logger.Warnf("Recovered while handling connection RARE", r)
			rlog.Warnf("Recovered while handling connection RARE")
			//conn.Exit()
		}
	}()

	var connection Connection
	connection.Incoming = incoming
	connection.Conn = conn
	connection.SyncNode = sync_node
	connection.Addr = remote_addr // since we may be connecting via socks, get the target IP
	//connection.Command_queue = list.New() // init command queue
	connection.Objects = make(chan Queued_Command, 2048)
	connection.CDIFF.Store(new(big.Int).SetUint64(1))
	connection.State = HANDSHAKE_PENDING

	connection.request_time.Store(time.Now())
	//connection.Exit = make(chan bool)
	connection.SpeedIn = ratecounter.NewRateCounter(60 * time.Second)
	connection.SpeedOut = ratecounter.NewRateCounter(60 * time.Second)

	if incoming {
		connection.logger = logger.WithFields(log.Fields{"RIP": remote_addr.String(), "DIR": "INC"})
	} else {
		connection.logger = logger.WithFields(log.Fields{"RIP": remote_addr.String(), "DIR": "OUT"})
	}
	// this is to kill most of the races related to the logger
	connection.logid = globals.CTXString(connection.logger)

	defer func() {
		if r := recover(); r != nil {
			connection.logger.Warnf("Recovered while handling connection, Stack trace below %+v", r)
			connection.logger.Warnf("Stack trace  \n%s", debug.Stack())
			connection.Exit()
		}
	}()

	// goroutine that ticks every second: it tears the connection down if exit
	// is signalled, sends a timed sync when idle, and rehandshakes every half hour
	go func() {

		defer func() {
			if r := recover(); r != nil { // under rare conditions the defer below can itself raise an exception, catch it now
				// connection.logger.Warnf("Recovered while handling connection RARE", r)
				rlog.Warnf("Recovered while handling Timed Sync")
				connection.Exit()
			}
		}()

		ticker := time.NewTicker(1 * time.Second) // 1 second ticker
		idle := 0
		rehandshake := 0
		for {

			if connection.IsExitInProgress() {

				//connection.logger.Warnf("Removing connection")
				ticker.Stop() // release resources of timer
				conn.Close()
				Connection_Delete(&connection)
				return // close the connection and close the routine
			}

			select {
			case <-ticker.C:
				idle++
				rehandshake++
				if idle > 2 { // if idle more than 2 secs, we should send a timed sync
					if atomic.LoadUint32(&connection.State) != HANDSHAKE_PENDING {
						atomic.StoreUint32(&connection.State, IDLE)
						connection.Send_TimedSync(true)
						idle = 0
					}

					// if no timed sync response in 2 minutes, kill the connection
					if time.Since(connection.request_time.Load().(time.Time)) > 120*time.Second {
						connection.Exit()
					}

					//if !connection.Incoming { // there is no point in sending timed sync from both sides simultaneously

					if rehandshake > 1800 { // rehandshake to exchange peers every half hour
						connection.Send_Handshake(true)
						rehandshake = 0

						// run cleanup process every 1800 seconds
						connection.TXpool_cache_lock.Lock()
						if connection.TXpool_cache != nil {
							current_time := uint32(time.Now().Unix())
							for k, v := range connection.TXpool_cache {
								if (v + 1800) < current_time {
									delete(connection.TXpool_cache, k)
								}
							}
						}
						connection.TXpool_cache_lock.Unlock()
					}
					//}
				}
			case <-Exit_Event:
				ticker.Stop() // release resources of timer
				conn.Close()
				Connection_Delete(&connection)
				return // close the connection and close the routine

			}
		}
	}()

	if !incoming {
		Connection_Add(&connection)     // add outgoing connection to pool; incoming connections are added once the handshake is done
		connection.Send_Handshake(true) // send handshake request
	}

	for {

		var command Sync_Struct // decode as sync minimum

		//connection.logger.Info("Waiting for frame")
		if connection.IsExitInProgress() {
			return
		}

		//connection.logger.Infof("Waiting for data frame")
		// no one should be able to request more than the necessary amount of buffer
		data_read := connection.Read_Data_Frame(0, uint32(config.CRYPTONOTE_MAX_BLOCK_SIZE*2))

		if connection.IsExitInProgress() {
			return
		}
		//connection.logger.Info("frame received")

		// connection.logger.Infof(" data frame arrived %d", len(data_read))

		// let's decode the command and make sure we understand it
		err = msgpack.Unmarshal(data_read, &command)
		if err != nil {
			rlog.Warnf("Error while decoding incoming frame err %s %s", err, globals.CTXString(connection.logger))
			connection.Exit()
			return
		}

		// check version sanctity
		//connection.logger.Infof(" data frame parsed %+v", command)

		// until the handshake is done, we do not process any other commands
		if atomic.LoadUint32(&connection.State) == HANDSHAKE_PENDING && !(command.Command == V2_COMMAND_HANDSHAKE || command.Command == V2_COMMAND_SYNC) {
			// client sent something else when we were waiting for handshake, ban the peer
			rlog.Warnf("Terminating connection, we were waiting for handshake but received %d %s", command.Command, globals.CTXString(connection.logger))

			connection.Exit()
			return
		}

		//connection.logger.Debugf("v2 command incoming  %d", command.Command)

		switch command.Command {
		case V2_COMMAND_HANDSHAKE:
			connection.Update(&command.Common)
			connection.Handle_Handshake(data_read)

		case V2_COMMAND_SYNC:
			connection.Update(&command.Common)
			connection.Handle_TimedSync(data_read)
		case V2_COMMAND_CHAIN_REQUEST:
			connection.Update(&command.Common)
			connection.Handle_ChainRequest(data_read)
		case V2_COMMAND_CHAIN_RESPONSE:
			connection.Update(&command.Common)
			connection.Handle_ChainResponse(data_read)
		case V2_COMMAND_OBJECTS_REQUEST:
			connection.Update(&command.Common)
			connection.Handle_ObjectRequest(data_read)
		case V2_COMMAND_OBJECTS_RESPONSE:
			connection.Update(&command.Common)
			connection.Handle_ObjectResponse(data_read)
		case V2_NOTIFY_NEW_BLOCK: // for notifications, process the notification first instead of syncing
			connection.Handle_Notification_Block(data_read)
			connection.Update(&command.Common) // we do it a bit later so we do not start syncing

		case V2_NOTIFY_NEW_TX:
			connection.Update(&command.Common)
			connection.Handle_Notification_Transaction(data_read)

		default:
			connection.logger.Debugf("Unhandled v2 command %d", command.Command)

		}

	}

}
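
// A minimal sketch of how an accept loop might hand incoming sockets to
// Handle_Connection (the listener setup and the name accept_loop_example are
// illustrative assumptions, not the daemon's actual server code):
//
//	func accept_loop_example(l net.Listener) {
//		for {
//			c, err := l.Accept()
//			if err != nil {
//				return
//			}
//			raddr := c.RemoteAddr().(*net.TCPAddr)      // TCP connections carry a *net.TCPAddr
//			go Handle_Connection(c, raddr, true, false) // incoming=true, sync_node=false
//		}
//	}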