gitlab.com/SiaPrime/SiaPrime@v1.4.1/modules/miningpool/dispatcher.go (about)

package pool

import (
	// "fmt"

	"net"
	"sync/atomic"
	"time"

	"github.com/sasha-s/go-deadlock"

	"gitlab.com/SiaPrime/SiaPrime/persist"
)
    13  
// Dispatcher accepts incoming stratum TCP connections and tracks one
// Handler per connected client, keyed by the client's remote address.
type Dispatcher struct {
	handlers          map[string]*Handler // active handlers, keyed by remote "ip:port"
	ln                net.Listener        // TCP listener created by ListenHandlers
	mu                deadlock.RWMutex    // guards the handlers map
	p                 *Pool               // owning pool; provides the thread group
	log               *persist.Logger
	connectionsOpened uint64 // total connections accepted over the pool's lifetime
}
    24  
    25  // NumConnections returns the number of open tcp connections
    26  func (d *Dispatcher) NumConnections() int {
    27  	d.mu.Lock()
    28  	defer d.mu.Unlock()
    29  	return len(d.handlers)
    30  }
    31  
    32  // NumConnectionsOpened returns the number of tcp connections that the pool
    33  // has ever opened
    34  func (d *Dispatcher) NumConnectionsOpened() uint64 {
    35  	d.mu.Lock()
    36  	defer d.mu.Unlock()
    37  	return d.connectionsOpened
    38  }
    39  
    40  // IncrementConnectionsOpened increments the number of tcp connections that the
    41  // pool has ever opened
    42  func (d *Dispatcher) IncrementConnectionsOpened() {
    43  	// XXX: this is causing a deadlock
    44  	/*
    45  		d.mu.Lock()
    46  		defer d.mu.Unlock()
    47  		d.connectionsOpened += 1
    48  	*/
    49  }
    50  
    51  //AddHandler connects the incoming connection to the handler which will handle it
    52  func (d *Dispatcher) AddHandler(conn net.Conn) {
    53  	addr := conn.RemoteAddr().String()
    54  	handler := &Handler{
    55  		conn:   conn,
    56  		closed: make(chan bool, 2),
    57  		notify: make(chan bool, numPendingNotifies),
    58  		p:      d.p,
    59  		log:    d.log,
    60  	}
    61  	d.mu.Lock()
    62  	d.handlers[addr] = handler
    63  	d.mu.Unlock()
    64  
    65  	// fmt.Printf("AddHandler listen() called: %s\n", addr)
    66  	handler.Listen()
    67  
    68  	<-handler.closed // when connection closed, remove handler from handlers
    69  	d.mu.Lock()
    70  	delete(d.handlers, addr)
    71  	//fmt.Printf("Exiting AddHandler, %d connections remaining\n", len(d.handlers))
    72  	d.mu.Unlock()
    73  }
    74  
    75  // ListenHandlers listens on a passed port and upon accepting the incoming connection,
    76  // adds the handler to deal with it
    77  func (d *Dispatcher) ListenHandlers(port string) {
    78  	var err error
    79  	err = d.p.tg.Add()
    80  	if err != nil {
    81  		// If this goroutine is not run before shutdown starts, this
    82  		// codeblock is reachable.
    83  		return
    84  	}
    85  
    86  	d.ln, err = net.Listen("tcp", ":"+port)
    87  	if err != nil {
    88  		d.log.Println(err)
    89  		panic(err)
    90  		// TODO: add error chan to report this
    91  		//return
    92  	}
    93  	// fmt.Printf("Listening: %s\n", port)
    94  
    95  	defer d.ln.Close()
    96  	defer d.p.tg.Done()
    97  
    98  	for {
    99  		var conn net.Conn
   100  		var err error
   101  		select {
   102  		case <-d.p.tg.StopChan():
   103  			//fmt.Println("Closing listener")
   104  			//d.ln.Close()
   105  			//fmt.Println("Done closing listener")
   106  			return
   107  		default:
   108  			conn, err = d.ln.Accept() // accept connection
   109  			d.IncrementConnectionsOpened()
   110  			if err != nil {
   111  				d.log.Println(err)
   112  				continue
   113  			}
   114  		}
   115  
   116  		tcpconn := conn.(*net.TCPConn)
   117  		tcpconn.SetKeepAlive(true)
   118  		//tcpconn.SetKeepAlivePeriod(30 * time.Second)
   119  		tcpconn.SetKeepAlivePeriod(15 * time.Second)
   120  		tcpconn.SetNoDelay(true)
   121  		// maybe this will help with our disconnection problems
   122  		tcpconn.SetLinger(2)
   123  
   124  		go d.AddHandler(conn)
   125  	}
   126  }
   127  
   128  // NotifyClients tells the dispatcher to notify all clients that the block has
   129  // changed
   130  func (d *Dispatcher) NotifyClients() {
   131  	d.mu.Lock()
   132  	defer d.mu.Unlock()
   133  	d.log.Printf("Notifying %d clients\n", len(d.handlers))
   134  	for _, h := range d.handlers {
   135  		h.notify <- true
   136  	}
   137  }
   138  
   139  // ClearJobAndNotifyClients clear all stale jobs and tells the dispatcher to notify all clients that the block has
   140  // changed
   141  func (d *Dispatcher) ClearJobAndNotifyClients() {
   142  	d.mu.Lock()
   143  	defer d.mu.Unlock()
   144  	d.log.Printf("Clear jobs and Notifying %d clients\n", len(d.handlers))
   145  	for _, h := range d.handlers {
   146  		if h != nil && h.s != nil {
   147  			if h.s.CurrentWorker == nil {
   148  				// this will happen when handler init, session init,
   149  				// no mining.authorize happen yet, so worker is nil,
   150  				// at this time, no stratum notify ever happen, no need to clear or notify
   151  				d.log.Printf("Clear jobs and Notifying client: worker is nil\n")
   152  				continue
   153  			}
   154  		} else {
   155  			// this will happen when handler init, seesion is not
   156  			d.log.Printf("Clear jobs and Notifying client: handler or session nil\n")
   157  			continue
   158  		}
   159  		h.s.clearJobs()
   160  		h.notify <- true
   161  	}
   162  }