github.com/fozzysec/SiaPrime@v0.0.0-20190612043147-66c8e8d11fe3/modules/miningpool/dispatcher.go

package pool

import (
    // "fmt"

    "net"
    "time"

    "sync"
    "sync/atomic"

    "SiaPrime/persist"
)

// Dispatcher contains a map of ip addresses to handlers
type Dispatcher struct {
    handlers          map[string]*Handler
    ln                net.Listener
    mu                sync.RWMutex
    p                 *Pool
    log               *persist.Logger
    connectionsOpened uint64
}

// NumConnections returns the number of open tcp connections
func (d *Dispatcher) NumConnections() int {
    d.mu.Lock()
    defer d.mu.Unlock()
    return len(d.handlers)
}

// NumConnectionsOpened returns the number of tcp connections that the pool
// has ever opened
func (d *Dispatcher) NumConnectionsOpened() uint64 {
    return atomic.LoadUint64(&d.connectionsOpened)
}

// IncrementConnectionsOpened increments the number of tcp connections that the
// pool has ever opened
func (d *Dispatcher) IncrementConnectionsOpened() {
    atomic.AddUint64(&d.connectionsOpened, 1)
}

// AddHandler connects the incoming connection to the handler that will handle it
func (d *Dispatcher) AddHandler(conn net.Conn) {
    tcpconn := conn.(*net.TCPConn)
    tcpconn.SetKeepAlive(true)
    //tcpconn.SetKeepAlivePeriod(30 * time.Second)
    tcpconn.SetKeepAlivePeriod(15 * time.Second)
    tcpconn.SetNoDelay(true)
    // maybe this will help with our disconnection problems
    tcpconn.SetLinger(2)

    addr := conn.RemoteAddr().String()
    handler := &Handler{
        conn:   conn,
        ready:  make(chan bool, 1),
        closed: make(chan bool, 2),
        notify: make(chan bool, numPendingNotifies),
        p:      d.p,
        log:    d.log,
    }
    d.mu.Lock()
    d.handlers[addr] = handler
    d.mu.Unlock()

    go d.AddNotifier(handler)
    // fmt.Printf("AddHandler listen() called: %s\n", addr)
    handler.Listen()

    <-handler.closed // when the connection is closed, remove the handler from handlers
    d.log.Println("handler releasing")
    d.mu.Lock()
    delete(d.handlers, addr)
    //fmt.Printf("Exiting AddHandler, %d connections remaining\n", len(d.handlers))
    d.mu.Unlock()
}

// AddNotifier waits until the handler signals that it is ready and then starts
// the handler's notifier.
func (d *Dispatcher) AddNotifier(h *Handler) {
    //case <-h.closed not needed, won't fail while the handler is being set up
    select {
    case <-h.ready:
        d.log.Println("Handler done, Notifier spawning.")
        h.setupNotifier()
    }
}

// ListenHandlers listens on the passed port and, upon accepting an incoming
// connection, adds a handler to deal with it
func (d *Dispatcher) ListenHandlers(port string) {
    var err error
    err = d.p.tg.Add()
    if err != nil {
        // If this goroutine is not run before shutdown starts, this
        // codeblock is reachable.
        return
    }

    d.ln, err = net.Listen("tcp", ":"+port)
    if err != nil {
        //d.log.Println(err)
        panic(err)
        // TODO: add error chan to report this
        //return
    }
    // fmt.Printf("Listening: %s\n", port)

    defer d.ln.Close()
    defer d.p.tg.Done()

    for {
        var conn net.Conn
        var err error
        select {
        case <-d.p.tg.StopChan():
            //fmt.Println("Closing listener")
            //d.ln.Close()
            //fmt.Println("Done closing listener")
            return
        default:
            conn, err = d.ln.Accept() // accept connection
            d.IncrementConnectionsOpened()
            if err != nil {
                //d.log.Println(err)
                continue
            }
        }
        go d.AddHandler(conn)
    }
}

// NotifyClients tells the dispatcher to notify all clients that the block has
// changed
func (d *Dispatcher) NotifyClients() {
    d.mu.Lock()
    defer d.mu.Unlock()
    d.log.Printf("Block changed, notifying %d clients\n", len(d.handlers))
    for _, h := range d.handlers {
        // with the new notifier the buffer no longer needs to be large (20);
        // numPendingNotifies (5) is enough to keep this from blocking for long
        if len(h.notify) < numPendingNotifies {
            h.notify <- true
        }
    }
}

// ClearJobAndNotifyClients clears all stale jobs and tells the dispatcher to
// notify all clients that the block has changed
func (d *Dispatcher) ClearJobAndNotifyClients() {
    d.mu.Lock()
    defer d.mu.Unlock()
    d.log.Printf("Work on new block, clear jobs and notifying %d clients\n", len(d.handlers))
    for _, h := range d.handlers {
        if h != nil && h.GetSession() != nil {
            if h.GetSession().GetCurrentWorker() == nil {
                // this happens when the handler and session have just been
                // initialized and no mining.authorize has arrived yet, so the
                // worker is nil; no stratum notify has been sent, so there is
                // nothing to clear or notify
                //d.log.Printf("Clear jobs and Notifying client: worker is nil\n")
                continue
            }
        } else {
            // this happens when the handler is initializing and the session
            // has not been set up yet
            //d.log.Printf("Clear jobs and Notifying client: handler or session nil\n")
            continue
        }
        h.GetSession().clearJobs()
        // with the new notifier the buffer no longer needs to be large (20);
        // numPendingNotifies (5) is enough to keep this from blocking for long
        if len(h.notify) < numPendingNotifies {
            h.notify <- true
        }
    }
}
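
// A minimal usage sketch, assuming the pool constructs the Dispatcher directly
// and runs the blocking ListenHandlers loop on its own goroutine. The struct
// literal, the logger, and the port are illustrative placeholders, not code
// from this package; the real wiring lives elsewhere in the pool setup.
//
//    d := &Dispatcher{
//        handlers: make(map[string]*Handler), // must be initialized before AddHandler stores into it
//        p:        p,                         // placeholder: the owning *Pool
//        log:      logger,                    // placeholder: a *persist.Logger
//    }
//    go d.ListenHandlers("3333") // placeholder port; blocks accepting stratum connections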