github.com/okex/exchain@v1.8.0/libs/tendermint/blockchain/v2/routine.go

package v2

import (
	"fmt"
	"sync/atomic"

	"github.com/Workiva/go-datastructures/queue"

	"github.com/okex/exchain/libs/tendermint/libs/log"
)

type handleFunc = func(event Event) (Event, error)

// Routine is a structure that models a finite state machine as a serialized
// stream of events processed by a handle function. This Routine structure
// handles the concurrency and messaging guarantees. Events sent via
// `send` are handled by the `handle` function to produce an iterator
// `next()`. Calling `stop()` on a routine will conclude processing of all
// sent events and produce a `final()` event representing the terminal state.
type Routine struct {
	name    string
	handle  handleFunc
	queue   *queue.PriorityQueue
	out     chan Event
	fin     chan error
	rdy     chan struct{}
	running *uint32
	logger  log.Logger
	metrics *Metrics
}

func newRoutine(name string, handleFunc handleFunc, bufferSize int) *Routine {
	return &Routine{
		name:    name,
		handle:  handleFunc,
		queue:   queue.NewPriorityQueue(bufferSize, true),
		out:     make(chan Event, bufferSize),
		rdy:     make(chan struct{}, 1),
		fin:     make(chan error, 1),
		running: new(uint32),
		logger:  log.NewNopLogger(),
		metrics: NopMetrics(),
	}
}

// nolint: unused
func (rt *Routine) setLogger(logger log.Logger) {
	rt.logger = logger
}

// nolint:unused
func (rt *Routine) setMetrics(metrics *Metrics) {
	rt.metrics = metrics
}

func (rt *Routine) start() {
	rt.logger.Info(fmt.Sprintf("%s: run\n", rt.name))
	running := atomic.CompareAndSwapUint32(rt.running, uint32(0), uint32(1))
	if !running {
		panic(fmt.Sprintf("%s is already running", rt.name))
	}
	close(rt.rdy)
	defer func() {
		stopped := atomic.CompareAndSwapUint32(rt.running, uint32(1), uint32(0))
		if !stopped {
			panic(fmt.Sprintf("%s failed to stop", rt.name))
		}
	}()

	for {
		events, err := rt.queue.Get(1)
		if err == queue.ErrDisposed {
			rt.terminate(nil)
			return
		} else if err != nil {
			rt.terminate(err)
			return
		}
		oEvent, err := rt.handle(events[0].(Event))
		rt.metrics.EventsHandled.With("routine", rt.name).Add(1)
		if err != nil {
			rt.terminate(err)
			return
		}
		rt.metrics.EventsOut.With("routine", rt.name).Add(1)
		rt.logger.Debug(fmt.Sprintf("%s: produced %T %+v\n", rt.name, oEvent, oEvent))

		rt.out <- oEvent
	}
}

// XXX: look into returning OpError in the net package
func (rt *Routine) send(event Event) bool {
	rt.logger.Debug(fmt.Sprintf("%s: received %T %+v", rt.name, event, event))
	if !rt.isRunning() {
		return false
	}
	err := rt.queue.Put(event)
	if err != nil {
		rt.metrics.EventsShed.With("routine", rt.name).Add(1)
		rt.logger.Info(fmt.Sprintf("%s: send failed, queue was full/stopped\n", rt.name))
		return false
	}

	rt.metrics.EventsSent.With("routine", rt.name).Add(1)
	return true
}

func (rt *Routine) isRunning() bool {
	return atomic.LoadUint32(rt.running) == 1
}

func (rt *Routine) next() chan Event {
	return rt.out
}

func (rt *Routine) ready() chan struct{} {
	return rt.rdy
}

func (rt *Routine) stop() {
	if !rt.isRunning() { // XXX: this should check rt.queue.Disposed()
		return
	}

	rt.logger.Info(fmt.Sprintf("%s: stop\n", rt.name))
	rt.queue.Dispose() // this should block until all queue items are free?
}

func (rt *Routine) final() chan error {
	return rt.fin
}

// XXX: Maybe get rid of this
func (rt *Routine) terminate(reason error) {
	// We don't close the rt.out channel here, to avoid spinning on the closed channel
	// in the event loop.
	rt.fin <- reason
}
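
// The sketch below is not part of the original file; it is a minimal,
// illustrative example of how a Routine is typically driven end to end:
// start it on a goroutine, wait on ready(), feed it events with send(),
// consume results from next(), then stop() and read final(). It assumes
// Event is defined elsewhere in this package as an alias for queue.Item,
// so any type with a Compare method can be sent; the pingEvent type and
// exampleRoutineUsage function are hypothetical and exist only for
// illustration.
type pingEvent struct{}

// Compare satisfies queue.Item so a pingEvent can be placed on the priority queue.
func (pingEvent) Compare(other queue.Item) int { return 0 }

func exampleRoutineUsage() error {
	// Echo handler: each handled event is produced unchanged on next().
	echo := func(event Event) (Event, error) {
		return event, nil
	}

	rt := newRoutine("echo", echo, 10)
	go rt.start()
	<-rt.ready() // start() closes rdy once the running flag is set

	if rt.send(pingEvent{}) {
		<-rt.next() // consume the event produced by the handler
	}

	rt.stop()           // disposes the queue; start() then drains and terminates
	return <-rt.final() // nil when the queue was disposed without error
}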