github.com/noirx94/tendermintmp@v0.0.1/blockchain/v2/routine.go

package v2

import (
	"fmt"
	"strings"
	"sync/atomic"

	"github.com/Workiva/go-datastructures/queue"

	"github.com/tendermint/tendermint/libs/log"
)

type handleFunc = func(event Event) (Event, error)

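// A handle function maps a single input event to a single output event (or an
// error). A minimal illustrative sketch, not part of the original file, using
// hypothetical event types demoRequest, demoResponse, and demoNoOp:
//
//	func demoHandle(event Event) (Event, error) {
//		switch event.(type) {
//		case demoRequest:
//			return demoResponse{}, nil
//		default:
//			return demoNoOp{}, nil
//		}
//	}
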
const historySize = 25

// Routine is a structure that models a finite state machine as a serialized
// stream of events processed by a handle function. This Routine structure
// handles the concurrency and messaging guarantees. Events sent via `send`
// are handled by the `handle` function, and the resulting events are made
// available on the `next()` channel. Calling `stop()` on a routine concludes
// processing of all sent events and produces a `final()` event representing
// the terminal state.
type Routine struct {
	name    string
	handle  handleFunc
	queue   *queue.PriorityQueue
	history []Event
	out     chan Event
	fin     chan error
	rdy     chan struct{}
	running *uint32
	logger  log.Logger
	metrics *Metrics
}

func newRoutine(name string, handleFunc handleFunc, bufferSize int) *Routine {
	return &Routine{
		name:    name,
		handle:  handleFunc,
		queue:   queue.NewPriorityQueue(bufferSize, true),
		history: make([]Event, 0, historySize),
		out:     make(chan Event, bufferSize),
		rdy:     make(chan struct{}, 1),
		fin:     make(chan error, 1),
		running: new(uint32),
		logger:  log.NewNopLogger(),
		metrics: NopMetrics(),
	}
}

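// Usage sketch (illustrative only, not part of the original file): assuming a
// hypothetical handler `demoHandle` of type handleFunc and a hypothetical
// event type `demoEvent` implementing Event, the intended lifecycle is:
//
//	rt := newRoutine("demo", demoHandle, 10)
//	go rt.start()
//	<-rt.ready()          // closed once the routine is running
//	rt.send(demoEvent{})  // false if the routine is stopped or the queue is full
//	ev := <-rt.next()     // event produced by the handle function
//	rt.stop()             // dispose the queue; processing concludes
//	err := <-rt.final()   // nil on clean shutdown, otherwise the handler error
//	_, _ = ev, err
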
func (rt *Routine) setLogger(logger log.Logger) {
	rt.logger = logger
}

// nolint:unused
func (rt *Routine) setMetrics(metrics *Metrics) {
	rt.metrics = metrics
}

func (rt *Routine) start() {
	rt.logger.Info(fmt.Sprintf("%s: run", rt.name))
	// Guard against starting the same routine twice.
	running := atomic.CompareAndSwapUint32(rt.running, uint32(0), uint32(1))
	if !running {
		panic(fmt.Sprintf("%s is already running", rt.name))
	}
	close(rt.rdy)
	defer func() {
		// On panic, re-panic with the most recent events (newest first)
		// appended to the message to aid debugging.
		if r := recover(); r != nil {
			var (
				b strings.Builder
				j int
			)
			for i := len(rt.history) - 1; i >= 0; i-- {
				fmt.Fprintf(&b, "%d: %+v\n", j, rt.history[i])
				j++
			}
			panic(fmt.Sprintf("%v\nlast events:\n%v", r, b.String()))
		}
		stopped := atomic.CompareAndSwapUint32(rt.running, uint32(1), uint32(0))
		if !stopped {
			panic(fmt.Sprintf("%s failed to stop", rt.name))
		}
	}()

	// Event loop: dequeue one event at a time, handle it, record it in the
	// bounded history, and forward the produced event on the out channel.
	for {
		events, err := rt.queue.Get(1)
		if err == queue.ErrDisposed {
			rt.terminate(nil)
			return
		} else if err != nil {
			rt.terminate(err)
			return
		}
		oEvent, err := rt.handle(events[0].(Event))
		rt.metrics.EventsHandled.With("routine", rt.name).Add(1)
		if err != nil {
			rt.terminate(err)
			return
		}
		rt.metrics.EventsOut.With("routine", rt.name).Add(1)
		rt.logger.Debug(fmt.Sprintf("%s: produced %T %+v", rt.name, oEvent, oEvent))

		// Skip rTrySchedule and rProcessBlock events as they clutter the history
		// due to their frequency.
		switch events[0].(type) {
		case rTrySchedule, rProcessBlock:
		default:
			rt.history = append(rt.history, events[0].(Event))
			if len(rt.history) > historySize {
				rt.history = rt.history[1:]
			}
		}

		rt.out <- oEvent
	}
}

// XXX: look into returning an OpError, as the net package does
// send enqueues an event for processing. It returns false if the routine is
// not running or if the queue rejected the event (full or disposed).
func (rt *Routine) send(event Event) bool {
	rt.logger.Debug(fmt.Sprintf("%s: received %T %+v", rt.name, event, event))
	if !rt.isRunning() {
		return false
	}
	err := rt.queue.Put(event)
	if err != nil {
		rt.metrics.EventsShed.With("routine", rt.name).Add(1)
		rt.logger.Error(fmt.Sprintf("%s: send failed, queue was full/stopped", rt.name))
		return false
	}

	rt.metrics.EventsSent.With("routine", rt.name).Add(1)
	return true
}

func (rt *Routine) isRunning() bool {
	return atomic.LoadUint32(rt.running) == 1
}

// next returns the channel of events produced by the handle function.
func (rt *Routine) next() chan Event {
	return rt.out
}

// ready returns a channel that is closed once the routine has started.
func (rt *Routine) ready() chan struct{} {
	return rt.rdy
}

// stop disposes the queue, which concludes processing of all sent events.
func (rt *Routine) stop() {
	if !rt.isRunning() { // XXX: this should check rt.queue.Disposed()
		return
	}

	rt.logger.Info(fmt.Sprintf("%s: stop", rt.name))
	rt.queue.Dispose() // this should block until all queue items are free?
}

// final returns the channel on which the terminal error (nil on a clean
// shutdown) is delivered.
func (rt *Routine) final() chan error {
	return rt.fin
}

// XXX: Maybe get rid of this
func (rt *Routine) terminate(reason error) {
	// We don't close the rt.out channel here, to avoid spinning on the closed channel
	// in the event loop.
	rt.fin <- reason
}