github.com/MagHErmit/tendermint@v0.282.1/blockchain/v2/routine.go (about)

     1  package v2
     2  
     3  import (
     4  	"fmt"
     5  	"strings"
     6  	"sync/atomic"
     7  
     8  	"github.com/Workiva/go-datastructures/queue"
     9  
    10  	"github.com/MagHErmit/tendermint/libs/log"
    11  )
    12  
// handleFunc consumes one input Event and produces the resulting output
// Event, or an error which terminates the routine.
type handleFunc = func(event Event) (Event, error)

// historySize caps how many recent events a Routine retains for the
// diagnostic dump emitted when the handle loop panics.
const historySize = 25
    16  
// Routine is a structure that models a finite state machine as a serialized
// stream of events processed by a handle function. This Routine structure
// handles the concurrency and messaging guarantees. Events sent via
// `send` are handled by the `handle` function to produce an iterator
// `next()`. Calling `stop()` on a routine will conclude processing of all
// sent events and produce a `final()` event representing the terminal state.
type Routine struct {
	name    string               // identifies the routine in logs, metrics, and panics
	handle  handleFunc           // processes each dequeued event
	queue   *queue.PriorityQueue // serialized input events; disposed on stop()
	history []Event              // most recent events (bounded by historySize) for panic dumps
	out     chan Event           // events produced by handle, consumed via next()
	fin     chan error           // terminal reason, delivered once via final()
	rdy     chan struct{}        // closed when the routine has started running
	running *uint32              // 0/1 flag toggled atomically by start()
	logger  log.Logger
	metrics *Metrics
}
    35  
    36  func newRoutine(name string, handleFunc handleFunc, bufferSize int) *Routine {
    37  	return &Routine{
    38  		name:    name,
    39  		handle:  handleFunc,
    40  		queue:   queue.NewPriorityQueue(bufferSize, true),
    41  		history: make([]Event, 0, historySize),
    42  		out:     make(chan Event, bufferSize),
    43  		rdy:     make(chan struct{}, 1),
    44  		fin:     make(chan error, 1),
    45  		running: new(uint32),
    46  		logger:  log.NewNopLogger(),
    47  		metrics: NopMetrics(),
    48  	}
    49  }
    50  
// setLogger replaces the routine's logger (a no-op logger by default).
// Not safe to call concurrently with a running routine.
func (rt *Routine) setLogger(logger log.Logger) {
	rt.logger = logger
}
    54  
// setMetrics replaces the routine's metrics sink (no-op metrics by default).
// Not safe to call concurrently with a running routine.
func (rt *Routine) setMetrics(metrics *Metrics) {
	rt.metrics = metrics
}
    58  
    59  func (rt *Routine) start() {
    60  	rt.logger.Info("routine start", "msg", log.NewLazySprintf("%s: run", rt.name))
    61  	running := atomic.CompareAndSwapUint32(rt.running, uint32(0), uint32(1))
    62  	if !running {
    63  		panic(fmt.Sprintf("%s is already running", rt.name))
    64  	}
    65  	close(rt.rdy)
    66  	defer func() {
    67  		if r := recover(); r != nil {
    68  			var (
    69  				b strings.Builder
    70  				j int
    71  			)
    72  			for i := len(rt.history) - 1; i >= 0; i-- {
    73  				fmt.Fprintf(&b, "%d: %+v\n", j, rt.history[i])
    74  				j++
    75  			}
    76  			panic(fmt.Sprintf("%v\nlast events:\n%v", r, b.String()))
    77  		}
    78  		stopped := atomic.CompareAndSwapUint32(rt.running, uint32(1), uint32(0))
    79  		if !stopped {
    80  			panic(fmt.Sprintf("%s is failed to stop", rt.name))
    81  		}
    82  	}()
    83  
    84  	for {
    85  		events, err := rt.queue.Get(1)
    86  		if err == queue.ErrDisposed {
    87  			rt.terminate(nil)
    88  			return
    89  		} else if err != nil {
    90  			rt.terminate(err)
    91  			return
    92  		}
    93  		oEvent, err := rt.handle(events[0].(Event))
    94  		rt.metrics.EventsHandled.With("routine", rt.name).Add(1)
    95  		if err != nil {
    96  			rt.terminate(err)
    97  			return
    98  		}
    99  		rt.metrics.EventsOut.With("routine", rt.name).Add(1)
   100  		rt.logger.Debug("routine start", "msg", log.NewLazySprintf("%s: produced %T %+v", rt.name, oEvent, oEvent))
   101  
   102  		// Skip rTrySchedule and rProcessBlock events as they clutter the history
   103  		// due to their frequency.
   104  		switch events[0].(type) {
   105  		case rTrySchedule:
   106  		case rProcessBlock:
   107  		default:
   108  			rt.history = append(rt.history, events[0].(Event))
   109  			if len(rt.history) > historySize {
   110  				rt.history = rt.history[1:]
   111  			}
   112  		}
   113  
   114  		rt.out <- oEvent
   115  	}
   116  }
   117  
   118  // XXX: look into returning OpError in the net package
   119  func (rt *Routine) send(event Event) bool {
   120  	rt.logger.Debug("routine send", "msg", log.NewLazySprintf("%s: received %T %+v", rt.name, event, event))
   121  	if !rt.isRunning() {
   122  		return false
   123  	}
   124  	err := rt.queue.Put(event)
   125  	if err != nil {
   126  		rt.metrics.EventsShed.With("routine", rt.name).Add(1)
   127  		rt.logger.Error(fmt.Sprintf("%s: send failed, queue was full/stopped", rt.name))
   128  		return false
   129  	}
   130  
   131  	rt.metrics.EventsSent.With("routine", rt.name).Add(1)
   132  	return true
   133  }
   134  
// isRunning reports whether start() has been called and the routine has
// not yet terminated (atomically reads the running flag).
func (rt *Routine) isRunning() bool {
	return atomic.LoadUint32(rt.running) == 1
}
   138  
// next returns the channel on which events produced by the handle
// function are delivered.
func (rt *Routine) next() chan Event {
	return rt.out
}
   142  
// ready returns a channel that is closed once the routine has started
// running.
func (rt *Routine) ready() chan struct{} {
	return rt.rdy
}
   146  
   147  func (rt *Routine) stop() {
   148  	if !rt.isRunning() { // XXX: this should check rt.queue.Disposed()
   149  		return
   150  	}
   151  
   152  	rt.logger.Info("routine stop", "msg", log.NewLazySprintf("%s: stop", rt.name))
   153  	rt.queue.Dispose() // this should block until all queue items are free?
   154  }
   155  
// final returns the channel on which the routine's terminal reason is
// delivered: nil for a clean stop, or the error that ended processing.
func (rt *Routine) final() chan error {
	return rt.fin
}
   159  
// XXX: Maybe get rid of this
// terminate publishes the routine's terminal reason (nil on clean stop)
// on rt.fin. fin is buffered with capacity 1, so the single send from
// the run loop does not block.
func (rt *Routine) terminate(reason error) {
	// We don't close the rt.out channel here, to avoid spinning on the closed channel
	// in the event loop.
	rt.fin <- reason
}