github.com/vipernet-xyz/tm@v0.34.24/blockchain/v2/routine.go (about)

     1  package v2
     2  
     3  import (
     4  	"fmt"
     5  	"strings"
     6  	"sync/atomic"
     7  
     8  	"github.com/Workiva/go-datastructures/queue"
     9  
    10  	"github.com/vipernet-xyz/tm/libs/log"
    11  )
    12  
// handleFunc is the state-transition function of a Routine: it consumes one
// input Event and produces one output Event, or an error that terminates the
// routine's event loop.
type handleFunc = func(event Event) (Event, error)

// historySize caps the number of recent events retained for the panic
// post-mortem dump in start().
const historySize = 25
    16  
// Routine is a structure that models a finite state machine as serialized
// stream of events processed by a handle function. This Routine structure
// handles the concurrency and messaging guarantees. Events are sent via
// `send` are handled by the `handle` function to produce an iterator
// `next()`. Calling `stop()` on a routine will conclude processing of all
// sent events and produce `final()` event representing the terminal state.
type Routine struct {
	name    string               // identifies the routine in logs, panics, and metrics labels
	handle  handleFunc           // state-transition function applied to each dequeued event
	queue   *queue.PriorityQueue // inbound events; disposing it (stop) terminates the loop
	history []Event              // ring of up to historySize recent events, dumped on panic
	out     chan Event           // outbound events produced by handle; read via next()
	fin     chan error           // buffered(1); carries the termination reason; read via final()
	rdy     chan struct{}        // closed by start() once the routine is accepting events
	running *uint32              // 0/1 flag toggled atomically by start()/its defer
	logger  log.Logger
	metrics *Metrics
}
    35  
    36  func newRoutine(name string, handleFunc handleFunc, bufferSize int) *Routine {
    37  	return &Routine{
    38  		name:    name,
    39  		handle:  handleFunc,
    40  		queue:   queue.NewPriorityQueue(bufferSize, true),
    41  		history: make([]Event, 0, historySize),
    42  		out:     make(chan Event, bufferSize),
    43  		rdy:     make(chan struct{}, 1),
    44  		fin:     make(chan error, 1),
    45  		running: new(uint32),
    46  		logger:  log.NewNopLogger(),
    47  		metrics: NopMetrics(),
    48  	}
    49  }
    50  
// setLogger replaces the routine's logger. Not safe to call concurrently
// with a running event loop; intended for setup before start().
func (rt *Routine) setLogger(logger log.Logger) {
	rt.logger = logger
}
    54  
    55  func (rt *Routine) start() {
    56  	rt.logger.Info("routine start", "msg", log.NewLazySprintf("%s: run", rt.name))
    57  	running := atomic.CompareAndSwapUint32(rt.running, uint32(0), uint32(1))
    58  	if !running {
    59  		panic(fmt.Sprintf("%s is already running", rt.name))
    60  	}
    61  	close(rt.rdy)
    62  	defer func() {
    63  		if r := recover(); r != nil {
    64  			var (
    65  				b strings.Builder
    66  				j int
    67  			)
    68  			for i := len(rt.history) - 1; i >= 0; i-- {
    69  				fmt.Fprintf(&b, "%d: %+v\n", j, rt.history[i])
    70  				j++
    71  			}
    72  			panic(fmt.Sprintf("%v\nlast events:\n%v", r, b.String()))
    73  		}
    74  		stopped := atomic.CompareAndSwapUint32(rt.running, uint32(1), uint32(0))
    75  		if !stopped {
    76  			panic(fmt.Sprintf("%s is failed to stop", rt.name))
    77  		}
    78  	}()
    79  
    80  	for {
    81  		events, err := rt.queue.Get(1)
    82  		if err == queue.ErrDisposed {
    83  			rt.terminate(nil)
    84  			return
    85  		} else if err != nil {
    86  			rt.terminate(err)
    87  			return
    88  		}
    89  		oEvent, err := rt.handle(events[0].(Event))
    90  		rt.metrics.EventsHandled.With("routine", rt.name).Add(1)
    91  		if err != nil {
    92  			rt.terminate(err)
    93  			return
    94  		}
    95  		rt.metrics.EventsOut.With("routine", rt.name).Add(1)
    96  		rt.logger.Debug("routine start", "msg", log.NewLazySprintf("%s: produced %T %+v", rt.name, oEvent, oEvent))
    97  
    98  		// Skip rTrySchedule and rProcessBlock events as they clutter the history
    99  		// due to their frequency.
   100  		switch events[0].(type) {
   101  		case rTrySchedule:
   102  		case rProcessBlock:
   103  		default:
   104  			rt.history = append(rt.history, events[0].(Event))
   105  			if len(rt.history) > historySize {
   106  				rt.history = rt.history[1:]
   107  			}
   108  		}
   109  
   110  		rt.out <- oEvent
   111  	}
   112  }
   113  
   114  // XXX: look into returning OpError in the net package
   115  func (rt *Routine) send(event Event) bool {
   116  	rt.logger.Debug("routine send", "msg", log.NewLazySprintf("%s: received %T %+v", rt.name, event, event))
   117  	if !rt.isRunning() {
   118  		return false
   119  	}
   120  	err := rt.queue.Put(event)
   121  	if err != nil {
   122  		rt.metrics.EventsShed.With("routine", rt.name).Add(1)
   123  		rt.logger.Error(fmt.Sprintf("%s: send failed, queue was full/stopped", rt.name))
   124  		return false
   125  	}
   126  
   127  	rt.metrics.EventsSent.With("routine", rt.name).Add(1)
   128  	return true
   129  }
   130  
// isRunning reports whether the event loop is currently active, i.e. start()
// has flipped the running flag and its defer has not yet cleared it.
func (rt *Routine) isRunning() bool {
	return atomic.LoadUint32(rt.running) == 1
}
   134  
// next returns the channel on which the routine publishes the events
// produced by its handle function.
func (rt *Routine) next() chan Event {
	return rt.out
}
   138  
// ready returns a channel that is closed once start() has begun processing,
// letting callers wait until the routine accepts events.
func (rt *Routine) ready() chan struct{} {
	return rt.rdy
}
   142  
   143  func (rt *Routine) stop() {
   144  	if !rt.isRunning() { // XXX: this should check rt.queue.Disposed()
   145  		return
   146  	}
   147  
   148  	rt.logger.Info("routine stop", "msg", log.NewLazySprintf("%s: stop", rt.name))
   149  	rt.queue.Dispose() // this should block until all queue items are free?
   150  }
   151  
// final returns the channel on which the routine's termination reason is
// delivered once the event loop exits (nil for a clean stop).
func (rt *Routine) final() chan error {
	return rt.fin
}
   155  
// XXX: Maybe get rid of this
// terminate records the reason the event loop exited on the fin channel.
// fin is buffered with capacity 1, so the single send from start() does not
// block even if no reader is waiting yet.
func (rt *Routine) terminate(reason error) {
	// We don't close the rt.out channel here, to avoid spinning on the closed channel
	// in the event loop.
	rt.fin <- reason
}