github.com/xmidt-org/webpa-common@v1.11.9/service/monitor/monitor.go

package monitor

import (
	"errors"
	"sync"

	"github.com/go-kit/kit/log"
	"github.com/go-kit/kit/log/level"
	"github.com/go-kit/kit/sd"
	"github.com/xmidt-org/webpa-common/logging"
	"github.com/xmidt-org/webpa-common/service"
)

var errNoInstances = errors.New("No instances to monitor")

// Interface represents an active monitor for one or more sd.Instancer objects.
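//
// A caller that needs to react to shutdown can watch the Stopped channel
// (a minimal sketch; "m" stands for any value satisfying this interface):
//
//	go func() {
//		<-m.Stopped()
//		// release whatever resources were tied to this monitor
//	}()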
type Interface interface {
	// Stopped returns a channel that is closed when this Monitor is stopped.
	// Semantics are equivalent to context.Context.Done().
	Stopped() <-chan struct{}

	// Stop halts all goroutines that are dispatching events, but does not stop
	// or close the service discovery backend.  This method is idempotent.  Once stopped,
	// a Monitor cannot be reused.
	Stop()
}

// Option represents a configuration option for a monitor
type Option func(*monitor)

// WithLogger sets a go-kit Logger for this monitor.  This logger will be enriched with information
// about each instancer, if available.  If nil, the default logger is used instead.
func WithLogger(l log.Logger) Option {
	return func(m *monitor) {
		if l == nil {
			m.logger = logging.DefaultLogger()
		} else {
			m.logger = l
		}
	}
}

// WithClosed sets an external channel that, when closed, will cause all goroutines spawned
// by this monitor to exit.  This is useful when orchestrating multiple monitors, or when restarting
// service discovery clients.
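//
// For example (a sketch; "shutdown" and "instancers" are hypothetical values owned by the caller):
//
//	shutdown := make(chan struct{})
//	m, err := New(WithInstancers(instancers), WithClosed(shutdown))
//	// ... handle err, use m ...
//	close(shutdown) // every goroutine spawned by the monitor exits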
func WithClosed(c <-chan struct{}) Option {
	return func(m *monitor) {
		m.closed = c
	}
}

// WithFilter establishes the filtering strategy for discovered service instances.  By default, TrimAndSortFilter is used.
// If the filter is nil, filtering is disabled and every Listener will receive the raw, unfiltered instances from the
// service discovery backend.
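//
// As an illustration only (this assumes Filter has the shape func([]string) []string,
// matching how m.filter is applied to sd.Event.Instances in dispatchEvents; it also
// imports "strings"), a caller could supply a filter that keeps only non-blank instances:
//
//	WithFilter(func(instances []string) []string {
//		var kept []string
//		for _, instance := range instances {
//			if strings.TrimSpace(instance) != "" {
//				kept = append(kept, instance)
//			}
//		}
//		return kept
//	})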
func WithFilter(f Filter) Option {
	return func(m *monitor) {
		if f == nil {
			m.filter = NopFilter
		} else {
			m.filter = f
		}
	}
}

// WithListeners configures the monitor to dispatch to zero or more Listeners.  It is legal to start a Monitor
// with no Listeners, as this is equivalent to just logging messages for the service discovery backend.
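//
// For illustration (this assumes a Listener is satisfied by a single MonitorEvent(Event)
// method, which is how m.listeners.MonitorEvent is invoked in dispatchEvents; logListener
// is a hypothetical type):
//
//	type logListener struct {
//		logger log.Logger
//	}
//
//	func (ll logListener) MonitorEvent(e Event) {
//		ll.logger.Log("key", e.Key, "service", e.Service, "instances", e.Instances, "stopped", e.Stopped)
//	}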
func WithListeners(l ...Listener) Option {
	return func(m *monitor) {
		if len(l) > 0 {
			m.listeners = append(Listeners{}, l...)
		} else {
			m.listeners = nil
		}
	}
}

// WithInstancers establishes the set of sd.Instancer objects to be monitored.  The given Instancers
// is copied to maintain the monitor's immutability.
func WithInstancers(i service.Instancers) Option {
	return func(m *monitor) {
		m.instancers = i.Copy()
	}
}

// WithEnvironment configures the monitor from a service.Environment, monitoring that
// environment's Instancers and exiting when its Closed channel is closed.
func WithEnvironment(e service.Environment) Option {
	return func(m *monitor) {
		m.instancers = e.Instancers()
		m.closed = e.Closed()
	}
}

// New begins monitoring one or more sd.Instancer objects, dispatching events to any Listeners that are configured.
// This function returns an error if no instancers have been configured to monitor.
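//
// A typical construction might look like this (a sketch; "e" is a service.Environment
// and "l" is a Listener supplied by the caller):
//
//	m, err := New(
//		WithEnvironment(e),
//		WithListeners(l),
//	)
//	if err != nil {
//		// errNoInstances: the environment exposed nothing to monitor
//	}
//	defer m.Stop()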
func New(options ...Option) (Interface, error) {
	var (
		m = &monitor{
			logger:  logging.DefaultLogger(),
			stopped: make(chan struct{}),
			filter:  DefaultFilter(),
		}
	)

	for _, o := range options {
		o(m)
	}

	if err := m.start(); err != nil {
		return nil, err
	}

	return m, nil
}

// monitor is the internal implementation of Monitor.  This type is a shared context
// among all goroutines that monitor a (key, instancer) pair.
type monitor struct {
	logger     log.Logger
	instancers service.Instancers
	filter     Filter
	listeners  Listeners

	closed   <-chan struct{}
	stopped  chan struct{}
	stopOnce sync.Once
}

func (m *monitor) Stopped() <-chan struct{} {
	return m.stopped
}

func (m *monitor) Stop() {
	m.stopOnce.Do(func() {
		close(m.stopped)
	})
}

func (m *monitor) start() error {
	if m.instancers.Len() == 0 {
		return errNoInstances
	}

	for k, v := range m.instancers {
		var svc = k
		if ci, ok := v.(service.ContextualInstancer); ok {
			if svcName, ok := ci.Metadata()["service"].(string); ok {
				svc = svcName
			}
		}
		go m.dispatchEvents(k, svc, logging.Enrich(m.logger, v), v)
	}

	return nil
}

// dispatchEvents is a goroutine that consumes service discovery events from an sd.Instancer
// and dispatches those events to zero or more Listeners.  If configured, the filter is used to
// preprocess the set of instances sent to the listeners.
func (m *monitor) dispatchEvents(key, service string, l log.Logger, i sd.Instancer) {
	var (
		eventCount              = 0
		eventCounter log.Valuer = func() interface{} {
			return eventCount
		}

		logger = log.With(l, EventCountKey(), eventCounter)
		events = make(chan sd.Event, 10)
	)

	logger.Log(level.Key(), level.InfoValue(), logging.MessageKey(), "subscription monitor starting")

	defer i.Deregister(events)
	i.Register(events)

	for {
		select {
		case sdEvent := <-events:
			eventCount++
			event := Event{
				Key:        key,
				Service:    service,
				Instancer:  i,
				EventCount: eventCount,
			}

			if sdEvent.Err != nil {
				logger.Log(level.Key(), level.ErrorValue(), logging.MessageKey(), "service discovery error", logging.ErrorKey(), sdEvent.Err)
				event.Err = sdEvent.Err
			} else {
				logger.Log(level.Key(), level.InfoValue(), logging.MessageKey(), "service discovery update", "instances", sdEvent.Instances)
				if len(sdEvent.Instances) > 0 {
					event.Instances = m.filter(sdEvent.Instances)
				}
			}

			m.listeners.MonitorEvent(event)

		case <-m.stopped:
			logger.Log(level.Key(), level.InfoValue(), logging.MessageKey(), "subscription monitor was stopped")
			m.listeners.MonitorEvent(Event{Key: key, Service: service, Instancer: i, EventCount: eventCount, Stopped: true})
			return

		case <-m.closed:
			logger.Log(level.Key(), level.InfoValue(), logging.MessageKey(), "subscription monitor exiting due to external closure")
			m.Stop() // ensure that the Stopped state is correct
			m.listeners.MonitorEvent(Event{Key: key, Service: service, Instancer: i, EventCount: eventCount, Stopped: true})
			return
		}
	}
}