github.com/hspak/nomad@v0.7.2-0.20180309000617-bc4ae22a39a5/nomad/rpc.go

     1  package nomad
     2  
     3  import (
     4  	"context"
     5  	"crypto/tls"
     6  	"crypto/x509"
     7  	"errors"
     8  	"fmt"
     9  	"io"
    10  	"math/rand"
    11  	"net"
    12  	"net/rpc"
    13  	"strings"
    14  	"time"
    15  
    16  	metrics "github.com/armon/go-metrics"
    17  	"github.com/hashicorp/consul/lib"
    18  	memdb "github.com/hashicorp/go-memdb"
    19  	"github.com/hashicorp/nomad/helper/pool"
    20  	"github.com/hashicorp/nomad/nomad/state"
    21  	"github.com/hashicorp/nomad/nomad/structs"
    22  	"github.com/hashicorp/raft"
    23  	"github.com/hashicorp/yamux"
    24  	"github.com/ugorji/go/codec"
    25  )
    26  
    27  const (
    28  	// maxQueryTime is used to bound the duration of a blocking query
    29  	maxQueryTime = 300 * time.Second
    30  
    31  	// defaultQueryTime is the amount of time we block waiting for a change
    32  	// if no time is specified. Previously we would wait the maxQueryTime.
    33  	defaultQueryTime = 300 * time.Second
    34  
    35  	// Warn if the Raft command is larger than this.
    36  	// If it's over 1MB something is probably being abusive.
    37  	raftWarnSize = 1024 * 1024
    38  
    39  	// enqueueLimit caps how long we will wait to enqueue
    40  	// a new Raft command. Something is probably wrong if this
    41  	// value is ever reached. However, it prevents us from blocking
    42  	// the requesting goroutine forever.
    43  	enqueueLimit = 30 * time.Second
    44  )
    45  
    46  // RPCContext provides metadata about the RPC connection.
    47  type RPCContext struct {
    48  	// Conn exposes the raw connection.
    49  	Conn net.Conn
    50  
    51  	// Session exposes the multiplexed connection session.
    52  	Session *yamux.Session
    53  
    54  	// TLS marks whether the RPC is over a TLS-based connection
    55  	TLS bool
    56  
    57  	// VerifiedChains are the verified certificate chains presented by the
    58  	// incoming connection.
    59  	VerifiedChains [][]*x509.Certificate
    60  
    61  	// NodeID marks the NodeID that initiated the connection.
    62  	NodeID string
    63  }
    64  
    65  // listen is used to listen for incoming RPC connections
    66  func (s *Server) listen(ctx context.Context) {
    67  	for {
    68  		select {
    69  		case <-ctx.Done():
    70  			s.logger.Println("[INFO] nomad.rpc: Closing server RPC connection")
    71  			return
    72  		default:
    73  		}
    74  
    75  		// Accept a connection
    76  		conn, err := s.rpcListener.Accept()
    77  		if err != nil {
    78  			if s.shutdown {
    79  				return
    80  			}
    81  
    82  			select {
    83  			case <-ctx.Done():
    84  				return
    85  			default:
    86  			}
    87  
    88  			s.logger.Printf("[ERR] nomad.rpc: failed to accept RPC conn: %v", err)
    89  			continue
    90  		}
    91  
    92  		go s.handleConn(ctx, conn, &RPCContext{Conn: conn})
    93  		metrics.IncrCounter([]string{"nomad", "rpc", "accept_conn"}, 1)
    94  	}
    95  }
    96  
    97  // handleConn is used to determine if this is a Raft or
    98  // Nomad type RPC connection and invoke the correct handler
    99  func (s *Server) handleConn(ctx context.Context, conn net.Conn, rpcCtx *RPCContext) {
   100  	// Read a single byte
   101  	buf := make([]byte, 1)
   102  	if _, err := conn.Read(buf); err != nil {
   103  		if err != io.EOF {
   104  			s.logger.Printf("[ERR] nomad.rpc: failed to read byte: %v", err)
   105  		}
   106  		conn.Close()
   107  		return
   108  	}
   109  
   110  	// Enforce TLS if EnableRPC is set
   111  	if s.config.TLSConfig.EnableRPC && !rpcCtx.TLS && pool.RPCType(buf[0]) != pool.RpcTLS {
   112  		if !s.config.TLSConfig.RPCUpgradeMode {
   113  			s.logger.Printf("[WARN] nomad.rpc: Non-TLS connection attempted from %s with RequireTLS set", conn.RemoteAddr().String())
   114  			conn.Close()
   115  			return
   116  		}
   117  	}
   118  
   119  	// Switch on the byte
   120  	switch pool.RPCType(buf[0]) {
   121  	case pool.RpcNomad:
   122  		// Create an RPC Server and handle the request
   123  		server := rpc.NewServer()
   124  		s.setupRpcServer(server, rpcCtx)
   125  		s.handleNomadConn(ctx, conn, server)
   126  
   127  		// Remove any potential mapping between a NodeID and this connection, and
   128  		// close the underlying connection.
   129  		s.removeNodeConn(rpcCtx)
   130  
   131  	case pool.RpcRaft:
   132  		metrics.IncrCounter([]string{"nomad", "rpc", "raft_handoff"}, 1)
   133  		s.raftLayer.Handoff(ctx, conn)
   134  
   135  	case pool.RpcMultiplex:
   136  		s.handleMultiplex(ctx, conn, rpcCtx)
   137  
   138  	case pool.RpcTLS:
   139  		if s.rpcTLS == nil {
   140  			s.logger.Printf("[WARN] nomad.rpc: TLS connection attempted, server not configured for TLS")
   141  			conn.Close()
   142  			return
   143  		}
   144  		conn = tls.Server(conn, s.rpcTLS)
   145  
   146  		// Force a handshake so we can get information about the TLS connection
   147  		// state.
   148  		tlsConn, ok := conn.(*tls.Conn)
   149  		if !ok {
   150  			s.logger.Printf("[ERR] nomad.rpc: expected TLS connection but got %T", conn)
   151  			conn.Close()
   152  			return
   153  		}
   154  
   155  		if err := tlsConn.Handshake(); err != nil {
   156  			s.logger.Printf("[WARN] nomad.rpc: failed TLS handshake from %v: %v", tlsConn.RemoteAddr(), err)
   157  			conn.Close()
   158  			return
   159  		}
   160  
   161  		// Update the connection context with the fact that the connection is
   162  		// using TLS
   163  		rpcCtx.TLS = true
   164  
   165  		// Store the verified chains so they can be inspected later.
   166  		state := tlsConn.ConnectionState()
   167  		rpcCtx.VerifiedChains = state.VerifiedChains
   168  
   169  		s.handleConn(ctx, conn, rpcCtx)
   170  
   171  	case pool.RpcStreaming:
   172  		s.handleStreamingConn(conn)
   173  
   174  	case pool.RpcMultiplexV2:
   175  		s.handleMultiplexV2(ctx, conn, rpcCtx)
   176  
   177  	default:
   178  		s.logger.Printf("[ERR] nomad.rpc: unrecognized RPC byte: %v", buf[0])
   179  		conn.Close()
   180  		return
   181  	}
   182  }
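
        // Illustrative sketch (not part of the original file): the dispatch above
        // implies that a client chooses the connection type by writing a single mode
        // byte immediately after dialing. Assuming pool.NewClientCodec exists as the
        // client-side counterpart of the pool.NewServerCodec used in handleNomadConn
        // below, a plain Nomad RPC connection could be opened roughly like this
        // ("Status.Ping" is just an example method name):
        //
        //	conn, err := net.DialTimeout("tcp", addr, 10*time.Second)
        //	if err != nil {
        //		return err
        //	}
        //	// Announce a plain Nomad RPC stream; handleConn switches on this byte.
        //	if _, err := conn.Write([]byte{byte(pool.RpcNomad)}); err != nil {
        //		return err
        //	}
        //	client := rpc.NewClientWithCodec(pool.NewClientCodec(conn))
        //	var out struct{}
        //	err = client.Call("Status.Ping", struct{}{}, &out)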
   183  
   184  // handleMultiplex is used to multiplex a single incoming connection
   185  // using the Yamux multiplexer
   186  func (s *Server) handleMultiplex(ctx context.Context, conn net.Conn, rpcCtx *RPCContext) {
   187  	defer func() {
   188  		// Remove any potential mapping between a NodeID and this connection, and
   189  		// close the underlying connection.
   190  		s.removeNodeConn(rpcCtx)
   191  		conn.Close()
   192  	}()
   193  
   194  	conf := yamux.DefaultConfig()
   195  	conf.LogOutput = s.config.LogOutput
   196  	server, err := yamux.Server(conn, conf)
   197  	if err != nil {
   198  		s.logger.Printf("[ERR] nomad.rpc: multiplex failed to create yamux server: %v", err)
   199  		return
   200  	}
   201  
   202  	// Update the context to store the yamux session
   203  	rpcCtx.Session = server
   204  
   205  	// Create the RPC server for this connection
   206  	rpcServer := rpc.NewServer()
   207  	s.setupRpcServer(rpcServer, rpcCtx)
   208  
   209  	for {
   210  		sub, err := server.Accept()
   211  		if err != nil {
   212  			if err != io.EOF {
   213  				s.logger.Printf("[ERR] nomad.rpc: multiplex conn accept failed: %v", err)
   214  			}
   215  			return
   216  		}
   217  		go s.handleNomadConn(ctx, sub, rpcServer)
   218  	}
   219  }
   220  
   221  // handleNomadConn is used to service a single Nomad RPC connection
   222  func (s *Server) handleNomadConn(ctx context.Context, conn net.Conn, server *rpc.Server) {
   223  	defer conn.Close()
   224  	rpcCodec := pool.NewServerCodec(conn)
   225  	for {
   226  		select {
   227  		case <-ctx.Done():
   228  			s.logger.Println("[INFO] nomad.rpc: Closing server RPC connection")
   229  			return
   230  		case <-s.shutdownCh:
   231  			return
   232  		default:
   233  		}
   234  
   235  		if err := server.ServeRequest(rpcCodec); err != nil {
   236  			if err != io.EOF && !strings.Contains(err.Error(), "closed") {
   237  				s.logger.Printf("[ERR] nomad.rpc: RPC error: %v (%v)", err, conn)
   238  				metrics.IncrCounter([]string{"nomad", "rpc", "request_error"}, 1)
   239  			}
   240  			return
   241  		}
   242  		metrics.IncrCounter([]string{"nomad", "rpc", "request"}, 1)
   243  	}
   244  }
   245  
   246  // handleStreamingConn is used to handle a single Streaming Nomad RPC connection.
   247  func (s *Server) handleStreamingConn(conn net.Conn) {
   248  	defer conn.Close()
   249  
   250  	// Decode the header
   251  	var header structs.StreamingRpcHeader
   252  	decoder := codec.NewDecoder(conn, structs.MsgpackHandle)
   253  	if err := decoder.Decode(&header); err != nil {
   254  		if err != io.EOF && !strings.Contains(err.Error(), "closed") {
   255  			s.logger.Printf("[ERR] nomad.rpc: Streaming RPC error: %v (%v)", err, conn)
   256  			metrics.IncrCounter([]string{"nomad", "streaming_rpc", "request_error"}, 1)
   257  		}
   258  
   259  		return
   260  	}
   261  
   262  	ack := structs.StreamingRpcAck{}
   263  	handler, err := s.streamingRpcs.GetHandler(header.Method)
   264  	if err != nil {
   265  		s.logger.Printf("[ERR] nomad.rpc: Streaming RPC error: %v (%v)", err, conn)
   266  		metrics.IncrCounter([]string{"nomad", "streaming_rpc", "request_error"}, 1)
   267  		ack.Error = err.Error()
   268  	}
   269  
   270  	// Send the acknowledgement
   271  	encoder := codec.NewEncoder(conn, structs.MsgpackHandle)
   272  	if err := encoder.Encode(ack); err != nil {
   273  		conn.Close()
   274  		return
   275  	}
   276  
   277  	if ack.Error != "" {
   278  		return
   279  	}
   280  
   281  	// Invoke the handler
   282  	metrics.IncrCounter([]string{"nomad", "streaming_rpc", "request"}, 1)
   283  	handler(conn)
   284  }
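
        // Illustrative sketch (not part of the original file): for GetHandler above
        // to find anything, streaming methods must be registered with the server's
        // streaming-RPC registry ahead of time. Assuming the registry exposes a
        // Register method and that handlers receive the hijacked connection, the
        // registration of a hypothetical method would look roughly like:
        //
        //	s.streamingRpcs.Register("Some.Stream", func(conn io.ReadWriteCloser) {
        //		defer conn.Close()
        //		// Decode requests and encode responses over conn using
        //		// structs.MsgpackHandle, mirroring what the client side does in
        //		// streamingRpcImpl below.
        //	})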
   285  
   286  // handleMultiplexV2 is used to multiplex a single incoming connection
   287  // using the Yamux multiplexer. Version 2 handling allows a single connection to
   288  // switch streams between regular RPCs and Streaming RPCs.
   289  func (s *Server) handleMultiplexV2(ctx context.Context, conn net.Conn, rpcCtx *RPCContext) {
   290  	defer func() {
   291  		// Remove any potential mapping between a NodeID and this connection, and
   292  		// close the underlying connection.
   293  		s.removeNodeConn(rpcCtx)
   294  		conn.Close()
   295  	}()
   296  
   297  	conf := yamux.DefaultConfig()
   298  	conf.LogOutput = s.config.LogOutput
   299  	server, err := yamux.Server(conn, conf)
   300  	if err != nil {
   301  		s.logger.Printf("[ERR] nomad.rpc: multiplex_v2 failed to create yamux server: %v", err)
   302  		return
   303  	}
   304  
   305  	// Update the context to store the yamux session
   306  	rpcCtx.Session = server
   307  
   308  	// Create the RPC server for this connection
   309  	rpcServer := rpc.NewServer()
   310  	s.setupRpcServer(rpcServer, rpcCtx)
   311  
   312  	for {
   313  		// Accept a new stream
   314  		sub, err := server.Accept()
   315  		if err != nil {
   316  			if err != io.EOF {
   317  				s.logger.Printf("[ERR] nomad.rpc: multiplex_v2 conn accept failed: %v", err)
   318  			}
   319  			return
   320  		}
   321  
   322  		// Read a single byte
   323  		buf := make([]byte, 1)
   324  		if _, err := sub.Read(buf); err != nil {
   325  			if err != io.EOF {
   326  				s.logger.Printf("[ERR] nomad.rpc: multiplex_v2 failed to read byte: %v", err)
   327  			}
   328  			return
   329  		}
   330  
   331  		// Determine which handler to use
   332  		switch pool.RPCType(buf[0]) {
   333  		case pool.RpcNomad:
   334  			go s.handleNomadConn(ctx, sub, rpcServer)
   335  		case pool.RpcStreaming:
   336  			go s.handleStreamingConn(sub)
   337  
   338  		default:
   339  			s.logger.Printf("[ERR] nomad.rpc: multiplex_v2 unrecognized RPC byte: %v", buf[0])
   340  			return
   341  		}
   342  	}
   343  
   344  }
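
        // Illustrative sketch (not part of the original file): the client-side
        // counterpart of multiplex V2. After announcing RpcMultiplexV2 on the raw
        // connection, each yamux stream is prefixed with its own mode byte, which is
        // what lets regular RPCs and Streaming RPCs share one TCP connection:
        //
        //	if _, err := conn.Write([]byte{byte(pool.RpcMultiplexV2)}); err != nil {
        //		return err
        //	}
        //	session, err := yamux.Client(conn, yamux.DefaultConfig())
        //	if err != nil {
        //		return err
        //	}
        //	stream, err := session.Open()
        //	if err != nil {
        //		return err
        //	}
        //	// This stream carries a regular Nomad RPC; another stream on the same
        //	// session could write pool.RpcStreaming instead.
        //	if _, err := stream.Write([]byte{byte(pool.RpcNomad)}); err != nil {
        //		return err
        //	}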
   345  
   346  // forward is used to forward an RPC call to a remote region or to the local
   347  // leader. It returns whether forwarding was performed, as well as any error.
   348  func (s *Server) forward(method string, info structs.RPCInfo, args interface{}, reply interface{}) (bool, error) {
   349  	var firstCheck time.Time
   350  
   351  	region := info.RequestRegion()
   352  	if region == "" {
   353  		return true, fmt.Errorf("missing region for target RPC")
   354  	}
   355  
   356  	// Handle region forwarding
   357  	if region != s.config.Region {
   358  		err := s.forwardRegion(region, method, args, reply)
   359  		return true, err
   360  	}
   361  
   362  	// Check if we can allow a stale read
   363  	if info.IsRead() && info.AllowStaleRead() {
   364  		return false, nil
   365  	}
   366  
   367  CHECK_LEADER:
   368  	// Find the leader
   369  	isLeader, remoteServer := s.getLeader()
   370  
   371  	// Handle the case we are the leader
   372  	if isLeader {
   373  		return false, nil
   374  	}
   375  
   376  	// Handle the case of a known leader
   377  	if remoteServer != nil {
   378  		err := s.forwardLeader(remoteServer, method, args, reply)
   379  		return true, err
   380  	}
   381  
   382  	// Gate the request until there is a leader
   383  	if firstCheck.IsZero() {
   384  		firstCheck = time.Now()
   385  	}
   386  	if time.Now().Sub(firstCheck) < s.config.RPCHoldTimeout {
   387  		jitter := lib.RandomStagger(s.config.RPCHoldTimeout / structs.JitterFraction)
   388  		select {
   389  		case <-time.After(jitter):
   390  			goto CHECK_LEADER
   391  		case <-s.shutdownCh:
   392  		}
   393  	}
   394  
   395  	// No leader found and hold time exceeded
   396  	return true, structs.ErrNoLeader
   397  }
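
        // Illustrative sketch (not part of the original file): RPC endpoints are
        // expected to call forward before doing any local work and to return
        // immediately if the request was handled remotely. The endpoint and struct
        // names here are hypothetical:
        //
        //	func (e *SomeEndpoint) GetThing(args *structs.SomeRequest, reply *structs.SomeResponse) error {
        //		if done, err := e.srv.forward("SomeEndpoint.GetThing", args, args, reply); done {
        //			return err
        //		}
        //		// Not forwarded: we are the leader, or a stale read was allowed.
        //		// ... handle the request locally ...
        //		return nil
        //	}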
   398  
   399  // getLeader returns whether the current node is the leader and, if not, the
   400  // known leader, which may be nil if the cluster has not yet elected a
   401  // leader.
   402  func (s *Server) getLeader() (bool, *serverParts) {
   403  	// Check if we are the leader
   404  	if s.IsLeader() {
   405  		return true, nil
   406  	}
   407  
   408  	// Get the leader
   409  	leader := s.raft.Leader()
   410  	if leader == "" {
   411  		return false, nil
   412  	}
   413  
   414  	// Lookup the server
   415  	s.peerLock.RLock()
   416  	server := s.localPeers[leader]
   417  	s.peerLock.RUnlock()
   418  
   419  	// Server could be nil
   420  	return false, server
   421  }
   422  
   423  // forwardLeader is used to forward an RPC call to the leader, or fail if no leader
   424  func (s *Server) forwardLeader(server *serverParts, method string, args interface{}, reply interface{}) error {
   425  	// Handle a missing server
   426  	if server == nil {
   427  		return structs.ErrNoLeader
   428  	}
   429  	return s.connPool.RPC(s.config.Region, server.Addr, server.MajorVersion, method, args, reply)
   430  }
   431  
   432  // forwardServer is used to forward an RPC call to a particular server
   433  func (s *Server) forwardServer(server *serverParts, method string, args interface{}, reply interface{}) error {
   434  	// Handle a missing server
   435  	if server == nil {
   436  		return errors.New("must be given a valid server address")
   437  	}
   438  	return s.connPool.RPC(s.config.Region, server.Addr, server.MajorVersion, method, args, reply)
   439  }
   440  
   441  // forwardRegion is used to forward an RPC call to a remote region, or fail if no servers
   442  func (s *Server) forwardRegion(region, method string, args interface{}, reply interface{}) error {
   443  	// Bail if we can't find any servers
   444  	s.peerLock.RLock()
   445  	servers := s.peers[region]
   446  	if len(servers) == 0 {
   447  		s.peerLock.RUnlock()
   448  		s.logger.Printf("[WARN] nomad.rpc: RPC request for region '%s', no path found",
   449  			region)
   450  		return structs.ErrNoRegionPath
   451  	}
   452  
   453  	// Select a random addr
   454  	offset := rand.Intn(len(servers))
   455  	server := servers[offset]
   456  	s.peerLock.RUnlock()
   457  
   458  	// Forward to remote Nomad
   459  	metrics.IncrCounter([]string{"nomad", "rpc", "cross-region", region}, 1)
   460  	return s.connPool.RPC(region, server.Addr, server.MajorVersion, method, args, reply)
   461  }
   462  
   463  // streamingRpc creates a connection to the given server and conducts the
   464  // initial handshake, returning the connection or an error. It is the caller's
   465  // responsibility to close the connection if there is no returned error.
   466  func (s *Server) streamingRpc(server *serverParts, method string) (net.Conn, error) {
   467  	// Try to dial the server
   468  	conn, err := net.DialTimeout("tcp", server.Addr.String(), 10*time.Second)
   469  	if err != nil {
   470  		return nil, err
   471  	}
   472  
   473  	// Cast to TCPConn
   474  	if tcp, ok := conn.(*net.TCPConn); ok {
   475  		tcp.SetKeepAlive(true)
   476  		tcp.SetNoDelay(true)
   477  	}
   478  
   479  	if err := s.streamingRpcImpl(conn, server.Region, method); err != nil {
   480  		return nil, err
   481  	}
   482  
   483  	return conn, nil
   484  }
   485  
   486  // streamingRpcImpl takes a pre-established connection to a server and conducts
   487  // the handshake to establish a streaming RPC for the given method. If an error
   488  // is returned, the underlying connection has been closed. Otherwise it is
   489  // assumed that the connection has been hijacked by the RPC method.
   490  func (s *Server) streamingRpcImpl(conn net.Conn, region, method string) error {
   491  	// Check if TLS is enabled
   492  	s.tlsWrapLock.RLock()
   493  	tlsWrap := s.tlsWrap
   494  	s.tlsWrapLock.RUnlock()
   495  
   496  	if tlsWrap != nil {
   497  		// Switch the connection into TLS mode
   498  		if _, err := conn.Write([]byte{byte(pool.RpcTLS)}); err != nil {
   499  			conn.Close()
   500  			return err
   501  		}
   502  
   503  		// Wrap the connection in a TLS client
   504  		tlsConn, err := tlsWrap(region, conn)
   505  		if err != nil {
   506  			conn.Close()
   507  			return err
   508  		}
   509  		conn = tlsConn
   510  	}
   511  
   512  	// Write the multiplex byte to set the mode
   513  	if _, err := conn.Write([]byte{byte(pool.RpcStreaming)}); err != nil {
   514  		conn.Close()
   515  		return err
   516  	}
   517  
   518  	// Send the header
   519  	encoder := codec.NewEncoder(conn, structs.MsgpackHandle)
   520  	decoder := codec.NewDecoder(conn, structs.MsgpackHandle)
   521  	header := structs.StreamingRpcHeader{
   522  		Method: method,
   523  	}
   524  	if err := encoder.Encode(header); err != nil {
   525  		conn.Close()
   526  		return err
   527  	}
   528  
   529  	// Wait for the acknowledgement
   530  	var ack structs.StreamingRpcAck
   531  	if err := decoder.Decode(&ack); err != nil {
   532  		conn.Close()
   533  		return err
   534  	}
   535  
   536  	if ack.Error != "" {
   537  		conn.Close()
   538  		return errors.New(ack.Error)
   539  	}
   540  
   541  	return nil
   542  }
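
        // Illustrative sketch (not part of the original file): a caller that needs a
        // streaming connection to another server uses streamingRpc (or, with an
        // existing connection, streamingRpcImpl) and then owns the connection for
        // the method-specific protocol. The method name is hypothetical:
        //
        //	conn, err := s.streamingRpc(server, "Some.Stream")
        //	if err != nil {
        //		return err
        //	}
        //	defer conn.Close()
        //	// From here on, requests and responses for "Some.Stream" are exchanged
        //	// directly over conn (typically msgpack-encoded with structs.MsgpackHandle).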
   543  
   544  // raftApplyFuture is used to encode a message, run it through raft, and return the Raft future.
   545  func (s *Server) raftApplyFuture(t structs.MessageType, msg interface{}) (raft.ApplyFuture, error) {
   546  	buf, err := structs.Encode(t, msg)
   547  	if err != nil {
   548  		return nil, fmt.Errorf("Failed to encode request: %v", err)
   549  	}
   550  
   551  	// Warn if the command is very large
   552  	if n := len(buf); n > raftWarnSize {
   553  		s.logger.Printf("[WARN] nomad: Attempting to apply large raft entry (type %d) (%d bytes)", t, n)
   554  	}
   555  
   556  	future := s.raft.Apply(buf, enqueueLimit)
   557  	return future, nil
   558  }
   559  
   560  // raftApplyFn is the function signature for applying a msg to Raft
   561  type raftApplyFn func(t structs.MessageType, msg interface{}) (interface{}, uint64, error)
   562  
   563  // raftApply is used to encode a message, run it through raft, and return
   564  // the FSM response along with any errors
   565  func (s *Server) raftApply(t structs.MessageType, msg interface{}) (interface{}, uint64, error) {
   566  	future, err := s.raftApplyFuture(t, msg)
   567  	if err != nil {
   568  		return nil, 0, err
   569  	}
   570  	if err := future.Error(); err != nil {
   571  		return nil, 0, err
   572  	}
   573  	return future.Response(), future.Index(), nil
   574  }
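
        // Illustrative sketch (not part of the original file): write-path endpoints
        // typically commit their request through raftApply and then check both the
        // Raft error and any error value returned by the FSM. The message type and
        // reply fields are hypothetical:
        //
        //	resp, index, err := s.raftApply(structs.SomeRequestType, args)
        //	if err != nil {
        //		return err
        //	}
        //	if respErr, ok := resp.(error); ok && respErr != nil {
        //		return respErr
        //	}
        //	reply.Index = index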
   575  
   576  // setQueryMeta is used to populate the QueryMeta data for an RPC call
   577  func (s *Server) setQueryMeta(m *structs.QueryMeta) {
   578  	if s.IsLeader() {
   579  		m.LastContact = 0
   580  		m.KnownLeader = true
   581  	} else {
   582  		m.LastContact = time.Now().Sub(s.raft.LastContact())
   583  		m.KnownLeader = (s.raft.Leader() != "")
   584  	}
   585  }
   586  
   587  // queryFn is used to perform a query operation. If a re-query is needed, the
   588  // passed-in watch set will be used to block for changes. The passed-in state
   589  // store should be used (vs. calling fsm.State()) since the given state store
   590  // will be correctly watched for changes if the state store is restored from
   591  // a snapshot.
   592  type queryFn func(memdb.WatchSet, *state.StateStore) error
   593  
   594  // blockingOptions is used to parameterize blockingRPC
   595  type blockingOptions struct {
   596  	queryOpts *structs.QueryOptions
   597  	queryMeta *structs.QueryMeta
   598  	run       queryFn
   599  }
   600  
   601  // blockingRPC is used for queries that need to wait for a
   602  // minimum index. This is used to block and wait for changes.
   603  func (s *Server) blockingRPC(opts *blockingOptions) error {
   604  	ctx := context.Background()
   605  	var cancel context.CancelFunc
   606  	var state *state.StateStore
   607  
   608  	// Fast path non-blocking
   609  	if opts.queryOpts.MinQueryIndex == 0 {
   610  		goto RUN_QUERY
   611  	}
   612  
   613  	// Restrict the max query time, and ensure there is always one
   614  	if opts.queryOpts.MaxQueryTime > maxQueryTime {
   615  		opts.queryOpts.MaxQueryTime = maxQueryTime
   616  	} else if opts.queryOpts.MaxQueryTime <= 0 {
   617  		opts.queryOpts.MaxQueryTime = defaultQueryTime
   618  	}
   619  
   620  	// Apply a small amount of jitter to the request
   621  	opts.queryOpts.MaxQueryTime += lib.RandomStagger(opts.queryOpts.MaxQueryTime / structs.JitterFraction)
   622  
   623  	// Setup a query timeout
   624  	ctx, cancel = context.WithTimeout(context.Background(), opts.queryOpts.MaxQueryTime)
   625  	defer cancel()
   626  
   627  RUN_QUERY:
   628  	// Update the query meta data
   629  	s.setQueryMeta(opts.queryMeta)
   630  
   631  	// Increment the rpc query counter
   632  	metrics.IncrCounter([]string{"nomad", "rpc", "query"}, 1)
   633  
   634  	// We capture the state store and its abandon channel but pass a snapshot to
   635  	// the blocking query function. We operate on the snapshot so that separate
   636  	// calls to the state store are not all wrapped within the same transaction.
   637  	state = s.fsm.State()
   638  	abandonCh := state.AbandonCh()
   639  	snap, _ := state.Snapshot()
   640  	stateSnap := &snap.StateStore
   641  
   642  	// We can skip all watch tracking if this isn't a blocking query.
   643  	var ws memdb.WatchSet
   644  	if opts.queryOpts.MinQueryIndex > 0 {
   645  		ws = memdb.NewWatchSet()
   646  
   647  		// This channel will be closed if a snapshot is restored and the
   648  		// whole state store is abandoned.
   649  		ws.Add(abandonCh)
   650  	}
   651  
   652  	// Block up to the timeout if we didn't see anything fresh.
   653  	err := opts.run(ws, stateSnap)
   654  
   655  	// Re-block if the result index has not yet passed the minimum query index
   656  	if err == nil && opts.queryOpts.MinQueryIndex > 0 && opts.queryMeta.Index <= opts.queryOpts.MinQueryIndex {
   657  		if err := ws.WatchCtx(ctx); err == nil {
   658  			goto RUN_QUERY
   659  		}
   660  	}
   661  	return err
   662  }
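
        // Illustrative sketch (not part of the original file): a read endpoint wires
        // itself into blockingRPC by supplying a queryFn that looks up data through
        // the snapshot's state store, registers watches via the watch set, and
        // reports the result index so re-queries block correctly. The state-store
        // accessors and reply fields are hypothetical:
        //
        //	opts := blockingOptions{
        //		queryOpts: &args.QueryOptions,
        //		queryMeta: &reply.QueryMeta,
        //		run: func(ws memdb.WatchSet, store *state.StateStore) error {
        //			out, err := store.SomeObjectByID(ws, args.ID) // hypothetical accessor
        //			if err != nil {
        //				return err
        //			}
        //			reply.Object = out
        //
        //			// Report the index the result is current to; blockingRPC re-runs
        //			// the query while this is <= MinQueryIndex.
        //			index, err := store.Index("some_table") // hypothetical table name
        //			if err != nil {
        //				return err
        //			}
        //			reply.Index = index
        //			return nil
        //		},
        //	}
        //	return s.blockingRPC(&opts)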