github.com/dkerwin/nomad@v0.3.3-0.20160525181927-74554135514b/nomad/server.go

package nomad

import (
	"crypto/tls"
	"errors"
	"fmt"
	"log"
	"net"
	"net/rpc"
	"os"
	"path/filepath"
	"reflect"
	"sort"
	"strconv"
	"strings"
	"sync"
	"time"

	"github.com/hashicorp/consul/tlsutil"
	"github.com/hashicorp/nomad/nomad/state"
	"github.com/hashicorp/raft"
	"github.com/hashicorp/raft-boltdb"
	"github.com/hashicorp/serf/serf"
)

const (
	raftState         = "raft/"
	serfSnapshot      = "serf/snapshot"
	snapshotsRetained = 2

	// serverRPCCache controls how long we keep an idle connection open to a server
	serverRPCCache = 2 * time.Minute

	// serverMaxStreams controls how many idle streams we keep open to a server
	serverMaxStreams = 64

	// raftLogCacheSize is the maximum number of logs to cache in-memory.
	// This is used to reduce disk I/O for the recently committed entries.
	raftLogCacheSize = 512

	// raftRemoveGracePeriod is how long we wait for a RemovePeer to
	// replicate so that we can gracefully leave the cluster.
	raftRemoveGracePeriod = 5 * time.Second

	// apiMajorVersion is returned as part of the Status.Version request.
	// It should be incremented anytime the APIs are changed in a way
	// that would break clients, to allow for sane client versioning.
	apiMajorVersion = 1

	// apiMinorVersion is returned as part of the Status.Version request.
	// It should be incremented anytime the APIs are changed to allow
	// for sane client versioning. Minor changes should be compatible
	// within the major version.
	apiMinorVersion = 1
)

// Server is the Nomad server, which manages the job queues,
// schedulers, and notification bus for agents.
type Server struct {
	config *Config
	logger *log.Logger

	// Connection pool to other Nomad servers
	connPool *ConnPool

	// Endpoints holds our RPC endpoints
	endpoints endpoints

	// The raft instance is used among Nomad nodes within the
	// region to protect operations that require strong consistency
	leaderCh      <-chan bool
	raft          *raft.Raft
	raftLayer     *RaftLayer
	raftPeers     raft.PeerStore
	raftStore     *raftboltdb.BoltStore
	raftInmem     *raft.InmemStore
	raftTransport *raft.NetworkTransport

	// fsm is the state machine used with Raft
	fsm *nomadFSM

	// rpcListener is used to listen for incoming connections
	rpcListener  net.Listener
	rpcServer    *rpc.Server
	rpcAdvertise net.Addr

	// rpcTLS is the TLS config for incoming TLS requests
	rpcTLS *tls.Config

	// peers is used to track the known Nomad servers. This is
	// used for region forwarding and clustering.
	peers      map[string][]*serverParts
	localPeers map[string]*serverParts
	peerLock   sync.RWMutex

	// serf is the Serf cluster containing only Nomad
	// servers. This is used for multi-region federation
	// and automatic clustering within regions.
	serf *serf.Serf

	// reconcileCh is used to pass events from the serf handler
	// into the leader manager. Mostly used to handle when servers
	// join/leave from the region.
	reconcileCh chan serf.Member

	// eventCh is used to receive events from the serf cluster
	eventCh chan serf.Event

	// evalBroker is used to manage the in-progress evaluations
	// that are waiting to be brokered to a sub-scheduler
	evalBroker *EvalBroker

	// blockedEvals is used to manage evaluations that are blocked on node
	// capacity changes.
	blockedEvals *BlockedEvals

	// planQueue is used to manage the submitted allocation
	// plans that are waiting to be assessed by the leader
	planQueue *PlanQueue

	// periodicDispatcher is used to track and create evaluations for periodic jobs.
	periodicDispatcher *PeriodicDispatch

	// heartbeatTimers track the expiration time of each heartbeat that has
	// a TTL. On expiration, the node status is updated to be 'down'.
	heartbeatTimers     map[string]*time.Timer
	heartbeatTimersLock sync.Mutex

	// workers is the set of scheduling workers used for processing
	workers []*Worker

	left         bool
	shutdown     bool
	shutdownCh   chan struct{}
	shutdownLock sync.Mutex
}

// endpoints holds the RPC endpoints
type endpoints struct {
	Status   *Status
	Node     *Node
	Job      *Job
	Eval     *Eval
	Plan     *Plan
	Alloc    *Alloc
	Region   *Region
	Periodic *Periodic
	System   *System
}

// NewServer is used to construct a new Nomad server from the
// configuration, potentially returning an error
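//
// A minimal usage sketch (hypothetical: DefaultConfig is assumed from this
// package's config.go, and the error handling here is illustrative):
//
//	config := DefaultConfig()
//	config.DevMode = true
//	srv, err := NewServer(config)
//	if err != nil {
//		log.Fatalf("failed to start server: %v", err)
//	}
//	defer srv.Shutdown()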
func NewServer(config *Config) (*Server, error) {
	// Check the protocol version
	if err := config.CheckVersion(); err != nil {
		return nil, err
	}

	// Ensure we have a log output
	if config.LogOutput == nil {
		config.LogOutput = os.Stderr
	}

	// Create a logger
	logger := log.New(config.LogOutput, "", log.LstdFlags)

	// Create an eval broker
	evalBroker, err := NewEvalBroker(config.EvalNackTimeout, config.EvalDeliveryLimit)
	if err != nil {
		return nil, err
	}

	// Create a new blocked eval tracker.
	blockedEvals := NewBlockedEvals(evalBroker)

	// Create a plan queue
	planQueue, err := NewPlanQueue()
	if err != nil {
		return nil, err
	}

	// Create the server
	s := &Server{
		config:       config,
		connPool:     NewPool(config.LogOutput, serverRPCCache, serverMaxStreams, nil),
		logger:       logger,
		rpcServer:    rpc.NewServer(),
		peers:        make(map[string][]*serverParts),
		localPeers:   make(map[string]*serverParts),
		reconcileCh:  make(chan serf.Member, 32),
		eventCh:      make(chan serf.Event, 256),
		evalBroker:   evalBroker,
		blockedEvals: blockedEvals,
		planQueue:    planQueue,
		shutdownCh:   make(chan struct{}),
	}

	// Create the periodic dispatcher for launching periodic jobs.
	s.periodicDispatcher = NewPeriodicDispatch(s.logger, s)

	// Initialize the RPC layer
	// TODO: TLS...
	if err := s.setupRPC(nil); err != nil {
		s.Shutdown()
		logger.Printf("[ERR] nomad: failed to start RPC layer: %s", err)
		return nil, fmt.Errorf("Failed to start RPC layer: %v", err)
	}

	// Initialize the Raft server
	if err := s.setupRaft(); err != nil {
		s.Shutdown()
		logger.Printf("[ERR] nomad: failed to start Raft: %s", err)
		return nil, fmt.Errorf("Failed to start Raft: %v", err)
	}

	// Initialize the wan Serf
	s.serf, err = s.setupSerf(config.SerfConfig, s.eventCh, serfSnapshot)
	if err != nil {
		s.Shutdown()
		logger.Printf("[ERR] nomad: failed to start serf WAN: %s", err)
		return nil, fmt.Errorf("Failed to start serf: %v", err)
	}

	// Initialize the scheduling workers
	if err := s.setupWorkers(); err != nil {
		s.Shutdown()
		logger.Printf("[ERR] nomad: failed to start workers: %s", err)
		return nil, fmt.Errorf("Failed to start workers: %v", err)
	}

	// Monitor leadership changes
	go s.monitorLeadership()

	// Start ingesting events for Serf
	go s.serfEventHandler()

	// Start the RPC listeners
	go s.listen()

	// Emit metrics for the eval broker
	go evalBroker.EmitStats(time.Second, s.shutdownCh)

	// Emit metrics for the plan queue
	go planQueue.EmitStats(time.Second, s.shutdownCh)

	// Emit metrics for the blocked eval tracker.
	go blockedEvals.EmitStats(time.Second, s.shutdownCh)

	// Emit metrics
	go s.heartbeatStats()

	// Seed the global random.
	if err := seedRandom(); err != nil {
		return nil, err
	}

	// Done
	return s, nil
}

// Shutdown is used to shut down the server
func (s *Server) Shutdown() error {
	s.logger.Printf("[INFO] nomad: shutting down server")
	s.shutdownLock.Lock()
	defer s.shutdownLock.Unlock()

	if s.shutdown {
		return nil
	}

	s.shutdown = true
	close(s.shutdownCh)

	if s.serf != nil {
		s.serf.Shutdown()
	}

	if s.raft != nil {
		s.raftTransport.Close()
		s.raftLayer.Close()
		future := s.raft.Shutdown()
		if err := future.Error(); err != nil {
			s.logger.Printf("[WARN] nomad: Error shutting down raft: %s", err)
		}
		if s.raftStore != nil {
			s.raftStore.Close()
		}
	}

	// Shutdown the RPC listener
	if s.rpcListener != nil {
		s.rpcListener.Close()
	}

	// Close the connection pool
	s.connPool.Shutdown()

	// Close the fsm
	if s.fsm != nil {
		s.fsm.Close()
	}
	return nil
}

// IsShutdown checks if the server is shut down
func (s *Server) IsShutdown() bool {
	select {
	case <-s.shutdownCh:
		return true
	default:
		return false
	}
}

// Leave is used to prepare for a graceful shutdown of the server
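//
// A typical graceful-shutdown sequence (a sketch; Leave-then-Shutdown is the
// intended ordering, the surrounding code is illustrative):
//
//	if err := srv.Leave(); err != nil {
//		log.Printf("[ERR] nomad: leave failed: %v", err)
//	}
//	srv.Shutdown()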
func (s *Server) Leave() error {
	s.logger.Printf("[INFO] nomad: server starting leave")
	s.left = true

	// Check the number of known peers
	numPeers, err := s.numOtherPeers()
	if err != nil {
		s.logger.Printf("[ERR] nomad: failed to check raft peers: %v", err)
		return err
	}

	// If we are the current leader, and we have any other peers (cluster has multiple
	// servers), we should do a RemovePeer to safely reduce the quorum size. If we are
	// not the leader, then we should issue our leave intention and wait to be removed
	// for some sane period of time.
	isLeader := s.IsLeader()
	if isLeader && numPeers > 0 {
		future := s.raft.RemovePeer(s.raftTransport.LocalAddr())
		if err := future.Error(); err != nil && err != raft.ErrUnknownPeer {
			s.logger.Printf("[ERR] nomad: failed to remove ourself as raft peer: %v", err)
		}
	}

	// Leave the gossip pool
	if s.serf != nil {
		if err := s.serf.Leave(); err != nil {
			s.logger.Printf("[ERR] nomad: failed to leave Serf cluster: %v", err)
		}
	}

	// If we were not leader, wait to be safely removed from the cluster.
	// We must wait to allow the raft replication to take place, otherwise
	// an immediate shutdown could cause a loss of quorum.
	if !isLeader {
		limit := time.Now().Add(raftRemoveGracePeriod)
		for numPeers > 0 && time.Now().Before(limit) {
			// Update the number of peers
			numPeers, err = s.numOtherPeers()
			if err != nil {
				s.logger.Printf("[ERR] nomad: failed to check raft peers: %v", err)
				break
			}

			// Avoid the sleep if we are done
			if numPeers == 0 {
				break
			}

			// Sleep a while and check again
			time.Sleep(50 * time.Millisecond)
		}
		if numPeers != 0 {
			s.logger.Printf("[WARN] nomad: failed to leave raft peer set gracefully, timeout")
		}
	}
	return nil
}

// setupRPC is used to set up the RPC listener
func (s *Server) setupRPC(tlsWrap tlsutil.DCWrapper) error {
	// Create endpoints
	s.endpoints.Status = &Status{s}
	s.endpoints.Node = &Node{srv: s}
	s.endpoints.Job = &Job{s}
	s.endpoints.Eval = &Eval{s}
	s.endpoints.Plan = &Plan{s}
	s.endpoints.Alloc = &Alloc{s}
	s.endpoints.Region = &Region{s}
	s.endpoints.Periodic = &Periodic{s}
	s.endpoints.System = &System{s}

	// Register the handlers
	s.rpcServer.Register(s.endpoints.Status)
	s.rpcServer.Register(s.endpoints.Node)
	s.rpcServer.Register(s.endpoints.Job)
	s.rpcServer.Register(s.endpoints.Eval)
	s.rpcServer.Register(s.endpoints.Plan)
	s.rpcServer.Register(s.endpoints.Alloc)
	s.rpcServer.Register(s.endpoints.Region)
	s.rpcServer.Register(s.endpoints.Periodic)
	s.rpcServer.Register(s.endpoints.System)

	list, err := net.ListenTCP("tcp", s.config.RPCAddr)
	if err != nil {
		return err
	}
	s.rpcListener = list

	if s.config.RPCAdvertise != nil {
		s.rpcAdvertise = s.config.RPCAdvertise
	} else {
		s.rpcAdvertise = s.rpcListener.Addr()
	}

	// Verify that we have a usable advertise address
	addr, ok := s.rpcAdvertise.(*net.TCPAddr)
	if !ok {
		list.Close()
		return fmt.Errorf("RPC advertise address is not a TCP Address: %v", addr)
	}
	if addr.IP.IsUnspecified() {
		list.Close()
		return fmt.Errorf("RPC advertise address is not advertisable: %v", addr)
	}

	// Provide a DC specific wrapper. Raft replication is only
	// ever done in the same datacenter, so we can provide it as a constant.
	// wrapper := tlsutil.SpecificDC(s.config.Datacenter, tlsWrap)
	// TODO: TLS...
	s.raftLayer = NewRaftLayer(s.rpcAdvertise, nil)
	return nil
}

// setupRaft is used to set up and initialize Raft
func (s *Server) setupRaft() error {
	// If we are in bootstrap mode, enable a single node cluster
	if s.config.Bootstrap || (s.config.DevMode && !s.config.DevDisableBootstrap) {
		s.config.RaftConfig.EnableSingleNode = true
	}

	// Create the FSM
	var err error
	s.fsm, err = NewFSM(s.evalBroker, s.periodicDispatcher, s.blockedEvals, s.config.LogOutput)
	if err != nil {
		return err
	}

	// Create a transport layer
	trans := raft.NewNetworkTransport(s.raftLayer, 3, s.config.RaftTimeout,
		s.config.LogOutput)
	s.raftTransport = trans

	// Create the backend raft store for logs and stable storage
	var log raft.LogStore
	var stable raft.StableStore
	var snap raft.SnapshotStore
	var peers raft.PeerStore
	if s.config.DevMode {
		store := raft.NewInmemStore()
		s.raftInmem = store
		stable = store
		log = store
		snap = raft.NewDiscardSnapshotStore()
		peers = &raft.StaticPeers{}
		s.raftPeers = peers

	} else {
		// Create the base raft path
		path := filepath.Join(s.config.DataDir, raftState)
		if err := ensurePath(path, true); err != nil {
			return err
		}

		// Create the BoltDB backend
		store, err := raftboltdb.NewBoltStore(filepath.Join(path, "raft.db"))
		if err != nil {
			return err
		}
		s.raftStore = store
		stable = store

		// Wrap the store in a LogCache to improve performance
		cacheStore, err := raft.NewLogCache(raftLogCacheSize, store)
		if err != nil {
			store.Close()
			return err
		}
		log = cacheStore

		// Create the snapshot store
		snapshots, err := raft.NewFileSnapshotStore(path, snapshotsRetained, s.config.LogOutput)
		if err != nil {
			if s.raftStore != nil {
				s.raftStore.Close()
			}
			return err
		}
		snap = snapshots

		// Setup the peer store
		s.raftPeers = raft.NewJSONPeers(path, trans)
		peers = s.raftPeers
	}

	// Ensure local host is always included if we are in bootstrap mode
	if s.config.RaftConfig.EnableSingleNode {
		p, err := peers.Peers()
		if err != nil {
			if s.raftStore != nil {
				s.raftStore.Close()
			}
			return err
		}
		if !raft.PeerContained(p, trans.LocalAddr()) {
			peers.SetPeers(raft.AddUniquePeer(p, trans.LocalAddr()))
		}
	}

	// Make sure we set the LogOutput
	s.config.RaftConfig.LogOutput = s.config.LogOutput

	// Setup the leader channel
	leaderCh := make(chan bool, 1)
	s.config.RaftConfig.NotifyCh = leaderCh
	s.leaderCh = leaderCh

	// Setup the Raft store
	s.raft, err = raft.NewRaft(s.config.RaftConfig, s.fsm, log, stable,
		snap, peers, trans)
	if err != nil {
		if s.raftStore != nil {
			s.raftStore.Close()
		}
		trans.Close()
		return err
	}
	return nil
}

// setupSerf is used to set up and initialize a Serf
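//
// The resulting member tags look roughly like this (values illustrative;
// "port" carries the RPC advertise port, 4647 by default):
//
//	role=nomad region=global dc=dc1 vsn=1 vsn_min=1 vsn_max=1 build=0.3.3 port=4647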
func (s *Server) setupSerf(conf *serf.Config, ch chan serf.Event, path string) (*serf.Serf, error) {
	conf.Init()
	conf.NodeName = fmt.Sprintf("%s.%s", s.config.NodeName, s.config.Region)
	conf.Tags["role"] = "nomad"
	conf.Tags["region"] = s.config.Region
	conf.Tags["dc"] = s.config.Datacenter
	conf.Tags["vsn"] = fmt.Sprintf("%d", s.config.ProtocolVersion)
	conf.Tags["vsn_min"] = fmt.Sprintf("%d", ProtocolVersionMin)
	conf.Tags["vsn_max"] = fmt.Sprintf("%d", ProtocolVersionMax)
	conf.Tags["build"] = s.config.Build
	conf.Tags["port"] = fmt.Sprintf("%d", s.rpcAdvertise.(*net.TCPAddr).Port)
	if s.config.Bootstrap || (s.config.DevMode && !s.config.DevDisableBootstrap) {
		conf.Tags["bootstrap"] = "1"
	}
	if s.config.BootstrapExpect != 0 {
		conf.Tags["expect"] = fmt.Sprintf("%d", s.config.BootstrapExpect)
	}
	conf.MemberlistConfig.LogOutput = s.config.LogOutput
	conf.LogOutput = s.config.LogOutput
	conf.EventCh = ch
	if !s.config.DevMode {
		conf.SnapshotPath = filepath.Join(s.config.DataDir, path)
		if err := ensurePath(conf.SnapshotPath, false); err != nil {
			return nil, err
		}
	}
	conf.ProtocolVersion = protocolVersionMap[s.config.ProtocolVersion]
	conf.RejoinAfterLeave = true
	conf.Merge = &serfMergeDelegate{}

	// Until Nomad supports this fully, we disable automatic resolution.
	// When enabled, the Serf gossip may just turn off if we are the minority
	// node which is rather unexpected.
	conf.EnableNameConflictResolution = false
	return serf.Create(conf)
}

// setupWorkers is used to start the scheduling workers
func (s *Server) setupWorkers() error {
	// Check if all the schedulers are disabled
	if len(s.config.EnabledSchedulers) == 0 || s.config.NumSchedulers == 0 {
		s.logger.Printf("[WARN] nomad: no enabled schedulers")
		return nil
	}

	// Start the workers
	for i := 0; i < s.config.NumSchedulers; i++ {
		if w, err := NewWorker(s); err != nil {
			return err
		} else {
			s.workers = append(s.workers, w)
		}
	}
	s.logger.Printf("[INFO] nomad: starting %d scheduling worker(s) for %v",
		s.config.NumSchedulers, s.config.EnabledSchedulers)
	return nil
}

// numOtherPeers is used to check on the number of known peers
// excluding the local node
func (s *Server) numOtherPeers() (int, error) {
	peers, err := s.raftPeers.Peers()
	if err != nil {
		return 0, err
	}
	otherPeers := raft.ExcludePeer(peers, s.raftTransport.LocalAddr())
	return len(otherPeers), nil
}

// IsLeader checks if this server is the cluster leader
func (s *Server) IsLeader() bool {
	return s.raft.State() == raft.Leader
}

// Join is used to have Nomad join the gossip ring.
// The target addresses should be other nodes listening on the
// Serf address.
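//
// A usage sketch (addresses are illustrative; 4648 is the default Serf port):
//
//	n, err := srv.Join([]string{"10.0.0.1:4648", "10.0.0.2:4648"})
//	if err != nil {
//		log.Printf("[ERR] nomad: join failed: %v", err)
//	}
//	log.Printf("[INFO] nomad: joined %d nodes", n)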
func (s *Server) Join(addrs []string) (int, error) {
	return s.serf.Join(addrs, true)
}

// LocalMember is used to return the local node
func (s *Server) LocalMember() serf.Member {
	return s.serf.LocalMember()
}

// Members is used to return the members of the serf cluster
func (s *Server) Members() []serf.Member {
	return s.serf.Members()
}

// RemoveFailedNode is used to remove a failed node from the cluster
func (s *Server) RemoveFailedNode(node string) error {
	return s.serf.RemoveFailedNode(node)
}

// KeyManager returns the Serf keyring manager
func (s *Server) KeyManager() *serf.KeyManager {
	return s.serf.KeyManager()
}

// Encrypted determines if gossip is encrypted
func (s *Server) Encrypted() bool {
	return s.serf.EncryptionEnabled()
}

// State returns the underlying state store. This should *not*
// be used to modify state directly.
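//
// A read-only usage sketch (NodeByID is assumed from the state package;
// the node ID is illustrative):
//
//	store := srv.State()
//	node, err := store.NodeByID("0f9a4b2d")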
func (s *Server) State() *state.StateStore {
	return s.fsm.State()
}

// Regions returns the known regions in the cluster.
func (s *Server) Regions() []string {
	s.peerLock.RLock()
	defer s.peerLock.RUnlock()

	regions := make([]string, 0, len(s.peers))
	for region := range s.peers {
		regions = append(regions, region)
	}
	sort.Strings(regions)
	return regions
}

// inmemCodec is used to do an RPC call without going over a network
type inmemCodec struct {
	method string
	args   interface{}
	reply  interface{}
	err    error
}

func (i *inmemCodec) ReadRequestHeader(req *rpc.Request) error {
	req.ServiceMethod = i.method
	return nil
}

func (i *inmemCodec) ReadRequestBody(args interface{}) error {
	sourceValue := reflect.Indirect(reflect.Indirect(reflect.ValueOf(i.args)))
	dst := reflect.Indirect(reflect.Indirect(reflect.ValueOf(args)))
	dst.Set(sourceValue)
	return nil
}

func (i *inmemCodec) WriteResponse(resp *rpc.Response, reply interface{}) error {
	if resp.Error != "" {
		i.err = errors.New(resp.Error)
		return nil
	}
	sourceValue := reflect.Indirect(reflect.Indirect(reflect.ValueOf(reply)))
	dst := reflect.Indirect(reflect.Indirect(reflect.ValueOf(i.reply)))
	dst.Set(sourceValue)
	return nil
}

func (i *inmemCodec) Close() error {
	return nil
}

// RPC is used to make a local RPC call
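//
// A usage sketch (structs.GenericRequest and structs.VersionResponse are
// assumed from the nomad/structs package):
//
//	var reply structs.VersionResponse
//	err := srv.RPC("Status.Version", &structs.GenericRequest{}, &reply)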
func (s *Server) RPC(method string, args interface{}, reply interface{}) error {
	codec := &inmemCodec{
		method: method,
		args:   args,
		reply:  reply,
	}
	if err := s.rpcServer.ServeRequest(codec); err != nil {
		return err
	}
	return codec.err
}

// Stats is used to return statistics for debugging and insight
// for various sub-systems
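//
// A usage sketch (the keys shown are the ones populated below):
//
//	stats := srv.Stats()
//	fmt.Printf("leader=%s known_regions=%s\n",
//		stats["nomad"]["leader"], stats["nomad"]["known_regions"])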
func (s *Server) Stats() map[string]map[string]string {
	toString := func(v uint64) string {
		return strconv.FormatUint(v, 10)
	}
	stats := map[string]map[string]string{
		"nomad": map[string]string{
			"server":        "true",
			"leader":        fmt.Sprintf("%v", s.IsLeader()),
			"leader_addr":   s.raft.Leader(),
			"bootstrap":     fmt.Sprintf("%v", s.config.Bootstrap),
			"known_regions": toString(uint64(len(s.peers))),
		},
		"raft":    s.raft.Stats(),
		"serf":    s.serf.Stats(),
		"runtime": RuntimeStats(),
	}
	if peers, err := s.raftPeers.Peers(); err == nil {
		stats["raft"]["raft_peers"] = strings.Join(peers, ",")
	} else {
		s.logger.Printf("[DEBUG] server: error getting raft peers: %v", err)
	}
	return stats
}