github.com/blixtra/nomad@v0.7.2-0.20171221000451-da9a1d7bb050/nomad/leader_test.go

package nomad

import (
	"fmt"
	"testing"
	"time"

	memdb "github.com/hashicorp/go-memdb"
	"github.com/hashicorp/nomad/nomad/mock"
	"github.com/hashicorp/nomad/nomad/state"
	"github.com/hashicorp/nomad/nomad/structs"
	"github.com/hashicorp/nomad/testutil"
	"github.com/stretchr/testify/assert"
)

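// TestLeader_LeftServer verifies that force-removing a downed non-leader
// server shrinks the cluster to two Raft peers on every remaining server.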
func TestLeader_LeftServer(t *testing.T) {
	s1 := testServer(t, nil)
	defer s1.Shutdown()

	s2 := testServer(t, func(c *Config) {
		c.DevDisableBootstrap = true
	})
	defer s2.Shutdown()

	s3 := testServer(t, func(c *Config) {
		c.DevDisableBootstrap = true
	})
	defer s3.Shutdown()
	servers := []*Server{s1, s2, s3}
	testJoin(t, s1, s2, s3)

	for _, s := range servers {
		testutil.WaitForResult(func() (bool, error) {
			peers, _ := s.numPeers()
			return peers == 3, nil
		}, func(err error) {
			t.Fatalf("should have 3 peers")
		})
	}

	// Kill a non-leader server
	var peer *Server
	for _, s := range servers {
		if !s.IsLeader() {
			peer = s
			break
		}
	}
	if peer == nil {
		t.Fatalf("should have a non-leader")
	}
	peer.Shutdown()
	name := fmt.Sprintf("%s.%s", peer.config.NodeName, peer.config.Region)

	testutil.WaitForResult(func() (bool, error) {
		for _, s := range servers {
			if s == peer {
				continue
			}

			// Force remove the non-leader (transition to left state)
			if err := s.RemoveFailedNode(name); err != nil {
				return false, err
			}

			if peers, _ := s.numPeers(); peers != 2 {
				return false, fmt.Errorf("expected 2 peers, got %d", peers)
			}
		}

		return true, nil
	}, func(err error) {
		t.Fatalf("err: %s", err)
	})
}

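// TestLeader_LeftLeader verifies that after the leader gracefully leaves and
// shuts down, the remaining servers converge on two Raft peers.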
func TestLeader_LeftLeader(t *testing.T) {
	s1 := testServer(t, nil)
	defer s1.Shutdown()

	s2 := testServer(t, func(c *Config) {
		c.DevDisableBootstrap = true
	})
	defer s2.Shutdown()

	s3 := testServer(t, func(c *Config) {
		c.DevDisableBootstrap = true
	})
	defer s3.Shutdown()
	servers := []*Server{s1, s2, s3}
	testJoin(t, s1, s2, s3)

	for _, s := range servers {
		testutil.WaitForResult(func() (bool, error) {
			peers, _ := s.numPeers()
			return peers == 3, nil
		}, func(err error) {
			t.Fatalf("should have 3 peers")
		})
	}

	// Kill the leader!
	var leader *Server
	for _, s := range servers {
		if s.IsLeader() {
			leader = s
			break
		}
	}
	if leader == nil {
		t.Fatalf("should have a leader")
	}
	leader.Leave()
	leader.Shutdown()

	for _, s := range servers {
		if s == leader {
			continue
		}
		testutil.WaitForResult(func() (bool, error) {
			peers, _ := s.numPeers()
			return peers == 2, fmt.Errorf("expected 2 peers, got %d", peers)
		}, func(err error) {
			t.Fatalf("should have 2 peers: %v", err)
		})
	}
}

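// TestLeader_MultiBootstrap verifies that joining two independently
// bootstrapped servers does not merge their Raft peer sets: each server
// remains its own single-peer cluster.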
func TestLeader_MultiBootstrap(t *testing.T) {
	s1 := testServer(t, nil)
	defer s1.Shutdown()

	s2 := testServer(t, nil)
	defer s2.Shutdown()
	servers := []*Server{s1, s2}
	testJoin(t, s1, s2)

	for _, s := range servers {
		testutil.WaitForResult(func() (bool, error) {
			peers := s.Members()
			return len(peers) == 2, nil
		}, func(err error) {
			t.Fatalf("should have 2 peers")
		})
	}

	// Ensure we don't have multiple raft peers
	for _, s := range servers {
		peers, _ := s.numPeers()
		if peers != 1 {
			t.Fatalf("should only have 1 raft peer!")
		}
	}
}

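// TestLeader_PlanQueue_Reset verifies that the plan queue is enabled only on
// the leader and is re-enabled on the new leader after a failover.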
func TestLeader_PlanQueue_Reset(t *testing.T) {
	s1 := testServer(t, nil)
	defer s1.Shutdown()

	s2 := testServer(t, func(c *Config) {
		c.DevDisableBootstrap = true
	})
	defer s2.Shutdown()

	s3 := testServer(t, func(c *Config) {
		c.DevDisableBootstrap = true
	})
	defer s3.Shutdown()
	servers := []*Server{s1, s2, s3}
	testJoin(t, s1, s2, s3)

	for _, s := range servers {
		testutil.WaitForResult(func() (bool, error) {
			peers, _ := s.numPeers()
			return peers == 3, nil
		}, func(err error) {
			t.Fatalf("should have 3 peers")
		})
	}

	var leader *Server
	for _, s := range servers {
		if s.IsLeader() {
			leader = s
			break
		}
	}
	if leader == nil {
		t.Fatalf("should have a leader")
	}

	if !leader.planQueue.Enabled() {
		t.Fatalf("should enable plan queue")
	}

	for _, s := range servers {
		if !s.IsLeader() && s.planQueue.Enabled() {
			t.Fatalf("plan queue should not be enabled")
		}
	}

	// Kill the leader
	leader.Shutdown()
	time.Sleep(100 * time.Millisecond)

	// Wait for a new leader
	leader = nil
	testutil.WaitForResult(func() (bool, error) {
		for _, s := range servers {
			if s.IsLeader() {
				leader = s
				return true, nil
			}
		}
		return false, nil
	}, func(err error) {
		t.Fatalf("should have leader")
	})

	// Check that the new leader enables its plan queue
	testutil.WaitForResult(func() (bool, error) {
		return leader.planQueue.Enabled(), nil
	}, func(err error) {
		t.Fatalf("should enable plan queue")
	})
}

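// TestLeader_EvalBroker_Reset verifies that a pending evaluation is restored
// into the eval broker of the newly elected leader after a failover.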
func TestLeader_EvalBroker_Reset(t *testing.T) {
	s1 := testServer(t, func(c *Config) {
		c.NumSchedulers = 0
	})
	defer s1.Shutdown()

	s2 := testServer(t, func(c *Config) {
		c.NumSchedulers = 0
		c.DevDisableBootstrap = true
	})
	defer s2.Shutdown()

	s3 := testServer(t, func(c *Config) {
		c.NumSchedulers = 0
		c.DevDisableBootstrap = true
	})
	defer s3.Shutdown()
	servers := []*Server{s1, s2, s3}
	testJoin(t, s1, s2, s3)
	testutil.WaitForLeader(t, s1.RPC)

	for _, s := range servers {
		testutil.WaitForResult(func() (bool, error) {
			peers, _ := s.numPeers()
			return peers == 3, nil
		}, func(err error) {
			t.Fatalf("should have 3 peers")
		})
	}

	var leader *Server
	for _, s := range servers {
		if s.IsLeader() {
			leader = s
			break
		}
	}
	if leader == nil {
		t.Fatalf("should have a leader")
	}

	// Inject a pending eval
	req := structs.EvalUpdateRequest{
		Evals: []*structs.Evaluation{mock.Eval()},
	}
	_, _, err := leader.raftApply(structs.EvalUpdateRequestType, req)
	if err != nil {
		t.Fatalf("err: %v", err)
	}

	// Kill the leader
	leader.Shutdown()
	time.Sleep(100 * time.Millisecond)

	// Wait for a new leader
	leader = nil
	testutil.WaitForResult(func() (bool, error) {
		for _, s := range servers {
			if s.IsLeader() {
				leader = s
				return true, nil
			}
		}
		return false, nil
	}, func(err error) {
		t.Fatalf("should have leader")
	})

	// Check that the new leader has a pending evaluation
	testutil.WaitForResult(func() (bool, error) {
		stats := leader.evalBroker.Stats()
		return stats.TotalReady == 1, nil
	}, func(err error) {
		t.Fatalf("should have pending evaluation")
	})
}

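// TestLeader_PeriodicDispatcher_Restore_Adds verifies that after a failover
// the new leader tracks periodic jobs but not non-periodic or parameterized
// periodic jobs.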
func TestLeader_PeriodicDispatcher_Restore_Adds(t *testing.T) {
	s1 := testServer(t, func(c *Config) {
		c.NumSchedulers = 0
	})
	defer s1.Shutdown()

	s2 := testServer(t, func(c *Config) {
		c.NumSchedulers = 0
		c.DevDisableBootstrap = true
	})
	defer s2.Shutdown()

	s3 := testServer(t, func(c *Config) {
		c.NumSchedulers = 0
		c.DevDisableBootstrap = true
	})
	defer s3.Shutdown()
	servers := []*Server{s1, s2, s3}
	testJoin(t, s1, s2, s3)
	testutil.WaitForLeader(t, s1.RPC)

	for _, s := range servers {
		testutil.WaitForResult(func() (bool, error) {
			peers, _ := s.numPeers()
			return peers == 3, nil
		}, func(err error) {
			t.Fatalf("should have 3 peers")
		})
	}

	var leader *Server
	for _, s := range servers {
		if s.IsLeader() {
			leader = s
			break
		}
	}
	if leader == nil {
		t.Fatalf("should have a leader")
	}

	// Inject a periodic job, a parameterized periodic job and a non-periodic job
	periodic := mock.PeriodicJob()
	nonPeriodic := mock.Job()
	parameterizedPeriodic := mock.PeriodicJob()
	parameterizedPeriodic.ParameterizedJob = &structs.ParameterizedJobConfig{}
	for _, job := range []*structs.Job{nonPeriodic, periodic, parameterizedPeriodic} {
		req := structs.JobRegisterRequest{
			Job: job,
			WriteRequest: structs.WriteRequest{
				Namespace: job.Namespace,
			},
		}
		_, _, err := leader.raftApply(structs.JobRegisterRequestType, req)
		if err != nil {
			t.Fatalf("err: %v", err)
		}
	}

	// Kill the leader
	leader.Shutdown()
	time.Sleep(100 * time.Millisecond)

	// Wait for a new leader
	leader = nil
	testutil.WaitForResult(func() (bool, error) {
		for _, s := range servers {
			if s.IsLeader() {
				leader = s
				return true, nil
			}
		}
		return false, nil
	}, func(err error) {
		t.Fatalf("should have leader")
	})

	tuplePeriodic := structs.NamespacedID{
		ID:        periodic.ID,
		Namespace: periodic.Namespace,
	}
	tupleNonPeriodic := structs.NamespacedID{
		ID:        nonPeriodic.ID,
		Namespace: nonPeriodic.Namespace,
	}
	tupleParameterized := structs.NamespacedID{
		ID:        parameterizedPeriodic.ID,
		Namespace: parameterizedPeriodic.Namespace,
	}

	// Check that the new leader is tracking the periodic job only
	testutil.WaitForResult(func() (bool, error) {
		if _, tracked := leader.periodicDispatcher.tracked[tuplePeriodic]; !tracked {
			return false, fmt.Errorf("periodic job not tracked")
		}
		if _, tracked := leader.periodicDispatcher.tracked[tupleNonPeriodic]; tracked {
			return false, fmt.Errorf("non periodic job tracked")
		}
		if _, tracked := leader.periodicDispatcher.tracked[tupleParameterized]; tracked {
			return false, fmt.Errorf("parameterized periodic job tracked")
		}
		return true, nil
	}, func(err error) {
		t.Fatalf("%s", err)
	})
}

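// TestLeader_PeriodicDispatcher_Restore_NoEvals verifies that restoring the
// periodic dispatcher forces a launch for a periodic job whose launch time
// was missed while the dispatcher was disabled.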
func TestLeader_PeriodicDispatcher_Restore_NoEvals(t *testing.T) {
	s1 := testServer(t, func(c *Config) {
		c.NumSchedulers = 0
	})
	defer s1.Shutdown()
	testutil.WaitForLeader(t, s1.RPC)

	// Inject a periodic job that will be triggered soon.
	launch := time.Now().Add(1 * time.Second)
	job := testPeriodicJob(launch)
	req := structs.JobRegisterRequest{
		Job: job,
		WriteRequest: structs.WriteRequest{
			Namespace: job.Namespace,
		},
	}
	_, _, err := s1.raftApply(structs.JobRegisterRequestType, req)
	if err != nil {
		t.Fatalf("err: %v", err)
	}

	// Disable the periodic dispatcher, ensuring that no evals will be created
	// while we wait.
	s1.periodicDispatcher.SetEnabled(false)

	// Get the current time to ensure the launch time is after this once we
	// restore.
	now := time.Now()

	// Sleep till after the job should have been launched.
	time.Sleep(3 * time.Second)

	// Restore the periodic dispatcher.
	s1.periodicDispatcher.SetEnabled(true)
	s1.restorePeriodicDispatcher()

	// Ensure the job is tracked.
	tuple := structs.NamespacedID{
		ID:        job.ID,
		Namespace: job.Namespace,
	}
	if _, tracked := s1.periodicDispatcher.tracked[tuple]; !tracked {
		t.Fatalf("periodic job not restored")
	}

	// Check that the restore recorded a launch for the missed interval.
	ws := memdb.NewWatchSet()
	last, err := s1.fsm.State().PeriodicLaunchByID(ws, job.Namespace, job.ID)
	if err != nil || last == nil {
		t.Fatalf("failed to get periodic launch time: %v", err)
	}

	if last.Launch.Before(now) {
		t.Fatalf("restorePeriodicDispatcher did not force launch: last %v; want after %v", last.Launch, now)
	}
}

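// TestLeader_PeriodicDispatcher_Restore_Evals verifies that restoring the
// periodic dispatcher creates an eval for a missed launch even when an
// earlier launch was already recorded.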
func TestLeader_PeriodicDispatcher_Restore_Evals(t *testing.T) {
	s1 := testServer(t, func(c *Config) {
		c.NumSchedulers = 0
	})
	defer s1.Shutdown()
	testutil.WaitForLeader(t, s1.RPC)

	// Inject a periodic job that triggered once in the past, should trigger now
	// and once in the future.
	now := time.Now()
	past := now.Add(-1 * time.Second)
	future := now.Add(10 * time.Second)
	job := testPeriodicJob(past, now, future)
	req := structs.JobRegisterRequest{
		Job: job,
		WriteRequest: structs.WriteRequest{
			Namespace: job.Namespace,
		},
	}
	_, _, err := s1.raftApply(structs.JobRegisterRequestType, req)
	if err != nil {
		t.Fatalf("err: %v", err)
	}

	// Create an eval for the past launch.
	s1.periodicDispatcher.createEval(job, past)

	// Disable the periodic dispatcher, ensuring that no evals will be created
	// while we wait.
	s1.periodicDispatcher.SetEnabled(false)

	// Sleep till after the job should have been launched.
	time.Sleep(3 * time.Second)

	// Restore the periodic dispatcher.
	s1.periodicDispatcher.SetEnabled(true)
	s1.restorePeriodicDispatcher()

	// Ensure the job is tracked.
	tuple := structs.NamespacedID{
		ID:        job.ID,
		Namespace: job.Namespace,
	}
	if _, tracked := s1.periodicDispatcher.tracked[tuple]; !tracked {
		t.Fatalf("periodic job not restored")
	}

	// Check that the missed launch was recorded, i.e. an eval was made.
	ws := memdb.NewWatchSet()
	last, err := s1.fsm.State().PeriodicLaunchByID(ws, job.Namespace, job.ID)
	if err != nil || last == nil {
		t.Fatalf("failed to get periodic launch time: %v", err)
	}
	if last.Launch.Equal(past) {
		t.Fatalf("restorePeriodicDispatcher did not force launch")
	}
}

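// TestLeader_PeriodicDispatch verifies that the leader periodically enqueues
// core-scheduler evaluations; the short EvalGCInterval makes one appear
// quickly.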
func TestLeader_PeriodicDispatch(t *testing.T) {
	s1 := testServer(t, func(c *Config) {
		c.NumSchedulers = 0
		c.EvalGCInterval = 5 * time.Millisecond
	})
	defer s1.Shutdown()

	// Wait for a periodic dispatch
	testutil.WaitForResult(func() (bool, error) {
		stats := s1.evalBroker.Stats()
		bySched, ok := stats.ByScheduler[structs.JobTypeCore]
		if !ok {
			return false, nil
		}
		return bySched.Ready > 0, nil
	}, func(err error) {
		t.Fatalf("should have a pending core job eval")
	})
}

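// TestLeader_ReapFailedEval verifies that an eval that exhausts its delivery
// limit is marked failed and that a delayed follow-up eval is created.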
func TestLeader_ReapFailedEval(t *testing.T) {
	s1 := testServer(t, func(c *Config) {
		c.NumSchedulers = 0
		c.EvalDeliveryLimit = 1
	})
	defer s1.Shutdown()
	testutil.WaitForLeader(t, s1.RPC)

	// Enqueue an evaluation
	eval := mock.Eval()
	s1.evalBroker.Enqueue(eval)

	// Dequeue and Nack, exhausting the single delivery attempt
	out, token, err := s1.evalBroker.Dequeue(defaultSched, time.Second)
	if err != nil {
		t.Fatalf("err: %v", err)
	}
	s1.evalBroker.Nack(out.ID, token)

	// Wait for the eval to be marked failed and for a follow-up evaluation
	state := s1.fsm.State()
	testutil.WaitForResult(func() (bool, error) {
		ws := memdb.NewWatchSet()
		out, err := state.EvalByID(ws, eval.ID)
		if err != nil {
			return false, err
		}
		if out == nil {
			return false, fmt.Errorf("expect original evaluation to exist")
		}
		if out.Status != structs.EvalStatusFailed {
			return false, fmt.Errorf("got status %v; want %v", out.Status, structs.EvalStatusFailed)
		}

		// See if there is a follow-up
		evals, err := state.EvalsByJob(ws, eval.Namespace, eval.JobID)
		if err != nil {
			return false, err
		}

		if l := len(evals); l != 2 {
			return false, fmt.Errorf("got %d evals, want 2", l)
		}

		for _, e := range evals {
			if e.ID == eval.ID {
				continue
			}

			if e.Status != structs.EvalStatusPending {
				return false, fmt.Errorf("follow up eval has status %v; want %v",
					e.Status, structs.EvalStatusPending)
			}

			if e.Wait < s1.config.EvalFailedFollowupBaselineDelay ||
				e.Wait > s1.config.EvalFailedFollowupBaselineDelay+s1.config.EvalFailedFollowupDelayRange {
				return false, fmt.Errorf("bad wait: %v", e.Wait)
			}

			if e.TriggeredBy != structs.EvalTriggerFailedFollowUp {
				return false, fmt.Errorf("follow up eval TriggeredBy %v; want %v",
					e.TriggeredBy, structs.EvalTriggerFailedFollowUp)
			}
		}

		return true, nil
	}, func(err error) {
		t.Fatalf("err: %v", err)
	})
}

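// TestLeader_ReapDuplicateEval verifies that when two blocked evals exist for
// the same job, the duplicate is cancelled.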
func TestLeader_ReapDuplicateEval(t *testing.T) {
	s1 := testServer(t, func(c *Config) {
		c.NumSchedulers = 0
	})
	defer s1.Shutdown()
	testutil.WaitForLeader(t, s1.RPC)

	// Create a duplicate blocked eval
	eval := mock.Eval()
	eval2 := mock.Eval()
	eval2.JobID = eval.JobID
	s1.blockedEvals.Block(eval)
	s1.blockedEvals.Block(eval2)

	// Wait for the duplicate evaluation to be marked as cancelled
	state := s1.fsm.State()
	testutil.WaitForResult(func() (bool, error) {
		ws := memdb.NewWatchSet()
		out, err := state.EvalByID(ws, eval2.ID)
		if err != nil {
			return false, err
		}
		return out != nil && out.Status == structs.EvalStatusCancelled, nil
	}, func(err error) {
		t.Fatalf("err: %v", err)
	})
}

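// TestLeader_RestoreVaultAccessors verifies that restoreRevokingAccessors
// revokes Vault accessors that are persisted in state.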
func TestLeader_RestoreVaultAccessors(t *testing.T) {
	s1 := testServer(t, func(c *Config) {
		c.NumSchedulers = 0
	})
	defer s1.Shutdown()
	testutil.WaitForLeader(t, s1.RPC)

	// Insert a vault accessor that should be revoked
	state := s1.fsm.State()
	va := mock.VaultAccessor()
	if err := state.UpsertVaultAccessor(100, []*structs.VaultAccessor{va}); err != nil {
		t.Fatalf("bad: %v", err)
	}

	// Swap the Vault client
	tvc := &TestVaultClient{}
	s1.vault = tvc

	// Do a restore
	if err := s1.restoreRevokingAccessors(); err != nil {
		t.Fatalf("failed to restore: %v", err)
	}

	if len(tvc.RevokedTokens) != 1 || tvc.RevokedTokens[0].Accessor != va.Accessor {
		t.Fatalf("bad revoked accessors: %v", tvc.RevokedTokens)
	}
}

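// TestLeader_ReplicateACLPolicies verifies that ACL policies written to the
// authoritative region are replicated to a non-authoritative region.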
func TestLeader_ReplicateACLPolicies(t *testing.T) {
	t.Parallel()
	s1, root := testACLServer(t, func(c *Config) {
		c.Region = "region1"
		c.AuthoritativeRegion = "region1"
		c.ACLEnabled = true
	})
	defer s1.Shutdown()
	s2, _ := testACLServer(t, func(c *Config) {
		c.Region = "region2"
		c.AuthoritativeRegion = "region1"
		c.ACLEnabled = true
		c.ReplicationBackoff = 20 * time.Millisecond
		c.ReplicationToken = root.SecretID
	})
	defer s2.Shutdown()
	testJoin(t, s1, s2)
	testutil.WaitForLeader(t, s1.RPC)
	testutil.WaitForLeader(t, s2.RPC)

	// Write a policy to the authoritative region
	p1 := mock.ACLPolicy()
	if err := s1.State().UpsertACLPolicies(100, []*structs.ACLPolicy{p1}); err != nil {
		t.Fatalf("bad: %v", err)
	}

	// Wait for the policy to replicate
	testutil.WaitForResult(func() (bool, error) {
		state := s2.State()
		out, err := state.ACLPolicyByName(nil, p1.Name)
		return out != nil, err
	}, func(err error) {
		t.Fatalf("should replicate policy")
	})
}

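// TestLeader_DiffACLPolicies verifies that diffACLPolicies marks local-only
// policies for deletion and modified or new remote policies for update.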
func TestLeader_DiffACLPolicies(t *testing.T) {
	t.Parallel()

	state := state.TestStateStore(t)

	// Populate the local state
	p1 := mock.ACLPolicy()
	p2 := mock.ACLPolicy()
	p3 := mock.ACLPolicy()
	assert.Nil(t, state.UpsertACLPolicies(100, []*structs.ACLPolicy{p1, p2, p3}))

	// Simulate a remote list
	p2Stub := p2.Stub()
	p2Stub.ModifyIndex = 50 // Ignored, same index
	p3Stub := p3.Stub()
	p3Stub.ModifyIndex = 100 // Updated, higher index
	p3Stub.Hash = []byte{0, 1, 2, 3}
	p4 := mock.ACLPolicy()
	remoteList := []*structs.ACLPolicyListStub{
		p2Stub,
		p3Stub,
		p4.Stub(),
	}
	del, update := diffACLPolicies(state, 50, remoteList)

	// p1 does not exist on the remote side, so it should be deleted
	assert.Equal(t, []string{p1.Name}, del)

	// p2 is unmodified and ignored; p3 is modified, p4 is new
	assert.Equal(t, []string{p3.Name, p4.Name}, update)
}

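// TestLeader_ReplicateACLTokens verifies that global ACL tokens written to
// the authoritative region are replicated to a non-authoritative region.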
func TestLeader_ReplicateACLTokens(t *testing.T) {
	t.Parallel()
	s1, root := testACLServer(t, func(c *Config) {
		c.Region = "region1"
		c.AuthoritativeRegion = "region1"
		c.ACLEnabled = true
	})
	defer s1.Shutdown()
	s2, _ := testACLServer(t, func(c *Config) {
		c.Region = "region2"
		c.AuthoritativeRegion = "region1"
		c.ACLEnabled = true
		c.ReplicationBackoff = 20 * time.Millisecond
		c.ReplicationToken = root.SecretID
	})
	defer s2.Shutdown()
	testJoin(t, s1, s2)
	testutil.WaitForLeader(t, s1.RPC)
	testutil.WaitForLeader(t, s2.RPC)

	// Write a token to the authoritative region
	p1 := mock.ACLToken()
	p1.Global = true
	if err := s1.State().UpsertACLTokens(100, []*structs.ACLToken{p1}); err != nil {
		t.Fatalf("bad: %v", err)
	}

	// Wait for the token to replicate
	testutil.WaitForResult(func() (bool, error) {
		state := s2.State()
		out, err := state.ACLTokenByAccessorID(nil, p1.AccessorID)
		return out != nil, err
	}, func(err error) {
		t.Fatalf("should replicate token")
	})
}

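// TestLeader_DiffACLTokens verifies that diffACLTokens ignores local tokens,
// marks local-only global tokens for deletion, and marks modified or new
// remote tokens for update.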
func TestLeader_DiffACLTokens(t *testing.T) {
	t.Parallel()

	state := state.TestStateStore(t)

	// Populate the local state
	p0 := mock.ACLToken()
	p1 := mock.ACLToken()
	p1.Global = true
	p2 := mock.ACLToken()
	p2.Global = true
	p3 := mock.ACLToken()
	p3.Global = true
	assert.Nil(t, state.UpsertACLTokens(100, []*structs.ACLToken{p0, p1, p2, p3}))

	// Simulate a remote list
	p2Stub := p2.Stub()
	p2Stub.ModifyIndex = 50 // Ignored, same index
	p3Stub := p3.Stub()
	p3Stub.ModifyIndex = 100 // Updated, higher index
	p3Stub.Hash = []byte{0, 1, 2, 3}
	p4 := mock.ACLToken()
	p4.Global = true
	remoteList := []*structs.ACLTokenListStub{
		p2Stub,
		p3Stub,
		p4.Stub(),
	}
	del, update := diffACLTokens(state, 50, remoteList)

	// p0 is local and should be ignored;
	// p1 does not exist on the remote side, so it should be deleted
	assert.Equal(t, []string{p1.AccessorID}, del)

	// p2 is unmodified and ignored; p3 is modified, p4 is new
	assert.Equal(t, []string{p3.AccessorID, p4.AccessorID}, update)
}