github.com/muhammadn/cortex@v1.9.1-0.20220510110439-46bb7000d03d/pkg/ruler/ruler_test.go

package ruler

import (
	"context"
	"fmt"
	"io/ioutil"
	"math/rand"
	"net/http"
	"net/http/httptest"
	"os"
	"path/filepath"
	"reflect"
	"sort"
	"strings"
	"sync"
	"testing"
	"time"
	"unsafe"

	"github.com/prometheus/common/model"
	"github.com/stretchr/testify/mock"

	"github.com/cortexproject/cortex/pkg/chunk/purger"
	"github.com/cortexproject/cortex/pkg/querier"
	"github.com/cortexproject/cortex/pkg/util/validation"

	"go.uber.org/atomic"

	"google.golang.org/grpc"

	"github.com/go-kit/log"
	"github.com/go-kit/log/level"
	"github.com/gorilla/mux"
	"github.com/grafana/dskit/flagext"
	"github.com/grafana/dskit/kv"
	"github.com/grafana/dskit/kv/consul"
	"github.com/grafana/dskit/ring"
	"github.com/grafana/dskit/services"
	"github.com/prometheus/client_golang/prometheus"
	prom_testutil "github.com/prometheus/client_golang/prometheus/testutil"
	"github.com/prometheus/prometheus/notifier"
	"github.com/prometheus/prometheus/pkg/labels"
	"github.com/prometheus/prometheus/pkg/rulefmt"
	"github.com/prometheus/prometheus/promql"
	promRules "github.com/prometheus/prometheus/rules"
	"github.com/prometheus/prometheus/storage"
	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
	"github.com/weaveworks/common/user"
	"gopkg.in/yaml.v2"

	"github.com/cortexproject/cortex/pkg/chunk"
	"github.com/cortexproject/cortex/pkg/cortexpb"
	"github.com/cortexproject/cortex/pkg/ruler/rulespb"
	"github.com/cortexproject/cortex/pkg/ruler/rulestore"
	"github.com/cortexproject/cortex/pkg/ruler/rulestore/objectclient"
	"github.com/cortexproject/cortex/pkg/tenant"
	"github.com/cortexproject/cortex/pkg/util"
)

func defaultRulerConfig(t testing.TB, store rulestore.RuleStore) (Config, func()) {
	t.Helper()

	// Create a new temporary directory for the rules, so that
	// each test runs in isolation.
	rulesDir, err := ioutil.TempDir("/tmp", "ruler-tests")
	require.NoError(t, err)

	codec := ring.GetCodec()
	consul, closer := consul.NewInMemoryClient(codec, log.NewNopLogger(), nil)
	t.Cleanup(func() { assert.NoError(t, closer.Close()) })

	cfg := Config{}
	flagext.DefaultValues(&cfg)
	cfg.RulePath = rulesDir
	cfg.StoreConfig.mock = store
	cfg.Ring.KVStore.Mock = consul
	cfg.Ring.NumTokens = 1
	cfg.Ring.ListenPort = 0
	cfg.Ring.InstanceAddr = "localhost"
	cfg.Ring.InstanceID = "localhost"
	cfg.EnableQueryStats = false

	// Create a cleanup function that will be called at the end of the test.
	cleanup := func() {
		os.RemoveAll(rulesDir)
	}

	return cfg, cleanup
}
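
// Typical usage, mirroring the tests below (shown here as a sketch only):
//
//	cfg, cleanup := defaultRulerConfig(t, newMockRuleStore(nil))
//	defer cleanup()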

type ruleLimits struct {
	evalDelay            time.Duration
	tenantShard          int
	maxRulesPerRuleGroup int
	maxRuleGroups        int
}

func (r ruleLimits) EvaluationDelay(_ string) time.Duration {
	return r.evalDelay
}

func (r ruleLimits) RulerTenantShardSize(_ string) int {
	return r.tenantShard
}

func (r ruleLimits) RulerMaxRuleGroupsPerTenant(_ string) int {
	return r.maxRuleGroups
}

func (r ruleLimits) RulerMaxRulesPerRuleGroup(_ string) int {
	return r.maxRulesPerRuleGroup
}

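// Hedged compile-time check (not in the original file): testSetup below hands
// a ruleLimits out as the RulesLimits interface, so this assertion should hold.
var _ RulesLimits = ruleLimits{}

// emptyChunkStore returns no chunks, but records whether Get was called so
// tests can assert that the chunk store was (or was not) queried.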
type emptyChunkStore struct {
	sync.Mutex
	called bool
}

func (c *emptyChunkStore) Get(ctx context.Context, userID string, from, through model.Time, matchers ...*labels.Matcher) ([]chunk.Chunk, error) {
	c.Lock()
	defer c.Unlock()
	c.called = true
	return nil, nil
}

func (c *emptyChunkStore) IsCalled() bool {
	c.Lock()
	defer c.Unlock()
	return c.called
}

func testQueryableFunc(querierTestConfig *querier.TestConfig, reg prometheus.Registerer, logger log.Logger) storage.QueryableFunc {
	if querierTestConfig != nil {
		// disable active query tracking for test
		querierTestConfig.Cfg.ActiveQueryTrackerDir = ""

		overrides, _ := validation.NewOverrides(querier.DefaultLimitsConfig(), nil)
		q, _, _ := querier.New(querierTestConfig.Cfg, overrides, querierTestConfig.Distributor, querierTestConfig.Stores, purger.NewTombstonesLoader(nil, nil), reg, logger)
		return func(ctx context.Context, mint, maxt int64) (storage.Querier, error) {
			return q.Querier(ctx, mint, maxt)
		}
	}

	return func(ctx context.Context, mint, maxt int64) (storage.Querier, error) {
		return storage.NoopQuerier(), nil
	}
}
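
// Hedged usage sketch (assumption: this mirrors how the rules manager opens
// the queryable; mint and maxt are millisecond timestamps):
//
//	q, err := queryable(ctx, mint, maxt)
//	if err == nil {
//		defer q.Close()
//	}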
   148  
   149  func testSetup(t *testing.T, querierTestConfig *querier.TestConfig) (*promql.Engine, storage.QueryableFunc, Pusher, log.Logger, RulesLimits, prometheus.Registerer, func()) {
   150  	dir, err := ioutil.TempDir("", filepath.Base(t.Name()))
   151  	assert.NoError(t, err)
   152  	cleanup := func() {
   153  		os.RemoveAll(dir)
   154  	}
   155  
   156  	tracker := promql.NewActiveQueryTracker(dir, 20, log.NewNopLogger())
   157  
   158  	engine := promql.NewEngine(promql.EngineOpts{
   159  		MaxSamples:         1e6,
   160  		ActiveQueryTracker: tracker,
   161  		Timeout:            2 * time.Minute,
   162  	})
   163  
   164  	// Mock the pusher
   165  	pusher := newPusherMock()
   166  	pusher.MockPush(&cortexpb.WriteResponse{}, nil)
   167  
   168  	l := log.NewLogfmtLogger(os.Stdout)
   169  	l = level.NewFilter(l, level.AllowInfo())
   170  
   171  	reg := prometheus.NewRegistry()
   172  	queryable := testQueryableFunc(querierTestConfig, reg, l)
   173  
   174  	return engine, queryable, pusher, l, ruleLimits{evalDelay: 0, maxRuleGroups: 20, maxRulesPerRuleGroup: 15}, reg, cleanup
   175  }

func newManager(t *testing.T, cfg Config) (*DefaultMultiTenantManager, func()) {
	engine, queryable, pusher, logger, overrides, reg, cleanup := testSetup(t, nil)
	manager, err := NewDefaultMultiTenantManager(cfg, DefaultTenantManagerFactory(cfg, pusher, queryable, engine, overrides, nil), reg, logger)
	require.NoError(t, err)

	return manager, cleanup
}

type mockRulerClientsPool struct {
	ClientsPool
	cfg           Config
	rulerAddrMap  map[string]*Ruler
	numberOfCalls atomic.Int32
}

type mockRulerClient struct {
	ruler         *Ruler
	numberOfCalls *atomic.Int32
}

func (c *mockRulerClient) Rules(ctx context.Context, in *RulesRequest, _ ...grpc.CallOption) (*RulesResponse, error) {
	c.numberOfCalls.Inc()
	return c.ruler.Rules(ctx, in)
}

func (p *mockRulerClientsPool) GetClientFor(addr string) (RulerClient, error) {
	for _, r := range p.rulerAddrMap {
		if r.lifecycler.GetInstanceAddr() == addr {
			return &mockRulerClient{
				ruler:         r,
				numberOfCalls: &p.numberOfCalls,
			}, nil
		}
	}

	return nil, fmt.Errorf("unable to find ruler for addr %s", addr)
}

func newMockClientsPool(cfg Config, logger log.Logger, reg prometheus.Registerer, rulerAddrMap map[string]*Ruler) *mockRulerClientsPool {
	return &mockRulerClientsPool{
		ClientsPool:  newRulerClientPool(cfg.ClientTLSConfig, logger, reg),
		cfg:          cfg,
		rulerAddrMap: rulerAddrMap,
	}
}
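
// Hedged aside (not in the original): because mockRulerClientsPool embeds
// ClientsPool, only GetClientFor is overridden here; every other ClientsPool
// method falls through to the real pool created by newRulerClientPool.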

func buildRuler(t *testing.T, rulerConfig Config, querierTestConfig *querier.TestConfig, rulerAddrMap map[string]*Ruler) (*Ruler, func()) {
	engine, queryable, pusher, logger, overrides, reg, cleanup := testSetup(t, querierTestConfig)
	storage, err := NewLegacyRuleStore(rulerConfig.StoreConfig, promRules.FileLoader{}, log.NewNopLogger())
	require.NoError(t, err)

	managerFactory := DefaultTenantManagerFactory(rulerConfig, pusher, queryable, engine, overrides, reg)
	manager, err := NewDefaultMultiTenantManager(rulerConfig, managerFactory, reg, log.NewNopLogger())
	require.NoError(t, err)

	ruler, err := newRuler(
		rulerConfig,
		manager,
		reg,
		logger,
		storage,
		overrides,
		newMockClientsPool(rulerConfig, logger, reg, rulerAddrMap),
	)
	require.NoError(t, err)
	return ruler, cleanup
}

func newTestRuler(t *testing.T, rulerConfig Config, querierTestConfig *querier.TestConfig) (*Ruler, func()) {
	ruler, cleanup := buildRuler(t, rulerConfig, querierTestConfig, nil)
	require.NoError(t, services.StartAndAwaitRunning(context.Background(), ruler))

	// Ensure all rules are loaded before usage
	ruler.syncRules(context.Background(), rulerSyncReasonInitial)

	return ruler, cleanup
}

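// Compile-time check that DefaultMultiTenantManager implements the
// MultiTenantManager interface.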
var _ MultiTenantManager = &DefaultMultiTenantManager{}

func TestNotifierSendsUserIDHeader(t *testing.T) {
	var wg sync.WaitGroup

	// We expect one API call, for the user created with getOrCreateNotifier().
	wg.Add(1)
	ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		userID, _, err := tenant.ExtractTenantIDFromHTTPRequest(r)
		assert.NoError(t, err)
		assert.Equal(t, userID, "1")
		wg.Done()
	}))
	defer ts.Close()

	// We create an empty rule store so that the ruler will not load any rule from it.
	cfg, cleanup := defaultRulerConfig(t, newMockRuleStore(nil))
	defer cleanup()

	cfg.AlertmanagerURL = ts.URL
	cfg.AlertmanagerDiscovery = false

	manager, rcleanup := newManager(t, cfg)
	defer rcleanup()
	defer manager.Stop()

	n, err := manager.getOrCreateNotifier("1")
	require.NoError(t, err)

	// Loop until notifier discovery syncs up.
	for len(n.Alertmanagers()) == 0 {
		time.Sleep(10 * time.Millisecond)
	}
	n.Send(&notifier.Alert{
		Labels: labels.Labels{labels.Label{Name: "alertname", Value: "testalert"}},
	})

	wg.Wait()

	// Ensure we have metrics in the notifier.
	assert.NoError(t, prom_testutil.GatherAndCompare(manager.registry.(*prometheus.Registry), strings.NewReader(`
		# HELP cortex_prometheus_notifications_dropped_total Total number of alerts dropped due to errors when sending to Alertmanager.
		# TYPE cortex_prometheus_notifications_dropped_total counter
		cortex_prometheus_notifications_dropped_total{user="1"} 0
	`), "cortex_prometheus_notifications_dropped_total"))
}

func TestRuler_Rules(t *testing.T) {
	cfg, cleanup := defaultRulerConfig(t, newMockRuleStore(mockRules))
	defer cleanup()

	r, rcleanup := newTestRuler(t, cfg, nil)
	defer rcleanup()
	defer services.StopAndAwaitTerminated(context.Background(), r) //nolint:errcheck

	// test user1
	ctx := user.InjectOrgID(context.Background(), "user1")
	rls, err := r.Rules(ctx, &RulesRequest{})
	require.NoError(t, err)
	require.Len(t, rls.Groups, 1)
	rg := rls.Groups[0]
	expectedRg := mockRules["user1"][0]
	compareRuleGroupDescToStateDesc(t, expectedRg, rg)

	// test user2
	ctx = user.InjectOrgID(context.Background(), "user2")
	rls, err = r.Rules(ctx, &RulesRequest{})
	require.NoError(t, err)
	require.Len(t, rls.Groups, 1)
	rg = rls.Groups[0]
	expectedRg = mockRules["user2"][0]
	compareRuleGroupDescToStateDesc(t, expectedRg, rg)
}

func compareRuleGroupDescToStateDesc(t *testing.T, expected *rulespb.RuleGroupDesc, got *GroupStateDesc) {
	require.Equal(t, got.Group.Name, expected.Name)
	require.Equal(t, got.Group.Namespace, expected.Namespace)
	require.Len(t, expected.Rules, len(got.ActiveRules))
	for i := range got.ActiveRules {
		require.Equal(t, expected.Rules[i].Record, got.ActiveRules[i].Rule.Record)
		require.Equal(t, expected.Rules[i].Alert, got.ActiveRules[i].Rule.Alert)
	}
}

func TestGetRules(t *testing.T) {
	// ruler ID -> (user ID -> list of groups).
	type expectedRulesMap map[string]map[string]rulespb.RuleGroupList

	type testCase struct {
		sharding         bool
		shardingStrategy string
		shuffleShardSize int
	}

	expectedRules := expectedRulesMap{
		"ruler1": map[string]rulespb.RuleGroupList{
			"user1": {
				&rulespb.RuleGroupDesc{User: "user1", Namespace: "namespace", Name: "first", Interval: 10 * time.Second},
				&rulespb.RuleGroupDesc{User: "user1", Namespace: "namespace", Name: "second", Interval: 10 * time.Second},
			},
			"user2": {
				&rulespb.RuleGroupDesc{User: "user2", Namespace: "namespace", Name: "third", Interval: 10 * time.Second},
			},
		},
		"ruler2": map[string]rulespb.RuleGroupList{
			"user1": {
				&rulespb.RuleGroupDesc{User: "user1", Namespace: "namespace", Name: "third", Interval: 10 * time.Second},
			},
			"user2": {
				&rulespb.RuleGroupDesc{User: "user2", Namespace: "namespace", Name: "first", Interval: 10 * time.Second},
				&rulespb.RuleGroupDesc{User: "user2", Namespace: "namespace", Name: "second", Interval: 10 * time.Second},
			},
		},
		"ruler3": map[string]rulespb.RuleGroupList{
			"user3": {
				&rulespb.RuleGroupDesc{User: "user3", Namespace: "namespace", Name: "third", Interval: 10 * time.Second},
			},
			"user2": {
				&rulespb.RuleGroupDesc{User: "user2", Namespace: "namespace", Name: "fourth", Interval: 10 * time.Second},
				&rulespb.RuleGroupDesc{User: "user2", Namespace: "namespace", Name: "fifth", Interval: 10 * time.Second},
			},
		},
	}

	testCases := map[string]testCase{
		"No Sharding": {
			sharding: false,
		},
		"Default Sharding": {
			sharding:         true,
			shardingStrategy: util.ShardingStrategyDefault,
		},
		"Shuffle Sharding and ShardSize = 2": {
			sharding:         true,
			shuffleShardSize: 2,
			shardingStrategy: util.ShardingStrategyShuffle,
		},
	}

	for name, tc := range testCases {
		t.Run(name, func(t *testing.T) {
			kvStore, cleanUp := consul.NewInMemoryClient(ring.GetCodec(), log.NewNopLogger(), nil)
			t.Cleanup(func() { assert.NoError(t, cleanUp.Close()) })
			allRulesByUser := map[string]rulespb.RuleGroupList{}
			allRulesByRuler := map[string]rulespb.RuleGroupList{}
			allTokensByRuler := map[string][]uint32{}
			rulerAddrMap := map[string]*Ruler{}

			createRuler := func(id string) *Ruler {
				cfg, cleanUp := defaultRulerConfig(t, newMockRuleStore(allRulesByUser))
				t.Cleanup(cleanUp)

				cfg.ShardingStrategy = tc.shardingStrategy
				cfg.EnableSharding = tc.sharding

				cfg.Ring = RingConfig{
					InstanceID:   id,
					InstanceAddr: id,
					KVStore: kv.Config{
						Mock: kvStore,
					},
				}

				r, cleanUp := buildRuler(t, cfg, nil, rulerAddrMap)
				r.limits = ruleLimits{evalDelay: 0, tenantShard: tc.shuffleShardSize}
				t.Cleanup(cleanUp)
				rulerAddrMap[id] = r
				if r.ring != nil {
					require.NoError(t, services.StartAndAwaitRunning(context.Background(), r.ring))
					t.Cleanup(r.ring.StopAsync)
				}
				return r
			}

			for rID, r := range expectedRules {
				createRuler(rID)
				for user, rules := range r {
					allRulesByUser[user] = append(allRulesByUser[user], rules...)
					allRulesByRuler[rID] = append(allRulesByRuler[rID], rules...)
					allTokensByRuler[rID] = generateTokenForGroups(rules, 1)
				}
			}

			if tc.sharding {
				err := kvStore.CAS(context.Background(), ring.RulerRingKey, func(in interface{}) (out interface{}, retry bool, err error) {
					d, _ := in.(*ring.Desc)
					if d == nil {
						d = ring.NewDesc()
					}
					for rID, tokens := range allTokensByRuler {
						d.AddIngester(rID, rulerAddrMap[rID].lifecycler.GetInstanceAddr(), "", tokens, ring.ACTIVE, time.Now())
					}
					return d, true, nil
				})
				require.NoError(t, err)
				// Wait a bit to make sure ruler's ring is updated.
				time.Sleep(100 * time.Millisecond)
			}

			forEachRuler := func(f func(rID string, r *Ruler)) {
				for rID, r := range rulerAddrMap {
					f(rID, r)
				}
			}

			// Sync Rules
			forEachRuler(func(_ string, r *Ruler) {
				r.syncRules(context.Background(), rulerSyncReasonInitial)
			})

			for u := range allRulesByUser {
				ctx := user.InjectOrgID(context.Background(), u)
				forEachRuler(func(_ string, r *Ruler) {
					rules, err := r.GetRules(ctx)
					require.NoError(t, err)
					require.Equal(t, len(allRulesByUser[u]), len(rules))
					if tc.sharding {
						mockPoolClient := r.clientsPool.(*mockRulerClientsPool)

						if tc.shardingStrategy == util.ShardingStrategyShuffle {
							require.Equal(t, int32(tc.shuffleShardSize), mockPoolClient.numberOfCalls.Load())
						} else {
							require.Equal(t, int32(len(rulerAddrMap)), mockPoolClient.numberOfCalls.Load())
						}
						mockPoolClient.numberOfCalls.Store(0)
					}
				})
			}

			totalLoadedRules := 0
			totalConfiguredRules := 0

			forEachRuler(func(rID string, r *Ruler) {
				localRules, err := r.listRules(context.Background())
				require.NoError(t, err)
				for _, rules := range localRules {
					totalLoadedRules += len(rules)
				}
				totalConfiguredRules += len(allRulesByRuler[rID])
			})

			if tc.sharding {
				require.Equal(t, totalConfiguredRules, totalLoadedRules)
			} else {
				// Not sharding means that all rules will be loaded on all rulers
				numberOfRulers := len(rulerAddrMap)
				require.Equal(t, totalConfiguredRules*numberOfRulers, totalLoadedRules)
			}
		})
	}
}

func TestSharding(t *testing.T) {
	const (
		user1 = "user1"
		user2 = "user2"
		user3 = "user3"
	)

	user1Group1 := &rulespb.RuleGroupDesc{User: user1, Namespace: "namespace", Name: "first"}
	user1Group2 := &rulespb.RuleGroupDesc{User: user1, Namespace: "namespace", Name: "second"}
	user2Group1 := &rulespb.RuleGroupDesc{User: user2, Namespace: "namespace", Name: "first"}
	user3Group1 := &rulespb.RuleGroupDesc{User: user3, Namespace: "namespace", Name: "first"}

	// Must be distinct for test to work.
	user1Group1Token := tokenForGroup(user1Group1)
	user1Group2Token := tokenForGroup(user1Group2)
	user2Group1Token := tokenForGroup(user2Group1)
	user3Group1Token := tokenForGroup(user3Group1)

	noRules := map[string]rulespb.RuleGroupList{}
	allRules := map[string]rulespb.RuleGroupList{
		user1: {user1Group1, user1Group2},
		user2: {user2Group1},
		user3: {user3Group1},
	}

	// ruler ID -> (user ID -> list of groups).
	type expectedRulesMap map[string]map[string]rulespb.RuleGroupList

	type testCase struct {
		sharding         bool
		shardingStrategy string
		shuffleShardSize int
		setupRing        func(*ring.Desc)
		enabledUsers     []string
		disabledUsers    []string

		expectedRules expectedRulesMap
	}

	const (
		ruler1     = "ruler-1"
		ruler1Host = "1.1.1.1"
		ruler1Port = 9999
		ruler1Addr = "1.1.1.1:9999"

		ruler2     = "ruler-2"
		ruler2Host = "2.2.2.2"
		ruler2Port = 9999
		ruler2Addr = "2.2.2.2:9999"

		ruler3     = "ruler-3"
		ruler3Host = "3.3.3.3"
		ruler3Port = 9999
		ruler3Addr = "3.3.3.3:9999"
	)

	testCases := map[string]testCase{
		"no sharding": {
			sharding:      false,
			expectedRules: expectedRulesMap{ruler1: allRules},
		},

		"no sharding, single user allowed": {
			sharding:     false,
			enabledUsers: []string{user1},
			expectedRules: expectedRulesMap{ruler1: map[string]rulespb.RuleGroupList{
				user1: {user1Group1, user1Group2},
			}},
		},

		"no sharding, single user disabled": {
			sharding:      false,
			disabledUsers: []string{user1},
			expectedRules: expectedRulesMap{ruler1: map[string]rulespb.RuleGroupList{
				user2: {user2Group1},
				user3: {user3Group1},
			}},
		},

		"default sharding, single ruler": {
			sharding:         true,
			shardingStrategy: util.ShardingStrategyDefault,
			setupRing: func(desc *ring.Desc) {
				desc.AddIngester(ruler1, ruler1Addr, "", []uint32{0}, ring.ACTIVE, time.Now())
			},
			expectedRules: expectedRulesMap{ruler1: allRules},
		},

		"default sharding, single ruler, single enabled user": {
			sharding:         true,
			shardingStrategy: util.ShardingStrategyDefault,
			enabledUsers:     []string{user1},
			setupRing: func(desc *ring.Desc) {
				desc.AddIngester(ruler1, ruler1Addr, "", []uint32{0}, ring.ACTIVE, time.Now())
			},
			expectedRules: expectedRulesMap{ruler1: map[string]rulespb.RuleGroupList{
				user1: {user1Group1, user1Group2},
			}},
		},

		"default sharding, single ruler, single disabled user": {
			sharding:         true,
			shardingStrategy: util.ShardingStrategyDefault,
			disabledUsers:    []string{user1},
			setupRing: func(desc *ring.Desc) {
				desc.AddIngester(ruler1, ruler1Addr, "", []uint32{0}, ring.ACTIVE, time.Now())
			},
			expectedRules: expectedRulesMap{ruler1: map[string]rulespb.RuleGroupList{
				user2: {user2Group1},
				user3: {user3Group1},
			}},
		},

		"default sharding, multiple ACTIVE rulers": {
			sharding:         true,
			shardingStrategy: util.ShardingStrategyDefault,
			setupRing: func(desc *ring.Desc) {
				desc.AddIngester(ruler1, ruler1Addr, "", sortTokens([]uint32{user1Group1Token + 1, user2Group1Token + 1}), ring.ACTIVE, time.Now())
				desc.AddIngester(ruler2, ruler2Addr, "", sortTokens([]uint32{user1Group2Token + 1, user3Group1Token + 1}), ring.ACTIVE, time.Now())
			},

			expectedRules: expectedRulesMap{
				ruler1: map[string]rulespb.RuleGroupList{
					user1: {user1Group1},
					user2: {user2Group1},
				},

				ruler2: map[string]rulespb.RuleGroupList{
					user1: {user1Group2},
					user3: {user3Group1},
				},
			},
		},

		"default sharding, multiple ACTIVE rulers, single enabled user": {
			sharding:         true,
			shardingStrategy: util.ShardingStrategyDefault,
			enabledUsers:     []string{user1},
			setupRing: func(desc *ring.Desc) {
				desc.AddIngester(ruler1, ruler1Addr, "", sortTokens([]uint32{user1Group1Token + 1, user2Group1Token + 1}), ring.ACTIVE, time.Now())
				desc.AddIngester(ruler2, ruler2Addr, "", sortTokens([]uint32{user1Group2Token + 1, user3Group1Token + 1}), ring.ACTIVE, time.Now())
			},

			expectedRules: expectedRulesMap{
				ruler1: map[string]rulespb.RuleGroupList{
					user1: {user1Group1},
				},

				ruler2: map[string]rulespb.RuleGroupList{
					user1: {user1Group2},
				},
			},
		},

		"default sharding, multiple ACTIVE rulers, single disabled user": {
			sharding:         true,
			shardingStrategy: util.ShardingStrategyDefault,
			disabledUsers:    []string{user1},
			setupRing: func(desc *ring.Desc) {
				desc.AddIngester(ruler1, ruler1Addr, "", sortTokens([]uint32{user1Group1Token + 1, user2Group1Token + 1}), ring.ACTIVE, time.Now())
				desc.AddIngester(ruler2, ruler2Addr, "", sortTokens([]uint32{user1Group2Token + 1, user3Group1Token + 1}), ring.ACTIVE, time.Now())
			},

			expectedRules: expectedRulesMap{
				ruler1: map[string]rulespb.RuleGroupList{
					user2: {user2Group1},
				},

				ruler2: map[string]rulespb.RuleGroupList{
					user3: {user3Group1},
				},
			},
		},

		"default sharding, unhealthy ACTIVE ruler": {
			sharding:         true,
			shardingStrategy: util.ShardingStrategyDefault,

			setupRing: func(desc *ring.Desc) {
				desc.AddIngester(ruler1, ruler1Addr, "", sortTokens([]uint32{user1Group1Token + 1, user2Group1Token + 1}), ring.ACTIVE, time.Now())
				desc.Ingesters[ruler2] = ring.InstanceDesc{
					Addr:      ruler2Addr,
					Timestamp: time.Now().Add(-time.Hour).Unix(),
					State:     ring.ACTIVE,
					Tokens:    sortTokens([]uint32{user1Group2Token + 1, user3Group1Token + 1}),
				}
			},

			expectedRules: expectedRulesMap{
				// This ruler doesn't get rules from unhealthy ruler (RF=1).
				ruler1: map[string]rulespb.RuleGroupList{
					user1: {user1Group1},
					user2: {user2Group1},
				},
				ruler2: noRules,
			},
		},

		"default sharding, LEAVING ruler": {
			sharding:         true,
			shardingStrategy: util.ShardingStrategyDefault,

			setupRing: func(desc *ring.Desc) {
				desc.AddIngester(ruler1, ruler1Addr, "", sortTokens([]uint32{user1Group1Token + 1, user2Group1Token + 1}), ring.LEAVING, time.Now())
				desc.AddIngester(ruler2, ruler2Addr, "", sortTokens([]uint32{user1Group2Token + 1, user3Group1Token + 1}), ring.ACTIVE, time.Now())
			},

			expectedRules: expectedRulesMap{
				// LEAVING ruler doesn't get any rules.
				ruler1: noRules,
				ruler2: allRules,
			},
		},

		"default sharding, JOINING ruler": {
			sharding:         true,
			shardingStrategy: util.ShardingStrategyDefault,

			setupRing: func(desc *ring.Desc) {
				desc.AddIngester(ruler1, ruler1Addr, "", sortTokens([]uint32{user1Group1Token + 1, user2Group1Token + 1}), ring.JOINING, time.Now())
				desc.AddIngester(ruler2, ruler2Addr, "", sortTokens([]uint32{user1Group2Token + 1, user3Group1Token + 1}), ring.ACTIVE, time.Now())
			},

			expectedRules: expectedRulesMap{
				// JOINING ruler has no rules yet.
				ruler1: noRules,
				ruler2: allRules,
			},
		},

		"shuffle sharding, single ruler": {
			sharding:         true,
			shardingStrategy: util.ShardingStrategyShuffle,

			setupRing: func(desc *ring.Desc) {
				desc.AddIngester(ruler1, ruler1Addr, "", sortTokens([]uint32{0}), ring.ACTIVE, time.Now())
			},

			expectedRules: expectedRulesMap{
				ruler1: allRules,
			},
		},

		"shuffle sharding, multiple rulers, shard size 1": {
			sharding:         true,
			shardingStrategy: util.ShardingStrategyShuffle,
			shuffleShardSize: 1,

			setupRing: func(desc *ring.Desc) {
				desc.AddIngester(ruler1, ruler1Addr, "", sortTokens([]uint32{userToken(user1, 0) + 1, userToken(user2, 0) + 1, userToken(user3, 0) + 1}), ring.ACTIVE, time.Now())
				desc.AddIngester(ruler2, ruler2Addr, "", sortTokens([]uint32{user1Group1Token + 1, user1Group2Token + 1, user2Group1Token + 1, user3Group1Token + 1}), ring.ACTIVE, time.Now())
			},

			expectedRules: expectedRulesMap{
				ruler1: allRules,
				ruler2: noRules,
			},
		},

		// Same test as the previous one, but with shard size = 2. The second ruler gets all the rules.
		"shuffle sharding, two rulers, shard size 2": {
			sharding:         true,
			shardingStrategy: util.ShardingStrategyShuffle,
			shuffleShardSize: 2,

			setupRing: func(desc *ring.Desc) {
				// Exact same token setup as the previous test.
				desc.AddIngester(ruler1, ruler1Addr, "", sortTokens([]uint32{userToken(user1, 0) + 1, userToken(user2, 0) + 1, userToken(user3, 0) + 1}), ring.ACTIVE, time.Now())
				desc.AddIngester(ruler2, ruler2Addr, "", sortTokens([]uint32{user1Group1Token + 1, user1Group2Token + 1, user2Group1Token + 1, user3Group1Token + 1}), ring.ACTIVE, time.Now())
			},

			expectedRules: expectedRulesMap{
				ruler1: noRules,
				ruler2: allRules,
			},
		},

		"shuffle sharding, two rulers, shard size 1, distributed users": {
			sharding:         true,
			shardingStrategy: util.ShardingStrategyShuffle,
			shuffleShardSize: 1,

			setupRing: func(desc *ring.Desc) {
				desc.AddIngester(ruler1, ruler1Addr, "", sortTokens([]uint32{userToken(user1, 0) + 1}), ring.ACTIVE, time.Now())
				desc.AddIngester(ruler2, ruler2Addr, "", sortTokens([]uint32{userToken(user2, 0) + 1, userToken(user3, 0) + 1}), ring.ACTIVE, time.Now())
			},

			expectedRules: expectedRulesMap{
				ruler1: map[string]rulespb.RuleGroupList{
					user1: {user1Group1, user1Group2},
				},
				ruler2: map[string]rulespb.RuleGroupList{
					user2: {user2Group1},
					user3: {user3Group1},
				},
			},
		},
		"shuffle sharding, three rulers, shard size 2": {
			sharding:         true,
			shardingStrategy: util.ShardingStrategyShuffle,
			shuffleShardSize: 2,

			setupRing: func(desc *ring.Desc) {
				desc.AddIngester(ruler1, ruler1Addr, "", sortTokens([]uint32{userToken(user1, 0) + 1, user1Group1Token + 1}), ring.ACTIVE, time.Now())
				desc.AddIngester(ruler2, ruler2Addr, "", sortTokens([]uint32{userToken(user1, 1) + 1, user1Group2Token + 1, userToken(user2, 1) + 1, userToken(user3, 1) + 1}), ring.ACTIVE, time.Now())
				desc.AddIngester(ruler3, ruler3Addr, "", sortTokens([]uint32{userToken(user2, 0) + 1, userToken(user3, 0) + 1, user2Group1Token + 1, user3Group1Token + 1}), ring.ACTIVE, time.Now())
			},

			expectedRules: expectedRulesMap{
				ruler1: map[string]rulespb.RuleGroupList{
					user1: {user1Group1},
				},
				ruler2: map[string]rulespb.RuleGroupList{
					user1: {user1Group2},
				},
				ruler3: map[string]rulespb.RuleGroupList{
					user2: {user2Group1},
					user3: {user3Group1},
				},
			},
		},
		"shuffle sharding, three rulers, shard size 2, ruler2 has no users": {
			sharding:         true,
			shardingStrategy: util.ShardingStrategyShuffle,
			shuffleShardSize: 2,

			setupRing: func(desc *ring.Desc) {
				desc.AddIngester(ruler1, ruler1Addr, "", sortTokens([]uint32{userToken(user1, 0) + 1, userToken(user2, 1) + 1, user1Group1Token + 1, user1Group2Token + 1}), ring.ACTIVE, time.Now())
				desc.AddIngester(ruler2, ruler2Addr, "", sortTokens([]uint32{userToken(user1, 1) + 1, userToken(user3, 1) + 1, user2Group1Token + 1}), ring.ACTIVE, time.Now())
				desc.AddIngester(ruler3, ruler3Addr, "", sortTokens([]uint32{userToken(user2, 0) + 1, userToken(user3, 0) + 1, user3Group1Token + 1}), ring.ACTIVE, time.Now())
			},

			expectedRules: expectedRulesMap{
				ruler1: map[string]rulespb.RuleGroupList{
					user1: {user1Group1, user1Group2},
				},
				ruler2: noRules, // ruler2 owns the token for user2Group1, but user2's shuffle shard only includes ruler1 and ruler3.
				ruler3: map[string]rulespb.RuleGroupList{
					user2: {user2Group1},
					user3: {user3Group1},
				},
			},
		},

		"shuffle sharding, three rulers, shard size 2, single enabled user": {
			sharding:         true,
			shardingStrategy: util.ShardingStrategyShuffle,
			shuffleShardSize: 2,
			enabledUsers:     []string{user1},

			setupRing: func(desc *ring.Desc) {
				desc.AddIngester(ruler1, ruler1Addr, "", sortTokens([]uint32{userToken(user1, 0) + 1, user1Group1Token + 1}), ring.ACTIVE, time.Now())
				desc.AddIngester(ruler2, ruler2Addr, "", sortTokens([]uint32{userToken(user1, 1) + 1, user1Group2Token + 1, userToken(user2, 1) + 1, userToken(user3, 1) + 1}), ring.ACTIVE, time.Now())
				desc.AddIngester(ruler3, ruler3Addr, "", sortTokens([]uint32{userToken(user2, 0) + 1, userToken(user3, 0) + 1, user2Group1Token + 1, user3Group1Token + 1}), ring.ACTIVE, time.Now())
			},

			expectedRules: expectedRulesMap{
				ruler1: map[string]rulespb.RuleGroupList{
					user1: {user1Group1},
				},
				ruler2: map[string]rulespb.RuleGroupList{
					user1: {user1Group2},
				},
				ruler3: map[string]rulespb.RuleGroupList{},
			},
		},

		"shuffle sharding, three rulers, shard size 2, single disabled user": {
			sharding:         true,
			shardingStrategy: util.ShardingStrategyShuffle,
			shuffleShardSize: 2,
			disabledUsers:    []string{user1},

			setupRing: func(desc *ring.Desc) {
				desc.AddIngester(ruler1, ruler1Addr, "", sortTokens([]uint32{userToken(user1, 0) + 1, user1Group1Token + 1}), ring.ACTIVE, time.Now())
				desc.AddIngester(ruler2, ruler2Addr, "", sortTokens([]uint32{userToken(user1, 1) + 1, user1Group2Token + 1, userToken(user2, 1) + 1, userToken(user3, 1) + 1}), ring.ACTIVE, time.Now())
				desc.AddIngester(ruler3, ruler3Addr, "", sortTokens([]uint32{userToken(user2, 0) + 1, userToken(user3, 0) + 1, user2Group1Token + 1, user3Group1Token + 1}), ring.ACTIVE, time.Now())
			},

			expectedRules: expectedRulesMap{
				ruler1: map[string]rulespb.RuleGroupList{},
				ruler2: map[string]rulespb.RuleGroupList{},
				ruler3: map[string]rulespb.RuleGroupList{
					user2: {user2Group1},
					user3: {user3Group1},
				},
			},
		},
	}

	for name, tc := range testCases {
		t.Run(name, func(t *testing.T) {
			kvStore, closer := consul.NewInMemoryClient(ring.GetCodec(), log.NewNopLogger(), nil)
			t.Cleanup(func() { assert.NoError(t, closer.Close()) })

			setupRuler := func(id string, host string, port int, forceRing *ring.Ring) *Ruler {
				cfg := Config{
					StoreConfig:      RuleStoreConfig{mock: newMockRuleStore(allRules)},
					EnableSharding:   tc.sharding,
					ShardingStrategy: tc.shardingStrategy,
					Ring: RingConfig{
						InstanceID:   id,
						InstanceAddr: host,
						InstancePort: port,
						KVStore: kv.Config{
							Mock: kvStore,
						},
						HeartbeatTimeout: 1 * time.Minute,
					},
					FlushCheckPeriod: 0,
					EnabledTenants:   tc.enabledUsers,
					DisabledTenants:  tc.disabledUsers,
				}

				r, cleanup := buildRuler(t, cfg, nil, nil)
				r.limits = ruleLimits{evalDelay: 0, tenantShard: tc.shuffleShardSize}
				t.Cleanup(cleanup)

				if forceRing != nil {
					r.ring = forceRing
				}
				return r
			}

			r1 := setupRuler(ruler1, ruler1Host, ruler1Port, nil)

			rulerRing := r1.ring

			// We start ruler's ring, but nothing else (not even lifecycler).
			if rulerRing != nil {
				require.NoError(t, services.StartAndAwaitRunning(context.Background(), rulerRing))
				t.Cleanup(rulerRing.StopAsync)
			}

			var r2, r3 *Ruler
			if rulerRing != nil {
				// Reuse ring from r1.
				r2 = setupRuler(ruler2, ruler2Host, ruler2Port, rulerRing)
				r3 = setupRuler(ruler3, ruler3Host, ruler3Port, rulerRing)
			}

			if tc.setupRing != nil {
				err := kvStore.CAS(context.Background(), ring.RulerRingKey, func(in interface{}) (out interface{}, retry bool, err error) {
					d, _ := in.(*ring.Desc)
					if d == nil {
						d = ring.NewDesc()
					}

					tc.setupRing(d)

					return d, true, nil
				})
				require.NoError(t, err)
				// Wait a bit to make sure ruler's ring is updated.
				time.Sleep(100 * time.Millisecond)
			}

			// Always add ruler1 to expected rulers, even if there is no ring (no sharding).
			loadedRules1, err := r1.listRules(context.Background())
			require.NoError(t, err)

			expected := expectedRulesMap{
				ruler1: loadedRules1,
			}

			addToExpected := func(id string, r *Ruler) {
				// Only expect rules from other rulers when using the ring and they are present in it.
				if r != nil && rulerRing != nil && rulerRing.HasInstance(id) {
					loaded, err := r.listRules(context.Background())
					require.NoError(t, err)
					// Normalize nil map to empty one.
					if loaded == nil {
						loaded = map[string]rulespb.RuleGroupList{}
					}
					expected[id] = loaded
				}
			}

			addToExpected(ruler2, r2)
			addToExpected(ruler3, r3)

			require.Equal(t, tc.expectedRules, expected)
		})
	}
}

// userToken returns the token at position `skip` in the shuffle-shard token
// sequence generated for the given user.
func userToken(user string, skip int) uint32 {
	r := rand.New(rand.NewSource(util.ShuffleShardSeed(user, "")))

	for ; skip > 0; skip-- {
		_ = r.Uint32()
	}
	return r.Uint32()
}
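
// Hedged note (not in the original file): the "+1" added to userToken and
// tokenForGroup values in the setupRing callbacks above relies on ring lookups
// resolving a hash to the next registered token after it. Registering
// tokenForGroup(g)+1 therefore makes that ruler the owner of group g, and
// registering userToken(u, n)+1 makes it the (n+1)-th member of user u's
// shuffle shard.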

func sortTokens(tokens []uint32) []uint32 {
	sort.Slice(tokens, func(i, j int) bool {
		return tokens[i] < tokens[j]
	})
	return tokens
}

func TestDeleteTenantRuleGroups(t *testing.T) {
	ruleGroups := []ruleGroupKey{
		{user: "userA", namespace: "namespace", group: "group"},
		{user: "userB", namespace: "namespace1", group: "group"},
		{user: "userB", namespace: "namespace2", group: "group"},
	}

	obj, rs := setupRuleGroupsStore(t, ruleGroups)
	require.Equal(t, 3, obj.GetObjectCount())

	api, err := NewRuler(Config{}, nil, nil, log.NewNopLogger(), rs, nil)
	require.NoError(t, err)

	{
		req := &http.Request{}
		resp := httptest.NewRecorder()
		api.DeleteTenantConfiguration(resp, req)

		require.Equal(t, http.StatusUnauthorized, resp.Code)
	}

	{
		callDeleteTenantAPI(t, api, "user-with-no-rule-groups")
		require.Equal(t, 3, obj.GetObjectCount())

		verifyExpectedDeletedRuleGroupsForUser(t, api, "user-with-no-rule-groups", true) // Has no rule groups
		verifyExpectedDeletedRuleGroupsForUser(t, api, "userA", false)
		verifyExpectedDeletedRuleGroupsForUser(t, api, "userB", false)
	}

	{
		callDeleteTenantAPI(t, api, "userA")
		require.Equal(t, 2, obj.GetObjectCount())

		verifyExpectedDeletedRuleGroupsForUser(t, api, "user-with-no-rule-groups", true) // Has no rule groups
		verifyExpectedDeletedRuleGroupsForUser(t, api, "userA", true)                    // Just deleted.
		verifyExpectedDeletedRuleGroupsForUser(t, api, "userB", false)
	}

	// Deleting the same user again works fine and reports no problems.
	{
		callDeleteTenantAPI(t, api, "userA")
		require.Equal(t, 2, obj.GetObjectCount())

		verifyExpectedDeletedRuleGroupsForUser(t, api, "user-with-no-rule-groups", true) // Has no rule groups
		verifyExpectedDeletedRuleGroupsForUser(t, api, "userA", true)                    // Already deleted before.
		verifyExpectedDeletedRuleGroupsForUser(t, api, "userB", false)
	}

	{
		callDeleteTenantAPI(t, api, "userB")
		require.Equal(t, 0, obj.GetObjectCount())

		verifyExpectedDeletedRuleGroupsForUser(t, api, "user-with-no-rule-groups", true) // Has no rule groups
		verifyExpectedDeletedRuleGroupsForUser(t, api, "userA", true)                    // Deleted previously
		verifyExpectedDeletedRuleGroupsForUser(t, api, "userB", true)                    // Just deleted
	}
}

// generateTokenForGroups returns one ring token per rule group, shifted by
// the given offset.
func generateTokenForGroups(groups []*rulespb.RuleGroupDesc, offset uint32) []uint32 {
	var tokens []uint32

	for _, g := range groups {
		tokens = append(tokens, tokenForGroup(g)+offset)
	}

	return tokens
}

func callDeleteTenantAPI(t *testing.T, api *Ruler, userID string) {
	ctx := user.InjectOrgID(context.Background(), userID)

	req := &http.Request{}
	resp := httptest.NewRecorder()
	api.DeleteTenantConfiguration(resp, req.WithContext(ctx))

	require.Equal(t, http.StatusOK, resp.Code)
}

func verifyExpectedDeletedRuleGroupsForUser(t *testing.T, r *Ruler, userID string, expectedDeleted bool) {
	list, err := r.store.ListRuleGroupsForUserAndNamespace(context.Background(), userID, "")
	require.NoError(t, err)

	if expectedDeleted {
		require.Equal(t, 0, len(list))
	} else {
		require.NotEqual(t, 0, len(list))
	}
}

func setupRuleGroupsStore(t *testing.T, ruleGroups []ruleGroupKey) (*chunk.MockStorage, rulestore.RuleStore) {
	obj := chunk.NewMockStorage()
	rs := objectclient.NewRuleStore(obj, 5, log.NewNopLogger())

	// "upload" rule groups
	for _, key := range ruleGroups {
		desc := rulespb.ToProto(key.user, key.namespace, rulefmt.RuleGroup{Name: key.group})
		require.NoError(t, rs.SetRuleGroup(context.Background(), key.user, key.namespace, desc))
	}

	return obj, rs
}

type ruleGroupKey struct {
	user, namespace, group string
}

func TestRuler_ListAllRules(t *testing.T) {
	cfg, cleanup := defaultRulerConfig(t, newMockRuleStore(mockRules))
	defer cleanup()

	r, rcleanup := newTestRuler(t, cfg, nil)
	defer rcleanup()
	defer services.StopAndAwaitTerminated(context.Background(), r) //nolint:errcheck

	router := mux.NewRouter()
	router.Path("/ruler/rule_groups").Methods(http.MethodGet).HandlerFunc(r.ListAllRules)

	req := requestFor(t, http.MethodGet, "https://localhost:8080/ruler/rule_groups", nil, "")
	w := httptest.NewRecorder()
	router.ServeHTTP(w, req)

	resp := w.Result()
	body, err := ioutil.ReadAll(resp.Body)
	require.NoError(t, err)

	// Check status code and header.
	require.Equal(t, http.StatusOK, resp.StatusCode)
	require.Equal(t, "application/yaml", resp.Header.Get("Content-Type"))

	gs := make(map[string]map[string][]rulefmt.RuleGroup) // user:namespace:[]rulefmt.RuleGroup
	for userID := range mockRules {
		gs[userID] = mockRules[userID].Formatted()
	}
	expectedResponse, err := yaml.Marshal(gs)
	require.NoError(t, err)
	require.YAMLEq(t, string(expectedResponse), string(body))
}

type senderFunc func(alerts ...*notifier.Alert)

func (s senderFunc) Send(alerts ...*notifier.Alert) {
	s(alerts...)
}

func TestSendAlerts(t *testing.T) {
	testCases := []struct {
		in  []*promRules.Alert
		exp []*notifier.Alert
	}{
		{
			in: []*promRules.Alert{
				{
					Labels:      []labels.Label{{Name: "l1", Value: "v1"}},
					Annotations: []labels.Label{{Name: "a2", Value: "v2"}},
					ActiveAt:    time.Unix(1, 0),
					FiredAt:     time.Unix(2, 0),
					ValidUntil:  time.Unix(3, 0),
				},
			},
			exp: []*notifier.Alert{
				{
					Labels:       []labels.Label{{Name: "l1", Value: "v1"}},
					Annotations:  []labels.Label{{Name: "a2", Value: "v2"}},
					StartsAt:     time.Unix(2, 0),
					EndsAt:       time.Unix(3, 0),
					GeneratorURL: "http://localhost:9090/graph?g0.expr=up&g0.tab=1",
				},
			},
		},
		{
			in: []*promRules.Alert{
				{
					Labels:      []labels.Label{{Name: "l1", Value: "v1"}},
					Annotations: []labels.Label{{Name: "a2", Value: "v2"}},
					ActiveAt:    time.Unix(1, 0),
					FiredAt:     time.Unix(2, 0),
					ResolvedAt:  time.Unix(4, 0),
				},
			},
			exp: []*notifier.Alert{
				{
					Labels:       []labels.Label{{Name: "l1", Value: "v1"}},
					Annotations:  []labels.Label{{Name: "a2", Value: "v2"}},
					StartsAt:     time.Unix(2, 0),
					EndsAt:       time.Unix(4, 0),
					GeneratorURL: "http://localhost:9090/graph?g0.expr=up&g0.tab=1",
				},
			},
		},
		{
			in: []*promRules.Alert{},
		},
	}

	for i, tc := range testCases {
		tc := tc
		t.Run(fmt.Sprintf("%d", i), func(t *testing.T) {
			senderFunc := senderFunc(func(alerts ...*notifier.Alert) {
				if len(tc.in) == 0 {
					t.Fatalf("sender called with 0 alerts")
				}
				require.Equal(t, tc.exp, alerts)
			})
			SendAlerts(senderFunc, "http://localhost:9090")(context.TODO(), "up", tc.in...)
		})
	}
}

// TestRecoverAlertsPostOutage tests whether the Ruler is able to recover the
// ALERTS_FOR_STATE state after an outage.
func TestRecoverAlertsPostOutage(t *testing.T) {
	// Test setup:
	// alert FOR 30m, already pending for 10m, outage starts 15m prior to now(), outage tolerance set to 1h.
	// EXPECTATION: the alert's FOR state is restored to 10m + (now - 15m).

	// FIRST, set up one alert rule with a 30m FOR duration.
	alertForDuration, _ := time.ParseDuration("30m")
	mockRules := map[string]rulespb.RuleGroupList{
		"user1": {
			&rulespb.RuleGroupDesc{
				Name:      "group1",
				Namespace: "namespace1",
				User:      "user1",
				Rules: []*rulespb.RuleDesc{
					{
						Alert: "UP_ALERT",
						Expr:  "1", // always fire for this test
						For:   alertForDuration,
					},
				},
				Interval: interval,
			},
		},
	}

	// NEXT, set up the ruler config with an outage tolerance of 1h.
	rulerCfg, cleanup := defaultRulerConfig(t, newMockRuleStore(mockRules))
	rulerCfg.OutageTolerance, _ = time.ParseDuration("1h")
	defer cleanup()

	// NEXT, set up a mock distributor containing the sample
	// ALERTS_FOR_STATE{alertname="UP_ALERT"} with ts: time.Now()-15m and value: time.Now()-25m.
	currentTime := time.Now().UTC()
	downAtTime := currentTime.Add(time.Minute * -15)
	downAtTimeMs := downAtTime.UnixNano() / int64(time.Millisecond)
	downAtActiveAtTime := currentTime.Add(time.Minute * -25)
	downAtActiveSec := downAtActiveAtTime.Unix()
	d := &querier.MockDistributor{}
	d.On("Query", mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return(
		model.Matrix{
			&model.SampleStream{
				Metric: model.Metric{
					labels.MetricName: "ALERTS_FOR_STATE",
					// user1's only alert rule
					labels.AlertName: model.LabelValue(mockRules["user1"][0].GetRules()[0].Alert),
				},
				Values: []model.SamplePair{{Timestamp: model.Time(downAtTimeMs), Value: model.SampleValue(downAtActiveSec)}},
			},
		},
		nil)
	d.On("MetricsForLabelMatchers", mock.Anything, mock.Anything, mock.Anything, mock.Anything).Panic("This should not be called for the ruler use-cases.")
	querierConfig := querier.DefaultQuerierConfig()
	querierConfig.IngesterStreaming = false

	// Set up an empty chunk store.
	queryables := []querier.QueryableWithFilter{
		querier.UseAlwaysQueryable(querier.NewChunkStoreQueryable(querierConfig, &emptyChunkStore{})),
	}

	// Create a ruler but don't start it. Instead, we'll evaluate the rule groups manually.
	r, rcleanup := buildRuler(t, rulerCfg, &querier.TestConfig{Cfg: querierConfig, Distributor: d, Stores: queryables}, nil)
	r.syncRules(context.Background(), rulerSyncReasonInitial)
	defer rcleanup()

	// Assert the initial state of the rule group.
	ruleGroup := r.manager.GetRules("user1")[0]
	require.Equal(t, time.Time{}, ruleGroup.GetLastEvaluation())
	require.Equal(t, "group1", ruleGroup.Name())
	require.Equal(t, 1, len(ruleGroup.Rules()))

	// Assert the initial state of the rule within the rule group.
	alertRule := ruleGroup.Rules()[0]
	require.Equal(t, time.Time{}, alertRule.GetEvaluationTimestamp())
	require.Equal(t, "UP_ALERT", alertRule.Name())
	require.Equal(t, promRules.HealthUnknown, alertRule.Health())

	// NEXT, evaluate the rule group for the first time and assert.
	ctx := user.InjectOrgID(context.Background(), "user1")
	ruleGroup.Eval(ctx, currentTime)

	// Since the eval is done at the current timestamp, the activeAt timestamp of the alert should equal the current timestamp.
	require.Equal(t, "UP_ALERT", alertRule.Name())
	require.Equal(t, promRules.HealthGood, alertRule.Health())

	activeMapRaw := reflect.ValueOf(alertRule).Elem().FieldByName("active")
	activeMapKeys := activeMapRaw.MapKeys()
	require.Len(t, activeMapKeys, 1)

	activeAlertRuleRaw := activeMapRaw.MapIndex(activeMapKeys[0]).Elem()
	activeAtTimeRaw := activeAlertRuleRaw.FieldByName("ActiveAt")

	require.Equal(t, promRules.StatePending, promRules.AlertState(activeAlertRuleRaw.FieldByName("State").Int()))
	require.Equal(t, reflect.NewAt(activeAtTimeRaw.Type(), unsafe.Pointer(activeAtTimeRaw.UnsafeAddr())).Elem().Interface().(time.Time), currentTime)

	// NEXT, restore the FOR state and assert.
	ruleGroup.RestoreForState(currentTime)

	require.Equal(t, "UP_ALERT", alertRule.Name())
	require.Equal(t, promRules.HealthGood, alertRule.Health())
	require.Equal(t, promRules.StatePending, promRules.AlertState(activeAlertRuleRaw.FieldByName("State").Int()))
	require.Equal(t, reflect.NewAt(activeAtTimeRaw.Type(), unsafe.Pointer(activeAtTimeRaw.UnsafeAddr())).Elem().Interface().(time.Time), downAtActiveAtTime.Add(currentTime.Sub(downAtTime)))

	// NEXT, 20 minutes of the FOR duration are expected to remain, so evaluate at currentTime+20m.
	currentTime = currentTime.Add(time.Minute * 20)
	ruleGroup.Eval(ctx, currentTime)

	// Assert the alert state once the alert is firing.
	firedAtRaw := activeAlertRuleRaw.FieldByName("FiredAt")
	firedAtTime := reflect.NewAt(firedAtRaw.Type(), unsafe.Pointer(firedAtRaw.UnsafeAddr())).Elem().Interface().(time.Time)
	require.Equal(t, firedAtTime, currentTime)

	require.Equal(t, promRules.StateFiring, promRules.AlertState(activeAlertRuleRaw.FieldByName("State").Int()))
}
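
// Hedged sketch (hypothetical helper, not part of the original test): the
// reflect.NewAt + unsafe.Pointer pattern used above to read the unexported
// ActiveAt/FiredAt fields could be factored out like this, so that
// readTimeField(activeAlertRuleRaw, "FiredAt") replaces the inline chain.
func readTimeField(v reflect.Value, name string) time.Time {
	// reflect.NewAt re-materializes the unexported field at its own address,
	// which makes it readable despite reflect's export restrictions.
	f := v.FieldByName(name)
	return reflect.NewAt(f.Type(), unsafe.Pointer(f.UnsafeAddr())).Elem().Interface().(time.Time)
}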