github.com/cockroachdb/cockroach@v20.2.0-alpha.1+incompatible/pkg/cli/debug_test.go

// Copyright 2017 The Cockroach Authors.
//
// Use of this software is governed by the Business Source License
// included in the file licenses/BSL.txt.
//
// As of the Change Date specified in that file, in accordance with
// the Business Source License, use of this software will be governed
// by the Apache License, Version 2.0, included in the file
// licenses/APL.txt.

package cli

import (
	"context"
	"fmt"
	"path/filepath"
	"regexp"
	"sort"
	"strings"
	"testing"
	"time"

	"github.com/cockroachdb/cockroach/pkg/base"
	"github.com/cockroachdb/cockroach/pkg/gossip"
	"github.com/cockroachdb/cockroach/pkg/keys"
	"github.com/cockroachdb/cockroach/pkg/kv"
	"github.com/cockroachdb/cockroach/pkg/kv/kvserver"
	"github.com/cockroachdb/cockroach/pkg/roachpb"
	"github.com/cockroachdb/cockroach/pkg/rpc"
	"github.com/cockroachdb/cockroach/pkg/server"
	"github.com/cockroachdb/cockroach/pkg/server/serverpb"
	"github.com/cockroachdb/cockroach/pkg/storage"
	"github.com/cockroachdb/cockroach/pkg/testutils"
	"github.com/cockroachdb/cockroach/pkg/testutils/serverutils"
	"github.com/cockroachdb/cockroach/pkg/testutils/sqlutils"
	"github.com/cockroachdb/cockroach/pkg/testutils/testcluster"
	"github.com/cockroachdb/cockroach/pkg/util"
	"github.com/cockroachdb/cockroach/pkg/util/leaktest"
	"github.com/cockroachdb/cockroach/pkg/util/stop"
	"github.com/cockroachdb/errors"
)

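// createStore initializes a RocksDB store in the given directory and
// immediately closes it, leaving a valid on-disk store for tests to reopen.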
func createStore(t *testing.T, path string) {
	t.Helper()
	cache := storage.NewRocksDBCache(server.DefaultCacheSize)
	defer cache.Release()
	db, err := storage.NewRocksDB(
		storage.RocksDBConfig{
			StorageConfig: base.StorageConfig{
				Dir:       path,
				MustExist: false,
			},
		},
		cache,
	)
	if err != nil {
		t.Fatal(err)
	}
	db.Close()
}

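// TestOpenExistingStore checks that OpenExistingStore opens a pre-existing
// store directory, and fails on a missing one rather than creating it.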
func TestOpenExistingStore(t *testing.T) {
	defer leaktest.AfterTest(t)()
	stopper := stop.NewStopper()
	defer stopper.Stop(context.Background())

	baseDir, dirCleanupFn := testutils.TempDir(t)
	defer dirCleanupFn()

	dirExists := filepath.Join(baseDir, "exists")
	dirMissing := filepath.Join(baseDir, "missing")
	createStore(t, dirExists)

	for _, test := range []struct {
		dir    string
		expErr string
	}{
		{
			dir:    dirExists,
			expErr: "",
		},
		{
			dir:    dirMissing,
			expErr: `does not exist|no such file or directory`,
		},
	} {
		t.Run(fmt.Sprintf("dir=%s", test.dir), func(t *testing.T) {
			_, err := OpenExistingStore(test.dir, stopper, false /* readOnly */)
			if !testutils.IsError(err, test.expErr) {
				t.Errorf("wanted %s but got %v", test.expErr, err)
			}
		})
	}
}

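// TestOpenReadOnlyStore checks that a store opened with readOnly=true
// rejects writes with the engine's read-only error.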
func TestOpenReadOnlyStore(t *testing.T) {
	defer leaktest.AfterTest(t)()
	stopper := stop.NewStopper()
	defer stopper.Stop(context.Background())

	baseDir, dirCleanupFn := testutils.TempDir(t)
	defer dirCleanupFn()

	storePath := filepath.Join(baseDir, "store")
	createStore(t, storePath)

	for _, test := range []struct {
		readOnly bool
		expErr   string
	}{
		{
			readOnly: false,
			expErr:   "",
		},
		{
			readOnly: true,
			expErr:   `Not supported operation in read only mode|pebble: read-only`,
		},
	} {
		t.Run(fmt.Sprintf("readOnly=%t", test.readOnly), func(t *testing.T) {
			db, err := OpenExistingStore(storePath, stopper, test.readOnly)
			if err != nil {
				t.Fatal(err)
			}

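			// A write should succeed on a read-write engine and fail with
			// the engine-specific read-only error (RocksDB or Pebble) otherwise.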
			key := storage.MakeMVCCMetadataKey(roachpb.Key("key"))
			val := []byte("value")
			err = db.Put(key, val)
			if !testutils.IsError(err, test.expErr) {
				t.Fatalf("wanted %s but got %v", test.expErr, err)
			}
		})
	}
}

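// TestRemoveDeadReplicas exercises the store-level recovery path: it
// permanently loses a majority of nodes and verifies that the surviving
// stores can be repaired offline and restarted with quorum restored.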
func TestRemoveDeadReplicas(t *testing.T) {
	defer leaktest.AfterTest(t)()

	// This test is pretty slow under race (200+ cpu-seconds) because it
	// uses multiple real disk-backed stores and goes through multiple
	// cycles of re-replicating all ranges.
	if util.RaceEnabled {
		t.Skip("skipping under race")
	}

	ctx := context.Background()

	testCases := []struct {
		survivingNodes, totalNodes, replicationFactor int
	}{
		{1, 3, 3},
		{2, 4, 4},
	}

	for _, testCase := range testCases {
		t.Run(fmt.Sprintf("%d/%d/r=%d", testCase.survivingNodes, testCase.totalNodes,
			testCase.replicationFactor),
			func(t *testing.T) {
				baseDir, dirCleanupFn := testutils.TempDir(t)
				defer dirCleanupFn()

				// The surviving nodes get a real store; the others are just in memory.
				var storePaths []string
				clusterArgs := base.TestClusterArgs{
					ServerArgsPerNode: map[int]base.TestServerArgs{},
				}
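				// deadReplicas collects the store IDs of the non-surviving
				// nodes; the repair step strips these from range descriptors.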
				deadReplicas := map[roachpb.StoreID]struct{}{}

				for i := 0; i < testCase.totalNodes; i++ {
					args := base.TestServerArgs{}
					args.ScanMaxIdleTime = time.Millisecond
					storeID := roachpb.StoreID(i + 1)
					if i < testCase.survivingNodes {
						path := filepath.Join(baseDir, fmt.Sprintf("store%d", storeID))
						storePaths = append(storePaths, path)
						// ServerArgsPerNode uses 0-based index, not node ID.
						args.StoreSpecs = []base.StoreSpec{{Path: path}}
					} else {
						deadReplicas[storeID] = struct{}{}
					}
					clusterArgs.ServerArgsPerNode[i] = args
				}

				// Start the cluster, let it replicate, then stop it. Since the
				// non-surviving nodes use in-memory stores, this automatically
				// causes the cluster to lose its quorum.
				//
				// While it's running, start a transaction and write an intent to
				// one of the range descriptors (without committing or aborting the
				// transaction). This exercises a special case in removeDeadReplicas.
				func() {
					tc := testcluster.StartTestCluster(t, testCase.totalNodes, clusterArgs)
					defer tc.Stopper().Stop(ctx)

					s := sqlutils.MakeSQLRunner(tc.Conns[0])

					// Set the replication factor on all zones.
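					// Each row of SHOW ALL ZONE CONFIGURATIONS carries the
					// zone's config as a SQL statement; rewrite its
					// num_replicas clause (or append one) and re-execute it,
					// yielding e.g.
					//   ALTER RANGE default CONFIGURE ZONE USING num_replicas = 4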
					func() {
						rows := s.Query(t, "show all zone configurations")
						defer rows.Close()
						re := regexp.MustCompile(`num_replicas = \d+`)
						var sqls []string
						for rows.Next() {
							var name, configSQL string
							if err := rows.Scan(&name, &configSQL); err != nil {
								t.Fatal(err)
							}
							sqls = append(sqls, configSQL)
						}
						for _, sql := range sqls {
							if re.FindString(sql) != "" {
								sql = re.ReplaceAllString(sql,
									fmt.Sprintf("num_replicas = %d", testCase.replicationFactor))
							} else {
								sql = fmt.Sprintf("%s, num_replicas = %d", sql, testCase.replicationFactor)
							}
							s.Exec(t, sql)
						}
					}()
					if err := tc.WaitForFullReplication(); err != nil {
						t.Fatal(err)
					}

					// Perform a write to ensure that pre-crash data is preserved.
					// Creating a table causes extra friction in the test harness when
					// we restart the cluster, so just write a setting.
					s.Exec(t, "set cluster setting cluster.organization='remove dead replicas test'")

					txn := kv.NewTxn(ctx, tc.Servers[0].DB(), 1)
					var desc roachpb.RangeDescriptor
					// Pick one of the predefined split points.
					rdKey := keys.RangeDescriptorKey(roachpb.RKey(keys.TimeseriesPrefix))
					if err := txn.GetProto(ctx, rdKey, &desc); err != nil {
						t.Fatal(err)
					}
					desc.NextReplicaID++
					if err := txn.Put(ctx, rdKey, &desc); err != nil {
						t.Fatal(err)
					}

					// At this point the intent has been written to RocksDB but this
					// write was not synced (only the raft log append was synced). We
					// need to force another sync, but we're far from the storage
					// layer here so the easiest thing to do is simply perform a
					// second write. This will force the first write to be persisted
					// to disk (the second write may or may not make it to disk due to
					// timing).
					desc.NextReplicaID++
					if err := txn.Put(ctx, rdKey, &desc); err != nil {
						t.Fatal(err)
					}

					// We deliberately do not close this transaction so the intent is
					// left behind.
				}()

				// Open the surviving stores directly to repair them.
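				// removeDeadReplicas rewrites the local range descriptors so
				// that only replicas on surviving stores remain; it is the
				// logic behind `cockroach debug unsafe-remove-dead-replicas`.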
				repairStore := func(idx int) error {
					stopper := stop.NewStopper()
					defer stopper.Stop(ctx)

					db, err := OpenExistingStore(storePaths[idx], stopper, false /* readOnly */)
					if err != nil {
						return err
					}

					batch, err := removeDeadReplicas(db, deadReplicas)
					if err != nil {
						return err
					}
					if batch != nil {
						if err := batch.Commit(true); err != nil {
							return err
						}
						batch.Close()
					}

					// The repair process is idempotent and should give a nil batch the second time.
					batch, err = removeDeadReplicas(db, deadReplicas)
					if err != nil {
						return err
					}
					if batch != nil {
						batch.Close()
						return errors.New("expected nil batch on second attempt")
					}
					return nil
				}
				for i := 0; i < testCase.survivingNodes; i++ {
					err := repairStore(i)
					if err != nil {
						t.Fatal(err)
					}
				}

				// Now that the data is salvaged, we can restart the cluster.
				// The nodes with the in-memory stores will be assigned new,
				// higher node IDs (e.g. in the 1/3 case, the restarted nodes
				// will be 4 and 5).
				//
				// This can activate the adaptive zone config feature (using 5x
				// replication for clusters of 5 nodes or more), so we must
				// decommission the dead nodes for WaitForFullReplication to
				// complete. (Note that the cluster is working even when it
				// doesn't consider itself fully replicated; that's what allows
				// the decommissioning to succeed. We're just waiting for full
				// replication so that we can validate that ranges were moved
				// from e.g. {1,2,3} to {1,4,5}.)
				//
				// Set replication mode to manual so that TestCluster doesn't call
				// WaitForFullReplication before we've decommissioned the nodes.
				clusterArgs.ReplicationMode = base.ReplicationManual
				clusterArgs.ParallelStart = true

				// Sleep before restarting to allow all previous leases to expire.
				// Without this sleep, the 2/4 case is flaky because sometimes
				// node 1 will be left with an unexpired lease, which tricks
				// some heuristics on node 2 into never campaigning even though it
				// is the only surviving replica.
				//
				// The reason it never campaigns is incompletely understood.
				// The result of shouldCampaignOnWake will correctly
				// transition from false to true when the old lease expires,
				// but nothing triggers a "wake" event after this point.
				//
				// TODO(bdarnell): This is really a bug in Replica.leaseStatus.
				// It should not return VALID for a lease held by a node that
				// has been removed from the configuration.
				time.Sleep(10 * time.Second)

				tc := testcluster.StartTestCluster(t, testCase.totalNodes, clusterArgs)
				defer tc.Stopper().Stop(ctx)

				grpcConn, err := tc.Server(0).RPCContext().GRPCDialNode(
					tc.Server(0).ServingRPCAddr(),
					tc.Server(0).NodeID(),
					rpc.DefaultClass,
				).Connect(ctx)
				if err != nil {
					t.Fatal(err)
				}
				adminClient := serverpb.NewAdminClient(grpcConn)

				deadNodes := []roachpb.NodeID{}
				for i := testCase.survivingNodes; i < testCase.totalNodes; i++ {
					deadNodes = append(deadNodes, roachpb.NodeID(i+1))
				}

				if err := runDecommissionNodeImpl(
					ctx, adminClient, nodeDecommissionWaitNone, deadNodes,
				); err != nil {
					t.Fatal(err)
				}

				for i := 0; i < len(tc.Servers); i++ {
					err = tc.Servers[i].Stores().VisitStores(func(store *kvserver.Store) error {
						store.SetReplicateQueueActive(true)
						return nil
					})
					if err != nil {
						t.Fatal(err)
					}
				}
				if err := tc.WaitForFullReplication(); err != nil {
					t.Fatal(err)
				}

				for i := 0; i < len(tc.Servers); i++ {
					err = tc.Servers[i].Stores().VisitStores(func(store *kvserver.Store) error {
						return store.ForceConsistencyQueueProcess()
					})
					if err != nil {
						t.Fatal(err)
					}
				}

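				// Compute the expected replica placement: surviving stores keep
				// their original IDs, while each restarted in-memory node gets a
				// fresh store ID past the original range (e.g. {1,4,5} in the
				// 1/3 case).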
				expectedReplicas := []string{}
				for i := 0; i < testCase.totalNodes; i++ {
					var storeID int
					if i < testCase.survivingNodes {
						// If the replica survived, just adjust from zero-based to one-based.
						storeID = i + 1
					} else {
						storeID = i + 1 + testCase.totalNodes - testCase.survivingNodes
					}
					expectedReplicas = append(expectedReplicas, fmt.Sprintf("%d", storeID))
				}
				expectedReplicaStr := fmt.Sprintf("{%s}", strings.Join(expectedReplicas, ","))

				s := sqlutils.MakeSQLRunner(tc.Conns[0])
				row := s.QueryRow(t, "select replicas from [show ranges from table system.namespace] limit 1")
				var replicaStr string
				row.Scan(&replicaStr)
				if replicaStr != expectedReplicaStr {
					t.Fatalf("expected replicas on %s but got %s", expectedReplicaStr, replicaStr)
				}

				row = s.QueryRow(t, "show cluster setting cluster.organization")
				var org string
				row.Scan(&org)

				// If there was only one surviving node, we know we
				// recovered the write we did at the start of the test. But
				// if there were multiples, we might have picked a lagging
				// replica that didn't have it. See comments around
				// maxLivePeer in debug.go.
				//
				// TODO(bdarnell): It doesn't look to me like this is
				// guaranteed even in the single-survivor case, but it's
				// only flaky when there are multiple survivors.
				if testCase.survivingNodes == 1 {
					if org != "remove dead replicas test" {
						t.Fatalf("expected old setting to be present, got %s instead", org)
					}
				}
			})
	}
}

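// TestParseGossipValues checks that `debug gossip-values` can decode and
// print every info key present in a live cluster's gossip network.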
func TestParseGossipValues(t *testing.T) {
	defer leaktest.AfterTest(t)()
	ctx := context.Background()

	tc := testcluster.StartTestCluster(t, 3, base.TestClusterArgs{})
	defer tc.Stopper().Stop(ctx)

	var gossipInfo gossip.InfoStatus
	if err := serverutils.GetJSONProto(tc.Server(0), "/_status/gossip/1", &gossipInfo); err != nil {
		t.Fatal(err)
	}

	debugOutput, err := parseGossipValues(&gossipInfo)
	if err != nil {
		t.Fatal(err)
	}
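	// parseGossipValues should emit exactly one line per gossip info; a
	// count mismatch means some info was dropped or rendered incorrectly.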
	debugLines := strings.Split(debugOutput, "\n")
	if len(debugLines) != len(gossipInfo.Infos) {
		var gossipInfoKeys []string
		for key := range gossipInfo.Infos {
			gossipInfoKeys = append(gossipInfoKeys, key)
		}
		sort.Strings(gossipInfoKeys)
		t.Errorf("`debug gossip-values` output contains %d entries, but the source gossip contains %d:\ndebug output:\n%v\n\ngossipInfos:\n%v",
			len(debugLines), len(gossipInfo.Infos), debugOutput, strings.Join(gossipInfoKeys, "\n"))
	}
}