github.com/cockroachdb/cockroach@v20.2.0-alpha.1+incompatible/pkg/kv/kvserver/client_raft_log_queue_test.go

// Copyright 2015 The Cockroach Authors.
//
// Use of this software is governed by the Business Source License
// included in the file licenses/BSL.txt.
//
// As of the Change Date specified in that file, in accordance with
// the Business Source License, use of this software will be governed
// by the Apache License, Version 2.0, included in the file
// licenses/APL.txt.

package kvserver_test

import (
	"bytes"
	"context"
	"fmt"
	"math"
	"testing"

	"github.com/cockroachdb/cockroach/pkg/kv"
	"github.com/cockroachdb/cockroach/pkg/kv/kvserver"
	"github.com/cockroachdb/cockroach/pkg/util/leaktest"
	"github.com/gogo/protobuf/proto"
)

// TestRaftLogQueue verifies that the raft log queue correctly truncates the
// raft log.
func TestRaftLogQueue(t *testing.T) {
	defer leaktest.AfterTest(t)()

	mtc := &multiTestContext{}

	// Set maxBytes to something small so we can trigger the raft log truncation
	// without adding 64MB of logs.
	const maxBytes = 1 << 16

	// Turn off raft elections so the raft leader won't change out from under
	// us in this test.
	sc := kvserver.TestStoreConfig(nil)
	sc.DefaultZoneConfig.RangeMaxBytes = proto.Int64(maxBytes)
	sc.RaftTickInterval = math.MaxInt32
	sc.RaftElectionTimeoutTicks = 1000000
	mtc.storeConfig = &sc

	defer mtc.Stop()
	mtc.Start(t, 3)

	// Write a single value to ensure we have a leader.
	pArgs := putArgs([]byte("key"), []byte("value"))
	if _, err := kv.SendWrapped(context.Background(), mtc.stores[0].TestSender(), pArgs); err != nil {
		t.Fatal(err)
	}

	// Get the raft leader (and ensure one exists).
	rangeID := mtc.stores[0].LookupReplica([]byte("a")).RangeID
	raftLeaderRepl := mtc.getRaftLeader(rangeID)
	if raftLeaderRepl == nil {
		t.Fatalf("could not find raft leader replica for range %d", rangeID)
	}
	originalIndex, err := raftLeaderRepl.GetFirstIndex()
	if err != nil {
		t.Fatal(err)
	}

	// Disable splits since we're increasing the raft log with puts.
	for _, store := range mtc.stores {
		store.SetSplitQueueActive(false)
	}

	// Write a collection of values to increase the raft log.
	value := bytes.Repeat([]byte("a"), 1000) // 1KB
	for size := int64(0); size < 2*maxBytes; size += int64(len(value)) {
		pArgs = putArgs([]byte(fmt.Sprintf("key-%d", size)), value)
		if _, err := kv.SendWrapped(context.Background(), mtc.stores[0].TestSender(), pArgs); err != nil {
			t.Fatal(err)
		}
	}

	// Force a truncation check.
	for _, store := range mtc.stores {
		store.MustForceRaftLogScanAndProcess()
	}

	// Ensure that firstIndex has increased indicating that the log
	// truncation has occurred.
	afterTruncationIndex, err := raftLeaderRepl.GetFirstIndex()
	if err != nil {
		t.Fatal(err)
	}
	if afterTruncationIndex <= originalIndex {
		t.Fatalf("raft log has not been truncated yet, afterTruncationIndex:%d originalIndex:%d",
			afterTruncationIndex, originalIndex)
	}
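
	// The advancing first index is the observable effect of truncation: once
	// the raft log queue truncates a range's log, entries below the new first
	// index are discarded, so the leader's first index moves forward.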

	// Force a truncation check again to ensure that attempting to truncate an
	// already truncated log has no effect. This check, unlike in the last
	// iteration, cannot use a succeedsSoon. This check is fragile in that the
	// truncation triggered here may lose the race against the call to
	// GetFirstIndex, giving a false negative. Fixing this requires additional
	// instrumentation of the queues, which was deemed to require too much work
	// at the time of this writing.
	for _, store := range mtc.stores {
		store.MustForceRaftLogScanAndProcess()
	}

	after2ndTruncationIndex, err := raftLeaderRepl.GetFirstIndex()
	if err != nil {
		t.Fatal(err)
	}
	if afterTruncationIndex > after2ndTruncationIndex {
		t.Fatalf("second truncation destroyed state: afterTruncationIndex:%d after2ndTruncationIndex:%d",
			afterTruncationIndex, after2ndTruncationIndex)
	}
}
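
// shouldTruncateBySize is a deliberately simplified sketch of the size-based
// trigger this test exercises, included only for illustration: it is a
// hypothetical helper, not CockroachDB's raft log queue logic, which also
// weighs follower progress, pending snapshots, and other heuristics.
func shouldTruncateBySize(raftLogBytes, rangeMaxBytes int64) bool {
	// The test drives the log past the range's configured maximum size
	// (RangeMaxBytes, set to 64KB above) precisely so that a check of this
	// shape fires.
	return raftLogBytes > rangeMaxBytes
}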