github.com/bartle-stripe/trillian@v1.2.1/storage/cache/subtree_cache_test.go

// Copyright 2016 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package cache

import (
	"bytes"
	"errors"
	"fmt"
	"testing"

	"github.com/google/trillian/merkle"
	"github.com/google/trillian/merkle/maphasher"
	"github.com/google/trillian/merkle/rfc6962"
	"github.com/google/trillian/storage"
	"github.com/google/trillian/storage/storagepb"

	"github.com/golang/mock/gomock"
	"github.com/kylelemons/godebug/pretty"

	stestonly "github.com/google/trillian/storage/testonly"
)

var (
	defaultLogStrata = []int{8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8}
	defaultMapStrata = []int{8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 176}
)

const treeID = int64(0)

func TestSplitNodeID(t *testing.T) {
	c := NewSubtreeCache(defaultMapStrata, populateMapSubtreeNodes(treeID, maphasher.Default), prepareMapSubtreeWrite())
	for _, tc := range []struct {
		inPath        []byte
		inPathLenBits int
		outPrefix     []byte
		outSuffixBits int
		outSuffix     []byte
	}{
		{[]byte{0x12, 0x34, 0x56, 0x7f}, 32, []byte{0x12, 0x34, 0x56}, 8, []byte{0x7f}},
		{[]byte{0x12, 0x34, 0x56, 0xff}, 29, []byte{0x12, 0x34, 0x56}, 5, []byte{0xf8}},
		{[]byte{0x12, 0x34, 0x56, 0xff}, 25, []byte{0x12, 0x34, 0x56}, 1, []byte{0x80}},
		{[]byte{0x12, 0x34, 0x56, 0x78}, 16, []byte{0x12}, 8, []byte{0x34}},
		{[]byte{0x12, 0x34, 0x56, 0x78}, 9, []byte{0x12}, 1, []byte{0x00}},
		{[]byte{0x12, 0x34, 0x56, 0x78}, 8, []byte{}, 8, []byte{0x12}},
		{[]byte{0x12, 0x34, 0x56, 0x78}, 7, []byte{}, 7, []byte{0x12}},
		{[]byte{0x12, 0x34, 0x56, 0x78}, 0, []byte{}, 0, []byte{0}},
		{[]byte{0x70}, 2, []byte{}, 2, []byte{0x40}},
		{[]byte{0x70}, 3, []byte{}, 3, []byte{0x60}},
		{[]byte{0x70}, 4, []byte{}, 4, []byte{0x70}},
		{[]byte{0x70}, 5, []byte{}, 5, []byte{0x70}},
		{[]byte{0x00, 0x03}, 16, []byte{0x00}, 8, []byte{0x03}},
		{[]byte{0x00, 0x03}, 15, []byte{0x00}, 7, []byte{0x02}},
	} {
		n := storage.NewNodeIDFromHash(tc.inPath)
		n.PrefixLenBits = tc.inPathLenBits

		p, s := c.splitNodeID(n)
		if got, want := p, tc.outPrefix; !bytes.Equal(got, want) {
			t.Errorf("splitNodeID(%v): prefix %x, want %x", n, got, want)
			continue
		}
		if got, want := int(s.Bits), tc.outSuffixBits; got != want {
			t.Errorf("splitNodeID(%v): suffix.Bits %v, want %v", n, got, want)
			continue
		}
		if got, want := s.Path, tc.outSuffix; !bytes.Equal(got, want) {
			t.Errorf("splitNodeID(%v): suffix.Path %x, want %x", n, got, want)
		}
	}
}

func TestCacheFillOnlyReadsSubtrees(t *testing.T) {
	mockCtrl := gomock.NewController(t)
	defer mockCtrl.Finish()

	m := NewMockNodeStorage(mockCtrl)
	c := NewSubtreeCache(defaultLogStrata, populateMapSubtreeNodes(treeID, maphasher.Default), prepareMapSubtreeWrite())

	nodeID := storage.NewNodeIDFromHash([]byte("1234"))
	// When we loop around asking for all 0..32 bit prefix lengths of the above
	// NodeID, we should see just one "Get" request for each subtree.
	si := 0
	for b := 0; b < nodeID.PrefixLenBits; b += defaultLogStrata[si] {
		e := nodeID
		e.PrefixLenBits = b
		m.EXPECT().GetSubtree(stestonly.NodeIDEq(e)).Return(&storagepb.SubtreeProto{
			Prefix: e.Path,
		}, nil)
		si++
	}

	for nodeID.PrefixLenBits > 0 {
		_, err := c.GetNodeHash(nodeID, m.GetSubtree)
		if err != nil {
			t.Fatalf("failed to get node hash: %v", err)
		}
		nodeID.PrefixLenBits--
	}
}

func TestCacheGetNodesReadsSubtrees(t *testing.T) {
	mockCtrl := gomock.NewController(t)
	defer mockCtrl.Finish()

	m := NewMockNodeStorage(mockCtrl)
	c := NewSubtreeCache(defaultLogStrata, populateMapSubtreeNodes(treeID, maphasher.Default), prepareMapSubtreeWrite())

	nodeIDs := []storage.NodeID{
		storage.NewNodeIDFromHash([]byte("1234")),
		storage.NewNodeIDFromHash([]byte("4567")),
		storage.NewNodeIDFromHash([]byte("89ab")),
	}

	// Set up the expected reads:
	// We expect one subtree read per entry in nodeIDs.
	for _, nodeID := range nodeIDs {
		nodeID := nodeID
		// And it'll be for the prefix of the full node ID (with the default log
		// strata that'll be everything except the last byte), so modify the prefix
		// length here accordingly:
		nodeID.PrefixLenBits -= 8
		m.EXPECT().GetSubtree(stestonly.NodeIDEq(nodeID)).Return(&storagepb.SubtreeProto{
			Prefix: nodeID.Path[:len(nodeID.Path)-1],
		}, nil)
	}

	// Now request the nodes:
	_, err := c.GetNodes(
		nodeIDs,
		// Glue function to convert a call requesting multiple subtrees into a
		// sequence of calls to our mock storage:
		func(ids []storage.NodeID) ([]*storagepb.SubtreeProto, error) {
			ret := make([]*storagepb.SubtreeProto, 0)
			for _, i := range ids {
				r, err := m.GetSubtree(i)
				if err != nil {
					return nil, err
				}
				if r != nil {
					ret = append(ret, r)
				}
			}
			return ret, nil
		})
	if err != nil {
		t.Errorf("GetNodes(_, _) = _, %v", err)
	}
}

func noFetch(storage.NodeID) (*storagepb.SubtreeProto, error) {
	return nil, errors.New("not supposed to read anything")
}

func TestCacheFlush(t *testing.T) {
	mockCtrl := gomock.NewController(t)
	defer mockCtrl.Finish()

	m := NewMockNodeStorage(mockCtrl)
	c := NewSubtreeCache(defaultMapStrata, populateMapSubtreeNodes(treeID, maphasher.Default), prepareMapSubtreeWrite())

	h := "0123456789abcdef0123456789abcdef"
	nodeID := storage.NewNodeIDFromHash([]byte(h))
	expectedSetIDs := make(map[string]string)
	// When we loop around asking for all 0..256 bit prefix lengths of the above
	// NodeID, we should see just one "Get" request for each subtree.
	si := -1
	for b := 0; b < nodeID.PrefixLenBits; b += defaultMapStrata[si] {
		si++
		e := storage.NewNodeIDFromHash([]byte(h))
		//e := nodeID
		e.PrefixLenBits = b
		expectedSetIDs[e.String()] = "expected"
		m.EXPECT().GetSubtree(stestonly.NodeIDEq(e)).Do(func(n storage.NodeID) {
			t.Logf("read %v", n)
		}).Return((*storagepb.SubtreeProto)(nil), nil)
	}
	m.EXPECT().SetSubtrees(gomock.Any()).Do(func(trees []*storagepb.SubtreeProto) {
		for _, s := range trees {
			subID := storage.NewNodeIDFromHash(s.Prefix)
			if got, want := s.Depth, c.stratumInfoForPrefixLength(subID.PrefixLenBits).depth; got != int32(want) {
				t.Errorf("Got subtree with depth %d, expected %d for prefixLen %d", got, want, subID.PrefixLenBits)
			}
			state, ok := expectedSetIDs[subID.String()]
			if !ok {
				t.Errorf("Unexpected write to subtree %s", subID.String())
			}
			switch state {
			case "expected":
				expectedSetIDs[subID.String()] = "met"
			case "met":
				t.Errorf("Second write to subtree %s", subID.String())
			default:
				t.Errorf("Unknown state for subtree %s: %s", subID.String(), state)
			}
			t.Logf("write %v -> (%d leaves)", subID, len(s.Leaves))
		}
	}).Return(nil)

	// Read nodes which touch the subtrees we'll write to:
	sibs := nodeID.Siblings()
	for s := range sibs {
		_, err := c.GetNodeHash(sibs[s], m.GetSubtree)
		if err != nil {
			t.Fatalf("failed to get node hash: %v", err)
		}
	}

	t.Logf("after sibs: %v", nodeID)

	// Write nodes
	for nodeID.PrefixLenBits > 0 {
		h := []byte(nodeID.String())
		err := c.SetNodeHash(nodeID, append([]byte("hash-"), h...), noFetch)
		if err != nil {
			t.Fatalf("failed to set node hash: %v", err)
		}
		nodeID.PrefixLenBits--
	}

	if err := c.Flush(m.SetSubtrees); err != nil {
		t.Fatalf("failed to flush cache: %v", err)
	}

	for k, v := range expectedSetIDs {
		switch v {
		case "expected":
			t.Errorf("Subtree %s remains unset", k)
		case "met":
			//
		default:
			t.Errorf("Unknown state for subtree %s: %s", k, v)
		}
	}
}

func TestRepopulateLogSubtree(t *testing.T) {
	populateTheThing := populateLogSubtreeNodes(rfc6962.DefaultHasher)
	cmt := merkle.NewCompactMerkleTree(rfc6962.DefaultHasher)
	cmtStorage := storagepb.SubtreeProto{
		Leaves:        make(map[string][]byte),
		InternalNodes: make(map[string][]byte),
		Depth:         int32(defaultLogStrata[0]),
	}
	s := storagepb.SubtreeProto{
		Leaves: make(map[string][]byte),
		Depth:  int32(defaultLogStrata[0]),
	}
	c := NewSubtreeCache(defaultLogStrata, populateLogSubtreeNodes(rfc6962.DefaultHasher), prepareLogSubtreeWrite())
	for numLeaves := int64(1); numLeaves <= 256; numLeaves++ {
		// clear internal nodes
		s.InternalNodes = make(map[string][]byte)

		leaf := []byte(fmt.Sprintf("this is leaf %d", numLeaves))
		leafHash, err := rfc6962.DefaultHasher.HashLeaf(leaf)
		if err != nil {
			t.Fatalf("HashLeaf(%v): %v", leaf, err)
		}
		_, err = cmt.AddLeafHash(leafHash, func(depth int, index int64, h []byte) error {
			n, err := storage.NewNodeIDForTreeCoords(int64(depth), index, 8)
			if err != nil {
				return fmt.Errorf("failed to create nodeID for cmt tree: %v", err)
			}
			// Don't store leaves or the subtree root in InternalNodes
			if depth > 0 && depth < 8 {
				_, sfx := c.splitNodeID(n)
				cmtStorage.InternalNodes[sfx.String()] = h
			}
			return nil
		})
		if err != nil {
			t.Fatalf("merkle tree update failed: %v", err)
failed: %v", err) 284 } 285 286 nodeID := storage.NewNodeIDFromPrefix(s.Prefix, logStrataDepth, numLeaves-1, logStrataDepth, maxLogDepth) 287 _, sfx := nodeID.Split(len(s.Prefix), int(s.Depth)) 288 sfxKey := sfx.String() 289 s.Leaves[sfxKey] = leafHash 290 if numLeaves == 1<<uint(defaultLogStrata[0]) { 291 s.InternalNodeCount = uint32(len(cmtStorage.InternalNodes)) 292 } else { 293 s.InternalNodeCount = 0 294 } 295 cmtStorage.Leaves[sfxKey] = leafHash 296 297 if err := populateTheThing(&s); err != nil { 298 t.Fatalf("failed populate subtree: %v", err) 299 } 300 if got, expected := s.RootHash, cmt.CurrentRoot(); !bytes.Equal(got, expected) { 301 t.Fatalf("Got root %v for tree size %d, expected %v. subtree:\n%#v", got, numLeaves, expected, s.String()) 302 } 303 304 // Repopulation should only have happened with a full subtree, otherwise the internal nodes map 305 // should be empty 306 if numLeaves != 1<<uint(defaultLogStrata[0]) { 307 if len(s.InternalNodes) != 0 { 308 t.Fatalf("(it %d) internal nodes should be empty but got: %v", numLeaves, s.InternalNodes) 309 } 310 } else if diff := pretty.Compare(cmtStorage.InternalNodes, s.InternalNodes); diff != "" { 311 t.Fatalf("(it %d) CMT/sparse internal nodes diff:\n%v", numLeaves, diff) 312 } 313 } 314 } 315 316 func TestPrefixLengths(t *testing.T) { 317 strata := []int{8, 8, 16, 32, 64, 128} 318 stratumInfo := []stratumInfo{{0, 8}, {1, 8}, {2, 16}, {2, 16}, {4, 32}, {4, 32}, {4, 32}, {4, 32}, {8, 64}, {8, 64}, {8, 64}, {8, 64}, {8, 64}, {8, 64}, {8, 64}, {8, 64}, {16, 128}, {16, 128}, {16, 128}, {16, 128}, {16, 128}, {16, 128}, {16, 128}, {16, 128}, {16, 128}, {16, 128}, {16, 128}, {16, 128}, {16, 128}, {16, 128}, {16, 128}, {16, 128}} 319 320 c := NewSubtreeCache(strata, populateMapSubtreeNodes(treeID, maphasher.Default), prepareMapSubtreeWrite()) 321 322 if diff := pretty.Compare(c.stratumInfo, stratumInfo); diff != "" { 323 t.Fatalf("prefixLengths diff:\n%v", diff) 324 } 325 } 326 327 func TestGetStratumInfo(t *testing.T) { 328 c := NewSubtreeCache(defaultMapStrata, populateMapSubtreeNodes(treeID, maphasher.Default), prepareMapSubtreeWrite()) 329 testVec := []struct { 330 depth int 331 info stratumInfo 332 }{ 333 {0, stratumInfo{0, 8}}, 334 {1, stratumInfo{0, 8}}, 335 {7, stratumInfo{0, 8}}, 336 {8, stratumInfo{1, 8}}, 337 {15, stratumInfo{1, 8}}, 338 {79, stratumInfo{9, 8}}, 339 {80, stratumInfo{10, 176}}, 340 {81, stratumInfo{10, 176}}, 341 {156, stratumInfo{10, 176}}, 342 } 343 for i, tv := range testVec { 344 if diff := pretty.Compare(c.stratumInfoForPrefixLength(tv.depth), tv.info); diff != "" { 345 t.Errorf("(test %d for depth %d) diff:\n%v", i, tv.depth, diff) 346 } 347 } 348 } 349 350 func TestIdempotentWrites(t *testing.T) { 351 mockCtrl := gomock.NewController(t) 352 defer mockCtrl.Finish() 353 354 m := NewMockNodeStorage(mockCtrl) 355 356 h := "0123456789abcdef0123456789abcdef" 357 nodeID := storage.NewNodeIDFromHash([]byte(h)) 358 nodeID.PrefixLenBits = 40 359 subtreeID := nodeID 360 subtreeID.PrefixLenBits = 32 361 362 expectedSetIDs := make(map[string]string) 363 expectedSetIDs[subtreeID.String()] = "expected" 364 365 // The first time we read the subtree we'll emulate an empty subtree: 366 m.EXPECT().GetSubtree(stestonly.NodeIDEq(subtreeID)).Do(func(n storage.NodeID) { 367 t.Logf("read %v", n.String()) 368 }).Return((*storagepb.SubtreeProto)(nil), nil) 369 370 // We should only see a single write attempt 371 m.EXPECT().SetSubtrees(gomock.Any()).Times(1).Do(func(trees []*storagepb.SubtreeProto) { 372 for _, s := range trees { 
			subID := storage.NewNodeIDFromHash(s.Prefix)
			state, ok := expectedSetIDs[subID.String()]
			if !ok {
				t.Errorf("Unexpected write to subtree %s", subID.String())
			}
			switch state {
			case "expected":
				expectedSetIDs[subID.String()] = "met"
			case "met":
				t.Errorf("Second write to subtree %s", subID.String())
			default:
				t.Errorf("Unknown state for subtree %s: %s", subID.String(), state)
			}

			// After this write completes, subsequent reads will see the subtree
			// being written now:
			m.EXPECT().GetSubtree(stestonly.NodeIDEq(subID)).AnyTimes().Do(func(n storage.NodeID) {
				t.Logf("read again %v", n.String())
			}).Return(s, nil)

			t.Logf("write %v -> %#v", subID.String(), s)
		}
	}).Return(nil)

	// Now write the same value to the same node multiple times.
	// We should see many reads, but only the first call to SetNodeHash should
	// result in an actual write being flushed through to storage.
	for i := 0; i < 10; i++ {
		c := NewSubtreeCache(defaultMapStrata, populateMapSubtreeNodes(treeID, maphasher.Default), prepareMapSubtreeWrite())
		_, err := c.GetNodeHash(nodeID, m.GetSubtree)
		if err != nil {
			t.Fatalf("%d: failed to get node hash: %v", i, err)
		}

		err = c.SetNodeHash(nodeID, []byte("noodled"), noFetch)
		if err != nil {
			t.Fatalf("%d: failed to set node hash: %v", i, err)
		}

		if err := c.Flush(m.SetSubtrees); err != nil {
			t.Fatalf("%d: failed to flush cache: %v", i, err)
		}
	}

	for k, v := range expectedSetIDs {
		switch v {
		case "expected":
			t.Errorf("Subtree %s remains unset", k)
		case "met":
			//
		default:
			t.Errorf("Unknown state for subtree %s: %s", k, v)
		}
	}
}
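
For readers puzzling over the expected values in TestPrefixLengths and TestGetStratumInfo above, the following is a minimal, self-contained sketch (hypothetical code, not part of the Trillian source) of how a per-byte stratum lookup table with that shape can be derived from a strata slice. It assumes every stratum depth is a multiple of 8 bits and that lookups index the table by prefixLenBits/8, which is consistent with the test expectations but is only an assumption about the cache internals.

// stratum_sketch.go — illustrative only; names here are invented for the sketch.
package main

import "fmt"

// info mirrors the {prefix-byte offset, stratum depth in bits} pairs the tests expect.
type info struct {
	prefixBytes int
	depth       int
}

// buildTable returns one entry per prefix byte: each stratum of depth d bits
// contributes d/8 identical entries recording where that stratum starts and how
// deep it is, matching the literal slice in TestPrefixLengths.
func buildTable(strata []int) []info {
	var table []info
	offset := 0
	for _, depth := range strata {
		for i := 0; i < depth/8; i++ {
			table = append(table, info{prefixBytes: offset, depth: depth})
		}
		offset += depth / 8
	}
	return table
}

func main() {
	// The map strata used throughout the tests: ten 8-bit strata, then one 176-bit stratum.
	mapStrata := []int{8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 176}
	table := buildTable(mapStrata)
	// Look up by prefix length in bits, as stratumInfoForPrefixLength appears to do:
	for _, bits := range []int{0, 8, 79, 80, 156} {
		fmt.Printf("prefixLen %3d bits -> %+v\n", bits, table[bits/8])
	}
	// Matches TestGetStratumInfo: {0 8}, {1 8}, {9 8}, {10 176}, {10 176}.
}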