github.com/bartle-stripe/trillian@v1.2.1/storage/testonly/fake_node_reader.go

// Copyright 2017 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package testonly

import (
	"bytes"
	"context"
	"fmt"

	"github.com/golang/glog"
	"github.com/google/trillian/merkle"
	"github.com/google/trillian/merkle/rfc6962"
	"github.com/google/trillian/storage"
)

// This is a fake implementation of a NodeReader intended for use in testing Merkle path code.
// Building node sets for tests by hand is onerous and error-prone, especially when trying
// to test code reading from multiple tree revisions. It cannot live in the main testonly
// package as that would create import cycles.

// NodeMapping is a NodeID / Node pair. It exists because NodeIDs cannot be used directly as
// map keys; callers pass these pairs and FakeNodeReader internally manages the derived
// string keys.
type NodeMapping struct {
	NodeID storage.NodeID
	Node   storage.Node
}

// FakeNodeReader is an implementation of storage.NodeReader that's preloaded with a set of
// NodeID -> Node mappings and will return only those. Requesting any other node results in
// an error. It is for use in tests only and does not implement any other storage APIs.
type FakeNodeReader struct {
	treeSize     int64
	treeRevision int64
	nodeMap      map[string]storage.Node
}

// NewFakeNodeReader creates and returns a FakeNodeReader with the supplied nodeID -> Node
// mappings, assuming that all the nodes are at the specified tree revision. All the nodeIDs
// must be distinct.
func NewFakeNodeReader(mappings []NodeMapping, treeSize, treeRevision int64) *FakeNodeReader {
	nodeMap := make(map[string]storage.Node)

	for _, mapping := range mappings {
		if _, ok := nodeMap[mapping.NodeID.String()]; ok {
			// Duplicate mapping - the test data is invalid so don't continue.
			glog.Fatalf("NewFakeNodeReader duplicate mapping for: %s in:\n%v", mapping.NodeID.String(), mappings)
		}

		nodeMap[mapping.NodeID.String()] = mapping.Node
	}

	return &FakeNodeReader{nodeMap: nodeMap, treeSize: treeSize, treeRevision: treeRevision}
}

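// An illustrative sketch of how a test might build a FakeNodeReader by hand for a single-node
// tree. The function name, node coordinates and hash below are placeholders chosen for
// illustration only.
func exampleFakeNodeReader() *FakeNodeReader {
	// Depth 0, index 0 in a tree addressed with 64 bit paths, matching the usage elsewhere in this file.
	nID, err := storage.NewNodeIDForTreeCoords(0, 0, 64)
	if err != nil {
		panic(err)
	}
	mappings := []NodeMapping{
		{NodeID: nID, Node: storage.Node{NodeID: nID, NodeRevision: 7, Hash: []byte("placeholder-hash")}},
	}
	// A tree of size 1 whose nodes were all written at revision 7.
	return NewFakeNodeReader(mappings, 1, 7)
}
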
// GetTreeRevisionIncludingSize implements the corresponding NodeReader API.
func (f FakeNodeReader) GetTreeRevisionIncludingSize(treeSize int64) (int64, error) {
	if f.treeSize < treeSize {
		return int64(0), fmt.Errorf("GetTreeRevisionIncludingSize() got treeSize:%d, want: <= %d", treeSize, f.treeSize)
	}

	return f.treeRevision, nil
}

// GetMerkleNodes implements the corresponding NodeReader API.
func (f FakeNodeReader) GetMerkleNodes(treeRevision int64, nodeIDs []storage.NodeID) ([]storage.Node, error) {
	if f.treeRevision > treeRevision {
		return nil, fmt.Errorf("GetMerkleNodes() got treeRevision:%d, want: >= %d", treeRevision, f.treeRevision)
	}

	nodes := make([]storage.Node, 0, len(nodeIDs))
	for _, nodeID := range nodeIDs {
		node, ok := f.nodeMap[nodeID.String()]
		if !ok {
			return nil, fmt.Errorf("GetMerkleNodes() unknown node ID: %v", nodeID)
		}

		nodes = append(nodes, node)
	}

	return nodes, nil
}

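// An illustrative sketch of the read path a test might exercise against the fake. Only nodeIDs
// supplied at construction time can be fetched; anything else is an error, which keeps tests
// explicit about exactly which nodes they expect to touch. The helper name is a placeholder.
func exampleReadNode(f *FakeNodeReader, nID storage.NodeID, treeSize int64) (storage.Node, error) {
	rev, err := f.GetTreeRevisionIncludingSize(treeSize)
	if err != nil {
		return storage.Node{}, err
	}
	nodes, err := f.GetMerkleNodes(rev, []storage.NodeID{nID})
	if err != nil {
		return storage.Node{}, err
	}
	return nodes[0], nil
}
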
func (f FakeNodeReader) hasID(nodeID storage.NodeID) bool {
	_, ok := f.nodeMap[nodeID.String()]
	return ok
}

// MultiFakeNodeReader can provide nodes at multiple revisions. It delegates to a number of
// FakeNodeReaders, each set up to handle one revision.
type MultiFakeNodeReader struct {
	readers []FakeNodeReader
}

// LeafBatch describes a set of leaves to be loaded into a MultiFakeNodeReader via a compact
// Merkle tree. As each batch is added to the tree the resulting node updates are collected
// and recorded in a FakeNodeReader for that revision. ExpectedRoot should be the raw bytes
// returned by CurrentRoot() on the compact Merkle tree after the batch has been added.
type LeafBatch struct {
	TreeRevision int64
	Leaves       []string
	ExpectedRoot []byte
}

// NewMultiFakeNodeReader creates a MultiFakeNodeReader delegating to a number of FakeNodeReaders.
func NewMultiFakeNodeReader(readers []FakeNodeReader) *MultiFakeNodeReader {
	return &MultiFakeNodeReader{readers: readers}
}

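// An illustrative sketch of assembling a MultiFakeNodeReader directly from per-revision fakes.
// Each FakeNodeReader holds the node state for one revision; supplying them in ascending
// revision order matches how readerForNodeID works backwards through the slice.
func exampleMultiReader(rev1, rev2 FakeNodeReader) *MultiFakeNodeReader {
	return NewMultiFakeNodeReader([]FakeNodeReader{rev1, rev2})
}
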
// NewMultiFakeNodeReaderFromLeaves uses a compact Merkle tree to set up the nodes at various
// revisions. It collates all node updates from a batch of leaf data into one FakeNodeReader.
// This has the advantage of not needing to manually create all the data structures, but the
// disadvantage is that a bug in the compact tree could be reflected in tests using this
// code. To help guard against this we check the tree root hash after each batch has been
// processed. The supplied batches must be in ascending order of tree revision.
func NewMultiFakeNodeReaderFromLeaves(batches []LeafBatch) *MultiFakeNodeReader {
	tree := merkle.NewCompactMerkleTree(rfc6962.DefaultHasher)
	readers := make([]FakeNodeReader, 0, len(batches))

	lastBatchRevision := int64(0)
	for _, batch := range batches {
		if batch.TreeRevision <= lastBatchRevision {
			glog.Fatalf("Batches out of order revision: %d, last: %d in:\n%v", batch.TreeRevision,
				lastBatchRevision, batches)
		}

		lastBatchRevision = batch.TreeRevision
		nodeMap := make(map[string]storage.Node)
		for _, leaf := range batch.Leaves {
			// We're only interested in the side effects of adding leaves - the node updates.
			tree.AddLeaf([]byte(leaf), func(depth int, index int64, hash []byte) error {
				nID, err := storage.NewNodeIDForTreeCoords(int64(depth), index, 64)

				if err != nil {
					return fmt.Errorf("failed to create a nodeID for tree - should not happen d:%d i:%d",
						depth, index)
				}

				nodeMap[nID.String()] = storage.Node{NodeID: nID, NodeRevision: batch.TreeRevision, Hash: hash}
				return nil
			})
		}

		// Sanity check the tree root hash against the one we expect to see.
		if got, want := tree.CurrentRoot(), batch.ExpectedRoot; !bytes.Equal(got, want) {
			panic(fmt.Errorf("NewMultiFakeNodeReaderFromLeaves() got root: %x, want: %x (%v)", got, want, batch))
		}

		// Unroll the update map to []NodeMapping, retaining the most recent node update within
		// the batch for each ID. Use that to create a new FakeNodeReader.
		mappings := make([]NodeMapping, 0, len(nodeMap))

		for _, node := range nodeMap {
			mappings = append(mappings, NodeMapping{NodeID: node.NodeID, Node: node})
		}

		readers = append(readers, *NewFakeNodeReader(mappings, tree.Size(), batch.TreeRevision))
	}

	return NewMultiFakeNodeReader(readers)
}

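// An illustrative sketch of building a two-revision MultiFakeNodeReader from leaf data. The
// expectedRoot1/expectedRoot2 arguments stand in for precomputed RFC 6962 root hashes of the
// tree after each batch; NewMultiFakeNodeReaderFromLeaves panics if they do not match what the
// compact tree actually produces.
func exampleMultiReaderFromLeaves(expectedRoot1, expectedRoot2 []byte) *MultiFakeNodeReader {
	return NewMultiFakeNodeReaderFromLeaves([]LeafBatch{
		{TreeRevision: 11, Leaves: []string{"leaf-one", "leaf-two"}, ExpectedRoot: expectedRoot1},
		{TreeRevision: 12, Leaves: []string{"leaf-three"}, ExpectedRoot: expectedRoot2},
	})
}
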
func (m MultiFakeNodeReader) readerForNodeID(nodeID storage.NodeID, revision int64) *FakeNodeReader {
	// Work backwards and use the first reader where the node is present and the revision is in range.
	for i := len(m.readers) - 1; i >= 0; i-- {
		if m.readers[i].treeRevision <= revision && m.readers[i].hasID(nodeID) {
			return &m.readers[i]
		}
	}

	return nil
}

// GetTreeRevisionIncludingSize implements the corresponding NodeReader API.
func (m MultiFakeNodeReader) GetTreeRevisionIncludingSize(treeSize int64) (int64, int64, error) {
	for i := len(m.readers) - 1; i >= 0; i-- {
		if m.readers[i].treeSize >= treeSize {
			return m.readers[i].treeRevision, m.readers[i].treeSize, nil
		}
	}

	return int64(0), int64(0), fmt.Errorf("want revision for tree size: %d but it doesn't exist", treeSize)
}

// GetMerkleNodes implements the corresponding NodeReader API.
func (m MultiFakeNodeReader) GetMerkleNodes(ctx context.Context, treeRevision int64, nodeIDs []storage.NodeID) ([]storage.Node, error) {
	// Find the correct reader for the supplied tree revision. This must be done for each node
	// as earlier revisions may still be relevant.
	nodes := make([]storage.Node, 0, len(nodeIDs))
	for _, nID := range nodeIDs {
		reader := m.readerForNodeID(nID, treeRevision)
		if reader == nil {
			return nil,
				fmt.Errorf("want nodeID: %v with revision <= %d but no reader has it\n%v", nID, treeRevision, m)
		}

		node, err := reader.GetMerkleNodes(treeRevision, []storage.NodeID{nID})
		if err != nil {
			return nil, err
		}

		nodes = append(nodes, node[0])
	}

	return nodes, nil
}
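
// An illustrative sketch of the typical read path a Merkle path test would exercise against
// the multi reader: resolve a revision from a tree size, then fetch the nodes for that view.
func exampleMultiRead(ctx context.Context, m *MultiFakeNodeReader, treeSize int64, ids []storage.NodeID) ([]storage.Node, error) {
	rev, _, err := m.GetTreeRevisionIncludingSize(treeSize)
	if err != nil {
		return nil, err
	}
	return m.GetMerkleNodes(ctx, rev, ids)
}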