github.com/mre-fog/trillianxx@v1.1.2-0.20180615153820-ae375a99d36a/log/sequencer.go

// Copyright 2016 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

// Package log includes code that is specific to Trillian's log mode, particularly code
// for running sequencing operations.
package log

import (
	"context"
	"fmt"
	"strconv"
	"sync"
	"time"

	"github.com/golang/glog"
	"github.com/golang/protobuf/ptypes"
	"github.com/google/trillian"
	"github.com/google/trillian/merkle"
	"github.com/google/trillian/merkle/hashers"
	"github.com/google/trillian/monitoring"
	"github.com/google/trillian/quota"
	"github.com/google/trillian/storage"
	"github.com/google/trillian/types"
	"github.com/google/trillian/util"

	tcrypto "github.com/google/trillian/crypto"
)

const logIDLabel = "logid"

var (
	once                   sync.Once
	seqBatches             monitoring.Counter
	seqTreeSize            monitoring.Gauge
	seqLatency             monitoring.Histogram
	seqDequeueLatency      monitoring.Histogram
	seqGetRootLatency      monitoring.Histogram
	seqInitTreeLatency     monitoring.Histogram
	seqWriteTreeLatency    monitoring.Histogram
	seqUpdateLeavesLatency monitoring.Histogram
	seqSetNodesLatency     monitoring.Histogram
	seqStoreRootLatency    monitoring.Histogram
	seqCounter             monitoring.Counter
	seqMergeDelay          monitoring.Histogram

	// QuotaIncreaseFactor is the multiplier used for the number of tokens added back to
	// sequencing-based quotas. The resulting PutTokens call is equivalent to
	// "PutTokens(_, numLeaves * QuotaIncreaseFactor, _)".
	// A factor >1 adds resilience to token leakage, at the risk of being overly
	// optimistic in the face of true token shortages. The higher the factor, the more
	// optimistic the quota handling. A factor that's too high (say, >1.5) is likely a
	// sign that the quota configuration should be changed instead.
	// A factor <1 WILL lead to token shortages, so it is normalized to 1.
	QuotaIncreaseFactor = 1.1
)

func quotaIncreaseFactor() float64 {
	if QuotaIncreaseFactor < 1 {
		QuotaIncreaseFactor = 1
		return 1
	}
	return QuotaIncreaseFactor
}
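
// The replenishment arithmetic above is small enough to show concretely. This
// helper is an illustrative sketch (it is not called by the sequencing path):
// it computes the token count that IntegrateBatch later passes to
// quota.Manager.PutTokens after sequencing numLeaves leaves.
func replenishTokenCount(numLeaves int) int {
	// With the default QuotaIncreaseFactor of 1.1, sequencing 100 leaves
	// replenishes int(100 * 1.1) = 110 tokens; a factor normalized to 1
	// replenishes exactly numLeaves tokens.
	return int(float64(numLeaves) * quotaIncreaseFactor())
}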

func createMetrics(mf monitoring.MetricFactory) {
	if mf == nil {
		mf = monitoring.InertMetricFactory{}
	}
	quota.InitMetrics(mf)
	seqBatches = mf.NewCounter("sequencer_batches", "Number of sequencer batch operations", logIDLabel)
	seqTreeSize = mf.NewGauge("sequencer_tree_size", "Size of Merkle tree", logIDLabel)
	seqLatency = mf.NewHistogram("sequencer_latency", "Latency of sequencer batch operation in seconds", logIDLabel)
	seqDequeueLatency = mf.NewHistogram("sequencer_latency_dequeue", "Latency of dequeue-leaves part of sequencer batch operation in seconds", logIDLabel)
	seqGetRootLatency = mf.NewHistogram("sequencer_latency_get_root", "Latency of get-root part of sequencer batch operation in seconds", logIDLabel)
	seqInitTreeLatency = mf.NewHistogram("sequencer_latency_init_tree", "Latency of init-tree part of sequencer batch operation in seconds", logIDLabel)
	seqWriteTreeLatency = mf.NewHistogram("sequencer_latency_write_tree", "Latency of write-tree part of sequencer batch operation in seconds", logIDLabel)
	seqUpdateLeavesLatency = mf.NewHistogram("sequencer_latency_update_leaves", "Latency of update-leaves part of sequencer batch operation in seconds", logIDLabel)
	seqSetNodesLatency = mf.NewHistogram("sequencer_latency_set_nodes", "Latency of set-nodes part of sequencer batch operation in seconds", logIDLabel)
	seqStoreRootLatency = mf.NewHistogram("sequencer_latency_store_root", "Latency of store-root part of sequencer batch operation in seconds", logIDLabel)
	seqCounter = mf.NewCounter("sequencer_sequenced", "Number of leaves sequenced", logIDLabel)
	seqMergeDelay = mf.NewHistogram("sequencer_merge_delay", "Delay between queuing and integration of leaves", logIDLabel)
}

// Sequencer instances are responsible for integrating new leaves into a single log.
// Leaves will be assigned unique sequence numbers when they are processed.
// There is no strong ordering guarantee, but in general entries will be processed
// in order of submission to the log.
type Sequencer struct {
	hasher     hashers.LogHasher
	timeSource util.TimeSource
	logStorage storage.LogStorage
	signer     *tcrypto.Signer
	qm         quota.Manager
}

// maxTreeDepth sets an upper limit on the size of Log trees.
// Note: We can't actually go beyond 2^63 entries because we use int64s,
// but tree depths must be calculated as a multiple of 8 due to the
// subtree assumptions.
const maxTreeDepth = 64
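
// For illustration: node coordinates throughout this file are packed via
// storage.NewNodeIDForTreeCoords(depth, index, maxTreeDepth), so every node is
// addressed within the full 64-bit (8*8) coordinate space. The leaf with
// sequence number 5, for example, sits at depth 0, index 5:
//
//	nodeID, err := storage.NewNodeIDForTreeCoords(0, 5, maxTreeDepth)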

// NewSequencer creates a new Sequencer instance for the specified inputs.
func NewSequencer(
	hasher hashers.LogHasher,
	timeSource util.TimeSource,
	logStorage storage.LogStorage,
	signer *tcrypto.Signer,
	mf monitoring.MetricFactory,
	qm quota.Manager) *Sequencer {
	once.Do(func() {
		createMetrics(mf)
	})
	return &Sequencer{
		hasher:     hasher,
		timeSource: timeSource,
		logStorage: logStorage,
		signer:     signer,
		qm:         qm,
	}
}
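
// A construction sketch. The concrete storage.LogStorage, *tcrypto.Signer and
// quota.Manager come from deployment wiring that this file does not prescribe,
// and util.SystemTimeSource / quota.Noop() are assumed stand-ins here:
//
//	seq := NewSequencer(hasher, util.SystemTimeSource{}, logStorage, signer,
//		monitoring.InertMetricFactory{}, quota.Noop())
//
// Metrics are created once per process (see the sync.Once above), so the
// metric factory passed by the first constructed Sequencer wins.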

// TODO: This currently doesn't use the batch API for fetching the required nodes. This
// would be more efficient but requires refactoring.
func (s Sequencer) buildMerkleTreeFromStorageAtRoot(ctx context.Context, root *types.LogRootV1, tx storage.TreeTX) (*merkle.CompactMerkleTree, error) {
	mt, err := merkle.NewCompactMerkleTreeWithState(s.hasher, int64(root.TreeSize), func(depth int, index int64) ([]byte, error) {
		nodeID, err := storage.NewNodeIDForTreeCoords(int64(depth), index, maxTreeDepth)
		if err != nil {
			glog.Warningf("%x: Failed to create nodeID: %v", s.signer.KeyHint, err)
			return nil, err
		}
		nodes, err := tx.GetMerkleNodes(ctx, int64(root.Revision), []storage.NodeID{nodeID})
		if err != nil {
			glog.Warningf("%x: Failed to get Merkle nodes: %v", s.signer.KeyHint, err)
			return nil, err
		}

		// We expect to get exactly one node here.
		if len(nodes) != 1 {
			return nil, fmt.Errorf("%x: Did not retrieve one node while loading CompactMerkleTree, got %#v for ID %v@%v", s.signer.KeyHint, nodes, nodeID.String(), root.Revision)
		}

		return nodes[0].Hash, nil
	}, root.RootHash)

	return mt, err
}

func (s Sequencer) buildNodesFromNodeMap(nodeMap map[string]storage.Node, newVersion int64) ([]storage.Node, error) {
	targetNodes := make([]storage.Node, len(nodeMap))
	i := 0
	for _, node := range nodeMap {
		node.NodeRevision = newVersion
		targetNodes[i] = node
		i++
	}
	return targetNodes, nil
}

func (s Sequencer) updateCompactTree(mt *merkle.CompactMerkleTree, leaves []*trillian.LogLeaf, label string) (map[string]storage.Node, error) {
	nodeMap := make(map[string]storage.Node)
	// Update the tree state by integrating the leaves one by one.
	for _, leaf := range leaves {
		seq, err := mt.AddLeafHash(leaf.MerkleLeafHash, func(depth int, index int64, hash []byte) error {
			nodeID, err := storage.NewNodeIDForTreeCoords(int64(depth), index, maxTreeDepth)
			if err != nil {
				return err
			}
			nodeMap[nodeID.String()] = storage.Node{
				NodeID: nodeID,
				Hash:   hash,
			}
			return nil
		})
		if err != nil {
			return nil, err
		}
		// The leaf should already have the correct index before it's integrated.
		if leaf.LeafIndex != seq {
			return nil, fmt.Errorf("got invalid leaf index: %v, want: %v", leaf.LeafIndex, seq)
		}
		integrateTS := s.timeSource.Now()
		leaf.IntegrateTimestamp, err = ptypes.TimestampProto(integrateTS)
		if err != nil {
			return nil, fmt.Errorf("got invalid integrate timestamp: %v", err)
		}

		// Old leaves might not have a QueueTimestamp; only calculate the merge delay if this one does.
		if leaf.QueueTimestamp != nil && leaf.QueueTimestamp.Seconds != 0 {
			queueTS, err := ptypes.Timestamp(leaf.QueueTimestamp)
			if err != nil {
				return nil, fmt.Errorf("got invalid queue timestamp: %v", err)
			}
			mergeDelay := integrateTS.Sub(queueTS)
			seqMergeDelay.Observe(mergeDelay.Seconds(), label)
		}

		// Store the leaf hash in the Merkle tree too:
		leafNodeID, err := storage.NewNodeIDForTreeCoords(0, seq, maxTreeDepth)
		if err != nil {
			return nil, err
		}
		nodeMap[leafNodeID.String()] = storage.Node{
			NodeID: leafNodeID,
			Hash:   leaf.MerkleLeafHash,
		}
	}

	return nodeMap, nil
}

func (s Sequencer) initMerkleTreeFromStorage(ctx context.Context, currentRoot *types.LogRootV1, tx storage.LogTreeTX) (*merkle.CompactMerkleTree, error) {
	if currentRoot.TreeSize == 0 {
		return merkle.NewCompactMerkleTree(s.hasher), nil
	}

	// Initialize the compact tree state to match the latest root in the database.
	return s.buildMerkleTreeFromStorageAtRoot(ctx, currentRoot, tx)
}

// sequencingTask provides sequenced LogLeaf entries, and updates storage
// according to their ordering if needed.
type sequencingTask interface {
	// fetch returns a batch of sequenced entries obtained from storage, sized up
	// to the specified limit. The returned leaves have consecutive LeafIndex
	// values starting from the current tree size.
	fetch(ctx context.Context, limit int, cutoff time.Time) ([]*trillian.LogLeaf, error)

	// update persists the sequence assignments to storage, if they are not
	// already persisted.
	update(ctx context.Context, leaves []*trillian.LogLeaf) error
}

type sequencingTaskData struct {
	label      string
	treeSize   int64
	timeSource util.TimeSource
	tx         storage.LogTreeTX
}

// logSequencingTask is a sequencingTask implementation for "normal" Log mode,
// which assigns consecutive sequence numbers to leaves as they are read from
// the pending unsequenced entries.
type logSequencingTask sequencingTaskData

func (s *logSequencingTask) fetch(ctx context.Context, limit int, cutoff time.Time) ([]*trillian.LogLeaf, error) {
	start := s.timeSource.Now()
	// Recent leaves inside the guard window will not be available for sequencing.
	leaves, err := s.tx.DequeueLeaves(ctx, limit, cutoff)
	if err != nil {
		glog.Warningf("%v: Sequencer failed to dequeue leaves: %v", s.label, err)
		return nil, err
	}
	seqDequeueLatency.Observe(util.SecondsSince(s.timeSource, start), s.label)

	// Assign leaf sequence numbers.
	for i, leaf := range leaves {
		leaf.LeafIndex = s.treeSize + int64(i)
	}
	return leaves, nil
}

func (s *logSequencingTask) update(ctx context.Context, leaves []*trillian.LogLeaf) error {
	start := s.timeSource.Now()
	// Write the new sequence numbers to the leaves in the DB.
	if err := s.tx.UpdateSequencedLeaves(ctx, leaves); err != nil {
		glog.Warningf("%v: Sequencer failed to update sequenced leaves: %v", s.label, err)
		return err
	}
	seqUpdateLeavesLatency.Observe(util.SecondsSince(s.timeSource, start), s.label)
	return nil
}

// preorderedLogSequencingTask is a sequencingTask implementation for
// Pre-ordered Log mode. It reads already-sequenced entries beyond the current
// tree size directly from storage.
type preorderedLogSequencingTask sequencingTaskData

func (s *preorderedLogSequencingTask) fetch(ctx context.Context, limit int, cutoff time.Time) ([]*trillian.LogLeaf, error) {
	// TODO(pavelkalinnikov): Measure latency.
	leaves, err := s.tx.GetLeavesByRange(ctx, s.treeSize, int64(limit))
	if err != nil {
		glog.Warningf("%v: Sequencer failed to load sequenced leaves: %v", s.label, err)
		return nil, err
	}
	return leaves, nil
}

func (s *preorderedLogSequencingTask) update(ctx context.Context, leaves []*trillian.LogLeaf) error {
	// TODO(pavelkalinnikov): Update integration timestamps.
	return nil
}

// IntegrateBatch wraps up all the operations needed to take a batch of queued
// or sequenced leaves and integrate them into the tree.
func (s Sequencer) IntegrateBatch(ctx context.Context, tree *trillian.Tree, limit int, guardWindow, maxRootDurationInterval time.Duration) (int, error) {
	start := s.timeSource.Now()
	label := strconv.FormatInt(tree.TreeId, 10)

	numLeaves := 0
	var newLogRoot *types.LogRootV1
	var newSLR *trillian.SignedLogRoot
	err := s.logStorage.ReadWriteTransaction(ctx, tree, func(ctx context.Context, tx storage.LogTreeTX) error {
		stageStart := s.timeSource.Now()
		defer seqBatches.Inc(label)
		defer func() { seqLatency.Observe(util.SecondsSince(s.timeSource, start), label) }()

		// Get the latest known root from storage.
		sth, err := tx.LatestSignedLogRoot(ctx)
		if err != nil {
			glog.Warningf("%v: Sequencer failed to get latest root: %v", tree.TreeId, err)
			return err
		}
		// There is no trust boundary between the signer and the
		// database, so we skip signature verification.
		// TODO(gbelvin): Add signature checking as a sanity check.
		var currentRoot types.LogRootV1
		if err := currentRoot.UnmarshalBinary(sth.LogRoot); err != nil {
			glog.Warningf("%v: Sequencer failed to unmarshal latest root: %v", tree.TreeId, err)
			return err
		}
		seqGetRootLatency.Observe(util.SecondsSince(s.timeSource, stageStart), label)

		if currentRoot.RootHash == nil {
			glog.Warningf("%v: Fresh log - no previous TreeHeads exist.", tree.TreeId)
			return storage.ErrTreeNeedsInit
		}

		taskData := &sequencingTaskData{
			label:      label,
			treeSize:   int64(currentRoot.TreeSize),
			timeSource: s.timeSource,
			tx:         tx,
		}
		var st sequencingTask
		switch tree.TreeType {
		case trillian.TreeType_LOG:
			st = (*logSequencingTask)(taskData)
		case trillian.TreeType_PREORDERED_LOG:
			st = (*preorderedLogSequencingTask)(taskData)
		default:
			return fmt.Errorf("IntegrateBatch not supported for TreeType %v", tree.TreeType)
		}

		sequencedLeaves, err := st.fetch(ctx, limit, start.Add(-guardWindow))
		if err != nil {
			glog.Warningf("%v: Sequencer failed to load sequenced batch: %v", tree.TreeId, err)
			return err
		}
		numLeaves = len(sequencedLeaves)

		// We need to create a signed root if entries were added or the latest root
		// is too old.
		if numLeaves == 0 {
			nowNanos := s.timeSource.Now().UnixNano()
			interval := time.Duration(nowNanos - int64(currentRoot.TimestampNanos))
			if maxRootDurationInterval == 0 || interval < maxRootDurationInterval {
				// We have nothing to integrate into the tree.
				glog.V(1).Infof("%v: No leaves sequenced in this signing operation", tree.TreeId)
				return nil
			}
			glog.Infof("%v: Forcing new root generation, %v since last root", tree.TreeId, interval)
		}
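
		// To make the timing rule above concrete (values illustrative): with
		// maxRootDurationInterval = 1h and no pending leaves, a current root
		// that is 30 minutes old makes this operation a no-op, while a root
		// that is 90 minutes old forces a fresh root over the unchanged tree.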

		stageStart = s.timeSource.Now()
		merkleTree, err := s.initMerkleTreeFromStorage(ctx, &currentRoot, tx)
		if err != nil {
			return err
		}
		seqInitTreeLatency.Observe(util.SecondsSince(s.timeSource, stageStart), label)
		stageStart = s.timeSource.Now()

		// We've done all the reads; now do the updates in the same transaction.
		// The schema should prevent multiple STHs being inserted with the same
		// revision number, so it should not be possible for colliding updates to
		// commit.
		newVersion := tx.WriteRevision()
		if got, want := newVersion, int64(currentRoot.Revision)+1; got != want {
			return fmt.Errorf("%v: got writeRevision of %v, but expected %v", tree.TreeId, got, want)
		}

		// Collate node updates.
		nodeMap, err := s.updateCompactTree(merkleTree, sequencedLeaves, label)
		if err != nil {
			return err
		}
		seqWriteTreeLatency.Observe(util.SecondsSince(s.timeSource, stageStart), label)

		// Store the sequenced batch.
		if err := st.update(ctx, sequencedLeaves); err != nil {
			return err
		}
		stageStart = s.timeSource.Now()

		// Build objects for the nodes to be updated. Because we deduped via the map,
		// each node can only be created / updated once in each tree revision and
		// they cannot conflict when we do the storage update.
		targetNodes, err := s.buildNodesFromNodeMap(nodeMap, newVersion)
		if err != nil {
			// Probably an internal error with map building; unexpected.
			glog.Warningf("%v: Failed to build target nodes in sequencer: %v", tree.TreeId, err)
			return err
		}

		// Now insert or update the nodes affected by the above, at the new tree
		// version.
		if err := tx.SetMerkleNodes(ctx, targetNodes); err != nil {
			glog.Warningf("%v: Sequencer failed to set Merkle nodes: %v", tree.TreeId, err)
			return err
		}
		seqSetNodesLatency.Observe(util.SecondsSince(s.timeSource, stageStart), label)
		stageStart = s.timeSource.Now()

		// Create the log root ready for signing.
		seqTreeSize.Set(float64(merkleTree.Size()), label)
		newLogRoot = &types.LogRootV1{
			RootHash:       merkleTree.CurrentRoot(),
			TimestampNanos: uint64(s.timeSource.Now().UnixNano()),
			TreeSize:       uint64(merkleTree.Size()),
			Revision:       uint64(newVersion),
		}

		if newLogRoot.TimestampNanos <= currentRoot.TimestampNanos {
			err := fmt.Errorf("refusing to sign root with timestamp earlier than previous root (%d <= %d)", newLogRoot.TimestampNanos, currentRoot.TimestampNanos)
			glog.Warningf("%v: %s", tree.TreeId, err)
			return err
		}

		newSLR, err = s.signer.SignLogRoot(newLogRoot)
		if err != nil {
			glog.Warningf("%v: signer failed to sign root: %v", tree.TreeId, err)
			return err
		}

		if err := tx.StoreSignedLogRoot(ctx, *newSLR); err != nil {
			glog.Warningf("%v: failed to write updated tree root: %v", tree.TreeId, err)
			return err
		}
		seqStoreRootLatency.Observe(util.SecondsSince(s.timeSource, stageStart), label)

		return nil
	})
	if err != nil {
		return 0, err
	}

	// Let quota.Manager know about newly-sequenced entries.
	// All possibly influenced quotas are replenished: {Tree/Global, Read/Write}.
	// Implementations are tasked with filtering quotas that shouldn't be replenished.
	// TODO(codingllama): Consider adding a source-aware replenish method
	// (eg, qm.Replenish(ctx, tokens, specs, quota.SequencerSource)), so there's no ambiguity as to
	// where the tokens come from.
	if numLeaves > 0 {
		tokens := int(float64(numLeaves) * quotaIncreaseFactor())
		specs := []quota.Spec{
			{Group: quota.Tree, Kind: quota.Read, TreeID: tree.TreeId},
			{Group: quota.Tree, Kind: quota.Write, TreeID: tree.TreeId},
			{Group: quota.Global, Kind: quota.Read},
			{Group: quota.Global, Kind: quota.Write},
		}
		glog.V(2).Infof("%v: Replenishing %v tokens (numLeaves = %v)", tree.TreeId, tokens, numLeaves)
		err := s.qm.PutTokens(ctx, tokens, specs)
		if err != nil {
			glog.Warningf("%v: Failed to replenish %v tokens: %v", tree.TreeId, tokens, err)
		}
		quota.Metrics.IncReplenished(tokens, specs, err == nil)
	}

	seqCounter.Add(float64(numLeaves), label)
	if newSLR != nil {
		glog.Infof("%v: sequenced %v leaves, size %v, tree-revision %v", tree.TreeId, numLeaves, newLogRoot.TreeSize, newLogRoot.Revision)
	}
	return numLeaves, nil
}
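
// An illustrative call, as a sequencing loop might make it (the limit and
// window values are assumptions, not defaults taken from this file):
//
//	count, err := seq.IntegrateBatch(ctx, tree, 1000, 5*time.Second, time.Hour)
//
// i.e. integrate up to 1000 leaves queued at least 5s before the batch started,
// and force a fresh root if the latest one is over an hour old even when the
// queue is empty.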

// SignRoot wraps up all the operations for creating a new log signed root.
func (s Sequencer) SignRoot(ctx context.Context, tree *trillian.Tree) error {
	return s.logStorage.ReadWriteTransaction(ctx, tree, func(ctx context.Context, tx storage.LogTreeTX) error {
		// Get the latest known root from storage.
		sth, err := tx.LatestSignedLogRoot(ctx)
		if err != nil {
			glog.Warningf("%v: signer failed to get latest root: %v", tree.TreeId, err)
			return err
		}
		var currentRoot types.LogRootV1
		if err := currentRoot.UnmarshalBinary(sth.LogRoot); err != nil {
			glog.Warningf("%v: Sequencer failed to unmarshal latest root: %v", tree.TreeId, err)
			return err
		}

		// Initialize a Merkle Tree from the state in storage. This should fail if the tree is
		// in a corrupt state.
		merkleTree, err := s.initMerkleTreeFromStorage(ctx, &currentRoot, tx)
		if err != nil {
			return err
		}
		newLogRoot := &types.LogRootV1{
			RootHash:       merkleTree.CurrentRoot(),
			TimestampNanos: uint64(s.timeSource.Now().UnixNano()),
			TreeSize:       uint64(merkleTree.Size()),
			Revision:       currentRoot.Revision + 1,
		}
		newSLR, err := s.signer.SignLogRoot(newLogRoot)
		if err != nil {
			glog.Warningf("%v: signer failed to sign root: %v", tree.TreeId, err)
			return err
		}

		// Store the new root and we're done.
		if err := tx.StoreSignedLogRoot(ctx, *newSLR); err != nil {
			glog.Warningf("%v: signer failed to write updated root: %v", tree.TreeId, err)
			return err
		}
		glog.V(2).Infof("%v: new signed root, size %v, tree-revision %v", tree.TreeId, newLogRoot.TreeSize, newLogRoot.Revision)

		return nil
	})
}