github.com/mre-fog/trillianxx@v1.1.2-0.20180615153820-ae375a99d36a/docs/storage/commit_log/signer/signer.go

// Copyright 2017 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

// Package signer is a sample implementation of a commit-log based signer.
package signer

import (
    "flag"
    "fmt"
    "sync"
    "time"

    "github.com/golang/glog"
    "github.com/google/trillian/docs/storage/commit_log/simelection"
    "github.com/google/trillian/docs/storage/commit_log/simkafka"
)

var batchSize = flag.Int("batch_size", 5, "Maximum leaves to sign in one run")
var pessimizeInterval = flag.Duration("signer_pessimize", 10*time.Millisecond, "Pause interval in signing to induce inter-signer problems")

// Signer is a simulated signer instance.
type Signer struct {
    mu        sync.RWMutex
    Name      string
    election  *simelection.Election
    epoch     int64
    dbSTHInfo STHInfo
    db        FakeDatabase
}

// New creates a simulated signer that uses the provided election.
func New(name string, election *simelection.Election, epoch int64) *Signer {
    return &Signer{
        Name:     name,
        election: election,
        epoch:    epoch,
        dbSTHInfo: STHInfo{
            treeRevision: -1,
            sthOffset:    -1,
        },
    }
}

func (s *Signer) String() string {
    s.mu.RLock()
    defer s.mu.RUnlock()
    prefix := "  "
    if s.IsMaster() {
        prefix = "**"
    }
    return fmt.Sprintf("%s Signer %s up to STH{offset=%d, rev=%d} = %s\n", prefix, s.Name, s.dbSTHInfo.sthOffset, s.dbSTHInfo.treeRevision, s.dbSTHInfo.sth.String())
}

// LatestSTHInfo returns the most recent STHInfo known about by the signer.
func (s *Signer) LatestSTHInfo() STHInfo {
    return s.dbSTHInfo
}

// StoreSTHInfo updates the STHInfo known about by the signer.
func (s *Signer) StoreSTHInfo(info STHInfo) {
    s.dbSTHInfo = info
}

// IsMaster indicates if this signer is master.
func (s *Signer) IsMaster() bool {
    if s.election == nil {
        return true
    }
    return s.election.IsMaster(s.Name)
}

// Run performs a single signing run.
func (s *Signer) Run() {
    s.mu.Lock()
    defer s.mu.Unlock()
    // Read from local DB to see what STH we know about locally.
    dbSTHInfo := s.LatestSTHInfo()
    glog.V(2).Infof("%s: our DB has data up to STH at %d", s.Name, dbSTHInfo.sthOffset)

    // Sanity check that the STH table has what we already know.
    if dbSTHInfo.sth.TreeSize > 0 {
        ourSTH := sthFromString(simkafka.Read("STHs/<treeID>", dbSTHInfo.sthOffset))
        if ourSTH == nil {
            glog.Errorf("%s: local DB has data ahead of STHs topic!!", s.Name)
            return
        }
        if ourSTH.Offset != dbSTHInfo.sthOffset {
            glog.Errorf("%s: local DB recorded offset %d but that has inconsistent STH %s!!", s.Name, dbSTHInfo.sthOffset, ourSTH)
            return
        }
        if ourSTH.TimeStamp != dbSTHInfo.sth.TimeStamp || ourSTH.TreeSize != dbSTHInfo.sth.TreeSize {
            glog.Errorf("%s: local DB has different data than STHs topic!!", s.Name)
            return
        }
        glog.V(2).Infof("%s: our DB at %v, matches STH at that offset", s.Name, dbSTHInfo.sthOffset)
    }

    // Look to see if anyone else has already stored data just ahead of our STH. This will
    // normally be the next entry, but we need to ignore any entries that have inconsistent
    // offsets.
    nextOffset := dbSTHInfo.sthOffset
    var nextSTH *STH
    for {
        nextOffset++
        nextSTH = sthFromString(simkafka.Read("STHs/<treeID>", nextOffset))
        if nextSTH == nil {
            break
        }
        if nextSTH.Offset < nextOffset {
            // Found an entry in the STHs topic that didn't get stored at the offset its writer
            // expected it to be stored at, probably due to another master signer nipping in an
            // entry ahead of it (due to a bug in mastership election).
            // Kafka adjudicates the clash: whichever entry got the correct offset wins.
            glog.V(2).Infof("%s: ignoring inconsistent STH %s at offset %d", s.Name, nextSTH.String(), nextOffset)
            continue
        }
        if nextSTH.Offset > nextOffset {
            glog.Errorf("%s: STH %s is stored at offset %d, earlier than its writer expected!!", s.Name, nextSTH.String(), nextOffset)
            return
        }
        if nextSTH.TimeStamp < dbSTHInfo.sth.TimeStamp || nextSTH.TreeSize < dbSTHInfo.sth.TreeSize {
            glog.Errorf("%s: next STH %s has earlier timestamp or smaller tree size than local DB (%s)!!", s.Name, nextSTH.String(), dbSTHInfo.sth.String())
            return
        }
        break
    }

    time.Sleep(*pessimizeInterval)
    if nextSTH == nil {
        // We're up-to-date with the STHs topic (as of a moment ago) ...
        if !s.IsMaster() {
            glog.V(2).Infof("%s: up-to-date with STHs but not master, so exit", s.Name)
            return
        }
        // ... and we're the master. Move the STHs topic along to encompass any unincorporated leaves.
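        // How the race below is resolved: we predict the offset our new STH should
        // land at (nextOffset), append it, and then trust the offset that
        // simkafka.Append actually returns. If another signer also believes it is
        // master and appends first, our STH lands at a later offset than predicted,
        // so we give up and leave the entry at the predicted offset (the rival's)
        // as the winner. For example, if nextOffset is 7 but Append returns 8, a
        // rival appended at 7 first and this run is abandoned.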
        offset := dbSTHInfo.sth.TreeSize
        batch := simkafka.ReadMultiple("Leaves/<treeID>", offset, *batchSize)
        glog.V(2).Infof("%s: nothing at next offset %d and we are master, so have read %d more leaves", s.Name, nextOffset, len(batch))
        if len(batch) == 0 {
            glog.V(2).Infof("%s: nothing to do", s.Name)
            return
        }
        timestamp := (time.Now().UnixNano() / int64(time.Millisecond)) - s.epoch
        newSTHInfo := STHInfo{
            sth: STH{
                TreeSize:  s.db.Size() + len(batch),
                TimeStamp: timestamp,
                Offset:    nextOffset, // The offset we expect this STH to end up at in STH topic
            },
            treeRevision: dbSTHInfo.treeRevision + 1,
        }
        newSTHInfo.sthOffset = simkafka.Append("STHs/<treeID>", newSTHInfo.sth.String())
        if newSTHInfo.sthOffset > nextOffset {
            // The STH didn't get stored at the offset we expected, presumably because someone else got there first.
            glog.Warningf("%s: stored new STH %s at offset %d, which is unexpected; give up", s.Name, newSTHInfo.sth.String(), newSTHInfo.sthOffset)
            return
        }
        if newSTHInfo.sthOffset < nextOffset {
            glog.Errorf("%s: stored new STH %s at offset %d, which is earlier than expected!!", s.Name, newSTHInfo.sth.String(), newSTHInfo.sthOffset)
            return
        }
        glog.V(2).Infof("%s: stored new STH %s at expected offset, including %d new leaves", s.Name, newSTHInfo.sth.String(), len(batch))

        // Now the STH topic is updated (correctly), do our local DB.
        s.db.AddLeaves(timestamp, nextOffset, batch)
        s.StoreSTHInfo(newSTHInfo)
    } else {
        // There is an STH one ahead of us that we're not caught up with yet.
        // Read the leaves between what we have in our DB, and that STH...
        count := nextSTH.TreeSize - dbSTHInfo.sth.TreeSize
        glog.V(2).Infof("%s: our DB is %d leaves behind the next STH at %s, so update it", s.Name, count, nextSTH.String())
        batch := simkafka.ReadMultiple("Leaves/<treeID>", dbSTHInfo.sth.TreeSize, count)
        if len(batch) != count {
            glog.Errorf("%s: expected to read leaves [%d, %d) but only got %d!!", s.Name, dbSTHInfo.sth.TreeSize, dbSTHInfo.sth.TreeSize+count, len(batch))
            return
        }
        // ... and store it in our local DB.
        newSTHInfo := STHInfo{
            sth:          s.db.AddLeaves(nextSTH.TimeStamp, nextOffset, batch),
            treeRevision: dbSTHInfo.treeRevision + 1,
            sthOffset:    nextOffset,
        }
        glog.V(2).Infof("%s: update our DB to %s", s.Name, newSTHInfo.sth.String())
        s.StoreSTHInfo(newSTHInfo)
        // We may still not be caught up, but that's for the next time around.
    }
}
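
// exampleSingleSignerRun is a minimal usage sketch, not a definitive driver.
// It assumes only what this file shows: New accepts a nil election (so the
// signer always considers itself master), Run performs one signing pass, and
// simkafka.Append is the same helper Run uses to write to a topic. The signer
// name and leaf values are illustrative only.
func exampleSingleSignerRun() {
    // Seed the simulated leaf topic with a few entries for the signer to pick up.
    for _, leaf := range []string{"leaf-0", "leaf-1", "leaf-2"} {
        simkafka.Append("Leaves/<treeID>", leaf)
    }

    // With a nil election the signer always believes it is master.
    epoch := time.Now().UnixNano() / int64(time.Millisecond)
    s := New("signer-0", nil, epoch)

    // One Run incorporates up to --batch_size unsigned leaves into a new STH
    // and brings the local DB in line with the STHs topic.
    s.Run()
    fmt.Print(s.String())
}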