// github.com/adoriasoft/tendermint@v0.34.0-dev1.0.20200722151356-96d84601a75a/statesync/reactor.go

package statesync

import (
	"errors"
	"fmt"
	"sort"

	abci "github.com/tendermint/tendermint/abci/types"
	tmsync "github.com/tendermint/tendermint/libs/sync"
	"github.com/tendermint/tendermint/p2p"
	ssproto "github.com/tendermint/tendermint/proto/tendermint/statesync"
	"github.com/tendermint/tendermint/proxy"
	sm "github.com/tendermint/tendermint/state"
	"github.com/tendermint/tendermint/types"
)

const (
	// SnapshotChannel exchanges snapshot metadata
	SnapshotChannel = byte(0x60)
	// ChunkChannel exchanges chunk contents
	ChunkChannel = byte(0x61)
	// recentSnapshots is the number of recent snapshots to send and receive per peer.
	recentSnapshots = 10
)

// Reactor handles state sync, both restoring snapshots for the local node and serving snapshots
// for other nodes.
type Reactor struct {
	p2p.BaseReactor

	conn      proxy.AppConnSnapshot
	connQuery proxy.AppConnQuery
	tempDir   string

	// This will only be set when a state sync is in progress. It is used to feed received
	// snapshots and chunks into the sync.
	mtx    tmsync.RWMutex
	syncer *syncer
}

// NewReactor creates a new state sync reactor.
func NewReactor(conn proxy.AppConnSnapshot, connQuery proxy.AppConnQuery, tempDir string) *Reactor {
	r := &Reactor{
		conn:      conn,
		connQuery: connQuery,
		tempDir:   tempDir, // previously dropped, leaving r.tempDir empty when passed to newSyncer
	}
	r.BaseReactor = *p2p.NewBaseReactor("StateSync", r)
	return r
}

// GetChannels implements p2p.Reactor.
func (r *Reactor) GetChannels() []*p2p.ChannelDescriptor {
	return []*p2p.ChannelDescriptor{
		{
			ID:                  SnapshotChannel,
			Priority:            3,
			SendQueueCapacity:   10,
			RecvMessageCapacity: snapshotMsgSize,
		},
		{
			ID:                  ChunkChannel,
			Priority:            1,
			SendQueueCapacity:   4,
			RecvMessageCapacity: chunkMsgSize,
		},
	}
}

// OnStart implements p2p.Reactor.
func (r *Reactor) OnStart() error {
	return nil
}

// AddPeer implements p2p.Reactor.
func (r *Reactor) AddPeer(peer p2p.Peer) {
	r.mtx.RLock()
	defer r.mtx.RUnlock()
	if r.syncer != nil {
		r.syncer.AddPeer(peer)
	}
}

// RemovePeer implements p2p.Reactor.
func (r *Reactor) RemovePeer(peer p2p.Peer, reason interface{}) {
	r.mtx.RLock()
	defer r.mtx.RUnlock()
	if r.syncer != nil {
		r.syncer.RemovePeer(peer)
	}
}
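// Wiring sketch (an illustrative assumption, not part of this file): in the
// node's setup code the reactor is typically built from the multi-connection
// proxy app's Snapshot and Query clients and registered on the p2p switch.
// The names proxyApp, config, and sw below are hypothetical stand-ins for
// whatever the caller has in scope:
//
//	conn := proxyApp.Snapshot()       // proxy.AppConnSnapshot
//	connQuery := proxyApp.Query()     // proxy.AppConnQuery
//	reactor := statesync.NewReactor(conn, connQuery, config.StateSync.TempDir)
//	sw.AddReactor("STATESYNC", reactor)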
// Receive implements p2p.Reactor.
func (r *Reactor) Receive(chID byte, src p2p.Peer, msgBytes []byte) {
	if !r.IsRunning() {
		return
	}

	msg, err := decodeMsg(msgBytes)
	if err != nil {
		r.Logger.Error("Error decoding message", "src", src, "chId", chID, "msg", msg, "err", err, "bytes", msgBytes)
		r.Switch.StopPeerForError(src, err)
		return
	}
	err = validateMsg(msg)
	if err != nil {
		r.Logger.Error("Invalid message", "peer", src, "msg", msg, "err", err)
		r.Switch.StopPeerForError(src, err)
		return
	}

	switch chID {
	case SnapshotChannel:
		switch msg := msg.(type) {
		case *ssproto.SnapshotsRequest:
			snapshots, err := r.recentSnapshots(recentSnapshots)
			if err != nil {
				r.Logger.Error("Failed to fetch snapshots", "err", err)
				return
			}
			for _, snapshot := range snapshots {
				r.Logger.Debug("Advertising snapshot", "height", snapshot.Height,
					"format", snapshot.Format, "peer", src.ID())
				src.Send(chID, mustEncodeMsg(&ssproto.SnapshotsResponse{
					Height:   snapshot.Height,
					Format:   snapshot.Format,
					Chunks:   snapshot.Chunks,
					Hash:     snapshot.Hash,
					Metadata: snapshot.Metadata,
				}))
			}

		case *ssproto.SnapshotsResponse:
			r.mtx.RLock()
			defer r.mtx.RUnlock()
			if r.syncer == nil {
				r.Logger.Debug("Received unexpected snapshot, no state sync in progress")
				return
			}
			r.Logger.Debug("Received snapshot", "height", msg.Height, "format", msg.Format, "peer", src.ID())
			_, err := r.syncer.AddSnapshot(src, &snapshot{
				Height:   msg.Height,
				Format:   msg.Format,
				Chunks:   msg.Chunks,
				Hash:     msg.Hash,
				Metadata: msg.Metadata,
			})
			if err != nil {
				r.Logger.Error("Failed to add snapshot", "height", msg.Height, "format", msg.Format,
					"peer", src.ID(), "err", err)
				return
			}

		default:
			// The structured logger does not interpret printf verbs, so format explicitly.
			r.Logger.Error(fmt.Sprintf("Received unknown message %T", msg))
		}

	case ChunkChannel:
		switch msg := msg.(type) {
		case *ssproto.ChunkRequest:
			r.Logger.Debug("Received chunk request", "height", msg.Height, "format", msg.Format,
				"chunk", msg.Index, "peer", src.ID())
			resp, err := r.conn.LoadSnapshotChunkSync(abci.RequestLoadSnapshotChunk{
				Height: msg.Height,
				Format: msg.Format,
				Chunk:  msg.Index,
			})
			if err != nil {
				r.Logger.Error("Failed to load chunk", "height", msg.Height, "format", msg.Format,
					"chunk", msg.Index, "err", err)
				return
			}
			r.Logger.Debug("Sending chunk", "height", msg.Height, "format", msg.Format,
				"chunk", msg.Index, "peer", src.ID())
			src.Send(ChunkChannel, mustEncodeMsg(&ssproto.ChunkResponse{
				Height:  msg.Height,
				Format:  msg.Format,
				Index:   msg.Index,
				Chunk:   resp.Chunk,
				Missing: resp.Chunk == nil,
			}))

		case *ssproto.ChunkResponse:
			r.mtx.RLock()
			defer r.mtx.RUnlock()
			if r.syncer == nil {
				r.Logger.Debug("Received unexpected chunk, no state sync in progress", "peer", src.ID())
				return
			}
			r.Logger.Debug("Received chunk, adding to sync", "height", msg.Height, "format", msg.Format,
				"chunk", msg.Index, "peer", src.ID())
			_, err := r.syncer.AddChunk(&chunk{
				Height: msg.Height,
				Format: msg.Format,
				Index:  msg.Index,
				Chunk:  msg.Chunk,
				Sender: src.ID(),
			})
			if err != nil {
				r.Logger.Error("Failed to add chunk", "height", msg.Height, "format", msg.Format,
					"chunk", msg.Index, "err", err)
				return
			}

		default:
			r.Logger.Error(fmt.Sprintf("Received unknown message %T", msg))
		}

	default:
		r.Logger.Error(fmt.Sprintf("Received message on invalid channel %x", chID))
	}
}
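// Discovery sketch (an assumption for illustration, mirroring what the syncer
// does when a peer is added): snapshot discovery is a single request message
// on SnapshotChannel, answered by up to recentSnapshots SnapshotsResponse
// messages that Receive above feeds into the syncer:
//
//	peer.Send(SnapshotChannel, mustEncodeMsg(&ssproto.SnapshotsRequest{}))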
invalid channel %x", chID) 209 } 210 } 211 212 // recentSnapshots fetches the n most recent snapshots from the app 213 func (r *Reactor) recentSnapshots(n uint32) ([]*snapshot, error) { 214 resp, err := r.conn.ListSnapshotsSync(abci.RequestListSnapshots{}) 215 if err != nil { 216 return nil, err 217 } 218 sort.Slice(resp.Snapshots, func(i, j int) bool { 219 a := resp.Snapshots[i] 220 b := resp.Snapshots[j] 221 switch { 222 case a.Height > b.Height: 223 return true 224 case a.Height == b.Height && a.Format > b.Format: 225 return true 226 default: 227 return false 228 } 229 }) 230 snapshots := make([]*snapshot, 0, n) 231 for i, s := range resp.Snapshots { 232 if i >= recentSnapshots { 233 break 234 } 235 snapshots = append(snapshots, &snapshot{ 236 Height: s.Height, 237 Format: s.Format, 238 Chunks: s.Chunks, 239 Hash: s.Hash, 240 Metadata: s.Metadata, 241 }) 242 } 243 return snapshots, nil 244 } 245 246 // Sync runs a state sync, returning the new state and last commit at the snapshot height. 247 // The caller must store the state and commit in the state database and block store. 248 func (r *Reactor) Sync(stateProvider StateProvider) (sm.State, *types.Commit, error) { 249 r.mtx.Lock() 250 if r.syncer != nil { 251 r.mtx.Unlock() 252 return sm.State{}, nil, errors.New("a state sync is already in progress") 253 } 254 r.syncer = newSyncer(r.Logger, r.conn, r.connQuery, stateProvider, r.tempDir) 255 r.mtx.Unlock() 256 257 state, commit, err := r.syncer.SyncAny(defaultDiscoveryTime) 258 r.mtx.Lock() 259 r.syncer = nil 260 r.mtx.Unlock() 261 return state, commit, err 262 }
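// Caller sketch (hypothetical names; the real call site lives in the node's
// startup code, not in this file): per the doc comment on Sync, the returned
// state and commit must be persisted before the node hands off to fast sync
// or consensus, roughly:
//
//	state, commit, err := reactor.Sync(stateProvider)
//	if err != nil {
//		return err
//	}
//	if err := sm.BootstrapState(stateDB, state); err != nil { // store synced state
//		return err
//	}
//	if err := blockStore.SaveSeenCommit(state.LastBlockHeight, commit); err != nil {
//		return err
//	}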