github.com/Finschia/ostracon@v1.1.5/statesync/reactor.go

package statesync

import (
	"errors"
	"fmt"
	"sort"
	"time"

	"github.com/gogo/protobuf/proto"

	abci "github.com/tendermint/tendermint/abci/types"
	ssproto "github.com/tendermint/tendermint/proto/tendermint/statesync"

	"github.com/Finschia/ostracon/config"
	tmsync "github.com/Finschia/ostracon/libs/sync"
	"github.com/Finschia/ostracon/p2p"
	"github.com/Finschia/ostracon/proxy"
	sm "github.com/Finschia/ostracon/state"
	"github.com/Finschia/ostracon/types"
)

const (
	// SnapshotChannel exchanges snapshot metadata
	SnapshotChannel = byte(0x60)
	// ChunkChannel exchanges chunk contents
	ChunkChannel = byte(0x61)
	// recentSnapshots is the number of recent snapshots to send and receive per peer.
	recentSnapshots = 10
)

// Reactor handles state sync, both restoring snapshots for the local node and serving snapshots
// for other nodes.
type Reactor struct {
	p2p.BaseReactor

	cfg       config.StateSyncConfig
	conn      proxy.AppConnSnapshot
	connQuery proxy.AppConnQuery
	tempDir   string

	// syncer is only set while a state sync is in progress, and is used to feed
	// received snapshots and chunks into the sync. mtx guards access to it.
	mtx    tmsync.RWMutex
	syncer *syncer
}

// NewReactor creates a new state sync reactor.
func NewReactor(
	cfg config.StateSyncConfig,
	conn proxy.AppConnSnapshot,
	connQuery proxy.AppConnQuery,
	async bool,
	recvBufSize int,
) *Reactor {

	r := &Reactor{
		cfg:       cfg,
		conn:      conn,
		connQuery: connQuery,
	}
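	// async and recvBufSize are passed straight through to the BaseReactor
	// constructor below; tempDir is left at its zero value, so the syncer
	// created later in Sync is given an empty temp dir.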
	r.BaseReactor = *p2p.NewBaseReactor("StateSync", r, async, recvBufSize)

	return r
}

// GetChannels implements p2p.Reactor.
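// The snapshot channel is advertised with a higher send priority (5) than the
// chunk channel (3); incoming message sizes are capped by snapshotMsgSize and
// chunkMsgSize, which are defined elsewhere in this package.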
func (r *Reactor) GetChannels() []*p2p.ChannelDescriptor {
	return []*p2p.ChannelDescriptor{
		{
			ID:                  SnapshotChannel,
			Priority:            5,
			SendQueueCapacity:   10,
			RecvMessageCapacity: snapshotMsgSize,
			MessageType:         &ssproto.Message{},
		},
		{
			ID:                  ChunkChannel,
			Priority:            3,
			SendQueueCapacity:   10,
			RecvMessageCapacity: chunkMsgSize,
			MessageType:         &ssproto.Message{},
		},
	}
}

// OnStart implements p2p.Reactor.
func (r *Reactor) OnStart() error {
	// Nothing to do beyond starting the embedded BaseReactor.
	return r.BaseReactor.OnStart()
}

// AddPeer implements p2p.Reactor.
func (r *Reactor) AddPeer(peer p2p.Peer) {
	r.mtx.RLock()
	defer r.mtx.RUnlock()
	if r.syncer != nil {
		r.syncer.AddPeer(peer)
	}
}

// RemovePeer implements p2p.Reactor.
func (r *Reactor) RemovePeer(peer p2p.Peer, reason interface{}) {
	r.mtx.RLock()
	defer r.mtx.RUnlock()
	if r.syncer != nil {
		r.syncer.RemovePeer(peer)
	}
}

// ReceiveEnvelope handles messages received from peers on the snapshot and chunk channels.
func (r *Reactor) ReceiveEnvelope(e p2p.Envelope) {
	if !r.IsRunning() {
		return
	}

	err := validateMsg(e.Message)
	if err != nil {
		r.Logger.Error("Invalid message", "peer", e.Src, "msg", e.Message, "err", err)
		r.Switch.StopPeerForError(e.Src, err)
		return
	}

	switch e.ChannelID {
	case SnapshotChannel:
		switch msg := e.Message.(type) {
		case *ssproto.SnapshotsRequest:
			snapshots, err := r.recentSnapshots(recentSnapshots)
			if err != nil {
				r.Logger.Error("Failed to fetch snapshots", "err", err)
				return
			}
			for _, snapshot := range snapshots {
				r.Logger.Debug("Advertising snapshot", "height", snapshot.Height,
					"format", snapshot.Format, "peer", e.Src.ID())
				p2p.SendEnvelopeShim(e.Src, p2p.Envelope{ //nolint: staticcheck
					ChannelID: e.ChannelID,
					Message: &ssproto.SnapshotsResponse{
						Height:   snapshot.Height,
						Format:   snapshot.Format,
						Chunks:   snapshot.Chunks,
						Hash:     snapshot.Hash,
						Metadata: snapshot.Metadata,
					},
				}, r.Logger)
			}

		case *ssproto.SnapshotsResponse:
			r.mtx.RLock()
			defer r.mtx.RUnlock()
			if r.syncer == nil {
				r.Logger.Debug("Received unexpected snapshot, no state sync in progress")
				return
			}
			r.Logger.Debug("Received snapshot", "height", msg.Height, "format", msg.Format, "peer", e.Src.ID())
			_, err := r.syncer.AddSnapshot(e.Src, &snapshot{
				Height:   msg.Height,
				Format:   msg.Format,
				Chunks:   msg.Chunks,
				Hash:     msg.Hash,
				Metadata: msg.Metadata,
			})
			// TODO: We may want to consider punishing the peer for certain errors
			if err != nil {
				r.Logger.Error("Failed to add snapshot", "height", msg.Height, "format", msg.Format,
					"peer", e.Src.ID(), "err", err)
				return
			}

		default:
			r.Logger.Error(fmt.Sprintf("Received unknown message %T", msg))
		}

	case ChunkChannel:
		switch msg := e.Message.(type) {
		case *ssproto.ChunkRequest:
			r.Logger.Debug("Received chunk request", "height", msg.Height, "format", msg.Format,
				"chunk", msg.Index, "peer", e.Src.ID())
			resp, err := r.conn.LoadSnapshotChunkSync(abci.RequestLoadSnapshotChunk{
				Height: msg.Height,
				Format: msg.Format,
				Chunk:  msg.Index,
			})
			if err != nil {
				r.Logger.Error("Failed to load chunk", "height", msg.Height, "format", msg.Format,
					"chunk", msg.Index, "err", err)
				return
			}
			r.Logger.Debug("Sending chunk", "height", msg.Height, "format", msg.Format,
				"chunk", msg.Index, "peer", e.Src.ID())
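			// A nil Chunk in the application's response is reported back to
			// the requester as Missing rather than as an empty chunk.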
			p2p.SendEnvelopeShim(e.Src, p2p.Envelope{ //nolint: staticcheck
				ChannelID: ChunkChannel,
				Message: &ssproto.ChunkResponse{
					Height:  msg.Height,
					Format:  msg.Format,
					Index:   msg.Index,
					Chunk:   resp.Chunk,
					Missing: resp.Chunk == nil,
				},
			}, r.Logger)

		case *ssproto.ChunkResponse:
			r.mtx.RLock()
			defer r.mtx.RUnlock()
			if r.syncer == nil {
				r.Logger.Debug("Received unexpected chunk, no state sync in progress", "peer", e.Src.ID())
				return
			}
			r.Logger.Debug("Received chunk, adding to sync", "height", msg.Height, "format", msg.Format,
				"chunk", msg.Index, "peer", e.Src.ID())
			_, err := r.syncer.AddChunk(&chunk{
				Height: msg.Height,
				Format: msg.Format,
				Index:  msg.Index,
				Chunk:  msg.Chunk,
				Sender: e.Src.ID(),
			})
			if err != nil {
				r.Logger.Error("Failed to add chunk", "height", msg.Height, "format", msg.Format,
					"chunk", msg.Index, "err", err)
				return
			}

		default:
			r.Logger.Error(fmt.Sprintf("Received unknown message %T", msg))
		}

	default:
		r.Logger.Error(fmt.Sprintf("Received message on invalid channel %x", e.ChannelID))
	}
}

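// Receive implements p2p.Reactor. It unmarshals the raw wire bytes into an
// ssproto.Message, unwraps the inner message and delegates to ReceiveEnvelope;
// a message that cannot be decoded causes a panic.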
func (r *Reactor) Receive(chID byte, peer p2p.Peer, msgBytes []byte) {
	msg := &ssproto.Message{}
	err := proto.Unmarshal(msgBytes, msg)
	if err != nil {
		panic(err)
	}
	um, err := msg.Unwrap()
	if err != nil {
		panic(err)
	}

	r.ReceiveEnvelope(p2p.Envelope{
		ChannelID: chID,
		Src:       peer,
		Message:   um,
	})
}

// recentSnapshots fetches the n most recent snapshots from the app.
func (r *Reactor) recentSnapshots(n uint32) ([]*snapshot, error) {
	resp, err := r.conn.ListSnapshotsSync(abci.RequestListSnapshots{})
	if err != nil {
		return nil, err
	}
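	// Sort the application's snapshots newest-first: by height descending,
	// then by format descending when heights are equal.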
	sort.Slice(resp.Snapshots, func(i, j int) bool {
		a := resp.Snapshots[i]
		b := resp.Snapshots[j]
		switch {
		case a.Height > b.Height:
			return true
		case a.Height == b.Height && a.Format > b.Format:
			return true
		default:
			return false
		}
	})
	snapshots := make([]*snapshot, 0, n)
	for i, s := range resp.Snapshots {
		if i >= int(n) {
			break
		}
		snapshots = append(snapshots, &snapshot{
			Height:   s.Height,
			Format:   s.Format,
			Chunks:   s.Chunks,
			Hash:     s.Hash,
			Metadata: s.Metadata,
		})
	}
	return snapshots, nil
}

// Sync runs a state sync, returning the new state, previous state and last commit at the snapshot height.
// The caller must store the state and commit in the state database and block store.
func (r *Reactor) Sync(
	stateProvider StateProvider, discoveryTime time.Duration) (sm.State, sm.State, *types.Commit, error) {
	r.mtx.Lock()
	if r.syncer != nil {
		r.mtx.Unlock()
		return sm.State{}, sm.State{}, nil, errors.New("a state sync is already in progress")
	}
	r.syncer = newSyncer(r.cfg, r.Logger, r.conn, r.connQuery, stateProvider, r.tempDir)
	r.mtx.Unlock()

	hook := func() {
		r.Logger.Debug("Requesting snapshots from known peers")
		// Request snapshots from all currently connected peers

		r.Switch.BroadcastEnvelope(p2p.Envelope{
			ChannelID: SnapshotChannel,
			Message:   &ssproto.SnapshotsRequest{},
		})
	}

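	// Request snapshots from the currently connected peers right away; SyncAny
	// may invoke the same hook again while it waits for further snapshots to
	// be discovered.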
	hook()

	state, previousState, commit, err := r.syncer.SyncAny(discoveryTime, hook)

	r.mtx.Lock()
	r.syncer = nil
	r.mtx.Unlock()
	return state, previousState, commit, err
}