vitess.io/vitess@v0.16.2/go/vt/vttablet/tabletmanager/vreplication/vplayer.go

/*
Copyright 2019 The Vitess Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package vreplication

import (
	"context"
	"errors"
	"fmt"
	"io"
	"math"
	"strconv"
	"strings"
	"time"

	"vitess.io/vitess/go/mysql"
	"vitess.io/vitess/go/sqltypes"

	"vitess.io/vitess/go/vt/binlog/binlogplayer"
	"vitess.io/vitess/go/vt/log"

	binlogdatapb "vitess.io/vitess/go/vt/proto/binlogdata"
)

// vplayer replays binlog events by pulling them from a vstreamer.
type vplayer struct {
	vr        *vreplicator
	startPos  mysql.Position
	stopPos   mysql.Position
	saveStop  bool
	copyState map[string]*sqltypes.Result

	replicatorPlan *ReplicatorPlan
	tablePlans     map[string]*TablePlan

	pos mysql.Position
	// unsavedEvent is set any time we skip an event without
	// saving, which happens on an empty commit.
	// If nothing else gets saved for idleTimeout since timeLastSaved,
	// the position of the unsavedEvent gets saved.
	unsavedEvent *binlogdatapb.VEvent
	// timeLastSaved is set every time the position (GTID) is saved.
	timeLastSaved time.Time
	// lastTimestampNs is the last timestamp seen so far.
	lastTimestampNs int64
	// timeOffsetNs keeps track of the clock difference with respect to the source tablet.
	timeOffsetNs int64
	// numAccumulatedHeartbeats keeps track of how many heartbeats have been received
	// since we last updated the time_updated column of _vt.vreplication.
	numAccumulatedHeartbeats int

	// canAcceptStmtEvents is set to true if the current player can accept events in
	// statement mode. It is true only for match-all filters.
	canAcceptStmtEvents bool

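	// phase is used to label query stats, distinguishing the copy phase
	// from regular replication.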
	phase string

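	// throttlerAppName is the app name this player checks in with on the
	// tablet throttler.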
	throttlerAppName string
}

// newVPlayer creates a new vplayer. Parameters:
//   - vr: the outer vreplicator. It's used for common functions like setState,
//     and to access the engine for registering journal events.
//   - settings: current settings read from _vt.vreplication.
//   - copyState: if set, contains the list of tables yet to be copied, or in
//     the process of being copied. If copyState is non-nil, the generated plans
//     ensure that replication is only applied to the parts that have been
//     copied so far.
//   - pausePos: if set, replication will stop at that position without updating
//     the state to "Stopped". This is used by the fastForward function during
//     copying.
func newVPlayer(vr *vreplicator, settings binlogplayer.VRSettings, copyState map[string]*sqltypes.Result, pausePos mysql.Position, phase string) *vplayer {
	saveStop := true
	if !pausePos.IsZero() {
		settings.StopPos = pausePos
		saveStop = false
	}
	return &vplayer{
		vr:               vr,
		startPos:         settings.StartPos,
		pos:              settings.StartPos,
		stopPos:          settings.StopPos,
		saveStop:         saveStop,
		copyState:        copyState,
		timeLastSaved:    time.Now(),
		tablePlans:       make(map[string]*TablePlan),
		phase:            phase,
		throttlerAppName: vr.throttlerAppName(),
	}
}

// play is the entry point for playing binlogs.
func (vp *vplayer) play(ctx context.Context) error {
	if !vp.stopPos.IsZero() && vp.startPos.AtLeast(vp.stopPos) {
		log.Infof("Stop position %v already reached: %v", vp.stopPos, vp.startPos)
		if vp.saveStop {
			return vp.vr.setState(binlogplayer.BlpStopped, fmt.Sprintf("Stop position %v already reached: %v", vp.stopPos, vp.startPos))
		}
		return nil
	}

	plan, err := buildReplicatorPlan(vp.vr.source, vp.vr.colInfoMap, vp.copyState, vp.vr.stats)
	if err != nil {
		vp.vr.stats.ErrorCounts.Add([]string{"Plan"}, 1)
		return err
	}
	vp.replicatorPlan = plan

	// We can't run in statement mode if there are filters defined.
	vp.canAcceptStmtEvents = true
	for _, rule := range vp.vr.source.Filter.Rules {
		if rule.Filter != "" || rule.Match != "/.*" {
			vp.canAcceptStmtEvents = false
			break
		}
	}

	return vp.fetchAndApply(ctx)
}

// fetchAndApply performs the fetching and application of the binlogs.
// This is done by two different threads. The fetcher thread pulls
// events from the vstreamer and adds them to the relayLog.
// The applyEvents thread pulls accumulated events from the relayLog
// and applies them to mysql. The reason for this separation is that
// commits are slow during apply, so more events can accumulate in
// the relay log during a commit. In such situations, the next iteration
// of apply combines all the transactions in the relay log into a single
// one. This allows the apply thread to catch up more quickly if
// a backlog builds up.
func (vp *vplayer) fetchAndApply(ctx context.Context) (err error) {
	log.Infof("Starting VReplication player id: %v, startPos: %v, stop: %v, filter: %v", vp.vr.id, vp.startPos, vp.stopPos, vp.vr.source)

	ctx, cancel := context.WithCancel(ctx)
	defer cancel()

	relay := newRelayLog(ctx, relayLogMaxItems, relayLogMaxSize)

	streamErr := make(chan error, 1)
	go func() {
		streamErr <- vp.vr.sourceVStreamer.VStream(ctx, mysql.EncodePosition(vp.startPos), nil, vp.replicatorPlan.VStreamFilter, func(events []*binlogdatapb.VEvent) error {
			return relay.Send(events)
		})
	}()

	applyErr := make(chan error, 1)
	go func() {
		applyErr <- vp.applyEvents(ctx, relay)
	}()

	select {
	case err := <-applyErr:
		defer func() {
			// Cancel and wait for the other thread to finish.
			cancel()
			<-streamErr
		}()

		// If the apply thread ends with io.EOF, it means either the Engine
		// is shutting down and canceled the context, or the stop position was
		// reached, or a journal event was encountered.
		// If so, we return nil which will cause the controller to not retry.
		if err == io.EOF {
			return nil
		}
		return err
	case err := <-streamErr:
		defer func() {
			// Cancel and wait for the other thread to finish.
			cancel()
			<-applyErr
		}()
		// If the context is done, don't return an error.
		select {
		case <-ctx.Done():
			return nil
		default:
		}
		// If the stream ends normally, we have to return an error indicating
		// that the controller has to retry with a different vttablet.
		if err == nil || err == io.EOF {
			return errors.New("vstream ended")
		}
		return err
	}
}

// applyStmtEvent applies a DML statement received from the source directly
// to the backend database. SAVEPOINT events are always applied; anything else
// requires the player to be in statement mode.
func (vp *vplayer) applyStmtEvent(ctx context.Context, event *binlogdatapb.VEvent) error {
	sql := event.Statement
	if sql == "" {
		sql = event.Dml
	}
	if event.Type == binlogdatapb.VEventType_SAVEPOINT || vp.canAcceptStmtEvents {
		start := time.Now()
		_, err := vp.vr.dbClient.ExecuteWithRetry(ctx, sql)
		vp.vr.stats.QueryTimings.Record(vp.phase, start)
		vp.vr.stats.QueryCount.Add(vp.phase, 1)
		return err
	}
	return fmt.Errorf("filter rules are not supported for SBR replication: %v", vp.vr.source.Filter.GetRules())
}

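// applyRowEvent applies the row changes of a single RowEvent using the table
// plan that was built from the preceding FIELD event for that table.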
func (vp *vplayer) applyRowEvent(ctx context.Context, rowEvent *binlogdatapb.RowEvent) error {
	tplan := vp.tablePlans[rowEvent.TableName]
	if tplan == nil {
		return fmt.Errorf("unexpected event on table %s", rowEvent.TableName)
	}
	for _, change := range rowEvent.RowChanges {
		_, err := tplan.applyChange(change, func(sql string) (*sqltypes.Result, error) {
			stats := NewVrLogStats("ROWCHANGE")
			start := time.Now()
			qr, err := vp.vr.dbClient.ExecuteWithRetry(ctx, sql)
			vp.vr.stats.QueryCount.Add(vp.phase, 1)
			vp.vr.stats.QueryTimings.Record(vp.phase, start)
			stats.Send(sql)
			return qr, err
		})
		if err != nil {
			return err
		}
	}
	return nil
}

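// updatePos persists the current position and event timestamp to the
// _vt.vreplication record and reports whether the stop position (if any)
// has been reached.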
func (vp *vplayer) updatePos(ts int64) (posReached bool, err error) {
	vp.numAccumulatedHeartbeats = 0
	update := binlogplayer.GenerateUpdatePos(vp.vr.id, vp.pos, time.Now().Unix(), ts, vp.vr.stats.CopyRowCount.Get(), vreplicationStoreCompressedGTID)
	if _, err := vp.vr.dbClient.Execute(update); err != nil {
		return false, fmt.Errorf("error updating position: %v", err)
	}
	vp.unsavedEvent = nil
	vp.timeLastSaved = time.Now()
	vp.vr.stats.SetLastPosition(vp.pos)
	posReached = !vp.stopPos.IsZero() && vp.pos.AtLeast(vp.stopPos)
	if posReached {
		log.Infof("Stopped at position: %v", vp.stopPos)
		if vp.saveStop {
			if err := vp.vr.setState(binlogplayer.BlpStopped, fmt.Sprintf("Stopped at position %v", vp.stopPos)); err != nil {
				return false, err
			}
		}
	}
	return posReached, nil
}

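// mustUpdateHeartbeat reports whether enough heartbeats have accumulated to
// warrant updating the time_updated column. Whichever of the two configured
// intervals is smaller effectively bounds how long the update can be deferred.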
func (vp *vplayer) mustUpdateHeartbeat() bool {
	return vp.numAccumulatedHeartbeats >= vreplicationHeartbeatUpdateInterval ||
		vp.numAccumulatedHeartbeats >= vreplicationMinimumHeartbeatUpdateInterval
}

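// recordHeartbeat updates the in-memory heartbeat stat and, if enough
// heartbeats have accumulated, flushes the time to the database and resets
// the accumulator.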
func (vp *vplayer) recordHeartbeat() error {
	tm := time.Now().Unix()
	vp.vr.stats.RecordHeartbeat(tm)
	if !vp.mustUpdateHeartbeat() {
		return nil
	}
	vp.numAccumulatedHeartbeats = 0
	return vp.vr.updateHeartbeatTime(tm)
}

// applyEvents is the main thread that applies the events. It has the following use
// cases to take into account:
//   - Normal transaction that has row mutations. In this case, the transaction
//     is committed along with an update of the position.
//   - DDL event: the action depends on the OnDDL setting.
//   - OTHER event: the current position of the event is saved.
//   - JOURNAL event: if the event is relevant to the current stream, invoke registerJournal
//     of the engine, and terminate.
//   - HEARTBEAT: update ReplicationLagSeconds.
//   - Empty transaction: the event is remembered as an unsavedEvent. If no commits
//     happen for idleTimeout since timeLastSaved, the current position of the unsavedEvent
//     is committed (updatePos). This handling is needed because the position of an
//     empty transaction may be the stop position; if so, we have to record it.
//     If not significant, we should avoid saving these empty transactions individually
//     because they can cause unnecessary churn and binlog bloat. We should
//     also not go for too long without saving, because we should not fall way behind
//     on the current replication position. Additionally, WaitForPos or other external
//     agents could be waiting on that specific position by watching the vreplication
//     record.
//   - A group of transactions: combine them into a single transaction.
//   - Partial transaction: replay the events received so far and refetch from the relay log
//     for more.
//   - A combination of any of the above: the trickier case is the one where a group
//     of transactions comes in, with the last one being partial. In this case, all transactions
//     up to the last one have to be committed, and the final one must be partially applied.
//
// Of the above events, the saveable ones are COMMIT, DDL, and OTHER. Even though
// a GTID comes as a separate event, it's not saveable until a subsequent saveable
// event occurs. VStreamer currently sequences the GTID to be sent just before
// a saveable event, but we do not rely on this. To handle this, we only remember
// the position when a GTID is encountered. The next saveable event causes the
// current position to be saved.
//
// In order to handle the above use cases, we use an implicit transaction scheme:
// A BEGIN does not really start a transaction. Only a ROW event does. With this
// approach, no transaction gets started if an empty one arrives. If we receive
// a commit and we are not in a transaction, we infer that the transaction was
// empty, and remember it as an unsaved event. The next GTID event will reset the
// unsaved event. If the next commit is also an empty transaction, then the latest
// one gets remembered as unsaved. A non-empty transaction, a must-save event,
// or a timeout will eventually cause the next save.
// The timeout (1s) plays another significant role: if the source and target shards of
// the replication are the same, then a commit of an unsaved event will generate
// another empty event. This is an infinite loop, and the timeout prevents
// this from becoming a tight loop.
// TODO(sougou): we can look at recognizing self-generated events and find a better
// way to handle them.
func (vp *vplayer) applyEvents(ctx context.Context, relay *relayLog) error {
	defer vp.vr.dbClient.Rollback()

	// If we're not running, set ReplicationLagSeconds to be very high.
	// TODO(sougou): if we also stored the time of the last event, we
	// can estimate this value more accurately.
	defer vp.vr.stats.ReplicationLagSeconds.Set(math.MaxInt64)
	defer vp.vr.stats.VReplicationLags.Add(strconv.Itoa(int(vp.vr.id)), math.MaxInt64)
	var sbm int64 = -1
	for {
		if ctx.Err() != nil {
			return ctx.Err()
		}
		// Check the throttler.
		if !vp.vr.vre.throttlerClient.ThrottleCheckOKOrWaitAppName(ctx, vp.throttlerAppName) {
			_ = vp.vr.updateTimeThrottled(VPlayerComponentName)
			continue
		}

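		// Fetch blocks until events arrive in the relay log or, at the
		// latest, until idleTimeout elapses (see case 1 below).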
		items, err := relay.Fetch()
		if err != nil {
			return err
		}
		// No events were received. This likely means that there's a network partition.
		// So, we should assume we're falling behind.
		if len(items) == 0 {
			behind := time.Now().UnixNano() - vp.lastTimestampNs - vp.timeOffsetNs
			vp.vr.stats.ReplicationLagSeconds.Set(behind / 1e9)
			vp.vr.stats.VReplicationLags.Add(strconv.Itoa(int(vp.vr.id)), time.Duration(behind/1e9)*time.Second)
		}
		// Empty transactions are saved at most once every idleTimeout.
		// This covers two situations:
		// 1. Fetch was idle for idleTimeout.
		// 2. We've been receiving empty events for longer than idleTimeout.
		// In both cases, idleTimeout has passed since timeLastSaved. If so, the GTID
		// of the last unsavedEvent must be saved.
		if time.Since(vp.timeLastSaved) >= idleTimeout && vp.unsavedEvent != nil {
			posReached, err := vp.updatePos(vp.unsavedEvent.Timestamp)
			if err != nil {
				return err
			}
			if posReached {
				// Unreachable: an empty commit is only skipped while the stop
				// position has not been reached.
				return nil
			}
		}
		for i, events := range items {
			for j, event := range events {
				if event.Timestamp != 0 {
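					// event.Timestamp is the source commit time (seconds) and
					// event.CurrentTime is the source's clock (nanoseconds) when
					// the event was sent; both come from the source, so the lag
					// (sbm) and the clock offset are derived from them.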
					vp.lastTimestampNs = event.Timestamp * 1e9
					vp.timeOffsetNs = time.Now().UnixNano() - event.CurrentTime
					sbm = event.CurrentTime/1e9 - event.Timestamp
				}
				mustSave := false
				switch event.Type {
				case binlogdatapb.VEventType_COMMIT:
					// If we've reached the stop position, we must save the current commit
					// even if it's empty. So, the next applyEvent is invoked with the
					// mustSave flag.
					if !vp.stopPos.IsZero() && vp.pos.AtLeast(vp.stopPos) {
						mustSave = true
						break
					}
					// In order to group multiple commits into a single one, we look ahead for
					// the next commit. If there is one, we skip the current commit, which ends up
					// applying the next set of events as part of the current transaction. This approach
					// also handles the case where the last transaction is partial. In that case,
					// we only group the transactions with commits we've seen so far.
					if hasAnotherCommit(items, i, j+1) {
						continue
					}
				}
				if err := vp.applyEvent(ctx, event, mustSave); err != nil {
					if err != io.EOF {
						vp.vr.stats.ErrorCounts.Add([]string{"Apply"}, 1)
						log.Errorf("Error applying event: %s", err.Error())
					}
					return err
				}
			}
		}

		if sbm >= 0 {
			vp.vr.stats.ReplicationLagSeconds.Set(sbm)
			vp.vr.stats.VReplicationLags.Add(strconv.Itoa(int(vp.vr.id)), time.Duration(sbm)*time.Second)
		}
	}
}

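// hasAnotherCommit scans the remaining relay log items starting at (i, j) and
// reports whether another COMMIT will be reached before any event that must be
// saved on its own (DDL, OTHER or JOURNAL).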
func hasAnotherCommit(items [][]*binlogdatapb.VEvent, i, j int) bool {
	for i < len(items) {
		for j < len(items[i]) {
			// We skip GTID, BEGIN, FIELD, ROW and DMLs.
			switch items[i][j].Type {
			case binlogdatapb.VEventType_COMMIT:
				return true
			case binlogdatapb.VEventType_DDL, binlogdatapb.VEventType_OTHER, binlogdatapb.VEventType_JOURNAL:
				return false
			}
			j++
		}
		j = 0
		i++
	}
	return false
}

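// applyEvent applies a single event from the relay log to the backend
// database. It returns io.EOF when replication must terminate, e.g. because
// the stop position was reached or a journal event was registered.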
func (vp *vplayer) applyEvent(ctx context.Context, event *binlogdatapb.VEvent, mustSave bool) error {
	stats := NewVrLogStats(event.Type.String())
	switch event.Type {
	case binlogdatapb.VEventType_GTID:
		pos, err := binlogplayer.DecodePosition(event.Gtid)
		if err != nil {
			return err
		}
		vp.pos = pos
		// A new position should not be saved until a saveable event occurs.
		vp.unsavedEvent = nil
		if vp.stopPos.IsZero() {
			return nil
		}
	case binlogdatapb.VEventType_BEGIN:
		// No-op: begin is called as needed.
	case binlogdatapb.VEventType_COMMIT:
		if mustSave {
			if err := vp.vr.dbClient.Begin(); err != nil {
				return err
			}
		}

		if !vp.vr.dbClient.InTransaction {
			// We're skipping an empty transaction. We may have to save the position on inactivity.
			vp.unsavedEvent = event
			return nil
		}
		posReached, err := vp.updatePos(event.Timestamp)
		if err != nil {
			return err
		}
		if err := vp.vr.dbClient.Commit(); err != nil {
			return err
		}
		if posReached {
			return io.EOF
		}
	case binlogdatapb.VEventType_FIELD:
		if err := vp.vr.dbClient.Begin(); err != nil {
			return err
		}
		tplan, err := vp.replicatorPlan.buildExecutionPlan(event.FieldEvent)
		if err != nil {
			return err
		}
		vp.tablePlans[event.FieldEvent.TableName] = tplan
		stats.Send(fmt.Sprintf("%v", event.FieldEvent))

	case binlogdatapb.VEventType_INSERT, binlogdatapb.VEventType_DELETE, binlogdatapb.VEventType_UPDATE,
		binlogdatapb.VEventType_REPLACE, binlogdatapb.VEventType_SAVEPOINT:
		// Use event.Statement if available, preparing for the deprecation of event.Dml in 8.0.
		sql := event.Statement
		if sql == "" {
			sql = event.Dml
		}
		// If the event is for one of the AWS RDS "special" or pt-table-checksum tables, we skip it.
		if !strings.Contains(sql, " mysql.rds_") && !strings.Contains(sql, " percona.checksums") {
			// This is a player using statement-based replication.
			if err := vp.vr.dbClient.Begin(); err != nil {
				return err
			}
			if err := vp.applyStmtEvent(ctx, event); err != nil {
				return err
			}
			stats.Send(sql)
		}
	case binlogdatapb.VEventType_ROW:
		// This player is configured for row-based replication.
		if err := vp.vr.dbClient.Begin(); err != nil {
			return err
		}
		if err := vp.applyRowEvent(ctx, event.RowEvent); err != nil {
			return err
		}
		// The row event is logged AFTER the RowChanges are applied, so that the
		// total elapsed time for the row event is reported.
		stats.Send(fmt.Sprintf("%v", event.RowEvent))
	case binlogdatapb.VEventType_OTHER:
		if vp.vr.dbClient.InTransaction {
			// Unreachable
			log.Errorf("internal error: vplayer is in a transaction on event: %v", event)
			return fmt.Errorf("internal error: vplayer is in a transaction on event: %v", event)
		}
		// Just update the position.
		posReached, err := vp.updatePos(event.Timestamp)
		if err != nil {
			return err
		}
		if posReached {
			return io.EOF
		}
	case binlogdatapb.VEventType_DDL:
		if vp.vr.dbClient.InTransaction {
			// Unreachable
			log.Errorf("internal error: vplayer is in a transaction on event: %v", event)
			return fmt.Errorf("internal error: vplayer is in a transaction on event: %v", event)
		}
		switch vp.vr.source.OnDdl {
		case binlogdatapb.OnDDLAction_IGNORE:
			// We still have to update the position.
			posReached, err := vp.updatePos(event.Timestamp)
			if err != nil {
				return err
			}
			if posReached {
				return io.EOF
			}
		case binlogdatapb.OnDDLAction_STOP:
			if err := vp.vr.dbClient.Begin(); err != nil {
				return err
			}
			if _, err := vp.updatePos(event.Timestamp); err != nil {
				return err
			}
			if err := vp.vr.setState(binlogplayer.BlpStopped, fmt.Sprintf("Stopped at DDL %s", event.Statement)); err != nil {
				return err
			}
			if err := vp.vr.dbClient.Commit(); err != nil {
				return err
			}
			return io.EOF
		case binlogdatapb.OnDDLAction_EXEC:
			// It's impossible to save the position transactionally with the statement.
			// So, we apply the DDL first, and then save the position.
			// Manual intervention may be needed if there is a partial
			// failure here.
			if _, err := vp.vr.dbClient.ExecuteWithRetry(ctx, event.Statement); err != nil {
				return err
			}
			stats.Send(fmt.Sprintf("%v", event.Statement))
			posReached, err := vp.updatePos(event.Timestamp)
			if err != nil {
				return err
			}
			if posReached {
				return io.EOF
			}
		case binlogdatapb.OnDDLAction_EXEC_IGNORE:
			if _, err := vp.vr.dbClient.ExecuteWithRetry(ctx, event.Statement); err != nil {
				log.Infof("Ignoring error: %v for DDL: %s", err, event.Statement)
			}
			stats.Send(fmt.Sprintf("%v", event.Statement))
			posReached, err := vp.updatePos(event.Timestamp)
			if err != nil {
				return err
			}
			if posReached {
				return io.EOF
			}
		}
	case binlogdatapb.VEventType_JOURNAL:
		if vp.vr.dbClient.InTransaction {
			// Unreachable
			log.Errorf("internal error: vplayer is in a transaction on event: %v", event)
			return fmt.Errorf("internal error: vplayer is in a transaction on event: %v", event)
		}
		// Ensure that we don't have a partial set of table matches in the journal.
		switch event.Journal.MigrationType {
		case binlogdatapb.MigrationType_SHARDS:
			// All tables of the source were migrated. So, no validation needed.
		case binlogdatapb.MigrationType_TABLES:
			// Validate that all or none of the tables are in the journal.
			jtables := make(map[string]bool)
			for _, table := range event.Journal.Tables {
				jtables[table] = true
			}
			found := false
			notFound := false
			for tableName := range vp.replicatorPlan.TablePlans {
				if _, ok := jtables[tableName]; ok {
					found = true
				} else {
					notFound = true
				}
			}
			switch {
			case found && notFound:
				// Some tables were found and some were not. We can't handle this.
				if err := vp.vr.setState(binlogplayer.BlpStopped, "unable to handle journal event: tables were partially matched"); err != nil {
					return err
				}
				return io.EOF
			case notFound:
				// None were found. Ignore the journal.
				return nil
			}
			// All were found. We must register the journal.
		}
		log.Infof("Binlog event registering journal event %+v", event.Journal)
		if err := vp.vr.vre.registerJournal(event.Journal, int(vp.vr.id)); err != nil {
			if err := vp.vr.setState(binlogplayer.BlpStopped, err.Error()); err != nil {
				return err
			}
			return io.EOF
		}
		stats.Send(fmt.Sprintf("%v", event.Journal))
		return io.EOF
	case binlogdatapb.VEventType_HEARTBEAT:
		if event.Throttled {
			if err := vp.vr.updateTimeThrottled(VStreamerComponentName); err != nil {
				return err
			}
		}
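		// Heartbeats are only recorded outside of a transaction; the
		// time_updated write would otherwise execute inside the currently
		// open transaction on the same connection.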
		if !vp.vr.dbClient.InTransaction {
			vp.numAccumulatedHeartbeats++
			if err := vp.recordHeartbeat(); err != nil {
				return err
			}
		}
	}

	return nil
}