github.com/pingcap/tiflow@v0.0.0-20240520035814-5bf52d54e205/pkg/p2p/server_ack_manager.go

// Copyright 2021 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// See the License for the specific language governing permissions and
// limitations under the License.

package p2p

import "sync"

// Background:
//
// Peer messages are divided into topics to facilitate registering handlers and
// to avoid interference between different types of messages.
//
// For a given (sender, topic) pair, messages are handled in order. When
// retrying after a failure, the client needs to know the latest progress so
// that it can resume from the first unacknowledged message. This is what
// Acks are for.

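// The sketch below is illustrative only (not part of the original file):
// a hypothetical receiver-side hook, onMessageProcessed, that records
// progress after each message is handled, so the Ack reported back to the
// sender always reflects the highest processed sequence number.
//
//	func onMessageProcessed(acks *ackManager, sender NodeID, topic Topic, seq Seq) {
//		if seq > acks.Get(sender, topic) {
//			acks.Set(sender, topic, seq)
//		}
//	}
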
// ackManager tracks the progress of Acks.
// It is safe for concurrent use by multiple goroutines.
type ackManager struct {
	// peers maps a sender's NodeID to a *peerAckList.
	peers sync.Map
}

const (
	// initAck is the initial value of an Ack.
	// It is a placeholder for unknown progress.
	initAck = Seq(0)
)

// peerAckList records, for a single sender, the latest acknowledged
// sequence number of each topic.
type peerAckList struct {
	mu   sync.RWMutex
	acks map[Topic]Seq
}

// newAckManager returns a new ackManager.
func newAckManager() *ackManager {
	return &ackManager{}
}

// Get returns the latest Ack for a given topic sent from a given node.
func (m *ackManager) Get(senderID NodeID, topic Topic) Seq {
	rawAcks, ok := m.peers.Load(senderID)
	if !ok {
		return initAck
	}

	ackList := rawAcks.(*peerAckList)
	ackList.mu.RLock()
	defer ackList.mu.RUnlock()

	// A topic with no recorded Ack yields the map's zero value,
	// Seq(0), which equals initAck.
	return ackList.acks[topic]
}

// Set sets the latest Ack for a given topic sent from a given node.
func (m *ackManager) Set(senderID NodeID, topic Topic, newSeq Seq) {
	rawAcks, ok := m.peers.Load(senderID)
	if !ok {
		newAcks := &peerAckList{
			acks: make(map[Topic]Seq),
		}
		// Load is tried first so that the common case (the entry already
		// exists) avoids this allocation. LoadOrStore returns the existing
		// value if another goroutine has just inserted a value for our key,
		// in which case our newAcks is simply discarded.
		rawAcks, _ = m.peers.LoadOrStore(senderID, newAcks)
	}

	ackList := rawAcks.(*peerAckList)
	ackList.mu.Lock()
	defer ackList.mu.Unlock()
	ackList.acks[topic] = newSeq
}

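// A minimal illustrative sketch (not part of the original file) of the
// concurrency guarantee above: concurrent Set calls for the same sender
// either create the peerAckList or reuse the one stored by the winning
// goroutine. The node and topic literals are hypothetical, assuming
// NodeID and Topic are string-based as elsewhere in this package.
//
//	m := newAckManager()
//	var wg sync.WaitGroup
//	for i := 1; i <= 4; i++ {
//		wg.Add(1)
//		go func(i int) {
//			defer wg.Done()
//			m.Set("node-1", "topic-a", Seq(i))
//		}(i)
//	}
//	wg.Wait() // m.Get("node-1", "topic-a") is now one of 1..4
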
// Range iterates over the (topic, seq) pairs recorded for a given node.
// The iteration terminates if fn returns false.
func (m *ackManager) Range(senderID NodeID, fn func(topic Topic, seq Seq) bool) {
	rawAcks, ok := m.peers.Load(senderID)
	if !ok {
		return
	}

	ackList := rawAcks.(*peerAckList)
	ackList.mu.RLock()
	defer ackList.mu.RUnlock()

	for topic, seq := range ackList.acks {
		if !fn(topic, seq) {
			return
		}
	}
}

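// An illustrative use of Range (not part of the original file): collecting
// every recorded Ack for a peer, e.g. to piggyback them on a response. The
// ackUpdate type and collectAcks function are assumptions made for this
// sketch.
//
//	type ackUpdate struct {
//		Topic Topic
//		Seq   Seq
//	}
//
//	func collectAcks(m *ackManager, sender NodeID) []ackUpdate {
//		var out []ackUpdate
//		m.Range(sender, func(topic Topic, seq Seq) bool {
//			out = append(out, ackUpdate{Topic: topic, Seq: seq})
//			return true // keep iterating
//		})
//		return out
//	}
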
// RemoveTopic removes a topic for all nodes.
// We do not support removing a topic from a specific node.
func (m *ackManager) RemoveTopic(topic Topic) {
	m.peers.Range(func(key, value interface{}) bool {
		ackList := value.(*peerAckList)
		ackList.mu.Lock()
		defer ackList.mu.Unlock()
		delete(ackList.acks, topic)
		return true
	})
}

// RemoveNode removes all records of Acks for a given node.
func (m *ackManager) RemoveNode(senderID NodeID) {
	m.peers.Delete(senderID)
}
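
// A minimal end-to-end sketch (not part of the original file; the node and
// topic names are hypothetical, assuming NodeID and Topic are string-based
// as elsewhere in this package):
//
//	m := newAckManager()
//	m.Set("node-1", "topic-a", Seq(42))
//	m.Get("node-1", "topic-a") // 42
//	m.Get("node-1", "topic-b") // initAck: no progress recorded yet
//	m.RemoveTopic("topic-a")   // drops "topic-a" for every node
//	m.RemoveNode("node-1")     // drops all state for "node-1"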