github.com/mysteriumnetwork/node@v0.0.0-20240516044423-365054f76801/p2p/channel.go (about)

     1  /*
     2   * Copyright (C) 2020 The "MysteriumNetwork/node" Authors.
     3   *
     4   * This program is free software: you can redistribute it and/or modify
     5   * it under the terms of the GNU General Public License as published by
     6   * the Free Software Foundation, either version 3 of the License, or
     7   * (at your option) any later version.
     8   *
     9   * This program is distributed in the hope that it will be useful,
    10   * but WITHOUT ANY WARRANTY; without even the implied warranty of
    11   * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
    12   * GNU General Public License for more details.
    13   *
    14   * You should have received a copy of the GNU General Public License
    15   * along with this program.  If not, see <http://www.gnu.org/licenses/>.
    16   */
    17  
    18  package p2p
    19  
    20  import (
    21  	"context"
    22  	"errors"
    23  	"fmt"
    24  	"net"
    25  	"os"
    26  	"strings"
    27  	"sync"
    28  	"time"
    29  
    30  	"github.com/rs/zerolog/log"
    31  	kcp "github.com/xtaci/kcp-go/v5"
    32  	"golang.org/x/crypto/nacl/box"
    33  
    34  	"github.com/mysteriumnetwork/node/identity"
    35  	"github.com/mysteriumnetwork/node/router"
    36  	"github.com/mysteriumnetwork/node/trace"
    37  )
    38  
var (
	// If this env variable is set channel will log raw messages from send and receive loops.
	debugTransport = os.Getenv("P2P_DEBUG_TRANSPORT") == "1"

	// ErrSendTimeout indicates send timeout error.
	ErrSendTimeout = errors.New("p2p send timeout")

	// ErrHandlerNotFound indicates that the peer has not registered a handler for the requested topic yet.
	ErrHandlerNotFound = errors.New("p2p peer handler not found")
)
    49  
const (
	// kcpMTUSize is the MTU set on the KCP session; kept below mtuLimit to
	// leave headroom for KCP and crypto framing overhead.
	kcpMTUSize            = 1280
	// mtuLimit is the read buffer size used by the raw UDP copy loops.
	mtuLimit              = 1500
	// initialTrafficTimeout bounds the wait for initial traffic.
	// NOTE(review): not referenced in this file — presumably used elsewhere; verify.
	initialTrafficTimeout = 30 * time.Second
)
    55  
// ChannelSender is used to send messages.
type ChannelSender interface {
	// Send sends message to given topic. Peer listening to topic will receive message.
	// It blocks until the peer replies or ctx is done.
	Send(ctx context.Context, topic string, msg *Message) (*Message, error)
}
    61  
// ChannelHandler is used to handle messages.
type ChannelHandler interface {
	// Handle registers handler for given topic which handles peer request.
	// Registering the same topic twice replaces the previous handler.
	Handle(topic string, handler HandlerFunc)
}
    67  
// Channel represents p2p communication channel which can send and receive messages over encrypted and reliable UDP transport.
type Channel interface {
	ChannelSender
	ChannelHandler

	// Tracer returns tracer which tracks channel establishment
	Tracer() *trace.Tracer

	// ServiceConn returns UDP connection which can be used for services.
	ServiceConn() *net.UDPConn

	// Conn returns underlying channel's UDP connection.
	Conn() *net.UDPConn

	// Close closes p2p communication channel.
	Close() error

	// ID returns a unique identifier of this channel instance.
	ID() string
}
    88  
// HandlerFunc is channel request handler func signature.
// A returned error is reported to the requesting peer as an internal error.
type HandlerFunc func(c Context) error
    91  
// stream is used to associate request and reply messages.
type stream struct {
	// id matches a queued request with the reply carrying the same id.
	id    uint64
	// resCh receives the peer reply; buffered (size 1) so the reader loop never blocks.
	resCh chan *transportMsg
}
    97  
    98  type peer struct {
    99  	sync.RWMutex
   100  	publicKey  PublicKey
   101  	remoteAddr *net.UDPAddr
   102  }
   103  
   104  func (p *peer) addr() *net.UDPAddr {
   105  	p.RLock()
   106  	defer p.RUnlock()
   107  
   108  	return p.remoteAddr
   109  }
   110  
   111  func (p *peer) updateAddr(addr *net.UDPAddr) {
   112  	p.Lock()
   113  	defer p.Unlock()
   114  
   115  	p.remoteAddr = addr
   116  }
   117  
// transport wraps network primitives for sending and receiving packets.
type transport struct {
	// wireReader is used to read p2p message envelopes.
	wireReader wireReader

	// wireWriter is used to marshal messages to underlying p2p protocol.
	wireWriter wireWriter

	// session is KCP session which wraps UDP connection and adds reliability and ordered messages support.
	session *kcp.UDPSession

	// remoteConn is initial conn which should be created from NAT hole punching or manually. It contains
	// initial local and remote peer addresses.
	remoteConn *net.UDPConn
	localConn  *net.UDPConn

	// proxyConn is used for KCP session as a remote. Since KCP doesn't expose its data read loop
	// this is needed to detect remote peer address changes as we can simply use conn.ReadFromUDP and
	// get updated peer address.
	proxyConn *net.UDPConn
}
   139  
// channel implements Channel interface.
type channel struct {
	mu   sync.RWMutex
	once sync.Once

	// tr is transport containing network related connections for p2p to work.
	// It is set to nil by Close, so it must not be assumed present after close.
	tr *transport

	// tracer tracks channel establishment; set via setTracer.
	tracer *trace.Tracer

	// serviceConn is separate connection which is created outside of p2p channel when
	// performing initial NAT hole punching or manual conn. It is here just because it's more easy
	// to pass it to services as p2p channel will be available anyway.
	serviceConn *net.UDPConn

	// peerID is the remote peer identity authenticated by its signature in initial exchange.
	peerID identity.Identity

	// topicHandlers is similar to HTTP Server handlers and is responsible for handling peer requests.
	topicHandlers map[string]HandlerFunc

	// streams is temp map to create request/response pipelines. Each stream is created on send and contains
	// channel to which receive loop should eventually send peer reply.
	streams      map[uint64]*stream
	nextStreamID uint64

	// privateKey is channel's private key. For now it's here just to be able to recreate the same channel for unit tests.
	privateKey PrivateKey

	// peer is remote peer holding its public key and address.
	peer *peer

	// localSessionAddr is KCP UDP conn address to which packets are written from remote conn.
	localSessionAddr *net.UDPAddr

	// sendQueue is a queue to which channel puts messages for sending. Message is not sent directly to remote peer
	// but to proxy conn which is then responsible for sending to remote.
	sendQueue chan *transportMsg

	// upnpPortsRelease should be called to close mapped upnp ports when channel is closed.
	upnpPortsRelease func()

	// stop is used to stop all running goroutines.
	stop chan struct{}
}
   185  
   186  // newChannel creates new p2p channel with initialized crypto primitives for data encryption
   187  // and starts listening for connections.
   188  func newChannel(remoteConn *net.UDPConn, privateKey PrivateKey, peerPubKey PublicKey, peerCompatibility int) (*channel, error) {
   189  	peerAddr := remoteConn.RemoteAddr().(*net.UDPAddr)
   190  	localAddr := remoteConn.LocalAddr().(*net.UDPAddr)
   191  	remoteConn, err := reopenConn(remoteConn)
   192  	if err != nil {
   193  		return nil, fmt.Errorf("could not reopen remote conn: %w", err)
   194  	}
   195  
   196  	// Create local proxy UDP conn which will receive packets from local KCP UDP conn.
   197  	proxyConn, err := net.ListenUDP("udp4", &net.UDPAddr{IP: net.ParseIP("127.0.0.1")})
   198  	if err != nil {
   199  		return nil, fmt.Errorf("could not create proxy conn: %w", err)
   200  	}
   201  
   202  	if err := router.ProtectUDPConn(proxyConn); err != nil {
   203  		return nil, fmt.Errorf("failed to protect udp proxy connection: %w", err)
   204  	}
   205  
   206  	// Setup KCP session. It will write to proxy conn only.
   207  	udpSession, localConn, err := listenUDPSession(proxyConn.LocalAddr(), privateKey, peerPubKey)
   208  	if err != nil {
   209  		return nil, fmt.Errorf("could not create KCP UDP session: %w", err)
   210  	}
   211  
   212  	log.Debug().Msgf("Creating p2p channel with local addr: %s, UDP session addr: %s, proxy addr: %s, remote peer addr: x.x.x.x:%d", localAddr.String(), udpSession.LocalAddr().String(), proxyConn.LocalAddr().String(), peerAddr.Port)
   213  
   214  	tr := transport{
   215  		wireReader: newCompatibleWireReader(udpSession, peerCompatibility),
   216  		wireWriter: newCompatibleWireWriter(udpSession, peerCompatibility),
   217  		session:    udpSession,
   218  		remoteConn: remoteConn,
   219  		localConn:  localConn,
   220  		proxyConn:  proxyConn,
   221  	}
   222  
   223  	peer := peer{
   224  		publicKey:  peerPubKey,
   225  		remoteAddr: peerAddr,
   226  	}
   227  
   228  	c := channel{
   229  		tr:               &tr,
   230  		topicHandlers:    make(map[string]HandlerFunc),
   231  		streams:          make(map[uint64]*stream),
   232  		privateKey:       privateKey,
   233  		peer:             &peer,
   234  		localSessionAddr: localConn.LocalAddr().(*net.UDPAddr),
   235  		serviceConn:      nil,
   236  		stop:             make(chan struct{}, 1),
   237  		sendQueue:        make(chan *transportMsg, 100),
   238  	}
   239  
   240  	return &c, nil
   241  }
   242  
// ID returns a unique identifier for this channel instance,
// derived from the channel's pointer address.
func (c *channel) ID() string {
	return fmt.Sprintf("%p", c)
}
   246  
   247  func (c *channel) launchReadSendLoops() {
   248  	c.mu.Lock()
   249  	defer c.mu.Unlock()
   250  
   251  	go c.remoteReadLoop(c.tr)
   252  	go c.remoteSendLoop(c.tr)
   253  	go c.localReadLoop(c.tr)
   254  	go c.localSendLoop(c.tr)
   255  }
   256  
// remoteReadLoop reads from remote conn and writes to local KCP UDP conn.
// If remote peer addr changes it will be updated and next send will use new addr.
func (c *channel) remoteReadLoop(tr *transport) {
	buf := make([]byte, mtuLimit)
	latestPeerAddr := c.peer.addr()

	for {
		// Non-blocking stop check; otherwise the loop blocks in ReadFrom below
		// and exits via the read error once the conn is closed.
		select {
		case <-c.stop:
			return
		default:
		}

		n, addr, err := tr.remoteConn.ReadFrom(buf)
		if err != nil {
			// "use of closed network connection" is expected during shutdown.
			if !errNetClose(err) {
				log.Error().Err(err).Msg("Read from remote conn failed")
			}

			return
		}

		// Check if peer port changed. Only a port change with the same IP is
		// tracked; packets from a different IP do not update the peer address.
		if addr, ok := addr.(*net.UDPAddr); ok {
			if addr.IP.Equal(latestPeerAddr.IP) && addr.Port != latestPeerAddr.Port {
				log.Debug().Msgf("Peer port changed from %v to %v", latestPeerAddr, addr)
				c.peer.updateAddr(addr)
				latestPeerAddr = addr
			}
		}

		// Forward the raw datagram to the local KCP session through the proxy conn.
		_, err = tr.proxyConn.WriteToUDP(buf[:n], c.localSessionAddr)
		if err != nil {
			if !errNetClose(err) {
				log.Error().Err(err).Msg("Write to local udp session failed")
			}

			return
		}
	}
}
   298  
// remoteSendLoop reads from proxy conn and writes to remote conn.
// Packets to proxy conn are written by local KCP UDP session from localSendLoop.
func (c *channel) remoteSendLoop(tr *transport) {
	buf := make([]byte, mtuLimit)

	for {
		// Non-blocking stop check; otherwise the loop blocks in Read below and
		// exits via the read error once the conn is closed.
		select {
		case <-c.stop:
			return
		default:
		}

		n, err := tr.proxyConn.Read(buf)
		if err != nil {
			// "use of closed network connection" is expected during shutdown.
			if !errNetClose(err) {
				log.Error().Err(err).Msg("Read from proxy conn failed")
			}

			return
		}

		// Send to the latest known peer address; it may have been updated by
		// remoteReadLoop in the meantime.
		_, err = tr.remoteConn.WriteToUDP(buf[:n], c.peer.addr())
		if err != nil {
			if !errNetClose(err) {
				log.Error().Err(err).Msgf("Write to remote peer conn failed")
			}

			return
		}
	}
}
   330  
// localReadLoop reads incoming requests or replies to initiated requests.
func (c *channel) localReadLoop(tr *transport) {
	for {
		// Non-blocking stop check; otherwise the loop blocks in readFrom below
		// and exits via the read error once the session is closed.
		select {
		case <-c.stop:
			return
		default:
		}

		var msg transportMsg
		if err := msg.readFrom(tr.wireReader); err != nil {
			// Closed pipe / closed conn errors are expected during shutdown.
			if !errPipeClosed(err) && !errNetClose(err) {
				log.Err(err).Msg("Read from wireproto reader failed")
			}
			return
		}

		if debugTransport {
			fmt.Printf("recv from %s: %+v\n", tr.session.RemoteAddr(), msg)
		}

		// If message contains topic it means that peer is making a request
		// and waits for response.
		if msg.topic != "" {
			go c.handleRequest(&msg)
		} else {
			// In other case we treat it as a reply for peer to our request.
			go c.handleReply(&msg)
		}
	}
}
   362  
// localSendLoop sends data to local proxy conn.
// It drains sendQueue and marshals each message through the wire writer
// (which writes into the KCP session, ending up at proxyConn).
func (c *channel) localSendLoop(tr *transport) {
	for {
		select {
		case <-c.stop:
			return
		case msg, more := <-c.sendQueue:
			// more is false when sendQueue has been closed.
			if !more {
				return
			}

			if debugTransport {
				fmt.Printf("send to %s: %+v\n", tr.session.RemoteAddr(), msg)
			}

			if err := msg.writeTo(tr.wireWriter); err != nil {
				// Closed pipe / closed conn errors are expected during shutdown.
				if !errPipeClosed(err) && !errNetClose(err) {
					log.Err(err).Msg("Write to wireproto writer failed")
				}
				return
			}
		}
	}
}
   387  
   388  // handleReply forwards reply message to associated stream result channel.
   389  func (c *channel) handleReply(msg *transportMsg) {
   390  	c.mu.RLock()
   391  	defer c.mu.RUnlock()
   392  	if s, ok := c.streams[msg.id]; ok {
   393  		s.resCh <- msg
   394  	} else {
   395  		log.Warn().Msgf("Stream %d not found, message data: %s", msg.id, string(msg.data))
   396  	}
   397  }
   398  
   399  // handleRequest handles incoming request and schedules reply to send queue.
   400  func (c *channel) handleRequest(msg *transportMsg) {
   401  	c.mu.RLock()
   402  	handler, ok := c.topicHandlers[msg.topic]
   403  	c.mu.RUnlock()
   404  
   405  	var resMsg transportMsg
   406  	resMsg.id = msg.id
   407  
   408  	if !ok {
   409  		resMsg.statusCode = statusCodeHandlerNotFoundErr
   410  		errMsg := fmt.Sprintf("handler %q not found", msg.topic)
   411  		log.Err(errors.New(errMsg))
   412  		resMsg.data = []byte(errMsg)
   413  		c.sendQueue <- &resMsg
   414  		return
   415  	}
   416  
   417  	ctx := defaultContext{
   418  		req: &Message{
   419  			Data: msg.data,
   420  		},
   421  		peerID: c.peerID,
   422  	}
   423  	err := handler(&ctx)
   424  	if err != nil {
   425  		log.Err(err).Msgf("Handler %q internal error", msg.topic)
   426  		resMsg.statusCode = statusCodeInternalErr
   427  		resMsg.msg = err.Error()
   428  	} else if ctx.publicError != nil {
   429  		log.Err(ctx.publicError).Msgf("Handler %q public error", msg.topic)
   430  		resMsg.statusCode = statusCodePublicErr
   431  		resMsg.data = []byte(ctx.publicError.Error())
   432  	} else {
   433  		resMsg.statusCode = statusCodeOK
   434  		if ctx.res != nil {
   435  			resMsg.data = ctx.res.Data
   436  		}
   437  	}
   438  	c.sendQueue <- &resMsg
   439  }
   440  
// Tracer returns tracer which tracks channel establishment.
func (c *channel) Tracer() *trace.Tracer {
	return c.tracer
}
   445  
// ServiceConn returns UDP connection which can be used for services.
// It is nil until setServiceConn has been called.
func (c *channel) ServiceConn() *net.UDPConn {
	return c.serviceConn
}
   450  
   451  // Close closes channel.
   452  func (c *channel) Close() error {
   453  	c.mu.Lock()
   454  	defer c.mu.Unlock()
   455  
   456  	var closeErr error
   457  	c.once.Do(func() {
   458  		close(c.stop)
   459  
   460  		if c.upnpPortsRelease != nil {
   461  			c.upnpPortsRelease()
   462  		}
   463  
   464  		if err := c.tr.localConn.Close(); err != nil {
   465  			closeErr = fmt.Errorf("could not close remote conn: %w", err)
   466  		}
   467  
   468  		if err := c.tr.remoteConn.Close(); err != nil {
   469  			closeErr = fmt.Errorf("could not close remote conn: %w", err)
   470  		}
   471  
   472  		if err := c.tr.proxyConn.Close(); err != nil {
   473  			closeErr = fmt.Errorf("could not close proxy conn: %w", err)
   474  		}
   475  
   476  		if err := c.tr.session.Close(); err != nil {
   477  			closeErr = fmt.Errorf("could not close p2p transport session: %w", err)
   478  		}
   479  
   480  		if c.serviceConn != nil {
   481  			if err := c.serviceConn.Close(); err != nil {
   482  				if errors.Is(err, errors.New("use of closed network connection")) { // Have to check this error as a string match https://github.com/golang/go/issues/4373
   483  					closeErr = fmt.Errorf("could not close p2p service connection: %w", err)
   484  				}
   485  			}
   486  		}
   487  
   488  		c.tr = nil
   489  	})
   490  
   491  	if err := router.RemoveExcludedIP(c.peer.remoteAddr.IP); err != nil {
   492  		return err
   493  	}
   494  
   495  	return closeErr
   496  }
   497  
// Conn returns underlying channel's UDP connection.
// NOTE(review): Close sets c.tr to nil, so calling Conn after Close panics — verify callers.
func (c *channel) Conn() *net.UDPConn {
	return c.tr.remoteConn
}
   502  
   503  // Send sends message to given topic. Peer listening to topic will receive message.
   504  func (c *channel) Send(ctx context.Context, topic string, msg *Message) (*Message, error) {
   505  	reply, err := c.sendRequest(ctx, topic, msg)
   506  	if err != nil {
   507  		return nil, err
   508  	}
   509  	return reply, nil
   510  }
   511  
// Handle registers handler for given topic which handles peer request.
// Registering the same topic twice replaces the previous handler.
func (c *channel) Handle(topic string, handler HandlerFunc) {
	c.mu.Lock()
	defer c.mu.Unlock()

	c.topicHandlers[topic] = handler
}
   519  
   520  // sendRequest sends message to send queue and waits for response.
   521  func (c *channel) sendRequest(ctx context.Context, topic string, m *Message) (*Message, error) {
   522  	s := c.addStream()
   523  	defer c.deleteStream(s.id)
   524  
   525  	// Send request.
   526  	c.sendQueue <- &transportMsg{id: s.id, topic: topic, data: m.Data}
   527  
   528  	// Wait for response.
   529  	select {
   530  	case <-ctx.Done():
   531  		return nil, fmt.Errorf("timeout waiting for reply to %q: %w", topic, ErrSendTimeout)
   532  	case res := <-s.resCh:
   533  		if res.statusCode != statusCodeOK {
   534  			if res.statusCode == statusCodePublicErr {
   535  				return nil, fmt.Errorf("public peer error: %s", string(res.data))
   536  			}
   537  			if res.statusCode == statusCodeHandlerNotFoundErr {
   538  				return nil, fmt.Errorf("%s: %w", string(res.data), ErrHandlerNotFound)
   539  			}
   540  			return nil, fmt.Errorf("peer error: %w", errors.New(res.msg))
   541  		}
   542  		return &Message{Data: res.data}, nil
   543  	}
   544  }
   545  
   546  func (c *channel) addStream() *stream {
   547  	c.mu.Lock()
   548  	defer c.mu.Unlock()
   549  
   550  	c.nextStreamID++
   551  	s := &stream{id: c.nextStreamID, resCh: make(chan *transportMsg, 1)}
   552  	c.streams[s.id] = s
   553  	return s
   554  }
   555  
   556  func (c *channel) deleteStream(id uint64) {
   557  	c.mu.Lock()
   558  	defer c.mu.Unlock()
   559  
   560  	delete(c.streams, id)
   561  }
   562  
// setTracer attaches the tracer which tracks channel establishment.
func (c *channel) setTracer(tracer *trace.Tracer) {
	c.mu.Lock()
	defer c.mu.Unlock()

	c.tracer = tracer
}
   569  
// setServiceConn attaches the separate UDP connection used by services.
// NOTE(review): conn.RemoteAddr() must be non-nil here (connected socket) or
// the log line panics — confirm callers always pass a connected conn.
func (c *channel) setServiceConn(conn *net.UDPConn) {
	c.mu.Lock()
	defer c.mu.Unlock()

	log.Debug().Msgf("Will use service conn with local port: %d, remote port: %d", conn.LocalAddr().(*net.UDPAddr).Port, conn.RemoteAddr().(*net.UDPAddr).Port)
	c.serviceConn = conn
}
   577  
// setPeerID records the authenticated identity of the remote peer.
func (c *channel) setPeerID(id identity.Identity) {
	c.mu.Lock()
	defer c.mu.Unlock()

	c.peerID = id
}
   584  
// setUpnpPortsRelease registers the callback invoked by Close to release
// mapped upnp ports.
func (c *channel) setUpnpPortsRelease(release func()) {
	c.mu.Lock()
	defer c.mu.Unlock()

	c.upnpPortsRelease = release
}
   591  
   592  func reopenConn(conn *net.UDPConn) (*net.UDPConn, error) {
   593  	// conn first must be closed to prevent use of WriteTo with pre-connected connection error.
   594  	conn.Close()
   595  	conn, err := net.ListenUDP("udp4", conn.LocalAddr().(*net.UDPAddr))
   596  	if err != nil {
   597  		return nil, fmt.Errorf("could not listen UDP: %w", err)
   598  	}
   599  
   600  	if err := router.ProtectUDPConn(conn); err != nil {
   601  		return nil, fmt.Errorf("failed to protect udp connection: %w", err)
   602  	}
   603  
   604  	return conn, nil
   605  }
   606  
   607  func listenUDPSession(proxyAddr net.Addr, privateKey PrivateKey, peerPubKey PublicKey) (sess *kcp.UDPSession, localconn *net.UDPConn, err error) {
   608  	blockCrypt, err := newBlockCrypt(privateKey, peerPubKey)
   609  	if err != nil {
   610  		return nil, nil, fmt.Errorf("could not create block crypt: %w", err)
   611  	}
   612  
   613  	localConn, err := net.ListenUDP("udp4", &net.UDPAddr{IP: net.ParseIP("127.0.0.1")})
   614  	if err != nil {
   615  		return nil, nil, fmt.Errorf("could not create UDP conn: %w", err)
   616  	}
   617  
   618  	sess, err = kcp.NewConn3(1, proxyAddr, blockCrypt, 10, 3, localConn)
   619  	if err != nil {
   620  		localConn.Close()
   621  		return nil, nil, fmt.Errorf("could not create UDP session: %w", err)
   622  	}
   623  
   624  	sess.SetMtu(kcpMTUSize)
   625  
   626  	return sess, localConn, nil
   627  }
   628  
   629  func newBlockCrypt(privateKey PrivateKey, peerPublicKey PublicKey) (kcp.BlockCrypt, error) {
   630  	// Compute shared key. Nonce for each message will be added inside kcp salsa block crypt.
   631  	var sharedKey [32]byte
   632  	box.Precompute(&sharedKey, (*[32]byte)(&peerPublicKey), (*[32]byte)(&privateKey))
   633  	blockCrypt, err := kcp.NewSalsa20BlockCrypt(sharedKey[:])
   634  	if err != nil {
   635  		return nil, fmt.Errorf("could not create Sasla20 block crypt: %w", err)
   636  	}
   637  	return blockCrypt, nil
   638  }
   639  
   640  func errNetClose(err error) bool {
   641  	// Hack. See https://github.com/golang/go/issues/4373 which should expose net close error with 1.15.
   642  	return strings.Contains(err.Error(), "use of closed network connection")
   643  }
   644  
   645  func errPipeClosed(err error) bool {
   646  	// Hack. We can't check io.ErrPipeClosed as kcp wraps this error with old github.com/pkg/errors.
   647  	return strings.Contains(err.Error(), "io: read/write on closed pipe")
   648  }