github.com/ergo-services/ergo@v1.999.224/proto/dist/proto.go

     1  package dist
     2  
     3  import (
     4  	"bytes"
     5  	"compress/gzip"
     6  	"context"
     7  	"crypto/aes"
     8  	"crypto/cipher"
     9  	crand "crypto/rand"
    10  	"encoding/binary"
    11  	"fmt"
    12  	"io"
    13  	"math/rand"
    14  	"net"
    15  	"sync"
    16  	"sync/atomic"
    17  	"time"
    18  
    19  	"github.com/ergo-services/ergo/etf"
    20  	"github.com/ergo-services/ergo/gen"
    21  	"github.com/ergo-services/ergo/lib"
    22  	"github.com/ergo-services/ergo/node"
    23  )
    24  
    25  var (
    26  	errMissingInCache = fmt.Errorf("missing in cache")
    27  	errMalformed      = fmt.Errorf("malformed")
    28  	gzipReaders       = &sync.Pool{
    29  		New: func() interface{} {
    30  			return nil
    31  		},
    32  	}
    33  	gzipWriters  = [10]*sync.Pool{}
    34  	sendMessages = &sync.Pool{
    35  		New: func() interface{} {
    36  			return &sendMessage{}
    37  		},
    38  	}
    39  )
    40  
    41  func init() {
    42  	rand.Seed(time.Now().UTC().UnixNano())
    43  	for i := range gzipWriters {
    44  		gzipWriters[i] = &sync.Pool{
    45  			New: func() interface{} {
    46  				return nil
    47  			},
    48  		}
    49  	}
    50  }
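
        // The pools above are keyed by compression level (gzipWriters[1..9]); a New
        // function returning nil makes a nil Get mean "allocate a fresh one". A minimal
        // sketch of fetching and recycling a writer this way (illustrative only; the
        // real encoder lives in the sender path, outside this section):
        func exampleGzipWriter(level int, dst io.Writer) *gzip.Writer {
        	if w, ok := gzipWriters[level].Get().(*gzip.Writer); ok {
        		w.Reset(dst)
        		return w
        	}
        	// error ignored here: level is assumed to be a valid 1..9
        	w, _ := gzip.NewWriterLevel(dst, level)
        	return w
        }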
    51  
    52  const (
    53  	defaultLatency = 200 * time.Nanosecond // for linkFlusher
    54  
    55  	defaultCleanTimeout  = 5 * time.Second  // for checkClean
    56  	defaultCleanDeadline = 30 * time.Second // for checkClean
    57  
    58  	// ergo proxy message
    59  	protoProxy = 141
    60  	// ergo proxy encrypted message
    61  	protoProxyX = 142
    62  
    63  	// http://erlang.org/doc/apps/erts/erl_ext_dist.html#distribution_header
    64  	protoDist          = 131
    65  	protoDistMessage   = 68
    66  	protoDistFragment1 = 69
    67  	protoDistFragmentN = 70
    68  
    69  	// ergo gzipped messages
    70  	protoDistMessageZ   = 200
    71  	protoDistFragment1Z = 201
    72  	protoDistFragmentNZ = 202
    73  )
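
        // The framing implied by these constants: every packet on the wire is a 4-byte
        // big-endian length followed by a proto byte and the body; the length covers
        // everything after the 4-byte header. A hedged sketch (examplePacketFrame is
        // hypothetical, not part of this file):
        func examplePacketFrame(body []byte) []byte {
        	packet := make([]byte, 4, 4+1+len(body))
        	binary.BigEndian.PutUint32(packet, uint32(1+len(body)))
        	packet = append(packet, protoDist) // 131
        	return append(packet, body...)
        }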
    74  
    75  type fragmentedPacket struct {
    76  	buffer           *lib.Buffer
    77  	disordered       *lib.Buffer
    78  	disorderedSlices map[uint64][]byte
    79  	fragmentID       uint64
    80  	lastUpdate       time.Time
    81  }
    82  
    83  type proxySession struct {
    84  	session     node.ProxySession
    85  	cache       etf.AtomCache
    86  	senderCache []map[etf.Atom]etf.CacheItem
    87  }
    88  
    89  type distConnection struct {
    90  	node.Connection
    91  
    92  	nodename string
    93  	peername string
    94  	ctx      context.Context
    95  
    96  	// peer flags
    97  	flags node.Flags
    98  
    99  	creation uint32
   100  
   101  	// socket
   102  	conn          lib.NetReadWriter
   103  	cancelContext context.CancelFunc
   104  
   105  	// proxy session (endpoints)
   106  	proxySessionsByID       map[string]proxySession
   107  	proxySessionsByPeerName map[string]proxySession
   108  	proxySessionsMutex      sync.RWMutex
   109  
   110  	// route incoming messages
   111  	router node.CoreRouter
   112  
   113  	// writer
   114  	flusher *linkFlusher
   115  
   116  	// buffer
   117  	buffer *lib.Buffer
   118  
   119  	// senders list of channels for the sending goroutines
   120  	senders senders
   121  	// receivers list of channels for the receiving goroutines
   122  	receivers receivers
   123  
   124  	// atom cache for outgoing messages
   125  	cache etf.AtomCache
   126  
   127  	mapping *etf.AtomMapping
   128  
   129  	// fragmentation sequence ID
   130  	sequenceID     int64
   131  	fragments      map[uint64]*fragmentedPacket
   132  	fragmentsMutex sync.Mutex
   133  
   134  	// check and clean lost fragments
   135  	checkCleanPending  bool
   136  	checkCleanTimer    *time.Timer
   137  	checkCleanTimeout  time.Duration // default is 5 seconds
   138  	checkCleanDeadline time.Duration // how long we wait for the next fragment of the certain sequenceID. Default is 30 seconds
   139  
   140  	// stats
   141  	stats node.NetworkStats
   142  }
   143  
   144  type distProto struct {
   145  	node.Proto
   146  	nodename string
   147  	options  node.ProtoOptions
   148  }
   149  
   150  func CreateProto(options node.ProtoOptions) node.ProtoInterface {
   151  	return &distProto{
   152  		options: options,
   153  	}
   154  }
   155  
   156  //
   157  // node.Proto interface implementation
   158  //
   159  
   160  type senders struct {
   161  	sender []*senderChannel
   162  	n      int32
   163  	i      int32
   164  }
   165  
   166  type senderChannel struct {
   167  	sync.Mutex
   168  	sendChannel chan *sendMessage
   169  }
   170  
   171  type sendMessage struct {
   172  	packet               *lib.Buffer
   173  	control              etf.Term
   174  	payload              etf.Term
   175  	compression          bool
   176  	compressionLevel     int
   177  	compressionThreshold int
   178  	proxy                *proxySession
   179  }
   180  
   181  type receivers struct {
   182  	recv []chan *lib.Buffer
   183  	n    int32
   184  	i    int32
   185  }
   186  
   187  func (dp *distProto) Init(ctx context.Context, conn lib.NetReadWriter, nodename string, details node.HandshakeDetails) (node.ConnectionInterface, error) {
   188  	connection := &distConnection{
   189  		nodename:                nodename,
   190  		peername:                details.Name,
   191  		flags:                   details.Flags,
   192  		creation:                details.Creation,
   193  		buffer:                  details.Buffer,
   194  		conn:                    conn,
   195  		cache:                   etf.NewAtomCache(),
   196  		mapping:                 details.AtomMapping,
   197  		proxySessionsByID:       make(map[string]proxySession),
   198  		proxySessionsByPeerName: make(map[string]proxySession),
   199  		fragments:               make(map[uint64]*fragmentedPacket),
   200  		checkCleanTimeout:       defaultCleanTimeout,
   201  		checkCleanDeadline:      defaultCleanDeadline,
   202  	}
   203  	connection.ctx, connection.cancelContext = context.WithCancel(ctx)
   204  
   205  	connection.stats.NodeName = details.Name
   206  
   207  	// create connection buffering
   208  	connection.flusher = newLinkFlusher(conn, defaultLatency)
   209  
   210  	numHandlers := dp.options.NumHandlers
   211  	if details.NumHandlers > 0 {
   212  		numHandlers = details.NumHandlers
   213  	}
   214  
   215  	// do not use shared channels within intensive code parts; it impacts performance
   216  	connection.receivers = receivers{
   217  		recv: make([]chan *lib.Buffer, numHandlers),
   218  		n:    int32(numHandlers),
   219  	}
   220  
   221  	// run readers for incoming messages
   222  	for i := 0; i < numHandlers; i++ {
   223  		// run packet reader routines (decoder)
   224  		recv := make(chan *lib.Buffer, dp.options.RecvQueueLength)
   225  		connection.receivers.recv[i] = recv
   226  		go connection.receiver(recv)
   227  	}
   228  
   229  	connection.senders = senders{
   230  		sender: make([]*senderChannel, numHandlers),
   231  		n:      int32(numHandlers),
   232  	}
   233  
   234  	// run readers/writers for incoming/outgoing messages
   235  	for i := 0; i < numHandlers; i++ {
   236  		// run writer routines (encoder)
   237  		send := make(chan *sendMessage, dp.options.SendQueueLength)
   238  		connection.senders.sender[i] = &senderChannel{
   239  			sendChannel: send,
   240  		}
   241  		go connection.sender(i, send, dp.options, connection.flags)
   242  	}
   243  
   244  	return connection, nil
   245  }
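
        // Init fans packets out across numHandlers channels; the read loop in Serve
        // advances a round-robin index and wraps it at n. A minimal sketch of that
        // selection (illustrative only):
        func exampleNextHandler(i, n int32) int32 {
        	if i++; i < n {
        		return i
        	}
        	return 0
        }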
   246  
   247  func (dp *distProto) Serve(ci node.ConnectionInterface, router node.CoreRouter) {
   248  	connection, ok := ci.(*distConnection)
   249  	if !ok {
   250  		lib.Warning("conn is not a *distConnection type")
   251  		return
   252  	}
   253  
   254  	connection.router = router
   255  
   256  	// run read loop
   257  	var err error
   258  	var packetLength int
   259  
   260  	b := connection.buffer // not nil if we got extra data within the handshake process
   261  	if b == nil {
   262  		b = lib.TakeBuffer()
   263  	}
   264  
   265  	for {
   266  		packetLength, err = connection.read(b, dp.options.MaxMessageSize)
   267  
   268  		// validation
   269  		if err != nil || packetLength == 0 {
   270  			// link was closed or got malformed data
   271  			if err != nil {
   272  				lib.Warning("link with %s was closed, error: %s", connection.peername, err)
   273  			}
   274  			lib.ReleaseBuffer(b)
   275  			return
   276  		}
   277  
   278  		// check the context if it was cancelled
   279  		if connection.ctx.Err() != nil {
   280  			// canceled
   281  			lib.ReleaseBuffer(b)
   282  			return
   283  		}
   284  
   285  		// take the new buffer for the next reading and append the tail
   286  		// (which is part of the next packet)
   287  		b1 := lib.TakeBuffer()
   288  		b1.Set(b.B[packetLength:])
   289  
   290  		// cut the tail and send it further for handling.
   291  		// buffer b has to be released by the reader of
   292  		// recv channel (link.ReadHandlePacket)
   293  		b.B = b.B[:packetLength]
   294  		connection.receivers.recv[connection.receivers.i] <- b
   295  
   296  		// set new buffer as a current for the next reading
   297  		b = b1
   298  
   299  		// round-robin switch to the next receiver
   300  		connection.receivers.i++
   301  		if connection.receivers.i < connection.receivers.n {
   302  			continue
   303  		}
   304  		connection.receivers.i = 0
   305  	}
   306  
   307  }
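
        // Serve splits the byte stream into packets while carrying the tail of each
        // read over into the next buffer. A standalone sketch of the same framing,
        // assuming complete 4-byte length prefixes (illustrative only):
        func exampleSplitPackets(stream []byte) ([][]byte, []byte) {
        	var packets [][]byte
        	for len(stream) >= 4 {
        		total := int(binary.BigEndian.Uint32(stream[:4])) + 4
        		if len(stream) < total {
        			break
        		}
        		packets = append(packets, stream[:total])
        		stream = stream[total:]
        	}
        	return packets, stream // the remainder is the "tail" for the next read
        }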
   308  
   309  func (dp *distProto) Terminate(ci node.ConnectionInterface) {
   310  	connection, ok := ci.(*distConnection)
   311  	if !ok {
   312  		lib.Warning("conn is not a *distConnection type")
   313  		return
   314  	}
   315  
   316  	for i := 0; i < dp.options.NumHandlers; i++ {
   317  		sender := connection.senders.sender[i]
   318  		if sender != nil {
   319  			sender.Lock()
   320  			close(sender.sendChannel)
   321  			sender.sendChannel = nil
   322  			sender.Unlock()
   323  			connection.senders.sender[i] = nil
   324  		}
   325  		if connection.receivers.recv[i] != nil {
   326  			close(connection.receivers.recv[i])
   327  		}
   328  	}
   329  	connection.flusher.Stop()
   330  	connection.cancelContext()
   331  }
   332  
   333  // node.Connection interface implementation
   334  
   335  func (dc *distConnection) Send(from gen.Process, to etf.Pid, message etf.Term) error {
   336  	msg := sendMessages.Get().(*sendMessage)
   337  
   338  	msg.control = etf.Tuple{distProtoSEND, etf.Atom(""), to}
   339  	msg.payload = message
   340  	msg.compression = from.Compression()
   341  	msg.compressionLevel = from.CompressionLevel()
   342  	msg.compressionThreshold = from.CompressionThreshold()
   343  
   344  	return dc.send(string(to.Node), to.Creation, msg)
   345  }
   346  func (dc *distConnection) SendReg(from gen.Process, to gen.ProcessID, message etf.Term) error {
   347  	msg := sendMessages.Get().(*sendMessage)
   348  
   349  	msg.control = etf.Tuple{distProtoREG_SEND, from.Self(), etf.Atom(""), etf.Atom(to.Name)}
   350  	msg.payload = message
   351  	msg.compression = from.Compression()
   352  	msg.compressionLevel = from.CompressionLevel()
   353  	msg.compressionThreshold = from.CompressionThreshold()
   354  	return dc.send(to.Node, 0, msg)
   355  }
   356  func (dc *distConnection) SendAlias(from gen.Process, to etf.Alias, message etf.Term) error {
   357  	if dc.flags.EnableAlias == false {
   358  		return lib.ErrUnsupported
   359  	}
   360  
   361  	msg := sendMessages.Get().(*sendMessage)
   362  
   363  	msg.control = etf.Tuple{distProtoALIAS_SEND, from.Self(), to}
   364  	msg.payload = message
   365  	msg.compression = from.Compression()
   366  	msg.compressionLevel = from.CompressionLevel()
   367  	msg.compressionThreshold = from.CompressionThreshold()
   368  
   369  	return dc.send(string(to.Node), to.Creation, msg)
   370  }
   371  
   372  func (dc *distConnection) Link(local etf.Pid, remote etf.Pid) error {
   373  	dc.proxySessionsMutex.RLock()
   374  	ps, isProxy := dc.proxySessionsByPeerName[string(remote.Node)]
   375  	dc.proxySessionsMutex.RUnlock()
   376  	if isProxy && ps.session.PeerFlags.EnableLink == false {
   377  		return lib.ErrPeerUnsupported
   378  	}
   379  	msg := &sendMessage{
   380  		control: etf.Tuple{distProtoLINK, local, remote},
   381  	}
   382  	return dc.send(string(remote.Node), remote.Creation, msg)
   383  }
   384  func (dc *distConnection) Unlink(local etf.Pid, remote etf.Pid) error {
   385  	dc.proxySessionsMutex.RLock()
   386  	ps, isProxy := dc.proxySessionsByPeerName[string(remote.Node)]
   387  	dc.proxySessionsMutex.RUnlock()
   388  	if isProxy && ps.session.PeerFlags.EnableLink == false {
   389  		return lib.ErrPeerUnsupported
   390  	}
   391  	msg := &sendMessage{
   392  		control: etf.Tuple{distProtoUNLINK, local, remote},
   393  	}
   394  	return dc.send(string(remote.Node), remote.Creation, msg)
   395  }
   396  func (dc *distConnection) LinkExit(to etf.Pid, terminated etf.Pid, reason string) error {
   397  	msg := &sendMessage{
   398  		control: etf.Tuple{distProtoEXIT, terminated, to, etf.Atom(reason)},
   399  	}
   400  	return dc.send(string(to.Node), 0, msg)
   401  }
   402  
   403  func (dc *distConnection) Monitor(local etf.Pid, remote etf.Pid, ref etf.Ref) error {
   404  	dc.proxySessionsMutex.RLock()
   405  	ps, isProxy := dc.proxySessionsByPeerName[string(remote.Node)]
   406  	dc.proxySessionsMutex.RUnlock()
   407  	if isProxy && ps.session.PeerFlags.EnableMonitor == false {
   408  		return lib.ErrPeerUnsupported
   409  	}
   410  	msg := &sendMessage{
   411  		control: etf.Tuple{distProtoMONITOR, local, remote, ref},
   412  	}
   413  	return dc.send(string(remote.Node), remote.Creation, msg)
   414  }
   415  func (dc *distConnection) MonitorReg(local etf.Pid, remote gen.ProcessID, ref etf.Ref) error {
   416  	dc.proxySessionsMutex.RLock()
   417  	ps, isProxy := dc.proxySessionsByPeerName[remote.Node]
   418  	dc.proxySessionsMutex.RUnlock()
   419  	if isProxy && ps.session.PeerFlags.EnableMonitor == false {
   420  		return lib.ErrPeerUnsupported
   421  	}
   422  	msg := &sendMessage{
   423  		control: etf.Tuple{distProtoMONITOR, local, etf.Atom(remote.Name), ref},
   424  	}
   425  	return dc.send(remote.Node, 0, msg)
   426  }
   427  func (dc *distConnection) Demonitor(local etf.Pid, remote etf.Pid, ref etf.Ref) error {
   428  	dc.proxySessionsMutex.RLock()
   429  	ps, isProxy := dc.proxySessionsByPeerName[string(remote.Node)]
   430  	dc.proxySessionsMutex.RUnlock()
   431  	if isProxy && ps.session.PeerFlags.EnableMonitor == false {
   432  		return lib.ErrPeerUnsupported
   433  	}
   434  	msg := &sendMessage{
   435  		control: etf.Tuple{distProtoDEMONITOR, local, remote, ref},
   436  	}
   437  	return dc.send(string(remote.Node), remote.Creation, msg)
   438  }
   439  func (dc *distConnection) DemonitorReg(local etf.Pid, remote gen.ProcessID, ref etf.Ref) error {
   440  	dc.proxySessionsMutex.RLock()
   441  	ps, isProxy := dc.proxySessionsByPeerName[remote.Node]
   442  	dc.proxySessionsMutex.RUnlock()
   443  	if isProxy && ps.session.PeerFlags.EnableMonitor == false {
   444  		return lib.ErrPeerUnsupported
   445  	}
   446  	msg := &sendMessage{
   447  		control: etf.Tuple{distProtoDEMONITOR, local, etf.Atom(remote.Name), ref},
   448  	}
   449  	return dc.send(remote.Node, 0, msg)
   450  }
   451  func (dc *distConnection) MonitorExitReg(to etf.Pid, terminated gen.ProcessID, reason string, ref etf.Ref) error {
   452  	msg := &sendMessage{
   453  		control: etf.Tuple{distProtoMONITOR_EXIT, etf.Atom(terminated.Name), to, ref, etf.Atom(reason)},
   454  	}
   455  	return dc.send(string(to.Node), to.Creation, msg)
   456  }
   457  func (dc *distConnection) MonitorExit(to etf.Pid, terminated etf.Pid, reason string, ref etf.Ref) error {
   458  	msg := &sendMessage{
   459  		control: etf.Tuple{distProtoMONITOR_EXIT, terminated, to, ref, etf.Atom(reason)},
   460  	}
   461  	return dc.send(string(to.Node), to.Creation, msg)
   462  }
   463  
   464  func (dc *distConnection) SpawnRequest(nodeName string, behaviorName string, request gen.RemoteSpawnRequest, args ...etf.Term) error {
   465  	dc.proxySessionsMutex.RLock()
   466  	ps, isProxy := dc.proxySessionsByPeerName[nodeName]
   467  	dc.proxySessionsMutex.RUnlock()
   468  	if isProxy {
   469  		if ps.session.PeerFlags.EnableRemoteSpawn == false {
   470  			return lib.ErrPeerUnsupported
   471  		}
   472  	} else {
   473  		if dc.flags.EnableRemoteSpawn == false {
   474  			return lib.ErrPeerUnsupported
   475  		}
   476  	}
   477  
   478  	optlist := etf.List{}
   479  	if request.Options.Name != "" {
   480  		optlist = append(optlist, etf.Tuple{etf.Atom("name"), etf.Atom(request.Options.Name)})
   481  
   482  	}
   483  	msg := &sendMessage{
   484  		control: etf.Tuple{distProtoSPAWN_REQUEST, request.Ref, request.From, request.From,
   485  			// {M,F,A}
   486  			etf.Tuple{etf.Atom(behaviorName), etf.Atom(request.Options.Function), len(args)},
   487  			optlist,
   488  		},
   489  		payload: args,
   490  	}
   491  	return dc.send(nodeName, 0, msg)
   492  }
   493  
   494  func (dc *distConnection) SpawnReply(to etf.Pid, ref etf.Ref, pid etf.Pid) error {
   495  	msg := &sendMessage{
   496  		control: etf.Tuple{distProtoSPAWN_REPLY, ref, to, 0, pid},
   497  	}
   498  	return dc.send(string(to.Node), to.Creation, msg)
   499  }
   500  
   501  func (dc *distConnection) SpawnReplyError(to etf.Pid, ref etf.Ref, err error) error {
   502  	msg := &sendMessage{
   503  		control: etf.Tuple{distProtoSPAWN_REPLY, ref, to, 0, etf.Atom(err.Error())},
   504  	}
   505  	return dc.send(string(to.Node), to.Creation, msg)
   506  }
   507  
   508  func (dc *distConnection) ProxyConnectRequest(request node.ProxyConnectRequest) error {
   509  	if dc.flags.EnableProxy == false {
   510  		return lib.ErrPeerUnsupported
   511  	}
   512  
   513  	path := []etf.Atom{}
   514  	for i := range request.Path {
   515  		path = append(path, etf.Atom(request.Path[i]))
   516  	}
   517  
   518  	msg := &sendMessage{
   519  		control: etf.Tuple{distProtoPROXY_CONNECT_REQUEST,
   520  			request.ID,           // etf.Ref
   521  			etf.Atom(request.To), // to node
   522  			request.Digest,       //
   523  			request.PublicKey,    // public key for the sending symmetric key
   524  			proxyFlagsToUint64(request.Flags),
   525  			request.Creation,
   526  			request.Hop,
   527  			path,
   528  		},
   529  	}
   530  	return dc.send(dc.peername, 0, msg)
   531  }
   532  
   533  func (dc *distConnection) ProxyConnectReply(reply node.ProxyConnectReply) error {
   534  	if dc.flags.EnableProxy == false {
   535  		return lib.ErrPeerUnsupported
   536  	}
   537  
   538  	path := etf.List{}
   539  	for i := range reply.Path {
   540  		path = append(path, etf.Atom(reply.Path[i]))
   541  	}
   542  
   543  	msg := &sendMessage{
   544  		control: etf.Tuple{distProtoPROXY_CONNECT_REPLY,
   545  			reply.ID,           // etf.Ref
   546  			etf.Atom(reply.To), // to node
   547  			reply.Digest,       //
   548  			reply.Cipher,       //
   549  			proxyFlagsToUint64(reply.Flags),
   550  			reply.Creation,
   551  			reply.SessionID,
   552  			path,
   553  		},
   554  	}
   555  
   556  	return dc.send(dc.peername, 0, msg)
   557  }
   558  
   559  func (dc *distConnection) ProxyConnectCancel(err node.ProxyConnectCancel) error {
   560  	if dc.flags.EnableProxy == false {
   561  		return lib.ErrPeerUnsupported
   562  	}
   563  
   564  	path := etf.List{}
   565  	for i := range err.Path {
   566  		path = append(path, etf.Atom(err.Path[i]))
   567  	}
   568  
   569  	msg := &sendMessage{
   570  		control: etf.Tuple{distProtoPROXY_CONNECT_CANCEL,
   571  			err.ID,             // etf.Ref
   572  			etf.Atom(err.From), // from node
   573  			err.Reason,
   574  			path,
   575  		},
   576  	}
   577  
   578  	return dc.send(dc.peername, 0, msg)
   579  }
   580  
   581  func (dc *distConnection) ProxyDisconnect(disconnect node.ProxyDisconnect) error {
   582  	if dc.flags.EnableProxy == false {
   583  		return lib.ErrPeerUnsupported
   584  	}
   585  
   586  	msg := &sendMessage{
   587  		control: etf.Tuple{distProtoPROXY_DISCONNECT,
   588  			etf.Atom(disconnect.Node),
   589  			etf.Atom(disconnect.Proxy),
   590  			disconnect.SessionID,
   591  			disconnect.Reason,
   592  		},
   593  	}
   594  
   595  	return dc.send(dc.peername, 0, msg)
   596  }
   597  
   598  func (dc *distConnection) ProxyRegisterSession(session node.ProxySession) error {
   599  	dc.proxySessionsMutex.Lock()
   600  	defer dc.proxySessionsMutex.Unlock()
   601  	_, exist := dc.proxySessionsByPeerName[session.PeerName]
   602  	if exist {
   603  		return lib.ErrProxySessionDuplicate
   604  	}
   605  	_, exist = dc.proxySessionsByID[session.ID]
   606  	if exist {
   607  		return lib.ErrProxySessionDuplicate
   608  	}
   609  	ps := proxySession{
   610  		session: session,
   611  		cache:   etf.NewAtomCache(),
   612  		// every sender should have its own senderAtomCache in the proxy session
   613  		senderCache: make([]map[etf.Atom]etf.CacheItem, len(dc.senders.sender)),
   614  	}
   615  	dc.proxySessionsByPeerName[session.PeerName] = ps
   616  	dc.proxySessionsByID[session.ID] = ps
   617  	return nil
   618  }
   619  
   620  func (dc *distConnection) ProxyUnregisterSession(id string) error {
   621  	dc.proxySessionsMutex.Lock()
   622  	defer dc.proxySessionsMutex.Unlock()
   623  	ps, exist := dc.proxySessionsByID[id]
   624  	if exist == false {
   625  		return lib.ErrProxySessionUnknown
   626  	}
   627  	delete(dc.proxySessionsByPeerName, ps.session.PeerName)
   628  	delete(dc.proxySessionsByID, ps.session.ID)
   629  	return nil
   630  }
   631  
   632  func (dc *distConnection) ProxyPacket(packet *lib.Buffer) error {
   633  	if dc.flags.EnableProxy == false {
   634  		return lib.ErrPeerUnsupported
   635  	}
   636  	msg := &sendMessage{
   637  		packet: packet,
   638  	}
   639  	return dc.send(dc.peername, 0, msg)
   640  }
   641  
   642  //
   643  // internal
   644  //
   645  
   646  func (dc *distConnection) read(b *lib.Buffer, max int) (int, error) {
   647  	// http://erlang.org/doc/apps/erts/erl_dist_protocol.html#protocol-between-connected-nodes
   648  	expectingBytes := 4
   649  	for {
   650  		if b.Len() < expectingBytes {
   651  			// if no data is received within 4 * keepAlivePeriod, the remote
   652  			// node is assumed to be stuck.
   653  			deadline := true
   654  			if err := dc.conn.SetReadDeadline(time.Now().Add(4 * keepAlivePeriod)); err != nil {
   655  				deadline = false
   656  			}
   657  
   658  			n, e := b.ReadDataFrom(dc.conn, max)
   659  			if n == 0 {
   660  				if err, ok := e.(net.Error); deadline && ok && err.Timeout() {
   661  					lib.Warning("Node %q not responding. Drop connection", dc.peername)
   662  				}
   663  				// link was closed
   664  				return 0, nil
   665  			}
   666  
   667  			if e != nil && e != io.EOF {
   668  				// something went wrong
   669  				return 0, e
   670  			}
   671  
   672  			// check one more time whether we should read more data
   673  			continue
   674  		}
   675  
   676  		packetLength := binary.BigEndian.Uint32(b.B[:4])
   677  		if packetLength == 0 {
   678  			// it was a "software" keepalive
   679  			expectingBytes = 4
   680  			if len(b.B) == 4 {
   681  				b.Reset()
   682  				continue
   683  			}
   684  			b.B = b.B[4:]
   685  			continue
   686  		}
   687  
   688  		if b.Len() < int(packetLength)+4 {
   689  			expectingBytes = int(packetLength) + 4
   690  			continue
   691  		}
   692  
   693  		return int(packetLength) + 4, nil
   694  	}
   695  
   696  }
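
        // A packet length of zero is treated above as a "software" keepalive. A hedged
        // sketch of what the sending side emits for it (illustrative only):
        func exampleKeepalivePacket() []byte {
        	return []byte{0, 0, 0, 0} // 4-byte big-endian length == 0, no body
        }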
   697  
   698  type deferrMissing struct {
   699  	b *lib.Buffer
   700  	c int
   701  }
   702  
   703  type distMessage struct {
   704  	control etf.Term
   705  	payload etf.Term
   706  	proxy   *proxySession
   707  }
   708  
   709  func (dc *distConnection) receiver(recv <-chan *lib.Buffer) {
   710  	var b *lib.Buffer
   711  	var missing deferrMissing
   712  	var Timeout <-chan time.Time
   713  
   714  	// cancel the connection context if something went wrong;
   715  	// it will close the connection and stop all the
   716  	// goroutines around this connection
   717  	defer dc.cancelContext()
   718  
   719  	deferrChannel := make(chan deferrMissing, 100)
   720  	defer close(deferrChannel)
   721  
   722  	timer := lib.TakeTimer()
   723  	defer lib.ReleaseTimer(timer)
   724  
   725  	dChannel := deferrChannel
   726  
   727  	for {
   728  		select {
   729  		case missing = <-dChannel:
   730  			b = missing.b
   731  		default:
   732  			if len(deferrChannel) > 0 {
   733  				timer.Reset(150 * time.Millisecond)
   734  				Timeout = timer.C
   735  			} else {
   736  				Timeout = nil
   737  			}
   738  			select {
   739  			case b = <-recv:
   740  				if b == nil {
   741  					// channel was closed
   742  					return
   743  				}
   744  			case <-Timeout:
   745  				dChannel = deferrChannel
   746  				continue
   747  			}
   748  		}
   749  
   750  		// read and decode received packet
   751  		message, err := dc.decodePacket(b)
   752  
   753  		if err == errMissingInCache {
   754  			if b == missing.b && missing.c > 100 {
   755  				lib.Warning("Disordered data at the link with %q. Close connection", dc.peername)
   756  				dc.cancelContext()
   757  				lib.ReleaseBuffer(b)
   758  				return
   759  			}
   760  
   761  			if b == missing.b {
   762  				missing.c++
   763  			} else {
   764  				missing.b = b
   765  				missing.c = 0
   766  			}
   767  
   768  			select {
   769  			case deferrChannel <- missing:
   770  				// read recv channel
   771  				dChannel = nil
   772  				continue
   773  			default:
   774  				lib.Warning("Mess at the link with %q. Close connection", dc.peername)
   775  				dc.cancelContext()
   776  				lib.ReleaseBuffer(b)
   777  				return
   778  			}
   779  		}
   780  
   781  		dChannel = deferrChannel
   782  
   783  		if err != nil {
   784  			lib.Warning("[%s] Malformed Dist proto at the link with %s: %s", dc.nodename, dc.peername, err)
   785  			dc.cancelContext()
   786  			lib.ReleaseBuffer(b)
   787  			return
   788  		}
   789  
   790  		if message == nil {
   791  			// fragment or proxy message
   792  			continue
   793  		}
   794  
   795  		// handle message
   796  		if err := dc.handleMessage(message); err != nil {
   797  			if message.proxy == nil {
   798  				lib.Warning("[%s] Malformed Control packet at the link with %s: %#v", dc.nodename, dc.peername, message.control)
   799  				dc.cancelContext()
   800  				lib.ReleaseBuffer(b)
   801  				return
   802  			}
   803  			// drop proxy session
   804  			lib.Warning("[%s] Malformed Control packet at the proxy link with %s: %#v", dc.nodename, message.proxy.session.PeerName, message.control)
   805  			disconnect := node.ProxyDisconnect{
   806  				Node:      dc.nodename,
   807  				Proxy:     dc.nodename,
   808  				SessionID: message.proxy.session.ID,
   809  				Reason:    err.Error(),
   810  			}
   811  			// route it locally to unregister this session
   812  			dc.router.RouteProxyDisconnect(dc, disconnect)
   813  			// send it to the peer
   814  			dc.ProxyDisconnect(disconnect)
   815  		}
   816  
   817  		atomic.AddUint64(&dc.stats.MessagesIn, 1)
   818  
   819  		// we have to release this buffer
   820  		lib.ReleaseBuffer(b)
   821  
   822  	}
   823  }
   824  
   825  func (dc *distConnection) decodePacket(b *lib.Buffer) (*distMessage, error) {
   826  	packet := b.B
   827  	if len(packet) < 5 {
   828  		return nil, fmt.Errorf("malformed packet")
   829  	}
   830  
   831  	// [:3] length
   832  	switch packet[4] {
   833  	case protoDist:
   834  		// do not check the length; it was checked when this packet was received.
   835  		control, payload, err := dc.decodeDist(packet[5:], nil)
   836  		if control == nil {
   837  			return nil, err
   838  		}
   839  		atomic.AddUint64(&dc.stats.BytesIn, uint64(b.Len()))
   840  
   841  		message := &distMessage{control: control, payload: payload}
   842  		return message, err
   843  
   844  	case protoProxy:
   845  		sessionID := string(packet[5:37])
   846  		dc.proxySessionsMutex.RLock()
   847  		ps, exist := dc.proxySessionsByID[sessionID]
   848  		dc.proxySessionsMutex.RUnlock()
   849  		if exist == false {
   850  			// must be sent further
   851  			if err := dc.router.RouteProxy(dc, sessionID, b); err != nil {
   852  				// drop proxy session
   853  				disconnect := node.ProxyDisconnect{
   854  					Node:      dc.nodename,
   855  					Proxy:     dc.nodename,
   856  					SessionID: sessionID,
   857  					Reason:    err.Error(),
   858  				}
   859  				dc.ProxyDisconnect(disconnect)
   860  				return nil, nil
   861  			}
   862  			atomic.AddUint64(&dc.stats.TransitBytesIn, uint64(b.Len()))
   863  			return nil, nil
   864  		}
   865  
   866  		// this node is endpoint of this session
   867  		packet = b.B[37:]
   868  		control, payload, err := dc.decodeDist(packet, &ps)
   869  		if err != nil {
   870  			if err == errMissingInCache {
   871  				// will be deferred.
   872  				// cut the 32-byte session id: 37 - 5 = 32, where
   873  				// 37 = packet len (4) + protoProxy (1) + session id (32),
   874  				// reserving 5 bytes for: packet len (4) + protoDist (1).
   875  				// we don't update the packet len value; it was already
   876  				// validated and will be ignored on the next
   877  				// dc.decodeDist call
   878  				b.B = b.B[32:]
   879  				b.B[4] = protoDist
   880  				return nil, err
   881  			}
   882  			// drop this proxy session. send back ProxyDisconnect
   883  			disconnect := node.ProxyDisconnect{
   884  				Node:      dc.nodename,
   885  				Proxy:     dc.nodename,
   886  				SessionID: sessionID,
   887  				Reason:    err.Error(),
   888  			}
   889  			dc.router.RouteProxyDisconnect(dc, disconnect)
   890  			dc.ProxyDisconnect(disconnect)
   891  			return nil, nil
   892  		}
   893  
   894  		atomic.AddUint64(&dc.stats.BytesIn, uint64(b.Len()))
   895  
   896  		if control == nil {
   897  			return nil, nil
   898  		}
   899  		message := &distMessage{control: control, payload: payload, proxy: &ps}
   900  		return message, nil
   901  
   902  	case protoProxyX:
   904  		sessionID := string(packet[5:37])
   905  		dc.proxySessionsMutex.RLock()
   906  		ps, exist := dc.proxySessionsByID[sessionID]
   907  		dc.proxySessionsMutex.RUnlock()
   908  		if exist == false {
   909  			// must be sent further
   910  			if err := dc.router.RouteProxy(dc, sessionID, b); err != nil {
   911  				// drop proxy session
   912  				disconnect := node.ProxyDisconnect{
   913  					Node:      dc.nodename,
   914  					Proxy:     dc.nodename,
   915  					SessionID: sessionID,
   916  					Reason:    err.Error(),
   917  				}
   918  				dc.ProxyDisconnect(disconnect)
   919  				return nil, nil
   920  			}
   921  			atomic.AddUint64(&dc.stats.TransitBytesIn, uint64(b.Len()))
   922  			return nil, nil
   923  		}
   924  
   925  		packet = b.B[37:]
   926  		if (len(packet) % aes.BlockSize) != 0 {
   927  			// drop this proxy session.
   928  			disconnect := node.ProxyDisconnect{
   929  				Node:      dc.nodename,
   930  				Proxy:     dc.nodename,
   931  				SessionID: sessionID,
   932  				Reason:    "wrong blocksize of the encrypted message",
   933  			}
   934  			dc.router.RouteProxyDisconnect(dc, disconnect)
   935  			dc.ProxyDisconnect(disconnect)
   936  			return nil, nil
   937  		}
   938  
   941  
   942  		iv := packet[:aes.BlockSize]
   943  		msg := packet[aes.BlockSize:]
   944  		cfb := cipher.NewCFBDecrypter(ps.session.Block, iv)
   945  		cfb.XORKeyStream(msg, msg)
   946  
   947  		// check padding
   948  		length := len(msg)
   949  		unpadding := int(msg[length-1])
   950  		if unpadding > length {
   951  			// drop this proxy session.
   952  			disconnect := node.ProxyDisconnect{
   953  				Node:      dc.nodename,
   954  				Proxy:     dc.nodename,
   955  				SessionID: sessionID,
   956  				Reason:    "wrong padding of the encrypted message",
   957  			}
   958  			dc.router.RouteProxyDisconnect(dc, disconnect)
   959  			dc.ProxyDisconnect(disconnect)
   960  			return nil, nil
   961  		}
   962  		packet = msg[:(length - unpadding)]
   963  		control, payload, err := dc.decodeDist(packet, &ps)
   964  		if err != nil {
   965  			if err == errMissingInCache {
   966  				// will be deferred
   967  				b.B = b.B[32+aes.BlockSize:]
   968  				b.B[4] = protoDist
   969  				return nil, err
   970  			}
   971  			// drop this proxy session.
   972  			disconnect := node.ProxyDisconnect{
   973  				Node:      dc.nodename,
   974  				Proxy:     dc.nodename,
   975  				SessionID: sessionID,
   976  				Reason:    err.Error(),
   977  			}
   978  			dc.router.RouteProxyDisconnect(dc, disconnect)
   979  			dc.ProxyDisconnect(disconnect)
   980  			return nil, nil
   981  		}
   983  		atomic.AddUint64(&dc.stats.BytesIn, uint64(b.Len()))
   984  		if control == nil {
   985  			return nil, nil
   986  		}
   987  		message := &distMessage{control: control, payload: payload, proxy: &ps}
   988  		return message, nil
   989  
   990  	default:
   991  		// unknown proto
   992  		return nil, fmt.Errorf("unknown/unsupported proto")
   993  	}
   994  
   995  }
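
        // decodePacket strips PKCS#7-style padding from encrypted proxy payloads. A
        // minimal sketch of the matching pad/unpad pair, assuming the sender pads to
        // aes.BlockSize (illustrative only; the real encoder lives elsewhere):
        func examplePad(msg []byte) []byte {
        	n := aes.BlockSize - len(msg)%aes.BlockSize
        	for i := 0; i < n; i++ {
        		msg = append(msg, byte(n))
        	}
        	return msg
        }
        
        func exampleUnpad(msg []byte) ([]byte, error) {
        	if len(msg) == 0 {
        		return nil, errMalformed
        	}
        	n := int(msg[len(msg)-1])
        	if n == 0 || n > len(msg) {
        		return nil, errMalformed
        	}
        	return msg[:len(msg)-n], nil
        }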
   996  
   997  func (dc *distConnection) decodeDist(packet []byte, proxy *proxySession) (etf.Term, etf.Term, error) {
   998  	switch packet[0] {
   999  	case protoDistMessage:
  1000  		var control, payload etf.Term
  1001  		var err error
  1002  		var cache []etf.Atom
  1003  
  1004  		cache, packet, err = dc.decodeDistHeaderAtomCache(packet[1:], proxy)
  1005  		if err != nil {
  1006  			return nil, nil, err
  1007  		}
  1008  
  1009  		decodeOptions := etf.DecodeOptions{
  1010  			AtomMapping:   dc.mapping,
  1011  			FlagBigPidRef: dc.flags.EnableBigPidRef,
  1012  		}
  1013  		if proxy != nil {
  1014  			decodeOptions.FlagBigPidRef = true
  1015  		}
  1016  
  1017  		// decode control message
  1018  		control, packet, err = etf.Decode(packet, cache, decodeOptions)
  1019  		if err != nil {
  1020  			return nil, nil, err
  1021  		}
  1022  
  1023  		if len(packet) == 0 {
  1024  			return control, nil, nil
  1025  		}
  1026  
  1027  		// decode payload message
  1028  		payload, packet, err = etf.Decode(packet, cache, decodeOptions)
  1029  		if err != nil {
  1030  			return nil, nil, err
  1031  		}
  1032  
  1033  		if len(packet) != 0 {
  1034  			return nil, nil, fmt.Errorf("packet has extra %d byte(s)", len(packet))
  1035  		}
  1036  
  1037  		return control, payload, nil
  1038  
  1039  	case protoDistMessageZ:
  1040  		var control, payload etf.Term
  1041  		var err error
  1042  		var cache []etf.Atom
  1043  		var zReader *gzip.Reader
  1044  		var total int
  1045  		// compressed protoDistMessage
  1046  
  1047  		cache, packet, err = dc.decodeDistHeaderAtomCache(packet[1:], proxy)
  1048  		if err != nil {
  1049  			return nil, nil, err
  1050  		}
  1051  
  1052  		// read the length of unpacked data
  1053  		lenUnpacked := int(binary.BigEndian.Uint32(packet[:4]))
  1054  
  1055  		// take the gzip reader from the pool
  1056  		if r, ok := gzipReaders.Get().(*gzip.Reader); ok {
  1057  			zReader = r
  1058  			zReader.Reset(bytes.NewBuffer(packet[4:]))
  1059  		} else {
  1060  			zReader, _ = gzip.NewReader(bytes.NewBuffer(packet[4:]))
  1061  		}
  1062  		defer gzipReaders.Put(zReader)
  1063  
  1064  		// take a new buffer and allocate space for the unpacked data
  1065  		zBuffer := lib.TakeBuffer()
  1066  		zBuffer.Allocate(lenUnpacked)
  1067  		defer lib.ReleaseBuffer(zBuffer)
  1068  
  1069  		// unzip the data before decoding
  1070  		for {
  1071  			n, e := zReader.Read(zBuffer.B[total:])
  1072  			if n == 0 {
  1073  				return nil, nil, fmt.Errorf("zbuffer too small")
  1074  			}
  1075  			total += n
  1076  			if e == io.EOF {
  1077  				break
  1078  			}
  1079  			if e != nil {
  1080  				return nil, nil, e
  1081  			}
  1082  		}
  1083  
  1084  		packet = zBuffer.B
  1085  		decodeOptions := etf.DecodeOptions{
  1086  			FlagBigPidRef: dc.flags.EnableBigPidRef,
  1087  		}
  1088  		if proxy != nil {
  1089  			decodeOptions.FlagBigPidRef = true
  1090  		}
  1091  
  1092  		// decode control message
  1093  		control, packet, err = etf.Decode(packet, cache, decodeOptions)
  1094  		if err != nil {
  1095  			return nil, nil, err
  1096  		}
  1097  		if len(packet) == 0 {
  1098  			return control, nil, nil
  1099  		}
  1100  
  1101  		// decode payload message
  1102  		payload, packet, err = etf.Decode(packet, cache, decodeOptions)
  1103  		if err != nil {
  1104  			return nil, nil, err
  1105  		}
  1106  
  1107  		if len(packet) != 0 {
  1108  			return nil, nil, fmt.Errorf("packet has extra %d byte(s)", len(packet))
  1109  		}
  1110  
  1111  		return control, payload, nil
  1112  
  1113  	case protoDistFragment1, protoDistFragmentN, protoDistFragment1Z, protoDistFragmentNZ:
  1114  		if len(packet) < 18 {
  1115  			return nil, nil, fmt.Errorf("malformed fragment (too small)")
  1116  		}
  1117  
  1118  		if assembled, err := dc.decodeFragment(packet, proxy); assembled != nil {
  1119  			if err != nil {
  1120  				return nil, nil, err
  1121  			}
  1122  			control, payload, err := dc.decodeDist(assembled.B, nil)
  1123  			lib.ReleaseBuffer(assembled)
  1124  			return control, payload, err
  1125  		} else {
  1126  			if err != nil {
  1127  				return nil, nil, err
  1128  			}
  1129  		}
  1130  		return nil, nil, nil
  1131  	}
  1132  
  1133  	return nil, nil, fmt.Errorf("unknown packet type %d", packet[0])
  1134  }
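
        // decodeDist reuses gzip readers through a sync.Pool whose New returns nil, so
        // a nil Get means "allocate a fresh reader". A condensed sketch of that pattern
        // (illustrative only):
        func exampleGzipReader(src []byte) (*gzip.Reader, error) {
        	if r, ok := gzipReaders.Get().(*gzip.Reader); ok {
        		return r, r.Reset(bytes.NewBuffer(src))
        	}
        	return gzip.NewReader(bytes.NewBuffer(src))
        }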
  1135  
  1136  func (dc *distConnection) handleMessage(message *distMessage) (err error) {
  1137  	defer func() {
  1138  		if lib.CatchPanic() {
  1139  			if r := recover(); r != nil {
  1140  				err = fmt.Errorf("%s", r)
  1141  			}
  1142  		}
  1143  	}()
  1144  
  1145  	switch t := message.control.(type) {
  1146  	case etf.Tuple:
  1147  		switch act := t.Element(1).(type) {
  1148  		case int:
  1149  			switch act {
  1150  			case distProtoREG_SEND:
  1151  				// {6, FromPid, Unused, ToName}
  1152  				lib.Log("[%s] CONTROL REG_SEND [from %s]: %#v", dc.nodename, dc.peername, message.control)
  1153  				to := gen.ProcessID{
  1154  					Node: dc.nodename,
  1155  					Name: string(t.Element(4).(etf.Atom)),
  1156  				}
  1157  				dc.router.RouteSendReg(t.Element(2).(etf.Pid), to, message.payload)
  1158  				return nil
  1159  
  1160  			case distProtoSEND:
  1161  				// {2, Unused, ToPid}
  1162  				// SEND has no sender pid
  1163  				lib.Log("[%s] CONTROL SEND [from %s]: %#v", dc.nodename, dc.peername, message.control)
  1164  				dc.router.RouteSend(etf.Pid{}, t.Element(3).(etf.Pid), message.payload)
  1165  				return nil
  1166  
  1167  			case distProtoLINK:
  1168  				// {1, FromPid, ToPid}
  1169  				lib.Log("[%s] CONTROL LINK [from %s]: %#v", dc.nodename, dc.peername, message.control)
  1170  				if message.proxy != nil && message.proxy.session.NodeFlags.EnableLink == false {
  1171  					// we didn't allow this feature. proxy session will be closed due to
  1172  					// this violation of the contract
  1173  					return lib.ErrPeerUnsupported
  1174  				}
  1175  				dc.router.RouteLink(t.Element(2).(etf.Pid), t.Element(3).(etf.Pid))
  1176  				return nil
  1177  
  1178  			case distProtoUNLINK:
  1179  				// {4, FromPid, ToPid}
  1180  				lib.Log("[%s] CONTROL UNLINK [from %s]: %#v", dc.nodename, dc.peername, message.control)
  1181  				if message.proxy != nil && message.proxy.session.NodeFlags.EnableLink == false {
  1182  					// we didn't allow this feature. proxy session will be closed due to
  1183  					// this violation of the contract
  1184  					return lib.ErrPeerUnsupported
  1185  				}
  1186  				dc.router.RouteUnlink(t.Element(2).(etf.Pid), t.Element(3).(etf.Pid))
  1187  				return nil
  1188  
  1189  			case distProtoNODE_LINK:
  1190  				lib.Log("[%s] CONTROL NODE_LINK [from %s]: %#v", dc.nodename, dc.peername, message.control)
  1191  				return nil
  1192  
  1193  			case distProtoEXIT:
  1194  				// {3, FromPid, ToPid, Reason}
  1195  				lib.Log("[%s] CONTROL EXIT [from %s]: %#v", dc.nodename, dc.peername, message.control)
  1196  				terminated := t.Element(2).(etf.Pid)
  1197  				to := t.Element(3).(etf.Pid)
  1198  				reason := fmt.Sprint(t.Element(4))
  1199  				dc.router.RouteExit(to, terminated, string(reason))
  1200  				return nil
  1201  
  1202  			case distProtoEXIT2:
  1203  				lib.Log("[%s] CONTROL EXIT2 [from %s]: %#v", dc.nodename, dc.peername, message.control)
  1204  				return nil
  1205  
  1206  			case distProtoMONITOR:
  1207  				// {19, FromPid, ToProc, Ref}, where FromPid = monitoring process
  1208  				// and ToProc = monitored process pid or name (atom)
  1209  				lib.Log("[%s] CONTROL MONITOR [from %s]: %#v", dc.nodename, dc.peername, message.control)
  1210  				if message.proxy != nil && message.proxy.session.NodeFlags.EnableMonitor == false {
  1211  					// we didn't allow this feature. proxy session will be closed due to
  1212  					// this violation of the contract
  1213  					return lib.ErrPeerUnsupported
  1214  				}
  1215  
  1216  				fromPid := t.Element(2).(etf.Pid)
  1217  				ref := t.Element(4).(etf.Ref)
  1218  				// if monitoring by pid
  1219  				if to, ok := t.Element(3).(etf.Pid); ok {
  1220  					dc.router.RouteMonitor(fromPid, to, ref)
  1221  					return nil
  1222  				}
  1223  
  1224  				// if monitoring by process name
  1225  				if to, ok := t.Element(3).(etf.Atom); ok {
  1226  					processID := gen.ProcessID{
  1227  						Node: dc.nodename,
  1228  						Name: string(to),
  1229  					}
  1230  					dc.router.RouteMonitorReg(fromPid, processID, ref)
  1231  					return nil
  1232  				}
  1233  
  1234  				return fmt.Errorf("malformed monitor message")
  1235  
  1236  			case distProtoDEMONITOR:
  1237  				// {20, FromPid, ToProc, Ref}, where FromPid = monitoring process
  1238  				// and ToProc = monitored process pid or name (atom)
  1239  				lib.Log("[%s] CONTROL DEMONITOR [from %s]: %#v", dc.nodename, dc.peername, message.control)
  1240  				if message.proxy != nil && message.proxy.session.NodeFlags.EnableMonitor == false {
  1241  					// we didn't allow this feature. proxy session will be closed due to
  1242  					// this violation of the contract
  1243  					return lib.ErrPeerUnsupported
  1244  				}
  1245  				ref := t.Element(4).(etf.Ref)
  1246  				fromPid := t.Element(2).(etf.Pid)
  1247  				dc.router.RouteDemonitor(fromPid, ref)
  1248  				return nil
  1249  
  1250  			case distProtoMONITOR_EXIT:
  1251  				// {21, FromProc, ToPid, Ref, Reason}, where FromProc = monitored process
  1252  				// pid or name (atom), ToPid = monitoring process, and Reason = exit reason for the monitored process
  1253  				lib.Log("[%s] CONTROL MONITOR_EXIT [from %s]: %#v", dc.nodename, dc.peername, message.control)
  1254  				reason := fmt.Sprint(t.Element(5))
  1255  				ref := t.Element(4).(etf.Ref)
  1256  				switch terminated := t.Element(2).(type) {
  1257  				case etf.Pid:
  1258  					dc.router.RouteMonitorExit(terminated, reason, ref)
  1259  					return nil
  1260  				case etf.Atom:
  1261  					processID := gen.ProcessID{Name: string(terminated), Node: dc.peername}
  1262  					if message.proxy != nil {
  1263  						processID.Node = message.proxy.session.PeerName
  1264  					}
  1265  					dc.router.RouteMonitorExitReg(processID, reason, ref)
  1266  					return nil
  1267  				}
  1268  				return fmt.Errorf("malformed monitor exit message")
  1269  
  1270  			// Not implemented yet, just stubs. TODO.
  1271  			case distProtoSEND_SENDER:
  1272  				lib.Log("[%s] CONTROL SEND_SENDER unsupported [from %s]: %#v", dc.nodename, dc.peername, message.control)
  1273  				return nil
  1274  			case distProtoPAYLOAD_EXIT:
  1275  				lib.Log("[%s] CONTROL PAYLOAD_EXIT unsupported [from %s]: %#v", dc.nodename, dc.peername, message.control)
  1276  				return nil
  1277  			case distProtoPAYLOAD_EXIT2:
  1278  				lib.Log("[%s] CONTROL PAYLOAD_EXIT2 unsupported [from %s]: %#v", dc.nodename, dc.peername, message.control)
  1279  				return nil
  1280  			case distProtoPAYLOAD_MONITOR_P_EXIT:
  1281  				lib.Log("[%s] CONTROL PAYLOAD_MONITOR_P_EXIT unsupported [from %s]: %#v", dc.nodename, dc.peername, message.control)
  1282  				return nil
  1283  
  1284  			// alias support
  1285  			case distProtoALIAS_SEND:
  1286  				// {33, FromPid, Alias}
  1287  				lib.Log("[%s] CONTROL ALIAS_SEND [from %s]: %#v", dc.nodename, dc.peername, message.control)
  1288  				alias := etf.Alias(t.Element(3).(etf.Ref))
  1289  				dc.router.RouteSendAlias(t.Element(2).(etf.Pid), alias, message.payload)
  1290  				return nil
  1291  
  1292  			case distProtoSPAWN_REQUEST:
  1293  				// {29, ReqId, From, GroupLeader, {Module, Function, Arity}, OptList}
  1294  				lib.Log("[%s] CONTROL SPAWN_REQUEST [from %s]: %#v", dc.nodename, dc.peername, message.control)
  1295  				if message.proxy != nil && message.proxy.session.NodeFlags.EnableRemoteSpawn == false {
  1296  					// we didn't allow this feature. proxy session will be closed due to
  1297  					// this violation of the contract
  1298  					return lib.ErrPeerUnsupported
  1299  				}
  1300  				registerName := ""
  1301  				for _, option := range t.Element(6).(etf.List) {
  1302  					name, ok := option.(etf.Tuple)
  1303  					if !ok || len(name) != 2 {
  1304  						return fmt.Errorf("malformed spawn request")
  1305  					}
  1306  					switch name.Element(1) {
  1307  					case etf.Atom("name"):
  1308  						registerName = string(name.Element(2).(etf.Atom))
  1309  					}
  1310  				}
  1311  
  1312  				from := t.Element(3).(etf.Pid)
  1313  				ref := t.Element(2).(etf.Ref)
  1314  
  1315  				mfa := t.Element(5).(etf.Tuple)
  1316  				module := mfa.Element(1).(etf.Atom)
  1317  				function := mfa.Element(2).(etf.Atom)
  1318  				var args etf.List
  1319  				if str, ok := message.payload.(string); !ok {
  1320  					args, _ = message.payload.(etf.List)
  1321  				} else {
  1322  					// stupid Erlang strings :). [1,2,3,4,5] arrives as a string.
  1323  					// args can't be anything but an etf.List.
  1324  					for i := range []byte(str) {
  1325  						args = append(args, str[i])
  1326  					}
  1327  				}
  1328  
  1329  				spawnRequestOptions := gen.RemoteSpawnOptions{
  1330  					Name:     registerName,
  1331  					Function: string(function),
  1332  				}
  1333  				spawnRequest := gen.RemoteSpawnRequest{
  1334  					From:    from,
  1335  					Ref:     ref,
  1336  					Options: spawnRequestOptions,
  1337  				}
  1338  				dc.router.RouteSpawnRequest(dc.nodename, string(module), spawnRequest, args...)
  1339  				return nil
  1340  
  1341  			case distProtoSPAWN_REPLY:
  1342  				// {31, ReqId, To, Flags, Result}
  1343  				lib.Log("[%s] CONTROL SPAWN_REPLY [from %s]: %#v", dc.nodename, dc.peername, message.control)
  1344  				ref := t.Element(2).(etf.Ref)
  1345  				to := t.Element(3).(etf.Pid)
  1346  				dc.router.RouteSpawnReply(to, ref, t.Element(5))
  1347  				return nil
  1348  
  1349  			case distProtoPROXY_CONNECT_REQUEST:
  1350  				// {101, ID, To, Digest, PublicKey, Flags, Hop, Path}
  1351  				lib.Log("[%s] PROXY CONNECT REQUEST [from %s]: %#v", dc.nodename, dc.peername, message.control)
  1352  				request := node.ProxyConnectRequest{
  1353  					ID:        t.Element(2).(etf.Ref),
  1354  					To:        string(t.Element(3).(etf.Atom)),
  1355  					Digest:    t.Element(4).([]byte),
  1356  					PublicKey: t.Element(5).([]byte),
  1357  					// FIXME it will be int64 after using more than 32 flags
  1358  					Flags:    proxyFlagsFromUint64(uint64(t.Element(6).(int))),
  1359  					Creation: uint32(t.Element(7).(int64)),
  1360  					Hop:      t.Element(8).(int),
  1361  				}
  1362  				for _, p := range t.Element(9).(etf.List) {
  1363  					request.Path = append(request.Path, string(p.(etf.Atom)))
  1364  				}
  1365  				if err := dc.router.RouteProxyConnectRequest(dc, request); err != nil {
  1366  					errReply := node.ProxyConnectCancel{
  1367  						ID:     request.ID,
  1368  						From:   dc.nodename,
  1369  						Reason: err.Error(),
  1370  						Path:   request.Path[1:],
  1371  					}
  1372  					dc.ProxyConnectCancel(errReply)
  1373  				}
  1374  				return nil
  1375  
  1376  			case distProtoPROXY_CONNECT_REPLY:
  1377  				// {102, ID, To, Digest, Cipher, Flags, SessionID, Path}
  1378  				lib.Log("[%s] PROXY CONNECT REPLY [from %s]: %#v", dc.nodename, dc.peername, message.control)
  1379  				connectReply := node.ProxyConnectReply{
  1380  					ID:     t.Element(2).(etf.Ref),
  1381  					To:     string(t.Element(3).(etf.Atom)),
  1382  					Digest: t.Element(4).([]byte),
  1383  					Cipher: t.Element(5).([]byte),
  1384  					// FIXME it will be int64 after using more than 32 flags
  1385  					Flags:     proxyFlagsFromUint64(uint64(t.Element(6).(int))),
  1386  					Creation:  uint32(t.Element(7).(int64)),
  1387  					SessionID: t.Element(8).(string),
  1388  				}
  1389  				for _, p := range t.Element(9).(etf.List) {
  1390  					connectReply.Path = append(connectReply.Path, string(p.(etf.Atom)))
  1391  				}
  1392  				if err := dc.router.RouteProxyConnectReply(dc, connectReply); err != nil {
  1393  					lib.Log("[%s] PROXY CONNECT REPLY error %s (message: %#v)", dc.nodename, err, connectReply)
  1394  					// send disconnect to clean up this session all the way to the
  1395  					// destination node
  1396  					disconnect := node.ProxyDisconnect{
  1397  						Node:      dc.nodename,
  1398  						Proxy:     dc.nodename,
  1399  						SessionID: connectReply.SessionID,
  1400  						Reason:    err.Error(),
  1401  					}
  1402  					dc.ProxyDisconnect(disconnect)
  1403  					if err == lib.ErrNoRoute {
  1404  						return nil
  1405  					}
  1406  
  1407  					// send cancel message to the source node
  1408  					cancel := node.ProxyConnectCancel{
  1409  						ID:     connectReply.ID,
  1410  						From:   dc.nodename,
  1411  						Reason: err.Error(),
  1412  						Path:   connectReply.Path,
  1413  					}
  1414  					dc.router.RouteProxyConnectCancel(dc, cancel)
  1415  				}
  1416  
  1417  				return nil
  1418  
  1419  			case distProtoPROXY_CONNECT_CANCEL:
  1420  				lib.Log("[%s] PROXY CONNECT CANCEL [from %s]: %#v", dc.nodename, dc.peername, message.control)
  1421  				connectError := node.ProxyConnectCancel{
  1422  					ID:     t.Element(2).(etf.Ref),
  1423  					From:   string(t.Element(3).(etf.Atom)),
  1424  					Reason: t.Element(4).(string),
  1425  				}
  1426  				for _, p := range t.Element(5).(etf.List) {
  1427  					connectError.Path = append(connectError.Path, string(p.(etf.Atom)))
  1428  				}
  1429  				dc.router.RouteProxyConnectCancel(dc, connectError)
  1430  				return nil
  1431  
  1432  			case distProtoPROXY_DISCONNECT:
  1433  				// {104, Node, Proxy, SessionID, Reason}
  1434  				lib.Log("[%s] PROXY DISCONNECT [from %s]: %#v", dc.nodename, dc.peername, message.control)
  1435  				proxyDisconnect := node.ProxyDisconnect{
  1436  					Node:      string(t.Element(2).(etf.Atom)),
  1437  					Proxy:     string(t.Element(3).(etf.Atom)),
  1438  					SessionID: t.Element(4).(string),
  1439  					Reason:    t.Element(5).(string),
  1440  				}
  1441  				dc.router.RouteProxyDisconnect(dc, proxyDisconnect)
  1442  				return nil
  1443  
  1444  			default:
  1445  				lib.Log("[%s] CONTROL unknown command [from %s]: %#v", dc.nodename, dc.peername, message.control)
  1446  				return fmt.Errorf("unknown control command %#v", message.control)
  1447  			}
  1448  		}
  1449  	}
  1450  
  1451  	return fmt.Errorf("unsupported control message %#v", message.control)
  1452  }
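
        // Every control message is an ETF tuple whose first element is the operation
        // code; a SEND control, for example, is {2, '', ToPid} and carries no sender
        // pid. A minimal dispatch sketch mirroring handleMessage (Element is 1-indexed;
        // illustrative only):
        func exampleDispatch(control etf.Tuple) (etf.Pid, bool) {
        	if act, ok := control.Element(1).(int); ok && act == distProtoSEND {
        		to, ok := control.Element(3).(etf.Pid)
        		return to, ok
        	}
        	return etf.Pid{}, false
        }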
  1453  
  1454  func (dc *distConnection) decodeFragment(packet []byte, proxy *proxySession) (*lib.Buffer, error) {
  1455  	var first, compressed bool
  1456  	var err error
  1457  
  1458  	sequenceID := binary.BigEndian.Uint64(packet[1:9])
  1459  	fragmentID := binary.BigEndian.Uint64(packet[9:17])
  1460  	if fragmentID == 0 {
  1461  		return nil, fmt.Errorf("fragmentID can't be 0")
  1462  	}
  1463  
  1464  	switch packet[0] {
  1465  	case protoDistFragment1:
  1466  		// We must decode the atom cache from the first fragment; otherwise,
  1467  		// if the next packet is not part of this fragmented packet but
  1468  		// references atom ids that were cached in the first fragment,
  1469  		// those lookups would fail.
  1470  		_, _, err = dc.decodeDistHeaderAtomCache(packet[17:], proxy)
  1471  		if err != nil {
  1472  			return nil, err
  1473  		}
  1474  		first = true
  1475  	case protoDistFragment1Z:
  1476  		_, _, err = dc.decodeDistHeaderAtomCache(packet[17:], proxy)
  1477  		if err != nil {
  1478  			return nil, err
  1479  		}
  1480  		first = true
  1481  		compressed = true
  1482  	case protoDistFragmentNZ:
  1483  		compressed = true
  1484  	}
  1485  	packet = packet[17:]
  1486  
  1487  	dc.fragmentsMutex.Lock()
  1488  	defer dc.fragmentsMutex.Unlock()
  1489  
  1490  	fragmented, ok := dc.fragments[sequenceID]
  1491  	if !ok {
  1492  		fragmented = &fragmentedPacket{
  1493  			buffer:           lib.TakeBuffer(),
  1494  			disordered:       lib.TakeBuffer(),
  1495  			disorderedSlices: make(map[uint64][]byte),
  1496  			lastUpdate:       time.Now(),
  1497  		}
  1498  
  1499  		// append new packet type
  1500  		if compressed {
  1501  			fragmented.buffer.AppendByte(protoDistMessageZ)
  1502  		} else {
  1503  			fragmented.buffer.AppendByte(protoDistMessage)
  1504  		}
  1505  		dc.fragments[sequenceID] = fragmented
  1506  	}
  1507  
  1508  	// until we get the first fragment everything will be treated as disordered
  1509  	if first {
  1510  		fragmented.fragmentID = fragmentID + 1
  1511  	}
  1512  
  1513  	if fragmented.fragmentID-fragmentID != 1 {
  1514  		// got the next fragment. disordered
  1515  		slice := fragmented.disordered.Extend(len(packet))
  1516  		copy(slice, packet)
  1517  		fragmented.disorderedSlices[fragmentID] = slice
  1518  	} else {
  1519  		// order is correct. just append
  1520  		fragmented.buffer.Append(packet)
  1521  		fragmented.fragmentID = fragmentID
  1522  	}
  1523  
  1524  	// check whether we have disordered slices and try
  1525  	// to append them if they fit
  1526  	if fragmented.fragmentID > 0 && len(fragmented.disorderedSlices) > 0 {
  1527  		for i := fragmented.fragmentID - 1; i > 0; i-- {
  1528  			if slice, ok := fragmented.disorderedSlices[i]; ok {
  1529  				fragmented.buffer.Append(slice)
  1530  				delete(fragmented.disorderedSlices, i)
  1531  				fragmented.fragmentID = i
  1532  				continue
  1533  			}
  1534  			break
  1535  		}
  1536  	}
  1537  
  1538  	fragmented.lastUpdate = time.Now()
  1539  
  1540  	if fragmented.fragmentID == 1 && len(fragmented.disorderedSlices) == 0 {
  1541  		// it was the last fragment
  1542  		delete(dc.fragments, sequenceID)
  1543  		lib.ReleaseBuffer(fragmented.disordered)
  1544  		return fragmented.buffer, nil
  1545  	}
  1546  
  1547  	if dc.checkCleanPending {
  1548  		return nil, nil
  1549  	}
  1550  
  1551  	if dc.checkCleanTimer != nil {
  1552  		dc.checkCleanTimer.Reset(dc.checkCleanTimeout)
  1553  		return nil, nil
  1554  	}
  1555  
  1556  	dc.checkCleanTimer = time.AfterFunc(dc.checkCleanTimeout, func() {
  1557  		dc.fragmentsMutex.Lock()
  1558  		defer dc.fragmentsMutex.Unlock()
  1559  
  1560  		if len(dc.fragments) == 0 {
  1561  			dc.checkCleanPending = false
  1562  			return
  1563  		}
  1564  
  1565  		valid := time.Now().Add(-dc.checkCleanDeadline)
  1566  		for sequenceID, fragmented := range dc.fragments {
  1567  			if fragmented.lastUpdate.Before(valid) {
  1568  				// dropping due to exceeded deadline
  1569  				delete(dc.fragments, sequenceID)
  1570  			}
  1571  		}
  1572  		if len(dc.fragments) == 0 {
  1573  			dc.checkCleanPending = false
  1574  			return
  1575  		}
  1576  
  1577  		dc.checkCleanPending = true
  1578  		dc.checkCleanTimer.Reset(dc.checkCleanTimeout)
  1579  	})
  1580  
  1581  	return nil, nil
  1582  }
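
// exampleFragment and exampleReassemble are a hypothetical, self-contained
// sketch (not used by the protocol code) of the reassembly strategy
// implemented in decodeFragment above: fragment IDs count down to 1, in-order
// fragments are appended directly, and out-of-order ones are parked in a map
// until the missing fragment arrives. For example, fragments arriving with
// ids 3, 1, 2 (first == 3) yield the same bytes as the ordered sequence
// 3, 2, 1.
type exampleFragment struct {
	id   uint64 // counts down to 1; the first fragment carries the highest id
	data []byte
}

func exampleReassemble(arrivals []exampleFragment, first uint64) []byte {
	var buf []byte
	parked := make(map[uint64][]byte)
	expect := first
	for _, f := range arrivals {
		if f.id != expect {
			// fragment arrived out of order: park it until the gap is filled
			parked[f.id] = f.data
			continue
		}
		buf = append(buf, f.data...)
		expect--
		// drain any parked fragments that now fit in order
		for data, ok := parked[expect]; ok; data, ok = parked[expect] {
			buf = append(buf, data...)
			delete(parked, expect)
			expect--
		}
	}
	return buf
}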
  1583  
  1584  func (dc *distConnection) decodeDistHeaderAtomCache(packet []byte, proxy *proxySession) ([]etf.Atom, []byte, error) {
  1585  	var err error
  1586  	// all the details are here https://erlang.org/doc/apps/erts/erl_ext_dist.html#normal-distribution-header
  1587  
  1588  	// number of atom references present in the packet
  1589  	references := int(packet[0])
  1590  	if references == 0 {
  1591  		return nil, packet[1:], nil
  1592  	}
  1593  
  1594  	cache := dc.cache.In
  1595  	if proxy != nil {
  1596  		cache = proxy.cache.In
  1597  	}
  1598  	cached := make([]etf.Atom, references)
  1599  	flagsLen := references/2 + 1
  1600  	if len(packet) < 1+flagsLen {
  1601  		// malformed
  1602  		return nil, nil, errMalformed
  1603  	}
  1604  	flags := packet[1 : flagsLen+1]
  1605  
  1606  	// The least significant bit in a half byte is flag LongAtoms.
  1607  	// If it is set, 2 bytes are used for atom lengths instead of 1 byte
  1608  	// in the distribution header.
  1609  	headerAtomLength := 1 // if 'LongAtom' is not set
  1610  
  1611  	// extract this bit. just increase headerAtomLength if this flag is set
  1612  	lastByte := flags[len(flags)-1]
  1613  	shift := uint((references & 0x01) * 4)
  1614  	headerAtomLength += int((lastByte >> shift) & 0x01)
  1615  
  1616  	// 1 (number of references) + references/2+1 (length of flags)
  1617  	packet = packet[1+flagsLen:]
  1618  
  1619  	for i := 0; i < references; i++ {
  1620  		if len(packet) < 1+headerAtomLength {
  1621  			// malformed
  1622  			return nil, nil, errMalformed
  1623  		}
  1624  		shift = uint((i & 0x01) * 4)
  1625  		flag := (flags[i/2] >> shift) & 0x0F
  1626  		isNewReference := flag&0x08 == 0x08
  1627  		idxReference := uint16(flag & 0x07)
  1628  		idxInternal := uint16(packet[0])
  1629  		idx := (idxReference << 8) | idxInternal
  1630  
  1631  		if isNewReference {
  1632  			atomLen := uint16(packet[1])
  1633  			if headerAtomLength == 2 {
  1634  				atomLen = binary.BigEndian.Uint16(packet[1:3])
  1635  			}
  1636  			// extract atom
  1637  			packet = packet[1+headerAtomLength:]
  1638  			if len(packet) < int(atomLen) {
  1639  				// malformed
  1640  				return nil, nil, errMalformed
  1641  			}
  1642  			atom := etf.Atom(packet[:atomLen])
  1643  			// store in temporary cache for decoding
  1644  			cached[i] = atom
  1645  
  1646  		// store in the link's cache
  1647  			cache.Atoms[idx] = &atom
  1648  			packet = packet[atomLen:]
  1649  			continue
  1650  		}
  1651  
  1652  		c := cache.Atoms[idx]
  1653  		if c == nil {
  1654  			packet = packet[1:]
  1655  			// decode the rest of this cache but set return err = errMissingInCache
  1656  			err = errMissingInCache
  1657  			continue
  1658  		}
  1659  		cached[i] = *c
  1660  		packet = packet[1:]
  1661  	}
  1662  
  1663  	return cached, packet, err
  1664  }
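
// exampleCacheFlag is a hypothetical helper (illustration only) mirroring the
// half-byte flag layout decoded above: every atom-cache reference owns one
// nibble in the flags area, even indices in the low nibble and odd indices in
// the high one; bit 3 of the nibble is the NewCacheEntryFlag and bits 0..2
// carry the SegmentIndex.
func exampleCacheFlag(flags []byte, i int) (isNewReference bool, segmentIndex uint16) {
	shift := uint((i & 0x01) * 4)
	nibble := (flags[i/2] >> shift) & 0x0F
	return nibble&0x08 == 0x08, uint16(nibble & 0x07)
}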
  1665  
  1666  func (dc *distConnection) encodeDistHeaderAtomCache(b *lib.Buffer,
  1667  	senderAtomCache map[etf.Atom]etf.CacheItem,
  1668  	encodingAtomCache *etf.EncodingAtomCache) {
  1669  
  1670  	n := encodingAtomCache.Len()
  1671  	b.AppendByte(byte(n)) // write NumberOfAtomCache
  1672  	if n == 0 {
  1673  		return
  1674  	}
  1675  
  1676  	startPosition := len(b.B)
  1677  	lenFlags := n/2 + 1
  1678  	flags := b.Extend(lenFlags)
  1679  	flags[lenFlags-1] = 0 // clear last byte to make sure we have valid LongAtom flag
  1680  
  1681  	for i := 0; i < len(encodingAtomCache.L); i++ {
  1682  		// clean internal name cache
  1683  		encodingAtomCache.Delete(encodingAtomCache.L[i].Name)
  1684  
  1685  		shift := uint((i & 0x01) * 4)
  1686  		idxReference := byte(encodingAtomCache.L[i].ID >> 8) // SegmentIndex
  1687  		idxInternal := byte(encodingAtomCache.L[i].ID & 255) // InternalSegmentIndex
  1688  
  1689  		cachedItem := senderAtomCache[encodingAtomCache.L[i].Name]
  1690  		if !cachedItem.Encoded {
  1691  			idxReference |= 8 // set NewCacheEntryFlag
  1692  		}
  1693  
  1694  		// the 'flags' slice could have changed if b.B was reallocated while encoding atoms
  1695  		flags = b.B[startPosition : startPosition+lenFlags]
  1696  		// clean it up before reuse
  1697  		if shift == 0 {
  1698  			flags[i/2] = 0
  1699  		}
  1700  		flags[i/2] |= idxReference << shift
  1701  
  1702  		if cachedItem.Encoded {
  1703  			b.AppendByte(idxInternal)
  1704  			continue
  1705  		}
  1706  
  1707  		if encodingAtomCache.HasLongAtom {
  1708  			// 1 (InternalSegmentIndex) + 2 (length) + name
  1709  			allocLen := 1 + 2 + len(encodingAtomCache.L[i].Name)
  1710  			buf := b.Extend(allocLen)
  1711  			buf[0] = idxInternal
  1712  			binary.BigEndian.PutUint16(buf[1:3], uint16(len(encodingAtomCache.L[i].Name)))
  1713  			copy(buf[3:], encodingAtomCache.L[i].Name)
  1714  		} else {
  1715  			// 1 (InternalSegmentIndex) + 1 (length) + name
  1716  			allocLen := 1 + 1 + len(encodingAtomCache.L[i].Name)
  1717  			buf := b.Extend(allocLen)
  1718  			buf[0] = idxInternal
  1719  			buf[1] = byte(len(encodingAtomCache.L[i].Name))
  1720  			copy(buf[2:], encodingAtomCache.L[i].Name)
  1721  		}
  1722  
  1723  		cachedItem.Encoded = true
  1724  		senderAtomCache[encodingAtomCache.L[i].Name] = cachedItem
  1725  	}
  1726  
  1727  	if encodingAtomCache.HasLongAtom {
  1728  		shift := uint((n & 0x01) * 4)
  1729  		flags = b.B[startPosition : startPosition+lenFlags]
  1730  		flags[lenFlags-1] |= 1 << shift // set LongAtom = 1
  1731  	}
  1732  }
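
// exampleSplitCacheID and exampleJoinCacheID are a hypothetical sketch of the
// cache-index encoding shared by the encoder above and decodeDistHeaderAtomCache:
// an atom-cache ID travels as a 3-bit SegmentIndex (packed into the flag
// nibble) plus an 8-bit InternalSegmentIndex byte, so the round trip
// exampleJoinCacheID(exampleSplitCacheID(id)) preserves any id < 2048.
func exampleSplitCacheID(id uint16) (segment, internal byte) {
	return byte(id >> 8), byte(id) // byte(id) == id & 255
}

func exampleJoinCacheID(segment, internal byte) uint16 {
	return uint16(segment&0x07)<<8 | uint16(internal)
}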
  1733  
  1734  func (dc *distConnection) sender(sender_id int, send <-chan *sendMessage, options node.ProtoOptions, peerFlags node.Flags) {
  1735  	var lenMessage, lenAtomCache, lenPacket, startDataPosition int
  1736  	var atomCacheBuffer, packetBuffer *lib.Buffer
  1737  	var err error
  1738  	var compressed bool
  1739  	var cacheEnabled, fragmentationEnabled, compressionEnabled, encryptionEnabled bool
  1740  
  1741  	// cancel the connection context if something went wrong;
  1742  	// that closes the connection and stops all
  1743  	// goroutines around it
  1744  	defer dc.cancelContext()
  1745  
  1746  	// The header atom cache is encoded right after the control/message encoding process
  1747  	// but must be stored as the first item in the packet.
  1748  	// That's why we reserve some space for it up front, to avoid
  1749  	// reallocating the packetBuffer data
  1750  	reserveHeaderAtomCache := 8192
  1751  
  1752  	// atom cache of this sender
  1753  	senderAtomCache := make(map[etf.Atom]etf.CacheItem)
  1754  	// atom cache of this encoding
  1755  	encodingAtomCache := etf.TakeEncodingAtomCache()
  1756  	defer etf.ReleaseEncodingAtomCache(encodingAtomCache)
  1757  
  1758  	encrypt := func(data []byte, sessionID string, block cipher.Block) *lib.Buffer {
  1759  		l := len(data)
  1760  		padding := aes.BlockSize - l%aes.BlockSize
  1761  		padtext := bytes.Repeat([]byte{byte(padding)}, padding)
  1762  		data = append(data, padtext...)
  1763  		l = len(data)
  1764  
  1765  		// take another buffer for encrypted message
  1766  		xBuffer := lib.TakeBuffer()
  1767  		// 4 (packet len) + 1 (protoProxyX) + 32 (sessionID) + aes.BlockSize + l
  1768  		xBuffer.Allocate(4 + 1 + 32 + aes.BlockSize + l)
  1769  
  1770  		binary.BigEndian.PutUint32(xBuffer.B, uint32(xBuffer.Len()-4))
  1771  		xBuffer.B[4] = protoProxyX
  1772  		copy(xBuffer.B[5:], sessionID)
  1773  		iv := xBuffer.B[4+1+32 : 4+1+32+aes.BlockSize]
  1774  		if _, err := io.ReadFull(crand.Reader, iv); err != nil {
  1775  			lib.ReleaseBuffer(xBuffer)
  1776  			return nil
  1777  		}
  1778  		cfb := cipher.NewCFBEncrypter(block, iv)
  1779  		cfb.XORKeyStream(xBuffer.B[4+1+32+aes.BlockSize:], data)
  1780  		return xBuffer
  1781  	}
  1782  
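	// A hypothetical decrypting counterpart (illustration only; the real
	// receiving path is implemented elsewhere) would undo the layout built by
	// encrypt: skip the 32-byte session ID, read the IV, run CFB in reverse
	// and strip the padding appended above:
	//
	//	func exampleDecrypt(payload []byte, block cipher.Block) ([]byte, error) {
	//		// payload layout: 32 (sessionID) + aes.BlockSize (IV) + ciphertext
	//		if len(payload) < 32+aes.BlockSize {
	//			return nil, errMalformed
	//		}
	//		iv := payload[32 : 32+aes.BlockSize]
	//		data := payload[32+aes.BlockSize:]
	//		cipher.NewCFBDecrypter(block, iv).XORKeyStream(data, data)
	//		pad := int(data[len(data)-1])
	//		if pad == 0 || pad > len(data) {
	//			return nil, errMalformed
	//		}
	//		return data[:len(data)-pad], nil
	//	}
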
  1783  	message := &sendMessage{}
  1784  	encodingOptions := etf.EncodeOptions{
  1785  		EncodingAtomCache: encodingAtomCache,
  1786  		AtomMapping:       dc.mapping,
  1787  		NodeName:          dc.nodename,
  1788  		PeerName:          dc.peername,
  1789  	}
  1790  
  1791  	for {
  1792  		// clean up and return the message struct to the pool
  1793  		message.packet = nil
  1794  		message.control = nil
  1795  		message.payload = nil
  1796  		message.compression = false
  1797  		message.proxy = nil
  1798  		sendMessages.Put(message)
  1799  
  1800  		// waiting for the next message
  1801  		message = <-send
  1802  
  1803  		if message == nil {
  1804  			// channel was closed
  1805  			return
  1806  		}
  1807  
  1808  		if message.packet != nil {
  1809  			// transit proxy message
  1810  			bytesOut, err := dc.flusher.Write(message.packet.B)
  1811  			if err != nil {
  1812  				return
  1813  			}
  1814  			atomic.AddUint64(&dc.stats.TransitBytesOut, uint64(bytesOut))
  1815  			lib.ReleaseBuffer(message.packet)
  1816  			continue
  1817  		}
  1818  
  1819  		atomic.AddUint64(&dc.stats.MessagesOut, 1)
  1820  
  1821  		packetBuffer = lib.TakeBuffer()
  1822  		lenMessage, lenAtomCache, lenPacket = 0, 0, 0
  1823  		startDataPosition = reserveHeaderAtomCache
  1824  
  1825  		// reserve 8K for the header; that should be enough
  1826  		packetBuffer.Allocate(reserveHeaderAtomCache)
  1827  
  1828  		// the compression feature is always available for the proxy connection;
  1829  		// check whether compression is enabled for the peer and for this message
  1830  		compressed = false
  1831  		compressionEnabled = false
  1832  		if message.compression {
  1833  			if message.proxy != nil || peerFlags.EnableCompression {
  1834  				compressionEnabled = true
  1835  			}
  1836  		}
  1837  
  1838  		cacheEnabled = false
  1839  		// atom cache feature is always available for the proxy connection
  1840  		if message.proxy != nil || peerFlags.EnableHeaderAtomCache {
  1841  			cacheEnabled = true
  1842  			encodingAtomCache.Reset()
  1843  		}
  1844  
  1845  		// fragmentation feature is always available for the proxy connection
  1846  		fragmentationEnabled = false
  1847  		if options.FragmentationUnit > 0 {
  1848  			if message.proxy != nil || peerFlags.EnableFragmentation {
  1849  				fragmentationEnabled = true
  1850  			}
  1851  		}
  1852  
  1853  		// encryption feature is only available for the proxy connection
  1854  		encryptionEnabled = false
  1855  		if message.proxy != nil && message.proxy.session.PeerFlags.EnableEncryption {
  1856  			encryptionEnabled = true
  1857  		}
  1858  
  1859  		if message.proxy == nil {
  1860  			// use connection atom cache
  1861  			encodingOptions.AtomCache = dc.cache.Out
  1862  			encodingOptions.SenderAtomCache = senderAtomCache
  1863  			// use connection flags
  1864  			encodingOptions.FlagBigCreation = peerFlags.EnableBigCreation
  1865  			encodingOptions.FlagBigPidRef = peerFlags.EnableBigPidRef
  1866  
  1867  		} else {
  1868  			// use proxy connection atom cache
  1869  			encodingOptions.AtomCache = message.proxy.cache.Out
  1870  			if message.proxy.senderCache[sender_id] == nil {
  1871  				message.proxy.senderCache[sender_id] = make(map[etf.Atom]etf.CacheItem)
  1872  			}
  1873  			encodingOptions.SenderAtomCache = message.proxy.senderCache[sender_id]
  1874  			// these flags are always enabled for the proxy connection
  1875  			encodingOptions.FlagBigCreation = true
  1876  			encodingOptions.FlagBigPidRef = true
  1877  		}
  1878  
  1879  		// We could use a gzip writer for the encoder, but we don't know
  1880  		// the actual size of the control/payload. For small data, gzipping
  1881  		// gets extremely inefficient. That's why it is cheaper to
  1882  		// encode the control/payload first and then decide whether to compress it
  1883  		// according to a threshold value.
  1884  
  1885  		// encode Control
  1886  		err = etf.Encode(message.control, packetBuffer, encodingOptions)
  1887  		if err != nil {
  1888  			lib.Warning("can not encode control message: %s", err)
  1889  			lib.ReleaseBuffer(packetBuffer)
  1890  			continue
  1891  		}
  1892  
  1893  		// encode Message if present
  1894  		if message.payload != nil {
  1895  			err = etf.Encode(message.payload, packetBuffer, encodingOptions)
  1896  			if err != nil {
  1897  				lib.Warning("can not encode payload message: %s", err)
  1898  				lib.ReleaseBuffer(packetBuffer)
  1899  				continue
  1900  			}
  1901  
  1902  		}
  1903  		lenMessage = packetBuffer.Len() - reserveHeaderAtomCache
  1904  
  1905  		if compressionEnabled && packetBuffer.Len() > (reserveHeaderAtomCache+message.compressionThreshold) {
  1906  			var zWriter *gzip.Writer
  1907  
  1908  			// take another buffer
  1909  			zBuffer := lib.TakeBuffer()
  1910  			// allocate extra 4 bytes for lenMessage (the length of the uncompressed data)
  1911  			zBuffer.Allocate(reserveHeaderAtomCache + 4)
  1912  			level := message.compressionLevel
  1913  			if level == -1 {
  1914  				level = 0
  1915  			}
  1916  			if w, ok := gzipWriters[level].Get().(*gzip.Writer); ok {
  1917  				zWriter = w
  1918  				zWriter.Reset(zBuffer)
  1919  			} else {
  1920  				zWriter, _ = gzip.NewWriterLevel(zBuffer, message.compressionLevel)
  1921  			}
  1922  			zWriter.Write(packetBuffer.B[reserveHeaderAtomCache:])
  1923  			zWriter.Close()
  1924  			gzipWriters[level].Put(zWriter)
  1925  
  1926  			// swap buffers only if the gzipped data is smaller than the original
  1927  			if zBuffer.Len() < packetBuffer.Len() {
  1928  				binary.BigEndian.PutUint32(zBuffer.B[reserveHeaderAtomCache:], uint32(lenMessage))
  1929  				lenMessage = zBuffer.Len() - reserveHeaderAtomCache
  1930  				packetBuffer, zBuffer = zBuffer, packetBuffer
  1931  				compressed = true
  1932  			}
  1933  			lib.ReleaseBuffer(zBuffer)
  1934  		}
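
		// The per-level pools in gzipWriters avoid paying gzip.NewWriterLevel's
		// allocation cost on every message. The same pattern in isolation
		// (gzPool, dst, src and level are hypothetical names for this sketch):
		//
		//	w, ok := gzPool.Get().(*gzip.Writer)
		//	if ok {
		//		w.Reset(dst)
		//	} else {
		//		w, _ = gzip.NewWriterLevel(dst, level)
		//	}
		//	w.Write(src)
		//	w.Close()
		//	gzPool.Put(w)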
  1935  
  1936  		// encode the header atom cache if it's enabled
  1937  		if cacheEnabled && encodingAtomCache.Len() > 0 {
  1938  			atomCacheBuffer = lib.TakeBuffer()
  1939  			atomCacheBuffer.Allocate(1024)
  1940  			dc.encodeDistHeaderAtomCache(atomCacheBuffer, encodingOptions.SenderAtomCache, encodingAtomCache)
  1941  
  1942  			lenAtomCache = atomCacheBuffer.Len() - 1024
  1943  			if lenAtomCache > reserveHeaderAtomCache-1024 {
  1944  				// the atom cache is too big for the reserved space
  1945  				atomCacheBuffer.Append(packetBuffer.B[startDataPosition:])
  1946  				startDataPosition = 1024
  1947  				lib.ReleaseBuffer(packetBuffer)
  1948  				packetBuffer = atomCacheBuffer
  1949  			} else {
  1950  				startDataPosition -= lenAtomCache
  1951  				copy(packetBuffer.B[startDataPosition:], atomCacheBuffer.B[1024:])
  1952  				lib.ReleaseBuffer(atomCacheBuffer)
  1953  			}
  1954  
  1955  		} else {
  1956  			lenAtomCache = 1
  1957  			startDataPosition -= lenAtomCache
  1958  			packetBuffer.B[startDataPosition] = byte(0)
  1959  		}
  1960  
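		// Wire layout of a single (non-fragmented) message, assembled
		// backwards from startDataPosition inside the loop below:
		//
		//	4 bytes  packet length (the field itself is not counted)
		//	1 byte   protoDist (131), or a protoProxy[X] wrapper for proxy messages
		//	1 byte   protoDistMessage (68) or protoDistMessageZ (200)
		//	N bytes  distribution header atom cache
		//	M bytes  control term [+ payload term]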
  1961  		for {
  1962  			// lenPacket = 1 (dist header: 131) + 1 (protoDistMessage[Z]) + lenAtomCache + lenMessage; the 4-byte length field is not counted
  1963  			lenPacket = 1 + 1 + lenAtomCache + lenMessage
  1964  			if !fragmentationEnabled || lenMessage < options.FragmentationUnit {
  1965  				// send as a single packet
  1966  				startDataPosition -= 1
  1967  				if compressed {
  1968  					packetBuffer.B[startDataPosition] = protoDistMessageZ // 200
  1969  				} else {
  1970  					packetBuffer.B[startDataPosition] = protoDistMessage // 68
  1971  				}
  1972  
  1973  				if message.proxy == nil {
  1974  					// 4 (packet len) + 1 (protoDist)
  1975  					startDataPosition -= 4 + 1
  1976  
  1977  					binary.BigEndian.PutUint32(packetBuffer.B[startDataPosition:], uint32(lenPacket))
  1978  					packetBuffer.B[startDataPosition+4] = protoDist // 131
  1979  
  1980  					bytesOut, err := dc.flusher.Write(packetBuffer.B[startDataPosition:])
  1981  					if err != nil {
  1982  						return
  1983  					}
  1984  					atomic.AddUint64(&dc.stats.BytesOut, uint64(bytesOut))
  1985  					break
  1986  				}
  1987  
  1988  				// proxy message.
  1989  				if encryptionEnabled == false {
  1990  					// no encryption
  1991  					// 4 (packet len) + protoProxy + sessionID
  1992  					startDataPosition -= 1 + 4 + 32
  1993  					l := len(packetBuffer.B[startDataPosition:]) - 4
  1994  					binary.BigEndian.PutUint32(packetBuffer.B[startDataPosition:], uint32(l))
  1995  					packetBuffer.B[startDataPosition+4] = protoProxy
  1996  					copy(packetBuffer.B[startDataPosition+5:], message.proxy.session.ID)
  1997  					bytesOut, err := dc.flusher.Write(packetBuffer.B[startDataPosition:])
  1998  					if err != nil {
  1999  						return
  2000  					}
  2001  					atomic.AddUint64(&dc.stats.BytesOut, uint64(bytesOut))
  2002  					break
  2003  				}
  2004  
  2005  				// send encrypted proxy message
  2006  				xBuffer := encrypt(packetBuffer.B[startDataPosition:],
  2007  					message.proxy.session.ID, message.proxy.session.Block)
  2008  				if xBuffer == nil {
  2009  					// can't encrypt message
  2010  					return
  2011  				}
  2012  				bytesOut, err := dc.flusher.Write(xBuffer.B)
  2013  				if err != nil {
  2014  					return
  2015  				}
  2016  				atomic.AddUint64(&dc.stats.BytesOut, uint64(bytesOut))
  2017  				lib.ReleaseBuffer(xBuffer)
  2018  				break
  2019  			}
  2020  
  2021  			// Message should be fragmented
  2022  
  2023  			// https://erlang.org/doc/apps/erts/erl_ext_dist.html#distribution-header-for-fragmented-messages
  2024  			// "The entire atom cache and control message has to be part of the starting fragment"
  2025  
  2026  			sequenceID := uint64(atomic.AddInt64(&dc.sequenceID, 1))
  2027  			numFragments := lenMessage/options.FragmentationUnit + 1
  2028  
  2029  			// 1 (dist header: 131) + 1 (dist header: protoDistFragment) + 8 (sequenceID) + 8 (fragmentID) + ...
  2030  			lenPacket = 1 + 1 + 8 + 8 + lenAtomCache + options.FragmentationUnit
  2031  
  2032  			// 4 (packet len) + 1 (dist header: 131) + 1 (dist header: protoDistFragment[Z]) + 8 (sequenceID) + 8 (fragmentID)
  2033  			startDataPosition -= 22
  2034  
  2035  			if compressed {
  2036  				packetBuffer.B[startDataPosition+5] = protoDistFragment1Z // 201
  2037  			} else {
  2038  				packetBuffer.B[startDataPosition+5] = protoDistFragment1 // 69
  2039  			}
  2040  
  2041  			binary.BigEndian.PutUint64(packetBuffer.B[startDataPosition+6:], uint64(sequenceID))
  2042  			binary.BigEndian.PutUint64(packetBuffer.B[startDataPosition+14:], uint64(numFragments))
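			// Fragment wire layout being assembled here (the 4-byte length and
			// the protoDist/proxy byte are filled in by the branches below):
			//
			//	4 bytes  packet length
			//	1 byte   protoDist (131), or a protoProxy[X] wrapper
			//	1 byte   protoDistFragment1[Z] (69/201), then protoDistFragmentN[Z] (70/202) for the rest
			//	8 bytes  sequenceID
			//	8 bytes  fragmentID (== numFragments here, counting down to 1)
			//	...      atom cache header + fragment data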
  2043  
  2044  			if message.proxy == nil {
  2045  				binary.BigEndian.PutUint32(packetBuffer.B[startDataPosition:], uint32(lenPacket))
  2046  				packetBuffer.B[startDataPosition+4] = protoDist // 131
  2047  				bytesOut, err := dc.flusher.Write(packetBuffer.B[startDataPosition : startDataPosition+4+lenPacket])
  2048  				if err != nil {
  2049  					return
  2050  				}
  2051  				atomic.AddUint64(&dc.stats.BytesOut, uint64(bytesOut))
  2052  			} else {
  2053  				// proxy message
  2054  				if encryptionEnabled == false {
  2055  					// send proxy message
  2056  					// shift left by 32 bytes to make room for the session id
  2057  					binary.BigEndian.PutUint32(packetBuffer.B[startDataPosition-32:], uint32(lenPacket+32))
  2058  					packetBuffer.B[startDataPosition-32+4] = protoProxy // 141
  2059  					copy(packetBuffer.B[startDataPosition-32+5:], message.proxy.session.ID)
  2060  					bytesOut, err := dc.flusher.Write(packetBuffer.B[startDataPosition-32 : startDataPosition+4+lenPacket])
  2061  					if err != nil {
  2062  						return
  2063  					}
  2064  					atomic.AddUint64(&dc.stats.BytesOut, uint64(bytesOut))
  2065  
  2066  				} else {
  2067  					// send encrypted proxy message
  2068  					// encryption adds padding (up to aes.BlockSize = 16 bytes), so keep the data that follows
  2069  					tail16 := [16]byte{}
  2070  					n := copy(tail16[:], packetBuffer.B[startDataPosition+4+lenPacket:])
  2071  					xBuffer := encrypt(packetBuffer.B[startDataPosition+5:startDataPosition+4+lenPacket],
  2072  						message.proxy.session.ID, message.proxy.session.Block)
  2073  					if xBuffer == nil {
  2074  						// can't encrypt message
  2075  						return
  2076  					}
  2077  					bytesOut, err := dc.flusher.Write(xBuffer.B)
  2078  					if err != nil {
  2079  						return
  2080  					}
  2081  					atomic.AddUint64(&dc.stats.BytesOut, uint64(bytesOut))
  2082  
  2083  					// restore the tail
  2084  					copy(packetBuffer.B[startDataPosition+4+lenPacket:], tail16[:n])
  2085  					lib.ReleaseBuffer(xBuffer)
  2086  				}
  2087  			}
  2088  
  2089  			startDataPosition += 4 + lenPacket
  2090  			numFragments--
  2091  
  2092  		nextFragment:
  2093  
  2094  			if len(packetBuffer.B[startDataPosition:]) > options.FragmentationUnit {
  2095  				lenPacket = 1 + 1 + 8 + 8 + options.FragmentationUnit
  2096  				// reuse the previous 22 bytes for the next frame header
  2097  				startDataPosition -= 22
  2098  
  2099  			} else {
  2100  				// the last one
  2101  				lenPacket = 1 + 1 + 8 + 8 + len(packetBuffer.B[startDataPosition:])
  2102  				startDataPosition -= 22
  2103  			}
  2104  
  2105  			if compressed {
  2106  				packetBuffer.B[startDataPosition+5] = protoDistFragmentNZ // 202
  2107  			} else {
  2108  				packetBuffer.B[startDataPosition+5] = protoDistFragmentN // 70
  2109  			}
  2110  
  2111  			binary.BigEndian.PutUint64(packetBuffer.B[startDataPosition+6:], uint64(sequenceID))
  2112  			binary.BigEndian.PutUint64(packetBuffer.B[startDataPosition+14:], uint64(numFragments))
  2113  			if message.proxy == nil {
  2114  				// send fragment
  2115  				binary.BigEndian.PutUint32(packetBuffer.B[startDataPosition:], uint32(lenPacket))
  2116  				packetBuffer.B[startDataPosition+4] = protoDist // 131
  2117  				bytesOut, err := dc.flusher.Write(packetBuffer.B[startDataPosition : startDataPosition+4+lenPacket])
  2118  				if err != nil {
  2119  					return
  2120  				}
  2121  				atomic.AddUint64(&dc.stats.BytesOut, uint64(bytesOut))
  2122  			} else {
  2123  				// wrap it as a proxy message
  2124  				if encryptionEnabled == false {
  2125  					binary.BigEndian.PutUint32(packetBuffer.B[startDataPosition-32:], uint32(lenPacket+32))
  2126  					packetBuffer.B[startDataPosition-32+4] = protoProxy // 141
  2127  					copy(packetBuffer.B[startDataPosition-32+5:], message.proxy.session.ID)
  2128  					bytesOut, err := dc.flusher.Write(packetBuffer.B[startDataPosition-32 : startDataPosition+4+lenPacket])
  2129  					if err != nil {
  2130  						return
  2131  					}
  2132  					atomic.AddUint64(&dc.stats.BytesOut, uint64(bytesOut))
  2133  				} else {
  2134  					// send encrypted proxy message
  2135  					tail16 := [16]byte{}
  2136  					n := copy(tail16[:], packetBuffer.B[startDataPosition+4+lenPacket:])
  2137  					xBuffer := encrypt(packetBuffer.B[startDataPosition+5:startDataPosition+4+lenPacket],
  2138  						message.proxy.session.ID, message.proxy.session.Block)
  2139  					if xBuffer == nil {
  2140  						// can't encrypt message
  2141  						return
  2142  					}
  2143  					bytesOut, err := dc.flusher.Write(xBuffer.B)
  2144  					if err != nil {
  2145  						return
  2146  					}
  2147  					atomic.AddUint64(&dc.stats.BytesOut, uint64(bytesOut))
  2148  					// restore the tail
  2149  					copy(packetBuffer.B[startDataPosition+4+lenPacket:], tail16[:n])
  2150  					lib.ReleaseBuffer(xBuffer)
  2151  				}
  2152  			}
  2153  
  2154  			startDataPosition += 4 + lenPacket
  2155  			numFragments--
  2156  			if numFragments > 0 {
  2157  				goto nextFragment
  2158  			}
  2159  
  2160  			// done
  2161  			break
  2162  		}
  2163  
  2164  		lib.ReleaseBuffer(packetBuffer)
  2165  
  2166  		if cacheEnabled == false {
  2167  			continue
  2168  		}
  2169  
  2170  		// get updates from the connection AtomCache and update the sender's cache (senderAtomCache)
  2171  		lastAddedAtom, lastAddedID := encodingOptions.AtomCache.LastAdded()
  2172  		if lastAddedID < 0 {
  2173  			continue
  2174  		}
  2175  		if _, exist := encodingOptions.SenderAtomCache[lastAddedAtom]; exist {
  2176  			continue
  2177  		}
  2178  
  2179  		encodingOptions.AtomCache.RLock()
  2180  		for _, a := range encodingOptions.AtomCache.ListSince(lastAddedID) {
  2181  			encodingOptions.SenderAtomCache[a] = etf.CacheItem{ID: lastAddedID, Name: a, Encoded: false}
  2182  			lastAddedID++
  2183  		}
  2184  		encodingOptions.AtomCache.RUnlock()
  2185  
  2186  	}
  2187  
  2188  }
  2189  
  2190  func (dc *distConnection) send(to string, creation uint32, msg *sendMessage) error {
  2191  	i := atomic.AddInt32(&dc.senders.i, 1)
  2192  	n := i % dc.senders.n
  2193  	s := dc.senders.sender[n]
  2194  	if s == nil {
  2195  		// connection was closed
  2196  		return lib.ErrNoRoute
  2197  	}
  2198  	dc.proxySessionsMutex.RLock()
  2199  	ps, isProxy := dc.proxySessionsByPeerName[to]
  2200  	dc.proxySessionsMutex.RUnlock()
  2201  	peer_creation := dc.creation
  2202  	if isProxy {
  2203  		msg.proxy = &ps
  2204  		peer_creation = ps.session.Creation
  2205  	} else {
  2206  		// it's a direct send, so make sure this peer supports compression
  2207  		if dc.flags.EnableCompression == false {
  2208  			msg.compression = false
  2209  		}
  2210  	}
  2211  
  2212  	// if this peer is Erlang OTP 22 (or earlier), peer_creation is always 0, so we
  2213  	// must skip this check.
  2214  	if creation > 0 && peer_creation > 0 && peer_creation != creation {
  2215  		return lib.ErrProcessIncarnation
  2216  	}
  2217  
  2218  	// TODO: decide whether to return an error if the channel is full
  2219  	//select {
  2220  	//case s.sendChannel <- msg:
  2221  	//	return nil
  2222  	//default:
  2223  	//	return ErrOverloadConnection
  2224  	//}
  2225  
  2226  	s.Lock()
  2227  	defer s.Unlock()
  2228  
  2229  	s.sendChannel <- msg
  2230  	return nil
  2231  }
  2232  
  2233  func (dc *distConnection) Stats() node.NetworkStats {
  2234  	return dc.stats
  2235  }
  2236  
  2237  func proxyFlagsToUint64(pf node.ProxyFlags) uint64 {
  2238  	var flags uint64
  2239  	if pf.EnableLink {
  2240  		flags |= 1
  2241  	}
  2242  	if pf.EnableMonitor {
  2243  		flags |= 1 << 1
  2244  	}
  2245  	if pf.EnableRemoteSpawn {
  2246  		flags |= 1 << 2
  2247  	}
  2248  	if pf.EnableEncryption {
  2249  		flags |= 1 << 3
  2250  	}
  2251  	return flags
  2252  }
  2253  
  2254  func proxyFlagsFromUint64(f uint64) node.ProxyFlags {
  2255  	var flags node.ProxyFlags
  2256  	flags.EnableLink = f&1 > 0
  2257  	flags.EnableMonitor = f&(1<<1) > 0
  2258  	flags.EnableRemoteSpawn = f&(1<<2) > 0
  2259  	flags.EnableEncryption = f&(1<<3) > 0
  2260  	return flags
  2261  }
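
// exampleProxyFlagsRoundTrip is a hypothetical check (illustration only) of
// the bit layout used by the two converters above: link is bit 0, monitor
// bit 1, remote spawn bit 2 and encryption bit 3, so converting there and
// back preserves every flag.
func exampleProxyFlagsRoundTrip(pf node.ProxyFlags) bool {
	rt := proxyFlagsFromUint64(proxyFlagsToUint64(pf))
	return rt.EnableLink == pf.EnableLink &&
		rt.EnableMonitor == pf.EnableMonitor &&
		rt.EnableRemoteSpawn == pf.EnableRemoteSpawn &&
		rt.EnableEncryption == pf.EnableEncryption
}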