/*
memberlist is a library that manages cluster
membership and member failure detection using a gossip based protocol.

The use cases for such a library are far-reaching: all distributed systems
require membership, and memberlist is a reusable solution to managing
cluster membership and node failure detection.

memberlist is eventually consistent but converges quickly on average.
The speed at which it converges can be heavily tuned via various knobs
on the protocol. Node failures are detected and network partitions are partially
tolerated by attempting to communicate to potentially dead nodes through
multiple routes.
*/
package memberlist

import (
	"container/list"
	"errors"
	"fmt"
	"log"
	"net"
	"os"
	"strconv"
	"strings"
	"sync"
	"sync/atomic"
	"time"

	multierror "github.com/hashicorp/go-multierror"

	"github.com/unionj-cloud/go-doudou/v2/toolkit/stringutils"
)

var errNodeNamesAreRequired = errors.New("memberlist: node names are required by configuration but one was not provided")

type Memberlist struct {
	sequenceNum uint32 // Local sequence number
	incarnation uint32 // Local incarnation number
	numNodes    uint32 // Number of known nodes (estimate)
	pushPullReq uint32 // Number of push/pull requests

	advertiseLock sync.RWMutex
	advertiseAddr string
	advertisePort uint16

	config         *Config
	shutdown       int32 // Used as an atomic boolean value
	shutdownCh     chan struct{}
	leave          int32 // Used as an atomic boolean value
	leaveBroadcast chan struct{}

	shutdownLock sync.Mutex // Serializes calls to Shutdown
	leaveLock    sync.Mutex // Serializes calls to Leave

	transport NodeAwareTransport

	handoffCh            chan struct{}
	highPriorityMsgQueue *list.List
	lowPriorityMsgQueue  *list.List
	msgQueueLock         sync.Mutex

	nodeLock   sync.RWMutex
	nodes      []*nodeState          // Known nodes
	nodeMap    map[string]*nodeState // Maps Node.Name -> NodeState
	nodeTimers map[string]*suspicion // Maps Node.Name -> suspicion timer
	awareness  *awareness

	tickerLock sync.Mutex
	tickers    []*time.Ticker
	stopTick   chan struct{}
	probeIndex int

	ackLock     sync.Mutex
	ackHandlers map[uint32]*ackHandler

	broadcasts *TransmitLimitedQueue

	logger *log.Logger
}

func (m *Memberlist) NodeMap() map[string]*nodeState {
	return m.nodeMap
}

func (m *Memberlist) AdvertiseAddr() string {
	return m.advertiseAddr
}

func (m *Memberlist) AdvertisePort() uint16 {
	return m.advertisePort
}

func (m *Memberlist) Nodes() []*nodeState {
	return m.nodes
}

func (m *Memberlist) SetNodes(nodes ...*nodeState) {
	m.nodes = nodes
}

func (m *Memberlist) Config() *Config {
	return m.config
}

// BuildVsnArray builds the six-element protocol version array (Vsn) that is
// exchanged in alive messages: the supported protocol range and current
// version, followed by the delegate protocol range and current version.
func (conf *Config) BuildVsnArray() []uint8 {
	return []uint8{
		ProtocolVersionMin, ProtocolVersionMax, conf.ProtocolVersion,
		conf.DelegateProtocolMin, conf.DelegateProtocolMax,
		conf.DelegateProtocolVersion,
	}
}
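
// Index layout of the Vsn array built above (a reading aid, not part of the
// original source; the values come straight from the configuration):
//
//	vsn := conf.BuildVsnArray()
//	// vsn[0], vsn[1]: ProtocolVersionMin, ProtocolVersionMax
//	// vsn[2]:         conf.ProtocolVersion (currently in use)
//	// vsn[3], vsn[4]: conf.DelegateProtocolMin, conf.DelegateProtocolMax
//	// vsn[5]:         conf.DelegateProtocolVersion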

// NewMemberlist creates the network listeners.
// Does not schedule execution of background maintenance.
func NewMemberlist(conf *Config) (*Memberlist, error) {
	if conf.ProtocolVersion < ProtocolVersionMin {
		return nil, fmt.Errorf("Protocol version '%d' too low. Must be in range: [%d, %d]",
			conf.ProtocolVersion, ProtocolVersionMin, ProtocolVersionMax)
	} else if conf.ProtocolVersion > ProtocolVersionMax {
		return nil, fmt.Errorf("Protocol version '%d' too high. Must be in range: [%d, %d]",
			conf.ProtocolVersion, ProtocolVersionMin, ProtocolVersionMax)
	}

	if len(conf.SecretKey) > 0 {
		if conf.Keyring == nil {
			keyring, err := NewKeyring(nil, conf.SecretKey)
			if err != nil {
				return nil, err
			}
			conf.Keyring = keyring
		} else {
			if err := conf.Keyring.AddKey(conf.SecretKey); err != nil {
				return nil, err
			}
			if err := conf.Keyring.UseKey(conf.SecretKey); err != nil {
				return nil, err
			}
		}
	}

	if conf.LogOutput != nil && conf.Logger != nil {
		return nil, fmt.Errorf("Cannot specify both LogOutput and Logger. Please choose a single log configuration setting.")
	}

	logDest := conf.LogOutput
	if logDest == nil {
		logDest = os.Stderr
	}

	logger := conf.Logger
	if logger == nil {
		logger = log.New(logDest, "", log.LstdFlags)
	}

	// Set up a network transport by default if a custom one wasn't given
	// by the config.
	transport := conf.Transport
	if transport == nil {
		nc := &NetTransportConfig{
			BindAddrs: []string{conf.BindAddr},
			BindPort:  conf.BindPort,
			Logger:    logger,
		}

		// See comment below for details about the retry in here.
		makeNetRetry := func(limit int) (*NetTransport, error) {
			var err error
			for try := 0; try < limit; try++ {
				var nt *NetTransport
				if nt, err = NewNetTransport(nc); err == nil {
					return nt, nil
				}
				if strings.Contains(err.Error(), "address already in use") {
					logger.Printf("[DEBUG] memberlist: Got bind error: %v", err)
					continue
				}
			}

			return nil, fmt.Errorf("failed to obtain an address: %v", err)
		}

		// The dynamic bind port operation is inherently racy because
		// even though we are using the kernel to find a port for us, we
		// are attempting to bind multiple protocols (and potentially
		// multiple addresses) with the same port number. We build in a
		// few retries here since this often gets transient errors in
		// busy unit tests.
		limit := 1
		if conf.BindPort == 0 {
			limit = 10
		}

		nt, err := makeNetRetry(limit)
		if err != nil {
			return nil, fmt.Errorf("Could not set up network transport: %v", err)
		}
		if conf.BindPort == 0 {
			port := nt.GetAutoBindPort()
			conf.BindPort = port
			conf.AdvertisePort = port
			logger.Printf("[DEBUG] memberlist: Using dynamic bind port %d", port)
		}
		transport = nt
	}

	nodeAwareTransport, ok := transport.(NodeAwareTransport)
	if !ok {
		logger.Printf("[DEBUG] memberlist: configured Transport is not a NodeAwareTransport and some features may not work as desired")
		nodeAwareTransport = &shimNodeAwareTransport{transport}
	}

	m := &Memberlist{
		config:               conf,
		shutdownCh:           make(chan struct{}),
		leaveBroadcast:       make(chan struct{}, 1),
		transport:            nodeAwareTransport,
		handoffCh:            make(chan struct{}, 1),
		highPriorityMsgQueue: list.New(),
		lowPriorityMsgQueue:  list.New(),
		nodeMap:              make(map[string]*nodeState),
		nodeTimers:           make(map[string]*suspicion),
		awareness:            newAwareness(conf.AwarenessMaxMultiplier),
		ackHandlers:          make(map[uint32]*ackHandler),
		broadcasts: &TransmitLimitedQueue{RetransmitMultGetter: func() int {
			return conf.RetransmitMult
		}},
		logger: logger,
	}
	m.broadcasts.NumNodes = func() int {
		return m.estNumNodes()
	}

	// Get the final advertise address from the transport, which may need
	// to see which address we bound to. We'll refresh this each time we
	// send out an alive message.
	if _, _, err := m.refreshAdvertise(); err != nil {
		return nil, err
	}

	go m.streamListen()
	go m.packetListen()
	go m.packetHandler()
	return m, nil
}
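
// A construction sketch (illustrative; assumes the package's DefaultLANConfig
// helper). Setting BindPort to 0 exercises the dynamic-port retry path above,
// and the kernel-chosen port is copied into both BindPort and AdvertisePort:
//
//	conf := DefaultLANConfig()
//	conf.Name = "node-1" // placeholder name
//	conf.BindAddr = "127.0.0.1"
//	conf.BindPort = 0 // ask the kernel for a free port
//	m, err := NewMemberlist(conf)
//	if err != nil {
//		panic(err)
//	}
//	defer m.Shutdown()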

// Create will create a new Memberlist using the given configuration.
// This will not connect to any other node (see Join) yet, but will start
// all the listeners to allow other nodes to join this memberlist.
// After creating a Memberlist, the configuration given should not be
// modified by the user anymore.
func Create(conf *Config) (*Memberlist, error) {
	m, err := NewMemberlist(conf)
	if err != nil {
		return nil, err
	}
	if err := m.setAlive(); err != nil {
		m.Shutdown()
		return nil, err
	}
	m.schedule()
	return m, nil
}
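
// A typical bootstrap sketch (illustrative; the seed address is a
// placeholder): Create starts the listeners and marks this node alive, then
// Join contacts one or more existing members to perform a state sync.
//
//	m, err := Create(DefaultLANConfig())
//	if err != nil {
//		panic(err)
//	}
//	n, err := m.Join([]string{"10.0.0.2:7946"})
//	if err != nil {
//		panic(err) // none of the seed hosts could be reached
//	}
//	fmt.Printf("contacted %d seed node(s)\n", n)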

// Join is used to take an existing Memberlist and attempt to join a cluster
// by contacting all the given hosts and performing a state sync. Initially,
// the Memberlist only contains our own state, so doing this will cause
// remote nodes to become aware of the existence of this node, effectively
// joining the cluster.
//
// This returns the number of hosts successfully contacted and an error if
// none could be reached. If an error is returned, the node did not successfully
// join the cluster.
func (m *Memberlist) Join(existing []string) (int, error) {
	numSuccess := 0
	var errs error
	for _, exist := range existing {
		addrs, err := m.ResolveAddr(exist)
		if err != nil {
			err = fmt.Errorf("Failed to resolve %s: %v", exist, err)
			errs = multierror.Append(errs, err)
			m.logger.Printf("[WARN] memberlist: %v", err)
			continue
		}

		for _, addr := range addrs {
			hp := joinHostPort(addr.ip, addr.port)
			a := Address{Addr: hp, Name: addr.nodeName}
			if err := m.pushPullNode(a, true); err != nil {
				err = fmt.Errorf("Failed to join %s: %v", addr.ip, err)
				errs = multierror.Append(errs, err)
				m.logger.Printf("[DEBUG] memberlist: %v", err)
				continue
			}
			numSuccess++
		}
	}
	if numSuccess > 0 {
		errs = nil
	}
	return numSuccess, errs
}

// IpPort holds information about a node we want to try to join.
type IpPort struct {
	ip       string
	port     uint16
	nodeName string // optional
}

func (i *IpPort) SetIp(ip string) {
	i.ip = ip
}

func (i *IpPort) SetPort(port uint16) {
	i.port = port
}

func (i *IpPort) SetNodeName(nodeName string) {
	i.nodeName = nodeName
}

func (i IpPort) Ip() string {
	return i.ip
}

func (i IpPort) Port() uint16 {
	return i.port
}

func (i IpPort) NodeName() string {
	return i.nodeName
}

func NewIpPort(ip string, port uint16, nodeName string) IpPort {
	return IpPort{ip: ip, port: port, nodeName: nodeName}
}

// ResolveAddr is used to resolve a host string into a list of IpPort values.
// The host string may optionally be prefixed with a node name followed by a
// slash ("name/host:port"). If no port is given, the configured BindPort is
// used as the default.
func (m *Memberlist) ResolveAddr(hostStr string) ([]IpPort, error) {
	// First peel off any leading node name. This is optional.
	nodeName := ""
	if slashIdx := strings.Index(hostStr, "/"); slashIdx >= 0 {
		if slashIdx == 0 {
			return nil, fmt.Errorf("empty node name provided")
		}
		nodeName = hostStr[0:slashIdx]
		hostStr = hostStr[slashIdx+1:]
	}

	// This captures the supplied port, or the default one.
	hostStr = ensurePort(hostStr, m.config.BindPort)
	host, sport, err := net.SplitHostPort(hostStr)
	if err != nil {
		return nil, err
	}
	if stringutils.IsEmpty(host) {
		return nil, errors.New("host should not be empty")
	}
	lport, err := strconv.ParseUint(sport, 10, 16)
	if err != nil {
		return nil, err
	}
	port := uint16(lport)

	return []IpPort{
		{ip: host, port: port, nodeName: nodeName},
	}, nil
}
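
// Address forms accepted by ResolveAddr, per the parsing above (values are
// placeholders):
//
//	addrs, err := m.ResolveAddr("10.0.0.2:7946")  // explicit host and port
//	addrs, err = m.ResolveAddr("10.0.0.2")        // port defaults to conf.BindPort
//	addrs, err = m.ResolveAddr("node-2/10.0.0.2") // optional "name/" prefix sets the node name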

// setAlive is used to mark this node as being alive. This is the same
// as if we received an alive notification on our own network channel for
// ourself.
func (m *Memberlist) setAlive() error {
	// Get the final advertise address from the transport, which may need
	// to see which address we bound to.
	addr, port, err := m.refreshAdvertise()
	if err != nil {
		return err
	}

	// Set any metadata from the delegate.
	var meta []byte
	if m.config.Delegate != nil {
		meta = m.config.Delegate.NodeMeta(MetaMaxSize)
		if len(meta) > MetaMaxSize {
			panic("Node meta data provided is longer than the limit")
		}
	}

	a := alive{
		Incarnation: m.nextIncarnation(),
		Node:        m.config.Name,
		Addr:        addr,
		Port:        uint16(port),
		Meta:        meta,
		Vsn:         m.config.BuildVsnArray(),
	}
	m.aliveNode(&a, nil, true)

	return nil
}

func (m *Memberlist) getAdvertise() (string, uint16) {
	m.advertiseLock.RLock()
	defer m.advertiseLock.RUnlock()
	return m.advertiseAddr, m.advertisePort
}

func (m *Memberlist) setAdvertise(addr string, port int) {
	m.advertiseLock.Lock()
	defer m.advertiseLock.Unlock()
	m.advertiseAddr = addr
	m.advertisePort = uint16(port)
}

func (m *Memberlist) refreshAdvertise() (string, int, error) {
	addr, port, err := m.transport.FinalAdvertiseAddr(
		m.config.AdvertiseAddr, m.config.AdvertisePort)
	if err != nil {
		return "", 0, fmt.Errorf("Failed to get final advertise address: %v", err)
	}
	m.setAdvertise(addr, port)
	return addr, port, nil
}

// LocalNode is used to return the local Node
func (m *Memberlist) LocalNode() *Node {
	m.nodeLock.RLock()
	defer m.nodeLock.RUnlock()
	state := m.nodeMap[m.config.Name]
	return &state.Node
}

// UpdateNode is used to trigger re-advertising the local node. This is
// primarily used with a Delegate to support dynamic updates to the local
// metadata. This will block until the update message is successfully
// broadcasted to a member of the cluster, if any exist, or until a specified
// timeout is reached.
func (m *Memberlist) UpdateNode(timeout time.Duration) error {
	// Get the node meta data
	var meta []byte
	if m.config.Delegate != nil {
		meta = m.config.Delegate.NodeMeta(MetaMaxSize)
		if len(meta) > MetaMaxSize {
			panic("Node meta data provided is longer than the limit")
		}
	}

	// Get the existing node
	m.nodeLock.RLock()
	state := m.nodeMap[m.config.Name]
	m.nodeLock.RUnlock()

	// Format a new alive message
	a := alive{
		Incarnation: m.nextIncarnation(),
		Node:        m.config.Name,
		Addr:        state.Addr,
		Port:        state.Port,
		Meta:        meta,
		Vsn:         m.config.BuildVsnArray(),
	}
	notifyCh := make(chan struct{})
	m.aliveNode(&a, notifyCh, true)

	// Wait for the broadcast or a timeout
	if m.anyAlive() {
		var timeoutCh <-chan time.Time
		if timeout > 0 {
			timeoutCh = time.After(timeout)
		}
		select {
		case <-notifyCh:
		case <-timeoutCh:
			return fmt.Errorf("timeout waiting for update broadcast")
		}
	}
	return nil
}
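
// A metadata update sketch (illustrative): after changing whatever state
// backs the Delegate's NodeMeta, UpdateNode re-broadcasts the local alive
// message so peers pick up the new metadata, or returns an error on timeout.
//
//	// ... mutate the state your Delegate.NodeMeta reads from ...
//	if err := m.UpdateNode(10 * time.Second); err != nil {
//		log.Printf("metadata update not acknowledged: %v", err)
//	}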

func (m *Memberlist) SendToAddress(a Address, msg []byte) error {
	// Encode as a user message
	buf := make([]byte, 1, len(msg)+1)
	buf[0] = byte(userMsg)
	buf = append(buf, msg...)

	// Send the message
	return m.rawSendMsgPacket(a, nil, buf)
}

// SendBestEffort uses the unreliable packet-oriented interface of the transport
// to target a user message at the given node (this does not use the gossip
// mechanism). The maximum size of the message depends on the configured
// UDPBufferSize for this memberlist instance.
func (m *Memberlist) SendBestEffort(to *Node, msg []byte) error {
	// Encode as a user message
	buf := make([]byte, 1, len(msg)+1)
	buf[0] = byte(userMsg)
	buf = append(buf, msg...)

	// Send the message
	a := Address{Addr: to.Address(), Name: to.Name}
	return m.rawSendMsgPacket(a, to, buf)
}

// SendReliable uses the reliable stream-oriented interface of the transport to
// target a user message at the given node (this does not use the gossip
// mechanism). Delivery is guaranteed if no error is returned, and there is no
// limit on the size of the message.
func (m *Memberlist) SendReliable(to *Node, msg []byte) error {
	return m.sendUserMsg(to.FullAddress(), msg)
}

// Members returns a list of all known live nodes. The node structures
// returned must not be modified. If you wish to modify a Node, make a
// copy first.
func (m *Memberlist) Members() []*Node {
	m.nodeLock.RLock()
	defer m.nodeLock.RUnlock()

	nodes := make([]*Node, 0, len(m.nodes))
	for _, n := range m.nodes {
		if !n.DeadOrLeft() {
			nodes = append(nodes, &n.Node)
		}
	}

	return nodes
}

// NumMembers returns the number of alive nodes currently known. Between
// the time of calling this and calling Members, the number of alive nodes
// may have changed, so this shouldn't be used to determine how many
// members will be returned by Members.
func (m *Memberlist) NumMembers() (alive int) {
	m.nodeLock.RLock()
	defer m.nodeLock.RUnlock()

	for _, n := range m.nodes {
		if !n.DeadOrLeft() {
			alive++
		}
	}

	return
}

// Leave will broadcast a leave message but will not shut down the background
// listeners, meaning the node will continue participating in gossip and state
// updates.
//
// This will block until the leave message is successfully broadcasted to
// a member of the cluster, if any exist, or until a specified timeout
// is reached.
//
// This method is safe to call multiple times, but must not be called
// after the cluster is already shut down.
func (m *Memberlist) Leave(timeout time.Duration) error {
	m.leaveLock.Lock()
	defer m.leaveLock.Unlock()

	if m.hasShutdown() {
		panic("leave after shutdown")
	}

	if !m.hasLeft() {
		atomic.StoreInt32(&m.leave, 1)

		m.nodeLock.Lock()
		state, ok := m.nodeMap[m.config.Name]
		m.nodeLock.Unlock()
		if !ok {
			m.logger.Printf("[WARN] memberlist: Leave but we're not in the node map.")
			return nil
		}

		// This dead message is special, because Node and From are the
		// same. This helps other nodes figure out that a node left
		// intentionally. When Node equals From, other nodes know for
		// sure this node is gone.
		d := dead{
			Incarnation: state.Incarnation,
			Node:        state.Name,
			From:        state.Name,
		}
		m.deadNode(&d)

		// Block until the broadcast goes out
		if m.anyAlive() {
			var timeoutCh <-chan time.Time
			if timeout > 0 {
				timeoutCh = time.After(timeout)
			}
			select {
			case <-m.leaveBroadcast:
			case <-timeoutCh:
				return fmt.Errorf("timeout waiting for leave broadcast")
			}
		}
	}

	return nil
}

// Check for any other alive node.
func (m *Memberlist) anyAlive() bool {
	m.nodeLock.RLock()
	defer m.nodeLock.RUnlock()
	for _, n := range m.nodes {
		if !n.DeadOrLeft() && n.Name != m.config.Name {
			return true
		}
	}
	return false
}
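
// A direct-messaging sketch (illustrative): SendBestEffort sends a single
// unreliable packet, while SendReliable opens a stream and guarantees
// delivery when it returns nil.
//
//	for _, node := range m.Members() {
//		if node.Name == m.LocalNode().Name {
//			continue // skip ourselves
//		}
//		if err := m.SendReliable(node, []byte("hello")); err != nil {
//			log.Printf("send to %s failed: %v", node.Name, err)
//		}
//	}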

// GetHealthScore gives this instance's idea of how well it is meeting the soft
// real-time requirements of the protocol. Lower numbers are better, and zero
// means "totally healthy".
func (m *Memberlist) GetHealthScore() int {
	return m.awareness.GetHealthScore()
}

// ProtocolVersion returns the protocol version currently in use by
// this memberlist.
func (m *Memberlist) ProtocolVersion() uint8 {
	// NOTE: This method exists so that in the future we can control
	// any locking if necessary, if we change the protocol version at
	// runtime, etc.
	return m.config.ProtocolVersion
}

// Shutdown will stop any background maintenance of network activity
// for this memberlist, causing it to appear "dead". A leave message
// will not be broadcasted prior, so the cluster being left will have
// to detect this node's shutdown using probing. If you wish to more
// gracefully exit the cluster, call Leave prior to shutting down.
//
// This method is safe to call multiple times.
func (m *Memberlist) Shutdown() error {
	m.shutdownLock.Lock()
	defer m.shutdownLock.Unlock()

	if m.hasShutdown() {
		return nil
	}

	// Shut down the transport first, which should block until it's
	// completely torn down. If we kill the memberlist-side handlers
	// those I/O handlers might get stuck.
	if err := m.transport.Shutdown(); err != nil {
		m.logger.Printf("[ERR] Failed to shutdown transport: %v", err)
	}

	// Now tear down everything else.
	atomic.StoreInt32(&m.shutdown, 1)
	close(m.shutdownCh)
	m.deschedule()
	return nil
}

func (m *Memberlist) hasShutdown() bool {
	return atomic.LoadInt32(&m.shutdown) == 1
}

func (m *Memberlist) hasLeft() bool {
	return atomic.LoadInt32(&m.leave) == 1
}

func (m *Memberlist) getNodeState(addr string) NodeStateType {
	m.nodeLock.RLock()
	defer m.nodeLock.RUnlock()

	n := m.nodeMap[addr]
	return n.State
}

func (m *Memberlist) getNodeStateChange(addr string) time.Time {
	m.nodeLock.RLock()
	defer m.nodeLock.RUnlock()

	n := m.nodeMap[addr]
	return n.StateChange
}

func (m *Memberlist) changeNode(addr string, f func(*nodeState)) {
	m.nodeLock.Lock()
	defer m.nodeLock.Unlock()

	n := m.nodeMap[addr]
	f(n)
}

// encodeAndBroadcast encodes a message and enqueues it for broadcast. Fails
// silently if there is an encoding error.
func (m *Memberlist) encodeAndBroadcast(node string, msgType messageType, msg interface{}) {
	m.encodeBroadcastNotify(node, msgType, msg, nil)
}

// encodeBroadcastNotify encodes a message and enqueues it for broadcast
// and notifies the given channel when transmission is finished. Fails
// silently if there is an encoding error.
func (m *Memberlist) encodeBroadcastNotify(node string, msgType messageType, msg interface{}, notify chan struct{}) {
	buf, err := encode(msgType, msg)
	if err != nil {
		m.logger.Printf("[ERR] memberlist: Failed to encode message for broadcast: %s", err)
	} else {
		m.queueBroadcast(node, buf.Bytes(), notify)
	}
}
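
// A graceful-exit sketch (illustrative): broadcast the intent to leave, then
// tear down the listeners. Leave must come first, since calling it after
// Shutdown panics.
//
//	if err := m.Leave(5 * time.Second); err != nil {
//		log.Printf("leave broadcast timed out: %v", err)
//	}
//	if err := m.Shutdown(); err != nil {
//		log.Printf("shutdown failed: %v", err)
//	}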

// queueBroadcast is used to start dissemination of a message. It will be
// sent up to a configured number of times. The message could potentially
// be invalidated by a future message about the same node.
func (m *Memberlist) queueBroadcast(node string, msg []byte, notify chan struct{}) {
	b := &memberlistBroadcast{node, msg, notify}
	m.broadcasts.QueueBroadcast(b)
}

// getBroadcasts is used to return a slice of broadcasts to send up to
// a maximum byte size, while imposing a per-broadcast overhead. This is used
// to fill a UDP packet with piggybacked data.
func (m *Memberlist) getBroadcasts(overhead, limit int) [][]byte {
	// Get memberlist messages first
	toSend := m.broadcasts.GetBroadcasts(overhead, limit)

	// Check if the user has anything to broadcast
	d := m.config.Delegate
	if d != nil {
		// Determine the bytes used already
		bytesUsed := 0
		for _, msg := range toSend {
			bytesUsed += len(msg) + overhead
		}

		// Check space remaining for user messages
		avail := limit - bytesUsed
		if avail > overhead+userMsgOverhead {
			userMsgs := d.GetBroadcasts(overhead+userMsgOverhead, avail)

			// Frame each user message
			for _, msg := range userMsgs {
				buf := make([]byte, 1, len(msg)+1)
				buf[0] = byte(userMsg)
				buf = append(buf, msg...)
				toSend = append(toSend, buf)
			}
		}
	}
	return toSend
}

// encodeWeightMsgAndBroadcast encodes a weight message and enqueues it for broadcast. Fails
// silently if there is an encoding error.
func (m *Memberlist) encodeWeightMsgAndBroadcast(node string, msg interface{}) {
	buf, err := encode(weightMsg, msg)
	if err != nil {
		m.logger.Printf("[ERR] memberlist: Failed to encode message for weight message broadcast: %s", err)
	} else {
		m.broadcasts.QueueBroadcast(&weightBroadcast{node, buf.Bytes()})
	}
}
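
// A packing-budget walkthrough for getBroadcasts (illustrative numbers): with
// limit=1400 and overhead=2, two queued memberlist messages of 100 and 200
// bytes consume (100+2)+(200+2) = 304 bytes, leaving avail = 1096. Since
// 1096 > overhead+userMsgOverhead, the Delegate is offered the remaining
// space, and each user message it returns is framed with a one-byte userMsg
// header before being appended to the outgoing packet.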