github.com/lirm/aeron-go@v0.0.0-20230415210743-920325491dc4/examples/cluster_client/throughput_test_client.go

// throughput_test_client is an example Aeron Cluster client that measures
// round-trip latency and throughput. It assumes the clustered service echoes
// each ingress message back to the client on the egress stream.
package main

import (
	"fmt"
	"os"
	"sort"
	"time"

	"github.com/lirm/aeron-go/aeron"
	"github.com/lirm/aeron-go/aeron/atomic"
	"github.com/lirm/aeron-go/aeron/idlestrategy"
	"github.com/lirm/aeron-go/aeron/logbuffer"
	"github.com/lirm/aeron-go/cluster/client"
)

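// TestContext implements the AeronCluster listener callbacks and records
// per-message round-trip latencies for the current round.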
type TestContext struct {
	ac                    *client.AeronCluster
	messageCount          int
	latencies             []int64
	nextSendKeepAliveTime int64
}

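// OnConnect stores the connected cluster client and schedules the first
// session keep-alive one second out.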
func (ctx *TestContext) OnConnect(ac *client.AeronCluster) {
	fmt.Printf("OnConnect - sessionId=%d leaderMemberId=%d leadershipTermId=%d\n",
		ac.ClusterSessionId(), ac.LeaderMemberId(), ac.LeadershipTermId())
	ctx.ac = ac
	ctx.nextSendKeepAliveTime = time.Now().UnixMilli() + time.Second.Milliseconds()
}

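// OnDisconnect clears the stored client so no further keep-alives are sent.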
func (ctx *TestContext) OnDisconnect(cluster *client.AeronCluster, details string) {
	fmt.Printf("OnDisconnect - sessionId=%d (%s)\n", cluster.ClusterSessionId(), details)
	ctx.ac = nil
}

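// OnMessage records the round-trip latency for an echoed message. Each message
// carries its sequence number at offset 0 and its send time (Unix nanos) at
// offset 8; anything outside the expected range is logged instead of recorded.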
func (ctx *TestContext) OnMessage(cluster *client.AeronCluster, timestamp int64,
	buffer *atomic.Buffer, offset int32, length int32, header *logbuffer.Header) {
	recvTime := time.Now().UnixNano()
	msgNo := buffer.GetInt32(offset)
	sendTime := buffer.GetInt64(offset + 8)
	latency := recvTime - sendTime
	if msgNo < 1 || int(msgNo) > len(ctx.latencies) {
		fmt.Printf("OnMessage - sessionId=%d timestamp=%d pos=%d length=%d latency=%d\n",
			cluster.ClusterSessionId(), timestamp, header.Position(), length, latency)
	} else {
		ctx.latencies[msgNo-1] = latency
		ctx.messageCount++
	}
}

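// OnNewLeader is invoked after a leader election; this client only logs it.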
func (ctx *TestContext) OnNewLeader(cluster *client.AeronCluster, leadershipTermId int64, leaderMemberId int32) {
	fmt.Printf("OnNewLeader - sessionId=%d leaderMemberId=%d leadershipTermId=%d\n",
		cluster.ClusterSessionId(), leaderMemberId, leadershipTermId)
}

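// OnError logs errors reported for the cluster session.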
func (ctx *TestContext) OnError(cluster *client.AeronCluster, details string) {
	fmt.Printf("OnError - sessionId=%d: %s\n", cluster.ClusterSessionId(), details)
}

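// sendKeepAliveIfNecessary sends a session keep-alive roughly once per second
// while connected, so the cluster does not time the session out.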
func (ctx *TestContext) sendKeepAliveIfNecessary() {
	if now := time.Now().UnixMilli(); now > ctx.nextSendKeepAliveTime && ctx.ac != nil && ctx.ac.SendKeepAlive() {
		ctx.nextSendKeepAliveTime += time.Second.Milliseconds()
	}
}

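// main connects to a three-node cluster on localhost and runs ten rounds of
// 1000 messages each, printing latency percentiles (in microseconds) and
// throughput (messages/second) per round. Set AERON_DIR to choose the Aeron
// directory and NO_OP_IDLE to use a busy-spin idle strategy.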
func main() {
	ctx := aeron.NewContext()
	if aeronDir := os.Getenv("AERON_DIR"); aeronDir != "" {
		ctx.AeronDir(aeronDir)
		fmt.Println("aeron dir: ", aeronDir)
	} else if _, err := os.Stat("/dev/shm"); err == nil {
		path := fmt.Sprintf("/dev/shm/aeron-%s", aeron.UserName)
		ctx.AeronDir(path)
		fmt.Println("aeron dir: ", path)
	}

	opts := client.NewOptions()
	if idleStr := os.Getenv("NO_OP_IDLE"); idleStr != "" {
		opts.IdleStrategy = &idlestrategy.Busy{}
	}
	opts.IngressChannel = "aeron:udp?alias=cluster-client-ingress|endpoint=localhost:20000"
	opts.IngressEndpoints = "0=localhost:20000,1=localhost:21000,2=localhost:22000"
	//opts.EgressChannel = "aeron:udp?alias=cluster-egress|endpoint=localhost:11111"

	listener := &TestContext{
		latencies: make([]int64, 1000),
	}
	clusterClient, err := client.NewAeronCluster(ctx, opts, listener)
	if err != nil {
		panic(err)
	}

	// Poll until the cluster session is established.
	for !clusterClient.IsConnected() {
		opts.IdleStrategy.Idle(clusterClient.Poll())
	}

	sendBuf := atomic.MakeBuffer(make([]byte, 100))
	for round := 1; round <= 10; round++ {
		fmt.Printf("starting round #%d\n", round)
		listener.messageCount = 0
		sentCt := 0
		beginTime := time.Now().UnixNano()
		latencies := listener.latencies
		for i := range latencies {
			latencies[i] = 0
		}
		ct := len(latencies)
		// Send one message per latency slot: sequence number at offset 0,
		// send time (Unix nanos) at offset 8. Retry the offer until it is
		// accepted, polling egress and keeping the session alive meanwhile.
		for i := 1; i <= ct; i++ {
			sendBuf.PutInt32(0, int32(i))
			sendBuf.PutInt64(8, time.Now().UnixNano())
			for {
				if r := clusterClient.Offer(sendBuf, 0, sendBuf.Capacity()); r >= 0 {
					sentCt++
					break
				}
				clusterClient.Poll()
				listener.sendKeepAliveIfNecessary()
			}
		}
		// Drain egress until every sent message has been received back.
		for listener.messageCount < sentCt {
			pollCt := clusterClient.Poll()
			if pollCt == 0 {
				listener.sendKeepAliveIfNecessary()
			}
			opts.IdleStrategy.Idle(pollCt)
		}
		now := time.Now()
		totalNs := now.UnixNano() - beginTime
		// Sort the nanosecond latencies and report percentiles in microseconds.
		sort.Slice(latencies, func(i, j int) bool { return latencies[i] < latencies[j] })
		fmt.Printf("round #%d complete, count=%d min=%d 10%%=%d 50%%=%d 90%%=%d max=%d throughput=%.2f\n",
			round, sentCt, latencies[ct-sentCt]/1000, latencies[ct/10]/1000, latencies[ct/2]/1000, latencies[9*(ct/10)]/1000,
			latencies[ct-1]/1000, (float64(sentCt) * 1000000000.0 / float64(totalNs)))

		// Idle for ten seconds between rounds, keeping the session alive.
		for time.Since(now) < 10*time.Second {
			listener.sendKeepAliveIfNecessary()
			opts.IdleStrategy.Idle(clusterClient.Poll())
		}
	}
	clusterClient.Close()
	fmt.Println("done")
	time.Sleep(time.Second)
}