github.com/gdamore/mangos@v1.4.0/test/scale_test.go

// +build !race

// Copyright 2018 The Mangos Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//    http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package test

// This simple test just fires off a crapton of inproc clients, to see
// how many connections we could handle.  We do this using inproc, because
// we would absolutely exhaust TCP ports before we would hit any of the
// natural limits.  The inproc transport has no such limits, so we are
// effectively just testing goroutine scalability, which is what we want.
// The intention is to demonstrate that mangos can address the C10K problem
// without breaking a sweat.

import (
	"errors"
	"math/rand"
	"sync"
	"testing"
	"time"

	"nanomsg.org/go-mangos"
	"nanomsg.org/go-mangos/protocol/rep"
	"nanomsg.org/go-mangos/protocol/req"
	"nanomsg.org/go-mangos/transport/inproc"
)

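// scalabilityClient dials the shared inproc address with a REQ socket and
// runs the ping/pong exchange the requested number of times, reporting any
// failure back to the test through errp.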
func scalabilityClient(errp *error, loops int, wg *sync.WaitGroup) {
	defer wg.Done()
	sock, err := req.NewSocket()
	if err != nil {
		*errp = err
		return
	}
	sock.AddTransport(inproc.NewTransport())
	defer sock.Close()
	if err := sock.Dial("inproc://atscale"); err != nil {
		*errp = err
		return
	}

	for i := 0; i < loops; i++ {
		// Inject a random sleep to avoid pounding the CPU too hard.
		time.Sleep(time.Duration(rand.Int31n(1000)) * time.Microsecond)

		msg := mangos.NewMessage(3)
		msg.Body = append(msg.Body, []byte("ping")...)
		if err := sock.SendMsg(msg); err != nil {
			*errp = err
			return
		}

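		// Wait for the matching reply on the same REQ socket.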
		if msg, err = sock.RecvMsg(); err != nil {
			*errp = err
			return
		}
		if string(msg.Body) != "pong" {
			*errp = errors.New("response mismatch")
			return
		}
		msg.Free()
	}
}

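// scalabilityServer runs a simple REP loop, answering every request it
// receives with "pong" until its socket is closed.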
func scalabilityServer(sock mangos.Socket) {
	defer sock.Close()
	for {
		msg, e := sock.RecvMsg()
		if e != nil {
			return
		}
		msg.Body = append(msg.Body[:0], []byte("pong")...)
		e = sock.SendMsg(msg)
		if e != nil {
			return
		}
	}
}

func TestScalability(t *testing.T) {
	// Beyond this things get crazy.
	// Note that the thread count actually indicates that you will
	// have this many client sockets, and an equal number of server
	// side pipes.  On my Mac, 20K leads to around 30 sec to run the
	// program, whereas 10k can run in under 10 sec.  This proves we
	// can handle 10K connections.
	loops := 1
	threads := 10000

	errs := make([]error, threads)

	ssock, e := rep.NewSocket()
	if e != nil {
		t.Fatalf("Cannot make server socket: %v", e)
	}
	ssock.AddTransport(inproc.NewTransport())
	if e = ssock.Listen("inproc://atscale"); e != nil {
		t.Fatalf("Cannot listen: %v", e)
	}
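	// Pause briefly to let the listener get established before the clients dial in.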
	time.Sleep(time.Millisecond * 100)
	go scalabilityServer(ssock)
	wg := &sync.WaitGroup{}
	wg.Add(threads)
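	// Fire off all of the clients, each in its own goroutine.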
	for i := 0; i < threads; i++ {
		go scalabilityClient(&errs[i], loops, wg)
	}

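	// Wait for every client to finish, then report the first failure, if any.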
	wg.Wait()
	for i := 0; i < threads; i++ {
		if errs[i] != nil {
			t.Fatalf("Test failed: %v", errs[i])
		}
	}
}