github.com/tiagovtristao/plz@v13.4.0+incompatible/src/follow/grpc_test.go

// Integration tests between the gRPC event server & client.
//
// These tests are a little limited in how much they can cover due to extensive
// synchronisation issues (discussed in a little more detail below); essentially the
// scheme is designed for clients following a series of events in "human" time
// (i.e. a stream that runs for many seconds), without a hard requirement to observe
// all initial events correctly (there's an expectation that we'll catch up soon enough).
// That doesn't work so well for a test where everything happens on the scale of
// microseconds and we want to assert precise events, but we do the best we can.
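//
// For reference, the flow these tests exercise looks roughly like the following
// sketch (the names are the ones used in this file; the loop body is elided, and
// a real client would run for much longer):
//
//	serverState := core.NewBuildState(5, nil, 4, core.DefaultConfiguration())
//	addr, shutdown := initialiseServer(serverState, 0)
//	clientState := core.NewBuildState(1, nil, 4, core.DefaultConfiguration())
//	connectClient(clientState, addr, retries, delay)
//	for result := range clientState.Results() {
//	    // ... display or assert on each build event ...
//	}
//	shutdown()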

package follow

import (
	"fmt"
	"testing"
	"time"

	"github.com/stretchr/testify/assert"

	"github.com/thought-machine/please/src/cli"
	"github.com/thought-machine/please/src/core"
)

const retries = 3
const delay = 10 * time.Millisecond

func init() {
	cli.InitLogging(cli.MaxVerbosity)
	// The usual 1-second disconnect timeout is pretty annoying in this test.
	disconnectTimeout = 1 * time.Millisecond
	// As is the usual half-second wait between resource updates.
	resourceUpdateFrequency = 1 * time.Millisecond
}

var (
	l1 = core.ParseBuildLabel("//src/remote:target1", "")
	l2 = core.ParseBuildLabel("//src/remote:target2", "")
	l3 = core.ParseBuildLabel("//src/remote:target3", "")
)

func TestClientToServerCommunication(t *testing.T) {
	// Note that the ordering of things here is pretty important; we need to get the
	// client connected & ready to receive events before we push them all into the
	// server and shut it down again.
	serverState := core.NewBuildState(5, nil, 4, core.DefaultConfiguration())
	addr, shutdown := initialiseServer(serverState, 0)

	// This is a bit awkward. We want to assert that we receive a matching set of
	// build events, but it's difficult to build strong synchronisation into this
	// scheme, which is really designed for builds taking a significant amount of
	// real time, in which remote clients have a chance to sync up.
	// This test does the best it can to assert a reliable set of observable events.

	// Dispatch the first round of build events now.
	serverState.LogBuildResult(0, l1, core.PackageParsed, fmt.Sprintf("Parsed %s", l1))
	serverState.LogBuildResult(0, l1, core.TargetBuilding, fmt.Sprintf("Building %s", l1))
	serverState.LogBuildResult(2, l2, core.TargetBuilding, fmt.Sprintf("Building %s", l2))
	serverState.LogBuildResult(0, l1, core.TargetBuilt, fmt.Sprintf("Built %s", l1))
	serverState.LogBuildResult(1, l3, core.TargetBuilding, fmt.Sprintf("Building %s", l3))

	clientState := core.NewBuildState(1, nil, 4, core.DefaultConfiguration())
	results := clientState.Results()
	connectClient(clientState, addr, retries, delay)
	// The client state should have synced up with the server's number of threads.
	assert.Equal(t, 5, clientState.Config.Please.NumThreads)

	// We should be able to receive the latest build event for each thread.
	// Note that they come out in thread order, not time order.
	r := <-results
	log.Info("Received first build event")
	assert.Equal(t, "Built //src/remote:target1", r.Description)
	r = <-results
	assert.Equal(t, "Building //src/remote:target3", r.Description)
	r = <-results
	assert.Equal(t, "Building //src/remote:target2", r.Description)

	// Here we hit a bit of a synchronisation problem, whereby we can't guarantee that
	// the client is actually going to be ready to receive the events in time, which
	// manifests as blocking when we try to receive below. Conversely, we also race between
	// the client connecting and these results going in; we can miss them if it's still
	// not really receiving. Finally, the server can block on shutdown() if the client
	// isn't trying to read any pending events.
	// (One way of bounding the receive is sketched in receiveWithTimeout after this test.)
	go func() {
		defer func() {
			recover() // Send on closed channel; can happen because shutdown() is out of sync.
		}()
		serverState.LogBuildResult(1, l3, core.TargetBuilt, fmt.Sprintf("Built %s", l3))
		serverState.LogBuildResult(2, l2, core.TargetBuilt, fmt.Sprintf("Built %s", l2))
	}()
	go func() {
		for r := range results {
			log.Info("Received result from thread %d", r.ThreadID)
		}
	}()
	log.Info("Shutting down server")
	shutdown()
	log.Info("Server shutdown")
}

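// receiveWithTimeout is an illustrative sketch of one way to bound the blocking
// receive discussed in TestClientToServerCommunication above: select against a
// deadline instead of reading from the channel unconditionally. It assumes the
// results channel carries *core.BuildResult values (as the assertions above use);
// the tests themselves don't call it.
func receiveWithTimeout(results <-chan *core.BuildResult, timeout time.Duration) (*core.BuildResult, bool) {
	select {
	case r, ok := <-results:
		return r, ok // ok is false once the channel has been closed & drained.
	case <-time.After(timeout):
		return nil, false // Gave up waiting; the client may not have synced yet.
	}
}
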
func TestWithOutput(t *testing.T) {
	serverState := core.NewBuildState(5, nil, 4, core.DefaultConfiguration())
	addr, shutdown := initialiseServer(serverState, 0)
	clientState := core.NewBuildState(1, nil, 4, core.DefaultConfiguration())
	connectClient(clientState, addr, retries, delay)
	go func() {
		serverState.LogBuildResult(0, l1, core.PackageParsed, fmt.Sprintf("Parsed %s", l1))
		serverState.LogBuildResult(0, l1, core.TargetBuilding, fmt.Sprintf("Building %s", l1))
		serverState.LogBuildResult(2, l2, core.TargetBuilding, fmt.Sprintf("Building %s", l2))
		serverState.LogBuildResult(0, l1, core.TargetBuilt, fmt.Sprintf("Built %s", l1))
		serverState.LogBuildResult(1, l3, core.TargetBuilding, fmt.Sprintf("Building %s", l3))
		serverState.LogBuildResult(1, l3, core.TargetBuilt, fmt.Sprintf("Built %s", l3))
		serverState.LogBuildResult(2, l2, core.TargetBuilt, fmt.Sprintf("Built %s", l2))
		log.Info("Shutting down server")
		shutdown()
	}()
	assert.True(t, runOutput(clientState))
}

func TestResources(t *testing.T) {
	serverState := core.NewBuildState(5, nil, 4, core.DefaultConfiguration())
	go UpdateResources(serverState)
	addr, shutdown := initialiseServer(serverState, 0)
	defer shutdown()
	clientState := core.NewBuildState(1, nil, 4, core.DefaultConfiguration())
	connectClient(clientState, addr, retries, delay)
	// Fortunately this is a lot less fiddly than the others, because we always
	// receive updates eventually. On the downside, it's hard to know when that
	// will have happened, since we can't observe the goroutines that are doing it;
	// hence the polling loop below (the same pattern is sketched as a helper after
	// this test).
	for i := 0; i < 20; i++ {
		time.Sleep(resourceUpdateFrequency)
		if clientState.Stats.Memory.Used > 0.0 {
			break
		}
	}
	// Hard to know what any of the values should be, but of course we must be using
	// *some* memory.
	assert.True(t, clientState.Stats.Memory.Used > 0.0)
}
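
// eventually is an illustrative sketch of the polling pattern TestResources uses
// inline above: re-check a condition up to `attempts` times, sleeping for the
// (shortened) resourceUpdateFrequency between checks. The tests above don't call
// it, and the attempt count is an arbitrary choice.
func eventually(attempts int, check func() bool) bool {
	for i := 0; i < attempts; i++ {
		if check() {
			return true
		}
		time.Sleep(resourceUpdateFrequency)
	}
	return false
}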