agones.dev/agones@v1.53.0/pkg/processor/client_test.go (about)

     1  // Copyright 2025 Google LLC All Rights Reserved.
     2  //
     3  // Licensed under the Apache License, Version 2.0 (the "License");
     4  // you may not use this file except in compliance with the License.
     5  // You may obtain a copy of the License at
     6  //
     7  //     http://www.apache.org/licenses/LICENSE-2.0
     8  //
     9  // Unless required by applicable law or agreed to in writing, software
    10  // distributed under the License is distributed on an "AS IS" BASIS,
    11  // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    12  // See the License for the specific language governing permissions and
    13  // limitations under the License.
    14  
    15  package processor
    16  
    17  import (
    18  	"context"
    19  	"errors"
    20  	"testing"
    21  	"time"
    22  
    23  	allocationpb "agones.dev/agones/pkg/allocation/go"
    24  	"github.com/sirupsen/logrus"
    25  	"google.golang.org/genproto/googleapis/rpc/status"
    26  	"google.golang.org/grpc/metadata"
    27  )
    28  
// mockStream is a test double for the Processor_StreamBatchesClient gRPC
// stream. The test drives it through two buffered channels: recvChan feeds
// messages that Recv hands to the client under test, while sendChan captures
// everything the client writes via Send.
type mockStream struct {
	recvChan chan *allocationpb.ProcessorMessage
	sendChan chan *allocationpb.ProcessorMessage
}
    34  
// Send records msg on sendChan so the test can observe what the client wrote
// to the stream. It never reports an error.
func (m *mockStream) Send(msg *allocationpb.ProcessorMessage) error {
	m.sendChan <- msg
	return nil
}
    39  func (m *mockStream) Recv() (*allocationpb.ProcessorMessage, error) {
    40  	msg, ok := <-m.recvChan
    41  	if !ok {
    42  		return nil, errors.New("stream closed")
    43  	}
    44  	return msg, nil
    45  }
    46  func (m *mockStream) CloseSend() error             { close(m.sendChan); return nil }
    47  func (m *mockStream) Context() context.Context     { return context.Background() }
    48  func (m *mockStream) Header() (metadata.MD, error) { return metadata.MD{}, nil }
    49  func (m *mockStream) Trailer() metadata.MD         { return metadata.MD{} }
    50  func (m *mockStream) SendMsg(interface{}) error    { return nil }
    51  func (m *mockStream) RecvMsg(interface{}) error    { return nil }
    52  
// TestProcessorClient_Allocate exercises the client's Allocate flow against a
// mocked processor stream. Each case starts batchSize concurrent Allocate
// calls, simulates a pull request from the processor, then feeds back a canned
// batch response (success, per-request error, or nothing at all to force a
// timeout) and verifies the outcome of every request.
func TestProcessorClient_Allocate(t *testing.T) {
	testCases := []struct {
		name          string
		batchSize     int
		// setupResponse consumes the batch the client sent (if any) and
		// pushes the processor's reply onto the mock stream.
		setupResponse func(stream *mockStream, reqIDs []string)
		// expectError[i] is whether the i-th Allocate call should fail.
		expectError   []bool
	}{
		{
			name:      "successful allocation with batchSize 1",
			batchSize: 1,
			setupResponse: func(stream *mockStream, reqIDs []string) {
				// Wait for the client to send its batch, then echo the batch
				// ID back with a single successful response.
				msg := <-stream.sendChan
				batchID := msg.GetBatchRequest().BatchId
				stream.recvChan <- &allocationpb.ProcessorMessage{
					Payload: &allocationpb.ProcessorMessage_BatchResponse{
						BatchResponse: &allocationpb.BatchResponse{
							BatchId: batchID,
							Responses: []*allocationpb.ResponseWrapper{
								{
									RequestId: reqIDs[0],
									Result: &allocationpb.ResponseWrapper_Response{
										Response: &allocationpb.AllocationResponse{},
									},
								},
							},
						},
					},
				}
			},
			expectError: []bool{false},
		},
		{
			name:      "successful allocation with batchSize 3",
			batchSize: 3,
			setupResponse: func(stream *mockStream, reqIDs []string) {
				msg := <-stream.sendChan
				batchID := msg.GetBatchRequest().BatchId
				// One successful response per queued request, keyed by the
				// request IDs captured from the hot batch.
				responses := make([]*allocationpb.ResponseWrapper, 3)
				for i := 0; i < 3; i++ {
					responses[i] = &allocationpb.ResponseWrapper{
						RequestId: reqIDs[i],
						Result: &allocationpb.ResponseWrapper_Response{
							Response: &allocationpb.AllocationResponse{},
						},
					}
				}
				stream.recvChan <- &allocationpb.ProcessorMessage{
					Payload: &allocationpb.ProcessorMessage_BatchResponse{
						BatchResponse: &allocationpb.BatchResponse{
							BatchId:   batchID,
							Responses: responses,
						},
					},
				}
			},
			expectError: []bool{false, false, false},
		},
		{
			// With no queued requests the pull should be a no-op; nothing is
			// sent back and nothing is asserted beyond "no panic/deadlock".
			name:          "pull received but no batch available",
			batchSize:     0,
			setupResponse: func(_ *mockStream, _ []string) {},
			expectError:   []bool{},
		},
		{
			name:      "allocation error response",
			batchSize: 1,
			setupResponse: func(stream *mockStream, reqIDs []string) {
				msg := <-stream.sendChan
				batchID := msg.GetBatchRequest().BatchId
				stream.recvChan <- &allocationpb.ProcessorMessage{
					Payload: &allocationpb.ProcessorMessage_BatchResponse{
						BatchResponse: &allocationpb.BatchResponse{
							BatchId: batchID,
							Responses: []*allocationpb.ResponseWrapper{
								{
									RequestId: reqIDs[0],
									// A status error in the wrapper should
									// surface as an error from Allocate.
									Result: &allocationpb.ResponseWrapper_Error{
										Error: &status.Status{
											Code:    int32(13), // INTERNAL
											Message: "mock error",
										},
									},
								},
							},
						},
					},
				}
			},
			expectError: []bool{true},
		},
		{
			name:      "allocation timeout",
			batchSize: 1,
			setupResponse: func(_ *mockStream, _ []string) {
				// Do not send any batch response, let it timeout
				// (sleeps past the 50ms AllocationTimeout configured below).
				time.Sleep(100 * time.Millisecond)
			},
			expectError: []bool{true},
		},
	}

	for _, tc := range testCases {
		t.Run(tc.name, func(t *testing.T) {
			logger := logrus.New()
			// Short timeout keeps the "allocation timeout" case fast.
			config := Config{
				MaxBatchSize:      10,
				AllocationTimeout: 50 * time.Millisecond,
				ClientID:          "test-client",
			}
			// Buffered channels so neither side blocks during setup.
			stream := &mockStream{
				recvChan: make(chan *allocationpb.ProcessorMessage, 10),
				sendChan: make(chan *allocationpb.ProcessorMessage, 10),
			}
			p := &client{
				config:           config,
				logger:           logger,
				hotBatch:         &allocationpb.BatchRequest{Requests: make([]*allocationpb.RequestWrapper, 0, config.MaxBatchSize)},
				pendingRequests:  make([]*pendingRequest, 0, config.MaxBatchSize),
				requestIDMapping: make(map[string]*pendingRequest),
			}

			// Start handleStream in a goroutine
			ctx, cancel := context.WithCancel(context.Background())
			defer cancel()
			go func() {
				_ = p.handleStream(ctx, stream)
			}()

			reqIDs := make([]string, tc.batchSize)
			responses := make([]*allocationpb.AllocationResponse, tc.batchSize)
			errorsArr := make([]error, tc.batchSize)
			doneChans := make([]chan struct{}, tc.batchSize)

			// Fire batchSize concurrent Allocate calls; each one closes its
			// done channel when it returns.
			for i := 0; i < tc.batchSize; i++ {
				doneChans[i] = make(chan struct{})
				req := &allocationpb.AllocationRequest{}
				go func(idx int) {
					responses[idx], errorsArr[idx] = p.Allocate(context.Background(), req)
					close(doneChans[idx])
				}(i)
			}

			// Wait for requests to be added
			// NOTE(review): fixed sleeps make this timing-sensitive; 5ms is
			// assumed long enough for the goroutines above to enqueue.
			time.Sleep(5 * time.Millisecond)
			p.batchMutex.RLock()
			for i := 0; i < tc.batchSize && i < len(p.hotBatch.Requests); i++ {
				reqIDs[i] = p.hotBatch.Requests[i].RequestId
			}
			p.batchMutex.RUnlock()

			// Simulate a pullRequest
			stream.recvChan <- &allocationpb.ProcessorMessage{Payload: &allocationpb.ProcessorMessage_Pull{}}
			// Wait for pullRequest to be processed
			time.Sleep(5 * time.Millisecond)
			// Simulate responses
			tc.setupResponse(stream, reqIDs)

			// Every Allocate call must have resolved with the expected
			// success/error outcome for its slot.
			for i := 0; i < tc.batchSize; i++ {
				<-doneChans[i]
				if tc.expectError[i] && errorsArr[i] == nil {
					t.Errorf("expected error for request %d, got nil", i)
				}
				if !tc.expectError[i] && errorsArr[i] != nil {
					t.Errorf("expected no error for request %d, got %v", i, errorsArr[i])
				}
				if !tc.expectError[i] && responses[i] == nil {
					t.Errorf("expected response for request %d, got nil", i)
				}
			}
		})
	}
}