k8s.io/apiserver@v0.31.1/plugin/pkg/audit/buffered/buffered_test.go (about)

     1  /*
     2  Copyright 2018 The Kubernetes Authors.
     3  
     4  Licensed under the Apache License, Version 2.0 (the "License");
     5  you may not use this file except in compliance with the License.
     6  You may obtain a copy of the License at
     7  
     8      http://www.apache.org/licenses/LICENSE-2.0
     9  
    10  Unless required by applicable law or agreed to in writing, software
    11  distributed under the License is distributed on an "AS IS" BASIS,
    12  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    13  See the License for the specific language governing permissions and
    14  limitations under the License.
    15  */
    16  
    17  package buffered
    18  
    19  import (
    20  	"fmt"
    21  	"sync"
    22  	"testing"
    23  	"time"
    24  
    25  	"github.com/stretchr/testify/assert"
    26  	"github.com/stretchr/testify/require"
    27  
    28  	"k8s.io/apimachinery/pkg/util/wait"
    29  	auditinternal "k8s.io/apiserver/pkg/apis/audit"
    30  	"k8s.io/apiserver/plugin/pkg/audit/fake"
    31  )
    32  
var (
	// infiniteTimeCh is a nil timer channel: receiving from a nil channel
	// blocks forever, so passing it to collectEvents simulates a batch
	// timer that never fires.
	infiniteTimeCh <-chan time.Time
)
    36  
    37  func newEvents(number int) []*auditinternal.Event {
    38  	events := make([]*auditinternal.Event, number)
    39  	for i := range events {
    40  		events[i] = &auditinternal.Event{}
    41  	}
    42  
    43  	return events
    44  }
    45  
    46  func testBatchConfig() BatchConfig {
    47  	return BatchConfig{
    48  		BufferSize:     100,
    49  		MaxBatchSize:   10,
    50  		MaxBatchWait:   wait.ForeverTestTimeout,
    51  		ThrottleEnable: false,
    52  		AsyncDelegate:  true,
    53  	}
    54  }
    55  
// TestBatchedBackendCollectEvents exercises collectEvents in batching mode:
// a full batch returns immediately, a partial batch waits for the timer
// channel, and a partial batch is flushed when the stop channel closes.
func TestBatchedBackendCollectEvents(t *testing.T) {
	config := testBatchConfig()
	batchSize := config.MaxBatchSize
	backend := NewBackend(&fake.Backend{}, config).(*bufferedBackend)

	t.Log("Max batch size encountered.")
	// With batchSize+1 events buffered, collectEvents should return a full
	// batch without needing a timer or stop signal (both nil here).
	backend.ProcessEvents(newEvents(batchSize + 1)...)
	batch := backend.collectEvents(nil, nil)
	assert.Len(t, batch, batchSize, "Expected full batch")

	t.Log("Partial batch should hang until timer expires.")
	// One leftover event from the previous step plus one new event = 2.
	backend.ProcessEvents(newEvents(1)...)
	// tc stands in for the batch timer; sending on it unblocks collection.
	tc := make(chan time.Time)
	wg := sync.WaitGroup{}
	wg.Add(1)
	go func() {
		defer wg.Done()
		batch = backend.collectEvents(tc, nil)
	}()
	// Wait for the queued events to be collected.
	err := wait.Poll(time.Second, wait.ForeverTestTimeout, func() (bool, error) {
		return len(backend.buffer) == 0, nil
	})
	require.NoError(t, err)

	tc <- time.Now() // Trigger "timeout"
	// wg.Wait establishes a happens-before edge, making the read of batch
	// below race-free.
	wg.Wait()
	assert.Len(t, batch, 2, "Expected partial batch")

	t.Log("Collected events should be delivered when stop channel is closed.")
	backend.ProcessEvents(newEvents(3)...)
	stopCh := make(chan struct{})
	wg.Add(1)
	go func() {
		defer wg.Done()
		batch = backend.collectEvents(nil, stopCh)
	}()
	// Wait for the queued events to be collected.
	err = wait.Poll(time.Second, wait.ForeverTestTimeout, func() (bool, error) {
		return len(backend.buffer) == 0, nil
	})
	require.NoError(t, err)

	close(stopCh)
	wg.Wait()
	assert.Len(t, batch, 3, "Expected partial batch")
}
   103  
   104  func TestUnbatchedBackendCollectEvents(t *testing.T) {
   105  	config := testBatchConfig()
   106  	config.MaxBatchSize = 1 // No batching.
   107  	backend := NewBackend(&fake.Backend{}, config).(*bufferedBackend)
   108  
   109  	t.Log("Max batch size encountered.")
   110  	backend.ProcessEvents(newEvents(3)...)
   111  	batch := backend.collectEvents(nil, nil)
   112  	assert.Len(t, batch, 1, "Expected single event")
   113  
   114  	t.Log("Queue should always be drained.")
   115  	for len(backend.buffer) > 0 {
   116  		batch = backend.collectEvents(nil, nil)
   117  		assert.Len(t, batch, 1, "Expected single event")
   118  	}
   119  
   120  	t.Log("Collection should hault when stop channel is closed.")
   121  	stopCh := make(chan struct{})
   122  	wg := sync.WaitGroup{}
   123  	wg.Add(1)
   124  	go func() {
   125  		defer wg.Done()
   126  		batch = backend.collectEvents(nil, stopCh)
   127  	}()
   128  	close(stopCh)
   129  	wg.Wait()
   130  	assert.Empty(t, batch, "Empty final batch")
   131  }
   132  
   133  func TestBufferedBackendProcessEventsAfterStop(t *testing.T) {
   134  	t.Parallel()
   135  
   136  	backend := NewBackend(&fake.Backend{}, testBatchConfig()).(*bufferedBackend)
   137  
   138  	closedStopCh := make(chan struct{})
   139  	close(closedStopCh)
   140  	backend.Run(closedStopCh)
   141  	backend.Shutdown()
   142  	backend.ProcessEvents(newEvents(1)...)
   143  	batch := backend.collectEvents(infiniteTimeCh, wait.NeverStop)
   144  
   145  	require.Empty(t, batch, "processed events after the backed has been stopped")
   146  }
   147  
   148  func TestBufferedBackendProcessEventsBufferFull(t *testing.T) {
   149  	t.Parallel()
   150  
   151  	config := testBatchConfig()
   152  	config.BufferSize = 1
   153  	backend := NewBackend(&fake.Backend{}, config).(*bufferedBackend)
   154  
   155  	backend.ProcessEvents(newEvents(2)...)
   156  
   157  	require.Len(t, backend.buffer, 1, "buffed contains more elements than it should")
   158  }
   159  
// TestBufferedBackendShutdownWaitsForDelegatedCalls verifies that Shutdown
// blocks until an in-flight delegated ProcessEvents call has finished.
func TestBufferedBackendShutdownWaitsForDelegatedCalls(t *testing.T) {
	t.Parallel()

	// The fake delegate signals when its call starts, then blocks until the
	// test allows it to finish — keeping one delegated call "in flight".
	delegatedCallStartCh := make(chan struct{})
	delegatedCallEndCh := make(chan struct{})
	delegateBackend := &fake.Backend{
		OnRequest: func(_ []*auditinternal.Event) {
			close(delegatedCallStartCh)
			<-delegatedCallEndCh
		},
	}
	config := testBatchConfig()
	backend := NewBackend(delegateBackend, config)

	// Run backend, process events, wait for them to be batched and for delegated call to start.
	stopCh := make(chan struct{})
	backend.Run(stopCh)
	backend.ProcessEvents(newEvents(config.MaxBatchSize)...)
	<-delegatedCallStartCh

	// Start shutdown procedure.
	shutdownEndCh := make(chan struct{})
	go func() {
		close(stopCh)
		backend.Shutdown()
		close(shutdownEndCh)
	}()

	// Wait for some time and then check whether Shutdown has exited. Can give false positive,
	// but never false negative.
	time.Sleep(100 * time.Millisecond)
	select {
	case <-shutdownEndCh:
		t.Fatalf("Shutdown exited before delegated call ended")
	default:
	}

	// Wait for Shutdown to exit after delegated call has exited.
	close(delegatedCallEndCh)
	<-shutdownEndCh
}
   201  
   202  func TestDelegateProcessEvents(t *testing.T) {
   203  	for _, async := range []bool{true, false} {
   204  		t.Run(fmt.Sprintf("async:%t", async), func(t *testing.T) {
   205  			config := testBatchConfig()
   206  			config.AsyncDelegate = async
   207  			wg := sync.WaitGroup{}
   208  			delegate := &fake.Backend{
   209  				OnRequest: func(events []*auditinternal.Event) {
   210  					assert.Len(t, events, config.MaxBatchSize, "Unexpected batch")
   211  					wg.Done()
   212  				},
   213  			}
   214  			b := NewBackend(delegate, config).(*bufferedBackend)
   215  			wg.Add(5)
   216  			for i := 0; i < 5; i++ {
   217  				b.processEvents(newEvents(config.MaxBatchSize))
   218  			}
   219  			wg.Wait()
   220  		})
   221  	}
   222  }