storj.io/uplink@v1.13.0/private/eestream/scheduler/scheduler_test.go

// Copyright (C) 2023 Storj Labs, Inc.
// See LICENSE for copying information.

package scheduler

import (
	"context"
	"math/rand"
	"runtime"
	"sync"
	"sync/atomic"
	"testing"
	"time"

	"github.com/stretchr/testify/require"

	"storj.io/common/sync2"
)

// TestScheduler_Priority checks that earlier handles get priority when handing out resources.
func TestScheduler_Priority(t *testing.T) {
	ctx := context.Background()

	s := New(Options{MaximumConcurrent: 3})

	h1, _ := s.Join(ctx)
	h2, _ := s.Join(ctx)
	h3, _ := s.Join(ctx)

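	// hold two of the three available resources on h1 so that only one
	// resource remains to be contended for inside the loop below.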
	_, _ = h1.Get(ctx)
	_, _ = h1.Get(ctx)

	var counts [3]int
	for i := 0; i < 1000; i++ {
		func() {
			r, ok := h1.Get(ctx)
			require.True(t, ok)

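			// derive a cancelable context so that the losing Get calls
			// return once one of the handles has won the resource.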
			ctx, cancel := context.WithCancel(ctx)
			defer cancel()

			var got Resource

			track := func(h Handle, n int) func() {
				return func() {
					r, ok := h.Get(ctx)
					if ok {
						counts[n]++
						cancel()
						got = r
					}
				}
			}

			// try to acquire the resource with all three handles.
			wait := concurrently(
				track(h1, 0),
				track(h2, 1),
				track(h3, 2),
			)

			// wait for them to all be in the queue to acquire the resource.
			for s.numWaiters() != 3 {
				runtime.Gosched()
			}

			// release the resource to allow one of them to win.
			r.Done()

			// wait for the acquire to happen.
			wait()

			// release the newly acquired resource.
			got.Done()
		}()
	}

	// this should deterministically always give the resource to the first handle.
	t.Log(counts)
	require.Equal(t, [...]int{1000, 0, 0}, counts)
}

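// concurrently shuffles the provided functions into a random order, runs them
// all on separate goroutines, and returns a function that waits for them to finish.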
func concurrently(fns ...func()) func() {
	rng := rand.New(rand.NewSource(time.Now().UnixNano()))
	rng.Shuffle(len(fns), func(i, j int) { fns[i], fns[j] = fns[j], fns[i] })
	return sync2.Go(fns...)
}

// TestScheduler_Limits checks that the configured limits are respected.
func TestScheduler_Limits(t *testing.T) {
	seed := time.Now().UnixNano()
	ctx := context.Background()
	concurrent := int64(0)
	max := int64(0)

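	// updateMax atomically records c in max if it exceeds the current value,
	// retrying the compare-and-swap until it succeeds or a larger value is stored.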
	updateMax := func(c int64) {
		for {
			m := atomic.LoadInt64(&max)
			if c <= m {
				return
			}
			if atomic.CompareAndSwapInt64(&max, m, c) {
				return
			}
		}
	}

	const (
		maxConcurrent = 10
		numHandles    = 100
		numResources  = 100
	)

	s := New(Options{MaximumConcurrent: maxConcurrent})

	var counts [maxConcurrent]int64
	var wg sync.WaitGroup
	for i := 0; i < numHandles; i++ {
		i := i

		wg.Add(1)
		go func() {
			defer wg.Done()
			rng := rand.New(rand.NewSource(seed + int64(i)))

			h, _ := s.Join(ctx)
			defer h.Done()

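			// held tracks the resources currently acquired by this handle;
			// done releases the most recently acquired one.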
			held := make([]Resource, 0, maxConcurrent)
			done := func() {
				if len(held) > 0 {
					atomic.AddInt64(&concurrent, -1)
					held[len(held)-1].Done()
					held = held[:len(held)-1]
				}
			}
			defer func() {
				for len(held) > 0 {
					done()
				}
			}()

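			// repeatedly acquire resources, randomly releasing some along the way,
			// and record the concurrency level observed after each acquisition.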
			for j := 0; j < numResources; j++ {
				if t.Failed() {
					break
				}

				for rng.Intn(3) == 0 {
					done()
				}
				for len(held) > 0 && atomic.LoadInt64(&concurrent) == maxConcurrent {
					done()
				}

				r, ok := h.Get(ctx)
				if !ok {
					t.Error("Unable to get resource")
					break
				}
				held = append(held, r)

				c := atomic.AddInt64(&concurrent, 1)
				updateMax(c)
				if c > maxConcurrent {
					t.Error("maximum concurrent:", c)
					break
				}
				atomic.AddInt64(&counts[c-1], 1)
			}
		}()
	}
	wg.Wait()

	t.Log("observed max:", max)
	t.Log("histogram:", counts)

	require.LessOrEqual(t, max, int64(maxConcurrent))
}

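// TestScheduler_MaxHandles checks that Join respects the MaximumConcurrentHandles
// limit and fails when its context is canceled or expires while waiting.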
func TestScheduler_MaxHandles(t *testing.T) {
	ctx := context.Background()
	canceled, cancel := context.WithCancel(ctx)
	cancel()

	s := New(Options{MaximumConcurrentHandles: 1})

	{ // first join should be fine
		h, ok := s.Join(ctx)
		require.True(t, ok)
		h.Done()
	}

	{ // join with canceled should always fail
		_, ok := s.Join(canceled)
		require.False(t, ok)
	}

	func() { // a second join should fail when handle outstanding
		h, ok := s.Join(ctx)
		require.True(t, ok)
		defer h.Done()

		timeout, cancel := context.WithTimeout(ctx, time.Millisecond)
		defer cancel()
		_, ok = s.Join(timeout)
		require.False(t, ok)
	}()
}

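// BenchmarkScheduler_Single measures the cost of a single handle repeatedly
// acquiring and releasing the only available resource.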
func BenchmarkScheduler_Single(b *testing.B) {
	ctx := context.Background()

	s := New(Options{MaximumConcurrent: 1})

	h, _ := s.Join(ctx)

	b.ReportAllocs()
	b.ResetTimer()

	for i := 0; i < b.N; i++ {
		if r, ok := h.Get(ctx); ok {
			r.Done()
		}
	}
}

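// BenchmarkScheduler_Parallel measures acquire/release throughput with one
// handle per parallel worker and MaximumConcurrent set to GOMAXPROCS.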
func BenchmarkScheduler_Parallel(b *testing.B) {
	ctx := context.Background()

	s := New(Options{MaximumConcurrent: runtime.GOMAXPROCS(-1)})

	b.ReportAllocs()
	b.ResetTimer()

	b.RunParallel(func(pb *testing.PB) {
		h, _ := s.Join(ctx)
		defer h.Done()

		for pb.Next() {
			if r, ok := h.Get(ctx); ok {
				r.Done()
			}
		}
	})
}