github.com/zppinho/prow@v0.0.0-20240510014325-1738badeb017/pkg/ghcache/coalesce_test.go

/*
Copyright 2018 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package ghcache

import (
	"bytes"
	"errors"
	"fmt"
	"io"
	"net/http"
	"net/url"
	"reflect"
	"sync"
	"testing"
	"time"

	"sigs.k8s.io/prow/pkg/github/ghmetrics"

	"k8s.io/apimachinery/pkg/util/diff"
)

// fakeRequestExecutor is a fake upstream transport RoundTripper that logs hits by
// URI. It will wait to respond to requests until signaled, or respond
// immediately if the request has a header specifying it should be responded to
// immediately.
type fakeRequestExecutor struct {
	beginResponding *sync.Cond

	hitsLock sync.Mutex
	hits     map[string]int

	responseHeader http.Header
}

func (fre *fakeRequestExecutor) RoundTrip(req *http.Request) (*http.Response, error) {
	fre.hitsLock.Lock()
	fre.hits[req.URL.Path] += 1
	fre.hitsLock.Unlock()

	if req.Header.Get("test-immediate-response") == "" {
		fre.beginResponding.L.Lock()
		fre.beginResponding.Wait()
		fre.beginResponding.L.Unlock()
	}
	header := fre.responseHeader
	if header == nil {
		header = http.Header{}
	}
	return &http.Response{
			Body:   io.NopCloser(bytes.NewBufferString("Response")),
			Header: header,
		},
		nil
}

func TestRoundTrip(t *testing.T) {
	// Check that only 1 request goes to upstream if there are concurrent requests.
	t.Parallel()
	fre := &fakeRequestExecutor{
		hits:            make(map[string]int),
		beginResponding: sync.NewCond(&sync.Mutex{}),
	}
	coalescer := &requestCoalescer{
		cache:           make(map[string]*firstRequest),
		requestExecutor: fre,
		hasher:          ghmetrics.NewCachingHasher(),
	}
	wg := sync.WaitGroup{}
	wg.Add(100)
	for i := 0; i < 100; i++ {
		go func() {
			if _, err := runRequest(coalescer, "/resource1", false); err != nil {
				t.Errorf("Failed to run request: %v.", err)
			}
			wg.Done()
		}()
	}
	// There is a race here. We need to wait for all requests to be made to the
	// coalescer before letting upstream respond, but we don't have a way of
	// knowing when all requests have actually started waiting on the
	// responseWaiter...
	time.Sleep(time.Second * 5)

	// Check that requests for different resources are not blocked.
	if _, err := runRequest(coalescer, "/resource2", true); err != nil {
		t.Errorf("Failed to run request: %v.", err)
	} // Doesn't return until timeout or success.
	fre.beginResponding.Broadcast()

	// Check that non-concurrent requests all hit upstream.
	if _, err := runRequest(coalescer, "/resource2", true); err != nil {
		t.Errorf("Failed to run request: %v.", err)
	}

	wg.Wait()
	expectedHits := map[string]int{"/resource1": 1, "/resource2": 2}
	if !reflect.DeepEqual(fre.hits, expectedHits) {
		t.Errorf("Unexpected hit count(s). Diff: %v.", diff.ObjectReflectDiff(expectedHits, fre.hits))
	}
}

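// TestCacheModeHeader verifies that responses returned by the coalescer carry a
// cache mode header matching how each request was served: miss, coalesced,
// revalidated, changed, or no-store.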
func TestCacheModeHeader(t *testing.T) {
	t.Parallel()
	wg := sync.WaitGroup{}
	fre := &fakeRequestExecutor{
		hits:            make(map[string]int),
		beginResponding: sync.NewCond(&sync.Mutex{}),
	}
	coalescer := &requestCoalescer{
		cache:           make(map[string]*firstRequest),
		requestExecutor: fre,
		hasher:          ghmetrics.NewCachingHasher(),
	}

	checkMode := func(resp *http.Response, expected CacheResponseMode) {
		mode := CacheResponseMode(resp.Header.Get(CacheModeHeader))
		if mode != expected {
			t.Errorf("Expected cache mode %s, but got %s.", string(expected), string(mode))
		}
	}

	// Queue an initial request for resource1.
	// This should eventually return ModeMiss.
	wg.Add(1)
	go func() {
		if resp, err := runRequest(coalescer, "/resource1", false); err != nil {
			t.Errorf("Failed to run request: %v.", err)
		} else {
			checkMode(resp, ModeMiss)
		}
		wg.Done()
	}()
	// There is a race here and wherever sleeps are used below.
	// We need to wait for the initial request to be made
	// to the coalescer before letting upstream respond, but we don't have a way
	// of knowing when the request has actually started waiting on the
	// responseWaiter...
	time.Sleep(time.Second * 3)

	// Queue a second request for resource1.
	// This should coalesce and eventually return ModeCoalesced.
	wg.Add(1)
	go func() {
		if resp, err := runRequest(coalescer, "/resource1", false); err != nil {
			t.Errorf("Failed to run request: %v.", err)
		} else {
			checkMode(resp, ModeCoalesced)
		}
		wg.Done()
	}()
	time.Sleep(time.Second * 3)

	// Requests should be waiting now. Start responding and wait for all
	// downstream responses to return.
	fre.beginResponding.Broadcast()
	wg.Wait()

	// A later request for resource1 revalidates the cached response.
	// This should return ModeRevalidated.
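	// Setting this header on the fake upstream's next response makes the
	// coalescer report the request as a revalidation of the cached copy.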
	header := http.Header{}
	header.Set("Status", "304 Not Modified")
	fre.responseHeader = header
	if resp, err := runRequest(coalescer, "/resource1", true); err != nil {
		t.Errorf("Failed to run request: %v.", err)
	} else {
		checkMode(resp, ModeRevalidated)
	}

	// Another request for resource1 after the resource has changed.
	// This should return ModeChanged.
	header = http.Header{}
	header.Set("X-Conditional-Request", "I am an E-Tag.")
	fre.responseHeader = header
	if resp, err := runRequest(coalescer, "/resource1", true); err != nil {
		t.Errorf("Failed to run request: %v.", err)
	} else {
		checkMode(resp, ModeChanged)
	}

	// Request for new resource2 with no concurrent requests.
	// This should return ModeMiss.
	fre.responseHeader = nil
	if resp, err := runRequest(coalescer, "/resource2", true); err != nil {
		t.Errorf("Failed to run request: %v.", err)
	} else {
		checkMode(resp, ModeMiss)
	}

	// Request for uncacheable resource3.
	// This should return ModeNoStore.
	header = http.Header{}
	header.Set("Cache-Control", "no-store")
	fre.responseHeader = header
	if resp, err := runRequest(coalescer, "/resource3", true); err != nil {
		t.Errorf("Failed to run request: %v.", err)
	} else {
		checkMode(resp, ModeNoStore)
	}

	// We never send ModeError in a header because we never return an
	// http.Response if there is an error. ModeError is only used for metrics.

	// Might as well mind the hit count in this test too.
	expectedHits := map[string]int{"/resource1": 3, "/resource2": 1, "/resource3": 1}
	if !reflect.DeepEqual(fre.hits, expectedHits) {
		t.Errorf("Unexpected hit count(s). Diff: %v.", diff.ObjectReflectDiff(expectedHits, fre.hits))
	}
}

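// runRequest issues a GET for the given URI through rt and verifies the response
// body. When immediate is true, the request asks the fake upstream to respond
// without waiting to be signaled. It returns an error if the request does not
// complete within ten seconds.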
func runRequest(rt http.RoundTripper, uri string, immediate bool) (*http.Response, error) {
	u, err := url.Parse("http://foo.com" + uri)
	if err != nil {
		return nil, err
	}
	req, err := http.NewRequest(http.MethodGet, u.String(), nil)
	if err != nil {
		return nil, err
	}
	if immediate {
		req.Header.Set("test-immediate-response", "true")
	}

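	// Run the request in a goroutine so the select below can enforce a timeout:
	// RoundTrip may block until the fake upstream is signaled to respond.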
	waitChan := make(chan struct{})
	var resp *http.Response
	go func() {
		defer close(waitChan)
		resp, err = rt.RoundTrip(req)
		if err == nil {
			if b, readErr := io.ReadAll(resp.Body); readErr != nil {
				err = readErr
			} else if string(b) != "Response" {
				err = errors.New("unexpected response value")
			}
		}
	}()

	select {
	case <-time.After(time.Second * 10):
		return nil, fmt.Errorf("Request for %q timed out.", uri)
	case <-waitChan:
		return resp, err
	}
}