github.com/munnerz/test-infra@v0.0.0-20190108210205-ce3d181dc989/ghproxy/ghcache/coalesce.go

/*
Copyright 2018 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package ghcache

import (
	"bufio"
	"bytes"
	"net/http"
	"net/http/httputil"
	"sync"

	"github.com/sirupsen/logrus"
)

// requestCoalescer allows concurrent requests for the same URI to share a
// single upstream request and response.
type requestCoalescer struct {
	sync.Mutex
	keys map[string]*responseWaiter

	delegate http.RoundTripper
}

type responseWaiter struct {
	*sync.Cond

	waiting bool
	resp    []byte
	err     error
}

// RoundTrip coalesces concurrent GET requests for the same URI by blocking
// the later requests until the first request returns and then sharing the
// response between all requests.
//
// Note: Deadlock should not be possible because the map lock is always
// acquired before a responseWaiter lock when both locks are held, and we
// never hold more than one responseWaiter lock at a time.
func (r *requestCoalescer) RoundTrip(req *http.Request) (*http.Response, error) {
	// Only coalesce GET requests.
	if req.Method != http.MethodGet {
		return r.delegate.RoundTrip(req)
	}

	var cacheMode = ModeError
	defer func() {
		cacheCounter.WithLabelValues(cacheMode).Inc()
	}()

	key := req.URL.String()
	r.Lock()
	waiter, ok := r.keys[key]
	if ok {
		// Earlier request in flight. Wait for its response.
		if req.Body != nil {
			defer req.Body.Close() // Since we won't pass the request we must close it.
		}
		waiter.L.Lock()
		r.Unlock()
		waiter.waiting = true
		// The documentation for Wait() says:
		// "Because c.L is not locked when Wait first resumes, the caller typically
		// cannot assume that the condition is true when Wait returns. Instead, the
		// caller should Wait in a loop."
		// This does not apply to this use of Wait() because the condition we are
		// waiting for remains true once it becomes true. This lets us avoid the
		// normal check to see if the condition has switched back to false between
		// the signal being sent and this thread acquiring the lock.
		waiter.Wait()
		waiter.L.Unlock()
		// Earlier request completed.

		if waiter.err != nil {
			// Don't log the error, it will be logged by requester.
			return nil, waiter.err
		}
		resp, err := http.ReadResponse(bufio.NewReader(bytes.NewBuffer(waiter.resp)), nil)
		if err != nil {
			logrus.WithField("cache-key", key).WithError(err).Error("Error loading response.")
			return nil, err
		}

		cacheMode = ModeCoalesced
		return resp, nil
	}
	// No earlier request in flight (common case).
	// Register a new responseWaiter and make the request ourselves.
	waiter = &responseWaiter{Cond: sync.NewCond(&sync.Mutex{})}
	r.keys[key] = waiter
	r.Unlock()

	resp, err := r.delegate.RoundTrip(req)
	// Real response received. Remove this responseWaiter from the map THEN
	// wake any requesters that were waiting on this response.
	r.Lock()
	delete(r.keys, key)
	r.Unlock()

	waiter.L.Lock()
	if waiter.waiting {
		if err != nil {
			waiter.resp, waiter.err = nil, err
		} else {
			// Copy the response before releasing to waiter(s).
			waiter.resp, waiter.err = httputil.DumpResponse(resp, true)
		}
		waiter.Broadcast()
	}
	waiter.L.Unlock()

	if err != nil {
		logrus.WithField("cache-key", key).WithError(err).Error("Error from cache transport layer.")
		return nil, err
	}
	cacheMode = cacheResponseMode(resp.Header)
	return resp, nil
}
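
For context, a minimal in-package sketch of how this type might be wired in front of an upstream transport. The helper name coalesceExample and the use of http.DefaultTransport as the delegate are illustrative assumptions; the actual ghproxy wiring, including the caching transport this coalescer normally delegates to, is not shown in this file.

// coalesceExample is an illustrative sketch only: it wraps an upstream
// RoundTripper with a requestCoalescer so that concurrent GETs for the same
// URL share a single upstream request. http.DefaultTransport stands in for
// whatever caching transport the package actually delegates to.
func coalesceExample() *http.Client {
	coalescer := &requestCoalescer{
		keys:     make(map[string]*responseWaiter),
		delegate: http.DefaultTransport,
	}
	return &http.Client{Transport: coalescer}
}

Because the winning request shares a serialized copy of the response (via httputil.DumpResponse) and each waiter re-parses its own bytes with http.ReadResponse, every coalesced caller receives an independent *http.Response whose body can be read and closed separately.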