github.com/muhammadn/cortex@v1.9.1-0.20220510110439-46bb7000d03d/tools/querytee/proxy_endpoint.go

package querytee

import (
	"fmt"
	"net/http"
	"strconv"
	"sync"
	"time"

	"github.com/go-kit/log"
	"github.com/go-kit/log/level"

	util_log "github.com/cortexproject/cortex/pkg/util/log"
)

// ResponsesComparator compares the response body from the preferred backend (expected)
// against the response body from a secondary backend (actual) and returns an error
// if they don't match.
type ResponsesComparator interface {
	Compare(expected, actual []byte) error
}
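
// exactBytesComparator is an illustrative sketch, not part of the original file:
// a minimal ResponsesComparator that treats any byte-level difference as a
// mismatch. Real comparators (e.g. for Prometheus-style JSON responses) would
// normalise the payload before comparing.
type exactBytesComparator struct{}

func (exactBytesComparator) Compare(expected, actual []byte) error {
	if string(expected) != string(actual) {
		return fmt.Errorf("response bodies differ: expected %d bytes, got %d bytes", len(expected), len(actual))
	}
	return nil
}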

// ProxyEndpoint sends every incoming request to all configured backends, picks
// one response to return to the client, and optionally compares the backend
// responses against each other.
type ProxyEndpoint struct {
	backends   []*ProxyBackend
	metrics    *ProxyMetrics
	logger     log.Logger
	comparator ResponsesComparator

	// Whether this endpoint has a preferred backend configured.
	hasPreferredBackend bool

	// The route name used to track metrics.
	routeName string
}

// NewProxyEndpoint creates a ProxyEndpoint proxying requests to the given backends.
func NewProxyEndpoint(backends []*ProxyBackend, routeName string, metrics *ProxyMetrics, logger log.Logger, comparator ResponsesComparator) *ProxyEndpoint {
	hasPreferredBackend := false
	for _, backend := range backends {
		if backend.preferred {
			hasPreferredBackend = true
			break
		}
	}

	return &ProxyEndpoint{
		backends:            backends,
		routeName:           routeName,
		metrics:             metrics,
		logger:              logger,
		comparator:          comparator,
		hasPreferredBackend: hasPreferredBackend,
	}
}
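
// exampleRegisterEndpoint is an illustrative sketch, not part of the original file:
// it shows how a ProxyEndpoint could be mounted on a standard mux once the
// backends, metrics and comparator have been built. The route name and path
// used here are assumptions made for the example.
func exampleRegisterEndpoint(mux *http.ServeMux, backends []*ProxyBackend, metrics *ProxyMetrics, logger log.Logger, comparator ResponsesComparator) {
	endpoint := NewProxyEndpoint(backends, "api_v1_query", metrics, logger, comparator)
	mux.Handle("/api/v1/query", endpoint)
}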

// ServeHTTP forwards the request to all backends and writes back the first
// response that is suitable to be sent to the downstream client.
func (p *ProxyEndpoint) ServeHTTP(w http.ResponseWriter, r *http.Request) {
	level.Debug(p.logger).Log("msg", "Received request", "path", r.URL.Path, "query", r.URL.RawQuery)

	// Send the same request to all backends.
	resCh := make(chan *backendResponse, len(p.backends))
	go p.executeBackendRequests(r, resCh)

	// Wait for the first response that's feasible to be sent back to the client.
	downstreamRes := p.waitBackendResponseForDownstream(resCh)

	if downstreamRes.err != nil {
		http.Error(w, downstreamRes.err.Error(), http.StatusInternalServerError)
	} else {
		w.WriteHeader(downstreamRes.status)
		if _, err := w.Write(downstreamRes.body); err != nil {
			level.Warn(p.logger).Log("msg", "Unable to write response", "err", err)
		}
	}

	p.metrics.responsesTotal.WithLabelValues(downstreamRes.backend.name, r.Method, p.routeName).Inc()
}

// executeBackendRequests forwards the request to each backend in a dedicated
// goroutine, pushes every response to resCh and, once all backends have
// replied, optionally compares the preferred and secondary responses.
func (p *ProxyEndpoint) executeBackendRequests(r *http.Request, resCh chan *backendResponse) {
	responses := make([]*backendResponse, 0, len(p.backends))

	var (
		wg  = sync.WaitGroup{}
		mtx = sync.Mutex{}
	)
	wg.Add(len(p.backends))

	for _, b := range p.backends {
		b := b // Shadow the loop variable so the goroutine captures the right backend.

		go func() {
			defer wg.Done()

			start := time.Now()
			status, body, err := b.ForwardRequest(r)
			elapsed := time.Since(start)

			res := &backendResponse{
				backend: b,
				status:  status,
				body:    body,
				err:     err,
			}

			// Log with a level based on the backend response.
			lvl := level.Debug
			if !res.succeeded() {
				lvl = level.Warn
			}

			lvl(p.logger).Log("msg", "Backend response", "path", r.URL.Path, "query", r.URL.RawQuery, "backend", b.name, "status", status, "elapsed", elapsed)
			p.metrics.requestDuration.WithLabelValues(res.backend.name, r.Method, p.routeName, strconv.Itoa(res.statusCode())).Observe(elapsed.Seconds())

			// Keep track of the response so it can be compared once all backends have replied.
			if p.comparator != nil {
				mtx.Lock()
				responses = append(responses, res)
				mtx.Unlock()
			}

			resCh <- res
		}()
	}

	// Wait until all backend requests have completed.
	wg.Wait()
	close(resCh)

	// Compare responses. This assumes exactly two backends are configured:
	// the preferred one (expected) and a secondary one (actual).
	if p.comparator != nil {
		expectedResponse := responses[0]
		actualResponse := responses[1]
		if responses[1].backend.preferred {
			expectedResponse, actualResponse = actualResponse, expectedResponse
		}

		result := comparisonSuccess
		err := p.compareResponses(expectedResponse, actualResponse)
		if err != nil {
			level.Error(util_log.Logger).Log("msg", "response comparison failed", "route-name", p.routeName,
				"query", r.URL.RawQuery, "err", err)
			result = comparisonFailed
		}

		p.metrics.responsesComparedTotal.WithLabelValues(p.routeName, result).Inc()
	}
}

// waitBackendResponseForDownstream waits for backend responses on resCh and
// returns the first one that can be sent back to the downstream client.
func (p *ProxyEndpoint) waitBackendResponseForDownstream(resCh chan *backendResponse) *backendResponse {
	var (
		responses                 = make([]*backendResponse, 0, len(p.backends))
		preferredResponseReceived = false
	)

	for res := range resCh {
		// A successful response can be immediately returned if:
		// - There's no preferred backend configured
		// - Or this response is from the preferred backend
		// - Or the preferred backend response has already been received and wasn't successful
		if res.succeeded() && (!p.hasPreferredBackend || res.backend.preferred || preferredResponseReceived) {
			return res
		}

		// If we received a non-successful response from the preferred backend, then we can
		// return the first successful response received so far (if any).
		if res.backend.preferred && !res.succeeded() {
			preferredResponseReceived = true

			for _, prevRes := range responses {
				if prevRes.succeeded() {
					return prevRes
				}
			}
		}

		// Otherwise we keep track of it for later.
		responses = append(responses, res)
	}

	// No successful response, so let's pick the first one.
	return responses[0]
}

// compareResponses compares the response from the preferred backend (expected)
// against the one from the secondary backend (actual).
func (p *ProxyEndpoint) compareResponses(expectedResponse, actualResponse *backendResponse) error {
	// Compare the response bodies only if both backends returned a 200.
	if expectedResponse.status != 200 {
		return fmt.Errorf("skipped comparison of response because we got status code %d from preferred backend's response", expectedResponse.status)
	}

	if actualResponse.status != 200 {
		return fmt.Errorf("skipped comparison of response because we got status code %d from secondary backend's response", actualResponse.status)
	}

	if expectedResponse.status != actualResponse.status {
		return fmt.Errorf("expected status code %d but got %d", expectedResponse.status, actualResponse.status)
	}

	return p.comparator.Compare(expectedResponse.body, actualResponse.body)
}

// backendResponse holds the outcome of forwarding a request to a single backend.
type backendResponse struct {
	backend *ProxyBackend
	status  int
	body    []byte
	err     error
}

func (r *backendResponse) succeeded() bool {
	if r.err != nil {
		return false
	}

	// We consider the response successful if it's a 2xx or 4xx (but not 429).
	return (r.status >= 200 && r.status < 300) || (r.status >= 400 && r.status < 500 && r.status != 429)
}

// statusCode returns the status code to report in metrics, mapping errors and
// invalid statuses to 500.
func (r *backendResponse) statusCode() int {
	if r.err != nil || r.status <= 0 {
		return 500
	}

	return r.status
}
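
// exampleSucceeded is an illustrative sketch, not part of the original file,
// showing which responses count as successful when picking the downstream
// response: 2xx and 4xx (except 429) succeed, while 429 and 5xx do not.
func exampleSucceeded() {
	notFound := (&backendResponse{status: 404}).succeeded()  // true: 4xx other than 429
	throttled := (&backendResponse{status: 429}).succeeded() // false: rate limited
	serverErr := (&backendResponse{status: 502}).succeeded() // false: 5xx
	fmt.Println(notFound, throttled, serverErr)              // prints: true false false
}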