code.gitea.io/gitea@v1.19.3/modules/web/routing/logger_manager.go

// Copyright 2021 The Gitea Authors. All rights reserved.
// SPDX-License-Identifier: MIT

package routing

import (
	"context"
	"net/http"
	"sync"
	"time"

	"code.gitea.io/gitea/modules/graceful"
	"code.gitea.io/gitea/modules/process"
)

// Event indicates when the printer is triggered
type Event int

const (
	// StartEvent at the beginning of a request
	StartEvent Event = iota

	// StillExecutingEvent the request is still executing
	StillExecutingEvent

	// EndEvent the request has ended (either completed or failed)
	EndEvent
)

// Printer is used to output the log for a request
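//
// For a normal request the printer is invoked with StartEvent once and EndEvent
// once; StillExecutingEvent is only emitted when the request outlives the
// slow-query threshold (see startSlowQueryDetector below).
//
// A minimal sketch of a conforming Printer (hypothetical, for illustration only;
// not the printer Gitea actually uses):
//
//	func examplePrinter(trigger Event, record *requestRecord) {
//		switch trigger {
//		case StartEvent:
//			log.Printf("started: %s %s", record.request.Method, record.request.RequestURI)
//		case StillExecutingEvent:
//			log.Printf("still executing: %s %s (%v)", record.request.Method, record.request.RequestURI, time.Since(record.startTime))
//		case EndEvent:
//			log.Printf("completed: %s %s in %v", record.request.Method, record.request.RequestURI, time.Since(record.startTime))
//		}
//	}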
type Printer func(trigger Event, record *requestRecord)

// requestRecordsManager tracks all in-flight requests so that every request gets
// start and end log entries, and long-running requests can be reported while they
// are still executing.
type requestRecordsManager struct {
	print Printer // outputs the log for a request at each Event

	lock sync.Mutex // protects requestRecords and count

	requestRecords map[uint64]*requestRecord // all in-flight requests, keyed by record index
	count          uint64                    // monotonically increasing index for the next record
}

// startSlowQueryDetector starts a background goroutine that periodically scans the
// active request records and reports, via a StillExecutingEvent, every request that
// has been running longer than threshold.
func (manager *requestRecordsManager) startSlowQueryDetector(threshold time.Duration) {
	go graceful.GetManager().RunWithShutdownContext(func(ctx context.Context) {
		ctx, _, finished := process.GetManager().AddTypedContext(ctx, "Service: SlowQueryDetector", process.SystemProcessType, true)
		defer finished()
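		// RunWithShutdownContext runs the callback with a context that is cancelled
		// on Gitea shutdown, and AddTypedContext registers this goroutine with the
		// process manager so it shows up in process listings.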
		// This goroutine checks all active requests every second.
		// If a request has been running for a long time (e.g. /user/events), a log with a "still-executing" message is printed.
		// After that log is printed, the record is removed from the map to prevent duplicate logs for the same request in the future.

		// Accurate durations do not matter here; the check only needs to happen periodically, so effective intervals of 0.5s or 1.5s are both fine.
		t := time.NewTicker(time.Second)
		for {
			select {
			case <-ctx.Done():
				return
			case <-t.C:
				now := time.Now()

				var slowRequests []*requestRecord

				// find all slow requests while holding the lock
				manager.lock.Lock()
				for index, record := range manager.requestRecords {
					if now.Sub(record.startTime) < threshold {
						continue
					}

					slowRequests = append(slowRequests, record)
					delete(manager.requestRecords, index)
				}
				manager.lock.Unlock()

				// print logs for the slow requests, outside the lock
				for _, record := range slowRequests {
					manager.print(StillExecutingEvent, record)
				}
			}
		}
	})
}

// handler returns a middleware that tracks each request: it registers a
// requestRecord in the manager's map, stores the record in the request context
// (under contextKey), prints the StartEvent and EndEvent logs, and removes the
// record when the request finishes.
func (manager *requestRecordsManager) handler(next http.Handler) http.Handler {
	return http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) {
		record := &requestRecord{
			startTime:      time.Now(),
			request:        req,
			responseWriter: w,
		}

		// generate a record index and insert the record into the map
		manager.lock.Lock()
		record.index = manager.count
		manager.count++
		manager.requestRecords[record.index] = record
		manager.lock.Unlock()

		defer func() {
			// recover just in case there is a panic; currently panics are all recovered in middleware.go
			localPanicErr := recover()
			if localPanicErr != nil {
				record.lock.Lock()
				record.panicError = localPanicErr
				record.lock.Unlock()
			}

			// remove the record from the map
			manager.lock.Lock()
			delete(manager.requestRecords, record.index)
			manager.lock.Unlock()

			// log the end of the request
			manager.print(EndEvent, record)

			if localPanicErr != nil {
				// the panic wasn't recovered before this point, so re-panic and let the framework handle it
				panic(localPanicErr)
			}
		}()

		req = req.WithContext(context.WithValue(req.Context(), contextKey, record))
		manager.print(StartEvent, record)
		next.ServeHTTP(w, req)
	})
}
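// A wiring sketch (hypothetical, for illustration only; the real setup lives
// elsewhere in this package):
//
//	manager := &requestRecordsManager{
//		print:          examplePrinter, // the hypothetical printer sketched above
//		requestRecords: map[uint64]*requestRecord{},
//	}
//	manager.startSlowQueryDetector(3 * time.Second)
//	mux.Use(manager.handler) // handler is standard net/http middleware, e.g. for a chi-style mux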