github.com/grafana/pyroscope@v1.18.0/pkg/util/logger.go (about)

     1  package util
     2  
     3  import (
     4  	"bytes"
     5  	"context"
     6  	"io"
     7  	"os"
     8  	"sync"
     9  	"time"
    10  
    11  	"github.com/go-kit/log"
    12  	"github.com/grafana/dskit/tenant"
    13  	"github.com/grafana/dskit/tracing"
    14  )
    15  
// Logger is the package-level logger. It defaults to a no-op implementation
// (log.NewNopLogger) and is intended to be replaced at process startup by a
// real logger; until then all log calls through it are discarded.
var Logger = log.NewNopLogger()
    18  
    19  // LoggerWithUserID returns a Logger that has information about the current user in
    20  // its details.
    21  func LoggerWithUserID(tenantID string, l log.Logger) log.Logger {
    22  	// See note in WithContext.
    23  	return log.With(l, "tenant", tenantID)
    24  }
    25  
    26  // LoggerWithUserIDs returns a Logger that has information about the current user or
    27  // users (separated by "|") in its details.
    28  func LoggerWithUserIDs(tenantIDs []string, l log.Logger) log.Logger {
    29  	return log.With(l, "tenant", tenant.JoinTenantIDs(tenantIDs))
    30  }
    31  
    32  // LoggerWithTraceID returns a Logger that has information about the traceID in
    33  // its details.
    34  func LoggerWithTraceID(traceID string, l log.Logger) log.Logger {
    35  	// See note in WithContext.
    36  	return log.With(l, "traceID", traceID)
    37  }
    38  
    39  // LoggerWithContext returns a Logger that has information about the current user or users
    40  // and trace in its details.
    41  //
    42  // e.g.
    43  //
    44  //	log = util.WithContext(ctx, log)
    45  //	# level=error tenant=user-1|user-2 traceID=123abc msg="Could not chunk chunks" err="an error"
    46  //	level.Error(log).Log("msg", "Could not chunk chunks", "err", err)
    47  func LoggerWithContext(ctx context.Context, l log.Logger) log.Logger {
    48  	// Weaveworks uses "orgs" and "orgID" to represent Cortex users,
    49  	// even though the code-base generally uses `userID` to refer to the same thing.
    50  	userIDs, err := tenant.TenantIDs(ctx)
    51  	if err == nil {
    52  		l = LoggerWithUserIDs(userIDs, l)
    53  	}
    54  
    55  	traceID, ok := tracing.ExtractSampledTraceID(ctx)
    56  	if !ok {
    57  		return l
    58  	}
    59  
    60  	return LoggerWithTraceID(traceID, l)
    61  }
    62  
    63  // WithSourceIPs returns a Logger that has information about the source IPs in
    64  // its details.
    65  func WithSourceIPs(sourceIPs string, l log.Logger) log.Logger {
    66  	return log.With(l, "sourceIPs", sourceIPs)
    67  }
    68  
// AsyncWriter is a writer that buffers writes and flushes them asynchronously
// in the order they were written. It is safe for concurrent use.
//
// NOTE(review): if the internal flush queue is full, the pending buffer is
// silently dropped rather than blocking the writer (see the non-blocking send
// in enqueueFlush). Errors are also ignored: it's the caller's responsibility
// to handle errors from the underlying writer.
type AsyncWriter struct {
	mu            sync.Mutex         // guards buffer, writes, and closed
	w             io.Writer          // destination for flushed buffers
	pool          sync.Pool          // recycles *bytes.Buffer values between flushes
	buffer        *bytes.Buffer      // current accumulation buffer; nil until first write after a flush
	flushQueue    chan *bytes.Buffer // full buffers waiting for the background loop
	maxSize       int                // byte threshold that triggers a flush
	maxCount      int                // write-count threshold that triggers a flush
	flushInterval time.Duration      // period of the time-based flush ticker
	writes        int                // number of writes into the current buffer
	closeOnce     sync.Once          // ensures Close's shutdown runs exactly once
	close         chan struct{}      // closed to stop the background loop
	done          chan error         // closed by the loop on exit; no error is ever sent
	closed        bool               // set by Close; subsequent writes fail with os.ErrClosed
}
    90  
    91  func NewAsyncWriter(w io.Writer, bufSize, maxBuffers, maxWrites int, flushInterval time.Duration) *AsyncWriter {
    92  	bw := &AsyncWriter{
    93  		w:             w,
    94  		flushQueue:    make(chan *bytes.Buffer, maxBuffers),
    95  		maxSize:       bufSize,
    96  		maxCount:      maxWrites,
    97  		flushInterval: flushInterval,
    98  		close:         make(chan struct{}),
    99  		done:          make(chan error),
   100  		pool: sync.Pool{
   101  			New: func() interface{} {
   102  				return bytes.NewBuffer(make([]byte, 0, bufSize))
   103  			},
   104  		},
   105  	}
   106  	go bw.loop()
   107  	return bw
   108  }
   109  
   110  func (aw *AsyncWriter) Write(p []byte) (int, error) {
   111  	aw.mu.Lock()
   112  	defer aw.mu.Unlock()
   113  	if aw.closed {
   114  		return 0, os.ErrClosed
   115  	}
   116  	if aw.overflows(len(p)) {
   117  		aw.enqueueFlush()
   118  	}
   119  	if aw.buffer == nil {
   120  		aw.buffer = aw.pool.Get().(*bytes.Buffer)
   121  		aw.buffer.Reset()
   122  	}
   123  	aw.writes++
   124  	return aw.buffer.Write(p)
   125  }
   126  
   127  func (aw *AsyncWriter) overflows(n int) bool {
   128  	return aw.buffer != nil && (aw.buffer.Len()+n >= aw.maxSize || aw.writes >= aw.maxCount)
   129  }
   130  
   131  func (aw *AsyncWriter) Close() error {
   132  	aw.closeOnce.Do(func() {
   133  		// Break the loop.
   134  		close(aw.close)
   135  		<-aw.done
   136  		// Empty the queue.
   137  		aw.mu.Lock()
   138  		defer aw.mu.Unlock()
   139  		aw.enqueueFlush()
   140  		close(aw.flushQueue)
   141  		for buf := range aw.flushQueue {
   142  			aw.flushSync(buf)
   143  		}
   144  		aw.closed = true
   145  	})
   146  	return nil
   147  }
   148  
// enqueueFlush hands the current buffer off to the background flush loop and
// resets the accumulation state. It is a no-op when there is no buffered
// data. Callers must hold aw.mu.
//
// The send is deliberately non-blocking: if flushQueue is full, the buffer —
// and the data in it — is silently dropped (it is not returned to the pool
// either; it will be garbage collected). This trades data loss for never
// blocking the writer or deadlocking the loop goroutine, which calls this
// while also being the queue's only consumer.
func (aw *AsyncWriter) enqueueFlush() {
	buf := aw.buffer
	if buf == nil || buf.Len() == 0 {
		return
	}
	// Detach the buffer before the send so the next Write starts fresh
	// even if the send below is dropped.
	aw.buffer = nil
	aw.writes = 0
	select {
	case aw.flushQueue <- buf:
	default:
	}
}
   161  
// loop is the background goroutine started by NewAsyncWriter. It writes
// queued buffers to the underlying writer, periodically forces a flush of
// the current buffer via the ticker, and exits when the close channel is
// closed — signalling its exit by closing done (no error value is ever sent).
func (aw *AsyncWriter) loop() {
	ticker := time.NewTicker(aw.flushInterval)
	defer func() {
		ticker.Stop()
		// Closing done unblocks Close, which then drains flushQueue itself.
		close(aw.done)
	}()

	for {
		select {
		case buf := <-aw.flushQueue:
			// flushSync is called without aw.mu: only this goroutine
			// touches the underlying writer while the loop is running.
			aw.flushSync(buf)

		case <-ticker.C:
			// Time-based flush of whatever has accumulated so far.
			aw.mu.Lock()
			aw.enqueueFlush()
			aw.mu.Unlock()

		case <-aw.close:
			return
		}
	}
}
   184  
   185  func (aw *AsyncWriter) flushSync(b *bytes.Buffer) {
   186  	_, _ = aw.w.Write(b.Bytes())
   187  	aw.pool.Put(b)
   188  }