github.com/yankunsam/loki/v2@v2.6.3-0.20220817130409-389df5235c27/clients/pkg/promtail/promtail_test.go

package promtail

import (
	"context"
	"fmt"
	"io"
	"io/ioutil"
	"math"
	"math/rand"
	"net"
	"net/http"
	"net/url"
	"os"
	"path/filepath"
	"sync"
	"testing"
	"time"

	"github.com/go-kit/log"
	"github.com/go-kit/log/level"
	"github.com/grafana/dskit/flagext"
	"github.com/pkg/errors"
	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/common/model"
	"github.com/prometheus/prometheus/discovery"
	"github.com/prometheus/prometheus/discovery/targetgroup"
	"github.com/prometheus/prometheus/model/labels"
	"github.com/prometheus/prometheus/model/textparse"
	"github.com/prometheus/prometheus/promql/parser"
	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
	serverww "github.com/weaveworks/common/server"

	"github.com/grafana/loki/clients/pkg/logentry/stages"
	"github.com/grafana/loki/clients/pkg/promtail/client"
	"github.com/grafana/loki/clients/pkg/promtail/config"
	"github.com/grafana/loki/clients/pkg/promtail/positions"
	"github.com/grafana/loki/clients/pkg/promtail/scrapeconfig"
	"github.com/grafana/loki/clients/pkg/promtail/server"
	pserver "github.com/grafana/loki/clients/pkg/promtail/server"
	file2 "github.com/grafana/loki/clients/pkg/promtail/targets/file"

	"github.com/grafana/loki/pkg/logproto"
	"github.com/grafana/loki/pkg/util"
	util_log "github.com/grafana/loki/pkg/util/log"
)

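// clientMetrics is created once at package scope and shared by the tests below.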
var clientMetrics = client.NewMetrics(prometheus.DefaultRegisterer, nil)

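// TestPromtail is an end-to-end test: it stands up a stub Loki push endpoint on
// localhost:3100, runs promtail against a temporary log directory, exercises
// several file scenarios (single file, file roll, symlink roll, subdirectory,
// pipeline), and then verifies the received entries, labels, and promtail's own
// /metrics output.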
func TestPromtail(t *testing.T) {
	// Setup.
	w := log.NewSyncWriter(os.Stderr)
	logger := log.NewLogfmtLogger(w)
	logger = level.NewFilter(logger, level.AllowInfo())
	util_log.Logger = logger

	initRandom()
	dirName := "/tmp/promtail_test_" + randName()
	positionsFileName := dirName + "/positions.yml"

	err := os.MkdirAll(dirName, 0o750)
	if err != nil {
		t.Error(err)
		return
	}

	defer func() { _ = os.RemoveAll(dirName) }()

	testDir := dirName + "/logs"
	err = os.MkdirAll(testDir, 0o750)
	if err != nil {
		t.Error(err)
		return
	}

	handler := &testServerHandler{
		receivedMap:    map[string][]logproto.Entry{},
		receivedLabels: map[string][]labels.Labels{},
		recMtx:         sync.Mutex{},
		t:              t,
	}
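	// Register the stub Loki push endpoint on the default mux; promtail's client
	// is configured (in buildTestConfig) to push to localhost:3100.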
	http.Handle("/loki/api/v1/push", handler)
	var (
		wg        sync.WaitGroup
		listenErr error
		server    = &http.Server{Addr: "localhost:3100", Handler: nil}
	)
	defer func() {
		if t.Failed() {
			return // Test has already failed; don't wait for everything to shut down.
		}
		fmt.Fprintf(os.Stdout, "wait close")
		wg.Wait()
		if err != nil {
			t.Fatal(err)
		}
		if listenErr != nil && listenErr != http.ErrServerClosed {
			t.Fatal(listenErr)
		}
	}()
	wg.Add(1)
	go func() {
		defer wg.Done()
		listenErr = server.ListenAndServe()
	}()
	defer func() {
		_ = server.Shutdown(context.Background())
	}()

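	// Start promtail with the test configuration; it tails *.log files under
	// testDir and pushes entries to the stub server above.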
	p, err := New(buildTestConfig(t, positionsFileName, testDir), clientMetrics, false, nil)
	if err != nil {
		t.Error("error creating promtail", err)
		return
	}
	wg.Add(1)
	go func() {
		defer wg.Done()
		err = p.Run()
		if err != nil {
			err = errors.Wrap(err, "Failed to start promtail")
		}
	}()
	defer p.Shutdown() // In case the test fails before the call to Shutdown below.

	svr := p.server.(*pserver.PromtailServer)

	httpListenAddr := svr.Server.HTTPListenAddr()

	expectedCounts := map[string]int{}

	startupMarkerFile := testDir + "/startupMarker.log"
	expectedCounts[startupMarkerFile] = createStartupFile(t, startupMarkerFile)

	// Wait for promtail to start up and send the entry from our startup marker file.
	if err := waitForEntries(10, handler, expectedCounts); err != nil {
		t.Fatal("Timed out waiting for promtail to start")
	}

	// Run test file scenarios.

	logFile1 := testDir + "/testSingle.log"
	prefix1 := "single"
	expectedCounts[logFile1] = singleFile(t, logFile1, prefix1)

	logFile2 := testDir + "/testFileRoll.log"
	prefix2 := "roll"
	expectedCounts[logFile2] = fileRoll(t, logFile2, prefix2)

	logFile3 := testDir + "/testSymlinkRoll.log"
	prefix3 := "sym"
	expectedCounts[logFile3] = symlinkRoll(t, testDir, logFile3, prefix3)

	logFile4 := testDir + "/testsubdir/testFile.log"
	prefix4 := "sub"
	expectedCounts[logFile4] = subdirSingleFile(t, logFile4, prefix4)

	logFile5 := testDir + "/testPipeline.log"
	entries := []string{
		`{"log":"11.11.11.11 - frank [25/Jan/2000:14:00:01 -0500] \"GET /1986.js HTTP/1.1\" 200 932 \"-\" \"Mozilla/5.0 (Windows; U; Windows NT 5.1; de; rv:1.9.1.7) Gecko/20091221 Firefox/3.5.7 GTB6\"","stream":"stderr","time":"2019-04-30T02:12:41.8443515Z"}`,
		`{"log":"11.11.11.12 - - [19/May/2015:04:05:16 -0500] \"POST /blog HTTP/1.1\" 200 10975 \"http://grafana.com/test/\" \"Mozilla/5.0 (Windows NT 6.1; WOW64) Gecko/20091221 Firefox/3.5.7 GTB6\"","stream":"stdout","time":"2019-04-30T02:12:42.8443515Z"}`,
	}
	expectedCounts[logFile5] = pipelineFile(t, logFile5, entries)
	expectedEntries := make(map[string]int)
	entriesArray := []string{
		`11.11.11.11 - frank [25/Jan/2000:14:00:01 -0500] "GET /1986.js HTTP/1.1" 200 932 "-" "Mozilla/5.0 (Windows; U; Windows NT 5.1; de; rv:1.9.1.7) Gecko/20091221 Firefox/3.5.7 GTB6"`,
		`11.11.11.12 - - [19/May/2015:04:05:16 -0500] "POST /blog HTTP/1.1" 200 10975 "http://grafana.com/test/" "Mozilla/5.0 (Windows NT 6.1; WOW64) Gecko/20091221 Firefox/3.5.7 GTB6"`,
	}
	for i, entry := range entriesArray {
		expectedEntries[entry] = i
	}
	lbls := []labels.Labels{}
	lbls = append(lbls, labels.Labels{
		labels.Label{Name: "action", Value: "GET"},
		labels.Label{Name: "filename", Value: dirName + "/logs/testPipeline.log"},
		labels.Label{Name: "job", Value: "varlogs"},
		labels.Label{Name: "localhost", Value: ""},
		labels.Label{Name: "match", Value: "true"},
		labels.Label{Name: "stream", Value: "stderr"},
	})

	lbls = append(lbls, labels.Labels{
		labels.Label{Name: "action", Value: "POST"},
		labels.Label{Name: "filename", Value: dirName + "/logs/testPipeline.log"},
		labels.Label{Name: "job", Value: "varlogs"},
		labels.Label{Name: "localhost", Value: ""},
		labels.Label{Name: "match", Value: "true"},
		labels.Label{Name: "stream", Value: "stdout"},
	})
	expectedLabels := make(map[string]int)
	for i, label := range lbls {
		expectedLabels[label.String()] = i
	}

	// Wait for all lines to be received.
	if err := waitForEntries(20, handler, expectedCounts); err != nil {
		t.Fatal("Timed out waiting for log entries: ", err)
	}

	// Delete one of the log files so we can verify its metrics are cleaned up.
	err = os.Remove(logFile1)
	if err != nil {
		t.Fatal("Could not delete a log file to verify metrics are removed: ", err)
	}

	// The sync period is 500ms in tests; wait at least one sync period for the tailer to be cleaned up.
	<-time.After(500 * time.Millisecond)

	// Pull out some Prometheus metrics before shutting down.
	metricsBytes, contentType := getPromMetrics(t, httpListenAddr)

	p.Shutdown()

	// Verify.
	verifyFile(t, expectedCounts[logFile1], prefix1, handler.receivedMap[logFile1])
	verifyFile(t, expectedCounts[logFile2], prefix2, handler.receivedMap[logFile2])
	verifyFile(t, expectedCounts[logFile3], prefix3, handler.receivedMap[logFile3])
	verifyFile(t, expectedCounts[logFile4], prefix4, handler.receivedMap[logFile4])
	verifyPipeline(t, expectedCounts[logFile5], expectedEntries, handler.receivedMap[logFile5], handler.receivedLabels[logFile5], expectedLabels)

	if len(handler.receivedMap) != len(expectedCounts) {
		t.Error("Somehow we ended up tailing more files than we were supposed to; this is likely a bug")
	}

	readBytesMetrics := parsePromMetrics(t, metricsBytes, contentType, "promtail_read_bytes_total", "path")
	fileBytesMetrics := parsePromMetrics(t, metricsBytes, contentType, "promtail_file_bytes_total", "path")

	verifyMetricAbsent(t, readBytesMetrics, "promtail_read_bytes_total", logFile1)
	verifyMetricAbsent(t, fileBytesMetrics, "promtail_file_bytes_total", logFile1)

	verifyMetric(t, readBytesMetrics, "promtail_read_bytes_total", logFile2, 800)
	verifyMetric(t, fileBytesMetrics, "promtail_file_bytes_total", logFile2, 800)

	verifyMetric(t, readBytesMetrics, "promtail_read_bytes_total", logFile3, 700)
	verifyMetric(t, fileBytesMetrics, "promtail_file_bytes_total", logFile3, 700)

	verifyMetric(t, readBytesMetrics, "promtail_read_bytes_total", logFile4, 590)
	verifyMetric(t, fileBytesMetrics, "promtail_file_bytes_total", logFile4, 590)
}

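// createStartupFile writes a single marker line that promtail should ship once
// it has started tailing; it returns the expected entry count (1).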
func createStartupFile(t *testing.T, filename string) int {
	f, err := os.Create(filename)
	if err != nil {
		t.Fatal(err)
	}
	_, err = f.WriteString("marker\n")
	if err != nil {
		t.Fatal(err)
	}
	return 1
}

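// verifyFile checks that the entries received for a file are exactly
// prefix0..prefix(expected-1), in order.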
func verifyFile(t *testing.T, expected int, prefix string, entries []logproto.Entry) {
	for i := 0; i < expected; i++ {
		if entries[i].Line != fmt.Sprintf("%s%d", prefix, i) {
			t.Errorf("Received out of order or incorrect log event, expected %s%d, received %s", prefix, i, entries[i].Line)
		}
	}
}

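// verifyPipeline checks that every received entry and label set for the
// pipeline test file is one of the expected ones.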
func verifyPipeline(t *testing.T, expected int, expectedEntries map[string]int, entries []logproto.Entry, labels []labels.Labels, expectedLabels map[string]int) {
	for i := 0; i < expected; i++ {
		if _, ok := expectedLabels[labels[i].String()]; !ok {
			t.Errorf("Did not receive expected labels, expected %v, received %s", expectedLabels, labels[i])
		}
	}

	for i := 0; i < expected; i++ {
		if _, ok := expectedEntries[entries[i].Line]; !ok {
			t.Errorf("Did not receive expected log entry, expected %v, received %s", expectedEntries, entries[i].Line)
		}
	}
}

func verifyMetricAbsent(t *testing.T, metrics map[string]float64, metric string, label string) {
	if _, ok := metrics[label]; ok {
		t.Error("Found metric", metric, "with label", label, "which was not expected, "+
			"this metric should not be present")
	}
}

func verifyMetric(t *testing.T, metrics map[string]float64, metric string, label string, expected float64) {
	if _, ok := metrics[label]; !ok {
		t.Error("Expected to find metric ", metric, " with", label, "but it was not present")
	} else {
		actualBytes := metrics[label]
		assert.Equal(t, expected, actualBytes, "found incorrect value for metric %s and label %s", metric, label)
	}
}

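// singleFile writes 100 numbered lines to a new file and returns the number of
// entries promtail is expected to ship for it.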
func singleFile(t *testing.T, filename string, prefix string) int {
	f, err := os.Create(filename)
	if err != nil {
		t.Fatal(err)
	}
	entries := 100
	for i := 0; i < entries; i++ {
		entry := fmt.Sprintf("%s%d\n", prefix, i)
		_, err = f.WriteString(entry)
		if err != nil {
			t.Fatal(err)
		}
		time.Sleep(1 * time.Millisecond)
	}

	return entries
}

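// pipelineFile writes the given pre-formatted (docker-style JSON) entries to a
// new file and returns how many were written.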
func pipelineFile(t *testing.T, filename string, entries []string) int {
	f, err := os.Create(filename)
	if err != nil {
		t.Fatal(err)
	}

	for _, entry := range entries {
		line := fmt.Sprintf("%s\n", entry)
		_, err = f.WriteString(line)
		if err != nil {
			t.Fatal(err)
		}
		time.Sleep(1 * time.Millisecond)
	}

	return len(entries)
}

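// fileRoll writes 100 lines, renames the file to simulate log rotation, then
// writes 100 more lines to a fresh file with the original name; 200 entries are
// expected in total.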
func fileRoll(t *testing.T, filename string, prefix string) int {
	f, err := os.Create(filename)
	if err != nil {
		t.Fatal(err)
	}
	for i := 0; i < 100; i++ {
		entry := fmt.Sprintf("%s%d\n", prefix, i)
		_, err = f.WriteString(entry)
		if err != nil {
			t.Fatal(err)
		}
		time.Sleep(1 * time.Millisecond)
	}

	if err = os.Rename(filename, filename+".1"); err != nil {
		t.Fatal("Failed to rename file for test: ", err)
	}
	f, err = os.Create(filename)
	if err != nil {
		t.Fatal(err)
	}
	for i := 100; i < 200; i++ {
		entry := fmt.Sprintf("%s%d\n", prefix, i)
		_, err = f.WriteString(entry)
		if err != nil {
			t.Fatal(err)
		}
		time.Sleep(1 * time.Millisecond)
	}

	return 200
}

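// symlinkRoll writes logs to files that don't match the *.log glob and exposes
// them through a .log symlink, swapping the symlink target halfway through to
// simulate rotation behind a symlink; 200 entries are expected in total.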
func symlinkRoll(t *testing.T, testDir string, filename string, prefix string) int {
	symlinkDir := testDir + "/symlink"
	if err := os.Mkdir(symlinkDir, 0o750); err != nil {
		t.Fatal(err)
	}

	// Create a file for the logs; make sure it doesn't end in .log so it is only
	// tailed through the symlink.
	symlinkFile := symlinkDir + "/log1.notail"
	f, err := os.Create(symlinkFile)
	if err != nil {
		t.Fatal(err)
	}

	// Link to that file with the provided file name.
	if err := os.Symlink(symlinkFile, filename); err != nil {
		t.Fatal(err)
	}
	for i := 0; i < 100; i++ {
		entry := fmt.Sprintf("%s%d\n", prefix, i)
		_, err = f.WriteString(entry)
		if err != nil {
			t.Fatal(err)
		}
		time.Sleep(1 * time.Millisecond)
	}

	// Remove the link, make a new file, link to the new file.
	if err := os.Remove(filename); err != nil {
		t.Fatal(err)
	}
	symlinkFile2 := symlinkDir + "/log2.notail"
	f, err = os.Create(symlinkFile2)
	if err != nil {
		t.Fatal(err)
	}
	if err := os.Symlink(symlinkFile2, filename); err != nil {
		t.Fatal(err)
	}
	for i := 100; i < 200; i++ {
		entry := fmt.Sprintf("%s%d\n", prefix, i)
		_, err = f.WriteString(entry)
		if err != nil {
			t.Fatal(err)
		}
		time.Sleep(1 * time.Millisecond)
	}

	return 200
}

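// subdirSingleFile writes 100 numbered lines to a file in a subdirectory,
// exercising the ** part of the path glob.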
func subdirSingleFile(t *testing.T, filename string, prefix string) int {
	if err := os.MkdirAll(filepath.Dir(filename), 0o750); err != nil {
		t.Fatal(err)
	}
	f, err := os.Create(filename)
	if err != nil {
		t.Fatal(err)
	}
	entries := 100
	for i := 0; i < entries; i++ {
		entry := fmt.Sprintf("%s%d\n", prefix, i)
		_, err = f.WriteString(entry)
		if err != nil {
			t.Fatal(err)
		}
		time.Sleep(1 * time.Millisecond)
	}

	return entries
}

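// waitForEntries polls the test server handler every 100ms for up to timeoutSec
// seconds, until every file in expectedCounts has exactly the expected number
// of received entries.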
func waitForEntries(timeoutSec int, handler *testServerHandler, expectedCounts map[string]int) error {
	timeout := timeoutSec * 10
	for timeout > 0 {
		countReady := 0
		for file, expectedCount := range expectedCounts {
			handler.recMtx.Lock()
			if rcvd, ok := handler.receivedMap[file]; ok && len(rcvd) == expectedCount {
				countReady++
			}
			handler.recMtx.Unlock()
		}
		if countReady == len(expectedCounts) {
			break
		}
		time.Sleep(100 * time.Millisecond)
		timeout--
	}

	if timeout <= 0 {
		waiting := ""
		for file, expectedCount := range expectedCounts {
			if rcvd, ok := handler.receivedMap[file]; !ok || len(rcvd) != expectedCount {
				waiting = waiting + " " + file
				for _, e := range rcvd {
					level.Info(util_log.Logger).Log("file", file, "entry", e.Line)
				}
			}
		}
		return errors.New("still waiting for logs from" + waiting)
	}
	return nil
}

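// testServerHandler is a stub Loki push endpoint that records the entries and
// parsed label sets it receives, keyed by the filename label.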
type testServerHandler struct {
	receivedMap    map[string][]logproto.Entry
	receivedLabels map[string][]labels.Labels
	recMtx         sync.Mutex
	t              *testing.T
}

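// ServeHTTP decodes a snappy-compressed protobuf push request and records each
// stream's entries and labels under its filename label.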
func (h *testServerHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
	var req logproto.PushRequest
	if err := util.ParseProtoReader(r.Context(), r.Body, int(r.ContentLength), math.MaxInt32, &req, util.RawSnappy); err != nil {
		http.Error(w, err.Error(), http.StatusBadRequest)
		return
	}
	h.recMtx.Lock()
	defer h.recMtx.Unlock() // Unlock even on the early returns below.
	for _, s := range req.Streams {
		parsedLabels, err := parser.ParseMetric(s.Labels)
		if err != nil {
			h.t.Error("Failed to parse incoming labels", err)
			return
		}
		file := ""
		for _, label := range parsedLabels {
			if label.Name == file2.FilenameLabel {
				file = label.Value
				continue
			}
		}
		if file == "" {
			h.t.Error("Expected to find a label with name `filename` but did not!")
			return
		}

		h.receivedMap[file] = append(h.receivedMap[file], s.Entries...)
		h.receivedLabels[file] = append(h.receivedLabels[file], parsedLabels)
	}
}

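// getPromMetrics scrapes promtail's own /metrics endpoint and returns the raw
// body along with its Content-Type (needed by the parser).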
func getPromMetrics(t *testing.T, httpListenAddr net.Addr) ([]byte, string) {
	resp, err := http.Get(fmt.Sprintf("http://%s/metrics", httpListenAddr))
	if err != nil {
		t.Fatal("Could not query metrics endpoint", err)
	}
	defer resp.Body.Close()

	if resp.StatusCode != http.StatusOK {
		t.Fatal("Received a non 200 status code from /metrics endpoint", resp.StatusCode)
	}

	b, err := ioutil.ReadAll(resp.Body)
	if err != nil {
		t.Fatal("Error reading response body from /metrics endpoint", err)
	}
	ct := resp.Header.Get("Content-Type")
	return b, ct
}

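// parsePromMetrics extracts all series of metricName from an exposition-format
// payload, keyed by the value of the given label.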
func parsePromMetrics(t *testing.T, bytes []byte, contentType string, metricName string, label string) map[string]float64 {
	rb := map[string]float64{}

	pr, err := textparse.New(bytes, contentType)
	require.NoError(t, err)
	for {
		et, err := pr.Next()
		if err == io.EOF {
			break
		}
		if err != nil {
			t.Fatal("Failed to parse prometheus metrics", err)
		}
		switch et {
		case textparse.EntrySeries:
			var res labels.Labels
			_, _, v := pr.Series()
			pr.Metric(&res)
			switch res.Get(labels.MetricName) {
			case metricName:
				rb[res.Get(label)] = v
				continue
			default:
				continue
			}
		default:
			continue
		}
	}
	return rb
}

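// buildTestConfig assembles a promtail config for the test: defaults plus a
// localhost:3100 push client, fast batching and sync periods, a pipeline
// (docker -> regex -> timestamp -> labels) gated on {match="true"}, and a
// static target that globs *.log files under logDirName.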
func buildTestConfig(t *testing.T, positionsFileName string, logDirName string) config.Config {
	var clientURL flagext.URLValue
	err := clientURL.Set("http://localhost:3100/loki/api/v1/push")
	if err != nil {
		t.Fatal("Failed to parse client URL")
	}

	cfg := config.Config{}
	// Init everything with default values.
	flagext.RegisterFlags(&cfg)

	const hostname = "localhost"
	cfg.ServerConfig.HTTPListenAddress = hostname
	cfg.ServerConfig.ExternalURL = hostname
	cfg.ServerConfig.GRPCListenAddress = hostname

	// NOTE: setting the ports to 0 makes the server bind to unused random ports,
	// which keeps the test self-contained and lets tests run in parallel.
	cfg.ServerConfig.HTTPListenPort = 0
	cfg.ServerConfig.GRPCListenPort = 0

	// Override some of those defaults
	cfg.ClientConfig.URL = clientURL
	cfg.ClientConfig.BatchWait = 10 * time.Millisecond
	cfg.ClientConfig.BatchSize = 10 * 1024

	cfg.PositionsConfig.SyncPeriod = 100 * time.Millisecond
	cfg.PositionsConfig.PositionsFile = positionsFileName

	pipeline := stages.PipelineStages{
		stages.PipelineStage{
			stages.StageTypeMatch: stages.MatcherConfig{
				PipelineName: nil,
				Selector:     "{match=\"true\"}",
				Stages: stages.PipelineStages{
					stages.PipelineStage{
						stages.StageTypeDocker: nil,
					},
					stages.PipelineStage{
						stages.StageTypeRegex: stages.RegexConfig{
							Expression: "^(?P<ip>\\S+) (?P<identd>\\S+) (?P<user>\\S+) \\[(?P<timestamp>[\\w:/]+\\s[+\\-]\\d{4})\\] \"(?P<action>\\S+)\\s?(?P<path>\\S+)?\\s?(?P<protocol>\\S+)?\" (?P<status>\\d{3}|-) (?P<size>\\d+|-)\\s?\"?(?P<referer>[^\"]*)\"?\\s?\"?(?P<useragent>[^\"]*)?\"?$",
							Source:     nil,
						},
					},
					stages.PipelineStage{
						stages.StageTypeTimestamp: stages.TimestampConfig{
							Source: "timestamp",
							Format: "02/Jan/2006:15:04:05 -0700",
						},
					},
					stages.PipelineStage{
						stages.StageTypeLabel: stages.LabelsConfig{
							"action": nil,
						},
					},
				},
			},
		},
	}

	targetGroup := targetgroup.Group{
		Targets: []model.LabelSet{{
			"localhost": "",
		}},
		Labels: model.LabelSet{
			"job":      "varlogs",
			"match":    "true",
			"__path__": model.LabelValue(logDirName + "/**/*.log"),
		},
		Source: "",
	}
	scrapeConfig := scrapeconfig.Config{
		JobName:        "",
		PipelineStages: pipeline,
		RelabelConfigs: nil,
		ServiceDiscoveryConfig: scrapeconfig.ServiceDiscoveryConfig{
			StaticConfigs: discovery.StaticConfig{
				&targetGroup,
			},
		},
	}

	cfg.ScrapeConfig = append(cfg.ScrapeConfig, scrapeConfig)

	// Make sure the SyncPeriod is fast for test purposes, but not faster than the poll interval (250ms)
	// to avoid a race between the sync() function and the tailers noticing when files are deleted
	cfg.TargetConfig.SyncPeriod = 500 * time.Millisecond

	return cfg
}

func initRandom() {
	rand.Seed(time.Now().UnixNano())
}

var letters = []rune("abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ")

func randName() string {
	b := make([]rune, 10)
	for i := range b {
		b[i] = letters[rand.Intn(len(letters))]
	}
	return string(b)
}

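// Test_DryRun checks that dry-run mode rejects an empty config, accepts a
// minimal one, and that a non-dry-run promtail gets a real MultiClient.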
func Test_DryRun(t *testing.T) {
	f, err := ioutil.TempFile("/tmp", "Test_DryRun")
	require.NoError(t, err)
	defer os.Remove(f.Name())

	_, err = New(config.Config{}, clientMetrics, true, nil)
	require.Error(t, err)

	// Set the minimum config needed to start a server. We need to do this since we
	// aren't doing any CLI parsing via RegisterFlags and thus don't get the defaults.
	// Required because a hardcoded value became a configuration setting in this commit:
	// https://github.com/weaveworks/common/commit/c44eeb028a671c5931b047976f9a0171910571ce
	serverCfg := server.Config{
		Config: serverww.Config{
			HTTPListenNetwork: serverww.DefaultNetwork,
			GRPCListenNetwork: serverww.DefaultNetwork,
		},
	}

	prometheus.DefaultRegisterer = prometheus.NewRegistry() // Reset the registry; otherwise a second weaveworks server can't be created.
	_, err = New(config.Config{
		ServerConfig: serverCfg,
		ClientConfig: client.Config{URL: flagext.URLValue{URL: &url.URL{Host: "string"}}},
		PositionsConfig: positions.Config{
			PositionsFile: f.Name(),
			SyncPeriod:    time.Second,
		},
	}, clientMetrics, true, nil)
	require.NoError(t, err)

	prometheus.DefaultRegisterer = prometheus.NewRegistry()

	p, err := New(config.Config{
		ServerConfig: serverCfg,
		ClientConfig: client.Config{URL: flagext.URLValue{URL: &url.URL{Host: "string"}}},
		PositionsConfig: positions.Config{
			PositionsFile: f.Name(),
			SyncPeriod:    time.Second,
		},
	}, clientMetrics, false, nil)
	require.NoError(t, err)
	require.IsType(t, &client.MultiClient{}, p.client)
}