github.com/cockroachdb/cockroach@v20.2.0-alpha.1+incompatible/pkg/cmd/roachtest/restore.go

     1  // Copyright 2018 The Cockroach Authors.
     2  //
     3  // Use of this software is governed by the Business Source License
     4  // included in the file licenses/BSL.txt.
     5  //
     6  // As of the Change Date specified in that file, in accordance with
     7  // the Business Source License, use of this software will be governed
     8  // by the Apache License, Version 2.0, included in the file
     9  // licenses/APL.txt.
    10  
    11  package main
    12  
    13  import (
    14  	"bytes"
    15  	"context"
    16  	gosql "database/sql"
    17  	"fmt"
    18  	"math/rand"
    19  	"net/http"
    20  	"sort"
    21  	"strings"
    22  	"text/tabwriter"
    23  	"time"
    24  
    25  	"github.com/cockroachdb/cockroach/pkg/ts/tspb"
    26  	"github.com/cockroachdb/cockroach/pkg/util/httputil"
    27  	"github.com/cockroachdb/cockroach/pkg/util/humanizeutil"
    28  	"github.com/cockroachdb/cockroach/pkg/util/timeutil"
    29  	"github.com/cockroachdb/errors"
    30  )
    31  
    32  // HealthChecker runs a regular check that verifies that a specified subset
    33  // of (CockroachDB) nodes look "very healthy". That is, there are no stuck
    34  // proposals, liveness problems, or whatever else might get added in the
    35  // future.
    36  type HealthChecker struct {
    37  	c      *cluster
    38  	nodes  nodeListOption
    39  	doneCh chan struct{}
    40  }
    41  
    42  // NewHealthChecker returns a populated HealthChecker.
    43  func NewHealthChecker(c *cluster, nodes nodeListOption) *HealthChecker {
    44  	return &HealthChecker{
    45  		c:      c,
    46  		nodes:  nodes,
    47  		doneCh: make(chan struct{}),
    48  	}
    49  }
    50  
    51  // Done signals the HealthChecker's Runner to shut down.
    52  func (hc *HealthChecker) Done() {
    53  	close(hc.doneCh)
    54  }
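
        // A typical wiring, mirroring registerRestore below: run the checker's
        // Runner inside the test monitor and signal Done once the workload has
        // returned. This is only a sketch; runWorkload stands in for the actual
        // test body.
        //
        //	hc := NewHealthChecker(c, c.All())
        //	m.Go(hc.Runner)
        //	m.Go(func(ctx context.Context) error {
        //		defer hc.Done()
        //		return runWorkload(ctx)
        //	})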
    55  
    56  type gossipAlert struct {
    57  	NodeID, StoreID       int
    58  	Category, Description string
    59  	Value                 float64
    60  }
    61  
    62  type gossipAlerts []gossipAlert
    63  
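        // String renders the alerts as a tab-aligned table, one alert per line:
        // node/store, value, category, and description. Illustrative output only
        // (alert names and values are made up):
        //
        //	n1/s1  1.00  metrics  queue.replicate.process.failure
        //	n2/s2  3.00  metrics  queue.raftsnapshot.process.failure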
    64  func (g gossipAlerts) String() string {
    65  	var buf bytes.Buffer
    66  	tw := tabwriter.NewWriter(&buf, 2, 1, 2, ' ', 0)
    67  
    68  	for _, a := range g {
    69  		fmt.Fprintf(tw, "n%d/s%d\t%.2f\t%s\t%s\n", a.NodeID, a.StoreID, a.Value, a.Category, a.Description)
    70  	}
    71  	_ = tw.Flush()
    72  	return buf.String()
    73  }
    74  
    75  // Runner makes sure the gossip_alerts table is empty at all times.
    76  //
    77  // TODO(tschottdorf): actually let this fail the test instead of logging complaints.
    78  func (hc *HealthChecker) Runner(ctx context.Context) (err error) {
    79  	logger, err := hc.c.l.ChildLogger("health")
    80  	if err != nil {
    81  		return err
    82  	}
    83  	defer func() {
    84  		logger.Printf("health check terminated with %v\n", err)
    85  	}()
    86  
    87  	ticker := time.NewTicker(5 * time.Second)
    88  	defer ticker.Stop()
    89  
    90  	for {
    91  		select {
    92  		case <-ctx.Done():
    93  			return ctx.Err()
    94  		case <-hc.doneCh:
    95  			return nil
    96  		case <-ticker.C:
    97  		}
    98  
    99  		tBegin := timeutil.Now()
   100  
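        		// Hit a randomly chosen node each round. Note that this assumes
        		// hc.nodes covers nodes 1..N, as it does for c.All() in
        		// registerRestore below.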
   101  		nodeIdx := 1 + rand.Intn(len(hc.nodes))
   102  		db, err := hc.c.ConnE(ctx, nodeIdx)
   103  		if err != nil {
   104  			return err
   105  		}
   106  		// TODO(tschottdorf): ignore replicate queue failures that occur when the cluster first starts.
   107  		// Ditto queue.raftsnapshot.process.failure.
   108  		rows, err := db.QueryContext(ctx, `SELECT * FROM crdb_internal.gossip_alerts ORDER BY node_id ASC, store_id ASC`)
   109  		_ = db.Close()
   110  		if err != nil {
   111  			return err
   112  		}
   113  		var rr gossipAlerts
   114  		for rows.Next() {
   115  			a := gossipAlert{StoreID: -1}
   116  			var storeID gosql.NullInt64
   117  			if err := rows.Scan(&a.NodeID, &storeID, &a.Category, &a.Description, &a.Value); err != nil {
   118  				return err
   119  			}
   120  			if storeID.Valid {
   121  				a.StoreID = int(storeID.Int64)
   122  			}
   123  			rr = append(rr, a)
   124  		}
   125  		if len(rr) > 0 {
   126  			logger.Printf("%s\n", rr.String())
   127  			// TODO(tschottdorf): see method comment.
   128  			// return errors.New(rr.String())
   129  		}
   130  
   131  		if elapsed := timeutil.Since(tBegin); elapsed > 10*time.Second {
   132  			err := errors.Errorf("health check against node %d took %s", nodeIdx, elapsed)
   133  			logger.Printf("%+v", err)
   134  			// TODO(tschottdorf): see method comment.
   135  			// return err
   136  		}
   137  	}
   138  }
   139  
   140  // DiskUsageLogger regularly logs the disk space used by the nodes in the cluster.
   141  type DiskUsageLogger struct {
   142  	c      *cluster
   143  	doneCh chan struct{}
   144  }
   145  
   146  // NewDiskUsageLogger returns a populated DiskUsageLogger.
   147  func NewDiskUsageLogger(c *cluster) *DiskUsageLogger {
   148  	return &DiskUsageLogger{
   149  		c:      c,
   150  		doneCh: make(chan struct{}),
   151  	}
   152  }
   153  
   154  // Done instructs the Runner to terminate.
   155  func (dul *DiskUsageLogger) Done() {
   156  	close(dul.doneCh)
   157  }
   158  
   159  // Runner runs in a loop until Done() is called, printing the per-node disk
   160  // usage across the cluster in descending order.
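        //
        // An example log line (values illustrative):
        //
        //	n#7: 32 GiB, n#3: 31 GiB, n#1: 30 GiB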
   161  func (dul *DiskUsageLogger) Runner(ctx context.Context) error {
   162  	logger, err := dul.c.l.ChildLogger("diskusage")
   163  	if err != nil {
   164  		return err
   165  	}
   166  	quietLogger, err := dul.c.l.ChildLogger("diskusage-exec", quietStdout, quietStderr)
   167  	if err != nil {
   168  		return err
   169  	}
   170  
   171  	ticker := time.NewTicker(time.Minute)
   172  	defer ticker.Stop()
   173  
   174  	for {
   175  		select {
   176  		case <-ctx.Done():
   177  			return ctx.Err()
   178  		case <-dul.doneCh:
   179  			return nil
   180  		case <-ticker.C:
   181  		}
   182  
   183  		type usage struct {
   184  			nodeNum int
   185  			bytes   int
   186  		}
   187  
   188  		var bytesUsed []usage
   189  		for i := 1; i <= dul.c.spec.NodeCount; i++ {
   190  			cur, err := getDiskUsageInBytes(ctx, dul.c, quietLogger, i)
   191  			if err != nil {
   192  				// This can trigger spuriously as compactions remove files out from under `du`.
   193  				logger.Printf("%s", errors.Wrapf(err, "node #%d", i))
   194  				cur = -1
   195  			}
   196  			bytesUsed = append(bytesUsed, usage{
   197  				nodeNum: i,
   198  				bytes:   cur,
   199  			})
   200  		}
   201  		sort.Slice(bytesUsed, func(i, j int) bool { return bytesUsed[i].bytes > bytesUsed[j].bytes }) // descending
   202  
   203  		var s []string
   204  		for _, usage := range bytesUsed {
   205  			s = append(s, fmt.Sprintf("n#%d: %s", usage.nodeNum, humanizeutil.IBytes(int64(usage.bytes))))
   206  		}
   207  
   208  		logger.Printf("%s\n", strings.Join(s, ", "))
   209  	}
   210  }
   211  
   212  func registerRestore(r *testRegistry) {
   213  	for _, item := range []struct {
   214  		nodes   int
   215  		timeout time.Duration
   216  	}{
   217  		{10, 6 * time.Hour},
   218  		{32, 3 * time.Hour},
   219  	} {
   220  		r.Add(testSpec{
   221  			Name:    fmt.Sprintf("restore2TB/nodes=%d", item.nodes),
   222  			Owner:   OwnerBulkIO,
   223  			Cluster: makeClusterSpec(item.nodes),
   224  			Timeout: item.timeout,
   225  			Run: func(ctx context.Context, t *test, c *cluster) {
   226  				c.Put(ctx, cockroach, "./cockroach")
   227  				c.Start(ctx, t)
   228  				m := newMonitor(ctx, c)
   229  
   230  				// Run the disk usage logger in the monitor to guarantee that it
   231  				// has terminated by the time the test ends.
   232  				dul := NewDiskUsageLogger(c)
   233  				m.Go(dul.Runner)
   234  				hc := NewHealthChecker(c, c.All())
   235  				m.Go(hc.Runner)
   236  
   237  				// TODO(peter): This currently causes the test to fail because we see a
   238  				// flurry of valid merges when the restore finishes.
   239  				//
   240  				// m.Go(func(ctx context.Context) error {
   241  				// 	// Make sure the merge queue doesn't muck with our restore.
   242  				// 	return verifyMetrics(ctx, c, map[string]float64{
   243  				// 		"cr.store.queue.merge.process.success": 10,
   244  				// 		"cr.store.queue.merge.process.failure": 10,
   245  				// 	})
   246  				// })
   247  
   248  				m.Go(func(ctx context.Context) error {
   249  					defer dul.Done()
   250  					defer hc.Done()
   251  					t.Status(`running restore`)
   252  					c.Run(ctx, c.Node(1), `./cockroach sql --insecure -e "CREATE DATABASE restore2tb"`)
   253  					// TODO(dan): It'd be nice if we could keep track over time of how
   254  					// long this next line took.
   255  					c.Run(ctx, c.Node(1), `./cockroach sql --insecure -e "
   256  				RESTORE csv.bank FROM
   257  				'gs://cockroach-fixtures/workload/bank/version=1.0.0,payload-bytes=10240,ranges=0,rows=65104166,seed=1/bank'
   258  				WITH into_db = 'restore2tb'"`)
   259  
   260  					return nil
   261  				})
   262  				m.Wait()
   263  			},
   264  		})
   265  	}
   266  }
   267  
   268  // verifyMetrics loops, retrieving the timeseries metrics specified in m every
   269  // 10s and verifying that the most recent value is less than the limit
   270  // specified in m. This is particularly useful for verifying that a counter
   271  // metric does not exceed some threshold during a test. For example, the
   272  // restore and import tests verify that the range merge queue is inactive.
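        //
        // For example, a test could run the check in its monitor (mirroring the
        // commented-out block in registerRestore above; the thresholds are
        // illustrative):
        //
        //	m.Go(func(ctx context.Context) error {
        //		return verifyMetrics(ctx, c, map[string]float64{
        //			"cr.store.queue.merge.process.success": 10,
        //			"cr.store.queue.merge.process.failure": 10,
        //		})
        //	})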
   273  func verifyMetrics(ctx context.Context, c *cluster, m map[string]float64) error {
   274  	const sample = 10 * time.Second
   275  	// Query needed information over the timespan of the query.
   276  	url := "http://" + c.ExternalAdminUIAddr(ctx, c.Node(1))[0] + "/ts/query"
   277  
   278  	request := tspb.TimeSeriesQueryRequest{
   279  		// Ask for 10s sample periods. We can't just ask for one coarse sample
   280  		// over the whole window because the time series query system does not
   281  		// support downsampling offsets.
   282  		SampleNanos: sample.Nanoseconds(),
   283  	}
   284  	for name := range m {
   285  		request.Queries = append(request.Queries, tspb.Query{
   286  			Name:             name,
   287  			Downsampler:      tspb.TimeSeriesQueryAggregator_AVG.Enum(),
   288  			SourceAggregator: tspb.TimeSeriesQueryAggregator_SUM.Enum(),
   289  		})
   290  	}
   291  
   292  	ticker := time.NewTicker(sample)
   293  	defer ticker.Stop()
   294  	for {
   295  		select {
   296  		case <-ctx.Done():
   297  			return ctx.Err()
   298  		case <-ticker.C:
   299  		}
   300  
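        		// Query the trailing three sample periods (30s) and, below, check only
        		// the most recent datapoint of each series against its limit.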
   301  		now := timeutil.Now()
   302  		request.StartNanos = now.Add(-sample * 3).UnixNano()
   303  		request.EndNanos = now.UnixNano()
   304  
   305  		var response tspb.TimeSeriesQueryResponse
   306  		if err := httputil.PostJSON(http.Client{}, url, &request, &response); err != nil {
   307  			return err
   308  		}
   309  
   310  		for i := range request.Queries {
   311  			name := request.Queries[i].Name
   312  			data := response.Results[i].Datapoints
   313  			n := len(data)
   314  			if n == 0 {
   315  				continue
   316  			}
   317  			limit := m[name]
   318  			value := data[n-1].Value
   319  			if value >= limit {
   320  				return fmt.Errorf("%s: %.1f >= %.1f @ %d", name, value, limit, data[n-1].TimestampNanos)
   321  			}
   322  		}
   323  	}
   324  }
   325  
   326  // TODO(peter): silence unused warning.
   327  var _ = verifyMetrics