github.com/m3db/m3@v1.5.1-0.20231129193456-75a402aa583b/src/integration/resources/docker/dockerm3/dbnode.go

// Copyright (c) 2020 Uber Technologies, Inc.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.

package dockerm3

import (
	"fmt"
	"strings"

	"github.com/m3db/m3/src/dbnode/generated/thrift/rpc"
	"github.com/m3db/m3/src/dbnode/integration"
	"github.com/m3db/m3/src/integration/resources"
	"github.com/m3db/m3/src/query/generated/proto/admin"
	xdockertest "github.com/m3db/m3/src/x/dockertest"

	"github.com/ory/dockertest/v3"
	"github.com/ory/dockertest/v3/docker"
	"go.uber.org/zap"
)

const (
	defaultDBNodeSource        = "dbnode"
	defaultDBNodeContainerName = "dbnode01"
)

var (
	defaultDBNodePortList = []int{2379, 2380, 9000, 9001, 9002, 9003, 9004}

	defaultDBNodeOptions = xdockertest.ResourceOptions{
		Source:        defaultDBNodeSource,
		ContainerName: defaultDBNodeContainerName,
		PortList:      defaultDBNodePortList,
	}
)

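// dbNode is a docker-backed implementation of resources.Node that talks to a
// dbnode container over TChannel.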
type dbNode struct {
	tchanClient *integration.TestTChannelClient
	resource    *xdockertest.Resource
	pool        *dockertest.Pool
	logger      *zap.Logger
}

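// newDockerHTTPNode creates a dbnode docker resource from the given options
// (falling back to defaultDBNodeOptions) and connects a test TChannel client
// to the container's 9000/tcp port.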
func newDockerHTTPNode(
	pool *dockertest.Pool,
	opts xdockertest.ResourceOptions,
) (resources.Node, error) {
	opts = opts.WithDefaults(defaultDBNodeOptions)
	resource, err := xdockertest.NewDockerResource(pool, opts)
	if err != nil {
		return nil, err
	}

	completed := false
	defer func() {
		if !completed {
			_ = resource.Close()
		}
	}()

	logger := opts.InstrumentOpts.Logger()
	addr := resource.Resource().GetHostPort("9000/tcp")
	tchanClient, err := integration.NewTChannelClient("client", addr)
	if err != nil {
		return nil, err
	}

	logger.Info("set up tchanClient", zap.String("node_addr", addr))
	completed = true
	return &dbNode{
		tchanClient: tchanClient,
		pool:        pool,
		resource:    resource,
		logger:      logger,
	}, nil
}

// Start is a no-op; the dbnode docker container is already running by the
// time this resource is constructed.
func (c *dbNode) Start() {}

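// HostDetails returns placement host details for the dbnode container,
// addressed by its docker network IP and the supplied port.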
func (c *dbNode) HostDetails(p int) (*admin.Host, error) {
	var network docker.ContainerNetwork
	for _, n := range c.resource.Resource().Container.NetworkSettings.Networks { // nolint: gocritic
		network = n
	}

	host := strings.TrimLeft(c.resource.Resource().Container.Name, "/")
	return &admin.Host{
		Id:             host,
		IsolationGroup: "rack-a-" + c.resource.Resource().Container.Name,
		Zone:           "embedded",
		Weight:         1024,
		Address:        network.IPAddress,
		Port:           uint32(p),
	}, nil
}

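// Health returns the dbnode's health via its TChannel health endpoint.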
func (c *dbNode) Health() (*rpc.NodeHealthResult_, error) {
	if c.resource.Closed() {
		return nil, xdockertest.ErrClosed
	}

	logger := c.logger.With(zapMethod("health"))
	res, err := c.tchanClient.TChannelClientHealth(timeout)
	if err != nil {
		logger.Error("health check failed", zap.Error(err), zap.Any("res", res))
	}

	return res, err
}

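// WaitForBootstrap polls the node's health until it reports that it has
// bootstrapped, or the docker pool's retry deadline is exceeded.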
func (c *dbNode) WaitForBootstrap() error {
	if c.resource.Closed() {
		return xdockertest.ErrClosed
	}

	logger := c.logger.With(zapMethod("waitForBootstrap"))
	return c.pool.Retry(func() error {
		health, err := c.Health()
		if err != nil {
			return err
		}

		if !health.GetBootstrapped() {
			err = fmt.Errorf("not bootstrapped")
			logger.Error("node not yet bootstrapped", zap.Error(err))
			return err
		}

		return nil
	})
}

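// WritePoint writes a single datapoint to the node directly over TChannel.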
func (c *dbNode) WritePoint(req *rpc.WriteRequest) error {
	if c.resource.Closed() {
		return xdockertest.ErrClosed
	}

	logger := c.logger.With(zapMethod("write"))
	err := c.tchanClient.TChannelClientWrite(timeout, req)
	if err != nil {
		logger.Error("could not write", zap.Error(err))
		return err
	}

	logger.Info("wrote")
	return nil
}

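// WriteTaggedPoint writes a single tagged datapoint to the node directly over
// TChannel.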
func (c *dbNode) WriteTaggedPoint(req *rpc.WriteTaggedRequest) error {
	if c.resource.Closed() {
		return xdockertest.ErrClosed
	}

	logger := c.logger.With(zapMethod("write-tagged"))
	err := c.tchanClient.TChannelClientWriteTagged(timeout, req)
	if err != nil {
		logger.Error("could not write-tagged", zap.Error(err))
		return err
	}

	logger.Info("wrote")
	return nil
}

// WriteTaggedBatchRaw writes a batch of writes to the node directly.
func (c *dbNode) WriteTaggedBatchRaw(req *rpc.WriteTaggedBatchRawRequest) error {
	if c.resource.Closed() {
		return xdockertest.ErrClosed
	}

	logger := c.logger.With(zapMethod("write-tagged-batch-raw"))
	err := c.tchanClient.TChannelClientWriteTaggedBatchRaw(timeout, req)
	if err != nil {
		logger.Error("writeTaggedBatchRaw call failed", zap.Error(err))
		return err
	}

	logger.Info("wrote")
	return nil
}

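// AggregateTiles issues an AggregateTiles request against the node and returns
// the number of processed tiles.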
func (c *dbNode) AggregateTiles(req *rpc.AggregateTilesRequest) (int64, error) {
	if c.resource.Closed() {
		return 0, xdockertest.ErrClosed
	}

	logger := c.logger.With(zapMethod("aggregate-tiles"))
	rsp, err := c.tchanClient.TChannelClientAggregateTiles(timeout, req)
	if err != nil {
		logger.Error("could not aggregate tiles", zap.Error(err))
		return 0, err
	}

	logger.Info("aggregated tiles", zap.Int64("processed_tile_count", rsp.ProcessedTileCount))
	return rsp.ProcessedTileCount, nil
}

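// Fetch reads datapoints from the node directly over TChannel.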
func (c *dbNode) Fetch(req *rpc.FetchRequest) (*rpc.FetchResult_, error) {
	if c.resource.Closed() {
		return nil, xdockertest.ErrClosed
	}

	logger := c.logger.With(zapMethod("fetch"))
	dps, err := c.tchanClient.TChannelClientFetch(timeout, req)
	if err != nil {
		logger.Error("could not fetch", zap.Error(err))
		return nil, err
	}

	logger.Info("fetched", zap.Int("num_points", len(dps.GetDatapoints())))
	return dps, nil
}

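// FetchTagged reads tagged series from the node directly over TChannel.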
func (c *dbNode) FetchTagged(req *rpc.FetchTaggedRequest) (*rpc.FetchTaggedResult_, error) {
	if c.resource.Closed() {
		return nil, xdockertest.ErrClosed
	}

	logger := c.logger.With(zapMethod("fetchtagged"))
	result, err := c.tchanClient.TChannelClientFetchTagged(timeout, req)
	if err != nil {
		logger.Error("could not fetch", zap.Error(err))
		return nil, err
	}

	logger.Info("fetched", zap.Int("series_count", len(result.GetElements())))
	return result, nil
}

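// Restart restarts the underlying docker container, allowing it up to 60
// seconds to stop gracefully.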
func (c *dbNode) Restart() error {
	if c.resource.Closed() {
		return xdockertest.ErrClosed
	}

	cName := c.resource.Resource().Container.Name
	logger := c.logger.With(zapMethod("restart"))
	logger.Info("restarting container", zap.String("container", cName))
	err := c.pool.Client.RestartContainer(cName, 60)
	if err != nil {
		logger.Error("could not restart", zap.Error(err))
		return err
	}

	return nil
}

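// Exec runs the given command inside the dbnode container and returns its
// output.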
func (c *dbNode) Exec(commands ...string) (string, error) {
	if c.resource.Closed() {
		return "", xdockertest.ErrClosed
	}

	return c.resource.Exec(commands...)
}

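// GoalStateExec runs the given command inside the dbnode container and checks
// its result with the supplied goal-state verifier.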
func (c *dbNode) GoalStateExec(
	verifier resources.GoalStateVerifier,
	commands ...string,
) error {
	if c.resource.Closed() {
		return xdockertest.ErrClosed
	}

	return c.resource.GoalStateExec(xdockertest.GoalStateVerifier(verifier), commands...)
}

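// Close tears down the underlying docker resource.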
func (c *dbNode) Close() error {
	if c.resource.Closed() {
		return xdockertest.ErrClosed
	}

	return c.resource.Close()
}