github.com/m3db/m3@v1.5.1-0.20231129193456-75a402aa583b/src/integration/resources/inprocess/coordinator_test.go (about)

     1  //go:build test_harness
     2  // +build test_harness
     3  
     4  // Copyright (c) 2021  Uber Technologies, Inc.
     5  //
     6  // Permission is hereby granted, free of charge, to any person obtaining a copy
     7  // of this software and associated documentation files (the "Software"), to deal
     8  // in the Software without restriction, including without limitation the rights
     9  // to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
    10  // copies of the Software, and to permit persons to whom the Software is
    11  // furnished to do so, subject to the following conditions:
    12  //
    13  // The above copyright notice and this permission notice shall be included in
    14  // all copies or substantial portions of the Software.
    15  //
    16  // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
    17  // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
    18  // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
    19  // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
    20  // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
    21  // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
    22  // THE SOFTWARE.
    23  
    24  package inprocess
    25  
    26  import (
    27  	"context"
    28  	"testing"
    29  
    30  	"github.com/m3db/m3/src/cluster/generated/proto/placementpb"
    31  	"github.com/m3db/m3/src/cluster/placement"
    32  	"github.com/m3db/m3/src/integration/resources"
    33  	"github.com/m3db/m3/src/integration/resources/docker/dockerexternal"
    34  	"github.com/m3db/m3/src/msg/generated/proto/topicpb"
    35  	"github.com/m3db/m3/src/msg/topic"
    36  	"github.com/m3db/m3/src/query/generated/proto/admin"
    37  	"github.com/m3db/m3/src/query/generated/proto/prompb"
    38  	"github.com/m3db/m3/src/query/storage"
    39  	"github.com/m3db/m3/src/x/instrument"
    40  	xtime "github.com/m3db/m3/src/x/time"
    41  	"github.com/ory/dockertest/v3"
    42  
    43  	"github.com/prometheus/common/model"
    44  	"github.com/stretchr/testify/assert"
    45  	"github.com/stretchr/testify/require"
    46  )
    47  
    48  func TestNewCoordinator(t *testing.T) {
    49  	pool, err := dockertest.NewPool("")
    50  	require.NoError(t, err)
    51  
    52  	etcd, err := dockerexternal.NewEtcd(pool, instrument.NewOptions(), dockerexternal.EtcdClusterPort(2379))
    53  	require.NoError(t, err)
    54  	require.NoError(t, etcd.Setup(context.TODO()))
    55  	t.Cleanup(func() {
    56  		require.NoError(t, etcd.Close(context.TODO()))
    57  	})
    58  
    59  	dbnode, err := NewDBNodeFromYAML(defaultDBNodeConfig, DBNodeOptions{Start: true})
    60  	require.NoError(t, err)
    61  	defer func() {
    62  		assert.NoError(t, dbnode.Close())
    63  	}()
    64  
    65  	coord, err := NewCoordinatorFromYAML(defaultCoordConfig, CoordinatorOptions{Start: true})
    66  	require.NoError(t, err)
    67  	require.NoError(t, coord.Close())
    68  
    69  	// Restart and shut down again to test restarting.
    70  	coord, err = NewCoordinatorFromYAML(defaultCoordConfig, CoordinatorOptions{Start: true})
    71  	require.NoError(t, err)
    72  	require.NoError(t, coord.Close())
    73  }
    74  
    75  func TestNewEmbeddedCoordinator(t *testing.T) {
    76  	dbnode, err := NewDBNodeFromYAML(embeddedCoordConfig, DBNodeOptions{Start: true})
    77  	require.NoError(t, err)
    78  	defer func() {
    79  		assert.NoError(t, dbnode.Close())
    80  	}()
    81  
    82  	d, ok := dbnode.(*DBNode)
    83  	require.True(t, ok)
    84  	require.True(t, d.started)
    85  
    86  	_, err = NewEmbeddedCoordinator(d)
    87  	require.NoError(t, err)
    88  }
    89  
    90  func TestNewEmbeddedCoordinatorNotStarted(t *testing.T) {
    91  	var dbnode DBNode
    92  	_, err := NewEmbeddedCoordinator(&dbnode)
    93  	require.Error(t, err)
    94  }
    95  
    96  func TestCoordinatorAPIs(t *testing.T) {
    97  	_, coord, closer := setupNodeAndCoordinator(t)
    98  	defer closer()
    99  
   100  	testM3msgTopicFunctions(t, coord)
   101  	testAggPlacementFunctions(t, coord)
   102  	testMetadataAPIs(t, coord)
   103  }
   104  
   105  func testMetadataAPIs(t *testing.T, coordinator resources.Coordinator) {
   106  	err := coordinator.WriteProm("cpu", map[string]string{"pod": "foo-1234"}, []prompb.Sample{
   107  		{Value: 1, Timestamp: storage.TimeToPromTimestamp(xtime.Now())},
   108  	}, nil)
   109  	require.NoError(t, err)
   110  
   111  	names, err := coordinator.LabelNames(resources.LabelNamesRequest{}, nil)
   112  	require.NoError(t, err)
   113  	require.Equal(t, model.LabelNames{
   114  		"__name__",
   115  		"pod",
   116  	}, names)
   117  
   118  	values, err := coordinator.LabelValues(resources.LabelValuesRequest{
   119  		LabelName: "__name__",
   120  	}, nil)
   121  	require.NoError(t, err)
   122  	require.Equal(t, model.LabelValues{"cpu"}, values)
   123  
   124  	series, err := coordinator.Series(resources.SeriesRequest{
   125  		MetadataRequest: resources.MetadataRequest{
   126  			Match: "cpu",
   127  		},
   128  	}, nil)
   129  	require.NoError(t, err)
   130  	require.Equal(t, []model.Metric{
   131  		{
   132  			"__name__": "cpu",
   133  			"pod":      "foo-1234",
   134  		},
   135  	}, series)
   136  }
   137  
   138  func testM3msgTopicFunctions(t *testing.T, coord resources.Coordinator) {
   139  	// init an m3msg topic
   140  	m3msgTopicOpts := resources.M3msgTopicOptions{
   141  		Zone:      "embedded",
   142  		Env:       "default_env",
   143  		TopicName: "testtopic",
   144  	}
   145  	initResp, err := coord.InitM3msgTopic(
   146  		m3msgTopicOpts,
   147  		admin.TopicInitRequest{NumberOfShards: 16},
   148  	)
   149  	expectedInitResp := admin.TopicGetResponse{
   150  		Topic: &topicpb.Topic{
   151  			Name:             "testtopic",
   152  			NumberOfShards:   16,
   153  			ConsumerServices: nil,
   154  		},
   155  		Version: 1,
   156  	}
   157  	require.NoError(t, err)
   158  	validateEqualTopicResp(t, expectedInitResp, initResp)
   159  
   160  	// add a consumer service
   161  	consumer := topicpb.ConsumerService{
   162  		ServiceId: &topicpb.ServiceID{
   163  			Name:        "testservice",
   164  			Environment: m3msgTopicOpts.Env,
   165  			Zone:        m3msgTopicOpts.Zone,
   166  		},
   167  		ConsumptionType: topicpb.ConsumptionType_SHARED,
   168  		MessageTtlNanos: 1,
   169  	}
   170  	addResp, err := coord.AddM3msgTopicConsumer(
   171  		m3msgTopicOpts,
   172  		admin.TopicAddRequest{ConsumerService: &consumer},
   173  	)
   174  	expectedAddResp := admin.TopicGetResponse{
   175  		Topic: &topicpb.Topic{
   176  			Name:           "testtopic",
   177  			NumberOfShards: 16,
   178  			ConsumerServices: []*topicpb.ConsumerService{
   179  				&consumer,
   180  			},
   181  		},
   182  		Version: 2,
   183  	}
   184  	require.NoError(t, err)
   185  	validateEqualTopicResp(t, expectedAddResp, addResp)
   186  
   187  	// get an m3msg topic
   188  	getResp, err := coord.GetM3msgTopic(m3msgTopicOpts)
   189  	require.NoError(t, err)
   190  	validateEqualTopicResp(t, expectedAddResp, getResp)
   191  }
   192  
   193  func validateEqualTopicResp(t *testing.T, expected, actual admin.TopicGetResponse) {
   194  	require.Equal(t, expected.Version, actual.Version)
   195  
   196  	t1, err := topic.NewTopicFromProto(expected.Topic)
   197  	require.NoError(t, err)
   198  	t2, err := topic.NewTopicFromProto(actual.Topic)
   199  	require.NoError(t, err)
   200  	require.Equal(t, t1, t2)
   201  }
   202  
   203  func testAggPlacementFunctions(t *testing.T, coord resources.Coordinator) {
   204  	placementOpts := resources.PlacementRequestOptions{
   205  		Service: resources.ServiceTypeM3Aggregator,
   206  		Env:     "default_env",
   207  		Zone:    "embedded",
   208  	}
   209  	initRequest := admin.PlacementInitRequest{
   210  		Instances: []*placementpb.Instance{
   211  			{
   212  				Id:             "host1",
   213  				IsolationGroup: "rack1",
   214  				Zone:           "embedded",
   215  				Weight:         1,
   216  				Endpoint:       "http://host1:1234",
   217  				Hostname:       "host1",
   218  				Port:           1234,
   219  				Metadata: &placementpb.InstanceMetadata{
   220  					DebugPort: 0,
   221  				},
   222  			},
   223  			{
   224  				Id:             "host2",
   225  				IsolationGroup: "rack2",
   226  				Zone:           "embedded",
   227  				Weight:         1,
   228  				Endpoint:       "http://host2:1234",
   229  				Hostname:       "host2",
   230  				Port:           1234,
   231  				Metadata: &placementpb.InstanceMetadata{
   232  					DebugPort: 0,
   233  				},
   234  			},
   235  		},
   236  		NumShards:         1,
   237  		ReplicationFactor: 2,
   238  	}
   239  	instanceMap := make(map[string]*placementpb.Instance, len(initRequest.Instances))
   240  	for _, ins := range initRequest.Instances {
   241  		newIns := *ins
   242  		newIns.ShardSetId = 1 // initialized
   243  		newIns.Shards = []*placementpb.Shard{
   244  			{
   245  				Id:                0,
   246  				State:             placementpb.ShardState_INITIALIZING,
   247  				SourceId:          "",
   248  				CutoverNanos:      0,
   249  				CutoffNanos:       0,
   250  				RedirectToShardId: nil,
   251  			},
   252  		}
   253  		instanceMap[ins.Hostname] = &newIns
   254  	}
   255  	initResp, err := coord.InitPlacement(placementOpts, initRequest)
   256  	require.NoError(t, err)
   257  	expectedPlacement := &placementpb.Placement{
   258  		Instances:     instanceMap,
   259  		ReplicaFactor: uint32(initRequest.ReplicationFactor),
   260  		NumShards:     uint32(initRequest.NumShards),
   261  		MaxShardSetId: uint32(initRequest.NumShards),
   262  		IsSharded:     true,
   263  		IsMirrored:    true,
   264  	}
   265  	require.Equal(t, int32(0), initResp.Version)
   266  	validateEqualAggPlacement(t, expectedPlacement, initResp.Placement)
   267  
   268  	getResp, err := coord.GetPlacement(placementOpts)
   269  
   270  	require.NoError(t, err)
   271  	require.Equal(t, int32(1), getResp.Version)
   272  	validateEqualAggPlacement(t, expectedPlacement, getResp.Placement)
   273  
   274  	wrongPlacementOpts := resources.PlacementRequestOptions{
   275  		Service: resources.ServiceTypeM3Aggregator,
   276  		Env:     "default_env_wrong",
   277  		Zone:    "embedded",
   278  	}
   279  	_, err = coord.GetPlacement(wrongPlacementOpts)
   280  	require.NotNil(t, err)
   281  }
   282  
   283  func validateEqualAggPlacement(t *testing.T, expected, actual *placementpb.Placement) {
   284  	p1, err := placement.NewPlacementFromProto(expected)
   285  	require.NoError(t, err)
   286  	p2, err := placement.NewPlacementFromProto(actual)
   287  	require.NoError(t, err)
   288  	require.Equal(t, p1.String(), p2.String())
   289  }
   290  
// defaultCoordConfig is the YAML config for a standalone coordinator that
// talks to an m3db cluster via the local etcd endpoint on 127.0.0.1:2379
// (the port the tests' etcd container is bound to).
const defaultCoordConfig = `
clusters:
  - client:
      config:
        service:
          env: default_env
          zone: embedded
          service: m3db
          etcdClusters:
            - zone: embedded
              endpoints:
                - 127.0.0.1:2379
`
   304  
// embeddedCoordConfig is the YAML config for a DB node that also runs an
// embedded coordinator (note the top-level `coordinator:` and `db:` keys),
// pointing at the same local etcd endpoint as defaultCoordConfig.
const embeddedCoordConfig = `
coordinator:
  clusters:
    - client:
        config:
          service:
            env: default_env
            zone: embedded
            service: m3db
            etcdClusters:
              - zone: embedded
                endpoints:
                  - 127.0.0.1:2379

db: {}
`