github.com/grafana/pyroscope@v1.18.0/pkg/querier/querier_test.go (about) 1 package querier 2 3 import ( 4 "bytes" 5 "compress/gzip" 6 "context" 7 "encoding/json" 8 "errors" 9 "os" 10 "path" 11 "sort" 12 "sync" 13 "testing" 14 "time" 15 16 "connectrpc.com/connect" 17 "github.com/go-kit/log" 18 "github.com/gogo/protobuf/proto" 19 "github.com/google/pprof/profile" 20 "github.com/grafana/dskit/kv" 21 "github.com/grafana/dskit/ring" 22 "github.com/grafana/dskit/ring/client" 23 "github.com/prometheus/common/model" 24 "github.com/stretchr/testify/assert" 25 "github.com/stretchr/testify/mock" 26 "github.com/stretchr/testify/require" 27 "google.golang.org/grpc" 28 "google.golang.org/grpc/health/grpc_health_v1" 29 30 profilev1 "github.com/grafana/pyroscope/api/gen/proto/go/google/v1" 31 ingestv1 "github.com/grafana/pyroscope/api/gen/proto/go/ingester/v1" 32 querierv1 "github.com/grafana/pyroscope/api/gen/proto/go/querier/v1" 33 typesv1 "github.com/grafana/pyroscope/api/gen/proto/go/types/v1" 34 "github.com/grafana/pyroscope/pkg/clientpool" 35 "github.com/grafana/pyroscope/pkg/featureflags" 36 phlaremodel "github.com/grafana/pyroscope/pkg/model" 37 objstoreclient "github.com/grafana/pyroscope/pkg/objstore/client" 38 "github.com/grafana/pyroscope/pkg/objstore/providers/filesystem" 39 "github.com/grafana/pyroscope/pkg/phlaredb/bucketindex" 40 "github.com/grafana/pyroscope/pkg/pprof" 41 pprofth "github.com/grafana/pyroscope/pkg/pprof/testhelper" 42 "github.com/grafana/pyroscope/pkg/storegateway" 43 "github.com/grafana/pyroscope/pkg/tenant" 44 "github.com/grafana/pyroscope/pkg/testhelper" 45 "github.com/grafana/pyroscope/pkg/util" 46 ) 47 48 type poolFactory struct { 49 f func(addr string) (client.PoolClient, error) 50 } 51 52 func (p poolFactory) FromInstance(desc ring.InstanceDesc) (client.PoolClient, error) { 53 return p.f(desc.Addr) 54 } 55 56 func Test_QuerySampleType(t *testing.T) { 57 querier, err := New(&NewQuerierParams{ 58 Cfg: Config{ 59 PoolConfig: 
clientpool.PoolConfig{ClientCleanupPeriod: 1 * time.Millisecond}, 60 }, 61 IngestersRing: testhelper.NewMockRing([]ring.InstanceDesc{ 62 {Addr: "1"}, 63 {Addr: "2"}, 64 {Addr: "3"}, 65 }, 3), 66 PoolFactory: &poolFactory{f: func(addr string) (client.PoolClient, error) { 67 q := newFakeQuerier() 68 switch addr { 69 case "1": 70 q.On("LabelValues", mock.Anything, mock.Anything). 71 Return(connect.NewResponse(&typesv1.LabelValuesResponse{ 72 Names: []string{ 73 "foo::::", 74 "bar::::", 75 }, 76 }), nil) 77 case "2": 78 q.On("LabelValues", mock.Anything, mock.Anything). 79 Return(connect.NewResponse(&typesv1.LabelValuesResponse{ 80 Names: []string{ 81 "bar::::", 82 "buzz::::", 83 }, 84 }), nil) 85 case "3": 86 q.On("LabelValues", mock.Anything, mock.Anything). 87 Return(connect.NewResponse(&typesv1.LabelValuesResponse{ 88 Names: []string{ 89 "buzz::::", 90 "foo::::", 91 }, 92 }), nil) 93 } 94 return q, nil 95 }}, 96 Logger: log.NewLogfmtLogger(os.Stdout), 97 }) 98 require.NoError(t, err) 99 100 out, err := querier.ProfileTypes(context.Background(), connect.NewRequest(&querierv1.ProfileTypesRequest{})) 101 require.NoError(t, err) 102 103 ids := make([]string, 0, len(out.Msg.ProfileTypes)) 104 for _, pt := range out.Msg.ProfileTypes { 105 ids = append(ids, pt.ID) 106 } 107 require.NoError(t, err) 108 require.Equal(t, []string{"bar::::", "buzz::::", "foo::::"}, ids) 109 } 110 111 func Test_QueryLabelValues(t *testing.T) { 112 req := connect.NewRequest(&typesv1.LabelValuesRequest{Name: "foo"}) 113 querier, err := New(&NewQuerierParams{ 114 Cfg: Config{ 115 PoolConfig: clientpool.PoolConfig{ClientCleanupPeriod: 1 * time.Millisecond}, 116 }, 117 IngestersRing: testhelper.NewMockRing([]ring.InstanceDesc{ 118 {Addr: "1"}, 119 {Addr: "2"}, 120 {Addr: "3"}, 121 }, 3), 122 PoolFactory: &poolFactory{f: func(addr string) (client.PoolClient, error) { 123 q := newFakeQuerier() 124 switch addr { 125 case "1": 126 q.On("LabelValues", mock.Anything, 
mock.Anything).Return(connect.NewResponse(&typesv1.LabelValuesResponse{Names: []string{"foo", "bar"}}), nil) 127 case "2": 128 q.On("LabelValues", mock.Anything, mock.Anything).Return(connect.NewResponse(&typesv1.LabelValuesResponse{Names: []string{"bar", "buzz"}}), nil) 129 case "3": 130 q.On("LabelValues", mock.Anything, mock.Anything).Return(connect.NewResponse(&typesv1.LabelValuesResponse{Names: []string{"buzz", "foo"}}), nil) 131 } 132 return q, nil 133 }}, 134 Logger: log.NewLogfmtLogger(os.Stdout), 135 }) 136 137 require.NoError(t, err) 138 out, err := querier.LabelValues(context.Background(), req) 139 require.NoError(t, err) 140 require.Equal(t, []string{"bar", "buzz", "foo"}, out.Msg.Names) 141 } 142 143 func Test_QueryLabelNames(t *testing.T) { 144 req := connect.NewRequest(&typesv1.LabelNamesRequest{}) 145 querier, err := New(&NewQuerierParams{ 146 Cfg: Config{ 147 PoolConfig: clientpool.PoolConfig{ClientCleanupPeriod: 1 * time.Millisecond}, 148 }, 149 IngestersRing: testhelper.NewMockRing([]ring.InstanceDesc{ 150 {Addr: "1"}, 151 {Addr: "2"}, 152 {Addr: "3"}, 153 }, 3), 154 PoolFactory: &poolFactory{f: func(addr string) (client.PoolClient, error) { 155 q := newFakeQuerier() 156 switch addr { 157 case "1": 158 q.On("LabelNames", mock.Anything, mock.Anything).Return(connect.NewResponse(&typesv1.LabelNamesResponse{Names: []string{"foo", "bar"}}), nil) 159 case "2": 160 q.On("LabelNames", mock.Anything, mock.Anything).Return(connect.NewResponse(&typesv1.LabelNamesResponse{Names: []string{"bar", "buzz"}}), nil) 161 case "3": 162 q.On("LabelNames", mock.Anything, mock.Anything).Return(connect.NewResponse(&typesv1.LabelNamesResponse{Names: []string{"buzz", "foo"}}), nil) 163 } 164 return q, nil 165 }}, 166 Logger: log.NewLogfmtLogger(os.Stdout), 167 }) 168 169 require.NoError(t, err) 170 out, err := querier.LabelNames(context.Background(), req) 171 require.NoError(t, err) 172 require.Equal(t, []string{"bar", "buzz", "foo"}, out.Msg.Names) 173 } 174 175 func 
Test_filterLabelNames(t *testing.T) { 176 tests := []struct { 177 name string 178 labelNames []string 179 want []string 180 }{ 181 { 182 name: "empty slice", 183 labelNames: []string{}, 184 want: []string{}, 185 }, 186 { 187 name: "nil slice", 188 labelNames: nil, 189 want: []string{}, 190 }, 191 { 192 name: "all valid label names", 193 labelNames: []string{"foo", "bar", "service_name", "ServiceName123"}, 194 want: []string{"foo", "bar", "service_name", "ServiceName123"}, 195 }, 196 { 197 name: "all invalid label names", 198 labelNames: []string{"123service", "service-name", "service name", "世界"}, 199 want: []string{}, 200 }, 201 { 202 name: "mix of valid and invalid label names", 203 labelNames: []string{"foo", "123invalid", "bar", "invalid-name", "buzz", "世界"}, 204 want: []string{"foo", "bar", "buzz"}, 205 }, 206 { 207 name: "label names with dots (valid but get sanitized)", 208 labelNames: []string{"service.name", "app.version", "valid_label"}, 209 want: []string{"service.name", "app.version", "valid_label"}, 210 }, 211 { 212 name: "single valid label", 213 labelNames: []string{"service"}, 214 want: []string{"service"}, 215 }, 216 { 217 name: "single invalid label", 218 labelNames: []string{"123invalid"}, 219 want: []string{}, 220 }, 221 { 222 name: "labels with underscores", 223 labelNames: []string{"_", "__a", "__a__", "_service_name_"}, 224 want: []string{"_", "__a", "__a__", "_service_name_"}, 225 }, 226 { 227 name: "mixed valid labels with dots and underscores", 228 labelNames: []string{"a.b.c", "a_b_c", "a.b_c.d"}, 229 want: []string{"a.b.c", "a_b_c", "a.b_c.d"}, 230 }, 231 { 232 name: "labels starting with dots (valid, dots are sanitized)", 233 labelNames: []string{".abc", "..def", ".g.h.i"}, 234 want: []string{".abc", "..def", ".g.h.i"}, 235 }, 236 { 237 name: "labels starting with invalid characters", 238 labelNames: []string{"-abc", "0abc", "123xyz"}, 239 want: []string{}, 240 }, 241 { 242 name: "empty string in slice (invalid)", 243 labelNames: 
[]string{"foo", "", "bar"}, 244 want: []string{"foo", "bar"}, 245 }, 246 } 247 248 for _, tt := range tests { 249 t.Run(tt.name, func(t *testing.T) { 250 got := filterLabelNames(tt.labelNames) 251 require.Equal(t, tt.want, got) 252 }) 253 } 254 } 255 256 func Test_QueryLabelNames_WithFiltering(t *testing.T) { 257 req := connect.NewRequest(&typesv1.LabelNamesRequest{}) 258 259 tests := []struct { 260 name string 261 allowUtf8LabelNames bool 262 setCapabilities bool 263 ingesterLabelNames map[string][]string 264 expectedLabelNames []string 265 }{ 266 { 267 name: "UTF8 labels allowed when enabled", 268 allowUtf8LabelNames: true, 269 setCapabilities: true, 270 ingesterLabelNames: map[string][]string{ 271 "1": {"foo", "bar", "世界"}, 272 "2": {"foo", "bar", "世界"}, 273 "3": {"foo", "bar", "世界"}, 274 }, 275 // UTF8 labels pass through when UTF8 is enabled 276 expectedLabelNames: []string{"bar", "foo", "世界"}, 277 }, 278 { 279 name: "UTF8 labels filtered when disabled", 280 allowUtf8LabelNames: false, 281 setCapabilities: true, 282 ingesterLabelNames: map[string][]string{ 283 "1": {"foo", "bar", "世界"}, 284 "2": {"foo", "bar", "世界"}, 285 "3": {"foo", "bar", "世界"}, 286 }, 287 // Only legacy-valid labels pass through (UTF8 filtered out) 288 expectedLabelNames: []string{"bar", "foo"}, 289 }, 290 { 291 name: "all labels pass through when UTF8 enabled including invalid ones", 292 allowUtf8LabelNames: true, 293 setCapabilities: true, 294 ingesterLabelNames: map[string][]string{ 295 "1": {"foo", "123invalid"}, 296 "2": {"foo", "123invalid"}, 297 "3": {"foo", "123invalid"}, 298 }, 299 // When UTF8 is enabled, NO filtering happens - even invalid labels pass through 300 expectedLabelNames: []string{"123invalid", "foo"}, 301 }, 302 { 303 name: "filtering enabled when no capabilities set", 304 setCapabilities: false, 305 ingesterLabelNames: map[string][]string{ 306 "1": {"valid_name", "123invalid", "世界"}, 307 "2": {"valid_name", "123invalid", "世界"}, 308 "3": {"valid_name", "123invalid", 
"世界"}, 309 }, 310 // No capabilities means legacy-only mode - UTF8 and invalid labels filtered 311 expectedLabelNames: []string{"valid_name"}, 312 }, 313 { 314 name: "all valid legacy labels pass through when filtering enabled", 315 allowUtf8LabelNames: false, 316 setCapabilities: true, 317 ingesterLabelNames: map[string][]string{ 318 "1": {"foo", "bar"}, 319 "2": {"bar", "buzz"}, 320 "3": {"buzz", "foo"}, 321 }, 322 expectedLabelNames: []string{"bar", "buzz", "foo"}, 323 }, 324 { 325 name: "labels with dots pass through in both modes", 326 allowUtf8LabelNames: false, 327 setCapabilities: true, 328 ingesterLabelNames: map[string][]string{ 329 "1": {"service.name", "app.version"}, 330 "2": {"host.name", "service.name"}, 331 "3": {"app.version", "host.name"}, 332 }, 333 // Dots are valid legacy characters (get sanitized to underscores) 334 expectedLabelNames: []string{"app.version", "host.name", "service.name"}, 335 }, 336 } 337 338 for _, tc := range tests { 339 t.Run(tc.name, func(t *testing.T) { 340 querier, err := New(&NewQuerierParams{ 341 Cfg: Config{ 342 PoolConfig: clientpool.PoolConfig{ClientCleanupPeriod: 1 * time.Millisecond}, 343 }, 344 IngestersRing: testhelper.NewMockRing([]ring.InstanceDesc{ 345 {Addr: "1"}, 346 {Addr: "2"}, 347 {Addr: "3"}, 348 }, 3), 349 PoolFactory: &poolFactory{f: func(addr string) (client.PoolClient, error) { 350 q := newFakeQuerier() 351 if labelNames, ok := tc.ingesterLabelNames[addr]; ok { 352 q.On("LabelNames", mock.Anything, mock.Anything).Return( 353 connect.NewResponse(&typesv1.LabelNamesResponse{Names: labelNames}), nil) 354 } 355 return q, nil 356 }}, 357 Logger: log.NewLogfmtLogger(os.Stdout), 358 }) 359 require.NoError(t, err) 360 361 ctx := context.Background() 362 if tc.setCapabilities { 363 ctx = featureflags.WithClientCapabilities(ctx, featureflags.ClientCapabilities{ 364 AllowUtf8LabelNames: tc.allowUtf8LabelNames, 365 }) 366 } 367 368 out, err := querier.LabelNames(ctx, req) 369 require.NoError(t, err) 370 
require.Equal(t, tc.expectedLabelNames, out.Msg.Names) 371 }) 372 } 373 } 374 375 func Test_Series(t *testing.T) { 376 foobarlabels := phlaremodel.NewLabelsBuilder(nil).Set("foo", "bar") 377 foobuzzlabels := phlaremodel.NewLabelsBuilder(nil).Set("foo", "buzz") 378 req := connect.NewRequest(&querierv1.SeriesRequest{Matchers: []string{`{foo="bar"}`}}) 379 ingesterResponse := connect.NewResponse(&ingestv1.SeriesResponse{LabelsSet: []*typesv1.Labels{ 380 {Labels: foobarlabels.Labels()}, 381 {Labels: foobuzzlabels.Labels()}, 382 }}) 383 querier, err := New(&NewQuerierParams{ 384 Cfg: Config{ 385 PoolConfig: clientpool.PoolConfig{ClientCleanupPeriod: 1 * time.Millisecond}, 386 }, 387 IngestersRing: testhelper.NewMockRing([]ring.InstanceDesc{ 388 {Addr: "1"}, 389 {Addr: "2"}, 390 {Addr: "3"}, 391 }, 3), 392 PoolFactory: &poolFactory{func(addr string) (client.PoolClient, error) { 393 q := newFakeQuerier() 394 switch addr { 395 case "1": 396 q.On("LabelNames", mock.Anything, mock.Anything).Return(connect.NewResponse(&typesv1.LabelNamesResponse{Names: []string{"foo", "bar"}}), nil) 397 q.On("Series", mock.Anything, mock.Anything).Return(ingesterResponse, nil) 398 case "2": 399 q.On("LabelNames", mock.Anything, mock.Anything).Return(connect.NewResponse(&typesv1.LabelNamesResponse{Names: []string{"foo", "bar"}}), nil) 400 q.On("Series", mock.Anything, mock.Anything).Return(ingesterResponse, nil) 401 case "3": 402 q.On("LabelNames", mock.Anything, mock.Anything).Return(connect.NewResponse(&typesv1.LabelNamesResponse{Names: []string{"foo", "bar"}}), nil) 403 q.On("Series", mock.Anything, mock.Anything).Return(ingesterResponse, nil) 404 } 405 return q, nil 406 }}, 407 Logger: log.NewLogfmtLogger(os.Stdout), 408 }) 409 410 require.NoError(t, err) 411 out, err := querier.Series(context.Background(), req) 412 require.NoError(t, err) 413 require.Equal(t, []*typesv1.Labels{ 414 {Labels: foobarlabels.Labels()}, 415 {Labels: foobuzzlabels.Labels()}, 416 }, out.Msg.LabelsSet) 417 } 418 
419 func Test_Series_WithLabelNameFiltering(t *testing.T) { 420 tests := []struct { 421 name string 422 allowUtf8LabelNames bool 423 setCapabilities bool 424 requestLabelNames []string 425 expectedLabelNames []string 426 }{ 427 { 428 name: "all label names pass through when UTF8 enabled", 429 allowUtf8LabelNames: true, 430 setCapabilities: true, 431 requestLabelNames: []string{"valid_name", "123invalid", "invalid-hyphen", "世界"}, 432 expectedLabelNames: []string{"valid_name", "123invalid", "invalid-hyphen", "世界"}, 433 }, 434 { 435 name: "invalid label names filtered when UTF8 disabled", 436 allowUtf8LabelNames: false, 437 setCapabilities: true, 438 requestLabelNames: []string{"valid_name", "123invalid", "invalid-hyphen", "世界"}, 439 expectedLabelNames: []string{"valid_name"}, 440 }, 441 { 442 name: "UTF8 labels filtered when UTF8 disabled", 443 allowUtf8LabelNames: false, 444 setCapabilities: true, 445 requestLabelNames: []string{"foo", "bar", "世界", "日本語"}, 446 expectedLabelNames: []string{"foo", "bar"}, 447 }, 448 { 449 name: "filtering enabled when no capabilities set", 450 setCapabilities: false, 451 requestLabelNames: []string{"foo", "123invalid", "世界"}, 452 expectedLabelNames: []string{"foo"}, 453 }, 454 { 455 name: "all valid labels pass through", 456 allowUtf8LabelNames: false, 457 setCapabilities: true, 458 requestLabelNames: []string{"foo", "bar", "service_name"}, 459 expectedLabelNames: []string{"foo", "bar", "service_name"}, 460 }, 461 { 462 name: "labels with dots pass through", 463 allowUtf8LabelNames: false, 464 setCapabilities: true, 465 requestLabelNames: []string{"service.name", "app.version"}, 466 expectedLabelNames: []string{"service.name", "app.version"}, 467 }, 468 { 469 name: "empty label names with UTF8 disabled queries all labels and filters", 470 allowUtf8LabelNames: false, 471 setCapabilities: true, 472 requestLabelNames: []string{}, 473 expectedLabelNames: []string{"bar", "foo"}, 474 }, 475 { 476 name: "empty label names with UTF8 enabled 
returns ingester labels without filtering", 477 allowUtf8LabelNames: true, 478 setCapabilities: true, 479 requestLabelNames: []string{}, 480 expectedLabelNames: []string{"日本語", "bar"}, 481 }, 482 } 483 484 for _, tc := range tests { 485 t.Run(tc.name, func(t *testing.T) { 486 ingesterLabels := []string{"日本語", "bar"} 487 var capturedLabelNames []string 488 var capturedMutex sync.Mutex 489 490 querier, err := New(&NewQuerierParams{ 491 Cfg: Config{ 492 PoolConfig: clientpool.PoolConfig{ClientCleanupPeriod: 1 * time.Millisecond}, 493 }, 494 IngestersRing: testhelper.NewMockRing([]ring.InstanceDesc{ 495 {Addr: "1"}, 496 {Addr: "2"}, 497 {Addr: "3"}, 498 }, 3), 499 PoolFactory: &poolFactory{f: func(addr string) (client.PoolClient, error) { 500 q := newFakeQuerier() 501 502 q.On("Series", mock.Anything, mock.Anything).Run(func(args mock.Arguments) { 503 req := args.Get(1).(*connect.Request[ingestv1.SeriesRequest]) 504 capturedMutex.Lock() 505 defer capturedMutex.Unlock() 506 507 // If no labels passed to ingester series request, 508 // ingester returns all ingester labels 509 if len(req.Msg.LabelNames) == 0 { 510 capturedLabelNames = ingesterLabels 511 } else { 512 capturedLabelNames = req.Msg.LabelNames 513 } 514 }).Return(connect.NewResponse(&ingestv1.SeriesResponse{}), nil) 515 516 if len(tc.requestLabelNames) == 0 { 517 q.On("LabelNames", mock.Anything, mock.Anything).Return( 518 connect.NewResponse(&typesv1.LabelNamesResponse{Names: []string{"foo", "bar", "世界"}}), nil) 519 } 520 521 return q, nil 522 }}, 523 Logger: log.NewLogfmtLogger(os.Stdout), 524 }) 525 require.NoError(t, err) 526 527 ctx := context.Background() 528 if tc.setCapabilities { 529 ctx = featureflags.WithClientCapabilities(ctx, featureflags.ClientCapabilities{ 530 AllowUtf8LabelNames: tc.allowUtf8LabelNames, 531 }) 532 } 533 534 req := connect.NewRequest(&querierv1.SeriesRequest{ 535 Matchers: []string{`{foo="bar"}`}, 536 LabelNames: tc.requestLabelNames, 537 }) 538 539 _, err = querier.Series(ctx, 
req) 540 require.NoError(t, err) 541 542 // Verify that the label names were filtered correctly before being sent to ingester 543 capturedMutex.Lock() 544 actualLabelNames := capturedLabelNames 545 capturedMutex.Unlock() 546 require.Equal(t, tc.expectedLabelNames, actualLabelNames, 547 "Expected label names sent to ingester to be %v, but got %v", tc.expectedLabelNames, actualLabelNames) 548 }) 549 } 550 } 551 552 func newBlockMeta(ulids ...string) *connect.Response[ingestv1.BlockMetadataResponse] { 553 resp := &ingestv1.BlockMetadataResponse{} 554 555 resp.Blocks = make([]*typesv1.BlockInfo, len(ulids)) 556 for i, ulid := range ulids { 557 resp.Blocks[i] = &typesv1.BlockInfo{ 558 Ulid: ulid, 559 } 560 } 561 562 return connect.NewResponse(resp) 563 } 564 565 var endpointNotExistingErr = connect.NewError( 566 connect.CodeInternal, 567 connect.NewError( 568 connect.CodeUnknown, 569 errors.New("405 Method Not Allowed"), 570 ), 571 ) 572 573 func Test_isEndpointNotExisting(t *testing.T) { 574 assert.False(t, isEndpointNotExistingErr(nil)) 575 assert.False(t, isEndpointNotExistingErr(errors.New("my-error"))) 576 assert.True(t, isEndpointNotExistingErr(endpointNotExistingErr)) 577 } 578 579 func Test_SelectMergeStacktraces(t *testing.T) { 580 now := time.Now().UnixMilli() 581 for _, tc := range []struct { 582 blockSelect bool 583 name string 584 }{ 585 // This tests the interoperability between older ingesters and new queriers 586 {false, "WithoutBlockHints"}, 587 {true, "WithBlockHints"}, 588 } { 589 t.Run(tc.name, func(t *testing.T) { 590 req := connect.NewRequest(&querierv1.SelectMergeStacktracesRequest{ 591 LabelSelector: `{app="foo"}`, 592 ProfileTypeID: "memory:inuse_space:bytes:space:byte", 593 Start: now + 0, 594 End: now + 2, 595 }) 596 bidi1 := newFakeBidiClientStacktraces([]*ingestv1.ProfileSets{ 597 { 598 LabelsSets: []*typesv1.Labels{ 599 { 600 Labels: []*typesv1.LabelPair{{Name: "app", Value: "foo"}}, 601 }, 602 { 603 Labels: []*typesv1.LabelPair{{Name: 
"app", Value: "bar"}}, 604 }, 605 }, 606 Profiles: []*ingestv1.SeriesProfile{ 607 {Timestamp: now + 1, LabelIndex: 0}, 608 {Timestamp: now + 2, LabelIndex: 1}, 609 {Timestamp: now + 2, LabelIndex: 0}, 610 }, 611 }, 612 }) 613 bidi2 := newFakeBidiClientStacktraces([]*ingestv1.ProfileSets{ 614 { 615 LabelsSets: []*typesv1.Labels{ 616 { 617 Labels: []*typesv1.LabelPair{{Name: "app", Value: "foo"}}, 618 }, 619 { 620 Labels: []*typesv1.LabelPair{{Name: "app", Value: "bar"}}, 621 }, 622 }, 623 Profiles: []*ingestv1.SeriesProfile{ 624 {Timestamp: now + 1, LabelIndex: 1}, 625 {Timestamp: now + 1, LabelIndex: 0}, 626 {Timestamp: now + 2, LabelIndex: 1}, 627 }, 628 }, 629 }) 630 bidi3 := newFakeBidiClientStacktraces([]*ingestv1.ProfileSets{ 631 { 632 LabelsSets: []*typesv1.Labels{ 633 { 634 Labels: []*typesv1.LabelPair{{Name: "app", Value: "foo"}}, 635 }, 636 { 637 Labels: []*typesv1.LabelPair{{Name: "app", Value: "bar"}}, 638 }, 639 }, 640 Profiles: []*ingestv1.SeriesProfile{ 641 {Timestamp: now + 1, LabelIndex: 1}, 642 {Timestamp: now + 1, LabelIndex: 0}, 643 {Timestamp: now + 2, LabelIndex: 0}, 644 }, 645 }, 646 }) 647 querier, err := New(&NewQuerierParams{ 648 Cfg: Config{ 649 PoolConfig: clientpool.PoolConfig{ClientCleanupPeriod: 1 * time.Millisecond}, 650 }, 651 IngestersRing: testhelper.NewMockRing([]ring.InstanceDesc{ 652 {Addr: "1"}, 653 {Addr: "2"}, 654 {Addr: "3"}, 655 }, 3), 656 PoolFactory: &poolFactory{func(addr string) (client.PoolClient, error) { 657 q := newFakeQuerier() 658 switch addr { 659 case "1": 660 q.mockMergeStacktraces(bidi1, []string{"a", "d"}, tc.blockSelect) 661 case "2": 662 q.mockMergeStacktraces(bidi2, []string{"b", "d"}, tc.blockSelect) 663 case "3": 664 q.mockMergeStacktraces(bidi3, []string{"c", "d"}, tc.blockSelect) 665 } 666 return q, nil 667 }}, 668 Logger: log.NewLogfmtLogger(os.Stdout), 669 }) 670 require.NoError(t, err) 671 flame, err := querier.SelectMergeStacktraces(context.Background(), req) 672 require.NoError(t, err) 673 674 
sort.Strings(flame.Msg.Flamegraph.Names) 675 require.Equal(t, []string{"bar", "buzz", "foo", "total"}, flame.Msg.Flamegraph.Names) 676 require.Equal(t, []int64{0, 2, 0, 0}, flame.Msg.Flamegraph.Levels[0].Values) 677 require.Equal(t, int64(2), flame.Msg.Flamegraph.Total) 678 require.Equal(t, int64(2), flame.Msg.Flamegraph.MaxSelf) 679 var selected []testProfile 680 selected = append(selected, bidi1.kept...) 681 selected = append(selected, bidi2.kept...) 682 selected = append(selected, bidi3.kept...) 683 sort.Slice(selected, func(i, j int) bool { 684 if selected[i].Ts == selected[j].Ts { 685 return phlaremodel.CompareLabelPairs(selected[i].Labels.Labels, selected[j].Labels.Labels) < 0 686 } 687 return selected[i].Ts < selected[j].Ts 688 }) 689 require.Len(t, selected, 4) 690 require.Equal(t, 691 []testProfile{ 692 {Ts: now + 1, Labels: &typesv1.Labels{Labels: []*typesv1.LabelPair{{Name: "app", Value: "bar"}}}}, 693 {Ts: now + 1, Labels: &typesv1.Labels{Labels: []*typesv1.LabelPair{{Name: "app", Value: "foo"}}}}, 694 {Ts: now + 2, Labels: &typesv1.Labels{Labels: []*typesv1.LabelPair{{Name: "app", Value: "bar"}}}}, 695 {Ts: now + 2, Labels: &typesv1.Labels{Labels: []*typesv1.LabelPair{{Name: "app", Value: "foo"}}}}, 696 }, selected) 697 }) 698 } 699 } 700 701 func Test_SelectMergeProfiles(t *testing.T) { 702 for _, tc := range []struct { 703 blockSelect bool 704 name string 705 }{ 706 // This tests the interoberabitlity between older ingesters and new queriers 707 {false, "WithoutBlockHints"}, 708 {true, "WithBlockHints"}, 709 } { 710 t.Run(tc.name, func(t *testing.T) { 711 712 req := connect.NewRequest(&querierv1.SelectMergeProfileRequest{ 713 LabelSelector: `{app="foo"}`, 714 ProfileTypeID: "memory:inuse_space:bytes:space:byte", 715 Start: 0, 716 End: 2, 717 }) 718 bidi1 := newFakeBidiClientProfiles([]*ingestv1.ProfileSets{ 719 { 720 LabelsSets: []*typesv1.Labels{ 721 { 722 Labels: []*typesv1.LabelPair{{Name: "app", Value: "foo"}}, 723 }, 724 { 725 Labels: 
[]*typesv1.LabelPair{{Name: "app", Value: "bar"}}, 726 }, 727 }, 728 Profiles: []*ingestv1.SeriesProfile{ 729 {Timestamp: 1, LabelIndex: 0}, 730 {Timestamp: 2, LabelIndex: 1}, 731 {Timestamp: 2, LabelIndex: 0}, 732 }, 733 }, 734 }) 735 bidi2 := newFakeBidiClientProfiles([]*ingestv1.ProfileSets{ 736 { 737 LabelsSets: []*typesv1.Labels{ 738 { 739 Labels: []*typesv1.LabelPair{{Name: "app", Value: "foo"}}, 740 }, 741 { 742 Labels: []*typesv1.LabelPair{{Name: "app", Value: "bar"}}, 743 }, 744 }, 745 Profiles: []*ingestv1.SeriesProfile{ 746 {Timestamp: 1, LabelIndex: 1}, 747 {Timestamp: 1, LabelIndex: 0}, 748 {Timestamp: 2, LabelIndex: 1}, 749 }, 750 }, 751 }) 752 bidi3 := newFakeBidiClientProfiles([]*ingestv1.ProfileSets{ 753 { 754 LabelsSets: []*typesv1.Labels{ 755 { 756 Labels: []*typesv1.LabelPair{{Name: "app", Value: "foo"}}, 757 }, 758 { 759 Labels: []*typesv1.LabelPair{{Name: "app", Value: "bar"}}, 760 }, 761 }, 762 Profiles: []*ingestv1.SeriesProfile{ 763 {Timestamp: 1, LabelIndex: 1}, 764 {Timestamp: 1, LabelIndex: 0}, 765 {Timestamp: 2, LabelIndex: 0}, 766 }, 767 }, 768 }) 769 querier, err := New(&NewQuerierParams{ 770 Cfg: Config{ 771 PoolConfig: clientpool.PoolConfig{ClientCleanupPeriod: 1 * time.Millisecond}, 772 }, 773 IngestersRing: testhelper.NewMockRing([]ring.InstanceDesc{ 774 {Addr: "1"}, 775 {Addr: "2"}, 776 {Addr: "3"}, 777 }, 3), 778 PoolFactory: &poolFactory{f: func(addr string) (client.PoolClient, error) { 779 q := newFakeQuerier() 780 switch addr { 781 case "1": 782 q.mockMergeProfile(bidi1, []string{"a", "d"}, tc.blockSelect) 783 case "2": 784 q.mockMergeProfile(bidi2, []string{"b", "d"}, tc.blockSelect) 785 case "3": 786 q.mockMergeProfile(bidi3, []string{"c", "d"}, tc.blockSelect) 787 } 788 switch addr { 789 case "1": 790 q.On("MergeProfilesPprof", mock.Anything).Once().Return(bidi1) 791 case "2": 792 q.On("MergeProfilesPprof", mock.Anything).Once().Return(bidi2) 793 case "3": 794 q.On("MergeProfilesPprof", mock.Anything).Once().Return(bidi3) 
795 } 796 return q, nil 797 }}, 798 Logger: log.NewLogfmtLogger(os.Stdout), 799 }) 800 require.NoError(t, err) 801 res, err := querier.SelectMergeProfile(context.Background(), req) 802 require.NoError(t, err) 803 require.NotNil(t, res) 804 data, err := proto.Marshal(res.Msg) 805 require.NoError(t, err) 806 actual, err := profile.ParseUncompressed(data) 807 require.NoError(t, err) 808 809 expected := pprofth.FooBarProfile.Copy() 810 expected.DurationNanos = model.Time(req.Msg.End).UnixNano() - model.Time(req.Msg.Start).UnixNano() 811 expected.TimeNanos = model.Time(req.Msg.End).UnixNano() 812 for _, s := range expected.Sample { 813 s.Value[0] = s.Value[0] * 2 814 } 815 require.Equal(t, actual, expected) 816 817 var selected []testProfile 818 selected = append(selected, bidi1.kept...) 819 selected = append(selected, bidi2.kept...) 820 selected = append(selected, bidi3.kept...) 821 sort.Slice(selected, func(i, j int) bool { 822 if selected[i].Ts == selected[j].Ts { 823 return phlaremodel.CompareLabelPairs(selected[i].Labels.Labels, selected[j].Labels.Labels) < 0 824 } 825 return selected[i].Ts < selected[j].Ts 826 }) 827 require.Len(t, selected, 4) 828 require.Equal(t, 829 []testProfile{ 830 {Ts: 1, Labels: &typesv1.Labels{Labels: []*typesv1.LabelPair{{Name: "app", Value: "bar"}}}}, 831 {Ts: 1, Labels: &typesv1.Labels{Labels: []*typesv1.LabelPair{{Name: "app", Value: "foo"}}}}, 832 {Ts: 2, Labels: &typesv1.Labels{Labels: []*typesv1.LabelPair{{Name: "app", Value: "bar"}}}}, 833 {Ts: 2, Labels: &typesv1.Labels{Labels: []*typesv1.LabelPair{{Name: "app", Value: "foo"}}}}, 834 }, selected) 835 }) 836 } 837 } 838 839 func TestSelectSeries(t *testing.T) { 840 // now := time.Now().UnixMilli() 841 for _, tc := range []struct { 842 blockSelect bool 843 name string 844 }{ 845 // This tests the interoberabitlity between older ingesters and new queriers 846 {false, "WithoutBlockHints"}, 847 {true, "WithBlockHints"}, 848 } { 849 t.Run(tc.name, func(t *testing.T) { 850 req := 
connect.NewRequest(&querierv1.SelectSeriesRequest{ 851 LabelSelector: `{app="foo"}`, 852 ProfileTypeID: "memory:inuse_space:bytes:space:byte", 853 Start: 0, 854 End: 2, 855 Step: 0.001, 856 }) 857 bidi1 := newFakeBidiClientSeries([]*ingestv1.ProfileSets{ 858 { 859 LabelsSets: []*typesv1.Labels{ 860 { 861 Labels: []*typesv1.LabelPair{{Name: "app", Value: "foo"}}, 862 }, 863 { 864 Labels: []*typesv1.LabelPair{{Name: "app", Value: "bar"}}, 865 }, 866 }, 867 Profiles: []*ingestv1.SeriesProfile{ 868 {Timestamp: 1, LabelIndex: 0}, 869 {Timestamp: 2, LabelIndex: 1}, 870 {Timestamp: 2, LabelIndex: 0}, 871 }, 872 }, 873 }, &typesv1.Series{Labels: foobarlabels, Points: []*typesv1.Point{{Value: 1, Timestamp: 1, Annotations: []*typesv1.ProfileAnnotation{}}, {Value: 2, Timestamp: 2, Annotations: []*typesv1.ProfileAnnotation{}}}}) 874 bidi2 := newFakeBidiClientSeries([]*ingestv1.ProfileSets{ 875 { 876 LabelsSets: []*typesv1.Labels{ 877 { 878 Labels: []*typesv1.LabelPair{{Name: "app", Value: "foo"}}, 879 }, 880 { 881 Labels: []*typesv1.LabelPair{{Name: "app", Value: "bar"}}, 882 }, 883 }, 884 Profiles: []*ingestv1.SeriesProfile{ 885 {Timestamp: 1, LabelIndex: 1}, 886 {Timestamp: 1, LabelIndex: 0}, 887 {Timestamp: 2, LabelIndex: 1}, 888 }, 889 }, 890 }, &typesv1.Series{Labels: foobarlabels, Points: []*typesv1.Point{{Value: 1, Timestamp: 1, Annotations: []*typesv1.ProfileAnnotation{}}, {Value: 2, Timestamp: 2, Annotations: []*typesv1.ProfileAnnotation{}}}}) 891 bidi3 := newFakeBidiClientSeries([]*ingestv1.ProfileSets{ 892 { 893 LabelsSets: []*typesv1.Labels{ 894 { 895 Labels: []*typesv1.LabelPair{{Name: "app", Value: "foo"}}, 896 }, 897 { 898 Labels: []*typesv1.LabelPair{{Name: "app", Value: "bar"}}, 899 }, 900 }, 901 Profiles: []*ingestv1.SeriesProfile{ 902 {Timestamp: 1, LabelIndex: 1}, 903 {Timestamp: 1, LabelIndex: 0}, 904 {Timestamp: 2, LabelIndex: 0}, 905 }, 906 }, 907 }, &typesv1.Series{Labels: foobarlabels, Points: []*typesv1.Point{{Value: 1, Timestamp: 1, Annotations: 
[]*typesv1.ProfileAnnotation{}}, {Value: 2, Timestamp: 2, Annotations: []*typesv1.ProfileAnnotation{}}}}) 908 querier, err := New(&NewQuerierParams{ 909 Cfg: Config{ 910 PoolConfig: clientpool.PoolConfig{ClientCleanupPeriod: 1 * time.Millisecond}, 911 }, 912 IngestersRing: testhelper.NewMockRing([]ring.InstanceDesc{ 913 {Addr: "1"}, 914 {Addr: "2"}, 915 {Addr: "3"}, 916 }, 3), 917 PoolFactory: &poolFactory{f: func(addr string) (client.PoolClient, error) { 918 q := newFakeQuerier() 919 switch addr { 920 case "1": 921 q.mockMergeLabels(bidi1, []string{"a", "d"}, tc.blockSelect) 922 case "2": 923 q.mockMergeLabels(bidi2, []string{"b", "d"}, tc.blockSelect) 924 case "3": 925 q.mockMergeLabels(bidi3, []string{"c", "d"}, tc.blockSelect) 926 } 927 return q, nil 928 }}, 929 Logger: log.NewLogfmtLogger(os.Stdout), 930 }) 931 require.NoError(t, err) 932 res, err := querier.SelectSeries(context.Background(), req) 933 require.NoError(t, err) 934 // Only 2 results are used since the 3rd not required because of replication. 935 testhelper.EqualProto(t, []*typesv1.Series{ 936 {Labels: foobarlabels, Points: []*typesv1.Point{{Value: 2, Timestamp: 1, Annotations: []*typesv1.ProfileAnnotation{}}, {Value: 4, Timestamp: 2, Annotations: []*typesv1.ProfileAnnotation{}}}}, 937 }, res.Msg.Series) 938 var selected []testProfile 939 selected = append(selected, bidi1.kept...) 940 selected = append(selected, bidi2.kept...) 941 selected = append(selected, bidi3.kept...) 
942 sort.Slice(selected, func(i, j int) bool { 943 if selected[i].Ts == selected[j].Ts { 944 return phlaremodel.CompareLabelPairs(selected[i].Labels.Labels, selected[j].Labels.Labels) < 0 945 } 946 return selected[i].Ts < selected[j].Ts 947 }) 948 require.Len(t, selected, 4) 949 require.Equal(t, 950 []testProfile{ 951 {Ts: 1, Labels: &typesv1.Labels{Labels: []*typesv1.LabelPair{{Name: "app", Value: "bar"}}}}, 952 {Ts: 1, Labels: &typesv1.Labels{Labels: []*typesv1.LabelPair{{Name: "app", Value: "foo"}}}}, 953 {Ts: 2, Labels: &typesv1.Labels{Labels: []*typesv1.LabelPair{{Name: "app", Value: "bar"}}}}, 954 {Ts: 2, Labels: &typesv1.Labels{Labels: []*typesv1.LabelPair{{Name: "app", Value: "foo"}}}}, 955 }, selected) 956 }) 957 } 958 } 959 960 type fakeQuerierIngester struct { 961 mock.Mock 962 testhelper.FakePoolClient 963 } 964 965 func newFakeQuerier() *fakeQuerierIngester { 966 return &fakeQuerierIngester{} 967 } 968 969 func (f *fakeQuerierIngester) List(ctx context.Context, in *grpc_health_v1.HealthListRequest, opts ...grpc.CallOption) (*grpc_health_v1.HealthListResponse, error) { 970 return nil, errors.New("not implemented") 971 } 972 973 func (f *fakeQuerierIngester) mockMergeStacktraces(bidi *fakeBidiClientStacktraces, blocks []string, blockSelect bool) { 974 if blockSelect { 975 f.On("BlockMetadata", mock.Anything, mock.Anything).Once().Return(newBlockMeta(blocks...), nil) 976 } else { 977 f.On("BlockMetadata", mock.Anything, mock.Anything).Once().Return(nil, endpointNotExistingErr) 978 } 979 f.On("MergeProfilesStacktraces", mock.Anything).Once().Return(bidi) 980 } 981 982 func (f *fakeQuerierIngester) mockMergeLabels(bidi *fakeBidiClientSeries, blocks []string, blockSelect bool) { 983 if blockSelect { 984 f.On("BlockMetadata", mock.Anything, mock.Anything).Once().Return(newBlockMeta(blocks...), nil) 985 } else { 986 f.On("BlockMetadata", mock.Anything, mock.Anything).Once().Return(nil, endpointNotExistingErr) 987 } 988 f.On("MergeProfilesLabels", 
mock.Anything).Once().Return(bidi)
}

// mockMergeProfile registers the expectations for one ingester in a pprof
// merge: block metadata (or its absence) plus the bidi merge stream.
func (f *fakeQuerierIngester) mockMergeProfile(bidi *fakeBidiClientProfiles, blocks []string, blockSelect bool) {
	call := f.On("BlockMetadata", mock.Anything, mock.Anything).Once()
	if blockSelect {
		call.Return(newBlockMeta(blocks...), nil)
	} else {
		call.Return(nil, endpointNotExistingErr)
	}
	f.On("MergeProfilesPprof", mock.Anything).Once().Return(bidi)
}

// LabelValues replays the response registered via f.On("LabelValues", ...).
func (f *fakeQuerierIngester) LabelValues(ctx context.Context, req *connect.Request[typesv1.LabelValuesRequest]) (*connect.Response[typesv1.LabelValuesResponse], error) {
	args := f.Called(ctx, req)
	var res *connect.Response[typesv1.LabelValuesResponse]
	if v := args.Get(0); v != nil {
		res = v.(*connect.Response[typesv1.LabelValuesResponse])
	}
	return res, args.Error(1)
}

// LabelNames replays the response registered via f.On("LabelNames", ...).
func (f *fakeQuerierIngester) LabelNames(ctx context.Context, req *connect.Request[typesv1.LabelNamesRequest]) (*connect.Response[typesv1.LabelNamesResponse], error) {
	args := f.Called(ctx, req)
	var res *connect.Response[typesv1.LabelNamesResponse]
	if v := args.Get(0); v != nil {
		res = v.(*connect.Response[typesv1.LabelNamesResponse])
	}
	return res, args.Error(1)
}

// ProfileTypes replays the response registered via f.On("ProfileTypes", ...).
func (f *fakeQuerierIngester) ProfileTypes(ctx context.Context, req *connect.Request[ingestv1.ProfileTypesRequest]) (*connect.Response[ingestv1.ProfileTypesResponse], error) {
	args := f.Called(ctx, req)
	var res *connect.Response[ingestv1.ProfileTypesResponse]
	if v := args.Get(0); v != nil {
		res = v.(*connect.Response[ingestv1.ProfileTypesResponse])
	}
	return res, args.Error(1)
}

// Series replays the response registered via f.On("Series", ...).
func (f *fakeQuerierIngester) Series(ctx context.Context, req *connect.Request[ingestv1.SeriesRequest]) (*connect.Response[ingestv1.SeriesResponse], error) {
	args := f.Called(ctx, req)
	var res *connect.Response[ingestv1.SeriesResponse]
	if v := args.Get(0); v != nil {
		res = v.(*connect.Response[ingestv1.SeriesResponse])
	}
	return res, args.Error(1)
}

// BlockMetadata replays the response registered via f.On("BlockMetadata", ...).
func (f *fakeQuerierIngester) BlockMetadata(ctx context.Context, req *connect.Request[ingestv1.BlockMetadataRequest]) (*connect.Response[ingestv1.BlockMetadataResponse], error) {
	args := f.Called(ctx, req)
	var res *connect.Response[ingestv1.BlockMetadataResponse]
	if v := args.Get(0); v != nil {
		res = v.(*connect.Response[ingestv1.BlockMetadataResponse])
	}
	return res, args.Error(1)
}

// GetProfileStats replays the response registered via f.On("GetProfileStats", ...).
func (f *fakeQuerierIngester) GetProfileStats(ctx context.Context, req *connect.Request[typesv1.GetProfileStatsRequest]) (*connect.Response[typesv1.GetProfileStatsResponse], error) {
	args := f.Called(ctx, req)
	var res *connect.Response[typesv1.GetProfileStatsResponse]
	if v := args.Get(0); v != nil {
		res = v.(*connect.Response[typesv1.GetProfileStatsResponse])
	}
	return res, args.Error(1)
}

// GetBlockStats replays the response registered via f.On("GetBlockStats", ...).
func (f *fakeQuerierIngester) GetBlockStats(ctx context.Context, req *connect.Request[ingestv1.GetBlockStatsRequest]) (*connect.Response[ingestv1.GetBlockStatsResponse], error) {
	args := f.Called(ctx, req)
	var res *connect.Response[ingestv1.GetBlockStatsResponse]
	if v := args.Get(0); v != nil {
		res = v.(*connect.Response[ingestv1.GetBlockStatsResponse])
	}
	return res, args.Error(1)
}

// testProfile captures the timestamp and label set of a profile a fake bidi
// client was asked to keep.
type
testProfile struct {
	Ts     int64
	Labels *typesv1.Labels
}

// keepSelected records, for every true flag in sel, the timestamp and label
// set of the corresponding profile in cur, and returns the grown slice.
// Shared by the three fake bidi clients so the bookkeeping stays identical.
func keepSelected(kept []testProfile, cur *ingestv1.ProfileSets, sel []bool) []testProfile {
	for i, b := range sel {
		if b {
			kept = append(kept, testProfile{
				Ts:     cur.Profiles[i].Timestamp,
				Labels: cur.LabelsSets[cur.Profiles[i].LabelIndex],
			})
		}
	}
	return kept
}

// fakeBidiClientStacktraces fakes the ingester side of the
// MergeProfilesStacktraces bidi stream: it feeds the querier batches of
// candidate profiles and records which ones the querier selected.
type fakeBidiClientStacktraces struct {
	profiles chan *ingestv1.ProfileSets // next batch handed out by Receive
	batches  []*ingestv1.ProfileSets    // batches not yet queued
	kept     []testProfile              // profiles the querier chose to keep
	cur      *ingestv1.ProfileSets      // batch currently being voted on
}

// newFakeBidiClientStacktraces pre-loads the first batch so the querier's
// first Receive returns immediately. batches must be non-empty.
func newFakeBidiClientStacktraces(batches []*ingestv1.ProfileSets) *fakeBidiClientStacktraces {
	res := &fakeBidiClientStacktraces{
		profiles: make(chan *ingestv1.ProfileSets, 1),
	}
	res.profiles <- batches[0]
	res.batches = batches[1:]
	return res
}

// Send records the querier's keep/drop flags for the current batch and queues
// the next batch; when no batches remain it closes the channel, which makes
// Receive return the final merge result.
func (f *fakeBidiClientStacktraces) Send(in *ingestv1.MergeProfilesStacktracesRequest) error {
	if in.Request != nil {
		// The opening message only carries the select request; nothing to record.
		return nil
	}
	f.kept = keepSelected(f.kept, f.cur, in.Profiles)
	if len(f.batches) == 0 {
		close(f.profiles)
		return nil
	}
	f.profiles <- f.batches[0]
	f.batches = f.batches[1:]
	return nil
}

// Receive returns the next batch of selectable profiles, or — once the
// channel has been closed — a fixed fake stacktrace merge result.
func (f *fakeBidiClientStacktraces) Receive() (*ingestv1.MergeProfilesStacktracesResponse, error) {
	profiles := <-f.profiles
	if profiles == nil { // closed channel: stream done, emit the result
		return &ingestv1.MergeProfilesStacktracesResponse{
			Result: &ingestv1.MergeProfilesStacktracesResult{
				Format: ingestv1.StacktracesMergeFormat_MERGE_FORMAT_STACKTRACES,
				Stacktraces: []*ingestv1.StacktraceSample{
					{FunctionIds: []int32{0, 1, 2}, Value: 1},
				},
				FunctionNames: []string{"foo", "bar", "buzz"},
			},
		}, nil
	}
	f.cur = profiles
	return &ingestv1.MergeProfilesStacktracesResponse{
		SelectedProfiles: profiles,
	}, nil
}
func (f *fakeBidiClientStacktraces) CloseRequest() error  { return nil }
func (f *fakeBidiClientStacktraces) CloseResponse() error { return nil }

// requireFakeMergeProfilesStacktracesResultTree asserts that the tree merged
// from the fake stacktrace results contains exactly the fake function names.
func requireFakeMergeProfilesStacktracesResultTree(t *testing.T, r *phlaremodel.Tree) {
	flame := phlaremodel.NewFlameGraph(r, -1)
	sort.Strings(flame.Names)
	require.Equal(t, []string{"bar", "buzz", "foo", "total"}, flame.Names)
}

// fakeBidiClientProfiles fakes the MergeProfilesPprof bidi stream; once all
// batches are consumed it returns the serialized FooBarProfile as the result.
type fakeBidiClientProfiles struct {
	profiles chan *ingestv1.ProfileSets
	batches  []*ingestv1.ProfileSets
	kept     []testProfile
	cur      *ingestv1.ProfileSets
}

// newFakeBidiClientProfiles pre-loads the first batch; batches must be non-empty.
func newFakeBidiClientProfiles(batches []*ingestv1.ProfileSets) *fakeBidiClientProfiles {
	res := &fakeBidiClientProfiles{
		profiles: make(chan *ingestv1.ProfileSets, 1),
	}
	res.profiles <- batches[0]
	res.batches = batches[1:]
	return res
}

// Send records keep/drop flags and queues the next batch (see
// fakeBidiClientStacktraces.Send for the protocol).
func (f *fakeBidiClientProfiles) Send(in *ingestv1.MergeProfilesPprofRequest) error {
	if in.Request != nil {
		return nil
	}
	f.kept = keepSelected(f.kept, f.cur, in.Profiles)
	if len(f.batches) == 0 {
		close(f.profiles)
		return nil
	}
	f.profiles <- f.batches[0]
	f.batches = f.batches[1:]
	return nil
}

// Receive returns the next batch, or the uncompressed FooBarProfile bytes
// once the stream is exhausted.
func (f *fakeBidiClientProfiles) Receive() (*ingestv1.MergeProfilesPprofResponse, error) {
	profiles := <-f.profiles
	if profiles == nil {
		var buf bytes.Buffer
		if err := pprofth.FooBarProfile.WriteUncompressed(&buf); err != nil {
			return nil, err
		}
		return &ingestv1.MergeProfilesPprofResponse{
			Result: buf.Bytes(),
		}, nil
	}
	f.cur = profiles
	return &ingestv1.MergeProfilesPprofResponse{
		SelectedProfiles: profiles,
	}, nil
}
func (f *fakeBidiClientProfiles) CloseRequest() error  { return nil }
func (f *fakeBidiClientProfiles) CloseResponse() error { return nil }

// requireFakeMergeProfilesPprof asserts that r equals FooBarProfile with all
// first sample values and the duration scaled by n, i.e. the profile merged
// from n identical fake responses.
func requireFakeMergeProfilesPprof(t *testing.T, n int64, r *profilev1.Profile) {
	x, err := pprof.FromProfile(pprofth.FooBarProfile)
	// Check the error BEFORE touching x: previously the loop below ran first,
	// so a conversion failure would panic on a nil profile instead of failing
	// the test cleanly.
	require.NoError(t, err)
	for _, s := range x.Sample {
		s.Value[0] *= n
	}
	x.DurationNanos *= n
	require.Equal(t, x, r)
}

// fakeBidiClientSeries fakes the MergeProfilesLabels bidi stream; once all
// batches are consumed it returns the configured series as the result.
type fakeBidiClientSeries struct {
	profiles chan *ingestv1.ProfileSets
	batches  []*ingestv1.ProfileSets
	kept     []testProfile
	cur      *ingestv1.ProfileSets

	result []*typesv1.Series // series returned when the stream is exhausted
}

// newFakeBidiClientSeries pre-loads the first batch; batches must be non-empty.
func newFakeBidiClientSeries(batches []*ingestv1.ProfileSets, result ...*typesv1.Series) *fakeBidiClientSeries {
	res := &fakeBidiClientSeries{
		profiles: make(chan *ingestv1.ProfileSets, 1),
	}
	res.profiles <- batches[0]
	res.batches = batches[1:]
	res.result = result
	return res
}

// Send records keep/drop flags and queues the next batch (see
// fakeBidiClientStacktraces.Send for the protocol).
func (f *fakeBidiClientSeries) Send(in *ingestv1.MergeProfilesLabelsRequest) error {
	if in.Request != nil {
		return nil
	}
	f.kept = keepSelected(f.kept, f.cur, in.Profiles)
	if len(f.batches) == 0 {
		close(f.profiles)
		return nil
	}
	f.profiles <- f.batches[0]
	f.batches = f.batches[1:]
	return nil
}

// Receive returns the next batch, or the configured series result once the
// stream is exhausted.
func (f *fakeBidiClientSeries) Receive() (*ingestv1.MergeProfilesLabelsResponse, error) {
	profiles := <-f.profiles
	if profiles == nil {
		return &ingestv1.MergeProfilesLabelsResponse{
			Series: f.result,
		}, nil
	}
	f.cur = profiles
	return &ingestv1.MergeProfilesLabelsResponse{
		SelectedProfiles: profiles,
	}, nil
}
func (f *fakeBidiClientSeries) CloseRequest() error  { return nil }
func (f *fakeBidiClientSeries) CloseResponse() error { return nil }

// MergeSpanProfile replays the bidi client registered via
// f.On("MergeSpanProfile", ...).
func (f *fakeQuerierIngester) MergeSpanProfile(ctx context.Context) clientpool.BidiClientMergeSpanProfile {
	var (
		args = f.Called(ctx)
		res  clientpool.BidiClientMergeSpanProfile
	)
	if args[0] != nil {
1309 res = args[0].(clientpool.BidiClientMergeSpanProfile) 1310 } 1311 1312 return res 1313 } 1314 1315 func (f *fakeQuerierIngester) MergeProfilesStacktraces(ctx context.Context) clientpool.BidiClientMergeProfilesStacktraces { 1316 var ( 1317 args = f.Called(ctx) 1318 res clientpool.BidiClientMergeProfilesStacktraces 1319 ) 1320 if args[0] != nil { 1321 res = args[0].(clientpool.BidiClientMergeProfilesStacktraces) 1322 } 1323 1324 return res 1325 } 1326 1327 func (f *fakeQuerierIngester) MergeProfilesLabels(ctx context.Context) clientpool.BidiClientMergeProfilesLabels { 1328 var ( 1329 args = f.Called(ctx) 1330 res clientpool.BidiClientMergeProfilesLabels 1331 ) 1332 if args[0] != nil { 1333 res = args[0].(clientpool.BidiClientMergeProfilesLabels) 1334 } 1335 1336 return res 1337 } 1338 1339 func (f *fakeQuerierIngester) MergeProfilesPprof(ctx context.Context) clientpool.BidiClientMergeProfilesPprof { 1340 var ( 1341 args = f.Called(ctx) 1342 res clientpool.BidiClientMergeProfilesPprof 1343 ) 1344 if args[0] != nil { 1345 res = args[0].(clientpool.BidiClientMergeProfilesPprof) 1346 } 1347 1348 return res 1349 } 1350 1351 func Test_splitQueryToStores(t *testing.T) { 1352 for _, tc := range []struct { 1353 name string 1354 now model.Time 1355 start, end model.Time 1356 queryStoreAfter time.Duration 1357 plan blockPlan 1358 1359 expected storeQueries 1360 }{ 1361 { 1362 // ----|-----|-----|----|---- 1363 // ^ ^ ^ ^ 1364 // cutoff now start end 1365 // 1366 name: "start and end are in the future", 1367 now: model.TimeFromUnixNano(0), 1368 start: model.TimeFromUnixNano(int64(time.Hour)), 1369 end: model.TimeFromUnixNano(int64(2 * time.Hour)), 1370 queryStoreAfter: 30 * time.Minute, 1371 1372 expected: storeQueries{ 1373 queryStoreAfter: 30 * time.Minute, 1374 storeGateway: storeQuery{ 1375 shouldQuery: false, 1376 }, 1377 ingester: storeQuery{ 1378 shouldQuery: true, 1379 start: model.TimeFromUnixNano(int64(time.Hour)), 1380 end: model.TimeFromUnixNano(int64(2 * 
time.Hour)), 1381 }, 1382 }, 1383 }, 1384 { 1385 // ----|-------|-----|----|---- 1386 // ^ ^ ^ ^ 1387 // cutoff start now end 1388 // 1389 name: "end is in the future and start is after the cutoff", 1390 now: model.TimeFromUnixNano(int64(time.Hour)), 1391 start: model.TimeFromUnixNano(int64(45 * time.Minute)), 1392 end: model.TimeFromUnixNano(int64(2 * time.Hour)), 1393 queryStoreAfter: 30 * time.Minute, 1394 1395 expected: storeQueries{ 1396 queryStoreAfter: 30 * time.Minute, 1397 storeGateway: storeQuery{ 1398 shouldQuery: false, 1399 }, 1400 ingester: storeQuery{ 1401 shouldQuery: true, 1402 start: model.TimeFromUnixNano(int64(45 * time.Minute)), 1403 end: model.TimeFromUnixNano(int64(2 * time.Hour)), 1404 }, 1405 }, 1406 }, 1407 { 1408 // ----|-------|-----|----|---- 1409 // ^ ^ ^ ^ 1410 // start cutoff now end 1411 // 1412 name: "end is in the future and start is before the cutoff", 1413 now: model.TimeFromUnixNano(int64(time.Hour)), 1414 start: model.TimeFromUnixNano(int64(15 * time.Minute)), 1415 end: model.TimeFromUnixNano(int64(2 * time.Hour)), 1416 queryStoreAfter: 30 * time.Minute, 1417 1418 expected: storeQueries{ 1419 queryStoreAfter: 30 * time.Minute, 1420 storeGateway: storeQuery{ 1421 shouldQuery: true, 1422 start: model.TimeFromUnixNano(int64(15 * time.Minute)), 1423 end: model.TimeFromUnixNano(int64(30 * time.Minute)), 1424 }, 1425 ingester: storeQuery{ 1426 shouldQuery: true, 1427 start: model.TimeFromUnixNano(int64(30*time.Minute)) + 1, 1428 end: model.TimeFromUnixNano(int64(2 * time.Hour)), 1429 }, 1430 }, 1431 }, 1432 { 1433 // ----|-----|-----|----|---- 1434 // ^ ^ ^ ^ 1435 // start end cutoff now 1436 // 1437 name: "start and end are in the past and cutoff is in the future", 1438 now: model.TimeFromUnixNano(int64(2 * time.Hour)), 1439 start: model.TimeFromUnixNano(0), 1440 end: model.TimeFromUnixNano(int64(time.Hour)), 1441 queryStoreAfter: 30 * time.Minute, 1442 1443 expected: storeQueries{ 1444 queryStoreAfter: 30 * time.Minute, 1445 
storeGateway: storeQuery{ 1446 shouldQuery: true, 1447 start: model.TimeFromUnixNano(0), 1448 end: model.TimeFromUnixNano(int64(time.Hour)), 1449 }, 1450 ingester: storeQuery{ 1451 shouldQuery: false, 1452 }, 1453 }, 1454 }, 1455 { 1456 // ----|-----|-----|----|---- 1457 // ^ ^ ^ ^ 1458 // start cutoff end now 1459 // 1460 name: "start and end are within cutoff", 1461 now: model.TimeFromUnixNano(int64(1 * time.Hour)), 1462 start: model.TimeFromUnixNano(0), 1463 end: model.TimeFromUnixNano(int64(45 * time.Minute)), 1464 queryStoreAfter: 30 * time.Minute, 1465 1466 expected: storeQueries{ 1467 queryStoreAfter: 30 * time.Minute, 1468 storeGateway: storeQuery{ 1469 shouldQuery: true, 1470 start: model.TimeFromUnixNano(0), 1471 end: model.TimeFromUnixNano(int64(30 * time.Minute)), 1472 }, 1473 ingester: storeQuery{ 1474 shouldQuery: true, 1475 start: model.TimeFromUnixNano(int64(30*time.Minute)) + 1, 1476 end: model.TimeFromUnixNano(int64(45 * time.Minute)), 1477 }, 1478 }, 1479 }, 1480 { 1481 // ----|----------|----|---- 1482 // ^ ^ ^ 1483 // start=cutoff end now 1484 // 1485 name: "start is exactly at cutoff", 1486 now: model.TimeFromUnixNano(int64(1 * time.Hour)), 1487 start: model.TimeFromUnixNano(int64(30 * time.Minute)), 1488 end: model.TimeFromUnixNano(int64(45 * time.Minute)), 1489 queryStoreAfter: 30 * time.Minute, 1490 1491 expected: storeQueries{ 1492 queryStoreAfter: 30 * time.Minute, 1493 storeGateway: storeQuery{ 1494 shouldQuery: false, 1495 }, 1496 ingester: storeQuery{ 1497 shouldQuery: true, 1498 start: model.TimeFromUnixNano(int64(30 * time.Minute)), 1499 end: model.TimeFromUnixNano(int64(45 * time.Minute)), 1500 }, 1501 }, 1502 }, 1503 { 1504 // ----|------|--------|---- 1505 // ^ ^ ^ 1506 // start end=cutoff now 1507 // 1508 name: "end is exactly at cutoff", 1509 now: model.TimeFromUnixNano(int64(15 * time.Hour)), 1510 start: model.TimeFromUnixNano(int64(60 * time.Minute)), 1511 end: model.TimeFromUnixNano(int64(30 * time.Minute)), 1512 
queryStoreAfter: 30 * time.Minute, 1513 1514 expected: storeQueries{ 1515 queryStoreAfter: 30 * time.Minute, 1516 storeGateway: storeQuery{ 1517 shouldQuery: true, 1518 start: model.TimeFromUnixNano(int64(60 * time.Minute)), 1519 end: model.TimeFromUnixNano(int64(30 * time.Minute)), 1520 }, 1521 ingester: storeQuery{ 1522 shouldQuery: false, 1523 }, 1524 }, 1525 }, 1526 { 1527 // ----|------|-----|----|---- 1528 // ^ ^ ^ ^ 1529 // cutoff start end now 1530 // 1531 name: "start is after at cutoff", 1532 now: model.TimeFromUnixNano(int64(1 * time.Hour)), 1533 start: model.TimeFromUnixNano(int64(30 * time.Minute)), 1534 end: model.TimeFromUnixNano(int64(45 * time.Minute)), 1535 queryStoreAfter: 30 * time.Minute, 1536 1537 expected: storeQueries{ 1538 queryStoreAfter: 30 * time.Minute, 1539 storeGateway: storeQuery{ 1540 shouldQuery: false, 1541 }, 1542 ingester: storeQuery{ 1543 shouldQuery: true, 1544 start: model.TimeFromUnixNano(int64(30 * time.Minute)), 1545 end: model.TimeFromUnixNano(int64(45 * time.Minute)), 1546 }, 1547 }, 1548 }, 1549 { 1550 name: "with a plan we touch all stores at full time window and eleminate later based on the plan", 1551 now: model.TimeFromUnixNano(int64(4 * time.Hour)), 1552 start: model.TimeFromUnixNano(int64(30 * time.Minute)), 1553 end: model.TimeFromUnixNano(int64(45*time.Minute) + int64(3*time.Hour)), 1554 queryStoreAfter: 30 * time.Minute, 1555 plan: blockPlan{"replica-a": &blockPlanEntry{InstanceTypes: []instanceType{ingesterInstance}, BlockHints: &ingestv1.BlockHints{Ulids: []string{"block-a", "block-b"}}}}, 1556 1557 expected: storeQueries{ 1558 queryStoreAfter: 0, 1559 storeGateway: storeQuery{ 1560 shouldQuery: true, 1561 start: model.TimeFromUnixNano(int64(30 * time.Minute)), 1562 end: model.TimeFromUnixNano(int64(45*time.Minute) + int64(3*time.Hour)), 1563 }, 1564 ingester: storeQuery{ 1565 shouldQuery: true, 1566 start: model.TimeFromUnixNano(int64(30 * time.Minute)), 1567 end: model.TimeFromUnixNano(int64(45*time.Minute) 
+ int64(3*time.Hour)), 1568 }, 1569 }, 1570 }, 1571 } { 1572 tc := tc 1573 t.Run(tc.name, func(t *testing.T) { 1574 actual := splitQueryToStores( 1575 tc.start, 1576 tc.end, 1577 tc.now, 1578 tc.queryStoreAfter, 1579 tc.plan, 1580 ) 1581 require.Equal(t, tc.expected, actual) 1582 }) 1583 } 1584 } 1585 1586 func Test_GetProfileStats(t *testing.T) { 1587 ctx := tenant.InjectTenantID(context.Background(), "1234") 1588 1589 dbPath := t.TempDir() 1590 localBucket, err := objstoreclient.NewBucket(ctx, objstoreclient.Config{ 1591 StorageBackendConfig: objstoreclient.StorageBackendConfig{ 1592 Backend: objstoreclient.Filesystem, 1593 Filesystem: filesystem.Config{ 1594 Directory: dbPath, 1595 }, 1596 }, 1597 Prefix: "testdata", 1598 }, "") 1599 require.NoError(t, err) 1600 1601 index := bucketindex.Index{Blocks: []*bucketindex.Block{{ 1602 MinTime: 0, 1603 MaxTime: 3, 1604 }}, 1605 Version: bucketindex.IndexVersion3, 1606 } 1607 indexJson, err := json.Marshal(index) 1608 require.NoError(t, err) 1609 1610 var gzipContent bytes.Buffer 1611 gzip := gzip.NewWriter(&gzipContent) 1612 gzip.Name = bucketindex.IndexFilename 1613 _, err = gzip.Write(indexJson) 1614 gzip.Close() 1615 require.NoError(t, err) 1616 1617 err = localBucket.Upload(ctx, path.Join("1234", "phlaredb", bucketindex.IndexCompressedFilename), &gzipContent) 1618 require.NoError(t, err) 1619 1620 req := connect.NewRequest(&typesv1.GetProfileStatsRequest{}) 1621 querier, err := New(&NewQuerierParams{ 1622 Cfg: Config{ 1623 PoolConfig: clientpool.PoolConfig{ClientCleanupPeriod: 1 * time.Millisecond}, 1624 }, 1625 IngestersRing: testhelper.NewMockRing([]ring.InstanceDesc{ 1626 {Addr: "1"}, 1627 {Addr: "2"}, 1628 {Addr: "3"}, 1629 }, 3), 1630 PoolFactory: &poolFactory{f: func(addr string) (client.PoolClient, error) { 1631 q := newFakeQuerier() 1632 switch addr { 1633 case "1": 1634 q.On("GetProfileStats", mock.Anything, mock.Anything).Return(connect.NewResponse(&typesv1.GetProfileStatsResponse{ 1635 DataIngested: 
true, 1636 OldestProfileTime: 1, 1637 NewestProfileTime: 4, 1638 }), nil) 1639 case "2": 1640 q.On("GetProfileStats", mock.Anything, mock.Anything).Return(connect.NewResponse(&typesv1.GetProfileStatsResponse{ 1641 DataIngested: true, 1642 OldestProfileTime: 1, 1643 NewestProfileTime: 5, 1644 }), nil) 1645 case "3": 1646 q.On("GetProfileStats", mock.Anything, mock.Anything).Return(connect.NewResponse(&typesv1.GetProfileStatsResponse{ 1647 DataIngested: true, 1648 OldestProfileTime: 2, 1649 NewestProfileTime: 5, 1650 }), nil) 1651 } 1652 return q, nil 1653 }}, 1654 Logger: log.NewLogfmtLogger(os.Stdout), 1655 StorageBucket: localBucket, 1656 StoreGatewayCfg: storegateway.Config{ 1657 ShardingRing: storegateway.RingConfig{ 1658 Ring: util.CommonRingConfig{ 1659 KVStore: kv.Config{ 1660 Store: "inmemory", 1661 }, 1662 }, 1663 ReplicationFactor: 1, 1664 }, 1665 }, 1666 }) 1667 1668 require.NoError(t, err) 1669 out, err := querier.GetProfileStats(ctx, req) 1670 require.NoError(t, err) 1671 require.Equal(t, &typesv1.GetProfileStatsResponse{ 1672 DataIngested: true, 1673 OldestProfileTime: 0, 1674 NewestProfileTime: 5, 1675 }, out.Msg) 1676 } 1677 1678 // The code below can be useful for testing deduping directly to a cluster. 
1679 // func TestDedupeLive(t *testing.T) { 1680 // clients, err := createClients(context.Background()) 1681 // require.NoError(t, err) 1682 // st, err := dedupe(context.Background(), clients) 1683 // require.NoError(t, err) 1684 // require.Equal(t, 2, len(st)) 1685 // } 1686 1687 // func createClients(ctx context.Context) ([]responseFromIngesters[BidiClientMergeProfilesStacktraces], error) { 1688 // var clients []responseFromIngesters[BidiClientMergeProfilesStacktraces] 1689 // for i := 1; i < 6; i++ { 1690 // addr := fmt.Sprintf("localhost:4%d00", i) 1691 // c, err := clientpool.PoolFactory(addr) 1692 // if err != nil { 1693 // return nil, err 1694 // } 1695 // res, err := c.Check(ctx, &grpc_health_v1.HealthCheckRequest{ 1696 // Service: ingestv1.IngesterService_ServiceDesc.ServiceName, 1697 // }) 1698 // if err != nil { 1699 // return nil, err 1700 // } 1701 // if res.Status != grpc_health_v1.HealthCheckResponse_SERVING { 1702 // return nil, fmt.Errorf("ingester %s is not serving", addr) 1703 // } 1704 // bidi := c.(IngesterQueryClient).MergeProfilesStacktraces(ctx) 1705 // profileType, err := phlaremodel.ParseProfileTypeSelector("process_cpu:cpu:nanoseconds:cpu:nanoseconds") 1706 // if err != nil { 1707 // return nil, err 1708 // } 1709 // now := time.Now() 1710 // err = bidi.Send(&ingestv1.MergeProfilesStacktracesRequest{ 1711 // Request: &ingestv1.SelectProfilesRequest{ 1712 // LabelSelector: `{namespace="phlare-dev-001"}`, 1713 // Type: profileType, 1714 // Start: int64(model.TimeFromUnixNano(now.Add(-30 * time.Minute).UnixNano())), 1715 // End: int64(model.TimeFromUnixNano(now.UnixNano())), 1716 // }, 1717 // }) 1718 // if err != nil { 1719 // return nil, err 1720 // } 1721 // clients = append(clients, responseFromIngesters[BidiClientMergeProfilesStacktraces]{ 1722 // response: bidi, 1723 // addr: addr, 1724 // }) 1725 // } 1726 // return clients, nil 1727 // }