github.com/muhammadn/cortex@v1.9.1-0.20220510110439-46bb7000d03d/pkg/configs/legacy_promql/testdata/histograms.test (about)

     1  # Two histograms with 4 buckets each (x_sum and x_count not included,
     2  # only buckets). Lowest bucket for one histogram < 0, for the other >
     3  # 0. They have the same name, just separated by label. Not useful in
     4  # practice, but can happen (if clients change bucketing), and the
     5  # server has to cope with it.
     6  
     7  # Test histogram.
     8  load 5m
     9  	testhistogram_bucket{le="0.1", start="positive"}	0+5x10
    10  	testhistogram_bucket{le=".2", start="positive"}		0+7x10
    11  	testhistogram_bucket{le="1e0", start="positive"}	0+11x10
    12  	testhistogram_bucket{le="+Inf", start="positive"}	0+12x10
    13  	testhistogram_bucket{le="-.2", start="negative"}	0+1x10
    14  	testhistogram_bucket{le="-0.1", start="negative"}	0+2x10
    15  	testhistogram_bucket{le="0.3", start="negative"}	0+2x10
    16  	testhistogram_bucket{le="+Inf", start="negative"}	0+3x10
    17  
    18  
    19  # Now a more realistic histogram per job and instance to test aggregation.
    20  load 5m
    21  	request_duration_seconds_bucket{job="job1", instance="ins1", le="0.1"}	0+1x10
    22  	request_duration_seconds_bucket{job="job1", instance="ins1", le="0.2"}	0+3x10
    23  	request_duration_seconds_bucket{job="job1", instance="ins1", le="+Inf"}	0+4x10
    24  	request_duration_seconds_bucket{job="job1", instance="ins2", le="0.1"}	0+2x10
    25  	request_duration_seconds_bucket{job="job1", instance="ins2", le="0.2"}	0+5x10
    26  	request_duration_seconds_bucket{job="job1", instance="ins2", le="+Inf"}	0+6x10
    27  	request_duration_seconds_bucket{job="job2", instance="ins1", le="0.1"}	0+3x10
    28  	request_duration_seconds_bucket{job="job2", instance="ins1", le="0.2"}	0+4x10
    29  	request_duration_seconds_bucket{job="job2", instance="ins1", le="+Inf"}	0+6x10
    30  	request_duration_seconds_bucket{job="job2", instance="ins2", le="0.1"}	0+4x10
    31  	request_duration_seconds_bucket{job="job2", instance="ins2", le="0.2"}	0+7x10
    32  	request_duration_seconds_bucket{job="job2", instance="ins2", le="+Inf"}	0+9x10
    33  
    34  
    35  # Quantile too low.
    36  eval instant at 50m histogram_quantile(-0.1, testhistogram_bucket)
    37  	{start="positive"} -Inf
    38  	{start="negative"} -Inf
    39  
    40  # Quantile too high.
    41  eval instant at 50m histogram_quantile(1.01, testhistogram_bucket)
    42  	{start="positive"} +Inf
    43  	{start="negative"} +Inf
    44  
    45  # Quantile value in lowest bucket, which is positive.
    46  eval instant at 50m histogram_quantile(0, testhistogram_bucket{start="positive"})
    47  	{start="positive"} 0
    48  
    49  # Quantile value in lowest bucket, which is negative.
    50  eval instant at 50m histogram_quantile(0, testhistogram_bucket{start="negative"})
    51  	{start="negative"} -0.2
    52  
    53  # Quantile value in highest bucket.
    54  eval instant at 50m histogram_quantile(1, testhistogram_bucket)
    55  	{start="positive"} 1
    56  	{start="negative"} 0.3
    57  
    58  # Finally some useful quantiles.
    59  eval instant at 50m histogram_quantile(0.2, testhistogram_bucket)
    60  	{start="positive"} 0.048
    61  	{start="negative"} -0.2
    62  
    63  
    64  eval instant at 50m histogram_quantile(0.5, testhistogram_bucket)
    65  	{start="positive"} 0.15
    66  	{start="negative"} -0.15
    67  
    68  eval instant at 50m histogram_quantile(0.8, testhistogram_bucket)
    69  	{start="positive"} 0.72
    70  	{start="negative"} 0.3
    71  
    72  # More realistic with rates.
    73  eval instant at 50m histogram_quantile(0.2, rate(testhistogram_bucket[5m]))
    74  	{start="positive"} 0.048
    75  	{start="negative"} -0.2
    76  
    77  eval instant at 50m histogram_quantile(0.5, rate(testhistogram_bucket[5m]))
    78  	{start="positive"} 0.15
    79  	{start="negative"} -0.15
    80  
    81  eval instant at 50m histogram_quantile(0.8, rate(testhistogram_bucket[5m]))
    82  	{start="positive"} 0.72
    83  	{start="negative"} 0.3
    84  
    85  # Aggregated histogram: Everything in one.
    86  eval instant at 50m histogram_quantile(0.3, sum(rate(request_duration_seconds_bucket[5m])) by (le))
    87  	{} 0.075
    88  
    89  eval instant at 50m histogram_quantile(0.5, sum(rate(request_duration_seconds_bucket[5m])) by (le))
    90  	{} 0.1277777777777778
    91  
    92  # Aggregated histogram: Everything in one. Now with avg, which does not change anything.
    93  eval instant at 50m histogram_quantile(0.3, avg(rate(request_duration_seconds_bucket[5m])) by (le))
    94  	{} 0.075
    95  
    96  eval instant at 50m histogram_quantile(0.5, avg(rate(request_duration_seconds_bucket[5m])) by (le))
    97  	{} 0.12777777777777778
    98  
    99  # Aggregated histogram: By instance (aggregating away the job label).
   100  eval instant at 50m histogram_quantile(0.3, sum(rate(request_duration_seconds_bucket[5m])) by (le, instance))
   101  	{instance="ins1"} 0.075
   102  	{instance="ins2"} 0.075
   103  
   104  eval instant at 50m histogram_quantile(0.5, sum(rate(request_duration_seconds_bucket[5m])) by (le, instance))
   105  	{instance="ins1"} 0.1333333333
   106  	{instance="ins2"} 0.125
   107  
   108  # Aggregated histogram: By job (aggregating away the instance label).
   109  eval instant at 50m histogram_quantile(0.3, sum(rate(request_duration_seconds_bucket[5m])) by (le, job))
   110  	{job="job1"} 0.1
   111  	{job="job2"} 0.0642857142857143
   112  
   113  eval instant at 50m histogram_quantile(0.5, sum(rate(request_duration_seconds_bucket[5m])) by (le, job))
   114  	{job="job1"} 0.14
   115  	{job="job2"} 0.1125
   116  
   117  # Aggregated histogram: By job and instance.
   118  eval instant at 50m histogram_quantile(0.3, sum(rate(request_duration_seconds_bucket[5m])) by (le, job, instance))
   119  	{instance="ins1", job="job1"} 0.11
   120  	{instance="ins2", job="job1"} 0.09
   121  	{instance="ins1", job="job2"} 0.06
   122  	{instance="ins2", job="job2"} 0.0675
   123  
   124  eval instant at 50m histogram_quantile(0.5, sum(rate(request_duration_seconds_bucket[5m])) by (le, job, instance))
   125  	{instance="ins1", job="job1"} 0.15
   126  	{instance="ins2", job="job1"} 0.1333333333333333
   127  	{instance="ins1", job="job2"} 0.1
   128  	{instance="ins2", job="job2"} 0.1166666666666667
   129  
   130  # The unaggregated histogram for comparison. Same result as the previous one.
   131  eval instant at 50m histogram_quantile(0.3, rate(request_duration_seconds_bucket[5m]))
   132  	{instance="ins1", job="job1"} 0.11
   133  	{instance="ins2", job="job1"} 0.09
   134  	{instance="ins1", job="job2"} 0.06
   135  	{instance="ins2", job="job2"} 0.0675
   136  
   137  eval instant at 50m histogram_quantile(0.5, rate(request_duration_seconds_bucket[5m]))
   138  	{instance="ins1", job="job1"} 0.15
   139  	{instance="ins2", job="job1"} 0.13333333333333333
   140  	{instance="ins1", job="job2"} 0.1
   141  	{instance="ins2", job="job2"} 0.11666666666666667
   142  
   143  # A histogram with nonmonotonic bucket counts. This may happen when recording
   144  # rule evaluation or federation races scrape ingestion, causing some bucket
   145  # counts to be derived from fewer samples. The wrong answer we want to avoid
   146  # is for histogram_quantile(0.99, nonmonotonic_bucket) to return ~1000 instead
   147  # of 1.
   148  
   149  load 5m
   150      nonmonotonic_bucket{le="0.1"}   0+1x10
   151      nonmonotonic_bucket{le="1"}     0+9x10
   152      nonmonotonic_bucket{le="10"}    0+8x10
   153      nonmonotonic_bucket{le="100"}   0+8x10
   154      nonmonotonic_bucket{le="1000"}  0+9x10
   155      nonmonotonic_bucket{le="+Inf"}  0+9x10
   156  
   157  # Nonmonotonic buckets
   158  eval instant at 50m histogram_quantile(0.99, nonmonotonic_bucket)
   159      {} 0.989875