github.com/grafana/pyroscope@v1.18.0/tools/k6/tests/reads.js (about)

     1  import { group } from 'k6';
     2  import pyroscope from 'https://jslib.k6.io/http-instrumentation-pyroscope/1.0.0/index.js';
     3  
     4  import {
     5    doLabelNamesRequest,
     6    doRenderDiffRequest,
     7    doRenderRequest,
     8    doSelectMergeProfileRequest,
     9    doSelectMergeStacktracesRequest,
    10    doSeriesRequest,
    11  } from '../lib/request.js';
    12  
// k6 run configuration for the reads load test.
export const options = {
  ext: {
    // Grafana Cloud k6 (formerly Load Impact) project this test reports to.
    loadimpact: {
      projectID: 16425,
      name: 'reads',
    },
  },

  scenarios: {
    // Drive a steady, evenly spaced load: 10 iterations per minute for 5
    // minutes, starting with 3 VUs and growing up to 10 if iterations lag.
    even_reads: {
      executor: 'constant-arrival-rate',
      duration: '5m',
      rate: 10,
      timeUnit: '1m',
      preAllocatedVUs: 3,
      maxVUs: 10,
    },
  },

  thresholds: {
    // Fail the test run if fewer than 90% of checks pass.
    checks: ['rate>0.9'],
  },
};
    36  
// This is the query distribution for Pyroscope, pulled from a 7-day period in
// ops. Ultimately we should try to tune our load tests to match this
// distribution. At the moment, we're making evenly distributed requests across
// the implemented endpoints.
    41  //
// We should also try to identify the distribution of query parameters used and
// make the load tests reflect that.
    44  //
    45  // count   %       endpoint                                           implemented
    46  // ------  ------  -----------------------------------                -----------
    47  // 11997   78.03   /querier.v1.QuerierService/SelectMergeProfile      ✅
    48  //  2298   14.95   /pyroscope/render                                  ✅
    49  //   461    3.00   /querier.v1.QuerierService/SelectMergeStacktraces  ✅
    50  //   221    1.44   /querier.v1.QuerierService/LabelNames              ✅
    51  //   130    0.85   /querier.v1.QuerierService/Series                  ✅
    52  //   100    0.65   /pyroscope/render-diff                             ✅
    53  //    59    0.38   /querier.v1.QuerierService/ProfileTypes            ❌
    54  //    54    0.35   /querier.v1.QuerierService/SelectSeries            ❌
    55  //    28    0.18   /querier.v1.QuerierService/LabelValues             ❌
    56  //    26    0.17   /querier.v1.QuerierService/SelectMergeSpanProfile  ❌
    57  //     1    0.01   /querier.v1.QuerierService/GetProfileStats         ❌
    58  
// Enable Pyroscope auto-labeling: instrument k6's HTTP client (via the jslib
// imported above) so outgoing requests are tagged for profile correlation.
pyroscope.instrumentHTTP();
    61  
    62  export default function() {
    63    const timeRanges = (__ENV.K6_QUERY_DURATIONS || '1h').split(',').map((s) => {
    64      return [
    65        parseInt(s.slice(0, -1)),
    66        s.slice(-1),
    67      ];
    68    });
    69  
    70    const serviceName = __ENV.K6_QUERY_SERVICE_NAME || 'fire-dev-001/ingester';
    71  
    72    for (const [scalar, unit] of timeRanges) {
    73      group(`reads last ${scalar}${unit}`, () => {
    74        const { start, end } = newRelativeTimeRange(scalar, unit);
    75        doAllQueryRequests(serviceName, start, end);
    76      });
    77    }
    78  }
    79  
    80  function doAllQueryRequests(serviceName, start, end) {
    81    doSelectMergeProfileRequest({
    82      start,
    83      end,
    84      profile_typeID: 'process_cpu:cpu:nanoseconds:cpu:nanoseconds',
    85      label_selector: `{service_name="${serviceName}"}`,
    86    });
    87  
    88    doRenderRequest({
    89      from: start,
    90      until: end,
    91      query: `process_cpu:cpu:nanoseconds:cpu:nanoseconds{service_name="${serviceName}"}`,
    92      aggregation: 'sum',
    93      format: 'json',
    94      'max-nodes': 16384,
    95    });
    96  
    97    doSelectMergeStacktracesRequest({
    98      start,
    99      end,
   100      profile_typeID: 'process_cpu:cpu:nanoseconds:cpu:nanoseconds',
   101      label_selector: `{service_name="${serviceName}"}`,
   102      'max-nodes': 16384,
   103    });
   104  
   105    doLabelNamesRequest({
   106      start,
   107      end,
   108      matchers: [
   109        `{__profile_type__="process_cpu:cpu:nanoseconds:cpu:nanoseconds", service_name="${serviceName}"}`,
   110      ],
   111    });
   112  
   113    doSeriesRequest({
   114      start,
   115      end,
   116      labelNames: ['service_name', '__profile_type__'],
   117      matchers: [],
   118    });
   119  
   120    doRenderDiffRequest({
   121      rightQuery: `process_cpu:cpu:nanoseconds:cpu:nanoseconds{service_name="${serviceName}"}`,
   122      rightFrom: start,
   123      rightUntil: end,
   124      leftQuery: `process_cpu:cpu:nanoseconds:cpu:nanoseconds{service_name="${serviceName}"}`,
   125      leftFrom: start - (end - start), // Whatever the right query range is, we want to go back the same amount.
   126      leftUntil: start,
   127      format: 'json',
   128      'max-nodes': 16384,
   129    });
   130  }
   131  
   132  function newRelativeTimeRange(scalar, unit) {
   133    const end = Date.now();
   134    switch (unit) {
   135      case 's':
   136        return { start: end - scalar * 1000, end };
   137      case 'm':
   138        return { start: end - scalar * 60 * 1000, end };
   139      case 'h':
   140        return { start: end - scalar * 60 * 60 * 1000, end };
   141      case 'd':
   142        return { start: end - scalar * 24 * 60 * 60 * 1000, end };
   143      default:
   144        throw new Error(`Invalid unit: ${unit}`);
   145    }
   146  }