github.com/hernad/nomad@v1.6.112/ui/tests/acceptance/topology-test.js (about)

     1  /**
     2   * Copyright (c) HashiCorp, Inc.
     3   * SPDX-License-Identifier: MPL-2.0
     4   */
     5  
     6  /* eslint-disable qunit/require-expect */
     7  import { get } from '@ember/object';
     8  import { currentURL, typeIn, click } from '@ember/test-helpers';
     9  import { module, test } from 'qunit';
    10  import { setupApplicationTest } from 'ember-qunit';
    11  import { setupMirage } from 'ember-cli-mirage/test-support';
    12  import a11yAudit from 'nomad-ui/tests/helpers/a11y-audit';
    13  import Topology from 'nomad-ui/tests/pages/topology';
    14  import {
    15    formatBytes,
    16    formatScheduledBytes,
    17    formatHertz,
    18    formatScheduledHertz,
    19  } from 'nomad-ui/utils/units';
    20  import queryString from 'query-string';
    21  import percySnapshot from '@percy/ember';
    22  import faker from 'nomad-ui/mirage/faker';
    23  
    24  const sumResources = (list, dimension) =>
    25    list.reduce((agg, val) => agg + (get(val, dimension) || 0), 0);
    26  
    27  module('Acceptance | topology', function (hooks) {
    28    setupApplicationTest(hooks);
    29    setupMirage(hooks);
    30  
  hooks.beforeEach(function () {
    // Baseline state shared by every test: several node pools plus a job
    // with no allocations — individual tests create the nodes/allocations
    // they need on top of this.
    server.createList('node-pool', 5);
    server.create('job', { createAllocations: false });
  });
    35  
  test('it passes an accessibility audit', async function (assert) {
    // a11yAudit performs exactly one assertion, matching this expectation.
    assert.expect(1);

    server.createList('node', 3);
    server.createList('allocation', 5);

    await Topology.visit();
    await a11yAudit(assert);
  });
    45  
  test('by default the info panel shows cluster aggregate stats', async function (assert) {
    // Deterministic faker output keeps the Percy snapshot stable run-to-run.
    faker.seed(1);
    server.create('node-pool', { name: 'all' });
    server.createList('node', 3);
    server.createList('allocation', 5);

    await Topology.visit();

    await percySnapshot(assert);

    assert.equal(Topology.infoPanelTitle, 'Cluster Details');
    assert.notOk(Topology.filteredNodesWarning.isPresent);

    assert.equal(
      Topology.clusterInfoPanel.nodeCount,
      `${server.schema.nodes.all().length} Clients`
    );

    // Only allocations that are actually scheduled (pending or running)
    // should be counted in the panel.
    const allocs = server.schema.allocations.all().models;
    const scheduledAllocs = allocs.filter((alloc) =>
      ['pending', 'running'].includes(alloc.clientStatus)
    );
    assert.equal(
      Topology.clusterInfoPanel.allocCount,
      `${scheduledAllocs.length} Allocations`
    );

    // Node pool count ignores 'all'.
    const nodePools = server.schema.nodePools
      .all()
      .models.filter((p) => p.name !== 'all');
    assert.equal(
      Topology.clusterInfoPanel.nodePoolCount,
      `${nodePools.length} Node Pools`
    );

    // Totals come from node resources; reserved amounts come from the
    // task-level resources of scheduled allocations.
    const nodeResources = server.schema.nodes
      .all()
      .models.mapBy('nodeResources');
    const taskResources = scheduledAllocs
      .mapBy('taskResources.models')
      .flat()
      .mapBy('resources');

    const totalMem = sumResources(nodeResources, 'Memory.MemoryMB');
    const totalCPU = sumResources(nodeResources, 'Cpu.CpuShares');
    const reservedMem = sumResources(taskResources, 'Memory.MemoryMB');
    const reservedCPU = sumResources(taskResources, 'Cpu.CpuShares');

    assert.equal(
      Topology.clusterInfoPanel.memoryProgressValue,
      reservedMem / totalMem
    );
    assert.equal(
      Topology.clusterInfoPanel.cpuProgressValue,
      reservedCPU / totalCPU
    );

    // Memory values are in MiB, so scale to bytes before formatting.
    assert.equal(
      Topology.clusterInfoPanel.memoryAbsoluteValue,
      `${formatBytes(reservedMem * 1024 * 1024)} / ${formatBytes(
        totalMem * 1024 * 1024
      )} reserved`
    );

    assert.equal(
      Topology.clusterInfoPanel.cpuAbsoluteValue,
      `${formatHertz(reservedCPU, 'MHz')} / ${formatHertz(
        totalCPU,
        'MHz'
      )} reserved`
    );
  });
   119  
  test('all allocations for all namespaces and all clients are queried on load', async function (assert) {
    server.createList('node', 3);
    server.createList('allocation', 5);

    await Topology.visit();
    const requests = this.server.pretender.handledRequests;
    // Nodes must be fetched with resources so the viz can size each client.
    assert.ok(requests.findBy('url', '/v1/nodes?resources=true'));

    const allocationsRequest = requests.find((req) =>
      req.url.startsWith('/v1/allocations')
    );
    assert.ok(allocationsRequest);

    // The allocations request must span every namespace ('*'), skip task
    // states, and include resources.
    const allocationRequestParams = queryString.parse(
      allocationsRequest.url.split('?')[1]
    );
    assert.deepEqual(allocationRequestParams, {
      namespace: '*',
      task_states: 'false',
      resources: 'true',
    });
  });
   142  
  test('when an allocation is selected, the info panel shows information on the allocation', async function (assert) {
    const nodes = server.createList('node', 5);
    const job = server.create('job', { createAllocations: false });
    const taskGroup = server.schema.find('taskGroup', job.taskGroupIds[0]).name;
    const allocs = server.createList('allocation', 5, {
      forceRunningClientStatus: true,
      jobId: job.id,
      taskGroup,
    });

    // Get the first alloc of the first node that has an alloc
    const sortedNodes = nodes.sortBy('datacenter');
    let node, alloc;
    for (let n of sortedNodes) {
      alloc = allocs.find((a) => a.nodeId === n.id);
      if (alloc) {
        node = n;
        break;
      }
    }

    // Translate the chosen node into its position in the visualization:
    // datacenters are rendered in sorted order, nodes within a datacenter
    // in creation order.
    const dcIndex = nodes
      .mapBy('datacenter')
      .uniq()
      .sort()
      .indexOf(node.datacenter);
    const nodeIndex = nodes
      .filterBy('datacenter', node.datacenter)
      .indexOf(node);

    // Re-visit and re-select the allocation; navigating away (visitAlloc,
    // visitJob, visitClient below) discards the selection.
    const reset = async () => {
      await Topology.visit();
      await Topology.viz.datacenters[dcIndex].nodes[
        nodeIndex
      ].memoryRects[0].select();
    };

    await reset();
    assert.equal(Topology.infoPanelTitle, 'Allocation Details');

    // The panel shows the short (first segment of the UUID) alloc id.
    assert.equal(Topology.allocInfoPanel.id, alloc.id.split('-')[0]);

    const uniqueClients = allocs.mapBy('nodeId').uniq();
    assert.equal(
      Topology.allocInfoPanel.siblingAllocs,
      `Sibling Allocations: ${allocs.length}`
    );
    assert.equal(
      Topology.allocInfoPanel.uniquePlacements,
      `Unique Client Placements: ${uniqueClients.length}`
    );

    assert.equal(Topology.allocInfoPanel.job, job.name);
    assert.ok(Topology.allocInfoPanel.taskGroup.endsWith(alloc.taskGroup));
    assert.equal(Topology.allocInfoPanel.client, node.id.split('-')[0]);

    await Topology.allocInfoPanel.visitAlloc();
    assert.equal(currentURL(), `/allocations/${alloc.id}`);

    await reset();

    await Topology.allocInfoPanel.visitJob();
    assert.equal(currentURL(), `/jobs/${job.id}@default`);

    await reset();

    await Topology.allocInfoPanel.visitClient();
    assert.equal(currentURL(), `/clients/${node.id}`);
  });
   212  
   213    test('changing which allocation is selected changes the metric charts', async function (assert) {
   214      server.create('node');
   215      const job1 = server.create('job', { createAllocations: false });
   216      const taskGroup1 = server.schema.find(
   217        'taskGroup',
   218        job1.taskGroupIds[0]
   219      ).name;
   220      server.create('allocation', {
   221        forceRunningClientStatus: true,
   222        jobId: job1.id,
   223        taskGroup1,
   224      });
   225  
   226      const job2 = server.create('job', { createAllocations: false });
   227      const taskGroup2 = server.schema.find(
   228        'taskGroup',
   229        job2.taskGroupIds[0]
   230      ).name;
   231      server.create('allocation', {
   232        forceRunningClientStatus: true,
   233        jobId: job2.id,
   234        taskGroup2,
   235      });
   236  
   237      await Topology.visit();
   238      await Topology.viz.datacenters[0].nodes[0].memoryRects[0].select();
   239      const firstAllocationTaskNames =
   240        Topology.allocInfoPanel.charts[0].areas.mapBy('taskName');
   241  
   242      await Topology.viz.datacenters[0].nodes[0].memoryRects[1].select();
   243      const secondAllocationTaskNames =
   244        Topology.allocInfoPanel.charts[0].areas.mapBy('taskName');
   245  
   246      assert.notDeepEqual(firstAllocationTaskNames, secondAllocationTaskNames);
   247    });
   248  
  test('when a node is selected, the info panel shows information on the node', async function (assert) {
    // A high node count is required for node selection
    const nodes = server.createList('node', 51);
    // First node by datacenter order corresponds to the first rendered node.
    const node = nodes.sortBy('datacenter')[0];
    server.createList('allocation', 5, { forceRunningClientStatus: true });

    const allocs = server.schema.allocations.where({ nodeId: node.id }).models;

    await Topology.visit();

    await Topology.viz.datacenters[0].nodes[0].selectNode();
    assert.equal(Topology.infoPanelTitle, 'Client Details');

    // The panel shows the short (first UUID segment) node id.
    assert.equal(Topology.nodeInfoPanel.id, node.id.split('-')[0]);
    assert.equal(Topology.nodeInfoPanel.name, `Name: ${node.name}`);
    assert.equal(Topology.nodeInfoPanel.address, `Address: ${node.httpAddr}`);
    assert.equal(Topology.nodeInfoPanel.status, `Status: ${node.status}`);

    assert.equal(
      Topology.nodeInfoPanel.drainingLabel,
      node.drain ? 'Yes' : 'No'
    );
    assert.equal(
      Topology.nodeInfoPanel.eligibleLabel,
      node.schedulingEligibility === 'eligible' ? 'Yes' : 'No'
    );

    // Accents highlight the problematic states: draining or ineligible.
    assert.equal(Topology.nodeInfoPanel.drainingIsAccented, node.drain);
    assert.equal(
      Topology.nodeInfoPanel.eligibleIsAccented,
      node.schedulingEligibility !== 'eligible'
    );

    // Reserved amounts sum the task-level resources of this node's allocs;
    // totals come from the node's own resources.
    const taskResources = allocs
      .mapBy('taskResources.models')
      .flat()
      .mapBy('resources');
    const reservedMem = sumResources(taskResources, 'Memory.MemoryMB');
    const reservedCPU = sumResources(taskResources, 'Cpu.CpuShares');

    const totalMem = node.nodeResources.Memory.MemoryMB;
    const totalCPU = node.nodeResources.Cpu.CpuShares;

    assert.equal(
      Topology.nodeInfoPanel.memoryProgressValue,
      reservedMem / totalMem
    );
    assert.equal(
      Topology.nodeInfoPanel.cpuProgressValue,
      reservedCPU / totalCPU
    );

    assert.equal(
      Topology.nodeInfoPanel.memoryAbsoluteValue,
      `${formatScheduledBytes(
        reservedMem * 1024 * 1024
      )} / ${formatScheduledBytes(totalMem, 'MiB')} reserved`
    );

    assert.equal(
      Topology.nodeInfoPanel.cpuAbsoluteValue,
      `${formatScheduledHertz(reservedCPU, 'MHz')} / ${formatScheduledHertz(
        totalCPU,
        'MHz'
      )} reserved`
    );

    await Topology.nodeInfoPanel.visitNode();
    assert.equal(currentURL(), `/clients/${node.id}`);
  });
   319  
  test('when one or more nodes lack the NodeResources property, a warning message is shown', async function (assert) {
    server.createList('node', 3);
    server.createList('allocation', 5);

    // Strip NodeResources from exactly one node; the warning message is
    // expected to lead with that count ('1').
    server.schema.nodes.all().models[0].update({ nodeResources: null });

    await Topology.visit();
    assert.ok(Topology.filteredNodesWarning.isPresent);
    assert.ok(Topology.filteredNodesWarning.message.startsWith('1'));
  });
   330  
  test('Filtering and Querying reduces the number of nodes shown', async function (assert) {
    // 10 generic + 2 foo-bar-baz + 1 ineligible + 1 draining + 3 pooled = 17.
    server.createList('node', 10);
    server.createList('node', 2, {
      nodeClass: 'foo-bar-baz',
    });

    // Make sure we have at least one node draining and one ineligible.
    server.create('node', {
      schedulingEligibility: 'ineligible',
    });
    server.create('node', 'draining');

    // Create node pool exclusive for these nodes.
    server.create('node-pool', { name: 'test-node-pool' });
    server.createList('node', 3, {
      nodePool: 'test-node-pool',
    });

    server.createList('allocation', 5);

    // Count draining and ineligible nodes.
    // (The factories above may randomly produce more than one of each.)
    const counts = {
      ineligible: 0,
      draining: 0,
    };
    server.db.nodes.forEach((n) => {
      if (n.schedulingEligibility === 'ineligible') {
        counts['ineligible'] += 1;
      }
      if (n.drain) {
        counts['draining'] += 1;
      }
    });

    await Topology.visit();
    assert.dom('[data-test-topo-viz-node]').exists({ count: 17 });

    // Test search.
    await typeIn('input.node-search', server.schema.nodes.first().name);
    assert.dom('[data-test-topo-viz-node]').exists({ count: 1 });
    // typeIn appends, so typing the name again doubles the query string,
    // which matches no node.
    await typeIn('input.node-search', server.schema.nodes.first().name);
    assert.dom('[data-test-topo-viz-node]').doesNotExist();
    await click('[title="Clear search"]');
    assert.dom('[data-test-topo-viz-node]').exists({ count: 17 });

    // Test node class filter.
    await Topology.facets.class.toggle();
    await Topology.facets.class.options
      .findOneBy('label', 'foo-bar-baz')
      .toggle();
    assert.dom('[data-test-topo-viz-node]').exists({ count: 2 });
    // Untoggle to restore the unfiltered view before the next facet.
    await Topology.facets.class.options
      .findOneBy('label', 'foo-bar-baz')
      .toggle();

    // Test ineligible state filter.
    await Topology.facets.state.toggle();
    await Topology.facets.state.options
      .findOneBy('label', 'Ineligible')
      .toggle();
    assert
      .dom('[data-test-topo-viz-node]')
      .exists({ count: counts['ineligible'] });
    await Topology.facets.state.options
      .findOneBy('label', 'Ineligible')
      .toggle();
    await Topology.facets.state.toggle();

    // Test draining state filter.
    await Topology.facets.state.toggle();
    await Topology.facets.state.options.findOneBy('label', 'Draining').toggle();
    assert
      .dom('[data-test-topo-viz-node]')
      .exists({ count: counts['draining'] });
    await Topology.facets.state.options.findOneBy('label', 'Draining').toggle();
    await Topology.facets.state.toggle();

    // Test node pool filter.
    await Topology.facets.nodePool.toggle();
    await Topology.facets.nodePool.options
      .findOneBy('label', 'test-node-pool')
      .toggle();
    assert.dom('[data-test-topo-viz-node]').exists({ count: 3 });
    await Topology.facets.nodePool.options
      .findOneBy('label', 'test-node-pool')
      .toggle();
  });
   418  });