github.com/hernad/nomad@v1.6.112/ui/tests/acceptance/job-status-panel-test.js (about)

     1  /**
     2   * Copyright (c) HashiCorp, Inc.
     3   * SPDX-License-Identifier: MPL-2.0
     4   */
     5  
     6  // @ts-check
     7  import { module, test } from 'qunit';
     8  import { setupApplicationTest } from 'ember-qunit';
     9  
    10  import {
    11    click,
    12    visit,
    13    find,
    14    findAll,
    15    fillIn,
    16    settled,
    17    triggerEvent,
    18  } from '@ember/test-helpers';
    19  
    20  import { setupMirage } from 'ember-cli-mirage/test-support';
    21  import faker from 'nomad-ui/mirage/faker';
    22  import percySnapshot from '@percy/ember';
    23  import a11yAudit from 'nomad-ui/tests/helpers/a11y-audit';
    24  // TODO: Mirage is not type-friendly / assigns "server" as a global. Try to work around this shortcoming.
    25  
    26  module('Acceptance | job status panel', function (hooks) {
    27    setupApplicationTest(hooks);
    28    setupMirage(hooks);
    29  
  hooks.beforeEach(async function () {
    // Seed a default node pool and one client node so that jobs and
    // allocations created in each test have somewhere to be placed.
    server.create('node-pool');
    server.create('node');
  });
    34  
    35    test('Status panel lets you switch between Current and Historical', async function (assert) {
    36      assert.expect(5);
    37      let job = server.create('job', {
    38        status: 'running',
    39        datacenters: ['*'],
    40        type: 'service',
    41        createAllocations: true,
    42      });
    43  
    44      await visit(`/jobs/${job.id}`);
    45      assert.dom('.job-status-panel').exists();
    46      await a11yAudit(assert);
    47      await percySnapshot(assert, {
    48        percyCSS: `
    49          .allocation-row td { display: none; }
    50        `,
    51      });
    52  
    53      assert
    54        .dom('[data-test-status-mode="current"]')
    55        .exists('Current mode by default');
    56  
    57      await click('[data-test-status-mode-current]');
    58  
    59      assert
    60        .dom('[data-test-status-mode="current"]')
    61        .exists('Clicking active mode makes no change');
    62  
    63      await click('[data-test-status-mode-historical]');
    64  
    65      assert
    66        .dom('[data-test-status-mode="historical"]')
    67        .exists('Lets you switch to historical mode');
    68    });
    69  
    70    test('Status panel observes query parameters for current/historical', async function (assert) {
    71      assert.expect(2);
    72      let job = server.create('job', {
    73        status: 'running',
    74        datacenters: ['*'],
    75        type: 'service',
    76        createAllocations: true,
    77        noActiveDeployment: true,
    78      });
    79  
    80      await visit(`/jobs/${job.id}?statusMode=historical`);
    81      assert.dom('.job-status-panel').exists();
    82  
    83      assert
    84        .dom('[data-test-status-mode="historical"]')
    85        .exists('Historical mode when rendered with queryParams');
    86    });
    87  
    88    test('Status Panel shows accurate number and types of ungrouped allocation blocks', async function (assert) {
    89      assert.expect(7);
    90  
    91      faker.seed(1);
    92  
    93      let groupTaskCount = 10;
    94  
    95      let job = server.create('job', {
    96        status: 'running',
    97        datacenters: ['*'],
    98        type: 'service',
    99        resourceSpec: ['M: 256, C: 500'], // a single group
   100        createAllocations: true,
   101        allocStatusDistribution: {
   102          running: 1,
   103          failed: 0,
   104          unknown: 0,
   105          lost: 0,
   106        },
   107        groupTaskCount,
   108        shallow: true,
   109      });
   110  
   111      await visit(`/jobs/${job.id}`);
   112      assert.dom('.job-status-panel').exists();
   113  
   114      let jobAllocCount = server.db.allocations.where({
   115        jobId: job.id,
   116      }).length;
   117  
   118      assert.equal(
   119        jobAllocCount,
   120        groupTaskCount * job.taskGroups.length,
   121        'Correect number of allocs generated (metatest)'
   122      );
   123      assert
   124        .dom('.ungrouped-allocs .represented-allocation.running')
   125        .exists(
   126          { count: jobAllocCount },
   127          `All ${jobAllocCount} allocations are represented in the status panel`
   128        );
   129  
   130      groupTaskCount = 20;
   131  
   132      job = server.create('job', {
   133        status: 'running',
   134        datacenters: ['*'],
   135        type: 'service',
   136        resourceSpec: ['M: 256, C: 500'], // a single group
   137        createAllocations: true,
   138        allocStatusDistribution: {
   139          running: 0.5,
   140          failed: 0.5,
   141          unknown: 0,
   142          lost: 0,
   143        },
   144        groupTaskCount,
   145        noActiveDeployment: true,
   146        shallow: true,
   147      });
   148  
   149      await visit(`/jobs/${job.id}`);
   150      assert.dom('.job-status-panel').exists();
   151  
   152      let runningAllocCount = server.db.allocations.where({
   153        jobId: job.id,
   154        clientStatus: 'running',
   155      }).length;
   156  
   157      let failedAllocCount = server.db.allocations.where({
   158        jobId: job.id,
   159        clientStatus: 'failed',
   160      }).length;
   161  
   162      assert.equal(
   163        runningAllocCount + failedAllocCount,
   164        groupTaskCount * job.taskGroups.length,
   165        'Correect number of allocs generated (metatest)'
   166      );
   167      assert
   168        .dom('.ungrouped-allocs .represented-allocation.running')
   169        .exists(
   170          { count: runningAllocCount },
   171          `All ${runningAllocCount} running allocations are represented in the status panel`
   172        );
   173      assert
   174        .dom('.ungrouped-allocs .represented-allocation.failed')
   175        .exists(
   176          { count: failedAllocCount },
   177          `All ${failedAllocCount} failed allocations are represented in the status panel`
   178        );
   179      await percySnapshot(assert, {
   180        percyCSS: `
   181            .allocation-row td { display: none; }
   182          `,
   183      });
   184    });
   185  
  test('After running/pending allocations are covered, fill in allocs by jobVersion, descending', async function (assert) {
    assert.expect(9);
    // Job at version 5 with 4 slots per group; allocations are created
    // manually below so their statuses and versions are exact.
    let job = server.create('job', {
      status: 'running',
      datacenters: ['*'],
      type: 'service',
      resourceSpec: ['M: 256, C: 500'], // a single group
      createAllocations: false,
      groupTaskCount: 4,
      shallow: true,
      version: 5,
    });

    // Five allocations compete for four slots: running/pending fill first,
    // then remaining slots are taken in descending jobVersion order.
    server.create('allocation', {
      jobId: job.id,
      clientStatus: 'running',
      jobVersion: 5,
    });
    server.create('allocation', {
      jobId: job.id,
      clientStatus: 'pending',
      jobVersion: 5,
    });
    server.create('allocation', {
      jobId: job.id,
      clientStatus: 'running',
      jobVersion: 3,
    });
    server.create('allocation', {
      jobId: job.id,
      clientStatus: 'failed',
      jobVersion: 4,
    });
    server.create('allocation', {
      jobId: job.id,
      clientStatus: 'lost',
      jobVersion: 5,
    });

    await visit(`/jobs/${job.id}`);
    assert.dom('.job-status-panel').exists();

    // We expect to see 4 represented-allocations, since that's the number in our groupTaskCount
    assert
      .dom('.ungrouped-allocs .represented-allocation')
      .exists({ count: 4 });

    // We expect 2 of them to be running, and one to be pending, since running/pending allocations supersede other clientStatuses
    assert
      .dom('.ungrouped-allocs .represented-allocation.running')
      .exists({ count: 2 });
    assert
      .dom('.ungrouped-allocs .represented-allocation.pending')
      .exists({ count: 1 });

    // We expect the lone other allocation to be lost, since it has the highest jobVersion
    assert
      .dom('.ungrouped-allocs .represented-allocation.lost')
      .exists({ count: 1 });

    // We expect the job versions legend to show 3 at v5 (running, pending, and lost), and 1 at v3 (old running), and none at v4 (failed is not represented)
    assert.dom('.job-status-panel .versions > ul > li').exists({ count: 2 });
    assert
      .dom('.job-status-panel .versions > ul > li > a[data-version="5"]')
      .exists({ count: 1 });
    assert
      .dom('.job-status-panel .versions > ul > li > a[data-version="3"]')
      .exists({ count: 1 });
    assert
      .dom('.job-status-panel .versions > ul > li > a[data-version="4"]')
      .doesNotExist();
    await percySnapshot(assert, {
      percyCSS: `
        .allocation-row td { display: none; }
      `,
    });
  });
   263  
  test('After running/pending allocations are covered, fill in allocs by jobVersion, descending (batch)', async function (assert) {
    assert.expect(7);
    // Same scenario as the service-job variant above, but for a batch job:
    // batch jobs also surface "complete" allocations in the panel.
    let job = server.create('job', {
      status: 'running',
      datacenters: ['*'],
      type: 'batch',
      resourceSpec: ['M: 256, C: 500'], // a single group
      createAllocations: false,
      allocStatusDistribution: {
        running: 0.5,
        failed: 0.3,
        unknown: 0,
        lost: 0,
        complete: 0.2,
      },
      groupTaskCount: 5,
      shallow: true,
      version: 5,
      noActiveDeployment: true,
    });

    // Six allocations compete for five slots.
    server.create('allocation', {
      jobId: job.id,
      clientStatus: 'running',
      jobVersion: 5,
    });
    server.create('allocation', {
      jobId: job.id,
      clientStatus: 'pending',
      jobVersion: 5,
    });
    server.create('allocation', {
      jobId: job.id,
      clientStatus: 'running',
      jobVersion: 3,
    });
    server.create('allocation', {
      jobId: job.id,
      clientStatus: 'failed',
      jobVersion: 4,
    });
    server.create('allocation', {
      jobId: job.id,
      clientStatus: 'complete',
      jobVersion: 4,
    });
    server.create('allocation', {
      jobId: job.id,
      clientStatus: 'lost',
      jobVersion: 5,
    });

    await visit(`/jobs/${job.id}`);
    assert.dom('.job-status-panel').exists();
    // We expect to see 5 represented-allocations, since that's the number in our groupTaskCount
    assert
      .dom('.ungrouped-allocs .represented-allocation')
      .exists({ count: 5 });

    // We expect 2 of them to be running, and one to be pending, since running/pending allocations supersede other clientStatuses
    assert
      .dom('.ungrouped-allocs .represented-allocation.running')
      .exists({ count: 2 });
    assert
      .dom('.ungrouped-allocs .represented-allocation.pending')
      .exists({ count: 1 });

    // We expect 1 to be lost, since it has the highest jobVersion
    assert
      .dom('.ungrouped-allocs .represented-allocation.lost')
      .exists({ count: 1 });

    // We expect the remaining one to be complete, rather than failed, since it comes earlier in the jobAllocStatuses.batch constant
    assert
      .dom('.ungrouped-allocs .represented-allocation.complete')
      .exists({ count: 1 });
    assert
      .dom('.ungrouped-allocs .represented-allocation.failed')
      .doesNotExist();

    await percySnapshot(assert, {
      percyCSS: `
        .allocation-row td { display: none; }
      `,
    });
  });
   350  
   351    test('Status Panel groups allocations when they get past a threshold', async function (assert) {
   352      assert.expect(6);
   353  
   354      faker.seed(1);
   355  
   356      let groupTaskCount = 20;
   357  
   358      let job = server.create('job', {
   359        status: 'running',
   360        datacenters: ['*'],
   361        type: 'service',
   362        resourceSpec: ['M: 256, C: 500'], // a single group
   363        createAllocations: true,
   364        allocStatusDistribution: {
   365          running: 1,
   366          failed: 0,
   367          unknown: 0,
   368          lost: 0,
   369        },
   370        groupTaskCount,
   371        shallow: true,
   372      });
   373  
   374      await visit(`/jobs/${job.id}`);
   375      assert.dom('.job-status-panel').exists();
   376  
   377      let jobAllocCount = server.db.allocations.where({
   378        jobId: job.id,
   379      }).length;
   380  
   381      assert
   382        .dom('.ungrouped-allocs .represented-allocation.running')
   383        .exists(
   384          { count: jobAllocCount },
   385          `All ${jobAllocCount} allocations are represented in the status panel, ungrouped`
   386        );
   387  
   388      groupTaskCount = 40;
   389  
   390      job = server.create('job', {
   391        status: 'running',
   392        datacenters: ['*'],
   393        type: 'service',
   394        resourceSpec: ['M: 256, C: 500'], // a single group
   395        createAllocations: true,
   396        allocStatusDistribution: {
   397          running: 1,
   398          failed: 0,
   399          unknown: 0,
   400          lost: 0,
   401        },
   402        groupTaskCount,
   403        shallow: true,
   404      });
   405  
   406      await visit(`/jobs/${job.id}`);
   407      assert.dom('.job-status-panel').exists();
   408  
   409      jobAllocCount = server.db.allocations.where({
   410        jobId: job.id,
   411      }).length;
   412  
   413      // At standard test resolution, 40 allocations will attempt to display 20 ungrouped, and 20 grouped.
   414      let desiredUngroupedAllocCount = 20;
   415      assert
   416        .dom('.ungrouped-allocs .represented-allocation.running')
   417        .exists(
   418          { count: desiredUngroupedAllocCount },
   419          `${desiredUngroupedAllocCount} allocations are represented ungrouped`
   420        );
   421  
   422      assert
   423        .dom('.represented-allocation.rest')
   424        .exists('Allocations are numerous enough that a summary block exists');
   425      assert
   426        .dom('.represented-allocation.rest')
   427        .hasText(
   428          `+${groupTaskCount - desiredUngroupedAllocCount}`,
   429          'Summary block has the correct number of grouped allocs'
   430        );
   431  
   432      await percySnapshot(assert, {
   433        percyCSS: `
   434          .allocation-row td { display: none; }
   435        `,
   436      });
   437    });
   438  
  test('Status Panel groups allocations when they get past a threshold, multiple statuses', async function (assert) {
    let groupTaskCount = 50;

    let job = server.create('job', {
      status: 'running',
      datacenters: ['*'],
      type: 'service',
      resourceSpec: ['M: 256, C: 500'], // a single group
      createAllocations: true,
      allocStatusDistribution: {
        running: 0.5,
        failed: 0.3,
        pending: 0.1,
        unknown: 0.1,
      },
      groupTaskCount,
      shallow: true,
    });

    await visit(`/jobs/${job.id}`);
    assert.dom('.job-status-panel').exists();

    // With 50 allocs split across 4 statuses distributed as above, we can expect 25 running, 15 failed, 5 pending, and 5 unknown.
    // At standard test resolution, each status will be ungrouped/grouped as follows:
    // 25 running: 9 ungrouped, 16 grouped
    // 15 failed: 5 ungrouped, 10 grouped
    // 5 pending: 0 ungrouped, 5 grouped
    // 5 unknown: 0 ungrouped, 5 grouped. Represented as "Unplaced"

    assert
      .dom('.ungrouped-allocs .represented-allocation.running')
      .exists({ count: 9 }, '9 running allocations are represented ungrouped');
    assert
      .dom('.represented-allocation.rest.running')
      .exists(
        'Running allocations are numerous enough that a summary block exists'
      );
    assert
      .dom('.represented-allocation.rest.running')
      .hasText(
        '+16',
        'Summary block has the correct number of grouped running allocs'
      );

    assert
      .dom('.ungrouped-allocs .represented-allocation.failed')
      .exists({ count: 5 }, '5 failed allocations are represented ungrouped');
    assert
      .dom('.represented-allocation.rest.failed')
      .exists(
        'Failed allocations are numerous enough that a summary block exists'
      );
    assert
      .dom('.represented-allocation.rest.failed')
      .hasText(
        '+10',
        'Summary block has the correct number of grouped failed allocs'
      );

    assert
      .dom('.ungrouped-allocs .represented-allocation.pending')
      .exists({ count: 0 }, '0 pending allocations are represented ungrouped');
    assert
      .dom('.represented-allocation.rest.pending')
      .exists(
        'pending allocations are numerous enough that a summary block exists'
      );
    assert
      .dom('.represented-allocation.rest.pending')
      .hasText(
        '5',
        'Summary block has the correct number of grouped pending allocs'
      );

    assert
      .dom('.ungrouped-allocs .represented-allocation.unplaced')
      .exists({ count: 0 }, '0 unplaced allocations are represented ungrouped');
    assert
      .dom('.represented-allocation.rest.unplaced')
      .exists(
        'Unplaced allocations are numerous enough that a summary block exists'
      );
    assert
      .dom('.represented-allocation.rest.unplaced')
      .hasText(
        '5',
        'Summary block has the correct number of grouped unplaced allocs'
      );
    await percySnapshot(
      'Status Panel groups allocations when they get past a threshold, multiple statuses (full width)',
      {
        percyCSS: `
          .allocation-row td { display: none; }
        `,
      }
    );

    // Simulate a window resize event; will recompute how many of each ought to be grouped.

    // At 1100px, only running and failed allocations have some ungrouped allocs
    find('.page-body').style.width = '1100px';
    await triggerEvent(window, 'resize');

    await percySnapshot(
      'Status Panel groups allocations when they get past a threshold, multiple statuses (1100px)',
      {
        percyCSS: `
          .allocation-row td { display: none; }
        `,
      }
    );

    assert
      .dom('.ungrouped-allocs .represented-allocation.running')
      .exists({ count: 7 }, '7 running allocations are represented ungrouped');
    assert
      .dom('.represented-allocation.rest.running')
      .exists(
        'Running allocations are numerous enough that a summary block exists'
      );
    assert
      .dom('.represented-allocation.rest.running')
      .hasText(
        '+18',
        'Summary block has the correct number of grouped running allocs'
      );

    assert
      .dom('.ungrouped-allocs .represented-allocation.failed')
      .exists({ count: 4 }, '4 failed allocations are represented ungrouped');
    assert
      .dom('.represented-allocation.rest.failed')
      .exists(
        'Failed allocations are numerous enough that a summary block exists'
      );
    assert
      .dom('.represented-allocation.rest.failed')
      .hasText(
        '+11',
        'Summary block has the correct number of grouped failed allocs'
      );

    // At 800px, only running allocations have some ungrouped allocs. The rest are all fully grouped.
    // NOTE(review): the Percy snapshot label below still says "(500px)" — presumably
    // stale from before the width was changed to 800px; confirm before renaming,
    // since renaming would reset the Percy baseline.
    find('.page-body').style.width = '800px';
    await triggerEvent(window, 'resize');

    await percySnapshot(
      'Status Panel groups allocations when they get past a threshold, multiple statuses (500px)',
      {
        percyCSS: `
          .allocation-row td { display: none; }
        `,
      }
    );

    assert
      .dom('.ungrouped-allocs .represented-allocation.running')
      .exists({ count: 4 }, '4 running allocations are represented ungrouped');
    assert
      .dom('.represented-allocation.rest.running')
      .exists(
        'Running allocations are numerous enough that a summary block exists'
      );
    assert
      .dom('.represented-allocation.rest.running')
      .hasText(
        '+21',
        'Summary block has the correct number of grouped running allocs'
      );

    assert
      .dom('.ungrouped-allocs .represented-allocation.failed')
      .doesNotExist('no failed allocations are represented ungrouped');
    assert
      .dom('.represented-allocation.rest.failed')
      .exists(
        'Failed allocations are numerous enough that a summary block exists'
      );
    assert
      .dom('.represented-allocation.rest.failed')
      .hasText(
        '15',
        'Summary block has the correct number of grouped failed allocs'
      );
  });
   624  
  test('Restarted/Rescheduled/Failed numbers reflected correctly', async function (assert) {
    this.store = this.owner.lookup('service:store');

    let groupTaskCount = 10;

    let job = server.create('job', {
      status: 'running',
      datacenters: ['*'],
      type: 'service',
      resourceSpec: ['M: 256, C: 500'], // a single group
      createAllocations: true,
      allocStatusDistribution: {
        running: 0.5,
        failed: 0.5,
        unknown: 0,
        lost: 0,
      },
      groupTaskCount,
      activeDeployment: true,
      shallow: true,
      version: 0,
    });

    // Give every allocation the same task state so its events can be
    // mutated predictably through the store below.
    let state = server.create('task-state');
    state.events = server.schema.taskEvents.where({ taskStateId: state.id });
    server.schema.allocations.where({ jobId: job.id }).update({
      taskStateIds: [state.id],
      jobVersion: 0,
    });

    await visit(`/jobs/${job.id}`);
    assert.dom('.job-status-panel').exists();
    assert
      .dom('.failed-or-lost-links > span')
      .exists({ count: 2 }, 'Restarted and Rescheduled cells are both present');
    let rescheduledCell = [...findAll('.failed-or-lost-links > span')][0];
    let restartedCell = [...findAll('.failed-or-lost-links > span')][1];

    // Check that the title in each cell has the right text
    assert.dom(rescheduledCell).hasText('0 Rescheduled');
    assert.dom(restartedCell).hasText('0 Restarted');

    // Check that both values are zero and non-links
    assert
      .dom(rescheduledCell.querySelector('a'))
      .doesNotExist('Rescheduled cell is not a link');
    assert
      .dom(rescheduledCell)
      .hasText('0 Rescheduled', 'Rescheduled cell has zero value');
    assert
      .dom(restartedCell.querySelector('a'))
      .doesNotExist('Restarted cell is not a link');
    assert
      .dom(restartedCell)
      .hasText('0 Restarted', 'Restarted cell has zero value');

    // A wild event appears! Change a recent task event to type "Restarting" in a task state:
    this.store
      .peekAll('job')
      .objectAt(0)
      .get('allocations')
      .objectAt(0)
      .get('states')
      .objectAt(0)
      .get('events')
      .objectAt(0)
      .set('type', 'Restarting');

    await settled();

    assert
      .dom(restartedCell)
      .hasText(
        '1 Restarted',
        'Restarted cell updates when a task event with type "Restarting" is added'
      );

    // Mark a second allocation's task event as "Restarting" as well.
    this.store
      .peekAll('job')
      .objectAt(0)
      .get('allocations')
      .objectAt(1)
      .get('states')
      .objectAt(0)
      .get('events')
      .objectAt(0)
      .set('type', 'Restarting');

    await settled();

    assert
      .dom(restartedCell)
      .hasText(
        '2 Restarted',
        'Restarted cell updates when a second task event with type "Restarting" is added'
      );

    // Trigger a reschedule by giving one allocation a non-empty
    // followUpEvaluation. NOTE(review): the assertion message below says
    // "desiredTransition" — presumably both signal a reschedule; confirm.
    this.store
      .peekAll('job')
      .objectAt(0)
      .get('allocations')
      .objectAt(0)
      .get('followUpEvaluation')
      .set('content', { 'test-key': 'not-empty' });

    await settled();

    assert
      .dom(rescheduledCell)
      .hasText(
        '1 Rescheduled',
        'Rescheduled cell updates when desiredTransition is set'
      );
    assert
      .dom(rescheduledCell.querySelector('a'))
      .exists('Rescheduled cell with a non-zero number is now a link');
  });
   744  
  module('deployment history', function () {
    test('Deployment history can be searched', async function (assert) {
      faker.seed(1);

      let groupTaskCount = 10;

      let job = server.create('job', {
        status: 'running',
        datacenters: ['*'],
        type: 'service',
        resourceSpec: ['M: 256, C: 500'], // a single group
        createAllocations: true,
        allocStatusDistribution: {
          running: 1,
          failed: 0,
          unknown: 0,
          lost: 0,
        },
        groupTaskCount,
        shallow: true,
        activeDeployment: true,
        version: 0,
      });

      // Share one task state (and its events) across all allocations so the
      // expected timeline entry count is events × allocations.
      let state = server.create('task-state');
      state.events = server.schema.taskEvents.where({ taskStateId: state.id });

      server.schema.allocations.where({ jobId: job.id }).update({
        taskStateIds: [state.id],
        jobVersion: 0,
      });

      await visit(`/jobs/${job.id}`);
      assert.dom('.job-status-panel').exists();

      const serverEvents = server.schema.taskEvents.where({
        taskStateId: state.id,
      });
      const shownEvents = findAll('.timeline-object');
      const jobAllocations = server.db.allocations.where({ jobId: job.id });
      assert.equal(
        shownEvents.length,
        serverEvents.length * jobAllocations.length,
        'All events are shown'
      );

      // Searching for one event's message narrows the timeline to that
      // event — once per allocation, since allocations share the state.
      await fillIn(
        '[data-test-history-search] input',
        serverEvents.models[0].message
      );
      assert.equal(
        findAll('.timeline-object').length,
        jobAllocations.length,
        'Only events matching the search are shown'
      );

      // A search with no matches shows the empty-state message.
      await fillIn('[data-test-history-search] input', 'foo bar baz');
      assert
        .dom('[data-test-history-search-no-match]')
        .exists('No match message is shown');
    });
  });
   807  
   808    module('Batch jobs', function () {
   809      test('Batch jobs have a valid Completed status', async function (assert) {
   810        this.store = this.owner.lookup('service:store');
   811  
   812        let batchJob = server.create('job', {
   813          status: 'running',
   814          datacenters: ['*'],
   815          type: 'batch',
   816          createAllocations: true,
   817          allocStatusDistribution: {
   818            running: 0.5,
   819            failed: 0.3,
   820            unknown: 0,
   821            lost: 0,
   822            complete: 0.2,
   823          },
   824          groupsCount: 1,
   825          groupTaskCount: 10,
   826          noActiveDeployment: true,
   827          shallow: true,
   828          version: 1,
   829        });
   830  
   831        let serviceJob = server.create('job', {
   832          status: 'running',
   833          datacenters: ['*'],
   834          type: 'service',
   835          createAllocations: true,
   836          allocStatusDistribution: {
   837            running: 0.5,
   838            failed: 0.3,
   839            unknown: 0,
   840            lost: 0,
   841            complete: 0.2,
   842          },
   843          groupsCount: 1,
   844          groupTaskCount: 10,
   845          noActiveDeployment: true,
   846          shallow: true,
   847          version: 1,
   848        });
   849  
   850        // Batch job should have 5 running, 3 failed, 2 completed
   851        await visit(`/jobs/${batchJob.id}`);
   852        assert.dom('.job-status-panel').exists();
   853        assert
   854          .dom('.running-allocs-title')
   855          .hasText(
   856            '5/8 Remaining Allocations Running',
   857            'Completed allocations do not count toward the Remaining denominator'
   858          );
   859        assert
   860          .dom('.ungrouped-allocs .represented-allocation.complete')
   861          .exists(
   862            { count: 2 },
   863            `2 complete allocations are represented in the status panel`
   864          );
   865  
   866        // Service job should have 5 running, 3 failed, 2 unplaced
   867  
   868        await visit(`/jobs/${serviceJob.id}`);
   869        assert.dom('.job-status-panel').exists();
   870        assert.dom('.running-allocs-title').hasText('5/10 Allocations Running');
   871        assert
   872          .dom('.ungrouped-allocs .represented-allocation.complete')
   873          .doesNotExist(
   874            'For a service job, no copmlete allocations are represented in the status panel'
   875          );
   876        assert
   877          .dom('.ungrouped-allocs .represented-allocation.unplaced')
   878          .exists(
   879            { count: 2 },
   880            `2 unplaced allocations are represented in the status panel`
   881          );
   882      });
   883    });
   884  
   885    module('System jobs', function () {
   886      test('System jobs show restarted but not rescheduled allocs', async function (assert) {
   887        this.store = this.owner.lookup('service:store');
   888  
   889        let job = server.create('job', {
   890          status: 'running',
   891          datacenters: ['*'],
   892          type: 'system',
   893          createAllocations: true,
   894          allocStatusDistribution: {
   895            running: 0.5,
   896            failed: 0.5,
   897            unknown: 0,
   898            lost: 0,
   899          },
   900          noActiveDeployment: true,
   901          shallow: true,
   902          version: 0,
   903        });
   904  
   905        let state = server.create('task-state');
   906        state.events = server.schema.taskEvents.where({ taskStateId: state.id });
   907        server.schema.allocations.where({ jobId: job.id }).update({
   908          taskStateIds: [state.id],
   909          jobVersion: 0,
   910        });
   911  
   912        await visit(`/jobs/${job.id}`);
   913        assert.dom('.job-status-panel').exists();
   914        assert.dom('.failed-or-lost').exists({ count: 1 });
   915        assert.dom('.failed-or-lost h4').hasText('Replaced Allocations');
   916        assert
   917          .dom('.failed-or-lost-links > span')
   918          .hasText('0 Restarted', 'Restarted cell at zero by default');
   919  
   920        // A wild event appears! Change a recent task event to type "Restarting" in a task state:
   921        this.store
   922          .peekAll('job')
   923          .objectAt(0)
   924          .get('allocations')
   925          .objectAt(0)
   926          .get('states')
   927          .objectAt(0)
   928          .get('events')
   929          .objectAt(0)
   930          .set('type', 'Restarting');
   931  
   932        await settled();
   933  
   934        assert
   935          .dom('.failed-or-lost-links > span')
   936          .hasText(
   937            '1 Restarted',
   938            'Restarted cell updates when a task event with type "Restarting" is added'
   939          );
   940      });
   941  
   942      test('System jobs do not have a sense of Desired/Total allocs', async function (assert) {
   943        this.store = this.owner.lookup('service:store');
   944  
   945        server.db.nodes.remove();
   946  
   947        server.createList('node', 3, {
   948          status: 'ready',
   949          drain: false,
   950          schedulingEligibility: 'eligible',
   951        });
   952  
   953        let job = server.create('job', {
   954          status: 'running',
   955          datacenters: ['*'],
   956          type: 'system',
   957          createAllocations: false,
   958          noActiveDeployment: true,
   959          shallow: true,
   960          version: 0,
   961        });
   962  
   963        // Create an allocation on this job for each node
   964        server.schema.nodes.all().models.forEach((node) => {
   965          server.create('allocation', {
   966            jobId: job.id,
   967            jobVersion: 0,
   968            clientStatus: 'running',
   969            nodeId: node.id,
   970          });
   971        });
   972  
   973        await visit(`/jobs/${job.id}`);
   974        let storedJob = await this.store.find(
   975          'job',
   976          JSON.stringify([job.id, 'default'])
   977        );
   978        // Weird Mirage thing: job summary factory is disconnected from its job and therefore allocations.
   979        // So we manually create the number here.
   980        let summary = await storedJob.get('summary');
   981        summary
   982          .get('taskGroupSummaries')
   983          .objectAt(0)
   984          .set(
   985            'runningAllocs',
   986            server.schema.allocations.where({
   987              jobId: job.id,
   988              clientStatus: 'running',
   989            }).length
   990          );
   991  
   992        await settled();
   993  
   994        assert.dom('.job-status-panel').exists();
   995        assert.dom('.running-allocs-title').hasText(
   996          `${
   997            server.schema.allocations.where({
   998              jobId: job.id,
   999              clientStatus: 'running',
  1000            }).length
  1001          } Allocations Running`
  1002        );
  1003  
  1004        // Let's bring another node online!
  1005        let newNode = server.create('node', {
  1006          status: 'ready',
  1007          drain: false,
  1008          schedulingEligibility: 'eligible',
  1009        });
  1010  
  1011        // Let's expect our scheduler to have therefore added an alloc to it
  1012        server.create('allocation', {
  1013          jobId: job.id,
  1014          jobVersion: 0,
  1015          clientStatus: 'running',
  1016          nodeId: newNode.id,
  1017        });
  1018  
  1019        summary
  1020          .get('taskGroupSummaries')
  1021          .objectAt(0)
  1022          .set(
  1023            'runningAllocs',
  1024            server.schema.allocations.where({
  1025              jobId: job.id,
  1026              clientStatus: 'running',
  1027            }).length
  1028          );
  1029  
  1030        await settled();
  1031  
  1032        assert.dom('.running-allocs-title').hasText('4 Allocations Running');
  1033      });
  1034    });
  1035  });