github.com/web-platform-tests/wpt.fyi@v0.0.0-20240530210107-70cf978996f1/scripts/build_test_history.py

# Copyright 2023 The WPT Dashboard Project. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.

import argparse
import json
import re
import sys
import time
from datetime import datetime, timedelta
from typing import Optional, TypedDict

import requests
from google.cloud import ndb, storage


BUCKET_NAME = 'wpt-recent-statuses-staging'
PROJECT_NAME = 'wptdashboard-staging'
RUNS_API_URL = 'https://staging.wpt.fyi/api/runs'
TIMEOUT_SECONDS = 3600

parser = argparse.ArgumentParser()
parser.add_argument(
    '-v', '--verbose', action='store_true', help='increase output verbosity.')
parser.add_argument(
    '--delete-history-entities', action='store_true',
    help='delete all TestHistoryEntry entities from Datastore.')
parser.add_argument(
    '--set-history-start-date',
    help=('Set the starting date to process test history. '
          'Date must be in ISO format (e.g. "2030-12-31T09:30:00.000Z"). '
          'Command will fail if TestHistoryEntry entities '
          'already exist in Datastore.'))
# Set to true to generate new JSON files for tracking previous test history.
# This should only be used on the first invocation, to create the initial
# starting point of test history, and all Datastore entities should be
# deleted beforehand so that they can be regenerated correctly. Note that
# this takes significantly longer to process and will likely need to be
# invoked locally to avoid the timeouts that would otherwise occur.
parser.add_argument(
    '--generate-new-statuses-json',
    action='store_true',
    help=('generate new statuses json and entities '
          'after entities have been deleted.'))

parsed_args = parser.parse_args()
# Print wrapper that only prints when the verbose flag is set.
verboseprint = (print if parsed_args.verbose
                else lambda *a, **k: None)

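
# Example invocations (illustrative; the flags are defined above):
#
#   python3 build_test_history.py --verbose
#   python3 build_test_history.py \
#       --set-history-start-date "2023-01-01T00:00:00.000Z"
#   python3 build_test_history.py --delete-history-entities --verbose
#   python3 build_test_history.py --generate-new-statuses-json
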

class TestHistoryEntry(ndb.Model):
    BrowserName = ndb.StringProperty(required=True)
    RunID = ndb.StringProperty(required=True)
    Date = ndb.StringProperty(required=True)
    TestName = ndb.StringProperty(required=True)
    SubtestName = ndb.StringProperty(required=True)
    Status = ndb.StringProperty(required=True)


class MostRecentHistoryProcessed(ndb.Model):
    Date = ndb.StringProperty(required=True)


class TestRun(ndb.Model):
    BrowserName = ndb.StringProperty()
    BrowserVersion = ndb.StringProperty()
    FullRevisionHash = ndb.StringProperty()
    Labels = ndb.StringProperty(repeated=True)
    OSName = ndb.StringProperty()
    OSVersion = ndb.StringProperty()
    RawResultsURL = ndb.StringProperty()
    ResultsUrl = ndb.StringProperty()
    Revision = ndb.StringProperty()
    TimeEnd = ndb.StringProperty()
    TimeStart = ndb.StringProperty()


# Type hint class for the run metadata returned by the api/runs endpoint.
class MetadataDict(TypedDict):
    id: str
    browser_name: str
    browser_version: str
    os_name: str
    os_version: str
    revision: str
    full_revision_hash: str
    results_url: str
    created_at: str
    time_start: str
    time_end: str
    raw_results_url: str
    labels: list[str]

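
# A minimal sketch of one run-metadata entry returned by the api/runs
# endpoint, as modeled by MetadataDict (illustrative values only):
#
#   {
#       "id": "123456789",
#       "browser_name": "chrome",
#       "browser_version": "115.0.5790.110",
#       "os_name": "linux",
#       "os_version": "20.04",
#       "revision": "1a2b3c4d5e",
#       "full_revision_hash": "1a2b3c4d5e6f...",
#       "results_url": "https://example.com/results-summary.json.gz",
#       "created_at": "2023-01-01T12:00:00.000Z",
#       "time_start": "2023-01-01T10:00:00.000Z",
#       "time_end": "2023-01-01T11:00:00.000Z",
#       "raw_results_url": "https://example.com/report.json",
#       "labels": ["master", "experimental"],
#   }
#
# Callers below stringify 'id' with str() defensively, in case the API
# returns it as a number rather than a string.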

def _build_new_test_history_entry(
        test_name: str,
        subtest_name: str,
        run_metadata: MetadataDict,
        run_date: str,
        current_status: str) -> TestHistoryEntry:
    return TestHistoryEntry(
        RunID=str(run_metadata['id']),
        BrowserName=run_metadata['browser_name'],
        Date=run_date,
        TestName=test_name,
        SubtestName=subtest_name,
        Status=current_status,
    )


def create_entity_if_needed(
        test_name: str,
        subtest_name: str,
        prev_test_statuses: dict,
        run_metadata: MetadataDict,
        run_date: str,
        current_status: str,
        entities_to_write: list[TestHistoryEntry],
        unique_entities_to_write: set[tuple[str, str]]) -> None:
    """Check if an entity should be created for a test status delta,
    and create one if necessary.
    """
    # Test results are stored in a dictionary with a tuple key
    # in the form of (test_name, subtest_name).
    # The overall test status has an empty string as the subtest name.
    test_key = (test_name, subtest_name)
    if test_key in unique_entities_to_write:
        return

    should_create_new_entry = (
        test_key not in prev_test_statuses or
        prev_test_statuses[test_key] != current_status)

    if should_create_new_entry:
        test_status_entry = _build_new_test_history_entry(
            test_name,
            subtest_name=subtest_name,
            run_metadata=run_metadata,
            run_date=run_date,
            current_status=current_status
        )
        entities_to_write.append(test_status_entry)
        unique_entities_to_write.add(test_key)
    prev_test_statuses[test_key] = current_status

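
# A sketch of the delta logic above, using hypothetical names (prev,
# metadata, date): starting from prev == {('/dom/a.html', ''): 'PASS'},
# a new 'PASS' for that key writes nothing, while a new 'FAIL' appends
# exactly one TestHistoryEntry and updates the in-memory snapshot:
#
#   entities: list[TestHistoryEntry] = []
#   seen: set[tuple[str, str]] = set()
#   create_entity_if_needed(
#       '/dom/a.html', subtest_name='', prev_test_statuses=prev,
#       run_metadata=metadata, run_date=date, current_status='FAIL',
#       entities_to_write=entities, unique_entities_to_write=seen)
#   # Now len(entities) == 1 and prev[('/dom/a.html', '')] == 'FAIL'.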

def process_single_run(run_metadata: MetadataDict) -> None:
    """Process a single aligned run and save any deltas to history."""
    verboseprint('Obtaining the raw results JSON for the test run '
                 f'at {run_metadata["raw_results_url"]}')
    try:
        run_resp = requests.get(run_metadata['raw_results_url'])
        run_data = run_resp.json()
    except requests.exceptions.RequestException as e:
        raise requests.exceptions.RequestException(
            'Failed to fetch raw results') from e

    # Keep a dictionary of the previous test statuses
    # from runs we've processed.
    prev_test_statuses = _populate_previous_statuses(
        run_metadata['browser_name'])

    # Keep track of every single test result that's in the dataset of
    # runs we've previously seen. If they're not in the run we're processing,
    # we'll mark them as missing.
    tests_not_seen: set[tuple[str, str]] = set(prev_test_statuses.keys())

    run_date = run_metadata['time_start']
    entities_to_write: list[TestHistoryEntry] = []
    unique_entities_to_write: set[tuple[str, str]] = set()
    # Iterate through each test.
    for test_data in run_data['results']:
        # Format the test name: collapse each whitespace character
        # to a single space.
        test_name = re.sub(r'\s', ' ', test_data['test'])

        # Specifying the subtest name as an empty string means that we're
        # dealing with the overall test status rather than a subtest status.
        create_entity_if_needed(
            test_name,
            subtest_name='',
            prev_test_statuses=prev_test_statuses,
            run_metadata=run_metadata,
            run_date=run_date,
            current_status=test_data['status'],
            entities_to_write=entities_to_write,
            unique_entities_to_write=unique_entities_to_write
        )

        # Now that we've seen this test status, we can remove it from
        # the set of tests we haven't seen yet.
        tests_not_seen.discard((test_name, ''))

        # Do the same basic process for each subtest.
        for subtest_data in test_data['subtests']:
            # Format the subtest name.
            subtest_name = re.sub(r'\s', ' ', subtest_data['name'])
            # Truncate a subtest name if it's too long to be indexed in
            # Datastore. An indexed string property can be at most 1500
            # bytes, and at least 1 subtest name violates this size, so
            # truncate to 1000 characters to stay under the limit.
            if len(subtest_name) > 1000:
                subtest_name = subtest_name[:1000]
            subtest_key = (test_name, subtest_name)

            create_entity_if_needed(
                test_name,
                subtest_name=subtest_name,
                prev_test_statuses=prev_test_statuses,
                run_metadata=run_metadata,
                run_date=run_date,
                current_status=subtest_data['status'],
                entities_to_write=entities_to_write,
                unique_entities_to_write=unique_entities_to_write
            )

            tests_not_seen.discard(subtest_key)

    # Write a MISSING status for tests/subtests not seen in this run.
    for test_name, subtest_name in tests_not_seen:
        # Only write a row as missing if it's not already marked as missing.
        create_entity_if_needed(
            test_name,
            subtest_name=subtest_name,
            prev_test_statuses=prev_test_statuses,
            run_metadata=run_metadata,
            run_date=run_date,
            current_status='MISSING',
            entities_to_write=entities_to_write,
            unique_entities_to_write=unique_entities_to_write
        )

    print(f'Entities to write: {len(entities_to_write)}')
    if len(entities_to_write) > 0:
        ndb.put_multi(entities_to_write)
    update_previous_statuses(
        prev_test_statuses, run_metadata['browser_name'])
    print(f'Finished {run_metadata["browser_name"]} run!')


def get_previous_statuses(browser_name: str) -> Optional[bytes]:
    """Fetch the JSON of most recent test statuses for comparison."""
    verboseprint(f'Obtaining recent statuses JSON for {browser_name}...')
    storage_client = storage.Client(project=PROJECT_NAME)
    bucket = storage_client.bucket(BUCKET_NAME)
    blob = bucket.blob(f'{browser_name}_recent_statuses.json')
    # Return None if no statuses file exists yet for this browser,
    # so the caller can decide whether that is an error.
    if not blob.exists():
        return None
    return blob.download_as_string()


def update_previous_statuses(
        prev_test_statuses: dict, browser_name: str) -> None:
    """Update the JSON of most recently seen statuses
    for use in the next invocation.
    """
    new_statuses = []
    print('Updating recent statuses JSON...')
    for test_name, subtest_name in prev_test_statuses.keys():
        new_statuses.append({
            'test_name': test_name,
            'subtest_name': subtest_name,
            'status': prev_test_statuses[(test_name, subtest_name)]
        })
    storage_client = storage.Client(project=PROJECT_NAME)
    bucket = storage_client.bucket(BUCKET_NAME)

    # Overwrite the old recent-statuses file with the new snapshot.
    blob = bucket.blob(f'{browser_name}_recent_statuses.json')
    blob.upload_from_string(json.dumps(new_statuses))
    verboseprint('JSON updated.')

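
# The uploaded {browser_name}_recent_statuses.json file is a flat list;
# a sketch with hypothetical entries:
#
#   [
#       {"test_name": "/dom/a.html", "subtest_name": "", "status": "OK"},
#       {"test_name": "/dom/a.html", "subtest_name": "sub 1",
#        "status": "PASS"}
#   ]
#
# _populate_previous_statuses() below inverts this list into the
# {(test_name, subtest_name): status} dictionary used for delta checks.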

def _populate_previous_statuses(browser_name: str) -> dict:
    """Create a dict with the most recent test statuses seen for a browser."""
    verboseprint('Populating the most recently seen statuses...')
    if parsed_args.generate_new_statuses_json:
        # Returning an empty dictionary of recent statuses will generate the
        # initial recent statuses file and all of the first history entries.
        verboseprint('Generating new statuses, so returning empty dict.')
        return {}
    # If the JSON file is not found, an exception is raised below; the file
    # is only generated from scratch when the --generate-new-statuses-json
    # flag is set (handled above).
    statuses_json_str = get_previous_statuses(browser_name)
    if statuses_json_str is None:
        # If this is not the first ever run for test statuses, then raise an
        # exception if the JSON file was not found.
        raise Exception(
            f'Error obtaining recent statuses file for {browser_name}')

    test_statuses = json.loads(statuses_json_str)
    # Turn the list of recent statuses into a dictionary for quick lookups.
    prev_test_statuses = {(t['test_name'], t['subtest_name']): t['status']
                          for t in test_statuses}
    verboseprint('Most recent previous statuses dictionary populated.')
    return prev_test_statuses


def should_process_run(run_metadata: MetadataDict) -> bool:
    """Check if a run should be processed."""
    # A run should be processed if no entities have been written for it.
    test_entry = TestHistoryEntry.query(
        TestHistoryEntry.RunID == str(run_metadata['id'])).get()
    return test_entry is None


def process_runs(
        runs_list: list[MetadataDict],
        process_start_entity: MostRecentHistoryProcessed) -> None:
    """Process each aligned run and update the
    most recent processed date afterward."""
    revisions_processed: dict[str, dict[str, bool]] = {}

    # Go through each aligned run.
    start = time.time()
    verboseprint('Beginning processing of each set of aligned runs...')
    for run_metadata in runs_list:
        browser_name = run_metadata['browser_name']
        revision = run_metadata['full_revision_hash']
        verboseprint(f'Revision: {revision}')

        # Keep track of the runs that have been processed.
        # The process start date entity is only updated once all aligned runs
        # for a given revision are processed.
        if revision not in revisions_processed:
            revisions_processed[revision] = {
                'chrome': False,
                'edge': False,
                'firefox': False,
                'safari': False,
            }

        if should_process_run(run_metadata):
            process_single_run(run_metadata)
        else:
            print('Run has already been processed! '
                  'TestHistoryEntry values already exist for this run.')

        revisions_processed[revision][browser_name] = True
        # If all runs for this revision have been processed, we can update
        # the most recently processed date to the run's start time.
        if (revisions_processed[revision]['chrome'] and
                revisions_processed[revision]['edge'] and
                revisions_processed[revision]['firefox'] and
                revisions_processed[revision]['safari']):
            print(f'All browsers have been processed for {revision}. '
                  'Updating date.')
            update_recent_processed_date(
                process_start_entity, run_metadata['time_start'])
    print('Set of runs processed after '
          f'{round(time.time() - start, 0)} seconds.')

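
# An illustrative snapshot of the bookkeeping in process_runs() midway
# through a set of aligned runs (hypothetical revision hash):
#
#   revisions_processed = {
#       '1a2b3c4d5e': {'chrome': True, 'edge': True,
#                      'firefox': False, 'safari': False},
#   }
#
# The most recently processed date only advances once all four browser
# flags for a revision are True.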

def get_aligned_run_info(
        date_entity: MostRecentHistoryProcessed) -> Optional[list]:
    """Get the list of metadata for the most recent aligned runs."""
    date_start = date_entity.Date
    date_start_obj = datetime.strptime(date_start, '%Y-%m-%dT%H:%M:%S.%fZ')

    # Since aligned runs all need to be completed runs to be fetched,
    # a time window buffer of 24 hours is kept to allow runs to finish before
    # assuming we've processed all aligned runs up to the present time.
    # Therefore, we only process runs up to (now - 24 hours).
    yesterday = datetime.now() - timedelta(days=1)
    end_interval = date_start_obj + timedelta(days=1)
    if end_interval > yesterday:
        end_interval = yesterday

    end_interval_string = end_interval.strftime('%Y-%m-%dT%H:%M:%S.%fZ')
    url = (f'{RUNS_API_URL}?label=master'
           '&label=experimental&max-count=1&aligned'
           f'&from={date_start}&to={end_interval_string}')

    verboseprint(f'Getting set of aligned runs from: {url}')
    try:
        # An explicit timeout (an assumed, tunable value) ensures a hung
        # request raises ReadTimeout and is handled below.
        resp = requests.get(url, timeout=60)
    # Sometimes this request can time out. If it does, just return
    # an empty list and attempt the fetch again.
    except requests.exceptions.ReadTimeout as e:
        verboseprint('Request timed out!', e)
        return []
    runs_list: list[MetadataDict] = resp.json()

    # If we have no runs to process in this date interval,
    # we can skip this interval for processing from now on.
    if len(runs_list) == 0:
        print('No runs found for this interval.')
        update_recent_processed_date(date_entity, end_interval_string)
        # If we've processed up to (now - 24 hours), then return None,
        # which signals that we're done processing.
        if end_interval == yesterday:
            return None
        return runs_list

    # Sort by revision, then by time start, so that the aligned runs are
    # processed in groups with each other.
    # Note that this technically doesn't have an impact if only 1 set of
    # aligned runs is processed, but this sort will allow this script to
    # function properly if multiple aligned run sets were to be processed
    # together.
    runs_list.sort(key=lambda run: (run['revision'], run['time_start']))

    if len(runs_list) != 4:
        raise ValueError('Aligned run set should contain 4 runs. '
                         f'Got {len(runs_list)}.')
    # Print the dates just to get info on the list of runs we're working with.
    print('Runs to process:')
    for run in runs_list:
        print(f'ID: {run["id"]}, {run["browser_name"]} {run["time_start"]}')

    return runs_list

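
# An example of the runs API URL built above (hypothetical dates; note that
# strftime('%f') pads microseconds to six digits in the 'to' parameter):
#
#   https://staging.wpt.fyi/api/runs?label=master&label=experimental
#       &max-count=1&aligned&from=2023-01-01T00:00:00.000Z
#       &to=2023-01-02T00:00:00.000000Z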

def update_recent_processed_date(
        date_entity: MostRecentHistoryProcessed, new_date: str) -> None:
    """Update the most recently processed date after finishing processing."""
    verboseprint(f'Updating most recent processed date to {new_date}...')
    date_entity.Date = new_date
    date_entity.put()
    verboseprint('Date updated.')


def set_history_start_date(new_date: str) -> None:
    """Update the history processing starting date based on date input."""
    # Datastore should be empty before manipulating
    # the history processing start date.
    check_if_db_empty()
    # Make sure the new date is in a valid format;
    # strptime raises ValueError if it is not.
    verboseprint(f'Checking if given date {new_date} is valid...')
    datetime.strptime(new_date, '%Y-%m-%dT%H:%M:%S.%fZ')

    # Query for the existing entity if it exists.
    date_entity = MostRecentHistoryProcessed.query().get()
    # Update the Date value if it exists - otherwise, create a new entity.
    if date_entity is not None:
        date_entity.Date = new_date
    else:
        date_entity = MostRecentHistoryProcessed(Date=new_date)
    date_entity.put()


class NoRecentDateError(Exception):
    """Exception raised when the MostRecentHistoryProcessed
    entity is not found.
    """
    pass


class DatastorePopulatedError(Exception):
    """Exception raised when initial JSON files are being generated,
    but the database has not been cleared of existing entries.
    """
    pass


def get_processing_start_date() -> MostRecentHistoryProcessed:
    verboseprint('Getting processing start date...')
    most_recent_processed: MostRecentHistoryProcessed = (
        MostRecentHistoryProcessed.query().get())

    if most_recent_processed is None:
        raise NoRecentDateError('Most recently processed run date not found.')
    verboseprint('History processing start date is',
                 most_recent_processed.Date)
    return most_recent_processed


def check_if_db_empty() -> None:
    """Raise an error if new JSON files are set to be generated and
    test history data already exists.
    """
    verboseprint(
        'Checking if Datastore is empty of TestHistoryEntry entities...')
    test_history_entry: TestHistoryEntry = TestHistoryEntry.query().get()
    if test_history_entry is not None:
        raise DatastorePopulatedError(
            'TestHistoryEntry entities exist in Datastore. '
            'JSON files and processing start date should not change if data '
            'already exists.')
    else:
        verboseprint('Datastore is empty of TestHistoryEntry entities.')


def delete_history_entities() -> None:
    """Delete any existing TestHistoryEntry entities in Datastore."""
    # Delete entities in batches of 100,000.
    to_delete = TestHistoryEntry.query().fetch(100000, keys_only=True)
    print('Deleting existing TestHistoryEntry entities...')
    while len(to_delete) > 0:
        ndb.delete_multi(to_delete)
        verboseprint('.', end='', flush=True)
        to_delete = TestHistoryEntry.query().fetch(100000, keys_only=True)
    print('Entities deleted!')


# The default parameters are used when this is invoked as a Cloud Function.
def main(args=None, topic=None) -> str:
    client = ndb.Client(project=PROJECT_NAME)
    verboseprint('CLI args: ', parsed_args)
    with client.context():
        # If the flag to delete entities is specified, handle it and exit.
        if parsed_args.delete_history_entities:
            delete_history_entities()
            verboseprint('Processing will stop after deletion. '
                         'Invoke again to repopulate.')
            sys.exit()
        # If the flag to set the processing date is specified,
        # handle it and exit.
        if parsed_args.set_history_start_date:
            set_history_start_date(parsed_args.set_history_start_date)
            sys.exit()

        # If we're generating new JSON files, the database should be empty
        # of test history data.
        if parsed_args.generate_new_statuses_json:
            check_if_db_empty()

        processing_start = time.time()
        run_sets_processed = 0
        # If we're generating new status JSON files, only 1 set of aligned
        # runs should be processed to create the baseline statuses.
        while (not parsed_args.generate_new_statuses_json
               or run_sets_processed == 0):
            process_start_entity = get_processing_start_date()
            runs_list = get_aligned_run_info(process_start_entity)
            # A return value of None means that the processing is complete
            # and up-to-date. Stop the processing.
            if runs_list is None:
                break
            # A return value of an empty list means that no aligned runs
            # were found in the given interval.
            if len(runs_list) == 0:
                continue
            process_runs(runs_list, process_start_entity)
            run_sets_processed += 1
            # Check if we've passed the soft timeout marker
            # and stop processing if so.
            if round(time.time() - processing_start, 0) > TIMEOUT_SECONDS:
                return ('Timed out after successfully processing '
                        f'{run_sets_processed} sets of aligned runs.')
    return 'Test history processing complete.'


if __name__ == '__main__':
    main()