sigs.k8s.io/gateway-api@v1.0.0/hack/boilerplate/boilerplate.py (about)

     1  #!/usr/bin/env python3
     2  
     3  # Copyright 2015 The Kubernetes Authors.
     4  #
     5  # Licensed under the Apache License, Version 2.0 (the "License");
     6  # you may not use this file except in compliance with the License.
     7  # You may obtain a copy of the License at
     8  #
     9  #     http://www.apache.org/licenses/LICENSE-2.0
    10  #
    11  # Unless required by applicable law or agreed to in writing, software
    12  # distributed under the License is distributed on an "AS IS" BASIS,
    13  # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    14  # See the License for the specific language governing permissions and
    15  # limitations under the License.
    16  
    17  # This file is copied from https://github.com/kubernetes/kubernetes/blob/04c2b1fbdc1289c9a72eda87cf7072346e60d241/hack/boilerplate/boilerplate.py
    18  
    19  from __future__ import print_function
    20  
    21  import argparse
    22  import datetime
    23  import difflib
    24  import glob
    25  import os
    26  import re
    27  import sys
    28  
    29  parser = argparse.ArgumentParser()
    30  parser.add_argument(
    31      "filenames",
    32      help="list of files to check, all files if unspecified",
    33      nargs='*')
    34  
    35  rootdir = os.path.dirname(__file__) + "/../../"
    36  rootdir = os.path.abspath(rootdir)
    37  parser.add_argument(
    38      "--rootdir", default=rootdir, help="root directory to examine")
    39  
    40  default_boilerplate_dir = os.path.join(rootdir, "hack/boilerplate")
    41  parser.add_argument(
    42      "--boilerplate-dir", default=default_boilerplate_dir)
    43  
    44  parser.add_argument(
    45      "-v", "--verbose",
    46      help="give verbose output regarding why a file does not pass",
    47      action="store_true")
    48  
    49  args = parser.parse_args()
    50  
    51  verbose_out = sys.stderr if args.verbose else open("/dev/null", "w")
    52  
    53  
    54  def get_refs():
    55      refs = {}
    56  
    57      for path in glob.glob(os.path.join(args.boilerplate_dir, "boilerplate.*.txt")):
    58          extension = os.path.basename(path).split(".")[1]
    59  
    60          ref_file = open(path, 'r')
    61          ref = ref_file.read().splitlines()
    62          ref_file.close()
    63          refs[extension] = ref
    64  
    65      return refs
    66  
    67  
    68  def is_generated_file(filename, data, regexs):
    69      for d in skipped_ungenerated_files:
    70          if d in filename:
    71              return False
    72  
    73      p = regexs["generated"]
    74      return p.search(data)
    75  
    76  
def file_passes(filename, refs, regexs):
    """Check whether *filename* starts with the expected boilerplate header.

    Args:
        filename: path of the file to verify.
        refs: dict of extension/basename -> reference header lines, as
            produced by get_refs().
        regexs: dict of compiled patterns, as produced by get_regexs().

    Returns:
        True if the header matches the reference; False otherwise.  The
        reason for a failure is written to ``verbose_out``.
    """
    try:
        f = open(filename, 'r')
    except Exception as exc:
        print("Unable to open %s: %s" % (filename, exc), file=verbose_out)
        return False

    data = f.read()
    f.close()

    # determine if the file is automatically generated
    generated = is_generated_file(filename, data, regexs)

    basename = os.path.basename(filename)
    extension = file_extension(filename)
    # Generated go/bzl files have dedicated reference headers.
    if generated:
        if extension == "go":
            extension = "generatego"
        elif extension == "bzl":
            extension = "generatebzl"

    # Extensionless files (e.g. Makefile) are looked up by basename instead.
    if extension != "":
        ref = refs[extension]
    else:
        ref = refs[basename]

    # remove extra content from the top of files:
    # build constraints for Go, the shebang line for scripts.
    if extension == "go" or extension == "generatego":
        p = regexs["go_build_constraints"]
        (data, found) = p.subn("", data, 1)
    elif extension in ["sh", "py"]:
        p = regexs["shebang"]
        (data, found) = p.subn("", data, 1)

    data = data.splitlines()

    # if our test file is smaller than the reference it surely fails!
    if len(ref) > len(data):
        print('File %s smaller than reference (%d < %d)' %
              (filename, len(data), len(ref)),
              file=verbose_out)
        return False

    # trim our file to the same number of lines as the reference file
    data = data[:len(ref)]

    # A literal "YEAR" in the header means the template placeholder was never
    # filled in (or the template was wrongly copied into a generated file).
    p = regexs["year"]
    for d in data:
        if p.search(d):
            if generated:
                print('File %s has the YEAR field, but it should not be in generated file' %
                      filename, file=verbose_out)
            else:
                print('File %s has the YEAR field, but missing the year of date' %
                      filename, file=verbose_out)
            return False

    if not generated:
        # Replace the first line containing an accepted year (regex like
        # "2014|2015|...") with the literal "YEAR" so it matches the template.
        p = regexs["date"]
        for i, d in enumerate(data):
            (data[i], found) = p.subn('YEAR', d)
            if found != 0:
                break

    # if we don't match the reference at this point, fail
    if ref != data:
        print("Header in %s does not match reference, diff:" %
              filename, file=verbose_out)
        if args.verbose:
            print(file=verbose_out)
            for line in difflib.unified_diff(ref, data, 'reference', filename, lineterm=''):
                print(line, file=verbose_out)
            print(file=verbose_out)
        return False

    return True
   154  
   155  
   156  def file_extension(filename):
   157      return os.path.splitext(filename)[1].split(".")[-1].lower()
   158  
   159  
# Path fragments that are never checked.  Matching is by substring against
# the full path (see normalize_files), so e.g. 'vendor' also prunes nested
# vendor directories.
skipped_dirs = [
    'cluster/env.sh',
    '.git',
    '_gopath',
    'hack/boilerplate/test',
    '_output',
    'staging/src/k8s.io/kubectl/pkg/generated/bindata.go',
    'test/e2e/generated/bindata.go',
    'third_party',
    'vendor',
    '.venv',
]

# Files that contain the literal 'DO NOT EDIT' but are hand-maintained, so
# they must not be treated as generated (see is_generated_file).
skipped_ungenerated_files = [
    'hack/lib/swagger.sh', 'hack/boilerplate/boilerplate.py']
   176  
   177  
   178  def normalize_files(files):
   179      newfiles = []
   180      for pathname in files:
   181          if any(x in pathname for x in skipped_dirs):
   182              continue
   183          newfiles.append(pathname)
   184      for i, pathname in enumerate(newfiles):
   185          if not os.path.isabs(pathname):
   186              newfiles[i] = os.path.join(args.rootdir, pathname)
   187      return newfiles
   188  
   189  
   190  def get_files(extensions):
   191      files = []
   192      if len(args.filenames) > 0:
   193          files = args.filenames
   194      else:
   195          for root, dirs, walkfiles in os.walk(args.rootdir):
   196              # don't visit certain dirs. This is just a performance improvement
   197              # as we would prune these later in normalize_files(). But doing it
   198              # cuts down the amount of filesystem walking we do and cuts down
   199              # the size of the file list
   200              for d in skipped_dirs:
   201                  if d in dirs:
   202                      dirs.remove(d)
   203  
   204              for name in walkfiles:
   205                  pathname = os.path.join(root, name)
   206                  files.append(pathname)
   207  
   208      files = normalize_files(files)
   209      outfiles = []
   210      for pathname in files:
   211          basename = os.path.basename(pathname)
   212          extension = file_extension(pathname)
   213          if extension in extensions or basename in extensions:
   214              outfiles.append(pathname)
   215      return outfiles
   216  
   217  
   218  def get_dates():
   219      years = datetime.datetime.now().year
   220      return '(%s)' % '|'.join((str(year) for year in range(2014, years+1)))
   221  
   222  
   223  def get_regexs():
   224      regexs = {}
   225      # Search for "YEAR" which exists in the boilerplate, but shouldn't in the real thing
   226      regexs["year"] = re.compile('YEAR')
   227      # get_dates return 2014, 2015, 2016, 2017, or 2018 until the current year as a regex like: "(2014|2015|2016|2017|2018)";
   228      # company holder names can be anything
   229      regexs["date"] = re.compile(get_dates())
   230      # strip the following build constraints/tags:
   231      # //go:build
   232      # // +build \n\n
   233      regexs["go_build_constraints"] = re.compile(
   234          r"^(//(go:build| \+build).*\n)+\n", re.MULTILINE)
   235      # strip #!.* from scripts
   236      regexs["shebang"] = re.compile(r"^(#!.*\n)\n*", re.MULTILINE)
   237      # Search for generated files
   238      regexs["generated"] = re.compile('DO NOT EDIT')
   239      return regexs
   240  
   241  
   242  def main():
   243      regexs = get_regexs()
   244      refs = get_refs()
   245      filenames = get_files(refs.keys())
   246  
   247      for filename in filenames:
   248          if not file_passes(filename, refs, regexs):
   249              print(filename, file=sys.stdout)
   250  
   251      print("Verified %d file headers match boilerplate" % (len(filenames),), file=sys.stderr)
   252  
   253      return 0
   254  
   255  
# Script entry point; main() always returns 0, so the exit status does not
# indicate failures — callers inspect stdout for failing filenames.
if __name__ == "__main__":
    sys.exit(main())