sigs.k8s.io/cluster-api/bootstrap/kubeadm@v0.0.0-20191016155141-23a891785b60/hack/boilerplate/boilerplate.py (about)

#!/usr/bin/env python

# Copyright 2015 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

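"""Verifies that files carry the expected license boilerplate header.

Reference headers live next to this script as boilerplate.<extension>.txt
(see --boilerplate-dir). By default every matching file under --rootdir is
checked; explicit filenames may be passed instead. Files whose header does
not match their reference are printed to stdout; -v explains why a file fails.
"""
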
from __future__ import print_function

import argparse
import datetime
import difflib
import glob
import os
import re
import sys

parser = argparse.ArgumentParser()
parser.add_argument(
    "filenames",
    help="list of files to check, all files if unspecified",
    nargs='*')

rootdir = os.path.dirname(__file__) + "/../../"
rootdir = os.path.abspath(rootdir)
parser.add_argument(
    "--rootdir", default=rootdir, help="root directory to examine")

default_boilerplate_dir = os.path.join(rootdir, "hack/boilerplate")
parser.add_argument(
    "--boilerplate-dir", default=default_boilerplate_dir)

parser.add_argument(
    "-v", "--verbose",
    help="give verbose output regarding why a file does not pass",
    action="store_true")

args = parser.parse_args()

verbose_out = sys.stderr if args.verbose else open(os.devnull, "w")

def get_refs():
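    """Reads the reference boilerplate files from --boilerplate-dir.

    Returns a dict mapping the middle component of each boilerplate.<x>.txt
    file name (usually an extension such as "go" or "py") to that file's
    contents as a list of lines.
    """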
    refs = {}

    for path in glob.glob(os.path.join(args.boilerplate_dir, "boilerplate.*.txt")):
        extension = os.path.basename(path).split(".")[1]

        ref_file = open(path, 'r')
        ref = ref_file.read().splitlines()
        ref_file.close()
        refs[extension] = ref

    return refs

def is_generated_file(filename, data, regexs):
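    """Reports whether a file looks automatically generated.

    Files listed in skipped_ungenerated_files are never treated as generated;
    otherwise the decision is based on the "generated" marker regex.
    """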
    for d in skipped_ungenerated_files:
        if d in filename:
            return False

    p = regexs["generated"]
    return p.search(data)

def file_passes(filename, refs, regexs):
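    """Checks a single file against the reference header for its type.

    Shebang lines and Go build constraints are stripped first, and the
    copyright year is normalized to "YEAR" before comparing against the
    reference. Returns True if the header matches, False otherwise.
    """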
    try:
        f = open(filename, 'r')
    except Exception as exc:
        print("Unable to open %s: %s" % (filename, exc), file=verbose_out)
        return False

    data = f.read()
    f.close()

    # determine if the file is automatically generated
    generated = is_generated_file(filename, data, regexs)

    basename = os.path.basename(filename)
    extension = file_extension(filename)
    if generated:
        if extension == "go":
            extension = "generatego"
        elif extension == "bzl":
            extension = "generatebzl"

    if extension != "":
        ref = refs[extension]
    else:
        ref = refs[basename]

    # remove extra content from the top of files
    if extension == "go" or extension == "generatego":
        p = regexs["go_build_constraints"]
        (data, found) = p.subn("", data, 1)
    elif extension in ["sh", "py"]:
        p = regexs["shebang"]
        (data, found) = p.subn("", data, 1)

    data = data.splitlines()

    # if our test file is smaller than the reference it surely fails!
    if len(ref) > len(data):
        print('File %s smaller than reference (%d < %d)' %
              (filename, len(data), len(ref)),
              file=verbose_out)
        return False

    # trim our file to the same number of lines as the reference file
    data = data[:len(ref)]

    p = regexs["year"]
    for d in data:
        if p.search(d):
            if generated:
                print('File %s has the YEAR field, but it should not be in a generated file' % filename, file=verbose_out)
            else:
                print('File %s has the YEAR field, but the year has not been filled in' % filename, file=verbose_out)
            return False

    if not generated:
        # Replace the copyright year(s) on the first line that contains one with "YEAR",
        # so the header can be compared against the reference
        p = regexs["date"]
        for i, d in enumerate(data):
            (data[i], found) = p.subn('YEAR', d)
            if found != 0:
                break

    # if we don't match the reference at this point, fail
    if ref != data:
        print("Header in %s does not match reference, diff:" % filename, file=verbose_out)
        if args.verbose:
            print(file=verbose_out)
            for line in difflib.unified_diff(ref, data, 'reference', filename, lineterm=''):
                print(line, file=verbose_out)
            print(file=verbose_out)
        return False

    return True

def file_extension(filename):
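    """Returns the lower-cased file extension without the leading dot ("" if none)."""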
    return os.path.splitext(filename)[1].split(".")[-1].lower()

skipped_dirs = ["third_party", ".git", "vendor", "hack/boilerplate/test"]

# files that contain the 'DO NOT EDIT' marker but are not actually generated
skipped_ungenerated_files = ['hack/boilerplate/boilerplate.py']

def normalize_files(files):
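    """Drops paths under skipped_dirs and resolves the rest against --rootdir.

    Relative paths are turned into absolute paths; already-absolute paths are
    left untouched.
    """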
    newfiles = []
    for pathname in files:
        if any(x in pathname for x in skipped_dirs):
            continue
        newfiles.append(pathname)
    for i, pathname in enumerate(newfiles):
        if not os.path.isabs(pathname):
            newfiles[i] = os.path.join(args.rootdir, pathname)
    return newfiles

def get_files(extensions):
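    """Builds the list of files to check.

    Uses the filenames passed on the command line when given; otherwise walks
    --rootdir, skipping skipped_dirs. Only files whose extension or basename
    has a matching reference header are returned.
    """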
    files = []
    if len(args.filenames) > 0:
        files = args.filenames
    else:
        for root, dirs, walkfiles in os.walk(args.rootdir):
            # don't visit certain dirs. This is just a performance
            # optimization: normalize_files() would prune them later anyway,
            # but skipping them here avoids walking those trees and keeps the
            # file list small.
            for d in skipped_dirs:
                if d in dirs:
                    dirs.remove(d)

            for name in walkfiles:
                pathname = os.path.join(root, name)
                files.append(pathname)

    files = normalize_files(files)
    outfiles = []
    for pathname in files:
        basename = os.path.basename(pathname)
        extension = file_extension(pathname)
        if extension in extensions or basename in extensions:
            outfiles.append(pathname)
    return outfiles

def get_dates():
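    """Returns a regex alternation matching every year from 2014 through the
    current year, as a string like "(2014|2015|2016|...)"."""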
    years = datetime.datetime.now().year
    return '(%s)' % '|'.join((str(year) for year in range(2014, years+1)))

def get_regexs():
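    """Compiles the regexes used to check and normalize file headers."""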
    regexs = {}
    # Search for "YEAR", which exists in the boilerplate but shouldn't appear in real files
    regexs["year"] = re.compile('YEAR')
    # get_dates() returns a regex matching any year from 2014 through the current year,
    # e.g. "(2014|2015|2016|2017|2018)"; company holder names can be anything
    regexs["date"] = re.compile(get_dates())
    # strip // +build \n\n build constraints
    regexs["go_build_constraints"] = re.compile(r"^(// \+build.*\n)+\n", re.MULTILINE)
    # strip #!.* from scripts
    regexs["shebang"] = re.compile(r"^(#!.*\n)\n*", re.MULTILINE)
    # Search for generated files
    regexs["generated"] = re.compile('(DO NOT EDIT)|(do not modify manually)')
    return regexs

def main():
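    """Checks every candidate file and prints failing filenames to stdout."""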
    regexs = get_regexs()
    refs = get_refs()
    filenames = get_files(refs.keys())

    for filename in filenames:
        if not file_passes(filename, refs, regexs):
            print(filename, file=sys.stdout)

    return 0

if __name__ == "__main__":
    sys.exit(main())
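
# Example invocations (illustrative only; the flags are the ones defined above):
#
#   ./hack/boilerplate/boilerplate.py                     # check all matching files under --rootdir
#   ./hack/boilerplate/boilerplate.py -v path/to/file.go  # check specific files, explain failures
#
# Filenames whose headers do not match the reference are printed to stdout.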