github.com/sercand/please@v13.4.0+incompatible/test/BUILD

# A series of end-to-end tests on the Please binary.
#
# These are a little fragile since they assume things about specific output messages,
# which of course we'd rather not do. However it's something of a pain to get good test
# coverage since by its nature the tool has heaps of side effects, so this is at least
# one way of reassuring ourselves that it behaves as expected.
#
# Note that we have to be kinda careful with this; since it invokes plz to run tests while
# an instance of it is already going, there are potential concurrency issues. These are
# mitigated by having these tests only run tests in this package which are tagged as manual,
# so the bootstrap script won't try to run them twice simultaneously.

subinclude("//build_defs:plz_e2e_test")

# This isn't really needed in this BUILD file, but is intended to avoid race conditions
# between this and other BUILD files that have the same dependency.
subinclude("@pleasings//go:go_bindata")

# Tests the expected output of 'query somepath'.
# Note that you have to be careful with the choice of targets, since the path
# found is not necessarily unique or stable.
plz_e2e_test(
    name = "query_somepath_test",
    cmd = "plz query somepath //tools/java:junit_runner //third_party/java:junit",
    expected_output = "query_somepath_test.txt",
)

plz_e2e_test(
    name = "query_somepath_reverse_test",
    cmd = "plz query somepath //third_party/java:junit //tools/java:junit_runner",
    expected_output = "query_somepath_test.txt",
)

plz_e2e_test(
    name = "query_somepath_nopath_test",
    cmd = "plz query somepath //src:please //third_party/java:junit",
    expected_output = "query_somepath_nopath_test.txt",
)

# Tests that targets can only depend on other targets that are visible to them.
plz_e2e_test(
    name = "dep_required_test",
    cmd = "plz build //test:failed_dep",
    expect_output_contains = "Target //src/core:config_test isn't visible to //test:failed_dep",
    expected_failure = True,
)

build_rule(
    name = "failed_dep",
    labels = ["manual"],
    deps = ["//src/core:config_test"],
)
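
# For reference, visibility is granted on the providing target: the dependency above
# would be fine if //src/core:config_test listed this package (or //test/...) in its
# 'visibility' argument. A minimal sketch of a target granting such visibility follows;
# the target is purely illustrative and nothing else here uses it.
filegroup(
    name = "_visibility_example",
    srcs = ["query_output_test.txt"],
    labels = ["manual"],
    visibility = ["//test/..."],
)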

# Test that we count test output correctly. Also indirectly tests access to test data files.
plz_e2e_test(
    name = "test_output_test",
    cmd = "plz test //test:test_output_test_1 //test:test_output_test_2",
    expect_output_contains = "6 passed.",
    # Invokes a containerised test although it isn't one itself.
    labels = ["container"],
)

gentest(
    name = "test_output_test_1",
    data = ["test_output_test_1.txt"],
    labels = ["manual"],
    test_cmd = "cp $(location test_output_test_1.txt) test.results",
)

gentest(
    name = "test_output_test_2",
    container = True,
    data = ["test_output_test_2.xml"],
    labels = ["manual"],
    test_cmd = "cp $(location test_output_test_2.xml) test.results",
)
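# (Each gentest above just copies a canned results file into test.results, which is where
# plz picks a test's results up from; that's what fixes the "6 passed." count checked above.)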

# Test that a test is not re-run when its result is already cached.
plz_e2e_test(
    name = "test_caching_test",
    cmd = "plz test //test:caching_test && plz test -v 4 //test:caching_test",
    expect_output_contains = "Not re-running test //test:caching_test",
)

gentest(
    name = "caching_test",
    labels = ["manual"],
    no_test_output = True,
    test_cmd = "true",
    deps = ["//src:please"],
)

# Test that we don't generate coverage on running a test normally (because it's slower).
python_test(
    name = "_no_coverage_output_test",
    srcs = ["coverage_output_test.py"],
    labels = ["manual"],
)

plz_e2e_test(
    name = "no_coverage_output_test",
    cmd = "plz test //test:_no_coverage_output_test",
    expect_file_doesnt_exist = "../../../bin/test/.test_coverage__no_coverage_output_test*",
    labels = ["python3"],
)

# Test that we do generate it when using plz cover.
python_test(
    name = "_coverage_output_test",
    srcs = ["coverage_output_test.py"],
    labels = ["manual"],
)

plz_e2e_test(
    name = "coverage_output_test",
    cmd = "plz cover //test:_coverage_output_test",
    expect_file_exists = "../../../bin/test/.test_coverage__coverage_output_test*",
    # Temporarily disabled until #25 is resolved. Until then it recompiles various go_library
    # rules which makes it (and possibly others) flaky.
    labels = ["manual"],
)

# Quick test for plz run
plz_e2e_test(
    name = "plz_run_test",
    cmd = "plz run //src:please -- --version",
    expect_output_contains = "Please version",
)

# Test for query alltargets
plz_e2e_test(
    name = "query_alltargets_test",
    cmd = "plz query alltargets",
    expect_output_contains = "//src:please",
)

# Test for query output
plz_e2e_test(
    name = "query_output_test",
    cmd = "plz query output //test:query_output_filegroup",
    expected_output = "query_output_test.txt",
)

filegroup(
    name = "query_output_filegroup",
    srcs = ["//src:please"],
)

# Test running a test with no-cache
plz_e2e_test(
    name = "test_nocache_test",
    cmd = "plz test --nocache //test:nocache_test",
)

gentest(
    name = "nocache_test",
    labels = ["manual"],
    no_test_output = True,
    test_cmd = "true",
    deps = ["//src:please"],
)

# Simulates a code generating rule to test the require / provide mechanism.
plz_e2e_test(
    name = "require_provide_test",
    cmd = "plz build //test/moar:require_provide_check -v 2 -p",
    expect_output_doesnt_contain = "//test/moar:test_require",
)
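
# For context, the rules under //test/moar implement that mechanism roughly like this: a
# code generating rule 'provides' per-language targets, and a consumer that 'requires' a
# language only picks up the matching provided target, so the -p output above shouldn't
# mention //test/moar:test_require. A minimal illustrative sketch (hypothetical targets,
# not used by the test above):
filegroup(
    name = "_provide_example_py",
    srcs = ["coverage_output_test.py"],
    labels = ["manual"],
)

filegroup(
    name = "_provide_example",
    srcs = [":_provide_example_py"],
    labels = ["manual"],
    provides = {"py": ":_provide_example_py"},
)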

# Test for running individual tests
python_test(
    name = "individual_test_run_py",
    srcs = ["individual_test_run.py"],
    labels = ["manual"],
)

plz_e2e_test(
    name = "individual_python_test",
    cmd = "plz test //test:individual_test_run_py TestRunningIndividualTests.test_first_thing",
    expect_output_contains = "1 test target and 1 test run",
    labels = ["python3"],
)

java_test(
    name = "individual_test_run_java",
    srcs = ["IndividualTest.java"],
    labels = ["manual"],
    deps = [
        "//third_party/java:junit",
    ],
)

plz_e2e_test(
    name = "individual_java_test",
    cmd = "plz test //test:individual_test_run_java testFirstThing",
    expect_output_contains = "1 test target and 1 test run",
    labels = ["java"],
)

java_test(
    name = "no_test_run_java",
    srcs = ["NoTestRun.java"],
    labels = ["manual"],
    deps = [
        "//third_party/java:junit",
    ],
)

plz_e2e_test(
    name = "no_java_test",
    cmd = "plz test -p //test:no_test_run_java wibblewobble",
    expect_output_contains = "1 errored",
    expected_failure = True,
    labels = ["java"],
)

# Test re-runs.
go_test(
    name = "num_runs_go_test",
    srcs = ["num_runs_test.go"],
    labels = ["manual"],
)

plz_e2e_test(
    name = "num_runs_test",
    cmd = "plz test -p --num_runs=5 //test:num_runs_go_test",
    expect_output_contains = "5 passed",
)

# Tests for query affectedtests.
plz_e2e_test(
    name = "query_affectedtests_test",
    cmd = "plz query affectedtargets --tests -p test/affectedtests_test.go",
    expected_output = "query_affectedtests_test.txt",
)

plz_e2e_test(
    name = "query_affectedtests_stdin_test",
    cmd = "echo test/affectedtests_test.go | plz query affectedtargets --tests -p -",
    expected_output = "query_affectedtests_test.txt",
)

go_test(
    name = "affectedtests_test",
    srcs = ["affectedtests_test.go"],
)

go_test(
    name = "affectedtests_manual_test",
    srcs = ["affectedtests_test.go"],
    labels = ["manual"],
)

# Tests for query completions
plz_e2e_test(
    name = "basic_completion_test",
    cmd = "plz query completions //test/completions: | sort",
    expected_output = "basic_completions.txt",
)

plz_e2e_test(
    name = "build_completion_test",
    cmd = "plz query completions //test/completions: --cmd build | sort",
    expected_output = "basic_completions.txt",
)

plz_e2e_test(
    name = "test_completion_test",
    cmd = "plz query completions //test/completions: --cmd test | sort",
    expected_output = "test_completions.txt",
)

plz_e2e_test(
    name = "run_completion_test",
    cmd = "plz query completions //test/completions: --cmd run | sort",
    expected_output = "run_completions.txt",
)

# Flag tests
plz_e2e_test(
    name = "extra_flag_test",
    cmd = "plz cache clean",
    expected_failure = True,
)

# Test the add_out functionality which has a subtle dependency on the order
# we do things relating to the cache.
genrule(
    name = "_add_out_gen",
    cmd = "echo hello > _add_out_gen.txt",
    post_build = lambda name, _: add_out(name, "_add_out_gen.txt"),
)
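# (add_out registers an extra output on the rule from its post-build function, i.e. after
# the command has already run; that late registration is the cache-ordering subtlety the
# comment above refers to.)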

gentest(
    name = "_add_out_test",
    data = [":_add_out_gen"],
    labels = ["manual"],
    no_test_output = True,
    test_cmd = "ls test/_add_out_gen.txt",
)

plz_e2e_test(
    name = "add_out_test",
    cmd = "plz build //test:_add_out_test && plz clean //test:_add_out_gen && plz test //test:_add_out_test",
)

# Test the extra output functionality.
go_test(
    name = "extra_test_output_go_test",
    srcs = ["extra_test_output_test.go"],
    container = True,
    labels = ["manual"],
    test_outputs = ["truth.txt"],
)

plz_e2e_test(
    name = "extra_test_output_test",
    cmd = "plz test //test:extra_test_output_go_test",
    expect_file_exists = "../../../bin/test/truth.txt",
    labels = ["container"],
)
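# (test_outputs names extra files that the test produces and that should be copied out
# alongside its results, hence the expect_file_exists check above.)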

# Test 'query alltargets'
plz_e2e_test(
    name = "query_alltargets_1_test",
    cmd = "plz query alltargets //test/moar/...",
    expected_output = "query_alltargets_1.txt",
)

plz_e2e_test(
    name = "query_alltargets_2_test",
    cmd = "plz query alltargets //test/moar/... --include test",
    expected_output = "query_alltargets_2.txt",
)

plz_e2e_test(
    name = "cyclic_dependency_test",
    cmd = "plz test //plz-out/tmp/test/cyclic_dependency_test._test/test/cycle:all",
    data = ["cycle/TEST_BUILD"],
    expect_output_contains = "Dependency cycle found",
    expected_failure = True,
    pre_cmd = "mv test/cycle/TEST_BUILD test/cycle/BUILD",
)
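# (The BUILD file containing the cycle is checked in as TEST_BUILD and only renamed to
# BUILD by pre_cmd inside the test's own directory, presumably so the cycle never exists
# in the real source tree.)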

# Used manually for testing the test flakiness stuff.
python_test(
    name = "flaky_test",
    srcs = ["flaky_test.py"],
    flaky = True,
    labels = ["manual"],
)

# Tests on commands / flags etc.
plz_e2e_test(
    name = "unknown_command_test",
    cmd = "plz fix",
    expect_output_contains = "Unknown command",
    expected_failure = True,
)

plz_e2e_test(
    name = "unknown_flag_test",
    cmd = "plz build --wibble",
    expect_output_contains = "unknown flag",
    expected_failure = True,
)

# Tests on the stamp attribute.
# These essentially pass as long as they can build.
# (Note that we can't use bash's `[ -v STAMP ]` here because OSX is unlikely to have a
# new enough version of bash.)
build_rule(
    name = "stamp_negative_test",
    cmd = "[ -z ${STAMP+x} ]",
    no_test_output = True,
    test = True,
    test_cmd = "true",
)

build_rule(
    name = "stamp_positive_test",
    cmd = "[ ! -z ${STAMP+x} ]",
    no_test_output = True,
    stamp = True,
    test = True,
    test_cmd = "true",
)
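# (Both rules rely on $STAMP only being set in the build environment when stamp = True:
# the positive case asserts it's present, the negative case that it isn't.)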

# Test on a build rule that writes a symlink.
genrule(
    name = "symlink_gen",
    srcs = ["symlink_test.txt"],
    outs = ["symlink_test.txt"],
    cmd = "ln -s $SRCS $OUTS",
)

gentest(
    name = "symlink_test",
    data = [":symlink_gen"],
    no_test_output = True,
    test_cmd = "test -L test/symlink_test.txt",
)

# This rule tests the no_test_output flag. If that weren't honoured,
# plz would report a test failure because the results were missing.
gentest(
    name = "no_test_output_test",
    no_test_output = True,
    test_cmd = "echo SUCCESS",
)

# This tests that data files exist in the correct location, and
# indirectly performs a basic test of sh_test which we don't use elsewhere.
sh_test(
    name = "data_files_test",
    src = "data_files_test.sh",
    data = ["container_data.txt"],
)

# This test is here as a convenience to test the flakiness functionality.
# It just uses random internally, so it won't pass consistently.
python_test(
    name = "flakiness_test",
    srcs = ["flakiness_test.py"],
    flaky = 5,
    labels = ["manual"],
)
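# (A numeric flaky value presumably allows the test up to that many attempts before it's
# reported as failed.)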

# A binary whose name matches its package; presumably here to check that such name
# conflicts build OK (see name_conflict.go).
go_binary(
    name = "test",
    srcs = ["name_conflict.go"],
)

sh_test(
    name = "glob_build_test",
    src = "glob_build_test.sh",
    data = glob(["*"]),
)