github.com/shashidharatd/test-infra@v0.0.0-20171006011030-71304e1ca560/jobs/config_test.py

#!/usr/bin/env python

# Copyright 2017 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""Tests for config.json and Prow configuration."""


import unittest

import collections
import json
import os
import re

import config_sort
import env_gc
import yaml

# pylint: disable=too-many-public-methods, too-many-branches, too-many-locals, too-many-statements

def get_required_jobs():
    required_jobs = set()
    configs_dir = config_sort.test_infra('mungegithub', 'submit-queue', 'deployment')
    for root, _, files in os.walk(configs_dir):
        for file_name in files:
            if file_name == 'configmap.yaml':
                path = os.path.join(root, file_name)
                with open(path) as fp:
                    conf = yaml.safe_load(fp)
                    for job in conf.get('required-retest-contexts', '').split(','):
                        if job:
                            required_jobs.add(job)
    return required_jobs
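
# Illustrative note (assumed shape, not copied from a real deployment):
# get_required_jobs() above expects each submit-queue configmap.yaml to carry a
# comma-separated 'required-retest-contexts' value, e.g.
#   required-retest-contexts: "pull-kubernetes-e2e-gce,pull-kubernetes-unit"
# which would yield {'pull-kubernetes-e2e-gce', 'pull-kubernetes-unit'}.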

class JobTest(unittest.TestCase):

    excludes = [
        'BUILD',  # For bazel
        'config.json',  # For --json mode
        'validOwners.json',  # Contains a list of current sigs; sigs are allowed to own jobs
        'config_sort.py',  # Tool script to sort config.json
        'config_test.py',  # Script for testing config.json and Prow config.
        'env_gc.py',  # Tool script to garbage collect unused .env files.
        'move_extract.py',
        # Node-e2e image configurations
        'benchmark-config.yaml',
        'image-config.yaml',
        'image-config-serial.yaml',
    ]
    # also exclude .pyc
    excludes.extend(e + 'c' for e in excludes if e.endswith('.py'))

    yaml_suffix = {
        'jenkins/job-configs/bootstrap-maintenance.yaml' : 'suffix',
        'jenkins/job-configs/kubernetes-jenkins-pull/bootstrap-pull-json.yaml' : 'jsonsuffix',
        'jenkins/job-configs/kubernetes-jenkins-pull/bootstrap-security-pull.yaml' : 'suffix',
        'jenkins/job-configs/kubernetes-jenkins/bootstrap-ci.yaml' : 'suffix',
        'jenkins/job-configs/kubernetes-jenkins/bootstrap-ci-commit.yaml' : 'commit-suffix',
        'jenkins/job-configs/kubernetes-jenkins/bootstrap-ci-repo.yaml' : 'repo-suffix',
        'jenkins/job-configs/kubernetes-jenkins/bootstrap-ci-soak.yaml' : 'soak-suffix',
        'jenkins/job-configs/kubernetes-jenkins/bootstrap-ci-dockerpush.yaml' : 'dockerpush-suffix'
    }

    prow_config = '../prow/config.yaml'

    realjobs = {}
    prowjobs = []
    presubmits = []

    @property
    def jobs(self):
        """[(job, job_path)] sequence"""
        for path, _, filenames in os.walk(config_sort.test_infra('jobs')):
            for job in [f for f in filenames if f not in self.excludes]:
                job_path = os.path.join(path, job)
                yield job, job_path

    def test_config_is_sorted(self):
        """Test jobs/config.json, prow/config.yaml and boskos/resources.json are sorted."""
        with open(config_sort.test_infra('jobs/config.json')) as fp:
            original = fp.read()
            expect = config_sort.sorted_job_config().getvalue()
            if original != expect:
                self.fail('jobs/config.json is not sorted, please run '
                          '`bazel run //jobs:config_sort`')
        with open(config_sort.test_infra('prow/config.yaml')) as fp:
            original = fp.read()
            expect = config_sort.sorted_prow_config().getvalue()
            if original != expect:
                self.fail('prow/config.yaml is not sorted, please run '
                          '`bazel run //jobs:config_sort`')
        with open(config_sort.test_infra('boskos/resources.json')) as fp:
            original = fp.read()
            expect = config_sort.sorted_boskos_config().getvalue()
            if original != expect:
                self.fail('boskos/resources.json is not sorted, please run '
                          '`bazel run //jobs:config_sort`')

    def test_orphaned_env(self):
        orphans = env_gc.find_orphans()
        if orphans:
            self.fail('the following .env files are not referenced ' +
                      'in config.json, please run `bazel run //jobs:env_gc`: ' +
                      ' '.join(orphans))

    def test_bootstrap_maintenance_yaml(self):
        def check(job, name):
            job_name = 'maintenance-%s' % name
            self.assertIn('frequency', job)
            self.assertIn('repo-name', job)
            self.assertIn('.', job['repo-name'])  # Has domain
            self.assertGreater(job['timeout'], 0)
            return job_name

        self.check_bootstrap_yaml('jenkins/job-configs/bootstrap-maintenance.yaml', check)

    def test_bootstrap_pull_json_yaml(self):
        def check(job, name):
            job_name = 'pull-%s' % name
            self.assertIn('max-total', job)
            self.assertIn('repo-name', job)
            self.assertIn('.', job['repo-name'])  # Has domain
            self.assertIn('timeout', job)
            self.assertNotIn('json', job)
            self.assertGreater(job['timeout'], 0)
            return job_name

        self.check_bootstrap_yaml(
            'jenkins/job-configs/kubernetes-jenkins-pull/bootstrap-pull-json.yaml', check)
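
    # Note: the security pull jobs validated below are expected to mirror the
    # regular pull jobs entry-for-entry (see test_bootstrap_security_match),
    # differing only in their 'repo-name'.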
    def test_bootstrap_security_pull(self):
        def check(job, name):
            job_name = 'pull-%s' % name
            self.assertIn('max-total', job)
            self.assertIn('repo-name', job)
            self.assertIn('.', job['repo-name'])  # Has domain
            self.assertIn('timeout', job)
            self.assertNotIn('json', job)
            self.assertGreater(job['timeout'], 0)
            return job_name

        self.check_bootstrap_yaml(
            'jenkins/job-configs/kubernetes-jenkins-pull/bootstrap-security-pull.yaml', check)

    def test_bootstrap_security_match(self):
        json_jobs = self.load_bootstrap_yaml(
            'jenkins/job-configs/kubernetes-jenkins-pull/bootstrap-pull-json.yaml')

        sec_jobs = self.load_bootstrap_yaml(
            'jenkins/job-configs/kubernetes-jenkins-pull/bootstrap-security-pull.yaml')
        for name, job in sec_jobs.iteritems():
            self.assertIn(name, json_jobs)
            job2 = json_jobs[name]
            for attr in job:
                if attr == 'repo-name':
                    continue
                self.assertEquals(job[attr], job2[attr])


    def test_bootstrap_ci_yaml(self):
        def check(job, name):
            job_name = 'ci-%s' % name
            self.assertIn('frequency', job)
            self.assertIn('trigger-job', job)
            self.assertNotIn('branch', job)
            self.assertNotIn('json', job)
            self.assertGreater(job['timeout'], 0, job_name)
            self.assertGreaterEqual(job['jenkins-timeout'], job['timeout']+100, job_name)
            return job_name

        self.check_bootstrap_yaml(
            'jenkins/job-configs/kubernetes-jenkins/bootstrap-ci.yaml',
            check)

    def test_bootstrap_ci_commit_yaml(self):
        def check(job, name):
            job_name = 'ci-%s' % name
            self.assertIn('branch', job)
            self.assertIn('commit-frequency', job)
            self.assertIn('giturl', job)
            self.assertIn('repo-name', job)
            self.assertIn('timeout', job)
            self.assertNotIn('use-logexporter', job)
            self.assertGreater(job['timeout'], 0, job)

            return job_name

        self.check_bootstrap_yaml(
            'jenkins/job-configs/kubernetes-jenkins/bootstrap-ci-commit.yaml',
            check)

    def test_bootstrap_ci_repo_yaml(self):
        def check(job, name):
            job_name = 'ci-%s' % name
            self.assertIn('branch', job)
            self.assertIn('frequency', job)
            self.assertIn('repo-name', job)
            self.assertIn('timeout', job)
            self.assertNotIn('json', job)
            self.assertNotIn('use-logexporter', job)
            self.assertGreater(job['timeout'], 0, name)
            return job_name

        self.check_bootstrap_yaml(
            'jenkins/job-configs/kubernetes-jenkins/bootstrap-ci-repo.yaml',
            check)

    def test_bootstrap_ci_soak_yaml(self):
        def check(job, name):
            job_name = 'ci-%s' % name
            self.assertIn('blocker', job)
            self.assertIn('frequency', job)
            self.assertIn('scan', job)
            self.assertNotIn('repo-name', job)
            self.assertNotIn('branch', job)
            self.assertIn('timeout', job)
            self.assertIn('soak-repos', job)
            self.assertNotIn('use-logexporter', job)
            self.assertGreater(job['timeout'], 0, name)

            return job_name

        self.check_bootstrap_yaml(
            'jenkins/job-configs/kubernetes-jenkins/bootstrap-ci-soak.yaml',
            check)

    def test_bootstrap_ci_dockerpush(self):
        def check(job, name):
            job_name = 'ci-%s' % name
            self.assertIn('branch', job)
            self.assertIn('frequency', job)
            self.assertIn('repo-name', job)
            self.assertIn('timeout', job)
            self.assertNotIn('use-logexporter', job)
            self.assertGreater(job['timeout'], 0, name)
            return job_name

        self.check_bootstrap_yaml(
            'jenkins/job-configs/kubernetes-jenkins/bootstrap-ci-dockerpush.yaml',
            check)
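
    # check_job_template validates jenkins-job-builder templates. An
    # illustrative (assumed, simplified) shape of what it accepts:
    #   - job-template:
    #       name: 'ci-kubernetes-{suffix}'
    #       builders:
    #         - shell: "bootstrap ... --service-account=... --upload='gs://kubernetes-jenkins/logs' ..."
    # i.e. exactly one builder, a single-key dict whose shell command sets
    # --service-account= and an --upload= destination matching the job type.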
    def check_job_template(self, tmpl):
        builders = tmpl.get('builders')
        if not isinstance(builders, list):
            self.fail(tmpl)
        self.assertEquals(1, len(builders), builders)
        shell = builders[0]
        if not isinstance(shell, dict):
            self.fail(tmpl)
        self.assertEquals(1, len(shell), tmpl)
        if 'raw' in shell:
            self.assertEquals('maintenance-all-{suffix}', tmpl['name'])
            return
        cmd = shell.get('shell')
        if not isinstance(cmd, basestring):
            self.fail(tmpl)
        self.assertIn('--service-account=', cmd)
        self.assertIn('--upload=', cmd)
        if 'kubernetes-security' in cmd:
            self.assertIn('--upload=\'gs://kubernetes-security-jenkins/pr-logs\'', cmd)
        elif '${{PULL_REFS}}' in cmd:
            self.assertIn('--upload=\'gs://kubernetes-jenkins/pr-logs\'', cmd)
        else:
            self.assertIn('--upload=\'gs://kubernetes-jenkins/logs\'', cmd)

    def add_prow_job(self, job):
        name = job.get('name')
        real_job = {}
        real_job['name'] = name
        if 'spec' in job:
            spec = job.get('spec')
            for container in spec.get('containers'):
                if 'args' in container:
                    for arg in container.get('args'):
                        match = re.match(r'--timeout=(\d+)', arg)
                        if match:
                            real_job['timeout'] = match.group(1)
        if 'pull-' not in name and name in self.realjobs and name not in self.prowjobs:
            self.fail('CI job %s exists in both Jenkins and Prow config!' % name)
        if name not in self.realjobs:
            self.realjobs[name] = real_job
        self.prowjobs.append(name)
        if 'run_after_success' in job:
            for sub in job.get('run_after_success'):
                self.add_prow_job(sub)

    def load_prow_yaml(self, path):
        with open(os.path.join(
                os.path.dirname(__file__), path)) as fp:
            doc = yaml.safe_load(fp)

        if 'periodics' not in doc:
            self.fail('No periodics in prow config!')

        if 'presubmits' not in doc:
            self.fail('No presubmits in prow config!')

        for item in doc.get('periodics'):
            self.add_prow_job(item)

        if 'postsubmits' not in doc:
            self.fail('No postsubmits in prow config!')

        self.presubmits = doc.get('presubmits')
        postsubmits = doc.get('postsubmits')

        for _repo, joblist in self.presubmits.items() + postsubmits.items():
            for job in joblist:
                self.add_prow_job(job)
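
    # load_bootstrap_yaml parses a jenkins-job-builder file: it checks every
    # job-template, locates the bootstrap-* project, and returns a dict mapping
    # each suffix entry's name to its job definition, also registering the
    # resolved 'job-name' values in self.realjobs.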
    def load_bootstrap_yaml(self, path):
        with open(config_sort.test_infra(path)) as fp:
            doc = yaml.safe_load(fp)

        project = None
        defined_templates = set()
        for item in doc:
            if not isinstance(item, dict):
                continue
            if isinstance(item.get('job-template'), dict):
                defined_templates.add(item['job-template']['name'])
                self.check_job_template(item['job-template'])
            if not isinstance(item.get('project'), dict):
                continue
            project = item['project']
            self.assertIn('bootstrap-', project.get('name'))
            break
        else:
            self.fail('Could not find bootstrap-pull-jobs project')

        self.assertIn('jobs', project)
        used_templates = {j for j in project['jobs']}
        msg = '\nMissing templates: %s\nUnused templates: %s' % (
            ','.join(used_templates - defined_templates),
            ','.join(defined_templates - used_templates))
        self.assertEquals(defined_templates, used_templates, msg)

        self.assertIn(path, self.yaml_suffix)
        jobs = project.get(self.yaml_suffix[path])
        if not jobs or not isinstance(jobs, list):
            self.fail('Could not find suffix list in %s' % (project))

        real_jobs = {}
        for job in jobs:
            # Things to check on all bootstrap jobs
            if not isinstance(job, dict):
                self.fail('suffix items should be dicts: %s' % jobs)
            self.assertEquals(1, len(job), job)
            name = job.keys()[0]
            real_job = job[name]
            self.assertNotIn(name, real_jobs, 'duplicate job: %s' % name)
            real_jobs[name] = real_job
            real_name = real_job.get('job-name', 'unset-%s' % name)
            if real_name not in self.realjobs:
                self.realjobs[real_name] = real_job
        return real_jobs

    def check_bootstrap_yaml(self, path, check):
        for name, real_job in self.load_bootstrap_yaml(path).iteritems():
            # Things to check on all bootstrap jobs

            for key, value in real_job.items():
                if not isinstance(value, (basestring, int)):
                    self.fail('Jobs may not contain child objects %s: %s' % (
                        key, value))
                if '{' in str(value):
                    self.fail('Jobs may not contain {expansions} - %s: %s' % (
                        key, value))  # Use simple strings
            # Things to check on specific flavors.
            job_name = check(real_job, name)
            self.assertTrue(job_name)
            self.assertEquals(job_name, real_job.get('job-name'))

    def get_real_bootstrap_job(self, job):
        key = os.path.splitext(job.strip())[0]
        if not key in self.realjobs:
            for yamlf in self.yaml_suffix:
                self.load_bootstrap_yaml(yamlf)
            self.load_prow_yaml(self.prow_config)
        self.assertIn(key, sorted(self.realjobs))  # sorted for clearer error message
        return self.realjobs.get(key)
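
    # test_non_blocking_jenkins cross-checks the Prow presubmits against the
    # submit-queue required-retest-contexts collected by get_required_jobs().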
    def test_non_blocking_jenkins(self):
        """All PR non-blocking jenkins jobs are always_run: false"""
        # ref https://github.com/kubernetes/test-infra/issues/4637
        if not self.presubmits:
            self.load_prow_yaml(self.prow_config)
        required_jobs = get_required_jobs()
        # TODO(bentheelder): should we also include other repos?
        # If we do, we need to check which ones have a deployment in get_required_jobs
        # and ignore the ones without submit-queue deployments. This seems brittle
        # and unnecessary for now though.
        for job in self.presubmits.get('kubernetes/kubernetes', []):
            if (job['agent'] == 'jenkins' and
                    job['name'] not in required_jobs and
                    job.get('always_run', False)):
                self.fail(
                    'Jenkins jobs should not be `always_run: true`'
                    ' unless they are required! %s'
                    % job['name'])

    def test_valid_timeout(self):
        """All jobs set a timeout less than 120m or set DOCKER_TIMEOUT."""
        default_timeout = 60
        bad_jobs = set()
        with open(config_sort.test_infra('jobs/config.json')) as fp:
            config = json.loads(fp.read())

        for job, job_path in self.jobs:
            job_name = job.rsplit('.', 1)[0]
            modern = config.get(job_name, {}).get('scenario') in [
                'kubernetes_e2e',
                'kubernetes_kops_aws',
            ]
            valids = [
                'kubernetes-e2e-',
                'kubernetes-kubemark-',
                'kubernetes-soak-',
                'kubernetes-federation-e2e-',
                'kops-e2e-',
            ]

            if not re.search('|'.join(valids), job):
                continue
            with open(job_path) as fp:
                lines = list(l for l in fp if not l.startswith('#'))
            container_timeout = default_timeout
            kubetest_timeout = None
            for line in lines:  # Validate old pattern no longer used
                if line.startswith('### Reporting'):
                    bad_jobs.add(job)
                if '{rc}' in line:
                    bad_jobs.add(job)
            self.assertFalse(job.endswith('.sh'), job)
            self.assertTrue(modern, job)

            realjob = self.get_real_bootstrap_job(job)
            self.assertTrue(realjob)
            self.assertIn('timeout', realjob, job)
            container_timeout = int(realjob['timeout'])
            for line in lines:
                if 'DOCKER_TIMEOUT=' in line:
                    self.fail('Set container timeout in prow and/or bootstrap yaml: %s' % job)
                if 'KUBEKINS_TIMEOUT=' in line:
                    self.fail(
                        'Set kubetest --timeout in config.json, not KUBEKINS_TIMEOUT: %s'
                        % job
                    )
            for arg in config[job_name]['args']:
                if arg == '--timeout=None':
                    bad_jobs.add(('Must specify a timeout', job, arg))
                mat = re.match(r'--timeout=(\d+)m', arg)
                if not mat:
                    continue
                kubetest_timeout = int(mat.group(1))
            if kubetest_timeout is None:
                self.fail('Missing timeout: %s' % job)
            if kubetest_timeout > container_timeout:
                bad_jobs.add((job, kubetest_timeout, container_timeout))
            elif kubetest_timeout + 20 > container_timeout:
                bad_jobs.add((
                    'insufficient kubetest leeway',
                    job, kubetest_timeout, container_timeout
                ))
        if bad_jobs:
            self.fail(
                'jobs: %s, '
                'the prow timeout needs to be at least 20min longer than the timeout in config.json'
                % ('\n'.join(str(s) for s in bad_jobs))
            )
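
    # test_valid_job_config_json assumes entries in jobs/config.json shaped
    # roughly like the following (illustrative job name and values, not real):
    #   "ci-example-e2e": {
    #       "scenario": "kubernetes_e2e",
    #       "sigOwners": ["sig-testing"],
    #       "args": ["--env-file=jobs/ci-example-e2e.env", "--extract=ci/latest"]
    #   }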
    def test_valid_job_config_json(self):
        """Validate jobs/config.json."""
        # bootstrap integration test scripts
        ignore = [
            'fake-failure',
            'fake-branch',
            'fake-pr',
            'random_job',
        ]

        self.load_prow_yaml(self.prow_config)
        config = config_sort.test_infra('jobs/config.json')
        owners = config_sort.test_infra('jobs/validOwners.json')
        with open(config) as fp, open(owners) as ownfp:
            config = json.loads(fp.read())
            valid_owners = json.loads(ownfp.read())
            for job in config:
                if job not in ignore:
                    self.assertTrue(job in self.prowjobs or job in self.realjobs,
                                    '%s must have a matching jenkins/prow entry' % job)

                # ownership assertions
                self.assertIn('sigOwners', config[job], job)
                self.assertIsInstance(config[job]['sigOwners'], list, job)
                self.assertTrue(config[job]['sigOwners'], job)  # non-empty
                owners = config[job]['sigOwners']
                for owner in owners:
                    self.assertIsInstance(owner, basestring, job)
                    self.assertIn(owner, valid_owners, job)

                # env assertions
                self.assertTrue('scenario' in config[job], job)
                scenario = config_sort.test_infra('scenarios/%s.py' % config[job]['scenario'])
                self.assertTrue(os.path.isfile(scenario), job)
                self.assertTrue(os.access(scenario, os.X_OK|os.R_OK), job)
                args = config[job].get('args', [])
                use_shared_build_in_args = False
                extract_in_args = False
                build_in_args = False
                for arg in args:
                    if arg.startswith('--use-shared-build'):
                        use_shared_build_in_args = True
                    elif arg.startswith('--build'):
                        build_in_args = True
                    elif arg.startswith('--extract'):
                        extract_in_args = True
                    match = re.match(r'--env-file=([^\"]+)\.env', arg)
                    if match:
                        env_path = match.group(1)
                        self.assertTrue(env_path.startswith('jobs/'), env_path)
                        path = config_sort.test_infra('%s.env' % env_path)
                        self.assertTrue(
                            os.path.isfile(path),
                            '%s does not exist for %s' % (path, job))
                    elif 'kops' not in job:
                        match = re.match(r'--cluster=([^\"]+)', arg)
                        if match:
                            cluster = match.group(1)
                            self.assertLessEqual(
                                len(cluster), 20,
                                'Job %r, --cluster should be 20 chars or fewer' % job
                                )
                # these args should not be combined:
                # --use-shared-build and (--build or --extract)
                self.assertFalse(use_shared_build_in_args and build_in_args)
                self.assertFalse(use_shared_build_in_args and extract_in_args)
                if config[job]['scenario'] == 'kubernetes_e2e':
                    if job in self.prowjobs:
                        for arg in args:
                            # --mode=local is default now
                            self.assertNotIn('--mode', arg, job)
                    else:
                        self.assertIn('--mode=docker', args, job)
                    for arg in args:
                        if "--env=" in arg:
                            self._check_env(job, arg.split("=", 1)[1])
                    if '--provider=gke' in args:
                        self.assertTrue('--deployment=gke' in args,
                                        '%s must use --deployment=gke' % job)
                        self.assertFalse(any('--gcp-master-image' in a for a in args),
                                         '%s cannot use --gcp-master-image on GKE' % job)
                        self.assertFalse(any('--gcp-nodes' in a for a in args),
                                         '%s cannot use --gcp-nodes on GKE' % job)
                    if '--deployment=gke' in args:
                        self.assertTrue(any('--gcp-node-image' in a for a in args), job)
                    self.assertNotIn('--charts-tests', args)  # Use --charts
                    if any('--check_version_skew' in a for a in args):
                        self.fail('Use --check-version-skew, not --check_version_skew in %s' % job)
                    if '--check-leaked-resources=true' in args:
                        self.fail('Use --check-leaked-resources (no value) in %s' % job)
                    if '--check-leaked-resources==false' in args:
                        self.fail(
                            'Remove --check-leaked-resources=false (default value) from %s' % job)
                    if (
                            '--env-file=jobs/pull-kubernetes-e2e.env' in args
                            and '--check-leaked-resources' in args):
                        self.fail('PR job %s should not check for resource leaks' % job)
                    # Consider deleting any job with --check-leaked-resources=false
                    if (
                            '--provider=gce' not in args
                            and '--provider=gke' not in args
                            and '--check-leaked-resources' in args
                            and 'generated' not in config[job].get('tags', [])):
                        self.fail('Only GCP jobs can --check-leaked-resources, not %s' % job)
                    if '--mode=local' in args:
                        self.fail('--mode=local is default now, drop that for %s' % job)

                    extracts = [a for a in args if '--extract=' in a]
                    shared_builds = [a for a in args if '--use-shared-build' in a]
                    node_e2e = [a for a in args if '--deployment=node' in a]
                    pull = job.startswith('pull-')
                    if shared_builds and extracts:
                        self.fail(('e2e jobs cannot have --use-shared-build'
                                   ' and --extract: %s %s') % (job, args))
                    elif not extracts and not shared_builds and not node_e2e:
                        self.fail(('e2e job needs --extract or'
                                   ' --use-shared-build: %s %s') % (job, args))
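
                    # Expected --extract count: 0 when using a shared build or
                    # for CI node-e2e jobs, 2 for version-skew style jobs
                    # (upgrade/skew/downgrade/rollback and the gce-canary job),
                    # and 1 for everything else.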
                    if shared_builds or node_e2e and not pull:
                        expected = 0
                    elif any(s in job for s in [
                            'upgrade', 'skew', 'downgrade', 'rollback',
                            'ci-kubernetes-e2e-gce-canary',
                    ]):
                        expected = 2
                    else:
                        expected = 1
                    if len(extracts) != expected:
                        self.fail('Wrong number of --extract args (%d != %d) in %s' % (
                            len(extracts), expected, job))

                    has_image_family = any(
                        [x for x in args if x.startswith('--image-family')])
                    has_image_project = any(
                        [x for x in args if x.startswith('--image-project')])
                    docker_mode = any(
                        [x for x in args if x.startswith('--mode=docker')])
                    if (
                            (has_image_family or has_image_project)
                            and docker_mode):
                        self.fail('--image-family / --image-project is not '
                                  'supported in docker mode: %s' % job)
                    if has_image_family != has_image_project:
                        self.fail('--image-family and --image-project must be '
                                  'both set or unset: %s' % job)

                    if job.startswith('pull-kubernetes-'):
                        self.assertIn('--cluster=', args)
                        if 'gke' in job:
                            stage = 'gs://kubernetes-release-dev/ci'
                            suffix = True
                        elif 'kubeadm' in job:
                            # kubeadm-based jobs use out-of-band .deb artifacts,
                            # not the --stage flag.
                            continue
                        else:
                            stage = 'gs://kubernetes-release-pull/ci/%s' % job
                            suffix = False
                        if not shared_builds:
                            self.assertIn('--stage=%s' % stage, args)
                        self.assertEquals(
                            suffix,
                            any('--stage-suffix=' in a for a in args),
                            ('--stage-suffix=', suffix, job, args))


    def test_valid_env(self):
        for job, job_path in self.jobs:
            with open(job_path) as fp:
                data = fp.read()
            if 'kops' in job:  # TODO(fejta): update this one too
                continue
            self.assertNotIn(
                'JENKINS_USE_LOCAL_BINARIES=',
                data,
                'Send --extract=local to config.json, not JENKINS_USE_LOCAL_BINARIES in %s' % job)
            self.assertNotIn(
                'JENKINS_USE_EXISTING_BINARIES=',
                data,
                'Send --extract=local to config.json, not JENKINS_USE_EXISTING_BINARIES in %s' % job)  # pylint: disable=line-too-long

    def test_only_jobs(self):
        """Ensure that everything in jobs/ is a valid job name and script."""
        for job, job_path in self.jobs:
            # Jobs should have simple names: letters, numbers, -, .
            self.assertTrue(re.match(r'[.0-9a-z-_]+.env', job), job)
            # Jobs should point to a real, executable file
            # Note: it is easy to forget to chmod +x
            self.assertTrue(os.path.isfile(job_path), job_path)
            self.assertFalse(os.path.islink(job_path), job_path)
            self.assertTrue(os.access(job_path, os.R_OK), job_path)
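
    # test_all_project_are_unique verifies that no two jobs share a
    # --gcp-project unless both map to the same allowed_list entry, and that
    # statically assigned projects never appear in the boskos pool.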
    def test_all_project_are_unique(self):
        # pylint: disable=line-too-long
        allowed_list = {
            # The cos image validation jobs intentionally share projects.
            'ci-kubernetes-e2e-gce-cosdev-k8sdev-default': 'ci-kubernetes-e2e-gce-cos*',
            'ci-kubernetes-e2e-gce-cosdev-k8sdev-serial': 'ci-kubernetes-e2e-gce-cos*',
            'ci-kubernetes-e2e-gce-cosdev-k8sdev-slow': 'ci-kubernetes-e2e-gce-cos*',
            'ci-kubernetes-e2e-gce-cosdev-k8sstable1-default': 'ci-kubernetes-e2e-gce-cos*',
            'ci-kubernetes-e2e-gce-cosdev-k8sstable1-serial': 'ci-kubernetes-e2e-gce-cos*',
            'ci-kubernetes-e2e-gce-cosdev-k8sstable1-slow': 'ci-kubernetes-e2e-gce-cos*',
            'ci-kubernetes-e2e-gce-cosdev-k8sbeta-default': 'ci-kubernetes-e2e-gce-cos*',
            'ci-kubernetes-e2e-gce-cosdev-k8sbeta-serial': 'ci-kubernetes-e2e-gce-cos*',
            'ci-kubernetes-e2e-gce-cosdev-k8sbeta-slow': 'ci-kubernetes-e2e-gce-cos*',
            'ci-kubernetes-e2e-gce-cosbeta-k8sdev-default': 'ci-kubernetes-e2e-gce-cos*',
            'ci-kubernetes-e2e-gce-cosbeta-k8sdev-serial': 'ci-kubernetes-e2e-gce-cos*',
            'ci-kubernetes-e2e-gce-cosbeta-k8sdev-slow': 'ci-kubernetes-e2e-gce-cos*',
            'ci-kubernetes-e2e-gce-cosbeta-k8sbeta-default': 'ci-kubernetes-e2e-gce-cos*',
            'ci-kubernetes-e2e-gce-cosbeta-k8sbeta-serial': 'ci-kubernetes-e2e-gce-cos*',
            'ci-kubernetes-e2e-gce-cosbeta-k8sbeta-slow': 'ci-kubernetes-e2e-gce-cos*',
            'ci-kubernetes-e2e-gce-cosbeta-k8sstable1-default': 'ci-kubernetes-e2e-gce-cos*',
            'ci-kubernetes-e2e-gce-cosbeta-k8sstable1-serial': 'ci-kubernetes-e2e-gce-cos*',
            'ci-kubernetes-e2e-gce-cosbeta-k8sstable1-slow': 'ci-kubernetes-e2e-gce-cos*',
            'ci-kubernetes-e2e-gce-cosbeta-k8sstable2-default': 'ci-kubernetes-e2e-gce-cos*',
            'ci-kubernetes-e2e-gce-cosbeta-k8sstable2-serial': 'ci-kubernetes-e2e-gce-cos*',
            'ci-kubernetes-e2e-gce-cosbeta-k8sstable2-slow': 'ci-kubernetes-e2e-gce-cos*',
            'ci-kubernetes-e2e-gce-cosbeta-k8sstable3-default': 'ci-kubernetes-e2e-gce-cos*',
            'ci-kubernetes-e2e-gce-cosbeta-k8sstable3-serial': 'ci-kubernetes-e2e-gce-cos*',
            'ci-kubernetes-e2e-gce-cosbeta-k8sstable3-slow': 'ci-kubernetes-e2e-gce-cos*',
            'ci-kubernetes-e2e-gce-cosstable1-k8sdev-default': 'ci-kubernetes-e2e-gce-cos*',
            'ci-kubernetes-e2e-gce-cosstable1-k8sdev-serial': 'ci-kubernetes-e2e-gce-cos*',
            'ci-kubernetes-e2e-gce-cosstable1-k8sdev-slow': 'ci-kubernetes-e2e-gce-cos*',
            'ci-kubernetes-e2e-gce-cosstable1-k8sbeta-default': 'ci-kubernetes-e2e-gce-cos*',
            'ci-kubernetes-e2e-gce-cosstable1-k8sbeta-serial': 'ci-kubernetes-e2e-gce-cos*',
            'ci-kubernetes-e2e-gce-cosstable1-k8sbeta-slow': 'ci-kubernetes-e2e-gce-cos*',
            'ci-kubernetes-e2e-gce-cosstable1-k8sstable1-default': 'ci-kubernetes-e2e-gce-cos*',
            'ci-kubernetes-e2e-gce-cosstable1-k8sstable1-serial': 'ci-kubernetes-e2e-gce-cos*',
            'ci-kubernetes-e2e-gce-cosstable1-k8sstable1-slow': 'ci-kubernetes-e2e-gce-cos*',
            'ci-kubernetes-e2e-gce-cosstable1-k8sstable2-default': 'ci-kubernetes-e2e-gce-cos*',
            'ci-kubernetes-e2e-gce-cosstable1-k8sstable2-serial': 'ci-kubernetes-e2e-gce-cos*',
            'ci-kubernetes-e2e-gce-cosstable1-k8sstable2-slow': 'ci-kubernetes-e2e-gce-cos*',
            'ci-kubernetes-e2e-gce-cosstable1-k8sstable3-default': 'ci-kubernetes-e2e-gce-cos*',
            'ci-kubernetes-e2e-gce-cosstable1-k8sstable3-serial': 'ci-kubernetes-e2e-gce-cos*',
            'ci-kubernetes-e2e-gce-cosstable1-k8sstable3-slow': 'ci-kubernetes-e2e-gce-cos*',

            # The ubuntu image validation jobs intentionally share projects.
            'ci-kubernetes-e2e-gce-ubuntudev-k8sdev-default': 'ci-kubernetes-e2e-gce-ubuntu*',
            'ci-kubernetes-e2e-gce-ubuntudev-k8sdev-serial': 'ci-kubernetes-e2e-gce-ubuntu*',
            'ci-kubernetes-e2e-gce-ubuntudev-k8sdev-slow': 'ci-kubernetes-e2e-gce-ubuntu*',
            'ci-kubernetes-e2e-gce-ubuntudev-k8sbeta-default': 'ci-kubernetes-e2e-gce-ubuntu*',
            'ci-kubernetes-e2e-gce-ubuntudev-k8sbeta-serial': 'ci-kubernetes-e2e-gce-ubuntu*',
            'ci-kubernetes-e2e-gce-ubuntudev-k8sbeta-slow': 'ci-kubernetes-e2e-gce-ubuntu*',
            'ci-kubernetes-e2e-gce-ubuntudev-k8sstable1-default': 'ci-kubernetes-e2e-gce-ubuntu*',
            'ci-kubernetes-e2e-gce-ubuntudev-k8sstable1-serial': 'ci-kubernetes-e2e-gce-ubuntu*',
            'ci-kubernetes-e2e-gce-ubuntudev-k8sstable1-slow': 'ci-kubernetes-e2e-gce-ubuntu*',
            'ci-kubernetes-e2e-gce-ubuntustable1-k8sdev-default': 'ci-kubernetes-e2e-gce-ubuntu*',
            'ci-kubernetes-e2e-gce-ubuntustable1-k8sdev-serial': 'ci-kubernetes-e2e-gce-ubuntu*',
            'ci-kubernetes-e2e-gce-ubuntustable1-k8sdev-slow': 'ci-kubernetes-e2e-gce-ubuntu*',
            'ci-kubernetes-e2e-gce-ubuntustable1-k8sstable1-default': 'ci-kubernetes-e2e-gce-ubuntu*',
            'ci-kubernetes-e2e-gce-ubuntustable1-k8sstable1-serial': 'ci-kubernetes-e2e-gce-ubuntu*',
            'ci-kubernetes-e2e-gce-ubuntustable1-k8sstable1-slow': 'ci-kubernetes-e2e-gce-ubuntu*',
            'ci-kubernetes-e2e-gce-ubuntustable1-k8sstable2-default': 'ci-kubernetes-e2e-gce-ubuntu*',
            'ci-kubernetes-e2e-gce-ubuntustable1-k8sstable2-serial': 'ci-kubernetes-e2e-gce-ubuntu*',
            'ci-kubernetes-e2e-gce-ubuntustable1-k8sstable2-slow': 'ci-kubernetes-e2e-gce-ubuntu*',
            'ci-kubernetes-e2e-gke-ubuntustable1-k8sstable1-alphafeatures': 'ci-kubernetes-e2e-gke-ubuntu*',
            'ci-kubernetes-e2e-gke-ubuntustable1-k8sstable1-autoscaling': 'ci-kubernetes-e2e-gke-ubuntu*',
            'ci-kubernetes-e2e-gke-ubuntustable1-k8sstable1-default': 'ci-kubernetes-e2e-gke-ubuntu*',
            'ci-kubernetes-e2e-gke-ubuntustable1-k8sstable1-flaky': 'ci-kubernetes-e2e-gke-ubuntu*',
            'ci-kubernetes-e2e-gke-ubuntustable1-k8sstable1-ingress': 'ci-kubernetes-e2e-gke-ubuntu*',
            'ci-kubernetes-e2e-gke-ubuntustable1-k8sstable1-reboot': 'ci-kubernetes-e2e-gke-ubuntu*',
            'ci-kubernetes-e2e-gke-ubuntustable1-k8sstable1-serial': 'ci-kubernetes-e2e-gke-ubuntu*',
            'ci-kubernetes-e2e-gke-ubuntustable1-k8sstable1-slow': 'ci-kubernetes-e2e-gke-ubuntu*',
            'ci-kubernetes-e2e-gke-ubuntustable1-k8sstable1-updown': 'ci-kubernetes-e2e-gke-ubuntu*',
            # The 1.5 and 1.6 scalability jobs intentionally share projects.
            'ci-kubernetes-e2e-gci-gce-scalability-release-1-7': 'ci-kubernetes-e2e-gci-gce-scalability-release-*',
            'ci-kubernetes-e2e-gci-gce-scalability-stable1': 'ci-kubernetes-e2e-gci-gce-scalability-release-*',
            'ci-kubernetes-e2e-gce-scalability': 'ci-kubernetes-e2e-gce-scalability-*',
            'ci-kubernetes-e2e-gce-scalability-canary': 'ci-kubernetes-e2e-gce-scalability-*',
            # TODO(fejta): remove these (found while migrating jobs)
            'ci-kubernetes-kubemark-100-gce': 'ci-kubernetes-kubemark-*',
            'ci-kubernetes-kubemark-5-gce': 'ci-kubernetes-kubemark-*',
            'ci-kubernetes-kubemark-5-gce-last-release': 'ci-kubernetes-kubemark-*',
            'ci-kubernetes-kubemark-high-density-100-gce': 'ci-kubernetes-kubemark-*',
            'ci-kubernetes-kubemark-gce-scale': 'ci-kubernetes-scale-*',
            'pull-kubernetes-kubemark-e2e-gce-big': 'ci-kubernetes-scale-*',
            'ci-kubernetes-e2e-gce-large-manual-up': 'ci-kubernetes-scale-*',
            'ci-kubernetes-e2e-gce-large-manual-down': 'ci-kubernetes-scale-*',
            'ci-kubernetes-e2e-gce-large-correctness': 'ci-kubernetes-scale-*',
            'ci-kubernetes-e2e-gce-large-performance': 'ci-kubernetes-scale-*',
            'ci-kubernetes-e2e-gce-scale-correctness': 'ci-kubernetes-scale-*',
            'ci-kubernetes-e2e-gce-scale-performance': 'ci-kubernetes-scale-*',
            'ci-kubernetes-e2e-gke-large-correctness': 'ci-kubernetes-scale-*',
            'ci-kubernetes-e2e-gke-large-performance': 'ci-kubernetes-scale-*',
            'ci-kubernetes-e2e-gke-large-deploy': 'ci-kubernetes-scale-*',
            'ci-kubernetes-e2e-gke-large-teardown': 'ci-kubernetes-scale-*',
            'ci-kubernetes-e2e-gke-scale-correctness': 'ci-kubernetes-scale-*',
            'ci-kubernetes-e2e-gce-federation': 'ci-kubernetes-federation-*',
            'pull-kubernetes-federation-e2e-gce': 'pull-kubernetes-federation-e2e-gce-*',
            'ci-kubernetes-pull-gce-federation-deploy': 'pull-kubernetes-federation-e2e-gce-*',
            'pull-kubernetes-federation-e2e-gce-canary': 'pull-kubernetes-federation-e2e-gce-*',
            'pull-kubernetes-e2e-gce': 'pull-kubernetes-e2e-gce-*',
            'pull-kubernetes-e2e-gce-canary': 'pull-kubernetes-e2e-gce-*',
            'ci-kubernetes-e2e-gce': 'ci-kubernetes-e2e-gce-*',
            'ci-kubernetes-e2e-gce-canary': 'ci-kubernetes-e2e-gce-*',
            'ci-kubernetes-e2e-gke-gpu': 'ci-kubernetes-e2e-gke-gpu-*',
            'pull-kubernetes-e2e-gke-gpu': 'ci-kubernetes-e2e-gke-gpu-*',
            'ci-kubernetes-node-kubelet-serial': 'ci-kubernetes-node-kubelet-*',
            'ci-kubernetes-node-kubelet-flaky': 'ci-kubernetes-node-kubelet-*',
            'ci-kubernetes-node-kubelet-conformance': 'ci-kubernetes-node-kubelet-*',
            'ci-kubernetes-node-kubelet-benchmark': 'ci-kubernetes-node-kubelet-*',
            'ci-kubernetes-node-kubelet': 'ci-kubernetes-node-kubelet-*',
        }
        for soak_prefix in [
                'ci-kubernetes-soak-gce-1.5',
                'ci-kubernetes-soak-gce-1-7',
                'ci-kubernetes-soak-gce-1.6',
                'ci-kubernetes-soak-gce-2',
                'ci-kubernetes-soak-gce',
                'ci-kubernetes-soak-gci-gce-1.5',
                'ci-kubernetes-soak-gce-gci',
                'ci-kubernetes-soak-gke-gci',
                'ci-kubernetes-soak-gce-federation',
                'ci-kubernetes-soak-gci-gce-stable1',
                'ci-kubernetes-soak-gci-gce-1.6',
                'ci-kubernetes-soak-gci-gce-1-7',
                'ci-kubernetes-soak-cos-docker-validation',
                'ci-kubernetes-soak-gke',
        ]:
            allowed_list['%s-deploy' % soak_prefix] = '%s-*' % soak_prefix
            allowed_list['%s-test' % soak_prefix] = '%s-*' % soak_prefix
        # pylint: enable=line-too-long
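        # Map each --gcp-project to the set of jobs (or allowed_list groups)
        # using it, and collect the boskos-managed project pool for comparison.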
        projects = collections.defaultdict(set)
        boskos = []
        with open(config_sort.test_infra('boskos/resources.json')) as fp:
            for rtype in json.loads(fp.read()):
                if 'project' in rtype['type']:
                    for name in rtype['names']:
                        boskos.append(name)

        with open(config_sort.test_infra('jobs/config.json')) as fp:
            job_config = json.load(fp)
            for job in job_config:
                project = ''
                cfg = job_config.get(job.rsplit('.', 1)[0], {})
                if cfg.get('scenario') == 'kubernetes_e2e':
                    for arg in cfg.get('args', []):
                        if not arg.startswith('--gcp-project='):
                            continue
                        project = arg.split('=', 1)[1]
                if project:
                    if project in boskos:
                        self.fail('Project %s cannot be in boskos/resources.json!' % project)
                    projects[project].add(allowed_list.get(job, job))

        duplicates = [(p, j) for p, j in projects.items() if len(j) > 1]
        if duplicates:
            self.fail('Jobs duplicate projects:\n %s' % (
                '\n '.join('%s: %s' % t for t in duplicates)))

    def test_jobs_do_not_source_shell(self):
        for job, job_path in self.jobs:
            if job.startswith('pull-'):
                continue  # No clean way to determine version
            with open(job_path) as fp:
                script = fp.read()
            self.assertFalse(re.search(r'\Wsource ', script), job)
            self.assertNotIn('\n. ', script, job)

    def test_all_bash_jobs_have_errexit(self):
        options = {
            'errexit',
            'nounset',
            'pipefail',
        }
        for job, job_path in self.jobs:
            if not job.endswith('.sh'):
                continue
            with open(job_path) as fp:
                lines = list(fp)
            for option in options:
                expected = 'set -o %s\n' % option
                self.assertIn(
                    expected, lines,
                    '%s not found in %s' % (expected, job_path))
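
    # _check_env enforces plain FOO=BAR lines in .env files: no inline comments,
    # quotes, variable expansion, or {} templating, and it rejects settings that
    # should instead be kubetest flags in jobs/config.json (for example,
    # GINKGO_TEST_ARGS=... must become --test_args=... per the list below).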
    def _check_env(self, job, setting):
        if not re.match(r'[0-9A-Z_]+=[^\n]*', setting):
            self.fail('[%r]: Env %r: needs to follow FOO=BAR pattern' % (job, setting))
        if '#' in setting:
            self.fail('[%r]: Env %r: No inline comments' % (job, setting))
        if '"' in setting or '\'' in setting:
            self.fail('[%r]: Env %r: No quote in env' % (job, setting))
        if '$' in setting:
            self.fail('[%r]: Env %r: Please resolve variables in env' % (job, setting))
        if '{' in setting or '}' in setting:
            self.fail('[%r]: Env %r: { and } are not allowed in env' % (job, setting))
        # also test for https://github.com/kubernetes/test-infra/issues/2829
        # TODO(fejta): sort this list
        black = [
            ('CHARTS_TEST=', '--charts-tests'),
            ('CLUSTER_IP_RANGE=', '--test_args=--cluster-ip-range=FOO'),
            ('CLOUDSDK_BUCKET=', '--gcp-cloud-sdk=gs://foo'),
            ('CLUSTER_NAME=', '--cluster=FOO'),
            ('E2E_CLEAN_START=', '--test_args=--clean-start=true'),
            ('E2E_DOWN=', '--down=true|false'),
            ('E2E_MIN_STARTUP_PODS=', '--test_args=--minStartupPods=FOO'),
            ('E2E_NAME=', '--cluster=whatever'),
            ('E2E_PUBLISH_PATH=', '--publish=gs://FOO'),
            ('E2E_REPORT_DIR=', '--test_args=--report-dir=FOO'),
            ('E2E_REPORT_PREFIX=', '--test_args=--report-prefix=FOO'),
            ('E2E_TEST=', '--test=true|false'),
            ('E2E_UPGRADE_TEST=', '--upgrade_args=FOO'),
            ('E2E_UP=', '--up=true|false'),
            ('E2E_OPT=', 'Send kubetest the flags directly'),
            ('FAIL_ON_GCP_RESOURCE_LEAK=', '--check-leaked-resources=true|false'),
            ('FEDERATION_DOWN=', '--down=true|false'),
            ('FEDERATION_UP=', '--up=true|false'),
            ('GINKGO_PARALLEL=', '--ginkgo-parallel=# (1 for serial)'),
            ('GINKGO_PARALLEL_NODES=', '--ginkgo-parallel=# (1 for serial)'),
            ('GINKGO_TEST_ARGS=', '--test_args=FOO'),
            ('GINKGO_UPGRADE_TEST_ARGS=', '--upgrade_args=FOO'),
            ('JENKINS_FEDERATION_PREFIX=', '--stage=gs://FOO'),
            ('JENKINS_GCI_PATCH_K8S=', 'Unused, see --extract docs'),
            ('JENKINS_PUBLISHED_VERSION=', '--extract=V'),
            ('JENKINS_PUBLISHED_SKEW_VERSION=', '--extract=V'),
            ('JENKINS_USE_SKEW_KUBECTL=', 'SKEW_KUBECTL=y'),
            ('JENKINS_USE_SKEW_TESTS=', '--skew'),
            ('JENKINS_SOAK_MODE', '--soak'),
            ('JENKINS_SOAK_PREFIX', '--stage=gs://FOO'),
            ('JENKINS_USE_EXISTING_BINARIES=', '--extract=local'),
            ('JENKINS_USE_LOCAL_BINARIES=', '--extract=none'),
            ('JENKINS_USE_SERVER_VERSION=', '--extract=gke'),
            ('JENKINS_USE_GCI_VERSION=', '--extract=gci/FAMILY'),
            ('JENKINS_USE_GCI_HEAD_IMAGE_FAMILY=', '--extract=gci/FAMILY'),
            ('KUBE_GKE_NETWORK=', '--gcp-network=FOO'),
            ('KUBE_GCE_NETWORK=', '--gcp-network=FOO'),
            ('KUBE_GCE_ZONE=', '--gcp-zone=FOO'),
            ('KUBEKINS_TIMEOUT=', '--timeout=XXm'),
            ('KUBEMARK_TEST_ARGS=', '--test_args=FOO'),
            ('KUBEMARK_TESTS=', '--test_args=--ginkgo.focus=FOO'),
            ('KUBEMARK_MASTER_SIZE=', '--kubemark-master-size=FOO'),
            ('KUBEMARK_NUM_NODES=', '--kubemark-nodes=FOO'),
            ('KUBE_OS_DISTRIBUTION=', '--gcp-node-image=FOO and --gcp-master-image=FOO'),
            ('KUBE_NODE_OS_DISTRIBUTION=', '--gcp-node-image=FOO'),
            ('KUBE_MASTER_OS_DISTRIBUTION=', '--gcp-master-image=FOO'),
            ('KUBERNETES_PROVIDER=', '--provider=FOO'),
            ('PERF_TESTS=', '--perf'),
            ('PROJECT=', '--gcp-project=FOO'),
            ('SKEW_KUBECTL=', '--test_args=--kubectl-path=FOO'),
            ('USE_KUBEMARK=', '--kubemark'),
            ('ZONE=', '--gcp-zone=FOO'),
        ]
        for env, fix in black:
            if 'kops' in job and env in [
                    'JENKINS_PUBLISHED_VERSION=',
                    'JENKINS_USE_LOCAL_BINARIES=',
                    'GINKGO_TEST_ARGS=',
                    'KUBERNETES_PROVIDER=',
            ]:
                continue  # TODO(fejta): migrate kops jobs
            if setting.startswith(env):
                self.fail('[%s]: Env %s: Convert %s to use %s in jobs/config.json' % (
                    job, setting, env, fix))

    def test_envs_no_export(self):
        for job, job_path in self.jobs:
            if not job.endswith('.env'):
                continue
            with open(job_path) as fp:
                lines = list(fp)
            for line in lines:
                line = line.strip()
                self.assertFalse(line.endswith('\\'))
                if not line:
                    continue
                if line.startswith('#'):
                    continue
                self._check_env(job, line)

    def test_envs_non_empty(self):
        bad = []
        for job, job_path in self.jobs:
            if not job.endswith('.env'):
                continue
            with open(job_path) as fp:
                lines = list(fp)
            for line in lines:
                line = line.strip()
                if line and not line.startswith('#'):
                    break
            else:
                bad.append(job)
        if bad:
            self.fail('%s is empty, please remove the file(s)' % bad)

    def test_no_bad_vars_in_jobs(self):
        """Searches for jobs that contain ${{VAR}}"""
        for job, job_path in self.jobs:
            with open(job_path) as fp:
                script = fp.read()
            bad_vars = re.findall(r'(\${{.+}})', script)
            if bad_vars:
                self.fail('Job %s contains bad bash variables: %s' % (job, ' '.join(bad_vars)))


if __name__ == '__main__':
    unittest.main()