#!/usr/bin/env python
# Copyright 2016 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.

"""Script to generate the majority of the JSON files in the src/testing/buildbot
directory. Maintaining these files by hand is too unwieldy.
"""
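
# At a high level, the script reads three Python-literal input files from this
# directory (the entries below are an illustrative sketch of their shapes, not
# the contents of the real files):
#
#   waterfalls.pyl            - one entry per waterfall, each with a 'name' and
#                               a 'machines' dict of testers and their
#                               'test_suites'
#   test_suites.pyl           - named suites: either a dict of tests, or a list
#                               of other suite names (a "composition" suite)
#   test_suite_exceptions.pyl - per-test 'remove_from', 'modifications' and
#                               'key_removals', keyed by tester name
#
# and writes one JSON file per waterfall, named after the waterfall's 'name'.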

import argparse
import ast
import collections
import copy
import json
import os
import string
import sys

THIS_DIR = os.path.dirname(os.path.abspath(__file__))


class BBGenErr(Exception):
  pass


class BaseGenerator(object):
  def __init__(self, bb_gen):
    self.bb_gen = bb_gen

  def generate(self, waterfall, name, config, input_tests):
    raise NotImplementedError()

  def sort(self, tests):
    raise NotImplementedError()


class GTestGenerator(BaseGenerator):
  def __init__(self, bb_gen):
    super(GTestGenerator, self).__init__(bb_gen)

  def generate(self, waterfall, name, config, input_tests):
    # The relative ordering of some of the tests is important to
    # minimize differences compared to the handwritten JSON files, since
    # Python's sorts are stable and there are some tests with the same
    # key (see gles2_conform_d3d9_test and similar variants). Avoid
    # losing the order by avoiding coalescing the dictionaries into one.
    gtests = []
    for test_name, test_config in sorted(input_tests.iteritems()):
      test = self.bb_gen.generate_gtest(
          waterfall, name, config, test_name, test_config)
      if test:
        # generate_gtest may veto the test generation on this tester.
        gtests.append(test)
    return gtests

  def sort(self, tests):
    def cmp_gtests(a, b):
      # Prefer to compare based on the "test" key.
      val = cmp(a['test'], b['test'])
      if val != 0:
        return val
      if 'name' in a and 'name' in b:
        return cmp(a['name'], b['name'])  # pragma: no cover
      if 'name' not in a and 'name' not in b:
        return 0  # pragma: no cover
      # Prefer to put variants of the same test after the first one.
      if 'name' in a:
        return 1
      # 'name' is in b.
      return -1  # pragma: no cover
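    # Illustrative ordering (shapes only, not real entries): a bare
    # {'test': 'foo_test'} sorts ahead of a variant like
    # {'test': 'foo_test', 'name': 'foo_test_alternate'}, since the two
    # compare equal on 'test' and only the variant carries a 'name'.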
    return sorted(tests, cmp=cmp_gtests)


class IsolatedScriptTestGenerator(BaseGenerator):
  def __init__(self, bb_gen):
    super(IsolatedScriptTestGenerator, self).__init__(bb_gen)

  def generate(self, waterfall, name, config, input_tests):
    isolated_scripts = []
    for test_name, test_config in sorted(input_tests.iteritems()):
      test = self.bb_gen.generate_isolated_script_test(
          waterfall, name, config, test_name, test_config)
      if test:
        isolated_scripts.append(test)
    return isolated_scripts

  def sort(self, tests):
    return sorted(tests, key=lambda x: x['name'])


class ScriptGenerator(BaseGenerator):
  def __init__(self, bb_gen):
    super(ScriptGenerator, self).__init__(bb_gen)

  def generate(self, waterfall, name, config, input_tests):
    scripts = []
    for test_name, test_config in sorted(input_tests.iteritems()):
      test = self.bb_gen.generate_script_test(
          waterfall, name, config, test_name, test_config)
      if test:
        scripts.append(test)
    return scripts

  def sort(self, tests):
    return sorted(tests, key=lambda x: x['name'])


class JUnitGenerator(BaseGenerator):
  def __init__(self, bb_gen):
    super(JUnitGenerator, self).__init__(bb_gen)

  def generate(self, waterfall, name, config, input_tests):
    scripts = []
    for test_name, test_config in sorted(input_tests.iteritems()):
      test = self.bb_gen.generate_junit_test(
          waterfall, name, config, test_name, test_config)
      if test:
        scripts.append(test)
    return scripts

  def sort(self, tests):
    return sorted(tests, key=lambda x: x['test'])


class CTSGenerator(BaseGenerator):
  def __init__(self, bb_gen):
    super(CTSGenerator, self).__init__(bb_gen)

  def generate(self, waterfall, name, config, input_tests):
    # These only contain one entry and it's the contents of the input tests'
    # dictionary, verbatim.
    cts_tests = []
    cts_tests.append(input_tests)
    return cts_tests

  def sort(self, tests):
    return tests


class InstrumentationTestGenerator(BaseGenerator):
  def __init__(self, bb_gen):
    super(InstrumentationTestGenerator, self).__init__(bb_gen)

  def generate(self, waterfall, name, config, input_tests):
    scripts = []
    for test_name, test_config in sorted(input_tests.iteritems()):
      test = self.bb_gen.generate_instrumentation_test(
          waterfall, name, config, test_name, test_config)
      if test:
        scripts.append(test)
    return scripts

  def sort(self, tests):
    return sorted(tests, key=lambda x: x['test'])


class BBJSONGenerator(object):
  def __init__(self):
    self.this_dir = THIS_DIR
    self.args = None
    self.waterfalls = None
    self.test_suites = None
    self.exceptions = None

  def generate_abs_file_path(self, relative_path):
    return os.path.join(self.this_dir, relative_path)  # pragma: no cover

  def read_file(self, relative_path):
    with open(self.generate_abs_file_path(
        relative_path)) as fp:  # pragma: no cover
      return fp.read()  # pragma: no cover

  def write_file(self, relative_path, contents):
    with open(self.generate_abs_file_path(
        relative_path), 'wb') as fp:  # pragma: no cover
      fp.write(contents)  # pragma: no cover

  def load_pyl_file(self, filename):
    try:
      return ast.literal_eval(self.read_file(filename))
    except (SyntaxError, ValueError) as e:  # pragma: no cover
      raise BBGenErr('Failed to parse pyl file "%s": %s' %
                     (filename, e))  # pragma: no cover

  def is_android(self, tester_config):
    return tester_config.get('os_type') == 'android'

  def get_exception_for_test(self, test_name, test_config):
    # gtests may have both "test" and "name" fields, and usually, if the "name"
    # field is specified, it means that the same test is being repurposed
    # multiple times with different command line arguments. To handle this case,
    # prefer to lookup per the "name" field of the test itself, as opposed to
    # the "test_name", which is actually the "test" field.
    if 'name' in test_config:
      return self.exceptions.get(test_config['name'])
    else:
      return self.exceptions.get(test_name)

  def should_run_on_tester(self, waterfall, tester_name, tester_config,
                           test_name, test_config):
    # TODO(kbr): until this script is merged with the GPU test generator, a few
    # arguments will be unused.
    del waterfall
    del tester_config
    # Currently, the only reason a test should not run on a given tester is that
    # it's in the exceptions. (Once the GPU waterfall generation script is
    # incorporated here, the rules will become more complex.)
    exception = self.get_exception_for_test(test_name, test_config)
    if not exception:
      return True
    remove_from = exception.get('remove_from')
    if not remove_from:
      # Having difficulty getting coverage for the next line
      return True  # pragma: no cover
    return tester_name not in remove_from

  def get_test_modifications(self, test, test_name, tester_name):
    exception = self.get_exception_for_test(test_name, test)
    if not exception:
      return None
    return exception.get('modifications', {}).get(tester_name)

  def get_test_key_removals(self, test_name, tester_name):
    exception = self.exceptions.get(test_name)
    if not exception:
      return []
    return exception.get('key_removals', {}).get(tester_name, [])
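
  # Illustrative shape of a test_suite_exceptions.pyl entry consumed by the
  # three helpers above (names and values are made up, not taken from the
  # real file):
  #
  #   'some_test_name': {
  #     'remove_from': ['Some Tester'],
  #     'modifications': {
  #       'Another Tester': {'args': ['--some-extra-flag']},
  #     },
  #     'key_removals': {
  #       'Third Tester': ['swarming'],
  #     },
  #   },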

  def dictionary_merge(self, a, b, path=None, update=True):
    """http://stackoverflow.com/questions/7204805/
        python-dictionaries-of-dictionaries-merge
    merges b into a
    """
    if path is None:
      path = []
    for key in b:
      if key in a:
        if isinstance(a[key], dict) and isinstance(b[key], dict):
          self.dictionary_merge(a[key], b[key], path + [str(key)])
        elif a[key] == b[key]:
          pass  # same leaf value
        elif isinstance(a[key], list) and isinstance(b[key], list):
          # TODO(kbr): this only works properly if the two arrays are
          # the same length, which is currently always the case in the
          # swarming dimension_sets that we have to merge. It will fail
          # to merge / override 'args' arrays which are different
          # length.
          #
          # Fundamentally we want different behavior for arrays of
          # dictionaries vs. arrays of strings.
          for idx in xrange(len(b[key])):
            try:
              a[key][idx] = self.dictionary_merge(a[key][idx], b[key][idx],
                                                  path + [str(key), str(idx)],
                                                  update=update)
            except (IndexError, TypeError):  # pragma: no cover
              raise BBGenErr('Error merging list keys ' + str(key) +
                             ' and indices ' + str(idx) + ' between ' +
                             str(a) + ' and ' + str(b))  # pragma: no cover
        elif update:  # pragma: no cover
          a[key] = b[key]  # pragma: no cover
        else:
          raise BBGenErr('Conflict at %s' % '.'.join(
              path + [str(key)]))  # pragma: no cover
      else:
        a[key] = b[key]
    return a
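
  # Sketch of the merge behavior on the kind of data this script handles
  # (values are made up): merging
  #   {'swarming': {'dimension_sets': [{'os': 'Linux'}], 'shards': 2}}
  # into
  #   {'swarming': {'dimension_sets': [{'gpu': '10de'}]}}
  # yields
  #   {'swarming': {'dimension_sets': [{'gpu': '10de', 'os': 'Linux'}],
  #                 'shards': 2}}
  # i.e. nested dicts are merged recursively, equal-length lists are merged
  # element-wise, and keys present on only one side are copied over.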

  def initialize_swarming_dictionary_for_test(self, generated_test,
                                              tester_config):
    if 'swarming' not in generated_test:
      generated_test['swarming'] = {}
    if not 'can_use_on_swarming_builders' in generated_test['swarming']:
      generated_test['swarming'].update({
        'can_use_on_swarming_builders': tester_config.get('use_swarming', True)
      })
    if 'swarming' in tester_config:
      if 'dimension_sets' not in generated_test['swarming']:
        generated_test['swarming']['dimension_sets'] = copy.deepcopy(
            tester_config['swarming']['dimension_sets'])
      self.dictionary_merge(generated_test['swarming'],
                            tester_config['swarming'])
    # Apply any Android-specific Swarming dimensions after the generic ones.
    if 'android_swarming' in generated_test:
      if self.is_android(tester_config):  # pragma: no cover
        self.dictionary_merge(
            generated_test['swarming'],
            generated_test['android_swarming'])  # pragma: no cover
      del generated_test['android_swarming']  # pragma: no cover

  def clean_swarming_dictionary(self, swarming_dict):
    # Clean out redundant entries from a test's "swarming" dictionary.
    # This is really only needed to retain 100% parity with the
    # handwritten JSON files, and can be removed once all the files are
    # autogenerated.
    if 'shards' in swarming_dict:
      if swarming_dict['shards'] == 1:  # pragma: no cover
        del swarming_dict['shards']  # pragma: no cover
    if 'hard_timeout' in swarming_dict:
      if swarming_dict['hard_timeout'] == 0:  # pragma: no cover
        del swarming_dict['hard_timeout']  # pragma: no cover
    if not swarming_dict['can_use_on_swarming_builders']:
      # Remove all other keys.
      for k in swarming_dict.keys():  # pragma: no cover
        if k != 'can_use_on_swarming_builders':  # pragma: no cover
          del swarming_dict[k]  # pragma: no cover
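
  # For instance (made-up values), {'can_use_on_swarming_builders': True,
  # 'shards': 1, 'hard_timeout': 0} is trimmed down to just
  # {'can_use_on_swarming_builders': True}, and a dictionary whose
  # 'can_use_on_swarming_builders' is False keeps only that key.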

  def update_and_cleanup_test(self, test, test_name, tester_name):
    # See if there are any exceptions that need to be merged into this
    # test's specification.
    modifications = self.get_test_modifications(test, test_name, tester_name)
    if modifications:
      test = self.dictionary_merge(test, modifications)
    for k in self.get_test_key_removals(test_name, tester_name):
      del test[k]
    if 'swarming' in test:
      self.clean_swarming_dictionary(test['swarming'])
    return test

  def generate_gtest(self, waterfall, tester_name, tester_config, test_name,
                     test_config):
    if not self.should_run_on_tester(
        waterfall, tester_name, tester_config, test_name, test_config):
      return None
    result = copy.deepcopy(test_config)
    if 'test' in result:
      result['name'] = test_name
    else:
      result['test'] = test_name
    self.initialize_swarming_dictionary_for_test(result, tester_config)
    if self.is_android(tester_config) and tester_config.get('use_swarming',
                                                            True):
      if not tester_config.get('skip_merge_script', False):
        result['merge'] = {
          'args': [
            '--bucket',
            'chromium-result-details',
            '--test-name',
            test_name
          ],
          'script': '//build/android/pylib/results/presentation/' \
              'test_results_presentation.py',
        }  # pragma: no cover
      result['swarming']['cipd_packages'] = [
        {
          'cipd_package': 'infra/tools/luci/logdog/butler/${platform}',
          'location': 'bin',
          'revision': 'git_revision:ff387eadf445b24c935f1cf7d6ddd279f8a6b04c',
        }
      ]
      if not tester_config.get('skip_output_links', False):
        result['swarming']['output_links'] = [
          {
            'link': [
              'https://luci-logdog.appspot.com/v/?s',
              '=android%2Fswarming%2Flogcats%2F',
              '${TASK_ID}%2F%2B%2Funified_logcats',
            ],
            'name': 'shard #${SHARD_INDEX} logcats',
          },
        ]
    result = self.update_and_cleanup_test(result, test_name, tester_name)
    return result

  def generate_isolated_script_test(self, waterfall, tester_name, tester_config,
                                    test_name, test_config):
    if not self.should_run_on_tester(waterfall, tester_name, tester_config,
                                     test_name, test_config):
      return None
    result = copy.deepcopy(test_config)
    result['isolate_name'] = result.get('isolate_name', test_name)
    result['name'] = test_name
    self.initialize_swarming_dictionary_for_test(result, tester_config)
    result = self.update_and_cleanup_test(result, test_name, tester_name)
    return result

  def generate_script_test(self, waterfall, tester_name, tester_config,
                           test_name, test_config):
    if not self.should_run_on_tester(waterfall, tester_name, tester_config,
                                     test_name, test_config):
      return None
    result = {
      'name': test_name,
      'script': test_config['script']
    }
    result = self.update_and_cleanup_test(result, test_name, tester_name)
    return result

  def generate_junit_test(self, waterfall, tester_name, tester_config,
                          test_name, test_config):
    if not self.should_run_on_tester(waterfall, tester_name, tester_config,
                                     test_name, test_config):
      return None
    result = {
      'test': test_name,
    }
    return result

  def generate_instrumentation_test(self, waterfall, tester_name, tester_config,
                                    test_name, test_config):
    if not self.should_run_on_tester(waterfall, tester_name, tester_config,
                                     test_name, test_config):
      return None
    result = copy.deepcopy(test_config)
    result['test'] = test_name
    return result

  def get_test_generator_map(self):
    return {
      'cts_tests': CTSGenerator(self),
      'gtest_tests': GTestGenerator(self),
      'instrumentation_tests': InstrumentationTestGenerator(self),
      'isolated_scripts': IsolatedScriptTestGenerator(self),
      'junit_tests': JUnitGenerator(self),
      'scripts': ScriptGenerator(self),
    }

  def check_composition_test_suites(self):
    # Pre-pass to catch errors reliably.
    for name, value in self.test_suites.iteritems():
      if isinstance(value, list):
        for entry in value:
          if isinstance(self.test_suites[entry], list):
            raise BBGenErr('Composition test suites may not refer to other ' \
                           'composition test suites (error found while ' \
                           'processing %s)' % name)

  def resolve_composition_test_suites(self):
    self.check_composition_test_suites()
    for name, value in self.test_suites.iteritems():
      if isinstance(value, list):
        # Resolve this to a dictionary.
        full_suite = {}
        for entry in value:
          suite = self.test_suites[entry]
          full_suite.update(suite)
        self.test_suites[name] = full_suite
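
  # Illustrative test_suites.pyl shapes (made-up names): a regular suite maps
  # a name to a dictionary of tests, while a composition suite maps a name to
  # a list of regular suite names:
  #
  #   'foo_gtests': {
  #     'base_unittests': {},
  #   },
  #   'bar_composition_suite': [
  #     'foo_gtests',
  #   ],
  #
  # resolve_composition_test_suites() replaces the list form with the merged
  # dictionary of its constituent suites.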

  def link_waterfalls_to_test_suites(self):
    for waterfall in self.waterfalls:
      for tester_name, tester in waterfall['machines'].iteritems():
        for suite, value in tester.get('test_suites', {}).iteritems():
          if not value in self.test_suites:
            # Hard / impossible to cover this in the unit test.
            raise self.unknown_test_suite(
                value, tester_name, waterfall['name'])  # pragma: no cover
          tester['test_suites'][suite] = self.test_suites[value]

  def load_configuration_files(self):
    self.waterfalls = self.load_pyl_file('waterfalls.pyl')
    self.test_suites = self.load_pyl_file('test_suites.pyl')
    self.exceptions = self.load_pyl_file('test_suite_exceptions.pyl')

  def resolve_configuration_files(self):
    self.resolve_composition_test_suites()
    self.link_waterfalls_to_test_suites()

  def unknown_test_suite(self, suite_name, bot_name, waterfall_name):
    return BBGenErr(
        'Test suite %s from machine %s on waterfall %s not present in ' \
        'test_suites.pyl' % (suite_name, bot_name, waterfall_name))

  def unknown_test_suite_type(self, suite_type, bot_name, waterfall_name):
    return BBGenErr(
        'Unknown test suite type ' + suite_type + ' in bot ' + bot_name +
        ' on waterfall ' + waterfall_name)

  def generate_waterfall_json(self, waterfall):
    all_tests = {}
    generator_map = self.get_test_generator_map()
    for name, config in waterfall['machines'].iteritems():
      tests = {}
      # Copy only well-understood entries in the machine's configuration
      # verbatim into the generated JSON.
      if 'additional_compile_targets' in config:
        tests['additional_compile_targets'] = config[
            'additional_compile_targets']
      for test_type, input_tests in config.get('test_suites', {}).iteritems():
        if test_type not in generator_map:
          raise self.unknown_test_suite_type(
              test_type, name, waterfall['name'])  # pragma: no cover
        test_generator = generator_map[test_type]
        tests[test_type] = test_generator.sort(test_generator.generate(
            waterfall, name, config, input_tests))
      all_tests[name] = tests
    all_tests['AAAAA1 AUTOGENERATED FILE DO NOT EDIT'] = {}
    all_tests['AAAAA2 See generate_buildbot_json.py to make changes'] = {}
    return json.dumps(all_tests, indent=2, separators=(',', ': '),
                      sort_keys=True) + '\n'
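
  # The resulting JSON maps each machine name to its generated test
  # dictionary, alongside two alphabetically-first sentinel keys, roughly
  # (contents illustrative):
  #
  #   {
  #     "AAAAA1 AUTOGENERATED FILE DO NOT EDIT": {},
  #     "AAAAA2 See generate_buildbot_json.py to make changes": {},
  #     "Some Tester": {
  #       "gtest_tests": [ ... ],
  #       "isolated_scripts": [ ... ]
  #     }
  #   }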

  def generate_waterfalls(self):  # pragma: no cover
    self.load_configuration_files()
    self.resolve_configuration_files()
    filters = self.args.waterfall_filters
    suffix = '.json'
    if self.args.new_files:
      suffix = '.new' + suffix
    for waterfall in self.waterfalls:
      should_gen = not filters or waterfall['name'] in filters
      if should_gen:
        self.write_file(waterfall['name'] + suffix,
                        self.generate_waterfall_json(waterfall))

  def check_input_file_consistency(self):
    self.load_configuration_files()
    self.check_composition_test_suites()
    # All test suites must be referenced.
    suites_seen = set()
    generator_map = self.get_test_generator_map()
    for waterfall in self.waterfalls:
      for bot_name, tester in waterfall['machines'].iteritems():
        for suite_type, suite in tester.get('test_suites', {}).iteritems():
          if suite_type not in generator_map:
            raise self.unknown_test_suite_type(suite_type, bot_name,
                                               waterfall['name'])
          if suite not in self.test_suites:
            raise self.unknown_test_suite(suite, bot_name, waterfall['name'])
          suites_seen.add(suite)
    # Since we didn't resolve the configuration files, this set
    # includes both composition test suites and regular ones.
    resolved_suites = set()
    for suite_name in suites_seen:
      suite = self.test_suites[suite_name]
      if isinstance(suite, list):
        for sub_suite in suite:
          resolved_suites.add(sub_suite)
      resolved_suites.add(suite_name)
    # At this point, every key in test_suites.pyl should be referenced.
    missing_suites = set(self.test_suites.keys()) - resolved_suites
    if missing_suites:
      raise BBGenErr('The following test suites were unreferenced by bots on '
                     'the waterfalls: ' + str(missing_suites))

    # All test suite exceptions must refer to bots on the waterfall.
    all_bots = set()
    missing_bots = set()
    for waterfall in self.waterfalls:
      for bot_name, tester in waterfall['machines'].iteritems():
        all_bots.add(bot_name)
    for exception in self.exceptions.itervalues():
      for removal in exception.get('remove_from', []):
        if removal not in all_bots:
          missing_bots.add(removal)
      for mod in exception.get('modifications', {}).iterkeys():
        if mod not in all_bots:
          missing_bots.add(mod)
    if missing_bots:
      raise BBGenErr('The following nonexistent machines were referenced in '
                     'the test suite exceptions: ' + str(missing_bots))

  def check_output_file_consistency(self, verbose=False):
    self.load_configuration_files()
    # All waterfalls must have been written by this script already.
    self.resolve_configuration_files()
    ungenerated_waterfalls = set()
    for waterfall in self.waterfalls:
      expected = self.generate_waterfall_json(waterfall)
      current = self.read_file(waterfall['name'] + '.json')
      if expected != current:
        ungenerated_waterfalls.add(waterfall['name'])
        if verbose:
          print ('Waterfall ' + waterfall['name'] +
                 ' did not have the following expected '
                 'contents:')  # pragma: no cover
          print expected  # pragma: no cover
    if ungenerated_waterfalls:
      raise BBGenErr('The following waterfalls have not been properly '
                     'autogenerated by generate_buildbot_json.py: ' +
                     str(ungenerated_waterfalls))

  def check_consistency(self, verbose=False):
    self.check_input_file_consistency()  # pragma: no cover
    self.check_output_file_consistency(verbose)  # pragma: no cover

  def parse_args(self, argv):  # pragma: no cover
    parser = argparse.ArgumentParser()
    parser.add_argument(
      '-c', '--check', action='store_true', help=
      'Do consistency checks of configuration and generated files and then '
      'exit. Used during presubmit. Causes the tool to not generate any files.')
    parser.add_argument(
      '-n', '--new-files', action='store_true', help=
      'Write output files as .new.json. Useful during development so old and '
      'new files can be looked at side-by-side.')
    parser.add_argument(
      'waterfall_filters', metavar='waterfalls', type=str, nargs='*',
      help='Optional list of waterfalls to generate.')
    self.args = parser.parse_args(argv)
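
  # Typical invocations (run from this directory; 'chromium.foo' stands in
  # for any waterfall name listed in waterfalls.pyl):
  #
  #   ./generate_buildbot_json.py                 # regenerate every waterfall
  #   ./generate_buildbot_json.py --check         # consistency checks only
  #   ./generate_buildbot_json.py -n chromium.foo # write chromium.foo.new.json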

  def main(self, argv):  # pragma: no cover
    self.parse_args(argv)
    if self.args.check:
      self.check_consistency()
    else:
      self.generate_waterfalls()
    return 0


if __name__ == "__main__":  # pragma: no cover
  generator = BBJSONGenerator()
  sys.exit(generator.main(sys.argv[1:]))