#!/usr/bin/env python
"""
compare.py - versatile benchmark output compare tool
"""

import argparse
from argparse import ArgumentParser
import json
import os
import sys
import unittest

import gbench
from gbench import util, report
from gbench.util import *
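
# Example invocations (the file and benchmark names below are illustrative,
# not files shipped with the tool):
#
#   Compare all output of two benchmark runs (executables or JSON dumps):
#       ./compare.py benchmarks baseline.json contender.json
#
#   Compare two filters within a single benchmark:
#       ./compare.py filters ./mybench BM_Old BM_New
#
#   Compare one filter of the first benchmark with another filter of the second:
#       ./compare.py benchmarksfiltered ./bench1 BM_Old ./bench2 BM_New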


def check_inputs(in1, in2, flags):
    """
    Perform checking on the user-provided inputs and diagnose any abnormalities.
    """
    in1_kind, in1_err = classify_input_file(in1)
    in2_kind, in2_err = classify_input_file(in2)
    output_file = find_benchmark_flag('--benchmark_out=', flags)
    output_type = find_benchmark_flag('--benchmark_out_format=', flags)
    if in1_kind == IT_Executable and in2_kind == IT_Executable and output_file:
        print(("WARNING: '--benchmark_out=%s' will be passed to both "
               "benchmarks, causing it to be overwritten") % output_file)
    if in1_kind == IT_JSON and in2_kind == IT_JSON and len(flags) > 0:
        print("WARNING: passing optional flags has no effect since both "
              "inputs are JSON")
    if output_type is not None and output_type != 'json':
        print(("ERROR: passing '--benchmark_out_format=%s' to 'compare.py'"
               " is not supported.") % output_type)
        sys.exit(1)


def create_parser():
    parser = ArgumentParser(
        description='versatile benchmark output compare tool')

    parser.add_argument(
        '-a',
        '--display_aggregates_only',
        dest='display_aggregates_only',
        action="store_true",
        help="If there are repetitions, by default we display everything: the "
             "actual runs and the computed aggregates. Sometimes it is "
             "desirable to view only the aggregates, e.g. when there are a lot "
             "of repetitions. Note that only the display is affected; "
             "internally, all the actual runs are still used, e.g. for the U test.")

    parser.add_argument(
        '--no-color',
        dest='color',
        default=True,
        action="store_false",
        help="Do not use colors in the terminal output"
    )

    parser.add_argument(
        '-d',
        '--dump_to_json',
        dest='dump_to_json',
        help="Additionally, dump benchmark comparison output to this file in JSON format.")

    utest = parser.add_argument_group()
    utest.add_argument(
        '--no-utest',
        dest='utest',
        default=True,
        action="store_false",
        help=("The tool can run a two-tailed Mann-Whitney U test with the null "
              "hypothesis that it is equally likely that a randomly selected "
              "value from one sample will be less than or greater than a "
              "randomly selected value from a second sample.\n"
              "WARNING: requires a **LARGE** number of repetitions "
              "(no less than {}) to be meaningful!\n"
              "The test is done by default if at least {} repetitions were "
              "performed.\n"
              "This option disables the U test.").format(
                  report.UTEST_OPTIMAL_REPETITIONS,
                  report.UTEST_MIN_REPETITIONS))
    alpha_default = 0.05
    utest.add_argument(
        "--alpha",
        dest='utest_alpha',
        default=alpha_default,
        type=float,
        help=("significance level alpha. If the calculated p-value is below "
              "this value, the result is said to be statistically significant "
              "and the null hypothesis is rejected.\n"
              "(default: %0.4f)") % alpha_default)

    subparsers = parser.add_subparsers(
        help='This tool has multiple modes of operation:',
        dest='mode')

    parser_a = subparsers.add_parser(
        'benchmarks',
        help='The simplest use case: compare all the output of these two benchmarks')
    baseline = parser_a.add_argument_group(
        'baseline', 'The benchmark baseline')
    baseline.add_argument(
        'test_baseline',
        metavar='test_baseline',
        type=argparse.FileType('r'),
        nargs=1,
        help='A benchmark executable or JSON output file')
    contender = parser_a.add_argument_group(
        'contender', 'The benchmark that will be compared against the baseline')
    contender.add_argument(
        'test_contender',
        metavar='test_contender',
        type=argparse.FileType('r'),
        nargs=1,
        help='A benchmark executable or JSON output file')
    parser_a.add_argument(
        'benchmark_options',
        metavar='benchmark_options',
        nargs=argparse.REMAINDER,
        help='Arguments to pass when running benchmark executables')

    parser_b = subparsers.add_parser(
        'filters',
        help='Compare one filter of a benchmark against another filter of the same benchmark')
    baseline = parser_b.add_argument_group(
        'baseline', 'The benchmark baseline')
    baseline.add_argument(
        'test',
        metavar='test',
        type=argparse.FileType('r'),
        nargs=1,
        help='A benchmark executable or JSON output file')
    baseline.add_argument(
        'filter_baseline',
        metavar='filter_baseline',
        type=str,
        nargs=1,
        help='The first filter, which will be used as the baseline')
    contender = parser_b.add_argument_group(
        'contender', 'The benchmark that will be compared against the baseline')
    contender.add_argument(
        'filter_contender',
        metavar='filter_contender',
        type=str,
        nargs=1,
        help='The second filter, which will be compared against the baseline')
    parser_b.add_argument(
        'benchmark_options',
        metavar='benchmark_options',
        nargs=argparse.REMAINDER,
        help='Arguments to pass when running benchmark executables')

    parser_c = subparsers.add_parser(
        'benchmarksfiltered',
        help='Compare one filter of the first benchmark with another filter of the second benchmark')
    baseline = parser_c.add_argument_group(
        'baseline', 'The benchmark baseline')
    baseline.add_argument(
        'test_baseline',
        metavar='test_baseline',
        type=argparse.FileType('r'),
        nargs=1,
        help='A benchmark executable or JSON output file')
    baseline.add_argument(
        'filter_baseline',
        metavar='filter_baseline',
        type=str,
        nargs=1,
        help='The first filter, which will be used as the baseline')
    contender = parser_c.add_argument_group(
        'contender', 'The benchmark that will be compared against the baseline')
    contender.add_argument(
        'test_contender',
        metavar='test_contender',
        type=argparse.FileType('r'),
        nargs=1,
        help='The second benchmark executable or JSON output file, which will be compared against the baseline')
    contender.add_argument(
        'filter_contender',
        metavar='filter_contender',
        type=str,
        nargs=1,
        help='The second filter, which will be compared against the baseline')
    parser_c.add_argument(
        'benchmark_options',
        metavar='benchmark_options',
        nargs=argparse.REMAINDER,
        help='Arguments to pass when running benchmark executables')

    return parser


def main():
    # Parse the command line flags
    parser = create_parser()
    args, unknown_args = parser.parse_known_args()
    if args.mode is None:
        parser.print_help()
        exit(1)
    assert not unknown_args
    benchmark_options = args.benchmark_options

    if args.mode == 'benchmarks':
        test_baseline = args.test_baseline[0].name
        test_contender = args.test_contender[0].name
        filter_baseline = ''
        filter_contender = ''

        # NOTE: if test_baseline == test_contender, you are analyzing the stdev

        description = 'Comparing %s to %s' % (test_baseline, test_contender)
    elif args.mode == 'filters':
        test_baseline = args.test[0].name
        test_contender = args.test[0].name
        filter_baseline = args.filter_baseline[0]
        filter_contender = args.filter_contender[0]

        # NOTE: if filter_baseline == filter_contender, you are analyzing the
        # stdev

        description = 'Comparing %s to %s (from %s)' % (
            filter_baseline, filter_contender, args.test[0].name)
    elif args.mode == 'benchmarksfiltered':
        test_baseline = args.test_baseline[0].name
        test_contender = args.test_contender[0].name
        filter_baseline = args.filter_baseline[0]
        filter_contender = args.filter_contender[0]

        # NOTE: if test_baseline == test_contender and
        # filter_baseline == filter_contender, you are analyzing the stdev

        description = 'Comparing %s (from %s) to %s (from %s)' % (
            filter_baseline, test_baseline, filter_contender, test_contender)
    else:
        # should never happen
        print("Unrecognized mode of operation: '%s'" % args.mode)
        parser.print_help()
        exit(1)

    check_inputs(test_baseline, test_contender, benchmark_options)

    if args.display_aggregates_only:
        benchmark_options += ['--benchmark_display_aggregates_only=true']

    options_baseline = []
    options_contender = []
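
    # When comparing two filters, restrict each run to its respective regex via
    # --benchmark_filter; otherwise both runs receive the flags exactly as given.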
    if filter_baseline and filter_contender:
        options_baseline = ['--benchmark_filter=%s' % filter_baseline]
        options_contender = ['--benchmark_filter=%s' % filter_contender]

    # Run the benchmarks and report the results
    json1 = json1_orig = gbench.util.sort_benchmark_results(gbench.util.run_or_load_benchmark(
        test_baseline, benchmark_options + options_baseline))
    json2 = json2_orig = gbench.util.sort_benchmark_results(gbench.util.run_or_load_benchmark(
        test_contender, benchmark_options + options_contender))

    # Now, filter the benchmarks so that the difference report can work
    if filter_baseline and filter_contender:
        replacement = '[%s vs. %s]' % (filter_baseline, filter_contender)
        json1 = gbench.report.filter_benchmark(
            json1_orig, filter_baseline, replacement)
        json2 = gbench.report.filter_benchmark(
            json2_orig, filter_contender, replacement)

    diff_report = gbench.report.get_difference_report(
        json1, json2, args.utest)
    output_lines = gbench.report.print_difference_report(
        diff_report,
        args.display_aggregates_only,
        args.utest, args.utest_alpha, args.color)
    print(description)
    for ln in output_lines:
        print(ln)

    # Optionally, diff and output to JSON
    if args.dump_to_json is not None:
        with open(args.dump_to_json, 'w') as f_json:
            json.dump(diff_report, f_json)
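

# The tests below exercise only the argument parser. To run them instead of the
# tool, temporarily re-enable the unittest.main() call in the __main__ block at
# the bottom, or invoke something like `python -m unittest compare.TestParser`
# from this directory (the exact command depends on your checkout layout).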
class TestParser(unittest.TestCase):
    def setUp(self):
        self.parser = create_parser()
        testInputs = os.path.join(
            os.path.dirname(
                os.path.realpath(__file__)),
            'gbench',
            'Inputs')
        self.testInput0 = os.path.join(testInputs, 'test1_run1.json')
        self.testInput1 = os.path.join(testInputs, 'test1_run2.json')

    def test_benchmarks_basic(self):
        parsed = self.parser.parse_args(
            ['benchmarks', self.testInput0, self.testInput1])
        self.assertFalse(parsed.display_aggregates_only)
        self.assertTrue(parsed.utest)
        self.assertEqual(parsed.mode, 'benchmarks')
        self.assertEqual(parsed.test_baseline[0].name, self.testInput0)
        self.assertEqual(parsed.test_contender[0].name, self.testInput1)
        self.assertFalse(parsed.benchmark_options)

    def test_benchmarks_basic_without_utest(self):
        parsed = self.parser.parse_args(
            ['--no-utest', 'benchmarks', self.testInput0, self.testInput1])
        self.assertFalse(parsed.display_aggregates_only)
        self.assertFalse(parsed.utest)
        self.assertEqual(parsed.utest_alpha, 0.05)
        self.assertEqual(parsed.mode, 'benchmarks')
        self.assertEqual(parsed.test_baseline[0].name, self.testInput0)
        self.assertEqual(parsed.test_contender[0].name, self.testInput1)
        self.assertFalse(parsed.benchmark_options)

    def test_benchmarks_basic_display_aggregates_only(self):
        parsed = self.parser.parse_args(
            ['-a', 'benchmarks', self.testInput0, self.testInput1])
        self.assertTrue(parsed.display_aggregates_only)
        self.assertTrue(parsed.utest)
        self.assertEqual(parsed.mode, 'benchmarks')
        self.assertEqual(parsed.test_baseline[0].name, self.testInput0)
        self.assertEqual(parsed.test_contender[0].name, self.testInput1)
        self.assertFalse(parsed.benchmark_options)

    def test_benchmarks_basic_with_utest_alpha(self):
        parsed = self.parser.parse_args(
            ['--alpha=0.314', 'benchmarks', self.testInput0, self.testInput1])
        self.assertFalse(parsed.display_aggregates_only)
        self.assertTrue(parsed.utest)
        self.assertEqual(parsed.utest_alpha, 0.314)
        self.assertEqual(parsed.mode, 'benchmarks')
        self.assertEqual(parsed.test_baseline[0].name, self.testInput0)
        self.assertEqual(parsed.test_contender[0].name, self.testInput1)
        self.assertFalse(parsed.benchmark_options)

    def test_benchmarks_basic_without_utest_with_utest_alpha(self):
        parsed = self.parser.parse_args(
            ['--no-utest', '--alpha=0.314', 'benchmarks',
             self.testInput0, self.testInput1])
        self.assertFalse(parsed.display_aggregates_only)
        self.assertFalse(parsed.utest)
        self.assertEqual(parsed.utest_alpha, 0.314)
        self.assertEqual(parsed.mode, 'benchmarks')
        self.assertEqual(parsed.test_baseline[0].name, self.testInput0)
        self.assertEqual(parsed.test_contender[0].name, self.testInput1)
        self.assertFalse(parsed.benchmark_options)

    def test_benchmarks_with_remainder(self):
        parsed = self.parser.parse_args(
            ['benchmarks', self.testInput0, self.testInput1, 'd'])
        self.assertFalse(parsed.display_aggregates_only)
        self.assertTrue(parsed.utest)
        self.assertEqual(parsed.mode, 'benchmarks')
        self.assertEqual(parsed.test_baseline[0].name, self.testInput0)
        self.assertEqual(parsed.test_contender[0].name, self.testInput1)
        self.assertEqual(parsed.benchmark_options, ['d'])

    def test_benchmarks_with_remainder_after_doubleminus(self):
        parsed = self.parser.parse_args(
            ['benchmarks', self.testInput0, self.testInput1, '--', 'e'])
        self.assertFalse(parsed.display_aggregates_only)
        self.assertTrue(parsed.utest)
        self.assertEqual(parsed.mode, 'benchmarks')
        self.assertEqual(parsed.test_baseline[0].name, self.testInput0)
        self.assertEqual(parsed.test_contender[0].name, self.testInput1)
        self.assertEqual(parsed.benchmark_options, ['e'])

    def test_filters_basic(self):
        parsed = self.parser.parse_args(
            ['filters', self.testInput0, 'c', 'd'])
        self.assertFalse(parsed.display_aggregates_only)
        self.assertTrue(parsed.utest)
        self.assertEqual(parsed.mode, 'filters')
        self.assertEqual(parsed.test[0].name, self.testInput0)
        self.assertEqual(parsed.filter_baseline[0], 'c')
        self.assertEqual(parsed.filter_contender[0], 'd')
        self.assertFalse(parsed.benchmark_options)

    def test_filters_with_remainder(self):
        parsed = self.parser.parse_args(
            ['filters', self.testInput0, 'c', 'd', 'e'])
        self.assertFalse(parsed.display_aggregates_only)
        self.assertTrue(parsed.utest)
        self.assertEqual(parsed.mode, 'filters')
        self.assertEqual(parsed.test[0].name, self.testInput0)
        self.assertEqual(parsed.filter_baseline[0], 'c')
        self.assertEqual(parsed.filter_contender[0], 'd')
        self.assertEqual(parsed.benchmark_options, ['e'])

    def test_filters_with_remainder_after_doubleminus(self):
        parsed = self.parser.parse_args(
            ['filters', self.testInput0, 'c', 'd', '--', 'f'])
        self.assertFalse(parsed.display_aggregates_only)
        self.assertTrue(parsed.utest)
        self.assertEqual(parsed.mode, 'filters')
        self.assertEqual(parsed.test[0].name, self.testInput0)
        self.assertEqual(parsed.filter_baseline[0], 'c')
        self.assertEqual(parsed.filter_contender[0], 'd')
        self.assertEqual(parsed.benchmark_options, ['f'])

    def test_benchmarksfiltered_basic(self):
        parsed = self.parser.parse_args(
            ['benchmarksfiltered', self.testInput0, 'c', self.testInput1, 'e'])
        self.assertFalse(parsed.display_aggregates_only)
        self.assertTrue(parsed.utest)
        self.assertEqual(parsed.mode, 'benchmarksfiltered')
        self.assertEqual(parsed.test_baseline[0].name, self.testInput0)
        self.assertEqual(parsed.filter_baseline[0], 'c')
        self.assertEqual(parsed.test_contender[0].name, self.testInput1)
        self.assertEqual(parsed.filter_contender[0], 'e')
        self.assertFalse(parsed.benchmark_options)

    def test_benchmarksfiltered_with_remainder(self):
        parsed = self.parser.parse_args(
            ['benchmarksfiltered', self.testInput0, 'c',
             self.testInput1, 'e', 'f'])
        self.assertFalse(parsed.display_aggregates_only)
        self.assertTrue(parsed.utest)
        self.assertEqual(parsed.mode, 'benchmarksfiltered')
        self.assertEqual(parsed.test_baseline[0].name, self.testInput0)
        self.assertEqual(parsed.filter_baseline[0], 'c')
        self.assertEqual(parsed.test_contender[0].name, self.testInput1)
        self.assertEqual(parsed.filter_contender[0], 'e')
        self.assertEqual(parsed.benchmark_options[0], 'f')

    def test_benchmarksfiltered_with_remainder_after_doubleminus(self):
        parsed = self.parser.parse_args(
            ['benchmarksfiltered', self.testInput0, 'c',
             self.testInput1, 'e', '--', 'g'])
        self.assertFalse(parsed.display_aggregates_only)
        self.assertTrue(parsed.utest)
        self.assertEqual(parsed.mode, 'benchmarksfiltered')
        self.assertEqual(parsed.test_baseline[0].name, self.testInput0)
        self.assertEqual(parsed.filter_baseline[0], 'c')
        self.assertEqual(parsed.test_contender[0].name, self.testInput1)
        self.assertEqual(parsed.filter_contender[0], 'e')
        self.assertEqual(parsed.benchmark_options[0], 'g')


if __name__ == '__main__':
    # unittest.main()
    main()

# vim: tabstop=4 expandtab shiftwidth=4 softtabstop=4
# kate: tab-width: 4; replace-tabs on; indent-width 4; tab-indents: off;
# kate: indent-mode python; remove-trailing-spaces modified;