// executor.go
  1. package main
  2. import (
  3. "flag"
  4. "log"
  5. "math"
  6. "os"
  7. "runtime"
  8. "strconv"
  9. "sync"
  10. "sync/atomic"
  11. "time"
  12. )
  13. func init() {
  14. log.SetFlags(0)
  15. }
  16. func main() {
  17. parallelism := flag.Int("parallelism", runtime.NumCPU(), "Number of test cases to run in parallel")
  18. goFlag := flag.Bool("go", false, "Run go test harness")
  19. ccFlag := flag.Bool("cc", false, "Run c++ test harness")
  20. javaFlag := flag.Bool("java", false, "Run java test harness")
  21. pythonFlag := flag.Bool("python", false, "Run python test harness")
  22. externalHarnessFlag := flag.String("external_harness", "", "Path to a binary to be executed as an external test harness")
  23. flag.Parse()
  24. test_cases := shardTestCases(TestCases)
  25. start := time.Now()
  26. harnesses := Harnesses(*goFlag, *ccFlag, *javaFlag, *pythonFlag, *externalHarnessFlag)
  27. successes, failures, skips := run(*parallelism, harnesses, test_cases)
  28. log.Printf("Successes: %d | Failures: %d | Skips: %d (%v)",
  29. successes, failures, skips, time.Since(start))
  30. if failures > 0 {
  31. os.Exit(1)
  32. }
  33. }
  34. func shardTestCases(test_cases []TestCase) []TestCase {
  35. // Support Bazel test sharding by slicing up the list of test cases if requested.
  36. shard_count, err := strconv.Atoi(os.Getenv("TEST_TOTAL_SHARDS"))
  37. if err != nil {
  38. return test_cases
  39. }
  40. shard_index, err := strconv.Atoi(os.Getenv("TEST_SHARD_INDEX"))
  41. if err != nil {
  42. return test_cases
  43. }
  44. // Bazel expects that the test will create or modify the file with the provided name to show that it supports sharding.
  45. status_file := os.Getenv("TEST_SHARD_STATUS_FILE")
  46. if status_file == "" {
  47. return test_cases
  48. }
  49. if file, err := os.Create(status_file); err != nil {
  50. return test_cases
  51. } else {
  52. file.Close()
  53. }
  54. shard_length := int(math.Ceil(float64(len(test_cases)) / float64(shard_count)))
  55. shard_start := shard_index * shard_length
  56. shard_end := (shard_index + 1) * shard_length
  57. if shard_end >= len(test_cases) {
  58. shard_end = len(test_cases)
  59. }
  60. return test_cases[shard_start:shard_end]
  61. }
  62. func run(parallelism int, harnesses []Harness, test_cases []TestCase) (successes, failures, skips uint64) {
  63. wg := new(sync.WaitGroup)
  64. if parallelism <= 0 {
  65. panic("Parallelism must be > 0")
  66. }
  67. if len(harnesses) == 0 {
  68. panic("At least one harness must be selected with a flag")
  69. }
  70. wg.Add(parallelism)
  71. in := make(chan TestCase)
  72. out := make(chan TestResult)
  73. done := make(chan struct{})
  74. for i := 0; i < parallelism; i++ {
  75. go Work(wg, in, out, harnesses)
  76. }
  77. go func() {
  78. for res := range out {
  79. if res.Skipped {
  80. atomic.AddUint64(&skips, 1)
  81. } else if res.OK {
  82. atomic.AddUint64(&successes, 1)
  83. } else {
  84. atomic.AddUint64(&failures, 1)
  85. }
  86. }
  87. close(done)
  88. }()
  89. for _, test := range test_cases {
  90. in <- test
  91. }
  92. close(in)
  93. wg.Wait()
  94. close(out)
  95. <-done
  96. return
  97. }