jobset.py 21 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359360361362363364365366367368369370371372373374375376377378379380381382383384385386387388389390391392393394395396397398399400401402403404405406407408409410411412413414415416417418419420421422423424425426427428429430431432433434435436437438439440441442443444445446447448449450451452453454455456457458459460461462463464465466467468469470471472473474475476477478479480481482483484485486487488489490491492493494495496497498499500501502503504505506507508509510511512513514515516517518519520521522523524525526527528529530531532533534535536537538539540541542543544545546547548549550551552553554555556557558559560561562563564565566567568569570571572573574575576577578579580581582583584585586587588589590591592593594595596597598599600
  1. # Copyright 2015 gRPC authors.
  2. #
  3. # Licensed under the Apache License, Version 2.0 (the "License");
  4. # you may not use this file except in compliance with the License.
  5. # You may obtain a copy of the License at
  6. #
  7. # http://www.apache.org/licenses/LICENSE-2.0
  8. #
  9. # Unless required by applicable law or agreed to in writing, software
  10. # distributed under the License is distributed on an "AS IS" BASIS,
  11. # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  12. # See the License for the specific language governing permissions and
  13. # limitations under the License.
  14. """Run a group of subprocesses and then finish."""
  15. import errno
  16. import logging
  17. import multiprocessing
  18. import os
  19. import platform
  20. import re
  21. import signal
  22. import subprocess
  23. import sys
  24. import tempfile
  25. import time
# cpu cost measurement
# When True, Job.start() wraps the command line with `time -p` so CPU usage
# can later be parsed out of the job's output (see Job.state()).
measure_cpu_costs = False
# Default cap on concurrently running jobs when the caller does not specify one.
_DEFAULT_MAX_JOBS = 16 * multiprocessing.cpu_count()
# Maximum number of bytes of job's stdout that will be stored in the result.
# Only last N bytes of stdout will be kept if the actual output longer.
_MAX_RESULT_SIZE = 64 * 1024
  32. # NOTE: If you change this, please make sure to test reviewing the
  33. # github PR with http://reviewable.io, which is known to add UTF-8
  34. # characters to the PR description, which leak into the environment here
  35. # and cause failures.
  36. def strip_non_ascii_chars(s):
  37. return ''.join(c for c in s if ord(c) < 128)
  38. def sanitized_environment(env):
  39. sanitized = {}
  40. for key, value in list(env.items()):
  41. sanitized[strip_non_ascii_chars(key)] = strip_non_ascii_chars(value)
  42. return sanitized
  43. def platform_string():
  44. if platform.system() == 'Windows':
  45. return 'windows'
  46. elif platform.system()[:7] == 'MSYS_NT':
  47. return 'windows'
  48. elif platform.system() == 'Darwin':
  49. return 'mac'
  50. elif platform.system() == 'Linux':
  51. return 'linux'
  52. else:
  53. return 'posix'
# setup a signal handler so that signal.pause registers 'something'
# when a child finishes
# not using futures and threading to avoid a dependency on subprocess32
if platform_string() == 'windows':
    # Windows has no SIGCHLD/SIGALRM; Jobset.reap polls with time.sleep instead.
    pass
else:

    def alarm_handler(unused_signum, unused_frame):
        # No-op: the handler exists only so SIGALRM interrupts signal.pause().
        pass

    # A child exiting delivers SIGCHLD, which wakes signal.pause() in reap().
    signal.signal(signal.SIGCHLD, lambda unused_signum, unused_frame: None)
    signal.signal(signal.SIGALRM, alarm_handler)
# Sentinel objects for the internal Job state machine; compared by identity.
_SUCCESS = object()
_FAILURE = object()
_RUNNING = object()
_KILLED = object()
# Color name -> [ANSI color code, bold flag] used when stdout is a tty.
_COLORS = {
    'red': [31, 0],
    'green': [32, 0],
    'yellow': [33, 0],
    'lightgray': [37, 0],
    'gray': [30, 1],
    'purple': [35, 0],
    'cyan': [36, 0]
}
# ANSI escapes: move the cursor to column 0, and clear the current line.
_BEGINNING_OF_LINE = '\x1b[0G'
_CLEAR_LINE = '\x1b[2K'
# Color used for each status tag printed by message().
_TAG_COLOR = {
    'FAILED': 'red',
    'FLAKE': 'purple',
    'TIMEOUT_FLAKE': 'purple',
    'WARNING': 'yellow',
    'TIMEOUT': 'red',
    'PASSED': 'green',
    'START': 'gray',
    'WAITING': 'yellow',
    'SUCCESS': 'green',
    'IDLE': 'gray',
    'SKIPPED': 'cyan'
}
# Log line layout used on the plain-logging (windows / non-tty) path.
_FORMAT = '%(asctime)-15s %(message)s'
logging.basicConfig(level=logging.INFO, format=_FORMAT)
  94. def eintr_be_gone(fn):
  95. """Run fn until it doesn't stop because of EINTR"""
  96. while True:
  97. try:
  98. return fn()
  99. except IOError as e:
  100. if e.errno != errno.EINTR:
  101. raise
def message(tag, msg, explanatory_text=None, do_newline=False):
    """Print a status line (colored on a tty, via logging otherwise).

    Consecutive identical (tag, msg) pairs without explanatory text are
    de-duplicated so repeated progress updates do not spam the output.
    """
    if message.old_tag == tag and message.old_msg == msg and not explanatory_text:
        return
    message.old_tag = tag
    message.old_msg = msg
    if explanatory_text:
        if isinstance(explanatory_text, bytes):
            # Job output is read as bytes; decode leniently for display.
            explanatory_text = explanatory_text.decode('utf8', errors='replace')
    while True:
        try:
            if platform_string() == 'windows' or not sys.stdout.isatty():
                # No ANSI escape support: fall back to plain logging.
                if explanatory_text:
                    logging.info(explanatory_text)
                logging.info('%s: %s', tag, msg)
            else:
                # Rewrite the current terminal line in place with a colored tag.
                sys.stdout.write(
                    '%s%s%s\x1b[%d;%dm%s\x1b[0m: %s%s' %
                    (_BEGINNING_OF_LINE, _CLEAR_LINE, '\n%s' %
                     explanatory_text if explanatory_text is not None else '',
                     _COLORS[_TAG_COLOR[tag]][1], _COLORS[_TAG_COLOR[tag]][0],
                     tag, msg, '\n'
                     if do_newline or explanatory_text is not None else ''))
                sys.stdout.flush()
            return
        except IOError as e:
            # Retry writes interrupted by EINTR (e.g. SIGCHLD from children).
            if e.errno != errno.EINTR:
                raise


# De-duplication state for message(), stored as function attributes.
message.old_tag = ''
message.old_msg = ''
  131. def which(filename):
  132. if '/' in filename:
  133. return filename
  134. for path in os.environ['PATH'].split(os.pathsep):
  135. if os.path.exists(os.path.join(path, filename)):
  136. return os.path.join(path, filename)
  137. raise Exception('%s not found' % filename)
  138. class JobSpec(object):
  139. """Specifies what to run for a job."""
  140. def __init__(self,
  141. cmdline,
  142. shortname=None,
  143. environ=None,
  144. cwd=None,
  145. shell=False,
  146. timeout_seconds=5 * 60,
  147. flake_retries=0,
  148. timeout_retries=0,
  149. kill_handler=None,
  150. cpu_cost=1.0,
  151. verbose_success=False,
  152. logfilename=None):
  153. """
  154. Arguments:
  155. cmdline: a list of arguments to pass as the command line
  156. environ: a dictionary of environment variables to set in the child process
  157. kill_handler: a handler that will be called whenever job.kill() is invoked
  158. cpu_cost: number of cores per second this job needs
  159. logfilename: use given file to store job's output, rather than using a temporary file
  160. """
  161. if environ is None:
  162. environ = {}
  163. self.cmdline = cmdline
  164. self.environ = environ
  165. self.shortname = cmdline[0] if shortname is None else shortname
  166. self.cwd = cwd
  167. self.shell = shell
  168. self.timeout_seconds = timeout_seconds
  169. self.flake_retries = flake_retries
  170. self.timeout_retries = timeout_retries
  171. self.kill_handler = kill_handler
  172. self.cpu_cost = cpu_cost
  173. self.verbose_success = verbose_success
  174. self.logfilename = logfilename
  175. if self.logfilename and self.flake_retries != 0 and self.timeout_retries != 0:
  176. # Forbidden to avoid overwriting the test log when retrying.
  177. raise Exception(
  178. 'Cannot use custom logfile when retries are enabled')
  179. def identity(self):
  180. return '%r %r' % (self.cmdline, self.environ)
  181. def __hash__(self):
  182. return hash(self.identity())
  183. def __cmp__(self, other):
  184. return self.identity() == other.identity()
  185. def __lt__(self, other):
  186. return self.identity() < other.identity()
  187. def __repr__(self):
  188. return 'JobSpec(shortname=%s, cmdline=%s)' % (self.shortname,
  189. self.cmdline)
  190. def __str__(self):
  191. return '%s: %s %s' % (self.shortname, ' '.join(
  192. '%s=%s' % kv for kv in list(self.environ.items())), ' '.join(
  193. self.cmdline))
class JobResult(object):
    """Aggregated outcome of one job, across all of its retries."""

    def __init__(self):
        # Final status: 'UNKNOWN' until set; becomes 'PASSED', 'FAILED',
        # 'TIMEOUT' (Job.state) or 'SKIPPED' (Jobset.start / run).
        self.state = 'UNKNOWN'
        # Exit code of the last finished process; -1 until one completes.
        self.returncode = -1
        # Wall-clock duration of the last attempt, in seconds.
        self.elapsed_time = 0
        # Number of failed attempts (includes flakes and timeouts).
        self.num_failures = 0
        # Total retries performed (flake retries + timeout retries).
        self.retries = 0
        # Tail of the job's stdout (at most _MAX_RESULT_SIZE bytes).
        self.message = ''
        # CPU cores: declared cost vs value measured via `time -p`
        # (only meaningful when measure_cpu_costs is enabled).
        self.cpu_estimated = 1
        self.cpu_measured = 1
  204. def read_from_start(f):
  205. f.seek(0)
  206. return f.read()
class Job(object):
    """Manages one job."""

    def __init__(self,
                 spec,
                 newline_on_success,
                 travis,
                 add_env,
                 quiet_success=False):
        """Record the run configuration and immediately start the process.

        add_env is copied, then overlaid on top of os.environ and the
        spec's own environ when the subprocess is launched (see start()).
        """
        self._spec = spec
        self._newline_on_success = newline_on_success
        self._travis = travis
        self._add_env = add_env.copy()
        self._retries = 0
        self._timeout_retries = 0
        self._suppress_failure_message = False
        self._quiet_success = quiet_success
        if not self._quiet_success:
            message('START', spec.shortname, do_newline=self._travis)
        self.result = JobResult()
        self.start()

    def GetSpec(self):
        """Return the JobSpec this job was created from."""
        return self._spec

    def start(self):
        """Launch the subprocess (also used to relaunch after a flake/timeout)."""
        if self._spec.logfilename:
            # make sure the log directory exists
            logfile_dir = os.path.dirname(
                os.path.abspath(self._spec.logfilename))
            if not os.path.exists(logfile_dir):
                os.makedirs(logfile_dir)
            self._logfile = open(self._spec.logfilename, 'w+')
        else:
            # macOS: a series of quick os.unlink invocation might cause OS
            # error during the creation of temporary file. By using
            # NamedTemporaryFile, we defer the removal of file and directory.
            self._logfile = tempfile.NamedTemporaryFile()
        # Child environment: os.environ overlaid with the spec's environ and
        # the jobset-wide additions, then scrubbed of non-ASCII characters.
        env = dict(os.environ)
        env.update(self._spec.environ)
        env.update(self._add_env)
        env = sanitized_environment(env)
        self._start = time.time()
        cmdline = self._spec.cmdline
        # The Unix time command is finicky when used with MSBuild, so we don't use it
        # with jobs that run MSBuild.
        global measure_cpu_costs
        if measure_cpu_costs and not 'vsprojects\\build' in cmdline[0]:
            cmdline = ['time', '-p'] + cmdline
        else:
            measure_cpu_costs = False
        # stdout and stderr are both captured into the logfile.
        try_start = lambda: subprocess.Popen(args=cmdline,
                                             stderr=subprocess.STDOUT,
                                             stdout=self._logfile,
                                             cwd=self._spec.cwd,
                                             shell=self._spec.shell,
                                             env=env)
        # Retry launch failures with exponential backoff (0.3s .. 2.4s);
        # the final attempt outside the loop lets the OSError propagate.
        delay = 0.3
        for i in range(0, 4):
            try:
                self._process = try_start()
                break
            except OSError:
                message(
                    'WARNING', 'Failed to start %s, retrying in %f seconds' %
                    (self._spec.shortname, delay))
                time.sleep(delay)
                delay *= 2
        else:
            self._process = try_start()
        self._state = _RUNNING

    def state(self):
        """Poll current state of the job. Prints messages at completion."""

        def stdout(self=self):
            # Re-reads the whole logfile; also records its tail on the result.
            stdout = read_from_start(self._logfile)
            self.result.message = stdout[-_MAX_RESULT_SIZE:]
            return stdout

        if self._state == _RUNNING and self._process.poll() is not None:
            elapsed = time.time() - self._start
            self.result.elapsed_time = elapsed
            if self._process.returncode != 0:
                if self._retries < self._spec.flake_retries:
                    # Non-zero exit with retries left: report a flake and rerun.
                    message('FLAKE',
                            '%s [ret=%d, pid=%d]' %
                            (self._spec.shortname, self._process.returncode,
                             self._process.pid),
                            stdout(),
                            do_newline=True)
                    self._retries += 1
                    self.result.num_failures += 1
                    self.result.retries = self._timeout_retries + self._retries
                    # NOTE: job is restarted regardless of jobset's max_time setting
                    self.start()
                else:
                    self._state = _FAILURE
                    if not self._suppress_failure_message:
                        message('FAILED',
                                '%s [ret=%d, pid=%d, time=%.1fsec]' %
                                (self._spec.shortname, self._process.returncode,
                                 self._process.pid, elapsed),
                                stdout(),
                                do_newline=True)
                    self.result.state = 'FAILED'
                    self.result.num_failures += 1
                    self.result.returncode = self._process.returncode
            else:
                self._state = _SUCCESS
                measurement = ''
                if measure_cpu_costs:
                    # Parse the `time -p` summary appended to the job's output.
                    m = re.search(
                        r'real\s+([0-9.]+)\nuser\s+([0-9.]+)\nsys\s+([0-9.]+)',
                        (stdout()).decode('utf8', errors='replace'))
                    real = float(m.group(1))
                    user = float(m.group(2))
                    # NOTE(review): `sys` shadows the sys module for the rest
                    # of this method — harmless today, but fragile.
                    sys = float(m.group(3))
                    if real > 0.5:
                        # Only gather stats from long-ish running jobs.
                        cores = (user + sys) / real
                        self.result.cpu_measured = float('%.01f' % cores)
                        self.result.cpu_estimated = float('%.01f' %
                                                          self._spec.cpu_cost)
                        measurement = '; cpu_cost=%.01f; estimated=%.01f' % (
                            self.result.cpu_measured, self.result.cpu_estimated)
                if not self._quiet_success:
                    message('PASSED',
                            '%s [time=%.1fsec, retries=%d:%d%s]' %
                            (self._spec.shortname, elapsed, self._retries,
                             self._timeout_retries, measurement),
                            stdout() if self._spec.verbose_success else None,
                            do_newline=self._newline_on_success or self._travis)
                self.result.state = 'PASSED'
        elif (self._state == _RUNNING and
              self._spec.timeout_seconds is not None and
              time.time() - self._start > self._spec.timeout_seconds):
            elapsed = time.time() - self._start
            self.result.elapsed_time = elapsed
            if self._timeout_retries < self._spec.timeout_retries:
                # Timed out but retries remain: kill the process and rerun.
                message('TIMEOUT_FLAKE',
                        '%s [pid=%d]' %
                        (self._spec.shortname, self._process.pid),
                        stdout(),
                        do_newline=True)
                self._timeout_retries += 1
                self.result.num_failures += 1
                self.result.retries = self._timeout_retries + self._retries
                if self._spec.kill_handler:
                    self._spec.kill_handler(self)
                self._process.terminate()
                # NOTE: job is restarted regardless of jobset's max_time setting
                self.start()
            else:
                message('TIMEOUT',
                        '%s [pid=%d, time=%.1fsec]' %
                        (self._spec.shortname, self._process.pid, elapsed),
                        stdout(),
                        do_newline=True)
                self.kill()
                self.result.state = 'TIMEOUT'
                self.result.num_failures += 1
        return self._state

    def kill(self):
        """Terminate a still-running job and mark it _KILLED."""
        if self._state == _RUNNING:
            self._state = _KILLED
            if self._spec.kill_handler:
                self._spec.kill_handler(self)
            self._process.terminate()

    def suppress_failure_message(self):
        """Stop state() from printing FAILED messages for this job."""
        self._suppress_failure_message = True
class Jobset(object):
    """Manages one run of jobs."""

    def __init__(self, check_cancelled, maxjobs, maxjobs_cpu_agnostic,
                 newline_on_success, travis, stop_on_failure, add_env,
                 quiet_success, max_time):
        self._running = set()
        self._check_cancelled = check_cancelled
        self._cancelled = False
        self._failures = 0
        self._completed = 0
        # maxjobs caps the summed cpu_cost of running jobs; the cpu-agnostic
        # cap bounds the plain count of running jobs (see start()).
        self._maxjobs = maxjobs
        self._maxjobs_cpu_agnostic = maxjobs_cpu_agnostic
        self._newline_on_success = newline_on_success
        self._travis = travis
        self._stop_on_failure = stop_on_failure
        self._add_env = add_env
        self._quiet_success = quiet_success
        # max_time <= 0 means no wall-clock budget.
        self._max_time = max_time
        # shortname -> list of JobResult for every attempt of that job.
        self.resultset = {}
        self._remaining = None
        self._start_time = time.time()

    def set_remaining(self, remaining):
        """Record how many jobs are still queued (used for the ETA display)."""
        self._remaining = remaining

    def get_num_failures(self):
        return self._failures

    def cpu_cost(self):
        """Sum of the declared cpu_cost of all currently running jobs."""
        c = 0
        for job in self._running:
            c += job._spec.cpu_cost
        return c

    def start(self, spec):
        """Start a job. Return True on success, False on failure."""
        while True:
            # Over the wall-clock budget: record the job as SKIPPED but
            # still report success so the caller keeps draining the queue.
            if self._max_time > 0 and time.time(
            ) - self._start_time > self._max_time:
                skipped_job_result = JobResult()
                skipped_job_result.state = 'SKIPPED'
                message('SKIPPED', spec.shortname, do_newline=True)
                self.resultset[spec.shortname] = [skipped_job_result]
                return True
            if self.cancelled():
                return False
            current_cpu_cost = self.cpu_cost()
            # Nothing running: always admit (even an over-budget cpu_cost).
            if current_cpu_cost == 0:
                break
            # Admit only if both the cpu-cost cap and the job-count cap allow.
            if current_cpu_cost + spec.cpu_cost <= self._maxjobs:
                if len(self._running) < self._maxjobs_cpu_agnostic:
                    break
            # Otherwise wait for some running job to finish, then re-check.
            self.reap(spec.shortname, spec.cpu_cost)
        if self.cancelled():
            return False
        job = Job(spec, self._newline_on_success, self._travis, self._add_env,
                  self._quiet_success)
        self._running.add(job)
        if job.GetSpec().shortname not in self.resultset:
            self.resultset[job.GetSpec().shortname] = []
        return True

    def reap(self, waiting_for=None, waiting_for_cost=None):
        """Collect the dead jobs."""
        while self._running:
            dead = set()
            for job in self._running:
                st = eintr_be_gone(lambda: job.state())
                if st == _RUNNING:
                    continue
                if st == _FAILURE or st == _KILLED:
                    self._failures += 1
                    if self._stop_on_failure:
                        self._cancelled = True
                        for job in self._running:
                            job.kill()
                # Only one job is reaped per pass; break before the set
                # we are iterating is mutated below.
                dead.add(job)
                break
            for job in dead:
                self._completed += 1
                # Suppress PASSED results in quiet mode to keep reports small.
                if not self._quiet_success or job.result.state != 'PASSED':
                    self.resultset[job.GetSpec().shortname].append(job.result)
                self._running.remove(job)
            if dead:
                return
            # Nothing finished yet: print a WAITING line (with ETA when we
            # know the queue depth), then sleep until a child exits.
            if not self._travis and platform_string() != 'windows':
                rstr = '' if self._remaining is None else '%d queued, ' % self._remaining
                if self._remaining is not None and self._completed > 0:
                    now = time.time()
                    sofar = now - self._start_time
                    remaining = sofar / self._completed * (self._remaining +
                                                           len(self._running))
                    rstr = 'ETA %.1f sec; %s' % (remaining, rstr)
                if waiting_for is not None:
                    wstr = ' next: %s @ %.2f cpu' % (waiting_for,
                                                     waiting_for_cost)
                else:
                    wstr = ''
                message(
                    'WAITING',
                    '%s%d jobs running, %d complete, %d failed (load %.2f)%s' %
                    (rstr, len(self._running), self._completed, self._failures,
                     self.cpu_cost(), wstr))
            if platform_string() == 'windows':
                time.sleep(0.1)
            else:
                # SIGCHLD (child exited) or the 10s SIGALRM wakes this pause.
                signal.alarm(10)
                signal.pause()

    def cancelled(self):
        """Poll for cancellation."""
        if self._cancelled:
            return True
        if not self._check_cancelled():
            return False
        # Newly cancelled: kill everything still running, latch the flag.
        for job in self._running:
            job.kill()
        self._cancelled = True
        return True

    def finish(self):
        """Drain all running jobs; return True iff nothing failed or was cancelled."""
        while self._running:
            if self.cancelled():
                pass  # poll cancellation
            self.reap()
        if platform_string() != 'windows':
            # Clear any pending SIGALRM scheduled by reap().
            signal.alarm(0)
        return not self.cancelled() and self._failures == 0
  492. def _never_cancelled():
  493. return False
  494. def tag_remaining(xs):
  495. staging = []
  496. for x in xs:
  497. staging.append(x)
  498. if len(staging) > 5000:
  499. yield (staging.pop(0), None)
  500. n = len(staging)
  501. for i, x in enumerate(staging):
  502. yield (x, n - i - 1)
  503. def run(cmdlines,
  504. check_cancelled=_never_cancelled,
  505. maxjobs=None,
  506. maxjobs_cpu_agnostic=None,
  507. newline_on_success=False,
  508. travis=False,
  509. infinite_runs=False,
  510. stop_on_failure=False,
  511. add_env={},
  512. skip_jobs=False,
  513. quiet_success=False,
  514. max_time=-1):
  515. if skip_jobs:
  516. resultset = {}
  517. skipped_job_result = JobResult()
  518. skipped_job_result.state = 'SKIPPED'
  519. for job in cmdlines:
  520. message('SKIPPED', job.shortname, do_newline=True)
  521. resultset[job.shortname] = [skipped_job_result]
  522. return 0, resultset
  523. js = Jobset(
  524. check_cancelled, maxjobs if maxjobs is not None else _DEFAULT_MAX_JOBS,
  525. maxjobs_cpu_agnostic if maxjobs_cpu_agnostic is not None else
  526. _DEFAULT_MAX_JOBS, newline_on_success, travis, stop_on_failure, add_env,
  527. quiet_success, max_time)
  528. for cmdline, remaining in tag_remaining(cmdlines):
  529. if not js.start(cmdline):
  530. break
  531. if remaining is not None:
  532. js.set_remaining(remaining)
  533. js.finish()
  534. return js.get_num_failures(), js.resultset