test-threadpool-cancel.c 9.9 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349
  1. /* Copyright Joyent, Inc. and other Node contributors. All rights reserved.
  2. *
  3. * Permission is hereby granted, free of charge, to any person obtaining a copy
  4. * of this software and associated documentation files (the "Software"), to
  5. * deal in the Software without restriction, including without limitation the
  6. * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
  7. * sell copies of the Software, and to permit persons to whom the Software is
  8. * furnished to do so, subject to the following conditions:
  9. *
  10. * The above copyright notice and this permission notice shall be included in
  11. * all copies or substantial portions of the Software.
  12. *
  13. * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
  14. * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
  15. * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
  16. * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
  17. * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
  18. * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
  19. * IN THE SOFTWARE.
  20. */
#include "uv.h"
#include "task.h"

#include <string.h>  /* memset() */
/* Point a cancel_info at the request array `what`: record its base address,
 * element count and element size so timer_cb() can walk the array without
 * knowing the concrete request type.  The do { ... } while (0) wrapper makes
 * the macro usable as a single statement. */
#define INIT_CANCEL_INFO(ci, what)                                            \
  do {                                                                        \
    (ci)->reqs = (what);                                                      \
    (ci)->nreqs = ARRAY_SIZE(what);                                           \
    (ci)->stride = sizeof((what)[0]);                                         \
  }                                                                           \
  while (0)
/* Book-keeping for a batch of in-flight requests that timer_cb() cancels:
 * a type-erased pointer to the request array plus the geometry needed to
 * iterate it, and the timer that triggers the cancellation. */
struct cancel_info {
  void* reqs;           /* Base of the request array (type-erased). */
  unsigned nreqs;       /* Number of elements in `reqs`. */
  unsigned stride;      /* sizeof one element of `reqs`. */
  uv_timer_t timer_handle;  /* Fires timer_cb() to cancel everything. */
};
/* Pairs a uv_random_t with its output buffer so nop_random_cb() can
 * recover the buffer from the request via container_of(). */
struct random_info {
  uv_random_t random_req;
  char buf[1];  /* 1-byte destination for uv_random(). */
};
/* Callback invocation counters checked by the individual tests. */
static unsigned fs_cb_called;
static unsigned done_cb_called;
static unsigned done2_cb_called;
static unsigned timer_cb_called;

/* One blocking work request per configured threadpool thread, plus the
 * semaphore each work_cb() blocks on; together they saturate the pool so
 * that subsequently submitted requests stay queued (and cancelable). */
static uv_work_t pause_reqs[4];
static uv_sem_t pause_sems[ARRAY_SIZE(pause_reqs)];
  46. static void work_cb(uv_work_t* req) {
  47. uv_sem_wait(pause_sems + (req - pause_reqs));
  48. }
  49. static void done_cb(uv_work_t* req, int status) {
  50. uv_sem_destroy(pause_sems + (req - pause_reqs));
  51. }
  52. static void saturate_threadpool(void) {
  53. uv_loop_t* loop;
  54. char buf[64];
  55. size_t i;
  56. snprintf(buf,
  57. sizeof(buf),
  58. "UV_THREADPOOL_SIZE=%lu",
  59. (unsigned long)ARRAY_SIZE(pause_reqs));
  60. putenv(buf);
  61. loop = uv_default_loop();
  62. for (i = 0; i < ARRAY_SIZE(pause_reqs); i += 1) {
  63. ASSERT(0 == uv_sem_init(pause_sems + i, 0));
  64. ASSERT(0 == uv_queue_work(loop, pause_reqs + i, work_cb, done_cb));
  65. }
  66. }
  67. static void unblock_threadpool(void) {
  68. size_t i;
  69. for (i = 0; i < ARRAY_SIZE(pause_reqs); i += 1)
  70. uv_sem_post(pause_sems + i);
  71. }
  72. static void fs_cb(uv_fs_t* req) {
  73. ASSERT(req->result == UV_ECANCELED);
  74. uv_fs_req_cleanup(req);
  75. fs_cb_called++;
  76. }
  77. static void getaddrinfo_cb(uv_getaddrinfo_t* req,
  78. int status,
  79. struct addrinfo* res) {
  80. ASSERT(status == UV_EAI_CANCELED);
  81. ASSERT(res == NULL);
  82. uv_freeaddrinfo(res); /* Should not crash. */
  83. }
  84. static void getnameinfo_cb(uv_getnameinfo_t* handle,
  85. int status,
  86. const char* hostname,
  87. const char* service) {
  88. ASSERT(status == UV_EAI_CANCELED);
  89. ASSERT(hostname == NULL);
  90. ASSERT(service == NULL);
  91. }
/* Work callback that must never execute: every request using it is
 * cancelled while still queued behind the saturated pool. */
static void work2_cb(uv_work_t* req) {
  ASSERT(0 && "work2_cb called");
}
  95. static void done2_cb(uv_work_t* req, int status) {
  96. ASSERT(status == UV_ECANCELED);
  97. done2_cb_called++;
  98. }
  99. static void timer_cb(uv_timer_t* handle) {
  100. struct cancel_info* ci;
  101. uv_req_t* req;
  102. unsigned i;
  103. ci = container_of(handle, struct cancel_info, timer_handle);
  104. for (i = 0; i < ci->nreqs; i++) {
  105. req = (uv_req_t*) ((char*) ci->reqs + i * ci->stride);
  106. ASSERT(0 == uv_cancel(req));
  107. }
  108. uv_close((uv_handle_t*) &ci->timer_handle, NULL);
  109. unblock_threadpool();
  110. timer_cb_called++;
  111. }
  112. static void nop_done_cb(uv_work_t* req, int status) {
  113. ASSERT(status == UV_ECANCELED);
  114. done_cb_called++;
  115. }
  116. static void nop_random_cb(uv_random_t* req, int status, void* buf, size_t len) {
  117. struct random_info* ri;
  118. ri = container_of(req, struct random_info, random_req);
  119. ASSERT(status == UV_ECANCELED);
  120. ASSERT(buf == (void*) ri->buf);
  121. ASSERT(len == sizeof(ri->buf));
  122. done_cb_called++;
  123. }
  124. TEST_IMPL(threadpool_cancel_getaddrinfo) {
  125. uv_getaddrinfo_t reqs[4];
  126. struct cancel_info ci;
  127. struct addrinfo hints;
  128. uv_loop_t* loop;
  129. int r;
  130. INIT_CANCEL_INFO(&ci, reqs);
  131. loop = uv_default_loop();
  132. saturate_threadpool();
  133. r = uv_getaddrinfo(loop, reqs + 0, getaddrinfo_cb, "fail", NULL, NULL);
  134. ASSERT(r == 0);
  135. r = uv_getaddrinfo(loop, reqs + 1, getaddrinfo_cb, NULL, "fail", NULL);
  136. ASSERT(r == 0);
  137. r = uv_getaddrinfo(loop, reqs + 2, getaddrinfo_cb, "fail", "fail", NULL);
  138. ASSERT(r == 0);
  139. r = uv_getaddrinfo(loop, reqs + 3, getaddrinfo_cb, "fail", NULL, &hints);
  140. ASSERT(r == 0);
  141. ASSERT(0 == uv_timer_init(loop, &ci.timer_handle));
  142. ASSERT(0 == uv_timer_start(&ci.timer_handle, timer_cb, 10, 0));
  143. ASSERT(0 == uv_run(loop, UV_RUN_DEFAULT));
  144. ASSERT(1 == timer_cb_called);
  145. MAKE_VALGRIND_HAPPY();
  146. return 0;
  147. }
  148. TEST_IMPL(threadpool_cancel_getnameinfo) {
  149. uv_getnameinfo_t reqs[4];
  150. struct sockaddr_in addr4;
  151. struct cancel_info ci;
  152. uv_loop_t* loop;
  153. int r;
  154. r = uv_ip4_addr("127.0.0.1", 80, &addr4);
  155. ASSERT(r == 0);
  156. INIT_CANCEL_INFO(&ci, reqs);
  157. loop = uv_default_loop();
  158. saturate_threadpool();
  159. r = uv_getnameinfo(loop, reqs + 0, getnameinfo_cb, (const struct sockaddr*)&addr4, 0);
  160. ASSERT(r == 0);
  161. r = uv_getnameinfo(loop, reqs + 1, getnameinfo_cb, (const struct sockaddr*)&addr4, 0);
  162. ASSERT(r == 0);
  163. r = uv_getnameinfo(loop, reqs + 2, getnameinfo_cb, (const struct sockaddr*)&addr4, 0);
  164. ASSERT(r == 0);
  165. r = uv_getnameinfo(loop, reqs + 3, getnameinfo_cb, (const struct sockaddr*)&addr4, 0);
  166. ASSERT(r == 0);
  167. ASSERT(0 == uv_timer_init(loop, &ci.timer_handle));
  168. ASSERT(0 == uv_timer_start(&ci.timer_handle, timer_cb, 10, 0));
  169. ASSERT(0 == uv_run(loop, UV_RUN_DEFAULT));
  170. ASSERT(1 == timer_cb_called);
  171. MAKE_VALGRIND_HAPPY();
  172. return 0;
  173. }
  174. TEST_IMPL(threadpool_cancel_random) {
  175. struct random_info req;
  176. uv_loop_t* loop;
  177. saturate_threadpool();
  178. loop = uv_default_loop();
  179. ASSERT(0 == uv_random(loop,
  180. &req.random_req,
  181. &req.buf,
  182. sizeof(req.buf),
  183. 0,
  184. nop_random_cb));
  185. ASSERT(0 == uv_cancel((uv_req_t*) &req));
  186. ASSERT(0 == done_cb_called);
  187. unblock_threadpool();
  188. ASSERT(0 == uv_run(loop, UV_RUN_DEFAULT));
  189. ASSERT(1 == done_cb_called);
  190. MAKE_VALGRIND_HAPPY();
  191. return 0;
  192. }
  193. TEST_IMPL(threadpool_cancel_work) {
  194. struct cancel_info ci;
  195. uv_work_t reqs[16];
  196. uv_loop_t* loop;
  197. unsigned i;
  198. INIT_CANCEL_INFO(&ci, reqs);
  199. loop = uv_default_loop();
  200. saturate_threadpool();
  201. for (i = 0; i < ARRAY_SIZE(reqs); i++)
  202. ASSERT(0 == uv_queue_work(loop, reqs + i, work2_cb, done2_cb));
  203. ASSERT(0 == uv_timer_init(loop, &ci.timer_handle));
  204. ASSERT(0 == uv_timer_start(&ci.timer_handle, timer_cb, 10, 0));
  205. ASSERT(0 == uv_run(loop, UV_RUN_DEFAULT));
  206. ASSERT(1 == timer_cb_called);
  207. ASSERT(ARRAY_SIZE(reqs) == done2_cb_called);
  208. MAKE_VALGRIND_HAPPY();
  209. return 0;
  210. }
  211. TEST_IMPL(threadpool_cancel_fs) {
  212. struct cancel_info ci;
  213. uv_fs_t reqs[26];
  214. uv_loop_t* loop;
  215. unsigned n;
  216. uv_buf_t iov;
  217. INIT_CANCEL_INFO(&ci, reqs);
  218. loop = uv_default_loop();
  219. saturate_threadpool();
  220. iov = uv_buf_init(NULL, 0);
  221. /* Needs to match ARRAY_SIZE(fs_reqs). */
  222. n = 0;
  223. ASSERT(0 == uv_fs_chmod(loop, reqs + n++, "/", 0, fs_cb));
  224. ASSERT(0 == uv_fs_chown(loop, reqs + n++, "/", 0, 0, fs_cb));
  225. ASSERT(0 == uv_fs_close(loop, reqs + n++, 0, fs_cb));
  226. ASSERT(0 == uv_fs_fchmod(loop, reqs + n++, 0, 0, fs_cb));
  227. ASSERT(0 == uv_fs_fchown(loop, reqs + n++, 0, 0, 0, fs_cb));
  228. ASSERT(0 == uv_fs_fdatasync(loop, reqs + n++, 0, fs_cb));
  229. ASSERT(0 == uv_fs_fstat(loop, reqs + n++, 0, fs_cb));
  230. ASSERT(0 == uv_fs_fsync(loop, reqs + n++, 0, fs_cb));
  231. ASSERT(0 == uv_fs_ftruncate(loop, reqs + n++, 0, 0, fs_cb));
  232. ASSERT(0 == uv_fs_futime(loop, reqs + n++, 0, 0, 0, fs_cb));
  233. ASSERT(0 == uv_fs_link(loop, reqs + n++, "/", "/", fs_cb));
  234. ASSERT(0 == uv_fs_lstat(loop, reqs + n++, "/", fs_cb));
  235. ASSERT(0 == uv_fs_mkdir(loop, reqs + n++, "/", 0, fs_cb));
  236. ASSERT(0 == uv_fs_open(loop, reqs + n++, "/", 0, 0, fs_cb));
  237. ASSERT(0 == uv_fs_read(loop, reqs + n++, 0, &iov, 1, 0, fs_cb));
  238. ASSERT(0 == uv_fs_scandir(loop, reqs + n++, "/", 0, fs_cb));
  239. ASSERT(0 == uv_fs_readlink(loop, reqs + n++, "/", fs_cb));
  240. ASSERT(0 == uv_fs_realpath(loop, reqs + n++, "/", fs_cb));
  241. ASSERT(0 == uv_fs_rename(loop, reqs + n++, "/", "/", fs_cb));
  242. ASSERT(0 == uv_fs_mkdir(loop, reqs + n++, "/", 0, fs_cb));
  243. ASSERT(0 == uv_fs_sendfile(loop, reqs + n++, 0, 0, 0, 0, fs_cb));
  244. ASSERT(0 == uv_fs_stat(loop, reqs + n++, "/", fs_cb));
  245. ASSERT(0 == uv_fs_symlink(loop, reqs + n++, "/", "/", 0, fs_cb));
  246. ASSERT(0 == uv_fs_unlink(loop, reqs + n++, "/", fs_cb));
  247. ASSERT(0 == uv_fs_utime(loop, reqs + n++, "/", 0, 0, fs_cb));
  248. ASSERT(0 == uv_fs_write(loop, reqs + n++, 0, &iov, 1, 0, fs_cb));
  249. ASSERT(n == ARRAY_SIZE(reqs));
  250. ASSERT(0 == uv_timer_init(loop, &ci.timer_handle));
  251. ASSERT(0 == uv_timer_start(&ci.timer_handle, timer_cb, 10, 0));
  252. ASSERT(0 == uv_run(loop, UV_RUN_DEFAULT));
  253. ASSERT(n == fs_cb_called);
  254. ASSERT(1 == timer_cb_called);
  255. MAKE_VALGRIND_HAPPY();
  256. return 0;
  257. }
  258. TEST_IMPL(threadpool_cancel_single) {
  259. uv_loop_t* loop;
  260. uv_work_t req;
  261. saturate_threadpool();
  262. loop = uv_default_loop();
  263. ASSERT(0 == uv_queue_work(loop, &req, (uv_work_cb) abort, nop_done_cb));
  264. ASSERT(0 == uv_cancel((uv_req_t*) &req));
  265. ASSERT(0 == done_cb_called);
  266. unblock_threadpool();
  267. ASSERT(0 == uv_run(loop, UV_RUN_DEFAULT));
  268. ASSERT(1 == done_cb_called);
  269. MAKE_VALGRIND_HAPPY();
  270. return 0;
  271. }