/* tsd.c -- jemalloc unit tests for thread-specific data (tsd). */
  1. #include "test/jemalloc_test.h"
/*
 * If we're e.g. in debug mode, we *never* enter the fast path, and so shouldn't
 * be asserting that we're on one.
 */
static bool originally_fast;
/* Counts invocations of data_cleanup(); reset before each sub-thread test. */
static int data_cleanup_count;
  8. void
  9. data_cleanup(int *data) {
  10. if (data_cleanup_count == 0) {
  11. expect_x_eq(*data, MALLOC_TSD_TEST_DATA_INIT,
  12. "Argument passed into cleanup function should match tsd "
  13. "value");
  14. }
  15. ++data_cleanup_count;
  16. /*
  17. * Allocate during cleanup for two rounds, in order to assure that
  18. * jemalloc's internal tsd reinitialization happens.
  19. */
  20. bool reincarnate = false;
  21. switch (*data) {
  22. case MALLOC_TSD_TEST_DATA_INIT:
  23. *data = 1;
  24. reincarnate = true;
  25. break;
  26. case 1:
  27. *data = 2;
  28. reincarnate = true;
  29. break;
  30. case 2:
  31. return;
  32. default:
  33. not_reached();
  34. }
  35. if (reincarnate) {
  36. void *p = mallocx(1, 0);
  37. expect_ptr_not_null(p, "Unexpeced mallocx() failure");
  38. dallocx(p, 0);
  39. }
  40. }
  41. static void *
  42. thd_start(void *arg) {
  43. int d = (int)(uintptr_t)arg;
  44. void *p;
  45. /*
  46. * Test free before tsd init -- the free fast path (which does not
  47. * explicitly check for NULL) has to tolerate this case, and fall back
  48. * to free_default.
  49. */
  50. free(NULL);
  51. tsd_t *tsd = tsd_fetch();
  52. expect_x_eq(tsd_test_data_get(tsd), MALLOC_TSD_TEST_DATA_INIT,
  53. "Initial tsd get should return initialization value");
  54. p = malloc(1);
  55. expect_ptr_not_null(p, "Unexpected malloc() failure");
  56. tsd_test_data_set(tsd, d);
  57. expect_x_eq(tsd_test_data_get(tsd), d,
  58. "After tsd set, tsd get should return value that was set");
  59. d = 0;
  60. expect_x_eq(tsd_test_data_get(tsd), (int)(uintptr_t)arg,
  61. "Resetting local data should have no effect on tsd");
  62. tsd_test_callback_set(tsd, &data_cleanup);
  63. free(p);
  64. return NULL;
  65. }
  66. TEST_BEGIN(test_tsd_main_thread) {
  67. thd_start((void *)(uintptr_t)0xa5f3e329);
  68. }
  69. TEST_END
  70. TEST_BEGIN(test_tsd_sub_thread) {
  71. thd_t thd;
  72. data_cleanup_count = 0;
  73. thd_create(&thd, thd_start, (void *)MALLOC_TSD_TEST_DATA_INIT);
  74. thd_join(thd, NULL);
  75. /*
  76. * We reincarnate twice in the data cleanup, so it should execute at
  77. * least 3 times.
  78. */
  79. expect_x_ge(data_cleanup_count, 3,
  80. "Cleanup function should have executed multiple times.");
  81. }
  82. TEST_END
/*
 * Thread body that manually drives tsd through its end-of-life state
 * transitions (nominal -> purgatory -> reincarnated), checking the arena
 * pointer and tsd state after each step.
 */
static void *
thd_start_reincarnated(void *arg) {
	tsd_t *tsd = tsd_fetch();
	assert(tsd);

	void *p = malloc(1);
	expect_ptr_not_null(p, "Unexpected malloc() failure");

	/* Manually trigger reincarnation. */
	expect_ptr_not_null(tsd_arena_get(tsd),
	    "Should have tsd arena set.");
	tsd_cleanup((void *)tsd);
	expect_ptr_null(*tsd_arenap_get_unsafe(tsd),
	    "TSD arena should have been cleared.");
	expect_u_eq(tsd_state_get(tsd), tsd_state_purgatory,
	    "TSD state should be purgatory\n");

	/* Freeing while in purgatory moves the tsd to reincarnated state. */
	free(p);
	expect_u_eq(tsd_state_get(tsd), tsd_state_reincarnated,
	    "TSD state should be reincarnated\n");

	/*
	 * NOTE(review): MALLOCX_TCACHE_NONE presumably keeps this allocation
	 * from re-initializing per-thread caches while reincarnated -- confirm
	 * against the tcache implementation.
	 */
	p = mallocx(1, MALLOCX_TCACHE_NONE);
	expect_ptr_not_null(p, "Unexpected malloc() failure");
	expect_ptr_null(*tsd_arenap_get_unsafe(tsd),
	    "Should not have tsd arena set after reincarnation.");

	free(p);
	/* A second cleanup must also leave the arena pointer cleared. */
	tsd_cleanup((void *)tsd);
	expect_ptr_null(*tsd_arenap_get_unsafe(tsd),
	    "TSD arena should have been cleared after 2nd cleanup.");
	return NULL;
}
  110. TEST_BEGIN(test_tsd_reincarnation) {
  111. thd_t thd;
  112. thd_create(&thd, thd_start_reincarnated, NULL);
  113. thd_join(thd, NULL);
  114. }
  115. TEST_END
/* State shared between the main thread and the child in the global-slow
 * test.  Both sides spin on `phase` to run in lock step; the child reports
 * fast/slow expectation failures through `error`. */
typedef struct {
	/* Lock-step phase counter; odd phases are set by the child. */
	atomic_u32_t phase;
	/* Written by the child; the main thread expects it to stay false. */
	atomic_b_t error;
} global_slow_data_t;
/*
 * Child half of the global-slow handshake.  The main thread toggles global
 * slowness (tsd_global_slow_inc/dec) in the even phases; after each toggle
 * this thread allocates (forcing its tsd to observe the change) and records
 * whether its fast/slow state matches expectations.
 */
static void *
thd_start_global_slow(void *arg) {
	/* PHASE 0 */
	global_slow_data_t *data = (global_slow_data_t *)arg;
	/* mallocx (not malloc) so the compiler can't elide the pair. */
	free(mallocx(1, 0));
	tsd_t *tsd = tsd_fetch();
	/*
	 * No global slowness has happened yet; there was an error if we were
	 * originally fast but aren't now.
	 */
	atomic_store_b(&data->error, originally_fast && !tsd_fast(tsd),
	    ATOMIC_SEQ_CST);
	atomic_store_u32(&data->phase, 1, ATOMIC_SEQ_CST);

	/* PHASE 2 */
	while (atomic_load_u32(&data->phase, ATOMIC_SEQ_CST) != 2) {
	}
	free(mallocx(1, 0));
	/* Main thread did one global_slow_inc; being fast now is an error. */
	atomic_store_b(&data->error, tsd_fast(tsd), ATOMIC_SEQ_CST);
	atomic_store_u32(&data->phase, 3, ATOMIC_SEQ_CST);

	/* PHASE 4 */
	while (atomic_load_u32(&data->phase, ATOMIC_SEQ_CST) != 4) {
	}
	free(mallocx(1, 0));
	/* Two increments outstanding; still must be slow. */
	atomic_store_b(&data->error, tsd_fast(tsd), ATOMIC_SEQ_CST);
	atomic_store_u32(&data->phase, 5, ATOMIC_SEQ_CST);

	/* PHASE 6 */
	while (atomic_load_u32(&data->phase, ATOMIC_SEQ_CST) != 6) {
	}
	free(mallocx(1, 0));
	/* Only one decrement so far. */
	atomic_store_b(&data->error, tsd_fast(tsd), ATOMIC_SEQ_CST);
	atomic_store_u32(&data->phase, 7, ATOMIC_SEQ_CST);

	/* PHASE 8 */
	while (atomic_load_u32(&data->phase, ATOMIC_SEQ_CST) != 8) {
	}
	free(mallocx(1, 0));
	/*
	 * Both decrements happened; we should be fast again (if we ever
	 * were)
	 */
	atomic_store_b(&data->error, originally_fast && !tsd_fast(tsd),
	    ATOMIC_SEQ_CST);
	atomic_store_u32(&data->phase, 9, ATOMIC_SEQ_CST);
	return NULL;
}
  165. TEST_BEGIN(test_tsd_global_slow) {
  166. global_slow_data_t data = {ATOMIC_INIT(0), ATOMIC_INIT(false)};
  167. /*
  168. * Note that the "mallocx" here (vs. malloc) is important, since the
  169. * compiler is allowed to optimize away free(malloc(1)) but not
  170. * free(mallocx(1)).
  171. */
  172. free(mallocx(1, 0));
  173. tsd_t *tsd = tsd_fetch();
  174. originally_fast = tsd_fast(tsd);
  175. thd_t thd;
  176. thd_create(&thd, thd_start_global_slow, (void *)&data.phase);
  177. /* PHASE 1 */
  178. while (atomic_load_u32(&data.phase, ATOMIC_SEQ_CST) != 1) {
  179. /*
  180. * We don't have a portable condvar/semaphore mechanism.
  181. * Spin-wait.
  182. */
  183. }
  184. expect_false(atomic_load_b(&data.error, ATOMIC_SEQ_CST), "");
  185. tsd_global_slow_inc(tsd_tsdn(tsd));
  186. free(mallocx(1, 0));
  187. expect_false(tsd_fast(tsd), "");
  188. atomic_store_u32(&data.phase, 2, ATOMIC_SEQ_CST);
  189. /* PHASE 3 */
  190. while (atomic_load_u32(&data.phase, ATOMIC_SEQ_CST) != 3) {
  191. }
  192. expect_false(atomic_load_b(&data.error, ATOMIC_SEQ_CST), "");
  193. /* Increase again, so that we can test multiple fast/slow changes. */
  194. tsd_global_slow_inc(tsd_tsdn(tsd));
  195. atomic_store_u32(&data.phase, 4, ATOMIC_SEQ_CST);
  196. free(mallocx(1, 0));
  197. expect_false(tsd_fast(tsd), "");
  198. /* PHASE 5 */
  199. while (atomic_load_u32(&data.phase, ATOMIC_SEQ_CST) != 5) {
  200. }
  201. expect_false(atomic_load_b(&data.error, ATOMIC_SEQ_CST), "");
  202. tsd_global_slow_dec(tsd_tsdn(tsd));
  203. atomic_store_u32(&data.phase, 6, ATOMIC_SEQ_CST);
  204. /* We only decreased once; things should still be slow. */
  205. free(mallocx(1, 0));
  206. expect_false(tsd_fast(tsd), "");
  207. /* PHASE 7 */
  208. while (atomic_load_u32(&data.phase, ATOMIC_SEQ_CST) != 7) {
  209. }
  210. expect_false(atomic_load_b(&data.error, ATOMIC_SEQ_CST), "");
  211. tsd_global_slow_dec(tsd_tsdn(tsd));
  212. atomic_store_u32(&data.phase, 8, ATOMIC_SEQ_CST);
  213. /* We incremented and then decremented twice; we should be fast now. */
  214. free(mallocx(1, 0));
  215. expect_true(!originally_fast || tsd_fast(tsd), "");
  216. /* PHASE 9 */
  217. while (atomic_load_u32(&data.phase, ATOMIC_SEQ_CST) != 9) {
  218. }
  219. expect_false(atomic_load_b(&data.error, ATOMIC_SEQ_CST), "");
  220. thd_join(thd, NULL);
  221. }
  222. TEST_END
  223. int
  224. main(void) {
  225. /* Ensure tsd bootstrapped. */
  226. if (nallocx(1, 0) == 0) {
  227. malloc_printf("Initialization error");
  228. return test_status_fail;
  229. }
  230. return test_no_reentrancy(
  231. test_tsd_main_thread,
  232. test_tsd_sub_thread,
  233. test_tsd_reincarnation,
  234. test_tsd_global_slow);
  235. }