hpa_background_thread.c

#include "test/jemalloc_test.h"
#include "test/sleep.h"

static void
sleep_for_background_thread_interval() {
    /*
     * The sleep interval set in our .sh file is 50ms, so the background
     * thread will likely have run if we sleep for four times that.
     */
    sleep_ns(200 * 1000 * 1000);
}
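
/*
 * Create a fresh arena via the "arenas.create" mallctl and return its
 * index.
 */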
static unsigned
create_arena() {
    unsigned arena_ind;
    size_t sz;

    sz = sizeof(unsigned);
    expect_d_eq(mallctl("arenas.create", (void *)&arena_ind, &sz, NULL, 2),
        0, "Unexpected mallctl() failure");
    return arena_ind;
}
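
/*
 * Read the number of dirty pages (huge + nonhuge) sitting in the arena's
 * HPA empty slabs.  The epoch is bumped first so that the stats are
 * refreshed, and mib[2] replaces the literal "0" arena index in the stat
 * names with arena_ind.
 */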
static size_t
get_empty_ndirty(unsigned arena_ind) {
    int err;
    size_t ndirty_huge;
    size_t ndirty_nonhuge;
    uint64_t epoch = 1;
    size_t sz = sizeof(epoch);

    err = je_mallctl("epoch", (void *)&epoch, &sz, (void *)&epoch,
        sizeof(epoch));
    expect_d_eq(0, err, "Unexpected mallctl() failure");

    size_t mib[6];
    size_t miblen = sizeof(mib)/sizeof(mib[0]);
    err = mallctlnametomib(
        "stats.arenas.0.hpa_shard.empty_slabs.ndirty_nonhuge", mib,
        &miblen);
    expect_d_eq(0, err, "Unexpected mallctlnametomib() failure");

    sz = sizeof(ndirty_nonhuge);
    mib[2] = arena_ind;
    err = mallctlbymib(mib, miblen, &ndirty_nonhuge, &sz, NULL, 0);
    expect_d_eq(0, err, "Unexpected mallctlbymib() failure");

    err = mallctlnametomib(
        "stats.arenas.0.hpa_shard.empty_slabs.ndirty_huge", mib,
        &miblen);
    expect_d_eq(0, err, "Unexpected mallctlnametomib() failure");

    sz = sizeof(ndirty_huge);
    mib[2] = arena_ind;
    err = mallctlbymib(mib, miblen, &ndirty_huge, &sz, NULL, 0);
    expect_d_eq(0, err, "Unexpected mallctlbymib() failure");

    return ndirty_huge + ndirty_nonhuge;
}
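
/* Turn the "background_thread" mallctl on or off. */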
static void
set_background_thread_enabled(bool enabled) {
    int err;

    err = je_mallctl("background_thread", NULL, NULL, &enabled,
        sizeof(enabled));
    expect_d_eq(0, err, "Unexpected mallctl failure");
}
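
/*
 * Spin until the background thread covering this arena has started and
 * entered its indefinite sleep.  The lock/unlock of info->mtx is a
 * synchronization point before reading the thread's sleep state.
 */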
static void
wait_until_thread_is_enabled(unsigned arena_id) {
    tsd_t* tsd = tsd_fetch();

    bool sleeping = false;
    int iterations = 0;
    do {
        background_thread_info_t *info =
            background_thread_info_get(arena_id);
        malloc_mutex_lock(tsd_tsdn(tsd), &info->mtx);
        malloc_mutex_unlock(tsd_tsdn(tsd), &info->mtx);
        sleeping = background_thread_indefinite_sleep(info);
        iterations++;
        assert_d_lt(iterations, UINT64_C(1000000),
            "Waiting for a thread to start for too long");
    } while (!sleeping);
}
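
/*
 * Allocate and free one page from the arena and check how the resulting
 * dirty page is purged.  With expect_deferred, the dirty page should stick
 * around until the background thread purges it; otherwise it should be
 * purged immediately when the page is freed.
 */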
static void
expect_purging(unsigned arena_ind, bool expect_deferred) {
    size_t empty_ndirty;

    empty_ndirty = get_empty_ndirty(arena_ind);
    expect_zu_eq(0, empty_ndirty, "Expected arena to start unused.");

    /*
     * It's possible that we get unlucky with our stats collection timing,
     * and the background thread runs in between the deallocation and the
     * stats collection.  So we retry 10 times, and see if we *ever* see
     * deferred reclamation.
     */
    bool observed_dirty_page = false;
    for (int i = 0; i < 10; i++) {
        void *ptr = mallocx(PAGE,
            MALLOCX_TCACHE_NONE | MALLOCX_ARENA(arena_ind));
        empty_ndirty = get_empty_ndirty(arena_ind);
        expect_zu_eq(0, empty_ndirty, "All pages should be active");
        dallocx(ptr, MALLOCX_TCACHE_NONE);
        empty_ndirty = get_empty_ndirty(arena_ind);
        if (expect_deferred) {
            expect_true(empty_ndirty == 0 || empty_ndirty == 1 ||
                opt_prof, "Unexpected extra dirty page count: %zu",
                empty_ndirty);
        } else {
            assert_zu_eq(0, empty_ndirty,
                "Saw dirty pages without deferred purging");
        }
        if (empty_ndirty > 0) {
            observed_dirty_page = true;
            break;
        }
    }
    expect_b_eq(expect_deferred, observed_dirty_page, "");

    /*
     * Under high concurrency / heavy test load (e.g. using run_test.sh),
     * the background thread may not get scheduled for a longer period of
     * time.  Retry 100 times max before bailing out.
     */
    unsigned retry = 0;
    while ((empty_ndirty = get_empty_ndirty(arena_ind)) > 0 &&
        expect_deferred && (retry++ < 100)) {
        sleep_for_background_thread_interval();
    }

    expect_zu_eq(0, empty_ndirty, "Should have seen a background purge");
}
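
/* Background threads are enabled in main(), so purging should be deferred. */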
TEST_BEGIN(test_hpa_background_thread_purges) {
    test_skip_if(!config_stats);
    test_skip_if(!hpa_supported());
    test_skip_if(!have_background_thread);
    /* Skip since guarded pages cannot be allocated from hpa. */
    test_skip_if(san_guard_enabled());

    unsigned arena_ind = create_arena();
    /*
     * Our .sh sets dirty mult to 0, so all dirty pages should get purged
     * any time any thread frees.
     */
    expect_purging(arena_ind, /* expect_deferred */ true);
}
TEST_END
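
/*
 * With the background thread disabled, purging should happen synchronously
 * at free time; re-enabling the thread should restore deferred purging.
 */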
TEST_BEGIN(test_hpa_background_thread_enable_disable) {
    test_skip_if(!config_stats);
    test_skip_if(!hpa_supported());
    test_skip_if(!have_background_thread);
    /* Skip since guarded pages cannot be allocated from hpa. */
    test_skip_if(san_guard_enabled());

    unsigned arena_ind = create_arena();

    set_background_thread_enabled(false);
    expect_purging(arena_ind, false);

    set_background_thread_enabled(true);
    wait_until_thread_is_enabled(arena_ind);
    expect_purging(arena_ind, true);
}
TEST_END

int
main(void) {
    /*
     * OK, this is a sort of nasty hack.  We don't want to add *another*
     * config option for HPA (the intent is that it becomes available on
     * more platforms over time, and we're trying to prune back config
     * options generally).  But we'll get initialization errors on other
     * platforms if we set hpa:true in the MALLOC_CONF (even if we set
     * abort_conf:false as well).  So we reach into the internals and set
     * them directly, but only if we know that we're actually going to do
     * something nontrivial in the tests.
     */
    if (config_stats && hpa_supported() && have_background_thread) {
        opt_hpa = true;
        opt_background_thread = true;
    }
    return test_no_reentrancy(
        test_hpa_background_thread_purges,
        test_hpa_background_thread_enable_disable);
}