arena_decay.c
#include "test/jemalloc_test.h"
#include "test/arena_util.h"
#include "jemalloc/internal/ticker.h"
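
/*
 * Time-source mocks: the decay tests below swap nstime_monotonic/nstime_update
 * for these so they can freeze or advance the clock deterministically.
 */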
static nstime_monotonic_t *nstime_monotonic_orig;
static nstime_update_t *nstime_update_orig;
static unsigned nupdates_mock;
static nstime_t time_mock;
static bool monotonic_mock;

static bool
nstime_monotonic_mock(void) {
	return monotonic_mock;
}

static void
nstime_update_mock(nstime_t *time) {
	nupdates_mock++;
	if (monotonic_mock) {
		nstime_copy(time, &time_mock);
	}
}
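
/*
 * Verify that the per-arena decay ticker ticks on each allocation and
 * deallocation path: malloc/free and friends, the *allocx() APIs with tcache
 * disabled, and explicit tcache fill/flush.
 */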
TEST_BEGIN(test_decay_ticks) {
	test_skip_if(is_background_thread_enabled());
	test_skip_if(opt_hpa);

	ticker_geom_t *decay_ticker;
	unsigned tick0, tick1, arena_ind;
	size_t sz, large0;
	void *p;

	sz = sizeof(size_t);
	expect_d_eq(mallctl("arenas.lextent.0.size", (void *)&large0, &sz, NULL,
	    0), 0, "Unexpected mallctl failure");

	/* Set up a manually managed arena for test. */
	arena_ind = do_arena_create(0, 0);

	/* Migrate to the new arena, and get the ticker. */
	unsigned old_arena_ind;
	size_t sz_arena_ind = sizeof(old_arena_ind);
	expect_d_eq(mallctl("thread.arena", (void *)&old_arena_ind,
	    &sz_arena_ind, (void *)&arena_ind, sizeof(arena_ind)), 0,
	    "Unexpected mallctl() failure");
	decay_ticker = tsd_arena_decay_tickerp_get(tsd_fetch());
	expect_ptr_not_null(decay_ticker,
	    "Unexpected failure getting decay ticker");

	/*
	 * Test the standard APIs using a large size class, since we can't
	 * control tcache interactions for small size classes (except by
	 * completely disabling tcache for the entire test program).
	 */

	/* malloc(). */
	tick0 = ticker_geom_read(decay_ticker);
	p = malloc(large0);
	expect_ptr_not_null(p, "Unexpected malloc() failure");
	tick1 = ticker_geom_read(decay_ticker);
	expect_u32_ne(tick1, tick0, "Expected ticker to tick during malloc()");

	/* free(). */
	tick0 = ticker_geom_read(decay_ticker);
	free(p);
	tick1 = ticker_geom_read(decay_ticker);
	expect_u32_ne(tick1, tick0, "Expected ticker to tick during free()");

	/* calloc(). */
	tick0 = ticker_geom_read(decay_ticker);
	p = calloc(1, large0);
	expect_ptr_not_null(p, "Unexpected calloc() failure");
	tick1 = ticker_geom_read(decay_ticker);
	expect_u32_ne(tick1, tick0, "Expected ticker to tick during calloc()");
	free(p);

	/* posix_memalign(). */
	tick0 = ticker_geom_read(decay_ticker);
	expect_d_eq(posix_memalign(&p, sizeof(size_t), large0), 0,
	    "Unexpected posix_memalign() failure");
	tick1 = ticker_geom_read(decay_ticker);
	expect_u32_ne(tick1, tick0,
	    "Expected ticker to tick during posix_memalign()");
	free(p);

	/* aligned_alloc(). */
	tick0 = ticker_geom_read(decay_ticker);
	p = aligned_alloc(sizeof(size_t), large0);
	expect_ptr_not_null(p, "Unexpected aligned_alloc() failure");
	tick1 = ticker_geom_read(decay_ticker);
	expect_u32_ne(tick1, tick0,
	    "Expected ticker to tick during aligned_alloc()");
	free(p);

	/* realloc(). */
	/* Allocate. */
	tick0 = ticker_geom_read(decay_ticker);
	p = realloc(NULL, large0);
	expect_ptr_not_null(p, "Unexpected realloc() failure");
	tick1 = ticker_geom_read(decay_ticker);
	expect_u32_ne(tick1, tick0, "Expected ticker to tick during realloc()");
	/* Reallocate. */
	tick0 = ticker_geom_read(decay_ticker);
	p = realloc(p, large0);
	expect_ptr_not_null(p, "Unexpected realloc() failure");
	tick1 = ticker_geom_read(decay_ticker);
	expect_u32_ne(tick1, tick0, "Expected ticker to tick during realloc()");
	/* Deallocate. */
	tick0 = ticker_geom_read(decay_ticker);
	realloc(p, 0);
	tick1 = ticker_geom_read(decay_ticker);
	expect_u32_ne(tick1, tick0, "Expected ticker to tick during realloc()");

	/*
	 * Test the *allocx() APIs using large and small size classes, with
	 * tcache explicitly disabled.
	 */
	{
		unsigned i;
		size_t allocx_sizes[2];
		allocx_sizes[0] = large0;
		allocx_sizes[1] = 1;

		for (i = 0; i < sizeof(allocx_sizes) / sizeof(size_t); i++) {
			sz = allocx_sizes[i];

			/* mallocx(). */
			tick0 = ticker_geom_read(decay_ticker);
			p = mallocx(sz, MALLOCX_TCACHE_NONE);
			expect_ptr_not_null(p, "Unexpected mallocx() failure");
			tick1 = ticker_geom_read(decay_ticker);
			expect_u32_ne(tick1, tick0,
			    "Expected ticker to tick during mallocx() (sz=%zu)",
			    sz);

			/* rallocx(). */
			tick0 = ticker_geom_read(decay_ticker);
			p = rallocx(p, sz, MALLOCX_TCACHE_NONE);
			expect_ptr_not_null(p, "Unexpected rallocx() failure");
			tick1 = ticker_geom_read(decay_ticker);
			expect_u32_ne(tick1, tick0,
			    "Expected ticker to tick during rallocx() (sz=%zu)",
			    sz);

			/* xallocx(). */
			tick0 = ticker_geom_read(decay_ticker);
			xallocx(p, sz, 0, MALLOCX_TCACHE_NONE);
			tick1 = ticker_geom_read(decay_ticker);
			expect_u32_ne(tick1, tick0,
			    "Expected ticker to tick during xallocx() (sz=%zu)",
			    sz);

			/* dallocx(). */
			tick0 = ticker_geom_read(decay_ticker);
			dallocx(p, MALLOCX_TCACHE_NONE);
			tick1 = ticker_geom_read(decay_ticker);
			expect_u32_ne(tick1, tick0,
			    "Expected ticker to tick during dallocx() (sz=%zu)",
			    sz);

			/* sdallocx(). */
			p = mallocx(sz, MALLOCX_TCACHE_NONE);
			expect_ptr_not_null(p, "Unexpected mallocx() failure");
			tick0 = ticker_geom_read(decay_ticker);
			sdallocx(p, sz, MALLOCX_TCACHE_NONE);
			tick1 = ticker_geom_read(decay_ticker);
			expect_u32_ne(tick1, tick0,
			    "Expected ticker to tick during sdallocx() "
			    "(sz=%zu)", sz);
		}
	}

	/*
	 * Test tcache fill/flush interactions for large and small size classes,
	 * using an explicit tcache.
	 */
	unsigned tcache_ind, i;
	size_t tcache_sizes[2];
	tcache_sizes[0] = large0;
	tcache_sizes[1] = 1;

	size_t tcache_max, sz_tcache_max;
	sz_tcache_max = sizeof(tcache_max);
	expect_d_eq(mallctl("arenas.tcache_max", (void *)&tcache_max,
	    &sz_tcache_max, NULL, 0), 0, "Unexpected mallctl() failure");

	sz = sizeof(unsigned);
	expect_d_eq(mallctl("tcache.create", (void *)&tcache_ind, &sz,
	    NULL, 0), 0, "Unexpected mallctl failure");

	for (i = 0; i < sizeof(tcache_sizes) / sizeof(size_t); i++) {
		sz = tcache_sizes[i];

		/* tcache fill. */
		tick0 = ticker_geom_read(decay_ticker);
		p = mallocx(sz, MALLOCX_TCACHE(tcache_ind));
		expect_ptr_not_null(p, "Unexpected mallocx() failure");
		tick1 = ticker_geom_read(decay_ticker);
		expect_u32_ne(tick1, tick0,
		    "Expected ticker to tick during tcache fill "
		    "(sz=%zu)", sz);

		/* tcache flush. */
		dallocx(p, MALLOCX_TCACHE(tcache_ind));
		tick0 = ticker_geom_read(decay_ticker);
		expect_d_eq(mallctl("tcache.flush", NULL, NULL,
		    (void *)&tcache_ind, sizeof(unsigned)), 0,
		    "Unexpected mallctl failure");
		tick1 = ticker_geom_read(decay_ticker);

		/* Will only tick if it's in tcache. */
		expect_u32_ne(tick1, tick0,
		    "Expected ticker to tick during tcache flush (sz=%zu)", sz);
	}
}
TEST_END
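
/*
 * Churn tiny allocations while stepping the mocked clock forward in NINTERVALS
 * increments, until either purging is observed (when terminate_asap) or the
 * decay deadline passes, then assert that purging occurred.
 */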
static void
decay_ticker_helper(unsigned arena_ind, int flags, bool dirty, ssize_t dt,
    uint64_t dirty_npurge0, uint64_t muzzy_npurge0, bool terminate_asap) {
#define NINTERVALS 101
	nstime_t time, update_interval, decay_ms, deadline;

	nstime_init_update(&time);

	nstime_init2(&decay_ms, dt, 0);
	nstime_copy(&deadline, &time);
	nstime_add(&deadline, &decay_ms);

	nstime_init2(&update_interval, dt, 0);
	nstime_idivide(&update_interval, NINTERVALS);

	/*
	 * Keep q's slab from being deallocated during the looping below.  If a
	 * cached slab were to repeatedly come and go during looping, it could
	 * prevent the decay backlog from ever becoming empty.
	 */
	void *p = do_mallocx(1, flags);
	uint64_t dirty_npurge1, muzzy_npurge1;
	do {
		for (unsigned i = 0; i < ARENA_DECAY_NTICKS_PER_UPDATE / 2;
		    i++) {
			void *q = do_mallocx(1, flags);
			dallocx(q, flags);
		}
		dirty_npurge1 = get_arena_dirty_npurge(arena_ind);
		muzzy_npurge1 = get_arena_muzzy_npurge(arena_ind);

		nstime_add(&time_mock, &update_interval);
		nstime_update(&time);
	} while (nstime_compare(&time, &deadline) <= 0 && ((dirty_npurge1 ==
	    dirty_npurge0 && muzzy_npurge1 == muzzy_npurge0) ||
	    !terminate_asap));
	dallocx(p, flags);

	if (config_stats) {
		expect_u64_gt(dirty_npurge1 + muzzy_npurge1, dirty_npurge0 +
		    muzzy_npurge0, "Expected purging to occur");
	}
#undef NINTERVALS
}

TEST_BEGIN(test_decay_ticker) {
	test_skip_if(is_background_thread_enabled());
	test_skip_if(opt_hpa);
#define NPS 2048
	ssize_t ddt = opt_dirty_decay_ms;
	ssize_t mdt = opt_muzzy_decay_ms;
	unsigned arena_ind = do_arena_create(ddt, mdt);
	int flags = (MALLOCX_ARENA(arena_ind) | MALLOCX_TCACHE_NONE);
	void *ps[NPS];

	/*
	 * Allocate a bunch of large objects, pause the clock, deallocate every
	 * other object (to fragment virtual memory), restore the clock, then
	 * [md]allocx() in a tight loop while advancing time rapidly to verify
	 * the ticker triggers purging.
	 */
	size_t large;
	size_t sz = sizeof(size_t);
	expect_d_eq(mallctl("arenas.lextent.0.size", (void *)&large, &sz, NULL,
	    0), 0, "Unexpected mallctl failure");

	do_purge(arena_ind);
	uint64_t dirty_npurge0 = get_arena_dirty_npurge(arena_ind);
	uint64_t muzzy_npurge0 = get_arena_muzzy_npurge(arena_ind);

	for (unsigned i = 0; i < NPS; i++) {
		ps[i] = do_mallocx(large, flags);
	}

	nupdates_mock = 0;
	nstime_init_update(&time_mock);
	monotonic_mock = true;

	nstime_monotonic_orig = nstime_monotonic;
	nstime_update_orig = nstime_update;
	nstime_monotonic = nstime_monotonic_mock;
	nstime_update = nstime_update_mock;

	for (unsigned i = 0; i < NPS; i += 2) {
		dallocx(ps[i], flags);
		unsigned nupdates0 = nupdates_mock;
		do_decay(arena_ind);
		expect_u_gt(nupdates_mock, nupdates0,
		    "Expected nstime_update() to be called");
	}

	decay_ticker_helper(arena_ind, flags, true, ddt, dirty_npurge0,
	    muzzy_npurge0, true);
	decay_ticker_helper(arena_ind, flags, false, ddt+mdt, dirty_npurge0,
	    muzzy_npurge0, false);

	do_arena_destroy(arena_ind);

	nstime_monotonic = nstime_monotonic_orig;
	nstime_update = nstime_update_orig;
#undef NPS
}
TEST_END
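
/*
 * With a non-monotonic mocked clock (time never advances), decay should make
 * no progress: verify that no additional purging is recorded.
 */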
TEST_BEGIN(test_decay_nonmonotonic) {
	test_skip_if(is_background_thread_enabled());
	test_skip_if(opt_hpa);
#define NPS (SMOOTHSTEP_NSTEPS + 1)
	int flags = (MALLOCX_ARENA(0) | MALLOCX_TCACHE_NONE);
	void *ps[NPS];
	uint64_t npurge0 = 0;
	uint64_t npurge1 = 0;
	size_t sz, large0;
	unsigned i, nupdates0;

	sz = sizeof(size_t);
	expect_d_eq(mallctl("arenas.lextent.0.size", (void *)&large0, &sz, NULL,
	    0), 0, "Unexpected mallctl failure");

	expect_d_eq(mallctl("arena.0.purge", NULL, NULL, NULL, 0), 0,
	    "Unexpected mallctl failure");
	do_epoch();
	sz = sizeof(uint64_t);
	npurge0 = get_arena_npurge(0);

	nupdates_mock = 0;
	nstime_init_update(&time_mock);
	monotonic_mock = false;

	nstime_monotonic_orig = nstime_monotonic;
	nstime_update_orig = nstime_update;
	nstime_monotonic = nstime_monotonic_mock;
	nstime_update = nstime_update_mock;

	for (i = 0; i < NPS; i++) {
		ps[i] = mallocx(large0, flags);
		expect_ptr_not_null(ps[i], "Unexpected mallocx() failure");
	}

	for (i = 0; i < NPS; i++) {
		dallocx(ps[i], flags);
		nupdates0 = nupdates_mock;
		expect_d_eq(mallctl("arena.0.decay", NULL, NULL, NULL, 0), 0,
		    "Unexpected arena.0.decay failure");
		expect_u_gt(nupdates_mock, nupdates0,
		    "Expected nstime_update() to be called");
	}

	do_epoch();
	sz = sizeof(uint64_t);
	npurge1 = get_arena_npurge(0);

	if (config_stats) {
		expect_u64_eq(npurge0, npurge1, "Unexpected purging occurred");
	}

	nstime_monotonic = nstime_monotonic_orig;
	nstime_update = nstime_update_orig;
#undef NPS
}
TEST_END
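
/*
 * Dirty/muzzy decay times of 0 (do_arena_create(0, 0)) request immediate
 * purging, so deallocated pages should never linger as dirty or muzzy.
 */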
TEST_BEGIN(test_decay_now) {
	test_skip_if(is_background_thread_enabled());
	test_skip_if(opt_hpa);

	unsigned arena_ind = do_arena_create(0, 0);
	expect_zu_eq(get_arena_pdirty(arena_ind), 0, "Unexpected dirty pages");
	expect_zu_eq(get_arena_pmuzzy(arena_ind), 0, "Unexpected muzzy pages");
	size_t sizes[] = {16, PAGE<<2, HUGEPAGE<<2};
	/* Verify that dirty/muzzy pages never linger after deallocation. */
	for (unsigned i = 0; i < sizeof(sizes)/sizeof(size_t); i++) {
		size_t size = sizes[i];
		generate_dirty(arena_ind, size);
		expect_zu_eq(get_arena_pdirty(arena_ind), 0,
		    "Unexpected dirty pages");
		expect_zu_eq(get_arena_pmuzzy(arena_ind), 0,
		    "Unexpected muzzy pages");
	}
	do_arena_destroy(arena_ind);
}
TEST_END
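
/*
 * Decay times of -1 disable decay entirely, so deallocation should accumulate
 * dirty pages that are never purged (until the arena is destroyed).
 */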
TEST_BEGIN(test_decay_never) {
	test_skip_if(is_background_thread_enabled() || !config_stats);
	test_skip_if(opt_hpa);

	unsigned arena_ind = do_arena_create(-1, -1);
	int flags = MALLOCX_ARENA(arena_ind) | MALLOCX_TCACHE_NONE;
	expect_zu_eq(get_arena_pdirty(arena_ind), 0, "Unexpected dirty pages");
	expect_zu_eq(get_arena_pmuzzy(arena_ind), 0, "Unexpected muzzy pages");
	size_t sizes[] = {16, PAGE<<2, HUGEPAGE<<2};
	void *ptrs[sizeof(sizes)/sizeof(size_t)];
	for (unsigned i = 0; i < sizeof(sizes)/sizeof(size_t); i++) {
		ptrs[i] = do_mallocx(sizes[i], flags);
	}
	/* Verify that each deallocation generates additional dirty pages. */
	size_t pdirty_prev = get_arena_pdirty(arena_ind);
	size_t pmuzzy_prev = get_arena_pmuzzy(arena_ind);
	expect_zu_eq(pdirty_prev, 0, "Unexpected dirty pages");
	expect_zu_eq(pmuzzy_prev, 0, "Unexpected muzzy pages");

	for (unsigned i = 0; i < sizeof(sizes)/sizeof(size_t); i++) {
		dallocx(ptrs[i], flags);
		size_t pdirty = get_arena_pdirty(arena_ind);
		size_t pmuzzy = get_arena_pmuzzy(arena_ind);
		expect_zu_gt(pdirty + (size_t)get_arena_dirty_purged(arena_ind),
		    pdirty_prev, "Expected dirty pages to increase.");
		expect_zu_eq(pmuzzy, 0, "Unexpected muzzy pages");
		pdirty_prev = pdirty;
	}

	do_arena_destroy(arena_ind);
}
TEST_END

int
main(void) {
	return test(
	    test_decay_ticks,
	    test_decay_ticker,
	    test_decay_nonmonotonic,
	    test_decay_now,
	    test_decay_never);
}