#include "test/jemalloc_test.h"

#include "jemalloc/internal/hpa.h"
#include "jemalloc/internal/nstime.h"

#define SHARD_IND 111

#define ALLOC_MAX (HUGEPAGE / 4)

typedef struct test_data_s test_data_t;
struct test_data_s {
	/*
	 * Must be the first member -- we convert back and forth between the
	 * test_data_t and the hpa_shard_t.
	 */
	hpa_shard_t shard;
	hpa_central_t central;
	base_t *base;
	edata_cache_t shard_edata_cache;

	emap_t emap;
};

static hpa_shard_opts_t test_hpa_shard_opts_default = {
	/* slab_max_alloc */
	ALLOC_MAX,
	/* hugification threshold */
	HUGEPAGE,
	/* dirty_mult */
	FXP_INIT_PERCENT(25),
	/* deferral_allowed */
	false,
	/* hugify_delay_ms */
	10 * 1000,
};
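
/*
 * Allocates a fresh base, edata cache, emap, and HPA central/shard, and
 * returns the embedded shard.  The shard pointer can be cast back to
 * test_data_t for teardown (see destroy_test_data).
 */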
static hpa_shard_t *
create_test_data(hpa_hooks_t *hooks, hpa_shard_opts_t *opts) {
	bool err;
	base_t *base = base_new(TSDN_NULL, /* ind */ SHARD_IND,
	    &ehooks_default_extent_hooks, /* metadata_use_hooks */ true);
	assert_ptr_not_null(base, "");

	test_data_t *test_data = malloc(sizeof(test_data_t));
	assert_ptr_not_null(test_data, "");

	test_data->base = base;

	err = edata_cache_init(&test_data->shard_edata_cache, base);
	assert_false(err, "");

	err = emap_init(&test_data->emap, test_data->base, /* zeroed */ false);
	assert_false(err, "");

	err = hpa_central_init(&test_data->central, test_data->base, hooks);
	assert_false(err, "");

	err = hpa_shard_init(&test_data->shard, &test_data->central,
	    &test_data->emap, test_data->base, &test_data->shard_edata_cache,
	    SHARD_IND, opts);
	assert_false(err, "");

	return (hpa_shard_t *)test_data;
}

static void
destroy_test_data(hpa_shard_t *shard) {
	test_data_t *test_data = (test_data_t *)shard;
	base_delete(TSDN_NULL, test_data->base);
	free(test_data);
}
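
/*
 * Allocations up to slab_max_alloc should succeed; anything larger should
 * fail.
 */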
TEST_BEGIN(test_alloc_max) {
	test_skip_if(!hpa_supported());

	hpa_shard_t *shard = create_test_data(&hpa_hooks_default,
	    &test_hpa_shard_opts_default);
	tsdn_t *tsdn = tsd_tsdn(tsd_fetch());

	edata_t *edata;

	/* Small max */
	bool deferred_work_generated = false;
	edata = pai_alloc(tsdn, &shard->pai, ALLOC_MAX, PAGE, false, false,
	    false, &deferred_work_generated);
	expect_ptr_not_null(edata, "Allocation of small max failed");
	edata = pai_alloc(tsdn, &shard->pai, ALLOC_MAX + PAGE, PAGE, false,
	    false, false, &deferred_work_generated);
	expect_ptr_null(edata, "Allocation of larger than small max succeeded");

	destroy_test_data(shard);
}
TEST_END
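
/*
 * The stress test records every live allocation in a red-black tree keyed by
 * address, so neighboring allocations can be checked for ordering and
 * non-overlap.
 */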
typedef struct mem_contents_s mem_contents_t;
struct mem_contents_s {
	uintptr_t my_addr;
	size_t size;
	edata_t *my_edata;
	rb_node(mem_contents_t) link;
};

static int
mem_contents_cmp(const mem_contents_t *a, const mem_contents_t *b) {
	return (a->my_addr > b->my_addr) - (a->my_addr < b->my_addr);
}

typedef rb_tree(mem_contents_t) mem_tree_t;
rb_gen(static, mem_tree_, mem_tree_t, mem_contents_t, link,
    mem_contents_cmp);

static void
node_assert_ordered(mem_contents_t *a, mem_contents_t *b) {
	assert_zu_lt(a->my_addr, a->my_addr + a->size, "Overflow");
	assert_zu_le(a->my_addr + a->size, b->my_addr, "");
}
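
/*
 * Sanity-check one tree node: its recorded address and size must match its
 * edata, and it must not overlap its neighbors in the tree.
 */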
static void
node_check(mem_tree_t *tree, mem_contents_t *contents) {
	edata_t *edata = contents->my_edata;
	assert_ptr_eq(contents, (void *)contents->my_addr, "");
	assert_ptr_eq(contents, edata_base_get(edata), "");
	assert_zu_eq(contents->size, edata_size_get(edata), "");
	assert_ptr_eq(contents->my_edata, edata, "");

	mem_contents_t *next = mem_tree_next(tree, contents);
	if (next != NULL) {
		node_assert_ordered(contents, next);
	}
	mem_contents_t *prev = mem_tree_prev(tree, contents);
	if (prev != NULL) {
		node_assert_ordered(prev, contents);
	}
}

static void
node_insert(mem_tree_t *tree, edata_t *edata, size_t npages) {
	mem_contents_t *contents = (mem_contents_t *)edata_base_get(edata);
	contents->my_addr = (uintptr_t)edata_base_get(edata);
	contents->size = edata_size_get(edata);
	contents->my_edata = edata;
	mem_tree_insert(tree, contents);
	node_check(tree, contents);
}

static void
node_remove(mem_tree_t *tree, edata_t *edata) {
	mem_contents_t *contents = (mem_contents_t *)edata_base_get(edata);
	node_check(tree, contents);
	mem_tree_remove(tree, contents);
}
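
/*
 * Randomly interleave allocations and frees, tracking live extents in the
 * tree above; finish by freeing everything so that hpa_shard_destroy can be
 * exercised.
 */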
TEST_BEGIN(test_stress) {
	test_skip_if(!hpa_supported());

	hpa_shard_t *shard = create_test_data(&hpa_hooks_default,
	    &test_hpa_shard_opts_default);

	tsdn_t *tsdn = tsd_tsdn(tsd_fetch());

	const size_t nlive_edatas_max = 500;
	size_t nlive_edatas = 0;
	edata_t **live_edatas = calloc(nlive_edatas_max, sizeof(edata_t *));
	/*
	 * Nothing special about this constant; we're only fixing it for
	 * consistency across runs.
	 */
	size_t prng_state = (size_t)0x76999ffb014df07c;

	mem_tree_t tree;
	mem_tree_new(&tree);

	bool deferred_work_generated = false;

	for (size_t i = 0; i < 100 * 1000; i++) {
		size_t operation = prng_range_zu(&prng_state, 2);
		if (operation == 0) {
			/* Alloc */
			if (nlive_edatas == nlive_edatas_max) {
				continue;
			}

			/*
			 * We make sure to get an even balance of small and
			 * large allocations.
			 */
			size_t npages_min = 1;
			size_t npages_max = ALLOC_MAX / PAGE;
			size_t npages = npages_min + prng_range_zu(&prng_state,
			    npages_max - npages_min);
			edata_t *edata = pai_alloc(tsdn, &shard->pai,
			    npages * PAGE, PAGE, false, false, false,
			    &deferred_work_generated);
			assert_ptr_not_null(edata,
			    "Unexpected allocation failure");
			live_edatas[nlive_edatas] = edata;
			nlive_edatas++;
			node_insert(&tree, edata, npages);
		} else {
			/* Free. */
			if (nlive_edatas == 0) {
				continue;
			}
			size_t victim = prng_range_zu(&prng_state,
			    nlive_edatas);
			edata_t *to_free = live_edatas[victim];
			live_edatas[victim] = live_edatas[nlive_edatas - 1];
			nlive_edatas--;
			node_remove(&tree, to_free);
			pai_dalloc(tsdn, &shard->pai, to_free,
			    &deferred_work_generated);
		}
	}

	size_t ntreenodes = 0;
	for (mem_contents_t *contents = mem_tree_first(&tree); contents != NULL;
	    contents = mem_tree_next(&tree, contents)) {
		ntreenodes++;
		node_check(&tree, contents);
	}
	expect_zu_eq(ntreenodes, nlive_edatas, "");

	/*
	 * Test hpa_shard_destroy, which requires as a precondition that all
	 * its extents have been deallocated.
	 */
	for (size_t i = 0; i < nlive_edatas; i++) {
		edata_t *to_free = live_edatas[i];
		node_remove(&tree, to_free);
		pai_dalloc(tsdn, &shard->pai, to_free,
		    &deferred_work_generated);
	}
	hpa_shard_destroy(tsdn, shard);

	free(live_edatas);
	destroy_test_data(shard);
}
TEST_END
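
/*
 * Expects the edatas to occupy consecutive pages, starting at the base of
 * edatas[0].
 */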
static void
expect_contiguous(edata_t **edatas, size_t nedatas) {
	for (size_t i = 0; i < nedatas; i++) {
		size_t expected = (size_t)edata_base_get(edatas[0])
		    + i * PAGE;
		expect_zu_eq(expected, (size_t)edata_base_get(edatas[i]),
		    "Mismatch at index %zu", i);
	}
}
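
/*
 * Mix single and batched alloc/dalloc paths and check that they draw from the
 * same contiguous run of pages.
 */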
TEST_BEGIN(test_alloc_dalloc_batch) {
	test_skip_if(!hpa_supported());

	hpa_shard_t *shard = create_test_data(&hpa_hooks_default,
	    &test_hpa_shard_opts_default);
	tsdn_t *tsdn = tsd_tsdn(tsd_fetch());

	bool deferred_work_generated = false;

	enum {NALLOCS = 8};

	edata_t *allocs[NALLOCS];
	/*
	 * Allocate a mix of ways; first half from regular alloc, second half
	 * from alloc_batch.
	 */
	for (size_t i = 0; i < NALLOCS / 2; i++) {
		allocs[i] = pai_alloc(tsdn, &shard->pai, PAGE, PAGE,
		    /* zero */ false, /* guarded */ false,
		    /* frequent_reuse */ false, &deferred_work_generated);
		expect_ptr_not_null(allocs[i], "Unexpected alloc failure");
	}
	edata_list_active_t allocs_list;
	edata_list_active_init(&allocs_list);
	size_t nsuccess = pai_alloc_batch(tsdn, &shard->pai, PAGE, NALLOCS / 2,
	    &allocs_list, &deferred_work_generated);
	expect_zu_eq(NALLOCS / 2, nsuccess, "Unexpected oom");
	for (size_t i = NALLOCS / 2; i < NALLOCS; i++) {
		allocs[i] = edata_list_active_first(&allocs_list);
		edata_list_active_remove(&allocs_list, allocs[i]);
	}

	/*
	 * Should have allocated them contiguously, despite the differing
	 * methods used.
	 */
	void *orig_base = edata_base_get(allocs[0]);
	expect_contiguous(allocs, NALLOCS);

	/*
	 * Batch dalloc the first half, individually deallocate the second
	 * half.
	 */
	for (size_t i = 0; i < NALLOCS / 2; i++) {
		edata_list_active_append(&allocs_list, allocs[i]);
	}
	pai_dalloc_batch(tsdn, &shard->pai, &allocs_list,
	    &deferred_work_generated);
	for (size_t i = NALLOCS / 2; i < NALLOCS; i++) {
		pai_dalloc(tsdn, &shard->pai, allocs[i],
		    &deferred_work_generated);
	}

	/* Reallocate (individually), and ensure reuse and contiguity. */
	for (size_t i = 0; i < NALLOCS; i++) {
		allocs[i] = pai_alloc(tsdn, &shard->pai, PAGE, PAGE,
		    /* zero */ false, /* guarded */ false, /* frequent_reuse */
		    false, &deferred_work_generated);
		expect_ptr_not_null(allocs[i], "Unexpected alloc failure.");
	}
	void *new_base = edata_base_get(allocs[0]);
	expect_ptr_eq(orig_base, new_base,
	    "Failed to reuse the allocated memory.");
	expect_contiguous(allocs, NALLOCS);

	destroy_test_data(shard);
}
TEST_END
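
/*
 * Stub HPA hooks for test_defer_time: map hands out addresses from a fake
 * bump pointer, time is driven by defer_curtime, and the purge/hugify/
 * dehugify hooks simply record that they were called.
 */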
static uintptr_t defer_bump_ptr = HUGEPAGE * 123;
static void *
defer_test_map(size_t size) {
	void *result = (void *)defer_bump_ptr;
	defer_bump_ptr += size;
	return result;
}

static void
defer_test_unmap(void *ptr, size_t size) {
	(void)ptr;
	(void)size;
}

static bool defer_purge_called = false;
static void
defer_test_purge(void *ptr, size_t size) {
	(void)ptr;
	(void)size;
	defer_purge_called = true;
}

static bool defer_hugify_called = false;
static void
defer_test_hugify(void *ptr, size_t size) {
	defer_hugify_called = true;
}

static bool defer_dehugify_called = false;
static void
defer_test_dehugify(void *ptr, size_t size) {
	defer_dehugify_called = true;
}

static nstime_t defer_curtime;
static void
defer_test_curtime(nstime_t *r_time, bool first_reading) {
	*r_time = defer_curtime;
}

static uint64_t
defer_test_ms_since(nstime_t *past_time) {
	return (nstime_ns(&defer_curtime) - nstime_ns(past_time)) / 1000 / 1000;
}
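
/*
 * Checks that hugification and purging happen only via
 * hpa_shard_do_deferred_work, and only once the configured hugify delay and
 * dirty_mult conditions are met.
 */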
TEST_BEGIN(test_defer_time) {
	test_skip_if(!hpa_supported());

	hpa_hooks_t hooks;
	hooks.map = &defer_test_map;
	hooks.unmap = &defer_test_unmap;
	hooks.purge = &defer_test_purge;
	hooks.hugify = &defer_test_hugify;
	hooks.dehugify = &defer_test_dehugify;
	hooks.curtime = &defer_test_curtime;
	hooks.ms_since = &defer_test_ms_since;

	hpa_shard_opts_t opts = test_hpa_shard_opts_default;
	opts.deferral_allowed = true;

	hpa_shard_t *shard = create_test_data(&hooks, &opts);

	bool deferred_work_generated = false;

	nstime_init(&defer_curtime, 0);
	tsdn_t *tsdn = tsd_tsdn(tsd_fetch());
	edata_t *edatas[HUGEPAGE_PAGES];
	for (int i = 0; i < (int)HUGEPAGE_PAGES; i++) {
		edatas[i] = pai_alloc(tsdn, &shard->pai, PAGE, PAGE, false,
		    false, false, &deferred_work_generated);
		expect_ptr_not_null(edatas[i], "Unexpected null edata");
	}
	hpa_shard_do_deferred_work(tsdn, shard);
	expect_false(defer_hugify_called, "Hugified too early");

	/* Hugification delay is set to 10 seconds in options. */
	nstime_init2(&defer_curtime, 11, 0);
	hpa_shard_do_deferred_work(tsdn, shard);
	expect_true(defer_hugify_called, "Failed to hugify");

	defer_hugify_called = false;

	/* Purge.  Recall that dirty_mult is .25. */
	for (int i = 0; i < (int)HUGEPAGE_PAGES / 2; i++) {
		pai_dalloc(tsdn, &shard->pai, edatas[i],
		    &deferred_work_generated);
	}
	hpa_shard_do_deferred_work(tsdn, shard);
	expect_false(defer_hugify_called, "Hugified too early");
	expect_true(defer_dehugify_called, "Should have dehugified");
	expect_true(defer_purge_called, "Should have purged");
	defer_hugify_called = false;
	defer_dehugify_called = false;
	defer_purge_called = false;

	/*
	 * Refill the page.  We now meet the hugification threshold; we should
	 * be marked for pending hugify.
	 */
	for (int i = 0; i < (int)HUGEPAGE_PAGES / 2; i++) {
		edatas[i] = pai_alloc(tsdn, &shard->pai, PAGE, PAGE, false,
		    false, false, &deferred_work_generated);
		expect_ptr_not_null(edatas[i], "Unexpected null edata");
	}
	/*
	 * We would be ineligible for hugification, had we not already met the
	 * threshold before dipping below it.
	 */
	pai_dalloc(tsdn, &shard->pai, edatas[0],
	    &deferred_work_generated);

	/* Wait for the threshold again. */
	nstime_init2(&defer_curtime, 22, 0);
	hpa_shard_do_deferred_work(tsdn, shard);
	/* expect_true reports its message on failure, i.e. when no hugify happened. */
	expect_true(defer_hugify_called, "Failed to hugify");
	expect_false(defer_dehugify_called, "Unexpected dehugify");
	expect_false(defer_purge_called, "Unexpected purge");

	destroy_test_data(shard);
}
TEST_END

int
main(void) {
	/*
	 * These trigger unused-function warnings on CI runs, even if declared
	 * with static inline.
	 */
	(void)mem_tree_empty;
	(void)mem_tree_last;
	(void)mem_tree_search;
	(void)mem_tree_nsearch;
	(void)mem_tree_psearch;
	(void)mem_tree_iter;
	(void)mem_tree_reverse_iter;
	(void)mem_tree_destroy;

	return test_no_reentrancy(
	    test_alloc_max,
	    test_stress,
	    test_alloc_dalloc_batch,
	    test_defer_time);
}