#include "test/jemalloc_test.h"

#include "jemalloc/internal/sec.h"
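
/*
 * A test-only pai_t implementation that serves as the SEC's fallback in these
 * tests. Each hook just bumps a counter (and, for alloc, hands out addresses
 * from a bump allocator), so tests can observe exactly which operations the
 * SEC forwards to the fallback.
 */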
typedef struct pai_test_allocator_s pai_test_allocator_t;
struct pai_test_allocator_s {
	pai_t pai;
	bool alloc_fail;
	size_t alloc_count;
	size_t alloc_batch_count;
	size_t dalloc_count;
	size_t dalloc_batch_count;
	/*
	 * We use a simple bump allocator as the implementation. This isn't
	 * *really* correct, since we may allow expansion into a subsequent
	 * allocation, but it's not like the SEC is really examining the
	 * pointers it gets back; this is mostly just helpful for debugging.
	 */
	uintptr_t next_ptr;
	size_t expand_count;
	bool expand_return_value;
	size_t shrink_count;
	bool shrink_return_value;
};
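
/* Set up an SEC over the given fallback PAI with the supplied size limits. */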
static void
test_sec_init(sec_t *sec, pai_t *fallback, size_t nshards, size_t max_alloc,
    size_t max_bytes) {
	sec_opts_t opts;
	opts.nshards = 1;
	opts.max_alloc = max_alloc;
	opts.max_bytes = max_bytes;
	/*
	 * Just choose reasonable defaults for these; most tests don't care so
	 * long as they're something reasonable.
	 */
	opts.bytes_after_flush = max_bytes / 2;
	opts.batch_fill_extra = 4;

	/*
	 * We end up leaking this base, but that's fine; this test is
	 * short-running, and SECs are arena-scoped in reality.
	 */
	base_t *base = base_new(TSDN_NULL, /* ind */ 123,
	    &ehooks_default_extent_hooks, /* metadata_use_hooks */ true);

	bool err = sec_init(TSDN_NULL, sec, base, fallback, &opts);
	assert_false(err, "Unexpected initialization failure");
	assert_u_ge(sec->npsizes, 0, "Zero size classes allowed for caching");
}

static inline edata_t *
pai_test_allocator_alloc(tsdn_t *tsdn, pai_t *self, size_t size,
    size_t alignment, bool zero, bool guarded, bool frequent_reuse,
    bool *deferred_work_generated) {
	assert(!guarded);
	pai_test_allocator_t *ta = (pai_test_allocator_t *)self;
	if (ta->alloc_fail) {
		return NULL;
	}
	edata_t *edata = malloc(sizeof(edata_t));
	assert_ptr_not_null(edata, "");
	ta->next_ptr += alignment - 1;
	edata_init(edata, /* arena_ind */ 0,
	    (void *)(ta->next_ptr & ~(alignment - 1)), size,
	    /* slab */ false,
	    /* szind */ 0, /* sn */ 1, extent_state_active, /* zero */ zero,
	    /* committed */ true, /* ranged */ false, EXTENT_NOT_HEAD);
	ta->next_ptr += size;
	ta->alloc_count++;
	return edata;
}

static inline size_t
pai_test_allocator_alloc_batch(tsdn_t *tsdn, pai_t *self, size_t size,
    size_t nallocs, edata_list_active_t *results,
    bool *deferred_work_generated) {
	pai_test_allocator_t *ta = (pai_test_allocator_t *)self;
	if (ta->alloc_fail) {
		return 0;
	}
	for (size_t i = 0; i < nallocs; i++) {
		edata_t *edata = malloc(sizeof(edata_t));
		assert_ptr_not_null(edata, "");
		edata_init(edata, /* arena_ind */ 0,
		    (void *)ta->next_ptr, size,
		    /* slab */ false, /* szind */ 0, /* sn */ 1,
		    extent_state_active, /* zero */ false, /* committed */ true,
		    /* ranged */ false, EXTENT_NOT_HEAD);
		ta->next_ptr += size;
		ta->alloc_batch_count++;
		edata_list_active_append(results, edata);
	}
	return nallocs;
}

static bool
pai_test_allocator_expand(tsdn_t *tsdn, pai_t *self, edata_t *edata,
    size_t old_size, size_t new_size, bool zero,
    bool *deferred_work_generated) {
	pai_test_allocator_t *ta = (pai_test_allocator_t *)self;
	ta->expand_count++;
	return ta->expand_return_value;
}

static bool
pai_test_allocator_shrink(tsdn_t *tsdn, pai_t *self, edata_t *edata,
    size_t old_size, size_t new_size, bool *deferred_work_generated) {
	pai_test_allocator_t *ta = (pai_test_allocator_t *)self;
	ta->shrink_count++;
	return ta->shrink_return_value;
}

static void
pai_test_allocator_dalloc(tsdn_t *tsdn, pai_t *self, edata_t *edata,
    bool *deferred_work_generated) {
	pai_test_allocator_t *ta = (pai_test_allocator_t *)self;
	ta->dalloc_count++;
	free(edata);
}

static void
pai_test_allocator_dalloc_batch(tsdn_t *tsdn, pai_t *self,
    edata_list_active_t *list, bool *deferred_work_generated) {
	pai_test_allocator_t *ta = (pai_test_allocator_t *)self;

	edata_t *edata;
	while ((edata = edata_list_active_first(list)) != NULL) {
		edata_list_active_remove(list, edata);
		ta->dalloc_batch_count++;
		free(edata);
	}
}

static inline void
pai_test_allocator_init(pai_test_allocator_t *ta) {
	ta->alloc_fail = false;
	ta->alloc_count = 0;
	ta->alloc_batch_count = 0;
	ta->dalloc_count = 0;
	ta->dalloc_batch_count = 0;
	/* Just don't start the edata at 0. */
	ta->next_ptr = 10 * PAGE;
	ta->expand_count = 0;
	ta->expand_return_value = false;
	ta->shrink_count = 0;
	ta->shrink_return_value = false;
	ta->pai.alloc = &pai_test_allocator_alloc;
	ta->pai.alloc_batch = &pai_test_allocator_alloc_batch;
	ta->pai.expand = &pai_test_allocator_expand;
	ta->pai.shrink = &pai_test_allocator_shrink;
	ta->pai.dalloc = &pai_test_allocator_dalloc;
	ta->pai.dalloc_batch = &pai_test_allocator_dalloc_batch;
}
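
/*
 * Check that extents freed into the SEC are cached and handed back, in
 * most-recently-freed-first order within each size class, without any
 * deallocations ever reaching the fallback.
 */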
TEST_BEGIN(test_reuse) {
	pai_test_allocator_t ta;
	pai_test_allocator_init(&ta);
	sec_t sec;
	/*
	 * We can't use the "real" tsd, since we malloc within the test
	 * allocator hooks; we'd get lock inversion crashes. Eventually, we
	 * should have a way to mock tsds, but for now just don't do any
	 * lock-order checking.
	 */
	tsdn_t *tsdn = TSDN_NULL;
	/*
	 * 11 allocs apiece of 1-PAGE and 2-PAGE objects means that we should
	 * be able to get to 33 pages in the cache before triggering a flush.
	 * We set the flush limit to twice this amount, to avoid accidentally
	 * triggering a flush caused by the batch-allocation down the cache
	 * fill pathway disrupting ordering.
	 */
	enum { NALLOCS = 11 };
	edata_t *one_page[NALLOCS];
	edata_t *two_page[NALLOCS];
	bool deferred_work_generated = false;
	test_sec_init(&sec, &ta.pai, /* nshards */ 1, /* max_alloc */ 2 * PAGE,
	    /* max_bytes */ 2 * (NALLOCS * PAGE + NALLOCS * 2 * PAGE));
	for (int i = 0; i < NALLOCS; i++) {
		one_page[i] = pai_alloc(tsdn, &sec.pai, PAGE, PAGE,
		    /* zero */ false, /* guarded */ false, /* frequent_reuse */
		    false, &deferred_work_generated);
		expect_ptr_not_null(one_page[i], "Unexpected alloc failure");
		two_page[i] = pai_alloc(tsdn, &sec.pai, 2 * PAGE, PAGE,
		    /* zero */ false, /* guarded */ false, /* frequent_reuse */
		    false, &deferred_work_generated);
		expect_ptr_not_null(two_page[i], "Unexpected alloc failure");
	}
	expect_zu_eq(0, ta.alloc_count, "Should be using batch allocs");
	size_t max_allocs = ta.alloc_count + ta.alloc_batch_count;
	expect_zu_le(2 * NALLOCS, max_allocs,
	    "Incorrect number of allocations");
	expect_zu_eq(0, ta.dalloc_count,
	    "Incorrect number of deallocations");
	/*
	 * Free in a different order than we allocated, to make sure free-list
	 * separation works correctly.
	 */
	for (int i = NALLOCS - 1; i >= 0; i--) {
		pai_dalloc(tsdn, &sec.pai, one_page[i],
		    &deferred_work_generated);
	}
	for (int i = NALLOCS - 1; i >= 0; i--) {
		pai_dalloc(tsdn, &sec.pai, two_page[i],
		    &deferred_work_generated);
	}
	expect_zu_eq(max_allocs, ta.alloc_count + ta.alloc_batch_count,
	    "Incorrect number of allocations");
	expect_zu_eq(0, ta.dalloc_count,
	    "Incorrect number of deallocations");
	/*
	 * Check that the n'th most recent deallocated extent is returned for
	 * the n'th alloc request of a given size.
	 */
	for (int i = 0; i < NALLOCS; i++) {
		edata_t *alloc1 = pai_alloc(tsdn, &sec.pai, PAGE, PAGE,
		    /* zero */ false, /* guarded */ false, /* frequent_reuse */
		    false, &deferred_work_generated);
		edata_t *alloc2 = pai_alloc(tsdn, &sec.pai, 2 * PAGE, PAGE,
		    /* zero */ false, /* guarded */ false, /* frequent_reuse */
		    false, &deferred_work_generated);
		expect_ptr_eq(one_page[i], alloc1,
		    "Got unexpected allocation");
		expect_ptr_eq(two_page[i], alloc2,
		    "Got unexpected allocation");
	}
	expect_zu_eq(max_allocs, ta.alloc_count + ta.alloc_batch_count,
	    "Incorrect number of allocations");
	expect_zu_eq(0, ta.dalloc_count,
	    "Incorrect number of deallocations");
}
TEST_END
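
/*
 * Check that deallocating past max_bytes triggers an automatic flush, and
 * that the flush sends the cached extents back to the fallback via the batch
 * dalloc pathway.
 */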
TEST_BEGIN(test_auto_flush) {
	pai_test_allocator_t ta;
	pai_test_allocator_init(&ta);
	sec_t sec;
	/* See the note above -- we can't use the real tsd. */
	tsdn_t *tsdn = TSDN_NULL;
	/*
	 * 10 allocs of 1-PAGE objects means that we should be able to get to
	 * 10 pages in the cache before triggering a flush. The choice of
	 * NALLOCS here is chosen to match the batch allocation default
	 * (4 extra + 1 == 5; so 10 allocations leaves the cache exactly
	 * empty, even in the presence of batch allocation on fill).
	 * Eventually, once our allocation batching strategies become smarter,
	 * this should change.
	 */
	enum { NALLOCS = 10 };
	edata_t *extra_alloc;
	edata_t *allocs[NALLOCS];
	bool deferred_work_generated = false;
	test_sec_init(&sec, &ta.pai, /* nshards */ 1, /* max_alloc */ PAGE,
	    /* max_bytes */ NALLOCS * PAGE);
	for (int i = 0; i < NALLOCS; i++) {
		allocs[i] = pai_alloc(tsdn, &sec.pai, PAGE, PAGE,
		    /* zero */ false, /* guarded */ false, /* frequent_reuse */
		    false, &deferred_work_generated);
		expect_ptr_not_null(allocs[i], "Unexpected alloc failure");
	}
	extra_alloc = pai_alloc(tsdn, &sec.pai, PAGE, PAGE, /* zero */ false,
	    /* guarded */ false, /* frequent_reuse */ false,
	    &deferred_work_generated);
	expect_ptr_not_null(extra_alloc, "Unexpected alloc failure");
	size_t max_allocs = ta.alloc_count + ta.alloc_batch_count;
	expect_zu_le(NALLOCS + 1, max_allocs,
	    "Incorrect number of allocations");
	expect_zu_eq(0, ta.dalloc_count,
	    "Incorrect number of deallocations");
	/* Free until the SEC is full, but should not have flushed yet. */
	for (int i = 0; i < NALLOCS; i++) {
		pai_dalloc(tsdn, &sec.pai, allocs[i], &deferred_work_generated);
	}
	expect_zu_le(NALLOCS + 1, max_allocs,
	    "Incorrect number of allocations");
	expect_zu_eq(0, ta.dalloc_count,
	    "Incorrect number of deallocations");
	/*
	 * Free the extra allocation; this should trigger a flush. The internal
	 * flushing logic is allowed to get complicated; for now, we rely on
	 * our whitebox knowledge of the fact that the SEC flushes bins in
	 * their entirety when it decides to do so, and it has only one bin
	 * active right now.
	 */
	pai_dalloc(tsdn, &sec.pai, extra_alloc, &deferred_work_generated);
	expect_zu_eq(max_allocs, ta.alloc_count + ta.alloc_batch_count,
	    "Incorrect number of allocations");
	expect_zu_eq(0, ta.dalloc_count,
	    "Incorrect number of (non-batch) deallocations");
	expect_zu_eq(NALLOCS + 1, ta.dalloc_batch_count,
	    "Incorrect number of batch deallocations");
}
TEST_END

/*
 * A disable and a flush are *almost* equivalent; the only difference is what
 * happens afterwards; disabling disallows all future caching as well.
 */
static void
do_disable_flush_test(bool is_disable) {
	pai_test_allocator_t ta;
	pai_test_allocator_init(&ta);
	sec_t sec;
	/* See the note above -- we can't use the real tsd. */
	tsdn_t *tsdn = TSDN_NULL;

	enum { NALLOCS = 11 };
	edata_t *allocs[NALLOCS];
	bool deferred_work_generated = false;
	test_sec_init(&sec, &ta.pai, /* nshards */ 1, /* max_alloc */ PAGE,
	    /* max_bytes */ NALLOCS * PAGE);
	for (int i = 0; i < NALLOCS; i++) {
		allocs[i] = pai_alloc(tsdn, &sec.pai, PAGE, PAGE,
		    /* zero */ false, /* guarded */ false, /* frequent_reuse */
		    false, &deferred_work_generated);
		expect_ptr_not_null(allocs[i], "Unexpected alloc failure");
	}
	/* Free all but the last alloc. */
	for (int i = 0; i < NALLOCS - 1; i++) {
		pai_dalloc(tsdn, &sec.pai, allocs[i], &deferred_work_generated);
	}
	size_t max_allocs = ta.alloc_count + ta.alloc_batch_count;
	expect_zu_le(NALLOCS, max_allocs, "Incorrect number of allocations");
	expect_zu_eq(0, ta.dalloc_count,
	    "Incorrect number of deallocations");

	if (is_disable) {
		sec_disable(tsdn, &sec);
	} else {
		sec_flush(tsdn, &sec);
	}

	expect_zu_eq(max_allocs, ta.alloc_count + ta.alloc_batch_count,
	    "Incorrect number of allocations");
	expect_zu_eq(0, ta.dalloc_count,
	    "Incorrect number of (non-batch) deallocations");
	expect_zu_le(NALLOCS - 1, ta.dalloc_batch_count,
	    "Incorrect number of batch deallocations");
	size_t old_dalloc_batch_count = ta.dalloc_batch_count;

	/*
	 * If we free into a disabled SEC, it should forward to the fallback.
	 * Otherwise, the SEC should accept the allocation.
	 */
	pai_dalloc(tsdn, &sec.pai, allocs[NALLOCS - 1],
	    &deferred_work_generated);
	expect_zu_eq(max_allocs, ta.alloc_count + ta.alloc_batch_count,
	    "Incorrect number of allocations");
	expect_zu_eq(is_disable ? 1 : 0, ta.dalloc_count,
	    "Incorrect number of (non-batch) deallocations");
	expect_zu_eq(old_dalloc_batch_count, ta.dalloc_batch_count,
	    "Incorrect number of batch deallocations");
}

TEST_BEGIN(test_disable) {
	do_disable_flush_test(/* is_disable */ true);
}
TEST_END

TEST_BEGIN(test_flush) {
	do_disable_flush_test(/* is_disable */ false);
}
TEST_END
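
/*
 * Requests larger than max_alloc should bypass the cache entirely: every such
 * alloc and dalloc must go straight through to the fallback.
 */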
TEST_BEGIN(test_max_alloc_respected) {
	pai_test_allocator_t ta;
	pai_test_allocator_init(&ta);
	sec_t sec;
	/* See the note above -- we can't use the real tsd. */
	tsdn_t *tsdn = TSDN_NULL;

	size_t max_alloc = 2 * PAGE;
	size_t attempted_alloc = 3 * PAGE;

	bool deferred_work_generated = false;

	test_sec_init(&sec, &ta.pai, /* nshards */ 1, max_alloc,
	    /* max_bytes */ 1000 * PAGE);

	for (size_t i = 0; i < 100; i++) {
		expect_zu_eq(i, ta.alloc_count,
		    "Incorrect number of allocations");
		expect_zu_eq(i, ta.dalloc_count,
		    "Incorrect number of deallocations");
		edata_t *edata = pai_alloc(tsdn, &sec.pai, attempted_alloc,
		    PAGE, /* zero */ false, /* guarded */ false,
		    /* frequent_reuse */ false, &deferred_work_generated);
		expect_ptr_not_null(edata, "Unexpected alloc failure");
		expect_zu_eq(i + 1, ta.alloc_count,
		    "Incorrect number of allocations");
		expect_zu_eq(i, ta.dalloc_count,
		    "Incorrect number of deallocations");
		pai_dalloc(tsdn, &sec.pai, edata, &deferred_work_generated);
	}
}
TEST_END

TEST_BEGIN(test_expand_shrink_delegate) {
	/*
	 * Expand and shrink shouldn't affect sec state; they should just
	 * delegate to the fallback PAI.
	 */
	pai_test_allocator_t ta;
	pai_test_allocator_init(&ta);
	sec_t sec;
	/* See the note above -- we can't use the real tsd. */
	tsdn_t *tsdn = TSDN_NULL;
	bool deferred_work_generated = false;
	test_sec_init(&sec, &ta.pai, /* nshards */ 1, /* max_alloc */ 10 * PAGE,
	    /* max_bytes */ 1000 * PAGE);
	edata_t *edata = pai_alloc(tsdn, &sec.pai, PAGE, PAGE,
	    /* zero */ false, /* guarded */ false, /* frequent_reuse */ false,
	    &deferred_work_generated);
	expect_ptr_not_null(edata, "Unexpected alloc failure");

	bool err = pai_expand(tsdn, &sec.pai, edata, PAGE, 4 * PAGE,
	    /* zero */ false, &deferred_work_generated);
	expect_false(err, "Unexpected expand failure");
	expect_zu_eq(1, ta.expand_count, "");
	ta.expand_return_value = true;
	err = pai_expand(tsdn, &sec.pai, edata, 4 * PAGE, 3 * PAGE,
	    /* zero */ false, &deferred_work_generated);
	expect_true(err, "Unexpected expand success");
	expect_zu_eq(2, ta.expand_count, "");

	err = pai_shrink(tsdn, &sec.pai, edata, 4 * PAGE, 2 * PAGE,
	    &deferred_work_generated);
	expect_false(err, "Unexpected shrink failure");
	expect_zu_eq(1, ta.shrink_count, "");
	ta.shrink_return_value = true;
	err = pai_shrink(tsdn, &sec.pai, edata, 2 * PAGE, PAGE,
	    &deferred_work_generated);
	expect_true(err, "Unexpected shrink success");
	expect_zu_eq(2, ta.shrink_count, "");
}
TEST_END
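
/*
 * With nshards == 0, the SEC does no caching at all; allocs and dallocs are
 * forwarded directly to the fallback.
 */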
TEST_BEGIN(test_nshards_0) {
	pai_test_allocator_t ta;
	pai_test_allocator_init(&ta);
	sec_t sec;
	/* See the note above -- we can't use the real tsd. */
	tsdn_t *tsdn = TSDN_NULL;
	base_t *base = base_new(TSDN_NULL, /* ind */ 123,
	    &ehooks_default_extent_hooks, /* metadata_use_hooks */ true);

	sec_opts_t opts = SEC_OPTS_DEFAULT;
	opts.nshards = 0;
	sec_init(TSDN_NULL, &sec, base, &ta.pai, &opts);

	bool deferred_work_generated = false;
	edata_t *edata = pai_alloc(tsdn, &sec.pai, PAGE, PAGE,
	    /* zero */ false, /* guarded */ false, /* frequent_reuse */ false,
	    &deferred_work_generated);
	pai_dalloc(tsdn, &sec.pai, edata, &deferred_work_generated);

	/* Both operations should have gone directly to the fallback. */
	expect_zu_eq(1, ta.alloc_count, "");
	expect_zu_eq(1, ta.dalloc_count, "");
}
TEST_END
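
/*
 * Assert that the SEC reports at least npages worth of cached bytes. The
 * pre-seeded value also verifies that sec_stats_merge accumulates into the
 * caller's stats rather than overwriting them.
 */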
static void
expect_stats_pages(tsdn_t *tsdn, sec_t *sec, size_t npages) {
	sec_stats_t stats;
	/*
	 * Check that the stats merging accumulates rather than overwrites by
	 * putting some (made up) data there to begin with.
	 */
	stats.bytes = 123;
	sec_stats_merge(tsdn, sec, &stats);
	assert_zu_le(npages * PAGE + 123, stats.bytes, "");
}
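
/*
 * Check that cached-byte stats track allocations and deallocations exactly
 * while the cache stays below its flush threshold.
 */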
TEST_BEGIN(test_stats_simple) {
	pai_test_allocator_t ta;
	pai_test_allocator_init(&ta);
	sec_t sec;
	/* See the note above -- we can't use the real tsd. */
	tsdn_t *tsdn = TSDN_NULL;

	enum {
		NITERS = 100,
		FLUSH_PAGES = 20,
	};

	bool deferred_work_generated = false;

	test_sec_init(&sec, &ta.pai, /* nshards */ 1, /* max_alloc */ PAGE,
	    /* max_bytes */ FLUSH_PAGES * PAGE);

	edata_t *allocs[FLUSH_PAGES];
	for (size_t i = 0; i < FLUSH_PAGES; i++) {
		allocs[i] = pai_alloc(tsdn, &sec.pai, PAGE, PAGE,
		    /* zero */ false, /* guarded */ false, /* frequent_reuse */
		    false, &deferred_work_generated);
		expect_stats_pages(tsdn, &sec, 0);
	}

	/* Increase and decrease, without flushing. */
	for (size_t i = 0; i < NITERS; i++) {
		for (size_t j = 0; j < FLUSH_PAGES / 2; j++) {
			pai_dalloc(tsdn, &sec.pai, allocs[j],
			    &deferred_work_generated);
			expect_stats_pages(tsdn, &sec, j + 1);
		}
		for (size_t j = 0; j < FLUSH_PAGES / 2; j++) {
			allocs[j] = pai_alloc(tsdn, &sec.pai, PAGE, PAGE,
			    /* zero */ false, /* guarded */ false,
			    /* frequent_reuse */ false,
			    &deferred_work_generated);
			expect_stats_pages(tsdn, &sec, FLUSH_PAGES / 2 - j - 1);
		}
	}
}
TEST_END
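
/*
 * Check that stats stay consistent across automatic flushes: at the end, the
 * bytes reported as cached should match the extents the fallback has handed
 * out but not yet received back.
 */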
TEST_BEGIN(test_stats_auto_flush) {
	pai_test_allocator_t ta;
	pai_test_allocator_init(&ta);
	sec_t sec;
	/* See the note above -- we can't use the real tsd. */
	tsdn_t *tsdn = TSDN_NULL;

	enum {
		FLUSH_PAGES = 10,
	};

	test_sec_init(&sec, &ta.pai, /* nshards */ 1, /* max_alloc */ PAGE,
	    /* max_bytes */ FLUSH_PAGES * PAGE);

	edata_t *extra_alloc0;
	edata_t *extra_alloc1;
	edata_t *allocs[2 * FLUSH_PAGES];

	bool deferred_work_generated = false;

	extra_alloc0 = pai_alloc(tsdn, &sec.pai, PAGE, PAGE, /* zero */ false,
	    /* guarded */ false, /* frequent_reuse */ false,
	    &deferred_work_generated);
	extra_alloc1 = pai_alloc(tsdn, &sec.pai, PAGE, PAGE, /* zero */ false,
	    /* guarded */ false, /* frequent_reuse */ false,
	    &deferred_work_generated);

	for (size_t i = 0; i < 2 * FLUSH_PAGES; i++) {
		allocs[i] = pai_alloc(tsdn, &sec.pai, PAGE, PAGE,
		    /* zero */ false, /* guarded */ false, /* frequent_reuse */
		    false, &deferred_work_generated);
	}

	for (size_t i = 0; i < FLUSH_PAGES; i++) {
		pai_dalloc(tsdn, &sec.pai, allocs[i], &deferred_work_generated);
	}
	pai_dalloc(tsdn, &sec.pai, extra_alloc0, &deferred_work_generated);

	/* Flush the remaining pages; stats should still work. */
	for (size_t i = 0; i < FLUSH_PAGES; i++) {
		pai_dalloc(tsdn, &sec.pai, allocs[FLUSH_PAGES + i],
		    &deferred_work_generated);
	}
	pai_dalloc(tsdn, &sec.pai, extra_alloc1, &deferred_work_generated);

	expect_stats_pages(tsdn, &sec, ta.alloc_count + ta.alloc_batch_count
	    - ta.dalloc_count - ta.dalloc_batch_count);
}
TEST_END
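
/*
 * Check that both sec_flush() and sec_disable() empty the cache, dropping the
 * reported cached bytes back to zero.
 */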
TEST_BEGIN(test_stats_manual_flush) {
	pai_test_allocator_t ta;
	pai_test_allocator_init(&ta);
	sec_t sec;
	/* See the note above -- we can't use the real tsd. */
	tsdn_t *tsdn = TSDN_NULL;

	enum {
		FLUSH_PAGES = 10,
	};

	test_sec_init(&sec, &ta.pai, /* nshards */ 1, /* max_alloc */ PAGE,
	    /* max_bytes */ FLUSH_PAGES * PAGE);

	bool deferred_work_generated = false;
	edata_t *allocs[FLUSH_PAGES];
	for (size_t i = 0; i < FLUSH_PAGES; i++) {
		allocs[i] = pai_alloc(tsdn, &sec.pai, PAGE, PAGE,
		    /* zero */ false, /* guarded */ false, /* frequent_reuse */
		    false, &deferred_work_generated);
		expect_stats_pages(tsdn, &sec, 0);
	}

	/* Dalloc the first half of the allocations. */
	for (size_t i = 0; i < FLUSH_PAGES / 2; i++) {
		pai_dalloc(tsdn, &sec.pai, allocs[i], &deferred_work_generated);
		expect_stats_pages(tsdn, &sec, i + 1);
	}

	sec_flush(tsdn, &sec);
	expect_stats_pages(tsdn, &sec, 0);

	/* Flush the remaining pages. */
	for (size_t i = 0; i < FLUSH_PAGES / 2; i++) {
		pai_dalloc(tsdn, &sec.pai, allocs[FLUSH_PAGES / 2 + i],
		    &deferred_work_generated);
		expect_stats_pages(tsdn, &sec, i + 1);
	}
	sec_disable(tsdn, &sec);
	expect_stats_pages(tsdn, &sec, 0);
}
TEST_END

int
main(void) {
	return test(
	    test_reuse,
	    test_auto_flush,
	    test_disable,
	    test_flush,
	    test_max_alloc_respected,
	    test_expand_shrink_delegate,
	    test_nshards_0,
	    test_stats_simple,
	    test_stats_auto_flush,
	    test_stats_manual_flush);
}