/* retained.c */

#include "test/jemalloc_test.h"

#include "jemalloc/internal/san.h"
#include "jemalloc/internal/spin.h"

static unsigned arena_ind;
static size_t sz;
static size_t esz;

#define NEPOCHS 8
#define PER_THD_NALLOCS 1

static atomic_u_t epoch;
static atomic_u_t nfinished;
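/*
 * Create a new arena via the "arenas.create" mallctl, optionally installing
 * custom extent hooks, and return its index.
 */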
static unsigned
do_arena_create(extent_hooks_t *h) {
    unsigned new_arena_ind;
    size_t ind_sz = sizeof(unsigned);
    expect_d_eq(mallctl("arenas.create", (void *)&new_arena_ind, &ind_sz,
        (void *)(h != NULL ? &h : NULL), (h != NULL ? sizeof(h) : 0)), 0,
        "Unexpected mallctl() failure");
    return new_arena_ind;
}
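/*
 * Destroy the arena with the given index through the "arena.<i>.destroy"
 * mallctl MIB.
 */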
static void
do_arena_destroy(unsigned ind) {
    size_t mib[3];
    size_t miblen;

    miblen = sizeof(mib) / sizeof(size_t);
    expect_d_eq(mallctlnametomib("arena.0.destroy", mib, &miblen), 0,
        "Unexpected mallctlnametomib() failure");
    mib[1] = (size_t)ind;
    expect_d_eq(mallctlbymib(mib, miblen, NULL, NULL, NULL, 0), 0,
        "Unexpected mallctlbymib() failure");
}
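/* Bump the "epoch" mallctl so that subsequent stats reads are current. */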
static void
do_refresh(void) {
    uint64_t refresh_epoch = 1;

    expect_d_eq(mallctl("epoch", NULL, NULL, (void *)&refresh_epoch,
        sizeof(refresh_epoch)), 0, "Unexpected mallctl() failure");
}
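/*
 * Look up a size_t-valued per-arena statistic by name (e.g.
 * "stats.arenas.0.pactive"), substituting the arena index into the MIB.
 */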
static size_t
do_get_size_impl(const char *cmd, unsigned ind) {
    size_t mib[4];
    size_t miblen = sizeof(mib) / sizeof(size_t);
    size_t z = sizeof(size_t);

    expect_d_eq(mallctlnametomib(cmd, mib, &miblen),
        0, "Unexpected mallctlnametomib(\"%s\", ...) failure", cmd);
    mib[2] = ind;
    size_t size;
    expect_d_eq(mallctlbymib(mib, miblen, (void *)&size, &z, NULL, 0),
        0, "Unexpected mallctlbymib([\"%s\"], ...) failure", cmd);

    return size;
}
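/* pactive counts pages, so scale by PAGE; mapped is already in bytes. */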
static size_t
do_get_active(unsigned ind) {
    return do_get_size_impl("stats.arenas.0.pactive", ind) * PAGE;
}

static size_t
do_get_mapped(unsigned ind) {
    return do_get_size_impl("stats.arenas.0.mapped", ind);
}
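/*
 * Worker thread: for each epoch, busy-wait for the main thread's signal,
 * perform PER_THD_NALLOCS allocations from the shared arena, then report
 * completion via nfinished.
 */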
static void *
thd_start(void *arg) {
    for (unsigned next_epoch = 1; next_epoch < NEPOCHS; next_epoch++) {
        /* Busy-wait for next epoch. */
        unsigned cur_epoch;
        spin_t spinner = SPIN_INITIALIZER;
        while ((cur_epoch = atomic_load_u(&epoch, ATOMIC_ACQUIRE)) !=
            next_epoch) {
            spin_adaptive(&spinner);
        }
        expect_u_eq(cur_epoch, next_epoch, "Unexpected epoch");

        /*
         * Allocate.  The main thread will reset the arena, so there's
         * no need to deallocate.
         */
        for (unsigned i = 0; i < PER_THD_NALLOCS; i++) {
            void *p = mallocx(sz, MALLOCX_ARENA(arena_ind) |
                MALLOCX_TCACHE_NONE);
            expect_ptr_not_null(p,
                "Unexpected mallocx() failure\n");
        }

        /* Let the main thread know we've finished this iteration. */
        atomic_fetch_add_u(&nfinished, 1, ATOMIC_RELEASE);
    }
    return NULL;
}
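/*
 * Drive NEPOCHS rounds of concurrent allocation; after each round, verify
 * that the arena's active, mapped, and retained memory stay within the
 * bounds implied by the extent growth policy.
 */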
TEST_BEGIN(test_retained) {
    test_skip_if(!config_stats);
    test_skip_if(opt_hpa);

    arena_ind = do_arena_create(NULL);
    sz = nallocx(HUGEPAGE, 0);
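    /*
     * esz is the expected extent footprint of one allocation: the usable
     * size plus large-allocation padding, plus guard pages when sanitizer
     * guards are enabled.
     */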
    size_t guard_sz = san_guard_enabled() ? SAN_PAGE_GUARDS_SIZE : 0;
    esz = sz + sz_large_pad + guard_sz;

    atomic_store_u(&epoch, 0, ATOMIC_RELAXED);

    unsigned nthreads = ncpus * 2;
    if (LG_SIZEOF_PTR < 3 && nthreads > 16) {
        nthreads = 16; /* 32-bit platform could run out of vaddr. */
    }
    VARIABLE_ARRAY(thd_t, threads, nthreads);
    for (unsigned i = 0; i < nthreads; i++) {
        thd_create(&threads[i], thd_start, NULL);
    }
    for (unsigned e = 1; e < NEPOCHS; e++) {
        atomic_store_u(&nfinished, 0, ATOMIC_RELEASE);
        atomic_store_u(&epoch, e, ATOMIC_RELEASE);

        /* Wait for threads to finish allocating. */
        spin_t spinner = SPIN_INITIALIZER;
        while (atomic_load_u(&nfinished, ATOMIC_ACQUIRE) < nthreads) {
            spin_adaptive(&spinner);
        }

        /*
         * Assert that retained is no more than the sum of size classes
         * that should have been used to satisfy the worker threads'
         * requests, discounting per-growth fragmentation.
         */
        do_refresh();

        size_t allocated = (esz - guard_sz) * nthreads *
            PER_THD_NALLOCS;
        size_t active = do_get_active(arena_ind);
        expect_zu_le(allocated, active, "Unexpected active memory");
        size_t mapped = do_get_mapped(arena_ind);
        expect_zu_le(active, mapped, "Unexpected mapped memory");
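        /*
         * Walk the page size classes that the arena's exponential growth
         * policy has stepped through so far, accumulating the usable and
         * fragmented portion of each; the usable total must stay below
         * what the threads actually allocated.
         */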
        arena_t *arena = arena_get(tsdn_fetch(), arena_ind, false);
        size_t usable = 0;
        size_t fragmented = 0;
        for (pszind_t pind = sz_psz2ind(HUGEPAGE); pind <
            arena->pa_shard.pac.exp_grow.next; pind++) {
            size_t psz = sz_pind2sz(pind);
            size_t psz_fragmented = psz % esz;
            size_t psz_usable = psz - psz_fragmented;
            /*
             * Only consider size classes that wouldn't be skipped.
             */
            if (psz_usable > 0) {
                expect_zu_lt(usable, allocated,
                    "Excessive retained memory "
                    "(%#zx[+%#zx] > %#zx)", usable, psz_usable,
                    allocated);
                fragmented += psz_fragmented;
                usable += psz_usable;
            }
        }
        /*
         * Clean up arena.  Destroying and recreating the arena
         * is simpler than specifying extent hooks that deallocate
         * (rather than retain) during reset.
         */
        do_arena_destroy(arena_ind);
        expect_u_eq(do_arena_create(NULL), arena_ind,
            "Unexpected arena index");
    }
    for (unsigned i = 0; i < nthreads; i++) {
        thd_join(threads[i], NULL);
    }

    do_arena_destroy(arena_ind);
}
TEST_END
int
main(void) {
    return test(
        test_retained);
}