san.c

#include "test/jemalloc_test.h"
#include "test/arena_util.h"
#include "test/san.h"

#include "jemalloc/internal/san.h"

static void
verify_extent_guarded(tsdn_t *tsdn, void *ptr) {
	expect_true(extent_is_guarded(tsdn, ptr),
	    "All extents should be guarded.");
}
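
/* Backing store for the pointers allocated by test_guarded_small. */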
#define MAX_SMALL_ALLOCATIONS 4096
void *small_alloc[MAX_SMALL_ALLOCATIONS];

/*
 * This test allocates page sized slabs and checks that every two slabs have
 * at least one page in between them. That page is supposed to be the guard
 * page.
 */
TEST_BEGIN(test_guarded_small) {
	test_skip_if(opt_prof);

	tsdn_t *tsdn = tsd_tsdn(tsd_fetch());
	unsigned npages = 16, pages_found = 0, ends_found = 0;
	VARIABLE_ARRAY(uintptr_t, pages, npages);

	/* Allocate to get sanitized pointers. */
	size_t slab_sz = PAGE;
	size_t sz = slab_sz / 8;
	unsigned n_alloc = 0;
	while (n_alloc < MAX_SMALL_ALLOCATIONS) {
		void *ptr = malloc(sz);
		expect_ptr_not_null(ptr, "Unexpected malloc() failure");
		small_alloc[n_alloc] = ptr;
		verify_extent_guarded(tsdn, ptr);
		if ((uintptr_t)ptr % PAGE == 0) {
			assert_u_lt(pages_found, npages,
			    "Unexpectedly large number of page aligned allocs");
			pages[pages_found++] = (uintptr_t)ptr;
		}
		if (((uintptr_t)ptr + (uintptr_t)sz) % PAGE == 0) {
			ends_found++;
		}
		n_alloc++;
		if (pages_found == npages && ends_found == npages) {
			break;
		}
	}

	/* Should have found the ptrs checked for overflow and underflow. */
	expect_u_eq(pages_found, npages, "Could not find the expected pages.");
	expect_u_eq(ends_found, npages, "Could not find the expected pages.");

	/* Verify the pages are not contiguous, i.e. separated by guards. */
	for (unsigned i = 0; i < npages - 1; i++) {
		for (unsigned j = i + 1; j < npages; j++) {
			uintptr_t ptr_diff = pages[i] > pages[j] ?
			    pages[i] - pages[j] : pages[j] - pages[i];
			expect_zu_ge((size_t)ptr_diff, slab_sz + PAGE,
			    "There should be at least one page between "
			    "guarded slabs");
		}
	}

	/* Free exactly the n_alloc pointers allocated above. */
	for (unsigned i = 0; i < n_alloc; i++) {
		free(small_alloc[i]);
	}
}
TEST_END
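
/*
 * Same idea for large allocations: every two guarded large allocations
 * should be separated by at least two pages, which are expected to be the
 * guard pages.
 */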
TEST_BEGIN(test_guarded_large) {
	tsdn_t *tsdn = tsd_tsdn(tsd_fetch());
	unsigned nlarge = 32;
	VARIABLE_ARRAY(uintptr_t, large, nlarge);

	/* Allocate to get sanitized pointers. */
	size_t large_sz = SC_LARGE_MINCLASS;
	for (unsigned i = 0; i < nlarge; i++) {
		void *ptr = malloc(large_sz);
		expect_ptr_not_null(ptr, "Unexpected malloc() failure");
		verify_extent_guarded(tsdn, ptr);
		large[i] = (uintptr_t)ptr;
	}

	/* Verify the pages are not contiguous, i.e. separated by guards. */
	for (unsigned i = 0; i < nlarge; i++) {
		for (unsigned j = i + 1; j < nlarge; j++) {
			uintptr_t ptr_diff = large[i] > large[j] ?
			    large[i] - large[j] : large[j] - large[i];
			expect_zu_ge((size_t)ptr_diff, large_sz + 2 * PAGE,
			    "There should be at least two pages between "
			    "guarded large allocations");
		}
	}

	for (unsigned i = 0; i < nlarge; i++) {
		free((void *)large[i]);
	}
}
TEST_END
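
/*
 * Helpers that compare the arena's dirty/muzzy page counters against an
 * expected amount given in bytes (converted to pages).
 */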
static void
verify_pdirty(unsigned arena_ind, uint64_t expected) {
	uint64_t pdirty = get_arena_pdirty(arena_ind);
	expect_u64_eq(pdirty, expected / PAGE,
	    "Unexpected dirty page amount.");
}

static void
verify_pmuzzy(unsigned arena_ind, uint64_t expected) {
	uint64_t pmuzzy = get_arena_pmuzzy(arena_ind);
	expect_u64_eq(pmuzzy, expected / PAGE,
	    "Unexpected muzzy page amount.");
}
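
/*
 * Exercise decay and purging with guarded extents: verify the dirty/muzzy
 * page accounting as guarded extents are allocated, reused, freed and purged,
 * and (with retain) that retained guarded extents are reused.
 */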
TEST_BEGIN(test_guarded_decay) {
	unsigned arena_ind = do_arena_create(-1, -1);
	do_decay(arena_ind);
	do_purge(arena_ind);

	verify_pdirty(arena_ind, 0);
	verify_pmuzzy(arena_ind, 0);

	/* Verify that freed guarded extents are counted as dirty. */
	size_t sz1 = PAGE, sz2 = PAGE * 2;
	/* W/o maps_coalesce, guarded extents are unguarded eagerly. */
	size_t add_guard_size = maps_coalesce ? 0 : SAN_PAGE_GUARDS_SIZE;
	generate_dirty(arena_ind, sz1);
	verify_pdirty(arena_ind, sz1 + add_guard_size);
	verify_pmuzzy(arena_ind, 0);

	/* Should reuse the first extent. */
	generate_dirty(arena_ind, sz1);
	verify_pdirty(arena_ind, sz1 + add_guard_size);
	verify_pmuzzy(arena_ind, 0);

	/* Should not reuse; expect new dirty pages. */
	generate_dirty(arena_ind, sz2);
	verify_pdirty(arena_ind, sz1 + sz2 + 2 * add_guard_size);
	verify_pmuzzy(arena_ind, 0);

	tsdn_t *tsdn = tsd_tsdn(tsd_fetch());
	int flags = MALLOCX_ARENA(arena_ind) | MALLOCX_TCACHE_NONE;

	/* Should reuse dirty extents for the two mallocx. */
	void *p1 = do_mallocx(sz1, flags);
	verify_extent_guarded(tsdn, p1);
	verify_pdirty(arena_ind, sz2 + add_guard_size);

	void *p2 = do_mallocx(sz2, flags);
	verify_extent_guarded(tsdn, p2);
	verify_pdirty(arena_ind, 0);
	verify_pmuzzy(arena_ind, 0);

	dallocx(p1, flags);
	verify_pdirty(arena_ind, sz1 + add_guard_size);
	dallocx(p2, flags);
	verify_pdirty(arena_ind, sz1 + sz2 + 2 * add_guard_size);
	verify_pmuzzy(arena_ind, 0);

	do_purge(arena_ind);
	verify_pdirty(arena_ind, 0);
	verify_pmuzzy(arena_ind, 0);
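
	/*
	 * With stats enabled, the purge above should show up as exactly one
	 * dirty purge covering all of the dirty pages, and no muzzy purges.
	 */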
	if (config_stats) {
		expect_u64_eq(get_arena_npurge(arena_ind), 1,
		    "Expected purging to occur");
		expect_u64_eq(get_arena_dirty_npurge(arena_ind), 1,
		    "Expected purging to occur");
		expect_u64_eq(get_arena_dirty_purged(arena_ind),
		    (sz1 + sz2 + 2 * add_guard_size) / PAGE,
		    "Expected purging to occur");
		expect_u64_eq(get_arena_muzzy_npurge(arena_ind), 0,
		    "Expected no muzzy purging");
	}

	if (opt_retain) {
		/*
		 * With retain, guarded extents are not mergeable and will be
		 * cached in ecache_retained. They should be reused.
		 */
		void *new_p1 = do_mallocx(sz1, flags);
		verify_extent_guarded(tsdn, new_p1);
		expect_ptr_eq(p1, new_p1, "Expect to reuse p1");
		void *new_p2 = do_mallocx(sz2, flags);
		verify_extent_guarded(tsdn, new_p2);
		expect_ptr_eq(p2, new_p2, "Expect to reuse p2");

		dallocx(new_p1, flags);
		verify_pdirty(arena_ind, sz1 + add_guard_size);
		dallocx(new_p2, flags);
		verify_pdirty(arena_ind, sz1 + sz2 + 2 * add_guard_size);
		verify_pmuzzy(arena_ind, 0);
	}

	do_arena_destroy(arena_ind);
}
TEST_END

int
main(void) {
	return test(
	    test_guarded_small,
	    test_guarded_large,
	    test_guarded_decay);
}