junk.c

#include "test/jemalloc_test.h"

#define arraylen(arr) (sizeof(arr)/sizeof(arr[0]))

static size_t ptr_ind;
static void *volatile ptrs[100];
static void *last_junked_ptr;
static size_t last_junked_usize;
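
/* Clear the recorded allocations and last-junked state between iterations. */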
static void
reset() {
	ptr_ind = 0;
	last_junked_ptr = NULL;
	last_junked_usize = 0;
}
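
/*
 * Installed as both the junk-on-alloc and junk-on-free callback; records the
 * most recently junked region so the tests can inspect it.
 */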
static void
test_junk(void *ptr, size_t usize) {
	last_junked_ptr = ptr;
	last_junked_usize = usize;
}
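
/*
 * Allocate 'size' bytes through every available allocation API at the given
 * alignment, stashing each result in ptrs[] for the caller to free.
 */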
static void
do_allocs(size_t size, bool zero, size_t lg_align) {
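/*
 * Record the allocation and, when junk-on-alloc is enabled and the allocation
 * is not zero-initialized, verify that the junk callback saw this pointer and
 * its usable size.
 */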
#define JUNK_ALLOC(...) \
	do { \
		assert(ptr_ind + 1 < arraylen(ptrs)); \
		void *ptr = __VA_ARGS__; \
		assert_ptr_not_null(ptr, ""); \
		ptrs[ptr_ind++] = ptr; \
		if (opt_junk_alloc && !zero) { \
			expect_ptr_eq(ptr, last_junked_ptr, ""); \
			expect_zu_eq(last_junked_usize, \
			    TEST_MALLOC_SIZE(ptr), ""); \
		} \
	} while (0)
	if (!zero && lg_align == 0) {
		JUNK_ALLOC(malloc(size));
	}
	if (!zero) {
		JUNK_ALLOC(aligned_alloc(1 << lg_align, size));
	}
#ifdef JEMALLOC_OVERRIDE_MEMALIGN
	if (!zero) {
		JUNK_ALLOC(je_memalign(1 << lg_align, size));
	}
#endif
#ifdef JEMALLOC_OVERRIDE_VALLOC
	if (!zero && lg_align == LG_PAGE) {
		JUNK_ALLOC(je_valloc(size));
	}
#endif
	int zero_flag = zero ? MALLOCX_ZERO : 0;
	JUNK_ALLOC(mallocx(size, zero_flag | MALLOCX_LG_ALIGN(lg_align)));
	JUNK_ALLOC(mallocx(size, zero_flag | MALLOCX_LG_ALIGN(lg_align)
	    | MALLOCX_TCACHE_NONE));
	if (lg_align >= LG_SIZEOF_PTR) {
		void *memalign_result;
		int err = posix_memalign(&memalign_result, (1 << lg_align),
		    size);
		assert_d_eq(err, 0, "");
		JUNK_ALLOC(memalign_result);
	}
}
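
/*
 * Allocate and free across combinations of zeroing, size, and alignment,
 * exercising each deallocation pathway and checking junk-on-free each time.
 */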
TEST_BEGIN(test_junk_alloc_free) {
	bool zerovals[] = {false, true};
	size_t sizevals[] = {
		1, 8, 100, 1000, 100*1000
		/*
		 * Memory allocation failure is a real possibility in 32-bit
		 * mode.  Rather than try to check in the face of resource
		 * exhaustion, we just rely more on the 64-bit tests.  This is
		 * a little bit white-box-y in the sense that this is only a
		 * good test strategy if we know that the junk pathways don't
		 * interact with the allocation selection mechanisms; but this
		 * is in fact the case.
		 */
#if LG_SIZEOF_PTR == 3
		, 10 * 1000 * 1000
#endif
	};
	size_t lg_alignvals[] = {
		0, 4, 10, 15, 16, LG_PAGE
#if LG_SIZEOF_PTR == 3
		, 20, 24
#endif
	};
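
/*
 * Allocate through do_allocs(), then release each pointer with the given call
 * and verify that junk-on-free reported that pointer and its usable size.
 */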
#define JUNK_FREE(...) \
	do { \
		do_allocs(size, zero, lg_align); \
		for (size_t n = 0; n < ptr_ind; n++) { \
			void *ptr = ptrs[n]; \
			__VA_ARGS__; \
			if (opt_junk_free) { \
				assert_ptr_eq(ptr, last_junked_ptr, ""); \
				assert_zu_eq(usize, last_junked_usize, ""); \
			} \
			reset(); \
		} \
	} while (0)

	for (size_t i = 0; i < arraylen(zerovals); i++) {
		for (size_t j = 0; j < arraylen(sizevals); j++) {
			for (size_t k = 0; k < arraylen(lg_alignvals); k++) {
				bool zero = zerovals[i];
				size_t size = sizevals[j];
				size_t lg_align = lg_alignvals[k];
				size_t usize = nallocx(size,
				    MALLOCX_LG_ALIGN(lg_align));

				JUNK_FREE(free(ptr));
				JUNK_FREE(dallocx(ptr, 0));
				JUNK_FREE(dallocx(ptr, MALLOCX_TCACHE_NONE));
				JUNK_FREE(dallocx(ptr, MALLOCX_LG_ALIGN(
				    lg_align)));
				JUNK_FREE(sdallocx(ptr, usize, MALLOCX_LG_ALIGN(
				    lg_align)));
				JUNK_FREE(sdallocx(ptr, usize,
				    MALLOCX_TCACHE_NONE | MALLOCX_LG_ALIGN(lg_align)));
				if (opt_zero_realloc_action
				    == zero_realloc_action_free) {
					JUNK_FREE(realloc(ptr, 0));
				}
			}
		}
	}
}
TEST_END
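
/*
 * When realloc/rallocx grows an allocation, only the newly added tail should
 * be junked; with MALLOCX_ZERO, nothing should be junked at all.
 */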
TEST_BEGIN(test_realloc_expand) {
	char *volatile ptr;
	char *volatile expanded;

	test_skip_if(!opt_junk_alloc);

	/* Realloc */
	ptr = malloc(SC_SMALL_MAXCLASS);
	expanded = realloc(ptr, SC_LARGE_MINCLASS);
	expect_ptr_eq(last_junked_ptr, &expanded[SC_SMALL_MAXCLASS], "");
	expect_zu_eq(last_junked_usize,
	    SC_LARGE_MINCLASS - SC_SMALL_MAXCLASS, "");
	free(expanded);

	/* rallocx(..., 0) */
	ptr = malloc(SC_SMALL_MAXCLASS);
	expanded = rallocx(ptr, SC_LARGE_MINCLASS, 0);
	expect_ptr_eq(last_junked_ptr, &expanded[SC_SMALL_MAXCLASS], "");
	expect_zu_eq(last_junked_usize,
	    SC_LARGE_MINCLASS - SC_SMALL_MAXCLASS, "");
	free(expanded);

	/* rallocx(..., nonzero) */
	ptr = malloc(SC_SMALL_MAXCLASS);
	expanded = rallocx(ptr, SC_LARGE_MINCLASS, MALLOCX_TCACHE_NONE);
	expect_ptr_eq(last_junked_ptr, &expanded[SC_SMALL_MAXCLASS], "");
	expect_zu_eq(last_junked_usize,
	    SC_LARGE_MINCLASS - SC_SMALL_MAXCLASS, "");
	free(expanded);

	/* rallocx(..., MALLOCX_ZERO) */
	ptr = malloc(SC_SMALL_MAXCLASS);
	last_junked_ptr = (void *)-1;
	last_junked_usize = (size_t)-1;
	expanded = rallocx(ptr, SC_LARGE_MINCLASS, MALLOCX_ZERO);
	expect_ptr_eq(last_junked_ptr, (void *)-1, "");
	expect_zu_eq(last_junked_usize, (size_t)-1, "");
	free(expanded);

	/*
	 * Unfortunately, testing xallocx reliably is difficult to do portably
	 * (since allocations can be expanded / not expanded differently on
	 * different platforms).  We rely on manual inspection there -- the
	 * xallocx pathway is easy to inspect, though.
	 *
	 * Likewise, we don't test the shrinking pathways.  It's difficult to
	 * do so consistently (because of the risk of split failure or memory
	 * exhaustion, in which case no junking should happen).  This is fine
	 * -- junking is a best-effort debug mechanism in the first place.
	 */
}
TEST_END
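
/*
 * Route both junk-on-alloc and junk-on-free through test_junk so each test
 * can observe the most recently junked region.
 */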
int
main(void) {
	junk_alloc_callback = &test_junk;
	junk_free_callback = &test_junk;
	/*
	 * We check the last pointer junked.  If a reentrant call happens, that
	 * might be an internal allocation.
	 */
	return test_no_reentrancy(
	    test_junk_alloc_free,
	    test_realloc_expand);
}