/* prof_reset.c — jemalloc heap-profiling "prof.reset" unit tests. */
  1. #include "test/jemalloc_test.h"
  2. #include "jemalloc/internal/prof_data.h"
  3. #include "jemalloc/internal/prof_sys.h"
  4. static int
  5. prof_dump_open_file_intercept(const char *filename, int mode) {
  6. int fd;
  7. fd = open("/dev/null", O_WRONLY);
  8. assert_d_ne(fd, -1, "Unexpected open() failure");
  9. return fd;
  10. }
  11. static void
  12. set_prof_active(bool active) {
  13. expect_d_eq(mallctl("prof.active", NULL, NULL, (void *)&active,
  14. sizeof(active)), 0, "Unexpected mallctl failure");
  15. }
  16. static size_t
  17. get_lg_prof_sample(void) {
  18. size_t ret;
  19. size_t sz = sizeof(size_t);
  20. expect_d_eq(mallctl("prof.lg_sample", (void *)&ret, &sz, NULL, 0), 0,
  21. "Unexpected mallctl failure while reading profiling sample rate");
  22. return ret;
  23. }
  24. static void
  25. do_prof_reset(size_t lg_prof_sample_input) {
  26. expect_d_eq(mallctl("prof.reset", NULL, NULL,
  27. (void *)&lg_prof_sample_input, sizeof(size_t)), 0,
  28. "Unexpected mallctl failure while resetting profile data");
  29. expect_zu_eq(lg_prof_sample_input, get_lg_prof_sample(),
  30. "Expected profile sample rate change");
  31. }
  32. TEST_BEGIN(test_prof_reset_basic) {
  33. size_t lg_prof_sample_orig, lg_prof_sample_cur, lg_prof_sample_next;
  34. size_t sz;
  35. unsigned i;
  36. test_skip_if(!config_prof);
  37. sz = sizeof(size_t);
  38. expect_d_eq(mallctl("opt.lg_prof_sample", (void *)&lg_prof_sample_orig,
  39. &sz, NULL, 0), 0,
  40. "Unexpected mallctl failure while reading profiling sample rate");
  41. expect_zu_eq(lg_prof_sample_orig, 0,
  42. "Unexpected profiling sample rate");
  43. lg_prof_sample_cur = get_lg_prof_sample();
  44. expect_zu_eq(lg_prof_sample_orig, lg_prof_sample_cur,
  45. "Unexpected disagreement between \"opt.lg_prof_sample\" and "
  46. "\"prof.lg_sample\"");
  47. /* Test simple resets. */
  48. for (i = 0; i < 2; i++) {
  49. expect_d_eq(mallctl("prof.reset", NULL, NULL, NULL, 0), 0,
  50. "Unexpected mallctl failure while resetting profile data");
  51. lg_prof_sample_cur = get_lg_prof_sample();
  52. expect_zu_eq(lg_prof_sample_orig, lg_prof_sample_cur,
  53. "Unexpected profile sample rate change");
  54. }
  55. /* Test resets with prof.lg_sample changes. */
  56. lg_prof_sample_next = 1;
  57. for (i = 0; i < 2; i++) {
  58. do_prof_reset(lg_prof_sample_next);
  59. lg_prof_sample_cur = get_lg_prof_sample();
  60. expect_zu_eq(lg_prof_sample_cur, lg_prof_sample_next,
  61. "Expected profile sample rate change");
  62. lg_prof_sample_next = lg_prof_sample_orig;
  63. }
  64. /* Make sure the test code restored prof.lg_sample. */
  65. lg_prof_sample_cur = get_lg_prof_sample();
  66. expect_zu_eq(lg_prof_sample_orig, lg_prof_sample_cur,
  67. "Unexpected disagreement between \"opt.lg_prof_sample\" and "
  68. "\"prof.lg_sample\"");
  69. }
  70. TEST_END
/*
 * Verify (per the assertions below) that "prof.reset" zeroes the allocation
 * counters but does not discard a backtrace still referenced by a live
 * allocation; the backtrace count drops only once the object is freed.
 */
TEST_BEGIN(test_prof_reset_cleanup) {
	test_skip_if(!config_prof);

	set_prof_active(true);

	/* Start from a clean slate, then create exactly one sampled object. */
	expect_zu_eq(prof_bt_count(), 0, "Expected 0 backtraces");
	void *p = mallocx(1, 0);
	expect_ptr_not_null(p, "Unexpected mallocx() failure");
	expect_zu_eq(prof_bt_count(), 1, "Expected 1 backtrace");

	prof_cnt_t cnt_all;
	prof_cnt_all(&cnt_all);
	expect_u64_eq(cnt_all.curobjs, 1, "Expected 1 allocation");

	/* Reset zeroes the counters ... */
	expect_d_eq(mallctl("prof.reset", NULL, NULL, NULL, 0), 0,
	    "Unexpected error while resetting heap profile data");
	prof_cnt_all(&cnt_all);
	expect_u64_eq(cnt_all.curobjs, 0, "Expected 0 allocations");
	/* ... but the backtrace for the still-live object p remains. */
	expect_zu_eq(prof_bt_count(), 1, "Expected 1 backtrace");

	/* Freeing the object releases its backtrace as well. */
	dallocx(p, 0);
	expect_zu_eq(prof_bt_count(), 0, "Expected 0 backtraces");

	set_prof_active(false);
}
TEST_END
  91. #define NTHREADS 4
  92. #define NALLOCS_PER_THREAD (1U << 13)
  93. #define OBJ_RING_BUF_COUNT 1531
  94. #define RESET_INTERVAL (1U << 10)
  95. #define DUMP_INTERVAL 3677
  96. static void *
  97. thd_start(void *varg) {
  98. unsigned thd_ind = *(unsigned *)varg;
  99. unsigned i;
  100. void *objs[OBJ_RING_BUF_COUNT];
  101. memset(objs, 0, sizeof(objs));
  102. for (i = 0; i < NALLOCS_PER_THREAD; i++) {
  103. if (i % RESET_INTERVAL == 0) {
  104. expect_d_eq(mallctl("prof.reset", NULL, NULL, NULL, 0),
  105. 0, "Unexpected error while resetting heap profile "
  106. "data");
  107. }
  108. if (i % DUMP_INTERVAL == 0) {
  109. expect_d_eq(mallctl("prof.dump", NULL, NULL, NULL, 0),
  110. 0, "Unexpected error while dumping heap profile");
  111. }
  112. {
  113. void **pp = &objs[i % OBJ_RING_BUF_COUNT];
  114. if (*pp != NULL) {
  115. dallocx(*pp, 0);
  116. *pp = NULL;
  117. }
  118. *pp = btalloc(1, thd_ind*NALLOCS_PER_THREAD + i);
  119. expect_ptr_not_null(*pp,
  120. "Unexpected btalloc() failure");
  121. }
  122. }
  123. /* Clean up any remaining objects. */
  124. for (i = 0; i < OBJ_RING_BUF_COUNT; i++) {
  125. void **pp = &objs[i % OBJ_RING_BUF_COUNT];
  126. if (*pp != NULL) {
  127. dallocx(*pp, 0);
  128. *pp = NULL;
  129. }
  130. }
  131. return NULL;
  132. }
  133. TEST_BEGIN(test_prof_reset) {
  134. size_t lg_prof_sample_orig;
  135. thd_t thds[NTHREADS];
  136. unsigned thd_args[NTHREADS];
  137. unsigned i;
  138. size_t bt_count, tdata_count;
  139. test_skip_if(!config_prof);
  140. bt_count = prof_bt_count();
  141. expect_zu_eq(bt_count, 0,
  142. "Unexpected pre-existing tdata structures");
  143. tdata_count = prof_tdata_count();
  144. lg_prof_sample_orig = get_lg_prof_sample();
  145. do_prof_reset(5);
  146. set_prof_active(true);
  147. for (i = 0; i < NTHREADS; i++) {
  148. thd_args[i] = i;
  149. thd_create(&thds[i], thd_start, (void *)&thd_args[i]);
  150. }
  151. for (i = 0; i < NTHREADS; i++) {
  152. thd_join(thds[i], NULL);
  153. }
  154. expect_zu_eq(prof_bt_count(), bt_count,
  155. "Unexpected bactrace count change");
  156. expect_zu_eq(prof_tdata_count(), tdata_count,
  157. "Unexpected remaining tdata structures");
  158. set_prof_active(false);
  159. do_prof_reset(lg_prof_sample_orig);
  160. }
  161. TEST_END
  162. #undef NTHREADS
  163. #undef NALLOCS_PER_THREAD
  164. #undef OBJ_RING_BUF_COUNT
  165. #undef RESET_INTERVAL
  166. #undef DUMP_INTERVAL
  167. /* Test sampling at the same allocation site across resets. */
  168. #define NITER 10
  169. TEST_BEGIN(test_xallocx) {
  170. size_t lg_prof_sample_orig;
  171. unsigned i;
  172. void *ptrs[NITER];
  173. test_skip_if(!config_prof);
  174. lg_prof_sample_orig = get_lg_prof_sample();
  175. set_prof_active(true);
  176. /* Reset profiling. */
  177. do_prof_reset(0);
  178. for (i = 0; i < NITER; i++) {
  179. void *p;
  180. size_t sz, nsz;
  181. /* Reset profiling. */
  182. do_prof_reset(0);
  183. /* Allocate small object (which will be promoted). */
  184. p = ptrs[i] = mallocx(1, 0);
  185. expect_ptr_not_null(p, "Unexpected mallocx() failure");
  186. /* Reset profiling. */
  187. do_prof_reset(0);
  188. /* Perform successful xallocx(). */
  189. sz = sallocx(p, 0);
  190. expect_zu_eq(xallocx(p, sz, 0, 0), sz,
  191. "Unexpected xallocx() failure");
  192. /* Perform unsuccessful xallocx(). */
  193. nsz = nallocx(sz+1, 0);
  194. expect_zu_eq(xallocx(p, nsz, 0, 0), sz,
  195. "Unexpected xallocx() success");
  196. }
  197. for (i = 0; i < NITER; i++) {
  198. /* dallocx. */
  199. dallocx(ptrs[i], 0);
  200. }
  201. set_prof_active(false);
  202. do_prof_reset(lg_prof_sample_orig);
  203. }
  204. TEST_END
  205. #undef NITER
  206. int
  207. main(void) {
  208. /* Intercept dumping prior to running any tests. */
  209. prof_dump_open_file = prof_dump_open_file_intercept;
  210. return test_no_reentrancy(
  211. test_prof_reset_basic,
  212. test_prof_reset_cleanup,
  213. test_prof_reset,
  214. test_xallocx);
  215. }