#include "test/jemalloc_test.h"

#include "jemalloc/internal/prof_recent.h"

/* As specified in the shell script */
#define OPT_ALLOC_MAX 3

/* Invariant before and after every test (when config_prof is on) */
static void
confirm_prof_setup() {
	/* Options */
	assert_true(opt_prof, "opt_prof not on");
	assert_true(opt_prof_active, "opt_prof_active not on");
	assert_zd_eq(opt_prof_recent_alloc_max, OPT_ALLOC_MAX,
	    "opt_prof_recent_alloc_max not set correctly");

	/* Dynamics */
	assert_true(prof_active_state, "prof_active not on");
	assert_zd_eq(prof_recent_alloc_max_ctl_read(), OPT_ALLOC_MAX,
	    "prof_recent_alloc_max not set correctly");
}

TEST_BEGIN(test_confirm_setup) {
	test_skip_if(!config_prof);
	confirm_prof_setup();
}
TEST_END
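
/*
 * Without config_prof, every experimental.prof_recent mallctl should fail
 * with ENOENT and leave both the output and the input arguments untouched.
 */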
TEST_BEGIN(test_prof_recent_off) {
	test_skip_if(config_prof);

	const ssize_t past_ref = 0, future_ref = 0;
	const size_t len_ref = sizeof(ssize_t);

	ssize_t past = past_ref, future = future_ref;
	size_t len = len_ref;

#define ASSERT_SHOULD_FAIL(opt, a, b, c, d) do {			\
	assert_d_eq(mallctl("experimental.prof_recent." opt, a, b, c,	\
	    d), ENOENT, "Should return ENOENT when config_prof is off"); \
	assert_zd_eq(past, past_ref, "output was touched");		\
	assert_zu_eq(len, len_ref, "output length was touched");	\
	assert_zd_eq(future, future_ref, "input was touched");		\
} while (0)

	ASSERT_SHOULD_FAIL("alloc_max", NULL, NULL, NULL, 0);
	ASSERT_SHOULD_FAIL("alloc_max", &past, &len, NULL, 0);
	ASSERT_SHOULD_FAIL("alloc_max", NULL, NULL, &future, len);
	ASSERT_SHOULD_FAIL("alloc_max", &past, &len, &future, len);

#undef ASSERT_SHOULD_FAIL
}
TEST_END
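
/*
 * Exercise the read, write, and read+write paths of
 * experimental.prof_recent.alloc_max, including rejection (EINVAL) of
 * values less than -1 and of input lengths of the wrong size.
 */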
TEST_BEGIN(test_prof_recent_on) {
	test_skip_if(!config_prof);

	ssize_t past, future;
	size_t len = sizeof(ssize_t);

	confirm_prof_setup();

	assert_d_eq(mallctl("experimental.prof_recent.alloc_max",
	    NULL, NULL, NULL, 0), 0, "no-op mallctl should be allowed");
	confirm_prof_setup();

	assert_d_eq(mallctl("experimental.prof_recent.alloc_max",
	    &past, &len, NULL, 0), 0, "Read error");
	expect_zd_eq(past, OPT_ALLOC_MAX, "Wrong read result");
	future = OPT_ALLOC_MAX + 1;
	assert_d_eq(mallctl("experimental.prof_recent.alloc_max",
	    NULL, NULL, &future, len), 0, "Write error");
	future = -1;
	assert_d_eq(mallctl("experimental.prof_recent.alloc_max",
	    &past, &len, &future, len), 0, "Read/write error");
	expect_zd_eq(past, OPT_ALLOC_MAX + 1, "Wrong read result");
	future = -2;
	assert_d_eq(mallctl("experimental.prof_recent.alloc_max",
	    &past, &len, &future, len), EINVAL,
	    "Invalid write should return EINVAL");
	expect_zd_eq(past, OPT_ALLOC_MAX + 1,
	    "Output should not be touched given invalid write");
	future = OPT_ALLOC_MAX;
	assert_d_eq(mallctl("experimental.prof_recent.alloc_max",
	    &past, &len, &future, len), 0, "Read/write error");
	expect_zd_eq(past, -1, "Wrong read result");
	future = OPT_ALLOC_MAX + 2;
	assert_d_eq(mallctl("experimental.prof_recent.alloc_max",
	    &past, &len, &future, len * 2), EINVAL,
	    "Invalid write should return EINVAL");
	expect_zd_eq(past, -1,
	    "Output should not be touched given invalid write");

	confirm_prof_setup();
}
TEST_END

/* Reproducible sequence of request sizes */
#define NTH_REQ_SIZE(n) ((n) * 97 + 101)
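
/*
 * Confirm that a just-sampled allocation has a recent record attached to
 * its edata, with alloc_tctx set and dalloc_tctx still NULL.
 */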
static void
confirm_malloc(void *p) {
	assert_ptr_not_null(p, "malloc failed unexpectedly");
	edata_t *e = emap_edata_lookup(TSDN_NULL, &arena_emap_global, p);
	assert_ptr_not_null(e, "NULL edata for living pointer");
	prof_recent_t *n = edata_prof_recent_alloc_get_no_lock_test(e);
	assert_ptr_not_null(n, "Record in edata should not be NULL");
	expect_ptr_not_null(n->alloc_tctx,
	    "alloc_tctx in record should not be NULL");
	expect_ptr_eq(e, prof_recent_alloc_edata_get_no_lock_test(n),
	    "edata pointer in record is not correct");
	expect_ptr_null(n->dalloc_tctx, "dalloc_tctx in record should be NULL");
}

static void
confirm_record_size(prof_recent_t *n, unsigned kth) {
	expect_zu_eq(n->size, NTH_REQ_SIZE(kth),
	    "Recorded allocation size is wrong");
}

static void
confirm_record_living(prof_recent_t *n) {
	expect_ptr_not_null(n->alloc_tctx,
	    "alloc_tctx in record should not be NULL");
	edata_t *edata = prof_recent_alloc_edata_get_no_lock_test(n);
	assert_ptr_not_null(edata,
	    "Recorded edata should not be NULL for living pointer");
	expect_ptr_eq(n, edata_prof_recent_alloc_get_no_lock_test(edata),
	    "Record in edata is not correct");
	expect_ptr_null(n->dalloc_tctx, "dalloc_tctx in record should be NULL");
}

static void
confirm_record_released(prof_recent_t *n) {
	expect_ptr_not_null(n->alloc_tctx,
	    "alloc_tctx in record should not be NULL");
	expect_ptr_null(prof_recent_alloc_edata_get_no_lock_test(n),
	    "Recorded edata should be NULL for released pointer");
	expect_ptr_not_null(n->dalloc_tctx,
	    "dalloc_tctx in record should not be NULL for released pointer");
}

TEST_BEGIN(test_prof_recent_alloc) {
	test_skip_if(!config_prof);

	bool b;
	unsigned i, c;
	size_t req_size;
	void *p;
	prof_recent_t *n;
	ssize_t future;

	confirm_prof_setup();

	/*
	 * First batch of 2 * OPT_ALLOC_MAX allocations.  After the
	 * (OPT_ALLOC_MAX - 1)'th allocation the recorded allocations should
	 * always be the last OPT_ALLOC_MAX allocations coming from here.
	 */
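	/*
	 * Concretely (an illustration, not an extra check): with
	 * OPT_ALLOC_MAX == 3, after the i'th allocation below (i >= 2) the
	 * list should hold records for allocations i-2, i-1, and i, oldest
	 * first, of which only the i'th is still living.
	 */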
	for (i = 0; i < 2 * OPT_ALLOC_MAX; ++i) {
		req_size = NTH_REQ_SIZE(i);
		p = malloc(req_size);
		confirm_malloc(p);
		if (i < OPT_ALLOC_MAX - 1) {
			assert_false(ql_empty(&prof_recent_alloc_list),
			    "Empty recent allocation");
			free(p);
			/*
			 * The recorded allocations may still include some
			 * other allocations before the test run started,
			 * so keep allocating without checking anything.
			 */
			continue;
		}
		c = 0;
		ql_foreach(n, &prof_recent_alloc_list, link) {
			++c;
			confirm_record_size(n, i + c - OPT_ALLOC_MAX);
			if (c == OPT_ALLOC_MAX) {
				confirm_record_living(n);
			} else {
				confirm_record_released(n);
			}
		}
		assert_u_eq(c, OPT_ALLOC_MAX,
		    "Incorrect total number of allocations");
		free(p);
	}

	confirm_prof_setup();

	b = false;
	assert_d_eq(mallctl("prof.active", NULL, NULL, &b, sizeof(bool)), 0,
	    "mallctl for turning off prof_active failed");

	/*
	 * Second batch of OPT_ALLOC_MAX allocations.  Since prof_active is
	 * turned off, this batch shouldn't be recorded.
	 */
	for (; i < 3 * OPT_ALLOC_MAX; ++i) {
		req_size = NTH_REQ_SIZE(i);
		p = malloc(req_size);
		assert_ptr_not_null(p, "malloc failed unexpectedly");
		c = 0;
		ql_foreach(n, &prof_recent_alloc_list, link) {
			confirm_record_size(n, c + OPT_ALLOC_MAX);
			confirm_record_released(n);
			++c;
		}
		assert_u_eq(c, OPT_ALLOC_MAX,
		    "Incorrect total number of allocations");
		free(p);
	}

	b = true;
	assert_d_eq(mallctl("prof.active", NULL, NULL, &b, sizeof(bool)), 0,
	    "mallctl for turning on prof_active failed");
	confirm_prof_setup();

	/*
	 * Third batch of OPT_ALLOC_MAX allocations.  Since prof_active is
	 * turned back on, they should be recorded, and in the list of recorded
	 * allocations they should follow the first batch rather than the
	 * second batch.
	 */
	for (; i < 4 * OPT_ALLOC_MAX; ++i) {
		req_size = NTH_REQ_SIZE(i);
		p = malloc(req_size);
		confirm_malloc(p);
		c = 0;
		ql_foreach(n, &prof_recent_alloc_list, link) {
			++c;
			confirm_record_size(n,
			    /* Is the allocation from the third batch? */
			    i + c - OPT_ALLOC_MAX >= 3 * OPT_ALLOC_MAX ?
			    /* If yes, then it's just recorded. */
			    i + c - OPT_ALLOC_MAX :
			    /*
			     * Otherwise, it should come from the first batch
			     * instead of the second batch.
			     */
			    i + c - 2 * OPT_ALLOC_MAX);
			if (c == OPT_ALLOC_MAX) {
				confirm_record_living(n);
			} else {
				confirm_record_released(n);
			}
		}
		assert_u_eq(c, OPT_ALLOC_MAX,
		    "Incorrect total number of allocations");
		free(p);
	}

	/* Increasing the limit shouldn't alter the list of records. */
	future = OPT_ALLOC_MAX + 1;
	assert_d_eq(mallctl("experimental.prof_recent.alloc_max",
	    NULL, NULL, &future, sizeof(ssize_t)), 0, "Write error");
	c = 0;
	ql_foreach(n, &prof_recent_alloc_list, link) {
		confirm_record_size(n, c + 3 * OPT_ALLOC_MAX);
		confirm_record_released(n);
		++c;
	}
	assert_u_eq(c, OPT_ALLOC_MAX,
	    "Incorrect total number of allocations");

	/*
	 * Decreasing the limit shouldn't alter the list of records as long as
	 * the new limit is still no less than the length of the list.
	 */
	future = OPT_ALLOC_MAX;
	assert_d_eq(mallctl("experimental.prof_recent.alloc_max",
	    NULL, NULL, &future, sizeof(ssize_t)), 0, "Write error");
	c = 0;
	ql_foreach(n, &prof_recent_alloc_list, link) {
		confirm_record_size(n, c + 3 * OPT_ALLOC_MAX);
		confirm_record_released(n);
		++c;
	}
	assert_u_eq(c, OPT_ALLOC_MAX,
	    "Incorrect total number of allocations");

	/*
	 * Decreasing the limit should shorten the list of records if the new
	 * limit is less than the length of the list.
	 */
	future = OPT_ALLOC_MAX - 1;
	assert_d_eq(mallctl("experimental.prof_recent.alloc_max",
	    NULL, NULL, &future, sizeof(ssize_t)), 0, "Write error");
	c = 0;
	ql_foreach(n, &prof_recent_alloc_list, link) {
		++c;
		confirm_record_size(n, c + 3 * OPT_ALLOC_MAX);
		confirm_record_released(n);
	}
	assert_u_eq(c, OPT_ALLOC_MAX - 1,
	    "Incorrect total number of allocations");

	/* Setting to unlimited shouldn't alter the list of records. */
	future = -1;
	assert_d_eq(mallctl("experimental.prof_recent.alloc_max",
	    NULL, NULL, &future, sizeof(ssize_t)), 0, "Write error");
	c = 0;
	ql_foreach(n, &prof_recent_alloc_list, link) {
		++c;
		confirm_record_size(n, c + 3 * OPT_ALLOC_MAX);
		confirm_record_released(n);
	}
	assert_u_eq(c, OPT_ALLOC_MAX - 1,
	    "Incorrect total number of allocations");

	/* Downshift to only one record. */
	future = 1;
	assert_d_eq(mallctl("experimental.prof_recent.alloc_max",
	    NULL, NULL, &future, sizeof(ssize_t)), 0, "Write error");
	assert_false(ql_empty(&prof_recent_alloc_list), "Recent list is empty");
	n = ql_first(&prof_recent_alloc_list);
	confirm_record_size(n, 4 * OPT_ALLOC_MAX - 1);
	confirm_record_released(n);
	n = ql_next(&prof_recent_alloc_list, n, link);
	assert_ptr_null(n, "Recent list should only contain one record");

	/* Completely turn off. */
	future = 0;
	assert_d_eq(mallctl("experimental.prof_recent.alloc_max",
	    NULL, NULL, &future, sizeof(ssize_t)), 0, "Write error");
	assert_true(ql_empty(&prof_recent_alloc_list),
	    "Recent list should be empty");

	/* Restore the settings. */
	future = OPT_ALLOC_MAX;
	assert_d_eq(mallctl("experimental.prof_recent.alloc_max",
	    NULL, NULL, &future, sizeof(ssize_t)), 0, "Write error");
	assert_true(ql_empty(&prof_recent_alloc_list),
	    "Recent list should be empty");

	confirm_prof_setup();
}
TEST_END

#undef NTH_REQ_SIZE

#define DUMP_OUT_SIZE 4096
static char dump_out[DUMP_OUT_SIZE];
static size_t dump_out_len = 0;

static void
test_dump_write_cb(void *not_used, const char *str) {
	size_t len = strlen(str);
	assert(dump_out_len + len < DUMP_OUT_SIZE);
	memcpy(dump_out + dump_out_len, str, len + 1);
	dump_out_len += len;
}
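
/*
 * The alloc_dump mallctl takes its input as a two-element array holding
 * the write callback and the opaque argument passed back to the callback.
 */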

static void
call_dump() {
	static void *in[2] = {test_dump_write_cb, NULL};
	dump_out_len = 0;
	assert_d_eq(mallctl("experimental.prof_recent.alloc_dump",
	    NULL, NULL, in, sizeof(in)), 0, "Dump mallctl raised error");
}

typedef struct {
	size_t size;
	size_t usize;
	bool released;
} confirm_record_t;

#define DUMP_ERROR "Dump output is wrong"
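
/*
 * Verify that dump_out matches the given template, except that its
 * "recent_alloc" array should contain exactly n_records entries.  As a
 * rough sketch (field values elided; assuming opt_prof_sys_thread_name
 * is off), a released record looks like:
 *   {"size":7,"usize":8,"released":true,
 *    "alloc_thread_uid":...,"alloc_time":...,"alloc_trace":[...],
 *    "dalloc_thread_uid":...,"dalloc_time":...,"dalloc_trace":[...]}
 */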
static void
confirm_record(const char *template, const confirm_record_t *records,
    const size_t n_records) {
	static const char *types[2] = {"alloc", "dalloc"};
	static char buf[64];

	/*
	 * The template string would be in the form of:
	 *   "{...,\"recent_alloc\":[]}",
	 * and dump_out would be in the form of:
	 *   "{...,\"recent_alloc\":[...]}".
	 * Using "- 2" serves to cut right before the ending "]}".
	 */
	assert_d_eq(memcmp(dump_out, template, strlen(template) - 2), 0,
	    DUMP_ERROR);
	assert_d_eq(memcmp(dump_out + strlen(dump_out) - 2,
	    template + strlen(template) - 2, 2), 0, DUMP_ERROR);

	const char *start = dump_out + strlen(template) - 2;
	const char *end = dump_out + strlen(dump_out) - 2;
	const confirm_record_t *record;
	for (record = records; record < records + n_records; ++record) {

#define ASSERT_CHAR(c) do {						\
	assert_true(start < end, DUMP_ERROR);				\
	assert_c_eq(*start++, c, DUMP_ERROR);				\
} while (0)

#define ASSERT_STR(s) do {						\
	const size_t len = strlen(s);					\
	assert_true(start + len <= end, DUMP_ERROR);			\
	assert_d_eq(memcmp(start, s, len), 0, DUMP_ERROR);		\
	start += len;							\
} while (0)

#define ASSERT_FORMATTED_STR(s, ...) do {				\
	malloc_snprintf(buf, sizeof(buf), s, __VA_ARGS__);		\
	ASSERT_STR(buf);						\
} while (0)

		if (record != records) {
			ASSERT_CHAR(',');
		}
		ASSERT_CHAR('{');

		ASSERT_STR("\"size\"");
		ASSERT_CHAR(':');
		ASSERT_FORMATTED_STR("%zu", record->size);
		ASSERT_CHAR(',');

		ASSERT_STR("\"usize\"");
		ASSERT_CHAR(':');
		ASSERT_FORMATTED_STR("%zu", record->usize);
		ASSERT_CHAR(',');

		ASSERT_STR("\"released\"");
		ASSERT_CHAR(':');
		ASSERT_STR(record->released ? "true" : "false");
		ASSERT_CHAR(',');

		const char **type = types;
		while (true) {
			ASSERT_FORMATTED_STR("\"%s_thread_uid\"", *type);
			ASSERT_CHAR(':');
			while (isdigit(*start)) {
				++start;
			}
			ASSERT_CHAR(',');

			if (opt_prof_sys_thread_name) {
				ASSERT_FORMATTED_STR("\"%s_thread_name\"",
				    *type);
				ASSERT_CHAR(':');
				ASSERT_CHAR('"');
				while (*start != '"') {
					++start;
				}
				ASSERT_CHAR('"');
				ASSERT_CHAR(',');
			}

			ASSERT_FORMATTED_STR("\"%s_time\"", *type);
			ASSERT_CHAR(':');
			while (isdigit(*start)) {
				++start;
			}
			ASSERT_CHAR(',');

			ASSERT_FORMATTED_STR("\"%s_trace\"", *type);
			ASSERT_CHAR(':');
			ASSERT_CHAR('[');
			while (isdigit(*start) || *start == 'x' ||
			    (*start >= 'a' && *start <= 'f') ||
			    *start == '\"' || *start == ',') {
				++start;
			}
			ASSERT_CHAR(']');

			if (strcmp(*type, "dalloc") == 0) {
				break;
			}
			assert(strcmp(*type, "alloc") == 0);
			if (!record->released) {
				break;
			}
			ASSERT_CHAR(',');
			++type;
		}
		ASSERT_CHAR('}');

#undef ASSERT_FORMATTED_STR
#undef ASSERT_STR
#undef ASSERT_CHAR

	}
	assert_ptr_eq(record, records + n_records, DUMP_ERROR);
	assert_ptr_eq(start, end, DUMP_ERROR);
}

TEST_BEGIN(test_prof_recent_alloc_dump) {
	test_skip_if(!config_prof);

	confirm_prof_setup();

	ssize_t future;
	void *p, *q;
	confirm_record_t records[2];

	assert_zu_eq(lg_prof_sample, (size_t)0,
	    "lg_prof_sample not set correctly");

	future = 0;
	assert_d_eq(mallctl("experimental.prof_recent.alloc_max",
	    NULL, NULL, &future, sizeof(ssize_t)), 0, "Write error");
	call_dump();
	expect_str_eq(dump_out, "{\"sample_interval\":1,"
	    "\"recent_alloc_max\":0,\"recent_alloc\":[]}", DUMP_ERROR);

	future = 2;
	assert_d_eq(mallctl("experimental.prof_recent.alloc_max",
	    NULL, NULL, &future, sizeof(ssize_t)), 0, "Write error");
	call_dump();
	const char *template = "{\"sample_interval\":1,"
	    "\"recent_alloc_max\":2,\"recent_alloc\":[]}";
	expect_str_eq(dump_out, template, DUMP_ERROR);

	p = malloc(7);
	call_dump();
	records[0].size = 7;
	records[0].usize = sz_s2u(7);
	records[0].released = false;
	confirm_record(template, records, 1);

	q = mallocx(17, MALLOCX_ALIGN(128));
	call_dump();
	records[1].size = 17;
	records[1].usize = sz_sa2u(17, 128);
	records[1].released = false;
	confirm_record(template, records, 2);

	free(q);
	call_dump();
	records[1].released = true;
	confirm_record(template, records, 2);

	free(p);
	call_dump();
	records[0].released = true;
	confirm_record(template, records, 2);

	future = OPT_ALLOC_MAX;
	assert_d_eq(mallctl("experimental.prof_recent.alloc_max",
	    NULL, NULL, &future, sizeof(ssize_t)), 0, "Write error");
	confirm_prof_setup();
}
TEST_END

#undef DUMP_ERROR
#undef DUMP_OUT_SIZE

#define N_THREADS 8
#define N_PTRS 512
#define N_CTLS 8
#define N_ITERS 2048
#define STRESS_ALLOC_MAX 4096

typedef struct {
	thd_t thd;
	size_t id;
	void *ptrs[N_PTRS];
	size_t count;
} thd_data_t;

static thd_data_t thd_data[N_THREADS];
static ssize_t test_max;
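
/*
 * Sleeping in the write callback slows each dump down, presumably to
 * widen the window in which the other threads' operations can race
 * with the dump.
 */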

static void
test_write_cb(void *cbopaque, const char *str) {
	sleep_ns(1000 * 1000);
}
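
/*
 * Worker body: each iteration randomly either frees one of the thread's
 * live pointers, allocates a new one, dumps the recent list, reads the
 * record limit, or writes one of three limits (test_max * 2, test_max,
 * test_max / 2), so the limit keeps changing while records are added.
 */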
static void *
f_thread(void *arg) {
	const size_t thd_id = *(size_t *)arg;
	thd_data_t *data_p = thd_data + thd_id;
	assert(data_p->id == thd_id);

	data_p->count = 0;
	uint64_t rand = (uint64_t)thd_id;
	tsd_t *tsd = tsd_fetch();
	assert(test_max > 1);
	ssize_t last_max = -1;

	for (int i = 0; i < N_ITERS; i++) {
		rand = prng_range_u64(&rand, N_PTRS + N_CTLS * 5);
		assert(data_p->count <= N_PTRS);
		if (rand < data_p->count) {
			assert(data_p->count > 0);
			if (rand != data_p->count - 1) {
				assert(data_p->count > 1);
				void *temp = data_p->ptrs[rand];
				data_p->ptrs[rand] =
				    data_p->ptrs[data_p->count - 1];
				data_p->ptrs[data_p->count - 1] = temp;
			}
			free(data_p->ptrs[--data_p->count]);
		} else if (rand < N_PTRS) {
			assert(data_p->count < N_PTRS);
			data_p->ptrs[data_p->count++] = malloc(1);
		} else if (rand % 5 == 0) {
			prof_recent_alloc_dump(tsd, test_write_cb, NULL);
		} else if (rand % 5 == 1) {
			last_max = prof_recent_alloc_max_ctl_read();
		} else if (rand % 5 == 2) {
			last_max =
			    prof_recent_alloc_max_ctl_write(tsd, test_max * 2);
		} else if (rand % 5 == 3) {
			last_max =
			    prof_recent_alloc_max_ctl_write(tsd, test_max);
		} else {
			assert(rand % 5 == 4);
			last_max =
			    prof_recent_alloc_max_ctl_write(tsd, test_max / 2);
		}
		assert_zd_ge(last_max, -1, "Illegal last-N max");
	}

	while (data_p->count > 0) {
		free(data_p->ptrs[--data_p->count]);
	}

	return NULL;
}
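
/*
 * Run the workers twice: first with the small OPT_ALLOC_MAX limit, then
 * with the much larger STRESS_ALLOC_MAX, so the record list is trimmed
 * under very different amounts of pressure.
 */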
TEST_BEGIN(test_prof_recent_stress) {
	test_skip_if(!config_prof);
	confirm_prof_setup();

	test_max = OPT_ALLOC_MAX;
	for (size_t i = 0; i < N_THREADS; i++) {
		thd_data_t *data_p = thd_data + i;
		data_p->id = i;
		thd_create(&data_p->thd, &f_thread, &data_p->id);
	}
	for (size_t i = 0; i < N_THREADS; i++) {
		thd_data_t *data_p = thd_data + i;
		thd_join(data_p->thd, NULL);
	}

	test_max = STRESS_ALLOC_MAX;
	assert_d_eq(mallctl("experimental.prof_recent.alloc_max",
	    NULL, NULL, &test_max, sizeof(ssize_t)), 0, "Write error");
	for (size_t i = 0; i < N_THREADS; i++) {
		thd_data_t *data_p = thd_data + i;
		data_p->id = i;
		thd_create(&data_p->thd, &f_thread, &data_p->id);
	}
	for (size_t i = 0; i < N_THREADS; i++) {
		thd_data_t *data_p = thd_data + i;
		thd_join(data_p->thd, NULL);
	}

	test_max = OPT_ALLOC_MAX;
	assert_d_eq(mallctl("experimental.prof_recent.alloc_max",
	    NULL, NULL, &test_max, sizeof(ssize_t)), 0, "Write error");
	confirm_prof_setup();
}
TEST_END

#undef STRESS_ALLOC_MAX
#undef N_ITERS
#undef N_PTRS
#undef N_THREADS

int
main(void) {
	return test(
	    test_confirm_setup,
	    test_prof_recent_off,
	    test_prof_recent_on,
	    test_prof_recent_alloc,
	    test_prof_recent_alloc_dump,
	    test_prof_recent_stress);
}