deps/jemalloc/src/stats.c

#define JEMALLOC_STATS_C_
#include "jemalloc/internal/jemalloc_internal.h"

#define CTL_GET(n, v, t) do { \
    size_t sz = sizeof(t); \
    xmallctl(n, v, &sz, NULL, 0); \
} while (0)

#define CTL_I_GET(n, v, t) do { \
    size_t mib[6]; \
    size_t miblen = sizeof(mib) / sizeof(size_t); \
    size_t sz = sizeof(t); \
    xmallctlnametomib(n, mib, &miblen); \
    mib[2] = i; \
    xmallctlbymib(mib, miblen, v, &sz, NULL, 0); \
} while (0)

#define CTL_J_GET(n, v, t) do { \
    size_t mib[6]; \
    size_t miblen = sizeof(mib) / sizeof(size_t); \
    size_t sz = sizeof(t); \
    xmallctlnametomib(n, mib, &miblen); \
    mib[2] = j; \
    xmallctlbymib(mib, miblen, v, &sz, NULL, 0); \
} while (0)

#define CTL_IJ_GET(n, v, t) do { \
    size_t mib[6]; \
    size_t miblen = sizeof(mib) / sizeof(size_t); \
    size_t sz = sizeof(t); \
    xmallctlnametomib(n, mib, &miblen); \
    mib[2] = i; \
    mib[4] = j; \
    xmallctlbymib(mib, miblen, v, &sz, NULL, 0); \
} while (0)
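
/*
 * Usage note (editor's sketch, not upstream text): the CTL_* macros wrap the
 * mallctl*() lookup functions.  CTL_GET() reads a named value directly, while
 * CTL_I_GET(), CTL_J_GET(), and CTL_IJ_GET() first translate the name to a
 * MIB and then splice the enclosing function's locals i (arena index) and/or
 * j (bin/large-run index) into it, replacing the literal "0" components of
 * names such as "stats.arenas.0.bins.0.nruns".  For example:
 *
 *     size_t pagesize;
 *     CTL_GET("arenas.pagesize", &pagesize, size_t);
 *
 * expands to a single xmallctl() read with sz = sizeof(size_t).
 */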

/******************************************************************************/
/* Data. */

bool opt_stats_print = false;

#ifdef JEMALLOC_STATS
size_t stats_cactive = 0;
#endif

/******************************************************************************/
/* Function prototypes for non-inline static functions. */

#ifdef JEMALLOC_STATS
static void malloc_vcprintf(void (*write_cb)(void *, const char *),
    void *cbopaque, const char *format, va_list ap);
static void stats_arena_bins_print(void (*write_cb)(void *, const char *),
    void *cbopaque, unsigned i);
static void stats_arena_lruns_print(void (*write_cb)(void *, const char *),
    void *cbopaque, unsigned i);
static void stats_arena_print(void (*write_cb)(void *, const char *),
    void *cbopaque, unsigned i);
#endif

/******************************************************************************/

/*
 * We don't want to depend on vsnprintf() for production builds, since that can
 * cause unnecessary bloat for static binaries.  u2s() provides minimal integer
 * printing functionality, so that malloc_printf() use can be limited to
 * JEMALLOC_STATS code.
 */
char *
u2s(uint64_t x, unsigned base, char *s)
{
    unsigned i;

    i = UMAX2S_BUFSIZE - 1;
    s[i] = '\0';
    switch (base) {
    case 10:
        do {
            i--;
            s[i] = "0123456789"[x % (uint64_t)10];
            x /= (uint64_t)10;
        } while (x > 0);
        break;
    case 16:
        do {
            i--;
            s[i] = "0123456789abcdef"[x & 0xf];
            x >>= 4;
        } while (x > 0);
        break;
    default:
        do {
            i--;
            s[i] = "0123456789abcdefghijklmnopqrstuvwxyz"[x %
                (uint64_t)base];
            x /= (uint64_t)base;
        } while (x > 0);
    }

    return (&s[i]);
}
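
/*
 * Example (editor's sketch): u2s() fills the caller's buffer from the end and
 * returns a pointer into it, so the caller keeps ownership of the storage:
 *
 *     char buf[UMAX2S_BUFSIZE];
 *     const char *str = u2s(255, 16, buf);    (str points at "ff")
 *
 * The buffer is reused on every call, so the result must be consumed or
 * copied before u2s() is called again with the same buffer; this is how
 * stats_print() below uses its single s[UMAX2S_BUFSIZE] scratch buffer.
 */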

#ifdef JEMALLOC_STATS
static void
malloc_vcprintf(void (*write_cb)(void *, const char *), void *cbopaque,
    const char *format, va_list ap)
{
    char buf[4096];

    if (write_cb == NULL) {
        /*
         * The caller did not provide an alternate write_cb callback
         * function, so use the default one.  malloc_write() is an
         * inline function, so use malloc_message() directly here.
         */
        write_cb = JEMALLOC_P(malloc_message);
        cbopaque = NULL;
    }

    vsnprintf(buf, sizeof(buf), format, ap);
    write_cb(cbopaque, buf);
}

/*
 * Print to a callback function in such a way as to (hopefully) avoid memory
 * allocation.
 */
JEMALLOC_ATTR(format(printf, 3, 4))
void
malloc_cprintf(void (*write_cb)(void *, const char *), void *cbopaque,
    const char *format, ...)
{
    va_list ap;

    va_start(ap, format);
    malloc_vcprintf(write_cb, cbopaque, format, ap);
    va_end(ap);
}

/*
 * Print to stderr in such a way as to (hopefully) avoid memory allocation.
 */
JEMALLOC_ATTR(format(printf, 1, 2))
void
malloc_printf(const char *format, ...)
{
    va_list ap;

    va_start(ap, format);
    malloc_vcprintf(NULL, NULL, format, ap);
    va_end(ap);
}
#endif
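
/*
 * Example (editor's sketch, not upstream code): all output in this file is
 * funneled through a write_cb(cbopaque, str) callback pair, which lets an
 * application capture the statistics instead of having them written via
 * malloc_message().  A minimal callback that appends into a caller-supplied
 * buffer (assuming the sink starts with len < cap) might look like:
 *
 *     struct sink { char *buf; size_t len, cap; };
 *
 *     static void
 *     buf_write_cb(void *opaque, const char *str)
 *     {
 *         struct sink *s = (struct sink *)opaque;
 *         size_t n = strlen(str);
 *
 *         if (n >= s->cap - s->len)
 *             n = s->cap - s->len - 1;
 *         memcpy(s->buf + s->len, str, n);
 *         s->len += n;
 *         s->buf[s->len] = '\0';
 *     }
 *
 * and would be passed as stats_print(buf_write_cb, &sink, NULL).
 */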

#ifdef JEMALLOC_STATS
static void
stats_arena_bins_print(void (*write_cb)(void *, const char *), void *cbopaque,
    unsigned i)
{
    size_t pagesize;
    bool config_tcache;
    unsigned nbins, j, gap_start;

    CTL_GET("arenas.pagesize", &pagesize, size_t);

    CTL_GET("config.tcache", &config_tcache, bool);
    if (config_tcache) {
        malloc_cprintf(write_cb, cbopaque,
            "bins:     bin    size regs pgs    allocated      nmalloc"
            "      ndalloc    nrequests       nfills     nflushes"
            "      newruns       reruns      maxruns      curruns\n");
    } else {
        malloc_cprintf(write_cb, cbopaque,
            "bins:     bin    size regs pgs    allocated      nmalloc"
            "      ndalloc      newruns       reruns      maxruns"
            "      curruns\n");
    }
    CTL_GET("arenas.nbins", &nbins, unsigned);
    for (j = 0, gap_start = UINT_MAX; j < nbins; j++) {
        uint64_t nruns;

        CTL_IJ_GET("stats.arenas.0.bins.0.nruns", &nruns, uint64_t);
        if (nruns == 0) {
            if (gap_start == UINT_MAX)
                gap_start = j;
        } else {
            unsigned ntbins_, nqbins, ncbins, nsbins;
            size_t reg_size, run_size, allocated;
            uint32_t nregs;
            uint64_t nmalloc, ndalloc, nrequests, nfills, nflushes;
            uint64_t reruns;
            size_t highruns, curruns;

            if (gap_start != UINT_MAX) {
                if (j > gap_start + 1) {
                    /* Gap of more than one size class. */
                    malloc_cprintf(write_cb, cbopaque,
                        "[%u..%u]\n", gap_start, j - 1);
                } else {
                    /* Gap of one size class. */
                    malloc_cprintf(write_cb, cbopaque,
                        "[%u]\n", gap_start);
                }
                gap_start = UINT_MAX;
            }
            CTL_GET("arenas.ntbins", &ntbins_, unsigned);
            CTL_GET("arenas.nqbins", &nqbins, unsigned);
            CTL_GET("arenas.ncbins", &ncbins, unsigned);
            CTL_GET("arenas.nsbins", &nsbins, unsigned);
            CTL_J_GET("arenas.bin.0.size", &reg_size, size_t);
            CTL_J_GET("arenas.bin.0.nregs", &nregs, uint32_t);
            CTL_J_GET("arenas.bin.0.run_size", &run_size, size_t);
            CTL_IJ_GET("stats.arenas.0.bins.0.allocated",
                &allocated, size_t);
            CTL_IJ_GET("stats.arenas.0.bins.0.nmalloc",
                &nmalloc, uint64_t);
            CTL_IJ_GET("stats.arenas.0.bins.0.ndalloc",
                &ndalloc, uint64_t);
            if (config_tcache) {
                CTL_IJ_GET("stats.arenas.0.bins.0.nrequests",
                    &nrequests, uint64_t);
                CTL_IJ_GET("stats.arenas.0.bins.0.nfills",
                    &nfills, uint64_t);
                CTL_IJ_GET("stats.arenas.0.bins.0.nflushes",
                    &nflushes, uint64_t);
            }
            CTL_IJ_GET("stats.arenas.0.bins.0.nreruns", &reruns,
                uint64_t);
            CTL_IJ_GET("stats.arenas.0.bins.0.highruns", &highruns,
                size_t);
            CTL_IJ_GET("stats.arenas.0.bins.0.curruns", &curruns,
                size_t);
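            /*
             * Editor's note: the %1s column in the rows below tags each
             * bin's size-class family, matching the arenas.{nt,nq,nc,ns}bins
             * counts read above: T = tiny, Q = quantum-spaced,
             * C = cacheline-spaced, S = subpage-spaced.
             */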
            if (config_tcache) {
                malloc_cprintf(write_cb, cbopaque,
                    "%13u %1s %5zu %4u %3zu %12zu %12"PRIu64
                    " %12"PRIu64" %12"PRIu64" %12"PRIu64
                    " %12"PRIu64" %12"PRIu64" %12"PRIu64
                    " %12zu %12zu\n",
                    j,
                    j < ntbins_ ? "T" : j < ntbins_ + nqbins ?
                    "Q" : j < ntbins_ + nqbins + ncbins ? "C" :
                    "S",
                    reg_size, nregs, run_size / pagesize,
                    allocated, nmalloc, ndalloc, nrequests,
                    nfills, nflushes, nruns, reruns, highruns,
                    curruns);
            } else {
                malloc_cprintf(write_cb, cbopaque,
                    "%13u %1s %5zu %4u %3zu %12zu %12"PRIu64
                    " %12"PRIu64" %12"PRIu64" %12"PRIu64
                    " %12zu %12zu\n",
                    j,
                    j < ntbins_ ? "T" : j < ntbins_ + nqbins ?
                    "Q" : j < ntbins_ + nqbins + ncbins ? "C" :
                    "S",
                    reg_size, nregs, run_size / pagesize,
                    allocated, nmalloc, ndalloc, nruns, reruns,
                    highruns, curruns);
            }
        }
    }
    if (gap_start != UINT_MAX) {
        if (j > gap_start + 1) {
            /* Gap of more than one size class. */
            malloc_cprintf(write_cb, cbopaque, "[%u..%u]\n",
                gap_start, j - 1);
        } else {
            /* Gap of one size class. */
            malloc_cprintf(write_cb, cbopaque, "[%u]\n", gap_start);
        }
    }
}

static void
stats_arena_lruns_print(void (*write_cb)(void *, const char *), void *cbopaque,
    unsigned i)
{
    size_t pagesize, nlruns, j;
    ssize_t gap_start;

    CTL_GET("arenas.pagesize", &pagesize, size_t);

    malloc_cprintf(write_cb, cbopaque,
        "large:   size pages      nmalloc      ndalloc    nrequests"
        "      maxruns      curruns\n");
    CTL_GET("arenas.nlruns", &nlruns, size_t);
    for (j = 0, gap_start = -1; j < nlruns; j++) {
        uint64_t nmalloc, ndalloc, nrequests;
        size_t run_size, highruns, curruns;

        CTL_IJ_GET("stats.arenas.0.lruns.0.nmalloc", &nmalloc,
            uint64_t);
        CTL_IJ_GET("stats.arenas.0.lruns.0.ndalloc", &ndalloc,
            uint64_t);
        CTL_IJ_GET("stats.arenas.0.lruns.0.nrequests", &nrequests,
            uint64_t);
        if (nrequests == 0) {
            if (gap_start == -1)
                gap_start = j;
        } else {
            CTL_J_GET("arenas.lrun.0.size", &run_size, size_t);
            CTL_IJ_GET("stats.arenas.0.lruns.0.highruns", &highruns,
                size_t);
            CTL_IJ_GET("stats.arenas.0.lruns.0.curruns", &curruns,
                size_t);
            if (gap_start != -1) {
                malloc_cprintf(write_cb, cbopaque, "[%zu]\n",
                    j - gap_start);
                gap_start = -1;
            }
            malloc_cprintf(write_cb, cbopaque,
                "%13zu %5zu %12"PRIu64" %12"PRIu64" %12"PRIu64
                " %12zu %12zu\n",
                run_size, run_size / pagesize, nmalloc, ndalloc,
                nrequests, highruns, curruns);
        }
    }
    if (gap_start != -1)
        malloc_cprintf(write_cb, cbopaque, "[%zu]\n", j - gap_start);
}

static void
stats_arena_print(void (*write_cb)(void *, const char *), void *cbopaque,
    unsigned i)
{
    unsigned nthreads;
    size_t pagesize, pactive, pdirty, mapped;
    uint64_t npurge, nmadvise, purged;
    size_t small_allocated;
    uint64_t small_nmalloc, small_ndalloc, small_nrequests;
    size_t large_allocated;
    uint64_t large_nmalloc, large_ndalloc, large_nrequests;

    CTL_GET("arenas.pagesize", &pagesize, size_t);

    CTL_I_GET("stats.arenas.0.nthreads", &nthreads, unsigned);
    malloc_cprintf(write_cb, cbopaque,
        "assigned threads: %u\n", nthreads);
    CTL_I_GET("stats.arenas.0.pactive", &pactive, size_t);
    CTL_I_GET("stats.arenas.0.pdirty", &pdirty, size_t);
    CTL_I_GET("stats.arenas.0.npurge", &npurge, uint64_t);
    CTL_I_GET("stats.arenas.0.nmadvise", &nmadvise, uint64_t);
    CTL_I_GET("stats.arenas.0.purged", &purged, uint64_t);
    malloc_cprintf(write_cb, cbopaque,
        "dirty pages: %zu:%zu active:dirty, %"PRIu64" sweep%s,"
        " %"PRIu64" madvise%s, %"PRIu64" purged\n",
        pactive, pdirty, npurge, npurge == 1 ? "" : "s",
        nmadvise, nmadvise == 1 ? "" : "s", purged);

    malloc_cprintf(write_cb, cbopaque,
        "            allocated      nmalloc      ndalloc    nrequests\n");
    CTL_I_GET("stats.arenas.0.small.allocated", &small_allocated, size_t);
    CTL_I_GET("stats.arenas.0.small.nmalloc", &small_nmalloc, uint64_t);
    CTL_I_GET("stats.arenas.0.small.ndalloc", &small_ndalloc, uint64_t);
    CTL_I_GET("stats.arenas.0.small.nrequests", &small_nrequests, uint64_t);
    malloc_cprintf(write_cb, cbopaque,
        "small:   %12zu %12"PRIu64" %12"PRIu64" %12"PRIu64"\n",
        small_allocated, small_nmalloc, small_ndalloc, small_nrequests);
    CTL_I_GET("stats.arenas.0.large.allocated", &large_allocated, size_t);
    CTL_I_GET("stats.arenas.0.large.nmalloc", &large_nmalloc, uint64_t);
    CTL_I_GET("stats.arenas.0.large.ndalloc", &large_ndalloc, uint64_t);
    CTL_I_GET("stats.arenas.0.large.nrequests", &large_nrequests, uint64_t);
    malloc_cprintf(write_cb, cbopaque,
        "large:   %12zu %12"PRIu64" %12"PRIu64" %12"PRIu64"\n",
        large_allocated, large_nmalloc, large_ndalloc, large_nrequests);
    malloc_cprintf(write_cb, cbopaque,
        "total:   %12zu %12"PRIu64" %12"PRIu64" %12"PRIu64"\n",
        small_allocated + large_allocated,
        small_nmalloc + large_nmalloc,
        small_ndalloc + large_ndalloc,
        small_nrequests + large_nrequests);
    malloc_cprintf(write_cb, cbopaque, "active:  %12zu\n",
        pactive * pagesize);
    CTL_I_GET("stats.arenas.0.mapped", &mapped, size_t);
    malloc_cprintf(write_cb, cbopaque, "mapped:  %12zu\n", mapped);

    stats_arena_bins_print(write_cb, cbopaque, i);
    stats_arena_lruns_print(write_cb, cbopaque, i);
}
#endif

void
stats_print(void (*write_cb)(void *, const char *), void *cbopaque,
    const char *opts)
{
    int err;
    uint64_t epoch;
    size_t u64sz;
    char s[UMAX2S_BUFSIZE];
    bool general = true;
    bool merged = true;
    bool unmerged = true;
    bool bins = true;
    bool large = true;

    /*
     * Refresh stats, in case mallctl() was called by the application.
     *
     * Check for OOM here, since refreshing the ctl cache can trigger
     * allocation.  In practice, none of the subsequent mallctl()-related
     * calls in this function will cause OOM if this one succeeds.
     */
    epoch = 1;
    u64sz = sizeof(uint64_t);
    err = JEMALLOC_P(mallctl)("epoch", &epoch, &u64sz, &epoch,
        sizeof(uint64_t));
    if (err != 0) {
        if (err == EAGAIN) {
            malloc_write("<jemalloc>: Memory allocation failure in "
                "mallctl(\"epoch\", ...)\n");
            return;
        }
        malloc_write("<jemalloc>: Failure in mallctl(\"epoch\", "
            "...)\n");
        abort();
    }

    if (write_cb == NULL) {
        /*
         * The caller did not provide an alternate write_cb callback
         * function, so use the default one.  malloc_write() is an
         * inline function, so use malloc_message() directly here.
         */
        write_cb = JEMALLOC_P(malloc_message);
        cbopaque = NULL;
    }

    if (opts != NULL) {
        unsigned i;

        for (i = 0; opts[i] != '\0'; i++) {
            switch (opts[i]) {
            case 'g':
                general = false;
                break;
            case 'm':
                merged = false;
                break;
            case 'a':
                unmerged = false;
                break;
            case 'b':
                bins = false;
                break;
            case 'l':
                large = false;
                break;
            default:
                break;
            }
        }
    }
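
    /*
     * Editor's note: each character in opts *disables* one section of the
     * report: 'g' general info, 'm' merged arena stats, 'a' per-arena
     * stats, 'b' bin stats, 'l' large object stats.  For example,
     * stats_print(write_cb, cbopaque, "ma") limits the output to the
     * general information plus the chunk and huge allocation totals.
     */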

    write_cb(cbopaque, "___ Begin jemalloc statistics ___\n");
    if (general) {
        int err;
        const char *cpv;
        bool bv;
        unsigned uv;
        ssize_t ssv;
        size_t sv, bsz, ssz, sssz, cpsz;

        bsz = sizeof(bool);
        ssz = sizeof(size_t);
        sssz = sizeof(ssize_t);
        cpsz = sizeof(const char *);

        CTL_GET("version", &cpv, const char *);
        write_cb(cbopaque, "Version: ");
        write_cb(cbopaque, cpv);
        write_cb(cbopaque, "\n");
        CTL_GET("config.debug", &bv, bool);
        write_cb(cbopaque, "Assertions ");
        write_cb(cbopaque, bv ? "enabled" : "disabled");
        write_cb(cbopaque, "\n");

#define OPT_WRITE_BOOL(n) \
        if ((err = JEMALLOC_P(mallctl)("opt."#n, &bv, &bsz, \
            NULL, 0)) == 0) { \
            write_cb(cbopaque, "  opt."#n": "); \
            write_cb(cbopaque, bv ? "true" : "false"); \
            write_cb(cbopaque, "\n"); \
        }
#define OPT_WRITE_SIZE_T(n) \
        if ((err = JEMALLOC_P(mallctl)("opt."#n, &sv, &ssz, \
            NULL, 0)) == 0) { \
            write_cb(cbopaque, "  opt."#n": "); \
            write_cb(cbopaque, u2s(sv, 10, s)); \
            write_cb(cbopaque, "\n"); \
        }
#define OPT_WRITE_SSIZE_T(n) \
        if ((err = JEMALLOC_P(mallctl)("opt."#n, &ssv, &sssz, \
            NULL, 0)) == 0) { \
            if (ssv >= 0) { \
                write_cb(cbopaque, "  opt."#n": "); \
                write_cb(cbopaque, u2s(ssv, 10, s)); \
            } else { \
                write_cb(cbopaque, "  opt."#n": -"); \
                write_cb(cbopaque, u2s(-ssv, 10, s)); \
            } \
            write_cb(cbopaque, "\n"); \
        }
#define OPT_WRITE_CHAR_P(n) \
        if ((err = JEMALLOC_P(mallctl)("opt."#n, &cpv, &cpsz, \
            NULL, 0)) == 0) { \
            write_cb(cbopaque, "  opt."#n": \""); \
            write_cb(cbopaque, cpv); \
            write_cb(cbopaque, "\"\n"); \
        }

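        /*
         * Editor's note: each OPT_WRITE_* line below probes one "opt.*"
         * mallctl and prints it only when the lookup succeeds, so options
         * that were compiled out of this jemalloc build are silently
         * skipped.  OPT_WRITE_BOOL(abort), for instance, emits a line
         * such as "  opt.abort: false".
         */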
        write_cb(cbopaque, "Run-time option settings:\n");
        OPT_WRITE_BOOL(abort)
        OPT_WRITE_SIZE_T(lg_qspace_max)
        OPT_WRITE_SIZE_T(lg_cspace_max)
        OPT_WRITE_SIZE_T(lg_chunk)
        OPT_WRITE_SIZE_T(narenas)
        OPT_WRITE_SSIZE_T(lg_dirty_mult)
        OPT_WRITE_BOOL(stats_print)
        OPT_WRITE_BOOL(junk)
        OPT_WRITE_BOOL(zero)
        OPT_WRITE_BOOL(sysv)
        OPT_WRITE_BOOL(xmalloc)
        OPT_WRITE_BOOL(tcache)
        OPT_WRITE_SSIZE_T(lg_tcache_gc_sweep)
        OPT_WRITE_SSIZE_T(lg_tcache_max)
        OPT_WRITE_BOOL(prof)
        OPT_WRITE_CHAR_P(prof_prefix)
        OPT_WRITE_SIZE_T(lg_prof_bt_max)
        OPT_WRITE_BOOL(prof_active)
        OPT_WRITE_SSIZE_T(lg_prof_sample)
        OPT_WRITE_BOOL(prof_accum)
        OPT_WRITE_SSIZE_T(lg_prof_tcmax)
        OPT_WRITE_SSIZE_T(lg_prof_interval)
        OPT_WRITE_BOOL(prof_gdump)
        OPT_WRITE_BOOL(prof_leak)
        OPT_WRITE_BOOL(overcommit)

#undef OPT_WRITE_BOOL
#undef OPT_WRITE_SIZE_T
#undef OPT_WRITE_SSIZE_T
#undef OPT_WRITE_CHAR_P

        write_cb(cbopaque, "CPUs: ");
        write_cb(cbopaque, u2s(ncpus, 10, s));
        write_cb(cbopaque, "\n");

        CTL_GET("arenas.narenas", &uv, unsigned);
        write_cb(cbopaque, "Max arenas: ");
        write_cb(cbopaque, u2s(uv, 10, s));
        write_cb(cbopaque, "\n");

        write_cb(cbopaque, "Pointer size: ");
        write_cb(cbopaque, u2s(sizeof(void *), 10, s));
        write_cb(cbopaque, "\n");

        CTL_GET("arenas.quantum", &sv, size_t);
        write_cb(cbopaque, "Quantum size: ");
        write_cb(cbopaque, u2s(sv, 10, s));
        write_cb(cbopaque, "\n");

        CTL_GET("arenas.cacheline", &sv, size_t);
        write_cb(cbopaque, "Cacheline size (assumed): ");
        write_cb(cbopaque, u2s(sv, 10, s));
        write_cb(cbopaque, "\n");

        CTL_GET("arenas.subpage", &sv, size_t);
        write_cb(cbopaque, "Subpage spacing: ");
        write_cb(cbopaque, u2s(sv, 10, s));
        write_cb(cbopaque, "\n");

        if ((err = JEMALLOC_P(mallctl)("arenas.tspace_min", &sv, &ssz,
            NULL, 0)) == 0) {
            write_cb(cbopaque, "Tiny 2^n-spaced sizes: [");
            write_cb(cbopaque, u2s(sv, 10, s));
            write_cb(cbopaque, "..");

            CTL_GET("arenas.tspace_max", &sv, size_t);
            write_cb(cbopaque, u2s(sv, 10, s));
            write_cb(cbopaque, "]\n");
        }

        CTL_GET("arenas.qspace_min", &sv, size_t);
        write_cb(cbopaque, "Quantum-spaced sizes: [");
        write_cb(cbopaque, u2s(sv, 10, s));
        write_cb(cbopaque, "..");
        CTL_GET("arenas.qspace_max", &sv, size_t);
        write_cb(cbopaque, u2s(sv, 10, s));
        write_cb(cbopaque, "]\n");

        CTL_GET("arenas.cspace_min", &sv, size_t);
        write_cb(cbopaque, "Cacheline-spaced sizes: [");
        write_cb(cbopaque, u2s(sv, 10, s));
        write_cb(cbopaque, "..");
        CTL_GET("arenas.cspace_max", &sv, size_t);
        write_cb(cbopaque, u2s(sv, 10, s));
        write_cb(cbopaque, "]\n");

        CTL_GET("arenas.sspace_min", &sv, size_t);
        write_cb(cbopaque, "Subpage-spaced sizes: [");
        write_cb(cbopaque, u2s(sv, 10, s));
        write_cb(cbopaque, "..");
        CTL_GET("arenas.sspace_max", &sv, size_t);
        write_cb(cbopaque, u2s(sv, 10, s));
        write_cb(cbopaque, "]\n");

        CTL_GET("opt.lg_dirty_mult", &ssv, ssize_t);
        if (ssv >= 0) {
            write_cb(cbopaque,
                "Min active:dirty page ratio per arena: ");
            write_cb(cbopaque, u2s((1U << ssv), 10, s));
            write_cb(cbopaque, ":1\n");
        } else {
            write_cb(cbopaque,
                "Min active:dirty page ratio per arena: N/A\n");
        }
        if ((err = JEMALLOC_P(mallctl)("arenas.tcache_max", &sv,
            &ssz, NULL, 0)) == 0) {
            write_cb(cbopaque,
                "Maximum thread-cached size class: ");
            write_cb(cbopaque, u2s(sv, 10, s));
            write_cb(cbopaque, "\n");
        }
        if ((err = JEMALLOC_P(mallctl)("opt.lg_tcache_gc_sweep", &ssv,
            &ssz, NULL, 0)) == 0) {
            size_t tcache_gc_sweep = (1U << ssv);
            bool tcache_enabled;

            CTL_GET("opt.tcache", &tcache_enabled, bool);
            write_cb(cbopaque, "Thread cache GC sweep interval: ");
            write_cb(cbopaque, tcache_enabled && ssv >= 0 ?
                u2s(tcache_gc_sweep, 10, s) : "N/A");
            write_cb(cbopaque, "\n");
        }
        if ((err = JEMALLOC_P(mallctl)("opt.prof", &bv, &bsz, NULL, 0))
            == 0 && bv) {
            CTL_GET("opt.lg_prof_bt_max", &sv, size_t);
            write_cb(cbopaque, "Maximum profile backtrace depth: ");
            write_cb(cbopaque, u2s((1U << sv), 10, s));
            write_cb(cbopaque, "\n");

            CTL_GET("opt.lg_prof_tcmax", &ssv, ssize_t);
            write_cb(cbopaque,
                "Maximum per thread backtrace cache: ");
            if (ssv >= 0) {
                write_cb(cbopaque, u2s((1U << ssv), 10, s));
                write_cb(cbopaque, " (2^");
                write_cb(cbopaque, u2s(ssv, 10, s));
                write_cb(cbopaque, ")\n");
            } else
                write_cb(cbopaque, "N/A\n");

            CTL_GET("opt.lg_prof_sample", &sv, size_t);
            write_cb(cbopaque, "Average profile sample interval: ");
            write_cb(cbopaque, u2s((((uint64_t)1U) << sv), 10, s));
            write_cb(cbopaque, " (2^");
            write_cb(cbopaque, u2s(sv, 10, s));
            write_cb(cbopaque, ")\n");

            CTL_GET("opt.lg_prof_interval", &ssv, ssize_t);
            write_cb(cbopaque, "Average profile dump interval: ");
            if (ssv >= 0) {
                write_cb(cbopaque, u2s((((uint64_t)1U) << ssv),
                    10, s));
                write_cb(cbopaque, " (2^");
                write_cb(cbopaque, u2s(ssv, 10, s));
                write_cb(cbopaque, ")\n");
            } else
                write_cb(cbopaque, "N/A\n");
        }
        CTL_GET("arenas.chunksize", &sv, size_t);
        write_cb(cbopaque, "Chunk size: ");
        write_cb(cbopaque, u2s(sv, 10, s));
        CTL_GET("opt.lg_chunk", &sv, size_t);
        write_cb(cbopaque, " (2^");
        write_cb(cbopaque, u2s(sv, 10, s));
        write_cb(cbopaque, ")\n");
    }

#ifdef JEMALLOC_STATS
    {
        int err;
        size_t sszp, ssz;
        size_t *cactive;
        size_t allocated, active, mapped;
        size_t chunks_current, chunks_high, swap_avail;
        uint64_t chunks_total;
        size_t huge_allocated;
        uint64_t huge_nmalloc, huge_ndalloc;

        sszp = sizeof(size_t *);
        ssz = sizeof(size_t);

        CTL_GET("stats.cactive", &cactive, size_t *);
        CTL_GET("stats.allocated", &allocated, size_t);
        CTL_GET("stats.active", &active, size_t);
        CTL_GET("stats.mapped", &mapped, size_t);
        malloc_cprintf(write_cb, cbopaque,
            "Allocated: %zu, active: %zu, mapped: %zu\n",
            allocated, active, mapped);
        malloc_cprintf(write_cb, cbopaque,
            "Current active ceiling: %zu\n", atomic_read_z(cactive));

        /* Print chunk stats. */
        CTL_GET("stats.chunks.total", &chunks_total, uint64_t);
        CTL_GET("stats.chunks.high", &chunks_high, size_t);
        CTL_GET("stats.chunks.current", &chunks_current, size_t);
        if ((err = JEMALLOC_P(mallctl)("swap.avail", &swap_avail, &ssz,
            NULL, 0)) == 0) {
            size_t lg_chunk;

            malloc_cprintf(write_cb, cbopaque, "chunks: nchunks   "
                "highchunks    curchunks   swap_avail\n");
            CTL_GET("opt.lg_chunk", &lg_chunk, size_t);
            malloc_cprintf(write_cb, cbopaque,
                "  %13"PRIu64"%13zu%13zu%13zu\n",
                chunks_total, chunks_high, chunks_current,
                swap_avail << lg_chunk);
        } else {
            malloc_cprintf(write_cb, cbopaque, "chunks: nchunks   "
                "highchunks    curchunks\n");
            malloc_cprintf(write_cb, cbopaque,
                "  %13"PRIu64"%13zu%13zu\n",
                chunks_total, chunks_high, chunks_current);
        }

        /* Print huge stats. */
        CTL_GET("stats.huge.nmalloc", &huge_nmalloc, uint64_t);
        CTL_GET("stats.huge.ndalloc", &huge_ndalloc, uint64_t);
        CTL_GET("stats.huge.allocated", &huge_allocated, size_t);
        malloc_cprintf(write_cb, cbopaque,
            "huge: nmalloc      ndalloc    allocated\n");
        malloc_cprintf(write_cb, cbopaque,
            " %12"PRIu64" %12"PRIu64" %12zu\n",
            huge_nmalloc, huge_ndalloc, huge_allocated);

        if (merged) {
            unsigned narenas;

            CTL_GET("arenas.narenas", &narenas, unsigned);
            {
                bool initialized[narenas];
                size_t isz;
                unsigned i, ninitialized;

                isz = sizeof(initialized);
                xmallctl("arenas.initialized", initialized,
                    &isz, NULL, 0);
                for (i = ninitialized = 0; i < narenas; i++) {
                    if (initialized[i])
                        ninitialized++;
                }

                if (ninitialized > 1 || unmerged == false) {
                    /* Print merged arena stats. */
                    malloc_cprintf(write_cb, cbopaque,
                        "\nMerged arenas stats:\n");
                    stats_arena_print(write_cb, cbopaque,
                        narenas);
                }
            }
        }

        if (unmerged) {
            unsigned narenas;

            /* Print stats for each arena. */

            CTL_GET("arenas.narenas", &narenas, unsigned);
            {
                bool initialized[narenas];
                size_t isz;
                unsigned i;

                isz = sizeof(initialized);
                xmallctl("arenas.initialized", initialized,
                    &isz, NULL, 0);

                for (i = 0; i < narenas; i++) {
                    if (initialized[i]) {
                        malloc_cprintf(write_cb, cbopaque,
                            "\narenas[%u]:\n", i);
                        stats_arena_print(write_cb,
                            cbopaque, i);
                    }
                }
            }
        }
    }
#endif /* #ifdef JEMALLOC_STATS */
    write_cb(cbopaque, "--- End jemalloc statistics ---\n");
}
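
/*
 * Usage note (editor's sketch): applications normally reach stats_print()
 * through the public entry point, which in this prefix-wrapped jemalloc
 * build is JEMALLOC_P(malloc_stats_print).  Passing NULL for all three
 * arguments prints the full report via the default write callback:
 *
 *     JEMALLOC_P(malloc_stats_print)(NULL, NULL, NULL);
 *
 * while an opts string such as "gbl" (see the flag parsing above) suppresses
 * the general-information, per-bin, and large-run sections.
 */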