#define JEMALLOC_STATS_C_
#include "jemalloc/internal/jemalloc_internal.h"

#define CTL_GET(n, v, t) do { \
    size_t sz = sizeof(t); \
    xmallctl(n, v, &sz, NULL, 0); \
} while (0)

#define CTL_I_GET(n, v, t) do { \
    size_t mib[6]; \
    size_t miblen = sizeof(mib) / sizeof(size_t); \
    size_t sz = sizeof(t); \
    xmallctlnametomib(n, mib, &miblen); \
    mib[2] = i; \
    xmallctlbymib(mib, miblen, v, &sz, NULL, 0); \
} while (0)

#define CTL_J_GET(n, v, t) do { \
    size_t mib[6]; \
    size_t miblen = sizeof(mib) / sizeof(size_t); \
    size_t sz = sizeof(t); \
    xmallctlnametomib(n, mib, &miblen); \
    mib[2] = j; \
    xmallctlbymib(mib, miblen, v, &sz, NULL, 0); \
} while (0)

#define CTL_IJ_GET(n, v, t) do { \
    size_t mib[6]; \
    size_t miblen = sizeof(mib) / sizeof(size_t); \
    size_t sz = sizeof(t); \
    xmallctlnametomib(n, mib, &miblen); \
    mib[2] = i; \
    mib[4] = j; \
    xmallctlbymib(mib, miblen, v, &sz, NULL, 0); \
} while (0)
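
/*
 * Usage sketch (illustrative, not part of the original file): the I/J/IJ
 * variants resolve a ctl name to a MIB once, then patch the arena index
 * (mib[2] = i) and/or the bin/run index (mib[4] = j) before reading, e.g.:
 *
 *	uint64_t nruns;
 *	CTL_IJ_GET("stats.arenas.0.bins.0.nruns", &nruns, uint64_t);
 */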
/******************************************************************************/
/* Data. */

bool opt_stats_print = false;

#ifdef JEMALLOC_STATS
size_t stats_cactive = 0;
#endif
/******************************************************************************/
/* Function prototypes for non-inline static functions. */

#ifdef JEMALLOC_STATS
static void malloc_vcprintf(void (*write_cb)(void *, const char *),
    void *cbopaque, const char *format, va_list ap);
static void stats_arena_bins_print(void (*write_cb)(void *, const char *),
    void *cbopaque, unsigned i);
static void stats_arena_lruns_print(void (*write_cb)(void *, const char *),
    void *cbopaque, unsigned i);
static void stats_arena_print(void (*write_cb)(void *, const char *),
    void *cbopaque, unsigned i);
#endif
/******************************************************************************/

/*
 * We don't want to depend on vsnprintf() for production builds, since that can
 * cause unnecessary bloat for static binaries.  u2s() provides minimal integer
 * printing functionality, so that malloc_printf() use can be limited to
 * JEMALLOC_STATS code.
 */
char *
u2s(uint64_t x, unsigned base, char *s)
{
    unsigned i;

    i = UMAX2S_BUFSIZE - 1;
    s[i] = '\0';
    switch (base) {
    case 10:
        do {
            i--;
            s[i] = "0123456789"[x % (uint64_t)10];
            x /= (uint64_t)10;
        } while (x > 0);
        break;
    case 16:
        do {
            i--;
            s[i] = "0123456789abcdef"[x & 0xf];
            x >>= 4;
        } while (x > 0);
        break;
    default:
        do {
            i--;
            s[i] = "0123456789abcdefghijklmnopqrstuvwxyz"[x %
                (uint64_t)base];
            x /= (uint64_t)base;
        } while (x > 0);
    }

    return (&s[i]);
}
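
/*
 * Usage sketch (illustrative): u2s() fills the caller-provided buffer from the
 * end and returns a pointer into it, so the buffer must hold UMAX2S_BUFSIZE
 * bytes and the return value, not the buffer start, is what gets printed:
 *
 *	char buf[UMAX2S_BUFSIZE];
 *	write_cb(cbopaque, u2s(12345, 10, buf));
 */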
#ifdef JEMALLOC_STATS
static void
malloc_vcprintf(void (*write_cb)(void *, const char *), void *cbopaque,
    const char *format, va_list ap)
{
    char buf[4096];

    if (write_cb == NULL) {
        /*
         * The caller did not provide an alternate write_cb callback
         * function, so use the default one.  malloc_write() is an
         * inline function, so use malloc_message() directly here.
         */
        write_cb = JEMALLOC_P(malloc_message);
        cbopaque = NULL;
    }

    vsnprintf(buf, sizeof(buf), format, ap);
    write_cb(cbopaque, buf);
}
/*
 * Print to a callback function in such a way as to (hopefully) avoid memory
 * allocation.
 */
JEMALLOC_ATTR(format(printf, 3, 4))
void
malloc_cprintf(void (*write_cb)(void *, const char *), void *cbopaque,
    const char *format, ...)
{
    va_list ap;

    va_start(ap, format);
    malloc_vcprintf(write_cb, cbopaque, format, ap);
    va_end(ap);
}
/*
 * Print to stderr in such a way as to (hopefully) avoid memory allocation.
 */
JEMALLOC_ATTR(format(printf, 1, 2))
void
malloc_printf(const char *format, ...)
{
    va_list ap;

    va_start(ap, format);
    malloc_vcprintf(NULL, NULL, format, ap);
    va_end(ap);
}
#endif
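
/*
 * Callback sketch (illustrative, not part of the original file): a write_cb
 * receives the opaque pointer plus a NUL-terminated fragment, so output can be
 * accumulated by the caller instead of going to stderr:
 *
 *	static void
 *	buf_write_cb(void *cbopaque, const char *s)
 *	{
 *	    strcat((char *)cbopaque, s);	// unbounded append; sketch only
 *	}
 *
 *	// malloc_cprintf(buf_write_cb, some_buffer, "%zu\n", (size_t)42);
 */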
#ifdef JEMALLOC_STATS
static void
stats_arena_bins_print(void (*write_cb)(void *, const char *), void *cbopaque,
    unsigned i)
{
    size_t pagesize;
    bool config_tcache;
    unsigned nbins, j, gap_start;

    CTL_GET("arenas.pagesize", &pagesize, size_t);

    CTL_GET("config.tcache", &config_tcache, bool);
    if (config_tcache) {
        malloc_cprintf(write_cb, cbopaque,
            "bins:     bin    size regs pgs    allocated      nmalloc"
            "      ndalloc    nrequests       nfills     nflushes"
            "      newruns       reruns      maxruns      curruns\n");
    } else {
        malloc_cprintf(write_cb, cbopaque,
            "bins:     bin    size regs pgs    allocated      nmalloc"
            "      ndalloc      newruns       reruns      maxruns"
            "      curruns\n");
    }
    CTL_GET("arenas.nbins", &nbins, unsigned);
    for (j = 0, gap_start = UINT_MAX; j < nbins; j++) {
        uint64_t nruns;

        CTL_IJ_GET("stats.arenas.0.bins.0.nruns", &nruns, uint64_t);
        if (nruns == 0) {
            if (gap_start == UINT_MAX)
                gap_start = j;
        } else {
            unsigned ntbins_, nqbins, ncbins, nsbins;
            size_t reg_size, run_size, allocated;
            uint32_t nregs;
            uint64_t nmalloc, ndalloc, nrequests, nfills, nflushes;
            uint64_t reruns;
            size_t highruns, curruns;

            if (gap_start != UINT_MAX) {
                if (j > gap_start + 1) {
                    /* Gap of more than one size class. */
                    malloc_cprintf(write_cb, cbopaque,
                        "[%u..%u]\n", gap_start,
                        j - 1);
                } else {
                    /* Gap of one size class. */
                    malloc_cprintf(write_cb, cbopaque,
                        "[%u]\n", gap_start);
                }
                gap_start = UINT_MAX;
            }
            CTL_GET("arenas.ntbins", &ntbins_, unsigned);
            CTL_GET("arenas.nqbins", &nqbins, unsigned);
            CTL_GET("arenas.ncbins", &ncbins, unsigned);
            CTL_GET("arenas.nsbins", &nsbins, unsigned);
            CTL_J_GET("arenas.bin.0.size", &reg_size, size_t);
            CTL_J_GET("arenas.bin.0.nregs", &nregs, uint32_t);
            CTL_J_GET("arenas.bin.0.run_size", &run_size, size_t);
            CTL_IJ_GET("stats.arenas.0.bins.0.allocated",
                &allocated, size_t);
            CTL_IJ_GET("stats.arenas.0.bins.0.nmalloc",
                &nmalloc, uint64_t);
            CTL_IJ_GET("stats.arenas.0.bins.0.ndalloc",
                &ndalloc, uint64_t);
            if (config_tcache) {
                CTL_IJ_GET("stats.arenas.0.bins.0.nrequests",
                    &nrequests, uint64_t);
                CTL_IJ_GET("stats.arenas.0.bins.0.nfills",
                    &nfills, uint64_t);
                CTL_IJ_GET("stats.arenas.0.bins.0.nflushes",
                    &nflushes, uint64_t);
            }
            CTL_IJ_GET("stats.arenas.0.bins.0.nreruns", &reruns,
                uint64_t);
            CTL_IJ_GET("stats.arenas.0.bins.0.highruns", &highruns,
                size_t);
            CTL_IJ_GET("stats.arenas.0.bins.0.curruns", &curruns,
                size_t);
            if (config_tcache) {
                malloc_cprintf(write_cb, cbopaque,
                    "%13u %1s %5zu %4u %3zu %12zu %12"PRIu64
                    " %12"PRIu64" %12"PRIu64" %12"PRIu64
                    " %12"PRIu64" %12"PRIu64" %12"PRIu64
                    " %12zu %12zu\n",
                    j,
                    j < ntbins_ ? "T" : j < ntbins_ + nqbins ?
                    "Q" : j < ntbins_ + nqbins + ncbins ? "C" :
                    "S",
                    reg_size, nregs, run_size / pagesize,
                    allocated, nmalloc, ndalloc, nrequests,
                    nfills, nflushes, nruns, reruns, highruns,
                    curruns);
            } else {
                malloc_cprintf(write_cb, cbopaque,
                    "%13u %1s %5zu %4u %3zu %12zu %12"PRIu64
                    " %12"PRIu64" %12"PRIu64" %12"PRIu64
                    " %12zu %12zu\n",
                    j,
                    j < ntbins_ ? "T" : j < ntbins_ + nqbins ?
                    "Q" : j < ntbins_ + nqbins + ncbins ? "C" :
                    "S",
                    reg_size, nregs, run_size / pagesize,
                    allocated, nmalloc, ndalloc, nruns, reruns,
                    highruns, curruns);
            }
        }
    }
    if (gap_start != UINT_MAX) {
        if (j > gap_start + 1) {
            /* Gap of more than one size class. */
            malloc_cprintf(write_cb, cbopaque, "[%u..%u]\n",
                gap_start, j - 1);
        } else {
            /* Gap of one size class. */
            malloc_cprintf(write_cb, cbopaque, "[%u]\n", gap_start);
        }
    }
}
static void
stats_arena_lruns_print(void (*write_cb)(void *, const char *), void *cbopaque,
    unsigned i)
{
    size_t pagesize, nlruns, j;
    ssize_t gap_start;

    CTL_GET("arenas.pagesize", &pagesize, size_t);

    malloc_cprintf(write_cb, cbopaque,
        "large:   size pages      nmalloc      ndalloc    nrequests"
        "      maxruns      curruns\n");
    CTL_GET("arenas.nlruns", &nlruns, size_t);
    for (j = 0, gap_start = -1; j < nlruns; j++) {
        uint64_t nmalloc, ndalloc, nrequests;
        size_t run_size, highruns, curruns;

        CTL_IJ_GET("stats.arenas.0.lruns.0.nmalloc", &nmalloc,
            uint64_t);
        CTL_IJ_GET("stats.arenas.0.lruns.0.ndalloc", &ndalloc,
            uint64_t);
        CTL_IJ_GET("stats.arenas.0.lruns.0.nrequests", &nrequests,
            uint64_t);
        if (nrequests == 0) {
            if (gap_start == -1)
                gap_start = j;
        } else {
            CTL_J_GET("arenas.lrun.0.size", &run_size, size_t);
            CTL_IJ_GET("stats.arenas.0.lruns.0.highruns", &highruns,
                size_t);
            CTL_IJ_GET("stats.arenas.0.lruns.0.curruns", &curruns,
                size_t);
            if (gap_start != -1) {
                malloc_cprintf(write_cb, cbopaque, "[%zu]\n",
                    j - gap_start);
                gap_start = -1;
            }
            malloc_cprintf(write_cb, cbopaque,
                "%13zu %5zu %12"PRIu64" %12"PRIu64" %12"PRIu64
                " %12zu %12zu\n",
                run_size, run_size / pagesize, nmalloc, ndalloc,
                nrequests, highruns, curruns);
        }
    }
    if (gap_start != -1)
        malloc_cprintf(write_cb, cbopaque, "[%zu]\n", j - gap_start);
}
static void
stats_arena_print(void (*write_cb)(void *, const char *), void *cbopaque,
    unsigned i)
{
    unsigned nthreads;
    size_t pagesize, pactive, pdirty, mapped;
    uint64_t npurge, nmadvise, purged;
    size_t small_allocated;
    uint64_t small_nmalloc, small_ndalloc, small_nrequests;
    size_t large_allocated;
    uint64_t large_nmalloc, large_ndalloc, large_nrequests;

    CTL_GET("arenas.pagesize", &pagesize, size_t);

    CTL_I_GET("stats.arenas.0.nthreads", &nthreads, unsigned);
    malloc_cprintf(write_cb, cbopaque,
        "assigned threads: %u\n", nthreads);
    CTL_I_GET("stats.arenas.0.pactive", &pactive, size_t);
    CTL_I_GET("stats.arenas.0.pdirty", &pdirty, size_t);
    CTL_I_GET("stats.arenas.0.npurge", &npurge, uint64_t);
    CTL_I_GET("stats.arenas.0.nmadvise", &nmadvise, uint64_t);
    CTL_I_GET("stats.arenas.0.purged", &purged, uint64_t);
    malloc_cprintf(write_cb, cbopaque,
        "dirty pages: %zu:%zu active:dirty, %"PRIu64" sweep%s,"
        " %"PRIu64" madvise%s, %"PRIu64" purged\n",
        pactive, pdirty, npurge, npurge == 1 ? "" : "s",
        nmadvise, nmadvise == 1 ? "" : "s", purged);

    malloc_cprintf(write_cb, cbopaque,
        "            allocated      nmalloc      ndalloc    nrequests\n");
    CTL_I_GET("stats.arenas.0.small.allocated", &small_allocated, size_t);
    CTL_I_GET("stats.arenas.0.small.nmalloc", &small_nmalloc, uint64_t);
    CTL_I_GET("stats.arenas.0.small.ndalloc", &small_ndalloc, uint64_t);
    CTL_I_GET("stats.arenas.0.small.nrequests", &small_nrequests, uint64_t);
    malloc_cprintf(write_cb, cbopaque,
        "small:   %12zu %12"PRIu64" %12"PRIu64" %12"PRIu64"\n",
        small_allocated, small_nmalloc, small_ndalloc, small_nrequests);
    CTL_I_GET("stats.arenas.0.large.allocated", &large_allocated, size_t);
    CTL_I_GET("stats.arenas.0.large.nmalloc", &large_nmalloc, uint64_t);
    CTL_I_GET("stats.arenas.0.large.ndalloc", &large_ndalloc, uint64_t);
    CTL_I_GET("stats.arenas.0.large.nrequests", &large_nrequests, uint64_t);
    malloc_cprintf(write_cb, cbopaque,
        "large:   %12zu %12"PRIu64" %12"PRIu64" %12"PRIu64"\n",
        large_allocated, large_nmalloc, large_ndalloc, large_nrequests);
    malloc_cprintf(write_cb, cbopaque,
        "total:   %12zu %12"PRIu64" %12"PRIu64" %12"PRIu64"\n",
        small_allocated + large_allocated,
        small_nmalloc + large_nmalloc,
        small_ndalloc + large_ndalloc,
        small_nrequests + large_nrequests);
    malloc_cprintf(write_cb, cbopaque, "active:  %12zu\n",
        pactive * pagesize);
    CTL_I_GET("stats.arenas.0.mapped", &mapped, size_t);
    malloc_cprintf(write_cb, cbopaque, "mapped:  %12zu\n", mapped);

    stats_arena_bins_print(write_cb, cbopaque, i);
    stats_arena_lruns_print(write_cb, cbopaque, i);
}
#endif
void
stats_print(void (*write_cb)(void *, const char *), void *cbopaque,
    const char *opts)
{
    int err;
    uint64_t epoch;
    size_t u64sz;
    char s[UMAX2S_BUFSIZE];
    bool general = true;
    bool merged = true;
    bool unmerged = true;
    bool bins = true;
    bool large = true;

    /*
     * Refresh stats, in case mallctl() was called by the application.
     *
     * Check for OOM here, since refreshing the ctl cache can trigger
     * allocation.  In practice, none of the subsequent mallctl()-related
     * calls in this function will cause OOM if this one succeeds.
     */
    epoch = 1;
    u64sz = sizeof(uint64_t);
    err = JEMALLOC_P(mallctl)("epoch", &epoch, &u64sz, &epoch,
        sizeof(uint64_t));
    if (err != 0) {
        if (err == EAGAIN) {
            malloc_write("<jemalloc>: Memory allocation failure in "
                "mallctl(\"epoch\", ...)\n");
            return;
        }
        malloc_write("<jemalloc>: Failure in mallctl(\"epoch\", "
            "...)\n");
        abort();
    }

    if (write_cb == NULL) {
        /*
         * The caller did not provide an alternate write_cb callback
         * function, so use the default one.  malloc_write() is an
         * inline function, so use malloc_message() directly here.
         */
        write_cb = JEMALLOC_P(malloc_message);
        cbopaque = NULL;
    }

    if (opts != NULL) {
        unsigned i;

        for (i = 0; opts[i] != '\0'; i++) {
            switch (opts[i]) {
            case 'g':
                general = false;
                break;
            case 'm':
                merged = false;
                break;
            case 'a':
                unmerged = false;
                break;
            case 'b':
                bins = false;
                break;
            case 'l':
                large = false;
                break;
            default:;
            }
        }
    }

    write_cb(cbopaque, "___ Begin jemalloc statistics ___\n");
    if (general) {
        int err;
        const char *cpv;
        bool bv;
        unsigned uv;
        ssize_t ssv;
        size_t sv, bsz, ssz, sssz, cpsz;

        bsz = sizeof(bool);
        ssz = sizeof(size_t);
        sssz = sizeof(ssize_t);
        cpsz = sizeof(const char *);

        CTL_GET("version", &cpv, const char *);
        write_cb(cbopaque, "Version: ");
        write_cb(cbopaque, cpv);
        write_cb(cbopaque, "\n");
        CTL_GET("config.debug", &bv, bool);
        write_cb(cbopaque, "Assertions ");
        write_cb(cbopaque, bv ? "enabled" : "disabled");
        write_cb(cbopaque, "\n");

#define OPT_WRITE_BOOL(n) \
        if ((err = JEMALLOC_P(mallctl)("opt."#n, &bv, &bsz, \
            NULL, 0)) == 0) { \
            write_cb(cbopaque, "  opt."#n": "); \
            write_cb(cbopaque, bv ? "true" : "false"); \
            write_cb(cbopaque, "\n"); \
        }
#define OPT_WRITE_SIZE_T(n) \
        if ((err = JEMALLOC_P(mallctl)("opt."#n, &sv, &ssz, \
            NULL, 0)) == 0) { \
            write_cb(cbopaque, "  opt."#n": "); \
            write_cb(cbopaque, u2s(sv, 10, s)); \
            write_cb(cbopaque, "\n"); \
        }
#define OPT_WRITE_SSIZE_T(n) \
        if ((err = JEMALLOC_P(mallctl)("opt."#n, &ssv, &sssz, \
            NULL, 0)) == 0) { \
            if (ssv >= 0) { \
                write_cb(cbopaque, "  opt."#n": "); \
                write_cb(cbopaque, u2s(ssv, 10, s)); \
            } else { \
                write_cb(cbopaque, "  opt."#n": -"); \
                write_cb(cbopaque, u2s(-ssv, 10, s)); \
            } \
            write_cb(cbopaque, "\n"); \
        }
#define OPT_WRITE_CHAR_P(n) \
        if ((err = JEMALLOC_P(mallctl)("opt."#n, &cpv, &cpsz, \
            NULL, 0)) == 0) { \
            write_cb(cbopaque, "  opt."#n": \""); \
            write_cb(cbopaque, cpv); \
            write_cb(cbopaque, "\"\n"); \
        }

        write_cb(cbopaque, "Run-time option settings:\n");
        OPT_WRITE_BOOL(abort)
        OPT_WRITE_SIZE_T(lg_qspace_max)
        OPT_WRITE_SIZE_T(lg_cspace_max)
        OPT_WRITE_SIZE_T(lg_chunk)
        OPT_WRITE_SIZE_T(narenas)
        OPT_WRITE_SSIZE_T(lg_dirty_mult)
        OPT_WRITE_BOOL(stats_print)
        OPT_WRITE_BOOL(junk)
        OPT_WRITE_BOOL(zero)
        OPT_WRITE_BOOL(sysv)
        OPT_WRITE_BOOL(xmalloc)
        OPT_WRITE_BOOL(tcache)
        OPT_WRITE_SSIZE_T(lg_tcache_gc_sweep)
        OPT_WRITE_SSIZE_T(lg_tcache_max)
        OPT_WRITE_BOOL(prof)
        OPT_WRITE_CHAR_P(prof_prefix)
        OPT_WRITE_SIZE_T(lg_prof_bt_max)
        OPT_WRITE_BOOL(prof_active)
        OPT_WRITE_SSIZE_T(lg_prof_sample)
        OPT_WRITE_BOOL(prof_accum)
        OPT_WRITE_SSIZE_T(lg_prof_tcmax)
        OPT_WRITE_SSIZE_T(lg_prof_interval)
        OPT_WRITE_BOOL(prof_gdump)
        OPT_WRITE_BOOL(prof_leak)
        OPT_WRITE_BOOL(overcommit)

#undef OPT_WRITE_BOOL
#undef OPT_WRITE_SIZE_T
#undef OPT_WRITE_SSIZE_T
#undef OPT_WRITE_CHAR_P
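
/*
 * Expansion sketch (illustrative, not part of the original file):
 * OPT_WRITE_BOOL(abort) stringifies its argument, so it probes
 * mallctl("opt.abort", ...) and only prints the option when it exists in this
 * build:
 *
 *	if ((err = JEMALLOC_P(mallctl)("opt.abort", &bv, &bsz, NULL, 0)) == 0) {
 *	    write_cb(cbopaque, "  opt.abort: ");
 *	    write_cb(cbopaque, bv ? "true" : "false");
 *	    write_cb(cbopaque, "\n");
 *	}
 */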
        write_cb(cbopaque, "CPUs: ");
        write_cb(cbopaque, u2s(ncpus, 10, s));
        write_cb(cbopaque, "\n");

        CTL_GET("arenas.narenas", &uv, unsigned);
        write_cb(cbopaque, "Max arenas: ");
        write_cb(cbopaque, u2s(uv, 10, s));
        write_cb(cbopaque, "\n");

        write_cb(cbopaque, "Pointer size: ");
        write_cb(cbopaque, u2s(sizeof(void *), 10, s));
        write_cb(cbopaque, "\n");

        CTL_GET("arenas.quantum", &sv, size_t);
        write_cb(cbopaque, "Quantum size: ");
        write_cb(cbopaque, u2s(sv, 10, s));
        write_cb(cbopaque, "\n");

        CTL_GET("arenas.cacheline", &sv, size_t);
        write_cb(cbopaque, "Cacheline size (assumed): ");
        write_cb(cbopaque, u2s(sv, 10, s));
        write_cb(cbopaque, "\n");

        CTL_GET("arenas.subpage", &sv, size_t);
        write_cb(cbopaque, "Subpage spacing: ");
        write_cb(cbopaque, u2s(sv, 10, s));
        write_cb(cbopaque, "\n");

        if ((err = JEMALLOC_P(mallctl)("arenas.tspace_min", &sv, &ssz,
            NULL, 0)) == 0) {
            write_cb(cbopaque, "Tiny 2^n-spaced sizes: [");
            write_cb(cbopaque, u2s(sv, 10, s));
            write_cb(cbopaque, "..");

            CTL_GET("arenas.tspace_max", &sv, size_t);
            write_cb(cbopaque, u2s(sv, 10, s));
            write_cb(cbopaque, "]\n");
        }

        CTL_GET("arenas.qspace_min", &sv, size_t);
        write_cb(cbopaque, "Quantum-spaced sizes: [");
        write_cb(cbopaque, u2s(sv, 10, s));
        write_cb(cbopaque, "..");
        CTL_GET("arenas.qspace_max", &sv, size_t);
        write_cb(cbopaque, u2s(sv, 10, s));
        write_cb(cbopaque, "]\n");

        CTL_GET("arenas.cspace_min", &sv, size_t);
        write_cb(cbopaque, "Cacheline-spaced sizes: [");
        write_cb(cbopaque, u2s(sv, 10, s));
        write_cb(cbopaque, "..");
        CTL_GET("arenas.cspace_max", &sv, size_t);
        write_cb(cbopaque, u2s(sv, 10, s));
        write_cb(cbopaque, "]\n");

        CTL_GET("arenas.sspace_min", &sv, size_t);
        write_cb(cbopaque, "Subpage-spaced sizes: [");
        write_cb(cbopaque, u2s(sv, 10, s));
        write_cb(cbopaque, "..");
        CTL_GET("arenas.sspace_max", &sv, size_t);
        write_cb(cbopaque, u2s(sv, 10, s));
        write_cb(cbopaque, "]\n");

        CTL_GET("opt.lg_dirty_mult", &ssv, ssize_t);
        if (ssv >= 0) {
            write_cb(cbopaque,
                "Min active:dirty page ratio per arena: ");
            write_cb(cbopaque, u2s((1U << ssv), 10, s));
            write_cb(cbopaque, ":1\n");
        } else {
            write_cb(cbopaque,
                "Min active:dirty page ratio per arena: N/A\n");
        }
        if ((err = JEMALLOC_P(mallctl)("arenas.tcache_max", &sv,
            &ssz, NULL, 0)) == 0) {
            write_cb(cbopaque,
                "Maximum thread-cached size class: ");
            write_cb(cbopaque, u2s(sv, 10, s));
            write_cb(cbopaque, "\n");
        }
        if ((err = JEMALLOC_P(mallctl)("opt.lg_tcache_gc_sweep", &ssv,
            &ssz, NULL, 0)) == 0) {
            size_t tcache_gc_sweep = (1U << ssv);
            bool tcache_enabled;
            CTL_GET("opt.tcache", &tcache_enabled, bool);
            write_cb(cbopaque, "Thread cache GC sweep interval: ");
            write_cb(cbopaque, tcache_enabled && ssv >= 0 ?
                u2s(tcache_gc_sweep, 10, s) : "N/A");
            write_cb(cbopaque, "\n");
        }
        if ((err = JEMALLOC_P(mallctl)("opt.prof", &bv, &bsz, NULL, 0))
            == 0 && bv) {
            CTL_GET("opt.lg_prof_bt_max", &sv, size_t);
            write_cb(cbopaque, "Maximum profile backtrace depth: ");
            write_cb(cbopaque, u2s((1U << sv), 10, s));
            write_cb(cbopaque, "\n");

            CTL_GET("opt.lg_prof_tcmax", &ssv, ssize_t);
            write_cb(cbopaque,
                "Maximum per thread backtrace cache: ");
            if (ssv >= 0) {
                write_cb(cbopaque, u2s((1U << ssv), 10, s));
                write_cb(cbopaque, " (2^");
                write_cb(cbopaque, u2s(ssv, 10, s));
                write_cb(cbopaque, ")\n");
            } else
                write_cb(cbopaque, "N/A\n");

            CTL_GET("opt.lg_prof_sample", &sv, size_t);
            write_cb(cbopaque, "Average profile sample interval: ");
            write_cb(cbopaque, u2s((((uint64_t)1U) << sv), 10, s));
            write_cb(cbopaque, " (2^");
            write_cb(cbopaque, u2s(sv, 10, s));
            write_cb(cbopaque, ")\n");

            CTL_GET("opt.lg_prof_interval", &ssv, ssize_t);
            write_cb(cbopaque, "Average profile dump interval: ");
            if (ssv >= 0) {
                write_cb(cbopaque, u2s((((uint64_t)1U) << ssv),
                    10, s));
                write_cb(cbopaque, " (2^");
                write_cb(cbopaque, u2s(ssv, 10, s));
                write_cb(cbopaque, ")\n");
            } else
                write_cb(cbopaque, "N/A\n");
        }
        CTL_GET("arenas.chunksize", &sv, size_t);
        write_cb(cbopaque, "Chunk size: ");
        write_cb(cbopaque, u2s(sv, 10, s));
        CTL_GET("opt.lg_chunk", &sv, size_t);
        write_cb(cbopaque, " (2^");
        write_cb(cbopaque, u2s(sv, 10, s));
        write_cb(cbopaque, ")\n");
    }

#ifdef JEMALLOC_STATS
    {
        int err;
        size_t sszp, ssz;
        size_t *cactive;
        size_t allocated, active, mapped;
        size_t chunks_current, chunks_high, swap_avail;
        uint64_t chunks_total;
        size_t huge_allocated;
        uint64_t huge_nmalloc, huge_ndalloc;

        sszp = sizeof(size_t *);
        ssz = sizeof(size_t);

        CTL_GET("stats.cactive", &cactive, size_t *);
        CTL_GET("stats.allocated", &allocated, size_t);
        CTL_GET("stats.active", &active, size_t);
        CTL_GET("stats.mapped", &mapped, size_t);
        malloc_cprintf(write_cb, cbopaque,
            "Allocated: %zu, active: %zu, mapped: %zu\n",
            allocated, active, mapped);
        malloc_cprintf(write_cb, cbopaque,
            "Current active ceiling: %zu\n", atomic_read_z(cactive));

        /* Print chunk stats. */
        CTL_GET("stats.chunks.total", &chunks_total, uint64_t);
        CTL_GET("stats.chunks.high", &chunks_high, size_t);
        CTL_GET("stats.chunks.current", &chunks_current, size_t);
        if ((err = JEMALLOC_P(mallctl)("swap.avail", &swap_avail, &ssz,
            NULL, 0)) == 0) {
            size_t lg_chunk;

            malloc_cprintf(write_cb, cbopaque, "chunks: nchunks   "
                "highchunks    curchunks   swap_avail\n");
            CTL_GET("opt.lg_chunk", &lg_chunk, size_t);
            malloc_cprintf(write_cb, cbopaque,
                "  %13"PRIu64"%13zu%13zu%13zu\n",
                chunks_total, chunks_high, chunks_current,
                swap_avail << lg_chunk);
        } else {
            malloc_cprintf(write_cb, cbopaque, "chunks: nchunks   "
                "highchunks    curchunks\n");
            malloc_cprintf(write_cb, cbopaque,
                "  %13"PRIu64"%13zu%13zu\n",
                chunks_total, chunks_high, chunks_current);
        }

        /* Print huge stats. */
        CTL_GET("stats.huge.nmalloc", &huge_nmalloc, uint64_t);
        CTL_GET("stats.huge.ndalloc", &huge_ndalloc, uint64_t);
        CTL_GET("stats.huge.allocated", &huge_allocated, size_t);
        malloc_cprintf(write_cb, cbopaque,
            "huge: nmalloc      ndalloc    allocated\n");
        malloc_cprintf(write_cb, cbopaque,
            " %12"PRIu64" %12"PRIu64" %12zu\n",
            huge_nmalloc, huge_ndalloc, huge_allocated);

        if (merged) {
            unsigned narenas;

            CTL_GET("arenas.narenas", &narenas, unsigned);
            {
                bool initialized[narenas];
                size_t isz;
                unsigned i, ninitialized;

                isz = sizeof(initialized);
                xmallctl("arenas.initialized", initialized,
                    &isz, NULL, 0);
                for (i = ninitialized = 0; i < narenas; i++) {
                    if (initialized[i])
                        ninitialized++;
                }

                if (ninitialized > 1 || unmerged == false) {
                    /* Print merged arena stats. */
                    malloc_cprintf(write_cb, cbopaque,
                        "\nMerged arenas stats:\n");
                    stats_arena_print(write_cb, cbopaque,
                        narenas);
                }
            }
        }

        if (unmerged) {
            unsigned narenas;

            /* Print stats for each arena. */

            CTL_GET("arenas.narenas", &narenas, unsigned);
            {
                bool initialized[narenas];
                size_t isz;
                unsigned i;

                isz = sizeof(initialized);
                xmallctl("arenas.initialized", initialized,
                    &isz, NULL, 0);

                for (i = 0; i < narenas; i++) {
                    if (initialized[i]) {
                        malloc_cprintf(write_cb,
                            cbopaque,
                            "\narenas[%u]:\n", i);
                        stats_arena_print(write_cb,
                            cbopaque, i);
                    }
                }
            }
        }
    }
#endif /* #ifdef JEMALLOC_STATS */
    write_cb(cbopaque, "--- End jemalloc statistics ---\n");
}