/* deps/jemalloc/src/ctl.c (jemalloc, vendored in redis) */
#define JEMALLOC_CTL_C_
#include "jemalloc/internal/jemalloc_internal.h"

/******************************************************************************/
/* Data. */

/*
 * ctl_mtx protects the following:
 * - ctl_stats.*
 * - opt_prof_active
 */
static malloc_mutex_t ctl_mtx;
static bool ctl_initialized;
static uint64_t ctl_epoch;
static ctl_stats_t ctl_stats;

/******************************************************************************/
/* Helpers for named and indexed nodes. */

static inline const ctl_named_node_t *
ctl_named_node(const ctl_node_t *node)
{

    return ((node->named) ? (const ctl_named_node_t *)node : NULL);
}

static inline const ctl_named_node_t *
ctl_named_children(const ctl_named_node_t *node, int index)
{
    const ctl_named_node_t *children = ctl_named_node(node->children);

    return (children ? &children[index] : NULL);
}

static inline const ctl_indexed_node_t *
ctl_indexed_node(const ctl_node_t *node)
{

    return ((node->named == false) ? (const ctl_indexed_node_t *)node :
        NULL);
}

/******************************************************************************/
/* Function prototypes for non-inline static functions. */

#define CTL_PROTO(n) \
static int n##_ctl(const size_t *mib, size_t miblen, void *oldp, \
    size_t *oldlenp, void *newp, size_t newlen);

#define INDEX_PROTO(n) \
static const ctl_named_node_t *n##_index(const size_t *mib, \
    size_t miblen, size_t i);

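/*
 * For reference, CTL_PROTO(version) expands to:
 *
 *   static int version_ctl(const size_t *mib, size_t miblen, void *oldp,
 *       size_t *oldlenp, void *newp, size_t newlen);
 *
 * so every leaf handler declared below shares the same mallctl-style
 * signature.
 */
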
static bool ctl_arena_init(ctl_arena_stats_t *astats);
static void ctl_arena_clear(ctl_arena_stats_t *astats);
static void ctl_arena_stats_amerge(ctl_arena_stats_t *cstats,
    arena_t *arena);
static void ctl_arena_stats_smerge(ctl_arena_stats_t *sstats,
    ctl_arena_stats_t *astats);
static void ctl_arena_refresh(arena_t *arena, unsigned i);
static bool ctl_grow(void);
static void ctl_refresh(void);
static bool ctl_init(void);
static int ctl_lookup(const char *name, ctl_node_t const **nodesp,
    size_t *mibp, size_t *depthp);

CTL_PROTO(version)
CTL_PROTO(epoch)
CTL_PROTO(thread_tcache_enabled)
CTL_PROTO(thread_tcache_flush)
CTL_PROTO(thread_arena)
CTL_PROTO(thread_allocated)
CTL_PROTO(thread_allocatedp)
CTL_PROTO(thread_deallocated)
CTL_PROTO(thread_deallocatedp)
CTL_PROTO(config_debug)
CTL_PROTO(config_dss)
CTL_PROTO(config_fill)
CTL_PROTO(config_lazy_lock)
CTL_PROTO(config_mremap)
CTL_PROTO(config_munmap)
CTL_PROTO(config_prof)
CTL_PROTO(config_prof_libgcc)
CTL_PROTO(config_prof_libunwind)
CTL_PROTO(config_stats)
CTL_PROTO(config_tcache)
CTL_PROTO(config_tls)
CTL_PROTO(config_utrace)
CTL_PROTO(config_valgrind)
CTL_PROTO(config_xmalloc)
CTL_PROTO(opt_abort)
CTL_PROTO(opt_dss)
CTL_PROTO(opt_lg_chunk)
CTL_PROTO(opt_narenas)
CTL_PROTO(opt_lg_dirty_mult)
CTL_PROTO(opt_stats_print)
CTL_PROTO(opt_junk)
CTL_PROTO(opt_zero)
CTL_PROTO(opt_quarantine)
CTL_PROTO(opt_redzone)
CTL_PROTO(opt_utrace)
CTL_PROTO(opt_valgrind)
CTL_PROTO(opt_xmalloc)
CTL_PROTO(opt_tcache)
CTL_PROTO(opt_lg_tcache_max)
CTL_PROTO(opt_prof)
CTL_PROTO(opt_prof_prefix)
CTL_PROTO(opt_prof_active)
CTL_PROTO(opt_lg_prof_sample)
CTL_PROTO(opt_lg_prof_interval)
CTL_PROTO(opt_prof_gdump)
CTL_PROTO(opt_prof_final)
CTL_PROTO(opt_prof_leak)
CTL_PROTO(opt_prof_accum)
CTL_PROTO(arena_i_purge)
static void arena_purge(unsigned arena_ind);
CTL_PROTO(arena_i_dss)
INDEX_PROTO(arena_i)
CTL_PROTO(arenas_bin_i_size)
CTL_PROTO(arenas_bin_i_nregs)
CTL_PROTO(arenas_bin_i_run_size)
INDEX_PROTO(arenas_bin_i)
CTL_PROTO(arenas_lrun_i_size)
INDEX_PROTO(arenas_lrun_i)
CTL_PROTO(arenas_narenas)
CTL_PROTO(arenas_initialized)
CTL_PROTO(arenas_quantum)
CTL_PROTO(arenas_page)
CTL_PROTO(arenas_tcache_max)
CTL_PROTO(arenas_nbins)
CTL_PROTO(arenas_nhbins)
CTL_PROTO(arenas_nlruns)
CTL_PROTO(arenas_purge)
CTL_PROTO(arenas_extend)
CTL_PROTO(prof_active)
CTL_PROTO(prof_dump)
CTL_PROTO(prof_interval)
CTL_PROTO(stats_chunks_current)
CTL_PROTO(stats_chunks_total)
CTL_PROTO(stats_chunks_high)
CTL_PROTO(stats_huge_allocated)
CTL_PROTO(stats_huge_nmalloc)
CTL_PROTO(stats_huge_ndalloc)
CTL_PROTO(stats_arenas_i_small_allocated)
CTL_PROTO(stats_arenas_i_small_nmalloc)
CTL_PROTO(stats_arenas_i_small_ndalloc)
CTL_PROTO(stats_arenas_i_small_nrequests)
CTL_PROTO(stats_arenas_i_large_allocated)
CTL_PROTO(stats_arenas_i_large_nmalloc)
CTL_PROTO(stats_arenas_i_large_ndalloc)
CTL_PROTO(stats_arenas_i_large_nrequests)
CTL_PROTO(stats_arenas_i_bins_j_allocated)
CTL_PROTO(stats_arenas_i_bins_j_nmalloc)
CTL_PROTO(stats_arenas_i_bins_j_ndalloc)
CTL_PROTO(stats_arenas_i_bins_j_nrequests)
CTL_PROTO(stats_arenas_i_bins_j_nfills)
CTL_PROTO(stats_arenas_i_bins_j_nflushes)
CTL_PROTO(stats_arenas_i_bins_j_nruns)
CTL_PROTO(stats_arenas_i_bins_j_nreruns)
CTL_PROTO(stats_arenas_i_bins_j_curruns)
INDEX_PROTO(stats_arenas_i_bins_j)
CTL_PROTO(stats_arenas_i_lruns_j_nmalloc)
CTL_PROTO(stats_arenas_i_lruns_j_ndalloc)
CTL_PROTO(stats_arenas_i_lruns_j_nrequests)
CTL_PROTO(stats_arenas_i_lruns_j_curruns)
INDEX_PROTO(stats_arenas_i_lruns_j)
CTL_PROTO(stats_arenas_i_nthreads)
CTL_PROTO(stats_arenas_i_dss)
CTL_PROTO(stats_arenas_i_pactive)
CTL_PROTO(stats_arenas_i_pdirty)
CTL_PROTO(stats_arenas_i_mapped)
CTL_PROTO(stats_arenas_i_npurge)
CTL_PROTO(stats_arenas_i_nmadvise)
CTL_PROTO(stats_arenas_i_purged)
INDEX_PROTO(stats_arenas_i)
CTL_PROTO(stats_cactive)
CTL_PROTO(stats_allocated)
CTL_PROTO(stats_active)
CTL_PROTO(stats_mapped)

/******************************************************************************/
/* mallctl tree. */

/* Maximum tree depth. */
#define CTL_MAX_DEPTH 6

#define NAME(n) {true}, n
#define CHILD(t, c) \
    sizeof(c##_node) / sizeof(ctl_##t##_node_t), \
    (ctl_node_t *)c##_node, \
    NULL
#define CTL(c) 0, NULL, c##_ctl

/*
 * Only handles internal indexed nodes, since there are currently no external
 * ones.
 */
#define INDEX(i) {false}, i##_index

static const ctl_named_node_t tcache_node[] = {
    {NAME("enabled"), CTL(thread_tcache_enabled)},
    {NAME("flush"), CTL(thread_tcache_flush)}
};

static const ctl_named_node_t thread_node[] = {
    {NAME("arena"), CTL(thread_arena)},
    {NAME("allocated"), CTL(thread_allocated)},
    {NAME("allocatedp"), CTL(thread_allocatedp)},
    {NAME("deallocated"), CTL(thread_deallocated)},
    {NAME("deallocatedp"), CTL(thread_deallocatedp)},
    {NAME("tcache"), CHILD(named, tcache)}
};

static const ctl_named_node_t config_node[] = {
    {NAME("debug"), CTL(config_debug)},
    {NAME("dss"), CTL(config_dss)},
    {NAME("fill"), CTL(config_fill)},
    {NAME("lazy_lock"), CTL(config_lazy_lock)},
    {NAME("mremap"), CTL(config_mremap)},
    {NAME("munmap"), CTL(config_munmap)},
    {NAME("prof"), CTL(config_prof)},
    {NAME("prof_libgcc"), CTL(config_prof_libgcc)},
    {NAME("prof_libunwind"), CTL(config_prof_libunwind)},
    {NAME("stats"), CTL(config_stats)},
    {NAME("tcache"), CTL(config_tcache)},
    {NAME("tls"), CTL(config_tls)},
    {NAME("utrace"), CTL(config_utrace)},
    {NAME("valgrind"), CTL(config_valgrind)},
    {NAME("xmalloc"), CTL(config_xmalloc)}
};

static const ctl_named_node_t opt_node[] = {
    {NAME("abort"), CTL(opt_abort)},
    {NAME("dss"), CTL(opt_dss)},
    {NAME("lg_chunk"), CTL(opt_lg_chunk)},
    {NAME("narenas"), CTL(opt_narenas)},
    {NAME("lg_dirty_mult"), CTL(opt_lg_dirty_mult)},
    {NAME("stats_print"), CTL(opt_stats_print)},
    {NAME("junk"), CTL(opt_junk)},
    {NAME("zero"), CTL(opt_zero)},
    {NAME("quarantine"), CTL(opt_quarantine)},
    {NAME("redzone"), CTL(opt_redzone)},
    {NAME("utrace"), CTL(opt_utrace)},
    {NAME("valgrind"), CTL(opt_valgrind)},
    {NAME("xmalloc"), CTL(opt_xmalloc)},
    {NAME("tcache"), CTL(opt_tcache)},
    {NAME("lg_tcache_max"), CTL(opt_lg_tcache_max)},
    {NAME("prof"), CTL(opt_prof)},
    {NAME("prof_prefix"), CTL(opt_prof_prefix)},
    {NAME("prof_active"), CTL(opt_prof_active)},
    {NAME("lg_prof_sample"), CTL(opt_lg_prof_sample)},
    {NAME("lg_prof_interval"), CTL(opt_lg_prof_interval)},
    {NAME("prof_gdump"), CTL(opt_prof_gdump)},
    {NAME("prof_final"), CTL(opt_prof_final)},
    {NAME("prof_leak"), CTL(opt_prof_leak)},
    {NAME("prof_accum"), CTL(opt_prof_accum)}
};

static const ctl_named_node_t arena_i_node[] = {
    {NAME("purge"), CTL(arena_i_purge)},
    {NAME("dss"), CTL(arena_i_dss)}
};
static const ctl_named_node_t super_arena_i_node[] = {
    {NAME(""), CHILD(named, arena_i)}
};

static const ctl_indexed_node_t arena_node[] = {
    {INDEX(arena_i)}
};

static const ctl_named_node_t arenas_bin_i_node[] = {
    {NAME("size"), CTL(arenas_bin_i_size)},
    {NAME("nregs"), CTL(arenas_bin_i_nregs)},
    {NAME("run_size"), CTL(arenas_bin_i_run_size)}
};
static const ctl_named_node_t super_arenas_bin_i_node[] = {
    {NAME(""), CHILD(named, arenas_bin_i)}
};

static const ctl_indexed_node_t arenas_bin_node[] = {
    {INDEX(arenas_bin_i)}
};

static const ctl_named_node_t arenas_lrun_i_node[] = {
    {NAME("size"), CTL(arenas_lrun_i_size)}
};
static const ctl_named_node_t super_arenas_lrun_i_node[] = {
    {NAME(""), CHILD(named, arenas_lrun_i)}
};

static const ctl_indexed_node_t arenas_lrun_node[] = {
    {INDEX(arenas_lrun_i)}
};

static const ctl_named_node_t arenas_node[] = {
    {NAME("narenas"), CTL(arenas_narenas)},
    {NAME("initialized"), CTL(arenas_initialized)},
    {NAME("quantum"), CTL(arenas_quantum)},
    {NAME("page"), CTL(arenas_page)},
    {NAME("tcache_max"), CTL(arenas_tcache_max)},
    {NAME("nbins"), CTL(arenas_nbins)},
    {NAME("nhbins"), CTL(arenas_nhbins)},
    {NAME("bin"), CHILD(indexed, arenas_bin)},
    {NAME("nlruns"), CTL(arenas_nlruns)},
    {NAME("lrun"), CHILD(indexed, arenas_lrun)},
    {NAME("purge"), CTL(arenas_purge)},
    {NAME("extend"), CTL(arenas_extend)}
};

static const ctl_named_node_t prof_node[] = {
    {NAME("active"), CTL(prof_active)},
    {NAME("dump"), CTL(prof_dump)},
    {NAME("interval"), CTL(prof_interval)}
};

static const ctl_named_node_t stats_chunks_node[] = {
    {NAME("current"), CTL(stats_chunks_current)},
    {NAME("total"), CTL(stats_chunks_total)},
    {NAME("high"), CTL(stats_chunks_high)}
};

static const ctl_named_node_t stats_huge_node[] = {
    {NAME("allocated"), CTL(stats_huge_allocated)},
    {NAME("nmalloc"), CTL(stats_huge_nmalloc)},
    {NAME("ndalloc"), CTL(stats_huge_ndalloc)}
};

static const ctl_named_node_t stats_arenas_i_small_node[] = {
    {NAME("allocated"), CTL(stats_arenas_i_small_allocated)},
    {NAME("nmalloc"), CTL(stats_arenas_i_small_nmalloc)},
    {NAME("ndalloc"), CTL(stats_arenas_i_small_ndalloc)},
    {NAME("nrequests"), CTL(stats_arenas_i_small_nrequests)}
};

static const ctl_named_node_t stats_arenas_i_large_node[] = {
    {NAME("allocated"), CTL(stats_arenas_i_large_allocated)},
    {NAME("nmalloc"), CTL(stats_arenas_i_large_nmalloc)},
    {NAME("ndalloc"), CTL(stats_arenas_i_large_ndalloc)},
    {NAME("nrequests"), CTL(stats_arenas_i_large_nrequests)}
};

static const ctl_named_node_t stats_arenas_i_bins_j_node[] = {
    {NAME("allocated"), CTL(stats_arenas_i_bins_j_allocated)},
    {NAME("nmalloc"), CTL(stats_arenas_i_bins_j_nmalloc)},
    {NAME("ndalloc"), CTL(stats_arenas_i_bins_j_ndalloc)},
    {NAME("nrequests"), CTL(stats_arenas_i_bins_j_nrequests)},
    {NAME("nfills"), CTL(stats_arenas_i_bins_j_nfills)},
    {NAME("nflushes"), CTL(stats_arenas_i_bins_j_nflushes)},
    {NAME("nruns"), CTL(stats_arenas_i_bins_j_nruns)},
    {NAME("nreruns"), CTL(stats_arenas_i_bins_j_nreruns)},
    {NAME("curruns"), CTL(stats_arenas_i_bins_j_curruns)}
};
static const ctl_named_node_t super_stats_arenas_i_bins_j_node[] = {
    {NAME(""), CHILD(named, stats_arenas_i_bins_j)}
};

static const ctl_indexed_node_t stats_arenas_i_bins_node[] = {
    {INDEX(stats_arenas_i_bins_j)}
};

static const ctl_named_node_t stats_arenas_i_lruns_j_node[] = {
    {NAME("nmalloc"), CTL(stats_arenas_i_lruns_j_nmalloc)},
    {NAME("ndalloc"), CTL(stats_arenas_i_lruns_j_ndalloc)},
    {NAME("nrequests"), CTL(stats_arenas_i_lruns_j_nrequests)},
    {NAME("curruns"), CTL(stats_arenas_i_lruns_j_curruns)}
};
static const ctl_named_node_t super_stats_arenas_i_lruns_j_node[] = {
    {NAME(""), CHILD(named, stats_arenas_i_lruns_j)}
};

static const ctl_indexed_node_t stats_arenas_i_lruns_node[] = {
    {INDEX(stats_arenas_i_lruns_j)}
};

static const ctl_named_node_t stats_arenas_i_node[] = {
    {NAME("nthreads"), CTL(stats_arenas_i_nthreads)},
    {NAME("dss"), CTL(stats_arenas_i_dss)},
    {NAME("pactive"), CTL(stats_arenas_i_pactive)},
    {NAME("pdirty"), CTL(stats_arenas_i_pdirty)},
    {NAME("mapped"), CTL(stats_arenas_i_mapped)},
    {NAME("npurge"), CTL(stats_arenas_i_npurge)},
    {NAME("nmadvise"), CTL(stats_arenas_i_nmadvise)},
    {NAME("purged"), CTL(stats_arenas_i_purged)},
    {NAME("small"), CHILD(named, stats_arenas_i_small)},
    {NAME("large"), CHILD(named, stats_arenas_i_large)},
    {NAME("bins"), CHILD(indexed, stats_arenas_i_bins)},
    {NAME("lruns"), CHILD(indexed, stats_arenas_i_lruns)}
};
static const ctl_named_node_t super_stats_arenas_i_node[] = {
    {NAME(""), CHILD(named, stats_arenas_i)}
};

static const ctl_indexed_node_t stats_arenas_node[] = {
    {INDEX(stats_arenas_i)}
};

static const ctl_named_node_t stats_node[] = {
    {NAME("cactive"), CTL(stats_cactive)},
    {NAME("allocated"), CTL(stats_allocated)},
    {NAME("active"), CTL(stats_active)},
    {NAME("mapped"), CTL(stats_mapped)},
    {NAME("chunks"), CHILD(named, stats_chunks)},
    {NAME("huge"), CHILD(named, stats_huge)},
    {NAME("arenas"), CHILD(indexed, stats_arenas)}
};

static const ctl_named_node_t root_node[] = {
    {NAME("version"), CTL(version)},
    {NAME("epoch"), CTL(epoch)},
    {NAME("thread"), CHILD(named, thread)},
    {NAME("config"), CHILD(named, config)},
    {NAME("opt"), CHILD(named, opt)},
    {NAME("arena"), CHILD(indexed, arena)},
    {NAME("arenas"), CHILD(named, arenas)},
    {NAME("prof"), CHILD(named, prof)},
    {NAME("stats"), CHILD(named, stats)}
};
static const ctl_named_node_t super_root_node[] = {
    {NAME(""), CHILD(named, root)}
};

#undef NAME
#undef CHILD
#undef CTL
#undef INDEX

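/*
 * Illustrative walk of the tree above: the name "stats.arenas.0.pdirty"
 * resolves root -> "stats" (index 8) -> "arenas" (index 6, an indexed node)
 * -> arena 0 -> "pdirty" (index 3), i.e. the MIB {8, 6, 0, 3}, with
 * ctl_named_node()/ctl_indexed_node() distinguishing the two node kinds at
 * each level.
 */
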
/******************************************************************************/

static bool
ctl_arena_init(ctl_arena_stats_t *astats)
{

    if (astats->lstats == NULL) {
        astats->lstats = (malloc_large_stats_t *)base_alloc(nlclasses *
            sizeof(malloc_large_stats_t));
        if (astats->lstats == NULL)
            return (true);
    }

    return (false);
}

static void
ctl_arena_clear(ctl_arena_stats_t *astats)
{

    astats->dss = dss_prec_names[dss_prec_limit];
    astats->pactive = 0;
    astats->pdirty = 0;
    if (config_stats) {
        memset(&astats->astats, 0, sizeof(arena_stats_t));
        astats->allocated_small = 0;
        astats->nmalloc_small = 0;
        astats->ndalloc_small = 0;
        astats->nrequests_small = 0;
        memset(astats->bstats, 0, NBINS * sizeof(malloc_bin_stats_t));
        memset(astats->lstats, 0, nlclasses *
            sizeof(malloc_large_stats_t));
    }
}

static void
ctl_arena_stats_amerge(ctl_arena_stats_t *cstats, arena_t *arena)
{
    unsigned i;

    arena_stats_merge(arena, &cstats->dss, &cstats->pactive,
        &cstats->pdirty, &cstats->astats, cstats->bstats, cstats->lstats);

    for (i = 0; i < NBINS; i++) {
        cstats->allocated_small += cstats->bstats[i].allocated;
        cstats->nmalloc_small += cstats->bstats[i].nmalloc;
        cstats->ndalloc_small += cstats->bstats[i].ndalloc;
        cstats->nrequests_small += cstats->bstats[i].nrequests;
    }
}

static void
ctl_arena_stats_smerge(ctl_arena_stats_t *sstats, ctl_arena_stats_t *astats)
{
    unsigned i;

    sstats->pactive += astats->pactive;
    sstats->pdirty += astats->pdirty;

    sstats->astats.mapped += astats->astats.mapped;
    sstats->astats.npurge += astats->astats.npurge;
    sstats->astats.nmadvise += astats->astats.nmadvise;
    sstats->astats.purged += astats->astats.purged;

    sstats->allocated_small += astats->allocated_small;
    sstats->nmalloc_small += astats->nmalloc_small;
    sstats->ndalloc_small += astats->ndalloc_small;
    sstats->nrequests_small += astats->nrequests_small;

    sstats->astats.allocated_large += astats->astats.allocated_large;
    sstats->astats.nmalloc_large += astats->astats.nmalloc_large;
    sstats->astats.ndalloc_large += astats->astats.ndalloc_large;
    sstats->astats.nrequests_large += astats->astats.nrequests_large;

    for (i = 0; i < nlclasses; i++) {
        sstats->lstats[i].nmalloc += astats->lstats[i].nmalloc;
        sstats->lstats[i].ndalloc += astats->lstats[i].ndalloc;
        sstats->lstats[i].nrequests += astats->lstats[i].nrequests;
        sstats->lstats[i].curruns += astats->lstats[i].curruns;
    }

    for (i = 0; i < NBINS; i++) {
        sstats->bstats[i].allocated += astats->bstats[i].allocated;
        sstats->bstats[i].nmalloc += astats->bstats[i].nmalloc;
        sstats->bstats[i].ndalloc += astats->bstats[i].ndalloc;
        sstats->bstats[i].nrequests += astats->bstats[i].nrequests;
        if (config_tcache) {
            sstats->bstats[i].nfills += astats->bstats[i].nfills;
            sstats->bstats[i].nflushes +=
                astats->bstats[i].nflushes;
        }
        sstats->bstats[i].nruns += astats->bstats[i].nruns;
        sstats->bstats[i].reruns += astats->bstats[i].reruns;
        sstats->bstats[i].curruns += astats->bstats[i].curruns;
    }
}

static void
ctl_arena_refresh(arena_t *arena, unsigned i)
{
    ctl_arena_stats_t *astats = &ctl_stats.arenas[i];
    ctl_arena_stats_t *sstats = &ctl_stats.arenas[ctl_stats.narenas];

    ctl_arena_clear(astats);

    sstats->nthreads += astats->nthreads;
    if (config_stats) {
        ctl_arena_stats_amerge(astats, arena);
        /* Merge into sum stats as well. */
        ctl_arena_stats_smerge(sstats, astats);
    } else {
        astats->pactive += arena->nactive;
        astats->pdirty += arena->ndirty;
        /* Merge into sum stats as well. */
        sstats->pactive += arena->nactive;
        sstats->pdirty += arena->ndirty;
    }
}

static bool
ctl_grow(void)
{
    size_t astats_size;
    ctl_arena_stats_t *astats;
    arena_t **tarenas;

    /* Extend arena stats and arenas arrays. */
    astats_size = (ctl_stats.narenas + 2) * sizeof(ctl_arena_stats_t);
    if (ctl_stats.narenas == narenas_auto) {
        /* ctl_stats.arenas and arenas came from base_alloc(). */
        astats = (ctl_arena_stats_t *)imalloc(astats_size);
        if (astats == NULL)
            return (true);
        memcpy(astats, ctl_stats.arenas, (ctl_stats.narenas + 1) *
            sizeof(ctl_arena_stats_t));

        tarenas = (arena_t **)imalloc((ctl_stats.narenas + 1) *
            sizeof(arena_t *));
        if (tarenas == NULL) {
            idalloc(astats);
            return (true);
        }
        memcpy(tarenas, arenas, ctl_stats.narenas * sizeof(arena_t *));
    } else {
        astats = (ctl_arena_stats_t *)iralloc(ctl_stats.arenas,
            astats_size, 0, 0, false, false);
        if (astats == NULL)
            return (true);

        tarenas = (arena_t **)iralloc(arenas, (ctl_stats.narenas + 1) *
            sizeof(arena_t *), 0, 0, false, false);
        if (tarenas == NULL)
            return (true);
    }
    /* Initialize the new astats and arenas elements. */
    memset(&astats[ctl_stats.narenas + 1], 0, sizeof(ctl_arena_stats_t));
    if (ctl_arena_init(&astats[ctl_stats.narenas + 1]))
        return (true);
    tarenas[ctl_stats.narenas] = NULL;
    /* Swap merged stats to their new location. */
    {
        ctl_arena_stats_t tstats;
        memcpy(&tstats, &astats[ctl_stats.narenas],
            sizeof(ctl_arena_stats_t));
        memcpy(&astats[ctl_stats.narenas],
            &astats[ctl_stats.narenas + 1], sizeof(ctl_arena_stats_t));
        memcpy(&astats[ctl_stats.narenas + 1], &tstats,
            sizeof(ctl_arena_stats_t));
    }
    ctl_stats.arenas = astats;
    ctl_stats.narenas++;
    malloc_mutex_lock(&arenas_lock);
    arenas = tarenas;
    narenas_total++;
    arenas_extend(narenas_total - 1);
    malloc_mutex_unlock(&arenas_lock);

    return (false);
}

static void
ctl_refresh(void)
{
    unsigned i;
    VARIABLE_ARRAY(arena_t *, tarenas, ctl_stats.narenas);

    if (config_stats) {
        malloc_mutex_lock(&chunks_mtx);
        ctl_stats.chunks.current = stats_chunks.curchunks;
        ctl_stats.chunks.total = stats_chunks.nchunks;
        ctl_stats.chunks.high = stats_chunks.highchunks;
        malloc_mutex_unlock(&chunks_mtx);

        malloc_mutex_lock(&huge_mtx);
        ctl_stats.huge.allocated = huge_allocated;
        ctl_stats.huge.nmalloc = huge_nmalloc;
        ctl_stats.huge.ndalloc = huge_ndalloc;
        malloc_mutex_unlock(&huge_mtx);
    }

    /*
     * Clear sum stats, since ctl_arena_refresh() merges into them.
     */
    ctl_stats.arenas[ctl_stats.narenas].nthreads = 0;
    ctl_arena_clear(&ctl_stats.arenas[ctl_stats.narenas]);

    malloc_mutex_lock(&arenas_lock);
    memcpy(tarenas, arenas, sizeof(arena_t *) * ctl_stats.narenas);
    for (i = 0; i < ctl_stats.narenas; i++) {
        if (arenas[i] != NULL)
            ctl_stats.arenas[i].nthreads = arenas[i]->nthreads;
        else
            ctl_stats.arenas[i].nthreads = 0;
    }
    malloc_mutex_unlock(&arenas_lock);
    for (i = 0; i < ctl_stats.narenas; i++) {
        bool initialized = (tarenas[i] != NULL);

        ctl_stats.arenas[i].initialized = initialized;
        if (initialized)
            ctl_arena_refresh(tarenas[i], i);
    }

    if (config_stats) {
        ctl_stats.allocated =
            ctl_stats.arenas[ctl_stats.narenas].allocated_small
            + ctl_stats.arenas[ctl_stats.narenas].astats.allocated_large
            + ctl_stats.huge.allocated;
        ctl_stats.active =
            (ctl_stats.arenas[ctl_stats.narenas].pactive << LG_PAGE)
            + ctl_stats.huge.allocated;
        ctl_stats.mapped = (ctl_stats.chunks.current << opt_lg_chunk);
    }

    ctl_epoch++;
}

static bool
ctl_init(void)
{
    bool ret;

    malloc_mutex_lock(&ctl_mtx);
    if (ctl_initialized == false) {
        /*
         * Allocate space for one extra arena stats element, which
         * contains summed stats across all arenas.
         */
        assert(narenas_auto == narenas_total_get());
        ctl_stats.narenas = narenas_auto;
        ctl_stats.arenas = (ctl_arena_stats_t *)base_alloc(
            (ctl_stats.narenas + 1) * sizeof(ctl_arena_stats_t));
        if (ctl_stats.arenas == NULL) {
            ret = true;
            goto label_return;
        }
        memset(ctl_stats.arenas, 0, (ctl_stats.narenas + 1) *
            sizeof(ctl_arena_stats_t));

        /*
         * Initialize all stats structures, regardless of whether they
         * ever get used. Lazy initialization would allow errors to
         * cause inconsistent state to be viewable by the application.
         */
        if (config_stats) {
            unsigned i;
            for (i = 0; i <= ctl_stats.narenas; i++) {
                if (ctl_arena_init(&ctl_stats.arenas[i])) {
                    ret = true;
                    goto label_return;
                }
            }
        }
        ctl_stats.arenas[ctl_stats.narenas].initialized = true;

        ctl_epoch = 0;
        ctl_refresh();
        ctl_initialized = true;
    }

    ret = false;
label_return:
    malloc_mutex_unlock(&ctl_mtx);
    return (ret);
}

static int
ctl_lookup(const char *name, ctl_node_t const **nodesp, size_t *mibp,
    size_t *depthp)
{
    int ret;
    const char *elm, *tdot, *dot;
    size_t elen, i, j;
    const ctl_named_node_t *node;

    elm = name;
    /* Equivalent to strchrnul(). */
    dot = ((tdot = strchr(elm, '.')) != NULL) ? tdot : strchr(elm, '\0');
    elen = (size_t)((uintptr_t)dot - (uintptr_t)elm);
    if (elen == 0) {
        ret = ENOENT;
        goto label_return;
    }
    node = super_root_node;
    for (i = 0; i < *depthp; i++) {
        assert(node);
        assert(node->nchildren > 0);
        if (ctl_named_node(node->children) != NULL) {
            const ctl_named_node_t *pnode = node;

            /* Children are named. */
            for (j = 0; j < node->nchildren; j++) {
                const ctl_named_node_t *child =
                    ctl_named_children(node, j);
                if (strlen(child->name) == elen &&
                    strncmp(elm, child->name, elen) == 0) {
                    node = child;
                    if (nodesp != NULL)
                        nodesp[i] =
                            (const ctl_node_t *)node;
                    mibp[i] = j;
                    break;
                }
            }
            if (node == pnode) {
                ret = ENOENT;
                goto label_return;
            }
        } else {
            uintmax_t index;
            const ctl_indexed_node_t *inode;

            /* Children are indexed. */
            index = malloc_strtoumax(elm, NULL, 10);
            if (index == UINTMAX_MAX || index > SIZE_T_MAX) {
                ret = ENOENT;
                goto label_return;
            }

            inode = ctl_indexed_node(node->children);
            node = inode->index(mibp, *depthp, (size_t)index);
            if (node == NULL) {
                ret = ENOENT;
                goto label_return;
            }

            if (nodesp != NULL)
                nodesp[i] = (const ctl_node_t *)node;
            mibp[i] = (size_t)index;
        }

        if (node->ctl != NULL) {
            /* Terminal node. */
            if (*dot != '\0') {
                /*
                 * The name contains more elements than are
                 * in this path through the tree.
                 */
                ret = ENOENT;
                goto label_return;
            }
            /* Complete lookup successful. */
            *depthp = i + 1;
            break;
        }

        /* Update elm. */
        if (*dot == '\0') {
            /* No more elements. */
            ret = ENOENT;
            goto label_return;
        }
        elm = &dot[1];
        dot = ((tdot = strchr(elm, '.')) != NULL) ? tdot :
            strchr(elm, '\0');
        elen = (size_t)((uintptr_t)dot - (uintptr_t)elm);
    }

    ret = 0;
label_return:
    return (ret);
}
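
/*
 * Illustrative result: with *depthp == CTL_MAX_DEPTH, looking up
 * "arenas.bin.0.size" terminates at a depth of 4 and fills mibp with
 * {6, 7, 0, 0} ("arenas" -> "bin" -> slot 0 -> "size").
 */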

int
ctl_byname(const char *name, void *oldp, size_t *oldlenp, void *newp,
    size_t newlen)
{
    int ret;
    size_t depth;
    ctl_node_t const *nodes[CTL_MAX_DEPTH];
    size_t mib[CTL_MAX_DEPTH];
    const ctl_named_node_t *node;

    if (ctl_initialized == false && ctl_init()) {
        ret = EAGAIN;
        goto label_return;
    }

    depth = CTL_MAX_DEPTH;
    ret = ctl_lookup(name, nodes, mib, &depth);
    if (ret != 0)
        goto label_return;

    node = ctl_named_node(nodes[depth-1]);
    if (node != NULL && node->ctl)
        ret = node->ctl(mib, depth, oldp, oldlenp, newp, newlen);
    else {
        /* The name refers to a partial path through the ctl tree. */
        ret = ENOENT;
    }

label_return:
    return (ret);
}
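
/*
 * ctl_byname() backs the public mallctl() entry point. A minimal read-only
 * use (sketch; assumes the unprefixed public API name):
 *
 *   const char *v;
 *   size_t sz = sizeof(v);
 *   if (mallctl("version", &v, &sz, NULL, 0) == 0)
 *       printf("%s\n", v);    v points at the JEMALLOC_VERSION string
 */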

int
ctl_nametomib(const char *name, size_t *mibp, size_t *miblenp)
{
    int ret;

    if (ctl_initialized == false && ctl_init()) {
        ret = EAGAIN;
        goto label_return;
    }

    ret = ctl_lookup(name, NULL, mibp, miblenp);
label_return:
    return (ret);
}

int
ctl_bymib(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp,
    void *newp, size_t newlen)
{
    int ret;
    const ctl_named_node_t *node;
    size_t i;

    if (ctl_initialized == false && ctl_init()) {
        ret = EAGAIN;
        goto label_return;
    }

    /* Iterate down the tree. */
    node = super_root_node;
    for (i = 0; i < miblen; i++) {
        assert(node);
        assert(node->nchildren > 0);
        if (ctl_named_node(node->children) != NULL) {
            /* Children are named. */
            if (node->nchildren <= mib[i]) {
                ret = ENOENT;
                goto label_return;
            }
            node = ctl_named_children(node, mib[i]);
        } else {
            const ctl_indexed_node_t *inode;

            /* Indexed element. */
            inode = ctl_indexed_node(node->children);
            node = inode->index(mib, miblen, mib[i]);
            if (node == NULL) {
                ret = ENOENT;
                goto label_return;
            }
        }
    }

    /* Call the ctl function. */
    if (node && node->ctl)
        ret = node->ctl(mib, miblen, oldp, oldlenp, newp, newlen);
    else {
        /* Partial MIB. */
        ret = ENOENT;
    }

label_return:
    return (ret);
}
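
/*
 * Sketch of the MIB idiom these two entry points enable (assumes the
 * unprefixed public API names): translate a name once, then query repeatedly
 * by MIB without re-parsing the string.
 *
 *   size_t mib[2], miblen = sizeof(mib) / sizeof(mib[0]);
 *   unsigned nbins;
 *   size_t sz = sizeof(nbins);
 *   mallctlnametomib("arenas.nbins", mib, &miblen);
 *   mallctlbymib(mib, miblen, &nbins, &sz, NULL, 0);
 */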

bool
ctl_boot(void)
{

    if (malloc_mutex_init(&ctl_mtx))
        return (true);

    ctl_initialized = false;

    return (false);
}

void
ctl_prefork(void)
{

    malloc_mutex_lock(&ctl_mtx);
}

void
ctl_postfork_parent(void)
{

    malloc_mutex_postfork_parent(&ctl_mtx);
}

void
ctl_postfork_child(void)
{

    malloc_mutex_postfork_child(&ctl_mtx);
}

/******************************************************************************/
/* *_ctl() functions. */

#define READONLY() do { \
    if (newp != NULL || newlen != 0) { \
        ret = EPERM; \
        goto label_return; \
    } \
} while (0)

#define WRITEONLY() do { \
    if (oldp != NULL || oldlenp != NULL) { \
        ret = EPERM; \
        goto label_return; \
    } \
} while (0)

#define READ(v, t) do { \
    if (oldp != NULL && oldlenp != NULL) { \
        if (*oldlenp != sizeof(t)) { \
            size_t copylen = (sizeof(t) <= *oldlenp) \
                ? sizeof(t) : *oldlenp; \
            memcpy(oldp, (void *)&v, copylen); \
            ret = EINVAL; \
            goto label_return; \
        } else \
            *(t *)oldp = v; \
    } \
} while (0)

#define WRITE(v, t) do { \
    if (newp != NULL) { \
        if (newlen != sizeof(t)) { \
            ret = EINVAL; \
            goto label_return; \
        } \
        v = *(t *)newp; \
    } \
} while (0)

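/*
 * Together these macros implement the mallctl read/write contract: READ()
 * copies a value of type t out through oldp/oldlenp (EINVAL plus a truncated
 * best-effort copy on size mismatch), WRITE() copies one in from newp/newlen,
 * and READONLY()/WRITEONLY() reject the unsupported direction with EPERM.
 */
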
/*
 * There's a lot of code duplication in the following macros due to limitations
 * in how nested cpp macros are expanded.
 */
#define CTL_RO_CLGEN(c, l, n, v, t) \
static int \
n##_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp, \
    void *newp, size_t newlen) \
{ \
    int ret; \
    t oldval; \
\
    if ((c) == false) \
        return (ENOENT); \
    if (l) \
        malloc_mutex_lock(&ctl_mtx); \
    READONLY(); \
    oldval = v; \
    READ(oldval, t); \
\
    ret = 0; \
label_return: \
    if (l) \
        malloc_mutex_unlock(&ctl_mtx); \
    return (ret); \
}

#define CTL_RO_CGEN(c, n, v, t) \
static int \
n##_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp, \
    void *newp, size_t newlen) \
{ \
    int ret; \
    t oldval; \
\
    if ((c) == false) \
        return (ENOENT); \
    malloc_mutex_lock(&ctl_mtx); \
    READONLY(); \
    oldval = v; \
    READ(oldval, t); \
\
    ret = 0; \
label_return: \
    malloc_mutex_unlock(&ctl_mtx); \
    return (ret); \
}

#define CTL_RO_GEN(n, v, t) \
static int \
n##_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp, \
    void *newp, size_t newlen) \
{ \
    int ret; \
    t oldval; \
\
    malloc_mutex_lock(&ctl_mtx); \
    READONLY(); \
    oldval = v; \
    READ(oldval, t); \
\
    ret = 0; \
label_return: \
    malloc_mutex_unlock(&ctl_mtx); \
    return (ret); \
}

/*
 * ctl_mtx is not acquired, under the assumption that no pertinent data will
 * mutate during the call.
 */
#define CTL_RO_NL_CGEN(c, n, v, t) \
static int \
n##_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp, \
    void *newp, size_t newlen) \
{ \
    int ret; \
    t oldval; \
\
    if ((c) == false) \
        return (ENOENT); \
    READONLY(); \
    oldval = v; \
    READ(oldval, t); \
\
    ret = 0; \
label_return: \
    return (ret); \
}

#define CTL_RO_NL_GEN(n, v, t) \
static int \
n##_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp, \
    void *newp, size_t newlen) \
{ \
    int ret; \
    t oldval; \
\
    READONLY(); \
    oldval = v; \
    READ(oldval, t); \
\
    ret = 0; \
label_return: \
    return (ret); \
}

#define CTL_RO_BOOL_CONFIG_GEN(n) \
static int \
n##_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp, \
    void *newp, size_t newlen) \
{ \
    int ret; \
    bool oldval; \
\
    READONLY(); \
    oldval = n; \
    READ(oldval, bool); \
\
    ret = 0; \
label_return: \
    return (ret); \
}

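/*
 * For example, CTL_RO_NL_GEN(opt_abort, opt_abort, bool) below expands to an
 * opt_abort_ctl() handler that returns the current value of the global
 * opt_abort, read-only and without taking ctl_mtx.
 */
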
CTL_RO_NL_GEN(version, JEMALLOC_VERSION, const char *)

static int
epoch_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp,
    void *newp, size_t newlen)
{
    int ret;
    uint64_t newval;

    malloc_mutex_lock(&ctl_mtx);
    WRITE(newval, uint64_t);
    if (newp != NULL)
        ctl_refresh();
    READ(ctl_epoch, uint64_t);

    ret = 0;
label_return:
    malloc_mutex_unlock(&ctl_mtx);
    return (ret);
}
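
/*
 * Illustrative use (assumes the unprefixed public API name): writing any
 * value to "epoch" refreshes the cached statistics, and the paired read
 * returns the new epoch.
 *
 *   uint64_t epoch = 1;
 *   size_t sz = sizeof(epoch);
 *   mallctl("epoch", &epoch, &sz, &epoch, sz);
 */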

static int
thread_tcache_enabled_ctl(const size_t *mib, size_t miblen, void *oldp,
    size_t *oldlenp, void *newp, size_t newlen)
{
    int ret;
    bool oldval;

    if (config_tcache == false)
        return (ENOENT);

    oldval = tcache_enabled_get();
    if (newp != NULL) {
        if (newlen != sizeof(bool)) {
            ret = EINVAL;
            goto label_return;
        }
        tcache_enabled_set(*(bool *)newp);
    }
    READ(oldval, bool);

    ret = 0;
label_return:
    return (ret);
}

static int
thread_tcache_flush_ctl(const size_t *mib, size_t miblen, void *oldp,
    size_t *oldlenp, void *newp, size_t newlen)
{
    int ret;

    if (config_tcache == false)
        return (ENOENT);

    READONLY();
    WRITEONLY();

    tcache_flush();

    ret = 0;
label_return:
    return (ret);
}

static int
thread_arena_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp,
    void *newp, size_t newlen)
{
    int ret;
    unsigned newind, oldind;

    malloc_mutex_lock(&ctl_mtx);
    newind = oldind = choose_arena(NULL)->ind;
    WRITE(newind, unsigned);
    READ(oldind, unsigned);
    if (newind != oldind) {
        arena_t *arena;

        if (newind >= ctl_stats.narenas) {
            /* New arena index is out of range. */
            ret = EFAULT;
            goto label_return;
        }

        /* Initialize arena if necessary. */
        malloc_mutex_lock(&arenas_lock);
        if ((arena = arenas[newind]) == NULL && (arena =
            arenas_extend(newind)) == NULL) {
            malloc_mutex_unlock(&arenas_lock);
            ret = EAGAIN;
            goto label_return;
        }
        assert(arena == arenas[newind]);
        arenas[oldind]->nthreads--;
        arenas[newind]->nthreads++;
        malloc_mutex_unlock(&arenas_lock);

        /* Set new arena association. */
        if (config_tcache) {
            tcache_t *tcache;
            if ((uintptr_t)(tcache = *tcache_tsd_get()) >
                (uintptr_t)TCACHE_STATE_MAX) {
                tcache_arena_dissociate(tcache);
                tcache_arena_associate(tcache, arena);
            }
        }
        arenas_tsd_set(&arena);
    }

    ret = 0;
label_return:
    malloc_mutex_unlock(&ctl_mtx);
    return (ret);
}

CTL_RO_NL_CGEN(config_stats, thread_allocated,
    thread_allocated_tsd_get()->allocated, uint64_t)
CTL_RO_NL_CGEN(config_stats, thread_allocatedp,
    &thread_allocated_tsd_get()->allocated, uint64_t *)
CTL_RO_NL_CGEN(config_stats, thread_deallocated,
    thread_allocated_tsd_get()->deallocated, uint64_t)
CTL_RO_NL_CGEN(config_stats, thread_deallocatedp,
    &thread_allocated_tsd_get()->deallocated, uint64_t *)

/******************************************************************************/

CTL_RO_BOOL_CONFIG_GEN(config_debug)
CTL_RO_BOOL_CONFIG_GEN(config_dss)
CTL_RO_BOOL_CONFIG_GEN(config_fill)
CTL_RO_BOOL_CONFIG_GEN(config_lazy_lock)
CTL_RO_BOOL_CONFIG_GEN(config_mremap)
CTL_RO_BOOL_CONFIG_GEN(config_munmap)
CTL_RO_BOOL_CONFIG_GEN(config_prof)
CTL_RO_BOOL_CONFIG_GEN(config_prof_libgcc)
CTL_RO_BOOL_CONFIG_GEN(config_prof_libunwind)
CTL_RO_BOOL_CONFIG_GEN(config_stats)
CTL_RO_BOOL_CONFIG_GEN(config_tcache)
CTL_RO_BOOL_CONFIG_GEN(config_tls)
CTL_RO_BOOL_CONFIG_GEN(config_utrace)
CTL_RO_BOOL_CONFIG_GEN(config_valgrind)
CTL_RO_BOOL_CONFIG_GEN(config_xmalloc)

/******************************************************************************/

CTL_RO_NL_GEN(opt_abort, opt_abort, bool)
CTL_RO_NL_GEN(opt_dss, opt_dss, const char *)
CTL_RO_NL_GEN(opt_lg_chunk, opt_lg_chunk, size_t)
CTL_RO_NL_GEN(opt_narenas, opt_narenas, size_t)
CTL_RO_NL_GEN(opt_lg_dirty_mult, opt_lg_dirty_mult, ssize_t)
CTL_RO_NL_GEN(opt_stats_print, opt_stats_print, bool)
CTL_RO_NL_CGEN(config_fill, opt_junk, opt_junk, bool)
CTL_RO_NL_CGEN(config_fill, opt_zero, opt_zero, bool)
CTL_RO_NL_CGEN(config_fill, opt_quarantine, opt_quarantine, size_t)
CTL_RO_NL_CGEN(config_fill, opt_redzone, opt_redzone, bool)
CTL_RO_NL_CGEN(config_utrace, opt_utrace, opt_utrace, bool)
CTL_RO_NL_CGEN(config_valgrind, opt_valgrind, opt_valgrind, bool)
CTL_RO_NL_CGEN(config_xmalloc, opt_xmalloc, opt_xmalloc, bool)
CTL_RO_NL_CGEN(config_tcache, opt_tcache, opt_tcache, bool)
CTL_RO_NL_CGEN(config_tcache, opt_lg_tcache_max, opt_lg_tcache_max, ssize_t)
CTL_RO_NL_CGEN(config_prof, opt_prof, opt_prof, bool)
CTL_RO_NL_CGEN(config_prof, opt_prof_prefix, opt_prof_prefix, const char *)
CTL_RO_CGEN(config_prof, opt_prof_active, opt_prof_active, bool) /* Mutable. */
CTL_RO_NL_CGEN(config_prof, opt_lg_prof_sample, opt_lg_prof_sample, size_t)
CTL_RO_NL_CGEN(config_prof, opt_lg_prof_interval, opt_lg_prof_interval, ssize_t)
CTL_RO_NL_CGEN(config_prof, opt_prof_gdump, opt_prof_gdump, bool)
CTL_RO_NL_CGEN(config_prof, opt_prof_final, opt_prof_final, bool)
CTL_RO_NL_CGEN(config_prof, opt_prof_leak, opt_prof_leak, bool)
CTL_RO_NL_CGEN(config_prof, opt_prof_accum, opt_prof_accum, bool)

/******************************************************************************/

/* ctl_mtx must be held during execution of this function. */
static void
arena_purge(unsigned arena_ind)
{
    VARIABLE_ARRAY(arena_t *, tarenas, ctl_stats.narenas);

    malloc_mutex_lock(&arenas_lock);
    memcpy(tarenas, arenas, sizeof(arena_t *) * ctl_stats.narenas);
    malloc_mutex_unlock(&arenas_lock);

    if (arena_ind == ctl_stats.narenas) {
        unsigned i;
        for (i = 0; i < ctl_stats.narenas; i++) {
            if (tarenas[i] != NULL)
                arena_purge_all(tarenas[i]);
        }
    } else {
        assert(arena_ind < ctl_stats.narenas);
        if (tarenas[arena_ind] != NULL)
            arena_purge_all(tarenas[arena_ind]);
    }
}

static int
arena_i_purge_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp,
    void *newp, size_t newlen)
{
    int ret;

    READONLY();
    WRITEONLY();
    malloc_mutex_lock(&ctl_mtx);
    arena_purge(mib[1]);
    malloc_mutex_unlock(&ctl_mtx);

    ret = 0;
label_return:
    return (ret);
}
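
/*
 * Illustrative trigger (assumes the unprefixed public API name); both
 * directions must be empty, as enforced by READONLY()/WRITEONLY() above:
 *
 *   mallctl("arena.0.purge", NULL, NULL, NULL, 0);
 */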

static int
arena_i_dss_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp,
    void *newp, size_t newlen)
{
    int ret, i;
    bool match, err;
    const char *dss = NULL;
    unsigned arena_ind = mib[1];
    dss_prec_t dss_prec_old = dss_prec_limit;
    dss_prec_t dss_prec = dss_prec_limit;

    malloc_mutex_lock(&ctl_mtx);
    WRITE(dss, const char *);
    /*
     * Only validate and apply a new precedence on a write; a pure read must
     * not consult the (otherwise unset) input string.
     */
    if (newp != NULL) {
        match = false;
        for (i = 0; i < dss_prec_limit; i++) {
            if (strcmp(dss_prec_names[i], dss) == 0) {
                dss_prec = i;
                match = true;
                break;
            }
        }
        if (match == false) {
            ret = EINVAL;
            goto label_return;
        }
    }

    if (arena_ind < ctl_stats.narenas) {
        arena_t *arena = arenas[arena_ind];
        if (arena != NULL) {
            dss_prec_old = arena_dss_prec_get(arena);
            if (newp != NULL)
                arena_dss_prec_set(arena, dss_prec);
            err = false;
        } else
            err = true;
    } else {
        dss_prec_old = chunk_dss_prec_get();
        if (newp != NULL)
            err = chunk_dss_prec_set(dss_prec);
        else
            err = false;
    }
    dss = dss_prec_names[dss_prec_old];
    READ(dss, const char *);
    if (err) {
        ret = EFAULT;
        goto label_return;
    }

    ret = 0;
label_return:
    malloc_mutex_unlock(&ctl_mtx);
    return (ret);
}
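
/*
 * Illustrative use (assumes the unprefixed public API name): read the old
 * precedence for arena 0 while installing a new one, mirroring the
 * write-then-read order above.
 *
 *   const char *dss = "primary", *old_dss;
 *   size_t sz = sizeof(old_dss);
 *   mallctl("arena.0.dss", &old_dss, &sz, &dss, sizeof(dss));
 */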

static const ctl_named_node_t *
arena_i_index(const size_t *mib, size_t miblen, size_t i)
{
    const ctl_named_node_t *ret;

    malloc_mutex_lock(&ctl_mtx);
    if (i > ctl_stats.narenas) {
        ret = NULL;
        goto label_return;
    }

    ret = super_arena_i_node;
label_return:
    malloc_mutex_unlock(&ctl_mtx);
    return (ret);
}

/******************************************************************************/

CTL_RO_NL_GEN(arenas_bin_i_size, arena_bin_info[mib[2]].reg_size, size_t)
CTL_RO_NL_GEN(arenas_bin_i_nregs, arena_bin_info[mib[2]].nregs, uint32_t)
CTL_RO_NL_GEN(arenas_bin_i_run_size, arena_bin_info[mib[2]].run_size, size_t)
static const ctl_named_node_t *
arenas_bin_i_index(const size_t *mib, size_t miblen, size_t i)
{

    if (i > NBINS)
        return (NULL);
    return (super_arenas_bin_i_node);
}

CTL_RO_NL_GEN(arenas_lrun_i_size, ((mib[2]+1) << LG_PAGE), size_t)
static const ctl_named_node_t *
arenas_lrun_i_index(const size_t *mib, size_t miblen, size_t i)
{

    if (i > nlclasses)
        return (NULL);
    return (super_arenas_lrun_i_node);
}

static int
arenas_narenas_ctl(const size_t *mib, size_t miblen, void *oldp,
    size_t *oldlenp, void *newp, size_t newlen)
{
    int ret;
    unsigned narenas;

    malloc_mutex_lock(&ctl_mtx);
    READONLY();
    if (*oldlenp != sizeof(unsigned)) {
        ret = EINVAL;
        goto label_return;
    }
    narenas = ctl_stats.narenas;
    READ(narenas, unsigned);

    ret = 0;
label_return:
    malloc_mutex_unlock(&ctl_mtx);
    return (ret);
}

static int
arenas_initialized_ctl(const size_t *mib, size_t miblen, void *oldp,
    size_t *oldlenp, void *newp, size_t newlen)
{
    int ret;
    unsigned nread, i;

    malloc_mutex_lock(&ctl_mtx);
    READONLY();
    if (*oldlenp != ctl_stats.narenas * sizeof(bool)) {
        ret = EINVAL;
        nread = (*oldlenp < ctl_stats.narenas * sizeof(bool))
            ? (*oldlenp / sizeof(bool)) : ctl_stats.narenas;
    } else {
        ret = 0;
        nread = ctl_stats.narenas;
    }

    for (i = 0; i < nread; i++)
        ((bool *)oldp)[i] = ctl_stats.arenas[i].initialized;

label_return:
    malloc_mutex_unlock(&ctl_mtx);
    return (ret);
}

CTL_RO_NL_GEN(arenas_quantum, QUANTUM, size_t)
CTL_RO_NL_GEN(arenas_page, PAGE, size_t)
CTL_RO_NL_CGEN(config_tcache, arenas_tcache_max, tcache_maxclass, size_t)
CTL_RO_NL_GEN(arenas_nbins, NBINS, unsigned)
CTL_RO_NL_CGEN(config_tcache, arenas_nhbins, nhbins, unsigned)
CTL_RO_NL_GEN(arenas_nlruns, nlclasses, size_t)

static int
arenas_purge_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp,
    void *newp, size_t newlen)
{
    int ret;
    unsigned arena_ind;

    malloc_mutex_lock(&ctl_mtx);
    WRITEONLY();
    arena_ind = UINT_MAX;
    WRITE(arena_ind, unsigned);
    if (newp != NULL && arena_ind >= ctl_stats.narenas)
        ret = EFAULT;
    else {
        if (arena_ind == UINT_MAX)
            arena_ind = ctl_stats.narenas;
        arena_purge(arena_ind);
        ret = 0;
    }

label_return:
    malloc_mutex_unlock(&ctl_mtx);
    return (ret);
}

static int
arenas_extend_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp,
    void *newp, size_t newlen)
{
    int ret;
    unsigned narenas;

    malloc_mutex_lock(&ctl_mtx);
    READONLY();
    if (ctl_grow()) {
        ret = EAGAIN;
        goto label_return;
    }
    /*
     * READ() takes the address of its argument, so pass the new arena index
     * through a local rather than the expression "ctl_stats.narenas - 1".
     */
    narenas = ctl_stats.narenas - 1;
    READ(narenas, unsigned);

    ret = 0;
label_return:
    malloc_mutex_unlock(&ctl_mtx);
    return (ret);
}
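
/*
 * Illustrative use (assumes the unprefixed public API name): create a new
 * arena and read back its index.
 *
 *   unsigned ind;
 *   size_t sz = sizeof(ind);
 *   mallctl("arenas.extend", &ind, &sz, NULL, 0);
 */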

/******************************************************************************/

static int
prof_active_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp,
    void *newp, size_t newlen)
{
    int ret;
    bool oldval;

    if (config_prof == false)
        return (ENOENT);

    malloc_mutex_lock(&ctl_mtx); /* Protect opt_prof_active. */
    oldval = opt_prof_active;
    if (newp != NULL) {
        /*
         * The memory barriers will tend to make opt_prof_active
         * propagate faster on systems with weak memory ordering.
         */
        mb_write();
        WRITE(opt_prof_active, bool);
        mb_write();
    }
    READ(oldval, bool);

    ret = 0;
label_return:
    malloc_mutex_unlock(&ctl_mtx);
    return (ret);
}

static int
prof_dump_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp,
    void *newp, size_t newlen)
{
    int ret;
    const char *filename = NULL;

    if (config_prof == false)
        return (ENOENT);

    WRITEONLY();
    WRITE(filename, const char *);

    if (prof_mdump(filename)) {
        ret = EFAULT;
        goto label_return;
    }

    ret = 0;
label_return:
    return (ret);
}
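
/*
 * Illustrative use (assumes the unprefixed public API name and a build with
 * profiling enabled):
 *
 *   const char *filename = "jeprof.out";
 *   mallctl("prof.dump", NULL, NULL, &filename, sizeof(filename));
 */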

CTL_RO_NL_CGEN(config_prof, prof_interval, prof_interval, uint64_t)

/******************************************************************************/

CTL_RO_CGEN(config_stats, stats_chunks_current, ctl_stats.chunks.current,
    size_t)
CTL_RO_CGEN(config_stats, stats_chunks_total, ctl_stats.chunks.total, uint64_t)
CTL_RO_CGEN(config_stats, stats_chunks_high, ctl_stats.chunks.high, size_t)
CTL_RO_CGEN(config_stats, stats_huge_allocated, huge_allocated, size_t)
CTL_RO_CGEN(config_stats, stats_huge_nmalloc, huge_nmalloc, uint64_t)
CTL_RO_CGEN(config_stats, stats_huge_ndalloc, huge_ndalloc, uint64_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_small_allocated,
    ctl_stats.arenas[mib[2]].allocated_small, size_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_small_nmalloc,
    ctl_stats.arenas[mib[2]].nmalloc_small, uint64_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_small_ndalloc,
    ctl_stats.arenas[mib[2]].ndalloc_small, uint64_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_small_nrequests,
    ctl_stats.arenas[mib[2]].nrequests_small, uint64_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_large_allocated,
    ctl_stats.arenas[mib[2]].astats.allocated_large, size_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_large_nmalloc,
    ctl_stats.arenas[mib[2]].astats.nmalloc_large, uint64_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_large_ndalloc,
    ctl_stats.arenas[mib[2]].astats.ndalloc_large, uint64_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_large_nrequests,
    ctl_stats.arenas[mib[2]].astats.nrequests_large, uint64_t)

CTL_RO_CGEN(config_stats, stats_arenas_i_bins_j_allocated,
    ctl_stats.arenas[mib[2]].bstats[mib[4]].allocated, size_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_bins_j_nmalloc,
    ctl_stats.arenas[mib[2]].bstats[mib[4]].nmalloc, uint64_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_bins_j_ndalloc,
    ctl_stats.arenas[mib[2]].bstats[mib[4]].ndalloc, uint64_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_bins_j_nrequests,
    ctl_stats.arenas[mib[2]].bstats[mib[4]].nrequests, uint64_t)
CTL_RO_CGEN(config_stats && config_tcache, stats_arenas_i_bins_j_nfills,
    ctl_stats.arenas[mib[2]].bstats[mib[4]].nfills, uint64_t)
CTL_RO_CGEN(config_stats && config_tcache, stats_arenas_i_bins_j_nflushes,
    ctl_stats.arenas[mib[2]].bstats[mib[4]].nflushes, uint64_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_bins_j_nruns,
    ctl_stats.arenas[mib[2]].bstats[mib[4]].nruns, uint64_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_bins_j_nreruns,
    ctl_stats.arenas[mib[2]].bstats[mib[4]].reruns, uint64_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_bins_j_curruns,
    ctl_stats.arenas[mib[2]].bstats[mib[4]].curruns, size_t)

static const ctl_named_node_t *
stats_arenas_i_bins_j_index(const size_t *mib, size_t miblen, size_t j)
{

    if (j > NBINS)
        return (NULL);
    return (super_stats_arenas_i_bins_j_node);
}

CTL_RO_CGEN(config_stats, stats_arenas_i_lruns_j_nmalloc,
    ctl_stats.arenas[mib[2]].lstats[mib[4]].nmalloc, uint64_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_lruns_j_ndalloc,
    ctl_stats.arenas[mib[2]].lstats[mib[4]].ndalloc, uint64_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_lruns_j_nrequests,
    ctl_stats.arenas[mib[2]].lstats[mib[4]].nrequests, uint64_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_lruns_j_curruns,
    ctl_stats.arenas[mib[2]].lstats[mib[4]].curruns, size_t)

static const ctl_named_node_t *
stats_arenas_i_lruns_j_index(const size_t *mib, size_t miblen, size_t j)
{

    if (j > nlclasses)
        return (NULL);
    return (super_stats_arenas_i_lruns_j_node);
}

CTL_RO_GEN(stats_arenas_i_nthreads, ctl_stats.arenas[mib[2]].nthreads, unsigned)
CTL_RO_GEN(stats_arenas_i_dss, ctl_stats.arenas[mib[2]].dss, const char *)
CTL_RO_GEN(stats_arenas_i_pactive, ctl_stats.arenas[mib[2]].pactive, size_t)
CTL_RO_GEN(stats_arenas_i_pdirty, ctl_stats.arenas[mib[2]].pdirty, size_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_mapped,
    ctl_stats.arenas[mib[2]].astats.mapped, size_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_npurge,
    ctl_stats.arenas[mib[2]].astats.npurge, uint64_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_nmadvise,
    ctl_stats.arenas[mib[2]].astats.nmadvise, uint64_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_purged,
    ctl_stats.arenas[mib[2]].astats.purged, uint64_t)

static const ctl_named_node_t *
stats_arenas_i_index(const size_t *mib, size_t miblen, size_t i)
{
    const ctl_named_node_t *ret;

    malloc_mutex_lock(&ctl_mtx);
    if (i > ctl_stats.narenas || ctl_stats.arenas[i].initialized == false) {
        ret = NULL;
        goto label_return;
    }

    ret = super_stats_arenas_i_node;
label_return:
    malloc_mutex_unlock(&ctl_mtx);
    return (ret);
}

CTL_RO_CGEN(config_stats, stats_cactive, &stats_cactive, size_t *)
CTL_RO_CGEN(config_stats, stats_allocated, ctl_stats.allocated, size_t)
CTL_RO_CGEN(config_stats, stats_active, ctl_stats.active, size_t)
CTL_RO_CGEN(config_stats, stats_mapped, ctl_stats.mapped, size_t)