#define JEMALLOC_CTL_C_
#include "jemalloc/internal/jemalloc_internal.h"

/******************************************************************************/
/* Data. */

/*
 * ctl_mtx protects the following:
 * - ctl_stats.*
 * - opt_prof_active
 * - swap_enabled
 * - swap_prezeroed
 */
static malloc_mutex_t   ctl_mtx;
static bool             ctl_initialized;
static uint64_t         ctl_epoch;
static ctl_stats_t      ctl_stats;

/******************************************************************************/
/* Function prototypes for non-inline static functions. */

#define CTL_PROTO(n) \
static int n##_ctl(const size_t *mib, size_t miblen, void *oldp, \
    size_t *oldlenp, void *newp, size_t newlen);

#define INDEX_PROTO(n) \
const ctl_node_t *n##_index(const size_t *mib, size_t miblen, \
    size_t i);
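
/*
 * For example (an illustrative expansion of the macro above),
 * CTL_PROTO(version) declares:
 *
 *     static int version_ctl(const size_t *mib, size_t miblen, void *oldp,
 *         size_t *oldlenp, void *newp, size_t newlen);
 */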

#ifdef JEMALLOC_STATS
static bool ctl_arena_init(ctl_arena_stats_t *astats);
#endif
static void ctl_arena_clear(ctl_arena_stats_t *astats);
#ifdef JEMALLOC_STATS
static void ctl_arena_stats_amerge(ctl_arena_stats_t *cstats,
    arena_t *arena);
static void ctl_arena_stats_smerge(ctl_arena_stats_t *sstats,
    ctl_arena_stats_t *astats);
#endif
static void ctl_arena_refresh(arena_t *arena, unsigned i);
static void ctl_refresh(void);
static bool ctl_init(void);
static int ctl_lookup(const char *name, ctl_node_t const **nodesp,
    size_t *mibp, size_t *depthp);

CTL_PROTO(version)
CTL_PROTO(epoch)
#ifdef JEMALLOC_TCACHE
CTL_PROTO(tcache_flush)
#endif
CTL_PROTO(thread_arena)
#ifdef JEMALLOC_STATS
CTL_PROTO(thread_allocated)
CTL_PROTO(thread_allocatedp)
CTL_PROTO(thread_deallocated)
CTL_PROTO(thread_deallocatedp)
#endif
CTL_PROTO(config_debug)
CTL_PROTO(config_dss)
CTL_PROTO(config_dynamic_page_shift)
CTL_PROTO(config_fill)
CTL_PROTO(config_lazy_lock)
CTL_PROTO(config_prof)
CTL_PROTO(config_prof_libgcc)
CTL_PROTO(config_prof_libunwind)
CTL_PROTO(config_stats)
CTL_PROTO(config_swap)
CTL_PROTO(config_sysv)
CTL_PROTO(config_tcache)
CTL_PROTO(config_tiny)
CTL_PROTO(config_tls)
CTL_PROTO(config_xmalloc)
CTL_PROTO(opt_abort)
CTL_PROTO(opt_lg_qspace_max)
CTL_PROTO(opt_lg_cspace_max)
CTL_PROTO(opt_lg_chunk)
CTL_PROTO(opt_narenas)
CTL_PROTO(opt_lg_dirty_mult)
CTL_PROTO(opt_stats_print)
#ifdef JEMALLOC_FILL
CTL_PROTO(opt_junk)
CTL_PROTO(opt_zero)
#endif
#ifdef JEMALLOC_SYSV
CTL_PROTO(opt_sysv)
#endif
#ifdef JEMALLOC_XMALLOC
CTL_PROTO(opt_xmalloc)
#endif
#ifdef JEMALLOC_TCACHE
CTL_PROTO(opt_tcache)
CTL_PROTO(opt_lg_tcache_gc_sweep)
#endif
#ifdef JEMALLOC_PROF
CTL_PROTO(opt_prof)
CTL_PROTO(opt_prof_prefix)
CTL_PROTO(opt_prof_active)
CTL_PROTO(opt_lg_prof_bt_max)
CTL_PROTO(opt_lg_prof_sample)
CTL_PROTO(opt_lg_prof_interval)
CTL_PROTO(opt_prof_gdump)
CTL_PROTO(opt_prof_leak)
CTL_PROTO(opt_prof_accum)
CTL_PROTO(opt_lg_prof_tcmax)
#endif
#ifdef JEMALLOC_SWAP
CTL_PROTO(opt_overcommit)
#endif
CTL_PROTO(arenas_bin_i_size)
CTL_PROTO(arenas_bin_i_nregs)
CTL_PROTO(arenas_bin_i_run_size)
INDEX_PROTO(arenas_bin_i)
CTL_PROTO(arenas_lrun_i_size)
INDEX_PROTO(arenas_lrun_i)
CTL_PROTO(arenas_narenas)
CTL_PROTO(arenas_initialized)
CTL_PROTO(arenas_quantum)
CTL_PROTO(arenas_cacheline)
CTL_PROTO(arenas_subpage)
CTL_PROTO(arenas_pagesize)
CTL_PROTO(arenas_chunksize)
#ifdef JEMALLOC_TINY
CTL_PROTO(arenas_tspace_min)
CTL_PROTO(arenas_tspace_max)
#endif
CTL_PROTO(arenas_qspace_min)
CTL_PROTO(arenas_qspace_max)
CTL_PROTO(arenas_cspace_min)
CTL_PROTO(arenas_cspace_max)
CTL_PROTO(arenas_sspace_min)
CTL_PROTO(arenas_sspace_max)
#ifdef JEMALLOC_TCACHE
CTL_PROTO(arenas_tcache_max)
#endif
CTL_PROTO(arenas_ntbins)
CTL_PROTO(arenas_nqbins)
CTL_PROTO(arenas_ncbins)
CTL_PROTO(arenas_nsbins)
CTL_PROTO(arenas_nbins)
#ifdef JEMALLOC_TCACHE
CTL_PROTO(arenas_nhbins)
#endif
CTL_PROTO(arenas_nlruns)
CTL_PROTO(arenas_purge)
#ifdef JEMALLOC_PROF
CTL_PROTO(prof_active)
CTL_PROTO(prof_dump)
CTL_PROTO(prof_interval)
#endif
#ifdef JEMALLOC_STATS
CTL_PROTO(stats_chunks_current)
CTL_PROTO(stats_chunks_total)
CTL_PROTO(stats_chunks_high)
CTL_PROTO(stats_huge_allocated)
CTL_PROTO(stats_huge_nmalloc)
CTL_PROTO(stats_huge_ndalloc)
CTL_PROTO(stats_arenas_i_small_allocated)
CTL_PROTO(stats_arenas_i_small_nmalloc)
CTL_PROTO(stats_arenas_i_small_ndalloc)
CTL_PROTO(stats_arenas_i_small_nrequests)
CTL_PROTO(stats_arenas_i_large_allocated)
CTL_PROTO(stats_arenas_i_large_nmalloc)
CTL_PROTO(stats_arenas_i_large_ndalloc)
CTL_PROTO(stats_arenas_i_large_nrequests)
CTL_PROTO(stats_arenas_i_bins_j_allocated)
CTL_PROTO(stats_arenas_i_bins_j_nmalloc)
CTL_PROTO(stats_arenas_i_bins_j_ndalloc)
CTL_PROTO(stats_arenas_i_bins_j_nrequests)
#ifdef JEMALLOC_TCACHE
CTL_PROTO(stats_arenas_i_bins_j_nfills)
CTL_PROTO(stats_arenas_i_bins_j_nflushes)
#endif
CTL_PROTO(stats_arenas_i_bins_j_nruns)
CTL_PROTO(stats_arenas_i_bins_j_nreruns)
CTL_PROTO(stats_arenas_i_bins_j_highruns)
CTL_PROTO(stats_arenas_i_bins_j_curruns)
INDEX_PROTO(stats_arenas_i_bins_j)
CTL_PROTO(stats_arenas_i_lruns_j_nmalloc)
CTL_PROTO(stats_arenas_i_lruns_j_ndalloc)
CTL_PROTO(stats_arenas_i_lruns_j_nrequests)
CTL_PROTO(stats_arenas_i_lruns_j_highruns)
CTL_PROTO(stats_arenas_i_lruns_j_curruns)
INDEX_PROTO(stats_arenas_i_lruns_j)
#endif
CTL_PROTO(stats_arenas_i_nthreads)
CTL_PROTO(stats_arenas_i_pactive)
CTL_PROTO(stats_arenas_i_pdirty)
#ifdef JEMALLOC_STATS
CTL_PROTO(stats_arenas_i_mapped)
CTL_PROTO(stats_arenas_i_npurge)
CTL_PROTO(stats_arenas_i_nmadvise)
CTL_PROTO(stats_arenas_i_purged)
#endif
INDEX_PROTO(stats_arenas_i)
#ifdef JEMALLOC_STATS
CTL_PROTO(stats_cactive)
CTL_PROTO(stats_allocated)
CTL_PROTO(stats_active)
CTL_PROTO(stats_mapped)
#endif
#ifdef JEMALLOC_SWAP
# ifdef JEMALLOC_STATS
CTL_PROTO(swap_avail)
# endif
CTL_PROTO(swap_prezeroed)
CTL_PROTO(swap_nfds)
CTL_PROTO(swap_fds)
#endif

/******************************************************************************/
/* mallctl tree. */

/* Maximum tree depth. */
#define CTL_MAX_DEPTH 6

#define NAME(n) true, {.named = {n
#define CHILD(c) sizeof(c##_node) / sizeof(ctl_node_t), c##_node}}, NULL
#define CTL(c) 0, NULL}}, c##_ctl

/*
 * Only handles internal indexed nodes, since there are currently no external
 * ones.
 */
#define INDEX(i) false, {.indexed = {i##_index}}, NULL
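
/*
 * Illustrative expansions of the node macros above (not part of the build):
 *
 *     {NAME("flush"), CTL(tcache_flush)}
 * expands to a terminal node,
 *     {true, {.named = {"flush", 0, NULL}}, tcache_flush_ctl}
 * while
 *     {NAME("bin"), CHILD(arenas_bin)}
 * expands to an internal node,
 *     {true, {.named = {"bin", sizeof(arenas_bin_node) /
 *         sizeof(ctl_node_t), arenas_bin_node}}, NULL}
 */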

#ifdef JEMALLOC_TCACHE
static const ctl_node_t tcache_node[] = {
    {NAME("flush"), CTL(tcache_flush)}
};
#endif

static const ctl_node_t thread_node[] = {
    {NAME("arena"), CTL(thread_arena)}
#ifdef JEMALLOC_STATS
    ,
    {NAME("allocated"), CTL(thread_allocated)},
    {NAME("allocatedp"), CTL(thread_allocatedp)},
    {NAME("deallocated"), CTL(thread_deallocated)},
    {NAME("deallocatedp"), CTL(thread_deallocatedp)}
#endif
};

static const ctl_node_t config_node[] = {
    {NAME("debug"), CTL(config_debug)},
    {NAME("dss"), CTL(config_dss)},
    {NAME("dynamic_page_shift"), CTL(config_dynamic_page_shift)},
    {NAME("fill"), CTL(config_fill)},
    {NAME("lazy_lock"), CTL(config_lazy_lock)},
    {NAME("prof"), CTL(config_prof)},
    {NAME("prof_libgcc"), CTL(config_prof_libgcc)},
    {NAME("prof_libunwind"), CTL(config_prof_libunwind)},
    {NAME("stats"), CTL(config_stats)},
    {NAME("swap"), CTL(config_swap)},
    {NAME("sysv"), CTL(config_sysv)},
    {NAME("tcache"), CTL(config_tcache)},
    {NAME("tiny"), CTL(config_tiny)},
    {NAME("tls"), CTL(config_tls)},
    {NAME("xmalloc"), CTL(config_xmalloc)}
};

static const ctl_node_t opt_node[] = {
    {NAME("abort"), CTL(opt_abort)},
    {NAME("lg_qspace_max"), CTL(opt_lg_qspace_max)},
    {NAME("lg_cspace_max"), CTL(opt_lg_cspace_max)},
    {NAME("lg_chunk"), CTL(opt_lg_chunk)},
    {NAME("narenas"), CTL(opt_narenas)},
    {NAME("lg_dirty_mult"), CTL(opt_lg_dirty_mult)},
    {NAME("stats_print"), CTL(opt_stats_print)}
#ifdef JEMALLOC_FILL
    ,
    {NAME("junk"), CTL(opt_junk)},
    {NAME("zero"), CTL(opt_zero)}
#endif
#ifdef JEMALLOC_SYSV
    ,
    {NAME("sysv"), CTL(opt_sysv)}
#endif
#ifdef JEMALLOC_XMALLOC
    ,
    {NAME("xmalloc"), CTL(opt_xmalloc)}
#endif
#ifdef JEMALLOC_TCACHE
    ,
    {NAME("tcache"), CTL(opt_tcache)},
    {NAME("lg_tcache_gc_sweep"), CTL(opt_lg_tcache_gc_sweep)}
#endif
#ifdef JEMALLOC_PROF
    ,
    {NAME("prof"), CTL(opt_prof)},
    {NAME("prof_prefix"), CTL(opt_prof_prefix)},
    {NAME("prof_active"), CTL(opt_prof_active)},
    {NAME("lg_prof_bt_max"), CTL(opt_lg_prof_bt_max)},
    {NAME("lg_prof_sample"), CTL(opt_lg_prof_sample)},
    {NAME("lg_prof_interval"), CTL(opt_lg_prof_interval)},
    {NAME("prof_gdump"), CTL(opt_prof_gdump)},
    {NAME("prof_leak"), CTL(opt_prof_leak)},
    {NAME("prof_accum"), CTL(opt_prof_accum)},
    {NAME("lg_prof_tcmax"), CTL(opt_lg_prof_tcmax)}
#endif
#ifdef JEMALLOC_SWAP
    ,
    {NAME("overcommit"), CTL(opt_overcommit)}
#endif
};

static const ctl_node_t arenas_bin_i_node[] = {
    {NAME("size"), CTL(arenas_bin_i_size)},
    {NAME("nregs"), CTL(arenas_bin_i_nregs)},
    {NAME("run_size"), CTL(arenas_bin_i_run_size)}
};
static const ctl_node_t super_arenas_bin_i_node[] = {
    {NAME(""), CHILD(arenas_bin_i)}
};

static const ctl_node_t arenas_bin_node[] = {
    {INDEX(arenas_bin_i)}
};

static const ctl_node_t arenas_lrun_i_node[] = {
    {NAME("size"), CTL(arenas_lrun_i_size)}
};
static const ctl_node_t super_arenas_lrun_i_node[] = {
    {NAME(""), CHILD(arenas_lrun_i)}
};

static const ctl_node_t arenas_lrun_node[] = {
    {INDEX(arenas_lrun_i)}
};

static const ctl_node_t arenas_node[] = {
    {NAME("narenas"), CTL(arenas_narenas)},
    {NAME("initialized"), CTL(arenas_initialized)},
    {NAME("quantum"), CTL(arenas_quantum)},
    {NAME("cacheline"), CTL(arenas_cacheline)},
    {NAME("subpage"), CTL(arenas_subpage)},
    {NAME("pagesize"), CTL(arenas_pagesize)},
    {NAME("chunksize"), CTL(arenas_chunksize)},
#ifdef JEMALLOC_TINY
    {NAME("tspace_min"), CTL(arenas_tspace_min)},
    {NAME("tspace_max"), CTL(arenas_tspace_max)},
#endif
    {NAME("qspace_min"), CTL(arenas_qspace_min)},
    {NAME("qspace_max"), CTL(arenas_qspace_max)},
    {NAME("cspace_min"), CTL(arenas_cspace_min)},
    {NAME("cspace_max"), CTL(arenas_cspace_max)},
    {NAME("sspace_min"), CTL(arenas_sspace_min)},
    {NAME("sspace_max"), CTL(arenas_sspace_max)},
#ifdef JEMALLOC_TCACHE
    {NAME("tcache_max"), CTL(arenas_tcache_max)},
#endif
    {NAME("ntbins"), CTL(arenas_ntbins)},
    {NAME("nqbins"), CTL(arenas_nqbins)},
    {NAME("ncbins"), CTL(arenas_ncbins)},
    {NAME("nsbins"), CTL(arenas_nsbins)},
    {NAME("nbins"), CTL(arenas_nbins)},
#ifdef JEMALLOC_TCACHE
    {NAME("nhbins"), CTL(arenas_nhbins)},
#endif
    {NAME("bin"), CHILD(arenas_bin)},
    {NAME("nlruns"), CTL(arenas_nlruns)},
    {NAME("lrun"), CHILD(arenas_lrun)},
    {NAME("purge"), CTL(arenas_purge)}
};

#ifdef JEMALLOC_PROF
static const ctl_node_t prof_node[] = {
    {NAME("active"), CTL(prof_active)},
    {NAME("dump"), CTL(prof_dump)},
    {NAME("interval"), CTL(prof_interval)}
};
#endif

#ifdef JEMALLOC_STATS
static const ctl_node_t stats_chunks_node[] = {
    {NAME("current"), CTL(stats_chunks_current)},
    {NAME("total"), CTL(stats_chunks_total)},
    {NAME("high"), CTL(stats_chunks_high)}
};

static const ctl_node_t stats_huge_node[] = {
    {NAME("allocated"), CTL(stats_huge_allocated)},
    {NAME("nmalloc"), CTL(stats_huge_nmalloc)},
    {NAME("ndalloc"), CTL(stats_huge_ndalloc)}
};

static const ctl_node_t stats_arenas_i_small_node[] = {
    {NAME("allocated"), CTL(stats_arenas_i_small_allocated)},
    {NAME("nmalloc"), CTL(stats_arenas_i_small_nmalloc)},
    {NAME("ndalloc"), CTL(stats_arenas_i_small_ndalloc)},
    {NAME("nrequests"), CTL(stats_arenas_i_small_nrequests)}
};

static const ctl_node_t stats_arenas_i_large_node[] = {
    {NAME("allocated"), CTL(stats_arenas_i_large_allocated)},
    {NAME("nmalloc"), CTL(stats_arenas_i_large_nmalloc)},
    {NAME("ndalloc"), CTL(stats_arenas_i_large_ndalloc)},
    {NAME("nrequests"), CTL(stats_arenas_i_large_nrequests)}
};

static const ctl_node_t stats_arenas_i_bins_j_node[] = {
    {NAME("allocated"), CTL(stats_arenas_i_bins_j_allocated)},
    {NAME("nmalloc"), CTL(stats_arenas_i_bins_j_nmalloc)},
    {NAME("ndalloc"), CTL(stats_arenas_i_bins_j_ndalloc)},
    {NAME("nrequests"), CTL(stats_arenas_i_bins_j_nrequests)},
#ifdef JEMALLOC_TCACHE
    {NAME("nfills"), CTL(stats_arenas_i_bins_j_nfills)},
    {NAME("nflushes"), CTL(stats_arenas_i_bins_j_nflushes)},
#endif
    {NAME("nruns"), CTL(stats_arenas_i_bins_j_nruns)},
    {NAME("nreruns"), CTL(stats_arenas_i_bins_j_nreruns)},
    {NAME("highruns"), CTL(stats_arenas_i_bins_j_highruns)},
    {NAME("curruns"), CTL(stats_arenas_i_bins_j_curruns)}
};
static const ctl_node_t super_stats_arenas_i_bins_j_node[] = {
    {NAME(""), CHILD(stats_arenas_i_bins_j)}
};

static const ctl_node_t stats_arenas_i_bins_node[] = {
    {INDEX(stats_arenas_i_bins_j)}
};

static const ctl_node_t stats_arenas_i_lruns_j_node[] = {
    {NAME("nmalloc"), CTL(stats_arenas_i_lruns_j_nmalloc)},
    {NAME("ndalloc"), CTL(stats_arenas_i_lruns_j_ndalloc)},
    {NAME("nrequests"), CTL(stats_arenas_i_lruns_j_nrequests)},
    {NAME("highruns"), CTL(stats_arenas_i_lruns_j_highruns)},
    {NAME("curruns"), CTL(stats_arenas_i_lruns_j_curruns)}
};
static const ctl_node_t super_stats_arenas_i_lruns_j_node[] = {
    {NAME(""), CHILD(stats_arenas_i_lruns_j)}
};

static const ctl_node_t stats_arenas_i_lruns_node[] = {
    {INDEX(stats_arenas_i_lruns_j)}
};
#endif

static const ctl_node_t stats_arenas_i_node[] = {
    {NAME("nthreads"), CTL(stats_arenas_i_nthreads)},
    {NAME("pactive"), CTL(stats_arenas_i_pactive)},
    {NAME("pdirty"), CTL(stats_arenas_i_pdirty)}
#ifdef JEMALLOC_STATS
    ,
    {NAME("mapped"), CTL(stats_arenas_i_mapped)},
    {NAME("npurge"), CTL(stats_arenas_i_npurge)},
    {NAME("nmadvise"), CTL(stats_arenas_i_nmadvise)},
    {NAME("purged"), CTL(stats_arenas_i_purged)},
    {NAME("small"), CHILD(stats_arenas_i_small)},
    {NAME("large"), CHILD(stats_arenas_i_large)},
    {NAME("bins"), CHILD(stats_arenas_i_bins)},
    {NAME("lruns"), CHILD(stats_arenas_i_lruns)}
#endif
};
static const ctl_node_t super_stats_arenas_i_node[] = {
    {NAME(""), CHILD(stats_arenas_i)}
};

static const ctl_node_t stats_arenas_node[] = {
    {INDEX(stats_arenas_i)}
};

static const ctl_node_t stats_node[] = {
#ifdef JEMALLOC_STATS
    {NAME("cactive"), CTL(stats_cactive)},
    {NAME("allocated"), CTL(stats_allocated)},
    {NAME("active"), CTL(stats_active)},
    {NAME("mapped"), CTL(stats_mapped)},
    {NAME("chunks"), CHILD(stats_chunks)},
    {NAME("huge"), CHILD(stats_huge)},
#endif
    {NAME("arenas"), CHILD(stats_arenas)}
};

#ifdef JEMALLOC_SWAP
static const ctl_node_t swap_node[] = {
# ifdef JEMALLOC_STATS
    {NAME("avail"), CTL(swap_avail)},
# endif
    {NAME("prezeroed"), CTL(swap_prezeroed)},
    {NAME("nfds"), CTL(swap_nfds)},
    {NAME("fds"), CTL(swap_fds)}
};
#endif

static const ctl_node_t root_node[] = {
    {NAME("version"), CTL(version)},
    {NAME("epoch"), CTL(epoch)},
#ifdef JEMALLOC_TCACHE
    {NAME("tcache"), CHILD(tcache)},
#endif
    {NAME("thread"), CHILD(thread)},
    {NAME("config"), CHILD(config)},
    {NAME("opt"), CHILD(opt)},
    {NAME("arenas"), CHILD(arenas)},
#ifdef JEMALLOC_PROF
    {NAME("prof"), CHILD(prof)},
#endif
    {NAME("stats"), CHILD(stats)}
#ifdef JEMALLOC_SWAP
    ,
    {NAME("swap"), CHILD(swap)}
#endif
};
static const ctl_node_t super_root_node[] = {
    {NAME(""), CHILD(root)}
};
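
/*
 * For example, "epoch" resolves to root_node[1], so its MIB is {1}.  A
 * deeper name such as "stats.arenas.0.pdirty" consumes one component per
 * tree level, with the "0" handled by stats_arenas_i_index(); the absolute
 * indices of most components depend on which JEMALLOC_* features are
 * compiled in.
 */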

#undef NAME
#undef CHILD
#undef CTL
#undef INDEX

/******************************************************************************/

#ifdef JEMALLOC_STATS
static bool
ctl_arena_init(ctl_arena_stats_t *astats)
{

    if (astats->bstats == NULL) {
        astats->bstats = (malloc_bin_stats_t *)base_alloc(nbins *
            sizeof(malloc_bin_stats_t));
        if (astats->bstats == NULL)
            return (true);
    }
    if (astats->lstats == NULL) {
        astats->lstats = (malloc_large_stats_t *)base_alloc(nlclasses *
            sizeof(malloc_large_stats_t));
        if (astats->lstats == NULL)
            return (true);
    }

    return (false);
}
#endif

static void
ctl_arena_clear(ctl_arena_stats_t *astats)
{

    astats->pactive = 0;
    astats->pdirty = 0;
#ifdef JEMALLOC_STATS
    memset(&astats->astats, 0, sizeof(arena_stats_t));
    astats->allocated_small = 0;
    astats->nmalloc_small = 0;
    astats->ndalloc_small = 0;
    astats->nrequests_small = 0;
    memset(astats->bstats, 0, nbins * sizeof(malloc_bin_stats_t));
    memset(astats->lstats, 0, nlclasses * sizeof(malloc_large_stats_t));
#endif
}

#ifdef JEMALLOC_STATS
static void
ctl_arena_stats_amerge(ctl_arena_stats_t *cstats, arena_t *arena)
{
    unsigned i;

    arena_stats_merge(arena, &cstats->pactive, &cstats->pdirty,
        &cstats->astats, cstats->bstats, cstats->lstats);

    for (i = 0; i < nbins; i++) {
        cstats->allocated_small += cstats->bstats[i].allocated;
        cstats->nmalloc_small += cstats->bstats[i].nmalloc;
        cstats->ndalloc_small += cstats->bstats[i].ndalloc;
        cstats->nrequests_small += cstats->bstats[i].nrequests;
    }
}

static void
ctl_arena_stats_smerge(ctl_arena_stats_t *sstats, ctl_arena_stats_t *astats)
{
    unsigned i;

    sstats->pactive += astats->pactive;
    sstats->pdirty += astats->pdirty;

    sstats->astats.mapped += astats->astats.mapped;
    sstats->astats.npurge += astats->astats.npurge;
    sstats->astats.nmadvise += astats->astats.nmadvise;
    sstats->astats.purged += astats->astats.purged;

    sstats->allocated_small += astats->allocated_small;
    sstats->nmalloc_small += astats->nmalloc_small;
    sstats->ndalloc_small += astats->ndalloc_small;
    sstats->nrequests_small += astats->nrequests_small;

    sstats->astats.allocated_large += astats->astats.allocated_large;
    sstats->astats.nmalloc_large += astats->astats.nmalloc_large;
    sstats->astats.ndalloc_large += astats->astats.ndalloc_large;
    sstats->astats.nrequests_large += astats->astats.nrequests_large;

    for (i = 0; i < nlclasses; i++) {
        sstats->lstats[i].nmalloc += astats->lstats[i].nmalloc;
        sstats->lstats[i].ndalloc += astats->lstats[i].ndalloc;
        sstats->lstats[i].nrequests += astats->lstats[i].nrequests;
        sstats->lstats[i].highruns += astats->lstats[i].highruns;
        sstats->lstats[i].curruns += astats->lstats[i].curruns;
    }

    for (i = 0; i < nbins; i++) {
        sstats->bstats[i].allocated += astats->bstats[i].allocated;
        sstats->bstats[i].nmalloc += astats->bstats[i].nmalloc;
        sstats->bstats[i].ndalloc += astats->bstats[i].ndalloc;
        sstats->bstats[i].nrequests += astats->bstats[i].nrequests;
#ifdef JEMALLOC_TCACHE
        sstats->bstats[i].nfills += astats->bstats[i].nfills;
        sstats->bstats[i].nflushes += astats->bstats[i].nflushes;
#endif
        sstats->bstats[i].nruns += astats->bstats[i].nruns;
        sstats->bstats[i].reruns += astats->bstats[i].reruns;
        sstats->bstats[i].highruns += astats->bstats[i].highruns;
        sstats->bstats[i].curruns += astats->bstats[i].curruns;
    }
}
#endif

static void
ctl_arena_refresh(arena_t *arena, unsigned i)
{
    ctl_arena_stats_t *astats = &ctl_stats.arenas[i];
    ctl_arena_stats_t *sstats = &ctl_stats.arenas[narenas];

    ctl_arena_clear(astats);

    sstats->nthreads += astats->nthreads;
#ifdef JEMALLOC_STATS
    ctl_arena_stats_amerge(astats, arena);
    /* Merge into sum stats as well. */
    ctl_arena_stats_smerge(sstats, astats);
#else
    astats->pactive += arena->nactive;
    astats->pdirty += arena->ndirty;
    /* Merge into sum stats as well. */
    sstats->pactive += arena->nactive;
    sstats->pdirty += arena->ndirty;
#endif
}

static void
ctl_refresh(void)
{
    unsigned i;
    arena_t *tarenas[narenas];

#ifdef JEMALLOC_STATS
    malloc_mutex_lock(&chunks_mtx);
    ctl_stats.chunks.current = stats_chunks.curchunks;
    ctl_stats.chunks.total = stats_chunks.nchunks;
    ctl_stats.chunks.high = stats_chunks.highchunks;
    malloc_mutex_unlock(&chunks_mtx);

    malloc_mutex_lock(&huge_mtx);
    ctl_stats.huge.allocated = huge_allocated;
    ctl_stats.huge.nmalloc = huge_nmalloc;
    ctl_stats.huge.ndalloc = huge_ndalloc;
    malloc_mutex_unlock(&huge_mtx);
#endif

    /*
     * Clear the summary stats, since ctl_arena_refresh() merges each
     * arena's stats into them below.
     */
    ctl_stats.arenas[narenas].nthreads = 0;
    ctl_arena_clear(&ctl_stats.arenas[narenas]);

    malloc_mutex_lock(&arenas_lock);
    memcpy(tarenas, arenas, sizeof(arena_t *) * narenas);
    for (i = 0; i < narenas; i++) {
        if (arenas[i] != NULL)
            ctl_stats.arenas[i].nthreads = arenas[i]->nthreads;
        else
            ctl_stats.arenas[i].nthreads = 0;
    }
    malloc_mutex_unlock(&arenas_lock);
    for (i = 0; i < narenas; i++) {
        bool initialized = (tarenas[i] != NULL);

        ctl_stats.arenas[i].initialized = initialized;
        if (initialized)
            ctl_arena_refresh(tarenas[i], i);
    }

#ifdef JEMALLOC_STATS
    ctl_stats.allocated = ctl_stats.arenas[narenas].allocated_small
        + ctl_stats.arenas[narenas].astats.allocated_large
        + ctl_stats.huge.allocated;
    ctl_stats.active = (ctl_stats.arenas[narenas].pactive << PAGE_SHIFT)
        + ctl_stats.huge.allocated;
    ctl_stats.mapped = (ctl_stats.chunks.current << opt_lg_chunk);

# ifdef JEMALLOC_SWAP
    malloc_mutex_lock(&swap_mtx);
    ctl_stats.swap_avail = swap_avail;
    malloc_mutex_unlock(&swap_mtx);
# endif
#endif

    ctl_epoch++;
}

static bool
ctl_init(void)
{
    bool ret;

    malloc_mutex_lock(&ctl_mtx);
    if (ctl_initialized == false) {
#ifdef JEMALLOC_STATS
        unsigned i;
#endif

        /*
         * Allocate space for one extra arena stats element, which
         * contains summed stats across all arenas.
         */
        ctl_stats.arenas = (ctl_arena_stats_t *)base_alloc(
            (narenas + 1) * sizeof(ctl_arena_stats_t));
        if (ctl_stats.arenas == NULL) {
            ret = true;
            goto RETURN;
        }
        memset(ctl_stats.arenas, 0, (narenas + 1) *
            sizeof(ctl_arena_stats_t));

        /*
         * Initialize all stats structures, regardless of whether they
         * ever get used.  Lazy initialization would allow errors to
         * cause inconsistent state to be viewable by the application.
         */
#ifdef JEMALLOC_STATS
        for (i = 0; i <= narenas; i++) {
            if (ctl_arena_init(&ctl_stats.arenas[i])) {
                ret = true;
                goto RETURN;
            }
        }
#endif
        ctl_stats.arenas[narenas].initialized = true;

        ctl_epoch = 0;
        ctl_refresh();
        ctl_initialized = true;
    }

    ret = false;
RETURN:
    malloc_mutex_unlock(&ctl_mtx);
    return (ret);
}

static int
ctl_lookup(const char *name, ctl_node_t const **nodesp, size_t *mibp,
    size_t *depthp)
{
    int ret;
    const char *elm, *tdot, *dot;
    size_t elen, i, j;
    const ctl_node_t *node;

    elm = name;
    /* Equivalent to strchrnul(). */
    dot = ((tdot = strchr(elm, '.')) != NULL) ? tdot : strchr(elm, '\0');
    elen = (size_t)((uintptr_t)dot - (uintptr_t)elm);
    if (elen == 0) {
        ret = ENOENT;
        goto RETURN;
    }
    node = super_root_node;
    for (i = 0; i < *depthp; i++) {
        assert(node->named);
        assert(node->u.named.nchildren > 0);
        if (node->u.named.children[0].named) {
            const ctl_node_t *pnode = node;

            /* Children are named. */
            for (j = 0; j < node->u.named.nchildren; j++) {
                const ctl_node_t *child =
                    &node->u.named.children[j];
                if (strlen(child->u.named.name) == elen
                    && strncmp(elm, child->u.named.name,
                    elen) == 0) {
                    node = child;
                    if (nodesp != NULL)
                        nodesp[i] = node;
                    mibp[i] = j;
                    break;
                }
            }
            if (node == pnode) {
                ret = ENOENT;
                goto RETURN;
            }
        } else {
            unsigned long index;
            const ctl_node_t *inode;

            /* Children are indexed. */
            index = strtoul(elm, NULL, 10);
            if (index == ULONG_MAX) {
                ret = ENOENT;
                goto RETURN;
            }

            inode = &node->u.named.children[0];
            node = inode->u.indexed.index(mibp, *depthp,
                index);
            if (node == NULL) {
                ret = ENOENT;
                goto RETURN;
            }

            if (nodesp != NULL)
                nodesp[i] = node;
            mibp[i] = (size_t)index;
        }

        if (node->ctl != NULL) {
            /* Terminal node. */
            if (*dot != '\0') {
                /*
                 * The name contains more elements than are
                 * in this path through the tree.
                 */
                ret = ENOENT;
                goto RETURN;
            }
            /* Complete lookup successful. */
            *depthp = i + 1;
            break;
        }

        /* Update elm. */
        if (*dot == '\0') {
            /* No more elements. */
            ret = ENOENT;
            goto RETURN;
        }
        elm = &dot[1];
        dot = ((tdot = strchr(elm, '.')) != NULL) ? tdot :
            strchr(elm, '\0');
        elen = (size_t)((uintptr_t)dot - (uintptr_t)elm);
    }

    ret = 0;
RETURN:
    return (ret);
}

int
ctl_byname(const char *name, void *oldp, size_t *oldlenp, void *newp,
    size_t newlen)
{
    int ret;
    size_t depth;
    ctl_node_t const *nodes[CTL_MAX_DEPTH];
    size_t mib[CTL_MAX_DEPTH];

    if (ctl_initialized == false && ctl_init()) {
        ret = EAGAIN;
        goto RETURN;
    }

    depth = CTL_MAX_DEPTH;
    ret = ctl_lookup(name, nodes, mib, &depth);
    if (ret != 0)
        goto RETURN;

    if (nodes[depth-1]->ctl == NULL) {
        /* The name refers to a partial path through the ctl tree. */
        ret = ENOENT;
        goto RETURN;
    }

    ret = nodes[depth-1]->ctl(mib, depth, oldp, oldlenp, newp, newlen);
RETURN:
    return (ret);
}
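
/*
 * Example usage (an illustrative sketch, not part of this file): reading
 * the value of "arenas.narenas" through ctl_byname():
 *
 *     unsigned n;
 *     size_t len = sizeof(n);
 *     if (ctl_byname("arenas.narenas", &n, &len, NULL, 0) == 0) {
 *         ... use n ...
 *     }
 */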

int
ctl_nametomib(const char *name, size_t *mibp, size_t *miblenp)
{
    int ret;

    if (ctl_initialized == false && ctl_init()) {
        ret = EAGAIN;
        goto RETURN;
    }

    ret = ctl_lookup(name, NULL, mibp, miblenp);
RETURN:
    return (ret);
}
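
/*
 * Example usage (illustrative sketch): translate a name to a MIB once,
 * then issue repeated queries via ctl_bymib() to skip the string lookup:
 *
 *     size_t mib[CTL_MAX_DEPTH], miblen = CTL_MAX_DEPTH;
 *     if (ctl_nametomib("stats.arenas.0.pdirty", mib, &miblen) == 0) {
 *         size_t pdirty, len = sizeof(pdirty);
 *         ctl_bymib(mib, miblen, &pdirty, &len, NULL, 0);
 *     }
 */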

int
ctl_bymib(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp,
    void *newp, size_t newlen)
{
    int ret;
    const ctl_node_t *node;
    size_t i;

    if (ctl_initialized == false && ctl_init()) {
        ret = EAGAIN;
        goto RETURN;
    }

    /* Iterate down the tree. */
    node = super_root_node;
    for (i = 0; i < miblen; i++) {
        if (node->u.named.children[0].named) {
            /* Children are named. */
            if (node->u.named.nchildren <= mib[i]) {
                ret = ENOENT;
                goto RETURN;
            }
            node = &node->u.named.children[mib[i]];
        } else {
            const ctl_node_t *inode;

            /* Indexed element. */
            inode = &node->u.named.children[0];
            node = inode->u.indexed.index(mib, miblen, mib[i]);
            if (node == NULL) {
                ret = ENOENT;
                goto RETURN;
            }
        }
    }

    /* Call the ctl function. */
    if (node->ctl == NULL) {
        /* Partial MIB. */
        ret = ENOENT;
        goto RETURN;
    }
    ret = node->ctl(mib, miblen, oldp, oldlenp, newp, newlen);

RETURN:
    return (ret);
}

bool
ctl_boot(void)
{

    if (malloc_mutex_init(&ctl_mtx))
        return (true);

    ctl_initialized = false;

    return (false);
}
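
/*
 * ctl_boot() only initializes ctl_mtx; the ctl_stats tables are built
 * lazily by ctl_init() on the first ctl_byname()/ctl_nametomib()/
 * ctl_bymib() call, so ctl_boot() must run before any of those entry
 * points is used.
 */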

/******************************************************************************/
/* *_ctl() functions. */

#define READONLY() do { \
    if (newp != NULL || newlen != 0) { \
        ret = EPERM; \
        goto RETURN; \
    } \
} while (0)

#define WRITEONLY() do { \
    if (oldp != NULL || oldlenp != NULL) { \
        ret = EPERM; \
        goto RETURN; \
    } \
} while (0)

#define VOID() do { \
    READONLY(); \
    WRITEONLY(); \
} while (0)

#define READ(v, t) do { \
    if (oldp != NULL && oldlenp != NULL) { \
        if (*oldlenp != sizeof(t)) { \
            size_t copylen = (sizeof(t) <= *oldlenp) \
                ? sizeof(t) : *oldlenp; \
            memcpy(oldp, (void *)&v, copylen); \
            ret = EINVAL; \
            goto RETURN; \
        } else \
            *(t *)oldp = v; \
    } \
} while (0)

#define WRITE(v, t) do { \
    if (newp != NULL) { \
        if (newlen != sizeof(t)) { \
            ret = EINVAL; \
            goto RETURN; \
        } \
        v = *(t *)newp; \
    } \
} while (0)
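
/*
 * Illustrative semantics of the macros above: READ() copies a value of
 * type t out to the caller, but if *oldlenp does not match sizeof(t) it
 * copies only as many bytes as fit and fails with EINVAL, so callers are
 * expected to set *oldlenp = sizeof(t) beforehand.  WRITE() symmetrically
 * requires newlen == sizeof(t) for any non-NULL newp.
 */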

#define CTL_RO_GEN(n, v, t) \
static int \
n##_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp, \
    void *newp, size_t newlen) \
{ \
    int ret; \
    t oldval; \
\
    malloc_mutex_lock(&ctl_mtx); \
    READONLY(); \
    oldval = v; \
    READ(oldval, t); \
\
    ret = 0; \
RETURN: \
    malloc_mutex_unlock(&ctl_mtx); \
    return (ret); \
}

/*
 * ctl_mtx is not acquired, under the assumption that no pertinent data will
 * mutate during the call.
 */
#define CTL_RO_NL_GEN(n, v, t) \
static int \
n##_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp, \
    void *newp, size_t newlen) \
{ \
    int ret; \
    t oldval; \
\
    READONLY(); \
    oldval = v; \
    READ(oldval, t); \
\
    ret = 0; \
RETURN: \
    return (ret); \
}

#define CTL_RO_TRUE_GEN(n) \
static int \
n##_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp, \
    void *newp, size_t newlen) \
{ \
    int ret; \
    bool oldval; \
\
    READONLY(); \
    oldval = true; \
    READ(oldval, bool); \
\
    ret = 0; \
RETURN: \
    return (ret); \
}

#define CTL_RO_FALSE_GEN(n) \
static int \
n##_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp, \
    void *newp, size_t newlen) \
{ \
    int ret; \
    bool oldval; \
\
    READONLY(); \
    oldval = false; \
    READ(oldval, bool); \
\
    ret = 0; \
RETURN: \
    return (ret); \
}
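
/*
 * For example (illustrative), CTL_RO_NL_GEN(version, JEMALLOC_VERSION,
 * const char *) below generates version_ctl(), which rejects writes via
 * READONLY() and copies the version string pointer out through READ().
 */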

CTL_RO_NL_GEN(version, JEMALLOC_VERSION, const char *)

static int
epoch_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp,
    void *newp, size_t newlen)
{
    int ret;
    uint64_t newval;

    malloc_mutex_lock(&ctl_mtx);
    newval = 0;
    WRITE(newval, uint64_t);
    if (newval != 0)
        ctl_refresh();
    READ(ctl_epoch, uint64_t);

    ret = 0;
RETURN:
    malloc_mutex_unlock(&ctl_mtx);
    return (ret);
}
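
/*
 * Example usage (illustrative sketch): stats readers typically bump the
 * epoch first, so that subsequent stats.* reads are refreshed and
 * mutually consistent:
 *
 *     uint64_t epoch = 1;
 *     size_t len = sizeof(epoch);
 *     ctl_byname("epoch", &epoch, &len, &epoch, len);
 */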

#ifdef JEMALLOC_TCACHE
static int
tcache_flush_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp,
    void *newp, size_t newlen)
{
    int ret;
    tcache_t *tcache;

    VOID();

    tcache = TCACHE_GET();
    if (tcache == NULL) {
        ret = 0;
        goto RETURN;
    }
    tcache_destroy(tcache);
    TCACHE_SET(NULL);

    ret = 0;
RETURN:
    return (ret);
}
#endif

static int
thread_arena_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp,
    void *newp, size_t newlen)
{
    int ret;
    unsigned newind, oldind;

    newind = oldind = choose_arena()->ind;
    WRITE(newind, unsigned);
    READ(oldind, unsigned);
    if (newind != oldind) {
        arena_t *arena;

        if (newind >= narenas) {
            /* New arena index is out of range. */
            ret = EFAULT;
            goto RETURN;
        }

        /* Initialize arena if necessary. */
        malloc_mutex_lock(&arenas_lock);
        if ((arena = arenas[newind]) == NULL)
            arena = arenas_extend(newind);
        arenas[oldind]->nthreads--;
        arenas[newind]->nthreads++;
        malloc_mutex_unlock(&arenas_lock);
        if (arena == NULL) {
            ret = EAGAIN;
            goto RETURN;
        }

        /* Set new arena association. */
        ARENA_SET(arena);
        {
            tcache_t *tcache = TCACHE_GET();
            if (tcache != NULL)
                tcache->arena = arena;
        }
    }

    ret = 0;
RETURN:
    return (ret);
}
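
/*
 * Example usage (illustrative sketch): binding the calling thread to
 * arena 0:
 *
 *     unsigned ind = 0;
 *     ctl_byname("thread.arena", NULL, NULL, &ind, sizeof(ind));
 */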

#ifdef JEMALLOC_STATS
CTL_RO_NL_GEN(thread_allocated, ALLOCATED_GET(), uint64_t)
CTL_RO_NL_GEN(thread_allocatedp, ALLOCATEDP_GET(), uint64_t *)
CTL_RO_NL_GEN(thread_deallocated, DEALLOCATED_GET(), uint64_t)
CTL_RO_NL_GEN(thread_deallocatedp, DEALLOCATEDP_GET(), uint64_t *)
#endif

/******************************************************************************/

#ifdef JEMALLOC_DEBUG
CTL_RO_TRUE_GEN(config_debug)
#else
CTL_RO_FALSE_GEN(config_debug)
#endif

#ifdef JEMALLOC_DSS
CTL_RO_TRUE_GEN(config_dss)
#else
CTL_RO_FALSE_GEN(config_dss)
#endif

#ifdef JEMALLOC_DYNAMIC_PAGE_SHIFT
CTL_RO_TRUE_GEN(config_dynamic_page_shift)
#else
CTL_RO_FALSE_GEN(config_dynamic_page_shift)
#endif

#ifdef JEMALLOC_FILL
CTL_RO_TRUE_GEN(config_fill)
#else
CTL_RO_FALSE_GEN(config_fill)
#endif

#ifdef JEMALLOC_LAZY_LOCK
CTL_RO_TRUE_GEN(config_lazy_lock)
#else
CTL_RO_FALSE_GEN(config_lazy_lock)
#endif

#ifdef JEMALLOC_PROF
CTL_RO_TRUE_GEN(config_prof)
#else
CTL_RO_FALSE_GEN(config_prof)
#endif

#ifdef JEMALLOC_PROF_LIBGCC
CTL_RO_TRUE_GEN(config_prof_libgcc)
#else
CTL_RO_FALSE_GEN(config_prof_libgcc)
#endif

#ifdef JEMALLOC_PROF_LIBUNWIND
CTL_RO_TRUE_GEN(config_prof_libunwind)
#else
CTL_RO_FALSE_GEN(config_prof_libunwind)
#endif

#ifdef JEMALLOC_STATS
CTL_RO_TRUE_GEN(config_stats)
#else
CTL_RO_FALSE_GEN(config_stats)
#endif

#ifdef JEMALLOC_SWAP
CTL_RO_TRUE_GEN(config_swap)
#else
CTL_RO_FALSE_GEN(config_swap)
#endif

#ifdef JEMALLOC_SYSV
CTL_RO_TRUE_GEN(config_sysv)
#else
CTL_RO_FALSE_GEN(config_sysv)
#endif

#ifdef JEMALLOC_TCACHE
CTL_RO_TRUE_GEN(config_tcache)
#else
CTL_RO_FALSE_GEN(config_tcache)
#endif

#ifdef JEMALLOC_TINY
CTL_RO_TRUE_GEN(config_tiny)
#else
CTL_RO_FALSE_GEN(config_tiny)
#endif

#ifdef JEMALLOC_TLS
CTL_RO_TRUE_GEN(config_tls)
#else
CTL_RO_FALSE_GEN(config_tls)
#endif

#ifdef JEMALLOC_XMALLOC
CTL_RO_TRUE_GEN(config_xmalloc)
#else
CTL_RO_FALSE_GEN(config_xmalloc)
#endif

/******************************************************************************/

CTL_RO_NL_GEN(opt_abort, opt_abort, bool)
CTL_RO_NL_GEN(opt_lg_qspace_max, opt_lg_qspace_max, size_t)
CTL_RO_NL_GEN(opt_lg_cspace_max, opt_lg_cspace_max, size_t)
CTL_RO_NL_GEN(opt_lg_chunk, opt_lg_chunk, size_t)
CTL_RO_NL_GEN(opt_narenas, opt_narenas, size_t)
CTL_RO_NL_GEN(opt_lg_dirty_mult, opt_lg_dirty_mult, ssize_t)
CTL_RO_NL_GEN(opt_stats_print, opt_stats_print, bool)
#ifdef JEMALLOC_FILL
CTL_RO_NL_GEN(opt_junk, opt_junk, bool)
CTL_RO_NL_GEN(opt_zero, opt_zero, bool)
#endif
#ifdef JEMALLOC_SYSV
CTL_RO_NL_GEN(opt_sysv, opt_sysv, bool)
#endif
#ifdef JEMALLOC_XMALLOC
CTL_RO_NL_GEN(opt_xmalloc, opt_xmalloc, bool)
#endif
#ifdef JEMALLOC_TCACHE
CTL_RO_NL_GEN(opt_tcache, opt_tcache, bool)
CTL_RO_NL_GEN(opt_lg_tcache_gc_sweep, opt_lg_tcache_gc_sweep, ssize_t)
#endif
#ifdef JEMALLOC_PROF
CTL_RO_NL_GEN(opt_prof, opt_prof, bool)
CTL_RO_NL_GEN(opt_prof_prefix, opt_prof_prefix, const char *)
CTL_RO_GEN(opt_prof_active, opt_prof_active, bool) /* Mutable. */
CTL_RO_NL_GEN(opt_lg_prof_bt_max, opt_lg_prof_bt_max, size_t)
CTL_RO_NL_GEN(opt_lg_prof_sample, opt_lg_prof_sample, size_t)
CTL_RO_NL_GEN(opt_lg_prof_interval, opt_lg_prof_interval, ssize_t)
CTL_RO_NL_GEN(opt_prof_gdump, opt_prof_gdump, bool)
CTL_RO_NL_GEN(opt_prof_leak, opt_prof_leak, bool)
CTL_RO_NL_GEN(opt_prof_accum, opt_prof_accum, bool)
CTL_RO_NL_GEN(opt_lg_prof_tcmax, opt_lg_prof_tcmax, ssize_t)
#endif
#ifdef JEMALLOC_SWAP
CTL_RO_NL_GEN(opt_overcommit, opt_overcommit, bool)
#endif

/******************************************************************************/

CTL_RO_NL_GEN(arenas_bin_i_size, arena_bin_info[mib[2]].reg_size, size_t)
CTL_RO_NL_GEN(arenas_bin_i_nregs, arena_bin_info[mib[2]].nregs, uint32_t)
CTL_RO_NL_GEN(arenas_bin_i_run_size, arena_bin_info[mib[2]].run_size, size_t)
const ctl_node_t *
arenas_bin_i_index(const size_t *mib, size_t miblen, size_t i)
{

    if (i > nbins)
        return (NULL);
    return (super_arenas_bin_i_node);
}

CTL_RO_NL_GEN(arenas_lrun_i_size, ((mib[2]+1) << PAGE_SHIFT), size_t)
const ctl_node_t *
arenas_lrun_i_index(const size_t *mib, size_t miblen, size_t i)
{

    if (i > nlclasses)
        return (NULL);
    return (super_arenas_lrun_i_node);
}

CTL_RO_NL_GEN(arenas_narenas, narenas, unsigned)

static int
arenas_initialized_ctl(const size_t *mib, size_t miblen, void *oldp,
    size_t *oldlenp, void *newp, size_t newlen)
{
    int ret;
    unsigned nread, i;

    malloc_mutex_lock(&ctl_mtx);
    READONLY();
    if (*oldlenp != narenas * sizeof(bool)) {
        ret = EINVAL;
        nread = (*oldlenp < narenas * sizeof(bool))
            ? (*oldlenp / sizeof(bool)) : narenas;
    } else {
        ret = 0;
        nread = narenas;
    }

    for (i = 0; i < nread; i++)
        ((bool *)oldp)[i] = ctl_stats.arenas[i].initialized;

RETURN:
    malloc_mutex_unlock(&ctl_mtx);
    return (ret);
}

CTL_RO_NL_GEN(arenas_quantum, QUANTUM, size_t)
CTL_RO_NL_GEN(arenas_cacheline, CACHELINE, size_t)
CTL_RO_NL_GEN(arenas_subpage, SUBPAGE, size_t)
CTL_RO_NL_GEN(arenas_pagesize, PAGE_SIZE, size_t)
CTL_RO_NL_GEN(arenas_chunksize, chunksize, size_t)
#ifdef JEMALLOC_TINY
CTL_RO_NL_GEN(arenas_tspace_min, (1U << LG_TINY_MIN), size_t)
CTL_RO_NL_GEN(arenas_tspace_max, (qspace_min >> 1), size_t)
#endif
CTL_RO_NL_GEN(arenas_qspace_min, qspace_min, size_t)
CTL_RO_NL_GEN(arenas_qspace_max, qspace_max, size_t)
CTL_RO_NL_GEN(arenas_cspace_min, cspace_min, size_t)
CTL_RO_NL_GEN(arenas_cspace_max, cspace_max, size_t)
CTL_RO_NL_GEN(arenas_sspace_min, sspace_min, size_t)
CTL_RO_NL_GEN(arenas_sspace_max, sspace_max, size_t)
#ifdef JEMALLOC_TCACHE
CTL_RO_NL_GEN(arenas_tcache_max, tcache_maxclass, size_t)
#endif
CTL_RO_NL_GEN(arenas_ntbins, ntbins, unsigned)
CTL_RO_NL_GEN(arenas_nqbins, nqbins, unsigned)
CTL_RO_NL_GEN(arenas_ncbins, ncbins, unsigned)
CTL_RO_NL_GEN(arenas_nsbins, nsbins, unsigned)
CTL_RO_NL_GEN(arenas_nbins, nbins, unsigned)
#ifdef JEMALLOC_TCACHE
CTL_RO_NL_GEN(arenas_nhbins, nhbins, unsigned)
#endif
CTL_RO_NL_GEN(arenas_nlruns, nlclasses, size_t)

static int
arenas_purge_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp,
    void *newp, size_t newlen)
{
    int ret;
    unsigned arena;

    WRITEONLY();
    arena = UINT_MAX;
    WRITE(arena, unsigned);
    if (newp != NULL && arena >= narenas) {
        ret = EFAULT;
        goto RETURN;
    } else {
        arena_t *tarenas[narenas];

        malloc_mutex_lock(&arenas_lock);
        memcpy(tarenas, arenas, sizeof(arena_t *) * narenas);
        malloc_mutex_unlock(&arenas_lock);

        if (arena == UINT_MAX) {
            unsigned i;
            for (i = 0; i < narenas; i++) {
                if (tarenas[i] != NULL)
                    arena_purge_all(tarenas[i]);
            }
        } else {
            assert(arena < narenas);
            if (tarenas[arena] != NULL)
                arena_purge_all(tarenas[arena]);
        }
    }

    ret = 0;
RETURN:
    return (ret);
}
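
/*
 * Example usage (illustrative sketch): writing UINT_MAX purges all
 * arenas, while any valid index purges just that arena:
 *
 *     unsigned arena = UINT_MAX;
 *     ctl_byname("arenas.purge", NULL, NULL, &arena, sizeof(arena));
 */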

/******************************************************************************/

#ifdef JEMALLOC_PROF
static int
prof_active_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp,
    void *newp, size_t newlen)
{
    int ret;
    bool oldval;

    malloc_mutex_lock(&ctl_mtx); /* Protect opt_prof_active. */
    oldval = opt_prof_active;
    if (newp != NULL) {
        /*
         * The memory barriers will tend to make opt_prof_active
         * propagate faster on systems with weak memory ordering.
         */
        mb_write();
        WRITE(opt_prof_active, bool);
        mb_write();
    }
    READ(oldval, bool);

    ret = 0;
RETURN:
    malloc_mutex_unlock(&ctl_mtx);
    return (ret);
}

static int
prof_dump_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp,
    void *newp, size_t newlen)
{
    int ret;
    const char *filename = NULL;

    WRITEONLY();
    WRITE(filename, const char *);

    if (prof_mdump(filename)) {
        ret = EFAULT;
        goto RETURN;
    }

    ret = 0;
RETURN:
    return (ret);
}

CTL_RO_NL_GEN(prof_interval, prof_interval, uint64_t)
#endif

/******************************************************************************/

#ifdef JEMALLOC_STATS
CTL_RO_GEN(stats_chunks_current, ctl_stats.chunks.current, size_t)
CTL_RO_GEN(stats_chunks_total, ctl_stats.chunks.total, uint64_t)
CTL_RO_GEN(stats_chunks_high, ctl_stats.chunks.high, size_t)
CTL_RO_GEN(stats_huge_allocated, huge_allocated, size_t)
CTL_RO_GEN(stats_huge_nmalloc, huge_nmalloc, uint64_t)
CTL_RO_GEN(stats_huge_ndalloc, huge_ndalloc, uint64_t)
CTL_RO_GEN(stats_arenas_i_small_allocated,
    ctl_stats.arenas[mib[2]].allocated_small, size_t)
CTL_RO_GEN(stats_arenas_i_small_nmalloc,
    ctl_stats.arenas[mib[2]].nmalloc_small, uint64_t)
CTL_RO_GEN(stats_arenas_i_small_ndalloc,
    ctl_stats.arenas[mib[2]].ndalloc_small, uint64_t)
CTL_RO_GEN(stats_arenas_i_small_nrequests,
    ctl_stats.arenas[mib[2]].nrequests_small, uint64_t)
CTL_RO_GEN(stats_arenas_i_large_allocated,
    ctl_stats.arenas[mib[2]].astats.allocated_large, size_t)
CTL_RO_GEN(stats_arenas_i_large_nmalloc,
    ctl_stats.arenas[mib[2]].astats.nmalloc_large, uint64_t)
CTL_RO_GEN(stats_arenas_i_large_ndalloc,
    ctl_stats.arenas[mib[2]].astats.ndalloc_large, uint64_t)
CTL_RO_GEN(stats_arenas_i_large_nrequests,
    ctl_stats.arenas[mib[2]].astats.nrequests_large, uint64_t)

CTL_RO_GEN(stats_arenas_i_bins_j_allocated,
    ctl_stats.arenas[mib[2]].bstats[mib[4]].allocated, size_t)
CTL_RO_GEN(stats_arenas_i_bins_j_nmalloc,
    ctl_stats.arenas[mib[2]].bstats[mib[4]].nmalloc, uint64_t)
CTL_RO_GEN(stats_arenas_i_bins_j_ndalloc,
    ctl_stats.arenas[mib[2]].bstats[mib[4]].ndalloc, uint64_t)
CTL_RO_GEN(stats_arenas_i_bins_j_nrequests,
    ctl_stats.arenas[mib[2]].bstats[mib[4]].nrequests, uint64_t)
#ifdef JEMALLOC_TCACHE
CTL_RO_GEN(stats_arenas_i_bins_j_nfills,
    ctl_stats.arenas[mib[2]].bstats[mib[4]].nfills, uint64_t)
CTL_RO_GEN(stats_arenas_i_bins_j_nflushes,
    ctl_stats.arenas[mib[2]].bstats[mib[4]].nflushes, uint64_t)
#endif
CTL_RO_GEN(stats_arenas_i_bins_j_nruns,
    ctl_stats.arenas[mib[2]].bstats[mib[4]].nruns, uint64_t)
CTL_RO_GEN(stats_arenas_i_bins_j_nreruns,
    ctl_stats.arenas[mib[2]].bstats[mib[4]].reruns, uint64_t)
CTL_RO_GEN(stats_arenas_i_bins_j_highruns,
    ctl_stats.arenas[mib[2]].bstats[mib[4]].highruns, size_t)
CTL_RO_GEN(stats_arenas_i_bins_j_curruns,
    ctl_stats.arenas[mib[2]].bstats[mib[4]].curruns, size_t)

const ctl_node_t *
stats_arenas_i_bins_j_index(const size_t *mib, size_t miblen, size_t j)
{

    if (j > nbins)
        return (NULL);
    return (super_stats_arenas_i_bins_j_node);
}

CTL_RO_GEN(stats_arenas_i_lruns_j_nmalloc,
    ctl_stats.arenas[mib[2]].lstats[mib[4]].nmalloc, uint64_t)
CTL_RO_GEN(stats_arenas_i_lruns_j_ndalloc,
    ctl_stats.arenas[mib[2]].lstats[mib[4]].ndalloc, uint64_t)
CTL_RO_GEN(stats_arenas_i_lruns_j_nrequests,
    ctl_stats.arenas[mib[2]].lstats[mib[4]].nrequests, uint64_t)
CTL_RO_GEN(stats_arenas_i_lruns_j_curruns,
    ctl_stats.arenas[mib[2]].lstats[mib[4]].curruns, size_t)
CTL_RO_GEN(stats_arenas_i_lruns_j_highruns,
    ctl_stats.arenas[mib[2]].lstats[mib[4]].highruns, size_t)

const ctl_node_t *
stats_arenas_i_lruns_j_index(const size_t *mib, size_t miblen, size_t j)
{

    if (j > nlclasses)
        return (NULL);
    return (super_stats_arenas_i_lruns_j_node);
}

#endif
CTL_RO_GEN(stats_arenas_i_nthreads, ctl_stats.arenas[mib[2]].nthreads, unsigned)
CTL_RO_GEN(stats_arenas_i_pactive, ctl_stats.arenas[mib[2]].pactive, size_t)
CTL_RO_GEN(stats_arenas_i_pdirty, ctl_stats.arenas[mib[2]].pdirty, size_t)
#ifdef JEMALLOC_STATS
CTL_RO_GEN(stats_arenas_i_mapped, ctl_stats.arenas[mib[2]].astats.mapped,
    size_t)
CTL_RO_GEN(stats_arenas_i_npurge, ctl_stats.arenas[mib[2]].astats.npurge,
    uint64_t)
CTL_RO_GEN(stats_arenas_i_nmadvise, ctl_stats.arenas[mib[2]].astats.nmadvise,
    uint64_t)
CTL_RO_GEN(stats_arenas_i_purged, ctl_stats.arenas[mib[2]].astats.purged,
    uint64_t)
#endif

const ctl_node_t *
stats_arenas_i_index(const size_t *mib, size_t miblen, size_t i)
{
    const ctl_node_t *ret;

    malloc_mutex_lock(&ctl_mtx);
    if (ctl_stats.arenas[i].initialized == false) {
        ret = NULL;
        goto RETURN;
    }

    ret = super_stats_arenas_i_node;
RETURN:
    malloc_mutex_unlock(&ctl_mtx);
    return (ret);
}

#ifdef JEMALLOC_STATS
CTL_RO_GEN(stats_cactive, &stats_cactive, size_t *)
CTL_RO_GEN(stats_allocated, ctl_stats.allocated, size_t)
CTL_RO_GEN(stats_active, ctl_stats.active, size_t)
CTL_RO_GEN(stats_mapped, ctl_stats.mapped, size_t)
#endif

/******************************************************************************/

#ifdef JEMALLOC_SWAP
# ifdef JEMALLOC_STATS
CTL_RO_GEN(swap_avail, ctl_stats.swap_avail, size_t)
# endif

static int
swap_prezeroed_ctl(const size_t *mib, size_t miblen, void *oldp,
    size_t *oldlenp, void *newp, size_t newlen)
{
    int ret;

    malloc_mutex_lock(&ctl_mtx);
    if (swap_enabled) {
        READONLY();
    } else {
        /*
         * swap_prezeroed isn't actually used by the swap code until it
         * is set during a successful chunk_swap_enable() call.  We use
         * it here to store the value that we'll pass to
         * chunk_swap_enable() in a swap.fds mallctl().  This is not
         * very clean, but the obvious alternatives are even worse.
         */
        WRITE(swap_prezeroed, bool);
    }

    READ(swap_prezeroed, bool);

    ret = 0;
RETURN:
    malloc_mutex_unlock(&ctl_mtx);
    return (ret);
}

CTL_RO_GEN(swap_nfds, swap_nfds, size_t)

static int
swap_fds_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp,
    void *newp, size_t newlen)
{
    int ret;

    malloc_mutex_lock(&ctl_mtx);
    if (swap_enabled) {
        READONLY();
    } else if (newp != NULL) {
        size_t nfds = newlen / sizeof(int);

        {
            int fds[nfds];

            memcpy(fds, newp, nfds * sizeof(int));
            if (chunk_swap_enable(fds, nfds, swap_prezeroed)) {
                ret = EFAULT;
                goto RETURN;
            }
        }
    }

    if (oldp != NULL && oldlenp != NULL) {
        if (*oldlenp != swap_nfds * sizeof(int)) {
            size_t copylen = (swap_nfds * sizeof(int) <= *oldlenp)
                ? swap_nfds * sizeof(int) : *oldlenp;

            memcpy(oldp, swap_fds, copylen);
            ret = EINVAL;
            goto RETURN;
        } else
            memcpy(oldp, swap_fds, *oldlenp);
    }

    ret = 0;
RETURN:
    malloc_mutex_unlock(&ctl_mtx);
    return (ret);
}
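
/*
 * Example usage (illustrative sketch; fd0 and fd1 are hypothetical file
 * descriptors): handing two swap files to the allocator before swap has
 * been enabled:
 *
 *     int fds[2] = {fd0, fd1};
 *     ctl_byname("swap.fds", NULL, NULL, fds, sizeof(fds));
 */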
#endif