]> git.saurik.com Git - redis.git/blob - deps/jemalloc/include/jemalloc/internal/stats.h
2a9b31d9ffc8a7d8b7d5b79ff9365bf41ad4c7d7
[redis.git] / deps / jemalloc / include / jemalloc / internal / stats.h
/******************************************************************************/
#ifdef JEMALLOC_H_TYPES

/*
 * Buffer size required by u2s(): a uint64_t rendered in base 2 needs 64
 * digits, plus one byte for the terminating NUL.
 */
#define UMAX2S_BUFSIZE 65

#ifdef JEMALLOC_STATS
/* Forward typedefs for the statistics structures defined in the structs
 * section below. */
typedef struct tcache_bin_stats_s tcache_bin_stats_t;
typedef struct malloc_bin_stats_s malloc_bin_stats_t;
typedef struct malloc_large_stats_s malloc_large_stats_t;
typedef struct arena_stats_s arena_stats_t;
#endif
#if (defined(JEMALLOC_STATS) || defined(JEMALLOC_PROF))
/* chunk_stats_t is needed by profiling as well as statistics. */
typedef struct chunk_stats_s chunk_stats_t;
#endif

#endif /* JEMALLOC_H_TYPES */
17 /******************************************************************************/
#ifdef JEMALLOC_H_STRUCTS

#ifdef JEMALLOC_STATS

#ifdef JEMALLOC_TCACHE
/* Per-bin allocation statistics kept inside each thread cache. */
struct tcache_bin_stats_s {
	/*
	 * Number of allocation requests that corresponded to the size of this
	 * bin.
	 */
	uint64_t nrequests;
};
#endif
31
/* Per-arena statistics for one small-object size-class bin. */
struct malloc_bin_stats_s {
	/*
	 * Current number of bytes allocated, including objects currently
	 * cached by tcache.
	 */
	size_t allocated;

	/*
	 * Total number of allocation/deallocation requests served directly by
	 * the bin.  Note that tcache may allocate an object, then recycle it
	 * many times, resulting in many increments to nrequests, but only one
	 * each to nmalloc and ndalloc.
	 */
	uint64_t nmalloc;
	uint64_t ndalloc;

	/*
	 * Number of allocation requests that correspond to the size of this
	 * bin.  This includes requests served by tcache, though tcache only
	 * periodically merges into this counter.
	 */
	uint64_t nrequests;

#ifdef JEMALLOC_TCACHE
	/* Number of tcache fills from this bin. */
	uint64_t nfills;

	/* Number of tcache flushes to this bin. */
	uint64_t nflushes;
#endif

	/* Total number of runs created for this bin's size class. */
	uint64_t nruns;

	/*
	 * Total number of runs reused by extracting them from the runs tree
	 * for this bin's size class.
	 */
	uint64_t reruns;

	/* High-water mark for this bin. */
	size_t highruns;

	/* Current number of runs in this bin. */
	size_t curruns;
};
78
/* Per-arena statistics for one large-object size class. */
struct malloc_large_stats_s {
	/*
	 * Total number of allocation/deallocation requests served directly by
	 * the arena.  Note that tcache may allocate an object, then recycle it
	 * many times, resulting in many increments to nrequests, but only one
	 * each to nmalloc and ndalloc.
	 */
	uint64_t nmalloc;
	uint64_t ndalloc;

	/*
	 * Number of allocation requests that correspond to this size class.
	 * This includes requests served by tcache, though tcache only
	 * periodically merges into this counter.
	 */
	uint64_t nrequests;

	/* High-water mark for this size class. */
	size_t highruns;

	/* Current number of runs of this size class. */
	size_t curruns;
};
102
/* Aggregate statistics for a single arena. */
struct arena_stats_s {
	/* Number of bytes currently mapped. */
	size_t mapped;

	/*
	 * Total number of purge sweeps, total number of madvise calls made,
	 * and total pages purged in order to keep dirty unused memory under
	 * control.
	 */
	uint64_t npurge;
	uint64_t nmadvise;
	uint64_t purged;

	/* Per-size-category statistics. */
	size_t allocated_large;
	uint64_t nmalloc_large;
	uint64_t ndalloc_large;
	uint64_t nrequests_large;

	/*
	 * One element for each possible size class, including sizes that
	 * overlap with bin size classes.  This is necessary because ipalloc()
	 * sometimes has to use such large objects in order to assure proper
	 * alignment.
	 */
	malloc_large_stats_t *lstats;
};
#endif /* JEMALLOC_STATS */
131
#if (defined(JEMALLOC_STATS) || defined(JEMALLOC_PROF))
/* Global chunk-allocation statistics (also used by profiling). */
struct chunk_stats_s {
#  ifdef JEMALLOC_STATS
	/* Number of chunks that were allocated. */
	uint64_t nchunks;
#  endif

	/* High-water mark for number of chunks allocated. */
	size_t highchunks;

	/*
	 * Current number of chunks allocated.  This value isn't maintained for
	 * any other purpose, so keep track of it in order to be able to set
	 * highchunks.
	 */
	size_t curchunks;
};
#endif /* JEMALLOC_STATS || JEMALLOC_PROF */

#endif /* JEMALLOC_H_STRUCTS */
153 #ifdef JEMALLOC_H_EXTERNS
154
155 extern bool opt_stats_print;
156
157 #ifdef JEMALLOC_STATS
158 extern size_t stats_cactive;
159 #endif
160
161 char *u2s(uint64_t x, unsigned base, char *s);
162 #ifdef JEMALLOC_STATS
163 void malloc_cprintf(void (*write)(void *, const char *), void *cbopaque,
164 const char *format, ...) JEMALLOC_ATTR(format(printf, 3, 4));
165 void malloc_printf(const char *format, ...)
166 JEMALLOC_ATTR(format(printf, 1, 2));
167 #endif
168 void stats_print(void (*write)(void *, const char *), void *cbopaque,
169 const char *opts);
170
171 #endif /* JEMALLOC_H_EXTERNS */
172 /******************************************************************************/
#ifdef JEMALLOC_H_INLINES
#ifdef JEMALLOC_STATS

#ifndef JEMALLOC_ENABLE_INLINE
/* Non-inline prototypes, used when inlining is disabled. */
size_t stats_cactive_get(void);
void stats_cactive_add(size_t size);
void stats_cactive_sub(size_t size);
#endif
182 #if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_STATS_C_))
183 JEMALLOC_INLINE size_t
184 stats_cactive_get(void)
185 {
186
187 return (atomic_read_z(&stats_cactive));
188 }

/* Atomically add size bytes to the active-bytes counter. */
JEMALLOC_INLINE void
stats_cactive_add(size_t size)
{

	atomic_add_z(&stats_cactive, size);
}

/* Atomically subtract size bytes from the active-bytes counter. */
JEMALLOC_INLINE void
stats_cactive_sub(size_t size)
{

	atomic_sub_z(&stats_cactive, size);
}
#endif

#endif /* JEMALLOC_STATS */
#endif /* JEMALLOC_H_INLINES */
/******************************************************************************/