#define JEMALLOC_CHUNK_C_
#include "jemalloc/internal/jemalloc_internal.h"

/******************************************************************************/
/* Data. */

const char *opt_dss = DSS_DEFAULT;
size_t opt_lg_chunk = LG_CHUNK_DEFAULT;

malloc_mutex_t chunks_mtx;
chunk_stats_t stats_chunks;

/*
 * Trees of chunks that were previously allocated (trees differ only in node
 * ordering). These are used when allocating chunks, in an attempt to re-use
 * address space. Depending on function, different tree orderings are needed,
 * which is why there are two trees with the same contents.
 */
static extent_tree_t chunks_szad_mmap;
static extent_tree_t chunks_ad_mmap;
static extent_tree_t chunks_szad_dss;
static extent_tree_t chunks_ad_dss;

rtree_t *chunks_rtree;

/* Various chunk-related settings. */
size_t chunksize;
size_t chunksize_mask; /* (chunksize - 1). */
size_t chunk_npages;
size_t map_bias;
size_t arena_maxclass; /* Max size class for arenas. */

/******************************************************************************/
/* Function prototypes for non-inline static functions. */

static void *chunk_recycle(extent_tree_t *chunks_szad,
    extent_tree_t *chunks_ad, size_t size, size_t alignment, bool base,
    bool *zero);
static void chunk_record(extent_tree_t *chunks_szad,
    extent_tree_t *chunks_ad, void *chunk, size_t size);

/******************************************************************************/
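For orientation before the implementations below: the szad trees are searched by size (best fit) in chunk_recycle(), while the ad trees are walked by address when chunk_record() coalesces neighboring extents. Here is a minimal sketch of the two orderings, under the assumption that the szad order compares by size with address as tie-breaker; the sketch_* names and the trimmed-down node type are illustrative, and the real comparators are defined for extent_node_t elsewhere in jemalloc, not in this file.

```c
#include <assert.h>
#include <stddef.h>
#include <stdint.h>

/* Trimmed-down stand-in for extent_node_t (illustrative only). */
typedef struct {
    void   *addr;
    size_t  size;
} sketch_node_t;

/* szad order: by size first (enables best-fit search), ties broken by address. */
static int
sketch_szad_cmp(const sketch_node_t *a, const sketch_node_t *b)
{
    if (a->size != b->size)
        return ((a->size < b->size) ? -1 : 1);
    if (a->addr != b->addr)
        return (((uintptr_t)a->addr < (uintptr_t)b->addr) ? -1 : 1);
    return (0);
}

/* ad order: by address only (enables prev/next neighbor lookups). */
static int
sketch_ad_cmp(const sketch_node_t *a, const sketch_node_t *b)
{
    if (a->addr == b->addr)
        return (0);
    return (((uintptr_t)a->addr < (uintptr_t)b->addr) ? -1 : 1);
}

int
main(void)
{
    sketch_node_t a = { (void *)(uintptr_t)0x1000, 8 };
    sketch_node_t b = { (void *)(uintptr_t)0x2000, 4 };

    assert(sketch_szad_cmp(&a, &b) > 0);  /* a sorts later: larger size */
    assert(sketch_ad_cmp(&a, &b) < 0);    /* a sorts earlier: lower address */
    return (0);
}
```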
static void *
chunk_recycle(extent_tree_t *chunks_szad, extent_tree_t *chunks_ad, size_t size,
    size_t alignment, bool base, bool *zero)
{
    void *ret;
    extent_node_t *node;
    extent_node_t key;
    size_t alloc_size, leadsize, trailsize;
    bool zeroed;

    if (base) {
        /*
         * This function may need to call base_node_{,de}alloc(), but
         * the current chunk allocation request is on behalf of the
         * base allocator. Avoid deadlock (and if that weren't an
         * issue, potential for infinite recursion) by returning NULL.
         */
        return (NULL);
    }

    alloc_size = size + alignment - chunksize;
    /* Beware size_t wrap-around. */
    if (alloc_size < size)
        return (NULL);
    key.addr = NULL;
    key.size = alloc_size;
    malloc_mutex_lock(&chunks_mtx);
    node = extent_tree_szad_nsearch(chunks_szad, &key);
    if (node == NULL) {
        malloc_mutex_unlock(&chunks_mtx);
        return (NULL);
    }
    leadsize = ALIGNMENT_CEILING((uintptr_t)node->addr, alignment) -
        (uintptr_t)node->addr;
    assert(node->size >= leadsize + size);
    trailsize = node->size - leadsize - size;
    ret = (void *)((uintptr_t)node->addr + leadsize);
    /* Remove node from the tree. */
    extent_tree_szad_remove(chunks_szad, node);
    extent_tree_ad_remove(chunks_ad, node);
    if (leadsize != 0) {
        /* Insert the leading space as a smaller chunk. */
        node->size = leadsize;
        extent_tree_szad_insert(chunks_szad, node);
        extent_tree_ad_insert(chunks_ad, node);
        node = NULL;
    }
    if (trailsize != 0) {
        /* Insert the trailing space as a smaller chunk. */
        if (node == NULL) {
            /*
             * An additional node is required, but
             * base_node_alloc() can cause a new base chunk to be
             * allocated. Drop chunks_mtx in order to avoid
             * deadlock, and if node allocation fails, deallocate
             * the result before returning an error.
             */
            malloc_mutex_unlock(&chunks_mtx);
            node = base_node_alloc();
            if (node == NULL) {
                chunk_dealloc(ret, size, true);
                return (NULL);
            }
            malloc_mutex_lock(&chunks_mtx);
        }
        node->addr = (void *)((uintptr_t)(ret) + size);
        node->size = trailsize;
        extent_tree_szad_insert(chunks_szad, node);
        extent_tree_ad_insert(chunks_ad, node);
        node = NULL;
    }
    malloc_mutex_unlock(&chunks_mtx);

    zeroed = false;
    if (node != NULL) {
        if (node->zeroed) {
            zeroed = true;
            *zero = true;
        }
        base_node_dealloc(node);
    }
    if (zeroed == false && *zero) {
        VALGRIND_MAKE_MEM_UNDEFINED(ret, size);
        memset(ret, 0, size);
    }
    return (ret);
}
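The over-allocation (alloc_size = size + alignment - chunksize) and the lead/trail split in chunk_recycle() above are easier to follow with concrete numbers. The following standalone sketch redoes the same arithmetic with made-up values; ALIGNMENT_CEILING is redefined locally as a stand-in for jemalloc's macro so the example compiles on its own, and the address assumes a 64-bit process.

```c
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* Local stand-in for jemalloc's ALIGNMENT_CEILING() (power-of-two alignment). */
#define ALIGNMENT_CEILING(s, alignment) \
    (((s) + ((alignment) - 1)) & ~((uintptr_t)(alignment) - 1))

int
main(void)
{
    /* Made-up recycled extent: 16 MiB, 4 MiB-aligned but not 8 MiB-aligned. */
    uintptr_t addr = (uintptr_t)0x7f0000400000;
    size_t node_size = (size_t)16 << 20;

    /* Request: one 8 MiB region, 8 MiB-aligned, with chunksize == 4 MiB. */
    size_t size = (size_t)8 << 20;
    size_t alignment = (size_t)8 << 20;
    size_t chunksize = (size_t)4 << 20;

    /* Worst-case slack needed to carve an aligned region out of an extent. */
    size_t alloc_size = size + alignment - chunksize;   /* 12 MiB here */
    assert(node_size >= alloc_size);

    size_t leadsize = ALIGNMENT_CEILING(addr, alignment) - addr;  /* 4 MiB */
    size_t trailsize = node_size - leadsize - size;               /* 4 MiB */
    uintptr_t ret = addr + leadsize;

    assert(node_size >= leadsize + size);
    assert((ret & (alignment - 1)) == 0);   /* result is properly aligned */
    printf("lead=%zu trail=%zu ret=%#lx\n", leadsize, trailsize,
        (unsigned long)ret);
    return (0);
}
```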
/*
 * If the caller specifies (*zero == false), it is still possible to receive
 * zeroed memory, in which case *zero is toggled to true. arena_chunk_alloc()
 * takes advantage of this to avoid demanding zeroed chunks, but taking
 * advantage of them if they are returned.
 */
void *
chunk_alloc(size_t size, size_t alignment, bool base, bool *zero,
    dss_prec_t dss_prec)
{
    void *ret;

    assert(size != 0);
    assert((size & chunksize_mask) == 0);
    assert(alignment != 0);
    assert((alignment & chunksize_mask) == 0);

    /* "primary" dss. */
    if (config_dss && dss_prec == dss_prec_primary) {
        if ((ret = chunk_recycle(&chunks_szad_dss, &chunks_ad_dss, size,
            alignment, base, zero)) != NULL)
            goto label_return;
        if ((ret = chunk_alloc_dss(size, alignment, zero)) != NULL)
            goto label_return;
    }
    /* mmap. */
    if ((ret = chunk_recycle(&chunks_szad_mmap, &chunks_ad_mmap, size,
        alignment, base, zero)) != NULL)
        goto label_return;
    if ((ret = chunk_alloc_mmap(size, alignment, zero)) != NULL)
        goto label_return;
    /* "secondary" dss. */
    if (config_dss && dss_prec == dss_prec_secondary) {
        if ((ret = chunk_recycle(&chunks_szad_dss, &chunks_ad_dss, size,
            alignment, base, zero)) != NULL)
            goto label_return;
        if ((ret = chunk_alloc_dss(size, alignment, zero)) != NULL)
            goto label_return;
    }

    /* All strategies for allocation failed. */
    ret = NULL;
label_return:
    if (config_ivsalloc && base == false && ret != NULL) {
        if (rtree_set(chunks_rtree, (uintptr_t)ret, ret)) {
            chunk_dealloc(ret, size, true);
            return (NULL);
        }
    }
    if ((config_stats || config_prof) && ret != NULL) {
        bool gdump;
        malloc_mutex_lock(&chunks_mtx);
        if (config_stats)
            stats_chunks.nchunks += (size / chunksize);
        stats_chunks.curchunks += (size / chunksize);
        if (stats_chunks.curchunks > stats_chunks.highchunks) {
            stats_chunks.highchunks = stats_chunks.curchunks;
            if (config_prof)
                gdump = true;
        } else if (config_prof)
            gdump = false;
        malloc_mutex_unlock(&chunks_mtx);
        if (config_prof && opt_prof && opt_prof_gdump && gdump)
            prof_gdump();
    }
    if (config_debug && *zero && ret != NULL) {
        size_t i;
        size_t *p = (size_t *)(uintptr_t)ret;

        VALGRIND_MAKE_MEM_DEFINED(ret, size);
        for (i = 0; i < size / sizeof(size_t); i++)
            assert(p[i] == 0);
    }
    assert(CHUNK_ADDR2BASE(ret) == ret);
    return (ret);
}
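As the comment above chunk_alloc() explains, a caller that does not demand zeroed memory may still learn, via *zero, that the returned chunk is zero-filled. Below is a minimal caller sketch, assuming the internal API declared in jemalloc_internal.h; the function name and the fixed dss_prec_secondary argument are illustrative (real callers pass their arena's dss precedence setting).

```c
#include "jemalloc/internal/jemalloc_internal.h"

static void *
example_grab_chunk(size_t size, bool need_zeroed)
{
    bool zero = need_zeroed;
    void *chunk;

    /* size must be a chunksize multiple; alignment here is just chunksize. */
    chunk = chunk_alloc(size, chunksize, false, &zero, dss_prec_secondary);
    if (chunk == NULL)
        return (NULL);
    /*
     * Even if need_zeroed was false, zero may have been toggled to true,
     * meaning the memory is known to be zero-filled and the caller can
     * skip any explicit memset() of its own.
     */
    return (chunk);
}
```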
static void
chunk_record(extent_tree_t *chunks_szad, extent_tree_t *chunks_ad, void *chunk,
    size_t size)
{
    bool unzeroed;
    extent_node_t *xnode, *node, *prev, key;

    unzeroed = pages_purge(chunk, size);

    /*
     * Allocate a node before acquiring chunks_mtx even though it might not
     * be needed, because base_node_alloc() may cause a new base chunk to
     * be allocated, which could cause deadlock if chunks_mtx were already
     * held.
     */
    xnode = base_node_alloc();

    malloc_mutex_lock(&chunks_mtx);
    key.addr = (void *)((uintptr_t)chunk + size);
    node = extent_tree_ad_nsearch(chunks_ad, &key);
    /* Try to coalesce forward. */
    if (node != NULL && node->addr == key.addr) {
        /*
         * Coalesce chunk with the following address range. This does
         * not change the position within chunks_ad, so only
         * remove/insert from/into chunks_szad.
         */
        extent_tree_szad_remove(chunks_szad, node);
        node->addr = chunk;
        node->size += size;
        node->zeroed = (node->zeroed && (unzeroed == false));
        extent_tree_szad_insert(chunks_szad, node);
        if (xnode != NULL)
            base_node_dealloc(xnode);
    } else {
        /* Coalescing forward failed, so insert a new node. */
        if (xnode == NULL) {
            /*
             * base_node_alloc() failed, which is an exceedingly
             * unlikely failure. Leak chunk; its pages have
             * already been purged, so this is only a virtual
             * memory leak.
             */
            malloc_mutex_unlock(&chunks_mtx);
            return;
        }
        node = xnode;
        node->addr = chunk;
        node->size = size;
        node->zeroed = (unzeroed == false);
        extent_tree_ad_insert(chunks_ad, node);
        extent_tree_szad_insert(chunks_szad, node);
    }

    /* Try to coalesce backward. */
    prev = extent_tree_ad_prev(chunks_ad, node);
    if (prev != NULL && (void *)((uintptr_t)prev->addr + prev->size) ==
        chunk) {
        /*
         * Coalesce chunk with the previous address range. This does
         * not change the position within chunks_ad, so only
         * remove/insert node from/into chunks_szad.
         */
        extent_tree_szad_remove(chunks_szad, prev);
        extent_tree_ad_remove(chunks_ad, prev);

        extent_tree_szad_remove(chunks_szad, node);
        node->addr = prev->addr;
        node->size += prev->size;
        node->zeroed = (node->zeroed && prev->zeroed);
        extent_tree_szad_insert(chunks_szad, node);

        base_node_dealloc(prev);
    }
    malloc_mutex_unlock(&chunks_mtx);
}
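chunk_record() feeds the return value of pages_purge() (whether the purged pages may still hold nonzero data) into the extent's zeroed flag, which chunk_recycle() later consults before deciding whether it must memset(). A tiny standalone model of that bookkeeping; all names below are local to the sketch.

```c
#include <assert.h>
#include <stdbool.h>

int
main(void)
{
    /* pages_purge() reports whether pages may now contain nonzero data. */
    bool unzeroed = true;

    /* A freshly recorded extent is zeroed only if purging left it zeroed. */
    bool new_zeroed = (unzeroed == false);

    /*
     * Coalescing with a neighbor known to be zeroed: the merged extent is
     * zeroed only if both pieces are.
     */
    bool neighbor_zeroed = true;
    bool merged_zeroed = (neighbor_zeroed && new_zeroed);

    assert(merged_zeroed == false);
    return (0);
}
```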
void
chunk_unmap(void *chunk, size_t size)
{
    assert(chunk != NULL);
    assert(CHUNK_ADDR2BASE(chunk) == chunk);
    assert(size != 0);
    assert((size & chunksize_mask) == 0);

    if (config_dss && chunk_in_dss(chunk))
        chunk_record(&chunks_szad_dss, &chunks_ad_dss, chunk, size);
    else if (chunk_dealloc_mmap(chunk, size))
        chunk_record(&chunks_szad_mmap, &chunks_ad_mmap, chunk, size);
}

void
chunk_dealloc(void *chunk, size_t size, bool unmap)
{

    assert(chunk != NULL);
    assert(CHUNK_ADDR2BASE(chunk) == chunk);
    assert(size != 0);
    assert((size & chunksize_mask) == 0);

    if (config_ivsalloc)
        rtree_set(chunks_rtree, (uintptr_t)chunk, NULL);
    if (config_stats || config_prof) {
        malloc_mutex_lock(&chunks_mtx);
        assert(stats_chunks.curchunks >= (size / chunksize));
        stats_chunks.curchunks -= (size / chunksize);
        malloc_mutex_unlock(&chunks_mtx);
    }

    if (unmap)
        chunk_unmap(chunk, size);
}

bool
chunk_boot(void)
{

    /* Set variables according to the value of opt_lg_chunk. */
    chunksize = (ZU(1) << opt_lg_chunk);
    assert(chunksize >= PAGE);
    chunksize_mask = chunksize - 1;
    chunk_npages = (chunksize >> LG_PAGE);

    if (config_stats || config_prof) {
        if (malloc_mutex_init(&chunks_mtx))
            return (true);
        memset(&stats_chunks, 0, sizeof(chunk_stats_t));
    }
    if (config_dss && chunk_dss_boot())
        return (true);
    extent_tree_szad_new(&chunks_szad_mmap);
    extent_tree_ad_new(&chunks_ad_mmap);
    extent_tree_szad_new(&chunks_szad_dss);
    extent_tree_ad_new(&chunks_ad_dss);
    if (config_ivsalloc) {
        chunks_rtree = rtree_new((ZU(1) << (LG_SIZEOF_PTR+3)) -
            opt_lg_chunk);
        if (chunks_rtree == NULL)
            return (true);
    }

    return (false);
}
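The rtree created in chunk_boot() is keyed on the address bits that distinguish one chunk from another: pointer width in bits minus lg(chunksize). A standalone check of that expression with illustrative values (LP64 and a 4 MiB chunk size, i.e. opt_lg_chunk == 22; neither value is taken from this file).

```c
#include <assert.h>
#include <stddef.h>

int
main(void)
{
    size_t lg_sizeof_ptr = 3;   /* sizeof(void *) == 8 on LP64 */
    size_t opt_lg_chunk = 22;   /* 4 MiB chunks (illustrative) */

    /* Same expression as the rtree_new() call above. */
    size_t key_bits = ((size_t)1 << (lg_sizeof_ptr + 3)) - opt_lg_chunk;

    assert(key_bits == 42);     /* 64 address bits - 22 chunk-offset bits */
    return (0);
}
```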
void
chunk_prefork(void)
{

    malloc_mutex_lock(&chunks_mtx);
    if (config_ivsalloc)
        rtree_prefork(chunks_rtree);
    chunk_dss_prefork();
}

void
chunk_postfork_parent(void)
{

    chunk_dss_postfork_parent();
    if (config_ivsalloc)
        rtree_postfork_parent(chunks_rtree);
    malloc_mutex_postfork_parent(&chunks_mtx);
}

void
chunk_postfork_child(void)
{

    chunk_dss_postfork_child();
    if (config_ivsalloc)
        rtree_postfork_child(chunks_rtree);
    malloc_mutex_postfork_child(&chunks_mtx);
}
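chunk_prefork() and the two postfork hooks above are only meaningful as part of a fork-handler chain registered elsewhere in jemalloc; they are not called from this file. A rough sketch of how such hooks are typically wired up with pthread_atfork(); the example_* wrapper names are illustrative, and the real handlers also cover other subsystems' mutexes.

```c
#include <pthread.h>

/* Prototypes matching the definitions above (normally from internal headers). */
void chunk_prefork(void);
void chunk_postfork_parent(void);
void chunk_postfork_child(void);

/* Illustrative wrappers; real handlers lock/unlock more than the chunk state. */
static void
example_prefork(void)
{
    chunk_prefork();
}

static void
example_postfork_parent(void)
{
    chunk_postfork_parent();
}

static void
example_postfork_child(void)
{
    chunk_postfork_child();
}

static void
example_install_fork_hooks(void)
{
    pthread_atfork(example_prefork, example_postfork_parent,
        example_postfork_child);
}
```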