#define	JEMALLOC_CHUNK_C_
#include "jemalloc/internal/jemalloc_internal.h"

/******************************************************************************/

const char	*opt_dss = DSS_DEFAULT;
size_t		opt_lg_chunk = LG_CHUNK_DEFAULT;

malloc_mutex_t	chunks_mtx;
chunk_stats_t	stats_chunks;
/*
 * Trees of chunks that were previously allocated (trees differ only in node
 * ordering).  These are used when allocating chunks, in an attempt to re-use
 * address space.  Depending on function, different tree orderings are needed,
 * which is why there are two trees with the same contents.
 */
static extent_tree_t	chunks_szad_mmap;
static extent_tree_t	chunks_ad_mmap;
static extent_tree_t	chunks_szad_dss;
static extent_tree_t	chunks_ad_dss;
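/*
 * Roughly, the szad trees (ordered by size, then address) are what
 * chunk_recycle() searches to find a best-fit extent for a new allocation,
 * while the ad trees (ordered by address only) are what chunk_record() walks
 * to coalesce a deallocated chunk with its neighbors.
 */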
rtree_t		*chunks_rtree;

/* Various chunk-related settings. */
size_t		chunksize;
size_t		chunksize_mask; /* (chunksize - 1). */
size_t		chunk_npages;
size_t		arena_maxclass; /* Max size class for arenas. */
/******************************************************************************/
/* Function prototypes for non-inline static functions. */

static void	*chunk_recycle(extent_tree_t *chunks_szad,
    extent_tree_t *chunks_ad, size_t size, size_t alignment, bool base,
    bool *zero);
static void	chunk_record(extent_tree_t *chunks_szad,
    extent_tree_t *chunks_ad, void *chunk, size_t size);

/******************************************************************************/
static void *
chunk_recycle(extent_tree_t *chunks_szad, extent_tree_t *chunks_ad, size_t size,
    size_t alignment, bool base, bool *zero)
{
	void *ret;
	extent_node_t *node;
	extent_node_t key;
	size_t alloc_size, leadsize, trailsize;
	bool zeroed;

	if (base) {
		/*
		 * This function may need to call base_node_{,de}alloc(), but
		 * the current chunk allocation request is on behalf of the
		 * base allocator.  Avoid deadlock (and if that weren't an
		 * issue, potential for infinite recursion) by returning NULL.
		 */
		return (NULL);
	}

	alloc_size = size + alignment - chunksize;
	/* Beware size_t wrap-around. */
	if (alloc_size < size)
		return (NULL);
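	/*
	 * Illustrative arithmetic (hypothetical numbers, not defaults): with
	 * chunksize = 4 MiB, a 4 MiB request with 8 MiB alignment searches
	 * for alloc_size = 4 + 8 - 4 = 8 MiB.  Because recycled extents are
	 * chunk-aligned, that is enough to guarantee an aligned size-byte
	 * region somewhere inside the extent.  The wrap-around check above
	 * rejects requests for which size + alignment overflows size_t.
	 */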
	key.addr = NULL;
	key.size = alloc_size;
	malloc_mutex_lock(&chunks_mtx);
	node = extent_tree_szad_nsearch(chunks_szad, &key);
	if (node == NULL) {
		malloc_mutex_unlock(&chunks_mtx);
		return (NULL);
	}
	leadsize = ALIGNMENT_CEILING((uintptr_t)node->addr, alignment) -
	    (uintptr_t)node->addr;
	assert(node->size >= leadsize + size);
	trailsize = node->size - leadsize - size;
	ret = (void *)((uintptr_t)node->addr + leadsize);
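	/*
	 * The selected extent is carved into [leadsize | size | trailsize];
	 * ret points at the aligned middle piece, and any non-empty leading
	 * or trailing remainder is reinserted into the trees below.
	 */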
	/* Remove node from the tree. */
	extent_tree_szad_remove(chunks_szad, node);
	extent_tree_ad_remove(chunks_ad, node);
	if (leadsize != 0) {
		/* Insert the leading space as a smaller chunk. */
		node->size = leadsize;
		extent_tree_szad_insert(chunks_szad, node);
		extent_tree_ad_insert(chunks_ad, node);
		node = NULL;
	}
	if (trailsize != 0) {
		/* Insert the trailing space as a smaller chunk. */
		if (node == NULL) {
			/*
			 * An additional node is required, but
			 * base_node_alloc() can cause a new base chunk to be
			 * allocated.  Drop chunks_mtx in order to avoid
			 * deadlock, and if node allocation fails, deallocate
			 * the result before returning an error.
			 */
			malloc_mutex_unlock(&chunks_mtx);
			node = base_node_alloc();
			if (node == NULL) {
				chunk_dealloc(ret, size, true);
				return (NULL);
			}
			malloc_mutex_lock(&chunks_mtx);
		}
		node->addr = (void *)((uintptr_t)(ret) + size);
		node->size = trailsize;
		extent_tree_szad_insert(chunks_szad, node);
		extent_tree_ad_insert(chunks_ad, node);
		node = NULL;
	}
	malloc_mutex_unlock(&chunks_mtx);

	zeroed = false;
	if (node != NULL) {
		if (node->zeroed) {
			zeroed = true;
			*zero = true;
		}
		base_node_dealloc(node);
	}
	if (zeroed == false && *zero) {
		VALGRIND_MAKE_MEM_UNDEFINED(ret, size);
		memset(ret, 0, size);
	}
	return (ret);
}
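/*
 * Note on the zeroed bookkeeping above: memory is memset() only when the
 * caller asked for zeroed memory (*zero) and the recycled extent is not
 * already known to be zeroed; otherwise the recorded node->zeroed state is
 * trusted and the extra work is skipped.
 */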
/*
 * If the caller specifies (*zero == false), it is still possible to receive
 * zeroed memory, in which case *zero is toggled to true.  arena_chunk_alloc()
 * takes advantage of this to avoid demanding zeroed chunks, but taking
 * advantage of them if they are returned.
 */
void *
chunk_alloc(size_t size, size_t alignment, bool base, bool *zero,
    dss_prec_t dss_prec)
{
	void *ret;

	assert(size != 0);
	assert((size & chunksize_mask) == 0);
	assert(alignment != 0);
	assert((alignment & chunksize_mask) == 0);
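	/*
	 * Allocation strategies are tried in dss-precedence order: recycled
	 * dss extents, then fresh dss (when dss is "primary"), then recycled
	 * mmap extents, then fresh mmap, and finally dss as a last resort
	 * (when dss is "secondary").
	 */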
	/* "primary" dss. */
	if (config_dss && dss_prec == dss_prec_primary) {
		if ((ret = chunk_recycle(&chunks_szad_dss, &chunks_ad_dss, size,
		    alignment, base, zero)) != NULL)
			goto label_return;
		if ((ret = chunk_alloc_dss(size, alignment, zero)) != NULL)
			goto label_return;
	}
	/* mmap. */
	if ((ret = chunk_recycle(&chunks_szad_mmap, &chunks_ad_mmap, size,
	    alignment, base, zero)) != NULL)
		goto label_return;
	if ((ret = chunk_alloc_mmap(size, alignment, zero)) != NULL)
		goto label_return;
	/* "secondary" dss. */
	if (config_dss && dss_prec == dss_prec_secondary) {
		if ((ret = chunk_recycle(&chunks_szad_dss, &chunks_ad_dss, size,
		    alignment, base, zero)) != NULL)
			goto label_return;
		if ((ret = chunk_alloc_dss(size, alignment, zero)) != NULL)
			goto label_return;
	}

	/* All strategies for allocation failed. */
	ret = NULL;
label_return:
	if (config_ivsalloc && base == false && ret != NULL) {
		if (rtree_set(chunks_rtree, (uintptr_t)ret, ret)) {
			chunk_dealloc(ret, size, true);
			return (NULL);
		}
	}
	if ((config_stats || config_prof) && ret != NULL) {
		bool gdump;
		malloc_mutex_lock(&chunks_mtx);
		if (config_stats)
			stats_chunks.nchunks += (size / chunksize);
		stats_chunks.curchunks += (size / chunksize);
		if (stats_chunks.curchunks > stats_chunks.highchunks) {
			stats_chunks.highchunks = stats_chunks.curchunks;
			if (config_prof)
				gdump = true;
		} else if (config_prof)
			gdump = false;
		malloc_mutex_unlock(&chunks_mtx);
		if (config_prof && opt_prof && opt_prof_gdump && gdump)
			prof_gdump();
	}
	if (config_debug && *zero && ret != NULL) {
		size_t i;
		size_t *p = (size_t *)(uintptr_t)ret;

		VALGRIND_MAKE_MEM_DEFINED(ret, size);
		for (i = 0; i < size / sizeof(size_t); i++)
			assert(p[i] == 0);
	}
	assert(CHUNK_ADDR2BASE(ret) == ret);
	return (ret);
}
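/*
 * A note on the rtree bookkeeping above: when ivsalloc is enabled, every
 * non-base chunk is registered in chunks_rtree under its base address, and
 * chunk_dealloc() below clears the mapping again.  chunk_boot() sizes the
 * rtree for the significant address bits, i.e. pointer bits minus
 * opt_lg_chunk.
 */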
static void
chunk_record(extent_tree_t *chunks_szad, extent_tree_t *chunks_ad, void *chunk,
    size_t size)
{
	bool unzeroed;
	extent_node_t *xnode, *node, *prev, key;

	unzeroed = pages_purge(chunk, size);

	/*
	 * Allocate a node before acquiring chunks_mtx even though it might not
	 * be needed, because base_node_alloc() may cause a new base chunk to
	 * be allocated, which could cause deadlock if chunks_mtx were already
	 * held.
	 */
	xnode = base_node_alloc();

	malloc_mutex_lock(&chunks_mtx);
	key.addr = (void *)((uintptr_t)chunk + size);
	node = extent_tree_ad_nsearch(chunks_ad, &key);
	/* Try to coalesce forward. */
	if (node != NULL && node->addr == key.addr) {
		/*
		 * Coalesce chunk with the following address range.  This does
		 * not change the position within chunks_ad, so only
		 * remove/insert from/into chunks_szad.
		 */
		extent_tree_szad_remove(chunks_szad, node);
		node->addr = chunk;
		node->size += size;
		node->zeroed = (node->zeroed && (unzeroed == false));
		extent_tree_szad_insert(chunks_szad, node);
		if (xnode != NULL)
			base_node_dealloc(xnode);
	} else {
		/* Coalescing forward failed, so insert a new node. */
		if (xnode == NULL) {
			/*
			 * base_node_alloc() failed, which is an exceedingly
			 * unlikely failure.  Leak chunk; its pages have
			 * already been purged, so this is only a virtual
			 * memory leak.
			 */
			malloc_mutex_unlock(&chunks_mtx);
			return;
		}
		node = xnode;
		node->addr = chunk;
		node->size = size;
		node->zeroed = (unzeroed == false);
		extent_tree_ad_insert(chunks_ad, node);
		extent_tree_szad_insert(chunks_szad, node);
	}

	/* Try to coalesce backward. */
	prev = extent_tree_ad_prev(chunks_ad, node);
	if (prev != NULL && (void *)((uintptr_t)prev->addr + prev->size) ==
	    chunk) {
		/*
		 * Coalesce chunk with the previous address range.  This does
		 * not change the position within chunks_ad, so only
		 * remove/insert node from/into chunks_szad.
		 */
		extent_tree_szad_remove(chunks_szad, prev);
		extent_tree_ad_remove(chunks_ad, prev);

		extent_tree_szad_remove(chunks_szad, node);
		node->addr = prev->addr;
		node->size += prev->size;
		node->zeroed = (node->zeroed && prev->zeroed);
		extent_tree_szad_insert(chunks_szad, node);

		base_node_dealloc(prev);
	}
	malloc_mutex_unlock(&chunks_mtx);
}
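/*
 * Because chunk_record() coalesces both forward and backward, adjacent
 * deallocated chunks end up as a single extent in the recycle trees, which
 * keeps the trees small and lets chunk_recycle() satisfy large or strongly
 * aligned requests from previously used address space.
 */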
void
chunk_unmap(void *chunk, size_t size)
{
	assert(chunk != NULL);
	assert(CHUNK_ADDR2BASE(chunk) == chunk);
	assert(size != 0);
	assert((size & chunksize_mask) == 0);

	if (config_dss && chunk_in_dss(chunk))
		chunk_record(&chunks_szad_dss, &chunks_ad_dss, chunk, size);
	else if (chunk_dealloc_mmap(chunk, size))
		chunk_record(&chunks_szad_mmap, &chunks_ad_mmap, chunk, size);
}
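/*
 * Note (descriptive, based on the calls above): dss memory cannot be returned
 * to the operating system, so it is always recorded for recycling, whereas
 * mmap'ed memory is recorded only when chunk_dealloc_mmap() reports that it
 * did not actually unmap the pages.
 */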
void
chunk_dealloc(void *chunk, size_t size, bool unmap)
{

	assert(chunk != NULL);
	assert(CHUNK_ADDR2BASE(chunk) == chunk);
	assert(size != 0);
	assert((size & chunksize_mask) == 0);

	if (config_ivsalloc)
		rtree_set(chunks_rtree, (uintptr_t)chunk, NULL);
	if (config_stats || config_prof) {
		malloc_mutex_lock(&chunks_mtx);
		assert(stats_chunks.curchunks >= (size / chunksize));
		stats_chunks.curchunks -= (size / chunksize);
		malloc_mutex_unlock(&chunks_mtx);
	}

	if (unmap)
		chunk_unmap(chunk, size);
}
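/*
 * The unmap flag lets callers perform the accounting above (rtree and stats)
 * without immediately releasing the address range; only when unmap is true is
 * the chunk handed to chunk_unmap() for recycling or return to the system.
 */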
bool
chunk_boot(void)
{

	/* Set variables according to the value of opt_lg_chunk. */
	chunksize = (ZU(1) << opt_lg_chunk);
	assert(chunksize >= PAGE);
	chunksize_mask = chunksize - 1;
	chunk_npages = (chunksize >> LG_PAGE);
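	/*
	 * For example (hypothetical value, not necessarily the default): with
	 * opt_lg_chunk = 22, chunksize = 4 MiB, chunksize_mask = 0x3fffff,
	 * and, assuming 4 KiB pages (LG_PAGE = 12), chunk_npages = 1024.
	 */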
	if (config_stats || config_prof) {
		if (malloc_mutex_init(&chunks_mtx))
			return (true);
		memset(&stats_chunks, 0, sizeof(chunk_stats_t));
	}
	if (config_dss && chunk_dss_boot())
		return (true);
	extent_tree_szad_new(&chunks_szad_mmap);
	extent_tree_ad_new(&chunks_ad_mmap);
	extent_tree_szad_new(&chunks_szad_dss);
	extent_tree_ad_new(&chunks_ad_dss);
	if (config_ivsalloc) {
		chunks_rtree = rtree_new((ZU(1) << (LG_SIZEOF_PTR+3)) -
		    opt_lg_chunk);
		if (chunks_rtree == NULL)
			return (true);
	}

	return (false);
}
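/*
 * A brief note on the fork handlers below: chunk_prefork() locks chunks_mtx
 * (and preforks the rtree and dss state) so that no other thread holds it
 * across fork(), and chunk_postfork_parent()/chunk_postfork_child() release
 * or reinitialize that state on each side of the fork.
 */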
void
chunk_prefork(void)
{

	malloc_mutex_lock(&chunks_mtx);
	if (config_ivsalloc)
		rtree_prefork(chunks_rtree);
	chunk_dss_prefork();
}

void
chunk_postfork_parent(void)
{

	chunk_dss_postfork_parent();
	if (config_ivsalloc)
		rtree_postfork_parent(chunks_rtree);
	malloc_mutex_postfork_parent(&chunks_mtx);
}

void
chunk_postfork_child(void)
{

	chunk_dss_postfork_child();
	if (config_ivsalloc)
		rtree_postfork_child(chunks_rtree);
	malloc_mutex_postfork_child(&chunks_mtx);
}