#define	JEMALLOC_CHUNK_C_
#include "jemalloc/internal/jemalloc_internal.h"

/******************************************************************************/
size_t	opt_lg_chunk = LG_CHUNK_DEFAULT;

malloc_mutex_t	chunks_mtx;
chunk_stats_t	stats_chunks;

/*
 * Trees of chunks that were previously allocated (trees differ only in node
 * ordering).  These are used when allocating chunks, in an attempt to re-use
 * address space.  Depending on function, different tree orderings are needed,
 * which is why there are two trees with the same contents.
 */
static extent_tree_t	chunks_szad;
static extent_tree_t	chunks_ad;
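/*
 * Illustrative sketch (not part of the original logic): the two orderings
 * answer different questions about the same set of extents.  The calls below
 * mirror uses later in this file; the key values are hypothetical.
 *
 *	extent_node_t key, *node;
 *
 *	// Best fit by (size, address): used when recycling address space.
 *	key.addr = NULL;
 *	key.size = 2 * chunksize;
 *	node = extent_tree_szad_nsearch(&chunks_szad, &key);
 *
 *	// Neighbor lookup by address: used when coalescing on deallocation.
 *	key.addr = (void *)((uintptr_t)chunk + size);
 *	node = extent_tree_ad_nsearch(&chunks_ad, &key);
 */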
rtree_t		*chunks_rtree;

/* Various chunk-related settings. */
size_t		chunksize;
size_t		chunksize_mask; /* (chunksize - 1). */
size_t		chunk_npages;
size_t		arena_maxclass; /* Max size class for arenas. */
/******************************************************************************/
/* Function prototypes for non-inline static functions. */

static void	*chunk_recycle(size_t size, size_t alignment, bool base,
    bool *zero);
static void	chunk_record(void *chunk, size_t size);

/******************************************************************************/
static void *
chunk_recycle(size_t size, size_t alignment, bool base, bool *zero)
{
	void *ret;
	extent_node_t *node;
	extent_node_t key;
	size_t alloc_size, leadsize, trailsize;

	if (base) {
		/*
		 * This function may need to call base_node_{,de}alloc(), but
		 * the current chunk allocation request is on behalf of the
		 * base allocator.  Avoid deadlock (and if that weren't an
		 * issue, potential for infinite recursion) by returning NULL.
		 */
		return (NULL);
	}

	alloc_size = size + alignment - chunksize;
	/* Beware size_t wrap-around. */
	if (alloc_size < size)
		return (NULL);
	key.addr = NULL;
	key.size = alloc_size;
	malloc_mutex_lock(&chunks_mtx);
	node = extent_tree_szad_nsearch(&chunks_szad, &key);
	if (node == NULL) {
		malloc_mutex_unlock(&chunks_mtx);
		return (NULL);
	}
	leadsize = ALIGNMENT_CEILING((uintptr_t)node->addr, alignment) -
	    (uintptr_t)node->addr;
	assert(node->size >= leadsize + size);
	trailsize = node->size - leadsize - size;
	ret = (void *)((uintptr_t)node->addr + leadsize);
	/* Remove node from the tree. */
	extent_tree_szad_remove(&chunks_szad, node);
	extent_tree_ad_remove(&chunks_ad, node);
	if (leadsize != 0) {
		/* Insert the leading space as a smaller chunk. */
		node->size = leadsize;
		extent_tree_szad_insert(&chunks_szad, node);
		extent_tree_ad_insert(&chunks_ad, node);
		node = NULL;
	}
	if (trailsize != 0) {
		/* Insert the trailing space as a smaller chunk. */
		if (node == NULL) {
			/*
			 * An additional node is required, but
			 * base_node_alloc() can cause a new base chunk to be
			 * allocated.  Drop chunks_mtx in order to avoid
			 * deadlock, and if node allocation fails, deallocate
			 * the result before returning an error.
			 */
			malloc_mutex_unlock(&chunks_mtx);
			node = base_node_alloc();
			if (node == NULL) {
				chunk_dealloc(ret, size, true);
				return (NULL);
			}
			malloc_mutex_lock(&chunks_mtx);
		}
		node->addr = (void *)((uintptr_t)(ret) + size);
		node->size = trailsize;
		extent_tree_szad_insert(&chunks_szad, node);
		extent_tree_ad_insert(&chunks_ad, node);
		node = NULL;
	}
	malloc_mutex_unlock(&chunks_mtx);

	if (node != NULL)
		base_node_dealloc(node);
#ifdef JEMALLOC_PURGE_MADVISE_DONTNEED
	/* Pages are zeroed as a side effect of pages_purge(). */
	*zero = true;
#else
	if (*zero) {
		VALGRIND_MAKE_MEM_UNDEFINED(ret, size);
		memset(ret, 0, size);
	}
#endif
	return (ret);
}

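/*
 * Worked example for the split arithmetic above (hypothetical numbers): with
 * chunksize = 4 MiB, a request for size = 4 MiB at alignment = 8 MiB searches
 * for an extent of alloc_size = 4 MiB + 8 MiB - 4 MiB = 8 MiB.  If the
 * recycled extent starts at 0xc00000 (12 MiB) and is 8 MiB long, then
 * leadsize = ALIGNMENT_CEILING(0xc00000, 8 MiB) - 0xc00000 = 4 MiB,
 * ret = 0x1000000 (8 MiB-aligned), and trailsize = 8 MiB - 4 MiB - 4 MiB = 0,
 * so the leading 4 MiB is reinserted into the trees and no trailing split is
 * needed.
 */
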
/*
 * If the caller specifies (*zero == false), it is still possible to receive
 * zeroed memory, in which case *zero is toggled to true.  arena_chunk_alloc()
 * takes advantage of this to avoid demanding zeroed chunks, but taking
 * advantage of them if they are returned.
 */
void *
chunk_alloc(size_t size, size_t alignment, bool base, bool *zero)
{
	void *ret;

	assert(size != 0);
	assert((size & chunksize_mask) == 0);
	assert(alignment != 0);
	assert((alignment & chunksize_mask) == 0);

	ret = chunk_recycle(size, alignment, base, zero);
	if (ret != NULL)
		goto label_return;

	ret = chunk_alloc_mmap(size, alignment, zero);
	if (ret != NULL)
		goto label_return;

	if (config_dss) {
		ret = chunk_alloc_dss(size, alignment, zero);
		if (ret != NULL)
			goto label_return;
	}

	/* All strategies for allocation failed. */
	ret = NULL;
label_return:
	if (config_ivsalloc && base == false && ret != NULL) {
		if (rtree_set(chunks_rtree, (uintptr_t)ret, ret)) {
			chunk_dealloc(ret, size, true);
			return (NULL);
		}
	}
	if ((config_stats || config_prof) && ret != NULL) {
		bool gdump;
		malloc_mutex_lock(&chunks_mtx);
		if (config_stats)
			stats_chunks.nchunks += (size / chunksize);
		stats_chunks.curchunks += (size / chunksize);
		if (stats_chunks.curchunks > stats_chunks.highchunks) {
			stats_chunks.highchunks = stats_chunks.curchunks;
			if (config_prof)
				gdump = true;
		} else if (config_prof)
			gdump = false;
		malloc_mutex_unlock(&chunks_mtx);
		if (config_prof && opt_prof && opt_prof_gdump && gdump)
			prof_gdump();
	}
	if (config_debug && *zero && ret != NULL) {
		size_t i;
		size_t *p = (size_t *)(uintptr_t)ret;

		VALGRIND_MAKE_MEM_DEFINED(ret, size);
		for (i = 0; i < size / sizeof(size_t); i++)
			assert(p[i] == 0);
	}
	assert(CHUNK_ADDR2BASE(ret) == ret);
	return (ret);
}

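/*
 * Illustrative caller sketch for the *zero contract (hypothetical, not part
 * of this file): a caller may pass zero = false and still learn, via the same
 * flag, that the returned chunk happens to be zeroed, avoiding a redundant
 * memset.
 *
 *	bool zero = false;
 *	void *chunk = chunk_alloc(chunksize, chunksize, false, &zero);
 *	if (chunk != NULL && zero == false)
 *		memset(chunk, 0, chunksize);	// pay for zeroing only if needed
 */
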
static void
chunk_record(void *chunk, size_t size)
{
	extent_node_t *xnode, *node, *prev, key;

	pages_purge(chunk, size);

	/*
	 * Allocate a node before acquiring chunks_mtx even though it might not
	 * be needed, because base_node_alloc() may cause a new base chunk to
	 * be allocated, which could cause deadlock if chunks_mtx were already
	 * held.
	 */
	xnode = base_node_alloc();

	malloc_mutex_lock(&chunks_mtx);
	key.addr = (void *)((uintptr_t)chunk + size);
	node = extent_tree_ad_nsearch(&chunks_ad, &key);
	/* Try to coalesce forward. */
	if (node != NULL && node->addr == key.addr) {
		/*
		 * Coalesce chunk with the following address range.  This does
		 * not change the position within chunks_ad, so only
		 * remove/insert from/into chunks_szad.
		 */
		extent_tree_szad_remove(&chunks_szad, node);
		node->addr = chunk;
		node->size += size;
		extent_tree_szad_insert(&chunks_szad, node);
		if (xnode != NULL)
			base_node_dealloc(xnode);
	} else {
		/* Coalescing forward failed, so insert a new node. */
		if (xnode == NULL) {
			/*
			 * base_node_alloc() failed, which is an exceedingly
			 * unlikely failure.  Leak chunk; its pages have
			 * already been purged, so this is only a virtual
			 * memory leak.
			 */
			malloc_mutex_unlock(&chunks_mtx);
			return;
		}
		node = xnode;
		node->addr = chunk;
		node->size = size;
		extent_tree_ad_insert(&chunks_ad, node);
		extent_tree_szad_insert(&chunks_szad, node);
	}

	/* Try to coalesce backward. */
	prev = extent_tree_ad_prev(&chunks_ad, node);
	if (prev != NULL && (void *)((uintptr_t)prev->addr + prev->size) ==
	    chunk) {
		/*
		 * Coalesce chunk with the previous address range.  This does
		 * not change the position within chunks_ad, so only
		 * remove/insert node from/into chunks_szad.
		 */
		extent_tree_szad_remove(&chunks_szad, prev);
		extent_tree_ad_remove(&chunks_ad, prev);

		extent_tree_szad_remove(&chunks_szad, node);
		node->addr = prev->addr;
		node->size += prev->size;
		extent_tree_szad_insert(&chunks_szad, node);

		base_node_dealloc(prev);
	}
	malloc_mutex_unlock(&chunks_mtx);
}

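/*
 * Illustrative sketch (hypothetical addresses): recording a 4 MiB chunk at
 * 0x800000 while the trees already hold [0x400000, 0x800000) and
 * [0xc00000, 0x1000000) first coalesces forward with the extent starting at
 * 0xc00000, then coalesces backward with the extent ending at 0x800000,
 * leaving a single 12 MiB extent [0x400000, 0x1000000); the two nodes made
 * spare by coalescing are returned via base_node_dealloc().
 */
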
void
chunk_dealloc(void *chunk, size_t size, bool unmap)
{

	assert(chunk != NULL);
	assert(CHUNK_ADDR2BASE(chunk) == chunk);
	assert(size != 0);
	assert((size & chunksize_mask) == 0);

	if (config_ivsalloc)
		rtree_set(chunks_rtree, (uintptr_t)chunk, NULL);
	if (config_stats || config_prof) {
		malloc_mutex_lock(&chunks_mtx);
		stats_chunks.curchunks -= (size / chunksize);
		malloc_mutex_unlock(&chunks_mtx);
	}

	if (unmap) {
		if ((config_dss && chunk_in_dss(chunk)) ||
		    chunk_dealloc_mmap(chunk, size))
			chunk_record(chunk, size);
	}
}

bool
chunk_boot(void)
{

	/* Set variables according to the value of opt_lg_chunk. */
	chunksize = (ZU(1) << opt_lg_chunk);
	assert(chunksize >= PAGE);
	chunksize_mask = chunksize - 1;
	chunk_npages = (chunksize >> LG_PAGE);

	if (config_stats || config_prof) {
		if (malloc_mutex_init(&chunks_mtx))
			return (true);
		memset(&stats_chunks, 0, sizeof(chunk_stats_t));
	}
	if (config_dss && chunk_dss_boot())
		return (true);
	extent_tree_szad_new(&chunks_szad);
	extent_tree_ad_new(&chunks_ad);
	if (config_ivsalloc) {
		chunks_rtree = rtree_new((ZU(1) << (LG_SIZEOF_PTR+3)) -
		    opt_lg_chunk);
		if (chunks_rtree == NULL)
			return (true);
	}

	return (false);
}
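/*
 * Worked example (illustrative defaults, not a claim about every build): with
 * opt_lg_chunk = 22 and LG_PAGE = 12 (4 KiB pages),
 *
 *	chunksize      = 1 << 22 = 4 MiB
 *	chunksize_mask = 0x3fffff
 *	chunk_npages   = 4 MiB >> 12 = 1024
 *
 * and on a 64-bit build (LG_SIZEOF_PTR = 3) the rtree above is keyed on
 * (1 << (3+3)) - 22 = 42 significant address bits.
 */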