#define	JEMALLOC_CHUNK_C_
#include "jemalloc/internal/jemalloc_internal.h"

/******************************************************************************/

size_t	opt_lg_chunk = LG_CHUNK_DEFAULT;
#ifdef JEMALLOC_SWAP
bool	opt_overcommit = true;
#endif

#if (defined(JEMALLOC_STATS) || defined(JEMALLOC_PROF))
malloc_mutex_t	chunks_mtx;
chunk_stats_t	stats_chunks;
#endif

#ifdef JEMALLOC_IVSALLOC
rtree_t		*chunks_rtree;
#endif

/* Various chunk-related settings. */
size_t		chunksize;
size_t		chunksize_mask; /* (chunksize - 1). */
size_t		chunk_npages;
size_t		arena_maxclass; /* Max size class for arenas. */

/******************************************************************************/

/*
 * If the caller specifies (*zero == false), it is still possible to receive
 * zeroed memory, in which case *zero is toggled to true.  arena_chunk_alloc()
 * takes advantage of this to avoid demanding zeroed chunks, but taking
 * advantage of them if they are returned.
 */
void *
chunk_alloc(size_t size, bool base, bool *zero)
{
	void *ret;

	assert(size != 0);
	assert((size & chunksize_mask) == 0);

	/* Try the configured backends in order: swap, then the DSS, then mmap(). */
#ifdef JEMALLOC_SWAP
	if (swap_enabled) {
		ret = chunk_alloc_swap(size, zero);
		if (ret != NULL)
			goto RETURN;
	}

	if (swap_enabled == false || opt_overcommit) {
#endif
#ifdef JEMALLOC_DSS
		ret = chunk_alloc_dss(size, zero);
		if (ret != NULL)
			goto RETURN;
#endif
		ret = chunk_alloc_mmap(size);
		if (ret != NULL) {
			*zero = true;
			goto RETURN;
		}
#ifdef JEMALLOC_SWAP
	}
#endif

	/* All strategies for allocation failed. */
	ret = NULL;
RETURN:
#ifdef JEMALLOC_IVSALLOC
	if (base == false && ret != NULL) {
		/* Record the chunk in the rtree so ivsalloc() can find it. */
		if (rtree_set(chunks_rtree, (uintptr_t)ret, ret)) {
			chunk_dealloc(ret, size, true);
			return (NULL);
		}
	}
#endif
#if (defined(JEMALLOC_STATS) || defined(JEMALLOC_PROF))
	if (ret != NULL) {
# ifdef JEMALLOC_PROF
		bool gdump;
# endif
		malloc_mutex_lock(&chunks_mtx);
# ifdef JEMALLOC_STATS
		stats_chunks.nchunks += (size / chunksize);
# endif
		stats_chunks.curchunks += (size / chunksize);
		if (stats_chunks.curchunks > stats_chunks.highchunks) {
			stats_chunks.highchunks = stats_chunks.curchunks;
# ifdef JEMALLOC_PROF
			gdump = true;
# endif
		}
# ifdef JEMALLOC_PROF
		else
			gdump = false;
# endif
		malloc_mutex_unlock(&chunks_mtx);
# ifdef JEMALLOC_PROF
		if (opt_prof && opt_prof_gdump && gdump)
			prof_gdump();
# endif
	}
#endif

	assert(CHUNK_ADDR2BASE(ret) == ret);
	return (ret);
}
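
/*
 * Illustrative sketch (not part of the original source): how an internal
 * caller that can tolerate non-zeroed memory might use chunk_alloc() and
 * chunk_dealloc().  The allocator may hand back already-zeroed memory and
 * toggle *zero to true, in which case the explicit memset() can be skipped.
 *
 *	bool zero = false;
 *	void *chunk = chunk_alloc(chunksize, false, &zero);
 *	if (chunk == NULL)
 *		return (NULL);
 *	if (zero == false)
 *		memset(chunk, 0, chunksize);
 *	...
 *	chunk_dealloc(chunk, chunksize, true);
 */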

void
chunk_dealloc(void *chunk, size_t size, bool unmap)
{

	assert(chunk != NULL);
	assert(CHUNK_ADDR2BASE(chunk) == chunk);
	assert(size != 0);
	assert((size & chunksize_mask) == 0);

#ifdef JEMALLOC_IVSALLOC
	rtree_set(chunks_rtree, (uintptr_t)chunk, NULL);
#endif
#if (defined(JEMALLOC_STATS) || defined(JEMALLOC_PROF))
	malloc_mutex_lock(&chunks_mtx);
	stats_chunks.curchunks -= (size / chunksize);
	malloc_mutex_unlock(&chunks_mtx);
#endif

	if (unmap) {
		/* Give each backend a chance to reclaim the chunk. */
#ifdef JEMALLOC_SWAP
		if (swap_enabled && chunk_dealloc_swap(chunk, size) == false)
			return;
#endif
#ifdef JEMALLOC_DSS
		if (chunk_dealloc_dss(chunk, size) == false)
			return;
#endif
		chunk_dealloc_mmap(chunk, size);
	}
}

bool
chunk_boot(void)
{

	/* Set variables according to the value of opt_lg_chunk. */
	chunksize = (ZU(1) << opt_lg_chunk);
	assert(chunksize >= PAGE_SIZE);
	chunksize_mask = chunksize - 1;
	chunk_npages = (chunksize >> PAGE_SHIFT);
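
	/*
	 * Worked example (illustrative; assumes the default LG_CHUNK_DEFAULT
	 * of 22 and 4 KiB pages, i.e. PAGE_SHIFT == 12): chunksize is 4 MiB,
	 * chunksize_mask is 0x3fffff, and chunk_npages is 1024.
	 */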

#if (defined(JEMALLOC_STATS) || defined(JEMALLOC_PROF))
	if (malloc_mutex_init(&chunks_mtx))
		return (true);
	memset(&stats_chunks, 0, sizeof(chunk_stats_t));
#endif
#ifdef JEMALLOC_SWAP
	if (chunk_swap_boot())
		return (true);
#endif
	if (chunk_mmap_boot())
		return (true);
#ifdef JEMALLOC_DSS
	if (chunk_dss_boot())
		return (true);
#endif
#ifdef JEMALLOC_IVSALLOC
	/* Key the rtree on the (pointer bits - lg chunk) significant address bits. */
	chunks_rtree = rtree_new((ZU(1) << (LG_SIZEOF_PTR+3)) - opt_lg_chunk);
	if (chunks_rtree == NULL)
		return (true);
#endif

	return (false);
}