#define JEMALLOC_CHUNK_C_
#include "jemalloc/internal/jemalloc_internal.h"

/******************************************************************************/
/* Data. */

size_t opt_lg_chunk = LG_CHUNK_DEFAULT;

malloc_mutex_t chunks_mtx;
chunk_stats_t stats_chunks;

/*
 * Trees of chunks that were previously allocated (trees differ only in node
 * ordering). These are used when allocating chunks, in an attempt to re-use
 * address space. Depending on function, different tree orderings are needed,
 * which is why there are two trees with the same contents.
 */
static extent_tree_t chunks_szad;
static extent_tree_t chunks_ad;

rtree_t *chunks_rtree;

/* Various chunk-related settings. */
size_t chunksize;
size_t chunksize_mask; /* (chunksize - 1). */
size_t chunk_npages;
size_t map_bias;
size_t arena_maxclass; /* Max size class for arenas. */

/******************************************************************************/
/* Function prototypes for non-inline static functions. */

static void *chunk_recycle(size_t size, size_t alignment, bool base,
    bool *zero);
static void chunk_record(void *chunk, size_t size);

/******************************************************************************/

static void *
chunk_recycle(size_t size, size_t alignment, bool base, bool *zero)
{
	void *ret;
	extent_node_t *node;
	extent_node_t key;
	size_t alloc_size, leadsize, trailsize;

	if (base) {
		/*
		 * This function may need to call base_node_{,de}alloc(), but
		 * the current chunk allocation request is on behalf of the
		 * base allocator. Avoid deadlock (and if that weren't an
		 * issue, potential for infinite recursion) by returning NULL.
		 */
		return (NULL);
	}

	alloc_size = size + alignment - chunksize;
	/* Beware size_t wrap-around. */
	if (alloc_size < size)
		return (NULL);
	key.addr = NULL;
	key.size = alloc_size;
	malloc_mutex_lock(&chunks_mtx);
	node = extent_tree_szad_nsearch(&chunks_szad, &key);
	if (node == NULL) {
		malloc_mutex_unlock(&chunks_mtx);
		return (NULL);
	}
	leadsize = ALIGNMENT_CEILING((uintptr_t)node->addr, alignment) -
	    (uintptr_t)node->addr;
	assert(node->size >= leadsize + size);
	trailsize = node->size - leadsize - size;
	ret = (void *)((uintptr_t)node->addr + leadsize);
	/* Remove node from the tree. */
	extent_tree_szad_remove(&chunks_szad, node);
	extent_tree_ad_remove(&chunks_ad, node);
	if (leadsize != 0) {
		/* Insert the leading space as a smaller chunk. */
		node->size = leadsize;
		extent_tree_szad_insert(&chunks_szad, node);
		extent_tree_ad_insert(&chunks_ad, node);
		node = NULL;
	}
	if (trailsize != 0) {
		/* Insert the trailing space as a smaller chunk. */
		if (node == NULL) {
			/*
			 * An additional node is required, but
			 * base_node_alloc() can cause a new base chunk to be
			 * allocated. Drop chunks_mtx in order to avoid
			 * deadlock, and if node allocation fails, deallocate
			 * the result before returning an error.
			 */
			malloc_mutex_unlock(&chunks_mtx);
			node = base_node_alloc();
			if (node == NULL) {
				chunk_dealloc(ret, size, true);
				return (NULL);
			}
			malloc_mutex_lock(&chunks_mtx);
		}
		node->addr = (void *)((uintptr_t)(ret) + size);
		node->size = trailsize;
		extent_tree_szad_insert(&chunks_szad, node);
		extent_tree_ad_insert(&chunks_ad, node);
		node = NULL;
	}
	malloc_mutex_unlock(&chunks_mtx);

	if (node != NULL)
		base_node_dealloc(node);
#ifdef JEMALLOC_PURGE_MADVISE_DONTNEED
	/* Pages are zeroed as a side effect of pages_purge(). */
	*zero = true;
#else
	if (*zero) {
		VALGRIND_MAKE_MEM_UNDEFINED(ret, size);
		memset(ret, 0, size);
	}
#endif
	return (ret);
}
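
/*
 * Worked example for chunk_recycle() above (illustrative addition, not part
 * of the original jemalloc source; assumes chunksize == 4 MiB): a request
 * with size == 4 MiB and alignment == 8 MiB computes
 * alloc_size == 4 MiB + 8 MiB - 4 MiB == 8 MiB, so any cached extent of at
 * least 8 MiB can satisfy it. If the szad search returns a 12 MiB extent at
 * 0x300400000 (4 MiB past an 8 MiB boundary), then:
 *
 *	leadsize  == ALIGNMENT_CEILING(0x300400000, 8 MiB) - 0x300400000
 *	          == 0x300800000 - 0x300400000 == 4 MiB
 *	trailsize == 12 MiB - 4 MiB - 4 MiB == 4 MiB
 *	ret       == (void *)0x300800000
 *
 * The 4 MiB leading remainder is re-inserted using the existing node, and the
 * 4 MiB trailing remainder gets a node freshly obtained via base_node_alloc().
 */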

/*
 * If the caller specifies (*zero == false), it is still possible to receive
 * zeroed memory, in which case *zero is toggled to true. arena_chunk_alloc()
 * takes advantage of this to avoid demanding zeroed chunks, but taking
 * advantage of them if they are returned.
 */
void *
chunk_alloc(size_t size, size_t alignment, bool base, bool *zero)
{
	void *ret;

	assert(size != 0);
	assert((size & chunksize_mask) == 0);
	assert(alignment != 0);
	assert((alignment & chunksize_mask) == 0);

	ret = chunk_recycle(size, alignment, base, zero);
	if (ret != NULL)
		goto label_return;

	ret = chunk_alloc_mmap(size, alignment, zero);
	if (ret != NULL)
		goto label_return;

	if (config_dss) {
		ret = chunk_alloc_dss(size, alignment, zero);
		if (ret != NULL)
			goto label_return;
	}

	/* All strategies for allocation failed. */
	ret = NULL;
label_return:
	if (config_ivsalloc && base == false && ret != NULL) {
		if (rtree_set(chunks_rtree, (uintptr_t)ret, ret)) {
			chunk_dealloc(ret, size, true);
			return (NULL);
		}
	}
	if ((config_stats || config_prof) && ret != NULL) {
		bool gdump;
		malloc_mutex_lock(&chunks_mtx);
		if (config_stats)
			stats_chunks.nchunks += (size / chunksize);
		stats_chunks.curchunks += (size / chunksize);
		if (stats_chunks.curchunks > stats_chunks.highchunks) {
			stats_chunks.highchunks = stats_chunks.curchunks;
			if (config_prof)
				gdump = true;
		} else if (config_prof)
			gdump = false;
		malloc_mutex_unlock(&chunks_mtx);
		if (config_prof && opt_prof && opt_prof_gdump && gdump)
			prof_gdump();
	}
	if (config_debug && *zero && ret != NULL) {
		size_t i;
		size_t *p = (size_t *)(uintptr_t)ret;

		VALGRIND_MAKE_MEM_DEFINED(ret, size);
		for (i = 0; i < size / sizeof(size_t); i++)
			assert(p[i] == 0);
	}
	assert(CHUNK_ADDR2BASE(ret) == ret);
	return (ret);
}
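
/*
 * Usage sketch for chunk_alloc() (illustrative addition, not part of the
 * original jemalloc source): a caller that does not require zeroed memory can
 * pass a false flag and inspect it afterwards, as described in the comment
 * above for arena_chunk_alloc():
 *
 *	bool zero = false;
 *	void *chunk = chunk_alloc(chunksize, chunksize, false, &zero);
 *
 * On return, (chunk != NULL && zero) means the memory happens to be zeroed
 * even though that was not requested, so an explicit memset() can be skipped.
 */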

static void
chunk_record(void *chunk, size_t size)
{
	extent_node_t *xnode, *node, *prev, key;

	pages_purge(chunk, size);

	/*
	 * Allocate a node before acquiring chunks_mtx even though it might not
	 * be needed, because base_node_alloc() may cause a new base chunk to
	 * be allocated, which could cause deadlock if chunks_mtx were already
	 * held.
	 */
	xnode = base_node_alloc();

	malloc_mutex_lock(&chunks_mtx);
	key.addr = (void *)((uintptr_t)chunk + size);
	node = extent_tree_ad_nsearch(&chunks_ad, &key);
	/* Try to coalesce forward. */
	if (node != NULL && node->addr == key.addr) {
		/*
		 * Coalesce chunk with the following address range. This does
		 * not change the position within chunks_ad, so only
		 * remove/insert from/into chunks_szad.
		 */
		extent_tree_szad_remove(&chunks_szad, node);
		node->addr = chunk;
		node->size += size;
		extent_tree_szad_insert(&chunks_szad, node);
		if (xnode != NULL)
			base_node_dealloc(xnode);
	} else {
		/* Coalescing forward failed, so insert a new node. */
		if (xnode == NULL) {
			/*
			 * base_node_alloc() failed, which is an exceedingly
			 * unlikely failure. Leak chunk; its pages have
			 * already been purged, so this is only a virtual
			 * memory leak.
			 */
			malloc_mutex_unlock(&chunks_mtx);
			return;
		}
		node = xnode;
		node->addr = chunk;
		node->size = size;
		extent_tree_ad_insert(&chunks_ad, node);
		extent_tree_szad_insert(&chunks_szad, node);
	}

	/* Try to coalesce backward. */
	prev = extent_tree_ad_prev(&chunks_ad, node);
	if (prev != NULL && (void *)((uintptr_t)prev->addr + prev->size) ==
	    chunk) {
		/*
		 * Coalesce chunk with the previous address range. This does
		 * not change the position within chunks_ad, so only
		 * remove/insert node from/into chunks_szad.
		 */
		extent_tree_szad_remove(&chunks_szad, prev);
		extent_tree_ad_remove(&chunks_ad, prev);

		extent_tree_szad_remove(&chunks_szad, node);
		node->addr = prev->addr;
		node->size += prev->size;
		extent_tree_szad_insert(&chunks_szad, node);

		base_node_dealloc(prev);
	}
	malloc_mutex_unlock(&chunks_mtx);
}
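
/*
 * Coalescing example for chunk_record() above (illustrative addition, not
 * part of the original jemalloc source; assumes chunksize == 4 MiB): suppose
 * the extents [0x200000000, 0x200400000) and [0x200800000, 0x200c00000) are
 * already cached and the 4 MiB chunk at 0x200400000 is recorded. The forward
 * search finds the node at key.addr == 0x200800000, which grows to cover
 * [0x200400000, 0x200c00000); the backward check then finds the previous node
 * ending exactly at 0x200400000, so the two merge into a single 12 MiB extent
 * [0x200000000, 0x200c00000) and the redundant node is released with
 * base_node_dealloc().
 */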

void
chunk_dealloc(void *chunk, size_t size, bool unmap)
{

	assert(chunk != NULL);
	assert(CHUNK_ADDR2BASE(chunk) == chunk);
	assert(size != 0);
	assert((size & chunksize_mask) == 0);

	if (config_ivsalloc)
		rtree_set(chunks_rtree, (uintptr_t)chunk, NULL);
	if (config_stats || config_prof) {
		malloc_mutex_lock(&chunks_mtx);
		stats_chunks.curchunks -= (size / chunksize);
		malloc_mutex_unlock(&chunks_mtx);
	}

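	/*
	 * Explanatory note (added; not in the original jemalloc source):
	 * chunk_record() is reached only when the chunk cannot simply be
	 * returned to the operating system, i.e. when it lies in the DSS
	 * (sbrk) region or when chunk_dealloc_mmap() returns true to signal
	 * that it declined to munmap() the pages; such chunks are cached for
	 * later reuse by chunk_recycle().
	 */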
	if (unmap) {
		if ((config_dss && chunk_in_dss(chunk)) ||
		    chunk_dealloc_mmap(chunk, size))
			chunk_record(chunk, size);
	}
}

bool
chunk_boot(void)
{

	/* Set variables according to the value of opt_lg_chunk. */
	chunksize = (ZU(1) << opt_lg_chunk);
	assert(chunksize >= PAGE);
	chunksize_mask = chunksize - 1;
	chunk_npages = (chunksize >> LG_PAGE);
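	/*
	 * Illustrative values (added note, not in the original jemalloc
	 * source; assumes the default opt_lg_chunk == 22 and LG_PAGE == 12):
	 * chunksize == 4 MiB, chunksize_mask == 0x3fffff, and
	 * chunk_npages == 1024.
	 */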

	if (config_stats || config_prof) {
		if (malloc_mutex_init(&chunks_mtx))
			return (true);
		memset(&stats_chunks, 0, sizeof(chunk_stats_t));
	}
	if (config_dss && chunk_dss_boot())
		return (true);
	extent_tree_szad_new(&chunks_szad);
	extent_tree_ad_new(&chunks_ad);
	if (config_ivsalloc) {
		chunks_rtree = rtree_new((ZU(1) << (LG_SIZEOF_PTR+3)) -
		    opt_lg_chunk);
		if (chunks_rtree == NULL)
			return (true);
	}

	return (false);
}