/* deps/jemalloc/src/huge.c (jemalloc copy bundled with Redis) */
#define	JEMALLOC_HUGE_C_
#include "jemalloc/internal/jemalloc_internal.h"

/******************************************************************************/
/* Data. */

#ifdef JEMALLOC_STATS
size_t		huge_allocated;
#endif

malloc_mutex_t	huge_mtx;

/******************************************************************************/

/* Tree of chunks that are stand-alone huge allocations. */
static extent_tree_t	huge;
void *
huge_malloc(size_t size, bool zero)
{
	void *ret;
	size_t csize;
	extent_node_t *node;

	/* Allocate one or more contiguous chunks for this request. */

	csize = CHUNK_CEILING(size);
	if (csize == 0) {
		/* size is large enough to cause size_t wrap-around. */
		return (NULL);
	}

	/* Allocate an extent node with which to track the chunk. */
	node = base_node_alloc();
	if (node == NULL)
		return (NULL);

	ret = chunk_alloc(csize, false, &zero);
	if (ret == NULL) {
		base_node_dealloc(node);
		return (NULL);
	}

	/* Insert node into huge. */
	node->addr = ret;
	node->size = csize;

	malloc_mutex_lock(&huge_mtx);
	extent_tree_ad_insert(&huge, node);
#ifdef JEMALLOC_STATS
	stats_cactive_add(csize);
	huge_allocated += csize;
#endif
	malloc_mutex_unlock(&huge_mtx);

#ifdef JEMALLOC_FILL
	if (zero == false) {
		if (opt_junk)
			memset(ret, 0xa5, csize);
		else if (opt_zero)
			memset(ret, 0, csize);
	}
#endif

	return (ret);
}
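/*
 * Worked example for the rounding logic above.  The numbers assume a
 * 4 MiB chunk size purely for illustration; the real chunk size is
 * configuration-dependent.
 *
 *   size = 5 MiB           -> csize = CHUNK_CEILING(5 MiB) = 8 MiB, so two
 *                             contiguous chunks back this allocation.
 *   size close to SIZE_MAX -> the round-up overflows size_t and csize
 *                             wraps to 0, which is why a zero csize is
 *                             treated as an impossibly large request and
 *                             the function returns NULL.
 */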
/* Only handles large allocations that require more than chunk alignment. */
void *
huge_palloc(size_t size, size_t alignment, bool zero)
{
	void *ret;
	size_t alloc_size, chunk_size, offset;
	extent_node_t *node;

	/*
	 * This allocation requires alignment that is even larger than chunk
	 * alignment.  This means that huge_malloc() isn't good enough.
	 *
	 * Allocate almost twice as many chunks as are demanded by the size or
	 * alignment, in order to assure the alignment can be achieved, then
	 * unmap leading and trailing chunks.
	 */
	assert(alignment > chunksize);

	chunk_size = CHUNK_CEILING(size);

	if (size >= alignment)
		alloc_size = chunk_size + alignment - chunksize;
	else
		alloc_size = (alignment << 1) - chunksize;

	/* Allocate an extent node with which to track the chunk. */
	node = base_node_alloc();
	if (node == NULL)
		return (NULL);

	ret = chunk_alloc(alloc_size, false, &zero);
	if (ret == NULL) {
		base_node_dealloc(node);
		return (NULL);
	}

	offset = (uintptr_t)ret & (alignment - 1);
	assert((offset & chunksize_mask) == 0);
	assert(offset < alloc_size);
	if (offset == 0) {
		/* Trim trailing space. */
		chunk_dealloc((void *)((uintptr_t)ret + chunk_size),
		    alloc_size - chunk_size, true);
	} else {
		size_t trailsize;

		/* Trim leading space. */
		chunk_dealloc(ret, alignment - offset, true);

		ret = (void *)((uintptr_t)ret + (alignment - offset));

		trailsize = alloc_size - (alignment - offset) - chunk_size;
		if (trailsize != 0) {
			/* Trim trailing space. */
			assert(trailsize < alloc_size);
			chunk_dealloc((void *)((uintptr_t)ret + chunk_size),
			    trailsize, true);
		}
	}

	/* Insert node into huge. */
	node->addr = ret;
	node->size = chunk_size;

	malloc_mutex_lock(&huge_mtx);
	extent_tree_ad_insert(&huge, node);
#ifdef JEMALLOC_STATS
	stats_cactive_add(chunk_size);
	huge_allocated += chunk_size;
#endif
	malloc_mutex_unlock(&huge_mtx);

#ifdef JEMALLOC_FILL
	if (zero == false) {
		if (opt_junk)
			memset(ret, 0xa5, chunk_size);
		else if (opt_zero)
			memset(ret, 0, chunk_size);
	}
#endif

	return (ret);
}
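/*
 * Worked example for the over-allocation strategy above.  The numbers
 * assume a 4 MiB chunk size purely for illustration.
 *
 *   size = 4 MiB, alignment = 16 MiB:
 *     chunk_size = 4 MiB; since size < alignment,
 *     alloc_size = (16 MiB << 1) - 4 MiB = 28 MiB.  Wherever chunk_alloc()
 *     places those 28 MiB, they must contain a 16 MiB-aligned address
 *     followed by at least chunk_size bytes: in the worst case that
 *     address lies alignment - chunksize = 12 MiB into the mapping, so
 *     the 12 MiB leading run and the 28 - 12 - 4 = 12 MiB trailing run
 *     are unmapped again and only the aligned 4 MiB chunk is kept.
 *
 *   size = 20 MiB, alignment = 8 MiB:
 *     chunk_size = 20 MiB; since size >= alignment,
 *     alloc_size = 20 MiB + 8 MiB - 4 MiB = 24 MiB (worst case: 4 MiB of
 *     leading waste, then exactly 20 MiB of aligned space and no
 *     trailing waste).
 */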
void *
huge_ralloc_no_move(void *ptr, size_t oldsize, size_t size, size_t extra)
{

	/*
	 * Avoid moving the allocation if the size class can be left the same.
	 */
	if (oldsize > arena_maxclass
	    && CHUNK_CEILING(oldsize) >= CHUNK_CEILING(size)
	    && CHUNK_CEILING(oldsize) <= CHUNK_CEILING(size+extra)) {
		assert(CHUNK_CEILING(oldsize) == oldsize);
#ifdef JEMALLOC_FILL
		if (opt_junk && size < oldsize) {
			memset((void *)((uintptr_t)ptr + size), 0x5a,
			    oldsize - size);
		}
#endif
		return (ptr);
	}

	/* Reallocation would require a move. */
	return (NULL);
}
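/*
 * Example of the in-place case above, again assuming a 4 MiB chunk size
 * for illustration: shrinking an 8 MiB huge allocation (two chunks) to
 * size = 5 MiB with extra = 0 still needs CHUNK_CEILING(5 MiB) = 8 MiB,
 * so the same chunks are reused and, when junk filling is enabled, the
 * now-unused tail [5 MiB, 8 MiB) is filled with 0x5a.  Growing the same
 * allocation to size = 9 MiB would need CHUNK_CEILING(9 MiB) = 12 MiB,
 * which exceeds oldsize, so NULL is returned and the caller falls back
 * to the copying path in huge_ralloc().
 */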
void *
huge_ralloc(void *ptr, size_t oldsize, size_t size, size_t extra,
    size_t alignment, bool zero)
{
	void *ret;
	size_t copysize;

	/* Try to avoid moving the allocation. */
	ret = huge_ralloc_no_move(ptr, oldsize, size, extra);
	if (ret != NULL)
		return (ret);

	/*
	 * size and oldsize are different enough that we need to use a
	 * different size class.  In that case, fall back to allocating new
	 * space and copying.
	 */
	if (alignment > chunksize)
		ret = huge_palloc(size + extra, alignment, zero);
	else
		ret = huge_malloc(size + extra, zero);

	if (ret == NULL) {
		if (extra == 0)
			return (NULL);
		/* Try again, this time without extra. */
		if (alignment > chunksize)
			ret = huge_palloc(size, alignment, zero);
		else
			ret = huge_malloc(size, zero);

		if (ret == NULL)
			return (NULL);
	}

	/*
	 * Copy at most size bytes (not size+extra), since the caller has no
	 * expectation that the extra bytes will be reliably preserved.
	 */
	copysize = (size < oldsize) ? size : oldsize;

	/*
	 * Use mremap(2) if this is a huge-->huge reallocation, and neither the
	 * source nor the destination are in swap or dss.
	 */
#ifdef JEMALLOC_MREMAP_FIXED
	if (oldsize >= chunksize
#  ifdef JEMALLOC_SWAP
	    && (swap_enabled == false || (chunk_in_swap(ptr) == false &&
	    chunk_in_swap(ret) == false))
#  endif
#  ifdef JEMALLOC_DSS
	    && chunk_in_dss(ptr) == false && chunk_in_dss(ret) == false
#  endif
	    ) {
		size_t newsize = huge_salloc(ret);

		/*
		 * Remove ptr from the tree of huge allocations before
		 * performing the remap operation, in order to avoid the
		 * possibility of another thread acquiring that mapping before
		 * this one removes it from the tree.
		 */
		huge_dalloc(ptr, false);
		if (mremap(ptr, oldsize, newsize, MREMAP_MAYMOVE|MREMAP_FIXED,
		    ret) == MAP_FAILED) {
			/*
			 * Assuming no chunk management bugs in the allocator,
			 * the only documented way an error can occur here is
			 * if the application changed the map type for a
			 * portion of the old allocation.  This is firmly in
			 * undefined behavior territory, so write a diagnostic
			 * message, and optionally abort.
			 */
			char buf[BUFERROR_BUF];

			buferror(errno, buf, sizeof(buf));
			malloc_write("<jemalloc>: Error in mremap(): ");
			malloc_write(buf);
			malloc_write("\n");
			if (opt_abort)
				abort();
			memcpy(ret, ptr, copysize);
			chunk_dealloc_mmap(ptr, oldsize);
		}
	} else
#endif
	{
		memcpy(ret, ptr, copysize);
		idalloc(ptr);
	}
	return (ret);
}
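/*
 * Example of the copy-size rule above (illustrative values): shrinking an
 * oldsize = 16 MiB allocation with size = 4 MiB and extra = 4 MiB
 * allocates an 8 MiB region, but copysize = min(size, oldsize) = 4 MiB,
 * so only the first size bytes are carried over; the extra space is best
 * effort and its previous contents are never preserved.  When the
 * mremap(2) path is taken the copy is avoided entirely, because
 * MREMAP_FIXED moves the old pages onto the new mapping; that is also why
 * ptr is removed from the huge tree via huge_dalloc(ptr, false) before
 * the remap, so no other thread can look up the old mapping while its
 * pages are being moved.
 */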
void
huge_dalloc(void *ptr, bool unmap)
{
	extent_node_t *node, key;

	malloc_mutex_lock(&huge_mtx);

	/* Extract from tree of huge allocations. */
	key.addr = ptr;
	node = extent_tree_ad_search(&huge, &key);
	assert(node != NULL);
	assert(node->addr == ptr);
	extent_tree_ad_remove(&huge, node);

#ifdef JEMALLOC_STATS
	stats_cactive_sub(node->size);
	huge_allocated -= node->size;
#endif

	malloc_mutex_unlock(&huge_mtx);

	if (unmap) {
	/* Unmap chunk. */
#ifdef JEMALLOC_FILL
#if (defined(JEMALLOC_SWAP) || defined(JEMALLOC_DSS))
		if (opt_junk)
			memset(node->addr, 0x5a, node->size);
#endif
#endif
	}

	chunk_dealloc(node->addr, node->size, unmap);

	base_node_dealloc(node);
}
size_t
huge_salloc(const void *ptr)
{
	size_t ret;
	extent_node_t *node, key;

	malloc_mutex_lock(&huge_mtx);

	/* Extract from tree of huge allocations. */
	key.addr = __DECONST(void *, ptr);
	node = extent_tree_ad_search(&huge, &key);
	assert(node != NULL);

	ret = node->size;

	malloc_mutex_unlock(&huge_mtx);

	return (ret);
}
#ifdef JEMALLOC_PROF
prof_ctx_t *
huge_prof_ctx_get(const void *ptr)
{
	prof_ctx_t *ret;
	extent_node_t *node, key;

	malloc_mutex_lock(&huge_mtx);

	/* Extract from tree of huge allocations. */
	key.addr = __DECONST(void *, ptr);
	node = extent_tree_ad_search(&huge, &key);
	assert(node != NULL);

	ret = node->prof_ctx;

	malloc_mutex_unlock(&huge_mtx);

	return (ret);
}

void
huge_prof_ctx_set(const void *ptr, prof_ctx_t *ctx)
{
	extent_node_t *node, key;

	malloc_mutex_lock(&huge_mtx);

	/* Extract from tree of huge allocations. */
	key.addr = __DECONST(void *, ptr);
	node = extent_tree_ad_search(&huge, &key);
	assert(node != NULL);

	node->prof_ctx = ctx;

	malloc_mutex_unlock(&huge_mtx);
}
#endif
bool
huge_boot(void)
{
	/* Initialize chunks data. */
	if (malloc_mutex_init(&huge_mtx))
		return (true);
	extent_tree_ad_new(&huge);
#ifdef JEMALLOC_STATS
	huge_allocated = 0;
#endif
	return (false);
}