/* deps/jemalloc/src/huge.c */
#define JEMALLOC_HUGE_C_
#include "jemalloc/internal/jemalloc_internal.h"

/******************************************************************************/
/* Data. */

#ifdef JEMALLOC_STATS
size_t		huge_allocated;
#endif

malloc_mutex_t	huge_mtx;

/******************************************************************************/

/* Tree of chunks that are stand-alone huge allocations. */
static extent_tree_t	huge;
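
/*
 * Overview (informal): each stand-alone huge allocation is tracked by an
 * extent_node_t inserted into the address-ordered tree `huge`, and every
 * operation below that touches the tree or the stats counter does so while
 * holding `huge_mtx`.  Lookups (huge_salloc(), huge_dalloc(), the prof_ctx
 * accessors) search the tree by the allocation's base address; nothing in
 * this file assumes more of the tree than insert, remove, and address search.
 */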

void *
huge_malloc(size_t size, bool zero)
{
	void *ret;
	size_t csize;
	extent_node_t *node;

	/* Allocate one or more contiguous chunks for this request. */

	csize = CHUNK_CEILING(size);
	if (csize == 0) {
		/* size is large enough to cause size_t wrap-around. */
		return (NULL);
	}

	/* Allocate an extent node with which to track the chunk. */
	node = base_node_alloc();
	if (node == NULL)
		return (NULL);

	ret = chunk_alloc(csize, false, &zero);
	if (ret == NULL) {
		base_node_dealloc(node);
		return (NULL);
	}

	/* Insert node into huge. */
	node->addr = ret;
	node->size = csize;

	malloc_mutex_lock(&huge_mtx);
	extent_tree_ad_insert(&huge, node);
#ifdef JEMALLOC_STATS
	stats_cactive_add(csize);
	huge_allocated += csize;
#endif
	malloc_mutex_unlock(&huge_mtx);

#ifdef JEMALLOC_FILL
	if (zero == false) {
		if (opt_junk)
			memset(ret, 0xa5, csize);
		else if (opt_zero)
			memset(ret, 0, csize);
	}
#endif

	return (ret);
}

/* Only handles large allocations that require more than chunk alignment. */
void *
huge_palloc(size_t size, size_t alignment, bool zero)
{
	void *ret;
	size_t alloc_size, chunk_size, offset;
	extent_node_t *node;

	/*
	 * This allocation requires alignment that is even larger than chunk
	 * alignment.  This means that huge_malloc() isn't good enough.
	 *
	 * Allocate almost twice as many chunks as are demanded by the size or
	 * alignment, in order to assure the alignment can be achieved, then
	 * unmap leading and trailing chunks.
	 */
	assert(alignment > chunksize);

	chunk_size = CHUNK_CEILING(size);

	if (size >= alignment)
		alloc_size = chunk_size + alignment - chunksize;
	else
		alloc_size = (alignment << 1) - chunksize;

	/* Allocate an extent node with which to track the chunk. */
	node = base_node_alloc();
	if (node == NULL)
		return (NULL);

	ret = chunk_alloc(alloc_size, false, &zero);
	if (ret == NULL) {
		base_node_dealloc(node);
		return (NULL);
	}

	offset = (uintptr_t)ret & (alignment - 1);
	assert((offset & chunksize_mask) == 0);
	assert(offset < alloc_size);
	if (offset == 0) {
		/* Trim trailing space. */
		chunk_dealloc((void *)((uintptr_t)ret + chunk_size), alloc_size
		    - chunk_size);
	} else {
		size_t trailsize;

		/* Trim leading space. */
		chunk_dealloc(ret, alignment - offset);

		ret = (void *)((uintptr_t)ret + (alignment - offset));

		trailsize = alloc_size - (alignment - offset) - chunk_size;
		if (trailsize != 0) {
			/* Trim trailing space. */
			assert(trailsize < alloc_size);
			chunk_dealloc((void *)((uintptr_t)ret + chunk_size),
			    trailsize);
		}
	}

	/* Insert node into huge. */
	node->addr = ret;
	node->size = chunk_size;

	malloc_mutex_lock(&huge_mtx);
	extent_tree_ad_insert(&huge, node);
#ifdef JEMALLOC_STATS
	stats_cactive_add(chunk_size);
	huge_allocated += chunk_size;
#endif
	malloc_mutex_unlock(&huge_mtx);

#ifdef JEMALLOC_FILL
	if (zero == false) {
		if (opt_junk)
			memset(ret, 0xa5, chunk_size);
		else if (opt_zero)
			memset(ret, 0, chunk_size);
	}
#endif

	return (ret);
}

void *
huge_ralloc_no_move(void *ptr, size_t oldsize, size_t size, size_t extra)
{

	/*
	 * Avoid moving the allocation if the size class can be left the same.
	 */
	if (oldsize > arena_maxclass
	    && CHUNK_CEILING(oldsize) >= CHUNK_CEILING(size)
	    && CHUNK_CEILING(oldsize) <= CHUNK_CEILING(size+extra)) {
		assert(CHUNK_CEILING(oldsize) == oldsize);
#ifdef JEMALLOC_FILL
		if (opt_junk && size < oldsize) {
			memset((void *)((uintptr_t)ptr + size), 0x5a,
			    oldsize - size);
		}
#endif
		return (ptr);
	}

	/* Reallocation would require a move. */
	return (NULL);
}
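
/*
 * Informal example of the in-place path above, assuming a hypothetical 4 MiB
 * chunksize: an existing 8 MiB huge allocation (oldsize = 8 MiB) asked to
 * become 5 MiB with extra = 0 still needs CHUNK_CEILING(5 MiB) = 8 MiB of
 * chunks, so the same mapping is reused; with opt_junk enabled, the now
 * unused tail (bytes [size, oldsize)) is filled with 0x5a.  Only when the
 * requested range cannot fit the existing chunk count does the caller fall
 * through to huge_ralloc() and pay for a move.
 */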

void *
huge_ralloc(void *ptr, size_t oldsize, size_t size, size_t extra,
    size_t alignment, bool zero)
{
	void *ret;
	size_t copysize;

	/* Try to avoid moving the allocation. */
	ret = huge_ralloc_no_move(ptr, oldsize, size, extra);
	if (ret != NULL)
		return (ret);

	/*
	 * size and oldsize are different enough that we need to use a
	 * different size class.  In that case, fall back to allocating new
	 * space and copying.
	 */
	if (alignment > chunksize)
		ret = huge_palloc(size + extra, alignment, zero);
	else
		ret = huge_malloc(size + extra, zero);

	if (ret == NULL) {
		if (extra == 0)
			return (NULL);
		/* Try again, this time without extra. */
		if (alignment > chunksize)
			ret = huge_palloc(size, alignment, zero);
		else
			ret = huge_malloc(size, zero);

		if (ret == NULL)
			return (NULL);
	}

	/*
	 * Copy at most size bytes (not size+extra), since the caller has no
	 * expectation that the extra bytes will be reliably preserved.
	 */
	copysize = (size < oldsize) ? size : oldsize;

	/*
	 * Use mremap(2) if this is a huge-->huge reallocation, and neither the
	 * source nor the destination are in swap or dss.
	 */
#ifdef JEMALLOC_MREMAP_FIXED
	if (oldsize >= chunksize
#  ifdef JEMALLOC_SWAP
	    && (swap_enabled == false || (chunk_in_swap(ptr) == false &&
	    chunk_in_swap(ret) == false))
#  endif
#  ifdef JEMALLOC_DSS
	    && chunk_in_dss(ptr) == false && chunk_in_dss(ret) == false
#  endif
	    ) {
		size_t newsize = huge_salloc(ret);

		if (mremap(ptr, oldsize, newsize, MREMAP_MAYMOVE|MREMAP_FIXED,
		    ret) == MAP_FAILED) {
			/*
			 * Assuming no chunk management bugs in the allocator,
			 * the only documented way an error can occur here is
			 * if the application changed the map type for a
			 * portion of the old allocation.  This is firmly in
			 * undefined behavior territory, so write a diagnostic
			 * message, and optionally abort.
			 */
			char buf[BUFERROR_BUF];

			buferror(errno, buf, sizeof(buf));
			malloc_write("<jemalloc>: Error in mremap(): ");
			malloc_write(buf);
			malloc_write("\n");
			if (opt_abort)
				abort();
			memcpy(ret, ptr, copysize);
			idalloc(ptr);
		} else
			huge_dalloc(ptr, false);
	} else
#endif
	{
		memcpy(ret, ptr, copysize);
		idalloc(ptr);
	}

	return (ret);
}

void
huge_dalloc(void *ptr, bool unmap)
{
	extent_node_t *node, key;

	malloc_mutex_lock(&huge_mtx);

	/* Extract from tree of huge allocations. */
	key.addr = ptr;
	node = extent_tree_ad_search(&huge, &key);
	assert(node != NULL);
	assert(node->addr == ptr);
	extent_tree_ad_remove(&huge, node);

#ifdef JEMALLOC_STATS
	stats_cactive_sub(node->size);
	huge_allocated -= node->size;
#endif

	malloc_mutex_unlock(&huge_mtx);

	if (unmap) {
	/* Unmap chunk. */
#ifdef JEMALLOC_FILL
#if (defined(JEMALLOC_SWAP) || defined(JEMALLOC_DSS))
		if (opt_junk)
			memset(node->addr, 0x5a, node->size);
#endif
#endif
		chunk_dealloc(node->addr, node->size);
	}

	base_node_dealloc(node);
}
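
/*
 * Note: the tree removal and stats update above happen under huge_mtx, but
 * the optional junk fill and the (potentially slow) chunk_dealloc() happen
 * after the mutex is dropped; node is safe to touch without the lock at that
 * point because it has already been unlinked from the tree.
 */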

size_t
huge_salloc(const void *ptr)
{
	size_t ret;
	extent_node_t *node, key;

	malloc_mutex_lock(&huge_mtx);

	/* Extract from tree of huge allocations. */
	key.addr = __DECONST(void *, ptr);
	node = extent_tree_ad_search(&huge, &key);
	assert(node != NULL);

	ret = node->size;

	malloc_mutex_unlock(&huge_mtx);

	return (ret);
}
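
/*
 * huge_salloc() reports node->size, i.e. the chunk-rounded usable size rather
 * than the originally requested size; huge_ralloc() relies on this when it
 * passes the result to mremap() as the new mapping length.
 */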

prof_ctx_t *
huge_prof_ctx_get(const void *ptr)
{
	prof_ctx_t *ret;
	extent_node_t *node, key;

	malloc_mutex_lock(&huge_mtx);

	/* Extract from tree of huge allocations. */
	key.addr = __DECONST(void *, ptr);
	node = extent_tree_ad_search(&huge, &key);
	assert(node != NULL);

	ret = node->prof_ctx;

	malloc_mutex_unlock(&huge_mtx);

	return (ret);
}

void
huge_prof_ctx_set(const void *ptr, prof_ctx_t *ctx)
{
	extent_node_t *node, key;

	malloc_mutex_lock(&huge_mtx);

	/* Extract from tree of huge allocations. */
	key.addr = __DECONST(void *, ptr);
	node = extent_tree_ad_search(&huge, &key);
	assert(node != NULL);

	node->prof_ctx = ctx;

	malloc_mutex_unlock(&huge_mtx);
}
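
/*
 * Note: the two accessors above simply stash or read the profiling context
 * pointer in the allocation's extent node, so a huge object's prof_ctx lookup
 * costs one tree search under huge_mtx.  They are only meaningful in builds
 * configured with heap profiling (JEMALLOC_PROF).
 */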

bool
huge_boot(void)
{
	/* Initialize chunks data. */
	if (malloc_mutex_init(&huge_mtx))
		return (true);
	extent_tree_ad_new(&huge);

#ifdef JEMALLOC_STATS
	huge_allocated = 0;
#endif

	return (false);
}
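
/*
 * Rough usage sketch: huge_boot() is presumably invoked once from jemalloc's
 * bootstrap path, before any huge allocation is attempted, and a true return
 * value signals boot failure, e.g.:
 *
 *	if (huge_boot())
 *		return (true);
 */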