deps/jemalloc/src/huge.c
#define JEMALLOC_HUGE_C_
#include "jemalloc/internal/jemalloc_internal.h"

/******************************************************************************/
/* Data. */

size_t huge_allocated;

malloc_mutex_t huge_mtx;

/******************************************************************************/

/* Tree of chunks that are stand-alone huge allocations. */
static extent_tree_t huge;
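/*
 * All accesses to the huge tree above and to huge_allocated are serialized by
 * huge_mtx; the tree itself provides no internal synchronization.
 */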
void *
huge_malloc(size_t size, bool zero)
{

    return (huge_palloc(size, chunksize, zero));
}
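/*
 * Illustrative only (an assumption about the callers, not part of this file):
 * the allocation fast paths in jemalloc_internal.h dispatch to huge_malloc()
 * for requests larger than arena_maxclass, roughly:
 *
 *    if (size <= arena_maxclass)
 *        return (arena_malloc(NULL, size, false, true));
 *    else
 *        return (huge_malloc(size, false));
 */

/*
 * Allocate one or more contiguous chunks, rounded up to a multiple of the
 * chunk size and aligned to at least the requested alignment, and record the
 * allocation in the huge tree.
 */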
void *
huge_palloc(size_t size, size_t alignment, bool zero)
{
    void *ret;
    size_t csize;
    extent_node_t *node;
    bool is_zeroed;

    /* Allocate one or more contiguous chunks for this request. */

    csize = CHUNK_CEILING(size);
    if (csize == 0) {
        /* size is large enough to cause size_t wrap-around. */
        return (NULL);
    }

    /* Allocate an extent node with which to track the chunk. */
    node = base_node_alloc();
    if (node == NULL)
        return (NULL);

    /*
     * Copy zero into is_zeroed and pass the copy to chunk_alloc(), so that
     * it is possible to make correct junk/zero fill decisions below.
     */
    is_zeroed = zero;
    ret = chunk_alloc(csize, alignment, false, &is_zeroed,
        chunk_dss_prec_get());
    if (ret == NULL) {
        base_node_dealloc(node);
        return (NULL);
    }

    /* Insert node into huge. */
    node->addr = ret;
    node->size = csize;

    malloc_mutex_lock(&huge_mtx);
    extent_tree_ad_insert(&huge, node);
    if (config_stats) {
        stats_cactive_add(csize);
        huge_allocated += csize;
    }
    malloc_mutex_unlock(&huge_mtx);

    if (config_fill && zero == false) {
        if (opt_junk)
            memset(ret, 0xa5, csize);
        else if (opt_zero && is_zeroed == false)
            memset(ret, 0, csize);
    }

    return (ret);
}
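/*
 * Try to satisfy a reallocation request without moving the allocation.
 * Returns ptr on success, or NULL if a move (and copy) is required.
 */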
void *
huge_ralloc_no_move(void *ptr, size_t oldsize, size_t size, size_t extra)
{

    /*
     * Avoid moving the allocation if the size class can be left the same.
     */
    if (oldsize > arena_maxclass
        && CHUNK_CEILING(oldsize) >= CHUNK_CEILING(size)
        && CHUNK_CEILING(oldsize) <= CHUNK_CEILING(size+extra)) {
        assert(CHUNK_CEILING(oldsize) == oldsize);
        if (config_fill && opt_junk && size < oldsize) {
            memset((void *)((uintptr_t)ptr + size), 0x5a,
                oldsize - size);
        }
        return (ptr);
    }

    /* Reallocation would require a move. */
    return (NULL);
}
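/*
 * Reallocate a huge region.  The in-place path above is tried first; failing
 * that, a new region is allocated and the contents are either remapped via
 * mremap(2) (when JEMALLOC_MREMAP is defined) or copied.
 */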
void *
huge_ralloc(void *ptr, size_t oldsize, size_t size, size_t extra,
    size_t alignment, bool zero, bool try_tcache_dalloc)
{
    void *ret;
    size_t copysize;

    /* Try to avoid moving the allocation. */
    ret = huge_ralloc_no_move(ptr, oldsize, size, extra);
    if (ret != NULL)
        return (ret);

    /*
     * size and oldsize are different enough that we need to use a
     * different size class.  In that case, fall back to allocating new
     * space and copying.
     */
    if (alignment > chunksize)
        ret = huge_palloc(size + extra, alignment, zero);
    else
        ret = huge_malloc(size + extra, zero);

    if (ret == NULL) {
        if (extra == 0)
            return (NULL);
        /* Try again, this time without extra. */
        if (alignment > chunksize)
            ret = huge_palloc(size, alignment, zero);
        else
            ret = huge_malloc(size, zero);

        if (ret == NULL)
            return (NULL);
    }

    /*
     * Copy at most size bytes (not size+extra), since the caller has no
     * expectation that the extra bytes will be reliably preserved.
     */
    copysize = (size < oldsize) ? size : oldsize;

#ifdef JEMALLOC_MREMAP
    /*
     * Use mremap(2) if this is a huge-->huge reallocation, and neither the
     * source nor the destination are in dss.
     */
    if (oldsize >= chunksize && (config_dss == false || (chunk_in_dss(ptr)
        == false && chunk_in_dss(ret) == false))) {
        size_t newsize = huge_salloc(ret);

        /*
         * Remove ptr from the tree of huge allocations before
         * performing the remap operation, in order to avoid the
         * possibility of another thread acquiring that mapping before
         * this one removes it from the tree.
         */
        huge_dalloc(ptr, false);
        if (mremap(ptr, oldsize, newsize, MREMAP_MAYMOVE|MREMAP_FIXED,
            ret) == MAP_FAILED) {
            /*
             * Assuming no chunk management bugs in the allocator,
             * the only documented way an error can occur here is
             * if the application changed the map type for a
             * portion of the old allocation.  This is firmly in
             * undefined behavior territory, so write a diagnostic
             * message, and optionally abort.
             */
            char buf[BUFERROR_BUF];

            buferror(buf, sizeof(buf));
            malloc_printf("<jemalloc>: Error in mremap(): %s\n",
                buf);
            if (opt_abort)
                abort();
            memcpy(ret, ptr, copysize);
            chunk_dealloc_mmap(ptr, oldsize);
        }
    } else
#endif
    {
        memcpy(ret, ptr, copysize);
        iqallocx(ptr, try_tcache_dalloc);
    }
    return (ret);
}
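/*
 * Deallocate a huge region: remove its extent node from the huge tree, update
 * statistics, optionally junk-fill, and hand the chunk back to the chunk
 * layer.  When unmap is false the underlying mapping is kept, which is what
 * the mremap() path in huge_ralloc() relies on.
 */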
void
huge_dalloc(void *ptr, bool unmap)
{
    extent_node_t *node, key;

    malloc_mutex_lock(&huge_mtx);

    /* Extract from tree of huge allocations. */
    key.addr = ptr;
    node = extent_tree_ad_search(&huge, &key);
    assert(node != NULL);
    assert(node->addr == ptr);
    extent_tree_ad_remove(&huge, node);

    if (config_stats) {
        stats_cactive_sub(node->size);
        huge_allocated -= node->size;
    }

    malloc_mutex_unlock(&huge_mtx);

    if (unmap && config_fill && config_dss && opt_junk)
        memset(node->addr, 0x5a, node->size);

    chunk_dealloc(node->addr, node->size, unmap);

    base_node_dealloc(node);
}
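/* Return the size recorded for the huge allocation that starts at ptr. */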
size_t
huge_salloc(const void *ptr)
{
    size_t ret;
    extent_node_t *node, key;

    malloc_mutex_lock(&huge_mtx);

    /* Extract from tree of huge allocations. */
    key.addr = __DECONST(void *, ptr);
    node = extent_tree_ad_search(&huge, &key);
    assert(node != NULL);

    ret = node->size;

    malloc_mutex_unlock(&huge_mtx);

    return (ret);
}
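/*
 * Get/set the heap profiling context associated with a huge allocation.  The
 * context is stored in the allocation's extent node.
 */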
prof_ctx_t *
huge_prof_ctx_get(const void *ptr)
{
    prof_ctx_t *ret;
    extent_node_t *node, key;

    malloc_mutex_lock(&huge_mtx);

    /* Extract from tree of huge allocations. */
    key.addr = __DECONST(void *, ptr);
    node = extent_tree_ad_search(&huge, &key);
    assert(node != NULL);

    ret = node->prof_ctx;

    malloc_mutex_unlock(&huge_mtx);

    return (ret);
}
void
huge_prof_ctx_set(const void *ptr, prof_ctx_t *ctx)
{
    extent_node_t *node, key;

    malloc_mutex_lock(&huge_mtx);

    /* Extract from tree of huge allocations. */
    key.addr = __DECONST(void *, ptr);
    node = extent_tree_ad_search(&huge, &key);
    assert(node != NULL);

    node->prof_ctx = ctx;

    malloc_mutex_unlock(&huge_mtx);
}
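/*
 * One-time initialization of the huge allocation bookkeeping; returns true on
 * error.
 */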
bool
huge_boot(void)
{

    /* Initialize chunks data. */
    if (malloc_mutex_init(&huge_mtx))
        return (true);
    extent_tree_ad_new(&huge);

    return (false);
}
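/*
 * Fork handlers: huge_mtx is acquired in the pre-fork hook and released/reset
 * in the post-fork hooks so that the huge tree cannot be forked while in an
 * inconsistent state.
 */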
void
huge_prefork(void)
{

    malloc_mutex_prefork(&huge_mtx);
}
void
huge_postfork_parent(void)
{

    malloc_mutex_postfork_parent(&huge_mtx);
}
void
huge_postfork_child(void)
{

    malloc_mutex_postfork_child(&huge_mtx);
}