/* deps/jemalloc/src/huge.c */
#define JEMALLOC_HUGE_C_
#include "jemalloc/internal/jemalloc_internal.h"

/******************************************************************************/
/* Data. */

uint64_t huge_nmalloc;
uint64_t huge_ndalloc;
size_t huge_allocated;

malloc_mutex_t huge_mtx;

/******************************************************************************/

/* Tree of chunks that are stand-alone huge allocations. */
static extent_tree_t huge;
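/*
 * Added commentary (not part of the original file): the tree is ordered by
 * chunk address, so huge_dalloc(), huge_salloc(), and the prof_ctx accessors
 * below all locate a chunk's node by searching with key.addr set to the
 * pointer being queried.
 */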
void *
huge_malloc(size_t size, bool zero)
{

    return (huge_palloc(size, chunksize, zero));
}
void *
huge_palloc(size_t size, size_t alignment, bool zero)
{
    void *ret;
    size_t csize;
    extent_node_t *node;
    bool is_zeroed;

    /* Allocate one or more contiguous chunks for this request. */

    csize = CHUNK_CEILING(size);
    if (csize == 0) {
        /* size is large enough to cause size_t wrap-around. */
        return (NULL);
    }

    /* Allocate an extent node with which to track the chunk. */
    node = base_node_alloc();
    if (node == NULL)
        return (NULL);

    /*
     * Copy zero into is_zeroed and pass the copy to chunk_alloc(), so that
     * it is possible to make correct junk/zero fill decisions below.
     */
    is_zeroed = zero;
    ret = chunk_alloc(csize, alignment, false, &is_zeroed);
    if (ret == NULL) {
        base_node_dealloc(node);
        return (NULL);
    }

    /* Insert node into huge. */
    node->addr = ret;
    node->size = csize;

    malloc_mutex_lock(&huge_mtx);
    extent_tree_ad_insert(&huge, node);
    if (config_stats) {
        stats_cactive_add(csize);
        huge_nmalloc++;
        huge_allocated += csize;
    }
    malloc_mutex_unlock(&huge_mtx);

    if (config_fill && zero == false) {
        if (opt_junk)
            memset(ret, 0xa5, csize);
        else if (opt_zero && is_zeroed == false)
            memset(ret, 0, csize);
    }

    return (ret);
}
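/*
 * Added commentary (not part of the original file): CHUNK_CEILING() rounds a
 * size up to the next multiple of chunksize, so a request within chunksize
 * bytes of SIZE_MAX wraps around to 0; the csize == 0 check above rejects
 * such requests rather than allocating a zero-sized extent.
 */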
void *
huge_ralloc_no_move(void *ptr, size_t oldsize, size_t size, size_t extra)
{

    /*
     * Avoid moving the allocation if the size class can be left the same.
     */
    if (oldsize > arena_maxclass
        && CHUNK_CEILING(oldsize) >= CHUNK_CEILING(size)
        && CHUNK_CEILING(oldsize) <= CHUNK_CEILING(size+extra)) {
        assert(CHUNK_CEILING(oldsize) == oldsize);
        if (config_fill && opt_junk && size < oldsize) {
            memset((void *)((uintptr_t)ptr + size), 0x5a,
                oldsize - size);
        }
        return (ptr);
    }

    /* Reallocation would require a move. */
    return (NULL);
}
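/*
 * Added commentary (illustrative, assuming the default 4 MiB chunk size): for
 * oldsize == 8 MiB, size == 5 MiB, and extra == 0, CHUNK_CEILING(size) is
 * 8 MiB, so the allocation stays in place and, with opt_junk, the trailing
 * 3 MiB is junk-filled with 0x5a.  A grow to size == 9 MiB would need 12 MiB
 * of chunks, so this function returns NULL and the caller must move the data.
 */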
void *
huge_ralloc(void *ptr, size_t oldsize, size_t size, size_t extra,
    size_t alignment, bool zero)
{
    void *ret;
    size_t copysize;

    /* Try to avoid moving the allocation. */
    ret = huge_ralloc_no_move(ptr, oldsize, size, extra);
    if (ret != NULL)
        return (ret);

    /*
     * size and oldsize are different enough that we need to use a
     * different size class.  In that case, fall back to allocating new
     * space and copying.
     */
    if (alignment > chunksize)
        ret = huge_palloc(size + extra, alignment, zero);
    else
        ret = huge_malloc(size + extra, zero);

    if (ret == NULL) {
        if (extra == 0)
            return (NULL);
        /* Try again, this time without extra. */
        if (alignment > chunksize)
            ret = huge_palloc(size, alignment, zero);
        else
            ret = huge_malloc(size, zero);

        if (ret == NULL)
            return (NULL);
    }

    /*
     * Copy at most size bytes (not size+extra), since the caller has no
     * expectation that the extra bytes will be reliably preserved.
     */
    copysize = (size < oldsize) ? size : oldsize;

#ifdef JEMALLOC_MREMAP
    /*
     * Use mremap(2) if this is a huge-->huge reallocation, and neither the
     * source nor the destination are in dss.
     */
    if (oldsize >= chunksize && (config_dss == false || (chunk_in_dss(ptr)
        == false && chunk_in_dss(ret) == false))) {
        size_t newsize = huge_salloc(ret);

        /*
         * Remove ptr from the tree of huge allocations before
         * performing the remap operation, in order to avoid the
         * possibility of another thread acquiring that mapping before
         * this one removes it from the tree.
         */
        huge_dalloc(ptr, false);
        if (mremap(ptr, oldsize, newsize, MREMAP_MAYMOVE|MREMAP_FIXED,
            ret) == MAP_FAILED) {
            /*
             * Assuming no chunk management bugs in the allocator,
             * the only documented way an error can occur here is
             * if the application changed the map type for a
             * portion of the old allocation.  This is firmly in
             * undefined behavior territory, so write a diagnostic
             * message, and optionally abort.
             */
            char buf[BUFERROR_BUF];

            buferror(buf, sizeof(buf));
            malloc_printf("<jemalloc>: Error in mremap(): %s\n",
                buf);
            if (opt_abort)
                abort();
            memcpy(ret, ptr, copysize);
            chunk_dealloc_mmap(ptr, oldsize);
        }
    } else
#endif
    {
        memcpy(ret, ptr, copysize);
        iqalloc(ptr);
    }
    return (ret);
}
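/*
 * Added commentary (not part of the original file): when JEMALLOC_MREMAP is
 * defined and neither mapping lives in the dss, the old pages are moved onto
 * the new extent with mremap(2) instead of being copied; otherwise the
 * fallback path copies copysize == min(size, oldsize) bytes and then frees
 * the old allocation.
 */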
void
huge_dalloc(void *ptr, bool unmap)
{
    extent_node_t *node, key;

    malloc_mutex_lock(&huge_mtx);

    /* Extract from tree of huge allocations. */
    key.addr = ptr;
    node = extent_tree_ad_search(&huge, &key);
    assert(node != NULL);
    assert(node->addr == ptr);
    extent_tree_ad_remove(&huge, node);

    if (config_stats) {
        stats_cactive_sub(node->size);
        huge_ndalloc++;
        huge_allocated -= node->size;
    }

    malloc_mutex_unlock(&huge_mtx);

    if (unmap && config_fill && config_dss && opt_junk)
        memset(node->addr, 0x5a, node->size);

    chunk_dealloc(node->addr, node->size, unmap);

    base_node_dealloc(node);
}
size_t
huge_salloc(const void *ptr)
{
    size_t ret;
    extent_node_t *node, key;

    malloc_mutex_lock(&huge_mtx);

    /* Extract from tree of huge allocations. */
    key.addr = __DECONST(void *, ptr);
    node = extent_tree_ad_search(&huge, &key);
    assert(node != NULL);

    ret = node->size;

    malloc_mutex_unlock(&huge_mtx);

    return (ret);
}
prof_ctx_t *
huge_prof_ctx_get(const void *ptr)
{
    prof_ctx_t *ret;
    extent_node_t *node, key;

    malloc_mutex_lock(&huge_mtx);

    /* Extract from tree of huge allocations. */
    key.addr = __DECONST(void *, ptr);
    node = extent_tree_ad_search(&huge, &key);
    assert(node != NULL);

    ret = node->prof_ctx;

    malloc_mutex_unlock(&huge_mtx);

    return (ret);
}
void
huge_prof_ctx_set(const void *ptr, prof_ctx_t *ctx)
{
    extent_node_t *node, key;

    malloc_mutex_lock(&huge_mtx);

    /* Extract from tree of huge allocations. */
    key.addr = __DECONST(void *, ptr);
    node = extent_tree_ad_search(&huge, &key);
    assert(node != NULL);

    node->prof_ctx = ctx;

    malloc_mutex_unlock(&huge_mtx);
}
bool
huge_boot(void)
{

    /* Initialize chunks data. */
    if (malloc_mutex_init(&huge_mtx))
        return (true);
    extent_tree_ad_new(&huge);

    return (false);
}
void
huge_prefork(void)
{

    malloc_mutex_prefork(&huge_mtx);
}
void
huge_postfork_parent(void)
{

    malloc_mutex_postfork_parent(&huge_mtx);
}
void
huge_postfork_child(void)
{

    malloc_mutex_postfork_child(&huge_mtx);
}
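/*
 * Added commentary (not part of the original file): these hooks are intended
 * to be driven by the allocator's fork handlers, i.e. huge_prefork() just
 * before fork(), then huge_postfork_parent() in the parent and
 * huge_postfork_child() in the child, so that huge_mtx is never held across
 * fork().
 */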