#define	JEMALLOC_ARENA_C_
#include "jemalloc/internal/jemalloc_internal.h"

/******************************************************************************/

ssize_t		opt_lg_dirty_mult = LG_DIRTY_MULT_DEFAULT;
arena_bin_info_t	arena_bin_info[NBINS];

JEMALLOC_ALIGNED(CACHELINE)
const uint8_t	small_size2bin[] = {
#define	S2B_16(i)	S2B_8(i) S2B_8(i)
#define	S2B_32(i)	S2B_16(i) S2B_16(i)
#define	S2B_64(i)	S2B_32(i) S2B_32(i)
#define	S2B_128(i)	S2B_64(i) S2B_64(i)
#define	S2B_256(i)	S2B_128(i) S2B_128(i)
#define	S2B_512(i)	S2B_256(i) S2B_256(i)
#define	S2B_1024(i)	S2B_512(i) S2B_512(i)
#define	S2B_2048(i)	S2B_1024(i) S2B_1024(i)
#define	S2B_4096(i)	S2B_2048(i) S2B_2048(i)
#define	S2B_8192(i)	S2B_4096(i) S2B_4096(i)
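/*
 * small_size2bin maps a request size, quantized to 8-byte groups, to its
 * small size-class (bin) index.  Assuming the usual S2B_8(i) definition
 * (one table entry per 8-byte group; not shown in this excerpt), each
 * S2B_<delta>(i) macro above emits delta/8 copies of bin index i, so an
 * entire delta-byte-wide range of sizes resolves to the same bin.
 */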
#define	SIZE_CLASS(bin, delta, size)					\

/******************************************************************************/
/* Function prototypes for non-inline static functions. */

static void	arena_run_split(arena_t *arena, arena_run_t *run, size_t size,
    bool large, size_t binind, bool zero);
static arena_chunk_t	*arena_chunk_alloc(arena_t *arena);
static void	arena_chunk_dealloc(arena_t *arena, arena_chunk_t *chunk);
static arena_run_t	*arena_run_alloc_helper(arena_t *arena, size_t size,
    bool large, size_t binind, bool zero);
static arena_run_t	*arena_run_alloc(arena_t *arena, size_t size, bool large,
    size_t binind, bool zero);
static void	arena_purge(arena_t *arena, bool all);
static void	arena_run_dalloc(arena_t *arena, arena_run_t *run, bool dirty);
static void	arena_run_trim_head(arena_t *arena, arena_chunk_t *chunk,
    arena_run_t *run, size_t oldsize, size_t newsize);
static void	arena_run_trim_tail(arena_t *arena, arena_chunk_t *chunk,
    arena_run_t *run, size_t oldsize, size_t newsize, bool dirty);
static arena_run_t	*arena_bin_runs_first(arena_bin_t *bin);
static void	arena_bin_runs_insert(arena_bin_t *bin, arena_run_t *run);
static void	arena_bin_runs_remove(arena_bin_t *bin, arena_run_t *run);
static arena_run_t	*arena_bin_nonfull_run_tryget(arena_bin_t *bin);
static arena_run_t	*arena_bin_nonfull_run_get(arena_t *arena,
    arena_bin_t *bin);
static void	*arena_bin_malloc_hard(arena_t *arena, arena_bin_t *bin);
static void	arena_dissociate_bin_run(arena_chunk_t *chunk, arena_run_t *run,
    arena_bin_t *bin);
static void	arena_dalloc_bin_run(arena_t *arena, arena_chunk_t *chunk,
    arena_run_t *run, arena_bin_t *bin);
static void	arena_bin_lower_run(arena_t *arena, arena_chunk_t *chunk,
    arena_run_t *run, arena_bin_t *bin);
static void	arena_ralloc_large_shrink(arena_t *arena, arena_chunk_t *chunk,
    void *ptr, size_t oldsize, size_t size);
static bool	arena_ralloc_large_grow(arena_t *arena, arena_chunk_t *chunk,
    void *ptr, size_t oldsize, size_t size, size_t extra, bool zero);
static bool	arena_ralloc_large(void *ptr, size_t oldsize, size_t size,
    size_t extra, bool zero);
static size_t	bin_info_run_size_calc(arena_bin_info_t *bin_info,
    size_t min_run_size);
static void	bin_info_init(void);
/******************************************************************************/

arena_run_comp(arena_chunk_map_t *a, arena_chunk_map_t *b)
	uintptr_t a_mapelm = (uintptr_t)a;
	uintptr_t b_mapelm = (uintptr_t)b;

	return ((a_mapelm > b_mapelm) - (a_mapelm < b_mapelm));

/* Generate red-black tree functions. */
rb_gen(static UNUSED, arena_run_tree_, arena_run_tree_t, arena_chunk_map_t,
    u.rb_link, arena_run_comp)

arena_avail_comp(arena_chunk_map_t *a, arena_chunk_map_t *b)
	size_t a_size = a->bits & ~PAGE_MASK;
	size_t b_size = b->bits & ~PAGE_MASK;

	assert((a->bits & CHUNK_MAP_KEY) == CHUNK_MAP_KEY || (a->bits &
	    CHUNK_MAP_DIRTY) == (b->bits & CHUNK_MAP_DIRTY));

	ret = (a_size > b_size) - (a_size < b_size);
		uintptr_t a_mapelm, b_mapelm;

		if ((a->bits & CHUNK_MAP_KEY) != CHUNK_MAP_KEY)
			a_mapelm = (uintptr_t)a;
			 * Treat keys as though they are lower than anything
		b_mapelm = (uintptr_t)b;

		ret = (a_mapelm > b_mapelm) - (a_mapelm < b_mapelm);

/* Generate red-black tree functions. */
rb_gen(static UNUSED, arena_avail_tree_, arena_avail_tree_t, arena_chunk_map_t,
    u.rb_link, arena_avail_comp)

arena_run_reg_alloc(arena_run_t *run, arena_bin_info_t *bin_info)
	bitmap_t *bitmap = (bitmap_t *)((uintptr_t)run +
	    (uintptr_t)bin_info->bitmap_offset);

	assert(run->nfree > 0);
	assert(bitmap_full(bitmap, &bin_info->bitmap_info) == false);

	regind = bitmap_sfu(bitmap, &bin_info->bitmap_info);
	ret = (void *)((uintptr_t)run + (uintptr_t)bin_info->reg0_offset +
	    (uintptr_t)(bin_info->reg_interval * regind));
	if (regind == run->nextind)
	assert(regind < run->nextind);

arena_run_reg_dalloc(arena_run_t *run, void *ptr)
	arena_chunk_t *chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(run);
	size_t pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE;
	size_t mapbits = arena_mapbits_get(chunk, pageind);
	size_t binind = arena_ptr_small_binind_get(ptr, mapbits);
	arena_bin_info_t *bin_info = &arena_bin_info[binind];
	unsigned regind = arena_run_regind(run, bin_info, ptr);
	bitmap_t *bitmap = (bitmap_t *)((uintptr_t)run +
	    (uintptr_t)bin_info->bitmap_offset);

	assert(run->nfree < bin_info->nregs);
	/* Freeing an interior pointer can cause assertion failure. */
	assert(((uintptr_t)ptr - ((uintptr_t)run +
	    (uintptr_t)bin_info->reg0_offset)) %
	    (uintptr_t)bin_info->reg_interval == 0);
	assert((uintptr_t)ptr >= (uintptr_t)run +
	    (uintptr_t)bin_info->reg0_offset);
	/* Freeing an unallocated pointer can cause assertion failure. */
	assert(bitmap_get(bitmap, &bin_info->bitmap_info, regind));

	bitmap_unset(bitmap, &bin_info->bitmap_info, regind);
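/*
 * Region addressing within a small run: region regind begins at
 * run + reg0_offset + regind * reg_interval, and the per-run bitmap at
 * run + bitmap_offset tracks which regions are allocated.  For example
 * (hypothetical numbers), with reg0_offset == 64 and reg_interval == 32,
 * region 3 of a run at address R starts at R + 64 + 3*32 == R + 160.
 */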
arena_chunk_validate_zeroed(arena_chunk_t *chunk, size_t run_ind)
	UNUSED size_t *p = (size_t *)((uintptr_t)chunk + (run_ind << LG_PAGE));

	for (i = 0; i < PAGE / sizeof(size_t); i++)

arena_run_split(arena_t *arena, arena_run_t *run, size_t size, bool large,
    size_t binind, bool zero)
	arena_chunk_t *chunk;
	size_t run_ind, total_pages, need_pages, rem_pages, i;
	arena_avail_tree_t *runs_avail;

	assert((large && binind == BININD_INVALID) || (large == false && binind
	chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(run);
	run_ind = (unsigned)(((uintptr_t)run - (uintptr_t)chunk) >> LG_PAGE);
	flag_dirty = arena_mapbits_dirty_get(chunk, run_ind);
	runs_avail = (flag_dirty != 0) ? &arena->runs_avail_dirty :
	    &arena->runs_avail_clean;
	total_pages = arena_mapbits_unallocated_size_get(chunk, run_ind) >>
	assert(arena_mapbits_dirty_get(chunk, run_ind+total_pages-1) ==
	need_pages = (size >> LG_PAGE);
	assert(need_pages > 0);
	assert(need_pages <= total_pages);
	rem_pages = total_pages - need_pages;

	arena_avail_tree_remove(runs_avail, arena_mapp_get(chunk, run_ind));
		 * Update stats_cactive if nactive is crossing a chunk
		size_t cactive_diff = CHUNK_CEILING((arena->nactive +
		    need_pages) << LG_PAGE) - CHUNK_CEILING(arena->nactive <<
		if (cactive_diff != 0)
			stats_cactive_add(cactive_diff);
	arena->nactive += need_pages;

	/* Keep track of trailing unused pages for later use. */
		if (flag_dirty != 0) {
			arena_mapbits_unallocated_set(chunk, run_ind+need_pages,
			    (rem_pages << LG_PAGE), CHUNK_MAP_DIRTY);
			arena_mapbits_unallocated_set(chunk,
			    run_ind+total_pages-1, (rem_pages << LG_PAGE),
			arena_mapbits_unallocated_set(chunk, run_ind+need_pages,
			    (rem_pages << LG_PAGE),
			    arena_mapbits_unzeroed_get(chunk,
			    run_ind+need_pages));
			arena_mapbits_unallocated_set(chunk,
			    run_ind+total_pages-1, (rem_pages << LG_PAGE),
			    arena_mapbits_unzeroed_get(chunk,
			    run_ind+total_pages-1));
		arena_avail_tree_insert(runs_avail, arena_mapp_get(chunk,
		    run_ind+need_pages));

	/* Update dirty page accounting. */
	if (flag_dirty != 0) {
		chunk->ndirty -= need_pages;
		arena->ndirty -= need_pages;

	 * Update the page map separately for large vs. small runs, since it is
	 * possible to avoid iteration for large mallocs.
		if (flag_dirty == 0) {
			 * The run is clean, so some pages may be
			 * zeroed (i.e. never before touched).
			for (i = 0; i < need_pages; i++) {
				if (arena_mapbits_unzeroed_get(chunk,
					VALGRIND_MAKE_MEM_UNDEFINED(
					    chunk + ((run_ind+i) <<
					memset((void *)((uintptr_t)
					    chunk + ((run_ind+i) <<
				} else if (config_debug) {
					VALGRIND_MAKE_MEM_DEFINED(
					    chunk + ((run_ind+i) <<
					arena_chunk_validate_zeroed(
			 * The run is dirty, so all pages must be
			VALGRIND_MAKE_MEM_UNDEFINED((void
			    *)((uintptr_t)chunk + (run_ind <<
			    LG_PAGE)), (need_pages << LG_PAGE));
			memset((void *)((uintptr_t)chunk + (run_ind <<
			    LG_PAGE)), 0, (need_pages << LG_PAGE));

	 * Set the last element first, in case the run only contains one
	 * page (i.e. both statements set the same element).
		arena_mapbits_large_set(chunk, run_ind+need_pages-1, 0,
		arena_mapbits_large_set(chunk, run_ind, size, flag_dirty);
		assert(zero == false);
		 * Propagate the dirty and unzeroed flags to the allocated
		 * small run, so that arena_dalloc_bin_run() has the ability to
		 * conditionally trim clean pages.
		arena_mapbits_small_set(chunk, run_ind, 0, binind, flag_dirty);
		 * The first page will always be dirtied during small run
		 * initialization, so a validation failure here would not
		 * actually cause an observable failure.
		if (config_debug && flag_dirty == 0 &&
		    arena_mapbits_unzeroed_get(chunk, run_ind) == 0)
			arena_chunk_validate_zeroed(chunk, run_ind);
		for (i = 1; i < need_pages - 1; i++) {
			arena_mapbits_small_set(chunk, run_ind+i, i, binind, 0);
			if (config_debug && flag_dirty == 0 &&
			    arena_mapbits_unzeroed_get(chunk, run_ind+i) == 0)
				arena_chunk_validate_zeroed(chunk, run_ind+i);
		arena_mapbits_small_set(chunk, run_ind+need_pages-1,
		    need_pages-1, binind, flag_dirty);
		if (config_debug && flag_dirty == 0 &&
		    arena_mapbits_unzeroed_get(chunk, run_ind+need_pages-1) ==
			arena_chunk_validate_zeroed(chunk,
			    run_ind+need_pages-1);
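/*
 * Page-map layout after a split: a large run records its size and flags
 * only in the first and last page entries, so no per-page loop is needed;
 * a small run tags every page with its offset within the run and its bin
 * index so that an interior pointer can be mapped back to the run header.
 */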
static arena_chunk_t *
arena_chunk_alloc(arena_t *arena)
	arena_chunk_t *chunk;

	if (arena->spare != NULL) {
		arena_avail_tree_t *runs_avail;

		chunk = arena->spare;

		assert(arena_mapbits_allocated_get(chunk, map_bias) == 0);
		assert(arena_mapbits_allocated_get(chunk, chunk_npages-1) == 0);
		assert(arena_mapbits_unallocated_size_get(chunk, map_bias) ==
		assert(arena_mapbits_unallocated_size_get(chunk,
		    chunk_npages-1) == arena_maxclass);
		assert(arena_mapbits_dirty_get(chunk, map_bias) ==
		    arena_mapbits_dirty_get(chunk, chunk_npages-1));

		/* Insert the run into the appropriate runs_avail_* tree. */
		if (arena_mapbits_dirty_get(chunk, map_bias) == 0)
			runs_avail = &arena->runs_avail_clean;
			runs_avail = &arena->runs_avail_dirty;
		arena_avail_tree_insert(runs_avail, arena_mapp_get(chunk,

		malloc_mutex_unlock(&arena->lock);
		chunk = (arena_chunk_t *)chunk_alloc(chunksize, chunksize,
		malloc_mutex_lock(&arena->lock);
			arena->stats.mapped += chunksize;

		chunk->arena = arena;
		ql_elm_new(chunk, link_dirty);
		chunk->dirtied = false;

		 * Claim that no pages are in use, since the header is merely

		 * Initialize the map to contain one maximal free untouched run.
		 * Mark the pages as zeroed iff chunk_alloc() returned a zeroed
		unzeroed = zero ? 0 : CHUNK_MAP_UNZEROED;
		arena_mapbits_unallocated_set(chunk, map_bias, arena_maxclass,
		 * There is no need to initialize the internal page map entries
		 * unless the chunk is not zeroed.
			for (i = map_bias+1; i < chunk_npages-1; i++)
				arena_mapbits_unzeroed_set(chunk, i, unzeroed);
		} else if (config_debug) {
			for (i = map_bias+1; i < chunk_npages-1; i++) {
				assert(arena_mapbits_unzeroed_get(chunk, i) ==
		arena_mapbits_unallocated_set(chunk, chunk_npages-1,
		    arena_maxclass, unzeroed);

		/* Insert the run into the runs_avail_clean tree. */
		arena_avail_tree_insert(&arena->runs_avail_clean,
		    arena_mapp_get(chunk, map_bias));

arena_chunk_dealloc(arena_t *arena, arena_chunk_t *chunk)
	arena_avail_tree_t *runs_avail;

	assert(arena_mapbits_allocated_get(chunk, map_bias) == 0);
	assert(arena_mapbits_allocated_get(chunk, chunk_npages-1) == 0);
	assert(arena_mapbits_unallocated_size_get(chunk, map_bias) ==
	assert(arena_mapbits_unallocated_size_get(chunk, chunk_npages-1) ==
	assert(arena_mapbits_dirty_get(chunk, map_bias) ==
	    arena_mapbits_dirty_get(chunk, chunk_npages-1));

	 * Remove run from the appropriate runs_avail_* tree, so that the arena
	if (arena_mapbits_dirty_get(chunk, map_bias) == 0)
		runs_avail = &arena->runs_avail_clean;
		runs_avail = &arena->runs_avail_dirty;
	arena_avail_tree_remove(runs_avail, arena_mapp_get(chunk, map_bias));

	if (arena->spare != NULL) {
		arena_chunk_t *spare = arena->spare;

		arena->spare = chunk;
		if (spare->dirtied) {
			ql_remove(&chunk->arena->chunks_dirty, spare,
			arena->ndirty -= spare->ndirty;
		malloc_mutex_unlock(&arena->lock);
		chunk_dealloc((void *)spare, chunksize, true);
		malloc_mutex_lock(&arena->lock);
			arena->stats.mapped -= chunksize;
		arena->spare = chunk;

arena_run_alloc_helper(arena_t *arena, size_t size, bool large, size_t binind,
    bool zero)
	arena_chunk_map_t *mapelm, key;

	key.bits = size | CHUNK_MAP_KEY;
	mapelm = arena_avail_tree_nsearch(&arena->runs_avail_dirty, &key);
	if (mapelm != NULL) {
		arena_chunk_t *run_chunk = CHUNK_ADDR2BASE(mapelm);
		size_t pageind = (((uintptr_t)mapelm -
		    (uintptr_t)run_chunk->map) / sizeof(arena_chunk_map_t))

		run = (arena_run_t *)((uintptr_t)run_chunk + (pageind <<
		arena_run_split(arena, run, size, large, binind, zero);
	mapelm = arena_avail_tree_nsearch(&arena->runs_avail_clean, &key);
	if (mapelm != NULL) {
		arena_chunk_t *run_chunk = CHUNK_ADDR2BASE(mapelm);
		size_t pageind = (((uintptr_t)mapelm -
		    (uintptr_t)run_chunk->map) / sizeof(arena_chunk_map_t))

		run = (arena_run_t *)((uintptr_t)run_chunk + (pageind <<
		arena_run_split(arena, run, size, large, binind, zero);

arena_run_alloc(arena_t *arena, size_t size, bool large, size_t binind,
    bool zero)
	arena_chunk_t *chunk;

	assert(size <= arena_maxclass);
	assert((size & PAGE_MASK) == 0);
	assert((large && binind == BININD_INVALID) || (large == false && binind

	/* Search the arena's chunks for the lowest best fit. */
	run = arena_run_alloc_helper(arena, size, large, binind, zero);

	 * No usable runs.  Create a new chunk from which to allocate the run.
	chunk = arena_chunk_alloc(arena);
		run = (arena_run_t *)((uintptr_t)chunk + (map_bias << LG_PAGE));
		arena_run_split(arena, run, size, large, binind, zero);

	 * arena_chunk_alloc() failed, but another thread may have made
	 * sufficient memory available while this one dropped arena->lock in
	 * arena_chunk_alloc(), so search one more time.
	return (arena_run_alloc_helper(arena, size, large, binind, zero));
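/*
 * Run allocation order: best-fit search of dirty runs, then clean runs
 * (arena_run_alloc_helper()), then a freshly allocated chunk, and finally
 * one more helper pass to catch runs freed by other threads while
 * arena->lock was dropped inside arena_chunk_alloc().
 */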
arena_maybe_purge(arena_t *arena)
	/* Enforce opt_lg_dirty_mult. */
	if (opt_lg_dirty_mult >= 0 && arena->ndirty > arena->npurgatory &&
	    (arena->ndirty - arena->npurgatory) > chunk_npages &&
	    (arena->nactive >> opt_lg_dirty_mult) < (arena->ndirty -
		arena_purge(arena, false);
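/*
 * Purging triggers when the dirty-page excess (ndirty - npurgatory)
 * exceeds both chunk_npages and nactive >> opt_lg_dirty_mult.  For
 * example, with lg_dirty_mult == 3 (an 8:1 active:dirty ratio), an arena
 * with 8192 active pages starts purging once more than 1024 dirty pages
 * (and more than one chunk's worth) accumulate.
 */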
arena_chunk_purge(arena_t *arena, arena_chunk_t *chunk)
	ql_head(arena_chunk_map_t) mapelms;
	arena_chunk_map_t *mapelm;
	size_t pageind, flag_unzeroed;

#ifdef JEMALLOC_PURGE_MADVISE_DONTNEED
	 * madvise(..., MADV_DONTNEED) results in zero-filled pages for anonymous
	 * mappings, but not for file-backed mappings.

	 * If chunk is the spare, temporarily re-allocate it, 1) so that its
	 * run is reinserted into runs_avail_dirty, and 2) so that it cannot be
	 * completely discarded by another thread while arena->lock is dropped
	 * by this thread.  Note that the arena_run_dalloc() call will
	 * implicitly deallocate the chunk, so no explicit action is required
	 * in this function to deallocate the chunk.
	 *
	 * Note that once a chunk contains dirty pages, it cannot again contain
	 * a single run unless 1) it is a dirty run, or 2) this function purges
	 * dirty pages and causes the transition to a single clean run.  Thus
	 * (chunk == arena->spare) is possible, but it is not possible for
	 * this function to be called on the spare unless it contains a dirty
	if (chunk == arena->spare) {
		assert(arena_mapbits_dirty_get(chunk, map_bias) != 0);
		assert(arena_mapbits_dirty_get(chunk, chunk_npages-1) != 0);

		arena_chunk_alloc(arena);

	/* Temporarily allocate all free dirty runs within chunk. */
	for (pageind = map_bias; pageind < chunk_npages;) {
		mapelm = arena_mapp_get(chunk, pageind);
		if (arena_mapbits_allocated_get(chunk, pageind) == 0) {
			npages = arena_mapbits_unallocated_size_get(chunk,
			assert(pageind + npages <= chunk_npages);
			assert(arena_mapbits_dirty_get(chunk, pageind) ==
			    arena_mapbits_dirty_get(chunk, pageind+npages-1));
			if (arena_mapbits_dirty_get(chunk, pageind) != 0) {
				arena_avail_tree_remove(
				    &arena->runs_avail_dirty, mapelm);

				arena_mapbits_unzeroed_set(chunk, pageind,
				arena_mapbits_large_set(chunk, pageind,
				    (npages << LG_PAGE), 0);
				 * Update internal elements in the page map, so
				 * that CHUNK_MAP_UNZEROED is properly set.
				for (i = 1; i < npages - 1; i++) {
					arena_mapbits_unzeroed_set(chunk,
					    pageind+i, flag_unzeroed);
				arena_mapbits_unzeroed_set(chunk,
				    pageind+npages-1, flag_unzeroed);
				arena_mapbits_large_set(chunk,
				    pageind+npages-1, 0, 0);

					 * Update stats_cactive if nactive is
					 * crossing a chunk multiple.
					size_t cactive_diff =
					    CHUNK_CEILING((arena->nactive +
					    npages) << LG_PAGE) -
					    CHUNK_CEILING(arena->nactive <<
					if (cactive_diff != 0)
						stats_cactive_add(cactive_diff);
				arena->nactive += npages;
				/* Append to list for later processing. */
				ql_elm_new(mapelm, u.ql_link);
				ql_tail_insert(&mapelms, mapelm, u.ql_link);

			/* Skip allocated run. */
			if (arena_mapbits_large_get(chunk, pageind))
				pageind += arena_mapbits_large_size_get(chunk,
				arena_bin_info_t *bin_info;
				arena_run_t *run = (arena_run_t *)((uintptr_t)
				    chunk + (uintptr_t)(pageind << LG_PAGE));

				assert(arena_mapbits_small_runind_get(chunk,
				binind = arena_bin_index(arena, run->bin);
				bin_info = &arena_bin_info[binind];
				pageind += bin_info->run_size >> LG_PAGE;
	assert(pageind == chunk_npages);

		ndirty = chunk->ndirty;
		arena->stats.purged += chunk->ndirty;
	arena->ndirty -= chunk->ndirty;
	ql_remove(&arena->chunks_dirty, chunk, link_dirty);
	chunk->dirtied = false;

	malloc_mutex_unlock(&arena->lock);

	ql_foreach(mapelm, &mapelms, u.ql_link) {
		size_t pageind = (((uintptr_t)mapelm - (uintptr_t)chunk->map) /
		    sizeof(arena_chunk_map_t)) + map_bias;
		size_t npages = arena_mapbits_large_size_get(chunk, pageind) >>

		assert(pageind + npages <= chunk_npages);
		assert(ndirty >= npages);

		pages_purge((void *)((uintptr_t)chunk + (pageind << LG_PAGE)),
		    (npages << LG_PAGE));

	malloc_mutex_lock(&arena->lock);
		arena->stats.nmadvise += nmadvise;

	/* Deallocate runs. */
	for (mapelm = ql_first(&mapelms); mapelm != NULL;
	    mapelm = ql_first(&mapelms)) {
		size_t pageind = (((uintptr_t)mapelm - (uintptr_t)chunk->map) /
		    sizeof(arena_chunk_map_t)) + map_bias;
		arena_run_t *run = (arena_run_t *)((uintptr_t)chunk +
		    (uintptr_t)(pageind << LG_PAGE));

		ql_remove(&mapelms, mapelm, u.ql_link);
		arena_run_dalloc(arena, run, false);

arena_purge(arena_t *arena, bool all)
	arena_chunk_t *chunk;

		ql_foreach(chunk, &arena->chunks_dirty, link_dirty) {
			assert(chunk->dirtied);
			ndirty += chunk->ndirty;
		assert(ndirty == arena->ndirty);
	assert(arena->ndirty > arena->npurgatory || all);
	assert(arena->ndirty - arena->npurgatory > chunk_npages || all);
	assert((arena->nactive >> opt_lg_dirty_mult) < (arena->ndirty -
	    arena->npurgatory) || all);

		arena->stats.npurge++;

	 * Compute the minimum number of pages that this thread should try to
	 * purge, and add the result to arena->npurgatory.  This will keep
	 * multiple threads from racing to reduce ndirty below the threshold.
	npurgatory = arena->ndirty - arena->npurgatory;
		assert(npurgatory >= arena->nactive >> opt_lg_dirty_mult);
		npurgatory -= arena->nactive >> opt_lg_dirty_mult;
	arena->npurgatory += npurgatory;

	while (npurgatory > 0) {
		/* Get next chunk with dirty pages. */
		chunk = ql_first(&arena->chunks_dirty);
			 * This thread was unable to purge as many pages as
			 * originally intended, due to races with other threads
			 * that either did some of the purging work, or re-used
			arena->npurgatory -= npurgatory;
		while (chunk->ndirty == 0) {
			ql_remove(&arena->chunks_dirty, chunk, link_dirty);
			chunk->dirtied = false;
			chunk = ql_first(&arena->chunks_dirty);
				/* Same logic as for above. */
				arena->npurgatory -= npurgatory;

		if (chunk->ndirty > npurgatory) {
			 * This thread will, at a minimum, purge all the dirty
			 * pages in chunk, so set npurgatory to reflect this
			 * thread's commitment to purge the pages.  This tends
			 * to reduce the chances of the following scenario:
			 *
			 * 1) This thread sets arena->npurgatory such that
			 *    (arena->ndirty - arena->npurgatory) is at the
			 * 2) This thread drops arena->lock.
			 * 3) Another thread causes one or more pages to be
			 *    dirtied, and immediately determines that it must
			 *
			 * If this scenario *does* play out, that's okay,
			 * because all of the purging work being done really
			arena->npurgatory += chunk->ndirty - npurgatory;
			npurgatory = chunk->ndirty;

		arena->npurgatory -= chunk->ndirty;
		npurgatory -= chunk->ndirty;
		arena_chunk_purge(arena, chunk);
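/*
 * arena->npurgatory counts dirty pages that some thread has already
 * committed to purging; arena_maybe_purge() subtracts it from ndirty so
 * that concurrent threads do not all rush to purge the same backlog.
 */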
arena_purge_all(arena_t *arena)
	malloc_mutex_lock(&arena->lock);
	arena_purge(arena, true);
	malloc_mutex_unlock(&arena->lock);

arena_run_dalloc(arena_t *arena, arena_run_t *run, bool dirty)
	arena_chunk_t *chunk;
	size_t size, run_ind, run_pages, flag_dirty;
	arena_avail_tree_t *runs_avail;

	chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(run);
	run_ind = (size_t)(((uintptr_t)run - (uintptr_t)chunk) >> LG_PAGE);
	assert(run_ind >= map_bias);
	assert(run_ind < chunk_npages);
	if (arena_mapbits_large_get(chunk, run_ind) != 0) {
		size = arena_mapbits_large_size_get(chunk, run_ind);
		assert(size == PAGE ||
		    arena_mapbits_large_size_get(chunk,
		    run_ind+(size>>LG_PAGE)-1) == 0);
		size_t binind = arena_bin_index(arena, run->bin);
		arena_bin_info_t *bin_info = &arena_bin_info[binind];
		size = bin_info->run_size;
	run_pages = (size >> LG_PAGE);

		 * Update stats_cactive if nactive is crossing a chunk
		size_t cactive_diff = CHUNK_CEILING(arena->nactive << LG_PAGE) -
		    CHUNK_CEILING((arena->nactive - run_pages) << LG_PAGE);
		if (cactive_diff != 0)
			stats_cactive_sub(cactive_diff);
	arena->nactive -= run_pages;

	 * The run is dirty if the caller claims to have dirtied it, as well as
	 * if it was already dirty before being allocated.
	assert(arena_mapbits_dirty_get(chunk, run_ind) ==
	    arena_mapbits_dirty_get(chunk, run_ind+run_pages-1));
	if (arena_mapbits_dirty_get(chunk, run_ind) != 0)
	flag_dirty = dirty ? CHUNK_MAP_DIRTY : 0;
	runs_avail = dirty ? &arena->runs_avail_dirty :
	    &arena->runs_avail_clean;

	/* Mark pages as unallocated in the chunk map. */
		arena_mapbits_unallocated_set(chunk, run_ind, size,
		arena_mapbits_unallocated_set(chunk, run_ind+run_pages-1, size,

		chunk->ndirty += run_pages;
		arena->ndirty += run_pages;
		arena_mapbits_unallocated_set(chunk, run_ind, size,
		    arena_mapbits_unzeroed_get(chunk, run_ind));
		arena_mapbits_unallocated_set(chunk, run_ind+run_pages-1, size,
		    arena_mapbits_unzeroed_get(chunk, run_ind+run_pages-1));

	/* Try to coalesce forward. */
	if (run_ind + run_pages < chunk_npages &&
	    arena_mapbits_allocated_get(chunk, run_ind+run_pages) == 0 &&
	    arena_mapbits_dirty_get(chunk, run_ind+run_pages) == flag_dirty) {
		size_t nrun_size = arena_mapbits_unallocated_size_get(chunk,
		size_t nrun_pages = nrun_size >> LG_PAGE;

		 * Remove successor from runs_avail; the coalesced run is
		assert(arena_mapbits_unallocated_size_get(chunk,
		    run_ind+run_pages+nrun_pages-1) == nrun_size);
		assert(arena_mapbits_dirty_get(chunk,
		    run_ind+run_pages+nrun_pages-1) == flag_dirty);
		arena_avail_tree_remove(runs_avail,
		    arena_mapp_get(chunk, run_ind+run_pages));

		run_pages += nrun_pages;

		arena_mapbits_unallocated_size_set(chunk, run_ind, size);
		arena_mapbits_unallocated_size_set(chunk, run_ind+run_pages-1,

	/* Try to coalesce backward. */
	if (run_ind > map_bias && arena_mapbits_allocated_get(chunk, run_ind-1)
	    == 0 && arena_mapbits_dirty_get(chunk, run_ind-1) == flag_dirty) {
		size_t prun_size = arena_mapbits_unallocated_size_get(chunk,
		size_t prun_pages = prun_size >> LG_PAGE;

		run_ind -= prun_pages;

		 * Remove predecessor from runs_avail; the coalesced run is
		assert(arena_mapbits_unallocated_size_get(chunk, run_ind) ==
		assert(arena_mapbits_dirty_get(chunk, run_ind) == flag_dirty);
		arena_avail_tree_remove(runs_avail, arena_mapp_get(chunk,

		run_pages += prun_pages;

		arena_mapbits_unallocated_size_set(chunk, run_ind, size);
		arena_mapbits_unallocated_size_set(chunk, run_ind+run_pages-1,

	/* Insert into runs_avail, now that coalescing is complete. */
	assert(arena_mapbits_unallocated_size_get(chunk, run_ind) ==
	    arena_mapbits_unallocated_size_get(chunk, run_ind+run_pages-1));
	assert(arena_mapbits_dirty_get(chunk, run_ind) ==
	    arena_mapbits_dirty_get(chunk, run_ind+run_pages-1));
	arena_avail_tree_insert(runs_avail, arena_mapp_get(chunk, run_ind));

	 * Insert into chunks_dirty before potentially calling
	 * arena_chunk_dealloc(), so that chunks_dirty and
	 * arena->ndirty are consistent.
		if (chunk->dirtied == false) {
			ql_tail_insert(&arena->chunks_dirty, chunk, link_dirty);
			chunk->dirtied = true;

	/* Deallocate chunk if it is now completely unused. */
	if (size == arena_maxclass) {
		assert(run_ind == map_bias);
		assert(run_pages == (arena_maxclass >> LG_PAGE));
		arena_chunk_dealloc(arena, chunk);

	 * It is okay to do dirty page processing here even if the chunk was
	 * deallocated above, since in that case it is the spare.  Waiting
	 * until after possible chunk deallocation to do dirty processing
	 * allows for an old spare to be fully deallocated, thus decreasing the
	 * chances of spuriously crossing the dirty page purging threshold.
		arena_maybe_purge(arena);
*arena
, arena_chunk_t
*chunk
, arena_run_t
*run
,
968 size_t oldsize
, size_t newsize
)
970 size_t pageind
= ((uintptr_t)run
- (uintptr_t)chunk
) >> LG_PAGE
;
971 size_t head_npages
= (oldsize
- newsize
) >> LG_PAGE
;
972 size_t flag_dirty
= arena_mapbits_dirty_get(chunk
, pageind
);
974 assert(oldsize
> newsize
);
977 * Update the chunk map so that arena_run_dalloc() can treat the
978 * leading run as separately allocated. Set the last element of each
979 * run first, in case of single-page runs.
981 assert(arena_mapbits_large_size_get(chunk
, pageind
) == oldsize
);
982 arena_mapbits_large_set(chunk
, pageind
+head_npages
-1, 0, flag_dirty
);
983 arena_mapbits_large_set(chunk
, pageind
, oldsize
-newsize
, flag_dirty
);
986 UNUSED
size_t tail_npages
= newsize
>> LG_PAGE
;
987 assert(arena_mapbits_large_size_get(chunk
,
988 pageind
+head_npages
+tail_npages
-1) == 0);
989 assert(arena_mapbits_dirty_get(chunk
,
990 pageind
+head_npages
+tail_npages
-1) == flag_dirty
);
992 arena_mapbits_large_set(chunk
, pageind
+head_npages
, newsize
,
995 arena_run_dalloc(arena
, run
, false);
999 arena_run_trim_tail(arena_t
*arena
, arena_chunk_t
*chunk
, arena_run_t
*run
,
1000 size_t oldsize
, size_t newsize
, bool dirty
)
1002 size_t pageind
= ((uintptr_t)run
- (uintptr_t)chunk
) >> LG_PAGE
;
1003 size_t head_npages
= newsize
>> LG_PAGE
;
1004 size_t flag_dirty
= arena_mapbits_dirty_get(chunk
, pageind
);
1006 assert(oldsize
> newsize
);
1009 * Update the chunk map so that arena_run_dalloc() can treat the
1010 * trailing run as separately allocated. Set the last element of each
1011 * run first, in case of single-page runs.
1013 assert(arena_mapbits_large_size_get(chunk
, pageind
) == oldsize
);
1014 arena_mapbits_large_set(chunk
, pageind
+head_npages
-1, 0, flag_dirty
);
1015 arena_mapbits_large_set(chunk
, pageind
, newsize
, flag_dirty
);
1018 UNUSED
size_t tail_npages
= (oldsize
- newsize
) >> LG_PAGE
;
1019 assert(arena_mapbits_large_size_get(chunk
,
1020 pageind
+head_npages
+tail_npages
-1) == 0);
1021 assert(arena_mapbits_dirty_get(chunk
,
1022 pageind
+head_npages
+tail_npages
-1) == flag_dirty
);
1024 arena_mapbits_large_set(chunk
, pageind
+head_npages
, oldsize
-newsize
,
1027 arena_run_dalloc(arena
, (arena_run_t
*)((uintptr_t)run
+ newsize
),
1031 static arena_run_t
*
1032 arena_bin_runs_first(arena_bin_t
*bin
)
1034 arena_chunk_map_t
*mapelm
= arena_run_tree_first(&bin
->runs
);
1035 if (mapelm
!= NULL
) {
1036 arena_chunk_t
*chunk
;
1040 chunk
= (arena_chunk_t
*)CHUNK_ADDR2BASE(mapelm
);
1041 pageind
= ((((uintptr_t)mapelm
- (uintptr_t)chunk
->map
) /
1042 sizeof(arena_chunk_map_t
))) + map_bias
;
1043 run
= (arena_run_t
*)((uintptr_t)chunk
+ (uintptr_t)((pageind
-
1044 arena_mapbits_small_runind_get(chunk
, pageind
)) <<
1053 arena_bin_runs_insert(arena_bin_t
*bin
, arena_run_t
*run
)
1055 arena_chunk_t
*chunk
= CHUNK_ADDR2BASE(run
);
1056 size_t pageind
= ((uintptr_t)run
- (uintptr_t)chunk
) >> LG_PAGE
;
1057 arena_chunk_map_t
*mapelm
= arena_mapp_get(chunk
, pageind
);
1059 assert(arena_run_tree_search(&bin
->runs
, mapelm
) == NULL
);
1061 arena_run_tree_insert(&bin
->runs
, mapelm
);
1065 arena_bin_runs_remove(arena_bin_t
*bin
, arena_run_t
*run
)
1067 arena_chunk_t
*chunk
= (arena_chunk_t
*)CHUNK_ADDR2BASE(run
);
1068 size_t pageind
= ((uintptr_t)run
- (uintptr_t)chunk
) >> LG_PAGE
;
1069 arena_chunk_map_t
*mapelm
= arena_mapp_get(chunk
, pageind
);
1071 assert(arena_run_tree_search(&bin
->runs
, mapelm
) != NULL
);
1073 arena_run_tree_remove(&bin
->runs
, mapelm
);
1076 static arena_run_t
*
1077 arena_bin_nonfull_run_tryget(arena_bin_t
*bin
)
1079 arena_run_t
*run
= arena_bin_runs_first(bin
);
1081 arena_bin_runs_remove(bin
, run
);
1083 bin
->stats
.reruns
++;
1088 static arena_run_t
*
1089 arena_bin_nonfull_run_get(arena_t
*arena
, arena_bin_t
*bin
)
1093 arena_bin_info_t
*bin_info
;
1095 /* Look for a usable run. */
1096 run
= arena_bin_nonfull_run_tryget(bin
);
1099 /* No existing runs have any space available. */
1101 binind
= arena_bin_index(arena
, bin
);
1102 bin_info
= &arena_bin_info
[binind
];
1104 /* Allocate a new run. */
1105 malloc_mutex_unlock(&bin
->lock
);
1106 /******************************/
1107 malloc_mutex_lock(&arena
->lock
);
1108 run
= arena_run_alloc(arena
, bin_info
->run_size
, false, binind
, false);
1110 bitmap_t
*bitmap
= (bitmap_t
*)((uintptr_t)run
+
1111 (uintptr_t)bin_info
->bitmap_offset
);
1113 /* Initialize run internals. */
1114 VALGRIND_MAKE_MEM_UNDEFINED(run
, bin_info
->reg0_offset
-
1115 bin_info
->redzone_size
);
1118 run
->nfree
= bin_info
->nregs
;
1119 bitmap_init(bitmap
, &bin_info
->bitmap_info
);
1121 malloc_mutex_unlock(&arena
->lock
);
1122 /********************************/
1123 malloc_mutex_lock(&bin
->lock
);
1127 bin
->stats
.curruns
++;
1133 * arena_run_alloc() failed, but another thread may have made
1134 * sufficient memory available while this one dropped bin->lock above,
1135 * so search one more time.
1137 run
= arena_bin_nonfull_run_tryget(bin
);
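/*
 * arena_bin_nonfull_run_get() drops bin->lock while it holds arena->lock
 * to allocate a run, so callers must re-check any bin state that may have
 * changed in the interim (see the runcur re-check in
 * arena_bin_malloc_hard() below).
 */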
/* Re-fill bin->runcur, then call arena_run_reg_alloc(). */
arena_bin_malloc_hard(arena_t *arena, arena_bin_t *bin)
	arena_bin_info_t *bin_info;

	binind = arena_bin_index(arena, bin);
	bin_info = &arena_bin_info[binind];
	run = arena_bin_nonfull_run_get(arena, bin);
	if (bin->runcur != NULL && bin->runcur->nfree > 0) {
		 * Another thread updated runcur while this one ran without the
		 * bin lock in arena_bin_nonfull_run_get().
		assert(bin->runcur->nfree > 0);
		ret = arena_run_reg_alloc(bin->runcur, bin_info);
			arena_chunk_t *chunk;

			 * arena_run_alloc() may have allocated run, or it may
			 * have pulled run from the bin's run tree.  Therefore
			 * it is unsafe to make any assumptions about how run
			 * has previously been used, and arena_bin_lower_run()
			 * must be called, as if a region were just deallocated
			chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(run);
			if (run->nfree == bin_info->nregs)
				arena_dalloc_bin_run(arena, chunk, run, bin);
				arena_bin_lower_run(arena, chunk, run, bin);

	assert(bin->runcur->nfree > 0);

	return (arena_run_reg_alloc(bin->runcur, bin_info));

arena_prof_accum(arena_t *arena, uint64_t accumbytes)
	cassert(config_prof);

	if (config_prof && prof_interval != 0) {
		arena->prof_accumbytes += accumbytes;
		if (arena->prof_accumbytes >= prof_interval) {
			arena->prof_accumbytes -= prof_interval;

arena_tcache_fill_small(arena_t *arena, tcache_bin_t *tbin, size_t binind,
    uint64_t prof_accumbytes)
	assert(tbin->ncached == 0);

		malloc_mutex_lock(&arena->lock);
		arena_prof_accum(arena, prof_accumbytes);
		malloc_mutex_unlock(&arena->lock);
	bin = &arena->bins[binind];
	malloc_mutex_lock(&bin->lock);
	for (i = 0, nfill = (tcache_bin_info[binind].ncached_max >>
	    tbin->lg_fill_div); i < nfill; i++) {
		if ((run = bin->runcur) != NULL && run->nfree > 0)
			ptr = arena_run_reg_alloc(run, &arena_bin_info[binind]);
			ptr = arena_bin_malloc_hard(arena, bin);
		if (config_fill && opt_junk) {
			arena_alloc_junk_small(ptr, &arena_bin_info[binind],
		/* Insert such that low regions get used first. */
		tbin->avail[nfill - 1 - i] = ptr;
		bin->stats.allocated += i * arena_bin_info[binind].reg_size;
		bin->stats.nmalloc += i;
		bin->stats.nrequests += tbin->tstats.nrequests;
		bin->stats.nfills++;
		tbin->tstats.nrequests = 0;
	malloc_mutex_unlock(&bin->lock);

arena_alloc_junk_small(void *ptr, arena_bin_info_t *bin_info, bool zero)
		size_t redzone_size = bin_info->redzone_size;
		memset((void *)((uintptr_t)ptr - redzone_size), 0xa5,
		memset((void *)((uintptr_t)ptr + bin_info->reg_size), 0xa5,
		memset((void *)((uintptr_t)ptr - bin_info->redzone_size), 0xa5,
		    bin_info->reg_interval);

arena_dalloc_junk_small(void *ptr, arena_bin_info_t *bin_info)
	size_t size = bin_info->reg_size;
	size_t redzone_size = bin_info->redzone_size;

	for (i = 1; i <= redzone_size; i++) {
		if ((byte = *(uint8_t *)((uintptr_t)ptr - i)) != 0xa5) {
			malloc_printf("<jemalloc>: Corrupt redzone "
			    "%zu byte%s before %p (size %zu), byte=%#x\n", i,
			    (i == 1) ? "" : "s", ptr, size, byte);
	for (i = 0; i < redzone_size; i++) {
		if ((byte = *(uint8_t *)((uintptr_t)ptr + size + i)) != 0xa5) {
			malloc_printf("<jemalloc>: Corrupt redzone "
			    "%zu byte%s after end of %p (size %zu), byte=%#x\n",
			    i, (i == 1) ? "" : "s", ptr, size, byte);
	if (opt_abort && error)

	memset((void *)((uintptr_t)ptr - redzone_size), 0x5a,
	    bin_info->reg_interval);

arena_malloc_small(arena_t *arena, size_t size, bool zero)
	binind = SMALL_SIZE2BIN(size);
	assert(binind < NBINS);
	bin = &arena->bins[binind];
	size = arena_bin_info[binind].reg_size;

	malloc_mutex_lock(&bin->lock);
	if ((run = bin->runcur) != NULL && run->nfree > 0)
		ret = arena_run_reg_alloc(run, &arena_bin_info[binind]);
		ret = arena_bin_malloc_hard(arena, bin);
		malloc_mutex_unlock(&bin->lock);

		bin->stats.allocated += size;
		bin->stats.nmalloc++;
		bin->stats.nrequests++;
	malloc_mutex_unlock(&bin->lock);
	if (config_prof && isthreaded == false) {
		malloc_mutex_lock(&arena->lock);
		arena_prof_accum(arena, size);
		malloc_mutex_unlock(&arena->lock);

	if (zero == false) {
			arena_alloc_junk_small(ret,
			    &arena_bin_info[binind], false);
		} else if (opt_zero)
			memset(ret, 0, size);
		if (config_fill && opt_junk) {
			arena_alloc_junk_small(ret, &arena_bin_info[binind],
		VALGRIND_MAKE_MEM_UNDEFINED(ret, size);
		memset(ret, 0, size);

arena_malloc_large(arena_t *arena, size_t size, bool zero)
	/* Large allocation. */
	size = PAGE_CEILING(size);
	malloc_mutex_lock(&arena->lock);
	ret = (void *)arena_run_alloc(arena, size, true, BININD_INVALID, zero);
		malloc_mutex_unlock(&arena->lock);
		arena->stats.nmalloc_large++;
		arena->stats.nrequests_large++;
		arena->stats.allocated_large += size;
		arena->stats.lstats[(size >> LG_PAGE) - 1].nmalloc++;
		arena->stats.lstats[(size >> LG_PAGE) - 1].nrequests++;
		arena->stats.lstats[(size >> LG_PAGE) - 1].curruns++;
		arena_prof_accum(arena, size);
	malloc_mutex_unlock(&arena->lock);

	if (zero == false) {
			memset(ret, 0xa5, size);
			memset(ret, 0, size);

/* Only handles large allocations that require more than page alignment. */
arena_palloc(arena_t *arena, size_t size, size_t alignment, bool zero)
	size_t alloc_size, leadsize, trailsize;
	arena_chunk_t *chunk;

	assert((size & PAGE_MASK) == 0);

	alignment = PAGE_CEILING(alignment);
	alloc_size = size + alignment - PAGE;

	malloc_mutex_lock(&arena->lock);
	run = arena_run_alloc(arena, alloc_size, true, BININD_INVALID, zero);
		malloc_mutex_unlock(&arena->lock);
	chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(run);

	leadsize = ALIGNMENT_CEILING((uintptr_t)run, alignment) -
	assert(alloc_size >= leadsize + size);
	trailsize = alloc_size - leadsize - size;
	ret = (void *)((uintptr_t)run + leadsize);
	if (leadsize != 0) {
		arena_run_trim_head(arena, chunk, run, alloc_size, alloc_size -
	if (trailsize != 0) {
		arena_run_trim_tail(arena, chunk, ret, size + trailsize, size,

		arena->stats.nmalloc_large++;
		arena->stats.nrequests_large++;
		arena->stats.allocated_large += size;
		arena->stats.lstats[(size >> LG_PAGE) - 1].nmalloc++;
		arena->stats.lstats[(size >> LG_PAGE) - 1].nrequests++;
		arena->stats.lstats[(size >> LG_PAGE) - 1].curruns++;
	malloc_mutex_unlock(&arena->lock);

	if (config_fill && zero == false) {
			memset(ret, 0xa5, size);
			memset(ret, 0, size);
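/*
 * Alignment is achieved by over-allocating size + alignment - PAGE bytes
 * and trimming.  For example, with 4 KiB pages, size == 8192 and
 * alignment == 16384 request a 20480-byte (5-page) run; the lead bytes up
 * to the next 16 KiB boundary are returned via arena_run_trim_head(), and
 * any trailing excess via arena_run_trim_tail().
 */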
arena_prof_promoted(const void *ptr, size_t size)
	arena_chunk_t *chunk;
	size_t pageind, binind;

	cassert(config_prof);
	assert(ptr != NULL);
	assert(CHUNK_ADDR2BASE(ptr) != ptr);
	assert(isalloc(ptr, false) == PAGE);
	assert(isalloc(ptr, true) == PAGE);
	assert(size <= SMALL_MAXCLASS);

	chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
	pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE;
	binind = SMALL_SIZE2BIN(size);
	assert(binind < NBINS);
	arena_mapbits_large_binind_set(chunk, pageind, binind);

	assert(isalloc(ptr, false) == PAGE);
	assert(isalloc(ptr, true) == size);

arena_dissociate_bin_run(arena_chunk_t *chunk, arena_run_t *run,
    arena_bin_t *bin)
	/* Dissociate run from bin. */
	if (run == bin->runcur)
		size_t binind = arena_bin_index(chunk->arena, bin);
		arena_bin_info_t *bin_info = &arena_bin_info[binind];

		if (bin_info->nregs != 1) {
			 * This block's conditional is necessary because if the
			 * run only contains one region, then it never gets
			 * inserted into the non-full runs tree.
			arena_bin_runs_remove(bin, run);

arena_dalloc_bin_run(arena_t *arena, arena_chunk_t *chunk, arena_run_t *run,
    arena_bin_t *bin)
	arena_bin_info_t *bin_info;
	size_t npages, run_ind, past;

	assert(run != bin->runcur);
	assert(arena_run_tree_search(&bin->runs,
	    arena_mapp_get(chunk, ((uintptr_t)run-(uintptr_t)chunk)>>LG_PAGE))

	binind = arena_bin_index(chunk->arena, run->bin);
	bin_info = &arena_bin_info[binind];

	malloc_mutex_unlock(&bin->lock);
	/******************************/
	npages = bin_info->run_size >> LG_PAGE;
	run_ind = (size_t)(((uintptr_t)run - (uintptr_t)chunk) >> LG_PAGE);
	past = (size_t)(PAGE_CEILING((uintptr_t)run +
	    (uintptr_t)bin_info->reg0_offset + (uintptr_t)(run->nextind *
	    bin_info->reg_interval - bin_info->redzone_size) -
	    (uintptr_t)chunk) >> LG_PAGE);
	malloc_mutex_lock(&arena->lock);

	 * If the run was originally clean, and some pages were never touched,
	 * trim the clean pages before deallocating the dirty portion of the
	assert(arena_mapbits_dirty_get(chunk, run_ind) ==
	    arena_mapbits_dirty_get(chunk, run_ind+npages-1));
	if (arena_mapbits_dirty_get(chunk, run_ind) == 0 && past - run_ind <
		/* Trim clean pages.  Convert to large run beforehand. */
		arena_mapbits_large_set(chunk, run_ind, bin_info->run_size, 0);
		arena_mapbits_large_set(chunk, run_ind+npages-1, 0, 0);
		arena_run_trim_tail(arena, chunk, run, (npages << LG_PAGE),
		    ((past - run_ind) << LG_PAGE), false);
		/* npages = past - run_ind; */
	arena_run_dalloc(arena, run, true);
	malloc_mutex_unlock(&arena->lock);
	/****************************/
	malloc_mutex_lock(&bin->lock);
		bin->stats.curruns--;
arena_bin_lower_run(arena_t *arena, arena_chunk_t *chunk, arena_run_t *run,
    arena_bin_t *bin)
	 * Make sure that if bin->runcur is non-NULL, it refers to the lowest
	 * non-full run.  It is okay to NULL runcur out rather than proactively
	 * keeping it pointing at the lowest non-full run.
	if ((uintptr_t)run < (uintptr_t)bin->runcur) {
		/* Switch runcur. */
		if (bin->runcur->nfree > 0)
			arena_bin_runs_insert(bin, bin->runcur);
			bin->stats.reruns++;
		arena_bin_runs_insert(bin, run);

arena_dalloc_bin_locked(arena_t *arena, arena_chunk_t *chunk, void *ptr,
    arena_chunk_map_t *mapelm)
	arena_bin_info_t *bin_info;
	size_t size, binind;

	pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE;
	run = (arena_run_t *)((uintptr_t)chunk + (uintptr_t)((pageind -
	    arena_mapbits_small_runind_get(chunk, pageind)) << LG_PAGE));
	binind = arena_ptr_small_binind_get(ptr, mapelm->bits);
	bin_info = &arena_bin_info[binind];
	if (config_fill || config_stats)
		size = bin_info->reg_size;

	if (config_fill && opt_junk)
		arena_dalloc_junk_small(ptr, bin_info);

	arena_run_reg_dalloc(run, ptr);
	if (run->nfree == bin_info->nregs) {
		arena_dissociate_bin_run(chunk, run, bin);
		arena_dalloc_bin_run(arena, chunk, run, bin);
	} else if (run->nfree == 1 && run != bin->runcur)
		arena_bin_lower_run(arena, chunk, run, bin);

		bin->stats.allocated -= size;
		bin->stats.ndalloc++;

arena_dalloc_bin(arena_t *arena, arena_chunk_t *chunk, void *ptr,
    size_t pageind, arena_chunk_map_t *mapelm)
	run = (arena_run_t *)((uintptr_t)chunk + (uintptr_t)((pageind -
	    arena_mapbits_small_runind_get(chunk, pageind)) << LG_PAGE));
	malloc_mutex_lock(&bin->lock);
	arena_dalloc_bin_locked(arena, chunk, ptr, mapelm);
	malloc_mutex_unlock(&bin->lock);

arena_dalloc_small(arena_t *arena, arena_chunk_t *chunk, void *ptr,
    size_t pageind)
	arena_chunk_map_t *mapelm;

	/* arena_ptr_small_binind_get() does extra sanity checking. */
	assert(arena_ptr_small_binind_get(ptr, arena_mapbits_get(chunk,
	    pageind)) != BININD_INVALID);
	mapelm = arena_mapp_get(chunk, pageind);
	arena_dalloc_bin(arena, chunk, ptr, pageind, mapelm);

arena_stats_merge(arena_t *arena, size_t *nactive, size_t *ndirty,
    arena_stats_t *astats, malloc_bin_stats_t *bstats,
    malloc_large_stats_t *lstats)
	malloc_mutex_lock(&arena->lock);
	*nactive += arena->nactive;
	*ndirty += arena->ndirty;

	astats->mapped += arena->stats.mapped;
	astats->npurge += arena->stats.npurge;
	astats->nmadvise += arena->stats.nmadvise;
	astats->purged += arena->stats.purged;
	astats->allocated_large += arena->stats.allocated_large;
	astats->nmalloc_large += arena->stats.nmalloc_large;
	astats->ndalloc_large += arena->stats.ndalloc_large;
	astats->nrequests_large += arena->stats.nrequests_large;

	for (i = 0; i < nlclasses; i++) {
		lstats[i].nmalloc += arena->stats.lstats[i].nmalloc;
		lstats[i].ndalloc += arena->stats.lstats[i].ndalloc;
		lstats[i].nrequests += arena->stats.lstats[i].nrequests;
		lstats[i].curruns += arena->stats.lstats[i].curruns;
	malloc_mutex_unlock(&arena->lock);

	for (i = 0; i < NBINS; i++) {
		arena_bin_t *bin = &arena->bins[i];

		malloc_mutex_lock(&bin->lock);
		bstats[i].allocated += bin->stats.allocated;
		bstats[i].nmalloc += bin->stats.nmalloc;
		bstats[i].ndalloc += bin->stats.ndalloc;
		bstats[i].nrequests += bin->stats.nrequests;
		if (config_tcache) {
			bstats[i].nfills += bin->stats.nfills;
			bstats[i].nflushes += bin->stats.nflushes;
		bstats[i].nruns += bin->stats.nruns;
		bstats[i].reruns += bin->stats.reruns;
		bstats[i].curruns += bin->stats.curruns;
		malloc_mutex_unlock(&bin->lock);

arena_dalloc_large_locked(arena_t *arena, arena_chunk_t *chunk, void *ptr)
	if (config_fill || config_stats) {
		size_t pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE;
		size_t size = arena_mapbits_large_size_get(chunk, pageind);

		if (config_fill && config_stats && opt_junk)
			memset(ptr, 0x5a, size);
			arena->stats.ndalloc_large++;
			arena->stats.allocated_large -= size;
			arena->stats.lstats[(size >> LG_PAGE) - 1].ndalloc++;
			arena->stats.lstats[(size >> LG_PAGE) - 1].curruns--;

	arena_run_dalloc(arena, (arena_run_t *)ptr, true);

arena_dalloc_large(arena_t *arena, arena_chunk_t *chunk, void *ptr)
	malloc_mutex_lock(&arena->lock);
	arena_dalloc_large_locked(arena, chunk, ptr);
	malloc_mutex_unlock(&arena->lock);

arena_ralloc_large_shrink(arena_t *arena, arena_chunk_t *chunk, void *ptr,
    size_t oldsize, size_t size)
	assert(size < oldsize);

	 * Shrink the run, and make trailing pages available for other
	malloc_mutex_lock(&arena->lock);
	arena_run_trim_tail(arena, chunk, (arena_run_t *)ptr, oldsize, size,
		arena->stats.ndalloc_large++;
		arena->stats.allocated_large -= oldsize;
		arena->stats.lstats[(oldsize >> LG_PAGE) - 1].ndalloc++;
		arena->stats.lstats[(oldsize >> LG_PAGE) - 1].curruns--;

		arena->stats.nmalloc_large++;
		arena->stats.nrequests_large++;
		arena->stats.allocated_large += size;
		arena->stats.lstats[(size >> LG_PAGE) - 1].nmalloc++;
		arena->stats.lstats[(size >> LG_PAGE) - 1].nrequests++;
		arena->stats.lstats[(size >> LG_PAGE) - 1].curruns++;
	malloc_mutex_unlock(&arena->lock);

arena_ralloc_large_grow(arena_t *arena, arena_chunk_t *chunk, void *ptr,
    size_t oldsize, size_t size, size_t extra, bool zero)
	size_t pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE;
	size_t npages = oldsize >> LG_PAGE;

	assert(oldsize == arena_mapbits_large_size_get(chunk, pageind));

	/* Try to extend the run. */
	assert(size + extra > oldsize);
	malloc_mutex_lock(&arena->lock);
	if (pageind + npages < chunk_npages &&
	    arena_mapbits_allocated_get(chunk, pageind+npages) == 0 &&
	    (followsize = arena_mapbits_unallocated_size_get(chunk,
	    pageind+npages)) >= size - oldsize) {
		 * The next run is available and sufficiently large.  Split the
		 * following run, then merge the first part with the existing
		size_t splitsize = (oldsize + followsize <= size + extra)
		    ? followsize : size + extra - oldsize;
		arena_run_split(arena, (arena_run_t *)((uintptr_t)chunk +
		    ((pageind+npages) << LG_PAGE)), splitsize, true,
		    BININD_INVALID, zero);

		size = oldsize + splitsize;
		npages = size >> LG_PAGE;

		 * Mark the extended run as dirty if either portion of the run
		 * was dirty before allocation.  This is rather pedantic,
		 * because there's not actually any sequence of events that
		 * could cause the resulting run to be passed to
		 * arena_run_dalloc() with the dirty argument set to false
		 * (which is when dirty flag consistency would really matter).
		flag_dirty = arena_mapbits_dirty_get(chunk, pageind) |
		    arena_mapbits_dirty_get(chunk, pageind+npages-1);
		arena_mapbits_large_set(chunk, pageind, size, flag_dirty);
		arena_mapbits_large_set(chunk, pageind+npages-1, 0, flag_dirty);

			arena->stats.ndalloc_large++;
			arena->stats.allocated_large -= oldsize;
			arena->stats.lstats[(oldsize >> LG_PAGE) - 1].ndalloc++;
			arena->stats.lstats[(oldsize >> LG_PAGE) - 1].curruns--;

			arena->stats.nmalloc_large++;
			arena->stats.nrequests_large++;
			arena->stats.allocated_large += size;
			arena->stats.lstats[(size >> LG_PAGE) - 1].nmalloc++;
			arena->stats.lstats[(size >> LG_PAGE) - 1].nrequests++;
			arena->stats.lstats[(size >> LG_PAGE) - 1].curruns++;
		malloc_mutex_unlock(&arena->lock);
	malloc_mutex_unlock(&arena->lock);
 * Try to resize a large allocation, in order to avoid copying.  This will
 * always fail if growing an object, and the following run is already in use.
arena_ralloc_large(void *ptr, size_t oldsize, size_t size, size_t extra,
    bool zero)
	psize = PAGE_CEILING(size + extra);
	if (psize == oldsize) {
		/* Same size class. */
		if (config_fill && opt_junk && size < oldsize) {
			memset((void *)((uintptr_t)ptr + size), 0x5a, oldsize -
		arena_chunk_t *chunk;

		chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
		arena = chunk->arena;

		if (psize < oldsize) {
			/* Fill before shrinking in order to avoid a race. */
			if (config_fill && opt_junk) {
				memset((void *)((uintptr_t)ptr + size), 0x5a,
			arena_ralloc_large_shrink(arena, chunk, ptr, oldsize,
			bool ret = arena_ralloc_large_grow(arena, chunk, ptr,
			    oldsize, PAGE_CEILING(size),
			    psize - PAGE_CEILING(size), zero);
			if (config_fill && ret == false && zero == false &&
				memset((void *)((uintptr_t)ptr + oldsize), 0,

arena_ralloc_no_move(void *ptr, size_t oldsize, size_t size, size_t extra,
    bool zero)
	 * Avoid moving the allocation if the size class can be left the same.
	if (oldsize <= arena_maxclass) {
		if (oldsize <= SMALL_MAXCLASS) {
			assert(arena_bin_info[SMALL_SIZE2BIN(oldsize)].reg_size
			if ((size + extra <= SMALL_MAXCLASS &&
			    SMALL_SIZE2BIN(size + extra) ==
			    SMALL_SIZE2BIN(oldsize)) || (size <= oldsize &&
			    size + extra >= oldsize)) {
				if (config_fill && opt_junk && size < oldsize) {
					memset((void *)((uintptr_t)ptr + size),
					    0x5a, oldsize - size);
			assert(size <= arena_maxclass);
			if (size + extra > SMALL_MAXCLASS) {
				if (arena_ralloc_large(ptr, oldsize, size,
				    extra, zero) == false)

	/* Reallocation would require a move. */

arena_ralloc(void *ptr, size_t oldsize, size_t size, size_t extra,
    size_t alignment, bool zero, bool try_tcache)
	/* Try to avoid moving the allocation. */
	ret = arena_ralloc_no_move(ptr, oldsize, size, extra, zero);

	 * size and oldsize are different enough that we need to move the
	 * object.  In that case, fall back to allocating new space and
	if (alignment != 0) {
		size_t usize = sa2u(size + extra, alignment);
		ret = ipalloc(usize, alignment, zero);
		ret = arena_malloc(NULL, size + extra, zero, try_tcache);

		/* Try again, this time without extra. */
		if (alignment != 0) {
			size_t usize = sa2u(size, alignment);
			ret = ipalloc(usize, alignment, zero);
			ret = arena_malloc(NULL, size, zero, try_tcache);

	/* Junk/zero-filling were already done by ipalloc()/arena_malloc(). */

	 * Copy at most size bytes (not size+extra), since the caller has no
	 * expectation that the extra bytes will be reliably preserved.
	copysize = (size < oldsize) ? size : oldsize;
	VALGRIND_MAKE_MEM_UNDEFINED(ret, copysize);
	memcpy(ret, ptr, copysize);
arena_new(arena_t *arena, unsigned ind)
	arena->nthreads = 0;

	if (malloc_mutex_init(&arena->lock))

		memset(&arena->stats, 0, sizeof(arena_stats_t));
		arena->stats.lstats =
		    (malloc_large_stats_t *)base_alloc(nlclasses *
		    sizeof(malloc_large_stats_t));
		if (arena->stats.lstats == NULL)
		memset(arena->stats.lstats, 0, nlclasses *
		    sizeof(malloc_large_stats_t));
			ql_new(&arena->tcache_ql);

		arena->prof_accumbytes = 0;

	/* Initialize chunks. */
	ql_new(&arena->chunks_dirty);
	arena->spare = NULL;

	arena->npurgatory = 0;

	arena_avail_tree_new(&arena->runs_avail_clean);
	arena_avail_tree_new(&arena->runs_avail_dirty);

	/* Initialize bins. */
	for (i = 0; i < NBINS; i++) {
		bin = &arena->bins[i];
		if (malloc_mutex_init(&bin->lock))
		arena_run_tree_new(&bin->runs);
		memset(&bin->stats, 0, sizeof(malloc_bin_stats_t));
 * Calculate bin_info->run_size such that it meets the following constraints:
 *
 *   *) bin_info->run_size >= min_run_size
 *   *) bin_info->run_size <= arena_maxclass
 *   *) run header overhead <= RUN_MAX_OVRHD (or header overhead relaxed).
 *   *) bin_info->nregs <= RUN_MAXREGS
 *
 * bin_info->nregs, bin_info->bitmap_offset, and bin_info->reg0_offset are also
 * calculated here, since these settings are all interdependent.
bin_info_run_size_calc(arena_bin_info_t *bin_info, size_t min_run_size)
	size_t try_run_size, good_run_size;
	uint32_t try_nregs, good_nregs;
	uint32_t try_hdr_size, good_hdr_size;
	uint32_t try_bitmap_offset, good_bitmap_offset;
	uint32_t try_ctx0_offset, good_ctx0_offset;
	uint32_t try_redzone0_offset, good_redzone0_offset;

	assert(min_run_size >= PAGE);
	assert(min_run_size <= arena_maxclass);

	 * Determine redzone size based on minimum alignment and minimum
	 * redzone size.  Add padding to the end of the run if it is needed to
	 * align the regions.  The padding allows each redzone to be half the
	 * minimum alignment; without the padding, each redzone would have to
	 * be twice as large in order to maintain alignment.
	if (config_fill && opt_redzone) {
		size_t align_min = ZU(1) << (ffs(bin_info->reg_size) - 1);
		if (align_min <= REDZONE_MINSIZE) {
			bin_info->redzone_size = REDZONE_MINSIZE;
			bin_info->redzone_size = align_min >> 1;
			pad_size = bin_info->redzone_size;
		bin_info->redzone_size = 0;
	bin_info->reg_interval = bin_info->reg_size +
	    (bin_info->redzone_size << 1);

	 * Calculate known-valid settings before entering the run_size
	 * expansion loop, so that the first part of the loop always copies
	 *
	 * The do..while loop iteratively reduces the number of regions until
	 * the run header and the regions no longer overlap.  A closed formula
	 * would be quite messy, since there is an interdependency between the
	 * header's mask length and the number of regions.
	try_run_size = min_run_size;
	try_nregs = ((try_run_size - sizeof(arena_run_t)) /
	    bin_info->reg_interval)
	    + 1; /* Counter-act try_nregs-- in loop. */
	if (try_nregs > RUN_MAXREGS) {
		try_nregs = RUN_MAXREGS
		    + 1; /* Counter-act try_nregs-- in loop. */
		try_hdr_size = sizeof(arena_run_t);
		/* Pad to a long boundary. */
		try_hdr_size = LONG_CEILING(try_hdr_size);
		try_bitmap_offset = try_hdr_size;
		/* Add space for bitmap. */
		try_hdr_size += bitmap_size(try_nregs);
		if (config_prof && opt_prof && prof_promote == false) {
			/* Pad to a quantum boundary. */
			try_hdr_size = QUANTUM_CEILING(try_hdr_size);
			try_ctx0_offset = try_hdr_size;
			/* Add space for one (prof_ctx_t *) per region. */
			try_hdr_size += try_nregs * sizeof(prof_ctx_t *);
			try_ctx0_offset = 0;
		try_redzone0_offset = try_run_size - (try_nregs *
		    bin_info->reg_interval) - pad_size;
	} while (try_hdr_size > try_redzone0_offset);

	/* run_size expansion loop. */
		 * Copy valid settings before trying more aggressive settings.
		good_run_size = try_run_size;
		good_nregs = try_nregs;
		good_hdr_size = try_hdr_size;
		good_bitmap_offset = try_bitmap_offset;
		good_ctx0_offset = try_ctx0_offset;
		good_redzone0_offset = try_redzone0_offset;

		/* Try more aggressive settings. */
		try_run_size += PAGE;
		try_nregs = ((try_run_size - sizeof(arena_run_t) - pad_size) /
		    bin_info->reg_interval)
		    + 1; /* Counter-act try_nregs-- in loop. */
		if (try_nregs > RUN_MAXREGS) {
			try_nregs = RUN_MAXREGS
			    + 1; /* Counter-act try_nregs-- in loop. */
			try_hdr_size = sizeof(arena_run_t);
			/* Pad to a long boundary. */
			try_hdr_size = LONG_CEILING(try_hdr_size);
			try_bitmap_offset = try_hdr_size;
			/* Add space for bitmap. */
			try_hdr_size += bitmap_size(try_nregs);
			if (config_prof && opt_prof && prof_promote == false) {
				/* Pad to a quantum boundary. */
				try_hdr_size = QUANTUM_CEILING(try_hdr_size);
				try_ctx0_offset = try_hdr_size;
				 * Add space for one (prof_ctx_t *) per region.
				try_hdr_size += try_nregs *
				    sizeof(prof_ctx_t *);
			try_redzone0_offset = try_run_size - (try_nregs *
			    bin_info->reg_interval) - pad_size;
		} while (try_hdr_size > try_redzone0_offset);
	} while (try_run_size <= arena_maxclass
	    && try_run_size <= arena_maxclass
	    && RUN_MAX_OVRHD * (bin_info->reg_interval << 3) >
	    && (try_redzone0_offset << RUN_BFP) > RUN_MAX_OVRHD * try_run_size
	    && try_nregs < RUN_MAXREGS);

	assert(good_hdr_size <= good_redzone0_offset);

	/* Copy final settings. */
	bin_info->run_size = good_run_size;
	bin_info->nregs = good_nregs;
	bin_info->bitmap_offset = good_bitmap_offset;
	bin_info->ctx0_offset = good_ctx0_offset;
	bin_info->reg0_offset = good_redzone0_offset + bin_info->redzone_size;

	assert(bin_info->reg0_offset - bin_info->redzone_size + (bin_info->nregs
	    * bin_info->reg_interval) + pad_size == bin_info->run_size);

	return (good_run_size);
	arena_bin_info_t *bin_info;
	size_t prev_run_size = PAGE;

#define	SIZE_CLASS(bin, delta, size)					\
	bin_info = &arena_bin_info[bin];				\
	bin_info->reg_size = size;					\
	prev_run_size = bin_info_run_size_calc(bin_info, prev_run_size);\
	bitmap_info_init(&bin_info->bitmap_info, bin_info->nregs);

	 * Compute the header size such that it is large enough to contain the
	 * page map.  The page map is biased to omit entries for the header
	 * itself, so some iteration is necessary to compute the map bias.
	 *
	 * 1) Compute safe header_size and map_bias values that include enough
	 *    space for an unbiased page map.
	 * 2) Refine map_bias based on (1) to omit the header pages in the page
	 *    map.  The resulting map_bias may be one too small.
	 * 3) Refine map_bias based on (2).  The result will be >= the result
	 *    from (2), and will always be correct.
	for (i = 0; i < 3; i++) {
		header_size = offsetof(arena_chunk_t, map) +
		    (sizeof(arena_chunk_map_t) * (chunk_npages-map_bias));
		map_bias = (header_size >> LG_PAGE) + ((header_size & PAGE_MASK)
	assert(map_bias > 0);

	arena_maxclass = chunksize - (map_bias << LG_PAGE);
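/*
 * The three passes converge because each iteration recomputes header_size
 * from the previous map_bias: the first pass starts from map_bias == 0 (an
 * unbiased, slightly oversized map), the second shrinks the map by the
 * header pages just computed (possibly one page short), and the third
 * settles on the fixed point; arena_maxclass is whatever remains of the
 * chunk after the header pages.
 */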
arena_prefork(arena_t *arena)
	malloc_mutex_prefork(&arena->lock);
	for (i = 0; i < NBINS; i++)
		malloc_mutex_prefork(&arena->bins[i].lock);

arena_postfork_parent(arena_t *arena)
	for (i = 0; i < NBINS; i++)
		malloc_mutex_postfork_parent(&arena->bins[i].lock);
	malloc_mutex_postfork_parent(&arena->lock);

arena_postfork_child(arena_t *arena)
	for (i = 0; i < NBINS; i++)
		malloc_mutex_postfork_child(&arena->bins[i].lock);
	malloc_mutex_postfork_child(&arena->lock);