#define JEMALLOC_ARENA_C_
#include "jemalloc/internal/jemalloc_internal.h"

/******************************************************************************/

size_t opt_lg_qspace_max = LG_QSPACE_MAX_DEFAULT;
size_t opt_lg_cspace_max = LG_CSPACE_MAX_DEFAULT;
ssize_t opt_lg_dirty_mult = LG_DIRTY_MULT_DEFAULT;
uint8_t const *small_size2bin;
arena_bin_info_t *arena_bin_info;
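/*
 * Note (added for clarity): small_size2bin is selected at startup.  It
 * normally points at the const_small_size2bin table defined below, and is
 * only replaced by a dynamically computed table when the configured size
 * class parameters differ from the defaults that the static table encodes.
 * arena_bin_info holds the per-bin layout metadata (reg_size, run_size,
 * nregs, bitmap/reg0 offsets) that the run management code below relies on.
 */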
/* Various bin-related settings. */

/*
 * const_small_size2bin is a static constant lookup table that in the common
 * case can be used as-is for small_size2bin.
 */
#if (LG_TINY_MIN == 2)
#define S2B_4(i)    i,
#define S2B_8(i)    S2B_4(i) S2B_4(i)
#elif (LG_TINY_MIN == 3)
#define S2B_8(i)    i,
#else
#  error "Unsupported LG_TINY_MIN"
#endif
#define S2B_16(i)   S2B_8(i) S2B_8(i)
#define S2B_32(i)   S2B_16(i) S2B_16(i)
#define S2B_64(i)   S2B_32(i) S2B_32(i)
#define S2B_128(i)  S2B_64(i) S2B_64(i)
#define S2B_256(i)  S2B_128(i) S2B_128(i)
/*
 * The number of elements in const_small_size2bin is dependent on the
 * definition for SUBPAGE.
 */
static JEMALLOC_ATTR(aligned(CACHELINE))
    const uint8_t const_small_size2bin[] = {
#if (LG_QUANTUM == 4)
/* 16-byte quantum **********************/
#  if (LG_TINY_MIN == 2)
#  elif (LG_TINY_MIN == 3)
#  else
#    error "Unsupported LG_TINY_MIN"
#  endif
    S2B_16(S2B_QMIN + 1)    /*  32 */
    S2B_16(S2B_QMIN + 2)    /*  48 */
    S2B_16(S2B_QMIN + 3)    /*  64 */
    S2B_16(S2B_QMIN + 4)    /*  80 */
    S2B_16(S2B_QMIN + 5)    /*  96 */
    S2B_16(S2B_QMIN + 6)    /* 112 */
    S2B_16(S2B_QMIN + 7)    /* 128 */
#  define S2B_CMIN (S2B_QMIN + 8)
#else
/* 8-byte quantum ***********************/
#  if (LG_TINY_MIN == 2)
#  else
#    error "Unsupported LG_TINY_MIN"
#  endif
    S2B_8(S2B_QMIN + 1)     /*  16 */
    S2B_8(S2B_QMIN + 2)     /*  24 */
    S2B_8(S2B_QMIN + 3)     /*  32 */
    S2B_8(S2B_QMIN + 4)     /*  40 */
    S2B_8(S2B_QMIN + 5)     /*  48 */
    S2B_8(S2B_QMIN + 6)     /*  56 */
    S2B_8(S2B_QMIN + 7)     /*  64 */
    S2B_8(S2B_QMIN + 8)     /*  72 */
    S2B_8(S2B_QMIN + 9)     /*  80 */
    S2B_8(S2B_QMIN + 10)    /*  88 */
    S2B_8(S2B_QMIN + 11)    /*  96 */
    S2B_8(S2B_QMIN + 12)    /* 104 */
    S2B_8(S2B_QMIN + 13)    /* 112 */
    S2B_8(S2B_QMIN + 14)    /* 120 */
    S2B_8(S2B_QMIN + 15)    /* 128 */
#  define S2B_CMIN (S2B_QMIN + 16)
#endif
/****************************************/
    S2B_64(S2B_CMIN + 0)    /*  192 */
    S2B_64(S2B_CMIN + 1)    /*  256 */
    S2B_64(S2B_CMIN + 2)    /*  320 */
    S2B_64(S2B_CMIN + 3)    /*  384 */
    S2B_64(S2B_CMIN + 4)    /*  448 */
    S2B_64(S2B_CMIN + 5)    /*  512 */
#  define S2B_SMIN (S2B_CMIN + 6)
    S2B_256(S2B_SMIN + 0)   /*  768 */
    S2B_256(S2B_SMIN + 1)   /* 1024 */
    S2B_256(S2B_SMIN + 2)   /* 1280 */
    S2B_256(S2B_SMIN + 3)   /* 1536 */
    S2B_256(S2B_SMIN + 4)   /* 1792 */
    S2B_256(S2B_SMIN + 5)   /* 2048 */
    S2B_256(S2B_SMIN + 6)   /* 2304 */
    S2B_256(S2B_SMIN + 7)   /* 2560 */
    S2B_256(S2B_SMIN + 8)   /* 2816 */
    S2B_256(S2B_SMIN + 9)   /* 3072 */
    S2B_256(S2B_SMIN + 10)  /* 3328 */
    S2B_256(S2B_SMIN + 11)  /* 3584 */
    S2B_256(S2B_SMIN + 12)  /* 3840 */
#if (STATIC_PAGE_SHIFT == 13)
    S2B_256(S2B_SMIN + 13)  /* 4096 */
    S2B_256(S2B_SMIN + 14)  /* 4352 */
    S2B_256(S2B_SMIN + 15)  /* 4608 */
    S2B_256(S2B_SMIN + 16)  /* 4864 */
    S2B_256(S2B_SMIN + 17)  /* 5120 */
    S2B_256(S2B_SMIN + 18)  /* 5376 */
    S2B_256(S2B_SMIN + 19)  /* 5632 */
    S2B_256(S2B_SMIN + 20)  /* 5888 */
    S2B_256(S2B_SMIN + 21)  /* 6144 */
    S2B_256(S2B_SMIN + 22)  /* 6400 */
    S2B_256(S2B_SMIN + 23)  /* 6656 */
    S2B_256(S2B_SMIN + 24)  /* 6912 */
    S2B_256(S2B_SMIN + 25)  /* 7168 */
    S2B_256(S2B_SMIN + 26)  /* 7424 */
    S2B_256(S2B_SMIN + 27)  /* 7680 */
    S2B_256(S2B_SMIN + 28)  /* 7936 */
#endif
};
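/*
 * Note (added for clarity): the table above maps a request size to a small
 * bin index at LG_TINY_MIN granularity.  A lookup of roughly the form
 * small_size2bin[(size-1) >> LG_TINY_MIN] -- this is what the
 * SMALL_SIZE2BIN() macro used later in this file expands to -- resolves a
 * size to its bin in O(1), without branching on size class boundaries.  Each
 * S2B_n(i) expansion emits n/2^LG_TINY_MIN consecutive table entries with
 * value i.
 */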
/******************************************************************************/
/* Function prototypes for non-inline static functions. */

static void arena_run_split(arena_t *arena, arena_run_t *run, size_t size,
    bool large, bool zero);
static arena_chunk_t *arena_chunk_alloc(arena_t *arena);
static void arena_chunk_dealloc(arena_t *arena, arena_chunk_t *chunk);
static arena_run_t *arena_run_alloc(arena_t *arena, size_t size, bool large,
    bool zero);
static void arena_purge(arena_t *arena, bool all);
static void arena_run_dalloc(arena_t *arena, arena_run_t *run, bool dirty);
static void arena_run_trim_head(arena_t *arena, arena_chunk_t *chunk,
    arena_run_t *run, size_t oldsize, size_t newsize);
static void arena_run_trim_tail(arena_t *arena, arena_chunk_t *chunk,
    arena_run_t *run, size_t oldsize, size_t newsize, bool dirty);
static arena_run_t *arena_bin_nonfull_run_get(arena_t *arena, arena_bin_t *bin);
static void *arena_bin_malloc_hard(arena_t *arena, arena_bin_t *bin);
static void arena_dissociate_bin_run(arena_chunk_t *chunk, arena_run_t *run,
    arena_bin_t *bin);
static void arena_dalloc_bin_run(arena_t *arena, arena_chunk_t *chunk,
    arena_run_t *run, arena_bin_t *bin);
static void arena_bin_lower_run(arena_t *arena, arena_chunk_t *chunk,
    arena_run_t *run, arena_bin_t *bin);
static void arena_ralloc_large_shrink(arena_t *arena, arena_chunk_t *chunk,
    void *ptr, size_t oldsize, size_t size);
static bool arena_ralloc_large_grow(arena_t *arena, arena_chunk_t *chunk,
    void *ptr, size_t oldsize, size_t size, size_t extra, bool zero);
static bool arena_ralloc_large(void *ptr, size_t oldsize, size_t size,
    size_t extra, bool zero);
static bool small_size2bin_init(void);
#ifdef JEMALLOC_DEBUG
static void small_size2bin_validate(void);
#endif
static bool small_size2bin_init_hard(void);
static size_t bin_info_run_size_calc(arena_bin_info_t *bin_info,
    size_t min_run_size);
static bool bin_info_init(void);

/******************************************************************************/
static inline int
arena_run_comp(arena_chunk_map_t *a, arena_chunk_map_t *b)
{
    uintptr_t a_mapelm = (uintptr_t)a;
    uintptr_t b_mapelm = (uintptr_t)b;

    return ((a_mapelm > b_mapelm) - (a_mapelm < b_mapelm));
}

/* Generate red-black tree functions. */
rb_gen(static JEMALLOC_ATTR(unused), arena_run_tree_, arena_run_tree_t,
    arena_chunk_map_t, u.rb_link, arena_run_comp)

static inline int
arena_avail_comp(arena_chunk_map_t *a, arena_chunk_map_t *b)
{
    int ret;
    size_t a_size = a->bits & ~PAGE_MASK;
    size_t b_size = b->bits & ~PAGE_MASK;

    assert((a->bits & CHUNK_MAP_KEY) == CHUNK_MAP_KEY || (a->bits &
        CHUNK_MAP_DIRTY) == (b->bits & CHUNK_MAP_DIRTY));

    ret = (a_size > b_size) - (a_size < b_size);
    if (ret == 0) {
        uintptr_t a_mapelm, b_mapelm;

        if ((a->bits & CHUNK_MAP_KEY) != CHUNK_MAP_KEY)
            a_mapelm = (uintptr_t)a;
        else {
            /*
             * Treat keys as though they are lower than anything
             * else.
             */
            a_mapelm = 0;
        }
        b_mapelm = (uintptr_t)b;

        ret = (a_mapelm > b_mapelm) - (a_mapelm < b_mapelm);
    }

    return (ret);
}

/* Generate red-black tree functions. */
rb_gen(static JEMALLOC_ATTR(unused), arena_avail_tree_, arena_avail_tree_t,
    arena_chunk_map_t, u.rb_link, arena_avail_comp)
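/*
 * Note (added for clarity): arena_avail_comp() orders available runs
 * primarily by size and secondarily by address, and treats CHUNK_MAP_KEY
 * search keys as lower than any real map element of equal size.  Combined
 * with arena_avail_tree_nsearch() in arena_run_alloc(), this yields a
 * lowest-address best-fit allocation policy for runs.
 */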
static inline void *
arena_run_reg_alloc(arena_run_t *run, arena_bin_info_t *bin_info)
{
    void *ret;
    unsigned regind;
    bitmap_t *bitmap = (bitmap_t *)((uintptr_t)run +
        (uintptr_t)bin_info->bitmap_offset);

    dassert(run->magic == ARENA_RUN_MAGIC);
    assert(run->nfree > 0);
    assert(bitmap_full(bitmap, &bin_info->bitmap_info) == false);

    regind = bitmap_sfu(bitmap, &bin_info->bitmap_info);
    ret = (void *)((uintptr_t)run + (uintptr_t)bin_info->reg0_offset +
        (uintptr_t)(bin_info->reg_size * regind));
    run->nfree--;
    if (regind == run->nextind)
        run->nextind++;
    assert(regind < run->nextind);
    return (ret);
}
static inline void
arena_run_reg_dalloc(arena_run_t *run, void *ptr)
{
    arena_chunk_t *chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(run);
    size_t binind = arena_bin_index(chunk->arena, run->bin);
    arena_bin_info_t *bin_info = &arena_bin_info[binind];
    unsigned regind = arena_run_regind(run, bin_info, ptr);
    bitmap_t *bitmap = (bitmap_t *)((uintptr_t)run +
        (uintptr_t)bin_info->bitmap_offset);

    assert(run->nfree < bin_info->nregs);
    /* Freeing an interior pointer can cause assertion failure. */
    assert(((uintptr_t)ptr - ((uintptr_t)run +
        (uintptr_t)bin_info->reg0_offset)) % (uintptr_t)bin_info->reg_size
        == 0);
    assert((uintptr_t)ptr >= (uintptr_t)run +
        (uintptr_t)bin_info->reg0_offset);
    /* Freeing an unallocated pointer can cause assertion failure. */
    assert(bitmap_get(bitmap, &bin_info->bitmap_info, regind));

    bitmap_unset(bitmap, &bin_info->bitmap_info, regind);
    run->nfree++;
}
#ifdef JEMALLOC_DEBUG
static inline void
arena_chunk_validate_zeroed(arena_chunk_t *chunk, size_t run_ind)
{
    size_t i;
    size_t *p = (size_t *)((uintptr_t)chunk + (run_ind << PAGE_SHIFT));

    for (i = 0; i < PAGE_SIZE / sizeof(size_t); i++)
        assert(p[i] == 0);
}
#endif
static void
arena_run_split(arena_t *arena, arena_run_t *run, size_t size, bool large,
    bool zero)
{
    arena_chunk_t *chunk;
    size_t old_ndirty, run_ind, total_pages, need_pages, rem_pages, i;
    size_t flag_dirty;
    arena_avail_tree_t *runs_avail;
#ifdef JEMALLOC_STATS
    size_t cactive_diff;
#endif

    chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(run);
    old_ndirty = chunk->ndirty;
    run_ind = (unsigned)(((uintptr_t)run - (uintptr_t)chunk)
        >> PAGE_SHIFT);
    flag_dirty = chunk->map[run_ind-map_bias].bits & CHUNK_MAP_DIRTY;
    runs_avail = (flag_dirty != 0) ? &arena->runs_avail_dirty :
        &arena->runs_avail_clean;
    total_pages = (chunk->map[run_ind-map_bias].bits & ~PAGE_MASK) >>
        PAGE_SHIFT;
    assert((chunk->map[run_ind+total_pages-1-map_bias].bits &
        CHUNK_MAP_DIRTY) == flag_dirty);
    need_pages = (size >> PAGE_SHIFT);
    assert(need_pages > 0);
    assert(need_pages <= total_pages);
    rem_pages = total_pages - need_pages;

    arena_avail_tree_remove(runs_avail, &chunk->map[run_ind-map_bias]);
#ifdef JEMALLOC_STATS
    /* Update stats_cactive if nactive is crossing a chunk multiple. */
    cactive_diff = CHUNK_CEILING((arena->nactive + need_pages) <<
        PAGE_SHIFT) - CHUNK_CEILING(arena->nactive << PAGE_SHIFT);
    if (cactive_diff != 0)
        stats_cactive_add(cactive_diff);
#endif
    arena->nactive += need_pages;

    /* Keep track of trailing unused pages for later use. */
    if (rem_pages > 0) {
        if (flag_dirty != 0) {
            chunk->map[run_ind+need_pages-map_bias].bits =
                (rem_pages << PAGE_SHIFT) | CHUNK_MAP_DIRTY;
            chunk->map[run_ind+total_pages-1-map_bias].bits =
                (rem_pages << PAGE_SHIFT) | CHUNK_MAP_DIRTY;
        } else {
            chunk->map[run_ind+need_pages-map_bias].bits =
                (rem_pages << PAGE_SHIFT) |
                (chunk->map[run_ind+need_pages-map_bias].bits &
                CHUNK_MAP_UNZEROED);
            chunk->map[run_ind+total_pages-1-map_bias].bits =
                (rem_pages << PAGE_SHIFT) |
                (chunk->map[run_ind+total_pages-1-map_bias].bits &
                CHUNK_MAP_UNZEROED);
        }
        arena_avail_tree_insert(runs_avail,
            &chunk->map[run_ind+need_pages-map_bias]);
    }

    /* Update dirty page accounting. */
    if (flag_dirty != 0) {
        chunk->ndirty -= need_pages;
        arena->ndirty -= need_pages;
    }

    /*
     * Update the page map separately for large vs. small runs, since it is
     * possible to avoid iteration for large mallocs.
     */
    if (large) {
        if (zero) {
            if (flag_dirty == 0) {
                /*
                 * The run is clean, so some pages may be
                 * zeroed (i.e. never before touched).
                 */
                for (i = 0; i < need_pages; i++) {
                    if ((chunk->map[run_ind+i-map_bias].bits
                        & CHUNK_MAP_UNZEROED) != 0) {
                        memset((void *)((uintptr_t)
                            chunk + ((run_ind+i) <<
                            PAGE_SHIFT)), 0, PAGE_SIZE);
                    }
#ifdef JEMALLOC_DEBUG
                    else {
                        arena_chunk_validate_zeroed(
                            chunk, run_ind+i);
                    }
#endif
                }
            } else {
                /*
                 * The run is dirty, so all pages must be
                 * zeroed.
                 */
                memset((void *)((uintptr_t)chunk + (run_ind <<
                    PAGE_SHIFT)), 0, (need_pages <<
                    PAGE_SHIFT));
            }
        }

        /*
         * Set the last element first, in case the run only contains one
         * page (i.e. both statements set the same element).
         */
        chunk->map[run_ind+need_pages-1-map_bias].bits =
            CHUNK_MAP_LARGE | CHUNK_MAP_ALLOCATED | flag_dirty;
        chunk->map[run_ind-map_bias].bits = size | flag_dirty |
            CHUNK_MAP_LARGE | CHUNK_MAP_ALLOCATED;
    } else {
        assert(zero == false);
        /*
         * Propagate the dirty and unzeroed flags to the allocated
         * small run, so that arena_dalloc_bin_run() has the ability to
         * conditionally trim clean pages.
         */
        chunk->map[run_ind-map_bias].bits =
            (chunk->map[run_ind-map_bias].bits & CHUNK_MAP_UNZEROED) |
            CHUNK_MAP_ALLOCATED | flag_dirty;
#ifdef JEMALLOC_DEBUG
        /*
         * The first page will always be dirtied during small run
         * initialization, so a validation failure here would not
         * actually cause an observable failure.
         */
        if (flag_dirty == 0 &&
            (chunk->map[run_ind-map_bias].bits & CHUNK_MAP_UNZEROED)
            == 0)
            arena_chunk_validate_zeroed(chunk, run_ind);
#endif
        for (i = 1; i < need_pages - 1; i++) {
            chunk->map[run_ind+i-map_bias].bits = (i << PAGE_SHIFT)
                | (chunk->map[run_ind+i-map_bias].bits &
                CHUNK_MAP_UNZEROED) | CHUNK_MAP_ALLOCATED;
#ifdef JEMALLOC_DEBUG
            if (flag_dirty == 0 &&
                (chunk->map[run_ind+i-map_bias].bits &
                CHUNK_MAP_UNZEROED) == 0)
                arena_chunk_validate_zeroed(chunk, run_ind+i);
#endif
        }
        chunk->map[run_ind+need_pages-1-map_bias].bits = ((need_pages
            - 1) << PAGE_SHIFT) |
            (chunk->map[run_ind+need_pages-1-map_bias].bits &
            CHUNK_MAP_UNZEROED) | CHUNK_MAP_ALLOCATED | flag_dirty;
#ifdef JEMALLOC_DEBUG
        if (flag_dirty == 0 &&
            (chunk->map[run_ind+need_pages-1-map_bias].bits &
            CHUNK_MAP_UNZEROED) == 0) {
            arena_chunk_validate_zeroed(chunk,
                run_ind+need_pages-1);
        }
#endif
    }
}
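/*
 * Note (added for clarity): at this point arena_run_split() has carved
 * need_pages out of a free run.  Trailing pages were reinserted into the
 * appropriate runs_avail tree, dirty page counters were adjusted, and the
 * chunk map entries for the new run were tagged (CHUNK_MAP_LARGE for large
 * runs, per-page offsets for small runs) so that deallocation can later
 * recover run boundaries from the map alone.
 */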
static arena_chunk_t *
arena_chunk_alloc(arena_t *arena)
{
    arena_chunk_t *chunk;
    size_t i;

    if (arena->spare != NULL) {
        arena_avail_tree_t *runs_avail;

        chunk = arena->spare;
        arena->spare = NULL;

        /* Insert the run into the appropriate runs_avail_* tree. */
        if ((chunk->map[0].bits & CHUNK_MAP_DIRTY) == 0)
            runs_avail = &arena->runs_avail_clean;
        else
            runs_avail = &arena->runs_avail_dirty;
        assert((chunk->map[0].bits & ~PAGE_MASK) == arena_maxclass);
        assert((chunk->map[chunk_npages-1-map_bias].bits & ~PAGE_MASK)
            == arena_maxclass);
        assert((chunk->map[0].bits & CHUNK_MAP_DIRTY) ==
            (chunk->map[chunk_npages-1-map_bias].bits &
            CHUNK_MAP_DIRTY));
        arena_avail_tree_insert(runs_avail, &chunk->map[0]);
    } else {
        bool zero;
        size_t unzeroed;

        zero = false;
        malloc_mutex_unlock(&arena->lock);
        chunk = (arena_chunk_t *)chunk_alloc(chunksize, false, &zero);
        malloc_mutex_lock(&arena->lock);
        if (chunk == NULL)
            return (NULL);
#ifdef JEMALLOC_STATS
        arena->stats.mapped += chunksize;
#endif

        chunk->arena = arena;
        ql_elm_new(chunk, link_dirty);
        chunk->dirtied = false;

        /*
         * Claim that no pages are in use, since the header is merely
         * overhead.
         */
        chunk->ndirty = 0;

        /*
         * Initialize the map to contain one maximal free untouched run.
         * Mark the pages as zeroed iff chunk_alloc() returned a zeroed
         * chunk.
         */
        unzeroed = zero ? 0 : CHUNK_MAP_UNZEROED;
        chunk->map[0].bits = arena_maxclass | unzeroed;
        /*
         * There is no need to initialize the internal page map entries
         * unless the chunk is not zeroed.
         */
        if (zero == false) {
            for (i = map_bias+1; i < chunk_npages-1; i++)
                chunk->map[i-map_bias].bits = unzeroed;
        }
#ifdef JEMALLOC_DEBUG
        else {
            for (i = map_bias+1; i < chunk_npages-1; i++)
                assert(chunk->map[i-map_bias].bits == unzeroed);
        }
#endif
        chunk->map[chunk_npages-1-map_bias].bits = arena_maxclass |
            unzeroed;

        /* Insert the run into the runs_avail_clean tree. */
        arena_avail_tree_insert(&arena->runs_avail_clean,
            &chunk->map[0]);
    }

    return (chunk);
}
static void
arena_chunk_dealloc(arena_t *arena, arena_chunk_t *chunk)
{
    arena_avail_tree_t *runs_avail;

    /*
     * Remove run from the appropriate runs_avail_* tree, so that the arena
     * does not use it.
     */
    if ((chunk->map[0].bits & CHUNK_MAP_DIRTY) == 0)
        runs_avail = &arena->runs_avail_clean;
    else
        runs_avail = &arena->runs_avail_dirty;
    arena_avail_tree_remove(runs_avail, &chunk->map[0]);

    if (arena->spare != NULL) {
        arena_chunk_t *spare = arena->spare;

        arena->spare = chunk;
        if (spare->dirtied) {
            ql_remove(&chunk->arena->chunks_dirty, spare,
                link_dirty);
            arena->ndirty -= spare->ndirty;
        }
        malloc_mutex_unlock(&arena->lock);
        chunk_dealloc((void *)spare, chunksize);
        malloc_mutex_lock(&arena->lock);
#ifdef JEMALLOC_STATS
        arena->stats.mapped -= chunksize;
#endif
    } else
        arena->spare = chunk;
}
static arena_run_t *
arena_run_alloc(arena_t *arena, size_t size, bool large, bool zero)
{
    arena_chunk_t *chunk;
    arena_run_t *run;
    arena_chunk_map_t *mapelm, key;

    assert(size <= arena_maxclass);
    assert((size & PAGE_MASK) == 0);

    /* Search the arena's chunks for the lowest best fit. */
    key.bits = size | CHUNK_MAP_KEY;
    mapelm = arena_avail_tree_nsearch(&arena->runs_avail_dirty, &key);
    if (mapelm != NULL) {
        arena_chunk_t *run_chunk = CHUNK_ADDR2BASE(mapelm);
        size_t pageind = (((uintptr_t)mapelm -
            (uintptr_t)run_chunk->map) / sizeof(arena_chunk_map_t))
            + map_bias;

        run = (arena_run_t *)((uintptr_t)run_chunk + (pageind <<
            PAGE_SHIFT));
        arena_run_split(arena, run, size, large, zero);
        return (run);
    }
    mapelm = arena_avail_tree_nsearch(&arena->runs_avail_clean, &key);
    if (mapelm != NULL) {
        arena_chunk_t *run_chunk = CHUNK_ADDR2BASE(mapelm);
        size_t pageind = (((uintptr_t)mapelm -
            (uintptr_t)run_chunk->map) / sizeof(arena_chunk_map_t))
            + map_bias;

        run = (arena_run_t *)((uintptr_t)run_chunk + (pageind <<
            PAGE_SHIFT));
        arena_run_split(arena, run, size, large, zero);
        return (run);
    }

    /*
     * No usable runs.  Create a new chunk from which to allocate the run.
     */
    chunk = arena_chunk_alloc(arena);
    if (chunk != NULL) {
        run = (arena_run_t *)((uintptr_t)chunk + (map_bias <<
            PAGE_SHIFT));
        arena_run_split(arena, run, size, large, zero);
        return (run);
    }

    /*
     * arena_chunk_alloc() failed, but another thread may have made
     * sufficient memory available while this one dropped arena->lock in
     * arena_chunk_alloc(), so search one more time.
     */
    mapelm = arena_avail_tree_nsearch(&arena->runs_avail_dirty, &key);
    if (mapelm != NULL) {
        arena_chunk_t *run_chunk = CHUNK_ADDR2BASE(mapelm);
        size_t pageind = (((uintptr_t)mapelm -
            (uintptr_t)run_chunk->map) / sizeof(arena_chunk_map_t))
            + map_bias;

        run = (arena_run_t *)((uintptr_t)run_chunk + (pageind <<
            PAGE_SHIFT));
        arena_run_split(arena, run, size, large, zero);
        return (run);
    }
    mapelm = arena_avail_tree_nsearch(&arena->runs_avail_clean, &key);
    if (mapelm != NULL) {
        arena_chunk_t *run_chunk = CHUNK_ADDR2BASE(mapelm);
        size_t pageind = (((uintptr_t)mapelm -
            (uintptr_t)run_chunk->map) / sizeof(arena_chunk_map_t))
            + map_bias;

        run = (arena_run_t *)((uintptr_t)run_chunk + (pageind <<
            PAGE_SHIFT));
        arena_run_split(arena, run, size, large, zero);
        return (run);
    }

    return (NULL);
}
static inline void
arena_maybe_purge(arena_t *arena)
{

    /* Enforce opt_lg_dirty_mult. */
    if (opt_lg_dirty_mult >= 0 && arena->ndirty > arena->npurgatory &&
        (arena->ndirty - arena->npurgatory) > chunk_npages &&
        (arena->nactive >> opt_lg_dirty_mult) < (arena->ndirty -
        arena->npurgatory))
        arena_purge(arena, false);
}
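/*
 * Worked example of the threshold above (added for clarity): with
 * opt_lg_dirty_mult == 5, purging is triggered once the number of dirty
 * pages not already slated for purging exceeds both chunk_npages and 1/32
 * of the arena's active page count.
 */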
static inline void
arena_chunk_purge(arena_t *arena, arena_chunk_t *chunk)
{
    ql_head(arena_chunk_map_t) mapelms;
    arena_chunk_map_t *mapelm;
    size_t pageind, flag_unzeroed;
#ifdef JEMALLOC_DEBUG
    size_t ndirty;
#endif
#ifdef JEMALLOC_STATS
    size_t nmadvise;
#endif

    ql_new(&mapelms);

    flag_unzeroed =
#ifdef JEMALLOC_PURGE_MADVISE_DONTNEED
   /*
    * madvise(..., MADV_DONTNEED) results in zero-filled pages for anonymous
    * mappings, but not for file-backed mappings.
    */
#  ifdef JEMALLOC_SWAP
        swap_enabled ? CHUNK_MAP_UNZEROED :
#  endif
        0;
#else
        CHUNK_MAP_UNZEROED;
#endif

    /*
     * If chunk is the spare, temporarily re-allocate it, 1) so that its
     * run is reinserted into runs_avail_dirty, and 2) so that it cannot be
     * completely discarded by another thread while arena->lock is dropped
     * by this thread.  Note that the arena_run_dalloc() call will
     * implicitly deallocate the chunk, so no explicit action is required
     * in this function to deallocate the chunk.
     *
     * Note that once a chunk contains dirty pages, it cannot again contain
     * a single run unless 1) it is a dirty run, or 2) this function purges
     * dirty pages and causes the transition to a single clean run.  Thus
     * (chunk == arena->spare) is possible, but it is not possible for
     * this function to be called on the spare unless it contains a dirty
     * run.
     */
    if (chunk == arena->spare) {
        assert((chunk->map[0].bits & CHUNK_MAP_DIRTY) != 0);
        arena_chunk_alloc(arena);
    }

    /* Temporarily allocate all free dirty runs within chunk. */
    for (pageind = map_bias; pageind < chunk_npages;) {
        mapelm = &chunk->map[pageind-map_bias];
        if ((mapelm->bits & CHUNK_MAP_ALLOCATED) == 0) {
            size_t npages;

            npages = mapelm->bits >> PAGE_SHIFT;
            assert(pageind + npages <= chunk_npages);
            if (mapelm->bits & CHUNK_MAP_DIRTY) {
                size_t i;
#ifdef JEMALLOC_STATS
                size_t cactive_diff;
#endif
                arena_avail_tree_remove(
                    &arena->runs_avail_dirty, mapelm);

                mapelm->bits = (npages << PAGE_SHIFT) |
                    flag_unzeroed | CHUNK_MAP_LARGE |
                    CHUNK_MAP_ALLOCATED;
                /*
                 * Update internal elements in the page map, so
                 * that CHUNK_MAP_UNZEROED is properly set.
                 */
                for (i = 1; i < npages - 1; i++) {
                    chunk->map[pageind+i-map_bias].bits =
                        flag_unzeroed;
                }
                if (npages > 1) {
                    chunk->map[
                        pageind+npages-1-map_bias].bits =
                        flag_unzeroed | CHUNK_MAP_LARGE |
                        CHUNK_MAP_ALLOCATED;
                }

#ifdef JEMALLOC_STATS
                /*
                 * Update stats_cactive if nactive is crossing a
                 * chunk multiple.
                 */
                cactive_diff = CHUNK_CEILING((arena->nactive +
                    npages) << PAGE_SHIFT) -
                    CHUNK_CEILING(arena->nactive << PAGE_SHIFT);
                if (cactive_diff != 0)
                    stats_cactive_add(cactive_diff);
#endif
                arena->nactive += npages;
                /* Append to list for later processing. */
                ql_elm_new(mapelm, u.ql_link);
                ql_tail_insert(&mapelms, mapelm, u.ql_link);
            }

            pageind += npages;
        } else {
            /* Skip allocated run. */
            if (mapelm->bits & CHUNK_MAP_LARGE)
                pageind += mapelm->bits >> PAGE_SHIFT;
            else {
                arena_run_t *run = (arena_run_t *)((uintptr_t)
                    chunk + (uintptr_t)(pageind << PAGE_SHIFT));

                assert((mapelm->bits >> PAGE_SHIFT) == 0);
                dassert(run->magic == ARENA_RUN_MAGIC);
                size_t binind = arena_bin_index(arena,
                    run->bin);
                arena_bin_info_t *bin_info =
                    &arena_bin_info[binind];
                pageind += bin_info->run_size >> PAGE_SHIFT;
            }
        }
    }
    assert(pageind == chunk_npages);

#ifdef JEMALLOC_DEBUG
    ndirty = chunk->ndirty;
#endif
#ifdef JEMALLOC_STATS
    arena->stats.purged += chunk->ndirty;
#endif
    arena->ndirty -= chunk->ndirty;
    chunk->ndirty = 0;
    ql_remove(&arena->chunks_dirty, chunk, link_dirty);
    chunk->dirtied = false;

    malloc_mutex_unlock(&arena->lock);
#ifdef JEMALLOC_STATS
    nmadvise = 0;
#endif
    ql_foreach(mapelm, &mapelms, u.ql_link) {
        size_t pageind = (((uintptr_t)mapelm - (uintptr_t)chunk->map) /
            sizeof(arena_chunk_map_t)) + map_bias;
        size_t npages = mapelm->bits >> PAGE_SHIFT;

        assert(pageind + npages <= chunk_npages);
#ifdef JEMALLOC_DEBUG
        assert(ndirty >= npages);
        ndirty -= npages;
#endif

#ifdef JEMALLOC_PURGE_MADVISE_DONTNEED
        madvise((void *)((uintptr_t)chunk + (pageind << PAGE_SHIFT)),
            (npages << PAGE_SHIFT), MADV_DONTNEED);
#elif defined(JEMALLOC_PURGE_MADVISE_FREE)
        madvise((void *)((uintptr_t)chunk + (pageind << PAGE_SHIFT)),
            (npages << PAGE_SHIFT), MADV_FREE);
#else
#  error "No method defined for purging unused dirty pages."
#endif

#ifdef JEMALLOC_STATS
        nmadvise++;
#endif
    }
#ifdef JEMALLOC_DEBUG
    assert(ndirty == 0);
#endif
    malloc_mutex_lock(&arena->lock);
#ifdef JEMALLOC_STATS
    arena->stats.nmadvise += nmadvise;
#endif

    /* Deallocate runs. */
    for (mapelm = ql_first(&mapelms); mapelm != NULL;
        mapelm = ql_first(&mapelms)) {
        size_t pageind = (((uintptr_t)mapelm - (uintptr_t)chunk->map) /
            sizeof(arena_chunk_map_t)) + map_bias;
        arena_run_t *run = (arena_run_t *)((uintptr_t)chunk +
            (uintptr_t)(pageind << PAGE_SHIFT));

        ql_remove(&mapelms, mapelm, u.ql_link);
        arena_run_dalloc(arena, run, false);
    }
}
static void
arena_purge(arena_t *arena, bool all)
{
    arena_chunk_t *chunk;
    size_t npurgatory;
#ifdef JEMALLOC_DEBUG
    size_t ndirty = 0;

    ql_foreach(chunk, &arena->chunks_dirty, link_dirty) {
        assert(chunk->dirtied);
        ndirty += chunk->ndirty;
    }
    assert(ndirty == arena->ndirty);
#endif
    assert(arena->ndirty > arena->npurgatory || all);
    assert(arena->ndirty > chunk_npages || all);
    assert((arena->nactive >> opt_lg_dirty_mult) < (arena->ndirty -
        arena->npurgatory) || all);

#ifdef JEMALLOC_STATS
    arena->stats.npurge++;
#endif

    /*
     * Compute the minimum number of pages that this thread should try to
     * purge, and add the result to arena->npurgatory.  This will keep
     * multiple threads from racing to reduce ndirty below the threshold.
     */
    npurgatory = arena->ndirty - arena->npurgatory;
    if (all == false) {
        assert(npurgatory >= arena->nactive >> opt_lg_dirty_mult);
        npurgatory -= arena->nactive >> opt_lg_dirty_mult;
    }
    arena->npurgatory += npurgatory;

    while (npurgatory > 0) {
        /* Get next chunk with dirty pages. */
        chunk = ql_first(&arena->chunks_dirty);
        if (chunk == NULL) {
            /*
             * This thread was unable to purge as many pages as
             * originally intended, due to races with other threads
             * that either did some of the purging work, or re-used
             * dirty pages.
             */
            arena->npurgatory -= npurgatory;
            return;
        }
        while (chunk->ndirty == 0) {
            ql_remove(&arena->chunks_dirty, chunk, link_dirty);
            chunk->dirtied = false;
            chunk = ql_first(&arena->chunks_dirty);
            if (chunk == NULL) {
                /* Same logic as for above. */
                arena->npurgatory -= npurgatory;
                return;
            }
        }

        if (chunk->ndirty > npurgatory) {
            /*
             * This thread will, at a minimum, purge all the dirty
             * pages in chunk, so set npurgatory to reflect this
             * thread's commitment to purge the pages.  This tends
             * to reduce the chances of the following scenario:
             *
             * 1) This thread sets arena->npurgatory such that
             *    (arena->ndirty - arena->npurgatory) is at the
             *    threshold.
             * 2) This thread drops arena->lock.
             * 3) Another thread causes one or more pages to be
             *    dirtied, and immediately determines that it must
             *    purge dirty pages.
             *
             * If this scenario *does* play out, that's okay,
             * because all of the purging work being done really
             * needs to happen.
             */
            arena->npurgatory += chunk->ndirty - npurgatory;
            npurgatory = chunk->ndirty;
        }

        arena->npurgatory -= chunk->ndirty;
        npurgatory -= chunk->ndirty;
        arena_chunk_purge(arena, chunk);
    }
}
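/*
 * Note (added for clarity): arena->npurgatory counts dirty pages that some
 * thread has already committed to purge.  arena_maybe_purge() subtracts it
 * from ndirty before testing the threshold, so concurrent callers do not all
 * race to purge the same pages once the dirty ratio is exceeded.
 */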
void
arena_purge_all(arena_t *arena)
{

    malloc_mutex_lock(&arena->lock);
    arena_purge(arena, true);
    malloc_mutex_unlock(&arena->lock);
}
static void
arena_run_dalloc(arena_t *arena, arena_run_t *run, bool dirty)
{
    arena_chunk_t *chunk;
    size_t size, run_ind, run_pages, flag_dirty;
    arena_avail_tree_t *runs_avail;
#ifdef JEMALLOC_STATS
    size_t cactive_diff;
#endif

    chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(run);
    run_ind = (size_t)(((uintptr_t)run - (uintptr_t)chunk)
        >> PAGE_SHIFT);
    assert(run_ind >= map_bias);
    assert(run_ind < chunk_npages);
    if ((chunk->map[run_ind-map_bias].bits & CHUNK_MAP_LARGE) != 0) {
        size = chunk->map[run_ind-map_bias].bits & ~PAGE_MASK;
        assert(size == PAGE_SIZE ||
            (chunk->map[run_ind+(size>>PAGE_SHIFT)-1-map_bias].bits &
            ~PAGE_MASK) == 0);
        assert((chunk->map[run_ind+(size>>PAGE_SHIFT)-1-map_bias].bits &
            CHUNK_MAP_LARGE) != 0);
        assert((chunk->map[run_ind+(size>>PAGE_SHIFT)-1-map_bias].bits &
            CHUNK_MAP_ALLOCATED) != 0);
    } else {
        size_t binind = arena_bin_index(arena, run->bin);
        arena_bin_info_t *bin_info = &arena_bin_info[binind];
        size = bin_info->run_size;
    }
    run_pages = (size >> PAGE_SHIFT);
#ifdef JEMALLOC_STATS
    /* Update stats_cactive if nactive is crossing a chunk multiple. */
    cactive_diff = CHUNK_CEILING(arena->nactive << PAGE_SHIFT) -
        CHUNK_CEILING((arena->nactive - run_pages) << PAGE_SHIFT);
    if (cactive_diff != 0)
        stats_cactive_sub(cactive_diff);
#endif
    arena->nactive -= run_pages;

    /*
     * The run is dirty if the caller claims to have dirtied it, as well as
     * if it was already dirty before being allocated.
     */
    if ((chunk->map[run_ind-map_bias].bits & CHUNK_MAP_DIRTY) != 0)
        dirty = true;
    flag_dirty = dirty ? CHUNK_MAP_DIRTY : 0;
    runs_avail = dirty ? &arena->runs_avail_dirty :
        &arena->runs_avail_clean;

    /* Mark pages as unallocated in the chunk map. */
    if (dirty) {
        chunk->map[run_ind-map_bias].bits = size | CHUNK_MAP_DIRTY;
        chunk->map[run_ind+run_pages-1-map_bias].bits = size |
            CHUNK_MAP_DIRTY;

        chunk->ndirty += run_pages;
        arena->ndirty += run_pages;
    } else {
        chunk->map[run_ind-map_bias].bits = size |
            (chunk->map[run_ind-map_bias].bits & CHUNK_MAP_UNZEROED);
        chunk->map[run_ind+run_pages-1-map_bias].bits = size |
            (chunk->map[run_ind+run_pages-1-map_bias].bits &
            CHUNK_MAP_UNZEROED);
    }

    /* Try to coalesce forward. */
    if (run_ind + run_pages < chunk_npages &&
        (chunk->map[run_ind+run_pages-map_bias].bits & CHUNK_MAP_ALLOCATED)
        == 0 && (chunk->map[run_ind+run_pages-map_bias].bits &
        CHUNK_MAP_DIRTY) == flag_dirty) {
        size_t nrun_size = chunk->map[run_ind+run_pages-map_bias].bits &
            ~PAGE_MASK;
        size_t nrun_pages = nrun_size >> PAGE_SHIFT;

        /*
         * Remove successor from runs_avail; the coalesced run is
         * inserted later.
         */
        assert((chunk->map[run_ind+run_pages+nrun_pages-1-map_bias].bits
            & ~PAGE_MASK) == nrun_size);
        assert((chunk->map[run_ind+run_pages+nrun_pages-1-map_bias].bits
            & CHUNK_MAP_ALLOCATED) == 0);
        assert((chunk->map[run_ind+run_pages+nrun_pages-1-map_bias].bits
            & CHUNK_MAP_DIRTY) == flag_dirty);
        arena_avail_tree_remove(runs_avail,
            &chunk->map[run_ind+run_pages-map_bias]);

        size += nrun_size;
        run_pages += nrun_pages;

        chunk->map[run_ind-map_bias].bits = size |
            (chunk->map[run_ind-map_bias].bits & CHUNK_MAP_FLAGS_MASK);
        chunk->map[run_ind+run_pages-1-map_bias].bits = size |
            (chunk->map[run_ind+run_pages-1-map_bias].bits &
            CHUNK_MAP_FLAGS_MASK);
    }

    /* Try to coalesce backward. */
    if (run_ind > map_bias && (chunk->map[run_ind-1-map_bias].bits &
        CHUNK_MAP_ALLOCATED) == 0 && (chunk->map[run_ind-1-map_bias].bits &
        CHUNK_MAP_DIRTY) == flag_dirty) {
        size_t prun_size = chunk->map[run_ind-1-map_bias].bits &
            ~PAGE_MASK;
        size_t prun_pages = prun_size >> PAGE_SHIFT;

        run_ind -= prun_pages;

        /*
         * Remove predecessor from runs_avail; the coalesced run is
         * inserted later.
         */
        assert((chunk->map[run_ind-map_bias].bits & ~PAGE_MASK)
            == prun_size);
        assert((chunk->map[run_ind-map_bias].bits & CHUNK_MAP_ALLOCATED)
            == 0);
        assert((chunk->map[run_ind-map_bias].bits & CHUNK_MAP_DIRTY)
            == flag_dirty);
        arena_avail_tree_remove(runs_avail,
            &chunk->map[run_ind-map_bias]);

        size += prun_size;
        run_pages += prun_pages;

        chunk->map[run_ind-map_bias].bits = size |
            (chunk->map[run_ind-map_bias].bits & CHUNK_MAP_FLAGS_MASK);
        chunk->map[run_ind+run_pages-1-map_bias].bits = size |
            (chunk->map[run_ind+run_pages-1-map_bias].bits &
            CHUNK_MAP_FLAGS_MASK);
    }

    /* Insert into runs_avail, now that coalescing is complete. */
    assert((chunk->map[run_ind-map_bias].bits & ~PAGE_MASK) ==
        (chunk->map[run_ind+run_pages-1-map_bias].bits & ~PAGE_MASK));
    assert((chunk->map[run_ind-map_bias].bits & CHUNK_MAP_DIRTY) ==
        (chunk->map[run_ind+run_pages-1-map_bias].bits & CHUNK_MAP_DIRTY));
    arena_avail_tree_insert(runs_avail, &chunk->map[run_ind-map_bias]);

    if (dirty) {
        /*
         * Insert into chunks_dirty before potentially calling
         * arena_chunk_dealloc(), so that chunks_dirty and
         * arena->ndirty are consistent.
         */
        if (chunk->dirtied == false) {
            ql_tail_insert(&arena->chunks_dirty, chunk, link_dirty);
            chunk->dirtied = true;
        }
    }

    /*
     * Deallocate chunk if it is now completely unused.  The bit
     * manipulation checks whether the first run is unallocated and extends
     * to the end of the chunk.
     */
    if ((chunk->map[0].bits & (~PAGE_MASK | CHUNK_MAP_ALLOCATED)) ==
        arena_maxclass)
        arena_chunk_dealloc(arena, chunk);

    /*
     * It is okay to do dirty page processing here even if the chunk was
     * deallocated above, since in that case it is the spare.  Waiting
     * until after possible chunk deallocation to do dirty processing
     * allows for an old spare to be fully deallocated, thus decreasing the
     * chances of spuriously crossing the dirty page purging threshold.
     */
    if (dirty)
        arena_maybe_purge(arena);
}
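/*
 * Note (added for clarity): run deallocation above coalesces the freed run
 * with unallocated neighbors of the same cleanliness (dirty with dirty,
 * clean with clean) before reinserting it, so each runs_avail tree always
 * holds maximal runs, and a fully empty chunk collapses back to a single
 * run that can trigger arena_chunk_dealloc().
 */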
static void
arena_run_trim_head(arena_t *arena, arena_chunk_t *chunk, arena_run_t *run,
    size_t oldsize, size_t newsize)
{
    size_t pageind = ((uintptr_t)run - (uintptr_t)chunk) >> PAGE_SHIFT;
    size_t head_npages = (oldsize - newsize) >> PAGE_SHIFT;
    size_t flag_dirty = chunk->map[pageind-map_bias].bits & CHUNK_MAP_DIRTY;

    assert(oldsize > newsize);

    /*
     * Update the chunk map so that arena_run_dalloc() can treat the
     * leading run as separately allocated.  Set the last element of each
     * run first, in case of single-page runs.
     */
    assert((chunk->map[pageind-map_bias].bits & CHUNK_MAP_LARGE) != 0);
    assert((chunk->map[pageind-map_bias].bits & CHUNK_MAP_ALLOCATED) != 0);
    chunk->map[pageind+head_npages-1-map_bias].bits = flag_dirty |
        (chunk->map[pageind+head_npages-1-map_bias].bits &
        CHUNK_MAP_UNZEROED) | CHUNK_MAP_LARGE | CHUNK_MAP_ALLOCATED;
    chunk->map[pageind-map_bias].bits = (oldsize - newsize)
        | flag_dirty | (chunk->map[pageind-map_bias].bits &
        CHUNK_MAP_UNZEROED) | CHUNK_MAP_LARGE | CHUNK_MAP_ALLOCATED;

#ifdef JEMALLOC_DEBUG
    {
        size_t tail_npages = newsize >> PAGE_SHIFT;
        assert((chunk->map[pageind+head_npages+tail_npages-1-map_bias]
            .bits & ~PAGE_MASK) == 0);
        assert((chunk->map[pageind+head_npages+tail_npages-1-map_bias]
            .bits & CHUNK_MAP_DIRTY) == flag_dirty);
        assert((chunk->map[pageind+head_npages+tail_npages-1-map_bias]
            .bits & CHUNK_MAP_LARGE) != 0);
        assert((chunk->map[pageind+head_npages+tail_npages-1-map_bias]
            .bits & CHUNK_MAP_ALLOCATED) != 0);
    }
#endif
    chunk->map[pageind+head_npages-map_bias].bits = newsize | flag_dirty |
        (chunk->map[pageind+head_npages-map_bias].bits &
        CHUNK_MAP_FLAGS_MASK) | CHUNK_MAP_LARGE | CHUNK_MAP_ALLOCATED;

    arena_run_dalloc(arena, run, false);
}
static void
arena_run_trim_tail(arena_t *arena, arena_chunk_t *chunk, arena_run_t *run,
    size_t oldsize, size_t newsize, bool dirty)
{
    size_t pageind = ((uintptr_t)run - (uintptr_t)chunk) >> PAGE_SHIFT;
    size_t head_npages = newsize >> PAGE_SHIFT;
    size_t tail_npages = (oldsize - newsize) >> PAGE_SHIFT;
    size_t flag_dirty = chunk->map[pageind-map_bias].bits &
        CHUNK_MAP_DIRTY;

    assert(oldsize > newsize);

    /*
     * Update the chunk map so that arena_run_dalloc() can treat the
     * trailing run as separately allocated.  Set the last element of each
     * run first, in case of single-page runs.
     */
    assert((chunk->map[pageind-map_bias].bits & CHUNK_MAP_LARGE) != 0);
    assert((chunk->map[pageind-map_bias].bits & CHUNK_MAP_ALLOCATED) != 0);
    chunk->map[pageind+head_npages-1-map_bias].bits = flag_dirty |
        (chunk->map[pageind+head_npages-1-map_bias].bits &
        CHUNK_MAP_UNZEROED) | CHUNK_MAP_LARGE | CHUNK_MAP_ALLOCATED;
    chunk->map[pageind-map_bias].bits = newsize | flag_dirty |
        (chunk->map[pageind-map_bias].bits & CHUNK_MAP_UNZEROED) |
        CHUNK_MAP_LARGE | CHUNK_MAP_ALLOCATED;

    assert((chunk->map[pageind+head_npages+tail_npages-1-map_bias].bits &
        ~PAGE_MASK) == 0);
    assert((chunk->map[pageind+head_npages+tail_npages-1-map_bias].bits &
        CHUNK_MAP_LARGE) != 0);
    assert((chunk->map[pageind+head_npages+tail_npages-1-map_bias].bits &
        CHUNK_MAP_ALLOCATED) != 0);
    chunk->map[pageind+head_npages+tail_npages-1-map_bias].bits =
        flag_dirty |
        (chunk->map[pageind+head_npages+tail_npages-1-map_bias].bits &
        CHUNK_MAP_UNZEROED) | CHUNK_MAP_LARGE | CHUNK_MAP_ALLOCATED;
    chunk->map[pageind+head_npages-map_bias].bits = (oldsize - newsize) |
        flag_dirty | (chunk->map[pageind+head_npages-map_bias].bits &
        CHUNK_MAP_UNZEROED) | CHUNK_MAP_LARGE | CHUNK_MAP_ALLOCATED;

    arena_run_dalloc(arena, (arena_run_t *)((uintptr_t)run + newsize),
        dirty);
}
static arena_run_t *
arena_bin_nonfull_run_get(arena_t *arena, arena_bin_t *bin)
{
    arena_chunk_map_t *mapelm;
    arena_run_t *run;
    size_t binind;
    arena_bin_info_t *bin_info;

    /* Look for a usable run. */
    mapelm = arena_run_tree_first(&bin->runs);
    if (mapelm != NULL) {
        arena_chunk_t *chunk;
        size_t pageind;

        /* run is guaranteed to have available space. */
        arena_run_tree_remove(&bin->runs, mapelm);

        chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(mapelm);
        pageind = ((((uintptr_t)mapelm - (uintptr_t)chunk->map) /
            sizeof(arena_chunk_map_t))) + map_bias;
        run = (arena_run_t *)((uintptr_t)chunk + (uintptr_t)((pageind -
            (mapelm->bits >> PAGE_SHIFT)) << PAGE_SHIFT));
#ifdef JEMALLOC_STATS
        bin->stats.reruns++;
#endif
        return (run);
    }
    /* No existing runs have any space available. */

    binind = arena_bin_index(arena, bin);
    bin_info = &arena_bin_info[binind];

    /* Allocate a new run. */
    malloc_mutex_unlock(&bin->lock);
    /******************************/
    malloc_mutex_lock(&arena->lock);
    run = arena_run_alloc(arena, bin_info->run_size, false, false);
    if (run != NULL) {
        bitmap_t *bitmap = (bitmap_t *)((uintptr_t)run +
            (uintptr_t)bin_info->bitmap_offset);

        /* Initialize run internals. */
        run->bin = bin;
        run->nextind = 0;
        run->nfree = bin_info->nregs;
        bitmap_init(bitmap, &bin_info->bitmap_info);
#ifdef JEMALLOC_DEBUG
        run->magic = ARENA_RUN_MAGIC;
#endif
    }
    malloc_mutex_unlock(&arena->lock);
    /********************************/
    malloc_mutex_lock(&bin->lock);
    if (run != NULL) {
#ifdef JEMALLOC_STATS
        bin->stats.nruns++;
        bin->stats.curruns++;
        if (bin->stats.curruns > bin->stats.highruns)
            bin->stats.highruns = bin->stats.curruns;
#endif
        return (run);
    }

    /*
     * arena_run_alloc() failed, but another thread may have made
     * sufficient memory available while this one dropped bin->lock above,
     * so search one more time.
     */
    mapelm = arena_run_tree_first(&bin->runs);
    if (mapelm != NULL) {
        arena_chunk_t *chunk;
        size_t pageind;

        /* run is guaranteed to have available space. */
        arena_run_tree_remove(&bin->runs, mapelm);

        chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(mapelm);
        pageind = ((((uintptr_t)mapelm - (uintptr_t)chunk->map) /
            sizeof(arena_chunk_map_t))) + map_bias;
        run = (arena_run_t *)((uintptr_t)chunk + (uintptr_t)((pageind -
            (mapelm->bits >> PAGE_SHIFT)) << PAGE_SHIFT));
#ifdef JEMALLOC_STATS
        bin->stats.reruns++;
#endif
        return (run);
    }

    return (NULL);
}
/* Re-fill bin->runcur, then call arena_run_reg_alloc(). */
static void *
arena_bin_malloc_hard(arena_t *arena, arena_bin_t *bin)
{
    void *ret;
    size_t binind;
    arena_bin_info_t *bin_info;
    arena_run_t *run;

    binind = arena_bin_index(arena, bin);
    bin_info = &arena_bin_info[binind];
    bin->runcur = NULL;
    run = arena_bin_nonfull_run_get(arena, bin);
    if (bin->runcur != NULL && bin->runcur->nfree > 0) {
        /*
         * Another thread updated runcur while this one ran without the
         * bin lock in arena_bin_nonfull_run_get().
         */
        dassert(bin->runcur->magic == ARENA_RUN_MAGIC);
        assert(bin->runcur->nfree > 0);
        ret = arena_run_reg_alloc(bin->runcur, bin_info);
        if (run != NULL) {
            arena_chunk_t *chunk;

            /*
             * arena_run_alloc() may have allocated run, or it may
             * have pulled run from the bin's run tree.  Therefore
             * it is unsafe to make any assumptions about how run
             * has previously been used, and arena_bin_lower_run()
             * must be called, as if a region were just deallocated
             * from the run.
             */
            chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(run);
            if (run->nfree == bin_info->nregs)
                arena_dalloc_bin_run(arena, chunk, run, bin);
            else
                arena_bin_lower_run(arena, chunk, run, bin);
        }
        return (ret);
    }

    if (run == NULL)
        return (NULL);

    bin->runcur = run;

    dassert(bin->runcur->magic == ARENA_RUN_MAGIC);
    assert(bin->runcur->nfree > 0);

    return (arena_run_reg_alloc(bin->runcur, bin_info));
}
#ifdef JEMALLOC_PROF
static inline void
arena_prof_accum(arena_t *arena, uint64_t accumbytes)
{

    if (prof_interval != 0) {
        arena->prof_accumbytes += accumbytes;
        if (arena->prof_accumbytes >= prof_interval) {
            prof_idump();
            arena->prof_accumbytes -= prof_interval;
        }
    }
}
#endif
#ifdef JEMALLOC_TCACHE
void
arena_tcache_fill_small(arena_t *arena, tcache_bin_t *tbin, size_t binind
#  ifdef JEMALLOC_PROF
    , uint64_t prof_accumbytes
#  endif
    )
{
    unsigned i, nfill;
    arena_bin_t *bin;
    arena_run_t *run;
    void *ptr;

    assert(tbin->ncached == 0);

#ifdef JEMALLOC_PROF
    malloc_mutex_lock(&arena->lock);
    arena_prof_accum(arena, prof_accumbytes);
    malloc_mutex_unlock(&arena->lock);
#endif
    bin = &arena->bins[binind];
    malloc_mutex_lock(&bin->lock);
    for (i = 0, nfill = (tcache_bin_info[binind].ncached_max >>
        tbin->lg_fill_div); i < nfill; i++) {
        if ((run = bin->runcur) != NULL && run->nfree > 0)
            ptr = arena_run_reg_alloc(run, &arena_bin_info[binind]);
        else
            ptr = arena_bin_malloc_hard(arena, bin);
        if (ptr == NULL)
            break;
        /* Insert such that low regions get used first. */
        tbin->avail[nfill - 1 - i] = ptr;
    }
#ifdef JEMALLOC_STATS
    bin->stats.allocated += i * arena_bin_info[binind].reg_size;
    bin->stats.nmalloc += i;
    bin->stats.nrequests += tbin->tstats.nrequests;
    bin->stats.nfills++;
    tbin->tstats.nrequests = 0;
#endif
    malloc_mutex_unlock(&bin->lock);
    tbin->ncached = i;
}
#endif
void *
arena_malloc_small(arena_t *arena, size_t size, bool zero)
{
    void *ret;
    arena_bin_t *bin;
    arena_run_t *run;
    size_t binind;

    binind = SMALL_SIZE2BIN(size);
    assert(binind < nbins);
    bin = &arena->bins[binind];
    size = arena_bin_info[binind].reg_size;

    malloc_mutex_lock(&bin->lock);
    if ((run = bin->runcur) != NULL && run->nfree > 0)
        ret = arena_run_reg_alloc(run, &arena_bin_info[binind]);
    else
        ret = arena_bin_malloc_hard(arena, bin);

    if (ret == NULL) {
        malloc_mutex_unlock(&bin->lock);
        return (NULL);
    }

#ifdef JEMALLOC_STATS
    bin->stats.allocated += size;
    bin->stats.nmalloc++;
    bin->stats.nrequests++;
#endif
    malloc_mutex_unlock(&bin->lock);
#ifdef JEMALLOC_PROF
    if (isthreaded == false) {
        malloc_mutex_lock(&arena->lock);
        arena_prof_accum(arena, size);
        malloc_mutex_unlock(&arena->lock);
    }
#endif

    if (zero == false) {
#ifdef JEMALLOC_FILL
        if (opt_junk)
            memset(ret, 0xa5, size);
        else if (opt_zero)
            memset(ret, 0, size);
#endif
    } else
        memset(ret, 0, size);

    return (ret);
}
void *
arena_malloc_large(arena_t *arena, size_t size, bool zero)
{
    void *ret;

    /* Large allocation. */
    size = PAGE_CEILING(size);
    malloc_mutex_lock(&arena->lock);
    ret = (void *)arena_run_alloc(arena, size, true, zero);
    if (ret == NULL) {
        malloc_mutex_unlock(&arena->lock);
        return (NULL);
    }
#ifdef JEMALLOC_STATS
    arena->stats.nmalloc_large++;
    arena->stats.nrequests_large++;
    arena->stats.allocated_large += size;
    arena->stats.lstats[(size >> PAGE_SHIFT) - 1].nmalloc++;
    arena->stats.lstats[(size >> PAGE_SHIFT) - 1].nrequests++;
    arena->stats.lstats[(size >> PAGE_SHIFT) - 1].curruns++;
    if (arena->stats.lstats[(size >> PAGE_SHIFT) - 1].curruns >
        arena->stats.lstats[(size >> PAGE_SHIFT) - 1].highruns) {
        arena->stats.lstats[(size >> PAGE_SHIFT) - 1].highruns =
            arena->stats.lstats[(size >> PAGE_SHIFT) - 1].curruns;
    }
#endif
#ifdef JEMALLOC_PROF
    arena_prof_accum(arena, size);
#endif
    malloc_mutex_unlock(&arena->lock);

    if (zero == false) {
#ifdef JEMALLOC_FILL
        if (opt_junk)
            memset(ret, 0xa5, size);
        else if (opt_zero)
            memset(ret, 0, size);
#endif
    }

    return (ret);
}
void *
arena_malloc(size_t size, bool zero)
{

    assert(size != 0);
    assert(QUANTUM_CEILING(size) <= arena_maxclass);

    if (size <= small_maxclass) {
#ifdef JEMALLOC_TCACHE
        tcache_t *tcache;

        if ((tcache = tcache_get()) != NULL)
            return (tcache_alloc_small(tcache, size, zero));
        else
#endif
            return (arena_malloc_small(choose_arena(), size, zero));
    } else {
#ifdef JEMALLOC_TCACHE
        if (size <= tcache_maxclass) {
            tcache_t *tcache;

            if ((tcache = tcache_get()) != NULL)
                return (tcache_alloc_large(tcache, size, zero));
            else {
                return (arena_malloc_large(choose_arena(),
                    size, zero));
            }
        } else
#endif
            return (arena_malloc_large(choose_arena(), size, zero));
    }
}
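/*
 * Note (added for clarity): allocation routing above is three-way.  Requests
 * up to small_maxclass go to the small bins (through the thread cache when
 * one is available), larger requests up to tcache_maxclass can still be
 * satisfied from the thread cache's large bins, and everything else falls
 * through to a locked large run allocation in the chosen arena.
 */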
/* Only handles large allocations that require more than page alignment. */
void *
arena_palloc(arena_t *arena, size_t size, size_t alloc_size, size_t alignment,
    bool zero)
{
    void *ret;
    size_t offset;
    arena_chunk_t *chunk;

    assert((size & PAGE_MASK) == 0);

    alignment = PAGE_CEILING(alignment);

    malloc_mutex_lock(&arena->lock);
    ret = (void *)arena_run_alloc(arena, alloc_size, true, zero);
    if (ret == NULL) {
        malloc_mutex_unlock(&arena->lock);
        return (NULL);
    }

    chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ret);

    offset = (uintptr_t)ret & (alignment - 1);
    assert((offset & PAGE_MASK) == 0);
    assert(offset < alloc_size);
    if (offset == 0)
        arena_run_trim_tail(arena, chunk, ret, alloc_size, size, false);
    else {
        size_t leadsize, trailsize;

        leadsize = alignment - offset;
        if (leadsize > 0) {
            arena_run_trim_head(arena, chunk, ret, alloc_size,
                alloc_size - leadsize);
            ret = (void *)((uintptr_t)ret + leadsize);
        }

        trailsize = alloc_size - leadsize - size;
        if (trailsize != 0) {
            /* Trim trailing space. */
            assert(trailsize < alloc_size);
            arena_run_trim_tail(arena, chunk, ret, size + trailsize,
                size, false);
        }
    }

#ifdef JEMALLOC_STATS
    arena->stats.nmalloc_large++;
    arena->stats.nrequests_large++;
    arena->stats.allocated_large += size;
    arena->stats.lstats[(size >> PAGE_SHIFT) - 1].nmalloc++;
    arena->stats.lstats[(size >> PAGE_SHIFT) - 1].nrequests++;
    arena->stats.lstats[(size >> PAGE_SHIFT) - 1].curruns++;
    if (arena->stats.lstats[(size >> PAGE_SHIFT) - 1].curruns >
        arena->stats.lstats[(size >> PAGE_SHIFT) - 1].highruns) {
        arena->stats.lstats[(size >> PAGE_SHIFT) - 1].highruns =
            arena->stats.lstats[(size >> PAGE_SHIFT) - 1].curruns;
    }
#endif
    malloc_mutex_unlock(&arena->lock);

#ifdef JEMALLOC_FILL
    if (zero == false) {
        if (opt_junk)
            memset(ret, 0xa5, size);
        else if (opt_zero)
            memset(ret, 0, size);
    }
#endif
    return (ret);
}
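/*
 * Note (added for clarity): arena_palloc() achieves alignment by
 * over-allocating.  The caller supplies an alloc_size large enough that an
 * aligned sub-run of size bytes must exist within it; the misaligned head is
 * returned via arena_run_trim_head() and any surplus tail via
 * arena_run_trim_tail(), leaving only the aligned interior run allocated.
 */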
/* Return the size of the allocation pointed to by ptr. */
size_t
arena_salloc(const void *ptr)
{
    size_t ret;
    arena_chunk_t *chunk;
    size_t pageind, mapbits;

    assert(ptr != NULL);
    assert(CHUNK_ADDR2BASE(ptr) != ptr);

    chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
    pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> PAGE_SHIFT;
    mapbits = chunk->map[pageind-map_bias].bits;
    assert((mapbits & CHUNK_MAP_ALLOCATED) != 0);
    if ((mapbits & CHUNK_MAP_LARGE) == 0) {
        arena_run_t *run = (arena_run_t *)((uintptr_t)chunk +
            (uintptr_t)((pageind - (mapbits >> PAGE_SHIFT)) <<
            PAGE_SHIFT));
        dassert(run->magic == ARENA_RUN_MAGIC);
        size_t binind = arena_bin_index(chunk->arena, run->bin);
        arena_bin_info_t *bin_info = &arena_bin_info[binind];
        assert(((uintptr_t)ptr - ((uintptr_t)run +
            (uintptr_t)bin_info->reg0_offset)) % bin_info->reg_size ==
            0);
        ret = bin_info->reg_size;
    } else {
        assert(((uintptr_t)ptr & PAGE_MASK) == 0);
        ret = mapbits & ~PAGE_MASK;
        assert(ret != 0);
    }

    return (ret);
}
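/*
 * Note (added for clarity): for large allocations the usable size is stored
 * directly in the high bits of the first page's map entry, while for small
 * allocations the map entry only records the page offset back to the run
 * header, from which the bin's fixed reg_size is recovered.
 */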
#ifdef JEMALLOC_PROF
void
arena_prof_promoted(const void *ptr, size_t size)
{
    arena_chunk_t *chunk;
    size_t pageind, binind;

    assert(ptr != NULL);
    assert(CHUNK_ADDR2BASE(ptr) != ptr);
    assert(isalloc(ptr) == PAGE_SIZE);

    chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
    pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> PAGE_SHIFT;
    binind = SMALL_SIZE2BIN(size);
    assert(binind < nbins);
    chunk->map[pageind-map_bias].bits = (chunk->map[pageind-map_bias].bits &
        ~CHUNK_MAP_CLASS_MASK) | ((binind+1) << CHUNK_MAP_CLASS_SHIFT);
}
size_t
arena_salloc_demote(const void *ptr)
{
    size_t ret;
    arena_chunk_t *chunk;
    size_t pageind, mapbits;

    assert(ptr != NULL);
    assert(CHUNK_ADDR2BASE(ptr) != ptr);

    chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
    pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> PAGE_SHIFT;
    mapbits = chunk->map[pageind-map_bias].bits;
    assert((mapbits & CHUNK_MAP_ALLOCATED) != 0);
    if ((mapbits & CHUNK_MAP_LARGE) == 0) {
        arena_run_t *run = (arena_run_t *)((uintptr_t)chunk +
            (uintptr_t)((pageind - (mapbits >> PAGE_SHIFT)) <<
            PAGE_SHIFT));
        dassert(run->magic == ARENA_RUN_MAGIC);
        size_t binind = arena_bin_index(chunk->arena, run->bin);
        arena_bin_info_t *bin_info = &arena_bin_info[binind];
        assert(((uintptr_t)ptr - ((uintptr_t)run +
            (uintptr_t)bin_info->reg0_offset)) % bin_info->reg_size ==
            0);
        ret = bin_info->reg_size;
    } else {
        assert(((uintptr_t)ptr & PAGE_MASK) == 0);
        ret = mapbits & ~PAGE_MASK;
        if (prof_promote && ret == PAGE_SIZE && (mapbits &
            CHUNK_MAP_CLASS_MASK) != 0) {
            size_t binind = ((mapbits & CHUNK_MAP_CLASS_MASK) >>
                CHUNK_MAP_CLASS_SHIFT) - 1;
            assert(binind < nbins);
            ret = arena_bin_info[binind].reg_size;
        }
        assert(ret != 0);
    }

    return (ret);
}
#endif
static void
arena_dissociate_bin_run(arena_chunk_t *chunk, arena_run_t *run,
    arena_bin_t *bin)
{

    /* Dissociate run from bin. */
    if (run == bin->runcur)
        bin->runcur = NULL;
    else {
        size_t binind = arena_bin_index(chunk->arena, bin);
        arena_bin_info_t *bin_info = &arena_bin_info[binind];

        if (bin_info->nregs != 1) {
            size_t run_pageind = (((uintptr_t)run -
                (uintptr_t)chunk)) >> PAGE_SHIFT;
            arena_chunk_map_t *run_mapelm =
                &chunk->map[run_pageind-map_bias];
            /*
             * This block's conditional is necessary because if the
             * run only contains one region, then it never gets
             * inserted into the non-full runs tree.
             */
            arena_run_tree_remove(&bin->runs, run_mapelm);
        }
    }
}
static void
arena_dalloc_bin_run(arena_t *arena, arena_chunk_t *chunk, arena_run_t *run,
    arena_bin_t *bin)
{
    size_t binind;
    arena_bin_info_t *bin_info;
    size_t npages, run_ind, past;

    assert(run != bin->runcur);
    assert(arena_run_tree_search(&bin->runs, &chunk->map[
        (((uintptr_t)run-(uintptr_t)chunk)>>PAGE_SHIFT)-map_bias]) == NULL);

    binind = arena_bin_index(chunk->arena, run->bin);
    bin_info = &arena_bin_info[binind];

    malloc_mutex_unlock(&bin->lock);
    /******************************/
    npages = bin_info->run_size >> PAGE_SHIFT;
    run_ind = (size_t)(((uintptr_t)run - (uintptr_t)chunk) >> PAGE_SHIFT);
    past = (size_t)(PAGE_CEILING((uintptr_t)run +
        (uintptr_t)bin_info->reg0_offset + (uintptr_t)(run->nextind *
        bin_info->reg_size) - (uintptr_t)chunk) >> PAGE_SHIFT);
    malloc_mutex_lock(&arena->lock);

    /*
     * If the run was originally clean, and some pages were never touched,
     * trim the clean pages before deallocating the dirty portion of the
     * run.
     */
    if ((chunk->map[run_ind-map_bias].bits & CHUNK_MAP_DIRTY) == 0 && past
        - run_ind < npages) {
        /*
         * Trim clean pages.  Convert to large run beforehand.  Set the
         * last map element first, in case this is a one-page run.
         */
        chunk->map[run_ind+npages-1-map_bias].bits = CHUNK_MAP_LARGE |
            (chunk->map[run_ind+npages-1-map_bias].bits &
            CHUNK_MAP_FLAGS_MASK);
        chunk->map[run_ind-map_bias].bits = bin_info->run_size |
            CHUNK_MAP_LARGE | (chunk->map[run_ind-map_bias].bits &
            CHUNK_MAP_FLAGS_MASK);
        arena_run_trim_tail(arena, chunk, run, (npages << PAGE_SHIFT),
            ((past - run_ind) << PAGE_SHIFT), false);
        /* npages = past - run_ind; */
    }
#ifdef JEMALLOC_DEBUG
    run->magic = 0;
#endif
    arena_run_dalloc(arena, run, true);
    malloc_mutex_unlock(&arena->lock);
    /****************************/
    malloc_mutex_lock(&bin->lock);
#ifdef JEMALLOC_STATS
    bin->stats.curruns--;
#endif
}
static void
arena_bin_lower_run(arena_t *arena, arena_chunk_t *chunk, arena_run_t *run,
    arena_bin_t *bin)
{

    /*
     * Make sure that bin->runcur always refers to the lowest non-full run,
     * if one exists.
     */
    if (bin->runcur == NULL)
        bin->runcur = run;
    else if ((uintptr_t)run < (uintptr_t)bin->runcur) {
        /* Switch runcur. */
        if (bin->runcur->nfree > 0) {
            arena_chunk_t *runcur_chunk =
                CHUNK_ADDR2BASE(bin->runcur);
            size_t runcur_pageind = (((uintptr_t)bin->runcur -
                (uintptr_t)runcur_chunk)) >> PAGE_SHIFT;
            arena_chunk_map_t *runcur_mapelm =
                &runcur_chunk->map[runcur_pageind-map_bias];

            /* Insert runcur. */
            arena_run_tree_insert(&bin->runs, runcur_mapelm);
        }
        bin->runcur = run;
    } else {
        size_t run_pageind = (((uintptr_t)run -
            (uintptr_t)chunk)) >> PAGE_SHIFT;
        arena_chunk_map_t *run_mapelm =
            &chunk->map[run_pageind-map_bias];

        assert(arena_run_tree_search(&bin->runs, run_mapelm) == NULL);
        arena_run_tree_insert(&bin->runs, run_mapelm);
    }
}
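/*
 * Note (added for clarity): keeping bin->runcur pointed at the
 * lowest-addressed non-full run (and pushing higher runs back into
 * bin->runs) biases future allocations toward low memory, which increases
 * the chance that higher-addressed runs empty out completely and can be
 * returned to the arena.
 */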
void
arena_dalloc_bin(arena_t *arena, arena_chunk_t *chunk, void *ptr,
    arena_chunk_map_t *mapelm)
{
    size_t pageind;
    arena_run_t *run;
    arena_bin_t *bin;
#if (defined(JEMALLOC_FILL) || defined(JEMALLOC_STATS))
    size_t size;
#endif

    pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> PAGE_SHIFT;
    run = (arena_run_t *)((uintptr_t)chunk + (uintptr_t)((pageind -
        (mapelm->bits >> PAGE_SHIFT)) << PAGE_SHIFT));
    dassert(run->magic == ARENA_RUN_MAGIC);
    bin = run->bin;
    size_t binind = arena_bin_index(arena, bin);
    arena_bin_info_t *bin_info = &arena_bin_info[binind];
#if (defined(JEMALLOC_FILL) || defined(JEMALLOC_STATS))
    size = bin_info->reg_size;
#endif

#ifdef JEMALLOC_FILL
    if (opt_junk)
        memset(ptr, 0x5a, size);
#endif

    arena_run_reg_dalloc(run, ptr);
    if (run->nfree == bin_info->nregs) {
        arena_dissociate_bin_run(chunk, run, bin);
        arena_dalloc_bin_run(arena, chunk, run, bin);
    } else if (run->nfree == 1 && run != bin->runcur)
        arena_bin_lower_run(arena, chunk, run, bin);

#ifdef JEMALLOC_STATS
    bin->stats.allocated -= size;
    bin->stats.ndalloc++;
#endif
}
#ifdef JEMALLOC_STATS
void
arena_stats_merge(arena_t *arena, size_t *nactive, size_t *ndirty,
    arena_stats_t *astats, malloc_bin_stats_t *bstats,
    malloc_large_stats_t *lstats)
{
    unsigned i;

    malloc_mutex_lock(&arena->lock);
    *nactive += arena->nactive;
    *ndirty += arena->ndirty;

    astats->mapped += arena->stats.mapped;
    astats->npurge += arena->stats.npurge;
    astats->nmadvise += arena->stats.nmadvise;
    astats->purged += arena->stats.purged;
    astats->allocated_large += arena->stats.allocated_large;
    astats->nmalloc_large += arena->stats.nmalloc_large;
    astats->ndalloc_large += arena->stats.ndalloc_large;
    astats->nrequests_large += arena->stats.nrequests_large;

    for (i = 0; i < nlclasses; i++) {
        lstats[i].nmalloc += arena->stats.lstats[i].nmalloc;
        lstats[i].ndalloc += arena->stats.lstats[i].ndalloc;
        lstats[i].nrequests += arena->stats.lstats[i].nrequests;
        lstats[i].highruns += arena->stats.lstats[i].highruns;
        lstats[i].curruns += arena->stats.lstats[i].curruns;
    }
    malloc_mutex_unlock(&arena->lock);

    for (i = 0; i < nbins; i++) {
        arena_bin_t *bin = &arena->bins[i];

        malloc_mutex_lock(&bin->lock);
        bstats[i].allocated += bin->stats.allocated;
        bstats[i].nmalloc += bin->stats.nmalloc;
        bstats[i].ndalloc += bin->stats.ndalloc;
        bstats[i].nrequests += bin->stats.nrequests;
#ifdef JEMALLOC_TCACHE
        bstats[i].nfills += bin->stats.nfills;
        bstats[i].nflushes += bin->stats.nflushes;
#endif
        bstats[i].nruns += bin->stats.nruns;
        bstats[i].reruns += bin->stats.reruns;
        bstats[i].highruns += bin->stats.highruns;
        bstats[i].curruns += bin->stats.curruns;
        malloc_mutex_unlock(&bin->lock);
    }
}
#endif
void
arena_dalloc_large(arena_t *arena, arena_chunk_t *chunk, void *ptr)
{

    /* Large allocation. */
#ifdef JEMALLOC_FILL
#  ifndef JEMALLOC_STATS
    if (opt_junk)
#  endif
#endif
    {
#if (defined(JEMALLOC_FILL) || defined(JEMALLOC_STATS))
        size_t pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >>
            PAGE_SHIFT;
        size_t size = chunk->map[pageind-map_bias].bits & ~PAGE_MASK;
#endif

#ifdef JEMALLOC_FILL
#  ifdef JEMALLOC_STATS
        if (opt_junk)
#  endif
            memset(ptr, 0x5a, size);
#endif
#ifdef JEMALLOC_STATS
        arena->stats.ndalloc_large++;
        arena->stats.allocated_large -= size;
        arena->stats.lstats[(size >> PAGE_SHIFT) - 1].ndalloc++;
        arena->stats.lstats[(size >> PAGE_SHIFT) - 1].curruns--;
#endif
    }

    arena_run_dalloc(arena, (arena_run_t *)ptr, true);
}
static void
arena_ralloc_large_shrink(arena_t *arena, arena_chunk_t *chunk, void *ptr,
    size_t oldsize, size_t size)
{

    assert(size < oldsize);

    /*
     * Shrink the run, and make trailing pages available for other
     * allocations.
     */
    malloc_mutex_lock(&arena->lock);
    arena_run_trim_tail(arena, chunk, (arena_run_t *)ptr, oldsize, size,
        true);
#ifdef JEMALLOC_STATS
    arena->stats.ndalloc_large++;
    arena->stats.allocated_large -= oldsize;
    arena->stats.lstats[(oldsize >> PAGE_SHIFT) - 1].ndalloc++;
    arena->stats.lstats[(oldsize >> PAGE_SHIFT) - 1].curruns--;

    arena->stats.nmalloc_large++;
    arena->stats.nrequests_large++;
    arena->stats.allocated_large += size;
    arena->stats.lstats[(size >> PAGE_SHIFT) - 1].nmalloc++;
    arena->stats.lstats[(size >> PAGE_SHIFT) - 1].nrequests++;
    arena->stats.lstats[(size >> PAGE_SHIFT) - 1].curruns++;
    if (arena->stats.lstats[(size >> PAGE_SHIFT) - 1].curruns >
        arena->stats.lstats[(size >> PAGE_SHIFT) - 1].highruns) {
        arena->stats.lstats[(size >> PAGE_SHIFT) - 1].highruns =
            arena->stats.lstats[(size >> PAGE_SHIFT) - 1].curruns;
    }
#endif
    malloc_mutex_unlock(&arena->lock);
}
static bool
arena_ralloc_large_grow(arena_t *arena, arena_chunk_t *chunk, void *ptr,
    size_t oldsize, size_t size, size_t extra, bool zero)
{
    size_t pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> PAGE_SHIFT;
    size_t npages = oldsize >> PAGE_SHIFT;
    size_t followsize;

    assert(oldsize == (chunk->map[pageind-map_bias].bits & ~PAGE_MASK));

    /* Try to extend the run. */
    assert(size + extra > oldsize);
    malloc_mutex_lock(&arena->lock);
    if (pageind + npages < chunk_npages &&
        (chunk->map[pageind+npages-map_bias].bits
        & CHUNK_MAP_ALLOCATED) == 0 && (followsize =
        chunk->map[pageind+npages-map_bias].bits & ~PAGE_MASK) >= size -
        oldsize) {
        /*
         * The next run is available and sufficiently large.  Split the
         * following run, then merge the first part with the existing
         * allocation.
         */
        size_t flag_dirty;
        size_t splitsize = (oldsize + followsize <= size + extra)
            ? followsize : size + extra - oldsize;
        arena_run_split(arena, (arena_run_t *)((uintptr_t)chunk +
            ((pageind+npages) << PAGE_SHIFT)), splitsize, true, zero);

        size = oldsize + splitsize;
        npages = size >> PAGE_SHIFT;

        /*
         * Mark the extended run as dirty if either portion of the run
         * was dirty before allocation.  This is rather pedantic,
         * because there's not actually any sequence of events that
         * could cause the resulting run to be passed to
         * arena_run_dalloc() with the dirty argument set to false
         * (which is when dirty flag consistency would really matter).
         */
        flag_dirty = (chunk->map[pageind-map_bias].bits &
            CHUNK_MAP_DIRTY) |
            (chunk->map[pageind+npages-1-map_bias].bits &
            CHUNK_MAP_DIRTY);
        chunk->map[pageind-map_bias].bits = size | flag_dirty
            | CHUNK_MAP_LARGE | CHUNK_MAP_ALLOCATED;
        chunk->map[pageind+npages-1-map_bias].bits = flag_dirty |
            CHUNK_MAP_LARGE | CHUNK_MAP_ALLOCATED;

#ifdef JEMALLOC_STATS
        arena->stats.ndalloc_large++;
        arena->stats.allocated_large -= oldsize;
        arena->stats.lstats[(oldsize >> PAGE_SHIFT) - 1].ndalloc++;
        arena->stats.lstats[(oldsize >> PAGE_SHIFT) - 1].curruns--;

        arena->stats.nmalloc_large++;
        arena->stats.nrequests_large++;
        arena->stats.allocated_large += size;
        arena->stats.lstats[(size >> PAGE_SHIFT) - 1].nmalloc++;
        arena->stats.lstats[(size >> PAGE_SHIFT) - 1].nrequests++;
        arena->stats.lstats[(size >> PAGE_SHIFT) - 1].curruns++;
        if (arena->stats.lstats[(size >> PAGE_SHIFT) - 1].curruns >
            arena->stats.lstats[(size >> PAGE_SHIFT) - 1].highruns) {
            arena->stats.lstats[(size >> PAGE_SHIFT) - 1].highruns =
                arena->stats.lstats[(size >> PAGE_SHIFT) - 1].curruns;
        }
#endif
        malloc_mutex_unlock(&arena->lock);
        return (false);
    }
    malloc_mutex_unlock(&arena->lock);

    return (true);
}
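
/*
 * Worked example for the split size computed above (assuming 4 KiB pages):
 * suppose oldsize is 2 pages, the caller needs size == 3 pages with
 * extra == 2 pages, and the trailing free run has followsize == 4 pages.
 * Since oldsize + followsize (6 pages) exceeds size + extra (5 pages),
 * splitsize == size + extra - oldsize == 3 pages: the run grows in place to
 * 5 pages and one page of the trailing run stays available.  Had followsize
 * been only 2 pages (still >= size - oldsize), the whole trailing run would
 * have been absorbed (splitsize == followsize) and the run would grow to 4
 * pages, satisfying size but not all of extra.
 */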
/*
 * Try to resize a large allocation, in order to avoid copying.  This will
 * always fail if growing an object and the following run is already in use.
 */
static bool
arena_ralloc_large(void *ptr, size_t oldsize, size_t size, size_t extra,
    bool zero)
{
    size_t psize;

    psize = PAGE_CEILING(size + extra);
    if (psize == oldsize) {
        /* Same size class. */
#ifdef JEMALLOC_FILL
        if (opt_junk && size < oldsize) {
            memset((void *)((uintptr_t)ptr + size), 0x5a, oldsize -
                size);
        }
#endif
        return (false);
    } else {
        arena_chunk_t *chunk;
        arena_t *arena;

        chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
        arena = chunk->arena;
        dassert(arena->magic == ARENA_MAGIC);

        if (psize < oldsize) {
#ifdef JEMALLOC_FILL
            /* Fill before shrinking in order to avoid a race. */
            if (opt_junk) {
                memset((void *)((uintptr_t)ptr + size), 0x5a,
                    oldsize - size);
            }
#endif
            arena_ralloc_large_shrink(arena, chunk, ptr, oldsize,
                psize);
            return (false);
        } else {
            bool ret = arena_ralloc_large_grow(arena, chunk, ptr,
                oldsize, PAGE_CEILING(size),
                psize - PAGE_CEILING(size), zero);
#ifdef JEMALLOC_FILL
            if (ret == false && zero == false && opt_zero) {
                memset((void *)((uintptr_t)ptr + oldsize), 0,
                    size - oldsize);
            }
#endif
            return (ret);
        }
    }
}
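
/*
 * Worked example (assuming 4 KiB pages): for oldsize == 8192 and a request of
 * size == 5000 with extra == 0, psize == PAGE_CEILING(5000) == 8192, which
 * equals oldsize, so the allocation is left in place and only the trailing
 * 3192 bytes are junk-filled when opt_junk is enabled.  A request of
 * size == 4000 instead gives psize == 4096 < oldsize and takes the shrink
 * path above.
 */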
void *
arena_ralloc_no_move(void *ptr, size_t oldsize, size_t size, size_t extra,
    bool zero)
{

    /*
     * Avoid moving the allocation if the size class can be left the same.
     */
    if (oldsize <= arena_maxclass) {
        if (oldsize <= small_maxclass) {
            assert(arena_bin_info[SMALL_SIZE2BIN(oldsize)].reg_size
                == oldsize);
            if ((size + extra <= small_maxclass &&
                SMALL_SIZE2BIN(size + extra) ==
                SMALL_SIZE2BIN(oldsize)) || (size <= oldsize &&
                size + extra >= oldsize)) {
#ifdef JEMALLOC_FILL
                if (opt_junk && size < oldsize) {
                    memset((void *)((uintptr_t)ptr + size),
                        0x5a, oldsize - size);
                }
#endif
                return (ptr);
            }
        } else {
            assert(size <= arena_maxclass);
            if (size + extra > small_maxclass) {
                if (arena_ralloc_large(ptr, oldsize, size,
                    extra, zero) == false)
                    return (ptr);
            }
        }
    }

    /* Reallocation would require a move. */
    return (NULL);
}
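
/*
 * Worked example (assuming a 16-byte quantum and the default small size
 * classes): for an object whose usable size oldsize == 48, a new request of
 * size == 44 with extra == 0 maps to the same 48-byte bin, so the pointer is
 * returned unchanged.  Shrinking to size == 20 with extra == 0 maps to the
 * 32-byte bin and fails both tests above, so NULL is returned and the caller
 * must move the object.
 */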
void *
arena_ralloc(void *ptr, size_t oldsize, size_t size, size_t extra,
    size_t alignment, bool zero)
{
    void *ret;
    size_t copysize;

    /* Try to avoid moving the allocation. */
    ret = arena_ralloc_no_move(ptr, oldsize, size, extra, zero);
    if (ret != NULL)
        return (ret);

    /*
     * size and oldsize are different enough that we need to move the
     * object.  In that case, fall back to allocating new space and
     * copying.
     */
    if (alignment != 0) {
        size_t usize = sa2u(size + extra, alignment, NULL);
        if (usize == 0)
            return (NULL);
        ret = ipalloc(usize, alignment, zero);
    } else
        ret = arena_malloc(size + extra, zero);

    if (ret == NULL) {
        if (extra == 0)
            return (NULL);
        /* Try again, this time without extra. */
        if (alignment != 0) {
            size_t usize = sa2u(size, alignment, NULL);
            if (usize == 0)
                return (NULL);
            ret = ipalloc(usize, alignment, zero);
        } else
            ret = arena_malloc(size, zero);

        if (ret == NULL)
            return (NULL);
    }

    /* Junk/zero-filling were already done by ipalloc()/arena_malloc(). */

    /*
     * Copy at most size bytes (not size+extra), since the caller has no
     * expectation that the extra bytes will be reliably preserved.
     */
    copysize = (size < oldsize) ? size : oldsize;
    memcpy(ret, ptr, copysize);
    idalloc(ptr);
    return (ret);
}
bool
arena_new(arena_t *arena, unsigned ind)
{
    unsigned i;
    arena_bin_t *bin;

    arena->ind = ind;
    arena->nthreads = 0;

    if (malloc_mutex_init(&arena->lock))
        return (true);

#ifdef JEMALLOC_STATS
    memset(&arena->stats, 0, sizeof(arena_stats_t));
    arena->stats.lstats = (malloc_large_stats_t *)base_alloc(nlclasses *
        sizeof(malloc_large_stats_t));
    if (arena->stats.lstats == NULL)
        return (true);
    memset(arena->stats.lstats, 0, nlclasses *
        sizeof(malloc_large_stats_t));
#  ifdef JEMALLOC_TCACHE
    ql_new(&arena->tcache_ql);
#  endif
#endif

#ifdef JEMALLOC_PROF
    arena->prof_accumbytes = 0;
#endif

    /* Initialize chunks. */
    ql_new(&arena->chunks_dirty);
    arena->spare = NULL;

    arena->nactive = 0;
    arena->ndirty = 0;
    arena->npurgatory = 0;

    arena_avail_tree_new(&arena->runs_avail_clean);
    arena_avail_tree_new(&arena->runs_avail_dirty);

    /* Initialize bins. */
    i = 0;
#ifdef JEMALLOC_TINY
    /* (2^n)-spaced tiny bins. */
    for (; i < ntbins; i++) {
        bin = &arena->bins[i];
        if (malloc_mutex_init(&bin->lock))
            return (true);
        bin->runcur = NULL;
        arena_run_tree_new(&bin->runs);
#ifdef JEMALLOC_STATS
        memset(&bin->stats, 0, sizeof(malloc_bin_stats_t));
#endif
    }
#endif

    /* Quantum-spaced bins. */
    for (; i < ntbins + nqbins; i++) {
        bin = &arena->bins[i];
        if (malloc_mutex_init(&bin->lock))
            return (true);
        bin->runcur = NULL;
        arena_run_tree_new(&bin->runs);
#ifdef JEMALLOC_STATS
        memset(&bin->stats, 0, sizeof(malloc_bin_stats_t));
#endif
    }

    /* Cacheline-spaced bins. */
    for (; i < ntbins + nqbins + ncbins; i++) {
        bin = &arena->bins[i];
        if (malloc_mutex_init(&bin->lock))
            return (true);
        bin->runcur = NULL;
        arena_run_tree_new(&bin->runs);
#ifdef JEMALLOC_STATS
        memset(&bin->stats, 0, sizeof(malloc_bin_stats_t));
#endif
    }

    /* Subpage-spaced bins. */
    for (; i < nbins; i++) {
        bin = &arena->bins[i];
        if (malloc_mutex_init(&bin->lock))
            return (true);
        bin->runcur = NULL;
        arena_run_tree_new(&bin->runs);
#ifdef JEMALLOC_STATS
        memset(&bin->stats, 0, sizeof(malloc_bin_stats_t));
#endif
    }

#ifdef JEMALLOC_DEBUG
    arena->magic = ARENA_MAGIC;
#endif

    return (false);
}
#ifdef JEMALLOC_DEBUG
static void
small_size2bin_validate(void)
{
    size_t i, size, binind;

    i = 1;
#  ifdef JEMALLOC_TINY
    /* Tiny. */
    for (; i < (1U << LG_TINY_MIN); i++) {
        size = pow2_ceil(1U << LG_TINY_MIN);
        binind = ffs((int)(size >> (LG_TINY_MIN + 1)));
        assert(SMALL_SIZE2BIN(i) == binind);
    }
    for (; i < qspace_min; i++) {
        size = pow2_ceil(i);
        binind = ffs((int)(size >> (LG_TINY_MIN + 1)));
        assert(SMALL_SIZE2BIN(i) == binind);
    }
#  endif
    /* Quantum-spaced. */
    for (; i <= qspace_max; i++) {
        size = QUANTUM_CEILING(i);
        binind = ntbins + (size >> LG_QUANTUM) - 1;
        assert(SMALL_SIZE2BIN(i) == binind);
    }
    /* Cacheline-spaced. */
    for (; i <= cspace_max; i++) {
        size = CACHELINE_CEILING(i);
        binind = ntbins + nqbins + ((size - cspace_min) >>
            LG_CACHELINE);
        assert(SMALL_SIZE2BIN(i) == binind);
    }
    /* Sub-page. */
    for (; i <= sspace_max; i++) {
        size = SUBPAGE_CEILING(i);
        binind = ntbins + nqbins + ncbins + ((size - sspace_min)
            >> LG_SUBPAGE);
        assert(SMALL_SIZE2BIN(i) == binind);
    }
}
#endif
static bool
small_size2bin_init(void)
{

    if (opt_lg_qspace_max != LG_QSPACE_MAX_DEFAULT
        || opt_lg_cspace_max != LG_CSPACE_MAX_DEFAULT
        || (sizeof(const_small_size2bin) != ((small_maxclass-1) >>
        LG_TINY_MIN) + 1))
        return (small_size2bin_init_hard());

    small_size2bin = const_small_size2bin;
#ifdef JEMALLOC_DEBUG
    small_size2bin_validate();
#endif
    return (false);
}
static bool
small_size2bin_init_hard(void)
{
    size_t i, size, binind;
    uint8_t *custom_small_size2bin;
#define CUSTOM_SMALL_SIZE2BIN(s) \
    custom_small_size2bin[(s-1) >> LG_TINY_MIN]

    assert(opt_lg_qspace_max != LG_QSPACE_MAX_DEFAULT
        || opt_lg_cspace_max != LG_CSPACE_MAX_DEFAULT
        || (sizeof(const_small_size2bin) != ((small_maxclass-1) >>
        LG_TINY_MIN) + 1));

    custom_small_size2bin = (uint8_t *)
        base_alloc(small_maxclass >> LG_TINY_MIN);
    if (custom_small_size2bin == NULL)
        return (true);

    i = 1;
#ifdef JEMALLOC_TINY
    /* Tiny. */
    for (; i < (1U << LG_TINY_MIN); i += TINY_MIN) {
        size = pow2_ceil(1U << LG_TINY_MIN);
        binind = ffs((int)(size >> (LG_TINY_MIN + 1)));
        CUSTOM_SMALL_SIZE2BIN(i) = binind;
    }
    for (; i < qspace_min; i += TINY_MIN) {
        size = pow2_ceil(i);
        binind = ffs((int)(size >> (LG_TINY_MIN + 1)));
        CUSTOM_SMALL_SIZE2BIN(i) = binind;
    }
#endif
    /* Quantum-spaced. */
    for (; i <= qspace_max; i += TINY_MIN) {
        size = QUANTUM_CEILING(i);
        binind = ntbins + (size >> LG_QUANTUM) - 1;
        CUSTOM_SMALL_SIZE2BIN(i) = binind;
    }
    /* Cacheline-spaced. */
    for (; i <= cspace_max; i += TINY_MIN) {
        size = CACHELINE_CEILING(i);
        binind = ntbins + nqbins + ((size - cspace_min) >>
            LG_CACHELINE);
        CUSTOM_SMALL_SIZE2BIN(i) = binind;
    }
    /* Sub-page. */
    for (; i <= sspace_max; i += TINY_MIN) {
        size = SUBPAGE_CEILING(i);
        binind = ntbins + nqbins + ncbins + ((size - sspace_min) >>
            LG_SUBPAGE);
        CUSTOM_SMALL_SIZE2BIN(i) = binind;
    }

    small_size2bin = custom_small_size2bin;
#ifdef JEMALLOC_DEBUG
    small_size2bin_validate();
#endif
    return (false);
#undef CUSTOM_SMALL_SIZE2BIN
}
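
/*
 * Worked example of the CUSTOM_SMALL_SIZE2BIN() indexing used above (assuming
 * LG_TINY_MIN == 3, so TINY_MIN == 8): the table stores one byte per 8-byte
 * step of request size, and (s-1) >> LG_TINY_MIN maps sizes 1..8 to slot 0,
 * 9..16 to slot 1, and so on, which is why the loops advance i by TINY_MIN.
 */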
/*
 * Calculate bin_info->run_size such that it meets the following constraints:
 *
 *   *) bin_info->run_size >= min_run_size
 *   *) bin_info->run_size <= arena_maxclass
 *   *) run header overhead <= RUN_MAX_OVRHD (or header overhead relaxed).
 *   *) bin_info->nregs <= RUN_MAXREGS
 *
 * bin_info->nregs, bin_info->bitmap_offset, and bin_info->reg0_offset are also
 * calculated here, since these settings are all interdependent.
 */
static size_t
bin_info_run_size_calc(arena_bin_info_t *bin_info, size_t min_run_size)
{
    size_t try_run_size, good_run_size;
    uint32_t try_nregs, good_nregs;
    uint32_t try_hdr_size, good_hdr_size;
    uint32_t try_bitmap_offset, good_bitmap_offset;
#ifdef JEMALLOC_PROF
    uint32_t try_ctx0_offset, good_ctx0_offset;
#endif
    uint32_t try_reg0_offset, good_reg0_offset;

    assert(min_run_size >= PAGE_SIZE);
    assert(min_run_size <= arena_maxclass);

    /*
     * Calculate known-valid settings before entering the run_size
     * expansion loop, so that the first part of the loop always copies
     * valid settings.
     *
     * The do..while loop iteratively reduces the number of regions until
     * the run header and the regions no longer overlap.  A closed formula
     * would be quite messy, since there is an interdependency between the
     * header's mask length and the number of regions.
     */
    try_run_size = min_run_size;
    try_nregs = ((try_run_size - sizeof(arena_run_t)) / bin_info->reg_size)
        + 1; /* Counter-act try_nregs-- in loop. */
    if (try_nregs > RUN_MAXREGS) {
        try_nregs = RUN_MAXREGS
            + 1; /* Counter-act try_nregs-- in loop. */
    }
    do {
        try_nregs--;
        try_hdr_size = sizeof(arena_run_t);
        /* Pad to a long boundary. */
        try_hdr_size = LONG_CEILING(try_hdr_size);
        try_bitmap_offset = try_hdr_size;
        /* Add space for bitmap. */
        try_hdr_size += bitmap_size(try_nregs);
#ifdef JEMALLOC_PROF
        if (opt_prof && prof_promote == false) {
            /* Pad to a quantum boundary. */
            try_hdr_size = QUANTUM_CEILING(try_hdr_size);
            try_ctx0_offset = try_hdr_size;
            /* Add space for one (prof_ctx_t *) per region. */
            try_hdr_size += try_nregs * sizeof(prof_ctx_t *);
        } else
            try_ctx0_offset = 0;
#endif
        try_reg0_offset = try_run_size - (try_nregs *
            bin_info->reg_size);
    } while (try_hdr_size > try_reg0_offset);

    /* run_size expansion loop. */
    do {
        /*
         * Copy valid settings before trying more aggressive settings.
         */
        good_run_size = try_run_size;
        good_nregs = try_nregs;
        good_hdr_size = try_hdr_size;
        good_bitmap_offset = try_bitmap_offset;
#ifdef JEMALLOC_PROF
        good_ctx0_offset = try_ctx0_offset;
#endif
        good_reg0_offset = try_reg0_offset;

        /* Try more aggressive settings. */
        try_run_size += PAGE_SIZE;
        try_nregs = ((try_run_size - sizeof(arena_run_t)) /
            bin_info->reg_size)
            + 1; /* Counter-act try_nregs-- in loop. */
        if (try_nregs > RUN_MAXREGS) {
            try_nregs = RUN_MAXREGS
                + 1; /* Counter-act try_nregs-- in loop. */
        }
        do {
            try_nregs--;
            try_hdr_size = sizeof(arena_run_t);
            /* Pad to a long boundary. */
            try_hdr_size = LONG_CEILING(try_hdr_size);
            try_bitmap_offset = try_hdr_size;
            /* Add space for bitmap. */
            try_hdr_size += bitmap_size(try_nregs);
#ifdef JEMALLOC_PROF
            if (opt_prof && prof_promote == false) {
                /* Pad to a quantum boundary. */
                try_hdr_size = QUANTUM_CEILING(try_hdr_size);
                try_ctx0_offset = try_hdr_size;
                /*
                 * Add space for one (prof_ctx_t *) per region.
                 */
                try_hdr_size += try_nregs *
                    sizeof(prof_ctx_t *);
            }
#endif
            try_reg0_offset = try_run_size - (try_nregs *
                bin_info->reg_size);
        } while (try_hdr_size > try_reg0_offset);
    } while (try_run_size <= arena_maxclass
        && try_run_size <= arena_maxclass
        && RUN_MAX_OVRHD * (bin_info->reg_size << 3) > RUN_MAX_OVRHD_RELAX
        && (try_reg0_offset << RUN_BFP) > RUN_MAX_OVRHD * try_run_size
        && try_nregs < RUN_MAXREGS);

    assert(good_hdr_size <= good_reg0_offset);

    /* Copy final settings. */
    bin_info->run_size = good_run_size;
    bin_info->nregs = good_nregs;
    bin_info->bitmap_offset = good_bitmap_offset;
#ifdef JEMALLOC_PROF
    bin_info->ctx0_offset = good_ctx0_offset;
#endif
    bin_info->reg0_offset = good_reg0_offset;

    return (good_run_size);
}
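
/*
 * Worked example of the overlap test above (all numbers are illustrative
 * assumptions): with min_run_size of one 4 KiB page, reg_size == 64, a
 * 48-byte arena_run_t, an 8-byte bitmap, and no per-region prof_ctx_t
 * pointers, the inner loop first evaluates try_nregs == 63.  The header then
 * needs 48 + 8 == 56 bytes while reg0_offset == 4096 - 63 * 64 == 64, so the
 * header and regions do not overlap and the loop stops at 63 regions with
 * 64 bytes (about 1.6%) of per-run overhead.
 */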
static bool
bin_info_init(void)
{
    arena_bin_info_t *bin_info;
    unsigned i;
    size_t prev_run_size;

    arena_bin_info = base_alloc(sizeof(arena_bin_info_t) * nbins);
    if (arena_bin_info == NULL)
        return (true);

    prev_run_size = PAGE_SIZE;
    i = 0;
#ifdef JEMALLOC_TINY
    /* (2^n)-spaced tiny bins. */
    for (; i < ntbins; i++) {
        bin_info = &arena_bin_info[i];
        bin_info->reg_size = (1U << (LG_TINY_MIN + i));
        prev_run_size = bin_info_run_size_calc(bin_info, prev_run_size);
        bitmap_info_init(&bin_info->bitmap_info, bin_info->nregs);
    }
#endif

    /* Quantum-spaced bins. */
    for (; i < ntbins + nqbins; i++) {
        bin_info = &arena_bin_info[i];
        bin_info->reg_size = (i - ntbins + 1) << LG_QUANTUM;
        prev_run_size = bin_info_run_size_calc(bin_info, prev_run_size);
        bitmap_info_init(&bin_info->bitmap_info, bin_info->nregs);
    }

    /* Cacheline-spaced bins. */
    for (; i < ntbins + nqbins + ncbins; i++) {
        bin_info = &arena_bin_info[i];
        bin_info->reg_size = cspace_min + ((i - (ntbins + nqbins)) <<
            LG_CACHELINE);
        prev_run_size = bin_info_run_size_calc(bin_info, prev_run_size);
        bitmap_info_init(&bin_info->bitmap_info, bin_info->nregs);
    }

    /* Subpage-spaced bins. */
    for (; i < nbins; i++) {
        bin_info = &arena_bin_info[i];
        bin_info->reg_size = sspace_min + ((i - (ntbins + nqbins +
            ncbins)) << LG_SUBPAGE);
        prev_run_size = bin_info_run_size_calc(bin_info, prev_run_size);
        bitmap_info_init(&bin_info->bitmap_info, bin_info->nregs);
    }

    return (false);
}
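
/*
 * Worked example of the reg_size formulas above (assuming a 16-byte quantum
 * and 64-byte cachelines, with ntbins == 1 and nqbins == 8): quantum bin
 * i == ntbins + 2 gets reg_size == (i - ntbins + 1) << LG_QUANTUM == 48,
 * while the first cacheline bin, i == ntbins + nqbins, gets
 * reg_size == cspace_min, with each subsequent cacheline bin 64 bytes larger.
 */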
bool
arena_boot(void)
{
    size_t header_size;
    unsigned i;

    /* Set variables according to the value of opt_lg_[qc]space_max. */
    qspace_max = (1U << opt_lg_qspace_max);
    cspace_min = CACHELINE_CEILING(qspace_max);
    if (cspace_min == qspace_max)
        cspace_min += CACHELINE;
    cspace_max = (1U << opt_lg_cspace_max);
    sspace_min = SUBPAGE_CEILING(cspace_max);
    if (sspace_min == cspace_max)
        sspace_min += SUBPAGE;
    assert(sspace_min < PAGE_SIZE);
    sspace_max = PAGE_SIZE - SUBPAGE;

#ifdef JEMALLOC_TINY
    assert(LG_QUANTUM >= LG_TINY_MIN);
#endif
    assert(ntbins <= LG_QUANTUM);
    nqbins = qspace_max >> LG_QUANTUM;
    ncbins = ((cspace_max - cspace_min) >> LG_CACHELINE) + 1;
    nsbins = ((sspace_max - sspace_min) >> LG_SUBPAGE) + 1;
    nbins = ntbins + nqbins + ncbins + nsbins;
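
    /*
     * Worked example of the size-class counts computed above (assuming 4 KiB
     * pages, a 16-byte quantum, 64-byte cachelines, 256-byte subpages, and
     * the default qspace/cspace maxima of 128 and 512 bytes): qspace_max ==
     * 128 gives nqbins == 8 (16..128); cspace_min == 192 and cspace_max ==
     * 512 give ncbins == 6 (192..512); sspace_min == 768 and sspace_max ==
     * 3840 give nsbins == 13 (768..3840).  With LG_TINY_MIN == 3 (a single
     * 8-byte tiny bin, ntbins == 1), nbins == 28.
     */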
    /*
     * The small_size2bin lookup table uses uint8_t to encode each bin
     * index, so we cannot support more than 256 small size classes.  This
     * limit is difficult to exceed (not even possible with 16B quantum and
     * 4KiB pages), and such configurations are impractical, but
     * nonetheless we need to protect against this case in order to avoid
     * undefined behavior.
     *
     * Further constrain nbins to 255 if prof_promote is true, since all
     * small size classes, plus a "not small" size class must be stored in
     * 8 bits of arena_chunk_map_t's bits field.
     */
#ifdef JEMALLOC_PROF
    if (opt_prof && prof_promote) {
        if (nbins > 255) {
            char line_buf[UMAX2S_BUFSIZE];
            malloc_write("<jemalloc>: Too many small size classes (");
            malloc_write(u2s(nbins, 10, line_buf));
            malloc_write(" > max 255)\n");
            abort();
        }
    } else
#endif
    if (nbins > 256) {
        char line_buf[UMAX2S_BUFSIZE];
        malloc_write("<jemalloc>: Too many small size classes (");
        malloc_write(u2s(nbins, 10, line_buf));
        malloc_write(" > max 256)\n");
        abort();
    }

    /*
     * Compute the header size such that it is large enough to contain the
     * page map.  The page map is biased to omit entries for the header
     * itself, so some iteration is necessary to compute the map bias.
     *
     * 1) Compute safe header_size and map_bias values that include enough
     *    space for an unbiased page map.
     * 2) Refine map_bias based on (1) to omit the header pages in the page
     *    map.  The resulting map_bias may be one too small.
     * 3) Refine map_bias based on (2).  The result will be >= the result
     *    from (2), and will always be correct.
     */
    map_bias = 0;
    for (i = 0; i < 3; i++) {
        header_size = offsetof(arena_chunk_t, map)
            + (sizeof(arena_chunk_map_t) * (chunk_npages-map_bias));
        map_bias = (header_size >> PAGE_SHIFT) + ((header_size &
            PAGE_MASK) != 0);
    }
    assert(map_bias > 0);

    arena_maxclass = chunksize - (map_bias << PAGE_SHIFT);
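
    /*
     * Worked example of the map_bias refinement above (all numbers are
     * illustrative assumptions): with 4 KiB pages, chunk_npages == 1024, a
     * 64-byte fixed header, and 32-byte map entries, pass 1 (map_bias == 0)
     * computes header_size == 64 + 32 * 1024 == 32832 and map_bias == 9;
     * pass 2 recomputes with 1015 map entries, giving header_size == 32544
     * and map_bias == 8; pass 3 confirms map_bias == 8.  The first 8 pages of
     * each chunk then hold the header, and arena_maxclass is the chunk size
     * minus those 8 pages.
     */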
    if (small_size2bin_init())
        return (true);

    if (bin_info_init())