#define JEMALLOC_ARENA_C_
#include "jemalloc/internal/jemalloc_internal.h"

/******************************************************************************/

size_t opt_lg_qspace_max = LG_QSPACE_MAX_DEFAULT;
size_t opt_lg_cspace_max = LG_CSPACE_MAX_DEFAULT;
ssize_t opt_lg_dirty_mult = LG_DIRTY_MULT_DEFAULT;
uint8_t const *small_size2bin;
arena_bin_info_t *arena_bin_info;

/* Various bin-related settings. */
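/*
 * small_size2bin maps a small request size to a bin index, and
 * arena_bin_info carries the per-bin metadata (region size, run size, bitmap
 * layout) that the index resolves to.
 */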
/*
 * const_small_size2bin is a static constant lookup table that in the common
 * case can be used as-is for small_size2bin.
 */
#if (LG_TINY_MIN == 2)
#define S2B_4(i) i,
#define S2B_8(i) S2B_4(i) S2B_4(i)
#elif (LG_TINY_MIN == 3)
#define S2B_8(i) i,
#else
#  error "Unsupported LG_TINY_MIN"
#endif
#define S2B_16(i) S2B_8(i) S2B_8(i)
#define S2B_32(i) S2B_16(i) S2B_16(i)
#define S2B_64(i) S2B_32(i) S2B_32(i)
#define S2B_128(i) S2B_64(i) S2B_64(i)
#define S2B_256(i) S2B_128(i) S2B_128(i)
/*
 * The number of elements in const_small_size2bin is dependent on the
 * definition for SUBPAGE.
 */
static JEMALLOC_ATTR(aligned(CACHELINE))
    const uint8_t const_small_size2bin[] = {
    /* 16-byte quantum **********************/
#  if (LG_TINY_MIN == 2)
#  elif (LG_TINY_MIN == 3)
#    error "Unsupported LG_TINY_MIN"
    S2B_16(S2B_QMIN + 1)  /* 32 */
    S2B_16(S2B_QMIN + 2)  /* 48 */
    S2B_16(S2B_QMIN + 3)  /* 64 */
    S2B_16(S2B_QMIN + 4)  /* 80 */
    S2B_16(S2B_QMIN + 5)  /* 96 */
    S2B_16(S2B_QMIN + 6)  /* 112 */
    S2B_16(S2B_QMIN + 7)  /* 128 */
#  define S2B_CMIN (S2B_QMIN + 8)
    /* 8-byte quantum ***********************/
#  if (LG_TINY_MIN == 2)
#    error "Unsupported LG_TINY_MIN"
    S2B_8(S2B_QMIN + 1)   /* 16 */
    S2B_8(S2B_QMIN + 2)   /* 24 */
    S2B_8(S2B_QMIN + 3)   /* 32 */
    S2B_8(S2B_QMIN + 4)   /* 40 */
    S2B_8(S2B_QMIN + 5)   /* 48 */
    S2B_8(S2B_QMIN + 6)   /* 56 */
    S2B_8(S2B_QMIN + 7)   /* 64 */
    S2B_8(S2B_QMIN + 8)   /* 72 */
    S2B_8(S2B_QMIN + 9)   /* 80 */
    S2B_8(S2B_QMIN + 10)  /* 88 */
    S2B_8(S2B_QMIN + 11)  /* 96 */
    S2B_8(S2B_QMIN + 12)  /* 104 */
    S2B_8(S2B_QMIN + 13)  /* 112 */
    S2B_8(S2B_QMIN + 14)  /* 120 */
    S2B_8(S2B_QMIN + 15)  /* 128 */
#  define S2B_CMIN (S2B_QMIN + 16)
    /****************************************/
    S2B_64(S2B_CMIN + 0)  /* 192 */
    S2B_64(S2B_CMIN + 1)  /* 256 */
    S2B_64(S2B_CMIN + 2)  /* 320 */
    S2B_64(S2B_CMIN + 3)  /* 384 */
    S2B_64(S2B_CMIN + 4)  /* 448 */
    S2B_64(S2B_CMIN + 5)  /* 512 */
#  define S2B_SMIN (S2B_CMIN + 6)
    S2B_256(S2B_SMIN + 0)   /* 768 */
    S2B_256(S2B_SMIN + 1)   /* 1024 */
    S2B_256(S2B_SMIN + 2)   /* 1280 */
    S2B_256(S2B_SMIN + 3)   /* 1536 */
    S2B_256(S2B_SMIN + 4)   /* 1792 */
    S2B_256(S2B_SMIN + 5)   /* 2048 */
    S2B_256(S2B_SMIN + 6)   /* 2304 */
    S2B_256(S2B_SMIN + 7)   /* 2560 */
    S2B_256(S2B_SMIN + 8)   /* 2816 */
    S2B_256(S2B_SMIN + 9)   /* 3072 */
    S2B_256(S2B_SMIN + 10)  /* 3328 */
    S2B_256(S2B_SMIN + 11)  /* 3584 */
    S2B_256(S2B_SMIN + 12)  /* 3840 */
#if (STATIC_PAGE_SHIFT == 13)
    S2B_256(S2B_SMIN + 13)  /* 4096 */
    S2B_256(S2B_SMIN + 14)  /* 4352 */
    S2B_256(S2B_SMIN + 15)  /* 4608 */
    S2B_256(S2B_SMIN + 16)  /* 4864 */
    S2B_256(S2B_SMIN + 17)  /* 5120 */
    S2B_256(S2B_SMIN + 18)  /* 5376 */
    S2B_256(S2B_SMIN + 19)  /* 5632 */
    S2B_256(S2B_SMIN + 20)  /* 5888 */
    S2B_256(S2B_SMIN + 21)  /* 6144 */
    S2B_256(S2B_SMIN + 22)  /* 6400 */
    S2B_256(S2B_SMIN + 23)  /* 6656 */
    S2B_256(S2B_SMIN + 24)  /* 6912 */
    S2B_256(S2B_SMIN + 25)  /* 7168 */
    S2B_256(S2B_SMIN + 26)  /* 7424 */
    S2B_256(S2B_SMIN + 27)  /* 7680 */
    S2B_256(S2B_SMIN + 28)  /* 7936 */
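/*
 * Each S2B_<n>(i) entry above expands to n/(1 << LG_TINY_MIN) copies of bin
 * index i, so the table can presumably be indexed by
 * (size - 1) >> LG_TINY_MIN to find the smallest bin that fits a request.
 */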
/******************************************************************************/
/* Function prototypes for non-inline static functions. */

static void arena_run_split(arena_t *arena, arena_run_t *run, size_t size,
    bool large, bool zero);
static arena_chunk_t *arena_chunk_alloc(arena_t *arena);
static void arena_chunk_dealloc(arena_t *arena, arena_chunk_t *chunk);
static arena_run_t *arena_run_alloc(arena_t *arena, size_t size, bool large,
    bool zero);
static void arena_purge(arena_t *arena, bool all);
static void arena_run_dalloc(arena_t *arena, arena_run_t *run, bool dirty);
static void arena_run_trim_head(arena_t *arena, arena_chunk_t *chunk,
    arena_run_t *run, size_t oldsize, size_t newsize);
static void arena_run_trim_tail(arena_t *arena, arena_chunk_t *chunk,
    arena_run_t *run, size_t oldsize, size_t newsize, bool dirty);
static arena_run_t *arena_bin_nonfull_run_get(arena_t *arena, arena_bin_t *bin);
static void *arena_bin_malloc_hard(arena_t *arena, arena_bin_t *bin);
static void arena_dissociate_bin_run(arena_chunk_t *chunk, arena_run_t *run,
    arena_bin_t *bin);
static void arena_dalloc_bin_run(arena_t *arena, arena_chunk_t *chunk,
    arena_run_t *run, arena_bin_t *bin);
static void arena_bin_lower_run(arena_t *arena, arena_chunk_t *chunk,
    arena_run_t *run, arena_bin_t *bin);
static void arena_ralloc_large_shrink(arena_t *arena, arena_chunk_t *chunk,
    void *ptr, size_t oldsize, size_t size);
static bool arena_ralloc_large_grow(arena_t *arena, arena_chunk_t *chunk,
    void *ptr, size_t oldsize, size_t size, size_t extra, bool zero);
static bool arena_ralloc_large(void *ptr, size_t oldsize, size_t size,
    size_t extra, bool zero);
static bool small_size2bin_init(void);
#ifdef JEMALLOC_DEBUG
static void small_size2bin_validate(void);
#endif
static bool small_size2bin_init_hard(void);
static size_t bin_info_run_size_calc(arena_bin_info_t *bin_info,
    size_t min_run_size);
static bool bin_info_init(void);
/******************************************************************************/

static inline int
arena_run_comp(arena_chunk_map_t *a, arena_chunk_map_t *b)
{
    uintptr_t a_mapelm = (uintptr_t)a;
    uintptr_t b_mapelm = (uintptr_t)b;

    return ((a_mapelm > b_mapelm) - (a_mapelm < b_mapelm));
}

/* Generate red-black tree functions. */
rb_gen(static JEMALLOC_ATTR(unused), arena_run_tree_, arena_run_tree_t,
    arena_chunk_map_t, u.rb_link, arena_run_comp)
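/*
 * rb_gen() expands to a full set of static red-black tree operations named
 * arena_run_tree_*, keyed on arena_chunk_map_t nodes linked through u.rb_link
 * and ordered by arena_run_comp() above, i.e. by map element address.
 */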
static inline int
arena_avail_comp(arena_chunk_map_t *a, arena_chunk_map_t *b)
{
    int ret;
    size_t a_size = a->bits & ~PAGE_MASK;
    size_t b_size = b->bits & ~PAGE_MASK;

    assert((a->bits & CHUNK_MAP_KEY) == CHUNK_MAP_KEY || (a->bits &
        CHUNK_MAP_DIRTY) == (b->bits & CHUNK_MAP_DIRTY));

    ret = (a_size > b_size) - (a_size < b_size);
    if (ret == 0) {
        uintptr_t a_mapelm, b_mapelm;

        if ((a->bits & CHUNK_MAP_KEY) != CHUNK_MAP_KEY)
            a_mapelm = (uintptr_t)a;
        else {
            /*
             * Treat keys as though they are lower than anything
             * else.
             */
            a_mapelm = 0;
        }
        b_mapelm = (uintptr_t)b;

        ret = (a_mapelm > b_mapelm) - (a_mapelm < b_mapelm);
    }

    return (ret);
}

/* Generate red-black tree functions. */
rb_gen(static JEMALLOC_ATTR(unused), arena_avail_tree_, arena_avail_tree_t,
    arena_chunk_map_t, u.rb_link, arena_avail_comp)
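/*
 * The available-run trees order elements primarily by run size and
 * secondarily by map element address, so a best-fit nsearch() with a
 * CHUNK_MAP_KEY-tagged key returns the lowest-addressed run of the smallest
 * sufficient size.
 */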
static inline void *
arena_run_reg_alloc(arena_run_t *run, arena_bin_info_t *bin_info)
{
    void *ret;
    unsigned regind;
    bitmap_t *bitmap = (bitmap_t *)((uintptr_t)run +
        (uintptr_t)bin_info->bitmap_offset);

    dassert(run->magic == ARENA_RUN_MAGIC);
    assert(run->nfree > 0);
    assert(bitmap_full(bitmap, &bin_info->bitmap_info) == false);

    regind = bitmap_sfu(bitmap, &bin_info->bitmap_info);
    ret = (void *)((uintptr_t)run + (uintptr_t)bin_info->reg0_offset +
        (uintptr_t)(bin_info->reg_size * regind));
    run->nfree--;
    if (regind == run->nextind)
        run->nextind++;
    assert(regind < run->nextind);
    return (ret);
}
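/*
 * bitmap_sfu() ("set first unset") both finds and marks the lowest free
 * region index, so allocation within a run always hands out the lowest
 * available region; nextind records the high-water mark of regions ever
 * used, which arena_dalloc_bin_run() later uses to distinguish untouched
 * pages from dirtied ones.
 */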
static inline void
arena_run_reg_dalloc(arena_run_t *run, void *ptr)
{
    arena_chunk_t *chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(run);
    size_t binind = arena_bin_index(chunk->arena, run->bin);
    arena_bin_info_t *bin_info = &arena_bin_info[binind];
    unsigned regind = arena_run_regind(run, bin_info, ptr);
    bitmap_t *bitmap = (bitmap_t *)((uintptr_t)run +
        (uintptr_t)bin_info->bitmap_offset);

    assert(run->nfree < bin_info->nregs);
    /* Freeing an interior pointer can cause assertion failure. */
    assert(((uintptr_t)ptr - ((uintptr_t)run +
        (uintptr_t)bin_info->reg0_offset)) % (uintptr_t)bin_info->reg_size
        == 0);
    assert((uintptr_t)ptr >= (uintptr_t)run +
        (uintptr_t)bin_info->reg0_offset);
    /* Freeing an unallocated pointer can cause assertion failure. */
    assert(bitmap_get(bitmap, &bin_info->bitmap_info, regind));

    bitmap_unset(bitmap, &bin_info->bitmap_info, regind);
    run->nfree++;
}
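/*
 * Deallocation is the mirror image: the region index is recovered from the
 * pointer via arena_run_regind(), its bitmap bit is cleared, and nfree is
 * incremented; the interior/unallocated-pointer checks are debug-only
 * assertions.
 */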
#ifdef JEMALLOC_DEBUG
static void
arena_chunk_validate_zeroed(arena_chunk_t *chunk, size_t run_ind)
{
    size_t i;
    size_t *p = (size_t *)((uintptr_t)chunk + (run_ind << PAGE_SHIFT));

    for (i = 0; i < PAGE_SIZE / sizeof(size_t); i++)
        assert(p[i] == 0);
}
#endif
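/* Debug-only helper: asserts, one size_t at a time, that the page is zero. */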
static void
arena_run_split(arena_t *arena, arena_run_t *run, size_t size, bool large,
    bool zero)
{
    arena_chunk_t *chunk;
    size_t old_ndirty, run_ind, total_pages, need_pages, rem_pages, i;
    size_t flag_dirty;
    arena_avail_tree_t *runs_avail;
#ifdef JEMALLOC_STATS
    size_t cactive_diff;
#endif

    chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(run);
    old_ndirty = chunk->ndirty;
    run_ind = (unsigned)(((uintptr_t)run - (uintptr_t)chunk)
        >> PAGE_SHIFT);
    flag_dirty = chunk->map[run_ind-map_bias].bits & CHUNK_MAP_DIRTY;
    runs_avail = (flag_dirty != 0) ? &arena->runs_avail_dirty :
        &arena->runs_avail_clean;
    total_pages = (chunk->map[run_ind-map_bias].bits & ~PAGE_MASK) >>
        PAGE_SHIFT;
    assert((chunk->map[run_ind+total_pages-1-map_bias].bits &
        CHUNK_MAP_DIRTY) == flag_dirty);
    need_pages = (size >> PAGE_SHIFT);
    assert(need_pages > 0);
    assert(need_pages <= total_pages);
    rem_pages = total_pages - need_pages;

    arena_avail_tree_remove(runs_avail, &chunk->map[run_ind-map_bias]);
#ifdef JEMALLOC_STATS
    /* Update stats_cactive if nactive is crossing a chunk multiple. */
    cactive_diff = CHUNK_CEILING((arena->nactive + need_pages) <<
        PAGE_SHIFT) - CHUNK_CEILING(arena->nactive << PAGE_SHIFT);
    if (cactive_diff != 0)
        stats_cactive_add(cactive_diff);
#endif
    arena->nactive += need_pages;

    /* Keep track of trailing unused pages for later use. */
    if (rem_pages > 0) {
        if (flag_dirty != 0) {
            chunk->map[run_ind+need_pages-map_bias].bits =
                (rem_pages << PAGE_SHIFT) | CHUNK_MAP_DIRTY;
            chunk->map[run_ind+total_pages-1-map_bias].bits =
                (rem_pages << PAGE_SHIFT) | CHUNK_MAP_DIRTY;
        } else {
            chunk->map[run_ind+need_pages-map_bias].bits =
                (rem_pages << PAGE_SHIFT) |
                (chunk->map[run_ind+need_pages-map_bias].bits &
                CHUNK_MAP_UNZEROED);
            chunk->map[run_ind+total_pages-1-map_bias].bits =
                (rem_pages << PAGE_SHIFT) |
                (chunk->map[run_ind+total_pages-1-map_bias].bits &
                CHUNK_MAP_UNZEROED);
        }
        arena_avail_tree_insert(runs_avail,
            &chunk->map[run_ind+need_pages-map_bias]);
    }

    /* Update dirty page accounting. */
    if (flag_dirty != 0) {
        chunk->ndirty -= need_pages;
        arena->ndirty -= need_pages;
    }

    /*
     * Update the page map separately for large vs. small runs, since it is
     * possible to avoid iteration for large mallocs.
     */
    if (large) {
        if (zero) {
            if (flag_dirty == 0) {
                /*
                 * The run is clean, so some pages may be
                 * zeroed (i.e. never before touched).
                 */
                for (i = 0; i < need_pages; i++) {
                    if ((chunk->map[run_ind+i-map_bias].bits
                        & CHUNK_MAP_UNZEROED) != 0) {
                        memset((void *)((uintptr_t)
                            chunk + ((run_ind+i) <<
                            PAGE_SHIFT)), 0, PAGE_SIZE);
                    }
#ifdef JEMALLOC_DEBUG
                    else {
                        arena_chunk_validate_zeroed(
                            chunk, run_ind+i);
                    }
#endif
                }
            } else {
                /*
                 * The run is dirty, so all pages must be
                 * zeroed.
                 */
                memset((void *)((uintptr_t)chunk + (run_ind <<
                    PAGE_SHIFT)), 0, (need_pages <<
                    PAGE_SHIFT));
            }
        }

        /*
         * Set the last element first, in case the run only contains one
         * page (i.e. both statements set the same element).
         */
        chunk->map[run_ind+need_pages-1-map_bias].bits =
            CHUNK_MAP_LARGE | CHUNK_MAP_ALLOCATED | flag_dirty;
        chunk->map[run_ind-map_bias].bits = size | flag_dirty |
            CHUNK_MAP_LARGE | CHUNK_MAP_ALLOCATED;
    } else {
        assert(zero == false);
        /*
         * Propagate the dirty and unzeroed flags to the allocated
         * small run, so that arena_dalloc_bin_run() has the ability to
         * conditionally trim clean pages.
         */
        chunk->map[run_ind-map_bias].bits =
            (chunk->map[run_ind-map_bias].bits & CHUNK_MAP_UNZEROED) |
            CHUNK_MAP_ALLOCATED | flag_dirty;
#ifdef JEMALLOC_DEBUG
        /*
         * The first page will always be dirtied during small run
         * initialization, so a validation failure here would not
         * actually cause an observable failure.
         */
        if (flag_dirty == 0 &&
            (chunk->map[run_ind-map_bias].bits & CHUNK_MAP_UNZEROED)
            == 0)
            arena_chunk_validate_zeroed(chunk, run_ind);
#endif
        for (i = 1; i < need_pages - 1; i++) {
            chunk->map[run_ind+i-map_bias].bits = (i << PAGE_SHIFT)
                | (chunk->map[run_ind+i-map_bias].bits &
                CHUNK_MAP_UNZEROED) | CHUNK_MAP_ALLOCATED;
#ifdef JEMALLOC_DEBUG
            if (flag_dirty == 0 &&
                (chunk->map[run_ind+i-map_bias].bits &
                CHUNK_MAP_UNZEROED) == 0)
                arena_chunk_validate_zeroed(chunk, run_ind+i);
#endif
        }
        chunk->map[run_ind+need_pages-1-map_bias].bits = ((need_pages
            - 1) << PAGE_SHIFT) |
            (chunk->map[run_ind+need_pages-1-map_bias].bits &
            CHUNK_MAP_UNZEROED) | CHUNK_MAP_ALLOCATED | flag_dirty;
#ifdef JEMALLOC_DEBUG
        if (flag_dirty == 0 &&
            (chunk->map[run_ind+need_pages-1-map_bias].bits &
            CHUNK_MAP_UNZEROED) == 0) {
            arena_chunk_validate_zeroed(chunk,
                run_ind+need_pages-1);
        }
#endif
    }
}
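/*
 * In short, arena_run_split() carves need_pages out of a free run, re-files
 * any trailing remainder into the appropriate runs_avail tree, zeroes pages
 * only when the caller asked for zeroed memory and the page is not already
 * known to be zeroed, and stamps the chunk map so the run can later be
 * identified as large or small and as clean or dirty.
 */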
static arena_chunk_t *
arena_chunk_alloc(arena_t *arena)
{
    arena_chunk_t *chunk;
    size_t i;

    if (arena->spare != NULL) {
        arena_avail_tree_t *runs_avail;

        chunk = arena->spare;
        arena->spare = NULL;

        /* Insert the run into the appropriate runs_avail_* tree. */
        if ((chunk->map[0].bits & CHUNK_MAP_DIRTY) == 0)
            runs_avail = &arena->runs_avail_clean;
        else
            runs_avail = &arena->runs_avail_dirty;
        assert((chunk->map[0].bits & ~PAGE_MASK) == arena_maxclass);
        assert((chunk->map[chunk_npages-1-map_bias].bits & ~PAGE_MASK)
            == arena_maxclass);
        assert((chunk->map[0].bits & CHUNK_MAP_DIRTY) ==
            (chunk->map[chunk_npages-1-map_bias].bits &
            CHUNK_MAP_DIRTY));
        arena_avail_tree_insert(runs_avail, &chunk->map[0]);
    } else {
        bool zero;
        size_t unzeroed;

        zero = false;
        malloc_mutex_unlock(&arena->lock);
        chunk = (arena_chunk_t *)chunk_alloc(chunksize, false, &zero);
        malloc_mutex_lock(&arena->lock);
        if (chunk == NULL)
            return (NULL);
#ifdef JEMALLOC_STATS
        arena->stats.mapped += chunksize;
#endif

        chunk->arena = arena;
        ql_elm_new(chunk, link_dirty);
        chunk->dirtied = false;

        /*
         * Claim that no pages are in use, since the header is merely
         * overhead.
         */
        chunk->ndirty = 0;

        /*
         * Initialize the map to contain one maximal free untouched run.
         * Mark the pages as zeroed iff chunk_alloc() returned a zeroed
         * chunk.
         */
        unzeroed = zero ? 0 : CHUNK_MAP_UNZEROED;
        chunk->map[0].bits = arena_maxclass | unzeroed;
        /*
         * There is no need to initialize the internal page map entries
         * unless the chunk is not zeroed.
         */
        if (zero == false) {
            for (i = map_bias+1; i < chunk_npages-1; i++)
                chunk->map[i-map_bias].bits = unzeroed;
        }
#ifdef JEMALLOC_DEBUG
        else {
            for (i = map_bias+1; i < chunk_npages-1; i++)
                assert(chunk->map[i-map_bias].bits == unzeroed);
        }
#endif
        chunk->map[chunk_npages-1-map_bias].bits = arena_maxclass |
            unzeroed;

        /* Insert the run into the runs_avail_clean tree. */
        arena_avail_tree_insert(&arena->runs_avail_clean,
            &chunk->map[0]);
    }

    return (chunk);
}
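/*
 * A cached spare chunk is reused in preference to mapping a new one; a
 * freshly mapped chunk is described by a single maximal free run, with
 * CHUNK_MAP_UNZEROED recorded per page so later zeroed allocations can skip
 * memset() on pages the kernel already zeroed.
 */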
static void
arena_chunk_dealloc(arena_t *arena, arena_chunk_t *chunk)
{
    arena_avail_tree_t *runs_avail;

    /*
     * Remove run from the appropriate runs_avail_* tree, so that the arena
     * does not use it.
     */
    if ((chunk->map[0].bits & CHUNK_MAP_DIRTY) == 0)
        runs_avail = &arena->runs_avail_clean;
    else
        runs_avail = &arena->runs_avail_dirty;
    arena_avail_tree_remove(runs_avail, &chunk->map[0]);

    if (arena->spare != NULL) {
        arena_chunk_t *spare = arena->spare;

        arena->spare = chunk;
        if (spare->dirtied) {
            ql_remove(&chunk->arena->chunks_dirty, spare,
                link_dirty);
            arena->ndirty -= spare->ndirty;
        }
        malloc_mutex_unlock(&arena->lock);
        chunk_dealloc((void *)spare, chunksize, true);
        malloc_mutex_lock(&arena->lock);
#ifdef JEMALLOC_STATS
        arena->stats.mapped -= chunksize;
#endif
    } else
        arena->spare = chunk;
}
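/*
 * Rather than unmapping immediately, the emptied chunk becomes the arena's
 * spare and the previous spare (if any) is the one actually returned to the
 * chunk layer, which presumably dampens chunk-level thrashing when usage
 * oscillates around a chunk boundary.
 */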
static arena_run_t *
arena_run_alloc(arena_t *arena, size_t size, bool large, bool zero)
{
    arena_chunk_t *chunk;
    arena_run_t *run;
    arena_chunk_map_t *mapelm, key;

    assert(size <= arena_maxclass);
    assert((size & PAGE_MASK) == 0);

    /* Search the arena's chunks for the lowest best fit. */
    key.bits = size | CHUNK_MAP_KEY;
    mapelm = arena_avail_tree_nsearch(&arena->runs_avail_dirty, &key);
    if (mapelm != NULL) {
        arena_chunk_t *run_chunk = CHUNK_ADDR2BASE(mapelm);
        size_t pageind = (((uintptr_t)mapelm -
            (uintptr_t)run_chunk->map) / sizeof(arena_chunk_map_t))
            + map_bias;

        run = (arena_run_t *)((uintptr_t)run_chunk + (pageind <<
            PAGE_SHIFT));
        arena_run_split(arena, run, size, large, zero);
        return (run);
    }
    mapelm = arena_avail_tree_nsearch(&arena->runs_avail_clean, &key);
    if (mapelm != NULL) {
        arena_chunk_t *run_chunk = CHUNK_ADDR2BASE(mapelm);
        size_t pageind = (((uintptr_t)mapelm -
            (uintptr_t)run_chunk->map) / sizeof(arena_chunk_map_t))
            + map_bias;

        run = (arena_run_t *)((uintptr_t)run_chunk + (pageind <<
            PAGE_SHIFT));
        arena_run_split(arena, run, size, large, zero);
        return (run);
    }

    /*
     * No usable runs.  Create a new chunk from which to allocate the run.
     */
    chunk = arena_chunk_alloc(arena);
    if (chunk != NULL) {
        run = (arena_run_t *)((uintptr_t)chunk + (map_bias <<
            PAGE_SHIFT));
        arena_run_split(arena, run, size, large, zero);
        return (run);
    }

    /*
     * arena_chunk_alloc() failed, but another thread may have made
     * sufficient memory available while this one dropped arena->lock in
     * arena_chunk_alloc(), so search one more time.
     */
    mapelm = arena_avail_tree_nsearch(&arena->runs_avail_dirty, &key);
    if (mapelm != NULL) {
        arena_chunk_t *run_chunk = CHUNK_ADDR2BASE(mapelm);
        size_t pageind = (((uintptr_t)mapelm -
            (uintptr_t)run_chunk->map) / sizeof(arena_chunk_map_t))
            + map_bias;

        run = (arena_run_t *)((uintptr_t)run_chunk + (pageind <<
            PAGE_SHIFT));
        arena_run_split(arena, run, size, large, zero);
        return (run);
    }
    mapelm = arena_avail_tree_nsearch(&arena->runs_avail_clean, &key);
    if (mapelm != NULL) {
        arena_chunk_t *run_chunk = CHUNK_ADDR2BASE(mapelm);
        size_t pageind = (((uintptr_t)mapelm -
            (uintptr_t)run_chunk->map) / sizeof(arena_chunk_map_t))
            + map_bias;

        run = (arena_run_t *)((uintptr_t)run_chunk + (pageind <<
            PAGE_SHIFT));
        arena_run_split(arena, run, size, large, zero);
        return (run);
    }

    return (NULL);
}
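/*
 * Note that dirty runs are searched before clean ones at every step above,
 * presumably so that memory which would otherwise need to be purged is
 * reused first.
 */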
static inline void
arena_maybe_purge(arena_t *arena)
{

    /* Enforce opt_lg_dirty_mult. */
    if (opt_lg_dirty_mult >= 0 && arena->ndirty > arena->npurgatory &&
        (arena->ndirty - arena->npurgatory) > chunk_npages &&
        (arena->nactive >> opt_lg_dirty_mult) < (arena->ndirty -
        arena->npurgatory))
        arena_purge(arena, false);
}
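/*
 * Purging triggers only once the dirty pages not already committed to
 * purging exceed both chunk_npages and the active:dirty ratio implied by
 * opt_lg_dirty_mult (roughly ndirty > nactive / 2^opt_lg_dirty_mult).
 */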
675 arena_chunk_purge(arena_t
*arena
, arena_chunk_t
*chunk
)
677 ql_head(arena_chunk_map_t
) mapelms
;
678 arena_chunk_map_t
*mapelm
;
679 size_t pageind
, flag_unzeroed
;
680 #ifdef JEMALLOC_DEBUG
683 #ifdef JEMALLOC_STATS
690 #ifdef JEMALLOC_PURGE_MADVISE_DONTNEED
692 * madvise(..., MADV_DONTNEED) results in zero-filled pages for anonymous
693 * mappings, but not for file-backed mappings.
695 # ifdef JEMALLOC_SWAP
696 swap_enabled
? CHUNK_MAP_UNZEROED
:
704 * If chunk is the spare, temporarily re-allocate it, 1) so that its
705 * run is reinserted into runs_avail_dirty, and 2) so that it cannot be
706 * completely discarded by another thread while arena->lock is dropped
707 * by this thread. Note that the arena_run_dalloc() call will
708 * implicitly deallocate the chunk, so no explicit action is required
709 * in this function to deallocate the chunk.
711 * Note that once a chunk contains dirty pages, it cannot again contain
712 * a single run unless 1) it is a dirty run, or 2) this function purges
713 * dirty pages and causes the transition to a single clean run. Thus
714 * (chunk == arena->spare) is possible, but it is not possible for
715 * this function to be called on the spare unless it contains a dirty
718 if (chunk
== arena
->spare
) {
719 assert((chunk
->map
[0].bits
& CHUNK_MAP_DIRTY
) != 0);
720 arena_chunk_alloc(arena
);
723 /* Temporarily allocate all free dirty runs within chunk. */
724 for (pageind
= map_bias
; pageind
< chunk_npages
;) {
725 mapelm
= &chunk
->map
[pageind
-map_bias
];
726 if ((mapelm
->bits
& CHUNK_MAP_ALLOCATED
) == 0) {
729 npages
= mapelm
->bits
>> PAGE_SHIFT
;
730 assert(pageind
+ npages
<= chunk_npages
);
731 if (mapelm
->bits
& CHUNK_MAP_DIRTY
) {
733 #ifdef JEMALLOC_STATS
737 arena_avail_tree_remove(
738 &arena
->runs_avail_dirty
, mapelm
);
740 mapelm
->bits
= (npages
<< PAGE_SHIFT
) |
741 flag_unzeroed
| CHUNK_MAP_LARGE
|
744 * Update internal elements in the page map, so
745 * that CHUNK_MAP_UNZEROED is properly set.
747 for (i
= 1; i
< npages
- 1; i
++) {
748 chunk
->map
[pageind
+i
-map_bias
].bits
=
753 pageind
+npages
-1-map_bias
].bits
=
754 flag_unzeroed
| CHUNK_MAP_LARGE
|
758 #ifdef JEMALLOC_STATS
760 * Update stats_cactive if nactive is crossing a
763 cactive_diff
= CHUNK_CEILING((arena
->nactive
+
764 npages
) << PAGE_SHIFT
) -
765 CHUNK_CEILING(arena
->nactive
<< PAGE_SHIFT
);
766 if (cactive_diff
!= 0)
767 stats_cactive_add(cactive_diff
);
769 arena
->nactive
+= npages
;
770 /* Append to list for later processing. */
771 ql_elm_new(mapelm
, u
.ql_link
);
772 ql_tail_insert(&mapelms
, mapelm
, u
.ql_link
);
777 /* Skip allocated run. */
778 if (mapelm
->bits
& CHUNK_MAP_LARGE
)
779 pageind
+= mapelm
->bits
>> PAGE_SHIFT
;
781 arena_run_t
*run
= (arena_run_t
*)((uintptr_t)
782 chunk
+ (uintptr_t)(pageind
<< PAGE_SHIFT
));
784 assert((mapelm
->bits
>> PAGE_SHIFT
) == 0);
785 dassert(run
->magic
== ARENA_RUN_MAGIC
);
786 size_t binind
= arena_bin_index(arena
,
788 arena_bin_info_t
*bin_info
=
789 &arena_bin_info
[binind
];
790 pageind
+= bin_info
->run_size
>> PAGE_SHIFT
;
794 assert(pageind
== chunk_npages
);
796 #ifdef JEMALLOC_DEBUG
797 ndirty
= chunk
->ndirty
;
799 #ifdef JEMALLOC_STATS
800 arena
->stats
.purged
+= chunk
->ndirty
;
802 arena
->ndirty
-= chunk
->ndirty
;
804 ql_remove(&arena
->chunks_dirty
, chunk
, link_dirty
);
805 chunk
->dirtied
= false;
807 malloc_mutex_unlock(&arena
->lock
);
808 #ifdef JEMALLOC_STATS
811 ql_foreach(mapelm
, &mapelms
, u
.ql_link
) {
812 size_t pageind
= (((uintptr_t)mapelm
- (uintptr_t)chunk
->map
) /
813 sizeof(arena_chunk_map_t
)) + map_bias
;
814 size_t npages
= mapelm
->bits
>> PAGE_SHIFT
;
816 assert(pageind
+ npages
<= chunk_npages
);
817 #ifdef JEMALLOC_DEBUG
818 assert(ndirty
>= npages
);
822 #ifdef JEMALLOC_PURGE_MADVISE_DONTNEED
823 madvise((void *)((uintptr_t)chunk
+ (pageind
<< PAGE_SHIFT
)),
824 (npages
<< PAGE_SHIFT
), MADV_DONTNEED
);
825 #elif defined(JEMALLOC_PURGE_MADVISE_FREE)
826 madvise((void *)((uintptr_t)chunk
+ (pageind
<< PAGE_SHIFT
)),
827 (npages
<< PAGE_SHIFT
), MADV_FREE
);
829 # error "No method defined for purging unused dirty pages."
832 #ifdef JEMALLOC_STATS
836 #ifdef JEMALLOC_DEBUG
839 malloc_mutex_lock(&arena
->lock
);
840 #ifdef JEMALLOC_STATS
841 arena
->stats
.nmadvise
+= nmadvise
;
844 /* Deallocate runs. */
845 for (mapelm
= ql_first(&mapelms
); mapelm
!= NULL
;
846 mapelm
= ql_first(&mapelms
)) {
847 size_t pageind
= (((uintptr_t)mapelm
- (uintptr_t)chunk
->map
) /
848 sizeof(arena_chunk_map_t
)) + map_bias
;
849 arena_run_t
*run
= (arena_run_t
*)((uintptr_t)chunk
+
850 (uintptr_t)(pageind
<< PAGE_SHIFT
));
852 ql_remove(&mapelms
, mapelm
, u
.ql_link
);
853 arena_run_dalloc(arena
, run
, false);
858 arena_purge(arena_t
*arena
, bool all
)
860 arena_chunk_t
*chunk
;
862 #ifdef JEMALLOC_DEBUG
865 ql_foreach(chunk
, &arena
->chunks_dirty
, link_dirty
) {
866 assert(chunk
->dirtied
);
867 ndirty
+= chunk
->ndirty
;
869 assert(ndirty
== arena
->ndirty
);
871 assert(arena
->ndirty
> arena
->npurgatory
|| all
);
872 assert(arena
->ndirty
- arena
->npurgatory
> chunk_npages
|| all
);
873 assert((arena
->nactive
>> opt_lg_dirty_mult
) < (arena
->ndirty
-
874 arena
->npurgatory
) || all
);
876 #ifdef JEMALLOC_STATS
877 arena
->stats
.npurge
++;
881 * Compute the minimum number of pages that this thread should try to
882 * purge, and add the result to arena->npurgatory. This will keep
883 * multiple threads from racing to reduce ndirty below the threshold.
885 npurgatory
= arena
->ndirty
- arena
->npurgatory
;
887 assert(npurgatory
>= arena
->nactive
>> opt_lg_dirty_mult
);
888 npurgatory
-= arena
->nactive
>> opt_lg_dirty_mult
;
890 arena
->npurgatory
+= npurgatory
;
892 while (npurgatory
> 0) {
893 /* Get next chunk with dirty pages. */
894 chunk
= ql_first(&arena
->chunks_dirty
);
897 * This thread was unable to purge as many pages as
898 * originally intended, due to races with other threads
899 * that either did some of the purging work, or re-used
902 arena
->npurgatory
-= npurgatory
;
905 while (chunk
->ndirty
== 0) {
906 ql_remove(&arena
->chunks_dirty
, chunk
, link_dirty
);
907 chunk
->dirtied
= false;
908 chunk
= ql_first(&arena
->chunks_dirty
);
910 /* Same logic as for above. */
911 arena
->npurgatory
-= npurgatory
;
916 if (chunk
->ndirty
> npurgatory
) {
918 * This thread will, at a minimum, purge all the dirty
919 * pages in chunk, so set npurgatory to reflect this
920 * thread's commitment to purge the pages. This tends
921 * to reduce the chances of the following scenario:
923 * 1) This thread sets arena->npurgatory such that
924 * (arena->ndirty - arena->npurgatory) is at the
926 * 2) This thread drops arena->lock.
927 * 3) Another thread causes one or more pages to be
928 * dirtied, and immediately determines that it must
931 * If this scenario *does* play out, that's okay,
932 * because all of the purging work being done really
935 arena
->npurgatory
+= chunk
->ndirty
- npurgatory
;
936 npurgatory
= chunk
->ndirty
;
939 arena
->npurgatory
-= chunk
->ndirty
;
940 npurgatory
-= chunk
->ndirty
;
941 arena_chunk_purge(arena
, chunk
);
946 arena_purge_all(arena_t
*arena
)
949 malloc_mutex_lock(&arena
->lock
);
950 arena_purge(arena
, true);
951 malloc_mutex_unlock(&arena
->lock
);
955 arena_run_dalloc(arena_t
*arena
, arena_run_t
*run
, bool dirty
)
957 arena_chunk_t
*chunk
;
958 size_t size
, run_ind
, run_pages
, flag_dirty
;
959 arena_avail_tree_t
*runs_avail
;
960 #ifdef JEMALLOC_STATS
964 chunk
= (arena_chunk_t
*)CHUNK_ADDR2BASE(run
);
965 run_ind
= (size_t)(((uintptr_t)run
- (uintptr_t)chunk
)
967 assert(run_ind
>= map_bias
);
968 assert(run_ind
< chunk_npages
);
969 if ((chunk
->map
[run_ind
-map_bias
].bits
& CHUNK_MAP_LARGE
) != 0) {
970 size
= chunk
->map
[run_ind
-map_bias
].bits
& ~PAGE_MASK
;
971 assert(size
== PAGE_SIZE
||
972 (chunk
->map
[run_ind
+(size
>>PAGE_SHIFT
)-1-map_bias
].bits
&
974 assert((chunk
->map
[run_ind
+(size
>>PAGE_SHIFT
)-1-map_bias
].bits
&
975 CHUNK_MAP_LARGE
) != 0);
976 assert((chunk
->map
[run_ind
+(size
>>PAGE_SHIFT
)-1-map_bias
].bits
&
977 CHUNK_MAP_ALLOCATED
) != 0);
979 size_t binind
= arena_bin_index(arena
, run
->bin
);
980 arena_bin_info_t
*bin_info
= &arena_bin_info
[binind
];
981 size
= bin_info
->run_size
;
983 run_pages
= (size
>> PAGE_SHIFT
);
984 #ifdef JEMALLOC_STATS
985 /* Update stats_cactive if nactive is crossing a chunk multiple. */
986 cactive_diff
= CHUNK_CEILING(arena
->nactive
<< PAGE_SHIFT
) -
987 CHUNK_CEILING((arena
->nactive
- run_pages
) << PAGE_SHIFT
);
988 if (cactive_diff
!= 0)
989 stats_cactive_sub(cactive_diff
);
991 arena
->nactive
-= run_pages
;
994 * The run is dirty if the caller claims to have dirtied it, as well as
995 * if it was already dirty before being allocated.
997 if ((chunk
->map
[run_ind
-map_bias
].bits
& CHUNK_MAP_DIRTY
) != 0)
999 flag_dirty
= dirty
? CHUNK_MAP_DIRTY
: 0;
1000 runs_avail
= dirty
? &arena
->runs_avail_dirty
:
1001 &arena
->runs_avail_clean
;
1003 /* Mark pages as unallocated in the chunk map. */
1005 chunk
->map
[run_ind
-map_bias
].bits
= size
| CHUNK_MAP_DIRTY
;
1006 chunk
->map
[run_ind
+run_pages
-1-map_bias
].bits
= size
|
1009 chunk
->ndirty
+= run_pages
;
1010 arena
->ndirty
+= run_pages
;
1012 chunk
->map
[run_ind
-map_bias
].bits
= size
|
1013 (chunk
->map
[run_ind
-map_bias
].bits
& CHUNK_MAP_UNZEROED
);
1014 chunk
->map
[run_ind
+run_pages
-1-map_bias
].bits
= size
|
1015 (chunk
->map
[run_ind
+run_pages
-1-map_bias
].bits
&
1016 CHUNK_MAP_UNZEROED
);
1019 /* Try to coalesce forward. */
1020 if (run_ind
+ run_pages
< chunk_npages
&&
1021 (chunk
->map
[run_ind
+run_pages
-map_bias
].bits
& CHUNK_MAP_ALLOCATED
)
1022 == 0 && (chunk
->map
[run_ind
+run_pages
-map_bias
].bits
&
1023 CHUNK_MAP_DIRTY
) == flag_dirty
) {
1024 size_t nrun_size
= chunk
->map
[run_ind
+run_pages
-map_bias
].bits
&
1026 size_t nrun_pages
= nrun_size
>> PAGE_SHIFT
;
1029 * Remove successor from runs_avail; the coalesced run is
1032 assert((chunk
->map
[run_ind
+run_pages
+nrun_pages
-1-map_bias
].bits
1033 & ~PAGE_MASK
) == nrun_size
);
1034 assert((chunk
->map
[run_ind
+run_pages
+nrun_pages
-1-map_bias
].bits
1035 & CHUNK_MAP_ALLOCATED
) == 0);
1036 assert((chunk
->map
[run_ind
+run_pages
+nrun_pages
-1-map_bias
].bits
1037 & CHUNK_MAP_DIRTY
) == flag_dirty
);
1038 arena_avail_tree_remove(runs_avail
,
1039 &chunk
->map
[run_ind
+run_pages
-map_bias
]);
1042 run_pages
+= nrun_pages
;
1044 chunk
->map
[run_ind
-map_bias
].bits
= size
|
1045 (chunk
->map
[run_ind
-map_bias
].bits
& CHUNK_MAP_FLAGS_MASK
);
1046 chunk
->map
[run_ind
+run_pages
-1-map_bias
].bits
= size
|
1047 (chunk
->map
[run_ind
+run_pages
-1-map_bias
].bits
&
1048 CHUNK_MAP_FLAGS_MASK
);
1051 /* Try to coalesce backward. */
1052 if (run_ind
> map_bias
&& (chunk
->map
[run_ind
-1-map_bias
].bits
&
1053 CHUNK_MAP_ALLOCATED
) == 0 && (chunk
->map
[run_ind
-1-map_bias
].bits
&
1054 CHUNK_MAP_DIRTY
) == flag_dirty
) {
1055 size_t prun_size
= chunk
->map
[run_ind
-1-map_bias
].bits
&
1057 size_t prun_pages
= prun_size
>> PAGE_SHIFT
;
1059 run_ind
-= prun_pages
;
1062 * Remove predecessor from runs_avail; the coalesced run is
1065 assert((chunk
->map
[run_ind
-map_bias
].bits
& ~PAGE_MASK
)
1067 assert((chunk
->map
[run_ind
-map_bias
].bits
& CHUNK_MAP_ALLOCATED
)
1069 assert((chunk
->map
[run_ind
-map_bias
].bits
& CHUNK_MAP_DIRTY
)
1071 arena_avail_tree_remove(runs_avail
,
1072 &chunk
->map
[run_ind
-map_bias
]);
1075 run_pages
+= prun_pages
;
1077 chunk
->map
[run_ind
-map_bias
].bits
= size
|
1078 (chunk
->map
[run_ind
-map_bias
].bits
& CHUNK_MAP_FLAGS_MASK
);
1079 chunk
->map
[run_ind
+run_pages
-1-map_bias
].bits
= size
|
1080 (chunk
->map
[run_ind
+run_pages
-1-map_bias
].bits
&
1081 CHUNK_MAP_FLAGS_MASK
);
1084 /* Insert into runs_avail, now that coalescing is complete. */
1085 assert((chunk
->map
[run_ind
-map_bias
].bits
& ~PAGE_MASK
) ==
1086 (chunk
->map
[run_ind
+run_pages
-1-map_bias
].bits
& ~PAGE_MASK
));
1087 assert((chunk
->map
[run_ind
-map_bias
].bits
& CHUNK_MAP_DIRTY
) ==
1088 (chunk
->map
[run_ind
+run_pages
-1-map_bias
].bits
& CHUNK_MAP_DIRTY
));
1089 arena_avail_tree_insert(runs_avail
, &chunk
->map
[run_ind
-map_bias
]);
1093 * Insert into chunks_dirty before potentially calling
1094 * arena_chunk_dealloc(), so that chunks_dirty and
1095 * arena->ndirty are consistent.
1097 if (chunk
->dirtied
== false) {
1098 ql_tail_insert(&arena
->chunks_dirty
, chunk
, link_dirty
);
1099 chunk
->dirtied
= true;
1104 * Deallocate chunk if it is now completely unused. The bit
1105 * manipulation checks whether the first run is unallocated and extends
1106 * to the end of the chunk.
1108 if ((chunk
->map
[0].bits
& (~PAGE_MASK
| CHUNK_MAP_ALLOCATED
)) ==
1110 arena_chunk_dealloc(arena
, chunk
);
1113 * It is okay to do dirty page processing here even if the chunk was
1114 * deallocated above, since in that case it is the spare. Waiting
1115 * until after possible chunk deallocation to do dirty processing
1116 * allows for an old spare to be fully deallocated, thus decreasing the
1117 * chances of spuriously crossing the dirty page purging threshold.
1120 arena_maybe_purge(arena
);
1124 arena_run_trim_head(arena_t
*arena
, arena_chunk_t
*chunk
, arena_run_t
*run
,
1125 size_t oldsize
, size_t newsize
)
1127 size_t pageind
= ((uintptr_t)run
- (uintptr_t)chunk
) >> PAGE_SHIFT
;
1128 size_t head_npages
= (oldsize
- newsize
) >> PAGE_SHIFT
;
1129 size_t flag_dirty
= chunk
->map
[pageind
-map_bias
].bits
& CHUNK_MAP_DIRTY
;
1131 assert(oldsize
> newsize
);
1134 * Update the chunk map so that arena_run_dalloc() can treat the
1135 * leading run as separately allocated. Set the last element of each
1136 * run first, in case of single-page runs.
1138 assert((chunk
->map
[pageind
-map_bias
].bits
& CHUNK_MAP_LARGE
) != 0);
1139 assert((chunk
->map
[pageind
-map_bias
].bits
& CHUNK_MAP_ALLOCATED
) != 0);
1140 chunk
->map
[pageind
+head_npages
-1-map_bias
].bits
= flag_dirty
|
1141 (chunk
->map
[pageind
+head_npages
-1-map_bias
].bits
&
1142 CHUNK_MAP_UNZEROED
) | CHUNK_MAP_LARGE
| CHUNK_MAP_ALLOCATED
;
1143 chunk
->map
[pageind
-map_bias
].bits
= (oldsize
- newsize
)
1144 | flag_dirty
| (chunk
->map
[pageind
-map_bias
].bits
&
1145 CHUNK_MAP_UNZEROED
) | CHUNK_MAP_LARGE
| CHUNK_MAP_ALLOCATED
;
1147 #ifdef JEMALLOC_DEBUG
1149 size_t tail_npages
= newsize
>> PAGE_SHIFT
;
1150 assert((chunk
->map
[pageind
+head_npages
+tail_npages
-1-map_bias
]
1151 .bits
& ~PAGE_MASK
) == 0);
1152 assert((chunk
->map
[pageind
+head_npages
+tail_npages
-1-map_bias
]
1153 .bits
& CHUNK_MAP_DIRTY
) == flag_dirty
);
1154 assert((chunk
->map
[pageind
+head_npages
+tail_npages
-1-map_bias
]
1155 .bits
& CHUNK_MAP_LARGE
) != 0);
1156 assert((chunk
->map
[pageind
+head_npages
+tail_npages
-1-map_bias
]
1157 .bits
& CHUNK_MAP_ALLOCATED
) != 0);
1160 chunk
->map
[pageind
+head_npages
-map_bias
].bits
= newsize
| flag_dirty
|
1161 (chunk
->map
[pageind
+head_npages
-map_bias
].bits
&
1162 CHUNK_MAP_FLAGS_MASK
) | CHUNK_MAP_LARGE
| CHUNK_MAP_ALLOCATED
;
1164 arena_run_dalloc(arena
, run
, false);
1168 arena_run_trim_tail(arena_t
*arena
, arena_chunk_t
*chunk
, arena_run_t
*run
,
1169 size_t oldsize
, size_t newsize
, bool dirty
)
1171 size_t pageind
= ((uintptr_t)run
- (uintptr_t)chunk
) >> PAGE_SHIFT
;
1172 size_t head_npages
= newsize
>> PAGE_SHIFT
;
1173 size_t tail_npages
= (oldsize
- newsize
) >> PAGE_SHIFT
;
1174 size_t flag_dirty
= chunk
->map
[pageind
-map_bias
].bits
&
1177 assert(oldsize
> newsize
);
1180 * Update the chunk map so that arena_run_dalloc() can treat the
1181 * trailing run as separately allocated. Set the last element of each
1182 * run first, in case of single-page runs.
1184 assert((chunk
->map
[pageind
-map_bias
].bits
& CHUNK_MAP_LARGE
) != 0);
1185 assert((chunk
->map
[pageind
-map_bias
].bits
& CHUNK_MAP_ALLOCATED
) != 0);
1186 chunk
->map
[pageind
+head_npages
-1-map_bias
].bits
= flag_dirty
|
1187 (chunk
->map
[pageind
+head_npages
-1-map_bias
].bits
&
1188 CHUNK_MAP_UNZEROED
) | CHUNK_MAP_LARGE
| CHUNK_MAP_ALLOCATED
;
1189 chunk
->map
[pageind
-map_bias
].bits
= newsize
| flag_dirty
|
1190 (chunk
->map
[pageind
-map_bias
].bits
& CHUNK_MAP_UNZEROED
) |
1191 CHUNK_MAP_LARGE
| CHUNK_MAP_ALLOCATED
;
1193 assert((chunk
->map
[pageind
+head_npages
+tail_npages
-1-map_bias
].bits
&
1195 assert((chunk
->map
[pageind
+head_npages
+tail_npages
-1-map_bias
].bits
&
1196 CHUNK_MAP_LARGE
) != 0);
1197 assert((chunk
->map
[pageind
+head_npages
+tail_npages
-1-map_bias
].bits
&
1198 CHUNK_MAP_ALLOCATED
) != 0);
1199 chunk
->map
[pageind
+head_npages
+tail_npages
-1-map_bias
].bits
=
1201 (chunk
->map
[pageind
+head_npages
+tail_npages
-1-map_bias
].bits
&
1202 CHUNK_MAP_UNZEROED
) | CHUNK_MAP_LARGE
| CHUNK_MAP_ALLOCATED
;
1203 chunk
->map
[pageind
+head_npages
-map_bias
].bits
= (oldsize
- newsize
) |
1204 flag_dirty
| (chunk
->map
[pageind
+head_npages
-map_bias
].bits
&
1205 CHUNK_MAP_UNZEROED
) | CHUNK_MAP_LARGE
| CHUNK_MAP_ALLOCATED
;
1207 arena_run_dalloc(arena
, (arena_run_t
*)((uintptr_t)run
+ newsize
),
1211 static arena_run_t
*
1212 arena_bin_nonfull_run_get(arena_t
*arena
, arena_bin_t
*bin
)
1214 arena_chunk_map_t
*mapelm
;
1217 arena_bin_info_t
*bin_info
;
1219 /* Look for a usable run. */
1220 mapelm
= arena_run_tree_first(&bin
->runs
);
1221 if (mapelm
!= NULL
) {
1222 arena_chunk_t
*chunk
;
1225 /* run is guaranteed to have available space. */
1226 arena_run_tree_remove(&bin
->runs
, mapelm
);
1228 chunk
= (arena_chunk_t
*)CHUNK_ADDR2BASE(mapelm
);
1229 pageind
= ((((uintptr_t)mapelm
- (uintptr_t)chunk
->map
) /
1230 sizeof(arena_chunk_map_t
))) + map_bias
;
1231 run
= (arena_run_t
*)((uintptr_t)chunk
+ (uintptr_t)((pageind
-
1232 (mapelm
->bits
>> PAGE_SHIFT
))
1234 #ifdef JEMALLOC_STATS
1235 bin
->stats
.reruns
++;
1239 /* No existing runs have any space available. */
1241 binind
= arena_bin_index(arena
, bin
);
1242 bin_info
= &arena_bin_info
[binind
];
1244 /* Allocate a new run. */
1245 malloc_mutex_unlock(&bin
->lock
);
1246 /******************************/
1247 malloc_mutex_lock(&arena
->lock
);
1248 run
= arena_run_alloc(arena
, bin_info
->run_size
, false, false);
1250 bitmap_t
*bitmap
= (bitmap_t
*)((uintptr_t)run
+
1251 (uintptr_t)bin_info
->bitmap_offset
);
1253 /* Initialize run internals. */
1256 run
->nfree
= bin_info
->nregs
;
1257 bitmap_init(bitmap
, &bin_info
->bitmap_info
);
1258 #ifdef JEMALLOC_DEBUG
1259 run
->magic
= ARENA_RUN_MAGIC
;
1262 malloc_mutex_unlock(&arena
->lock
);
1263 /********************************/
1264 malloc_mutex_lock(&bin
->lock
);
1266 #ifdef JEMALLOC_STATS
1268 bin
->stats
.curruns
++;
1269 if (bin
->stats
.curruns
> bin
->stats
.highruns
)
1270 bin
->stats
.highruns
= bin
->stats
.curruns
;
1276 * arena_run_alloc() failed, but another thread may have made
1277 * sufficient memory available while this one dropped bin->lock above,
1278 * so search one more time.
1280 mapelm
= arena_run_tree_first(&bin
->runs
);
1281 if (mapelm
!= NULL
) {
1282 arena_chunk_t
*chunk
;
1285 /* run is guaranteed to have available space. */
1286 arena_run_tree_remove(&bin
->runs
, mapelm
);
1288 chunk
= (arena_chunk_t
*)CHUNK_ADDR2BASE(mapelm
);
1289 pageind
= ((((uintptr_t)mapelm
- (uintptr_t)chunk
->map
) /
1290 sizeof(arena_chunk_map_t
))) + map_bias
;
1291 run
= (arena_run_t
*)((uintptr_t)chunk
+ (uintptr_t)((pageind
-
1292 (mapelm
->bits
>> PAGE_SHIFT
))
1294 #ifdef JEMALLOC_STATS
1295 bin
->stats
.reruns
++;
1303 /* Re-fill bin->runcur, then call arena_run_reg_alloc(). */
1305 arena_bin_malloc_hard(arena_t
*arena
, arena_bin_t
*bin
)
1309 arena_bin_info_t
*bin_info
;
1312 binind
= arena_bin_index(arena
, bin
);
1313 bin_info
= &arena_bin_info
[binind
];
1315 run
= arena_bin_nonfull_run_get(arena
, bin
);
1316 if (bin
->runcur
!= NULL
&& bin
->runcur
->nfree
> 0) {
1318 * Another thread updated runcur while this one ran without the
1319 * bin lock in arena_bin_nonfull_run_get().
1321 dassert(bin
->runcur
->magic
== ARENA_RUN_MAGIC
);
1322 assert(bin
->runcur
->nfree
> 0);
1323 ret
= arena_run_reg_alloc(bin
->runcur
, bin_info
);
1325 arena_chunk_t
*chunk
;
1328 * arena_run_alloc() may have allocated run, or it may
1329 * have pulled run from the bin's run tree. Therefore
1330 * it is unsafe to make any assumptions about how run
1331 * has previously been used, and arena_bin_lower_run()
1332 * must be called, as if a region were just deallocated
1335 chunk
= (arena_chunk_t
*)CHUNK_ADDR2BASE(run
);
1336 if (run
->nfree
== bin_info
->nregs
)
1337 arena_dalloc_bin_run(arena
, chunk
, run
, bin
);
1339 arena_bin_lower_run(arena
, chunk
, run
, bin
);
1349 dassert(bin
->runcur
->magic
== ARENA_RUN_MAGIC
);
1350 assert(bin
->runcur
->nfree
> 0);
1352 return (arena_run_reg_alloc(bin
->runcur
, bin_info
));
1355 #ifdef JEMALLOC_PROF
1357 arena_prof_accum(arena_t
*arena
, uint64_t accumbytes
)
1360 if (prof_interval
!= 0) {
1361 arena
->prof_accumbytes
+= accumbytes
;
1362 if (arena
->prof_accumbytes
>= prof_interval
) {
1364 arena
->prof_accumbytes
-= prof_interval
;
1370 #ifdef JEMALLOC_TCACHE
1372 arena_tcache_fill_small(arena_t
*arena
, tcache_bin_t
*tbin
, size_t binind
1373 # ifdef JEMALLOC_PROF
1374 , uint64_t prof_accumbytes
1383 assert(tbin
->ncached
== 0);
1385 #ifdef JEMALLOC_PROF
1386 malloc_mutex_lock(&arena
->lock
);
1387 arena_prof_accum(arena
, prof_accumbytes
);
1388 malloc_mutex_unlock(&arena
->lock
);
1390 bin
= &arena
->bins
[binind
];
1391 malloc_mutex_lock(&bin
->lock
);
1392 for (i
= 0, nfill
= (tcache_bin_info
[binind
].ncached_max
>>
1393 tbin
->lg_fill_div
); i
< nfill
; i
++) {
1394 if ((run
= bin
->runcur
) != NULL
&& run
->nfree
> 0)
1395 ptr
= arena_run_reg_alloc(run
, &arena_bin_info
[binind
]);
1397 ptr
= arena_bin_malloc_hard(arena
, bin
);
1400 /* Insert such that low regions get used first. */
1401 tbin
->avail
[nfill
- 1 - i
] = ptr
;
1403 #ifdef JEMALLOC_STATS
1404 bin
->stats
.allocated
+= i
* arena_bin_info
[binind
].reg_size
;
1405 bin
->stats
.nmalloc
+= i
;
1406 bin
->stats
.nrequests
+= tbin
->tstats
.nrequests
;
1407 bin
->stats
.nfills
++;
1408 tbin
->tstats
.nrequests
= 0;
1410 malloc_mutex_unlock(&bin
->lock
);
1416 arena_malloc_small(arena_t
*arena
, size_t size
, bool zero
)
1423 binind
= SMALL_SIZE2BIN(size
);
1424 assert(binind
< nbins
);
1425 bin
= &arena
->bins
[binind
];
1426 size
= arena_bin_info
[binind
].reg_size
;
1428 malloc_mutex_lock(&bin
->lock
);
1429 if ((run
= bin
->runcur
) != NULL
&& run
->nfree
> 0)
1430 ret
= arena_run_reg_alloc(run
, &arena_bin_info
[binind
]);
1432 ret
= arena_bin_malloc_hard(arena
, bin
);
1435 malloc_mutex_unlock(&bin
->lock
);
1439 #ifdef JEMALLOC_STATS
1440 bin
->stats
.allocated
+= size
;
1441 bin
->stats
.nmalloc
++;
1442 bin
->stats
.nrequests
++;
1444 malloc_mutex_unlock(&bin
->lock
);
1445 #ifdef JEMALLOC_PROF
1446 if (isthreaded
== false) {
1447 malloc_mutex_lock(&arena
->lock
);
1448 arena_prof_accum(arena
, size
);
1449 malloc_mutex_unlock(&arena
->lock
);
1453 if (zero
== false) {
1454 #ifdef JEMALLOC_FILL
1456 memset(ret
, 0xa5, size
);
1458 memset(ret
, 0, size
);
1461 memset(ret
, 0, size
);
1467 arena_malloc_large(arena_t
*arena
, size_t size
, bool zero
)
1471 /* Large allocation. */
1472 size
= PAGE_CEILING(size
);
1473 malloc_mutex_lock(&arena
->lock
);
1474 ret
= (void *)arena_run_alloc(arena
, size
, true, zero
);
1476 malloc_mutex_unlock(&arena
->lock
);
1479 #ifdef JEMALLOC_STATS
1480 arena
->stats
.nmalloc_large
++;
1481 arena
->stats
.nrequests_large
++;
1482 arena
->stats
.allocated_large
+= size
;
1483 arena
->stats
.lstats
[(size
>> PAGE_SHIFT
) - 1].nmalloc
++;
1484 arena
->stats
.lstats
[(size
>> PAGE_SHIFT
) - 1].nrequests
++;
1485 arena
->stats
.lstats
[(size
>> PAGE_SHIFT
) - 1].curruns
++;
1486 if (arena
->stats
.lstats
[(size
>> PAGE_SHIFT
) - 1].curruns
>
1487 arena
->stats
.lstats
[(size
>> PAGE_SHIFT
) - 1].highruns
) {
1488 arena
->stats
.lstats
[(size
>> PAGE_SHIFT
) - 1].highruns
=
1489 arena
->stats
.lstats
[(size
>> PAGE_SHIFT
) - 1].curruns
;
1492 #ifdef JEMALLOC_PROF
1493 arena_prof_accum(arena
, size
);
1495 malloc_mutex_unlock(&arena
->lock
);
1497 if (zero
== false) {
1498 #ifdef JEMALLOC_FILL
1500 memset(ret
, 0xa5, size
);
1502 memset(ret
, 0, size
);
1510 arena_malloc(size_t size
, bool zero
)
1514 assert(QUANTUM_CEILING(size
) <= arena_maxclass
);
1516 if (size
<= small_maxclass
) {
1517 #ifdef JEMALLOC_TCACHE
1520 if ((tcache
= tcache_get()) != NULL
)
1521 return (tcache_alloc_small(tcache
, size
, zero
));
1525 return (arena_malloc_small(choose_arena(), size
, zero
));
1527 #ifdef JEMALLOC_TCACHE
1528 if (size
<= tcache_maxclass
) {
1531 if ((tcache
= tcache_get()) != NULL
)
1532 return (tcache_alloc_large(tcache
, size
, zero
));
1534 return (arena_malloc_large(choose_arena(),
1539 return (arena_malloc_large(choose_arena(), size
, zero
));
1543 /* Only handles large allocations that require more than page alignment. */
1545 arena_palloc(arena_t
*arena
, size_t size
, size_t alloc_size
, size_t alignment
,
1550 arena_chunk_t
*chunk
;
1552 assert((size
& PAGE_MASK
) == 0);
1554 alignment
= PAGE_CEILING(alignment
);
1556 malloc_mutex_lock(&arena
->lock
);
1557 ret
= (void *)arena_run_alloc(arena
, alloc_size
, true, zero
);
1559 malloc_mutex_unlock(&arena
->lock
);
1563 chunk
= (arena_chunk_t
*)CHUNK_ADDR2BASE(ret
);
1565 offset
= (uintptr_t)ret
& (alignment
- 1);
1566 assert((offset
& PAGE_MASK
) == 0);
1567 assert(offset
< alloc_size
);
1569 arena_run_trim_tail(arena
, chunk
, ret
, alloc_size
, size
, false);
1571 size_t leadsize
, trailsize
;
1573 leadsize
= alignment
- offset
;
1575 arena_run_trim_head(arena
, chunk
, ret
, alloc_size
,
1576 alloc_size
- leadsize
);
1577 ret
= (void *)((uintptr_t)ret
+ leadsize
);
1580 trailsize
= alloc_size
- leadsize
- size
;
1581 if (trailsize
!= 0) {
1582 /* Trim trailing space. */
1583 assert(trailsize
< alloc_size
);
1584 arena_run_trim_tail(arena
, chunk
, ret
, size
+ trailsize
,
1589 #ifdef JEMALLOC_STATS
1590 arena
->stats
.nmalloc_large
++;
1591 arena
->stats
.nrequests_large
++;
1592 arena
->stats
.allocated_large
+= size
;
1593 arena
->stats
.lstats
[(size
>> PAGE_SHIFT
) - 1].nmalloc
++;
1594 arena
->stats
.lstats
[(size
>> PAGE_SHIFT
) - 1].nrequests
++;
1595 arena
->stats
.lstats
[(size
>> PAGE_SHIFT
) - 1].curruns
++;
1596 if (arena
->stats
.lstats
[(size
>> PAGE_SHIFT
) - 1].curruns
>
1597 arena
->stats
.lstats
[(size
>> PAGE_SHIFT
) - 1].highruns
) {
1598 arena
->stats
.lstats
[(size
>> PAGE_SHIFT
) - 1].highruns
=
1599 arena
->stats
.lstats
[(size
>> PAGE_SHIFT
) - 1].curruns
;
1602 malloc_mutex_unlock(&arena
->lock
);
1604 #ifdef JEMALLOC_FILL
1605 if (zero
== false) {
1607 memset(ret
, 0xa5, size
);
1609 memset(ret
, 0, size
);
1615 /* Return the size of the allocation pointed to by ptr. */
1617 arena_salloc(const void *ptr
)
1620 arena_chunk_t
*chunk
;
1621 size_t pageind
, mapbits
;
1623 assert(ptr
!= NULL
);
1624 assert(CHUNK_ADDR2BASE(ptr
) != ptr
);
1626 chunk
= (arena_chunk_t
*)CHUNK_ADDR2BASE(ptr
);
1627 pageind
= ((uintptr_t)ptr
- (uintptr_t)chunk
) >> PAGE_SHIFT
;
1628 mapbits
= chunk
->map
[pageind
-map_bias
].bits
;
1629 assert((mapbits
& CHUNK_MAP_ALLOCATED
) != 0);
1630 if ((mapbits
& CHUNK_MAP_LARGE
) == 0) {
1631 arena_run_t
*run
= (arena_run_t
*)((uintptr_t)chunk
+
1632 (uintptr_t)((pageind
- (mapbits
>> PAGE_SHIFT
)) <<
1634 dassert(run
->magic
== ARENA_RUN_MAGIC
);
1635 size_t binind
= arena_bin_index(chunk
->arena
, run
->bin
);
1636 arena_bin_info_t
*bin_info
= &arena_bin_info
[binind
];
1637 assert(((uintptr_t)ptr
- ((uintptr_t)run
+
1638 (uintptr_t)bin_info
->reg0_offset
)) % bin_info
->reg_size
==
1640 ret
= bin_info
->reg_size
;
1642 assert(((uintptr_t)ptr
& PAGE_MASK
) == 0);
1643 ret
= mapbits
& ~PAGE_MASK
;
1650 #ifdef JEMALLOC_PROF
1652 arena_prof_promoted(const void *ptr
, size_t size
)
1654 arena_chunk_t
*chunk
;
1655 size_t pageind
, binind
;
1657 assert(ptr
!= NULL
);
1658 assert(CHUNK_ADDR2BASE(ptr
) != ptr
);
1659 assert(isalloc(ptr
) == PAGE_SIZE
);
1660 assert(size
<= small_maxclass
);
1662 chunk
= (arena_chunk_t
*)CHUNK_ADDR2BASE(ptr
);
1663 pageind
= ((uintptr_t)ptr
- (uintptr_t)chunk
) >> PAGE_SHIFT
;
1664 binind
= SMALL_SIZE2BIN(size
);
1665 assert(binind
< nbins
);
1666 chunk
->map
[pageind
-map_bias
].bits
= (chunk
->map
[pageind
-map_bias
].bits
&
1667 ~CHUNK_MAP_CLASS_MASK
) | ((binind
+1) << CHUNK_MAP_CLASS_SHIFT
);
1671 arena_salloc_demote(const void *ptr
)
1674 arena_chunk_t
*chunk
;
1675 size_t pageind
, mapbits
;
1677 assert(ptr
!= NULL
);
1678 assert(CHUNK_ADDR2BASE(ptr
) != ptr
);
1680 chunk
= (arena_chunk_t
*)CHUNK_ADDR2BASE(ptr
);
1681 pageind
= ((uintptr_t)ptr
- (uintptr_t)chunk
) >> PAGE_SHIFT
;
1682 mapbits
= chunk
->map
[pageind
-map_bias
].bits
;
1683 assert((mapbits
& CHUNK_MAP_ALLOCATED
) != 0);
1684 if ((mapbits
& CHUNK_MAP_LARGE
) == 0) {
1685 arena_run_t
*run
= (arena_run_t
*)((uintptr_t)chunk
+
1686 (uintptr_t)((pageind
- (mapbits
>> PAGE_SHIFT
)) <<
1688 dassert(run
->magic
== ARENA_RUN_MAGIC
);
1689 size_t binind
= arena_bin_index(chunk
->arena
, run
->bin
);
1690 arena_bin_info_t
*bin_info
= &arena_bin_info
[binind
];
1691 assert(((uintptr_t)ptr
- ((uintptr_t)run
+
1692 (uintptr_t)bin_info
->reg0_offset
)) % bin_info
->reg_size
==
1694 ret
= bin_info
->reg_size
;
1696 assert(((uintptr_t)ptr
& PAGE_MASK
) == 0);
1697 ret
= mapbits
& ~PAGE_MASK
;
1698 if (prof_promote
&& ret
== PAGE_SIZE
&& (mapbits
&
1699 CHUNK_MAP_CLASS_MASK
) != 0) {
1700 size_t binind
= ((mapbits
& CHUNK_MAP_CLASS_MASK
) >>
1701 CHUNK_MAP_CLASS_SHIFT
) - 1;
1702 assert(binind
< nbins
);
1703 ret
= arena_bin_info
[binind
].reg_size
;
1713 arena_dissociate_bin_run(arena_chunk_t
*chunk
, arena_run_t
*run
,
1717 /* Dissociate run from bin. */
1718 if (run
== bin
->runcur
)
1721 size_t binind
= arena_bin_index(chunk
->arena
, bin
);
1722 arena_bin_info_t
*bin_info
= &arena_bin_info
[binind
];
1724 if (bin_info
->nregs
!= 1) {
1725 size_t run_pageind
= (((uintptr_t)run
-
1726 (uintptr_t)chunk
)) >> PAGE_SHIFT
;
1727 arena_chunk_map_t
*run_mapelm
=
1728 &chunk
->map
[run_pageind
-map_bias
];
1730 * This block's conditional is necessary because if the
1731 * run only contains one region, then it never gets
1732 * inserted into the non-full runs tree.
1734 arena_run_tree_remove(&bin
->runs
, run_mapelm
);
1740 arena_dalloc_bin_run(arena_t
*arena
, arena_chunk_t
*chunk
, arena_run_t
*run
,
1744 arena_bin_info_t
*bin_info
;
1745 size_t npages
, run_ind
, past
;
1747 assert(run
!= bin
->runcur
);
1748 assert(arena_run_tree_search(&bin
->runs
, &chunk
->map
[
1749 (((uintptr_t)run
-(uintptr_t)chunk
)>>PAGE_SHIFT
)-map_bias
]) == NULL
);
1751 binind
= arena_bin_index(chunk
->arena
, run
->bin
);
1752 bin_info
= &arena_bin_info
[binind
];
1754 malloc_mutex_unlock(&bin
->lock
);
1755 /******************************/
1756 npages
= bin_info
->run_size
>> PAGE_SHIFT
;
1757 run_ind
= (size_t)(((uintptr_t)run
- (uintptr_t)chunk
) >> PAGE_SHIFT
);
1758 past
= (size_t)(PAGE_CEILING((uintptr_t)run
+
1759 (uintptr_t)bin_info
->reg0_offset
+ (uintptr_t)(run
->nextind
*
1760 bin_info
->reg_size
) - (uintptr_t)chunk
) >> PAGE_SHIFT
);
1761 malloc_mutex_lock(&arena
->lock
);
1764 * If the run was originally clean, and some pages were never touched,
1765 * trim the clean pages before deallocating the dirty portion of the
1768 if ((chunk
->map
[run_ind
-map_bias
].bits
& CHUNK_MAP_DIRTY
) == 0 && past
1769 - run_ind
< npages
) {
1771 * Trim clean pages. Convert to large run beforehand. Set the
1772 * last map element first, in case this is a one-page run.
1774 chunk
->map
[run_ind
+npages
-1-map_bias
].bits
= CHUNK_MAP_LARGE
|
1775 (chunk
->map
[run_ind
+npages
-1-map_bias
].bits
&
1776 CHUNK_MAP_FLAGS_MASK
);
1777 chunk
->map
[run_ind
-map_bias
].bits
= bin_info
->run_size
|
1778 CHUNK_MAP_LARGE
| (chunk
->map
[run_ind
-map_bias
].bits
&
1779 CHUNK_MAP_FLAGS_MASK
);
1780 arena_run_trim_tail(arena
, chunk
, run
, (npages
<< PAGE_SHIFT
),
1781 ((past
- run_ind
) << PAGE_SHIFT
), false);
1782 /* npages = past - run_ind; */
1784 #ifdef JEMALLOC_DEBUG
1787 arena_run_dalloc(arena
, run
, true);
1788 malloc_mutex_unlock(&arena
->lock
);
1789 /****************************/
1790 malloc_mutex_lock(&bin
->lock
);
1791 #ifdef JEMALLOC_STATS
1792 bin
->stats
.curruns
--;
1797 arena_bin_lower_run(arena_t
*arena
, arena_chunk_t
*chunk
, arena_run_t
*run
,
1802 * Make sure that bin->runcur always refers to the lowest non-full run,
1805 if (bin
->runcur
== NULL
)
1807 else if ((uintptr_t)run
< (uintptr_t)bin
->runcur
) {
1808 /* Switch runcur. */
1809 if (bin
->runcur
->nfree
> 0) {
1810 arena_chunk_t
*runcur_chunk
=
1811 CHUNK_ADDR2BASE(bin
->runcur
);
1812 size_t runcur_pageind
= (((uintptr_t)bin
->runcur
-
1813 (uintptr_t)runcur_chunk
)) >> PAGE_SHIFT
;
1814 arena_chunk_map_t
*runcur_mapelm
=
1815 &runcur_chunk
->map
[runcur_pageind
-map_bias
];
1817 /* Insert runcur. */
1818 arena_run_tree_insert(&bin
->runs
, runcur_mapelm
);
1822 size_t run_pageind
= (((uintptr_t)run
-
1823 (uintptr_t)chunk
)) >> PAGE_SHIFT
;
1824 arena_chunk_map_t
*run_mapelm
=
1825 &chunk
->map
[run_pageind
-map_bias
];
1827 assert(arena_run_tree_search(&bin
->runs
, run_mapelm
) == NULL
);
1828 arena_run_tree_insert(&bin
->runs
, run_mapelm
);
1833 arena_dalloc_bin(arena_t
*arena
, arena_chunk_t
*chunk
, void *ptr
,
1834 arena_chunk_map_t
*mapelm
)
1839 #if (defined(JEMALLOC_FILL) || defined(JEMALLOC_STATS))
1843 pageind
= ((uintptr_t)ptr
- (uintptr_t)chunk
) >> PAGE_SHIFT
;
1844 run
= (arena_run_t
*)((uintptr_t)chunk
+ (uintptr_t)((pageind
-
1845 (mapelm
->bits
>> PAGE_SHIFT
)) << PAGE_SHIFT
));
1846 dassert(run
->magic
== ARENA_RUN_MAGIC
);
1848 size_t binind
= arena_bin_index(arena
, bin
);
1849 arena_bin_info_t
*bin_info
= &arena_bin_info
[binind
];
1850 #if (defined(JEMALLOC_FILL) || defined(JEMALLOC_STATS))
1851 size
= bin_info
->reg_size
;
1854 #ifdef JEMALLOC_FILL
1856 memset(ptr
, 0x5a, size
);
1859 arena_run_reg_dalloc(run
, ptr
);
1860 if (run
->nfree
== bin_info
->nregs
) {
1861 arena_dissociate_bin_run(chunk
, run
, bin
);
1862 arena_dalloc_bin_run(arena
, chunk
, run
, bin
);
1863 } else if (run
->nfree
== 1 && run
!= bin
->runcur
)
1864 arena_bin_lower_run(arena
, chunk
, run
, bin
);
1866 #ifdef JEMALLOC_STATS
1867 bin
->stats
.allocated
-= size
;
1868 bin
->stats
.ndalloc
++;
1872 #ifdef JEMALLOC_STATS
1874 arena_stats_merge(arena_t
*arena
, size_t *nactive
, size_t *ndirty
,
1875 arena_stats_t
*astats
, malloc_bin_stats_t
*bstats
,
1876 malloc_large_stats_t
*lstats
)
1880 malloc_mutex_lock(&arena
->lock
);
1881 *nactive
+= arena
->nactive
;
1882 *ndirty
+= arena
->ndirty
;
1884 astats
->mapped
+= arena
->stats
.mapped
;
1885 astats
->npurge
+= arena
->stats
.npurge
;
1886 astats
->nmadvise
+= arena
->stats
.nmadvise
;
1887 astats
->purged
+= arena
->stats
.purged
;
1888 astats
->allocated_large
+= arena
->stats
.allocated_large
;
1889 astats
->nmalloc_large
+= arena
->stats
.nmalloc_large
;
1890 astats
->ndalloc_large
+= arena
->stats
.ndalloc_large
;
1891 astats
->nrequests_large
+= arena
->stats
.nrequests_large
;
1893 for (i
= 0; i
< nlclasses
; i
++) {
1894 lstats
[i
].nmalloc
+= arena
->stats
.lstats
[i
].nmalloc
;
1895 lstats
[i
].ndalloc
+= arena
->stats
.lstats
[i
].ndalloc
;
1896 lstats
[i
].nrequests
+= arena
->stats
.lstats
[i
].nrequests
;
1897 lstats
[i
].highruns
+= arena
->stats
.lstats
[i
].highruns
;
1898 lstats
[i
].curruns
+= arena
->stats
.lstats
[i
].curruns
;
1900 malloc_mutex_unlock(&arena
->lock
);
1902 for (i
= 0; i
< nbins
; i
++) {
1903 arena_bin_t
*bin
= &arena
->bins
[i
];
1905 malloc_mutex_lock(&bin
->lock
);
1906 bstats
[i
].allocated
+= bin
->stats
.allocated
;
1907 bstats
[i
].nmalloc
+= bin
->stats
.nmalloc
;
1908 bstats
[i
].ndalloc
+= bin
->stats
.ndalloc
;
1909 bstats
[i
].nrequests
+= bin
->stats
.nrequests
;
1910 #ifdef JEMALLOC_TCACHE
1911 bstats
[i
].nfills
+= bin
->stats
.nfills
;
1912 bstats
[i
].nflushes
+= bin
->stats
.nflushes
;
1914 bstats
[i
].nruns
+= bin
->stats
.nruns
;
1915 bstats
[i
].reruns
+= bin
->stats
.reruns
;
1916 bstats
[i
].highruns
+= bin
->stats
.highruns
;
1917 bstats
[i
].curruns
+= bin
->stats
.curruns
;
1918 malloc_mutex_unlock(&bin
->lock
);
1924 arena_dalloc_large(arena_t
*arena
, arena_chunk_t
*chunk
, void *ptr
)
1927 /* Large allocation. */
1928 #ifdef JEMALLOC_FILL
1929 # ifndef JEMALLOC_STATS
1934 #if (defined(JEMALLOC_FILL) || defined(JEMALLOC_STATS))
1935 size_t pageind
= ((uintptr_t)ptr
- (uintptr_t)chunk
) >>
1937 size_t size
= chunk
->map
[pageind
-map_bias
].bits
& ~PAGE_MASK
;
1940 #ifdef JEMALLOC_FILL
1941 # ifdef JEMALLOC_STATS
1944 memset(ptr
, 0x5a, size
);
1946 #ifdef JEMALLOC_STATS
1947 arena
->stats
.ndalloc_large
++;
1948 arena
->stats
.allocated_large
-= size
;
1949 arena
->stats
.lstats
[(size
>> PAGE_SHIFT
) - 1].ndalloc
++;
1950 arena
->stats
.lstats
[(size
>> PAGE_SHIFT
) - 1].curruns
--;
1954 arena_run_dalloc(arena
, (arena_run_t
*)ptr
, true);
1958 arena_ralloc_large_shrink(arena_t
*arena
, arena_chunk_t
*chunk
, void *ptr
,
1959 size_t oldsize
, size_t size
)
1962 assert(size
< oldsize
);
1965 * Shrink the run, and make trailing pages available for other
1968 malloc_mutex_lock(&arena
->lock
);
1969 arena_run_trim_tail(arena
, chunk
, (arena_run_t
*)ptr
, oldsize
, size
,
1971 #ifdef JEMALLOC_STATS
1972 arena
->stats
.ndalloc_large
++;
1973 arena
->stats
.allocated_large
-= oldsize
;
1974 arena
->stats
.lstats
[(oldsize
>> PAGE_SHIFT
) - 1].ndalloc
++;
1975 arena
->stats
.lstats
[(oldsize
>> PAGE_SHIFT
) - 1].curruns
--;
1977 arena
->stats
.nmalloc_large
++;
1978 arena
->stats
.nrequests_large
++;
1979 arena
->stats
.allocated_large
+= size
;
1980 arena
->stats
.lstats
[(size
>> PAGE_SHIFT
) - 1].nmalloc
++;
1981 arena
->stats
.lstats
[(size
>> PAGE_SHIFT
) - 1].nrequests
++;
1982 arena
->stats
.lstats
[(size
>> PAGE_SHIFT
) - 1].curruns
++;
1983 if (arena
->stats
.lstats
[(size
>> PAGE_SHIFT
) - 1].curruns
>
1984 arena
->stats
.lstats
[(size
>> PAGE_SHIFT
) - 1].highruns
) {
1985 arena
->stats
.lstats
[(size
>> PAGE_SHIFT
) - 1].highruns
=
1986 arena
->stats
.lstats
[(size
>> PAGE_SHIFT
) - 1].curruns
;
1989 malloc_mutex_unlock(&arena
->lock
);

static bool
arena_ralloc_large_grow(arena_t *arena, arena_chunk_t *chunk, void *ptr,
    size_t oldsize, size_t size, size_t extra, bool zero)
{
	size_t pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> PAGE_SHIFT;
	size_t npages = oldsize >> PAGE_SHIFT;
	size_t followsize;

	assert(oldsize == (chunk->map[pageind-map_bias].bits & ~PAGE_MASK));

	/* Try to extend the run. */
	assert(size + extra > oldsize);
	malloc_mutex_lock(&arena->lock);
	if (pageind + npages < chunk_npages &&
	    (chunk->map[pageind+npages-map_bias].bits
	    & CHUNK_MAP_ALLOCATED) == 0 && (followsize =
	    chunk->map[pageind+npages-map_bias].bits & ~PAGE_MASK) >= size -
	    oldsize) {
		/*
		 * The next run is available and sufficiently large.  Split the
		 * following run, then merge the first part with the existing
		 * allocation.
		 */
		size_t flag_dirty;
		size_t splitsize = (oldsize + followsize <= size + extra)
		    ? followsize : size + extra - oldsize;
		arena_run_split(arena, (arena_run_t *)((uintptr_t)chunk +
		    ((pageind+npages) << PAGE_SHIFT)), splitsize, true, zero);

		size = oldsize + splitsize;
		npages = size >> PAGE_SHIFT;

		/*
		 * Mark the extended run as dirty if either portion of the run
		 * was dirty before allocation.  This is rather pedantic,
		 * because there's not actually any sequence of events that
		 * could cause the resulting run to be passed to
		 * arena_run_dalloc() with the dirty argument set to false
		 * (which is when dirty flag consistency would really matter).
		 */
		flag_dirty = (chunk->map[pageind-map_bias].bits &
		    CHUNK_MAP_DIRTY) |
		    (chunk->map[pageind+npages-1-map_bias].bits &
		    CHUNK_MAP_DIRTY);
		chunk->map[pageind-map_bias].bits = size | flag_dirty
		    | CHUNK_MAP_LARGE | CHUNK_MAP_ALLOCATED;
		chunk->map[pageind+npages-1-map_bias].bits = flag_dirty |
		    CHUNK_MAP_LARGE | CHUNK_MAP_ALLOCATED;

#ifdef JEMALLOC_STATS
		arena->stats.ndalloc_large++;
		arena->stats.allocated_large -= oldsize;
		arena->stats.lstats[(oldsize >> PAGE_SHIFT) - 1].ndalloc++;
		arena->stats.lstats[(oldsize >> PAGE_SHIFT) - 1].curruns--;

		arena->stats.nmalloc_large++;
		arena->stats.nrequests_large++;
		arena->stats.allocated_large += size;
		arena->stats.lstats[(size >> PAGE_SHIFT) - 1].nmalloc++;
		arena->stats.lstats[(size >> PAGE_SHIFT) - 1].nrequests++;
		arena->stats.lstats[(size >> PAGE_SHIFT) - 1].curruns++;
		if (arena->stats.lstats[(size >> PAGE_SHIFT) - 1].curruns >
		    arena->stats.lstats[(size >> PAGE_SHIFT) - 1].highruns) {
			arena->stats.lstats[(size >> PAGE_SHIFT) - 1].highruns =
			    arena->stats.lstats[(size >> PAGE_SHIFT) -
			    1].curruns;
		}
#endif
		malloc_mutex_unlock(&arena->lock);
		return (false);
	}
	malloc_mutex_unlock(&arena->lock);

	return (true);
}
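
/*
 * Illustrative example (assuming 4 KiB pages): growing a two-page allocation
 * with size == 3 pages and extra == 2 pages can succeed in place only if the
 * run that follows it in the chunk is unallocated and at least one page long
 * (followsize >= size - oldsize).  If four free pages follow, splitsize
 * becomes size + extra - oldsize == 3 pages, so the run grows to five pages,
 * the remaining free page stays available, and the function returns false
 * (no move needed).
 */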

/*
 * Try to resize a large allocation, in order to avoid copying.  This will
 * always fail if growing an object and the following run is already in use.
 */
static bool
arena_ralloc_large(void *ptr, size_t oldsize, size_t size, size_t extra,
    bool zero)
{
	size_t psize;

	psize = PAGE_CEILING(size + extra);
	if (psize == oldsize) {
		/* Same size class. */
#ifdef JEMALLOC_FILL
		if (opt_junk && size < oldsize) {
			memset((void *)((uintptr_t)ptr + size), 0x5a, oldsize -
			    size);
		}
#endif
		return (false);
	} else {
		arena_chunk_t *chunk;
		arena_t *arena;

		chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
		arena = chunk->arena;
		dassert(arena->magic == ARENA_MAGIC);

		if (psize < oldsize) {
#ifdef JEMALLOC_FILL
			/* Fill before shrinking in order to avoid a race. */
			if (opt_junk) {
				memset((void *)((uintptr_t)ptr + size), 0x5a,
				    oldsize - size);
			}
#endif
			arena_ralloc_large_shrink(arena, chunk, ptr, oldsize,
			    psize);
			return (false);
		} else {
			bool ret = arena_ralloc_large_grow(arena, chunk, ptr,
			    oldsize, PAGE_CEILING(size),
			    psize - PAGE_CEILING(size), zero);
#ifdef JEMALLOC_FILL
			if (ret == false && zero == false && opt_zero) {
				memset((void *)((uintptr_t)ptr + oldsize), 0,
				    size - oldsize);
			}
#endif
			return (ret);
		}
	}
}

void *
arena_ralloc_no_move(void *ptr, size_t oldsize, size_t size, size_t extra,
    bool zero)
{

	/*
	 * Avoid moving the allocation if the size class can be left the same.
	 */
	if (oldsize <= arena_maxclass) {
		if (oldsize <= small_maxclass) {
			assert(arena_bin_info[SMALL_SIZE2BIN(oldsize)].reg_size
			    == oldsize);
			if ((size + extra <= small_maxclass &&
			    SMALL_SIZE2BIN(size + extra) ==
			    SMALL_SIZE2BIN(oldsize)) || (size <= oldsize &&
			    size + extra >= oldsize)) {
#ifdef JEMALLOC_FILL
				if (opt_junk && size < oldsize) {
					memset((void *)((uintptr_t)ptr + size),
					    0x5a, oldsize - size);
				}
#endif
				return (ptr);
			}
		} else {
			assert(size <= arena_maxclass);
			if (size + extra > small_maxclass) {
				if (arena_ralloc_large(ptr, oldsize, size,
				    extra, zero) == false)
					return (ptr);
			}
		}
	}

	/* Reallocation would require a move. */
	return (NULL);
}
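
/*
 * Illustrative example (assuming a 16-byte quantum): an allocation whose
 * oldsize is 32 bytes lives in the 32-byte bin, so a realloc to any request
 * for which SMALL_SIZE2BIN(size + extra) is still that bin (e.g. size == 30,
 * extra == 0) returns the original pointer unmoved.  A request that maps to
 * the 48-byte bin (e.g. size == 40) falls through and returns NULL, telling
 * the caller a move is required.
 */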

void *
arena_ralloc(void *ptr, size_t oldsize, size_t size, size_t extra,
    size_t alignment, bool zero)
{
	void *ret;
	size_t copysize;

	/* Try to avoid moving the allocation. */
	ret = arena_ralloc_no_move(ptr, oldsize, size, extra, zero);
	if (ret != NULL)
		return (ret);

	/*
	 * size and oldsize are different enough that we need to move the
	 * object.  In that case, fall back to allocating new space and
	 * copying.
	 */
	if (alignment != 0) {
		size_t usize = sa2u(size + extra, alignment, NULL);
		if (usize == 0)
			return (NULL);
		ret = ipalloc(usize, alignment, zero);
	} else
		ret = arena_malloc(size + extra, zero);

	if (ret == NULL) {
		if (extra == 0)
			return (NULL);
		/* Try again, this time without extra. */
		if (alignment != 0) {
			size_t usize = sa2u(size, alignment, NULL);
			if (usize == 0)
				return (NULL);
			ret = ipalloc(usize, alignment, zero);
		} else
			ret = arena_malloc(size, zero);

		if (ret == NULL)
			return (NULL);
	}

	/* Junk/zero-filling were already done by ipalloc()/arena_malloc(). */

	/*
	 * Copy at most size bytes (not size+extra), since the caller has no
	 * expectation that the extra bytes will be reliably preserved.
	 */
	copysize = (size < oldsize) ? size : oldsize;
	memcpy(ret, ptr, copysize);
	idalloc(ptr);
	return (ret);
}

bool
arena_new(arena_t *arena, unsigned ind)
{
	unsigned i;
	arena_bin_t *bin;

	arena->ind = ind;
	arena->nthreads = 0;

	if (malloc_mutex_init(&arena->lock))
		return (true);

#ifdef JEMALLOC_STATS
	memset(&arena->stats, 0, sizeof(arena_stats_t));
	arena->stats.lstats = (malloc_large_stats_t *)base_alloc(nlclasses *
	    sizeof(malloc_large_stats_t));
	if (arena->stats.lstats == NULL)
		return (true);
	memset(arena->stats.lstats, 0, nlclasses *
	    sizeof(malloc_large_stats_t));
#  ifdef JEMALLOC_TCACHE
	ql_new(&arena->tcache_ql);
#  endif
#endif

#ifdef JEMALLOC_PROF
	arena->prof_accumbytes = 0;
#endif

	/* Initialize chunks. */
	ql_new(&arena->chunks_dirty);
	arena->spare = NULL;

	arena->nactive = 0;
	arena->ndirty = 0;
	arena->npurgatory = 0;

	arena_avail_tree_new(&arena->runs_avail_clean);
	arena_avail_tree_new(&arena->runs_avail_dirty);

	/* Initialize bins. */
	i = 0;
#ifdef JEMALLOC_TINY
	/* (2^n)-spaced tiny bins. */
	for (; i < ntbins; i++) {
		bin = &arena->bins[i];
		if (malloc_mutex_init(&bin->lock))
			return (true);
		bin->runcur = NULL;
		arena_run_tree_new(&bin->runs);
#ifdef JEMALLOC_STATS
		memset(&bin->stats, 0, sizeof(malloc_bin_stats_t));
#endif
	}
#endif

	/* Quantum-spaced bins. */
	for (; i < ntbins + nqbins; i++) {
		bin = &arena->bins[i];
		if (malloc_mutex_init(&bin->lock))
			return (true);
		bin->runcur = NULL;
		arena_run_tree_new(&bin->runs);
#ifdef JEMALLOC_STATS
		memset(&bin->stats, 0, sizeof(malloc_bin_stats_t));
#endif
	}

	/* Cacheline-spaced bins. */
	for (; i < ntbins + nqbins + ncbins; i++) {
		bin = &arena->bins[i];
		if (malloc_mutex_init(&bin->lock))
			return (true);
		bin->runcur = NULL;
		arena_run_tree_new(&bin->runs);
#ifdef JEMALLOC_STATS
		memset(&bin->stats, 0, sizeof(malloc_bin_stats_t));
#endif
	}

	/* Subpage-spaced bins. */
	for (; i < nbins; i++) {
		bin = &arena->bins[i];
		if (malloc_mutex_init(&bin->lock))
			return (true);
		bin->runcur = NULL;
		arena_run_tree_new(&bin->runs);
#ifdef JEMALLOC_STATS
		memset(&bin->stats, 0, sizeof(malloc_bin_stats_t));
#endif
	}

#ifdef JEMALLOC_DEBUG
	arena->magic = ARENA_MAGIC;
#endif

	return (false);
}

#ifdef JEMALLOC_DEBUG
static void
small_size2bin_validate(void)
{
	size_t i, size, binind;

	i = 1;
#  ifdef JEMALLOC_TINY
	/* Tiny. */
	for (; i < (1U << LG_TINY_MIN); i++) {
		size = pow2_ceil(1U << LG_TINY_MIN);
		binind = ffs((int)(size >> (LG_TINY_MIN + 1)));
		assert(SMALL_SIZE2BIN(i) == binind);
	}
	for (; i < qspace_min; i++) {
		size = pow2_ceil(i);
		binind = ffs((int)(size >> (LG_TINY_MIN + 1)));
		assert(SMALL_SIZE2BIN(i) == binind);
	}
#  endif
	/* Quantum-spaced. */
	for (; i <= qspace_max; i++) {
		size = QUANTUM_CEILING(i);
		binind = ntbins + (size >> LG_QUANTUM) - 1;
		assert(SMALL_SIZE2BIN(i) == binind);
	}
	/* Cacheline-spaced. */
	for (; i <= cspace_max; i++) {
		size = CACHELINE_CEILING(i);
		binind = ntbins + nqbins + ((size - cspace_min) >>
		    LG_CACHELINE);
		assert(SMALL_SIZE2BIN(i) == binind);
	}
	/* Subpage-spaced. */
	for (; i <= sspace_max; i++) {
		size = SUBPAGE_CEILING(i);
		binind = ntbins + nqbins + ncbins + ((size - sspace_min)
		    >> LG_SUBPAGE);
		assert(SMALL_SIZE2BIN(i) == binind);
	}
}
#endif

static bool
small_size2bin_init(void)
{

	if (opt_lg_qspace_max != LG_QSPACE_MAX_DEFAULT
	    || opt_lg_cspace_max != LG_CSPACE_MAX_DEFAULT
	    || (sizeof(const_small_size2bin) != ((small_maxclass-1) >>
	    LG_TINY_MIN) + 1))
		return (small_size2bin_init_hard());

	small_size2bin = const_small_size2bin;
#ifdef JEMALLOC_DEBUG
	small_size2bin_validate();
#endif
	return (false);
}
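
/*
 * Illustrative example of the size-to-bin computation validated above and
 * rebuilt in small_size2bin_init_hard() below, assuming a 16-byte quantum
 * (LG_QUANTUM == 4): a 24-byte request is quantum-ceiled to 32, so its bin
 * index is ntbins + (32 >> LG_QUANTUM) - 1 == ntbins + 1, i.e. the second
 * quantum-spaced bin (the 32-byte size class).
 */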

static bool
small_size2bin_init_hard(void)
{
	size_t i, size, binind;
	uint8_t *custom_small_size2bin;
#define	CUSTOM_SMALL_SIZE2BIN(s)					\
    custom_small_size2bin[(s-1) >> LG_TINY_MIN]

	assert(opt_lg_qspace_max != LG_QSPACE_MAX_DEFAULT
	    || opt_lg_cspace_max != LG_CSPACE_MAX_DEFAULT
	    || (sizeof(const_small_size2bin) != ((small_maxclass-1) >>
	    LG_TINY_MIN) + 1));

	custom_small_size2bin = (uint8_t *)
	    base_alloc(small_maxclass >> LG_TINY_MIN);
	if (custom_small_size2bin == NULL)
		return (true);

	i = 1;
#ifdef JEMALLOC_TINY
	/* Tiny. */
	for (; i < (1U << LG_TINY_MIN); i += TINY_MIN) {
		size = pow2_ceil(1U << LG_TINY_MIN);
		binind = ffs((int)(size >> (LG_TINY_MIN + 1)));
		CUSTOM_SMALL_SIZE2BIN(i) = binind;
	}
	for (; i < qspace_min; i += TINY_MIN) {
		size = pow2_ceil(i);
		binind = ffs((int)(size >> (LG_TINY_MIN + 1)));
		CUSTOM_SMALL_SIZE2BIN(i) = binind;
	}
#endif
	/* Quantum-spaced. */
	for (; i <= qspace_max; i += TINY_MIN) {
		size = QUANTUM_CEILING(i);
		binind = ntbins + (size >> LG_QUANTUM) - 1;
		CUSTOM_SMALL_SIZE2BIN(i) = binind;
	}
	/* Cacheline-spaced. */
	for (; i <= cspace_max; i += TINY_MIN) {
		size = CACHELINE_CEILING(i);
		binind = ntbins + nqbins + ((size - cspace_min) >>
		    LG_CACHELINE);
		CUSTOM_SMALL_SIZE2BIN(i) = binind;
	}
	/* Subpage-spaced. */
	for (; i <= sspace_max; i += TINY_MIN) {
		size = SUBPAGE_CEILING(i);
		binind = ntbins + nqbins + ncbins + ((size - sspace_min) >>
		    LG_SUBPAGE);
		CUSTOM_SMALL_SIZE2BIN(i) = binind;
	}

	small_size2bin = custom_small_size2bin;
#ifdef JEMALLOC_DEBUG
	small_size2bin_validate();
#endif

	return (false);
#undef CUSTOM_SMALL_SIZE2BIN
}

/*
 * Calculate bin_info->run_size such that it meets the following constraints:
 *
 *   *) bin_info->run_size >= min_run_size
 *   *) bin_info->run_size <= arena_maxclass
 *   *) run header overhead <= RUN_MAX_OVRHD (or header overhead relaxed).
 *   *) bin_info->nregs <= RUN_MAXREGS
 *
 * bin_info->nregs, bin_info->bitmap_offset, and bin_info->reg0_offset are also
 * calculated here, since these settings are all interdependent.
 */
static size_t
bin_info_run_size_calc(arena_bin_info_t *bin_info, size_t min_run_size)
{
	size_t try_run_size, good_run_size;
	uint32_t try_nregs, good_nregs;
	uint32_t try_hdr_size, good_hdr_size;
	uint32_t try_bitmap_offset, good_bitmap_offset;
#ifdef JEMALLOC_PROF
	uint32_t try_ctx0_offset, good_ctx0_offset;
#endif
	uint32_t try_reg0_offset, good_reg0_offset;

	assert(min_run_size >= PAGE_SIZE);
	assert(min_run_size <= arena_maxclass);

	/*
	 * Calculate known-valid settings before entering the run_size
	 * expansion loop, so that the first part of the loop always copies
	 * valid settings.
	 *
	 * The do..while loop iteratively reduces the number of regions until
	 * the run header and the regions no longer overlap.  A closed formula
	 * would be quite messy, since there is an interdependency between the
	 * header's mask length and the number of regions.
	 */
	try_run_size = min_run_size;
	try_nregs = ((try_run_size - sizeof(arena_run_t)) / bin_info->reg_size)
	    + 1; /* Counter-act try_nregs-- in loop. */
	if (try_nregs > RUN_MAXREGS) {
		try_nregs = RUN_MAXREGS
		    + 1; /* Counter-act try_nregs-- in loop. */
	}
	do {
		try_nregs--;
		try_hdr_size = sizeof(arena_run_t);
		/* Pad to a long boundary. */
		try_hdr_size = LONG_CEILING(try_hdr_size);
		try_bitmap_offset = try_hdr_size;
		/* Add space for bitmap. */
		try_hdr_size += bitmap_size(try_nregs);
#ifdef JEMALLOC_PROF
		if (opt_prof && prof_promote == false) {
			/* Pad to a quantum boundary. */
			try_hdr_size = QUANTUM_CEILING(try_hdr_size);
			try_ctx0_offset = try_hdr_size;
			/* Add space for one (prof_ctx_t *) per region. */
			try_hdr_size += try_nregs * sizeof(prof_ctx_t *);
		} else
			try_ctx0_offset = 0;
#endif
		try_reg0_offset = try_run_size - (try_nregs *
		    bin_info->reg_size);
	} while (try_hdr_size > try_reg0_offset);

	/* run_size expansion loop. */
	do {
		/*
		 * Copy valid settings before trying more aggressive settings.
		 */
		good_run_size = try_run_size;
		good_nregs = try_nregs;
		good_hdr_size = try_hdr_size;
		good_bitmap_offset = try_bitmap_offset;
#ifdef JEMALLOC_PROF
		good_ctx0_offset = try_ctx0_offset;
#endif
		good_reg0_offset = try_reg0_offset;

		/* Try more aggressive settings. */
		try_run_size += PAGE_SIZE;
		try_nregs = ((try_run_size - sizeof(arena_run_t)) /
		    bin_info->reg_size)
		    + 1; /* Counter-act try_nregs-- in loop. */
		if (try_nregs > RUN_MAXREGS) {
			try_nregs = RUN_MAXREGS
			    + 1; /* Counter-act try_nregs-- in loop. */
		}
		do {
			try_nregs--;
			try_hdr_size = sizeof(arena_run_t);
			/* Pad to a long boundary. */
			try_hdr_size = LONG_CEILING(try_hdr_size);
			try_bitmap_offset = try_hdr_size;
			/* Add space for bitmap. */
			try_hdr_size += bitmap_size(try_nregs);
#ifdef JEMALLOC_PROF
			if (opt_prof && prof_promote == false) {
				/* Pad to a quantum boundary. */
				try_hdr_size = QUANTUM_CEILING(try_hdr_size);
				try_ctx0_offset = try_hdr_size;
				/*
				 * Add space for one (prof_ctx_t *) per region.
				 */
				try_hdr_size += try_nregs *
				    sizeof(prof_ctx_t *);
			}
#endif
			try_reg0_offset = try_run_size - (try_nregs *
			    bin_info->reg_size);
		} while (try_hdr_size > try_reg0_offset);
	} while (try_run_size <= arena_maxclass
	    && RUN_MAX_OVRHD * (bin_info->reg_size << 3) > RUN_MAX_OVRHD_RELAX
	    && (try_reg0_offset << RUN_BFP) > RUN_MAX_OVRHD * try_run_size
	    && try_nregs < RUN_MAXREGS);

	assert(good_hdr_size <= good_reg0_offset);

	/* Copy final settings. */
	bin_info->run_size = good_run_size;
	bin_info->nregs = good_nregs;
	bin_info->bitmap_offset = good_bitmap_offset;
#ifdef JEMALLOC_PROF
	bin_info->ctx0_offset = good_ctx0_offset;
#endif
	bin_info->reg0_offset = good_reg0_offset;

	return (good_run_size);
}
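
/*
 * The overhead test above is a binary fixed-point comparison:
 *
 *   (try_reg0_offset << RUN_BFP) > RUN_MAX_OVRHD * try_run_size
 *
 * is equivalent to try_reg0_offset / try_run_size > RUN_MAX_OVRHD / 2^RUN_BFP,
 * i.e. the expansion loop keeps adding pages while the fraction of the run
 * consumed by the header (and padding before region 0) still exceeds the
 * configured bound, unless the RUN_MAX_OVRHD_RELAX clause or the RUN_MAXREGS
 * cap stops it first.  For example, if RUN_BFP were 12 and RUN_MAX_OVRHD were
 * 0x3c, the bound would be 60/4096, roughly 1.5% overhead.
 */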

static bool
bin_info_init(void)
{
	arena_bin_info_t *bin_info;
	unsigned i;
	size_t prev_run_size;

	arena_bin_info = base_alloc(sizeof(arena_bin_info_t) * nbins);
	if (arena_bin_info == NULL)
		return (true);

	prev_run_size = PAGE_SIZE;
	i = 0;
#ifdef JEMALLOC_TINY
	/* (2^n)-spaced tiny bins. */
	for (; i < ntbins; i++) {
		bin_info = &arena_bin_info[i];
		bin_info->reg_size = (1U << (LG_TINY_MIN + i));
		prev_run_size = bin_info_run_size_calc(bin_info, prev_run_size);
		bitmap_info_init(&bin_info->bitmap_info, bin_info->nregs);
	}
#endif

	/* Quantum-spaced bins. */
	for (; i < ntbins + nqbins; i++) {
		bin_info = &arena_bin_info[i];
		bin_info->reg_size = (i - ntbins + 1) << LG_QUANTUM;
		prev_run_size = bin_info_run_size_calc(bin_info, prev_run_size);
		bitmap_info_init(&bin_info->bitmap_info, bin_info->nregs);
	}

	/* Cacheline-spaced bins. */
	for (; i < ntbins + nqbins + ncbins; i++) {
		bin_info = &arena_bin_info[i];
		bin_info->reg_size = cspace_min + ((i - (ntbins + nqbins)) <<
		    LG_CACHELINE);
		prev_run_size = bin_info_run_size_calc(bin_info, prev_run_size);
		bitmap_info_init(&bin_info->bitmap_info, bin_info->nregs);
	}

	/* Subpage-spaced bins. */
	for (; i < nbins; i++) {
		bin_info = &arena_bin_info[i];
		bin_info->reg_size = sspace_min + ((i - (ntbins + nqbins +
		    ncbins)) << LG_SUBPAGE);
		prev_run_size = bin_info_run_size_calc(bin_info, prev_run_size);
		bitmap_info_init(&bin_info->bitmap_info, bin_info->nregs);
	}

	return (false);
}
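
/*
 * Illustrative region sizes produced above, assuming a 16-byte quantum and
 * 64-byte cachelines: the quantum-spaced bins get reg_size 16, 32, ... up to
 * qspace_max, the first cacheline-spaced bin starts at cspace_min and each
 * subsequent one adds 64 bytes, and the subpage-spaced bins start at
 * sspace_min and step by the subpage size.
 */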

bool
arena_boot(void)
{
	size_t header_size;
	unsigned i;

	/* Set variables according to the value of opt_lg_[qc]space_max. */
	qspace_max = (1U << opt_lg_qspace_max);
	cspace_min = CACHELINE_CEILING(qspace_max);
	if (cspace_min == qspace_max)
		cspace_min += CACHELINE;
	cspace_max = (1U << opt_lg_cspace_max);
	sspace_min = SUBPAGE_CEILING(cspace_max);
	if (sspace_min == cspace_max)
		sspace_min += SUBPAGE;
	assert(sspace_min < PAGE_SIZE);
	sspace_max = PAGE_SIZE - SUBPAGE;

#ifdef JEMALLOC_TINY
	assert(LG_QUANTUM >= LG_TINY_MIN);
#endif
	assert(ntbins <= LG_QUANTUM);
	nqbins = qspace_max >> LG_QUANTUM;
	ncbins = ((cspace_max - cspace_min) >> LG_CACHELINE) + 1;
	nsbins = ((sspace_max - sspace_min) >> LG_SUBPAGE) + 1;
	nbins = ntbins + nqbins + ncbins + nsbins;

	/*
	 * The small_size2bin lookup table uses uint8_t to encode each bin
	 * index, so we cannot support more than 256 small size classes.  This
	 * limit is difficult to exceed (not even possible with 16B quantum and
	 * 4KiB pages), and such configurations are impractical, but
	 * nonetheless we need to protect against this case in order to avoid
	 * undefined behavior.
	 *
	 * Further constrain nbins to 255 if prof_promote is true, since all
	 * small size classes, plus a "not small" size class must be stored in
	 * 8 bits of arena_chunk_map_t's bits field.
	 */
#ifdef JEMALLOC_PROF
	if (opt_prof && prof_promote) {
		if (nbins > 255) {
			char line_buf[UMAX2S_BUFSIZE];
			malloc_write("<jemalloc>: Too many small size classes (");
			malloc_write(u2s(nbins, 10, line_buf));
			malloc_write(" > max 255)\n");
			abort();
		}
	} else
#endif
	if (nbins > 256) {
		char line_buf[UMAX2S_BUFSIZE];
		malloc_write("<jemalloc>: Too many small size classes (");
		malloc_write(u2s(nbins, 10, line_buf));
		malloc_write(" > max 256)\n");
		abort();
	}

	/*
	 * Compute the header size such that it is large enough to contain the
	 * page map.  The page map is biased to omit entries for the header
	 * itself, so some iteration is necessary to compute the map bias.
	 *
	 * 1) Compute safe header_size and map_bias values that include enough
	 *    space for an unbiased page map.
	 * 2) Refine map_bias based on (1) to omit the header pages in the page
	 *    map.  The resulting map_bias may be one too small.
	 * 3) Refine map_bias based on (2).  The result will be >= the result
	 *    from (2), and will always be correct.
	 */
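	/*
	 * Worked example of the iteration below, using purely hypothetical
	 * numbers (4 KiB pages, a 256-page chunk, 32-byte map entries, and a
	 * 64-byte fixed header before the map):
	 *
	 *   1) header_size = 64 + 32*256     = 8256 -> map_bias = 3
	 *   2) header_size = 64 + 32*(256-3) = 8160 -> map_bias = 2
	 *   3) header_size = 64 + 32*(256-2) = 8192 -> map_bias = 2
	 *
	 * The third pass cannot shrink the estimate from the second, so the
	 * value stabilizes at the correct bias of two header pages.
	 */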
	map_bias = 0;
	for (i = 0; i < 3; i++) {
		header_size = offsetof(arena_chunk_t, map)
		    + (sizeof(arena_chunk_map_t) * (chunk_npages-map_bias));
		map_bias = (header_size >> PAGE_SHIFT) + ((header_size &
		    PAGE_MASK) != 0);
	}
	assert(map_bias > 0);

	arena_maxclass = chunksize - (map_bias << PAGE_SHIFT);

	if (small_size2bin_init())
		return (true);

	if (bin_info_init())