/*
 * NOTE(review): the original capture of this file carried git-web blame-viewer
 * residue (page header, commit hashes, per-line numbers) that is not part of
 * the source.  It has been removed; the translation unit begins below.
 */
#define	JEMALLOC_ARENA_C_
#include "jemalloc/internal/jemalloc_internal.h"

/******************************************************************************/
/* Data. */

/* Runtime-tunable dirty-page purge ratio exponent (see opt.lg_dirty_mult). */
ssize_t		opt_lg_dirty_mult = LG_DIRTY_MULT_DEFAULT;
/* Per-size-class metadata for small bins; populated by bin_info_init(). */
arena_bin_info_t	arena_bin_info[NBINS];

/*
 * Size-to-bin lookup table for small requests.  The S2B_* macros emit one
 * table entry per 8-byte quantum covered by each size class; expanding
 * SIZE_CLASSES fills the table so that small_size2bin[size >> 3] yields the
 * bin index for `size`.
 */
JEMALLOC_ALIGNED(CACHELINE)
const uint8_t	small_size2bin[] = {
#define	S2B_8(i)	i,
#define	S2B_16(i)	S2B_8(i) S2B_8(i)
#define	S2B_32(i)	S2B_16(i) S2B_16(i)
#define	S2B_64(i)	S2B_32(i) S2B_32(i)
#define	S2B_128(i)	S2B_64(i) S2B_64(i)
#define	S2B_256(i)	S2B_128(i) S2B_128(i)
#define	S2B_512(i)	S2B_256(i) S2B_256(i)
#define	S2B_1024(i)	S2B_512(i) S2B_512(i)
#define	S2B_2048(i)	S2B_1024(i) S2B_1024(i)
#define	S2B_4096(i)	S2B_2048(i) S2B_2048(i)
#define	S2B_8192(i)	S2B_4096(i) S2B_4096(i)
#define	SIZE_CLASS(bin, delta, size)					\
	S2B_##delta(bin)
	SIZE_CLASSES
#undef S2B_8
#undef S2B_16
#undef S2B_32
#undef S2B_64
#undef S2B_128
#undef S2B_256
#undef S2B_512
#undef S2B_1024
#undef S2B_2048
#undef S2B_4096
#undef S2B_8192
#undef SIZE_CLASS
};
a78e148b 39
/******************************************************************************/
/* Function prototypes for non-inline static functions. */

static void	arena_avail_insert(arena_t *arena, arena_chunk_t *chunk,
    size_t pageind, size_t npages, bool maybe_adjac_pred,
    bool maybe_adjac_succ);
static void	arena_avail_remove(arena_t *arena, arena_chunk_t *chunk,
    size_t pageind, size_t npages, bool maybe_adjac_pred,
    bool maybe_adjac_succ);
static void	arena_run_split(arena_t *arena, arena_run_t *run, size_t size,
    bool large, size_t binind, bool zero);
static arena_chunk_t	*arena_chunk_alloc(arena_t *arena);
static void	arena_chunk_dealloc(arena_t *arena, arena_chunk_t *chunk);
static arena_run_t	*arena_run_alloc_helper(arena_t *arena, size_t size,
    bool large, size_t binind, bool zero);
static arena_run_t	*arena_run_alloc(arena_t *arena, size_t size, bool large,
    size_t binind, bool zero);
static arena_chunk_t	*chunks_dirty_iter_cb(arena_chunk_tree_t *tree,
    arena_chunk_t *chunk, void *arg);
static void	arena_purge(arena_t *arena, bool all);
static void	arena_run_dalloc(arena_t *arena, arena_run_t *run, bool dirty,
    bool cleaned);
static void	arena_run_trim_head(arena_t *arena, arena_chunk_t *chunk,
    arena_run_t *run, size_t oldsize, size_t newsize);
static void	arena_run_trim_tail(arena_t *arena, arena_chunk_t *chunk,
    arena_run_t *run, size_t oldsize, size_t newsize, bool dirty);
static arena_run_t	*arena_bin_runs_first(arena_bin_t *bin);
static void	arena_bin_runs_insert(arena_bin_t *bin, arena_run_t *run);
static void	arena_bin_runs_remove(arena_bin_t *bin, arena_run_t *run);
static arena_run_t	*arena_bin_nonfull_run_tryget(arena_bin_t *bin);
static arena_run_t	*arena_bin_nonfull_run_get(arena_t *arena, arena_bin_t *bin);
static void	*arena_bin_malloc_hard(arena_t *arena, arena_bin_t *bin);
static void	arena_dissociate_bin_run(arena_chunk_t *chunk, arena_run_t *run,
    arena_bin_t *bin);
static void	arena_dalloc_bin_run(arena_t *arena, arena_chunk_t *chunk,
    arena_run_t *run, arena_bin_t *bin);
static void	arena_bin_lower_run(arena_t *arena, arena_chunk_t *chunk,
    arena_run_t *run, arena_bin_t *bin);
static void	arena_ralloc_large_shrink(arena_t *arena, arena_chunk_t *chunk,
    void *ptr, size_t oldsize, size_t size);
static bool	arena_ralloc_large_grow(arena_t *arena, arena_chunk_t *chunk,
    void *ptr, size_t oldsize, size_t size, size_t extra, bool zero);
static bool	arena_ralloc_large(void *ptr, size_t oldsize, size_t size,
    size_t extra, bool zero);
static size_t	bin_info_run_size_calc(arena_bin_info_t *bin_info,
    size_t min_run_size);
static void	bin_info_init(void);
88/******************************************************************************/
89
90static inline int
91arena_run_comp(arena_chunk_map_t *a, arena_chunk_map_t *b)
92{
93 uintptr_t a_mapelm = (uintptr_t)a;
94 uintptr_t b_mapelm = (uintptr_t)b;
95
96 assert(a != NULL);
97 assert(b != NULL);
98
99 return ((a_mapelm > b_mapelm) - (a_mapelm < b_mapelm));
100}
101
102/* Generate red-black tree functions. */
4934f93d 103rb_gen(static UNUSED, arena_run_tree_, arena_run_tree_t, arena_chunk_map_t,
104 u.rb_link, arena_run_comp)
a78e148b 105
106static inline int
107arena_avail_comp(arena_chunk_map_t *a, arena_chunk_map_t *b)
108{
109 int ret;
110 size_t a_size = a->bits & ~PAGE_MASK;
111 size_t b_size = b->bits & ~PAGE_MASK;
112
a78e148b 113 ret = (a_size > b_size) - (a_size < b_size);
114 if (ret == 0) {
115 uintptr_t a_mapelm, b_mapelm;
116
117 if ((a->bits & CHUNK_MAP_KEY) != CHUNK_MAP_KEY)
118 a_mapelm = (uintptr_t)a;
119 else {
120 /*
121 * Treat keys as though they are lower than anything
122 * else.
123 */
124 a_mapelm = 0;
125 }
126 b_mapelm = (uintptr_t)b;
127
128 ret = (a_mapelm > b_mapelm) - (a_mapelm < b_mapelm);
129 }
130
131 return (ret);
132}
133
134/* Generate red-black tree functions. */
4934f93d 135rb_gen(static UNUSED, arena_avail_tree_, arena_avail_tree_t, arena_chunk_map_t,
136 u.rb_link, arena_avail_comp)
a78e148b 137
/*
 * Comparator for the dirty-chunk tree.  Chunks are ordered so that purging,
 * which walks from "least" to "greatest", visits the most fragmented chunks
 * first; ties are broken by address (direction depends on fragmentation, see
 * below).
 */
static inline int
arena_chunk_dirty_comp(arena_chunk_t *a, arena_chunk_t *b)
{

	assert(a != NULL);
	assert(b != NULL);

	/*
	 * Short-circuit for self comparison.  The following comparison code
	 * would come to the same result, but at the cost of executing the slow
	 * path.
	 */
	if (a == b)
		return (0);

	/*
	 * Order such that chunks with higher fragmentation are "less than"
	 * those with lower fragmentation -- purging order is from "least" to
	 * "greatest".  Fragmentation is measured as:
	 *
	 *     mean current avail run size
	 *   --------------------------------
	 *   mean defragmented avail run size
	 *
	 *            navail
	 *         -----------
	 *         nruns_avail           nruns_avail-nruns_adjac
	 * = ========================= = -----------------------
	 *            navail                  nruns_avail
	 *    -----------------------
	 *    nruns_avail-nruns_adjac
	 *
	 * The following code multiplies away the denominator prior to
	 * comparison, in order to avoid division.
	 *
	 */
	{
		size_t a_val = (a->nruns_avail - a->nruns_adjac) *
		    b->nruns_avail;
		size_t b_val = (b->nruns_avail - b->nruns_adjac) *
		    a->nruns_avail;

		if (a_val < b_val)
			return (1);
		if (a_val > b_val)
			return (-1);
	}
	/*
	 * Break ties by chunk address.  For fragmented chunks, report lower
	 * addresses as "lower", so that fragmentation reduction happens first
	 * at lower addresses.  However, use the opposite ordering for
	 * unfragmented chunks, in order to increase the chances of
	 * re-allocating dirty runs.
	 */
	{
		uintptr_t a_chunk = (uintptr_t)a;
		uintptr_t b_chunk = (uintptr_t)b;
		int ret = ((a_chunk > b_chunk) - (a_chunk < b_chunk));
		if (a->nruns_adjac == 0) {
			assert(b->nruns_adjac == 0);
			ret = -ret;
		}
		return (ret);
	}
}

/* Generate red-black tree functions. */
rb_gen(static UNUSED, arena_chunk_dirty_, arena_chunk_tree_t, arena_chunk_t,
    dirty_link, arena_chunk_dirty_comp)
207
208static inline bool
209arena_avail_adjac_pred(arena_chunk_t *chunk, size_t pageind)
210{
211 bool ret;
212
213 if (pageind-1 < map_bias)
214 ret = false;
215 else {
216 ret = (arena_mapbits_allocated_get(chunk, pageind-1) == 0);
217 assert(ret == false || arena_mapbits_dirty_get(chunk,
218 pageind-1) != arena_mapbits_dirty_get(chunk, pageind));
219 }
220 return (ret);
221}
222
223static inline bool
224arena_avail_adjac_succ(arena_chunk_t *chunk, size_t pageind, size_t npages)
225{
226 bool ret;
227
228 if (pageind+npages == chunk_npages)
229 ret = false;
230 else {
231 assert(pageind+npages < chunk_npages);
232 ret = (arena_mapbits_allocated_get(chunk, pageind+npages) == 0);
233 assert(ret == false || arena_mapbits_dirty_get(chunk, pageind)
234 != arena_mapbits_dirty_get(chunk, pageind+npages));
235 }
236 return (ret);
237}
238
239static inline bool
240arena_avail_adjac(arena_chunk_t *chunk, size_t pageind, size_t npages)
241{
242
243 return (arena_avail_adjac_pred(chunk, pageind) ||
244 arena_avail_adjac_succ(chunk, pageind, npages));
245}
246
/*
 * Insert the unallocated run of npages starting at pageind into the arena's
 * runs_avail tree, updating the chunk's nruns_{avail,adjac} counters and the
 * arena/chunk dirty-page accounting.  The maybe_adjac_* flags tell us which
 * neighbors could possibly have become adjacent as a result of this insert.
 */
static void
arena_avail_insert(arena_t *arena, arena_chunk_t *chunk, size_t pageind,
    size_t npages, bool maybe_adjac_pred, bool maybe_adjac_succ)
{

	assert(npages == (arena_mapbits_unallocated_size_get(chunk, pageind) >>
	    LG_PAGE));

	/*
	 * chunks_dirty is keyed by nruns_{avail,adjac}, so the chunk must be
	 * removed and reinserted even if the run to be inserted is clean.
	 */
	if (chunk->ndirty != 0)
		arena_chunk_dirty_remove(&arena->chunks_dirty, chunk);

	if (maybe_adjac_pred && arena_avail_adjac_pred(chunk, pageind))
		chunk->nruns_adjac++;
	if (maybe_adjac_succ && arena_avail_adjac_succ(chunk, pageind, npages))
		chunk->nruns_adjac++;
	chunk->nruns_avail++;
	assert(chunk->nruns_avail > chunk->nruns_adjac);

	/* Track dirty pages at both arena and chunk granularity. */
	if (arena_mapbits_dirty_get(chunk, pageind) != 0) {
		arena->ndirty += npages;
		chunk->ndirty += npages;
	}
	/* Reinsert with the updated sort keys. */
	if (chunk->ndirty != 0)
		arena_chunk_dirty_insert(&arena->chunks_dirty, chunk);

	arena_avail_tree_insert(&arena->runs_avail, arena_mapp_get(chunk,
	    pageind));
}
279
/*
 * Remove the unallocated run of npages starting at pageind from the arena's
 * runs_avail tree; exact mirror of arena_avail_insert() (counters are
 * decremented rather than incremented).
 */
static void
arena_avail_remove(arena_t *arena, arena_chunk_t *chunk, size_t pageind,
    size_t npages, bool maybe_adjac_pred, bool maybe_adjac_succ)
{

	assert(npages == (arena_mapbits_unallocated_size_get(chunk, pageind) >>
	    LG_PAGE));

	/*
	 * chunks_dirty is keyed by nruns_{avail,adjac}, so the chunk must be
	 * removed and reinserted even if the run to be removed is clean.
	 */
	if (chunk->ndirty != 0)
		arena_chunk_dirty_remove(&arena->chunks_dirty, chunk);

	if (maybe_adjac_pred && arena_avail_adjac_pred(chunk, pageind))
		chunk->nruns_adjac--;
	if (maybe_adjac_succ && arena_avail_adjac_succ(chunk, pageind, npages))
		chunk->nruns_adjac--;
	chunk->nruns_avail--;
	/* Either some runs remain (avail > adjac), or the chunk is empty. */
	assert(chunk->nruns_avail > chunk->nruns_adjac || (chunk->nruns_avail
	    == 0 && chunk->nruns_adjac == 0));

	if (arena_mapbits_dirty_get(chunk, pageind) != 0) {
		arena->ndirty -= npages;
		chunk->ndirty -= npages;
	}
	if (chunk->ndirty != 0)
		arena_chunk_dirty_insert(&arena->chunks_dirty, chunk);

	arena_avail_tree_remove(&arena->runs_avail, arena_mapp_get(chunk,
	    pageind));
}
313
a78e148b 314static inline void *
315arena_run_reg_alloc(arena_run_t *run, arena_bin_info_t *bin_info)
316{
317 void *ret;
318 unsigned regind;
319 bitmap_t *bitmap = (bitmap_t *)((uintptr_t)run +
320 (uintptr_t)bin_info->bitmap_offset);
321
a78e148b 322 assert(run->nfree > 0);
323 assert(bitmap_full(bitmap, &bin_info->bitmap_info) == false);
324
325 regind = bitmap_sfu(bitmap, &bin_info->bitmap_info);
326 ret = (void *)((uintptr_t)run + (uintptr_t)bin_info->reg0_offset +
4934f93d 327 (uintptr_t)(bin_info->reg_interval * regind));
a78e148b 328 run->nfree--;
329 if (regind == run->nextind)
330 run->nextind++;
331 assert(regind < run->nextind);
332 return (ret);
333}
334
335static inline void
336arena_run_reg_dalloc(arena_run_t *run, void *ptr)
337{
338 arena_chunk_t *chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(run);
4934f93d 339 size_t pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE;
340 size_t mapbits = arena_mapbits_get(chunk, pageind);
341 size_t binind = arena_ptr_small_binind_get(ptr, mapbits);
a78e148b 342 arena_bin_info_t *bin_info = &arena_bin_info[binind];
343 unsigned regind = arena_run_regind(run, bin_info, ptr);
344 bitmap_t *bitmap = (bitmap_t *)((uintptr_t)run +
345 (uintptr_t)bin_info->bitmap_offset);
346
347 assert(run->nfree < bin_info->nregs);
348 /* Freeing an interior pointer can cause assertion failure. */
349 assert(((uintptr_t)ptr - ((uintptr_t)run +
4934f93d 350 (uintptr_t)bin_info->reg0_offset)) %
351 (uintptr_t)bin_info->reg_interval == 0);
a78e148b 352 assert((uintptr_t)ptr >= (uintptr_t)run +
353 (uintptr_t)bin_info->reg0_offset);
354 /* Freeing an unallocated pointer can cause assertion failure. */
355 assert(bitmap_get(bitmap, &bin_info->bitmap_info, regind));
356
357 bitmap_unset(bitmap, &bin_info->bitmap_info, regind);
358 run->nfree++;
359}
360
a78e148b 361static inline void
362arena_chunk_validate_zeroed(arena_chunk_t *chunk, size_t run_ind)
363{
364 size_t i;
4934f93d 365 UNUSED size_t *p = (size_t *)((uintptr_t)chunk + (run_ind << LG_PAGE));
a78e148b 366
4934f93d 367 for (i = 0; i < PAGE / sizeof(size_t); i++)
a78e148b 368 assert(p[i] == 0);
369}
a78e148b 370
/*
 * Carve a run of `size` bytes out of the unallocated run that contains `run`.
 * `large` selects large vs. small map encoding (binind must be BININD_INVALID
 * for large runs and a valid bin index for small runs); `zero` requests
 * zeroed memory for large runs.  Trailing leftover pages are re-inserted into
 * runs_avail for later use.
 */
static void
arena_run_split(arena_t *arena, arena_run_t *run, size_t size, bool large,
    size_t binind, bool zero)
{
	arena_chunk_t *chunk;
	size_t run_ind, total_pages, need_pages, rem_pages, i;
	size_t flag_dirty;

	assert((large && binind == BININD_INVALID) || (large == false && binind
	    != BININD_INVALID));

	chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(run);
	run_ind = (unsigned)(((uintptr_t)run - (uintptr_t)chunk) >> LG_PAGE);
	flag_dirty = arena_mapbits_dirty_get(chunk, run_ind);
	total_pages = arena_mapbits_unallocated_size_get(chunk, run_ind) >>
	    LG_PAGE;
	assert(arena_mapbits_dirty_get(chunk, run_ind+total_pages-1) ==
	    flag_dirty);
	need_pages = (size >> LG_PAGE);
	assert(need_pages > 0);
	assert(need_pages <= total_pages);
	rem_pages = total_pages - need_pages;

	arena_avail_remove(arena, chunk, run_ind, total_pages, true, true);
	if (config_stats) {
		/*
		 * Update stats_cactive if nactive is crossing a chunk
		 * multiple.
		 */
		size_t cactive_diff = CHUNK_CEILING((arena->nactive +
		    need_pages) << LG_PAGE) - CHUNK_CEILING(arena->nactive <<
		    LG_PAGE);
		if (cactive_diff != 0)
			stats_cactive_add(cactive_diff);
	}
	arena->nactive += need_pages;

	/* Keep track of trailing unused pages for later use. */
	if (rem_pages > 0) {
		if (flag_dirty != 0) {
			/* Leftover run stays dirty at both ends. */
			arena_mapbits_unallocated_set(chunk, run_ind+need_pages,
			    (rem_pages << LG_PAGE), CHUNK_MAP_DIRTY);
			arena_mapbits_unallocated_set(chunk,
			    run_ind+total_pages-1, (rem_pages << LG_PAGE),
			    CHUNK_MAP_DIRTY);
		} else {
			/* Preserve each boundary page's unzeroed flag. */
			arena_mapbits_unallocated_set(chunk, run_ind+need_pages,
			    (rem_pages << LG_PAGE),
			    arena_mapbits_unzeroed_get(chunk,
			    run_ind+need_pages));
			arena_mapbits_unallocated_set(chunk,
			    run_ind+total_pages-1, (rem_pages << LG_PAGE),
			    arena_mapbits_unzeroed_get(chunk,
			    run_ind+total_pages-1));
		}
		arena_avail_insert(arena, chunk, run_ind+need_pages, rem_pages,
		    false, true);
	}

	/*
	 * Update the page map separately for large vs. small runs, since it is
	 * possible to avoid iteration for large mallocs.
	 */
	if (large) {
		if (zero) {
			if (flag_dirty == 0) {
				/*
				 * The run is clean, so some pages may be
				 * zeroed (i.e. never before touched).
				 */
				for (i = 0; i < need_pages; i++) {
					if (arena_mapbits_unzeroed_get(chunk,
					    run_ind+i) != 0) {
						VALGRIND_MAKE_MEM_UNDEFINED(
						    (void *)((uintptr_t)
						    chunk + ((run_ind+i) <<
						    LG_PAGE)), PAGE);
						memset((void *)((uintptr_t)
						    chunk + ((run_ind+i) <<
						    LG_PAGE)), 0, PAGE);
					} else if (config_debug) {
						VALGRIND_MAKE_MEM_DEFINED(
						    (void *)((uintptr_t)
						    chunk + ((run_ind+i) <<
						    LG_PAGE)), PAGE);
						arena_chunk_validate_zeroed(
						    chunk, run_ind+i);
					}
				}
			} else {
				/*
				 * The run is dirty, so all pages must be
				 * zeroed.
				 */
				VALGRIND_MAKE_MEM_UNDEFINED((void
				    *)((uintptr_t)chunk + (run_ind <<
				    LG_PAGE)), (need_pages << LG_PAGE));
				memset((void *)((uintptr_t)chunk + (run_ind <<
				    LG_PAGE)), 0, (need_pages << LG_PAGE));
			}
		}

		/*
		 * Set the last element first, in case the run only contains one
		 * page (i.e. both statements set the same element).
		 */
		arena_mapbits_large_set(chunk, run_ind+need_pages-1, 0,
		    flag_dirty);
		arena_mapbits_large_set(chunk, run_ind, size, flag_dirty);
	} else {
		assert(zero == false);
		/*
		 * Propagate the dirty and unzeroed flags to the allocated
		 * small run, so that arena_dalloc_bin_run() has the ability to
		 * conditionally trim clean pages.
		 */
		arena_mapbits_small_set(chunk, run_ind, 0, binind, flag_dirty);
		/*
		 * The first page will always be dirtied during small run
		 * initialization, so a validation failure here would not
		 * actually cause an observable failure.
		 */
		if (config_debug && flag_dirty == 0 &&
		    arena_mapbits_unzeroed_get(chunk, run_ind) == 0)
			arena_chunk_validate_zeroed(chunk, run_ind);
		for (i = 1; i < need_pages - 1; i++) {
			arena_mapbits_small_set(chunk, run_ind+i, i, binind, 0);
			if (config_debug && flag_dirty == 0 &&
			    arena_mapbits_unzeroed_get(chunk, run_ind+i) == 0)
				arena_chunk_validate_zeroed(chunk, run_ind+i);
		}
		arena_mapbits_small_set(chunk, run_ind+need_pages-1,
		    need_pages-1, binind, flag_dirty);
		if (config_debug && flag_dirty == 0 &&
		    arena_mapbits_unzeroed_get(chunk, run_ind+need_pages-1) ==
		    0) {
			arena_chunk_validate_zeroed(chunk,
			    run_ind+need_pages-1);
		}
	}
}
512
/*
 * Obtain a chunk for the arena: reuse the cached spare if present, otherwise
 * allocate a fresh chunk (dropping arena->lock around the system allocation)
 * and initialize its page map as one maximal unallocated run.  Returns NULL
 * only if chunk_alloc() fails.
 */
static arena_chunk_t *
arena_chunk_alloc(arena_t *arena)
{
	arena_chunk_t *chunk;
	size_t i;

	if (arena->spare != NULL) {
		chunk = arena->spare;
		arena->spare = NULL;

		/* The spare is always a single maximal unallocated run. */
		assert(arena_mapbits_allocated_get(chunk, map_bias) == 0);
		assert(arena_mapbits_allocated_get(chunk, chunk_npages-1) == 0);
		assert(arena_mapbits_unallocated_size_get(chunk, map_bias) ==
		    arena_maxclass);
		assert(arena_mapbits_unallocated_size_get(chunk,
		    chunk_npages-1) == arena_maxclass);
		assert(arena_mapbits_dirty_get(chunk, map_bias) ==
		    arena_mapbits_dirty_get(chunk, chunk_npages-1));
	} else {
		bool zero;
		size_t unzeroed;

		zero = false;
		/*
		 * Drop the arena lock across the (potentially slow) system
		 * allocation; state is re-validated after reacquiring.
		 */
		malloc_mutex_unlock(&arena->lock);
		chunk = (arena_chunk_t *)chunk_alloc(chunksize, chunksize,
		    false, &zero, arena->dss_prec);
		malloc_mutex_lock(&arena->lock);
		if (chunk == NULL)
			return (NULL);
		if (config_stats)
			arena->stats.mapped += chunksize;

		chunk->arena = arena;

		/*
		 * Claim that no pages are in use, since the header is merely
		 * overhead.
		 */
		chunk->ndirty = 0;

		chunk->nruns_avail = 0;
		chunk->nruns_adjac = 0;

		/*
		 * Initialize the map to contain one maximal free untouched run.
		 * Mark the pages as zeroed iff chunk_alloc() returned a zeroed
		 * chunk.
		 */
		unzeroed = zero ? 0 : CHUNK_MAP_UNZEROED;
		arena_mapbits_unallocated_set(chunk, map_bias, arena_maxclass,
		    unzeroed);
		/*
		 * There is no need to initialize the internal page map entries
		 * unless the chunk is not zeroed.
		 */
		if (zero == false) {
			for (i = map_bias+1; i < chunk_npages-1; i++)
				arena_mapbits_unzeroed_set(chunk, i, unzeroed);
		} else if (config_debug) {
			for (i = map_bias+1; i < chunk_npages-1; i++) {
				assert(arena_mapbits_unzeroed_get(chunk, i) ==
				    unzeroed);
			}
		}
		arena_mapbits_unallocated_set(chunk, chunk_npages-1,
		    arena_maxclass, unzeroed);
	}

	/* Insert the run into the runs_avail tree. */
	arena_avail_insert(arena, chunk, map_bias, chunk_npages-map_bias,
	    false, false);

	return (chunk);
}
587
/*
 * Retire a fully-unallocated chunk.  The chunk is cached as arena->spare; if
 * a spare already exists, the old spare is returned to the chunk allocator
 * (with arena->lock dropped around the system call).
 */
static void
arena_chunk_dealloc(arena_t *arena, arena_chunk_t *chunk)
{
	/* Caller guarantees the chunk is one maximal unallocated run. */
	assert(arena_mapbits_allocated_get(chunk, map_bias) == 0);
	assert(arena_mapbits_allocated_get(chunk, chunk_npages-1) == 0);
	assert(arena_mapbits_unallocated_size_get(chunk, map_bias) ==
	    arena_maxclass);
	assert(arena_mapbits_unallocated_size_get(chunk, chunk_npages-1) ==
	    arena_maxclass);
	assert(arena_mapbits_dirty_get(chunk, map_bias) ==
	    arena_mapbits_dirty_get(chunk, chunk_npages-1));

	/*
	 * Remove run from the runs_avail tree, so that the arena does not use
	 * it.
	 */
	arena_avail_remove(arena, chunk, map_bias, chunk_npages-map_bias,
	    false, false);

	if (arena->spare != NULL) {
		arena_chunk_t *spare = arena->spare;

		/* Keep the newer chunk; release the previous spare. */
		arena->spare = chunk;
		malloc_mutex_unlock(&arena->lock);
		chunk_dealloc((void *)spare, chunksize, true);
		malloc_mutex_lock(&arena->lock);
		if (config_stats)
			arena->stats.mapped -= chunksize;
	} else
		arena->spare = chunk;
}
619
620static arena_run_t *
4934f93d 621arena_run_alloc_helper(arena_t *arena, size_t size, bool large, size_t binind,
622 bool zero)
a78e148b 623{
a78e148b 624 arena_run_t *run;
625 arena_chunk_map_t *mapelm, key;
626
a78e148b 627 key.bits = size | CHUNK_MAP_KEY;
21b26915 628 mapelm = arena_avail_tree_nsearch(&arena->runs_avail, &key);
a78e148b 629 if (mapelm != NULL) {
630 arena_chunk_t *run_chunk = CHUNK_ADDR2BASE(mapelm);
631 size_t pageind = (((uintptr_t)mapelm -
632 (uintptr_t)run_chunk->map) / sizeof(arena_chunk_map_t))
633 + map_bias;
634
635 run = (arena_run_t *)((uintptr_t)run_chunk + (pageind <<
4934f93d 636 LG_PAGE));
637 arena_run_split(arena, run, size, large, binind, zero);
a78e148b 638 return (run);
639 }
640
4934f93d 641 return (NULL);
642}
643
644static arena_run_t *
645arena_run_alloc(arena_t *arena, size_t size, bool large, size_t binind,
646 bool zero)
647{
648 arena_chunk_t *chunk;
649 arena_run_t *run;
650
651 assert(size <= arena_maxclass);
652 assert((size & PAGE_MASK) == 0);
653 assert((large && binind == BININD_INVALID) || (large == false && binind
654 != BININD_INVALID));
655
656 /* Search the arena's chunks for the lowest best fit. */
657 run = arena_run_alloc_helper(arena, size, large, binind, zero);
658 if (run != NULL)
659 return (run);
660
a78e148b 661 /*
662 * No usable runs. Create a new chunk from which to allocate the run.
663 */
664 chunk = arena_chunk_alloc(arena);
665 if (chunk != NULL) {
4934f93d 666 run = (arena_run_t *)((uintptr_t)chunk + (map_bias << LG_PAGE));
667 arena_run_split(arena, run, size, large, binind, zero);
a78e148b 668 return (run);
669 }
670
671 /*
672 * arena_chunk_alloc() failed, but another thread may have made
673 * sufficient memory available while this one dropped arena->lock in
674 * arena_chunk_alloc(), so search one more time.
675 */
4934f93d 676 return (arena_run_alloc_helper(arena, size, large, binind, zero));
a78e148b 677}
678
679static inline void
680arena_maybe_purge(arena_t *arena)
681{
21b26915 682 size_t npurgeable, threshold;
683
684 /* Don't purge if the option is disabled. */
685 if (opt_lg_dirty_mult < 0)
686 return;
687 /* Don't purge if all dirty pages are already being purged. */
688 if (arena->ndirty <= arena->npurgatory)
689 return;
690 npurgeable = arena->ndirty - arena->npurgatory;
691 threshold = (arena->nactive >> opt_lg_dirty_mult);
692 /*
693 * Don't purge unless the number of purgeable pages exceeds the
694 * threshold.
695 */
696 if (npurgeable <= threshold)
697 return;
a78e148b 698
21b26915 699 arena_purge(arena, false);
a78e148b 700}
701
/*
 * Purge dirty pages within one chunk.  Dirty runs are temporarily allocated
 * (so no other thread can touch them), the arena lock is dropped while
 * pages_purge() is applied to each, and finally the runs are deallocated
 * again.  If `all` is false, only fragment runs (those adjacent to clean
 * runs) are purged.  Returns the number of pages purged.
 */
static inline size_t
arena_chunk_purge(arena_t *arena, arena_chunk_t *chunk, bool all)
{
	size_t npurged;
	ql_head(arena_chunk_map_t) mapelms;
	arena_chunk_map_t *mapelm;
	size_t pageind, npages;
	size_t nmadvise;

	ql_new(&mapelms);

	/*
	 * If chunk is the spare, temporarily re-allocate it, 1) so that its
	 * run is reinserted into runs_avail, and 2) so that it cannot be
	 * completely discarded by another thread while arena->lock is dropped
	 * by this thread.  Note that the arena_run_dalloc() call will
	 * implicitly deallocate the chunk, so no explicit action is required
	 * in this function to deallocate the chunk.
	 *
	 * Note that once a chunk contains dirty pages, it cannot again contain
	 * a single run unless 1) it is a dirty run, or 2) this function purges
	 * dirty pages and causes the transition to a single clean run.  Thus
	 * (chunk == arena->spare) is possible, but it is not possible for
	 * this function to be called on the spare unless it contains a dirty
	 * run.
	 */
	if (chunk == arena->spare) {
		assert(arena_mapbits_dirty_get(chunk, map_bias) != 0);
		assert(arena_mapbits_dirty_get(chunk, chunk_npages-1) != 0);

		arena_chunk_alloc(arena);
	}

	if (config_stats)
		arena->stats.purged += chunk->ndirty;

	/*
	 * Operate on all dirty runs if there is no clean/dirty run
	 * fragmentation.
	 */
	if (chunk->nruns_adjac == 0)
		all = true;

	/*
	 * Temporarily allocate free dirty runs within chunk.  If all is false,
	 * only operate on dirty runs that are fragments; otherwise operate on
	 * all dirty runs.
	 */
	for (pageind = map_bias; pageind < chunk_npages; pageind += npages) {
		mapelm = arena_mapp_get(chunk, pageind);
		if (arena_mapbits_allocated_get(chunk, pageind) == 0) {
			size_t run_size =
			    arena_mapbits_unallocated_size_get(chunk, pageind);

			npages = run_size >> LG_PAGE;
			assert(pageind + npages <= chunk_npages);
			assert(arena_mapbits_dirty_get(chunk, pageind) ==
			    arena_mapbits_dirty_get(chunk, pageind+npages-1));

			if (arena_mapbits_dirty_get(chunk, pageind) != 0 &&
			    (all || arena_avail_adjac(chunk, pageind,
			    npages))) {
				arena_run_t *run = (arena_run_t *)((uintptr_t)
				    chunk + (uintptr_t)(pageind << LG_PAGE));

				/* Claim the run so it survives the unlock. */
				arena_run_split(arena, run, run_size, true,
				    BININD_INVALID, false);
				/* Append to list for later processing. */
				ql_elm_new(mapelm, u.ql_link);
				ql_tail_insert(&mapelms, mapelm, u.ql_link);
			}
		} else {
			/* Skip run. */
			if (arena_mapbits_large_get(chunk, pageind) != 0) {
				npages = arena_mapbits_large_size_get(chunk,
				    pageind) >> LG_PAGE;
			} else {
				size_t binind;
				arena_bin_info_t *bin_info;
				arena_run_t *run = (arena_run_t *)((uintptr_t)
				    chunk + (uintptr_t)(pageind << LG_PAGE));

				assert(arena_mapbits_small_runind_get(chunk,
				    pageind) == 0);
				binind = arena_bin_index(arena, run->bin);
				bin_info = &arena_bin_info[binind];
				npages = bin_info->run_size >> LG_PAGE;
			}
		}
	}
	assert(pageind == chunk_npages);
	assert(chunk->ndirty == 0 || all == false);
	assert(chunk->nruns_adjac == 0);

	/* Purge with the lock dropped; the claimed runs protect us. */
	malloc_mutex_unlock(&arena->lock);
	if (config_stats)
		nmadvise = 0;
	npurged = 0;
	ql_foreach(mapelm, &mapelms, u.ql_link) {
		bool unzeroed;
		size_t flag_unzeroed, i;

		pageind = (((uintptr_t)mapelm - (uintptr_t)chunk->map) /
		    sizeof(arena_chunk_map_t)) + map_bias;
		npages = arena_mapbits_large_size_get(chunk, pageind) >>
		    LG_PAGE;
		assert(pageind + npages <= chunk_npages);
		unzeroed = pages_purge((void *)((uintptr_t)chunk + (pageind <<
		    LG_PAGE)), (npages << LG_PAGE));
		flag_unzeroed = unzeroed ? CHUNK_MAP_UNZEROED : 0;
		/*
		 * Set the unzeroed flag for all pages, now that pages_purge()
		 * has returned whether the pages were zeroed as a side effect
		 * of purging.  This chunk map modification is safe even though
		 * the arena mutex isn't currently owned by this thread,
		 * because the run is marked as allocated, thus protecting it
		 * from being modified by any other thread.  As long as these
		 * writes don't perturb the first and last elements'
		 * CHUNK_MAP_ALLOCATED bits, behavior is well defined.
		 */
		for (i = 0; i < npages; i++) {
			arena_mapbits_unzeroed_set(chunk, pageind+i,
			    flag_unzeroed);
		}
		npurged += npages;
		if (config_stats)
			nmadvise++;
	}
	malloc_mutex_lock(&arena->lock);
	if (config_stats)
		arena->stats.nmadvise += nmadvise;

	/* Deallocate runs. */
	for (mapelm = ql_first(&mapelms); mapelm != NULL;
	    mapelm = ql_first(&mapelms)) {
		arena_run_t *run;

		pageind = (((uintptr_t)mapelm - (uintptr_t)chunk->map) /
		    sizeof(arena_chunk_map_t)) + map_bias;
		run = (arena_run_t *)((uintptr_t)chunk + (uintptr_t)(pageind <<
		    LG_PAGE));
		ql_remove(&mapelms, mapelm, u.ql_link);
		arena_run_dalloc(arena, run, false, true);
	}

	return (npurged);
}
849
850static arena_chunk_t *
851chunks_dirty_iter_cb(arena_chunk_tree_t *tree, arena_chunk_t *chunk, void *arg)
852{
853 size_t *ndirty = (size_t *)arg;
854
855 assert(chunk->ndirty != 0);
856 *ndirty += chunk->ndirty;
857 return (NULL);
a78e148b 858}
859
/*
 * Purge dirty pages until the arena is back under its dirty-page threshold
 * (or entirely clean if `all`).  arena->npurgatory tracks pages that some
 * thread has committed to purging, so concurrent purgers do not all race to
 * purge the same pages.
 */
static void
arena_purge(arena_t *arena, bool all)
{
	arena_chunk_t *chunk;
	size_t npurgatory;
	if (config_debug) {
		size_t ndirty = 0;

		/* Cross-check arena->ndirty against per-chunk counts. */
		arena_chunk_dirty_iter(&arena->chunks_dirty, NULL,
		    chunks_dirty_iter_cb, (void *)&ndirty);
		assert(ndirty == arena->ndirty);
	}
	assert(arena->ndirty > arena->npurgatory || all);
	assert((arena->nactive >> opt_lg_dirty_mult) < (arena->ndirty -
	    arena->npurgatory) || all);

	if (config_stats)
		arena->stats.npurge++;

	/*
	 * Compute the minimum number of pages that this thread should try to
	 * purge, and add the result to arena->npurgatory.  This will keep
	 * multiple threads from racing to reduce ndirty below the threshold.
	 */
	{
		size_t npurgeable = arena->ndirty - arena->npurgatory;

		if (all == false) {
			size_t threshold = (arena->nactive >>
			    opt_lg_dirty_mult);

			npurgatory = npurgeable - threshold;
		} else
			npurgatory = npurgeable;
	}
	arena->npurgatory += npurgatory;

	while (npurgatory > 0) {
		size_t npurgeable, npurged, nunpurged;

		/* Get next chunk with dirty pages. */
		chunk = arena_chunk_dirty_first(&arena->chunks_dirty);
		if (chunk == NULL) {
			/*
			 * This thread was unable to purge as many pages as
			 * originally intended, due to races with other threads
			 * that either did some of the purging work, or re-used
			 * dirty pages.
			 */
			arena->npurgatory -= npurgatory;
			return;
		}
		npurgeable = chunk->ndirty;
		assert(npurgeable != 0);

		if (npurgeable > npurgatory && chunk->nruns_adjac == 0) {
			/*
			 * This thread will purge all the dirty pages in chunk,
			 * so set npurgatory to reflect this thread's intent to
			 * purge the pages.  This tends to reduce the chances
			 * of the following scenario:
			 *
			 * 1) This thread sets arena->npurgatory such that
			 *    (arena->ndirty - arena->npurgatory) is at the
			 *    threshold.
			 * 2) This thread drops arena->lock.
			 * 3) Another thread causes one or more pages to be
			 *    dirtied, and immediately determines that it must
			 *    purge dirty pages.
			 *
			 * If this scenario *does* play out, that's okay,
			 * because all of the purging work being done really
			 * needs to happen.
			 */
			arena->npurgatory += npurgeable - npurgatory;
			npurgatory = npurgeable;
		}

		/*
		 * Keep track of how many pages are purgeable, versus how many
		 * actually get purged, and adjust counters accordingly.
		 */
		arena->npurgatory -= npurgeable;
		npurgatory -= npurgeable;
		npurged = arena_chunk_purge(arena, chunk, all);
		nunpurged = npurgeable - npurged;
		arena->npurgatory += nunpurged;
		npurgatory += nunpurged;
	}
}
950
/*
 * Purge all purgeable dirty pages in the arena, holding arena->lock for the
 * duration.  Thin locked wrapper around arena_purge(..., all=true).
 */
void
arena_purge_all(arena_t *arena)
{

    malloc_mutex_lock(&arena->lock);
    arena_purge(arena, true);
    malloc_mutex_unlock(&arena->lock);
}
959
/*
 * Return a run of pages to the arena: mark the pages unallocated in the
 * chunk map, coalesce with adjacent unallocated runs on both sides, and
 * deallocate the enclosing chunk if the run now covers every usable page.
 *
 * dirty:   caller dirtied the pages (they hold junk/user data).
 * cleaned: caller already purged the pages, so pre-existing dirty state in
 *          the map should not re-mark them dirty.
 */
static void
arena_run_dalloc(arena_t *arena, arena_run_t *run, bool dirty, bool cleaned)
{
    arena_chunk_t *chunk;
    size_t size, run_ind, run_pages, flag_dirty;

    chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(run);
    run_ind = (size_t)(((uintptr_t)run - (uintptr_t)chunk) >> LG_PAGE);
    assert(run_ind >= map_bias);
    assert(run_ind < chunk_npages);
    /* Recover the run size: large runs record it in the map; small runs
     * take the fixed run size from the owning bin's info. */
    if (arena_mapbits_large_get(chunk, run_ind) != 0) {
        size = arena_mapbits_large_size_get(chunk, run_ind);
        assert(size == PAGE ||
            arena_mapbits_large_size_get(chunk,
            run_ind+(size>>LG_PAGE)-1) == 0);
    } else {
        size_t binind = arena_bin_index(arena, run->bin);
        arena_bin_info_t *bin_info = &arena_bin_info[binind];
        size = bin_info->run_size;
    }
    run_pages = (size >> LG_PAGE);
    if (config_stats) {
        /*
         * Update stats_cactive if nactive is crossing a chunk
         * multiple.
         */
        size_t cactive_diff = CHUNK_CEILING(arena->nactive << LG_PAGE) -
            CHUNK_CEILING((arena->nactive - run_pages) << LG_PAGE);
        if (cactive_diff != 0)
            stats_cactive_sub(cactive_diff);
    }
    arena->nactive -= run_pages;

    /*
     * The run is dirty if the caller claims to have dirtied it, as well as
     * if it was already dirty before being allocated and the caller
     * doesn't claim to have cleaned it.
     */
    assert(arena_mapbits_dirty_get(chunk, run_ind) ==
        arena_mapbits_dirty_get(chunk, run_ind+run_pages-1));
    if (cleaned == false && arena_mapbits_dirty_get(chunk, run_ind) != 0)
        dirty = true;
    flag_dirty = dirty ? CHUNK_MAP_DIRTY : 0;

    /* Mark pages as unallocated in the chunk map.  Both the first and the
     * last page of the run carry the full size, so neighbors can coalesce
     * from either direction. */
    if (dirty) {
        arena_mapbits_unallocated_set(chunk, run_ind, size,
            CHUNK_MAP_DIRTY);
        arena_mapbits_unallocated_set(chunk, run_ind+run_pages-1, size,
            CHUNK_MAP_DIRTY);
    } else {
        arena_mapbits_unallocated_set(chunk, run_ind, size,
            arena_mapbits_unzeroed_get(chunk, run_ind));
        arena_mapbits_unallocated_set(chunk, run_ind+run_pages-1, size,
            arena_mapbits_unzeroed_get(chunk, run_ind+run_pages-1));
    }

    /* Try to coalesce forward.  Only merge with a neighbor of identical
     * dirty state so dirty-page accounting stays per-run consistent. */
    if (run_ind + run_pages < chunk_npages &&
        arena_mapbits_allocated_get(chunk, run_ind+run_pages) == 0 &&
        arena_mapbits_dirty_get(chunk, run_ind+run_pages) == flag_dirty) {
        size_t nrun_size = arena_mapbits_unallocated_size_get(chunk,
            run_ind+run_pages);
        size_t nrun_pages = nrun_size >> LG_PAGE;

        /*
         * Remove successor from runs_avail; the coalesced run is
         * inserted later.
         */
        assert(arena_mapbits_unallocated_size_get(chunk,
            run_ind+run_pages+nrun_pages-1) == nrun_size);
        assert(arena_mapbits_dirty_get(chunk,
            run_ind+run_pages+nrun_pages-1) == flag_dirty);
        arena_avail_remove(arena, chunk, run_ind+run_pages, nrun_pages,
            false, true);

        size += nrun_size;
        run_pages += nrun_pages;

        arena_mapbits_unallocated_size_set(chunk, run_ind, size);
        arena_mapbits_unallocated_size_set(chunk, run_ind+run_pages-1,
            size);
    }

    /* Try to coalesce backward. */
    if (run_ind > map_bias && arena_mapbits_allocated_get(chunk, run_ind-1)
        == 0 && arena_mapbits_dirty_get(chunk, run_ind-1) == flag_dirty) {
        size_t prun_size = arena_mapbits_unallocated_size_get(chunk,
            run_ind-1);
        size_t prun_pages = prun_size >> LG_PAGE;

        run_ind -= prun_pages;

        /*
         * Remove predecessor from runs_avail; the coalesced run is
         * inserted later.
         */
        assert(arena_mapbits_unallocated_size_get(chunk, run_ind) ==
            prun_size);
        assert(arena_mapbits_dirty_get(chunk, run_ind) == flag_dirty);
        arena_avail_remove(arena, chunk, run_ind, prun_pages, true,
            false);

        size += prun_size;
        run_pages += prun_pages;

        arena_mapbits_unallocated_size_set(chunk, run_ind, size);
        arena_mapbits_unallocated_size_set(chunk, run_ind+run_pages-1,
            size);
    }

    /* Insert into runs_avail, now that coalescing is complete. */
    assert(arena_mapbits_unallocated_size_get(chunk, run_ind) ==
        arena_mapbits_unallocated_size_get(chunk, run_ind+run_pages-1));
    assert(arena_mapbits_dirty_get(chunk, run_ind) ==
        arena_mapbits_dirty_get(chunk, run_ind+run_pages-1));
    arena_avail_insert(arena, chunk, run_ind, run_pages, true, true);

    /* Deallocate chunk if it is now completely unused. */
    if (size == arena_maxclass) {
        assert(run_ind == map_bias);
        assert(run_pages == (arena_maxclass >> LG_PAGE));
        arena_chunk_dealloc(arena, chunk);
    }

    /*
     * It is okay to do dirty page processing here even if the chunk was
     * deallocated above, since in that case it is the spare.  Waiting
     * until after possible chunk deallocation to do dirty processing
     * allows for an old spare to be fully deallocated, thus decreasing the
     * chances of spuriously crossing the dirty page purging threshold.
     */
    if (dirty)
        arena_maybe_purge(arena);
}
1095
/*
 * Shrink a large run by releasing its leading (oldsize - newsize) bytes back
 * to the arena, leaving the trailing newsize bytes allocated.  Caller holds
 * arena->lock.
 */
static void
arena_run_trim_head(arena_t *arena, arena_chunk_t *chunk, arena_run_t *run,
    size_t oldsize, size_t newsize)
{
    size_t pageind = ((uintptr_t)run - (uintptr_t)chunk) >> LG_PAGE;
    size_t head_npages = (oldsize - newsize) >> LG_PAGE;
    size_t flag_dirty = arena_mapbits_dirty_get(chunk, pageind);

    assert(oldsize > newsize);

    /*
     * Update the chunk map so that arena_run_dalloc() can treat the
     * leading run as separately allocated.  Set the last element of each
     * run first, in case of single-page runs.
     */
    assert(arena_mapbits_large_size_get(chunk, pageind) == oldsize);
    arena_mapbits_large_set(chunk, pageind+head_npages-1, 0, flag_dirty);
    arena_mapbits_large_set(chunk, pageind, oldsize-newsize, flag_dirty);

    if (config_debug) {
        UNUSED size_t tail_npages = newsize >> LG_PAGE;
        assert(arena_mapbits_large_size_get(chunk,
            pageind+head_npages+tail_npages-1) == 0);
        assert(arena_mapbits_dirty_get(chunk,
            pageind+head_npages+tail_npages-1) == flag_dirty);
    }
    arena_mapbits_large_set(chunk, pageind+head_npages, newsize,
        flag_dirty);

    /* Free the now-independent head run; never dirty, never cleaned. */
    arena_run_dalloc(arena, run, false, false);
}
1127
/*
 * Shrink a large run by releasing its trailing (oldsize - newsize) bytes
 * back to the arena, keeping the leading newsize bytes allocated.  "dirty"
 * is forwarded to arena_run_dalloc() for the freed tail.  Caller holds
 * arena->lock.
 */
static void
arena_run_trim_tail(arena_t *arena, arena_chunk_t *chunk, arena_run_t *run,
    size_t oldsize, size_t newsize, bool dirty)
{
    size_t pageind = ((uintptr_t)run - (uintptr_t)chunk) >> LG_PAGE;
    size_t head_npages = newsize >> LG_PAGE;
    size_t flag_dirty = arena_mapbits_dirty_get(chunk, pageind);

    assert(oldsize > newsize);

    /*
     * Update the chunk map so that arena_run_dalloc() can treat the
     * trailing run as separately allocated.  Set the last element of each
     * run first, in case of single-page runs.
     */
    assert(arena_mapbits_large_size_get(chunk, pageind) == oldsize);
    arena_mapbits_large_set(chunk, pageind+head_npages-1, 0, flag_dirty);
    arena_mapbits_large_set(chunk, pageind, newsize, flag_dirty);

    if (config_debug) {
        UNUSED size_t tail_npages = (oldsize - newsize) >> LG_PAGE;
        assert(arena_mapbits_large_size_get(chunk,
            pageind+head_npages+tail_npages-1) == 0);
        assert(arena_mapbits_dirty_get(chunk,
            pageind+head_npages+tail_npages-1) == flag_dirty);
    }
    arena_mapbits_large_set(chunk, pageind+head_npages, oldsize-newsize,
        flag_dirty);

    arena_run_dalloc(arena, (arena_run_t *)((uintptr_t)run + newsize),
        dirty, false);
}
1160
1161static arena_run_t *
4934f93d 1162arena_bin_runs_first(arena_bin_t *bin)
a78e148b 1163{
4934f93d 1164 arena_chunk_map_t *mapelm = arena_run_tree_first(&bin->runs);
a78e148b 1165 if (mapelm != NULL) {
1166 arena_chunk_t *chunk;
1167 size_t pageind;
4934f93d 1168 arena_run_t *run;
a78e148b 1169
1170 chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(mapelm);
1171 pageind = ((((uintptr_t)mapelm - (uintptr_t)chunk->map) /
1172 sizeof(arena_chunk_map_t))) + map_bias;
1173 run = (arena_run_t *)((uintptr_t)chunk + (uintptr_t)((pageind -
4934f93d 1174 arena_mapbits_small_runind_get(chunk, pageind)) <<
1175 LG_PAGE));
a78e148b 1176 return (run);
1177 }
4934f93d 1178
1179 return (NULL);
1180}
1181
1182static void
1183arena_bin_runs_insert(arena_bin_t *bin, arena_run_t *run)
1184{
1185 arena_chunk_t *chunk = CHUNK_ADDR2BASE(run);
1186 size_t pageind = ((uintptr_t)run - (uintptr_t)chunk) >> LG_PAGE;
1187 arena_chunk_map_t *mapelm = arena_mapp_get(chunk, pageind);
1188
1189 assert(arena_run_tree_search(&bin->runs, mapelm) == NULL);
1190
1191 arena_run_tree_insert(&bin->runs, mapelm);
1192}
1193
1194static void
1195arena_bin_runs_remove(arena_bin_t *bin, arena_run_t *run)
1196{
1197 arena_chunk_t *chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(run);
1198 size_t pageind = ((uintptr_t)run - (uintptr_t)chunk) >> LG_PAGE;
1199 arena_chunk_map_t *mapelm = arena_mapp_get(chunk, pageind);
1200
1201 assert(arena_run_tree_search(&bin->runs, mapelm) != NULL);
1202
1203 arena_run_tree_remove(&bin->runs, mapelm);
1204}
1205
1206static arena_run_t *
1207arena_bin_nonfull_run_tryget(arena_bin_t *bin)
1208{
1209 arena_run_t *run = arena_bin_runs_first(bin);
1210 if (run != NULL) {
1211 arena_bin_runs_remove(bin, run);
1212 if (config_stats)
1213 bin->stats.reruns++;
1214 }
1215 return (run);
1216}
1217
/*
 * Obtain a non-full run for the bin, allocating a fresh one from the arena
 * if none is cached.  Called with bin->lock held; temporarily drops it to
 * take arena->lock, so bin state may change across the call.
 */
static arena_run_t *
arena_bin_nonfull_run_get(arena_t *arena, arena_bin_t *bin)
{
    arena_run_t *run;
    size_t binind;
    arena_bin_info_t *bin_info;

    /* Look for a usable run. */
    run = arena_bin_nonfull_run_tryget(bin);
    if (run != NULL)
        return (run);
    /* No existing runs have any space available. */

    binind = arena_bin_index(arena, bin);
    bin_info = &arena_bin_info[binind];

    /* Allocate a new run. */
    malloc_mutex_unlock(&bin->lock);
    /******************************/
    malloc_mutex_lock(&arena->lock);
    run = arena_run_alloc(arena, bin_info->run_size, false, binind, false);
    if (run != NULL) {
        bitmap_t *bitmap = (bitmap_t *)((uintptr_t)run +
            (uintptr_t)bin_info->bitmap_offset);

        /* Initialize run internals. */
        VALGRIND_MAKE_MEM_UNDEFINED(run, bin_info->reg0_offset -
            bin_info->redzone_size);
        run->bin = bin;
        run->nextind = 0;
        run->nfree = bin_info->nregs;
        bitmap_init(bitmap, &bin_info->bitmap_info);
    }
    malloc_mutex_unlock(&arena->lock);
    /********************************/
    malloc_mutex_lock(&bin->lock);
    if (run != NULL) {
        if (config_stats) {
            bin->stats.nruns++;
            bin->stats.curruns++;
        }
        return (run);
    }

    /*
     * arena_run_alloc() failed, but another thread may have made
     * sufficient memory available while this one dropped bin->lock above,
     * so search one more time.
     */
    run = arena_bin_nonfull_run_tryget(bin);
    if (run != NULL)
        return (run);

    return (NULL);
}
1273
1274/* Re-fill bin->runcur, then call arena_run_reg_alloc(). */
1275static void *
1276arena_bin_malloc_hard(arena_t *arena, arena_bin_t *bin)
1277{
1278 void *ret;
1279 size_t binind;
1280 arena_bin_info_t *bin_info;
1281 arena_run_t *run;
1282
1283 binind = arena_bin_index(arena, bin);
1284 bin_info = &arena_bin_info[binind];
1285 bin->runcur = NULL;
1286 run = arena_bin_nonfull_run_get(arena, bin);
1287 if (bin->runcur != NULL && bin->runcur->nfree > 0) {
1288 /*
1289 * Another thread updated runcur while this one ran without the
1290 * bin lock in arena_bin_nonfull_run_get().
1291 */
a78e148b 1292 assert(bin->runcur->nfree > 0);
1293 ret = arena_run_reg_alloc(bin->runcur, bin_info);
1294 if (run != NULL) {
1295 arena_chunk_t *chunk;
1296
1297 /*
1298 * arena_run_alloc() may have allocated run, or it may
1299 * have pulled run from the bin's run tree. Therefore
1300 * it is unsafe to make any assumptions about how run
1301 * has previously been used, and arena_bin_lower_run()
1302 * must be called, as if a region were just deallocated
1303 * from the run.
1304 */
1305 chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(run);
1306 if (run->nfree == bin_info->nregs)
1307 arena_dalloc_bin_run(arena, chunk, run, bin);
1308 else
1309 arena_bin_lower_run(arena, chunk, run, bin);
1310 }
1311 return (ret);
1312 }
1313
1314 if (run == NULL)
1315 return (NULL);
1316
1317 bin->runcur = run;
1318
a78e148b 1319 assert(bin->runcur->nfree > 0);
1320
1321 return (arena_run_reg_alloc(bin->runcur, bin_info));
1322}
1323
a78e148b 1324void
1325arena_prof_accum(arena_t *arena, uint64_t accumbytes)
1326{
1327
4934f93d 1328 cassert(config_prof);
1329
1330 if (config_prof && prof_interval != 0) {
a78e148b 1331 arena->prof_accumbytes += accumbytes;
1332 if (arena->prof_accumbytes >= prof_interval) {
1333 prof_idump();
1334 arena->prof_accumbytes -= prof_interval;
1335 }
1336 }
1337}
a78e148b 1338
/*
 * Fill a thread-cache bin with up to (ncached_max >> lg_fill_div) regions
 * from the arena's corresponding bin.  Stops early if the arena cannot
 * supply more regions; tbin->ncached reflects the number actually cached.
 */
void
arena_tcache_fill_small(arena_t *arena, tcache_bin_t *tbin, size_t binind,
    uint64_t prof_accumbytes)
{
    unsigned i, nfill;
    arena_bin_t *bin;
    arena_run_t *run;
    void *ptr;

    assert(tbin->ncached == 0);

    if (config_prof) {
        malloc_mutex_lock(&arena->lock);
        arena_prof_accum(arena, prof_accumbytes);
        malloc_mutex_unlock(&arena->lock);
    }
    bin = &arena->bins[binind];
    malloc_mutex_lock(&bin->lock);
    for (i = 0, nfill = (tcache_bin_info[binind].ncached_max >>
        tbin->lg_fill_div); i < nfill; i++) {
        if ((run = bin->runcur) != NULL && run->nfree > 0)
            ptr = arena_run_reg_alloc(run, &arena_bin_info[binind]);
        else
            ptr = arena_bin_malloc_hard(arena, bin);
        if (ptr == NULL)
            break;
        if (config_fill && opt_junk) {
            arena_alloc_junk_small(ptr, &arena_bin_info[binind],
                true);
        }
        /* Insert such that low regions get used first. */
        tbin->avail[nfill - 1 - i] = ptr;
    }
    if (config_stats) {
        bin->stats.allocated += i * arena_bin_info[binind].reg_size;
        bin->stats.nmalloc += i;
        bin->stats.nrequests += tbin->tstats.nrequests;
        bin->stats.nfills++;
        tbin->tstats.nrequests = 0;
    }
    malloc_mutex_unlock(&bin->lock);
    tbin->ncached = i;
}
4934f93d 1382
1383void
1384arena_alloc_junk_small(void *ptr, arena_bin_info_t *bin_info, bool zero)
1385{
1386
1387 if (zero) {
1388 size_t redzone_size = bin_info->redzone_size;
1389 memset((void *)((uintptr_t)ptr - redzone_size), 0xa5,
1390 redzone_size);
1391 memset((void *)((uintptr_t)ptr + bin_info->reg_size), 0xa5,
1392 redzone_size);
1393 } else {
1394 memset((void *)((uintptr_t)ptr - bin_info->redzone_size), 0xa5,
1395 bin_info->reg_interval);
1396 }
1397}
1398
1399void
1400arena_dalloc_junk_small(void *ptr, arena_bin_info_t *bin_info)
1401{
1402 size_t size = bin_info->reg_size;
1403 size_t redzone_size = bin_info->redzone_size;
1404 size_t i;
1405 bool error = false;
1406
1407 for (i = 1; i <= redzone_size; i++) {
1408 unsigned byte;
1409 if ((byte = *(uint8_t *)((uintptr_t)ptr - i)) != 0xa5) {
1410 error = true;
1411 malloc_printf("<jemalloc>: Corrupt redzone "
1412 "%zu byte%s before %p (size %zu), byte=%#x\n", i,
1413 (i == 1) ? "" : "s", ptr, size, byte);
1414 }
1415 }
1416 for (i = 0; i < redzone_size; i++) {
1417 unsigned byte;
1418 if ((byte = *(uint8_t *)((uintptr_t)ptr + size + i)) != 0xa5) {
1419 error = true;
1420 malloc_printf("<jemalloc>: Corrupt redzone "
1421 "%zu byte%s after end of %p (size %zu), byte=%#x\n",
1422 i, (i == 1) ? "" : "s", ptr, size, byte);
1423 }
1424 }
1425 if (opt_abort && error)
1426 abort();
1427
1428 memset((void *)((uintptr_t)ptr - redzone_size), 0x5a,
1429 bin_info->reg_interval);
1430}
a78e148b 1431
/*
 * Allocate a small region of the size class containing "size".  Returns
 * NULL on failure.  When zero is false, fill behavior follows opt_junk /
 * opt_zero; when zero is true, the region is zeroed (after optional junking
 * of the redzones).
 */
void *
arena_malloc_small(arena_t *arena, size_t size, bool zero)
{
    void *ret;
    arena_bin_t *bin;
    arena_run_t *run;
    size_t binind;

    binind = SMALL_SIZE2BIN(size);
    assert(binind < NBINS);
    bin = &arena->bins[binind];
    /* Round the request up to the bin's actual region size. */
    size = arena_bin_info[binind].reg_size;

    malloc_mutex_lock(&bin->lock);
    if ((run = bin->runcur) != NULL && run->nfree > 0)
        ret = arena_run_reg_alloc(run, &arena_bin_info[binind]);
    else
        ret = arena_bin_malloc_hard(arena, bin);

    if (ret == NULL) {
        malloc_mutex_unlock(&bin->lock);
        return (NULL);
    }

    if (config_stats) {
        bin->stats.allocated += size;
        bin->stats.nmalloc++;
        bin->stats.nrequests++;
    }
    malloc_mutex_unlock(&bin->lock);
    /* Without threading, tcaches don't accumulate profiling bytes, so do
     * it here for small allocations. */
    if (config_prof && isthreaded == false) {
        malloc_mutex_lock(&arena->lock);
        arena_prof_accum(arena, size);
        malloc_mutex_unlock(&arena->lock);
    }

    if (zero == false) {
        if (config_fill) {
            if (opt_junk) {
                arena_alloc_junk_small(ret,
                    &arena_bin_info[binind], false);
            } else if (opt_zero)
                memset(ret, 0, size);
        }
    } else {
        if (config_fill && opt_junk) {
            arena_alloc_junk_small(ret, &arena_bin_info[binind],
                true);
        }
        VALGRIND_MAKE_MEM_UNDEFINED(ret, size);
        memset(ret, 0, size);
    }

    return (ret);
}
1487
/*
 * Allocate a large (page-granular) region from the arena.  Returns NULL on
 * failure.  Fill behavior follows opt_junk / opt_zero when zero is false;
 * zeroing itself is handled inside arena_run_alloc() when zero is true.
 */
void *
arena_malloc_large(arena_t *arena, size_t size, bool zero)
{
    void *ret;

    /* Large allocation. */
    size = PAGE_CEILING(size);
    malloc_mutex_lock(&arena->lock);
    ret = (void *)arena_run_alloc(arena, size, true, BININD_INVALID, zero);
    if (ret == NULL) {
        malloc_mutex_unlock(&arena->lock);
        return (NULL);
    }
    if (config_stats) {
        arena->stats.nmalloc_large++;
        arena->stats.nrequests_large++;
        arena->stats.allocated_large += size;
        arena->stats.lstats[(size >> LG_PAGE) - 1].nmalloc++;
        arena->stats.lstats[(size >> LG_PAGE) - 1].nrequests++;
        arena->stats.lstats[(size >> LG_PAGE) - 1].curruns++;
    }
    if (config_prof)
        arena_prof_accum(arena, size);
    malloc_mutex_unlock(&arena->lock);

    if (zero == false) {
        if (config_fill) {
            if (opt_junk)
                memset(ret, 0xa5, size);
            else if (opt_zero)
                memset(ret, 0, size);
        }
    }

    return (ret);
}
1524
/* Only handles large allocations that require more than page alignment. */
void *
arena_palloc(arena_t *arena, size_t size, size_t alignment, bool zero)
{
    void *ret;
    size_t alloc_size, leadsize, trailsize;
    arena_run_t *run;
    arena_chunk_t *chunk;

    assert((size & PAGE_MASK) == 0);

    alignment = PAGE_CEILING(alignment);
    /* Over-allocate so an aligned sub-run of "size" bytes must exist. */
    alloc_size = size + alignment - PAGE;

    malloc_mutex_lock(&arena->lock);
    run = arena_run_alloc(arena, alloc_size, true, BININD_INVALID, zero);
    if (run == NULL) {
        malloc_mutex_unlock(&arena->lock);
        return (NULL);
    }
    chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(run);

    /* Trim the unaligned head and surplus tail back to the arena. */
    leadsize = ALIGNMENT_CEILING((uintptr_t)run, alignment) -
        (uintptr_t)run;
    assert(alloc_size >= leadsize + size);
    trailsize = alloc_size - leadsize - size;
    ret = (void *)((uintptr_t)run + leadsize);
    if (leadsize != 0) {
        arena_run_trim_head(arena, chunk, run, alloc_size, alloc_size -
            leadsize);
    }
    if (trailsize != 0) {
        arena_run_trim_tail(arena, chunk, ret, size + trailsize, size,
            false);
    }

    if (config_stats) {
        arena->stats.nmalloc_large++;
        arena->stats.nrequests_large++;
        arena->stats.allocated_large += size;
        arena->stats.lstats[(size >> LG_PAGE) - 1].nmalloc++;
        arena->stats.lstats[(size >> LG_PAGE) - 1].nrequests++;
        arena->stats.lstats[(size >> LG_PAGE) - 1].curruns++;
    }
    malloc_mutex_unlock(&arena->lock);

    if (config_fill && zero == false) {
        if (opt_junk)
            memset(ret, 0xa5, size);
        else if (opt_zero)
            memset(ret, 0, size);
    }
    return (ret);
}
1579
/*
 * Record that a small request was "promoted" to a page-sized allocation for
 * profiling purposes: stamp the page's map element with the original small
 * size class so isalloc(ptr, true) reports the requested class.
 */
void
arena_prof_promoted(const void *ptr, size_t size)
{
    arena_chunk_t *chunk;
    size_t pageind, binind;

    cassert(config_prof);
    assert(ptr != NULL);
    assert(CHUNK_ADDR2BASE(ptr) != ptr);
    assert(isalloc(ptr, false) == PAGE);
    assert(isalloc(ptr, true) == PAGE);
    assert(size <= SMALL_MAXCLASS);

    chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
    pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE;
    binind = SMALL_SIZE2BIN(size);
    assert(binind < NBINS);
    arena_mapbits_large_binind_set(chunk, pageind, binind);

    /* Demotion-aware size now reflects the small class; raw size does not. */
    assert(isalloc(ptr, false) == PAGE);
    assert(isalloc(ptr, true) == size);
}
a78e148b 1602
1603static void
1604arena_dissociate_bin_run(arena_chunk_t *chunk, arena_run_t *run,
1605 arena_bin_t *bin)
1606{
1607
1608 /* Dissociate run from bin. */
1609 if (run == bin->runcur)
1610 bin->runcur = NULL;
1611 else {
1612 size_t binind = arena_bin_index(chunk->arena, bin);
1613 arena_bin_info_t *bin_info = &arena_bin_info[binind];
1614
1615 if (bin_info->nregs != 1) {
a78e148b 1616 /*
1617 * This block's conditional is necessary because if the
1618 * run only contains one region, then it never gets
1619 * inserted into the non-full runs tree.
1620 */
4934f93d 1621 arena_bin_runs_remove(bin, run);
a78e148b 1622 }
1623 }
1624}
1625
/*
 * Free an entirely-unused small run back to the arena.  Called with
 * bin->lock held; temporarily drops it to take arena->lock (lock order:
 * never hold both).
 */
static void
arena_dalloc_bin_run(arena_t *arena, arena_chunk_t *chunk, arena_run_t *run,
    arena_bin_t *bin)
{
    size_t binind;
    arena_bin_info_t *bin_info;
    size_t npages, run_ind, past;

    assert(run != bin->runcur);
    assert(arena_run_tree_search(&bin->runs,
        arena_mapp_get(chunk, ((uintptr_t)run-(uintptr_t)chunk)>>LG_PAGE))
        == NULL);

    binind = arena_bin_index(chunk->arena, run->bin);
    bin_info = &arena_bin_info[binind];

    malloc_mutex_unlock(&bin->lock);
    /******************************/
    npages = bin_info->run_size >> LG_PAGE;
    run_ind = (size_t)(((uintptr_t)run - (uintptr_t)chunk) >> LG_PAGE);
    /* "past" is the first page beyond the highest region ever handed out
     * (nextind tracks the high-water mark of region allocation). */
    past = (size_t)(PAGE_CEILING((uintptr_t)run +
        (uintptr_t)bin_info->reg0_offset + (uintptr_t)(run->nextind *
        bin_info->reg_interval - bin_info->redzone_size) -
        (uintptr_t)chunk) >> LG_PAGE);
    malloc_mutex_lock(&arena->lock);

    /*
     * If the run was originally clean, and some pages were never touched,
     * trim the clean pages before deallocating the dirty portion of the
     * run.
     */
    assert(arena_mapbits_dirty_get(chunk, run_ind) ==
        arena_mapbits_dirty_get(chunk, run_ind+npages-1));
    if (arena_mapbits_dirty_get(chunk, run_ind) == 0 && past - run_ind <
        npages) {
        /* Trim clean pages.  Convert to large run beforehand. */
        assert(npages > 0);
        arena_mapbits_large_set(chunk, run_ind, bin_info->run_size, 0);
        arena_mapbits_large_set(chunk, run_ind+npages-1, 0, 0);
        arena_run_trim_tail(arena, chunk, run, (npages << LG_PAGE),
            ((past - run_ind) << LG_PAGE), false);
        /* npages = past - run_ind; */
    }
    arena_run_dalloc(arena, run, true, false);
    malloc_mutex_unlock(&arena->lock);
    /****************************/
    malloc_mutex_lock(&bin->lock);
    if (config_stats)
        bin->stats.curruns--;
}
1676
/*
 * Reinstate a partially-used run into the bin, preferring the lowest-
 * addressed run as runcur to improve locality/packing.
 */
static void
arena_bin_lower_run(arena_t *arena, arena_chunk_t *chunk, arena_run_t *run,
    arena_bin_t *bin)
{

    /*
     * Make sure that if bin->runcur is non-NULL, it refers to the lowest
     * non-full run.  It is okay to NULL runcur out rather than proactively
     * keeping it pointing at the lowest non-full run.
     */
    /* NOTE: when runcur is NULL the comparison below is false (run is
     * never below address 0), so the run lands in the runs tree. */
    if ((uintptr_t)run < (uintptr_t)bin->runcur) {
        /* Switch runcur. */
        if (bin->runcur->nfree > 0)
            arena_bin_runs_insert(bin, bin->runcur);
        bin->runcur = run;
        if (config_stats)
            bin->stats.reruns++;
    } else
        arena_bin_runs_insert(bin, run);
}
1697
/*
 * Free a small region with the owning bin's lock already held.  Handles
 * junk-on-free, region bookkeeping, and run retirement/demotion.
 */
void
arena_dalloc_bin_locked(arena_t *arena, arena_chunk_t *chunk, void *ptr,
    arena_chunk_map_t *mapelm)
{
    size_t pageind;
    arena_run_t *run;
    arena_bin_t *bin;
    arena_bin_info_t *bin_info;
    size_t size, binind;

    pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE;
    run = (arena_run_t *)((uintptr_t)chunk + (uintptr_t)((pageind -
        arena_mapbits_small_runind_get(chunk, pageind)) << LG_PAGE));
    bin = run->bin;
    binind = arena_ptr_small_binind_get(ptr, mapelm->bits);
    bin_info = &arena_bin_info[binind];
    /* size is only consumed under config_stats below. */
    if (config_fill || config_stats)
        size = bin_info->reg_size;

    if (config_fill && opt_junk)
        arena_dalloc_junk_small(ptr, bin_info);

    arena_run_reg_dalloc(run, ptr);
    if (run->nfree == bin_info->nregs) {
        /* Run is now empty: retire it entirely. */
        arena_dissociate_bin_run(chunk, run, bin);
        arena_dalloc_bin_run(arena, chunk, run, bin);
    } else if (run->nfree == 1 && run != bin->runcur)
        /* Run just transitioned full -> non-full: make it reachable. */
        arena_bin_lower_run(arena, chunk, run, bin);

    if (config_stats) {
        bin->stats.allocated -= size;
        bin->stats.ndalloc++;
    }
}
1732
4934f93d 1733void
1734arena_dalloc_bin(arena_t *arena, arena_chunk_t *chunk, void *ptr,
1735 size_t pageind, arena_chunk_map_t *mapelm)
1736{
1737 arena_run_t *run;
1738 arena_bin_t *bin;
1739
1740 run = (arena_run_t *)((uintptr_t)chunk + (uintptr_t)((pageind -
1741 arena_mapbits_small_runind_get(chunk, pageind)) << LG_PAGE));
1742 bin = run->bin;
1743 malloc_mutex_lock(&bin->lock);
1744 arena_dalloc_bin_locked(arena, chunk, ptr, mapelm);
1745 malloc_mutex_unlock(&bin->lock);
1746}
1747
/*
 * Free a small region given only its page index; fetches the map element
 * and forwards to arena_dalloc_bin().
 */
void
arena_dalloc_small(arena_t *arena, arena_chunk_t *chunk, void *ptr,
    size_t pageind)
{
    arena_chunk_map_t *mapelm;

    if (config_debug) {
        /* arena_ptr_small_binind_get() does extra sanity checking. */
        assert(arena_ptr_small_binind_get(ptr, arena_mapbits_get(chunk,
            pageind)) != BININD_INVALID);
    }
    mapelm = arena_mapp_get(chunk, pageind);
    arena_dalloc_bin(arena, chunk, ptr, pageind, mapelm);
}
a78e148b 1762
1763void
4934f93d 1764arena_dalloc_large_locked(arena_t *arena, arena_chunk_t *chunk, void *ptr)
a78e148b 1765{
1766
4934f93d 1767 if (config_fill || config_stats) {
1768 size_t pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE;
1769 size_t size = arena_mapbits_large_size_get(chunk, pageind);
a78e148b 1770
4934f93d 1771 if (config_fill && config_stats && opt_junk)
a78e148b 1772 memset(ptr, 0x5a, size);
4934f93d 1773 if (config_stats) {
1774 arena->stats.ndalloc_large++;
1775 arena->stats.allocated_large -= size;
1776 arena->stats.lstats[(size >> LG_PAGE) - 1].ndalloc++;
1777 arena->stats.lstats[(size >> LG_PAGE) - 1].curruns--;
1778 }
a78e148b 1779 }
1780
21b26915 1781 arena_run_dalloc(arena, (arena_run_t *)ptr, true, false);
a78e148b 1782}
1783
/* Free a large allocation, taking arena->lock around the locked variant. */
void
arena_dalloc_large(arena_t *arena, arena_chunk_t *chunk, void *ptr)
{

    malloc_mutex_lock(&arena->lock);
    arena_dalloc_large_locked(arena, chunk, ptr);
    malloc_mutex_unlock(&arena->lock);
}
1792
/*
 * Shrink a large allocation in place from oldsize to size (both page
 * multiples), returning the freed tail pages to the arena and updating
 * large-allocation stats as a dealloc of the old class plus a malloc of the
 * new class.
 */
static void
arena_ralloc_large_shrink(arena_t *arena, arena_chunk_t *chunk, void *ptr,
    size_t oldsize, size_t size)
{

    assert(size < oldsize);

    /*
     * Shrink the run, and make trailing pages available for other
     * allocations.
     */
    malloc_mutex_lock(&arena->lock);
    arena_run_trim_tail(arena, chunk, (arena_run_t *)ptr, oldsize, size,
        true);
    if (config_stats) {
        arena->stats.ndalloc_large++;
        arena->stats.allocated_large -= oldsize;
        arena->stats.lstats[(oldsize >> LG_PAGE) - 1].ndalloc++;
        arena->stats.lstats[(oldsize >> LG_PAGE) - 1].curruns--;

        arena->stats.nmalloc_large++;
        arena->stats.nrequests_large++;
        arena->stats.allocated_large += size;
        arena->stats.lstats[(size >> LG_PAGE) - 1].nmalloc++;
        arena->stats.lstats[(size >> LG_PAGE) - 1].nrequests++;
        arena->stats.lstats[(size >> LG_PAGE) - 1].curruns++;
    }
    malloc_mutex_unlock(&arena->lock);
}
1822
/*
 * Attempt to grow a large allocation in place by annexing the immediately
 * following run.  Returns false on success (the allocation now spans at
 * least "size" and at most "size + extra" bytes), true on failure (the next
 * run is allocated or too small).
 */
static bool
arena_ralloc_large_grow(arena_t *arena, arena_chunk_t *chunk, void *ptr,
    size_t oldsize, size_t size, size_t extra, bool zero)
{
    size_t pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE;
    size_t npages = oldsize >> LG_PAGE;
    size_t followsize;

    assert(oldsize == arena_mapbits_large_size_get(chunk, pageind));

    /* Try to extend the run. */
    assert(size + extra > oldsize);
    malloc_mutex_lock(&arena->lock);
    if (pageind + npages < chunk_npages &&
        arena_mapbits_allocated_get(chunk, pageind+npages) == 0 &&
        (followsize = arena_mapbits_unallocated_size_get(chunk,
        pageind+npages)) >= size - oldsize) {
        /*
         * The next run is available and sufficiently large.  Split the
         * following run, then merge the first part with the existing
         * allocation.
         */
        size_t flag_dirty;
        size_t splitsize = (oldsize + followsize <= size + extra)
            ? followsize : size + extra - oldsize;
        arena_run_split(arena, (arena_run_t *)((uintptr_t)chunk +
            ((pageind+npages) << LG_PAGE)), splitsize, true,
            BININD_INVALID, zero);

        size = oldsize + splitsize;
        npages = size >> LG_PAGE;

        /*
         * Mark the extended run as dirty if either portion of the run
         * was dirty before allocation.  This is rather pedantic,
         * because there's not actually any sequence of events that
         * could cause the resulting run to be passed to
         * arena_run_dalloc() with the dirty argument set to false
         * (which is when dirty flag consistency would really matter).
         */
        flag_dirty = arena_mapbits_dirty_get(chunk, pageind) |
            arena_mapbits_dirty_get(chunk, pageind+npages-1);
        arena_mapbits_large_set(chunk, pageind, size, flag_dirty);
        arena_mapbits_large_set(chunk, pageind+npages-1, 0, flag_dirty);

        if (config_stats) {
            arena->stats.ndalloc_large++;
            arena->stats.allocated_large -= oldsize;
            arena->stats.lstats[(oldsize >> LG_PAGE) - 1].ndalloc++;
            arena->stats.lstats[(oldsize >> LG_PAGE) - 1].curruns--;

            arena->stats.nmalloc_large++;
            arena->stats.nrequests_large++;
            arena->stats.allocated_large += size;
            arena->stats.lstats[(size >> LG_PAGE) - 1].nmalloc++;
            arena->stats.lstats[(size >> LG_PAGE) - 1].nrequests++;
            arena->stats.lstats[(size >> LG_PAGE) - 1].curruns++;
        }
        malloc_mutex_unlock(&arena->lock);
        return (false);
    }
    malloc_mutex_unlock(&arena->lock);

    return (true);
}
1888
/*
 * Try to resize a large allocation, in order to avoid copying.  This will
 * always fail if growing an object, and the following run is already in use.
 *
 * Returns false on success (resize happened in place), true on failure.
 */
static bool
arena_ralloc_large(void *ptr, size_t oldsize, size_t size, size_t extra,
    bool zero)
{
	size_t psize;

	psize = PAGE_CEILING(size + extra);
	if (psize == oldsize) {
		/* Same size class; optionally junk the now-unused tail. */
		if (config_fill && opt_junk && size < oldsize) {
			memset((void *)((uintptr_t)ptr + size), 0x5a, oldsize -
			    size);
		}
		return (false);
	} else {
		arena_chunk_t *chunk;
		arena_t *arena;

		chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
		arena = chunk->arena;

		if (psize < oldsize) {
			/* Fill before shrinking in order to avoid a race. */
			if (config_fill && opt_junk) {
				memset((void *)((uintptr_t)ptr + size), 0x5a,
				    oldsize - size);
			}
			arena_ralloc_large_shrink(arena, chunk, ptr, oldsize,
			    psize);
			return (false);
		} else {
			bool ret = arena_ralloc_large_grow(arena, chunk, ptr,
			    oldsize, PAGE_CEILING(size),
			    psize - PAGE_CEILING(size), zero);
			if (config_fill && ret == false && zero == false &&
			    opt_zero) {
				/* Honor opt_zero for the grown region. */
				memset((void *)((uintptr_t)ptr + oldsize), 0,
				    size - oldsize);
			}
			return (ret);
		}
	}
}
1936
1937void *
1938arena_ralloc_no_move(void *ptr, size_t oldsize, size_t size, size_t extra,
1939 bool zero)
1940{
1941
1942 /*
1943 * Avoid moving the allocation if the size class can be left the same.
1944 */
1945 if (oldsize <= arena_maxclass) {
4934f93d 1946 if (oldsize <= SMALL_MAXCLASS) {
a78e148b 1947 assert(arena_bin_info[SMALL_SIZE2BIN(oldsize)].reg_size
1948 == oldsize);
4934f93d 1949 if ((size + extra <= SMALL_MAXCLASS &&
a78e148b 1950 SMALL_SIZE2BIN(size + extra) ==
1951 SMALL_SIZE2BIN(oldsize)) || (size <= oldsize &&
1952 size + extra >= oldsize)) {
4934f93d 1953 if (config_fill && opt_junk && size < oldsize) {
a78e148b 1954 memset((void *)((uintptr_t)ptr + size),
1955 0x5a, oldsize - size);
1956 }
a78e148b 1957 return (ptr);
1958 }
1959 } else {
1960 assert(size <= arena_maxclass);
4934f93d 1961 if (size + extra > SMALL_MAXCLASS) {
a78e148b 1962 if (arena_ralloc_large(ptr, oldsize, size,
1963 extra, zero) == false)
1964 return (ptr);
1965 }
1966 }
1967 }
1968
1969 /* Reallocation would require a move. */
1970 return (NULL);
1971}
1972
/*
 * Resize the allocation at ptr.  First try to resize in place; otherwise
 * allocate new space (honoring alignment if nonzero), copy, and free the old
 * allocation.  Returns the (possibly moved) pointer, or NULL on allocation
 * failure (the original allocation is left intact in that case).
 */
void *
arena_ralloc(arena_t *arena, void *ptr, size_t oldsize, size_t size,
    size_t extra, size_t alignment, bool zero, bool try_tcache_alloc,
    bool try_tcache_dalloc)
{
	void *ret;
	size_t copysize;

	/* Try to avoid moving the allocation. */
	ret = arena_ralloc_no_move(ptr, oldsize, size, extra, zero);
	if (ret != NULL)
		return (ret);

	/*
	 * size and oldsize are different enough that we need to move the
	 * object.  In that case, fall back to allocating new space and
	 * copying.
	 */
	if (alignment != 0) {
		/* sa2u() returns 0 when size/alignment cannot be satisfied. */
		size_t usize = sa2u(size + extra, alignment);
		if (usize == 0)
			return (NULL);
		ret = ipallocx(usize, alignment, zero, try_tcache_alloc, arena);
	} else
		ret = arena_malloc(arena, size + extra, zero, try_tcache_alloc);

	if (ret == NULL) {
		if (extra == 0)
			return (NULL);
		/* Try again, this time without extra. */
		if (alignment != 0) {
			size_t usize = sa2u(size, alignment);
			if (usize == 0)
				return (NULL);
			ret = ipallocx(usize, alignment, zero, try_tcache_alloc,
			    arena);
		} else
			ret = arena_malloc(arena, size, zero, try_tcache_alloc);

		if (ret == NULL)
			return (NULL);
	}

	/* Junk/zero-filling were already done by ipalloc()/arena_malloc(). */

	/*
	 * Copy at most size bytes (not size+extra), since the caller has no
	 * expectation that the extra bytes will be reliably preserved.
	 */
	copysize = (size < oldsize) ? size : oldsize;
	VALGRIND_MAKE_MEM_UNDEFINED(ret, copysize);
	memcpy(ret, ptr, copysize);
	iqallocx(ptr, try_tcache_dalloc);
	return (ret);
}
2028
2029dss_prec_t
2030arena_dss_prec_get(arena_t *arena)
2031{
2032 dss_prec_t ret;
2033
2034 malloc_mutex_lock(&arena->lock);
2035 ret = arena->dss_prec;
2036 malloc_mutex_unlock(&arena->lock);
a78e148b 2037 return (ret);
2038}
2039
21b26915 2040void
2041arena_dss_prec_set(arena_t *arena, dss_prec_t dss_prec)
2042{
2043
2044 malloc_mutex_lock(&arena->lock);
2045 arena->dss_prec = dss_prec;
2046 malloc_mutex_unlock(&arena->lock);
2047}
2048
/*
 * Accumulate this arena's statistics into the caller-provided totals.
 *
 * Locking protocol: the arena-wide counters (nactive, ndirty, large-object
 * stats) are read under arena->lock, which is released before the per-bin
 * pass; each bin's counters are then read under that bin's own lock.  The
 * two phases are therefore not a single atomic snapshot.
 */
void
arena_stats_merge(arena_t *arena, const char **dss, size_t *nactive,
    size_t *ndirty, arena_stats_t *astats, malloc_bin_stats_t *bstats,
    malloc_large_stats_t *lstats)
{
	unsigned i;

	malloc_mutex_lock(&arena->lock);
	*dss = dss_prec_names[arena->dss_prec];
	*nactive += arena->nactive;
	*ndirty += arena->ndirty;

	astats->mapped += arena->stats.mapped;
	astats->npurge += arena->stats.npurge;
	astats->nmadvise += arena->stats.nmadvise;
	astats->purged += arena->stats.purged;
	astats->allocated_large += arena->stats.allocated_large;
	astats->nmalloc_large += arena->stats.nmalloc_large;
	astats->ndalloc_large += arena->stats.ndalloc_large;
	astats->nrequests_large += arena->stats.nrequests_large;

	/* Per-size-class large-object counters. */
	for (i = 0; i < nlclasses; i++) {
		lstats[i].nmalloc += arena->stats.lstats[i].nmalloc;
		lstats[i].ndalloc += arena->stats.lstats[i].ndalloc;
		lstats[i].nrequests += arena->stats.lstats[i].nrequests;
		lstats[i].curruns += arena->stats.lstats[i].curruns;
	}
	malloc_mutex_unlock(&arena->lock);

	/* Per-bin small-object counters, each under its bin's lock. */
	for (i = 0; i < NBINS; i++) {
		arena_bin_t *bin = &arena->bins[i];

		malloc_mutex_lock(&bin->lock);
		bstats[i].allocated += bin->stats.allocated;
		bstats[i].nmalloc += bin->stats.nmalloc;
		bstats[i].ndalloc += bin->stats.ndalloc;
		bstats[i].nrequests += bin->stats.nrequests;
		if (config_tcache) {
			bstats[i].nfills += bin->stats.nfills;
			bstats[i].nflushes += bin->stats.nflushes;
		}
		bstats[i].nruns += bin->stats.nruns;
		bstats[i].reruns += bin->stats.reruns;
		bstats[i].curruns += bin->stats.curruns;
		malloc_mutex_unlock(&bin->lock);
	}
}
2096
/*
 * Initialize an arena with index ind.  Returns true on error, false on
 * success (jemalloc's boolean error convention).
 *
 * NOTE(review): on a mid-function failure (e.g. a bin mutex init failing),
 * earlier allocations/mutexes are not torn down here — presumably callers
 * treat the arena as unusable and base_alloc() memory is never reclaimed;
 * confirm against the callers.
 */
bool
arena_new(arena_t *arena, unsigned ind)
{
	unsigned i;
	arena_bin_t *bin;

	arena->ind = ind;
	arena->nthreads = 0;

	if (malloc_mutex_init(&arena->lock))
		return (true);

	if (config_stats) {
		memset(&arena->stats, 0, sizeof(arena_stats_t));
		/* One malloc_large_stats_t per large size class. */
		arena->stats.lstats =
		    (malloc_large_stats_t *)base_alloc(nlclasses *
		    sizeof(malloc_large_stats_t));
		if (arena->stats.lstats == NULL)
			return (true);
		memset(arena->stats.lstats, 0, nlclasses *
		    sizeof(malloc_large_stats_t));
		if (config_tcache)
			ql_new(&arena->tcache_ql);
	}

	if (config_prof)
		arena->prof_accumbytes = 0;

	arena->dss_prec = chunk_dss_prec_get();

	/* Initialize chunks. */
	arena_chunk_dirty_new(&arena->chunks_dirty);
	arena->spare = NULL;

	arena->nactive = 0;
	arena->ndirty = 0;
	arena->npurgatory = 0;

	arena_avail_tree_new(&arena->runs_avail);

	/* Initialize bins. */
	for (i = 0; i < NBINS; i++) {
		bin = &arena->bins[i];
		if (malloc_mutex_init(&bin->lock))
			return (true);
		bin->runcur = NULL;
		arena_run_tree_new(&bin->runs);
		if (config_stats)
			memset(&bin->stats, 0, sizeof(malloc_bin_stats_t));
	}

	return (false);
}
2150
2151/*
2152 * Calculate bin_info->run_size such that it meets the following constraints:
2153 *
2154 * *) bin_info->run_size >= min_run_size
2155 * *) bin_info->run_size <= arena_maxclass
2156 * *) run header overhead <= RUN_MAX_OVRHD (or header overhead relaxed).
2157 * *) bin_info->nregs <= RUN_MAXREGS
2158 *
2159 * bin_info->nregs, bin_info->bitmap_offset, and bin_info->reg0_offset are also
2160 * calculated here, since these settings are all interdependent.
2161 */
2162static size_t
2163bin_info_run_size_calc(arena_bin_info_t *bin_info, size_t min_run_size)
2164{
4934f93d 2165 size_t pad_size;
a78e148b 2166 size_t try_run_size, good_run_size;
2167 uint32_t try_nregs, good_nregs;
2168 uint32_t try_hdr_size, good_hdr_size;
2169 uint32_t try_bitmap_offset, good_bitmap_offset;
a78e148b 2170 uint32_t try_ctx0_offset, good_ctx0_offset;
4934f93d 2171 uint32_t try_redzone0_offset, good_redzone0_offset;
a78e148b 2172
4934f93d 2173 assert(min_run_size >= PAGE);
a78e148b 2174 assert(min_run_size <= arena_maxclass);
2175
4934f93d 2176 /*
2177 * Determine redzone size based on minimum alignment and minimum
2178 * redzone size. Add padding to the end of the run if it is needed to
2179 * align the regions. The padding allows each redzone to be half the
2180 * minimum alignment; without the padding, each redzone would have to
2181 * be twice as large in order to maintain alignment.
2182 */
2183 if (config_fill && opt_redzone) {
2184 size_t align_min = ZU(1) << (ffs(bin_info->reg_size) - 1);
2185 if (align_min <= REDZONE_MINSIZE) {
2186 bin_info->redzone_size = REDZONE_MINSIZE;
2187 pad_size = 0;
2188 } else {
2189 bin_info->redzone_size = align_min >> 1;
2190 pad_size = bin_info->redzone_size;
2191 }
2192 } else {
2193 bin_info->redzone_size = 0;
2194 pad_size = 0;
2195 }
2196 bin_info->reg_interval = bin_info->reg_size +
2197 (bin_info->redzone_size << 1);
2198
a78e148b 2199 /*
2200 * Calculate known-valid settings before entering the run_size
2201 * expansion loop, so that the first part of the loop always copies
2202 * valid settings.
2203 *
2204 * The do..while loop iteratively reduces the number of regions until
2205 * the run header and the regions no longer overlap. A closed formula
2206 * would be quite messy, since there is an interdependency between the
2207 * header's mask length and the number of regions.
2208 */
2209 try_run_size = min_run_size;
4934f93d 2210 try_nregs = ((try_run_size - sizeof(arena_run_t)) /
2211 bin_info->reg_interval)
a78e148b 2212 + 1; /* Counter-act try_nregs-- in loop. */
2213 if (try_nregs > RUN_MAXREGS) {
2214 try_nregs = RUN_MAXREGS
2215 + 1; /* Counter-act try_nregs-- in loop. */
2216 }
2217 do {
2218 try_nregs--;
2219 try_hdr_size = sizeof(arena_run_t);
2220 /* Pad to a long boundary. */
2221 try_hdr_size = LONG_CEILING(try_hdr_size);
2222 try_bitmap_offset = try_hdr_size;
2223 /* Add space for bitmap. */
2224 try_hdr_size += bitmap_size(try_nregs);
4934f93d 2225 if (config_prof && opt_prof && prof_promote == false) {
a78e148b 2226 /* Pad to a quantum boundary. */
2227 try_hdr_size = QUANTUM_CEILING(try_hdr_size);
2228 try_ctx0_offset = try_hdr_size;
2229 /* Add space for one (prof_ctx_t *) per region. */
2230 try_hdr_size += try_nregs * sizeof(prof_ctx_t *);
2231 } else
2232 try_ctx0_offset = 0;
4934f93d 2233 try_redzone0_offset = try_run_size - (try_nregs *
2234 bin_info->reg_interval) - pad_size;
2235 } while (try_hdr_size > try_redzone0_offset);
a78e148b 2236
2237 /* run_size expansion loop. */
2238 do {
2239 /*
2240 * Copy valid settings before trying more aggressive settings.
2241 */
2242 good_run_size = try_run_size;
2243 good_nregs = try_nregs;
2244 good_hdr_size = try_hdr_size;
2245 good_bitmap_offset = try_bitmap_offset;
a78e148b 2246 good_ctx0_offset = try_ctx0_offset;
4934f93d 2247 good_redzone0_offset = try_redzone0_offset;
a78e148b 2248
2249 /* Try more aggressive settings. */
4934f93d 2250 try_run_size += PAGE;
2251 try_nregs = ((try_run_size - sizeof(arena_run_t) - pad_size) /
2252 bin_info->reg_interval)
a78e148b 2253 + 1; /* Counter-act try_nregs-- in loop. */
2254 if (try_nregs > RUN_MAXREGS) {
2255 try_nregs = RUN_MAXREGS
2256 + 1; /* Counter-act try_nregs-- in loop. */
2257 }
2258 do {
2259 try_nregs--;
2260 try_hdr_size = sizeof(arena_run_t);
2261 /* Pad to a long boundary. */
2262 try_hdr_size = LONG_CEILING(try_hdr_size);
2263 try_bitmap_offset = try_hdr_size;
2264 /* Add space for bitmap. */
2265 try_hdr_size += bitmap_size(try_nregs);
4934f93d 2266 if (config_prof && opt_prof && prof_promote == false) {
a78e148b 2267 /* Pad to a quantum boundary. */
2268 try_hdr_size = QUANTUM_CEILING(try_hdr_size);
2269 try_ctx0_offset = try_hdr_size;
2270 /*
2271 * Add space for one (prof_ctx_t *) per region.
2272 */
2273 try_hdr_size += try_nregs *
2274 sizeof(prof_ctx_t *);
2275 }
4934f93d 2276 try_redzone0_offset = try_run_size - (try_nregs *
2277 bin_info->reg_interval) - pad_size;
2278 } while (try_hdr_size > try_redzone0_offset);
a78e148b 2279 } while (try_run_size <= arena_maxclass
2280 && try_run_size <= arena_maxclass
4934f93d 2281 && RUN_MAX_OVRHD * (bin_info->reg_interval << 3) >
2282 RUN_MAX_OVRHD_RELAX
2283 && (try_redzone0_offset << RUN_BFP) > RUN_MAX_OVRHD * try_run_size
a78e148b 2284 && try_nregs < RUN_MAXREGS);
2285
4934f93d 2286 assert(good_hdr_size <= good_redzone0_offset);
a78e148b 2287
2288 /* Copy final settings. */
2289 bin_info->run_size = good_run_size;
2290 bin_info->nregs = good_nregs;
2291 bin_info->bitmap_offset = good_bitmap_offset;
a78e148b 2292 bin_info->ctx0_offset = good_ctx0_offset;
4934f93d 2293 bin_info->reg0_offset = good_redzone0_offset + bin_info->redzone_size;
2294
2295 assert(bin_info->reg0_offset - bin_info->redzone_size + (bin_info->nregs
2296 * bin_info->reg_interval) + pad_size == bin_info->run_size);
a78e148b 2297
2298 return (good_run_size);
2299}
2300
/*
 * Populate arena_bin_info[] for every small size class.  SIZE_CLASSES
 * expands the SIZE_CLASS() macro once per bin, computing each bin's run
 * geometry from the previous bin's run size.
 */
static void
bin_info_init(void)
{
	arena_bin_info_t *bin_info;
	/* Run sizes grow monotonically; seed the search with one page. */
	size_t prev_run_size = PAGE;

#define SIZE_CLASS(bin, delta, size)					\
	bin_info = &arena_bin_info[bin];				\
	bin_info->reg_size = size;					\
	prev_run_size = bin_info_run_size_calc(bin_info, prev_run_size);\
	bitmap_info_init(&bin_info->bitmap_info, bin_info->nregs);
	SIZE_CLASSES
#undef SIZE_CLASS
}
2315
/*
 * Bootstrap arena bookkeeping: compute map_bias (number of header pages per
 * chunk) and arena_maxclass, then initialize the small-bin metadata tables.
 */
void
arena_boot(void)
{
	size_t header_size;
	unsigned i;

	/*
	 * Compute the header size such that it is large enough to contain the
	 * page map.  The page map is biased to omit entries for the header
	 * itself, so some iteration is necessary to compute the map bias.
	 *
	 * 1) Compute safe header_size and map_bias values that include enough
	 *    space for an unbiased page map.
	 * 2) Refine map_bias based on (1) to omit the header pages in the page
	 *    map.  The resulting map_bias may be one too small.
	 * 3) Refine map_bias based on (2).  The result will be >= the result
	 *    from (2), and will always be correct.
	 */
	map_bias = 0;
	for (i = 0; i < 3; i++) {
		header_size = offsetof(arena_chunk_t, map) +
		    (sizeof(arena_chunk_map_t) * (chunk_npages-map_bias));
		map_bias = (header_size >> LG_PAGE) + ((header_size & PAGE_MASK)
		    != 0);
	}
	assert(map_bias > 0);

	/* Largest non-huge allocation: a chunk minus its header pages. */
	arena_maxclass = chunksize - (map_bias << LG_PAGE);

	bin_info_init();
}
a78e148b 2347
4934f93d 2348void
2349arena_prefork(arena_t *arena)
2350{
2351 unsigned i;
a78e148b 2352
4934f93d 2353 malloc_mutex_prefork(&arena->lock);
2354 for (i = 0; i < NBINS; i++)
2355 malloc_mutex_prefork(&arena->bins[i].lock);
2356}
2357
2358void
2359arena_postfork_parent(arena_t *arena)
2360{
2361 unsigned i;
2362
2363 for (i = 0; i < NBINS; i++)
2364 malloc_mutex_postfork_parent(&arena->bins[i].lock);
2365 malloc_mutex_postfork_parent(&arena->lock);
2366}
2367
2368void
2369arena_postfork_child(arena_t *arena)
2370{
2371 unsigned i;
2372
2373 for (i = 0; i < NBINS; i++)
2374 malloc_mutex_postfork_child(&arena->bins[i].lock);
2375 malloc_mutex_postfork_child(&arena->lock);
a78e148b 2376}