1#define JEMALLOC_ARENA_C_
2#include "jemalloc/internal/jemalloc_internal.h"
3
4/******************************************************************************/
5/* Data. */
6
7size_t opt_lg_qspace_max = LG_QSPACE_MAX_DEFAULT;
8size_t opt_lg_cspace_max = LG_CSPACE_MAX_DEFAULT;
9ssize_t opt_lg_dirty_mult = LG_DIRTY_MULT_DEFAULT;
10uint8_t const *small_size2bin;
11arena_bin_info_t *arena_bin_info;
12
13/* Various bin-related settings. */
14unsigned nqbins;
15unsigned ncbins;
16unsigned nsbins;
17unsigned nbins;
18size_t qspace_max;
19size_t cspace_min;
20size_t cspace_max;
21size_t sspace_min;
22size_t sspace_max;
23
24size_t lg_mspace;
25size_t mspace_mask;
26
27/*
28 * const_small_size2bin is a static constant lookup table that in the common
29 * case can be used as-is for small_size2bin.
30 */
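/*
 * Illustrative note (not part of the original source): each S2B_n(i) macro
 * below expands to the lookup-table entries covering an n-byte range of
 * request sizes, all mapping to bin index i.  Assuming the usual lookup
 * macro of the form
 *
 *     binind = small_size2bin[(size - 1) >> LG_TINY_MIN];
 *
 * the table needs one entry per 2^LG_TINY_MIN bytes, so e.g. S2B_8(i) emits
 * two entries when LG_TINY_MIN == 2 and a single entry when LG_TINY_MIN == 3.
 */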
31#if (LG_TINY_MIN == 2)
32#define S2B_4(i) i,
33#define S2B_8(i) S2B_4(i) S2B_4(i)
34#elif (LG_TINY_MIN == 3)
35#define S2B_8(i) i,
36#else
37# error "Unsupported LG_TINY_MIN"
38#endif
39#define S2B_16(i) S2B_8(i) S2B_8(i)
40#define S2B_32(i) S2B_16(i) S2B_16(i)
41#define S2B_64(i) S2B_32(i) S2B_32(i)
42#define S2B_128(i) S2B_64(i) S2B_64(i)
43#define S2B_256(i) S2B_128(i) S2B_128(i)
44/*
45 * The number of elements in const_small_size2bin is dependent on the
46 * definition for SUBPAGE.
47 */
48static JEMALLOC_ATTR(aligned(CACHELINE))
49 const uint8_t const_small_size2bin[] = {
50#if (LG_QUANTUM == 4)
51/* 16-byte quantum **********************/
52# ifdef JEMALLOC_TINY
53# if (LG_TINY_MIN == 2)
54 S2B_4(0) /* 4 */
55 S2B_4(1) /* 8 */
56 S2B_8(2) /* 16 */
57# define S2B_QMIN 2
58# elif (LG_TINY_MIN == 3)
59 S2B_8(0) /* 8 */
60 S2B_8(1) /* 16 */
61# define S2B_QMIN 1
62# else
63# error "Unsupported LG_TINY_MIN"
64# endif
65# else
66 S2B_16(0) /* 16 */
67# define S2B_QMIN 0
68# endif
69 S2B_16(S2B_QMIN + 1) /* 32 */
70 S2B_16(S2B_QMIN + 2) /* 48 */
71 S2B_16(S2B_QMIN + 3) /* 64 */
72 S2B_16(S2B_QMIN + 4) /* 80 */
73 S2B_16(S2B_QMIN + 5) /* 96 */
74 S2B_16(S2B_QMIN + 6) /* 112 */
75 S2B_16(S2B_QMIN + 7) /* 128 */
76# define S2B_CMIN (S2B_QMIN + 8)
77#else
78/* 8-byte quantum ***********************/
79# ifdef JEMALLOC_TINY
80# if (LG_TINY_MIN == 2)
81 S2B_4(0) /* 4 */
82 S2B_4(1) /* 8 */
83# define S2B_QMIN 1
84# else
85# error "Unsupported LG_TINY_MIN"
86# endif
87# else
88 S2B_8(0) /* 8 */
89# define S2B_QMIN 0
90# endif
91 S2B_8(S2B_QMIN + 1) /* 16 */
92 S2B_8(S2B_QMIN + 2) /* 24 */
93 S2B_8(S2B_QMIN + 3) /* 32 */
94 S2B_8(S2B_QMIN + 4) /* 40 */
95 S2B_8(S2B_QMIN + 5) /* 48 */
96 S2B_8(S2B_QMIN + 6) /* 56 */
97 S2B_8(S2B_QMIN + 7) /* 64 */
98 S2B_8(S2B_QMIN + 8) /* 72 */
99 S2B_8(S2B_QMIN + 9) /* 80 */
100 S2B_8(S2B_QMIN + 10) /* 88 */
101 S2B_8(S2B_QMIN + 11) /* 96 */
102 S2B_8(S2B_QMIN + 12) /* 104 */
103 S2B_8(S2B_QMIN + 13) /* 112 */
104 S2B_8(S2B_QMIN + 14) /* 120 */
105 S2B_8(S2B_QMIN + 15) /* 128 */
106# define S2B_CMIN (S2B_QMIN + 16)
107#endif
108/****************************************/
109 S2B_64(S2B_CMIN + 0) /* 192 */
110 S2B_64(S2B_CMIN + 1) /* 256 */
111 S2B_64(S2B_CMIN + 2) /* 320 */
112 S2B_64(S2B_CMIN + 3) /* 384 */
113 S2B_64(S2B_CMIN + 4) /* 448 */
114 S2B_64(S2B_CMIN + 5) /* 512 */
115# define S2B_SMIN (S2B_CMIN + 6)
116 S2B_256(S2B_SMIN + 0) /* 768 */
117 S2B_256(S2B_SMIN + 1) /* 1024 */
118 S2B_256(S2B_SMIN + 2) /* 1280 */
119 S2B_256(S2B_SMIN + 3) /* 1536 */
120 S2B_256(S2B_SMIN + 4) /* 1792 */
121 S2B_256(S2B_SMIN + 5) /* 2048 */
122 S2B_256(S2B_SMIN + 6) /* 2304 */
123 S2B_256(S2B_SMIN + 7) /* 2560 */
124 S2B_256(S2B_SMIN + 8) /* 2816 */
125 S2B_256(S2B_SMIN + 9) /* 3072 */
126 S2B_256(S2B_SMIN + 10) /* 3328 */
127 S2B_256(S2B_SMIN + 11) /* 3584 */
128 S2B_256(S2B_SMIN + 12) /* 3840 */
129#if (STATIC_PAGE_SHIFT == 13)
130 S2B_256(S2B_SMIN + 13) /* 4096 */
131 S2B_256(S2B_SMIN + 14) /* 4352 */
132 S2B_256(S2B_SMIN + 15) /* 4608 */
133 S2B_256(S2B_SMIN + 16) /* 4864 */
134 S2B_256(S2B_SMIN + 17) /* 5120 */
135 S2B_256(S2B_SMIN + 18) /* 5376 */
136 S2B_256(S2B_SMIN + 19) /* 5632 */
137 S2B_256(S2B_SMIN + 20) /* 5888 */
138 S2B_256(S2B_SMIN + 21) /* 6144 */
139 S2B_256(S2B_SMIN + 22) /* 6400 */
140 S2B_256(S2B_SMIN + 23) /* 6656 */
141 S2B_256(S2B_SMIN + 24) /* 6912 */
142 S2B_256(S2B_SMIN + 25) /* 7168 */
143 S2B_256(S2B_SMIN + 26) /* 7424 */
144 S2B_256(S2B_SMIN + 27) /* 7680 */
145 S2B_256(S2B_SMIN + 28) /* 7936 */
146#endif
147};
148#undef S2B_1
149#undef S2B_2
150#undef S2B_4
151#undef S2B_8
152#undef S2B_16
153#undef S2B_32
154#undef S2B_64
155#undef S2B_128
156#undef S2B_256
157#undef S2B_QMIN
158#undef S2B_CMIN
159#undef S2B_SMIN
160
161/******************************************************************************/
162/* Function prototypes for non-inline static functions. */
163
164static void arena_run_split(arena_t *arena, arena_run_t *run, size_t size,
165 bool large, bool zero);
166static arena_chunk_t *arena_chunk_alloc(arena_t *arena);
167static void arena_chunk_dealloc(arena_t *arena, arena_chunk_t *chunk);
168static arena_run_t *arena_run_alloc(arena_t *arena, size_t size, bool large,
169 bool zero);
170static void arena_purge(arena_t *arena, bool all);
171static void arena_run_dalloc(arena_t *arena, arena_run_t *run, bool dirty);
172static void arena_run_trim_head(arena_t *arena, arena_chunk_t *chunk,
173 arena_run_t *run, size_t oldsize, size_t newsize);
174static void arena_run_trim_tail(arena_t *arena, arena_chunk_t *chunk,
175 arena_run_t *run, size_t oldsize, size_t newsize, bool dirty);
176static arena_run_t *arena_bin_nonfull_run_get(arena_t *arena, arena_bin_t *bin);
177static void *arena_bin_malloc_hard(arena_t *arena, arena_bin_t *bin);
178static void arena_dissociate_bin_run(arena_chunk_t *chunk, arena_run_t *run,
179 arena_bin_t *bin);
180static void arena_dalloc_bin_run(arena_t *arena, arena_chunk_t *chunk,
181 arena_run_t *run, arena_bin_t *bin);
182static void arena_bin_lower_run(arena_t *arena, arena_chunk_t *chunk,
183 arena_run_t *run, arena_bin_t *bin);
184static void arena_ralloc_large_shrink(arena_t *arena, arena_chunk_t *chunk,
185 void *ptr, size_t oldsize, size_t size);
186static bool arena_ralloc_large_grow(arena_t *arena, arena_chunk_t *chunk,
187 void *ptr, size_t oldsize, size_t size, size_t extra, bool zero);
188static bool arena_ralloc_large(void *ptr, size_t oldsize, size_t size,
189 size_t extra, bool zero);
190static bool small_size2bin_init(void);
191#ifdef JEMALLOC_DEBUG
192static void small_size2bin_validate(void);
193#endif
194static bool small_size2bin_init_hard(void);
195static size_t bin_info_run_size_calc(arena_bin_info_t *bin_info,
196 size_t min_run_size);
197static bool bin_info_init(void);
198
199/******************************************************************************/
200
201static inline int
202arena_run_comp(arena_chunk_map_t *a, arena_chunk_map_t *b)
203{
204 uintptr_t a_mapelm = (uintptr_t)a;
205 uintptr_t b_mapelm = (uintptr_t)b;
206
207 assert(a != NULL);
208 assert(b != NULL);
209
210 return ((a_mapelm > b_mapelm) - (a_mapelm < b_mapelm));
211}
212
213/* Generate red-black tree functions. */
214rb_gen(static JEMALLOC_ATTR(unused), arena_run_tree_, arena_run_tree_t,
215 arena_chunk_map_t, u.rb_link, arena_run_comp)
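/*
 * Note: arena_run_comp() orders map elements purely by address, so the tree
 * generated above keeps each bin's set of non-full runs sorted by address;
 * arena_run_tree_first() therefore yields the lowest such run, matching the
 * lowest-run preference that arena_bin_lower_run() maintains for runcur.
 */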
216
217static inline int
218arena_avail_comp(arena_chunk_map_t *a, arena_chunk_map_t *b)
219{
220 int ret;
221 size_t a_size = a->bits & ~PAGE_MASK;
222 size_t b_size = b->bits & ~PAGE_MASK;
223
224 assert((a->bits & CHUNK_MAP_KEY) == CHUNK_MAP_KEY || (a->bits &
225 CHUNK_MAP_DIRTY) == (b->bits & CHUNK_MAP_DIRTY));
226
227 ret = (a_size > b_size) - (a_size < b_size);
228 if (ret == 0) {
229 uintptr_t a_mapelm, b_mapelm;
230
231 if ((a->bits & CHUNK_MAP_KEY) != CHUNK_MAP_KEY)
232 a_mapelm = (uintptr_t)a;
233 else {
234 /*
235 * Treat keys as though they are lower than anything
236 * else.
237 */
238 a_mapelm = 0;
239 }
240 b_mapelm = (uintptr_t)b;
241
242 ret = (a_mapelm > b_mapelm) - (a_mapelm < b_mapelm);
243 }
244
245 return (ret);
246}
247
248/* Generate red-black tree functions. */
249rb_gen(static JEMALLOC_ATTR(unused), arena_avail_tree_, arena_avail_tree_t,
250 arena_chunk_map_t, u.rb_link, arena_avail_comp)
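/*
 * Note: arena_avail_comp() orders available runs primarily by size and
 * secondarily by address, and search keys sort before real map elements of
 * equal size.  Consequently the nsearch() calls in arena_run_alloc(), which
 * use a (size | CHUNK_MAP_KEY) key, return the lowest-addressed run of the
 * smallest size that can satisfy the request -- the "lowest best fit"
 * described there.
 */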
251
252static inline void *
253arena_run_reg_alloc(arena_run_t *run, arena_bin_info_t *bin_info)
254{
255 void *ret;
256 unsigned regind;
257 bitmap_t *bitmap = (bitmap_t *)((uintptr_t)run +
258 (uintptr_t)bin_info->bitmap_offset);
259
260 dassert(run->magic == ARENA_RUN_MAGIC);
261 assert(run->nfree > 0);
262 assert(bitmap_full(bitmap, &bin_info->bitmap_info) == false);
263
264 regind = bitmap_sfu(bitmap, &bin_info->bitmap_info);
265 ret = (void *)((uintptr_t)run + (uintptr_t)bin_info->reg0_offset +
266 (uintptr_t)(bin_info->reg_size * regind));
267 run->nfree--;
268 if (regind == run->nextind)
269 run->nextind++;
270 assert(regind < run->nextind);
271 return (ret);
272}
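/*
 * Worked example (hypothetical numbers, not from the original source): with
 * reg0_offset == 32, reg_size == 48, and bitmap_sfu() returning regind == 3,
 * the region returned above starts at run + 32 + 3*48 == run + 176.  The run
 * header, the bitmap (at bitmap_offset), and the regions (starting at
 * reg0_offset) all live within the same run of pages.
 */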
273
274static inline void
275arena_run_reg_dalloc(arena_run_t *run, void *ptr)
276{
277 arena_chunk_t *chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(run);
278 size_t binind = arena_bin_index(chunk->arena, run->bin);
279 arena_bin_info_t *bin_info = &arena_bin_info[binind];
280 unsigned regind = arena_run_regind(run, bin_info, ptr);
281 bitmap_t *bitmap = (bitmap_t *)((uintptr_t)run +
282 (uintptr_t)bin_info->bitmap_offset);
283
284 assert(run->nfree < bin_info->nregs);
285 /* Freeing an interior pointer can cause assertion failure. */
286 assert(((uintptr_t)ptr - ((uintptr_t)run +
287 (uintptr_t)bin_info->reg0_offset)) % (uintptr_t)bin_info->reg_size
288 == 0);
289 assert((uintptr_t)ptr >= (uintptr_t)run +
290 (uintptr_t)bin_info->reg0_offset);
291 /* Freeing an unallocated pointer can cause assertion failure. */
292 assert(bitmap_get(bitmap, &bin_info->bitmap_info, regind));
293
294 bitmap_unset(bitmap, &bin_info->bitmap_info, regind);
295 run->nfree++;
296}
297
298#ifdef JEMALLOC_DEBUG
299static inline void
300arena_chunk_validate_zeroed(arena_chunk_t *chunk, size_t run_ind)
301{
302 size_t i;
303 size_t *p = (size_t *)((uintptr_t)chunk + (run_ind << PAGE_SHIFT));
304
305 for (i = 0; i < PAGE_SIZE / sizeof(size_t); i++)
306 assert(p[i] == 0);
307}
308#endif
309
310static void
311arena_run_split(arena_t *arena, arena_run_t *run, size_t size, bool large,
312 bool zero)
313{
314 arena_chunk_t *chunk;
315 size_t old_ndirty, run_ind, total_pages, need_pages, rem_pages, i;
316 size_t flag_dirty;
317 arena_avail_tree_t *runs_avail;
318#ifdef JEMALLOC_STATS
319 size_t cactive_diff;
320#endif
321
322 chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(run);
323 old_ndirty = chunk->ndirty;
324 run_ind = (unsigned)(((uintptr_t)run - (uintptr_t)chunk)
325 >> PAGE_SHIFT);
326 flag_dirty = chunk->map[run_ind-map_bias].bits & CHUNK_MAP_DIRTY;
327 runs_avail = (flag_dirty != 0) ? &arena->runs_avail_dirty :
328 &arena->runs_avail_clean;
329 total_pages = (chunk->map[run_ind-map_bias].bits & ~PAGE_MASK) >>
330 PAGE_SHIFT;
331 assert((chunk->map[run_ind+total_pages-1-map_bias].bits &
332 CHUNK_MAP_DIRTY) == flag_dirty);
333 need_pages = (size >> PAGE_SHIFT);
334 assert(need_pages > 0);
335 assert(need_pages <= total_pages);
336 rem_pages = total_pages - need_pages;
337
338 arena_avail_tree_remove(runs_avail, &chunk->map[run_ind-map_bias]);
339#ifdef JEMALLOC_STATS
340 /* Update stats_cactive if nactive is crossing a chunk multiple. */
341 cactive_diff = CHUNK_CEILING((arena->nactive + need_pages) <<
342 PAGE_SHIFT) - CHUNK_CEILING(arena->nactive << PAGE_SHIFT);
343 if (cactive_diff != 0)
344 stats_cactive_add(cactive_diff);
345#endif
346 arena->nactive += need_pages;
347
348 /* Keep track of trailing unused pages for later use. */
349 if (rem_pages > 0) {
350 if (flag_dirty != 0) {
351 chunk->map[run_ind+need_pages-map_bias].bits =
352 (rem_pages << PAGE_SHIFT) | CHUNK_MAP_DIRTY;
353 chunk->map[run_ind+total_pages-1-map_bias].bits =
354 (rem_pages << PAGE_SHIFT) | CHUNK_MAP_DIRTY;
355 } else {
356 chunk->map[run_ind+need_pages-map_bias].bits =
357 (rem_pages << PAGE_SHIFT) |
358 (chunk->map[run_ind+need_pages-map_bias].bits &
359 CHUNK_MAP_UNZEROED);
360 chunk->map[run_ind+total_pages-1-map_bias].bits =
361 (rem_pages << PAGE_SHIFT) |
362 (chunk->map[run_ind+total_pages-1-map_bias].bits &
363 CHUNK_MAP_UNZEROED);
364 }
365 arena_avail_tree_insert(runs_avail,
366 &chunk->map[run_ind+need_pages-map_bias]);
367 }
368
369 /* Update dirty page accounting. */
370 if (flag_dirty != 0) {
371 chunk->ndirty -= need_pages;
372 arena->ndirty -= need_pages;
373 }
374
375 /*
376 * Update the page map separately for large vs. small runs, since it is
377 * possible to avoid iteration for large mallocs.
378 */
379 if (large) {
380 if (zero) {
381 if (flag_dirty == 0) {
382 /*
383 * The run is clean, so some pages may be
384 * zeroed (i.e. never before touched).
385 */
386 for (i = 0; i < need_pages; i++) {
387 if ((chunk->map[run_ind+i-map_bias].bits
388 & CHUNK_MAP_UNZEROED) != 0) {
389 memset((void *)((uintptr_t)
390 chunk + ((run_ind+i) <<
391 PAGE_SHIFT)), 0,
392 PAGE_SIZE);
393 }
394#ifdef JEMALLOC_DEBUG
395 else {
396 arena_chunk_validate_zeroed(
397 chunk, run_ind+i);
398 }
399#endif
400 }
401 } else {
402 /*
403 * The run is dirty, so all pages must be
404 * zeroed.
405 */
406 memset((void *)((uintptr_t)chunk + (run_ind <<
407 PAGE_SHIFT)), 0, (need_pages <<
408 PAGE_SHIFT));
409 }
410 }
411
412 /*
413 * Set the last element first, in case the run only contains one
414 * page (i.e. both statements set the same element).
415 */
416 chunk->map[run_ind+need_pages-1-map_bias].bits =
417 CHUNK_MAP_LARGE | CHUNK_MAP_ALLOCATED | flag_dirty;
418 chunk->map[run_ind-map_bias].bits = size | flag_dirty |
419 CHUNK_MAP_LARGE | CHUNK_MAP_ALLOCATED;
420 } else {
421 assert(zero == false);
422 /*
423 * Propagate the dirty and unzeroed flags to the allocated
424 * small run, so that arena_dalloc_bin_run() has the ability to
425 * conditionally trim clean pages.
426 */
427 chunk->map[run_ind-map_bias].bits =
428 (chunk->map[run_ind-map_bias].bits & CHUNK_MAP_UNZEROED) |
429 CHUNK_MAP_ALLOCATED | flag_dirty;
430#ifdef JEMALLOC_DEBUG
431 /*
432 * The first page will always be dirtied during small run
433 * initialization, so a validation failure here would not
434 * actually cause an observable failure.
435 */
436 if (flag_dirty == 0 &&
437 (chunk->map[run_ind-map_bias].bits & CHUNK_MAP_UNZEROED)
438 == 0)
439 arena_chunk_validate_zeroed(chunk, run_ind);
440#endif
441 for (i = 1; i < need_pages - 1; i++) {
442 chunk->map[run_ind+i-map_bias].bits = (i << PAGE_SHIFT)
443 | (chunk->map[run_ind+i-map_bias].bits &
444 CHUNK_MAP_UNZEROED) | CHUNK_MAP_ALLOCATED;
445#ifdef JEMALLOC_DEBUG
446 if (flag_dirty == 0 &&
447 (chunk->map[run_ind+i-map_bias].bits &
448 CHUNK_MAP_UNZEROED) == 0)
449 arena_chunk_validate_zeroed(chunk, run_ind+i);
450#endif
451 }
452 chunk->map[run_ind+need_pages-1-map_bias].bits = ((need_pages
453 - 1) << PAGE_SHIFT) |
454 (chunk->map[run_ind+need_pages-1-map_bias].bits &
455 CHUNK_MAP_UNZEROED) | CHUNK_MAP_ALLOCATED | flag_dirty;
456#ifdef JEMALLOC_DEBUG
457 if (flag_dirty == 0 &&
458 (chunk->map[run_ind+need_pages-1-map_bias].bits &
459 CHUNK_MAP_UNZEROED) == 0) {
460 arena_chunk_validate_zeroed(chunk,
461 run_ind+need_pages-1);
462 }
463#endif
464 }
465}
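/*
 * Summary (derived from the code above, not an original comment): for a
 * large run only the first and last map elements are fully initialized, and
 * the first element records the run size in its high bits; for a small run
 * every element i records its page offset within the run as
 * (i << PAGE_SHIFT), which is what arena_salloc() and arena_dalloc_bin()
 * rely on to walk back from an interior page to the run header.
 */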
466
467static arena_chunk_t *
468arena_chunk_alloc(arena_t *arena)
469{
470 arena_chunk_t *chunk;
471 size_t i;
472
473 if (arena->spare != NULL) {
474 arena_avail_tree_t *runs_avail;
475
476 chunk = arena->spare;
477 arena->spare = NULL;
478
479 /* Insert the run into the appropriate runs_avail_* tree. */
480 if ((chunk->map[0].bits & CHUNK_MAP_DIRTY) == 0)
481 runs_avail = &arena->runs_avail_clean;
482 else
483 runs_avail = &arena->runs_avail_dirty;
484 assert((chunk->map[0].bits & ~PAGE_MASK) == arena_maxclass);
485 assert((chunk->map[chunk_npages-1-map_bias].bits & ~PAGE_MASK)
486 == arena_maxclass);
487 assert((chunk->map[0].bits & CHUNK_MAP_DIRTY) ==
488 (chunk->map[chunk_npages-1-map_bias].bits &
489 CHUNK_MAP_DIRTY));
490 arena_avail_tree_insert(runs_avail, &chunk->map[0]);
491 } else {
492 bool zero;
493 size_t unzeroed;
494
495 zero = false;
496 malloc_mutex_unlock(&arena->lock);
497 chunk = (arena_chunk_t *)chunk_alloc(chunksize, false, &zero);
498 malloc_mutex_lock(&arena->lock);
499 if (chunk == NULL)
500 return (NULL);
501#ifdef JEMALLOC_STATS
502 arena->stats.mapped += chunksize;
503#endif
504
505 chunk->arena = arena;
506 ql_elm_new(chunk, link_dirty);
507 chunk->dirtied = false;
508
509 /*
510 * Claim that no pages are in use, since the header is merely
511 * overhead.
512 */
513 chunk->ndirty = 0;
514
515 /*
516 * Initialize the map to contain one maximal free untouched run.
517 * Mark the pages as zeroed iff chunk_alloc() returned a zeroed
518 * chunk.
519 */
520 unzeroed = zero ? 0 : CHUNK_MAP_UNZEROED;
521 chunk->map[0].bits = arena_maxclass | unzeroed;
522 /*
523 * The internal page map entries only need to be initialized if the
524 * chunk is not zeroed.
525 */
526 if (zero == false) {
527 for (i = map_bias+1; i < chunk_npages-1; i++)
528 chunk->map[i-map_bias].bits = unzeroed;
529 }
530#ifdef JEMALLOC_DEBUG
531 else {
532 for (i = map_bias+1; i < chunk_npages-1; i++)
533 assert(chunk->map[i-map_bias].bits == unzeroed);
534 }
535#endif
536 chunk->map[chunk_npages-1-map_bias].bits = arena_maxclass |
537 unzeroed;
538
539 /* Insert the run into the runs_avail_clean tree. */
540 arena_avail_tree_insert(&arena->runs_avail_clean,
541 &chunk->map[0]);
542 }
543
544 return (chunk);
545}
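/*
 * Note on indexing (derived from the code above): chunk->map[] has entries
 * only for the pages following the chunk header, so page indices are biased
 * by map_bias before indexing, as in chunk->map[i - map_bias].  For a fresh
 * chunk, chunk->map[0] describes the first usable page and covers a single
 * maximal run of arena_maxclass bytes.
 */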
546
547static void
548arena_chunk_dealloc(arena_t *arena, arena_chunk_t *chunk)
549{
550 arena_avail_tree_t *runs_avail;
551
552 /*
553 * Remove run from the appropriate runs_avail_* tree, so that the arena
554 * does not use it.
555 */
556 if ((chunk->map[0].bits & CHUNK_MAP_DIRTY) == 0)
557 runs_avail = &arena->runs_avail_clean;
558 else
559 runs_avail = &arena->runs_avail_dirty;
560 arena_avail_tree_remove(runs_avail, &chunk->map[0]);
561
562 if (arena->spare != NULL) {
563 arena_chunk_t *spare = arena->spare;
564
565 arena->spare = chunk;
566 if (spare->dirtied) {
567 ql_remove(&chunk->arena->chunks_dirty, spare,
568 link_dirty);
569 arena->ndirty -= spare->ndirty;
570 }
571 malloc_mutex_unlock(&arena->lock);
572 chunk_dealloc((void *)spare, chunksize);
573 malloc_mutex_lock(&arena->lock);
574#ifdef JEMALLOC_STATS
575 arena->stats.mapped -= chunksize;
576#endif
577 } else
578 arena->spare = chunk;
579}
580
581static arena_run_t *
582arena_run_alloc(arena_t *arena, size_t size, bool large, bool zero)
583{
584 arena_chunk_t *chunk;
585 arena_run_t *run;
586 arena_chunk_map_t *mapelm, key;
587
588 assert(size <= arena_maxclass);
589 assert((size & PAGE_MASK) == 0);
590
591 /* Search the arena's chunks for the lowest best fit. */
592 key.bits = size | CHUNK_MAP_KEY;
593 mapelm = arena_avail_tree_nsearch(&arena->runs_avail_dirty, &key);
594 if (mapelm != NULL) {
595 arena_chunk_t *run_chunk = CHUNK_ADDR2BASE(mapelm);
596 size_t pageind = (((uintptr_t)mapelm -
597 (uintptr_t)run_chunk->map) / sizeof(arena_chunk_map_t))
598 + map_bias;
599
600 run = (arena_run_t *)((uintptr_t)run_chunk + (pageind <<
601 PAGE_SHIFT));
602 arena_run_split(arena, run, size, large, zero);
603 return (run);
604 }
605 mapelm = arena_avail_tree_nsearch(&arena->runs_avail_clean, &key);
606 if (mapelm != NULL) {
607 arena_chunk_t *run_chunk = CHUNK_ADDR2BASE(mapelm);
608 size_t pageind = (((uintptr_t)mapelm -
609 (uintptr_t)run_chunk->map) / sizeof(arena_chunk_map_t))
610 + map_bias;
611
612 run = (arena_run_t *)((uintptr_t)run_chunk + (pageind <<
613 PAGE_SHIFT));
614 arena_run_split(arena, run, size, large, zero);
615 return (run);
616 }
617
618 /*
619 * No usable runs. Create a new chunk from which to allocate the run.
620 */
621 chunk = arena_chunk_alloc(arena);
622 if (chunk != NULL) {
623 run = (arena_run_t *)((uintptr_t)chunk + (map_bias <<
624 PAGE_SHIFT));
625 arena_run_split(arena, run, size, large, zero);
626 return (run);
627 }
628
629 /*
630 * arena_chunk_alloc() failed, but another thread may have made
631 * sufficient memory available while this one dropped arena->lock in
632 * arena_chunk_alloc(), so search one more time.
633 */
634 mapelm = arena_avail_tree_nsearch(&arena->runs_avail_dirty, &key);
635 if (mapelm != NULL) {
636 arena_chunk_t *run_chunk = CHUNK_ADDR2BASE(mapelm);
637 size_t pageind = (((uintptr_t)mapelm -
638 (uintptr_t)run_chunk->map) / sizeof(arena_chunk_map_t))
639 + map_bias;
640
641 run = (arena_run_t *)((uintptr_t)run_chunk + (pageind <<
642 PAGE_SHIFT));
643 arena_run_split(arena, run, size, large, zero);
644 return (run);
645 }
646 mapelm = arena_avail_tree_nsearch(&arena->runs_avail_clean, &key);
647 if (mapelm != NULL) {
648 arena_chunk_t *run_chunk = CHUNK_ADDR2BASE(mapelm);
649 size_t pageind = (((uintptr_t)mapelm -
650 (uintptr_t)run_chunk->map) / sizeof(arena_chunk_map_t))
651 + map_bias;
652
653 run = (arena_run_t *)((uintptr_t)run_chunk + (pageind <<
654 PAGE_SHIFT));
655 arena_run_split(arena, run, size, large, zero);
656 return (run);
657 }
658
659 return (NULL);
660}
661
662static inline void
663arena_maybe_purge(arena_t *arena)
664{
665
666 /* Enforce opt_lg_dirty_mult. */
667 if (opt_lg_dirty_mult >= 0 && arena->ndirty > arena->npurgatory &&
668 (arena->ndirty - arena->npurgatory) > chunk_npages &&
669 (arena->nactive >> opt_lg_dirty_mult) < (arena->ndirty -
670 arena->npurgatory))
671 arena_purge(arena, false);
672}
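/*
 * Worked example (hypothetical value, not a statement of the default): if
 * opt_lg_dirty_mult were 5, the test above would invoke arena_purge() once
 * the dirty pages not already claimed for purging (ndirty - npurgatory)
 * exceeded both chunk_npages and nactive/32, i.e. unpurged dirty pages are
 * kept to roughly 1/32 of the active pages.
 */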
673
674static inline void
675arena_chunk_purge(arena_t *arena, arena_chunk_t *chunk)
676{
677 ql_head(arena_chunk_map_t) mapelms;
678 arena_chunk_map_t *mapelm;
679 size_t pageind, flag_unzeroed;
680#ifdef JEMALLOC_DEBUG
681 size_t ndirty;
682#endif
683#ifdef JEMALLOC_STATS
684 size_t nmadvise;
685#endif
686
687 ql_new(&mapelms);
688
689 flag_unzeroed =
690#ifdef JEMALLOC_PURGE_MADVISE_DONTNEED
691 /*
692 * madvise(..., MADV_DONTNEED) results in zero-filled pages for anonymous
693 * mappings, but not for file-backed mappings.
694 */
695# ifdef JEMALLOC_SWAP
696 swap_enabled ? CHUNK_MAP_UNZEROED :
697# endif
698 0;
699#else
700 CHUNK_MAP_UNZEROED;
701#endif
702
703 /*
704 * If chunk is the spare, temporarily re-allocate it, 1) so that its
705 * run is reinserted into runs_avail_dirty, and 2) so that it cannot be
706 * completely discarded by another thread while arena->lock is dropped
707 * by this thread. Note that the arena_run_dalloc() call will
708 * implicitly deallocate the chunk, so no explicit action is required
709 * in this function to deallocate the chunk.
710 *
711 * Note that once a chunk contains dirty pages, it cannot again contain
712 * a single run unless 1) it is a dirty run, or 2) this function purges
713 * dirty pages and causes the transition to a single clean run. Thus
714 * (chunk == arena->spare) is possible, but it is not possible for
715 * this function to be called on the spare unless it contains a dirty
716 * run.
717 */
718 if (chunk == arena->spare) {
719 assert((chunk->map[0].bits & CHUNK_MAP_DIRTY) != 0);
720 arena_chunk_alloc(arena);
721 }
722
723 /* Temporarily allocate all free dirty runs within chunk. */
724 for (pageind = map_bias; pageind < chunk_npages;) {
725 mapelm = &chunk->map[pageind-map_bias];
726 if ((mapelm->bits & CHUNK_MAP_ALLOCATED) == 0) {
727 size_t npages;
728
729 npages = mapelm->bits >> PAGE_SHIFT;
730 assert(pageind + npages <= chunk_npages);
731 if (mapelm->bits & CHUNK_MAP_DIRTY) {
732 size_t i;
733#ifdef JEMALLOC_STATS
734 size_t cactive_diff;
735#endif
736
737 arena_avail_tree_remove(
738 &arena->runs_avail_dirty, mapelm);
739
740 mapelm->bits = (npages << PAGE_SHIFT) |
741 flag_unzeroed | CHUNK_MAP_LARGE |
742 CHUNK_MAP_ALLOCATED;
743 /*
744 * Update internal elements in the page map, so
745 * that CHUNK_MAP_UNZEROED is properly set.
746 */
747 for (i = 1; i < npages - 1; i++) {
748 chunk->map[pageind+i-map_bias].bits =
749 flag_unzeroed;
750 }
751 if (npages > 1) {
752 chunk->map[
753 pageind+npages-1-map_bias].bits =
754 flag_unzeroed | CHUNK_MAP_LARGE |
755 CHUNK_MAP_ALLOCATED;
756 }
757
758#ifdef JEMALLOC_STATS
759 /*
760 * Update stats_cactive if nactive is crossing a
761 * chunk multiple.
762 */
763 cactive_diff = CHUNK_CEILING((arena->nactive +
764 npages) << PAGE_SHIFT) -
765 CHUNK_CEILING(arena->nactive << PAGE_SHIFT);
766 if (cactive_diff != 0)
767 stats_cactive_add(cactive_diff);
768#endif
769 arena->nactive += npages;
770 /* Append to list for later processing. */
771 ql_elm_new(mapelm, u.ql_link);
772 ql_tail_insert(&mapelms, mapelm, u.ql_link);
773 }
774
775 pageind += npages;
776 } else {
777 /* Skip allocated run. */
778 if (mapelm->bits & CHUNK_MAP_LARGE)
779 pageind += mapelm->bits >> PAGE_SHIFT;
780 else {
781 arena_run_t *run = (arena_run_t *)((uintptr_t)
782 chunk + (uintptr_t)(pageind << PAGE_SHIFT));
783
784 assert((mapelm->bits >> PAGE_SHIFT) == 0);
785 dassert(run->magic == ARENA_RUN_MAGIC);
786 size_t binind = arena_bin_index(arena,
787 run->bin);
788 arena_bin_info_t *bin_info =
789 &arena_bin_info[binind];
790 pageind += bin_info->run_size >> PAGE_SHIFT;
791 }
792 }
793 }
794 assert(pageind == chunk_npages);
795
796#ifdef JEMALLOC_DEBUG
797 ndirty = chunk->ndirty;
798#endif
799#ifdef JEMALLOC_STATS
800 arena->stats.purged += chunk->ndirty;
801#endif
802 arena->ndirty -= chunk->ndirty;
803 chunk->ndirty = 0;
804 ql_remove(&arena->chunks_dirty, chunk, link_dirty);
805 chunk->dirtied = false;
806
807 malloc_mutex_unlock(&arena->lock);
808#ifdef JEMALLOC_STATS
809 nmadvise = 0;
810#endif
811 ql_foreach(mapelm, &mapelms, u.ql_link) {
812 size_t pageind = (((uintptr_t)mapelm - (uintptr_t)chunk->map) /
813 sizeof(arena_chunk_map_t)) + map_bias;
814 size_t npages = mapelm->bits >> PAGE_SHIFT;
815
816 assert(pageind + npages <= chunk_npages);
817#ifdef JEMALLOC_DEBUG
818 assert(ndirty >= npages);
819 ndirty -= npages;
820#endif
821
822#ifdef JEMALLOC_PURGE_MADVISE_DONTNEED
823 madvise((void *)((uintptr_t)chunk + (pageind << PAGE_SHIFT)),
824 (npages << PAGE_SHIFT), MADV_DONTNEED);
825#elif defined(JEMALLOC_PURGE_MADVISE_FREE)
826 madvise((void *)((uintptr_t)chunk + (pageind << PAGE_SHIFT)),
827 (npages << PAGE_SHIFT), MADV_FREE);
828#else
829# error "No method defined for purging unused dirty pages."
830#endif
831
832#ifdef JEMALLOC_STATS
833 nmadvise++;
834#endif
835 }
836#ifdef JEMALLOC_DEBUG
837 assert(ndirty == 0);
838#endif
839 malloc_mutex_lock(&arena->lock);
840#ifdef JEMALLOC_STATS
841 arena->stats.nmadvise += nmadvise;
842#endif
843
844 /* Deallocate runs. */
845 for (mapelm = ql_first(&mapelms); mapelm != NULL;
846 mapelm = ql_first(&mapelms)) {
847 size_t pageind = (((uintptr_t)mapelm - (uintptr_t)chunk->map) /
848 sizeof(arena_chunk_map_t)) + map_bias;
849 arena_run_t *run = (arena_run_t *)((uintptr_t)chunk +
850 (uintptr_t)(pageind << PAGE_SHIFT));
851
852 ql_remove(&mapelms, mapelm, u.ql_link);
853 arena_run_dalloc(arena, run, false);
854 }
855}
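/*
 * Summary (derived from the code above): arena_chunk_purge() proceeds in
 * three passes: 1) with arena->lock held, each free dirty run in the chunk
 * is temporarily marked allocated and queued on a local list; 2) the lock is
 * dropped and every queued run is passed to madvise(); 3) the lock is
 * re-acquired and the runs are released via arena_run_dalloc(), which
 * coalesces them back into the clean runs_avail tree.
 */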
856
857static void
858arena_purge(arena_t *arena, bool all)
859{
860 arena_chunk_t *chunk;
861 size_t npurgatory;
862#ifdef JEMALLOC_DEBUG
863 size_t ndirty = 0;
864
865 ql_foreach(chunk, &arena->chunks_dirty, link_dirty) {
866 assert(chunk->dirtied);
867 ndirty += chunk->ndirty;
868 }
869 assert(ndirty == arena->ndirty);
870#endif
871 assert(arena->ndirty > arena->npurgatory || all);
872 assert(arena->ndirty > chunk_npages || all);
873 assert((arena->nactive >> opt_lg_dirty_mult) < (arena->ndirty -
874 arena->npurgatory) || all);
875
876#ifdef JEMALLOC_STATS
877 arena->stats.npurge++;
878#endif
879
880 /*
881 * Compute the minimum number of pages that this thread should try to
882 * purge, and add the result to arena->npurgatory. This will keep
883 * multiple threads from racing to reduce ndirty below the threshold.
884 */
885 npurgatory = arena->ndirty - arena->npurgatory;
886 if (all == false) {
887 assert(npurgatory >= arena->nactive >> opt_lg_dirty_mult);
888 npurgatory -= arena->nactive >> opt_lg_dirty_mult;
889 }
890 arena->npurgatory += npurgatory;
891
892 while (npurgatory > 0) {
893 /* Get next chunk with dirty pages. */
894 chunk = ql_first(&arena->chunks_dirty);
895 if (chunk == NULL) {
896 /*
897 * This thread was unable to purge as many pages as
898 * originally intended, due to races with other threads
899 * that either did some of the purging work, or re-used
900 * dirty pages.
901 */
902 arena->npurgatory -= npurgatory;
903 return;
904 }
905 while (chunk->ndirty == 0) {
906 ql_remove(&arena->chunks_dirty, chunk, link_dirty);
907 chunk->dirtied = false;
908 chunk = ql_first(&arena->chunks_dirty);
909 if (chunk == NULL) {
910 /* Same logic as above. */
911 arena->npurgatory -= npurgatory;
912 return;
913 }
914 }
915
916 if (chunk->ndirty > npurgatory) {
917 /*
918 * This thread will, at a minimum, purge all the dirty
919 * pages in chunk, so set npurgatory to reflect this
920 * thread's commitment to purge the pages. This tends
921 * to reduce the chances of the following scenario:
922 *
923 * 1) This thread sets arena->npurgatory such that
924 * (arena->ndirty - arena->npurgatory) is at the
925 * threshold.
926 * 2) This thread drops arena->lock.
927 * 3) Another thread causes one or more pages to be
928 * dirtied, and immediately determines that it must
929 * purge dirty pages.
930 *
931 * If this scenario *does* play out, that's okay,
932 * because all of the purging work being done really
933 * needs to happen.
934 */
935 arena->npurgatory += chunk->ndirty - npurgatory;
936 npurgatory = chunk->ndirty;
937 }
938
939 arena->npurgatory -= chunk->ndirty;
940 npurgatory -= chunk->ndirty;
941 arena_chunk_purge(arena, chunk);
942 }
943}
944
945void
946arena_purge_all(arena_t *arena)
947{
948
949 malloc_mutex_lock(&arena->lock);
950 arena_purge(arena, true);
951 malloc_mutex_unlock(&arena->lock);
952}
953
954static void
955arena_run_dalloc(arena_t *arena, arena_run_t *run, bool dirty)
956{
957 arena_chunk_t *chunk;
958 size_t size, run_ind, run_pages, flag_dirty;
959 arena_avail_tree_t *runs_avail;
960#ifdef JEMALLOC_STATS
961 size_t cactive_diff;
962#endif
963
964 chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(run);
965 run_ind = (size_t)(((uintptr_t)run - (uintptr_t)chunk)
966 >> PAGE_SHIFT);
967 assert(run_ind >= map_bias);
968 assert(run_ind < chunk_npages);
969 if ((chunk->map[run_ind-map_bias].bits & CHUNK_MAP_LARGE) != 0) {
970 size = chunk->map[run_ind-map_bias].bits & ~PAGE_MASK;
971 assert(size == PAGE_SIZE ||
972 (chunk->map[run_ind+(size>>PAGE_SHIFT)-1-map_bias].bits &
973 ~PAGE_MASK) == 0);
974 assert((chunk->map[run_ind+(size>>PAGE_SHIFT)-1-map_bias].bits &
975 CHUNK_MAP_LARGE) != 0);
976 assert((chunk->map[run_ind+(size>>PAGE_SHIFT)-1-map_bias].bits &
977 CHUNK_MAP_ALLOCATED) != 0);
978 } else {
979 size_t binind = arena_bin_index(arena, run->bin);
980 arena_bin_info_t *bin_info = &arena_bin_info[binind];
981 size = bin_info->run_size;
982 }
983 run_pages = (size >> PAGE_SHIFT);
984#ifdef JEMALLOC_STATS
985 /* Update stats_cactive if nactive is crossing a chunk multiple. */
986 cactive_diff = CHUNK_CEILING(arena->nactive << PAGE_SHIFT) -
987 CHUNK_CEILING((arena->nactive - run_pages) << PAGE_SHIFT);
988 if (cactive_diff != 0)
989 stats_cactive_sub(cactive_diff);
990#endif
991 arena->nactive -= run_pages;
992
993 /*
994 * The run is dirty if the caller claims to have dirtied it, as well as
995 * if it was already dirty before being allocated.
996 */
997 if ((chunk->map[run_ind-map_bias].bits & CHUNK_MAP_DIRTY) != 0)
998 dirty = true;
999 flag_dirty = dirty ? CHUNK_MAP_DIRTY : 0;
1000 runs_avail = dirty ? &arena->runs_avail_dirty :
1001 &arena->runs_avail_clean;
1002
1003 /* Mark pages as unallocated in the chunk map. */
1004 if (dirty) {
1005 chunk->map[run_ind-map_bias].bits = size | CHUNK_MAP_DIRTY;
1006 chunk->map[run_ind+run_pages-1-map_bias].bits = size |
1007 CHUNK_MAP_DIRTY;
1008
1009 chunk->ndirty += run_pages;
1010 arena->ndirty += run_pages;
1011 } else {
1012 chunk->map[run_ind-map_bias].bits = size |
1013 (chunk->map[run_ind-map_bias].bits & CHUNK_MAP_UNZEROED);
1014 chunk->map[run_ind+run_pages-1-map_bias].bits = size |
1015 (chunk->map[run_ind+run_pages-1-map_bias].bits &
1016 CHUNK_MAP_UNZEROED);
1017 }
1018
1019 /* Try to coalesce forward. */
1020 if (run_ind + run_pages < chunk_npages &&
1021 (chunk->map[run_ind+run_pages-map_bias].bits & CHUNK_MAP_ALLOCATED)
1022 == 0 && (chunk->map[run_ind+run_pages-map_bias].bits &
1023 CHUNK_MAP_DIRTY) == flag_dirty) {
1024 size_t nrun_size = chunk->map[run_ind+run_pages-map_bias].bits &
1025 ~PAGE_MASK;
1026 size_t nrun_pages = nrun_size >> PAGE_SHIFT;
1027
1028 /*
1029 * Remove successor from runs_avail; the coalesced run is
1030 * inserted later.
1031 */
1032 assert((chunk->map[run_ind+run_pages+nrun_pages-1-map_bias].bits
1033 & ~PAGE_MASK) == nrun_size);
1034 assert((chunk->map[run_ind+run_pages+nrun_pages-1-map_bias].bits
1035 & CHUNK_MAP_ALLOCATED) == 0);
1036 assert((chunk->map[run_ind+run_pages+nrun_pages-1-map_bias].bits
1037 & CHUNK_MAP_DIRTY) == flag_dirty);
1038 arena_avail_tree_remove(runs_avail,
1039 &chunk->map[run_ind+run_pages-map_bias]);
1040
1041 size += nrun_size;
1042 run_pages += nrun_pages;
1043
1044 chunk->map[run_ind-map_bias].bits = size |
1045 (chunk->map[run_ind-map_bias].bits & CHUNK_MAP_FLAGS_MASK);
1046 chunk->map[run_ind+run_pages-1-map_bias].bits = size |
1047 (chunk->map[run_ind+run_pages-1-map_bias].bits &
1048 CHUNK_MAP_FLAGS_MASK);
1049 }
1050
1051 /* Try to coalesce backward. */
1052 if (run_ind > map_bias && (chunk->map[run_ind-1-map_bias].bits &
1053 CHUNK_MAP_ALLOCATED) == 0 && (chunk->map[run_ind-1-map_bias].bits &
1054 CHUNK_MAP_DIRTY) == flag_dirty) {
1055 size_t prun_size = chunk->map[run_ind-1-map_bias].bits &
1056 ~PAGE_MASK;
1057 size_t prun_pages = prun_size >> PAGE_SHIFT;
1058
1059 run_ind -= prun_pages;
1060
1061 /*
1062 * Remove predecessor from runs_avail; the coalesced run is
1063 * inserted later.
1064 */
1065 assert((chunk->map[run_ind-map_bias].bits & ~PAGE_MASK)
1066 == prun_size);
1067 assert((chunk->map[run_ind-map_bias].bits & CHUNK_MAP_ALLOCATED)
1068 == 0);
1069 assert((chunk->map[run_ind-map_bias].bits & CHUNK_MAP_DIRTY)
1070 == flag_dirty);
1071 arena_avail_tree_remove(runs_avail,
1072 &chunk->map[run_ind-map_bias]);
1073
1074 size += prun_size;
1075 run_pages += prun_pages;
1076
1077 chunk->map[run_ind-map_bias].bits = size |
1078 (chunk->map[run_ind-map_bias].bits & CHUNK_MAP_FLAGS_MASK);
1079 chunk->map[run_ind+run_pages-1-map_bias].bits = size |
1080 (chunk->map[run_ind+run_pages-1-map_bias].bits &
1081 CHUNK_MAP_FLAGS_MASK);
1082 }
1083
1084 /* Insert into runs_avail, now that coalescing is complete. */
1085 assert((chunk->map[run_ind-map_bias].bits & ~PAGE_MASK) ==
1086 (chunk->map[run_ind+run_pages-1-map_bias].bits & ~PAGE_MASK));
1087 assert((chunk->map[run_ind-map_bias].bits & CHUNK_MAP_DIRTY) ==
1088 (chunk->map[run_ind+run_pages-1-map_bias].bits & CHUNK_MAP_DIRTY));
1089 arena_avail_tree_insert(runs_avail, &chunk->map[run_ind-map_bias]);
1090
1091 if (dirty) {
1092 /*
1093 * Insert into chunks_dirty before potentially calling
1094 * arena_chunk_dealloc(), so that chunks_dirty and
1095 * arena->ndirty are consistent.
1096 */
1097 if (chunk->dirtied == false) {
1098 ql_tail_insert(&arena->chunks_dirty, chunk, link_dirty);
1099 chunk->dirtied = true;
1100 }
1101 }
1102
1103 /*
1104 * Deallocate chunk if it is now completely unused. The bit
1105 * manipulation checks whether the first run is unallocated and extends
1106 * to the end of the chunk.
1107 */
1108 if ((chunk->map[0].bits & (~PAGE_MASK | CHUNK_MAP_ALLOCATED)) ==
1109 arena_maxclass)
1110 arena_chunk_dealloc(arena, chunk);
1111
1112 /*
1113 * It is okay to do dirty page processing here even if the chunk was
1114 * deallocated above, since in that case it is the spare. Waiting
1115 * until after possible chunk deallocation to do dirty processing
1116 * allows for an old spare to be fully deallocated, thus decreasing the
1117 * chances of spuriously crossing the dirty page purging threshold.
1118 */
1119 if (dirty)
1120 arena_maybe_purge(arena);
1121}
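/*
 * Note (derived from the code above): a freed run is only coalesced with a
 * neighboring free run of the same dirtiness, since clean and dirty runs are
 * kept in separate runs_avail trees and a coalesced run must live entirely
 * in one of them.
 */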
1122
1123static void
1124arena_run_trim_head(arena_t *arena, arena_chunk_t *chunk, arena_run_t *run,
1125 size_t oldsize, size_t newsize)
1126{
1127 size_t pageind = ((uintptr_t)run - (uintptr_t)chunk) >> PAGE_SHIFT;
1128 size_t head_npages = (oldsize - newsize) >> PAGE_SHIFT;
1129 size_t flag_dirty = chunk->map[pageind-map_bias].bits & CHUNK_MAP_DIRTY;
1130
1131 assert(oldsize > newsize);
1132
1133 /*
1134 * Update the chunk map so that arena_run_dalloc() can treat the
1135 * leading run as separately allocated. Set the last element of each
1136 * run first, in case of single-page runs.
1137 */
1138 assert((chunk->map[pageind-map_bias].bits & CHUNK_MAP_LARGE) != 0);
1139 assert((chunk->map[pageind-map_bias].bits & CHUNK_MAP_ALLOCATED) != 0);
1140 chunk->map[pageind+head_npages-1-map_bias].bits = flag_dirty |
1141 (chunk->map[pageind+head_npages-1-map_bias].bits &
1142 CHUNK_MAP_UNZEROED) | CHUNK_MAP_LARGE | CHUNK_MAP_ALLOCATED;
1143 chunk->map[pageind-map_bias].bits = (oldsize - newsize)
1144 | flag_dirty | (chunk->map[pageind-map_bias].bits &
1145 CHUNK_MAP_UNZEROED) | CHUNK_MAP_LARGE | CHUNK_MAP_ALLOCATED;
1146
1147#ifdef JEMALLOC_DEBUG
1148 {
1149 size_t tail_npages = newsize >> PAGE_SHIFT;
1150 assert((chunk->map[pageind+head_npages+tail_npages-1-map_bias]
1151 .bits & ~PAGE_MASK) == 0);
1152 assert((chunk->map[pageind+head_npages+tail_npages-1-map_bias]
1153 .bits & CHUNK_MAP_DIRTY) == flag_dirty);
1154 assert((chunk->map[pageind+head_npages+tail_npages-1-map_bias]
1155 .bits & CHUNK_MAP_LARGE) != 0);
1156 assert((chunk->map[pageind+head_npages+tail_npages-1-map_bias]
1157 .bits & CHUNK_MAP_ALLOCATED) != 0);
1158 }
1159#endif
1160 chunk->map[pageind+head_npages-map_bias].bits = newsize | flag_dirty |
1161 (chunk->map[pageind+head_npages-map_bias].bits &
1162 CHUNK_MAP_FLAGS_MASK) | CHUNK_MAP_LARGE | CHUNK_MAP_ALLOCATED;
1163
1164 arena_run_dalloc(arena, run, false);
1165}
1166
1167static void
1168arena_run_trim_tail(arena_t *arena, arena_chunk_t *chunk, arena_run_t *run,
1169 size_t oldsize, size_t newsize, bool dirty)
1170{
1171 size_t pageind = ((uintptr_t)run - (uintptr_t)chunk) >> PAGE_SHIFT;
1172 size_t head_npages = newsize >> PAGE_SHIFT;
1173 size_t tail_npages = (oldsize - newsize) >> PAGE_SHIFT;
1174 size_t flag_dirty = chunk->map[pageind-map_bias].bits &
1175 CHUNK_MAP_DIRTY;
1176
1177 assert(oldsize > newsize);
1178
1179 /*
1180 * Update the chunk map so that arena_run_dalloc() can treat the
1181 * trailing run as separately allocated. Set the last element of each
1182 * run first, in case of single-page runs.
1183 */
1184 assert((chunk->map[pageind-map_bias].bits & CHUNK_MAP_LARGE) != 0);
1185 assert((chunk->map[pageind-map_bias].bits & CHUNK_MAP_ALLOCATED) != 0);
1186 chunk->map[pageind+head_npages-1-map_bias].bits = flag_dirty |
1187 (chunk->map[pageind+head_npages-1-map_bias].bits &
1188 CHUNK_MAP_UNZEROED) | CHUNK_MAP_LARGE | CHUNK_MAP_ALLOCATED;
1189 chunk->map[pageind-map_bias].bits = newsize | flag_dirty |
1190 (chunk->map[pageind-map_bias].bits & CHUNK_MAP_UNZEROED) |
1191 CHUNK_MAP_LARGE | CHUNK_MAP_ALLOCATED;
1192
1193 assert((chunk->map[pageind+head_npages+tail_npages-1-map_bias].bits &
1194 ~PAGE_MASK) == 0);
1195 assert((chunk->map[pageind+head_npages+tail_npages-1-map_bias].bits &
1196 CHUNK_MAP_LARGE) != 0);
1197 assert((chunk->map[pageind+head_npages+tail_npages-1-map_bias].bits &
1198 CHUNK_MAP_ALLOCATED) != 0);
1199 chunk->map[pageind+head_npages+tail_npages-1-map_bias].bits =
1200 flag_dirty |
1201 (chunk->map[pageind+head_npages+tail_npages-1-map_bias].bits &
1202 CHUNK_MAP_UNZEROED) | CHUNK_MAP_LARGE | CHUNK_MAP_ALLOCATED;
1203 chunk->map[pageind+head_npages-map_bias].bits = (oldsize - newsize) |
1204 flag_dirty | (chunk->map[pageind+head_npages-map_bias].bits &
1205 CHUNK_MAP_UNZEROED) | CHUNK_MAP_LARGE | CHUNK_MAP_ALLOCATED;
1206
1207 arena_run_dalloc(arena, (arena_run_t *)((uintptr_t)run + newsize),
1208 dirty);
1209}
1210
1211static arena_run_t *
1212arena_bin_nonfull_run_get(arena_t *arena, arena_bin_t *bin)
1213{
1214 arena_chunk_map_t *mapelm;
1215 arena_run_t *run;
1216 size_t binind;
1217 arena_bin_info_t *bin_info;
1218
1219 /* Look for a usable run. */
1220 mapelm = arena_run_tree_first(&bin->runs);
1221 if (mapelm != NULL) {
1222 arena_chunk_t *chunk;
1223 size_t pageind;
1224
1225 /* run is guaranteed to have available space. */
1226 arena_run_tree_remove(&bin->runs, mapelm);
1227
1228 chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(mapelm);
1229 pageind = ((((uintptr_t)mapelm - (uintptr_t)chunk->map) /
1230 sizeof(arena_chunk_map_t))) + map_bias;
1231 run = (arena_run_t *)((uintptr_t)chunk + (uintptr_t)((pageind -
1232 (mapelm->bits >> PAGE_SHIFT))
1233 << PAGE_SHIFT));
1234#ifdef JEMALLOC_STATS
1235 bin->stats.reruns++;
1236#endif
1237 return (run);
1238 }
1239 /* No existing runs have any space available. */
1240
1241 binind = arena_bin_index(arena, bin);
1242 bin_info = &arena_bin_info[binind];
1243
1244 /* Allocate a new run. */
1245 malloc_mutex_unlock(&bin->lock);
1246 /******************************/
1247 malloc_mutex_lock(&arena->lock);
1248 run = arena_run_alloc(arena, bin_info->run_size, false, false);
1249 if (run != NULL) {
1250 bitmap_t *bitmap = (bitmap_t *)((uintptr_t)run +
1251 (uintptr_t)bin_info->bitmap_offset);
1252
1253 /* Initialize run internals. */
1254 run->bin = bin;
1255 run->nextind = 0;
1256 run->nfree = bin_info->nregs;
1257 bitmap_init(bitmap, &bin_info->bitmap_info);
1258#ifdef JEMALLOC_DEBUG
1259 run->magic = ARENA_RUN_MAGIC;
1260#endif
1261 }
1262 malloc_mutex_unlock(&arena->lock);
1263 /********************************/
1264 malloc_mutex_lock(&bin->lock);
1265 if (run != NULL) {
1266#ifdef JEMALLOC_STATS
1267 bin->stats.nruns++;
1268 bin->stats.curruns++;
1269 if (bin->stats.curruns > bin->stats.highruns)
1270 bin->stats.highruns = bin->stats.curruns;
1271#endif
1272 return (run);
1273 }
1274
1275 /*
1276 * arena_run_alloc() failed, but another thread may have made
1277 * sufficient memory available while this one dropped bin->lock above,
1278 * so search one more time.
1279 */
1280 mapelm = arena_run_tree_first(&bin->runs);
1281 if (mapelm != NULL) {
1282 arena_chunk_t *chunk;
1283 size_t pageind;
1284
1285 /* run is guaranteed to have available space. */
1286 arena_run_tree_remove(&bin->runs, mapelm);
1287
1288 chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(mapelm);
1289 pageind = ((((uintptr_t)mapelm - (uintptr_t)chunk->map) /
1290 sizeof(arena_chunk_map_t))) + map_bias;
1291 run = (arena_run_t *)((uintptr_t)chunk + (uintptr_t)((pageind -
1292 (mapelm->bits >> PAGE_SHIFT))
1293 << PAGE_SHIFT));
1294#ifdef JEMALLOC_STATS
1295 bin->stats.reruns++;
1296#endif
1297 return (run);
1298 }
1299
1300 return (NULL);
1301}
1302
1303/* Re-fill bin->runcur, then call arena_run_reg_alloc(). */
1304static void *
1305arena_bin_malloc_hard(arena_t *arena, arena_bin_t *bin)
1306{
1307 void *ret;
1308 size_t binind;
1309 arena_bin_info_t *bin_info;
1310 arena_run_t *run;
1311
1312 binind = arena_bin_index(arena, bin);
1313 bin_info = &arena_bin_info[binind];
1314 bin->runcur = NULL;
1315 run = arena_bin_nonfull_run_get(arena, bin);
1316 if (bin->runcur != NULL && bin->runcur->nfree > 0) {
1317 /*
1318 * Another thread updated runcur while this one ran without the
1319 * bin lock in arena_bin_nonfull_run_get().
1320 */
1321 dassert(bin->runcur->magic == ARENA_RUN_MAGIC);
1322 assert(bin->runcur->nfree > 0);
1323 ret = arena_run_reg_alloc(bin->runcur, bin_info);
1324 if (run != NULL) {
1325 arena_chunk_t *chunk;
1326
1327 /*
1328 * arena_run_alloc() may have allocated run, or it may
1329 * have pulled run from the bin's run tree. Therefore
1330 * it is unsafe to make any assumptions about how run
1331 * has previously been used, and arena_bin_lower_run()
1332 * must be called, as if a region were just deallocated
1333 * from the run.
1334 */
1335 chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(run);
1336 if (run->nfree == bin_info->nregs)
1337 arena_dalloc_bin_run(arena, chunk, run, bin);
1338 else
1339 arena_bin_lower_run(arena, chunk, run, bin);
1340 }
1341 return (ret);
1342 }
1343
1344 if (run == NULL)
1345 return (NULL);
1346
1347 bin->runcur = run;
1348
1349 dassert(bin->runcur->magic == ARENA_RUN_MAGIC);
1350 assert(bin->runcur->nfree > 0);
1351
1352 return (arena_run_reg_alloc(bin->runcur, bin_info));
1353}
1354
1355#ifdef JEMALLOC_PROF
1356void
1357arena_prof_accum(arena_t *arena, uint64_t accumbytes)
1358{
1359
1360 if (prof_interval != 0) {
1361 arena->prof_accumbytes += accumbytes;
1362 if (arena->prof_accumbytes >= prof_interval) {
1363 prof_idump();
1364 arena->prof_accumbytes -= prof_interval;
1365 }
1366 }
1367}
1368#endif
1369
1370#ifdef JEMALLOC_TCACHE
1371void
1372arena_tcache_fill_small(arena_t *arena, tcache_bin_t *tbin, size_t binind
1373# ifdef JEMALLOC_PROF
1374 , uint64_t prof_accumbytes
1375# endif
1376 )
1377{
1378 unsigned i, nfill;
1379 arena_bin_t *bin;
1380 arena_run_t *run;
1381 void *ptr;
1382
1383 assert(tbin->ncached == 0);
1384
1385#ifdef JEMALLOC_PROF
1386 malloc_mutex_lock(&arena->lock);
1387 arena_prof_accum(arena, prof_accumbytes);
1388 malloc_mutex_unlock(&arena->lock);
1389#endif
1390 bin = &arena->bins[binind];
1391 malloc_mutex_lock(&bin->lock);
1392 for (i = 0, nfill = (tcache_bin_info[binind].ncached_max >>
1393 tbin->lg_fill_div); i < nfill; i++) {
1394 if ((run = bin->runcur) != NULL && run->nfree > 0)
1395 ptr = arena_run_reg_alloc(run, &arena_bin_info[binind]);
1396 else
1397 ptr = arena_bin_malloc_hard(arena, bin);
1398 if (ptr == NULL)
1399 break;
1400 /* Insert such that low regions get used first. */
1401 tbin->avail[nfill - 1 - i] = ptr;
1402 }
1403#ifdef JEMALLOC_STATS
1404 bin->stats.allocated += i * arena_bin_info[binind].reg_size;
1405 bin->stats.nmalloc += i;
1406 bin->stats.nrequests += tbin->tstats.nrequests;
1407 bin->stats.nfills++;
1408 tbin->tstats.nrequests = 0;
1409#endif
1410 malloc_mutex_unlock(&bin->lock);
1411 tbin->ncached = i;
1412}
1413#endif
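/*
 * Note (hypothetical numbers for illustration): arena_tcache_fill_small()
 * requests ncached_max >> lg_fill_div regions per fill, so with
 * ncached_max == 200 and lg_fill_div == 1 it would attempt to grab 100
 * regions, stopping early if the bin cannot supply more; the regions are
 * stored in reverse order so that, per the comment above, low regions get
 * used first.
 */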
1414
1415void *
1416arena_malloc_small(arena_t *arena, size_t size, bool zero)
1417{
1418 void *ret;
1419 arena_bin_t *bin;
1420 arena_run_t *run;
1421 size_t binind;
1422
1423 binind = SMALL_SIZE2BIN(size);
1424 assert(binind < nbins);
1425 bin = &arena->bins[binind];
1426 size = arena_bin_info[binind].reg_size;
1427
1428 malloc_mutex_lock(&bin->lock);
1429 if ((run = bin->runcur) != NULL && run->nfree > 0)
1430 ret = arena_run_reg_alloc(run, &arena_bin_info[binind]);
1431 else
1432 ret = arena_bin_malloc_hard(arena, bin);
1433
1434 if (ret == NULL) {
1435 malloc_mutex_unlock(&bin->lock);
1436 return (NULL);
1437 }
1438
1439#ifdef JEMALLOC_STATS
1440 bin->stats.allocated += size;
1441 bin->stats.nmalloc++;
1442 bin->stats.nrequests++;
1443#endif
1444 malloc_mutex_unlock(&bin->lock);
1445#ifdef JEMALLOC_PROF
1446 if (isthreaded == false) {
1447 malloc_mutex_lock(&arena->lock);
1448 arena_prof_accum(arena, size);
1449 malloc_mutex_unlock(&arena->lock);
1450 }
1451#endif
1452
1453 if (zero == false) {
1454#ifdef JEMALLOC_FILL
1455 if (opt_junk)
1456 memset(ret, 0xa5, size);
1457 else if (opt_zero)
1458 memset(ret, 0, size);
1459#endif
1460 } else
1461 memset(ret, 0, size);
1462
1463 return (ret);
1464}
1465
1466void *
1467arena_malloc_large(arena_t *arena, size_t size, bool zero)
1468{
1469 void *ret;
1470
1471 /* Large allocation. */
1472 size = PAGE_CEILING(size);
1473 malloc_mutex_lock(&arena->lock);
1474 ret = (void *)arena_run_alloc(arena, size, true, zero);
1475 if (ret == NULL) {
1476 malloc_mutex_unlock(&arena->lock);
1477 return (NULL);
1478 }
1479#ifdef JEMALLOC_STATS
1480 arena->stats.nmalloc_large++;
1481 arena->stats.nrequests_large++;
1482 arena->stats.allocated_large += size;
1483 arena->stats.lstats[(size >> PAGE_SHIFT) - 1].nmalloc++;
1484 arena->stats.lstats[(size >> PAGE_SHIFT) - 1].nrequests++;
1485 arena->stats.lstats[(size >> PAGE_SHIFT) - 1].curruns++;
1486 if (arena->stats.lstats[(size >> PAGE_SHIFT) - 1].curruns >
1487 arena->stats.lstats[(size >> PAGE_SHIFT) - 1].highruns) {
1488 arena->stats.lstats[(size >> PAGE_SHIFT) - 1].highruns =
1489 arena->stats.lstats[(size >> PAGE_SHIFT) - 1].curruns;
1490 }
1491#endif
1492#ifdef JEMALLOC_PROF
1493 arena_prof_accum(arena, size);
1494#endif
1495 malloc_mutex_unlock(&arena->lock);
1496
1497 if (zero == false) {
1498#ifdef JEMALLOC_FILL
1499 if (opt_junk)
1500 memset(ret, 0xa5, size);
1501 else if (opt_zero)
1502 memset(ret, 0, size);
1503#endif
1504 }
1505
1506 return (ret);
1507}
1508
1509void *
1510arena_malloc(size_t size, bool zero)
1511{
1512
1513 assert(size != 0);
1514 assert(QUANTUM_CEILING(size) <= arena_maxclass);
1515
1516 if (size <= small_maxclass) {
1517#ifdef JEMALLOC_TCACHE
1518 tcache_t *tcache;
1519
1520 if ((tcache = tcache_get()) != NULL)
1521 return (tcache_alloc_small(tcache, size, zero));
1522 else
1523
1524#endif
1525 return (arena_malloc_small(choose_arena(), size, zero));
1526 } else {
1527#ifdef JEMALLOC_TCACHE
1528 if (size <= tcache_maxclass) {
1529 tcache_t *tcache;
1530
1531 if ((tcache = tcache_get()) != NULL)
1532 return (tcache_alloc_large(tcache, size, zero));
1533 else {
1534 return (arena_malloc_large(choose_arena(),
1535 size, zero));
1536 }
1537 } else
1538#endif
1539 return (arena_malloc_large(choose_arena(), size, zero));
1540 }
1541}
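/*
 * Dispatch sketch (paraphrasing the code above; sizes are hypothetical): a
 * request of, say, 100 bytes with small_maxclass >= 100 takes the small path
 * and is rounded up to its bin's reg_size; a larger request no bigger than
 * tcache_maxclass may still be served from the thread cache when one is
 * available; anything else goes to arena_malloc_large(), which rounds up to
 * a whole number of pages via PAGE_CEILING().
 */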
1542
1543/* Only handles large allocations that require more than page alignment. */
1544void *
1545arena_palloc(arena_t *arena, size_t size, size_t alloc_size, size_t alignment,
1546 bool zero)
1547{
1548 void *ret;
1549 size_t offset;
1550 arena_chunk_t *chunk;
1551
1552 assert((size & PAGE_MASK) == 0);
1553
1554 alignment = PAGE_CEILING(alignment);
1555
1556 malloc_mutex_lock(&arena->lock);
1557 ret = (void *)arena_run_alloc(arena, alloc_size, true, zero);
1558 if (ret == NULL) {
1559 malloc_mutex_unlock(&arena->lock);
1560 return (NULL);
1561 }
1562
1563 chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ret);
1564
1565 offset = (uintptr_t)ret & (alignment - 1);
1566 assert((offset & PAGE_MASK) == 0);
1567 assert(offset < alloc_size);
1568 if (offset == 0)
1569 arena_run_trim_tail(arena, chunk, ret, alloc_size, size, false);
1570 else {
1571 size_t leadsize, trailsize;
1572
1573 leadsize = alignment - offset;
1574 if (leadsize > 0) {
1575 arena_run_trim_head(arena, chunk, ret, alloc_size,
1576 alloc_size - leadsize);
1577 ret = (void *)((uintptr_t)ret + leadsize);
1578 }
1579
1580 trailsize = alloc_size - leadsize - size;
1581 if (trailsize != 0) {
1582 /* Trim trailing space. */
1583 assert(trailsize < alloc_size);
1584 arena_run_trim_tail(arena, chunk, ret, size + trailsize,
1585 size, false);
1586 }
1587 }
1588
1589#ifdef JEMALLOC_STATS
1590 arena->stats.nmalloc_large++;
1591 arena->stats.nrequests_large++;
1592 arena->stats.allocated_large += size;
1593 arena->stats.lstats[(size >> PAGE_SHIFT) - 1].nmalloc++;
1594 arena->stats.lstats[(size >> PAGE_SHIFT) - 1].nrequests++;
1595 arena->stats.lstats[(size >> PAGE_SHIFT) - 1].curruns++;
1596 if (arena->stats.lstats[(size >> PAGE_SHIFT) - 1].curruns >
1597 arena->stats.lstats[(size >> PAGE_SHIFT) - 1].highruns) {
1598 arena->stats.lstats[(size >> PAGE_SHIFT) - 1].highruns =
1599 arena->stats.lstats[(size >> PAGE_SHIFT) - 1].curruns;
1600 }
1601#endif
1602 malloc_mutex_unlock(&arena->lock);
1603
1604#ifdef JEMALLOC_FILL
1605 if (zero == false) {
1606 if (opt_junk)
1607 memset(ret, 0xa5, size);
1608 else if (opt_zero)
1609 memset(ret, 0, size);
1610 }
1611#endif
1612 return (ret);
1613}
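/*
 * Worked example (hypothetical sizes): for a 16 KiB-aligned request of
 * 16 KiB where arena_run_alloc() happens to return a run starting 4 KiB past
 * an alignment boundary, offset == 4 KiB and leadsize == 12 KiB is trimmed
 * from the head; whatever remains beyond ret + size is trimmed from the
 * tail.  The caller is assumed to have chosen alloc_size large enough that
 * such an aligned slice always exists.
 */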
1614
1615/* Return the size of the allocation pointed to by ptr. */
1616size_t
1617arena_salloc(const void *ptr)
1618{
1619 size_t ret;
1620 arena_chunk_t *chunk;
1621 size_t pageind, mapbits;
1622
1623 assert(ptr != NULL);
1624 assert(CHUNK_ADDR2BASE(ptr) != ptr);
1625
1626 chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
1627 pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> PAGE_SHIFT;
1628 mapbits = chunk->map[pageind-map_bias].bits;
1629 assert((mapbits & CHUNK_MAP_ALLOCATED) != 0);
1630 if ((mapbits & CHUNK_MAP_LARGE) == 0) {
1631 arena_run_t *run = (arena_run_t *)((uintptr_t)chunk +
1632 (uintptr_t)((pageind - (mapbits >> PAGE_SHIFT)) <<
1633 PAGE_SHIFT));
1634 dassert(run->magic == ARENA_RUN_MAGIC);
1635 size_t binind = arena_bin_index(chunk->arena, run->bin);
1636 arena_bin_info_t *bin_info = &arena_bin_info[binind];
1637 assert(((uintptr_t)ptr - ((uintptr_t)run +
1638 (uintptr_t)bin_info->reg0_offset)) % bin_info->reg_size ==
1639 0);
1640 ret = bin_info->reg_size;
1641 } else {
1642 assert(((uintptr_t)ptr & PAGE_MASK) == 0);
1643 ret = mapbits & ~PAGE_MASK;
1644 assert(ret != 0);
1645 }
1646
1647 return (ret);
1648}
1649
1650#ifdef JEMALLOC_PROF
1651void
1652arena_prof_promoted(const void *ptr, size_t size)
1653{
1654 arena_chunk_t *chunk;
1655 size_t pageind, binind;
1656
1657 assert(ptr != NULL);
1658 assert(CHUNK_ADDR2BASE(ptr) != ptr);
1659 assert(isalloc(ptr) == PAGE_SIZE);
1660
1661 chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
1662 pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> PAGE_SHIFT;
1663 binind = SMALL_SIZE2BIN(size);
1664 assert(binind < nbins);
1665 chunk->map[pageind-map_bias].bits = (chunk->map[pageind-map_bias].bits &
1666 ~CHUNK_MAP_CLASS_MASK) | ((binind+1) << CHUNK_MAP_CLASS_SHIFT);
1667}
1668
1669size_t
1670arena_salloc_demote(const void *ptr)
1671{
1672 size_t ret;
1673 arena_chunk_t *chunk;
1674 size_t pageind, mapbits;
1675
1676 assert(ptr != NULL);
1677 assert(CHUNK_ADDR2BASE(ptr) != ptr);
1678
1679 chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
1680 pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> PAGE_SHIFT;
1681 mapbits = chunk->map[pageind-map_bias].bits;
1682 assert((mapbits & CHUNK_MAP_ALLOCATED) != 0);
1683 if ((mapbits & CHUNK_MAP_LARGE) == 0) {
1684 arena_run_t *run = (arena_run_t *)((uintptr_t)chunk +
1685 (uintptr_t)((pageind - (mapbits >> PAGE_SHIFT)) <<
1686 PAGE_SHIFT));
1687 dassert(run->magic == ARENA_RUN_MAGIC);
1688 size_t binind = arena_bin_index(chunk->arena, run->bin);
1689 arena_bin_info_t *bin_info = &arena_bin_info[binind];
1690 assert(((uintptr_t)ptr - ((uintptr_t)run +
1691 (uintptr_t)bin_info->reg0_offset)) % bin_info->reg_size ==
1692 0);
1693 ret = bin_info->reg_size;
1694 } else {
1695 assert(((uintptr_t)ptr & PAGE_MASK) == 0);
1696 ret = mapbits & ~PAGE_MASK;
1697 if (prof_promote && ret == PAGE_SIZE && (mapbits &
1698 CHUNK_MAP_CLASS_MASK) != 0) {
1699 size_t binind = ((mapbits & CHUNK_MAP_CLASS_MASK) >>
1700 CHUNK_MAP_CLASS_SHIFT) - 1;
1701 assert(binind < nbins);
1702 ret = arena_bin_info[binind].reg_size;
1703 }
1704 assert(ret != 0);
1705 }
1706
1707 return (ret);
1708}
1709#endif
1710
1711static void
1712arena_dissociate_bin_run(arena_chunk_t *chunk, arena_run_t *run,
1713 arena_bin_t *bin)
1714{
1715
1716 /* Dissociate run from bin. */
1717 if (run == bin->runcur)
1718 bin->runcur = NULL;
1719 else {
1720 size_t binind = arena_bin_index(chunk->arena, bin);
1721 arena_bin_info_t *bin_info = &arena_bin_info[binind];
1722
1723 if (bin_info->nregs != 1) {
1724 size_t run_pageind = (((uintptr_t)run -
1725 (uintptr_t)chunk)) >> PAGE_SHIFT;
1726 arena_chunk_map_t *run_mapelm =
1727 &chunk->map[run_pageind-map_bias];
1728 /*
1729 * This block's conditional is necessary because if the
1730 * run only contains one region, then it never gets
1731 * inserted into the non-full runs tree.
1732 */
1733 arena_run_tree_remove(&bin->runs, run_mapelm);
1734 }
1735 }
1736}
1737
1738static void
1739arena_dalloc_bin_run(arena_t *arena, arena_chunk_t *chunk, arena_run_t *run,
1740 arena_bin_t *bin)
1741{
1742 size_t binind;
1743 arena_bin_info_t *bin_info;
1744 size_t npages, run_ind, past;
1745
1746 assert(run != bin->runcur);
1747 assert(arena_run_tree_search(&bin->runs, &chunk->map[
1748 (((uintptr_t)run-(uintptr_t)chunk)>>PAGE_SHIFT)-map_bias]) == NULL);
1749
1750 binind = arena_bin_index(chunk->arena, run->bin);
1751 bin_info = &arena_bin_info[binind];
1752
1753 malloc_mutex_unlock(&bin->lock);
1754 /******************************/
1755 npages = bin_info->run_size >> PAGE_SHIFT;
1756 run_ind = (size_t)(((uintptr_t)run - (uintptr_t)chunk) >> PAGE_SHIFT);
1757 past = (size_t)(PAGE_CEILING((uintptr_t)run +
1758 (uintptr_t)bin_info->reg0_offset + (uintptr_t)(run->nextind *
1759 bin_info->reg_size) - (uintptr_t)chunk) >> PAGE_SHIFT);
1760 malloc_mutex_lock(&arena->lock);
1761
1762 /*
1763 * If the run was originally clean, and some pages were never touched,
1764 * trim the clean pages before deallocating the dirty portion of the
1765 * run.
1766 */
1767 if ((chunk->map[run_ind-map_bias].bits & CHUNK_MAP_DIRTY) == 0 && past
1768 - run_ind < npages) {
1769 /*
1770 * Trim clean pages. Convert to large run beforehand. Set the
1771 * last map element first, in case this is a one-page run.
1772 */
1773 chunk->map[run_ind+npages-1-map_bias].bits = CHUNK_MAP_LARGE |
1774 (chunk->map[run_ind+npages-1-map_bias].bits &
1775 CHUNK_MAP_FLAGS_MASK);
1776 chunk->map[run_ind-map_bias].bits = bin_info->run_size |
1777 CHUNK_MAP_LARGE | (chunk->map[run_ind-map_bias].bits &
1778 CHUNK_MAP_FLAGS_MASK);
1779 arena_run_trim_tail(arena, chunk, run, (npages << PAGE_SHIFT),
1780 ((past - run_ind) << PAGE_SHIFT), false);
1781 /* npages = past - run_ind; */
1782 }
1783#ifdef JEMALLOC_DEBUG
1784 run->magic = 0;
1785#endif
1786 arena_run_dalloc(arena, run, true);
1787 malloc_mutex_unlock(&arena->lock);
1788 /****************************/
1789 malloc_mutex_lock(&bin->lock);
1790#ifdef JEMALLOC_STATS
1791 bin->stats.curruns--;
1792#endif
1793}
1794
1795static void
1796arena_bin_lower_run(arena_t *arena, arena_chunk_t *chunk, arena_run_t *run,
1797 arena_bin_t *bin)
1798{
1799
1800 /*
1801 * Make sure that bin->runcur always refers to the lowest non-full run,
1802 * if one exists.
1803 */
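/*
 * Three cases follow: with no current run, this run becomes runcur; if it
 * lies at a lower address than runcur, it replaces runcur and the old runcur
 * (if still non-full) is returned to bin->runs; otherwise the run itself is
 * inserted into bin->runs.
 */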
1804 if (bin->runcur == NULL)
1805 bin->runcur = run;
1806 else if ((uintptr_t)run < (uintptr_t)bin->runcur) {
1807 /* Switch runcur. */
1808 if (bin->runcur->nfree > 0) {
1809 arena_chunk_t *runcur_chunk =
1810 CHUNK_ADDR2BASE(bin->runcur);
1811 size_t runcur_pageind = (((uintptr_t)bin->runcur -
1812 (uintptr_t)runcur_chunk)) >> PAGE_SHIFT;
1813 arena_chunk_map_t *runcur_mapelm =
1814 &runcur_chunk->map[runcur_pageind-map_bias];
1815
1816 /* Insert runcur. */
1817 arena_run_tree_insert(&bin->runs, runcur_mapelm);
1818 }
1819 bin->runcur = run;
1820 } else {
1821 size_t run_pageind = (((uintptr_t)run -
1822 (uintptr_t)chunk)) >> PAGE_SHIFT;
1823 arena_chunk_map_t *run_mapelm =
1824 &chunk->map[run_pageind-map_bias];
1825
1826 assert(arena_run_tree_search(&bin->runs, run_mapelm) == NULL);
1827 arena_run_tree_insert(&bin->runs, run_mapelm);
1828 }
1829}
1830
1831void
1832arena_dalloc_bin(arena_t *arena, arena_chunk_t *chunk, void *ptr,
1833 arena_chunk_map_t *mapelm)
1834{
1835 size_t pageind;
1836 arena_run_t *run;
1837 arena_bin_t *bin;
1838#if (defined(JEMALLOC_FILL) || defined(JEMALLOC_STATS))
1839 size_t size;
1840#endif
1841
1842 pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> PAGE_SHIFT;
1843 run = (arena_run_t *)((uintptr_t)chunk + (uintptr_t)((pageind -
1844 (mapelm->bits >> PAGE_SHIFT)) << PAGE_SHIFT));
1845 dassert(run->magic == ARENA_RUN_MAGIC);
1846 bin = run->bin;
1847 size_t binind = arena_bin_index(arena, bin);
1848 arena_bin_info_t *bin_info = &arena_bin_info[binind];
1849#if (defined(JEMALLOC_FILL) || defined(JEMALLOC_STATS))
1850 size = bin_info->reg_size;
1851#endif
1852
1853#ifdef JEMALLOC_FILL
1854 if (opt_junk)
1855 memset(ptr, 0x5a, size);
1856#endif
1857
1858 arena_run_reg_dalloc(run, ptr);
1859 if (run->nfree == bin_info->nregs) {
1860 arena_dissociate_bin_run(chunk, run, bin);
1861 arena_dalloc_bin_run(arena, chunk, run, bin);
1862 } else if (run->nfree == 1 && run != bin->runcur)
1863 arena_bin_lower_run(arena, chunk, run, bin);
1864
1865#ifdef JEMALLOC_STATS
1866 bin->stats.allocated -= size;
1867 bin->stats.ndalloc++;
1868#endif
1869}
1870
1871#ifdef JEMALLOC_STATS
1872void
1873arena_stats_merge(arena_t *arena, size_t *nactive, size_t *ndirty,
1874 arena_stats_t *astats, malloc_bin_stats_t *bstats,
1875 malloc_large_stats_t *lstats)
1876{
1877 unsigned i;
1878
1879 malloc_mutex_lock(&arena->lock);
1880 *nactive += arena->nactive;
1881 *ndirty += arena->ndirty;
1882
1883 astats->mapped += arena->stats.mapped;
1884 astats->npurge += arena->stats.npurge;
1885 astats->nmadvise += arena->stats.nmadvise;
1886 astats->purged += arena->stats.purged;
1887 astats->allocated_large += arena->stats.allocated_large;
1888 astats->nmalloc_large += arena->stats.nmalloc_large;
1889 astats->ndalloc_large += arena->stats.ndalloc_large;
1890 astats->nrequests_large += arena->stats.nrequests_large;
1891
1892 for (i = 0; i < nlclasses; i++) {
1893 lstats[i].nmalloc += arena->stats.lstats[i].nmalloc;
1894 lstats[i].ndalloc += arena->stats.lstats[i].ndalloc;
1895 lstats[i].nrequests += arena->stats.lstats[i].nrequests;
1896 lstats[i].highruns += arena->stats.lstats[i].highruns;
1897 lstats[i].curruns += arena->stats.lstats[i].curruns;
1898 }
1899 malloc_mutex_unlock(&arena->lock);
1900
1901 for (i = 0; i < nbins; i++) {
1902 arena_bin_t *bin = &arena->bins[i];
1903
1904 malloc_mutex_lock(&bin->lock);
1905 bstats[i].allocated += bin->stats.allocated;
1906 bstats[i].nmalloc += bin->stats.nmalloc;
1907 bstats[i].ndalloc += bin->stats.ndalloc;
1908 bstats[i].nrequests += bin->stats.nrequests;
1909#ifdef JEMALLOC_TCACHE
1910 bstats[i].nfills += bin->stats.nfills;
1911 bstats[i].nflushes += bin->stats.nflushes;
1912#endif
1913 bstats[i].nruns += bin->stats.nruns;
1914 bstats[i].reruns += bin->stats.reruns;
1915 bstats[i].highruns += bin->stats.highruns;
1916 bstats[i].curruns += bin->stats.curruns;
1917 malloc_mutex_unlock(&bin->lock);
1918 }
1919}
1920#endif
1921
1922void
1923arena_dalloc_large(arena_t *arena, arena_chunk_t *chunk, void *ptr)
1924{
1925
1926 /* Large allocation. */
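/*
 * The preprocessor guards below arrange for the following block to run
 * unconditionally when JEMALLOC_STATS is defined (size is needed for the
 * statistics updates), to be guarded by opt_junk when only JEMALLOC_FILL is
 * defined, and to compile down to an empty block when neither is defined.
 */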
1927#ifdef JEMALLOC_FILL
1928# ifndef JEMALLOC_STATS
1929 if (opt_junk)
1930# endif
1931#endif
1932 {
1933#if (defined(JEMALLOC_FILL) || defined(JEMALLOC_STATS))
1934 size_t pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >>
1935 PAGE_SHIFT;
1936 size_t size = chunk->map[pageind-map_bias].bits & ~PAGE_MASK;
1937#endif
1938
1939#ifdef JEMALLOC_FILL
1940# ifdef JEMALLOC_STATS
1941 if (opt_junk)
1942# endif
1943 memset(ptr, 0x5a, size);
1944#endif
1945#ifdef JEMALLOC_STATS
1946 arena->stats.ndalloc_large++;
1947 arena->stats.allocated_large -= size;
1948 arena->stats.lstats[(size >> PAGE_SHIFT) - 1].ndalloc++;
1949 arena->stats.lstats[(size >> PAGE_SHIFT) - 1].curruns--;
1950#endif
1951 }
1952
1953 arena_run_dalloc(arena, (arena_run_t *)ptr, true);
1954}
1955
1956static void
1957arena_ralloc_large_shrink(arena_t *arena, arena_chunk_t *chunk, void *ptr,
1958 size_t oldsize, size_t size)
1959{
1960
1961 assert(size < oldsize);
1962
1963 /*
1964 * Shrink the run, and make trailing pages available for other
1965 * allocations.
1966 */
1967 malloc_mutex_lock(&arena->lock);
1968 arena_run_trim_tail(arena, chunk, (arena_run_t *)ptr, oldsize, size,
1969 true);
1970#ifdef JEMALLOC_STATS
1971 arena->stats.ndalloc_large++;
1972 arena->stats.allocated_large -= oldsize;
1973 arena->stats.lstats[(oldsize >> PAGE_SHIFT) - 1].ndalloc++;
1974 arena->stats.lstats[(oldsize >> PAGE_SHIFT) - 1].curruns--;
1975
1976 arena->stats.nmalloc_large++;
1977 arena->stats.nrequests_large++;
1978 arena->stats.allocated_large += size;
1979 arena->stats.lstats[(size >> PAGE_SHIFT) - 1].nmalloc++;
1980 arena->stats.lstats[(size >> PAGE_SHIFT) - 1].nrequests++;
1981 arena->stats.lstats[(size >> PAGE_SHIFT) - 1].curruns++;
1982 if (arena->stats.lstats[(size >> PAGE_SHIFT) - 1].curruns >
1983 arena->stats.lstats[(size >> PAGE_SHIFT) - 1].highruns) {
1984 arena->stats.lstats[(size >> PAGE_SHIFT) - 1].highruns =
1985 arena->stats.lstats[(size >> PAGE_SHIFT) - 1].curruns;
1986 }
1987#endif
1988 malloc_mutex_unlock(&arena->lock);
1989}
1990
1991static bool
1992arena_ralloc_large_grow(arena_t *arena, arena_chunk_t *chunk, void *ptr,
1993 size_t oldsize, size_t size, size_t extra, bool zero)
1994{
1995 size_t pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> PAGE_SHIFT;
1996 size_t npages = oldsize >> PAGE_SHIFT;
1997 size_t followsize;
1998
1999 assert(oldsize == (chunk->map[pageind-map_bias].bits & ~PAGE_MASK));
2000
2001 /* Try to extend the run. */
2002 assert(size + extra > oldsize);
2003 malloc_mutex_lock(&arena->lock);
2004 if (pageind + npages < chunk_npages &&
2005 (chunk->map[pageind+npages-map_bias].bits
2006 & CHUNK_MAP_ALLOCATED) == 0 && (followsize =
2007 chunk->map[pageind+npages-map_bias].bits & ~PAGE_MASK) >= size -
2008 oldsize) {
2009 /*
2010 * The next run is available and sufficiently large. Split the
2011 * following run, then merge the first part with the existing
2012 * allocation.
2013 */
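/*
 * Illustrative example (4 KiB pages assumed): growing an 8 KiB run to 16 KiB
 * (size = 16 KiB, extra = 0) when the trailing free run spans 32 KiB.
 * followsize (32 KiB) >= size - oldsize (8 KiB), and since
 * oldsize + followsize > size + extra, only splitsize =
 * size + extra - oldsize = 8 KiB is split off, yielding a 16 KiB run.
 */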
2014 size_t flag_dirty;
2015 size_t splitsize = (oldsize + followsize <= size + extra)
2016 ? followsize : size + extra - oldsize;
2017 arena_run_split(arena, (arena_run_t *)((uintptr_t)chunk +
2018 ((pageind+npages) << PAGE_SHIFT)), splitsize, true, zero);
2019
2020 size = oldsize + splitsize;
2021 npages = size >> PAGE_SHIFT;
2022
2023 /*
2024 * Mark the extended run as dirty if either portion of the run
2025 * was dirty before allocation. This is rather pedantic,
2026 * because there's not actually any sequence of events that
2027 * could cause the resulting run to be passed to
2028 * arena_run_dalloc() with the dirty argument set to false
2029 * (which is when dirty flag consistency would really matter).
2030 */
2031 flag_dirty = (chunk->map[pageind-map_bias].bits &
2032 CHUNK_MAP_DIRTY) |
2033 (chunk->map[pageind+npages-1-map_bias].bits &
2034 CHUNK_MAP_DIRTY);
2035 chunk->map[pageind-map_bias].bits = size | flag_dirty
2036 | CHUNK_MAP_LARGE | CHUNK_MAP_ALLOCATED;
2037 chunk->map[pageind+npages-1-map_bias].bits = flag_dirty |
2038 CHUNK_MAP_LARGE | CHUNK_MAP_ALLOCATED;
2039
2040#ifdef JEMALLOC_STATS
2041 arena->stats.ndalloc_large++;
2042 arena->stats.allocated_large -= oldsize;
2043 arena->stats.lstats[(oldsize >> PAGE_SHIFT) - 1].ndalloc++;
2044 arena->stats.lstats[(oldsize >> PAGE_SHIFT) - 1].curruns--;
2045
2046 arena->stats.nmalloc_large++;
2047 arena->stats.nrequests_large++;
2048 arena->stats.allocated_large += size;
2049 arena->stats.lstats[(size >> PAGE_SHIFT) - 1].nmalloc++;
2050 arena->stats.lstats[(size >> PAGE_SHIFT) - 1].nrequests++;
2051 arena->stats.lstats[(size >> PAGE_SHIFT) - 1].curruns++;
2052 if (arena->stats.lstats[(size >> PAGE_SHIFT) - 1].curruns >
2053 arena->stats.lstats[(size >> PAGE_SHIFT) - 1].highruns) {
2054 arena->stats.lstats[(size >> PAGE_SHIFT) - 1].highruns =
2055 arena->stats.lstats[(size >> PAGE_SHIFT) -
2056 1].curruns;
2057 }
2058#endif
2059 malloc_mutex_unlock(&arena->lock);
2060 return (false);
2061 }
2062 malloc_mutex_unlock(&arena->lock);
2063
2064 return (true);
2065}
2066
2067/*
2068 * Try to resize a large allocation, in order to avoid copying. This will
2069 * always fail when growing an object if the following run is already in use.
2070 */
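/*
 * For example (4 KiB pages assumed), with oldsize = 12288 (three pages): a
 * request for size = 8192, extra = 0 gives psize = 8192 < oldsize and shrinks
 * the run in place, whereas size = 16384 gives psize = 16384 > oldsize and
 * attempts to grow into the following run.
 */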
2071static bool
2072arena_ralloc_large(void *ptr, size_t oldsize, size_t size, size_t extra,
2073 bool zero)
2074{
2075 size_t psize;
2076
2077 psize = PAGE_CEILING(size + extra);
2078 if (psize == oldsize) {
2079 /* Same size class. */
2080#ifdef JEMALLOC_FILL
2081 if (opt_junk && size < oldsize) {
2082 memset((void *)((uintptr_t)ptr + size), 0x5a, oldsize -
2083 size);
2084 }
2085#endif
2086 return (false);
2087 } else {
2088 arena_chunk_t *chunk;
2089 arena_t *arena;
2090
2091 chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
2092 arena = chunk->arena;
2093 dassert(arena->magic == ARENA_MAGIC);
2094
2095 if (psize < oldsize) {
2096#ifdef JEMALLOC_FILL
2097 /* Fill before shrinking in order to avoid a race. */
2098 if (opt_junk) {
2099 memset((void *)((uintptr_t)ptr + size), 0x5a,
2100 oldsize - size);
2101 }
2102#endif
2103 arena_ralloc_large_shrink(arena, chunk, ptr, oldsize,
2104 psize);
2105 return (false);
2106 } else {
2107 bool ret = arena_ralloc_large_grow(arena, chunk, ptr,
2108 oldsize, PAGE_CEILING(size),
2109 psize - PAGE_CEILING(size), zero);
2110#ifdef JEMALLOC_FILL
2111 if (ret == false && zero == false && opt_zero) {
2112 memset((void *)((uintptr_t)ptr + oldsize), 0,
2113 size - oldsize);
2114 }
2115#endif
2116 return (ret);
2117 }
2118 }
2119}
2120
2121void *
2122arena_ralloc_no_move(void *ptr, size_t oldsize, size_t size, size_t extra,
2123 bool zero)
2124{
2125
2126 /*
2127 * Avoid moving the allocation if the size class can be left the same.
2128 */
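/*
 * For example, assuming 96 bytes is a small size class, a request with
 * oldsize = 96, size = 90, and extra = 8 satisfies the second clause below
 * (size <= oldsize and size + extra >= oldsize), so the existing region is
 * reused without moving (and the trailing 6 bytes are junk-filled if
 * opt_junk is set).
 */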
2129 if (oldsize <= arena_maxclass) {
2130 if (oldsize <= small_maxclass) {
2131 assert(arena_bin_info[SMALL_SIZE2BIN(oldsize)].reg_size
2132 == oldsize);
2133 if ((size + extra <= small_maxclass &&
2134 SMALL_SIZE2BIN(size + extra) ==
2135 SMALL_SIZE2BIN(oldsize)) || (size <= oldsize &&
2136 size + extra >= oldsize)) {
2137#ifdef JEMALLOC_FILL
2138 if (opt_junk && size < oldsize) {
2139 memset((void *)((uintptr_t)ptr + size),
2140 0x5a, oldsize - size);
2141 }
2142#endif
2143 return (ptr);
2144 }
2145 } else {
2146 assert(size <= arena_maxclass);
2147 if (size + extra > small_maxclass) {
2148 if (arena_ralloc_large(ptr, oldsize, size,
2149 extra, zero) == false)
2150 return (ptr);
2151 }
2152 }
2153 }
2154
2155 /* Reallocation would require a move. */
2156 return (NULL);
2157}
2158
2159void *
2160arena_ralloc(void *ptr, size_t oldsize, size_t size, size_t extra,
2161 size_t alignment, bool zero)
2162{
2163 void *ret;
2164 size_t copysize;
2165
2166 /* Try to avoid moving the allocation. */
2167 ret = arena_ralloc_no_move(ptr, oldsize, size, extra, zero);
2168 if (ret != NULL)
2169 return (ret);
2170
2171 /*
2172 * size and oldsize are different enough that we need to move the
2173 * object. In that case, fall back to allocating new space and
2174 * copying.
2175 */
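/*
 * The first attempt below requests size + extra; if that allocation fails and
 * extra is nonzero, a second attempt is made for size alone before giving up.
 */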
2176 if (alignment != 0) {
2177 size_t usize = sa2u(size + extra, alignment, NULL);
2178 if (usize == 0)
2179 return (NULL);
2180 ret = ipalloc(usize, alignment, zero);
2181 } else
2182 ret = arena_malloc(size + extra, zero);
2183
2184 if (ret == NULL) {
2185 if (extra == 0)
2186 return (NULL);
2187 /* Try again, this time without extra. */
2188 if (alignment != 0) {
2189 size_t usize = sa2u(size, alignment, NULL);
2190 if (usize == 0)
2191 return (NULL);
2192 ret = ipalloc(usize, alignment, zero);
2193 } else
2194 ret = arena_malloc(size, zero);
2195
2196 if (ret == NULL)
2197 return (NULL);
2198 }
2199
2200 /* Junk/zero-filling was already done by ipalloc()/arena_malloc(). */
2201
2202 /*
2203 * Copy at most size bytes (not size+extra), since the caller has no
2204 * expectation that the extra bytes will be reliably preserved.
2205 */
2206 copysize = (size < oldsize) ? size : oldsize;
2207 memcpy(ret, ptr, copysize);
2208 idalloc(ptr);
2209 return (ret);
2210}
2211
2212bool
2213arena_new(arena_t *arena, unsigned ind)
2214{
2215 unsigned i;
2216 arena_bin_t *bin;
2217
2218 arena->ind = ind;
2219 arena->nthreads = 0;
2220
2221 if (malloc_mutex_init(&arena->lock))
2222 return (true);
2223
2224#ifdef JEMALLOC_STATS
2225 memset(&arena->stats, 0, sizeof(arena_stats_t));
2226 arena->stats.lstats = (malloc_large_stats_t *)base_alloc(nlclasses *
2227 sizeof(malloc_large_stats_t));
2228 if (arena->stats.lstats == NULL)
2229 return (true);
2230 memset(arena->stats.lstats, 0, nlclasses *
2231 sizeof(malloc_large_stats_t));
2232# ifdef JEMALLOC_TCACHE
2233 ql_new(&arena->tcache_ql);
2234# endif
2235#endif
2236
2237#ifdef JEMALLOC_PROF
2238 arena->prof_accumbytes = 0;
2239#endif
2240
2241 /* Initialize chunks. */
2242 ql_new(&arena->chunks_dirty);
2243 arena->spare = NULL;
2244
2245 arena->nactive = 0;
2246 arena->ndirty = 0;
2247 arena->npurgatory = 0;
2248
2249 arena_avail_tree_new(&arena->runs_avail_clean);
2250 arena_avail_tree_new(&arena->runs_avail_dirty);
2251
2252 /* Initialize bins. */
2253 i = 0;
2254#ifdef JEMALLOC_TINY
2255 /* (2^n)-spaced tiny bins. */
2256 for (; i < ntbins; i++) {
2257 bin = &arena->bins[i];
2258 if (malloc_mutex_init(&bin->lock))
2259 return (true);
2260 bin->runcur = NULL;
2261 arena_run_tree_new(&bin->runs);
2262#ifdef JEMALLOC_STATS
2263 memset(&bin->stats, 0, sizeof(malloc_bin_stats_t));
2264#endif
2265 }
2266#endif
2267
2268 /* Quantum-spaced bins. */
2269 for (; i < ntbins + nqbins; i++) {
2270 bin = &arena->bins[i];
2271 if (malloc_mutex_init(&bin->lock))
2272 return (true);
2273 bin->runcur = NULL;
2274 arena_run_tree_new(&bin->runs);
2275#ifdef JEMALLOC_STATS
2276 memset(&bin->stats, 0, sizeof(malloc_bin_stats_t));
2277#endif
2278 }
2279
2280 /* Cacheline-spaced bins. */
2281 for (; i < ntbins + nqbins + ncbins; i++) {
2282 bin = &arena->bins[i];
2283 if (malloc_mutex_init(&bin->lock))
2284 return (true);
2285 bin->runcur = NULL;
2286 arena_run_tree_new(&bin->runs);
2287#ifdef JEMALLOC_STATS
2288 memset(&bin->stats, 0, sizeof(malloc_bin_stats_t));
2289#endif
2290 }
2291
2292 /* Subpage-spaced bins. */
2293 for (; i < nbins; i++) {
2294 bin = &arena->bins[i];
2295 if (malloc_mutex_init(&bin->lock))
2296 return (true);
2297 bin->runcur = NULL;
2298 arena_run_tree_new(&bin->runs);
2299#ifdef JEMALLOC_STATS
2300 memset(&bin->stats, 0, sizeof(malloc_bin_stats_t));
2301#endif
2302 }
2303
2304#ifdef JEMALLOC_DEBUG
2305 arena->magic = ARENA_MAGIC;
2306#endif
2307
2308 return (false);
2309}
2310
2311#ifdef JEMALLOC_DEBUG
2312static void
2313small_size2bin_validate(void)
2314{
2315 size_t i, size, binind;
2316
2317 i = 1;
2318# ifdef JEMALLOC_TINY
2319 /* Tiny. */
2320 for (; i < (1U << LG_TINY_MIN); i++) {
2321 size = pow2_ceil(1U << LG_TINY_MIN);
2322 binind = ffs((int)(size >> (LG_TINY_MIN + 1)));
2323 assert(SMALL_SIZE2BIN(i) == binind);
2324 }
2325 for (; i < qspace_min; i++) {
2326 size = pow2_ceil(i);
2327 binind = ffs((int)(size >> (LG_TINY_MIN + 1)));
2328 assert(SMALL_SIZE2BIN(i) == binind);
2329 }
2330# endif
2331 /* Quantum-spaced. */
2332 for (; i <= qspace_max; i++) {
2333 size = QUANTUM_CEILING(i);
2334 binind = ntbins + (size >> LG_QUANTUM) - 1;
2335 assert(SMALL_SIZE2BIN(i) == binind);
2336 }
2337 /* Cacheline-spaced. */
2338 for (; i <= cspace_max; i++) {
2339 size = CACHELINE_CEILING(i);
2340 binind = ntbins + nqbins + ((size - cspace_min) >>
2341 LG_CACHELINE);
2342 assert(SMALL_SIZE2BIN(i) == binind);
2343 }
2344 /* Sub-page. */
2345 for (; i <= sspace_max; i++) {
2346 size = SUBPAGE_CEILING(i);
2347 binind = ntbins + nqbins + ncbins + ((size - sspace_min)
2348 >> LG_SUBPAGE);
2349 assert(SMALL_SIZE2BIN(i) == binind);
2350 }
2351}
2352#endif
2353
2354static bool
2355small_size2bin_init(void)
2356{
2357
2358 if (opt_lg_qspace_max != LG_QSPACE_MAX_DEFAULT
2359 || opt_lg_cspace_max != LG_CSPACE_MAX_DEFAULT
2360 || (sizeof(const_small_size2bin) != ((small_maxclass-1) >>
2361 LG_TINY_MIN) + 1))
2362 return (small_size2bin_init_hard());
2363
2364 small_size2bin = const_small_size2bin;
2365#ifdef JEMALLOC_DEBUG
2366 small_size2bin_validate();
2367#endif
2368 return (false);
2369}
2370
2371static bool
2372small_size2bin_init_hard(void)
2373{
2374 size_t i, size, binind;
2375 uint8_t *custom_small_size2bin;
2376#define CUSTOM_SMALL_SIZE2BIN(s) \
2377 custom_small_size2bin[(s-1) >> LG_TINY_MIN]
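/*
 * This maps a size s to its table slot as (s-1) >> LG_TINY_MIN; with
 * LG_TINY_MIN = 3, for example, sizes 1..8 share slot 0, 9..16 share slot 1,
 * and so on, mirroring the indexing used by SMALL_SIZE2BIN().
 */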
2378
2379 assert(opt_lg_qspace_max != LG_QSPACE_MAX_DEFAULT
2380 || opt_lg_cspace_max != LG_CSPACE_MAX_DEFAULT
2381 || (sizeof(const_small_size2bin) != ((small_maxclass-1) >>
2382 LG_TINY_MIN) + 1));
2383
2384 custom_small_size2bin = (uint8_t *)
2385 base_alloc(small_maxclass >> LG_TINY_MIN);
2386 if (custom_small_size2bin == NULL)
2387 return (true);
2388
2389 i = 1;
2390#ifdef JEMALLOC_TINY
2391 /* Tiny. */
2392 for (; i < (1U << LG_TINY_MIN); i += TINY_MIN) {
2393 size = pow2_ceil(1U << LG_TINY_MIN);
2394 binind = ffs((int)(size >> (LG_TINY_MIN + 1)));
2395 CUSTOM_SMALL_SIZE2BIN(i) = binind;
2396 }
2397 for (; i < qspace_min; i += TINY_MIN) {
2398 size = pow2_ceil(i);
2399 binind = ffs((int)(size >> (LG_TINY_MIN + 1)));
2400 CUSTOM_SMALL_SIZE2BIN(i) = binind;
2401 }
2402#endif
2403 /* Quantum-spaced. */
2404 for (; i <= qspace_max; i += TINY_MIN) {
2405 size = QUANTUM_CEILING(i);
2406 binind = ntbins + (size >> LG_QUANTUM) - 1;
2407 CUSTOM_SMALL_SIZE2BIN(i) = binind;
2408 }
2409 /* Cacheline-spaced. */
2410 for (; i <= cspace_max; i += TINY_MIN) {
2411 size = CACHELINE_CEILING(i);
2412 binind = ntbins + nqbins + ((size - cspace_min) >>
2413 LG_CACHELINE);
2414 CUSTOM_SMALL_SIZE2BIN(i) = binind;
2415 }
2416 /* Sub-page. */
2417 for (; i <= sspace_max; i += TINY_MIN) {
2418 size = SUBPAGE_CEILING(i);
2419 binind = ntbins + nqbins + ncbins + ((size - sspace_min) >>
2420 LG_SUBPAGE);
2421 CUSTOM_SMALL_SIZE2BIN(i) = binind;
2422 }
2423
2424 small_size2bin = custom_small_size2bin;
2425#ifdef JEMALLOC_DEBUG
2426 small_size2bin_validate();
2427#endif
2428 return (false);
2429#undef CUSTOM_SMALL_SIZE2BIN
2430}
2431
2432/*
2433 * Calculate bin_info->run_size such that it meets the following constraints:
2434 *
2435 * *) bin_info->run_size >= min_run_size
2436 * *) bin_info->run_size <= arena_maxclass
2437 * *) run header overhead <= RUN_MAX_OVRHD (or header overhead relaxed).
2438 * *) bin_info->nregs <= RUN_MAXREGS
2439 *
2440 * bin_info->nregs, bin_info->bitmap_offset, and bin_info->reg0_offset are also
2441 * calculated here, since these settings are all interdependent.
2442 */
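/*
 * The header-overhead constraint is evaluated in fixed point: reg0_offset
 * (the run header, bitmap, and padding before the first region) divided by
 * run_size must not exceed RUN_MAX_OVRHD / 2^RUN_BFP. That is why the
 * expansion loop keeps adding a page while
 * (try_reg0_offset << RUN_BFP) > RUN_MAX_OVRHD * try_run_size, unless the
 * RUN_MAX_OVRHD_RELAX or RUN_MAXREGS limits, or arena_maxclass, stop it
 * first.
 */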
2443static size_t
2444bin_info_run_size_calc(arena_bin_info_t *bin_info, size_t min_run_size)
2445{
2446 size_t try_run_size, good_run_size;
2447 uint32_t try_nregs, good_nregs;
2448 uint32_t try_hdr_size, good_hdr_size;
2449 uint32_t try_bitmap_offset, good_bitmap_offset;
2450#ifdef JEMALLOC_PROF
2451 uint32_t try_ctx0_offset, good_ctx0_offset;
2452#endif
2453 uint32_t try_reg0_offset, good_reg0_offset;
2454
2455 assert(min_run_size >= PAGE_SIZE);
2456 assert(min_run_size <= arena_maxclass);
2457
2458 /*
2459 * Calculate known-valid settings before entering the run_size
2460 * expansion loop, so that the first part of the loop always copies
2461 * valid settings.
2462 *
2463 * The do..while loop iteratively reduces the number of regions until
2464 * the run header and the regions no longer overlap. A closed formula
2465 * would be quite messy, since there is an interdependency between the
2466 * header's mask length and the number of regions.
2467 */
2468 try_run_size = min_run_size;
2469 try_nregs = ((try_run_size - sizeof(arena_run_t)) / bin_info->reg_size)
2470 + 1; /* Counter-act try_nregs-- in loop. */
2471 if (try_nregs > RUN_MAXREGS) {
2472 try_nregs = RUN_MAXREGS
2473 + 1; /* Counter-act try_nregs-- in loop. */
2474 }
2475 do {
2476 try_nregs--;
2477 try_hdr_size = sizeof(arena_run_t);
2478 /* Pad to a long boundary. */
2479 try_hdr_size = LONG_CEILING(try_hdr_size);
2480 try_bitmap_offset = try_hdr_size;
2481 /* Add space for bitmap. */
2482 try_hdr_size += bitmap_size(try_nregs);
2483#ifdef JEMALLOC_PROF
2484 if (opt_prof && prof_promote == false) {
2485 /* Pad to a quantum boundary. */
2486 try_hdr_size = QUANTUM_CEILING(try_hdr_size);
2487 try_ctx0_offset = try_hdr_size;
2488 /* Add space for one (prof_ctx_t *) per region. */
2489 try_hdr_size += try_nregs * sizeof(prof_ctx_t *);
2490 } else
2491 try_ctx0_offset = 0;
2492#endif
2493 try_reg0_offset = try_run_size - (try_nregs *
2494 bin_info->reg_size);
2495 } while (try_hdr_size > try_reg0_offset);
2496
2497 /* run_size expansion loop. */
2498 do {
2499 /*
2500 * Copy valid settings before trying more aggressive settings.
2501 */
2502 good_run_size = try_run_size;
2503 good_nregs = try_nregs;
2504 good_hdr_size = try_hdr_size;
2505 good_bitmap_offset = try_bitmap_offset;
2506#ifdef JEMALLOC_PROF
2507 good_ctx0_offset = try_ctx0_offset;
2508#endif
2509 good_reg0_offset = try_reg0_offset;
2510
2511 /* Try more aggressive settings. */
2512 try_run_size += PAGE_SIZE;
2513 try_nregs = ((try_run_size - sizeof(arena_run_t)) /
2514 bin_info->reg_size)
2515 + 1; /* Counter-act try_nregs-- in loop. */
2516 if (try_nregs > RUN_MAXREGS) {
2517 try_nregs = RUN_MAXREGS
2518 + 1; /* Counter-act try_nregs-- in loop. */
2519 }
2520 do {
2521 try_nregs--;
2522 try_hdr_size = sizeof(arena_run_t);
2523 /* Pad to a long boundary. */
2524 try_hdr_size = LONG_CEILING(try_hdr_size);
2525 try_bitmap_offset = try_hdr_size;
2526 /* Add space for bitmap. */
2527 try_hdr_size += bitmap_size(try_nregs);
2528#ifdef JEMALLOC_PROF
2529 if (opt_prof && prof_promote == false) {
2530 /* Pad to a quantum boundary. */
2531 try_hdr_size = QUANTUM_CEILING(try_hdr_size);
2532 try_ctx0_offset = try_hdr_size;
2533 /*
2534 * Add space for one (prof_ctx_t *) per region.
2535 */
2536 try_hdr_size += try_nregs *
2537 sizeof(prof_ctx_t *);
2538 }
2539#endif
2540 try_reg0_offset = try_run_size - (try_nregs *
2541 bin_info->reg_size);
2542 } while (try_hdr_size > try_reg0_offset);
2543 } while (try_run_size <= arena_maxclass
2545 && RUN_MAX_OVRHD * (bin_info->reg_size << 3) > RUN_MAX_OVRHD_RELAX
2546 && (try_reg0_offset << RUN_BFP) > RUN_MAX_OVRHD * try_run_size
2547 && try_nregs < RUN_MAXREGS);
2548
2549 assert(good_hdr_size <= good_reg0_offset);
2550
2551 /* Copy final settings. */
2552 bin_info->run_size = good_run_size;
2553 bin_info->nregs = good_nregs;
2554 bin_info->bitmap_offset = good_bitmap_offset;
2555#ifdef JEMALLOC_PROF
2556 bin_info->ctx0_offset = good_ctx0_offset;
2557#endif
2558 bin_info->reg0_offset = good_reg0_offset;
2559
2560 return (good_run_size);
2561}
2562
2563static bool
2564bin_info_init(void)
2565{
2566 arena_bin_info_t *bin_info;
2567 unsigned i;
2568 size_t prev_run_size;
2569
2570 arena_bin_info = base_alloc(sizeof(arena_bin_info_t) * nbins);
2571 if (arena_bin_info == NULL)
2572 return (true);
2573
2574 prev_run_size = PAGE_SIZE;
2575 i = 0;
2576#ifdef JEMALLOC_TINY
2577 /* (2^n)-spaced tiny bins. */
2578 for (; i < ntbins; i++) {
2579 bin_info = &arena_bin_info[i];
2580 bin_info->reg_size = (1U << (LG_TINY_MIN + i));
2581 prev_run_size = bin_info_run_size_calc(bin_info, prev_run_size);
2582 bitmap_info_init(&bin_info->bitmap_info, bin_info->nregs);
2583 }
2584#endif
2585
2586 /* Quantum-spaced bins. */
2587 for (; i < ntbins + nqbins; i++) {
2588 bin_info = &arena_bin_info[i];
2589 bin_info->reg_size = (i - ntbins + 1) << LG_QUANTUM;
2590 prev_run_size = bin_info_run_size_calc(bin_info, prev_run_size);
2591 bitmap_info_init(&bin_info->bitmap_info, bin_info->nregs);
2592 }
2593
2594 /* Cacheline-spaced bins. */
2595 for (; i < ntbins + nqbins + ncbins; i++) {
2596 bin_info = &arena_bin_info[i];
2597 bin_info->reg_size = cspace_min + ((i - (ntbins + nqbins)) <<
2598 LG_CACHELINE);
2599 prev_run_size = bin_info_run_size_calc(bin_info, prev_run_size);
2600 bitmap_info_init(&bin_info->bitmap_info, bin_info->nregs);
2601 }
2602
2603 /* Subpage-spaced bins. */
2604 for (; i < nbins; i++) {
2605 bin_info = &arena_bin_info[i];
2606 bin_info->reg_size = sspace_min + ((i - (ntbins + nqbins +
2607 ncbins)) << LG_SUBPAGE);
2608 prev_run_size = bin_info_run_size_calc(bin_info, prev_run_size);
2609 bitmap_info_init(&bin_info->bitmap_info, bin_info->nregs);
2610 }
2611
2612 return (false);
2613}
2614
2615bool
2616arena_boot(void)
2617{
2618 size_t header_size;
2619 unsigned i;
2620
2621 /* Set variables according to the value of opt_lg_[qc]space_max. */
2622 qspace_max = (1U << opt_lg_qspace_max);
2623 cspace_min = CACHELINE_CEILING(qspace_max);
2624 if (cspace_min == qspace_max)
2625 cspace_min += CACHELINE;
2626 cspace_max = (1U << opt_lg_cspace_max);
2627 sspace_min = SUBPAGE_CEILING(cspace_max);
2628 if (sspace_min == cspace_max)
2629 sspace_min += SUBPAGE;
2630 assert(sspace_min < PAGE_SIZE);
2631 sspace_max = PAGE_SIZE - SUBPAGE;
2632
2633#ifdef JEMALLOC_TINY
2634 assert(LG_QUANTUM >= LG_TINY_MIN);
2635#endif
2636 assert(ntbins <= LG_QUANTUM);
2637 nqbins = qspace_max >> LG_QUANTUM;
2638 ncbins = ((cspace_max - cspace_min) >> LG_CACHELINE) + 1;
2639 nsbins = ((sspace_max - sspace_min) >> LG_SUBPAGE) + 1;
2640 nbins = ntbins + nqbins + ncbins + nsbins;
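/*
 * Worked example (assuming a 16-byte quantum, 64-byte cachelines, 256-byte
 * subpages, 4 KiB pages, opt_lg_qspace_max = 7, and opt_lg_cspace_max = 9):
 * qspace_max = 128, cspace_min = 192, cspace_max = 512, sspace_min = 768,
 * and sspace_max = 3840, giving nqbins = 8 (16..128), ncbins = 6 (192..512),
 * and nsbins = 13 (768..3840), in addition to any tiny bins.
 */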
2641
2642 /*
2643 * The small_size2bin lookup table uses uint8_t to encode each bin
2644 * index, so we cannot support more than 256 small size classes. This
2645 * limit is difficult to exceed (not even possible with 16B quantum and
2646 * 4KiB pages), and such configurations are impractical, but
2647 * nonetheless we need to protect against this case in order to avoid
2648 * undefined behavior.
2649 *
2650 * Further constrain nbins to 255 if prof_promote is true, since all
2651 * small size classes, plus a "not small" size class must be stored in
2652 * 8 bits of arena_chunk_map_t's bits field.
2653 */
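/*
 * The 255 limit comes from the encoding used earlier in this file: a promoted
 * small allocation's bin index is stored in the CHUNK_MAP_CLASS bits as
 * binind + 1, with 0 reserved to mean "not a promoted small allocation" (the
 * decoding subtracts 1), so only 255 of the 256 8-bit values can name small
 * size classes.
 */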
2654#ifdef JEMALLOC_PROF
2655 if (opt_prof && prof_promote) {
2656 if (nbins > 255) {
2657 char line_buf[UMAX2S_BUFSIZE];
2658 malloc_write("<jemalloc>: Too many small size classes (");
2659 malloc_write(u2s(nbins, 10, line_buf));
2660 malloc_write(" > max 255)\n");
2661 abort();
2662 }
2663 } else
2664#endif
2665 if (nbins > 256) {
2666 char line_buf[UMAX2S_BUFSIZE];
2667 malloc_write("<jemalloc>: Too many small size classes (");
2668 malloc_write(u2s(nbins, 10, line_buf));
2669 malloc_write(" > max 256)\n");
2670 abort();
2671 }
2672
2673 /*
2674 * Compute the header size such that it is large enough to contain the
2675 * page map. The page map is biased to omit entries for the header
2676 * itself, so some iteration is necessary to compute the map bias.
2677 *
2678 * 1) Compute safe header_size and map_bias values that include enough
2679 * space for an unbiased page map.
2680 * 2) Refine map_bias based on (1) to omit the header pages in the page
2681 * map. The resulting map_bias may be one too small.
2682 * 3) Refine map_bias based on (2). The result will be >= the result
2683 * from (2), and will always be correct.
2684 */
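/*
 * Worked example with purely illustrative sizes (4 KiB pages, 1024-page
 * chunks, a 128-byte map offset, 32-byte map elements):
 *   pass 1 (map_bias = 0): header = 128 + 32*1024 = 32896 -> map_bias = 9
 *   pass 2 (map_bias = 9): header = 128 + 32*1015 = 32608 -> map_bias = 8
 *   pass 3 (map_bias = 8): header = 128 + 32*1016 = 32640 -> map_bias = 8
 */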
2685 map_bias = 0;
2686 for (i = 0; i < 3; i++) {
2687 header_size = offsetof(arena_chunk_t, map)
2688 + (sizeof(arena_chunk_map_t) * (chunk_npages-map_bias));
2689 map_bias = (header_size >> PAGE_SHIFT) + ((header_size &
2690 PAGE_MASK) != 0);
2691 }
2692 assert(map_bias > 0);
2693
2694 arena_maxclass = chunksize - (map_bias << PAGE_SHIFT);
2695
2696 if (small_size2bin_init())
2697 return (true);
2698
2699 if (bin_info_init())
2700 return (true);
2701
2702 return (false);
2703}