#define JEMALLOC_ARENA_C_
#include "jemalloc/internal/jemalloc_internal.h"

/******************************************************************************/
/* Data. */

size_t opt_lg_qspace_max = LG_QSPACE_MAX_DEFAULT;
size_t opt_lg_cspace_max = LG_CSPACE_MAX_DEFAULT;
ssize_t opt_lg_dirty_mult = LG_DIRTY_MULT_DEFAULT;
uint8_t const *small_size2bin;
arena_bin_info_t *arena_bin_info;

/* Various bin-related settings. */
unsigned nqbins;
unsigned ncbins;
unsigned nsbins;
unsigned nbins;
size_t qspace_max;
size_t cspace_min;
size_t cspace_max;
size_t sspace_min;
size_t sspace_max;

size_t lg_mspace;
size_t mspace_mask;

/*
 * const_small_size2bin is a static constant lookup table that in the common
 * case can be used as-is for small_size2bin.
 */
#if (LG_TINY_MIN == 2)
#define S2B_4(i) i,
#define S2B_8(i) S2B_4(i) S2B_4(i)
#elif (LG_TINY_MIN == 3)
#define S2B_8(i) i,
#else
# error "Unsupported LG_TINY_MIN"
#endif
#define S2B_16(i) S2B_8(i) S2B_8(i)
#define S2B_32(i) S2B_16(i) S2B_16(i)
#define S2B_64(i) S2B_32(i) S2B_32(i)
#define S2B_128(i) S2B_64(i) S2B_64(i)
#define S2B_256(i) S2B_128(i) S2B_128(i)
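/*
 * Each S2B_n(i) macro expands to n / (1 << LG_TINY_MIN) copies of bin index
 * i, so a single invocation fills the table entries for an n-byte stride of
 * the size range (e.g. with LG_TINY_MIN == 3, S2B_16(i) emits "i, i,").
 */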
/*
 * The number of elements in const_small_size2bin is dependent on the
 * definition for SUBPAGE.
 */
static JEMALLOC_ATTR(aligned(CACHELINE))
    const uint8_t const_small_size2bin[] = {
#if (LG_QUANTUM == 4)
/* 16-byte quantum **********************/
# ifdef JEMALLOC_TINY
#  if (LG_TINY_MIN == 2)
    S2B_4(0)                /* 4 */
    S2B_4(1)                /* 8 */
    S2B_8(2)                /* 16 */
#   define S2B_QMIN 2
#  elif (LG_TINY_MIN == 3)
    S2B_8(0)                /* 8 */
    S2B_8(1)                /* 16 */
#   define S2B_QMIN 1
#  else
#   error "Unsupported LG_TINY_MIN"
#  endif
# else
    S2B_16(0)               /* 16 */
#  define S2B_QMIN 0
# endif
    S2B_16(S2B_QMIN + 1)    /* 32 */
    S2B_16(S2B_QMIN + 2)    /* 48 */
    S2B_16(S2B_QMIN + 3)    /* 64 */
    S2B_16(S2B_QMIN + 4)    /* 80 */
    S2B_16(S2B_QMIN + 5)    /* 96 */
    S2B_16(S2B_QMIN + 6)    /* 112 */
    S2B_16(S2B_QMIN + 7)    /* 128 */
# define S2B_CMIN (S2B_QMIN + 8)
#else
/* 8-byte quantum ***********************/
# ifdef JEMALLOC_TINY
#  if (LG_TINY_MIN == 2)
    S2B_4(0)                /* 4 */
    S2B_4(1)                /* 8 */
#   define S2B_QMIN 1
#  else
#   error "Unsupported LG_TINY_MIN"
#  endif
# else
    S2B_8(0)                /* 8 */
#  define S2B_QMIN 0
# endif
    S2B_8(S2B_QMIN + 1)     /* 16 */
    S2B_8(S2B_QMIN + 2)     /* 24 */
    S2B_8(S2B_QMIN + 3)     /* 32 */
    S2B_8(S2B_QMIN + 4)     /* 40 */
    S2B_8(S2B_QMIN + 5)     /* 48 */
    S2B_8(S2B_QMIN + 6)     /* 56 */
    S2B_8(S2B_QMIN + 7)     /* 64 */
    S2B_8(S2B_QMIN + 8)     /* 72 */
    S2B_8(S2B_QMIN + 9)     /* 80 */
    S2B_8(S2B_QMIN + 10)    /* 88 */
    S2B_8(S2B_QMIN + 11)    /* 96 */
    S2B_8(S2B_QMIN + 12)    /* 104 */
    S2B_8(S2B_QMIN + 13)    /* 112 */
    S2B_8(S2B_QMIN + 14)    /* 120 */
    S2B_8(S2B_QMIN + 15)    /* 128 */
# define S2B_CMIN (S2B_QMIN + 16)
#endif
/****************************************/
    S2B_64(S2B_CMIN + 0)    /* 192 */
    S2B_64(S2B_CMIN + 1)    /* 256 */
    S2B_64(S2B_CMIN + 2)    /* 320 */
    S2B_64(S2B_CMIN + 3)    /* 384 */
    S2B_64(S2B_CMIN + 4)    /* 448 */
    S2B_64(S2B_CMIN + 5)    /* 512 */
# define S2B_SMIN (S2B_CMIN + 6)
    S2B_256(S2B_SMIN + 0)   /* 768 */
    S2B_256(S2B_SMIN + 1)   /* 1024 */
    S2B_256(S2B_SMIN + 2)   /* 1280 */
    S2B_256(S2B_SMIN + 3)   /* 1536 */
    S2B_256(S2B_SMIN + 4)   /* 1792 */
    S2B_256(S2B_SMIN + 5)   /* 2048 */
    S2B_256(S2B_SMIN + 6)   /* 2304 */
    S2B_256(S2B_SMIN + 7)   /* 2560 */
    S2B_256(S2B_SMIN + 8)   /* 2816 */
    S2B_256(S2B_SMIN + 9)   /* 3072 */
    S2B_256(S2B_SMIN + 10)  /* 3328 */
    S2B_256(S2B_SMIN + 11)  /* 3584 */
    S2B_256(S2B_SMIN + 12)  /* 3840 */
#if (STATIC_PAGE_SHIFT == 13)
    S2B_256(S2B_SMIN + 13)  /* 4096 */
    S2B_256(S2B_SMIN + 14)  /* 4352 */
    S2B_256(S2B_SMIN + 15)  /* 4608 */
    S2B_256(S2B_SMIN + 16)  /* 4864 */
    S2B_256(S2B_SMIN + 17)  /* 5120 */
    S2B_256(S2B_SMIN + 18)  /* 5376 */
    S2B_256(S2B_SMIN + 19)  /* 5632 */
    S2B_256(S2B_SMIN + 20)  /* 5888 */
    S2B_256(S2B_SMIN + 21)  /* 6144 */
    S2B_256(S2B_SMIN + 22)  /* 6400 */
    S2B_256(S2B_SMIN + 23)  /* 6656 */
    S2B_256(S2B_SMIN + 24)  /* 6912 */
    S2B_256(S2B_SMIN + 25)  /* 7168 */
    S2B_256(S2B_SMIN + 26)  /* 7424 */
    S2B_256(S2B_SMIN + 27)  /* 7680 */
    S2B_256(S2B_SMIN + 28)  /* 7936 */
#endif
};
#undef S2B_1
#undef S2B_2
#undef S2B_4
#undef S2B_8
#undef S2B_16
#undef S2B_32
#undef S2B_64
#undef S2B_128
#undef S2B_256
#undef S2B_QMIN
#undef S2B_CMIN
#undef S2B_SMIN

/******************************************************************************/
/* Function prototypes for non-inline static functions. */

static void arena_run_split(arena_t *arena, arena_run_t *run, size_t size,
    bool large, bool zero);
static arena_chunk_t *arena_chunk_alloc(arena_t *arena);
static void arena_chunk_dealloc(arena_t *arena, arena_chunk_t *chunk);
static arena_run_t *arena_run_alloc(arena_t *arena, size_t size, bool large,
    bool zero);
static void arena_purge(arena_t *arena, bool all);
static void arena_run_dalloc(arena_t *arena, arena_run_t *run, bool dirty);
static void arena_run_trim_head(arena_t *arena, arena_chunk_t *chunk,
    arena_run_t *run, size_t oldsize, size_t newsize);
static void arena_run_trim_tail(arena_t *arena, arena_chunk_t *chunk,
    arena_run_t *run, size_t oldsize, size_t newsize, bool dirty);
static arena_run_t *arena_bin_nonfull_run_get(arena_t *arena, arena_bin_t *bin);
static void *arena_bin_malloc_hard(arena_t *arena, arena_bin_t *bin);
static void arena_dissociate_bin_run(arena_chunk_t *chunk, arena_run_t *run,
    arena_bin_t *bin);
static void arena_dalloc_bin_run(arena_t *arena, arena_chunk_t *chunk,
    arena_run_t *run, arena_bin_t *bin);
static void arena_bin_lower_run(arena_t *arena, arena_chunk_t *chunk,
    arena_run_t *run, arena_bin_t *bin);
static void arena_ralloc_large_shrink(arena_t *arena, arena_chunk_t *chunk,
    void *ptr, size_t oldsize, size_t size);
static bool arena_ralloc_large_grow(arena_t *arena, arena_chunk_t *chunk,
    void *ptr, size_t oldsize, size_t size, size_t extra, bool zero);
static bool arena_ralloc_large(void *ptr, size_t oldsize, size_t size,
    size_t extra, bool zero);
static bool small_size2bin_init(void);
#ifdef JEMALLOC_DEBUG
static void small_size2bin_validate(void);
#endif
static bool small_size2bin_init_hard(void);
static size_t bin_info_run_size_calc(arena_bin_info_t *bin_info,
    size_t min_run_size);
static bool bin_info_init(void);

/******************************************************************************/

static inline int
arena_run_comp(arena_chunk_map_t *a, arena_chunk_map_t *b)
{
    uintptr_t a_mapelm = (uintptr_t)a;
    uintptr_t b_mapelm = (uintptr_t)b;

    assert(a != NULL);
    assert(b != NULL);

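    /* Branchless three-way comparison: -1 if a < b, 0 if equal, 1 if a > b. */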
    return ((a_mapelm > b_mapelm) - (a_mapelm < b_mapelm));
}

/* Generate red-black tree functions. */
rb_gen(static JEMALLOC_ATTR(unused), arena_run_tree_, arena_run_tree_t,
    arena_chunk_map_t, u.rb_link, arena_run_comp)

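/*
 * Available runs are ordered primarily by size and secondarily by address, so
 * among equally sized best fits the lowest-addressed run sorts first.  Search
 * keys (CHUNK_MAP_KEY) sort below all real map elements of the same size; see
 * the key handling below.
 */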
static inline int
arena_avail_comp(arena_chunk_map_t *a, arena_chunk_map_t *b)
{
    int ret;
    size_t a_size = a->bits & ~PAGE_MASK;
    size_t b_size = b->bits & ~PAGE_MASK;

    assert((a->bits & CHUNK_MAP_KEY) == CHUNK_MAP_KEY || (a->bits &
        CHUNK_MAP_DIRTY) == (b->bits & CHUNK_MAP_DIRTY));

    ret = (a_size > b_size) - (a_size < b_size);
    if (ret == 0) {
        uintptr_t a_mapelm, b_mapelm;

        if ((a->bits & CHUNK_MAP_KEY) != CHUNK_MAP_KEY)
            a_mapelm = (uintptr_t)a;
        else {
            /*
             * Treat keys as though they are lower than anything
             * else.
             */
            a_mapelm = 0;
        }
        b_mapelm = (uintptr_t)b;

        ret = (a_mapelm > b_mapelm) - (a_mapelm < b_mapelm);
    }

    return (ret);
}

/* Generate red-black tree functions. */
rb_gen(static JEMALLOC_ATTR(unused), arena_avail_tree_, arena_avail_tree_t,
    arena_chunk_map_t, u.rb_link, arena_avail_comp)

static inline void *
arena_run_reg_alloc(arena_run_t *run, arena_bin_info_t *bin_info)
{
    void *ret;
    unsigned regind;
    bitmap_t *bitmap = (bitmap_t *)((uintptr_t)run +
        (uintptr_t)bin_info->bitmap_offset);

    dassert(run->magic == ARENA_RUN_MAGIC);
    assert(run->nfree > 0);
    assert(bitmap_full(bitmap, &bin_info->bitmap_info) == false);

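    /*
     * bitmap_sfu() ("set first unset") marks the lowest clear bit and returns
     * its index, so regions are handed out lowest-address first.
     */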
    regind = bitmap_sfu(bitmap, &bin_info->bitmap_info);
    ret = (void *)((uintptr_t)run + (uintptr_t)bin_info->reg0_offset +
        (uintptr_t)(bin_info->reg_size * regind));
    run->nfree--;
    if (regind == run->nextind)
        run->nextind++;
    assert(regind < run->nextind);
    return (ret);
}

static inline void
arena_run_reg_dalloc(arena_run_t *run, void *ptr)
{
    arena_chunk_t *chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(run);
    size_t binind = arena_bin_index(chunk->arena, run->bin);
    arena_bin_info_t *bin_info = &arena_bin_info[binind];
    unsigned regind = arena_run_regind(run, bin_info, ptr);
    bitmap_t *bitmap = (bitmap_t *)((uintptr_t)run +
        (uintptr_t)bin_info->bitmap_offset);

    assert(run->nfree < bin_info->nregs);
    /* Freeing an interior pointer can cause assertion failure. */
    assert(((uintptr_t)ptr - ((uintptr_t)run +
        (uintptr_t)bin_info->reg0_offset)) % (uintptr_t)bin_info->reg_size
        == 0);
    assert((uintptr_t)ptr >= (uintptr_t)run +
        (uintptr_t)bin_info->reg0_offset);
    /* Freeing an unallocated pointer can cause assertion failure. */
    assert(bitmap_get(bitmap, &bin_info->bitmap_info, regind));

    bitmap_unset(bitmap, &bin_info->bitmap_info, regind);
    run->nfree++;
}

#ifdef JEMALLOC_DEBUG
static inline void
arena_chunk_validate_zeroed(arena_chunk_t *chunk, size_t run_ind)
{
    size_t i;
    size_t *p = (size_t *)((uintptr_t)chunk + (run_ind << PAGE_SHIFT));

    for (i = 0; i < PAGE_SIZE / sizeof(size_t); i++)
        assert(p[i] == 0);
}
#endif

static void
arena_run_split(arena_t *arena, arena_run_t *run, size_t size, bool large,
    bool zero)
{
    arena_chunk_t *chunk;
    size_t old_ndirty, run_ind, total_pages, need_pages, rem_pages, i;
    size_t flag_dirty;
    arena_avail_tree_t *runs_avail;
#ifdef JEMALLOC_STATS
    size_t cactive_diff;
#endif

    chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(run);
    old_ndirty = chunk->ndirty;
    run_ind = (unsigned)(((uintptr_t)run - (uintptr_t)chunk)
        >> PAGE_SHIFT);
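    /*
     * chunk->map omits entries for the map_bias leading pages that hold the
     * chunk header, hence the pervasive [... - map_bias] indexing.
     */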
    flag_dirty = chunk->map[run_ind-map_bias].bits & CHUNK_MAP_DIRTY;
    runs_avail = (flag_dirty != 0) ? &arena->runs_avail_dirty :
        &arena->runs_avail_clean;
    total_pages = (chunk->map[run_ind-map_bias].bits & ~PAGE_MASK) >>
        PAGE_SHIFT;
    assert((chunk->map[run_ind+total_pages-1-map_bias].bits &
        CHUNK_MAP_DIRTY) == flag_dirty);
    need_pages = (size >> PAGE_SHIFT);
    assert(need_pages > 0);
    assert(need_pages <= total_pages);
    rem_pages = total_pages - need_pages;

    arena_avail_tree_remove(runs_avail, &chunk->map[run_ind-map_bias]);
#ifdef JEMALLOC_STATS
    /* Update stats_cactive if nactive is crossing a chunk multiple. */
    cactive_diff = CHUNK_CEILING((arena->nactive + need_pages) <<
        PAGE_SHIFT) - CHUNK_CEILING(arena->nactive << PAGE_SHIFT);
    if (cactive_diff != 0)
        stats_cactive_add(cactive_diff);
#endif
    arena->nactive += need_pages;

    /* Keep track of trailing unused pages for later use. */
    if (rem_pages > 0) {
        if (flag_dirty != 0) {
            chunk->map[run_ind+need_pages-map_bias].bits =
                (rem_pages << PAGE_SHIFT) | CHUNK_MAP_DIRTY;
            chunk->map[run_ind+total_pages-1-map_bias].bits =
                (rem_pages << PAGE_SHIFT) | CHUNK_MAP_DIRTY;
        } else {
            chunk->map[run_ind+need_pages-map_bias].bits =
                (rem_pages << PAGE_SHIFT) |
                (chunk->map[run_ind+need_pages-map_bias].bits &
                CHUNK_MAP_UNZEROED);
            chunk->map[run_ind+total_pages-1-map_bias].bits =
                (rem_pages << PAGE_SHIFT) |
                (chunk->map[run_ind+total_pages-1-map_bias].bits &
                CHUNK_MAP_UNZEROED);
        }
        arena_avail_tree_insert(runs_avail,
            &chunk->map[run_ind+need_pages-map_bias]);
    }

    /* Update dirty page accounting. */
    if (flag_dirty != 0) {
        chunk->ndirty -= need_pages;
        arena->ndirty -= need_pages;
    }

    /*
     * Update the page map separately for large vs. small runs, since it is
     * possible to avoid iteration for large mallocs.
     */
    if (large) {
        if (zero) {
            if (flag_dirty == 0) {
                /*
                 * The run is clean, so some pages may be
                 * zeroed (i.e. never before touched).
                 */
                for (i = 0; i < need_pages; i++) {
                    if ((chunk->map[run_ind+i-map_bias].bits
                        & CHUNK_MAP_UNZEROED) != 0) {
                        memset((void *)((uintptr_t)
                            chunk + ((run_ind+i) <<
                            PAGE_SHIFT)), 0,
                            PAGE_SIZE);
                    }
#ifdef JEMALLOC_DEBUG
                    else {
                        arena_chunk_validate_zeroed(
                            chunk, run_ind+i);
                    }
#endif
                }
            } else {
                /*
                 * The run is dirty, so all pages must be
                 * zeroed.
                 */
                memset((void *)((uintptr_t)chunk + (run_ind <<
                    PAGE_SHIFT)), 0, (need_pages <<
                    PAGE_SHIFT));
            }
        }

        /*
         * Set the last element first, in case the run only contains
         * one page (i.e. both statements set the same element).
         */
        chunk->map[run_ind+need_pages-1-map_bias].bits =
            CHUNK_MAP_LARGE | CHUNK_MAP_ALLOCATED | flag_dirty;
        chunk->map[run_ind-map_bias].bits = size | flag_dirty |
            CHUNK_MAP_LARGE | CHUNK_MAP_ALLOCATED;
    } else {
        assert(zero == false);
        /*
         * Propagate the dirty and unzeroed flags to the allocated
         * small run, so that arena_dalloc_bin_run() has the ability to
         * conditionally trim clean pages.
         */
        chunk->map[run_ind-map_bias].bits =
            (chunk->map[run_ind-map_bias].bits & CHUNK_MAP_UNZEROED) |
            CHUNK_MAP_ALLOCATED | flag_dirty;
#ifdef JEMALLOC_DEBUG
        /*
         * The first page will always be dirtied during small run
         * initialization, so a validation failure here would not
         * actually cause an observable failure.
         */
        if (flag_dirty == 0 &&
            (chunk->map[run_ind-map_bias].bits & CHUNK_MAP_UNZEROED)
            == 0)
            arena_chunk_validate_zeroed(chunk, run_ind);
#endif
        for (i = 1; i < need_pages - 1; i++) {
            chunk->map[run_ind+i-map_bias].bits = (i << PAGE_SHIFT)
                | (chunk->map[run_ind+i-map_bias].bits &
                CHUNK_MAP_UNZEROED) | CHUNK_MAP_ALLOCATED;
#ifdef JEMALLOC_DEBUG
            if (flag_dirty == 0 &&
                (chunk->map[run_ind+i-map_bias].bits &
                CHUNK_MAP_UNZEROED) == 0)
                arena_chunk_validate_zeroed(chunk, run_ind+i);
#endif
        }
        chunk->map[run_ind+need_pages-1-map_bias].bits = ((need_pages
            - 1) << PAGE_SHIFT) |
            (chunk->map[run_ind+need_pages-1-map_bias].bits &
            CHUNK_MAP_UNZEROED) | CHUNK_MAP_ALLOCATED | flag_dirty;
#ifdef JEMALLOC_DEBUG
        if (flag_dirty == 0 &&
            (chunk->map[run_ind+need_pages-1-map_bias].bits &
            CHUNK_MAP_UNZEROED) == 0) {
            arena_chunk_validate_zeroed(chunk,
                run_ind+need_pages-1);
        }
#endif
    }
}

static arena_chunk_t *
arena_chunk_alloc(arena_t *arena)
{
    arena_chunk_t *chunk;
    size_t i;

    if (arena->spare != NULL) {
        arena_avail_tree_t *runs_avail;

        chunk = arena->spare;
        arena->spare = NULL;

        /* Insert the run into the appropriate runs_avail_* tree. */
        if ((chunk->map[0].bits & CHUNK_MAP_DIRTY) == 0)
            runs_avail = &arena->runs_avail_clean;
        else
            runs_avail = &arena->runs_avail_dirty;
        assert((chunk->map[0].bits & ~PAGE_MASK) == arena_maxclass);
        assert((chunk->map[chunk_npages-1-map_bias].bits & ~PAGE_MASK)
            == arena_maxclass);
        assert((chunk->map[0].bits & CHUNK_MAP_DIRTY) ==
            (chunk->map[chunk_npages-1-map_bias].bits &
            CHUNK_MAP_DIRTY));
        arena_avail_tree_insert(runs_avail, &chunk->map[0]);
    } else {
        bool zero;
        size_t unzeroed;

        zero = false;
        malloc_mutex_unlock(&arena->lock);
        chunk = (arena_chunk_t *)chunk_alloc(chunksize, false, &zero);
        malloc_mutex_lock(&arena->lock);
        if (chunk == NULL)
            return (NULL);
#ifdef JEMALLOC_STATS
        arena->stats.mapped += chunksize;
#endif

        chunk->arena = arena;
        ql_elm_new(chunk, link_dirty);
        chunk->dirtied = false;

        /*
         * Claim that no pages are in use, since the header is merely
         * overhead.
         */
        chunk->ndirty = 0;

        /*
         * Initialize the map to contain one maximal free untouched
         * run.  Mark the pages as zeroed iff chunk_alloc() returned a
         * zeroed chunk.
         */
        unzeroed = zero ? 0 : CHUNK_MAP_UNZEROED;
        chunk->map[0].bits = arena_maxclass | unzeroed;
        /*
         * The internal page map entries need to be initialized only
         * when the chunk is not zeroed.
         */
        if (zero == false) {
            for (i = map_bias+1; i < chunk_npages-1; i++)
                chunk->map[i-map_bias].bits = unzeroed;
        }
#ifdef JEMALLOC_DEBUG
        else {
            for (i = map_bias+1; i < chunk_npages-1; i++)
                assert(chunk->map[i-map_bias].bits == unzeroed);
        }
#endif
        chunk->map[chunk_npages-1-map_bias].bits = arena_maxclass |
            unzeroed;

        /* Insert the run into the runs_avail_clean tree. */
        arena_avail_tree_insert(&arena->runs_avail_clean,
            &chunk->map[0]);
    }

    return (chunk);
}

static void
arena_chunk_dealloc(arena_t *arena, arena_chunk_t *chunk)
{
    arena_avail_tree_t *runs_avail;

    /*
     * Remove run from the appropriate runs_avail_* tree, so that the arena
     * does not use it.
     */
    if ((chunk->map[0].bits & CHUNK_MAP_DIRTY) == 0)
        runs_avail = &arena->runs_avail_clean;
    else
        runs_avail = &arena->runs_avail_dirty;
    arena_avail_tree_remove(runs_avail, &chunk->map[0]);

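    /*
     * Each arena retains at most one spare chunk; caching the most recently
     * freed chunk avoids chunk alloc/dealloc churn when allocation repeatedly
     * crosses a chunk boundary.  Any previously cached spare is deallocated
     * below.
     */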
    if (arena->spare != NULL) {
        arena_chunk_t *spare = arena->spare;

        arena->spare = chunk;
        if (spare->dirtied) {
            ql_remove(&chunk->arena->chunks_dirty, spare,
                link_dirty);
            arena->ndirty -= spare->ndirty;
        }
        malloc_mutex_unlock(&arena->lock);
        chunk_dealloc((void *)spare, chunksize, true);
        malloc_mutex_lock(&arena->lock);
#ifdef JEMALLOC_STATS
        arena->stats.mapped -= chunksize;
#endif
    } else
        arena->spare = chunk;
}

static arena_run_t *
arena_run_alloc(arena_t *arena, size_t size, bool large, bool zero)
{
    arena_chunk_t *chunk;
    arena_run_t *run;
    arena_chunk_map_t *mapelm, key;

    assert(size <= arena_maxclass);
    assert((size & PAGE_MASK) == 0);

    /* Search the arena's chunks for the lowest best fit. */
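    /*
     * CHUNK_MAP_KEY makes the key sort below any real map element of equal
     * size (see arena_avail_comp()), so nsearch() returns the smallest run
     * that is >= size, breaking ties by lowest address.
     */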
    key.bits = size | CHUNK_MAP_KEY;
    mapelm = arena_avail_tree_nsearch(&arena->runs_avail_dirty, &key);
    if (mapelm != NULL) {
        arena_chunk_t *run_chunk = CHUNK_ADDR2BASE(mapelm);
        size_t pageind = (((uintptr_t)mapelm -
            (uintptr_t)run_chunk->map) / sizeof(arena_chunk_map_t))
            + map_bias;

        run = (arena_run_t *)((uintptr_t)run_chunk + (pageind <<
            PAGE_SHIFT));
        arena_run_split(arena, run, size, large, zero);
        return (run);
    }
    mapelm = arena_avail_tree_nsearch(&arena->runs_avail_clean, &key);
    if (mapelm != NULL) {
        arena_chunk_t *run_chunk = CHUNK_ADDR2BASE(mapelm);
        size_t pageind = (((uintptr_t)mapelm -
            (uintptr_t)run_chunk->map) / sizeof(arena_chunk_map_t))
            + map_bias;

        run = (arena_run_t *)((uintptr_t)run_chunk + (pageind <<
            PAGE_SHIFT));
        arena_run_split(arena, run, size, large, zero);
        return (run);
    }

    /*
     * No usable runs.  Create a new chunk from which to allocate the run.
     */
    chunk = arena_chunk_alloc(arena);
    if (chunk != NULL) {
        run = (arena_run_t *)((uintptr_t)chunk + (map_bias <<
            PAGE_SHIFT));
        arena_run_split(arena, run, size, large, zero);
        return (run);
    }

    /*
     * arena_chunk_alloc() failed, but another thread may have made
     * sufficient memory available while this one dropped arena->lock in
     * arena_chunk_alloc(), so search one more time.
     */
    mapelm = arena_avail_tree_nsearch(&arena->runs_avail_dirty, &key);
    if (mapelm != NULL) {
        arena_chunk_t *run_chunk = CHUNK_ADDR2BASE(mapelm);
        size_t pageind = (((uintptr_t)mapelm -
            (uintptr_t)run_chunk->map) / sizeof(arena_chunk_map_t))
            + map_bias;

        run = (arena_run_t *)((uintptr_t)run_chunk + (pageind <<
            PAGE_SHIFT));
        arena_run_split(arena, run, size, large, zero);
        return (run);
    }
    mapelm = arena_avail_tree_nsearch(&arena->runs_avail_clean, &key);
    if (mapelm != NULL) {
        arena_chunk_t *run_chunk = CHUNK_ADDR2BASE(mapelm);
        size_t pageind = (((uintptr_t)mapelm -
            (uintptr_t)run_chunk->map) / sizeof(arena_chunk_map_t))
            + map_bias;

        run = (arena_run_t *)((uintptr_t)run_chunk + (pageind <<
            PAGE_SHIFT));
        arena_run_split(arena, run, size, large, zero);
        return (run);
    }

    return (NULL);
}

static inline void
arena_maybe_purge(arena_t *arena)
{

    /* Enforce opt_lg_dirty_mult. */
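    /*
     * Purge only if the number of dirty pages, net of those already being
     * purged (npurgatory), exceeds both chunk_npages and the
     * nactive / 2^opt_lg_dirty_mult threshold.
     */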
    if (opt_lg_dirty_mult >= 0 && arena->ndirty > arena->npurgatory &&
        (arena->ndirty - arena->npurgatory) > chunk_npages &&
        (arena->nactive >> opt_lg_dirty_mult) < (arena->ndirty -
        arena->npurgatory))
        arena_purge(arena, false);
}

static inline void
arena_chunk_purge(arena_t *arena, arena_chunk_t *chunk)
{
    ql_head(arena_chunk_map_t) mapelms;
    arena_chunk_map_t *mapelm;
    size_t pageind, flag_unzeroed;
#ifdef JEMALLOC_DEBUG
    size_t ndirty;
#endif
#ifdef JEMALLOC_STATS
    size_t nmadvise;
#endif

    ql_new(&mapelms);

    flag_unzeroed =
#ifdef JEMALLOC_PURGE_MADVISE_DONTNEED
    /*
     * madvise(..., MADV_DONTNEED) results in zero-filled pages for
     * anonymous mappings, but not for file-backed mappings.
     */
# ifdef JEMALLOC_SWAP
        swap_enabled ? CHUNK_MAP_UNZEROED :
# endif
        0;
#else
        CHUNK_MAP_UNZEROED;
#endif

    /*
     * If chunk is the spare, temporarily re-allocate it, 1) so that its
     * run is reinserted into runs_avail_dirty, and 2) so that it cannot be
     * completely discarded by another thread while arena->lock is dropped
     * by this thread.  Note that the arena_run_dalloc() call will
     * implicitly deallocate the chunk, so no explicit action is required
     * in this function to deallocate the chunk.
     *
     * Note that once a chunk contains dirty pages, it cannot again contain
     * a single run unless 1) it is a dirty run, or 2) this function purges
     * dirty pages and causes the transition to a single clean run.  Thus
     * (chunk == arena->spare) is possible, but it is not possible for
     * this function to be called on the spare unless it contains a dirty
     * run.
     */
    if (chunk == arena->spare) {
        assert((chunk->map[0].bits & CHUNK_MAP_DIRTY) != 0);
        arena_chunk_alloc(arena);
    }

    /* Temporarily allocate all free dirty runs within chunk. */
    for (pageind = map_bias; pageind < chunk_npages;) {
        mapelm = &chunk->map[pageind-map_bias];
        if ((mapelm->bits & CHUNK_MAP_ALLOCATED) == 0) {
            size_t npages;

            npages = mapelm->bits >> PAGE_SHIFT;
            assert(pageind + npages <= chunk_npages);
            if (mapelm->bits & CHUNK_MAP_DIRTY) {
                size_t i;
#ifdef JEMALLOC_STATS
                size_t cactive_diff;
#endif

                arena_avail_tree_remove(
                    &arena->runs_avail_dirty, mapelm);

                mapelm->bits = (npages << PAGE_SHIFT) |
                    flag_unzeroed | CHUNK_MAP_LARGE |
                    CHUNK_MAP_ALLOCATED;
                /*
                 * Update internal elements in the page map, so
                 * that CHUNK_MAP_UNZEROED is properly set.
                 */
                for (i = 1; i < npages - 1; i++) {
                    chunk->map[pageind+i-map_bias].bits =
                        flag_unzeroed;
                }
                if (npages > 1) {
                    chunk->map[
                        pageind+npages-1-map_bias].bits =
                        flag_unzeroed | CHUNK_MAP_LARGE |
                        CHUNK_MAP_ALLOCATED;
                }

#ifdef JEMALLOC_STATS
                /*
                 * Update stats_cactive if nactive is crossing
                 * a chunk multiple.
                 */
                cactive_diff = CHUNK_CEILING((arena->nactive +
                    npages) << PAGE_SHIFT) -
                    CHUNK_CEILING(arena->nactive << PAGE_SHIFT);
                if (cactive_diff != 0)
                    stats_cactive_add(cactive_diff);
#endif
                arena->nactive += npages;
                /* Append to list for later processing. */
                ql_elm_new(mapelm, u.ql_link);
                ql_tail_insert(&mapelms, mapelm, u.ql_link);
            }

            pageind += npages;
        } else {
            /* Skip allocated run. */
            if (mapelm->bits & CHUNK_MAP_LARGE)
                pageind += mapelm->bits >> PAGE_SHIFT;
            else {
                arena_run_t *run = (arena_run_t *)((uintptr_t)
                    chunk + (uintptr_t)(pageind << PAGE_SHIFT));

                assert((mapelm->bits >> PAGE_SHIFT) == 0);
                dassert(run->magic == ARENA_RUN_MAGIC);
                size_t binind = arena_bin_index(arena,
                    run->bin);
                arena_bin_info_t *bin_info =
                    &arena_bin_info[binind];
                pageind += bin_info->run_size >> PAGE_SHIFT;
            }
        }
    }
    assert(pageind == chunk_npages);

#ifdef JEMALLOC_DEBUG
    ndirty = chunk->ndirty;
#endif
#ifdef JEMALLOC_STATS
    arena->stats.purged += chunk->ndirty;
#endif
    arena->ndirty -= chunk->ndirty;
    chunk->ndirty = 0;
    ql_remove(&arena->chunks_dirty, chunk, link_dirty);
    chunk->dirtied = false;

    malloc_mutex_unlock(&arena->lock);
#ifdef JEMALLOC_STATS
    nmadvise = 0;
#endif
    ql_foreach(mapelm, &mapelms, u.ql_link) {
        size_t pageind = (((uintptr_t)mapelm - (uintptr_t)chunk->map) /
            sizeof(arena_chunk_map_t)) + map_bias;
        size_t npages = mapelm->bits >> PAGE_SHIFT;

        assert(pageind + npages <= chunk_npages);
#ifdef JEMALLOC_DEBUG
        assert(ndirty >= npages);
        ndirty -= npages;
#endif

#ifdef JEMALLOC_PURGE_MADVISE_DONTNEED
        madvise((void *)((uintptr_t)chunk + (pageind << PAGE_SHIFT)),
            (npages << PAGE_SHIFT), MADV_DONTNEED);
#elif defined(JEMALLOC_PURGE_MADVISE_FREE)
        madvise((void *)((uintptr_t)chunk + (pageind << PAGE_SHIFT)),
            (npages << PAGE_SHIFT), MADV_FREE);
#else
# error "No method defined for purging unused dirty pages."
#endif

#ifdef JEMALLOC_STATS
        nmadvise++;
#endif
    }
#ifdef JEMALLOC_DEBUG
    assert(ndirty == 0);
#endif
    malloc_mutex_lock(&arena->lock);
#ifdef JEMALLOC_STATS
    arena->stats.nmadvise += nmadvise;
#endif

    /* Deallocate runs. */
    for (mapelm = ql_first(&mapelms); mapelm != NULL;
        mapelm = ql_first(&mapelms)) {
        size_t pageind = (((uintptr_t)mapelm - (uintptr_t)chunk->map) /
            sizeof(arena_chunk_map_t)) + map_bias;
        arena_run_t *run = (arena_run_t *)((uintptr_t)chunk +
            (uintptr_t)(pageind << PAGE_SHIFT));

        ql_remove(&mapelms, mapelm, u.ql_link);
        arena_run_dalloc(arena, run, false);
    }
}

static void
arena_purge(arena_t *arena, bool all)
{
    arena_chunk_t *chunk;
    size_t npurgatory;
#ifdef JEMALLOC_DEBUG
    size_t ndirty = 0;

    ql_foreach(chunk, &arena->chunks_dirty, link_dirty) {
        assert(chunk->dirtied);
        ndirty += chunk->ndirty;
    }
    assert(ndirty == arena->ndirty);
#endif
    assert(arena->ndirty > arena->npurgatory || all);
    assert(arena->ndirty - arena->npurgatory > chunk_npages || all);
    assert((arena->nactive >> opt_lg_dirty_mult) < (arena->ndirty -
        arena->npurgatory) || all);

#ifdef JEMALLOC_STATS
    arena->stats.npurge++;
#endif

    /*
     * Compute the minimum number of pages that this thread should try to
     * purge, and add the result to arena->npurgatory.  This will keep
     * multiple threads from racing to reduce ndirty below the threshold.
     */
    npurgatory = arena->ndirty - arena->npurgatory;
    if (all == false) {
        assert(npurgatory >= arena->nactive >> opt_lg_dirty_mult);
        npurgatory -= arena->nactive >> opt_lg_dirty_mult;
    }
    arena->npurgatory += npurgatory;

    while (npurgatory > 0) {
        /* Get next chunk with dirty pages. */
        chunk = ql_first(&arena->chunks_dirty);
        if (chunk == NULL) {
            /*
             * This thread was unable to purge as many pages as
             * originally intended, due to races with other threads
             * that either did some of the purging work, or re-used
             * dirty pages.
             */
            arena->npurgatory -= npurgatory;
            return;
        }
        while (chunk->ndirty == 0) {
            ql_remove(&arena->chunks_dirty, chunk, link_dirty);
            chunk->dirtied = false;
            chunk = ql_first(&arena->chunks_dirty);
            if (chunk == NULL) {
                /* Same logic as above. */
                arena->npurgatory -= npurgatory;
                return;
            }
        }

        if (chunk->ndirty > npurgatory) {
            /*
             * This thread will, at a minimum, purge all the dirty
             * pages in chunk, so set npurgatory to reflect this
             * thread's commitment to purge the pages.  This tends
             * to reduce the chances of the following scenario:
             *
             * 1) This thread sets arena->npurgatory such that
             *    (arena->ndirty - arena->npurgatory) is at the
             *    threshold.
             * 2) This thread drops arena->lock.
             * 3) Another thread causes one or more pages to be
             *    dirtied, and immediately determines that it must
             *    purge dirty pages.
             *
             * If this scenario *does* play out, that's okay,
             * because all of the purging work being done really
             * needs to happen.
             */
            arena->npurgatory += chunk->ndirty - npurgatory;
            npurgatory = chunk->ndirty;
        }

        arena->npurgatory -= chunk->ndirty;
        npurgatory -= chunk->ndirty;
        arena_chunk_purge(arena, chunk);
    }
}

void
arena_purge_all(arena_t *arena)
{

    malloc_mutex_lock(&arena->lock);
    arena_purge(arena, true);
    malloc_mutex_unlock(&arena->lock);
}

static void
arena_run_dalloc(arena_t *arena, arena_run_t *run, bool dirty)
{
    arena_chunk_t *chunk;
    size_t size, run_ind, run_pages, flag_dirty;
    arena_avail_tree_t *runs_avail;
#ifdef JEMALLOC_STATS
    size_t cactive_diff;
#endif

    chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(run);
    run_ind = (size_t)(((uintptr_t)run - (uintptr_t)chunk)
        >> PAGE_SHIFT);
    assert(run_ind >= map_bias);
    assert(run_ind < chunk_npages);
    if ((chunk->map[run_ind-map_bias].bits & CHUNK_MAP_LARGE) != 0) {
        size = chunk->map[run_ind-map_bias].bits & ~PAGE_MASK;
        assert(size == PAGE_SIZE ||
            (chunk->map[run_ind+(size>>PAGE_SHIFT)-1-map_bias].bits &
            ~PAGE_MASK) == 0);
        assert((chunk->map[run_ind+(size>>PAGE_SHIFT)-1-map_bias].bits &
            CHUNK_MAP_LARGE) != 0);
        assert((chunk->map[run_ind+(size>>PAGE_SHIFT)-1-map_bias].bits &
            CHUNK_MAP_ALLOCATED) != 0);
    } else {
        size_t binind = arena_bin_index(arena, run->bin);
        arena_bin_info_t *bin_info = &arena_bin_info[binind];
        size = bin_info->run_size;
    }
    run_pages = (size >> PAGE_SHIFT);
#ifdef JEMALLOC_STATS
    /* Update stats_cactive if nactive is crossing a chunk multiple. */
    cactive_diff = CHUNK_CEILING(arena->nactive << PAGE_SHIFT) -
        CHUNK_CEILING((arena->nactive - run_pages) << PAGE_SHIFT);
    if (cactive_diff != 0)
        stats_cactive_sub(cactive_diff);
#endif
    arena->nactive -= run_pages;

    /*
     * The run is dirty if the caller claims to have dirtied it, as well as
     * if it was already dirty before being allocated.
     */
    if ((chunk->map[run_ind-map_bias].bits & CHUNK_MAP_DIRTY) != 0)
        dirty = true;
    flag_dirty = dirty ? CHUNK_MAP_DIRTY : 0;
    runs_avail = dirty ? &arena->runs_avail_dirty :
        &arena->runs_avail_clean;

    /* Mark pages as unallocated in the chunk map. */
    if (dirty) {
        chunk->map[run_ind-map_bias].bits = size | CHUNK_MAP_DIRTY;
        chunk->map[run_ind+run_pages-1-map_bias].bits = size |
            CHUNK_MAP_DIRTY;

        chunk->ndirty += run_pages;
        arena->ndirty += run_pages;
    } else {
        chunk->map[run_ind-map_bias].bits = size |
            (chunk->map[run_ind-map_bias].bits & CHUNK_MAP_UNZEROED);
        chunk->map[run_ind+run_pages-1-map_bias].bits = size |
            (chunk->map[run_ind+run_pages-1-map_bias].bits &
            CHUNK_MAP_UNZEROED);
    }

    /* Try to coalesce forward. */
    if (run_ind + run_pages < chunk_npages &&
        (chunk->map[run_ind+run_pages-map_bias].bits & CHUNK_MAP_ALLOCATED)
        == 0 && (chunk->map[run_ind+run_pages-map_bias].bits &
        CHUNK_MAP_DIRTY) == flag_dirty) {
        size_t nrun_size = chunk->map[run_ind+run_pages-map_bias].bits &
            ~PAGE_MASK;
        size_t nrun_pages = nrun_size >> PAGE_SHIFT;

        /*
         * Remove successor from runs_avail; the coalesced run is
         * inserted later.
         */
        assert((chunk->map[run_ind+run_pages+nrun_pages-1-map_bias].bits
            & ~PAGE_MASK) == nrun_size);
        assert((chunk->map[run_ind+run_pages+nrun_pages-1-map_bias].bits
            & CHUNK_MAP_ALLOCATED) == 0);
        assert((chunk->map[run_ind+run_pages+nrun_pages-1-map_bias].bits
            & CHUNK_MAP_DIRTY) == flag_dirty);
        arena_avail_tree_remove(runs_avail,
            &chunk->map[run_ind+run_pages-map_bias]);

        size += nrun_size;
        run_pages += nrun_pages;

        chunk->map[run_ind-map_bias].bits = size |
            (chunk->map[run_ind-map_bias].bits & CHUNK_MAP_FLAGS_MASK);
        chunk->map[run_ind+run_pages-1-map_bias].bits = size |
            (chunk->map[run_ind+run_pages-1-map_bias].bits &
            CHUNK_MAP_FLAGS_MASK);
    }

    /* Try to coalesce backward. */
    if (run_ind > map_bias && (chunk->map[run_ind-1-map_bias].bits &
        CHUNK_MAP_ALLOCATED) == 0 && (chunk->map[run_ind-1-map_bias].bits &
        CHUNK_MAP_DIRTY) == flag_dirty) {
        size_t prun_size = chunk->map[run_ind-1-map_bias].bits &
            ~PAGE_MASK;
        size_t prun_pages = prun_size >> PAGE_SHIFT;

        run_ind -= prun_pages;

        /*
         * Remove predecessor from runs_avail; the coalesced run is
         * inserted later.
         */
        assert((chunk->map[run_ind-map_bias].bits & ~PAGE_MASK)
            == prun_size);
        assert((chunk->map[run_ind-map_bias].bits & CHUNK_MAP_ALLOCATED)
            == 0);
        assert((chunk->map[run_ind-map_bias].bits & CHUNK_MAP_DIRTY)
            == flag_dirty);
        arena_avail_tree_remove(runs_avail,
            &chunk->map[run_ind-map_bias]);

        size += prun_size;
        run_pages += prun_pages;

        chunk->map[run_ind-map_bias].bits = size |
            (chunk->map[run_ind-map_bias].bits & CHUNK_MAP_FLAGS_MASK);
        chunk->map[run_ind+run_pages-1-map_bias].bits = size |
            (chunk->map[run_ind+run_pages-1-map_bias].bits &
            CHUNK_MAP_FLAGS_MASK);
    }

    /* Insert into runs_avail, now that coalescing is complete. */
    assert((chunk->map[run_ind-map_bias].bits & ~PAGE_MASK) ==
        (chunk->map[run_ind+run_pages-1-map_bias].bits & ~PAGE_MASK));
    assert((chunk->map[run_ind-map_bias].bits & CHUNK_MAP_DIRTY) ==
        (chunk->map[run_ind+run_pages-1-map_bias].bits & CHUNK_MAP_DIRTY));
    arena_avail_tree_insert(runs_avail, &chunk->map[run_ind-map_bias]);

    if (dirty) {
        /*
         * Insert into chunks_dirty before potentially calling
         * arena_chunk_dealloc(), so that chunks_dirty and
         * arena->ndirty are consistent.
         */
        if (chunk->dirtied == false) {
            ql_tail_insert(&arena->chunks_dirty, chunk, link_dirty);
            chunk->dirtied = true;
        }
    }

    /*
     * Deallocate chunk if it is now completely unused.  The bit
     * manipulation checks whether the first run is unallocated and extends
     * to the end of the chunk.
     */
    if ((chunk->map[0].bits & (~PAGE_MASK | CHUNK_MAP_ALLOCATED)) ==
        arena_maxclass)
        arena_chunk_dealloc(arena, chunk);

    /*
     * It is okay to do dirty page processing here even if the chunk was
     * deallocated above, since in that case it is the spare.  Waiting
     * until after possible chunk deallocation to do dirty processing
     * allows for an old spare to be fully deallocated, thus decreasing the
     * chances of spuriously crossing the dirty page purging threshold.
     */
    if (dirty)
        arena_maybe_purge(arena);
}

static void
arena_run_trim_head(arena_t *arena, arena_chunk_t *chunk, arena_run_t *run,
    size_t oldsize, size_t newsize)
{
    size_t pageind = ((uintptr_t)run - (uintptr_t)chunk) >> PAGE_SHIFT;
    size_t head_npages = (oldsize - newsize) >> PAGE_SHIFT;
    size_t flag_dirty = chunk->map[pageind-map_bias].bits & CHUNK_MAP_DIRTY;

    assert(oldsize > newsize);

    /*
     * Update the chunk map so that arena_run_dalloc() can treat the
     * leading run as separately allocated.  Set the last element of each
     * run first, in case of single-page runs.
     */
    assert((chunk->map[pageind-map_bias].bits & CHUNK_MAP_LARGE) != 0);
    assert((chunk->map[pageind-map_bias].bits & CHUNK_MAP_ALLOCATED) != 0);
    chunk->map[pageind+head_npages-1-map_bias].bits = flag_dirty |
        (chunk->map[pageind+head_npages-1-map_bias].bits &
        CHUNK_MAP_UNZEROED) | CHUNK_MAP_LARGE | CHUNK_MAP_ALLOCATED;
    chunk->map[pageind-map_bias].bits = (oldsize - newsize)
        | flag_dirty | (chunk->map[pageind-map_bias].bits &
        CHUNK_MAP_UNZEROED) | CHUNK_MAP_LARGE | CHUNK_MAP_ALLOCATED;

#ifdef JEMALLOC_DEBUG
    {
        size_t tail_npages = newsize >> PAGE_SHIFT;
        assert((chunk->map[pageind+head_npages+tail_npages-1-map_bias]
            .bits & ~PAGE_MASK) == 0);
        assert((chunk->map[pageind+head_npages+tail_npages-1-map_bias]
            .bits & CHUNK_MAP_DIRTY) == flag_dirty);
        assert((chunk->map[pageind+head_npages+tail_npages-1-map_bias]
            .bits & CHUNK_MAP_LARGE) != 0);
        assert((chunk->map[pageind+head_npages+tail_npages-1-map_bias]
            .bits & CHUNK_MAP_ALLOCATED) != 0);
    }
#endif
    chunk->map[pageind+head_npages-map_bias].bits = newsize | flag_dirty |
        (chunk->map[pageind+head_npages-map_bias].bits &
        CHUNK_MAP_FLAGS_MASK) | CHUNK_MAP_LARGE | CHUNK_MAP_ALLOCATED;

    arena_run_dalloc(arena, run, false);
}

static void
arena_run_trim_tail(arena_t *arena, arena_chunk_t *chunk, arena_run_t *run,
    size_t oldsize, size_t newsize, bool dirty)
{
    size_t pageind = ((uintptr_t)run - (uintptr_t)chunk) >> PAGE_SHIFT;
    size_t head_npages = newsize >> PAGE_SHIFT;
    size_t tail_npages = (oldsize - newsize) >> PAGE_SHIFT;
    size_t flag_dirty = chunk->map[pageind-map_bias].bits &
        CHUNK_MAP_DIRTY;

    assert(oldsize > newsize);

    /*
     * Update the chunk map so that arena_run_dalloc() can treat the
     * trailing run as separately allocated.  Set the last element of each
     * run first, in case of single-page runs.
     */
    assert((chunk->map[pageind-map_bias].bits & CHUNK_MAP_LARGE) != 0);
    assert((chunk->map[pageind-map_bias].bits & CHUNK_MAP_ALLOCATED) != 0);
    chunk->map[pageind+head_npages-1-map_bias].bits = flag_dirty |
        (chunk->map[pageind+head_npages-1-map_bias].bits &
        CHUNK_MAP_UNZEROED) | CHUNK_MAP_LARGE | CHUNK_MAP_ALLOCATED;
    chunk->map[pageind-map_bias].bits = newsize | flag_dirty |
        (chunk->map[pageind-map_bias].bits & CHUNK_MAP_UNZEROED) |
        CHUNK_MAP_LARGE | CHUNK_MAP_ALLOCATED;

    assert((chunk->map[pageind+head_npages+tail_npages-1-map_bias].bits &
        ~PAGE_MASK) == 0);
    assert((chunk->map[pageind+head_npages+tail_npages-1-map_bias].bits &
        CHUNK_MAP_LARGE) != 0);
    assert((chunk->map[pageind+head_npages+tail_npages-1-map_bias].bits &
        CHUNK_MAP_ALLOCATED) != 0);
    chunk->map[pageind+head_npages+tail_npages-1-map_bias].bits =
        flag_dirty |
        (chunk->map[pageind+head_npages+tail_npages-1-map_bias].bits &
        CHUNK_MAP_UNZEROED) | CHUNK_MAP_LARGE | CHUNK_MAP_ALLOCATED;
    chunk->map[pageind+head_npages-map_bias].bits = (oldsize - newsize) |
        flag_dirty | (chunk->map[pageind+head_npages-map_bias].bits &
        CHUNK_MAP_UNZEROED) | CHUNK_MAP_LARGE | CHUNK_MAP_ALLOCATED;

    arena_run_dalloc(arena, (arena_run_t *)((uintptr_t)run + newsize),
        dirty);
}

static arena_run_t *
arena_bin_nonfull_run_get(arena_t *arena, arena_bin_t *bin)
{
    arena_chunk_map_t *mapelm;
    arena_run_t *run;
    size_t binind;
    arena_bin_info_t *bin_info;

    /* Look for a usable run. */
    mapelm = arena_run_tree_first(&bin->runs);
    if (mapelm != NULL) {
        arena_chunk_t *chunk;
        size_t pageind;

        /* run is guaranteed to have available space. */
        arena_run_tree_remove(&bin->runs, mapelm);

        chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(mapelm);
        pageind = ((((uintptr_t)mapelm - (uintptr_t)chunk->map) /
            sizeof(arena_chunk_map_t))) + map_bias;
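        /*
         * For small runs, the size field of the map bits records the
         * page's offset within its run, so subtracting it recovers the
         * run's first page.
         */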
        run = (arena_run_t *)((uintptr_t)chunk + (uintptr_t)((pageind -
            (mapelm->bits >> PAGE_SHIFT))
            << PAGE_SHIFT));
#ifdef JEMALLOC_STATS
        bin->stats.reruns++;
#endif
        return (run);
    }
    /* No existing runs have any space available. */

    binind = arena_bin_index(arena, bin);
    bin_info = &arena_bin_info[binind];

    /* Allocate a new run. */
    malloc_mutex_unlock(&bin->lock);
    /******************************/
    malloc_mutex_lock(&arena->lock);
    run = arena_run_alloc(arena, bin_info->run_size, false, false);
    if (run != NULL) {
        bitmap_t *bitmap = (bitmap_t *)((uintptr_t)run +
            (uintptr_t)bin_info->bitmap_offset);

        /* Initialize run internals. */
        run->bin = bin;
        run->nextind = 0;
        run->nfree = bin_info->nregs;
        bitmap_init(bitmap, &bin_info->bitmap_info);
#ifdef JEMALLOC_DEBUG
        run->magic = ARENA_RUN_MAGIC;
#endif
    }
    malloc_mutex_unlock(&arena->lock);
    /********************************/
    malloc_mutex_lock(&bin->lock);
    if (run != NULL) {
#ifdef JEMALLOC_STATS
        bin->stats.nruns++;
        bin->stats.curruns++;
        if (bin->stats.curruns > bin->stats.highruns)
            bin->stats.highruns = bin->stats.curruns;
#endif
        return (run);
    }

    /*
     * arena_run_alloc() failed, but another thread may have made
     * sufficient memory available while this one dropped bin->lock above,
     * so search one more time.
     */
    mapelm = arena_run_tree_first(&bin->runs);
    if (mapelm != NULL) {
        arena_chunk_t *chunk;
        size_t pageind;

        /* run is guaranteed to have available space. */
        arena_run_tree_remove(&bin->runs, mapelm);

        chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(mapelm);
        pageind = ((((uintptr_t)mapelm - (uintptr_t)chunk->map) /
            sizeof(arena_chunk_map_t))) + map_bias;
        run = (arena_run_t *)((uintptr_t)chunk + (uintptr_t)((pageind -
            (mapelm->bits >> PAGE_SHIFT))
            << PAGE_SHIFT));
#ifdef JEMALLOC_STATS
        bin->stats.reruns++;
#endif
        return (run);
    }

    return (NULL);
}

/* Re-fill bin->runcur, then call arena_run_reg_alloc(). */
static void *
arena_bin_malloc_hard(arena_t *arena, arena_bin_t *bin)
{
    void *ret;
    size_t binind;
    arena_bin_info_t *bin_info;
    arena_run_t *run;

    binind = arena_bin_index(arena, bin);
    bin_info = &arena_bin_info[binind];
    bin->runcur = NULL;
    run = arena_bin_nonfull_run_get(arena, bin);
    if (bin->runcur != NULL && bin->runcur->nfree > 0) {
        /*
         * Another thread updated runcur while this one ran without the
         * bin lock in arena_bin_nonfull_run_get().
         */
        dassert(bin->runcur->magic == ARENA_RUN_MAGIC);
        assert(bin->runcur->nfree > 0);
        ret = arena_run_reg_alloc(bin->runcur, bin_info);
        if (run != NULL) {
            arena_chunk_t *chunk;

            /*
             * arena_run_alloc() may have allocated run, or it may
             * have pulled run from the bin's run tree.  Therefore
             * it is unsafe to make any assumptions about how run
             * has previously been used, and arena_bin_lower_run()
             * must be called, as if a region were just deallocated
             * from the run.
             */
            chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(run);
            if (run->nfree == bin_info->nregs)
                arena_dalloc_bin_run(arena, chunk, run, bin);
            else
                arena_bin_lower_run(arena, chunk, run, bin);
        }
        return (ret);
    }

    if (run == NULL)
        return (NULL);

    bin->runcur = run;

    dassert(bin->runcur->magic == ARENA_RUN_MAGIC);
    assert(bin->runcur->nfree > 0);

    return (arena_run_reg_alloc(bin->runcur, bin_info));
}

#ifdef JEMALLOC_PROF
void
arena_prof_accum(arena_t *arena, uint64_t accumbytes)
{

    if (prof_interval != 0) {
        arena->prof_accumbytes += accumbytes;
        if (arena->prof_accumbytes >= prof_interval) {
            prof_idump();
            arena->prof_accumbytes -= prof_interval;
        }
    }
}
#endif

#ifdef JEMALLOC_TCACHE
void
arena_tcache_fill_small(arena_t *arena, tcache_bin_t *tbin, size_t binind
# ifdef JEMALLOC_PROF
    , uint64_t prof_accumbytes
# endif
    )
{
    unsigned i, nfill;
    arena_bin_t *bin;
    arena_run_t *run;
    void *ptr;

    assert(tbin->ncached == 0);

#ifdef JEMALLOC_PROF
    malloc_mutex_lock(&arena->lock);
    arena_prof_accum(arena, prof_accumbytes);
    malloc_mutex_unlock(&arena->lock);
#endif
    bin = &arena->bins[binind];
    malloc_mutex_lock(&bin->lock);
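    /* Fill only a fraction (1 / 2^lg_fill_div) of the cache's capacity. */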
    for (i = 0, nfill = (tcache_bin_info[binind].ncached_max >>
        tbin->lg_fill_div); i < nfill; i++) {
        if ((run = bin->runcur) != NULL && run->nfree > 0)
            ptr = arena_run_reg_alloc(run, &arena_bin_info[binind]);
        else
            ptr = arena_bin_malloc_hard(arena, bin);
        if (ptr == NULL)
            break;
        /* Insert such that low regions get used first. */
        tbin->avail[nfill - 1 - i] = ptr;
    }
#ifdef JEMALLOC_STATS
    bin->stats.allocated += i * arena_bin_info[binind].reg_size;
    bin->stats.nmalloc += i;
    bin->stats.nrequests += tbin->tstats.nrequests;
    bin->stats.nfills++;
    tbin->tstats.nrequests = 0;
#endif
    malloc_mutex_unlock(&bin->lock);
    tbin->ncached = i;
}
#endif

void *
arena_malloc_small(arena_t *arena, size_t size, bool zero)
{
    void *ret;
    arena_bin_t *bin;
    arena_run_t *run;
    size_t binind;

    binind = SMALL_SIZE2BIN(size);
    assert(binind < nbins);
    bin = &arena->bins[binind];
    size = arena_bin_info[binind].reg_size;

    malloc_mutex_lock(&bin->lock);
    if ((run = bin->runcur) != NULL && run->nfree > 0)
        ret = arena_run_reg_alloc(run, &arena_bin_info[binind]);
    else
        ret = arena_bin_malloc_hard(arena, bin);

    if (ret == NULL) {
        malloc_mutex_unlock(&bin->lock);
        return (NULL);
    }

#ifdef JEMALLOC_STATS
    bin->stats.allocated += size;
    bin->stats.nmalloc++;
    bin->stats.nrequests++;
#endif
    malloc_mutex_unlock(&bin->lock);
#ifdef JEMALLOC_PROF
    if (isthreaded == false) {
        malloc_mutex_lock(&arena->lock);
        arena_prof_accum(arena, size);
        malloc_mutex_unlock(&arena->lock);
    }
#endif

    if (zero == false) {
#ifdef JEMALLOC_FILL
        if (opt_junk)
            memset(ret, 0xa5, size);
        else if (opt_zero)
            memset(ret, 0, size);
#endif
    } else
        memset(ret, 0, size);

    return (ret);
}

void *
arena_malloc_large(arena_t *arena, size_t size, bool zero)
{
    void *ret;

    /* Large allocation. */
    size = PAGE_CEILING(size);
    malloc_mutex_lock(&arena->lock);
    ret = (void *)arena_run_alloc(arena, size, true, zero);
    if (ret == NULL) {
        malloc_mutex_unlock(&arena->lock);
        return (NULL);
    }
#ifdef JEMALLOC_STATS
    arena->stats.nmalloc_large++;
    arena->stats.nrequests_large++;
    arena->stats.allocated_large += size;
    arena->stats.lstats[(size >> PAGE_SHIFT) - 1].nmalloc++;
    arena->stats.lstats[(size >> PAGE_SHIFT) - 1].nrequests++;
    arena->stats.lstats[(size >> PAGE_SHIFT) - 1].curruns++;
    if (arena->stats.lstats[(size >> PAGE_SHIFT) - 1].curruns >
        arena->stats.lstats[(size >> PAGE_SHIFT) - 1].highruns) {
        arena->stats.lstats[(size >> PAGE_SHIFT) - 1].highruns =
            arena->stats.lstats[(size >> PAGE_SHIFT) - 1].curruns;
    }
#endif
#ifdef JEMALLOC_PROF
    arena_prof_accum(arena, size);
#endif
    malloc_mutex_unlock(&arena->lock);

    if (zero == false) {
#ifdef JEMALLOC_FILL
        if (opt_junk)
            memset(ret, 0xa5, size);
        else if (opt_zero)
            memset(ret, 0, size);
#endif
    }

    return (ret);
}

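/*
 * Top-level allocation dispatch: small requests go through the per-size-class
 * bins (via the thread cache when available); larger requests take the large
 * path, again preferring the thread cache up to tcache_maxclass.
 */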
void *
arena_malloc(size_t size, bool zero)
{

    assert(size != 0);
    assert(QUANTUM_CEILING(size) <= arena_maxclass);

    if (size <= small_maxclass) {
#ifdef JEMALLOC_TCACHE
        tcache_t *tcache;

        if ((tcache = tcache_get()) != NULL)
            return (tcache_alloc_small(tcache, size, zero));
        else
#endif
            return (arena_malloc_small(choose_arena(), size, zero));
    } else {
#ifdef JEMALLOC_TCACHE
        if (size <= tcache_maxclass) {
            tcache_t *tcache;

            if ((tcache = tcache_get()) != NULL)
                return (tcache_alloc_large(tcache, size, zero));
            else {
                return (arena_malloc_large(choose_arena(),
                    size, zero));
            }
        } else
#endif
            return (arena_malloc_large(choose_arena(), size, zero));
    }
}

/* Only handles large allocations that require more than page alignment. */
void *
arena_palloc(arena_t *arena, size_t size, size_t alloc_size, size_t alignment,
    bool zero)
{
    void *ret;
    size_t offset;
    arena_chunk_t *chunk;

    assert((size & PAGE_MASK) == 0);

    alignment = PAGE_CEILING(alignment);

    malloc_mutex_lock(&arena->lock);
    ret = (void *)arena_run_alloc(arena, alloc_size, true, zero);
    if (ret == NULL) {
        malloc_mutex_unlock(&arena->lock);
        return (NULL);
    }

    chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ret);

    offset = (uintptr_t)ret & (alignment - 1);
    assert((offset & PAGE_MASK) == 0);
    assert(offset < alloc_size);
    if (offset == 0)
        arena_run_trim_tail(arena, chunk, ret, alloc_size, size, false);
    else {
        size_t leadsize, trailsize;

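        /*
         * Trim leading pages so that ret becomes alignment-aligned,
         * then trim any pages trailing the requested size.
         */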
1573 | leadsize = alignment - offset; |
1574 | if (leadsize > 0) { |
1575 | arena_run_trim_head(arena, chunk, ret, alloc_size, |
1576 | alloc_size - leadsize); |
1577 | ret = (void *)((uintptr_t)ret + leadsize); |
1578 | } |
1579 | |
1580 | trailsize = alloc_size - leadsize - size; |
1581 | if (trailsize != 0) { |
1582 | /* Trim trailing space. */ |
1583 | assert(trailsize < alloc_size); |
1584 | arena_run_trim_tail(arena, chunk, ret, size + trailsize, |
1585 | size, false); |
1586 | } |
1587 | } |
1588 | |
1589 | #ifdef JEMALLOC_STATS |
1590 | arena->stats.nmalloc_large++; |
1591 | arena->stats.nrequests_large++; |
1592 | arena->stats.allocated_large += size; |
1593 | arena->stats.lstats[(size >> PAGE_SHIFT) - 1].nmalloc++; |
1594 | arena->stats.lstats[(size >> PAGE_SHIFT) - 1].nrequests++; |
1595 | arena->stats.lstats[(size >> PAGE_SHIFT) - 1].curruns++; |
1596 | if (arena->stats.lstats[(size >> PAGE_SHIFT) - 1].curruns > |
1597 | arena->stats.lstats[(size >> PAGE_SHIFT) - 1].highruns) { |
1598 | arena->stats.lstats[(size >> PAGE_SHIFT) - 1].highruns = |
1599 | arena->stats.lstats[(size >> PAGE_SHIFT) - 1].curruns; |
1600 | } |
1601 | #endif |
1602 | malloc_mutex_unlock(&arena->lock); |
1603 | |
1604 | #ifdef JEMALLOC_FILL |
1605 | if (zero == false) { |
1606 | if (opt_junk) |
1607 | memset(ret, 0xa5, size); |
1608 | else if (opt_zero) |
1609 | memset(ret, 0, size); |
1610 | } |
1611 | #endif |
1612 | return (ret); |
1613 | } |
1614 | |
1615 | /* Return the size of the allocation pointed to by ptr. */ |
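/*
 * Map-bits decoding used below (editorial summary): for a small region,
 * mapbits >> PAGE_SHIFT is the page offset back to the run header, whose
 * bin determines the region size; for a large run, mapbits & ~PAGE_MASK
 * encodes the run size in bytes directly.
 */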
1616 | size_t |
1617 | arena_salloc(const void *ptr) |
1618 | { |
1619 | size_t ret; |
1620 | arena_chunk_t *chunk; |
1621 | size_t pageind, mapbits; |
1622 | |
1623 | assert(ptr != NULL); |
1624 | assert(CHUNK_ADDR2BASE(ptr) != ptr); |
1625 | |
1626 | chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr); |
1627 | pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> PAGE_SHIFT; |
1628 | mapbits = chunk->map[pageind-map_bias].bits; |
1629 | assert((mapbits & CHUNK_MAP_ALLOCATED) != 0); |
1630 | if ((mapbits & CHUNK_MAP_LARGE) == 0) { |
1631 | arena_run_t *run = (arena_run_t *)((uintptr_t)chunk + |
1632 | (uintptr_t)((pageind - (mapbits >> PAGE_SHIFT)) << |
1633 | PAGE_SHIFT)); |
1634 | dassert(run->magic == ARENA_RUN_MAGIC); |
1635 | size_t binind = arena_bin_index(chunk->arena, run->bin); |
1636 | arena_bin_info_t *bin_info = &arena_bin_info[binind]; |
1637 | assert(((uintptr_t)ptr - ((uintptr_t)run + |
1638 | (uintptr_t)bin_info->reg0_offset)) % bin_info->reg_size == |
1639 | 0); |
1640 | ret = bin_info->reg_size; |
1641 | } else { |
1642 | assert(((uintptr_t)ptr & PAGE_MASK) == 0); |
1643 | ret = mapbits & ~PAGE_MASK; |
1644 | assert(ret != 0); |
1645 | } |
1646 | |
1647 | return (ret); |
1648 | } |
1649 | |
1650 | #ifdef JEMALLOC_PROF |
1651 | void |
1652 | arena_prof_promoted(const void *ptr, size_t size) |
1653 | { |
1654 | arena_chunk_t *chunk; |
1655 | size_t pageind, binind; |
1656 | |
1657 | assert(ptr != NULL); |
1658 | assert(CHUNK_ADDR2BASE(ptr) != ptr); |
1659 | assert(isalloc(ptr) == PAGE_SIZE); |
1660 | assert(size <= small_maxclass); |
1661 | |
1662 | chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr); |
1663 | pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> PAGE_SHIFT; |
1664 | binind = SMALL_SIZE2BIN(size); |
1665 | assert(binind < nbins); |
1666 | chunk->map[pageind-map_bias].bits = (chunk->map[pageind-map_bias].bits & |
1667 | ~CHUNK_MAP_CLASS_MASK) | ((binind+1) << CHUNK_MAP_CLASS_SHIFT); |
1668 | } |
1669 | |
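/*
 * Editorial note: arena_prof_promoted() stores binind + 1 in the
 * CHUNK_MAP_CLASS bits so that zero can mean "not promoted".
 * arena_salloc_demote() reverses that: a PAGE_SIZE large mapping with a
 * nonzero class field is reported at its original small size (e.g. a
 * promoted 24-byte region yields reg_size == 24 rather than PAGE_SIZE).
 */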
1670 | size_t |
1671 | arena_salloc_demote(const void *ptr) |
1672 | { |
1673 | size_t ret; |
1674 | arena_chunk_t *chunk; |
1675 | size_t pageind, mapbits; |
1676 | |
1677 | assert(ptr != NULL); |
1678 | assert(CHUNK_ADDR2BASE(ptr) != ptr); |
1679 | |
1680 | chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr); |
1681 | pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> PAGE_SHIFT; |
1682 | mapbits = chunk->map[pageind-map_bias].bits; |
1683 | assert((mapbits & CHUNK_MAP_ALLOCATED) != 0); |
1684 | if ((mapbits & CHUNK_MAP_LARGE) == 0) { |
1685 | arena_run_t *run = (arena_run_t *)((uintptr_t)chunk + |
1686 | (uintptr_t)((pageind - (mapbits >> PAGE_SHIFT)) << |
1687 | PAGE_SHIFT)); |
1688 | dassert(run->magic == ARENA_RUN_MAGIC); |
1689 | size_t binind = arena_bin_index(chunk->arena, run->bin); |
1690 | arena_bin_info_t *bin_info = &arena_bin_info[binind]; |
1691 | assert(((uintptr_t)ptr - ((uintptr_t)run + |
1692 | (uintptr_t)bin_info->reg0_offset)) % bin_info->reg_size == |
1693 | 0); |
1694 | ret = bin_info->reg_size; |
1695 | } else { |
1696 | assert(((uintptr_t)ptr & PAGE_MASK) == 0); |
1697 | ret = mapbits & ~PAGE_MASK; |
1698 | if (prof_promote && ret == PAGE_SIZE && (mapbits & |
1699 | CHUNK_MAP_CLASS_MASK) != 0) { |
1700 | size_t binind = ((mapbits & CHUNK_MAP_CLASS_MASK) >> |
1701 | CHUNK_MAP_CLASS_SHIFT) - 1; |
1702 | assert(binind < nbins); |
1703 | ret = arena_bin_info[binind].reg_size; |
1704 | } |
1705 | assert(ret != 0); |
1706 | } |
1707 | |
1708 | return (ret); |
1709 | } |
1710 | #endif |
1711 | |
1712 | static void |
1713 | arena_dissociate_bin_run(arena_chunk_t *chunk, arena_run_t *run, |
1714 | arena_bin_t *bin) |
1715 | { |
1716 | |
1717 | /* Dissociate run from bin. */ |
1718 | if (run == bin->runcur) |
1719 | bin->runcur = NULL; |
1720 | else { |
1721 | size_t binind = arena_bin_index(chunk->arena, bin); |
1722 | arena_bin_info_t *bin_info = &arena_bin_info[binind]; |
1723 | |
1724 | if (bin_info->nregs != 1) { |
1725 | size_t run_pageind = (((uintptr_t)run - |
1726 | (uintptr_t)chunk)) >> PAGE_SHIFT; |
1727 | arena_chunk_map_t *run_mapelm = |
1728 | &chunk->map[run_pageind-map_bias]; |
1729 | /* |
1730 | * This block's conditional is necessary because if the |
1731 | * run only contains one region, then it never gets |
1732 | * inserted into the non-full runs tree. |
1733 | */ |
1734 | arena_run_tree_remove(&bin->runs, run_mapelm); |
1735 | } |
1736 | } |
1737 | } |
1738 | |
1739 | static void |
1740 | arena_dalloc_bin_run(arena_t *arena, arena_chunk_t *chunk, arena_run_t *run, |
1741 | arena_bin_t *bin) |
1742 | { |
1743 | size_t binind; |
1744 | arena_bin_info_t *bin_info; |
1745 | size_t npages, run_ind, past; |
1746 | |
1747 | assert(run != bin->runcur); |
1748 | assert(arena_run_tree_search(&bin->runs, &chunk->map[ |
1749 | (((uintptr_t)run-(uintptr_t)chunk)>>PAGE_SHIFT)-map_bias]) == NULL); |
1750 | |
1751 | binind = arena_bin_index(chunk->arena, run->bin); |
1752 | bin_info = &arena_bin_info[binind]; |
1753 | |
1754 | malloc_mutex_unlock(&bin->lock); |
1755 | /******************************/ |
1756 | npages = bin_info->run_size >> PAGE_SHIFT; |
1757 | run_ind = (size_t)(((uintptr_t)run - (uintptr_t)chunk) >> PAGE_SHIFT); |
1758 | past = (size_t)(PAGE_CEILING((uintptr_t)run + |
1759 | (uintptr_t)bin_info->reg0_offset + (uintptr_t)(run->nextind * |
1760 | bin_info->reg_size) - (uintptr_t)chunk) >> PAGE_SHIFT); |
1761 | malloc_mutex_lock(&arena->lock); |
1762 | |
1763 | /* |
1764 | * If the run was originally clean, and some pages were never touched, |
1765 | * trim the clean pages before deallocating the dirty portion of the |
1766 | * run. |
1767 | */ |
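/*
 * For example (editorial, hypothetical numbers): in a 4-page run that was
 * allocated clean and whose regions never reached past the second page,
 * past - run_ind == 2 < npages == 4, so the last two pages are trimmed
 * off while still clean instead of being deallocated as dirty along with
 * the rest of the run.
 */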
1768 | if ((chunk->map[run_ind-map_bias].bits & CHUNK_MAP_DIRTY) == 0 && past |
1769 | - run_ind < npages) { |
1770 | /* |
1771 | * Trim clean pages. Convert to large run beforehand. Set the |
1772 | * last map element first, in case this is a one-page run. |
1773 | */ |
1774 | chunk->map[run_ind+npages-1-map_bias].bits = CHUNK_MAP_LARGE | |
1775 | (chunk->map[run_ind+npages-1-map_bias].bits & |
1776 | CHUNK_MAP_FLAGS_MASK); |
1777 | chunk->map[run_ind-map_bias].bits = bin_info->run_size | |
1778 | CHUNK_MAP_LARGE | (chunk->map[run_ind-map_bias].bits & |
1779 | CHUNK_MAP_FLAGS_MASK); |
1780 | arena_run_trim_tail(arena, chunk, run, (npages << PAGE_SHIFT), |
1781 | ((past - run_ind) << PAGE_SHIFT), false); |
1782 | /* npages = past - run_ind; */ |
1783 | } |
1784 | #ifdef JEMALLOC_DEBUG |
1785 | run->magic = 0; |
1786 | #endif |
1787 | arena_run_dalloc(arena, run, true); |
1788 | malloc_mutex_unlock(&arena->lock); |
1789 | /****************************/ |
1790 | malloc_mutex_lock(&bin->lock); |
1791 | #ifdef JEMALLOC_STATS |
1792 | bin->stats.curruns--; |
1793 | #endif |
1794 | } |
1795 | |
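/*
 * Editorial note: preferring the lowest-addressed non-full run implements
 * an address-ordered first-fit policy; concentrating allocations at low
 * addresses makes higher runs more likely to drain completely and be
 * returned to the arena.
 */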
1796 | static void |
1797 | arena_bin_lower_run(arena_t *arena, arena_chunk_t *chunk, arena_run_t *run, |
1798 | arena_bin_t *bin) |
1799 | { |
1800 | |
1801 | /* |
1802 | * Make sure that bin->runcur always refers to the lowest non-full run, |
1803 | * if one exists. |
1804 | */ |
1805 | if (bin->runcur == NULL) |
1806 | bin->runcur = run; |
1807 | else if ((uintptr_t)run < (uintptr_t)bin->runcur) { |
1808 | /* Switch runcur. */ |
1809 | if (bin->runcur->nfree > 0) { |
1810 | arena_chunk_t *runcur_chunk = |
1811 | CHUNK_ADDR2BASE(bin->runcur); |
1812 | size_t runcur_pageind = (((uintptr_t)bin->runcur - |
1813 | (uintptr_t)runcur_chunk)) >> PAGE_SHIFT; |
1814 | arena_chunk_map_t *runcur_mapelm = |
1815 | &runcur_chunk->map[runcur_pageind-map_bias]; |
1816 | |
1817 | /* Insert runcur. */ |
1818 | arena_run_tree_insert(&bin->runs, runcur_mapelm); |
1819 | } |
1820 | bin->runcur = run; |
1821 | } else { |
1822 | size_t run_pageind = (((uintptr_t)run - |
1823 | (uintptr_t)chunk)) >> PAGE_SHIFT; |
1824 | arena_chunk_map_t *run_mapelm = |
1825 | &chunk->map[run_pageind-map_bias]; |
1826 | |
1827 | assert(arena_run_tree_search(&bin->runs, run_mapelm) == NULL); |
1828 | arena_run_tree_insert(&bin->runs, run_mapelm); |
1829 | } |
1830 | } |
1831 | |
1832 | void |
1833 | arena_dalloc_bin(arena_t *arena, arena_chunk_t *chunk, void *ptr, |
1834 | arena_chunk_map_t *mapelm) |
1835 | { |
1836 | size_t pageind; |
1837 | arena_run_t *run; |
1838 | arena_bin_t *bin; |
1839 | #if (defined(JEMALLOC_FILL) || defined(JEMALLOC_STATS)) |
1840 | size_t size; |
1841 | #endif |
1842 | |
1843 | pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> PAGE_SHIFT; |
1844 | run = (arena_run_t *)((uintptr_t)chunk + (uintptr_t)((pageind - |
1845 | (mapelm->bits >> PAGE_SHIFT)) << PAGE_SHIFT)); |
1846 | dassert(run->magic == ARENA_RUN_MAGIC); |
1847 | bin = run->bin; |
1848 | size_t binind = arena_bin_index(arena, bin); |
1849 | arena_bin_info_t *bin_info = &arena_bin_info[binind]; |
1850 | #if (defined(JEMALLOC_FILL) || defined(JEMALLOC_STATS)) |
1851 | size = bin_info->reg_size; |
1852 | #endif |
1853 | |
1854 | #ifdef JEMALLOC_FILL |
1855 | if (opt_junk) |
1856 | memset(ptr, 0x5a, size); |
1857 | #endif |
1858 | |
1859 | arena_run_reg_dalloc(run, ptr); |
1860 | if (run->nfree == bin_info->nregs) { |
1861 | arena_dissociate_bin_run(chunk, run, bin); |
1862 | arena_dalloc_bin_run(arena, chunk, run, bin); |
1863 | } else if (run->nfree == 1 && run != bin->runcur) |
1864 | arena_bin_lower_run(arena, chunk, run, bin); |
1865 | |
1866 | #ifdef JEMALLOC_STATS |
1867 | bin->stats.allocated -= size; |
1868 | bin->stats.ndalloc++; |
1869 | #endif |
1870 | } |
1871 | |
1872 | #ifdef JEMALLOC_STATS |
1873 | void |
1874 | arena_stats_merge(arena_t *arena, size_t *nactive, size_t *ndirty, |
1875 | arena_stats_t *astats, malloc_bin_stats_t *bstats, |
1876 | malloc_large_stats_t *lstats) |
1877 | { |
1878 | unsigned i; |
1879 | |
1880 | malloc_mutex_lock(&arena->lock); |
1881 | *nactive += arena->nactive; |
1882 | *ndirty += arena->ndirty; |
1883 | |
1884 | astats->mapped += arena->stats.mapped; |
1885 | astats->npurge += arena->stats.npurge; |
1886 | astats->nmadvise += arena->stats.nmadvise; |
1887 | astats->purged += arena->stats.purged; |
1888 | astats->allocated_large += arena->stats.allocated_large; |
1889 | astats->nmalloc_large += arena->stats.nmalloc_large; |
1890 | astats->ndalloc_large += arena->stats.ndalloc_large; |
1891 | astats->nrequests_large += arena->stats.nrequests_large; |
1892 | |
1893 | for (i = 0; i < nlclasses; i++) { |
1894 | lstats[i].nmalloc += arena->stats.lstats[i].nmalloc; |
1895 | lstats[i].ndalloc += arena->stats.lstats[i].ndalloc; |
1896 | lstats[i].nrequests += arena->stats.lstats[i].nrequests; |
1897 | lstats[i].highruns += arena->stats.lstats[i].highruns; |
1898 | lstats[i].curruns += arena->stats.lstats[i].curruns; |
1899 | } |
1900 | malloc_mutex_unlock(&arena->lock); |
1901 | |
1902 | for (i = 0; i < nbins; i++) { |
1903 | arena_bin_t *bin = &arena->bins[i]; |
1904 | |
1905 | malloc_mutex_lock(&bin->lock); |
1906 | bstats[i].allocated += bin->stats.allocated; |
1907 | bstats[i].nmalloc += bin->stats.nmalloc; |
1908 | bstats[i].ndalloc += bin->stats.ndalloc; |
1909 | bstats[i].nrequests += bin->stats.nrequests; |
1910 | #ifdef JEMALLOC_TCACHE |
1911 | bstats[i].nfills += bin->stats.nfills; |
1912 | bstats[i].nflushes += bin->stats.nflushes; |
1913 | #endif |
1914 | bstats[i].nruns += bin->stats.nruns; |
1915 | bstats[i].reruns += bin->stats.reruns; |
1916 | bstats[i].highruns += bin->stats.highruns; |
1917 | bstats[i].curruns += bin->stats.curruns; |
1918 | malloc_mutex_unlock(&bin->lock); |
1919 | } |
1920 | } |
1921 | #endif |
1922 | |
1923 | void |
1924 | arena_dalloc_large(arena_t *arena, arena_chunk_t *chunk, void *ptr) |
1925 | { |
1926 | |
1927 | /* Large allocation. */ |
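/*
 * Editorial note on the preprocessor weave below: with JEMALLOC_STATS
 * defined, the braced block always executes (stats must be updated even
 * when junk filling is off) and the inner "if (opt_junk)" guards only the
 * memset; with JEMALLOC_FILL alone, the outer "if (opt_junk)" guards the
 * entire block.
 */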
1928 | #ifdef JEMALLOC_FILL |
1929 | # ifndef JEMALLOC_STATS |
1930 | if (opt_junk) |
1931 | # endif |
1932 | #endif |
1933 | { |
1934 | #if (defined(JEMALLOC_FILL) || defined(JEMALLOC_STATS)) |
1935 | size_t pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> |
1936 | PAGE_SHIFT; |
1937 | size_t size = chunk->map[pageind-map_bias].bits & ~PAGE_MASK; |
1938 | #endif |
1939 | |
1940 | #ifdef JEMALLOC_FILL |
1941 | # ifdef JEMALLOC_STATS |
1942 | if (opt_junk) |
1943 | # endif |
1944 | memset(ptr, 0x5a, size); |
1945 | #endif |
1946 | #ifdef JEMALLOC_STATS |
1947 | arena->stats.ndalloc_large++; |
1948 | arena->stats.allocated_large -= size; |
1949 | arena->stats.lstats[(size >> PAGE_SHIFT) - 1].ndalloc++; |
1950 | arena->stats.lstats[(size >> PAGE_SHIFT) - 1].curruns--; |
1951 | #endif |
1952 | } |
1953 | |
1954 | arena_run_dalloc(arena, (arena_run_t *)ptr, true); |
1955 | } |
1956 | |
1957 | static void |
1958 | arena_ralloc_large_shrink(arena_t *arena, arena_chunk_t *chunk, void *ptr, |
1959 | size_t oldsize, size_t size) |
1960 | { |
1961 | |
1962 | assert(size < oldsize); |
1963 | |
1964 | /* |
1965 | * Shrink the run, and make trailing pages available for other |
1966 | * allocations. |
1967 | */ |
1968 | malloc_mutex_lock(&arena->lock); |
1969 | arena_run_trim_tail(arena, chunk, (arena_run_t *)ptr, oldsize, size, |
1970 | true); |
1971 | #ifdef JEMALLOC_STATS |
1972 | arena->stats.ndalloc_large++; |
1973 | arena->stats.allocated_large -= oldsize; |
1974 | arena->stats.lstats[(oldsize >> PAGE_SHIFT) - 1].ndalloc++; |
1975 | arena->stats.lstats[(oldsize >> PAGE_SHIFT) - 1].curruns--; |
1976 | |
1977 | arena->stats.nmalloc_large++; |
1978 | arena->stats.nrequests_large++; |
1979 | arena->stats.allocated_large += size; |
1980 | arena->stats.lstats[(size >> PAGE_SHIFT) - 1].nmalloc++; |
1981 | arena->stats.lstats[(size >> PAGE_SHIFT) - 1].nrequests++; |
1982 | arena->stats.lstats[(size >> PAGE_SHIFT) - 1].curruns++; |
1983 | if (arena->stats.lstats[(size >> PAGE_SHIFT) - 1].curruns > |
1984 | arena->stats.lstats[(size >> PAGE_SHIFT) - 1].highruns) { |
1985 | arena->stats.lstats[(size >> PAGE_SHIFT) - 1].highruns = |
1986 | arena->stats.lstats[(size >> PAGE_SHIFT) - 1].curruns; |
1987 | } |
1988 | #endif |
1989 | malloc_mutex_unlock(&arena->lock); |
1990 | } |
1991 | |
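/*
 * Worked example (editorial, assuming 4KiB pages): growing a 2-page run
 * to size == 4 pages with extra == 0 succeeds when, say, the 3 pages
 * following it are free: followsize == 12KiB >= size - oldsize == 8KiB,
 * so splitsize == size + extra - oldsize == 8KiB is carved off the
 * following run and merged, leaving a 4-page allocation and a 1-page
 * free remainder.
 */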
1992 | static bool |
1993 | arena_ralloc_large_grow(arena_t *arena, arena_chunk_t *chunk, void *ptr, |
1994 | size_t oldsize, size_t size, size_t extra, bool zero) |
1995 | { |
1996 | size_t pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> PAGE_SHIFT; |
1997 | size_t npages = oldsize >> PAGE_SHIFT; |
1998 | size_t followsize; |
1999 | |
2000 | assert(oldsize == (chunk->map[pageind-map_bias].bits & ~PAGE_MASK)); |
2001 | |
2002 | /* Try to extend the run. */ |
2003 | assert(size + extra > oldsize); |
2004 | malloc_mutex_lock(&arena->lock); |
2005 | if (pageind + npages < chunk_npages && |
2006 | (chunk->map[pageind+npages-map_bias].bits |
2007 | & CHUNK_MAP_ALLOCATED) == 0 && (followsize = |
2008 | chunk->map[pageind+npages-map_bias].bits & ~PAGE_MASK) >= size - |
2009 | oldsize) { |
2010 | /* |
2011 | * The next run is available and sufficiently large. Split the |
2012 | * following run, then merge the first part with the existing |
2013 | * allocation. |
2014 | */ |
2015 | size_t flag_dirty; |
2016 | size_t splitsize = (oldsize + followsize <= size + extra) |
2017 | ? followsize : size + extra - oldsize; |
2018 | arena_run_split(arena, (arena_run_t *)((uintptr_t)chunk + |
2019 | ((pageind+npages) << PAGE_SHIFT)), splitsize, true, zero); |
2020 | |
2021 | size = oldsize + splitsize; |
2022 | npages = size >> PAGE_SHIFT; |
2023 | |
2024 | /* |
2025 | * Mark the extended run as dirty if either portion of the run |
2026 | * was dirty before allocation. This is rather pedantic, |
2027 | * because there's not actually any sequence of events that |
2028 | * could cause the resulting run to be passed to |
2029 | * arena_run_dalloc() with the dirty argument set to false |
2030 | * (which is when dirty flag consistency would really matter). |
2031 | */ |
2032 | flag_dirty = (chunk->map[pageind-map_bias].bits & |
2033 | CHUNK_MAP_DIRTY) | |
2034 | (chunk->map[pageind+npages-1-map_bias].bits & |
2035 | CHUNK_MAP_DIRTY); |
2036 | chunk->map[pageind-map_bias].bits = size | flag_dirty |
2037 | | CHUNK_MAP_LARGE | CHUNK_MAP_ALLOCATED; |
2038 | chunk->map[pageind+npages-1-map_bias].bits = flag_dirty | |
2039 | CHUNK_MAP_LARGE | CHUNK_MAP_ALLOCATED; |
2040 | |
2041 | #ifdef JEMALLOC_STATS |
2042 | arena->stats.ndalloc_large++; |
2043 | arena->stats.allocated_large -= oldsize; |
2044 | arena->stats.lstats[(oldsize >> PAGE_SHIFT) - 1].ndalloc++; |
2045 | arena->stats.lstats[(oldsize >> PAGE_SHIFT) - 1].curruns--; |
2046 | |
2047 | arena->stats.nmalloc_large++; |
2048 | arena->stats.nrequests_large++; |
2049 | arena->stats.allocated_large += size; |
2050 | arena->stats.lstats[(size >> PAGE_SHIFT) - 1].nmalloc++; |
2051 | arena->stats.lstats[(size >> PAGE_SHIFT) - 1].nrequests++; |
2052 | arena->stats.lstats[(size >> PAGE_SHIFT) - 1].curruns++; |
2053 | if (arena->stats.lstats[(size >> PAGE_SHIFT) - 1].curruns > |
2054 | arena->stats.lstats[(size >> PAGE_SHIFT) - 1].highruns) { |
2055 | arena->stats.lstats[(size >> PAGE_SHIFT) - 1].highruns = |
2056 | arena->stats.lstats[(size >> PAGE_SHIFT) - |
2057 | 1].curruns; |
2058 | } |
2059 | #endif |
2060 | malloc_mutex_unlock(&arena->lock); |
2061 | return (false); |
2062 | } |
2063 | malloc_mutex_unlock(&arena->lock); |
2064 | |
2065 | return (true); |
2066 | } |
2067 | |
2068 | /* |
2069 | * Try to resize a large allocation in order to avoid copying. This always
2070 | * fails when growing an object if the pages following it are already in use.
2071 | */ |
2072 | static bool |
2073 | arena_ralloc_large(void *ptr, size_t oldsize, size_t size, size_t extra, |
2074 | bool zero) |
2075 | { |
2076 | size_t psize; |
2077 | |
2078 | psize = PAGE_CEILING(size + extra); |
2079 | if (psize == oldsize) { |
2080 | /* Same size class. */ |
2081 | #ifdef JEMALLOC_FILL |
2082 | if (opt_junk && size < oldsize) { |
2083 | memset((void *)((uintptr_t)ptr + size), 0x5a, oldsize - |
2084 | size); |
2085 | } |
2086 | #endif |
2087 | return (false); |
2088 | } else { |
2089 | arena_chunk_t *chunk; |
2090 | arena_t *arena; |
2091 | |
2092 | chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr); |
2093 | arena = chunk->arena; |
2094 | dassert(arena->magic == ARENA_MAGIC); |
2095 | |
2096 | if (psize < oldsize) { |
2097 | #ifdef JEMALLOC_FILL |
2098 | /* Fill before shrinking in order to avoid a race. */
2099 | if (opt_junk) { |
2100 | memset((void *)((uintptr_t)ptr + size), 0x5a, |
2101 | oldsize - size); |
2102 | } |
2103 | #endif |
2104 | arena_ralloc_large_shrink(arena, chunk, ptr, oldsize, |
2105 | psize); |
2106 | return (false); |
2107 | } else { |
2108 | bool ret = arena_ralloc_large_grow(arena, chunk, ptr, |
2109 | oldsize, PAGE_CEILING(size), |
2110 | psize - PAGE_CEILING(size), zero); |
2111 | #ifdef JEMALLOC_FILL |
2112 | if (ret == false && zero == false && opt_zero) { |
2113 | memset((void *)((uintptr_t)ptr + oldsize), 0, |
2114 | size - oldsize); |
2115 | } |
2116 | #endif |
2117 | return (ret); |
2118 | } |
2119 | } |
2120 | } |
2121 | |
2122 | void * |
2123 | arena_ralloc_no_move(void *ptr, size_t oldsize, size_t size, size_t extra, |
2124 | bool zero) |
2125 | { |
2126 | |
2127 | /* |
2128 | * Avoid moving the allocation if the size class can be left the same. |
2129 | */ |
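/*
 * For example (editorial, assuming a 16-byte quantum): oldsize == 32,
 * size == 28, extra == 8 keeps the allocation in place via the second
 * clause (size <= oldsize && size + extra >= oldsize), even though
 * SMALL_SIZE2BIN(36) would name a different bin.
 */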
2130 | if (oldsize <= arena_maxclass) { |
2131 | if (oldsize <= small_maxclass) { |
2132 | assert(arena_bin_info[SMALL_SIZE2BIN(oldsize)].reg_size |
2133 | == oldsize); |
2134 | if ((size + extra <= small_maxclass && |
2135 | SMALL_SIZE2BIN(size + extra) == |
2136 | SMALL_SIZE2BIN(oldsize)) || (size <= oldsize && |
2137 | size + extra >= oldsize)) { |
2138 | #ifdef JEMALLOC_FILL |
2139 | if (opt_junk && size < oldsize) { |
2140 | memset((void *)((uintptr_t)ptr + size), |
2141 | 0x5a, oldsize - size); |
2142 | } |
2143 | #endif |
2144 | return (ptr); |
2145 | } |
2146 | } else { |
2147 | assert(size <= arena_maxclass); |
2148 | if (size + extra > small_maxclass) { |
2149 | if (arena_ralloc_large(ptr, oldsize, size, |
2150 | extra, zero) == false) |
2151 | return (ptr); |
2152 | } |
2153 | } |
2154 | } |
2155 | |
2156 | /* Reallocation would require a move. */ |
2157 | return (NULL); |
2158 | } |
2159 | |
2160 | void * |
2161 | arena_ralloc(void *ptr, size_t oldsize, size_t size, size_t extra, |
2162 | size_t alignment, bool zero) |
2163 | { |
2164 | void *ret; |
2165 | size_t copysize; |
2166 | |
2167 | /* Try to avoid moving the allocation. */ |
2168 | ret = arena_ralloc_no_move(ptr, oldsize, size, extra, zero); |
2169 | if (ret != NULL) |
2170 | return (ret); |
2171 | |
2172 | /* |
2173 | * size and oldsize are different enough that we need to move the |
2174 | * object. In that case, fall back to allocating new space and |
2175 | * copying. |
2176 | */ |
2177 | if (alignment != 0) { |
2178 | size_t usize = sa2u(size + extra, alignment, NULL); |
2179 | if (usize == 0) |
2180 | return (NULL); |
2181 | ret = ipalloc(usize, alignment, zero); |
2182 | } else |
2183 | ret = arena_malloc(size + extra, zero); |
2184 | |
2185 | if (ret == NULL) { |
2186 | if (extra == 0) |
2187 | return (NULL); |
2188 | /* Try again, this time without extra. */ |
2189 | if (alignment != 0) { |
2190 | size_t usize = sa2u(size, alignment, NULL); |
2191 | if (usize == 0) |
2192 | return (NULL); |
2193 | ret = ipalloc(usize, alignment, zero); |
2194 | } else |
2195 | ret = arena_malloc(size, zero); |
2196 | |
2197 | if (ret == NULL) |
2198 | return (NULL); |
2199 | } |
2200 | |
2201 | /* Junk/zero-filling was already done by ipalloc()/arena_malloc(). */
2202 | |
2203 | /* |
2204 | * Copy at most size bytes (not size+extra), since the caller has no |
2205 | * expectation that the extra bytes will be reliably preserved. |
2206 | */ |
2207 | copysize = (size < oldsize) ? size : oldsize; |
2208 | memcpy(ret, ptr, copysize); |
2209 | idalloc(ptr); |
2210 | return (ret); |
2211 | } |
2212 | |
2213 | bool |
2214 | arena_new(arena_t *arena, unsigned ind) |
2215 | { |
2216 | unsigned i; |
2217 | arena_bin_t *bin; |
2218 | |
2219 | arena->ind = ind; |
2220 | arena->nthreads = 0; |
2221 | |
2222 | if (malloc_mutex_init(&arena->lock)) |
2223 | return (true); |
2224 | |
2225 | #ifdef JEMALLOC_STATS |
2226 | memset(&arena->stats, 0, sizeof(arena_stats_t)); |
2227 | arena->stats.lstats = (malloc_large_stats_t *)base_alloc(nlclasses * |
2228 | sizeof(malloc_large_stats_t)); |
2229 | if (arena->stats.lstats == NULL) |
2230 | return (true); |
2231 | memset(arena->stats.lstats, 0, nlclasses * |
2232 | sizeof(malloc_large_stats_t)); |
2233 | # ifdef JEMALLOC_TCACHE |
2234 | ql_new(&arena->tcache_ql); |
2235 | # endif |
2236 | #endif |
2237 | |
2238 | #ifdef JEMALLOC_PROF |
2239 | arena->prof_accumbytes = 0; |
2240 | #endif |
2241 | |
2242 | /* Initialize chunks. */ |
2243 | ql_new(&arena->chunks_dirty); |
2244 | arena->spare = NULL; |
2245 | |
2246 | arena->nactive = 0; |
2247 | arena->ndirty = 0; |
2248 | arena->npurgatory = 0; |
2249 | |
2250 | arena_avail_tree_new(&arena->runs_avail_clean); |
2251 | arena_avail_tree_new(&arena->runs_avail_dirty); |
2252 | |
2253 | /* Initialize bins. */ |
2254 | i = 0; |
2255 | #ifdef JEMALLOC_TINY |
2256 | /* (2^n)-spaced tiny bins. */ |
2257 | for (; i < ntbins; i++) { |
2258 | bin = &arena->bins[i]; |
2259 | if (malloc_mutex_init(&bin->lock)) |
2260 | return (true); |
2261 | bin->runcur = NULL; |
2262 | arena_run_tree_new(&bin->runs); |
2263 | #ifdef JEMALLOC_STATS |
2264 | memset(&bin->stats, 0, sizeof(malloc_bin_stats_t)); |
2265 | #endif |
2266 | } |
2267 | #endif |
2268 | |
2269 | /* Quantum-spaced bins. */ |
2270 | for (; i < ntbins + nqbins; i++) { |
2271 | bin = &arena->bins[i]; |
2272 | if (malloc_mutex_init(&bin->lock)) |
2273 | return (true); |
2274 | bin->runcur = NULL; |
2275 | arena_run_tree_new(&bin->runs); |
2276 | #ifdef JEMALLOC_STATS |
2277 | memset(&bin->stats, 0, sizeof(malloc_bin_stats_t)); |
2278 | #endif |
2279 | } |
2280 | |
2281 | /* Cacheline-spaced bins. */ |
2282 | for (; i < ntbins + nqbins + ncbins; i++) { |
2283 | bin = &arena->bins[i]; |
2284 | if (malloc_mutex_init(&bin->lock)) |
2285 | return (true); |
2286 | bin->runcur = NULL; |
2287 | arena_run_tree_new(&bin->runs); |
2288 | #ifdef JEMALLOC_STATS |
2289 | memset(&bin->stats, 0, sizeof(malloc_bin_stats_t)); |
2290 | #endif |
2291 | } |
2292 | |
2293 | /* Subpage-spaced bins. */ |
2294 | for (; i < nbins; i++) { |
2295 | bin = &arena->bins[i]; |
2296 | if (malloc_mutex_init(&bin->lock)) |
2297 | return (true); |
2298 | bin->runcur = NULL; |
2299 | arena_run_tree_new(&bin->runs); |
2300 | #ifdef JEMALLOC_STATS |
2301 | memset(&bin->stats, 0, sizeof(malloc_bin_stats_t)); |
2302 | #endif |
2303 | } |
2304 | |
2305 | #ifdef JEMALLOC_DEBUG |
2306 | arena->magic = ARENA_MAGIC; |
2307 | #endif |
2308 | |
2309 | return (false); |
2310 | } |
2311 | |
2312 | #ifdef JEMALLOC_DEBUG |
2313 | static void |
2314 | small_size2bin_validate(void) |
2315 | { |
2316 | size_t i, size, binind; |
2317 | |
2318 | i = 1; |
2319 | # ifdef JEMALLOC_TINY |
2320 | /* Tiny. */ |
2321 | for (; i < (1U << LG_TINY_MIN); i++) { |
2322 | size = pow2_ceil(1U << LG_TINY_MIN); |
2323 | binind = ffs((int)(size >> (LG_TINY_MIN + 1))); |
2324 | assert(SMALL_SIZE2BIN(i) == binind); |
2325 | } |
2326 | for (; i < qspace_min; i++) { |
2327 | size = pow2_ceil(i); |
2328 | binind = ffs((int)(size >> (LG_TINY_MIN + 1))); |
2329 | assert(SMALL_SIZE2BIN(i) == binind); |
2330 | } |
2331 | # endif |
2332 | /* Quantum-spaced. */ |
2333 | for (; i <= qspace_max; i++) { |
2334 | size = QUANTUM_CEILING(i); |
2335 | binind = ntbins + (size >> LG_QUANTUM) - 1; |
2336 | assert(SMALL_SIZE2BIN(i) == binind); |
2337 | } |
2338 | /* Cacheline-spaced. */ |
2339 | for (; i <= cspace_max; i++) { |
2340 | size = CACHELINE_CEILING(i); |
2341 | binind = ntbins + nqbins + ((size - cspace_min) >> |
2342 | LG_CACHELINE); |
2343 | assert(SMALL_SIZE2BIN(i) == binind); |
2344 | } |
2345 | /* Sub-page. */ |
2346 | for (; i <= sspace_max; i++) { |
2347 | size = SUBPAGE_CEILING(i); |
2348 | binind = ntbins + nqbins + ncbins + ((size - sspace_min) |
2349 | >> LG_SUBPAGE); |
2350 | assert(SMALL_SIZE2BIN(i) == binind); |
2351 | } |
2352 | } |
2353 | #endif |
2354 | |
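/*
 * Lookup semantics (editorial summary): a request of s bytes indexes
 * element (s-1) >> LG_TINY_MIN, so each table entry covers one
 * TINY_MIN-sized span. For example, with LG_TINY_MIN == 3, an 8-byte
 * quantum, and tiny bins disabled, sizes 17..24 all land in element 2,
 * which holds the bin index of the 24-byte class.
 */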
2355 | static bool |
2356 | small_size2bin_init(void) |
2357 | { |
2358 | |
2359 | if (opt_lg_qspace_max != LG_QSPACE_MAX_DEFAULT |
2360 | || opt_lg_cspace_max != LG_CSPACE_MAX_DEFAULT |
2361 | || (sizeof(const_small_size2bin) != ((small_maxclass-1) >> |
2362 | LG_TINY_MIN) + 1)) |
2363 | return (small_size2bin_init_hard()); |
2364 | |
2365 | small_size2bin = const_small_size2bin; |
2366 | #ifdef JEMALLOC_DEBUG |
2367 | small_size2bin_validate(); |
2368 | #endif |
2369 | return (false); |
2370 | } |
2371 | |
2372 | static bool |
2373 | small_size2bin_init_hard(void) |
2374 | { |
2375 | size_t i, size, binind; |
2376 | uint8_t *custom_small_size2bin; |
2377 | #define CUSTOM_SMALL_SIZE2BIN(s) \ |
2378 | custom_small_size2bin[(s-1) >> LG_TINY_MIN] |
2379 | |
2380 | assert(opt_lg_qspace_max != LG_QSPACE_MAX_DEFAULT |
2381 | || opt_lg_cspace_max != LG_CSPACE_MAX_DEFAULT |
2382 | || (sizeof(const_small_size2bin) != ((small_maxclass-1) >> |
2383 | LG_TINY_MIN) + 1)); |
2384 | |
2385 | custom_small_size2bin = (uint8_t *) |
2386 | base_alloc(small_maxclass >> LG_TINY_MIN); |
2387 | if (custom_small_size2bin == NULL) |
2388 | return (true); |
2389 | |
2390 | i = 1; |
2391 | #ifdef JEMALLOC_TINY |
2392 | /* Tiny. */ |
2393 | for (; i < (1U << LG_TINY_MIN); i += TINY_MIN) { |
2394 | size = pow2_ceil(1U << LG_TINY_MIN); |
2395 | binind = ffs((int)(size >> (LG_TINY_MIN + 1))); |
2396 | CUSTOM_SMALL_SIZE2BIN(i) = binind; |
2397 | } |
2398 | for (; i < qspace_min; i += TINY_MIN) { |
2399 | size = pow2_ceil(i); |
2400 | binind = ffs((int)(size >> (LG_TINY_MIN + 1))); |
2401 | CUSTOM_SMALL_SIZE2BIN(i) = binind; |
2402 | } |
2403 | #endif |
2404 | /* Quantum-spaced. */ |
2405 | for (; i <= qspace_max; i += TINY_MIN) { |
2406 | size = QUANTUM_CEILING(i); |
2407 | binind = ntbins + (size >> LG_QUANTUM) - 1; |
2408 | CUSTOM_SMALL_SIZE2BIN(i) = binind; |
2409 | } |
2410 | /* Cacheline-spaced. */ |
2411 | for (; i <= cspace_max; i += TINY_MIN) { |
2412 | size = CACHELINE_CEILING(i); |
2413 | binind = ntbins + nqbins + ((size - cspace_min) >> |
2414 | LG_CACHELINE); |
2415 | CUSTOM_SMALL_SIZE2BIN(i) = binind; |
2416 | } |
2417 | /* Sub-page. */ |
2418 | for (; i <= sspace_max; i += TINY_MIN) { |
2419 | size = SUBPAGE_CEILING(i); |
2420 | binind = ntbins + nqbins + ncbins + ((size - sspace_min) >> |
2421 | LG_SUBPAGE); |
2422 | CUSTOM_SMALL_SIZE2BIN(i) = binind; |
2423 | } |
2424 | |
2425 | small_size2bin = custom_small_size2bin; |
2426 | #ifdef JEMALLOC_DEBUG |
2427 | small_size2bin_validate(); |
2428 | #endif |
2429 | return (false); |
2430 | #undef CUSTOM_SMALL_SIZE2BIN |
2431 | } |
2432 | |
2433 | /* |
2434 | * Calculate bin_info->run_size such that it meets the following constraints: |
2435 | * |
2436 | * *) bin_info->run_size >= min_run_size |
2437 | * *) bin_info->run_size <= arena_maxclass |
2438 | * *) run header overhead <= RUN_MAX_OVRHD (or header overhead relaxed). |
2439 | * *) bin_info->nregs <= RUN_MAXREGS |
2440 | * |
2441 | * bin_info->nregs, bin_info->bitmap_offset, and bin_info->reg0_offset are also |
2442 | * calculated here, since these settings are all interdependent. |
2443 | */ |
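/*
 * Overhead math (editorial sketch, assuming the historical defaults
 * RUN_BFP == 12 and RUN_MAX_OVRHD == 0x3d): the expansion loop below
 * keeps growing run_size while (try_reg0_offset << RUN_BFP) >
 * RUN_MAX_OVRHD * try_run_size, i.e. while wasted header space exceeds
 * RUN_MAX_OVRHD / 2^RUN_BFP of the run, roughly 61/4096 ~= 1.5%.
 */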
2444 | static size_t |
2445 | bin_info_run_size_calc(arena_bin_info_t *bin_info, size_t min_run_size) |
2446 | { |
2447 | size_t try_run_size, good_run_size; |
2448 | uint32_t try_nregs, good_nregs; |
2449 | uint32_t try_hdr_size, good_hdr_size; |
2450 | uint32_t try_bitmap_offset, good_bitmap_offset; |
2451 | #ifdef JEMALLOC_PROF |
2452 | uint32_t try_ctx0_offset, good_ctx0_offset; |
2453 | #endif |
2454 | uint32_t try_reg0_offset, good_reg0_offset; |
2455 | |
2456 | assert(min_run_size >= PAGE_SIZE); |
2457 | assert(min_run_size <= arena_maxclass); |
2458 | |
2459 | /* |
2460 | * Calculate known-valid settings before entering the run_size |
2461 | * expansion loop, so that the first part of the loop always copies |
2462 | * valid settings. |
2463 | * |
2464 | * The do..while loop iteratively reduces the number of regions until |
2465 | * the run header and the regions no longer overlap. A closed formula |
2466 | * would be quite messy, since there is an interdependency between the |
2467 | * header's mask length and the number of regions. |
2468 | */ |
2469 | try_run_size = min_run_size; |
2470 | try_nregs = ((try_run_size - sizeof(arena_run_t)) / bin_info->reg_size) |
2471 | + 1; /* Counteract try_nregs-- in loop. */
2472 | if (try_nregs > RUN_MAXREGS) { |
2473 | try_nregs = RUN_MAXREGS |
2474 | + 1; /* Counteract try_nregs-- in loop. */
2475 | } |
2476 | do { |
2477 | try_nregs--; |
2478 | try_hdr_size = sizeof(arena_run_t); |
2479 | /* Pad to a long boundary. */ |
2480 | try_hdr_size = LONG_CEILING(try_hdr_size); |
2481 | try_bitmap_offset = try_hdr_size; |
2482 | /* Add space for bitmap. */ |
2483 | try_hdr_size += bitmap_size(try_nregs); |
2484 | #ifdef JEMALLOC_PROF |
2485 | if (opt_prof && prof_promote == false) { |
2486 | /* Pad to a quantum boundary. */ |
2487 | try_hdr_size = QUANTUM_CEILING(try_hdr_size); |
2488 | try_ctx0_offset = try_hdr_size; |
2489 | /* Add space for one (prof_ctx_t *) per region. */ |
2490 | try_hdr_size += try_nregs * sizeof(prof_ctx_t *); |
2491 | } else |
2492 | try_ctx0_offset = 0; |
2493 | #endif |
2494 | try_reg0_offset = try_run_size - (try_nregs * |
2495 | bin_info->reg_size); |
2496 | } while (try_hdr_size > try_reg0_offset); |
2497 | |
2498 | /* run_size expansion loop. */ |
2499 | do { |
2500 | /* |
2501 | * Copy valid settings before trying more aggressive settings. |
2502 | */ |
2503 | good_run_size = try_run_size; |
2504 | good_nregs = try_nregs; |
2505 | good_hdr_size = try_hdr_size; |
2506 | good_bitmap_offset = try_bitmap_offset; |
2507 | #ifdef JEMALLOC_PROF |
2508 | good_ctx0_offset = try_ctx0_offset; |
2509 | #endif |
2510 | good_reg0_offset = try_reg0_offset; |
2511 | |
2512 | /* Try more aggressive settings. */ |
2513 | try_run_size += PAGE_SIZE; |
2514 | try_nregs = ((try_run_size - sizeof(arena_run_t)) / |
2515 | bin_info->reg_size) |
2516 | + 1; /* Counteract try_nregs-- in loop. */
2517 | if (try_nregs > RUN_MAXREGS) { |
2518 | try_nregs = RUN_MAXREGS |
2519 | + 1; /* Counteract try_nregs-- in loop. */
2520 | } |
2521 | do { |
2522 | try_nregs--; |
2523 | try_hdr_size = sizeof(arena_run_t); |
2524 | /* Pad to a long boundary. */ |
2525 | try_hdr_size = LONG_CEILING(try_hdr_size); |
2526 | try_bitmap_offset = try_hdr_size; |
2527 | /* Add space for bitmap. */ |
2528 | try_hdr_size += bitmap_size(try_nregs); |
2529 | #ifdef JEMALLOC_PROF |
2530 | if (opt_prof && prof_promote == false) { |
2531 | /* Pad to a quantum boundary. */ |
2532 | try_hdr_size = QUANTUM_CEILING(try_hdr_size); |
2533 | try_ctx0_offset = try_hdr_size; |
2534 | /* |
2535 | * Add space for one (prof_ctx_t *) per region. |
2536 | */ |
2537 | try_hdr_size += try_nregs * |
2538 | sizeof(prof_ctx_t *); |
2539 | } |
2540 | #endif |
2541 | try_reg0_offset = try_run_size - (try_nregs * |
2542 | bin_info->reg_size); |
2543 | } while (try_hdr_size > try_reg0_offset); |
2544 | } while (try_run_size <= arena_maxclass |
2546 | && RUN_MAX_OVRHD * (bin_info->reg_size << 3) > RUN_MAX_OVRHD_RELAX |
2547 | && (try_reg0_offset << RUN_BFP) > RUN_MAX_OVRHD * try_run_size |
2548 | && try_nregs < RUN_MAXREGS); |
2549 | |
2550 | assert(good_hdr_size <= good_reg0_offset); |
2551 | |
2552 | /* Copy final settings. */ |
2553 | bin_info->run_size = good_run_size; |
2554 | bin_info->nregs = good_nregs; |
2555 | bin_info->bitmap_offset = good_bitmap_offset; |
2556 | #ifdef JEMALLOC_PROF |
2557 | bin_info->ctx0_offset = good_ctx0_offset; |
2558 | #endif |
2559 | bin_info->reg0_offset = good_reg0_offset; |
2560 | |
2561 | return (good_run_size); |
2562 | } |
2563 | |
2564 | static bool |
2565 | bin_info_init(void) |
2566 | { |
2567 | arena_bin_info_t *bin_info; |
2568 | unsigned i; |
2569 | size_t prev_run_size; |
2570 | |
2571 | arena_bin_info = base_alloc(sizeof(arena_bin_info_t) * nbins); |
2572 | if (arena_bin_info == NULL) |
2573 | return (true); |
2574 | |
2575 | prev_run_size = PAGE_SIZE; |
2576 | i = 0; |
2577 | #ifdef JEMALLOC_TINY |
2578 | /* (2^n)-spaced tiny bins. */ |
2579 | for (; i < ntbins; i++) { |
2580 | bin_info = &arena_bin_info[i]; |
2581 | bin_info->reg_size = (1U << (LG_TINY_MIN + i)); |
2582 | prev_run_size = bin_info_run_size_calc(bin_info, prev_run_size); |
2583 | bitmap_info_init(&bin_info->bitmap_info, bin_info->nregs); |
2584 | } |
2585 | #endif |
2586 | |
2587 | /* Quantum-spaced bins. */ |
2588 | for (; i < ntbins + nqbins; i++) { |
2589 | bin_info = &arena_bin_info[i]; |
2590 | bin_info->reg_size = (i - ntbins + 1) << LG_QUANTUM; |
2591 | prev_run_size = bin_info_run_size_calc(bin_info, prev_run_size); |
2592 | bitmap_info_init(&bin_info->bitmap_info, bin_info->nregs); |
2593 | } |
2594 | |
2595 | /* Cacheline-spaced bins. */ |
2596 | for (; i < ntbins + nqbins + ncbins; i++) { |
2597 | bin_info = &arena_bin_info[i]; |
2598 | bin_info->reg_size = cspace_min + ((i - (ntbins + nqbins)) << |
2599 | LG_CACHELINE); |
2600 | prev_run_size = bin_info_run_size_calc(bin_info, prev_run_size); |
2601 | bitmap_info_init(&bin_info->bitmap_info, bin_info->nregs); |
2602 | } |
2603 | |
2604 | /* Subpage-spaced bins. */ |
2605 | for (; i < nbins; i++) { |
2606 | bin_info = &arena_bin_info[i]; |
2607 | bin_info->reg_size = sspace_min + ((i - (ntbins + nqbins + |
2608 | ncbins)) << LG_SUBPAGE); |
2609 | prev_run_size = bin_info_run_size_calc(bin_info, prev_run_size); |
2610 | bitmap_info_init(&bin_info->bitmap_info, bin_info->nregs); |
2611 | } |
2612 | |
2613 | return (false); |
2614 | } |
2615 | |
2616 | bool |
2617 | arena_boot(void) |
2618 | { |
2619 | size_t header_size; |
2620 | unsigned i; |
2621 | |
2622 | /* Set variables according to the value of opt_lg_[qc]space_max. */ |
2623 | qspace_max = (1U << opt_lg_qspace_max); |
2624 | cspace_min = CACHELINE_CEILING(qspace_max); |
2625 | if (cspace_min == qspace_max) |
2626 | cspace_min += CACHELINE; |
2627 | cspace_max = (1U << opt_lg_cspace_max); |
2628 | sspace_min = SUBPAGE_CEILING(cspace_max); |
2629 | if (sspace_min == cspace_max) |
2630 | sspace_min += SUBPAGE; |
2631 | assert(sspace_min < PAGE_SIZE); |
2632 | sspace_max = PAGE_SIZE - SUBPAGE; |
2633 | |
2634 | #ifdef JEMALLOC_TINY |
2635 | assert(LG_QUANTUM >= LG_TINY_MIN); |
2636 | #endif |
2637 | assert(ntbins <= LG_QUANTUM); |
2638 | nqbins = qspace_max >> LG_QUANTUM; |
2639 | ncbins = ((cspace_max - cspace_min) >> LG_CACHELINE) + 1; |
2640 | nsbins = ((sspace_max - sspace_min) >> LG_SUBPAGE) + 1; |
2641 | nbins = ntbins + nqbins + ncbins + nsbins; |
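/*
 * Worked example (editorial, hypothetical configuration): with a 16-byte
 * quantum, qspace_max == 128, 64-byte cachelines, cspace_max == 1024,
 * 256-byte subpages, and 4KiB pages: nqbins == 128 >> 4 == 8,
 * cspace_min == 192, ncbins == ((1024 - 192) >> 6) + 1 == 14,
 * sspace_min == 1280, sspace_max == 3840, and
 * nsbins == ((3840 - 1280) >> 8) + 1 == 11.
 */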
2642 | |
2643 | /* |
2644 | * The small_size2bin lookup table uses uint8_t to encode each bin |
2645 | * index, so we cannot support more than 256 small size classes. This |
2646 | * limit is difficult to exceed (not even possible with 16B quantum and |
2647 | * 4KiB pages), and such configurations are impractical, but |
2648 | * nonetheless we need to protect against this case in order to avoid |
2649 | * undefined behavior. |
2650 | * |
2651 | * Further constrain nbins to 255 if prof_promote is true, since all |
2652 | * small size classes, plus a "not small" size class must be stored in |
2653 | * 8 bits of arena_chunk_map_t's bits field. |
2654 | */ |
2655 | #ifdef JEMALLOC_PROF |
2656 | if (opt_prof && prof_promote) { |
2657 | if (nbins > 255) { |
2658 | char line_buf[UMAX2S_BUFSIZE]; |
2659 | malloc_write("<jemalloc>: Too many small size classes ("); |
2660 | malloc_write(u2s(nbins, 10, line_buf)); |
2661 | malloc_write(" > max 255)\n"); |
2662 | abort(); |
2663 | } |
2664 | } else |
2665 | #endif |
2666 | if (nbins > 256) { |
2667 | char line_buf[UMAX2S_BUFSIZE]; |
2668 | malloc_write("<jemalloc>: Too many small size classes ("); |
2669 | malloc_write(u2s(nbins, 10, line_buf)); |
2670 | malloc_write(" > max 256)\n"); |
2671 | abort(); |
2672 | } |
2673 | |
2674 | /* |
2675 | * Compute the header size such that it is large enough to contain the |
2676 | * page map. The page map is biased to omit entries for the header |
2677 | * itself, so some iteration is necessary to compute the map bias. |
2678 | * |
2679 | * 1) Compute safe header_size and map_bias values that include enough |
2680 | * space for an unbiased page map. |
2681 | * 2) Refine map_bias based on (1) to omit the header pages in the page |
2682 | * map. The resulting map_bias may be one too small. |
2683 | * 3) Refine map_bias based on (2). The result will be >= the result |
2684 | * from (2), and will always be correct. |
2685 | */ |
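/*
 * Worked example (editorial, hypothetical sizes, 4KiB pages): with
 * chunk_npages == 1024, a 32-byte arena_chunk_map_t, and a 2KiB fixed
 * header, pass 1 computes header_size == 2048 + 32*1024 == 34816 bytes
 * (8.5 pages), so map_bias == 9; pass 2 recomputes with 1015 map entries
 * (34528 bytes), which still rounds up to 9 pages; pass 3 confirms the
 * fixed point.
 */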
2686 | map_bias = 0; |
2687 | for (i = 0; i < 3; i++) { |
2688 | header_size = offsetof(arena_chunk_t, map) |
2689 | + (sizeof(arena_chunk_map_t) * (chunk_npages-map_bias)); |
2690 | map_bias = (header_size >> PAGE_SHIFT) + ((header_size & |
2691 | PAGE_MASK) != 0); |
2692 | } |
2693 | assert(map_bias > 0); |
2694 | |
2695 | arena_maxclass = chunksize - (map_bias << PAGE_SHIFT); |
2696 | |
2697 | if (small_size2bin_init()) |
2698 | return (true); |
2699 | |
2700 | if (bin_info_init()) |
2701 | return (true); |
2702 | |
2703 | return (false); |
2704 | } |