#define	JEMALLOC_TCACHE_C_
#include "jemalloc/internal/jemalloc_internal.h"

/******************************************************************************/
/* Data. */

malloc_tsd_data(, tcache, tcache_t *, NULL)
malloc_tsd_data(, tcache_enabled, tcache_enabled_t, tcache_enabled_default)

bool	opt_tcache = true;
ssize_t	opt_lg_tcache_max = LG_TCACHE_MAXCLASS_DEFAULT;

tcache_bin_info_t	*tcache_bin_info;
static unsigned		stack_nelms; /* Total stack elms per tcache. */

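/*
 * nhbins and tcache_maxclass are computed in tcache_boot0(): nhbins counts
 * the NBINS small bins plus one bin for each tcache-cached large size class.
 */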
size_t	nhbins;
size_t	tcache_maxclass;

/******************************************************************************/

size_t
tcache_salloc(const void *ptr)
{

	return (arena_salloc(ptr, false));
}

void
tcache_event_hard(tcache_t *tcache)
{
	size_t binind = tcache->next_gc_bin;
	tcache_bin_t *tbin = &tcache->tbins[binind];
	tcache_bin_info_t *tbin_info = &tcache_bin_info[binind];

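	/*
	 * GC is incremental: each invocation inspects a single bin, and
	 * next_gc_bin cycles through all nhbins bins over successive events.
	 */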
	if (tbin->low_water > 0) {
		/*
		 * Flush (ceiling) 3/4 of the objects below the low water mark.
		 */
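		/*
		 * For example, ncached == 20 and low_water == 8 yield
		 * rem == 20 - 8 + (8 >> 2) == 14, i.e. 6 of the 8 objects
		 * that went unused since the last event are flushed.
		 */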
		if (binind < NBINS) {
			tcache_bin_flush_small(tbin, binind, tbin->ncached -
			    tbin->low_water + (tbin->low_water >> 2), tcache);
		} else {
			tcache_bin_flush_large(tbin, binind, tbin->ncached -
			    tbin->low_water + (tbin->low_water >> 2), tcache);
		}
		/*
		 * Reduce fill count by 2X.  Limit lg_fill_div such that the
		 * fill count is always at least 1.
		 */
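		/*
		 * (The fill count applied by arena_tcache_fill_small() is
		 * ncached_max >> lg_fill_div.)
		 */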
		if ((tbin_info->ncached_max >> (tbin->lg_fill_div+1)) >= 1)
			tbin->lg_fill_div++;
	} else if (tbin->low_water < 0) {
		/*
		 * Increase fill count by 2X.  Make sure lg_fill_div stays
		 * greater than 0.
		 */
		if (tbin->lg_fill_div > 1)
			tbin->lg_fill_div--;
	}
	tbin->low_water = tbin->ncached;

	tcache->next_gc_bin++;
	if (tcache->next_gc_bin == nhbins)
		tcache->next_gc_bin = 0;
	tcache->ev_cnt = 0;
}

void *
tcache_alloc_small_hard(tcache_t *tcache, tcache_bin_t *tbin, size_t binind)
{
	void *ret;

	arena_tcache_fill_small(tcache->arena, tbin, binind,
	    config_prof ? tcache->prof_accumbytes : 0);
	if (config_prof)
		tcache->prof_accumbytes = 0;
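	/*
	 * If the arena could not provide any regions, the bin is still empty
	 * and tcache_alloc_easy() returns NULL.
	 */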
	ret = tcache_alloc_easy(tbin);

	return (ret);
}

void
tcache_bin_flush_small(tcache_bin_t *tbin, size_t binind, unsigned rem,
    tcache_t *tcache)
{
	void *ptr;
	unsigned i, nflush, ndeferred;
	bool merged_stats = false;

	assert(binind < NBINS);
	assert(rem <= tbin->ncached);

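	/*
	 * Each pass of the following loop locks the arena bin that owns
	 * tbin->avail[0], returns every object belonging to that arena, and
	 * compacts the deferred remainder to the front of avail for the next
	 * pass.
	 */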
	for (nflush = tbin->ncached - rem; nflush > 0; nflush = ndeferred) {
		/* Lock the arena bin associated with the first object. */
		arena_chunk_t *chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(
		    tbin->avail[0]);
		arena_t *arena = chunk->arena;
		arena_bin_t *bin = &arena->bins[binind];

		if (config_prof && arena == tcache->arena) {
			malloc_mutex_lock(&arena->lock);
			arena_prof_accum(arena, tcache->prof_accumbytes);
			malloc_mutex_unlock(&arena->lock);
			tcache->prof_accumbytes = 0;
		}

		malloc_mutex_lock(&bin->lock);
		if (config_stats && arena == tcache->arena) {
			assert(merged_stats == false);
			merged_stats = true;
			bin->stats.nflushes++;
			bin->stats.nrequests += tbin->tstats.nrequests;
			tbin->tstats.nrequests = 0;
		}
		ndeferred = 0;
		for (i = 0; i < nflush; i++) {
			ptr = tbin->avail[i];
			assert(ptr != NULL);
			chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
			if (chunk->arena == arena) {
				size_t pageind = ((uintptr_t)ptr -
				    (uintptr_t)chunk) >> LG_PAGE;
				arena_chunk_map_t *mapelm =
				    arena_mapp_get(chunk, pageind);
				if (config_fill && opt_junk) {
					arena_alloc_junk_small(ptr,
					    &arena_bin_info[binind], true);
				}
				arena_dalloc_bin_locked(arena, chunk, ptr,
				    mapelm);
			} else {
				/*
				 * This object was allocated via a different
				 * arena bin than the one that is currently
				 * locked.  Stash the object, so that it can be
				 * handled in a future pass.
				 */
				tbin->avail[ndeferred] = ptr;
				ndeferred++;
			}
		}
		malloc_mutex_unlock(&bin->lock);
	}
	if (config_stats && merged_stats == false) {
		/*
		 * The flush loop didn't happen to flush to this thread's
		 * arena, so the stats didn't get merged.  Manually do so now.
		 */
		arena_bin_t *bin = &tcache->arena->bins[binind];
		malloc_mutex_lock(&bin->lock);
		bin->stats.nflushes++;
		bin->stats.nrequests += tbin->tstats.nrequests;
		tbin->tstats.nrequests = 0;
		malloc_mutex_unlock(&bin->lock);
	}

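	/*
	 * Flushing consumed the stalest entries at the bottom of the avail
	 * stack; shift the surviving rem entries (the most recently cached)
	 * down to the base.
	 */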
	memmove(tbin->avail, &tbin->avail[tbin->ncached - rem],
	    rem * sizeof(void *));
	tbin->ncached = rem;
	if ((int)tbin->ncached < tbin->low_water)
		tbin->low_water = tbin->ncached;
}

void
tcache_bin_flush_large(tcache_bin_t *tbin, size_t binind, unsigned rem,
    tcache_t *tcache)
{
	void *ptr;
	unsigned i, nflush, ndeferred;
	bool merged_stats = false;

	assert(binind < nhbins);
	assert(rem <= tbin->ncached);

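	/*
	 * Same multi-pass structure as tcache_bin_flush_small(), except that
	 * large objects are not bin-managed, so the arena lock covers
	 * deallocation as well as prof/stats merging.
	 */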
	for (nflush = tbin->ncached - rem; nflush > 0; nflush = ndeferred) {
		/* Lock the arena associated with the first object. */
		arena_chunk_t *chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(
		    tbin->avail[0]);
		arena_t *arena = chunk->arena;

		malloc_mutex_lock(&arena->lock);
		if ((config_prof || config_stats) && arena == tcache->arena) {
			if (config_prof) {
				arena_prof_accum(arena,
				    tcache->prof_accumbytes);
				tcache->prof_accumbytes = 0;
			}
			if (config_stats) {
				merged_stats = true;
				arena->stats.nrequests_large +=
				    tbin->tstats.nrequests;
				arena->stats.lstats[binind - NBINS].nrequests +=
				    tbin->tstats.nrequests;
				tbin->tstats.nrequests = 0;
			}
		}
		ndeferred = 0;
		for (i = 0; i < nflush; i++) {
			ptr = tbin->avail[i];
			assert(ptr != NULL);
			chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
			if (chunk->arena == arena)
				arena_dalloc_large_locked(arena, chunk, ptr);
			else {
				/*
				 * This object was allocated via a different
				 * arena than the one that is currently locked.
				 * Stash the object, so that it can be handled
				 * in a future pass.
				 */
				tbin->avail[ndeferred] = ptr;
				ndeferred++;
			}
		}
		malloc_mutex_unlock(&arena->lock);
	}
	if (config_stats && merged_stats == false) {
		/*
		 * The flush loop didn't happen to flush to this thread's
		 * arena, so the stats didn't get merged.  Manually do so now.
		 */
		arena_t *arena = tcache->arena;
		malloc_mutex_lock(&arena->lock);
		arena->stats.nrequests_large += tbin->tstats.nrequests;
		arena->stats.lstats[binind - NBINS].nrequests +=
		    tbin->tstats.nrequests;
		tbin->tstats.nrequests = 0;
		malloc_mutex_unlock(&arena->lock);
	}

	memmove(tbin->avail, &tbin->avail[tbin->ncached - rem],
	    rem * sizeof(void *));
	tbin->ncached = rem;
	if ((int)tbin->ncached < tbin->low_water)
		tbin->low_water = tbin->ncached;
}

void
tcache_arena_associate(tcache_t *tcache, arena_t *arena)
{

	if (config_stats) {
		/* Link into list of extant tcaches. */
		malloc_mutex_lock(&arena->lock);
		ql_elm_new(tcache, link);
		ql_tail_insert(&arena->tcache_ql, tcache, link);
		malloc_mutex_unlock(&arena->lock);
	}
	tcache->arena = arena;
}

void
tcache_arena_dissociate(tcache_t *tcache)
{

	if (config_stats) {
		/* Unlink from list of extant tcaches. */
		malloc_mutex_lock(&tcache->arena->lock);
		ql_remove(&tcache->arena->tcache_ql, tcache, link);
		malloc_mutex_unlock(&tcache->arena->lock);
		tcache_stats_merge(tcache, tcache->arena);
	}
}

tcache_t *
tcache_create(arena_t *arena)
{
	tcache_t *tcache;
	size_t size, stack_offset;
	unsigned i;

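	/*
	 * Layout: the tcache_t (including its nhbins tcache_bin_t headers) is
	 * followed by the concatenated avail stacks, one per bin, each
	 * ncached_max pointers long (stack_nelms pointers in total).
	 */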
	size = offsetof(tcache_t, tbins) + (sizeof(tcache_bin_t) * nhbins);
	/* Naturally align the pointer stacks. */
	size = PTR_CEILING(size);
	stack_offset = size;
	size += stack_nelms * sizeof(void *);
	/*
	 * Round up to the nearest multiple of the cacheline size, in order to
	 * avoid the possibility of false cacheline sharing.
	 *
	 * That this works relies on the same logic as in ipalloc(), but we
	 * cannot directly call ipalloc() here due to tcache bootstrapping
	 * issues.
	 */
	size = (size + CACHELINE_MASK) & (-CACHELINE);

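	/*
	 * Each path below zeroes the allocation, which leaves every bin's
	 * ncached, low_water, and tstats fields initialized to zero.
	 */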
	if (size <= SMALL_MAXCLASS)
		tcache = (tcache_t *)arena_malloc_small(arena, size, true);
	else if (size <= tcache_maxclass)
		tcache = (tcache_t *)arena_malloc_large(arena, size, true);
	else
		tcache = (tcache_t *)icalloc(size);

	if (tcache == NULL)
		return (NULL);

	tcache_arena_associate(tcache, arena);

	assert((TCACHE_NSLOTS_SMALL_MAX & 1U) == 0);
	for (i = 0; i < nhbins; i++) {
		tcache->tbins[i].lg_fill_div = 1;
		tcache->tbins[i].avail = (void **)((uintptr_t)tcache +
		    (uintptr_t)stack_offset);
		stack_offset += tcache_bin_info[i].ncached_max * sizeof(void *);
	}

	tcache_tsd_set(&tcache);

	return (tcache);
}

void
tcache_destroy(tcache_t *tcache)
{
	unsigned i;
	size_t tcache_size;

	tcache_arena_dissociate(tcache);

	for (i = 0; i < NBINS; i++) {
		tcache_bin_t *tbin = &tcache->tbins[i];
		tcache_bin_flush_small(tbin, i, 0, tcache);

		if (config_stats && tbin->tstats.nrequests != 0) {
			arena_t *arena = tcache->arena;
			arena_bin_t *bin = &arena->bins[i];
			malloc_mutex_lock(&bin->lock);
			bin->stats.nrequests += tbin->tstats.nrequests;
			malloc_mutex_unlock(&bin->lock);
		}
	}

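	/* Bins [NBINS, nhbins) cache large objects; i continues from above. */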
	for (; i < nhbins; i++) {
		tcache_bin_t *tbin = &tcache->tbins[i];
		tcache_bin_flush_large(tbin, i, 0, tcache);

		if (config_stats && tbin->tstats.nrequests != 0) {
			arena_t *arena = tcache->arena;
			malloc_mutex_lock(&arena->lock);
			arena->stats.nrequests_large += tbin->tstats.nrequests;
			arena->stats.lstats[i - NBINS].nrequests +=
			    tbin->tstats.nrequests;
			malloc_mutex_unlock(&arena->lock);
		}
	}

	if (config_prof && tcache->prof_accumbytes > 0) {
		malloc_mutex_lock(&tcache->arena->lock);
		arena_prof_accum(tcache->arena, tcache->prof_accumbytes);
		malloc_mutex_unlock(&tcache->arena->lock);
	}

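	/*
	 * Release the tcache's own backing memory directly through the arena
	 * (or via idalloc() for sizes beyond tcache_maxclass); the tcache
	 * being destroyed cannot be used to deallocate itself.
	 */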
	tcache_size = arena_salloc(tcache, false);
	if (tcache_size <= SMALL_MAXCLASS) {
		arena_chunk_t *chunk = CHUNK_ADDR2BASE(tcache);
		arena_t *arena = chunk->arena;
		size_t pageind = ((uintptr_t)tcache - (uintptr_t)chunk) >>
		    LG_PAGE;
		arena_chunk_map_t *mapelm = arena_mapp_get(chunk, pageind);

		arena_dalloc_bin(arena, chunk, tcache, pageind, mapelm);
	} else if (tcache_size <= tcache_maxclass) {
		arena_chunk_t *chunk = CHUNK_ADDR2BASE(tcache);
		arena_t *arena = chunk->arena;

		arena_dalloc_large(arena, chunk, tcache);
	} else
		idalloc(tcache);
}

void
tcache_thread_cleanup(void *arg)
{
	tcache_t *tcache = *(tcache_t **)arg;

	if (tcache == TCACHE_STATE_DISABLED) {
		/* Do nothing. */
	} else if (tcache == TCACHE_STATE_REINCARNATED) {
		/*
		 * Another destructor called an allocator function after this
		 * destructor was called.  Reset tcache to
		 * TCACHE_STATE_PURGATORY in order to receive another callback.
		 */
		tcache = TCACHE_STATE_PURGATORY;
		tcache_tsd_set(&tcache);
	} else if (tcache == TCACHE_STATE_PURGATORY) {
		/*
		 * The previous time this destructor was called, we set the key
		 * to TCACHE_STATE_PURGATORY so that other destructors wouldn't
		 * cause re-creation of the tcache.  This time, do nothing, so
		 * that the destructor will not be called again.
		 */
	} else if (tcache != NULL) {
		assert(tcache != TCACHE_STATE_PURGATORY);
		tcache_destroy(tcache);
		tcache = TCACHE_STATE_PURGATORY;
		tcache_tsd_set(&tcache);
	}
}

void
tcache_stats_merge(tcache_t *tcache, arena_t *arena)
{
	unsigned i;

	/* Merge and reset tcache stats. */
	for (i = 0; i < NBINS; i++) {
		arena_bin_t *bin = &arena->bins[i];
		tcache_bin_t *tbin = &tcache->tbins[i];
		malloc_mutex_lock(&bin->lock);
		bin->stats.nrequests += tbin->tstats.nrequests;
		malloc_mutex_unlock(&bin->lock);
		tbin->tstats.nrequests = 0;
	}

	for (; i < nhbins; i++) {
		malloc_large_stats_t *lstats = &arena->stats.lstats[i - NBINS];
		tcache_bin_t *tbin = &tcache->tbins[i];
		arena->stats.nrequests_large += tbin->tstats.nrequests;
		lstats->nrequests += tbin->tstats.nrequests;
		tbin->tstats.nrequests = 0;
	}
}

bool
tcache_boot0(void)
{
	unsigned i;

	/*
	 * If necessary, clamp opt_lg_tcache_max, now that arena_maxclass is
	 * known.
	 */
	if (opt_lg_tcache_max < 0 || (1U << opt_lg_tcache_max) < SMALL_MAXCLASS)
		tcache_maxclass = SMALL_MAXCLASS;
	else if ((1U << opt_lg_tcache_max) > arena_maxclass)
		tcache_maxclass = arena_maxclass;
	else
		tcache_maxclass = (1U << opt_lg_tcache_max);

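	/*
	 * The tcache-cached large size classes are the page-multiple classes
	 * up through tcache_maxclass, hence tcache_maxclass >> LG_PAGE of
	 * them in addition to the NBINS small classes.
	 */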
	nhbins = NBINS + (tcache_maxclass >> LG_PAGE);

	/* Initialize tcache_bin_info. */
	tcache_bin_info = (tcache_bin_info_t *)base_alloc(nhbins *
	    sizeof(tcache_bin_info_t));
	if (tcache_bin_info == NULL)
		return (true);
	stack_nelms = 0;
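	/*
	 * Cap each small bin's cache at twice the number of regions per run,
	 * bounded above by TCACHE_NSLOTS_SMALL_MAX.
	 */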
	for (i = 0; i < NBINS; i++) {
		if ((arena_bin_info[i].nregs << 1) <= TCACHE_NSLOTS_SMALL_MAX) {
			tcache_bin_info[i].ncached_max =
			    (arena_bin_info[i].nregs << 1);
		} else {
			tcache_bin_info[i].ncached_max =
			    TCACHE_NSLOTS_SMALL_MAX;
		}
		stack_nelms += tcache_bin_info[i].ncached_max;
	}
	for (; i < nhbins; i++) {
		tcache_bin_info[i].ncached_max = TCACHE_NSLOTS_LARGE;
		stack_nelms += tcache_bin_info[i].ncached_max;
	}

	return (false);
}

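/*
 * The second boot phase only creates the TSD keys; splitting it out lets
 * tcache_boot0()'s size computations run earlier during bootstrapping.
 */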
bool
tcache_boot1(void)
{

	if (tcache_tsd_boot() || tcache_enabled_tsd_boot())
		return (true);

	return (false);
}