/******************************************************************************/
#ifdef JEMALLOC_H_TYPES

typedef struct tcache_bin_info_s tcache_bin_info_t;
typedef struct tcache_bin_s tcache_bin_t;
typedef struct tcache_s tcache_t;

/*
 * Absolute maximum number of cache slots for each small bin in the thread
 * cache.  This is an additional constraint beyond that imposed as: twice the
 * number of regions per run for this size class.
 *
 * This constant must be an even number.
 */
#define	TCACHE_NSLOTS_SMALL_MAX		200

/* Number of cache slots for large size classes. */
#define	TCACHE_NSLOTS_LARGE		20

/* (1U << opt_lg_tcache_max) is used to compute tcache_maxclass. */
#define	LG_TCACHE_MAXCLASS_DEFAULT	15

/*
 * (1U << opt_lg_tcache_gc_sweep) is the approximate number of allocation
 * events between full GC sweeps (-1: disabled).  Integer rounding may cause
 * the actual number to be slightly higher, since GC is performed
 * incrementally.
 */
#define	LG_TCACHE_GC_SWEEP_DEFAULT	13
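/*
 * Illustrative default values: with opt_lg_tcache_max left at
 * LG_TCACHE_MAXCLASS_DEFAULT (15), tcache_maxclass is computed from
 * (1U << 15) == 32 KiB, and with opt_lg_tcache_gc_sweep left at
 * LG_TCACHE_GC_SWEEP_DEFAULT (13), a full incremental GC sweep corresponds
 * to approximately (1U << 13) == 8192 allocation events.
 */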
#endif /* JEMALLOC_H_TYPES */
/******************************************************************************/
#ifdef JEMALLOC_H_STRUCTS

/*
 * Read-only information associated with each element of tcache_t's tbins array
 * is stored separately, mainly to reduce memory usage.
 */
struct tcache_bin_info_s {
	unsigned	ncached_max;	/* Upper limit on ncached. */
};

struct tcache_bin_s {
#  ifdef JEMALLOC_STATS
	tcache_bin_stats_t tstats;
#  endif
	int		low_water;	/* Min # cached since last GC. */
	unsigned	lg_fill_div;	/* Fill (ncached_max >> lg_fill_div). */
	unsigned	ncached;	/* # of cached objects. */
	void		**avail;	/* Stack of available objects. */
};

struct tcache_s {
#  ifdef JEMALLOC_STATS
	ql_elm(tcache_t) link;		/* Used for aggregating stats. */
#  endif
#  ifdef JEMALLOC_PROF
	uint64_t	prof_accumbytes;/* Cleared after arena_prof_accum() */
#  endif
	arena_t		*arena;		/* This thread's arena. */
	unsigned	ev_cnt;		/* Event count since incremental GC. */
	unsigned	next_gc_bin;	/* Next bin to GC. */
	tcache_bin_t	tbins[1];	/* Dynamically sized. */
	/*
	 * The pointer stacks associated with tbins follow as a contiguous
	 * array.  During tcache initialization, the avail pointer in each
	 * element of tbins is initialized to point to the proper offset within
	 * this array.
	 */
};
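/*
 * Rough sizing sketch (illustrative, not the exact allocation code): a
 * tcache_t needs about
 *     offsetof(tcache_t, tbins) + nhbins * sizeof(tcache_bin_t)
 * bytes for the header and bin array, plus room for the contiguous avail
 * stacks, roughly
 *     sum over i of tcache_bin_info[i].ncached_max * sizeof(void *)
 * bytes, which is where each tbins[i].avail pointer is aimed during
 * initialization.
 */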
#endif /* JEMALLOC_H_STRUCTS */
/******************************************************************************/
#ifdef JEMALLOC_H_EXTERNS

extern bool	opt_tcache;
extern ssize_t	opt_lg_tcache_max;
extern ssize_t	opt_lg_tcache_gc_sweep;

extern tcache_bin_info_t	*tcache_bin_info;
/* Map of thread-specific caches. */
#ifndef NO_TLS
extern __thread tcache_t	*tcache_tls
    JEMALLOC_ATTR(tls_model("initial-exec"));
#  define TCACHE_GET()	tcache_tls
#  define TCACHE_SET(v)	do {					\
	tcache_tls = (tcache_t *)(v);				\
	pthread_setspecific(tcache_tsd, (void *)(v));		\
} while (0)
#else
#  define TCACHE_GET()	((tcache_t *)pthread_getspecific(tcache_tsd))
#  define TCACHE_SET(v)	do {					\
	pthread_setspecific(tcache_tsd, (void *)(v));		\
} while (0)
#endif
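/*
 * Note that the TLS variant of TCACHE_SET() updates both tcache_tls (for
 * fast lookup) and the pthread TSD slot; keeping the TSD slot current is
 * what lets the destructor registered for tcache_tsd
 * (tcache_thread_cleanup()) run at thread exit.
 */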
extern pthread_key_t		tcache_tsd;
/*
 * Number of tcache bins.  There are nbins small-object bins, plus 0 or more
 * large-object bins.
 */
extern size_t			nhbins;

/* Maximum cached size class. */
extern size_t			tcache_maxclass;

/* Number of tcache allocation/deallocation events between incremental GCs. */
extern unsigned			tcache_gc_incr;
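/*
 * For the flush functions below, "rem" is the number of cached objects to
 * retain in tbin; the remaining ncached - rem objects are flushed back to
 * their arena.
 */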
void	tcache_bin_flush_small(tcache_bin_t *tbin, size_t binind, unsigned rem
#if (defined(JEMALLOC_STATS) || defined(JEMALLOC_PROF))
    , tcache_t *tcache
#endif
    );
void	tcache_bin_flush_large(tcache_bin_t *tbin, size_t binind, unsigned rem
#if (defined(JEMALLOC_STATS) || defined(JEMALLOC_PROF))
    , tcache_t *tcache
#endif
    );
tcache_t *tcache_create(arena_t *arena);
void	*tcache_alloc_small_hard(tcache_t *tcache, tcache_bin_t *tbin,
    size_t binind);
void	tcache_destroy(tcache_t *tcache);
#ifdef JEMALLOC_STATS
void	tcache_stats_merge(tcache_t *tcache, arena_t *arena);
#endif
bool	tcache_boot(void);
#endif /* JEMALLOC_H_EXTERNS */
/******************************************************************************/
#ifdef JEMALLOC_H_INLINES

#ifndef JEMALLOC_ENABLE_INLINE
void	tcache_event(tcache_t *tcache);
tcache_t *tcache_get(void);
void	*tcache_alloc_easy(tcache_bin_t *tbin);
void	*tcache_alloc_small(tcache_t *tcache, size_t size, bool zero);
void	*tcache_alloc_large(tcache_t *tcache, size_t size, bool zero);
void	tcache_dalloc_small(tcache_t *tcache, void *ptr);
void	tcache_dalloc_large(tcache_t *tcache, void *ptr, size_t size);
#endif
#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_TCACHE_C_))
JEMALLOC_INLINE tcache_t *
tcache_get(void)
{
	tcache_t *tcache;

	if ((isthreaded & opt_tcache) == false)
		return (NULL);

	tcache = TCACHE_GET();
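	/*
	 * The values 0 (NULL), 1, and 2 are sentinels rather than real cache
	 * pointers: NULL means no tcache has been created for this thread
	 * yet, (uintptr_t)1 means tcache_thread_cleanup() has already run,
	 * and (uintptr_t)2 (set below) records that an allocator function
	 * was called after cleanup.
	 */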
	if ((uintptr_t)tcache <= (uintptr_t)2) {
		if (tcache == NULL) {
			tcache = tcache_create(choose_arena());
			if (tcache == NULL)
				return (NULL);
		} else {
			if (tcache == (void *)(uintptr_t)1) {
				/*
				 * Make a note that an allocator function was
				 * called after the tcache_thread_cleanup() was
				 * called.
				 */
				TCACHE_SET((uintptr_t)2);
			}
			return (NULL);
		}
	}

	return (tcache);
}

JEMALLOC_INLINE void
tcache_event(tcache_t *tcache)
{

	if (tcache_gc_incr == 0)
		return;

	tcache->ev_cnt++;
	assert(tcache->ev_cnt <= tcache_gc_incr);
	if (tcache->ev_cnt == tcache_gc_incr) {
		size_t binind = tcache->next_gc_bin;
		tcache_bin_t *tbin = &tcache->tbins[binind];
		tcache_bin_info_t *tbin_info = &tcache_bin_info[binind];
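		/*
		 * Worked example of the flush amount used below: with
		 * low_water == 8, the retained count is
		 * ncached - 8 + (8 >> 2) == ncached - 6, i.e. 6 objects
		 * (the ceiling of 3/4 of the low water mark) get flushed.
		 */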
		if (tbin->low_water > 0) {
			/*
			 * Flush (ceiling) 3/4 of the objects below the low
			 * water mark.
			 */
			if (binind < nbins) {
				tcache_bin_flush_small(tbin, binind,
				    tbin->ncached - tbin->low_water +
				    (tbin->low_water >> 2)
#if (defined(JEMALLOC_STATS) || defined(JEMALLOC_PROF))
				    , tcache
#endif
				    );
			} else {
				tcache_bin_flush_large(tbin, binind,
				    tbin->ncached - tbin->low_water +
				    (tbin->low_water >> 2)
#if (defined(JEMALLOC_STATS) || defined(JEMALLOC_PROF))
				    , tcache
#endif
				    );
			}
			/*
			 * Reduce fill count by 2X.  Limit lg_fill_div such that
			 * the fill count is always at least 1.
			 */
			if ((tbin_info->ncached_max >> (tbin->lg_fill_div+1))
			    >= 1)
				tbin->lg_fill_div++;
		} else if (tbin->low_water < 0) {
			/*
			 * Increase fill count by 2X.  Make sure lg_fill_div
			 * stays greater than 0.
			 */
			if (tbin->lg_fill_div > 1)
				tbin->lg_fill_div--;
		}
		tbin->low_water = tbin->ncached;

		tcache->next_gc_bin++;
		if (tcache->next_gc_bin == nhbins)
			tcache->next_gc_bin = 0;
		tcache->ev_cnt = 0;
	}
}

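/*
 * tcache_alloc_easy() is the common fast path: it pops the most recently
 * cached object off the bin's avail stack.  A low_water value of -1 records
 * that the bin went completely empty since the last GC pass, which the GC in
 * tcache_event() uses as a signal to raise the bin's fill count.
 */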
JEMALLOC_INLINE void *
tcache_alloc_easy(tcache_bin_t *tbin)
{
	void *ret;

	if (tbin->ncached == 0) {
		tbin->low_water = -1;
		return (NULL);
	}
	tbin->ncached--;
	if ((int)tbin->ncached < tbin->low_water)
		tbin->low_water = tbin->ncached;
	ret = tbin->avail[tbin->ncached];
	return (ret);
}

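/*
 * tcache_alloc_small() maps the request to a small bin, tries the cached
 * stack via tcache_alloc_easy(), and only on a miss falls back to
 * tcache_alloc_small_hard(), which refills the bin from this thread's arena.
 */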
JEMALLOC_INLINE void *
tcache_alloc_small(tcache_t *tcache, size_t size, bool zero)
{
	void *ret;
	size_t binind;
	tcache_bin_t *tbin;

	binind = SMALL_SIZE2BIN(size);
	assert(binind < nbins);
	tbin = &tcache->tbins[binind];
	ret = tcache_alloc_easy(tbin);
	if (ret == NULL) {
		ret = tcache_alloc_small_hard(tcache, tbin, binind);
		if (ret == NULL)
			return (NULL);
	}
	assert(arena_salloc(ret) == arena_bin_info[binind].reg_size);

	if (zero == false) {
#ifdef JEMALLOC_FILL
		if (opt_junk)
			memset(ret, 0xa5, size);
		else if (opt_zero)
			memset(ret, 0, size);
#endif
	} else
		memset(ret, 0, size);

#ifdef JEMALLOC_STATS
	tbin->tstats.nrequests++;
#endif
#ifdef JEMALLOC_PROF
	tcache->prof_accumbytes += arena_bin_info[binind].reg_size;
#endif
	tcache_event(tcache);
	return (ret);
}

JEMALLOC_INLINE void *
tcache_alloc_large(tcache_t *tcache, size_t size, bool zero)
{
	void *ret;
	size_t binind;
	tcache_bin_t *tbin;

	size = PAGE_CEILING(size);
	assert(size <= tcache_maxclass);
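	/*
	 * Large size classes are spaced one page apart, so the bin index is
	 * the number of small bins plus the (1-based) page count of the
	 * request; e.g. assuming 4 KiB pages, an 8 KiB request maps to bin
	 * nbins + 1.
	 */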
	binind = nbins + (size >> PAGE_SHIFT) - 1;
	assert(binind < nhbins);
	tbin = &tcache->tbins[binind];
	ret = tcache_alloc_easy(tbin);
	if (ret == NULL) {
		/*
		 * Only allocate one large object at a time, because it's quite
		 * expensive to create one and not use it.
		 */
		ret = arena_malloc_large(tcache->arena, size, zero);
		if (ret == NULL)
			return (NULL);
	} else {
#ifdef JEMALLOC_PROF
		arena_chunk_t *chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ret);
		size_t pageind = (((uintptr_t)ret - (uintptr_t)chunk) >>
		    PAGE_SHIFT);
		chunk->map[pageind-map_bias].bits &= ~CHUNK_MAP_CLASS_MASK;
#endif
		if (zero == false) {
#ifdef JEMALLOC_FILL
			if (opt_junk)
				memset(ret, 0xa5, size);
			else if (opt_zero)
				memset(ret, 0, size);
#endif
		} else
			memset(ret, 0, size);

#ifdef JEMALLOC_STATS
		tbin->tstats.nrequests++;
#endif
#ifdef JEMALLOC_PROF
		tcache->prof_accumbytes += size;
#endif
	}

	tcache_event(tcache);
	return (ret);
}

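/*
 * Both deallocation paths below push ptr onto the owning bin's avail stack.
 * If the bin is already full (ncached == ncached_max), half of the bin is
 * flushed back to the arena first, i.e. the flush call is asked to retain
 * ncached_max >> 1 objects.
 */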
JEMALLOC_INLINE void
tcache_dalloc_small(tcache_t *tcache, void *ptr)
{
	arena_t *arena;
	arena_chunk_t *chunk;
	arena_run_t *run;
	arena_bin_t *bin;
	tcache_bin_t *tbin;
	tcache_bin_info_t *tbin_info;
	size_t pageind, binind;
	arena_chunk_map_t *mapelm;

	assert(arena_salloc(ptr) <= small_maxclass);

	chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
	arena = chunk->arena;
	pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> PAGE_SHIFT;
	mapelm = &chunk->map[pageind-map_bias];
	run = (arena_run_t *)((uintptr_t)chunk + (uintptr_t)((pageind -
	    (mapelm->bits >> PAGE_SHIFT)) << PAGE_SHIFT));
	dassert(run->magic == ARENA_RUN_MAGIC);
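	/*
	 * Small deallocation is not passed a size class, so recover it from
	 * the chunk metadata: the run mapped above points to its bin, and the
	 * bin index is that bin's offset within arena->bins.
	 */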
	bin = run->bin;
	binind = ((uintptr_t)bin - (uintptr_t)&arena->bins) /
	    sizeof(arena_bin_t);
	assert(binind < nbins);

#ifdef JEMALLOC_FILL
	if (opt_junk)
		memset(ptr, 0x5a, arena_bin_info[binind].reg_size);
#endif

	tbin = &tcache->tbins[binind];
	tbin_info = &tcache_bin_info[binind];
	if (tbin->ncached == tbin_info->ncached_max) {
		tcache_bin_flush_small(tbin, binind, (tbin_info->ncached_max >>
		    1)
#if (defined(JEMALLOC_STATS) || defined(JEMALLOC_PROF))
		    , tcache
#endif
		    );
	}
	assert(tbin->ncached < tbin_info->ncached_max);
	tbin->avail[tbin->ncached] = ptr;
	tbin->ncached++;

	tcache_event(tcache);
}

JEMALLOC_INLINE void
tcache_dalloc_large(tcache_t *tcache, void *ptr, size_t size)
{
	arena_t *arena;
	arena_chunk_t *chunk;
	size_t pageind, binind;
	tcache_bin_t *tbin;
	tcache_bin_info_t *tbin_info;

	assert((size & PAGE_MASK) == 0);
	assert(arena_salloc(ptr) > small_maxclass);
	assert(arena_salloc(ptr) <= tcache_maxclass);

	chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
	arena = chunk->arena;
	pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> PAGE_SHIFT;
	binind = nbins + (size >> PAGE_SHIFT) - 1;
#ifdef JEMALLOC_FILL
	if (opt_junk)
		memset(ptr, 0x5a, size);
#endif

	tbin = &tcache->tbins[binind];
	tbin_info = &tcache_bin_info[binind];
	if (tbin->ncached == tbin_info->ncached_max) {
		tcache_bin_flush_large(tbin, binind, (tbin_info->ncached_max >>
		    1)
#if (defined(JEMALLOC_STATS) || defined(JEMALLOC_PROF))
		    , tcache
#endif
		    );
	}
	assert(tbin->ncached < tbin_info->ncached_max);
	tbin->avail[tbin->ncached] = ptr;
	tbin->ncached++;

	tcache_event(tcache);
}
#endif
#endif /* JEMALLOC_H_INLINES */
/******************************************************************************/
#endif /* JEMALLOC_TCACHE */