#ifdef JEMALLOC_TCACHE
/******************************************************************************/
#ifdef JEMALLOC_H_TYPES

typedef struct tcache_bin_info_s tcache_bin_info_t;
typedef struct tcache_bin_s tcache_bin_t;
typedef struct tcache_s tcache_t;

/*
 * Absolute maximum number of cache slots for each small bin in the thread
 * cache.  This is an additional constraint beyond the default limit of
 * twice the number of regions per run for this size class.
 *
 * This constant must be an even number.
 */
#define TCACHE_NSLOTS_SMALL_MAX 200

/* Number of cache slots for large size classes. */
#define TCACHE_NSLOTS_LARGE 20

/* (1U << opt_lg_tcache_max) is used to compute tcache_maxclass. */
#define LG_TCACHE_MAXCLASS_DEFAULT 15
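/*
 * Worked example (illustrative): with the default of 15, tcache_maxclass is
 * derived from (1U << 15) = 32 KiB, so allocations up to roughly 32 KiB may
 * be served from the thread cache.
 */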

/*
 * (1U << opt_lg_tcache_gc_sweep) is the approximate number of allocation
 * events between full GC sweeps (-1: disabled).  Integer rounding may cause
 * the actual number to be slightly higher, since GC is performed
 * incrementally.
 */
#define LG_TCACHE_GC_SWEEP_DEFAULT 13
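/*
 * Worked arithmetic (a sketch; the exact rounding is done at boot time):
 * with the default of 13, a full sweep occurs roughly every 2^13 = 8192
 * allocation/deallocation events.  Since incremental GC visits one bin per
 * step, and a full sweep visits all nhbins bins, the per-step interval
 * tcache_gc_incr comes out to approximately 8192 / nhbins events.
 */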

#endif /* JEMALLOC_H_TYPES */
/******************************************************************************/
#ifdef JEMALLOC_H_STRUCTS

/*
 * Read-only information associated with each element of tcache_t's tbins array
 * is stored separately, mainly to reduce memory usage.
 */
struct tcache_bin_info_s {
        unsigned        ncached_max;    /* Upper limit on ncached. */
};

struct tcache_bin_s {
# ifdef JEMALLOC_STATS
        tcache_bin_stats_t tstats;
# endif
        int             low_water;      /* Min # cached since last GC. */
        unsigned        lg_fill_div;    /* Fill (ncached_max >> lg_fill_div). */
        unsigned        ncached;        /* # of cached objects. */
        void            **avail;        /* Stack of available objects. */
};

struct tcache_s {
# ifdef JEMALLOC_STATS
        ql_elm(tcache_t) link;          /* Used for aggregating stats. */
# endif
# ifdef JEMALLOC_PROF
        uint64_t        prof_accumbytes;/* Cleared after arena_prof_accum() */
# endif
        arena_t         *arena;         /* This thread's arena. */
        unsigned        ev_cnt;         /* Event count since incremental GC. */
        unsigned        next_gc_bin;    /* Next bin to GC. */
        tcache_bin_t    tbins[1];       /* Dynamically sized. */
        /*
         * The pointer stacks associated with tbins follow as a contiguous
         * array.  During tcache initialization, the avail pointer in each
         * element of tbins is initialized to point to the proper offset within
         * this array.
         */
};
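/*
 * Layout sketch (illustrative; the real offset math lives in
 * tcache_create()):
 *
 *   +----------+---------------------+-------------------------------+
 *   | tcache_t | tbins[0..nhbins-1]  | avail stacks, one per bin     |
 *   +----------+---------------------+-------------------------------+
 *
 * Initialization walks the trailing array, handing each bin a slice sized
 * by its ncached_max, along the lines of:
 *
 *   void **stack = (void **)((uintptr_t)tcache + stack_offset);
 *   for (unsigned i = 0; i < nhbins; i++) {
 *       tcache->tbins[i].avail = stack;
 *       stack += tcache_bin_info[i].ncached_max;
 *   }
 *
 * where stack_offset is a hypothetical name for the byte offset just past
 * the end of the tbins array.
 */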

#endif /* JEMALLOC_H_STRUCTS */
/******************************************************************************/
#ifdef JEMALLOC_H_EXTERNS

extern bool opt_tcache;
extern ssize_t opt_lg_tcache_max;
extern ssize_t opt_lg_tcache_gc_sweep;

extern tcache_bin_info_t *tcache_bin_info;

/* Map of thread-specific caches. */
#ifndef NO_TLS
extern __thread tcache_t *tcache_tls
    JEMALLOC_ATTR(tls_model("initial-exec"));
# define TCACHE_GET() tcache_tls
# define TCACHE_SET(v) do {                                             \
        tcache_tls = (tcache_t *)(v);                                   \
        pthread_setspecific(tcache_tsd, (void *)(v));                   \
} while (0)
#else
# define TCACHE_GET() ((tcache_t *)pthread_getspecific(tcache_tsd))
# define TCACHE_SET(v) do {                                             \
        pthread_setspecific(tcache_tsd, (void *)(v));                   \
} while (0)
#endif
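/*
 * Note on the TLS case above: TCACHE_SET() mirrors the value into pthread
 * TSD even though reads go through tcache_tls.  The thread-exit destructor
 * registered for tcache_tsd only runs for threads whose TSD value is
 * non-NULL, so the mirror is what guarantees cleanup at thread exit.
 */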
extern pthread_key_t tcache_tsd;

/*
 * Number of tcache bins.  There are nbins small-object bins, plus 0 or more
 * large-object bins.
 */
extern size_t nhbins;

/* Maximum cached size class. */
extern size_t tcache_maxclass;

/* Number of tcache allocation/deallocation events between incremental GCs. */
extern unsigned tcache_gc_incr;

void tcache_bin_flush_small(tcache_bin_t *tbin, size_t binind, unsigned rem
#if (defined(JEMALLOC_STATS) || defined(JEMALLOC_PROF))
    , tcache_t *tcache
#endif
    );
void tcache_bin_flush_large(tcache_bin_t *tbin, size_t binind, unsigned rem
#if (defined(JEMALLOC_STATS) || defined(JEMALLOC_PROF))
    , tcache_t *tcache
#endif
    );
tcache_t *tcache_create(arena_t *arena);
void *tcache_alloc_small_hard(tcache_t *tcache, tcache_bin_t *tbin,
    size_t binind);
void tcache_destroy(tcache_t *tcache);
#ifdef JEMALLOC_STATS
void tcache_stats_merge(tcache_t *tcache, arena_t *arena);
#endif
bool tcache_boot(void);

#endif /* JEMALLOC_H_EXTERNS */
/******************************************************************************/
#ifdef JEMALLOC_H_INLINES

#ifndef JEMALLOC_ENABLE_INLINE
void tcache_event(tcache_t *tcache);
tcache_t *tcache_get(void);
void *tcache_alloc_easy(tcache_bin_t *tbin);
void *tcache_alloc_small(tcache_t *tcache, size_t size, bool zero);
void *tcache_alloc_large(tcache_t *tcache, size_t size, bool zero);
void tcache_dalloc_small(tcache_t *tcache, void *ptr);
void tcache_dalloc_large(tcache_t *tcache, void *ptr, size_t size);
#endif

#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_TCACHE_C_))
JEMALLOC_INLINE tcache_t *
tcache_get(void)
{
        tcache_t *tcache;

        if ((isthreaded & opt_tcache) == false)
                return (NULL);

        tcache = TCACHE_GET();
        if ((uintptr_t)tcache <= (uintptr_t)2) {
                if (tcache == NULL) {
                        tcache = tcache_create(choose_arena());
                        if (tcache == NULL)
                                return (NULL);
                } else {
                        if (tcache == (void *)(uintptr_t)1) {
                                /*
                                 * Make a note that an allocator function was
                                 * called after tcache_thread_cleanup() was
                                 * called.
                                 */
                                TCACHE_SET((uintptr_t)2);
                        }
                        return (NULL);
                }
        }

        return (tcache);
}
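/*
 * Sentinel encoding used by tcache_get():
 *   NULL          - no tcache has been created for this thread yet.
 *   (uintptr_t)1  - tcache_thread_cleanup() has already run for this thread.
 *   (uintptr_t)2  - an allocator function ran after cleanup; recorded so the
 *                   cleanup logic can tell the two cases apart.
 * Any other value is a live tcache_t pointer.  The bitwise & in
 * (isthreaded & opt_tcache) is presumably a deliberate branch-free way to
 * combine the two boolean flags.
 */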

JEMALLOC_INLINE void
tcache_event(tcache_t *tcache)
{

        if (tcache_gc_incr == 0)
                return;

        tcache->ev_cnt++;
        assert(tcache->ev_cnt <= tcache_gc_incr);
        if (tcache->ev_cnt == tcache_gc_incr) {
                size_t binind = tcache->next_gc_bin;
                tcache_bin_t *tbin = &tcache->tbins[binind];
                tcache_bin_info_t *tbin_info = &tcache_bin_info[binind];

                if (tbin->low_water > 0) {
                        /*
                         * Flush (ceiling) 3/4 of the objects below the low
                         * water mark.
                         */
                        if (binind < nbins) {
                                tcache_bin_flush_small(tbin, binind,
                                    tbin->ncached - tbin->low_water +
                                    (tbin->low_water >> 2)
#if (defined(JEMALLOC_STATS) || defined(JEMALLOC_PROF))
                                    , tcache
#endif
                                    );
                        } else {
                                tcache_bin_flush_large(tbin, binind,
                                    tbin->ncached - tbin->low_water +
                                    (tbin->low_water >> 2)
#if (defined(JEMALLOC_STATS) || defined(JEMALLOC_PROF))
                                    , tcache
#endif
                                    );
                        }
                        /*
                         * Reduce fill count by 2X.  Limit lg_fill_div such
                         * that the fill count is always at least 1.
                         */
                        if ((tbin_info->ncached_max >> (tbin->lg_fill_div+1))
                            >= 1)
                                tbin->lg_fill_div++;
                } else if (tbin->low_water < 0) {
                        /*
                         * Increase fill count by 2X.  Make sure lg_fill_div
                         * stays greater than 0.
                         */
                        if (tbin->lg_fill_div > 1)
                                tbin->lg_fill_div--;
                }
                tbin->low_water = tbin->ncached;

                tcache->next_gc_bin++;
                if (tcache->next_gc_bin == nhbins)
                        tcache->next_gc_bin = 0;
                tcache->ev_cnt = 0;
        }
}
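/*
 * Worked example of the flush above (illustrative numbers): suppose
 * ncached == 40 and low_water == 20 at sweep time.  The remainder passed to
 * the flush is 40 - 20 + (20 >> 2) == 25, so 15 objects are flushed, i.e.
 * (ceiling) 3/4 of the 20 objects that sat unused below the low water mark.
 * Everything above the low water mark is kept.
 */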

JEMALLOC_INLINE void *
tcache_alloc_easy(tcache_bin_t *tbin)
{
        void *ret;

        if (tbin->ncached == 0) {
                tbin->low_water = -1;
                return (NULL);
        }
        tbin->ncached--;
        if ((int)tbin->ncached < tbin->low_water)
                tbin->low_water = tbin->ncached;
        ret = tbin->avail[tbin->ncached];
        return (ret);
}
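/*
 * Note on low_water == -1: a bin that goes empty records -1 rather than 0,
 * which tcache_event() later reads as "demand exceeded supply" and answers
 * by decrementing lg_fill_div, i.e. doubling the refill size for this bin.
 */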

JEMALLOC_INLINE void *
tcache_alloc_small(tcache_t *tcache, size_t size, bool zero)
{
        void *ret;
        size_t binind;
        tcache_bin_t *tbin;

        binind = SMALL_SIZE2BIN(size);
        assert(binind < nbins);
        tbin = &tcache->tbins[binind];
        ret = tcache_alloc_easy(tbin);
        if (ret == NULL) {
                ret = tcache_alloc_small_hard(tcache, tbin, binind);
                if (ret == NULL)
                        return (NULL);
        }
        assert(arena_salloc(ret) == arena_bin_info[binind].reg_size);

        if (zero == false) {
#ifdef JEMALLOC_FILL
                if (opt_junk)
                        memset(ret, 0xa5, size);
                else if (opt_zero)
                        memset(ret, 0, size);
#endif
        } else
                memset(ret, 0, size);

#ifdef JEMALLOC_STATS
        tbin->tstats.nrequests++;
#endif
#ifdef JEMALLOC_PROF
        tcache->prof_accumbytes += arena_bin_info[binind].reg_size;
#endif
        tcache_event(tcache);
        return (ret);
}

JEMALLOC_INLINE void *
tcache_alloc_large(tcache_t *tcache, size_t size, bool zero)
{
        void *ret;
        size_t binind;
        tcache_bin_t *tbin;

        size = PAGE_CEILING(size);
        assert(size <= tcache_maxclass);
        binind = nbins + (size >> PAGE_SHIFT) - 1;
        assert(binind < nhbins);
        tbin = &tcache->tbins[binind];
        ret = tcache_alloc_easy(tbin);
        if (ret == NULL) {
                /*
                 * Only allocate one large object at a time, because it's
                 * quite expensive to create one and not use it.
                 */
                ret = arena_malloc_large(tcache->arena, size, zero);
                if (ret == NULL)
                        return (NULL);
        } else {
#ifdef JEMALLOC_PROF
                arena_chunk_t *chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ret);
                size_t pageind = (((uintptr_t)ret - (uintptr_t)chunk) >>
                    PAGE_SHIFT);
                chunk->map[pageind-map_bias].bits &= ~CHUNK_MAP_CLASS_MASK;
#endif
                if (zero == false) {
#ifdef JEMALLOC_FILL
                        if (opt_junk)
                                memset(ret, 0xa5, size);
                        else if (opt_zero)
                                memset(ret, 0, size);
#endif
                } else
                        memset(ret, 0, size);

#ifdef JEMALLOC_STATS
                tbin->tstats.nrequests++;
#endif
#ifdef JEMALLOC_PROF
                tcache->prof_accumbytes += size;
#endif
        }

        tcache_event(tcache);
        return (ret);
}
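/*
 * Worked example of the large binind mapping (assuming 4 KiB pages, i.e.
 * PAGE_SHIFT == 12): a 5000-byte request is rounded by PAGE_CEILING() to
 * 8192 bytes (two pages), so binind = nbins + (8192 >> 12) - 1 = nbins + 1.
 * Large bins thus follow the small bins contiguously, one bin per page
 * count up to tcache_maxclass.
 */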

JEMALLOC_INLINE void
tcache_dalloc_small(tcache_t *tcache, void *ptr)
{
        arena_t *arena;
        arena_chunk_t *chunk;
        arena_run_t *run;
        arena_bin_t *bin;
        tcache_bin_t *tbin;
        tcache_bin_info_t *tbin_info;
        size_t pageind, binind;
        arena_chunk_map_t *mapelm;

        assert(arena_salloc(ptr) <= small_maxclass);

        chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
        arena = chunk->arena;
        pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> PAGE_SHIFT;
        mapelm = &chunk->map[pageind-map_bias];
        run = (arena_run_t *)((uintptr_t)chunk + (uintptr_t)((pageind -
            (mapelm->bits >> PAGE_SHIFT)) << PAGE_SHIFT));
        dassert(run->magic == ARENA_RUN_MAGIC);
        bin = run->bin;
        binind = ((uintptr_t)bin - (uintptr_t)&arena->bins) /
            sizeof(arena_bin_t);
        assert(binind < nbins);

#ifdef JEMALLOC_FILL
        if (opt_junk)
                memset(ptr, 0x5a, arena_bin_info[binind].reg_size);
#endif

        tbin = &tcache->tbins[binind];
        tbin_info = &tcache_bin_info[binind];
        if (tbin->ncached == tbin_info->ncached_max) {
                tcache_bin_flush_small(tbin, binind, (tbin_info->ncached_max >>
                    1)
#if (defined(JEMALLOC_STATS) || defined(JEMALLOC_PROF))
                    , tcache
#endif
                    );
        }
        assert(tbin->ncached < tbin_info->ncached_max);
        tbin->avail[tbin->ncached] = ptr;
        tbin->ncached++;

        tcache_event(tcache);
}
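/*
 * When the bin is full, both deallocation paths flush down to half of
 * ncached_max before pushing the new pointer, so a steady stream of frees
 * costs one bulk flush per ncached_max/2 deallocations rather than one
 * arena operation per free.
 */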

JEMALLOC_INLINE void
tcache_dalloc_large(tcache_t *tcache, void *ptr, size_t size)
{
        arena_t *arena;
        arena_chunk_t *chunk;
        size_t pageind, binind;
        tcache_bin_t *tbin;
        tcache_bin_info_t *tbin_info;

        assert((size & PAGE_MASK) == 0);
        assert(arena_salloc(ptr) > small_maxclass);
        assert(arena_salloc(ptr) <= tcache_maxclass);

        chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
        arena = chunk->arena;
        pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> PAGE_SHIFT;
        binind = nbins + (size >> PAGE_SHIFT) - 1;

#ifdef JEMALLOC_FILL
        if (opt_junk)
                memset(ptr, 0x5a, size);
#endif

        tbin = &tcache->tbins[binind];
        tbin_info = &tcache_bin_info[binind];
        if (tbin->ncached == tbin_info->ncached_max) {
                tcache_bin_flush_large(tbin, binind, (tbin_info->ncached_max >>
                    1)
#if (defined(JEMALLOC_STATS) || defined(JEMALLOC_PROF))
                    , tcache
#endif
                    );
        }
        assert(tbin->ncached < tbin_info->ncached_max);
        tbin->avail[tbin->ncached] = ptr;
        tbin->ncached++;

        tcache_event(tcache);
}
#endif

#endif /* JEMALLOC_H_INLINES */
/******************************************************************************/
#endif /* JEMALLOC_TCACHE */