/* deps/jemalloc/src/chunk.c */
#define JEMALLOC_CHUNK_C_
#include "jemalloc/internal/jemalloc_internal.h"

/******************************************************************************/
/* Data. */

size_t opt_lg_chunk = LG_CHUNK_DEFAULT;
#ifdef JEMALLOC_SWAP
bool opt_overcommit = true;
#endif

#if (defined(JEMALLOC_STATS) || defined(JEMALLOC_PROF))
malloc_mutex_t chunks_mtx;
chunk_stats_t stats_chunks;
#endif

#ifdef JEMALLOC_IVSALLOC
rtree_t *chunks_rtree;
#endif

/* Various chunk-related settings. */
size_t chunksize;
size_t chunksize_mask; /* (chunksize - 1). */
size_t chunk_npages;
size_t map_bias;
size_t arena_maxclass; /* Max size class for arenas. */

/******************************************************************************/

/*
 * If the caller specifies (*zero == false), it is still possible to receive
 * zeroed memory, in which case *zero is toggled to true. arena_chunk_alloc()
 * takes advantage of this to avoid demanding zeroed chunks, while still
 * benefiting from them when they are returned.
 */
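/*
 * A minimal caller sketch (illustrative only, not upstream code), following
 * the pattern arena_chunk_alloc() uses:
 *
 *	bool zero = false;
 *	void *chunk = chunk_alloc(chunksize, false, &zero);
 *	if (chunk != NULL && zero == false)
 *		memset(chunk, 0, chunksize);
 *
 * The memset runs only when the chunk was not already zeroed by the OS.
 */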
void *
chunk_alloc(size_t size, bool base, bool *zero)
{
	void *ret;

	assert(size != 0);
	assert((size & chunksize_mask) == 0);
#ifdef JEMALLOC_SWAP
	if (swap_enabled) {
		ret = chunk_alloc_swap(size, zero);
		if (ret != NULL)
			goto RETURN;
	}

	if (swap_enabled == false || opt_overcommit) {
#endif
#ifdef JEMALLOC_DSS
		ret = chunk_alloc_dss(size, zero);
		if (ret != NULL)
			goto RETURN;
#endif
		ret = chunk_alloc_mmap(size);
		if (ret != NULL) {
			*zero = true;
			goto RETURN;
		}
#ifdef JEMALLOC_SWAP
	}
#endif

	/* All strategies for allocation failed. */
	ret = NULL;
RETURN:
#ifdef JEMALLOC_IVSALLOC
	if (base == false && ret != NULL) {
		if (rtree_set(chunks_rtree, (uintptr_t)ret, ret)) {
			chunk_dealloc(ret, size, true);
			return (NULL);
		}
	}
#endif
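	/*
	 * Account for the newly allocated chunks and, when heap profiling is
	 * active, trigger a profile dump each time the chunk high-water mark
	 * is exceeded.
	 */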
#if (defined(JEMALLOC_STATS) || defined(JEMALLOC_PROF))
	if (ret != NULL) {
#  ifdef JEMALLOC_PROF
		bool gdump;
#  endif
		malloc_mutex_lock(&chunks_mtx);
#  ifdef JEMALLOC_STATS
		stats_chunks.nchunks += (size / chunksize);
#  endif
		stats_chunks.curchunks += (size / chunksize);
		if (stats_chunks.curchunks > stats_chunks.highchunks) {
			stats_chunks.highchunks = stats_chunks.curchunks;
#  ifdef JEMALLOC_PROF
			gdump = true;
#  endif
		}
#  ifdef JEMALLOC_PROF
		else
			gdump = false;
#  endif
		malloc_mutex_unlock(&chunks_mtx);
#  ifdef JEMALLOC_PROF
		if (opt_prof && opt_prof_gdump && gdump)
			prof_gdump();
#  endif
	}
#endif

	assert(CHUNK_ADDR2BASE(ret) == ret);
	return (ret);
}

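/*
 * Return a chunk to the system. If unmap is false, only the bookkeeping
 * (radix tree registration and statistics) is updated; the pages themselves
 * are left mapped.
 */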
void
chunk_dealloc(void *chunk, size_t size, bool unmap)
{

	assert(chunk != NULL);
	assert(CHUNK_ADDR2BASE(chunk) == chunk);
	assert(size != 0);
	assert((size & chunksize_mask) == 0);

#ifdef JEMALLOC_IVSALLOC
	rtree_set(chunks_rtree, (uintptr_t)chunk, NULL);
#endif
#if (defined(JEMALLOC_STATS) || defined(JEMALLOC_PROF))
	malloc_mutex_lock(&chunks_mtx);
	stats_chunks.curchunks -= (size / chunksize);
	malloc_mutex_unlock(&chunks_mtx);
#endif

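	/*
	 * The swap and DSS deallocation functions return false on success, in
	 * which case the chunk has been fully handled; otherwise fall through
	 * to munmap()-based deallocation.
	 */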
	if (unmap) {
#ifdef JEMALLOC_SWAP
		if (swap_enabled && chunk_dealloc_swap(chunk, size) == false)
			return;
#endif
#ifdef JEMALLOC_DSS
		if (chunk_dealloc_dss(chunk, size) == false)
			return;
#endif
		chunk_dealloc_mmap(chunk, size);
	}
}

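/*
 * Initialize chunk-related state. Following the convention used throughout
 * jemalloc, the boot functions return true on error and false on success.
 */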
bool
chunk_boot(void)
{

	/* Set variables according to the value of opt_lg_chunk. */
	chunksize = (ZU(1) << opt_lg_chunk);
	assert(chunksize >= PAGE_SIZE);
	chunksize_mask = chunksize - 1;
	chunk_npages = (chunksize >> PAGE_SHIFT);

#if (defined(JEMALLOC_STATS) || defined(JEMALLOC_PROF))
	if (malloc_mutex_init(&chunks_mtx))
		return (true);
	memset(&stats_chunks, 0, sizeof(chunk_stats_t));
#endif
#ifdef JEMALLOC_SWAP
	if (chunk_swap_boot())
		return (true);
#endif
	if (chunk_mmap_boot())
		return (true);
#ifdef JEMALLOC_DSS
	if (chunk_dss_boot())
		return (true);
#endif
#ifdef JEMALLOC_IVSALLOC
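	/*
	 * Size the radix tree to cover every possible chunk address:
	 * (1 << (LG_SIZEOF_PTR+3)) is the number of bits in a pointer, and
	 * the low opt_lg_chunk bits of any chunk address are always zero, so
	 * they need not be represented in the tree.
	 */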
	chunks_rtree = rtree_new((ZU(1) << (LG_SIZEOF_PTR+3)) - opt_lg_chunk);
	if (chunks_rtree == NULL)
		return (true);
#endif

	return (false);
}