/* redis.git: deps/jemalloc/src/huge.c */
#define JEMALLOC_HUGE_C_
#include "jemalloc/internal/jemalloc_internal.h"

/******************************************************************************/
/* Data. */

uint64_t huge_nmalloc;
uint64_t huge_ndalloc;
size_t huge_allocated;

malloc_mutex_t huge_mtx;

/******************************************************************************/

/* Tree of chunks that are stand-alone huge allocations. */
static extent_tree_t huge;

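/*
 * Allocate a huge region of at least size bytes, using the default
 * (chunksize) alignment.
 */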
void *
huge_malloc(size_t size, bool zero)
{

    return (huge_palloc(size, chunksize, zero));
}

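/*
 * Allocate one or more contiguous chunks, aligned to at least alignment
 * bytes, record the resulting extent in the huge tree, and apply junk/zero
 * fill as configured.
 */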
void *
huge_palloc(size_t size, size_t alignment, bool zero)
{
    void *ret;
    size_t csize;
    extent_node_t *node;
    bool is_zeroed;

    /* Allocate one or more contiguous chunks for this request. */

    csize = CHUNK_CEILING(size);
    if (csize == 0) {
        /* size is large enough to cause size_t wrap-around. */
        return (NULL);
    }

    /* Allocate an extent node with which to track the chunk. */
    node = base_node_alloc();
    if (node == NULL)
        return (NULL);

    /*
     * Copy zero into is_zeroed and pass the copy to chunk_alloc(), so that
     * it is possible to make correct junk/zero fill decisions below.
     */
    is_zeroed = zero;
    ret = chunk_alloc(csize, alignment, false, &is_zeroed,
        chunk_dss_prec_get());
    if (ret == NULL) {
        base_node_dealloc(node);
        return (NULL);
    }

    /* Insert node into huge. */
    node->addr = ret;
    node->size = csize;

    malloc_mutex_lock(&huge_mtx);
    extent_tree_ad_insert(&huge, node);
    if (config_stats) {
        stats_cactive_add(csize);
        huge_nmalloc++;
        huge_allocated += csize;
    }
    malloc_mutex_unlock(&huge_mtx);

    if (config_fill && zero == false) {
        if (opt_junk)
            memset(ret, 0xa5, csize);
        else if (opt_zero && is_zeroed == false)
            memset(ret, 0, csize);
    }

    return (ret);
}

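/*
 * Try to resize a huge allocation in place.  This succeeds only if the
 * allocation is already huge and its chunk-rounded size falls within the
 * chunk-rounded [size, size+extra] range; otherwise NULL is returned and the
 * caller must move the allocation.
 */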
void *
huge_ralloc_no_move(void *ptr, size_t oldsize, size_t size, size_t extra)
{

    /*
     * Avoid moving the allocation if the size class can be left the same.
     */
    if (oldsize > arena_maxclass
        && CHUNK_CEILING(oldsize) >= CHUNK_CEILING(size)
        && CHUNK_CEILING(oldsize) <= CHUNK_CEILING(size+extra)) {
        assert(CHUNK_CEILING(oldsize) == oldsize);
        if (config_fill && opt_junk && size < oldsize) {
            memset((void *)((uintptr_t)ptr + size), 0x5a,
                oldsize - size);
        }
        return (ptr);
    }

    /* Reallocation would require a move. */
    return (NULL);
}

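/*
 * Reallocate a huge region.  The in-place path is attempted first; failing
 * that, a new region is allocated (retrying without extra if necessary), the
 * contents are copied (or remapped via mremap(2) where supported), and the
 * old region is released.
 */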
void *
huge_ralloc(void *ptr, size_t oldsize, size_t size, size_t extra,
    size_t alignment, bool zero, bool try_tcache_dalloc)
{
    void *ret;
    size_t copysize;

    /* Try to avoid moving the allocation. */
    ret = huge_ralloc_no_move(ptr, oldsize, size, extra);
    if (ret != NULL)
        return (ret);

    /*
     * size and oldsize are different enough that we need to use a
     * different size class.  In that case, fall back to allocating new
     * space and copying.
     */
    if (alignment > chunksize)
        ret = huge_palloc(size + extra, alignment, zero);
    else
        ret = huge_malloc(size + extra, zero);

    if (ret == NULL) {
        if (extra == 0)
            return (NULL);
        /* Try again, this time without extra. */
        if (alignment > chunksize)
            ret = huge_palloc(size, alignment, zero);
        else
            ret = huge_malloc(size, zero);

        if (ret == NULL)
            return (NULL);
    }

    /*
     * Copy at most size bytes (not size+extra), since the caller has no
     * expectation that the extra bytes will be reliably preserved.
     */
    copysize = (size < oldsize) ? size : oldsize;

#ifdef JEMALLOC_MREMAP
    /*
     * Use mremap(2) if this is a huge-->huge reallocation, and neither the
     * source nor the destination is in dss.
     */
    if (oldsize >= chunksize && (config_dss == false || (chunk_in_dss(ptr)
        == false && chunk_in_dss(ret) == false))) {
        size_t newsize = huge_salloc(ret);

        /*
         * Remove ptr from the tree of huge allocations before
         * performing the remap operation, in order to avoid the
         * possibility of another thread acquiring that mapping before
         * this one removes it from the tree.
         */
        huge_dalloc(ptr, false);
        if (mremap(ptr, oldsize, newsize, MREMAP_MAYMOVE|MREMAP_FIXED,
            ret) == MAP_FAILED) {
            /*
             * Assuming no chunk management bugs in the allocator,
             * the only documented way an error can occur here is
             * if the application changed the map type for a
             * portion of the old allocation.  This is firmly in
             * undefined behavior territory, so write a diagnostic
             * message, and optionally abort.
             */
            char buf[BUFERROR_BUF];

            buferror(buf, sizeof(buf));
            malloc_printf("<jemalloc>: Error in mremap(): %s\n",
                buf);
            if (opt_abort)
                abort();
            memcpy(ret, ptr, copysize);
            chunk_dealloc_mmap(ptr, oldsize);
        }
    } else
#endif
    {
        memcpy(ret, ptr, copysize);
        iqallocx(ptr, try_tcache_dalloc);
    }
    return (ret);
}

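/*
 * Deallocate a huge region: remove its extent node from the huge tree,
 * update stats, and release the underlying chunk(s).
 */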
void
huge_dalloc(void *ptr, bool unmap)
{
    extent_node_t *node, key;

    malloc_mutex_lock(&huge_mtx);

    /* Extract from tree of huge allocations. */
    key.addr = ptr;
    node = extent_tree_ad_search(&huge, &key);
    assert(node != NULL);
    assert(node->addr == ptr);
    extent_tree_ad_remove(&huge, node);

    if (config_stats) {
        stats_cactive_sub(node->size);
        huge_ndalloc++;
        huge_allocated -= node->size;
    }

    malloc_mutex_unlock(&huge_mtx);

    if (unmap && config_fill && config_dss && opt_junk)
        memset(node->addr, 0x5a, node->size);

    chunk_dealloc(node->addr, node->size, unmap);

    base_node_dealloc(node);
}

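/* Return the size of the huge allocation that starts at ptr. */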
size_t
huge_salloc(const void *ptr)
{
    size_t ret;
    extent_node_t *node, key;

    malloc_mutex_lock(&huge_mtx);

    /* Extract from tree of huge allocations. */
    key.addr = __DECONST(void *, ptr);
    node = extent_tree_ad_search(&huge, &key);
    assert(node != NULL);

    ret = node->size;

    malloc_mutex_unlock(&huge_mtx);

    return (ret);
}

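/* Return the profiling context associated with the huge allocation at ptr. */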
prof_ctx_t *
huge_prof_ctx_get(const void *ptr)
{
    prof_ctx_t *ret;
    extent_node_t *node, key;

    malloc_mutex_lock(&huge_mtx);

    /* Extract from tree of huge allocations. */
    key.addr = __DECONST(void *, ptr);
    node = extent_tree_ad_search(&huge, &key);
    assert(node != NULL);

    ret = node->prof_ctx;

    malloc_mutex_unlock(&huge_mtx);

    return (ret);
}

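/* Set the profiling context associated with the huge allocation at ptr. */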
void
huge_prof_ctx_set(const void *ptr, prof_ctx_t *ctx)
{
    extent_node_t *node, key;

    malloc_mutex_lock(&huge_mtx);

    /* Extract from tree of huge allocations. */
    key.addr = __DECONST(void *, ptr);
    node = extent_tree_ad_search(&huge, &key);
    assert(node != NULL);

    node->prof_ctx = ctx;

    malloc_mutex_unlock(&huge_mtx);
}

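/*
 * Initialize huge allocation bookkeeping: mutex, extent tree, and stats
 * counters.  Returns true on error.
 */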
bool
huge_boot(void)
{

    /* Initialize chunks data. */
    if (malloc_mutex_init(&huge_mtx))
        return (true);
    extent_tree_ad_new(&huge);

    if (config_stats) {
        huge_nmalloc = 0;
        huge_ndalloc = 0;
        huge_allocated = 0;
    }

    return (false);
}

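/*
 * Fork handlers: acquire huge_mtx before fork(2) and release it in the
 * parent and child afterward, so the mutex is left in a consistent state.
 */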
void
huge_prefork(void)
{

    malloc_mutex_prefork(&huge_mtx);
}

void
huge_postfork_parent(void)
{

    malloc_mutex_postfork_parent(&huge_mtx);
}

void
huge_postfork_child(void)
{

    malloc_mutex_postfork_child(&huge_mtx);
}