/* deps/jemalloc/src/huge.c */
#define JEMALLOC_HUGE_C_
#include "jemalloc/internal/jemalloc_internal.h"

/******************************************************************************/
/* Data. */

#ifdef JEMALLOC_STATS
uint64_t huge_nmalloc;
uint64_t huge_ndalloc;
size_t huge_allocated;
#endif

malloc_mutex_t huge_mtx;

/******************************************************************************/

/* Tree of chunks that are stand-alone huge allocations. */
static extent_tree_t huge;
void *
huge_malloc(size_t size, bool zero)
{
	void *ret;
	size_t csize;
	extent_node_t *node;

	/* Allocate one or more contiguous chunks for this request. */

	csize = CHUNK_CEILING(size);
	if (csize == 0) {
		/* size is large enough to cause size_t wrap-around. */
		return (NULL);
	}
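	/*
	 * Added note: CHUNK_CEILING() rounds size up to a chunk multiple via
	 * (size + chunksize_mask) & ~chunksize_mask, so any size within
	 * chunksize_mask bytes of SIZE_MAX wraps the addition and rounds to
	 * 0; e.g. size = SIZE_MAX - 1 yields csize == 0, and the request is
	 * refused here rather than silently backed by a tiny mapping.
	 */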

	/* Allocate an extent node with which to track the chunk. */
	node = base_node_alloc();
	if (node == NULL)
		return (NULL);

	ret = chunk_alloc(csize, false, &zero);
	if (ret == NULL) {
		base_node_dealloc(node);
		return (NULL);
	}

	/* Insert node into huge. */
	node->addr = ret;
	node->size = csize;

	malloc_mutex_lock(&huge_mtx);
	extent_tree_ad_insert(&huge, node);
#ifdef JEMALLOC_STATS
	stats_cactive_add(csize);
	huge_nmalloc++;
	huge_allocated += csize;
#endif
	malloc_mutex_unlock(&huge_mtx);

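	/*
	 * Added note: the 0xa5 junk pattern marks freshly allocated memory,
	 * while huge_dalloc() below fills with 0x5a before unmapping, so
	 * reads of uninitialized memory and use-after-free reads are
	 * distinguishable when debugging with opt_junk enabled.
	 */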
#ifdef JEMALLOC_FILL
	if (zero == false) {
		if (opt_junk)
			memset(ret, 0xa5, csize);
		else if (opt_zero)
			memset(ret, 0, csize);
	}
#endif

	return (ret);
}

/* Only handles large allocations that require more than chunk alignment. */
void *
huge_palloc(size_t size, size_t alignment, bool zero)
{
	void *ret;
	size_t alloc_size, chunk_size, offset;
	extent_node_t *node;

	/*
	 * This allocation requires alignment that is even larger than chunk
	 * alignment. This means that huge_malloc() isn't good enough.
	 *
	 * Allocate almost twice as many chunks as are demanded by the size or
	 * alignment, in order to ensure that the alignment can be achieved,
	 * then unmap leading and trailing chunks.
	 */
	assert(alignment > chunksize);

	chunk_size = CHUNK_CEILING(size);

	if (size >= alignment)
		alloc_size = chunk_size + alignment - chunksize;
	else
		alloc_size = (alignment << 1) - chunksize;
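
	/*
	 * Worked example (added; assumes the default 4 MiB chunksize): for
	 * size = 4 MiB and alignment = 8 MiB, size < alignment, so
	 * alloc_size = (8 << 1) - 4 = 12 MiB. Starting from any chunk-aligned
	 * address, the next 8 MiB boundary is at most alignment - chunksize =
	 * 4 MiB away, leaving at least chunk_size = 4 MiB of the 12 MiB
	 * mapping beyond it, so the alignment can always be satisfied.
	 */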

	/* Allocate an extent node with which to track the chunk. */
	node = base_node_alloc();
	if (node == NULL)
		return (NULL);

	ret = chunk_alloc(alloc_size, false, &zero);
	if (ret == NULL) {
		base_node_dealloc(node);
		return (NULL);
	}
	offset = (uintptr_t)ret & (alignment - 1);
	assert((offset & chunksize_mask) == 0);
	assert(offset < alloc_size);
	if (offset == 0) {
		/* Trim trailing space. */
		chunk_dealloc((void *)((uintptr_t)ret + chunk_size), alloc_size
		    - chunk_size);
	} else {
		size_t trailsize;

		/* Trim leading space. */
		chunk_dealloc(ret, alignment - offset);

		ret = (void *)((uintptr_t)ret + (alignment - offset));

		trailsize = alloc_size - (alignment - offset) - chunk_size;
		if (trailsize != 0) {
			/* Trim trailing space. */
			assert(trailsize < alloc_size);
			chunk_dealloc((void *)((uintptr_t)ret + chunk_size),
			    trailsize);
		}
	}
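
	/*
	 * Continuing the example above (added): if the 12 MiB mapping lands
	 * 4 MiB past an 8 MiB boundary, offset = 4 MiB; the leading
	 * alignment - offset = 4 MiB is unmapped, ret advances to the
	 * boundary, and trailsize = 12 - 4 - 4 = 4 MiB is unmapped from the
	 * tail, leaving exactly one aligned 4 MiB chunk.
	 */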

	/* Insert node into huge. */
	node->addr = ret;
	node->size = chunk_size;

	malloc_mutex_lock(&huge_mtx);
	extent_tree_ad_insert(&huge, node);
#ifdef JEMALLOC_STATS
	stats_cactive_add(chunk_size);
	huge_nmalloc++;
	huge_allocated += chunk_size;
#endif
	malloc_mutex_unlock(&huge_mtx);

#ifdef JEMALLOC_FILL
	if (zero == false) {
		if (opt_junk)
			memset(ret, 0xa5, chunk_size);
		else if (opt_zero)
			memset(ret, 0, chunk_size);
	}
#endif

	return (ret);
}

void *
huge_ralloc_no_move(void *ptr, size_t oldsize, size_t size, size_t extra)
{

	/*
	 * Avoid moving the allocation if the size class can be left the same.
	 */
	if (oldsize > arena_maxclass
	    && CHUNK_CEILING(oldsize) >= CHUNK_CEILING(size)
	    && CHUNK_CEILING(oldsize) <= CHUNK_CEILING(size+extra)) {
		assert(CHUNK_CEILING(oldsize) == oldsize);
#ifdef JEMALLOC_FILL
		if (opt_junk && size < oldsize) {
			memset((void *)((uintptr_t)ptr + size), 0x5a,
			    oldsize - size);
		}
#endif
		return (ptr);
	}

	/* Reallocation would require a move. */
	return (NULL);
}
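
/*
 * Worked example for huge_ralloc_no_move() (added; assumes 4 MiB chunks): an
 * 8 MiB allocation can satisfy size = 5 MiB, extra = 0 in place, since
 * CHUNK_CEILING(5 MiB) == 8 MiB == CHUNK_CEILING(oldsize); the backing chunks
 * are reused as-is, and with opt_junk only the abandoned tail is filled.
 */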

void *
huge_ralloc(void *ptr, size_t oldsize, size_t size, size_t extra,
    size_t alignment, bool zero)
{
	void *ret;
	size_t copysize;

	/* Try to avoid moving the allocation. */
	ret = huge_ralloc_no_move(ptr, oldsize, size, extra);
	if (ret != NULL)
		return (ret);

	/*
	 * size and oldsize are different enough that we need to use a
	 * different size class. In that case, fall back to allocating new
	 * space and copying.
	 */
	if (alignment > chunksize)
		ret = huge_palloc(size + extra, alignment, zero);
	else
		ret = huge_malloc(size + extra, zero);

	if (ret == NULL) {
		if (extra == 0)
			return (NULL);
		/* Try again, this time without extra. */
		if (alignment > chunksize)
			ret = huge_palloc(size, alignment, zero);
		else
			ret = huge_malloc(size, zero);

		if (ret == NULL)
			return (NULL);
	}

	/*
	 * Copy at most size bytes (not size+extra), since the caller has no
	 * expectation that the extra bytes will be reliably preserved.
	 */
	copysize = (size < oldsize) ? size : oldsize;

	/*
	 * Use mremap(2) if this is a huge-->huge reallocation, and neither
	 * the source nor the destination is in swap or dss.
	 */
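	/*
	 * Added clarification (not in the original comment): MREMAP_FIXED,
	 * which requires MREMAP_MAYMOVE, tells the kernel to remap the old
	 * pages onto the address that chunk_alloc() already returned,
	 * replacing the destination mapping. The data moves via page-table
	 * updates rather than a byte-by-byte memcpy(), which is the payoff
	 * for huge regions.
	 */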
#ifdef JEMALLOC_MREMAP_FIXED
	if (oldsize >= chunksize
# ifdef JEMALLOC_SWAP
	    && (swap_enabled == false || (chunk_in_swap(ptr) == false &&
	    chunk_in_swap(ret) == false))
# endif
# ifdef JEMALLOC_DSS
	    && chunk_in_dss(ptr) == false && chunk_in_dss(ret) == false
# endif
	    ) {
		size_t newsize = huge_salloc(ret);

		if (mremap(ptr, oldsize, newsize, MREMAP_MAYMOVE|MREMAP_FIXED,
		    ret) == MAP_FAILED) {
			/*
			 * Assuming no chunk management bugs in the allocator,
			 * the only documented way an error can occur here is
			 * if the application changed the map type for a
			 * portion of the old allocation. This is firmly in
			 * undefined behavior territory, so write a diagnostic
			 * message, and optionally abort.
			 */
			char buf[BUFERROR_BUF];

			buferror(errno, buf, sizeof(buf));
			malloc_write("<jemalloc>: Error in mremap(): ");
			malloc_write(buf);
			malloc_write("\n");
			if (opt_abort)
				abort();
			memcpy(ret, ptr, copysize);
			idalloc(ptr);
		} else
			huge_dalloc(ptr, false);
	} else
#endif
	{
		memcpy(ret, ptr, copysize);
		idalloc(ptr);
	}
	return (ret);
}

void
huge_dalloc(void *ptr, bool unmap)
{
	extent_node_t *node, key;

	malloc_mutex_lock(&huge_mtx);

	/* Extract from tree of huge allocations. */
	key.addr = ptr;
	node = extent_tree_ad_search(&huge, &key);
	assert(node != NULL);
	assert(node->addr == ptr);
	extent_tree_ad_remove(&huge, node);

#ifdef JEMALLOC_STATS
	stats_cactive_sub(node->size);
	huge_ndalloc++;
	huge_allocated -= node->size;
#endif

	malloc_mutex_unlock(&huge_mtx);

	if (unmap) {
		/* Unmap chunk. */
#ifdef JEMALLOC_FILL
#if (defined(JEMALLOC_SWAP) || defined(JEMALLOC_DSS))
		if (opt_junk)
			memset(node->addr, 0x5a, node->size);
#endif
#endif
		chunk_dealloc(node->addr, node->size);
	}

	base_node_dealloc(node);
}

size_t
huge_salloc(const void *ptr)
{
	size_t ret;
	extent_node_t *node, key;

	malloc_mutex_lock(&huge_mtx);

	/* Look up in the tree of huge allocations. */
	key.addr = __DECONST(void *, ptr);
	node = extent_tree_ad_search(&huge, &key);
	assert(node != NULL);

	ret = node->size;

	malloc_mutex_unlock(&huge_mtx);

	return (ret);
}

#ifdef JEMALLOC_PROF
prof_ctx_t *
huge_prof_ctx_get(const void *ptr)
{
	prof_ctx_t *ret;
	extent_node_t *node, key;

	malloc_mutex_lock(&huge_mtx);

	/* Look up in the tree of huge allocations. */
	key.addr = __DECONST(void *, ptr);
	node = extent_tree_ad_search(&huge, &key);
	assert(node != NULL);

	ret = node->prof_ctx;

	malloc_mutex_unlock(&huge_mtx);

	return (ret);
}

void
huge_prof_ctx_set(const void *ptr, prof_ctx_t *ctx)
{
	extent_node_t *node, key;

	malloc_mutex_lock(&huge_mtx);

	/* Look up in the tree of huge allocations. */
	key.addr = __DECONST(void *, ptr);
	node = extent_tree_ad_search(&huge, &key);
	assert(node != NULL);

	node->prof_ctx = ctx;

	malloc_mutex_unlock(&huge_mtx);
}
#endif

bool
huge_boot(void)
{

	/* Initialize chunk data. */
	if (malloc_mutex_init(&huge_mtx))
		return (true);
	extent_tree_ad_new(&huge);

#ifdef JEMALLOC_STATS
	huge_nmalloc = 0;
	huge_ndalloc = 0;
	huge_allocated = 0;
#endif

	return (false);
}