deps/jemalloc/src/huge.c
#define	JEMALLOC_HUGE_C_
#include "jemalloc/internal/jemalloc_internal.h"

/******************************************************************************/
/* Data. */

#ifdef JEMALLOC_STATS
uint64_t	huge_nmalloc;
uint64_t	huge_ndalloc;
size_t		huge_allocated;
#endif

malloc_mutex_t	huge_mtx;

/******************************************************************************/

/* Tree of chunks that are stand-alone huge allocations. */
static extent_tree_t	huge;

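/*
 * Allocate a huge object: round the request up to a multiple of the chunk
 * size, back it with its own chunk(s), and record it in the huge tree so
 * that huge_salloc()/huge_dalloc() can find it later.
 */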
void *
huge_malloc(size_t size, bool zero)
{
	void *ret;
	size_t csize;
	extent_node_t *node;

	/* Allocate one or more contiguous chunks for this request. */

	csize = CHUNK_CEILING(size);
	if (csize == 0) {
		/* size is large enough to cause size_t wrap-around. */
		return (NULL);
	}

	/* Allocate an extent node with which to track the chunk. */
	node = base_node_alloc();
	if (node == NULL)
		return (NULL);

	ret = chunk_alloc(csize, false, &zero);
	if (ret == NULL) {
		base_node_dealloc(node);
		return (NULL);
	}

	/* Insert node into huge. */
	node->addr = ret;
	node->size = csize;

	malloc_mutex_lock(&huge_mtx);
	extent_tree_ad_insert(&huge, node);
#ifdef JEMALLOC_STATS
	stats_cactive_add(csize);
	huge_nmalloc++;
	huge_allocated += csize;
#endif
	malloc_mutex_unlock(&huge_mtx);

#ifdef JEMALLOC_FILL
	if (zero == false) {
		if (opt_junk)
			memset(ret, 0xa5, csize);
		else if (opt_zero)
			memset(ret, 0, csize);
	}
#endif

	return (ret);
}

/* Only handles large allocations that require more than chunk alignment. */
void *
huge_palloc(size_t size, size_t alignment, bool zero)
{
	void *ret;
	size_t alloc_size, chunk_size, offset;
	extent_node_t *node;

	/*
	 * This allocation requires alignment that is even larger than chunk
	 * alignment.  This means that huge_malloc() isn't good enough.
	 *
	 * Allocate almost twice as many chunks as are demanded by the size or
	 * alignment, in order to assure the alignment can be achieved, then
	 * unmap leading and trailing chunks.
	 */
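	/*
	 * For example, with 4 MiB chunks, a 4 MiB request with 16 MiB
	 * alignment gives alloc_size = 2*16 - 4 = 28 MiB; the first 16
	 * MiB-aligned address in that mapping lies at most 12 MiB
	 * (alignment - chunksize) from its start, leaving at least
	 * chunk_size bytes after the leading and trailing trims.
	 */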
	assert(alignment > chunksize);

	chunk_size = CHUNK_CEILING(size);

	if (size >= alignment)
		alloc_size = chunk_size + alignment - chunksize;
	else
		alloc_size = (alignment << 1) - chunksize;

	/* Allocate an extent node with which to track the chunk. */
	node = base_node_alloc();
	if (node == NULL)
		return (NULL);

	ret = chunk_alloc(alloc_size, false, &zero);
	if (ret == NULL) {
		base_node_dealloc(node);
		return (NULL);
	}

	offset = (uintptr_t)ret & (alignment - 1);
	assert((offset & chunksize_mask) == 0);
	assert(offset < alloc_size);
	if (offset == 0) {
		/* Trim trailing space. */
		chunk_dealloc((void *)((uintptr_t)ret + chunk_size), alloc_size
		    - chunk_size, true);
	} else {
		size_t trailsize;

		/* Trim leading space. */
		chunk_dealloc(ret, alignment - offset, true);

		ret = (void *)((uintptr_t)ret + (alignment - offset));

		trailsize = alloc_size - (alignment - offset) - chunk_size;
		if (trailsize != 0) {
			/* Trim trailing space. */
			assert(trailsize < alloc_size);
			chunk_dealloc((void *)((uintptr_t)ret + chunk_size),
			    trailsize, true);
		}
	}

	/* Insert node into huge. */
	node->addr = ret;
	node->size = chunk_size;

	malloc_mutex_lock(&huge_mtx);
	extent_tree_ad_insert(&huge, node);
#ifdef JEMALLOC_STATS
	stats_cactive_add(chunk_size);
	huge_nmalloc++;
	huge_allocated += chunk_size;
#endif
	malloc_mutex_unlock(&huge_mtx);

#ifdef JEMALLOC_FILL
	if (zero == false) {
		if (opt_junk)
			memset(ret, 0xa5, chunk_size);
		else if (opt_zero)
			memset(ret, 0, chunk_size);
	}
#endif

	return (ret);
}

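/*
 * Attempt to satisfy a huge reallocation request in place.  Returns ptr if
 * the existing chunk(s) can absorb the new size (junk-filling the now-unused
 * tail when shrinking, if so configured), or NULL if the allocation would
 * have to be moved.
 */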
void *
huge_ralloc_no_move(void *ptr, size_t oldsize, size_t size, size_t extra)
{

	/*
	 * Avoid moving the allocation if the size class can be left the same.
	 */
	if (oldsize > arena_maxclass
	    && CHUNK_CEILING(oldsize) >= CHUNK_CEILING(size)
	    && CHUNK_CEILING(oldsize) <= CHUNK_CEILING(size+extra)) {
		assert(CHUNK_CEILING(oldsize) == oldsize);
#ifdef JEMALLOC_FILL
		if (opt_junk && size < oldsize) {
			memset((void *)((uintptr_t)ptr + size), 0x5a,
			    oldsize - size);
		}
#endif
		return (ptr);
	}

	/* Reallocation would require a move. */
	return (NULL);
}

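/*
 * Reallocate a huge object.  First try to resize in place via
 * huge_ralloc_no_move(); otherwise allocate new space (retrying without the
 * extra bytes on failure), copy the contents, and free the old allocation,
 * using mremap(2) to avoid the copy where the platform and chunk types
 * permit.
 */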
void *
huge_ralloc(void *ptr, size_t oldsize, size_t size, size_t extra,
    size_t alignment, bool zero)
{
	void *ret;
	size_t copysize;

	/* Try to avoid moving the allocation. */
	ret = huge_ralloc_no_move(ptr, oldsize, size, extra);
	if (ret != NULL)
		return (ret);

	/*
	 * size and oldsize are different enough that we need to use a
	 * different size class.  In that case, fall back to allocating new
	 * space and copying.
	 */
	if (alignment > chunksize)
		ret = huge_palloc(size + extra, alignment, zero);
	else
		ret = huge_malloc(size + extra, zero);

	if (ret == NULL) {
		if (extra == 0)
			return (NULL);
		/* Try again, this time without extra. */
		if (alignment > chunksize)
			ret = huge_palloc(size, alignment, zero);
		else
			ret = huge_malloc(size, zero);

		if (ret == NULL)
			return (NULL);
	}

	/*
	 * Copy at most size bytes (not size+extra), since the caller has no
	 * expectation that the extra bytes will be reliably preserved.
	 */
	copysize = (size < oldsize) ? size : oldsize;

	/*
	 * Use mremap(2) if this is a huge-->huge reallocation, and neither the
	 * source nor the destination are in swap or dss.
	 */
#ifdef JEMALLOC_MREMAP_FIXED
	if (oldsize >= chunksize
# ifdef JEMALLOC_SWAP
	    && (swap_enabled == false || (chunk_in_swap(ptr) == false &&
	    chunk_in_swap(ret) == false))
# endif
# ifdef JEMALLOC_DSS
	    && chunk_in_dss(ptr) == false && chunk_in_dss(ret) == false
# endif
	    ) {
		size_t newsize = huge_salloc(ret);

		/*
		 * Remove ptr from the tree of huge allocations before
		 * performing the remap operation, in order to avoid the
		 * possibility of another thread acquiring that mapping before
		 * this one removes it from the tree.
		 */
		huge_dalloc(ptr, false);
		if (mremap(ptr, oldsize, newsize, MREMAP_MAYMOVE|MREMAP_FIXED,
		    ret) == MAP_FAILED) {
			/*
			 * Assuming no chunk management bugs in the allocator,
			 * the only documented way an error can occur here is
			 * if the application changed the map type for a
			 * portion of the old allocation.  This is firmly in
			 * undefined behavior territory, so write a diagnostic
			 * message, and optionally abort.
			 */
			char buf[BUFERROR_BUF];

			buferror(errno, buf, sizeof(buf));
			malloc_write("<jemalloc>: Error in mremap(): ");
			malloc_write(buf);
			malloc_write("\n");
			if (opt_abort)
				abort();
			memcpy(ret, ptr, copysize);
			chunk_dealloc_mmap(ptr, oldsize);
		}
	} else
#endif
	{
		memcpy(ret, ptr, copysize);
		idalloc(ptr);
	}
	return (ret);
}

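/*
 * Free a huge allocation: remove its extent node from the huge tree, update
 * stats, release the underlying chunk(s), and return the node to the base
 * allocator.  When unmap is false the mapping itself is left in place, as
 * huge_ralloc() relies on before calling mremap().
 */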
void
huge_dalloc(void *ptr, bool unmap)
{
	extent_node_t *node, key;

	malloc_mutex_lock(&huge_mtx);

	/* Extract from tree of huge allocations. */
	key.addr = ptr;
	node = extent_tree_ad_search(&huge, &key);
	assert(node != NULL);
	assert(node->addr == ptr);
	extent_tree_ad_remove(&huge, node);

#ifdef JEMALLOC_STATS
	stats_cactive_sub(node->size);
	huge_ndalloc++;
	huge_allocated -= node->size;
#endif

	malloc_mutex_unlock(&huge_mtx);

	if (unmap) {
		/* Unmap chunk. */
#ifdef JEMALLOC_FILL
#if (defined(JEMALLOC_SWAP) || defined(JEMALLOC_DSS))
		if (opt_junk)
			memset(node->addr, 0x5a, node->size);
#endif
#endif
	}

	chunk_dealloc(node->addr, node->size, unmap);

	base_node_dealloc(node);
}

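/* Return the usable size of a huge allocation, as recorded in the tree. */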
size_t
huge_salloc(const void *ptr)
{
	size_t ret;
	extent_node_t *node, key;

	malloc_mutex_lock(&huge_mtx);

	/* Extract from tree of huge allocations. */
	key.addr = __DECONST(void *, ptr);
	node = extent_tree_ad_search(&huge, &key);
	assert(node != NULL);

	ret = node->size;

	malloc_mutex_unlock(&huge_mtx);

	return (ret);
}

#ifdef JEMALLOC_PROF
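/* Return the profiling context associated with a huge allocation. */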
prof_ctx_t *
huge_prof_ctx_get(const void *ptr)
{
	prof_ctx_t *ret;
	extent_node_t *node, key;

	malloc_mutex_lock(&huge_mtx);

	/* Extract from tree of huge allocations. */
	key.addr = __DECONST(void *, ptr);
	node = extent_tree_ad_search(&huge, &key);
	assert(node != NULL);

	ret = node->prof_ctx;

	malloc_mutex_unlock(&huge_mtx);

	return (ret);
}

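/* Associate a profiling context with a huge allocation. */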
void
huge_prof_ctx_set(const void *ptr, prof_ctx_t *ctx)
{
	extent_node_t *node, key;

	malloc_mutex_lock(&huge_mtx);

	/* Extract from tree of huge allocations. */
	key.addr = __DECONST(void *, ptr);
	node = extent_tree_ad_search(&huge, &key);
	assert(node != NULL);

	node->prof_ctx = ctx;

	malloc_mutex_unlock(&huge_mtx);
}
#endif

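/*
 * Initialize the huge allocation subsystem: the mutex, the extent tree, and
 * (if enabled) the stats counters.  Returns true on error.
 */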
bool
huge_boot(void)
{

	/* Initialize chunks data. */
	if (malloc_mutex_init(&huge_mtx))
		return (true);
	extent_tree_ad_new(&huge);

#ifdef JEMALLOC_STATS
	huge_nmalloc = 0;
	huge_ndalloc = 0;
	huge_allocated = 0;
#endif

	return (false);
}