#define	JEMALLOC_HUGE_C_
#include "jemalloc/internal/jemalloc_internal.h"

/******************************************************************************/
/* Data. */
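/* Huge allocation statistics, updated under huge_mtx. */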
uint64_t	huge_nmalloc;
uint64_t	huge_ndalloc;
size_t		huge_allocated;

malloc_mutex_t	huge_mtx;

/******************************************************************************/

/* Tree of chunks that are stand-alone huge allocations. */
static extent_tree_t	huge;
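/*
 * Allocate a huge region with the default (chunksize) alignment; this is a
 * thin wrapper around huge_palloc().
 */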
void *
huge_malloc(size_t size, bool zero)
{

	return (huge_palloc(size, chunksize, zero));
}
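/*
 * Allocate one or more contiguous chunks satisfying both size and alignment,
 * register the region in the huge tree, and apply junk/zero fill as
 * configured.
 */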
void *
huge_palloc(size_t size, size_t alignment, bool zero)
{
	void *ret;
	size_t csize;
	extent_node_t *node;
	bool is_zeroed;

	/* Allocate one or more contiguous chunks for this request. */

	csize = CHUNK_CEILING(size);
	if (csize == 0) {
		/* size is large enough to cause size_t wrap-around. */
		return (NULL);
	}

	/* Allocate an extent node with which to track the chunk. */
	node = base_node_alloc();
	if (node == NULL)
		return (NULL);

	/*
	 * Copy zero into is_zeroed and pass the copy to chunk_alloc(), so that
	 * it is possible to make correct junk/zero fill decisions below.
	 */
	is_zeroed = zero;
	ret = chunk_alloc(csize, alignment, false, &is_zeroed);
	if (ret == NULL) {
		base_node_dealloc(node);
		return (NULL);
	}

	/* Insert node into huge. */
	node->addr = ret;
	node->size = csize;

	malloc_mutex_lock(&huge_mtx);
	extent_tree_ad_insert(&huge, node);
	if (config_stats) {
		stats_cactive_add(csize);
		huge_nmalloc++;
		huge_allocated += csize;
	}
	malloc_mutex_unlock(&huge_mtx);

	if (config_fill && zero == false) {
		if (opt_junk)
			memset(ret, 0xa5, csize);
		else if (opt_zero && is_zeroed == false)
			memset(ret, 0, csize);
	}

	return (ret);
}
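/*
 * Try to satisfy a resize request in place.  This succeeds only if ptr is a
 * huge allocation and its chunk-rounded size already lies within
 * [CHUNK_CEILING(size), CHUNK_CEILING(size+extra)]; otherwise NULL is
 * returned and the caller must allocate new space and copy.
 */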
void *
huge_ralloc_no_move(void *ptr, size_t oldsize, size_t size, size_t extra)
{

	/*
	 * Avoid moving the allocation if the size class can be left the same.
	 */
	if (oldsize > arena_maxclass
	    && CHUNK_CEILING(oldsize) >= CHUNK_CEILING(size)
	    && CHUNK_CEILING(oldsize) <= CHUNK_CEILING(size+extra)) {
		assert(CHUNK_CEILING(oldsize) == oldsize);
		if (config_fill && opt_junk && size < oldsize) {
			memset((void *)((uintptr_t)ptr + size), 0x5a,
			    oldsize - size);
		}
		return (ptr);
	}

	/* Reallocation would require a move. */
	return (NULL);
}
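/*
 * Reallocate a huge region.  The in-place path is tried first; failing that,
 * a new region is allocated (retrying without the extra bytes if necessary),
 * the contents are copied or remapped, and the old region is released.
 */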
void *
huge_ralloc(void *ptr, size_t oldsize, size_t size, size_t extra,
    size_t alignment, bool zero)
{
	void *ret;
	size_t copysize;

	/* Try to avoid moving the allocation. */
	ret = huge_ralloc_no_move(ptr, oldsize, size, extra);
	if (ret != NULL)
		return (ret);

	/*
	 * size and oldsize are different enough that we need to use a
	 * different size class.  In that case, fall back to allocating new
	 * space and copying.
	 */
	if (alignment > chunksize)
		ret = huge_palloc(size + extra, alignment, zero);
	else
		ret = huge_malloc(size + extra, zero);

	if (ret == NULL) {
		if (extra == 0)
			return (NULL);
		/* Try again, this time without extra. */
		if (alignment > chunksize)
			ret = huge_palloc(size, alignment, zero);
		else
			ret = huge_malloc(size, zero);

		if (ret == NULL)
			return (NULL);
	}

	/*
	 * Copy at most size bytes (not size+extra), since the caller has no
	 * expectation that the extra bytes will be reliably preserved.
	 */
	copysize = (size < oldsize) ? size : oldsize;

#ifdef JEMALLOC_MREMAP
	/*
	 * Use mremap(2) if this is a huge-->huge reallocation, and neither the
	 * source nor the destination is in dss.
	 */
	if (oldsize >= chunksize && (config_dss == false || (chunk_in_dss(ptr)
	    == false && chunk_in_dss(ret) == false))) {
		size_t newsize = huge_salloc(ret);

		/*
		 * Remove ptr from the tree of huge allocations before
		 * performing the remap operation, in order to avoid the
		 * possibility of another thread acquiring that mapping before
		 * this one removes it from the tree.
		 */
		huge_dalloc(ptr, false);
		if (mremap(ptr, oldsize, newsize, MREMAP_MAYMOVE|MREMAP_FIXED,
		    ret) == MAP_FAILED) {
			/*
			 * Assuming no chunk management bugs in the allocator,
			 * the only documented way an error can occur here is
			 * if the application changed the map type for a
			 * portion of the old allocation.  This is firmly in
			 * undefined behavior territory, so write a diagnostic
			 * message, and optionally abort.
			 */
			char buf[BUFERROR_BUF];

			buferror(buf, sizeof(buf));
			malloc_printf("<jemalloc>: Error in mremap(): %s\n",
			    buf);
			if (opt_abort)
				abort();
			memcpy(ret, ptr, copysize);
			chunk_dealloc_mmap(ptr, oldsize);
		}
	} else
#endif
	{
		memcpy(ret, ptr, copysize);
		iqalloc(ptr);
	}
	return (ret);
}
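/*
 * Free a huge allocation: remove its extent node from the huge tree, update
 * the statistics, and hand the chunk back to the chunk layer.  When unmap is
 * false the pages are left mapped (used by the mremap path in huge_ralloc()).
 */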
void
huge_dalloc(void *ptr, bool unmap)
{
	extent_node_t *node, key;

	malloc_mutex_lock(&huge_mtx);

	/* Extract from tree of huge allocations. */
	key.addr = ptr;
	node = extent_tree_ad_search(&huge, &key);
	assert(node != NULL);
	assert(node->addr == ptr);
	extent_tree_ad_remove(&huge, node);

	if (config_stats) {
		stats_cactive_sub(node->size);
		huge_ndalloc++;
		huge_allocated -= node->size;
	}

	malloc_mutex_unlock(&huge_mtx);

	if (unmap && config_fill && config_dss && opt_junk)
		memset(node->addr, 0x5a, node->size);

	chunk_dealloc(node->addr, node->size, unmap);

	base_node_dealloc(node);
}
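/* Return the usable size of the huge allocation at ptr. */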
size_t
huge_salloc(const void *ptr)
{
	size_t ret;
	extent_node_t *node, key;

	malloc_mutex_lock(&huge_mtx);

	/* Extract from tree of huge allocations. */
	key.addr = __DECONST(void *, ptr);
	node = extent_tree_ad_search(&huge, &key);
	assert(node != NULL);

	ret = node->size;

	malloc_mutex_unlock(&huge_mtx);

	return (ret);
}
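/*
 * Get/set the profiling context associated with a huge allocation; the
 * context pointer is stored in the allocation's extent node.
 */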
prof_ctx_t *
huge_prof_ctx_get(const void *ptr)
{
	prof_ctx_t *ret;
	extent_node_t *node, key;

	malloc_mutex_lock(&huge_mtx);

	/* Extract from tree of huge allocations. */
	key.addr = __DECONST(void *, ptr);
	node = extent_tree_ad_search(&huge, &key);
	assert(node != NULL);

	ret = node->prof_ctx;

	malloc_mutex_unlock(&huge_mtx);

	return (ret);
}

void
huge_prof_ctx_set(const void *ptr, prof_ctx_t *ctx)
{
	extent_node_t *node, key;

	malloc_mutex_lock(&huge_mtx);

	/* Extract from tree of huge allocations. */
	key.addr = __DECONST(void *, ptr);
	node = extent_tree_ad_search(&huge, &key);
	assert(node != NULL);

	node->prof_ctx = ctx;

	malloc_mutex_unlock(&huge_mtx);
}
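/* Initialize the huge mutex, the huge tree, and the statistics counters. */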
bool
huge_boot(void)
{

	/* Initialize chunks data. */
	if (malloc_mutex_init(&huge_mtx))
		return (true);
	extent_tree_ad_new(&huge);

	if (config_stats) {
		huge_nmalloc = 0;
		huge_ndalloc = 0;
		huge_allocated = 0;
	}

	return (false);
}
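/*
 * Fork handlers: acquire huge_mtx before fork(2) and release/reinitialize it
 * afterward in the parent and child, so that the child does not inherit a
 * locked mutex.
 */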
void
huge_prefork(void)
{

	malloc_mutex_prefork(&huge_mtx);
}

void
huge_postfork_parent(void)
{

	malloc_mutex_postfork_parent(&huge_mtx);
}

void
huge_postfork_child(void)
{

	malloc_mutex_postfork_child(&huge_mtx);
}