#define JEMALLOC_CHUNK_DSS_C_
#include "jemalloc/internal/jemalloc_internal.h"
#ifdef JEMALLOC_DSS
/******************************************************************************/
/* Data. */

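/*
 * Protects dss_base, dss_prev, dss_max, and the extent trees below.
 */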
malloc_mutex_t dss_mtx;

/* Base address of the DSS. */
static void *dss_base;
/* Current end of the DSS, or ((void *)-1) if the DSS is exhausted. */
static void *dss_prev;
/* Current upper limit on DSS addresses. */
static void *dss_max;

/*
 * Trees of chunks that were previously allocated (trees differ only in node
 * ordering). These are used when allocating chunks, in an attempt to re-use
 * address space. Depending on function, different tree orderings are needed,
 * which is why there are two trees with the same contents.
 */
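/*
 * Concretely: chunk_recycle_dss() does best-fit lookups keyed by
 * (size, address) in dss_chunks_szad, while chunk_dealloc_dss_record()
 * finds address-adjacent neighbors to coalesce via dss_chunks_ad.
 */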
static extent_tree_t dss_chunks_szad;
static extent_tree_t dss_chunks_ad;

/******************************************************************************/
/* Function prototypes for non-inline static functions. */

static void *chunk_recycle_dss(size_t size, bool *zero);
static extent_node_t *chunk_dealloc_dss_record(void *chunk, size_t size);

/******************************************************************************/

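/*
 * Satisfy a size-byte request from a previously deallocated (cached) DSS
 * extent, splitting off any remainder. Returns NULL if no cached extent is
 * large enough.
 */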
static void *
chunk_recycle_dss(size_t size, bool *zero)
{
	extent_node_t *node, key;

	key.addr = NULL;
	key.size = size;
	malloc_mutex_lock(&dss_mtx);
	node = extent_tree_szad_nsearch(&dss_chunks_szad, &key);
	if (node != NULL) {
		void *ret = node->addr;

		/* Remove node from the tree. */
		extent_tree_szad_remove(&dss_chunks_szad, node);
		if (node->size == size) {
			extent_tree_ad_remove(&dss_chunks_ad, node);
			base_node_dealloc(node);
		} else {
			/*
			 * Insert the remainder of node's address range as a
			 * smaller chunk. Its position within dss_chunks_ad
			 * does not change.
			 */
			assert(node->size > size);
			node->addr = (void *)((uintptr_t)node->addr + size);
			node->size -= size;
			extent_tree_szad_insert(&dss_chunks_szad, node);
		}
		malloc_mutex_unlock(&dss_mtx);

		if (*zero)
			memset(ret, 0, size);
		return (ret);
	}
	malloc_mutex_unlock(&dss_mtx);

	return (NULL);
}

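/*
 * Allocate a size-byte, chunk-aligned region, preferably by recycling a
 * cached DSS extent, otherwise by extending the DSS via sbrk(2). Returns
 * NULL on failure. On success, the memory is zeroed if *zero is true on
 * return.
 */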
void *
chunk_alloc_dss(size_t size, bool *zero)
{
	void *ret;

	ret = chunk_recycle_dss(size, zero);
	if (ret != NULL)
		return (ret);

	/*
	 * sbrk() uses a signed increment argument, so take care not to
	 * interpret a huge allocation request as a negative increment.
	 */
	if ((intptr_t)size < 0)
		return (NULL);

	malloc_mutex_lock(&dss_mtx);
	if (dss_prev != (void *)-1) {
		intptr_t incr;

		/*
		 * The loop is necessary to recover from races with other
		 * threads that are using the DSS for something other than
		 * malloc.
		 */
		do {
			/* Get the current end of the DSS. */
			dss_max = sbrk(0);

			/*
			 * Calculate how much padding is necessary to
			 * chunk-align the end of the DSS.
			 */
			incr = (intptr_t)size
			    - (intptr_t)CHUNK_ADDR2OFFSET(dss_max);
			if (incr == (intptr_t)size)
				ret = dss_max;
			else {
				ret = (void *)((intptr_t)dss_max + incr);
				incr += size;
			}

			dss_prev = sbrk(incr);
			if (dss_prev == dss_max) {
				/* Success. */
				dss_max = (void *)((intptr_t)dss_prev + incr);
				malloc_mutex_unlock(&dss_mtx);
				*zero = true;
				return (ret);
			}
		} while (dss_prev != (void *)-1);
	}
	malloc_mutex_unlock(&dss_mtx);

	return (NULL);
}

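/*
 * Record [chunk, chunk+size) as unused in the extent trees, coalescing with
 * address-adjacent cached extents where possible. Called with dss_mtx held
 * (the lock may be dropped temporarily to allocate a node). Returns the
 * resulting node, or NULL if node allocation failed.
 */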
static extent_node_t *
chunk_dealloc_dss_record(void *chunk, size_t size)
{
	extent_node_t *xnode, *node, *prev, key;

	xnode = NULL;
	while (true) {
		key.addr = (void *)((uintptr_t)chunk + size);
		node = extent_tree_ad_nsearch(&dss_chunks_ad, &key);
		/* Try to coalesce forward. */
		if (node != NULL && node->addr == key.addr) {
			/*
			 * Coalesce chunk with the following address range.
			 * This does not change the position within
			 * dss_chunks_ad, so only remove/insert from/into
			 * dss_chunks_szad.
			 */
			extent_tree_szad_remove(&dss_chunks_szad, node);
			node->addr = chunk;
			node->size += size;
			extent_tree_szad_insert(&dss_chunks_szad, node);
			break;
		} else if (xnode == NULL) {
			/*
			 * It is possible that base_node_alloc() will cause a
			 * new base chunk to be allocated, so take care not to
			 * deadlock on dss_mtx, and recover if another thread
			 * deallocates an adjacent chunk while this one is busy
			 * allocating xnode.
			 */
			malloc_mutex_unlock(&dss_mtx);
			xnode = base_node_alloc();
			malloc_mutex_lock(&dss_mtx);
			if (xnode == NULL)
				return (NULL);
		} else {
			/* Coalescing forward failed, so insert a new node. */
			node = xnode;
			xnode = NULL;
			node->addr = chunk;
			node->size = size;
			extent_tree_ad_insert(&dss_chunks_ad, node);
			extent_tree_szad_insert(&dss_chunks_szad, node);
			break;
		}
	}
	/* Discard xnode if it ended up unused due to a race. */
	if (xnode != NULL)
		base_node_dealloc(xnode);

	/* Try to coalesce backward. */
	prev = extent_tree_ad_prev(&dss_chunks_ad, node);
	if (prev != NULL && (void *)((uintptr_t)prev->addr + prev->size) ==
	    chunk) {
		/*
		 * Coalesce chunk with the previous address range. This does
		 * not change the position within dss_chunks_ad, so only
		 * remove/insert node from/into dss_chunks_szad.
		 */
		extent_tree_szad_remove(&dss_chunks_szad, prev);
		extent_tree_ad_remove(&dss_chunks_ad, prev);

		extent_tree_szad_remove(&dss_chunks_szad, node);
		node->addr = prev->addr;
		node->size += prev->size;
		extent_tree_szad_insert(&dss_chunks_szad, node);

		base_node_dealloc(prev);
	}

	return (node);
}

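/* Return true iff chunk lies within the DSS range [dss_base, dss_max). */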
bool
chunk_in_dss(void *chunk)
{
	bool ret;

	malloc_mutex_lock(&dss_mtx);
	if ((uintptr_t)chunk >= (uintptr_t)dss_base
	    && (uintptr_t)chunk < (uintptr_t)dss_max)
		ret = true;
	else
		ret = false;
	malloc_mutex_unlock(&dss_mtx);

	return (ret);
}

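/*
 * Return false if chunk was allocated from the DSS and has been recorded as
 * unused (shrinking the DSS when the chunk sits at its current end); return
 * true if the chunk lies outside the DSS.
 */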
bool
chunk_dealloc_dss(void *chunk, size_t size)
{
	bool ret;

	malloc_mutex_lock(&dss_mtx);
	if ((uintptr_t)chunk >= (uintptr_t)dss_base
	    && (uintptr_t)chunk < (uintptr_t)dss_max) {
		extent_node_t *node;

		/* Try to coalesce with other unused chunks. */
		node = chunk_dealloc_dss_record(chunk, size);
		if (node != NULL) {
			chunk = node->addr;
			size = node->size;
		}

		/* Get the current end of the DSS. */
		dss_max = sbrk(0);

		/*
		 * Try to shrink the DSS if this chunk is at the end of the
		 * DSS. The sbrk() call here is subject to a race condition
		 * with threads that use brk(2) or sbrk(2) directly, but the
		 * alternative would be to leak memory for the sake of poorly
		 * designed multi-threaded programs.
		 */
		if ((void *)((uintptr_t)chunk + size) == dss_max
		    && (dss_prev = sbrk(-(intptr_t)size)) == dss_max) {
			/* Success. */
			dss_max = (void *)((intptr_t)dss_prev - (intptr_t)size);

			if (node != NULL) {
				extent_tree_szad_remove(&dss_chunks_szad, node);
				extent_tree_ad_remove(&dss_chunks_ad, node);
				base_node_dealloc(node);
			}
		} else
			madvise(chunk, size, MADV_DONTNEED);

		ret = false;
		goto RETURN;
	}

	ret = true;
RETURN:
	malloc_mutex_unlock(&dss_mtx);
	return (ret);
}

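/*
 * Initialize DSS state during bootstrapping. Returns true on error (mutex
 * initialization failure).
 */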
bool
chunk_dss_boot(void)
{

	if (malloc_mutex_init(&dss_mtx))
		return (true);
	dss_base = sbrk(0);
	dss_prev = dss_base;
	dss_max = dss_base;
	extent_tree_szad_new(&dss_chunks_szad);
	extent_tree_ad_new(&dss_chunks_ad);

	return (false);
}

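/*
 * Illustrative use (a hypothetical caller; the real call sites live in the
 * chunk allocation layer, not in this file): prefer the DSS, then fall back
 * to an mmap-based allocator when the DSS cannot satisfy the request. The
 * chunk_alloc_mmap() fallback named below is an assumption for this sketch.
 *
 *	bool zero = false;
 *	void *chunk = chunk_alloc_dss(chunksize, &zero);
 *	if (chunk == NULL)
 *		chunk = chunk_alloc_mmap(chunksize);
 */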
/******************************************************************************/
#endif /* JEMALLOC_DSS */