#define	JEMALLOC_CHUNK_DSS_C_
#include "jemalloc/internal/jemalloc_internal.h"
#ifdef JEMALLOC_DSS
/******************************************************************************/
/* Data. */

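/* Protects all static DSS state and the recycling trees below. */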
malloc_mutex_t	dss_mtx;

/* Base address of the DSS. */
static void	*dss_base;
/* Current end of the DSS, or ((void *)-1) if the DSS is exhausted. */
static void	*dss_prev;
/* Current upper limit on DSS addresses. */
static void	*dss_max;

/*
 * Trees of chunks that were previously allocated (trees differ only in node
 * ordering).  These are used when allocating chunks, in an attempt to re-use
 * address space.  Depending on function, different tree orderings are needed,
 * which is why there are two trees with the same contents.
 */
static extent_tree_t	dss_chunks_szad;
static extent_tree_t	dss_chunks_ad;

/******************************************************************************/
/* Function prototypes for non-inline static functions. */

static void	*chunk_recycle_dss(size_t size, bool *zero);
static extent_node_t *chunk_dealloc_dss_record(void *chunk, size_t size);

/******************************************************************************/

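/*
 * Try to satisfy an allocation request from the trees of previously
 * deallocated DSS chunks.  Returns a recycled chunk, or NULL if none of the
 * cached extents is large enough.
 */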
static void *
chunk_recycle_dss(size_t size, bool *zero)
{
	extent_node_t *node, key;

	key.addr = NULL;
	key.size = size;
	malloc_mutex_lock(&dss_mtx);
	node = extent_tree_szad_nsearch(&dss_chunks_szad, &key);
	if (node != NULL) {
		void *ret = node->addr;

		/* Remove node from the tree. */
		extent_tree_szad_remove(&dss_chunks_szad, node);
		if (node->size == size) {
			extent_tree_ad_remove(&dss_chunks_ad, node);
			base_node_dealloc(node);
		} else {
			/*
			 * Insert the remainder of node's address range as a
			 * smaller chunk.  Its position within dss_chunks_ad
			 * does not change.
			 */
			assert(node->size > size);
			node->addr = (void *)((uintptr_t)node->addr + size);
			node->size -= size;
			extent_tree_szad_insert(&dss_chunks_szad, node);
		}
		malloc_mutex_unlock(&dss_mtx);

		if (*zero)
			memset(ret, 0, size);
		return (ret);
	}
	malloc_mutex_unlock(&dss_mtx);

	return (NULL);
}

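/*
 * Allocate a chunk-aligned, chunk-sized region from the DSS: first try to
 * recycle a previously deallocated extent, then fall back to growing the DSS
 * via sbrk(2).  Returns NULL if the DSS cannot satisfy the request.
 */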
void *
chunk_alloc_dss(size_t size, bool *zero)
{
	void *ret;

	ret = chunk_recycle_dss(size, zero);
	if (ret != NULL)
		return (ret);

	/*
	 * sbrk() uses a signed increment argument, so take care not to
	 * interpret a huge allocation request as a negative increment.
	 */
	if ((intptr_t)size < 0)
		return (NULL);

	malloc_mutex_lock(&dss_mtx);
	if (dss_prev != (void *)-1) {
		intptr_t incr;

		/*
		 * The loop is necessary to recover from races with other
		 * threads that are using the DSS for something other than
		 * malloc.
		 */
		do {
			/* Get the current end of the DSS. */
			dss_max = sbrk(0);

			/*
			 * Calculate how much padding is necessary to
			 * chunk-align the end of the DSS.
			 */
			incr = (intptr_t)size
			    - (intptr_t)CHUNK_ADDR2OFFSET(dss_max);
			if (incr == (intptr_t)size)
				ret = dss_max;
			else {
				ret = (void *)((intptr_t)dss_max + incr);
				incr += size;
			}

			dss_prev = sbrk(incr);
			if (dss_prev == dss_max) {
				/* Success; pages from sbrk() are zero-filled. */
				dss_max = (void *)((intptr_t)dss_prev + incr);
				malloc_mutex_unlock(&dss_mtx);
				*zero = true;
				return (ret);
			}
		} while (dss_prev != (void *)-1);
	}
	malloc_mutex_unlock(&dss_mtx);

	return (NULL);
}

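/*
 * Record a deallocated chunk in the recycling trees, coalescing it with any
 * adjacent cached extents.  Called with dss_mtx held.  Returns the node that
 * now describes the chunk, or NULL if a tracking node could not be allocated.
 */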
static extent_node_t *
chunk_dealloc_dss_record(void *chunk, size_t size)
{
	extent_node_t *xnode, *node, *prev, key;

	xnode = NULL;
	while (true) {
		key.addr = (void *)((uintptr_t)chunk + size);
		node = extent_tree_ad_nsearch(&dss_chunks_ad, &key);
		/* Try to coalesce forward. */
		if (node != NULL && node->addr == key.addr) {
			/*
			 * Coalesce chunk with the following address range.
			 * This does not change the position within
			 * dss_chunks_ad, so only remove/insert from/into
			 * dss_chunks_szad.
			 */
			extent_tree_szad_remove(&dss_chunks_szad, node);
			node->addr = chunk;
			node->size += size;
			extent_tree_szad_insert(&dss_chunks_szad, node);
			break;
		} else if (xnode == NULL) {
			/*
			 * It is possible that base_node_alloc() will cause a
			 * new base chunk to be allocated, so take care not to
			 * deadlock on dss_mtx, and recover if another thread
			 * deallocates an adjacent chunk while this one is busy
			 * allocating xnode.
			 */
			malloc_mutex_unlock(&dss_mtx);
			xnode = base_node_alloc();
			malloc_mutex_lock(&dss_mtx);
			if (xnode == NULL)
				return (NULL);
		} else {
			/* Coalescing forward failed, so insert a new node. */
			node = xnode;
			xnode = NULL;
			node->addr = chunk;
			node->size = size;
			extent_tree_ad_insert(&dss_chunks_ad, node);
			extent_tree_szad_insert(&dss_chunks_szad, node);
			break;
		}
	}
	/* Discard xnode if it ended up unused due to a race. */
	if (xnode != NULL)
		base_node_dealloc(xnode);

	/* Try to coalesce backward. */
	prev = extent_tree_ad_prev(&dss_chunks_ad, node);
	if (prev != NULL && (void *)((uintptr_t)prev->addr + prev->size) ==
	    chunk) {
		/*
		 * Coalesce chunk with the previous address range.  This does
		 * not change the position within dss_chunks_ad, so only
		 * remove/insert node from/into dss_chunks_szad.
		 */
		extent_tree_szad_remove(&dss_chunks_szad, prev);
		extent_tree_ad_remove(&dss_chunks_ad, prev);

		extent_tree_szad_remove(&dss_chunks_szad, node);
		node->addr = prev->addr;
		node->size += prev->size;
		extent_tree_szad_insert(&dss_chunks_szad, node);

		base_node_dealloc(prev);
	}

	return (node);
}

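/* Return true iff chunk lies within the address range managed by the DSS. */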
bool
chunk_in_dss(void *chunk)
{
	bool ret;

	malloc_mutex_lock(&dss_mtx);
	if ((uintptr_t)chunk >= (uintptr_t)dss_base
	    && (uintptr_t)chunk < (uintptr_t)dss_max)
		ret = true;
	else
		ret = false;
	malloc_mutex_unlock(&dss_mtx);

	return (ret);
}

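/*
 * Deallocate a chunk that may have come from the DSS: record it for reuse,
 * shrink the DSS if the chunk sits at its end, and otherwise advise the
 * kernel that the pages are unneeded.  Returns false if the chunk belonged
 * to the DSS and was handled here, true otherwise.
 */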
bool
chunk_dealloc_dss(void *chunk, size_t size)
{
	bool ret;

	malloc_mutex_lock(&dss_mtx);
	if ((uintptr_t)chunk >= (uintptr_t)dss_base
	    && (uintptr_t)chunk < (uintptr_t)dss_max) {
		extent_node_t *node;

		/* Try to coalesce with other unused chunks. */
		node = chunk_dealloc_dss_record(chunk, size);
		if (node != NULL) {
			chunk = node->addr;
			size = node->size;
		}

		/* Get the current end of the DSS. */
		dss_max = sbrk(0);

		/*
		 * Try to shrink the DSS if this chunk is at the end of the
		 * DSS.  The sbrk() call here is subject to a race condition
		 * with threads that use brk(2) or sbrk(2) directly, but the
		 * alternative would be to leak memory for the sake of poorly
		 * designed multi-threaded programs.
		 */
		if ((void *)((uintptr_t)chunk + size) == dss_max
		    && (dss_prev = sbrk(-(intptr_t)size)) == dss_max) {
			/* Success. */
			dss_max = (void *)((intptr_t)dss_prev -
			    (intptr_t)size);

			if (node != NULL) {
				extent_tree_szad_remove(&dss_chunks_szad,
				    node);
				extent_tree_ad_remove(&dss_chunks_ad, node);
				base_node_dealloc(node);
			}
		} else
			madvise(chunk, size, MADV_DONTNEED);

		ret = false;
		goto RETURN;
	}

	ret = true;
RETURN:
	malloc_mutex_unlock(&dss_mtx);
	return (ret);
}

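/*
 * One-time initialization of DSS state: the mutex, the base/prev/max
 * pointers, and the recycling trees.  Returns true on error.
 */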
bool
chunk_dss_boot(void)
{
	if (malloc_mutex_init(&dss_mtx))
		return (true);
	dss_base = sbrk(0);
	dss_prev = dss_base;
	dss_max = dss_base;
	extent_tree_szad_new(&dss_chunks_szad);
	extent_tree_ad_new(&dss_chunks_ad);
	return (false);
}

/******************************************************************************/
#endif	/* JEMALLOC_DSS */