#define JEMALLOC_CHUNK_SWAP_C_
#include "jemalloc/internal/jemalloc_internal.h"
#ifdef JEMALLOC_SWAP
/******************************************************************************/

malloc_mutex_t swap_mtx;

/* Base address of the mmap()ed file(s). */
static void *swap_base;
/* Current end of the space in use (<= swap_max). */
static void *swap_end;
/* Absolute upper limit on file-backed addresses. */
static void *swap_max;

/*
 * Trees of chunks that were previously allocated (trees differ only in node
 * ordering).  These are used when allocating chunks, in an attempt to re-use
 * address space.  Depending on function, different tree orderings are needed,
 * which is why there are two trees with the same contents.
 */
static extent_tree_t swap_chunks_szad;
static extent_tree_t swap_chunks_ad;
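
/*
 * Illustrative sketch (not part of the original source): the two trees hold
 * the same free extents under different orderings.  swap_chunks_szad is
 * ordered by (size, address), so an nsearch() yields a best-fit extent for an
 * allocation request; swap_chunks_ad is ordered by address, so a deallocated
 * chunk's neighbors can be found for coalescing.  The helper below is
 * hypothetical and only demonstrates the two lookup patterns.
 */
#if 0
static void
swap_tree_lookup_example(size_t size, void *chunk)
{
    extent_node_t *fit, *neighbor, key;

    /* Best-fit lookup: the smallest free extent with node->size >= size. */
    key.addr = NULL;
    key.size = size;
    fit = extent_tree_szad_nsearch(&swap_chunks_szad, &key);

    /* Coalescing lookup: the free extent that starts where this chunk ends. */
    key.addr = (void *)((uintptr_t)chunk + size);
    key.size = 0;
    neighbor = extent_tree_ad_nsearch(&swap_chunks_ad, &key);

    (void)fit;
    (void)neighbor;
}
#endif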
/******************************************************************************/
/* Function prototypes for non-inline static functions. */

static void *chunk_recycle_swap(size_t size, bool *zero);
static extent_node_t *chunk_dealloc_swap_record(void *chunk, size_t size);

/******************************************************************************/
static void *
chunk_recycle_swap(size_t size, bool *zero)
{
    extent_node_t *node, key;

    key.addr = NULL;
    key.size = size;
    malloc_mutex_lock(&swap_mtx);
    node = extent_tree_szad_nsearch(&swap_chunks_szad, &key);
    if (node != NULL) {
        void *ret = node->addr;

        /* Remove node from the tree. */
        extent_tree_szad_remove(&swap_chunks_szad, node);
        if (node->size == size) {
            extent_tree_ad_remove(&swap_chunks_ad, node);
            base_node_dealloc(node);
        } else {
            /*
             * Insert the remainder of node's address range as a
             * smaller chunk.  Its position within swap_chunks_ad
             * does not change.
             */
            assert(node->size > size);
            node->addr = (void *)((uintptr_t)node->addr + size);
            node->size -= size;
            extent_tree_szad_insert(&swap_chunks_szad, node);
        }
        malloc_mutex_unlock(&swap_mtx);

        if (*zero)
            memset(ret, 0, size);
        return (ret);
    }
    malloc_mutex_unlock(&swap_mtx);

    return (NULL);
}
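
/*
 * Illustrative sketch (not part of the original source): the remainder split
 * above in concrete terms.  If a free extent [A, A + 4*chunksize) satisfies a
 * one-chunk request, the surviving node becomes [A + chunksize, A + 4*chunksize).
 * Its start address moves forward but still falls between the same neighbors
 * in address order, so only the size/address tree needs a remove/insert.  The
 * function name below is hypothetical.
 */
#if 0
static void
swap_recycle_split_example(extent_node_t *node, size_t size)
{
    extent_tree_szad_remove(&swap_chunks_szad, node);
    node->addr = (void *)((uintptr_t)node->addr + size); /* A -> A + size */
    node->size -= size;                                  /* 4 chunks -> 3 chunks */
    extent_tree_szad_insert(&swap_chunks_szad, node);
}
#endif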
void *
chunk_alloc_swap(size_t size, bool *zero)
{
    void *ret;

    assert(swap_enabled);

    ret = chunk_recycle_swap(size, zero);
    if (ret != NULL)
        return (ret);

    malloc_mutex_lock(&swap_mtx);
    if ((uintptr_t)swap_end + size <= (uintptr_t)swap_max) {
        ret = swap_end;
        swap_end = (void *)((uintptr_t)swap_end + size);
        malloc_mutex_unlock(&swap_mtx);

        if (swap_prezeroed)
            *zero = true;
        else if (*zero)
            memset(ret, 0, size);
    } else {
        malloc_mutex_unlock(&swap_mtx);
        return (NULL);
    }

    return (ret);
}
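
/*
 * Illustrative sketch (not part of the original source): when recycling
 * fails, allocation is a simple bump of swap_end within the reserved range,
 * so the invariant swap_base <= swap_end <= swap_max always holds.  The
 * hypothetical helper below merely restates the capacity check made above.
 */
#if 0
static bool
swap_can_extend_example(size_t size)
{
    return ((uintptr_t)swap_end + size <= (uintptr_t)swap_max);
}
#endif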
static extent_node_t *
chunk_dealloc_swap_record(void *chunk, size_t size)
{
    extent_node_t *xnode, *node, *prev, key;

    xnode = NULL;
    while (true) {
        key.addr = (void *)((uintptr_t)chunk + size);
        node = extent_tree_ad_nsearch(&swap_chunks_ad, &key);
        /* Try to coalesce forward. */
        if (node != NULL && node->addr == key.addr) {
            /*
             * Coalesce chunk with the following address range.
             * This does not change the position within
             * swap_chunks_ad, so only remove/insert from/into
             * swap_chunks_szad.
             */
            extent_tree_szad_remove(&swap_chunks_szad, node);
            node->addr = chunk;
            node->size += size;
            extent_tree_szad_insert(&swap_chunks_szad, node);
            break;
        } else if (xnode == NULL) {
            /*
             * It is possible that base_node_alloc() will cause a
             * new base chunk to be allocated, so take care not to
             * deadlock on swap_mtx, and recover if another thread
             * deallocates an adjacent chunk while this one is busy
             * allocating xnode.
             */
            malloc_mutex_unlock(&swap_mtx);
            xnode = base_node_alloc();
            malloc_mutex_lock(&swap_mtx);
            if (xnode == NULL)
                return (NULL);
        } else {
            /* Coalescing forward failed, so insert a new node. */
            node = xnode;
            xnode = NULL;
            node->addr = chunk;
            node->size = size;
            extent_tree_ad_insert(&swap_chunks_ad, node);
            extent_tree_szad_insert(&swap_chunks_szad, node);
            break;
        }
    }
    /* Discard xnode if it ended up unused due to a race. */
    if (xnode != NULL)
        base_node_dealloc(xnode);

    /* Try to coalesce backward. */
    prev = extent_tree_ad_prev(&swap_chunks_ad, node);
    if (prev != NULL && (void *)((uintptr_t)prev->addr + prev->size) ==
        chunk) {
        /*
         * Coalesce chunk with the previous address range.  This does
         * not change the position within swap_chunks_ad, so only
         * remove/insert node from/into swap_chunks_szad.
         */
        extent_tree_szad_remove(&swap_chunks_szad, prev);
        extent_tree_ad_remove(&swap_chunks_ad, prev);

        extent_tree_szad_remove(&swap_chunks_szad, node);
        node->addr = prev->addr;
        node->size += prev->size;
        extent_tree_szad_insert(&swap_chunks_szad, node);

        base_node_dealloc(prev);
    }

    return (node);
}
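
/*
 * Illustrative sketch (not part of the original source): a worked coalescing
 * example.  With chunksize c, suppose [a, a+c) and [a+2c, a+3c) are already
 * recorded as free and the middle chunk [a+c, a+2c) is then deallocated.  The
 * forward pass merges it into the following extent, and the backward pass
 * merges the result with the preceding one, leaving a single node [a, a+3c).
 * The driver below is hypothetical and assumes swap_mtx is held, as in
 * chunk_dealloc_swap().
 */
#if 0
static void
swap_coalesce_example(void *a, size_t c)
{
    chunk_dealloc_swap_record(a, c);
    chunk_dealloc_swap_record((void *)((uintptr_t)a + 2 * c), c);
    /* Freeing the middle chunk coalesces all three into [a, a+3c). */
    chunk_dealloc_swap_record((void *)((uintptr_t)a + c), c);
}
#endif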
bool
chunk_in_swap(void *chunk)
{
    bool ret;

    assert(swap_enabled);

    malloc_mutex_lock(&swap_mtx);
    if ((uintptr_t)chunk >= (uintptr_t)swap_base
        && (uintptr_t)chunk < (uintptr_t)swap_max)
        ret = true;
    else
        ret = false;
    malloc_mutex_unlock(&swap_mtx);

    return (ret);
}
bool
chunk_dealloc_swap(void *chunk, size_t size)
{
    bool ret;

    assert(swap_enabled);

    malloc_mutex_lock(&swap_mtx);
    if ((uintptr_t)chunk >= (uintptr_t)swap_base
        && (uintptr_t)chunk < (uintptr_t)swap_max) {
        extent_node_t *node;

        /* Try to coalesce with other unused chunks. */
        node = chunk_dealloc_swap_record(chunk, size);
        if (node != NULL) {
            chunk = node->addr;
            size = node->size;
        }

        /*
         * Try to shrink the in-use memory if this chunk is at the end
         * of the in-use memory.
         */
        if ((void *)((uintptr_t)chunk + size) == swap_end) {
            swap_end = (void *)((uintptr_t)swap_end - size);

            if (node != NULL) {
                extent_tree_szad_remove(&swap_chunks_szad, node);
                extent_tree_ad_remove(&swap_chunks_ad, node);
                base_node_dealloc(node);
            }
        } else
            madvise(chunk, size, MADV_DONTNEED);

#ifdef JEMALLOC_STATS
        swap_avail += size;
#endif
        ret = false;
        goto RETURN;
    }

    ret = true;
RETURN:
    malloc_mutex_unlock(&swap_mtx);
    return (ret);
}
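
/*
 * Illustrative sketch (not part of the original source): the two
 * deallocation paths above.  A chunk freed at the very end of the in-use
 * range is reclaimed by retracting swap_end, so its tree node can be
 * discarded; an interior chunk stays recorded in the trees for reuse while
 * madvise(MADV_DONTNEED) lets the kernel drop its pages.  The hypothetical
 * helper below restates the test that picks between the two.
 */
#if 0
static bool
swap_chunk_at_end_example(void *chunk, size_t size)
{
    return ((void *)((uintptr_t)chunk + size) == swap_end);
}
#endif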
bool
chunk_swap_enable(const int *fds, unsigned nfds, bool prezeroed)
{
    bool ret;
    unsigned i;
    off_t off;
    void *vaddr;
    size_t cumsize, voff;
    size_t sizes[nfds];

    malloc_mutex_lock(&swap_mtx);

    /* Get file sizes. */
    for (i = 0, cumsize = 0; i < nfds; i++) {
        off = lseek(fds[i], 0, SEEK_END);
        if (off == ((off_t)-1)) {
            ret = true;
            goto RETURN;
        }
        if (PAGE_CEILING(off) != off) {
            /* Truncate to a multiple of the page size. */
            off &= ~PAGE_MASK;
            if (ftruncate(fds[i], off) != 0) {
                ret = true;
                goto RETURN;
            }
        }
        sizes[i] = off;
        if (cumsize + off < cumsize) {
            /*
             * Cumulative file size is greater than the total
             * address space.  Bail out while it's still obvious
             * what the problem is.
             */
            ret = true;
            goto RETURN;
        }
        cumsize += off;
    }

    /* Round down to a multiple of the chunk size. */
    cumsize &= ~chunksize_mask;
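
    /*
     * Worked example (not part of the original source), assuming 4 KiB pages
     * and 4 MiB chunks: a backing file of 10 MiB + 123 bytes is first
     * truncated down to the 10 MiB page boundary, and the cumulative total is
     * then rounded down to 8 MiB, since whole chunks are the allocation
     * granularity for the mapped region.
     */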
    /*
     * Allocate a chunk-aligned region of anonymous memory, which will
     * be the final location for the memory-mapped files.
     */
    vaddr = chunk_alloc_mmap_noreserve(cumsize);
    if (vaddr == NULL) {
        ret = true;
        goto RETURN;
    }

    /*
     * Overlay the files onto the anonymous mapping (see the standalone
     * sketch after this function).
     */
    for (i = 0, voff = 0; i < nfds; i++) {
        void *addr = mmap((void *)((uintptr_t)vaddr + voff), sizes[i],
            PROT_READ | PROT_WRITE, MAP_SHARED | MAP_FIXED, fds[i], 0);
        if (addr == MAP_FAILED) {
            char buf[BUFERROR_BUF];

            buferror(errno, buf, sizeof(buf));
            malloc_write(
                "<jemalloc>: Error in mmap(..., MAP_FIXED, ...): ");
            malloc_write(buf);
            malloc_write("\n");
            if (munmap(vaddr, voff) == -1) {
                buferror(errno, buf, sizeof(buf));
                malloc_write("<jemalloc>: Error in munmap(): ");
                malloc_write(buf);
                malloc_write("\n");
            }
            ret = true;
            goto RETURN;
        }
        assert(addr == (void *)((uintptr_t)vaddr + voff));

        /*
         * Tell the kernel that the mapping will be accessed randomly,
         * and that it should not gratuitously sync pages to the
         * filesystem.
         */
#ifdef MADV_RANDOM
        madvise(addr, sizes[i], MADV_RANDOM);
#endif
#ifdef MADV_NOSYNC
        madvise(addr, sizes[i], MADV_NOSYNC);
#endif

        voff += sizes[i];
    }
    swap_prezeroed = prezeroed;
    swap_base = vaddr;
    swap_end = swap_base;
    swap_max = (void *)((uintptr_t)vaddr + cumsize);

    /* Copy the fds array for mallctl purposes. */
    swap_fds = (int *)base_alloc(nfds * sizeof(int));
    if (swap_fds == NULL) {
        ret = true;
        goto RETURN;
    }
    memcpy(swap_fds, fds, nfds * sizeof(int));

#ifdef JEMALLOC_STATS
    swap_avail = cumsize;
#endif

    swap_enabled = true;

    ret = false;
RETURN:
    malloc_mutex_unlock(&swap_mtx);
    return (ret);
}
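
/*
 * Illustrative, self-contained sketch (not part of the original source) of
 * the reserve-then-overlay pattern used in chunk_swap_enable(): reserve a
 * contiguous anonymous region, then map a file over part of it with
 * MAP_FIXED so the file backing lands at a caller-chosen address inside the
 * reservation.  The helper name and the MAP_ANONYMOUS/PROT_NONE reservation
 * are assumptions of the sketch, not jemalloc code.
 */
#if 0
#include <sys/mman.h>

static void *
overlay_file_example(int fd, size_t file_size, size_t reserve_size)
{
    /* Reserve address space without making it accessible yet. */
    void *base = mmap(NULL, reserve_size, PROT_NONE,
        MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
    if (base == MAP_FAILED)
        return (NULL);

    /* Overlay the file at the start of the reservation. */
    void *addr = mmap(base, file_size, PROT_READ | PROT_WRITE,
        MAP_SHARED | MAP_FIXED, fd, 0);
    if (addr == MAP_FAILED) {
        munmap(base, reserve_size);
        return (NULL);
    }
    return (base);
}
#endif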
bool
chunk_swap_boot(void)
{
    if (malloc_mutex_init(&swap_mtx))
        return (true);

    swap_enabled = false;
    swap_prezeroed = false; /* swap.* mallctls depend on this. */
#ifdef JEMALLOC_STATS
    swap_avail = 0;
#endif

    extent_tree_szad_new(&swap_chunks_szad);
    extent_tree_ad_new(&swap_chunks_ad);

    return (false);
}
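
/*
 * Illustrative, hypothetical driver (not part of the original source): how
 * the boot/enable pair above might be exercised directly.  In the real
 * allocator chunk_swap_boot() runs during malloc initialization and
 * chunk_swap_enable() is reached through the swap.* mallctl interface; the
 * direct calls below only sketch the expected ordering and file setup.
 */
#if 0
#include <fcntl.h>
#include <unistd.h>

static bool
swap_enable_example(const char *path, size_t file_size)
{
    int fd;

    if (chunk_swap_boot())
        return (true);

    /* Create and size the backing file, then hand its fd over. */
    fd = open(path, O_RDWR | O_CREAT, 0600);
    if (fd == -1)
        return (true);
    if (ftruncate(fd, (off_t)file_size) != 0) {
        close(fd);
        return (true);
    }
    return (chunk_swap_enable(&fd, 1, false));
}
#endif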
/******************************************************************************/
#endif /* JEMALLOC_SWAP */