deps/jemalloc/src/chunk_dss.c
#define JEMALLOC_CHUNK_DSS_C_
#include "jemalloc/internal/jemalloc_internal.h"
/******************************************************************************/
/* Data. */

/*
 * Protects sbrk() calls.  This avoids malloc races among threads, though it
 * does not protect against races with threads that call sbrk() directly.
 */
static malloc_mutex_t dss_mtx;

/* Base address of the DSS. */
static void *dss_base;
/* Current end of the DSS, or ((void *)-1) if the DSS is exhausted. */
static void *dss_prev;
/* Current upper limit on DSS addresses. */
static void *dss_max;

/******************************************************************************/
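/*
 * Illustrative note (not part of upstream jemalloc): dss_base records the
 * program break as seen at boot time, dss_max tracks how far chunk_alloc_dss()
 * has pushed it since, and chunk_in_dss() below treats [dss_base, dss_max) as
 * the DSS address range.
 */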
#ifndef JEMALLOC_HAVE_SBRK
static void *
sbrk(intptr_t increment)
{

    not_implemented();

    return (NULL);
}
#endif

void *
chunk_alloc_dss(size_t size, size_t alignment, bool *zero)
{
    void *ret;

    cassert(config_dss);
    assert(size > 0 && (size & chunksize_mask) == 0);
    assert(alignment > 0 && (alignment & chunksize_mask) == 0);

    /*
     * sbrk() uses a signed increment argument, so take care not to
     * interpret a huge allocation request as a negative increment.
     */
    if ((intptr_t)size < 0)
        return (NULL);
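    /*
     * Illustrative note (not part of upstream jemalloc): on a 64-bit build,
     * any size of 2^63 bytes or more casts to a negative intptr_t, so without
     * the check above such a request would reach sbrk() as a break-shrinking
     * (negative) increment.
     */
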
    malloc_mutex_lock(&dss_mtx);
    if (dss_prev != (void *)-1) {
        size_t gap_size, cpad_size;
        void *cpad, *dss_next;
        intptr_t incr;

        /*
         * The loop is necessary to recover from races with other
         * threads that are using the DSS for something other than
         * malloc.
         */
        do {
            /* Get the current end of the DSS. */
            dss_max = sbrk(0);
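            /*
             * Illustrative note (not part of upstream jemalloc): a race is
             * detected further down by comparing the old break returned by
             * sbrk(incr) against this dss_max snapshot; a mismatch means
             * another thread moved the break in the meantime, and the loop
             * retries with a fresh snapshot.
             */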
            /*
             * Calculate how much padding is necessary to
             * chunk-align the end of the DSS.
             */
            gap_size = (chunksize - CHUNK_ADDR2OFFSET(dss_max)) &
                chunksize_mask;
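            /*
             * Worked example (not part of upstream jemalloc), assuming the
             * common 4 MiB chunksize: if dss_max sits 0x1000 bytes past a
             * chunk boundary, CHUNK_ADDR2OFFSET(dss_max) is 0x1000 and
             * gap_size is 4 MiB - 0x1000; if dss_max is already chunk-aligned,
             * the offset is 0 and the final mask reduces gap_size to 0 rather
             * than a full chunk.
             */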
            /*
             * Compute how much chunk-aligned pad space (if any) is
             * necessary to satisfy alignment.  This space can be
             * recycled for later use.
             */
            cpad = (void *)((uintptr_t)dss_max + gap_size);
            ret = (void *)ALIGNMENT_CEILING((uintptr_t)dss_max,
                alignment);
            cpad_size = (uintptr_t)ret - (uintptr_t)cpad;
            dss_next = (void *)((uintptr_t)ret + size);
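            /*
             * Illustrative note (not part of upstream jemalloc): when
             * alignment == chunksize, ret coincides with cpad and cpad_size
             * is 0; for larger alignments the [cpad, ret) gap is a whole
             * number of chunks, which the success path below hands back via
             * chunk_dealloc() rather than wasting it.
             */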
            if ((uintptr_t)ret < (uintptr_t)dss_max ||
                (uintptr_t)dss_next < (uintptr_t)dss_max) {
                /* Wrap-around. */
                malloc_mutex_unlock(&dss_mtx);
                return (NULL);
            }
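            /*
             * Illustrative note (not part of upstream jemalloc): the
             * wrap-around test above can only fire if the alignment round-up
             * or the +size addition overflowed uintptr_t, i.e. the request
             * would run past the top of the address space, so it is refused
             * rather than turned into a bogus sbrk() increment.
             */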
            incr = gap_size + cpad_size + size;
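            /*
             * Illustrative note (not part of upstream jemalloc): the break is
             * grown in a single sbrk() call by gap_size (to reach a chunk
             * boundary), plus cpad_size (alignment padding that gets
             * recycled), plus size (the requested chunk itself).
             */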
            dss_prev = sbrk(incr);
            if (dss_prev == dss_max) {
                /* Success. */
                dss_max = dss_next;
                malloc_mutex_unlock(&dss_mtx);
                if (cpad_size != 0)
                    chunk_dealloc(cpad, cpad_size, true);
                if (*zero) {
                    VALGRIND_MAKE_MEM_UNDEFINED(ret, size);
                    memset(ret, 0, size);
                }
                return (ret);
            }
        } while (dss_prev != (void *)-1);
    }
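    /*
     * Illustrative note (not part of upstream jemalloc): if sbrk() fails it
     * returns (void *)-1, which the loop above leaves in dss_prev, so
     * subsequent calls see the DSS as exhausted and skip it entirely.
     */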
    malloc_mutex_unlock(&dss_mtx);

    return (NULL);
}

bool
chunk_in_dss(void *chunk)
{
    bool ret;

    cassert(config_dss);

    malloc_mutex_lock(&dss_mtx);
    if ((uintptr_t)chunk >= (uintptr_t)dss_base
        && (uintptr_t)chunk < (uintptr_t)dss_max)
        ret = true;
    else
        ret = false;
    malloc_mutex_unlock(&dss_mtx);

    return (ret);
}

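/*
 * Illustrative note (not part of upstream jemalloc): memory obtained by
 * moving the break cannot be individually unmapped the way mmap()ed chunks
 * can, so callers use this predicate to tell break-backed chunks apart from
 * mmap()-backed ones before deciding how to release them.
 */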
bool
chunk_dss_boot(void)
{

    cassert(config_dss);

    if (malloc_mutex_init(&dss_mtx))
        return (true);
    dss_base = sbrk(0);
    dss_prev = dss_base;
    dss_max = dss_base;

    return (false);
}

void
chunk_dss_prefork(void)
{

    if (config_dss)
        malloc_mutex_prefork(&dss_mtx);
}

void
chunk_dss_postfork_parent(void)
{

    if (config_dss)
        malloc_mutex_postfork_parent(&dss_mtx);
}

void
chunk_dss_postfork_child(void)
{

    if (config_dss)
        malloc_mutex_postfork_child(&dss_mtx);
}

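/*
 * Illustrative note (not part of upstream jemalloc): these hooks run around
 * fork() so that dss_mtx is not left held by a thread that does not exist in
 * the child; the allocator takes the mutex in the prefork hook and releases
 * (or reinitializes) it in the postfork hooks.
 */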
/******************************************************************************/