#define JEMALLOC_CHUNK_DSS_C_
#include "jemalloc/internal/jemalloc_internal.h"
/******************************************************************************/
const char	*dss_prec_names[] = {
	"disabled",
	"primary",
	"secondary",
	"N/A"
};
/* Current dss precedence default, used when creating new arenas. */
static dss_prec_t	dss_prec_default = DSS_PREC_DEFAULT;
/*
 * Protects sbrk() calls.  This avoids malloc races among threads, though it
 * does not protect against races with threads that call sbrk() directly.
 */
static malloc_mutex_t	dss_mtx;
/* Base address of the DSS. */
static void		*dss_base;
/* Current end of the DSS, or ((void *)-1) if the DSS is exhausted. */
static void		*dss_prev;
/* Current upper limit on DSS addresses. */
static void		*dss_max;

/******************************************************************************/
#ifndef JEMALLOC_HAVE_SBRK
/* Stub for platforms that lack sbrk(); the DSS cannot be used there. */
static void *
sbrk(intptr_t increment)
{

	not_implemented();

	return (NULL);
}
#endif
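/*
 * The dss precedence setting determines whether arenas try the DSS before or
 * after mmap() when allocating chunks, or skip the DSS entirely when it is
 * disabled; dss_prec_default supplies the value used for newly created arenas.
 */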
dss_prec_t
chunk_dss_prec_get(void)
{
	dss_prec_t ret;

	if (config_dss == false)
		return (dss_prec_disabled);
	malloc_mutex_lock(&dss_mtx);
	ret = dss_prec_default;
	malloc_mutex_unlock(&dss_mtx);
	return (ret);
}
bool
chunk_dss_prec_set(dss_prec_t dss_prec)
{

	if (config_dss == false)
		return (true);
	malloc_mutex_lock(&dss_mtx);
	dss_prec_default = dss_prec;
	malloc_mutex_unlock(&dss_mtx);
	return (false);
}
void *
chunk_alloc_dss(size_t size, size_t alignment, bool *zero)
{
	void *ret;

	cassert(config_dss);
	assert(size > 0 && (size & chunksize_mask) == 0);
	assert(alignment > 0 && (alignment & chunksize_mask) == 0);
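	/*
	 * Overall approach: extend the DSS with a single sbrk() call that
	 * covers any padding needed to chunk-align the current break, any
	 * additional chunk-aligned padding needed to satisfy the requested
	 * alignment, and the requested size itself.  The alignment padding is
	 * recycled via chunk_unmap() rather than leaked.
	 */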
	/*
	 * sbrk() uses a signed increment argument, so take care not to
	 * interpret a huge allocation request as a negative increment.
	 */
	if ((intptr_t)size < 0)
		return (NULL);
	malloc_mutex_lock(&dss_mtx);
	if (dss_prev != (void *)-1) {
		size_t gap_size, cpad_size;
		void *cpad, *dss_next;
		intptr_t incr;
		/*
		 * The loop is necessary to recover from races with other
		 * threads that are using the DSS for something other than
		 * malloc.
		 */
		do {
			/* Get the current end of the DSS. */
			dss_max = sbrk(0);
			/*
			 * Calculate how much padding is necessary to
			 * chunk-align the end of the DSS.
			 */
			gap_size = (chunksize - CHUNK_ADDR2OFFSET(dss_max)) &
			    chunksize_mask;
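			/*
			 * gap_size is the distance from dss_max up to the next
			 * chunk boundary; the final mask maps an already
			 * chunk-aligned break to a gap of zero rather than a
			 * full chunk.
			 */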
			/*
			 * Compute how much chunk-aligned pad space (if any) is
			 * necessary to satisfy alignment.  This space can be
			 * recycled for later use.
			 */
			cpad = (void *)((uintptr_t)dss_max + gap_size);
			ret = (void *)ALIGNMENT_CEILING((uintptr_t)dss_max,
			    alignment);
			cpad_size = (uintptr_t)ret - (uintptr_t)cpad;
			dss_next = (void *)((uintptr_t)ret + size);
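			/*
			 * cpad is the chunk-aligned break and ret is the first
			 * address at or above it that satisfies the requested
			 * alignment, so [cpad, ret) consists of whole chunks
			 * that can be recycled on success.
			 */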
			if ((uintptr_t)ret < (uintptr_t)dss_max ||
			    (uintptr_t)dss_next < (uintptr_t)dss_max) {
				/* Wrap-around. */
				malloc_mutex_unlock(&dss_mtx);
				return (NULL);
			}
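			/*
			 * Try to extend the DSS in one shot: incr covers the
			 * chunk-alignment gap, the alignment pad, and the
			 * requested size.  sbrk() returns the previous break on
			 * success, so if the value it returns still equals
			 * dss_max, no other thread moved the break since the
			 * sbrk(0) probe above and the extension belongs to this
			 * allocation; otherwise the loop retries until sbrk()
			 * itself fails with (void *)-1.
			 */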
			incr = gap_size + cpad_size + size;
			dss_prev = sbrk(incr);
			if (dss_prev == dss_max) {
				/* Success. */
				dss_max = dss_next;
				malloc_mutex_unlock(&dss_mtx);
				if (cpad_size != 0)
					chunk_unmap(cpad, cpad_size);
				if (*zero) {
					VALGRIND_MAKE_MEM_UNDEFINED(ret, size);
					memset(ret, 0, size);
				}
				return (ret);
			}
		} while (dss_prev != (void *)-1);
	}
	malloc_mutex_unlock(&dss_mtx);

	return (NULL);
}
bool
chunk_in_dss(void *chunk)
{
	bool ret;

	cassert(config_dss);

	malloc_mutex_lock(&dss_mtx);
	if ((uintptr_t)chunk >= (uintptr_t)dss_base
	    && (uintptr_t)chunk < (uintptr_t)dss_max)
		ret = true;
	else
		ret = false;
	malloc_mutex_unlock(&dss_mtx);

	return (ret);
}
bool
chunk_dss_boot(void)
{

	cassert(config_dss);

	if (malloc_mutex_init(&dss_mtx))
		return (true);
	dss_base = sbrk(0);
	dss_prev = dss_base;
	dss_max = dss_base;

	return (false);
}
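/*
 * Fork hooks: dss_mtx is acquired before fork() and released (or, on some
 * platforms, reinitialized) afterward in the parent and child, so the child
 * never starts out with dss_mtx held by a thread that no longer exists.
 */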
void
chunk_dss_prefork(void)
{

	if (config_dss)
		malloc_mutex_prefork(&dss_mtx);
}
void
chunk_dss_postfork_parent(void)
{

	if (config_dss)
		malloc_mutex_postfork_parent(&dss_mtx);
}
void
chunk_dss_postfork_child(void)
{

	if (config_dss)
		malloc_mutex_postfork_child(&dss_mtx);
}
/******************************************************************************/