#define JEMALLOC_CHUNK_MMAP_C_
#include "jemalloc/internal/jemalloc_internal.h"
/******************************************************************************/
/* Data. */

/*
 * Used by chunk_alloc_mmap() to decide whether to attempt the fast path and
 * potentially avoid some system calls.
 */
#ifndef NO_TLS
static __thread bool mmap_unaligned_tls
    JEMALLOC_ATTR(tls_model("initial-exec"));
#define MMAP_UNALIGNED_GET() mmap_unaligned_tls
#define MMAP_UNALIGNED_SET(v) do {                                      \
    mmap_unaligned_tls = (v);                                           \
} while (0)
#else
static pthread_key_t mmap_unaligned_tsd;
#define MMAP_UNALIGNED_GET() ((bool)pthread_getspecific(mmap_unaligned_tsd))
#define MMAP_UNALIGNED_SET(v) do {                                      \
    pthread_setspecific(mmap_unaligned_tsd, (void *)(v));               \
} while (0)
#endif
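
/*
 * Illustrative note, not part of the original source: in the TSD fallback the
 * flag is stored by casting the bool to and from a void pointer, so before
 * the first MMAP_UNALIGNED_SET(true) pthread_getspecific() returns NULL and
 * MMAP_UNALIGNED_GET() reads as false.
 */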

/******************************************************************************/
/* Function prototypes for non-inline static functions. */

static void *pages_map(void *addr, size_t size, bool noreserve);
static void pages_unmap(void *addr, size_t size);
static void *chunk_alloc_mmap_slow(size_t size, bool unaligned,
    bool noreserve);
static void *chunk_alloc_mmap_internal(size_t size, bool noreserve);

/******************************************************************************/

static void *
pages_map(void *addr, size_t size, bool noreserve)
{
    void *ret;

    /*
     * We don't use MAP_FIXED here, because it can cause the *replacement*
     * of existing mappings, and we only want to create new mappings.
     */
    int flags = MAP_PRIVATE | MAP_ANON;
#ifdef MAP_NORESERVE
    if (noreserve)
        flags |= MAP_NORESERVE;
#endif
    ret = mmap(addr, size, PROT_READ | PROT_WRITE, flags, -1, 0);
    assert(ret != NULL);

    if (ret == MAP_FAILED)
        ret = NULL;
    else if (addr != NULL && ret != addr) {
        /*
         * We succeeded in mapping memory, but not in the right place.
         */
        if (munmap(ret, size) == -1) {
            char buf[BUFERROR_BUF];

            buferror(errno, buf, sizeof(buf));
            malloc_write("<jemalloc>: Error in munmap(): ");
            malloc_write(buf);
            malloc_write("\n");
            if (opt_abort)
                abort();
        }
        ret = NULL;
    }

    assert(ret == NULL || (addr == NULL && ret != addr)
        || (addr != NULL && ret == addr));
    return (ret);
}

static void
pages_unmap(void *addr, size_t size)
{

    if (munmap(addr, size) == -1) {
        char buf[BUFERROR_BUF];

        buferror(errno, buf, sizeof(buf));
        malloc_write("<jemalloc>: Error in munmap(): ");
        malloc_write(buf);
        malloc_write("\n");
        if (opt_abort)
            abort();
    }
}

static void *
chunk_alloc_mmap_slow(size_t size, bool unaligned, bool noreserve)
{
    void *ret;
    size_t offset;

    /* Beware size_t wrap-around. */
    if (size + chunksize <= size)
        return (NULL);

    ret = pages_map(NULL, size + chunksize, noreserve);
    if (ret == NULL)
        return (NULL);

    /* Clean up unneeded leading/trailing space. */
    offset = CHUNK_ADDR2OFFSET(ret);
    if (offset != 0) {
        /* Note that mmap() returned an unaligned mapping. */
        unaligned = true;

        /* Leading space. */
        pages_unmap(ret, chunksize - offset);

        ret = (void *)((uintptr_t)ret +
            (chunksize - offset));

        /* Trailing space. */
        pages_unmap((void *)((uintptr_t)ret + size),
            offset);
    } else {
        /* Trailing space only. */
        pages_unmap((void *)((uintptr_t)ret + size),
            chunksize);
    }

    /*
     * If mmap() returned an aligned mapping, reset mmap_unaligned so that
     * the next chunk_alloc_mmap() execution tries the fast allocation
     * method.
     */
    if (unaligned == false)
        MMAP_UNALIGNED_SET(false);

    return (ret);
}
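
/*
 * Illustrative note, not part of the original source: a worked example of the
 * trimming arithmetic in chunk_alloc_mmap_slow(), assuming a hypothetical
 * chunksize of 0x400000 (4 MiB) and made-up addresses.  Suppose the
 * over-sized mapping of (size + chunksize) bytes comes back at
 * ret == 0x7f0000123000:
 *
 *   offset        = CHUNK_ADDR2OFFSET(ret)      = 0x123000
 *   leading trim  = chunksize - offset          = 0x2dd000 bytes at ret
 *   aligned ret   = ret + (chunksize - offset)  = 0x7f0000400000
 *   trailing trim = offset                      = 0x123000 bytes at ret + size
 *
 * which leaves exactly size bytes, chunk-aligned.  Had offset been 0, only
 * the trailing chunksize bytes would have needed unmapping.
 */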

static void *
chunk_alloc_mmap_internal(size_t size, bool noreserve)
{
    void *ret;

    /*
     * Ideally, there would be a way to specify alignment to mmap() (like
     * NetBSD has), but in the absence of such a feature, we have to work
     * hard to efficiently create aligned mappings.  The reliable, but slow,
     * method is to create a mapping that is over-sized, then trim the
     * excess.  However, that always results in at least one call to
     * pages_unmap().
     *
     * A more optimistic approach is to try mapping precisely the right
     * amount, then try to append another mapping if alignment is off.  In
     * practice, this works out well as long as the application is not
     * interleaving mappings via direct mmap() calls.  If we do run into a
     * situation where there is an interleaved mapping and we are unable to
     * extend an unaligned mapping, our best option is to switch to the
     * slow method until mmap() returns another aligned mapping.  This will
     * tend to leave a gap in the memory map that is too small to cause
     * later problems for the optimistic method.
     *
     * Another possible confounding factor is address space layout
     * randomization (ASLR), which causes mmap(2) to disregard the
     * requested address.  mmap_unaligned tracks whether the previous
     * chunk_alloc_mmap() execution received any unaligned or relocated
     * mappings, and if so, the current execution will immediately fall
     * back to the slow method.  However, we keep track of whether the fast
     * method would have succeeded, and if so, we make a note to try the
     * fast method next time.
     */

    if (MMAP_UNALIGNED_GET() == false) {
        size_t offset;

        ret = pages_map(NULL, size, noreserve);
        if (ret == NULL)
            return (NULL);

        offset = CHUNK_ADDR2OFFSET(ret);
        if (offset != 0) {
            MMAP_UNALIGNED_SET(true);
            /* Try to extend chunk boundary. */
            if (pages_map((void *)((uintptr_t)ret + size),
                chunksize - offset, noreserve) == NULL) {
                /*
                 * Extension failed.  Clean up, then revert to
                 * the reliable-but-expensive method.
                 */
                pages_unmap(ret, size);
                ret = chunk_alloc_mmap_slow(size, true,
                    noreserve);
            } else {
                /* Clean up unneeded leading space. */
                pages_unmap(ret, chunksize - offset);
                ret = (void *)((uintptr_t)ret + (chunksize -
                    offset));
            }
        }
    } else
        ret = chunk_alloc_mmap_slow(size, false, noreserve);

    return (ret);
}
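
/*
 * Illustrative note, not part of the original source: a sketch of the fast
 * path above under the same hypothetical chunksize of 0x400000 and made-up
 * addresses.  Mapping exactly size bytes returns ret == 0x7f0000300000, so
 * offset == 0x300000.  The code then tries to map an additional
 * (chunksize - offset) == 0x100000 bytes at ret + size; if that extension
 * succeeds, the leading 0x100000 bytes are unmapped and ret advances to the
 * chunk boundary 0x7f0000400000.  Only if the extension mapping fails does it
 * fall back to chunk_alloc_mmap_slow().
 */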

void *
chunk_alloc_mmap(size_t size)
{

    return (chunk_alloc_mmap_internal(size, false));
}

void *
chunk_alloc_mmap_noreserve(size_t size)
{

    return (chunk_alloc_mmap_internal(size, true));
}

void
chunk_dealloc_mmap(void *chunk, size_t size)
{

    pages_unmap(chunk, size);
}

bool
chunk_mmap_boot(void)
{

#ifdef NO_TLS
    if (pthread_key_create(&mmap_unaligned_tsd, NULL) != 0) {
        malloc_write("<jemalloc>: Error in pthread_key_create()\n");
        return (true);
    }
#endif

    return (false);
}
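
/*
 * Illustrative usage, not part of the original source: callers elsewhere in
 * jemalloc obtain and release chunk-aligned memory roughly as follows, where
 * size is a multiple of chunksize:
 *
 *     void *chunk = chunk_alloc_mmap(size);
 *     if (chunk != NULL) {
 *         ... use [chunk, chunk + size) ...
 *         chunk_dealloc_mmap(chunk, size);
 *     }
 *
 * chunk_alloc_mmap_noreserve() behaves the same, but requests MAP_NORESERVE
 * mappings where the platform supports them.
 */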