redis.git: deps/jemalloc/src/chunk_mmap.c
#define JEMALLOC_CHUNK_MMAP_C_
#include "jemalloc/internal/jemalloc_internal.h"

/******************************************************************************/
/* Data. */

/*
 * Used by chunk_alloc_mmap() to decide whether to attempt the fast path and
 * potentially avoid some system calls.
 */
#ifndef NO_TLS
static __thread bool	mmap_unaligned_tls
    JEMALLOC_ATTR(tls_model("initial-exec"));
#define MMAP_UNALIGNED_GET()	mmap_unaligned_tls
#define MMAP_UNALIGNED_SET(v)	do {					\
	mmap_unaligned_tls = (v);					\
} while (0)
#else
static pthread_key_t	mmap_unaligned_tsd;
#define MMAP_UNALIGNED_GET()	((bool)pthread_getspecific(mmap_unaligned_tsd))
#define MMAP_UNALIGNED_SET(v)	do {					\
	pthread_setspecific(mmap_unaligned_tsd, (void *)(v));		\
} while (0)
#endif
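
/*
 * In both configurations mmap_unaligned starts out false: the TLS variable is
 * zero-initialized, and pthread_getspecific() returns NULL (i.e. false) until
 * a value has been set for the key.  The first chunk_alloc_mmap() call on a
 * thread therefore attempts the fast path.
 */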

/******************************************************************************/
/* Function prototypes for non-inline static functions. */

static void	*pages_map(void *addr, size_t size, bool noreserve);
static void	pages_unmap(void *addr, size_t size);
static void	*chunk_alloc_mmap_slow(size_t size, bool unaligned,
    bool noreserve);
static void	*chunk_alloc_mmap_internal(size_t size, bool noreserve);

/******************************************************************************/

static void *
pages_map(void *addr, size_t size, bool noreserve)
{
	void *ret;

	/*
	 * We don't use MAP_FIXED here, because it can cause the *replacement*
	 * of existing mappings, and we only want to create new mappings.
	 */
	int flags = MAP_PRIVATE | MAP_ANON;
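	/*
	 * MAP_NORESERVE, where the platform provides it, asks the kernel not
	 * to reserve swap/commit space for the mapping up front; a later
	 * write to such a page may then fail under memory pressure, but
	 * merely reserved address space does not count against overcommit
	 * limits.
	 */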
#ifdef MAP_NORESERVE
	if (noreserve)
		flags |= MAP_NORESERVE;
#endif
	ret = mmap(addr, size, PROT_READ | PROT_WRITE, flags, -1, 0);
	assert(ret != NULL);

	if (ret == MAP_FAILED)
		ret = NULL;
	else if (addr != NULL && ret != addr) {
		/*
		 * We succeeded in mapping memory, but not in the right place.
		 */
		if (munmap(ret, size) == -1) {
			char buf[BUFERROR_BUF];

			buferror(errno, buf, sizeof(buf));
			malloc_write("<jemalloc>: Error in munmap(): ");
			malloc_write(buf);
			malloc_write("\n");
			if (opt_abort)
				abort();
		}
		ret = NULL;
	}

	assert(ret == NULL || (addr == NULL && ret != addr)
	    || (addr != NULL && ret == addr));
	return (ret);
}

static void
pages_unmap(void *addr, size_t size)
{

	if (munmap(addr, size) == -1) {
		char buf[BUFERROR_BUF];

		buferror(errno, buf, sizeof(buf));
		malloc_write("<jemalloc>: Error in munmap(): ");
		malloc_write(buf);
		malloc_write("\n");
		if (opt_abort)
			abort();
	}
}

static void *
chunk_alloc_mmap_slow(size_t size, bool unaligned, bool noreserve)
{
	void *ret;
	size_t offset;

	/* Beware size_t wrap-around. */
	if (size + chunksize <= size)
		return (NULL);
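	/*
	 * Since chunksize is nonzero, size + chunksize can only be <= size if
	 * the addition wrapped past SIZE_MAX, so the check above rejects
	 * requests that would otherwise ask pages_map() for a tiny, wrapped
	 * length.
	 */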

	ret = pages_map(NULL, size + chunksize, noreserve);
	if (ret == NULL)
		return (NULL);

	/* Clean up unneeded leading/trailing space. */
	offset = CHUNK_ADDR2OFFSET(ret);
	if (offset != 0) {
		/* Note that mmap() returned an unaligned mapping. */
		unaligned = true;

		/* Leading space. */
		pages_unmap(ret, chunksize - offset);

		ret = (void *)((uintptr_t)ret +
		    (chunksize - offset));

		/* Trailing space. */
		pages_unmap((void *)((uintptr_t)ret + size),
		    offset);
	} else {
		/* Trailing space only. */
		pages_unmap((void *)((uintptr_t)ret + size),
		    chunksize);
	}
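	/*
	 * Illustrative arithmetic (values hypothetical): with a 4 MiB
	 * chunksize, if mmap() returns an address 1 MiB past a chunk
	 * boundary, the leading 3 MiB and the trailing 1 MiB are unmapped,
	 * leaving exactly size chunk-aligned bytes.  In every case one full
	 * chunksize of the over-allocation is given back.
	 */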

	/*
	 * If mmap() returned an aligned mapping, reset mmap_unaligned so that
	 * the next chunk_alloc_mmap() execution tries the fast allocation
	 * method.
	 */
	if (unaligned == false)
		MMAP_UNALIGNED_SET(false);

	return (ret);
}

static void *
chunk_alloc_mmap_internal(size_t size, bool noreserve)
{
	void *ret;

	/*
	 * Ideally, there would be a way to specify alignment to mmap() (like
	 * NetBSD has), but in the absence of such a feature, we have to work
	 * hard to efficiently create aligned mappings.  The reliable, but
	 * slow method is to create a mapping that is over-sized, then trim
	 * the excess.  However, that always results in at least one call to
	 * pages_unmap().
	 *
	 * A more optimistic approach is to try mapping precisely the right
	 * amount, then try to append another mapping if alignment is off.  In
	 * practice, this works out well as long as the application is not
	 * interleaving mappings via direct mmap() calls.  If we do run into a
	 * situation where there is an interleaved mapping and we are unable
	 * to extend an unaligned mapping, our best option is to switch to the
	 * slow method until mmap() returns another aligned mapping.  This
	 * will tend to leave a gap in the memory map that is too small to
	 * cause later problems for the optimistic method.
	 *
	 * Another possible confounding factor is address space layout
	 * randomization (ASLR), which causes mmap(2) to disregard the
	 * requested address.  mmap_unaligned tracks whether the previous
	 * chunk_alloc_mmap() execution received any unaligned or relocated
	 * mappings, and if so, the current execution will immediately fall
	 * back to the slow method.  However, we keep track of whether the
	 * fast method would have succeeded, and if so, we make a note to try
	 * the fast method next time.
	 */
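	/*
	 * Worked example of the optimistic path (numbers illustrative): with
	 * a 4 MiB chunksize, suppose the initial mmap() of size bytes lands
	 * 1 MiB past a chunk boundary.  We then try to map a further 3 MiB
	 * immediately after the end of that mapping; if the kernel grants
	 * exactly that address, we unmap the leading 3 MiB and advance ret to
	 * the next chunk boundary, ending up with size aligned bytes.  If the
	 * extension cannot be placed there, everything is unmapped and we
	 * fall back to chunk_alloc_mmap_slow().
	 */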

	if (MMAP_UNALIGNED_GET() == false) {
		size_t offset;

		ret = pages_map(NULL, size, noreserve);
		if (ret == NULL)
			return (NULL);

		offset = CHUNK_ADDR2OFFSET(ret);
		if (offset != 0) {
			MMAP_UNALIGNED_SET(true);
			/* Try to extend chunk boundary. */
			if (pages_map((void *)((uintptr_t)ret + size),
			    chunksize - offset, noreserve) == NULL) {
				/*
				 * Extension failed.  Clean up, then revert to
				 * the reliable-but-expensive method.
				 */
				pages_unmap(ret, size);
				ret = chunk_alloc_mmap_slow(size, true,
				    noreserve);
			} else {
				/* Clean up unneeded leading space. */
				pages_unmap(ret, chunksize - offset);
				ret = (void *)((uintptr_t)ret + (chunksize -
				    offset));
			}
		}
	} else
		ret = chunk_alloc_mmap_slow(size, false, noreserve);

	return (ret);
}

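/*
 * Non-static interface: thin wrappers around chunk_alloc_mmap_internal() that
 * select whether noreserve (overcommit-friendly) mappings are requested, plus
 * the matching deallocation routine.
 */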
void *
chunk_alloc_mmap(size_t size)
{

	return (chunk_alloc_mmap_internal(size, false));
}

void *
chunk_alloc_mmap_noreserve(size_t size)
{

	return (chunk_alloc_mmap_internal(size, true));
}

void
chunk_dealloc_mmap(void *chunk, size_t size)
{

	pages_unmap(chunk, size);
}

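/*
 * Bootstrap hook.  On configurations without TLS this creates the TSD key
 * that backs mmap_unaligned, so it must run (during jemalloc initialization)
 * before the first chunk_alloc_mmap() call.  Returns true on failure.
 */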
bool
chunk_mmap_boot(void)
{

#ifdef NO_TLS
	if (pthread_key_create(&mmap_unaligned_tsd, NULL) != 0) {
		malloc_write("<jemalloc>: Error in pthread_key_create()\n");
		return (true);
	}
#endif

	return (false);
}