#define JEMALLOC_CHUNK_MMAP_C_
#include "jemalloc/internal/jemalloc_internal.h"

/******************************************************************************/
/* Function prototypes for non-inline static functions. */

static void *pages_map(void *addr, size_t size);
static void pages_unmap(void *addr, size_t size);
static void *chunk_alloc_mmap_slow(size_t size, size_t alignment,
    bool *zero);

/******************************************************************************/

static void *
pages_map(void *addr, size_t size)
{
    void *ret;

    assert(size != 0);

#ifdef _WIN32
    /*
     * If VirtualAlloc can't allocate at the given address when one is
     * given, it fails and returns NULL.
     */
    ret = VirtualAlloc(addr, size, MEM_COMMIT | MEM_RESERVE,
        PAGE_READWRITE);
#else
    /*
     * We don't use MAP_FIXED here, because it can cause the *replacement*
     * of existing mappings, and we only want to create new mappings.
     */
    ret = mmap(addr, size, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANON,
        -1, 0);
    assert(ret != NULL);

    if (ret == MAP_FAILED)
        ret = NULL;
    else if (addr != NULL && ret != addr) {
        /*
         * We succeeded in mapping memory, but not in the right place.
         */
        if (munmap(ret, size) == -1) {
            char buf[BUFERROR_BUF];

            buferror(buf, sizeof(buf));
            malloc_printf("<jemalloc>: Error in munmap(): %s\n",
                buf);
            if (opt_abort)
                abort();
        }
        ret = NULL;
    }
#endif
    assert(ret == NULL || (addr == NULL && ret != addr)
        || (addr != NULL && ret == addr));
    return (ret);
}

static void
pages_unmap(void *addr, size_t size)
{

#ifdef _WIN32
    if (VirtualFree(addr, 0, MEM_RELEASE) == 0)
#else
    if (munmap(addr, size) == -1)
#endif
    {
        char buf[BUFERROR_BUF];

        buferror(buf, sizeof(buf));
        malloc_printf("<jemalloc>: Error in "
#ifdef _WIN32
            "VirtualFree"
#else
            "munmap"
#endif
            "(): %s\n", buf);
        if (opt_abort)
            abort();
    }
}

static void *
pages_trim(void *addr, size_t alloc_size, size_t leadsize, size_t size)
{
    void *ret = (void *)((uintptr_t)addr + leadsize);

    assert(alloc_size >= leadsize + size);
#ifdef _WIN32
    {
        void *new_addr;

        pages_unmap(addr, alloc_size);
        new_addr = pages_map(ret, size);
        if (new_addr == ret)
            return (ret);
        if (new_addr)
            pages_unmap(new_addr, size);
        return (NULL);
    }
#else
    {
        size_t trailsize = alloc_size - leadsize - size;

        if (leadsize != 0)
            pages_unmap(addr, leadsize);
        if (trailsize != 0)
            pages_unmap((void *)((uintptr_t)ret + size), trailsize);
        return (ret);
    }
#endif
}

void
pages_purge(void *addr, size_t length)
{

#ifdef _WIN32
    VirtualAlloc(addr, length, MEM_RESET, PAGE_READWRITE);
#else
#  ifdef JEMALLOC_PURGE_MADVISE_DONTNEED
#    define JEMALLOC_MADV_PURGE MADV_DONTNEED
#  elif defined(JEMALLOC_PURGE_MADVISE_FREE)
#    define JEMALLOC_MADV_PURGE MADV_FREE
#  else
#    error "No method defined for purging unused dirty pages."
#  endif
    madvise(addr, length, JEMALLOC_MADV_PURGE);
#endif
}

static void *
chunk_alloc_mmap_slow(size_t size, size_t alignment, bool *zero)
{
    void *ret, *pages;
    size_t alloc_size, leadsize;

    alloc_size = size + alignment - PAGE;
    /* Beware size_t wrap-around. */
    if (alloc_size < size)
        return (NULL);
    do {
        pages = pages_map(NULL, alloc_size);
        if (pages == NULL)
            return (NULL);
        leadsize = ALIGNMENT_CEILING((uintptr_t)pages, alignment) -
            (uintptr_t)pages;
        ret = pages_trim(pages, alloc_size, leadsize, size);
    } while (ret == NULL);

    assert(ret != NULL);
    *zero = true;
    return (ret);
}

void *
chunk_alloc_mmap(size_t size, size_t alignment, bool *zero)
{
    void *ret;
    size_t offset;

    /*
     * Ideally, there would be a way to specify alignment to mmap() (like
     * NetBSD has), but in the absence of such a feature, we have to work
     * hard to efficiently create aligned mappings.  The reliable, but
     * slow method is to create a mapping that is over-sized, then trim
     * the excess.  However, that always results in one or two calls to
     * pages_unmap().
     *
     * Optimistically try mapping precisely the right amount before
     * falling back to the slow method, with the expectation that the
     * optimistic approach works most of the time.
     */

    assert(alignment != 0);
    assert((alignment & chunksize_mask) == 0);

    ret = pages_map(NULL, size);
    if (ret == NULL)
        return (NULL);
    offset = ALIGNMENT_ADDR2OFFSET(ret, alignment);
    if (offset != 0) {
        pages_unmap(ret, size);
        return (chunk_alloc_mmap_slow(size, alignment, zero));
    }

    assert(ret != NULL);
    *zero = true;
    return (ret);
}

bool
chunk_dealloc_mmap(void *chunk, size_t size)
{

    if (config_munmap)
        pages_unmap(chunk, size);

    return (config_munmap == false);
}