#define JEMALLOC_CHUNK_MMAP_C_
#include "jemalloc/internal/jemalloc_internal.h"

/******************************************************************************/
/* Function prototypes for non-inline static functions. */

static void	*pages_map(void *addr, size_t size);
static void	pages_unmap(void *addr, size_t size);
static void	*chunk_alloc_mmap_slow(size_t size, size_t alignment,
    bool *zero);

/******************************************************************************/

static void *
pages_map(void *addr, size_t size)
{
	void *ret;

	assert(size != 0);

#ifdef _WIN32
	/*
	 * If VirtualAlloc can't allocate at the given address when one is
	 * given, it fails and returns NULL.
	 */
	ret = VirtualAlloc(addr, size, MEM_COMMIT | MEM_RESERVE,
	    PAGE_READWRITE);
#else
	/*
	 * We don't use MAP_FIXED here, because it can cause the *replacement*
	 * of existing mappings, and we only want to create new mappings.
	 */
	ret = mmap(addr, size, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANON,
	    -1, 0);
	assert(ret != NULL);

	if (ret == MAP_FAILED)
		ret = NULL;
	else if (addr != NULL && ret != addr) {
		/*
		 * We succeeded in mapping memory, but not in the right place.
		 */
		if (munmap(ret, size) == -1) {
			char buf[BUFERROR_BUF];

			buferror(buf, sizeof(buf));
			malloc_printf("<jemalloc>: Error in munmap(): %s\n",
			    buf);
			if (opt_abort)
				abort();
		}
		ret = NULL;
	}
#endif
	assert(ret == NULL || (addr == NULL && ret != addr)
	    || (addr != NULL && ret == addr));
	return (ret);
}

static void
pages_unmap(void *addr, size_t size)
{

#ifdef _WIN32
	if (VirtualFree(addr, 0, MEM_RELEASE) == 0)
#else
	if (munmap(addr, size) == -1)
#endif
	{
		char buf[BUFERROR_BUF];

		buferror(buf, sizeof(buf));
		malloc_printf("<jemalloc>: Error in "
#ifdef _WIN32
		    "VirtualFree"
#else
		    "munmap"
#endif
		    "(): %s\n", buf);
		if (opt_abort)
			abort();
	}
}

static void *
pages_trim(void *addr, size_t alloc_size, size_t leadsize, size_t size)
{
	void *ret = (void *)((uintptr_t)addr + leadsize);

	assert(alloc_size >= leadsize + size);
#ifdef _WIN32
	{
		void *new_addr;

		pages_unmap(addr, alloc_size);
		new_addr = pages_map(ret, size);
		if (new_addr == ret)
			return (ret);
		if (new_addr)
			pages_unmap(new_addr, size);
		return (NULL);
	}
#else
	{
		size_t trailsize = alloc_size - leadsize - size;

		if (leadsize != 0)
			pages_unmap(addr, leadsize);
		if (trailsize != 0)
			pages_unmap((void *)((uintptr_t)ret + size), trailsize);
		return (ret);
	}
#endif
}

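/*
 * Tell the kernel that the pages in [addr, addr+length) no longer need to
 * retain their contents.  MADV_DONTNEED (Linux) discards the backing pages
 * immediately, whereas MADV_FREE (BSD/Darwin) only marks them reclaimable;
 * on Windows, MEM_RESET provides similar lazy-discard semantics.
 */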
void
pages_purge(void *addr, size_t length)
{

#ifdef _WIN32
	VirtualAlloc(addr, length, MEM_RESET, PAGE_READWRITE);
#else
#  ifdef JEMALLOC_PURGE_MADVISE_DONTNEED
#    define JEMALLOC_MADV_PURGE MADV_DONTNEED
#  elif defined(JEMALLOC_PURGE_MADVISE_FREE)
#    define JEMALLOC_MADV_PURGE MADV_FREE
#  else
#    error "No method defined for purging unused dirty pages."
#  endif
	madvise(addr, length, JEMALLOC_MADV_PURGE);
#endif
}

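/*
 * Allocate an alignment-aligned region of size bytes by over-allocating and
 * trimming.  Mapping size + alignment - PAGE bytes is sufficient: the mapping
 * is page-aligned, so the gap to the next alignment boundary is at most
 * alignment - PAGE bytes, leaving at least size usable bytes once the leading
 * excess is trimmed.
 */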
static void *
chunk_alloc_mmap_slow(size_t size, size_t alignment, bool *zero)
{
	void *ret, *pages;
	size_t alloc_size, leadsize;

	alloc_size = size + alignment - PAGE;
	/* Beware size_t wrap-around. */
	if (alloc_size < size)
		return (NULL);
	do {
		pages = pages_map(NULL, alloc_size);
		if (pages == NULL)
			return (NULL);
		leadsize = ALIGNMENT_CEILING((uintptr_t)pages, alignment) -
		    (uintptr_t)pages;
		ret = pages_trim(pages, alloc_size, leadsize, size);
	} while (ret == NULL);

	assert(ret != NULL);
	*zero = true;
	return (ret);
}

void *
chunk_alloc_mmap(size_t size, size_t alignment, bool *zero)
{
	void *ret;
	size_t offset;

	/*
	 * Ideally, there would be a way to specify alignment to mmap() (like
	 * NetBSD has), but in the absence of such a feature, we have to work
	 * hard to efficiently create aligned mappings.  The reliable, but
	 * slow method is to create a mapping that is over-sized, then trim the
	 * excess.  However, that always results in one or two calls to
	 * pages_unmap().
	 *
	 * Optimistically try mapping precisely the right amount before falling
	 * back to the slow method, with the expectation that the optimistic
	 * approach works most of the time.
	 */
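	/*
	 * For illustration (the numbers are examples only): with 4 MiB chunks,
	 * 4 KiB pages, and alignment == chunksize, the fast path maps exactly
	 * 4 MiB and keeps the mapping whenever the kernel happens to return a
	 * 4 MiB aligned address.  Otherwise the slow path maps 8 MiB - 4 KiB,
	 * advances to the next 4 MiB boundary, and unmaps the excess on both
	 * sides.
	 */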
176 | ||
4934f93d | 177 | assert(alignment != 0); |
178 | assert((alignment & chunksize_mask) == 0); | |
a78e148b | 179 | |
4934f93d | 180 | ret = pages_map(NULL, size); |
181 | if (ret == NULL) | |
182 | return (NULL); | |
183 | offset = ALIGNMENT_ADDR2OFFSET(ret, alignment); | |
184 | if (offset != 0) { | |
185 | pages_unmap(ret, size); | |
186 | return (chunk_alloc_mmap_slow(size, alignment, zero)); | |
187 | } | |
a78e148b | 188 | |
4934f93d | 189 | assert(ret != NULL); |
190 | *zero = true; | |
a78e148b | 191 | return (ret); |
192 | } | |
193 | ||
bool
chunk_dealloc_mmap(void *chunk, size_t size)
{

	if (config_munmap)
		pages_unmap(chunk, size);

	return (config_munmap == false);
}