// Copyright (c) 2005, 2007, Google Inc.
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
//     * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

// ---
// Author: Sanjay Ghemawat

#include "config.h"
#if HAVE(STDINT_H)
#include <stdint.h>
#elif HAVE(INTTYPES_H)
#include <inttypes.h>
#else
#include <sys/types.h>
#endif
#if PLATFORM(WIN_OS)
#include "windows.h"
#else
#include <errno.h>
#include <unistd.h>
#include <sys/mman.h>
#endif
#include <fcntl.h>
#include "Assertions.h"
#include "TCSystemAlloc.h"
#include "TCSpinLock.h"
#include "UnusedParam.h"

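// Some systems only define MAP_ANON for anonymous mappings; use it as
// MAP_ANONYMOUS there.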
#ifndef MAP_ANONYMOUS
#define MAP_ANONYMOUS MAP_ANON
#endif

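// On Darwin the file descriptor passed to an anonymous mmap() can carry a
// VM region tag created with VM_MAKE_TAG, which lets tools such as vmmap
// attribute these regions to TCMalloc. Everywhere else we pass -1, the
// conventional descriptor for anonymous mappings.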
#if PLATFORM(DARWIN) && defined(VM_MEMORY_TCMALLOC)
static const int mmapFileDescriptor = VM_MAKE_TAG(VM_MEMORY_TCMALLOC);
#else
static const int mmapFileDescriptor = -1;
#endif

// Structure for discovering alignment
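// (its size is the size of its largest member, and TCMalloc_SystemAlloc
// below never returns memory with less alignment than that).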
union MemoryAligner {
  void*  p;
  double d;
  size_t s;
};

static SpinLock spinlock = SPINLOCK_INITIALIZER;

// Page size is initialized on demand
static size_t pagesize = 0;

// Configuration parameters.
//
// if use_devmem is true, either use_sbrk or use_mmap must also be true.
// For 2.2 kernels, it looks like the sbrk address space (500MBish) and
// the mmap address space (1300MBish) are disjoint, so we need both allocators
// to get as much virtual memory as possible.
#ifndef WTF_CHANGES
static bool use_devmem = false;
#endif

#if HAVE(SBRK)
static bool use_sbrk = false;
#endif

#if HAVE(MMAP)
static bool use_mmap = true;
#endif

#if HAVE(VIRTUALALLOC)
static bool use_VirtualAlloc = true;
#endif

// Flags to keep us from retrying allocators that failed.
static bool devmem_failure = false;
static bool sbrk_failure = false;
static bool mmap_failure = false;
static bool VirtualAlloc_failure = false;

#ifndef WTF_CHANGES
DEFINE_int32(malloc_devmem_start, 0,
             "Physical memory starting location in MB for /dev/mem allocation."
             " Setting this to 0 disables /dev/mem allocation");
DEFINE_int32(malloc_devmem_limit, 0,
             "Physical memory limit location in MB for /dev/mem allocation."
             " Setting this to 0 means no limit.");
#else
static const int32_t FLAGS_malloc_devmem_start = 0;
static const int32_t FLAGS_malloc_devmem_limit = 0;
#endif

#if HAVE(SBRK)

static void* TrySbrk(size_t size, size_t *actual_size, size_t alignment) {
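  // Round the request up to a whole number of alignment units; for example,
  // a 10-byte request with 8-byte alignment becomes a 16-byte request.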
  size = ((size + alignment - 1) / alignment) * alignment;

  // could theoretically return the "extra" bytes here, but this
  // is simple and correct.
  if (actual_size)
    *actual_size = size;

  void* result = sbrk(size);
  if (result == reinterpret_cast<void*>(-1)) {
    sbrk_failure = true;
    return NULL;
  }

  // Is it aligned?
  uintptr_t ptr = reinterpret_cast<uintptr_t>(result);
  if ((ptr & (alignment-1)) == 0) return result;

  // Try to get more memory for alignment
  size_t extra = alignment - (ptr & (alignment-1));
  void* r2 = sbrk(extra);
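  // sbrk() returns the previous break. If that equals ptr + size, nothing
  // else has moved the break since our first call, so the extra bytes sit
  // directly after our block and we can slide the start forward to an
  // aligned address without losing any of the requested size.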
  if (reinterpret_cast<uintptr_t>(r2) == (ptr + size)) {
    // Contiguous with previous result
    return reinterpret_cast<void*>(ptr + extra);
  }

  // Give up and ask for "size + alignment - 1" bytes so
  // that we can find an aligned region within it.
  result = sbrk(size + alignment - 1);
  if (result == reinterpret_cast<void*>(-1)) {
    sbrk_failure = true;
    return NULL;
  }
  ptr = reinterpret_cast<uintptr_t>(result);
  if ((ptr & (alignment-1)) != 0) {
    ptr += alignment - (ptr & (alignment-1));
  }
  return reinterpret_cast<void*>(ptr);
}

#endif /* HAVE(SBRK) */

#if HAVE(MMAP)

static void* TryMmap(size_t size, size_t *actual_size, size_t alignment) {
  // Enforce page alignment
  if (pagesize == 0) pagesize = getpagesize();
  if (alignment < pagesize) alignment = pagesize;
  size = ((size + alignment - 1) / alignment) * alignment;

  // could theoretically return the "extra" bytes here, but this
  // is simple and correct.
  if (actual_size)
    *actual_size = size;

  // Ask for extra memory if alignment > pagesize
  size_t extra = 0;
  if (alignment > pagesize) {
    extra = alignment - pagesize;
  }
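  // mmap() returns page-aligned memory, so a mapping of size + (alignment -
  // pagesize) bytes is guaranteed to contain an alignment-aligned run of
  // size bytes: the gap from the start of the mapping to the next aligned
  // address is at most alignment - pagesize.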
  void* result = mmap(NULL, size + extra,
                      PROT_READ|PROT_WRITE,
                      MAP_PRIVATE|MAP_ANONYMOUS,
                      mmapFileDescriptor, 0);
  if (result == reinterpret_cast<void*>(MAP_FAILED)) {
    mmap_failure = true;
    return NULL;
  }

  // Adjust the return memory so it is aligned
  uintptr_t ptr = reinterpret_cast<uintptr_t>(result);
  size_t adjust = 0;
  if ((ptr & (alignment - 1)) != 0) {
    adjust = alignment - (ptr & (alignment - 1));
  }

  // Return the unused memory to the system
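  // (the leading slop [ptr, ptr + adjust) and whatever is left of the extra
  // tail [ptr + adjust + size, ptr + size + extra); only the aligned
  // size-byte region stays mapped).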
  if (adjust > 0) {
    munmap(reinterpret_cast<void*>(ptr), adjust);
  }
  if (adjust < extra) {
    munmap(reinterpret_cast<void*>(ptr + adjust + size), extra - adjust);
  }

  ptr += adjust;
  return reinterpret_cast<void*>(ptr);
}

#endif /* HAVE(MMAP) */

#if HAVE(VIRTUALALLOC)

static void* TryVirtualAlloc(size_t size, size_t *actual_size, size_t alignment) {
  // Enforce page alignment
  if (pagesize == 0) {
    SYSTEM_INFO system_info;
    GetSystemInfo(&system_info);
    pagesize = system_info.dwPageSize;
  }
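  // Note: VirtualAlloc reservations start on an allocation-granularity
  // boundary (typically 64KB), which is larger than dwPageSize on most
  // systems, so for common alignments the adjustment below ends up zero.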

  if (alignment < pagesize) alignment = pagesize;
  size = ((size + alignment - 1) / alignment) * alignment;

  // could theoretically return the "extra" bytes here, but this
  // is simple and correct.
  if (actual_size)
    *actual_size = size;

  // Ask for extra memory if alignment > pagesize
  size_t extra = 0;
  if (alignment > pagesize) {
    extra = alignment - pagesize;
  }
  void* result = VirtualAlloc(NULL, size + extra,
                              MEM_RESERVE | MEM_COMMIT | MEM_TOP_DOWN,
                              PAGE_READWRITE);

  if (result == NULL) {
    VirtualAlloc_failure = true;
    return NULL;
  }

  // Adjust the return memory so it is aligned
  uintptr_t ptr = reinterpret_cast<uintptr_t>(result);
  size_t adjust = 0;
  if ((ptr & (alignment - 1)) != 0) {
    adjust = alignment - (ptr & (alignment - 1));
  }

  // Return the unused memory to the system - we'd like to release but the best we can do
  // is decommit, since Windows only lets you free the whole allocation.
  if (adjust > 0) {
    VirtualFree(reinterpret_cast<void*>(ptr), adjust, MEM_DECOMMIT);
  }
  if (adjust < extra) {
    VirtualFree(reinterpret_cast<void*>(ptr + adjust + size), extra - adjust, MEM_DECOMMIT);
  }

  ptr += adjust;
  return reinterpret_cast<void*>(ptr);
}

#endif /* HAVE(VIRTUALALLOC) */

#ifndef WTF_CHANGES
static void* TryDevMem(size_t size, size_t *actual_size, size_t alignment) {
  static bool initialized = false;
  static off_t physmem_base;   // next physical memory address to allocate
  static off_t physmem_limit;  // maximum physical address allowed
  static int physmem_fd;       // file descriptor for /dev/mem

  // Check if we should use /dev/mem allocation. Note that it may take
  // a while to get this flag initialized, so meanwhile we fall back to
  // the next allocator. (It looks like 7MB gets allocated before
  // this flag gets initialized -khr.)
  if (FLAGS_malloc_devmem_start == 0) {
    // NOTE: not a devmem_failure - we'd like TCMalloc_SystemAlloc to
    // try us again next time.
    return NULL;
  }

  if (!initialized) {
    physmem_fd = open("/dev/mem", O_RDWR);
    if (physmem_fd < 0) {
      devmem_failure = true;
      return NULL;
    }
    physmem_base = FLAGS_malloc_devmem_start*1024LL*1024LL;
    physmem_limit = FLAGS_malloc_devmem_limit*1024LL*1024LL;
    initialized = true;
  }
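  // physmem_base and physmem_limit are byte offsets into physical memory;
  // mapping /dev/mem at offset physmem_base hands back the physical pages
  // starting at that address, and the base is bumped past each allocation
  // below so successive calls carve out disjoint ranges.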

  // Enforce page alignment
  if (pagesize == 0) pagesize = getpagesize();
  if (alignment < pagesize) alignment = pagesize;
  size = ((size + alignment - 1) / alignment) * alignment;

  // could theoretically return the "extra" bytes here, but this
  // is simple and correct.
  if (actual_size)
    *actual_size = size;

  // Ask for extra memory if alignment > pagesize
  size_t extra = 0;
  if (alignment > pagesize) {
    extra = alignment - pagesize;
  }

  // check to see if we have any memory left
  if (physmem_limit != 0 && physmem_base + size + extra > physmem_limit) {
    devmem_failure = true;
    return NULL;
  }
  void *result = mmap(0, size + extra, PROT_WRITE|PROT_READ,
                      MAP_SHARED, physmem_fd, physmem_base);
  if (result == reinterpret_cast<void*>(MAP_FAILED)) {
    devmem_failure = true;
    return NULL;
  }
  uintptr_t ptr = reinterpret_cast<uintptr_t>(result);

  // Adjust the return memory so it is aligned
  size_t adjust = 0;
  if ((ptr & (alignment - 1)) != 0) {
    adjust = alignment - (ptr & (alignment - 1));
  }

  // Return the unused virtual memory to the system
  if (adjust > 0) {
    munmap(reinterpret_cast<void*>(ptr), adjust);
  }
  if (adjust < extra) {
    munmap(reinterpret_cast<void*>(ptr + adjust + size), extra - adjust);
  }

  ptr += adjust;
  physmem_base += adjust + size;

  return reinterpret_cast<void*>(ptr);
}
#endif

void* TCMalloc_SystemAlloc(size_t size, size_t *actual_size, size_t alignment) {
  // Discard requests that overflow
  if (size + alignment < size) return NULL;
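  // (the allocators below round size up to a multiple of alignment and may
  // add up to alignment - 1 extra bytes, so a request whose sum wraps around
  // can never be satisfied).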

  SpinLockHolder lock_holder(&spinlock);

  // Enforce minimum alignment
  if (alignment < sizeof(MemoryAligner)) alignment = sizeof(MemoryAligner);

  // Try twice, once avoiding allocators that failed before, and once
  // more trying all allocators even if they failed before.
  for (int i = 0; i < 2; i++) {

#ifndef WTF_CHANGES
    if (use_devmem && !devmem_failure) {
      void* result = TryDevMem(size, actual_size, alignment);
      if (result != NULL) return result;
    }
#endif

#if HAVE(SBRK)
    if (use_sbrk && !sbrk_failure) {
      void* result = TrySbrk(size, actual_size, alignment);
      if (result != NULL) return result;
    }
#endif

#if HAVE(MMAP)
    if (use_mmap && !mmap_failure) {
      void* result = TryMmap(size, actual_size, alignment);
      if (result != NULL) return result;
    }
#endif

#if HAVE(VIRTUALALLOC)
    if (use_VirtualAlloc && !VirtualAlloc_failure) {
      void* result = TryVirtualAlloc(size, actual_size, alignment);
      if (result != NULL) return result;
    }
#endif

    // nothing worked - reset failure flags and try again
    devmem_failure = false;
    sbrk_failure = false;
    mmap_failure = false;
    VirtualAlloc_failure = false;
  }
  return NULL;
}

void TCMalloc_SystemRelease(void* start, size_t length)
{
  UNUSED_PARAM(start);
  UNUSED_PARAM(length);
#if HAVE(MADV_DONTNEED)
  if (FLAGS_malloc_devmem_start) {
    // It's not safe to use MADV_DONTNEED if we've been mapping
    // /dev/mem for heap memory
    return;
  }
  if (pagesize == 0) pagesize = getpagesize();
  const size_t pagemask = pagesize - 1;

  size_t new_start = reinterpret_cast<size_t>(start);
  size_t end = new_start + length;
  size_t new_end = end;

  // Round up the starting address and round down the ending address
  // to be page aligned:
  new_start = (new_start + pagesize - 1) & ~pagemask;
  new_end = new_end & ~pagemask;
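  // madvise() works on whole pages, and rounding inward guarantees we only
  // discard pages that lie entirely inside [start, start + length).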

  ASSERT((new_start & pagemask) == 0);
  ASSERT((new_end & pagemask) == 0);
  ASSERT(new_start >= reinterpret_cast<size_t>(start));
  ASSERT(new_end <= end);

  if (new_end > new_start) {
    // Note -- ignoring most return codes, because if this fails it
    // doesn't matter...
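    // madvise() can fail transiently with EAGAIN, so retry in that case; any
    // other failure just means the pages stay resident, which is harmless.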
    while (madvise(reinterpret_cast<char*>(new_start), new_end - new_start,
                   MADV_DONTNEED) == -1 &&
           errno == EAGAIN) {
      // NOP
    }
    return;
  }
#endif

#if HAVE(MMAP)
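  // Overwrite the range with a fresh anonymous MAP_FIXED mapping: the old
  // pages are discarded so the kernel can reclaim the physical memory, while
  // the virtual address range stays reserved for later reuse by the heap.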
  void *newAddress = mmap(start, length, PROT_READ|PROT_WRITE, MAP_PRIVATE|MAP_ANONYMOUS|MAP_FIXED, mmapFileDescriptor, 0);
  UNUSED_PARAM(newAddress);
  // If the mmap failed then that's ok, we just won't return the memory to the system.
  ASSERT(newAddress == start || newAddress == reinterpret_cast<void*>(MAP_FAILED));
  return;
#endif
}