1 | // Copyright (c) 2005, 2007, Google Inc. |
2 | // All rights reserved. | |
3 | // Copyright (C) 2005, 2006, 2007, 2008 Apple Inc. All rights reserved. |
4 | // |
5 | // Redistribution and use in source and binary forms, with or without | |
6 | // modification, are permitted provided that the following conditions are | |
7 | // met: | |
8 | // | |
9 | // * Redistributions of source code must retain the above copyright | |
10 | // notice, this list of conditions and the following disclaimer. | |
11 | // * Redistributions in binary form must reproduce the above | |
12 | // copyright notice, this list of conditions and the following disclaimer | |
13 | // in the documentation and/or other materials provided with the | |
14 | // distribution. | |
15 | // * Neither the name of Google Inc. nor the names of its | |
16 | // contributors may be used to endorse or promote products derived from | |
17 | // this software without specific prior written permission. | |
18 | // | |
19 | // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS | |
20 | // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT | |
21 | // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR | |
22 | // A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT | |
23 | // OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, | |
24 | // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT | |
25 | // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, | |
26 | // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY | |
27 | // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT | |
28 | // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE | |
29 | // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. | |
30 | ||
31 | // --- | |
32 | // Author: Sanjay Ghemawat <opensource@google.com> | |
33 | // | |
34 | // A malloc that uses a per-thread cache to satisfy small malloc requests. | |
35 | // (The time for malloc/free of a small object drops from 300 ns to 50 ns.) | |
36 | // | |
37 | // See doc/tcmalloc.html for a high-level | |
38 | // description of how this malloc works. | |
39 | // | |
40 | // SYNCHRONIZATION | |
41 | // 1. The thread-specific lists are accessed without acquiring any locks. | |
42 | // This is safe because each such list is only accessed by one thread. | |
43 | // 2. We have a lock per central free-list, and hold it while manipulating | |
44 | // the central free list for a particular size. | |
45 | // 3. The central page allocator is protected by "pageheap_lock". | |
46 | // 4. The pagemap (which maps from page-number to descriptor), | |
47 | // can be read without holding any locks, and written while holding | |
48 | // the "pageheap_lock". | |
49 | // 5. To improve performance, a subset of the information one can get | |
50 | // from the pagemap is cached in a data structure, pagemap_cache_, | |
51 | // that atomically reads and writes its entries. This cache can be | |
52 | // read and written without locking. | |
53 | // | |
54 | // This multi-threaded access to the pagemap is safe for fairly | |
55 | // subtle reasons. We basically assume that when an object X is | |
56 | // allocated by thread A and deallocated by thread B, there must | |
57 | // have been appropriate synchronization in the handoff of object | |
58 | // X from thread A to thread B. The same logic applies to pagemap_cache_. | |
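//
// Illustrative sketch only (editorial addition, not part of the original
// source; threadCache, central_cache and SpinLockHolder are used loosely
// here): the rules above correspond to an allocation path shaped roughly
// like
//
//   if (void* object = threadCache->Allocate(size))    // rule 1: no lock
//       return object;
//   {
//       SpinLockHolder h(&central_cache[cl].lock_);     // rule 2
//       // refill the thread-local free list from the central free list
//   }
//   {
//       SpinLockHolder h(&pageheap_lock);               // rules 3 and 4
//       // grow the central free list from the page-level allocator
//   }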
59 | // | |
60 | // THE PAGEID-TO-SIZECLASS CACHE | |
61 | // Hot PageID-to-sizeclass mappings are held by pagemap_cache_. If this cache | |
62 | // returns 0 for a particular PageID then that means "no information," not that | |
63 | // the sizeclass is 0. The cache may have stale information for pages that do | |
64 | // not hold the beginning of any free()'able object. Staleness is eliminated | |
65 | // in Populate() for pages with sizeclass > 0 objects, and in do_malloc() and | |
66 | // do_memalign() for all other relevant pages. | |
67 | // | |
68 | // TODO: Bias reclamation to larger addresses | |
69 | // TODO: implement mallinfo/mallopt | |
70 | // TODO: Better testing | |
71 | // | |
72 | // 9/28/2003 (new page-level allocator replaces ptmalloc2): | |
73 | // * malloc/free of small objects goes from ~300 ns to ~50 ns. | |
74 | // * allocation of a reasonably complicated struct | |
75 | // goes from about 1100 ns to about 300 ns. | |
76 | ||
77 | #include "config.h" | |
78 | #include "FastMalloc.h" | |
79 | ||
80 | #include "Assertions.h" | |
81 | #if USE(MULTIPLE_THREADS) | |
82 | #include <pthread.h> | |
83 | #endif | |
84 | ||
85 | #ifndef NO_TCMALLOC_SAMPLES | |
86 | #ifdef WTF_CHANGES | |
87 | #define NO_TCMALLOC_SAMPLES | |
88 | #endif | |
89 | #endif | |
90 | ||
91 | #if !defined(USE_SYSTEM_MALLOC) && defined(NDEBUG) | |
92 | #define FORCE_SYSTEM_MALLOC 0 | |
93 | #else | |
94 | #define FORCE_SYSTEM_MALLOC 1 | |
95 | #endif | |
96 | ||
97 | #ifndef NDEBUG | |
98 | namespace WTF { | |
99 | ||
100 | #if USE(MULTIPLE_THREADS) | |
101 | static pthread_key_t isForbiddenKey; | |
102 | static pthread_once_t isForbiddenKeyOnce = PTHREAD_ONCE_INIT; | |
103 | static void initializeIsForbiddenKey() | |
104 | { | |
105 | pthread_key_create(&isForbiddenKey, 0); | |
106 | } | |
107 | ||
108 | static bool isForbidden() | |
109 | { | |
110 | pthread_once(&isForbiddenKeyOnce, initializeIsForbiddenKey); | |
111 | return !!pthread_getspecific(isForbiddenKey); | |
112 | } | |
113 | ||
114 | void fastMallocForbid() | |
115 | { | |
116 | pthread_once(&isForbiddenKeyOnce, initializeIsForbiddenKey); | |
117 | pthread_setspecific(isForbiddenKey, &isForbiddenKey); | |
118 | } | |
119 | ||
120 | void fastMallocAllow() | |
121 | { | |
122 | pthread_once(&isForbiddenKeyOnce, initializeIsForbiddenKey); | |
123 | pthread_setspecific(isForbiddenKey, 0); | |
124 | } | |
125 | ||
126 | #else | |
127 | ||
128 | static bool staticIsForbidden; | |
129 | static bool isForbidden() | |
130 | { | |
131 | return staticIsForbidden; | |
132 | } | |
133 | ||
134 | void fastMallocForbid() | |
135 | { | |
136 | staticIsForbidden = true; | |
137 | } | |
138 | ||
139 | void fastMallocAllow() | |
140 | { | |
141 | staticIsForbidden = false; | |
142 | } | |
143 | #endif // USE(MULTIPLE_THREADS) | |
144 | ||
145 | } // namespace WTF | |
146 | #endif // NDEBUG | |
147 | ||
148 | #include <string.h> | |
149 | ||
150 | namespace WTF { | |
151 | void *fastZeroedMalloc(size_t n) | |
152 | { | |
153 | void *result = fastMalloc(n); | |
154 | if (!result) | |
155 | return 0; | |
156 | memset(result, 0, n); | |
157 | #ifndef WTF_CHANGES | |
158 | MallocHook::InvokeNewHook(result, n); | |
159 | #endif | |
160 | return result; | |
161 | } | |
162 | ||
163 | } | |
164 | ||
165 | #if FORCE_SYSTEM_MALLOC | |
166 | ||
167 | #include <stdlib.h> | |
168 | #if !PLATFORM(WIN_OS) | |
169 | #include <pthread.h> | |
170 | #endif | |
171 | ||
172 | namespace WTF { | |
173 | ||
174 | void *fastMalloc(size_t n) | |
175 | { | |
176 | ASSERT(!isForbidden()); | |
177 | return malloc(n); | |
178 | } | |
179 | ||
180 | void *fastCalloc(size_t n_elements, size_t element_size) | |
181 | { | |
182 | ASSERT(!isForbidden()); | |
183 | return calloc(n_elements, element_size); | |
184 | } | |
185 | ||
186 | void fastFree(void* p) | |
187 | { | |
188 | ASSERT(!isForbidden()); | |
189 | free(p); | |
190 | } | |
191 | ||
192 | void *fastRealloc(void* p, size_t n) | |
193 | { | |
194 | ASSERT(!isForbidden()); | |
195 | return realloc(p, n); | |
196 | } | |
197 | ||
198 | void releaseFastMallocFreeMemory() { }
199 |
200 | } // namespace WTF |
201 |
202 | #if PLATFORM(DARWIN) | |
203 | // This symbol is present in the JavaScriptCore exports file even when FastMalloc is disabled. | |
204 | // It will never be used in this case, so its type and value are less interesting than its presence. |
205 | extern "C" const int jscore_fastmalloc_introspection = 0; | |
206 | #endif | |
207 | ||
208 | #else // FORCE_SYSTEM_MALLOC |
209 | |
210 | #if HAVE(STDINT_H) | |
211 | #include <stdint.h> | |
212 | #elif HAVE(INTTYPES_H) | |
213 | #include <inttypes.h> | |
214 | #else | |
215 | #include <sys/types.h> | |
216 | #endif | |
217 | ||
218 | #include "AlwaysInline.h" | |
219 | #include "Assertions.h" | |
220 | #include "TCPackedCache.h" | |
221 | #include "TCPageMap.h" | |
222 | #include "TCSpinLock.h" | |
223 | #include "TCSystemAlloc.h" | |
224 | #include <algorithm> | |
225 | #include <errno.h> | |
226 | #include <new> | |
227 | #include <pthread.h> | |
228 | #include <stdarg.h> | |
229 | #include <stddef.h> | |
230 | #include <stdio.h> | |
231 | #if COMPILER(MSVC) | |
232 | #ifndef WIN32_LEAN_AND_MEAN | |
233 | #define WIN32_LEAN_AND_MEAN | |
234 | #endif | |
235 | #include <windows.h> | |
236 | #endif | |
237 | ||
238 | #if WTF_CHANGES | |
239 | ||
240 | #if PLATFORM(DARWIN) | |
241 | #include "MallocZoneSupport.h" | |
242 | #endif | |
243 | ||
244 | #ifndef PRIuS | |
245 | #define PRIuS "zu" | |
246 | #endif | |
247 | ||
248 | // Calling pthread_getspecific through a global function pointer is faster than a normal | |
249 | // call to the function on Mac OS X, and it's used in performance-critical code. So we | |
250 | // use a function pointer. But that's not necessarily faster on other platforms, and we had | |
251 | // problems with this technique on Windows, so we'll do this only on Mac OS X. | |
252 | #if PLATFORM(DARWIN) | |
253 | static void* (*pthread_getspecific_function_pointer)(pthread_key_t) = pthread_getspecific; | |
254 | #define pthread_getspecific(key) pthread_getspecific_function_pointer(key) | |
255 | #endif | |
256 | ||
257 | #define DEFINE_VARIABLE(type, name, value, meaning) \ | |
258 | namespace FLAG__namespace_do_not_use_directly_use_DECLARE_##type##_instead { \ | |
259 | type FLAGS_##name(value); \ | |
260 | char FLAGS_no##name; \ | |
261 | } \ | |
262 | using FLAG__namespace_do_not_use_directly_use_DECLARE_##type##_instead::FLAGS_##name | |
263 | ||
264 | #define DEFINE_int64(name, value, meaning) \ | |
265 | DEFINE_VARIABLE(int64_t, name, value, meaning) | |
266 | ||
267 | #define DEFINE_double(name, value, meaning) \ | |
268 | DEFINE_VARIABLE(double, name, value, meaning) | |
269 | ||
270 | namespace WTF { | |
271 | ||
272 | #define malloc fastMalloc | |
273 | #define calloc fastCalloc | |
274 | #define free fastFree | |
275 | #define realloc fastRealloc | |
276 | ||
277 | #define MESSAGE LOG_ERROR | |
278 | #define CHECK_CONDITION ASSERT | |
279 | ||
280 | #if PLATFORM(DARWIN) | |
281 | class TCMalloc_PageHeap; | |
282 | class TCMalloc_ThreadCache; | |
283 | class TCMalloc_Central_FreeListPadded; | |
284 | ||
285 | class FastMallocZone { | |
286 | public: | |
287 | static void init(); | |
288 | ||
289 | static kern_return_t enumerate(task_t, void*, unsigned typeMask, vm_address_t zoneAddress, memory_reader_t, vm_range_recorder_t); |
290 | static size_t goodSize(malloc_zone_t*, size_t size) { return size; } | |
291 | static boolean_t check(malloc_zone_t*) { return true; } | |
292 | static void print(malloc_zone_t*, boolean_t) { } | |
293 | static void log(malloc_zone_t*, void*) { } | |
294 | static void forceLock(malloc_zone_t*) { } | |
295 | static void forceUnlock(malloc_zone_t*) { } | |
296 | static void statistics(malloc_zone_t*, malloc_statistics_t*) { } | |
297 | ||
298 | private: | |
299 | FastMallocZone(TCMalloc_PageHeap*, TCMalloc_ThreadCache**, TCMalloc_Central_FreeListPadded*); | |
300 | static size_t size(malloc_zone_t*, const void*); | |
301 | static void* zoneMalloc(malloc_zone_t*, size_t); | |
302 | static void* zoneCalloc(malloc_zone_t*, size_t numItems, size_t size); | |
303 | static void zoneFree(malloc_zone_t*, void*); | |
304 | static void* zoneRealloc(malloc_zone_t*, void*, size_t); | |
305 | static void* zoneValloc(malloc_zone_t*, size_t) { LOG_ERROR("valloc is not supported"); return 0; } | |
306 | static void zoneDestroy(malloc_zone_t*) { } | |
307 | ||
308 | malloc_zone_t m_zone; | |
309 | TCMalloc_PageHeap* m_pageHeap; | |
310 | TCMalloc_ThreadCache** m_threadHeaps; | |
311 | TCMalloc_Central_FreeListPadded* m_centralCaches; | |
312 | }; | |
313 | ||
314 | #endif | |
315 | ||
316 | #endif | |
317 | ||
318 | #ifndef WTF_CHANGES | |
319 | // This #ifdef should almost never be set. Set NO_TCMALLOC_SAMPLES if | |
320 | // you're porting to a system where you really can't get a stacktrace. | |
321 | #ifdef NO_TCMALLOC_SAMPLES | |
322 | // We use #define so code compiles even if you #include stacktrace.h somehow. | |
323 | # define GetStackTrace(stack, depth, skip) (0) | |
324 | #else | |
325 | # include <google/stacktrace.h> | |
326 | #endif | |
327 | #endif | |
328 | ||
329 | // Even if we have support for thread-local storage in the compiler | |
330 | // and linker, the OS may not support it. We need to check that at | |
331 | // runtime. Right now, we have to keep a manual set of "bad" OSes. | |
332 | #if defined(HAVE_TLS) | |
333 | static bool kernel_supports_tls = false; // be conservative | |
334 | static inline bool KernelSupportsTLS() { | |
335 | return kernel_supports_tls; | |
336 | } | |
337 | # if !HAVE_DECL_UNAME // if too old for uname, probably too old for TLS | |
338 | static void CheckIfKernelSupportsTLS() { | |
339 | kernel_supports_tls = false; | |
340 | } | |
341 | # else | |
342 | # include <sys/utsname.h> // DECL_UNAME checked for <sys/utsname.h> too | |
343 | static void CheckIfKernelSupportsTLS() { | |
344 | struct utsname buf; | |
345 | if (uname(&buf) != 0) { // should be impossible | |
346 | MESSAGE("uname failed, assuming no TLS support (errno=%d)\n", errno); | |
347 | kernel_supports_tls = false; | |
348 | } else if (strcasecmp(buf.sysname, "linux") == 0) { | |
349 | // The linux case: the first kernel to support TLS was 2.6.0 | |
350 | if (buf.release[0] < '2' && buf.release[1] == '.') // 0.x or 1.x | |
351 | kernel_supports_tls = false; | |
352 | else if (buf.release[0] == '2' && buf.release[1] == '.' && | |
353 | buf.release[2] >= '0' && buf.release[2] < '6' && | |
354 | buf.release[3] == '.') // 2.0 - 2.5 | |
355 | kernel_supports_tls = false; | |
356 | else | |
357 | kernel_supports_tls = true; | |
358 | } else { // some other kernel, we'll be optimistic | |
359 | kernel_supports_tls = true; | |
360 | } | |
361 | // TODO(csilvers): VLOG(1) the tls status once we support RAW_VLOG | |
362 | } | |
363 | # endif // HAVE_DECL_UNAME | |
364 | #endif // HAVE_TLS | |
365 | ||
366 | // __THROW is defined in glibc systems. It means, counter-intuitively, | |
367 | // "This function will never throw an exception." It's an optional | |
368 | // optimization tool, but we may need to use it to match glibc prototypes. | |
369 | #ifndef __THROW // I guess we're not on a glibc system | |
370 | # define __THROW // __THROW is just an optimization, so ok to make it "" | |
371 | #endif | |
372 | ||
373 | //------------------------------------------------------------------- | |
374 | // Configuration | |
375 | //------------------------------------------------------------------- | |
376 | ||
377 | // Not all possible combinations of the following parameters make | |
378 | // sense. In particular, if kMaxSize increases, you may have to | |
379 | // increase kNumClasses as well. | |
380 | static const size_t kPageShift = 12; | |
381 | static const size_t kPageSize = 1 << kPageShift; | |
382 | static const size_t kMaxSize = 8u * kPageSize; | |
383 | static const size_t kAlignShift = 3; | |
384 | static const size_t kAlignment = 1 << kAlignShift; | |
385 | static const size_t kNumClasses = 68; | |
386 | ||
387 | // Allocates a big block of memory for the pagemap once we reach more than | |
388 | // 128MB | |
389 | static const size_t kPageMapBigAllocationThreshold = 128 << 20; | |
390 | ||
391 | // Minimum number of pages to fetch from system at a time. Must be | |
392 | // significantly bigger than kBlockSize to amortize system-call | |
393 | // overhead, and also to reduce external fragmentation. Also, we | |
394 | // should keep this value big because various incarnations of Linux | |
395 | // have small limits on the number of mmap() regions per | |
396 | // address-space. | |
397 | static const size_t kMinSystemAlloc = 1 << (20 - kPageShift); | |
398 | ||
399 | // Number of objects to move between a per-thread list and a central | |
400 | // list in one shot. We want this to be not too small so we can | |
401 | // amortize the lock overhead for accessing the central list. Making | |
402 | // it too big may temporarily cause unnecessary memory wastage in the | |
403 | // per-thread free list until the scavenger cleans up the list. | |
404 | static int num_objects_to_move[kNumClasses]; | |
405 | ||
406 | // Maximum length we allow a per-thread free-list to have before we | |
407 | // move objects from it into the corresponding central free-list. We | |
408 | // want this big to avoid locking the central free-list too often. It | |
409 | // should not hurt to make this list somewhat big because the | |
410 | // scavenging code will shrink it down when its contents are not in use. | |
411 | static const int kMaxFreeListLength = 256; | |
412 | ||
413 | // Lower and upper bounds on the per-thread cache sizes | |
414 | static const size_t kMinThreadCacheSize = kMaxSize * 2; | |
415 | static const size_t kMaxThreadCacheSize = 2 << 20; | |
416 | ||
417 | // Default bound on the total amount of thread caches | |
418 | static const size_t kDefaultOverallThreadCacheSize = 16 << 20; | |
419 | ||
420 | // For all span-lengths < kMaxPages we keep an exact-size list. | |
421 | // REQUIRED: kMaxPages >= kMinSystemAlloc; | |
422 | static const size_t kMaxPages = kMinSystemAlloc; | |
423 | ||
424 | /* The smallest prime > 2^n */ | |
425 | static int primes_list[] = { | |
426 | // Small values might cause high rates of sampling | |
427 | // and hence commented out. | |
428 | // 2, 5, 11, 17, 37, 67, 131, 257, | |
429 | // 521, 1031, 2053, 4099, 8209, 16411, | |
430 | 32771, 65537, 131101, 262147, 524309, 1048583, | |
431 | 2097169, 4194319, 8388617, 16777259, 33554467 }; | |
432 | ||
433 | // Twice the approximate gap between sampling actions. | |
434 | // I.e., we take one sample approximately once every | |
435 | // tcmalloc_sample_parameter/2 | |
436 | // bytes of allocation, i.e., ~ once every 128KB. | |
437 | // Must be a prime number. | |
438 | #ifdef NO_TCMALLOC_SAMPLES | |
439 | DEFINE_int64(tcmalloc_sample_parameter, 0, | |
440 | "Unused: code is compiled with NO_TCMALLOC_SAMPLES"); | |
441 | static size_t sample_period = 0; | |
442 | #else | |
443 | DEFINE_int64(tcmalloc_sample_parameter, 262147, | |
444 | "Twice the approximate gap between sampling actions." | |
445 | " Must be a prime number. Otherwise will be rounded up to a " | |
446 | " larger prime number"); | |
447 | static size_t sample_period = 262147; | |
448 | #endif | |
449 | ||
450 | // Protects sample_period above | |
451 | static SpinLock sample_period_lock = SPINLOCK_INITIALIZER; | |
452 | ||
453 | // Parameters for controlling how fast memory is returned to the OS. | |
454 | ||
455 | DEFINE_double(tcmalloc_release_rate, 1, | |
456 | "Rate at which we release unused memory to the system. " | |
457 | "Zero means we never release memory back to the system. " | |
458 | "Increase this flag to return memory faster; decrease it " | |
459 | "to return memory slower. Reasonable rates are in the " | |
460 | "range [0,10]"); | |
461 | ||
462 | //------------------------------------------------------------------- | |
463 | // Mapping from size to size_class and vice versa | |
464 | //------------------------------------------------------------------- | |
465 | ||
466 | // Sizes <= 1024 have an alignment >= 8. So for such sizes we have an | |
467 | // array indexed by ceil(size/8). Sizes > 1024 have an alignment >= 128. | |
468 | // So for these larger sizes we have an array indexed by ceil(size/128). | |
469 | // | |
470 | // We flatten both logical arrays into one physical array and use | |
471 | // arithmetic to compute an appropriate index. The constants used by | |
472 | // ClassIndex() were selected to make the flattening work. | |
473 | // | |
474 | // Examples: | |
475 | // Size Expression Index | |
476 | // ------------------------------------------------------- | |
477 | // 0 (0 + 7) / 8 0 | |
478 | // 1 (1 + 7) / 8 1 | |
479 | // ... | |
480 | // 1024 (1024 + 7) / 8 128 | |
481 | // 1025 (1025 + 127 + (120<<7)) / 128 129 | |
482 | // ... | |
483 | // 32768 (32768 + 127 + (120<<7)) / 128 376 | |
484 | static const size_t kMaxSmallSize = 1024; | |
485 | static const int shift_amount[2] = { 3, 7 }; // For divides by 8 or 128 | |
486 | static const int add_amount[2] = { 7, 127 + (120 << 7) }; | |
487 | static unsigned char class_array[377]; | |
488 | ||
489 | // Compute index of the class_array[] entry for a given size | |
490 | static inline int ClassIndex(size_t s) { | |
491 | const int i = (s > kMaxSmallSize); | |
492 | return static_cast<int>((s + add_amount[i]) >> shift_amount[i]); | |
493 | } | |
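// Worked example (editorial note, not part of the original source): at the
// 1024/1025 boundary the two formulas meet up exactly as the table above
// shows:
//   ClassIndex(1024) == (1024 + 7) >> 3                 == 128
//   ClassIndex(1025) == (1025 + 127 + (120 << 7)) >> 7  == 129
// and ClassIndex(kMaxSize) == ClassIndex(32768) == 376, so the 377-entry
// class_array[] above is exactly large enough.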
494 | ||
495 | // Mapping from size class to max size storable in that class | |
496 | static size_t class_to_size[kNumClasses]; | |
497 | ||
498 | // Mapping from size class to number of pages to allocate at a time | |
499 | static size_t class_to_pages[kNumClasses]; | |
500 | ||
501 | // TransferCache is used to cache transfers of num_objects_to_move[size_class] | |
502 | // back and forth between thread caches and the central cache for a given size | |
503 | // class. | |
504 | struct TCEntry { | |
505 | void *head; // Head of chain of objects. | |
506 | void *tail; // Tail of chain of objects. | |
507 | }; | |
508 | // A central cache freelist can have anywhere from 0 to kNumTransferEntries | |
509 | // slots to put linked list chains into. To keep memory usage bounded, the total | |
510 | // number of TCEntries across size classes is fixed. Currently each size | |
511 | // class is initially given one TCEntry which also means that the maximum any | |
512 | // one class can have is kNumClasses. | |
513 | static const int kNumTransferEntries = kNumClasses; | |
514 | ||
515 | // Note: the following only works for "n"s that fit in 32-bits, but | |
516 | // that is fine since we only use it for small sizes. | |
517 | static inline int LgFloor(size_t n) { | |
518 | int log = 0; | |
519 | for (int i = 4; i >= 0; --i) { | |
520 | int shift = (1 << i); | |
521 | size_t x = n >> shift; | |
522 | if (x != 0) { | |
523 | n = x; | |
524 | log += shift; | |
525 | } | |
526 | } | |
527 | ASSERT(n == 1); | |
528 | return log; | |
529 | } | |
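// Worked trace (editorial note, not part of the original source):
// LgFloor(1536) probes shifts of 16, 8, 4, 2 and 1 in turn:
//   1536 >> 16 == 0            (skip)
//   1536 >> 8  == 6   -> log = 8,  n = 6
//   6    >> 4  == 0            (skip)
//   6    >> 2  == 1   -> log = 10, n = 1
//   1    >> 1  == 0            (skip)
// leaving n == 1 and returning 10, i.e. floor(log2(1536)).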
530 | ||
531 | // Some very basic linked list functions for lists whose links are stored | |
532 | // in the first word (a void*) of each free element. | |
533 | ||
534 | static inline void *SLL_Next(void *t) { | |
535 | return *(reinterpret_cast<void**>(t)); | |
536 | } | |
537 | ||
538 | static inline void SLL_SetNext(void *t, void *n) { | |
539 | *(reinterpret_cast<void**>(t)) = n; | |
540 | } | |
541 | ||
542 | static inline void SLL_Push(void **list, void *element) { | |
543 | SLL_SetNext(element, *list); | |
544 | *list = element; | |
545 | } | |
546 | ||
547 | static inline void *SLL_Pop(void **list) { | |
548 | void *result = *list; | |
549 | *list = SLL_Next(*list); | |
550 | return result; | |
551 | } | |
552 | ||
553 | ||
554 | // Remove N elements from a linked list to which head points. head will be | |
555 | // modified to point to the new head. start and end will point to the first | |
556 | // and last nodes of the range. Note that the next pointer of *end is set | |
557 | // to NULL by this function, unlinking the range from the rest of the list. | |
558 | static inline void SLL_PopRange(void **head, int N, void **start, void **end) { | |
559 | if (N == 0) { | |
560 | *start = NULL; | |
561 | *end = NULL; | |
562 | return; | |
563 | } | |
564 | ||
565 | void *tmp = *head; | |
566 | for (int i = 1; i < N; ++i) { | |
567 | tmp = SLL_Next(tmp); | |
568 | } | |
569 | ||
570 | *start = *head; | |
571 | *end = tmp; | |
572 | *head = SLL_Next(tmp); | |
573 | // Unlink range from list. | |
574 | SLL_SetNext(tmp, NULL); | |
575 | } | |
576 | ||
577 | static inline void SLL_PushRange(void **head, void *start, void *end) { | |
578 | if (!start) return; | |
579 | SLL_SetNext(end, *head); | |
580 | *head = start; | |
581 | } | |
582 | ||
583 | static inline size_t SLL_Size(void *head) { | |
584 | int count = 0; | |
585 | while (head) { | |
586 | count++; | |
587 | head = SLL_Next(head); | |
588 | } | |
589 | return count; | |
590 | } | |
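// Usage sketch (editorial addition, illustrative only; objectA and objectB
// are hypothetical pointers to free blocks of at least sizeof(void*) bytes).
// The list head is a plain void*, and each free object stores the next
// pointer in its own first word:
//
//   void* freeList = NULL;
//   SLL_Push(&freeList, objectA);   // freeList -> A -> NULL
//   SLL_Push(&freeList, objectB);   // freeList -> B -> A -> NULL
//   void* p = SLL_Pop(&freeList);   // p == objectB, freeList -> A -> NULL
//   ASSERT(SLL_Size(freeList) == 1);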
591 | ||
592 | // Setup helper functions. | |
593 | ||
594 | static ALWAYS_INLINE size_t SizeClass(size_t size) { | |
595 | return class_array[ClassIndex(size)]; | |
596 | } | |
597 | ||
598 | // Get the byte-size for a specified class | |
599 | static ALWAYS_INLINE size_t ByteSizeForClass(size_t cl) { | |
600 | return class_to_size[cl]; | |
601 | } | |
602 | static int NumMoveSize(size_t size) { | |
603 | if (size == 0) return 0; | |
604 | // Use approx 64k transfers between thread and central caches. | |
605 | int num = static_cast<int>(64.0 * 1024.0 / size); | |
606 | if (num < 2) num = 2; | |
607 | // Clamp well below kMaxFreeListLength to avoid ping pong between central | |
608 | // and thread caches. | |
609 | if (num > static_cast<int>(0.8 * kMaxFreeListLength)) | |
610 | num = static_cast<int>(0.8 * kMaxFreeListLength); | |
611 | ||
612 | // Also, avoid bringing in too many objects into small object free | |
613 | // lists. There are lots of such lists, and if we allow each one to | |
614 | // fetch too many at a time, we end up having to scavenge too often | |
615 | // (especially when there are lots of threads and each thread gets a | |
616 | // small allowance for its thread cache). | |
617 | // | |
618 | // TODO: Make thread cache free list sizes dynamic so that we do not | |
619 | // have to equally divide a fixed resource amongst lots of threads. | |
620 | if (num > 32) num = 32; | |
621 | ||
622 | return num; | |
623 | } | |
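// Worked examples (editorial note, not part of the original source):
//   NumMoveSize(64)    -> 64K/64  == 1024 -> clamped to 0.8 * 256 == 204
//                                         -> clamped again to 32
//   NumMoveSize(32768) -> 64K/32K == 2    -> stays at the minimum of 2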
624 | ||
625 | // Initialize the mapping arrays | |
626 | static void InitSizeClasses() { | |
627 | // Do some sanity checking on add_amount[]/shift_amount[]/class_array[] | |
628 | if (ClassIndex(0) < 0) { | |
629 | MESSAGE("Invalid class index %d for size 0\n", ClassIndex(0)); | |
630 | abort(); | |
631 | } | |
632 | if (static_cast<size_t>(ClassIndex(kMaxSize)) >= sizeof(class_array)) { | |
633 | MESSAGE("Invalid class index %d for kMaxSize\n", ClassIndex(kMaxSize)); | |
634 | abort(); | |
635 | } | |
636 | ||
637 | // Compute the size classes we want to use | |
638 | size_t sc = 1; // Next size class to assign | |
639 | unsigned char alignshift = kAlignShift; | |
640 | int last_lg = -1; | |
641 | for (size_t size = kAlignment; size <= kMaxSize; size += (1 << alignshift)) { | |
642 | int lg = LgFloor(size); | |
643 | if (lg > last_lg) { | |
644 | // Increase alignment every so often. | |
645 | // | |
646 | // Since we double the alignment every time size doubles and | |
647 | // size >= 128, this means that space wasted due to alignment is | |
648 | // at most 16/128 i.e., 12.5%. Plus we cap the alignment at 256 | |
649 | // bytes, so the space wasted as a percentage starts falling for | |
650 | // sizes > 2K. | |
651 | if ((lg >= 7) && (alignshift < 8)) { | |
652 | alignshift++; | |
653 | } | |
654 | last_lg = lg; | |
655 | } | |
656 | ||
657 | // Allocate enough pages so leftover is less than 1/8 of total. | |
658 | // This bounds wasted space to at most 12.5%. | |
659 | size_t psize = kPageSize; | |
660 | while ((psize % size) > (psize >> 3)) { | |
661 | psize += kPageSize; | |
662 | } | |
663 | const size_t my_pages = psize >> kPageShift; | |
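// Worked example of the rule above (editorial note, not part of the
// original source), for size == 1408:
//   psize = 4096:  4096 % 1408 == 1280 >  512   -> grow
//   psize = 8192:  8192 % 1408 == 1152 > 1024   -> grow
//   psize = 12288: 12288 % 1408 == 1024 <= 1536 -> my_pages == 3,
// giving 8 objects per 3-page span and roughly 8% worst-case waste.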
664 | ||
665 | if (sc > 1 && my_pages == class_to_pages[sc-1]) { | |
666 | // See if we can merge this into the previous class without | |
667 | // increasing the fragmentation of the previous class. | |
668 | const size_t my_objects = (my_pages << kPageShift) / size; | |
669 | const size_t prev_objects = (class_to_pages[sc-1] << kPageShift) | |
670 | / class_to_size[sc-1]; | |
671 | if (my_objects == prev_objects) { | |
672 | // Adjust last class to include this size | |
673 | class_to_size[sc-1] = size; | |
674 | continue; | |
675 | } | |
676 | } | |
677 | ||
678 | // Add new class | |
679 | class_to_pages[sc] = my_pages; | |
680 | class_to_size[sc] = size; | |
681 | sc++; | |
682 | } | |
683 | if (sc != kNumClasses) { | |
684 | MESSAGE("wrong number of size classes: found %" PRIuS " instead of %d\n", | |
685 | sc, int(kNumClasses)); | |
686 | abort(); | |
687 | } | |
688 | ||
689 | // Initialize the mapping arrays | |
690 | int next_size = 0; | |
691 | for (unsigned char c = 1; c < kNumClasses; c++) { | |
692 | const size_t max_size_in_class = class_to_size[c]; | |
693 | for (size_t s = next_size; s <= max_size_in_class; s += kAlignment) { | |
694 | class_array[ClassIndex(s)] = c; | |
695 | } | |
696 | next_size = static_cast<int>(max_size_in_class + kAlignment); | |
697 | } | |
698 | ||
699 | // Double-check sizes just to be safe | |
700 | for (size_t size = 0; size <= kMaxSize; size++) { | |
701 | const size_t sc = SizeClass(size); | |
702 | if (sc == 0) { | |
703 | MESSAGE("Bad size class %" PRIuS " for %" PRIuS "\n", sc, size); | |
704 | abort(); | |
705 | } | |
706 | if (sc > 1 && size <= class_to_size[sc-1]) { | |
707 | MESSAGE("Allocating unnecessarily large class %" PRIuS " for %" PRIuS | |
708 | "\n", sc, size); | |
709 | abort(); | |
710 | } | |
711 | if (sc >= kNumClasses) { | |
712 | MESSAGE("Bad size class %" PRIuS " for %" PRIuS "\n", sc, size); | |
713 | abort(); | |
714 | } | |
715 | const size_t s = class_to_size[sc]; | |
716 | if (size > s) { | |
717 | MESSAGE("Bad size %" PRIuS " for %" PRIuS " (sc = %" PRIuS ")\n", s, size, sc); | |
718 | abort(); | |
719 | } | |
720 | if (s == 0) { | |
721 | MESSAGE("Bad size %" PRIuS " for %" PRIuS " (sc = %" PRIuS ")\n", s, size, sc); | |
722 | abort(); | |
723 | } | |
724 | } | |
725 | ||
726 | // Initialize the num_objects_to_move array. | |
727 | for (size_t cl = 1; cl < kNumClasses; ++cl) { | |
728 | num_objects_to_move[cl] = NumMoveSize(ByteSizeForClass(cl)); | |
729 | } | |
730 | ||
731 | #ifndef WTF_CHANGES | |
732 | if (false) { | |
733 | // Dump class sizes and maximum external wastage per size class | |
734 | for (size_t cl = 1; cl < kNumClasses; ++cl) { | |
735 | const int alloc_size = class_to_pages[cl] << kPageShift; | |
736 | const int alloc_objs = alloc_size / class_to_size[cl]; | |
737 | const int min_used = (class_to_size[cl-1] + 1) * alloc_objs; | |
738 | const int max_waste = alloc_size - min_used; | |
739 | MESSAGE("SC %3d [ %8d .. %8d ] from %8d ; %2.0f%% maxwaste\n", | |
740 | int(cl), | |
741 | int(class_to_size[cl-1] + 1), | |
742 | int(class_to_size[cl]), | |
743 | int(class_to_pages[cl] << kPageShift), | |
744 | max_waste * 100.0 / alloc_size | |
745 | ); | |
746 | } | |
747 | } | |
748 | #endif | |
749 | } | |
750 | ||
751 | // ------------------------------------------------------------------------- | |
752 | // Simple allocator for objects of a specified type. External locking | |
753 | // is required before accessing one of these objects. | |
754 | // ------------------------------------------------------------------------- | |
755 | ||
756 | // Metadata allocator -- keeps stats about how many bytes allocated | |
757 | static uint64_t metadata_system_bytes = 0; | |
758 | static void* MetaDataAlloc(size_t bytes) { | |
759 | void* result = TCMalloc_SystemAlloc(bytes, 0); | |
760 | if (result != NULL) { | |
761 | metadata_system_bytes += bytes; | |
762 | } | |
763 | return result; | |
764 | } | |
765 | ||
766 | template <class T> | |
767 | class PageHeapAllocator { | |
768 | private: | |
769 | // How much to allocate from system at a time | |
770 | static const size_t kAllocIncrement = 32 << 10; | |
771 | ||
772 | // Aligned size of T | |
773 | static const size_t kAlignedSize | |
774 | = (((sizeof(T) + kAlignment - 1) / kAlignment) * kAlignment); | |
775 | ||
776 | // Free area from which to carve new objects | |
777 | char* free_area_; | |
778 | size_t free_avail_; | |
779 | ||
780 | // Free list of already carved objects | |
781 | void* free_list_; | |
782 | ||
783 | // Number of allocated but unfreed objects | |
784 | int inuse_; | |
785 | ||
786 | public: | |
787 | void Init() { | |
788 | ASSERT(kAlignedSize <= kAllocIncrement); | |
789 | inuse_ = 0; | |
790 | free_area_ = NULL; | |
791 | free_avail_ = 0; | |
792 | free_list_ = NULL; | |
793 | } | |
794 | ||
795 | T* New() { | |
796 | // Consult free list | |
797 | void* result; | |
798 | if (free_list_ != NULL) { | |
799 | result = free_list_; | |
800 | free_list_ = *(reinterpret_cast<void**>(result)); | |
801 | } else { | |
802 | if (free_avail_ < kAlignedSize) { | |
803 | // Need more room | |
804 | free_area_ = reinterpret_cast<char*>(MetaDataAlloc(kAllocIncrement)); | |
805 | if (free_area_ == NULL) abort(); | |
806 | free_avail_ = kAllocIncrement; | |
807 | } | |
808 | result = free_area_; | |
809 | free_area_ += kAlignedSize; | |
810 | free_avail_ -= kAlignedSize; | |
811 | } | |
812 | inuse_++; | |
813 | return reinterpret_cast<T*>(result); | |
814 | } | |
815 | ||
816 | void Delete(T* p) { | |
817 | *(reinterpret_cast<void**>(p)) = free_list_; | |
818 | free_list_ = p; | |
819 | inuse_--; | |
820 | } | |
821 | ||
822 | int inuse() const { return inuse_; } | |
823 | }; | |
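// Usage sketch (editorial addition, illustrative only; callers must provide
// the external locking required above, e.g. by holding pageheap_lock):
//
//   static PageHeapAllocator<Span> allocator;
//   allocator.Init();
//   Span* s = allocator.New();   // carved from a 32KB MetaDataAlloc block
//   allocator.Delete(s);         // pushed onto the internal free list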
824 | ||
825 | // ------------------------------------------------------------------------- | |
826 | // Span - a contiguous run of pages | |
827 | // ------------------------------------------------------------------------- | |
828 | ||
829 | // Type that can hold a page number | |
830 | typedef uintptr_t PageID; | |
831 | ||
832 | // Type that can hold the length of a run of pages | |
833 | typedef uintptr_t Length; | |
834 | ||
835 | static const Length kMaxValidPages = (~static_cast<Length>(0)) >> kPageShift; | |
836 | ||
837 | // Convert byte size into pages. This won't overflow, but may return | |
838 | // an unreasonably large value if bytes is huge enough. | |
839 | static inline Length pages(size_t bytes) { | |
840 | return (bytes >> kPageShift) + | |
841 | ((bytes & (kPageSize - 1)) > 0 ? 1 : 0); | |
842 | } | |
843 | ||
844 | // Convert a user size into the number of bytes that will actually be | |
845 | // allocated | |
846 | static size_t AllocationSize(size_t bytes) { | |
847 | if (bytes > kMaxSize) { | |
848 | // Large object: we allocate an integral number of pages | |
849 | ASSERT(bytes <= (kMaxValidPages << kPageShift)); | |
850 | return pages(bytes) << kPageShift; | |
851 | } else { | |
852 | // Small object: find the size class to which it belongs | |
853 | return ByteSizeForClass(SizeClass(bytes)); | |
854 | } | |
855 | } | |
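// Worked examples with the 4KB pages used here (editorial note, not part of
// the original source):
//   pages(1)              == 1, since any non-empty tail rounds up
//   AllocationSize(40000) == pages(40000) << kPageShift == 10 * 4096
//   AllocationSize(40)    == ByteSizeForClass(SizeClass(40)), i.e. the
//                            smallest size class holding at least 40 bytes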
856 | ||
857 | // Information kept for a span (a contiguous run of pages). | |
858 | struct Span { | |
859 | PageID start; // Starting page number | |
860 | Length length; // Number of pages in span | |
861 | Span* next; // Used when in linked list | |
862 | Span* prev; // Used when in linked list | |
863 | void* objects; // Linked list of free objects | |
864 | unsigned int free : 1; // Is the span free | |
865 | unsigned int sample : 1; // Sampled object? | |
866 | unsigned int sizeclass : 8; // Size-class for small objects (or 0) | |
867 | unsigned int refcount : 11; // Number of non-free objects | |
868 | ||
869 | #undef SPAN_HISTORY | |
870 | #ifdef SPAN_HISTORY | |
871 | // For debugging, we can keep a log of events per span | |
872 | int nexthistory; | |
873 | char history[64]; | |
874 | int value[64]; | |
875 | #endif | |
876 | }; | |
877 | ||
878 | #ifdef SPAN_HISTORY | |
879 | void Event(Span* span, char op, int v = 0) { | |
880 | span->history[span->nexthistory] = op; | |
881 | span->value[span->nexthistory] = v; | |
882 | span->nexthistory++; | |
883 | if (span->nexthistory == sizeof(span->history)) span->nexthistory = 0; | |
884 | } | |
885 | #else | |
886 | #define Event(s,o,v) ((void) 0) | |
887 | #endif | |
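// Editorial sanity check of the bit-field widths above (not part of the
// original source): sizeclass:8 comfortably holds kNumClasses == 68, and
// refcount:11 holds up to 2047 live objects, which covers the densest case
// here of a one-page span of 8-byte objects (4096 / 8 == 512 objects).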
888 | ||
889 | // Allocator/deallocator for spans | |
890 | static PageHeapAllocator<Span> span_allocator; | |
891 | static Span* NewSpan(PageID p, Length len) { | |
892 | Span* result = span_allocator.New(); | |
893 | memset(result, 0, sizeof(*result)); | |
894 | result->start = p; | |
895 | result->length = len; | |
896 | #ifdef SPAN_HISTORY | |
897 | result->nexthistory = 0; | |
898 | #endif | |
899 | return result; | |
900 | } | |
901 | ||
902 | static inline void DeleteSpan(Span* span) { | |
903 | #ifndef NDEBUG | |
904 | // In debug mode, trash the contents of deleted Spans | |
905 | memset(span, 0x3f, sizeof(*span)); | |
906 | #endif | |
907 | span_allocator.Delete(span); | |
908 | } | |
909 | ||
910 | // ------------------------------------------------------------------------- | |
911 | // Doubly linked list of spans. | |
912 | // ------------------------------------------------------------------------- | |
913 | ||
914 | static inline void DLL_Init(Span* list) { | |
915 | list->next = list; | |
916 | list->prev = list; | |
917 | } | |
918 | ||
919 | static inline void DLL_Remove(Span* span) { | |
920 | span->prev->next = span->next; | |
921 | span->next->prev = span->prev; | |
922 | span->prev = NULL; | |
923 | span->next = NULL; | |
924 | } | |
925 | ||
926 | static ALWAYS_INLINE bool DLL_IsEmpty(const Span* list) { | |
927 | return list->next == list; | |
928 | } | |
929 | ||
930 | #ifndef WTF_CHANGES | |
931 | static int DLL_Length(const Span* list) { | |
932 | int result = 0; | |
933 | for (Span* s = list->next; s != list; s = s->next) { | |
934 | result++; | |
935 | } | |
936 | return result; | |
937 | } | |
938 | #endif | |
939 | ||
940 | #if 0 /* Not needed at the moment -- causes compiler warnings if not used */ | |
941 | static void DLL_Print(const char* label, const Span* list) { | |
942 | MESSAGE("%-10s %p:", label, list); | |
943 | for (const Span* s = list->next; s != list; s = s->next) { | |
944 | MESSAGE(" <%p,%u,%u>", s, s->start, s->length); | |
945 | } | |
946 | MESSAGE("\n"); | |
947 | } | |
948 | #endif | |
949 | ||
950 | static inline void DLL_Prepend(Span* list, Span* span) { | |
951 | ASSERT(span->next == NULL); | |
952 | ASSERT(span->prev == NULL); | |
953 | span->next = list->next; | |
954 | span->prev = list; | |
955 | list->next->prev = span; | |
956 | list->next = span; | |
957 | } | |
958 | ||
959 | // ------------------------------------------------------------------------- | |
960 | // Stack traces kept for sampled allocations | |
961 | // The following state is protected by pageheap_lock_. | |
962 | // ------------------------------------------------------------------------- | |
963 | ||
964 | // size/depth are made the same size as a pointer so that some generic | |
965 | // code below can conveniently cast them back and forth to void*. | |
966 | static const int kMaxStackDepth = 31; | |
967 | struct StackTrace { | |
968 | uintptr_t size; // Size of object | |
969 | uintptr_t depth; // Number of PC values stored in array below | |
970 | void* stack[kMaxStackDepth]; | |
971 | }; | |
972 | static PageHeapAllocator<StackTrace> stacktrace_allocator; | |
973 | static Span sampled_objects; | |
974 | ||
975 | // ------------------------------------------------------------------------- | |
976 | // Map from page-id to per-page data | |
977 | // ------------------------------------------------------------------------- | |
978 | ||
979 | // We use PageMap2<> for 32-bit and PageMap3<> for 64-bit machines. | |
980 | // We also use a simple one-level cache for hot PageID-to-sizeclass mappings, | |
981 | // because sometimes the sizeclass is all the information we need. | |
982 | ||
983 | // Selector class -- general selector uses 3-level map | |
984 | template <int BITS> class MapSelector { | |
985 | public: | |
986 | typedef TCMalloc_PageMap3<BITS-kPageShift> Type; | |
987 | typedef PackedCache<BITS, uint64_t> CacheType; | |
988 | }; | |
989 | ||
990 | // A two-level map for 32-bit machines | |
991 | template <> class MapSelector<32> { | |
992 | public: | |
993 | typedef TCMalloc_PageMap2<32-kPageShift> Type; | |
994 | typedef PackedCache<32-kPageShift, uint16_t> CacheType; | |
995 | }; | |
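// Editorial note (not part of the original source): with kPageShift == 12 a
// 32-bit address space has 2^20 pages, so the specialization above uses a
// two-level radix map over 20 bits and 16-bit PackedCache entries, while the
// generic template covers 64-bit pointers with a three-level map over
// BITS - kPageShift == 52 bits and 64-bit cache entries, matching the
// "64 bits on 64-bit hardware and 16 bits on 32-bit hardware" comment on
// GetSizeClassIfCached() below.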
996 | ||
997 | // ------------------------------------------------------------------------- | |
998 | // Page-level allocator | |
999 | // * Eager coalescing | |
1000 | // | |
1001 | // Heap for page-level allocation. We allow allocating and freeing | |
1002 | // contiguous runs of pages (each run is called a "span"). | |
1003 | // ------------------------------------------------------------------------- | |
1004 | ||
1005 | class TCMalloc_PageHeap { | |
1006 | public: | |
1007 | void init(); | |
1008 | ||
1009 | // Allocate a run of "n" pages. Returns zero if out of memory. | |
1010 | Span* New(Length n); | |
1011 | ||
1012 | // Delete the span "[p, p+n-1]". | |
1013 | // REQUIRES: span was returned by earlier call to New() and | |
1014 | // has not yet been deleted. | |
1015 | void Delete(Span* span); | |
1016 | ||
1017 | // Mark an allocated span as being used for small objects of the | |
1018 | // specified size-class. | |
1019 | // REQUIRES: span was returned by an earlier call to New() | |
1020 | // and has not yet been deleted. | |
1021 | void RegisterSizeClass(Span* span, size_t sc); | |
1022 | ||
1023 | // Split an allocated span into two spans: one of length "n" pages | |
1024 | // followed by another span of length "span->length - n" pages. | |
1025 | // Modifies "*span" to point to the first span of length "n" pages. | |
1026 | // Returns a pointer to the second span. | |
1027 | // | |
1028 | // REQUIRES: "0 < n < span->length" | |
1029 | // REQUIRES: !span->free | |
1030 | // REQUIRES: span->sizeclass == 0 | |
1031 | Span* Split(Span* span, Length n); | |
1032 | ||
1033 | // Return the descriptor for the specified page. | |
1034 | inline Span* GetDescriptor(PageID p) const { | |
1035 | return reinterpret_cast<Span*>(pagemap_.get(p)); | |
1036 | } | |
1037 | ||
1038 | #ifdef WTF_CHANGES | |
1039 | inline Span* GetDescriptorEnsureSafe(PageID p) | |
1040 | { | |
1041 | pagemap_.Ensure(p, 1); | |
1042 | return GetDescriptor(p); | |
1043 | } | |
1044 | #endif | |
1045 | ||
1046 | // Dump state to stderr | |
1047 | #ifndef WTF_CHANGES | |
1048 | void Dump(TCMalloc_Printer* out); | |
1049 | #endif | |
1050 | ||
1051 | // Return number of bytes allocated from system | |
1052 | inline uint64_t SystemBytes() const { return system_bytes_; } | |
1053 | ||
1054 | // Return number of free bytes in heap | |
1055 | uint64_t FreeBytes() const { | |
1056 | return (static_cast<uint64_t>(free_pages_) << kPageShift); | |
1057 | } | |
1058 | ||
1059 | bool Check(); | |
1060 | bool CheckList(Span* list, Length min_pages, Length max_pages); | |
1061 | ||
1062 | // Release all pages on the free list for reuse by the OS: | |
1063 | void ReleaseFreePages(); | |
1064 | ||
1065 | // Return 0 if we have no information, or else the correct sizeclass for p. | |
1066 | // Reads and writes to pagemap_cache_ do not require locking. | |
1067 | // The entries are 64 bits on 64-bit hardware and 16 bits on | |
1068 | // 32-bit hardware, and we don't mind raciness as long as each read of | |
1069 | // an entry yields a valid entry, not a partially updated entry. | |
1070 | size_t GetSizeClassIfCached(PageID p) const { | |
1071 | return pagemap_cache_.GetOrDefault(p, 0); | |
1072 | } | |
1073 | void CacheSizeClass(PageID p, size_t cl) const { pagemap_cache_.Put(p, cl); } | |
1074 | ||
1075 | private: | |
1076 | // Pick the appropriate map and cache types based on pointer size | |
1077 | typedef MapSelector<8*sizeof(uintptr_t)>::Type PageMap; | |
1078 | typedef MapSelector<8*sizeof(uintptr_t)>::CacheType PageMapCache; | |
1079 | PageMap pagemap_; | |
1080 | mutable PageMapCache pagemap_cache_; | |
1081 | ||
1082 | // We segregate spans of a given size into two circular linked | |
1083 | // lists: one for normal spans, and one for spans whose memory | |
1084 | // has been returned to the system. | |
1085 | struct SpanList { | |
1086 | Span normal; | |
1087 | Span returned; | |
1088 | }; | |
1089 | ||
1090 | // List of free spans of length >= kMaxPages | |
1091 | SpanList large_; | |
1092 | ||
1093 | // Array mapping from span length to a doubly linked list of free spans | |
1094 | SpanList free_[kMaxPages]; | |
1095 | ||
1096 | // Number of pages kept in free lists | |
1097 | uintptr_t free_pages_; | |
1098 | ||
1099 | // Bytes allocated from system | |
1100 | uint64_t system_bytes_; | |
1101 | ||
1102 | bool GrowHeap(Length n); | |
1103 | ||
1104 | // REQUIRES span->length >= n | |
1105 | // Remove span from its free list, and move any leftover part of | |
1106 | // span into appropriate free lists. Also update "span" to have | |
1107 | // length exactly "n" and mark it as non-free so it can be returned | |
1108 | // to the client. | |
1109 | // | |
1110 | // "released" is true iff "span" was found on a "returned" list. | |
1111 | void Carve(Span* span, Length n, bool released); | |
1112 | ||
1113 | void RecordSpan(Span* span) { | |
1114 | pagemap_.set(span->start, span); | |
1115 | if (span->length > 1) { | |
1116 | pagemap_.set(span->start + span->length - 1, span); | |
1117 | } | |
1118 | } | |
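// Editorial note (not part of the original source): RecordSpan() only maps
// the first and last pages of a span; that is all the coalescing code in
// Delete() needs, because it only looks up the pages immediately before and
// after a span. RegisterSizeClass() later fills in the interior pages for
// spans that back small-object size classes.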
1119 | ||
1120 | // Allocate a large span of length == n. If successful, returns a | |
1121 | // span of exactly the specified length. Else, returns NULL. | |
1122 | Span* AllocLarge(Length n); | |
1123 | ||
1124 | // Incrementally release some memory to the system. | |
1125 | // IncrementalScavenge(n) is called whenever n pages are freed. | |
1126 | void IncrementalScavenge(Length n); | |
1127 | ||
1128 | // Number of pages to deallocate before doing more scavenging | |
1129 | int64_t scavenge_counter_; | |
1130 | ||
1131 | // Index of last free list we scavenged | |
1132 | size_t scavenge_index_; | |
1133 | ||
1134 | #if defined(WTF_CHANGES) && PLATFORM(DARWIN) | |
1135 | friend class FastMallocZone; | |
1136 | #endif | |
1137 | }; | |
1138 | ||
1139 | void TCMalloc_PageHeap::init() | |
1140 | { | |
1141 | pagemap_.init(MetaDataAlloc); | |
1142 | pagemap_cache_ = PageMapCache(0); | |
1143 | free_pages_ = 0; | |
1144 | system_bytes_ = 0; | |
1145 | scavenge_counter_ = 0; | |
1146 | // Start scavenging at kMaxPages list | |
1147 | scavenge_index_ = kMaxPages-1; | |
1148 | COMPILE_ASSERT(kNumClasses <= (1 << PageMapCache::kValuebits), valuebits); | |
1149 | DLL_Init(&large_.normal); | |
1150 | DLL_Init(&large_.returned); | |
1151 | for (size_t i = 0; i < kMaxPages; i++) { | |
1152 | DLL_Init(&free_[i].normal); | |
1153 | DLL_Init(&free_[i].returned); | |
1154 | } | |
1155 | } | |
1156 | ||
1157 | inline Span* TCMalloc_PageHeap::New(Length n) { | |
1158 | ASSERT(Check()); | |
1159 | ASSERT(n > 0); | |
1160 | ||
1161 | // Find first size >= n that has a non-empty list | |
1162 | for (Length s = n; s < kMaxPages; s++) { | |
1163 | Span* ll = NULL; | |
1164 | bool released = false; | |
1165 | if (!DLL_IsEmpty(&free_[s].normal)) { | |
1166 | // Found normal span | |
1167 | ll = &free_[s].normal; | |
1168 | } else if (!DLL_IsEmpty(&free_[s].returned)) { | |
1169 | // Found returned span; reallocate it | |
1170 | ll = &free_[s].returned; | |
1171 | released = true; | |
1172 | } else { | |
1173 | // Keep looking in larger classes | |
1174 | continue; | |
1175 | } | |
1176 | ||
1177 | Span* result = ll->next; | |
1178 | Carve(result, n, released); | |
1179 | ASSERT(Check()); | |
1180 | free_pages_ -= n; | |
1181 | return result; | |
1182 | } | |
1183 | ||
1184 | Span* result = AllocLarge(n); | |
1185 | if (result != NULL) return result; | |
1186 | ||
1187 | // Grow the heap and try again | |
1188 | if (!GrowHeap(n)) { | |
1189 | ASSERT(Check()); | |
1190 | return NULL; | |
1191 | } | |
1192 | ||
1193 | return AllocLarge(n); | |
1194 | } | |
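// Worked example (editorial note, not part of the original source): New(3)
// first scans free_[3] .. free_[kMaxPages - 1]; failing that it tries
// AllocLarge(3); failing that GrowHeap(3) asks the system for
//   max(3, kMinSystemAlloc) == 256 pages == 1MB
// and the single retry of AllocLarge() then carves the 3 requested pages out
// of the freshly grown heap.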
1195 | ||
1196 | Span* TCMalloc_PageHeap::AllocLarge(Length n) { | |
1197 | // Find the best span (closest to n in size). | |
1198 | // The following loops implement address-ordered best-fit. | |
1199 | bool from_released = false; | |
1200 | Span *best = NULL; | |
1201 | ||
1202 | // Search through normal list | |
1203 | for (Span* span = large_.normal.next; | |
1204 | span != &large_.normal; | |
1205 | span = span->next) { | |
1206 | if (span->length >= n) { | |
1207 | if ((best == NULL) | |
1208 | || (span->length < best->length) | |
1209 | || ((span->length == best->length) && (span->start < best->start))) { | |
1210 | best = span; | |
1211 | from_released = false; | |
1212 | } | |
1213 | } | |
1214 | } | |
1215 | ||
1216 | // Search through released list in case it has a better fit | |
1217 | for (Span* span = large_.returned.next; | |
1218 | span != &large_.returned; | |
1219 | span = span->next) { | |
1220 | if (span->length >= n) { | |
1221 | if ((best == NULL) | |
1222 | || (span->length < best->length) | |
1223 | || ((span->length == best->length) && (span->start < best->start))) { | |
1224 | best = span; | |
1225 | from_released = true; | |
1226 | } | |
1227 | } | |
1228 | } | |
1229 | ||
1230 | if (best != NULL) { | |
1231 | Carve(best, n, from_released); | |
1232 | ASSERT(Check()); | |
1233 | free_pages_ -= n; | |
1234 | return best; | |
1235 | } | |
1236 | return NULL; | |
1237 | } | |
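// Editorial note (not part of the original source): the preference order in
// both loops above is (1) any span of length >= n beats no span, (2) a
// shorter span beats a longer one (best fit), (3) ties go to the lower start
// address. The same predicate is applied to the normal and returned lists,
// so the chosen span is the lowest-addressed best fit regardless of which
// list it came from; from_released merely tells Carve() whether that memory
// had already been returned to the system.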
1238 | ||
1239 | Span* TCMalloc_PageHeap::Split(Span* span, Length n) { | |
1240 | ASSERT(0 < n); | |
1241 | ASSERT(n < span->length); | |
1242 | ASSERT(!span->free); | |
1243 | ASSERT(span->sizeclass == 0); | |
1244 | Event(span, 'T', n); | |
1245 | ||
1246 | const Length extra = span->length - n; | |
1247 | Span* leftover = NewSpan(span->start + n, extra); | |
1248 | Event(leftover, 'U', extra); | |
1249 | RecordSpan(leftover); | |
1250 | pagemap_.set(span->start + n - 1, span); // Update map from pageid to span | |
1251 | span->length = n; | |
1252 | ||
1253 | return leftover; | |
1254 | } | |
1255 | ||
1256 | inline void TCMalloc_PageHeap::Carve(Span* span, Length n, bool released) { | |
1257 | ASSERT(n > 0); | |
1258 | DLL_Remove(span); | |
1259 | span->free = 0; | |
1260 | Event(span, 'A', n); | |
1261 | ||
1262 | const int extra = static_cast<int>(span->length - n); | |
1263 | ASSERT(extra >= 0); | |
1264 | if (extra > 0) { | |
1265 | Span* leftover = NewSpan(span->start + n, extra); | |
1266 | leftover->free = 1; | |
1267 | Event(leftover, 'S', extra); | |
1268 | RecordSpan(leftover); | |
1269 | ||
1270 | // Place leftover span on appropriate free list | |
1271 | SpanList* listpair = (static_cast<size_t>(extra) < kMaxPages) ? &free_[extra] : &large_; | |
1272 | Span* dst = released ? &listpair->returned : &listpair->normal; | |
1273 | DLL_Prepend(dst, leftover); | |
1274 | ||
1275 | span->length = n; | |
1276 | pagemap_.set(span->start + n - 1, span); | |
1277 | } | |
1278 | } | |
1279 | ||
1280 | inline void TCMalloc_PageHeap::Delete(Span* span) { | |
1281 | ASSERT(Check()); | |
1282 | ASSERT(!span->free); | |
1283 | ASSERT(span->length > 0); | |
1284 | ASSERT(GetDescriptor(span->start) == span); | |
1285 | ASSERT(GetDescriptor(span->start + span->length - 1) == span); | |
1286 | span->sizeclass = 0; | |
1287 | span->sample = 0; | |
1288 | ||
1289 | // Coalesce -- we guarantee that "p" != 0, so no bounds checking | |
1290 | // necessary. We do not bother resetting the stale pagemap | |
1291 | // entries for the pieces we are merging together because we only | |
1292 | // care about the pagemap entries for the boundaries. | |
1293 | // | |
1294 | // Note that the spans we merge into "span" may come out of | |
1295 | // a "returned" list. For simplicity, we move these into the | |
1296 | // "normal" list of the appropriate size class. | |
1297 | const PageID p = span->start; | |
1298 | const Length n = span->length; | |
1299 | Span* prev = GetDescriptor(p-1); | |
1300 | if (prev != NULL && prev->free) { | |
1301 | // Merge preceding span into this span | |
1302 | ASSERT(prev->start + prev->length == p); | |
1303 | const Length len = prev->length; | |
1304 | DLL_Remove(prev); | |
1305 | DeleteSpan(prev); | |
1306 | span->start -= len; | |
1307 | span->length += len; | |
1308 | pagemap_.set(span->start, span); | |
1309 | Event(span, 'L', len); | |
1310 | } | |
1311 | Span* next = GetDescriptor(p+n); | |
1312 | if (next != NULL && next->free) { | |
1313 | // Merge next span into this span | |
1314 | ASSERT(next->start == p+n); | |
1315 | const Length len = next->length; | |
1316 | DLL_Remove(next); | |
1317 | DeleteSpan(next); | |
1318 | span->length += len; | |
1319 | pagemap_.set(span->start + span->length - 1, span); | |
1320 | Event(span, 'R', len); | |
1321 | } | |
1322 | ||
1323 | Event(span, 'D', span->length); | |
1324 | span->free = 1; | |
1325 | if (span->length < kMaxPages) { | |
1326 | DLL_Prepend(&free_[span->length].normal, span); | |
1327 | } else { | |
1328 | DLL_Prepend(&large_.normal, span); | |
1329 | } | |
1330 | free_pages_ += n; | |
1331 | ||
1332 | IncrementalScavenge(n); | |
1333 | ASSERT(Check()); | |
1334 | } | |
1335 | ||
1336 | void TCMalloc_PageHeap::IncrementalScavenge(Length n) { | |
1337 | // Fast path; not yet time to release memory | |
1338 | scavenge_counter_ -= n; | |
1339 | if (scavenge_counter_ >= 0) return; // Not yet time to scavenge | |
1340 | ||
1341 | // If there is nothing to release, wait for so many pages before | |
1342 | // scavenging again. With 4K pages and the 1 << 8 value below, this comes to 1MB of memory. | |
1343 | static const size_t kDefaultReleaseDelay = 1 << 8; | |
1344 | ||
1345 | // Find index of free list to scavenge | |
1346 | size_t index = scavenge_index_ + 1; | |
1347 | for (size_t i = 0; i < kMaxPages+1; i++) { | |
1348 | if (index > kMaxPages) index = 0; | |
1349 | SpanList* slist = (index == kMaxPages) ? &large_ : &free_[index]; | |
1350 | if (!DLL_IsEmpty(&slist->normal)) { | |
1351 | // Release the last span on the normal portion of this list | |
1352 | Span* s = slist->normal.prev; | |
1353 | DLL_Remove(s); | |
1354 | TCMalloc_SystemRelease(reinterpret_cast<void*>(s->start << kPageShift), | |
1355 | static_cast<size_t>(s->length << kPageShift)); | |
1356 | DLL_Prepend(&slist->returned, s); | |
1357 | ||
1358 | scavenge_counter_ = std::max<size_t>(64UL, std::min<size_t>(kDefaultReleaseDelay, kDefaultReleaseDelay - (free_pages_ / kDefaultReleaseDelay))); | |
1359 | ||
1360 | if (index == kMaxPages && !DLL_IsEmpty(&slist->normal)) | |
1361 | scavenge_index_ = index - 1; | |
1362 | else | |
1363 | scavenge_index_ = index; | |
1364 | return; | |
1365 | } | |
1366 | index++; | |
1367 | } | |
1368 | ||
1369 | // Nothing to scavenge, delay for a while | |
1370 | scavenge_counter_ = kDefaultReleaseDelay; | |
1371 | } | |
1372 | ||
1373 | void TCMalloc_PageHeap::RegisterSizeClass(Span* span, size_t sc) { | |
1374 | // Associate span object with all interior pages as well | |
1375 | ASSERT(!span->free); | |
1376 | ASSERT(GetDescriptor(span->start) == span); | |
1377 | ASSERT(GetDescriptor(span->start+span->length-1) == span); | |
1378 | Event(span, 'C', sc); | |
1379 | span->sizeclass = static_cast<unsigned int>(sc); | |
1380 | for (Length i = 1; i < span->length-1; i++) { | |
1381 | pagemap_.set(span->start+i, span); | |
1382 | } | |
1383 | } | |
1384 | ||
1385 | #ifndef WTF_CHANGES | |
1386 | static double PagesToMB(uint64_t pages) { | |
1387 | return (pages << kPageShift) / 1048576.0; | |
1388 | } | |
1389 | ||
1390 | void TCMalloc_PageHeap::Dump(TCMalloc_Printer* out) { | |
1391 | int nonempty_sizes = 0; | |
1392 | for (int s = 0; s < kMaxPages; s++) { | |
1393 | if (!DLL_IsEmpty(&free_[s].normal) || !DLL_IsEmpty(&free_[s].returned)) { | |
1394 | nonempty_sizes++; | |
1395 | } | |
1396 | } | |
1397 | out->printf("------------------------------------------------\n"); | |
1398 | out->printf("PageHeap: %d sizes; %6.1f MB free\n", | |
1399 | nonempty_sizes, PagesToMB(free_pages_)); | |
1400 | out->printf("------------------------------------------------\n"); | |
1401 | uint64_t total_normal = 0; | |
1402 | uint64_t total_returned = 0; | |
1403 | for (int s = 0; s < kMaxPages; s++) { | |
1404 | const int n_length = DLL_Length(&free_[s].normal); | |
1405 | const int r_length = DLL_Length(&free_[s].returned); | |
1406 | if (n_length + r_length > 0) { | |
1407 | uint64_t n_pages = s * n_length; | |
1408 | uint64_t r_pages = s * r_length; | |
1409 | total_normal += n_pages; | |
1410 | total_returned += r_pages; | |
1411 | out->printf("%6u pages * %6u spans ~ %6.1f MB; %6.1f MB cum" | |
1412 | "; unmapped: %6.1f MB; %6.1f MB cum\n", | |
1413 | s, | |
1414 | (n_length + r_length), | |
1415 | PagesToMB(n_pages + r_pages), | |
1416 | PagesToMB(total_normal + total_returned), | |
1417 | PagesToMB(r_pages), | |
1418 | PagesToMB(total_returned)); | |
1419 | } | |
1420 | } | |
1421 | ||
1422 | uint64_t n_pages = 0; | |
1423 | uint64_t r_pages = 0; | |
1424 | int n_spans = 0; | |
1425 | int r_spans = 0; | |
1426 | out->printf("Normal large spans:\n"); | |
1427 | for (Span* s = large_.normal.next; s != &large_.normal; s = s->next) { | |
1428 | out->printf(" [ %6" PRIuS " pages ] %6.1f MB\n", | |
1429 | s->length, PagesToMB(s->length)); | |
1430 | n_pages += s->length; | |
1431 | n_spans++; | |
1432 | } | |
1433 | out->printf("Unmapped large spans:\n"); | |
1434 | for (Span* s = large_.returned.next; s != &large_.returned; s = s->next) { | |
1435 | out->printf(" [ %6" PRIuS " pages ] %6.1f MB\n", | |
1436 | s->length, PagesToMB(s->length)); | |
1437 | r_pages += s->length; | |
1438 | r_spans++; | |
1439 | } | |
1440 | total_normal += n_pages; | |
1441 | total_returned += r_pages; | |
1442 | out->printf(">255 large * %6u spans ~ %6.1f MB; %6.1f MB cum" | |
1443 | "; unmapped: %6.1f MB; %6.1f MB cum\n", | |
1444 | (n_spans + r_spans), | |
1445 | PagesToMB(n_pages + r_pages), | |
1446 | PagesToMB(total_normal + total_returned), | |
1447 | PagesToMB(r_pages), | |
1448 | PagesToMB(total_returned)); | |
1449 | } | |
1450 | #endif | |
1451 | ||
1452 | bool TCMalloc_PageHeap::GrowHeap(Length n) { | |
1453 | ASSERT(kMaxPages >= kMinSystemAlloc); | |
1454 | if (n > kMaxValidPages) return false; | |
1455 | Length ask = (n>kMinSystemAlloc) ? n : static_cast<Length>(kMinSystemAlloc); | |
1456 | size_t actual_size; | |
1457 | void* ptr = TCMalloc_SystemAlloc(ask << kPageShift, &actual_size, kPageSize); | |
1458 | if (ptr == NULL) { | |
1459 | if (n < ask) { | |
1460 | // Try growing just "n" pages | |
1461 | ask = n; | |
1462 | ptr = TCMalloc_SystemAlloc(ask << kPageShift, &actual_size, kPageSize); | |
1463 | } | |
1464 | if (ptr == NULL) return false; | |
1465 | } | |
1466 | ask = actual_size >> kPageShift; | |
1467 | ||
1468 | uint64_t old_system_bytes = system_bytes_; | |
1469 | system_bytes_ += (ask << kPageShift); | |
1470 | const PageID p = reinterpret_cast<uintptr_t>(ptr) >> kPageShift; | |
1471 | ASSERT(p > 0); | |
1472 | ||
1473 | // If we already have a lot of pages allocated, just preallocate a bunch of | |
1474 | // memory for the page map. This prevents fragmentation by pagemap metadata | |
1475 | // when a program keeps allocating and freeing large blocks. | |
1476 | ||
1477 | if (old_system_bytes < kPageMapBigAllocationThreshold | |
1478 | && system_bytes_ >= kPageMapBigAllocationThreshold) { | |
1479 | pagemap_.PreallocateMoreMemory(); | |
1480 | } | |
1481 | ||
1482 | // Make sure pagemap_ has entries for all of the new pages. | |
1483 | // Plus ensure one before and one after so coalescing code | |
1484 | // does not need bounds-checking. | |
1485 | if (pagemap_.Ensure(p-1, ask+2)) { | |
1486 | // Pretend the new area is allocated and then Delete() it to | |
1487 | // cause any necessary coalescing to occur. | |
1488 | // | |
1489 | // We do not adjust free_pages_ here since Delete() will do it for us. | |
1490 | Span* span = NewSpan(p, ask); | |
1491 | RecordSpan(span); | |
1492 | Delete(span); | |
1493 | ASSERT(Check()); | |
1494 | return true; | |
1495 | } else { | |
1496 | // We could not allocate memory within "pagemap_" | |
1497 | // TODO: Once we can return memory to the system, return the new span | |
1498 | return false; | |
1499 | } | |
1500 | } | |
1501 | ||
1502 | bool TCMalloc_PageHeap::Check() { | |
1503 | ASSERT(free_[0].normal.next == &free_[0].normal); | |
1504 | ASSERT(free_[0].returned.next == &free_[0].returned); | |
1505 | CheckList(&large_.normal, kMaxPages, 1000000000); | |
1506 | CheckList(&large_.returned, kMaxPages, 1000000000); | |
1507 | for (Length s = 1; s < kMaxPages; s++) { | |
1508 | CheckList(&free_[s].normal, s, s); | |
1509 | CheckList(&free_[s].returned, s, s); | |
1510 | } | |
1511 | return true; | |
1512 | } | |
1513 | ||
1514 | #if ASSERT_DISABLED | |
1515 | bool TCMalloc_PageHeap::CheckList(Span*, Length, Length) { | |
1516 | return true; | |
1517 | } | |
1518 | #else | |
1519 | bool TCMalloc_PageHeap::CheckList(Span* list, Length min_pages, Length max_pages) { | |
1520 | for (Span* s = list->next; s != list; s = s->next) { | |
1521 | CHECK_CONDITION(s->free); | |
1522 | CHECK_CONDITION(s->length >= min_pages); | |
1523 | CHECK_CONDITION(s->length <= max_pages); | |
1524 | CHECK_CONDITION(GetDescriptor(s->start) == s); | |
1525 | CHECK_CONDITION(GetDescriptor(s->start+s->length-1) == s); | |
1526 | } | |
1527 | return true; | |
1528 | } | |
1529 | #endif | |
1530 | ||
1531 | static void ReleaseFreeList(Span* list, Span* returned) { | |
1532 | // Walk backwards through list so that when we push these | |
1533 | // spans on the "returned" list, we preserve the order. | |
1534 | while (!DLL_IsEmpty(list)) { | |
1535 | Span* s = list->prev; | |
1536 | DLL_Remove(s); | |
1537 | DLL_Prepend(returned, s); | |
1538 | TCMalloc_SystemRelease(reinterpret_cast<void*>(s->start << kPageShift), | |
1539 | static_cast<size_t>(s->length << kPageShift)); | |
1540 | } | |
1541 | } | |
1542 | ||
1543 | void TCMalloc_PageHeap::ReleaseFreePages() { | |
1544 | for (Length s = 0; s < kMaxPages; s++) { | |
1545 | ReleaseFreeList(&free_[s].normal, &free_[s].returned); | |
1546 | } | |
1547 | ReleaseFreeList(&large_.normal, &large_.returned); | |
1548 | ASSERT(Check()); | |
1549 | } | |
1550 | ||
1551 | //------------------------------------------------------------------- | |
1552 | // Free list | |
1553 | //------------------------------------------------------------------- | |
1554 | ||
1555 | class TCMalloc_ThreadCache_FreeList { | |
1556 | private: | |
1557 | void* list_; // Linked list of nodes | |
1558 | uint16_t length_; // Current length | |
1559 | uint16_t lowater_; // Low water mark for list length | |
1560 | ||
1561 | public: | |
1562 | void Init() { | |
1563 | list_ = NULL; | |
1564 | length_ = 0; | |
1565 | lowater_ = 0; | |
1566 | } | |
1567 | ||
1568 | // Return current length of list | |
1569 | int length() const { | |
1570 | return length_; | |
1571 | } | |
1572 | ||
1573 | // Is list empty? | |
1574 | bool empty() const { | |
1575 | return list_ == NULL; | |
1576 | } | |
1577 | ||
1578 | // Low-water mark management | |
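 | // lowater_ records the smallest length_ reached since the last | |
 | // clear_lowwatermark(); Scavenge() uses it to decide how many objects | |
 | // can safely be returned to the central cache. | |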
1579 | int lowwatermark() const { return lowater_; } | |
1580 | void clear_lowwatermark() { lowater_ = length_; } | |
1581 | ||
1582 | ALWAYS_INLINE void Push(void* ptr) { | |
1583 | SLL_Push(&list_, ptr); | |
1584 | length_++; | |
1585 | } | |
1586 | ||
1587 | void PushRange(int N, void *start, void *end) { | |
1588 | SLL_PushRange(&list_, start, end); | |
1589 | length_ = length_ + static_cast<uint16_t>(N); | |
1590 | } | |
1591 | ||
1592 | void PopRange(int N, void **start, void **end) { | |
1593 | SLL_PopRange(&list_, N, start, end); | |
1594 | ASSERT(length_ >= N); | |
1595 | length_ = length_ - static_cast<uint16_t>(N); | |
1596 | if (length_ < lowater_) lowater_ = length_; | |
1597 | } | |
1598 | ||
1599 | ALWAYS_INLINE void* Pop() { | |
1600 | ASSERT(list_ != NULL); | |
1601 | length_--; | |
1602 | if (length_ < lowater_) lowater_ = length_; | |
1603 | return SLL_Pop(&list_); | |
1604 | } | |
1605 | ||
1606 | #ifdef WTF_CHANGES | |
1607 | template <class Finder, class Reader> | |
1608 | void enumerateFreeObjects(Finder& finder, const Reader& reader) | |
1609 | { | |
1610 | for (void* nextObject = list_; nextObject; nextObject = *reader(reinterpret_cast<void**>(nextObject))) | |
1611 | finder.visit(nextObject); | |
1612 | } | |
1613 | #endif | |
1614 | }; | |
1615 | ||
1616 | //------------------------------------------------------------------- | |
1617 | // Data kept per thread | |
1618 | //------------------------------------------------------------------- | |
1619 | ||
1620 | class TCMalloc_ThreadCache { | |
1621 | private: | |
1622 | typedef TCMalloc_ThreadCache_FreeList FreeList; | |
1623 | #if COMPILER(MSVC) | |
1624 | typedef DWORD ThreadIdentifier; | |
1625 | #else | |
1626 | typedef pthread_t ThreadIdentifier; | |
1627 | #endif | |
1628 | ||
1629 | size_t size_; // Combined size of data | |
1630 | ThreadIdentifier tid_; // Which thread owns it | |
1631 | bool in_setspecific_; // Called pthread_setspecific? | |
1632 | FreeList list_[kNumClasses]; // Array indexed by size-class | |
1633 | ||
1634 | // We sample allocations, biased by the size of the allocation | |
1635 | uint32_t rnd_; // Cheap random number generator | |
1636 | size_t bytes_until_sample_; // Bytes until we sample next | |
1637 | ||
1638 | // Allocate a new heap. REQUIRES: pageheap_lock is held. | |
1639 | static inline TCMalloc_ThreadCache* NewHeap(ThreadIdentifier tid); | |
1640 | ||
1641 | // Use only as pthread thread-specific destructor function. | |
1642 | static void DestroyThreadCache(void* ptr); | |
1643 | public: | |
1644 | // All ThreadCache objects are kept in a linked list (for stats collection) | |
1645 | TCMalloc_ThreadCache* next_; | |
1646 | TCMalloc_ThreadCache* prev_; | |
1647 | ||
1648 | void Init(ThreadIdentifier tid); | |
1649 | void Cleanup(); | |
1650 | ||
1651 | // Accessors (mostly just for printing stats) | |
1652 | int freelist_length(size_t cl) const { return list_[cl].length(); } | |
1653 | ||
1654 | // Total byte size in cache | |
1655 | size_t Size() const { return size_; } | |
1656 | ||
1657 | void* Allocate(size_t size); | |
1658 | void Deallocate(void* ptr, size_t size_class); | |
1659 | ||
1660 | void FetchFromCentralCache(size_t cl, size_t allocationSize); | |
1661 | void ReleaseToCentralCache(size_t cl, int N); | |
1662 | void Scavenge(); | |
1663 | void Print() const; | |
1664 | ||
1665 | // Record allocation of "k" bytes. Return true iff allocation | |
1666 | // should be sampled | |
1667 | bool SampleAllocation(size_t k); | |
1668 | ||
1669 | // Pick next sampling point | |
1670 | void PickNextSample(size_t k); | |
1671 | ||
1672 | static void InitModule(); | |
1673 | static void InitTSD(); | |
1674 | static TCMalloc_ThreadCache* GetThreadHeap(); | |
1675 | static TCMalloc_ThreadCache* GetCache(); | |
1676 | static TCMalloc_ThreadCache* GetCacheIfPresent(); | |
1677 | static TCMalloc_ThreadCache* CreateCacheIfNecessary(); | |
1678 | static void DeleteCache(TCMalloc_ThreadCache* heap); | |
1679 | static void BecomeIdle(); | |
1680 | static void RecomputeThreadCacheSize(); | |
1681 | ||
1682 | #ifdef WTF_CHANGES | |
1683 | template <class Finder, class Reader> | |
1684 | void enumerateFreeObjects(Finder& finder, const Reader& reader) | |
1685 | { | |
1686 | for (unsigned sizeClass = 0; sizeClass < kNumClasses; sizeClass++) | |
1687 | list_[sizeClass].enumerateFreeObjects(finder, reader); | |
1688 | } | |
1689 | #endif | |
1690 | }; | |
1691 | ||
1692 | //------------------------------------------------------------------- | |
1693 | // Data kept per size-class in central cache | |
1694 | //------------------------------------------------------------------- | |
1695 | ||
1696 | class TCMalloc_Central_FreeList { | |
1697 | public: | |
1698 | void Init(size_t cl); | |
1699 | ||
1700 | // These methods all do internal locking. | |
1701 | ||
1702 | // Insert the specified range into the central freelist. N is the number of | |
1703 | // elements in the range. | |
1704 | void InsertRange(void *start, void *end, int N); | |
1705 | ||
1706 | // Fetches up to *N objects; the actual number fetched is returned in *N. | |
1707 | void RemoveRange(void **start, void **end, int *N); | |
1708 | ||
1709 | // Returns the number of free objects in cache. | |
1710 | size_t length() { | |
1711 | SpinLockHolder h(&lock_); | |
1712 | return counter_; | |
1713 | } | |
1714 | ||
1715 | // Returns the number of free objects in the transfer cache. | |
1716 | int tc_length() { | |
1717 | SpinLockHolder h(&lock_); | |
1718 | return used_slots_ * num_objects_to_move[size_class_]; | |
1719 | } | |
1720 | ||
1721 | #ifdef WTF_CHANGES | |
1722 | template <class Finder, class Reader> | |
1723 | void enumerateFreeObjects(Finder& finder, const Reader& reader) | |
1724 | { | |
1725 | for (Span* span = reader(empty_.next); span && span != &empty_; span = (span->next ? reader(span->next) : 0)) | |
1726 | ASSERT(!span->objects); | |
1727 | ||
1728 | ASSERT(!nonempty_.objects); | |
1729 | for (Span* span = reader(nonempty_.next); span && span != &nonempty_; span = (span->next ? reader(span->next) : 0)) { | |
1730 | for (void* nextObject = span->objects; nextObject; nextObject = *reader(reinterpret_cast<void**>(nextObject))) | |
1731 | finder.visit(nextObject); | |
1732 | } | |
1733 | } | |
1734 | #endif | |
1735 | ||
1736 | private: | |
1737 | // REQUIRES: lock_ is held | |
1738 | // Remove object from cache and return. | |
1739 | // Return NULL if no free entries in cache. | |
1740 | void* FetchFromSpans(); | |
1741 | ||
1742 | // REQUIRES: lock_ is held | |
1743 | // Remove object from cache and return. Fetches | |
1744 | // from pageheap if cache is empty. Only returns | |
1745 | // NULL on allocation failure. | |
1746 | void* FetchFromSpansSafe(); | |
1747 | ||
1748 | // REQUIRES: lock_ is held | |
1749 | // Release a linked list of objects to spans. | |
1750 | // May temporarily release lock_. | |
1751 | void ReleaseListToSpans(void *start); | |
1752 | ||
1753 | // REQUIRES: lock_ is held | |
1754 | // Release an object to spans. | |
1755 | // May temporarily release lock_. | |
1756 | void ReleaseToSpans(void* object); | |
1757 | ||
1758 | // REQUIRES: lock_ is held | |
1759 | // Populate cache by fetching from the page heap. | |
1760 | // May temporarily release lock_. | |
1761 | void Populate(); | |
1762 | ||
1763 | // REQUIRES: lock_ is held. | |
1764 | // Tries to make room for a TCEntry. If the cache is full it will try to | |
1765 | // expand it at the cost of some other cache size. Return false if there is | |
1766 | // no space. | |
1767 | bool MakeCacheSpace(); | |
1768 | ||
1769 | // REQUIRES: lock_ for locked_size_class is held. | |
1770 | // Picks a "random" size class to steal a TCEntry slot from. In reality it | |
1771 | // just iterates over the sizeclasses but does so without taking a lock. | |
1772 | // Returns true on success. | |
1773 | // May temporarily lock a "random" size class. | |
1774 | static bool EvictRandomSizeClass(size_t locked_size_class, bool force); | |
1775 | ||
1776 | // REQUIRES: lock_ is *not* held. | |
1777 | // Tries to shrink the cache. If force is true it will release objects to | |
1778 | // spans if that allows it to shrink the cache. Returns false if it failed to | |
1779 | // shrink the cache. Decrements cache_size_ on success. | |
1780 | // May temporarily take lock_. If it takes lock_, the locked_size_class | |
1781 | // lock is released to keep the thread from holding two size class locks | |
1782 | // concurrently, which could lead to a deadlock. | |
1783 | bool ShrinkCache(int locked_size_class, bool force); | |
1784 | ||
1785 | // This lock protects all the data members. used_slots_ and cache_size_ | |
1786 | // may be looked at without holding the lock. | |
1787 | SpinLock lock_; | |
1788 | ||
1789 | // We keep linked lists of empty and non-empty spans. | |
1790 | size_t size_class_; // My size class | |
1791 | Span empty_; // Dummy header for list of empty spans | |
1792 | Span nonempty_; // Dummy header for list of non-empty spans | |
1793 | size_t counter_; // Number of free objects in cache entry | |
1794 | ||
1795 | // Here we reserve space for TCEntry cache slots. Since one size class can | |
1796 | // end up getting the entire TCEntry quota in the system, we just preallocate | |
1797 | // a sufficient number of entries here. | |
1798 | TCEntry tc_slots_[kNumTransferEntries]; | |
1799 | ||
1800 | // Number of currently used cached entries in tc_slots_. This variable is | |
1801 | // updated under a lock but can be read without one. | |
1802 | int32_t used_slots_; | |
1803 | // The current number of slots for this size class. This is an | |
1804 | // adaptive value that is increased if there is lots of traffic | |
1805 | // on a given size class. | |
1806 | int32_t cache_size_; | |
1807 | }; | |
1808 | ||
1809 | // Pad each CentralCache object to multiple of 64 bytes | |
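 | // (64 bytes is a typical cache-line size, so padding keeps each size class's | |
 | // lock and counters on separate cache lines and avoids false sharing between | |
 | // threads working on different size classes. For example, if the base class | |
 | // were 200 bytes, pad_ would be (64 - 200 % 64) % 64 = 56 bytes, rounding the | |
 | // padded object up to 256 bytes.) | |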
1810 | class TCMalloc_Central_FreeListPadded : public TCMalloc_Central_FreeList { | |
1811 | private: | |
1812 | char pad_[(64 - (sizeof(TCMalloc_Central_FreeList) % 64)) % 64]; | |
1813 | }; | |
1814 | ||
1815 | //------------------------------------------------------------------- | |
1816 | // Global variables | |
1817 | //------------------------------------------------------------------- | |
1818 | ||
1819 | // Central cache -- a collection of free-lists, one per size-class. | |
1820 | // We have a separate lock per free-list to reduce contention. | |
1821 | static TCMalloc_Central_FreeListPadded central_cache[kNumClasses]; | |
1822 | ||
1823 | // Page-level allocator | |
1824 | static SpinLock pageheap_lock = SPINLOCK_INITIALIZER; | |
1825 | ||
1826 | #if PLATFORM(ARM) | |
1827 | static void* pageheap_memory[(sizeof(TCMalloc_PageHeap) + sizeof(void*) - 1) / sizeof(void*)] __attribute__((aligned)); | |
1828 | #else | |
1829 | static void* pageheap_memory[(sizeof(TCMalloc_PageHeap) + sizeof(void*) - 1) / sizeof(void*)]; | |
1830 | #endif | |
1831 | static bool phinited = false; | |
1832 | ||
1833 | // Avoid extra level of indirection by making "pageheap" be just an alias | |
1834 | // of pageheap_memory. | |
1835 | typedef union { | |
1836 | void* m_memory; | |
1837 | TCMalloc_PageHeap* m_pageHeap; | |
1838 | } PageHeapUnion; | |
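 | // The union lets getPageHeap() reinterpret the static pageheap_memory buffer | |
 | // as a TCMalloc_PageHeap* without a cast at each use; the object itself is | |
 | // initialized in place by pageheap->init() in InitModule(). | |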
1839 | ||
1840 | static inline TCMalloc_PageHeap* getPageHeap() | |
1841 | { | |
1842 | PageHeapUnion u = { &pageheap_memory[0] }; | |
1843 | return u.m_pageHeap; | |
1844 | } | |
1845 | ||
1846 | #define pageheap getPageHeap() | |
1847 | ||
1848 | // If TLS is available, we also store a copy | |
1849 | // of the per-thread object in a __thread variable | |
1850 | // since __thread variables are faster to read | |
1851 | // than pthread_getspecific(). We still need | |
1852 | // pthread_setspecific() because __thread | |
1853 | // variables provide no way to run cleanup | |
1854 | // code when a thread is destroyed. | |
1855 | #ifdef HAVE_TLS | |
1856 | static __thread TCMalloc_ThreadCache *threadlocal_heap; | |
1857 | #endif | |
1858 | // Thread-specific key. Initialization here is somewhat tricky | |
1859 | // because some Linux startup code invokes malloc() before it | |
1860 | // is in a good enough state to handle pthread_key_create(). | |
1861 | // Therefore, we use TSD keys only after tsd_inited is set to true. | |
1862 | // Until then, we use a slow path to get the heap object. | |
1863 | static bool tsd_inited = false; | |
1864 | static pthread_key_t heap_key; | |
1865 | #if COMPILER(MSVC) | |
1866 | DWORD tlsIndex = TLS_OUT_OF_INDEXES; | |
1867 | #endif | |
1868 | ||
1869 | static ALWAYS_INLINE void setThreadHeap(TCMalloc_ThreadCache* heap) | |
1870 | { | |
1871 | // still do pthread_setspecific when using MSVC fast TLS to | |
1872 | // benefit from the delete callback. | |
1873 | pthread_setspecific(heap_key, heap); | |
1874 | #if COMPILER(MSVC) | |
1875 | TlsSetValue(tlsIndex, heap); | |
1876 | #endif | |
1877 | } | |
1878 | ||
1879 | // Allocator for thread heaps | |
1880 | static PageHeapAllocator<TCMalloc_ThreadCache> threadheap_allocator; | |
1881 | ||
1882 | // Linked list of heap objects. Protected by pageheap_lock. | |
1883 | static TCMalloc_ThreadCache* thread_heaps = NULL; | |
1884 | static int thread_heap_count = 0; | |
1885 | ||
1886 | // Overall thread cache size. Protected by pageheap_lock. | |
1887 | static size_t overall_thread_cache_size = kDefaultOverallThreadCacheSize; | |
1888 | ||
1889 | // Global per-thread cache size. Writes are protected by | |
1890 | // pageheap_lock. Reads are done without any locking, which should be | |
1891 | // fine as long as size_t can be written atomically and we don't place | |
1892 | // invariants between this variable and other pieces of state. | |
1893 | static volatile size_t per_thread_cache_size = kMaxThreadCacheSize; | |
1894 | ||
1895 | //------------------------------------------------------------------- | |
1896 | // Central cache implementation | |
1897 | //------------------------------------------------------------------- | |
1898 | ||
1899 | void TCMalloc_Central_FreeList::Init(size_t cl) { | |
1900 | lock_.Init(); | |
1901 | size_class_ = cl; | |
1902 | DLL_Init(&empty_); | |
1903 | DLL_Init(&nonempty_); | |
1904 | counter_ = 0; | |
1905 | ||
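 | // Each transfer cache starts with a single slot; MakeCacheSpace() grows it on | |
 | // demand, up to kNumTransferEntries, by stealing slots from other size classes. | |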
1906 | cache_size_ = 1; | |
1907 | used_slots_ = 0; | |
1908 | ASSERT(cache_size_ <= kNumTransferEntries); | |
1909 | } | |
1910 | ||
1911 | void TCMalloc_Central_FreeList::ReleaseListToSpans(void* start) { | |
1912 | while (start) { | |
1913 | void *next = SLL_Next(start); | |
1914 | ReleaseToSpans(start); | |
1915 | start = next; | |
1916 | } | |
1917 | } | |
1918 | ||
1919 | ALWAYS_INLINE void TCMalloc_Central_FreeList::ReleaseToSpans(void* object) { | |
1920 | const PageID p = reinterpret_cast<uintptr_t>(object) >> kPageShift; | |
1921 | Span* span = pageheap->GetDescriptor(p); | |
1922 | ASSERT(span != NULL); | |
1923 | ASSERT(span->refcount > 0); | |
1924 | ||
1926 | // If the span had no free objects (it was on the empty_ list), move it to the non-empty list | |
1926 | if (span->objects == NULL) { | |
1927 | DLL_Remove(span); | |
1928 | DLL_Prepend(&nonempty_, span); | |
1929 | Event(span, 'N', 0); | |
1930 | } | |
1931 | ||
1932 | // The following check is expensive, so it is disabled by default | |
1933 | if (false) { | |
1934 | // Check that object does not occur in list | |
1935 | int got = 0; | |
1936 | for (void* p = span->objects; p != NULL; p = *((void**) p)) { | |
1937 | ASSERT(p != object); | |
1938 | got++; | |
1939 | } | |
1940 | ASSERT(got + span->refcount == | |
1941 | (span->length<<kPageShift)/ByteSizeForClass(span->sizeclass)); | |
1942 | } | |
1943 | ||
1944 | counter_++; | |
1945 | span->refcount--; | |
1946 | if (span->refcount == 0) { | |
1947 | Event(span, '#', 0); | |
1948 | counter_ -= (span->length<<kPageShift) / ByteSizeForClass(span->sizeclass); | |
1949 | DLL_Remove(span); | |
1950 | ||
1951 | // Release central list lock while operating on pageheap | |
1952 | lock_.Unlock(); | |
1953 | { | |
1954 | SpinLockHolder h(&pageheap_lock); | |
1955 | pageheap->Delete(span); | |
1956 | } | |
1957 | lock_.Lock(); | |
1958 | } else { | |
1959 | *(reinterpret_cast<void**>(object)) = span->objects; | |
1960 | span->objects = object; | |
1961 | } | |
1962 | } | |
1963 | ||
1964 | ALWAYS_INLINE bool TCMalloc_Central_FreeList::EvictRandomSizeClass( | |
1965 | size_t locked_size_class, bool force) { | |
1966 | static int race_counter = 0; | |
1967 | int t = race_counter++; // Updated without a lock, but who cares. | |
1968 | if (t >= static_cast<int>(kNumClasses)) { | |
1969 | while (t >= static_cast<int>(kNumClasses)) { | |
1970 | t -= kNumClasses; | |
1971 | } | |
1972 | race_counter = t; | |
1973 | } | |
1974 | ASSERT(t >= 0); | |
1975 | ASSERT(t < static_cast<int>(kNumClasses)); | |
1976 | if (t == static_cast<int>(locked_size_class)) return false; | |
1977 | return central_cache[t].ShrinkCache(static_cast<int>(locked_size_class), force); | |
1978 | } | |
1979 | ||
1980 | bool TCMalloc_Central_FreeList::MakeCacheSpace() { | |
1981 | // Is there room in the cache? | |
1982 | if (used_slots_ < cache_size_) return true; | |
1983 | // Check if we can expand this cache? | |
1984 | if (cache_size_ == kNumTransferEntries) return false; | |
1985 | // Ok, we'll try to grab an entry from some other size class. | |
1986 | if (EvictRandomSizeClass(size_class_, false) || | |
1987 | EvictRandomSizeClass(size_class_, true)) { | |
1988 | // Succeeded in evicting, we're going to make our cache larger. | |
1989 | cache_size_++; | |
1990 | return true; | |
1991 | } | |
1992 | return false; | |
1993 | } | |
1994 | ||
1995 | ||
1996 | namespace { | |
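 | // RAII helper that swaps which of two spin locks is held: the constructor | |
 | // releases "held" and acquires "temp"; the destructor releases "temp" and | |
 | // re-acquires "held", restoring the caller's original locking state. | |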
1997 | class LockInverter { | |
1998 | private: | |
1999 | SpinLock *held_, *temp_; | |
2000 | public: | |
2001 | inline explicit LockInverter(SpinLock* held, SpinLock *temp) | |
2002 | : held_(held), temp_(temp) { held_->Unlock(); temp_->Lock(); } | |
2003 | inline ~LockInverter() { temp_->Unlock(); held_->Lock(); } | |
2004 | }; | |
2005 | } | |
2006 | ||
2007 | bool TCMalloc_Central_FreeList::ShrinkCache(int locked_size_class, bool force) { | |
2008 | // Start with a quick check without taking a lock. | |
2009 | if (cache_size_ == 0) return false; | |
2010 | // We don't evict from a full cache unless we are 'forcing'. | |
2011 | if (force == false && used_slots_ == cache_size_) return false; | |
2012 | ||
2013 | // Grab lock, but first release the other lock held by this thread. We use | |
2014 | // the lock inverter to ensure that we never hold two size class locks | |
2015 | // concurrently. That can create a deadlock because there is no well | |
2016 | // defined nesting order. | |
2017 | LockInverter li(¢ral_cache[locked_size_class].lock_, &lock_); | |
2018 | ASSERT(used_slots_ <= cache_size_); | |
2019 | ASSERT(0 <= cache_size_); | |
2020 | if (cache_size_ == 0) return false; | |
2021 | if (used_slots_ == cache_size_) { | |
2022 | if (force == false) return false; | |
2023 | // ReleaseListToSpans releases the lock, so we have to make all the | |
2024 | // updates to the central list before calling it. | |
2025 | cache_size_--; | |
2026 | used_slots_--; | |
2027 | ReleaseListToSpans(tc_slots_[used_slots_].head); | |
2028 | return true; | |
2029 | } | |
2030 | cache_size_--; | |
2031 | return true; | |
2032 | } | |
2033 | ||
2034 | void TCMalloc_Central_FreeList::InsertRange(void *start, void *end, int N) { | |
2035 | SpinLockHolder h(&lock_); | |
2036 | if (N == num_objects_to_move[size_class_] && | |
2037 | MakeCacheSpace()) { | |
2038 | int slot = used_slots_++; | |
2039 | ASSERT(slot >=0); | |
2040 | ASSERT(slot < kNumTransferEntries); | |
2041 | TCEntry *entry = &tc_slots_[slot]; | |
2042 | entry->head = start; | |
2043 | entry->tail = end; | |
2044 | return; | |
2045 | } | |
2046 | ReleaseListToSpans(start); | |
2047 | } | |
2048 | ||
2049 | void TCMalloc_Central_FreeList::RemoveRange(void **start, void **end, int *N) { | |
2050 | int num = *N; | |
2051 | ASSERT(num > 0); | |
2052 | ||
2053 | SpinLockHolder h(&lock_); | |
2054 | if (num == num_objects_to_move[size_class_] && used_slots_ > 0) { | |
2055 | int slot = --used_slots_; | |
2056 | ASSERT(slot >= 0); | |
2057 | TCEntry *entry = &tc_slots_[slot]; | |
2058 | *start = entry->head; | |
2059 | *end = entry->tail; | |
2060 | return; | |
2061 | } | |
2062 | ||
2063 | // TODO: Prefetch multiple TCEntries? | |
2064 | void *tail = FetchFromSpansSafe(); | |
2065 | if (!tail) { | |
2066 | // We are completely out of memory. | |
2067 | *start = *end = NULL; | |
2068 | *N = 0; | |
2069 | return; | |
2070 | } | |
2071 | ||
2072 | SLL_SetNext(tail, NULL); | |
2073 | void *head = tail; | |
2074 | int count = 1; | |
2075 | while (count < num) { | |
2076 | void *t = FetchFromSpans(); | |
2077 | if (!t) break; | |
2078 | SLL_Push(&head, t); | |
2079 | count++; | |
2080 | } | |
2081 | *start = head; | |
2082 | *end = tail; | |
2083 | *N = count; | |
2084 | } | |
2085 | ||
2086 | ||
2087 | void* TCMalloc_Central_FreeList::FetchFromSpansSafe() { | |
2088 | void *t = FetchFromSpans(); | |
2089 | if (!t) { | |
2090 | Populate(); | |
2091 | t = FetchFromSpans(); | |
2092 | } | |
2093 | return t; | |
2094 | } | |
2095 | ||
2096 | void* TCMalloc_Central_FreeList::FetchFromSpans() { | |
2097 | if (DLL_IsEmpty(&nonempty_)) return NULL; | |
2098 | Span* span = nonempty_.next; | |
2099 | ||
2100 | ASSERT(span->objects != NULL); | |
2101 | span->refcount++; | |
2102 | void* result = span->objects; | |
2103 | span->objects = *(reinterpret_cast<void**>(result)); | |
2104 | if (span->objects == NULL) { | |
2105 | // Move to empty list | |
2106 | DLL_Remove(span); | |
2107 | DLL_Prepend(&empty_, span); | |
2108 | Event(span, 'E', 0); | |
2109 | } | |
2110 | counter_--; | |
2111 | return result; | |
2112 | } | |
2113 | ||
2114 | // Fetch memory from the system and add to the central cache freelist. | |
2115 | ALWAYS_INLINE void TCMalloc_Central_FreeList::Populate() { | |
2116 | // Release central list lock while operating on pageheap | |
2117 | lock_.Unlock(); | |
2118 | const size_t npages = class_to_pages[size_class_]; | |
2119 | ||
2120 | Span* span; | |
2121 | { | |
2122 | SpinLockHolder h(&pageheap_lock); | |
2123 | span = pageheap->New(npages); | |
2124 | if (span) pageheap->RegisterSizeClass(span, size_class_); | |
2125 | } | |
2126 | if (span == NULL) { | |
2127 | MESSAGE("allocation failed: %d\n", errno); | |
2128 | lock_.Lock(); | |
2129 | return; | |
2130 | } | |
2131 | ASSERT(span->length == npages); | |
2132 | // Cache sizeclass info eagerly. Locking is not necessary. | |
2133 | // (Instead of being eager, we could just replace any stale info | |
2134 | // about this span, but that seems to be no better in practice.) | |
2135 | for (size_t i = 0; i < npages; i++) { | |
2136 | pageheap->CacheSizeClass(span->start + i, size_class_); | |
2137 | } | |
2138 | ||
2139 | // Split the block into pieces and add to the free-list | |
2140 | // TODO: coloring of objects to avoid cache conflicts? | |
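 | // The free list is threaded through the span's own memory: the first word of | |
 | // each object stores the address of the next object, and "tail" points at the | |
 | // word where the next link (or the terminating NULL) will be written. | |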
2141 | void** tail = &span->objects; | |
2142 | char* ptr = reinterpret_cast<char*>(span->start << kPageShift); | |
2143 | char* limit = ptr + (npages << kPageShift); | |
2144 | const size_t size = ByteSizeForClass(size_class_); | |
2145 | int num = 0; | |
2146 | char* nptr; | |
2147 | while ((nptr = ptr + size) <= limit) { | |
2148 | *tail = ptr; | |
2149 | tail = reinterpret_cast<void**>(ptr); | |
2150 | ptr = nptr; | |
2151 | num++; | |
2152 | } | |
2153 | ASSERT(ptr <= limit); | |
2154 | *tail = NULL; | |
2155 | span->refcount = 0; // No sub-object in use yet | |
2156 | ||
2157 | // Add span to list of non-empty spans | |
2158 | lock_.Lock(); | |
2159 | DLL_Prepend(&nonempty_, span); | |
2160 | counter_ += num; | |
2161 | } | |
2162 | ||
2163 | //------------------------------------------------------------------- | |
2164 | // TCMalloc_ThreadCache implementation | |
2165 | //------------------------------------------------------------------- | |
2166 | ||
2167 | inline bool TCMalloc_ThreadCache::SampleAllocation(size_t k) { | |
2168 | if (bytes_until_sample_ < k) { | |
2169 | PickNextSample(k); | |
2170 | return true; | |
2171 | } else { | |
2172 | bytes_until_sample_ -= k; | |
2173 | return false; | |
2174 | } | |
2175 | } | |
2176 | ||
2177 | void TCMalloc_ThreadCache::Init(ThreadIdentifier tid) { | |
2178 | size_ = 0; | |
2179 | next_ = NULL; | |
2180 | prev_ = NULL; | |
2181 | tid_ = tid; | |
2182 | in_setspecific_ = false; | |
2183 | for (size_t cl = 0; cl < kNumClasses; ++cl) { | |
2184 | list_[cl].Init(); | |
2185 | } | |
2186 | ||
2187 | // Initialize RNG -- run it for a bit to get to good values | |
2188 | bytes_until_sample_ = 0; | |
2189 | rnd_ = static_cast<uint32_t>(reinterpret_cast<uintptr_t>(this)); | |
2190 | for (int i = 0; i < 100; i++) { | |
2191 | PickNextSample(static_cast<size_t>(FLAGS_tcmalloc_sample_parameter * 2)); | |
2192 | } | |
2193 | } | |
2194 | ||
2195 | void TCMalloc_ThreadCache::Cleanup() { | |
2196 | // Put unused memory back into central cache | |
2197 | for (size_t cl = 0; cl < kNumClasses; ++cl) { | |
2198 | if (list_[cl].length() > 0) { | |
2199 | ReleaseToCentralCache(cl, list_[cl].length()); | |
2200 | } | |
2201 | } | |
2202 | } | |
2203 | ||
2204 | ALWAYS_INLINE void* TCMalloc_ThreadCache::Allocate(size_t size) { | |
2205 | ASSERT(size <= kMaxSize); | |
2206 | const size_t cl = SizeClass(size); | |
2207 | FreeList* list = &list_[cl]; | |
2208 | size_t allocationSize = ByteSizeForClass(cl); | |
2209 | if (list->empty()) { | |
2210 | FetchFromCentralCache(cl, allocationSize); | |
2211 | if (list->empty()) return NULL; | |
2212 | } | |
2213 | size_ -= allocationSize; | |
2214 | return list->Pop(); | |
2215 | } | |
2216 | ||
2217 | inline void TCMalloc_ThreadCache::Deallocate(void* ptr, size_t cl) { | |
2218 | size_ += ByteSizeForClass(cl); | |
2219 | FreeList* list = &list_[cl]; | |
2220 | list->Push(ptr); | |
2221 | // If enough data is free, put back into central cache | |
2222 | if (list->length() > kMaxFreeListLength) { | |
2223 | ReleaseToCentralCache(cl, num_objects_to_move[cl]); | |
2224 | } | |
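 | // If the whole thread cache has grown past its per-thread budget, trim every | |
 | // free list back toward its low-water mark. | |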
2225 | if (size_ >= per_thread_cache_size) Scavenge(); | |
2226 | } | |
2227 | ||
2228 | // Remove some objects of class "cl" from central cache and add to thread heap | |
2229 | ALWAYS_INLINE void TCMalloc_ThreadCache::FetchFromCentralCache(size_t cl, size_t allocationSize) { | |
2230 | int fetch_count = num_objects_to_move[cl]; | |
2231 | void *start, *end; | |
2232 | central_cache[cl].RemoveRange(&start, &end, &fetch_count); | |
2233 | list_[cl].PushRange(fetch_count, start, end); | |
2234 | size_ += allocationSize * fetch_count; | |
2235 | } | |
2236 | ||
2237 | // Remove some objects of class "cl" from thread heap and add to central cache | |
2238 | inline void TCMalloc_ThreadCache::ReleaseToCentralCache(size_t cl, int N) { | |
2239 | ASSERT(N > 0); | |
2240 | FreeList* src = &list_[cl]; | |
2241 | if (N > src->length()) N = src->length(); | |
2242 | size_ -= N*ByteSizeForClass(cl); | |
2243 | ||
2244 | // We return prepackaged chains of the correct size to the central cache. | |
2245 | // TODO: Use the same format internally in the thread caches? | |
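 | // Move whole batches of num_objects_to_move[cl] objects first, then one final | |
 | // partial batch with whatever remains. | |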
2246 | int batch_size = num_objects_to_move[cl]; | |
2247 | while (N > batch_size) { | |
2248 | void *tail, *head; | |
2249 | src->PopRange(batch_size, &head, &tail); | |
2250 | central_cache[cl].InsertRange(head, tail, batch_size); | |
2251 | N -= batch_size; | |
2252 | } | |
2253 | void *tail, *head; | |
2254 | src->PopRange(N, &head, &tail); | |
2255 | central_cache[cl].InsertRange(head, tail, N); | |
2256 | } | |
2257 | ||
2258 | // Release idle memory to the central cache | |
2259 | inline void TCMalloc_ThreadCache::Scavenge() { | |
2260 | // If the low-water mark for the free list is L, it means we would | |
2261 | // not have had to allocate anything from the central cache even if | |
2262 | // we had reduced the free list size by L. We aim to get closer to | |
2263 | // that situation by dropping L/2 nodes from the free list. This | |
2264 | // may not release much memory, but if so we will call scavenge again | |
2265 | // pretty soon and the low-water marks will be high on that call. | |
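 | // For example, if a size class's list never dropped below 20 entries since the | |
 | // last scavenge, its low-water mark is 20 and we release 10 objects back to | |
 | // the central cache. | |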
2266 | //int64 start = CycleClock::Now(); | |
2267 | ||
2268 | for (size_t cl = 0; cl < kNumClasses; cl++) { | |
2269 | FreeList* list = &list_[cl]; | |
2270 | const int lowmark = list->lowwatermark(); | |
2271 | if (lowmark > 0) { | |
2272 | const int drop = (lowmark > 1) ? lowmark/2 : 1; | |
2273 | ReleaseToCentralCache(cl, drop); | |
2274 | } | |
2275 | list->clear_lowwatermark(); | |
2276 | } | |
2277 | ||
2278 | //int64 finish = CycleClock::Now(); | |
2279 | //CycleTimer ct; | |
2280 | //MESSAGE("GC: %.0f ns\n", ct.CyclesToUsec(finish-start)*1000.0); | |
2281 | } | |
2282 | ||
2283 | void TCMalloc_ThreadCache::PickNextSample(size_t k) { | |
2284 | // Make next "random" number | |
2285 | // x^32+x^22+x^2+x^1+1 is a primitive polynomial for random numbers | |
2286 | static const uint32_t kPoly = (1 << 22) | (1 << 2) | (1 << 1) | (1 << 0); | |
2287 | uint32_t r = rnd_; | |
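 | // Galois LFSR step: shift left by one and, when the bit shifted out (the old | |
 | // top bit) was set, XOR in the polynomial. The arithmetic right shift of the | |
 | // signed value turns that top bit into an all-ones (or all-zero) mask. | |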
2288 | rnd_ = (r << 1) ^ ((static_cast<int32_t>(r) >> 31) & kPoly); | |
2289 | ||
2290 | // Next point is "rnd_ % (sample_period)". I.e., average | |
2291 | // increment is "sample_period/2". | |
2292 | const int flag_value = static_cast<int>(FLAGS_tcmalloc_sample_parameter); | |
2293 | static int last_flag_value = -1; | |
2294 | ||
2295 | if (flag_value != last_flag_value) { | |
2296 | SpinLockHolder h(&sample_period_lock); | |
2297 | int i; | |
2298 | for (i = 0; i < (static_cast<int>(sizeof(primes_list)/sizeof(primes_list[0])) - 1); i++) { | |
2299 | if (primes_list[i] >= flag_value) { | |
2300 | break; | |
2301 | } | |
2302 | } | |
2303 | sample_period = primes_list[i]; | |
2304 | last_flag_value = flag_value; | |
2305 | } | |
2306 | ||
2307 | bytes_until_sample_ += rnd_ % sample_period; | |
2308 | ||
2309 | if (k > (static_cast<size_t>(-1) >> 2)) { | |
2310 | // If the user has asked for a huge allocation then it is possible | |
2311 | // for the code below to loop infinitely. Just return (note that | |
2312 | // this throws off the sampling accuracy somewhat, but a user who | |
2313 | // is allocating more than 1G of memory at a time can live with a | |
2314 | // minor inaccuracy in profiling of small allocations, and also | |
2315 | // would rather not wait for the loop below to terminate). | |
2316 | return; | |
2317 | } | |
2318 | ||
2319 | while (bytes_until_sample_ < k) { | |
2320 | // Increase bytes_until_sample_ by enough average sampling periods | |
2321 | // (sample_period >> 1) to allow us to sample past the current | |
2322 | // allocation. | |
2323 | bytes_until_sample_ += (sample_period >> 1); | |
2324 | } | |
2325 | ||
2326 | bytes_until_sample_ -= k; | |
2327 | } | |
2328 | ||
2329 | void TCMalloc_ThreadCache::InitModule() { | |
2330 | // There is a slight potential race here because of the double-checked | |
2331 | // locking idiom. However, as long as the program does a small | |
2332 | // allocation before switching to multi-threaded mode, we will be | |
2333 | // fine. We increase the chances of doing such a small allocation | |
2334 | // by doing one in the constructor of the module_enter_exit_hook | |
2335 | // object declared below. | |
2336 | SpinLockHolder h(&pageheap_lock); | |
2337 | if (!phinited) { | |
2338 | #ifdef WTF_CHANGES | |
2339 | InitTSD(); | |
2340 | #endif | |
2341 | InitSizeClasses(); | |
2342 | threadheap_allocator.Init(); | |
2343 | span_allocator.Init(); | |
2344 | span_allocator.New(); // Reduce cache conflicts | |
2345 | span_allocator.New(); // Reduce cache conflicts | |
2346 | stacktrace_allocator.Init(); | |
2347 | DLL_Init(&sampled_objects); | |
2348 | for (size_t i = 0; i < kNumClasses; ++i) { | |
2349 | central_cache[i].Init(i); | |
2350 | } | |
2351 | pageheap->init(); | |
2352 | phinited = 1; | |
2353 | #if defined(WTF_CHANGES) && PLATFORM(DARWIN) | |
2354 | FastMallocZone::init(); | |
2355 | #endif | |
2356 | } | |
2357 | } | |
2358 | ||
2359 | inline TCMalloc_ThreadCache* TCMalloc_ThreadCache::NewHeap(ThreadIdentifier tid) { | |
2360 | // Create the heap and add it to the linked list | |
2361 | TCMalloc_ThreadCache *heap = threadheap_allocator.New(); | |
2362 | heap->Init(tid); | |
2363 | heap->next_ = thread_heaps; | |
2364 | heap->prev_ = NULL; | |
2365 | if (thread_heaps != NULL) thread_heaps->prev_ = heap; | |
2366 | thread_heaps = heap; | |
2367 | thread_heap_count++; | |
2368 | RecomputeThreadCacheSize(); | |
2369 | return heap; | |
2370 | } | |
2371 | ||
2372 | inline TCMalloc_ThreadCache* TCMalloc_ThreadCache::GetThreadHeap() { | |
2373 | #ifdef HAVE_TLS | |
2374 | // __thread is faster, but only when the kernel supports it | |
2375 | if (KernelSupportsTLS()) | |
2376 | return threadlocal_heap; | |
2377 | #elif COMPILER(MSVC) | |
2378 | return static_cast<TCMalloc_ThreadCache*>(TlsGetValue(tlsIndex)); | |
2379 | #else | |
2380 | return static_cast<TCMalloc_ThreadCache*>(pthread_getspecific(heap_key)); | |
2381 | #endif | |
2382 | } | |
2383 | ||
2384 | inline TCMalloc_ThreadCache* TCMalloc_ThreadCache::GetCache() { | |
2385 | TCMalloc_ThreadCache* ptr = NULL; | |
2386 | if (!tsd_inited) { | |
2387 | InitModule(); | |
2388 | } else { | |
2389 | ptr = GetThreadHeap(); | |
2390 | } | |
2391 | if (ptr == NULL) ptr = CreateCacheIfNecessary(); | |
2392 | return ptr; | |
2393 | } | |
2394 | ||
2395 | // In deletion paths, we do not try to create a thread-cache. This is | |
2396 | // because we may be in the thread destruction code and may have | |
2397 | // already cleaned up the cache for this thread. | |
2398 | inline TCMalloc_ThreadCache* TCMalloc_ThreadCache::GetCacheIfPresent() { | |
2399 | if (!tsd_inited) return NULL; | |
2400 | void* const p = GetThreadHeap(); | |
2401 | return reinterpret_cast<TCMalloc_ThreadCache*>(p); | |
2402 | } | |
2403 | ||
2404 | void TCMalloc_ThreadCache::InitTSD() { | |
2405 | ASSERT(!tsd_inited); | |
2406 | pthread_key_create(&heap_key, DestroyThreadCache); | |
2407 | #if COMPILER(MSVC) | |
2408 | tlsIndex = TlsAlloc(); | |
2409 | #endif | |
2410 | tsd_inited = true; | |
2411 | ||
2412 | #if !COMPILER(MSVC) | |
2413 | // We may have used a fake pthread_t for the main thread. Fix it. | |
2414 | pthread_t zero; | |
2415 | memset(&zero, 0, sizeof(zero)); | |
2416 | #endif | |
2417 | #ifndef WTF_CHANGES | |
2418 | SpinLockHolder h(&pageheap_lock); | |
2419 | #else | |
2420 | ASSERT(pageheap_lock.IsHeld()); | |
2421 | #endif | |
2422 | for (TCMalloc_ThreadCache* h = thread_heaps; h != NULL; h = h->next_) { | |
2423 | #if COMPILER(MSVC) | |
2424 | if (h->tid_ == 0) { | |
2425 | h->tid_ = GetCurrentThreadId(); | |
2426 | } | |
2427 | #else | |
2428 | if (pthread_equal(h->tid_, zero)) { | |
2429 | h->tid_ = pthread_self(); | |
2430 | } | |
2431 | #endif | |
2432 | } | |
2433 | } | |
2434 | ||
2435 | TCMalloc_ThreadCache* TCMalloc_ThreadCache::CreateCacheIfNecessary() { | |
2436 | // Initialize per-thread data if necessary | |
2437 | TCMalloc_ThreadCache* heap = NULL; | |
2438 | { | |
2439 | SpinLockHolder h(&pageheap_lock); | |
2440 | ||
2441 | #if COMPILER(MSVC) | |
2442 | DWORD me; | |
2443 | if (!tsd_inited) { | |
2444 | me = 0; | |
2445 | } else { | |
2446 | me = GetCurrentThreadId(); | |
2447 | } | |
2448 | #else | |
2449 | // Early on in glibc's life, we cannot even call pthread_self() | |
2450 | pthread_t me; | |
2451 | if (!tsd_inited) { | |
2452 | memset(&me, 0, sizeof(me)); | |
2453 | } else { | |
2454 | me = pthread_self(); | |
2455 | } | |
2456 | #endif | |
2457 | ||
2458 | // This may be a recursive malloc call from pthread_setspecific(). | |
2459 | // In that case, the heap for this thread has already been created | |
2460 | // and added to the linked list. So we search for that first. | |
2461 | for (TCMalloc_ThreadCache* h = thread_heaps; h != NULL; h = h->next_) { | |
2462 | #if COMPILER(MSVC) | |
2463 | if (h->tid_ == me) { | |
2464 | #else | |
2465 | if (pthread_equal(h->tid_, me)) { | |
2466 | #endif | |
2467 | heap = h; | |
2468 | break; | |
2469 | } | |
2470 | } | |
2471 | ||
2472 | if (heap == NULL) heap = NewHeap(me); | |
2473 | } | |
2474 | ||
2475 | // We call pthread_setspecific() outside the lock because it may | |
2476 | // call malloc() recursively. The recursive call will never get | |
2477 | // here again because it will find the already allocated heap in the | |
2478 | // linked list of heaps. | |
2479 | if (!heap->in_setspecific_ && tsd_inited) { | |
2480 | heap->in_setspecific_ = true; | |
2481 | setThreadHeap(heap); | |
2482 | } | |
2483 | return heap; | |
2484 | } | |
2485 | ||
2486 | void TCMalloc_ThreadCache::BecomeIdle() { | |
2487 | if (!tsd_inited) return; // No caches yet | |
2488 | TCMalloc_ThreadCache* heap = GetThreadHeap(); | |
2489 | if (heap == NULL) return; // No thread cache to remove | |
2490 | if (heap->in_setspecific_) return; // Do not disturb the active caller | |
2491 | ||
2492 | heap->in_setspecific_ = true; | |
2493 | pthread_setspecific(heap_key, NULL); | |
2494 | #ifdef HAVE_TLS | |
2495 | // Also update the copy in __thread | |
2496 | threadlocal_heap = NULL; | |
2497 | #endif | |
2498 | heap->in_setspecific_ = false; | |
2499 | if (GetThreadHeap() == heap) { | |
2500 | // Somehow heap got reinstated by a recursive call to malloc | |
2501 | // from pthread_setspecific. We give up in this case. | |
2502 | return; | |
2503 | } | |
2504 | ||
2505 | // We can now get rid of the heap | |
2506 | DeleteCache(heap); | |
2507 | } | |
2508 | ||
2509 | void TCMalloc_ThreadCache::DestroyThreadCache(void* ptr) { | |
2510 | // Note that "ptr" cannot be NULL since pthread promises not | |
2511 | // to invoke the destructor on NULL values, but for safety, | |
2512 | // we check anyway. | |
2513 | if (ptr == NULL) return; | |
2514 | #ifdef HAVE_TLS | |
2515 | // Prevent fast path of GetThreadHeap() from returning heap. | |
2516 | threadlocal_heap = NULL; | |
2517 | #endif | |
2518 | DeleteCache(reinterpret_cast<TCMalloc_ThreadCache*>(ptr)); | |
2519 | } | |
2520 | ||
2521 | void TCMalloc_ThreadCache::DeleteCache(TCMalloc_ThreadCache* heap) { | |
2522 | // Remove all memory from heap | |
2523 | heap->Cleanup(); | |
2524 | ||
2525 | // Remove from linked list | |
2526 | SpinLockHolder h(&pageheap_lock); | |
2527 | if (heap->next_ != NULL) heap->next_->prev_ = heap->prev_; | |
2528 | if (heap->prev_ != NULL) heap->prev_->next_ = heap->next_; | |
2529 | if (thread_heaps == heap) thread_heaps = heap->next_; | |
2530 | thread_heap_count--; | |
2531 | RecomputeThreadCacheSize(); | |
2532 | ||
2533 | threadheap_allocator.Delete(heap); | |
2534 | } | |
2535 | ||
2536 | void TCMalloc_ThreadCache::RecomputeThreadCacheSize() { | |
2537 | // Divide available space across threads | |
2538 | int n = thread_heap_count > 0 ? thread_heap_count : 1; | |
2539 | size_t space = overall_thread_cache_size / n; | |
2540 | ||
2541 | // Limit to allowed range | |
2542 | if (space < kMinThreadCacheSize) space = kMinThreadCacheSize; | |
2543 | if (space > kMaxThreadCacheSize) space = kMaxThreadCacheSize; | |
2544 | ||
2545 | per_thread_cache_size = space; | |
2546 | } | |
2547 | ||
2548 | void TCMalloc_ThreadCache::Print() const { | |
2549 | for (size_t cl = 0; cl < kNumClasses; ++cl) { | |
2550 | MESSAGE(" %5" PRIuS " : %4d len; %4d lo\n", | |
2551 | ByteSizeForClass(cl), | |
2552 | list_[cl].length(), | |
2553 | list_[cl].lowwatermark()); | |
2554 | } | |
2555 | } | |
2556 | ||
2557 | // Extract interesting stats | |
2558 | struct TCMallocStats { | |
2559 | uint64_t system_bytes; // Bytes alloced from system | |
2560 | uint64_t thread_bytes; // Bytes in thread caches | |
2561 | uint64_t central_bytes; // Bytes in central cache | |
2562 | uint64_t transfer_bytes; // Bytes in central transfer cache | |
2563 | uint64_t pageheap_bytes; // Bytes in page heap | |
2564 | uint64_t metadata_bytes; // Bytes alloced for metadata | |
2565 | }; | |
2566 | ||
2567 | #ifndef WTF_CHANGES | |
2568 | // Get stats into "r". Also get per-size-class counts if class_count != NULL | |
2569 | static void ExtractStats(TCMallocStats* r, uint64_t* class_count) { | |
2570 | r->central_bytes = 0; | |
2571 | r->transfer_bytes = 0; | |
2572 | for (int cl = 0; cl < kNumClasses; ++cl) { | |
2573 | const int length = central_cache[cl].length(); | |
2574 | const int tc_length = central_cache[cl].tc_length(); | |
2575 | r->central_bytes += static_cast<uint64_t>(ByteSizeForClass(cl)) * length; | |
2576 | r->transfer_bytes += | |
2577 | static_cast<uint64_t>(ByteSizeForClass(cl)) * tc_length; | |
2578 | if (class_count) class_count[cl] = length + tc_length; | |
2579 | } | |
2580 | ||
2581 | // Add stats from per-thread heaps | |
2582 | r->thread_bytes = 0; | |
2583 | { // scope | |
2584 | SpinLockHolder h(&pageheap_lock); | |
2585 | for (TCMalloc_ThreadCache* h = thread_heaps; h != NULL; h = h->next_) { | |
2586 | r->thread_bytes += h->Size(); | |
2587 | if (class_count) { | |
2588 | for (size_t cl = 0; cl < kNumClasses; ++cl) { | |
2589 | class_count[cl] += h->freelist_length(cl); | |
2590 | } | |
2591 | } | |
2592 | } | |
2593 | } | |
2594 | ||
2595 | { //scope | |
2596 | SpinLockHolder h(&pageheap_lock); | |
2597 | r->system_bytes = pageheap->SystemBytes(); | |
2598 | r->metadata_bytes = metadata_system_bytes; | |
2599 | r->pageheap_bytes = pageheap->FreeBytes(); | |
2600 | } | |
2601 | } | |
2602 | #endif | |
2603 | ||
2604 | #ifndef WTF_CHANGES | |
2605 | // WRITE stats to "out" | |
2606 | static void DumpStats(TCMalloc_Printer* out, int level) { | |
2607 | TCMallocStats stats; | |
2608 | uint64_t class_count[kNumClasses]; | |
2609 | ExtractStats(&stats, (level >= 2 ? class_count : NULL)); | |
2610 | ||
2611 | if (level >= 2) { | |
2612 | out->printf("------------------------------------------------\n"); | |
2613 | uint64_t cumulative = 0; | |
2614 | for (int cl = 0; cl < kNumClasses; ++cl) { | |
2615 | if (class_count[cl] > 0) { | |
2616 | uint64_t class_bytes = class_count[cl] * ByteSizeForClass(cl); | |
2617 | cumulative += class_bytes; | |
2618 | out->printf("class %3d [ %8" PRIuS " bytes ] : " | |
2619 | "%8" PRIu64 " objs; %5.1f MB; %5.1f cum MB\n", | |
2620 | cl, ByteSizeForClass(cl), | |
2621 | class_count[cl], | |
2622 | class_bytes / 1048576.0, | |
2623 | cumulative / 1048576.0); | |
2624 | } | |
2625 | } | |
2626 | ||
2627 | SpinLockHolder h(&pageheap_lock); | |
2628 | pageheap->Dump(out); | |
2629 | } | |
2630 | ||
2631 | const uint64_t bytes_in_use = stats.system_bytes | |
2632 | - stats.pageheap_bytes | |
2633 | - stats.central_bytes | |
2634 | - stats.transfer_bytes | |
2635 | - stats.thread_bytes; | |
2636 | ||
2637 | out->printf("------------------------------------------------\n" | |
2638 | "MALLOC: %12" PRIu64 " Heap size\n" | |
2639 | "MALLOC: %12" PRIu64 " Bytes in use by application\n" | |
2640 | "MALLOC: %12" PRIu64 " Bytes free in page heap\n" | |
2641 | "MALLOC: %12" PRIu64 " Bytes free in central cache\n" | |
2642 | "MALLOC: %12" PRIu64 " Bytes free in transfer cache\n" | |
2643 | "MALLOC: %12" PRIu64 " Bytes free in thread caches\n" | |
2644 | "MALLOC: %12" PRIu64 " Spans in use\n" | |
2645 | "MALLOC: %12" PRIu64 " Thread heaps in use\n" | |
2646 | "MALLOC: %12" PRIu64 " Metadata allocated\n" | |
2647 | "------------------------------------------------\n", | |
2648 | stats.system_bytes, | |
2649 | bytes_in_use, | |
2650 | stats.pageheap_bytes, | |
2651 | stats.central_bytes, | |
2652 | stats.transfer_bytes, | |
2653 | stats.thread_bytes, | |
2654 | uint64_t(span_allocator.inuse()), | |
2655 | uint64_t(threadheap_allocator.inuse()), | |
2656 | stats.metadata_bytes); | |
2657 | } | |
2658 | ||
2659 | static void PrintStats(int level) { | |
2660 | const int kBufferSize = 16 << 10; | |
2661 | char* buffer = new char[kBufferSize]; | |
2662 | TCMalloc_Printer printer(buffer, kBufferSize); | |
2663 | DumpStats(&printer, level); | |
2664 | write(STDERR_FILENO, buffer, strlen(buffer)); | |
2665 | delete[] buffer; | |
2666 | } | |
2667 | ||
2668 | static void** DumpStackTraces() { | |
2669 | // Count how much space we need | |
2670 | int needed_slots = 0; | |
2671 | { | |
2672 | SpinLockHolder h(&pageheap_lock); | |
2673 | for (Span* s = sampled_objects.next; s != &sampled_objects; s = s->next) { | |
2674 | StackTrace* stack = reinterpret_cast<StackTrace*>(s->objects); | |
2675 | needed_slots += 3 + stack->depth; | |
2676 | } | |
2677 | needed_slots += 100; // Slop in case sample grows | |
2678 | needed_slots += needed_slots/8; // An extra 12.5% slop | |
2679 | } | |
2680 | ||
2681 | void** result = new void*[needed_slots]; | |
2682 | if (result == NULL) { | |
2683 | MESSAGE("tcmalloc: could not allocate %d slots for stack traces\n", | |
2684 | needed_slots); | |
2685 | return NULL; | |
2686 | } | |
2687 | ||
2688 | SpinLockHolder h(&pageheap_lock); | |
2689 | int used_slots = 0; | |
2690 | for (Span* s = sampled_objects.next; s != &sampled_objects; s = s->next) { | |
2691 | ASSERT(used_slots < needed_slots); // Need to leave room for terminator | |
2692 | StackTrace* stack = reinterpret_cast<StackTrace*>(s->objects); | |
2693 | if (used_slots + 3 + stack->depth >= needed_slots) { | |
2694 | // No more room | |
2695 | break; | |
2696 | } | |
2697 | ||
2698 | result[used_slots+0] = reinterpret_cast<void*>(static_cast<uintptr_t>(1)); | |
2699 | result[used_slots+1] = reinterpret_cast<void*>(stack->size); | |
2700 | result[used_slots+2] = reinterpret_cast<void*>(stack->depth); | |
2701 | for (int d = 0; d < stack->depth; d++) { | |
2702 | result[used_slots+3+d] = stack->stack[d]; | |
2703 | } | |
2704 | used_slots += 3 + stack->depth; | |
2705 | } | |
2706 | result[used_slots] = reinterpret_cast<void*>(static_cast<uintptr_t>(0)); | |
2707 | return result; | |
2708 | } | |
2709 | #endif | |
2710 | ||
2711 | #ifndef WTF_CHANGES | |
2712 | ||
2713 | // TCMalloc's support for extra malloc interfaces | |
2714 | class TCMallocImplementation : public MallocExtension { | |
2715 | public: | |
2716 | virtual void GetStats(char* buffer, int buffer_length) { | |
2717 | ASSERT(buffer_length > 0); | |
2718 | TCMalloc_Printer printer(buffer, buffer_length); | |
2719 | ||
2720 | // Print level one stats unless lots of space is available | |
2721 | if (buffer_length < 10000) { | |
2722 | DumpStats(&printer, 1); | |
2723 | } else { | |
2724 | DumpStats(&printer, 2); | |
2725 | } | |
2726 | } | |
2727 | ||
2728 | virtual void** ReadStackTraces() { | |
2729 | return DumpStackTraces(); | |
2730 | } | |
2731 | ||
2732 | virtual bool GetNumericProperty(const char* name, size_t* value) { | |
2733 | ASSERT(name != NULL); | |
2734 | ||
2735 | if (strcmp(name, "generic.current_allocated_bytes") == 0) { | |
2736 | TCMallocStats stats; | |
2737 | ExtractStats(&stats, NULL); | |
2738 | *value = stats.system_bytes | |
2739 | - stats.thread_bytes | |
2740 | - stats.central_bytes | |
2741 | - stats.pageheap_bytes; | |
2742 | return true; | |
2743 | } | |
2744 | ||
2745 | if (strcmp(name, "generic.heap_size") == 0) { | |
2746 | TCMallocStats stats; | |
2747 | ExtractStats(&stats, NULL); | |
2748 | *value = stats.system_bytes; | |
2749 | return true; | |
2750 | } | |
2751 | ||
2752 | if (strcmp(name, "tcmalloc.slack_bytes") == 0) { | |
2753 | // We assume that bytes in the page heap are not fragmented too | |
2754 | // badly, and are therefore available for allocation. | |
2755 | SpinLockHolder l(&pageheap_lock); | |
2756 | *value = pageheap->FreeBytes(); | |
2757 | return true; | |
2758 | } | |
2759 | ||
2760 | if (strcmp(name, "tcmalloc.max_total_thread_cache_bytes") == 0) { | |
2761 | SpinLockHolder l(&pageheap_lock); | |
2762 | *value = overall_thread_cache_size; | |
2763 | return true; | |
2764 | } | |
2765 | ||
2766 | if (strcmp(name, "tcmalloc.current_total_thread_cache_bytes") == 0) { | |
2767 | TCMallocStats stats; | |
2768 | ExtractStats(&stats, NULL); | |
2769 | *value = stats.thread_bytes; | |
2770 | return true; | |
2771 | } | |
2772 | ||
2773 | return false; | |
2774 | } | |
2775 | ||
2776 | virtual bool SetNumericProperty(const char* name, size_t value) { | |
2777 | ASSERT(name != NULL); | |
2778 | ||
2779 | if (strcmp(name, "tcmalloc.max_total_thread_cache_bytes") == 0) { | |
2780 | // Clip the value to a reasonable range | |
2781 | if (value < kMinThreadCacheSize) value = kMinThreadCacheSize; | |
2782 | if (value > (1<<30)) value = (1<<30); // Limit to 1GB | |
2783 | ||
2784 | SpinLockHolder l(&pageheap_lock); | |
2785 | overall_thread_cache_size = static_cast<size_t>(value); | |
2786 | TCMalloc_ThreadCache::RecomputeThreadCacheSize(); | |
2787 | return true; | |
2788 | } | |
2789 | ||
2790 | return false; | |
2791 | } | |
2792 | ||
2793 | virtual void MarkThreadIdle() { | |
2794 | TCMalloc_ThreadCache::BecomeIdle(); | |
2795 | } | |
2796 | ||
2797 | virtual void ReleaseFreeMemory() { | |
2798 | SpinLockHolder h(&pageheap_lock); | |
2799 | pageheap->ReleaseFreePages(); | |
2800 | } | |
2801 | }; | |
2802 | #endif | |
2803 | ||
2804 | // The constructor allocates an object to ensure that initialization | |
2805 | // runs before main(), and therefore we do not have a chance to become | |
2806 | // multi-threaded before initialization. We also create the TSD key | |
2807 | // here. Presumably by the time this constructor runs, glibc is in | |
2808 | // good enough shape to handle pthread_key_create(). | |
2809 | // | |
2810 | // The constructor also takes the opportunity to tell STL to use | |
2811 | // tcmalloc. We want to do this early, before other constructors run, so | |
2812 | // all user STL allocations go through tcmalloc (which works really | |
2813 | // well for STL). | |
2814 | // | |
2815 | // The destructor prints stats when the program exits. | |
2816 | class TCMallocGuard { | |
2817 | public: | |
2818 | ||
2819 | TCMallocGuard() { | |
2820 | #ifdef HAVE_TLS // this is true if the cc/ld/libc combo support TLS | |
2821 | // Check whether the kernel also supports TLS (needs to happen at runtime) | |
2822 | CheckIfKernelSupportsTLS(); | |
2823 | #endif | |
2824 | #ifndef WTF_CHANGES | |
2825 | #ifdef WIN32 // patch the windows VirtualAlloc, etc. | |
2826 | PatchWindowsFunctions(); // defined in windows/patch_functions.cc | |
2827 | #endif | |
2828 | #endif | |
2829 | free(malloc(1)); | |
2830 | TCMalloc_ThreadCache::InitTSD(); | |
2831 | free(malloc(1)); | |
2832 | #ifndef WTF_CHANGES | |
2833 | MallocExtension::Register(new TCMallocImplementation); | |
2834 | #endif | |
2835 | } | |
2836 | ||
2837 | #ifndef WTF_CHANGES | |
2838 | ~TCMallocGuard() { | |
2839 | const char* env = getenv("MALLOCSTATS"); | |
2840 | if (env != NULL) { | |
2841 | int level = atoi(env); | |
2842 | if (level < 1) level = 1; | |
2843 | PrintStats(level); | |
2844 | } | |
2845 | #ifdef WIN32 | |
2846 | UnpatchWindowsFunctions(); | |
2847 | #endif | |
2848 | } | |
2849 | #endif | |
2850 | }; | |
2851 | ||
2852 | #ifndef WTF_CHANGES | |
2853 | static TCMallocGuard module_enter_exit_hook; | |
2854 | #endif | |
2855 | ||
2856 | ||
2857 | //------------------------------------------------------------------- | |
2858 | // Helpers for the exported routines below | |
2859 | //------------------------------------------------------------------- | |
2860 | ||
2861 | #ifndef WTF_CHANGES | |
2862 | ||
2863 | static Span* DoSampledAllocation(size_t size) { | |
2864 | ||
2865 | // Grab the stack trace outside the heap lock | |
2866 | StackTrace tmp; | |
2867 | tmp.depth = GetStackTrace(tmp.stack, kMaxStackDepth, 1); | |
2868 | tmp.size = size; | |
2869 | ||
2870 | SpinLockHolder h(&pageheap_lock); | |
2871 | // Allocate span | |
2872 | Span *span = pageheap->New(pages(size == 0 ? 1 : size)); | |
2873 | if (span == NULL) { | |
2874 | return NULL; | |
2875 | } | |
2876 | ||
2877 | // Allocate stack trace | |
2878 | StackTrace *stack = stacktrace_allocator.New(); | |
2879 | if (stack == NULL) { | |
2880 | // Sampling failed because of lack of memory | |
2881 | return span; | |
2882 | } | |
2883 | ||
2884 | *stack = tmp; | |
2885 | span->sample = 1; | |
2886 | span->objects = stack; | |
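// Sampled spans are kept on the global sampled_objects list so the recorded
// stack traces can later be reported (e.g. by a heap profiler).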
2887 | DLL_Prepend(&sampled_objects, span); | |
2888 | ||
2889 | return span; | |
2890 | } | |
2891 | #endif | |
2892 | ||
2893 | static inline bool CheckCachedSizeClass(void *ptr) { | |
2894 | PageID p = reinterpret_cast<uintptr_t>(ptr) >> kPageShift; | |
2895 | size_t cached_value = pageheap->GetSizeClassIfCached(p); | |
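// A cached value of 0 means "not cached" (or cached as a page-level
// allocation; see SpanToMallocResult below), so only a non-zero cached
// class has to agree with the span descriptor.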
2896 | return cached_value == 0 || | |
2897 | cached_value == pageheap->GetDescriptor(p)->sizeclass; | |
2898 | } | |
2899 | ||
2900 | static inline void* CheckedMallocResult(void *result) | |
2901 | { | |
2902 | ASSERT(result == 0 || CheckCachedSizeClass(result)); | |
2903 | return result; | |
2904 | } | |
2905 | ||
2906 | static inline void* SpanToMallocResult(Span *span) { | |
2907 | pageheap->CacheSizeClass(span->start, 0); | |
2908 | return | |
2909 | CheckedMallocResult(reinterpret_cast<void*>(span->start << kPageShift)); | |
2910 | } | |
2911 | ||
2912 | static ALWAYS_INLINE void* do_malloc(size_t size) { | |
2913 | void* ret = NULL; | |
2914 | ||
2915 | #ifdef WTF_CHANGES | |
2916 | ASSERT(!isForbidden()); | |
2917 | #endif | |
2918 | ||
2919 | // The following call forces module initialization | |
2920 | TCMalloc_ThreadCache* heap = TCMalloc_ThreadCache::GetCache(); | |
2921 | #ifndef WTF_CHANGES | |
2922 | if ((FLAGS_tcmalloc_sample_parameter > 0) && heap->SampleAllocation(size)) { | |
2923 | Span* span = DoSampledAllocation(size); | |
2924 | if (span != NULL) { | |
2925 | ret = SpanToMallocResult(span); | |
2926 | } | |
2927 | } else | |
2928 | #endif | |
2929 | if (size > kMaxSize) { | |
2930 | // Use page-level allocator | |
2931 | SpinLockHolder h(&pageheap_lock); | |
2932 | Span* span = pageheap->New(pages(size)); | |
2933 | if (span != NULL) { | |
2934 | ret = SpanToMallocResult(span); | |
2935 | } | |
2936 | } else { | |
2937 | // The common case, and also the simplest. This just pops the | |
2938 | // size-appropriate freelist, after replenishing it if it's empty. | |
2939 | ret = CheckedMallocResult(heap->Allocate(size)); | |
2940 | } | |
2941 | if (ret == NULL) errno = ENOMEM; | |
2942 | return ret; | |
2943 | } | |
2944 | ||
2945 | static ALWAYS_INLINE void do_free(void* ptr) { | |
2946 | if (ptr == NULL) return; | |
2947 | ASSERT(pageheap != NULL); // Should not call free() before malloc() | |
2948 | const PageID p = reinterpret_cast<uintptr_t>(ptr) >> kPageShift; | |
2949 | Span* span = NULL; | |
2950 | size_t cl = pageheap->GetSizeClassIfCached(p); | |
2951 | ||
2952 | if (cl == 0) { | |
2953 | span = pageheap->GetDescriptor(p); | |
2954 | cl = span->sizeclass; | |
2955 | pageheap->CacheSizeClass(p, cl); | |
2956 | } | |
2957 | if (cl != 0) { | |
2958 | ASSERT(!pageheap->GetDescriptor(p)->sample); | |
2959 | TCMalloc_ThreadCache* heap = TCMalloc_ThreadCache::GetCacheIfPresent(); | |
2960 | if (heap != NULL) { | |
2961 | heap->Deallocate(ptr, cl); | |
2962 | } else { | |
2963 | // Delete directly into central cache | |
2964 | SLL_SetNext(ptr, NULL); | |
2965 | central_cache[cl].InsertRange(ptr, ptr, 1); | |
2966 | } | |
2967 | } else { | |
2968 | SpinLockHolder h(&pageheap_lock); | |
2969 | ASSERT(reinterpret_cast<uintptr_t>(ptr) % kPageSize == 0); | |
2970 | ASSERT(span != NULL && span->start == p); | |
2971 | if (span->sample) { | |
2972 | DLL_Remove(span); | |
2973 | stacktrace_allocator.Delete(reinterpret_cast<StackTrace*>(span->objects)); | |
2974 | span->objects = NULL; | |
2975 | } | |
2976 | pageheap->Delete(span); | |
2977 | } | |
2978 | } | |
2979 | ||
2980 | #ifndef WTF_CHANGES | |
2981 | // For use by exported routines below that want specific alignments | |
2982 | // | |
2983 | // Note: this code can be slow, and can significantly fragment memory. | |
2984 | // The expectation is that memalign/posix_memalign/valloc/pvalloc will | |
2985 | // not be invoked very often. This requirement simplifies our | |
2986 | // implementation and allows us to tune for expected allocation | |
2987 | // patterns. | |
2988 | static void* do_memalign(size_t align, size_t size) { | |
2989 | ASSERT((align & (align - 1)) == 0); | |
2990 | ASSERT(align > 0); | |
2991 | if (pageheap == NULL) TCMalloc_ThreadCache::InitModule(); | |
2992 | ||
2993 | // Allocate at least one byte to avoid boundary conditions below | |
2994 | if (size == 0) size = 1; | |
2995 | ||
2996 | if (size <= kMaxSize && align < kPageSize) { | |
2997 | // Search through acceptable size classes looking for one with | |
2998 | // enough alignment. This depends on the fact that | |
2999 | // InitSizeClasses() currently produces several size classes that | |
3000 | // are aligned at powers of two. We will waste time and space if | |
3001 | // we miss in the size class array, but that is deemed acceptable | |
3002 | // since memalign() should be used rarely. | |
3003 | size_t cl = SizeClass(size); | |
3004 | while (cl < kNumClasses && ((class_to_size[cl] & (align - 1)) != 0)) { | |
3005 | cl++; | |
3006 | } | |
3007 | if (cl < kNumClasses) { | |
3008 | TCMalloc_ThreadCache* heap = TCMalloc_ThreadCache::GetCache(); | |
3009 | return CheckedMallocResult(heap->Allocate(class_to_size[cl])); | |
3010 | } | |
3011 | } | |
3012 | ||
3013 | // We will allocate directly from the page heap | |
3014 | SpinLockHolder h(&pageheap_lock); | |
3015 | ||
3016 | if (align <= kPageSize) { | |
3017 | // Any page-level allocation will be fine | |
3018 | // TODO: We could put the rest of this page in the appropriate | |
3019 | // TODO: cache but it does not seem worth it. | |
3020 | Span* span = pageheap->New(pages(size)); | |
3021 | return span == NULL ? NULL : SpanToMallocResult(span); | |
3022 | } | |
3023 | ||
3024 | // Allocate extra pages and carve off an aligned portion | |
3025 | const Length alloc = pages(size + align); | |
3026 | Span* span = pageheap->New(alloc); | |
3027 | if (span == NULL) return NULL; | |
3028 | ||
3029 | // Skip starting portion so that we end up aligned | |
3030 | Length skip = 0; | |
3031 | while ((((span->start+skip) << kPageShift) & (align - 1)) != 0) { | |
3032 | skip++; | |
3033 | } | |
3034 | ASSERT(skip < alloc); | |
3035 | if (skip > 0) { | |
3036 | Span* rest = pageheap->Split(span, skip); | |
3037 | pageheap->Delete(span); | |
3038 | span = rest; | |
3039 | } | |
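// Worked example with hypothetical numbers: with 4K pages and align == 32768,
// a span starting at page 5 needs skip == 3, so the returned region begins at
// page 8, whose byte address (8 << kPageShift) is 32K-aligned.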
3040 | ||
3041 | // Skip trailing portion that we do not need to return | |
3042 | const Length needed = pages(size); | |
3043 | ASSERT(span->length >= needed); | |
3044 | if (span->length > needed) { | |
3045 | Span* trailer = pageheap->Split(span, needed); | |
3046 | pageheap->Delete(trailer); | |
3047 | } | |
3048 | return SpanToMallocResult(span); | |
3049 | } | |
3050 | #endif | |
3051 | ||
3052 | // Helpers for use by exported routines below: | |
3053 | ||
3054 | #ifndef WTF_CHANGES | |
3055 | static inline void do_malloc_stats() { | |
3056 | PrintStats(1); | |
3057 | } | |
3058 | #endif | |
3059 | ||
3060 | static inline int do_mallopt(int, int) { | |
3061 | return 1; // mallopt is not supported; the request is ignored | |
3062 | } | |
3063 | ||
3064 | #ifdef HAVE_STRUCT_MALLINFO // mallinfo isn't defined on freebsd, for instance | |
3065 | static inline struct mallinfo do_mallinfo() { | |
3066 | TCMallocStats stats; | |
3067 | ExtractStats(&stats, NULL); | |
3068 | ||
3069 | // Just some of the fields are filled in. | |
3070 | struct mallinfo info; | |
3071 | memset(&info, 0, sizeof(info)); | |
3072 | ||
3073 | // Unfortunately, the struct contains "int" fields, so some of the | |
3074 | // size values will be truncated. | |
3075 | info.arena = static_cast<int>(stats.system_bytes); | |
3076 | info.fsmblks = static_cast<int>(stats.thread_bytes | |
3077 | + stats.central_bytes | |
3078 | + stats.transfer_bytes); | |
3079 | info.fordblks = static_cast<int>(stats.pageheap_bytes); | |
3080 | info.uordblks = static_cast<int>(stats.system_bytes | |
3081 | - stats.thread_bytes | |
3082 | - stats.central_bytes | |
3083 | - stats.transfer_bytes | |
3084 | - stats.pageheap_bytes); | |
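// In other words, uordblks approximates the bytes currently in use by the
// application: everything obtained from the system minus whatever sits idle
// in the thread caches, central/transfer caches, and the page heap.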
3085 | ||
3086 | return info; | |
3087 | } | |
3088 | #endif | |
3089 | ||
3090 | //------------------------------------------------------------------- | |
3091 | // Exported routines | |
3092 | //------------------------------------------------------------------- | |
3093 | ||
3094 | // CAVEAT: The code structure below ensures that MallocHook methods are always | |
3095 | // called from the stack frame of the invoked allocation function. | |
3096 | // heap-checker.cc depends on this to start a stack trace from | |
3097 | // the call to the (de)allocation function. | |
3098 | ||
3099 | #ifndef WTF_CHANGES | |
3100 | extern "C" | |
3101 | #endif | |
3102 | void* malloc(size_t size) { | |
3103 | void* result = do_malloc(size); | |
3104 | #ifndef WTF_CHANGES | |
3105 | MallocHook::InvokeNewHook(result, size); | |
3106 | #endif | |
3107 | return result; | |
3108 | } | |
3109 | ||
3110 | #ifndef WTF_CHANGES | |
3111 | extern "C" | |
3112 | #endif | |
3113 | void free(void* ptr) { | |
3114 | #ifndef WTF_CHANGES | |
3115 | MallocHook::InvokeDeleteHook(ptr); | |
3116 | #endif | |
3117 | do_free(ptr); | |
3118 | } | |
3119 | ||
3120 | #ifndef WTF_CHANGES | |
3121 | extern "C" | |
3122 | #endif | |
3123 | void* calloc(size_t n, size_t elem_size) { | |
3124 | const size_t totalBytes = n * elem_size; | |
3125 | ||
3126 | // Protect against overflow | |
3127 | if (n > 1 && elem_size && (totalBytes / elem_size) != n) | |
3128 | return 0; | |
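// (For instance, with a 32-bit size_t, n == 0x20000 and elem_size == 0x20000
// wrap totalBytes around to 0, and 0 / elem_size != n exposes the overflow.)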
3129 | ||
3130 | void* result = do_malloc(totalBytes); | |
3131 | if (result != NULL) { | |
3132 | memset(result, 0, totalBytes); | |
3133 | } | |
3134 | #ifndef WTF_CHANGES | |
3135 | MallocHook::InvokeNewHook(result, totalBytes); | |
3136 | #endif | |
3137 | return result; | |
3138 | } | |
3139 | ||
3140 | #ifndef WTF_CHANGES | |
3141 | extern "C" | |
3142 | #endif | |
3143 | void cfree(void* ptr) { | |
3144 | #ifndef WTF_CHANGES | |
3145 | MallocHook::InvokeDeleteHook(ptr); | |
3146 | #endif | |
3147 | do_free(ptr); | |
3148 | } | |
3149 | ||
3150 | #ifndef WTF_CHANGES | |
3151 | extern "C" | |
3152 | #endif | |
3153 | void* realloc(void* old_ptr, size_t new_size) { | |
3154 | if (old_ptr == NULL) { | |
3155 | void* result = do_malloc(new_size); | |
3156 | #ifndef WTF_CHANGES | |
3157 | MallocHook::InvokeNewHook(result, new_size); | |
3158 | #endif | |
3159 | return result; | |
3160 | } | |
3161 | if (new_size == 0) { | |
3162 | #ifndef WTF_CHANGES | |
3163 | MallocHook::InvokeDeleteHook(old_ptr); | |
3164 | #endif | |
3165 | free(old_ptr); | |
3166 | return NULL; | |
3167 | } | |
3168 | ||
3169 | // Get the size of the old entry | |
3170 | const PageID p = reinterpret_cast<uintptr_t>(old_ptr) >> kPageShift; | |
3171 | size_t cl = pageheap->GetSizeClassIfCached(p); | |
3172 | Span *span = NULL; | |
3173 | size_t old_size; | |
3174 | if (cl == 0) { | |
3175 | span = pageheap->GetDescriptor(p); | |
3176 | cl = span->sizeclass; | |
3177 | pageheap->CacheSizeClass(p, cl); | |
3178 | } | |
3179 | if (cl != 0) { | |
3180 | old_size = ByteSizeForClass(cl); | |
3181 | } else { | |
3182 | ASSERT(span != NULL); | |
3183 | old_size = span->length << kPageShift; | |
3184 | } | |
3185 | ||
3186 | // Reallocate if the new size is larger than the old size, | |
3187 | // or if the new size is significantly smaller than the old size. | |
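// (For instance, if 120- and 128-byte requests share a hypothetical 128-byte
// size class, shrinking a 128-byte block to 120 bytes returns old_ptr
// unchanged, while shrinking it to 16 bytes reallocates.)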
3188 | if ((new_size > old_size) || (AllocationSize(new_size) < old_size)) { | |
3189 | // Need to reallocate | |
3190 | void* new_ptr = do_malloc(new_size); | |
3191 | if (new_ptr == NULL) { | |
3192 | return NULL; | |
3193 | } | |
3194 | #ifndef WTF_CHANGES | |
3195 | MallocHook::InvokeNewHook(new_ptr, new_size); | |
3196 | #endif | |
3197 | memcpy(new_ptr, old_ptr, ((old_size < new_size) ? old_size : new_size)); | |
3198 | #ifndef WTF_CHANGES | |
3199 | MallocHook::InvokeDeleteHook(old_ptr); | |
3200 | #endif | |
3201 | // We could use a variant of do_free() that leverages the fact | |
3202 | // that we already know the sizeclass of old_ptr. The benefit | |
3203 | // would be small, so don't bother. | |
3204 | do_free(old_ptr); | |
3205 | return new_ptr; | |
3206 | } else { | |
3207 | return old_ptr; | |
3208 | } | |
3209 | } | |
3210 | ||
3211 | #ifndef WTF_CHANGES | |
3212 | ||
3213 | static SpinLock set_new_handler_lock = SPINLOCK_INITIALIZER; | |
3214 | ||
3215 | static inline void* cpp_alloc(size_t size, bool nothrow) { | |
3216 | for (;;) { | |
3217 | void* p = do_malloc(size); | |
3218 | #ifdef PREANSINEW | |
3219 | return p; | |
3220 | #else | |
3221 | if (p == NULL) { // allocation failed | |
3222 | // Get the current new handler. NB: this function is not | |
3223 | // thread-safe. We make a feeble stab at making it so here, but | |
3224 | // this lock only protects against tcmalloc interfering with | |
3225 | // itself, not with other libraries calling set_new_handler. | |
3226 | std::new_handler nh; | |
3227 | { | |
3228 | SpinLockHolder h(&set_new_handler_lock); | |
3229 | nh = std::set_new_handler(0); | |
3230 | (void) std::set_new_handler(nh); | |
3231 | } | |
3232 | // If no new_handler is established, the allocation failed. | |
3233 | if (!nh) { | |
3234 | if (nothrow) return 0; | |
3235 | throw std::bad_alloc(); | |
3236 | } | |
3237 | // Otherwise, try the new_handler. If it returns, retry the | |
3238 | // allocation. If it throws std::bad_alloc, fail the allocation. | |
3239 | // If it throws something else, don't interfere. | |
3240 | try { | |
3241 | (*nh)(); | |
3242 | } catch (const std::bad_alloc&) { | |
3243 | if (!nothrow) throw; | |
3244 | return p; | |
3245 | } | |
3246 | } else { // allocation success | |
3247 | return p; | |
3248 | } | |
3249 | #endif | |
3250 | } | |
3251 | } | |
3252 | ||
3253 | void* operator new(size_t size) { | |
3254 | void* p = cpp_alloc(size, false); | |
3255 | // We keep this next instruction out of cpp_alloc for a reason: when | |
3256 | // it's in, and new just calls cpp_alloc, the optimizer may fold the | |
3257 | // new call into cpp_alloc, which messes up our whole section-based | |
3258 | // stacktracing (see ATTRIBUTE_SECTION, above). This ensures cpp_alloc | |
3259 | // isn't the last thing this fn calls, and prevents the folding. | |
3260 | MallocHook::InvokeNewHook(p, size); | |
3261 | return p; | |
3262 | } | |
3263 | ||
3264 | void* operator new(size_t size, const std::nothrow_t&) __THROW { | |
3265 | void* p = cpp_alloc(size, true); | |
3266 | MallocHook::InvokeNewHook(p, size); | |
3267 | return p; | |
3268 | } | |
3269 | ||
3270 | void operator delete(void* p) __THROW { | |
3271 | MallocHook::InvokeDeleteHook(p); | |
3272 | do_free(p); | |
3273 | } | |
3274 | ||
3275 | void operator delete(void* p, const std::nothrow_t&) __THROW { | |
3276 | MallocHook::InvokeDeleteHook(p); | |
3277 | do_free(p); | |
3278 | } | |
3279 | ||
3280 | void* operator new[](size_t size) { | |
3281 | void* p = cpp_alloc(size, false); | |
3282 | // We keep this next instruction out of cpp_alloc for a reason: when | |
3283 | // it's in, and new just calls cpp_alloc, the optimizer may fold the | |
3284 | // new call into cpp_alloc, which messes up our whole section-based | |
3285 | // stacktracing (see ATTRIBUTE_SECTION, above). This ensures cpp_alloc | |
3286 | // isn't the last thing this fn calls, and prevents the folding. | |
3287 | MallocHook::InvokeNewHook(p, size); | |
3288 | return p; | |
3289 | } | |
3290 | ||
3291 | void* operator new[](size_t size, const std::nothrow_t&) __THROW { | |
3292 | void* p = cpp_alloc(size, true); | |
3293 | MallocHook::InvokeNewHook(p, size); | |
3294 | return p; | |
3295 | } | |
3296 | ||
3297 | void operator delete[](void* p) __THROW { | |
3298 | MallocHook::InvokeDeleteHook(p); | |
3299 | do_free(p); | |
3300 | } | |
3301 | ||
3302 | void operator delete[](void* p, const std::nothrow_t&) __THROW { | |
3303 | MallocHook::InvokeDeleteHook(p); | |
3304 | do_free(p); | |
3305 | } | |
3306 | ||
3307 | extern "C" void* memalign(size_t align, size_t size) __THROW { | |
3308 | void* result = do_memalign(align, size); | |
3309 | MallocHook::InvokeNewHook(result, size); | |
3310 | return result; | |
3311 | } | |
3312 | ||
3313 | extern "C" int posix_memalign(void** result_ptr, size_t align, size_t size) | |
3314 | __THROW { | |
3315 | if (((align % sizeof(void*)) != 0) || | |
3316 | ((align & (align - 1)) != 0) || | |
3317 | (align == 0)) { | |
3318 | return EINVAL; | |
3319 | } | |
3320 | ||
3321 | void* result = do_memalign(align, size); | |
3322 | MallocHook::InvokeNewHook(result, size); | |
3323 | if (result == NULL) { | |
3324 | return ENOMEM; | |
3325 | } else { | |
3326 | *result_ptr = result; | |
3327 | return 0; | |
3328 | } | |
3329 | } | |
3330 | ||
3331 | static size_t pagesize = 0; | |
3332 | ||
3333 | extern "C" void* valloc(size_t size) __THROW { | |
3334 | // Allocate page-aligned object of length >= size bytes | |
3335 | if (pagesize == 0) pagesize = getpagesize(); | |
3336 | void* result = do_memalign(pagesize, size); | |
3337 | MallocHook::InvokeNewHook(result, size); | |
3338 | return result; | |
3339 | } | |
3340 | ||
3341 | extern "C" void* pvalloc(size_t size) __THROW { | |
3342 | // Round up size to a multiple of pagesize | |
3343 | if (pagesize == 0) pagesize = getpagesize(); | |
3344 | size = (size + pagesize - 1) & ~(pagesize - 1); | |
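// (e.g. with a 4096-byte page, a 5000-byte request is rounded up to 8192.)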
3345 | void* result = do_memalign(pagesize, size); | |
3346 | MallocHook::InvokeNewHook(result, size); | |
3347 | return result; | |
3348 | } | |
3349 | ||
3350 | extern "C" void malloc_stats(void) { | |
3351 | do_malloc_stats(); | |
3352 | } | |
3353 | ||
3354 | extern "C" int mallopt(int cmd, int value) { | |
3355 | return do_mallopt(cmd, value); | |
3356 | } | |
3357 | ||
3358 | #ifdef HAVE_STRUCT_MALLINFO | |
3359 | extern "C" struct mallinfo mallinfo(void) { | |
3360 | return do_mallinfo(); | |
3361 | } | |
3362 | #endif | |
3363 | ||
3364 | //------------------------------------------------------------------- | |
3365 | // Some library routines on RedHat 9 allocate memory using malloc() | |
3366 | // and free it using __libc_free() (or vice-versa). Since we provide | |
3367 | // our own implementations of malloc/free, we need to make sure that | |
3368 | // the __libc_XXX variants (defined as part of glibc) also point to | |
3369 | // the same implementations. | |
3370 | //------------------------------------------------------------------- | |
3371 | ||
3372 | #if defined(__GLIBC__) | |
3373 | extern "C" { | |
3374 | # if defined(__GNUC__) && !defined(__MACH__) && defined(HAVE___ATTRIBUTE__) | |
3375 | // Potentially faster variants that use the gcc alias extension. | |
3376 | // Mach-O (Darwin) does not support weak aliases, hence the __MACH__ check. | |
3377 | # define ALIAS(x) __attribute__ ((weak, alias (x))) | |
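// For example, the first declaration below expands to
//   void* __libc_malloc(size_t size) __attribute__ ((weak, alias ("malloc")));
// making __libc_malloc a weak alias for this file's malloc.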
3378 | void* __libc_malloc(size_t size) ALIAS("malloc"); | |
3379 | void __libc_free(void* ptr) ALIAS("free"); | |
3380 | void* __libc_realloc(void* ptr, size_t size) ALIAS("realloc"); | |
3381 | void* __libc_calloc(size_t n, size_t size) ALIAS("calloc"); | |
3382 | void __libc_cfree(void* ptr) ALIAS("cfree"); | |
3383 | void* __libc_memalign(size_t align, size_t s) ALIAS("memalign"); | |
3384 | void* __libc_valloc(size_t size) ALIAS("valloc"); | |
3385 | void* __libc_pvalloc(size_t size) ALIAS("pvalloc"); | |
3386 | int __posix_memalign(void** r, size_t a, size_t s) ALIAS("posix_memalign"); | |
3387 | # undef ALIAS | |
3388 | # else /* not __GNUC__ */ | |
3389 | // Portable wrappers | |
3390 | void* __libc_malloc(size_t size) { return malloc(size); } | |
3391 | void __libc_free(void* ptr) { free(ptr); } | |
3392 | void* __libc_realloc(void* ptr, size_t size) { return realloc(ptr, size); } | |
3393 | void* __libc_calloc(size_t n, size_t size) { return calloc(n, size); } | |
3394 | void __libc_cfree(void* ptr) { cfree(ptr); } | |
3395 | void* __libc_memalign(size_t align, size_t s) { return memalign(align, s); } | |
3396 | void* __libc_valloc(size_t size) { return valloc(size); } | |
3397 | void* __libc_pvalloc(size_t size) { return pvalloc(size); } | |
3398 | int __posix_memalign(void** r, size_t a, size_t s) { | |
3399 | return posix_memalign(r, a, s); | |
3400 | } | |
3401 | # endif /* __GNUC__ */ | |
3402 | } | |
3403 | #endif /* __GLIBC__ */ | |
3404 | ||
3405 | // Override __libc_memalign in libc on Linux systems as a special case. | |
3406 | // Some libc versions have a bug that causes them to (very rarely) allocate | |
3407 | // with __libc_memalign() yet deallocate with free(), and the | |
3408 | // definitions above don't catch it. | |
3409 | // This function is an exception to the rule of calling MallocHook method | |
3410 | // from the stack frame of the allocation function; | |
3411 | // heap-checker handles this special case explicitly. | |
3412 | static void *MemalignOverride(size_t align, size_t size, const void *caller) | |
3413 | __THROW { | |
3414 | void* result = do_memalign(align, size); | |
3415 | MallocHook::InvokeNewHook(result, size); | |
3416 | return result; | |
3417 | } | |
3418 | void *(*__memalign_hook)(size_t, size_t, const void *) = MemalignOverride; | |
3419 | ||
3420 | #endif | |
3421 | ||
3422 | #if defined(WTF_CHANGES) && PLATFORM(DARWIN) | |
3423 | #include <wtf/HashSet.h> | |
3424 | ||
3425 | class FreeObjectFinder { | |
3426 | const RemoteMemoryReader& m_reader; | |
3427 | HashSet<void*> m_freeObjects; | |
3428 | ||
3429 | public: | |
3430 | FreeObjectFinder(const RemoteMemoryReader& reader) : m_reader(reader) { } | |
3431 | ||
3432 | void visit(void* ptr) { m_freeObjects.add(ptr); } | |
3433 | bool isFreeObject(void* ptr) const { return m_freeObjects.contains(ptr); } | |
3434 | size_t freeObjectCount() const { return m_freeObjects.size(); } | |
3435 | ||
3436 | void findFreeObjects(TCMalloc_ThreadCache* threadCache) | |
3437 | { | |
3438 | for (; threadCache; threadCache = (threadCache->next_ ? m_reader(threadCache->next_) : 0)) | |
3439 | threadCache->enumerateFreeObjects(*this, m_reader); | |
3440 | } | |
3441 | ||
3442 | void findFreeObjects(TCMalloc_Central_FreeListPadded* centralFreeList, size_t numSizes) | |
3443 | { | |
3444 | for (unsigned i = 0; i < numSizes; i++) | |
3445 | centralFreeList[i].enumerateFreeObjects(*this, m_reader); | |
3446 | } | |
3447 | }; | |
3448 | ||
3449 | class PageMapFreeObjectFinder { | |
3450 | const RemoteMemoryReader& m_reader; | |
3451 | FreeObjectFinder& m_freeObjectFinder; | |
3452 | ||
3453 | public: | |
3454 | PageMapFreeObjectFinder(const RemoteMemoryReader& reader, FreeObjectFinder& freeObjectFinder) | |
3455 | : m_reader(reader) | |
3456 | , m_freeObjectFinder(freeObjectFinder) | |
3457 | { } | |
3458 | ||
3459 | int visit(void* ptr) const | |
3460 | { | |
3461 | if (!ptr) | |
3462 | return 1; | |
3463 | ||
3464 | Span* span = m_reader(reinterpret_cast<Span*>(ptr)); | |
3465 | if (span->free) { | |
3466 | void* spanStart = reinterpret_cast<void*>(span->start << kPageShift); | |
3467 | m_freeObjectFinder.visit(spanStart); | |
3468 | } else if (span->sizeclass) { | |
3469 | // Walk the free list of the small-object span, keeping track of each object seen | |
3470 | for (void* nextObject = span->objects; nextObject; nextObject = *m_reader(reinterpret_cast<void**>(nextObject))) | |
3471 | m_freeObjectFinder.visit(nextObject); | |
3472 | } | |
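// The return value is the span's length in pages; the page map visitor
// presumably uses it to skip past the remaining entries covered by this span.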
3473 | return span->length; | |
3474 | } | |
3475 | }; | |
3476 | ||
3477 | class PageMapMemoryUsageRecorder { | |
3478 | task_t m_task; | |
3479 | void* m_context; | |
3480 | unsigned m_typeMask; | |
3481 | vm_range_recorder_t* m_recorder; | |
3482 | const RemoteMemoryReader& m_reader; | |
3483 | const FreeObjectFinder& m_freeObjectFinder; | |
3484 | mutable HashSet<void*> m_seenPointers; | |
3485 | ||
3486 | public: | |
3487 | PageMapMemoryUsageRecorder(task_t task, void* context, unsigned typeMask, vm_range_recorder_t* recorder, const RemoteMemoryReader& reader, const FreeObjectFinder& freeObjectFinder) | |
3488 | : m_task(task) | |
3489 | , m_context(context) | |
3490 | , m_typeMask(typeMask) | |
3491 | , m_recorder(recorder) | |
3492 | , m_reader(reader) | |
3493 | , m_freeObjectFinder(freeObjectFinder) | |
3494 | { } | |
3495 | ||
3496 | int visit(void* ptr) const | |
3497 | { | |
3498 | if (!ptr) | |
3499 | return 1; | |
3500 | ||
3501 | Span* span = m_reader(reinterpret_cast<Span*>(ptr)); | |
3502 | if (m_seenPointers.contains(ptr)) | |
3503 | return span->length; | |
3504 | m_seenPointers.add(ptr); | |
3505 | ||
3506 | // Mark the memory used for the Span itself as an administrative region | |
3507 | vm_range_t ptrRange = { reinterpret_cast<vm_address_t>(ptr), sizeof(Span) }; | |
3508 | if (m_typeMask & (MALLOC_PTR_REGION_RANGE_TYPE | MALLOC_ADMIN_REGION_RANGE_TYPE)) | |
3509 | (*m_recorder)(m_task, m_context, MALLOC_ADMIN_REGION_RANGE_TYPE, &ptrRange, 1); | |
3510 | ||
3511 | ptrRange.address = span->start << kPageShift; | |
3512 | ptrRange.size = span->length * kPageSize; | |
3513 | ||
3514 | // Mark the memory region the span represents as a candidate for containing pointers | |
3515 | if (m_typeMask & (MALLOC_PTR_REGION_RANGE_TYPE | MALLOC_ADMIN_REGION_RANGE_TYPE)) | |
3516 | (*m_recorder)(m_task, m_context, MALLOC_PTR_REGION_RANGE_TYPE, &ptrRange, 1); | |
3517 | ||
3518 | if (!span->free && (m_typeMask & MALLOC_PTR_IN_USE_RANGE_TYPE)) { | |
3519 | // If it's an allocated large object span, mark it as in use | |
3520 | if (span->sizeclass == 0 && !m_freeObjectFinder.isFreeObject(reinterpret_cast<void*>(ptrRange.address))) | |
3521 | (*m_recorder)(m_task, m_context, MALLOC_PTR_IN_USE_RANGE_TYPE, &ptrRange, 1); | |
3522 | else if (span->sizeclass) { | |
3523 | const size_t byteSize = ByteSizeForClass(span->sizeclass); | |
3524 | unsigned totalObjects = (span->length << kPageShift) / byteSize; | |
3525 | ASSERT(span->refcount <= totalObjects); | |
3526 | char* spanStart = reinterpret_cast<char*>(span->start << kPageShift); | |
3527 | ||
3528 | // Mark each allocated small object within the span as in use | |
3529 | for (unsigned i = 0; i < totalObjects; i++) { | |
3530 | char* thisObject = spanStart + (i * byteSize); | |
3531 | if (m_freeObjectFinder.isFreeObject(thisObject)) | |
3532 | continue; | |
3533 | ||
3534 | vm_range_t objectRange = { reinterpret_cast<vm_address_t>(thisObject), byteSize }; | |
3535 | (*m_recorder)(m_task, m_context, MALLOC_PTR_IN_USE_RANGE_TYPE, &objectRange, 1); | |
3536 | } | |
3537 | } | |
3538 | } | |
3539 | ||
3540 | return span->length; | |
3541 | } | |
3542 | }; | |
3543 | ||
3544 | kern_return_t FastMallocZone::enumerate(task_t task, void* context, unsigned typeMask, vm_address_t zoneAddress, memory_reader_t reader, vm_range_recorder_t recorder) | |
3545 | { | |
3546 | RemoteMemoryReader memoryReader(task, reader); | |
3547 | ||
3548 | InitSizeClasses(); | |
3549 | ||
3550 | FastMallocZone* mzone = memoryReader(reinterpret_cast<FastMallocZone*>(zoneAddress)); | |
3551 | TCMalloc_PageHeap* pageHeap = memoryReader(mzone->m_pageHeap); | |
3552 | TCMalloc_ThreadCache** threadHeapsPointer = memoryReader(mzone->m_threadHeaps); | |
3553 | TCMalloc_ThreadCache* threadHeaps = memoryReader(*threadHeapsPointer); | |
3554 | ||
3555 | TCMalloc_Central_FreeListPadded* centralCaches = memoryReader(mzone->m_centralCaches, sizeof(TCMalloc_Central_FreeListPadded) * kNumClasses); | |
3556 | ||
3557 | FreeObjectFinder finder(memoryReader); | |
3558 | finder.findFreeObjects(threadHeaps); | |
3559 | finder.findFreeObjects(centralCaches, kNumClasses); | |
3560 | ||
3561 | TCMalloc_PageHeap::PageMap* pageMap = &pageHeap->pagemap_; | |
3562 | PageMapFreeObjectFinder pageMapFinder(memoryReader, finder); | |
3563 | pageMap->visit(pageMapFinder, memoryReader); | |
3564 | ||
3565 | PageMapMemoryUsageRecorder usageRecorder(task, context, typeMask, recorder, memoryReader, finder); | |
3566 | pageMap->visit(usageRecorder, memoryReader); | |
3567 | ||
3568 | return 0; | |
3569 | } | |
3570 | ||
3571 | size_t FastMallocZone::size(malloc_zone_t*, const void*) | |
3572 | { | |
3573 | return 0; | |
3574 | } | |
3575 | ||
3576 | void* FastMallocZone::zoneMalloc(malloc_zone_t*, size_t) | |
3577 | { | |
3578 | return 0; | |
3579 | } | |
3580 | ||
3581 | void* FastMallocZone::zoneCalloc(malloc_zone_t*, size_t, size_t) | |
3582 | { | |
3583 | return 0; | |
3584 | } | |
3585 | ||
3586 | void FastMallocZone::zoneFree(malloc_zone_t*, void* ptr) | |
3587 | { | |
3588 | // Due to <rdar://problem/5671357> zoneFree may be called by the system free even if the pointer | |
3589 | // is not in this zone. When this happens, the pointer being freed was not allocated by any | |
3590 | // zone so we need to print a useful error for the application developer. | |
3591 | malloc_printf("*** error for object %p: pointer being freed was not allocated\n", ptr); | |
3592 | } | |
3593 | ||
3594 | void* FastMallocZone::zoneRealloc(malloc_zone_t*, void*, size_t) | |
3595 | { | |
3596 | return 0; | |
3597 | } | |
3598 | ||
3599 | ||
3600 | #undef malloc | |
3601 | #undef free | |
3602 | #undef realloc | |
3603 | #undef calloc | |
3604 | ||
3605 | extern "C" { | |
3606 | malloc_introspection_t jscore_fastmalloc_introspection = { &FastMallocZone::enumerate, &FastMallocZone::goodSize, &FastMallocZone::check, &FastMallocZone::print, | |
3607 | &FastMallocZone::log, &FastMallocZone::forceLock, &FastMallocZone::forceUnlock, &FastMallocZone::statistics }; | |
3608 | } | |
3609 | ||
3610 | FastMallocZone::FastMallocZone(TCMalloc_PageHeap* pageHeap, TCMalloc_ThreadCache** threadHeaps, TCMalloc_Central_FreeListPadded* centralCaches) | |
3611 | : m_pageHeap(pageHeap) | |
3612 | , m_threadHeaps(threadHeaps) | |
3613 | , m_centralCaches(centralCaches) | |
3614 | { | |
3615 | memset(&m_zone, 0, sizeof(m_zone)); | |
3616 | m_zone.zone_name = "JavaScriptCore FastMalloc"; | |
3617 | m_zone.size = &FastMallocZone::size; | |
3618 | m_zone.malloc = &FastMallocZone::zoneMalloc; | |
3619 | m_zone.calloc = &FastMallocZone::zoneCalloc; | |
3620 | m_zone.realloc = &FastMallocZone::zoneRealloc; | |
3621 | m_zone.free = &FastMallocZone::zoneFree; | |
3622 | m_zone.valloc = &FastMallocZone::zoneValloc; | |
3623 | m_zone.destroy = &FastMallocZone::zoneDestroy; | |
3624 | m_zone.introspect = &jscore_fastmalloc_introspection; | |
3625 | malloc_zone_register(&m_zone); | |
3626 | } | |
3627 | ||
3628 | ||
3629 | void FastMallocZone::init() | |
3630 | { | |
3631 | static FastMallocZone zone(pageheap, &thread_heaps, static_cast<TCMalloc_Central_FreeListPadded*>(central_cache)); | |
3632 | } | |
3633 | ||
f4e78d34 A |
3634 | #endif |
3635 | ||
b37bf2e1 A |
3636 | void releaseFastMallocFreeMemory() |
3637 | { | |
3638 | SpinLockHolder h(&pageheap_lock); | |
3639 | pageheap->ReleaseFreePages(); | |
3640 | } | |
b37bf2e1 A |
3641 | |
3642 | #if WTF_CHANGES | |
3643 | } // namespace WTF | |
3644 | #endif | |
3645 | ||
f4e78d34 | 3646 | #endif // FORCE_SYSTEM_MALLOC |